/*
 * Copyright (C) 2007 Oracle. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#ifndef __DISKIO__
#define __DISKIO__

#define BTRFS_SUPER_INFO_OFFSET SZ_64K
#define BTRFS_SUPER_INFO_SIZE 4096

#define BTRFS_SUPER_MIRROR_MAX 3
#define BTRFS_SUPER_MIRROR_SHIFT 12

/*
 * Fixed blocksize for all devices, applies to specific ways of reading
 * metadata like the superblock. Must meet the set_blocksize requirements.
 *
 * Do not change.
 */
#define BTRFS_BDEV_BLOCKSIZE (4096)

enum btrfs_wq_endio_type {
	BTRFS_WQ_ENDIO_DATA = 0,
	BTRFS_WQ_ENDIO_METADATA = 1,
	BTRFS_WQ_ENDIO_FREE_SPACE = 2,
	BTRFS_WQ_ENDIO_RAID56 = 3,
	BTRFS_WQ_ENDIO_DIO_REPAIR = 4,
};

static inline u64 btrfs_sb_offset(int mirror)
{
	u64 start = SZ_16K;

	if (mirror)
		return start << (BTRFS_SUPER_MIRROR_SHIFT * mirror);
	return BTRFS_SUPER_INFO_OFFSET;
}
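
/*
 * With the constants above, the superblock copies land at 64KiB
 * (mirror 0, BTRFS_SUPER_INFO_OFFSET), 16KiB << 12 = 64MiB (mirror 1)
 * and 16KiB << 24 = 256GiB (mirror 2).
 */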

struct btrfs_device;
struct btrfs_fs_devices;

struct extent_buffer *read_tree_block(struct btrfs_fs_info *fs_info,
				      u64 bytenr, u64 parent_transid);
void readahead_tree_block(struct btrfs_fs_info *fs_info, u64 bytenr);
int reada_tree_block_flagged(struct btrfs_fs_info *fs_info, u64 bytenr,
			     int mirror_num, struct extent_buffer **eb);
struct extent_buffer *btrfs_find_create_tree_block(
						struct btrfs_fs_info *fs_info,
						u64 bytenr);

void clean_tree_block(struct btrfs_fs_info *fs_info, struct extent_buffer *buf);
int open_ctree(struct super_block *sb,
	       struct btrfs_fs_devices *fs_devices,
	       char *options);
void close_ctree(struct btrfs_fs_info *fs_info);
int write_all_supers(struct btrfs_fs_info *fs_info, int max_mirrors);
struct buffer_head *btrfs_read_dev_super(struct block_device *bdev);
int btrfs_read_dev_one_super(struct block_device *bdev, int copy_num,
			     struct buffer_head **bh_ret);
int btrfs_commit_super(struct btrfs_fs_info *fs_info);
struct btrfs_root *btrfs_read_fs_root(struct btrfs_root *tree_root,
				      struct btrfs_key *location);
int btrfs_init_fs_root(struct btrfs_root *root);
struct btrfs_root *btrfs_lookup_fs_root(struct btrfs_fs_info *fs_info,
					u64 root_id);
int btrfs_insert_fs_root(struct btrfs_fs_info *fs_info,
			 struct btrfs_root *root);
void btrfs_free_fs_roots(struct btrfs_fs_info *fs_info);

struct btrfs_root *btrfs_get_fs_root(struct btrfs_fs_info *fs_info,
				     struct btrfs_key *key,
				     bool check_ref);
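
/*
 * The wrapper below passes check_ref == true, which is expected to make the
 * lookup return -ENOENT for roots whose root item refcount has already gone
 * to zero (e.g. deleted subvolumes); see btrfs_get_fs_root() in disk-io.c.
 */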
static inline struct btrfs_root *
btrfs_read_fs_root_no_name(struct btrfs_fs_info *fs_info,
			   struct btrfs_key *location)
{
	return btrfs_get_fs_root(fs_info, location, true);
}

int btrfs_cleanup_fs_roots(struct btrfs_fs_info *fs_info);
void btrfs_btree_balance_dirty(struct btrfs_fs_info *fs_info);
void btrfs_btree_balance_dirty_nodelay(struct btrfs_fs_info *fs_info);
void btrfs_drop_and_free_fs_root(struct btrfs_fs_info *fs_info,
				 struct btrfs_root *root);
void btrfs_free_fs_root(struct btrfs_root *root);

#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
struct btrfs_root *btrfs_alloc_dummy_root(struct btrfs_fs_info *fs_info);
#endif

/*
 * This function is used to grab a reference on the root, so that it cannot
 * be freed while we access it. It does not, however, ensure that the tree
 * the root points to is not dropped.
 *
 * If you want to ensure the whole tree is safe, you should use
 * fs_info->subvol_srcu.
 */
static inline struct btrfs_root *btrfs_grab_fs_root(struct btrfs_root *root)
{
	if (refcount_inc_not_zero(&root->refs))
		return root;
	return NULL;
}

static inline void btrfs_put_fs_root(struct btrfs_root *root)
{
	if (refcount_dec_and_test(&root->refs))
		kfree(root);
}
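
/*
 * Illustrative sketch of the intended usage, assuming the caller already has
 * an fs_info and knows the root objectid (variable names here are examples
 * only):
 *
 *	int index = srcu_read_lock(&fs_info->subvol_srcu);
 *	struct btrfs_root *root = btrfs_lookup_fs_root(fs_info, objectid);
 *
 *	if (root)
 *		root = btrfs_grab_fs_root(root);  // NULL if already gone
 *	srcu_read_unlock(&fs_info->subvol_srcu, index);
 *
 *	if (root) {
 *		// ... safe to access the root here ...
 *		btrfs_put_fs_root(root);
 *	}
 */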

void btrfs_mark_buffer_dirty(struct extent_buffer *buf);
int btrfs_buffer_uptodate(struct extent_buffer *buf, u64 parent_transid,
			  int atomic);
int btrfs_read_buffer(struct extent_buffer *buf, u64 parent_transid);
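
/*
 * In both helpers above, parent_transid is the transaction id recorded in
 * the parent node's pointer; the block's generation is checked against it
 * after the read, and passing 0 is expected to skip that check.
 */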

u32 btrfs_csum_data(const char *data, u32 seed, size_t len);
void btrfs_csum_final(u32 crc, u8 *result);
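
/*
 * Typical usage sketch (names other than the two helpers are examples):
 *
 *	u32 crc = ~(u32)0;
 *
 *	crc = btrfs_csum_data(data, crc, len);
 *	btrfs_csum_final(crc, csum_result);
 *
 * i.e. the running crc is passed back in as the seed, then folded into its
 * on-disk byte representation.
 */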

blk_status_t btrfs_bio_wq_end_io(struct btrfs_fs_info *info, struct bio *bio,
			enum btrfs_wq_endio_type metadata);
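
/*
 * The btrfs_wq_endio_type argument selects which end-io workqueue completes
 * the bio (data, metadata, free-space cache, raid56 or direct-IO repair), so
 * the heavier completion work runs in process context instead of the bio's
 * interrupt context.
 */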

blk_status_t btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct bio *bio,
			int mirror_num, unsigned long bio_flags,
			u64 bio_offset, void *private_data,
			extent_submit_bio_hook_t *submit_bio_start,
			extent_submit_bio_hook_t *submit_bio_done);
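
/*
 * Rough sketch of the async submit path: the bio is handed to a worker,
 * submit_bio_start runs the CPU-intensive part (such as checksumming) and
 * submit_bio_done then issues the bio, with the ordered workqueue keeping
 * bios in submission order.
 */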

unsigned long btrfs_async_submit_limit(struct btrfs_fs_info *info);
int btrfs_write_tree_block(struct extent_buffer *buf);
void btrfs_wait_tree_block_writeback(struct extent_buffer *buf);
int btrfs_init_log_root_tree(struct btrfs_trans_handle *trans,
			     struct btrfs_fs_info *fs_info);
int btrfs_add_log_tree(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root);
void btrfs_cleanup_dirty_bgs(struct btrfs_transaction *trans,
			     struct btrfs_fs_info *fs_info);
void btrfs_cleanup_one_transaction(struct btrfs_transaction *trans,
				   struct btrfs_fs_info *fs_info);
struct btrfs_root *btrfs_create_tree(struct btrfs_trans_handle *trans,
				     struct btrfs_fs_info *fs_info,
				     u64 objectid);
int btree_lock_page_hook(struct page *page, void *data,
			 void (*flush_fn)(void *));
int btrfs_get_num_tolerated_disk_barrier_failures(u64 flags);
int __init btrfs_end_io_wq_init(void);
void btrfs_end_io_wq_exit(void);

#ifdef CONFIG_DEBUG_LOCK_ALLOC
void btrfs_init_lockdep(void);
void btrfs_set_buffer_lockdep_class(u64 objectid,
				    struct extent_buffer *eb, int level);
#else
static inline void btrfs_init_lockdep(void)
{ }
static inline void btrfs_set_buffer_lockdep_class(u64 objectid,
					struct extent_buffer *eb, int level)
{
}
#endif

#endif