Mirror of https://github.com/armbian/linux-cix.git (synced 2026-01-06 12:30:45 -08:00)
Merge tag 'for-5.20-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/kdave/linux
Pull btrfs updates from David Sterba:
"This brings some long awaited changes, the send protocol bump,
otherwise lots of small improvements and fixes. The main core part is
reworking bio handling, cleaning up the submission and endio and
improving error handling.
There are some changes outside of btrfs adding helpers or updating
API, listed at the end of the changelog.
Features:

 - sysfs:
     - export chunk size; in debug mode, add a tunable for setting its size
     - show zoned among features (was previously shown only in debug mode)
     - show commit stats (number, last/max/total duration)

 - send protocol updated to 2
     - new commands:
         - ability to write larger data chunks than 64K
         - send raw compressed extents (uses the encoded data ioctls;
           see the sketch after this list), i.e. no decompression on
           the send side and no compression needed on the receive side
           if supported
         - send 'otime' (inode creation time) among the other timestamps
         - send file attributes (a.k.a. file flags and xflags)
     - this is the first version bump; backward compatibility is
       provided on both the send and receive side
     - there are still some known and wanted commands that will be
       implemented in the near future; another version bump will be
       needed, but we want to minimize that to avoid causing usability
       issues

 - print checksum type and implementation at mount time

 - don't print some messages at mount (people asked about this); we
   mainly want to print messages for new features, so let's make some
   space for that
     - big metadata: this has been supported for a long time and is
       not a feature worth mentioning
     - skinny metadata: same reason, set by default by mkfs
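As background for the raw compressed send above, here is a minimal
userspace sketch of the encoded read mechanism that send stream v2
builds on. It assumes a kernel exposing BTRFS_IOC_ENCODED_READ and
struct btrfs_ioctl_encoded_io_args in linux/btrfs.h (merged in 5.18),
requires CAP_SYS_ADMIN, and trims error handling:

    /* Read the first extent of a file in its encoded (possibly
     * compressed) on-disk form; send v2 forwards such extents without
     * a decompress/recompress round trip. */
    #include <stdio.h>
    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <sys/uio.h>
    #include <linux/btrfs.h>

    int main(int argc, char **argv)
    {
            static char buf[128 * 1024];    /* BTRFS_MAX_COMPRESSED is 128K */
            struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
            struct btrfs_ioctl_encoded_io_args args = {
                    .iov = &iov,
                    .iovcnt = 1,
                    .offset = 0,            /* logical file offset to read */
            };
            int fd;
            ssize_t ret;

            if (argc < 2)
                    return 1;
            fd = open(argv[1], O_RDONLY);
            if (fd < 0)
                    return 1;
            ret = ioctl(fd, BTRFS_IOC_ENCODED_READ, &args);
            if (ret >= 0)
                    printf("%zd encoded bytes, compression %u, unencoded len %llu\n",
                           ret, args.compression,
                           (unsigned long long)args.unencoded_len);
            close(fd);
            return ret < 0;
    }

On success the ioctl returns the number of encoded bytes placed in the
buffer, and the args struct reports the compression type and unencoded
length that a v2 send stream can forward verbatim.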
Performance improvements:

 - reduced amount of metadata reserved for delayed items
     - when inserted items can be batched into one leaf
     - when deleting batched directory index items
     - when deleting delayed items used for deletion

 - overall improved count of files/sec, decreased subvolume lock
   contention

 - metadata item access bounds checker micro-optimized, with a few
   percent of improved runtime for metadata-heavy operations

 - increased the direct IO limit for reads to 256 sectors, which
   improved throughput by 3x on a sample workload
Notable fixes:

 - raid56
     - reduce parity writes, skip sectors of a stripe when there are no
       data updates
     - restore reading from on-disk data instead of the stripe cache;
       this reduces the chance of damaging correct data due to an RMW
       cycle

 - refuse to replay a log with an unknown incompat read-only feature
   bit set

 - zoned
     - fix page locking when COW fails in the middle of allocation
     - improved tracking of active zones; ZNS drives may limit their
       number, and the resulting ENOSPC errors are due to that limit
       rather than an actual lack of space
     - adjust the maximum extent size for zone append so it does not
       cause late ENOSPC due to under-reservation (see the helper
       sketch after this list)

 - mirror reading error messages show the mirror number

 - don't fall back to buffered IO for NOWAIT direct IO writes; we don't
   have NOWAIT semantics for buffered IO yet

 - send: fix sending link commands for existing file paths when there
   are deleted and created hardlinks for the same files

 - repair all mirrors for profiles with more than 1 copy (raid1c34)

 - fix repair of compressed extents, unify where error detection and
   repair happen
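The under-reservation behind the zone append fix above comes down to a
counting helper. A simplified, self-contained sketch of the idea (the
series reworks the real count_max_extents() in ctree.h, visible in the
diff below); nr_max_extents() is an illustrative name, not the kernel's:

    #include <stdint.h>

    /* How many maximally-sized extents cover @size bytes of delalloc.
     * On zoned devices the per-extent cap can be much smaller than the
     * regular BTRFS_MAX_EXTENT_SIZE (128M), so counting with the
     * constant under-reserved metadata and led to the late ENOSPC. */
    static inline uint32_t nr_max_extents(uint64_t size, uint64_t max_extent_size)
    {
            /* Round up: a partial tail extent still needs a reservation. */
            return (uint32_t)((size + max_extent_size - 1) / max_extent_size);
    }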
Core changes:

 - bio completion cleanups
     - don't double defer compression bios
     - simplify endio workqueues
     - add more data to btrfs_bio to avoid allocation for read requests
     - rework bio error handling so it matches what the block layer
       does: submission always succeeds and errors are consumed in
       endio (see the sketch after this list)
     - when asynchronous bio offload fails, fall back to synchronous
       checksum calculation to avoid errors under writeback or memory
       pressure

 - new trace points
     - raid56 events
     - ordered extent operations

 - super block log_root_transid deprecated (it was never used)

 - mixed_backref and big_metadata sysfs feature files removed; they
   have been the default for a sufficiently long time, there are no
   known users, and mixed_backref could be confused with mixed_groups
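A short sketch of the reworked error convention above: submission no
longer returns an error to the caller; a failure is recorded in
bi_status and the bio is completed through its endio handler, just like
the block layer does (compare the run_one_async_done() and
btrfs_submit_metadata_bio() hunks below). prepare_one_bio() here is a
hypothetical stand-in for any per-bio setup step such as checksumming:

    static void submit_one_bio_sketch(struct btrfs_fs_info *fs_info,
                                      struct bio *bio, int mirror_num)
    {
            blk_status_t ret = prepare_one_bio(bio);  /* hypothetical setup */

            if (ret) {
                    bio->bi_status = ret;  /* record the error ... */
                    bio_endio(bio);        /* ... and consume it in endio */
                    return;
            }
            btrfs_submit_bio(fs_info, bio, mirror_num);  /* does not fail */
    }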
Non-btrfs changes, API updates:

 - minor highmem API update to cover const arguments

 - switch all kmap/kmap_atomic to kmap_local

 - remove redundant flush_dcache_page()

 - address_space_operations::writepage callback removed

 - add bdev_max_segments() helper (sketched below)"
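The new block-layer helper mentioned last is tiny; a sketch of its
likely shape, assuming it simply wraps queue_max_segments() on the
device's request queue (btrfs zoned code uses it to clamp bio sizes):

    #include <linux/blkdev.h>

    static inline unsigned short bdev_max_segments(struct block_device *bdev)
    {
            /* Upper bound on the number of segments a bio to this
             * device may carry. */
            return queue_max_segments(bdev_get_queue(bdev));
    }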
* tag 'for-5.20-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/kdave/linux: (163 commits)
btrfs: don't call btrfs_page_set_checked in finish_compressed_bio_read
btrfs: fix repair of compressed extents
btrfs: remove the start argument to check_data_csum and export
btrfs: pass a btrfs_bio to btrfs_repair_one_sector
btrfs: simplify the pending I/O counting in struct compressed_bio
btrfs: repair all known bad mirrors
btrfs: merge btrfs_dev_stat_print_on_error with its only caller
btrfs: join running log transaction when logging new name
btrfs: simplify error handling in btrfs_lookup_dentry
btrfs: send: always use the rbtree based inode ref management infrastructure
btrfs: send: fix sending link commands for existing file paths
btrfs: send: introduce recorded_ref_alloc and recorded_ref_free
btrfs: zoned: wait until zone is finished when allocation didn't progress
btrfs: zoned: write out partially allocated region
btrfs: zoned: activate necessary block group
btrfs: zoned: activate metadata block group on flush_space
btrfs: zoned: disable metadata overcommit for zoned
btrfs: zoned: introduce space_info->active_total_bytes
btrfs: zoned: finish least available block group on data bg allocation
btrfs: let can_allocate_chunk return error
...
@@ -22,7 +22,7 @@ void flush_kernel_icache_range_asm(unsigned long, unsigned long);
 void flush_user_dcache_range_asm(unsigned long, unsigned long);
 void flush_kernel_dcache_range_asm(unsigned long, unsigned long);
 void purge_kernel_dcache_range_asm(unsigned long, unsigned long);
-void flush_kernel_dcache_page_asm(void *);
+void flush_kernel_dcache_page_asm(const void *addr);
 void flush_kernel_icache_page(void *);
 
 /* Cache flush operations */
@@ -31,7 +31,7 @@ void flush_cache_all_local(void);
 void flush_cache_all(void);
 void flush_cache_mm(struct mm_struct *mm);
 
-void flush_kernel_dcache_page_addr(void *addr);
+void flush_kernel_dcache_page_addr(const void *addr);
 
 #define flush_kernel_dcache_range(start,size) \
 	flush_kernel_dcache_range_asm((start), (start)+(size));
@@ -75,7 +75,7 @@ void flush_dcache_page_asm(unsigned long phys_addr, unsigned long vaddr);
 void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr);
 
 #define ARCH_HAS_FLUSH_ON_KUNMAP
-static inline void kunmap_flush_on_unmap(void *addr)
+static inline void kunmap_flush_on_unmap(const void *addr)
 {
 	flush_kernel_dcache_page_addr(addr);
 }
@@ -549,7 +549,7 @@ extern void purge_kernel_dcache_page_asm(unsigned long);
 extern void clear_user_page_asm(void *, unsigned long);
 extern void copy_user_page_asm(void *, void *, unsigned long);
 
-void flush_kernel_dcache_page_addr(void *addr)
+void flush_kernel_dcache_page_addr(const void *addr)
 {
 	unsigned long flags;
 
@@ -13,7 +13,6 @@ struct btrfs_fs_info;
 struct btrfs_workqueue;
 struct btrfs_work;
 typedef void (*btrfs_func_t)(struct btrfs_work *arg);
-typedef void (*btrfs_work_func_t)(struct work_struct *arg);
 
 struct btrfs_work {
 	btrfs_func_t func;
@@ -2028,10 +2028,29 @@ out:
 	return ret;
 }
 
+static int build_ino_list(u64 inum, u64 offset, u64 root, void *ctx)
+{
+	struct btrfs_data_container *inodes = ctx;
+	const size_t c = 3 * sizeof(u64);
+
+	if (inodes->bytes_left >= c) {
+		inodes->bytes_left -= c;
+		inodes->val[inodes->elem_cnt] = inum;
+		inodes->val[inodes->elem_cnt + 1] = offset;
+		inodes->val[inodes->elem_cnt + 2] = root;
+		inodes->elem_cnt += 3;
+	} else {
+		inodes->bytes_missing += c - inodes->bytes_left;
+		inodes->bytes_left = 0;
+		inodes->elem_missed += 3;
+	}
+
+	return 0;
+}
+
 int iterate_inodes_from_logical(u64 logical, struct btrfs_fs_info *fs_info,
 				struct btrfs_path *path,
-				iterate_extent_inodes_t *iterate, void *ctx,
-				bool ignore_offset)
+				void *ctx, bool ignore_offset)
 {
 	int ret;
 	u64 extent_item_pos;
@@ -2049,17 +2068,15 @@ int iterate_inodes_from_logical(u64 logical, struct btrfs_fs_info *fs_info,
 	extent_item_pos = logical - found_key.objectid;
 	ret = iterate_extent_inodes(fs_info, found_key.objectid,
 				extent_item_pos, search_commit_root,
-				iterate, ctx, ignore_offset);
+				build_ino_list, ctx, ignore_offset);
 
 	return ret;
 }
 
-typedef int (iterate_irefs_t)(u64 parent, u32 name_len, unsigned long name_off,
-			      struct extent_buffer *eb, void *ctx);
+static int inode_to_path(u64 inum, u32 name_len, unsigned long name_off,
+			 struct extent_buffer *eb, struct inode_fs_paths *ipath);
 
-static int iterate_inode_refs(u64 inum, struct btrfs_root *fs_root,
-			      struct btrfs_path *path,
-			      iterate_irefs_t *iterate, void *ctx)
+static int iterate_inode_refs(u64 inum, struct inode_fs_paths *ipath)
 {
 	int ret = 0;
 	int slot;
@@ -2068,6 +2085,8 @@ static int iterate_inode_refs(u64 inum, struct btrfs_root *fs_root,
 	u32 name_len;
 	u64 parent = 0;
 	int found = 0;
+	struct btrfs_root *fs_root = ipath->fs_root;
+	struct btrfs_path *path = ipath->btrfs_path;
 	struct extent_buffer *eb;
 	struct btrfs_inode_ref *iref;
 	struct btrfs_key found_key;
@@ -2103,8 +2122,8 @@ static int iterate_inode_refs(u64 inum, struct btrfs_root *fs_root,
 				"following ref at offset %u for inode %llu in tree %llu",
 				cur, found_key.objectid,
 				fs_root->root_key.objectid);
-			ret = iterate(parent, name_len,
-				      (unsigned long)(iref + 1), eb, ctx);
+			ret = inode_to_path(parent, name_len,
+				      (unsigned long)(iref + 1), eb, ipath);
 			if (ret)
 				break;
 			len = sizeof(*iref) + name_len;
@@ -2118,15 +2137,15 @@ static int iterate_inode_refs(u64 inum, struct btrfs_root *fs_root,
 	return ret;
 }
 
-static int iterate_inode_extrefs(u64 inum, struct btrfs_root *fs_root,
-				 struct btrfs_path *path,
-				 iterate_irefs_t *iterate, void *ctx)
+static int iterate_inode_extrefs(u64 inum, struct inode_fs_paths *ipath)
 {
 	int ret;
 	int slot;
 	u64 offset = 0;
 	u64 parent;
 	int found = 0;
+	struct btrfs_root *fs_root = ipath->fs_root;
+	struct btrfs_path *path = ipath->btrfs_path;
 	struct extent_buffer *eb;
 	struct btrfs_inode_extref *extref;
 	u32 item_size;
@@ -2162,8 +2181,8 @@ static int iterate_inode_extrefs(u64 inum, struct btrfs_root *fs_root,
 		extref = (struct btrfs_inode_extref *)(ptr + cur_offset);
 		parent = btrfs_inode_extref_parent(eb, extref);
 		name_len = btrfs_inode_extref_name_len(eb, extref);
-		ret = iterate(parent, name_len,
-			      (unsigned long)&extref->name, eb, ctx);
+		ret = inode_to_path(parent, name_len,
+			      (unsigned long)&extref->name, eb, ipath);
 		if (ret)
 			break;
 
@@ -2180,34 +2199,13 @@ static int iterate_inode_extrefs(u64 inum, struct btrfs_root *fs_root,
 	return ret;
 }
 
-static int iterate_irefs(u64 inum, struct btrfs_root *fs_root,
-			 struct btrfs_path *path, iterate_irefs_t *iterate,
-			 void *ctx)
-{
-	int ret;
-	int found_refs = 0;
-
-	ret = iterate_inode_refs(inum, fs_root, path, iterate, ctx);
-	if (!ret)
-		++found_refs;
-	else if (ret != -ENOENT)
-		return ret;
-
-	ret = iterate_inode_extrefs(inum, fs_root, path, iterate, ctx);
-	if (ret == -ENOENT && found_refs)
-		return 0;
-
-	return ret;
-}
-
 /*
  * returns 0 if the path could be dumped (probably truncated)
  * returns <0 in case of an error
  */
 static int inode_to_path(u64 inum, u32 name_len, unsigned long name_off,
-			 struct extent_buffer *eb, void *ctx)
+			 struct extent_buffer *eb, struct inode_fs_paths *ipath)
 {
-	struct inode_fs_paths *ipath = ctx;
 	char *fspath;
 	char *fspath_min;
 	int i = ipath->fspath->elem_cnt;
@@ -2248,8 +2246,20 @@ static int inode_to_path(u64 inum, u32 name_len, unsigned long name_off,
  */
 int paths_from_inode(u64 inum, struct inode_fs_paths *ipath)
 {
-	return iterate_irefs(inum, ipath->fs_root, ipath->btrfs_path,
-			     inode_to_path, ipath);
+	int ret;
+	int found_refs = 0;
+
+	ret = iterate_inode_refs(inum, ipath);
+	if (!ret)
+		++found_refs;
+	else if (ret != -ENOENT)
+		return ret;
+
+	ret = iterate_inode_extrefs(inum, ipath);
+	if (ret == -ENOENT && found_refs)
+		return 0;
+
+	return ret;
 }
 
 struct btrfs_data_container *init_data_container(u32 total_bytes)
@@ -35,8 +35,7 @@ int iterate_extent_inodes(struct btrfs_fs_info *fs_info,
 			  bool ignore_offset);
 
 int iterate_inodes_from_logical(u64 logical, struct btrfs_fs_info *fs_info,
-				struct btrfs_path *path,
-				iterate_extent_inodes_t *iterate, void *ctx,
+				struct btrfs_path *path, void *ctx,
 				bool ignore_offset);
 
 int paths_from_inode(u64 inum, struct inode_fs_paths *ipath);
@@ -1051,8 +1051,13 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
 			< block_group->zone_unusable);
 		WARN_ON(block_group->space_info->disk_total
 			< block_group->length * factor);
+		WARN_ON(block_group->zone_is_active &&
+			block_group->space_info->active_total_bytes
+			< block_group->length);
 	}
 	block_group->space_info->total_bytes -= block_group->length;
+	if (block_group->zone_is_active)
+		block_group->space_info->active_total_bytes -= block_group->length;
 	block_group->space_info->bytes_readonly -=
 		(block_group->length - block_group->zone_unusable);
 	block_group->space_info->bytes_zone_unusable -=
@@ -1816,11 +1821,10 @@ int btrfs_rmap_block(struct btrfs_fs_info *fs_info, u64 chunk_start,
 		stripe_nr = physical - map->stripes[i].physical;
 		stripe_nr = div64_u64_rem(stripe_nr, map->stripe_len, &offset);
 
-		if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
+		if (map->type & (BTRFS_BLOCK_GROUP_RAID0 |
+				 BTRFS_BLOCK_GROUP_RAID10)) {
 			stripe_nr = stripe_nr * map->num_stripes + i;
 			stripe_nr = div_u64(stripe_nr, map->sub_stripes);
-		} else if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
-			stripe_nr = stripe_nr * map->num_stripes + i;
 		}
 		/*
 		 * The remaining case would be for RAID56, multiply by
@@ -2108,7 +2112,8 @@ static int read_one_block_group(struct btrfs_fs_info *info,
 	trace_btrfs_add_block_group(info, cache, 0);
 	btrfs_update_space_info(info, cache->flags, cache->length,
 				cache->used, cache->bytes_super,
-				cache->zone_unusable, &space_info);
+				cache->zone_unusable, cache->zone_is_active,
+				&space_info);
 
 	cache->space_info = space_info;
 
@@ -2178,7 +2183,7 @@ static int fill_dummy_bgs(struct btrfs_fs_info *fs_info)
 		}
 
 		btrfs_update_space_info(fs_info, bg->flags, em->len, em->len,
-					0, 0, &space_info);
+					0, 0, false, &space_info);
 		bg->space_info = space_info;
 		link_block_group(bg);
 
@@ -2559,7 +2564,7 @@ struct btrfs_block_group *btrfs_make_block_group(struct btrfs_trans_handle *trans,
 	trace_btrfs_add_block_group(fs_info, cache, 1);
 	btrfs_update_space_info(fs_info, cache->flags, size, bytes_used,
 				cache->bytes_super, cache->zone_unusable,
-				&cache->space_info);
+				cache->zone_is_active, &cache->space_info);
 	btrfs_update_global_block_rsv(fs_info);
 
 	link_block_group(cache);
@@ -2659,6 +2664,14 @@ int btrfs_inc_block_group_ro(struct btrfs_block_group *cache,
 	ret = btrfs_chunk_alloc(trans, alloc_flags, CHUNK_ALLOC_FORCE);
 	if (ret < 0)
 		goto out;
+	/*
+	 * We have allocated a new chunk. We also need to activate that chunk to
+	 * grant metadata tickets for zoned filesystem.
+	 */
+	ret = btrfs_zoned_activate_one_bg(fs_info, cache->space_info, true);
+	if (ret < 0)
+		goto out;
+
 	ret = inc_block_group_ro(cache, 0);
 	if (ret == -ETXTBSY)
 		goto unlock_out;
@@ -3761,6 +3774,7 @@ int btrfs_chunk_alloc(struct btrfs_trans_handle *trans, u64 flags,
 			 * attempt.
 			 */
 			wait_for_alloc = true;
+			force = CHUNK_ALLOC_NO_FORCE;
 			spin_unlock(&space_info->lock);
 			mutex_lock(&fs_info->chunk_mutex);
 			mutex_unlock(&fs_info->chunk_mutex);
@@ -3883,6 +3897,14 @@ static void reserve_chunk_space(struct btrfs_trans_handle *trans,
 		if (IS_ERR(bg)) {
 			ret = PTR_ERR(bg);
 		} else {
+			/*
+			 * We have a new chunk. We also need to activate it for
+			 * zoned filesystem.
+			 */
+			ret = btrfs_zoned_activate_one_bg(fs_info, info, true);
+			if (ret < 0)
+				return;
+
 			/*
 			 * If we fail to add the chunk item here, we end up
 			 * trying again at phase 2 of chunk allocation, at
@@ -118,7 +118,7 @@ static u64 block_rsv_release_bytes(struct btrfs_fs_info *fs_info,
 	if (block_rsv->reserved >= block_rsv->size) {
 		num_bytes = block_rsv->reserved - block_rsv->size;
 		block_rsv->reserved = block_rsv->size;
-		block_rsv->full = 1;
+		block_rsv->full = true;
 	} else {
 		num_bytes = 0;
 	}
@@ -142,7 +142,7 @@ static u64 block_rsv_release_bytes(struct btrfs_fs_info *fs_info,
 			bytes_to_add = min(num_bytes, bytes_to_add);
 			dest->reserved += bytes_to_add;
 			if (dest->reserved >= dest->size)
-				dest->full = 1;
+				dest->full = true;
 			num_bytes -= bytes_to_add;
 		}
 		spin_unlock(&dest->lock);
@@ -171,7 +171,7 @@ int btrfs_block_rsv_migrate(struct btrfs_block_rsv *src,
 	return 0;
 }
 
-void btrfs_init_block_rsv(struct btrfs_block_rsv *rsv, unsigned short type)
+void btrfs_init_block_rsv(struct btrfs_block_rsv *rsv, enum btrfs_rsv_type type)
 {
 	memset(rsv, 0, sizeof(*rsv));
 	spin_lock_init(&rsv->lock);
@@ -180,7 +180,7 @@ void btrfs_init_block_rsv(struct btrfs_block_rsv *rsv, unsigned short type)
 
 void btrfs_init_metadata_block_rsv(struct btrfs_fs_info *fs_info,
 				   struct btrfs_block_rsv *rsv,
-				   unsigned short type)
+				   enum btrfs_rsv_type type)
 {
 	btrfs_init_block_rsv(rsv, type);
 	rsv->space_info = btrfs_find_space_info(fs_info,
@@ -188,7 +188,7 @@ void btrfs_init_metadata_block_rsv(struct btrfs_fs_info *fs_info,
 }
 
 struct btrfs_block_rsv *btrfs_alloc_block_rsv(struct btrfs_fs_info *fs_info,
-					      unsigned short type)
+					      enum btrfs_rsv_type type)
 {
 	struct btrfs_block_rsv *block_rsv;
 
@@ -304,7 +304,7 @@ int btrfs_block_rsv_use_bytes(struct btrfs_block_rsv *block_rsv, u64 num_bytes)
 	if (block_rsv->reserved >= num_bytes) {
 		block_rsv->reserved -= num_bytes;
 		if (block_rsv->reserved < block_rsv->size)
-			block_rsv->full = 0;
+			block_rsv->full = false;
 		ret = 0;
 	}
 	spin_unlock(&block_rsv->lock);
@@ -319,7 +319,7 @@ void btrfs_block_rsv_add_bytes(struct btrfs_block_rsv *block_rsv,
 	if (update_size)
 		block_rsv->size += num_bytes;
 	else if (block_rsv->reserved >= block_rsv->size)
-		block_rsv->full = 1;
+		block_rsv->full = true;
 	spin_unlock(&block_rsv->lock);
 }
 
@@ -341,7 +341,7 @@ int btrfs_cond_migrate_bytes(struct btrfs_fs_info *fs_info,
 	}
 	global_rsv->reserved -= num_bytes;
 	if (global_rsv->reserved < global_rsv->size)
-		global_rsv->full = 0;
+		global_rsv->full = false;
 	spin_unlock(&global_rsv->lock);
 
 	btrfs_block_rsv_add_bytes(dest, num_bytes, true);
@@ -408,10 +408,7 @@ void btrfs_update_global_block_rsv(struct btrfs_fs_info *fs_info)
 		btrfs_try_granting_tickets(fs_info, sinfo);
 	}
 
-	if (block_rsv->reserved == block_rsv->size)
-		block_rsv->full = 1;
-	else
-		block_rsv->full = 0;
+	block_rsv->full = (block_rsv->reserved == block_rsv->size);
 
 	if (block_rsv->size >= sinfo->total_bytes)
 		sinfo->force_alloc = CHUNK_ALLOC_FORCE;
@@ -9,7 +9,7 @@ enum btrfs_reserve_flush_enum;
 /*
  * Types of block reserves
  */
-enum {
+enum btrfs_rsv_type {
 	BTRFS_BLOCK_RSV_GLOBAL,
 	BTRFS_BLOCK_RSV_DELALLOC,
 	BTRFS_BLOCK_RSV_TRANS,
@@ -25,9 +25,10 @@ struct btrfs_block_rsv {
 	u64 reserved;
 	struct btrfs_space_info *space_info;
 	spinlock_t lock;
-	unsigned short full;
-	unsigned short type;
-	unsigned short failfast;
+	bool full;
+	bool failfast;
+	/* Block reserve type, one of BTRFS_BLOCK_RSV_* */
+	enum btrfs_rsv_type type:8;
 
 	/*
 	 * Qgroup equivalent for @size @reserved
@@ -49,13 +50,13 @@ struct btrfs_block_rsv {
 	u64 qgroup_rsv_reserved;
 };
 
-void btrfs_init_block_rsv(struct btrfs_block_rsv *rsv, unsigned short type);
+void btrfs_init_block_rsv(struct btrfs_block_rsv *rsv, enum btrfs_rsv_type type);
 void btrfs_init_root_block_rsv(struct btrfs_root *root);
 struct btrfs_block_rsv *btrfs_alloc_block_rsv(struct btrfs_fs_info *fs_info,
-					      unsigned short type);
+					      enum btrfs_rsv_type type);
 void btrfs_init_metadata_block_rsv(struct btrfs_fs_info *fs_info,
 				   struct btrfs_block_rsv *rsv,
-				   unsigned short type);
+				   enum btrfs_rsv_type type);
 void btrfs_free_block_rsv(struct btrfs_fs_info *fs_info,
 			  struct btrfs_block_rsv *rsv);
 int btrfs_block_rsv_add(struct btrfs_fs_info *fs_info,
@@ -279,19 +279,31 @@ static inline void btrfs_insert_inode_hash(struct inode *inode)
 	__insert_inode_hash(inode, h);
 }
 
+#if BITS_PER_LONG == 32
+
+/*
+ * On 32 bit systems the i_ino of struct inode is 32 bits (unsigned long), so
+ * we use the inode's location objectid which is a u64 to avoid truncation.
+ */
 static inline u64 btrfs_ino(const struct btrfs_inode *inode)
 {
 	u64 ino = inode->location.objectid;
 
-	/*
-	 * !ino: btree_inode
-	 * type == BTRFS_ROOT_ITEM_KEY: subvol dir
-	 */
-	if (!ino || inode->location.type == BTRFS_ROOT_ITEM_KEY)
+	/* type == BTRFS_ROOT_ITEM_KEY: subvol dir */
+	if (inode->location.type == BTRFS_ROOT_ITEM_KEY)
 		ino = inode->vfs_inode.i_ino;
 	return ino;
 }
 
+#else
+
+static inline u64 btrfs_ino(const struct btrfs_inode *inode)
+{
+	return inode->vfs_inode.i_ino;
+}
+
+#endif
+
 static inline void btrfs_i_size_write(struct btrfs_inode *inode, u64 size)
 {
 	i_size_write(&inode->vfs_inode, size);
@@ -305,8 +317,7 @@ static inline bool btrfs_is_free_space_inode(struct btrfs_inode *inode)
 	if (root == root->fs_info->tree_root &&
 	    btrfs_ino(inode) != BTRFS_BTREE_INODE_OBJECTID)
 		return true;
-	if (inode->location.objectid == BTRFS_FREE_INO_OBJECTID)
-		return true;
 
 	return false;
 }
(File diff suppressed because it is too large.)
@@ -30,8 +30,8 @@ static_assert((BTRFS_MAX_COMPRESSED % PAGE_SIZE) == 0);
 #define	BTRFS_ZLIB_DEFAULT_LEVEL		3
 
 struct compressed_bio {
-	/* Number of sectors with unfinished IO (unsubmitted or unfinished) */
-	refcount_t pending_sectors;
+	/* Number of outstanding bios */
+	refcount_t pending_ios;
 
 	/* Number of compressed pages in the array */
 	unsigned int nr_pages;
@@ -59,16 +59,12 @@ struct compressed_bio {
 
 	/* IO errors */
 	blk_status_t status;
-	int mirror_num;
 
-	/* for reads, this is the bio we are copying the data into */
-	struct bio *orig_bio;
-
-	/*
-	 * the start of a variable length array of checksums only
-	 * used by reads
-	 */
-	u8 sums[];
+	union {
+		/* For reads, this is the bio we are copying the data into */
+		struct bio *orig_bio;
+		struct work_struct write_end_work;
+	};
 };
 
 static inline unsigned int btrfs_compress_type(unsigned int type_level)
fs/btrfs/ctree.h (105 changes)
@@ -107,14 +107,6 @@ struct btrfs_ioctl_encoded_io_args;
 #define BTRFS_STAT_CURR		0
 #define BTRFS_STAT_PREV		1
 
-/*
- * Count how many BTRFS_MAX_EXTENT_SIZE cover the @size
- */
-static inline u32 count_max_extents(u64 size)
-{
-	return div_u64(size + BTRFS_MAX_EXTENT_SIZE - 1, BTRFS_MAX_EXTENT_SIZE);
-}
-
 static inline unsigned long btrfs_chunk_item_size(int num_stripes)
 {
 	BUG_ON(num_stripes == 0);
@@ -229,6 +221,13 @@ struct btrfs_root_backup {
 #define BTRFS_SUPER_INFO_OFFSET			SZ_64K
 #define BTRFS_SUPER_INFO_SIZE			4096
 
+/*
+ * The reserved space at the beginning of each device.
+ * It covers the primary super block and leaves space for potential use by other
+ * tools like bootloaders or to lower potential damage of accidental overwrite.
+ */
+#define BTRFS_DEVICE_RANGE_RESERVED		(SZ_1M)
+
 /*
  * the super block basically lists the main trees of the FS
  * it currently lacks any block count etc etc
@@ -248,8 +247,12 @@ struct btrfs_super_block {
 	__le64 chunk_root;
 	__le64 log_root;
 
-	/* this will help find the new super based on the log root */
-	__le64 log_root_transid;
+	/*
+	 * This member has never been utilized since the very beginning, thus
+	 * it's always 0 regardless of kernel version. We always use
+	 * generation + 1 to read log tree root. So here we mark it deprecated.
+	 */
+	__le64 __unused_log_root_transid;
 	__le64 total_bytes;
 	__le64 bytes_used;
 	__le64 root_dir_objectid;
@@ -635,6 +638,9 @@ enum {
 	/* Indicate we have half completed snapshot deletions pending. */
 	BTRFS_FS_UNFINISHED_DROPS,
 
+	/* Indicate we have to finish a zone to do next allocation. */
+	BTRFS_FS_NEED_ZONE_FINISH,
+
 #if BITS_PER_LONG == 32
 	/* Indicate if we have error/warn message printed on 32bit systems */
 	BTRFS_FS_32BIT_ERROR,
@@ -656,6 +662,18 @@ enum btrfs_exclusive_operation {
 	BTRFS_EXCLOP_SWAP_ACTIVATE,
 };
 
+/* Store data about transaction commits, exported via sysfs. */
+struct btrfs_commit_stats {
+	/* Total number of commits */
+	u64 commit_count;
+	/* The maximum commit duration so far in ns */
+	u64 max_commit_dur;
+	/* The last commit duration in ns */
+	u64 last_commit_dur;
+	/* The total commit duration in ns */
+	u64 total_commit_dur;
+};
+
 struct btrfs_fs_info {
 	u8 chunk_tree_uuid[BTRFS_UUID_SIZE];
 	unsigned long flags;
@@ -850,11 +868,11 @@ struct btrfs_fs_info {
 	struct btrfs_workqueue *hipri_workers;
 	struct btrfs_workqueue *delalloc_workers;
 	struct btrfs_workqueue *flush_workers;
-	struct btrfs_workqueue *endio_workers;
-	struct btrfs_workqueue *endio_meta_workers;
-	struct btrfs_workqueue *endio_raid56_workers;
+	struct workqueue_struct *endio_workers;
+	struct workqueue_struct *endio_meta_workers;
+	struct workqueue_struct *endio_raid56_workers;
 	struct workqueue_struct *rmw_workers;
-	struct btrfs_workqueue *endio_meta_write_workers;
+	struct workqueue_struct *compressed_write_workers;
 	struct btrfs_workqueue *endio_write_workers;
 	struct btrfs_workqueue *endio_freespace_worker;
 	struct btrfs_workqueue *caching_workers;
@@ -1032,6 +1050,12 @@ struct btrfs_fs_info {
 	u32 csums_per_leaf;
 	u32 stripesize;
 
+	/*
+	 * Maximum size of an extent. BTRFS_MAX_EXTENT_SIZE on regular
+	 * filesystem, on zoned it depends on the device constraints.
+	 */
+	u64 max_extent_size;
+
 	/* Block groups and devices containing active swapfiles. */
 	spinlock_t swapfile_pins_lock;
 	struct rb_root swapfile_pins;
@@ -1047,6 +1071,8 @@ struct btrfs_fs_info {
 	 */
 	u64 zone_size;
 
+	/* Max size to emit ZONE_APPEND write command */
+	u64 max_zone_append_size;
 	struct mutex zoned_meta_io_lock;
 	spinlock_t treelog_bg_lock;
 	u64 treelog_bg;
@@ -1063,6 +1089,11 @@ struct btrfs_fs_info {
 
 	spinlock_t zone_active_bgs_lock;
 	struct list_head zone_active_bgs;
+	/* Waiters when BTRFS_FS_NEED_ZONE_FINISH is set */
+	wait_queue_head_t zone_finish_wait;
+
+	/* Updates are not protected by any lock */
+	struct btrfs_commit_stats commit_stats;
 
 #ifdef CONFIG_BTRFS_FS_REF_VERIFY
 	spinlock_t ref_verify_lock;
@@ -2475,8 +2506,6 @@ BTRFS_SETGET_STACK_FUNCS(super_chunk_root_level, struct btrfs_super_block,
 			 chunk_root_level, 8);
 BTRFS_SETGET_STACK_FUNCS(super_log_root, struct btrfs_super_block,
 			 log_root, 64);
-BTRFS_SETGET_STACK_FUNCS(super_log_root_transid, struct btrfs_super_block,
-			 log_root_transid, 64);
 BTRFS_SETGET_STACK_FUNCS(super_log_root_level, struct btrfs_super_block,
 			 log_root_level, 8);
 BTRFS_SETGET_STACK_FUNCS(super_total_bytes, struct btrfs_super_block,
@@ -2733,8 +2762,16 @@ int btrfs_get_extent_inline_ref_type(const struct extent_buffer *eb,
 				     enum btrfs_inline_ref_type is_data);
 u64 hash_extent_data_ref(u64 root_objectid, u64 owner, u64 offset);
 
+static inline u8 *btrfs_csum_ptr(const struct btrfs_fs_info *fs_info, u8 *csums,
+				 u64 offset)
+{
+	u64 offset_in_sectors = offset >> fs_info->sectorsize_bits;
+
+	return csums + offset_in_sectors * fs_info->csum_size;
+}
+
 /*
- * Take the number of bytes to be checksummmed and figure out how many leaves
+ * Take the number of bytes to be checksummed and figure out how many leaves
  * it would require to store the csums for that many bytes.
  */
 static inline u64 btrfs_csum_bytes_to_leaves(
@@ -3251,11 +3288,18 @@ void btrfs_inode_safe_disk_i_size_write(struct btrfs_inode *inode, u64 new_i_size);
 u64 btrfs_file_extent_end(const struct btrfs_path *path);
 
 /* inode.c */
-void btrfs_submit_data_bio(struct inode *inode, struct bio *bio,
-			   int mirror_num, enum btrfs_compression_type compress_type);
-unsigned int btrfs_verify_data_csum(struct btrfs_bio *bbio,
-				    u32 bio_offset, struct page *page,
-				    u64 start, u64 end);
-int btrfs_check_data_csum(struct inode *inode, struct btrfs_bio *bbio,
-			  u32 bio_offset, struct page *page, u32 pgoff);
+void btrfs_submit_data_write_bio(struct inode *inode, struct bio *bio, int mirror_num);
+void btrfs_submit_data_read_bio(struct inode *inode, struct bio *bio,
+			int mirror_num, enum btrfs_compression_type compress_type);
+int btrfs_check_sector_csum(struct btrfs_fs_info *fs_info, struct page *page,
+			    u32 pgoff, u8 *csum, const u8 * const csum_expected);
+int btrfs_check_data_csum(struct inode *inode, struct btrfs_bio *bbio,
+			  u32 bio_offset, struct page *page, u32 pgoff);
 struct extent_map *btrfs_get_extent_fiemap(struct btrfs_inode *inode,
 					   u64 start, u64 len);
 noinline int can_nocow_extent(struct inode *inode, u64 offset, u64 *len,
@@ -3305,9 +3349,9 @@ void btrfs_new_inode_args_destroy(struct btrfs_new_inode_args *args);
 struct inode *btrfs_new_subvol_inode(struct user_namespace *mnt_userns,
 				     struct inode *dir);
 void btrfs_set_delalloc_extent(struct inode *inode, struct extent_state *state,
-			       unsigned *bits);
+			       u32 bits);
 void btrfs_clear_delalloc_extent(struct inode *inode,
-				 struct extent_state *state, unsigned *bits);
+				 struct extent_state *state, u32 bits);
 void btrfs_merge_delalloc_extent(struct inode *inode, struct extent_state *new,
 				 struct extent_state *other);
 void btrfs_split_delalloc_extent(struct inode *inode,
@@ -3353,6 +3397,12 @@ int btrfs_writepage_cow_fixup(struct page *page);
 void btrfs_writepage_endio_finish_ordered(struct btrfs_inode *inode,
 					  struct page *page, u64 start,
 					  u64 end, bool uptodate);
+int btrfs_encoded_io_compression_from_extent(struct btrfs_fs_info *fs_info,
+					     int compress_type);
+int btrfs_encoded_read_regular_fill_pages(struct btrfs_inode *inode,
+					  u64 file_offset, u64 disk_bytenr,
+					  u64 disk_io_size,
+					  struct page **pages);
 ssize_t btrfs_encoded_read(struct kiocb *iocb, struct iov_iter *iter,
 			   struct btrfs_ioctl_encoded_io_args *encoded);
 ssize_t btrfs_do_encoded_write(struct kiocb *iocb, struct iov_iter *from,
@@ -4009,6 +4059,19 @@ static inline bool btrfs_is_zoned(const struct btrfs_fs_info *fs_info)
 	return fs_info->zone_size > 0;
 }
 
+/*
+ * Count how many fs_info->max_extent_size cover the @size
+ */
+static inline u32 count_max_extents(struct btrfs_fs_info *fs_info, u64 size)
+{
+#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
+	if (!fs_info)
+		return div_u64(size + BTRFS_MAX_EXTENT_SIZE - 1, BTRFS_MAX_EXTENT_SIZE);
+#endif
+
+	return div_u64(size + fs_info->max_extent_size - 1, fs_info->max_extent_size);
+}
+
 static inline bool btrfs_is_data_reloc_root(const struct btrfs_root *root)
 {
 	return root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID;
@@ -273,7 +273,7 @@ static void calc_inode_reservations(struct btrfs_fs_info *fs_info,
 					u64 num_bytes, u64 disk_num_bytes,
 					u64 *meta_reserve, u64 *qgroup_reserve)
 {
-	u64 nr_extents = count_max_extents(num_bytes);
+	u64 nr_extents = count_max_extents(fs_info, num_bytes);
 	u64 csum_leaves = btrfs_csum_bytes_to_leaves(fs_info, disk_num_bytes);
 	u64 inode_update = btrfs_calc_metadata_size(fs_info, 1);
 
@@ -350,7 +350,7 @@ int btrfs_delalloc_reserve_metadata(struct btrfs_inode *inode, u64 num_bytes,
 	 * needs to free the reservation we just made.
 	 */
 	spin_lock(&inode->lock);
-	nr_extents = count_max_extents(num_bytes);
+	nr_extents = count_max_extents(fs_info, num_bytes);
 	btrfs_mod_outstanding_extents(inode, nr_extents);
 	inode->csum_bytes += disk_num_bytes;
 	btrfs_calculate_inode_block_rsv_size(fs_info, inode);
@@ -413,7 +413,7 @@ void btrfs_delalloc_release_extents(struct btrfs_inode *inode, u64 num_bytes)
 	unsigned num_extents;
 
 	spin_lock(&inode->lock);
-	num_extents = count_max_extents(num_bytes);
+	num_extents = count_max_extents(fs_info, num_bytes);
 	btrfs_mod_outstanding_extents(inode, -num_extents);
 	btrfs_calculate_inode_block_rsv_size(fs_info, inode);
 	spin_unlock(&inode->lock);
(File diff suppressed because it is too large.)
@@ -58,6 +58,17 @@ struct btrfs_delayed_node {
 	u64 index_cnt;
 	unsigned long flags;
 	int count;
+	/*
+	 * The size of the next batch of dir index items to insert (if this
+	 * node is from a directory inode). Protected by @mutex.
+	 */
+	u32 curr_index_batch_size;
+	/*
+	 * Number of leaves reserved for inserting dir index items (if this
+	 * node belongs to a directory inode). This may be larger then the
+	 * actual number of leaves we end up using. Protected by @mutex.
+	 */
+	u32 index_item_leaves;
 };
 
 struct btrfs_delayed_item {
@@ -132,7 +132,7 @@ void btrfs_update_delayed_refs_rsv(struct btrfs_trans_handle *trans)
 
 	spin_lock(&delayed_rsv->lock);
 	delayed_rsv->size += num_bytes;
-	delayed_rsv->full = 0;
+	delayed_rsv->full = false;
 	spin_unlock(&delayed_rsv->lock);
 	trans->delayed_ref_updates = 0;
 }
@@ -175,7 +175,7 @@ void btrfs_migrate_to_delayed_refs_rsv(struct btrfs_fs_info *fs_info,
 	if (num_bytes)
 		delayed_refs_rsv->reserved += num_bytes;
 	if (delayed_refs_rsv->reserved >= delayed_refs_rsv->size)
-		delayed_refs_rsv->full = 1;
+		delayed_refs_rsv->full = true;
 	spin_unlock(&delayed_refs_rsv->lock);
 
 	if (num_bytes)
@@ -587,7 +587,8 @@ bool btrfs_finish_block_group_to_copy(struct btrfs_device *srcdev,
 	ASSERT(!IS_ERR(em));
 	map = em->map_lookup;
 
-	num_extents = cur_extent = 0;
+	num_extents = 0;
+	cur_extent = 0;
 	for (i = 0; i < map->num_stripes; i++) {
 		/* We have more device extent to copy */
 		if (srcdev != map->stripes[i].dev)
@@ -51,7 +51,6 @@
 				 BTRFS_SUPER_FLAG_METADUMP |\
 				 BTRFS_SUPER_FLAG_METADUMP_V2)
 
-static void end_workqueue_fn(struct btrfs_work *work);
 static void btrfs_destroy_ordered_extents(struct btrfs_root *root);
 static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
 				      struct btrfs_fs_info *fs_info);
@@ -64,40 +63,6 @@ static int btrfs_destroy_pinned_extent(struct btrfs_fs_info *fs_info,
 static int btrfs_cleanup_transaction(struct btrfs_fs_info *fs_info);
 static void btrfs_error_commit_super(struct btrfs_fs_info *fs_info);
 
-/*
- * btrfs_end_io_wq structs are used to do processing in task context when an IO
- * is complete. This is used during reads to verify checksums, and it is used
- * by writes to insert metadata for new file extents after IO is complete.
- */
-struct btrfs_end_io_wq {
-	struct bio *bio;
-	bio_end_io_t *end_io;
-	void *private;
-	struct btrfs_fs_info *info;
-	blk_status_t status;
-	enum btrfs_wq_endio_type metadata;
-	struct btrfs_work work;
-};
-
-static struct kmem_cache *btrfs_end_io_wq_cache;
-
-int __init btrfs_end_io_wq_init(void)
-{
-	btrfs_end_io_wq_cache = kmem_cache_create("btrfs_end_io_wq",
-					sizeof(struct btrfs_end_io_wq),
-					0,
-					SLAB_MEM_SPREAD,
-					NULL);
-	if (!btrfs_end_io_wq_cache)
-		return -ENOMEM;
-	return 0;
-}
-
-void __cold btrfs_end_io_wq_exit(void)
-{
-	kmem_cache_destroy(btrfs_end_io_wq_cache);
-}
-
 static void btrfs_free_csum_hash(struct btrfs_fs_info *fs_info)
 {
 	if (fs_info->csum_shash)
@@ -256,8 +221,8 @@ static int verify_parent_transid(struct extent_io_tree *io_tree,
 		goto out;
 	}
 	btrfs_err_rl(eb->fs_info,
-"parent transid verify failed on %llu wanted %llu found %llu",
-			eb->start,
+"parent transid verify failed on logical %llu mirror %u wanted %llu found %llu",
+			eb->start, eb->read_mirror,
 			parent_transid, btrfs_header_generation(eb));
 	ret = 1;
 	clear_extent_buffer_uptodate(eb);
@@ -587,21 +552,23 @@ static int validate_extent_buffer(struct extent_buffer *eb)
 
 	found_start = btrfs_header_bytenr(eb);
 	if (found_start != eb->start) {
-		btrfs_err_rl(fs_info, "bad tree block start, want %llu have %llu",
-			     eb->start, found_start);
+		btrfs_err_rl(fs_info,
+			"bad tree block start, mirror %u want %llu have %llu",
+			eb->read_mirror, eb->start, found_start);
 		ret = -EIO;
 		goto out;
 	}
 	if (check_tree_block_fsid(eb)) {
-		btrfs_err_rl(fs_info, "bad fsid on block %llu",
-			     eb->start);
+		btrfs_err_rl(fs_info, "bad fsid on logical %llu mirror %u",
+			     eb->start, eb->read_mirror);
 		ret = -EIO;
 		goto out;
 	}
 	found_level = btrfs_header_level(eb);
 	if (found_level >= BTRFS_MAX_LEVEL) {
-		btrfs_err(fs_info, "bad tree block level %d on %llu",
-			  (int)btrfs_header_level(eb), eb->start);
+		btrfs_err(fs_info,
+			"bad tree block level, mirror %u level %d on logical %llu",
+			eb->read_mirror, btrfs_header_level(eb), eb->start);
 		ret = -EIO;
 		goto out;
 	}
@@ -612,8 +579,8 @@ static int validate_extent_buffer(struct extent_buffer *eb)
 
 	if (memcmp(result, header_csum, csum_size) != 0) {
 		btrfs_warn_rl(fs_info,
-"checksum verify failed on %llu wanted " CSUM_FMT " found " CSUM_FMT " level %d",
-			      eb->start,
+"checksum verify failed on logical %llu mirror %u wanted " CSUM_FMT " found " CSUM_FMT " level %d",
+			      eb->start, eb->read_mirror,
 			      CSUM_FMT_VALUE(csum_size, header_csum),
 			      CSUM_FMT_VALUE(csum_size, result),
 			      btrfs_header_level(eb));
@@ -638,8 +605,8 @@ static int validate_extent_buffer(struct extent_buffer *eb)
 		set_extent_buffer_uptodate(eb);
 	else
 		btrfs_err(fs_info,
-			  "block=%llu read time tree block corruption detected",
-			  eb->start);
+			"read time tree block corruption detected on logical %llu mirror %u",
+			eb->start, eb->read_mirror);
 out:
 	return ret;
 }
@@ -740,58 +707,6 @@ err:
 	return ret;
 }
 
-static void end_workqueue_bio(struct bio *bio)
-{
-	struct btrfs_end_io_wq *end_io_wq = bio->bi_private;
-	struct btrfs_fs_info *fs_info;
-	struct btrfs_workqueue *wq;
-
-	fs_info = end_io_wq->info;
-	end_io_wq->status = bio->bi_status;
-
-	if (btrfs_op(bio) == BTRFS_MAP_WRITE) {
-		if (end_io_wq->metadata == BTRFS_WQ_ENDIO_METADATA)
-			wq = fs_info->endio_meta_write_workers;
-		else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_FREE_SPACE)
-			wq = fs_info->endio_freespace_worker;
-		else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_RAID56)
-			wq = fs_info->endio_raid56_workers;
-		else
-			wq = fs_info->endio_write_workers;
-	} else {
-		if (end_io_wq->metadata == BTRFS_WQ_ENDIO_RAID56)
-			wq = fs_info->endio_raid56_workers;
-		else if (end_io_wq->metadata)
-			wq = fs_info->endio_meta_workers;
-		else
-			wq = fs_info->endio_workers;
-	}
-
-	btrfs_init_work(&end_io_wq->work, end_workqueue_fn, NULL, NULL);
-	btrfs_queue_work(wq, &end_io_wq->work);
-}
-
-blk_status_t btrfs_bio_wq_end_io(struct btrfs_fs_info *info, struct bio *bio,
-			enum btrfs_wq_endio_type metadata)
-{
-	struct btrfs_end_io_wq *end_io_wq;
-
-	end_io_wq = kmem_cache_alloc(btrfs_end_io_wq_cache, GFP_NOFS);
-	if (!end_io_wq)
-		return BLK_STS_RESOURCE;
-
-	end_io_wq->private = bio->bi_private;
-	end_io_wq->end_io = bio->bi_end_io;
-	end_io_wq->info = info;
-	end_io_wq->status = 0;
-	end_io_wq->bio = bio;
-	end_io_wq->metadata = metadata;
-
-	bio->bi_private = end_io_wq;
-	bio->bi_end_io = end_workqueue_bio;
-	return 0;
-}
-
 static void run_one_async_start(struct btrfs_work *work)
 {
 	struct async_submit_bio *async;
@@ -816,7 +731,6 @@ static void run_one_async_done(struct btrfs_work *work)
 {
 	struct async_submit_bio *async;
 	struct inode *inode;
-	blk_status_t ret;
 
 	async = container_of(work, struct async_submit_bio, work);
 	inode = async->inode;
@@ -834,11 +748,7 @@ static void run_one_async_done(struct btrfs_work *work)
 	 * This changes nothing when cgroups aren't in use.
 	 */
 	async->bio->bi_opf |= REQ_CGROUP_PUNT;
-	ret = btrfs_map_bio(btrfs_sb(inode->i_sb), async->bio, async->mirror_num);
-	if (ret) {
-		async->bio->bi_status = ret;
-		bio_endio(async->bio);
-	}
+	btrfs_submit_bio(btrfs_sb(inode->i_sb), async->bio, async->mirror_num);
}
 
 static void run_one_async_free(struct btrfs_work *work)
@@ -849,16 +759,23 @@ static void run_one_async_free(struct btrfs_work *work)
 	kfree(async);
 }
 
-blk_status_t btrfs_wq_submit_bio(struct inode *inode, struct bio *bio,
-				 int mirror_num, u64 dio_file_offset,
-				 extent_submit_bio_start_t *submit_bio_start)
+/*
+ * Submit bio to an async queue.
+ *
+ * Return:
+ * - true if the work has been successfully submitted
+ * - false in case of error
+ */
+bool btrfs_wq_submit_bio(struct inode *inode, struct bio *bio, int mirror_num,
+			 u64 dio_file_offset,
+			 extent_submit_bio_start_t *submit_bio_start)
 {
 	struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
 	struct async_submit_bio *async;
 
 	async = kmalloc(sizeof(*async), GFP_NOFS);
 	if (!async)
-		return BLK_STS_RESOURCE;
+		return false;
 
 	async->inode = inode;
 	async->bio = bio;
@@ -876,7 +793,7 @@ bool btrfs_wq_submit_bio(struct inode *inode, struct bio *bio, int mirror_num,
 		btrfs_queue_work(fs_info->hipri_workers, &async->work);
 	else
 		btrfs_queue_work(fs_info->workers, &async->work);
-	return 0;
+	return true;
 }
 
 static blk_status_t btree_csum_one_bio(struct bio *bio)
@@ -902,7 +819,7 @@ static blk_status_t btree_submit_bio_start(struct inode *inode, struct bio *bio,
 {
 	/*
 	 * when we're called for a write, we're already in the async
-	 * submission context. Just jump into btrfs_map_bio
+	 * submission context. Just jump into btrfs_submit_bio.
 	 */
 	return btree_csum_one_bio(bio);
 }
@@ -924,32 +841,29 @@ void btrfs_submit_metadata_bio(struct inode *inode, struct bio *bio, int mirror_num)
 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
 	blk_status_t ret;
 
 	bio->bi_opf |= REQ_META;
 
 	if (btrfs_op(bio) != BTRFS_MAP_WRITE) {
-		/*
-		 * called for a read, do the setup so that checksum validation
-		 * can happen in the async kernel threads
-		 */
-		ret = btrfs_bio_wq_end_io(fs_info, bio,
-					  BTRFS_WQ_ENDIO_METADATA);
-		if (!ret)
-			ret = btrfs_map_bio(fs_info, bio, mirror_num);
-	} else if (!should_async_write(fs_info, BTRFS_I(inode))) {
-		ret = btree_csum_one_bio(bio);
-		if (!ret)
-			ret = btrfs_map_bio(fs_info, bio, mirror_num);
-	} else {
-		/*
-		 * kthread helpers are used to submit writes so that
-		 * checksumming can happen in parallel across all CPUs
-		 */
-		ret = btrfs_wq_submit_bio(inode, bio, mirror_num, 0,
-					  btree_submit_bio_start);
+		btrfs_submit_bio(fs_info, bio, mirror_num);
+		return;
 	}
 
+	/*
+	 * Kthread helpers are used to submit writes so that checksumming can
+	 * happen in parallel across all CPUs.
+	 */
+	if (should_async_write(fs_info, BTRFS_I(inode)) &&
+	    btrfs_wq_submit_bio(inode, bio, mirror_num, 0, btree_submit_bio_start))
+		return;
+
+	ret = btree_csum_one_bio(bio);
+	if (ret) {
+		bio->bi_status = ret;
+		bio_endio(bio);
+		return;
+	}
+
+	btrfs_submit_bio(fs_info, bio, mirror_num);
 }
 
 #ifdef CONFIG_MIGRATION
@@ -1870,7 +1784,7 @@ again:
 fail:
 	/*
 	 * If our caller provided us an anonymous device, then it's his
-	 * responsability to free it in case we fail. So we have to set our
+	 * responsibility to free it in case we fail. So we have to set our
 	 * root's anon_dev to 0 to avoid a double free, once by btrfs_put_root()
 	 * and once again by our caller.
 	 */
@@ -1953,25 +1867,6 @@ struct btrfs_root *btrfs_get_fs_root_commit_root(struct btrfs_fs_info *fs_info,
 	return root;
 }
 
-/*
- * called by the kthread helper functions to finally call the bio end_io
- * functions. This is where read checksum verification actually happens
- */
-static void end_workqueue_fn(struct btrfs_work *work)
-{
-	struct bio *bio;
-	struct btrfs_end_io_wq *end_io_wq;
-
-	end_io_wq = container_of(work, struct btrfs_end_io_wq, work);
-	bio = end_io_wq->bio;
-
-	bio->bi_status = end_io_wq->status;
-	bio->bi_private = end_io_wq->private;
-	bio->bi_end_io = end_io_wq->end_io;
-	bio_endio(bio);
-	kmem_cache_free(btrfs_end_io_wq_cache, end_io_wq);
-}
-
 static int cleaner_kthread(void *arg)
 {
 	struct btrfs_fs_info *fs_info = arg;
@@ -2278,10 +2173,14 @@ static void btrfs_stop_all_workers(struct btrfs_fs_info *fs_info)
 	btrfs_destroy_workqueue(fs_info->delalloc_workers);
 	btrfs_destroy_workqueue(fs_info->hipri_workers);
 	btrfs_destroy_workqueue(fs_info->workers);
-	btrfs_destroy_workqueue(fs_info->endio_workers);
-	btrfs_destroy_workqueue(fs_info->endio_raid56_workers);
+	if (fs_info->endio_workers)
+		destroy_workqueue(fs_info->endio_workers);
+	if (fs_info->endio_raid56_workers)
+		destroy_workqueue(fs_info->endio_raid56_workers);
 	if (fs_info->rmw_workers)
 		destroy_workqueue(fs_info->rmw_workers);
+	if (fs_info->compressed_write_workers)
+		destroy_workqueue(fs_info->compressed_write_workers);
 	btrfs_destroy_workqueue(fs_info->endio_write_workers);
 	btrfs_destroy_workqueue(fs_info->endio_freespace_worker);
 	btrfs_destroy_workqueue(fs_info->delayed_workers);
@@ -2295,8 +2194,8 @@ static void btrfs_stop_all_workers(struct btrfs_fs_info *fs_info)
 	 * the queues used for metadata I/O, since tasks from those other work
 	 * queues can do metadata I/O operations.
	 */
-	btrfs_destroy_workqueue(fs_info->endio_meta_workers);
-	btrfs_destroy_workqueue(fs_info->endio_meta_write_workers);
+	if (fs_info->endio_meta_workers)
+		destroy_workqueue(fs_info->endio_meta_workers);
 }
 
 static void free_root_extent_buffers(struct btrfs_root *root)
@@ -2426,7 +2325,9 @@ static void btrfs_init_btree_inode(struct btrfs_fs_info *fs_info)
 	extent_map_tree_init(&BTRFS_I(inode)->extent_tree);
 
 	BTRFS_I(inode)->root = btrfs_grab_root(fs_info->tree_root);
-	memset(&BTRFS_I(inode)->location, 0, sizeof(struct btrfs_key));
+	BTRFS_I(inode)->location.objectid = BTRFS_BTREE_INODE_OBJECTID;
+	BTRFS_I(inode)->location.type = 0;
+	BTRFS_I(inode)->location.offset = 0;
 	set_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags);
 	btrfs_insert_inode_hash(inode);
 }
@@ -2475,25 +2376,18 @@ static int btrfs_init_workqueues(struct btrfs_fs_info *fs_info)
 	fs_info->fixup_workers =
 		btrfs_alloc_workqueue(fs_info, "fixup", flags, 1, 0);
 
-	/*
-	 * endios are largely parallel and should have a very
-	 * low idle thresh
-	 */
 	fs_info->endio_workers =
-		btrfs_alloc_workqueue(fs_info, "endio", flags, max_active, 4);
+		alloc_workqueue("btrfs-endio", flags, max_active);
 	fs_info->endio_meta_workers =
-		btrfs_alloc_workqueue(fs_info, "endio-meta", flags,
-				      max_active, 4);
-	fs_info->endio_meta_write_workers =
-		btrfs_alloc_workqueue(fs_info, "endio-meta-write", flags,
-				      max_active, 2);
+		alloc_workqueue("btrfs-endio-meta", flags, max_active);
 	fs_info->endio_raid56_workers =
-		btrfs_alloc_workqueue(fs_info, "endio-raid56", flags,
-				      max_active, 4);
+		alloc_workqueue("btrfs-endio-raid56", flags, max_active);
 	fs_info->rmw_workers = alloc_workqueue("btrfs-rmw", flags, max_active);
 	fs_info->endio_write_workers =
 		btrfs_alloc_workqueue(fs_info, "endio-write", flags,
 				      max_active, 2);
+	fs_info->compressed_write_workers =
+		alloc_workqueue("btrfs-compressed-write", flags, max_active);
 	fs_info->endio_freespace_worker =
 		btrfs_alloc_workqueue(fs_info, "freespace-write", flags,
 				      max_active, 0);
@@ -2508,7 +2402,7 @@ static int btrfs_init_workqueues(struct btrfs_fs_info *fs_info)
 	if (!(fs_info->workers && fs_info->hipri_workers &&
 	      fs_info->delalloc_workers && fs_info->flush_workers &&
 	      fs_info->endio_workers && fs_info->endio_meta_workers &&
-	      fs_info->endio_meta_write_workers &&
+	      fs_info->compressed_write_workers &&
 	      fs_info->endio_write_workers && fs_info->endio_raid56_workers &&
 	      fs_info->endio_freespace_worker && fs_info->rmw_workers &&
 	      fs_info->caching_workers && fs_info->fixup_workers &&
@@ -2535,6 +2429,9 @@ static int btrfs_init_csum_hash(struct btrfs_fs_info *fs_info, u16 csum_type)
 
 	fs_info->csum_shash = csum_shash;
 
+	btrfs_info(fs_info, "using %s (%s) checksum algorithm",
+			btrfs_super_csum_name(csum_type),
+			crypto_shash_driver_name(csum_shash));
 	return 0;
 }
 
@@ -3253,6 +3150,7 @@ void btrfs_init_fs_info(struct btrfs_fs_info *fs_info)
 	init_waitqueue_head(&fs_info->transaction_blocked_wait);
 	init_waitqueue_head(&fs_info->async_submit_wait);
 	init_waitqueue_head(&fs_info->delayed_iputs_wait);
+	init_waitqueue_head(&fs_info->zone_finish_wait);
 
 	/* Usable values until the real ones are cached from the superblock */
 	fs_info->nodesize = 4096;
@@ -3260,6 +3158,8 @@ void btrfs_init_fs_info(struct btrfs_fs_info *fs_info)
 	fs_info->sectorsize_bits = ilog2(4096);
 	fs_info->stripesize = 4096;
 
+	fs_info->max_extent_size = BTRFS_MAX_EXTENT_SIZE;
+
 	spin_lock_init(&fs_info->swapfile_pins_lock);
 	fs_info->swapfile_pins = RB_ROOT;
 
@@ -3591,16 +3491,6 @@ int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_devices,
 	 */
 	fs_info->compress_type = BTRFS_COMPRESS_ZLIB;
 
-	/*
-	 * Flag our filesystem as having big metadata blocks if they are bigger
-	 * than the page size.
-	 */
-	if (btrfs_super_nodesize(disk_super) > PAGE_SIZE) {
-		if (!(features & BTRFS_FEATURE_INCOMPAT_BIG_METADATA))
-			btrfs_info(fs_info,
-				"flagging fs with big metadata feature");
-		features |= BTRFS_FEATURE_INCOMPAT_BIG_METADATA;
-	}
-
 	/* Set up fs_info before parsing mount options */
 	nodesize = btrfs_super_nodesize(disk_super);
@@ -3638,8 +3528,12 @@ int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_devices,
 	else if (fs_info->compress_type == BTRFS_COMPRESS_ZSTD)
 		features |= BTRFS_FEATURE_INCOMPAT_COMPRESS_ZSTD;
 
-	if (features & BTRFS_FEATURE_INCOMPAT_SKINNY_METADATA)
-		btrfs_info(fs_info, "has skinny extents");
+	/*
+	 * Flag our filesystem as having big metadata blocks if they are bigger
+	 * than the page size.
+	 */
+	if (btrfs_super_nodesize(disk_super) > PAGE_SIZE)
+		features |= BTRFS_FEATURE_INCOMPAT_BIG_METADATA;
 
 	/*
 	 * mixed block groups end up with duplicate but slightly offset
@@ -3668,6 +3562,20 @@ int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_devices,
 		err = -EINVAL;
 		goto fail_alloc;
 	}
+	/*
+	 * We have unsupported RO compat features, although RO mounted, we
+	 * should not cause any metadata write, including log replay.
+	 * Or we could screw up whatever the new feature requires.
+	 */
+	if (unlikely(features && btrfs_super_log_root(disk_super) &&
+		     !btrfs_test_opt(fs_info, NOLOGREPLAY))) {
+		btrfs_err(fs_info,
+"cannot replay dirty log with unsupported compat_ro features (0x%llx), try rescue=nologreplay",
+			  features);
+		err = -EINVAL;
+		goto fail_alloc;
+	}
 
 	if (sectorsize < PAGE_SIZE) {
 		struct btrfs_subpage_info *subpage_info;
@@ -17,13 +17,6 @@
  */
 #define BTRFS_BDEV_BLOCKSIZE (4096)
 
-enum btrfs_wq_endio_type {
-        BTRFS_WQ_ENDIO_DATA,
-        BTRFS_WQ_ENDIO_METADATA,
-        BTRFS_WQ_ENDIO_FREE_SPACE,
-        BTRFS_WQ_ENDIO_RAID56,
-};
-
 static inline u64 btrfs_sb_offset(int mirror)
 {
         u64 start = SZ_16K;
@@ -121,11 +114,9 @@ int btrfs_buffer_uptodate(struct extent_buffer *buf, u64 parent_transid,
                           int atomic);
 int btrfs_read_extent_buffer(struct extent_buffer *buf, u64 parent_transid,
                              int level, struct btrfs_key *first_key);
-blk_status_t btrfs_bio_wq_end_io(struct btrfs_fs_info *info, struct bio *bio,
-                        enum btrfs_wq_endio_type metadata);
-blk_status_t btrfs_wq_submit_bio(struct inode *inode, struct bio *bio,
-                                 int mirror_num, u64 dio_file_offset,
-                                 extent_submit_bio_start_t *submit_bio_start);
+bool btrfs_wq_submit_bio(struct inode *inode, struct bio *bio, int mirror_num,
+                         u64 dio_file_offset,
+                         extent_submit_bio_start_t *submit_bio_start);
 blk_status_t btrfs_submit_bio_done(void *private_data, struct bio *bio,
                                    int mirror_num);
 int btrfs_alloc_log_tree_node(struct btrfs_trans_handle *trans,
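
The btrfs_wq_submit_bio() prototype change from blk_status_t to bool tracks
the bio error-handling rework in this pull: instead of propagating a status,
the function appears to report whether the bio was handed to the async-submit
workqueue, so a caller can fall back to a synchronous path on allocation
failure. A small user-space model of that calling convention; every name here
is a mock-up, not the kernel API.

    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct work { int payload; };

    /* Returns true if queued; false means "run it inline", not an error code. */
    static bool wq_submit(struct work **queue_slot, int payload)
    {
        struct work *w = malloc(sizeof(*w));

        if (!w)
            return false;
        w->payload = payload;
        *queue_slot = w;        /* stand-in for queueing onto a workqueue */
        return true;
    }

    int main(void)
    {
        struct work *queued = NULL;

        if (!wq_submit(&queued, 42))
            printf("fallback: doing the work synchronously\n");
        else
            printf("queued async work: %d\n", queued->payload);
        free(queued);
        return 0;
    }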
@@ -145,8 +136,6 @@ int btree_lock_page_hook(struct page *page, void *data,
 int btrfs_get_num_tolerated_disk_barrier_failures(u64 flags);
 int btrfs_get_free_objectid(struct btrfs_root *root, u64 *objectid);
 int btrfs_init_root_free_objectid(struct btrfs_root *root);
-int __init btrfs_end_io_wq_init(void);
-void __cold btrfs_end_io_wq_exit(void);
 
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 void btrfs_set_buffer_lockdep_class(u64 objectid,
@@ -1269,7 +1269,7 @@ static int btrfs_issue_discard(struct block_device *bdev, u64 start, u64 len,
         return ret;
 }
 
-static int do_discard_extent(struct btrfs_io_stripe *stripe, u64 *bytes)
+static int do_discard_extent(struct btrfs_discard_stripe *stripe, u64 *bytes)
 {
         struct btrfs_device *dev = stripe->dev;
         struct btrfs_fs_info *fs_info = dev->fs_info;
@@ -1316,76 +1316,60 @@ int btrfs_discard_extent(struct btrfs_fs_info *fs_info, u64 bytenr,
         u64 discarded_bytes = 0;
         u64 end = bytenr + num_bytes;
         u64 cur = bytenr;
-        struct btrfs_io_context *bioc = NULL;
 
         /*
-         * Avoid races with device replace and make sure our bioc has devices
-         * associated to its stripes that don't go away while we are discarding.
+         * Avoid races with device replace and make sure the devices in the
+         * stripes don't go away while we are discarding.
          */
         btrfs_bio_counter_inc_blocked(fs_info);
         while (cur < end) {
-                struct btrfs_io_stripe *stripe;
+                struct btrfs_discard_stripe *stripes;
+                unsigned int num_stripes;
                 int i;
 
                 num_bytes = end - cur;
-                /* Tell the block device(s) that the sectors can be discarded */
-                ret = btrfs_map_block(fs_info, BTRFS_MAP_DISCARD, cur,
-                                      &num_bytes, &bioc, 0);
-                /*
-                 * Error can be -ENOMEM, -ENOENT (no such chunk mapping) or
-                 * -EOPNOTSUPP. For any such error, @num_bytes is not updated,
-                 * thus we can't continue anyway.
-                 */
-                if (ret < 0)
-                        goto out;
+                stripes = btrfs_map_discard(fs_info, cur, &num_bytes, &num_stripes);
+                if (IS_ERR(stripes)) {
+                        ret = PTR_ERR(stripes);
+                        if (ret == -EOPNOTSUPP)
+                                ret = 0;
+                        break;
+                }
 
-                stripe = bioc->stripes;
-                for (i = 0; i < bioc->num_stripes; i++, stripe++) {
+                for (i = 0; i < num_stripes; i++) {
+                        struct btrfs_discard_stripe *stripe = stripes + i;
                        u64 bytes;
-                        struct btrfs_device *device = stripe->dev;
 
-                        if (!device->bdev) {
+                        if (!stripe->dev->bdev) {
                                 ASSERT(btrfs_test_opt(fs_info, DEGRADED));
                                 continue;
                         }
 
-                        if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state))
+                        if (!test_bit(BTRFS_DEV_STATE_WRITEABLE,
+                                        &stripe->dev->dev_state))
                                 continue;
 
                         ret = do_discard_extent(stripe, &bytes);
-                        if (!ret) {
-                                discarded_bytes += bytes;
-                        } else if (ret != -EOPNOTSUPP) {
+                        if (ret) {
                                 /*
-                                 * Logic errors or -ENOMEM, or -EIO, but
-                                 * unlikely to happen.
-                                 *
-                                 * And since there are two loops, explicitly
-                                 * go to out to avoid confusion.
+                                 * Keep going if discard is not supported by the
+                                 * device.
                                  */
-                                btrfs_put_bioc(bioc);
-                                goto out;
+                                if (ret != -EOPNOTSUPP)
+                                        break;
+                                ret = 0;
+                        } else {
+                                discarded_bytes += bytes;
                         }
-
-                        /*
-                         * Just in case we get back EOPNOTSUPP for some reason,
-                         * just ignore the return value so we don't screw up
-                         * people calling discard_extent.
-                         */
-                        ret = 0;
                 }
-                btrfs_put_bioc(bioc);
+                kfree(stripes);
+                if (ret)
+                        break;
                 cur += num_bytes;
         }
-out:
         btrfs_bio_counter_dec(fs_info);
-
         if (actual_bytes)
                 *actual_bytes = discarded_bytes;
 
-
-        if (ret == -EOPNOTSUPP)
-                ret = 0;
         return ret;
 }
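
The rewritten loop swaps btrfs_map_block(BTRFS_MAP_DISCARD, ...) for the
dedicated btrfs_map_discard() helper, which returns a plain array of
btrfs_discard_stripe, and flattens the error policy: missing or non-writeable
devices are skipped, -EOPNOTSUPP from one device is ignored, and any other
error stops the walk. A self-contained model of just that per-stripe policy;
the stripe type and its fields are mocked.

    #include <errno.h>
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    struct mock_stripe {
        bool has_bdev;      /* device present (not missing/degraded)? */
        bool writeable;     /* device accepts writes? */
        int discard_err;    /* result the device would return, 0 on success */
        uint64_t bytes;     /* bytes the device would discard */
    };

    /* Mirrors the per-stripe policy of the reworked btrfs_discard_extent(). */
    static int discard_stripes(const struct mock_stripe *stripes, unsigned int n,
                               uint64_t *discarded)
    {
        int ret = 0;

        for (unsigned int i = 0; i < n; i++) {
            const struct mock_stripe *s = &stripes[i];

            if (!s->has_bdev || !s->writeable)
                continue;               /* skip, as the kernel loop does */

            ret = s->discard_err;
            if (ret) {
                if (ret != -EOPNOTSUPP)
                    break;              /* real error: stop the walk */
                ret = 0;                /* unsupported: keep going */
            } else {
                *discarded += s->bytes;
            }
        }
        return ret;
    }

    int main(void)
    {
        const struct mock_stripe s[] = {
            { true,  true,  0,           4096 },
            { true,  true,  -EOPNOTSUPP, 0 },   /* ignored */
            { false, false, 0,           0 },   /* skipped */
        };
        uint64_t done = 0;
        int ret = discard_stripes(s, 3, &done);

        printf("ret=%d discarded=%llu\n", ret, (unsigned long long)done);
        return 0;
    }

Treating -EOPNOTSUPP as success keeps fstrim usable on mixed arrays where only
some devices support discard.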
@@ -3981,23 +3965,63 @@ static void found_extent(struct find_free_extent_ctl *ffe_ctl,
         }
 }
 
-static bool can_allocate_chunk(struct btrfs_fs_info *fs_info,
-                               struct find_free_extent_ctl *ffe_ctl)
+static int can_allocate_chunk_zoned(struct btrfs_fs_info *fs_info,
+                                    struct find_free_extent_ctl *ffe_ctl)
+{
+        /* If we can activate new zone, just allocate a chunk and use it */
+        if (btrfs_can_activate_zone(fs_info->fs_devices, ffe_ctl->flags))
+                return 0;
+
+        /*
+         * We already reached the max active zones. Try to finish one block
+         * group to make a room for a new block group. This is only possible
+         * for a data block group because btrfs_zone_finish() may need to wait
+         * for a running transaction which can cause a deadlock for metadata
+         * allocation.
+         */
+        if (ffe_ctl->flags & BTRFS_BLOCK_GROUP_DATA) {
+                int ret = btrfs_zone_finish_one_bg(fs_info);
+
+                if (ret == 1)
+                        return 0;
+                else if (ret < 0)
+                        return ret;
+        }
+
+        /*
+         * If we have enough free space left in an already active block group
+         * and we can't activate any other zone now, do not allow allocating a
+         * new chunk and let find_free_extent() retry with a smaller size.
+         */
+        if (ffe_ctl->max_extent_size >= ffe_ctl->min_alloc_size)
+                return -ENOSPC;
+
+        /*
+         * Even min_alloc_size is not left in any block groups. Since we cannot
+         * activate a new block group, allocating it may not help. Let's tell a
+         * caller to try again and hope it progress something by writing some
+         * parts of the region. That is only possible for data block groups,
+         * where a part of the region can be written.
+         */
+        if (ffe_ctl->flags & BTRFS_BLOCK_GROUP_DATA)
+                return -EAGAIN;
+
+        /*
+         * We cannot activate a new block group and no enough space left in any
+         * block groups. So, allocating a new block group may not help. But,
+         * there is nothing to do anyway, so let's go with it.
+         */
+        return 0;
+}
+
+static int can_allocate_chunk(struct btrfs_fs_info *fs_info,
+                              struct find_free_extent_ctl *ffe_ctl)
 {
         switch (ffe_ctl->policy) {
         case BTRFS_EXTENT_ALLOC_CLUSTERED:
-                return true;
+                return 0;
         case BTRFS_EXTENT_ALLOC_ZONED:
-                /*
-                 * If we have enough free space left in an already
-                 * active block group and we can't activate any other
-                 * zone now, do not allow allocating a new chunk and
-                 * let find_free_extent() retry with a smaller size.
-                 */
-                if (ffe_ctl->max_extent_size >= ffe_ctl->min_alloc_size &&
-                    !btrfs_can_activate_zone(fs_info->fs_devices, ffe_ctl->flags))
-                        return false;
-                return true;
+                return can_allocate_chunk_zoned(fs_info, ffe_ctl);
         default:
                 BUG();
         }
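
can_allocate_chunk_zoned() encodes a four-step ladder: allocate if a new zone
can be activated (return 0); otherwise try to finish one data block group;
otherwise return -ENOSPC when retrying with a smaller size can still succeed;
or -EAGAIN when only partial data writes can make progress. A runnable model
of the ladder with the kernel state flattened into booleans; all names are
illustrative.

    #include <errno.h>
    #include <stdbool.h>
    #include <stdio.h>

    static int can_allocate_chunk_zoned_model(bool can_activate_zone,
                                              bool is_data,
                                              bool finished_one_bg,
                                              bool can_retry_smaller)
    {
        if (can_activate_zone)
            return 0;           /* just allocate a chunk and use it */

        if (is_data && finished_one_bg)
            return 0;           /* a finished zone made room for a new bg */

        if (can_retry_smaller)
            return -ENOSPC;     /* let find_free_extent() retry smaller */

        if (is_data)
            return -EAGAIN;     /* caller may progress via partial writes */

        return 0;               /* nothing better to do; try the allocation */
    }

    int main(void)
    {
        printf("%d\n", can_allocate_chunk_zoned_model(false, true, false, true));
        return 0;
    }

The switch to int return values is what lets find_free_extent_update_loop()
in the next hunk propagate -EAGAIN instead of collapsing every failure to
-ENOSPC.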
@@ -4079,8 +4103,9 @@ static int find_free_extent_update_loop(struct btrfs_fs_info *fs_info,
                 int exist = 0;
 
                 /* Check if allocation policy allows to create a new chunk */
-                if (!can_allocate_chunk(fs_info, ffe_ctl))
-                        return -ENOSPC;
+                ret = can_allocate_chunk(fs_info, ffe_ctl);
+                if (ret)
+                        return ret;
 
                 trans = current->journal_info;
                 if (trans)
@@ -5992,7 +6017,7 @@ int btrfs_error_unpin_extent_range(struct btrfs_fs_info *fs_info,
  */
 static int btrfs_trim_free_extents(struct btrfs_device *device, u64 *trimmed)
 {
-        u64 start = SZ_1M, len = 0, end = 0;
+        u64 start = BTRFS_DEVICE_RANGE_RESERVED, len = 0, end = 0;
         int ret;
 
         *trimmed = 0;
@@ -6036,8 +6061,8 @@ static int btrfs_trim_free_extents(struct btrfs_device *device, u64 *trimmed)
                         break;
                 }
 
-                /* Ensure we skip the reserved area in the first 1M */
-                start = max_t(u64, start, SZ_1M);
+                /* Ensure we skip the reserved space on each device. */
+                start = max_t(u64, start, BTRFS_DEVICE_RANGE_RESERVED);
 
                 /*
                  * If find_first_clear_extent_bit find a range that spans the
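
These last two hunks replace the magic SZ_1M with the symbolic
BTRFS_DEVICE_RANGE_RESERVED; the clamp itself is unchanged. A one-function
sketch, assuming the reserved region is still the historical 1MiB at the
start of each device.

    #include <stdint.h>
    #include <stdio.h>

    /* Assumed to equal the historical 1MiB reservation. */
    #define DEVICE_RANGE_RESERVED (1024ull * 1024)

    /* Never let a trim range start inside the reserved area. */
    static uint64_t clamp_trim_start(uint64_t start)
    {
        return start < DEVICE_RANGE_RESERVED ? DEVICE_RANGE_RESERVED : start;
    }

    int main(void)
    {
        printf("%llu\n", (unsigned long long)clamp_trim_start(4096));
        return 0;
    }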