You've already forked linux-apfs
mirror of
https://github.com/linux-apfs/linux-apfs.git
synced 2026-05-01 15:00:59 -07:00
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/josef/btrfs-next into for-linus-3.9
Signed-off-by: Chris Mason <chris.mason@fusionio.com> Conflicts: fs/btrfs/disk-io.c
This commit is contained in:
+1
-1
@@ -19,7 +19,7 @@
|
||||
#ifndef __BTRFS_BACKREF__
|
||||
#define __BTRFS_BACKREF__
|
||||
|
||||
#include "ioctl.h"
|
||||
#include <linux/btrfs.h>
|
||||
#include "ulist.h"
|
||||
#include "extent_io.h"
|
||||
|
||||
|
||||
@@ -40,6 +40,8 @@
|
||||
#define BTRFS_INODE_HAS_ASYNC_EXTENT 6
|
||||
#define BTRFS_INODE_NEEDS_FULL_SYNC 7
|
||||
#define BTRFS_INODE_COPY_EVERYTHING 8
|
||||
#define BTRFS_INODE_IN_DELALLOC_LIST 9
|
||||
#define BTRFS_INODE_READDIO_NEED_LOCK 10
|
||||
|
||||
/* in memory btrfs inode */
|
||||
struct btrfs_inode {
|
||||
@@ -216,4 +218,22 @@ static inline int btrfs_inode_in_log(struct inode *inode, u64 generation)
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* Disable DIO read nolock optimization, so new dio readers will be forced
|
||||
* to grab i_mutex. It is used to avoid the endless truncate due to
|
||||
* nonlocked dio read.
|
||||
*/
|
||||
static inline void btrfs_inode_block_unlocked_dio(struct inode *inode)
|
||||
{
|
||||
set_bit(BTRFS_INODE_READDIO_NEED_LOCK, &BTRFS_I(inode)->runtime_flags);
|
||||
smp_mb();
|
||||
}
|
||||
|
||||
static inline void btrfs_inode_resume_unlocked_dio(struct inode *inode)
|
||||
{
|
||||
smp_mb__before_clear_bit();
|
||||
clear_bit(BTRFS_INODE_READDIO_NEED_LOCK,
|
||||
&BTRFS_I(inode)->runtime_flags);
|
||||
}
|
||||
|
||||
#endif
|
||||
|
||||
@@ -813,8 +813,7 @@ static int btrfsic_process_superblock_dev_mirror(
|
||||
(bh->b_data + (dev_bytenr & 4095));
|
||||
|
||||
if (btrfs_super_bytenr(super_tmp) != dev_bytenr ||
|
||||
strncmp((char *)(&(super_tmp->magic)), BTRFS_MAGIC,
|
||||
sizeof(super_tmp->magic)) ||
|
||||
super_tmp->magic != cpu_to_le64(BTRFS_MAGIC) ||
|
||||
memcmp(device->uuid, super_tmp->dev_item.uuid, BTRFS_UUID_SIZE) ||
|
||||
btrfs_super_nodesize(super_tmp) != state->metablock_size ||
|
||||
btrfs_super_leafsize(super_tmp) != state->metablock_size ||
|
||||
|
||||
+12
-56
@@ -1138,6 +1138,7 @@ __tree_mod_log_rewind(struct extent_buffer *eb, u64 time_seq,
|
||||
switch (tm->op) {
|
||||
case MOD_LOG_KEY_REMOVE_WHILE_FREEING:
|
||||
BUG_ON(tm->slot < n);
|
||||
/* Fallthrough */
|
||||
case MOD_LOG_KEY_REMOVE_WHILE_MOVING:
|
||||
case MOD_LOG_KEY_REMOVE:
|
||||
btrfs_set_node_key(eb, &tm->key, tm->slot);
|
||||
@@ -1222,7 +1223,7 @@ tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct extent_buffer *eb,
|
||||
|
||||
__tree_mod_log_rewind(eb_rewin, time_seq, tm);
|
||||
WARN_ON(btrfs_header_nritems(eb_rewin) >
|
||||
BTRFS_NODEPTRS_PER_BLOCK(fs_info->fs_root));
|
||||
BTRFS_NODEPTRS_PER_BLOCK(fs_info->tree_root));
|
||||
|
||||
return eb_rewin;
|
||||
}
|
||||
@@ -1441,7 +1442,7 @@ int btrfs_comp_cpu_keys(struct btrfs_key *k1, struct btrfs_key *k2)
|
||||
*/
|
||||
int btrfs_realloc_node(struct btrfs_trans_handle *trans,
|
||||
struct btrfs_root *root, struct extent_buffer *parent,
|
||||
int start_slot, int cache_only, u64 *last_ret,
|
||||
int start_slot, u64 *last_ret,
|
||||
struct btrfs_key *progress)
|
||||
{
|
||||
struct extent_buffer *cur;
|
||||
@@ -1461,8 +1462,6 @@ int btrfs_realloc_node(struct btrfs_trans_handle *trans,
|
||||
struct btrfs_disk_key disk_key;
|
||||
|
||||
parent_level = btrfs_header_level(parent);
|
||||
if (cache_only && parent_level != 1)
|
||||
return 0;
|
||||
|
||||
WARN_ON(trans->transaction != root->fs_info->running_transaction);
|
||||
WARN_ON(trans->transid != root->fs_info->generation);
|
||||
@@ -1508,10 +1507,6 @@ int btrfs_realloc_node(struct btrfs_trans_handle *trans,
|
||||
else
|
||||
uptodate = 0;
|
||||
if (!cur || !uptodate) {
|
||||
if (cache_only) {
|
||||
free_extent_buffer(cur);
|
||||
continue;
|
||||
}
|
||||
if (!cur) {
|
||||
cur = read_tree_block(root, blocknr,
|
||||
blocksize, gen);
|
||||
@@ -4825,8 +4820,8 @@ int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path)
|
||||
|
||||
/*
|
||||
* A helper function to walk down the tree starting at min_key, and looking
|
||||
* for nodes or leaves that are either in cache or have a minimum
|
||||
* transaction id. This is used by the btree defrag code, and tree logging
|
||||
* for nodes or leaves that are have a minimum transaction id.
|
||||
* This is used by the btree defrag code, and tree logging
|
||||
*
|
||||
* This does not cow, but it does stuff the starting key it finds back
|
||||
* into min_key, so you can call btrfs_search_slot with cow=1 on the
|
||||
@@ -4847,7 +4842,7 @@ int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path)
|
||||
*/
|
||||
int btrfs_search_forward(struct btrfs_root *root, struct btrfs_key *min_key,
|
||||
struct btrfs_key *max_key,
|
||||
struct btrfs_path *path, int cache_only,
|
||||
struct btrfs_path *path,
|
||||
u64 min_trans)
|
||||
{
|
||||
struct extent_buffer *cur;
|
||||
@@ -4887,15 +4882,12 @@ again:
|
||||
if (sret && slot > 0)
|
||||
slot--;
|
||||
/*
|
||||
* check this node pointer against the cache_only and
|
||||
* min_trans parameters. If it isn't in cache or is too
|
||||
* old, skip to the next one.
|
||||
* check this node pointer against the min_trans parameters.
|
||||
* If it is too old, old, skip to the next one.
|
||||
*/
|
||||
while (slot < nritems) {
|
||||
u64 blockptr;
|
||||
u64 gen;
|
||||
struct extent_buffer *tmp;
|
||||
struct btrfs_disk_key disk_key;
|
||||
|
||||
blockptr = btrfs_node_blockptr(cur, slot);
|
||||
gen = btrfs_node_ptr_generation(cur, slot);
|
||||
@@ -4903,27 +4895,7 @@ again:
|
||||
slot++;
|
||||
continue;
|
||||
}
|
||||
if (!cache_only)
|
||||
break;
|
||||
|
||||
if (max_key) {
|
||||
btrfs_node_key(cur, &disk_key, slot);
|
||||
if (comp_keys(&disk_key, max_key) >= 0) {
|
||||
ret = 1;
|
||||
goto out;
|
||||
}
|
||||
}
|
||||
|
||||
tmp = btrfs_find_tree_block(root, blockptr,
|
||||
btrfs_level_size(root, level - 1));
|
||||
|
||||
if (tmp && btrfs_buffer_uptodate(tmp, gen, 1) > 0) {
|
||||
free_extent_buffer(tmp);
|
||||
break;
|
||||
}
|
||||
if (tmp)
|
||||
free_extent_buffer(tmp);
|
||||
slot++;
|
||||
break;
|
||||
}
|
||||
find_next_key:
|
||||
/*
|
||||
@@ -4934,7 +4906,7 @@ find_next_key:
|
||||
path->slots[level] = slot;
|
||||
btrfs_set_path_blocking(path);
|
||||
sret = btrfs_find_next_key(root, path, min_key, level,
|
||||
cache_only, min_trans);
|
||||
min_trans);
|
||||
if (sret == 0) {
|
||||
btrfs_release_path(path);
|
||||
goto again;
|
||||
@@ -5399,8 +5371,7 @@ out:
|
||||
/*
|
||||
* this is similar to btrfs_next_leaf, but does not try to preserve
|
||||
* and fixup the path. It looks for and returns the next key in the
|
||||
* tree based on the current path and the cache_only and min_trans
|
||||
* parameters.
|
||||
* tree based on the current path and the min_trans parameters.
|
||||
*
|
||||
* 0 is returned if another key is found, < 0 if there are any errors
|
||||
* and 1 is returned if there are no higher keys in the tree
|
||||
@@ -5409,8 +5380,7 @@ out:
|
||||
* calling this function.
|
||||
*/
|
||||
int btrfs_find_next_key(struct btrfs_root *root, struct btrfs_path *path,
|
||||
struct btrfs_key *key, int level,
|
||||
int cache_only, u64 min_trans)
|
||||
struct btrfs_key *key, int level, u64 min_trans)
|
||||
{
|
||||
int slot;
|
||||
struct extent_buffer *c;
|
||||
@@ -5461,22 +5431,8 @@ next:
|
||||
if (level == 0)
|
||||
btrfs_item_key_to_cpu(c, key, slot);
|
||||
else {
|
||||
u64 blockptr = btrfs_node_blockptr(c, slot);
|
||||
u64 gen = btrfs_node_ptr_generation(c, slot);
|
||||
|
||||
if (cache_only) {
|
||||
struct extent_buffer *cur;
|
||||
cur = btrfs_find_tree_block(root, blockptr,
|
||||
btrfs_level_size(root, level - 1));
|
||||
if (!cur ||
|
||||
btrfs_buffer_uptodate(cur, gen, 1) <= 0) {
|
||||
slot++;
|
||||
if (cur)
|
||||
free_extent_buffer(cur);
|
||||
goto next;
|
||||
}
|
||||
free_extent_buffer(cur);
|
||||
}
|
||||
if (gen < min_trans) {
|
||||
slot++;
|
||||
goto next;
|
||||
|
||||
+68
-27
@@ -31,10 +31,10 @@
|
||||
#include <trace/events/btrfs.h>
|
||||
#include <asm/kmap_types.h>
|
||||
#include <linux/pagemap.h>
|
||||
#include <linux/btrfs.h>
|
||||
#include "extent_io.h"
|
||||
#include "extent_map.h"
|
||||
#include "async-thread.h"
|
||||
#include "ioctl.h"
|
||||
|
||||
struct btrfs_trans_handle;
|
||||
struct btrfs_transaction;
|
||||
@@ -46,7 +46,7 @@ extern struct kmem_cache *btrfs_path_cachep;
|
||||
extern struct kmem_cache *btrfs_free_space_cachep;
|
||||
struct btrfs_ordered_sum;
|
||||
|
||||
#define BTRFS_MAGIC "_BHRfS_M"
|
||||
#define BTRFS_MAGIC 0x4D5F53665248425FULL /* ascii _BHRfS_M, no null */
|
||||
|
||||
#define BTRFS_MAX_MIRRORS 3
|
||||
|
||||
@@ -191,6 +191,8 @@ static int btrfs_csum_sizes[] = { 4, 0 };
|
||||
/* ioprio of readahead is set to idle */
|
||||
#define BTRFS_IOPRIO_READA (IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0))
|
||||
|
||||
#define BTRFS_DIRTY_METADATA_THRESH (32 * 1024 * 1024)
|
||||
|
||||
/*
|
||||
* The key defines the order in the tree, and so it also defines (optimal)
|
||||
* block layout.
|
||||
@@ -336,7 +338,9 @@ static inline unsigned long btrfs_chunk_item_size(int num_stripes)
|
||||
/*
|
||||
* File system states
|
||||
*/
|
||||
#define BTRFS_FS_STATE_ERROR 0
|
||||
|
||||
/* Super block flags */
|
||||
/* Errors detected */
|
||||
#define BTRFS_SUPER_FLAG_ERROR (1ULL << 2)
|
||||
|
||||
@@ -953,7 +957,15 @@ struct btrfs_dev_replace_item {
|
||||
#define BTRFS_BLOCK_GROUP_DUP (1ULL << 5)
|
||||
#define BTRFS_BLOCK_GROUP_RAID10 (1ULL << 6)
|
||||
#define BTRFS_BLOCK_GROUP_RESERVED BTRFS_AVAIL_ALLOC_BIT_SINGLE
|
||||
#define BTRFS_NR_RAID_TYPES 5
|
||||
|
||||
enum btrfs_raid_types {
|
||||
BTRFS_RAID_RAID10,
|
||||
BTRFS_RAID_RAID1,
|
||||
BTRFS_RAID_DUP,
|
||||
BTRFS_RAID_RAID0,
|
||||
BTRFS_RAID_SINGLE,
|
||||
BTRFS_NR_RAID_TYPES
|
||||
};
|
||||
|
||||
#define BTRFS_BLOCK_GROUP_TYPE_MASK (BTRFS_BLOCK_GROUP_DATA | \
|
||||
BTRFS_BLOCK_GROUP_SYSTEM | \
|
||||
@@ -1225,6 +1237,11 @@ struct seq_list {
|
||||
u64 seq;
|
||||
};
|
||||
|
||||
enum btrfs_orphan_cleanup_state {
|
||||
ORPHAN_CLEANUP_STARTED = 1,
|
||||
ORPHAN_CLEANUP_DONE = 2,
|
||||
};
|
||||
|
||||
/* fs_info */
|
||||
struct reloc_control;
|
||||
struct btrfs_device;
|
||||
@@ -1250,6 +1267,7 @@ struct btrfs_fs_info {
|
||||
|
||||
/* block group cache stuff */
|
||||
spinlock_t block_group_cache_lock;
|
||||
u64 first_logical_byte;
|
||||
struct rb_root block_group_cache_tree;
|
||||
|
||||
/* keep track of unallocated space */
|
||||
@@ -1288,7 +1306,23 @@ struct btrfs_fs_info {
|
||||
u64 last_trans_log_full_commit;
|
||||
unsigned long mount_opt;
|
||||
unsigned long compress_type:4;
|
||||
/*
|
||||
* It is a suggestive number, the read side is safe even it gets a
|
||||
* wrong number because we will write out the data into a regular
|
||||
* extent. The write side(mount/remount) is under ->s_umount lock,
|
||||
* so it is also safe.
|
||||
*/
|
||||
u64 max_inline;
|
||||
/*
|
||||
* Protected by ->chunk_mutex and sb->s_umount.
|
||||
*
|
||||
* The reason that we use two lock to protect it is because only
|
||||
* remount and mount operations can change it and these two operations
|
||||
* are under sb->s_umount, but the read side (chunk allocation) can not
|
||||
* acquire sb->s_umount or the deadlock would happen. So we use two
|
||||
* locks to protect it. On the write side, we must acquire two locks,
|
||||
* and on the read side, we just need acquire one of them.
|
||||
*/
|
||||
u64 alloc_start;
|
||||
struct btrfs_transaction *running_transaction;
|
||||
wait_queue_head_t transaction_throttle;
|
||||
@@ -1365,6 +1399,7 @@ struct btrfs_fs_info {
|
||||
*/
|
||||
struct list_head ordered_extents;
|
||||
|
||||
spinlock_t delalloc_lock;
|
||||
/*
|
||||
* all of the inodes that have delalloc bytes. It is possible for
|
||||
* this list to be empty even when there is still dirty data=ordered
|
||||
@@ -1372,13 +1407,6 @@ struct btrfs_fs_info {
|
||||
*/
|
||||
struct list_head delalloc_inodes;
|
||||
|
||||
/*
|
||||
* special rename and truncate targets that must be on disk before
|
||||
* we're allowed to commit. This is basically the ext3 style
|
||||
* data=ordered list.
|
||||
*/
|
||||
struct list_head ordered_operations;
|
||||
|
||||
/*
|
||||
* there is a pool of worker threads for checksumming during writes
|
||||
* and a pool for checksumming after reads. This is because readers
|
||||
@@ -1423,10 +1451,12 @@ struct btrfs_fs_info {
|
||||
|
||||
u64 total_pinned;
|
||||
|
||||
/* protected by the delalloc lock, used to keep from writing
|
||||
* metadata until there is a nice batch
|
||||
*/
|
||||
u64 dirty_metadata_bytes;
|
||||
/* used to keep from writing metadata until there is a nice batch */
|
||||
struct percpu_counter dirty_metadata_bytes;
|
||||
struct percpu_counter delalloc_bytes;
|
||||
s32 dirty_metadata_batch;
|
||||
s32 delalloc_batch;
|
||||
|
||||
struct list_head dirty_cowonly_roots;
|
||||
|
||||
struct btrfs_fs_devices *fs_devices;
|
||||
@@ -1442,9 +1472,6 @@ struct btrfs_fs_info {
|
||||
|
||||
struct reloc_control *reloc_ctl;
|
||||
|
||||
spinlock_t delalloc_lock;
|
||||
u64 delalloc_bytes;
|
||||
|
||||
/* data_alloc_cluster is only used in ssd mode */
|
||||
struct btrfs_free_cluster data_alloc_cluster;
|
||||
|
||||
@@ -1456,6 +1483,8 @@ struct btrfs_fs_info {
|
||||
struct rb_root defrag_inodes;
|
||||
atomic_t defrag_running;
|
||||
|
||||
/* Used to protect avail_{data, metadata, system}_alloc_bits */
|
||||
seqlock_t profiles_lock;
|
||||
/*
|
||||
* these three are in extended format (availability of single
|
||||
* chunks is denoted by BTRFS_AVAIL_ALLOC_BIT_SINGLE bit, other
|
||||
@@ -1520,7 +1549,7 @@ struct btrfs_fs_info {
|
||||
u64 qgroup_seq;
|
||||
|
||||
/* filesystem state */
|
||||
u64 fs_state;
|
||||
unsigned long fs_state;
|
||||
|
||||
struct btrfs_delayed_root *delayed_root;
|
||||
|
||||
@@ -1623,6 +1652,9 @@ struct btrfs_root {
|
||||
|
||||
struct list_head root_list;
|
||||
|
||||
spinlock_t log_extents_lock[2];
|
||||
struct list_head logged_list[2];
|
||||
|
||||
spinlock_t orphan_lock;
|
||||
atomic_t orphan_inodes;
|
||||
struct btrfs_block_rsv *orphan_block_rsv;
|
||||
@@ -2936,8 +2968,7 @@ int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans,
|
||||
u64 num_bytes, u64 *refs, u64 *flags);
|
||||
int btrfs_pin_extent(struct btrfs_root *root,
|
||||
u64 bytenr, u64 num, int reserved);
|
||||
int btrfs_pin_extent_for_log_replay(struct btrfs_trans_handle *trans,
|
||||
struct btrfs_root *root,
|
||||
int btrfs_pin_extent_for_log_replay(struct btrfs_root *root,
|
||||
u64 bytenr, u64 num_bytes);
|
||||
int btrfs_cross_ref_exist(struct btrfs_trans_handle *trans,
|
||||
struct btrfs_root *root,
|
||||
@@ -3092,10 +3123,10 @@ struct extent_buffer *btrfs_root_node(struct btrfs_root *root);
|
||||
struct extent_buffer *btrfs_lock_root_node(struct btrfs_root *root);
|
||||
int btrfs_find_next_key(struct btrfs_root *root, struct btrfs_path *path,
|
||||
struct btrfs_key *key, int lowest_level,
|
||||
int cache_only, u64 min_trans);
|
||||
u64 min_trans);
|
||||
int btrfs_search_forward(struct btrfs_root *root, struct btrfs_key *min_key,
|
||||
struct btrfs_key *max_key,
|
||||
struct btrfs_path *path, int cache_only,
|
||||
struct btrfs_path *path,
|
||||
u64 min_trans);
|
||||
enum btrfs_compare_tree_result {
|
||||
BTRFS_COMPARE_TREE_NEW,
|
||||
@@ -3148,7 +3179,7 @@ int btrfs_search_slot_for_read(struct btrfs_root *root,
|
||||
int find_higher, int return_any);
|
||||
int btrfs_realloc_node(struct btrfs_trans_handle *trans,
|
||||
struct btrfs_root *root, struct extent_buffer *parent,
|
||||
int start_slot, int cache_only, u64 *last_ret,
|
||||
int start_slot, u64 *last_ret,
|
||||
struct btrfs_key *progress);
|
||||
void btrfs_release_path(struct btrfs_path *p);
|
||||
struct btrfs_path *btrfs_alloc_path(void);
|
||||
@@ -3543,7 +3574,7 @@ int btrfs_dirty_pages(struct btrfs_root *root, struct inode *inode,
|
||||
|
||||
/* tree-defrag.c */
|
||||
int btrfs_defrag_leaves(struct btrfs_trans_handle *trans,
|
||||
struct btrfs_root *root, int cache_only);
|
||||
struct btrfs_root *root);
|
||||
|
||||
/* sysfs.c */
|
||||
int btrfs_init_sysfs(void);
|
||||
@@ -3620,11 +3651,14 @@ __printf(5, 6)
|
||||
void __btrfs_panic(struct btrfs_fs_info *fs_info, const char *function,
|
||||
unsigned int line, int errno, const char *fmt, ...);
|
||||
|
||||
/*
|
||||
* If BTRFS_MOUNT_PANIC_ON_FATAL_ERROR is in mount_opt, __btrfs_panic
|
||||
* will panic(). Otherwise we BUG() here.
|
||||
*/
|
||||
#define btrfs_panic(fs_info, errno, fmt, args...) \
|
||||
do { \
|
||||
struct btrfs_fs_info *_i = (fs_info); \
|
||||
__btrfs_panic(_i, __func__, __LINE__, errno, fmt, ##args); \
|
||||
BUG_ON(!(_i->mount_opt & BTRFS_MOUNT_PANIC_ON_FATAL_ERROR)); \
|
||||
__btrfs_panic(fs_info, __func__, __LINE__, errno, fmt, ##args); \
|
||||
BUG(); \
|
||||
} while (0)
|
||||
|
||||
/* acl.c */
|
||||
@@ -3745,4 +3779,11 @@ static inline int is_fstree(u64 rootid)
|
||||
return 1;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline int btrfs_defrag_cancelled(struct btrfs_fs_info *fs_info)
|
||||
{
|
||||
return signal_pending(current);
|
||||
}
|
||||
|
||||
|
||||
#endif
|
||||
|
||||
+114
-61
@@ -875,7 +875,6 @@ static int btrfs_insert_delayed_item(struct btrfs_trans_handle *trans,
|
||||
struct btrfs_delayed_item *delayed_item)
|
||||
{
|
||||
struct extent_buffer *leaf;
|
||||
struct btrfs_item *item;
|
||||
char *ptr;
|
||||
int ret;
|
||||
|
||||
@@ -886,7 +885,6 @@ static int btrfs_insert_delayed_item(struct btrfs_trans_handle *trans,
|
||||
|
||||
leaf = path->nodes[0];
|
||||
|
||||
item = btrfs_item_nr(leaf, path->slots[0]);
|
||||
ptr = btrfs_item_ptr(leaf, path->slots[0], char);
|
||||
|
||||
write_extent_buffer(leaf, delayed_item->data, (unsigned long)ptr,
|
||||
@@ -1065,32 +1063,25 @@ static void btrfs_release_delayed_inode(struct btrfs_delayed_node *delayed_node)
|
||||
}
|
||||
}
|
||||
|
||||
static int btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
|
||||
struct btrfs_root *root,
|
||||
struct btrfs_path *path,
|
||||
struct btrfs_delayed_node *node)
|
||||
static int __btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
|
||||
struct btrfs_root *root,
|
||||
struct btrfs_path *path,
|
||||
struct btrfs_delayed_node *node)
|
||||
{
|
||||
struct btrfs_key key;
|
||||
struct btrfs_inode_item *inode_item;
|
||||
struct extent_buffer *leaf;
|
||||
int ret;
|
||||
|
||||
mutex_lock(&node->mutex);
|
||||
if (!node->inode_dirty) {
|
||||
mutex_unlock(&node->mutex);
|
||||
return 0;
|
||||
}
|
||||
|
||||
key.objectid = node->inode_id;
|
||||
btrfs_set_key_type(&key, BTRFS_INODE_ITEM_KEY);
|
||||
key.offset = 0;
|
||||
|
||||
ret = btrfs_lookup_inode(trans, root, path, &key, 1);
|
||||
if (ret > 0) {
|
||||
btrfs_release_path(path);
|
||||
mutex_unlock(&node->mutex);
|
||||
return -ENOENT;
|
||||
} else if (ret < 0) {
|
||||
mutex_unlock(&node->mutex);
|
||||
return ret;
|
||||
}
|
||||
|
||||
@@ -1105,11 +1096,47 @@ static int btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
|
||||
|
||||
btrfs_delayed_inode_release_metadata(root, node);
|
||||
btrfs_release_delayed_inode(node);
|
||||
mutex_unlock(&node->mutex);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline int btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
|
||||
struct btrfs_root *root,
|
||||
struct btrfs_path *path,
|
||||
struct btrfs_delayed_node *node)
|
||||
{
|
||||
int ret;
|
||||
|
||||
mutex_lock(&node->mutex);
|
||||
if (!node->inode_dirty) {
|
||||
mutex_unlock(&node->mutex);
|
||||
return 0;
|
||||
}
|
||||
|
||||
ret = __btrfs_update_delayed_inode(trans, root, path, node);
|
||||
mutex_unlock(&node->mutex);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static inline int
|
||||
__btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
|
||||
struct btrfs_path *path,
|
||||
struct btrfs_delayed_node *node)
|
||||
{
|
||||
int ret;
|
||||
|
||||
ret = btrfs_insert_delayed_items(trans, path, node->root, node);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
ret = btrfs_delete_delayed_items(trans, path, node->root, node);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
ret = btrfs_update_delayed_inode(trans, node->root, path, node);
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
* Called when committing the transaction.
|
||||
* Returns 0 on success.
|
||||
@@ -1119,7 +1146,6 @@ static int btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
|
||||
static int __btrfs_run_delayed_items(struct btrfs_trans_handle *trans,
|
||||
struct btrfs_root *root, int nr)
|
||||
{
|
||||
struct btrfs_root *curr_root = root;
|
||||
struct btrfs_delayed_root *delayed_root;
|
||||
struct btrfs_delayed_node *curr_node, *prev_node;
|
||||
struct btrfs_path *path;
|
||||
@@ -1142,15 +1168,8 @@ static int __btrfs_run_delayed_items(struct btrfs_trans_handle *trans,
|
||||
|
||||
curr_node = btrfs_first_delayed_node(delayed_root);
|
||||
while (curr_node && (!count || (count && nr--))) {
|
||||
curr_root = curr_node->root;
|
||||
ret = btrfs_insert_delayed_items(trans, path, curr_root,
|
||||
curr_node);
|
||||
if (!ret)
|
||||
ret = btrfs_delete_delayed_items(trans, path,
|
||||
curr_root, curr_node);
|
||||
if (!ret)
|
||||
ret = btrfs_update_delayed_inode(trans, curr_root,
|
||||
path, curr_node);
|
||||
ret = __btrfs_commit_inode_delayed_items(trans, path,
|
||||
curr_node);
|
||||
if (ret) {
|
||||
btrfs_release_delayed_node(curr_node);
|
||||
curr_node = NULL;
|
||||
@@ -1183,36 +1202,12 @@ int btrfs_run_delayed_items_nr(struct btrfs_trans_handle *trans,
|
||||
return __btrfs_run_delayed_items(trans, root, nr);
|
||||
}
|
||||
|
||||
static int __btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
|
||||
struct btrfs_delayed_node *node)
|
||||
{
|
||||
struct btrfs_path *path;
|
||||
struct btrfs_block_rsv *block_rsv;
|
||||
int ret;
|
||||
|
||||
path = btrfs_alloc_path();
|
||||
if (!path)
|
||||
return -ENOMEM;
|
||||
path->leave_spinning = 1;
|
||||
|
||||
block_rsv = trans->block_rsv;
|
||||
trans->block_rsv = &node->root->fs_info->delayed_block_rsv;
|
||||
|
||||
ret = btrfs_insert_delayed_items(trans, path, node->root, node);
|
||||
if (!ret)
|
||||
ret = btrfs_delete_delayed_items(trans, path, node->root, node);
|
||||
if (!ret)
|
||||
ret = btrfs_update_delayed_inode(trans, node->root, path, node);
|
||||
btrfs_free_path(path);
|
||||
|
||||
trans->block_rsv = block_rsv;
|
||||
return ret;
|
||||
}
|
||||
|
||||
int btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
|
||||
struct inode *inode)
|
||||
{
|
||||
struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
|
||||
struct btrfs_path *path;
|
||||
struct btrfs_block_rsv *block_rsv;
|
||||
int ret;
|
||||
|
||||
if (!delayed_node)
|
||||
@@ -1226,8 +1221,74 @@ int btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
|
||||
}
|
||||
mutex_unlock(&delayed_node->mutex);
|
||||
|
||||
ret = __btrfs_commit_inode_delayed_items(trans, delayed_node);
|
||||
path = btrfs_alloc_path();
|
||||
if (!path)
|
||||
return -ENOMEM;
|
||||
path->leave_spinning = 1;
|
||||
|
||||
block_rsv = trans->block_rsv;
|
||||
trans->block_rsv = &delayed_node->root->fs_info->delayed_block_rsv;
|
||||
|
||||
ret = __btrfs_commit_inode_delayed_items(trans, path, delayed_node);
|
||||
|
||||
btrfs_release_delayed_node(delayed_node);
|
||||
btrfs_free_path(path);
|
||||
trans->block_rsv = block_rsv;
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
int btrfs_commit_inode_delayed_inode(struct inode *inode)
|
||||
{
|
||||
struct btrfs_trans_handle *trans;
|
||||
struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
|
||||
struct btrfs_path *path;
|
||||
struct btrfs_block_rsv *block_rsv;
|
||||
int ret;
|
||||
|
||||
if (!delayed_node)
|
||||
return 0;
|
||||
|
||||
mutex_lock(&delayed_node->mutex);
|
||||
if (!delayed_node->inode_dirty) {
|
||||
mutex_unlock(&delayed_node->mutex);
|
||||
btrfs_release_delayed_node(delayed_node);
|
||||
return 0;
|
||||
}
|
||||
mutex_unlock(&delayed_node->mutex);
|
||||
|
||||
trans = btrfs_join_transaction(delayed_node->root);
|
||||
if (IS_ERR(trans)) {
|
||||
ret = PTR_ERR(trans);
|
||||
goto out;
|
||||
}
|
||||
|
||||
path = btrfs_alloc_path();
|
||||
if (!path) {
|
||||
ret = -ENOMEM;
|
||||
goto trans_out;
|
||||
}
|
||||
path->leave_spinning = 1;
|
||||
|
||||
block_rsv = trans->block_rsv;
|
||||
trans->block_rsv = &delayed_node->root->fs_info->delayed_block_rsv;
|
||||
|
||||
mutex_lock(&delayed_node->mutex);
|
||||
if (delayed_node->inode_dirty)
|
||||
ret = __btrfs_update_delayed_inode(trans, delayed_node->root,
|
||||
path, delayed_node);
|
||||
else
|
||||
ret = 0;
|
||||
mutex_unlock(&delayed_node->mutex);
|
||||
|
||||
btrfs_free_path(path);
|
||||
trans->block_rsv = block_rsv;
|
||||
trans_out:
|
||||
btrfs_end_transaction(trans, delayed_node->root);
|
||||
btrfs_btree_balance_dirty(delayed_node->root);
|
||||
out:
|
||||
btrfs_release_delayed_node(delayed_node);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
@@ -1258,7 +1319,6 @@ static void btrfs_async_run_delayed_node_done(struct btrfs_work *work)
|
||||
struct btrfs_root *root;
|
||||
struct btrfs_block_rsv *block_rsv;
|
||||
int need_requeue = 0;
|
||||
int ret;
|
||||
|
||||
async_node = container_of(work, struct btrfs_async_delayed_node, work);
|
||||
|
||||
@@ -1277,14 +1337,7 @@ static void btrfs_async_run_delayed_node_done(struct btrfs_work *work)
|
||||
block_rsv = trans->block_rsv;
|
||||
trans->block_rsv = &root->fs_info->delayed_block_rsv;
|
||||
|
||||
ret = btrfs_insert_delayed_items(trans, path, root, delayed_node);
|
||||
if (!ret)
|
||||
ret = btrfs_delete_delayed_items(trans, path, root,
|
||||
delayed_node);
|
||||
|
||||
if (!ret)
|
||||
btrfs_update_delayed_inode(trans, root, path, delayed_node);
|
||||
|
||||
__btrfs_commit_inode_delayed_items(trans, path, delayed_node);
|
||||
/*
|
||||
* Maybe new delayed items have been inserted, so we need requeue
|
||||
* the work. Besides that, we must dequeue the empty delayed nodes
|
||||
|
||||
@@ -117,6 +117,7 @@ int btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
|
||||
/* Used for evicting the inode. */
|
||||
void btrfs_remove_delayed_node(struct inode *inode);
|
||||
void btrfs_kill_delayed_inode_items(struct inode *inode);
|
||||
int btrfs_commit_inode_delayed_inode(struct inode *inode);
|
||||
|
||||
|
||||
int btrfs_delayed_update_inode(struct btrfs_trans_handle *trans,
|
||||
|
||||
+71
-11
@@ -23,6 +23,10 @@
|
||||
#include "delayed-ref.h"
|
||||
#include "transaction.h"
|
||||
|
||||
struct kmem_cache *btrfs_delayed_ref_head_cachep;
|
||||
struct kmem_cache *btrfs_delayed_tree_ref_cachep;
|
||||
struct kmem_cache *btrfs_delayed_data_ref_cachep;
|
||||
struct kmem_cache *btrfs_delayed_extent_op_cachep;
|
||||
/*
|
||||
* delayed back reference update tracking. For subvolume trees
|
||||
* we queue up extent allocations and backref maintenance for
|
||||
@@ -422,6 +426,14 @@ again:
|
||||
return 1;
|
||||
}
|
||||
|
||||
void btrfs_release_ref_cluster(struct list_head *cluster)
|
||||
{
|
||||
struct list_head *pos, *q;
|
||||
|
||||
list_for_each_safe(pos, q, cluster)
|
||||
list_del_init(pos);
|
||||
}
|
||||
|
||||
/*
|
||||
* helper function to update an extent delayed ref in the
|
||||
* rbtree. existing and update must both have the same
|
||||
@@ -511,7 +523,7 @@ update_existing_head_ref(struct btrfs_delayed_ref_node *existing,
|
||||
ref->extent_op->flags_to_set;
|
||||
existing_ref->extent_op->update_flags = 1;
|
||||
}
|
||||
kfree(ref->extent_op);
|
||||
btrfs_free_delayed_extent_op(ref->extent_op);
|
||||
}
|
||||
}
|
||||
/*
|
||||
@@ -592,7 +604,7 @@ static noinline void add_delayed_ref_head(struct btrfs_fs_info *fs_info,
|
||||
* we've updated the existing ref, free the newly
|
||||
* allocated ref
|
||||
*/
|
||||
kfree(head_ref);
|
||||
kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref);
|
||||
} else {
|
||||
delayed_refs->num_heads++;
|
||||
delayed_refs->num_heads_ready++;
|
||||
@@ -653,7 +665,7 @@ static noinline void add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
|
||||
* we've updated the existing ref, free the newly
|
||||
* allocated ref
|
||||
*/
|
||||
kfree(full_ref);
|
||||
kmem_cache_free(btrfs_delayed_tree_ref_cachep, full_ref);
|
||||
} else {
|
||||
delayed_refs->num_entries++;
|
||||
trans->delayed_ref_updates++;
|
||||
@@ -714,7 +726,7 @@ static noinline void add_delayed_data_ref(struct btrfs_fs_info *fs_info,
|
||||
* we've updated the existing ref, free the newly
|
||||
* allocated ref
|
||||
*/
|
||||
kfree(full_ref);
|
||||
kmem_cache_free(btrfs_delayed_data_ref_cachep, full_ref);
|
||||
} else {
|
||||
delayed_refs->num_entries++;
|
||||
trans->delayed_ref_updates++;
|
||||
@@ -738,13 +750,13 @@ int btrfs_add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
|
||||
struct btrfs_delayed_ref_root *delayed_refs;
|
||||
|
||||
BUG_ON(extent_op && extent_op->is_data);
|
||||
ref = kmalloc(sizeof(*ref), GFP_NOFS);
|
||||
ref = kmem_cache_alloc(btrfs_delayed_tree_ref_cachep, GFP_NOFS);
|
||||
if (!ref)
|
||||
return -ENOMEM;
|
||||
|
||||
head_ref = kmalloc(sizeof(*head_ref), GFP_NOFS);
|
||||
head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
|
||||
if (!head_ref) {
|
||||
kfree(ref);
|
||||
kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
@@ -786,13 +798,13 @@ int btrfs_add_delayed_data_ref(struct btrfs_fs_info *fs_info,
|
||||
struct btrfs_delayed_ref_root *delayed_refs;
|
||||
|
||||
BUG_ON(extent_op && !extent_op->is_data);
|
||||
ref = kmalloc(sizeof(*ref), GFP_NOFS);
|
||||
ref = kmem_cache_alloc(btrfs_delayed_data_ref_cachep, GFP_NOFS);
|
||||
if (!ref)
|
||||
return -ENOMEM;
|
||||
|
||||
head_ref = kmalloc(sizeof(*head_ref), GFP_NOFS);
|
||||
head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
|
||||
if (!head_ref) {
|
||||
kfree(ref);
|
||||
kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
@@ -826,7 +838,7 @@ int btrfs_add_delayed_extent_op(struct btrfs_fs_info *fs_info,
|
||||
struct btrfs_delayed_ref_head *head_ref;
|
||||
struct btrfs_delayed_ref_root *delayed_refs;
|
||||
|
||||
head_ref = kmalloc(sizeof(*head_ref), GFP_NOFS);
|
||||
head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
|
||||
if (!head_ref)
|
||||
return -ENOMEM;
|
||||
|
||||
@@ -860,3 +872,51 @@ btrfs_find_delayed_ref_head(struct btrfs_trans_handle *trans, u64 bytenr)
|
||||
return btrfs_delayed_node_to_head(ref);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
void btrfs_delayed_ref_exit(void)
|
||||
{
|
||||
if (btrfs_delayed_ref_head_cachep)
|
||||
kmem_cache_destroy(btrfs_delayed_ref_head_cachep);
|
||||
if (btrfs_delayed_tree_ref_cachep)
|
||||
kmem_cache_destroy(btrfs_delayed_tree_ref_cachep);
|
||||
if (btrfs_delayed_data_ref_cachep)
|
||||
kmem_cache_destroy(btrfs_delayed_data_ref_cachep);
|
||||
if (btrfs_delayed_extent_op_cachep)
|
||||
kmem_cache_destroy(btrfs_delayed_extent_op_cachep);
|
||||
}
|
||||
|
||||
int btrfs_delayed_ref_init(void)
|
||||
{
|
||||
btrfs_delayed_ref_head_cachep = kmem_cache_create(
|
||||
"btrfs_delayed_ref_head",
|
||||
sizeof(struct btrfs_delayed_ref_head), 0,
|
||||
SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
|
||||
if (!btrfs_delayed_ref_head_cachep)
|
||||
goto fail;
|
||||
|
||||
btrfs_delayed_tree_ref_cachep = kmem_cache_create(
|
||||
"btrfs_delayed_tree_ref",
|
||||
sizeof(struct btrfs_delayed_tree_ref), 0,
|
||||
SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
|
||||
if (!btrfs_delayed_tree_ref_cachep)
|
||||
goto fail;
|
||||
|
||||
btrfs_delayed_data_ref_cachep = kmem_cache_create(
|
||||
"btrfs_delayed_data_ref",
|
||||
sizeof(struct btrfs_delayed_data_ref), 0,
|
||||
SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
|
||||
if (!btrfs_delayed_data_ref_cachep)
|
||||
goto fail;
|
||||
|
||||
btrfs_delayed_extent_op_cachep = kmem_cache_create(
|
||||
"btrfs_delayed_extent_op",
|
||||
sizeof(struct btrfs_delayed_extent_op), 0,
|
||||
SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
|
||||
if (!btrfs_delayed_extent_op_cachep)
|
||||
goto fail;
|
||||
|
||||
return 0;
|
||||
fail:
|
||||
btrfs_delayed_ref_exit();
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
+42
-1
@@ -141,12 +141,47 @@ struct btrfs_delayed_ref_root {
|
||||
u64 run_delayed_start;
|
||||
};
|
||||
|
||||
extern struct kmem_cache *btrfs_delayed_ref_head_cachep;
|
||||
extern struct kmem_cache *btrfs_delayed_tree_ref_cachep;
|
||||
extern struct kmem_cache *btrfs_delayed_data_ref_cachep;
|
||||
extern struct kmem_cache *btrfs_delayed_extent_op_cachep;
|
||||
|
||||
int btrfs_delayed_ref_init(void);
|
||||
void btrfs_delayed_ref_exit(void);
|
||||
|
||||
static inline struct btrfs_delayed_extent_op *
|
||||
btrfs_alloc_delayed_extent_op(void)
|
||||
{
|
||||
return kmem_cache_alloc(btrfs_delayed_extent_op_cachep, GFP_NOFS);
|
||||
}
|
||||
|
||||
static inline void
|
||||
btrfs_free_delayed_extent_op(struct btrfs_delayed_extent_op *op)
|
||||
{
|
||||
if (op)
|
||||
kmem_cache_free(btrfs_delayed_extent_op_cachep, op);
|
||||
}
|
||||
|
||||
static inline void btrfs_put_delayed_ref(struct btrfs_delayed_ref_node *ref)
|
||||
{
|
||||
WARN_ON(atomic_read(&ref->refs) == 0);
|
||||
if (atomic_dec_and_test(&ref->refs)) {
|
||||
WARN_ON(ref->in_tree);
|
||||
kfree(ref);
|
||||
switch (ref->type) {
|
||||
case BTRFS_TREE_BLOCK_REF_KEY:
|
||||
case BTRFS_SHARED_BLOCK_REF_KEY:
|
||||
kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref);
|
||||
break;
|
||||
case BTRFS_EXTENT_DATA_REF_KEY:
|
||||
case BTRFS_SHARED_DATA_REF_KEY:
|
||||
kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);
|
||||
break;
|
||||
case 0:
|
||||
kmem_cache_free(btrfs_delayed_ref_head_cachep, ref);
|
||||
break;
|
||||
default:
|
||||
BUG();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -176,8 +211,14 @@ struct btrfs_delayed_ref_head *
|
||||
btrfs_find_delayed_ref_head(struct btrfs_trans_handle *trans, u64 bytenr);
|
||||
int btrfs_delayed_ref_lock(struct btrfs_trans_handle *trans,
|
||||
struct btrfs_delayed_ref_head *head);
|
||||
static inline void btrfs_delayed_ref_unlock(struct btrfs_delayed_ref_head *head)
|
||||
{
|
||||
mutex_unlock(&head->mutex);
|
||||
}
|
||||
|
||||
int btrfs_find_ref_cluster(struct btrfs_trans_handle *trans,
|
||||
struct list_head *cluster, u64 search_start);
|
||||
void btrfs_release_ref_cluster(struct list_head *cluster);
|
||||
|
||||
int btrfs_check_delayed_seq(struct btrfs_fs_info *fs_info,
|
||||
struct btrfs_delayed_ref_root *delayed_refs,
|
||||
|
||||
@@ -465,7 +465,11 @@ static int btrfs_dev_replace_finishing(struct btrfs_fs_info *fs_info,
|
||||
* flush all outstanding I/O and inode extent mappings before the
|
||||
* copy operation is declared as being finished
|
||||
*/
|
||||
btrfs_start_delalloc_inodes(root, 0);
|
||||
ret = btrfs_start_delalloc_inodes(root, 0);
|
||||
if (ret) {
|
||||
mutex_unlock(&dev_replace->lock_finishing_cancel_unmount);
|
||||
return ret;
|
||||
}
|
||||
btrfs_wait_ordered_extents(root, 0);
|
||||
|
||||
trans = btrfs_start_transaction(root, 0);
|
||||
|
||||
+81
-79
@@ -56,7 +56,8 @@ static void end_workqueue_fn(struct btrfs_work *work);
|
||||
static void free_fs_root(struct btrfs_root *root);
|
||||
static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info,
|
||||
int read_only);
|
||||
static void btrfs_destroy_ordered_operations(struct btrfs_root *root);
|
||||
static void btrfs_destroy_ordered_operations(struct btrfs_transaction *t,
|
||||
struct btrfs_root *root);
|
||||
static void btrfs_destroy_ordered_extents(struct btrfs_root *root);
|
||||
static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
|
||||
struct btrfs_root *root);
|
||||
@@ -420,7 +421,7 @@ static int btree_read_extent_buffer_pages(struct btrfs_root *root,
|
||||
static int csum_dirty_buffer(struct btrfs_root *root, struct page *page)
|
||||
{
|
||||
struct extent_io_tree *tree;
|
||||
u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
|
||||
u64 start = page_offset(page);
|
||||
u64 found_start;
|
||||
struct extent_buffer *eb;
|
||||
|
||||
@@ -946,18 +947,20 @@ static int btree_writepages(struct address_space *mapping,
|
||||
struct writeback_control *wbc)
|
||||
{
|
||||
struct extent_io_tree *tree;
|
||||
struct btrfs_fs_info *fs_info;
|
||||
int ret;
|
||||
|
||||
tree = &BTRFS_I(mapping->host)->io_tree;
|
||||
if (wbc->sync_mode == WB_SYNC_NONE) {
|
||||
struct btrfs_root *root = BTRFS_I(mapping->host)->root;
|
||||
u64 num_dirty;
|
||||
unsigned long thresh = 32 * 1024 * 1024;
|
||||
|
||||
if (wbc->for_kupdate)
|
||||
return 0;
|
||||
|
||||
fs_info = BTRFS_I(mapping->host)->root->fs_info;
|
||||
/* this is a bit racy, but that's ok */
|
||||
num_dirty = root->fs_info->dirty_metadata_bytes;
|
||||
if (num_dirty < thresh)
|
||||
ret = percpu_counter_compare(&fs_info->dirty_metadata_bytes,
|
||||
BTRFS_DIRTY_METADATA_THRESH);
|
||||
if (ret < 0)
|
||||
return 0;
|
||||
}
|
||||
return btree_write_cache_pages(mapping, wbc);
|
||||
@@ -1125,24 +1128,16 @@ struct extent_buffer *read_tree_block(struct btrfs_root *root, u64 bytenr,
|
||||
void clean_tree_block(struct btrfs_trans_handle *trans, struct btrfs_root *root,
|
||||
struct extent_buffer *buf)
|
||||
{
|
||||
struct btrfs_fs_info *fs_info = root->fs_info;
|
||||
|
||||
if (btrfs_header_generation(buf) ==
|
||||
root->fs_info->running_transaction->transid) {
|
||||
fs_info->running_transaction->transid) {
|
||||
btrfs_assert_tree_locked(buf);
|
||||
|
||||
if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &buf->bflags)) {
|
||||
spin_lock(&root->fs_info->delalloc_lock);
|
||||
if (root->fs_info->dirty_metadata_bytes >= buf->len)
|
||||
root->fs_info->dirty_metadata_bytes -= buf->len;
|
||||
else {
|
||||
spin_unlock(&root->fs_info->delalloc_lock);
|
||||
btrfs_panic(root->fs_info, -EOVERFLOW,
|
||||
"Can't clear %lu bytes from "
|
||||
" dirty_mdatadata_bytes (%llu)",
|
||||
buf->len,
|
||||
root->fs_info->dirty_metadata_bytes);
|
||||
}
|
||||
spin_unlock(&root->fs_info->delalloc_lock);
|
||||
|
||||
__percpu_counter_add(&fs_info->dirty_metadata_bytes,
|
||||
-buf->len,
|
||||
fs_info->dirty_metadata_batch);
|
||||
/* ugh, clear_extent_buffer_dirty needs to lock the page */
|
||||
btrfs_set_lock_blocking(buf);
|
||||
clear_extent_buffer_dirty(buf);
|
||||
@@ -1178,9 +1173,13 @@ static void __setup_root(u32 nodesize, u32 leafsize, u32 sectorsize,
|
||||
|
||||
INIT_LIST_HEAD(&root->dirty_list);
|
||||
INIT_LIST_HEAD(&root->root_list);
|
||||
INIT_LIST_HEAD(&root->logged_list[0]);
|
||||
INIT_LIST_HEAD(&root->logged_list[1]);
|
||||
spin_lock_init(&root->orphan_lock);
|
||||
spin_lock_init(&root->inode_lock);
|
||||
spin_lock_init(&root->accounting_lock);
|
||||
spin_lock_init(&root->log_extents_lock[0]);
|
||||
spin_lock_init(&root->log_extents_lock[1]);
|
||||
mutex_init(&root->objectid_mutex);
|
||||
mutex_init(&root->log_mutex);
|
||||
init_waitqueue_head(&root->log_writer_wait);
|
||||
@@ -2004,10 +2003,24 @@ int open_ctree(struct super_block *sb,
|
||||
goto fail_srcu;
|
||||
}
|
||||
|
||||
ret = percpu_counter_init(&fs_info->dirty_metadata_bytes, 0);
|
||||
if (ret) {
|
||||
err = ret;
|
||||
goto fail_bdi;
|
||||
}
|
||||
fs_info->dirty_metadata_batch = PAGE_CACHE_SIZE *
|
||||
(1 + ilog2(nr_cpu_ids));
|
||||
|
||||
ret = percpu_counter_init(&fs_info->delalloc_bytes, 0);
|
||||
if (ret) {
|
||||
err = ret;
|
||||
goto fail_dirty_metadata_bytes;
|
||||
}
|
||||
|
||||
fs_info->btree_inode = new_inode(sb);
|
||||
if (!fs_info->btree_inode) {
|
||||
err = -ENOMEM;
|
||||
goto fail_bdi;
|
||||
goto fail_delalloc_bytes;
|
||||
}
|
||||
|
||||
mapping_set_gfp_mask(fs_info->btree_inode->i_mapping, GFP_NOFS);
|
||||
@@ -2017,7 +2030,6 @@ int open_ctree(struct super_block *sb,
|
||||
INIT_LIST_HEAD(&fs_info->dead_roots);
|
||||
INIT_LIST_HEAD(&fs_info->delayed_iputs);
|
||||
INIT_LIST_HEAD(&fs_info->delalloc_inodes);
|
||||
INIT_LIST_HEAD(&fs_info->ordered_operations);
|
||||
INIT_LIST_HEAD(&fs_info->caching_block_groups);
|
||||
spin_lock_init(&fs_info->delalloc_lock);
|
||||
spin_lock_init(&fs_info->trans_lock);
|
||||
@@ -2028,6 +2040,7 @@ int open_ctree(struct super_block *sb,
|
||||
spin_lock_init(&fs_info->tree_mod_seq_lock);
|
||||
rwlock_init(&fs_info->tree_mod_log_lock);
|
||||
mutex_init(&fs_info->reloc_mutex);
|
||||
seqlock_init(&fs_info->profiles_lock);
|
||||
|
||||
init_completion(&fs_info->kobj_unregister);
|
||||
INIT_LIST_HEAD(&fs_info->dirty_cowonly_roots);
|
||||
@@ -2126,6 +2139,7 @@ int open_ctree(struct super_block *sb,
|
||||
|
||||
spin_lock_init(&fs_info->block_group_cache_lock);
|
||||
fs_info->block_group_cache_tree = RB_ROOT;
|
||||
fs_info->first_logical_byte = (u64)-1;
|
||||
|
||||
extent_io_tree_init(&fs_info->freed_extents[0],
|
||||
fs_info->btree_inode->i_mapping);
|
||||
@@ -2187,7 +2201,8 @@ int open_ctree(struct super_block *sb,
|
||||
goto fail_alloc;
|
||||
|
||||
/* check FS state, whether FS is broken. */
|
||||
fs_info->fs_state |= btrfs_super_flags(disk_super);
|
||||
if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_ERROR)
|
||||
set_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state);
|
||||
|
||||
ret = btrfs_check_super_valid(fs_info, sb->s_flags & MS_RDONLY);
|
||||
if (ret) {
|
||||
@@ -2261,6 +2276,8 @@ int open_ctree(struct super_block *sb,
|
||||
leafsize = btrfs_super_leafsize(disk_super);
|
||||
sectorsize = btrfs_super_sectorsize(disk_super);
|
||||
stripesize = btrfs_super_stripesize(disk_super);
|
||||
fs_info->dirty_metadata_batch = leafsize * (1 + ilog2(nr_cpu_ids));
|
||||
fs_info->delalloc_batch = sectorsize * 512 * (1 + ilog2(nr_cpu_ids));
|
||||
|
||||
/*
|
||||
* mixed block groups end up with duplicate but slightly offset
|
||||
@@ -2390,8 +2407,7 @@ int open_ctree(struct super_block *sb,
|
||||
sb->s_blocksize = sectorsize;
|
||||
sb->s_blocksize_bits = blksize_bits(sectorsize);
|
||||
|
||||
if (strncmp((char *)(&disk_super->magic), BTRFS_MAGIC,
|
||||
sizeof(disk_super->magic))) {
|
||||
if (disk_super->magic != cpu_to_le64(BTRFS_MAGIC)) {
|
||||
printk(KERN_INFO "btrfs: valid FS not found on %s\n", sb->s_id);
|
||||
goto fail_sb_buffer;
|
||||
}
|
||||
@@ -2694,13 +2710,13 @@ fail_cleaner:
|
||||
* kthreads
|
||||
*/
|
||||
filemap_write_and_wait(fs_info->btree_inode->i_mapping);
|
||||
invalidate_inode_pages2(fs_info->btree_inode->i_mapping);
|
||||
|
||||
fail_block_groups:
|
||||
btrfs_free_block_groups(fs_info);
|
||||
|
||||
fail_tree_roots:
|
||||
free_root_pointers(fs_info, 1);
|
||||
invalidate_inode_pages2(fs_info->btree_inode->i_mapping);
|
||||
|
||||
fail_sb_buffer:
|
||||
btrfs_stop_workers(&fs_info->generic_worker);
|
||||
@@ -2721,8 +2737,11 @@ fail_alloc:
|
||||
fail_iput:
|
||||
btrfs_mapping_tree_free(&fs_info->mapping_tree);
|
||||
|
||||
invalidate_inode_pages2(fs_info->btree_inode->i_mapping);
|
||||
iput(fs_info->btree_inode);
|
||||
fail_delalloc_bytes:
|
||||
percpu_counter_destroy(&fs_info->delalloc_bytes);
|
||||
fail_dirty_metadata_bytes:
|
||||
percpu_counter_destroy(&fs_info->dirty_metadata_bytes);
|
||||
fail_bdi:
|
||||
bdi_destroy(&fs_info->bdi);
|
||||
fail_srcu:
|
||||
@@ -2795,8 +2814,7 @@ struct buffer_head *btrfs_read_dev_super(struct block_device *bdev)
|
||||
|
||||
super = (struct btrfs_super_block *)bh->b_data;
|
||||
if (btrfs_super_bytenr(super) != bytenr ||
|
||||
strncmp((char *)(&super->magic), BTRFS_MAGIC,
|
||||
sizeof(super->magic))) {
|
||||
super->magic != cpu_to_le64(BTRFS_MAGIC)) {
|
||||
brelse(bh);
|
||||
continue;
|
||||
}
|
||||
@@ -3339,7 +3357,7 @@ int close_ctree(struct btrfs_root *root)
|
||||
printk(KERN_ERR "btrfs: commit super ret %d\n", ret);
|
||||
}
|
||||
|
||||
if (fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR)
|
||||
if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state))
|
||||
btrfs_error_commit_super(root);
|
||||
|
||||
btrfs_put_block_group_cache(fs_info);
|
||||
@@ -3352,9 +3370,9 @@ int close_ctree(struct btrfs_root *root)
|
||||
|
||||
btrfs_free_qgroup_config(root->fs_info);
|
||||
|
||||
if (fs_info->delalloc_bytes) {
|
||||
printk(KERN_INFO "btrfs: at unmount delalloc count %llu\n",
|
||||
(unsigned long long)fs_info->delalloc_bytes);
|
||||
if (percpu_counter_sum(&fs_info->delalloc_bytes)) {
|
||||
printk(KERN_INFO "btrfs: at unmount delalloc count %lld\n",
|
||||
percpu_counter_sum(&fs_info->delalloc_bytes));
|
||||
}
|
||||
|
||||
free_extent_buffer(fs_info->extent_root->node);
|
||||
@@ -3401,6 +3419,8 @@ int close_ctree(struct btrfs_root *root)
|
||||
btrfs_close_devices(fs_info->fs_devices);
|
||||
btrfs_mapping_tree_free(&fs_info->mapping_tree);
|
||||
|
||||
percpu_counter_destroy(&fs_info->dirty_metadata_bytes);
|
||||
percpu_counter_destroy(&fs_info->delalloc_bytes);
|
||||
bdi_destroy(&fs_info->bdi);
|
||||
cleanup_srcu_struct(&fs_info->subvol_srcu);
|
||||
|
||||
@@ -3443,11 +3463,10 @@ void btrfs_mark_buffer_dirty(struct extent_buffer *buf)
|
||||
(unsigned long long)transid,
|
||||
(unsigned long long)root->fs_info->generation);
|
||||
was_dirty = set_extent_buffer_dirty(buf);
|
||||
if (!was_dirty) {
|
||||
spin_lock(&root->fs_info->delalloc_lock);
|
||||
root->fs_info->dirty_metadata_bytes += buf->len;
|
||||
spin_unlock(&root->fs_info->delalloc_lock);
|
||||
}
|
||||
if (!was_dirty)
|
||||
__percpu_counter_add(&root->fs_info->dirty_metadata_bytes,
|
||||
buf->len,
|
||||
root->fs_info->dirty_metadata_batch);
|
||||
}
|
||||
|
||||
static void __btrfs_btree_balance_dirty(struct btrfs_root *root,
|
||||
@@ -3457,8 +3476,7 @@ static void __btrfs_btree_balance_dirty(struct btrfs_root *root,
|
||||
* looks as though older kernels can get into trouble with
|
||||
* this code, they end up stuck in balance_dirty_pages forever
|
||||
*/
|
||||
u64 num_dirty;
|
||||
unsigned long thresh = 32 * 1024 * 1024;
|
||||
int ret;
|
||||
|
||||
if (current->flags & PF_MEMALLOC)
|
||||
return;
|
||||
@@ -3466,9 +3484,9 @@ static void __btrfs_btree_balance_dirty(struct btrfs_root *root,
|
||||
if (flush_delayed)
|
||||
btrfs_balance_delayed_items(root);
|
||||
|
||||
num_dirty = root->fs_info->dirty_metadata_bytes;
|
||||
|
||||
if (num_dirty > thresh) {
|
||||
ret = percpu_counter_compare(&root->fs_info->dirty_metadata_bytes,
|
||||
BTRFS_DIRTY_METADATA_THRESH);
|
||||
if (ret > 0) {
|
||||
balance_dirty_pages_ratelimited(
|
||||
root->fs_info->btree_inode->i_mapping);
|
||||
}
|
||||
@@ -3518,7 +3536,8 @@ void btrfs_error_commit_super(struct btrfs_root *root)
|
||||
btrfs_cleanup_transaction(root);
|
||||
}
|
||||
|
||||
static void btrfs_destroy_ordered_operations(struct btrfs_root *root)
|
||||
static void btrfs_destroy_ordered_operations(struct btrfs_transaction *t,
|
||||
struct btrfs_root *root)
|
||||
{
|
||||
struct btrfs_inode *btrfs_inode;
|
||||
struct list_head splice;
|
||||
@@ -3528,7 +3547,7 @@ static void btrfs_destroy_ordered_operations(struct btrfs_root *root)
|
||||
mutex_lock(&root->fs_info->ordered_operations_mutex);
|
||||
spin_lock(&root->fs_info->ordered_extent_lock);
|
||||
|
||||
list_splice_init(&root->fs_info->ordered_operations, &splice);
|
||||
list_splice_init(&t->ordered_operations, &splice);
|
||||
while (!list_empty(&splice)) {
|
||||
btrfs_inode = list_entry(splice.next, struct btrfs_inode,
|
||||
ordered_operations);
|
||||
@@ -3544,35 +3563,16 @@ static void btrfs_destroy_ordered_operations(struct btrfs_root *root)
|
||||
|
||||
static void btrfs_destroy_ordered_extents(struct btrfs_root *root)
|
||||
{
|
||||
struct list_head splice;
|
||||
struct btrfs_ordered_extent *ordered;
|
||||
struct inode *inode;
|
||||
|
||||
INIT_LIST_HEAD(&splice);
|
||||
|
||||
spin_lock(&root->fs_info->ordered_extent_lock);
|
||||
|
||||
list_splice_init(&root->fs_info->ordered_extents, &splice);
|
||||
while (!list_empty(&splice)) {
|
||||
ordered = list_entry(splice.next, struct btrfs_ordered_extent,
|
||||
root_extent_list);
|
||||
|
||||
list_del_init(&ordered->root_extent_list);
|
||||
atomic_inc(&ordered->refs);
|
||||
|
||||
/* the inode may be getting freed (in sys_unlink path). */
|
||||
inode = igrab(ordered->inode);
|
||||
|
||||
spin_unlock(&root->fs_info->ordered_extent_lock);
|
||||
if (inode)
|
||||
iput(inode);
|
||||
|
||||
atomic_set(&ordered->refs, 1);
|
||||
btrfs_put_ordered_extent(ordered);
|
||||
|
||||
spin_lock(&root->fs_info->ordered_extent_lock);
|
||||
}
|
||||
|
||||
/*
|
||||
* This will just short circuit the ordered completion stuff which will
|
||||
* make sure the ordered extent gets properly cleaned up.
|
||||
*/
|
||||
list_for_each_entry(ordered, &root->fs_info->ordered_extents,
|
||||
root_extent_list)
|
||||
set_bit(BTRFS_ORDERED_IOERR, &ordered->flags);
|
||||
spin_unlock(&root->fs_info->ordered_extent_lock);
|
||||
}
|
||||
|
||||
@@ -3594,11 +3594,11 @@ int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
|
||||
}
|
||||
|
||||
while ((node = rb_first(&delayed_refs->root)) != NULL) {
|
||||
ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
|
||||
struct btrfs_delayed_ref_head *head = NULL;
|
||||
|
||||
ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
|
||||
atomic_set(&ref->refs, 1);
|
||||
if (btrfs_delayed_ref_is_head(ref)) {
|
||||
struct btrfs_delayed_ref_head *head;
|
||||
|
||||
head = btrfs_delayed_node_to_head(ref);
|
||||
if (!mutex_trylock(&head->mutex)) {
|
||||
@@ -3614,16 +3614,18 @@ int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
|
||||
continue;
|
||||
}
|
||||
|
||||
kfree(head->extent_op);
|
||||
btrfs_free_delayed_extent_op(head->extent_op);
|
||||
delayed_refs->num_heads--;
|
||||
if (list_empty(&head->cluster))
|
||||
delayed_refs->num_heads_ready--;
|
||||
list_del_init(&head->cluster);
|
||||
}
|
||||
|
||||
ref->in_tree = 0;
|
||||
rb_erase(&ref->rb_node, &delayed_refs->root);
|
||||
delayed_refs->num_entries--;
|
||||
|
||||
if (head)
|
||||
mutex_unlock(&head->mutex);
|
||||
spin_unlock(&delayed_refs->lock);
|
||||
btrfs_put_delayed_ref(ref);
|
||||
|
||||
@@ -3671,6 +3673,8 @@ static void btrfs_destroy_delalloc_inodes(struct btrfs_root *root)
|
||||
delalloc_inodes);
|
||||
|
||||
list_del_init(&btrfs_inode->delalloc_inodes);
|
||||
clear_bit(BTRFS_INODE_IN_DELALLOC_LIST,
|
||||
&btrfs_inode->runtime_flags);
|
||||
|
||||
btrfs_invalidate_inodes(btrfs_inode->root);
|
||||
}
|
||||
@@ -3823,10 +3827,8 @@ int btrfs_cleanup_transaction(struct btrfs_root *root)
|
||||
|
||||
while (!list_empty(&list)) {
|
||||
t = list_entry(list.next, struct btrfs_transaction, list);
|
||||
if (!t)
|
||||
break;
|
||||
|
||||
btrfs_destroy_ordered_operations(root);
|
||||
btrfs_destroy_ordered_operations(t, root);
|
||||
|
||||
btrfs_destroy_ordered_extents(root);
|
||||
|
||||
|
||||
+186
-98
File diff suppressed because it is too large
Load Diff
+25
-43
@@ -1834,7 +1834,7 @@ int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
|
||||
*/
|
||||
static void check_page_uptodate(struct extent_io_tree *tree, struct page *page)
|
||||
{
|
||||
u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
|
||||
u64 start = page_offset(page);
|
||||
u64 end = start + PAGE_CACHE_SIZE - 1;
|
||||
if (test_range_bit(tree, start, end, EXTENT_UPTODATE, 1, NULL))
|
||||
SetPageUptodate(page);
|
||||
@@ -1846,7 +1846,7 @@ static void check_page_uptodate(struct extent_io_tree *tree, struct page *page)
|
||||
*/
|
||||
static void check_page_locked(struct extent_io_tree *tree, struct page *page)
|
||||
{
|
||||
u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
|
||||
u64 start = page_offset(page);
|
||||
u64 end = start + PAGE_CACHE_SIZE - 1;
|
||||
if (!test_range_bit(tree, start, end, EXTENT_LOCKED, 0, NULL))
|
||||
unlock_page(page);
|
||||
@@ -1960,7 +1960,7 @@ int repair_io_failure(struct btrfs_fs_info *fs_info, u64 start,
|
||||
return -EIO;
|
||||
}
|
||||
bio->bi_bdev = dev->bdev;
|
||||
bio_add_page(bio, page, length, start-page_offset(page));
|
||||
bio_add_page(bio, page, length, start - page_offset(page));
|
||||
btrfsic_submit_bio(WRITE_SYNC, bio);
|
||||
wait_for_completion(&compl);
|
||||
|
||||
@@ -2293,8 +2293,7 @@ static void end_bio_extent_writepage(struct bio *bio, int err)
|
||||
struct page *page = bvec->bv_page;
|
||||
tree = &BTRFS_I(page->mapping->host)->io_tree;
|
||||
|
||||
start = ((u64)page->index << PAGE_CACHE_SHIFT) +
|
||||
bvec->bv_offset;
|
||||
start = page_offset(page) + bvec->bv_offset;
|
||||
end = start + bvec->bv_len - 1;
|
||||
|
||||
if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE)
|
||||
@@ -2353,8 +2352,7 @@ static void end_bio_extent_readpage(struct bio *bio, int err)
|
||||
(long int)bio->bi_bdev);
|
||||
tree = &BTRFS_I(page->mapping->host)->io_tree;
|
||||
|
||||
start = ((u64)page->index << PAGE_CACHE_SHIFT) +
|
||||
bvec->bv_offset;
|
||||
start = page_offset(page) + bvec->bv_offset;
|
||||
end = start + bvec->bv_len - 1;
|
||||
|
||||
if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE)
|
||||
@@ -2471,7 +2469,7 @@ static int __must_check submit_one_bio(int rw, struct bio *bio,
|
||||
struct extent_io_tree *tree = bio->bi_private;
|
||||
u64 start;
|
||||
|
||||
start = ((u64)page->index << PAGE_CACHE_SHIFT) + bvec->bv_offset;
|
||||
start = page_offset(page) + bvec->bv_offset;
|
||||
|
||||
bio->bi_private = NULL;
|
||||
|
||||
@@ -2595,7 +2593,7 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
|
||||
unsigned long *bio_flags)
|
||||
{
|
||||
struct inode *inode = page->mapping->host;
|
||||
u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
|
||||
u64 start = page_offset(page);
|
||||
u64 page_end = start + PAGE_CACHE_SIZE - 1;
|
||||
u64 end;
|
||||
u64 cur = start;
|
||||
@@ -2648,6 +2646,8 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
|
||||
}
|
||||
}
|
||||
while (cur <= end) {
|
||||
unsigned long pnr = (last_byte >> PAGE_CACHE_SHIFT) + 1;
|
||||
|
||||
if (cur >= last_byte) {
|
||||
char *userpage;
|
||||
struct extent_state *cached = NULL;
|
||||
@@ -2735,26 +2735,17 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
|
||||
continue;
|
||||
}
|
||||
|
||||
ret = 0;
|
||||
if (tree->ops && tree->ops->readpage_io_hook) {
|
||||
ret = tree->ops->readpage_io_hook(page, cur,
|
||||
cur + iosize - 1);
|
||||
}
|
||||
if (!ret) {
|
||||
unsigned long pnr = (last_byte >> PAGE_CACHE_SHIFT) + 1;
|
||||
pnr -= page->index;
|
||||
ret = submit_extent_page(READ, tree, page,
|
||||
pnr -= page->index;
|
||||
ret = submit_extent_page(READ, tree, page,
|
||||
sector, disk_io_size, pg_offset,
|
||||
bdev, bio, pnr,
|
||||
end_bio_extent_readpage, mirror_num,
|
||||
*bio_flags,
|
||||
this_bio_flag);
|
||||
if (!ret) {
|
||||
nr++;
|
||||
*bio_flags = this_bio_flag;
|
||||
}
|
||||
}
|
||||
if (ret) {
|
||||
if (!ret) {
|
||||
nr++;
|
||||
*bio_flags = this_bio_flag;
|
||||
} else {
|
||||
SetPageError(page);
|
||||
unlock_extent(tree, cur, cur + iosize - 1);
|
||||
}
|
||||
@@ -2806,7 +2797,7 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
|
||||
struct inode *inode = page->mapping->host;
|
||||
struct extent_page_data *epd = data;
|
||||
struct extent_io_tree *tree = epd->tree;
|
||||
u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
|
||||
u64 start = page_offset(page);
|
||||
u64 delalloc_start;
|
||||
u64 page_end = start + PAGE_CACHE_SIZE - 1;
|
||||
u64 end;
|
||||
@@ -3124,12 +3115,9 @@ static int lock_extent_buffer_for_io(struct extent_buffer *eb,
|
||||
set_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags);
|
||||
spin_unlock(&eb->refs_lock);
|
||||
btrfs_set_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN);
|
||||
spin_lock(&fs_info->delalloc_lock);
|
||||
if (fs_info->dirty_metadata_bytes >= eb->len)
|
||||
fs_info->dirty_metadata_bytes -= eb->len;
|
||||
else
|
||||
WARN_ON(1);
|
||||
spin_unlock(&fs_info->delalloc_lock);
|
||||
__percpu_counter_add(&fs_info->dirty_metadata_bytes,
|
||||
-eb->len,
|
||||
fs_info->dirty_metadata_batch);
|
||||
ret = 1;
|
||||
} else {
|
||||
spin_unlock(&eb->refs_lock);
|
||||
@@ -3446,15 +3434,9 @@ retry:
|
||||
* swizzled back from swapper_space to tmpfs file
|
||||
* mapping
|
||||
*/
|
||||
if (tree->ops &&
|
||||
tree->ops->write_cache_pages_lock_hook) {
|
||||
tree->ops->write_cache_pages_lock_hook(page,
|
||||
data, flush_fn);
|
||||
} else {
|
||||
if (!trylock_page(page)) {
|
||||
flush_fn(data);
|
||||
lock_page(page);
|
||||
}
|
||||
if (!trylock_page(page)) {
|
||||
flush_fn(data);
|
||||
lock_page(page);
|
||||
}
|
||||
|
||||
if (unlikely(page->mapping != mapping)) {
|
||||
@@ -3674,7 +3656,7 @@ int extent_invalidatepage(struct extent_io_tree *tree,
struct page *page, unsigned long offset)
{
struct extent_state *cached_state = NULL;
u64 start = ((u64)page->index << PAGE_CACHE_SHIFT);
u64 start = page_offset(page);
u64 end = start + PAGE_CACHE_SIZE - 1;
size_t blocksize = page->mapping->host->i_sb->s_blocksize;

@@ -3700,7 +3682,7 @@ int try_release_extent_state(struct extent_map_tree *map,
struct extent_io_tree *tree, struct page *page,
gfp_t mask)
{
u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
u64 start = page_offset(page);
u64 end = start + PAGE_CACHE_SIZE - 1;
int ret = 1;

@@ -3739,7 +3721,7 @@ int try_release_extent_mapping(struct extent_map_tree *map,
gfp_t mask)
{
struct extent_map *em;
u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
u64 start = page_offset(page);
u64 end = start + PAGE_CACHE_SIZE - 1;

if ((mask & __GFP_WAIT) &&
@@ -75,7 +75,6 @@ struct extent_io_ops {
int (*merge_bio_hook)(struct page *page, unsigned long offset,
size_t size, struct bio *bio,
unsigned long bio_flags);
int (*readpage_io_hook)(struct page *page, u64 start, u64 end);
int (*readpage_io_failed_hook)(struct page *page, int failed_mirror);
int (*readpage_end_io_hook)(struct page *page, u64 start, u64 end,
struct extent_state *state, int mirror);

@@ -90,8 +89,6 @@ struct extent_io_ops {
struct extent_state *other);
void (*split_extent_hook)(struct inode *inode,
struct extent_state *orig, u64 split);
int (*write_cache_pages_lock_hook)(struct page *page, void *data,
void (*flush_fn)(void *));
};

struct extent_io_tree {
+46
-21

@@ -684,6 +684,24 @@ out:
return ret;
}

static u64 btrfs_sector_sum_left(struct btrfs_ordered_sum *sums,
struct btrfs_sector_sum *sector_sum,
u64 total_bytes, u64 sectorsize)
{
u64 tmp = sectorsize;
u64 next_sector = sector_sum->bytenr;
struct btrfs_sector_sum *next = sector_sum + 1;

while ((tmp + total_bytes) < sums->len) {
if (next_sector + sectorsize != next->bytenr)
break;
tmp += sectorsize;
next_sector = next->bytenr;
next++;
}
return tmp;
}

int btrfs_csum_file_blocks(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct btrfs_ordered_sum *sums)
@@ -789,20 +807,32 @@ again:
goto insert;
}

if (csum_offset >= btrfs_item_size_nr(leaf, path->slots[0]) /
if (csum_offset == btrfs_item_size_nr(leaf, path->slots[0]) /
csum_size) {
u32 diff = (csum_offset + 1) * csum_size;
int extend_nr;
u64 tmp;
u32 diff;
u32 free_space;

/*
* is the item big enough already? we dropped our lock
* before and need to recheck
*/
if (diff < btrfs_item_size_nr(leaf, path->slots[0]))
goto csum;
if (btrfs_leaf_free_space(root, leaf) <
sizeof(struct btrfs_item) + csum_size * 2)
goto insert;

free_space = btrfs_leaf_free_space(root, leaf) -
sizeof(struct btrfs_item) - csum_size;
tmp = btrfs_sector_sum_left(sums, sector_sum, total_bytes,
root->sectorsize);
tmp >>= root->fs_info->sb->s_blocksize_bits;
WARN_ON(tmp < 1);

extend_nr = max_t(int, 1, (int)tmp);
diff = (csum_offset + extend_nr) * csum_size;
diff = min(diff, MAX_CSUM_ITEMS(root, csum_size) * csum_size);

diff = diff - btrfs_item_size_nr(leaf, path->slots[0]);
if (diff != csum_size)
goto insert;
diff = min(free_space, diff);
diff /= csum_size;
diff *= csum_size;

btrfs_extend_item(trans, root, path, diff);
goto csum;
@@ -812,19 +842,14 @@ insert:
btrfs_release_path(path);
csum_offset = 0;
if (found_next) {
u64 tmp = total_bytes + root->sectorsize;
u64 next_sector = sector_sum->bytenr;
struct btrfs_sector_sum *next = sector_sum + 1;
u64 tmp;

while (tmp < sums->len) {
if (next_sector + root->sectorsize != next->bytenr)
break;
tmp += root->sectorsize;
next_sector = next->bytenr;
next++;
}
tmp = min(tmp, next_offset - file_key.offset);
tmp = btrfs_sector_sum_left(sums, sector_sum, total_bytes,
root->sectorsize);
tmp >>= root->fs_info->sb->s_blocksize_bits;
tmp = min(tmp, (next_offset - file_key.offset) >>
root->fs_info->sb->s_blocksize_bits);

tmp = max((u64)1, tmp);
tmp = min(tmp, (u64)MAX_CSUM_ITEMS(root, csum_size));
ins_size = csum_size * tmp;
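The new btrfs_sector_sum_left() helper above walks the per-sector checksums still queued in the ordered sum and reports how many of their bytes sit at physically contiguous disk addresses, so both call sites can size one csum item insert or extend instead of growing it a sector at a time. An illustrative stand-alone version of the same walk (plain arrays instead of struct btrfs_sector_sum; names hypothetical):

    #include <linux/types.h>

    /*
     * bytenr[] holds the disk address of each queued per-sector checksum;
     * 'first' is the entry about to be written, 'count' how many are queued.
     * Returns the number of bytes, starting at 'first', whose checksums
     * land on physically contiguous sectors.
     */
    static u64 contiguous_csum_bytes(const u64 *bytenr, int first, int count,
                                     u64 sectorsize)
    {
            u64 bytes = sectorsize;
            int i;

            for (i = first + 1; i < count; i++) {
                    if (bytenr[i - 1] + sectorsize != bytenr[i])
                            break;          /* discontiguous, stop counting */
                    bytes += sectorsize;
            }
            return bytes;
    }

The callers then shift the byte count by s_blocksize_bits to get a number of checksum slots and clamp it against MAX_CSUM_ITEMS and the free space in the leaf, as the hunks above show.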
+41
-8

@@ -30,11 +30,11 @@
#include <linux/statfs.h>
#include <linux/compat.h>
#include <linux/slab.h>
#include <linux/btrfs.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "ioctl.h"
#include "print-tree.h"
#include "tree-log.h"
#include "locking.h"

@@ -1544,7 +1544,7 @@ static ssize_t btrfs_file_aio_write(struct kiocb *iocb,
* although we have opened a file as writable, we have
* to stop this write operation to ensure FS consistency.
*/
if (root->fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) {
if (test_bit(BTRFS_FS_STATE_ERROR, &root->fs_info->fs_state)) {
mutex_unlock(&inode->i_mutex);
err = -EROFS;
goto out;
@@ -1627,7 +1627,20 @@ int btrfs_release_file(struct inode *inode, struct file *filp)
*/
if (test_and_clear_bit(BTRFS_INODE_ORDERED_DATA_CLOSE,
&BTRFS_I(inode)->runtime_flags)) {
btrfs_add_ordered_operation(NULL, BTRFS_I(inode)->root, inode);
struct btrfs_trans_handle *trans;
struct btrfs_root *root = BTRFS_I(inode)->root;

/*
* We need to block on a committing transaction to keep us from
* throwing a ordered operation on to the list and causing
* something like sync to deadlock trying to flush out this
* inode.
*/
trans = btrfs_start_transaction(root, 0);
if (IS_ERR(trans))
return PTR_ERR(trans);
btrfs_add_ordered_operation(trans, BTRFS_I(inode)->root, inode);
btrfs_end_transaction(trans, root);
if (inode->i_size > BTRFS_ORDERED_OPERATIONS_FLUSH_LIMIT)
filemap_flush(inode->i_mapping);
}
@@ -1654,16 +1667,21 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
struct btrfs_root *root = BTRFS_I(inode)->root;
int ret = 0;
struct btrfs_trans_handle *trans;
bool full_sync = 0;

trace_btrfs_sync_file(file, datasync);

/*
* We write the dirty pages in the range and wait until they complete
* out of the ->i_mutex. If so, we can flush the dirty pages by
* multi-task, and make the performance up.
* multi-task, and make the performance up. See
* btrfs_wait_ordered_range for an explanation of the ASYNC check.
*/
atomic_inc(&BTRFS_I(inode)->sync_writers);
ret = filemap_write_and_wait_range(inode->i_mapping, start, end);
ret = filemap_fdatawrite_range(inode->i_mapping, start, end);
if (!ret && test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
&BTRFS_I(inode)->runtime_flags))
ret = filemap_fdatawrite_range(inode->i_mapping, start, end);
atomic_dec(&BTRFS_I(inode)->sync_writers);
if (ret)
return ret;

@@ -1675,7 +1693,10 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
* range being left.
*/
atomic_inc(&root->log_batch);
btrfs_wait_ordered_range(inode, start, end - start + 1);
full_sync = test_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
&BTRFS_I(inode)->runtime_flags);
if (full_sync)
btrfs_wait_ordered_range(inode, start, end - start + 1);
atomic_inc(&root->log_batch);

/*
@@ -1742,13 +1763,25 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)

if (ret != BTRFS_NO_LOG_SYNC) {
if (ret > 0) {
/*
* If we didn't already wait for ordered extents we need
* to do that now.
*/
if (!full_sync)
btrfs_wait_ordered_range(inode, start,
end - start + 1);
ret = btrfs_commit_transaction(trans, root);
} else {
ret = btrfs_sync_log(trans, root);
if (ret == 0)
if (ret == 0) {
ret = btrfs_end_transaction(trans, root);
else
} else {
if (!full_sync)
btrfs_wait_ordered_range(inode, start,
end -
start + 1);
ret = btrfs_commit_transaction(trans, root);
}
}
} else {
ret = btrfs_end_transaction(trans, root);
@@ -1356,6 +1356,8 @@ static void recalculate_thresholds(struct btrfs_free_space_ctl *ctl)
u64 bytes_per_bg = BITS_PER_BITMAP * ctl->unit;
int max_bitmaps = div64_u64(size + bytes_per_bg - 1, bytes_per_bg);

max_bitmaps = max(max_bitmaps, 1);

BUG_ON(ctl->total_bitmaps > max_bitmaps);

/*

@@ -1636,10 +1638,14 @@ static bool use_bitmap(struct btrfs_free_space_ctl *ctl,
}

/*
* some block groups are so tiny they can't be enveloped by a bitmap, so
* don't even bother to create a bitmap for this
* The original block groups from mkfs can be really small, like 8
* megabytes, so don't bother with a bitmap for those entries. However
* some block groups can be smaller than what a bitmap would cover but
* are still large enough that they could overflow the 32k memory limit,
* so allow those block groups to still be allowed to have a bitmap
* entry.
*/
if (BITS_PER_BITMAP * ctl->unit > block_group->key.offset)
if (((BITS_PER_BITMAP * ctl->unit) >> 1) > block_group->key.offset)
return false;

return true;
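As a worked example of the relaxed use_bitmap() threshold, assuming a 4 KiB ctl->unit and the usual BITS_PER_BITMAP of one page worth of bits (4096 * 8 = 32768); the numbers are illustrative only:

    #define DEMO_BITS_PER_BITMAP   (4096 * 8)    /* 32768 bits per bitmap page */
    #define DEMO_UNIT              4096ULL       /* one bit tracks 4 KiB */

    /* one bitmap covers 32768 * 4 KiB = 128 MiB of block group space */
    #define DEMO_BITMAP_COVERAGE   (DEMO_BITS_PER_BITMAP * DEMO_UNIT)

    /*
     * old check: block groups under 128 MiB never got bitmap entries;
     * new check: only block groups under 64 MiB (half the coverage) are
     * excluded, so e.g. a 100 MiB block group may now use a bitmap and
     * stay inside the 32k per-block-group memory cap described in the
     * comment above.
     */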
+204
-117
File diff suppressed because it is too large
+96
-24
@@ -42,12 +42,12 @@
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/uuid.h>
#include <linux/btrfs.h>
#include "compat.h"
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "ioctl.h"
#include "print-tree.h"
#include "volumes.h"
#include "locking.h"

@@ -367,7 +367,7 @@ static noinline int create_subvol(struct btrfs_root *root,
struct dentry *dentry,
char *name, int namelen,
u64 *async_transid,
struct btrfs_qgroup_inherit **inherit)
struct btrfs_qgroup_inherit *inherit)
{
struct btrfs_trans_handle *trans;
struct btrfs_key key;

@@ -401,8 +401,7 @@ static noinline int create_subvol(struct btrfs_root *root,
if (IS_ERR(trans))
return PTR_ERR(trans);

ret = btrfs_qgroup_inherit(trans, root->fs_info, 0, objectid,
inherit ? *inherit : NULL);
ret = btrfs_qgroup_inherit(trans, root->fs_info, 0, objectid, inherit);
if (ret)
goto fail;

@@ -533,7 +532,7 @@ fail:

static int create_snapshot(struct btrfs_root *root, struct dentry *dentry,
char *name, int namelen, u64 *async_transid,
bool readonly, struct btrfs_qgroup_inherit **inherit)
bool readonly, struct btrfs_qgroup_inherit *inherit)
{
struct inode *inode;
struct btrfs_pending_snapshot *pending_snapshot;

@@ -552,10 +551,7 @@ static int create_snapshot(struct btrfs_root *root, struct dentry *dentry,
pending_snapshot->dentry = dentry;
pending_snapshot->root = root;
pending_snapshot->readonly = readonly;
if (inherit) {
pending_snapshot->inherit = *inherit;
*inherit = NULL; /* take responsibility to free it */
}
pending_snapshot->inherit = inherit;

trans = btrfs_start_transaction(root->fs_info->extent_root, 6);
if (IS_ERR(trans)) {
@@ -695,7 +691,7 @@ static noinline int btrfs_mksubvol(struct path *parent,
char *name, int namelen,
struct btrfs_root *snap_src,
u64 *async_transid, bool readonly,
struct btrfs_qgroup_inherit **inherit)
struct btrfs_qgroup_inherit *inherit)
{
struct inode *dir = parent->dentry->d_inode;
struct dentry *dentry;

@@ -818,7 +814,7 @@ static int find_new_extents(struct btrfs_root *root,

while(1) {
ret = btrfs_search_forward(root, &min_key, &max_key,
path, 0, newer_than);
path, newer_than);
if (ret != 0)
goto none;
if (min_key.objectid != ino)

@@ -1206,6 +1202,12 @@ int btrfs_defrag_file(struct inode *inode, struct file *file,
if (!(inode->i_sb->s_flags & MS_ACTIVE))
break;

if (btrfs_defrag_cancelled(root->fs_info)) {
printk(KERN_DEBUG "btrfs: defrag_file cancelled\n");
ret = -EAGAIN;
break;
}

if (!should_defrag_range(inode, (u64)i << PAGE_CACHE_SHIFT,
extent_thresh, &last_len, &skip,
&defrag_end, range->flags &

@@ -1329,9 +1331,6 @@ static noinline int btrfs_ioctl_resize(struct file *file,
int ret = 0;
int mod = 0;

if (root->fs_info->sb->s_flags & MS_RDONLY)
return -EROFS;

if (!capable(CAP_SYS_ADMIN))
return -EPERM;

@@ -1363,6 +1362,10 @@ static noinline int btrfs_ioctl_resize(struct file *file,
*devstr = '\0';
devstr = vol_args->name;
devid = simple_strtoull(devstr, &end, 10);
if (!devid) {
ret = -EINVAL;
goto out_free;
}
printk(KERN_INFO "btrfs: resizing devid %llu\n",
(unsigned long long)devid);
}
@@ -1371,7 +1374,7 @@ static noinline int btrfs_ioctl_resize(struct file *file,
if (!device) {
printk(KERN_INFO "btrfs: resizer unable to find device %llu\n",
(unsigned long long)devid);
ret = -EINVAL;
ret = -ENODEV;
goto out_free;
}

@@ -1379,7 +1382,7 @@ static noinline int btrfs_ioctl_resize(struct file *file,
printk(KERN_INFO "btrfs: resizer unable to apply on "
"readonly device %llu\n",
(unsigned long long)devid);
ret = -EINVAL;
ret = -EPERM;
goto out_free;
}

@@ -1401,7 +1404,7 @@ static noinline int btrfs_ioctl_resize(struct file *file,
}

if (device->is_tgtdev_for_dev_replace) {
ret = -EINVAL;
ret = -EPERM;
goto out_free;
}

@@ -1457,7 +1460,7 @@ out:
static noinline int btrfs_ioctl_snap_create_transid(struct file *file,
char *name, unsigned long fd, int subvol,
u64 *transid, bool readonly,
struct btrfs_qgroup_inherit **inherit)
struct btrfs_qgroup_inherit *inherit)
{
int namelen;
int ret = 0;

@@ -1566,7 +1569,7 @@ static noinline int btrfs_ioctl_snap_create_v2(struct file *file,

ret = btrfs_ioctl_snap_create_transid(file, vol_args->name,
vol_args->fd, subvol, ptr,
readonly, &inherit);
readonly, inherit);

if (ret == 0 && ptr &&
copy_to_user(arg +
@@ -1863,7 +1866,7 @@ static noinline int search_ioctl(struct inode *inode,
path->keep_locks = 1;

while(1) {
ret = btrfs_search_forward(root, &key, &max_key, path, 0,
ret = btrfs_search_forward(root, &key, &max_key, path,
sk->min_transid);
if (ret != 0) {
if (ret > 0)

@@ -2171,6 +2174,12 @@ out_unlock:
shrink_dcache_sb(root->fs_info->sb);
btrfs_invalidate_inodes(dest);
d_delete(dentry);

/* the last ref */
if (dest->cache_inode) {
iput(dest->cache_inode);
dest->cache_inode = NULL;
}
}
out_dput:
dput(dentry);
@@ -2211,10 +2220,10 @@ static int btrfs_ioctl_defrag(struct file *file, void __user *argp)
ret = -EPERM;
goto out;
}
ret = btrfs_defrag_root(root, 0);
ret = btrfs_defrag_root(root);
if (ret)
goto out;
ret = btrfs_defrag_root(root->fs_info->extent_root, 0);
ret = btrfs_defrag_root(root->fs_info->extent_root);
break;
case S_IFREG:
if (!(file->f_mode & FMODE_WRITE)) {

@@ -3111,7 +3120,7 @@ static noinline long btrfs_ioctl_start_sync(struct btrfs_root *root,
u64 transid;
int ret;

trans = btrfs_attach_transaction(root);
trans = btrfs_attach_transaction_barrier(root);
if (IS_ERR(trans)) {
if (PTR_ERR(trans) != -ENOENT)
return PTR_ERR(trans);

@@ -3289,7 +3298,7 @@ static long btrfs_ioctl_ino_to_path(struct btrfs_root *root, void __user *arg)
struct inode_fs_paths *ipath = NULL;
struct btrfs_path *path;

if (!capable(CAP_SYS_ADMIN))
if (!capable(CAP_DAC_READ_SEARCH))
return -EPERM;

path = btrfs_alloc_path();
@@ -3914,6 +3923,65 @@ out:
return ret;
}

static int btrfs_ioctl_get_fslabel(struct file *file, void __user *arg)
{
struct btrfs_root *root = BTRFS_I(fdentry(file)->d_inode)->root;
const char *label = root->fs_info->super_copy->label;
size_t len = strnlen(label, BTRFS_LABEL_SIZE);
int ret;

if (len == BTRFS_LABEL_SIZE) {
pr_warn("btrfs: label is too long, return the first %zu bytes\n",
--len);
}

mutex_lock(&root->fs_info->volume_mutex);
ret = copy_to_user(arg, label, len);
mutex_unlock(&root->fs_info->volume_mutex);

return ret ? -EFAULT : 0;
}

static int btrfs_ioctl_set_fslabel(struct file *file, void __user *arg)
{
struct btrfs_root *root = BTRFS_I(fdentry(file)->d_inode)->root;
struct btrfs_super_block *super_block = root->fs_info->super_copy;
struct btrfs_trans_handle *trans;
char label[BTRFS_LABEL_SIZE];
int ret;

if (!capable(CAP_SYS_ADMIN))
return -EPERM;

if (copy_from_user(label, arg, sizeof(label)))
return -EFAULT;

if (strnlen(label, BTRFS_LABEL_SIZE) == BTRFS_LABEL_SIZE) {
pr_err("btrfs: unable to set label with more than %d bytes\n",
BTRFS_LABEL_SIZE - 1);
return -EINVAL;
}

ret = mnt_want_write_file(file);
if (ret)
return ret;

mutex_lock(&root->fs_info->volume_mutex);
trans = btrfs_start_transaction(root, 0);
if (IS_ERR(trans)) {
ret = PTR_ERR(trans);
goto out_unlock;
}

strcpy(super_block->label, label);
ret = btrfs_end_transaction(trans, root);

out_unlock:
mutex_unlock(&root->fs_info->volume_mutex);
mnt_drop_write_file(file);
return ret;
}

long btrfs_ioctl(struct file *file, unsigned int
cmd, unsigned long arg)
{
@@ -4014,6 +4082,10 @@ long btrfs_ioctl(struct file *file, unsigned int
return btrfs_ioctl_qgroup_limit(file, argp);
case BTRFS_IOC_DEV_REPLACE:
return btrfs_ioctl_dev_replace(root, argp);
case BTRFS_IOC_GET_FSLABEL:
return btrfs_ioctl_get_fslabel(file, argp);
case BTRFS_IOC_SET_FSLABEL:
return btrfs_ioctl_set_fslabel(file, argp);
}

return -ENOTTY;
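A minimal user-space sketch of driving the two new label ioctls, assuming the definitions exported through the new <linux/btrfs.h> uapi header (error handling trimmed; any file or directory on the mounted filesystem will do):

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/btrfs.h>

    int main(int argc, char **argv)
    {
            char label[BTRFS_LABEL_SIZE] = { 0 };
            int fd;

            if (argc < 2)
                    return 1;
            fd = open(argv[1], O_RDONLY);
            if (fd < 0)
                    return 1;

            /* read the current label */
            if (ioctl(fd, BTRFS_IOC_GET_FSLABEL, label) == 0)
                    printf("label: %s\n", label);

            if (argc > 2) {                 /* optionally set a new label */
                    memset(label, 0, sizeof(label));
                    strncpy(label, argv[2], BTRFS_LABEL_SIZE - 1);
                    if (ioctl(fd, BTRFS_IOC_SET_FSLABEL, label))
                            perror("BTRFS_IOC_SET_FSLABEL");
            }
            return 0;
    }

Setting the label requires CAP_SYS_ADMIN, per the capable() check in btrfs_ioctl_set_fslabel() above; reading it does not.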
+2
-3

@@ -113,11 +113,10 @@ again:
read_unlock(&eb->lock);
return;
}
read_unlock(&eb->lock);
wait_event(eb->write_lock_wq, atomic_read(&eb->blocking_writers) == 0);
read_lock(&eb->lock);
if (atomic_read(&eb->blocking_writers)) {
read_unlock(&eb->lock);
wait_event(eb->write_lock_wq,
atomic_read(&eb->blocking_writers) == 0);
goto again;
}
atomic_inc(&eb->read_locks);
Some files were not shown because too many files have changed in this diff.