Support simultaneous mount of multiple volumes

Separate the in-memory volume and container superblocks, and keep all
mounted containers in a linked list. Each container will hold a pointer
to its block device; when a mount is requested, we traverse the list to
check if it's a new container. Keep one vfs superblock for each mounted
volume, but only assign it a fake anonymous bdev; all disk operations
must be forwarded to the container's bdev, with the use of the new
apfs_sb_bread() and apfs_map_bh() functions.

All the mount changes require that we implement our own ->mount()
function, closely based on mount_bdev() and the btrfs equivalent. I
can't claim to be confident that all changes here are correct, much more
testing is needed.

To simplify access to the container, define two new helpers similar to
APFS_SB(): APFS_NXI() to retrieve the container superblock info, and
APFS_SM() to retrieve the space manager. Also move the usual assertion
that an object is part of the current transaction to its own inline
function; this saves me from rewriting all the callers and has the added
benefit of silencing "unused variable" warnings when the module is built
without APFS_DEBUG.

Signed-off-by: Ernesto A. Fernández <ernesto@corellium.com>
This commit is contained in:
Ernesto A. Fernández
2021-04-01 22:45:25 -03:00
parent e89e42bfe4
commit 4a6c0e2b77
13 changed files with 576 additions and 276 deletions
+82 -18
View File
@@ -134,41 +134,56 @@ struct apfs_bh_info {
struct list_head list; /* List of buffers in the transaction */
};
/* Mount option flags for a container */
#define APFS_CHECK_NODES 1
#define APFS_READWRITE 2
/*
 * Container superblock data in memory
 */
struct apfs_nxsb_info {
	struct block_device *nx_bdev; /* Device for the container */
	struct apfs_nx_superblock *nx_raw; /* On-disk main sb */
	struct apfs_object nx_object; /* Main superblock object */
	u64 nx_xid; /* Latest transaction id */
	unsigned int nx_flags; /* Mount options shared by all volumes */
	unsigned int nx_refcnt; /* Number of mounted volumes in container */
	/* TODO: handle block sizes above the maximum of PAGE_SIZE? */
	unsigned long nx_blocksize; /* Filesystem block size, in bytes */
	unsigned char nx_blocksize_bits; /* log2 of the block size */
	struct apfs_spaceman nx_spaceman; /* Space manager, shared by volumes */
	struct apfs_transaction nx_transaction; /* Current transaction state */
	/* For now, a single semaphore for every operation */
	struct rw_semaphore nx_big_sem;
	/* List of currently mounted containers */
	struct list_head nx_list;
};
extern struct mutex nxs_mutex;
/*
* Volume superblock data in memory
*/
struct apfs_sb_info {
	/*
	 * NOTE(review): this span looks like a diff with the +/- markers
	 * stripped — s_vsb_raw appears twice and several members (s_msb_raw,
	 * s_spaceman, s_transaction, s_big_sem) seem to be the pre-patch
	 * lines whose replacements moved to struct apfs_nxsb_info. Verify
	 * against the applied tree before trusting this layout.
	 */
	struct apfs_nx_superblock *s_msb_raw; /* On-disk main sb */
	struct apfs_superblock *s_vsb_raw; /* On-disk volume sb */
	struct apfs_nxsb_info *s_nxi; /* In-memory container sb for volume */
	struct apfs_superblock *s_vsb_raw; /* On-disk volume sb */
	u64 s_xid; /* Latest transaction id */
	struct apfs_node *s_cat_root; /* Root of the catalog tree */
	struct apfs_node *s_omap_root; /* Root of the object map tree */
	struct apfs_object s_mobject; /* Main superblock object */
	struct apfs_object s_vobject; /* Volume superblock object */
	/* Mount options */
	unsigned int s_flags;
	unsigned int s_vol_nr; /* Index of the volume in the sb list */
	kuid_t s_uid; /* uid to override on-disk uid */
	kgid_t s_gid; /* gid to override on-disk gid */
	/* TODO: handle block sizes above the maximum of PAGE_SIZE? */
	unsigned long s_blocksize; /* Filesystem block size, in bytes */
	unsigned char s_blocksize_bits; /* log2 of the block size */
	struct inode *s_private_dir; /* Inode for the private directory */
	struct apfs_spaceman s_spaceman;
	struct apfs_transaction s_transaction;
	/* For now, a single semaphore for every operation */
	struct rw_semaphore s_big_sem;
};
static inline struct apfs_sb_info *APFS_SB(struct super_block *sb)
@@ -176,6 +191,24 @@ static inline struct apfs_sb_info *APFS_SB(struct super_block *sb)
return sb->s_fs_info;
}
/**
* APFS_NXI - Get the shared container info for a volume's superblock
* @sb: superblock structure
*/
static inline struct apfs_nxsb_info *APFS_NXI(struct super_block *sb)
{
return APFS_SB(sb)->s_nxi;
}
/**
* APFS_SM - Get the shared spaceman struct for a volume's superblock
* @sb: superblock structure
*/
static inline struct apfs_spaceman *APFS_SM(struct super_block *sb)
{
return &APFS_NXI(sb)->nx_spaceman;
}
static inline bool apfs_is_case_insensitive(struct super_block *sb)
{
return (APFS_SB(sb)->s_vsb_raw->apfs_incompatible_features &
@@ -505,6 +538,16 @@ struct apfs_xattr {
#define apfs_debug(sb, fmt, ...) no_printk(fmt, ##__VA_ARGS__)
#endif /* CONFIG_APFS_DEBUG */
/**
 * apfs_assert_in_transaction - Assert that the object is in current transaction
 * @sb: superblock structure
 * @obj: on-disk object to check
 *
 * Everything stays inside the ASSERT() expression on purpose: if ASSERT
 * compiles to a no-op (module built without APFS_DEBUG), no local variable
 * is left behind to trigger "unused variable" warnings.
 */
static inline void apfs_assert_in_transaction(struct super_block *sb, struct apfs_obj_phys *obj)
{
	ASSERT(APFS_NXI(sb)->nx_xid == le64_to_cpu(obj->o_xid));
}
/* btree.c */
extern struct apfs_query *apfs_alloc_query(struct apfs_node *node,
struct apfs_query *parent);
@@ -658,4 +701,25 @@ extern const struct inode_operations apfs_symlink_inode_operations;
/* xattr.c */
extern const struct xattr_handler *apfs_xattr_handlers[];
/*
* TODO: the following are modified variants of buffer head functions that will
* work with the shared block device for the container. The correct approach
* here would be to avoid buffer heads and use bios, but for now this will do.
*/
/*
 * apfs_map_bh - Map a buffer head to a block in the container's device
 * @bh:    buffer head to map
 * @sb:    superblock of the volume (only provides the block size)
 * @block: block number within the container's block device
 *
 * Like map_bh(), except that the buffer is mapped to the shared container
 * bdev rather than to the volume's (fake, anonymous) device.
 */
static inline void
apfs_map_bh(struct buffer_head *bh, struct super_block *sb, sector_t block)
{
	struct apfs_nxsb_info *nxi = APFS_NXI(sb);

	set_buffer_mapped(bh);
	bh->b_bdev = nxi->nx_bdev;
	bh->b_blocknr = block;
	bh->b_size = sb->s_blocksize;
}
/*
 * apfs_sb_bread - Read a block through the container's device
 * @sb:    superblock of the volume
 * @block: block number within the container's block device
 *
 * Equivalent of sb_bread(), but the read goes to the shared container bdev
 * instead of the volume's anonymous device. Returns the buffer head, or
 * NULL on failure.
 */
static inline struct buffer_head *
apfs_sb_bread(struct super_block *sb, sector_t block)
{
	struct block_device *bdev = APFS_NXI(sb)->nx_bdev;

	return __bread_gfp(bdev, block, sb->s_blocksize, __GFP_MOVABLE);
}
#endif /* _APFS_H */
+1 -1
View File
@@ -837,7 +837,7 @@ struct apfs_nx_superblock {
__le64 nx_readonly_compatible_features;
__le64 nx_incompatible_features;
/*48*/ char nx_uuid[16];
/*48*/ char nx_uuid[UUID_SIZE];
/*58*/ __le64 nx_next_oid;
__le64 nx_next_xid;
+13 -18
View File
@@ -40,7 +40,7 @@ static int apfs_child_from_query(struct apfs_query *query, u64 *child)
int apfs_omap_lookup_block(struct super_block *sb, struct apfs_node *tbl,
u64 id, u64 *block, bool write)
{
struct apfs_sb_info *sbi = APFS_SB(sb);
struct apfs_nxsb_info *nxi = APFS_NXI(sb);
struct apfs_query *query;
struct apfs_key key;
int ret = 0;
@@ -49,7 +49,7 @@ int apfs_omap_lookup_block(struct super_block *sb, struct apfs_node *tbl,
if (!query)
return -ENOMEM;
apfs_init_omap_key(id, sbi->s_xid, &key);
apfs_init_omap_key(id, nxi->nx_xid, &key);
query->key = &key;
query->flags |= APFS_QUERY_OMAP;
@@ -76,7 +76,7 @@ int apfs_omap_lookup_block(struct super_block *sb, struct apfs_node *tbl,
}
key.ok_oid = cpu_to_le64(id);
key.ok_xid = cpu_to_le64(sbi->s_xid); /* TODO: snapshots? */
key.ok_xid = cpu_to_le64(nxi->nx_xid); /* TODO: snapshots? */
val.ov_flags = 0; /* TODO: preserve the flags */
val.ov_size = cpu_to_le32(sb->s_blocksize);
val.ov_paddr = cpu_to_le64(new_bh->b_blocknr);
@@ -103,6 +103,7 @@ fail:
int apfs_create_omap_rec(struct super_block *sb, u64 oid, u64 bno)
{
struct apfs_sb_info *sbi = APFS_SB(sb);
struct apfs_nxsb_info *nxi = APFS_NXI(sb);
struct apfs_query *query;
struct apfs_key key;
struct apfs_omap_key raw_key;
@@ -113,7 +114,7 @@ int apfs_create_omap_rec(struct super_block *sb, u64 oid, u64 bno)
if (!query)
return -ENOMEM;
apfs_init_omap_key(oid, sbi->s_xid, &key);
apfs_init_omap_key(oid, nxi->nx_xid, &key);
query->key = &key;
query->flags |= APFS_QUERY_OMAP;
@@ -122,7 +123,7 @@ int apfs_create_omap_rec(struct super_block *sb, u64 oid, u64 bno)
goto fail;
raw_key.ok_oid = cpu_to_le64(oid);
raw_key.ok_xid = cpu_to_le64(sbi->s_xid);
raw_key.ok_xid = cpu_to_le64(nxi->nx_xid);
raw_val.ov_flags = 0;
raw_val.ov_size = cpu_to_le32(sb->s_blocksize);
raw_val.ov_paddr = cpu_to_le64(bno);
@@ -145,6 +146,7 @@ fail:
int apfs_delete_omap_rec(struct super_block *sb, u64 oid)
{
struct apfs_sb_info *sbi = APFS_SB(sb);
struct apfs_nxsb_info *nxi = APFS_NXI(sb);
struct apfs_query *query;
struct apfs_key key;
int ret;
@@ -153,7 +155,7 @@ int apfs_delete_omap_rec(struct super_block *sb, u64 oid)
if (!query)
return -ENOMEM;
apfs_init_omap_key(oid, sbi->s_xid, &key);
apfs_init_omap_key(oid, nxi->nx_xid, &key);
query->key = &key;
query->flags |= APFS_QUERY_OMAP;
@@ -361,7 +363,6 @@ static void apfs_btree_change_rec_count(struct apfs_query *query, int change,
int key_len, int val_len)
{
struct super_block *sb;
struct apfs_sb_info *sbi;
struct apfs_node *root;
struct apfs_btree_node_phys *root_raw;
struct apfs_btree_info *info;
@@ -376,11 +377,10 @@ static void apfs_btree_change_rec_count(struct apfs_query *query, int change,
ASSERT(apfs_node_is_root(root));
sb = root->object.sb;
sbi = APFS_SB(sb);
root_raw = (void *)root->object.bh->b_data;
info = (void *)root_raw + sb->s_blocksize - sizeof(*info);
ASSERT(sbi->s_xid == le64_to_cpu(root_raw->btn_o.o_xid));
apfs_assert_in_transaction(sb, &root_raw->btn_o);
if (key_len > le32_to_cpu(info->bt_longest_key))
info->bt_longest_key = cpu_to_le32(key_len);
if (val_len > le32_to_cpu(info->bt_longest_val))
@@ -400,7 +400,6 @@ static void apfs_btree_change_rec_count(struct apfs_query *query, int change,
void apfs_btree_change_node_count(struct apfs_query *query, int change)
{
struct super_block *sb;
struct apfs_sb_info *sbi;
struct apfs_node *root;
struct apfs_btree_node_phys *root_raw;
struct apfs_btree_info *info;
@@ -413,11 +412,10 @@ void apfs_btree_change_node_count(struct apfs_query *query, int change)
ASSERT(apfs_node_is_root(root));
sb = root->object.sb;
sbi = APFS_SB(sb);
root_raw = (void *)root->object.bh->b_data;
info = (void *)root_raw + sb->s_blocksize - sizeof(*info);
ASSERT(sbi->s_xid == le64_to_cpu(root_raw->btn_o.o_xid));
apfs_assert_in_transaction(sb, &root_raw->btn_o);
le64_add_cpu(&info->bt_node_count, change);
}
@@ -438,7 +436,6 @@ int apfs_btree_insert(struct apfs_query *query, void *key, int key_len,
{
struct apfs_node *node = query->node;
struct super_block *sb = node->object.sb;
struct apfs_sb_info *sbi = APFS_SB(sb);
struct apfs_btree_node_phys *node_raw;
int toc_entry_size;
int err;
@@ -455,7 +452,7 @@ int apfs_btree_insert(struct apfs_query *query, void *key, int key_len,
again:
node = query->node;
node_raw = (void *)node->object.bh->b_data;
ASSERT(sbi->s_xid == le64_to_cpu(node_raw->btn_o.o_xid));
apfs_assert_in_transaction(sb, &node_raw->btn_o);
/* TODO: support record fragmentation */
if (node->free + key_len + val_len > node->data) {
@@ -527,7 +524,6 @@ int apfs_btree_remove(struct apfs_query *query)
{
struct apfs_node *node = query->node;
struct super_block *sb = node->object.sb;
struct apfs_sb_info *sbi = APFS_SB(sb);
struct apfs_btree_node_phys *node_raw;
int later_entries = node->records - query->index - 1;
int err;
@@ -545,7 +541,7 @@ int apfs_btree_remove(struct apfs_query *query)
node = query->node;
node_raw = (void *)query->node->object.bh->b_data;
ASSERT(sbi->s_xid == le64_to_cpu(node_raw->btn_o.o_xid));
apfs_assert_in_transaction(sb, &node_raw->btn_o);
if (node->records == 1)
/* Just get rid of the node. TODO: update the node heights? */
@@ -617,7 +613,6 @@ int apfs_btree_replace(struct apfs_query *query, void *key, int key_len,
{
struct apfs_node *node = query->node;
struct super_block *sb = node->object.sb;
struct apfs_sb_info *sbi = APFS_SB(sb);
struct apfs_btree_node_phys *node_raw;
int err;
@@ -635,7 +630,7 @@ int apfs_btree_replace(struct apfs_query *query, void *key, int key_len,
again:
node = query->node;
node_raw = (void *)node->object.bh->b_data;
ASSERT(sbi->s_xid == le64_to_cpu(node_raw->btn_o.o_xid));
apfs_assert_in_transaction(sb, &node_raw->btn_o);
/* The first key in a node must match the parent record's */
if (key && query->parent && query->index == 0) {
+8 -8
View File
@@ -137,12 +137,12 @@ fail:
int apfs_inode_by_name(struct inode *dir, const struct qstr *child, u64 *ino)
{
struct super_block *sb = dir->i_sb;
struct apfs_sb_info *sbi = APFS_SB(sb);
struct apfs_nxsb_info *nxi = APFS_NXI(sb);
struct apfs_query *query;
struct apfs_drec drec;
int err = 0;
down_read(&sbi->s_big_sem);
down_read(&nxi->nx_big_sem);
query = apfs_dentry_lookup(dir, child, &drec);
if (IS_ERR(query)) {
err = PTR_ERR(query);
@@ -151,7 +151,7 @@ int apfs_inode_by_name(struct inode *dir, const struct qstr *child, u64 *ino)
*ino = drec.ino;
apfs_free_query(sb, query);
out:
up_read(&sbi->s_big_sem);
up_read(&nxi->nx_big_sem);
return err;
}
@@ -160,6 +160,7 @@ static int apfs_readdir(struct file *file, struct dir_context *ctx)
struct inode *inode = file_inode(file);
struct super_block *sb = inode->i_sb;
struct apfs_sb_info *sbi = APFS_SB(sb);
struct apfs_nxsb_info *nxi = APFS_NXI(sb);
struct apfs_key key;
struct apfs_query *query;
u64 cnid = apfs_ino(inode);
@@ -167,7 +168,7 @@ static int apfs_readdir(struct file *file, struct dir_context *ctx)
bool hashed = apfs_is_normalization_insensitive(sb);
int err = 0;
down_read(&sbi->s_big_sem);
down_read(&nxi->nx_big_sem);
/* Inode numbers might overflow here; follow btrfs in ignoring that */
if (!dir_emit_dots(file, ctx))
@@ -223,7 +224,7 @@ static int apfs_readdir(struct file *file, struct dir_context *ctx)
apfs_free_query(sb, query);
out:
up_read(&sbi->s_big_sem);
up_read(&nxi->nx_big_sem);
return err;
}
@@ -521,13 +522,12 @@ static int apfs_create_sibling_recs(struct dentry *dentry,
struct inode *inode, u64 *sibling_id)
{
struct super_block *sb = dentry->d_sb;
struct apfs_sb_info *sbi = APFS_SB(sb);
struct apfs_superblock *vsb_raw = sbi->s_vsb_raw;
struct apfs_superblock *vsb_raw = APFS_SB(sb)->s_vsb_raw;
u64 cnid;
int ret;
/* Sibling ids come from the same pool as the inode numbers */
ASSERT(sbi->s_xid == le64_to_cpu(vsb_raw->apfs_o.o_xid));
apfs_assert_in_transaction(sb, &vsb_raw->apfs_o);
cnid = le64_to_cpu(vsb_raw->apfs_next_obj_id);
le64_add_cpu(&vsb_raw->apfs_next_obj_id, 1);
+11 -14
View File
@@ -125,7 +125,7 @@ int __apfs_get_block(struct inode *inode, sector_t iblock,
bh_result->b_size = map_len;
/*
* Save the requested mapping length as map_bh() replaces it with
* Save the requested mapping length as apfs_map_bh() replaces it with
* the filesystem block size
*/
map_len = bh_result->b_size;
@@ -133,7 +133,7 @@ int __apfs_get_block(struct inode *inode, sector_t iblock,
if (ext.phys_block_num != 0) {
/* Find the block number of iblock within the disk */
bno = ext.phys_block_num + blk_off;
map_bh(bh_result, sb, bno);
apfs_map_bh(bh_result, sb, bno);
}
bh_result->b_size = map_len;
return 0;
@@ -142,13 +142,12 @@ int __apfs_get_block(struct inode *inode, sector_t iblock,
int apfs_get_block(struct inode *inode, sector_t iblock,
struct buffer_head *bh_result, int create)
{
struct super_block *sb = inode->i_sb;
struct apfs_sb_info *sbi = APFS_SB(sb);
struct apfs_nxsb_info *nxi = APFS_NXI(inode->i_sb);
int ret;
down_read(&sbi->s_big_sem);
down_read(&nxi->nx_big_sem);
ret = __apfs_get_block(inode, iblock, bh_result, create);
up_read(&sbi->s_big_sem);
up_read(&nxi->nx_big_sem);
return ret;
}
@@ -230,8 +229,7 @@ static int apfs_create_phys_extent(struct inode *inode,
struct apfs_file_extent *extent)
{
struct super_block *sb = inode->i_sb;
struct apfs_sb_info *sbi = APFS_SB(sb);
struct apfs_superblock *vsb_raw = sbi->s_vsb_raw;
struct apfs_superblock *vsb_raw = APFS_SB(sb)->s_vsb_raw;
struct apfs_inode_info *ai = APFS_I(inode);
struct apfs_node *extref_root;
struct apfs_key key;
@@ -247,7 +245,7 @@ static int apfs_create_phys_extent(struct inode *inode,
APFS_OBJ_PHYSICAL, true /* write */);
if (IS_ERR(extref_root))
return PTR_ERR(extref_root);
ASSERT(sbi->s_xid == le64_to_cpu(vsb_raw->apfs_o.o_xid));
apfs_assert_in_transaction(sb, &vsb_raw->apfs_o);
vsb_raw->apfs_extentref_tree_oid = cpu_to_le64(extref_root->object.oid);
query = apfs_alloc_query(extref_root, NULL /* parent */);
@@ -296,8 +294,7 @@ static int apfs_delete_phys_extent(struct inode *inode,
struct apfs_file_extent *extent)
{
struct super_block *sb = inode->i_sb;
struct apfs_sb_info *sbi = APFS_SB(sb);
struct apfs_superblock *vsb_raw = sbi->s_vsb_raw;
struct apfs_superblock *vsb_raw = APFS_SB(sb)->s_vsb_raw;
struct apfs_node *extref_root;
struct apfs_key key;
struct apfs_query *query = NULL;
@@ -308,7 +305,7 @@ static int apfs_delete_phys_extent(struct inode *inode,
APFS_OBJ_PHYSICAL, true /* write */);
if (IS_ERR(extref_root))
return PTR_ERR(extref_root);
ASSERT(sbi->s_xid == le64_to_cpu(vsb_raw->apfs_o.o_xid));
apfs_assert_in_transaction(sb, &vsb_raw->apfs_o);
vsb_raw->apfs_extentref_tree_oid = cpu_to_le64(extref_root->object.oid);
query = apfs_alloc_query(extref_root, NULL /* parent */);
@@ -354,7 +351,7 @@ int apfs_get_new_block(struct inode *inode, sector_t iblock,
if (err)
return err;
map_bh(bh_result, sb, ext.phys_block_num);
apfs_map_bh(bh_result, sb, ext.phys_block_num);
err = apfs_transaction_join(sb, bh_result);
if (err)
return err;
@@ -380,7 +377,7 @@ int apfs_get_new_block(struct inode *inode, sector_t iblock,
return err;
}
/* Just invalidate the cache; s_big_sem provides the locking here */
/* Just invalidate the cache; nx_big_sem provides the locking here */
ai->i_cached_extent.len = 0;
return 0;
+9 -10
View File
@@ -375,6 +375,7 @@ static struct inode *apfs_iget_locked(struct super_block *sb, u64 cnid)
struct inode *apfs_iget(struct super_block *sb, u64 cnid)
{
struct apfs_sb_info *sbi = APFS_SB(sb);
struct apfs_nxsb_info *nxi = APFS_NXI(sb);
struct inode *inode;
struct apfs_query *query;
int err;
@@ -385,7 +386,7 @@ struct inode *apfs_iget(struct super_block *sb, u64 cnid)
if (!(inode->i_state & I_NEW))
return inode;
down_read(&sbi->s_big_sem);
down_read(&nxi->nx_big_sem);
query = apfs_inode_lookup(inode);
if (IS_ERR(query)) {
err = PTR_ERR(query);
@@ -395,7 +396,7 @@ struct inode *apfs_iget(struct super_block *sb, u64 cnid)
apfs_free_query(sb, query);
if (err)
goto fail;
up_read(&sbi->s_big_sem);
up_read(&nxi->nx_big_sem);
/* Allow the user to override the ownership */
if (uid_valid(sbi->s_uid))
@@ -408,7 +409,7 @@ struct inode *apfs_iget(struct super_block *sb, u64 cnid)
return inode;
fail:
up_read(&sbi->s_big_sem);
up_read(&nxi->nx_big_sem);
iget_failed(inode);
return ERR_PTR(err);
}
@@ -688,7 +689,7 @@ int apfs_update_inode(struct inode *inode, char *new_name)
goto fail;
bh = query->node->object.bh;
node_raw = (void *)bh->b_data;
ASSERT(sbi->s_xid == le64_to_cpu(node_raw->btn_o.o_xid));
apfs_assert_in_transaction(sb, &node_raw->btn_o);
inode_raw = (void *)node_raw + query->off;
inode_raw->parent_id = cpu_to_le64(ai->i_parent_id);
@@ -729,8 +730,7 @@ fail:
static int apfs_delete_inode(struct inode *inode)
{
struct super_block *sb = inode->i_sb;
struct apfs_sb_info *sbi = APFS_SB(sb);
struct apfs_superblock *vsb_raw = sbi->s_vsb_raw;
struct apfs_superblock *vsb_raw = APFS_SB(sb)->s_vsb_raw;
struct apfs_query *query;
int ret;
@@ -743,7 +743,7 @@ static int apfs_delete_inode(struct inode *inode)
ret = apfs_btree_remove(query);
apfs_free_query(sb, query);
ASSERT(sbi->s_xid == le64_to_cpu(vsb_raw->apfs_o.o_xid));
apfs_assert_in_transaction(sb, &vsb_raw->apfs_o);
switch (inode->i_mode & S_IFMT) {
case S_IFREG:
le64_add_cpu(&vsb_raw->apfs_num_files, -1);
@@ -814,15 +814,14 @@ static int apfs_insert_inode_locked(struct inode *inode)
struct inode *apfs_new_inode(struct inode *dir, umode_t mode, dev_t rdev)
{
struct super_block *sb = dir->i_sb;
struct apfs_sb_info *sbi = APFS_SB(sb);
struct apfs_superblock *vsb_raw = sbi->s_vsb_raw;
struct apfs_superblock *vsb_raw = APFS_SB(sb)->s_vsb_raw;
struct inode *inode;
struct apfs_inode_info *ai;
u64 cnid;
struct timespec64 now;
/* Updating on-disk structures here is odd, but it works for now */
ASSERT(sbi->s_xid == le64_to_cpu(vsb_raw->apfs_o.o_xid));
apfs_assert_in_transaction(sb, &vsb_raw->apfs_o);
inode = new_inode(sb);
if (!inode)
+13 -16
View File
@@ -68,6 +68,7 @@ struct apfs_node *apfs_read_node(struct super_block *sb, u64 oid, u32 storage,
bool write)
{
struct apfs_sb_info *sbi = APFS_SB(sb);
struct apfs_nxsb_info *nxi = APFS_NXI(sb);
struct buffer_head *bh = NULL;
struct apfs_btree_node_phys *raw;
struct apfs_node *node;
@@ -127,7 +128,7 @@ struct apfs_node *apfs_read_node(struct super_block *sb, u64 oid, u32 storage,
kref_init(&node->refcount);
if (sbi->s_flags & APFS_CHECK_NODES &&
if (nxi->nx_flags & APFS_CHECK_NODES &&
!apfs_obj_verify_csum(sb, &raw->btn_o)) {
/* TODO: don't check this twice for virtual/physical objects */
apfs_alert(sb, "bad checksum for node in block 0x%llx", bh->b_blocknr);
@@ -155,7 +156,8 @@ struct apfs_node *apfs_read_node(struct super_block *sb, u64 oid, u32 storage,
static struct apfs_node *apfs_create_node(struct super_block *sb, u32 storage)
{
struct apfs_sb_info *sbi = APFS_SB(sb);
struct apfs_nx_superblock *msb_raw = sbi->s_msb_raw;
struct apfs_nxsb_info *nxi = APFS_NXI(sb);
struct apfs_nx_superblock *msb_raw = nxi->nx_raw;
struct apfs_superblock *vsb_raw = sbi->s_vsb_raw;
struct apfs_node *node;
struct buffer_head *bh;
@@ -197,7 +199,7 @@ static struct apfs_node *apfs_create_node(struct super_block *sb, u32 storage)
ASSERT(false);
}
bh = sb_bread(sb, bno);
bh = apfs_sb_bread(sb, bno);
if (!bh)
return ERR_PTR(-EIO);
raw = (void *)bh->b_data;
@@ -208,7 +210,7 @@ static struct apfs_node *apfs_create_node(struct super_block *sb, u32 storage)
/* Set most of the object header, but the subtype is up to the caller */
raw->btn_o.o_oid = cpu_to_le64(oid);
raw->btn_o.o_xid = cpu_to_le64(sbi->s_xid);
raw->btn_o.o_xid = cpu_to_le64(nxi->nx_xid);
raw->btn_o.o_type = cpu_to_le32(storage | APFS_OBJECT_TYPE_BTREE_NODE);
raw->btn_o.o_subtype = 0;
@@ -253,14 +255,13 @@ fail:
int apfs_delete_node(struct apfs_query *query)
{
struct super_block *sb = query->node->object.sb;
struct apfs_sb_info *sbi = APFS_SB(sb);
struct apfs_superblock *vsb_raw = sbi->s_vsb_raw;
struct apfs_superblock *vsb_raw = APFS_SB(sb)->s_vsb_raw;
struct apfs_node *node = query->node;
u64 oid = node->object.oid;
u64 bno = node->object.block_nr;
int err;
ASSERT(sbi->s_xid == le64_to_cpu(vsb_raw->apfs_o.o_xid));
apfs_assert_in_transaction(sb, &vsb_raw->apfs_o);
switch (query->flags & APFS_QUERY_TREE_MASK) {
case APFS_QUERY_CAT:
@@ -293,14 +294,13 @@ int apfs_delete_node(struct apfs_query *query)
void apfs_update_node(struct apfs_node *node)
{
struct super_block *sb = node->object.sb;
struct apfs_sb_info *sbi = APFS_SB(sb);
struct buffer_head *bh = node->object.bh;
struct apfs_btree_node_phys *raw = (void *)bh->b_data;
struct apfs_nloc *free_head;
u32 tflags, type;
int toc_off;
ASSERT(sbi->s_xid == le64_to_cpu(raw->btn_o.o_xid));
apfs_assert_in_transaction(sb, &raw->btn_o);
raw->btn_o.o_oid = cpu_to_le64(node->object.oid);
@@ -709,7 +709,6 @@ static int apfs_btree_inc_height(struct apfs_query *query)
struct apfs_node *root = query->node;
struct apfs_node *new_node;
struct super_block *sb = root->object.sb;
struct apfs_sb_info *sbi = APFS_SB(sb);
struct apfs_btree_node_phys *root_raw;
struct apfs_btree_node_phys *new_raw;
struct apfs_btree_info *info;
@@ -718,7 +717,7 @@ static int apfs_btree_inc_height(struct apfs_query *query)
int toc_entry_size;
root_raw = (void *)root->object.bh->b_data;
ASSERT(sbi->s_xid == le64_to_cpu(root_raw->btn_o.o_xid));
apfs_assert_in_transaction(sb, &root_raw->btn_o);
if (query->parent || query->depth)
return -EFSCORRUPTED;
@@ -815,7 +814,6 @@ static int apfs_copy_record_range(struct apfs_node *dest_node,
int start, int end)
{
struct super_block *sb = dest_node->object.sb;
struct apfs_sb_info *sbi = APFS_SB(sb);
struct apfs_btree_node_phys *dest_raw;
struct apfs_btree_node_phys *src_raw;
struct apfs_query *query = NULL;
@@ -828,7 +826,7 @@ static int apfs_copy_record_range(struct apfs_node *dest_node,
ASSERT(!dest_node->records);
ASSERT(!apfs_node_is_root(dest_node));
ASSERT(sbi->s_xid == le64_to_cpu(dest_raw->btn_o.o_xid));
apfs_assert_in_transaction(sb, &dest_raw->btn_o);
/* Resize the table of contents so that all the records fit */
if (apfs_node_has_fixed_kv_size(src_node))
@@ -913,7 +911,6 @@ static int apfs_attach_child(struct apfs_query *query, struct apfs_node *child)
int apfs_node_split(struct apfs_query *query)
{
struct super_block *sb = query->node->object.sb;
struct apfs_sb_info *sbi = APFS_SB(sb);
struct apfs_node *old_node, *new_node;
struct apfs_btree_node_phys *old_raw, *new_raw;
char *buffer = NULL;
@@ -934,7 +931,7 @@ int apfs_node_split(struct apfs_query *query)
apfs_btree_change_node_count(query->parent, 1 /* change */);
old_raw = (void *)old_node->object.bh->b_data;
ASSERT(sbi->s_xid == le64_to_cpu(old_raw->btn_o.o_xid));
apfs_assert_in_transaction(sb, &old_raw->btn_o);
/*
* XXX: to defragment the original node, we put all records in a
@@ -976,7 +973,7 @@ int apfs_node_split(struct apfs_query *query)
if (err)
goto out_put_node;
new_raw = (void *)new_node->object.bh->b_data;
ASSERT(sbi->s_xid == le64_to_cpu(new_raw->btn_o.o_xid));
apfs_assert_in_transaction(sb, &new_raw->btn_o);
new_raw->btn_level = old_raw->btn_level;
apfs_update_node(new_node);
+13 -14
View File
@@ -96,8 +96,7 @@ static int apfs_cpm_lookup_oid(struct super_block *sb,
*/
int apfs_create_cpoint_map(struct super_block *sb, u64 oid, u64 bno)
{
struct apfs_sb_info *sbi = APFS_SB(sb);
struct apfs_nx_superblock *raw_sb = sbi->s_msb_raw;
struct apfs_nx_superblock *raw_sb = APFS_NXI(sb)->nx_raw;
u64 desc_base = le64_to_cpu(raw_sb->nx_xp_desc_base);
u32 desc_index = le32_to_cpu(raw_sb->nx_xp_desc_index);
u32 desc_blks = le32_to_cpu(raw_sb->nx_xp_desc_blocks);
@@ -114,11 +113,11 @@ int apfs_create_cpoint_map(struct super_block *sb, u64 oid, u64 bno)
/* Last block in area is superblock; we want the last mapping block */
cpm_bno = desc_base + (desc_index + desc_len - 2) % desc_blks;
bh = sb_bread(sb, cpm_bno);
bh = apfs_sb_bread(sb, cpm_bno);
if (!bh)
return -EIO;
cpm = (struct apfs_checkpoint_map_phys *)bh->b_data;
ASSERT(sbi->s_xid == le64_to_cpu(cpm->cpm_o.o_xid));
apfs_assert_in_transaction(sb, &cpm->cpm_o);
cpm_count = le32_to_cpu(cpm->cpm_count);
if (cpm_count >= apfs_max_maps_per_block(sb)) { /* TODO */
@@ -153,8 +152,8 @@ fail:
*/
struct buffer_head *apfs_read_ephemeral_object(struct super_block *sb, u64 oid)
{
struct apfs_sb_info *sbi = APFS_SB(sb);
struct apfs_nx_superblock *raw_sb = sbi->s_msb_raw;
struct apfs_nxsb_info *nxi = APFS_NXI(sb);
struct apfs_nx_superblock *raw_sb = nxi->nx_raw;
u64 desc_base = le64_to_cpu(raw_sb->nx_xp_desc_base);
u32 desc_index = le32_to_cpu(raw_sb->nx_xp_desc_index);
u32 desc_blks = le32_to_cpu(raw_sb->nx_xp_desc_blocks);
@@ -172,7 +171,7 @@ struct buffer_head *apfs_read_ephemeral_object(struct super_block *sb, u64 oid)
u64 obj_bno;
int err;
bh = sb_bread(sb, cpm_bno);
bh = apfs_sb_bread(sb, cpm_bno);
if (!bh)
return ERR_PTR(-EIO);
cpm = (struct apfs_checkpoint_map_phys *)bh->b_data;
@@ -185,7 +184,7 @@ struct buffer_head *apfs_read_ephemeral_object(struct super_block *sb, u64 oid)
if (err)
return ERR_PTR(err);
bh = sb_bread(sb, obj_bno);
bh = apfs_sb_bread(sb, obj_bno);
if (!bh)
return ERR_PTR(-EIO);
return bh;
@@ -206,21 +205,21 @@ struct buffer_head *apfs_read_ephemeral_object(struct super_block *sb, u64 oid)
struct buffer_head *apfs_read_object_block(struct super_block *sb, u64 bno,
bool write)
{
struct apfs_sb_info *sbi = APFS_SB(sb);
struct apfs_nxsb_info *nxi = APFS_NXI(sb);
struct buffer_head *bh, *new_bh;
struct apfs_obj_phys *obj;
u32 type;
u64 new_bno;
int err;
bh = sb_bread(sb, bno);
bh = apfs_sb_bread(sb, bno);
if (!bh)
return ERR_PTR(-EIO);
obj = (struct apfs_obj_phys *)bh->b_data;
type = le32_to_cpu(obj->o_type);
ASSERT(!(type & APFS_OBJ_EPHEMERAL));
if (sbi->s_flags & APFS_CHECK_NODES && !apfs_obj_verify_csum(sb, obj)) {
if (nxi->nx_flags & APFS_CHECK_NODES && !apfs_obj_verify_csum(sb, obj)) {
err = -EFSBADCRC;
goto fail;
}
@@ -230,13 +229,13 @@ struct buffer_head *apfs_read_object_block(struct super_block *sb, u64 bno,
ASSERT(!(sb->s_flags & SB_RDONLY));
/* Is the object already part of the current transaction? */
if (obj->o_xid == cpu_to_le64(sbi->s_xid))
if (obj->o_xid == cpu_to_le64(nxi->nx_xid))
return bh;
err = apfs_spaceman_allocate_block(sb, &new_bno);
if (err)
goto fail;
new_bh = sb_bread(sb, new_bno);
new_bh = apfs_sb_bread(sb, new_bno);
if (!new_bh) {
err = -EIO;
goto fail;
@@ -253,7 +252,7 @@ struct buffer_head *apfs_read_object_block(struct super_block *sb, u64 bno,
if (type & APFS_OBJ_PHYSICAL)
obj->o_oid = cpu_to_le64(new_bno);
obj->o_xid = cpu_to_le64(sbi->s_xid);
obj->o_xid = cpu_to_le64(nxi->nx_xid);
err = apfs_transaction_join(sb, bh);
if (err)
goto fail;
+42 -46
View File
@@ -19,8 +19,7 @@
*/
static u64 apfs_spaceman_read_cib_addr(struct super_block *sb, int index)
{
struct apfs_sb_info *sbi = APFS_SB(sb);
struct apfs_spaceman *sm = &sbi->s_spaceman;
struct apfs_spaceman *sm = APFS_SM(sb);
struct apfs_spaceman_phys *sm_raw = sm->sm_raw;
u32 offset;
__le64 *addr_p;
@@ -39,13 +38,12 @@ static u64 apfs_spaceman_read_cib_addr(struct super_block *sb, int index)
static void apfs_spaceman_write_cib_addr(struct super_block *sb,
int index, u64 addr)
{
struct apfs_sb_info *sbi = APFS_SB(sb);
struct apfs_spaceman *sm = &sbi->s_spaceman;
struct apfs_spaceman *sm = APFS_SM(sb);
struct apfs_spaceman_phys *sm_raw = sm->sm_raw;
u32 offset;
__le64 *addr_p;
ASSERT(le64_to_cpu(sm_raw->sm_o.o_xid) == sbi->s_xid);
apfs_assert_in_transaction(sb, &sm_raw->sm_o);
offset = sm->sm_addr_offset + index * sizeof(*addr_p);
addr_p = (void *)sm_raw + offset;
@@ -74,8 +72,7 @@ static inline int apfs_max_chunks_per_cib(struct super_block *sb)
static int apfs_read_spaceman_dev(struct super_block *sb,
struct apfs_spaceman_device *dev)
{
struct apfs_sb_info *sbi = APFS_SB(sb);
struct apfs_spaceman *spaceman = &sbi->s_spaceman;
struct apfs_spaceman *spaceman = APFS_SM(sb);
if (dev->sm_cab_count) {
apfs_err(sb, "large devices are not supported");
@@ -105,7 +102,7 @@ static int apfs_read_spaceman_dev(struct super_block *sb,
*/
static __le64 *apfs_spaceman_get_64(struct super_block *sb, size_t off)
{
struct apfs_spaceman *spaceman = &APFS_SB(sb)->s_spaceman;
struct apfs_spaceman *spaceman = APFS_SM(sb);
struct apfs_spaceman_phys *sm_raw = spaceman->sm_raw;
if (off > sb->s_blocksize)
@@ -142,7 +139,7 @@ static bool apfs_ip_bm_is_free(struct apfs_spaceman_phys *sm, u16 index)
*/
static int apfs_update_ip_bm_free_next(struct super_block *sb)
{
struct apfs_spaceman *spaceman = &APFS_SB(sb)->s_spaceman;
struct apfs_spaceman *spaceman = APFS_SM(sb);
struct apfs_spaceman_phys *raw = spaceman->sm_raw;
u32 free_next_off = le32_to_cpu(raw->sm_ip_bm_free_next_offset);
int bmap_count = 16;
@@ -172,8 +169,8 @@ static int apfs_update_ip_bm_free_next(struct super_block *sb)
*/
static int apfs_rotate_ip_bitmaps(struct super_block *sb)
{
struct apfs_sb_info *sbi = APFS_SB(sb);
struct apfs_spaceman *spaceman = &sbi->s_spaceman;
struct apfs_nxsb_info *nxi = APFS_NXI(sb);
struct apfs_spaceman *spaceman = APFS_SM(sb);
struct apfs_spaceman_phys *sm_raw = spaceman->sm_raw;
u64 bmap_base = le64_to_cpu(sm_raw->sm_ip_bm_base);
u32 bmap_length = le32_to_cpu(sm_raw->sm_ip_bm_block_count);
@@ -182,7 +179,7 @@ static int apfs_rotate_ip_bitmaps(struct super_block *sb)
struct buffer_head *old_bh = NULL, *new_bh = NULL;
int err = 0;
ASSERT(le64_to_cpu(sm_raw->sm_o.o_xid) == sbi->s_xid);
apfs_assert_in_transaction(sb, &sm_raw->sm_o);
brelse(spaceman->sm_ip);
spaceman->sm_ip = NULL;
@@ -195,7 +192,7 @@ static int apfs_rotate_ip_bitmaps(struct super_block *sb)
xid = apfs_spaceman_get_64(sb, le32_to_cpu(sm_raw->sm_ip_bm_xid_offset));
if (!xid)
return -EFSCORRUPTED;
*xid = cpu_to_le64(sbi->s_xid);
*xid = cpu_to_le64(nxi->nx_xid);
free_head = le16_to_cpu(sm_raw->sm_ip_bm_free_head);
free_tail = le16_to_cpu(sm_raw->sm_ip_bm_free_tail);
@@ -203,7 +200,7 @@ static int apfs_rotate_ip_bitmaps(struct super_block *sb)
curr_bmap_off = apfs_spaceman_get_64(sb, le32_to_cpu(sm_raw->sm_ip_bitmap_offset));
if (!curr_bmap_off)
return -EFSCORRUPTED;
old_bh = sb_bread(sb, bmap_base + le64_to_cpup(curr_bmap_off));
old_bh = apfs_sb_bread(sb, bmap_base + le64_to_cpup(curr_bmap_off));
if (!old_bh)
return -EIO;
@@ -216,7 +213,7 @@ static int apfs_rotate_ip_bitmaps(struct super_block *sb)
if (err)
goto out;
new_bh = sb_bread(sb, bmap_base + le64_to_cpup(curr_bmap_off));
new_bh = apfs_sb_bread(sb, bmap_base + le64_to_cpup(curr_bmap_off));
if (!new_bh) {
err = -EIO;
goto out;
@@ -244,9 +241,9 @@ out:
*/
int apfs_read_spaceman(struct super_block *sb)
{
struct apfs_sb_info *sbi = APFS_SB(sb);
struct apfs_nx_superblock *raw_sb = sbi->s_msb_raw;
struct apfs_spaceman *spaceman = &sbi->s_spaceman;
struct apfs_nxsb_info *nxi = APFS_NXI(sb);
struct apfs_nx_superblock *raw_sb = nxi->nx_raw;
struct apfs_spaceman *spaceman = APFS_SM(sb);
struct buffer_head *sm_bh;
struct apfs_spaceman_phys *sm_raw;
u32 sm_flags;
@@ -261,7 +258,7 @@ int apfs_read_spaceman(struct super_block *sb)
return PTR_ERR(sm_bh);
sm_raw = (struct apfs_spaceman_phys *)sm_bh->b_data;
if (sbi->s_flags & APFS_CHECK_NODES &&
if (nxi->nx_flags & APFS_CHECK_NODES &&
!apfs_obj_verify_csum(sb, &sm_raw->sm_o)) {
apfs_err(sb, "bad checksum for the space manager");
err = -EFSBADCRC;
@@ -309,10 +306,10 @@ static void apfs_write_spaceman(struct apfs_spaceman *sm)
{
struct apfs_spaceman_phys *sm_raw = sm->sm_raw;
struct apfs_spaceman_device *dev_raw = &sm_raw->sm_dev[APFS_SD_MAIN];
struct apfs_sb_info *sbi;
struct apfs_nxsb_info *nxi;
sbi = container_of(sm, struct apfs_sb_info, s_spaceman);
ASSERT(le64_to_cpu(sm_raw->sm_o.o_xid) == sbi->s_xid);
nxi = container_of(sm, struct apfs_nxsb_info, nx_spaceman);
ASSERT(le64_to_cpu(sm_raw->sm_o.o_xid) == nxi->nx_xid);
dev_raw->sm_free_count = cpu_to_le64(sm->sm_free_count);
}
@@ -325,7 +322,7 @@ static void apfs_write_spaceman(struct apfs_spaceman *sm)
*/
static u64 apfs_ip_find_free(struct super_block *sb)
{
struct apfs_spaceman *sm = &APFS_SB(sb)->s_spaceman;
struct apfs_spaceman *sm = APFS_SM(sb);
struct apfs_spaceman_phys *sm_raw = sm->sm_raw;
int bitcount = le64_to_cpu(sm_raw->sm_ip_block_count);
char *bitmap = sm->sm_ip->b_data;
@@ -365,7 +362,7 @@ static u64 apfs_chunk_find_free(struct super_block *sb, char *bitmap, u64 addr)
*/
static void apfs_ip_mark_used(struct super_block *sb, u64 bno)
{
struct apfs_spaceman *sm = &APFS_SB(sb)->s_spaceman;
struct apfs_spaceman *sm = APFS_SM(sb);
struct apfs_spaceman_phys *sm_raw = sm->sm_raw;
char *bitmap = sm->sm_ip->b_data;
@@ -410,8 +407,8 @@ static inline bool apfs_block_in_ip(struct apfs_spaceman *sm, u64 bno)
*/
int apfs_free_queue_insert(struct super_block *sb, u64 bno)
{
struct apfs_sb_info *sbi = APFS_SB(sb);
struct apfs_spaceman *sm = &sbi->s_spaceman;
struct apfs_nxsb_info *nxi = APFS_NXI(sb);
struct apfs_spaceman *sm = APFS_SM(sb);
struct apfs_spaceman_phys *sm_raw = sm->sm_raw;
struct apfs_spaceman_free_queue *fq;
struct apfs_node *fq_root;
@@ -436,7 +433,7 @@ int apfs_free_queue_insert(struct super_block *sb, u64 bno)
goto fail;
}
apfs_init_free_queue_key(sbi->s_xid, bno, &key);
apfs_init_free_queue_key(nxi->nx_xid, bno, &key);
query->key = &key;
query->flags |= APFS_QUERY_FREE_QUEUE;
@@ -444,7 +441,7 @@ int apfs_free_queue_insert(struct super_block *sb, u64 bno)
if (err && err != -ENODATA)
goto fail;
raw_key.sfqk_xid = cpu_to_le64(sbi->s_xid);
raw_key.sfqk_xid = cpu_to_le64(nxi->nx_xid);
raw_key.sfqk_paddr = cpu_to_le64(bno);
/* A lack of value (ghost record) implies a single-block extent */
err = apfs_btree_insert(query, &raw_key, sizeof(raw_key),
@@ -453,7 +450,7 @@ int apfs_free_queue_insert(struct super_block *sb, u64 bno)
goto fail;
if (!fq->sfq_oldest_xid)
fq->sfq_oldest_xid = cpu_to_le64(sbi->s_xid);
fq->sfq_oldest_xid = cpu_to_le64(nxi->nx_xid);
le64_add_cpu(&fq->sfq_count, 1);
apfs_obj_set_csum(sb, &sm_raw->sm_o);
@@ -478,8 +475,8 @@ static int apfs_chunk_allocate_block(struct super_block *sb,
struct buffer_head **cib_bh,
int index, u64 *bno)
{
struct apfs_sb_info *sbi = APFS_SB(sb);
struct apfs_spaceman *sm = &sbi->s_spaceman;
struct apfs_nxsb_info *nxi = APFS_NXI(sb);
struct apfs_spaceman *sm = APFS_SM(sb);
struct apfs_chunk_info_block *cib;
struct apfs_chunk_info *ci;
struct buffer_head *bmap_bh = NULL;
@@ -492,9 +489,9 @@ static int apfs_chunk_allocate_block(struct super_block *sb,
ci = &cib->cib_chunk_info[index];
/* Cibs and bitmaps from old transactions can't be modified in place */
if (le64_to_cpu(cib->cib_o.o_xid) < sbi->s_xid)
if (le64_to_cpu(cib->cib_o.o_xid) < nxi->nx_xid)
old_cib = true;
if (le64_to_cpu(ci->ci_xid) < sbi->s_xid)
if (le64_to_cpu(ci->ci_xid) < nxi->nx_xid)
old_bmap = true;
if (le32_to_cpu(ci->ci_free_count) < 1)
return -ENOSPC;
@@ -507,9 +504,9 @@ static int apfs_chunk_allocate_block(struct super_block *sb,
bmap_bno = apfs_ip_find_free(sb);
if (!bmap_bno)
return -EFSCORRUPTED;
bmap_bh = sb_bread(sb, bmap_bno);
bmap_bh = apfs_sb_bread(sb, bmap_bno);
} else {
bmap_bh = sb_bread(sb, le64_to_cpu(ci->ci_bitmap_addr));
bmap_bh = apfs_sb_bread(sb, le64_to_cpu(ci->ci_bitmap_addr));
}
if (!bmap_bh)
return -EIO;
@@ -530,7 +527,7 @@ static int apfs_chunk_allocate_block(struct super_block *sb,
goto fail;
}
new_bmap_bh = sb_bread(sb, new_bmap_bno);
new_bmap_bh = apfs_sb_bread(sb, new_bmap_bno);
if (!new_bmap_bh) {
err = -EIO;
goto fail;
@@ -556,7 +553,7 @@ static int apfs_chunk_allocate_block(struct super_block *sb,
goto fail;
}
new_cib_bh = sb_bread(sb, new_cib_bno);
new_cib_bh = apfs_sb_bread(sb, new_cib_bno);
if (!new_cib_bh) {
err = -EIO;
goto fail;
@@ -571,14 +568,14 @@ static int apfs_chunk_allocate_block(struct super_block *sb,
cib = (struct apfs_chunk_info_block *)(*cib_bh)->b_data;
ci = &cib->cib_chunk_info[index];
cib->cib_o.o_oid = cpu_to_le64(new_cib_bno);
cib->cib_o.o_xid = cpu_to_le64(sbi->s_xid);
cib->cib_o.o_xid = cpu_to_le64(nxi->nx_xid);
apfs_ip_mark_used(sb, new_cib_bno);
}
/* The chunk info can be updated now */
ASSERT(le64_to_cpu(cib->cib_o.o_xid) == sbi->s_xid);
ci->ci_xid = cpu_to_le64(sbi->s_xid);
apfs_assert_in_transaction(sb, &cib->cib_o);
ci->ci_xid = cpu_to_le64(nxi->nx_xid);
le32_add_cpu(&ci->ci_free_count, -1);
ci->ci_bitmap_addr = cpu_to_le64(bmap_bh->b_blocknr);
apfs_obj_set_csum(sb, &cib->cib_o);
@@ -612,14 +609,14 @@ fail:
static int apfs_cib_allocate_block(struct super_block *sb,
struct buffer_head **cib_bh, u64 *bno)
{
struct apfs_sb_info *sbi = APFS_SB(sb);
struct apfs_spaceman *sm = &sbi->s_spaceman;
struct apfs_nxsb_info *nxi = APFS_NXI(sb);
struct apfs_spaceman *sm = APFS_SM(sb);
struct apfs_chunk_info_block *cib;
u32 chunk_count;
int i;
cib = (struct apfs_chunk_info_block *)(*cib_bh)->b_data;
if (sbi->s_flags & APFS_CHECK_NODES &&
if (nxi->nx_flags & APFS_CHECK_NODES &&
!apfs_obj_verify_csum(sb, &cib->cib_o)) {
apfs_err(sb, "bad checksum for chunk-info block");
return -EFSBADCRC;
@@ -651,8 +648,7 @@ static int apfs_cib_allocate_block(struct super_block *sb,
*/
int apfs_spaceman_allocate_block(struct super_block *sb, u64 *bno)
{
struct apfs_sb_info *sbi = APFS_SB(sb);
struct apfs_spaceman *sm = &sbi->s_spaceman;
struct apfs_spaceman *sm = APFS_SM(sb);
struct apfs_spaceman_phys *sm_raw = sm->sm_raw;
int i;
@@ -662,7 +658,7 @@ int apfs_spaceman_allocate_block(struct super_block *sb, u64 *bno)
int err;
cib_bno = apfs_spaceman_read_cib_addr(sb, i);
cib_bh = sb_bread(sb, cib_bno);
cib_bh = apfs_sb_bread(sb, cib_bno);
if (!cib_bh)
return -EIO;
+323 -75
View File
File diff suppressed because it is too large Load Diff
+4 -4
View File
@@ -20,12 +20,12 @@ static const char *apfs_get_link(struct dentry *dentry, struct inode *inode,
struct delayed_call *done)
{
struct super_block *sb = inode->i_sb;
struct apfs_sb_info *sbi = APFS_SB(sb);
struct apfs_nxsb_info *nxi = APFS_NXI(sb);
char *target = NULL;
int err;
int size;
down_read(&sbi->s_big_sem);
down_read(&nxi->nx_big_sem);
if (!dentry) {
err = -ECHILD;
@@ -58,13 +58,13 @@ static const char *apfs_get_link(struct dentry *dentry, struct inode *inode,
goto fail;
}
up_read(&sbi->s_big_sem);
up_read(&nxi->nx_big_sem);
set_delayed_call(done, kfree_link, target);
return target;
fail:
kfree(target);
up_read(&sbi->s_big_sem);
up_read(&nxi->nx_big_sem);
return ERR_PTR(err);
}
+50 -45
View File
@@ -21,7 +21,7 @@
static int apfs_cpoint_init_area(struct super_block *sb, u64 base, u32 blks,
u32 next, u32 len)
{
struct apfs_sb_info *sbi = APFS_SB(sb);
struct apfs_nxsb_info *nxi = APFS_NXI(sb);
u32 i;
for (i = 0; i < len; ++i) {
@@ -32,8 +32,8 @@ static int apfs_cpoint_init_area(struct super_block *sb, u64 base, u32 blks,
u32 type;
int err;
new_bh = sb_bread(sb, base + new_index);
old_bh = sb_bread(sb, base + old_index);
new_bh = apfs_sb_bread(sb, base + new_index);
old_bh = apfs_sb_bread(sb, base + old_index);
if (!new_bh || !old_bh) {
apfs_err(sb, "unable to read the checkpoint areas");
brelse(new_bh);
@@ -48,8 +48,8 @@ static int apfs_cpoint_init_area(struct super_block *sb, u64 base, u32 blks,
if ((type & APFS_OBJ_STORAGETYPE_MASK) == APFS_OBJ_PHYSICAL)
new_obj->o_oid = cpu_to_le64(new_bh->b_blocknr);
ASSERT(sbi->s_xid == le64_to_cpu(new_obj->o_xid) + 1);
new_obj->o_xid = cpu_to_le64(sbi->s_xid);
ASSERT(nxi->nx_xid == le64_to_cpu(new_obj->o_xid) + 1);
new_obj->o_xid = cpu_to_le64(nxi->nx_xid);
err = apfs_transaction_join(sb, new_bh);
if (err) {
brelse(new_bh);
@@ -78,8 +78,8 @@ static int apfs_cpoint_init_area(struct super_block *sb, u64 base, u32 blks,
*/
static int apfs_cpoint_init_desc(struct super_block *sb)
{
struct apfs_sb_info *sbi = APFS_SB(sb);
struct apfs_nx_superblock *raw_sb = sbi->s_msb_raw;
struct apfs_nxsb_info *nxi = APFS_NXI(sb);
struct apfs_nx_superblock *raw_sb = nxi->nx_raw;
struct buffer_head *new_sb_bh = NULL;
u64 desc_base = le64_to_cpu(raw_sb->nx_xp_desc_base);
u32 desc_next = le32_to_cpu(raw_sb->nx_xp_desc_next);
@@ -99,22 +99,22 @@ static int apfs_cpoint_init_desc(struct super_block *sb)
/* Now update the superblock with the new checkpoint */
new_sb_index = (desc_next + desc_len - 1) % desc_blks;
new_sb_bh = sb_bread(sb, desc_base + new_sb_index);
new_sb_bh = apfs_sb_bread(sb, desc_base + new_sb_index);
if (!new_sb_bh) {
apfs_err(sb, "unable to read the new checkpoint superblock");
brelse(new_sb_bh);
return -EINVAL;
}
brelse(sbi->s_mobject.bh);
sbi->s_mobject.bh = new_sb_bh;
brelse(nxi->nx_object.bh);
nxi->nx_object.bh = new_sb_bh;
raw_sb = (struct apfs_nx_superblock *)new_sb_bh->b_data;
sbi->s_msb_raw = raw_sb;
sbi->s_mobject.block_nr = new_sb_bh->b_blocknr;
nxi->nx_raw = raw_sb;
nxi->nx_object.block_nr = new_sb_bh->b_blocknr;
ASSERT(sbi->s_xid == le64_to_cpu(raw_sb->nx_next_xid));
ASSERT(nxi->nx_xid == le64_to_cpu(raw_sb->nx_next_xid));
ASSERT(buffer_trans(new_sb_bh));
raw_sb->nx_next_xid = cpu_to_le64(sbi->s_xid + 1);
raw_sb->nx_next_xid = cpu_to_le64(nxi->nx_xid + 1);
/* Apparently the previous checkpoint gets invalidated right away */
raw_sb->nx_xp_desc_index = cpu_to_le32(desc_next);
@@ -133,8 +133,8 @@ static int apfs_cpoint_init_desc(struct super_block *sb)
*/
static int apfs_cpoint_init_data(struct super_block *sb)
{
struct apfs_sb_info *sbi = APFS_SB(sb);
struct apfs_nx_superblock *raw_sb = sbi->s_msb_raw;
struct apfs_nxsb_info *nxi = APFS_NXI(sb);
struct apfs_nx_superblock *raw_sb = nxi->nx_raw;
u64 data_base = le64_to_cpu(raw_sb->nx_xp_data_base);
u32 data_next = le32_to_cpu(raw_sb->nx_xp_data_next);
u32 data_blks = le32_to_cpu(raw_sb->nx_xp_data_blocks);
@@ -150,7 +150,7 @@ static int apfs_cpoint_init_data(struct super_block *sb)
return err;
/* Apparently the previous checkpoint gets invalidated right away */
ASSERT(sbi->s_xid == le64_to_cpu(raw_sb->nx_o.o_xid));
apfs_assert_in_transaction(sb, &raw_sb->nx_o);
raw_sb->nx_xp_data_index = cpu_to_le32(data_next);
data_next = (data_next + data_len) % data_blks;
raw_sb->nx_xp_data_next = cpu_to_le32(data_next);
@@ -168,8 +168,8 @@ static int apfs_cpoint_init_data(struct super_block *sb)
static void apfs_update_mapping(struct super_block *sb,
struct apfs_checkpoint_mapping *map)
{
struct apfs_sb_info *sbi = APFS_SB(sb);
struct apfs_nx_superblock *raw_sb = sbi->s_msb_raw;
struct apfs_nxsb_info *nxi = APFS_NXI(sb);
struct apfs_nx_superblock *raw_sb = nxi->nx_raw;
u64 data_base = le64_to_cpu(raw_sb->nx_xp_data_base);
u32 data_blks = le32_to_cpu(raw_sb->nx_xp_data_blocks);
u32 data_len = le32_to_cpu(raw_sb->nx_xp_data_len);
@@ -194,8 +194,8 @@ static void apfs_update_mapping(struct super_block *sb,
*/
static int apfs_update_mapping_blocks(struct super_block *sb)
{
struct apfs_sb_info *sbi = APFS_SB(sb);
struct apfs_nx_superblock *raw_sb = sbi->s_msb_raw;
struct apfs_nxsb_info *nxi = APFS_NXI(sb);
struct apfs_nx_superblock *raw_sb = nxi->nx_raw;
u64 desc_base = le64_to_cpu(raw_sb->nx_xp_desc_base);
u32 desc_index = le32_to_cpu(raw_sb->nx_xp_desc_index);
u32 desc_blks = le32_to_cpu(raw_sb->nx_xp_desc_blocks);
@@ -210,12 +210,12 @@ static int apfs_update_mapping_blocks(struct super_block *sb)
u32 map_count;
int j;
bh = sb_bread(sb, desc_base + desc_curr);
bh = apfs_sb_bread(sb, desc_base + desc_curr);
if (!bh)
return -EINVAL;
ASSERT(buffer_trans(bh));
cpm = (struct apfs_checkpoint_map_phys *)bh->b_data;
ASSERT(sbi->s_xid == le64_to_cpu(cpm->cpm_o.o_xid));
apfs_assert_in_transaction(sb, &cpm->cpm_o);
map_count = le32_to_cpu(cpm->cpm_count);
if (map_count > apfs_max_maps_per_block(sb)) {
@@ -238,8 +238,8 @@ static int apfs_update_mapping_blocks(struct super_block *sb)
*/
void apfs_cpoint_data_allocate(struct super_block *sb, u64 *bno)
{
struct apfs_sb_info *sbi = APFS_SB(sb);
struct apfs_nx_superblock *raw_sb = sbi->s_msb_raw;
struct apfs_nxsb_info *nxi = APFS_NXI(sb);
struct apfs_nx_superblock *raw_sb = nxi->nx_raw;
u64 data_base = le64_to_cpu(raw_sb->nx_xp_data_base);
u32 data_next = le32_to_cpu(raw_sb->nx_xp_data_next);
u32 data_blks = le32_to_cpu(raw_sb->nx_xp_data_blocks);
@@ -249,7 +249,7 @@ void apfs_cpoint_data_allocate(struct super_block *sb, u64 *bno)
data_next = (data_next + 1) % data_blks;
data_len++;
ASSERT(sbi->s_xid == le64_to_cpu(raw_sb->nx_o.o_xid));
apfs_assert_in_transaction(sb, &raw_sb->nx_o);
raw_sb->nx_xp_data_next = cpu_to_le32(data_next);
raw_sb->nx_xp_data_len = cpu_to_le32(data_len);
}
@@ -290,9 +290,9 @@ static int apfs_checkpoint_start(struct super_block *sb,
*/
static int apfs_checkpoint_end(struct super_block *sb)
{
struct apfs_sb_info *sbi = APFS_SB(sb);
struct apfs_obj_phys *obj = &sbi->s_msb_raw->nx_o;
struct buffer_head *bh = sbi->s_mobject.bh;
struct apfs_nxsb_info *nxi = APFS_NXI(sb);
struct apfs_obj_phys *obj = &nxi->nx_raw->nx_o;
struct buffer_head *bh = nxi->nx_object.bh;
ASSERT(!(sb->s_flags & SB_RDONLY));
@@ -311,15 +311,17 @@ static int apfs_checkpoint_end(struct super_block *sb)
int apfs_transaction_start(struct super_block *sb)
{
struct apfs_sb_info *sbi = APFS_SB(sb);
struct apfs_transaction *trans = &sbi->s_transaction;
struct apfs_nxsb_info *nxi = APFS_NXI(sb);
struct apfs_transaction *trans = &nxi->nx_transaction;
int err;
down_write(&sbi->s_big_sem);
down_write(&nxi->nx_big_sem);
mutex_lock(&nxs_mutex); /* Don't mount during a transaction */
ASSERT(!trans->t_old_msb && !trans->t_old_vsb);
/* Backup the old superblock buffers in case the transaction fails */
trans->t_old_msb = sbi->s_mobject.bh;
trans->t_old_msb = nxi->nx_object.bh;
get_bh(trans->t_old_msb);
trans->t_old_vsb = sbi->s_vobject.bh;
get_bh(trans->t_old_vsb);
@@ -329,7 +331,7 @@ int apfs_transaction_start(struct super_block *sb)
trans->t_old_omap_root = *sbi->s_omap_root;
get_bh(trans->t_old_omap_root.object.bh);
++sbi->s_xid;
++nxi->nx_xid;
INIT_LIST_HEAD(&trans->t_buffers);
if (sb->s_flags & SB_RDONLY) {
@@ -378,8 +380,8 @@ fail:
*/
int apfs_transaction_commit(struct super_block *sb)
{
struct apfs_sb_info *sbi = APFS_SB(sb);
struct apfs_transaction *trans = &sbi->s_transaction;
struct apfs_nxsb_info *nxi = APFS_NXI(sb);
struct apfs_transaction *trans = &nxi->nx_transaction;
struct apfs_bh_info *bhi, *tmp;
int err = 0;
@@ -424,7 +426,8 @@ int apfs_transaction_commit(struct super_block *sb)
brelse(trans->t_old_cat_root.object.bh);
trans->t_old_cat_root.object.bh = NULL;
up_write(&sbi->s_big_sem);
mutex_unlock(&nxs_mutex);
up_write(&nxi->nx_big_sem);
return 0;
fail:
@@ -445,8 +448,8 @@ fail:
*/
int apfs_transaction_join(struct super_block *sb, struct buffer_head *bh)
{
struct apfs_sb_info *sbi = APFS_SB(sb);
struct apfs_transaction *trans = &sbi->s_transaction;
struct apfs_nxsb_info *nxi = APFS_NXI(sb);
struct apfs_transaction *trans = &nxi->nx_transaction;
struct apfs_bh_info *bhi;
ASSERT(!(sb->s_flags & SB_RDONLY));
@@ -479,12 +482,13 @@ int apfs_transaction_join(struct super_block *sb, struct buffer_head *bh)
void apfs_transaction_abort(struct super_block *sb)
{
struct apfs_sb_info *sbi = APFS_SB(sb);
struct apfs_transaction *trans = &sbi->s_transaction;
struct apfs_nxsb_info *nxi = APFS_NXI(sb);
struct apfs_transaction *trans = &nxi->nx_transaction;
struct apfs_bh_info *bhi, *tmp;
ASSERT(trans->t_old_msb && trans->t_old_vsb);
--sbi->s_xid;
--nxi->nx_xid;
list_for_each_entry_safe(bhi, tmp, &trans->t_buffers, list) {
struct buffer_head *bh = bhi->bh;
@@ -500,10 +504,10 @@ void apfs_transaction_abort(struct super_block *sb)
}
/* Restore the old container and volume superblocks */
brelse(sbi->s_mobject.bh);
sbi->s_mobject.bh = trans->t_old_msb;
sbi->s_mobject.block_nr = trans->t_old_msb->b_blocknr;
sbi->s_msb_raw = (void *)trans->t_old_msb->b_data;
brelse(nxi->nx_object.bh);
nxi->nx_object.bh = trans->t_old_msb;
nxi->nx_object.block_nr = trans->t_old_msb->b_blocknr;
nxi->nx_raw = (void *)trans->t_old_msb->b_data;
trans->t_old_msb = NULL;
brelse(sbi->s_vobject.bh);
sbi->s_vobject.bh = trans->t_old_vsb;
@@ -519,5 +523,6 @@ void apfs_transaction_abort(struct super_block *sb)
*(sbi->s_cat_root) = trans->t_old_cat_root;
trans->t_old_cat_root.object.bh = NULL;
up_write(&sbi->s_big_sem);
mutex_unlock(&nxs_mutex);
up_write(&nxi->nx_big_sem);
}
+7 -7
View File
@@ -144,7 +144,7 @@ static int apfs_xattr_extents_read(struct inode *parent,
bytes = min(sb->s_blocksize,
(unsigned long)(length - file_off));
bh = sb_bread(sb, ext.phys_block_num + j);
bh = apfs_sb_bread(sb, ext.phys_block_num + j);
if (!bh) {
ret = -EIO;
goto done;
@@ -251,13 +251,12 @@ done:
int apfs_xattr_get(struct inode *inode, const char *name, void *buffer,
size_t size)
{
struct super_block *sb = inode->i_sb;
struct apfs_sb_info *sbi = APFS_SB(sb);
struct apfs_nxsb_info *nxi = APFS_NXI(inode->i_sb);
int ret;
down_read(&sbi->s_big_sem);
down_read(&nxi->nx_big_sem);
ret = __apfs_xattr_get(inode, name, buffer, size);
up_read(&sbi->s_big_sem);
up_read(&nxi->nx_big_sem);
return ret;
}
@@ -285,13 +284,14 @@ ssize_t apfs_listxattr(struct dentry *dentry, char *buffer, size_t size)
struct inode *inode = d_inode(dentry);
struct super_block *sb = inode->i_sb;
struct apfs_sb_info *sbi = APFS_SB(sb);
struct apfs_nxsb_info *nxi = APFS_NXI(sb);
struct apfs_key key;
struct apfs_query *query;
u64 cnid = apfs_ino(inode);
size_t free = size;
ssize_t ret;
down_read(&sbi->s_big_sem);
down_read(&nxi->nx_big_sem);
query = apfs_alloc_query(sbi->s_cat_root, NULL /* parent */);
if (!query) {
@@ -339,6 +339,6 @@ ssize_t apfs_listxattr(struct dentry *dentry, char *buffer, size_t size)
fail:
apfs_free_query(sb, query);
up_read(&sbi->s_big_sem);
up_read(&nxi->nx_big_sem);
return ret;
}