mirror of https://github.com/ukui/kernel.git
Merge tag 'mm-nonmm-stable-2022-10-11' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
Pull non-MM updates from Andrew Morton:

 - hfs and hfsplus kmap API modernization (Fabio Francesco)

 - make crash-kexec work properly when invoked from an NMI-time panic
   (Valentin Schneider)

 - ntfs bugfixes (Hawkins Jiawei)

 - improve IPC msg scalability by replacing atomic_t's with percpu
   counters (Jiebin Sun)

 - nilfs2 cleanups (Minghao Chi)

 - lots of other single patches all over the tree!

* tag 'mm-nonmm-stable-2022-10-11' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm: (71 commits)
  include/linux/entry-common.h: remove has_signal comment of arch_do_signal_or_restart() prototype
  proc: test how it holds up with mapping'less process
  mailmap: update Frank Rowand email address
  ia64: mca: use strscpy() is more robust and safer
  init/Kconfig: fix unmet direct dependencies
  ia64: update config files
  nilfs2: replace WARN_ONs by nilfs_error for checkpoint acquisition failure
  fork: remove duplicate included header files
  init/main.c: remove unnecessary (void*) conversions
  proc: mark more files as permanent
  nilfs2: remove the unneeded result variable
  nilfs2: delete unnecessary checks before brelse()
  checkpatch: warn for non-standard fixes tag style
  usr/gen_init_cpio.c: remove unnecessary -1 values from int file
  ipc/msg: mitigate the lock contention with percpu counter
  percpu: add percpu_counter_add_local and percpu_counter_sub_local
  fs/ocfs2: fix repeated words in comments
  relay: use kvcalloc to alloc page array in relay_alloc_page_array
  proc: make config PROC_CHILDREN depend on PROC_FS
  fs: uninline inode_maybe_inc_iversion()
  ...
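A note on the ipc/msg item: the series replaces global atomic_t counters with percpu counters so senders and receivers stop bouncing a shared cacheline. A minimal sketch of that pattern, assuming a kernel context (the struct and function names here are hypothetical, not the actual ipc/msg code):

	/* Sketch of the percpu-counter pattern behind the ipc/msg change. */
	#include <linux/percpu_counter.h>

	struct msg_stats {
		struct percpu_counter msg_bytes;	/* was: atomic_t */
		struct percpu_counter msg_hdrs;
	};

	static int msg_stats_init(struct msg_stats *s)
	{
		int err = percpu_counter_init(&s->msg_bytes, 0, GFP_KERNEL);

		if (err)
			return err;
		err = percpu_counter_init(&s->msg_hdrs, 0, GFP_KERNEL);
		if (err)
			percpu_counter_destroy(&s->msg_bytes);
		return err;
	}

	static void msg_charge(struct msg_stats *s, size_t bytes)
	{
		/* The _local variants (added by this series) batch into the
		 * per-CPU delta and avoid touching the shared count on every
		 * send/receive. */
		percpu_counter_add_local(&s->msg_bytes, bytes);
		percpu_counter_add_local(&s->msg_hdrs, 1);
	}

	static s64 msg_bytes_sum(struct msg_stats *s)
	{
		/* Readers pay the cost of folding the per-CPU deltas. */
		return percpu_counter_sum(&s->msg_bytes);
	}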
fs/aio.c (9 changes)
@@ -951,16 +951,13 @@ static bool __get_reqs_available(struct kioctx *ctx)
 	local_irq_save(flags);
 	kcpu = this_cpu_ptr(ctx->cpu);
 	if (!kcpu->reqs_available) {
-		int old, avail = atomic_read(&ctx->reqs_available);
+		int avail = atomic_read(&ctx->reqs_available);
 
 		do {
 			if (avail < ctx->req_batch)
 				goto out;
-
-			old = avail;
-			avail = atomic_cmpxchg(&ctx->reqs_available,
-					       avail, avail - ctx->req_batch);
-		} while (avail != old);
+		} while (!atomic_try_cmpxchg(&ctx->reqs_available,
+					     &avail, avail - ctx->req_batch));
 
 		kcpu->reqs_available += ctx->req_batch;
 	}
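The same cmpxchg()-to-try_cmpxchg() conversion recurs in fs/buffer.c and fs/eventpoll.c below. A runnable userspace analogue of the loop above, using the C11 equivalent (atomic_compare_exchange_weak rewrites the expected value on failure, which is what removes the explicit old/new bookkeeping):

	/* compile with: cc -std=c11 demo.c */
	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdio.h>

	static _Atomic int reqs_available = 100;

	static bool take_batch(int batch)
	{
		int avail = atomic_load(&reqs_available);

		do {
			if (avail < batch)
				return false;
			/* On failure, 'avail' is reloaded with the current
			 * value, exactly like atomic_try_cmpxchg(). */
		} while (!atomic_compare_exchange_weak(&reqs_available,
						       &avail, avail - batch));
		return true;
	}

	int main(void)
	{
		printf("%d\n", take_batch(32));			/* 1 */
		printf("%d\n", atomic_load(&reqs_available));	/* 68 */
		return 0;
	}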
fs/buffer.c (14 changes)
@@ -1453,19 +1453,15 @@ EXPORT_SYMBOL(set_bh_page);
 
 static void discard_buffer(struct buffer_head * bh)
 {
-	unsigned long b_state, b_state_old;
+	unsigned long b_state;
 
 	lock_buffer(bh);
 	clear_buffer_dirty(bh);
 	bh->b_bdev = NULL;
-	b_state = bh->b_state;
-	for (;;) {
-		b_state_old = cmpxchg(&bh->b_state, b_state,
-				      (b_state & ~BUFFER_FLAGS_DISCARD));
-		if (b_state_old == b_state)
-			break;
-		b_state = b_state_old;
-	}
+	b_state = READ_ONCE(bh->b_state);
+	do {
+	} while (!try_cmpxchg(&bh->b_state, &b_state,
+			      b_state & ~BUFFER_FLAGS_DISCARD));
 	unlock_buffer(bh);
 }
fs/eventpoll.c
@@ -1065,7 +1065,7 @@ static inline bool list_add_tail_lockless(struct list_head *new,
 	 * added to the list from another CPU: the winner observes
 	 * new->next == new.
 	 */
-	if (cmpxchg(&new->next, new, head) != new)
+	if (!try_cmpxchg(&new->next, &new, head))
 		return false;
 
 	/*
fs/hfs/bnode.c
@@ -21,7 +21,6 @@ void hfs_bnode_read(struct hfs_bnode *node, void *buf, int off, int len)
 	int pagenum;
 	int bytes_read;
 	int bytes_to_read;
-	void *vaddr;
 
 	off += node->page_offset;
 	pagenum = off >> PAGE_SHIFT;
@@ -33,9 +32,7 @@ void hfs_bnode_read(struct hfs_bnode *node, void *buf, int off, int len)
 		page = node->page[pagenum];
 		bytes_to_read = min_t(int, len - bytes_read, PAGE_SIZE - off);
 
-		vaddr = kmap_atomic(page);
-		memcpy(buf + bytes_read, vaddr + off, bytes_to_read);
-		kunmap_atomic(vaddr);
+		memcpy_from_page(buf + bytes_read, page, off, bytes_to_read);
 
 		pagenum++;
 		off = 0; /* page offset only applies to the first page */
@@ -80,8 +77,7 @@ void hfs_bnode_write(struct hfs_bnode *node, void *buf, int off, int len)
 	off += node->page_offset;
 	page = node->page[0];
 
-	memcpy(kmap(page) + off, buf, len);
-	kunmap(page);
+	memcpy_to_page(page, off, buf, len);
 	set_page_dirty(page);
 }
 
@@ -105,8 +101,7 @@ void hfs_bnode_clear(struct hfs_bnode *node, int off, int len)
 	off += node->page_offset;
 	page = node->page[0];
 
-	memset(kmap(page) + off, 0, len);
-	kunmap(page);
+	memzero_page(page, off, len);
 	set_page_dirty(page);
 }
 
@@ -123,9 +118,7 @@ void hfs_bnode_copy(struct hfs_bnode *dst_node, int dst,
 	src_page = src_node->page[0];
 	dst_page = dst_node->page[0];
 
-	memcpy(kmap(dst_page) + dst, kmap(src_page) + src, len);
-	kunmap(src_page);
-	kunmap(dst_page);
+	memcpy_page(dst_page, dst, src_page, src, len);
 	set_page_dirty(dst_page);
 }
 
@@ -140,9 +133,9 @@ void hfs_bnode_move(struct hfs_bnode *node, int dst, int src, int len)
 	src += node->page_offset;
 	dst += node->page_offset;
 	page = node->page[0];
-	ptr = kmap(page);
+	ptr = kmap_local_page(page);
 	memmove(ptr + dst, ptr + src, len);
-	kunmap(page);
+	kunmap_local(ptr);
 	set_page_dirty(page);
 }
 
@@ -346,13 +339,14 @@ struct hfs_bnode *hfs_bnode_find(struct hfs_btree *tree, u32 num)
 		if (!test_bit(HFS_BNODE_NEW, &node->flags))
 			return node;
 
-	desc = (struct hfs_bnode_desc *)(kmap(node->page[0]) + node->page_offset);
+	desc = (struct hfs_bnode_desc *)(kmap_local_page(node->page[0]) +
+					 node->page_offset);
 	node->prev = be32_to_cpu(desc->prev);
 	node->next = be32_to_cpu(desc->next);
 	node->num_recs = be16_to_cpu(desc->num_recs);
 	node->type = desc->type;
 	node->height = desc->height;
-	kunmap(node->page[0]);
+	kunmap_local(desc);
 
 	switch (node->type) {
 	case HFS_NODE_HEADER:
@@ -436,14 +430,12 @@ struct hfs_bnode *hfs_bnode_create(struct hfs_btree *tree, u32 num)
 	}
 
 	pagep = node->page;
-	memset(kmap(*pagep) + node->page_offset, 0,
-	       min((int)PAGE_SIZE, (int)tree->node_size));
+	memzero_page(*pagep, node->page_offset,
+		     min((int)PAGE_SIZE, (int)tree->node_size));
 	set_page_dirty(*pagep);
-	kunmap(*pagep);
 	for (i = 1; i < tree->pages_per_bnode; i++) {
-		memset(kmap(*++pagep), 0, PAGE_SIZE);
+		memzero_page(*++pagep, 0, PAGE_SIZE);
 		set_page_dirty(*pagep);
-		kunmap(*pagep);
 	}
 	clear_bit(HFS_BNODE_NEW, &node->flags);
 	wake_up(&node->lock_wq);
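The hfs and hfsplus hunks above and below replace kmap()/kmap_atomic() either with kmap_local_page() or with the highmem copy helpers. Those helpers are essentially the local-map/copy/unmap sequence rolled into one call; simplified sketches of two of them (the real versions live in include/linux/highmem.h and add extra debug checks):

	static inline void memcpy_from_page(char *to, struct page *page,
					    size_t offset, size_t len)
	{
		char *from = kmap_local_page(page);

		memcpy(to, from + offset, len);
		kunmap_local(from);
	}

	static inline void memzero_page(struct page *page, size_t offset,
					size_t len)
	{
		char *addr = kmap_local_page(page);

		memset(addr + offset, 0, len);
		flush_dcache_page(page);
		kunmap_local(addr);
	}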
fs/hfs/btree.c
@@ -80,7 +80,8 @@ struct hfs_btree *hfs_btree_open(struct super_block *sb, u32 id, btree_keycmp keycmp)
 		goto free_inode;
 
 	/* Load the header */
-	head = (struct hfs_btree_header_rec *)(kmap(page) + sizeof(struct hfs_bnode_desc));
+	head = (struct hfs_btree_header_rec *)(kmap_local_page(page) +
+					       sizeof(struct hfs_bnode_desc));
 	tree->root = be32_to_cpu(head->root);
 	tree->leaf_count = be32_to_cpu(head->leaf_count);
 	tree->leaf_head = be32_to_cpu(head->leaf_head);
@@ -119,11 +120,12 @@ struct hfs_btree *hfs_btree_open(struct super_block *sb, u32 id, btree_keycmp keycmp)
 	tree->node_size_shift = ffs(size) - 1;
 	tree->pages_per_bnode = (tree->node_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
 
-	kunmap(page);
+	kunmap_local(head);
 	put_page(page);
 	return tree;
 
 fail_page:
+	kunmap_local(head);
 	put_page(page);
 free_inode:
 	tree->inode->i_mapping->a_ops = &hfs_aops;
@@ -169,7 +171,8 @@ void hfs_btree_write(struct hfs_btree *tree)
 		return;
 	/* Load the header */
 	page = node->page[0];
-	head = (struct hfs_btree_header_rec *)(kmap(page) + sizeof(struct hfs_bnode_desc));
+	head = (struct hfs_btree_header_rec *)(kmap_local_page(page) +
+					       sizeof(struct hfs_bnode_desc));
 
 	head->root = cpu_to_be32(tree->root);
 	head->leaf_count = cpu_to_be32(tree->leaf_count);
@@ -180,7 +183,7 @@ void hfs_btree_write(struct hfs_btree *tree)
 	head->attributes = cpu_to_be32(tree->attributes);
 	head->depth = cpu_to_be16(tree->depth);
 
-	kunmap(page);
+	kunmap_local(head);
 	set_page_dirty(page);
 	hfs_bnode_put(node);
 }
@@ -268,7 +271,7 @@ struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree)
 
 		off += node->page_offset;
 		pagep = node->page + (off >> PAGE_SHIFT);
-		data = kmap(*pagep);
+		data = kmap_local_page(*pagep);
 		off &= ~PAGE_MASK;
 		idx = 0;
 
@@ -281,7 +284,7 @@ struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree)
 					idx += i;
 					data[off] |= m;
 					set_page_dirty(*pagep);
-					kunmap(*pagep);
+					kunmap_local(data);
 					tree->free_nodes--;
 					mark_inode_dirty(tree->inode);
 					hfs_bnode_put(node);
@@ -290,14 +293,14 @@ struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree)
 				}
 			}
 			if (++off >= PAGE_SIZE) {
-				kunmap(*pagep);
-				data = kmap(*++pagep);
+				kunmap_local(data);
+				data = kmap_local_page(*++pagep);
 				off = 0;
 			}
 			idx += 8;
 			len--;
 		}
-		kunmap(*pagep);
+		kunmap_local(data);
 		nidx = node->next;
 		if (!nidx) {
 			printk(KERN_DEBUG "create new bmap node...\n");
@@ -313,7 +316,7 @@ struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree)
 		off = off16;
 		off += node->page_offset;
 		pagep = node->page + (off >> PAGE_SHIFT);
-		data = kmap(*pagep);
+		data = kmap_local_page(*pagep);
 		off &= ~PAGE_MASK;
 	}
 }
@@ -360,20 +363,20 @@ void hfs_bmap_free(struct hfs_bnode *node)
 	}
 	off += node->page_offset + nidx / 8;
 	page = node->page[off >> PAGE_SHIFT];
-	data = kmap(page);
+	data = kmap_local_page(page);
 	off &= ~PAGE_MASK;
 	m = 1 << (~nidx & 7);
 	byte = data[off];
 	if (!(byte & m)) {
 		pr_crit("trying to free free bnode %u(%d)\n",
 			node->this, node->type);
-		kunmap(page);
+		kunmap_local(data);
 		hfs_bnode_put(node);
 		return;
 	}
 	data[off] = byte & ~m;
 	set_page_dirty(page);
-	kunmap(page);
+	kunmap_local(data);
 	hfs_bnode_put(node);
 	tree->free_nodes++;
 	mark_inode_dirty(tree->inode);
fs/hfsplus/bitmap.c
@@ -39,7 +39,7 @@ int hfsplus_block_allocate(struct super_block *sb, u32 size,
 		start = size;
 		goto out;
 	}
-	pptr = kmap(page);
+	pptr = kmap_local_page(page);
 	curr = pptr + (offset & (PAGE_CACHE_BITS - 1)) / 32;
 	i = offset % 32;
 	offset &= ~(PAGE_CACHE_BITS - 1);
@@ -74,7 +74,7 @@ int hfsplus_block_allocate(struct super_block *sb, u32 size,
 			}
 			curr++;
 		}
-		kunmap(page);
+		kunmap_local(pptr);
 		offset += PAGE_CACHE_BITS;
 		if (offset >= size)
 			break;
@@ -84,7 +84,7 @@ int hfsplus_block_allocate(struct super_block *sb, u32 size,
 			start = size;
 			goto out;
 		}
-		curr = pptr = kmap(page);
+		curr = pptr = kmap_local_page(page);
 		if ((size ^ offset) / PAGE_CACHE_BITS)
 			end = pptr + PAGE_CACHE_BITS / 32;
 		else
@@ -127,7 +127,7 @@ found:
 			len -= 32;
 		}
 		set_page_dirty(page);
-		kunmap(page);
+		kunmap_local(pptr);
 		offset += PAGE_CACHE_BITS;
 		page = read_mapping_page(mapping, offset / PAGE_CACHE_BITS,
 					 NULL);
@@ -135,7 +135,7 @@ found:
 			start = size;
 			goto out;
 		}
-		pptr = kmap(page);
+		pptr = kmap_local_page(page);
 		curr = pptr;
 		end = pptr + PAGE_CACHE_BITS / 32;
 	}
@@ -151,7 +151,7 @@ last:
 done:
 	*curr = cpu_to_be32(n);
 	set_page_dirty(page);
-	kunmap(page);
+	kunmap_local(pptr);
 	*max = offset + (curr - pptr) * 32 + i - start;
 	sbi->free_blocks -= *max;
 	hfsplus_mark_mdb_dirty(sb);
@@ -185,7 +185,7 @@ int hfsplus_block_free(struct super_block *sb, u32 offset, u32 count)
 	page = read_mapping_page(mapping, pnr, NULL);
 	if (IS_ERR(page))
 		goto kaboom;
-	pptr = kmap(page);
+	pptr = kmap_local_page(page);
 	curr = pptr + (offset & (PAGE_CACHE_BITS - 1)) / 32;
 	end = pptr + PAGE_CACHE_BITS / 32;
 	len = count;
@@ -215,11 +215,11 @@ int hfsplus_block_free(struct super_block *sb, u32 offset, u32 count)
 		if (!count)
 			break;
 		set_page_dirty(page);
-		kunmap(page);
+		kunmap_local(pptr);
 		page = read_mapping_page(mapping, ++pnr, NULL);
 		if (IS_ERR(page))
 			goto kaboom;
-		pptr = kmap(page);
+		pptr = kmap_local_page(page);
 		curr = pptr;
 		end = pptr + PAGE_CACHE_BITS / 32;
 	}
@@ -231,7 +231,7 @@ done:
 	}
out:
 	set_page_dirty(page);
-	kunmap(page);
+	kunmap_local(pptr);
 	sbi->free_blocks += len;
 	hfsplus_mark_mdb_dirty(sb);
 	mutex_unlock(&sbi->alloc_mutex);
fs/hfsplus/bnode.c
@@ -29,14 +29,12 @@ void hfs_bnode_read(struct hfs_bnode *node, void *buf, int off, int len)
 	off &= ~PAGE_MASK;
 
 	l = min_t(int, len, PAGE_SIZE - off);
-	memcpy(buf, kmap(*pagep) + off, l);
-	kunmap(*pagep);
+	memcpy_from_page(buf, *pagep, off, l);
 
 	while ((len -= l) != 0) {
 		buf += l;
 		l = min_t(int, len, PAGE_SIZE);
-		memcpy(buf, kmap(*++pagep), l);
-		kunmap(*pagep);
+		memcpy_from_page(buf, *++pagep, 0, l);
 	}
 }
 
@@ -82,16 +80,14 @@ void hfs_bnode_write(struct hfs_bnode *node, void *buf, int off, int len)
 	off &= ~PAGE_MASK;
 
 	l = min_t(int, len, PAGE_SIZE - off);
-	memcpy(kmap(*pagep) + off, buf, l);
+	memcpy_to_page(*pagep, off, buf, l);
 	set_page_dirty(*pagep);
-	kunmap(*pagep);
 
 	while ((len -= l) != 0) {
 		buf += l;
 		l = min_t(int, len, PAGE_SIZE);
-		memcpy(kmap(*++pagep), buf, l);
+		memcpy_to_page(*++pagep, 0, buf, l);
 		set_page_dirty(*pagep);
-		kunmap(*pagep);
 	}
 }
 
@@ -112,15 +108,13 @@ void hfs_bnode_clear(struct hfs_bnode *node, int off, int len)
 	off &= ~PAGE_MASK;
 
 	l = min_t(int, len, PAGE_SIZE - off);
-	memset(kmap(*pagep) + off, 0, l);
+	memzero_page(*pagep, off, l);
 	set_page_dirty(*pagep);
-	kunmap(*pagep);
 
 	while ((len -= l) != 0) {
 		l = min_t(int, len, PAGE_SIZE);
-		memset(kmap(*++pagep), 0, l);
+		memzero_page(*++pagep, 0, l);
 		set_page_dirty(*pagep);
-		kunmap(*pagep);
 	}
 }
 
@@ -142,24 +136,20 @@ void hfs_bnode_copy(struct hfs_bnode *dst_node, int dst,
 
 	if (src == dst) {
 		l = min_t(int, len, PAGE_SIZE - src);
-		memcpy(kmap(*dst_page) + src, kmap(*src_page) + src, l);
-		kunmap(*src_page);
+		memcpy_page(*dst_page, src, *src_page, src, l);
 		set_page_dirty(*dst_page);
-		kunmap(*dst_page);
 
 		while ((len -= l) != 0) {
 			l = min_t(int, len, PAGE_SIZE);
-			memcpy(kmap(*++dst_page), kmap(*++src_page), l);
-			kunmap(*src_page);
+			memcpy_page(*++dst_page, 0, *++src_page, 0, l);
 			set_page_dirty(*dst_page);
-			kunmap(*dst_page);
 		}
 	} else {
 		void *src_ptr, *dst_ptr;
 
 		do {
-			src_ptr = kmap(*src_page) + src;
-			dst_ptr = kmap(*dst_page) + dst;
+			dst_ptr = kmap_local_page(*dst_page) + dst;
+			src_ptr = kmap_local_page(*src_page) + src;
 			if (PAGE_SIZE - src < PAGE_SIZE - dst) {
 				l = PAGE_SIZE - src;
 				src = 0;
@@ -171,9 +161,9 @@ void hfs_bnode_copy(struct hfs_bnode *dst_node, int dst,
 			}
 			l = min(len, l);
 			memcpy(dst_ptr, src_ptr, l);
-			kunmap(*src_page);
+			kunmap_local(src_ptr);
 			set_page_dirty(*dst_page);
-			kunmap(*dst_page);
+			kunmap_local(dst_ptr);
 			if (!dst)
 				dst_page++;
 			else
@@ -185,6 +175,7 @@ void hfs_bnode_copy(struct hfs_bnode *dst_node, int dst,
 void hfs_bnode_move(struct hfs_bnode *node, int dst, int src, int len)
 {
 	struct page **src_page, **dst_page;
+	void *src_ptr, *dst_ptr;
 	int l;
 
 	hfs_dbg(BNODE_MOD, "movebytes: %u,%u,%u\n", dst, src, len);
@@ -202,27 +193,28 @@ void hfs_bnode_move(struct hfs_bnode *node, int dst, int src, int len)
 
 		if (src == dst) {
 			while (src < len) {
-				memmove(kmap(*dst_page), kmap(*src_page), src);
-				kunmap(*src_page);
+				dst_ptr = kmap_local_page(*dst_page);
+				src_ptr = kmap_local_page(*src_page);
+				memmove(dst_ptr, src_ptr, src);
+				kunmap_local(src_ptr);
 				set_page_dirty(*dst_page);
-				kunmap(*dst_page);
+				kunmap_local(dst_ptr);
 				len -= src;
 				src = PAGE_SIZE;
 				src_page--;
 				dst_page--;
 			}
 			src -= len;
-			memmove(kmap(*dst_page) + src,
-				kmap(*src_page) + src, len);
-			kunmap(*src_page);
+			dst_ptr = kmap_local_page(*dst_page);
+			src_ptr = kmap_local_page(*src_page);
+			memmove(dst_ptr + src, src_ptr + src, len);
+			kunmap_local(src_ptr);
 			set_page_dirty(*dst_page);
-			kunmap(*dst_page);
+			kunmap_local(dst_ptr);
 		} else {
-			void *src_ptr, *dst_ptr;
-
 			do {
-				src_ptr = kmap(*src_page) + src;
-				dst_ptr = kmap(*dst_page) + dst;
+				dst_ptr = kmap_local_page(*dst_page) + dst;
+				src_ptr = kmap_local_page(*src_page) + src;
 				if (src < dst) {
 					l = src;
 					src = PAGE_SIZE;
@@ -234,9 +226,9 @@ void hfs_bnode_move(struct hfs_bnode *node, int dst, int src, int len)
 				}
 				l = min(len, l);
 				memmove(dst_ptr - l, src_ptr - l, l);
-				kunmap(*src_page);
+				kunmap_local(src_ptr);
 				set_page_dirty(*dst_page);
-				kunmap(*dst_page);
+				kunmap_local(dst_ptr);
 				if (dst == PAGE_SIZE)
 					dst_page--;
 				else
@@ -251,26 +243,27 @@ void hfs_bnode_move(struct hfs_bnode *node, int dst, int src, int len)
 
 		if (src == dst) {
 			l = min_t(int, len, PAGE_SIZE - src);
-			memmove(kmap(*dst_page) + src,
-				kmap(*src_page) + src, l);
-			kunmap(*src_page);
+
+			dst_ptr = kmap_local_page(*dst_page) + src;
+			src_ptr = kmap_local_page(*src_page) + src;
+			memmove(dst_ptr, src_ptr, l);
+			kunmap_local(src_ptr);
 			set_page_dirty(*dst_page);
-			kunmap(*dst_page);
+			kunmap_local(dst_ptr);
 
 			while ((len -= l) != 0) {
 				l = min_t(int, len, PAGE_SIZE);
-				memmove(kmap(*++dst_page),
-					kmap(*++src_page), l);
-				kunmap(*src_page);
+				dst_ptr = kmap_local_page(*++dst_page);
+				src_ptr = kmap_local_page(*++src_page);
+				memmove(dst_ptr, src_ptr, l);
+				kunmap_local(src_ptr);
 				set_page_dirty(*dst_page);
-				kunmap(*dst_page);
+				kunmap_local(dst_ptr);
 			}
 		} else {
-			void *src_ptr, *dst_ptr;
-
 			do {
-				src_ptr = kmap(*src_page) + src;
-				dst_ptr = kmap(*dst_page) + dst;
+				dst_ptr = kmap_local_page(*dst_page) + dst;
+				src_ptr = kmap_local_page(*src_page) + src;
 				if (PAGE_SIZE - src <
 						PAGE_SIZE - dst) {
 					l = PAGE_SIZE - src;
@@ -283,9 +276,9 @@ void hfs_bnode_move(struct hfs_bnode *node, int dst, int src, int len)
 			}
 			l = min(len, l);
 			memmove(dst_ptr, src_ptr, l);
-			kunmap(*src_page);
+			kunmap_local(src_ptr);
 			set_page_dirty(*dst_page);
-			kunmap(*dst_page);
+			kunmap_local(dst_ptr);
 			if (!dst)
 				dst_page++;
 			else
@@ -498,14 +491,14 @@ struct hfs_bnode *hfs_bnode_find(struct hfs_btree *tree, u32 num)
 		if (!test_bit(HFS_BNODE_NEW, &node->flags))
 			return node;
 
-	desc = (struct hfs_bnode_desc *)(kmap(node->page[0]) +
-					 node->page_offset);
+	desc = (struct hfs_bnode_desc *)(kmap_local_page(node->page[0]) +
+					 node->page_offset);
 	node->prev = be32_to_cpu(desc->prev);
 	node->next = be32_to_cpu(desc->next);
 	node->num_recs = be16_to_cpu(desc->num_recs);
 	node->type = desc->type;
 	node->height = desc->height;
-	kunmap(node->page[0]);
+	kunmap_local(desc);
 
 	switch (node->type) {
 	case HFS_NODE_HEADER:
@@ -589,14 +582,12 @@ struct hfs_bnode *hfs_bnode_create(struct hfs_btree *tree, u32 num)
 	}
 
 	pagep = node->page;
-	memset(kmap(*pagep) + node->page_offset, 0,
-	       min_t(int, PAGE_SIZE, tree->node_size));
+	memzero_page(*pagep, node->page_offset,
+		     min_t(int, PAGE_SIZE, tree->node_size));
 	set_page_dirty(*pagep);
-	kunmap(*pagep);
 	for (i = 1; i < tree->pages_per_bnode; i++) {
-		memset(kmap(*++pagep), 0, PAGE_SIZE);
+		memzero_page(*++pagep, 0, PAGE_SIZE);
 		set_page_dirty(*pagep);
-		kunmap(*pagep);
 	}
 	clear_bit(HFS_BNODE_NEW, &node->flags);
 	wake_up(&node->lock_wq);
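One detail worth noting in the two-page paths above: kmap_local mappings are stack-like, so they must be released in reverse order of acquisition, which is why the conversions map dst then src but unmap src before dst. A minimal illustration (the helper name is hypothetical, not from the patch):

	static void copy_one_page(struct page *dst_page, struct page *src_page)
	{
		void *dst = kmap_local_page(dst_page);
		void *src = kmap_local_page(src_page);

		memcpy(dst, src, PAGE_SIZE);
		kunmap_local(src);	/* mapped last, unmapped first */
		kunmap_local(dst);
	}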
fs/hfsplus/btree.c
@@ -163,7 +163,7 @@ struct hfs_btree *hfs_btree_open(struct super_block *sb, u32 id)
 		goto free_inode;
 
 	/* Load the header */
-	head = (struct hfs_btree_header_rec *)(kmap(page) +
+	head = (struct hfs_btree_header_rec *)(kmap_local_page(page) +
 		sizeof(struct hfs_bnode_desc));
 	tree->root = be32_to_cpu(head->root);
 	tree->leaf_count = be32_to_cpu(head->leaf_count);
@@ -240,11 +240,12 @@ struct hfs_btree *hfs_btree_open(struct super_block *sb, u32 id)
 			(tree->node_size + PAGE_SIZE - 1) >>
 			PAGE_SHIFT;
 
-	kunmap(page);
+	kunmap_local(head);
 	put_page(page);
 	return tree;
 
 fail_page:
+	kunmap_local(head);
 	put_page(page);
 free_inode:
 	tree->inode->i_mapping->a_ops = &hfsplus_aops;
@@ -291,7 +292,7 @@ int hfs_btree_write(struct hfs_btree *tree)
 		return -EIO;
 	/* Load the header */
 	page = node->page[0];
-	head = (struct hfs_btree_header_rec *)(kmap(page) +
+	head = (struct hfs_btree_header_rec *)(kmap_local_page(page) +
 		sizeof(struct hfs_bnode_desc));
 
 	head->root = cpu_to_be32(tree->root);
@@ -303,7 +304,7 @@ int hfs_btree_write(struct hfs_btree *tree)
 	head->attributes = cpu_to_be32(tree->attributes);
 	head->depth = cpu_to_be16(tree->depth);
 
-	kunmap(page);
+	kunmap_local(head);
 	set_page_dirty(page);
 	hfs_bnode_put(node);
 	return 0;
@@ -394,7 +395,7 @@ struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree)
 
 		off += node->page_offset;
 		pagep = node->page + (off >> PAGE_SHIFT);
-		data = kmap(*pagep);
+		data = kmap_local_page(*pagep);
 		off &= ~PAGE_MASK;
 		idx = 0;
 
@@ -407,7 +408,7 @@ struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree)
 					idx += i;
 					data[off] |= m;
 					set_page_dirty(*pagep);
-					kunmap(*pagep);
+					kunmap_local(data);
 					tree->free_nodes--;
 					mark_inode_dirty(tree->inode);
 					hfs_bnode_put(node);
@@ -417,14 +418,14 @@ struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree)
 				}
 			}
 			if (++off >= PAGE_SIZE) {
-				kunmap(*pagep);
-				data = kmap(*++pagep);
+				kunmap_local(data);
+				data = kmap_local_page(*++pagep);
 				off = 0;
 			}
 			idx += 8;
 			len--;
 		}
-		kunmap(*pagep);
+		kunmap_local(data);
 		nidx = node->next;
 		if (!nidx) {
 			hfs_dbg(BNODE_MOD, "create new bmap node\n");
@@ -440,7 +441,7 @@ struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree)
 		off = off16;
 		off += node->page_offset;
 		pagep = node->page + (off >> PAGE_SHIFT);
-		data = kmap(*pagep);
+		data = kmap_local_page(*pagep);
 		off &= ~PAGE_MASK;
 	}
 }
@@ -490,7 +491,7 @@ void hfs_bmap_free(struct hfs_bnode *node)
 	}
 	off += node->page_offset + nidx / 8;
 	page = node->page[off >> PAGE_SHIFT];
-	data = kmap(page);
+	data = kmap_local_page(page);
 	off &= ~PAGE_MASK;
 	m = 1 << (~nidx & 7);
 	byte = data[off];
@@ -498,13 +499,13 @@ void hfs_bmap_free(struct hfs_bnode *node)
 		pr_crit("trying to free free bnode "
 			"%u(%d)\n",
 			node->this, node->type);
-		kunmap(page);
+		kunmap_local(data);
 		hfs_bnode_put(node);
 		return;
 	}
 	data[off] = byte & ~m;
 	set_page_dirty(page);
-	kunmap(page);
+	kunmap_local(data);
 	hfs_bnode_put(node);
 	tree->free_nodes++;
 	mark_inode_dirty(tree->inode);
fs/isofs/compress.c
@@ -67,8 +67,7 @@ static loff_t zisofs_uncompress_block(struct inode *inode, loff_t block_start,
 		for ( i = 0 ; i < pcount ; i++ ) {
 			if (!pages[i])
 				continue;
-			memset(page_address(pages[i]), 0, PAGE_SIZE);
-			flush_dcache_page(pages[i]);
+			memzero_page(pages[i], 0, PAGE_SIZE);
 			SetPageUptodate(pages[i]);
 		}
 		return ((loff_t)pcount) << PAGE_SHIFT;
@@ -120,7 +119,7 @@ static loff_t zisofs_uncompress_block(struct inode *inode, loff_t block_start,
 		       zerr != Z_STREAM_END) {
 			if (!stream.avail_out) {
 				if (pages[curpage]) {
-					stream.next_out = page_address(pages[curpage])
+					stream.next_out = kmap_local_page(pages[curpage])
 							+ poffset;
 					stream.avail_out = PAGE_SIZE - poffset;
 					poffset = 0;
@@ -176,6 +175,10 @@ static loff_t zisofs_uncompress_block(struct inode *inode, loff_t block_start,
 					flush_dcache_page(pages[curpage]);
 					SetPageUptodate(pages[curpage]);
 				}
+				if (stream.next_out != (unsigned char *)zisofs_sink_page) {
+					kunmap_local(stream.next_out);
+					stream.next_out = NULL;
+				}
 				curpage++;
 			}
 			if (!stream.avail_in)
@@ -183,6 +186,8 @@ static loff_t zisofs_uncompress_block(struct inode *inode, loff_t block_start,
 	}
inflate_out:
 	zlib_inflateEnd(&stream);
+	if (stream.next_out && stream.next_out != (unsigned char *)zisofs_sink_page)
+		kunmap_local(stream.next_out);
 
z_eio:
 	mutex_unlock(&zisofs_zlib_lock);
@@ -283,9 +288,7 @@ static int zisofs_fill_pages(struct inode *inode, int full_page, int pcount,
 	}
 
 	if (poffset && *pages) {
-		memset(page_address(*pages) + poffset, 0,
-		       PAGE_SIZE - poffset);
-		flush_dcache_page(*pages);
+		memzero_page(*pages, poffset, PAGE_SIZE - poffset);
 		SetPageUptodate(*pages);
 	}
 	return 0;
@@ -343,10 +346,8 @@ static int zisofs_read_folio(struct file *file, struct folio *folio)
 	for (i = 0; i < pcount; i++, index++) {
 		if (i != full_page)
 			pages[i] = grab_cache_page_nowait(mapping, index);
-		if (pages[i]) {
+		if (pages[i])
 			ClearPageError(pages[i]);
-			kmap(pages[i]);
-		}
 	}
 
 	err = zisofs_fill_pages(inode, full_page, pcount, pages);
@@ -357,7 +358,6 @@ static int zisofs_read_folio(struct file *file, struct folio *folio)
 			flush_dcache_page(pages[i]);
 			if (i == full_page && err)
 				SetPageError(pages[i]);
-			kunmap(pages[i]);
 			unlock_page(pages[i]);
 			if (i != full_page)
 				put_page(pages[i]);
fs/libfs.c (46 changes)
@@ -15,6 +15,7 @@
 #include <linux/mutex.h>
 #include <linux/namei.h>
 #include <linux/exportfs.h>
+#include <linux/iversion.h>
 #include <linux/writeback.h>
 #include <linux/buffer_head.h> /* sync_mapping_buffers */
 #include <linux/fs_context.h>
@@ -1520,3 +1521,48 @@ void generic_set_encrypted_ci_d_ops(struct dentry *dentry)
 #endif
 }
 EXPORT_SYMBOL(generic_set_encrypted_ci_d_ops);
+
+/**
+ * inode_maybe_inc_iversion - increments i_version
+ * @inode: inode with the i_version that should be updated
+ * @force: increment the counter even if it's not necessary?
+ *
+ * Every time the inode is modified, the i_version field must be seen to have
+ * changed by any observer.
+ *
+ * If "force" is set or the QUERIED flag is set, then ensure that we increment
+ * the value, and clear the queried flag.
+ *
+ * In the common case where neither is set, then we can return "false" without
+ * updating i_version.
+ *
+ * If this function returns false, and no other metadata has changed, then we
+ * can avoid logging the metadata.
+ */
+bool inode_maybe_inc_iversion(struct inode *inode, bool force)
+{
+	u64 cur, new;
+
+	/*
+	 * The i_version field is not strictly ordered with any other inode
+	 * information, but the legacy inode_inc_iversion code used a spinlock
+	 * to serialize increments.
+	 *
+	 * Here, we add full memory barriers to ensure that any de-facto
+	 * ordering with other info is preserved.
+	 *
+	 * This barrier pairs with the barrier in inode_query_iversion()
+	 */
+	smp_mb();
+	cur = inode_peek_iversion_raw(inode);
+	do {
+		/* If flag is clear then we needn't do anything */
+		if (!force && !(cur & I_VERSION_QUERIED))
+			return false;
+
+		/* Since lowest bit is flag, add 2 to avoid it */
+		new = (cur & ~I_VERSION_QUERIED) + I_VERSION_INCREMENT;
+	} while (!atomic64_try_cmpxchg(&inode->i_version, &cur, new));
+	return true;
+}
+EXPORT_SYMBOL(inode_maybe_inc_iversion);
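How the uninlined inode_maybe_inc_iversion() is meant to be used on the write side — a sketch, assuming the usual pairing with inode_query_iversion() on the read side (the function name example_setattr is hypothetical):

	static void example_setattr(struct inode *inode)
	{
		if (inode_maybe_inc_iversion(inode, false))
			mark_inode_dirty(inode);	/* change must reach disk */
		/* else: nobody queried i_version since the last bump, so the
		 * stored value is already new enough and logging is skipped. */
	}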
fs/nilfs2/btree.c
@@ -1668,8 +1668,7 @@ static int nilfs_btree_check_delete(struct nilfs_bmap *btree, __u64 key)
 	maxkey = nilfs_btree_node_get_key(node, nchildren - 1);
 	nextmaxkey = (nchildren > 1) ?
 		nilfs_btree_node_get_key(node, nchildren - 2) : 0;
-	if (bh != NULL)
-		brelse(bh);
+	brelse(bh);
 
 	return (maxkey == key) && (nextmaxkey < NILFS_BMAP_LARGE_LOW);
 }
@@ -1717,8 +1716,7 @@ static int nilfs_btree_gather_data(struct nilfs_bmap *btree,
 		ptrs[i] = le64_to_cpu(dptrs[i]);
 	}
 
-	if (bh != NULL)
-		brelse(bh);
+	brelse(bh);
 
 	return nitems;
 }
fs/nilfs2/segment.c
@@ -875,9 +875,11 @@ static int nilfs_segctor_create_checkpoint(struct nilfs_sc_info *sci)
 			nilfs_mdt_mark_dirty(nilfs->ns_cpfile);
 		nilfs_cpfile_put_checkpoint(
 			nilfs->ns_cpfile, nilfs->ns_cno, bh_cp);
-	} else
-		WARN_ON(err == -EINVAL || err == -ENOENT);
-
+	} else if (err == -EINVAL || err == -ENOENT) {
+		nilfs_error(sci->sc_super,
+			    "checkpoint creation failed due to metadata corruption.");
+		err = -EIO;
+	}
 	return err;
 }
 
@@ -891,7 +893,11 @@ static int nilfs_segctor_fill_in_checkpoint(struct nilfs_sc_info *sci)
 	err = nilfs_cpfile_get_checkpoint(nilfs->ns_cpfile, nilfs->ns_cno, 0,
 					  &raw_cp, &bh_cp);
 	if (unlikely(err)) {
-		WARN_ON(err == -EINVAL || err == -ENOENT);
+		if (err == -EINVAL || err == -ENOENT) {
+			nilfs_error(sci->sc_super,
+				    "checkpoint finalization failed due to metadata corruption.");
+			err = -EIO;
+		}
 		goto failed_ibh;
 	}
 	raw_cp->cp_snapshot_list.ssl_next = 0;
@@ -2235,7 +2241,6 @@ int nilfs_construct_segment(struct super_block *sb)
 	struct the_nilfs *nilfs = sb->s_fs_info;
 	struct nilfs_sc_info *sci = nilfs->ns_writer;
 	struct nilfs_transaction_info *ti;
-	int err;
 
 	if (!sci)
 		return -EROFS;
@@ -2243,8 +2248,7 @@ int nilfs_construct_segment(struct super_block *sb)
 	/* A call inside transactions causes a deadlock. */
 	BUG_ON((ti = current->journal_info) && ti->ti_magic == NILFS_TI_MAGIC);
 
-	err = nilfs_segctor_sync(sci);
-	return err;
+	return nilfs_segctor_sync(sci);
 }
 
 /**
fs/ntfs/attrib.c
@@ -594,17 +594,37 @@ static int ntfs_attr_find(const ATTR_TYPE type, const ntfschar *name,
 	for (;;	a = (ATTR_RECORD*)((u8*)a + le32_to_cpu(a->length))) {
 		u8 *mrec_end = (u8 *)ctx->mrec +
 		               le32_to_cpu(ctx->mrec->bytes_allocated);
-		u8 *name_end = (u8 *)a + le16_to_cpu(a->name_offset) +
-			       a->name_length * sizeof(ntfschar);
-		if ((u8*)a < (u8*)ctx->mrec || (u8*)a > mrec_end ||
-		    name_end > mrec_end)
+		u8 *name_end;
+
+		/* check whether ATTR_RECORD wrap */
+		if ((u8 *)a < (u8 *)ctx->mrec)
 			break;
+
+		/* check whether Attribute Record Header is within bounds */
+		if ((u8 *)a > mrec_end ||
+		    (u8 *)a + sizeof(ATTR_RECORD) > mrec_end)
+			break;
+
+		/* check whether ATTR_RECORD's name is within bounds */
+		name_end = (u8 *)a + le16_to_cpu(a->name_offset) +
+			   a->name_length * sizeof(ntfschar);
+		if (name_end > mrec_end)
+			break;
+
 		ctx->attr = a;
 		if (unlikely(le32_to_cpu(a->type) > le32_to_cpu(type) ||
 				a->type == AT_END))
 			return -ENOENT;
 		if (unlikely(!a->length))
 			break;
+
+		/* check whether ATTR_RECORD's length wrap */
+		if ((u8 *)a + le32_to_cpu(a->length) < (u8 *)a)
+			break;
+		/* check whether ATTR_RECORD's length is within bounds */
+		if ((u8 *)a + le32_to_cpu(a->length) > mrec_end)
+			break;
+
 		if (a->type != type)
 			continue;
 		/*
fs/ntfs/inode.c
@@ -1829,6 +1829,13 @@ int ntfs_read_inode_mount(struct inode *vi)
 		goto err_out;
 	}
 
+	/* Sanity check offset to the first attribute */
+	if (le16_to_cpu(m->attrs_offset) >= le32_to_cpu(m->bytes_allocated)) {
+		ntfs_error(sb, "Incorrect mft offset to the first attribute %u in superblock.",
+			   le16_to_cpu(m->attrs_offset));
+		goto err_out;
+	}
+
 	/* Need this to sanity check attribute list references to $MFT. */
 	vi->i_generation = ni->seq_no = le16_to_cpu(m->sequence_number);
fs/ocfs2/ocfs2_fs.h
@@ -527,7 +527,7 @@ struct ocfs2_extent_block
  * value -1 (0xFFFF) is OCFS2_INVALID_SLOT.  This marks a slot empty.
  */
 struct ocfs2_slot_map {
-/*00*/	__le16 sm_slots[0];
+/*00*/	DECLARE_FLEX_ARRAY(__le16, sm_slots);
 /*
  * Actual on-disk size is one block.  OCFS2_MAX_SLOTS is 255,
  * 255 * sizeof(__le16) == 512B, within the 512B block minimum blocksize.
@@ -548,7 +548,7 @@ struct ocfs2_extended_slot
  * i_size.
  */
 struct ocfs2_slot_map_extended {
-/*00*/	struct ocfs2_extended_slot se_slots[0];
+/*00*/	DECLARE_FLEX_ARRAY(struct ocfs2_extended_slot, se_slots);
 /*
  * Actual size is i_size of the slot_map system file.  It should
  * match s_max_slots * sizeof(struct ocfs2_extended_slot)
@@ -727,7 +727,7 @@ struct ocfs2_dinode {
 		struct ocfs2_extent_list	i_list;
 		struct ocfs2_truncate_log	i_dealloc;
 		struct ocfs2_inline_data	i_data;
-		__u8				i_symlink[0];
+		DECLARE_FLEX_ARRAY(__u8,	i_symlink);
 	} id2;
 /* Actual on-disk size is one block */
 };
@@ -892,7 +892,7 @@ struct ocfs2_group_desc
 /*30*/	struct ocfs2_block_check bg_check;	/* Error checking */
 	__le64   bg_reserved2;
 /*40*/	union {
-		__u8    bg_bitmap[0];
+		DECLARE_FLEX_ARRAY(__u8, bg_bitmap);
 		struct {
 			/*
 			 * Block groups may be discontiguous when
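The [0]-length arrays being replaced above are a GNU extension that defeats compile-time and run-time bounds checking (FORTIFY_SOURCE, UBSAN). DECLARE_FLEX_ARRAY() exists because a C99 flexible array member cannot be the sole member of a struct or sit directly inside a union; the macro wraps the member in an anonymous struct to make that legal. Its definition is roughly (simplified from include/linux/stddef.h):

	#define DECLARE_FLEX_ARRAY(TYPE, NAME)		\
		struct {				\
			struct { } __empty_ ## NAME;	\
			TYPE NAME[];			\
		}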
fs/ocfs2/refcounttree.c
@@ -2614,7 +2614,7 @@ static inline unsigned int ocfs2_cow_align_length(struct super_block *sb,
 }
 
 /*
- * Calculate out the start and number of virtual clusters we need to to CoW.
+ * Calculate out the start and number of virtual clusters we need to CoW.
 *
 * cpos is vitual start cluster position we want to do CoW in a
 * file and write_len is the cluster length.
fs/ocfs2/stackglue.c
@@ -334,10 +334,10 @@ int ocfs2_cluster_connect(const char *stack_name,
 		goto out;
 	}
 
-	strlcpy(new_conn->cc_name, group, GROUP_NAME_MAX + 1);
+	strscpy(new_conn->cc_name, group, GROUP_NAME_MAX + 1);
 	new_conn->cc_namelen = grouplen;
 	if (cluster_name_len)
-		strlcpy(new_conn->cc_cluster_name, cluster_name,
+		strscpy(new_conn->cc_cluster_name, cluster_name,
 			CLUSTER_NAME_MAX + 1);
 	new_conn->cc_cluster_name_len = cluster_name_len;
 	new_conn->cc_recovery_handler = recovery_handler;
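The strlcpy() to strscpy() swaps here and in super.c below follow the tree-wide deprecation of strlcpy(): it returns strlen(src), so it must always walk the entire source string even when the destination is tiny, and it reports truncation only indirectly. strscpy() never reads past what it needs, always NUL-terminates, and returns -E2BIG on truncation. A sketch of the calling pattern (copy_label is a made-up example):

	static void copy_label(const char *src)
	{
		char buf[16];
		ssize_t n = strscpy(buf, src, sizeof(buf));

		if (n == -E2BIG)
			pr_warn("label truncated\n");	/* buf still NUL-terminated */
	}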
fs/ocfs2/suballoc.h
@@ -106,7 +106,7 @@ int ocfs2_claim_clusters(handle_t *handle,
 			 u32 *cluster_start,
 			 u32 *num_clusters);
 /*
- * Use this variant of ocfs2_claim_clusters to specify a maxiumum
+ * Use this variant of ocfs2_claim_clusters to specify a maximum
  * number of clusters smaller than the allocation reserved.
  */
 int __ocfs2_claim_clusters(handle_t *handle,
fs/ocfs2/super.c
@@ -2219,7 +2219,7 @@ static int ocfs2_initialize_super(struct super_block *sb,
 		goto out_journal;
 	}
 
-	strlcpy(osb->vol_label, di->id2.i_super.s_label,
+	strscpy(osb->vol_label, di->id2.i_super.s_label,
 		OCFS2_MAX_VOL_LABEL_LEN);
 	osb->root_blkno = le64_to_cpu(di->id2.i_super.s_root_blkno);
 	osb->system_dir_blkno = le64_to_cpu(di->id2.i_super.s_system_dir_blkno);
fs/proc/Kconfig
@@ -92,6 +92,7 @@ config PROC_PAGE_MONITOR
 
 config PROC_CHILDREN
 	bool "Include /proc/<pid>/task/<tid>/children file"
+	depends on PROC_FS
 	default n
 	help
 	  Provides a fast way to retrieve first level children pids of a task. See
Some files were not shown because too many files have changed in this diff.