Merge tag 'xfs-for-linus-3.15-rc1' of git://oss.sgi.com/xfs/xfs
Pull xfs update from Dave Chinner:
"There are a couple of new fallocate features in this request - it was
decided that it was easiest to push them through the XFS tree using
topic branches and have the ext4 support be based on those branches.
Hence you may see some overlap with the ext4 tree merge depending on
how they include those topic branches in their tree. Other than
that, there is O_TMPFILE support, some cleanups and bug fixes.
The main changes in the XFS tree for 3.15-rc1 are:
- O_TMPFILE support
- allowing AIO+DIO writes beyond EOF
- FALLOC_FL_COLLAPSE_RANGE support for fallocate syscall and XFS
implementation
- FALLOC_FL_ZERO_RANGE support for fallocate syscall and XFS
implementation
- IO verifier cleanup and rework
- stack usage reduction changes
- vm_map_ram NOIO context fixes to remove lockdep warnings
- various bug fixes and cleanups"
* tag 'xfs-for-linus-3.15-rc1' of git://oss.sgi.com/xfs/xfs: (34 commits)
xfs: fix directory hash ordering bug
xfs: extra semi-colon breaks a condition
xfs: Add support for FALLOC_FL_ZERO_RANGE
fs: Introduce FALLOC_FL_ZERO_RANGE flag for fallocate
xfs: inode log reservations are still too small
xfs: xfs_check_page_type buffer checks need help
xfs: avoid AGI/AGF deadlock scenario for inode chunk allocation
xfs: use NOIO contexts for vm_map_ram
xfs: don't leak EFSBADCRC to userspace
xfs: fix directory inode iolock lockdep false positive
xfs: allocate xfs_da_args to reduce stack footprint
xfs: always do log forces via the workqueue
xfs: modify verifiers to differentiate CRC from other errors
xfs: print useful caller information in xfs_error_report
xfs: add xfs_verifier_error()
xfs: add helper for updating checksums on xfs_bufs
xfs: add helper for verifying checksums on xfs_bufs
xfs: Use defines for CRC offsets in all cases
xfs: skip pointless CRC updates after verifier failures
xfs: Add support FALLOC_FL_COLLAPSE_RANGE for fallocate
...
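The two new fallocate modes are easiest to see from userspace. Below is a minimal, hypothetical C sketch (not part of this merge): the file name and sizes are illustrative, the flags need linux/falloc.h from a 3.15-era kernel, and collapse range additionally requires a block-aligned offset/length and a range that ends before EOF.

#define _GNU_SOURCE
#include <fcntl.h>
#include <linux/falloc.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* Assumes "testfile" exists and is larger than 5 MiB. */
	int fd = open("testfile", O_RDWR);
	if (fd < 0)
		return 1;

	/* Zero 1 MiB at offset 4 MiB without writing zeros through the page cache. */
	if (fallocate(fd, FALLOC_FL_ZERO_RANGE, 4 << 20, 1 << 20))
		perror("zero range");

	/*
	 * Remove 1 MiB at offset 4 MiB; everything after the range shifts
	 * left and the file shrinks. The mode must be used alone and the
	 * range must not reach EOF, or do_fallocate() returns EINVAL (see
	 * the fs/open.c hunks below).
	 */
	if (fallocate(fd, FALLOC_FL_COLLAPSE_RANGE, 4 << 20, 1 << 20))
		perror("collapse range");

	close(fd);
	return 0;
}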
+12 -6
@@ -1193,13 +1193,19 @@ do_blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
 	}
 
 	/*
-	 * For file extending writes updating i_size before data
-	 * writeouts complete can expose uninitialized blocks. So
-	 * even for AIO, we need to wait for i/o to complete before
-	 * returning in this case.
+	 * For file extending writes updating i_size before data writeouts
+	 * complete can expose uninitialized blocks in dumb filesystems.
+	 * In that case we need to wait for I/O completion even if asked
+	 * for an asynchronous write.
 	 */
-	dio->is_async = !is_sync_kiocb(iocb) && !((rw & WRITE) &&
-		(end > i_size_read(inode)));
+	if (is_sync_kiocb(iocb))
+		dio->is_async = false;
+	else if (!(dio->flags & DIO_ASYNC_EXTEND) &&
+	    (rw & WRITE) && end > i_size_read(inode))
+		dio->is_async = false;
+	else
+		dio->is_async = true;
+
 	dio->inode = inode;
 	dio->rw = rw;
@@ -231,7 +231,13 @@ int do_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
 		return -EINVAL;
 
 	/* Return error if mode is not supported */
-	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
+	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |
+		     FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE))
+		return -EOPNOTSUPP;
+
+	/* Punch hole and zero range are mutually exclusive */
+	if ((mode & (FALLOC_FL_PUNCH_HOLE | FALLOC_FL_ZERO_RANGE)) ==
+	    (FALLOC_FL_PUNCH_HOLE | FALLOC_FL_ZERO_RANGE))
 		return -EOPNOTSUPP;
 
 	/* Punch hole must have keep size set */
@@ -239,11 +245,20 @@ int do_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
 	    !(mode & FALLOC_FL_KEEP_SIZE))
 		return -EOPNOTSUPP;
 
+	/* Collapse range should only be used exclusively. */
+	if ((mode & FALLOC_FL_COLLAPSE_RANGE) &&
+	    (mode & ~FALLOC_FL_COLLAPSE_RANGE))
+		return -EINVAL;
+
 	if (!(file->f_mode & FMODE_WRITE))
 		return -EBADF;
 
-	/* It's not possible punch hole on append only file */
-	if (mode & FALLOC_FL_PUNCH_HOLE && IS_APPEND(inode))
+	/*
+	 * It's not possible to punch hole or perform collapse range
+	 * on append only file
+	 */
+	if (mode & (FALLOC_FL_PUNCH_HOLE | FALLOC_FL_COLLAPSE_RANGE)
+	    && IS_APPEND(inode))
 		return -EPERM;
 
 	if (IS_IMMUTABLE(inode))
@@ -271,6 +286,14 @@ int do_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
 	if (((offset + len) > inode->i_sb->s_maxbytes) || ((offset + len) < 0))
 		return -EFBIG;
 
+	/*
+	 * There is no need to overlap collapse range with EOF, in which case
+	 * it is effectively a truncate operation
+	 */
+	if ((mode & FALLOC_FL_COLLAPSE_RANGE) &&
+	    (offset + len >= i_size_read(inode)))
+		return -EINVAL;
+
 	if (!file->f_op->fallocate)
 		return -EOPNOTSUPP;
+20 -1
@@ -65,12 +65,31 @@ kmem_alloc(size_t size, xfs_km_flags_t flags)
 void *
 kmem_zalloc_large(size_t size, xfs_km_flags_t flags)
 {
+	unsigned noio_flag = 0;
 	void	*ptr;
+	gfp_t	lflags;
 
 	ptr = kmem_zalloc(size, flags | KM_MAYFAIL);
 	if (ptr)
 		return ptr;
-	return vzalloc(size);
+
+	/*
+	 * __vmalloc() will allocate data pages and auxillary structures (e.g.
+	 * pagetables) with GFP_KERNEL, yet we may be under GFP_NOFS context
+	 * here. Hence we need to tell memory reclaim that we are in such a
+	 * context via PF_MEMALLOC_NOIO to prevent memory reclaim re-entering
+	 * the filesystem here and potentially deadlocking.
+	 */
+	if ((current->flags & PF_FSTRANS) || (flags & KM_NOFS))
+		noio_flag = memalloc_noio_save();
+
+	lflags = kmem_flags_convert(flags);
+	ptr = __vmalloc(size, lflags | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL);
+
+	if ((current->flags & PF_FSTRANS) || (flags & KM_NOFS))
+		memalloc_noio_restore(noio_flag);
+
+	return ptr;
 }
 
 void
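The same PF_MEMALLOC_NOIO bracket reappears in _xfs_buf_map_pages() later in this series. A minimal sketch of the pattern, with alloc_fs_metadata() as a placeholder for the real allocation (__vmalloc() here, vm_map_ram() there):

static void *
fs_context_alloc(size_t size)
{
	unsigned noio_flag;
	void *ptr;

	noio_flag = memalloc_noio_save();	/* reclaim must not issue IO */
	ptr = alloc_fs_metadata(size);		/* placeholder allocation */
	memalloc_noio_restore(noio_flag);	/* restore original task flags */

	return ptr;
}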
+1 -1
@@ -281,7 +281,7 @@ xfs_set_acl(struct inode *inode, struct posix_acl *acl, int type)
 	if (!acl)
 		goto set_acl;
 
-	error = -EINVAL;
+	error = -E2BIG;
 	if (acl->a_count > XFS_ACL_MAX_ENTRIES(XFS_M(inode->i_sb)))
 		return error;

@@ -89,6 +89,8 @@ typedef struct xfs_agf {
 	/* structure must be padded to 64 bit alignment */
 } xfs_agf_t;
 
+#define XFS_AGF_CRC_OFF		offsetof(struct xfs_agf, agf_crc)
+
 #define	XFS_AGF_MAGICNUM	0x00000001
 #define	XFS_AGF_VERSIONNUM	0x00000002
 #define	XFS_AGF_SEQNO		0x00000004
@@ -167,6 +169,8 @@ typedef struct xfs_agi {
 	/* structure must be padded to 64 bit alignment */
 } xfs_agi_t;
 
+#define XFS_AGI_CRC_OFF		offsetof(struct xfs_agi, agi_crc)
+
 #define	XFS_AGI_MAGICNUM	0x00000001
 #define	XFS_AGI_VERSIONNUM	0x00000002
 #define	XFS_AGI_SEQNO		0x00000004
@@ -222,6 +226,8 @@ typedef struct xfs_agfl {
 	__be32		agfl_bno[];	/* actually XFS_AGFL_SIZE(mp) */
 } xfs_agfl_t;
 
+#define XFS_AGFL_CRC_OFF	offsetof(struct xfs_agfl, agfl_crc)
+
 /*
  * tags for inode radix tree
 */
+19 -26
@@ -474,7 +474,6 @@ xfs_agfl_read_verify(
 	struct xfs_buf	*bp)
 {
 	struct xfs_mount *mp = bp->b_target->bt_mount;
-	int		agfl_ok = 1;
 
 	/*
	 * There is no verification of non-crc AGFLs because mkfs does not
@@ -485,15 +484,13 @@ xfs_agfl_read_verify(
 	if (!xfs_sb_version_hascrc(&mp->m_sb))
 		return;
 
-	agfl_ok = xfs_verify_cksum(bp->b_addr, BBTOB(bp->b_length),
-				   offsetof(struct xfs_agfl, agfl_crc));
-
-	agfl_ok = agfl_ok && xfs_agfl_verify(bp);
-
-	if (!agfl_ok) {
-		XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, bp->b_addr);
-		xfs_buf_ioerror(bp, EFSCORRUPTED);
-	}
+	if (!xfs_buf_verify_cksum(bp, XFS_AGFL_CRC_OFF))
+		xfs_buf_ioerror(bp, EFSBADCRC);
+	else if (!xfs_agfl_verify(bp))
+		xfs_buf_ioerror(bp, EFSCORRUPTED);
+
+	if (bp->b_error)
+		xfs_verifier_error(bp);
 }
 
 static void
@@ -508,16 +505,15 @@ xfs_agfl_write_verify(
 		return;
 
 	if (!xfs_agfl_verify(bp)) {
-		XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, bp->b_addr);
 		xfs_buf_ioerror(bp, EFSCORRUPTED);
+		xfs_verifier_error(bp);
 		return;
 	}
 
 	if (bip)
 		XFS_BUF_TO_AGFL(bp)->agfl_lsn = cpu_to_be64(bip->bli_item.li_lsn);
 
-	xfs_update_cksum(bp->b_addr, BBTOB(bp->b_length),
-			 offsetof(struct xfs_agfl, agfl_crc));
+	xfs_buf_update_cksum(bp, XFS_AGFL_CRC_OFF);
 }
 
 const struct xfs_buf_ops xfs_agfl_buf_ops = {
@@ -2238,19 +2234,17 @@ xfs_agf_read_verify(
 	struct xfs_buf	*bp)
 {
 	struct xfs_mount *mp = bp->b_target->bt_mount;
-	int		agf_ok = 1;
 
-	if (xfs_sb_version_hascrc(&mp->m_sb))
-		agf_ok = xfs_verify_cksum(bp->b_addr, BBTOB(bp->b_length),
-					  offsetof(struct xfs_agf, agf_crc));
-
-	agf_ok = agf_ok && xfs_agf_verify(mp, bp);
-
-	if (unlikely(XFS_TEST_ERROR(!agf_ok, mp, XFS_ERRTAG_ALLOC_READ_AGF,
-			XFS_RANDOM_ALLOC_READ_AGF))) {
-		XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, bp->b_addr);
-		xfs_buf_ioerror(bp, EFSCORRUPTED);
-	}
+	if (xfs_sb_version_hascrc(&mp->m_sb) &&
+	    !xfs_buf_verify_cksum(bp, XFS_AGF_CRC_OFF))
+		xfs_buf_ioerror(bp, EFSBADCRC);
+	else if (XFS_TEST_ERROR(!xfs_agf_verify(mp, bp), mp,
+				XFS_ERRTAG_ALLOC_READ_AGF,
+				XFS_RANDOM_ALLOC_READ_AGF))
+		xfs_buf_ioerror(bp, EFSCORRUPTED);
+
+	if (bp->b_error)
+		xfs_verifier_error(bp);
 }
 
 static void
@@ -2261,8 +2255,8 @@ xfs_agf_write_verify(
 	struct xfs_buf_log_item	*bip = bp->b_fspriv;
 
 	if (!xfs_agf_verify(mp, bp)) {
-		XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, bp->b_addr);
 		xfs_buf_ioerror(bp, EFSCORRUPTED);
+		xfs_verifier_error(bp);
 		return;
 	}
 
@@ -2272,8 +2266,7 @@ xfs_agf_write_verify(
 	if (bip)
 		XFS_BUF_TO_AGF(bp)->agf_lsn = cpu_to_be64(bip->bli_item.li_lsn);
 
-	xfs_update_cksum(bp->b_addr, BBTOB(bp->b_length),
-			 offsetof(struct xfs_agf, agf_crc));
+	xfs_buf_update_cksum(bp, XFS_AGF_CRC_OFF);
 }
 
 const struct xfs_buf_ops xfs_agf_buf_ops = {

@@ -355,12 +355,14 @@ static void
 xfs_allocbt_read_verify(
 	struct xfs_buf	*bp)
 {
-	if (!(xfs_btree_sblock_verify_crc(bp) &&
-	      xfs_allocbt_verify(bp))) {
-		trace_xfs_btree_corrupt(bp, _RET_IP_);
-		XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW,
-				     bp->b_target->bt_mount, bp->b_addr);
+	if (!xfs_btree_sblock_verify_crc(bp))
+		xfs_buf_ioerror(bp, EFSBADCRC);
+	else if (!xfs_allocbt_verify(bp))
 		xfs_buf_ioerror(bp, EFSCORRUPTED);
+
+	if (bp->b_error) {
+		trace_xfs_btree_corrupt(bp, _RET_IP_);
+		xfs_verifier_error(bp);
 	}
 }
 
@@ -370,9 +372,9 @@ xfs_allocbt_write_verify(
 {
 	if (!xfs_allocbt_verify(bp)) {
 		trace_xfs_btree_corrupt(bp, _RET_IP_);
-		XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW,
-				     bp->b_target->bt_mount, bp->b_addr);
 		xfs_buf_ioerror(bp, EFSCORRUPTED);
+		xfs_verifier_error(bp);
 		return;
 	}
 	xfs_btree_sblock_calc_crc(bp);
+52 -32
@@ -632,38 +632,46 @@ xfs_map_at_offset(
 }
 
 /*
- * Test if a given page is suitable for writing as part of an unwritten
- * or delayed allocate extent.
+ * Test if a given page contains at least one buffer of a given @type.
+ * If @check_all_buffers is true, then we walk all the buffers in the page to
+ * try to find one of the type passed in. If it is not set, then the caller only
+ * needs to check the first buffer on the page for a match.
 */
-STATIC int
+STATIC bool
 xfs_check_page_type(
 	struct page		*page,
-	unsigned int		type)
+	unsigned int		type,
+	bool			check_all_buffers)
 {
-	if (PageWriteback(page))
-		return 0;
+	struct buffer_head	*bh;
+	struct buffer_head	*head;
 
-	if (page->mapping && page_has_buffers(page)) {
-		struct buffer_head	*bh, *head;
-		int			acceptable = 0;
+	if (PageWriteback(page))
+		return false;
+	if (!page->mapping)
+		return false;
+	if (!page_has_buffers(page))
+		return false;
 
-		bh = head = page_buffers(page);
-		do {
-			if (buffer_unwritten(bh))
-				acceptable += (type == XFS_IO_UNWRITTEN);
-			else if (buffer_delay(bh))
-				acceptable += (type == XFS_IO_DELALLOC);
-			else if (buffer_dirty(bh) && buffer_mapped(bh))
-				acceptable += (type == XFS_IO_OVERWRITE);
-			else
-				break;
-		} while ((bh = bh->b_this_page) != head);
+	bh = head = page_buffers(page);
+	do {
+		if (buffer_unwritten(bh)) {
+			if (type == XFS_IO_UNWRITTEN)
+				return true;
+		} else if (buffer_delay(bh)) {
+			if (type == XFS_IO_DELALLOC)
+				return true;
+		} else if (buffer_dirty(bh) && buffer_mapped(bh)) {
+			if (type == XFS_IO_OVERWRITE)
+				return true;
+		}
 
-		if (acceptable)
-			return 1;
-	}
+		/* If we are only checking the first buffer, we are done now. */
+		if (!check_all_buffers)
+			break;
+	} while ((bh = bh->b_this_page) != head);
 
-	return 0;
+	return false;
 }
 
 /*
@@ -697,7 +705,7 @@ xfs_convert_page(
 		goto fail_unlock_page;
 	if (page->mapping != inode->i_mapping)
 		goto fail_unlock_page;
-	if (!xfs_check_page_type(page, (*ioendp)->io_type))
+	if (!xfs_check_page_type(page, (*ioendp)->io_type, false))
 		goto fail_unlock_page;
 
 	/*
@@ -742,6 +750,15 @@ xfs_convert_page(
 	p_offset = p_offset ? roundup(p_offset, len) : PAGE_CACHE_SIZE;
 	page_dirty = p_offset / len;
 
+	/*
+	 * The moment we find a buffer that doesn't match our current type
+	 * specification or can't be written, abort the loop and start
+	 * writeback. As per the above xfs_imap_valid() check, only
+	 * xfs_vm_writepage() can handle partial page writeback fully - we are
+	 * limited here to the buffers that are contiguous with the current
+	 * ioend, and hence a buffer we can't write breaks that contiguity and
+	 * we have to defer the rest of the IO to xfs_vm_writepage().
+	 */
 	bh = head = page_buffers(page);
 	do {
 		if (offset >= end_offset)
@@ -750,7 +767,7 @@ xfs_convert_page(
 			uptodate = 0;
 		if (!(PageUptodate(page) || buffer_uptodate(bh))) {
 			done = 1;
-			continue;
+			break;
 		}
 
 		if (buffer_unwritten(bh) || buffer_delay(bh) ||
@@ -762,10 +779,11 @@ xfs_convert_page(
 			else
 				type = XFS_IO_OVERWRITE;
 
-			if (!xfs_imap_valid(inode, imap, offset)) {
-				done = 1;
-				continue;
-			}
+			/*
+			 * imap should always be valid because of the above
+			 * partial page end_offset check on the imap.
+			 */
+			ASSERT(xfs_imap_valid(inode, imap, offset));
 
 			lock_buffer(bh);
 			if (type != XFS_IO_OVERWRITE)
@@ -777,6 +795,7 @@ xfs_convert_page(
 			count++;
 		} else {
 			done = 1;
+			break;
 		}
 	} while (offset += len, (bh = bh->b_this_page) != head);
 
@@ -868,7 +887,7 @@ xfs_aops_discard_page(
 	struct buffer_head	*bh, *head;
 	loff_t			offset = page_offset(page);
 
-	if (!xfs_check_page_type(page, XFS_IO_DELALLOC))
+	if (!xfs_check_page_type(page, XFS_IO_DELALLOC, true))
 		goto out_invalidate;
 
 	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
@@ -1441,7 +1460,8 @@ xfs_vm_direct_IO(
 		ret = __blockdev_direct_IO(rw, iocb, inode, bdev, iov,
 					    offset, nr_segs,
 					    xfs_get_blocks_direct,
-					    xfs_end_io_direct_write, NULL, 0);
+					    xfs_end_io_direct_write, NULL,
+					    DIO_ASYNC_EXTEND);
 		if (ret != -EIOCBQUEUED && iocb->private)
 			goto out_destroy_ioend;
 	} else {
@@ -213,8 +213,8 @@ xfs_attr3_leaf_write_verify(
 	struct xfs_attr3_leaf_hdr *hdr3 = bp->b_addr;
 
 	if (!xfs_attr3_leaf_verify(bp)) {
-		XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, bp->b_addr);
 		xfs_buf_ioerror(bp, EFSCORRUPTED);
+		xfs_verifier_error(bp);
 		return;
 	}
 
@@ -224,7 +224,7 @@ xfs_attr3_leaf_write_verify(
 	if (bip)
 		hdr3->info.lsn = cpu_to_be64(bip->bli_item.li_lsn);
 
-	xfs_update_cksum(bp->b_addr, BBTOB(bp->b_length), XFS_ATTR3_LEAF_CRC_OFF);
+	xfs_buf_update_cksum(bp, XFS_ATTR3_LEAF_CRC_OFF);
 }
 
 /*
@@ -239,13 +239,14 @@ xfs_attr3_leaf_read_verify(
 {
 	struct xfs_mount	*mp = bp->b_target->bt_mount;
 
-	if ((xfs_sb_version_hascrc(&mp->m_sb) &&
-	     !xfs_verify_cksum(bp->b_addr, BBTOB(bp->b_length),
-				XFS_ATTR3_LEAF_CRC_OFF)) ||
-	    !xfs_attr3_leaf_verify(bp)) {
-		XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, bp->b_addr);
+	if (xfs_sb_version_hascrc(&mp->m_sb) &&
+	     !xfs_buf_verify_cksum(bp, XFS_ATTR3_LEAF_CRC_OFF))
+		xfs_buf_ioerror(bp, EFSBADCRC);
+	else if (!xfs_attr3_leaf_verify(bp))
 		xfs_buf_ioerror(bp, EFSCORRUPTED);
-	}
+
+	if (bp->b_error)
+		xfs_verifier_error(bp);
 }
 
 const struct xfs_buf_ops xfs_attr3_leaf_buf_ops = {
@@ -125,7 +125,6 @@ xfs_attr3_rmt_read_verify(
 	struct xfs_mount *mp = bp->b_target->bt_mount;
 	char		*ptr;
 	int		len;
-	bool		corrupt = false;
 	xfs_daddr_t	bno;
 
 	/* no verification of non-crc buffers */
@@ -140,11 +139,11 @@ xfs_attr3_rmt_read_verify(
 	while (len > 0) {
 		if (!xfs_verify_cksum(ptr, XFS_LBSIZE(mp),
 				      XFS_ATTR3_RMT_CRC_OFF)) {
-			corrupt = true;
+			xfs_buf_ioerror(bp, EFSBADCRC);
 			break;
 		}
 		if (!xfs_attr3_rmt_verify(mp, ptr, XFS_LBSIZE(mp), bno)) {
-			corrupt = true;
+			xfs_buf_ioerror(bp, EFSCORRUPTED);
 			break;
 		}
 		len -= XFS_LBSIZE(mp);
@@ -152,10 +151,9 @@ xfs_attr3_rmt_read_verify(
 		bno += mp->m_bsize;
 	}
 
-	if (corrupt) {
-		XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, bp->b_addr);
-		xfs_buf_ioerror(bp, EFSCORRUPTED);
-	} else
+	if (bp->b_error)
+		xfs_verifier_error(bp);
+	else
 		ASSERT(len == 0);
 }
 
@@ -180,9 +178,8 @@ xfs_attr3_rmt_write_verify(
 
 	while (len > 0) {
 		if (!xfs_attr3_rmt_verify(mp, ptr, XFS_LBSIZE(mp), bno)) {
-			XFS_CORRUPTION_ERROR(__func__,
-					    XFS_ERRLEVEL_LOW, mp, bp->b_addr);
 			xfs_buf_ioerror(bp, EFSCORRUPTED);
+			xfs_verifier_error(bp);
 			return;
 		}
 		if (bip) {
@@ -5378,3 +5378,196 @@ error0:
 	}
 	return error;
 }
+
+/*
+ * Shift extent records to the left to cover a hole.
+ *
+ * The maximum number of extents to be shifted in a single operation
+ * is @num_exts, and @current_ext keeps track of the current extent
+ * index we have shifted. @offset_shift_fsb is the length by which each
+ * extent is shifted. If there is no hole to shift the extents
+ * into, this will be considered invalid operation and we abort immediately.
+ */
+int
+xfs_bmap_shift_extents(
+	struct xfs_trans	*tp,
+	struct xfs_inode	*ip,
+	int			*done,
+	xfs_fileoff_t		start_fsb,
+	xfs_fileoff_t		offset_shift_fsb,
+	xfs_extnum_t		*current_ext,
+	xfs_fsblock_t		*firstblock,
+	struct xfs_bmap_free	*flist,
+	int			num_exts)
+{
+	struct xfs_btree_cur		*cur;
+	struct xfs_bmbt_rec_host	*gotp;
+	struct xfs_bmbt_irec		got;
+	struct xfs_bmbt_irec		left;
+	struct xfs_mount		*mp = ip->i_mount;
+	struct xfs_ifork		*ifp;
+	xfs_extnum_t			nexts = 0;
+	xfs_fileoff_t			startoff;
+	int				error = 0;
+	int				i;
+	int				whichfork = XFS_DATA_FORK;
+	int				logflags;
+	xfs_filblks_t			blockcount = 0;
+
+	if (unlikely(XFS_TEST_ERROR(
+	    (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
+	     XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE),
+	     mp, XFS_ERRTAG_BMAPIFORMAT, XFS_RANDOM_BMAPIFORMAT))) {
+		XFS_ERROR_REPORT("xfs_bmap_shift_extents",
+				 XFS_ERRLEVEL_LOW, mp);
+		return XFS_ERROR(EFSCORRUPTED);
+	}
+
+	if (XFS_FORCED_SHUTDOWN(mp))
+		return XFS_ERROR(EIO);
+
+	ASSERT(current_ext != NULL);
+
+	ifp = XFS_IFORK_PTR(ip, whichfork);
+
+	if (!(ifp->if_flags & XFS_IFEXTENTS)) {
+		/* Read in all the extents */
+		error = xfs_iread_extents(tp, ip, whichfork);
+		if (error)
+			return error;
+	}
+
+	/*
+	 * If *current_ext is 0, we would need to lookup the extent
+	 * from where we would start shifting and store it in gotp.
+	 */
+	if (!*current_ext) {
+		gotp = xfs_iext_bno_to_ext(ifp, start_fsb, current_ext);
+		/*
+		 * gotp can be null in 2 cases: 1) if there are no extents
+		 * or 2) start_fsb lies in a hole beyond which there are
+		 * no extents. Either way, we are done.
+		 */
+		if (!gotp) {
+			*done = 1;
+			return 0;
+		}
+	}
+
+	/* We are going to change core inode */
+	logflags = XFS_ILOG_CORE;
+
+	if (ifp->if_flags & XFS_IFBROOT) {
+		cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
+		cur->bc_private.b.firstblock = *firstblock;
+		cur->bc_private.b.flist = flist;
+		cur->bc_private.b.flags = 0;
+	} else {
+		cur = NULL;
+		logflags |= XFS_ILOG_DEXT;
+	}
+
+	while (nexts++ < num_exts &&
+	       *current_ext < XFS_IFORK_NEXTENTS(ip, whichfork)) {
+
+		gotp = xfs_iext_get_ext(ifp, *current_ext);
+		xfs_bmbt_get_all(gotp, &got);
+		startoff = got.br_startoff - offset_shift_fsb;
+
+		/*
+		 * Before shifting extent into hole, make sure that the hole
+		 * is large enough to accomodate the shift.
+		 */
+		if (*current_ext) {
+			xfs_bmbt_get_all(xfs_iext_get_ext(ifp,
+					 *current_ext - 1), &left);
+
+			if (startoff < left.br_startoff + left.br_blockcount)
+				error = XFS_ERROR(EINVAL);
+		} else if (offset_shift_fsb > got.br_startoff) {
+			/*
+			 * When first extent is shifted, offset_shift_fsb
+			 * should be less than the stating offset of
+			 * the first extent.
+			 */
+			error = XFS_ERROR(EINVAL);
+		}
+
+		if (error)
+			goto del_cursor;
+
+		if (cur) {
+			error = xfs_bmbt_lookup_eq(cur, got.br_startoff,
+						   got.br_startblock,
+						   got.br_blockcount,
+						   &i);
+			if (error)
+				goto del_cursor;
+			XFS_WANT_CORRUPTED_GOTO(i == 1, del_cursor);
+		}
+
+		/* Check if we can merge 2 adjacent extents */
+		if (*current_ext &&
+		    left.br_startoff + left.br_blockcount == startoff &&
+		    left.br_startblock + left.br_blockcount ==
+				got.br_startblock &&
+		    left.br_state == got.br_state &&
+		    left.br_blockcount + got.br_blockcount <= MAXEXTLEN) {
+			blockcount = left.br_blockcount +
+				     got.br_blockcount;
+			xfs_iext_remove(ip, *current_ext, 1, 0);
+			if (cur) {
+				error = xfs_btree_delete(cur, &i);
+				if (error)
+					goto del_cursor;
+				XFS_WANT_CORRUPTED_GOTO(i == 1, del_cursor);
+			}
+			XFS_IFORK_NEXT_SET(ip, whichfork,
+				XFS_IFORK_NEXTENTS(ip, whichfork) - 1);
+			gotp = xfs_iext_get_ext(ifp, --*current_ext);
+			xfs_bmbt_get_all(gotp, &got);
+
+			/* Make cursor point to the extent we will update */
+			if (cur) {
+				error = xfs_bmbt_lookup_eq(cur, got.br_startoff,
+							   got.br_startblock,
+							   got.br_blockcount,
+							   &i);
+				if (error)
+					goto del_cursor;
+				XFS_WANT_CORRUPTED_GOTO(i == 1, del_cursor);
+			}
+
+			xfs_bmbt_set_blockcount(gotp, blockcount);
+			got.br_blockcount = blockcount;
+		} else {
+			/* We have to update the startoff */
+			xfs_bmbt_set_startoff(gotp, startoff);
+			got.br_startoff = startoff;
+		}
+
+		if (cur) {
+			error = xfs_bmbt_update(cur, got.br_startoff,
+						got.br_startblock,
+						got.br_blockcount,
+						got.br_state);
+			if (error)
+				goto del_cursor;
+		}
+
+		(*current_ext)++;
+	}
+
+	/* Check if we are done */
+	if (*current_ext == XFS_IFORK_NEXTENTS(ip, whichfork))
+		*done = 1;
+
+del_cursor:
+	if (cur)
+		xfs_btree_del_cursor(cur,
+			error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
+
+	xfs_trans_log_inode(tp, ip, logflags);
+
+	return error;
+}
@@ -127,6 +127,16 @@ static inline void xfs_bmap_init(xfs_bmap_free_t *flp, xfs_fsblock_t *fbp)
 	{ BMAP_RIGHT_FILLING,	"RF" }, \
 	{ BMAP_ATTRFORK,	"ATTR" }
 
+
+/*
+ * This macro is used to determine how many extents will be shifted
+ * in one write transaction. We could require two splits,
+ * an extent move on the first and an extent merge on the second,
+ * So it is proper that one extent is shifted inside write transaction
+ * at a time.
+ */
+#define XFS_BMAP_MAX_SHIFT_EXTENTS	1
+
 #ifdef DEBUG
 void	xfs_bmap_trace_exlist(struct xfs_inode *ip, xfs_extnum_t cnt,
 		int whichfork, unsigned long caller_ip);
@@ -169,5 +179,10 @@ int	xfs_bunmapi(struct xfs_trans *tp, struct xfs_inode *ip,
 int	xfs_check_nostate_extents(struct xfs_ifork *ifp, xfs_extnum_t idx,
 		xfs_extnum_t num);
 uint	xfs_default_attroffset(struct xfs_inode *ip);
+int	xfs_bmap_shift_extents(struct xfs_trans *tp, struct xfs_inode *ip,
+		int *done, xfs_fileoff_t start_fsb,
+		xfs_fileoff_t offset_shift_fsb, xfs_extnum_t *current_ext,
+		xfs_fsblock_t *firstblock, struct xfs_bmap_free *flist,
+		int num_exts);
 
 #endif	/* __XFS_BMAP_H__ */
@@ -780,12 +780,14 @@ static void
 xfs_bmbt_read_verify(
 	struct xfs_buf	*bp)
 {
-	if (!(xfs_btree_lblock_verify_crc(bp) &&
-	      xfs_bmbt_verify(bp))) {
-		trace_xfs_btree_corrupt(bp, _RET_IP_);
-		XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW,
-				     bp->b_target->bt_mount, bp->b_addr);
+	if (!xfs_btree_lblock_verify_crc(bp))
+		xfs_buf_ioerror(bp, EFSBADCRC);
+	else if (!xfs_bmbt_verify(bp))
 		xfs_buf_ioerror(bp, EFSCORRUPTED);
+
+	if (bp->b_error) {
+		trace_xfs_btree_corrupt(bp, _RET_IP_);
+		xfs_verifier_error(bp);
 	}
 }
 
@@ -794,11 +796,9 @@ xfs_bmbt_write_verify(
 	struct xfs_buf	*bp)
 {
 	if (!xfs_bmbt_verify(bp)) {
-		xfs_warn(bp->b_target->bt_mount, "bmbt daddr 0x%llx failed", bp->b_bn);
 		trace_xfs_btree_corrupt(bp, _RET_IP_);
-		XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW,
-				     bp->b_target->bt_mount, bp->b_addr);
 		xfs_buf_ioerror(bp, EFSCORRUPTED);
+		xfs_verifier_error(bp);
 		return;
 	}
 	xfs_btree_lblock_calc_crc(bp);
+96 -1
@@ -1349,7 +1349,6 @@ xfs_free_file_space(
 	 * the freeing of the space succeeds at ENOSPC.
 	 */
 	tp = xfs_trans_alloc(mp, XFS_TRANS_DIOSTRAT);
-	tp->t_flags |= XFS_TRANS_RESERVE;
 	error = xfs_trans_reserve(tp, &M_RES(mp)->tr_write, resblks, 0);
 
 	/*
@@ -1467,6 +1466,102 @@ out:
 
 }
 
+/*
+ * xfs_collapse_file_space()
+ *	This routine frees disk space and shift extent for the given file.
+ *	The first thing we do is to free data blocks in the specified range
+ *	by calling xfs_free_file_space(). It would also sync dirty data
+ *	and invalidate page cache over the region on which collapse range
+ *	is working. And Shift extent records to the left to cover a hole.
+ * RETURNS:
+ *	0 on success
+ *	errno on error
+ *
+ */
+int
+xfs_collapse_file_space(
+	struct xfs_inode	*ip,
+	xfs_off_t		offset,
+	xfs_off_t		len)
+{
+	int			done = 0;
+	struct xfs_mount	*mp = ip->i_mount;
+	struct xfs_trans	*tp;
+	int			error;
+	xfs_extnum_t		current_ext = 0;
+	struct xfs_bmap_free	free_list;
+	xfs_fsblock_t		first_block;
+	int			committed;
+	xfs_fileoff_t		start_fsb;
+	xfs_fileoff_t		shift_fsb;
+
+	ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
+
+	trace_xfs_collapse_file_space(ip);
+
+	start_fsb = XFS_B_TO_FSB(mp, offset + len);
+	shift_fsb = XFS_B_TO_FSB(mp, len);
+
+	error = xfs_free_file_space(ip, offset, len);
+	if (error)
+		return error;
+
+	while (!error && !done) {
+		tp = xfs_trans_alloc(mp, XFS_TRANS_DIOSTRAT);
+		tp->t_flags |= XFS_TRANS_RESERVE;
+		/*
+		 * We would need to reserve permanent block for transaction.
+		 * This will come into picture when after shifting extent into
+		 * hole we found that adjacent extents can be merged which
+		 * may lead to freeing of a block during record update.
+		 */
+		error = xfs_trans_reserve(tp, &M_RES(mp)->tr_write,
+				XFS_DIOSTRAT_SPACE_RES(mp, 0), 0);
+		if (error) {
+			ASSERT(error == ENOSPC || XFS_FORCED_SHUTDOWN(mp));
+			xfs_trans_cancel(tp, 0);
+			break;
+		}
+
+		xfs_ilock(ip, XFS_ILOCK_EXCL);
+		error = xfs_trans_reserve_quota(tp, mp, ip->i_udquot,
+				ip->i_gdquot, ip->i_pdquot,
+				XFS_DIOSTRAT_SPACE_RES(mp, 0), 0,
+				XFS_QMOPT_RES_REGBLKS);
+		if (error)
+			goto out;
+
+		xfs_trans_ijoin(tp, ip, 0);
+
+		xfs_bmap_init(&free_list, &first_block);
+
+		/*
+		 * We are using the write transaction in which max 2 bmbt
+		 * updates are allowed
+		 */
+		error = xfs_bmap_shift_extents(tp, ip, &done, start_fsb,
+					       shift_fsb, &current_ext,
+					       &first_block, &free_list,
+					       XFS_BMAP_MAX_SHIFT_EXTENTS);
+		if (error)
+			goto out;
+
+		error = xfs_bmap_finish(&tp, &free_list, &committed);
+		if (error)
+			goto out;
+
+		error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
+		xfs_iunlock(ip, XFS_ILOCK_EXCL);
+	}
+
+	return error;
+
+out:
+	xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT);
+	xfs_iunlock(ip, XFS_ILOCK_EXCL);
+	return error;
+}
+
 /*
  * We need to check that the format of the data fork in the temporary inode is
  * valid for the target inode before doing the swap. This is not a problem with
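A worked example of what the loop above produces, assuming a 4 KiB block size; the layout below is illustrative, not from this commit:

/*
 * FALLOC_FL_COLLAPSE_RANGE on a 12-block file, with offset = 4 blocks
 * and len = 4 blocks:
 *
 *   before: |0|1|2|3|4|5|6|7|8|9|10|11|	size = 12 blocks
 *   after:  |0|1|2|3|8|9|10|11|		size = 8 blocks
 *
 * xfs_free_file_space() first frees blocks 4-7, then the loop shifts
 * the extents covering blocks 8-11 left by shift_fsb, at most
 * XFS_BMAP_MAX_SHIFT_EXTENTS extents per transaction, until
 * xfs_bmap_shift_extents() sets done.
 */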
@@ -99,6 +99,8 @@ int	xfs_free_file_space(struct xfs_inode *ip, xfs_off_t offset,
 			    xfs_off_t len);
 int	xfs_zero_file_space(struct xfs_inode *ip, xfs_off_t offset,
 			    xfs_off_t len);
+int	xfs_collapse_file_space(struct xfs_inode *, xfs_off_t offset,
+				xfs_off_t len);
 
 /* EOF block manipulation functions */
 bool	xfs_can_free_eofblocks(struct xfs_inode *ip, bool force);
+6 -8
@@ -234,8 +234,7 @@ xfs_btree_lblock_calc_crc(
 		return;
 	if (bip)
 		block->bb_u.l.bb_lsn = cpu_to_be64(bip->bli_item.li_lsn);
-	xfs_update_cksum(bp->b_addr, BBTOB(bp->b_length),
-			 XFS_BTREE_LBLOCK_CRC_OFF);
+	xfs_buf_update_cksum(bp, XFS_BTREE_LBLOCK_CRC_OFF);
 }
 
 bool
@@ -243,8 +242,8 @@ xfs_btree_lblock_verify_crc(
 	struct xfs_buf		*bp)
 {
 	if (xfs_sb_version_hascrc(&bp->b_target->bt_mount->m_sb))
-		return xfs_verify_cksum(bp->b_addr, BBTOB(bp->b_length),
-					XFS_BTREE_LBLOCK_CRC_OFF);
+		return xfs_buf_verify_cksum(bp, XFS_BTREE_LBLOCK_CRC_OFF);
+
 	return true;
 }
 
@@ -267,8 +266,7 @@ xfs_btree_sblock_calc_crc(
 		return;
 	if (bip)
 		block->bb_u.s.bb_lsn = cpu_to_be64(bip->bli_item.li_lsn);
-	xfs_update_cksum(bp->b_addr, BBTOB(bp->b_length),
-			 XFS_BTREE_SBLOCK_CRC_OFF);
+	xfs_buf_update_cksum(bp, XFS_BTREE_SBLOCK_CRC_OFF);
 }
 
 bool
@@ -276,8 +274,8 @@ xfs_btree_sblock_verify_crc(
 	struct xfs_buf		*bp)
 {
 	if (xfs_sb_version_hascrc(&bp->b_target->bt_mount->m_sb))
-		return xfs_verify_cksum(bp->b_addr, BBTOB(bp->b_length),
-					XFS_BTREE_SBLOCK_CRC_OFF);
+		return xfs_buf_verify_cksum(bp, XFS_BTREE_SBLOCK_CRC_OFF);
+
 	return true;
 }
@@ -396,7 +396,17 @@ _xfs_buf_map_pages(
 		bp->b_addr = NULL;
 	} else {
 		int retried = 0;
+		unsigned noio_flag;
 
+		/*
+		 * vm_map_ram() will allocate auxillary structures (e.g.
+		 * pagetables) with GFP_KERNEL, yet we are likely to be under
+		 * GFP_NOFS context here. Hence we need to tell memory reclaim
+		 * that we are in such a context via PF_MEMALLOC_NOIO to prevent
+		 * memory reclaim re-entering the filesystem here and
+		 * potentially deadlocking.
+		 */
+		noio_flag = memalloc_noio_save();
 		do {
 			bp->b_addr = vm_map_ram(bp->b_pages, bp->b_page_count,
 						-1, PAGE_KERNEL);
@@ -404,6 +414,7 @@ _xfs_buf_map_pages(
 				break;
 			vm_unmap_aliases();
 		} while (retried++ <= 1);
+		memalloc_noio_restore(noio_flag);
 
 		if (!bp->b_addr)
 			return -ENOMEM;
@@ -369,6 +369,20 @@ static inline void xfs_buf_relse(xfs_buf_t *bp)
 	xfs_buf_rele(bp);
 }
 
+static inline int
+xfs_buf_verify_cksum(struct xfs_buf *bp, unsigned long cksum_offset)
+{
+	return xfs_verify_cksum(bp->b_addr, BBTOB(bp->b_length),
+				cksum_offset);
+}
+
+static inline void
+xfs_buf_update_cksum(struct xfs_buf *bp, unsigned long cksum_offset)
+{
+	xfs_update_cksum(bp->b_addr, BBTOB(bp->b_length),
+			 cksum_offset);
+}
+
 /*
  *	Handling of buftargs.
 */
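With these helpers plus xfs_verifier_error(), the read verifiers in this series all collapse to one shape. A hypothetical example (the xfs_foo_* names and XFS_FOO_CRC_OFF are placeholders, not real kernel symbols):

static void
xfs_foo_read_verify(
	struct xfs_buf	*bp)
{
	if (!xfs_buf_verify_cksum(bp, XFS_FOO_CRC_OFF))
		xfs_buf_ioerror(bp, EFSBADCRC);		/* CRC mismatch */
	else if (!xfs_foo_verify(bp))
		xfs_buf_ioerror(bp, EFSCORRUPTED);	/* structural corruption */

	if (bp->b_error)
		xfs_verifier_error(bp);			/* common reporting path */
}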
@@ -796,20 +796,6 @@ xfs_buf_item_init(
 		bip->bli_formats[i].blf_map_size = map_size;
 	}
 
-#ifdef XFS_TRANS_DEBUG
-	/*
-	 * Allocate the arrays for tracking what needs to be logged
-	 * and what our callers request to be logged.  bli_orig
-	 * holds a copy of the original, clean buffer for comparison
-	 * against, and bli_logged keeps a 1 bit flag per byte in
-	 * the buffer to indicate which bytes the callers have asked
-	 * to have logged.
-	 */
-	bip->bli_orig = kmem_alloc(BBTOB(bp->b_length), KM_SLEEP);
-	memcpy(bip->bli_orig, bp->b_addr, BBTOB(bp->b_length));
-	bip->bli_logged = kmem_zalloc(BBTOB(bp->b_length) / NBBY, KM_SLEEP);
-#endif
-
 	/*
 	 * Put the buf item into the list of items attached to the
 	 * buffer at the front.
@@ -957,11 +943,6 @@ STATIC void
 xfs_buf_item_free(
 	xfs_buf_log_item_t	*bip)
 {
-#ifdef XFS_TRANS_DEBUG
-	kmem_free(bip->bli_orig);
-	kmem_free(bip->bli_logged);
-#endif /* XFS_TRANS_DEBUG */
-
 	xfs_buf_item_free_format(bip);
 	kmem_zone_free(xfs_buf_item_zone, bip);
 }
+10 -9
@@ -185,8 +185,8 @@ xfs_da3_node_write_verify(
 	struct xfs_da3_node_hdr *hdr3 = bp->b_addr;
 
 	if (!xfs_da3_node_verify(bp)) {
-		XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, bp->b_addr);
 		xfs_buf_ioerror(bp, EFSCORRUPTED);
+		xfs_verifier_error(bp);
 		return;
 	}
 
@@ -196,7 +196,7 @@ xfs_da3_node_write_verify(
 	if (bip)
 		hdr3->info.lsn = cpu_to_be64(bip->bli_item.li_lsn);
 
-	xfs_update_cksum(bp->b_addr, BBTOB(bp->b_length), XFS_DA3_NODE_CRC_OFF);
+	xfs_buf_update_cksum(bp, XFS_DA3_NODE_CRC_OFF);
 }
 
 /*
@@ -209,18 +209,20 @@ static void
 xfs_da3_node_read_verify(
 	struct xfs_buf		*bp)
 {
-	struct xfs_mount	*mp = bp->b_target->bt_mount;
 	struct xfs_da_blkinfo	*info = bp->b_addr;
 
 	switch (be16_to_cpu(info->magic)) {
 		case XFS_DA3_NODE_MAGIC:
-			if (!xfs_verify_cksum(bp->b_addr, BBTOB(bp->b_length),
-					      XFS_DA3_NODE_CRC_OFF))
+			if (!xfs_buf_verify_cksum(bp, XFS_DA3_NODE_CRC_OFF)) {
+				xfs_buf_ioerror(bp, EFSBADCRC);
 				break;
+			}
 			/* fall through */
 		case XFS_DA_NODE_MAGIC:
-			if (!xfs_da3_node_verify(bp))
+			if (!xfs_da3_node_verify(bp)) {
+				xfs_buf_ioerror(bp, EFSCORRUPTED);
 				break;
+			}
 			return;
 		case XFS_ATTR_LEAF_MAGIC:
 		case XFS_ATTR3_LEAF_MAGIC:
@@ -237,8 +239,7 @@ xfs_da3_node_read_verify(
 	}
 
 	/* corrupt block */
-	XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, bp->b_addr);
-	xfs_buf_ioerror(bp, EFSCORRUPTED);
+	xfs_verifier_error(bp);
 }
 
 const struct xfs_buf_ops xfs_da3_node_buf_ops = {
@@ -1295,7 +1296,7 @@ xfs_da3_fixhashpath(
 		node = blk->bp->b_addr;
 		dp->d_ops->node_hdr_from_disk(&nodehdr, node);
 		btree = dp->d_ops->node_tree_p(node);
-		if (be32_to_cpu(btree->hashval) == lasthash)
+		if (be32_to_cpu(btree[blk->index].hashval) == lasthash)
 			break;
 		blk->hashval = lasthash;
 		btree[blk->index].hashval = cpu_to_be32(lasthash);
Some files were not shown because too many files have changed in this diff.