Merge branch 'for-linus' of git://oss.sgi.com/xfs/xfs

* 'for-linus' of git://oss.sgi.com/xfs/xfs: (36 commits)
  xfs: semaphore cleanup
  xfs: Extend project quotas to support 32bit project ids
  xfs: remove xfs_buf wrappers
  xfs: remove xfs_cred.h
  xfs: remove xfs_globals.h
  xfs: remove xfs_version.h
  xfs: remove xfs_refcache.h
  xfs: fix the xfs_trans_committed
  xfs: remove unused t_callback field in struct xfs_trans
  xfs: fix bogus m_maxagi check in xfs_iget
  xfs: do not use xfs_mod_incore_sb_batch for per-cpu counters
  xfs: do not use xfs_mod_incore_sb for per-cpu counters
  xfs: remove XFS_MOUNT_NO_PERCPU_SB
  xfs: pack xfs_buf structure more tightly
  xfs: convert buffer cache hash to rbtree
  xfs: serialise inode reclaim within an AG
  xfs: batch inode reclaim lookup
  xfs: implement batched inode lookups for AG walking
  xfs: split out inode walk inode grabbing
  xfs: split inode AG walking into separate code for reclaim
  ...
Committed by Linus Torvalds on 2010-10-22 17:32:27 -07:00
60 changed files with 1189 additions and 1379 deletions
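The buffer-cache commits above replace the per-buftarg hash table with one rbtree per allocation group, keyed by the buffer's file offset; the xfs_buf.c hunks below show the actual kernel code. As a rough, self-contained sketch of the same walk-and-remember-the-link lookup pattern (plain userspace C, hypothetical types, an unbalanced tree instead of the kernel rb_node API, no locking or rebalancing):

    #include <stdio.h>
    #include <stdlib.h>

    /* hypothetical stand-in for struct xfs_buf: keyed by block offset */
    struct buf {
            long long       offset;
            struct buf      *left, *right;
    };

    /*
     * Find a buffer by offset, or insert a new one at the link the walk
     * ended on.  This mirrors the _xfs_buf_find() loop: descend the tree,
     * remember the parent link, and attach the new node there on a miss
     * (the kernel's rb_link_node()/rb_insert_color() also rebalance,
     * which this sketch omits).
     */
    static struct buf *find_or_insert(struct buf **root, long long offset)
    {
            struct buf **link = root;

            while (*link) {
                    if (offset < (*link)->offset)
                            link = &(*link)->left;
                    else if (offset > (*link)->offset)
                            link = &(*link)->right;
                    else
                            return *link;           /* cache hit */
            }

            struct buf *bp = calloc(1, sizeof(*bp));
            bp->offset = offset;
            *link = bp;                             /* cache miss: insert */
            return bp;
    }

    int main(void)
    {
            struct buf *root = NULL;
            struct buf *a = find_or_insert(&root, 8192);

            printf("second lookup hits same node: %d\n",
                   find_or_insert(&root, 8192) == a);
            return 0;
    }

In the real code the miss path additionally keeps a reference to the per-AG structure in the buffer and rebalances the tree with rb_insert_color().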
+117 -96
@@ -188,8 +188,8 @@ _xfs_buf_initialize(
 	atomic_set(&bp->b_hold, 1);
 	init_completion(&bp->b_iowait);
 	INIT_LIST_HEAD(&bp->b_list);
-	INIT_LIST_HEAD(&bp->b_hash_list);
-	init_MUTEX_LOCKED(&bp->b_sema); /* held, no waiters */
+	RB_CLEAR_NODE(&bp->b_rbnode);
+	sema_init(&bp->b_sema, 0); /* held, no waiters */
 	XB_SET_OWNER(bp);
 	bp->b_target = target;
 	bp->b_file_offset = range_base;
@@ -262,8 +262,6 @@ xfs_buf_free(
 {
 	trace_xfs_buf_free(bp, _RET_IP_);

-	ASSERT(list_empty(&bp->b_hash_list));
-
 	if (bp->b_flags & (_XBF_PAGE_CACHE|_XBF_PAGES)) {
 		uint		i;
@@ -422,8 +420,10 @@ _xfs_buf_find(
 {
 	xfs_off_t		range_base;
 	size_t			range_length;
-	xfs_bufhash_t		*hash;
-	xfs_buf_t		*bp, *n;
+	struct xfs_perag	*pag;
+	struct rb_node		**rbp;
+	struct rb_node		*parent;
+	xfs_buf_t		*bp;

 	range_base = (ioff << BBSHIFT);
 	range_length = (isize << BBSHIFT);
@@ -432,14 +432,37 @@ _xfs_buf_find(
 	ASSERT(!(range_length < (1 << btp->bt_sshift)));
 	ASSERT(!(range_base & (xfs_off_t)btp->bt_smask));

-	hash = &btp->bt_hash[hash_long((unsigned long)ioff, btp->bt_hashshift)];
-
-	spin_lock(&hash->bh_lock);
-
-	list_for_each_entry_safe(bp, n, &hash->bh_list, b_hash_list) {
-		ASSERT(btp == bp->b_target);
-		if (bp->b_file_offset == range_base &&
-		    bp->b_buffer_length == range_length) {
+	/* get tree root */
+	pag = xfs_perag_get(btp->bt_mount,
+				xfs_daddr_to_agno(btp->bt_mount, ioff));
+
+	/* walk tree */
+	spin_lock(&pag->pag_buf_lock);
+	rbp = &pag->pag_buf_tree.rb_node;
+	parent = NULL;
+	bp = NULL;
+	while (*rbp) {
+		parent = *rbp;
+		bp = rb_entry(parent, struct xfs_buf, b_rbnode);
+
+		if (range_base < bp->b_file_offset)
+			rbp = &(*rbp)->rb_left;
+		else if (range_base > bp->b_file_offset)
+			rbp = &(*rbp)->rb_right;
+		else {
+			/*
+			 * found a block offset match. If the range doesn't
+			 * match, the only way this is allowed is if the buffer
+			 * in the cache is stale and the transaction that made
+			 * it stale has not yet committed. i.e. we are
+			 * reallocating a busy extent. Skip this buffer and
+			 * continue searching to the right for an exact match.
+			 */
+			if (bp->b_buffer_length != range_length) {
+				ASSERT(bp->b_flags & XBF_STALE);
+				rbp = &(*rbp)->rb_right;
+				continue;
+			}
 			atomic_inc(&bp->b_hold);
 			goto found;
 		}
@@ -449,17 +472,21 @@ _xfs_buf_find(
 	if (new_bp) {
 		_xfs_buf_initialize(new_bp, btp, range_base,
 					range_length, flags);
-		new_bp->b_hash = hash;
-		list_add(&new_bp->b_hash_list, &hash->bh_list);
+		rb_link_node(&new_bp->b_rbnode, parent, rbp);
+		rb_insert_color(&new_bp->b_rbnode, &pag->pag_buf_tree);
+		/* the buffer keeps the perag reference until it is freed */
+		new_bp->b_pag = pag;
+		spin_unlock(&pag->pag_buf_lock);
 	} else {
 		XFS_STATS_INC(xb_miss_locked);
+		spin_unlock(&pag->pag_buf_lock);
+		xfs_perag_put(pag);
 	}
-	spin_unlock(&hash->bh_lock);
 	return new_bp;

 found:
-	spin_unlock(&hash->bh_lock);
+	spin_unlock(&pag->pag_buf_lock);
+	xfs_perag_put(pag);

 	/* Attempt to get the semaphore without sleeping,
 	 * if this does not work then we need to drop the
@@ -625,8 +652,7 @@ void
 xfs_buf_readahead(
 	xfs_buftarg_t		*target,
 	xfs_off_t		ioff,
-	size_t			isize,
-	xfs_buf_flags_t		flags)
+	size_t			isize)
 {
 	struct backing_dev_info *bdi;
@@ -634,8 +660,42 @@ xfs_buf_readahead(
 	if (bdi_read_congested(bdi))
 		return;

-	flags |= (XBF_TRYLOCK|XBF_ASYNC|XBF_READ_AHEAD);
-	xfs_buf_read(target, ioff, isize, flags);
+	xfs_buf_read(target, ioff, isize,
+		     XBF_TRYLOCK|XBF_ASYNC|XBF_READ_AHEAD|XBF_DONT_BLOCK);
+}
+
+/*
+ * Read an uncached buffer from disk. Allocates and returns a locked
+ * buffer containing the disk contents or nothing.
+ */
+struct xfs_buf *
+xfs_buf_read_uncached(
+	struct xfs_mount	*mp,
+	struct xfs_buftarg	*target,
+	xfs_daddr_t		daddr,
+	size_t			length,
+	int			flags)
+{
+	xfs_buf_t		*bp;
+	int			error;
+
+	bp = xfs_buf_get_uncached(target, length, flags);
+	if (!bp)
+		return NULL;
+
+	/* set up the buffer for a read IO */
+	xfs_buf_lock(bp);
+	XFS_BUF_SET_ADDR(bp, daddr);
+	XFS_BUF_READ(bp);
+	XFS_BUF_BUSY(bp);
+
+	xfsbdstrat(mp, bp);
+	error = xfs_buf_iowait(bp);
+	if (error || bp->b_error) {
+		xfs_buf_relse(bp);
+		return NULL;
+	}
+	return bp;
 }

 xfs_buf_t *
@@ -707,9 +767,10 @@ xfs_buf_associate_memory(
 }

 xfs_buf_t *
-xfs_buf_get_noaddr(
-	size_t			len,
-	xfs_buftarg_t		*target)
+xfs_buf_get_uncached(
+	struct xfs_buftarg	*target,
+	size_t			len,
+	int			flags)
 {
 	unsigned long		page_count = PAGE_ALIGN(len) >> PAGE_SHIFT;
 	int			error, i;
@@ -725,7 +786,7 @@ xfs_buf_get_uncached(
 		goto fail_free_buf;

 	for (i = 0; i < page_count; i++) {
-		bp->b_pages[i] = alloc_page(GFP_KERNEL);
+		bp->b_pages[i] = alloc_page(xb_to_gfp(flags));
 		if (!bp->b_pages[i])
 			goto fail_free_mem;
 	}
@@ -740,7 +801,7 @@ xfs_buf_get_uncached(
 	xfs_buf_unlock(bp);

-	trace_xfs_buf_get_noaddr(bp, _RET_IP_);
+	trace_xfs_buf_get_uncached(bp, _RET_IP_);
 	return bp;

 fail_free_mem:
@@ -774,29 +835,30 @@ void
 xfs_buf_rele(
 	xfs_buf_t		*bp)
 {
-	xfs_bufhash_t		*hash = bp->b_hash;
+	struct xfs_perag	*pag = bp->b_pag;

 	trace_xfs_buf_rele(bp, _RET_IP_);

-	if (unlikely(!hash)) {
+	if (!pag) {
 		ASSERT(!bp->b_relse);
+		ASSERT(RB_EMPTY_NODE(&bp->b_rbnode));
 		if (atomic_dec_and_test(&bp->b_hold))
 			xfs_buf_free(bp);
 		return;
 	}

+	ASSERT(!RB_EMPTY_NODE(&bp->b_rbnode));
 	ASSERT(atomic_read(&bp->b_hold) > 0);
-	if (atomic_dec_and_lock(&bp->b_hold, &hash->bh_lock)) {
+	if (atomic_dec_and_lock(&bp->b_hold, &pag->pag_buf_lock)) {
 		if (bp->b_relse) {
 			atomic_inc(&bp->b_hold);
-			spin_unlock(&hash->bh_lock);
-			(*(bp->b_relse)) (bp);
-		} else if (bp->b_flags & XBF_FS_MANAGED) {
-			spin_unlock(&hash->bh_lock);
+			spin_unlock(&pag->pag_buf_lock);
+			bp->b_relse(bp);
 		} else {
 			ASSERT(!(bp->b_flags & (XBF_DELWRI|_XBF_DELWRI_Q)));
-			list_del_init(&bp->b_hash_list);
-			spin_unlock(&hash->bh_lock);
+			rb_erase(&bp->b_rbnode, &pag->pag_buf_tree);
+			spin_unlock(&pag->pag_buf_lock);
+			xfs_perag_put(pag);
 			xfs_buf_free(bp);
 		}
 	}
@@ -859,7 +921,7 @@ xfs_buf_lock(
 	trace_xfs_buf_lock(bp, _RET_IP_);

 	if (atomic_read(&bp->b_pin_count) && (bp->b_flags & XBF_STALE))
-		xfs_log_force(bp->b_mount, 0);
+		xfs_log_force(bp->b_target->bt_mount, 0);
 	if (atomic_read(&bp->b_io_remaining))
 		blk_run_address_space(bp->b_target->bt_mapping);
 	down(&bp->b_sema);
@@ -970,7 +1032,6 @@ xfs_bwrite(
 {
 	int			error;

-	bp->b_mount = mp;
 	bp->b_flags |= XBF_WRITE;
 	bp->b_flags &= ~(XBF_ASYNC | XBF_READ);
@@ -991,8 +1052,6 @@ xfs_bdwrite(
 {
 	trace_xfs_buf_bdwrite(bp, _RET_IP_);

-	bp->b_mount = mp;
-
 	bp->b_flags &= ~XBF_READ;
 	bp->b_flags |= (XBF_DELWRI | XBF_ASYNC);
@@ -1001,7 +1060,7 @@ xfs_bdwrite(

 /*
  * Called when we want to stop a buffer from getting written or read.
- * We attach the EIO error, muck with its flags, and call biodone
+ * We attach the EIO error, muck with its flags, and call xfs_buf_ioend
  * so that the proper iodone callbacks get called.
  */
 STATIC int
@@ -1018,21 +1077,21 @@ xfs_bioerror(
 	XFS_BUF_ERROR(bp, EIO);

 	/*
-	 * We're calling biodone, so delete XBF_DONE flag.
+	 * We're calling xfs_buf_ioend, so delete XBF_DONE flag.
 	 */
 	XFS_BUF_UNREAD(bp);
 	XFS_BUF_UNDELAYWRITE(bp);
 	XFS_BUF_UNDONE(bp);
 	XFS_BUF_STALE(bp);

-	xfs_biodone(bp);
+	xfs_buf_ioend(bp, 0);

 	return EIO;
 }

 /*
  * Same as xfs_bioerror, except that we are releasing the buffer
- * here ourselves, and avoiding the biodone call.
+ * here ourselves, and avoiding the xfs_buf_ioend call.
  * This is meant for userdata errors; metadata bufs come with
  * iodone functions attached, so that we can track down errors.
  */
@@ -1081,7 +1140,7 @@ int
 xfs_bdstrat_cb(
 	struct xfs_buf	*bp)
 {
-	if (XFS_FORCED_SHUTDOWN(bp->b_mount)) {
+	if (XFS_FORCED_SHUTDOWN(bp->b_target->bt_mount)) {
 		trace_xfs_bdstrat_shut(bp, _RET_IP_);
 		/*
 		 * Metadata write that didn't get logged but
@@ -1387,62 +1446,24 @@ xfs_buf_iomove(
  */
 void
 xfs_wait_buftarg(
-	xfs_buftarg_t	*btp)
+	struct xfs_buftarg	*btp)
 {
-	xfs_buf_t	*bp, *n;
-	xfs_bufhash_t	*hash;
-	uint		i;
+	struct xfs_perag	*pag;
+	uint			i;

-	for (i = 0; i < (1 << btp->bt_hashshift); i++) {
-		hash = &btp->bt_hash[i];
-again:
-		spin_lock(&hash->bh_lock);
-		list_for_each_entry_safe(bp, n, &hash->bh_list, b_hash_list) {
-			ASSERT(btp == bp->b_target);
-			if (!(bp->b_flags & XBF_FS_MANAGED)) {
-				spin_unlock(&hash->bh_lock);
-				/*
-				 * Catch superblock reference count leaks
-				 * immediately
-				 */
-				BUG_ON(bp->b_bn == 0);
-				delay(100);
-				goto again;
-			}
+	for (i = 0; i < btp->bt_mount->m_sb.sb_agcount; i++) {
+		pag = xfs_perag_get(btp->bt_mount, i);
+		spin_lock(&pag->pag_buf_lock);
+		while (rb_first(&pag->pag_buf_tree)) {
+			spin_unlock(&pag->pag_buf_lock);
+			delay(100);
+			spin_lock(&pag->pag_buf_lock);
 		}
-		spin_unlock(&hash->bh_lock);
+		spin_unlock(&pag->pag_buf_lock);
+		xfs_perag_put(pag);
 	}
 }

-/*
- * Allocate buffer hash table for a given target.
- * For devices containing metadata (i.e. not the log/realtime devices)
- * we need to allocate a much larger hash table.
- */
-STATIC void
-xfs_alloc_bufhash(
-	xfs_buftarg_t		*btp,
-	int			external)
-{
-	unsigned int		i;
-
-	btp->bt_hashshift = external ? 3 : 12;	/* 8 or 4096 buckets */
-	btp->bt_hash = kmem_zalloc_large((1 << btp->bt_hashshift) *
-					 sizeof(xfs_bufhash_t));
-	for (i = 0; i < (1 << btp->bt_hashshift); i++) {
-		spin_lock_init(&btp->bt_hash[i].bh_lock);
-		INIT_LIST_HEAD(&btp->bt_hash[i].bh_list);
-	}
-}
-
-STATIC void
-xfs_free_bufhash(
-	xfs_buftarg_t		*btp)
-{
-	kmem_free_large(btp->bt_hash);
-	btp->bt_hash = NULL;
-}
-
 /*
  * buftarg list for delwrite queue processing
  */
@@ -1475,7 +1496,6 @@ xfs_free_buftarg(
 	xfs_flush_buftarg(btp, 1);
 	if (mp->m_flags & XFS_MOUNT_BARRIER)
 		xfs_blkdev_issue_flush(btp);
-	xfs_free_bufhash(btp);
 	iput(btp->bt_mapping->host);

 	/* Unregister the buftarg first so that we don't get a
@@ -1597,6 +1617,7 @@ out_error:

 xfs_buftarg_t *
 xfs_alloc_buftarg(
+	struct xfs_mount	*mp,
 	struct block_device	*bdev,
 	int			external,
 	const char		*fsname)
@@ -1605,6 +1626,7 @@ xfs_alloc_buftarg(
 	btp = kmem_zalloc(sizeof(*btp), KM_SLEEP);

+	btp->bt_mount = mp;
 	btp->bt_dev =  bdev->bd_dev;
 	btp->bt_bdev = bdev;
 	if (xfs_setsize_buftarg_early(btp, bdev))
@@ -1613,7 +1635,6 @@ xfs_alloc_buftarg(
 		goto error;
 	if (xfs_alloc_delwrite_queue(btp, fsname))
 		goto error;
-	xfs_alloc_bufhash(btp, external);
 	return btp;

 error:
@@ -1904,7 +1925,7 @@ xfs_flush_buftarg(
 		bp = list_first_entry(&wait_list, struct xfs_buf, b_list);

 		list_del_init(&bp->b_list);
-		xfs_iowait(bp);
+		xfs_buf_iowait(bp);
 		xfs_buf_relse(bp);
 	}
 }
+33 -43
@@ -51,7 +51,6 @@ typedef enum {
#define XBF_DONE (1 << 5) /* all pages in the buffer uptodate */ #define XBF_DONE (1 << 5) /* all pages in the buffer uptodate */
#define XBF_DELWRI (1 << 6) /* buffer has dirty pages */ #define XBF_DELWRI (1 << 6) /* buffer has dirty pages */
#define XBF_STALE (1 << 7) /* buffer has been staled, do not find it */ #define XBF_STALE (1 << 7) /* buffer has been staled, do not find it */
#define XBF_FS_MANAGED (1 << 8) /* filesystem controls freeing memory */
#define XBF_ORDERED (1 << 11)/* use ordered writes */ #define XBF_ORDERED (1 << 11)/* use ordered writes */
#define XBF_READ_AHEAD (1 << 12)/* asynchronous read-ahead */ #define XBF_READ_AHEAD (1 << 12)/* asynchronous read-ahead */
#define XBF_LOG_BUFFER (1 << 13)/* this is a buffer used for the log */ #define XBF_LOG_BUFFER (1 << 13)/* this is a buffer used for the log */
@@ -96,7 +95,6 @@ typedef unsigned int xfs_buf_flags_t;
{ XBF_DONE, "DONE" }, \ { XBF_DONE, "DONE" }, \
{ XBF_DELWRI, "DELWRI" }, \ { XBF_DELWRI, "DELWRI" }, \
{ XBF_STALE, "STALE" }, \ { XBF_STALE, "STALE" }, \
{ XBF_FS_MANAGED, "FS_MANAGED" }, \
{ XBF_ORDERED, "ORDERED" }, \ { XBF_ORDERED, "ORDERED" }, \
{ XBF_READ_AHEAD, "READ_AHEAD" }, \ { XBF_READ_AHEAD, "READ_AHEAD" }, \
{ XBF_LOCK, "LOCK" }, /* should never be set */\ { XBF_LOCK, "LOCK" }, /* should never be set */\
@@ -123,14 +121,11 @@ typedef struct xfs_buftarg {
dev_t bt_dev; dev_t bt_dev;
struct block_device *bt_bdev; struct block_device *bt_bdev;
struct address_space *bt_mapping; struct address_space *bt_mapping;
struct xfs_mount *bt_mount;
unsigned int bt_bsize; unsigned int bt_bsize;
unsigned int bt_sshift; unsigned int bt_sshift;
size_t bt_smask; size_t bt_smask;
/* per device buffer hash table */
uint bt_hashshift;
xfs_bufhash_t *bt_hash;
/* per device delwri queue */ /* per device delwri queue */
struct task_struct *bt_task; struct task_struct *bt_task;
struct list_head bt_list; struct list_head bt_list;
@@ -158,34 +153,41 @@ typedef int (*xfs_buf_bdstrat_t)(struct xfs_buf *);
#define XB_PAGES 2 #define XB_PAGES 2
typedef struct xfs_buf { typedef struct xfs_buf {
struct semaphore b_sema; /* semaphore for lockables */ /*
unsigned long b_queuetime; /* time buffer was queued */ * first cacheline holds all the fields needed for an uncontended cache
atomic_t b_pin_count; /* pin count */ * hit to be fully processed. The semaphore straddles the cacheline
wait_queue_head_t b_waiters; /* unpin waiters */ * boundary, but the counter and lock sits on the first cacheline,
struct list_head b_list; * which is the only bit that is touched if we hit the semaphore
xfs_buf_flags_t b_flags; /* status flags */ * fast-path on locking.
struct list_head b_hash_list; /* hash table list */ */
xfs_bufhash_t *b_hash; /* hash table list start */ struct rb_node b_rbnode; /* rbtree node */
xfs_buftarg_t *b_target; /* buffer target (device) */
atomic_t b_hold; /* reference count */
xfs_daddr_t b_bn; /* block number for I/O */
xfs_off_t b_file_offset; /* offset in file */ xfs_off_t b_file_offset; /* offset in file */
size_t b_buffer_length;/* size of buffer in bytes */ size_t b_buffer_length;/* size of buffer in bytes */
atomic_t b_hold; /* reference count */
xfs_buf_flags_t b_flags; /* status flags */
struct semaphore b_sema; /* semaphore for lockables */
wait_queue_head_t b_waiters; /* unpin waiters */
struct list_head b_list;
struct xfs_perag *b_pag; /* contains rbtree root */
xfs_buftarg_t *b_target; /* buffer target (device) */
xfs_daddr_t b_bn; /* block number for I/O */
size_t b_count_desired;/* desired transfer size */ size_t b_count_desired;/* desired transfer size */
void *b_addr; /* virtual address of buffer */ void *b_addr; /* virtual address of buffer */
struct work_struct b_iodone_work; struct work_struct b_iodone_work;
atomic_t b_io_remaining; /* #outstanding I/O requests */
xfs_buf_iodone_t b_iodone; /* I/O completion function */ xfs_buf_iodone_t b_iodone; /* I/O completion function */
xfs_buf_relse_t b_relse; /* releasing function */ xfs_buf_relse_t b_relse; /* releasing function */
struct completion b_iowait; /* queue for I/O waiters */ struct completion b_iowait; /* queue for I/O waiters */
void *b_fspriv; void *b_fspriv;
void *b_fspriv2; void *b_fspriv2;
struct xfs_mount *b_mount;
unsigned short b_error; /* error code on I/O */
unsigned int b_page_count; /* size of page array */
unsigned int b_offset; /* page offset in first page */
struct page **b_pages; /* array of page pointers */ struct page **b_pages; /* array of page pointers */
struct page *b_page_array[XB_PAGES]; /* inline pages */ struct page *b_page_array[XB_PAGES]; /* inline pages */
unsigned long b_queuetime; /* time buffer was queued */
atomic_t b_pin_count; /* pin count */
atomic_t b_io_remaining; /* #outstanding I/O requests */
unsigned int b_page_count; /* size of page array */
unsigned int b_offset; /* page offset in first page */
unsigned short b_error; /* error code on I/O */
#ifdef XFS_BUF_LOCK_TRACKING #ifdef XFS_BUF_LOCK_TRACKING
int b_last_holder; int b_last_holder;
#endif #endif
@@ -204,11 +206,13 @@ extern xfs_buf_t *xfs_buf_read(xfs_buftarg_t *, xfs_off_t, size_t,
xfs_buf_flags_t); xfs_buf_flags_t);
extern xfs_buf_t *xfs_buf_get_empty(size_t, xfs_buftarg_t *); extern xfs_buf_t *xfs_buf_get_empty(size_t, xfs_buftarg_t *);
extern xfs_buf_t *xfs_buf_get_noaddr(size_t, xfs_buftarg_t *); extern xfs_buf_t *xfs_buf_get_uncached(struct xfs_buftarg *, size_t, int);
extern int xfs_buf_associate_memory(xfs_buf_t *, void *, size_t); extern int xfs_buf_associate_memory(xfs_buf_t *, void *, size_t);
extern void xfs_buf_hold(xfs_buf_t *); extern void xfs_buf_hold(xfs_buf_t *);
extern void xfs_buf_readahead(xfs_buftarg_t *, xfs_off_t, size_t, extern void xfs_buf_readahead(xfs_buftarg_t *, xfs_off_t, size_t);
xfs_buf_flags_t); struct xfs_buf *xfs_buf_read_uncached(struct xfs_mount *mp,
struct xfs_buftarg *target,
xfs_daddr_t daddr, size_t length, int flags);
/* Releasing Buffers */ /* Releasing Buffers */
extern void xfs_buf_free(xfs_buf_t *); extern void xfs_buf_free(xfs_buf_t *);
@@ -233,6 +237,8 @@ extern int xfs_buf_iorequest(xfs_buf_t *);
extern int xfs_buf_iowait(xfs_buf_t *); extern int xfs_buf_iowait(xfs_buf_t *);
extern void xfs_buf_iomove(xfs_buf_t *, size_t, size_t, void *, extern void xfs_buf_iomove(xfs_buf_t *, size_t, size_t, void *,
xfs_buf_rw_t); xfs_buf_rw_t);
#define xfs_buf_zero(bp, off, len) \
xfs_buf_iomove((bp), (off), (len), NULL, XBRW_ZERO)
static inline int xfs_buf_geterror(xfs_buf_t *bp) static inline int xfs_buf_geterror(xfs_buf_t *bp)
{ {
@@ -267,8 +273,6 @@ extern void xfs_buf_terminate(void);
XFS_BUF_DONE(bp); \ XFS_BUF_DONE(bp); \
} while (0) } while (0)
#define XFS_BUF_UNMANAGE(bp) ((bp)->b_flags &= ~XBF_FS_MANAGED)
#define XFS_BUF_DELAYWRITE(bp) ((bp)->b_flags |= XBF_DELWRI) #define XFS_BUF_DELAYWRITE(bp) ((bp)->b_flags |= XBF_DELWRI)
#define XFS_BUF_UNDELAYWRITE(bp) xfs_buf_delwri_dequeue(bp) #define XFS_BUF_UNDELAYWRITE(bp) xfs_buf_delwri_dequeue(bp)
#define XFS_BUF_ISDELAYWRITE(bp) ((bp)->b_flags & XBF_DELWRI) #define XFS_BUF_ISDELAYWRITE(bp) ((bp)->b_flags & XBF_DELWRI)
@@ -347,25 +351,11 @@ static inline void xfs_buf_relse(xfs_buf_t *bp)
xfs_buf_rele(bp); xfs_buf_rele(bp);
} }
#define xfs_biodone(bp) xfs_buf_ioend(bp, 0)
#define xfs_biomove(bp, off, len, data, rw) \
xfs_buf_iomove((bp), (off), (len), (data), \
((rw) == XBF_WRITE) ? XBRW_WRITE : XBRW_READ)
#define xfs_biozero(bp, off, len) \
xfs_buf_iomove((bp), (off), (len), NULL, XBRW_ZERO)
#define xfs_iowait(bp) xfs_buf_iowait(bp)
#define xfs_baread(target, rablkno, ralen) \
xfs_buf_readahead((target), (rablkno), (ralen), XBF_DONT_BLOCK)
/* /*
* Handling of buftargs. * Handling of buftargs.
*/ */
extern xfs_buftarg_t *xfs_alloc_buftarg(struct block_device *, int, const char *); extern xfs_buftarg_t *xfs_alloc_buftarg(struct xfs_mount *,
struct block_device *, int, const char *);
extern void xfs_free_buftarg(struct xfs_mount *, struct xfs_buftarg *); extern void xfs_free_buftarg(struct xfs_mount *, struct xfs_buftarg *);
extern void xfs_wait_buftarg(xfs_buftarg_t *); extern void xfs_wait_buftarg(xfs_buftarg_t *);
extern int xfs_setsize_buftarg(xfs_buftarg_t *, unsigned int, unsigned int); extern int xfs_setsize_buftarg(xfs_buftarg_t *, unsigned int, unsigned int);
-28
@@ -1,28 +0,0 @@
/*
* Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it would be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef __XFS_CRED_H__
#define __XFS_CRED_H__
#include <linux/capability.h>
/*
* Credentials
*/
typedef const struct cred cred_t;
#endif /* __XFS_CRED_H__ */
+15 -16
@@ -32,10 +32,9 @@ xfs_tosspages(
xfs_off_t last, xfs_off_t last,
int fiopt) int fiopt)
{ {
struct address_space *mapping = VFS_I(ip)->i_mapping; /* can't toss partial tail pages, so mask them out */
last &= ~(PAGE_SIZE - 1);
if (mapping->nrpages) truncate_inode_pages_range(VFS_I(ip)->i_mapping, first, last - 1);
truncate_inode_pages(mapping, first);
} }
int int
@@ -50,12 +49,11 @@ xfs_flushinval_pages(
trace_xfs_pagecache_inval(ip, first, last); trace_xfs_pagecache_inval(ip, first, last);
if (mapping->nrpages) { xfs_iflags_clear(ip, XFS_ITRUNCATED);
xfs_iflags_clear(ip, XFS_ITRUNCATED); ret = filemap_write_and_wait_range(mapping, first,
ret = filemap_write_and_wait(mapping); last == -1 ? LLONG_MAX : last);
if (!ret) if (!ret)
truncate_inode_pages(mapping, first); truncate_inode_pages_range(mapping, first, last);
}
return -ret; return -ret;
} }
@@ -71,10 +69,9 @@ xfs_flush_pages(
int ret = 0; int ret = 0;
int ret2; int ret2;
if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) { xfs_iflags_clear(ip, XFS_ITRUNCATED);
xfs_iflags_clear(ip, XFS_ITRUNCATED); ret = -filemap_fdatawrite_range(mapping, first,
ret = -filemap_fdatawrite(mapping); last == -1 ? LLONG_MAX : last);
}
if (flags & XBF_ASYNC) if (flags & XBF_ASYNC)
return ret; return ret;
ret2 = xfs_wait_on_pages(ip, first, last); ret2 = xfs_wait_on_pages(ip, first, last);
@@ -91,7 +88,9 @@ xfs_wait_on_pages(
{ {
struct address_space *mapping = VFS_I(ip)->i_mapping; struct address_space *mapping = VFS_I(ip)->i_mapping;
if (mapping_tagged(mapping, PAGECACHE_TAG_WRITEBACK)) if (mapping_tagged(mapping, PAGECACHE_TAG_WRITEBACK)) {
return -filemap_fdatawait(mapping); return -filemap_fdatawait_range(mapping, first,
last == -1 ? ip->i_size - 1 : last);
}
return 0; return 0;
} }
-1
@@ -16,7 +16,6 @@
  * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
  */
 #include "xfs.h"
-#include "xfs_cred.h"
 #include "xfs_sysctl.h"

 /*
-23
@@ -1,23 +0,0 @@
/*
* Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it would be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef __XFS_GLOBALS_H__
#define __XFS_GLOBALS_H__
extern uint64_t xfs_panic_mask; /* set to cause more panics */
#endif /* __XFS_GLOBALS_H__ */
+10 -9
@@ -790,7 +790,7 @@ xfs_ioc_fsgetxattr(
xfs_ilock(ip, XFS_ILOCK_SHARED); xfs_ilock(ip, XFS_ILOCK_SHARED);
fa.fsx_xflags = xfs_ip2xflags(ip); fa.fsx_xflags = xfs_ip2xflags(ip);
fa.fsx_extsize = ip->i_d.di_extsize << ip->i_mount->m_sb.sb_blocklog; fa.fsx_extsize = ip->i_d.di_extsize << ip->i_mount->m_sb.sb_blocklog;
fa.fsx_projid = ip->i_d.di_projid; fa.fsx_projid = xfs_get_projid(ip);
if (attr) { if (attr) {
if (ip->i_afp) { if (ip->i_afp) {
@@ -909,10 +909,10 @@ xfs_ioctl_setattr(
return XFS_ERROR(EIO); return XFS_ERROR(EIO);
/* /*
* Disallow 32bit project ids because on-disk structure * Disallow 32bit project ids when projid32bit feature is not enabled.
* is 16bit only.
*/ */
if ((mask & FSX_PROJID) && (fa->fsx_projid > (__uint16_t)-1)) if ((mask & FSX_PROJID) && (fa->fsx_projid > (__uint16_t)-1) &&
!xfs_sb_version_hasprojid32bit(&ip->i_mount->m_sb))
return XFS_ERROR(EINVAL); return XFS_ERROR(EINVAL);
/* /*
@@ -961,7 +961,7 @@ xfs_ioctl_setattr(
if (mask & FSX_PROJID) { if (mask & FSX_PROJID) {
if (XFS_IS_QUOTA_RUNNING(mp) && if (XFS_IS_QUOTA_RUNNING(mp) &&
XFS_IS_PQUOTA_ON(mp) && XFS_IS_PQUOTA_ON(mp) &&
ip->i_d.di_projid != fa->fsx_projid) { xfs_get_projid(ip) != fa->fsx_projid) {
ASSERT(tp); ASSERT(tp);
code = xfs_qm_vop_chown_reserve(tp, ip, udqp, gdqp, code = xfs_qm_vop_chown_reserve(tp, ip, udqp, gdqp,
capable(CAP_FOWNER) ? capable(CAP_FOWNER) ?
@@ -1063,12 +1063,12 @@ xfs_ioctl_setattr(
* Change the ownerships and register quota modifications * Change the ownerships and register quota modifications
* in the transaction. * in the transaction.
*/ */
if (ip->i_d.di_projid != fa->fsx_projid) { if (xfs_get_projid(ip) != fa->fsx_projid) {
if (XFS_IS_QUOTA_RUNNING(mp) && XFS_IS_PQUOTA_ON(mp)) { if (XFS_IS_QUOTA_RUNNING(mp) && XFS_IS_PQUOTA_ON(mp)) {
olddquot = xfs_qm_vop_chown(tp, ip, olddquot = xfs_qm_vop_chown(tp, ip,
&ip->i_gdquot, gdqp); &ip->i_gdquot, gdqp);
} }
ip->i_d.di_projid = fa->fsx_projid; xfs_set_projid(ip, fa->fsx_projid);
/* /*
* We may have to rev the inode as well as * We may have to rev the inode as well as
@@ -1088,8 +1088,8 @@ xfs_ioctl_setattr(
xfs_diflags_to_linux(ip); xfs_diflags_to_linux(ip);
} }
xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_CHG);
xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
xfs_ichgtime(ip, XFS_ICHGTIME_CHG);
XFS_STATS_INC(xs_ig_attrchg); XFS_STATS_INC(xs_ig_attrchg);
@@ -1301,7 +1301,8 @@ xfs_file_ioctl(
case XFS_IOC_ALLOCSP64: case XFS_IOC_ALLOCSP64:
case XFS_IOC_FREESP64: case XFS_IOC_FREESP64:
case XFS_IOC_RESVSP64: case XFS_IOC_RESVSP64:
case XFS_IOC_UNRESVSP64: { case XFS_IOC_UNRESVSP64:
case XFS_IOC_ZERO_RANGE: {
xfs_flock64_t bf; xfs_flock64_t bf;
if (copy_from_user(&bf, arg, sizeof(bf))) if (copy_from_user(&bf, arg, sizeof(bf)))
+4 -1
@@ -164,7 +164,8 @@ xfs_ioctl32_bstat_copyin(
 	    get_user(bstat->bs_extsize,	&bstat32->bs_extsize)	||
 	    get_user(bstat->bs_extents,	&bstat32->bs_extents)	||
 	    get_user(bstat->bs_gen,	&bstat32->bs_gen)	||
-	    get_user(bstat->bs_projid,	&bstat32->bs_projid)	||
+	    get_user(bstat->bs_projid_lo, &bstat32->bs_projid_lo) ||
+	    get_user(bstat->bs_projid_hi, &bstat32->bs_projid_hi) ||
 	    get_user(bstat->bs_dmevmask, &bstat32->bs_dmevmask)	||
 	    get_user(bstat->bs_dmstate,	&bstat32->bs_dmstate)	||
 	    get_user(bstat->bs_aextents, &bstat32->bs_aextents))
@@ -218,6 +219,7 @@ xfs_bulkstat_one_fmt_compat(
 	    put_user(buffer->bs_extents,  &p32->bs_extents)	||
 	    put_user(buffer->bs_gen,	  &p32->bs_gen)		||
 	    put_user(buffer->bs_projid,	  &p32->bs_projid)	||
+	    put_user(buffer->bs_projid_hi, &p32->bs_projid_hi)	||
 	    put_user(buffer->bs_dmevmask, &p32->bs_dmevmask)	||
 	    put_user(buffer->bs_dmstate,  &p32->bs_dmstate)	||
 	    put_user(buffer->bs_aextents, &p32->bs_aextents))
@@ -574,6 +576,7 @@ xfs_file_compat_ioctl(
 	case XFS_IOC_FSGEOMETRY_V1:
 	case XFS_IOC_FSGROWFSDATA:
 	case XFS_IOC_FSGROWFSRT:
+	case XFS_IOC_ZERO_RANGE:
 		return xfs_file_ioctl(filp, cmd, p);
 #else
 	case XFS_IOC_ALLOCSP_32:
+4 -2
@@ -65,8 +65,10 @@ typedef struct compat_xfs_bstat {
 	__s32		bs_extsize;	/* extent size */
 	__s32		bs_extents;	/* number of extents */
 	__u32		bs_gen;		/* generation count */
-	__u16		bs_projid;	/* project id */
-	unsigned char	bs_pad[14];	/* pad space, unused */
+	__u16		bs_projid_lo;	/* lower part of project id */
+#define	bs_projid	bs_projid_lo	/* (previously just bs_projid) */
+	__u16		bs_projid_hi;	/* high part of project id */
+	unsigned char	bs_pad[12];	/* pad space, unused */
 	__u32		bs_dmevmask;	/* DMIG event mask */
 	__u16		bs_dmstate;	/* DMIG state info */
 	__u16		bs_aextents;	/* attribute number of extents */
+2 -37
@@ -94,41 +94,6 @@ xfs_mark_inode_dirty(
mark_inode_dirty(inode); mark_inode_dirty(inode);
} }
/*
* Change the requested timestamp in the given inode.
* We don't lock across timestamp updates, and we don't log them but
* we do record the fact that there is dirty information in core.
*/
void
xfs_ichgtime(
xfs_inode_t *ip,
int flags)
{
struct inode *inode = VFS_I(ip);
timespec_t tv;
int sync_it = 0;
tv = current_fs_time(inode->i_sb);
if ((flags & XFS_ICHGTIME_MOD) &&
!timespec_equal(&inode->i_mtime, &tv)) {
inode->i_mtime = tv;
sync_it = 1;
}
if ((flags & XFS_ICHGTIME_CHG) &&
!timespec_equal(&inode->i_ctime, &tv)) {
inode->i_ctime = tv;
sync_it = 1;
}
/*
* Update complete - now make sure everyone knows that the inode
* is dirty.
*/
if (sync_it)
xfs_mark_inode_dirty_sync(ip);
}
/* /*
* Hook in SELinux. This is not quite correct yet, what we really need * Hook in SELinux. This is not quite correct yet, what we really need
* here (as we do for default ACLs) is a mechanism by which creation of * here (as we do for default ACLs) is a mechanism by which creation of
@@ -224,7 +189,7 @@ xfs_vn_mknod(
} }
xfs_dentry_to_name(&name, dentry); xfs_dentry_to_name(&name, dentry);
error = xfs_create(XFS_I(dir), &name, mode, rdev, &ip, NULL); error = xfs_create(XFS_I(dir), &name, mode, rdev, &ip);
if (unlikely(error)) if (unlikely(error))
goto out_free_acl; goto out_free_acl;
@@ -397,7 +362,7 @@ xfs_vn_symlink(
(irix_symlink_mode ? 0777 & ~current_umask() : S_IRWXUGO); (irix_symlink_mode ? 0777 & ~current_umask() : S_IRWXUGO);
xfs_dentry_to_name(&name, dentry); xfs_dentry_to_name(&name, dentry);
error = xfs_symlink(XFS_I(dir), &name, symname, mode, &cip, NULL); error = xfs_symlink(XFS_I(dir), &name, symname, mode, &cip);
if (unlikely(error)) if (unlikely(error))
goto out; goto out;
+2 -3
@@ -71,6 +71,7 @@
#include <linux/random.h> #include <linux/random.h>
#include <linux/ctype.h> #include <linux/ctype.h>
#include <linux/writeback.h> #include <linux/writeback.h>
#include <linux/capability.h>
#include <asm/page.h> #include <asm/page.h>
#include <asm/div64.h> #include <asm/div64.h>
@@ -79,14 +80,12 @@
#include <asm/byteorder.h> #include <asm/byteorder.h>
#include <asm/unaligned.h> #include <asm/unaligned.h>
#include <xfs_cred.h>
#include <xfs_vnode.h> #include <xfs_vnode.h>
#include <xfs_stats.h> #include <xfs_stats.h>
#include <xfs_sysctl.h> #include <xfs_sysctl.h>
#include <xfs_iops.h> #include <xfs_iops.h>
#include <xfs_aops.h> #include <xfs_aops.h>
#include <xfs_super.h> #include <xfs_super.h>
#include <xfs_globals.h>
#include <xfs_buf.h> #include <xfs_buf.h>
/* /*
@@ -144,7 +143,7 @@
#define SYNCHRONIZE() barrier() #define SYNCHRONIZE() barrier()
#define __return_address __builtin_return_address(0) #define __return_address __builtin_return_address(0)
#define dfltprid 0 #define XFS_PROJID_DEFAULT 0
#define MAXPATHLEN 1024 #define MAXPATHLEN 1024
#define MIN(a,b) (min(a,b)) #define MIN(a,b) (min(a,b))
+11 -13
@@ -44,7 +44,6 @@
#include "xfs_buf_item.h" #include "xfs_buf_item.h"
#include "xfs_utils.h" #include "xfs_utils.h"
#include "xfs_vnodeops.h" #include "xfs_vnodeops.h"
#include "xfs_version.h"
#include "xfs_log_priv.h" #include "xfs_log_priv.h"
#include "xfs_trans_priv.h" #include "xfs_trans_priv.h"
#include "xfs_filestream.h" #include "xfs_filestream.h"
@@ -645,7 +644,7 @@ xfs_barrier_test(
XFS_BUF_ORDERED(sbp); XFS_BUF_ORDERED(sbp);
xfsbdstrat(mp, sbp); xfsbdstrat(mp, sbp);
error = xfs_iowait(sbp); error = xfs_buf_iowait(sbp);
/* /*
* Clear all the flags we set and possible error state in the * Clear all the flags we set and possible error state in the
@@ -757,18 +756,20 @@ xfs_open_devices(
* Setup xfs_mount buffer target pointers * Setup xfs_mount buffer target pointers
*/ */
error = ENOMEM; error = ENOMEM;
mp->m_ddev_targp = xfs_alloc_buftarg(ddev, 0, mp->m_fsname); mp->m_ddev_targp = xfs_alloc_buftarg(mp, ddev, 0, mp->m_fsname);
if (!mp->m_ddev_targp) if (!mp->m_ddev_targp)
goto out_close_rtdev; goto out_close_rtdev;
if (rtdev) { if (rtdev) {
mp->m_rtdev_targp = xfs_alloc_buftarg(rtdev, 1, mp->m_fsname); mp->m_rtdev_targp = xfs_alloc_buftarg(mp, rtdev, 1,
mp->m_fsname);
if (!mp->m_rtdev_targp) if (!mp->m_rtdev_targp)
goto out_free_ddev_targ; goto out_free_ddev_targ;
} }
if (logdev && logdev != ddev) { if (logdev && logdev != ddev) {
mp->m_logdev_targp = xfs_alloc_buftarg(logdev, 1, mp->m_fsname); mp->m_logdev_targp = xfs_alloc_buftarg(mp, logdev, 1,
mp->m_fsname);
if (!mp->m_logdev_targp) if (!mp->m_logdev_targp)
goto out_free_rtdev_targ; goto out_free_rtdev_targ;
} else { } else {
@@ -971,12 +972,7 @@ xfs_fs_inode_init_once(
/* /*
* Dirty the XFS inode when mark_inode_dirty_sync() is called so that * Dirty the XFS inode when mark_inode_dirty_sync() is called so that
* we catch unlogged VFS level updates to the inode. Care must be taken * we catch unlogged VFS level updates to the inode.
* here - the transaction code calls mark_inode_dirty_sync() to mark the
* VFS inode dirty in a transaction and clears the i_update_core field;
* it must clear the field after calling mark_inode_dirty_sync() to
* correctly indicate that the dirty state has been propagated into the
* inode log item.
* *
* We need the barrier() to maintain correct ordering between unlogged * We need the barrier() to maintain correct ordering between unlogged
* updates and the transaction commit code that clears the i_update_core * updates and the transaction commit code that clears the i_update_core
@@ -1520,8 +1516,9 @@ xfs_fs_fill_super(
if (error) if (error)
goto out_free_fsname; goto out_free_fsname;
if (xfs_icsb_init_counters(mp)) error = xfs_icsb_init_counters(mp);
mp->m_flags |= XFS_MOUNT_NO_PERCPU_SB; if (error)
goto out_close_devices;
error = xfs_readsb(mp, flags); error = xfs_readsb(mp, flags);
if (error) if (error)
@@ -1582,6 +1579,7 @@ xfs_fs_fill_super(
xfs_freesb(mp); xfs_freesb(mp);
out_destroy_counters: out_destroy_counters:
xfs_icsb_destroy_counters(mp); xfs_icsb_destroy_counters(mp);
out_close_devices:
xfs_close_devices(mp); xfs_close_devices(mp);
out_free_fsname: out_free_fsname:
xfs_free_fsname(mp); xfs_free_fsname(mp);
+1
@@ -62,6 +62,7 @@ extern void xfs_qm_exit(void);
 # define XFS_DBG_STRING		"no debug"
 #endif

+#define XFS_VERSION_STRING	"SGI XFS"
 #define XFS_BUILD_OPTIONS	XFS_ACL_STRING \
 				XFS_SECURITY_STRING \
 				XFS_REALTIME_STRING \
File diff suppressed because it is too large.
+2 -2
@@ -47,10 +47,10 @@ void __xfs_inode_set_reclaim_tag(struct xfs_perag *pag, struct xfs_inode *ip);
 void __xfs_inode_clear_reclaim_tag(struct xfs_mount *mp, struct xfs_perag *pag,
 				struct xfs_inode *ip);

-int xfs_sync_inode_valid(struct xfs_inode *ip, struct xfs_perag *pag);
+int xfs_sync_inode_grab(struct xfs_inode *ip);
 int xfs_inode_ag_iterator(struct xfs_mount *mp,
 	int (*execute)(struct xfs_inode *ip, struct xfs_perag *pag, int flags),
-	int flags, int tag, int write_lock, int *nr_to_scan);
+	int flags);

 void xfs_inode_shrinker_register(struct xfs_mount *mp);
 void xfs_inode_shrinker_unregister(struct xfs_mount *mp);
+2 -2
@@ -124,7 +124,7 @@ DEFINE_EVENT(xfs_perag_class, name, \
 		 unsigned long caller_ip), \
 	TP_ARGS(mp, agno, refcount, caller_ip))
 DEFINE_PERAG_REF_EVENT(xfs_perag_get);
-DEFINE_PERAG_REF_EVENT(xfs_perag_get_reclaim);
+DEFINE_PERAG_REF_EVENT(xfs_perag_get_tag);
 DEFINE_PERAG_REF_EVENT(xfs_perag_put);
 DEFINE_PERAG_REF_EVENT(xfs_perag_set_reclaim);
 DEFINE_PERAG_REF_EVENT(xfs_perag_clear_reclaim);
@@ -330,7 +330,7 @@ DEFINE_BUF_EVENT(xfs_buf_iowait_done);
 DEFINE_BUF_EVENT(xfs_buf_delwri_queue);
 DEFINE_BUF_EVENT(xfs_buf_delwri_dequeue);
 DEFINE_BUF_EVENT(xfs_buf_delwri_split);
-DEFINE_BUF_EVENT(xfs_buf_get_noaddr);
+DEFINE_BUF_EVENT(xfs_buf_get_uncached);
 DEFINE_BUF_EVENT(xfs_bdstrat_shut);
 DEFINE_BUF_EVENT(xfs_buf_item_relse);
 DEFINE_BUF_EVENT(xfs_buf_item_iodone);
-29
@@ -1,29 +0,0 @@
/*
* Copyright (c) 2001-2002,2005 Silicon Graphics, Inc.
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it would be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef __XFS_VERSION_H__
#define __XFS_VERSION_H__
/*
* Dummy file that can contain a timestamp to put into the
* XFS init string, to help users keep track of what they're
* running
*/
#define XFS_VERSION_STRING "SGI XFS"
#endif /* __XFS_VERSION_H__ */
+77 -89
@@ -463,88 +463,69 @@ xfs_qm_dqtobp(
uint flags) uint flags)
{ {
xfs_bmbt_irec_t map; xfs_bmbt_irec_t map;
int nmaps, error; int nmaps = 1, error;
xfs_buf_t *bp; xfs_buf_t *bp;
xfs_inode_t *quotip; xfs_inode_t *quotip = XFS_DQ_TO_QIP(dqp);
xfs_mount_t *mp; xfs_mount_t *mp = dqp->q_mount;
xfs_disk_dquot_t *ddq; xfs_disk_dquot_t *ddq;
xfs_dqid_t id; xfs_dqid_t id = be32_to_cpu(dqp->q_core.d_id);
boolean_t newdquot;
xfs_trans_t *tp = (tpp ? *tpp : NULL); xfs_trans_t *tp = (tpp ? *tpp : NULL);
mp = dqp->q_mount; dqp->q_fileoffset = (xfs_fileoff_t)id / mp->m_quotainfo->qi_dqperchunk;
id = be32_to_cpu(dqp->q_core.d_id);
nmaps = 1;
newdquot = B_FALSE;
/* xfs_ilock(quotip, XFS_ILOCK_SHARED);
* If we don't know where the dquot lives, find out. if (XFS_IS_THIS_QUOTA_OFF(dqp)) {
*/
if (dqp->q_blkno == (xfs_daddr_t) 0) {
/* We use the id as an index */
dqp->q_fileoffset = (xfs_fileoff_t)id /
mp->m_quotainfo->qi_dqperchunk;
nmaps = 1;
quotip = XFS_DQ_TO_QIP(dqp);
xfs_ilock(quotip, XFS_ILOCK_SHARED);
/* /*
* Return if this type of quotas is turned off while we didn't * Return if this type of quotas is turned off while we
* have an inode lock * didn't have the quota inode lock.
*/ */
if (XFS_IS_THIS_QUOTA_OFF(dqp)) {
xfs_iunlock(quotip, XFS_ILOCK_SHARED);
return (ESRCH);
}
/*
* Find the block map; no allocations yet
*/
error = xfs_bmapi(NULL, quotip, dqp->q_fileoffset,
XFS_DQUOT_CLUSTER_SIZE_FSB,
XFS_BMAPI_METADATA,
NULL, 0, &map, &nmaps, NULL);
xfs_iunlock(quotip, XFS_ILOCK_SHARED); xfs_iunlock(quotip, XFS_ILOCK_SHARED);
if (error) return ESRCH;
return (error);
ASSERT(nmaps == 1);
ASSERT(map.br_blockcount == 1);
/*
* offset of dquot in the (fixed sized) dquot chunk.
*/
dqp->q_bufoffset = (id % mp->m_quotainfo->qi_dqperchunk) *
sizeof(xfs_dqblk_t);
if (map.br_startblock == HOLESTARTBLOCK) {
/*
* We don't allocate unless we're asked to
*/
if (!(flags & XFS_QMOPT_DQALLOC))
return (ENOENT);
ASSERT(tp);
if ((error = xfs_qm_dqalloc(tpp, mp, dqp, quotip,
dqp->q_fileoffset, &bp)))
return (error);
tp = *tpp;
newdquot = B_TRUE;
} else {
/*
* store the blkno etc so that we don't have to do the
* mapping all the time
*/
dqp->q_blkno = XFS_FSB_TO_DADDR(mp, map.br_startblock);
}
} }
ASSERT(dqp->q_blkno != DELAYSTARTBLOCK);
ASSERT(dqp->q_blkno != HOLESTARTBLOCK);
/* /*
* Read in the buffer, unless we've just done the allocation * Find the block map; no allocations yet
* (in which case we already have the buf).
*/ */
if (!newdquot) { error = xfs_bmapi(NULL, quotip, dqp->q_fileoffset,
XFS_DQUOT_CLUSTER_SIZE_FSB, XFS_BMAPI_METADATA,
NULL, 0, &map, &nmaps, NULL);
xfs_iunlock(quotip, XFS_ILOCK_SHARED);
if (error)
return error;
ASSERT(nmaps == 1);
ASSERT(map.br_blockcount == 1);
/*
* Offset of dquot in the (fixed sized) dquot chunk.
*/
dqp->q_bufoffset = (id % mp->m_quotainfo->qi_dqperchunk) *
sizeof(xfs_dqblk_t);
ASSERT(map.br_startblock != DELAYSTARTBLOCK);
if (map.br_startblock == HOLESTARTBLOCK) {
/*
* We don't allocate unless we're asked to
*/
if (!(flags & XFS_QMOPT_DQALLOC))
return ENOENT;
ASSERT(tp);
error = xfs_qm_dqalloc(tpp, mp, dqp, quotip,
dqp->q_fileoffset, &bp);
if (error)
return error;
tp = *tpp;
} else {
trace_xfs_dqtobp_read(dqp); trace_xfs_dqtobp_read(dqp);
/*
* store the blkno etc so that we don't have to do the
* mapping all the time
*/
dqp->q_blkno = XFS_FSB_TO_DADDR(mp, map.br_startblock);
error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp,
dqp->q_blkno, dqp->q_blkno,
mp->m_quotainfo->qi_dqchunklen, mp->m_quotainfo->qi_dqchunklen,
@@ -552,13 +533,14 @@ xfs_qm_dqtobp(
if (error || !bp) if (error || !bp)
return XFS_ERROR(error); return XFS_ERROR(error);
} }
ASSERT(XFS_BUF_ISBUSY(bp)); ASSERT(XFS_BUF_ISBUSY(bp));
ASSERT(XFS_BUF_VALUSEMA(bp) <= 0); ASSERT(XFS_BUF_VALUSEMA(bp) <= 0);
/* /*
* calculate the location of the dquot inside the buffer. * calculate the location of the dquot inside the buffer.
*/ */
ddq = (xfs_disk_dquot_t *)((char *)XFS_BUF_PTR(bp) + dqp->q_bufoffset); ddq = (struct xfs_disk_dquot *)(XFS_BUF_PTR(bp) + dqp->q_bufoffset);
/* /*
* A simple sanity check in case we got a corrupted dquot... * A simple sanity check in case we got a corrupted dquot...
@@ -1176,18 +1158,18 @@ xfs_qm_dqflush(
xfs_dquot_t *dqp, xfs_dquot_t *dqp,
uint flags) uint flags)
{ {
xfs_mount_t *mp; struct xfs_mount *mp = dqp->q_mount;
xfs_buf_t *bp; struct xfs_buf *bp;
xfs_disk_dquot_t *ddqp; struct xfs_disk_dquot *ddqp;
int error; int error;
ASSERT(XFS_DQ_IS_LOCKED(dqp)); ASSERT(XFS_DQ_IS_LOCKED(dqp));
ASSERT(!completion_done(&dqp->q_flush)); ASSERT(!completion_done(&dqp->q_flush));
trace_xfs_dqflush(dqp); trace_xfs_dqflush(dqp);
/* /*
* If not dirty, or it's pinned and we are not supposed to * If not dirty, or it's pinned and we are not supposed to block, nada.
* block, nada.
*/ */
if (!XFS_DQ_IS_DIRTY(dqp) || if (!XFS_DQ_IS_DIRTY(dqp) ||
(!(flags & SYNC_WAIT) && atomic_read(&dqp->q_pincount) > 0)) { (!(flags & SYNC_WAIT) && atomic_read(&dqp->q_pincount) > 0)) {
@@ -1201,40 +1183,46 @@ xfs_qm_dqflush(
* down forcibly. If that's the case we must not write this dquot * down forcibly. If that's the case we must not write this dquot
* to disk, because the log record didn't make it to disk! * to disk, because the log record didn't make it to disk!
*/ */
if (XFS_FORCED_SHUTDOWN(dqp->q_mount)) { if (XFS_FORCED_SHUTDOWN(mp)) {
dqp->dq_flags &= ~(XFS_DQ_DIRTY); dqp->dq_flags &= ~XFS_DQ_DIRTY;
xfs_dqfunlock(dqp); xfs_dqfunlock(dqp);
return XFS_ERROR(EIO); return XFS_ERROR(EIO);
} }
/* /*
* Get the buffer containing the on-disk dquot * Get the buffer containing the on-disk dquot
* We don't need a transaction envelope because we know that the
* the ondisk-dquot has already been allocated for.
*/ */
if ((error = xfs_qm_dqtobp(NULL, dqp, &ddqp, &bp, XFS_QMOPT_DOWARN))) { error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp, dqp->q_blkno,
mp->m_quotainfo->qi_dqchunklen, 0, &bp);
if (error) {
ASSERT(error != ENOENT); ASSERT(error != ENOENT);
/*
* Quotas could have gotten turned off (ESRCH)
*/
xfs_dqfunlock(dqp); xfs_dqfunlock(dqp);
return (error); return error;
} }
if (xfs_qm_dqcheck(&dqp->q_core, be32_to_cpu(ddqp->d_id), /*
0, XFS_QMOPT_DOWARN, "dqflush (incore copy)")) { * Calculate the location of the dquot inside the buffer.
xfs_force_shutdown(dqp->q_mount, SHUTDOWN_CORRUPT_INCORE); */
ddqp = (struct xfs_disk_dquot *)(XFS_BUF_PTR(bp) + dqp->q_bufoffset);
/*
* A simple sanity check in case we got a corrupted dquot..
*/
if (xfs_qm_dqcheck(&dqp->q_core, be32_to_cpu(ddqp->d_id), 0,
XFS_QMOPT_DOWARN, "dqflush (incore copy)")) {
xfs_buf_relse(bp);
xfs_dqfunlock(dqp);
xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
return XFS_ERROR(EIO); return XFS_ERROR(EIO);
} }
/* This is the only portion of data that needs to persist */ /* This is the only portion of data that needs to persist */
memcpy(ddqp, &(dqp->q_core), sizeof(xfs_disk_dquot_t)); memcpy(ddqp, &dqp->q_core, sizeof(xfs_disk_dquot_t));
/* /*
* Clear the dirty field and remember the flush lsn for later use. * Clear the dirty field and remember the flush lsn for later use.
*/ */
dqp->dq_flags &= ~(XFS_DQ_DIRTY); dqp->dq_flags &= ~XFS_DQ_DIRTY;
mp = dqp->q_mount;
xfs_trans_ail_copy_lsn(mp->m_ail, &dqp->q_logitem.qli_flush_lsn, xfs_trans_ail_copy_lsn(mp->m_ail, &dqp->q_logitem.qli_flush_lsn,
&dqp->q_logitem.qli_item.li_lsn); &dqp->q_logitem.qli_item.li_lsn);
+70 -153
@@ -55,8 +55,6 @@ uint ndquot;
kmem_zone_t *qm_dqzone; kmem_zone_t *qm_dqzone;
kmem_zone_t *qm_dqtrxzone; kmem_zone_t *qm_dqtrxzone;
static cred_t xfs_zerocr;
STATIC void xfs_qm_list_init(xfs_dqlist_t *, char *, int); STATIC void xfs_qm_list_init(xfs_dqlist_t *, char *, int);
STATIC void xfs_qm_list_destroy(xfs_dqlist_t *); STATIC void xfs_qm_list_destroy(xfs_dqlist_t *);
@@ -837,7 +835,7 @@ xfs_qm_dqattach_locked(
xfs_qm_dqattach_one(ip, ip->i_d.di_gid, XFS_DQ_GROUP, xfs_qm_dqattach_one(ip, ip->i_d.di_gid, XFS_DQ_GROUP,
flags & XFS_QMOPT_DQALLOC, flags & XFS_QMOPT_DQALLOC,
ip->i_udquot, &ip->i_gdquot) : ip->i_udquot, &ip->i_gdquot) :
xfs_qm_dqattach_one(ip, ip->i_d.di_projid, XFS_DQ_PROJ, xfs_qm_dqattach_one(ip, xfs_get_projid(ip), XFS_DQ_PROJ,
flags & XFS_QMOPT_DQALLOC, flags & XFS_QMOPT_DQALLOC,
ip->i_udquot, &ip->i_gdquot); ip->i_udquot, &ip->i_gdquot);
/* /*
@@ -1199,87 +1197,6 @@ xfs_qm_list_destroy(
mutex_destroy(&(list->qh_lock)); mutex_destroy(&(list->qh_lock));
} }
/*
* Stripped down version of dqattach. This doesn't attach, or even look at the
* dquots attached to the inode. The rationale is that there won't be any
* attached at the time this is called from quotacheck.
*/
STATIC int
xfs_qm_dqget_noattach(
xfs_inode_t *ip,
xfs_dquot_t **O_udqpp,
xfs_dquot_t **O_gdqpp)
{
int error;
xfs_mount_t *mp;
xfs_dquot_t *udqp, *gdqp;
ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
mp = ip->i_mount;
udqp = NULL;
gdqp = NULL;
if (XFS_IS_UQUOTA_ON(mp)) {
ASSERT(ip->i_udquot == NULL);
/*
* We want the dquot allocated if it doesn't exist.
*/
if ((error = xfs_qm_dqget(mp, ip, ip->i_d.di_uid, XFS_DQ_USER,
XFS_QMOPT_DQALLOC | XFS_QMOPT_DOWARN,
&udqp))) {
/*
* Shouldn't be able to turn off quotas here.
*/
ASSERT(error != ESRCH);
ASSERT(error != ENOENT);
return error;
}
ASSERT(udqp);
}
if (XFS_IS_OQUOTA_ON(mp)) {
ASSERT(ip->i_gdquot == NULL);
if (udqp)
xfs_dqunlock(udqp);
error = XFS_IS_GQUOTA_ON(mp) ?
xfs_qm_dqget(mp, ip,
ip->i_d.di_gid, XFS_DQ_GROUP,
XFS_QMOPT_DQALLOC|XFS_QMOPT_DOWARN,
&gdqp) :
xfs_qm_dqget(mp, ip,
ip->i_d.di_projid, XFS_DQ_PROJ,
XFS_QMOPT_DQALLOC|XFS_QMOPT_DOWARN,
&gdqp);
if (error) {
if (udqp)
xfs_qm_dqrele(udqp);
ASSERT(error != ESRCH);
ASSERT(error != ENOENT);
return error;
}
ASSERT(gdqp);
/* Reacquire the locks in the right order */
if (udqp) {
if (! xfs_qm_dqlock_nowait(udqp)) {
xfs_dqunlock(gdqp);
xfs_dqlock(udqp);
xfs_dqlock(gdqp);
}
}
}
*O_udqpp = udqp;
*O_gdqpp = gdqp;
#ifdef QUOTADEBUG
if (udqp) ASSERT(XFS_DQ_IS_LOCKED(udqp));
if (gdqp) ASSERT(XFS_DQ_IS_LOCKED(gdqp));
#endif
return 0;
}
/* /*
* Create an inode and return with a reference already taken, but unlocked * Create an inode and return with a reference already taken, but unlocked
* This is how we create quota inodes * This is how we create quota inodes
@@ -1305,8 +1222,8 @@ xfs_qm_qino_alloc(
return error; return error;
} }
if ((error = xfs_dir_ialloc(&tp, NULL, S_IFREG, 1, 0, error = xfs_dir_ialloc(&tp, NULL, S_IFREG, 1, 0, 0, 1, ip, &committed);
&xfs_zerocr, 0, 1, ip, &committed))) { if (error) {
xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES |
XFS_TRANS_ABORT); XFS_TRANS_ABORT);
return error; return error;
@@ -1516,7 +1433,7 @@ xfs_qm_dqiterate(
rablkcnt = map[i+1].br_blockcount; rablkcnt = map[i+1].br_blockcount;
rablkno = map[i+1].br_startblock; rablkno = map[i+1].br_startblock;
while (rablkcnt--) { while (rablkcnt--) {
xfs_baread(mp->m_ddev_targp, xfs_buf_readahead(mp->m_ddev_targp,
XFS_FSB_TO_DADDR(mp, rablkno), XFS_FSB_TO_DADDR(mp, rablkno),
mp->m_quotainfo->qi_dqchunklen); mp->m_quotainfo->qi_dqchunklen);
rablkno++; rablkno++;
@@ -1546,18 +1463,34 @@ xfs_qm_dqiterate(
/* /*
* Called by dqusage_adjust in doing a quotacheck. * Called by dqusage_adjust in doing a quotacheck.
* Given the inode, and a dquot (either USR or GRP, doesn't matter), *
* this updates its incore copy as well as the buffer copy. This is * Given the inode, and a dquot id this updates both the incore dqout as well
* so that once the quotacheck is done, we can just log all the buffers, * as the buffer copy. This is so that once the quotacheck is done, we can
* as opposed to logging numerous updates to individual dquots. * just log all the buffers, as opposed to logging numerous updates to
* individual dquots.
*/ */
STATIC void STATIC int
xfs_qm_quotacheck_dqadjust( xfs_qm_quotacheck_dqadjust(
xfs_dquot_t *dqp, struct xfs_inode *ip,
xfs_dqid_t id,
uint type,
xfs_qcnt_t nblks, xfs_qcnt_t nblks,
xfs_qcnt_t rtblks) xfs_qcnt_t rtblks)
{ {
ASSERT(XFS_DQ_IS_LOCKED(dqp)); struct xfs_mount *mp = ip->i_mount;
struct xfs_dquot *dqp;
int error;
error = xfs_qm_dqget(mp, ip, id, type,
XFS_QMOPT_DQALLOC | XFS_QMOPT_DOWARN, &dqp);
if (error) {
/*
* Shouldn't be able to turn off quotas here.
*/
ASSERT(error != ESRCH);
ASSERT(error != ENOENT);
return error;
}
trace_xfs_dqadjust(dqp); trace_xfs_dqadjust(dqp);
@@ -1582,11 +1515,13 @@ xfs_qm_quotacheck_dqadjust(
* There are no timers for the default values set in the root dquot. * There are no timers for the default values set in the root dquot.
*/ */
if (dqp->q_core.d_id) { if (dqp->q_core.d_id) {
xfs_qm_adjust_dqlimits(dqp->q_mount, &dqp->q_core); xfs_qm_adjust_dqlimits(mp, &dqp->q_core);
xfs_qm_adjust_dqtimers(dqp->q_mount, &dqp->q_core); xfs_qm_adjust_dqtimers(mp, &dqp->q_core);
} }
dqp->dq_flags |= XFS_DQ_DIRTY; dqp->dq_flags |= XFS_DQ_DIRTY;
xfs_qm_dqput(dqp);
return 0;
} }
STATIC int STATIC int
@@ -1629,8 +1564,7 @@ xfs_qm_dqusage_adjust(
int *res) /* result code value */ int *res) /* result code value */
{ {
xfs_inode_t *ip; xfs_inode_t *ip;
xfs_dquot_t *udqp, *gdqp; xfs_qcnt_t nblks, rtblks = 0;
xfs_qcnt_t nblks, rtblks;
int error; int error;
ASSERT(XFS_IS_QUOTA_RUNNING(mp)); ASSERT(XFS_IS_QUOTA_RUNNING(mp));
@@ -1650,51 +1584,24 @@ xfs_qm_dqusage_adjust(
* the case in all other instances. It's OK that we do this because * the case in all other instances. It's OK that we do this because
* quotacheck is done only at mount time. * quotacheck is done only at mount time.
*/ */
if ((error = xfs_iget(mp, NULL, ino, 0, XFS_ILOCK_EXCL, &ip))) { error = xfs_iget(mp, NULL, ino, 0, XFS_ILOCK_EXCL, &ip);
if (error) {
*res = BULKSTAT_RV_NOTHING; *res = BULKSTAT_RV_NOTHING;
return error; return error;
} }
/* ASSERT(ip->i_delayed_blks == 0);
* Obtain the locked dquots. In case of an error (eg. allocation
* fails for ENOSPC), we return the negative of the error number
* to bulkstat, so that it can get propagated to quotacheck() and
* making us disable quotas for the file system.
*/
if ((error = xfs_qm_dqget_noattach(ip, &udqp, &gdqp))) {
xfs_iunlock(ip, XFS_ILOCK_EXCL);
IRELE(ip);
*res = BULKSTAT_RV_GIVEUP;
return error;
}
rtblks = 0; if (XFS_IS_REALTIME_INODE(ip)) {
if (! XFS_IS_REALTIME_INODE(ip)) {
nblks = (xfs_qcnt_t)ip->i_d.di_nblocks;
} else {
/* /*
* Walk thru the extent list and count the realtime blocks. * Walk thru the extent list and count the realtime blocks.
*/ */
if ((error = xfs_qm_get_rtblks(ip, &rtblks))) { error = xfs_qm_get_rtblks(ip, &rtblks);
xfs_iunlock(ip, XFS_ILOCK_EXCL); if (error)
IRELE(ip); goto error0;
if (udqp)
xfs_qm_dqput(udqp);
if (gdqp)
xfs_qm_dqput(gdqp);
*res = BULKSTAT_RV_GIVEUP;
return error;
}
nblks = (xfs_qcnt_t)ip->i_d.di_nblocks - rtblks;
} }
ASSERT(ip->i_delayed_blks == 0);
/* nblks = (xfs_qcnt_t)ip->i_d.di_nblocks - rtblks;
* We can't release the inode while holding its dquot locks.
* The inode can go into inactive and might try to acquire the dquotlocks.
* So, just unlock here and do a vn_rele at the end.
*/
xfs_iunlock(ip, XFS_ILOCK_EXCL);
/* /*
* Add the (disk blocks and inode) resources occupied by this * Add the (disk blocks and inode) resources occupied by this
@@ -1709,26 +1616,36 @@ xfs_qm_dqusage_adjust(
* and quotaoffs don't race. (Quotachecks happen at mount time only). * and quotaoffs don't race. (Quotachecks happen at mount time only).
*/ */
if (XFS_IS_UQUOTA_ON(mp)) { if (XFS_IS_UQUOTA_ON(mp)) {
ASSERT(udqp); error = xfs_qm_quotacheck_dqadjust(ip, ip->i_d.di_uid,
xfs_qm_quotacheck_dqadjust(udqp, nblks, rtblks); XFS_DQ_USER, nblks, rtblks);
xfs_qm_dqput(udqp); if (error)
goto error0;
} }
if (XFS_IS_OQUOTA_ON(mp)) {
ASSERT(gdqp);
xfs_qm_quotacheck_dqadjust(gdqp, nblks, rtblks);
xfs_qm_dqput(gdqp);
}
/*
* Now release the inode. This will send it to 'inactive', and
* possibly even free blocks.
*/
IRELE(ip);
/* if (XFS_IS_GQUOTA_ON(mp)) {
* Goto next inode. error = xfs_qm_quotacheck_dqadjust(ip, ip->i_d.di_gid,
*/ XFS_DQ_GROUP, nblks, rtblks);
if (error)
goto error0;
}
if (XFS_IS_PQUOTA_ON(mp)) {
error = xfs_qm_quotacheck_dqadjust(ip, xfs_get_projid(ip),
XFS_DQ_PROJ, nblks, rtblks);
if (error)
goto error0;
}
xfs_iunlock(ip, XFS_ILOCK_EXCL);
IRELE(ip);
*res = BULKSTAT_RV_DIDONE; *res = BULKSTAT_RV_DIDONE;
return 0; return 0;
error0:
xfs_iunlock(ip, XFS_ILOCK_EXCL);
IRELE(ip);
*res = BULKSTAT_RV_GIVEUP;
return error;
} }
/* /*
@@ -2224,7 +2141,7 @@ xfs_qm_write_sb_changes(
/* /*
* Given an inode, a uid and gid (from cred_t) make sure that we have * Given an inode, a uid, gid and prid make sure that we have
* allocated relevant dquot(s) on disk, and that we won't exceed inode * allocated relevant dquot(s) on disk, and that we won't exceed inode
* quotas by creating this file. * quotas by creating this file.
* This also attaches dquot(s) to the given inode after locking it, * This also attaches dquot(s) to the given inode after locking it,
@@ -2332,7 +2249,7 @@ xfs_qm_vop_dqalloc(
xfs_dqunlock(gq); xfs_dqunlock(gq);
} }
} else if ((flags & XFS_QMOPT_PQUOTA) && XFS_IS_PQUOTA_ON(mp)) { } else if ((flags & XFS_QMOPT_PQUOTA) && XFS_IS_PQUOTA_ON(mp)) {
if (ip->i_d.di_projid != prid) { if (xfs_get_projid(ip) != prid) {
xfs_iunlock(ip, lockflags); xfs_iunlock(ip, lockflags);
if ((error = xfs_qm_dqget(mp, NULL, (xfs_dqid_t)prid, if ((error = xfs_qm_dqget(mp, NULL, (xfs_dqid_t)prid,
XFS_DQ_PROJ, XFS_DQ_PROJ,
@@ -2454,7 +2371,7 @@ xfs_qm_vop_chown_reserve(
} }
if (XFS_IS_OQUOTA_ON(ip->i_mount) && gdqp) { if (XFS_IS_OQUOTA_ON(ip->i_mount) && gdqp) {
if (XFS_IS_PQUOTA_ON(ip->i_mount) && if (XFS_IS_PQUOTA_ON(ip->i_mount) &&
ip->i_d.di_projid != be32_to_cpu(gdqp->q_core.d_id)) xfs_get_projid(ip) != be32_to_cpu(gdqp->q_core.d_id))
prjflags = XFS_QMOPT_ENOSPC; prjflags = XFS_QMOPT_ENOSPC;
if (prjflags || if (prjflags ||
@@ -2558,7 +2475,7 @@ xfs_qm_vop_create_dqattach(
ip->i_gdquot = gdqp; ip->i_gdquot = gdqp;
ASSERT(XFS_IS_OQUOTA_ON(mp)); ASSERT(XFS_IS_OQUOTA_ON(mp));
ASSERT((XFS_IS_GQUOTA_ON(mp) ? ASSERT((XFS_IS_GQUOTA_ON(mp) ?
ip->i_d.di_gid : ip->i_d.di_projid) == ip->i_d.di_gid : xfs_get_projid(ip)) ==
be32_to_cpu(gdqp->q_core.d_id)); be32_to_cpu(gdqp->q_core.d_id));
xfs_trans_mod_dquot(tp, gdqp, XFS_TRANS_DQ_ICOUNT, 1); xfs_trans_mod_dquot(tp, gdqp, XFS_TRANS_DQ_ICOUNT, 1);
} }
+1 -1
@@ -81,7 +81,7 @@ xfs_qm_statvfs(
 	xfs_mount_t		*mp = ip->i_mount;
 	xfs_dquot_t		*dqp;

-	if (!xfs_qm_dqget(mp, NULL, ip->i_d.di_projid, XFS_DQ_PROJ, 0, &dqp)) {
+	if (!xfs_qm_dqget(mp, NULL, xfs_get_projid(ip), XFS_DQ_PROJ, 0, &dqp)) {
 		xfs_fill_statvfs_from_dquot(statp, &dqp->q_core);
 		xfs_qm_dqput(dqp);
 	}

Some files were not shown because too many files have changed in this diff.