Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs-2.6
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs-2.6:
  fs: brlock vfsmount_lock
  fs: scale files_lock
  lglock: introduce special lglock and brlock spin locks
  tty: fix fu_list abuse
  fs: cleanup files_lock locking
  fs: remove extra lookup in __lookup_hash
  fs: fs_struct rwlock to spinlock
  apparmor: use task path helpers
  fs: dentry allocation consolidation
  fs: fix do_lookup false negative
  mbcache: Limit the maximum number of cache entries
  hostfs ->follow_link() braino
  hostfs: dumb (and usually harmless) tpyo - strncpy instead of strlcpy
  remove SWRITE* I/O types
  kill BH_Ordered flag
  vfs: update ctime when changing the file's permission by setfacl
  cramfs: only unlock new inodes
  fix reiserfs_evict_inode end_writeback second call
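Several of the merged commits ("fs: scale files_lock", "fs: brlock vfsmount_lock", "lglock: introduce special lglock and brlock spin locks") replace one global spinlock with a local/global ("lglock") lock, as the fs/file_table.c and fs/dcache.c hunks below show. What follows is a minimal sketch of that usage pattern, not code from this commit: the names example_lglock, example_list, example_init, example_add_self and example_any_nonempty are made up for illustration, and it assumes the 2.6.36-era lglock macros that take the lock name rather than a pointer.

#include <linux/lglock.h>
#include <linux/list.h>
#include <linux/percpu.h>

/* One lock and one list head per CPU; these names are hypothetical. */
DECLARE_LGLOCK(example_lglock);
DEFINE_LGLOCK(example_lglock);
static DEFINE_PER_CPU(struct list_head, example_list);

static void example_init(void)
{
	int cpu;

	lg_lock_init(example_lglock);
	for_each_possible_cpu(cpu)
		INIT_LIST_HEAD(per_cpu_ptr(&example_list, cpu));
}

/* Fast path: take only this CPU's lock and touch only its own list. */
static void example_add_self(struct list_head *item)
{
	lg_local_lock(example_lglock);
	list_add(item, this_cpu_ptr(&example_list));
	lg_local_unlock(example_lglock);
}

/* Slow path: take every per-CPU lock, then it is safe to walk all lists. */
static int example_any_nonempty(void)
{
	int cpu, ret = 0;

	lg_global_lock(example_lglock);
	for_each_possible_cpu(cpu)
		if (!list_empty(per_cpu_ptr(&example_list, cpu)))
			ret = 1;
	lg_global_unlock(example_lglock);
	return ret;
}

Removal has to take the lock of the CPU an item was added on, which is why the file_table.c hunk records file->f_sb_list_cpu and uses lg_local_lock_cpu() in file_sb_list_del().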
+2
-2
@@ -675,8 +675,8 @@ static int ptmx_open(struct inode *inode, struct file *filp)
}
set_bit(TTY_PTY_LOCK, &tty->flags); /* LOCK THE SLAVE */
filp->private_data = tty;
file_move(filp, &tty->tty_files);
tty_add_file(tty, filp);
retval = devpts_pty_new(inode, tty->link);
if (retval)
+63
-29
@@ -136,6 +136,9 @@ LIST_HEAD(tty_drivers); /* linked list of tty drivers */
DEFINE_MUTEX(tty_mutex);
EXPORT_SYMBOL(tty_mutex);
/* Spinlock to protect the tty->tty_files list */
DEFINE_SPINLOCK(tty_files_lock);
static ssize_t tty_read(struct file *, char __user *, size_t, loff_t *);
static ssize_t tty_write(struct file *, const char __user *, size_t, loff_t *);
ssize_t redirected_tty_write(struct file *, const char __user *,

@@ -185,6 +188,41 @@ void free_tty_struct(struct tty_struct *tty)
kfree(tty);
}
static inline struct tty_struct *file_tty(struct file *file)
{
return ((struct tty_file_private *)file->private_data)->tty;
}
/* Associate a new file with the tty structure */
void tty_add_file(struct tty_struct *tty, struct file *file)
{
struct tty_file_private *priv;
/* XXX: must implement proper error handling in callers */
priv = kmalloc(sizeof(*priv), GFP_KERNEL|__GFP_NOFAIL);
priv->tty = tty;
priv->file = file;
file->private_data = priv;
spin_lock(&tty_files_lock);
list_add(&priv->list, &tty->tty_files);
spin_unlock(&tty_files_lock);
}
/* Delete file from its tty */
void tty_del_file(struct file *file)
{
struct tty_file_private *priv = file->private_data;
spin_lock(&tty_files_lock);
list_del(&priv->list);
spin_unlock(&tty_files_lock);
file->private_data = NULL;
kfree(priv);
}
#define TTY_NUMBER(tty) ((tty)->index + (tty)->driver->name_base)
/**

@@ -235,11 +273,11 @@ static int check_tty_count(struct tty_struct *tty, const char *routine)
struct list_head *p;
int count = 0;
file_list_lock();
spin_lock(&tty_files_lock);
list_for_each(p, &tty->tty_files) {
count++;
}
file_list_unlock();
spin_unlock(&tty_files_lock);
if (tty->driver->type == TTY_DRIVER_TYPE_PTY &&
tty->driver->subtype == PTY_TYPE_SLAVE &&
tty->link && tty->link->count)

@@ -497,6 +535,7 @@ void __tty_hangup(struct tty_struct *tty)
struct file *cons_filp = NULL;
struct file *filp, *f = NULL;
struct task_struct *p;
struct tty_file_private *priv;
int closecount = 0, n;
unsigned long flags;
int refs = 0;

@@ -506,7 +545,7 @@ void __tty_hangup(struct tty_struct *tty)
spin_lock(&redirect_lock);
if (redirect && redirect->private_data == tty) {
if (redirect && file_tty(redirect) == tty) {
f = redirect;
redirect = NULL;
}

@@ -519,9 +558,10 @@ void __tty_hangup(struct tty_struct *tty)
workqueue with the lock held */
check_tty_count(tty, "tty_hangup");
file_list_lock();
spin_lock(&tty_files_lock);
/* This breaks for file handles being sent over AF_UNIX sockets ? */
list_for_each_entry(filp, &tty->tty_files, f_u.fu_list) {
list_for_each_entry(priv, &tty->tty_files, list) {
filp = priv->file;
if (filp->f_op->write == redirected_tty_write)
cons_filp = filp;
if (filp->f_op->write != tty_write)

@@ -530,7 +570,7 @@ void __tty_hangup(struct tty_struct *tty)
__tty_fasync(-1, filp, 0); /* can't block */
filp->f_op = &hung_up_tty_fops;
}
file_list_unlock();
spin_unlock(&tty_files_lock);
tty_ldisc_hangup(tty);
@@ -889,12 +929,10 @@ static ssize_t tty_read(struct file *file, char __user *buf, size_t count,
loff_t *ppos)
{
int i;
struct tty_struct *tty;
struct inode *inode;
struct inode *inode = file->f_path.dentry->d_inode;
struct tty_struct *tty = file_tty(file);
struct tty_ldisc *ld;
tty = file->private_data;
inode = file->f_path.dentry->d_inode;
if (tty_paranoia_check(tty, inode, "tty_read"))
return -EIO;
if (!tty || (test_bit(TTY_IO_ERROR, &tty->flags)))

@@ -1065,12 +1103,11 @@ void tty_write_message(struct tty_struct *tty, char *msg)
static ssize_t tty_write(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
{
struct tty_struct *tty;
struct inode *inode = file->f_path.dentry->d_inode;
struct tty_struct *tty = file_tty(file);
struct tty_ldisc *ld;
ssize_t ret;
struct tty_ldisc *ld;
tty = file->private_data;
if (tty_paranoia_check(tty, inode, "tty_write"))
return -EIO;
if (!tty || !tty->ops->write ||

@@ -1424,9 +1461,9 @@ static void release_one_tty(struct work_struct *work)
tty_driver_kref_put(driver);
module_put(driver->owner);
file_list_lock();
spin_lock(&tty_files_lock);
list_del_init(&tty->tty_files);
file_list_unlock();
spin_unlock(&tty_files_lock);
put_pid(tty->pgrp);
put_pid(tty->session);

@@ -1507,13 +1544,13 @@ static void release_tty(struct tty_struct *tty, int idx)
int tty_release(struct inode *inode, struct file *filp)
{
struct tty_struct *tty, *o_tty;
struct tty_struct *tty = file_tty(filp);
struct tty_struct *o_tty;
int pty_master, tty_closing, o_tty_closing, do_sleep;
int devpts;
int idx;
char buf[64];
tty = filp->private_data;
if (tty_paranoia_check(tty, inode, "tty_release_dev"))
return 0;

@@ -1671,8 +1708,7 @@ int tty_release(struct inode *inode, struct file *filp)
* - do_tty_hangup no longer sees this file descriptor as
* something that needs to be handled for hangups.
*/
file_kill(filp);
filp->private_data = NULL;
tty_del_file(filp);
/*
* Perform some housekeeping before deciding whether to return.

@@ -1839,8 +1875,8 @@ got_driver:
return PTR_ERR(tty);
}
filp->private_data = tty;
file_move(filp, &tty->tty_files);
tty_add_file(tty, filp);
check_tty_count(tty, "tty_open");
if (tty->driver->type == TTY_DRIVER_TYPE_PTY &&
tty->driver->subtype == PTY_TYPE_MASTER)

@@ -1916,11 +1952,10 @@ got_driver:
static unsigned int tty_poll(struct file *filp, poll_table *wait)
{
struct tty_struct *tty;
struct tty_struct *tty = file_tty(filp);
struct tty_ldisc *ld;
int ret = 0;
tty = filp->private_data;
if (tty_paranoia_check(tty, filp->f_path.dentry->d_inode, "tty_poll"))
return 0;

@@ -1933,11 +1968,10 @@ static unsigned int tty_poll(struct file *filp, poll_table *wait)
static int __tty_fasync(int fd, struct file *filp, int on)
{
struct tty_struct *tty;
struct tty_struct *tty = file_tty(filp);
unsigned long flags;
int retval = 0;
tty = filp->private_data;
if (tty_paranoia_check(tty, filp->f_path.dentry->d_inode, "tty_fasync"))
goto out;

@@ -2491,13 +2525,13 @@ EXPORT_SYMBOL(tty_pair_get_pty);
*/
long tty_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
struct tty_struct *tty, *real_tty;
struct tty_struct *tty = file_tty(file);
struct tty_struct *real_tty;
void __user *p = (void __user *)arg;
int retval;
struct tty_ldisc *ld;
struct inode *inode = file->f_dentry->d_inode;
tty = file->private_data;
if (tty_paranoia_check(tty, inode, "tty_ioctl"))
return -EINVAL;

@@ -2619,7 +2653,7 @@ static long tty_compat_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
struct inode *inode = file->f_dentry->d_inode;
struct tty_struct *tty = file->private_data;
struct tty_struct *tty = file_tty(file);
struct tty_ldisc *ld;
int retval = -ENOIOCTLCMD;

@@ -2711,7 +2745,7 @@ void __do_SAK(struct tty_struct *tty)
if (!filp)
continue;
if (filp->f_op->read == tty_read &&
filp->private_data == tty) {
file_tty(filp) == tty) {
printk(KERN_NOTICE "SAK: killed process %d"
" (%s): fd#%d opened to the tty\n",
task_pid_nr(p), p->comm, i);
@@ -44,9 +44,9 @@ int pohmelfs_construct_path_string(struct pohmelfs_inode *pi, void *data, int le
return -ENOENT;
}
read_lock(&current->fs->lock);
spin_lock(&current->fs->lock);
path.mnt = mntget(current->fs->root.mnt);
read_unlock(&current->fs->lock);
spin_unlock(&current->fs->lock);
path.dentry = d;

@@ -91,9 +91,9 @@ int pohmelfs_path_length(struct pohmelfs_inode *pi)
return -ENOENT;
}
read_lock(&current->fs->lock);
spin_lock(&current->fs->lock);
root = dget(current->fs->root.dentry);
read_unlock(&current->fs->lock);
spin_unlock(&current->fs->lock);
spin_lock(&dcache_lock);
+37
-32
@@ -770,11 +770,12 @@ static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
spin_unlock(lock);
/*
* Ensure any pending I/O completes so that
* ll_rw_block() actually writes the current
* contents - it is a noop if I/O is still in
* flight on potentially older contents.
* write_dirty_buffer() actually writes the
* current contents - it is a noop if I/O is
* still in flight on potentially older
* contents.
*/
ll_rw_block(SWRITE_SYNC_PLUG, 1, &bh);
write_dirty_buffer(bh, WRITE_SYNC_PLUG);
/*
* Kick off IO for the previous mapping. Note

@@ -2911,13 +2912,6 @@ int submit_bh(int rw, struct buffer_head * bh)
BUG_ON(buffer_delay(bh));
BUG_ON(buffer_unwritten(bh));
/*
* Mask in barrier bit for a write (could be either a WRITE or a
* WRITE_SYNC
*/
if (buffer_ordered(bh) && (rw & WRITE))
rw |= WRITE_BARRIER;
/*
* Only clear out a write error when rewriting
*/

@@ -2956,22 +2950,21 @@ EXPORT_SYMBOL(submit_bh);
/**
* ll_rw_block: low-level access to block devices (DEPRECATED)
* @rw: whether to %READ or %WRITE or %SWRITE or maybe %READA (readahead)
* @rw: whether to %READ or %WRITE or maybe %READA (readahead)
* @nr: number of &struct buffer_heads in the array
* @bhs: array of pointers to &struct buffer_head
*
* ll_rw_block() takes an array of pointers to &struct buffer_heads, and
* requests an I/O operation on them, either a %READ or a %WRITE. The third
* %SWRITE is like %WRITE only we make sure that the *current* data in buffers
* are sent to disk. The fourth %READA option is described in the documentation
* for generic_make_request() which ll_rw_block() calls.
* %READA option is described in the documentation for generic_make_request()
* which ll_rw_block() calls.
*
* This function drops any buffer that it cannot get a lock on (with the
* BH_Lock state bit) unless SWRITE is required, any buffer that appears to be
* clean when doing a write request, and any buffer that appears to be
* up-to-date when doing read request. Further it marks as clean buffers that
* are processed for writing (the buffer cache won't assume that they are
* actually clean until the buffer gets unlocked).
* BH_Lock state bit), any buffer that appears to be clean when doing a write
* request, and any buffer that appears to be up-to-date when doing read
* request. Further it marks as clean buffers that are processed for
* writing (the buffer cache won't assume that they are actually clean
* until the buffer gets unlocked).
*
* ll_rw_block sets b_end_io to simple completion handler that marks
* the buffer up-to-date (if approriate), unlocks the buffer and wakes
@@ -2987,20 +2980,13 @@ void ll_rw_block(int rw, int nr, struct buffer_head *bhs[])
for (i = 0; i < nr; i++) {
struct buffer_head *bh = bhs[i];
if (rw == SWRITE || rw == SWRITE_SYNC || rw == SWRITE_SYNC_PLUG)
lock_buffer(bh);
else if (!trylock_buffer(bh))
if (!trylock_buffer(bh))
continue;
if (rw == WRITE || rw == SWRITE || rw == SWRITE_SYNC ||
rw == SWRITE_SYNC_PLUG) {
if (rw == WRITE) {
if (test_clear_buffer_dirty(bh)) {
bh->b_end_io = end_buffer_write_sync;
get_bh(bh);
if (rw == SWRITE_SYNC)
submit_bh(WRITE_SYNC, bh);
else
submit_bh(WRITE, bh);
submit_bh(WRITE, bh);
continue;
}
} else {

@@ -3016,12 +3002,25 @@ void ll_rw_block(int rw, int nr, struct buffer_head *bhs[])
}
EXPORT_SYMBOL(ll_rw_block);
void write_dirty_buffer(struct buffer_head *bh, int rw)
{
lock_buffer(bh);
if (!test_clear_buffer_dirty(bh)) {
unlock_buffer(bh);
return;
}
bh->b_end_io = end_buffer_write_sync;
get_bh(bh);
submit_bh(rw, bh);
}
EXPORT_SYMBOL(write_dirty_buffer);
/*
* For a data-integrity writeout, we need to wait upon any in-progress I/O
* and then start new I/O and then wait upon it. The caller must have a ref on
* the buffer_head.
*/
int sync_dirty_buffer(struct buffer_head *bh)
int __sync_dirty_buffer(struct buffer_head *bh, int rw)
{
int ret = 0;

@@ -3030,7 +3029,7 @@ int sync_dirty_buffer(struct buffer_head *bh)
if (test_clear_buffer_dirty(bh)) {
get_bh(bh);
bh->b_end_io = end_buffer_write_sync;
ret = submit_bh(WRITE_SYNC, bh);
ret = submit_bh(rw, bh);
wait_on_buffer(bh);
if (buffer_eopnotsupp(bh)) {
clear_buffer_eopnotsupp(bh);

@@ -3043,6 +3042,12 @@ int sync_dirty_buffer(struct buffer_head *bh)
}
return ret;
}
EXPORT_SYMBOL(__sync_dirty_buffer);
int sync_dirty_buffer(struct buffer_head *bh)
{
return __sync_dirty_buffer(bh, WRITE_SYNC);
}
EXPORT_SYMBOL(sync_dirty_buffer);
/*
+1
-1
@@ -80,7 +80,7 @@ static struct inode *get_cramfs_inode(struct super_block *sb,
}
} else {
inode = iget_locked(sb, CRAMINO(cramfs_inode));
if (inode) {
if (inode && (inode->i_state & I_NEW)) {
setup_inode(inode, cramfs_inode);
unlock_new_inode(inode);
}
+41
-30
@@ -1332,31 +1332,13 @@ EXPORT_SYMBOL(d_add_ci);
* d_lookup - search for a dentry
* @parent: parent dentry
* @name: qstr of name we wish to find
* Returns: dentry, or NULL
*
* Searches the children of the parent dentry for the name in question. If
* the dentry is found its reference count is incremented and the dentry
* is returned. The caller must use dput to free the entry when it has
* finished using it. %NULL is returned on failure.
*
* __d_lookup is dcache_lock free. The hash list is protected using RCU.
* Memory barriers are used while updating and doing lockless traversal.
* To avoid races with d_move while rename is happening, d_lock is used.
*
* Overflows in memcmp(), while d_move, are avoided by keeping the length
* and name pointer in one structure pointed by d_qstr.
*
* rcu_read_lock() and rcu_read_unlock() are used to disable preemption while
* lookup is going on.
*
* The dentry unused LRU is not updated even if lookup finds the required dentry
* in there. It is updated in places such as prune_dcache, shrink_dcache_sb,
* select_parent and __dget_locked. This laziness saves lookup from dcache_lock
* acquisition.
*
* d_lookup() is protected against the concurrent renames in some unrelated
* directory using the seqlockt_t rename_lock.
* d_lookup searches the children of the parent dentry for the name in
* question. If the dentry is found its reference count is incremented and the
* dentry is returned. The caller must use dput to free the entry when it has
* finished using it. %NULL is returned if the dentry does not exist.
*/
struct dentry * d_lookup(struct dentry * parent, struct qstr * name)
{
struct dentry * dentry = NULL;

@@ -1372,6 +1354,21 @@ struct dentry * d_lookup(struct dentry * parent, struct qstr * name)
}
EXPORT_SYMBOL(d_lookup);
/*
* __d_lookup - search for a dentry (racy)
* @parent: parent dentry
* @name: qstr of name we wish to find
* Returns: dentry, or NULL
*
* __d_lookup is like d_lookup, however it may (rarely) return a
* false-negative result due to unrelated rename activity.
*
* __d_lookup is slightly faster by avoiding rename_lock read seqlock,
* however it must be used carefully, eg. with a following d_lookup in
* the case of failure.
*
* __d_lookup callers must be commented.
*/
struct dentry * __d_lookup(struct dentry * parent, struct qstr * name)
{
unsigned int len = name->len;

@@ -1382,6 +1379,19 @@ struct dentry * __d_lookup(struct dentry * parent, struct qstr * name)
struct hlist_node *node;
struct dentry *dentry;
/*
* The hash list is protected using RCU.
*
* Take d_lock when comparing a candidate dentry, to avoid races
* with d_move().
*
* It is possible that concurrent renames can mess up our list
* walk here and result in missing our dentry, resulting in the
* false-negative result. d_lookup() protects against concurrent
* renames using rename_lock seqlock.
*
* See Documentation/vfs/dcache-locking.txt for more details.
*/
rcu_read_lock();
hlist_for_each_entry_rcu(dentry, node, head, d_hash) {

@@ -1396,8 +1406,8 @@ struct dentry * __d_lookup(struct dentry * parent, struct qstr * name)
/*
* Recheck the dentry after taking the lock - d_move may have
* changed things. Don't bother checking the hash because we're
* about to compare the whole name anyway.
* changed things. Don't bother checking the hash because
* we're about to compare the whole name anyway.
*/
if (dentry->d_parent != parent)
goto next;
@@ -1925,7 +1935,7 @@ static int prepend_path(const struct path *path, struct path *root,
bool slash = false;
int error = 0;
spin_lock(&vfsmount_lock);
br_read_lock(vfsmount_lock);
while (dentry != root->dentry || vfsmnt != root->mnt) {
struct dentry * parent;

@@ -1954,7 +1964,7 @@ out:
if (!error && !slash)
error = prepend(buffer, buflen, "/", 1);
spin_unlock(&vfsmount_lock);
br_read_unlock(vfsmount_lock);
return error;
global_root:

@@ -2292,11 +2302,12 @@ int path_is_under(struct path *path1, struct path *path2)
struct vfsmount *mnt = path1->mnt;
struct dentry *dentry = path1->dentry;
int res;
spin_lock(&vfsmount_lock);
br_read_lock(vfsmount_lock);
if (mnt != path2->mnt) {
for (;;) {
if (mnt->mnt_parent == mnt) {
spin_unlock(&vfsmount_lock);
br_read_unlock(vfsmount_lock);
return 0;
}
if (mnt->mnt_parent == path2->mnt)

@@ -2306,7 +2317,7 @@ int path_is_under(struct path *path1, struct path *path2)
dentry = mnt->mnt_mountpoint;
}
res = is_subdir(dentry, path2->dentry);
spin_unlock(&vfsmount_lock);
br_read_unlock(vfsmount_lock);
return res;
}
EXPORT_SYMBOL(path_is_under);

@@ -1118,7 +1118,7 @@ int check_unsafe_exec(struct linux_binprm *bprm)
bprm->unsafe = tracehook_unsafe_exec(p);
n_fs = 1;
write_lock(&p->fs->lock);
spin_lock(&p->fs->lock);
rcu_read_lock();
for (t = next_thread(p); t != p; t = next_thread(t)) {
if (t->fs == p->fs)

@@ -1135,7 +1135,7 @@ int check_unsafe_exec(struct linux_binprm *bprm)
res = 1;
}
}
write_unlock(&p->fs->lock);
spin_unlock(&p->fs->lock);
return res;
}
+3
-1
@@ -250,7 +250,9 @@ int fat_sync_bhs(struct buffer_head **bhs, int nr_bhs)
{
int i, err = 0;
ll_rw_block(SWRITE, nr_bhs, bhs);
for (i = 0; i < nr_bhs; i++)
write_dirty_buffer(bhs[i], WRITE);
for (i = 0; i < nr_bhs; i++) {
wait_on_buffer(bhs[i]);
if (buffer_eopnotsupp(bhs[i])) {
+95
-29
@@ -20,7 +20,9 @@
#include <linux/cdev.h>
#include <linux/fsnotify.h>
#include <linux/sysctl.h>
#include <linux/lglock.h>
#include <linux/percpu_counter.h>
#include <linux/percpu.h>
#include <linux/ima.h>
#include <asm/atomic.h>

@@ -32,8 +34,8 @@ struct files_stat_struct files_stat = {
.max_files = NR_FILE
};
/* public. Not pretty! */
__cacheline_aligned_in_smp DEFINE_SPINLOCK(files_lock);
DECLARE_LGLOCK(files_lglock);
DEFINE_LGLOCK(files_lglock);
/* SLAB cache for file structures */
static struct kmem_cache *filp_cachep __read_mostly;

@@ -249,7 +251,7 @@ static void __fput(struct file *file)
cdev_put(inode->i_cdev);
fops_put(file->f_op);
put_pid(file->f_owner.pid);
file_kill(file);
file_sb_list_del(file);
if (file->f_mode & FMODE_WRITE)
drop_file_write_access(file);
file->f_path.dentry = NULL;
@@ -328,41 +330,107 @@ struct file *fget_light(unsigned int fd, int *fput_needed)
return file;
}
void put_filp(struct file *file)
{
if (atomic_long_dec_and_test(&file->f_count)) {
security_file_free(file);
file_kill(file);
file_sb_list_del(file);
file_free(file);
}
}
void file_move(struct file *file, struct list_head *list)
static inline int file_list_cpu(struct file *file)
{
if (!list)
return;
file_list_lock();
list_move(&file->f_u.fu_list, list);
file_list_unlock();
#ifdef CONFIG_SMP
return file->f_sb_list_cpu;
#else
return smp_processor_id();
#endif
}
void file_kill(struct file *file)
/* helper for file_sb_list_add to reduce ifdefs */
static inline void __file_sb_list_add(struct file *file, struct super_block *sb)
{
struct list_head *list;
#ifdef CONFIG_SMP
int cpu;
cpu = smp_processor_id();
file->f_sb_list_cpu = cpu;
list = per_cpu_ptr(sb->s_files, cpu);
#else
list = &sb->s_files;
#endif
list_add(&file->f_u.fu_list, list);
}
/**
* file_sb_list_add - add a file to the sb's file list
* @file: file to add
* @sb: sb to add it to
*
* Use this function to associate a file with the superblock of the inode it
* refers to.
*/
void file_sb_list_add(struct file *file, struct super_block *sb)
{
lg_local_lock(files_lglock);
__file_sb_list_add(file, sb);
lg_local_unlock(files_lglock);
}
/**
* file_sb_list_del - remove a file from the sb's file list
* @file: file to remove
* @sb: sb to remove it from
*
* Use this function to remove a file from its superblock.
*/
void file_sb_list_del(struct file *file)
{
if (!list_empty(&file->f_u.fu_list)) {
file_list_lock();
lg_local_lock_cpu(files_lglock, file_list_cpu(file));
list_del_init(&file->f_u.fu_list);
file_list_unlock();
lg_local_unlock_cpu(files_lglock, file_list_cpu(file));
}
}
#ifdef CONFIG_SMP
/*
* These macros iterate all files on all CPUs for a given superblock.
* files_lglock must be held globally.
*/
#define do_file_list_for_each_entry(__sb, __file) \
{ \
int i; \
for_each_possible_cpu(i) { \
struct list_head *list; \
list = per_cpu_ptr((__sb)->s_files, i); \
list_for_each_entry((__file), list, f_u.fu_list)
#define while_file_list_for_each_entry \
} \
}
#else
#define do_file_list_for_each_entry(__sb, __file) \
{ \
struct list_head *list; \
list = &(sb)->s_files; \
list_for_each_entry((__file), list, f_u.fu_list)
#define while_file_list_for_each_entry \
}
#endif
int fs_may_remount_ro(struct super_block *sb)
{
struct file *file;
/* Check that no files are currently opened for writing. */
file_list_lock();
list_for_each_entry(file, &sb->s_files, f_u.fu_list) {
lg_global_lock(files_lglock);
do_file_list_for_each_entry(sb, file) {
struct inode *inode = file->f_path.dentry->d_inode;
/* File with pending delete? */
@@ -372,11 +440,11 @@ int fs_may_remount_ro(struct super_block *sb)
/* Writeable file? */
if (S_ISREG(inode->i_mode) && (file->f_mode & FMODE_WRITE))
goto too_bad;
}
file_list_unlock();
} while_file_list_for_each_entry;
lg_global_unlock(files_lglock);
return 1; /* Tis' cool bro. */
too_bad:
file_list_unlock();
lg_global_unlock(files_lglock);
return 0;
}

@@ -392,8 +460,8 @@ void mark_files_ro(struct super_block *sb)
struct file *f;
retry:
file_list_lock();
list_for_each_entry(f, &sb->s_files, f_u.fu_list) {
lg_global_lock(files_lglock);
do_file_list_for_each_entry(sb, f) {
struct vfsmount *mnt;
if (!S_ISREG(f->f_path.dentry->d_inode->i_mode))
continue;

@@ -408,16 +476,13 @@ retry:
continue;
file_release_write(f);
mnt = mntget(f->f_path.mnt);
file_list_unlock();
/*
* This can sleep, so we can't hold
* the file_list_lock() spinlock.
*/
/* This can sleep, so we can't hold the spinlock. */
lg_global_unlock(files_lglock);
mnt_drop_write(mnt);
mntput(mnt);
goto retry;
}
file_list_unlock();
} while_file_list_for_each_entry;
lg_global_unlock(files_lglock);
}
void __init files_init(unsigned long mempages)

@@ -437,5 +502,6 @@ void __init files_init(unsigned long mempages)
if (files_stat.max_files < NR_FILE)
files_stat.max_files = NR_FILE;
files_defer_init();
lg_lock_init(files_lglock);
percpu_counter_init(&nr_files, 0);
}
+16
-16
@@ -13,11 +13,11 @@ void set_fs_root(struct fs_struct *fs, struct path *path)
{
struct path old_root;
write_lock(&fs->lock);
spin_lock(&fs->lock);
old_root = fs->root;
fs->root = *path;
path_get(path);
write_unlock(&fs->lock);
spin_unlock(&fs->lock);
if (old_root.dentry)
path_put(&old_root);
}

@@ -30,11 +30,11 @@ void set_fs_pwd(struct fs_struct *fs, struct path *path)
{
struct path old_pwd;
write_lock(&fs->lock);
spin_lock(&fs->lock);
old_pwd = fs->pwd;
fs->pwd = *path;
path_get(path);
write_unlock(&fs->lock);
spin_unlock(&fs->lock);
if (old_pwd.dentry)
path_put(&old_pwd);

@@ -51,7 +51,7 @@ void chroot_fs_refs(struct path *old_root, struct path *new_root)
task_lock(p);
fs = p->fs;
if (fs) {
write_lock(&fs->lock);
spin_lock(&fs->lock);
if (fs->root.dentry == old_root->dentry
&& fs->root.mnt == old_root->mnt) {
path_get(new_root);

@@ -64,7 +64,7 @@ void chroot_fs_refs(struct path *old_root, struct path *new_root)
fs->pwd = *new_root;
count++;
}
write_unlock(&fs->lock);
spin_unlock(&fs->lock);
}
task_unlock(p);
} while_each_thread(g, p);

@@ -87,10 +87,10 @@ void exit_fs(struct task_struct *tsk)
if (fs) {
int kill;
task_lock(tsk);
write_lock(&fs->lock);
spin_lock(&fs->lock);
tsk->fs = NULL;
kill = !--fs->users;
write_unlock(&fs->lock);
spin_unlock(&fs->lock);
task_unlock(tsk);
if (kill)
free_fs_struct(fs);

@@ -104,7 +104,7 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
if (fs) {
fs->users = 1;
fs->in_exec = 0;
rwlock_init(&fs->lock);
spin_lock_init(&fs->lock);
fs->umask = old->umask;
get_fs_root_and_pwd(old, &fs->root, &fs->pwd);
}

@@ -121,10 +121,10 @@ int unshare_fs_struct(void)
return -ENOMEM;
task_lock(current);
write_lock(&fs->lock);
spin_lock(&fs->lock);
kill = !--fs->users;
current->fs = new_fs;
write_unlock(&fs->lock);
spin_unlock(&fs->lock);
task_unlock(current);
if (kill)

@@ -143,7 +143,7 @@ EXPORT_SYMBOL(current_umask);
/* to be mentioned only in INIT_TASK */
struct fs_struct init_fs = {
.users = 1,
.lock = __RW_LOCK_UNLOCKED(init_fs.lock),
.lock = __SPIN_LOCK_UNLOCKED(init_fs.lock),
.umask = 0022,
};

@@ -156,14 +156,14 @@ void daemonize_fs_struct(void)
task_lock(current);
write_lock(&init_fs.lock);
spin_lock(&init_fs.lock);
init_fs.users++;
write_unlock(&init_fs.lock);
spin_unlock(&init_fs.lock);
write_lock(&fs->lock);
spin_lock(&fs->lock);
current->fs = &init_fs;
kill = !--fs->users;
write_unlock(&fs->lock);
spin_unlock(&fs->lock);
task_unlock(current);
if (kill)
@@ -94,6 +94,7 @@ generic_acl_set(struct dentry *dentry, const char *name, const void *value,
if (error < 0)
goto failed;
inode->i_mode = mode;
inode->i_ctime = CURRENT_TIME;
if (error == 0) {
posix_acl_release(acl);
acl = NULL;

@@ -104,7 +104,7 @@ static char *__dentry_name(struct dentry *dentry, char *name)
__putname(name);
return NULL;
}
strncpy(name, root, PATH_MAX);
strlcpy(name, root, PATH_MAX);
if (len > p - name) {
__putname(name);
return NULL;

@@ -876,7 +876,7 @@ static void *hostfs_follow_link(struct dentry *dentry, struct nameidata *nd)
char *path = dentry_name(dentry);
int err = -ENOMEM;
if (path) {
int err = hostfs_do_readlink(path, link, PATH_MAX);
err = hostfs_do_readlink(path, link, PATH_MAX);
if (err == PATH_MAX)
err = -E2BIG;
__putname(path);
+6
-1
@@ -9,6 +9,8 @@
* 2 of the License, or (at your option) any later version.
*/
#include <linux/lglock.h>
struct super_block;
struct linux_binprm;
struct path;

@@ -70,7 +72,8 @@ extern struct vfsmount *copy_tree(struct vfsmount *, struct dentry *, int);
extern void __init mnt_init(void);
extern spinlock_t vfsmount_lock;
DECLARE_BRLOCK(vfsmount_lock);
/*
* fs_struct.c

@@ -80,6 +83,8 @@ extern void chroot_fs_refs(struct path *, struct path *);
/*
* file_table.c
*/
extern void file_sb_list_add(struct file *f, struct super_block *sb);
extern void file_sb_list_del(struct file *f);
extern void mark_files_ro(struct super_block *);
extern struct file *get_empty_filp(void);
+3
-1
@@ -254,7 +254,9 @@ __flush_batch(journal_t *journal, struct buffer_head **bhs, int *batch_count)
{
int i;
ll_rw_block(SWRITE, *batch_count, bhs);
for (i = 0; i < *batch_count; i++)
write_dirty_buffer(bhs[i], WRITE);
for (i = 0; i < *batch_count; i++) {
struct buffer_head *bh = bhs[i];
clear_buffer_jwrite(bh);
+25
-24
@@ -119,7 +119,6 @@ static int journal_write_commit_record(journal_t *journal,
struct buffer_head *bh;
journal_header_t *header;
int ret;
int barrier_done = 0;
if (is_journal_aborted(journal))
return 0;

@@ -137,34 +136,36 @@ static int journal_write_commit_record(journal_t *journal,
JBUFFER_TRACE(descriptor, "write commit block");
set_buffer_dirty(bh);
if (journal->j_flags & JFS_BARRIER) {
set_buffer_ordered(bh);
barrier_done = 1;
}
ret = sync_dirty_buffer(bh);
if (barrier_done)
clear_buffer_ordered(bh);
/* is it possible for another commit to fail at roughly
* the same time as this one? If so, we don't want to
* trust the barrier flag in the super, but instead want
* to remember if we sent a barrier request
*/
if (ret == -EOPNOTSUPP && barrier_done) {
char b[BDEVNAME_SIZE];
ret = __sync_dirty_buffer(bh, WRITE_SYNC | WRITE_BARRIER);
printk(KERN_WARNING
"JBD: barrier-based sync failed on %s - "
"disabling barriers\n",
bdevname(journal->j_dev, b));
spin_lock(&journal->j_state_lock);
journal->j_flags &= ~JFS_BARRIER;
spin_unlock(&journal->j_state_lock);
/*
* Is it possible for another commit to fail at roughly
* the same time as this one? If so, we don't want to
* trust the barrier flag in the super, but instead want
* to remember if we sent a barrier request
*/
if (ret == -EOPNOTSUPP) {
char b[BDEVNAME_SIZE];
/* And try again, without the barrier */
set_buffer_uptodate(bh);
set_buffer_dirty(bh);
printk(KERN_WARNING
"JBD: barrier-based sync failed on %s - "
"disabling barriers\n",
bdevname(journal->j_dev, b));
spin_lock(&journal->j_state_lock);
journal->j_flags &= ~JFS_BARRIER;
spin_unlock(&journal->j_state_lock);
/* And try again, without the barrier */
set_buffer_uptodate(bh);
set_buffer_dirty(bh);
ret = sync_dirty_buffer(bh);
}
} else {
ret = sync_dirty_buffer(bh);
}
put_bh(bh); /* One for getblk() */
journal_put_journal_head(descriptor);
+1
-1
@@ -1024,7 +1024,7 @@ void journal_update_superblock(journal_t *journal, int wait)
if (wait)
sync_dirty_buffer(bh);
else
ll_rw_block(SWRITE, 1, &bh);
write_dirty_buffer(bh, WRITE);
out:
/* If we have just flushed the log (by marking s_start==0), then
+1
-1
@@ -617,7 +617,7 @@ static void flush_descriptor(journal_t *journal,
set_buffer_jwrite(bh);
BUFFER_TRACE(bh, "write");
set_buffer_dirty(bh);
ll_rw_block((write_op == WRITE) ? SWRITE : SWRITE_SYNC_PLUG, 1, &bh);
write_dirty_buffer(bh, write_op);
}
#endif

@@ -255,7 +255,9 @@ __flush_batch(journal_t *journal, int *batch_count)
{
int i;
ll_rw_block(SWRITE, *batch_count, journal->j_chkpt_bhs);
for (i = 0; i < *batch_count; i++)
write_dirty_buffer(journal->j_chkpt_bhs[i], WRITE);
for (i = 0; i < *batch_count; i++) {
struct buffer_head *bh = journal->j_chkpt_bhs[i];
clear_buffer_jwrite(bh);
+15
-24
@@ -101,7 +101,6 @@ static int journal_submit_commit_record(journal_t *journal,
struct commit_header *tmp;
struct buffer_head *bh;
int ret;
int barrier_done = 0;
struct timespec now = current_kernel_time();
if (is_journal_aborted(journal))

@@ -136,30 +135,22 @@ static int journal_submit_commit_record(journal_t *journal,
if (journal->j_flags & JBD2_BARRIER &&
!JBD2_HAS_INCOMPAT_FEATURE(journal,
JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT)) {
set_buffer_ordered(bh);
barrier_done = 1;
}
ret = submit_bh(WRITE_SYNC_PLUG, bh);
if (barrier_done)
clear_buffer_ordered(bh);
ret = submit_bh(WRITE_SYNC_PLUG | WRITE_BARRIER, bh);
if (ret == -EOPNOTSUPP) {
printk(KERN_WARNING
"JBD2: Disabling barriers on %s, "
"not supported by device\n", journal->j_devname);
write_lock(&journal->j_state_lock);
journal->j_flags &= ~JBD2_BARRIER;
write_unlock(&journal->j_state_lock);
/* is it possible for another commit to fail at roughly
* the same time as this one? If so, we don't want to
* trust the barrier flag in the super, but instead want
* to remember if we sent a barrier request
*/
if (ret == -EOPNOTSUPP && barrier_done) {
printk(KERN_WARNING
"JBD2: Disabling barriers on %s, "
"not supported by device\n", journal->j_devname);
write_lock(&journal->j_state_lock);
journal->j_flags &= ~JBD2_BARRIER;
write_unlock(&journal->j_state_lock);
/* And try again, without the barrier */
lock_buffer(bh);
set_buffer_uptodate(bh);
clear_buffer_dirty(bh);
/* And try again, without the barrier */
lock_buffer(bh);
set_buffer_uptodate(bh);
clear_buffer_dirty(bh);
ret = submit_bh(WRITE_SYNC_PLUG, bh);
}
} else {
ret = submit_bh(WRITE_SYNC_PLUG, bh);
}
*cbh = bh;
+1
-1
@@ -1124,7 +1124,7 @@ void jbd2_journal_update_superblock(journal_t *journal, int wait)
set_buffer_uptodate(bh);
}
} else
ll_rw_block(SWRITE, 1, &bh);
write_dirty_buffer(bh, WRITE);
out:
/* If we have just flushed the log (by marking s_start==0), then
Some files were not shown because too many files have changed in this diff.