mirror of
https://github.com/Dasharo/linux.git
synced 2026-03-06 15:25:10 -08:00
Merge tag 'vfs-6.14-final.fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/vfs/vfs
Pull vfs fixes from Christian Brauner:
"A final set of fixes for this cycle:
VFS:
- Ensure that the stable offset api doesn't return duplicate
directory entries when userspace has to perform the getdents call
multiple times on large directories
afs:
- Prevent invalid pointer dereference during get_link RCU pathwalk
fuse:
- Fix deadlock caused by uninitialized rings when using io_uring with
fuse
- Handle race condition when using io_uring with fuse to prevent NULL
dereference
netfs:
- Ensure that invalidate_cache is only called if implemented
- Fix collection of results during pause when collection is
offloaded
- Ensure rolling_buffer_load_from_ra() doesn't clear mark bits
- Make netfs_unbuffered_read() return ssize_t rather than int"
* tag 'vfs-6.14-final.fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/vfs/vfs:
libfs: Fix duplicate directory entry in offset_dir_lookup
fuse: fix possible deadlock if rings are never initialized
netfs: Fix netfs_unbuffered_read() to return ssize_t rather than int
netfs: Fix rolling_buffer_load_from_ra() to not clear mark bits
netfs: Call `invalidate_cache` only if implemented
netfs: Fix collection of results during pause when collection offloaded
fuse: fix uring race condition for null dereference of fc
afs: Fix afs_atcell_get_link() to check if ws_cell is unset first
This commit is contained in:
@@ -314,6 +314,9 @@ static const char *afs_atcell_get_link(struct dentry *dentry, struct inode *inod
|
||||
const char *name;
|
||||
bool dotted = vnode->fid.vnode == 3;
|
||||
|
||||
if (!rcu_access_pointer(net->ws_cell))
|
||||
return ERR_PTR(-ENOENT);
|
||||
|
||||
if (!dentry) {
|
||||
/* We're in RCU-pathwalk. */
|
||||
cell = rcu_dereference(net->ws_cell);
|
||||
@@ -325,9 +328,6 @@ static const char *afs_atcell_get_link(struct dentry *dentry, struct inode *inod
|
||||
return name;
|
||||
}
|
||||
|
||||
if (!rcu_access_pointer(net->ws_cell))
|
||||
return ERR_PTR(-ENOENT);
|
||||
|
||||
down_read(&net->cells_lock);
|
||||
|
||||
cell = rcu_dereference_protected(net->ws_cell, lockdep_is_held(&net->cells_lock));
|
||||
|
||||
@@ -77,7 +77,7 @@ void fuse_set_initialized(struct fuse_conn *fc)
|
||||
static bool fuse_block_alloc(struct fuse_conn *fc, bool for_background)
|
||||
{
|
||||
return !fc->initialized || (for_background && fc->blocked) ||
|
||||
(fc->io_uring && !fuse_uring_ready(fc));
|
||||
(fc->io_uring && fc->connected && !fuse_uring_ready(fc));
|
||||
}
|
||||
|
||||
static void fuse_drop_waiting(struct fuse_conn *fc)
|
||||
|
||||
@@ -208,11 +208,11 @@ static struct fuse_ring *fuse_uring_create(struct fuse_conn *fc)
|
||||
|
||||
init_waitqueue_head(&ring->stop_waitq);
|
||||
|
||||
fc->ring = ring;
|
||||
ring->nr_queues = nr_queues;
|
||||
ring->fc = fc;
|
||||
ring->max_payload_sz = max_payload_size;
|
||||
atomic_set(&ring->queue_refs, 0);
|
||||
smp_store_release(&fc->ring, ring);
|
||||
|
||||
spin_unlock(&fc->lock);
|
||||
return ring;
|
||||
@@ -1041,7 +1041,7 @@ static int fuse_uring_register(struct io_uring_cmd *cmd,
|
||||
unsigned int issue_flags, struct fuse_conn *fc)
|
||||
{
|
||||
const struct fuse_uring_cmd_req *cmd_req = io_uring_sqe_cmd(cmd->sqe);
|
||||
struct fuse_ring *ring = fc->ring;
|
||||
struct fuse_ring *ring = smp_load_acquire(&fc->ring);
|
||||
struct fuse_ring_queue *queue;
|
||||
struct fuse_ring_ent *ent;
|
||||
int err;
|
||||
|
||||
@@ -496,7 +496,7 @@ offset_dir_lookup(struct dentry *parent, loff_t offset)
|
||||
found = find_positive_dentry(parent, NULL, false);
|
||||
else {
|
||||
rcu_read_lock();
|
||||
child = mas_find(&mas, DIR_OFFSET_MAX);
|
||||
child = mas_find_rev(&mas, DIR_OFFSET_MIN);
|
||||
found = find_positive_dentry(parent, child, false);
|
||||
rcu_read_unlock();
|
||||
}
|
||||
|
||||
@@ -125,9 +125,9 @@ static int netfs_dispatch_unbuffered_reads(struct netfs_io_request *rreq)
|
||||
* Perform a read to an application buffer, bypassing the pagecache and the
|
||||
* local disk cache.
|
||||
*/
|
||||
static int netfs_unbuffered_read(struct netfs_io_request *rreq, bool sync)
|
||||
static ssize_t netfs_unbuffered_read(struct netfs_io_request *rreq, bool sync)
|
||||
{
|
||||
int ret;
|
||||
ssize_t ret;
|
||||
|
||||
_enter("R=%x %llx-%llx",
|
||||
rreq->debug_id, rreq->start, rreq->start + rreq->len - 1);
|
||||
@@ -155,7 +155,7 @@ static int netfs_unbuffered_read(struct netfs_io_request *rreq, bool sync)
|
||||
else
|
||||
ret = -EIOCBQUEUED;
|
||||
out:
|
||||
_leave(" = %d", ret);
|
||||
_leave(" = %zd", ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
||||
@@ -682,14 +682,16 @@ void netfs_wait_for_pause(struct netfs_io_request *rreq)
|
||||
trace_netfs_rreq(rreq, netfs_rreq_trace_wait_queue);
|
||||
prepare_to_wait(&rreq->waitq, &myself, TASK_UNINTERRUPTIBLE);
|
||||
|
||||
subreq = list_first_entry_or_null(&stream->subrequests,
|
||||
struct netfs_io_subrequest, rreq_link);
|
||||
if (subreq &&
|
||||
(!test_bit(NETFS_SREQ_IN_PROGRESS, &subreq->flags) ||
|
||||
test_bit(NETFS_SREQ_MADE_PROGRESS, &subreq->flags))) {
|
||||
__set_current_state(TASK_RUNNING);
|
||||
netfs_read_collection(rreq);
|
||||
continue;
|
||||
if (!test_bit(NETFS_RREQ_OFFLOAD_COLLECTION, &rreq->flags)) {
|
||||
subreq = list_first_entry_or_null(&stream->subrequests,
|
||||
struct netfs_io_subrequest, rreq_link);
|
||||
if (subreq &&
|
||||
(!test_bit(NETFS_SREQ_IN_PROGRESS, &subreq->flags) ||
|
||||
test_bit(NETFS_SREQ_MADE_PROGRESS, &subreq->flags))) {
|
||||
__set_current_state(TASK_RUNNING);
|
||||
netfs_read_collection(rreq);
|
||||
continue;
|
||||
}
|
||||
}
|
||||
|
||||
if (!test_bit(NETFS_RREQ_IN_PROGRESS, &rreq->flags) ||
|
||||
|
||||
@@ -146,10 +146,6 @@ ssize_t rolling_buffer_load_from_ra(struct rolling_buffer *roll,
|
||||
|
||||
/* Store the counter after setting the slot. */
|
||||
smp_store_release(&roll->next_head_slot, to);
|
||||
|
||||
for (; ix < folioq_nr_slots(fq); ix++)
|
||||
folioq_clear(fq, ix);
|
||||
|
||||
return size;
|
||||
}
|
||||
|
||||
|
||||
@@ -400,7 +400,8 @@ void netfs_write_collection_worker(struct work_struct *work)
|
||||
trace_netfs_rreq(wreq, netfs_rreq_trace_write_done);
|
||||
|
||||
if (wreq->io_streams[1].active &&
|
||||
wreq->io_streams[1].failed) {
|
||||
wreq->io_streams[1].failed &&
|
||||
ictx->ops->invalidate_cache) {
|
||||
/* Cache write failure doesn't prevent writeback completion
|
||||
* unless we're in disconnected mode.
|
||||
*/
|
||||
|
||||
Reference in New Issue
Block a user