fuse: Allow to queue bg requests through io-uring

This prepares queueing and sending background requests through
io-uring.

Signed-off-by: Bernd Schubert <bschubert@ddn.com>
Reviewed-by: Pavel Begunkov <asml.silence@gmail.com> # io_uring
Reviewed-by: Luis Henriques <luis@igalia.com>
Signed-off-by: Miklos Szeredi <mszeredi@redhat.com>
commit 857b0263f3 (parent c2c9af9a0b)
Author:    Bernd Schubert
Date:      2025-01-20 02:29:07 +01:00
Committer: Miklos Szeredi
3 changed files, 136 insertions(+), 1 deletion(-)

--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c

@@ -568,7 +568,25 @@ ssize_t __fuse_simple_request(struct mnt_idmap *idmap,
 	return ret;
 }
 
-static bool fuse_request_queue_background(struct fuse_req *req)
+#ifdef CONFIG_FUSE_IO_URING
+static bool fuse_request_queue_background_uring(struct fuse_conn *fc,
+						struct fuse_req *req)
+{
+	struct fuse_iqueue *fiq = &fc->iq;
+
+	req->in.h.unique = fuse_get_unique(fiq);
+	req->in.h.len = sizeof(struct fuse_in_header) +
+		fuse_len_args(req->args->in_numargs,
+			      (struct fuse_arg *) req->args->in_args);
+
+	return fuse_uring_queue_bq_req(req);
+}
+#endif
+
+/*
+ * @return true if queued
+ */
+static int fuse_request_queue_background(struct fuse_req *req)
 {
 	struct fuse_mount *fm = req->fm;
 	struct fuse_conn *fc = fm->fc;
@@ -580,6 +598,12 @@ static bool fuse_request_queue_background(struct fuse_req *req)
 		atomic_inc(&fc->num_waiting);
 	}
 	__set_bit(FR_ISREPLY, &req->flags);
+
+#ifdef CONFIG_FUSE_IO_URING
+	if (fuse_uring_ready(fc))
+		return fuse_request_queue_background_uring(fc, req);
+#endif
+
 	spin_lock(&fc->bg_lock);
 	if (likely(fc->connected)) {
 		fc->num_background++;
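
For context, the result of fuse_request_queue_background() is consumed by
fuse_simple_background() in fs/fuse/dev.c, which this commit leaves unchanged;
a rough sketch of that caller (request allocation elided):

	/*
	 * Context sketch, not part of this commit: a zero/false return from
	 * fuse_request_queue_background() means "not queued", and the caller
	 * tears the request down.
	 */
	int fuse_simple_background(struct fuse_mount *fm, struct fuse_args *args,
				   gfp_t gfp_flags)
	{
		struct fuse_req *req;

		/* ... allocate req and set FR_BACKGROUND (elided) ... */

		fuse_args_to_req(req, args);

		if (!fuse_request_queue_background(req)) {
			fuse_put_request(req);
			return -ENOTCONN;
		}

		return 0;
	}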

--- a/fs/fuse/dev_uring.c
+++ b/fs/fuse/dev_uring.c

@@ -47,10 +47,53 @@ static struct fuse_ring_ent *uring_cmd_to_ring_ent(struct io_uring_cmd *cmd)
 	return pdu->ent;
 }
 
+static void fuse_uring_flush_bg(struct fuse_ring_queue *queue)
+{
+	struct fuse_ring *ring = queue->ring;
+	struct fuse_conn *fc = ring->fc;
+
+	lockdep_assert_held(&queue->lock);
+	lockdep_assert_held(&fc->bg_lock);
+
+	/*
+	 * Allow one bg request per queue, ignoring global fc limits.
+	 * This prevents a single queue from consuming all resources and
+	 * eliminates the need for remote queue wake-ups when global
+	 * limits are met but this queue has no more waiting requests.
+	 */
+	while ((fc->active_background < fc->max_background ||
+		!queue->active_background) &&
+	       (!list_empty(&queue->fuse_req_bg_queue))) {
+		struct fuse_req *req;
+
+		req = list_first_entry(&queue->fuse_req_bg_queue,
+				       struct fuse_req, list);
+		fc->active_background++;
+		queue->active_background++;
+		list_move_tail(&req->list, &queue->fuse_req_queue);
+	}
+}
+
 static void fuse_uring_req_end(struct fuse_ring_ent *ent, struct fuse_req *req,
 			       int error)
 {
 	struct fuse_ring_queue *queue = ent->queue;
+	struct fuse_ring *ring = queue->ring;
+	struct fuse_conn *fc = ring->fc;
 
+	lockdep_assert_not_held(&queue->lock);
 	spin_lock(&queue->lock);
 	ent->fuse_req = NULL;
+	if (test_bit(FR_BACKGROUND, &req->flags)) {
+		queue->active_background--;
+		spin_lock(&fc->bg_lock);
+		fuse_uring_flush_bg(queue);
+		spin_unlock(&fc->bg_lock);
+	}
+
 	spin_unlock(&queue->lock);
 
 	if (error)
 		req->out.h.error = error;
@@ -78,6 +121,7 @@ void fuse_uring_abort_end_requests(struct fuse_ring *ring)
 {
 	int qid;
 	struct fuse_ring_queue *queue;
+	struct fuse_conn *fc = ring->fc;
 
 	for (qid = 0; qid < ring->nr_queues; qid++) {
 		queue = READ_ONCE(ring->queues[qid]);
@@ -85,6 +129,13 @@ void fuse_uring_abort_end_requests(struct fuse_ring *ring)
continue;
queue->stopped = true;
WARN_ON_ONCE(ring->fc->max_background != UINT_MAX);
spin_lock(&queue->lock);
spin_lock(&fc->bg_lock);
fuse_uring_flush_bg(queue);
spin_unlock(&fc->bg_lock);
spin_unlock(&queue->lock);
fuse_uring_abort_end_queue_requests(queue);
}
}
@@ -190,6 +241,7 @@ static struct fuse_ring_queue *fuse_uring_create_queue(struct fuse_ring *ring,
 	INIT_LIST_HEAD(&queue->ent_w_req_queue);
 	INIT_LIST_HEAD(&queue->ent_in_userspace);
 	INIT_LIST_HEAD(&queue->fuse_req_queue);
+	INIT_LIST_HEAD(&queue->fuse_req_bg_queue);
 
 	queue->fpq.processing = pq;
 	fuse_pqueue_init(&queue->fpq);
@@ -1141,6 +1193,53 @@ err:
 	fuse_request_end(req);
 }
 
+bool fuse_uring_queue_bq_req(struct fuse_req *req)
+{
+	struct fuse_conn *fc = req->fm->fc;
+	struct fuse_ring *ring = fc->ring;
+	struct fuse_ring_queue *queue;
+	struct fuse_ring_ent *ent = NULL;
+
+	queue = fuse_uring_task_to_queue(ring);
+	if (!queue)
+		return false;
+
+	spin_lock(&queue->lock);
+	if (unlikely(queue->stopped)) {
+		spin_unlock(&queue->lock);
+		return false;
+	}
+
+	list_add_tail(&req->list, &queue->fuse_req_bg_queue);
+
+	ent = list_first_entry_or_null(&queue->ent_avail_queue,
+				       struct fuse_ring_ent, list);
+	spin_lock(&fc->bg_lock);
+	fc->num_background++;
+	if (fc->num_background == fc->max_background)
+		fc->blocked = 1;
+	fuse_uring_flush_bg(queue);
+	spin_unlock(&fc->bg_lock);
+
+	/*
+	 * Due to bg_queue flush limits there might be other bg requests
+	 * in the queue that need to be handled first. Or no further req
+	 * might be available.
+	 */
+	req = list_first_entry_or_null(&queue->fuse_req_queue, struct fuse_req,
+				       list);
+	if (ent && req) {
+		fuse_uring_add_req_to_ring_ent(ent, req);
+		spin_unlock(&queue->lock);
+
+		fuse_uring_dispatch_ent(ent);
+	} else {
+		spin_unlock(&queue->lock);
+	}
+
+	return true;
+}
+
 static const struct fuse_iqueue_ops fuse_io_uring_ops = {
 	/* should be sent over io-uring as enhancement */
 	.send_forget = fuse_dev_queue_forget,
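
The comment in fuse_uring_flush_bg() above guarantees each queue one active
background request even when the global fc limit is already consumed. A
minimal, self-contained userspace model of that policy (hypothetical struct
and field names; kernel lists and locking replaced by plain counters):

	#include <stdio.h>

	struct conn  { unsigned int max_background, active_background; };
	struct queue { unsigned int active_background, pending; };

	/* Mirrors the while-loop condition of fuse_uring_flush_bg(): move
	 * pending bg requests while the global limit allows it, OR while
	 * this queue has no active bg request at all. */
	static void flush_bg(struct conn *fc, struct queue *q)
	{
		while ((fc->active_background < fc->max_background ||
			!q->active_background) && q->pending) {
			fc->active_background++;
			q->active_background++;
			q->pending--;
		}
	}

	int main(void)
	{
		/* Global limit already exhausted by other queues ... */
		struct conn fc = { .max_background = 2, .active_background = 2 };
		struct queue q = { .pending = 3 };

		flush_bg(&fc, &q);
		/* ... yet exactly one request still moves: prints "active=1 pending=2" */
		printf("active=%u pending=%u\n", q.active_background, q.pending);
		return 0;
	}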

--- a/fs/fuse/dev_uring_i.h
+++ b/fs/fuse/dev_uring_i.h

@@ -82,8 +82,13 @@ struct fuse_ring_queue {
 	/* fuse requests waiting for an entry slot */
 	struct list_head fuse_req_queue;
 
+	/* background fuse requests */
+	struct list_head fuse_req_bg_queue;
+
 	struct fuse_pqueue fpq;
 
+	unsigned int active_background;
+
 	bool stopped;
 };
@@ -127,6 +132,7 @@ void fuse_uring_stop_queues(struct fuse_ring *ring);
 void fuse_uring_abort_end_requests(struct fuse_ring *ring);
 int fuse_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags);
 void fuse_uring_queue_fuse_req(struct fuse_iqueue *fiq, struct fuse_req *req);
+bool fuse_uring_queue_bq_req(struct fuse_req *req);
 
 static inline void fuse_uring_abort(struct fuse_conn *fc)
 {
@@ -179,6 +185,12 @@ static inline void fuse_uring_abort(struct fuse_conn *fc)
 static inline void fuse_uring_wait_stopped_queues(struct fuse_conn *fc)
 {
 }
+
+static inline bool fuse_uring_ready(struct fuse_conn *fc)
+{
+	return false;
+}
+
 #endif /* CONFIG_FUSE_IO_URING */
 
 #endif /* _FS_FUSE_DEV_URING_I_H */
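
For reference, the CONFIG_FUSE_IO_URING=y counterpart of the fuse_uring_ready()
stub above was added by an earlier patch in this series; roughly:

	/* Context sketch, from the CONFIG_FUSE_IO_URING=y side of this header:
	 * the ring exists and all its queues have registered entries. */
	static inline bool fuse_uring_ready(struct fuse_conn *fc)
	{
		return fc->ring && fc->ring->ready;
	}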