netfs: Change the read result collector to only use one work item
Change the way netfslib collects read results to do all the collection for
a particular read request using a single work item that walks along the
subrequest queue as subrequests make progress or complete, unlocking folios
progressively rather than doing the unlock in parallel as the results of
parallel subrequests come in.
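
To make the mechanism concrete, below is a minimal, self-contained user-space
model of the single-collector pattern. It is a sketch only: the names
(struct subreq, collect()) are illustrative and are not the netfslib API, and
the real collector also deals with folio spanning, retries and error handling.

/*
 * Illustrative model only (hypothetical names, not the netfslib API): a
 * single collector walks the subrequest queue from the front and releases
 * ("unlocks") everything up to the start of the first subrequest that is
 * still in flight, so folios are released progressively and in file order.
 */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct subreq {
	unsigned long long start;	/* file offset covered by this subrequest */
	unsigned long long len;
	bool complete;			/* set when the I/O has finished */
	struct subreq *next;
};

/* Walk the queue from the front; stop at the first incomplete subrequest. */
static void collect(struct subreq **queue, unsigned long long *collected_to)
{
	while (*queue && (*queue)->complete) {
		struct subreq *s = *queue;

		*collected_to = s->start + s->len;
		printf("unlock folios up to %llu\n", *collected_to);
		*queue = s->next;
		free(s);
	}
}

int main(void)
{
	struct subreq *queue = NULL, **p = &queue;
	unsigned long long collected_to = 0;

	/* Three 32-byte subrequests covering 0..96. */
	for (int i = 0; i < 3; i++) {
		struct subreq *s = calloc(1, sizeof(*s));

		if (!s)
			return 1;
		s->start = (unsigned long long)i * 32;
		s->len = 32;
		*p = s;
		p = &s->next;
	}

	queue->complete = true;		/* subrequest 0 completes */
	collect(&queue, &collected_to);	/* unlocks up to offset 32 */

	queue->next->complete = true;	/* subrequest 2 completes out of order */
	collect(&queue, &collected_to);	/* nothing: subrequest 1 still in flight */

	queue->complete = true;		/* subrequest 1 completes */
	collect(&queue, &collected_to);	/* unlocks up to 64, then 96, in order */
	return 0;
}

Because the collector only ever advances along the queue, a sequential
whole-file read still sees folios unlocked as soon as the data covering them
has arrived, without needing one work item per subrequest.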
The code is remodelled to be more like the write-side code, though only
using a single stream. This makes it more directly comparable and thus
easier to duplicate fixes between the two sides.
This has a number of advantages:
(1) It's simpler. There doesn't need to be a complex donation mechanism
to handle mismatches between the size and alignment of subrequests and
folios. The collector unlocks folios as the subrequests covering each
folio complete.
(2) It should cause less scheduler overhead as there's a single work item
in play, rather than one work item per subrequest unlocking pages in
parallel, when a read gets split up into a lot of subrequests.
Whilst the parallelism is nice in theory, in practice, the vast
majority of loads are sequential reads of the whole file, so
committing a bunch of threads to unlocking folios out of order doesn't
help in those cases.
(3) It should make it easier to implement content decryption. A folio
cannot be decrypted until all the requests that contribute to it have
completed - and, again, most loads are sequential and so, most of the
time, we want to begin decryption sequentially (though it's great if
the decryption can happen in parallel).
There is a disadvantage in that we're losing the ability to decrypt and
unlock things on an as-things-arrive basis, which may affect some
applications.
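
Point (3) above can be illustrated with another hedged sketch (purely
illustrative names, no real netfs or crypto API): with in-order collection,
deciding whether a folio is ready for decryption reduces to comparing the
collector's progress point with the end of that folio, since every subrequest
contributing to it must then have completed.

/*
 * Illustrative only: toy_folio and decrypt_ready_folios() are hypothetical.
 * A folio may be handed to decryption once the collector's in-order progress
 * point has passed its end, i.e. all contributing subrequests have completed.
 */
#include <stdbool.h>
#include <stdio.h>

struct toy_folio {
	unsigned long long start;
	unsigned long long size;
	bool decrypted;
};

static void decrypt_ready_folios(struct toy_folio *folios, int nr,
				 unsigned long long collected_to)
{
	for (int i = 0; i < nr; i++) {
		struct toy_folio *f = &folios[i];

		/* Folios are in file order; stop at the first one that is
		 * not yet fully covered by completed subrequests. */
		if (collected_to < f->start + f->size)
			break;
		if (!f->decrypted) {
			f->decrypted = true;
			printf("decrypt folio at %llu\n", f->start);
		}
	}
}

int main(void)
{
	struct toy_folio folios[] = {
		{ .start = 0, .size = 64 },
		{ .start = 64, .size = 64 },
	};

	decrypt_ready_folios(folios, 2, 32);	/* nothing is ready yet */
	decrypt_ready_folios(folios, 2, 128);	/* both folios decrypt, in order */
	return 0;
}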
Signed-off-by: David Howells <dhowells@redhat.com>
Link: https://lore.kernel.org/r/20241216204124.3752367-28-dhowells@redhat.com
cc: Jeff Layton <jlayton@kernel.org>
cc: netfs@lists.linux.dev
cc: linux-fsdevel@vger.kernel.org
Signed-off-by: Christian Brauner <brauner@kernel.org>
commit e2d46f2ec3
parent eddf51f2bb
committed by Christian Brauner
@@ -81,8 +81,7 @@ static void v9fs_issue_read(struct netfs_io_subrequest *subreq)
__set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags);
if (pos + total >= i_size_read(rreq->inode))
__set_bit(NETFS_SREQ_HIT_EOF, &subreq->flags);

if (!err) {
if (!err && total) {
subreq->transferred += total;
__set_bit(NETFS_SREQ_MADE_PROGRESS, &subreq->flags);
}
@@ -325,8 +325,10 @@ static ssize_t afs_read_dir(struct afs_vnode *dvnode, struct file *file)
* haven't read it yet.
*/
if (test_bit(AFS_VNODE_DIR_VALID, &dvnode->flags) &&
test_bit(AFS_VNODE_DIR_READ, &dvnode->flags))
test_bit(AFS_VNODE_DIR_READ, &dvnode->flags)) {
ret = i_size;
goto valid;
}

up_read(&dvnode->validate_lock);
if (down_write_killable(&dvnode->validate_lock) < 0)
@@ -346,11 +348,13 @@ static ssize_t afs_read_dir(struct afs_vnode *dvnode, struct file *file)

set_bit(AFS_VNODE_DIR_VALID, &dvnode->flags);
set_bit(AFS_VNODE_DIR_READ, &dvnode->flags);
} else {
ret = i_size;
}

downgrade_write(&dvnode->validate_lock);
valid:
return i_size;
return ret;

error_unlock:
up_write(&dvnode->validate_lock);
@@ -223,10 +223,13 @@ static void finish_netfs_read(struct ceph_osd_request *req)
subreq->len, i_size_read(req->r_inode));

/* no object means success but no data */
if (err == -ENOENT)
if (err == -ENOENT) {
__set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags);
__set_bit(NETFS_SREQ_MADE_PROGRESS, &subreq->flags);
err = 0;
else if (err == -EBLOCKLISTED)
} else if (err == -EBLOCKLISTED) {
fsc->blocklisted = true;
}

if (err >= 0) {
if (sparse && err > 0)
@@ -242,6 +245,8 @@ static void finish_netfs_read(struct ceph_osd_request *req)
if (err > subreq->len)
err = subreq->len;
}
if (err > 0)
__set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags);
}

if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGES) {
@@ -121,12 +121,6 @@ static ssize_t netfs_prepare_read_iterator(struct netfs_io_subrequest *subreq)

subreq->io_iter = rreq->buffer.iter;

if (iov_iter_is_folioq(&subreq->io_iter)) {
subreq->curr_folioq = (struct folio_queue *)subreq->io_iter.folioq;
subreq->curr_folioq_slot = subreq->io_iter.folioq_slot;
subreq->curr_folio_order = subreq->curr_folioq->orders[subreq->curr_folioq_slot];
}

iov_iter_truncate(&subreq->io_iter, subreq->len);
rolling_buffer_advance(&rreq->buffer, subreq->len);
return subreq->len;
@@ -147,19 +141,6 @@ static enum netfs_io_source netfs_cache_prepare_read(struct netfs_io_request *rr
|
||||
|
||||
}
|
||||
|
||||
void netfs_cache_read_terminated(void *priv, ssize_t transferred_or_error, bool was_async)
|
||||
{
|
||||
struct netfs_io_subrequest *subreq = priv;
|
||||
|
||||
if (transferred_or_error > 0) {
|
||||
subreq->transferred += transferred_or_error;
|
||||
subreq->error = 0;
|
||||
} else {
|
||||
subreq->error = transferred_or_error;
|
||||
}
|
||||
schedule_work(&subreq->work);
|
||||
}
|
||||
|
||||
/*
|
||||
* Issue a read against the cache.
|
||||
* - Eats the caller's ref on subreq.
|
||||
@@ -174,6 +155,47 @@ static void netfs_read_cache_to_pagecache(struct netfs_io_request *rreq,
|
||||
netfs_cache_read_terminated, subreq);
|
||||
}
|
||||
|
||||
static void netfs_issue_read(struct netfs_io_request *rreq,
|
||||
struct netfs_io_subrequest *subreq)
|
||||
{
|
||||
struct netfs_io_stream *stream = &rreq->io_streams[0];
|
||||
|
||||
__set_bit(NETFS_SREQ_IN_PROGRESS, &subreq->flags);
|
||||
|
||||
/* We add to the end of the list whilst the collector may be walking
|
||||
* the list. The collector only goes nextwards and uses the lock to
|
||||
* remove entries off of the front.
|
||||
*/
|
||||
spin_lock(&rreq->lock);
|
||||
list_add_tail(&subreq->rreq_link, &stream->subrequests);
|
||||
if (list_is_first(&subreq->rreq_link, &stream->subrequests)) {
|
||||
stream->front = subreq;
|
||||
if (!stream->active) {
|
||||
stream->collected_to = stream->front->start;
|
||||
/* Store list pointers before active flag */
|
||||
smp_store_release(&stream->active, true);
|
||||
}
|
||||
}
|
||||
|
||||
spin_unlock(&rreq->lock);
|
||||
|
||||
switch (subreq->source) {
|
||||
case NETFS_DOWNLOAD_FROM_SERVER:
|
||||
rreq->netfs_ops->issue_read(subreq);
|
||||
break;
|
||||
case NETFS_READ_FROM_CACHE:
|
||||
netfs_read_cache_to_pagecache(rreq, subreq);
|
||||
break;
|
||||
default:
|
||||
__set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags);
|
||||
subreq->error = 0;
|
||||
iov_iter_zero(subreq->len, &subreq->io_iter);
|
||||
subreq->transferred = subreq->len;
|
||||
netfs_read_subreq_terminated(subreq);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Perform a read to the pagecache from a series of sources of different types,
|
||||
* slicing up the region to be read according to available cache blocks and
|
||||
@@ -186,8 +208,6 @@ static void netfs_read_to_pagecache(struct netfs_io_request *rreq)
|
||||
ssize_t size = rreq->len;
|
||||
int ret = 0;
|
||||
|
||||
atomic_inc(&rreq->nr_outstanding);
|
||||
|
||||
do {
|
||||
struct netfs_io_subrequest *subreq;
|
||||
enum netfs_io_source source = NETFS_DOWNLOAD_FROM_SERVER;
|
||||
@@ -202,14 +222,6 @@ static void netfs_read_to_pagecache(struct netfs_io_request *rreq)
|
||||
subreq->start = start;
|
||||
subreq->len = size;
|
||||
|
||||
atomic_inc(&rreq->nr_outstanding);
|
||||
spin_lock(&rreq->lock);
|
||||
list_add_tail(&subreq->rreq_link, &rreq->subrequests);
|
||||
subreq->prev_donated = rreq->prev_donated;
|
||||
rreq->prev_donated = 0;
|
||||
trace_netfs_sreq(subreq, netfs_sreq_trace_added);
|
||||
spin_unlock(&rreq->lock);
|
||||
|
||||
source = netfs_cache_prepare_read(rreq, subreq, rreq->i_size);
|
||||
subreq->source = source;
|
||||
if (source == NETFS_DOWNLOAD_FROM_SERVER) {
|
||||
@@ -237,17 +249,18 @@ static void netfs_read_to_pagecache(struct netfs_io_request *rreq)
|
||||
netfs_stat(&netfs_n_rh_download);
|
||||
if (rreq->netfs_ops->prepare_read) {
|
||||
ret = rreq->netfs_ops->prepare_read(subreq);
|
||||
if (ret < 0)
|
||||
goto prep_failed;
|
||||
if (ret < 0) {
|
||||
subreq->error = ret;
|
||||
/* Not queued - release both refs. */
|
||||
netfs_put_subrequest(subreq, false,
|
||||
netfs_sreq_trace_put_cancel);
|
||||
netfs_put_subrequest(subreq, false,
|
||||
netfs_sreq_trace_put_cancel);
|
||||
break;
|
||||
}
|
||||
trace_netfs_sreq(subreq, netfs_sreq_trace_prepare);
|
||||
}
|
||||
|
||||
slice = netfs_prepare_read_iterator(subreq);
|
||||
if (slice < 0)
|
||||
goto prep_iter_failed;
|
||||
|
||||
rreq->netfs_ops->issue_read(subreq);
|
||||
goto done;
|
||||
goto issue;
|
||||
}
|
||||
|
||||
fill_with_zeroes:
|
||||
@@ -255,67 +268,50 @@ static void netfs_read_to_pagecache(struct netfs_io_request *rreq)
|
||||
subreq->source = NETFS_FILL_WITH_ZEROES;
|
||||
trace_netfs_sreq(subreq, netfs_sreq_trace_submit);
|
||||
netfs_stat(&netfs_n_rh_zero);
|
||||
slice = netfs_prepare_read_iterator(subreq);
|
||||
if (slice < 0)
|
||||
goto prep_iter_failed;
|
||||
__set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags);
|
||||
subreq->error = 0;
|
||||
netfs_read_subreq_terminated(subreq);
|
||||
goto done;
|
||||
goto issue;
|
||||
}
|
||||
|
||||
if (source == NETFS_READ_FROM_CACHE) {
|
||||
trace_netfs_sreq(subreq, netfs_sreq_trace_submit);
|
||||
slice = netfs_prepare_read_iterator(subreq);
|
||||
if (slice < 0)
|
||||
goto prep_iter_failed;
|
||||
netfs_read_cache_to_pagecache(rreq, subreq);
|
||||
goto done;
|
||||
goto issue;
|
||||
}
|
||||
|
||||
pr_err("Unexpected read source %u\n", source);
|
||||
WARN_ON_ONCE(1);
|
||||
break;
|
||||
|
||||
prep_iter_failed:
|
||||
ret = slice;
|
||||
prep_failed:
|
||||
subreq->error = ret;
|
||||
atomic_dec(&rreq->nr_outstanding);
|
||||
netfs_put_subrequest(subreq, false, netfs_sreq_trace_put_cancel);
|
||||
break;
|
||||
|
||||
done:
|
||||
issue:
|
||||
slice = netfs_prepare_read_iterator(subreq);
|
||||
if (slice < 0) {
|
||||
ret = slice;
|
||||
subreq->error = ret;
|
||||
trace_netfs_sreq(subreq, netfs_sreq_trace_cancel);
|
||||
/* Not queued - release both refs. */
|
||||
netfs_put_subrequest(subreq, false, netfs_sreq_trace_put_cancel);
|
||||
netfs_put_subrequest(subreq, false, netfs_sreq_trace_put_cancel);
|
||||
break;
|
||||
}
|
||||
size -= slice;
|
||||
start += slice;
|
||||
if (size <= 0) {
|
||||
smp_wmb(); /* Write lists before ALL_QUEUED. */
|
||||
set_bit(NETFS_RREQ_ALL_QUEUED, &rreq->flags);
|
||||
}
|
||||
|
||||
netfs_issue_read(rreq, subreq);
|
||||
cond_resched();
|
||||
} while (size > 0);
|
||||
|
||||
if (atomic_dec_and_test(&rreq->nr_outstanding))
|
||||
netfs_rreq_terminated(rreq);
|
||||
if (unlikely(size > 0)) {
|
||||
smp_wmb(); /* Write lists before ALL_QUEUED. */
|
||||
set_bit(NETFS_RREQ_ALL_QUEUED, &rreq->flags);
|
||||
netfs_wake_read_collector(rreq);
|
||||
}
|
||||
|
||||
/* Defer error return as we may need to wait for outstanding I/O. */
|
||||
cmpxchg(&rreq->error, 0, ret);
|
||||
}
|
||||
|
||||
/*
|
||||
* Wait for the read operation to complete, successfully or otherwise.
|
||||
*/
|
||||
static int netfs_wait_for_read(struct netfs_io_request *rreq)
|
||||
{
|
||||
int ret;
|
||||
|
||||
trace_netfs_rreq(rreq, netfs_rreq_trace_wait_ip);
|
||||
wait_on_bit(&rreq->flags, NETFS_RREQ_IN_PROGRESS, TASK_UNINTERRUPTIBLE);
|
||||
ret = rreq->error;
|
||||
if (ret == 0 && rreq->submitted < rreq->len) {
|
||||
trace_netfs_failure(rreq, NULL, ret, netfs_fail_short_read);
|
||||
ret = -EIO;
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
/**
|
||||
* netfs_readahead - Helper to manage a read request
|
||||
* @ractl: The description of the readahead request
|
||||
@@ -344,6 +340,8 @@ void netfs_readahead(struct readahead_control *ractl)
|
||||
if (IS_ERR(rreq))
|
||||
return;
|
||||
|
||||
__set_bit(NETFS_RREQ_OFFLOAD_COLLECTION, &rreq->flags);
|
||||
|
||||
ret = netfs_begin_cache_read(rreq, ictx);
|
||||
if (ret == -ENOMEM || ret == -EINTR || ret == -ERESTARTSYS)
|
||||
goto cleanup_free;
|
||||
@@ -460,7 +458,7 @@ static int netfs_read_gaps(struct file *file, struct folio *folio)
|
||||
folio_put(sink);
|
||||
|
||||
ret = netfs_wait_for_read(rreq);
|
||||
if (ret == 0) {
|
||||
if (ret >= 0) {
|
||||
flush_dcache_folio(folio);
|
||||
folio_mark_uptodate(folio);
|
||||
}
|
||||
@@ -748,7 +746,7 @@ int netfs_prefetch_for_write(struct file *file, struct folio *folio,
|
||||
netfs_read_to_pagecache(rreq);
|
||||
ret = netfs_wait_for_read(rreq);
|
||||
netfs_put_request(rreq, false, netfs_rreq_trace_put_return);
|
||||
return ret;
|
||||
return ret < 0 ? ret : 0;
|
||||
|
||||
error_put:
|
||||
netfs_put_request(rreq, false, netfs_rreq_trace_put_discard);
|
||||
|
||||
@@ -47,12 +47,11 @@ static void netfs_prepare_dio_read_iterator(struct netfs_io_subrequest *subreq)
|
||||
*/
|
||||
static int netfs_dispatch_unbuffered_reads(struct netfs_io_request *rreq)
|
||||
{
|
||||
struct netfs_io_stream *stream = &rreq->io_streams[0];
|
||||
unsigned long long start = rreq->start;
|
||||
ssize_t size = rreq->len;
|
||||
int ret = 0;
|
||||
|
||||
atomic_set(&rreq->nr_outstanding, 1);
|
||||
|
||||
do {
|
||||
struct netfs_io_subrequest *subreq;
|
||||
ssize_t slice;
|
||||
@@ -67,11 +66,18 @@ static int netfs_dispatch_unbuffered_reads(struct netfs_io_request *rreq)
|
||||
subreq->start = start;
|
||||
subreq->len = size;
|
||||
|
||||
atomic_inc(&rreq->nr_outstanding);
|
||||
__set_bit(NETFS_SREQ_IN_PROGRESS, &subreq->flags);
|
||||
|
||||
spin_lock(&rreq->lock);
|
||||
list_add_tail(&subreq->rreq_link, &rreq->subrequests);
|
||||
subreq->prev_donated = rreq->prev_donated;
|
||||
rreq->prev_donated = 0;
|
||||
list_add_tail(&subreq->rreq_link, &stream->subrequests);
|
||||
if (list_is_first(&subreq->rreq_link, &stream->subrequests)) {
|
||||
stream->front = subreq;
|
||||
if (!stream->active) {
|
||||
stream->collected_to = stream->front->start;
|
||||
/* Store list pointers before active flag */
|
||||
smp_store_release(&stream->active, true);
|
||||
}
|
||||
}
|
||||
trace_netfs_sreq(subreq, netfs_sreq_trace_added);
|
||||
spin_unlock(&rreq->lock);
|
||||
|
||||
@@ -79,7 +85,6 @@ static int netfs_dispatch_unbuffered_reads(struct netfs_io_request *rreq)
|
||||
if (rreq->netfs_ops->prepare_read) {
|
||||
ret = rreq->netfs_ops->prepare_read(subreq);
|
||||
if (ret < 0) {
|
||||
atomic_dec(&rreq->nr_outstanding);
|
||||
netfs_put_subrequest(subreq, false, netfs_sreq_trace_put_cancel);
|
||||
break;
|
||||
}
|
||||
@@ -87,20 +92,32 @@ static int netfs_dispatch_unbuffered_reads(struct netfs_io_request *rreq)
|
||||
|
||||
netfs_prepare_dio_read_iterator(subreq);
|
||||
slice = subreq->len;
|
||||
rreq->netfs_ops->issue_read(subreq);
|
||||
|
||||
size -= slice;
|
||||
start += slice;
|
||||
rreq->submitted += slice;
|
||||
if (size <= 0) {
|
||||
smp_wmb(); /* Write lists before ALL_QUEUED. */
|
||||
set_bit(NETFS_RREQ_ALL_QUEUED, &rreq->flags);
|
||||
}
|
||||
|
||||
rreq->netfs_ops->issue_read(subreq);
|
||||
|
||||
if (test_bit(NETFS_RREQ_PAUSE, &rreq->flags))
|
||||
netfs_wait_for_pause(rreq);
|
||||
if (test_bit(NETFS_RREQ_FAILED, &rreq->flags))
|
||||
break;
|
||||
if (test_bit(NETFS_RREQ_BLOCKED, &rreq->flags) &&
|
||||
test_bit(NETFS_RREQ_NONBLOCK, &rreq->flags))
|
||||
break;
|
||||
cond_resched();
|
||||
} while (size > 0);
|
||||
|
||||
if (atomic_dec_and_test(&rreq->nr_outstanding))
|
||||
netfs_rreq_terminated(rreq);
|
||||
if (unlikely(size > 0)) {
|
||||
smp_wmb(); /* Write lists before ALL_QUEUED. */
|
||||
set_bit(NETFS_RREQ_ALL_QUEUED, &rreq->flags);
|
||||
netfs_wake_read_collector(rreq);
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
@@ -133,21 +150,10 @@ static int netfs_unbuffered_read(struct netfs_io_request *rreq, bool sync)
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (sync) {
|
||||
trace_netfs_rreq(rreq, netfs_rreq_trace_wait_ip);
|
||||
wait_on_bit(&rreq->flags, NETFS_RREQ_IN_PROGRESS,
|
||||
TASK_UNINTERRUPTIBLE);
|
||||
|
||||
ret = rreq->error;
|
||||
if (ret == 0 && rreq->submitted < rreq->len &&
|
||||
rreq->origin != NETFS_DIO_READ) {
|
||||
trace_netfs_failure(rreq, NULL, ret, netfs_fail_short_read);
|
||||
ret = -EIO;
|
||||
}
|
||||
} else {
|
||||
if (sync)
|
||||
ret = netfs_wait_for_read(rreq);
|
||||
else
|
||||
ret = -EIOCBQUEUED;
|
||||
}
|
||||
|
||||
out:
|
||||
_leave(" = %d", ret);
|
||||
return ret;
|
||||
@@ -215,8 +221,10 @@ ssize_t netfs_unbuffered_read_iter_locked(struct kiocb *iocb, struct iov_iter *i
|
||||
|
||||
// TODO: Set up bounce buffer if needed
|
||||
|
||||
if (!sync)
|
||||
if (!sync) {
|
||||
rreq->iocb = iocb;
|
||||
__set_bit(NETFS_RREQ_OFFLOAD_COLLECTION, &rreq->flags);
|
||||
}
|
||||
|
||||
ret = netfs_unbuffered_read(rreq, sync);
|
||||
if (ret < 0)
|
||||
|
||||
@@ -82,20 +82,27 @@ static inline void netfs_see_request(struct netfs_io_request *rreq,
|
||||
trace_netfs_rreq_ref(rreq->debug_id, refcount_read(&rreq->ref), what);
|
||||
}
|
||||
|
||||
static inline void netfs_see_subrequest(struct netfs_io_subrequest *subreq,
|
||||
enum netfs_sreq_ref_trace what)
|
||||
{
|
||||
trace_netfs_sreq_ref(subreq->rreq->debug_id, subreq->debug_index,
|
||||
refcount_read(&subreq->ref), what);
|
||||
}
|
||||
|
||||
/*
|
||||
* read_collect.c
|
||||
*/
|
||||
void netfs_read_termination_worker(struct work_struct *work);
|
||||
void netfs_rreq_terminated(struct netfs_io_request *rreq);
|
||||
void netfs_read_collection_worker(struct work_struct *work);
|
||||
void netfs_wake_read_collector(struct netfs_io_request *rreq);
|
||||
void netfs_cache_read_terminated(void *priv, ssize_t transferred_or_error, bool was_async);
|
||||
ssize_t netfs_wait_for_read(struct netfs_io_request *rreq);
|
||||
void netfs_wait_for_pause(struct netfs_io_request *rreq);
|
||||
|
||||
/*
|
||||
* read_pgpriv2.c
|
||||
*/
|
||||
void netfs_pgpriv2_mark_copy_to_cache(struct netfs_io_subrequest *subreq,
|
||||
struct netfs_io_request *rreq,
|
||||
struct folio_queue *folioq,
|
||||
int slot);
|
||||
void netfs_pgpriv2_write_to_the_cache(struct netfs_io_request *rreq);
|
||||
void netfs_pgpriv2_copy_to_cache(struct netfs_io_request *rreq, struct folio *folio);
|
||||
void netfs_pgpriv2_end_copy_to_cache(struct netfs_io_request *rreq);
|
||||
bool netfs_pgpriv2_unlock_copied_folios(struct netfs_io_request *wreq);
|
||||
|
||||
/*
|
||||
|
||||
@@ -71,7 +71,7 @@ static int netfs_requests_seq_show(struct seq_file *m, void *v)
|
||||
refcount_read(&rreq->ref),
|
||||
rreq->flags,
|
||||
rreq->error,
|
||||
atomic_read(&rreq->nr_outstanding),
|
||||
0,
|
||||
rreq->start, rreq->submitted, rreq->len);
|
||||
seq_putc(m, '\n');
|
||||
return 0;
|
||||
|
||||
@@ -48,7 +48,7 @@ struct netfs_io_request *netfs_alloc_request(struct address_space *mapping,
|
||||
spin_lock_init(&rreq->lock);
|
||||
INIT_LIST_HEAD(&rreq->io_streams[0].subrequests);
|
||||
INIT_LIST_HEAD(&rreq->io_streams[1].subrequests);
|
||||
INIT_LIST_HEAD(&rreq->subrequests);
|
||||
init_waitqueue_head(&rreq->waitq);
|
||||
refcount_set(&rreq->ref, 1);
|
||||
|
||||
if (origin == NETFS_READAHEAD ||
|
||||
@@ -56,10 +56,12 @@ struct netfs_io_request *netfs_alloc_request(struct address_space *mapping,
|
||||
origin == NETFS_READ_GAPS ||
|
||||
origin == NETFS_READ_SINGLE ||
|
||||
origin == NETFS_READ_FOR_WRITE ||
|
||||
origin == NETFS_DIO_READ)
|
||||
INIT_WORK(&rreq->work, NULL);
|
||||
else
|
||||
origin == NETFS_DIO_READ) {
|
||||
INIT_WORK(&rreq->work, netfs_read_collection_worker);
|
||||
rreq->io_streams[0].avail = true;
|
||||
} else {
|
||||
INIT_WORK(&rreq->work, netfs_write_collection_worker);
|
||||
}
|
||||
|
||||
__set_bit(NETFS_RREQ_IN_PROGRESS, &rreq->flags);
|
||||
if (file && file->f_flags & O_NONBLOCK)
|
||||
@@ -93,14 +95,6 @@ void netfs_clear_subrequests(struct netfs_io_request *rreq, bool was_async)
|
||||
struct netfs_io_stream *stream;
|
||||
int s;
|
||||
|
||||
while (!list_empty(&rreq->subrequests)) {
|
||||
subreq = list_first_entry(&rreq->subrequests,
|
||||
struct netfs_io_subrequest, rreq_link);
|
||||
list_del(&subreq->rreq_link);
|
||||
netfs_put_subrequest(subreq, was_async,
|
||||
netfs_sreq_trace_put_clear);
|
||||
}
|
||||
|
||||
for (s = 0; s < ARRAY_SIZE(rreq->io_streams); s++) {
|
||||
stream = &rreq->io_streams[s];
|
||||
while (!list_empty(&stream->subrequests)) {
|
||||
@@ -192,21 +186,7 @@ struct netfs_io_subrequest *netfs_alloc_subrequest(struct netfs_io_request *rreq
|
||||
}
|
||||
|
||||
memset(subreq, 0, kmem_cache_size(cache));
|
||||
|
||||
switch (rreq->origin) {
|
||||
case NETFS_READAHEAD:
|
||||
case NETFS_READPAGE:
|
||||
case NETFS_READ_GAPS:
|
||||
case NETFS_READ_SINGLE:
|
||||
case NETFS_READ_FOR_WRITE:
|
||||
case NETFS_DIO_READ:
|
||||
INIT_WORK(&subreq->work, netfs_read_subreq_termination_worker);
|
||||
break;
|
||||
default:
|
||||
INIT_WORK(&subreq->work, NULL);
|
||||
break;
|
||||
}
|
||||
|
||||
INIT_WORK(&subreq->work, NULL);
|
||||
INIT_LIST_HEAD(&subreq->rreq_link);
|
||||
refcount_set(&subreq->ref, 2);
|
||||
subreq->rreq = rreq;
|
||||
|
||||
File diff suppressed because it is too large
@@ -13,54 +13,12 @@
|
||||
#include <linux/task_io_accounting_ops.h>
|
||||
#include "internal.h"
|
||||
|
||||
/*
|
||||
* [DEPRECATED] Mark page as requiring copy-to-cache using PG_private_2. The
|
||||
* third mark in the folio queue is used to indicate that this folio needs
|
||||
* writing.
|
||||
*/
|
||||
void netfs_pgpriv2_mark_copy_to_cache(struct netfs_io_subrequest *subreq,
|
||||
struct netfs_io_request *rreq,
|
||||
struct folio_queue *folioq,
|
||||
int slot)
|
||||
{
|
||||
struct folio *folio = folioq_folio(folioq, slot);
|
||||
|
||||
trace_netfs_folio(folio, netfs_folio_trace_copy_to_cache);
|
||||
folio_start_private_2(folio);
|
||||
folioq_mark3(folioq, slot);
|
||||
}
|
||||
|
||||
/*
|
||||
* [DEPRECATED] Cancel PG_private_2 on all marked folios in the event of an
|
||||
* unrecoverable error.
|
||||
*/
|
||||
static void netfs_pgpriv2_cancel(struct rolling_buffer *buffer)
|
||||
{
|
||||
struct folio_queue *folioq = buffer->tail;
|
||||
struct folio *folio;
|
||||
int slot;
|
||||
|
||||
while (folioq) {
|
||||
if (!folioq->marks3) {
|
||||
folioq = folioq->next;
|
||||
continue;
|
||||
}
|
||||
|
||||
slot = __ffs(folioq->marks3);
|
||||
folio = folioq_folio(folioq, slot);
|
||||
|
||||
trace_netfs_folio(folio, netfs_folio_trace_cancel_copy);
|
||||
folio_end_private_2(folio);
|
||||
folioq_unmark3(folioq, slot);
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* [DEPRECATED] Copy a folio to the cache with PG_private_2 set.
|
||||
*/
|
||||
static int netfs_pgpriv2_copy_folio(struct netfs_io_request *wreq, struct folio *folio)
|
||||
static void netfs_pgpriv2_copy_folio(struct netfs_io_request *creq, struct folio *folio)
|
||||
{
|
||||
struct netfs_io_stream *cache = &wreq->io_streams[1];
|
||||
struct netfs_io_stream *cache = &creq->io_streams[1];
|
||||
size_t fsize = folio_size(folio), flen = fsize;
|
||||
loff_t fpos = folio_pos(folio), i_size;
|
||||
bool to_eof = false;
|
||||
@@ -71,17 +29,17 @@ static int netfs_pgpriv2_copy_folio(struct netfs_io_request *wreq, struct folio
|
||||
* of the page to beyond it, but cannot move i_size into or through the
|
||||
* page since we have it locked.
|
||||
*/
|
||||
i_size = i_size_read(wreq->inode);
|
||||
i_size = i_size_read(creq->inode);
|
||||
|
||||
if (fpos >= i_size) {
|
||||
/* mmap beyond eof. */
|
||||
_debug("beyond eof");
|
||||
folio_end_private_2(folio);
|
||||
return 0;
|
||||
return;
|
||||
}
|
||||
|
||||
if (fpos + fsize > wreq->i_size)
|
||||
wreq->i_size = i_size;
|
||||
if (fpos + fsize > creq->i_size)
|
||||
creq->i_size = i_size;
|
||||
|
||||
if (flen > i_size - fpos) {
|
||||
flen = i_size - fpos;
|
||||
@@ -95,8 +53,10 @@ static int netfs_pgpriv2_copy_folio(struct netfs_io_request *wreq, struct folio
|
||||
trace_netfs_folio(folio, netfs_folio_trace_store_copy);
|
||||
|
||||
/* Attach the folio to the rolling buffer. */
|
||||
if (rolling_buffer_append(&wreq->buffer, folio, 0) < 0)
|
||||
return -ENOMEM;
|
||||
if (rolling_buffer_append(&creq->buffer, folio, 0) < 0) {
|
||||
clear_bit(NETFS_RREQ_FOLIO_COPY_TO_CACHE, &creq->flags);
|
||||
return;
|
||||
}
|
||||
|
||||
cache->submit_extendable_to = fsize;
|
||||
cache->submit_off = 0;
|
||||
@@ -110,11 +70,11 @@ static int netfs_pgpriv2_copy_folio(struct netfs_io_request *wreq, struct folio
|
||||
do {
|
||||
ssize_t part;
|
||||
|
||||
wreq->buffer.iter.iov_offset = cache->submit_off;
|
||||
creq->buffer.iter.iov_offset = cache->submit_off;
|
||||
|
||||
atomic64_set(&wreq->issued_to, fpos + cache->submit_off);
|
||||
atomic64_set(&creq->issued_to, fpos + cache->submit_off);
|
||||
cache->submit_extendable_to = fsize - cache->submit_off;
|
||||
part = netfs_advance_write(wreq, cache, fpos + cache->submit_off,
|
||||
part = netfs_advance_write(creq, cache, fpos + cache->submit_off,
|
||||
cache->submit_len, to_eof);
|
||||
cache->submit_off += part;
|
||||
if (part > cache->submit_len)
|
||||
@@ -123,98 +83,95 @@ static int netfs_pgpriv2_copy_folio(struct netfs_io_request *wreq, struct folio
|
||||
cache->submit_len -= part;
|
||||
} while (cache->submit_len > 0);
|
||||
|
||||
wreq->buffer.iter.iov_offset = 0;
|
||||
rolling_buffer_advance(&wreq->buffer, fsize);
|
||||
atomic64_set(&wreq->issued_to, fpos + fsize);
|
||||
creq->buffer.iter.iov_offset = 0;
|
||||
rolling_buffer_advance(&creq->buffer, fsize);
|
||||
atomic64_set(&creq->issued_to, fpos + fsize);
|
||||
|
||||
if (flen < fsize)
|
||||
netfs_issue_write(wreq, cache);
|
||||
|
||||
_leave(" = 0");
|
||||
return 0;
|
||||
netfs_issue_write(creq, cache);
|
||||
}
|
||||
|
||||
/*
|
||||
* [DEPRECATED] Go through the buffer and write any folios that are marked with
|
||||
* the third mark to the cache.
|
||||
* [DEPRECATED] Set up copying to the cache.
|
||||
*/
|
||||
void netfs_pgpriv2_write_to_the_cache(struct netfs_io_request *rreq)
|
||||
static struct netfs_io_request *netfs_pgpriv2_begin_copy_to_cache(
|
||||
struct netfs_io_request *rreq, struct folio *folio)
|
||||
{
|
||||
struct netfs_io_request *wreq;
|
||||
struct folio_queue *folioq;
|
||||
struct folio *folio;
|
||||
int error = 0;
|
||||
int slot = 0;
|
||||
|
||||
_enter("");
|
||||
struct netfs_io_request *creq;
|
||||
|
||||
if (!fscache_resources_valid(&rreq->cache_resources))
|
||||
goto couldnt_start;
|
||||
goto cancel;
|
||||
|
||||
/* Need the first folio to be able to set up the op. */
|
||||
for (folioq = rreq->buffer.tail; folioq; folioq = folioq->next) {
|
||||
if (folioq->marks3) {
|
||||
slot = __ffs(folioq->marks3);
|
||||
break;
|
||||
}
|
||||
}
|
||||
if (!folioq)
|
||||
return;
|
||||
folio = folioq_folio(folioq, slot);
|
||||
|
||||
wreq = netfs_create_write_req(rreq->mapping, NULL, folio_pos(folio),
|
||||
creq = netfs_create_write_req(rreq->mapping, NULL, folio_pos(folio),
|
||||
NETFS_PGPRIV2_COPY_TO_CACHE);
|
||||
if (IS_ERR(wreq)) {
|
||||
kleave(" [create %ld]", PTR_ERR(wreq));
|
||||
goto couldnt_start;
|
||||
}
|
||||
if (IS_ERR(creq))
|
||||
goto cancel;
|
||||
|
||||
trace_netfs_write(wreq, netfs_write_trace_copy_to_cache);
|
||||
if (!creq->io_streams[1].avail)
|
||||
goto cancel_put;
|
||||
|
||||
trace_netfs_write(creq, netfs_write_trace_copy_to_cache);
|
||||
netfs_stat(&netfs_n_wh_copy_to_cache);
|
||||
if (!wreq->io_streams[1].avail) {
|
||||
netfs_put_request(wreq, false, netfs_rreq_trace_put_return);
|
||||
goto couldnt_start;
|
||||
}
|
||||
rreq->copy_to_cache = creq;
|
||||
return creq;
|
||||
|
||||
for (;;) {
|
||||
error = netfs_pgpriv2_copy_folio(wreq, folio);
|
||||
if (error < 0)
|
||||
break;
|
||||
cancel_put:
|
||||
netfs_put_request(creq, false, netfs_rreq_trace_put_return);
|
||||
cancel:
|
||||
rreq->copy_to_cache = ERR_PTR(-ENOBUFS);
|
||||
clear_bit(NETFS_RREQ_FOLIO_COPY_TO_CACHE, &rreq->flags);
|
||||
return ERR_PTR(-ENOBUFS);
|
||||
}
|
||||
|
||||
folioq_unmark3(folioq, slot);
|
||||
if (!folioq->marks3) {
|
||||
folioq = folioq->next;
|
||||
if (!folioq)
|
||||
break;
|
||||
}
|
||||
/*
|
||||
* [DEPRECATED] Mark page as requiring copy-to-cache using PG_private_2 and add
|
||||
* it to the copy write request.
|
||||
*/
|
||||
void netfs_pgpriv2_copy_to_cache(struct netfs_io_request *rreq, struct folio *folio)
|
||||
{
|
||||
struct netfs_io_request *creq = rreq->copy_to_cache;
|
||||
|
||||
slot = __ffs(folioq->marks3);
|
||||
folio = folioq_folio(folioq, slot);
|
||||
}
|
||||
if (!creq)
|
||||
creq = netfs_pgpriv2_begin_copy_to_cache(rreq, folio);
|
||||
if (IS_ERR(creq))
|
||||
return;
|
||||
|
||||
netfs_issue_write(wreq, &wreq->io_streams[1]);
|
||||
trace_netfs_folio(folio, netfs_folio_trace_copy_to_cache);
|
||||
folio_start_private_2(folio);
|
||||
netfs_pgpriv2_copy_folio(creq, folio);
|
||||
}
|
||||
|
||||
/*
|
||||
* [DEPRECATED] End writing to the cache, flushing out any outstanding writes.
|
||||
*/
|
||||
void netfs_pgpriv2_end_copy_to_cache(struct netfs_io_request *rreq)
|
||||
{
|
||||
struct netfs_io_request *creq = rreq->copy_to_cache;
|
||||
|
||||
if (IS_ERR_OR_NULL(creq))
|
||||
return;
|
||||
|
||||
netfs_issue_write(creq, &creq->io_streams[1]);
|
||||
smp_wmb(); /* Write lists before ALL_QUEUED. */
|
||||
set_bit(NETFS_RREQ_ALL_QUEUED, &wreq->flags);
|
||||
set_bit(NETFS_RREQ_ALL_QUEUED, &creq->flags);
|
||||
|
||||
netfs_put_request(wreq, false, netfs_rreq_trace_put_return);
|
||||
_leave(" = %d", error);
|
||||
couldnt_start:
|
||||
netfs_pgpriv2_cancel(&rreq->buffer);
|
||||
netfs_put_request(creq, false, netfs_rreq_trace_put_return);
|
||||
creq->copy_to_cache = NULL;
|
||||
}
|
||||
|
||||
/*
|
||||
* [DEPRECATED] Remove the PG_private_2 mark from any folios we've finished
|
||||
* copying.
|
||||
*/
|
||||
bool netfs_pgpriv2_unlock_copied_folios(struct netfs_io_request *wreq)
|
||||
bool netfs_pgpriv2_unlock_copied_folios(struct netfs_io_request *creq)
|
||||
{
|
||||
struct folio_queue *folioq = wreq->buffer.tail;
|
||||
unsigned long long collected_to = wreq->collected_to;
|
||||
unsigned int slot = wreq->buffer.first_tail_slot;
|
||||
struct folio_queue *folioq = creq->buffer.tail;
|
||||
unsigned long long collected_to = creq->collected_to;
|
||||
unsigned int slot = creq->buffer.first_tail_slot;
|
||||
bool made_progress = false;
|
||||
|
||||
if (slot >= folioq_nr_slots(folioq)) {
|
||||
folioq = rolling_buffer_delete_spent(&wreq->buffer);
|
||||
folioq = rolling_buffer_delete_spent(&creq->buffer);
|
||||
slot = 0;
|
||||
}
|
||||
|
||||
@@ -226,16 +183,16 @@ bool netfs_pgpriv2_unlock_copied_folios(struct netfs_io_request *wreq)
|
||||
folio = folioq_folio(folioq, slot);
|
||||
if (WARN_ONCE(!folio_test_private_2(folio),
|
||||
"R=%08x: folio %lx is not marked private_2\n",
|
||||
wreq->debug_id, folio->index))
|
||||
creq->debug_id, folio->index))
|
||||
trace_netfs_folio(folio, netfs_folio_trace_not_under_wback);
|
||||
|
||||
fpos = folio_pos(folio);
|
||||
fsize = folio_size(folio);
|
||||
flen = fsize;
|
||||
|
||||
fend = min_t(unsigned long long, fpos + flen, wreq->i_size);
|
||||
fend = min_t(unsigned long long, fpos + flen, creq->i_size);
|
||||
|
||||
trace_netfs_collect_folio(wreq, folio, fend, collected_to);
|
||||
trace_netfs_collect_folio(creq, folio, fend, collected_to);
|
||||
|
||||
/* Unlock any folio we've transferred all of. */
|
||||
if (collected_to < fend)
|
||||
@@ -243,7 +200,7 @@ bool netfs_pgpriv2_unlock_copied_folios(struct netfs_io_request *wreq)
|
||||
|
||||
trace_netfs_folio(folio, netfs_folio_trace_end_copy);
|
||||
folio_end_private_2(folio);
|
||||
wreq->cleaned_to = fpos + fsize;
|
||||
creq->cleaned_to = fpos + fsize;
|
||||
made_progress = true;
|
||||
|
||||
/* Clean up the head folioq. If we clear an entire folioq, then
|
||||
@@ -253,7 +210,7 @@ bool netfs_pgpriv2_unlock_copied_folios(struct netfs_io_request *wreq)
|
||||
folioq_clear(folioq, slot);
|
||||
slot++;
|
||||
if (slot >= folioq_nr_slots(folioq)) {
|
||||
folioq = rolling_buffer_delete_spent(&wreq->buffer);
|
||||
folioq = rolling_buffer_delete_spent(&creq->buffer);
|
||||
if (!folioq)
|
||||
goto done;
|
||||
slot = 0;
|
||||
@@ -263,8 +220,8 @@ bool netfs_pgpriv2_unlock_copied_folios(struct netfs_io_request *wreq)
|
||||
break;
|
||||
}
|
||||
|
||||
wreq->buffer.tail = folioq;
|
||||
creq->buffer.tail = folioq;
|
||||
done:
|
||||
wreq->buffer.first_tail_slot = slot;
|
||||
creq->buffer.first_tail_slot = slot;
|
||||
return made_progress;
|
||||
}
|
||||
|
||||
@@ -12,15 +12,7 @@
|
||||
static void netfs_reissue_read(struct netfs_io_request *rreq,
|
||||
struct netfs_io_subrequest *subreq)
|
||||
{
|
||||
struct iov_iter *io_iter = &subreq->io_iter;
|
||||
|
||||
if (iov_iter_is_folioq(io_iter)) {
|
||||
subreq->curr_folioq = (struct folio_queue *)io_iter->folioq;
|
||||
subreq->curr_folioq_slot = io_iter->folioq_slot;
|
||||
subreq->curr_folio_order = subreq->curr_folioq->orders[subreq->curr_folioq_slot];
|
||||
}
|
||||
|
||||
atomic_inc(&rreq->nr_outstanding);
|
||||
__clear_bit(NETFS_SREQ_MADE_PROGRESS, &subreq->flags);
|
||||
__set_bit(NETFS_SREQ_IN_PROGRESS, &subreq->flags);
|
||||
netfs_get_subrequest(subreq, netfs_sreq_trace_get_resubmit);
|
||||
subreq->rreq->netfs_ops->issue_read(subreq);
|
||||
@@ -33,13 +25,12 @@ static void netfs_reissue_read(struct netfs_io_request *rreq,
|
||||
static void netfs_retry_read_subrequests(struct netfs_io_request *rreq)
|
||||
{
|
||||
struct netfs_io_subrequest *subreq;
|
||||
struct netfs_io_stream *stream0 = &rreq->io_streams[0];
|
||||
LIST_HEAD(sublist);
|
||||
LIST_HEAD(queue);
|
||||
struct netfs_io_stream *stream = &rreq->io_streams[0];
|
||||
struct list_head *next;
|
||||
|
||||
_enter("R=%x", rreq->debug_id);
|
||||
|
||||
if (list_empty(&rreq->subrequests))
|
||||
if (list_empty(&stream->subrequests))
|
||||
return;
|
||||
|
||||
if (rreq->netfs_ops->retry_request)
|
||||
@@ -50,9 +41,7 @@ static void netfs_retry_read_subrequests(struct netfs_io_request *rreq)
|
||||
*/
|
||||
if (!rreq->netfs_ops->prepare_read &&
|
||||
!rreq->cache_resources.ops) {
|
||||
struct netfs_io_subrequest *subreq;
|
||||
|
||||
list_for_each_entry(subreq, &rreq->subrequests, rreq_link) {
|
||||
list_for_each_entry(subreq, &stream->subrequests, rreq_link) {
|
||||
if (test_bit(NETFS_SREQ_FAILED, &subreq->flags))
|
||||
break;
|
||||
if (__test_and_clear_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags)) {
|
||||
@@ -75,48 +64,44 @@ static void netfs_retry_read_subrequests(struct netfs_io_request *rreq)
|
||||
* populating with smaller subrequests. In the event that the subreq
|
||||
* we just launched finishes before we insert the next subreq, it'll
|
||||
* fill in rreq->prev_donated instead.
|
||||
|
||||
*
|
||||
* Note: Alternatively, we could split the tail subrequest right before
|
||||
* we reissue it and fix up the donations under lock.
|
||||
*/
|
||||
list_splice_init(&rreq->subrequests, &queue);
|
||||
next = stream->subrequests.next;
|
||||
|
||||
do {
|
||||
struct netfs_io_subrequest *from;
|
||||
struct netfs_io_subrequest *from, *to, *tmp;
|
||||
struct iov_iter source;
|
||||
unsigned long long start, len;
|
||||
size_t part, deferred_next_donated = 0;
|
||||
size_t part;
|
||||
bool boundary = false;
|
||||
|
||||
/* Go through the subreqs and find the next span of contiguous
|
||||
* buffer that we then rejig (cifs, for example, needs the
|
||||
* rsize renegotiating) and reissue.
|
||||
*/
|
||||
from = list_first_entry(&queue, struct netfs_io_subrequest, rreq_link);
|
||||
list_move_tail(&from->rreq_link, &sublist);
|
||||
from = list_entry(next, struct netfs_io_subrequest, rreq_link);
|
||||
to = from;
|
||||
start = from->start + from->transferred;
|
||||
len = from->len - from->transferred;
|
||||
|
||||
_debug("from R=%08x[%x] s=%llx ctl=%zx/%zx/%zx",
|
||||
_debug("from R=%08x[%x] s=%llx ctl=%zx/%zx",
|
||||
rreq->debug_id, from->debug_index,
|
||||
from->start, from->consumed, from->transferred, from->len);
|
||||
from->start, from->transferred, from->len);
|
||||
|
||||
if (test_bit(NETFS_SREQ_FAILED, &from->flags) ||
|
||||
!test_bit(NETFS_SREQ_NEED_RETRY, &from->flags))
|
||||
goto abandon;
|
||||
|
||||
deferred_next_donated = from->next_donated;
|
||||
while ((subreq = list_first_entry_or_null(
|
||||
&queue, struct netfs_io_subrequest, rreq_link))) {
|
||||
if (subreq->start != start + len ||
|
||||
subreq->transferred > 0 ||
|
||||
list_for_each_continue(next, &stream->subrequests) {
|
||||
subreq = list_entry(next, struct netfs_io_subrequest, rreq_link);
|
||||
if (subreq->start + subreq->transferred != start + len ||
|
||||
test_bit(NETFS_SREQ_BOUNDARY, &subreq->flags) ||
|
||||
!test_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags))
|
||||
break;
|
||||
list_move_tail(&subreq->rreq_link, &sublist);
|
||||
len += subreq->len;
|
||||
deferred_next_donated = subreq->next_donated;
|
||||
if (test_bit(NETFS_SREQ_BOUNDARY, &subreq->flags))
|
||||
break;
|
||||
to = subreq;
|
||||
len += to->len;
|
||||
}
|
||||
|
||||
_debug(" - range: %llx-%llx %llx", start, start + len - 1, len);
|
||||
@@ -129,37 +114,30 @@ static void netfs_retry_read_subrequests(struct netfs_io_request *rreq)
|
||||
source.count = len;
|
||||
|
||||
/* Work through the sublist. */
|
||||
while ((subreq = list_first_entry_or_null(
|
||||
&sublist, struct netfs_io_subrequest, rreq_link))) {
|
||||
list_del(&subreq->rreq_link);
|
||||
|
||||
subreq = from;
|
||||
list_for_each_entry_from(subreq, &stream->subrequests, rreq_link) {
|
||||
if (!len)
|
||||
break;
|
||||
subreq->source = NETFS_DOWNLOAD_FROM_SERVER;
|
||||
subreq->start = start - subreq->transferred;
|
||||
subreq->len = len + subreq->transferred;
|
||||
stream0->sreq_max_len = subreq->len;
|
||||
|
||||
__clear_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags);
|
||||
__clear_bit(NETFS_SREQ_MADE_PROGRESS, &subreq->flags);
|
||||
subreq->retry_count++;
|
||||
|
||||
spin_lock(&rreq->lock);
|
||||
list_add_tail(&subreq->rreq_link, &rreq->subrequests);
|
||||
subreq->prev_donated += rreq->prev_donated;
|
||||
rreq->prev_donated = 0;
|
||||
trace_netfs_sreq(subreq, netfs_sreq_trace_retry);
|
||||
spin_unlock(&rreq->lock);
|
||||
|
||||
BUG_ON(!len);
|
||||
|
||||
/* Renegotiate max_len (rsize) */
|
||||
stream->sreq_max_len = subreq->len;
|
||||
if (rreq->netfs_ops->prepare_read(subreq) < 0) {
|
||||
trace_netfs_sreq(subreq, netfs_sreq_trace_reprep_failed);
|
||||
__set_bit(NETFS_SREQ_FAILED, &subreq->flags);
|
||||
goto abandon;
|
||||
}
|
||||
|
||||
part = umin(len, stream0->sreq_max_len);
|
||||
if (unlikely(rreq->io_streams[0].sreq_max_segs))
|
||||
part = netfs_limit_iter(&source, 0, part, stream0->sreq_max_segs);
|
||||
part = umin(len, stream->sreq_max_len);
|
||||
if (unlikely(stream->sreq_max_segs))
|
||||
part = netfs_limit_iter(&source, 0, part, stream->sreq_max_segs);
|
||||
subreq->len = subreq->transferred + part;
|
||||
subreq->io_iter = source;
|
||||
iov_iter_truncate(&subreq->io_iter, part);
|
||||
@@ -169,57 +147,105 @@ static void netfs_retry_read_subrequests(struct netfs_io_request *rreq)
|
||||
if (!len) {
|
||||
if (boundary)
|
||||
__set_bit(NETFS_SREQ_BOUNDARY, &subreq->flags);
|
||||
subreq->next_donated = deferred_next_donated;
|
||||
} else {
|
||||
__clear_bit(NETFS_SREQ_BOUNDARY, &subreq->flags);
|
||||
subreq->next_donated = 0;
|
||||
}
|
||||
|
||||
netfs_get_subrequest(subreq, netfs_sreq_trace_get_resubmit);
|
||||
netfs_reissue_read(rreq, subreq);
|
||||
if (!len)
|
||||
if (subreq == to)
|
||||
break;
|
||||
|
||||
/* If we ran out of subrequests, allocate another. */
|
||||
if (list_empty(&sublist)) {
|
||||
subreq = netfs_alloc_subrequest(rreq);
|
||||
if (!subreq)
|
||||
goto abandon;
|
||||
subreq->source = NETFS_DOWNLOAD_FROM_SERVER;
|
||||
subreq->start = start;
|
||||
|
||||
/* We get two refs, but need just one. */
|
||||
netfs_put_subrequest(subreq, false, netfs_sreq_trace_new);
|
||||
trace_netfs_sreq(subreq, netfs_sreq_trace_split);
|
||||
list_add_tail(&subreq->rreq_link, &sublist);
|
||||
}
|
||||
}
|
||||
|
||||
/* If we managed to use fewer subreqs, we can discard the
|
||||
* excess.
|
||||
* excess; if we used the same number, then we're done.
|
||||
*/
|
||||
while ((subreq = list_first_entry_or_null(
|
||||
&sublist, struct netfs_io_subrequest, rreq_link))) {
|
||||
trace_netfs_sreq(subreq, netfs_sreq_trace_discard);
|
||||
list_del(&subreq->rreq_link);
|
||||
netfs_put_subrequest(subreq, false, netfs_sreq_trace_put_done);
|
||||
if (!len) {
|
||||
if (subreq == to)
|
||||
continue;
|
||||
list_for_each_entry_safe_from(subreq, tmp,
|
||||
&stream->subrequests, rreq_link) {
|
||||
trace_netfs_sreq(subreq, netfs_sreq_trace_discard);
|
||||
list_del(&subreq->rreq_link);
|
||||
netfs_put_subrequest(subreq, false, netfs_sreq_trace_put_done);
|
||||
if (subreq == to)
|
||||
break;
|
||||
}
|
||||
continue;
|
||||
}
|
||||
|
||||
} while (!list_empty(&queue));
|
||||
/* We ran out of subrequests, so we need to allocate some more
|
||||
* and insert them after.
|
||||
*/
|
||||
do {
|
||||
subreq = netfs_alloc_subrequest(rreq);
|
||||
if (!subreq) {
|
||||
subreq = to;
|
||||
goto abandon_after;
|
||||
}
|
||||
subreq->source = NETFS_DOWNLOAD_FROM_SERVER;
|
||||
subreq->start = start;
|
||||
subreq->len = len;
|
||||
subreq->debug_index = atomic_inc_return(&rreq->subreq_counter);
|
||||
subreq->stream_nr = stream->stream_nr;
|
||||
subreq->retry_count = 1;
|
||||
|
||||
trace_netfs_sreq_ref(rreq->debug_id, subreq->debug_index,
|
||||
refcount_read(&subreq->ref),
|
||||
netfs_sreq_trace_new);
|
||||
netfs_get_subrequest(subreq, netfs_sreq_trace_get_resubmit);
|
||||
|
||||
list_add(&subreq->rreq_link, &to->rreq_link);
|
||||
to = list_next_entry(to, rreq_link);
|
||||
trace_netfs_sreq(subreq, netfs_sreq_trace_retry);
|
||||
|
||||
stream->sreq_max_len = umin(len, rreq->rsize);
|
||||
stream->sreq_max_segs = 0;
|
||||
if (unlikely(stream->sreq_max_segs))
|
||||
part = netfs_limit_iter(&source, 0, part, stream->sreq_max_segs);
|
||||
|
||||
netfs_stat(&netfs_n_rh_download);
|
||||
if (rreq->netfs_ops->prepare_read(subreq) < 0) {
|
||||
trace_netfs_sreq(subreq, netfs_sreq_trace_reprep_failed);
|
||||
__set_bit(NETFS_SREQ_FAILED, &subreq->flags);
|
||||
goto abandon;
|
||||
}
|
||||
|
||||
part = umin(len, stream->sreq_max_len);
|
||||
subreq->len = subreq->transferred + part;
|
||||
subreq->io_iter = source;
|
||||
iov_iter_truncate(&subreq->io_iter, part);
|
||||
iov_iter_advance(&source, part);
|
||||
|
||||
len -= part;
|
||||
start += part;
|
||||
if (!len && boundary) {
|
||||
__set_bit(NETFS_SREQ_BOUNDARY, &to->flags);
|
||||
boundary = false;
|
||||
}
|
||||
|
||||
netfs_reissue_read(rreq, subreq);
|
||||
} while (len);
|
||||
|
||||
} while (!list_is_head(next, &stream->subrequests));
|
||||
|
||||
return;
|
||||
|
||||
/* If we hit ENOMEM, fail all remaining subrequests */
|
||||
/* If we hit an error, fail all remaining incomplete subrequests */
|
||||
abandon_after:
|
||||
if (list_is_last(&subreq->rreq_link, &stream->subrequests))
|
||||
return;
|
||||
subreq = list_next_entry(subreq, rreq_link);
|
||||
abandon:
|
||||
list_splice_init(&sublist, &queue);
|
||||
list_for_each_entry(subreq, &queue, rreq_link) {
|
||||
if (!subreq->error)
|
||||
subreq->error = -ENOMEM;
|
||||
__clear_bit(NETFS_SREQ_FAILED, &subreq->flags);
|
||||
list_for_each_entry_from(subreq, &stream->subrequests, rreq_link) {
|
||||
if (!subreq->error &&
|
||||
!test_bit(NETFS_SREQ_FAILED, &subreq->flags) &&
|
||||
!test_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags))
|
||||
continue;
|
||||
subreq->error = -ENOMEM;
|
||||
__set_bit(NETFS_SREQ_FAILED, &subreq->flags);
|
||||
__clear_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags);
|
||||
}
|
||||
spin_lock(&rreq->lock);
|
||||
list_splice_tail_init(&queue, &rreq->subrequests);
|
||||
spin_unlock(&rreq->lock);
|
||||
}
|
||||
|
||||
/*
|
||||
@@ -227,14 +253,19 @@ abandon:
|
||||
*/
|
||||
void netfs_retry_reads(struct netfs_io_request *rreq)
|
||||
{
|
||||
struct netfs_io_subrequest *subreq;
|
||||
struct netfs_io_stream *stream = &rreq->io_streams[0];
|
||||
|
||||
/* Wait for all outstanding I/O to quiesce before performing retries as
|
||||
* we may need to renegotiate the I/O sizes.
|
||||
*/
|
||||
list_for_each_entry(subreq, &stream->subrequests, rreq_link) {
|
||||
wait_on_bit(&subreq->flags, NETFS_SREQ_IN_PROGRESS,
|
||||
TASK_UNINTERRUPTIBLE);
|
||||
}
|
||||
|
||||
trace_netfs_rreq(rreq, netfs_rreq_trace_resubmit);
|
||||
|
||||
atomic_inc(&rreq->nr_outstanding);
|
||||
|
||||
netfs_retry_read_subrequests(rreq);
|
||||
|
||||
if (atomic_dec_and_test(&rreq->nr_outstanding))
|
||||
netfs_rreq_terminated(rreq);
|
||||
}
|
||||
|
||||
/*
|
||||
|
||||
@@ -77,6 +77,7 @@ static void netfs_single_read_cache(struct netfs_io_request *rreq,
|
||||
{
|
||||
struct netfs_cache_resources *cres = &rreq->cache_resources;
|
||||
|
||||
_enter("R=%08x[%x]", rreq->debug_id, subreq->debug_index);
|
||||
netfs_stat(&netfs_n_rh_read);
|
||||
cres->ops->read(cres, subreq->start, &subreq->io_iter, NETFS_READ_HOLE_FAIL,
|
||||
netfs_cache_read_terminated, subreq);
|
||||
@@ -88,28 +89,28 @@ static void netfs_single_read_cache(struct netfs_io_request *rreq,
|
||||
*/
|
||||
static int netfs_single_dispatch_read(struct netfs_io_request *rreq)
|
||||
{
|
||||
struct netfs_io_stream *stream = &rreq->io_streams[0];
|
||||
struct netfs_io_subrequest *subreq;
|
||||
int ret = 0;
|
||||
|
||||
atomic_set(&rreq->nr_outstanding, 1);
|
||||
|
||||
subreq = netfs_alloc_subrequest(rreq);
|
||||
if (!subreq) {
|
||||
ret = -ENOMEM;
|
||||
goto out;
|
||||
}
|
||||
if (!subreq)
|
||||
return -ENOMEM;
|
||||
|
||||
subreq->source = NETFS_DOWNLOAD_FROM_SERVER;
|
||||
subreq->start = 0;
|
||||
subreq->len = rreq->len;
|
||||
subreq->io_iter = rreq->buffer.iter;
|
||||
|
||||
atomic_inc(&rreq->nr_outstanding);
|
||||
__set_bit(NETFS_SREQ_IN_PROGRESS, &subreq->flags);
|
||||
|
||||
spin_lock_bh(&rreq->lock);
|
||||
list_add_tail(&subreq->rreq_link, &rreq->subrequests);
|
||||
spin_lock(&rreq->lock);
|
||||
list_add_tail(&subreq->rreq_link, &stream->subrequests);
|
||||
trace_netfs_sreq(subreq, netfs_sreq_trace_added);
|
||||
spin_unlock_bh(&rreq->lock);
|
||||
stream->front = subreq;
|
||||
/* Store list pointers before active flag */
|
||||
smp_store_release(&stream->active, true);
|
||||
spin_unlock(&rreq->lock);
|
||||
|
||||
netfs_single_cache_prepare_read(rreq, subreq);
|
||||
switch (subreq->source) {
|
||||
@@ -137,14 +138,12 @@ static int netfs_single_dispatch_read(struct netfs_io_request *rreq)
|
||||
break;
|
||||
}
|
||||
|
||||
out:
|
||||
if (atomic_dec_and_test(&rreq->nr_outstanding))
|
||||
netfs_rreq_terminated(rreq);
|
||||
smp_wmb(); /* Write lists before ALL_QUEUED. */
|
||||
set_bit(NETFS_RREQ_ALL_QUEUED, &rreq->flags);
|
||||
return ret;
|
||||
cancel:
|
||||
atomic_dec(&rreq->nr_outstanding);
|
||||
netfs_put_subrequest(subreq, false, netfs_sreq_trace_put_cancel);
|
||||
goto out;
|
||||
return ret;
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -185,13 +184,7 @@ ssize_t netfs_read_single(struct inode *inode, struct file *file, struct iov_ite
|
||||
rreq->buffer.iter = *iter;
|
||||
netfs_single_dispatch_read(rreq);
|
||||
|
||||
trace_netfs_rreq(rreq, netfs_rreq_trace_wait_ip);
|
||||
wait_on_bit(&rreq->flags, NETFS_RREQ_IN_PROGRESS,
|
||||
TASK_UNINTERRUPTIBLE);
|
||||
|
||||
ret = rreq->error;
|
||||
if (ret == 0)
|
||||
ret = rreq->transferred;
|
||||
ret = netfs_wait_for_read(rreq);
|
||||
netfs_put_request(rreq, true, netfs_rreq_trace_put_return);
|
||||
return ret;
|
||||
|
||||
|
||||
@@ -289,7 +289,9 @@ reassess_streams:
|
||||
goto need_retry;
|
||||
if ((notes & MADE_PROGRESS) && test_bit(NETFS_RREQ_PAUSE, &wreq->flags)) {
|
||||
trace_netfs_rreq(wreq, netfs_rreq_trace_unpause);
|
||||
clear_and_wake_up_bit(NETFS_RREQ_PAUSE, &wreq->flags);
|
||||
clear_bit_unlock(NETFS_RREQ_PAUSE, &wreq->flags);
|
||||
smp_mb__after_atomic(); /* Set PAUSE before task state */
|
||||
wake_up(&wreq->waitq);
|
||||
}
|
||||
|
||||
if (notes & NEED_REASSESS) {
|
||||
|
||||
@@ -723,7 +723,7 @@ int netfs_unbuffered_write(struct netfs_io_request *wreq, bool may_wait, size_t
|
||||
rolling_buffer_advance(&wreq->buffer, part);
|
||||
if (test_bit(NETFS_RREQ_PAUSE, &wreq->flags)) {
|
||||
trace_netfs_rreq(wreq, netfs_rreq_trace_wait_pause);
|
||||
wait_on_bit(&wreq->flags, NETFS_RREQ_PAUSE, TASK_UNINTERRUPTIBLE);
|
||||
wait_event(wreq->waitq, !test_bit(NETFS_RREQ_PAUSE, &wreq->flags));
|
||||
}
|
||||
if (test_bit(NETFS_RREQ_FAILED, &wreq->flags))
|
||||
break;
|
||||
|
||||
@@ -93,15 +93,21 @@ static void netfs_retry_write_stream(struct netfs_io_request *wreq,
|
||||
list_for_each_entry_from(subreq, &stream->subrequests, rreq_link) {
|
||||
if (!len)
|
||||
break;
|
||||
/* Renegotiate max_len (wsize) */
|
||||
trace_netfs_sreq(subreq, netfs_sreq_trace_retry);
|
||||
|
||||
subreq->start = start;
|
||||
subreq->len = len;
|
||||
__clear_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags);
|
||||
subreq->retry_count++;
|
||||
trace_netfs_sreq(subreq, netfs_sreq_trace_retry);
|
||||
|
||||
/* Renegotiate max_len (wsize) */
|
||||
stream->sreq_max_len = len;
|
||||
stream->prepare_write(subreq);
|
||||
|
||||
part = min(len, stream->sreq_max_len);
|
||||
part = umin(len, stream->sreq_max_len);
|
||||
if (unlikely(stream->sreq_max_segs))
|
||||
part = netfs_limit_iter(&source, 0, part, stream->sreq_max_segs);
|
||||
subreq->len = part;
|
||||
subreq->start = start;
|
||||
subreq->transferred = 0;
|
||||
len -= part;
|
||||
start += part;
|
||||
|
||||
@@ -1322,6 +1322,8 @@ cifs_readv_callback(struct mid_q_entry *mid)
|
||||
} else if (rdata->got_bytes > 0) {
|
||||
__set_bit(NETFS_SREQ_MADE_PROGRESS, &rdata->subreq.flags);
|
||||
}
|
||||
if (rdata->got_bytes)
|
||||
__set_bit(NETFS_SREQ_MADE_PROGRESS, &rdata->subreq.flags);
|
||||
}
|
||||
|
||||
rdata->credits.value = 0;
|
||||
|
||||
@@ -4607,7 +4607,8 @@ smb2_readv_callback(struct mid_q_entry *mid)
|
||||
__set_bit(NETFS_SREQ_HIT_EOF, &rdata->subreq.flags);
|
||||
rdata->result = 0;
|
||||
}
|
||||
__set_bit(NETFS_SREQ_MADE_PROGRESS, &rdata->subreq.flags);
|
||||
if (rdata->got_bytes)
|
||||
__set_bit(NETFS_SREQ_MADE_PROGRESS, &rdata->subreq.flags);
|
||||
}
|
||||
trace_smb3_rw_credits(rreq_debug_id, subreq_debug_index, rdata->credits.value,
|
||||
server->credits, server->in_flight,
|
||||
@@ -4616,7 +4617,7 @@ smb2_readv_callback(struct mid_q_entry *mid)
|
||||
rdata->subreq.error = rdata->result;
|
||||
rdata->subreq.transferred += rdata->got_bytes;
|
||||
trace_netfs_sreq(&rdata->subreq, netfs_sreq_trace_io_progress);
|
||||
queue_work(cifsiod_wq, &rdata->subreq.work);
|
||||
netfs_read_subreq_terminated(&rdata->subreq);
|
||||
release_mid(mid);
|
||||
trace_smb3_rw_credits(rreq_debug_id, subreq_debug_index, 0,
|
||||
server->credits, server->in_flight,
|
||||
|
||||
@@ -181,9 +181,6 @@ struct netfs_io_subrequest {
|
||||
unsigned long long start; /* Where to start the I/O */
|
||||
size_t len; /* Size of the I/O */
|
||||
size_t transferred; /* Amount of data transferred */
|
||||
size_t consumed; /* Amount of read data consumed */
|
||||
size_t prev_donated; /* Amount of data donated from previous subreq */
|
||||
size_t next_donated; /* Amount of data donated from next subreq */
|
||||
refcount_t ref;
|
||||
short error; /* 0 or error that occurred */
|
||||
unsigned short debug_index; /* Index in list (for debugging output) */
|
||||
@@ -191,9 +188,6 @@ struct netfs_io_subrequest {
|
||||
u8 retry_count; /* The number of retries (0 on initial pass) */
|
||||
enum netfs_io_source source; /* Where to read from/write to */
|
||||
unsigned char stream_nr; /* I/O stream this belongs to */
|
||||
unsigned char curr_folioq_slot; /* Folio currently being read */
|
||||
unsigned char curr_folio_order; /* Order of folio */
|
||||
struct folio_queue *curr_folioq; /* Queue segment in which current folio resides */
|
||||
unsigned long flags;
|
||||
#define NETFS_SREQ_COPY_TO_CACHE 0 /* Set if should copy the data to the cache */
|
||||
#define NETFS_SREQ_CLEAR_TAIL 1 /* Set if the rest of the read should be cleared */
|
||||
@@ -236,15 +230,16 @@ struct netfs_io_request {
|
||||
struct address_space *mapping; /* The mapping being accessed */
|
||||
struct kiocb *iocb; /* AIO completion vector */
|
||||
struct netfs_cache_resources cache_resources;
|
||||
struct netfs_io_request *copy_to_cache; /* Request to write just-read data to the cache */
|
||||
struct readahead_control *ractl; /* Readahead descriptor */
|
||||
struct list_head proc_link; /* Link in netfs_iorequests */
|
||||
struct list_head subrequests; /* Contributory I/O operations */
|
||||
struct netfs_io_stream io_streams[2]; /* Streams of parallel I/O operations */
|
||||
#define NR_IO_STREAMS 2 //wreq->nr_io_streams
|
||||
struct netfs_group *group; /* Writeback group being written back */
|
||||
struct rolling_buffer buffer; /* Unencrypted buffer */
|
||||
#define NETFS_ROLLBUF_PUT_MARK ROLLBUF_MARK_1
|
||||
#define NETFS_ROLLBUF_PAGECACHE_MARK ROLLBUF_MARK_2
|
||||
wait_queue_head_t waitq; /* Processor waiter */
|
||||
void *netfs_priv; /* Private data for the netfs */
|
||||
void *netfs_priv2; /* Private data for the netfs */
|
||||
struct bio_vec *direct_bv; /* DIO buffer list (when handling iovec-iter) */
|
||||
@@ -255,7 +250,6 @@ struct netfs_io_request {
|
||||
atomic_t subreq_counter; /* Next subreq->debug_index */
|
||||
unsigned int nr_group_rel; /* Number of refs to release on ->group */
|
||||
spinlock_t lock; /* Lock for queuing subreqs */
|
||||
atomic_t nr_outstanding; /* Number of ops in progress */
|
||||
unsigned long long submitted; /* Amount submitted for I/O so far */
|
||||
unsigned long long len; /* Length of the request */
|
||||
size_t transferred; /* Amount to be indicated as transferred */
|
||||
@@ -267,14 +261,17 @@ struct netfs_io_request {
|
||||
atomic64_t issued_to; /* Write issuer folio cursor */
|
||||
unsigned long long collected_to; /* Point we've collected to */
|
||||
unsigned long long cleaned_to; /* Position we've cleaned folios to */
|
||||
unsigned long long abandon_to; /* Position to abandon folios to */
|
||||
pgoff_t no_unlock_folio; /* Don't unlock this folio after read */
|
||||
size_t prev_donated; /* Fallback for subreq->prev_donated */
|
||||
unsigned char front_folio_order; /* Order (size) of front folio */
|
||||
refcount_t ref;
|
||||
unsigned long flags;
|
||||
#define NETFS_RREQ_OFFLOAD_COLLECTION 0 /* Offload collection to workqueue */
|
||||
#define NETFS_RREQ_NO_UNLOCK_FOLIO 2 /* Don't unlock no_unlock_folio on completion */
|
||||
#define NETFS_RREQ_DONT_UNLOCK_FOLIOS 3 /* Don't unlock the folios on completion */
|
||||
#define NETFS_RREQ_FAILED 4 /* The request failed */
|
||||
#define NETFS_RREQ_IN_PROGRESS 5 /* Unlocked when the request completes */
|
||||
#define NETFS_RREQ_FOLIO_COPY_TO_CACHE 6 /* Copy current folio to cache from read */
|
||||
#define NETFS_RREQ_UPLOAD_TO_SERVER 8 /* Need to write to the server */
|
||||
#define NETFS_RREQ_NONBLOCK 9 /* Don't block if possible (O_NONBLOCK) */
|
||||
#define NETFS_RREQ_BLOCKED 10 /* We blocked */
|
||||
@@ -439,7 +436,6 @@ vm_fault_t netfs_page_mkwrite(struct vm_fault *vmf, struct netfs_group *netfs_gr
|
||||
/* (Sub)request management API. */
|
||||
void netfs_read_subreq_progress(struct netfs_io_subrequest *subreq);
|
||||
void netfs_read_subreq_terminated(struct netfs_io_subrequest *subreq);
|
||||
void netfs_read_subreq_termination_worker(struct work_struct *work);
|
||||
void netfs_get_subrequest(struct netfs_io_subrequest *subreq,
|
||||
enum netfs_sreq_ref_trace what);
|
||||
void netfs_put_subrequest(struct netfs_io_subrequest *subreq,
|
||||
|
||||
@@ -50,18 +50,23 @@
|
||||
EM(netfs_rreq_trace_assess, "ASSESS ") \
|
||||
EM(netfs_rreq_trace_copy, "COPY ") \
|
||||
EM(netfs_rreq_trace_collect, "COLLECT") \
|
||||
EM(netfs_rreq_trace_complete, "COMPLET") \
|
||||
EM(netfs_rreq_trace_dirty, "DIRTY ") \
|
||||
EM(netfs_rreq_trace_done, "DONE ") \
|
||||
EM(netfs_rreq_trace_free, "FREE ") \
|
||||
EM(netfs_rreq_trace_redirty, "REDIRTY") \
|
||||
EM(netfs_rreq_trace_resubmit, "RESUBMT") \
|
||||
EM(netfs_rreq_trace_set_abandon, "S-ABNDN") \
|
||||
EM(netfs_rreq_trace_set_pause, "PAUSE ") \
|
||||
EM(netfs_rreq_trace_unlock, "UNLOCK ") \
|
||||
EM(netfs_rreq_trace_unlock_pgpriv2, "UNLCK-2") \
|
||||
EM(netfs_rreq_trace_unmark, "UNMARK ") \
|
||||
EM(netfs_rreq_trace_wait_ip, "WAIT-IP") \
|
||||
EM(netfs_rreq_trace_wait_pause, "WT-PAUS") \
|
||||
EM(netfs_rreq_trace_wait_queue, "WAIT-Q ") \
|
||||
EM(netfs_rreq_trace_wake_ip, "WAKE-IP") \
|
||||
EM(netfs_rreq_trace_wake_queue, "WAKE-Q ") \
|
||||
EM(netfs_rreq_trace_woke_queue, "WOKE-Q ") \
|
||||
EM(netfs_rreq_trace_unpause, "UNPAUSE") \
|
||||
E_(netfs_rreq_trace_write_done, "WR-DONE")
|
||||
|
||||
@@ -81,6 +86,7 @@
|
||||
EM(netfs_sreq_trace_cache_nowrite, "CA-NW") \
|
||||
EM(netfs_sreq_trace_cache_prepare, "CA-PR") \
|
||||
EM(netfs_sreq_trace_cache_write, "CA-WR") \
|
||||
EM(netfs_sreq_trace_cancel, "CANCL") \
|
||||
EM(netfs_sreq_trace_clear, "CLEAR") \
|
||||
EM(netfs_sreq_trace_discard, "DSCRD") \
|
||||
EM(netfs_sreq_trace_donate_to_prev, "DON-P") \
|
||||
@@ -91,6 +97,9 @@
|
||||
EM(netfs_sreq_trace_hit_eof, "EOF ") \
|
||||
EM(netfs_sreq_trace_io_progress, "IO ") \
|
||||
EM(netfs_sreq_trace_limited, "LIMIT") \
|
||||
EM(netfs_sreq_trace_need_clear, "N-CLR") \
|
||||
EM(netfs_sreq_trace_partial_read, "PARTR") \
|
||||
EM(netfs_sreq_trace_need_retry, "NRTRY") \
|
||||
EM(netfs_sreq_trace_prepare, "PREP ") \
|
||||
EM(netfs_sreq_trace_prep_failed, "PRPFL") \
|
||||
EM(netfs_sreq_trace_progress, "PRGRS") \
|
||||
@@ -136,6 +145,7 @@
|
||||
EM(netfs_sreq_trace_get_submit, "GET SUBMIT") \
|
||||
EM(netfs_sreq_trace_get_short_read, "GET SHORTRD") \
|
||||
EM(netfs_sreq_trace_new, "NEW ") \
|
||||
EM(netfs_sreq_trace_put_abandon, "PUT ABANDON") \
|
||||
EM(netfs_sreq_trace_put_cancel, "PUT CANCEL ") \
|
||||
EM(netfs_sreq_trace_put_clear, "PUT CLEAR ") \
|
||||
EM(netfs_sreq_trace_put_consumed, "PUT CONSUME") \
|
||||
@@ -176,6 +186,7 @@
|
||||
EM(netfs_folio_trace_mkwrite, "mkwrite") \
|
||||
EM(netfs_folio_trace_mkwrite_plus, "mkwrite+") \
|
||||
EM(netfs_folio_trace_not_under_wback, "!wback") \
|
||||
EM(netfs_folio_trace_not_locked, "!locked") \
|
||||
EM(netfs_folio_trace_put, "put") \
|
||||
EM(netfs_folio_trace_read, "read") \
|
||||
EM(netfs_folio_trace_read_done, "read-done") \
|
||||
@@ -204,7 +215,6 @@
|
||||
EM(netfs_trace_folioq_clear, "clear") \
|
||||
EM(netfs_trace_folioq_delete, "delete") \
|
||||
EM(netfs_trace_folioq_make_space, "make-space") \
|
||||
EM(netfs_trace_folioq_prep_write, "prep-wr") \
|
||||
EM(netfs_trace_folioq_rollbuf_init, "roll-init") \
|
||||
E_(netfs_trace_folioq_read_progress, "r-progress")
|
||||
|
||||
@@ -352,7 +362,7 @@ TRACE_EVENT(netfs_sreq,
|
||||
__entry->len = sreq->len;
|
||||
__entry->transferred = sreq->transferred;
|
||||
__entry->start = sreq->start;
|
||||
__entry->slot = sreq->curr_folioq_slot;
|
||||
__entry->slot = sreq->io_iter.folioq_slot;
|
||||
),
|
||||
|
||||
TP_printk("R=%08x[%x] %s %s f=%02x s=%llx %zx/%zx s=%u e=%d",
|
||||
@@ -701,71 +711,6 @@ TRACE_EVENT(netfs_collect_stream,
|
||||
__entry->collected_to, __entry->front)
|
||||
);
|
||||
|
||||
TRACE_EVENT(netfs_progress,
|
||||
TP_PROTO(const struct netfs_io_subrequest *subreq,
|
||||
unsigned long long start, size_t avail, size_t part),
|
||||
|
||||
TP_ARGS(subreq, start, avail, part),
|
||||
|
||||
TP_STRUCT__entry(
|
||||
__field(unsigned int, rreq)
|
||||
__field(unsigned int, subreq)
|
||||
__field(unsigned int, consumed)
|
||||
__field(unsigned int, transferred)
|
||||
__field(unsigned long long, f_start)
|
||||
__field(unsigned int, f_avail)
|
||||
__field(unsigned int, f_part)
|
||||
__field(unsigned char, slot)
|
||||
),
|
||||
|
||||
TP_fast_assign(
|
||||
__entry->rreq = subreq->rreq->debug_id;
|
||||
__entry->subreq = subreq->debug_index;
|
||||
__entry->consumed = subreq->consumed;
|
||||
__entry->transferred = subreq->transferred;
|
||||
__entry->f_start = start;
|
||||
__entry->f_avail = avail;
|
||||
__entry->f_part = part;
|
||||
__entry->slot = subreq->curr_folioq_slot;
|
||||
),
|
||||
|
||||
TP_printk("R=%08x[%02x] s=%llx ct=%x/%x pa=%x/%x sl=%x",
|
||||
__entry->rreq, __entry->subreq, __entry->f_start,
|
||||
__entry->consumed, __entry->transferred,
|
||||
__entry->f_part, __entry->f_avail, __entry->slot)
|
||||
);
|
||||
|
||||
TRACE_EVENT(netfs_donate,
|
||||
TP_PROTO(const struct netfs_io_request *rreq,
|
||||
const struct netfs_io_subrequest *from,
|
||||
const struct netfs_io_subrequest *to,
|
||||
size_t amount,
|
||||
enum netfs_donate_trace trace),
|
||||
|
||||
TP_ARGS(rreq, from, to, amount, trace),
|
||||
|
||||
TP_STRUCT__entry(
|
||||
__field(unsigned int, rreq)
|
||||
__field(unsigned int, from)
|
||||
__field(unsigned int, to)
|
||||
__field(unsigned int, amount)
|
||||
__field(enum netfs_donate_trace, trace)
|
||||
),
|
||||
|
||||
TP_fast_assign(
|
||||
__entry->rreq = rreq->debug_id;
|
||||
__entry->from = from->debug_index;
|
||||
__entry->to = to ? to->debug_index : -1;
|
||||
__entry->amount = amount;
|
||||
__entry->trace = trace;
|
||||
),
|
||||
|
||||
TP_printk("R=%08x[%02x] -> [%02x] %s am=%x",
|
||||
__entry->rreq, __entry->from, __entry->to,
|
||||
__print_symbolic(__entry->trace, netfs_donate_traces),
|
||||
__entry->amount)
|
||||
);
|
||||
|
||||
TRACE_EVENT(netfs_folioq,
|
||||
TP_PROTO(const struct folio_queue *fq,
|
||||
enum netfs_folioq_trace trace),
|
||||
|
||||