block: switch polling to be bio based
Replace the blk_poll interface, which requires the caller to keep a queue and cookie from the submission, with polling based on the bio itself. Polling on the bio brings a few advantages:

- the cookie construction can be made entirely private in blk-mq.c
- the caller does not need to remember the request_queue and cookie separately, and thus sidesteps their lifetime issues
- keeping the device and the cookie inside the bio makes it trivial to support polling of bios remapped by stacking drivers
- a lot of code that propagated the cookie back up the submission path can be removed entirely

Signed-off-by: Christoph Hellwig <hch@lst.de>
Tested-by: Mark Wunderlich <mark.wunderlich@intel.com>
Link: https://lore.kernel.org/r/20211012111226.760968-15-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Committed by: Jens Axboe
Parent: 19416123ab
Commit: 3e08773c38
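
For orientation, here is a minimal sketch of what the change means for a synchronous polled-I/O caller, condensed from the __blkdev_direct_IO_simple() hunk in block/fops.c further down. It assumes, as fops.c does, that the bio's end_io handler clears bio->bi_private and wakes the waiting task; it is not a standalone driver.

#include <linux/blkdev.h>
#include <linux/sched.h>

static void example_wait_for_completion(struct kiocb *iocb, struct bio *bio)
{
	/* Old interface: the caller had to keep the queue and the returned cookie,
	 *   blk_qc_t qc = submit_bio(bio);
	 *   ... !blk_poll(bdev_get_queue(bdev), qc, 0) ...
	 */

	/* New interface: the cookie lives in bio->bi_cookie, so only the bio is needed. */
	submit_bio(bio);
	for (;;) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (!READ_ONCE(bio->bi_private))
			break;
		if (!(iocb->ki_flags & IOCB_HIPRI) || !bio_poll(bio, 0))
			blk_io_schedule();
	}
	__set_current_state(TASK_RUNNING);
}

Nothing here is new API beyond what the diff introduces: submit_bio() and submit_bio_noacct() become void, and bio_poll()/iocb_bio_iopoll() replace blk_poll() on the polling side.
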
@@ -58,7 +58,7 @@ struct nfhd_device {
struct gendisk *disk;
};

static blk_qc_t nfhd_submit_bio(struct bio *bio)
static void nfhd_submit_bio(struct bio *bio)
{
struct nfhd_device *dev = bio->bi_bdev->bd_disk->private_data;
struct bio_vec bvec;
@@ -76,7 +76,6 @@ static blk_qc_t nfhd_submit_bio(struct bio *bio)
sec += len;
}
bio_endio(bio);
return BLK_QC_T_NONE;
}

static int nfhd_getgeo(struct block_device *bdev, struct hd_geometry *geo)

@@ -100,7 +100,7 @@ static void simdisk_transfer(struct simdisk *dev, unsigned long sector,
spin_unlock(&dev->lock);
}

static blk_qc_t simdisk_submit_bio(struct bio *bio)
static void simdisk_submit_bio(struct bio *bio)
{
struct simdisk *dev = bio->bi_bdev->bd_disk->private_data;
struct bio_vec bvec;
@@ -118,7 +118,6 @@ static blk_qc_t simdisk_submit_bio(struct bio *bio)
}

bio_endio(bio);
return BLK_QC_T_NONE;
}

static int simdisk_open(struct block_device *bdev, fmode_t mode)

@@ -282,6 +282,7 @@ void bio_init(struct bio *bio, struct bio_vec *table,

atomic_set(&bio->__bi_remaining, 1);
atomic_set(&bio->__bi_cnt, 1);
bio->bi_cookie = BLK_QC_T_NONE;

bio->bi_max_vecs = max_vecs;
bio->bi_io_vec = table;

block/blk-core.c
@@ -915,25 +915,22 @@ end_io:
return false;
}

static blk_qc_t __submit_bio(struct bio *bio)
static void __submit_bio(struct bio *bio)
{
struct gendisk *disk = bio->bi_bdev->bd_disk;
blk_qc_t ret = BLK_QC_T_NONE;

if (unlikely(bio_queue_enter(bio) != 0))
return BLK_QC_T_NONE;
return;

if (!submit_bio_checks(bio) || !blk_crypto_bio_prep(&bio))
goto queue_exit;
if (disk->fops->submit_bio) {
ret = disk->fops->submit_bio(bio);
goto queue_exit;
if (!disk->fops->submit_bio) {
blk_mq_submit_bio(bio);
return;
}
return blk_mq_submit_bio(bio);

disk->fops->submit_bio(bio);
queue_exit:
blk_queue_exit(disk->queue);
return ret;
}

/*
@@ -955,10 +952,9 @@ queue_exit:
* bio_list_on_stack[1] contains bios that were submitted before the current
* ->submit_bio_bio, but that haven't been processed yet.
*/
static blk_qc_t __submit_bio_noacct(struct bio *bio)
static void __submit_bio_noacct(struct bio *bio)
{
struct bio_list bio_list_on_stack[2];
blk_qc_t ret = BLK_QC_T_NONE;

BUG_ON(bio->bi_next);

@@ -975,7 +971,7 @@ static blk_qc_t __submit_bio_noacct(struct bio *bio)
bio_list_on_stack[1] = bio_list_on_stack[0];
bio_list_init(&bio_list_on_stack[0]);

ret = __submit_bio(bio);
__submit_bio(bio);

/*
* Sort new bios into those for a lower level and those for the
@@ -998,22 +994,19 @@ static blk_qc_t __submit_bio_noacct(struct bio *bio)
} while ((bio = bio_list_pop(&bio_list_on_stack[0])));

current->bio_list = NULL;
return ret;
}

static blk_qc_t __submit_bio_noacct_mq(struct bio *bio)
static void __submit_bio_noacct_mq(struct bio *bio)
{
struct bio_list bio_list[2] = { };
blk_qc_t ret;

current->bio_list = bio_list;

do {
ret = __submit_bio(bio);
__submit_bio(bio);
} while ((bio = bio_list_pop(&bio_list[0])));

current->bio_list = NULL;
return ret;
}

/**
@@ -1025,7 +1018,7 @@ static blk_qc_t __submit_bio_noacct_mq(struct bio *bio)
* systems and other upper level users of the block layer should use
* submit_bio() instead.
*/
blk_qc_t submit_bio_noacct(struct bio *bio)
void submit_bio_noacct(struct bio *bio)
{
/*
* We only want one ->submit_bio to be active at a time, else stack
@@ -1033,14 +1026,12 @@ blk_qc_t submit_bio_noacct(struct bio *bio)
* to collect a list of requests submited by a ->submit_bio method while
* it is active, and then process them after it returned.
*/
if (current->bio_list) {
if (current->bio_list)
bio_list_add(&current->bio_list[0], bio);
return BLK_QC_T_NONE;
}

if (!bio->bi_bdev->bd_disk->fops->submit_bio)
return __submit_bio_noacct_mq(bio);
return __submit_bio_noacct(bio);
else if (!bio->bi_bdev->bd_disk->fops->submit_bio)
__submit_bio_noacct_mq(bio);
else
__submit_bio_noacct(bio);
}
EXPORT_SYMBOL(submit_bio_noacct);

@@ -1057,10 +1048,10 @@ EXPORT_SYMBOL(submit_bio_noacct);
* in @bio. The bio must NOT be touched by the caller until ->bi_end_io() has
* been called.
*/
blk_qc_t submit_bio(struct bio *bio)
void submit_bio(struct bio *bio)
{
if (blkcg_punt_bio_submit(bio))
return BLK_QC_T_NONE;
return;

/*
* If it's a regular read/write or a barrier with data attached,
@@ -1092,19 +1083,91 @@ blk_qc_t submit_bio(struct bio *bio)
if (unlikely(bio_op(bio) == REQ_OP_READ &&
bio_flagged(bio, BIO_WORKINGSET))) {
unsigned long pflags;
blk_qc_t ret;

psi_memstall_enter(&pflags);
ret = submit_bio_noacct(bio);
submit_bio_noacct(bio);
psi_memstall_leave(&pflags);

return ret;
return;
}

return submit_bio_noacct(bio);
submit_bio_noacct(bio);
}
EXPORT_SYMBOL(submit_bio);

/**
* bio_poll - poll for BIO completions
* @bio: bio to poll for
* @flags: BLK_POLL_* flags that control the behavior
*
* Poll for completions on queue associated with the bio. Returns number of
* completed entries found.
*
* Note: the caller must either be the context that submitted @bio, or
* be in a RCU critical section to prevent freeing of @bio.
*/
int bio_poll(struct bio *bio, unsigned int flags)
{
struct request_queue *q = bio->bi_bdev->bd_disk->queue;
blk_qc_t cookie = READ_ONCE(bio->bi_cookie);
int ret;

if (cookie == BLK_QC_T_NONE ||
!test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
return 0;

if (current->plug)
blk_flush_plug_list(current->plug, false);

if (blk_queue_enter(q, BLK_MQ_REQ_NOWAIT))
return 0;
if (WARN_ON_ONCE(!queue_is_mq(q)))
ret = 0; /* not yet implemented, should not happen */
else
ret = blk_mq_poll(q, cookie, flags);
blk_queue_exit(q);
return ret;
}
EXPORT_SYMBOL_GPL(bio_poll);

/*
* Helper to implement file_operations.iopoll. Requires the bio to be stored
* in iocb->private, and cleared before freeing the bio.
*/
int iocb_bio_iopoll(struct kiocb *kiocb, unsigned int flags)
{
struct bio *bio;
int ret = 0;

/*
* Note: the bio cache only uses SLAB_TYPESAFE_BY_RCU, so bio can
* point to a freshly allocated bio at this point. If that happens
* we have a few cases to consider:
*
* 1) the bio is beeing initialized and bi_bdev is NULL. We can just
* simply nothing in this case
* 2) the bio points to a not poll enabled device. bio_poll will catch
* this and return 0
* 3) the bio points to a poll capable device, including but not
* limited to the one that the original bio pointed to. In this
* case we will call into the actual poll method and poll for I/O,
* even if we don't need to, but it won't cause harm either.
*
* For cases 2) and 3) above the RCU grace period ensures that bi_bdev
* is still allocated. Because partitions hold a reference to the whole
* device bdev and thus disk, the disk is also still valid. Grabbing
* a reference to the queue in bio_poll() ensures the hctxs and requests
* are still valid as well.
*/
rcu_read_lock();
bio = READ_ONCE(kiocb->private);
if (bio && bio->bi_bdev)
ret = bio_poll(bio, flags);
rcu_read_unlock();

return ret;
}
EXPORT_SYMBOL_GPL(iocb_bio_iopoll);

/**
* blk_cloned_rq_check_limits - Helper function to check a cloned request
* for the new queue limits

@@ -65,13 +65,19 @@ EXPORT_SYMBOL_GPL(blk_execute_rq_nowait);

static bool blk_rq_is_poll(struct request *rq)
{
return rq->mq_hctx && rq->mq_hctx->type == HCTX_TYPE_POLL;
if (!rq->mq_hctx)
return false;
if (rq->mq_hctx->type != HCTX_TYPE_POLL)
return false;
if (WARN_ON_ONCE(!rq->bio))
return false;
return true;
}

static void blk_rq_poll_completion(struct request *rq, struct completion *wait)
{
do {
blk_poll(rq->q, request_to_qc_t(rq->mq_hctx, rq), 0);
bio_poll(rq->bio, 0);
cond_resched();
} while (!completion_done(wait));
}

@@ -65,6 +65,9 @@ static int blk_mq_poll_stats_bkt(const struct request *rq)
return bucket;
}

#define BLK_QC_T_SHIFT 16
#define BLK_QC_T_INTERNAL (1U << 31)

static inline struct blk_mq_hw_ctx *blk_qc_to_hctx(struct request_queue *q,
blk_qc_t qc)
{
@@ -81,6 +84,13 @@ static inline struct request *blk_qc_to_rq(struct blk_mq_hw_ctx *hctx,
return blk_mq_tag_to_rq(hctx->tags, tag);
}

static inline blk_qc_t blk_rq_to_qc(struct request *rq)
{
return (rq->mq_hctx->queue_num << BLK_QC_T_SHIFT) |
(rq->tag != -1 ?
rq->tag : (rq->internal_tag | BLK_QC_T_INTERNAL));
}

/*
* Check if any of the ctx, dispatch list or elevator
* have pending work in this hardware queue.
@@ -819,6 +829,8 @@ void blk_mq_start_request(struct request *rq)
if (blk_integrity_rq(rq) && req_op(rq) == REQ_OP_WRITE)
q->integrity.profile->prepare_fn(rq);
#endif
if (rq->bio && rq->bio->bi_opf & REQ_POLLED)
WRITE_ONCE(rq->bio->bi_cookie, blk_rq_to_qc(rq));
}
EXPORT_SYMBOL(blk_mq_start_request);

@@ -2045,19 +2057,15 @@ static void blk_mq_bio_to_request(struct request *rq, struct bio *bio,
}

static blk_status_t __blk_mq_issue_directly(struct blk_mq_hw_ctx *hctx,
struct request *rq,
blk_qc_t *cookie, bool last)
struct request *rq, bool last)
{
struct request_queue *q = rq->q;
struct blk_mq_queue_data bd = {
.rq = rq,
.last = last,
};
blk_qc_t new_cookie;
blk_status_t ret;

new_cookie = request_to_qc_t(hctx, rq);

/*
* For OK queue, we are done. For error, caller may kill it.
* Any other error (busy), just add it to our list as we
@@ -2067,7 +2075,6 @@ static blk_status_t __blk_mq_issue_directly(struct blk_mq_hw_ctx *hctx,
switch (ret) {
case BLK_STS_OK:
blk_mq_update_dispatch_busy(hctx, false);
*cookie = new_cookie;
break;
case BLK_STS_RESOURCE:
case BLK_STS_DEV_RESOURCE:
@@ -2076,7 +2083,6 @@ static blk_status_t __blk_mq_issue_directly(struct blk_mq_hw_ctx *hctx,
break;
default:
blk_mq_update_dispatch_busy(hctx, false);
*cookie = BLK_QC_T_NONE;
break;
}

@@ -2085,7 +2091,6 @@ static blk_status_t __blk_mq_issue_directly(struct blk_mq_hw_ctx *hctx,

static blk_status_t __blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
struct request *rq,
blk_qc_t *cookie,
bool bypass_insert, bool last)
{
struct request_queue *q = rq->q;
@@ -2119,7 +2124,7 @@ static blk_status_t __blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
goto insert;
}

return __blk_mq_issue_directly(hctx, rq, cookie, last);
return __blk_mq_issue_directly(hctx, rq, last);
insert:
if (bypass_insert)
return BLK_STS_RESOURCE;
@@ -2133,7 +2138,6 @@ insert:
* blk_mq_try_issue_directly - Try to send a request directly to device driver.
* @hctx: Pointer of the associated hardware queue.
* @rq: Pointer to request to be sent.
* @cookie: Request queue cookie.
*
* If the device has enough resources to accept a new request now, send the
* request directly to device driver. Else, insert at hctx->dispatch queue, so
@@ -2141,7 +2145,7 @@ insert:
* queue have higher priority.
*/
static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
struct request *rq, blk_qc_t *cookie)
struct request *rq)
{
blk_status_t ret;
int srcu_idx;
@@ -2150,7 +2154,7 @@ static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,

hctx_lock(hctx, &srcu_idx);

ret = __blk_mq_try_issue_directly(hctx, rq, cookie, false, true);
ret = __blk_mq_try_issue_directly(hctx, rq, false, true);
if (ret == BLK_STS_RESOURCE || ret == BLK_STS_DEV_RESOURCE)
blk_mq_request_bypass_insert(rq, false, true);
else if (ret != BLK_STS_OK)
@@ -2163,11 +2167,10 @@ blk_status_t blk_mq_request_issue_directly(struct request *rq, bool last)
{
blk_status_t ret;
int srcu_idx;
blk_qc_t unused_cookie;
struct blk_mq_hw_ctx *hctx = rq->mq_hctx;

hctx_lock(hctx, &srcu_idx);
ret = __blk_mq_try_issue_directly(hctx, rq, &unused_cookie, true, last);
ret = __blk_mq_try_issue_directly(hctx, rq, true, last);
hctx_unlock(hctx, srcu_idx);

return ret;
@@ -2247,10 +2250,8 @@ static inline unsigned short blk_plug_max_rq_count(struct blk_plug *plug)
*
* It will not queue the request if there is an error with the bio, or at the
* request creation.
*
* Returns: Request queue cookie.
*/
blk_qc_t blk_mq_submit_bio(struct bio *bio)
void blk_mq_submit_bio(struct bio *bio)
{
struct request_queue *q = bio->bi_bdev->bd_disk->queue;
const int is_sync = op_is_sync(bio->bi_opf);
@@ -2259,9 +2260,7 @@ blk_qc_t blk_mq_submit_bio(struct bio *bio)
struct blk_plug *plug;
struct request *same_queue_rq = NULL;
unsigned int nr_segs;
blk_qc_t cookie;
blk_status_t ret;
bool hipri;

blk_queue_bounce(q, &bio);
__blk_queue_split(&bio, &nr_segs);
@@ -2278,8 +2277,6 @@ blk_qc_t blk_mq_submit_bio(struct bio *bio)

rq_qos_throttle(q, bio);

hipri = bio->bi_opf & REQ_POLLED;

plug = blk_mq_plug(q, bio);
if (plug && plug->cached_rq) {
rq = plug->cached_rq;
@@ -2310,8 +2307,6 @@ blk_qc_t blk_mq_submit_bio(struct bio *bio)

rq_qos_track(q, rq, bio);

cookie = request_to_qc_t(rq->mq_hctx, rq);

blk_mq_bio_to_request(rq, bio, nr_segs);

ret = blk_crypto_init_request(rq);
@@ -2319,7 +2314,7 @@ blk_qc_t blk_mq_submit_bio(struct bio *bio)
bio->bi_status = ret;
bio_endio(bio);
blk_mq_free_request(rq);
return BLK_QC_T_NONE;
return;
}

if (unlikely(is_flush_fua)) {
@@ -2375,7 +2370,7 @@ blk_qc_t blk_mq_submit_bio(struct bio *bio)
if (same_queue_rq) {
trace_block_unplug(q, 1, true);
blk_mq_try_issue_directly(same_queue_rq->mq_hctx,
same_queue_rq, &cookie);
same_queue_rq);
}
} else if ((q->nr_hw_queues > 1 && is_sync) ||
!rq->mq_hctx->dispatch_busy) {
@@ -2383,18 +2378,15 @@ blk_qc_t blk_mq_submit_bio(struct bio *bio)
* There is no scheduler and we can try to send directly
* to the hardware.
*/
blk_mq_try_issue_directly(rq->mq_hctx, rq, &cookie);
blk_mq_try_issue_directly(rq->mq_hctx, rq);
} else {
/* Default case. */
blk_mq_sched_insert_request(rq, false, true, true);
}

if (!hipri)
return BLK_QC_T_NONE;
return cookie;
return;
queue_exit:
blk_queue_exit(q);
return BLK_QC_T_NONE;
}

static size_t order_to_size(unsigned int order)
@@ -4084,25 +4076,8 @@ static int blk_mq_poll_classic(struct request_queue *q, blk_qc_t cookie,
return 0;
}

/**
* blk_poll - poll for IO completions
* @q: the queue
* @cookie: cookie passed back at IO submission time
* @flags: BLK_POLL_* flags that control the behavior
*
* Description:
* Poll for completions on the passed in queue. Returns number of
* completed entries found.
*/
int blk_poll(struct request_queue *q, blk_qc_t cookie, unsigned int flags)
int blk_mq_poll(struct request_queue *q, blk_qc_t cookie, unsigned int flags)
{
if (cookie == BLK_QC_T_NONE ||
!test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
return 0;

if (current->plug)
blk_flush_plug_list(current->plug, false);

if (!(flags & BLK_POLL_NOSLEEP) &&
q->poll_nsec != BLK_MQ_POLL_CLASSIC) {
if (blk_mq_poll_hybrid(q, cookie))
@@ -4110,7 +4085,6 @@ int blk_poll(struct request_queue *q, blk_qc_t cookie, unsigned int flags)
}
return blk_mq_poll_classic(q, cookie, flags);
}
EXPORT_SYMBOL_GPL(blk_poll);

unsigned int blk_mq_rq_cpu(struct request *rq)
{

@@ -37,6 +37,8 @@ struct blk_mq_ctx {
struct kobject kobj;
} ____cacheline_aligned_in_smp;

void blk_mq_submit_bio(struct bio *bio);
int blk_mq_poll(struct request_queue *q, blk_qc_t cookie, unsigned int flags);
void blk_mq_exit_queue(struct request_queue *q);
int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr);
void blk_mq_wake_waiters(struct request_queue *q);

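For reference, the cookie format that blk-mq now keeps to itself, restated from the BLK_QC_T_* defines and the blk_rq_to_qc()/blk_qc_to_hctx() helpers in the blk-mq.c hunks above. The example_* function names below are illustrative only, not kernel symbols:

#include <linux/blk_types.h>	/* blk_qc_t */

/* Copied from the blk-mq.c hunk above; after this patch they are private to block/blk-mq.c. */
#define BLK_QC_T_SHIFT		16
#define BLK_QC_T_INTERNAL	(1U << 31)

/* Mirrors blk_rq_to_qc(): hardware queue number in the high bits, tag in the
 * low 16 bits, bit 31 set when the tag is an internal (scheduler) tag. */
static inline blk_qc_t example_encode_cookie(unsigned int queue_num, int tag, int internal_tag)
{
	return (queue_num << BLK_QC_T_SHIFT) |
	       (tag != -1 ? tag : (internal_tag | BLK_QC_T_INTERNAL));
}

/* Mirrors blk_qc_to_hctx(): recover the hardware queue index from a cookie. */
static inline unsigned int example_cookie_to_hctx_idx(blk_qc_t qc)
{
	return (qc & ~BLK_QC_T_INTERNAL) >> BLK_QC_T_SHIFT;
}

blk_mq_start_request() writes this value into bio->bi_cookie for REQ_POLLED bios, and bio_poll() reads it back, so nothing outside blk-mq.c ever has to interpret it.
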
block/fops.c
@@ -61,7 +61,6 @@ static ssize_t __blkdev_direct_IO_simple(struct kiocb *iocb,
bool should_dirty = false;
struct bio bio;
ssize_t ret;
blk_qc_t qc;

if ((pos | iov_iter_alignment(iter)) &
(bdev_logical_block_size(bdev) - 1))
@@ -102,13 +101,12 @@ static ssize_t __blkdev_direct_IO_simple(struct kiocb *iocb,
if (iocb->ki_flags & IOCB_HIPRI)
bio_set_polled(&bio, iocb);

qc = submit_bio(&bio);
submit_bio(&bio);
for (;;) {
set_current_state(TASK_UNINTERRUPTIBLE);
if (!READ_ONCE(bio.bi_private))
break;
if (!(iocb->ki_flags & IOCB_HIPRI) ||
!blk_poll(bdev_get_queue(bdev), qc, 0))
if (!(iocb->ki_flags & IOCB_HIPRI) || !bio_poll(&bio, 0))
blk_io_schedule();
}
__set_current_state(TASK_RUNNING);
@@ -141,14 +139,6 @@ struct blkdev_dio {

static struct bio_set blkdev_dio_pool;

static int blkdev_iopoll(struct kiocb *kiocb, unsigned int flags)
{
struct block_device *bdev = I_BDEV(kiocb->ki_filp->f_mapping->host);
struct request_queue *q = bdev_get_queue(bdev);

return blk_poll(q, READ_ONCE(kiocb->ki_cookie), flags);
}

static void blkdev_bio_end_io(struct bio *bio)
{
struct blkdev_dio *dio = bio->bi_private;
@@ -162,6 +152,8 @@ static void blkdev_bio_end_io(struct bio *bio)
struct kiocb *iocb = dio->iocb;
ssize_t ret;

WRITE_ONCE(iocb->private, NULL);

if (likely(!dio->bio.bi_status)) {
ret = dio->size;
iocb->ki_pos += ret;
@@ -200,7 +192,6 @@ static ssize_t __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
bool do_poll = (iocb->ki_flags & IOCB_HIPRI);
bool is_read = (iov_iter_rw(iter) == READ), is_sync;
loff_t pos = iocb->ki_pos;
blk_qc_t qc = BLK_QC_T_NONE;
int ret = 0;

if ((pos | iov_iter_alignment(iter)) &
@@ -262,9 +253,9 @@ static ssize_t __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
if (!nr_pages) {
if (do_poll)
bio_set_polled(bio, iocb);
qc = submit_bio(bio);
submit_bio(bio);
if (do_poll)
WRITE_ONCE(iocb->ki_cookie, qc);
WRITE_ONCE(iocb->private, bio);
break;
}
if (!dio->multi_bio) {
@@ -297,7 +288,7 @@ static ssize_t __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
if (!READ_ONCE(dio->waiter))
break;

if (!do_poll || !blk_poll(bdev_get_queue(bdev), qc, 0))
if (!do_poll || !bio_poll(bio, 0))
blk_io_schedule();
}
__set_current_state(TASK_RUNNING);
@@ -594,7 +585,7 @@ const struct file_operations def_blk_fops = {
.llseek = blkdev_llseek,
.read_iter = blkdev_read_iter,
.write_iter = blkdev_write_iter,
.iopoll = blkdev_iopoll,
.iopoll = iocb_bio_iopoll,
.mmap = generic_file_mmap,
.fsync = blkdev_fsync,
.unlocked_ioctl = blkdev_ioctl,

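The block/fops.c hunks above also show the contract that the new iocb_bio_iopoll() helper expects from its users: the submitter stores the polled bio in iocb->private, and the bio's completion handler clears it before the bio can be freed or reused. A condensed sketch of that wiring, following the same ordering as the hunk above; the example_* names are not kernel symbols:

#include <linux/bio.h>
#include <linux/fs.h>

static void example_end_io(struct bio *bio)
{
	struct kiocb *iocb = bio->bi_private;

	/* Clear the polled-bio pointer before the bio is freed by its owner. */
	WRITE_ONCE(iocb->private, NULL);
}

static void example_submit_polled(struct kiocb *iocb, struct bio *bio)
{
	bio->bi_private = iocb;
	bio->bi_end_io = example_end_io;
	if (iocb->ki_flags & IOCB_HIPRI)
		bio_set_polled(bio, iocb);	/* mark the bio REQ_POLLED */
	submit_bio(bio);
	WRITE_ONCE(iocb->private, bio);		/* what iocb_bio_iopoll() polls */
}

With that protocol in place, the file's ->iopoll method can simply be iocb_bio_iopoll, which is exactly what def_blk_fops switches to above.
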
@@ -282,7 +282,7 @@ out:
return err;
}

static blk_qc_t brd_submit_bio(struct bio *bio)
static void brd_submit_bio(struct bio *bio)
{
struct brd_device *brd = bio->bi_bdev->bd_disk->private_data;
sector_t sector = bio->bi_iter.bi_sector;
@@ -299,16 +299,14 @@ static blk_qc_t brd_submit_bio(struct bio *bio)

err = brd_do_bvec(brd, bvec.bv_page, len, bvec.bv_offset,
bio_op(bio), sector);
if (err)
goto io_error;
if (err) {
bio_io_error(bio);
return;
}
sector += len >> SECTOR_SHIFT;
}

bio_endio(bio);
return BLK_QC_T_NONE;
io_error:
bio_io_error(bio);
return BLK_QC_T_NONE;
}

static int brd_rw_page(struct block_device *bdev, sector_t sector,

@@ -1448,7 +1448,7 @@ extern void conn_free_crypto(struct drbd_connection *connection);
/* drbd_req */
extern void do_submit(struct work_struct *ws);
extern void __drbd_make_request(struct drbd_device *, struct bio *);
extern blk_qc_t drbd_submit_bio(struct bio *bio);
void drbd_submit_bio(struct bio *bio);
extern int drbd_read_remote(struct drbd_device *device, struct drbd_request *req);
extern int is_valid_ar_handle(struct drbd_request *, sector_t);


@@ -1596,7 +1596,7 @@ void do_submit(struct work_struct *ws)
}
}

blk_qc_t drbd_submit_bio(struct bio *bio)
void drbd_submit_bio(struct bio *bio)
{
struct drbd_device *device = bio->bi_bdev->bd_disk->private_data;

@@ -1609,7 +1609,6 @@ blk_qc_t drbd_submit_bio(struct bio *bio)

inc_ap_bio(device);
__drbd_make_request(device, bio);
return BLK_QC_T_NONE;
}

static bool net_timeout_reached(struct drbd_request *net_req,

@@ -84,7 +84,7 @@ static bool n64cart_do_bvec(struct device *dev, struct bio_vec *bv, u32 pos)
return true;
}

static blk_qc_t n64cart_submit_bio(struct bio *bio)
static void n64cart_submit_bio(struct bio *bio)
{
struct bio_vec bvec;
struct bvec_iter iter;
@@ -92,16 +92,14 @@ static blk_qc_t n64cart_submit_bio(struct bio *bio)
u32 pos = bio->bi_iter.bi_sector << SECTOR_SHIFT;

bio_for_each_segment(bvec, bio, iter) {
if (!n64cart_do_bvec(dev, &bvec, pos))
goto io_error;
if (!n64cart_do_bvec(dev, &bvec, pos)) {
bio_io_error(bio);
return;
}
pos += bvec.bv_len;
}

bio_endio(bio);
return BLK_QC_T_NONE;
io_error:
bio_io_error(bio);
return BLK_QC_T_NONE;
}

static const struct block_device_operations n64cart_fops = {

@@ -1422,7 +1422,7 @@ static struct nullb_queue *nullb_to_queue(struct nullb *nullb)
return &nullb->queues[index];
}

static blk_qc_t null_submit_bio(struct bio *bio)
static void null_submit_bio(struct bio *bio)
{
sector_t sector = bio->bi_iter.bi_sector;
sector_t nr_sectors = bio_sectors(bio);
@@ -1434,7 +1434,6 @@ static blk_qc_t null_submit_bio(struct bio *bio)
cmd->bio = bio;

null_handle_cmd(cmd, sector, nr_sectors, bio_op(bio));
return BLK_QC_T_NONE;
}

static bool should_timeout_request(struct request *rq)

@@ -2400,7 +2400,7 @@ static void pkt_make_request_write(struct request_queue *q, struct bio *bio)
}
}

static blk_qc_t pkt_submit_bio(struct bio *bio)
static void pkt_submit_bio(struct bio *bio)
{
struct pktcdvd_device *pd;
char b[BDEVNAME_SIZE];
@@ -2423,7 +2423,7 @@ static blk_qc_t pkt_submit_bio(struct bio *bio)
*/
if (bio_data_dir(bio) == READ) {
pkt_make_request_read(pd, bio);
return BLK_QC_T_NONE;
return;
}

if (!test_bit(PACKET_WRITABLE, &pd->flags)) {
@@ -2455,10 +2455,9 @@ static blk_qc_t pkt_submit_bio(struct bio *bio)
pkt_make_request_write(bio->bi_bdev->bd_disk->queue, split);
} while (split != bio);

return BLK_QC_T_NONE;
return;
end_io:
bio_io_error(bio);
return BLK_QC_T_NONE;
}

static void pkt_init_queue(struct pktcdvd_device *pd)

@@ -578,7 +578,7 @@ out:
return next;
}

static blk_qc_t ps3vram_submit_bio(struct bio *bio)
static void ps3vram_submit_bio(struct bio *bio)
{
struct ps3_system_bus_device *dev = bio->bi_bdev->bd_disk->private_data;
struct ps3vram_priv *priv = ps3_system_bus_get_drvdata(dev);
@@ -594,13 +594,11 @@ static blk_qc_t ps3vram_submit_bio(struct bio *bio)
spin_unlock_irq(&priv->lock);

if (busy)
return BLK_QC_T_NONE;
return;

do {
bio = ps3vram_do_bio(dev, bio);
} while (bio);

return BLK_QC_T_NONE;
}

static const struct block_device_operations ps3vram_fops = {

@@ -50,7 +50,7 @@ struct rsxx_bio_meta {

static struct kmem_cache *bio_meta_pool;

static blk_qc_t rsxx_submit_bio(struct bio *bio);
static void rsxx_submit_bio(struct bio *bio);

/*----------------- Block Device Operations -----------------*/
static int rsxx_blkdev_ioctl(struct block_device *bdev,
@@ -120,7 +120,7 @@ static void bio_dma_done_cb(struct rsxx_cardinfo *card,
}
}

static blk_qc_t rsxx_submit_bio(struct bio *bio)
static void rsxx_submit_bio(struct bio *bio)
{
struct rsxx_cardinfo *card = bio->bi_bdev->bd_disk->private_data;
struct rsxx_bio_meta *bio_meta;
@@ -169,7 +169,7 @@ static blk_qc_t rsxx_submit_bio(struct bio *bio)
if (st)
goto queue_err;

return BLK_QC_T_NONE;
return;

queue_err:
kmem_cache_free(bio_meta_pool, bio_meta);
@@ -177,7 +177,6 @@ req_err:
if (st)
bio->bi_status = st;
bio_endio(bio);
return BLK_QC_T_NONE;
}

/*----------------- Device Setup -------------------*/

@@ -1598,22 +1598,18 @@ static void __zram_make_request(struct zram *zram, struct bio *bio)
/*
* Handler function for all zram I/O requests.
*/
static blk_qc_t zram_submit_bio(struct bio *bio)
static void zram_submit_bio(struct bio *bio)
{
struct zram *zram = bio->bi_bdev->bd_disk->private_data;

if (!valid_io_request(zram, bio->bi_iter.bi_sector,
bio->bi_iter.bi_size)) {
atomic64_inc(&zram->stats.invalid_io);
goto error;
bio_io_error(bio);
return;
}

__zram_make_request(zram, bio);
return BLK_QC_T_NONE;

error:
bio_io_error(bio);
return BLK_QC_T_NONE;
}

static void zram_slot_free_notify(struct block_device *bdev,

@@ -1163,7 +1163,7 @@ static void quit_max_writeback_rate(struct cache_set *c,

/* Cached devices - read & write stuff */

blk_qc_t cached_dev_submit_bio(struct bio *bio)
void cached_dev_submit_bio(struct bio *bio)
{
struct search *s;
struct block_device *orig_bdev = bio->bi_bdev;
@@ -1176,7 +1176,7 @@ blk_qc_t cached_dev_submit_bio(struct bio *bio)
dc->io_disable)) {
bio->bi_status = BLK_STS_IOERR;
bio_endio(bio);
return BLK_QC_T_NONE;
return;
}

if (likely(d->c)) {
@@ -1222,8 +1222,6 @@ blk_qc_t cached_dev_submit_bio(struct bio *bio)
} else
/* I/O request sent to backing device */
detached_dev_do_request(d, bio, orig_bdev, start_time);

return BLK_QC_T_NONE;
}

static int cached_dev_ioctl(struct bcache_device *d, fmode_t mode,
@@ -1273,7 +1271,7 @@ static void flash_dev_nodata(struct closure *cl)
continue_at(cl, search_free, NULL);
}

blk_qc_t flash_dev_submit_bio(struct bio *bio)
void flash_dev_submit_bio(struct bio *bio)
{
struct search *s;
struct closure *cl;
@@ -1282,7 +1280,7 @@ blk_qc_t flash_dev_submit_bio(struct bio *bio)
if (unlikely(d->c && test_bit(CACHE_SET_IO_DISABLE, &d->c->flags))) {
bio->bi_status = BLK_STS_IOERR;
bio_endio(bio);
return BLK_QC_T_NONE;
return;
}

s = search_alloc(bio, d, bio->bi_bdev, bio_start_io_acct(bio));
@@ -1298,7 +1296,7 @@ blk_qc_t flash_dev_submit_bio(struct bio *bio)
continue_at_nobarrier(&s->cl,
flash_dev_nodata,
bcache_wq);
return BLK_QC_T_NONE;
return;
} else if (bio_data_dir(bio)) {
bch_keybuf_check_overlapping(&s->iop.c->moving_gc_keys,
&KEY(d->id, bio->bi_iter.bi_sector, 0),
@@ -1314,7 +1312,6 @@ blk_qc_t flash_dev_submit_bio(struct bio *bio)
}

continue_at(cl, search_free, NULL);
return BLK_QC_T_NONE;
}

static int flash_dev_ioctl(struct bcache_device *d, fmode_t mode,

@@ -37,10 +37,10 @@ unsigned int bch_get_congested(const struct cache_set *c);
void bch_data_insert(struct closure *cl);

void bch_cached_dev_request_init(struct cached_dev *dc);
blk_qc_t cached_dev_submit_bio(struct bio *bio);
void cached_dev_submit_bio(struct bio *bio);

void bch_flash_dev_request_init(struct bcache_device *d);
blk_qc_t flash_dev_submit_bio(struct bio *bio);
void flash_dev_submit_bio(struct bio *bio);

extern struct kmem_cache *bch_search_cache;


@@ -1183,14 +1183,13 @@ static noinline void __set_swap_bios_limit(struct mapped_device *md, int latch)
mutex_unlock(&md->swap_bios_lock);
}

static blk_qc_t __map_bio(struct dm_target_io *tio)
static void __map_bio(struct dm_target_io *tio)
{
int r;
sector_t sector;
struct bio *clone = &tio->clone;
struct dm_io *io = tio->io;
struct dm_target *ti = tio->ti;
blk_qc_t ret = BLK_QC_T_NONE;

clone->bi_end_io = clone_endio;

@@ -1226,7 +1225,7 @@ static blk_qc_t __map_bio(struct dm_target_io *tio)
case DM_MAPIO_REMAPPED:
/* the bio has been remapped so dispatch it */
trace_block_bio_remap(clone, bio_dev(io->orig_bio), sector);
ret = submit_bio_noacct(clone);
submit_bio_noacct(clone);
break;
case DM_MAPIO_KILL:
if (unlikely(swap_bios_limit(ti, clone))) {
@@ -1248,8 +1247,6 @@ static blk_qc_t __map_bio(struct dm_target_io *tio)
DMWARN("unimplemented target map return value: %d", r);
BUG();
}

return ret;
}

static void bio_setup_sector(struct bio *bio, sector_t sector, unsigned len)
@@ -1336,7 +1333,7 @@ static void alloc_multiple_bios(struct bio_list *blist, struct clone_info *ci,
}
}

static blk_qc_t __clone_and_map_simple_bio(struct clone_info *ci,
static void __clone_and_map_simple_bio(struct clone_info *ci,
struct dm_target_io *tio, unsigned *len)
{
struct bio *clone = &tio->clone;
@@ -1346,8 +1343,7 @@ static blk_qc_t __clone_and_map_simple_bio(struct clone_info *ci,
__bio_clone_fast(clone, ci->bio);
if (len)
bio_setup_sector(clone, ci->sector, *len);

return __map_bio(tio);
__map_bio(tio);
}

static void __send_duplicate_bios(struct clone_info *ci, struct dm_target *ti,
@@ -1361,7 +1357,7 @@ static void __send_duplicate_bios(struct clone_info *ci, struct dm_target *ti,

while ((bio = bio_list_pop(&blist))) {
tio = container_of(bio, struct dm_target_io, clone);
(void) __clone_and_map_simple_bio(ci, tio, len);
__clone_and_map_simple_bio(ci, tio, len);
}
}

@@ -1405,7 +1401,7 @@ static int __clone_and_map_data_bio(struct clone_info *ci, struct dm_target *ti,
free_tio(tio);
return r;
}
(void) __map_bio(tio);
__map_bio(tio);

return 0;
}
@@ -1520,11 +1516,10 @@ static void init_clone_info(struct clone_info *ci, struct mapped_device *md,
/*
* Entry point to split a bio into clones and submit them to the targets.
*/
static blk_qc_t __split_and_process_bio(struct mapped_device *md,
static void __split_and_process_bio(struct mapped_device *md,
struct dm_table *map, struct bio *bio)
{
struct clone_info ci;
blk_qc_t ret = BLK_QC_T_NONE;
int error = 0;

init_clone_info(&ci, md, map, bio);
@@ -1567,19 +1562,17 @@ static blk_qc_t __split_and_process_bio(struct mapped_device *md,

bio_chain(b, bio);
trace_block_split(b, bio->bi_iter.bi_sector);
ret = submit_bio_noacct(bio);
submit_bio_noacct(bio);
}
}

/* drop the extra reference count */
dm_io_dec_pending(ci.io, errno_to_blk_status(error));
return ret;
}

static blk_qc_t dm_submit_bio(struct bio *bio)
static void dm_submit_bio(struct bio *bio)
{
struct mapped_device *md = bio->bi_bdev->bd_disk->private_data;
blk_qc_t ret = BLK_QC_T_NONE;
int srcu_idx;
struct dm_table *map;

@@ -1609,10 +1602,9 @@ static blk_qc_t dm_submit_bio(struct bio *bio)
if (is_abnormal_io(bio))
blk_queue_split(&bio);

ret = __split_and_process_bio(md, map, bio);
__split_and_process_bio(md, map, bio);
out:
dm_put_live_table(md, srcu_idx);
return ret;
}

/*-----------------------------------------------------------------

Some files were not shown because too many files have changed in this diff.