Merge branch 'for-4.13/block' of git://git.kernel.dk/linux-block
Pull core block/IO updates from Jens Axboe:
"This is the main pull request for the block layer for 4.13. Not a huge
round in terms of features, but there's a lot of churn related to some
core cleanups.
Note this depends on the UUID tree pull request, that Christoph
already sent out.
This pull request contains:
- A series from Christoph, unifying the error/stats codes in the
block layer. We now use blk_status_t everywhere, instead of using
different schemes for different places.
- Also from Christoph, some cleanups around request allocation and IO
scheduler interactions in blk-mq.
- And yet another series from Christoph, cleaning up how we handle
and do bounce buffering in the block layer.
- A blk-mq debugfs series from Bart, further improving on the support
we have for exporting internal information to aid debugging IO
hangs or stalls.
- Also from Bart, a series that cleans up the request initialization
differences across types of devices.
- A series from Goldwyn Rodrigues, allowing the block layer to return
failure if we will block and the user asked for non-blocking.
- A patch from Hannes adding support for setting a loop device's block
size to that of the underlying device.
- Two series of patches from Javier, fixing various issues with
lightnvm, particularly around pblk.
- A series from me, adding support for write hints. This comes with
NVMe support as well, so applications can help guide data placement
on flash to improve performance and latency and reduce write
amplification.
- A series from Ming, improving and hardening blk-mq support for
stopping/starting and quiescing hardware queues.
- Two pull requests for NVMe updates. Nothing major on the feature
side, but lots of cleanups and bug fixes. From the usual crew.
- A series from Neil Brown, greatly improving the bio rescue set
support. Most notably, this kills the bio rescue workqueues when we
don't really need them.
- Lots of other little bug fixes that are all over the place"
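The blk_status_t unification called out above is the change that touches the most files in the diff below: request and bio completion paths stop passing errno-style ints and instead carry block-layer status codes, with blk_status_to_errno()/errno_to_blk_status() used at the boundaries. A minimal sketch of the new calling convention follows; the driver and function names are hypothetical, and only the blk_status_t type, the BLK_STS_* codes and the conversion helper are taken from the series itself.

#include <linux/blkdev.h>
#include <linux/blk_types.h>

/*
 * Hedged sketch, not code from this merge: a hypothetical driver
 * completing I/O after the blk_status_t conversion.  Completion
 * interfaces now take block-layer status codes instead of errnos.
 */
static void mydrv_complete(struct request *rq, struct bio *bio, bool failed)
{
	blk_status_t sts = failed ? BLK_STS_IOERR : BLK_STS_OK;

	if (rq)					/* request-based completion */
		blk_end_request_all(rq, sts);	/* used to take 0 / -EIO */

	if (bio) {				/* bio-based completion */
		bio->bi_status = sts;		/* replaces bio->bi_error */
		bio_endio(bio);
	}

	if (sts != BLK_STS_OK)
		pr_warn("mydrv: I/O failed: errno %d\n",
			blk_status_to_errno(sts));
}

The per-driver conversions visible in the hunks below (s390 eadm/scm, UML ubd, the flush machinery, bio integrity) all follow this same pattern.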
* 'for-4.13/block' of git://git.kernel.dk/linux-block: (217 commits)
lightnvm: pblk: set line bitmap check under debug
lightnvm: pblk: verify that cache read is still valid
lightnvm: pblk: add initialization check
lightnvm: pblk: remove target using async. I/Os
lightnvm: pblk: use vmalloc for GC data buffer
lightnvm: pblk: use right metadata buffer for recovery
lightnvm: pblk: schedule if data is not ready
lightnvm: pblk: remove unused return variable
lightnvm: pblk: fix double-free on pblk init
lightnvm: pblk: fix bad le64 assignations
nvme: Makefile: remove dead build rule
blk-mq: map all HWQ also in hyperthreaded system
nvmet-rdma: register ib_client to not deadlock in device removal
nvme_fc: fix error recovery on link down.
nvmet_fc: fix crashes on bad opcodes
nvme_fc: Fix crash when nvme controller connection fails.
nvme_fc: replace ioabort msleep loop with completion
nvme_fc: fix double calls to nvme_cleanup_cmd()
nvme-fabrics: verify that a controller returns the correct NQN
nvme: simplify nvme_dev_attrs_are_visible
...
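The write hint support listed above shows up in the hunks below as a bi_write_hint field that is copied when a bio is cloned and checked before bios or requests are merged. As a rough illustration of how an application might end up supplying such a hint, here is a hedged userspace sketch: the fcntl() F_SET_RW_HINT interface and the RWH_WRITE_LIFE_* lifetime constants are assumptions here (they belong to the companion fs/uapi work, not to the block hunks shown on this page), and the fallback values below should be checked against <linux/fcntl.h>.

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

#ifndef F_SET_RW_HINT
#define F_SET_RW_HINT        1036  /* assumed: F_LINUX_SPECIFIC_BASE + 12 */
#endif
#ifndef RWH_WRITE_LIFE_SHORT
#define RWH_WRITE_LIFE_SHORT 2     /* assumed value from <linux/fcntl.h> */
#endif

int main(void)
{
	uint64_t hint = RWH_WRITE_LIFE_SHORT;	/* "these writes are short-lived" */
	int fd = open("journal.log", O_WRONLY | O_CREAT | O_APPEND, 0644);

	if (fd < 0)
		return 1;

	/* Tag this file's writes with a lifetime hint; kernels without
	 * write-hint support reject the fcntl with EINVAL. */
	if (fcntl(fd, F_SET_RW_HINT, &hint) < 0)
		perror("F_SET_RW_HINT");

	if (write(fd, "entry\n", 6) < 0)
		perror("write");
	close(fd);
	return 0;
}

Inside the kernel the hint then travels with each bio and request, which is why the merge code in the hunks below refuses to combine requests carrying different hints.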
@@ -632,7 +632,7 @@ to i/o submission, if the bio fields are likely to be accessed after the
 i/o is issued (since the bio may otherwise get freed in case i/o completion
 happens in the meantime).
 
-The bio_clone() routine may be used to duplicate a bio, where the clone
+The bio_clone_fast() routine may be used to duplicate a bio, where the clone
 shares the bio_vec_list with the original bio (i.e. both point to the
 same bio_vec_list). This would typically be used for splitting i/o requests
 in lvm or md.
@@ -3,6 +3,7 @@
 
 #include <linux/types.h>
 #include <linux/device.h>
+#include <linux/blkdev.h>
 
 struct arqb {
 	u64 data;
@@ -105,13 +106,14 @@ struct scm_driver {
 	int (*probe) (struct scm_device *scmdev);
 	int (*remove) (struct scm_device *scmdev);
 	void (*notify) (struct scm_device *scmdev, enum scm_event event);
-	void (*handler) (struct scm_device *scmdev, void *data, int error);
+	void (*handler) (struct scm_device *scmdev, void *data,
+			 blk_status_t error);
 };
 
 int scm_driver_register(struct scm_driver *scmdrv);
 void scm_driver_unregister(struct scm_driver *scmdrv);
 
 int eadm_start_aob(struct aob *aob);
-void scm_irq_handler(struct aob *aob, int error);
+void scm_irq_handler(struct aob *aob, blk_status_t error);
 
 #endif /* _ASM_S390_EADM_H */
@@ -534,7 +534,7 @@ static void ubd_handler(void)
 	for (count = 0; count < n/sizeof(struct io_thread_req *); count++) {
 		blk_end_request(
 			(*irq_req_buffer)[count]->req,
-			0,
+			BLK_STS_OK,
 			(*irq_req_buffer)[count]->length
 		);
 		kfree((*irq_req_buffer)[count]);
@@ -533,6 +533,7 @@ ssize_t badblocks_store(struct badblocks *bb, const char *page, size_t len,
 	case 3:
 		if (newline != '\n')
 			return -EINVAL;
+		/* fall through */
 	case 2:
 		if (length <= 0)
 			return -EINVAL;
@@ -725,8 +725,12 @@ static void bfq_updated_next_req(struct bfq_data *bfqd,
 }
 
 static void
-bfq_bfqq_resume_state(struct bfq_queue *bfqq, struct bfq_io_cq *bic)
+bfq_bfqq_resume_state(struct bfq_queue *bfqq, struct bfq_data *bfqd,
+		      struct bfq_io_cq *bic, bool bfq_already_existing)
 {
 	unsigned int old_wr_coeff = bfqq->wr_coeff;
+	bool busy = bfq_already_existing && bfq_bfqq_busy(bfqq);
 
 	if (bic->saved_idle_window)
 		bfq_mark_bfqq_idle_window(bfqq);
 	else
@@ -754,6 +758,14 @@ bfq_bfqq_resume_state(struct bfq_queue *bfqq, struct bfq_io_cq *bic)
 
 	/* make sure weight will be updated, however we got here */
 	bfqq->entity.prio_changed = 1;
+
+	if (likely(!busy))
+		return;
+
+	if (old_wr_coeff == 1 && bfqq->wr_coeff > 1)
+		bfqd->wr_busy_queues++;
+	else if (old_wr_coeff > 1 && bfqq->wr_coeff == 1)
+		bfqd->wr_busy_queues--;
 }
 
 static int bfqq_process_refs(struct bfq_queue *bfqq)
@@ -4290,10 +4302,16 @@ static void bfq_put_rq_priv_body(struct bfq_queue *bfqq)
 	bfq_put_queue(bfqq);
 }
 
-static void bfq_put_rq_private(struct request_queue *q, struct request *rq)
+static void bfq_finish_request(struct request *rq)
 {
-	struct bfq_queue *bfqq = RQ_BFQQ(rq);
-	struct bfq_data *bfqd = bfqq->bfqd;
+	struct bfq_queue *bfqq;
+	struct bfq_data *bfqd;
+
+	if (!rq->elv.icq)
+		return;
+
+	bfqq = RQ_BFQQ(rq);
+	bfqd = bfqq->bfqd;
 
 	if (rq->rq_flags & RQF_STARTED)
 		bfqg_stats_update_completion(bfqq_group(bfqq),
@@ -4324,7 +4342,7 @@ static void bfq_put_rq_private(struct request_queue *q, struct request *rq)
 	 */
 
 	if (!RB_EMPTY_NODE(&rq->rb_node))
-		bfq_remove_request(q, rq);
+		bfq_remove_request(rq->q, rq);
 	bfq_put_rq_priv_body(bfqq);
 }
 
@@ -4394,21 +4412,22 @@ static struct bfq_queue *bfq_get_bfqq_handle_split(struct bfq_data *bfqd,
 /*
  * Allocate bfq data structures associated with this request.
  */
-static int bfq_get_rq_private(struct request_queue *q, struct request *rq,
-			      struct bio *bio)
+static void bfq_prepare_request(struct request *rq, struct bio *bio)
 {
+	struct request_queue *q = rq->q;
 	struct bfq_data *bfqd = q->elevator->elevator_data;
-	struct bfq_io_cq *bic = icq_to_bic(rq->elv.icq);
+	struct bfq_io_cq *bic;
 	const int is_sync = rq_is_sync(rq);
 	struct bfq_queue *bfqq;
 	bool new_queue = false;
-	bool split = false;
+	bool bfqq_already_existing = false, split = false;
 
+	if (!rq->elv.icq)
+		return;
+	bic = icq_to_bic(rq->elv.icq);
 
 	spin_lock_irq(&bfqd->lock);
 
-	if (!bic)
-		goto queue_fail;
-
 	bfq_check_ioprio_change(bic, bio);
 
 	bfq_bic_update_cgroup(bic, bio);
@@ -4432,6 +4451,8 @@ static int bfq_get_rq_private(struct request_queue *q, struct request *rq,
 			bfqq = bfq_get_bfqq_handle_split(bfqd, bic, bio,
 							 true, is_sync,
 							 NULL);
+		else
+			bfqq_already_existing = true;
 		}
 	}
 
@@ -4457,7 +4478,8 @@ static int bfq_get_rq_private(struct request_queue *q, struct request *rq,
 			 * queue: restore the idle window and the
 			 * possible weight raising period.
 			 */
-			bfq_bfqq_resume_state(bfqq, bic);
+			bfq_bfqq_resume_state(bfqq, bfqd, bic,
+					      bfqq_already_existing);
 		}
 	}
 
@@ -4465,13 +4487,6 @@ static int bfq_get_rq_private(struct request_queue *q, struct request *rq,
 		bfq_handle_burst(bfqd, bfqq);
 
 	spin_unlock_irq(&bfqd->lock);
-
-	return 0;
-
-queue_fail:
-	spin_unlock_irq(&bfqd->lock);
-
-	return 1;
 }
 
 static void bfq_idle_slice_timer_body(struct bfq_queue *bfqq)
@@ -4950,8 +4965,8 @@ static struct elv_fs_entry bfq_attrs[] = {
 
 static struct elevator_type iosched_bfq_mq = {
 	.ops.mq = {
-		.get_rq_priv		= bfq_get_rq_private,
-		.put_rq_priv		= bfq_put_rq_private,
+		.prepare_request	= bfq_prepare_request,
+		.finish_request		= bfq_finish_request,
 		.exit_icq		= bfq_exit_icq,
 		.insert_requests	= bfq_insert_requests,
 		.dispatch_request	= bfq_dispatch_request,
@@ -224,7 +224,7 @@ static inline unsigned int bio_integrity_bytes(struct blk_integrity *bi,
  * @bio:	bio to generate/verify integrity metadata for
  * @proc_fn:	Pointer to the relevant processing function
  */
-static int bio_integrity_process(struct bio *bio,
+static blk_status_t bio_integrity_process(struct bio *bio,
 				 integrity_processing_fn *proc_fn)
 {
 	struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev);
@@ -232,7 +232,7 @@ static int bio_integrity_process(struct bio *bio,
 	struct bvec_iter bviter;
 	struct bio_vec bv;
 	struct bio_integrity_payload *bip = bio_integrity(bio);
-	unsigned int ret = 0;
+	blk_status_t ret = BLK_STS_OK;
 	void *prot_buf = page_address(bip->bip_vec->bv_page) +
 		bip->bip_vec->bv_offset;
 
@@ -369,7 +369,7 @@ static void bio_integrity_verify_fn(struct work_struct *work)
 	struct bio *bio = bip->bip_bio;
 	struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev);
 
-	bio->bi_error = bio_integrity_process(bio, bi->profile->verify_fn);
+	bio->bi_status = bio_integrity_process(bio, bi->profile->verify_fn);
 
 	/* Restore original bio completion handler */
 	bio->bi_end_io = bip->bip_end_io;
@@ -398,7 +398,7 @@ void bio_integrity_endio(struct bio *bio)
 	 * integrity metadata. Restore original bio end_io handler
 	 * and run it.
 	 */
-	if (bio->bi_error) {
+	if (bio->bi_status) {
 		bio->bi_end_io = bip->bip_end_io;
 		bio_endio(bio);
@@ -315,8 +315,8 @@ static struct bio *__bio_chain_endio(struct bio *bio)
 {
 	struct bio *parent = bio->bi_private;
 
-	if (!parent->bi_error)
-		parent->bi_error = bio->bi_error;
+	if (!parent->bi_status)
+		parent->bi_status = bio->bi_status;
 	bio_put(bio);
 	return parent;
 }
@@ -369,6 +369,8 @@ static void punt_bios_to_rescuer(struct bio_set *bs)
 	struct bio_list punt, nopunt;
 	struct bio *bio;
 
+	if (WARN_ON_ONCE(!bs->rescue_workqueue))
+		return;
 	/*
 	 * In order to guarantee forward progress we must punt only bios that
 	 * were allocated from this bio_set; otherwise, if there was a bio on
@@ -480,7 +482,8 @@ struct bio *bio_alloc_bioset(gfp_t gfp_mask, unsigned int nr_iovecs,
 
 		if (current->bio_list &&
 		    (!bio_list_empty(&current->bio_list[0]) ||
-		     !bio_list_empty(&current->bio_list[1])))
+		     !bio_list_empty(&current->bio_list[1])) &&
+		    bs->rescue_workqueue)
 			gfp_mask &= ~__GFP_DIRECT_RECLAIM;
 
 		p = mempool_alloc(bs->bio_pool, gfp_mask);
@@ -550,7 +553,7 @@ EXPORT_SYMBOL(zero_fill_bio);
  *
  * Description:
  *   Put a reference to a &struct bio, either one you have gotten with
- *   bio_alloc, bio_get or bio_clone. The last put of a bio will free it.
+ *   bio_alloc, bio_get or bio_clone_*. The last put of a bio will free it.
  **/
 void bio_put(struct bio *bio)
 {
@@ -599,6 +602,7 @@ void __bio_clone_fast(struct bio *bio, struct bio *bio_src)
 	bio->bi_bdev = bio_src->bi_bdev;
 	bio_set_flag(bio, BIO_CLONED);
 	bio->bi_opf = bio_src->bi_opf;
+	bio->bi_write_hint = bio_src->bi_write_hint;
 	bio->bi_iter = bio_src->bi_iter;
 	bio->bi_io_vec = bio_src->bi_io_vec;
 
@@ -682,6 +686,7 @@ struct bio *bio_clone_bioset(struct bio *bio_src, gfp_t gfp_mask,
 		return NULL;
 	bio->bi_bdev = bio_src->bi_bdev;
 	bio->bi_opf = bio_src->bi_opf;
+	bio->bi_write_hint = bio_src->bi_write_hint;
 	bio->bi_iter.bi_sector = bio_src->bi_iter.bi_sector;
 	bio->bi_iter.bi_size = bio_src->bi_iter.bi_size;
 
@@ -924,7 +929,7 @@ static void submit_bio_wait_endio(struct bio *bio)
 {
 	struct submit_bio_ret *ret = bio->bi_private;
 
-	ret->error = bio->bi_error;
+	ret->error = blk_status_to_errno(bio->bi_status);
 	complete(&ret->event);
 }
 
@@ -1823,8 +1828,8 @@ again:
 	}
 
 	if (bio->bi_bdev && bio_flagged(bio, BIO_TRACE_COMPLETION)) {
-		trace_block_bio_complete(bdev_get_queue(bio->bi_bdev),
-					 bio, bio->bi_error);
+		trace_block_bio_complete(bdev_get_queue(bio->bi_bdev), bio,
+					 blk_status_to_errno(bio->bi_status));
 		bio_clear_flag(bio, BIO_TRACE_COMPLETION);
 	}
 
@@ -1927,9 +1932,29 @@ void bioset_free(struct bio_set *bs)
 }
 EXPORT_SYMBOL(bioset_free);
 
-static struct bio_set *__bioset_create(unsigned int pool_size,
-				       unsigned int front_pad,
-				       bool create_bvec_pool)
+/**
+ * bioset_create  - Create a bio_set
+ * @pool_size:	Number of bio and bio_vecs to cache in the mempool
+ * @front_pad:	Number of bytes to allocate in front of the returned bio
+ * @flags:	Flags to modify behavior, currently %BIOSET_NEED_BVECS
+ *		and %BIOSET_NEED_RESCUER
+ *
+ * Description:
+ *    Set up a bio_set to be used with @bio_alloc_bioset. Allows the caller
+ *    to ask for a number of bytes to be allocated in front of the bio.
+ *    Front pad allocation is useful for embedding the bio inside
+ *    another structure, to avoid allocating extra data to go with the bio.
+ *    Note that the bio must be embedded at the END of that structure always,
+ *    or things will break badly.
+ *    If %BIOSET_NEED_BVECS is set in @flags, a separate pool will be allocated
+ *    for allocating iovecs.  This pool is not needed e.g. for bio_clone_fast().
+ *    If %BIOSET_NEED_RESCUER is set, a workqueue is created which can be used to
+ *    dispatch queued requests when the mempool runs out of space.
+ *
+ */
+struct bio_set *bioset_create(unsigned int pool_size,
+			      unsigned int front_pad,
+			      int flags)
 {
 	unsigned int back_pad = BIO_INLINE_VECS * sizeof(struct bio_vec);
 	struct bio_set *bs;
@@ -1954,12 +1979,15 @@ static struct bio_set *__bioset_create(unsigned int pool_size,
 	if (!bs->bio_pool)
 		goto bad;
 
-	if (create_bvec_pool) {
+	if (flags & BIOSET_NEED_BVECS) {
 		bs->bvec_pool = biovec_create_pool(pool_size);
 		if (!bs->bvec_pool)
 			goto bad;
 	}
 
+	if (!(flags & BIOSET_NEED_RESCUER))
+		return bs;
+
 	bs->rescue_workqueue = alloc_workqueue("bioset", WQ_MEM_RECLAIM, 0);
 	if (!bs->rescue_workqueue)
 		goto bad;
@@ -1969,41 +1997,8 @@ bad:
 	bioset_free(bs);
 	return NULL;
 }
-
-/**
- * bioset_create  - Create a bio_set
- * @pool_size:	Number of bio and bio_vecs to cache in the mempool
- * @front_pad:	Number of bytes to allocate in front of the returned bio
- *
- * Description:
- *    Set up a bio_set to be used with @bio_alloc_bioset. Allows the caller
- *    to ask for a number of bytes to be allocated in front of the bio.
- *    Front pad allocation is useful for embedding the bio inside
- *    another structure, to avoid allocating extra data to go with the bio.
- *    Note that the bio must be embedded at the END of that structure always,
- *    or things will break badly.
- */
-struct bio_set *bioset_create(unsigned int pool_size, unsigned int front_pad)
-{
-	return __bioset_create(pool_size, front_pad, true);
-}
 EXPORT_SYMBOL(bioset_create);
-
-/**
- * bioset_create_nobvec  - Create a bio_set without bio_vec mempool
- * @pool_size:	Number of bio to cache in the mempool
- * @front_pad:	Number of bytes to allocate in front of the returned bio
- *
- * Description:
- *    Same functionality as bioset_create() except that mempool is not
- *    created for bio_vecs. Saving some memory for bio_clone_fast() users.
- */
-struct bio_set *bioset_create_nobvec(unsigned int pool_size, unsigned int front_pad)
-{
-	return __bioset_create(pool_size, front_pad, false);
-}
-EXPORT_SYMBOL(bioset_create_nobvec);
 
 #ifdef CONFIG_BLK_CGROUP
 
 /**
@@ -2118,7 +2113,7 @@ static int __init init_bio(void)
 	bio_integrity_init();
 	biovec_init_slabs();
 
-	fs_bio_set = bioset_create(BIO_POOL_SIZE, 0);
+	fs_bio_set = bioset_create(BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS);
 	if (!fs_bio_set)
 		panic("bio: can't allocate bios\n");
 
[diff for one file not shown: +224 insertions, -107 deletions — suppressed by the viewer because it is too large]
@@ -16,7 +16,7 @@
  * @rq: request to complete
  * @error: end I/O status of the request
  */
-static void blk_end_sync_rq(struct request *rq, int error)
+static void blk_end_sync_rq(struct request *rq, blk_status_t error)
 {
 	struct completion *waiting = rq->end_io_data;
 
@@ -69,7 +69,7 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
 
 	if (unlikely(blk_queue_dying(q))) {
 		rq->rq_flags |= RQF_QUIET;
-		__blk_end_request_all(rq, -ENXIO);
+		__blk_end_request_all(rq, BLK_STS_IOERR);
 		spin_unlock_irq(q->queue_lock);
 		return;
 	}
@@ -164,7 +164,7 @@ static bool blk_flush_queue_rq(struct request *rq, bool add_front)
  */
 static bool blk_flush_complete_seq(struct request *rq,
 				   struct blk_flush_queue *fq,
-				   unsigned int seq, int error)
+				   unsigned int seq, blk_status_t error)
 {
 	struct request_queue *q = rq->q;
 	struct list_head *pending = &fq->flush_queue[fq->flush_pending_idx];
@@ -216,7 +216,7 @@ static bool blk_flush_complete_seq(struct request *rq,
 	return kicked | queued;
 }
 
-static void flush_end_io(struct request *flush_rq, int error)
+static void flush_end_io(struct request *flush_rq, blk_status_t error)
 {
 	struct request_queue *q = flush_rq->q;
 	struct list_head *running;
@@ -341,11 +341,13 @@ static bool blk_kick_flush(struct request_queue *q, struct blk_flush_queue *fq)
 	return blk_flush_queue_rq(flush_rq, false);
 }
 
-static void flush_data_end_io(struct request *rq, int error)
+static void flush_data_end_io(struct request *rq, blk_status_t error)
 {
 	struct request_queue *q = rq->q;
 	struct blk_flush_queue *fq = blk_get_flush_queue(q, NULL);
 
+	lockdep_assert_held(q->queue_lock);
+
 	/*
 	 * Updating q->in_flight[] here for making this tag usable
 	 * early. Because in blk_queue_start_tag(),
@@ -382,7 +384,7 @@ static void flush_data_end_io(struct request *rq, int error)
 	blk_run_queue_async(q);
 }
 
-static void mq_flush_data_end_io(struct request *rq, int error)
+static void mq_flush_data_end_io(struct request *rq, blk_status_t error)
 {
 	struct request_queue *q = rq->q;
 	struct blk_mq_hw_ctx *hctx;
@@ -411,9 +413,6 @@ static void mq_flush_data_end_io(struct request *rq, int error)
  * or __blk_mq_run_hw_queue() to dispatch request.
  * @rq is being submitted.  Analyze what needs to be done and put it on the
  * right queue.
- *
- * CONTEXT:
- *     spin_lock_irq(q->queue_lock) in !mq case
  */
 void blk_insert_flush(struct request *rq)
 {
@@ -422,6 +421,9 @@ void blk_insert_flush(struct request *rq)
 	unsigned int policy = blk_flush_policy(fflags, rq);
 	struct blk_flush_queue *fq = blk_get_flush_queue(q, rq->mq_ctx);
 
+	if (!q->mq_ops)
+		lockdep_assert_held(q->queue_lock);
+
 	/*
 	 * @policy now records what operations need to be done. Adjust
 	 * REQ_PREFLUSH and FUA for the driver.
@@ -384,9 +384,9 @@ static struct kobj_type integrity_ktype = {
 	.sysfs_ops	= &integrity_ops,
 };
 
-static int blk_integrity_nop_fn(struct blk_integrity_iter *iter)
+static blk_status_t blk_integrity_nop_fn(struct blk_integrity_iter *iter)
 {
-	return 0;
+	return BLK_STS_OK;
 }
 
 static const struct blk_integrity_profile nop_profile = {
@@ -16,6 +16,8 @@
  */
 int blk_rq_append_bio(struct request *rq, struct bio *bio)
 {
+	blk_queue_bounce(rq->q, &bio);
+
 	if (!rq->bio) {
 		blk_rq_bio_prep(rq->q, rq, bio);
 	} else {
@@ -72,15 +74,13 @@ static int __blk_rq_map_user_iov(struct request *rq,
 		map_data->offset += bio->bi_iter.bi_size;
 
 	orig_bio = bio;
-	blk_queue_bounce(q, &bio);
 
 	/*
 	 * We link the bounce buffer in and could have to traverse it
 	 * later so we have to get a ref to prevent it from being freed
 	 */
-	bio_get(bio);
-
 	ret = blk_rq_append_bio(rq, bio);
+	bio_get(bio);
 	if (ret) {
 		bio_endio(bio);
 		__blk_rq_unmap_user(orig_bio);
@@ -249,7 +249,6 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
 		return ret;
 	}
 
-	blk_queue_bounce(q, &rq->bio);
 	return 0;
 }
 EXPORT_SYMBOL(blk_rq_map_kern);
@@ -108,30 +108,8 @@ static struct bio *blk_bio_segment_split(struct request_queue *q,
 	bool do_split = true;
 	struct bio *new = NULL;
 	const unsigned max_sectors = get_max_io_size(q, bio);
-	unsigned bvecs = 0;
 
 	bio_for_each_segment(bv, bio, iter) {
-		/*
-		 * With arbitrary bio size, the incoming bio may be very
-		 * big. We have to split the bio into small bios so that
-		 * each holds at most BIO_MAX_PAGES bvecs because
-		 * bio_clone() can fail to allocate big bvecs.
-		 *
-		 * It should have been better to apply the limit per
-		 * request queue in which bio_clone() is involved,
-		 * instead of globally. The biggest blocker is the
-		 * bio_clone() in bio bounce.
-		 *
-		 * If bio is splitted by this reason, we should have
-		 * allowed to continue bios merging, but don't do
-		 * that now for making the change simple.
-		 *
-		 * TODO: deal with bio bounce's bio_clone() gracefully
-		 * and convert the global limit into per-queue limit.
-		 */
-		if (bvecs++ >= BIO_MAX_PAGES)
-			goto split;
-
 		/*
 		 * If the queue doesn't support SG gaps and adding this
 		 * offset would create a gap, disallow it.
@@ -202,8 +180,7 @@ split:
 	return do_split ? new : NULL;
 }
 
-void blk_queue_split(struct request_queue *q, struct bio **bio,
-		     struct bio_set *bs)
+void blk_queue_split(struct request_queue *q, struct bio **bio)
 {
 	struct bio *split, *res;
 	unsigned nsegs;
@@ -211,13 +188,13 @@ void blk_queue_split(struct request_queue *q, struct bio **bio,
 	switch (bio_op(*bio)) {
 	case REQ_OP_DISCARD:
 	case REQ_OP_SECURE_ERASE:
-		split = blk_bio_discard_split(q, *bio, bs, &nsegs);
+		split = blk_bio_discard_split(q, *bio, q->bio_split, &nsegs);
 		break;
 	case REQ_OP_WRITE_ZEROES:
-		split = blk_bio_write_zeroes_split(q, *bio, bs, &nsegs);
+		split = blk_bio_write_zeroes_split(q, *bio, q->bio_split, &nsegs);
 		break;
 	case REQ_OP_WRITE_SAME:
-		split = blk_bio_write_same_split(q, *bio, bs, &nsegs);
+		split = blk_bio_write_same_split(q, *bio, q->bio_split, &nsegs);
 		break;
 	default:
 		split = blk_bio_segment_split(q, *bio, q->bio_split, &nsegs);
@@ -671,6 +648,9 @@ static void blk_account_io_merge(struct request *req)
 static struct request *attempt_merge(struct request_queue *q,
 				     struct request *req, struct request *next)
 {
+	if (!q->mq_ops)
+		lockdep_assert_held(q->queue_lock);
+
 	if (!rq_mergeable(req) || !rq_mergeable(next))
 		return NULL;
 
@@ -692,6 +672,13 @@ static struct request *attempt_merge(struct request_queue *q,
 	    !blk_write_same_mergeable(req->bio, next->bio))
 		return NULL;
 
+	/*
+	 * Don't allow merge of different write hints, or for a hint with
+	 * non-hint IO.
+	 */
+	if (req->write_hint != next->write_hint)
+		return NULL;
+
 	/*
 	 * If we are allowed to merge, then append bio list
 	 * from next to rq and release next. merge_requests_fn
@@ -811,6 +798,13 @@ bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
 	    !blk_write_same_mergeable(rq->bio, bio))
 		return false;
 
+	/*
+	 * Don't allow merge of different write hints, or for a hint with
+	 * non-hint IO.
+	 */
+	if (rq->write_hint != bio->bi_write_hint)
+		return false;
+
 	return true;
 }
@@ -14,10 +14,15 @@
 #include "blk.h"
 #include "blk-mq.h"
 
-static int cpu_to_queue_index(unsigned int nr_cpus, unsigned int nr_queues,
-			      const int cpu)
+static int cpu_to_queue_index(unsigned int nr_queues, const int cpu,
+			      const struct cpumask *online_mask)
 {
-	return cpu * nr_queues / nr_cpus;
+	/*
+	 * Non online CPU will be mapped to queue index 0.
+	 */
+	if (!cpumask_test_cpu(cpu, online_mask))
+		return 0;
+	return cpu % nr_queues;
 }
 
 static int get_first_sibling(unsigned int cpu)
@@ -36,55 +41,26 @@ int blk_mq_map_queues(struct blk_mq_tag_set *set)
 	unsigned int *map = set->mq_map;
 	unsigned int nr_queues = set->nr_hw_queues;
 	const struct cpumask *online_mask = cpu_online_mask;
-	unsigned int i, nr_cpus, nr_uniq_cpus, queue, first_sibling;
-	cpumask_var_t cpus;
+	unsigned int cpu, first_sibling;
 
-	if (!alloc_cpumask_var(&cpus, GFP_ATOMIC))
-		return -ENOMEM;
-
-	cpumask_clear(cpus);
-	nr_cpus = nr_uniq_cpus = 0;
-	for_each_cpu(i, online_mask) {
-		nr_cpus++;
-		first_sibling = get_first_sibling(i);
-		if (!cpumask_test_cpu(first_sibling, cpus))
-			nr_uniq_cpus++;
-		cpumask_set_cpu(i, cpus);
+	for_each_possible_cpu(cpu) {
+		/*
+		 * First do sequential mapping between CPUs and queues.
+		 * In case we still have CPUs to map, and we have some number of
+		 * threads per cores then map sibling threads to the same queue for
+		 * performace optimizations.
+		 */
+		if (cpu < nr_queues) {
+			map[cpu] = cpu_to_queue_index(nr_queues, cpu, online_mask);
+		} else {
+			first_sibling = get_first_sibling(cpu);
+			if (first_sibling == cpu)
+				map[cpu] = cpu_to_queue_index(nr_queues, cpu, online_mask);
+			else
+				map[cpu] = map[first_sibling];
+		}
 	}
 
-	queue = 0;
-	for_each_possible_cpu(i) {
-		if (!cpumask_test_cpu(i, online_mask)) {
-			map[i] = 0;
-			continue;
-		}
-
-		/*
-		 * Easy case - we have equal or more hardware queues. Or
-		 * there are no thread siblings to take into account. Do
-		 * 1:1 if enough, or sequential mapping if less.
-		 */
-		if (nr_queues >= nr_cpus || nr_cpus == nr_uniq_cpus) {
-			map[i] = cpu_to_queue_index(nr_cpus, nr_queues, queue);
-			queue++;
-			continue;
-		}
-
-		/*
-		 * Less then nr_cpus queues, and we have some number of
-		 * threads per cores. Map sibling threads to the same
-		 * queue.
-		 */
-		first_sibling = get_first_sibling(i);
-		if (first_sibling == i) {
-			map[i] = cpu_to_queue_index(nr_uniq_cpus, nr_queues,
-						    queue);
-			queue++;
-		} else
-			map[i] = map[first_sibling];
-	}
-
-	free_cpumask_var(cpus);
 	return 0;
 }
 EXPORT_SYMBOL_GPL(blk_mq_map_queues);
@@ -114,10 +114,12 @@ static ssize_t queue_state_write(void *data, const char __user *buf,
 		blk_mq_run_hw_queues(q, true);
 	} else if (strcmp(op, "start") == 0) {
 		blk_mq_start_stopped_hw_queues(q, true);
+	} else if (strcmp(op, "kick") == 0) {
+		blk_mq_kick_requeue_list(q);
 	} else {
 		pr_err("%s: unsupported operation '%s'\n", __func__, op);
 inval:
-		pr_err("%s: use either 'run' or 'start'\n", __func__);
+		pr_err("%s: use 'run', 'start' or 'kick'\n", __func__);
 		return -EINVAL;
 	}
 	return count;
@@ -133,6 +135,29 @@ static void print_stat(struct seq_file *m, struct blk_rq_stat *stat)
 	}
 }
 
+static int queue_write_hint_show(void *data, struct seq_file *m)
+{
+	struct request_queue *q = data;
+	int i;
+
+	for (i = 0; i < BLK_MAX_WRITE_HINTS; i++)
+		seq_printf(m, "hint%d: %llu\n", i, q->write_hints[i]);
+
+	return 0;
+}
+
+static ssize_t queue_write_hint_store(void *data, const char __user *buf,
+				      size_t count, loff_t *ppos)
+{
+	struct request_queue *q = data;
+	int i;
+
+	for (i = 0; i < BLK_MAX_WRITE_HINTS; i++)
+		q->write_hints[i] = 0;
+
+	return count;
+}
+
 static int queue_poll_stat_show(void *data, struct seq_file *m)
 {
 	struct request_queue *q = data;
@@ -267,6 +292,14 @@ static const char *const rqf_name[] = {
 };
 #undef RQF_NAME
 
+#define RQAF_NAME(name) [REQ_ATOM_##name] = #name
+static const char *const rqaf_name[] = {
+	RQAF_NAME(COMPLETE),
+	RQAF_NAME(STARTED),
+	RQAF_NAME(POLL_SLEPT),
+};
+#undef RQAF_NAME
+
 int __blk_mq_debugfs_rq_show(struct seq_file *m, struct request *rq)
 {
 	const struct blk_mq_ops *const mq_ops = rq->q->mq_ops;
@@ -283,6 +316,8 @@ int __blk_mq_debugfs_rq_show(struct seq_file *m, struct request *rq)
 	seq_puts(m, ", .rq_flags=");
 	blk_flags_show(m, (__force unsigned int)rq->rq_flags, rqf_name,
 		       ARRAY_SIZE(rqf_name));
+	seq_puts(m, ", .atomic_flags=");
+	blk_flags_show(m, rq->atomic_flags, rqaf_name, ARRAY_SIZE(rqaf_name));
 	seq_printf(m, ", .tag=%d, .internal_tag=%d", rq->tag,
 		   rq->internal_tag);
 	if (mq_ops->show_rq)
@@ -298,6 +333,37 @@ int blk_mq_debugfs_rq_show(struct seq_file *m, void *v)
 }
 EXPORT_SYMBOL_GPL(blk_mq_debugfs_rq_show);
 
+static void *queue_requeue_list_start(struct seq_file *m, loff_t *pos)
+	__acquires(&q->requeue_lock)
+{
+	struct request_queue *q = m->private;
+
+	spin_lock_irq(&q->requeue_lock);
+	return seq_list_start(&q->requeue_list, *pos);
+}
+
+static void *queue_requeue_list_next(struct seq_file *m, void *v, loff_t *pos)
+{
+	struct request_queue *q = m->private;
+
+	return seq_list_next(v, &q->requeue_list, pos);
+}
+
+static void queue_requeue_list_stop(struct seq_file *m, void *v)
+	__releases(&q->requeue_lock)
+{
+	struct request_queue *q = m->private;
+
+	spin_unlock_irq(&q->requeue_lock);
+}
+
+static const struct seq_operations queue_requeue_list_seq_ops = {
+	.start	= queue_requeue_list_start,
+	.next	= queue_requeue_list_next,
+	.stop	= queue_requeue_list_stop,
+	.show	= blk_mq_debugfs_rq_show,
+};
+
 static void *hctx_dispatch_start(struct seq_file *m, loff_t *pos)
 	__acquires(&hctx->lock)
 {
@@ -329,6 +395,36 @@ static const struct seq_operations hctx_dispatch_seq_ops = {
 	.show	= blk_mq_debugfs_rq_show,
 };
 
+struct show_busy_params {
+	struct seq_file		*m;
+	struct blk_mq_hw_ctx	*hctx;
+};
+
+/*
+ * Note: the state of a request may change while this function is in progress,
+ * e.g. due to a concurrent blk_mq_finish_request() call.
+ */
+static void hctx_show_busy_rq(struct request *rq, void *data, bool reserved)
+{
+	const struct show_busy_params *params = data;
+
+	if (blk_mq_map_queue(rq->q, rq->mq_ctx->cpu) == params->hctx &&
+	    test_bit(REQ_ATOM_STARTED, &rq->atomic_flags))
+		__blk_mq_debugfs_rq_show(params->m,
+					 list_entry_rq(&rq->queuelist));
+}
+
+static int hctx_busy_show(void *data, struct seq_file *m)
+{
+	struct blk_mq_hw_ctx *hctx = data;
+	struct show_busy_params params = { .m = m, .hctx = hctx };
+
+	blk_mq_tagset_busy_iter(hctx->queue->tag_set, hctx_show_busy_rq,
+				&params);
+
+	return 0;
+}
+
 static int hctx_ctx_map_show(void *data, struct seq_file *m)
 {
 	struct blk_mq_hw_ctx *hctx = data;
@@ -655,7 +751,9 @@ const struct file_operations blk_mq_debugfs_fops = {
 
 static const struct blk_mq_debugfs_attr blk_mq_debugfs_queue_attrs[] = {
 	{"poll_stat", 0400, queue_poll_stat_show},
+	{"requeue_list", 0400, .seq_ops = &queue_requeue_list_seq_ops},
 	{"state", 0600, queue_state_show, queue_state_write},
+	{"write_hints", 0600, queue_write_hint_show, queue_write_hint_store},
 	{},
 };
 
@@ -663,6 +761,7 @@ static const struct blk_mq_debugfs_attr blk_mq_debugfs_hctx_attrs[] = {
 	{"state", 0400, hctx_state_show},
 	{"flags", 0400, hctx_flags_show},
 	{"dispatch", 0400, .seq_ops = &hctx_dispatch_seq_ops},
+	{"busy", 0400, hctx_busy_show},
 	{"ctx_map", 0400, hctx_ctx_map_show},
 	{"tags", 0400, hctx_tags_show},
 	{"tags_bitmap", 0400, hctx_tags_bitmap_show},
@@ -31,11 +31,10 @@ void blk_mq_sched_free_hctx_data(struct request_queue *q,
 }
 EXPORT_SYMBOL_GPL(blk_mq_sched_free_hctx_data);
 
-static void __blk_mq_sched_assign_ioc(struct request_queue *q,
-				      struct request *rq,
-				      struct bio *bio,
-				      struct io_context *ioc)
+void blk_mq_sched_assign_ioc(struct request *rq, struct bio *bio)
 {
+	struct request_queue *q = rq->q;
+	struct io_context *ioc = rq_ioc(bio);
 	struct io_cq *icq;
 
 	spin_lock_irq(q->queue_lock);
@@ -47,25 +46,8 @@ static void __blk_mq_sched_assign_ioc(struct request_queue *q,
 		if (!icq)
 			return;
 	}
 
+	get_io_context(icq->ioc);
 	rq->elv.icq = icq;
-	if (!blk_mq_sched_get_rq_priv(q, rq, bio)) {
-		rq->rq_flags |= RQF_ELVPRIV;
-		get_io_context(icq->ioc);
-		return;
-	}
-
-	rq->elv.icq = NULL;
-}
-
-static void blk_mq_sched_assign_ioc(struct request_queue *q,
-				    struct request *rq, struct bio *bio)
-{
-	struct io_context *ioc;
-
-	ioc = rq_ioc(bio);
-	if (ioc)
-		__blk_mq_sched_assign_ioc(q, rq, bio, ioc);
 }
 
 /*
@@ -107,71 +89,6 @@ static bool blk_mq_sched_restart_hctx(struct blk_mq_hw_ctx *hctx)
 	return false;
 }
 
-struct request *blk_mq_sched_get_request(struct request_queue *q,
-					 struct bio *bio,
-					 unsigned int op,
-					 struct blk_mq_alloc_data *data)
-{
-	struct elevator_queue *e = q->elevator;
-	struct request *rq;
-
-	blk_queue_enter_live(q);
-	data->q = q;
-	if (likely(!data->ctx))
-		data->ctx = blk_mq_get_ctx(q);
-	if (likely(!data->hctx))
-		data->hctx = blk_mq_map_queue(q, data->ctx->cpu);
-
-	if (e) {
-		data->flags |= BLK_MQ_REQ_INTERNAL;
-
-		/*
-		 * Flush requests are special and go directly to the
-		 * dispatch list.
-		 */
-		if (!op_is_flush(op) && e->type->ops.mq.get_request) {
-			rq = e->type->ops.mq.get_request(q, op, data);
-			if (rq)
-				rq->rq_flags |= RQF_QUEUED;
-		} else
-			rq = __blk_mq_alloc_request(data, op);
-	} else {
-		rq = __blk_mq_alloc_request(data, op);
-	}
-
-	if (rq) {
-		if (!op_is_flush(op)) {
-			rq->elv.icq = NULL;
-			if (e && e->type->icq_cache)
-				blk_mq_sched_assign_ioc(q, rq, bio);
-		}
-		data->hctx->queued++;
-		return rq;
-	}
-
-	blk_queue_exit(q);
-	return NULL;
-}
-
-void blk_mq_sched_put_request(struct request *rq)
-{
-	struct request_queue *q = rq->q;
-	struct elevator_queue *e = q->elevator;
-
-	if (rq->rq_flags & RQF_ELVPRIV) {
-		blk_mq_sched_put_rq_priv(rq->q, rq);
-		if (rq->elv.icq) {
-			put_io_context(rq->elv.icq->ioc);
-			rq->elv.icq = NULL;
-		}
-	}
-
-	if ((rq->rq_flags & RQF_QUEUED) && e && e->type->ops.mq.put_request)
-		e->type->ops.mq.put_request(rq);
-	else
-		blk_mq_finish_request(rq);
-}
-
 void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx)
 {
 	struct request_queue *q = hctx->queue;
@@ -180,7 +97,8 @@ void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx)
 	bool did_work = false;
 	LIST_HEAD(rq_list);
 
-	if (unlikely(blk_mq_hctx_stopped(hctx)))
+	/* RCU or SRCU read lock is needed before checking quiesced flag */
+	if (unlikely(blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(q)))
 		return;
 
 	hctx->run++;
@@ -260,19 +178,73 @@ bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio,
 }
 EXPORT_SYMBOL_GPL(blk_mq_sched_try_merge);
 
+/*
+ * Reverse check our software queue for entries that we could potentially
+ * merge with. Currently includes a hand-wavy stop count of 8, to not spend
+ * too much time checking for merges.
+ */
+static bool blk_mq_attempt_merge(struct request_queue *q,
+				 struct blk_mq_ctx *ctx, struct bio *bio)
+{
+	struct request *rq;
+	int checked = 8;
+
+	lockdep_assert_held(&ctx->lock);
+
+	list_for_each_entry_reverse(rq, &ctx->rq_list, queuelist) {
+		bool merged = false;
+
+		if (!checked--)
+			break;
+
+		if (!blk_rq_merge_ok(rq, bio))
+			continue;
+
+		switch (blk_try_merge(rq, bio)) {
+		case ELEVATOR_BACK_MERGE:
+			if (blk_mq_sched_allow_merge(q, rq, bio))
+				merged = bio_attempt_back_merge(q, rq, bio);
+			break;
+		case ELEVATOR_FRONT_MERGE:
+			if (blk_mq_sched_allow_merge(q, rq, bio))
+				merged = bio_attempt_front_merge(q, rq, bio);
+			break;
+		case ELEVATOR_DISCARD_MERGE:
+			merged = bio_attempt_discard_merge(q, rq, bio);
+			break;
+		default:
+			continue;
+		}
+
+		if (merged)
+			ctx->rq_merged++;
+		return merged;
+	}
+
+	return false;
+}
+
 bool __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio)
 {
 	struct elevator_queue *e = q->elevator;
+	struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
+	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);
+	bool ret = false;
 
-	if (e->type->ops.mq.bio_merge) {
-		struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
-		struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);
-
+	if (e && e->type->ops.mq.bio_merge) {
 		blk_mq_put_ctx(ctx);
 		return e->type->ops.mq.bio_merge(hctx, bio);
 	}
 
-	return false;
+	if (hctx->flags & BLK_MQ_F_SHOULD_MERGE) {
+		/* default per sw-queue merge */
+		spin_lock(&ctx->lock);
+		ret = blk_mq_attempt_merge(q, ctx, bio);
+		spin_unlock(&ctx->lock);
+	}
+
+	blk_mq_put_ctx(ctx);
+	return ret;
 }
 
 bool blk_mq_sched_try_insert_merge(struct request_queue *q, struct request *rq)
@@ -7,8 +7,7 @@
 void blk_mq_sched_free_hctx_data(struct request_queue *q,
 				 void (*exit)(struct blk_mq_hw_ctx *));
 
-struct request *blk_mq_sched_get_request(struct request_queue *q, struct bio *bio, unsigned int op, struct blk_mq_alloc_data *data);
-void blk_mq_sched_put_request(struct request *rq);
+void blk_mq_sched_assign_ioc(struct request *rq, struct bio *bio);
 
 void blk_mq_sched_request_inserted(struct request *rq);
 bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio,
@@ -38,35 +37,12 @@ int blk_mq_sched_init(struct request_queue *q);
 static inline bool
 blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio)
 {
-	struct elevator_queue *e = q->elevator;
-
-	if (!e || blk_queue_nomerges(q) || !bio_mergeable(bio))
+	if (blk_queue_nomerges(q) || !bio_mergeable(bio))
 		return false;
 
 	return __blk_mq_sched_bio_merge(q, bio);
 }
 
-static inline int blk_mq_sched_get_rq_priv(struct request_queue *q,
-					   struct request *rq,
-					   struct bio *bio)
-{
-	struct elevator_queue *e = q->elevator;
-
-	if (e && e->type->ops.mq.get_rq_priv)
-		return e->type->ops.mq.get_rq_priv(q, rq, bio);
-
-	return 0;
-}
-
-static inline void blk_mq_sched_put_rq_priv(struct request_queue *q,
-					    struct request *rq)
-{
-	struct elevator_queue *e = q->elevator;
-
-	if (e && e->type->ops.mq.put_rq_priv)
-		e->type->ops.mq.put_rq_priv(q, rq);
-}
-
 static inline bool
 blk_mq_sched_allow_merge(struct request_queue *q, struct request *rq,
 			 struct bio *bio)
[diff for one file not shown: +214 insertions, -191 deletions — suppressed by the viewer because it is too large]
@@ -128,17 +128,6 @@ static inline struct blk_mq_tags *blk_mq_tags_from_data(struct blk_mq_alloc_data
 	return data->hctx->tags;
 }
 
-/*
- * Internal helpers for request allocation/init/free
- */
-void blk_mq_rq_ctx_init(struct request_queue *q, struct blk_mq_ctx *ctx,
-			struct request *rq, unsigned int op);
-void __blk_mq_finish_request(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
-			     struct request *rq);
-void blk_mq_finish_request(struct request *rq);
-struct request *__blk_mq_alloc_request(struct blk_mq_alloc_data *data,
-				       unsigned int op);
-
 static inline bool blk_mq_hctx_stopped(struct blk_mq_hw_ctx *hctx)
 {
 	return test_bit(BLK_MQ_S_STOPPED, &hctx->state);
@@ -172,11 +172,6 @@ void blk_queue_make_request(struct request_queue *q, make_request_fn *mfn)
 	q->nr_batching = BLK_BATCH_REQ;
 
 	blk_set_default_limits(&q->limits);
-
-	/*
-	 * by default assume old behaviour and bounce for any highmem page
-	 */
-	blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);
 }
 EXPORT_SYMBOL(blk_queue_make_request);
Some files were not shown because too many files have changed in this diff.