block: simplify disk shutdown

Set the queue dying flag and call blk_mq_exit_queue from del_gendisk for
all disks that do not have separately allocated queues, and thus remove
the need to call blk_cleanup_queue for them.

Rename blk_cleanup_queue to blk_mq_destroy_queue to make it clear that
this function is intended only for separately allocated blk-mq queues.

This saves an extra queue freeze for devices without a separately
allocated queue.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Hannes Reinecke <hare@suse.de>
Link: https://lore.kernel.org/r/20220619060552.1850436-6-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Commit:    6f8191fdf4 (parent 0e3534022f)
Author:    Christoph Hellwig, 2022-06-19 08:05:51 +02:00
Committer: Jens Axboe
34 changed files with 105 additions and 113 deletions
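After this change, a driver whose gendisk owns its request queue (the common blk_mq_alloc_disk() case) needs no queue cleanup call of its own: del_gendisk() marks the queue dying and tears down the blk-mq state, and put_disk() drops the last reference. Only a queue allocated separately from any disk still needs an explicit shutdown call, now spelled blk_mq_destroy_queue(). A minimal before/after sketch of a driver remove path (the foo_* names are illustrative, not taken from this patch):

	/* before: all teardown funneled through blk_cleanup_queue() */
	del_gendisk(foo->disk);
	blk_cleanup_queue(foo->disk->queue);
	put_disk(foo->disk);
	blk_mq_free_tag_set(&foo->tag_set);

	/* after: the disk owns its queue, del_gendisk() shuts it down */
	del_gendisk(foo->disk);
	put_disk(foo->disk);
	blk_mq_free_tag_set(&foo->tag_set);

	/* after: a standalone queue with no disk, e.g. an admin queue */
	blk_mq_destroy_queue(foo->admin_q);
	blk_mq_free_tag_set(&foo->admin_tag_set);

This matches the driver conversions below: callers that paired blk_cleanup_queue() with del_gendisk() and put_disk() simply drop the call, while bsg, sx8 and the Apple NVMe admin queue switch to blk_mq_destroy_queue().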

diff --git a/block/blk-core.c b/block/blk-core.c

@@ -284,43 +284,6 @@ void blk_queue_start_drain(struct request_queue *q)
 	wake_up_all(&q->mq_freeze_wq);
 }
 
-/**
- * blk_cleanup_queue - shutdown a request queue
- * @q: request queue to shutdown
- *
- * Mark @q DYING, drain all pending requests, mark @q DEAD, destroy and
- * put it.  All future requests will be failed immediately with -ENODEV.
- *
- * Context: can sleep
- */
-void blk_cleanup_queue(struct request_queue *q)
-{
-	/* cannot be called from atomic context */
-	might_sleep();
-
-	WARN_ON_ONCE(blk_queue_registered(q));
-
-	/* mark @q DYING, no new request or merges will be allowed afterwards */
-	blk_queue_flag_set(QUEUE_FLAG_DYING, q);
-	blk_queue_start_drain(q);
-
-	/*
-	 * Drain all requests queued before DYING marking. Set DEAD flag to
-	 * prevent that blk_mq_run_hw_queues() accesses the hardware queues
-	 * after draining finished.
-	 */
-	blk_freeze_queue(q);
-	blk_sync_queue(q);
-	if (queue_is_mq(q)) {
-		blk_mq_cancel_work_sync(q);
-		blk_mq_exit_queue(q);
-	}
-
-	/* @q is and will stay empty, shutdown and put */
-	blk_put_queue(q);
-}
-EXPORT_SYMBOL(blk_cleanup_queue);
-
 /**
  * blk_queue_enter() - try to increase q->q_usage_counter
  * @q: request queue pointer

diff --git a/block/blk-mq.c b/block/blk-mq.c

@@ -3902,7 +3902,7 @@ static struct request_queue *blk_mq_init_queue_data(struct blk_mq_tag_set *set,
 	q->queuedata = queuedata;
 	ret = blk_mq_init_allocated_queue(set, q);
 	if (ret) {
-		blk_cleanup_queue(q);
+		blk_put_queue(q);
 		return ERR_PTR(ret);
 	}
 	return q;
@@ -3914,6 +3914,35 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
 }
 EXPORT_SYMBOL(blk_mq_init_queue);
 
+/**
+ * blk_mq_destroy_queue - shutdown a request queue
+ * @q: request queue to shutdown
+ *
+ * This shuts down a request queue allocated by blk_mq_init_queue() and drops
+ * the initial reference.  All future requests will be failed with -ENODEV.
+ *
+ * Context: can sleep
+ */
+void blk_mq_destroy_queue(struct request_queue *q)
+{
+	WARN_ON_ONCE(!queue_is_mq(q));
+	WARN_ON_ONCE(blk_queue_registered(q));
+
+	might_sleep();
+
+	blk_queue_flag_set(QUEUE_FLAG_DYING, q);
+	blk_queue_start_drain(q);
+	blk_freeze_queue(q);
+	blk_sync_queue(q);
+	blk_mq_cancel_work_sync(q);
+	blk_mq_exit_queue(q);
+
+	/* @q is and will stay empty, shutdown and put */
+	blk_put_queue(q);
+}
+EXPORT_SYMBOL(blk_mq_destroy_queue);
+
 struct gendisk *__blk_mq_alloc_disk(struct blk_mq_tag_set *set, void *queuedata,
 		struct lock_class_key *lkclass)
 {
@@ -3926,13 +3955,23 @@ struct gendisk *__blk_mq_alloc_disk(struct blk_mq_tag_set *set, void *queuedata,
 	disk = __alloc_disk_node(q, set->numa_node, lkclass);
 	if (!disk) {
-		blk_cleanup_queue(q);
+		blk_put_queue(q);
 		return ERR_PTR(-ENOMEM);
 	}
+	set_bit(GD_OWNS_QUEUE, &disk->state);
 	return disk;
 }
 EXPORT_SYMBOL(__blk_mq_alloc_disk);
 
+struct gendisk *blk_mq_alloc_disk_for_queue(struct request_queue *q,
+		struct lock_class_key *lkclass)
+{
+	if (!blk_get_queue(q))
+		return NULL;
+	return __alloc_disk_node(q, NUMA_NO_NODE, lkclass);
+}
+EXPORT_SYMBOL(blk_mq_alloc_disk_for_queue);
+
 static struct blk_mq_hw_ctx *blk_mq_alloc_and_init_hctx(
 		struct blk_mq_tag_set *set, struct request_queue *q,
 		int hctx_idx, int node)
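
The new blk_mq_alloc_disk_for_queue() helper takes its own reference on an already set up queue and attaches a gendisk without setting GD_OWNS_QUEUE, so responsibility for shutting the queue down stays with the caller. A sketch of how a driver might pair it with blk_mq_init_queue(); the foo_* names and error handling are illustrative assumptions, not part of this patch:

	static struct lock_class_key foo_bio_compl_lkclass;

	static int foo_add_disk(struct foo_dev *foo)
	{
		struct request_queue *q;
		struct gendisk *disk;

		q = blk_mq_init_queue(&foo->tag_set);
		if (IS_ERR(q))
			return PTR_ERR(q);

		disk = blk_mq_alloc_disk_for_queue(q, &foo_bio_compl_lkclass);
		if (!disk) {
			/* the queue was separately allocated, destroy it */
			blk_mq_destroy_queue(q);
			return -ENOMEM;
		}
		foo->queue = q;
		foo->disk = disk;
		return 0;	/* device_add_disk() and friends elided */
	}

	static void foo_remove_disk(struct foo_dev *foo)
	{
		/* GD_OWNS_QUEUE is not set, so destroy the queue here */
		del_gendisk(foo->disk);
		put_disk(foo->disk);
		blk_mq_destroy_queue(foo->queue);
	}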

diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c

@@ -755,11 +755,6 @@ static void blk_free_queue_rcu(struct rcu_head *rcu_head)
  * decremented with blk_put_queue(). Once the refcount reaches 0 this function
  * is called.
  *
- * For drivers that have a request_queue on a gendisk and added with
- * __device_add_disk() the refcount to request_queue will reach 0 with
- * the last put_disk() called by the driver. For drivers which don't use
- * __device_add_disk() this happens with blk_cleanup_queue().
- *
  * Drivers exist which depend on the release of the request_queue to be
  * synchronous, it should not be deferred.
  *

diff --git a/block/blk.h b/block/blk.h

@@ -424,6 +424,9 @@ int bdev_resize_partition(struct gendisk *disk, int partno, sector_t start,
 		sector_t length);
 void blk_drop_partitions(struct gendisk *disk);
 
+struct gendisk *__alloc_disk_node(struct request_queue *q, int node_id,
+		struct lock_class_key *lkclass);
+
 int bio_add_hw_page(struct request_queue *q, struct bio *bio,
 		struct page *page, unsigned int len, unsigned int offset,
 		unsigned int max_sectors, bool *same_page);

diff --git a/block/bsg-lib.c b/block/bsg-lib.c

@@ -324,7 +324,7 @@ void bsg_remove_queue(struct request_queue *q)
 		container_of(q->tag_set, struct bsg_set, tag_set);
 
 	bsg_unregister_queue(bset->bd);
-	blk_cleanup_queue(q);
+	blk_mq_destroy_queue(q);
 	blk_mq_free_tag_set(&bset->tag_set);
 	kfree(bset);
 }
@@ -399,7 +399,7 @@ struct request_queue *bsg_setup_queue(struct device *dev, const char *name,
 	return q;
 
 out_cleanup_queue:
-	blk_cleanup_queue(q);
+	blk_mq_destroy_queue(q);
 out_queue:
 	blk_mq_free_tag_set(set);
 out_tag_set:

diff --git a/block/genhd.c b/block/genhd.c

@@ -617,6 +617,8 @@ void del_gendisk(struct gendisk *disk)
 	 * Fail any new I/O.
 	 */
 	set_bit(GD_DEAD, &disk->state);
+	if (test_bit(GD_OWNS_QUEUE, &disk->state))
+		blk_queue_flag_set(QUEUE_FLAG_DYING, q);
 	set_capacity(disk, 0);
 
 	/*
@@ -663,11 +665,16 @@ void del_gendisk(struct gendisk *disk)
 	blk_mq_unquiesce_queue(q);
 
 	/*
-	 * Allow using passthrough request again after the queue is torn down.
+	 * If the disk does not own the queue, allow using passthrough requests
+	 * again.  Else leave the queue frozen to fail all I/O.
 	 */
-	blk_queue_flag_clear(QUEUE_FLAG_INIT_DONE, q);
-	__blk_mq_unfreeze_queue(q, true);
+	if (!test_bit(GD_OWNS_QUEUE, &disk->state)) {
+		blk_queue_flag_clear(QUEUE_FLAG_INIT_DONE, q);
+		__blk_mq_unfreeze_queue(q, true);
+	} else {
+		if (queue_is_mq(q))
+			blk_mq_exit_queue(q);
+	}
 }
 EXPORT_SYMBOL(del_gendisk);
@@ -1338,9 +1345,6 @@ struct gendisk *__alloc_disk_node(struct request_queue *q, int node_id,
 {
 	struct gendisk *disk;
 
-	if (!blk_get_queue(q))
-		return NULL;
-
 	disk = kzalloc_node(sizeof(struct gendisk), GFP_KERNEL, node_id);
 	if (!disk)
 		goto out_put_queue;
@@ -1391,7 +1395,6 @@ out_put_queue:
 	blk_put_queue(q);
 	return NULL;
 }
-EXPORT_SYMBOL(__alloc_disk_node);
 
 struct gendisk *__blk_alloc_disk(int node, struct lock_class_key *lkclass)
 {
@@ -1404,9 +1407,10 @@ struct gendisk *__blk_alloc_disk(int node, struct lock_class_key *lkclass)
 	disk = __alloc_disk_node(q, node, lkclass);
 	if (!disk) {
-		blk_cleanup_queue(q);
+		blk_put_queue(q);
 		return NULL;
 	}
+	set_bit(GD_OWNS_QUEUE, &disk->state);
 	return disk;
 }
 EXPORT_SYMBOL(__blk_alloc_disk);
@@ -1439,7 +1443,6 @@ EXPORT_SYMBOL(put_disk);
  */
 void blk_cleanup_disk(struct gendisk *disk)
 {
-	blk_cleanup_queue(disk->queue);
 	put_disk(disk);
 }
 EXPORT_SYMBOL(blk_cleanup_disk);
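
With blk_cleanup_queue() gone, blk_cleanup_disk() above reduces to plain put_disk(), so a driver whose disk was allocated with blk_alloc_disk() or blk_mq_alloc_disk() now tears down as (a sketch; the foo name is illustrative):

	del_gendisk(foo->disk);		/* shuts down the owned queue */
	blk_cleanup_disk(foo->disk);	/* now equivalent to put_disk() */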

diff --git a/drivers/block/ataflop.c b/drivers/block/ataflop.c

@@ -2045,7 +2045,6 @@ static void atari_floppy_cleanup(void)
 			if (!unit[i].disk[type])
 				continue;
 			del_gendisk(unit[i].disk[type]);
-			blk_cleanup_queue(unit[i].disk[type]->queue);
 			put_disk(unit[i].disk[type]);
 		}
 		blk_mq_free_tag_set(&unit[i].tag_set);

diff --git a/drivers/block/loop.c b/drivers/block/loop.c

@@ -2057,7 +2057,6 @@ static void loop_remove(struct loop_device *lo)
 {
 	/* Make this loop device unreachable from pathname. */
 	del_gendisk(lo->lo_disk);
-	blk_cleanup_queue(lo->lo_disk->queue);
 	blk_mq_free_tag_set(&lo->tag_set);
 
 	mutex_lock(&loop_ctl_mutex);

diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c

@@ -3565,7 +3565,6 @@ static int mtip_block_shutdown(struct driver_data *dd)
 	if (test_bit(MTIP_DDF_INIT_DONE_BIT, &dd->dd_flag))
 		del_gendisk(dd->disk);
 
-	blk_cleanup_queue(dd->queue);
 	blk_mq_free_tag_set(&dd->tags);
 	put_disk(dd->disk);
 	return 0;
@@ -3914,7 +3913,6 @@ static void mtip_pci_remove(struct pci_dev *pdev)
 		dev_info(&dd->pdev->dev, "device %s surprise removal\n",
 			dd->disk->disk_name);
 
-	blk_cleanup_queue(dd->queue);
 	blk_mq_free_tag_set(&dd->tags);
 
 	/* De-initialize the protocol layer. */

diff --git a/drivers/block/rnbd/rnbd-clt.c b/drivers/block/rnbd/rnbd-clt.c

@@ -1755,7 +1755,7 @@ static void rnbd_destroy_sessions(void)
 	list_for_each_entry_safe(dev, tn, &sess->devs_list, list) {
 		/*
 		 * Here unmap happens in parallel for only one reason:
-		 * blk_cleanup_queue() takes around half a second, so
+		 * del_gendisk() takes around half a second, so
 		 * on huge amount of devices the whole module unload
 		 * procedure takes minutes.
 		 */

diff --git a/drivers/block/sx8.c b/drivers/block/sx8.c

@@ -1536,7 +1536,7 @@ err_out_free_majors:
 		clear_bit(0, &carm_major_alloc);
 	else if (host->major == 161)
 		clear_bit(1, &carm_major_alloc);
-	blk_cleanup_queue(host->oob_q);
+	blk_mq_destroy_queue(host->oob_q);
 	blk_mq_free_tag_set(&host->tag_set);
 err_out_dma_free:
 	dma_free_coherent(&pdev->dev, CARM_SHM_SIZE, host->shm, host->shm_dma);
@@ -1570,7 +1570,7 @@ static void carm_remove_one (struct pci_dev *pdev)
 		clear_bit(0, &carm_major_alloc);
 	else if (host->major == 161)
 		clear_bit(1, &carm_major_alloc);
-	blk_cleanup_queue(host->oob_q);
+	blk_mq_destroy_queue(host->oob_q);
 	blk_mq_free_tag_set(&host->tag_set);
 	dma_free_coherent(&pdev->dev, CARM_SHM_SIZE, host->shm, host->shm_dma);
 	iounmap(host->mmio);

diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c

@@ -1111,7 +1111,6 @@ static void virtblk_remove(struct virtio_device *vdev)
 	flush_work(&vblk->config_work);
 
 	del_gendisk(vblk->disk);
-	blk_cleanup_queue(vblk->disk->queue);
 	blk_mq_free_tag_set(&vblk->tag_set);
 
 	mutex_lock(&vblk->vdev_mutex);

diff --git a/drivers/block/z2ram.c b/drivers/block/z2ram.c

@@ -384,7 +384,6 @@ static void __exit z2_exit(void)
 	for (i = 0; i < Z2MINOR_COUNT; i++) {
 		del_gendisk(z2ram_gendisk[i]);
-		blk_cleanup_queue(z2ram_gendisk[i]->queue);
 		put_disk(z2ram_gendisk[i]);
 	}
 	blk_mq_free_tag_set(&tag_set);

diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c

@@ -831,7 +831,6 @@ probe_fail_no_mem:
 static int remove_gdrom(struct platform_device *devptr)
 {
-	blk_cleanup_queue(gd.gdrom_rq);
 	blk_mq_free_tag_set(&gd.tag_set);
 	free_irq(HW_EVENT_GDROM_CMD, &gd);
 	free_irq(HW_EVENT_GDROM_DMA, &gd);

diff --git a/drivers/memstick/core/ms_block.c b/drivers/memstick/core/ms_block.c

@@ -2187,7 +2187,6 @@ static void msb_remove(struct memstick_dev *card)
 	/* Remove the disk */
 	del_gendisk(msb->disk);
-	blk_cleanup_queue(msb->queue);
 	blk_mq_free_tag_set(&msb->tag_set);
 	msb->queue = NULL;

diff --git a/drivers/memstick/core/mspro_block.c b/drivers/memstick/core/mspro_block.c

@@ -1294,7 +1294,6 @@ static void mspro_block_remove(struct memstick_dev *card)
 	del_gendisk(msb->disk);
 	dev_dbg(&card->dev, "mspro block remove\n");
 
-	blk_cleanup_queue(msb->queue);
 	blk_mq_free_tag_set(&msb->tag_set);
 	msb->queue = NULL;

diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c

@@ -2509,7 +2509,6 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
 	return md;
 
 err_cleanup_queue:
-	blk_cleanup_queue(md->disk->queue);
 	blk_mq_free_tag_set(&md->queue.tag_set);
 err_kfree:
 	kfree(md);

diff --git a/drivers/mmc/core/queue.c b/drivers/mmc/core/queue.c

@@ -494,7 +494,6 @@ void mmc_cleanup_queue(struct mmc_queue *mq)
 	if (blk_queue_quiesced(q))
 		blk_mq_unquiesce_queue(q);
 
-	blk_cleanup_queue(q);
 	blk_mq_free_tag_set(&mq->tag_set);
 
 	/*

diff --git a/drivers/nvme/host/apple.c b/drivers/nvme/host/apple.c

@@ -1502,7 +1502,7 @@ static int apple_nvme_probe(struct platform_device *pdev)
 	if (!blk_get_queue(anv->ctrl.admin_q)) {
 		nvme_start_admin_queue(&anv->ctrl);
-		blk_cleanup_queue(anv->ctrl.admin_q);
+		blk_mq_destroy_queue(anv->ctrl.admin_q);
 		anv->ctrl.admin_q = NULL;
 		ret = -ENODEV;
 		goto put_dev;

diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c

@@ -4103,7 +4103,6 @@ static void nvme_ns_remove(struct nvme_ns *ns)
 	if (!nvme_ns_head_multipath(ns->head))
 		nvme_cdev_del(&ns->cdev, &ns->cdev_device);
 	del_gendisk(ns->disk);
-	blk_cleanup_queue(ns->queue);
 
 	down_write(&ns->ctrl->namespaces_rwsem);
 	list_del_init(&ns->list);

Some files were not shown because too many files have changed in this diff Show More