Merge branch 'for-linus' into for-4.12/block
We've added a considerable amount of fixes for stalls and issues with the
blk-mq scheduling in the 4.11 series since forking off the for-4.12/block
branch. We need to do improvements on top of that for 4.12, so pull in the
previous fixes to make our lives easier going forward.

Signed-off-by: Jens Axboe <axboe@fb.com>
1 changed file: block/blk-mq.c (+62, -22)
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -351,7 +351,7 @@ void __blk_mq_finish_request(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
 	blk_mq_put_tag(hctx, hctx->tags, ctx, rq->tag);
 	if (sched_tag != -1)
 		blk_mq_sched_completed_request(hctx, rq);
-	blk_mq_sched_restart_queues(hctx);
+	blk_mq_sched_restart(hctx);
 	blk_queue_exit(q);
 }
 
@@ -863,12 +863,8 @@ bool blk_mq_get_driver_tag(struct request *rq, struct blk_mq_hw_ctx **hctx,
 		.flags = wait ? 0 : BLK_MQ_REQ_NOWAIT,
 	};
 
-	if (rq->tag != -1) {
-done:
-		if (hctx)
-			*hctx = data.hctx;
-		return true;
-	}
+	if (rq->tag != -1)
+		goto done;
 
 	if (blk_mq_tag_is_reserved(data.hctx->sched_tags, rq->internal_tag))
 		data.flags |= BLK_MQ_REQ_RESERVED;
@@ -880,10 +876,12 @@ done:
 			atomic_inc(&data.hctx->nr_active);
 		}
 		data.hctx->tags->rqs[rq->tag] = rq;
-		goto done;
 	}
 
-	return false;
+done:
+	if (hctx)
+		*hctx = data.hctx;
+	return rq->tag != -1;
 }
 
 static void __blk_mq_put_driver_tag(struct blk_mq_hw_ctx *hctx,
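Note on the two hunks above: the old code jumped into the if-body through the done: label and returned true unconditionally, so a caller that failed to get a driver tag never had *hctx filled in and could end up acting on the wrong hardware queue. With the label moved to the end of the function, every exit path sets *hctx and the result is simply rq->tag != -1. A hedged caller-side sketch; the surrounding dispatch logic is assumed context, and blk_mq_sched_mark_restart_hctx() is the per-hctx restart hook from the merged 4.11 fixes:

	struct blk_mq_hw_ctx *hctx;

	/*
	 * Assumed caller pattern (not part of this diff): even when no tag
	 * is available, *hctx is valid afterwards, so the restart machinery
	 * is armed on the hardware queue the request actually maps to.
	 */
	if (!blk_mq_get_driver_tag(rq, &hctx, false))
		blk_mq_sched_mark_restart_hctx(hctx);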
@@ -980,17 +978,20 @@ static bool blk_mq_dispatch_wait_add(struct blk_mq_hw_ctx *hctx)
 	return true;
 }
 
-bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *list)
+bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list)
 {
-	struct request_queue *q = hctx->queue;
+	struct blk_mq_hw_ctx *hctx;
 	struct request *rq;
 	int errors, queued, ret = BLK_MQ_RQ_QUEUE_OK;
 
+	if (list_empty(list))
+		return false;
+
 	/*
 	 * Now process all the entries, sending them to the driver.
 	 */
 	errors = queued = 0;
-	while (!list_empty(list)) {
+	do {
 		struct blk_mq_queue_data bd;
 
 		rq = list_first_entry(list, struct request, queuelist);
@@ -1053,7 +1054,7 @@ bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *list)
 
 		if (ret == BLK_MQ_RQ_QUEUE_BUSY)
 			break;
-	}
+	} while (!list_empty(list));
 
 	hctx->dispatched[queued_to_index(queued)]++;
 
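Review note: this hunk and the previous one belong together. blk_mq_dispatch_rq_list() now takes the request_queue rather than a hardware context, because with scheduler tags the requests on the list are not guaranteed to belong to the hctx the caller held; the hctx is instead resolved per request via blk_mq_get_driver_tag(). The new list_empty() check at the top is what makes the while-to-do/while conversion safe. Callers adjust roughly like this (assumed sketch, not shown in this diff):

	/* dispatch paths now pass the queue; the hctx is resolved inside */
	did_work = blk_mq_dispatch_rq_list(q, &rq_list);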
@@ -1138,7 +1139,8 @@ static int blk_mq_hctx_next_cpu(struct blk_mq_hw_ctx *hctx)
 	return hctx->next_cpu;
 }
 
-void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
+static void __blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async,
+					unsigned long msecs)
 {
 	if (unlikely(blk_mq_hctx_stopped(hctx) ||
 	    !blk_mq_hw_queue_mapped(hctx)))
@@ -1155,7 +1157,24 @@ void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
 		put_cpu();
 	}
 
-	kblockd_schedule_work_on(blk_mq_hctx_next_cpu(hctx), &hctx->run_work);
+	if (msecs == 0)
+		kblockd_schedule_work_on(blk_mq_hctx_next_cpu(hctx),
+					 &hctx->run_work);
+	else
+		kblockd_schedule_delayed_work_on(blk_mq_hctx_next_cpu(hctx),
+						 &hctx->delayed_run_work,
+						 msecs_to_jiffies(msecs));
 }
 
+void blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs)
+{
+	__blk_mq_delay_run_hw_queue(hctx, true, msecs);
+}
+EXPORT_SYMBOL(blk_mq_delay_run_hw_queue);
+
+void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
+{
+	__blk_mq_delay_run_hw_queue(hctx, async, 0);
+}
+
 void blk_mq_run_hw_queues(struct request_queue *q, bool async)
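The refactor above folds the immediate and delayed run paths into one helper, __blk_mq_delay_run_hw_queue(), and exports blk_mq_delay_run_hw_queue() so a driver can ask for a hardware queue to be re-run after a delay rather than immediately, which matters for the stall fixes this merge pulls in. A hedged usage sketch; example_queue_rq() and its resource check are hypothetical, not part of this diff:

	#include <linux/blk-mq.h>

	/*
	 * Hypothetical ->queue_rq(): when the device is temporarily out of
	 * resources, return BUSY and schedule a delayed re-run instead of
	 * busy-looping on an immediate restart.
	 */
	static int example_queue_rq(struct blk_mq_hw_ctx *hctx,
				    const struct blk_mq_queue_data *bd)
	{
		if (!example_resources_available()) {	/* hypothetical helper */
			blk_mq_delay_run_hw_queue(hctx, 100);	/* ~100 ms */
			return BLK_MQ_RQ_QUEUE_BUSY;
		}

		/* ... hand bd->rq to the hardware here ... */
		return BLK_MQ_RQ_QUEUE_OK;
	}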
@@ -1258,6 +1277,15 @@ static void blk_mq_run_work_fn(struct work_struct *work)
 	__blk_mq_run_hw_queue(hctx);
 }
 
+static void blk_mq_delayed_run_work_fn(struct work_struct *work)
+{
+	struct blk_mq_hw_ctx *hctx;
+
+	hctx = container_of(work, struct blk_mq_hw_ctx, delayed_run_work.work);
+
+	__blk_mq_run_hw_queue(hctx);
+}
+
 static void blk_mq_delay_work_fn(struct work_struct *work)
 {
 	struct blk_mq_hw_ctx *hctx;
@@ -1822,6 +1850,8 @@ static void blk_mq_exit_hctx(struct request_queue *q,
 				       hctx->fq->flush_rq, hctx_idx,
 				       flush_start_tag + hctx_idx);
 
+	blk_mq_sched_exit_hctx(q, hctx, hctx_idx);
+
 	if (set->ops->exit_hctx)
 		set->ops->exit_hctx(hctx, hctx_idx);
 
@@ -1858,6 +1888,7 @@ static int blk_mq_init_hctx(struct request_queue *q,
 	node = hctx->numa_node = set->numa_node;
 
 	INIT_WORK(&hctx->run_work, blk_mq_run_work_fn);
+	INIT_DELAYED_WORK(&hctx->delayed_run_work, blk_mq_delayed_run_work_fn);
 	INIT_DELAYED_WORK(&hctx->delay_work, blk_mq_delay_work_fn);
 	spin_lock_init(&hctx->lock);
 	INIT_LIST_HEAD(&hctx->dispatch);
@@ -1888,9 +1919,12 @@ static int blk_mq_init_hctx(struct request_queue *q,
 	    set->ops->init_hctx(hctx, set->driver_data, hctx_idx))
 		goto free_bitmap;
 
+	if (blk_mq_sched_init_hctx(q, hctx, hctx_idx))
+		goto exit_hctx;
+
 	hctx->fq = blk_alloc_flush_queue(q, hctx->numa_node, set->cmd_size);
 	if (!hctx->fq)
-		goto exit_hctx;
+		goto sched_exit_hctx;
 
 	if (set->ops->init_request &&
 	    set->ops->init_request(set->driver_data,
@@ -1905,6 +1939,8 @@ static int blk_mq_init_hctx(struct request_queue *q,
 
 free_fq:
 	kfree(hctx->fq);
+sched_exit_hctx:
+	blk_mq_sched_exit_hctx(q, hctx, hctx_idx);
 exit_hctx:
 	if (set->ops->exit_hctx)
 		set->ops->exit_hctx(hctx, hctx_idx);
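Error-path note: blk_mq_sched_init_hctx() now runs before blk_alloc_flush_queue(), so a flush-queue allocation failure must unwind through the new sched_exit_hctx label first; teardown remains the exact reverse of setup.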
@@ -2129,8 +2165,6 @@ void blk_mq_release(struct request_queue *q)
 	struct blk_mq_hw_ctx *hctx;
 	unsigned int i;
 
-	blk_mq_sched_teardown(q);
-
 	/* hctx kobj stays in hctx */
 	queue_for_each_hw_ctx(q, hctx, i) {
 		if (!hctx)
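This removal pairs with the blk_mq_exit_hctx() hunk earlier: scheduler per-hctx state is now torn down in blk_mq_exit_hctx() and in the init error path above, so the queue-wide blk_mq_sched_teardown() call in blk_mq_release() becomes redundant and is dropped.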
@@ -2461,6 +2495,14 @@ static int blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
 	return 0;
 }
 
+static int blk_mq_update_queue_map(struct blk_mq_tag_set *set)
+{
+	if (set->ops->map_queues)
+		return set->ops->map_queues(set);
+	else
+		return blk_mq_map_queues(set);
+}
+
 /*
  * Alloc a tag set to be associated with one or more request queues.
  * May fail with EINVAL for various error conditions. May adjust the
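blk_mq_update_queue_map() factors out the choice between a driver-supplied ->map_queues() and the default blk_mq_map_queues(). The next hunk switches blk_mq_alloc_tag_set() over to the helper, and the final hunk reuses it in blk_mq_update_nr_hw_queues(), where the CPU-to-queue map must be rebuilt whenever the number of hardware queues changes.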
@@ -2515,10 +2557,7 @@ int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
 	if (!set->mq_map)
 		goto out_free_tags;
 
-	if (set->ops->map_queues)
-		ret = set->ops->map_queues(set);
-	else
-		ret = blk_mq_map_queues(set);
+	ret = blk_mq_update_queue_map(set);
 	if (ret)
 		goto out_free_mq_map;
 
@@ -2610,6 +2649,7 @@ void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues)
 		blk_mq_freeze_queue(q);
 
 	set->nr_hw_queues = nr_hw_queues;
+	blk_mq_update_queue_map(set);
 	list_for_each_entry(q, &set->tag_list, tag_set_list) {
 		blk_mq_realloc_hw_ctxs(set, q);
 		blk_mq_queue_reinit(q, cpu_online_mask);