You've already forked linux-apfs
mirror of
https://github.com/linux-apfs/linux-apfs.git
synced 2026-05-01 15:00:59 -07:00
blk-mq: use __smp_call_function_single directly
__smp_call_function_single already avoids multiple IPIs by internally queueing up the items, and is now also available for non-SMP builds as a trivially correct stub, so there is no need to wrap it. If the additional lock roundtrip causes problems, my patch to convert the generic IPI code to llists (currently waiting to get merged) will fix it. Signed-off-by: Christoph Hellwig <hch@lst.de> Signed-off-by: Jens Axboe <axboe@kernel.dk>
This commit is contained in:
committed by
Jens Axboe
parent
c7b22bb19a
commit
3d6efbf62c
+11
-57
@@ -27,8 +27,6 @@ static LIST_HEAD(all_q_list);
|
||||
|
||||
static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx);
|
||||
|
||||
DEFINE_PER_CPU(struct llist_head, ipi_lists);
|
||||
|
||||
static struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q,
|
||||
unsigned int cpu)
|
||||
{
|
||||
@@ -339,56 +337,13 @@ void __blk_mq_end_io(struct request *rq, int error)
|
||||
blk_mq_complete_request(rq, error);
|
||||
}
|
||||
|
||||
#if defined(CONFIG_SMP)
|
||||
|
||||
/*
|
||||
* Called with interrupts disabled.
|
||||
*/
|
||||
static void ipi_end_io(void *data)
|
||||
static void blk_mq_end_io_remote(void *data)
|
||||
{
|
||||
struct llist_head *list = &per_cpu(ipi_lists, smp_processor_id());
|
||||
struct llist_node *entry, *next;
|
||||
struct request *rq;
|
||||
struct request *rq = data;
|
||||
|
||||
entry = llist_del_all(list);
|
||||
|
||||
while (entry) {
|
||||
next = entry->next;
|
||||
rq = llist_entry(entry, struct request, ll_list);
|
||||
__blk_mq_end_io(rq, rq->errors);
|
||||
entry = next;
|
||||
}
|
||||
__blk_mq_end_io(rq, rq->errors);
|
||||
}
|
||||
|
||||
static int ipi_remote_cpu(struct blk_mq_ctx *ctx, const int cpu,
|
||||
struct request *rq, const int error)
|
||||
{
|
||||
struct call_single_data *data = &rq->csd;
|
||||
|
||||
rq->errors = error;
|
||||
rq->ll_list.next = NULL;
|
||||
|
||||
/*
|
||||
* If the list is non-empty, an existing IPI must already
|
||||
* be "in flight". If that is the case, we need not schedule
|
||||
* a new one.
|
||||
*/
|
||||
if (llist_add(&rq->ll_list, &per_cpu(ipi_lists, ctx->cpu))) {
|
||||
data->func = ipi_end_io;
|
||||
data->flags = 0;
|
||||
__smp_call_function_single(ctx->cpu, data, 0);
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
#else /* CONFIG_SMP */
|
||||
static int ipi_remote_cpu(struct blk_mq_ctx *ctx, const int cpu,
|
||||
struct request *rq, const int error)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
#endif
|
||||
|
||||
/*
|
||||
* End IO on this request on a multiqueue enabled driver. We'll either do
|
||||
* it directly inline, or punt to a local IPI handler on the matching
|
||||
@@ -403,11 +358,15 @@ void blk_mq_end_io(struct request *rq, int error)
|
||||
return __blk_mq_end_io(rq, error);
|
||||
|
||||
cpu = get_cpu();
|
||||
|
||||
if (cpu == ctx->cpu || !cpu_online(ctx->cpu) ||
|
||||
!ipi_remote_cpu(ctx, cpu, rq, error))
|
||||
if (cpu != ctx->cpu && cpu_online(ctx->cpu)) {
|
||||
rq->errors = error;
|
||||
rq->csd.func = blk_mq_end_io_remote;
|
||||
rq->csd.info = rq;
|
||||
rq->csd.flags = 0;
|
||||
__smp_call_function_single(ctx->cpu, &rq->csd, 0);
|
||||
} else {
|
||||
__blk_mq_end_io(rq, error);
|
||||
|
||||
}
|
||||
put_cpu();
|
||||
}
|
||||
EXPORT_SYMBOL(blk_mq_end_io);
|
||||
@@ -1506,11 +1465,6 @@ static int blk_mq_queue_reinit_notify(struct notifier_block *nb,
|
||||
|
||||
static int __init blk_mq_init(void)
|
||||
{
|
||||
unsigned int i;
|
||||
|
||||
for_each_possible_cpu(i)
|
||||
init_llist_head(&per_cpu(ipi_lists, i));
|
||||
|
||||
blk_mq_cpu_init();
|
||||
|
||||
/* Must be called after percpu_counter_hotcpu_callback() */
|
||||
|
||||
Reference in New Issue
Block a user