Merge tag 'v4.12-rc5' into for-4.13/block

We've already got a few conflicts, and upcoming work depends on some of the
changes that have gone into mainline as regression fixes for this series.

Pull in 4.12-rc5 to resolve these conflicts and make it easier for downstream
trees to continue working on 4.13 changes.

Signed-off-by: Jens Axboe <axboe@kernel.dk>
Jens Axboe committed 2017-06-12 08:30:13 -06:00
742 changed files with 7025 additions and 3932 deletions
+27 -10
@@ -1396,22 +1396,28 @@ static blk_qc_t request_to_qc_t(struct blk_mq_hw_ctx *hctx, struct request *rq)
 	return blk_tag_to_qc_t(rq->internal_tag, hctx->queue_num, true);
 }
 
-static void __blk_mq_try_issue_directly(struct request *rq, blk_qc_t *cookie,
-					bool may_sleep)
+static void __blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
+					struct request *rq,
+					blk_qc_t *cookie, bool may_sleep)
 {
 	struct request_queue *q = rq->q;
 	struct blk_mq_queue_data bd = {
 		.rq = rq,
 		.last = true,
 	};
-	struct blk_mq_hw_ctx *hctx;
 	blk_qc_t new_cookie;
-	blk_status_t ret;
+	int ret;
+	bool run_queue = true;
+
+	if (blk_mq_hctx_stopped(hctx)) {
+		run_queue = false;
+		goto insert;
+	}
 
 	if (q->elevator)
 		goto insert;
 
-	if (!blk_mq_get_driver_tag(rq, &hctx, false))
+	if (!blk_mq_get_driver_tag(rq, NULL, false))
 		goto insert;
 
 	new_cookie = request_to_qc_t(hctx, rq);
@@ -1436,7 +1442,7 @@ static void __blk_mq_try_issue_directly(struct request *rq, blk_qc_t *cookie,
 	}
 
 insert:
-	blk_mq_sched_insert_request(rq, false, true, false, may_sleep);
+	blk_mq_sched_insert_request(rq, false, run_queue, false, may_sleep);
 }
 
 static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
@@ -1444,7 +1450,7 @@ static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
 {
 	if (!(hctx->flags & BLK_MQ_F_BLOCKING)) {
 		rcu_read_lock();
-		__blk_mq_try_issue_directly(rq, cookie, false);
+		__blk_mq_try_issue_directly(hctx, rq, cookie, false);
 		rcu_read_unlock();
 	} else {
 		unsigned int srcu_idx;
@@ -1452,7 +1458,7 @@ static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
 		might_sleep();
 
 		srcu_idx = srcu_read_lock(&hctx->queue_rq_srcu);
-		__blk_mq_try_issue_directly(rq, cookie, true);
+		__blk_mq_try_issue_directly(hctx, rq, cookie, true);
 		srcu_read_unlock(&hctx->queue_rq_srcu, srcu_idx);
 	}
 }
@@ -1555,9 +1561,12 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
 		blk_mq_put_ctx(data.ctx);
 
-		if (same_queue_rq)
+		if (same_queue_rq) {
+			data.hctx = blk_mq_map_queue(q,
+					same_queue_rq->mq_ctx->cpu);
 			blk_mq_try_issue_directly(data.hctx, same_queue_rq,
 					&cookie);
+		}
 	} else if (q->nr_hw_queues > 1 && is_sync) {
 		blk_mq_put_ctx(data.ctx);
 		blk_mq_bio_to_request(rq, bio);
@@ -2578,7 +2587,8 @@ int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr)
 	return ret;
 }
 
-void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues)
+static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
+							int nr_hw_queues)
 {
 	struct request_queue *q;
@@ -2602,6 +2612,13 @@ void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues)
 	list_for_each_entry(q, &set->tag_list, tag_set_list)
 		blk_mq_unfreeze_queue(q);
 }
+
+void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues)
+{
+	mutex_lock(&set->tag_list_lock);
+	__blk_mq_update_nr_hw_queues(set, nr_hw_queues);
+	mutex_unlock(&set->tag_list_lock);
+}
 EXPORT_SYMBOL_GPL(blk_mq_update_nr_hw_queues);
 
 /* Enable polling stats and return whether they were already enabled. */