Merge branch 'for-3.5/core' of git://git.kernel.dk/linux-block
Merge block/IO core bits from Jens Axboe:
"This is a bit bigger on the core side than usual, but that is purely
because we decided to hold off on parts of Tejun's submission on 3.4
to give it a bit more time to simmer. As a consequence, it's seen a
long cycle in for-next.
It contains:
- Bug fix from Dan, wrong locking type.
- Relax splice gifting restriction from Eric.
- A ton of updates from Tejun, primarily for blkcg. This improves
the code a lot, making the API nicer and cleaner, and also includes
fixes for how we handle and tie policies and re-activate on
switches. The changes also include generic bug fixes.
- A simple fix from Vivek, along with a fix for doing proper delayed
allocation of the blkcg stats."
Fix up annoying conflict just due to different merge resolution in
Documentation/feature-removal-schedule.txt
* 'for-3.5/core' of git://git.kernel.dk/linux-block: (92 commits)
blkcg: tg_stats_alloc_lock is an irq lock
vmsplice: relax alignement requirements for SPLICE_F_GIFT
blkcg: use radix tree to index blkgs from blkcg
blkcg: fix blkcg->css ref leak in __blkg_lookup_create()
block: fix elvpriv allocation failure handling
block: collapse blk_alloc_request() into get_request()
blkcg: collapse blkcg_policy_ops into blkcg_policy
blkcg: embed struct blkg_policy_data in policy specific data
blkcg: mass rename of blkcg API
blkcg: style cleanups for blk-cgroup.h
blkcg: remove blkio_group->path[]
blkcg: blkg_rwstat_read() was missing inline
blkcg: shoot down blkgs if all policies are deactivated
blkcg: drop stuff unused after per-queue policy activation update
blkcg: implement per-queue policy activation
blkcg: add request_queue->root_blkg
blkcg: make request_queue bypassing on allocation
blkcg: make sure blkg_lookup() returns %NULL if @q is bypassing
blkcg: make blkg_conf_prep() take @pol and return with queue lock held
blkcg: remove static policy ID enums
...
block/Kconfig.iosched

@@ -23,8 +23,6 @@ config IOSCHED_DEADLINE

config IOSCHED_CFQ
tristate "CFQ I/O scheduler"
# If BLK_CGROUP is a module, CFQ has to be built as module.
depends on (BLK_CGROUP=m && m) || !BLK_CGROUP || BLK_CGROUP=y
default y
---help---
The CFQ I/O scheduler tries to distribute bandwidth equally

@@ -34,8 +32,6 @@ config IOSCHED_CFQ

This is the default I/O scheduler.

Note: If BLK_CGROUP=m, then CFQ can be built only as module.

config CFQ_GROUP_IOSCHED
bool "CFQ Group Scheduling support"
depends on IOSCHED_CFQ && BLK_CGROUP
block/blk-cgroup.c (2248 lines changed; diff suppressed because it is too large)

block/blk-core.c (287 lines changed)
@@ -29,11 +29,13 @@
#include <linux/fault-inject.h>
#include <linux/list_sort.h>
#include <linux/delay.h>
#include <linux/ratelimit.h>

#define CREATE_TRACE_POINTS
#include <trace/events/block.h>

#include "blk.h"
#include "blk-cgroup.h"

EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_remap);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_remap);

@@ -280,7 +282,7 @@ EXPORT_SYMBOL(blk_stop_queue);
*
* This function does not cancel any asynchronous activity arising
* out of elevator or throttling code. That would require elevaotor_exit()
* and blk_throtl_exit() to be called with queue lock initialized.
* and blkcg_exit_queue() to be called with queue lock initialized.
*
*/
void blk_sync_queue(struct request_queue *q)

@@ -365,17 +367,23 @@ void blk_drain_queue(struct request_queue *q, bool drain_all)

spin_lock_irq(q->queue_lock);

elv_drain_elevator(q);
if (drain_all)
blk_throtl_drain(q);
/*
* The caller might be trying to drain @q before its
* elevator is initialized.
*/
if (q->elevator)
elv_drain_elevator(q);

blkcg_drain_queue(q);

/*
* This function might be called on a queue which failed
* driver init after queue creation. Some drivers
* (e.g. fd) get unhappy in such cases. Kick queue iff
* dispatch queue has something on it.
* driver init after queue creation or is not yet fully
* active yet. Some drivers (e.g. fd and loop) get unhappy
* in such cases. Kick queue iff dispatch queue has
* something on it and @q has request_fn set.
*/
if (!list_empty(&q->queue_head))
if (!list_empty(&q->queue_head) && q->request_fn)
__blk_run_queue(q);

drain |= q->rq.elvpriv;

@@ -402,6 +410,49 @@ void blk_drain_queue(struct request_queue *q, bool drain_all)
}
}

/**
* blk_queue_bypass_start - enter queue bypass mode
* @q: queue of interest
*
* In bypass mode, only the dispatch FIFO queue of @q is used. This
* function makes @q enter bypass mode and drains all requests which were
* throttled or issued before. On return, it's guaranteed that no request
* is being throttled or has ELVPRIV set and blk_queue_bypass() %true
* inside queue or RCU read lock.
*/
void blk_queue_bypass_start(struct request_queue *q)
{
bool drain;

spin_lock_irq(q->queue_lock);
drain = !q->bypass_depth++;
queue_flag_set(QUEUE_FLAG_BYPASS, q);
spin_unlock_irq(q->queue_lock);

if (drain) {
blk_drain_queue(q, false);
/* ensure blk_queue_bypass() is %true inside RCU read lock */
synchronize_rcu();
}
}
EXPORT_SYMBOL_GPL(blk_queue_bypass_start);

/**
* blk_queue_bypass_end - leave queue bypass mode
* @q: queue of interest
*
* Leave bypass mode and restore the normal queueing behavior.
*/
void blk_queue_bypass_end(struct request_queue *q)
{
spin_lock_irq(q->queue_lock);
if (!--q->bypass_depth)
queue_flag_clear(QUEUE_FLAG_BYPASS, q);
WARN_ON_ONCE(q->bypass_depth < 0);
spin_unlock_irq(q->queue_lock);
}
EXPORT_SYMBOL_GPL(blk_queue_bypass_end);

/**
* blk_cleanup_queue - shutdown a request queue
* @q: request queue to shutdown

@@ -418,6 +469,19 @@ void blk_cleanup_queue(struct request_queue *q)
queue_flag_set_unlocked(QUEUE_FLAG_DEAD, q);

spin_lock_irq(lock);

/*
* Dead queue is permanently in bypass mode till released. Note
* that, unlike blk_queue_bypass_start(), we aren't performing
* synchronize_rcu() after entering bypass mode to avoid the delay
* as some drivers create and destroy a lot of queues while
* probing. This is still safe because blk_release_queue() will be
* called only after the queue refcnt drops to zero and nothing,
* RCU or not, would be traversing the queue by then.
*/
q->bypass_depth++;
queue_flag_set(QUEUE_FLAG_BYPASS, q);

queue_flag_set(QUEUE_FLAG_NOMERGES, q);
queue_flag_set(QUEUE_FLAG_NOXMERGES, q);
queue_flag_set(QUEUE_FLAG_DEAD, q);

@@ -428,13 +492,8 @@ void blk_cleanup_queue(struct request_queue *q)
spin_unlock_irq(lock);
mutex_unlock(&q->sysfs_lock);

/*
* Drain all requests queued before DEAD marking. The caller might
* be trying to tear down @q before its elevator is initialized, in
* which case we don't want to call into draining.
*/
if (q->elevator)
blk_drain_queue(q, true);
/* drain all requests queued before DEAD marking */
blk_drain_queue(q, true);

/* @q won't process any more request, flush async actions */
del_timer_sync(&q->backing_dev_info.laptop_mode_wb_timer);

@@ -498,14 +557,15 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
if (err)
goto fail_id;

if (blk_throtl_init(q))
goto fail_id;

setup_timer(&q->backing_dev_info.laptop_mode_wb_timer,
laptop_mode_timer_fn, (unsigned long) q);
setup_timer(&q->timeout, blk_rq_timed_out_timer, (unsigned long) q);
INIT_LIST_HEAD(&q->queue_head);
INIT_LIST_HEAD(&q->timeout_list);
INIT_LIST_HEAD(&q->icq_list);
#ifdef CONFIG_BLK_CGROUP
INIT_LIST_HEAD(&q->blkg_list);
#endif
INIT_LIST_HEAD(&q->flush_queue[0]);
INIT_LIST_HEAD(&q->flush_queue[1]);
INIT_LIST_HEAD(&q->flush_data_in_flight);

@@ -522,6 +582,18 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
*/
q->queue_lock = &q->__queue_lock;

/*
* A queue starts its life with bypass turned on to avoid
* unnecessary bypass on/off overhead and nasty surprises during
* init. The initial bypass will be finished at the end of
* blk_init_allocated_queue().
*/
q->bypass_depth = 1;
__set_bit(QUEUE_FLAG_BYPASS, &q->queue_flags);

if (blkcg_init_queue(q))
goto fail_id;

return q;

fail_id:

@@ -614,15 +686,15 @@ blk_init_allocated_queue(struct request_queue *q, request_fn_proc *rfn,

q->sg_reserved_size = INT_MAX;

/*
* all done
*/
if (!elevator_init(q, NULL)) {
blk_queue_congestion_threshold(q);
return q;
}
/* init elevator */
if (elevator_init(q, NULL))
return NULL;

return NULL;
blk_queue_congestion_threshold(q);

/* all done, end the initial bypass */
blk_queue_bypass_end(q);
return q;
}
EXPORT_SYMBOL(blk_init_allocated_queue);

@@ -648,33 +720,6 @@ static inline void blk_free_request(struct request_queue *q, struct request *rq)
mempool_free(rq, q->rq.rq_pool);
}

static struct request *
blk_alloc_request(struct request_queue *q, struct io_cq *icq,
unsigned int flags, gfp_t gfp_mask)
{
struct request *rq = mempool_alloc(q->rq.rq_pool, gfp_mask);

if (!rq)
return NULL;

blk_rq_init(q, rq);

rq->cmd_flags = flags | REQ_ALLOCED;

if (flags & REQ_ELVPRIV) {
rq->elv.icq = icq;
if (unlikely(elv_set_request(q, rq, gfp_mask))) {
mempool_free(rq, q->rq.rq_pool);
return NULL;
}
/* @rq->elv.icq holds on to io_context until @rq is freed */
if (icq)
get_io_context(icq->ioc);
}

return rq;
}

/*
* ioc_batching returns true if the ioc is a valid batching request and
* should be given priority access to a request.

@@ -762,6 +807,22 @@ static bool blk_rq_should_init_elevator(struct bio *bio)
return true;
}

/**
* rq_ioc - determine io_context for request allocation
* @bio: request being allocated is for this bio (can be %NULL)
*
* Determine io_context to use for request allocation for @bio. May return
* %NULL if %current->io_context doesn't exist.
*/
static struct io_context *rq_ioc(struct bio *bio)
{
#ifdef CONFIG_BLK_CGROUP
if (bio && bio->bi_ioc)
return bio->bi_ioc;
#endif
return current->io_context;
}

/**
* get_request - get a free request
* @q: request_queue to allocate request from

@@ -779,7 +840,7 @@ static bool blk_rq_should_init_elevator(struct bio *bio)
static struct request *get_request(struct request_queue *q, int rw_flags,
struct bio *bio, gfp_t gfp_mask)
{
struct request *rq = NULL;
struct request *rq;
struct request_list *rl = &q->rq;
struct elevator_type *et;
struct io_context *ioc;

@@ -789,7 +850,7 @@ static struct request *get_request(struct request_queue *q, int rw_flags,
int may_queue;
retry:
et = q->elevator->type;
ioc = current->io_context;
ioc = rq_ioc(bio);

if (unlikely(blk_queue_dead(q)))
return NULL;

@@ -808,7 +869,7 @@ retry:
*/
if (!ioc && !retried) {
spin_unlock_irq(q->queue_lock);
create_io_context(current, gfp_mask, q->node);
create_io_context(gfp_mask, q->node);
spin_lock_irq(q->queue_lock);
retried = true;
goto retry;

@@ -831,7 +892,7 @@ retry:
* process is not a "batcher", and not
* exempted by the IO scheduler
*/
goto out;
return NULL;
}
}
}

@@ -844,7 +905,7 @@ retry:
* allocated with any setting of ->nr_requests
*/
if (rl->count[is_sync] >= (3 * q->nr_requests / 2))
goto out;
return NULL;

rl->count[is_sync]++;
rl->starved[is_sync] = 0;

@@ -859,8 +920,7 @@ retry:
* Also, lookup icq while holding queue_lock. If it doesn't exist,
* it will be created after releasing queue_lock.
*/
if (blk_rq_should_init_elevator(bio) &&
!test_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags)) {
if (blk_rq_should_init_elevator(bio) && !blk_queue_bypass(q)) {
rw_flags |= REQ_ELVPRIV;
rl->elvpriv++;
if (et->icq_cache && ioc)

@@ -871,41 +931,36 @@ retry:
rw_flags |= REQ_IO_STAT;
spin_unlock_irq(q->queue_lock);

/* create icq if missing */
if ((rw_flags & REQ_ELVPRIV) && unlikely(et->icq_cache && !icq)) {
icq = ioc_create_icq(q, gfp_mask);
if (!icq)
goto fail_icq;
/* allocate and init request */
rq = mempool_alloc(q->rq.rq_pool, gfp_mask);
if (!rq)
goto fail_alloc;

blk_rq_init(q, rq);
rq->cmd_flags = rw_flags | REQ_ALLOCED;

/* init elvpriv */
if (rw_flags & REQ_ELVPRIV) {
if (unlikely(et->icq_cache && !icq)) {
create_io_context(gfp_mask, q->node);
ioc = rq_ioc(bio);
if (!ioc)
goto fail_elvpriv;

icq = ioc_create_icq(ioc, q, gfp_mask);
if (!icq)
goto fail_elvpriv;
}

rq->elv.icq = icq;
if (unlikely(elv_set_request(q, rq, bio, gfp_mask)))
goto fail_elvpriv;

/* @rq->elv.icq holds io_context until @rq is freed */
if (icq)
get_io_context(icq->ioc);
}

rq = blk_alloc_request(q, icq, rw_flags, gfp_mask);

fail_icq:
if (unlikely(!rq)) {
/*
* Allocation failed presumably due to memory. Undo anything
* we might have messed up.
*
* Allocating task should really be put onto the front of the
* wait queue, but this is pretty rare.
*/
spin_lock_irq(q->queue_lock);
freed_request(q, rw_flags);

/*
* in the very unlikely event that allocation failed and no
* requests for this direction was pending, mark us starved
* so that freeing of a request in the other direction will
* notice us. another possible fix would be to split the
* rq mempool into READ and WRITE
*/
rq_starved:
if (unlikely(rl->count[is_sync] == 0))
rl->starved[is_sync] = 1;

goto out;
}

out:
/*
* ioc may be NULL here, and ioc_batching will be false. That's
* OK, if the queue is under the request limit then requests need

@@ -916,8 +971,48 @@ rq_starved:
ioc->nr_batch_requests--;

trace_block_getrq(q, bio, rw_flags & 1);
out:
return rq;

fail_elvpriv:
/*
* elvpriv init failed. ioc, icq and elvpriv aren't mempool backed
* and may fail indefinitely under memory pressure and thus
* shouldn't stall IO. Treat this request as !elvpriv. This will
* disturb iosched and blkcg but weird is bettern than dead.
*/
printk_ratelimited(KERN_WARNING "%s: request aux data allocation failed, iosched may be disturbed\n",
dev_name(q->backing_dev_info.dev));

rq->cmd_flags &= ~REQ_ELVPRIV;
rq->elv.icq = NULL;

spin_lock_irq(q->queue_lock);
rl->elvpriv--;
spin_unlock_irq(q->queue_lock);
goto out;

fail_alloc:
/*
* Allocation failed presumably due to memory. Undo anything we
* might have messed up.
*
* Allocating task should really be put onto the front of the wait
* queue, but this is pretty rare.
*/
spin_lock_irq(q->queue_lock);
freed_request(q, rw_flags);

/*
* in the very unlikely event that allocation failed and no
* requests for this direction was pending, mark us starved so that
* freeing of a request in the other direction will notice
* us. another possible fix would be to split the rq mempool into
* READ and WRITE
*/
rq_starved:
if (unlikely(rl->count[is_sync] == 0))
rl->starved[is_sync] = 1;
return NULL;
}

/**

@@ -961,7 +1056,7 @@ static struct request *get_request_wait(struct request_queue *q, int rw_flags,
* up to a big batch of them for a small period time.
* See ioc_batching, ioc_set_batching
*/
create_io_context(current, GFP_NOIO, q->node);
create_io_context(GFP_NOIO, q->node);
ioc_set_batching(q, current->io_context);

spin_lock_irq(q->queue_lock);
block/blk-ioc.c (126 lines changed)
@@ -155,20 +155,20 @@ void put_io_context(struct io_context *ioc)
}
EXPORT_SYMBOL(put_io_context);

/* Called by the exiting task */
void exit_io_context(struct task_struct *task)
/**
* put_io_context_active - put active reference on ioc
* @ioc: ioc of interest
*
* Undo get_io_context_active(). If active reference reaches zero after
* put, @ioc can never issue further IOs and ioscheds are notified.
*/
void put_io_context_active(struct io_context *ioc)
{
struct io_context *ioc;
struct io_cq *icq;
struct hlist_node *n;
unsigned long flags;
struct io_cq *icq;

task_lock(task);
ioc = task->io_context;
task->io_context = NULL;
task_unlock(task);

if (!atomic_dec_and_test(&ioc->nr_tasks)) {
if (!atomic_dec_and_test(&ioc->active_ref)) {
put_io_context(ioc);
return;
}

@@ -197,6 +197,20 @@ retry:
put_io_context(ioc);
}

/* Called by the exiting task */
void exit_io_context(struct task_struct *task)
{
struct io_context *ioc;

task_lock(task);
ioc = task->io_context;
task->io_context = NULL;
task_unlock(task);

atomic_dec(&ioc->nr_tasks);
put_io_context_active(ioc);
}

/**
* ioc_clear_queue - break any ioc association with the specified queue
* @q: request_queue being cleared

@@ -218,19 +232,18 @@ void ioc_clear_queue(struct request_queue *q)
}
}

void create_io_context_slowpath(struct task_struct *task, gfp_t gfp_flags,
int node)
int create_task_io_context(struct task_struct *task, gfp_t gfp_flags, int node)
{
struct io_context *ioc;

ioc = kmem_cache_alloc_node(iocontext_cachep, gfp_flags | __GFP_ZERO,
node);
if (unlikely(!ioc))
return;
return -ENOMEM;

/* initialize */
atomic_long_set(&ioc->refcount, 1);
atomic_set(&ioc->nr_tasks, 1);
atomic_set(&ioc->active_ref, 1);
spin_lock_init(&ioc->lock);
INIT_RADIX_TREE(&ioc->icq_tree, GFP_ATOMIC | __GFP_HIGH);
INIT_HLIST_HEAD(&ioc->icq_list);

@@ -250,6 +263,8 @@ void create_io_context_slowpath(struct task_struct *task, gfp_t gfp_flags,
else
kmem_cache_free(iocontext_cachep, ioc);
task_unlock(task);

return 0;
}

/**

@@ -281,7 +296,7 @@ struct io_context *get_task_io_context(struct task_struct *task,
return ioc;
}
task_unlock(task);
} while (create_io_context(task, gfp_flags, node));
} while (!create_task_io_context(task, gfp_flags, node));

return NULL;
}

@@ -325,26 +340,23 @@ EXPORT_SYMBOL(ioc_lookup_icq);

/**
* ioc_create_icq - create and link io_cq
* @ioc: io_context of interest
* @q: request_queue of interest
* @gfp_mask: allocation mask
*
* Make sure io_cq linking %current->io_context and @q exists. If either
* io_context and/or icq don't exist, they will be created using @gfp_mask.
* Make sure io_cq linking @ioc and @q exists. If icq doesn't exist, they
* will be created using @gfp_mask.
*
* The caller is responsible for ensuring @ioc won't go away and @q is
* alive and will stay alive until this function returns.
*/
struct io_cq *ioc_create_icq(struct request_queue *q, gfp_t gfp_mask)
struct io_cq *ioc_create_icq(struct io_context *ioc, struct request_queue *q,
gfp_t gfp_mask)
{
struct elevator_type *et = q->elevator->type;
struct io_context *ioc;
struct io_cq *icq;

/* allocate stuff */
ioc = create_io_context(current, gfp_mask, q->node);
if (!ioc)
return NULL;

icq = kmem_cache_alloc_node(et->icq_cache, gfp_mask | __GFP_ZERO,
q->node);
if (!icq)

@@ -382,74 +394,6 @@ struct io_cq *ioc_create_icq(struct request_queue *q, gfp_t gfp_mask)
return icq;
}

void ioc_set_icq_flags(struct io_context *ioc, unsigned int flags)
{
struct io_cq *icq;
struct hlist_node *n;

hlist_for_each_entry(icq, n, &ioc->icq_list, ioc_node)
icq->flags |= flags;
}

/**
* ioc_ioprio_changed - notify ioprio change
* @ioc: io_context of interest
* @ioprio: new ioprio
*
* @ioc's ioprio has changed to @ioprio. Set %ICQ_IOPRIO_CHANGED for all
* icq's. iosched is responsible for checking the bit and applying it on
* request issue path.
*/
void ioc_ioprio_changed(struct io_context *ioc, int ioprio)
{
unsigned long flags;

spin_lock_irqsave(&ioc->lock, flags);
ioc->ioprio = ioprio;
ioc_set_icq_flags(ioc, ICQ_IOPRIO_CHANGED);
spin_unlock_irqrestore(&ioc->lock, flags);
}

/**
* ioc_cgroup_changed - notify cgroup change
* @ioc: io_context of interest
*
* @ioc's cgroup has changed. Set %ICQ_CGROUP_CHANGED for all icq's.
* iosched is responsible for checking the bit and applying it on request
* issue path.
*/
void ioc_cgroup_changed(struct io_context *ioc)
{
unsigned long flags;

spin_lock_irqsave(&ioc->lock, flags);
ioc_set_icq_flags(ioc, ICQ_CGROUP_CHANGED);
spin_unlock_irqrestore(&ioc->lock, flags);
}
EXPORT_SYMBOL(ioc_cgroup_changed);

/**
* icq_get_changed - fetch and clear icq changed mask
* @icq: icq of interest
*
* Fetch and clear ICQ_*_CHANGED bits from @icq. Grabs and releases
* @icq->ioc->lock.
*/
unsigned icq_get_changed(struct io_cq *icq)
{
unsigned int changed = 0;
unsigned long flags;

if (unlikely(icq->flags & ICQ_CHANGED_MASK)) {
spin_lock_irqsave(&icq->ioc->lock, flags);
changed = icq->flags & ICQ_CHANGED_MASK;
icq->flags &= ~ICQ_CHANGED_MASK;
spin_unlock_irqrestore(&icq->ioc->lock, flags);
}
return changed;
}
EXPORT_SYMBOL(icq_get_changed);

static int __init blk_ioc_init(void)
{
iocontext_cachep = kmem_cache_create("blkdev_ioc",
block/blk-sysfs.c

@@ -9,6 +9,7 @@
#include <linux/blktrace_api.h>

#include "blk.h"
#include "blk-cgroup.h"

struct queue_sysfs_entry {
struct attribute attr;

@@ -479,6 +480,8 @@ static void blk_release_queue(struct kobject *kobj)

blk_sync_queue(q);

blkcg_exit_queue(q);

if (q->elevator) {
spin_lock_irq(q->queue_lock);
ioc_clear_queue(q);

@@ -486,15 +489,12 @@ static void blk_release_queue(struct kobject *kobj)
elevator_exit(q->elevator);
}

blk_throtl_exit(q);

if (rl->rq_pool)
mempool_destroy(rl->rq_pool);

if (q->queue_tags)
__blk_queue_free_tags(q);

blk_throtl_release(q);
blk_trace_shutdown(q);

bdi_destroy(&q->backing_dev_info);
(one file's diff suppressed because it is too large)

block/blk.h (32 lines changed)
@@ -23,7 +23,8 @@ void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
struct bio *bio);
int blk_rq_append_bio(struct request_queue *q, struct request *rq,
struct bio *bio);
void blk_drain_queue(struct request_queue *q, bool drain_all);
void blk_queue_bypass_start(struct request_queue *q);
void blk_queue_bypass_end(struct request_queue *q);
void blk_dequeue_request(struct request *rq);
void __blk_queue_free_tags(struct request_queue *q);
bool __blk_end_bidi_request(struct request *rq, int error,

@@ -144,9 +145,6 @@ void blk_queue_congestion_threshold(struct request_queue *q);

int blk_dev_init(void);

void elv_quiesce_start(struct request_queue *q);
void elv_quiesce_end(struct request_queue *q);

/*
* Return the threshold (number of used requests) at which the queue is

@@ -186,32 +184,30 @@ static inline int blk_do_io_stat(struct request *rq)
*/
void get_io_context(struct io_context *ioc);
struct io_cq *ioc_lookup_icq(struct io_context *ioc, struct request_queue *q);
struct io_cq *ioc_create_icq(struct request_queue *q, gfp_t gfp_mask);
struct io_cq *ioc_create_icq(struct io_context *ioc, struct request_queue *q,
gfp_t gfp_mask);
void ioc_clear_queue(struct request_queue *q);

void create_io_context_slowpath(struct task_struct *task, gfp_t gfp_mask,
int node);
int create_task_io_context(struct task_struct *task, gfp_t gfp_mask, int node);

/**
* create_io_context - try to create task->io_context
* @task: target task
* @gfp_mask: allocation mask
* @node: allocation node
*
* If @task->io_context is %NULL, allocate a new io_context and install it.
* Returns the current @task->io_context which may be %NULL if allocation
* failed.
* If %current->io_context is %NULL, allocate a new io_context and install
* it. Returns the current %current->io_context which may be %NULL if
* allocation failed.
*
* Note that this function can't be called with IRQ disabled because
* task_lock which protects @task->io_context is IRQ-unsafe.
* task_lock which protects %current->io_context is IRQ-unsafe.
*/
static inline struct io_context *create_io_context(struct task_struct *task,
gfp_t gfp_mask, int node)
static inline struct io_context *create_io_context(gfp_t gfp_mask, int node)
{
WARN_ON_ONCE(irqs_disabled());
if (unlikely(!task->io_context))
create_io_context_slowpath(task, gfp_mask, node);
return task->io_context;
if (unlikely(!current->io_context))
create_task_io_context(current, gfp_mask, node);
return current->io_context;
}

/*

@@ -222,7 +218,6 @@ extern bool blk_throtl_bio(struct request_queue *q, struct bio *bio);
extern void blk_throtl_drain(struct request_queue *q);
extern int blk_throtl_init(struct request_queue *q);
extern void blk_throtl_exit(struct request_queue *q);
extern void blk_throtl_release(struct request_queue *q);
#else /* CONFIG_BLK_DEV_THROTTLING */
static inline bool blk_throtl_bio(struct request_queue *q, struct bio *bio)
{

@@ -231,7 +226,6 @@ static inline bool blk_throtl_bio(struct request_queue *q, struct bio *bio)
static inline void blk_throtl_drain(struct request_queue *q) { }
static inline int blk_throtl_init(struct request_queue *q) { return 0; }
static inline void blk_throtl_exit(struct request_queue *q) { }
static inline void blk_throtl_release(struct request_queue *q) { }
#endif /* CONFIG_BLK_DEV_THROTTLING */

#endif /* BLK_INTERNAL_H */
block/cfq-iosched.c (1082 lines changed; diff suppressed because it is too large)

block/cfq.h (115 lines changed)
@@ -1,115 +0,0 @@
#ifndef _CFQ_H
#define _CFQ_H
#include "blk-cgroup.h"

#ifdef CONFIG_CFQ_GROUP_IOSCHED
static inline void cfq_blkiocg_update_io_add_stats(struct blkio_group *blkg,
struct blkio_group *curr_blkg, bool direction, bool sync)
{
blkiocg_update_io_add_stats(blkg, curr_blkg, direction, sync);
}

static inline void cfq_blkiocg_update_dequeue_stats(struct blkio_group *blkg,
unsigned long dequeue)
{
blkiocg_update_dequeue_stats(blkg, dequeue);
}

static inline void cfq_blkiocg_update_timeslice_used(struct blkio_group *blkg,
unsigned long time, unsigned long unaccounted_time)
{
blkiocg_update_timeslice_used(blkg, time, unaccounted_time);
}

static inline void cfq_blkiocg_set_start_empty_time(struct blkio_group *blkg)
{
blkiocg_set_start_empty_time(blkg);
}

static inline void cfq_blkiocg_update_io_remove_stats(struct blkio_group *blkg,
bool direction, bool sync)
{
blkiocg_update_io_remove_stats(blkg, direction, sync);
}

static inline void cfq_blkiocg_update_io_merged_stats(struct blkio_group *blkg,
bool direction, bool sync)
{
blkiocg_update_io_merged_stats(blkg, direction, sync);
}

static inline void cfq_blkiocg_update_idle_time_stats(struct blkio_group *blkg)
{
blkiocg_update_idle_time_stats(blkg);
}

static inline void
cfq_blkiocg_update_avg_queue_size_stats(struct blkio_group *blkg)
{
blkiocg_update_avg_queue_size_stats(blkg);
}

static inline void
cfq_blkiocg_update_set_idle_time_stats(struct blkio_group *blkg)
{
blkiocg_update_set_idle_time_stats(blkg);
}

static inline void cfq_blkiocg_update_dispatch_stats(struct blkio_group *blkg,
uint64_t bytes, bool direction, bool sync)
{
blkiocg_update_dispatch_stats(blkg, bytes, direction, sync);
}

static inline void cfq_blkiocg_update_completion_stats(struct blkio_group *blkg, uint64_t start_time, uint64_t io_start_time, bool direction, bool sync)
{
blkiocg_update_completion_stats(blkg, start_time, io_start_time,
direction, sync);
}

static inline void cfq_blkiocg_add_blkio_group(struct blkio_cgroup *blkcg,
struct blkio_group *blkg, void *key, dev_t dev) {
blkiocg_add_blkio_group(blkcg, blkg, key, dev, BLKIO_POLICY_PROP);
}

static inline int cfq_blkiocg_del_blkio_group(struct blkio_group *blkg)
{
return blkiocg_del_blkio_group(blkg);
}

#else /* CFQ_GROUP_IOSCHED */
static inline void cfq_blkiocg_update_io_add_stats(struct blkio_group *blkg,
struct blkio_group *curr_blkg, bool direction, bool sync) {}

static inline void cfq_blkiocg_update_dequeue_stats(struct blkio_group *blkg,
unsigned long dequeue) {}

static inline void cfq_blkiocg_update_timeslice_used(struct blkio_group *blkg,
unsigned long time, unsigned long unaccounted_time) {}
static inline void cfq_blkiocg_set_start_empty_time(struct blkio_group *blkg) {}
static inline void cfq_blkiocg_update_io_remove_stats(struct blkio_group *blkg,
bool direction, bool sync) {}
static inline void cfq_blkiocg_update_io_merged_stats(struct blkio_group *blkg,
bool direction, bool sync) {}
static inline void cfq_blkiocg_update_idle_time_stats(struct blkio_group *blkg)
{
}
static inline void
cfq_blkiocg_update_avg_queue_size_stats(struct blkio_group *blkg) {}

static inline void
cfq_blkiocg_update_set_idle_time_stats(struct blkio_group *blkg) {}

static inline void cfq_blkiocg_update_dispatch_stats(struct blkio_group *blkg,
uint64_t bytes, bool direction, bool sync) {}
static inline void cfq_blkiocg_update_completion_stats(struct blkio_group *blkg, uint64_t start_time, uint64_t io_start_time, bool direction, bool sync) {}

static inline void cfq_blkiocg_add_blkio_group(struct blkio_cgroup *blkcg,
struct blkio_group *blkg, void *key, dev_t dev) {}
static inline int cfq_blkiocg_del_blkio_group(struct blkio_group *blkg)
{
return 0;
}

#endif /* CFQ_GROUP_IOSCHED */
#endif
block/deadline-iosched.c

@@ -337,13 +337,13 @@ static void deadline_exit_queue(struct elevator_queue *e)
/*
* initialize elevator private data (deadline_data).
*/
static void *deadline_init_queue(struct request_queue *q)
static int deadline_init_queue(struct request_queue *q)
{
struct deadline_data *dd;

dd = kmalloc_node(sizeof(*dd), GFP_KERNEL | __GFP_ZERO, q->node);
if (!dd)
return NULL;
return -ENOMEM;

INIT_LIST_HEAD(&dd->fifo_list[READ]);
INIT_LIST_HEAD(&dd->fifo_list[WRITE]);

@@ -354,7 +354,9 @@ static void *deadline_init_queue(struct request_queue *q)
dd->writes_starved = writes_starved;
dd->front_merges = 1;
dd->fifo_batch = fifo_batch;
return dd;

q->elevator->elevator_data = dd;
return 0;
}

/*
block/elevator.c (121 lines changed)
@@ -38,6 +38,7 @@
#include <trace/events/block.h>

#include "blk.h"
#include "blk-cgroup.h"

static DEFINE_SPINLOCK(elv_list_lock);
static LIST_HEAD(elv_list);

@@ -121,15 +122,6 @@ static struct elevator_type *elevator_get(const char *name)
return e;
}

static int elevator_init_queue(struct request_queue *q,
struct elevator_queue *eq)
{
eq->elevator_data = eq->type->ops.elevator_init_fn(q);
if (eq->elevator_data)
return 0;
return -ENOMEM;
}

static char chosen_elevator[ELV_NAME_MAX];

static int __init elevator_setup(char *str)

@@ -188,7 +180,6 @@ static void elevator_release(struct kobject *kobj)
int elevator_init(struct request_queue *q, char *name)
{
struct elevator_type *e = NULL;
struct elevator_queue *eq;
int err;

if (unlikely(q->elevator))

@@ -222,17 +213,16 @@ int elevator_init(struct request_queue *q, char *name)
}
}

eq = elevator_alloc(q, e);
if (!eq)
q->elevator = elevator_alloc(q, e);
if (!q->elevator)
return -ENOMEM;

err = elevator_init_queue(q, eq);
err = e->ops.elevator_init_fn(q);
if (err) {
kobject_put(&eq->kobj);
kobject_put(&q->elevator->kobj);
return err;
}

q->elevator = eq;
return 0;
}
EXPORT_SYMBOL(elevator_init);

@@ -564,25 +554,6 @@ void elv_drain_elevator(struct request_queue *q)
}
}

void elv_quiesce_start(struct request_queue *q)
{
if (!q->elevator)
return;

spin_lock_irq(q->queue_lock);
queue_flag_set(QUEUE_FLAG_ELVSWITCH, q);
spin_unlock_irq(q->queue_lock);

blk_drain_queue(q, false);
}

void elv_quiesce_end(struct request_queue *q)
{
spin_lock_irq(q->queue_lock);
queue_flag_clear(QUEUE_FLAG_ELVSWITCH, q);
spin_unlock_irq(q->queue_lock);
}

void __elv_add_request(struct request_queue *q, struct request *rq, int where)
{
trace_block_rq_insert(q, rq);

@@ -692,12 +663,13 @@ struct request *elv_former_request(struct request_queue *q, struct request *rq)
return NULL;
}

int elv_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask)
int elv_set_request(struct request_queue *q, struct request *rq,
struct bio *bio, gfp_t gfp_mask)
{
struct elevator_queue *e = q->elevator;

if (e->type->ops.elevator_set_req_fn)
return e->type->ops.elevator_set_req_fn(q, rq, gfp_mask);
return e->type->ops.elevator_set_req_fn(q, rq, bio, gfp_mask);
return 0;
}

@@ -801,8 +773,9 @@ static struct kobj_type elv_ktype = {
.release = elevator_release,
};

int __elv_register_queue(struct request_queue *q, struct elevator_queue *e)
int elv_register_queue(struct request_queue *q)
{
struct elevator_queue *e = q->elevator;
int error;

error = kobject_add(&e->kobj, &q->kobj, "%s", "iosched");

@@ -820,11 +793,6 @@ int __elv_register_queue(struct request_queue *q, struct elevator_queue *e)
}
return error;
}

int elv_register_queue(struct request_queue *q)
{
return __elv_register_queue(q, q->elevator);
}
EXPORT_SYMBOL(elv_register_queue);

void elv_unregister_queue(struct request_queue *q)

@@ -907,53 +875,60 @@ EXPORT_SYMBOL_GPL(elv_unregister);
*/
static int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
{
struct elevator_queue *old_elevator, *e;
struct elevator_queue *old = q->elevator;
bool registered = old->registered;
int err;

/* allocate new elevator */
e = elevator_alloc(q, new_e);
if (!e)
return -ENOMEM;
/*
* Turn on BYPASS and drain all requests w/ elevator private data.
* Block layer doesn't call into a quiesced elevator - all requests
* are directly put on the dispatch list without elevator data
* using INSERT_BACK. All requests have SOFTBARRIER set and no
* merge happens either.
*/
blk_queue_bypass_start(q);

err = elevator_init_queue(q, e);
/* unregister and clear all auxiliary data of the old elevator */
if (registered)
elv_unregister_queue(q);

spin_lock_irq(q->queue_lock);
ioc_clear_queue(q);
spin_unlock_irq(q->queue_lock);

/* allocate, init and register new elevator */
err = -ENOMEM;
q->elevator = elevator_alloc(q, new_e);
if (!q->elevator)
goto fail_init;

err = new_e->ops.elevator_init_fn(q);
if (err) {
kobject_put(&e->kobj);
return err;
kobject_put(&q->elevator->kobj);
goto fail_init;
}

/* turn on BYPASS and drain all requests w/ elevator private data */
elv_quiesce_start(q);

/* unregister old queue, register new one and kill old elevator */
if (q->elevator->registered) {
elv_unregister_queue(q);
err = __elv_register_queue(q, e);
if (registered) {
err = elv_register_queue(q);
if (err)
goto fail_register;
}

/* done, clear io_cq's, switch elevators and turn off BYPASS */
spin_lock_irq(q->queue_lock);
ioc_clear_queue(q);
old_elevator = q->elevator;
q->elevator = e;
spin_unlock_irq(q->queue_lock);
/* done, kill the old one and finish */
elevator_exit(old);
blk_queue_bypass_end(q);

elevator_exit(old_elevator);
elv_quiesce_end(q);

blk_add_trace_msg(q, "elv switch: %s", e->type->elevator_name);
blk_add_trace_msg(q, "elv switch: %s", new_e->elevator_name);

return 0;

fail_register:
/*
* switch failed, exit the new io scheduler and reattach the old
* one again (along with re-adding the sysfs dir)
*/
elevator_exit(e);
elevator_exit(q->elevator);
fail_init:
/* switch failed, restore and re-register old elevator */
q->elevator = old;
elv_register_queue(q);
elv_quiesce_end(q);
blk_queue_bypass_end(q);

return err;
}
block/noop-iosched.c

@@ -59,15 +59,17 @@ noop_latter_request(struct request_queue *q, struct request *rq)
return list_entry(rq->queuelist.next, struct request, queuelist);
}

static void *noop_init_queue(struct request_queue *q)
static int noop_init_queue(struct request_queue *q)
{
struct noop_data *nd;

nd = kmalloc_node(sizeof(*nd), GFP_KERNEL, q->node);
if (!nd)
return NULL;
return -ENOMEM;

INIT_LIST_HEAD(&nd->queue);
return nd;
q->elevator->elevator_data = nd;
return 0;
}

static void noop_exit_queue(struct elevator_queue *e)
fs/bio.c (61 lines changed)
@@ -19,12 +19,14 @@
#include <linux/swap.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/iocontext.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/mempool.h>
#include <linux/workqueue.h>
#include <linux/cgroup.h>
#include <scsi/sg.h> /* for struct sg_iovec */

#include <trace/events/block.h>

@@ -418,6 +420,7 @@ void bio_put(struct bio *bio)
* last put frees it
*/
if (atomic_dec_and_test(&bio->bi_cnt)) {
bio_disassociate_task(bio);
bio->bi_next = NULL;
bio->bi_destructor(bio);
}

@@ -1646,6 +1649,64 @@ bad:
}
EXPORT_SYMBOL(bioset_create);

#ifdef CONFIG_BLK_CGROUP
/**
* bio_associate_current - associate a bio with %current
* @bio: target bio
*
* Associate @bio with %current if it hasn't been associated yet. Block
* layer will treat @bio as if it were issued by %current no matter which
* task actually issues it.
*
* This function takes an extra reference of @task's io_context and blkcg
* which will be put when @bio is released. The caller must own @bio,
* ensure %current->io_context exists, and is responsible for synchronizing
* calls to this function.
*/
int bio_associate_current(struct bio *bio)
{
struct io_context *ioc;
struct cgroup_subsys_state *css;

if (bio->bi_ioc)
return -EBUSY;

ioc = current->io_context;
if (!ioc)
return -ENOENT;

/* acquire active ref on @ioc and associate */
get_io_context_active(ioc);
bio->bi_ioc = ioc;

/* associate blkcg if exists */
rcu_read_lock();
css = task_subsys_state(current, blkio_subsys_id);
if (css && css_tryget(css))
bio->bi_css = css;
rcu_read_unlock();

return 0;
}

/**
* bio_disassociate_task - undo bio_associate_current()
* @bio: target bio
*/
void bio_disassociate_task(struct bio *bio)
{
if (bio->bi_ioc) {
put_io_context(bio->bi_ioc);
bio->bi_ioc = NULL;
}
if (bio->bi_css) {
css_put(bio->bi_css);
bio->bi_css = NULL;
}
}

#endif /* CONFIG_BLK_CGROUP */

static void __init biovec_init_slabs(void)
{
int i;
fs/ioprio.c

@@ -50,7 +50,7 @@ int set_task_ioprio(struct task_struct *task, int ioprio)

ioc = get_task_io_context(task, GFP_ATOMIC, NUMA_NO_NODE);
if (ioc) {
ioc_ioprio_changed(ioc, ioprio);
ioc->ioprio = ioprio;
put_io_context(ioc);
}
fs/splice.c

@@ -1388,7 +1388,7 @@ static long do_splice(struct file *in, loff_t __user *off_in,
*/
static int get_iovec_page_array(const struct iovec __user *iov,
unsigned int nr_vecs, struct page **pages,
struct partial_page *partial, int aligned,
struct partial_page *partial, bool aligned,
unsigned int pipe_buffers)
{
int buffers = 0, error = 0;

@@ -1626,7 +1626,7 @@ static long vmsplice_to_pipe(struct file *file, const struct iovec __user *iov,
return -ENOMEM;

spd.nr_pages = get_iovec_page_array(iov, nr_segs, spd.pages,
spd.partial, flags & SPLICE_F_GIFT,
spd.partial, false,
pipe->buffers);
if (spd.nr_pages <= 0)
ret = spd.nr_pages;
include/linux/bio.h

@@ -269,6 +269,14 @@ extern struct bio_vec *bvec_alloc_bs(gfp_t, int, unsigned long *, struct bio_set
extern void bvec_free_bs(struct bio_set *, struct bio_vec *, unsigned int);
extern unsigned int bvec_nr_vecs(unsigned short idx);

#ifdef CONFIG_BLK_CGROUP
int bio_associate_current(struct bio *bio);
void bio_disassociate_task(struct bio *bio);
#else /* CONFIG_BLK_CGROUP */
static inline int bio_associate_current(struct bio *bio) { return -ENOENT; }
static inline void bio_disassociate_task(struct bio *bio) { }
#endif /* CONFIG_BLK_CGROUP */

/*
* bio_set is used to allow other portions of the IO system to
* allocate their own private memory pools for bio and iovec structures.
include/linux/blk_types.h

@@ -14,6 +14,8 @@ struct bio;
struct bio_integrity_payload;
struct page;
struct block_device;
struct io_context;
struct cgroup_subsys_state;
typedef void (bio_end_io_t) (struct bio *, int);
typedef void (bio_destructor_t) (struct bio *);

@@ -66,6 +68,14 @@ struct bio {
bio_end_io_t *bi_end_io;

void *bi_private;
#ifdef CONFIG_BLK_CGROUP
/*
* Optional ioc and css associated with this bio. Put on bio
* release. Read comment on top of bio_associate_current().
*/
struct io_context *bi_ioc;
struct cgroup_subsys_state *bi_css;
#endif
#if defined(CONFIG_BLK_DEV_INTEGRITY)
struct bio_integrity_payload *bi_integrity; /* data integrity */
#endif
include/linux/blkdev.h

@@ -32,10 +32,17 @@ struct blk_trace;
struct request;
struct sg_io_hdr;
struct bsg_job;
struct blkcg_gq;

#define BLKDEV_MIN_RQ 4
#define BLKDEV_MAX_RQ 128 /* Default maximum */

/*
* Maximum number of blkcg policies allowed to be registered concurrently.
* Defined here to simplify include dependency.
*/
#define BLKCG_MAX_POLS 2

struct request;
typedef void (rq_end_io_fn)(struct request *, int);

@@ -363,6 +370,11 @@ struct request_queue {
struct list_head timeout_list;

struct list_head icq_list;
#ifdef CONFIG_BLK_CGROUP
DECLARE_BITMAP (blkcg_pols, BLKCG_MAX_POLS);
struct blkcg_gq *root_blkg;
struct list_head blkg_list;
#endif

struct queue_limits limits;

@@ -390,12 +402,17 @@ struct request_queue {

struct mutex sysfs_lock;

int bypass_depth;

#if defined(CONFIG_BLK_DEV_BSG)
bsg_job_fn *bsg_job_fn;
int bsg_job_size;
struct bsg_class_device bsg_dev;
#endif

#ifdef CONFIG_BLK_CGROUP
struct list_head all_q_node;
#endif
#ifdef CONFIG_BLK_DEV_THROTTLING
/* Throttle data */
struct throtl_data *td;

@@ -407,7 +424,7 @@ struct request_queue {
#define QUEUE_FLAG_SYNCFULL 3 /* read queue has been filled */
#define QUEUE_FLAG_ASYNCFULL 4 /* write queue has been filled */
#define QUEUE_FLAG_DEAD 5 /* queue being torn down */
#define QUEUE_FLAG_ELVSWITCH 6 /* don't use elevator, just do FIFO */
#define QUEUE_FLAG_BYPASS 6 /* act as dumb FIFO queue */
#define QUEUE_FLAG_BIDI 7 /* queue supports bidi requests */
#define QUEUE_FLAG_NOMERGES 8 /* disable merge attempts */
#define QUEUE_FLAG_SAME_COMP 9 /* complete on same CPU-group */

@@ -491,6 +508,7 @@ static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
#define blk_queue_tagged(q) test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags)
#define blk_queue_stopped(q) test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
#define blk_queue_dead(q) test_bit(QUEUE_FLAG_DEAD, &(q)->queue_flags)
#define blk_queue_bypass(q) test_bit(QUEUE_FLAG_BYPASS, &(q)->queue_flags)
#define blk_queue_nomerges(q) test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)
#define blk_queue_noxmerges(q) \
test_bit(QUEUE_FLAG_NOXMERGES, &(q)->queue_flags)
include/linux/elevator.h

@@ -28,12 +28,13 @@ typedef int (elevator_may_queue_fn) (struct request_queue *, int);

typedef void (elevator_init_icq_fn) (struct io_cq *);
typedef void (elevator_exit_icq_fn) (struct io_cq *);
typedef int (elevator_set_req_fn) (struct request_queue *, struct request *, gfp_t);
typedef int (elevator_set_req_fn) (struct request_queue *, struct request *,
struct bio *, gfp_t);
typedef void (elevator_put_req_fn) (struct request *);
typedef void (elevator_activate_req_fn) (struct request_queue *, struct request *);
typedef void (elevator_deactivate_req_fn) (struct request_queue *, struct request *);

typedef void *(elevator_init_fn) (struct request_queue *);
typedef int (elevator_init_fn) (struct request_queue *);
typedef void (elevator_exit_fn) (struct elevator_queue *);

struct elevator_ops

@@ -129,7 +130,8 @@ extern void elv_unregister_queue(struct request_queue *q);
extern int elv_may_queue(struct request_queue *, int);
extern void elv_abort_queue(struct request_queue *);
extern void elv_completed_request(struct request_queue *, struct request *);
extern int elv_set_request(struct request_queue *, struct request *, gfp_t);
extern int elv_set_request(struct request_queue *q, struct request *rq,
struct bio *bio, gfp_t gfp_mask);
extern void elv_put_request(struct request_queue *, struct request *);
extern void elv_drain_elevator(struct request_queue *);
Some files were not shown because too many files have changed in this diff.