mirror of https://github.com/linux-apfs/linux-apfs.git
Merge branch 'for-3.3/core' of git://git.kernel.dk/linux-block
* 'for-3.3/core' of git://git.kernel.dk/linux-block: (37 commits)
  Revert "block: recursive merge requests"
  block: Stop using macro stubs for the bio data integrity calls
  blockdev: convert some macros to static inlines
  fs: remove unneeded plug in mpage_readpages()
  block: Add BLKROTATIONAL ioctl
  block: Introduce blk_set_stacking_limits function
  block: remove WARN_ON_ONCE() in exit_io_context()
  block: an exiting task should be allowed to create io_context
  block: ioc_cgroup_changed() needs to be exported
  block: recursive merge requests
  block, cfq: fix empty queue crash caused by request merge
  block, cfq: move icq creation and rq->elv.icq association to block core
  block, cfq: restructure io_cq creation path for io_context interface cleanup
  block, cfq: move io_cq exit/release to blk-ioc.c
  block, cfq: move icq cache management to block core
  block, cfq: move io_cq lookup to blk-ioc.c
  block, cfq: move cfqd->icq_list to request_queue and add request->elv.icq
  block, cfq: reorganize cfq_io_context into generic and cfq specific parts
  block: remove elevator_queue->ops
  block: reorder elevator switch sequence
  ...

Fix up conflicts in:

 - block/blk-cgroup.c
   Switch from can_attach_task to can_attach

 - block/cfq-iosched.c
   conflict with now removed cic index changes (we now use q->id instead)
block/blk-cgroup.c  +6 -5

@@ -1655,11 +1655,12 @@ static void blkiocg_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
 	struct io_context *ioc;
 
 	cgroup_taskset_for_each(task, cgrp, tset) {
-		task_lock(task);
-		ioc = task->io_context;
-		if (ioc)
-			ioc->cgroup_changed = 1;
-		task_unlock(task);
+		/* we don't lose anything even if ioc allocation fails */
+		ioc = get_task_io_context(task, GFP_ATOMIC, NUMA_NO_NODE);
+		if (ioc) {
+			ioc_cgroup_changed(ioc);
+			put_io_context(ioc, NULL);
+		}
 	}
 }

block/blk-core.c  +122 -81

@@ -39,6 +39,8 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_remap);
 EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_remap);
 EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_complete);
 
+DEFINE_IDA(blk_queue_ida);
+
 /*
  * For the allocated request tables
  */
@@ -358,7 +360,8 @@ EXPORT_SYMBOL(blk_put_queue);
 void blk_drain_queue(struct request_queue *q, bool drain_all)
 {
 	while (true) {
-		int nr_rqs;
+		bool drain = false;
+		int i;
 
 		spin_lock_irq(q->queue_lock);
 
@@ -375,14 +378,25 @@ void blk_drain_queue(struct request_queue *q, bool drain_all)
 		if (!list_empty(&q->queue_head))
 			__blk_run_queue(q);
 
-		if (drain_all)
-			nr_rqs = q->rq.count[0] + q->rq.count[1];
-		else
-			nr_rqs = q->rq.elvpriv;
+		drain |= q->rq.elvpriv;
+
+		/*
+		 * Unfortunately, requests are queued at and tracked from
+		 * multiple places and there's no single counter which can
+		 * be drained.  Check all the queues and counters.
+		 */
+		if (drain_all) {
+			drain |= !list_empty(&q->queue_head);
+			for (i = 0; i < 2; i++) {
+				drain |= q->rq.count[i];
+				drain |= q->in_flight[i];
+				drain |= !list_empty(&q->flush_queue[i]);
+			}
+		}
 
 		spin_unlock_irq(q->queue_lock);
 
-		if (!nr_rqs)
+		if (!drain)
 			break;
 		msleep(10);
 	}
@@ -469,6 +483,10 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
 	if (!q)
 		return NULL;
 
+	q->id = ida_simple_get(&blk_queue_ida, 0, 0, GFP_KERNEL);
+	if (q->id < 0)
+		goto fail_q;
+
 	q->backing_dev_info.ra_pages =
 			(VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE;
 	q->backing_dev_info.state = 0;
@@ -477,20 +495,17 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
 	q->node = node_id;
 
 	err = bdi_init(&q->backing_dev_info);
-	if (err) {
-		kmem_cache_free(blk_requestq_cachep, q);
-		return NULL;
-	}
+	if (err)
+		goto fail_id;
 
-	if (blk_throtl_init(q)) {
-		kmem_cache_free(blk_requestq_cachep, q);
-		return NULL;
-	}
+	if (blk_throtl_init(q))
+		goto fail_id;
 
 	setup_timer(&q->backing_dev_info.laptop_mode_wb_timer,
 		    laptop_mode_timer_fn, (unsigned long) q);
 	setup_timer(&q->timeout, blk_rq_timed_out_timer, (unsigned long) q);
 	INIT_LIST_HEAD(&q->timeout_list);
+	INIT_LIST_HEAD(&q->icq_list);
 	INIT_LIST_HEAD(&q->flush_queue[0]);
 	INIT_LIST_HEAD(&q->flush_queue[1]);
 	INIT_LIST_HEAD(&q->flush_data_in_flight);
@@ -508,6 +523,12 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
 	q->queue_lock = &q->__queue_lock;
 
 	return q;
+
+fail_id:
+	ida_simple_remove(&blk_queue_ida, q->id);
+fail_q:
+	kmem_cache_free(blk_requestq_cachep, q);
+	return NULL;
 }
 EXPORT_SYMBOL(blk_alloc_queue_node);
 
@@ -605,26 +626,31 @@ blk_init_allocated_queue(struct request_queue *q, request_fn_proc *rfn,
 }
 EXPORT_SYMBOL(blk_init_allocated_queue);
 
-int blk_get_queue(struct request_queue *q)
+bool blk_get_queue(struct request_queue *q)
 {
-	if (likely(!test_bit(QUEUE_FLAG_DEAD, &q->queue_flags))) {
-		kobject_get(&q->kobj);
-		return 0;
+	if (likely(!blk_queue_dead(q))) {
+		__blk_get_queue(q);
+		return true;
 	}
 
-	return 1;
+	return false;
 }
 EXPORT_SYMBOL(blk_get_queue);
 
 static inline void blk_free_request(struct request_queue *q, struct request *rq)
 {
-	if (rq->cmd_flags & REQ_ELVPRIV)
+	if (rq->cmd_flags & REQ_ELVPRIV) {
 		elv_put_request(q, rq);
+		if (rq->elv.icq)
+			put_io_context(rq->elv.icq->ioc, q);
+	}
 
 	mempool_free(rq, q->rq.rq_pool);
 }
 
 static struct request *
-blk_alloc_request(struct request_queue *q, unsigned int flags, gfp_t gfp_mask)
+blk_alloc_request(struct request_queue *q, struct io_cq *icq,
+		  unsigned int flags, gfp_t gfp_mask)
 {
 	struct request *rq = mempool_alloc(q->rq.rq_pool, gfp_mask);
 
@@ -635,10 +661,15 @@ blk_alloc_request(struct request_queue *q, unsigned int flags, gfp_t gfp_mask)
 
 	rq->cmd_flags = flags | REQ_ALLOCED;
 
-	if ((flags & REQ_ELVPRIV) &&
-	    unlikely(elv_set_request(q, rq, gfp_mask))) {
-		mempool_free(rq, q->rq.rq_pool);
-		return NULL;
+	if (flags & REQ_ELVPRIV) {
+		rq->elv.icq = icq;
+		if (unlikely(elv_set_request(q, rq, gfp_mask))) {
+			mempool_free(rq, q->rq.rq_pool);
+			return NULL;
+		}
+		/* @rq->elv.icq holds on to io_context until @rq is freed */
+		if (icq)
+			get_io_context(icq->ioc);
 	}
 
 	return rq;
@@ -750,11 +781,17 @@ static struct request *get_request(struct request_queue *q, int rw_flags,
 {
 	struct request *rq = NULL;
 	struct request_list *rl = &q->rq;
-	struct io_context *ioc = NULL;
+	struct elevator_type *et;
+	struct io_context *ioc;
+	struct io_cq *icq = NULL;
 	const bool is_sync = rw_is_sync(rw_flags) != 0;
+	bool retried = false;
 	int may_queue;
+retry:
+	et = q->elevator->type;
+	ioc = current->io_context;
 
-	if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)))
+	if (unlikely(blk_queue_dead(q)))
 		return NULL;
 
 	may_queue = elv_may_queue(q, rw_flags);
@@ -763,7 +800,20 @@ static struct request *get_request(struct request_queue *q, int rw_flags,
 
 	if (rl->count[is_sync]+1 >= queue_congestion_on_threshold(q)) {
 		if (rl->count[is_sync]+1 >= q->nr_requests) {
-			ioc = current_io_context(GFP_ATOMIC, q->node);
+			/*
+			 * We want ioc to record batching state.  If it's
+			 * not already there, creating a new one requires
+			 * dropping queue_lock, which in turn requires
+			 * retesting conditions to avoid queue hang.
+			 */
+			if (!ioc && !retried) {
+				spin_unlock_irq(q->queue_lock);
+				create_io_context(current, gfp_mask, q->node);
+				spin_lock_irq(q->queue_lock);
+				retried = true;
+				goto retry;
+			}
+
 			/*
 			 * The queue will fill after this allocation, so set
 			 * it as full, and mark this process as "batching".
@@ -799,17 +849,36 @@ static struct request *get_request(struct request_queue *q, int rw_flags,
 	rl->count[is_sync]++;
 	rl->starved[is_sync] = 0;
 
+	/*
+	 * Decide whether the new request will be managed by elevator.  If
+	 * so, mark @rw_flags and increment elvpriv.  Non-zero elvpriv will
+	 * prevent the current elevator from being destroyed until the new
+	 * request is freed.  This guarantees icq's won't be destroyed and
+	 * makes creating new ones safe.
+	 *
+	 * Also, lookup icq while holding queue_lock.  If it doesn't exist,
+	 * it will be created after releasing queue_lock.
+	 */
 	if (blk_rq_should_init_elevator(bio) &&
 	    !test_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags)) {
 		rw_flags |= REQ_ELVPRIV;
 		rl->elvpriv++;
+		if (et->icq_cache && ioc)
+			icq = ioc_lookup_icq(ioc, q);
 	}
 
 	if (blk_queue_io_stat(q))
 		rw_flags |= REQ_IO_STAT;
 	spin_unlock_irq(q->queue_lock);
 
-	rq = blk_alloc_request(q, rw_flags, gfp_mask);
+	/* create icq if missing */
+	if (unlikely(et->icq_cache && !icq))
+		icq = ioc_create_icq(q, gfp_mask);
+
+	/* rqs are guaranteed to have icq on elv_set_request() if requested */
+	if (likely(!et->icq_cache || icq))
+		rq = blk_alloc_request(q, icq, rw_flags, gfp_mask);
+
 	if (unlikely(!rq)) {
 		/*
 		 * Allocation failed presumably due to memory. Undo anything
@@ -871,10 +940,9 @@ static struct request *get_request_wait(struct request_queue *q, int rw_flags,
 	rq = get_request(q, rw_flags, bio, GFP_NOIO);
 	while (!rq) {
 		DEFINE_WAIT(wait);
-		struct io_context *ioc;
 		struct request_list *rl = &q->rq;
 
-		if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)))
+		if (unlikely(blk_queue_dead(q)))
 			return NULL;
 
 		prepare_to_wait_exclusive(&rl->wait[is_sync], &wait,
@@ -891,8 +959,8 @@ static struct request *get_request_wait(struct request_queue *q, int rw_flags,
 		 * up to a big batch of them for a small period time.
 		 * See ioc_batching, ioc_set_batching
 		 */
-		ioc = current_io_context(GFP_NOIO, q->node);
-		ioc_set_batching(q, ioc);
+		create_io_context(current, GFP_NOIO, q->node);
+		ioc_set_batching(q, current->io_context);
 
 		spin_lock_irq(q->queue_lock);
 		finish_wait(&rl->wait[is_sync], &wait);
@@ -1009,54 +1077,6 @@ static void add_acct_request(struct request_queue *q, struct request *rq,
 	__elv_add_request(q, rq, where);
 }
 
-/**
- * blk_insert_request - insert a special request into a request queue
- * @q:	request queue where request should be inserted
- * @rq:	request to be inserted
- * @at_head:	insert request at head or tail of queue
- * @data:	private data
- *
- * Description:
- *    Many block devices need to execute commands asynchronously, so they don't
- *    block the whole kernel from preemption during request execution.  This is
- *    accomplished normally by inserting aritficial requests tagged as
- *    REQ_TYPE_SPECIAL in to the corresponding request queue, and letting them
- *    be scheduled for actual execution by the request queue.
- *
- *    We have the option of inserting the head or the tail of the queue.
- *    Typically we use the tail for new ioctls and so forth.  We use the head
- *    of the queue for things like a QUEUE_FULL message from a device, or a
- *    host that is unable to accept a particular command.
- */
-void blk_insert_request(struct request_queue *q, struct request *rq,
-			int at_head, void *data)
-{
-	int where = at_head ? ELEVATOR_INSERT_FRONT : ELEVATOR_INSERT_BACK;
-	unsigned long flags;
-
-	/*
-	 * tell I/O scheduler that this isn't a regular read/write (ie it
-	 * must not attempt merges on this) and that it acts as a soft
-	 * barrier
-	 */
-	rq->cmd_type = REQ_TYPE_SPECIAL;
-
-	rq->special = data;
-
-	spin_lock_irqsave(q->queue_lock, flags);
-
-	/*
-	 * If command is tagged, release the tag
-	 */
-	if (blk_rq_tagged(rq))
-		blk_queue_end_tag(q, rq);
-
-	add_acct_request(q, rq, where);
-	__blk_run_queue(q);
-	spin_unlock_irqrestore(q->queue_lock, flags);
-}
-EXPORT_SYMBOL(blk_insert_request);
-
 static void part_round_stats_single(int cpu, struct hd_struct *part,
 				    unsigned long now)
 {
@@ -1766,6 +1786,10 @@ int blk_insert_cloned_request(struct request_queue *q, struct request *rq)
 		return -EIO;
 
 	spin_lock_irqsave(q->queue_lock, flags);
+	if (unlikely(blk_queue_dead(q))) {
+		spin_unlock_irqrestore(q->queue_lock, flags);
+		return -ENODEV;
+	}
 
 	/*
 	 * Submitting request must be dequeued before calling this function
@@ -2739,6 +2763,14 @@ static void queue_unplugged(struct request_queue *q, unsigned int depth,
 {
 	trace_block_unplug(q, depth, !from_schedule);
 
+	/*
+	 * Don't mess with dead queue.
+	 */
+	if (unlikely(blk_queue_dead(q))) {
+		spin_unlock(q->queue_lock);
+		return;
+	}
+
 	/*
 	 * If we are punting this to kblockd, then we can safely drop
 	 * the queue_lock before waking kblockd (which needs to take
@@ -2815,6 +2847,15 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
 			depth = 0;
 			spin_lock(q->queue_lock);
 		}
 
+		/*
+		 * Short-circuit if @q is dead
+		 */
+		if (unlikely(blk_queue_dead(q))) {
+			__blk_end_request_all(rq, -ENODEV);
+			continue;
+		}
+
 		/*
 		 * rq is already accounted, so use raw insert
 		 */

block/blk-exec.c  +5 -3

@@ -50,7 +50,11 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
 {
 	int where = at_head ? ELEVATOR_INSERT_FRONT : ELEVATOR_INSERT_BACK;
 
-	if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags))) {
+	WARN_ON(irqs_disabled());
+	spin_lock_irq(q->queue_lock);
+
+	if (unlikely(blk_queue_dead(q))) {
+		spin_unlock_irq(q->queue_lock);
 		rq->errors = -ENXIO;
 		if (rq->end_io)
 			rq->end_io(rq, rq->errors);
@@ -59,8 +63,6 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
 
 	rq->rq_disk = bd_disk;
 	rq->end_io = done;
-	WARN_ON(irqs_disabled());
-	spin_lock_irq(q->queue_lock);
 	__elv_add_request(q, rq, where);
 	__blk_run_queue(q);
 	/* the queue is stopped so it won't be run */

+419 -104  (file diff suppressed because it is too large)

block/blk-settings.c  +24 -8

@@ -104,9 +104,7 @@ EXPORT_SYMBOL_GPL(blk_queue_lld_busy);
  * @lim:  the queue_limits structure to reset
  *
  * Description:
- *   Returns a queue_limit struct to its default state.  Can be used by
- *   stacking drivers like DM that stage table swaps and reuse an
- *   existing device queue.
+ *   Returns a queue_limit struct to its default state.
  */
 void blk_set_default_limits(struct queue_limits *lim)
 {
@@ -114,13 +112,12 @@
 	lim->max_integrity_segments = 0;
 	lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;
 	lim->max_segment_size = BLK_MAX_SEGMENT_SIZE;
-	lim->max_sectors = BLK_DEF_MAX_SECTORS;
-	lim->max_hw_sectors = INT_MAX;
+	lim->max_sectors = lim->max_hw_sectors = BLK_SAFE_MAX_SECTORS;
 	lim->max_discard_sectors = 0;
 	lim->discard_granularity = 0;
 	lim->discard_alignment = 0;
 	lim->discard_misaligned = 0;
-	lim->discard_zeroes_data = 1;
+	lim->discard_zeroes_data = 0;
 	lim->logical_block_size = lim->physical_block_size = lim->io_min = 512;
 	lim->bounce_pfn = (unsigned long)(BLK_BOUNCE_ANY >> PAGE_SHIFT);
 	lim->alignment_offset = 0;
@@ -130,6 +127,27 @@
 }
 EXPORT_SYMBOL(blk_set_default_limits);
 
+/**
+ * blk_set_stacking_limits - set default limits for stacking devices
+ * @lim:  the queue_limits structure to reset
+ *
+ * Description:
+ *   Returns a queue_limit struct to its default state. Should be used
+ *   by stacking drivers like DM that have no internal limits.
+ */
+void blk_set_stacking_limits(struct queue_limits *lim)
+{
+	blk_set_default_limits(lim);
+
+	/* Inherit limits from component devices */
+	lim->discard_zeroes_data = 1;
+	lim->max_segments = USHRT_MAX;
+	lim->max_hw_sectors = UINT_MAX;
+
+	lim->max_sectors = BLK_DEF_MAX_SECTORS;
+}
+EXPORT_SYMBOL(blk_set_stacking_limits);
+
 /**
  * blk_queue_make_request - define an alternate make_request function for a device
  * @q:  the request queue for the device to be affected
@@ -165,8 +183,6 @@ void blk_queue_make_request(struct request_queue *q, make_request_fn *mfn)
 	q->nr_batching = BLK_BATCH_REQ;
 
 	blk_set_default_limits(&q->limits);
-	blk_queue_max_hw_sectors(q, BLK_SAFE_MAX_SECTORS);
-	q->limits.discard_zeroes_data = 0;
 
 	/*
 	 * by default assume old behaviour and bounce for any highmem page

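The new blk_set_stacking_limits() starts a stacking driver's queue_limits from permissive defaults so that blk_stack_limits() can only narrow them from the component devices. Below is a minimal, hedged sketch of how a DM-style stacking driver might use it; the function name and the final assignment are illustrative, not taken from the DM code:

/* Hedged sketch: reset limits for a stacking device before restacking them. */
static void example_stacker_apply_limits(struct request_queue *q)
{
	struct queue_limits limits;

	/* permissive defaults meant to be narrowed per component device */
	blk_set_stacking_limits(&limits);

	/* ... blk_stack_limits(&limits, &component->limits, offset) would be
	 * called here once per underlying device ... */

	q->limits = limits;
}
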
block/blk-sysfs.c  +9 -3

@@ -425,7 +425,7 @@ queue_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
 	if (!entry->show)
 		return -EIO;
 	mutex_lock(&q->sysfs_lock);
-	if (test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)) {
+	if (blk_queue_dead(q)) {
 		mutex_unlock(&q->sysfs_lock);
 		return -ENOENT;
 	}
@@ -447,7 +447,7 @@ queue_attr_store(struct kobject *kobj, struct attribute *attr,
 
 	q = container_of(kobj, struct request_queue, kobj);
 	mutex_lock(&q->sysfs_lock);
-	if (test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)) {
+	if (blk_queue_dead(q)) {
 		mutex_unlock(&q->sysfs_lock);
 		return -ENOENT;
 	}
@@ -479,8 +479,12 @@ static void blk_release_queue(struct kobject *kobj)
 
 	blk_sync_queue(q);
 
-	if (q->elevator)
+	if (q->elevator) {
+		spin_lock_irq(q->queue_lock);
+		ioc_clear_queue(q);
+		spin_unlock_irq(q->queue_lock);
 		elevator_exit(q->elevator);
+	}
 
 	blk_throtl_exit(q);
 
@@ -494,6 +498,8 @@ static void blk_release_queue(struct kobject *kobj)
 	blk_trace_shutdown(q);
 
 	bdi_destroy(&q->backing_dev_info);
+
+	ida_simple_remove(&blk_queue_ida, q->id);
 	kmem_cache_free(blk_requestq_cachep, q);
 }

block/blk-throttle.c

@@ -310,7 +310,7 @@ static struct throtl_grp * throtl_get_tg(struct throtl_data *td)
 	struct request_queue *q = td->queue;
 
 	/* no throttling for dead queue */
-	if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)))
+	if (unlikely(blk_queue_dead(q)))
 		return NULL;
 
 	rcu_read_lock();
@@ -335,7 +335,7 @@ static struct throtl_grp * throtl_get_tg(struct throtl_data *td)
 	spin_lock_irq(q->queue_lock);
 
 	/* Make sure @q is still alive */
-	if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags))) {
+	if (unlikely(blk_queue_dead(q))) {
 		kfree(tg);
 		return NULL;
 	}

block/blk.h  +50 -8

@@ -1,6 +1,8 @@
 #ifndef BLK_INTERNAL_H
 #define BLK_INTERNAL_H
 
+#include <linux/idr.h>
+
 /* Amount of time in which a process may batch requests */
 #define BLK_BATCH_TIME	(HZ/50UL)
 
@@ -9,6 +11,12 @@
 
 extern struct kmem_cache *blk_requestq_cachep;
 extern struct kobj_type blk_queue_ktype;
+extern struct ida blk_queue_ida;
+
+static inline void __blk_get_queue(struct request_queue *q)
+{
+	kobject_get(&q->kobj);
+}
 
 void init_request_from_bio(struct request *req, struct bio *bio);
 void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
@@ -85,8 +93,8 @@ static inline struct request *__elv_next_request(struct request_queue *q)
 			q->flush_queue_delayed = 1;
 			return NULL;
 		}
-		if (test_bit(QUEUE_FLAG_DEAD, &q->queue_flags) ||
-		    !q->elevator->ops->elevator_dispatch_fn(q, 0))
+		if (unlikely(blk_queue_dead(q)) ||
+		    !q->elevator->type->ops.elevator_dispatch_fn(q, 0))
 			return NULL;
 	}
 }
@@ -95,16 +103,16 @@ static inline void elv_activate_rq(struct request_queue *q, struct request *rq)
 {
 	struct elevator_queue *e = q->elevator;
 
-	if (e->ops->elevator_activate_req_fn)
-		e->ops->elevator_activate_req_fn(q, rq);
+	if (e->type->ops.elevator_activate_req_fn)
+		e->type->ops.elevator_activate_req_fn(q, rq);
 }
 
 static inline void elv_deactivate_rq(struct request_queue *q, struct request *rq)
 {
 	struct elevator_queue *e = q->elevator;
 
-	if (e->ops->elevator_deactivate_req_fn)
-		e->ops->elevator_deactivate_req_fn(q, rq);
+	if (e->type->ops.elevator_deactivate_req_fn)
+		e->type->ops.elevator_deactivate_req_fn(q, rq);
 }
 
 #ifdef CONFIG_FAIL_IO_TIMEOUT
@@ -119,8 +127,6 @@ static inline int blk_should_fake_timeout(struct request_queue *q)
 }
 #endif
 
-struct io_context *current_io_context(gfp_t gfp_flags, int node);
-
 int ll_back_merge_fn(struct request_queue *q, struct request *req,
 		     struct bio *bio);
 int ll_front_merge_fn(struct request_queue *q, struct request *req,
@@ -189,6 +195,42 @@ static inline int blk_do_io_stat(struct request *rq)
 	        (rq->cmd_flags & REQ_DISCARD));
 }
 
+/*
+ * Internal io_context interface
+ */
+void get_io_context(struct io_context *ioc);
+struct io_cq *ioc_lookup_icq(struct io_context *ioc, struct request_queue *q);
+struct io_cq *ioc_create_icq(struct request_queue *q, gfp_t gfp_mask);
+void ioc_clear_queue(struct request_queue *q);
+
+void create_io_context_slowpath(struct task_struct *task, gfp_t gfp_mask,
+				int node);
+
+/**
+ * create_io_context - try to create task->io_context
+ * @task: target task
+ * @gfp_mask: allocation mask
+ * @node: allocation node
+ *
+ * If @task->io_context is %NULL, allocate a new io_context and install it.
+ * Returns the current @task->io_context which may be %NULL if allocation
+ * failed.
+ *
+ * Note that this function can't be called with IRQ disabled because
+ * task_lock which protects @task->io_context is IRQ-unsafe.
+ */
+static inline struct io_context *create_io_context(struct task_struct *task,
+						   gfp_t gfp_mask, int node)
+{
+	WARN_ON_ONCE(irqs_disabled());
+	if (unlikely(!task->io_context))
+		create_io_context_slowpath(task, gfp_mask, node);
+	return task->io_context;
+}
+
 /*
  * Internal throttling interface
  */
 #ifdef CONFIG_BLK_DEV_THROTTLING
 extern bool blk_throtl_bio(struct request_queue *q, struct bio *bio);
 extern void blk_throtl_drain(struct request_queue *q);

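The create_io_context() helper documented above is the pattern blk-core now uses on its allocation paths (see the get_request_wait() hunk earlier in this commit): try to set up current->io_context with a given gfp mask, then re-read it, since the allocation is allowed to fail. A hedged sketch of the calling convention; the wrapper name is illustrative:

/* Hedged sketch: must be called with IRQs enabled and without queue_lock. */
static struct io_context *example_current_ioc(struct request_queue *q)
{
	create_io_context(current, GFP_NOIO, q->node);
	return current->io_context;	/* may still be NULL if allocation failed */
}
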
block/bsg.c  +1 -3

@@ -769,12 +769,10 @@ static struct bsg_device *bsg_add_device(struct inode *inode,
 					 struct file *file)
 {
 	struct bsg_device *bd;
-	int ret;
 #ifdef BSG_DEBUG
 	unsigned char buf[32];
 #endif
-	ret = blk_get_queue(rq);
-	if (ret)
+	if (!blk_get_queue(rq))
 		return ERR_PTR(-ENXIO);
 
 	bd = bsg_alloc_device();

+115 -508  (file diff suppressed because it is too large)

block/compat_ioctl.c

@@ -719,6 +719,9 @@ long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg)
 	case BLKSECTGET:
 		return compat_put_ushort(arg,
 					 queue_max_sectors(bdev_get_queue(bdev)));
+	case BLKROTATIONAL:
+		return compat_put_ushort(arg,
+					 !blk_queue_nonrot(bdev_get_queue(bdev)));
 	case BLKRASET: /* compatible, but no compat_ptr (!) */
 	case BLKFRASET:
 		if (!capable(CAP_SYS_ADMIN))

block/deadline-iosched.c

@@ -448,9 +448,7 @@ static struct elevator_type iosched_deadline = {
 
 static int __init deadline_init(void)
 {
-	elv_register(&iosched_deadline);
-
-	return 0;
+	return elv_register(&iosched_deadline);
 }
 
 static void __exit deadline_exit(void)

block/elevator.c  +108 -111

@@ -61,8 +61,8 @@ static int elv_iosched_allow_merge(struct request *rq, struct bio *bio)
 	struct request_queue *q = rq->q;
 	struct elevator_queue *e = q->elevator;
 
-	if (e->ops->elevator_allow_merge_fn)
-		return e->ops->elevator_allow_merge_fn(q, rq, bio);
+	if (e->type->ops.elevator_allow_merge_fn)
+		return e->type->ops.elevator_allow_merge_fn(q, rq, bio);
 
 	return 1;
 }
@@ -168,17 +168,13 @@ static struct elevator_type *elevator_get(const char *name)
 	return e;
 }
 
-static void *elevator_init_queue(struct request_queue *q,
-				 struct elevator_queue *eq)
+static int elevator_init_queue(struct request_queue *q,
+			       struct elevator_queue *eq)
 {
-	return eq->ops->elevator_init_fn(q);
-}
-
-static void elevator_attach(struct request_queue *q, struct elevator_queue *eq,
-			   void *data)
-{
-	q->elevator = eq;
-	eq->elevator_data = data;
+	eq->elevator_data = eq->type->ops.elevator_init_fn(q);
+	if (eq->elevator_data)
+		return 0;
+	return -ENOMEM;
 }
 
 static char chosen_elevator[ELV_NAME_MAX];
@@ -207,8 +203,7 @@ static struct elevator_queue *elevator_alloc(struct request_queue *q,
 	if (unlikely(!eq))
 		goto err;
 
-	eq->ops = &e->ops;
-	eq->elevator_type = e;
+	eq->type = e;
 	kobject_init(&eq->kobj, &elv_ktype);
 	mutex_init(&eq->sysfs_lock);
 
@@ -232,7 +227,7 @@ static void elevator_release(struct kobject *kobj)
 	struct elevator_queue *e;
 
 	e = container_of(kobj, struct elevator_queue, kobj);
-	elevator_put(e->elevator_type);
+	elevator_put(e->type);
 	kfree(e->hash);
 	kfree(e);
 }
@@ -241,7 +236,7 @@ int elevator_init(struct request_queue *q, char *name)
 {
 	struct elevator_type *e = NULL;
 	struct elevator_queue *eq;
-	void *data;
+	int err;
 
 	if (unlikely(q->elevator))
 		return 0;
@@ -278,13 +273,13 @@ int elevator_init(struct request_queue *q, char *name)
 	if (!eq)
 		return -ENOMEM;
 
-	data = elevator_init_queue(q, eq);
-	if (!data) {
+	err = elevator_init_queue(q, eq);
+	if (err) {
 		kobject_put(&eq->kobj);
-		return -ENOMEM;
+		return err;
 	}
 
-	elevator_attach(q, eq, data);
+	q->elevator = eq;
 	return 0;
 }
 EXPORT_SYMBOL(elevator_init);
@@ -292,9 +287,8 @@ EXPORT_SYMBOL(elevator_init);
 void elevator_exit(struct elevator_queue *e)
 {
 	mutex_lock(&e->sysfs_lock);
-	if (e->ops->elevator_exit_fn)
-		e->ops->elevator_exit_fn(e);
-	e->ops = NULL;
+	if (e->type->ops.elevator_exit_fn)
+		e->type->ops.elevator_exit_fn(e);
 	mutex_unlock(&e->sysfs_lock);
 
 	kobject_put(&e->kobj);
@@ -504,8 +498,8 @@ int elv_merge(struct request_queue *q, struct request **req, struct bio *bio)
 		return ELEVATOR_BACK_MERGE;
 	}
 
-	if (e->ops->elevator_merge_fn)
-		return e->ops->elevator_merge_fn(q, req, bio);
+	if (e->type->ops.elevator_merge_fn)
+		return e->type->ops.elevator_merge_fn(q, req, bio);
 
 	return ELEVATOR_NO_MERGE;
 }
@@ -548,8 +542,8 @@ void elv_merged_request(struct request_queue *q, struct request *rq, int type)
 {
 	struct elevator_queue *e = q->elevator;
 
-	if (e->ops->elevator_merged_fn)
-		e->ops->elevator_merged_fn(q, rq, type);
+	if (e->type->ops.elevator_merged_fn)
+		e->type->ops.elevator_merged_fn(q, rq, type);
 
 	if (type == ELEVATOR_BACK_MERGE)
 		elv_rqhash_reposition(q, rq);
@@ -563,8 +557,8 @@ void elv_merge_requests(struct request_queue *q, struct request *rq,
 	struct elevator_queue *e = q->elevator;
 	const int next_sorted = next->cmd_flags & REQ_SORTED;
 
-	if (next_sorted && e->ops->elevator_merge_req_fn)
-		e->ops->elevator_merge_req_fn(q, rq, next);
+	if (next_sorted && e->type->ops.elevator_merge_req_fn)
+		e->type->ops.elevator_merge_req_fn(q, rq, next);
 
 	elv_rqhash_reposition(q, rq);
 
@@ -581,8 +575,8 @@ void elv_bio_merged(struct request_queue *q, struct request *rq,
 {
 	struct elevator_queue *e = q->elevator;
 
-	if (e->ops->elevator_bio_merged_fn)
-		e->ops->elevator_bio_merged_fn(q, rq, bio);
+	if (e->type->ops.elevator_bio_merged_fn)
+		e->type->ops.elevator_bio_merged_fn(q, rq, bio);
 }
 
 void elv_requeue_request(struct request_queue *q, struct request *rq)
@@ -608,12 +602,12 @@ void elv_drain_elevator(struct request_queue *q)
 
 	lockdep_assert_held(q->queue_lock);
 
-	while (q->elevator->ops->elevator_dispatch_fn(q, 1))
+	while (q->elevator->type->ops.elevator_dispatch_fn(q, 1))
 		;
 	if (q->nr_sorted && printed++ < 10) {
 		printk(KERN_ERR "%s: forced dispatching is broken "
 		       "(nr_sorted=%u), please report this\n",
-		       q->elevator->elevator_type->elevator_name, q->nr_sorted);
+		       q->elevator->type->elevator_name, q->nr_sorted);
 	}
 }
@@ -702,7 +696,7 @@ void __elv_add_request(struct request_queue *q, struct request *rq, int where)
 		 * rq cannot be accessed after calling
 		 * elevator_add_req_fn.
 		 */
-		q->elevator->ops->elevator_add_req_fn(q, rq);
+		q->elevator->type->ops.elevator_add_req_fn(q, rq);
 		break;
 
 	case ELEVATOR_INSERT_FLUSH:
@@ -731,8 +725,8 @@ struct request *elv_latter_request(struct request_queue *q, struct request *rq)
 {
 	struct elevator_queue *e = q->elevator;
 
-	if (e->ops->elevator_latter_req_fn)
-		return e->ops->elevator_latter_req_fn(q, rq);
+	if (e->type->ops.elevator_latter_req_fn)
+		return e->type->ops.elevator_latter_req_fn(q, rq);
 	return NULL;
 }
@@ -740,8 +734,8 @@ struct request *elv_former_request(struct request_queue *q, struct request *rq)
 {
 	struct elevator_queue *e = q->elevator;
 
-	if (e->ops->elevator_former_req_fn)
-		return e->ops->elevator_former_req_fn(q, rq);
+	if (e->type->ops.elevator_former_req_fn)
+		return e->type->ops.elevator_former_req_fn(q, rq);
 	return NULL;
 }
@@ -749,10 +743,8 @@ int elv_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask)
 {
 	struct elevator_queue *e = q->elevator;
 
-	if (e->ops->elevator_set_req_fn)
-		return e->ops->elevator_set_req_fn(q, rq, gfp_mask);
-
-	rq->elevator_private[0] = NULL;
+	if (e->type->ops.elevator_set_req_fn)
+		return e->type->ops.elevator_set_req_fn(q, rq, gfp_mask);
 	return 0;
 }
@@ -760,16 +752,16 @@ void elv_put_request(struct request_queue *q, struct request *rq)
 {
 	struct elevator_queue *e = q->elevator;
 
-	if (e->ops->elevator_put_req_fn)
-		e->ops->elevator_put_req_fn(rq);
+	if (e->type->ops.elevator_put_req_fn)
+		e->type->ops.elevator_put_req_fn(rq);
 }
 
 int elv_may_queue(struct request_queue *q, int rw)
 {
 	struct elevator_queue *e = q->elevator;
 
-	if (e->ops->elevator_may_queue_fn)
-		return e->ops->elevator_may_queue_fn(q, rw);
+	if (e->type->ops.elevator_may_queue_fn)
+		return e->type->ops.elevator_may_queue_fn(q, rw);
 
 	return ELV_MQUEUE_MAY;
 }
@@ -804,8 +796,8 @@ void elv_completed_request(struct request_queue *q, struct request *rq)
 	if (blk_account_rq(rq)) {
 		q->in_flight[rq_is_sync(rq)]--;
 		if ((rq->cmd_flags & REQ_SORTED) &&
-		    e->ops->elevator_completed_req_fn)
-			e->ops->elevator_completed_req_fn(q, rq);
+		    e->type->ops.elevator_completed_req_fn)
+			e->type->ops.elevator_completed_req_fn(q, rq);
 	}
 }
@@ -823,7 +815,7 @@ elv_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
 
 	e = container_of(kobj, struct elevator_queue, kobj);
 	mutex_lock(&e->sysfs_lock);
-	error = e->ops ? entry->show(e, page) : -ENOENT;
+	error = e->type ? entry->show(e, page) : -ENOENT;
 	mutex_unlock(&e->sysfs_lock);
 	return error;
 }
@@ -841,7 +833,7 @@ elv_attr_store(struct kobject *kobj, struct attribute *attr,
 
 	e = container_of(kobj, struct elevator_queue, kobj);
 	mutex_lock(&e->sysfs_lock);
-	error = e->ops ? entry->store(e, page, length) : -ENOENT;
+	error = e->type ? entry->store(e, page, length) : -ENOENT;
 	mutex_unlock(&e->sysfs_lock);
 	return error;
 }
@@ -856,14 +848,13 @@ static struct kobj_type elv_ktype = {
 	.release = elevator_release,
 };
 
-int elv_register_queue(struct request_queue *q)
+int __elv_register_queue(struct request_queue *q, struct elevator_queue *e)
 {
-	struct elevator_queue *e = q->elevator;
 	int error;
 
 	error = kobject_add(&e->kobj, &q->kobj, "%s", "iosched");
 	if (!error) {
-		struct elv_fs_entry *attr = e->elevator_type->elevator_attrs;
+		struct elv_fs_entry *attr = e->type->elevator_attrs;
 		if (attr) {
 			while (attr->attr.name) {
 				if (sysfs_create_file(&e->kobj, &attr->attr))
@@ -876,31 +867,55 @@ int elv_register_queue(struct request_queue *q)
 	}
 	return error;
 }
-EXPORT_SYMBOL(elv_register_queue);
 
-static void __elv_unregister_queue(struct elevator_queue *e)
+int elv_register_queue(struct request_queue *q)
 {
-	kobject_uevent(&e->kobj, KOBJ_REMOVE);
-	kobject_del(&e->kobj);
-	e->registered = 0;
+	return __elv_register_queue(q, q->elevator);
 }
+EXPORT_SYMBOL(elv_register_queue);
 
 void elv_unregister_queue(struct request_queue *q)
 {
-	if (q)
-		__elv_unregister_queue(q->elevator);
+	if (q) {
+		struct elevator_queue *e = q->elevator;
+
+		kobject_uevent(&e->kobj, KOBJ_REMOVE);
+		kobject_del(&e->kobj);
+		e->registered = 0;
+	}
 }
 EXPORT_SYMBOL(elv_unregister_queue);
 
-void elv_register(struct elevator_type *e)
+int elv_register(struct elevator_type *e)
 {
 	char *def = "";
 
+	/* create icq_cache if requested */
+	if (e->icq_size) {
+		if (WARN_ON(e->icq_size < sizeof(struct io_cq)) ||
+		    WARN_ON(e->icq_align < __alignof__(struct io_cq)))
+			return -EINVAL;
+
+		snprintf(e->icq_cache_name, sizeof(e->icq_cache_name),
+			 "%s_io_cq", e->elevator_name);
+		e->icq_cache = kmem_cache_create(e->icq_cache_name, e->icq_size,
+						 e->icq_align, 0, NULL);
+		if (!e->icq_cache)
+			return -ENOMEM;
+	}
+
+	/* register, don't allow duplicate names */
 	spin_lock(&elv_list_lock);
-	BUG_ON(elevator_find(e->elevator_name));
+	if (elevator_find(e->elevator_name)) {
+		spin_unlock(&elv_list_lock);
+		if (e->icq_cache)
+			kmem_cache_destroy(e->icq_cache);
+		return -EBUSY;
+	}
 	list_add_tail(&e->list, &elv_list);
 	spin_unlock(&elv_list_lock);
 
+	/* print pretty message */
 	if (!strcmp(e->elevator_name, chosen_elevator) ||
 	    (!*chosen_elevator &&
 	     !strcmp(e->elevator_name, CONFIG_DEFAULT_IOSCHED)))
@@ -908,30 +923,26 @@ void elv_register(struct elevator_type *e)
 
 	printk(KERN_INFO "io scheduler %s registered%s\n", e->elevator_name,
 								def);
+	return 0;
 }
 EXPORT_SYMBOL_GPL(elv_register);
 
 void elv_unregister(struct elevator_type *e)
 {
-	struct task_struct *g, *p;
-
-	/*
-	 * Iterate every thread in the process to remove the io contexts.
-	 */
-	if (e->ops.trim) {
-		read_lock(&tasklist_lock);
-		do_each_thread(g, p) {
-			task_lock(p);
-			if (p->io_context)
-				e->ops.trim(p->io_context);
-			task_unlock(p);
-		} while_each_thread(g, p);
-		read_unlock(&tasklist_lock);
-	}
-
+	/* unregister */
 	spin_lock(&elv_list_lock);
 	list_del_init(&e->list);
 	spin_unlock(&elv_list_lock);
+
+	/*
+	 * Destroy icq_cache if it exists.  icq's are RCU managed.  Make
+	 * sure all RCU operations are complete before proceeding.
+	 */
+	if (e->icq_cache) {
+		rcu_barrier();
+		kmem_cache_destroy(e->icq_cache);
+		e->icq_cache = NULL;
+	}
 }
 EXPORT_SYMBOL_GPL(elv_unregister);
 
@@ -944,54 +955,41 @@ EXPORT_SYMBOL_GPL(elv_unregister);
 static int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
 {
 	struct elevator_queue *old_elevator, *e;
-	void *data;
 	int err;
 
-	/*
-	 * Allocate new elevator
-	 */
+	/* allocate new elevator */
 	e = elevator_alloc(q, new_e);
 	if (!e)
 		return -ENOMEM;
 
-	data = elevator_init_queue(q, e);
-	if (!data) {
+	err = elevator_init_queue(q, e);
+	if (err) {
 		kobject_put(&e->kobj);
-		return -ENOMEM;
+		return err;
 	}
 
-	/*
-	 * Turn on BYPASS and drain all requests w/ elevator private data
-	 */
+	/* turn on BYPASS and drain all requests w/ elevator private data */
 	elv_quiesce_start(q);
 
-	/*
-	 * Remember old elevator.
-	 */
-	old_elevator = q->elevator;
-
-	/*
-	 * attach and start new elevator
-	 */
-	spin_lock_irq(q->queue_lock);
-	elevator_attach(q, e, data);
-	spin_unlock_irq(q->queue_lock);
-
-	if (old_elevator->registered) {
-		__elv_unregister_queue(old_elevator);
-
-		err = elv_register_queue(q);
+	/* unregister old queue, register new one and kill old elevator */
+	if (q->elevator->registered) {
+		elv_unregister_queue(q);
+		err = __elv_register_queue(q, e);
 		if (err)
 			goto fail_register;
 	}
 
-	/*
-	 * finally exit old elevator and turn off BYPASS.
-	 */
+	/* done, clear io_cq's, switch elevators and turn off BYPASS */
+	spin_lock_irq(q->queue_lock);
+	ioc_clear_queue(q);
+	old_elevator = q->elevator;
+	q->elevator = e;
+	spin_unlock_irq(q->queue_lock);
+
 	elevator_exit(old_elevator);
 	elv_quiesce_end(q);
 
-	blk_add_trace_msg(q, "elv switch: %s", e->elevator_type->elevator_name);
+	blk_add_trace_msg(q, "elv switch: %s", e->type->elevator_name);
 
 	return 0;
@@ -1001,7 +999,6 @@ fail_register:
 	 * one again (along with re-adding the sysfs dir)
 	 */
 	elevator_exit(e);
-	q->elevator = old_elevator;
 	elv_register_queue(q);
 	elv_quiesce_end(q);
 
@@ -1026,7 +1023,7 @@ int elevator_change(struct request_queue *q, const char *name)
 		return -EINVAL;
 	}
 
-	if (!strcmp(elevator_name, q->elevator->elevator_type->elevator_name)) {
+	if (!strcmp(elevator_name, q->elevator->type->elevator_name)) {
 		elevator_put(e);
 		return 0;
 	}
@@ -1061,7 +1058,7 @@ ssize_t elv_iosched_show(struct request_queue *q, char *name)
 	if (!q->elevator || !blk_queue_stackable(q))
 		return sprintf(name, "none\n");
 
-	elv = e->elevator_type;
+	elv = e->type;
 
 	spin_lock(&elv_list_lock);
 	list_for_each_entry(__e, &elv_list, list) {

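Since elv_register() can now fail and optionally creates a per-elevator io_cq cache, an I/O scheduler that wants block-core-managed icq's simply describes them in its elevator_type and propagates the return value from its init function. A hedged sketch modeled on what CFQ looks like after this series; the struct, names and empty ops are illustrative:

/* Hedged sketch: an elevator opting in to block-core managed io_cq's. */
struct example_io_cq {
	struct io_cq	icq;	/* generic part, must come first */
	int		ttime;	/* scheduler-private per-(task, queue) state */
};

static struct elevator_type iosched_example = {
	.ops		= { /* elevator_*_fn callbacks go here */ },
	.icq_size	= sizeof(struct example_io_cq),
	.icq_align	= __alignof__(struct example_io_cq),
	.elevator_name	= "example",
	.elevator_owner	= THIS_MODULE,
};

static int __init example_init(void)
{
	/* elv_register() now allocates the icq kmem_cache and may fail */
	return elv_register(&iosched_example);
}
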
block/genhd.c  +1 -1

@@ -614,7 +614,7 @@ void add_disk(struct gendisk *disk)
 	 * Take an extra ref on queue which will be put on disk_release()
 	 * so that it sticks around as long as @disk is there.
 	 */
-	WARN_ON_ONCE(blk_get_queue(disk->queue));
+	WARN_ON_ONCE(!blk_get_queue(disk->queue));
 
 	retval = sysfs_create_link(&disk_to_dev(disk)->kobj, &bdi->dev->kobj,
 				   "bdi");

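blk_get_queue() now returns bool (true when a reference was actually taken on a live queue), which is why add_disk() warns on !blk_get_queue() above and bsg tests the result directly. A hedged sketch of the new calling convention; the function name is illustrative:

/* Hedged sketch: taking and dropping a queue reference with the bool API. */
static int example_use_queue(struct request_queue *q)
{
	if (!blk_get_queue(q))	/* queue is already marked dead */
		return -ENXIO;

	/* ... safely use the queue here ... */

	blk_put_queue(q);
	return 0;
}
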
block/ioctl.c

@@ -296,6 +296,8 @@ int blkdev_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
 		return put_uint(arg, bdev_discard_zeroes_data(bdev));
 	case BLKSECTGET:
 		return put_ushort(arg, queue_max_sectors(bdev_get_queue(bdev)));
+	case BLKROTATIONAL:
+		return put_ushort(arg, !blk_queue_nonrot(bdev_get_queue(bdev)));
 	case BLKRASET:
 	case BLKFRASET:
 		if(!capable(CAP_SYS_ADMIN))

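The BLKROTATIONAL ioctl added above (and in the compat path) reports !blk_queue_nonrot() as an unsigned short, i.e. 1 for rotational media and 0 for non-rotational devices such as SSDs. A hedged user-space sketch; the device path is only an example:

/* Hedged sketch: query BLKROTATIONAL from user space. */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fs.h>		/* BLKROTATIONAL */

int main(void)
{
	unsigned short rotational;
	int fd = open("/dev/sda", O_RDONLY);	/* example device node */

	if (fd < 0)
		return 1;
	if (ioctl(fd, BLKROTATIONAL, &rotational) == 0)
		printf("rotational: %hu\n", rotational);
	close(fd);
	return 0;
}
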
block/noop-iosched.c

@@ -94,9 +94,7 @@ static struct elevator_type elevator_noop = {
 
 static int __init noop_init(void)
 {
-	elv_register(&elevator_noop);
-
-	return 0;
+	return elv_register(&elevator_noop);
 }
 
 static void __exit noop_exit(void)