Merge commit 'v2.6.38-rc6' into for-2.6.39/core

Conflicts:
	block/cfq-iosched.c

Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
block/Kconfig (+1 -1)
@@ -2,7 +2,7 @@
 # Block layer core configuration
 #
 menuconfig BLOCK
-	bool "Enable the block layer" if EMBEDDED
+	bool "Enable the block layer" if EXPERT
 	default y
 	help
 	 Provide block layer support for the kernel.
block/blk-cgroup.c
@@ -1452,10 +1452,6 @@ blkiocg_create(struct cgroup_subsys *subsys, struct cgroup *cgroup)
 		goto done;
 	}
 
-	/* Currently we do not support hierarchy deeper than two level (0,1) */
-	if (parent != cgroup->top_cgroup)
-		return ERR_PTR(-EPERM);
-
 	blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL);
 	if (!blkcg)
 		return ERR_PTR(-ENOMEM);
block/blk-core.c (+29 -11)
@@ -33,7 +33,7 @@
 
 #include "blk.h"
 
-EXPORT_TRACEPOINT_SYMBOL_GPL(block_remap);
+EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_remap);
 EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_remap);
 EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_complete);
 
@@ -64,13 +64,27 @@ static void drive_stat_acct(struct request *rq, int new_io)
 		return;
 
 	cpu = part_stat_lock();
-	part = disk_map_sector_rcu(rq->rq_disk, blk_rq_pos(rq));
 
-	if (!new_io)
+	if (!new_io) {
+		part = rq->part;
 		part_stat_inc(cpu, part, merges[rw]);
-	else {
+	} else {
+		part = disk_map_sector_rcu(rq->rq_disk, blk_rq_pos(rq));
+		if (!hd_struct_try_get(part)) {
+			/*
+			 * The partition is already being removed,
+			 * the request will be accounted on the disk only
+			 *
+			 * We take a reference on disk->part0 although that
+			 * partition will never be deleted, so we can treat
+			 * it as any other partition.
+			 */
+			part = &rq->rq_disk->part0;
+			hd_struct_get(part);
+		}
 		part_round_stats(cpu, part);
 		part_inc_in_flight(part, rw);
+		rq->part = part;
 	}
 
 	part_stat_unlock();
@@ -128,6 +142,7 @@ void blk_rq_init(struct request_queue *q, struct request *rq)
 	rq->ref_count = 1;
 	rq->start_time = jiffies;
 	set_start_time_ns(rq);
+	rq->part = NULL;
 }
 EXPORT_SYMBOL(blk_rq_init);
 
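The drive_stat_acct() hunk resolves the partition once per request, pins it with hd_struct_try_get(), and caches it in rq->part; the completion-side hunks further down then account against that same cached partition (dropping the pin with hd_struct_put()) instead of re-mapping the sector, which could resolve to a different partition after a merge or a partition-table change. If the partition is already being torn down, the try-get fails and accounting falls back to the never-deleted whole-disk part0. A minimal user-space model of that pin-or-fall-back pattern; every name in it is an illustrative stand-in, not the kernel API:

/* Model of the rq->part caching above: look up once, try to pin,
 * fall back to a never-deleted default, reuse the pointer later. */
#include <stdbool.h>
#include <stdio.h>

struct part {
	const char *name;
	int ref;		/* 0 means the partition is being deleted */
};

static bool part_try_get(struct part *p)
{
	if (p->ref == 0)	/* already going away: refuse to pin */
		return false;
	p->ref++;
	return true;
}

static void part_put(struct part *p) { p->ref--; }

struct request {
	struct part *part;	/* pinned at first accounting, reused later */
};

static struct part whole_disk = { "disk0", 1 };		/* never deleted */
static struct part partition  = { "disk0p1", 0 };	/* mid-removal */

static void account_start(struct request *rq, struct part *mapped)
{
	if (!part_try_get(mapped)) {
		/* partition vanishing: account on the whole disk instead */
		mapped = &whole_disk;
		part_try_get(mapped);
	}
	rq->part = mapped;	/* completion paths reuse this pointer */
}

static void account_done(struct request *rq)
{
	printf("accounted on %s\n", rq->part->name);
	part_put(rq->part);	/* drop the pin taken at start */
}

int main(void)
{
	struct request rq;

	account_start(&rq, &partition);	/* falls back to disk0 */
	account_done(&rq);
	return 0;
}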
@@ -1342,9 +1357,9 @@ static inline void blk_partition_remap(struct bio *bio)
 		bio->bi_sector += p->start_sect;
 		bio->bi_bdev = bdev->bd_contains;
 
-		trace_block_remap(bdev_get_queue(bio->bi_bdev), bio,
-				    bdev->bd_dev,
-				    bio->bi_sector - p->start_sect);
+		trace_block_bio_remap(bdev_get_queue(bio->bi_bdev), bio,
+				      bdev->bd_dev,
+				      bio->bi_sector - p->start_sect);
 	}
 }
 
@@ -1513,7 +1528,7 @@ static inline void __generic_make_request(struct bio *bio)
 			goto end_io;
 
 		if (old_sector != -1)
-			trace_block_remap(q, bio, old_dev, old_sector);
+			trace_block_bio_remap(q, bio, old_dev, old_sector);
 
 		old_sector = bio->bi_sector;
 		old_dev = bio->bi_bdev->bd_dev;
@@ -1789,7 +1804,7 @@ static void blk_account_io_completion(struct request *req, unsigned int bytes)
 		int cpu;
 
 		cpu = part_stat_lock();
-		part = disk_map_sector_rcu(req->rq_disk, blk_rq_pos(req));
+		part = req->part;
 		part_stat_add(cpu, part, sectors[rw], bytes >> 9);
 		part_stat_unlock();
 	}
@@ -1809,13 +1824,14 @@ static void blk_account_io_done(struct request *req)
 		int cpu;
 
 		cpu = part_stat_lock();
-		part = disk_map_sector_rcu(req->rq_disk, blk_rq_pos(req));
+		part = req->part;
 
 		part_stat_inc(cpu, part, ios[rw]);
 		part_stat_add(cpu, part, ticks[rw], duration);
 		part_round_stats(cpu, part);
 		part_dec_in_flight(part, rw);
 
+		hd_struct_put(part);
 		part_stat_unlock();
 	}
 }
@@ -2619,7 +2635,9 @@ int __init blk_dev_init(void)
 	BUILD_BUG_ON(__REQ_NR_BITS > 8 *
 			sizeof(((struct request *)0)->cmd_flags));
 
-	kblockd_workqueue = create_workqueue("kblockd");
+	/* used for unplugging and affects IO latency/throughput - HIGHPRI */
+	kblockd_workqueue = alloc_workqueue("kblockd",
+					    WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
 	if (!kblockd_workqueue)
 		panic("Failed to create kblockd\n");
 
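The blk_dev_init() hunk converts kblockd from create_workqueue() to alloc_workqueue() so it can carry WQ_MEM_RECLAIM (kblockd must keep making progress when the system is reclaiming memory through block I/O) and WQ_HIGHPRI (unplug work sits directly on the I/O latency path). A minimal sketch of the same conversion in module form, assuming the 2.6.36+ concurrency-managed workqueue API; the module and queue names are placeholders:

/* Sketch only: kblockd itself is created in blk_dev_init() as shown above. */
#include <linux/module.h>
#include <linux/workqueue.h>

static struct workqueue_struct *my_wq;

static void my_work_fn(struct work_struct *work)
{
	/* ... work body ... */
}
static DECLARE_WORK(my_work, my_work_fn);

static int __init my_init(void)
{
	/*
	 * WQ_MEM_RECLAIM guarantees a rescuer thread so the queue makes
	 * forward progress under memory pressure; WQ_HIGHPRI keeps the
	 * work's scheduling latency low.  max_active of 0 means default.
	 */
	my_wq = alloc_workqueue("my_wq", WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
	if (!my_wq)
		return -ENOMEM;
	queue_work(my_wq, &my_work);
	return 0;
}

static void __exit my_exit(void)
{
	destroy_workqueue(my_wq);	/* flushes pending work first */
}

module_init(my_init);
module_exit(my_exit);
MODULE_LICENSE("GPL");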
block/blk-ioc.c (+2 -3)
@@ -64,7 +64,7 @@ static void cfq_exit(struct io_context *ioc)
 	rcu_read_unlock();
 }
 
-/* Called by the exitting task */
+/* Called by the exiting task */
 void exit_io_context(struct task_struct *task)
 {
 	struct io_context *ioc;
@@ -74,10 +74,9 @@ void exit_io_context(struct task_struct *task)
 	task->io_context = NULL;
 	task_unlock(task);
 
-	if (atomic_dec_and_test(&ioc->nr_tasks)) {
+	if (atomic_dec_and_test(&ioc->nr_tasks))
 		cfq_exit(ioc);
 
-	}
 	put_io_context(ioc);
 }
 
block/blk-merge.c (+2 -1)
@@ -351,11 +351,12 @@ static void blk_account_io_merge(struct request *req)
 		int cpu;
 
 		cpu = part_stat_lock();
-		part = disk_map_sector_rcu(req->rq_disk, blk_rq_pos(req));
+		part = req->part;
 
 		part_round_stats(cpu, part);
 		part_dec_in_flight(part, rq_data_dir(req));
 
+		hd_struct_put(part);
 		part_stat_unlock();
 	}
 }
block/blk-throttle.c
@@ -168,7 +168,15 @@ static struct throtl_grp * throtl_find_alloc_tg(struct throtl_data *td,
 	 * tree of blkg (instead of traversing through hash list all
 	 * the time.
 	 */
-	tg = tg_of_blkg(blkiocg_lookup_group(blkcg, key));
+
+	/*
+	 * This is the common case when there are no blkio cgroups.
+	 * Avoid lookup in this case
+	 */
+	if (blkcg == &blkio_root_cgroup)
+		tg = &td->root_tg;
+	else
+		tg = tg_of_blkg(blkiocg_lookup_group(blkcg, key));
 
 	/* Fill in device details for root group */
 	if (tg && !tg->blkg.dev && bdi->dev && dev_name(bdi->dev)) {
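The throtl_find_alloc_tg() hunk adds a fast path: when the bio belongs to the root blkio cgroup (the common case on systems with no cgroups configured), the group pointer is taken directly instead of going through blkiocg_lookup_group(). A trivial user-space model of the same check-the-root-first pattern; all names here are illustrative stand-ins, not the blk-throttle API:

#include <stdio.h>
#include <string.h>

struct tg { const char *name; };

static struct tg root_tg = { "root" };

/* stands in for blkiocg_lookup_group(): the expensive hash walk */
static struct tg *hash_lookup(const char *key)
{
	printf("slow hash lookup for %s\n", key);
	return strcmp(key, "root") == 0 ? &root_tg : NULL;
}

static struct tg *find_tg(const char *key)
{
	if (strcmp(key, "root") == 0)	/* common case: skip the lookup */
		return &root_tg;
	return hash_lookup(key);
}

int main(void)
{
	printf("%s\n", find_tg("root")->name);	/* fast path, no lookup */
	return 0;
}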
block/cfq-iosched.c (+84 -56)
@@ -87,7 +87,6 @@ struct cfq_rb_root {
 	unsigned count;
 	unsigned total_weight;
 	u64 min_vdisktime;
-	struct rb_node *active;
 };
 #define CFQ_RB_ROOT	(struct cfq_rb_root) { .rb = RB_ROOT, .left = NULL, \
 			.count = 0, .min_vdisktime = 0, }
@@ -97,7 +96,7 @@
  */
 struct cfq_queue {
 	/* reference count */
-	atomic_t ref;
+	int ref;
 	/* various state flags, see below */
 	unsigned int flags;
 	/* parent cfq_data */
@@ -180,7 +179,6 @@ struct cfq_group {
 	/* group service_tree key */
 	u64 vdisktime;
 	unsigned int weight;
-	bool on_st;
 
 	/* number of cfqq currently on this group */
 	int nr_cfqq;
@@ -209,7 +207,7 @@ struct cfq_group {
 	struct blkio_group blkg;
 #ifdef CONFIG_CFQ_GROUP_IOSCHED
 	struct hlist_node cfqd_node;
-	atomic_t ref;
+	int ref;
 #endif
 	/* number of requests that are on the dispatch list or inside driver */
 	int dispatched;
@@ -563,11 +561,6 @@ static void update_min_vdisktime(struct cfq_rb_root *st)
 	u64 vdisktime = st->min_vdisktime;
 	struct cfq_group *cfqg;
 
-	if (st->active) {
-		cfqg = rb_entry_cfqg(st->active);
-		vdisktime = cfqg->vdisktime;
-	}
-
 	if (st->left) {
 		cfqg = rb_entry_cfqg(st->left);
 		vdisktime = min_vdisktime(vdisktime, cfqg->vdisktime);
@@ -605,8 +598,8 @@ cfq_group_slice(struct cfq_data *cfqd, struct cfq_group *cfqg)
 	return cfq_target_latency * cfqg->weight / st->total_weight;
 }
 
-static inline void
-cfq_set_prio_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
+static inline unsigned
+cfq_scaled_cfqq_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 {
 	unsigned slice = cfq_prio_to_slice(cfqd, cfqq);
 	if (cfqd->cfq_latency) {
@@ -632,6 +625,14 @@ cfq_set_prio_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 				    low_slice);
 		}
 	}
+	return slice;
+}
+
+static inline void
+cfq_set_prio_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
+{
+	unsigned slice = cfq_scaled_cfqq_slice(cfqd, cfqq);
+
 	cfqq->slice_start = jiffies;
 	cfqq->slice_end = jiffies + slice;
 	cfqq->allocated_slice = slice;
@@ -646,11 +647,11 @@ cfq_set_prio_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 static inline bool cfq_slice_used(struct cfq_queue *cfqq)
 {
 	if (cfq_cfqq_slice_new(cfqq))
-		return 0;
+		return false;
 	if (time_before(jiffies, cfqq->slice_end))
-		return 0;
+		return false;
 
-	return 1;
+	return true;
 }
 
 /*
@@ -869,7 +870,7 @@ cfq_group_service_tree_add(struct cfq_data *cfqd, struct cfq_group *cfqg)
 	struct rb_node *n;
 
 	cfqg->nr_cfqq++;
-	if (cfqg->on_st)
+	if (!RB_EMPTY_NODE(&cfqg->rb_node))
 		return;
 
 	/*
@@ -885,7 +886,6 @@ cfq_group_service_tree_add(struct cfq_data *cfqd, struct cfq_group *cfqg)
 	cfqg->vdisktime = st->min_vdisktime;
 
 	__cfq_group_service_tree_add(st, cfqg);
-	cfqg->on_st = true;
 	st->total_weight += cfqg->weight;
 }
 
@@ -894,9 +894,6 @@ cfq_group_service_tree_del(struct cfq_data *cfqd, struct cfq_group *cfqg)
 {
 	struct cfq_rb_root *st = &cfqd->grp_service_tree;
 
-	if (st->active == &cfqg->rb_node)
-		st->active = NULL;
-
 	BUG_ON(cfqg->nr_cfqq < 1);
 	cfqg->nr_cfqq--;
 
@@ -905,7 +902,6 @@ cfq_group_service_tree_del(struct cfq_data *cfqd, struct cfq_group *cfqg)
 		return;
 
 	cfq_log_cfqg(cfqd, cfqg, "del_from_rr group");
-	cfqg->on_st = false;
 	st->total_weight -= cfqg->weight;
 	if (!RB_EMPTY_NODE(&cfqg->rb_node))
 		cfq_rb_erase(&cfqg->rb_node, st);
@@ -1026,11 +1022,11 @@ cfq_find_alloc_cfqg(struct cfq_data *cfqd, struct cgroup *cgroup, int create)
 	 * elevator which will be dropped by either elevator exit
 	 * or cgroup deletion path depending on who is exiting first.
 	 */
-	atomic_set(&cfqg->ref, 1);
+	cfqg->ref = 1;
 
 	/*
 	 * Add group onto cgroup list. It might happen that bdi->dev is
-	 * not initiliazed yet. Initialize this new group without major
+	 * not initialized yet. Initialize this new group without major
 	 * and minor info and this info will be filled in once a new thread
 	 * comes for IO. See code above.
 	 */
@@ -1071,7 +1067,7 @@ static struct cfq_group *cfq_get_cfqg(struct cfq_data *cfqd, int create)
 
 static inline struct cfq_group *cfq_ref_get_cfqg(struct cfq_group *cfqg)
 {
-	atomic_inc(&cfqg->ref);
+	cfqg->ref++;
 	return cfqg;
 }
 
@@ -1083,7 +1079,7 @@ static void cfq_link_cfqq_cfqg(struct cfq_queue *cfqq, struct cfq_group *cfqg)
 
 	cfqq->cfqg = cfqg;
 	/* cfqq reference on cfqg */
-	atomic_inc(&cfqq->cfqg->ref);
+	cfqq->cfqg->ref++;
 }
 
 static void cfq_put_cfqg(struct cfq_group *cfqg)
@@ -1091,11 +1087,12 @@ static void cfq_put_cfqg(struct cfq_group *cfqg)
 	struct cfq_rb_root *st;
 	int i, j;
 
-	BUG_ON(atomic_read(&cfqg->ref) <= 0);
-	if (!atomic_dec_and_test(&cfqg->ref))
+	BUG_ON(cfqg->ref <= 0);
+	cfqg->ref--;
+	if (cfqg->ref)
 		return;
 	for_each_cfqg_st(cfqg, i, j, st)
-		BUG_ON(!RB_EMPTY_ROOT(&st->rb) || st->active != NULL);
+		BUG_ON(!RB_EMPTY_ROOT(&st->rb));
 	kfree(cfqg);
 }
 
@@ -1200,7 +1197,7 @@ static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 		cfq_group_service_tree_del(cfqd, cfqq->cfqg);
 		cfqq->orig_cfqg = cfqq->cfqg;
 		cfqq->cfqg = &cfqd->root_group;
-		atomic_inc(&cfqd->root_group.ref);
+		cfqd->root_group.ref++;
 		group_changed = 1;
 	} else if (!cfqd->cfq_group_isolation
 		   && cfqq_type(cfqq) == SYNC_WORKLOAD && cfqq->orig_cfqg) {
@@ -1672,8 +1669,11 @@ __cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 	/*
 	 * store what was left of this slice, if the queue idled/timed out
 	 */
-	if (timed_out && !cfq_cfqq_slice_new(cfqq)) {
-		cfqq->slice_resid = cfqq->slice_end - jiffies;
+	if (timed_out) {
+		if (cfq_cfqq_slice_new(cfqq))
+			cfqq->slice_resid = cfq_scaled_cfqq_slice(cfqd, cfqq);
+		else
+			cfqq->slice_resid = cfqq->slice_end - jiffies;
 		cfq_log_cfqq(cfqd, cfqq, "resid=%ld", cfqq->slice_resid);
 	}
 
@@ -1687,9 +1687,6 @@ __cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 	if (cfqq == cfqd->active_queue)
 		cfqd->active_queue = NULL;
 
-	if (&cfqq->cfqg->rb_node == cfqd->grp_service_tree.active)
-		cfqd->grp_service_tree.active = NULL;
-
 	if (cfqd->active_cic) {
 		put_io_context(cfqd->active_cic->ioc);
 		cfqd->active_cic = NULL;
@@ -1901,10 +1898,10 @@ static bool cfq_should_idle(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 	 * in their service tree.
 	 */
 	if (service_tree->count == 1 && cfq_cfqq_sync(cfqq))
-		return 1;
+		return true;
 	cfq_log_cfqq(cfqd, cfqq, "Not idling. st->count:%d",
 			service_tree->count);
-	return 0;
+	return false;
 }
 
 static void cfq_arm_slice_timer(struct cfq_data *cfqd)
@@ -2040,7 +2037,7 @@ static int cfqq_process_refs(struct cfq_queue *cfqq)
 	int process_refs, io_refs;
 
 	io_refs = cfqq->allocated[READ] + cfqq->allocated[WRITE];
-	process_refs = atomic_read(&cfqq->ref) - io_refs;
+	process_refs = cfqq->ref - io_refs;
 	BUG_ON(process_refs < 0);
 	return process_refs;
 }
@@ -2080,10 +2077,10 @@ static void cfq_setup_merge(struct cfq_queue *cfqq, struct cfq_queue *new_cfqq)
 	 */
 	if (new_process_refs >= process_refs) {
 		cfqq->new_cfqq = new_cfqq;
-		atomic_add(process_refs, &new_cfqq->ref);
+		new_cfqq->ref += process_refs;
 	} else {
 		new_cfqq->new_cfqq = cfqq;
-		atomic_add(new_process_refs, &cfqq->ref);
+		cfqq->ref += new_process_refs;
 	}
 }
 
@@ -2116,12 +2113,7 @@ static void choose_service_tree(struct cfq_data *cfqd, struct cfq_group *cfqg)
 	unsigned count;
 	struct cfq_rb_root *st;
 	unsigned group_slice;
-
-	if (!cfqg) {
-		cfqd->serving_prio = IDLE_WORKLOAD;
-		cfqd->workload_expires = jiffies + 1;
-		return;
-	}
+	enum wl_prio_t original_prio = cfqd->serving_prio;
 
 	/* Choose next priority. RT > BE > IDLE */
 	if (cfq_group_busy_queues_wl(RT_WORKLOAD, cfqd, cfqg))
@@ -2134,6 +2126,9 @@ static void choose_service_tree(struct cfq_data *cfqd, struct cfq_group *cfqg)
 		return;
 	}
 
+	if (original_prio != cfqd->serving_prio)
+		goto new_workload;
+
 	/*
 	 * For RT and BE, we have to choose also the type
 	 * (SYNC, SYNC_NOIDLE, ASYNC), and to compute a workload
@@ -2148,6 +2143,7 @@ static void choose_service_tree(struct cfq_data *cfqd, struct cfq_group *cfqg)
 	if (count && !time_after(jiffies, cfqd->workload_expires))
 		return;
 
+new_workload:
 	/* otherwise select new workload type */
 	cfqd->serving_type =
 		cfq_choose_wl(cfqd, cfqg, cfqd->serving_prio);
@@ -2199,7 +2195,6 @@ static struct cfq_group *cfq_get_next_cfqg(struct cfq_data *cfqd)
 	if (RB_EMPTY_ROOT(&st->rb))
 		return NULL;
 	cfqg = cfq_rb_first_group(st);
-	st->active = &cfqg->rb_node;
 	update_min_vdisktime(st);
 	return cfqg;
 }
@@ -2293,6 +2288,17 @@ static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd)
 		goto keep_queue;
 	}
 
+	/*
+	 * This is a deep seek queue, but the device is much faster than
+	 * the queue can deliver, don't idle
+	 **/
+	if (CFQQ_SEEKY(cfqq) && cfq_cfqq_idle_window(cfqq) &&
+	    (cfq_cfqq_slice_new(cfqq) ||
+	    (cfqq->slice_end - jiffies > jiffies - cfqq->slice_start))) {
+		cfq_clear_cfqq_deep(cfqq);
+		cfq_clear_cfqq_idle_window(cfqq);
+	}
+
 	if (cfqq->dispatched && cfq_should_idle(cfqd, cfqq)) {
 		cfqq = NULL;
 		goto keep_queue;
@@ -2367,12 +2373,12 @@ static inline bool cfq_slice_used_soon(struct cfq_data *cfqd,
 {
 	/* the queue hasn't finished any request, can't estimate */
 	if (cfq_cfqq_slice_new(cfqq))
-		return 1;
+		return true;
 	if (time_after(jiffies + cfqd->cfq_slice_idle * cfqq->dispatched,
 			cfqq->slice_end))
-		return 1;
+		return true;
 
-	return 0;
+	return false;
 }
 
 static bool cfq_may_dispatch(struct cfq_data *cfqd, struct cfq_queue *cfqq)
@@ -2538,9 +2544,10 @@ static void cfq_put_queue(struct cfq_queue *cfqq)
 	struct cfq_data *cfqd = cfqq->cfqd;
 	struct cfq_group *cfqg, *orig_cfqg;
 
-	BUG_ON(atomic_read(&cfqq->ref) <= 0);
+	BUG_ON(cfqq->ref <= 0);
 
-	if (!atomic_dec_and_test(&cfqq->ref))
+	cfqq->ref--;
+	if (cfqq->ref)
 		return;
 
 	cfq_log_cfqq(cfqd, cfqq, "put_queue");
@@ -2843,7 +2850,7 @@ static void cfq_init_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 	RB_CLEAR_NODE(&cfqq->p_node);
 	INIT_LIST_HEAD(&cfqq->fifo);
 
-	atomic_set(&cfqq->ref, 0);
+	cfqq->ref = 0;
 	cfqq->cfqd = cfqd;
 
 	cfq_mark_cfqq_prio_changed(cfqq);
@@ -2979,11 +2986,11 @@ cfq_get_queue(struct cfq_data *cfqd, bool is_sync, struct io_context *ioc,
 	 * pin the queue now that it's allocated, scheduler exit will prune it
 	 */
 	if (!is_sync && !(*async_cfqq)) {
-		atomic_inc(&cfqq->ref);
+		cfqq->ref++;
 		*async_cfqq = cfqq;
 	}
 
-	atomic_inc(&cfqq->ref);
+	cfqq->ref++;
 	return cfqq;
 }
 
@@ -3265,6 +3272,10 @@ cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
 	if (cfq_class_rt(new_cfqq) && !cfq_class_rt(cfqq))
 		return true;
 
+	/* An idle queue should not be idle now for some reason */
+	if (RB_EMPTY_ROOT(&cfqq->sort_list) && !cfq_should_idle(cfqd, cfqq))
+		return true;
+
 	if (!cfqd->active_cic || !cfq_cfqq_wait_request(cfqq))
 		return false;
 
@@ -3284,9 +3295,18 @@ cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
  */
 static void cfq_preempt_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 {
+	struct cfq_queue *old_cfqq = cfqd->active_queue;
+
 	cfq_log_cfqq(cfqd, cfqq, "preempt");
 	cfq_slice_expired(cfqd, 1);
 
+	/*
+	 * workload type is changed, don't save slice, otherwise preempt
+	 * doesn't happen
+	 */
+	if (cfqq_type(old_cfqq) != cfqq_type(cfqq))
+		cfqq->cfqg->saved_workload_slice = 0;
+
 	/*
 	 * Put the new queue at the front of the of the current list,
 	 * so we know that it will be selected next.
@@ -3412,6 +3432,10 @@ static bool cfq_should_wait_busy(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 {
 	struct cfq_io_context *cic = cfqd->active_cic;
 
+	/* If the queue already has requests, don't wait */
+	if (!RB_EMPTY_ROOT(&cfqq->sort_list))
+		return false;
+
 	/* If there are other queues in the group, don't wait */
 	if (cfqq->cfqg->nr_cfqq > 1)
 		return false;
@@ -3681,10 +3705,10 @@ new_queue:
 	}
 
 	cfqq->allocated[rw]++;
-	atomic_inc(&cfqq->ref);
 
 	spin_unlock_irqrestore(q->queue_lock, flags);
 
+	cfqq->ref++;
 	rq->elevator_private[0] = cic;
 	rq->elevator_private[1] = cfqq;
 	rq->elevator_private[2] = cfq_ref_get_cfqg(cfqq->cfqg);
@@ -3862,6 +3886,10 @@ static void *cfq_init_queue(struct request_queue *q)
 	if (!cfqd)
 		return NULL;
 
+	/*
+	 * Don't need take queue_lock in the routine, since we are
+	 * initializing the ioscheduler, and nobody is using cfqd
+	 */
 	cfqd->cic_index = i;
 
 	/* Init root service tree */
@@ -3881,7 +3909,7 @@ static void *cfq_init_queue(struct request_queue *q)
 	 * Take a reference to root group which we never drop. This is just
 	 * to make sure that cfq_put_cfqg() does not try to kfree root group
 	 */
-	atomic_set(&cfqg->ref, 1);
+	cfqg->ref = 1;
 	rcu_read_lock();
 	cfq_blkiocg_add_blkio_group(&blkio_root_cgroup, &cfqg->blkg,
 					(void *)cfqd, 0);
@@ -3901,7 +3929,7 @@ static void *cfq_init_queue(struct request_queue *q)
 	 * will not attempt to free it.
 	 */
 	cfq_init_cfqq(cfqd, &cfqd->oom_cfqq, 1, 0);
-	atomic_inc(&cfqd->oom_cfqq.ref);
+	cfqd->oom_cfqq.ref++;
 	cfq_link_cfqq_cfqg(&cfqd->oom_cfqq, &cfqd->root_group);
 
 	INIT_LIST_HEAD(&cfqd->cic_list);
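Two mechanical themes run through the cfq-iosched.c hunks above: the cfq_queue and cfq_group reference counts move from atomic_t to plain int, which is safe because every get and put already happens under the queue spinlock, and functions that logically return bool stop returning 0/1 in favor of false/true. A user-space model of the refcount side of this, assuming only that a single lock covers all get/put callers; the names are illustrative, not the CFQ API:

#include <assert.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;

struct queue_model {
	int ref;	/* plain int: only ever touched under queue_lock */
};

/* caller holds queue_lock, so ref++ needs no atomic read-modify-write */
static void queue_get(struct queue_model *q)
{
	q->ref++;
}

/* mirrors the reworked cfq_put_queue(): BUG_ON, decrement, then test */
static void queue_put(struct queue_model *q)
{
	assert(q->ref > 0);
	q->ref--;
	if (q->ref)
		return;
	free(q);
	printf("queue freed\n");
}

int main(void)
{
	struct queue_model *q = calloc(1, sizeof(*q));

	pthread_mutex_lock(&queue_lock);
	queue_get(q);		/* ref = 1 */
	queue_get(q);		/* ref = 2 */
	queue_put(q);		/* ref = 1, still live */
	queue_put(q);		/* ref = 0, freed */
	pthread_mutex_unlock(&queue_lock);
	return 0;
}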
(one more file changed, +520 -30: diff suppressed because it is too large)
block/ioctl.c (+3 -2)
@@ -294,11 +294,12 @@ int blkdev_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
 			return -EINVAL;
 		if (get_user(n, (int __user *) arg))
 			return -EFAULT;
-		if (!(mode & FMODE_EXCL) && bd_claim(bdev, &bdev) < 0)
+		if (!(mode & FMODE_EXCL) &&
+		    blkdev_get(bdev, mode | FMODE_EXCL, &bdev) < 0)
 			return -EBUSY;
 		ret = set_blocksize(bdev, n);
 		if (!(mode & FMODE_EXCL))
-			bd_release(bdev);
+			blkdev_put(bdev, mode | FMODE_EXCL);
 		return ret;
 	case BLKPG:
 		ret = blkpg_ioctl(bdev, (struct blkpg_ioctl_arg __user *) arg);
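The ioctl.c hunk reflects the 2.6.38 removal of bd_claim()/bd_release(): exclusive access to a block device is now requested by passing FMODE_EXCL plus a holder cookie to blkdev_get() and dropped by a matching blkdev_put() with the same mode. For an in-kernel user that starts from a path rather than an already-open struct block_device, the same release added blkdev_get_by_path(); a minimal sketch, where the device path and holder are placeholders:

#include <linux/blkdev.h>
#include <linux/err.h>
#include <linux/fs.h>

static int claim_device_exclusively(void *holder)
{
	struct block_device *bdev;

	/* FMODE_EXCL + holder replaces the old bd_claim(bdev, holder) */
	bdev = blkdev_get_by_path("/dev/sdz1",	/* placeholder path */
				  FMODE_READ | FMODE_WRITE | FMODE_EXCL,
				  holder);
	if (IS_ERR(bdev))
		return PTR_ERR(bdev);

	/* ... sole exclusive opener of the device until the put ... */

	/* the mode must repeat FMODE_EXCL, as in the ioctl hunk above */
	blkdev_put(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
	return 0;
}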