Merge commit 'v2.6.26' into bkl-removal
block/as-iosched.c
@@ -831,6 +831,8 @@ static void as_completed_request(struct request_queue *q, struct request *rq)
 	}
 
 	if (ad->changed_batch && ad->nr_dispatched == 1) {
+		ad->current_batch_expires = jiffies +
+					ad->batch_expire[ad->batch_data_dir];
 		kblockd_schedule_work(&ad->antic_work);
 		ad->changed_batch = 0;
 
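Note: the two added lines appear to arm the batch expiry timer at the moment the batch switch takes effect, i.e. when the first request of the new batch completes, instead of leaving the previous batch's expiry in force. The jiffies + ad->batch_expire[...] form is the usual "now plus delta" idiom for kernel timeouts.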
block/blk-core.c (+17 -20)
@@ -806,35 +806,32 @@ static struct request *get_request_wait(struct request_queue *q, int rw_flags,
 
 	rq = get_request(q, rw_flags, bio, GFP_NOIO);
 	while (!rq) {
 		DEFINE_WAIT(wait);
+		struct io_context *ioc;
 		struct request_list *rl = &q->rq;
 
 		prepare_to_wait_exclusive(&rl->wait[rw], &wait,
 				TASK_UNINTERRUPTIBLE);
 
-		rq = get_request(q, rw_flags, bio, GFP_NOIO);
+		blk_add_trace_generic(q, bio, rw, BLK_TA_SLEEPRQ);
 
-		if (!rq) {
-			struct io_context *ioc;
-
-			blk_add_trace_generic(q, bio, rw, BLK_TA_SLEEPRQ);
+		__generic_unplug_device(q);
+		spin_unlock_irq(q->queue_lock);
+		io_schedule();
 
-			__generic_unplug_device(q);
-			spin_unlock_irq(q->queue_lock);
-			io_schedule();
-
-			/*
-			 * After sleeping, we become a "batching" process and
-			 * will be able to allocate at least one request, and
-			 * up to a big batch of them for a small period time.
-			 * See ioc_batching, ioc_set_batching
-			 */
-			ioc = current_io_context(GFP_NOIO, q->node);
-			ioc_set_batching(q, ioc);
+		/*
+		 * After sleeping, we become a "batching" process and
+		 * will be able to allocate at least one request, and
+		 * up to a big batch of them for a small period time.
+		 * See ioc_batching, ioc_set_batching
+		 */
+		ioc = current_io_context(GFP_NOIO, q->node);
+		ioc_set_batching(q, ioc);
 
-			spin_lock_irq(q->queue_lock);
-		}
+		spin_lock_irq(q->queue_lock);
 		finish_wait(&rl->wait[rw], &wait);
-	}
+
+		rq = get_request(q, rw_flags, bio, GFP_NOIO);
+	};
 
 	return rq;
 }
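Note: the rewrite moves the second get_request() call to the end of the loop, so the while (!rq) test doubles as the success check and the nested "if (!rq)" block disappears; the sleep/batching work now runs unconditionally on each failed pass. Reduced to its skeleton (a sketch with stand-in names, not the kernel API):

	#include <stdio.h>

	struct resource { int id; };

	static struct resource pool = { 1 };
	static int available;

	/* stand-in for get_request(): may fail until a unit is freed */
	static struct resource *try_alloc(void)
	{
		return available ? &pool : NULL;
	}

	/* stand-in for the unplug + io_schedule() sleep */
	static void wait_for_free(void)
	{
		available = 1;		/* pretend another task freed one */
	}

	static struct resource *alloc_or_wait(void)
	{
		struct resource *r = try_alloc();	/* first attempt */

		while (!r) {
			wait_for_free();	/* sleep until progress is possible */
			r = try_alloc();	/* retry at loop end; !r re-tested */
		}
		return r;
	}

	int main(void)
	{
		printf("got resource %d\n", alloc_or_wait()->id);
		return 0;
	}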
block/blktrace.c (+25 -4)
@@ -75,6 +75,24 @@ static void trace_note_time(struct blk_trace *bt)
 	local_irq_restore(flags);
 }
 
+void __trace_note_message(struct blk_trace *bt, const char *fmt, ...)
+{
+	int n;
+	va_list args;
+	unsigned long flags;
+	char *buf;
+
+	local_irq_save(flags);
+	buf = per_cpu_ptr(bt->msg_data, smp_processor_id());
+	va_start(args, fmt);
+	n = vscnprintf(buf, BLK_TN_MAX_MSG, fmt, args);
+	va_end(args);
+
+	trace_note(bt, 0, BLK_TN_MESSAGE, buf, n);
+	local_irq_restore(flags);
+}
+EXPORT_SYMBOL_GPL(__trace_note_message);
+
 static int act_log_check(struct blk_trace *bt, u32 what, sector_t sector,
 			 pid_t pid)
 {
@@ -141,10 +159,7 @@ void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
 	/*
 	 * A word about the locking here - we disable interrupts to reserve
 	 * some space in the relay per-cpu buffer, to prevent an irq
-	 * from coming in and stepping on our toes. Once reserved, it's
-	 * enough to get preemption disabled to prevent read of this data
-	 * before we are through filling it. get_cpu()/put_cpu() does this
-	 * for us
+	 * from coming in and stepping on our toes.
 	 */
 	local_irq_save(flags);
 
@@ -232,6 +247,7 @@ static void blk_trace_cleanup(struct blk_trace *bt)
 	debugfs_remove(bt->dropped_file);
 	blk_remove_tree(bt->dir);
 	free_percpu(bt->sequence);
+	free_percpu(bt->msg_data);
 	kfree(bt);
 }
 
@@ -346,6 +362,10 @@ int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
 	if (!bt->sequence)
 		goto err;
 
+	bt->msg_data = __alloc_percpu(BLK_TN_MAX_MSG);
+	if (!bt->msg_data)
+		goto err;
+
 	ret = -ENOENT;
 	dir = blk_create_tree(buts->name);
 	if (!dir)
@@ -392,6 +412,7 @@ err:
 	if (bt->dropped_file)
 		debugfs_remove(bt->dropped_file);
 	free_percpu(bt->sequence);
+	free_percpu(bt->msg_data);
 	if (bt->rchan)
 		relay_close(bt->rchan);
 	kfree(bt);
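Note: the new __trace_note_message() formats into a per-cpu buffer with interrupts disabled, so the buffer cannot be reused concurrently on the same CPU, then emits the result as a BLK_TN_MESSAGE note; callers presumably reach it through the blk_add_trace_msg() wrapper seen in the elevator.c hunk below. A user-space sketch of the same bounded-format pattern (MAX_MSG of 128 is an illustrative stand-in for BLK_TN_MAX_MSG; the kernel uses vscnprintf, which returns the truncated length rather than the would-be length):

	#include <stdarg.h>
	#include <stdio.h>

	#define MAX_MSG 128

	static char msg_buf[MAX_MSG];

	/* format into a fixed buffer, never overrunning it */
	static int note_message(const char *fmt, ...)
	{
		va_list args;
		int n;

		va_start(args, fmt);
		n = vsnprintf(msg_buf, sizeof(msg_buf), fmt, args);
		va_end(args);
		return n;	/* length vsnprintf wrote (or would have written) */
	}

	int main(void)
	{
		int n = note_message("elv switch: %s", "cfq");

		printf("%d bytes: %s\n", n, msg_buf);
		return 0;
	}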
block/bsg.c (+2 -1)
@@ -710,11 +710,12 @@ static void bsg_kref_release_function(struct kref *kref)
 {
 	struct bsg_class_device *bcd =
 		container_of(kref, struct bsg_class_device, ref);
+	struct device *parent = bcd->parent;
 
 	if (bcd->release)
 		bcd->release(bcd->parent);
 
-	put_device(bcd->parent);
+	put_device(parent);
 }
 
 static int bsg_put_device(struct bsg_device *bd)
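Note: the bsg change caches bcd->parent in a local before invoking ->release(). Since the release callback can tear down the structure that contains bcd, the final put_device() must not reach through bcd again; dropping the reference via the saved parent pointer avoids that use-after-free.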
block/cfq-iosched.c (+30 -6)
@@ -124,6 +124,8 @@ struct cfq_data {
 struct cfq_queue {
 	/* reference count */
 	atomic_t ref;
+	/* various state flags, see below */
+	unsigned int flags;
 	/* parent cfq_data */
 	struct cfq_data *cfqd;
 	/* service_tree member */
@@ -138,14 +140,14 @@ struct cfq_queue {
 	int queued[2];
 	/* currently allocated requests */
 	int allocated[2];
-	/* pending metadata requests */
-	int meta_pending;
 	/* fifo list of requests in sort_list */
 	struct list_head fifo;
 
 	unsigned long slice_end;
 	long slice_resid;
 
+	/* pending metadata requests */
+	int meta_pending;
 	/* number of requests that are on the dispatch list or inside driver */
 	int dispatched;
 
@@ -153,8 +155,6 @@ struct cfq_queue {
 	unsigned short ioprio, org_ioprio;
 	unsigned short ioprio_class, org_ioprio_class;
 
-	/* various state flags, see below */
-	unsigned int flags;
 };
 
 enum cfqq_state_flags {
@@ -1142,6 +1142,9 @@ static void cfq_put_queue(struct cfq_queue *cfqq)
 	kmem_cache_free(cfq_pool, cfqq);
 }
 
+/*
+ * Must always be called with the rcu_read_lock() held
+ */
 static void
 __call_for_each_cic(struct io_context *ioc,
 		    void (*func)(struct io_context *, struct cfq_io_context *))
@@ -1197,6 +1200,11 @@ static void cic_free_func(struct io_context *ioc, struct cfq_io_context *cic)
 	cfq_cic_free(cic);
 }
 
+/*
+ * Must be called with rcu_read_lock() held or preemption otherwise disabled.
+ * Only two callers of this - ->dtor() which is called with the rcu_read_lock(),
+ * and ->trim() which is called with the task lock held
+ */
 static void cfq_free_io_context(struct io_context *ioc)
 {
 	/*
@@ -1502,20 +1510,24 @@ static struct cfq_io_context *
 cfq_cic_lookup(struct cfq_data *cfqd, struct io_context *ioc)
 {
 	struct cfq_io_context *cic;
+	unsigned long flags;
 	void *k;
 
 	if (unlikely(!ioc))
 		return NULL;
 
+	rcu_read_lock();
+
 	/*
 	 * we maintain a last-hit cache, to avoid browsing over the tree
 	 */
 	cic = rcu_dereference(ioc->ioc_data);
-	if (cic && cic->key == cfqd)
+	if (cic && cic->key == cfqd) {
+		rcu_read_unlock();
 		return cic;
+	}
 
 	do {
-		rcu_read_lock();
 		cic = radix_tree_lookup(&ioc->radix_root, (unsigned long) cfqd);
 		rcu_read_unlock();
 		if (!cic)
@@ -1524,10 +1536,13 @@ cfq_cic_lookup(struct cfq_data *cfqd, struct io_context *ioc)
 		k = cic->key;
 		if (unlikely(!k)) {
 			cfq_drop_dead_cic(cfqd, ioc, cic);
+			rcu_read_lock();
 			continue;
 		}
 
+		spin_lock_irqsave(&ioc->lock, flags);
 		rcu_assign_pointer(ioc->ioc_data, cic);
+		spin_unlock_irqrestore(&ioc->lock, flags);
 		break;
 	} while (1);
 
@@ -2134,6 +2149,10 @@ static void *cfq_init_queue(struct request_queue *q)
 
 static void cfq_slab_kill(void)
 {
+	/*
+	 * Caller already ensured that pending RCU callbacks are completed,
+	 * so we should have no busy allocations at this point.
+	 */
 	if (cfq_pool)
 		kmem_cache_destroy(cfq_pool);
 	if (cfq_ioc_pool)
@@ -2292,6 +2311,11 @@ static void __exit cfq_exit(void)
 	ioc_gone = &all_gone;
 	/* ioc_gone's update must be visible before reading ioc_count */
 	smp_wmb();
+
+	/*
+	 * this also protects us from entering cfq_slab_kill() with
+	 * pending RCU callbacks
+	 */
 	if (elv_ioc_count_read(ioc_count))
 		wait_for_completion(ioc_gone);
 	cfq_slab_kill();
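Note: the cfq changes make the RCU rules explicit. cfq_cic_lookup() now enters rcu_read_lock() before dereferencing ioc->ioc_data and keeps it held across the radix-tree lookup (re-acquiring it on the retry path), and the last-hit cache update is serialized under ioc->lock; the new comments record the locking contract for the cic iteration helpers, and cfq_exit() documents that waiting on ioc_gone also keeps cfq_slab_kill() from running with RCU callbacks still pending. The struct cfq_queue shuffle only regroups fields (flags moves to the top, meta_pending down next to dispatched) without changing them.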
block/elevator.c
@@ -1110,6 +1110,8 @@ static int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
 	queue_flag_clear(QUEUE_FLAG_ELVSWITCH, q);
 	spin_unlock_irq(q->queue_lock);
 
+	blk_add_trace_msg(q, "elv switch: %s", e->elevator_type->elevator_name);
+
 	return 1;
 
 fail_register:
block/genhd.c
@@ -660,6 +660,8 @@ dev_t blk_lookup_devt(const char *name, int part)
 
 	mutex_lock(&block_class_lock);
 	list_for_each_entry(dev, &block_class.devices, node) {
+		if (dev->type != &disk_type)
+			continue;
 		if (strcmp(dev->bus_id, name) == 0) {
 			struct gendisk *disk = dev_to_disk(dev);
 
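Note: blk_lookup_devt() walks every device registered on the block class, which can include non-disk entries; the added type check skips anything that is not a gendisk before the bus_id string compare, so only whole disks can match by name.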