Merge branch 'block-for-2.6.39-core' of ssh://master.kernel.org/pub/scm/linux/kernel/git/tj/misc into for-2.6.39/core
+6 -12
@@ -342,7 +342,7 @@ void blk_start_queue(struct request_queue *q)
 	WARN_ON(!irqs_disabled());
 
 	queue_flag_clear(QUEUE_FLAG_STOPPED, q);
-	__blk_run_queue(q);
+	__blk_run_queue(q, false);
 }
 EXPORT_SYMBOL(blk_start_queue);
 
@@ -396,13 +396,14 @@ EXPORT_SYMBOL(blk_sync_queue);
 /**
  * __blk_run_queue - run a single device queue
  * @q: The queue to run
+ * @force_kblockd: Don't run @q->request_fn directly. Use kblockd.
  *
  * Description:
  *    See @blk_run_queue. This variant must be called with the queue lock
  *    held and interrupts disabled.
  *
 */
-void __blk_run_queue(struct request_queue *q)
+void __blk_run_queue(struct request_queue *q, bool force_kblockd)
 {
 	blk_remove_plug(q);
 
@@ -416,7 +417,7 @@ void __blk_run_queue(struct request_queue *q)
 	 * Only recurse once to avoid overrunning the stack, let the unplug
 	 * handling reinvoke the handler shortly if we already got there.
 	 */
-	if (!queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) {
+	if (!force_kblockd && !queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) {
 		q->request_fn(q);
 		queue_flag_clear(QUEUE_FLAG_REENTER, q);
 	} else {
@@ -439,7 +440,7 @@ void blk_run_queue(struct request_queue *q)
 	unsigned long flags;
 
 	spin_lock_irqsave(q->queue_lock, flags);
-	__blk_run_queue(q);
+	__blk_run_queue(q, false);
 	spin_unlock_irqrestore(q->queue_lock, flags);
 }
 EXPORT_SYMBOL(blk_run_queue);
@@ -1085,7 +1086,7 @@ void blk_insert_request(struct request_queue *q, struct request *rq,
 
 	drive_stat_acct(rq, 1);
 	__elv_add_request(q, rq, where, 0);
-	__blk_run_queue(q);
+	__blk_run_queue(q, false);
 	spin_unlock_irqrestore(q->queue_lock, flags);
 }
 EXPORT_SYMBOL(blk_insert_request);
@@ -2642,13 +2643,6 @@ int kblockd_schedule_work(struct request_queue *q, struct work_struct *work)
 }
 EXPORT_SYMBOL(kblockd_schedule_work);
 
-int kblockd_schedule_delayed_work(struct request_queue *q,
-			struct delayed_work *dwork, unsigned long delay)
-{
-	return queue_delayed_work(kblockd_workqueue, dwork, delay);
-}
-EXPORT_SYMBOL(kblockd_schedule_delayed_work);
-
 int __init blk_dev_init(void)
 {
 	BUILD_BUG_ON(__REQ_NR_BITS > 8 *
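For context on the new signature: the added boolean only selects between calling q->request_fn directly and deferring the run to kblockd; the locking rules from the kernel-doc above are unchanged. A minimal, hypothetical caller sketch (names are illustrative, not part of the patch) looks like this:

	/* Hypothetical caller: kick a queue from process context.
	 * __blk_run_queue() must be called with the queue lock held and
	 * interrupts disabled; passing false keeps the old behaviour of
	 * possibly invoking q->request_fn() directly.
	 */
	static void example_kick_queue(struct request_queue *q)
	{
		unsigned long flags;

		spin_lock_irqsave(q->queue_lock, flags);
		__blk_run_queue(q, false);
		spin_unlock_irqrestore(q->queue_lock, flags);
	}

This is essentially what the updated blk_run_queue() wrapper in the hunk above does.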
+13 -5
@@ -212,9 +212,14 @@ static void flush_end_io(struct request *flush_rq, int error)
 		queued |= blk_flush_complete_seq(rq, seq, error);
 	}
 
-	/* after populating an empty queue, kick it to avoid stall */
+	/*
+	 * Moving a request silently to empty queue_head may stall the
+	 * queue. Kick the queue in those cases. This function is called
+	 * from request completion path and calling directly into
+	 * request_fn may confuse the driver. Always use kblockd.
+	 */
 	if (queued && was_empty)
-		__blk_run_queue(q);
+		__blk_run_queue(q, true);
 }
 
 /**
@@ -257,7 +262,7 @@ static bool blk_kick_flush(struct request_queue *q)
 	q->flush_rq.end_io = flush_end_io;
 
 	q->flush_pending_idx ^= 1;
-	elv_insert(q, &q->flush_rq, ELEVATOR_INSERT_FRONT);
+	elv_insert(q, &q->flush_rq, ELEVATOR_INSERT_REQUEUE);
 	return true;
 }
 
@@ -266,9 +271,12 @@ static void flush_data_end_io(struct request *rq, int error)
 	struct request_queue *q = rq->q;
 	bool was_empty = elv_queue_empty(q);
 
-	/* after populating an empty queue, kick it to avoid stall */
+	/*
+	 * After populating an empty queue, kick it to avoid stall. Read
+	 * the comment in flush_end_io().
+	 */
 	if (blk_flush_complete_seq(rq, REQ_FSEQ_DATA, error) && was_empty)
-		__blk_run_queue(q);
+		__blk_run_queue(q, true);
 }
 
 /**
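The new comments explain why these call sites pass true: flush_end_io() and flush_data_end_io() run from the request completion path, so calling back into ->request_fn() there could confuse the driver. A completion-path caller therefore forces the dispatch through kblockd. A rough sketch of that pattern, assuming (as in flush_end_io()) that the queue lock is already held by the caller and with hypothetical names:

	static void example_end_io(struct request *rq, int error)
	{
		struct request_queue *q = rq->q;
		bool was_empty = elv_queue_empty(q);

		/* ... move or complete requests onto q->queue_head here ... */

		/* Completion path: never recurse into ->request_fn(),
		 * let kblockd do the actual dispatch. */
		if (was_empty)
			__blk_run_queue(q, true);
	}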
+1 -1
@@ -132,7 +132,7 @@ static void bio_batch_end_io(struct bio *bio, int err)
 }
 
 /**
- * blkdev_issue_zeroout generate number of zero filed write bios
+ * blkdev_issue_zeroout - generate number of zero filed write bios
 * @bdev:	blockdev to issue
 * @sector:	start sector
 * @nr_sects:	number of sectors to write
+18 -11
@@ -20,6 +20,11 @@ static int throtl_quantum = 32;
 /* Throttling is performed over 100ms slice and after that slice is renewed */
 static unsigned long throtl_slice = HZ/10;	/* 100 ms */
 
+/* A workqueue to queue throttle related work */
+static struct workqueue_struct *kthrotld_workqueue;
+static void throtl_schedule_delayed_work(struct throtl_data *td,
+				unsigned long delay);
+
 struct throtl_rb_root {
 	struct rb_root rb;
 	struct rb_node *left;
@@ -345,10 +350,9 @@ static void throtl_schedule_next_dispatch(struct throtl_data *td)
 	update_min_dispatch_time(st);
 
 	if (time_before_eq(st->min_disptime, jiffies))
-		throtl_schedule_delayed_work(td->queue, 0);
+		throtl_schedule_delayed_work(td, 0);
 	else
-		throtl_schedule_delayed_work(td->queue,
-				(st->min_disptime - jiffies));
+		throtl_schedule_delayed_work(td, (st->min_disptime - jiffies));
 }
 
 static inline void
@@ -815,10 +819,10 @@ void blk_throtl_work(struct work_struct *work)
 }
 
 /* Call with queue lock held */
-void throtl_schedule_delayed_work(struct request_queue *q, unsigned long delay)
+static void
+throtl_schedule_delayed_work(struct throtl_data *td, unsigned long delay)
 {
 
-	struct throtl_data *td = q->td;
 	struct delayed_work *dwork = &td->throtl_work;
 
 	if (total_nr_queued(td) > 0) {
@@ -827,12 +831,11 @@ void throtl_schedule_delayed_work(struct request_queue *q, unsigned long delay)
 		 * Cancel that and schedule a new one.
 		 */
 		__cancel_delayed_work(dwork);
-		kblockd_schedule_delayed_work(q, dwork, delay);
+		queue_delayed_work(kthrotld_workqueue, dwork, delay);
 		throtl_log(td, "schedule work. delay=%lu jiffies=%lu",
 				delay, jiffies);
 	}
 }
-EXPORT_SYMBOL(throtl_schedule_delayed_work);
 
 static void
 throtl_destroy_tg(struct throtl_data *td, struct throtl_grp *tg)
@@ -920,7 +923,7 @@ static void throtl_update_blkio_group_read_bps(void *key,
 	smp_mb__after_atomic_inc();
 
 	/* Schedule a work now to process the limit change */
-	throtl_schedule_delayed_work(td->queue, 0);
+	throtl_schedule_delayed_work(td, 0);
 }
 
 static void throtl_update_blkio_group_write_bps(void *key,
@@ -934,7 +937,7 @@ static void throtl_update_blkio_group_write_bps(void *key,
 	smp_mb__before_atomic_inc();
 	atomic_inc(&td->limits_changed);
 	smp_mb__after_atomic_inc();
-	throtl_schedule_delayed_work(td->queue, 0);
+	throtl_schedule_delayed_work(td, 0);
 }
 
 static void throtl_update_blkio_group_read_iops(void *key,
@@ -948,7 +951,7 @@ static void throtl_update_blkio_group_read_iops(void *key,
 	smp_mb__before_atomic_inc();
 	atomic_inc(&td->limits_changed);
 	smp_mb__after_atomic_inc();
-	throtl_schedule_delayed_work(td->queue, 0);
+	throtl_schedule_delayed_work(td, 0);
 }
 
 static void throtl_update_blkio_group_write_iops(void *key,
@@ -962,7 +965,7 @@ static void throtl_update_blkio_group_write_iops(void *key,
 	smp_mb__before_atomic_inc();
 	atomic_inc(&td->limits_changed);
 	smp_mb__after_atomic_inc();
-	throtl_schedule_delayed_work(td->queue, 0);
+	throtl_schedule_delayed_work(td, 0);
 }
 
 static void throtl_shutdown_wq(struct request_queue *q)
@@ -1135,6 +1138,10 @@ void blk_throtl_exit(struct request_queue *q)
 
 static int __init throtl_init(void)
 {
+	kthrotld_workqueue = alloc_workqueue("kthrotld", WQ_MEM_RECLAIM, 0);
+	if (!kthrotld_workqueue)
+		panic("Failed to create kthrotld\n");
+
 	blkio_policy_register(&blkio_policy_throtl);
 	return 0;
 }
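The throttling changes move the delayed work off kblockd and onto a dedicated kthrotld workqueue, allocated once in throtl_init(). That follows the usual pattern for a subsystem-owned workqueue: allocate it at init time and queue delayed work on it directly. A minimal sketch of the pattern, with hypothetical names (example_*), is:

	#include <linux/init.h>
	#include <linux/workqueue.h>

	static struct workqueue_struct *example_wq;
	static struct delayed_work example_dwork;

	static void example_work_fn(struct work_struct *work)
	{
		/* periodic processing, e.g. dispatching throttled bios */
	}

	static int __init example_init(void)
	{
		/* WQ_MEM_RECLAIM guarantees forward progress under memory
		 * pressure, which matters for work in the block I/O path. */
		example_wq = alloc_workqueue("example_wq", WQ_MEM_RECLAIM, 0);
		if (!example_wq)
			return -ENOMEM;

		INIT_DELAYED_WORK(&example_dwork, example_work_fn);
		queue_delayed_work(example_wq, &example_dwork, HZ / 10);
		return 0;
	}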
+3 -3
@@ -3344,7 +3344,7 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 		    cfqd->busy_queues > 1) {
 			cfq_del_timer(cfqd, cfqq);
 			cfq_clear_cfqq_wait_request(cfqq);
-			__blk_run_queue(cfqd->queue);
+			__blk_run_queue(cfqd->queue, false);
 		} else {
 			cfq_blkiocg_update_idle_time_stats(
 				&cfqq->cfqg->blkg);
@@ -3359,7 +3359,7 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 		 * this new queue is RT and the current one is BE
 		 */
 		cfq_preempt_queue(cfqd, cfqq);
-		__blk_run_queue(cfqd->queue);
+		__blk_run_queue(cfqd->queue, false);
 	}
 }
 
@@ -3719,7 +3719,7 @@ static void cfq_kick_queue(struct work_struct *work)
 	struct request_queue *q = cfqd->queue;
 
 	spin_lock_irq(q->queue_lock);
-	__blk_run_queue(cfqd->queue);
+	__blk_run_queue(cfqd->queue, false);
 	spin_unlock_irq(q->queue_lock);
 }
 
+2 -2
@@ -602,7 +602,7 @@ void elv_quiesce_start(struct request_queue *q)
 	 */
 	elv_drain_elevator(q);
 	while (q->rq.elvpriv) {
-		__blk_run_queue(q);
+		__blk_run_queue(q, false);
 		spin_unlock_irq(q->queue_lock);
 		msleep(10);
 		spin_lock_irq(q->queue_lock);
@@ -651,7 +651,7 @@ void elv_insert(struct request_queue *q, struct request *rq, int where)
 		 * with anything. There's no point in delaying queue
 		 * processing.
 		 */
-		__blk_run_queue(q);
+		__blk_run_queue(q, false);
 		break;
 
 	case ELEVATOR_INSERT_SORT:
+1 -1
@@ -1355,7 +1355,7 @@ int invalidate_partition(struct gendisk *disk, int partno)
 	struct block_device *bdev = bdget_disk(disk, partno);
 	if (bdev) {
 		fsync_bdev(bdev);
-		res = __invalidate_device(bdev);
+		res = __invalidate_device(bdev, true);
 		bdput(bdev);
 	}
 	return res;
+5 -3
@@ -294,9 +294,11 @@ int blkdev_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
 			return -EINVAL;
 		if (get_user(n, (int __user *) arg))
 			return -EFAULT;
-		if (!(mode & FMODE_EXCL) &&
-		    blkdev_get(bdev, mode | FMODE_EXCL, &bdev) < 0)
-			return -EBUSY;
+		if (!(mode & FMODE_EXCL)) {
+			bdgrab(bdev);
+			if (blkdev_get(bdev, mode | FMODE_EXCL, &bdev) < 0)
+				return -EBUSY;
+		}
 		ret = set_blocksize(bdev, n);
 		if (!(mode & FMODE_EXCL))
 			blkdev_put(bdev, mode | FMODE_EXCL);