[PATCH] 02/05: update ioscheds to use generic dispatch queue
This patch updates all four ioscheds to use the generic dispatch queue. There's one behavior change in as-iosched.

* In as-iosched, when force dispatching (ELEVATOR_INSERT_BACK), batch_data_dir is reset to REQ_SYNC and changed_batch and new_batch are cleared to zero. This prevents AS from doing an incorrect update_write_batch after the force-dispatched requests have finished.

* In cfq-iosched, cfqd->rq_in_driver currently counts the number of activated (removed) requests to determine whether queue-kicking is needed and whether cfq_max_depth has been reached. With the generic dispatch queue, counting the number of dispatched requests is more appropriate.

* cfq_max_depth can be lowered to 1 again.

Original from Tejun Heo, modified version applied.

Signed-off-by: Jens Axboe <axboe@suse.de>
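For orientation, here is a minimal user-space sketch of the dispatch contract this series converts the schedulers to. All names (toy_request, toy_queue, toy_move_to_dispatch, toy_dispatch) are invented for illustration and are not kernel API: the scheduler keeps requests on private FIFOs, the core owns a single dispatch list, and dispatch(q, force) moves requests from the former to the latter and returns how many it moved; with force set it must drain everything, which is why as-iosched resets its batch accounting first.

/*
 * Toy model of the generic dispatch queue contract; compiles and runs
 * in user space, kernel locking and request fields omitted.
 */
#include <stdio.h>

struct toy_request {
        int sector;
        struct toy_request *next;
};

struct toy_queue {
        struct toy_request *fifo[2];    /* scheduler-private: 0=sync, 1=async */
        struct toy_request *dispatch;   /* core-owned dispatch list */
};

/* move the head of one private fifo to the tail of the dispatch list */
static int toy_move_to_dispatch(struct toy_queue *q, int dir)
{
        struct toy_request *rq = q->fifo[dir], **tail;

        if (!rq)
                return 0;
        q->fifo[dir] = rq->next;
        rq->next = NULL;
        for (tail = &q->dispatch; *tail; tail = &(*tail)->next)
                ;
        *tail = rq;
        return 1;
}

/* the elevator_dispatch_fn analogue: returns number of requests moved */
static int toy_dispatch(struct toy_queue *q, int force)
{
        int dispatched = 0;

        if (force) {
                /* forced dispatch: dump both fifos, accounting is useless */
                while (toy_move_to_dispatch(q, 0))
                        dispatched++;
                while (toy_move_to_dispatch(q, 1))
                        dispatched++;
                return dispatched;
        }

        /* normal dispatch: hand over a single request, sync first */
        return toy_move_to_dispatch(q, 0) || toy_move_to_dispatch(q, 1);
}

int main(void)
{
        struct toy_queue q = { { NULL, NULL }, NULL };
        struct toy_request a = { 8, NULL }, b = { 16, NULL };

        q.fifo[0] = &a;   /* one sync request */
        q.fifo[1] = &b;   /* one async request */

        printf("forced dispatch moved %d requests\n", toy_dispatch(&q, 1));
        return 0;
}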
as-iosched.c: +89 -209
@@ -98,7 +98,6 @@ struct as_data {

        struct as_rq *next_arq[2];      /* next in sort order */
        sector_t last_sector[2];        /* last REQ_SYNC & REQ_ASYNC sectors */
        struct list_head *dispatch;     /* driver dispatch queue */
        struct list_head *hash;         /* request hash */

        unsigned long exit_prob;        /* probability a task will exit while
@@ -239,6 +238,25 @@ static struct io_context *as_get_io_context(void)
        return ioc;
}

static void as_put_io_context(struct as_rq *arq)
{
        struct as_io_context *aic;

        if (unlikely(!arq->io_context))
                return;

        aic = arq->io_context->aic;

        if (arq->is_sync == REQ_SYNC && aic) {
                spin_lock(&aic->lock);
                set_bit(AS_TASK_IORUNNING, &aic->state);
                aic->last_end_request = jiffies;
                spin_unlock(&aic->lock);
        }

        put_io_context(arq->io_context);
}

/*
 * the back merge hash support functions
 */
@@ -950,23 +968,12 @@ static void as_completed_request(request_queue_t *q, struct request *rq)

        WARN_ON(!list_empty(&rq->queuelist));

        if (arq->state == AS_RQ_PRESCHED) {
                WARN_ON(arq->io_context);
                goto out;
        }

        if (arq->state == AS_RQ_MERGED)
                goto out_ioc;

        if (arq->state != AS_RQ_REMOVED) {
                printk("arq->state %d\n", arq->state);
                WARN_ON(1);
                goto out;
        }

        if (!blk_fs_request(rq))
                goto out;

        if (ad->changed_batch && ad->nr_dispatched == 1) {
                kblockd_schedule_work(&ad->antic_work);
                ad->changed_batch = 0;
@@ -1001,21 +1008,7 @@ static void as_completed_request(request_queue_t *q, struct request *rq)
                }
        }

out_ioc:
        if (!arq->io_context)
                goto out;

        if (arq->is_sync == REQ_SYNC) {
                struct as_io_context *aic = arq->io_context->aic;
                if (aic) {
                        spin_lock(&aic->lock);
                        set_bit(AS_TASK_IORUNNING, &aic->state);
                        aic->last_end_request = jiffies;
                        spin_unlock(&aic->lock);
                }
        }

        put_io_context(arq->io_context);
        as_put_io_context(arq);
out:
        arq->state = AS_RQ_POSTSCHED;
}
@@ -1051,68 +1044,6 @@ static void as_remove_queued_request(request_queue_t *q, struct request *rq)
        as_del_arq_rb(ad, arq);
}

/*
 * as_remove_dispatched_request is called to remove a request which has gone
 * to the dispatch list.
 */
static void as_remove_dispatched_request(request_queue_t *q, struct request *rq)
{
        struct as_rq *arq = RQ_DATA(rq);
        struct as_io_context *aic;

        if (!arq) {
                WARN_ON(1);
                return;
        }

        WARN_ON(arq->state != AS_RQ_DISPATCHED);
        WARN_ON(ON_RB(&arq->rb_node));
        if (arq->io_context && arq->io_context->aic) {
                aic = arq->io_context->aic;
                if (aic) {
                        WARN_ON(!atomic_read(&aic->nr_dispatched));
                        atomic_dec(&aic->nr_dispatched);
                }
        }
}

/*
 * as_remove_request is called when a driver has finished with a request.
 * This should be only called for dispatched requests, but for some reason
 * a POWER4 box running hwscan it does not.
 */
static void as_remove_request(request_queue_t *q, struct request *rq)
{
        struct as_rq *arq = RQ_DATA(rq);

        if (unlikely(arq->state == AS_RQ_NEW))
                goto out;

        if (ON_RB(&arq->rb_node)) {
                if (arq->state != AS_RQ_QUEUED) {
                        printk("arq->state %d\n", arq->state);
                        WARN_ON(1);
                        goto out;
                }
                /*
                 * We'll lose the aliased request(s) here. I don't think this
                 * will ever happen, but if it does, hopefully someone will
                 * report it.
                 */
                WARN_ON(!list_empty(&rq->queuelist));
                as_remove_queued_request(q, rq);
        } else {
                if (arq->state != AS_RQ_DISPATCHED) {
                        printk("arq->state %d\n", arq->state);
                        WARN_ON(1);
                        goto out;
                }
                as_remove_dispatched_request(q, rq);
        }
out:
        arq->state = AS_RQ_REMOVED;
}

/*
 * as_fifo_expired returns 0 if there are no expired reads on the fifo,
 * 1 otherwise. It is ratelimited so that we only perform the check once per
@@ -1165,7 +1096,6 @@ static inline int as_batch_expired(struct as_data *ad)
static void as_move_to_dispatch(struct as_data *ad, struct as_rq *arq)
{
        struct request *rq = arq->request;
        struct list_head *insert;
        const int data_dir = arq->is_sync;

        BUG_ON(!ON_RB(&arq->rb_node));
@@ -1198,13 +1128,13 @@ static void as_move_to_dispatch(struct as_data *ad, struct as_rq *arq)
        /*
         * take it off the sort and fifo list, add to dispatch queue
         */
        insert = ad->dispatch->prev;

        while (!list_empty(&rq->queuelist)) {
                struct request *__rq = list_entry_rq(rq->queuelist.next);
                struct as_rq *__arq = RQ_DATA(__rq);

                list_move_tail(&__rq->queuelist, ad->dispatch);
                list_del(&__rq->queuelist);

                elv_dispatch_add_tail(ad->q, __rq);

                if (__arq->io_context && __arq->io_context->aic)
                        atomic_inc(&__arq->io_context->aic->nr_dispatched);
@@ -1218,7 +1148,8 @@ static void as_move_to_dispatch(struct as_data *ad, struct as_rq *arq)
        as_remove_queued_request(ad->q, rq);
        WARN_ON(arq->state != AS_RQ_QUEUED);

        list_add(&rq->queuelist, insert);
        elv_dispatch_sort(ad->q, rq);

        arq->state = AS_RQ_DISPATCHED;
        if (arq->io_context && arq->io_context->aic)
                atomic_inc(&arq->io_context->aic->nr_dispatched);
@@ -1230,12 +1161,42 @@ static void as_move_to_dispatch(struct as_data *ad, struct as_rq *arq)
 * read/write expire, batch expire, etc, and moves it to the dispatch
 * queue. Returns 1 if a request was found, 0 otherwise.
 */
static int as_dispatch_request(struct as_data *ad)
static int as_dispatch_request(request_queue_t *q, int force)
{
        struct as_data *ad = q->elevator->elevator_data;
        struct as_rq *arq;
        const int reads = !list_empty(&ad->fifo_list[REQ_SYNC]);
        const int writes = !list_empty(&ad->fifo_list[REQ_ASYNC]);

        if (unlikely(force)) {
                /*
                 * Forced dispatch, accounting is useless. Reset
                 * accounting states and dump fifo_lists. Note that
                 * batch_data_dir is reset to REQ_SYNC to avoid
                 * screwing write batch accounting as write batch
                 * accounting occurs on W->R transition.
                 */
                int dispatched = 0;

                ad->batch_data_dir = REQ_SYNC;
                ad->changed_batch = 0;
                ad->new_batch = 0;

                while (ad->next_arq[REQ_SYNC]) {
                        as_move_to_dispatch(ad, ad->next_arq[REQ_SYNC]);
                        dispatched++;
                }
                ad->last_check_fifo[REQ_SYNC] = jiffies;

                while (ad->next_arq[REQ_ASYNC]) {
                        as_move_to_dispatch(ad, ad->next_arq[REQ_ASYNC]);
                        dispatched++;
                }
                ad->last_check_fifo[REQ_ASYNC] = jiffies;

                return dispatched;
        }

        /* Signal that the write batch was uncontended, so we can't time it */
        if (ad->batch_data_dir == REQ_ASYNC && !reads) {
                if (ad->current_write_count == 0 || !writes)
@@ -1359,20 +1320,6 @@ fifo_expired:
        return 1;
}

static struct request *as_next_request(request_queue_t *q)
{
        struct as_data *ad = q->elevator->elevator_data;
        struct request *rq = NULL;

        /*
         * if there are still requests on the dispatch queue, grab the first
         */
        if (!list_empty(ad->dispatch) || as_dispatch_request(ad))
                rq = list_entry_rq(ad->dispatch->next);

        return rq;
}

/*
 * Add arq to a list behind alias
 */
@@ -1410,11 +1357,19 @@ as_add_aliased_request(struct as_data *ad, struct as_rq *arq, struct as_rq *alias)
/*
 * add arq to rbtree and fifo
 */
static void as_add_request(struct as_data *ad, struct as_rq *arq)
static void as_add_request(request_queue_t *q, struct request *rq)
{
        struct as_data *ad = q->elevator->elevator_data;
        struct as_rq *arq = RQ_DATA(rq);
        struct as_rq *alias;
        int data_dir;

        if (arq->state != AS_RQ_PRESCHED) {
                printk("arq->state: %d\n", arq->state);
                WARN_ON(1);
        }
        arq->state = AS_RQ_NEW;

        if (rq_data_dir(arq->request) == READ
                        || current->flags&PF_SYNCWRITE)
                arq->is_sync = 1;
@@ -1463,96 +1418,24 @@ static void as_add_request(struct as_data *ad, struct as_rq *arq)
        arq->state = AS_RQ_QUEUED;
}

static void as_activate_request(request_queue_t *q, struct request *rq)
{
        struct as_rq *arq = RQ_DATA(rq);

        WARN_ON(arq->state != AS_RQ_DISPATCHED);
        arq->state = AS_RQ_REMOVED;
        if (arq->io_context && arq->io_context->aic)
                atomic_dec(&arq->io_context->aic->nr_dispatched);
}

static void as_deactivate_request(request_queue_t *q, struct request *rq)
{
        struct as_data *ad = q->elevator->elevator_data;
        struct as_rq *arq = RQ_DATA(rq);

        if (arq) {
                if (arq->state == AS_RQ_REMOVED) {
                        arq->state = AS_RQ_DISPATCHED;
                        if (arq->io_context && arq->io_context->aic)
                                atomic_inc(&arq->io_context->aic->nr_dispatched);
                }
        } else
                WARN_ON(blk_fs_request(rq)
                        && (!(rq->flags & (REQ_HARDBARRIER|REQ_SOFTBARRIER))) );

        /* Stop anticipating - let this request get through */
        as_antic_stop(ad);
}

/*
 * requeue the request. The request has not been completed, nor is it a
 * new request, so don't touch accounting.
 */
static void as_requeue_request(request_queue_t *q, struct request *rq)
{
        as_deactivate_request(q, rq);
        list_add(&rq->queuelist, &q->queue_head);
}

/*
 * Account a request that is inserted directly onto the dispatch queue.
 * arq->io_context->aic->nr_dispatched should not need to be incremented
 * because only new requests should come through here: requeues go through
 * our explicit requeue handler.
 */
static void as_account_queued_request(struct as_data *ad, struct request *rq)
{
        if (blk_fs_request(rq)) {
                struct as_rq *arq = RQ_DATA(rq);
                arq->state = AS_RQ_DISPATCHED;
                ad->nr_dispatched++;
        }
}

static void
as_insert_request(request_queue_t *q, struct request *rq, int where)
{
        struct as_data *ad = q->elevator->elevator_data;
        struct as_rq *arq = RQ_DATA(rq);

        if (arq) {
                if (arq->state != AS_RQ_PRESCHED) {
                        printk("arq->state: %d\n", arq->state);
                        WARN_ON(1);
                }
                arq->state = AS_RQ_NEW;
        }

        /* barriers must flush the reorder queue */
        if (unlikely(rq->flags & (REQ_SOFTBARRIER | REQ_HARDBARRIER)
                        && where == ELEVATOR_INSERT_SORT)) {
                WARN_ON(1);
                where = ELEVATOR_INSERT_BACK;
        }

        switch (where) {
                case ELEVATOR_INSERT_BACK:
                        while (ad->next_arq[REQ_SYNC])
                                as_move_to_dispatch(ad, ad->next_arq[REQ_SYNC]);

                        while (ad->next_arq[REQ_ASYNC])
                                as_move_to_dispatch(ad, ad->next_arq[REQ_ASYNC]);

                        list_add_tail(&rq->queuelist, ad->dispatch);
                        as_account_queued_request(ad, rq);
                        as_antic_stop(ad);
                        break;
                case ELEVATOR_INSERT_FRONT:
                        list_add(&rq->queuelist, ad->dispatch);
                        as_account_queued_request(ad, rq);
                        as_antic_stop(ad);
                        break;
                case ELEVATOR_INSERT_SORT:
                        BUG_ON(!blk_fs_request(rq));
                        as_add_request(ad, arq);
                        break;
                default:
                        BUG();
                        return;
        }
        WARN_ON(arq->state != AS_RQ_REMOVED);
        arq->state = AS_RQ_DISPATCHED;
        if (arq->io_context && arq->io_context->aic)
                atomic_inc(&arq->io_context->aic->nr_dispatched);
}

/*
@@ -1565,12 +1448,8 @@ static int as_queue_empty(request_queue_t *q)
{
        struct as_data *ad = q->elevator->elevator_data;

        if (!list_empty(&ad->fifo_list[REQ_ASYNC])
                || !list_empty(&ad->fifo_list[REQ_SYNC])
                || !list_empty(ad->dispatch))
                return 0;

        return 1;
        return list_empty(&ad->fifo_list[REQ_ASYNC])
                && list_empty(&ad->fifo_list[REQ_SYNC]);
}

static struct request *
@@ -1763,6 +1642,7 @@ as_merged_requests(request_queue_t *q, struct request *req,
         * kill knowledge of next, this one is a goner
         */
        as_remove_queued_request(q, next);
        as_put_io_context(anext);

        anext->state = AS_RQ_MERGED;
}
@@ -1782,7 +1662,7 @@ static void as_work_handler(void *data)
        unsigned long flags;

        spin_lock_irqsave(q->queue_lock, flags);
        if (as_next_request(q))
        if (!as_queue_empty(q))
                q->request_fn(q);
        spin_unlock_irqrestore(q->queue_lock, flags);
}
@@ -1797,7 +1677,9 @@ static void as_put_request(request_queue_t *q, struct request *rq)
                return;
        }

        if (arq->state != AS_RQ_POSTSCHED && arq->state != AS_RQ_PRESCHED) {
        if (unlikely(arq->state != AS_RQ_POSTSCHED &&
                     arq->state != AS_RQ_PRESCHED &&
                     arq->state != AS_RQ_MERGED)) {
                printk("arq->state %d\n", arq->state);
                WARN_ON(1);
        }
@@ -1907,7 +1789,6 @@ static int as_init_queue(request_queue_t *q, elevator_t *e)
        INIT_LIST_HEAD(&ad->fifo_list[REQ_ASYNC]);
        ad->sort_list[REQ_SYNC] = RB_ROOT;
        ad->sort_list[REQ_ASYNC] = RB_ROOT;
        ad->dispatch = &q->queue_head;
        ad->fifo_expire[REQ_SYNC] = default_read_expire;
        ad->fifo_expire[REQ_ASYNC] = default_write_expire;
        ad->antic_expire = default_antic_expire;
@@ -2072,10 +1953,9 @@ static struct elevator_type iosched_as = {
        .elevator_merge_fn = as_merge,
        .elevator_merged_fn = as_merged_request,
        .elevator_merge_req_fn = as_merged_requests,
        .elevator_next_req_fn = as_next_request,
        .elevator_add_req_fn = as_insert_request,
        .elevator_remove_req_fn = as_remove_request,
        .elevator_requeue_req_fn = as_requeue_request,
        .elevator_dispatch_fn = as_dispatch_request,
        .elevator_add_req_fn = as_add_request,
        .elevator_activate_req_fn = as_activate_request,
        .elevator_deactivate_req_fn = as_deactivate_request,
        .elevator_queue_empty_fn = as_queue_empty,
        .elevator_completed_req_fn = as_completed_request,
cfq-iosched.c: +82 -262 (diff suppressed because it is too large)
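Since the cfq-iosched.c diff is suppressed above, here is a hedged sketch of the accounting change the commit message describes; the names (cfq_sketch, sketch_*) are invented and this is not the actual cfq code. rq_in_driver is bumped when a request goes onto the dispatch queue rather than when the driver activates it, and it still gates queue kicking and the cfq_max_depth limit.

/* paraphrase of the described rq_in_driver change, not real cfq code */
struct cfq_sketch {
        int rq_in_driver;       /* now counts dispatched, not activated, requests */
        int cfq_max_depth;      /* can be lowered to 1 again */
};

/* called when the scheduler moves a request onto the dispatch queue */
static void sketch_request_dispatched(struct cfq_sketch *cfqd)
{
        cfqd->rq_in_driver++;
}

/* called from the completion path */
static void sketch_request_completed(struct cfq_sketch *cfqd)
{
        cfqd->rq_in_driver--;
        /* a queue kick is only considered once dispatched work drains */
}

/* depth gate checked before dispatching another request */
static int sketch_may_dispatch(const struct cfq_sketch *cfqd)
{
        return cfqd->rq_in_driver < cfqd->cfq_max_depth;
}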
@@ -50,7 +50,6 @@ struct deadline_data {
         * next in sort order. read, write or both are NULL
         */
        struct deadline_rq *next_drq[2];
        struct list_head *dispatch;     /* driver dispatch queue */
        struct list_head *hash;         /* request hash */
        unsigned int batching;          /* number of sequential requests made */
        sector_t last_sector;           /* head position */
@@ -239,10 +238,9 @@ deadline_del_drq_rb(struct deadline_data *dd, struct deadline_rq *drq)
        dd->next_drq[data_dir] = rb_entry_drq(rbnext);
}

        if (ON_RB(&drq->rb_node)) {
                rb_erase(&drq->rb_node, DRQ_RB_ROOT(dd, drq));
                RB_CLEAR(&drq->rb_node);
        }
        BUG_ON(!ON_RB(&drq->rb_node));
        rb_erase(&drq->rb_node, DRQ_RB_ROOT(dd, drq));
        RB_CLEAR(&drq->rb_node);
}

static struct request *
@@ -286,7 +284,7 @@ deadline_find_first_drq(struct deadline_data *dd, int data_dir)
/*
 * add drq to rbtree and fifo
 */
static inline void
static void
deadline_add_request(struct request_queue *q, struct request *rq)
{
        struct deadline_data *dd = q->elevator->elevator_data;
@@ -315,14 +313,11 @@ deadline_add_request(struct request_queue *q, struct request *rq)
static void deadline_remove_request(request_queue_t *q, struct request *rq)
{
        struct deadline_rq *drq = RQ_DATA(rq);
        struct deadline_data *dd = q->elevator->elevator_data;

        if (drq) {
                struct deadline_data *dd = q->elevator->elevator_data;

                list_del_init(&drq->fifo);
                deadline_remove_merge_hints(q, drq);
                deadline_del_drq_rb(dd, drq);
        }
        list_del_init(&drq->fifo);
        deadline_remove_merge_hints(q, drq);
        deadline_del_drq_rb(dd, drq);
}

static int
@@ -452,7 +447,7 @@ deadline_move_to_dispatch(struct deadline_data *dd, struct deadline_rq *drq)
        request_queue_t *q = drq->request->q;

        deadline_remove_request(q, drq->request);
        list_add_tail(&drq->request->queuelist, dd->dispatch);
        elv_dispatch_add_tail(q, drq->request);
}

/*
@@ -502,8 +497,9 @@ static inline int deadline_check_fifo(struct deadline_data *dd, int ddir)
 * deadline_dispatch_requests selects the best request according to
 * read/write expire, fifo_batch, etc
 */
static int deadline_dispatch_requests(struct deadline_data *dd)
static int deadline_dispatch_requests(request_queue_t *q, int force)
{
        struct deadline_data *dd = q->elevator->elevator_data;
        const int reads = !list_empty(&dd->fifo_list[READ]);
        const int writes = !list_empty(&dd->fifo_list[WRITE]);
        struct deadline_rq *drq;
@@ -597,65 +593,12 @@ dispatch_request:
        return 1;
}

static struct request *deadline_next_request(request_queue_t *q)
{
        struct deadline_data *dd = q->elevator->elevator_data;
        struct request *rq;

        /*
         * if there are still requests on the dispatch queue, grab the first one
         */
        if (!list_empty(dd->dispatch)) {
dispatch:
                rq = list_entry_rq(dd->dispatch->next);
                return rq;
        }

        if (deadline_dispatch_requests(dd))
                goto dispatch;

        return NULL;
}

static void
deadline_insert_request(request_queue_t *q, struct request *rq, int where)
{
        struct deadline_data *dd = q->elevator->elevator_data;

        /* barriers must flush the reorder queue */
        if (unlikely(rq->flags & (REQ_SOFTBARRIER | REQ_HARDBARRIER)
                        && where == ELEVATOR_INSERT_SORT))
                where = ELEVATOR_INSERT_BACK;

        switch (where) {
                case ELEVATOR_INSERT_BACK:
                        while (deadline_dispatch_requests(dd))
                                ;
                        list_add_tail(&rq->queuelist, dd->dispatch);
                        break;
                case ELEVATOR_INSERT_FRONT:
                        list_add(&rq->queuelist, dd->dispatch);
                        break;
                case ELEVATOR_INSERT_SORT:
                        BUG_ON(!blk_fs_request(rq));
                        deadline_add_request(q, rq);
                        break;
                default:
                        printk("%s: bad insert point %d\n", __FUNCTION__,where);
                        return;
        }
}

static int deadline_queue_empty(request_queue_t *q)
{
        struct deadline_data *dd = q->elevator->elevator_data;

        if (!list_empty(&dd->fifo_list[WRITE])
                || !list_empty(&dd->fifo_list[READ])
                || !list_empty(dd->dispatch))
                return 0;

        return 1;
        return list_empty(&dd->fifo_list[WRITE])
                && list_empty(&dd->fifo_list[READ]);
}

static struct request *
@@ -733,7 +676,6 @@ static int deadline_init_queue(request_queue_t *q, elevator_t *e)
        INIT_LIST_HEAD(&dd->fifo_list[WRITE]);
        dd->sort_list[READ] = RB_ROOT;
        dd->sort_list[WRITE] = RB_ROOT;
        dd->dispatch = &q->queue_head;
        dd->fifo_expire[READ] = read_expire;
        dd->fifo_expire[WRITE] = write_expire;
        dd->writes_starved = writes_starved;
@@ -748,10 +690,8 @@ static void deadline_put_request(request_queue_t *q, struct request *rq)
        struct deadline_data *dd = q->elevator->elevator_data;
        struct deadline_rq *drq = RQ_DATA(rq);

        if (drq) {
                mempool_free(drq, dd->drq_pool);
                rq->elevator_private = NULL;
        }
        mempool_free(drq, dd->drq_pool);
        rq->elevator_private = NULL;
}

static int
@@ -917,9 +857,8 @@ static struct elevator_type iosched_deadline = {
        .elevator_merge_fn = deadline_merge,
        .elevator_merged_fn = deadline_merged_request,
        .elevator_merge_req_fn = deadline_merged_requests,
        .elevator_next_req_fn = deadline_next_request,
        .elevator_add_req_fn = deadline_insert_request,
        .elevator_remove_req_fn = deadline_remove_request,
        .elevator_dispatch_fn = deadline_dispatch_requests,
        .elevator_add_req_fn = deadline_add_request,
        .elevator_queue_empty_fn = deadline_queue_empty,
        .elevator_former_req_fn = deadline_former_request,
        .elevator_latter_req_fn = deadline_latter_request,
@@ -28,13 +28,9 @@ static void elevator_noop_merge_requests(request_queue_t *q, struct request *req
        list_del_init(&next->queuelist);
}

static void elevator_noop_add_request(request_queue_t *q, struct request *rq,
                                      int where)
static void elevator_noop_add_request(request_queue_t *q, struct request *rq)
{
        if (where == ELEVATOR_INSERT_FRONT)
                list_add(&rq->queuelist, &q->queue_head);
        else
                list_add_tail(&rq->queuelist, &q->queue_head);
        elv_dispatch_add_tail(q, rq);

        /*
         * new merges must not precede this barrier
@@ -45,19 +41,16 @@ static void elevator_noop_add_request(request_queue_t *q, struct request *rq,
        q->last_merge = rq;
}

static struct request *elevator_noop_next_request(request_queue_t *q)
static int elevator_noop_dispatch(request_queue_t *q, int force)
{
        if (!list_empty(&q->queue_head))
                return list_entry_rq(q->queue_head.next);

        return NULL;
        return 0;
}

static struct elevator_type elevator_noop = {
        .ops = {
                .elevator_merge_fn = elevator_noop_merge,
                .elevator_merge_req_fn = elevator_noop_merge_requests,
                .elevator_next_req_fn = elevator_noop_next_request,
                .elevator_dispatch_fn = elevator_noop_dispatch,
                .elevator_add_req_fn = elevator_noop_add_request,
        },
        .elevator_name = "noop",
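Finally, the consumer side of the same contract, again as an assumption-laden toy: this patch only converts the schedulers, so the core-side behavior sketched here is inferred from the diffs (ad->dispatch/dd->dispatch pointing at q->queue_head is dropped, and elv_dispatch_add_tail/elv_dispatch_sort take over). The core keeps the real dispatch list on q->queue_head and asks the elevator to refill it only when that list runs dry, which is why the queue_empty functions above no longer check a private dispatch list.

/* reusing the toy types and toy_dispatch() from the sketch near the commit message */
struct toy_request *toy_next_request(struct toy_queue *q)
{
        /* refill from the scheduler only when the dispatch list is empty */
        if (!q->dispatch && !toy_dispatch(q, 0))
                return NULL;
        return q->dispatch;     /* head of the core-owned dispatch list */
}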