Merge branch 'post-2.6.15' of git://brick.kernel.dk/data/git/linux-2.6-block
Manual fixup for merge with Jens' "Suspend support for libata", commit
ID 9b84754866.
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
@@ -263,14 +263,8 @@ A flag in the bio structure, BIO_BARRIER is used to identify a barrier i/o.
The generic i/o scheduler would make sure that it places the barrier request and
all other requests coming after it after all the previous requests in the
queue. Barriers may be implemented in different ways depending on the
driver. A SCSI driver for example could make use of ordered tags to
preserve the necessary ordering with a lower impact on throughput. For IDE
this might be two sync cache flush: a pre and post flush when encountering
a barrier write.

There is a provision for queues to indicate what kind of barriers they
can provide. This is as of yet unmerged, details will be added here once it
is in the kernel.
driver. For more details regarding I/O barriers, please read barrier.txt
in this directory.

1.2.2 Request Priority/Latency

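The pre/post-flush scheme described above corresponds, in this merge, to a queue ordering mode plus a prepare-flush callback. As a minimal sketch only (the mydrv_* names and the MYDRV_CMD_FLUSH opcode are invented; the blk_queue_ordered() and prepare_flush_fn signatures are the ones used later in this diff by ide-disk), a driver with a volatile write cache could register its barrier support like this:

	/* Hypothetical driver sketch: ask the block layer to drain the queue
	 * and issue a cache flush before and after each barrier write, and
	 * tell it how to build the flush command. */
	static void mydrv_prepare_flush(request_queue_t *q, struct request *rq)
	{
		memset(rq->cmd, 0, sizeof(rq->cmd));
		rq->cmd[0] = MYDRV_CMD_FLUSH;	/* invented flush opcode */
		rq->buffer = rq->cmd;
	}

	static void mydrv_init_queue(request_queue_t *q)
	{
		blk_queue_ordered(q, QUEUE_ORDERED_DRAIN_FLUSH, mydrv_prepare_flush);
	}

A driver whose device has no write cache would instead pass QUEUE_ORDERED_DRAIN and a NULL prepare-flush callback, as update_ordered() does further down in this diff.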
+25 -119
@@ -182,6 +182,9 @@ struct as_rq {

static kmem_cache_t *arq_pool;

static void as_move_to_dispatch(struct as_data *ad, struct as_rq *arq);
static void as_antic_stop(struct as_data *ad);

/*
 * IO Context helper functions
 */
@@ -370,7 +373,7 @@ static struct as_rq *as_find_first_arq(struct as_data *ad, int data_dir)
 * existing request against the same sector), which can happen when using
 * direct IO, then return the alias.
 */
static struct as_rq *as_add_arq_rb(struct as_data *ad, struct as_rq *arq)
static struct as_rq *__as_add_arq_rb(struct as_data *ad, struct as_rq *arq)
{
	struct rb_node **p = &ARQ_RB_ROOT(ad, arq)->rb_node;
	struct rb_node *parent = NULL;
@@ -397,6 +400,16 @@ static struct as_rq *as_add_arq_rb(struct as_data *ad, struct as_rq *arq)
	return NULL;
}

static void as_add_arq_rb(struct as_data *ad, struct as_rq *arq)
{
	struct as_rq *alias;

	while ((unlikely(alias = __as_add_arq_rb(ad, arq)))) {
		as_move_to_dispatch(ad, alias);
		as_antic_stop(ad);
	}
}

static inline void as_del_arq_rb(struct as_data *ad, struct as_rq *arq)
{
	if (!ON_RB(&arq->rb_node)) {
@@ -1133,23 +1146,6 @@ static void as_move_to_dispatch(struct as_data *ad, struct as_rq *arq)
	/*
	 * take it off the sort and fifo list, add to dispatch queue
	 */
	while (!list_empty(&rq->queuelist)) {
		struct request *__rq = list_entry_rq(rq->queuelist.next);
		struct as_rq *__arq = RQ_DATA(__rq);

		list_del(&__rq->queuelist);

		elv_dispatch_add_tail(ad->q, __rq);

		if (__arq->io_context && __arq->io_context->aic)
			atomic_inc(&__arq->io_context->aic->nr_dispatched);

		WARN_ON(__arq->state != AS_RQ_QUEUED);
		__arq->state = AS_RQ_DISPATCHED;

		ad->nr_dispatched++;
	}

	as_remove_queued_request(ad->q, rq);
	WARN_ON(arq->state != AS_RQ_QUEUED);

@@ -1325,42 +1321,6 @@ fifo_expired:
	return 1;
}

/*
 * Add arq to a list behind alias
 */
static inline void
as_add_aliased_request(struct as_data *ad, struct as_rq *arq,
				struct as_rq *alias)
{
	struct request *req = arq->request;
	struct list_head *insert = alias->request->queuelist.prev;

	/*
	 * Transfer list of aliases
	 */
	while (!list_empty(&req->queuelist)) {
		struct request *__rq = list_entry_rq(req->queuelist.next);
		struct as_rq *__arq = RQ_DATA(__rq);

		list_move_tail(&__rq->queuelist, &alias->request->queuelist);

		WARN_ON(__arq->state != AS_RQ_QUEUED);
	}

	/*
	 * Another request with the same start sector on the rbtree.
	 * Link this request to that sector. They are untangled in
	 * as_move_to_dispatch
	 */
	list_add(&arq->request->queuelist, insert);

	/*
	 * Don't want to have to handle merges.
	 */
	as_del_arq_hash(arq);
	arq->request->flags |= REQ_NOMERGE;
}

/*
 * add arq to rbtree and fifo
 */
@@ -1368,7 +1328,6 @@ static void as_add_request(request_queue_t *q, struct request *rq)
{
	struct as_data *ad = q->elevator->elevator_data;
	struct as_rq *arq = RQ_DATA(rq);
	struct as_rq *alias;
	int data_dir;

	arq->state = AS_RQ_NEW;
@@ -1387,33 +1346,17 @@ static void as_add_request(request_queue_t *q, struct request *rq)
		atomic_inc(&arq->io_context->aic->nr_queued);
	}

	alias = as_add_arq_rb(ad, arq);
	if (!alias) {
		/*
		 * set expire time (only used for reads) and add to fifo list
		 */
		arq->expires = jiffies + ad->fifo_expire[data_dir];
		list_add_tail(&arq->fifo, &ad->fifo_list[data_dir]);
	as_add_arq_rb(ad, arq);
	if (rq_mergeable(arq->request))
		as_add_arq_hash(ad, arq);

		if (rq_mergeable(arq->request))
			as_add_arq_hash(ad, arq);
		as_update_arq(ad, arq); /* keep state machine up to date */

	} else {
		as_add_aliased_request(ad, arq, alias);

		/*
		 * have we been anticipating this request?
		 * or does it come from the same process as the one we are
		 * anticipating for?
		 */
		if (ad->antic_status == ANTIC_WAIT_REQ
				|| ad->antic_status == ANTIC_WAIT_NEXT) {
			if (as_can_break_anticipation(ad, arq))
				as_antic_stop(ad);
		}
	}
	/*
	 * set expire time (only used for reads) and add to fifo list
	 */
	arq->expires = jiffies + ad->fifo_expire[data_dir];
	list_add_tail(&arq->fifo, &ad->fifo_list[data_dir]);

	as_update_arq(ad, arq); /* keep state machine up to date */
	arq->state = AS_RQ_QUEUED;
}

@@ -1536,23 +1479,8 @@ static void as_merged_request(request_queue_t *q, struct request *req)
	 * if the merge was a front merge, we need to reposition request
	 */
	if (rq_rb_key(req) != arq->rb_key) {
		struct as_rq *alias, *next_arq = NULL;

		if (ad->next_arq[arq->is_sync] == arq)
			next_arq = as_find_next_arq(ad, arq);

		/*
		 * Note! We should really be moving any old aliased requests
		 * off this request and try to insert them into the rbtree. We
		 * currently don't bother. Ditto the next function.
		 */
		as_del_arq_rb(ad, arq);
		if ((alias = as_add_arq_rb(ad, arq))) {
			list_del_init(&arq->fifo);
			as_add_aliased_request(ad, arq, alias);
			if (next_arq)
				ad->next_arq[arq->is_sync] = next_arq;
		}
		as_add_arq_rb(ad, arq);
		/*
		 * Note! At this stage of this and the next function, our next
		 * request may not be optimal - eg the request may have "grown"
@@ -1579,18 +1507,8 @@ static void as_merged_requests(request_queue_t *q, struct request *req,
		as_add_arq_hash(ad, arq);

	if (rq_rb_key(req) != arq->rb_key) {
		struct as_rq *alias, *next_arq = NULL;

		if (ad->next_arq[arq->is_sync] == arq)
			next_arq = as_find_next_arq(ad, arq);

		as_del_arq_rb(ad, arq);
		if ((alias = as_add_arq_rb(ad, arq))) {
			list_del_init(&arq->fifo);
			as_add_aliased_request(ad, arq, alias);
			if (next_arq)
				ad->next_arq[arq->is_sync] = next_arq;
		}
		as_add_arq_rb(ad, arq);
	}

	/*
@@ -1609,18 +1527,6 @@ static void as_merged_requests(request_queue_t *q, struct request *req,
		}
	}

	/*
	 * Transfer list of aliases
	 */
	while (!list_empty(&next->queuelist)) {
		struct request *__rq = list_entry_rq(next->queuelist.next);
		struct as_rq *__arq = RQ_DATA(__rq);

		list_move_tail(&__rq->queuelist, &req->queuelist);

		WARN_ON(__arq->state != AS_RQ_QUEUED);
	}

	/*
	 * kill knowledge of next, this one is a goner
	 */

+8 -8
@@ -25,15 +25,15 @@
/*
 * tunables
 */
static int cfq_quantum = 4; /* max queue in one round of service */
static int cfq_queued = 8; /* minimum rq allocate limit per-queue*/
static int cfq_fifo_expire[2] = { HZ / 4, HZ / 8 };
static int cfq_back_max = 16 * 1024; /* maximum backwards seek, in KiB */
static int cfq_back_penalty = 2; /* penalty of a backwards seek */
static const int cfq_quantum = 4; /* max queue in one round of service */
static const int cfq_queued = 8; /* minimum rq allocate limit per-queue*/
static const int cfq_fifo_expire[2] = { HZ / 4, HZ / 8 };
static const int cfq_back_max = 16 * 1024; /* maximum backwards seek, in KiB */
static const int cfq_back_penalty = 2; /* penalty of a backwards seek */

static int cfq_slice_sync = HZ / 10;
static const int cfq_slice_sync = HZ / 10;
static int cfq_slice_async = HZ / 25;
static int cfq_slice_async_rq = 2;
static const int cfq_slice_async_rq = 2;
static int cfq_slice_idle = HZ / 100;

#define CFQ_IDLE_GRACE (HZ / 10)
@@ -45,7 +45,7 @@ static int cfq_slice_idle = HZ / 100;
/*
 * disable queueing at the driver/hardware level
 */
static int cfq_max_depth = 2;
static const int cfq_max_depth = 2;

/*
 * for the hash of cfqq inside the cfqd

@@ -19,10 +19,10 @@
/*
 * See Documentation/block/deadline-iosched.txt
 */
static int read_expire = HZ / 2; /* max time before a read is submitted. */
static int write_expire = 5 * HZ; /* ditto for writes, these limits are SOFT! */
static int writes_starved = 2; /* max times reads can starve a write */
static int fifo_batch = 16; /* # of sequential requests treated as one
static const int read_expire = HZ / 2; /* max time before a read is submitted. */
static const int write_expire = 5 * HZ; /* ditto for writes, these limits are SOFT! */
static const int writes_starved = 2; /* max times reads can starve a write */
static const int fifo_batch = 16; /* # of sequential requests treated as one
				      by the above parameters. For throughput. */

static const int deadline_hash_shift = 5;

+59 -27
@@ -304,15 +304,7 @@ void elv_requeue_request(request_queue_t *q, struct request *rq)

	rq->flags &= ~REQ_STARTED;

	/*
	 * if this is the flush, requeue the original instead and drop the flush
	 */
	if (rq->flags & REQ_BAR_FLUSH) {
		clear_bit(QUEUE_FLAG_FLUSH, &q->queue_flags);
		rq = rq->end_io_data;
	}

	__elv_add_request(q, rq, ELEVATOR_INSERT_FRONT, 0);
	__elv_add_request(q, rq, ELEVATOR_INSERT_REQUEUE, 0);
}

static void elv_drain_elevator(request_queue_t *q)
@@ -332,7 +324,18 @@ static void elv_drain_elevator(request_queue_t *q)
void __elv_add_request(request_queue_t *q, struct request *rq, int where,
		       int plug)
{
	struct list_head *pos;
	unsigned ordseq;

	if (q->ordcolor)
		rq->flags |= REQ_ORDERED_COLOR;

	if (rq->flags & (REQ_SOFTBARRIER | REQ_HARDBARRIER)) {
		/*
		 * toggle ordered color
		 */
		q->ordcolor ^= 1;

		/*
		 * barriers implicitly indicate back insertion
		 */
@@ -393,6 +396,30 @@ void __elv_add_request(request_queue_t *q, struct request *rq, int where,
		q->elevator->ops->elevator_add_req_fn(q, rq);
		break;

	case ELEVATOR_INSERT_REQUEUE:
		/*
		 * If ordered flush isn't in progress, we do front
		 * insertion; otherwise, requests should be requeued
		 * in ordseq order.
		 */
		rq->flags |= REQ_SOFTBARRIER;

		if (q->ordseq == 0) {
			list_add(&rq->queuelist, &q->queue_head);
			break;
		}

		ordseq = blk_ordered_req_seq(rq);

		list_for_each(pos, &q->queue_head) {
			struct request *pos_rq = list_entry_rq(pos);
			if (ordseq <= blk_ordered_req_seq(pos_rq))
				break;
		}

		list_add_tail(&rq->queuelist, pos);
		break;

	default:
		printk(KERN_ERR "%s: bad insertion point %d\n",
		       __FUNCTION__, where);
@@ -422,25 +449,16 @@ static inline struct request *__elv_next_request(request_queue_t *q)
{
	struct request *rq;

	if (unlikely(list_empty(&q->queue_head) &&
		     !q->elevator->ops->elevator_dispatch_fn(q, 0)))
		return NULL;
	while (1) {
		while (!list_empty(&q->queue_head)) {
			rq = list_entry_rq(q->queue_head.next);
			if (blk_do_ordered(q, &rq))
				return rq;
		}

	rq = list_entry_rq(q->queue_head.next);

	/*
	 * if this is a barrier write and the device has to issue a
	 * flush sequence to support it, check how far we are
	 */
	if (blk_fs_request(rq) && blk_barrier_rq(rq)) {
		BUG_ON(q->ordered == QUEUE_ORDERED_NONE);

		if (q->ordered == QUEUE_ORDERED_FLUSH &&
		    !blk_barrier_preflush(rq))
			rq = blk_start_pre_flush(q, rq);
		if (!q->elevator->ops->elevator_dispatch_fn(q, 0))
			return NULL;
	}

	return rq;
}

struct request *elv_next_request(request_queue_t *q)
@@ -498,7 +516,7 @@ struct request *elv_next_request(request_queue_t *q)
			blkdev_dequeue_request(rq);
			rq->flags |= REQ_QUIET;
			end_that_request_chunk(rq, 0, nr_bytes);
			end_that_request_last(rq);
			end_that_request_last(rq, 0);
		} else {
			printk(KERN_ERR "%s: bad return=%d\n", __FUNCTION__,
			       ret);
@@ -593,7 +611,21 @@ void elv_completed_request(request_queue_t *q, struct request *rq)
	 * request is released from the driver, io must be done
	 */
	if (blk_account_rq(rq)) {
		struct request *first_rq = list_entry_rq(q->queue_head.next);

		q->in_flight--;

		/*
		 * Check if the queue is waiting for fs requests to be
		 * drained for flush sequence.
		 */
		if (q->ordseq && q->in_flight == 0 &&
		    blk_ordered_cur_seq(q) == QUEUE_ORDSEQ_DRAIN &&
		    blk_ordered_req_seq(first_rq) > QUEUE_ORDSEQ_DRAIN) {
			blk_ordered_complete_seq(q, QUEUE_ORDSEQ_DRAIN, 0);
			q->request_fn(q);
		}

		if (blk_sorted_rq(rq) && e->ops->elevator_completed_req_fn)
			e->ops->elevator_completed_req_fn(q, rq);
	}

+338 -220
File diff suppressed because it is too large
+1 -1
@@ -46,7 +46,7 @@ EXPORT_SYMBOL(scsi_command_size);

static int sg_get_version(int __user *p)
{
	static int sg_version_num = 30527;
	static const int sg_version_num = 30527;
	return put_user(sg_version_num, p);
}

@@ -3471,7 +3471,7 @@ static inline boolean DAC960_ProcessCompletedRequest(DAC960_Command_T *Command,

	if (!end_that_request_first(Request, UpToDate, Command->BlockCount)) {

		end_that_request_last(Request);
		end_that_request_last(Request, UpToDate);

		if (Command->Completion) {
			complete(Command->Completion);

@@ -2310,7 +2310,7 @@ static inline void complete_command( ctlr_info_t *h, CommandList_struct *cmd,
	printk("Done with %p\n", cmd->rq);
#endif /* CCISS_DEBUG */

	end_that_request_last(cmd->rq);
	end_that_request_last(cmd->rq, status ? 1 : -EIO);
	cmd_free(h,cmd,1);
}

@@ -1036,7 +1036,7 @@ static inline void complete_command(cmdlist_t *cmd, int timeout)
	complete_buffers(cmd->rq->bio, ok);

	DBGPX(printk("Done with %p\n", cmd->rq););
	end_that_request_last(cmd->rq);
	end_that_request_last(cmd->rq, ok ? 1 : -EIO);
}

/*

@@ -2301,7 +2301,7 @@ static void floppy_end_request(struct request *req, int uptodate)
	add_disk_randomness(req->rq_disk);
	floppy_off((long)req->rq_disk->private_data);
	blkdev_dequeue_request(req);
	end_that_request_last(req);
	end_that_request_last(req, uptodate);

	/* We're done with the request */
	current_req = NULL;

+1 -1
@@ -140,7 +140,7 @@ static void nbd_end_request(struct request *req)

	spin_lock_irqsave(q->queue_lock, flags);
	if (!end_that_request_first(req, uptodate, req->nr_sectors)) {
		end_that_request_last(req);
		end_that_request_last(req, uptodate);
	}
	spin_unlock_irqrestore(q->queue_lock, flags);
}

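The one-line changes in this and the surrounding driver hunks all track the same block-layer API change in this merge: end_that_request_last() now takes the completion status as a second argument instead of assuming success. A minimal sketch of the updated completion idiom (the mydrv_end_request name is invented; the pattern mirrors the nbd hunk above):

	/* Hypothetical driver completion helper using the two-argument
	 * end_that_request_last(): retire the transferred sectors first,
	 * then finish the whole request with its status once nothing
	 * remains outstanding. */
	static void mydrv_end_request(struct request *req, int uptodate)
	{
		request_queue_t *q = req->q;
		unsigned long flags;

		spin_lock_irqsave(q->queue_lock, flags);
		if (!end_that_request_first(req, uptodate, req->nr_sectors))
			end_that_request_last(req, uptodate);
		spin_unlock_irqrestore(q->queue_lock, flags);
	}
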
+1 -1
@@ -770,7 +770,7 @@ static inline void carm_end_request_queued(struct carm_host *host,
	rc = end_that_request_first(req, uptodate, req->hard_nr_sectors);
	assert(rc == 0);

	end_that_request_last(req);
	end_that_request_last(req, uptodate);

	rc = carm_put_request(host, crq);
	assert(rc == 0);

+1 -1
@@ -951,7 +951,7 @@ static void ub_rw_cmd_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
static void ub_end_rq(struct request *rq, int uptodate)
{
	end_that_request_first(rq, uptodate, rq->hard_nr_sectors);
	end_that_request_last(rq);
	end_that_request_last(rq, uptodate);
}

static int ub_rw_cmd_retry(struct ub_dev *sc, struct ub_lun *lun,

@@ -305,7 +305,7 @@ static void viodasd_end_request(struct request *req, int uptodate,
	if (end_that_request_first(req, uptodate, num_sectors))
		return;
	add_disk_randomness(req->rq_disk);
	end_that_request_last(req);
	end_that_request_last(req, uptodate);
}

/*

@@ -1402,7 +1402,7 @@ static void do_cdu31a_request(request_queue_t * q)
			if (!end_that_request_first(req, 1, nblock)) {
				spin_lock_irq(q->queue_lock);
				blkdev_dequeue_request(req);
				end_that_request_last(req);
				end_that_request_last(req, 1);
				spin_unlock_irq(q->queue_lock);
			}
			continue;

@@ -614,7 +614,7 @@ static void cdrom_end_request (ide_drive_t *drive, int uptodate)
		 */
		spin_lock_irqsave(&ide_lock, flags);
		end_that_request_chunk(failed, 0, failed->data_len);
		end_that_request_last(failed);
		end_that_request_last(failed, 0);
		spin_unlock_irqrestore(&ide_lock, flags);
	}

@@ -1735,7 +1735,7 @@ end_request:

		spin_lock_irqsave(&ide_lock, flags);
		blkdev_dequeue_request(rq);
		end_that_request_last(rq);
		end_that_request_last(rq, 1);
		HWGROUP(drive)->rq = NULL;
		spin_unlock_irqrestore(&ide_lock, flags);
		return ide_stopped;

+53 -84
@@ -681,50 +681,9 @@ static ide_proc_entry_t idedisk_proc[] = {

#endif /* CONFIG_PROC_FS */

static void idedisk_end_flush(request_queue_t *q, struct request *flush_rq)
static void idedisk_prepare_flush(request_queue_t *q, struct request *rq)
{
	ide_drive_t *drive = q->queuedata;
	struct request *rq = flush_rq->end_io_data;
	int good_sectors = rq->hard_nr_sectors;
	int bad_sectors;
	sector_t sector;

	if (flush_rq->errors & ABRT_ERR) {
		printk(KERN_ERR "%s: barrier support doesn't work\n", drive->name);
		blk_queue_ordered(drive->queue, QUEUE_ORDERED_NONE);
		blk_queue_issue_flush_fn(drive->queue, NULL);
		good_sectors = 0;
	} else if (flush_rq->errors) {
		good_sectors = 0;
		if (blk_barrier_preflush(rq)) {
			sector = ide_get_error_location(drive,flush_rq->buffer);
			if ((sector >= rq->hard_sector) &&
			    (sector < rq->hard_sector + rq->hard_nr_sectors))
				good_sectors = sector - rq->hard_sector;
		}
	}

	if (flush_rq->errors)
		printk(KERN_ERR "%s: failed barrier write: "
				"sector=%Lx(good=%d/bad=%d)\n",
				drive->name, (unsigned long long)rq->sector,
				good_sectors,
				(int) (rq->hard_nr_sectors-good_sectors));

	bad_sectors = rq->hard_nr_sectors - good_sectors;

	if (good_sectors)
		__ide_end_request(drive, rq, 1, good_sectors);
	if (bad_sectors)
		__ide_end_request(drive, rq, 0, bad_sectors);
}

static int idedisk_prepare_flush(request_queue_t *q, struct request *rq)
{
	ide_drive_t *drive = q->queuedata;

	if (!drive->wcache)
		return 0;

	memset(rq->cmd, 0, sizeof(rq->cmd));

@@ -735,9 +694,8 @@ static int idedisk_prepare_flush(request_queue_t *q, struct request *rq)
	rq->cmd[0] = WIN_FLUSH_CACHE;


	rq->flags |= REQ_DRIVE_TASK | REQ_SOFTBARRIER;
	rq->flags |= REQ_DRIVE_TASK;
	rq->buffer = rq->cmd;
	return 1;
}

static int idedisk_issue_flush(request_queue_t *q, struct gendisk *disk,
@@ -794,27 +752,64 @@ static int set_nowerr(ide_drive_t *drive, int arg)
	return 0;
}

static void update_ordered(ide_drive_t *drive)
{
	struct hd_driveid *id = drive->id;
	unsigned ordered = QUEUE_ORDERED_NONE;
	prepare_flush_fn *prep_fn = NULL;
	issue_flush_fn *issue_fn = NULL;

	if (drive->wcache) {
		unsigned long long capacity;
		int barrier;
		/*
		 * We must avoid issuing commands a drive does not
		 * understand or we may crash it. We check flush cache
		 * is supported. We also check we have the LBA48 flush
		 * cache if the drive capacity is too large. By this
		 * time we have trimmed the drive capacity if LBA48 is
		 * not available so we don't need to recheck that.
		 */
		capacity = idedisk_capacity(drive);
		barrier = ide_id_has_flush_cache(id) &&
			(drive->addressing == 0 || capacity <= (1ULL << 28) ||
			 ide_id_has_flush_cache_ext(id));

		printk(KERN_INFO "%s: cache flushes %ssupported\n",
		       drive->name, barrier ? "" : "not");

		if (barrier) {
			ordered = QUEUE_ORDERED_DRAIN_FLUSH;
			prep_fn = idedisk_prepare_flush;
			issue_fn = idedisk_issue_flush;
		}
	} else
		ordered = QUEUE_ORDERED_DRAIN;

	blk_queue_ordered(drive->queue, ordered, prep_fn);
	blk_queue_issue_flush_fn(drive->queue, issue_fn);
}

static int write_cache(ide_drive_t *drive, int arg)
{
	ide_task_t args;
	int err;
	int err = 1;

	if (!ide_id_has_flush_cache(drive->id))
		return 1;

	memset(&args, 0, sizeof(ide_task_t));
	args.tfRegister[IDE_FEATURE_OFFSET] = (arg) ?
	if (ide_id_has_flush_cache(drive->id)) {
		memset(&args, 0, sizeof(ide_task_t));
		args.tfRegister[IDE_FEATURE_OFFSET] = (arg) ?
			SETFEATURES_EN_WCACHE : SETFEATURES_DIS_WCACHE;
		args.tfRegister[IDE_COMMAND_OFFSET] = WIN_SETFEATURES;
		args.command_type = IDE_DRIVE_TASK_NO_DATA;
		args.handler = &task_no_data_intr;
	args.tfRegister[IDE_COMMAND_OFFSET] = WIN_SETFEATURES;
	args.command_type = IDE_DRIVE_TASK_NO_DATA;
	args.handler = &task_no_data_intr;
		err = ide_raw_taskfile(drive, &args, NULL);
		if (err == 0)
			drive->wcache = arg;
	}

	err = ide_raw_taskfile(drive, &args, NULL);
	if (err)
		return err;
	update_ordered(drive);

	drive->wcache = arg;
	return 0;
	return err;
}

static int do_idedisk_flushcache (ide_drive_t *drive)
@@ -888,7 +883,6 @@ static void idedisk_setup (ide_drive_t *drive)
{
	struct hd_driveid *id = drive->id;
	unsigned long long capacity;
	int barrier;

	idedisk_add_settings(drive);

@@ -992,31 +986,6 @@ static void idedisk_setup (ide_drive_t *drive)
		drive->wcache = 1;

	write_cache(drive, 1);

	/*
	 * We must avoid issuing commands a drive does not understand
	 * or we may crash it. We check flush cache is supported. We also
	 * check we have the LBA48 flush cache if the drive capacity is
	 * too large. By this time we have trimmed the drive capacity if
	 * LBA48 is not available so we don't need to recheck that.
	 */
	barrier = 0;
	if (ide_id_has_flush_cache(id))
		barrier = 1;
	if (drive->addressing == 1) {
		/* Can't issue the correct flush ? */
		if (capacity > (1ULL << 28) && !ide_id_has_flush_cache_ext(id))
			barrier = 0;
	}

	printk(KERN_INFO "%s: cache flushes %ssupported\n",
	       drive->name, barrier ? "" : "not ");
	if (barrier) {
		blk_queue_ordered(drive->queue, QUEUE_ORDERED_FLUSH);
		drive->queue->prepare_flush_fn = idedisk_prepare_flush;
		drive->queue->end_flush_fn = idedisk_end_flush;
		blk_queue_issue_flush_fn(drive->queue, idedisk_issue_flush);
	}
}

static void ide_cacheflush_p(ide_drive_t *drive)

@@ -89,7 +89,7 @@ int __ide_end_request(ide_drive_t *drive, struct request *rq, int uptodate,

		blkdev_dequeue_request(rq);
		HWGROUP(drive)->rq = NULL;
		end_that_request_last(rq);
		end_that_request_last(rq, uptodate);
		ret = 0;
	}
	return ret;
@@ -119,10 +119,7 @@ int ide_end_request (ide_drive_t *drive, int uptodate, int nr_sectors)
	if (!nr_sectors)
		nr_sectors = rq->hard_cur_sectors;

	if (blk_complete_barrier_rq_locked(drive->queue, rq, nr_sectors))
		ret = rq->nr_sectors != 0;
	else
		ret = __ide_end_request(drive, rq, uptodate, nr_sectors);
	ret = __ide_end_request(drive, rq, uptodate, nr_sectors);

	spin_unlock_irqrestore(&ide_lock, flags);
	return ret;
@@ -247,7 +244,7 @@ static void ide_complete_pm_request (ide_drive_t *drive, struct request *rq)
	}
	blkdev_dequeue_request(rq);
	HWGROUP(drive)->rq = NULL;
	end_that_request_last(rq);
	end_that_request_last(rq, 1);
	spin_unlock_irqrestore(&ide_lock, flags);
}

@@ -379,7 +376,7 @@ void ide_end_drive_cmd (ide_drive_t *drive, u8 stat, u8 err)
	blkdev_dequeue_request(rq);
	HWGROUP(drive)->rq = NULL;
	rq->errors = err;
	end_that_request_last(rq);
	end_that_request_last(rq, !rq->errors);
	spin_unlock_irqrestore(&ide_lock, flags);
}

@@ -468,7 +468,7 @@ static void i2o_block_end_request(struct request *req, int uptodate,

		spin_lock_irqsave(q->queue_lock, flags);

		end_that_request_last(req);
		end_that_request_last(req, uptodate);

		if (likely(dev)) {
			dev->open_queue_depth--;

Some files were not shown because too many files have changed in this diff.