Merge remote-tracking branch 'jens/for-4.2/core' into dm-4.2
@@ -361,7 +361,7 @@ static void bio_integrity_verify_fn(struct work_struct *work)
 
 	/* Restore original bio completion handler */
 	bio->bi_end_io = bip->bip_end_io;
-	bio_endio_nodec(bio, error);
+	bio_endio(bio, error);
 }
 
 /**
@@ -388,7 +388,7 @@ void bio_integrity_endio(struct bio *bio, int error)
 	 */
 	if (error) {
 		bio->bi_end_io = bip->bip_end_io;
-		bio_endio_nodec(bio, error);
+		bio_endio(bio, error);
 
 		return;
 	}
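Context for this hunk: the integrity code saves the original completion handler, interposes its own, and restores it before completing; once bi_remaining bookkeeping is gone, plain bio_endio() suffices. A minimal userspace sketch of the save/override/restore pattern follows; all names are illustrative, not kernel APIs.

#include <stdio.h>

/* Hypothetical I/O object with an overridable completion callback. */
struct io_obj {
	void (*end_io)(struct io_obj *, int error);
	void (*saved_end_io)(struct io_obj *, int error);
};

static void original_end_io(struct io_obj *io, int error)
{
	printf("original completion, error=%d\n", error);
}

/* Interposed handler: runs a verify step, then hands back to the original. */
static void verify_end_io(struct io_obj *io, int error)
{
	/* ... verification work would happen here ... */
	io->end_io = io->saved_end_io;	/* restore the original handler */
	io->end_io(io, error);		/* complete normally, no special variant */
}

int main(void)
{
	struct io_obj io = { .end_io = original_end_io };

	io.saved_end_io = io.end_io;	/* save */
	io.end_io = verify_end_io;	/* override */
	io.end_io(&io, 0);		/* completion path */
	return 0;
}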
+47 -30
@@ -270,8 +270,8 @@ void bio_init(struct bio *bio)
 {
 	memset(bio, 0, sizeof(*bio));
 	bio->bi_flags = 1 << BIO_UPTODATE;
-	atomic_set(&bio->bi_remaining, 1);
-	atomic_set(&bio->bi_cnt, 1);
+	atomic_set(&bio->__bi_remaining, 1);
+	atomic_set(&bio->__bi_cnt, 1);
 }
 EXPORT_SYMBOL(bio_init);
 
@@ -292,8 +292,8 @@ void bio_reset(struct bio *bio)
 	__bio_free(bio);
 
 	memset(bio, 0, BIO_RESET_BYTES);
-	bio->bi_flags = flags|(1 << BIO_UPTODATE);
-	atomic_set(&bio->bi_remaining, 1);
+	bio->bi_flags = flags | (1 << BIO_UPTODATE);
+	atomic_set(&bio->__bi_remaining, 1);
 }
 EXPORT_SYMBOL(bio_reset);
 
@@ -303,6 +303,17 @@ static void bio_chain_endio(struct bio *bio, int error)
 	bio_put(bio);
 }
 
+/*
+ * Increment chain count for the bio. Make sure the CHAIN flag update
+ * is visible before the raised count.
+ */
+static inline void bio_inc_remaining(struct bio *bio)
+{
+	bio->bi_flags |= (1 << BIO_CHAIN);
+	smp_mb__before_atomic();
+	atomic_inc(&bio->__bi_remaining);
+}
+
 /**
  * bio_chain - chain bio completions
  * @bio: the target bio
@@ -320,7 +331,7 @@ void bio_chain(struct bio *bio, struct bio *parent)
 
 	bio->bi_private = parent;
 	bio->bi_end_io = bio_chain_endio;
-	atomic_inc(&parent->bi_remaining);
+	bio_inc_remaining(parent);
 }
 EXPORT_SYMBOL(bio_chain);
 
@@ -524,13 +535,17 @@ EXPORT_SYMBOL(zero_fill_bio);
 **/
 void bio_put(struct bio *bio)
 {
-	BIO_BUG_ON(!atomic_read(&bio->bi_cnt));
-
-	/*
-	 * last put frees it
-	 */
-	if (atomic_dec_and_test(&bio->bi_cnt))
+	if (!bio_flagged(bio, BIO_REFFED))
 		bio_free(bio);
+	else {
+		BIO_BUG_ON(!atomic_read(&bio->__bi_cnt));
+
+		/*
+		 * last put frees it
+		 */
+		if (atomic_dec_and_test(&bio->__bi_cnt))
+			bio_free(bio);
+	}
 }
 EXPORT_SYMBOL(bio_put);
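The bio_put() rework above avoids atomics until a bio actually takes a second reference (the BIO_REFFED flag). A rough single-threaded C11 sketch of that flag-gated refcount idea; the names are invented, and the real code relies on the submitter still owning the bio when the flag is first set, which is what makes the non-atomic flag write safe.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdlib.h>

struct obj {
	bool reffed;		/* set once a second reference exists */
	atomic_int refcount;	/* only consulted when reffed is true */
};

static void obj_get(struct obj *o)
{
	if (!o->reffed) {	/* first extra ref upgrades to counted mode */
		o->reffed = true;
		atomic_store(&o->refcount, 2);	/* owner + new reference */
	} else {
		atomic_fetch_add(&o->refcount, 1);
	}
}

static void obj_put(struct obj *o)
{
	if (!o->reffed)			/* sole owner: free without atomics */
		free(o);
	else if (atomic_fetch_sub(&o->refcount, 1) == 1)
		free(o);		/* last put frees it */
}

int main(void)
{
	struct obj *o = calloc(1, sizeof(*o));

	obj_get(o);	/* now reference-counted */
	obj_put(o);
	obj_put(o);	/* last put frees */
	return 0;
}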
@@ -1741,6 +1756,25 @@ void bio_flush_dcache_pages(struct bio *bi)
 EXPORT_SYMBOL(bio_flush_dcache_pages);
 #endif
 
+static inline bool bio_remaining_done(struct bio *bio)
+{
+	/*
+	 * If we're not chaining, then ->__bi_remaining is always 1 and
+	 * we always end io on the first invocation.
+	 */
+	if (!bio_flagged(bio, BIO_CHAIN))
+		return true;
+
+	BUG_ON(atomic_read(&bio->__bi_remaining) <= 0);
+
+	if (atomic_dec_and_test(&bio->__bi_remaining)) {
+		clear_bit(BIO_CHAIN, &bio->bi_flags);
+		return true;
+	}
+
+	return false;
+}
+
 /**
  * bio_endio - end I/O on a bio
  * @bio: bio
@@ -1758,15 +1792,13 @@ EXPORT_SYMBOL(bio_flush_dcache_pages);
 void bio_endio(struct bio *bio, int error)
 {
 	while (bio) {
-		BUG_ON(atomic_read(&bio->bi_remaining) <= 0);
-
 		if (error)
 			clear_bit(BIO_UPTODATE, &bio->bi_flags);
 		else if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
 			error = -EIO;
 
-		if (!atomic_dec_and_test(&bio->bi_remaining))
-			return;
+		if (unlikely(!bio_remaining_done(bio)))
+			break;
 
 		/*
 		 * Need to have a real endio function for chained bios,
@@ -1789,21 +1821,6 @@ void bio_endio(struct bio *bio, int error)
 }
 EXPORT_SYMBOL(bio_endio);
 
-/**
- * bio_endio_nodec - end I/O on a bio, without decrementing bi_remaining
- * @bio: bio
- * @error: error, if any
- *
- * For code that has saved and restored bi_end_io; thing hard before using this
- * function, probably you should've cloned the entire bio.
- **/
-void bio_endio_nodec(struct bio *bio, int error)
-{
-	atomic_inc(&bio->bi_remaining);
-	bio_endio(bio, error);
-}
-EXPORT_SYMBOL(bio_endio_nodec);
-
 /**
  * bio_split - split a bio
  * @bio: bio to split
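Taken together, bio_inc_remaining() and bio_remaining_done() let unchained bios complete without ever touching an atomic counter. A self-contained sketch of the same shape, using C11 atomics in place of the kernel's bitops and barriers (illustrative only, not the kernel API):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define CHAIN_FLAG 1u

struct io {
	atomic_uint flags;	/* bit 0: chained */
	atomic_int remaining;	/* outstanding chained completions */
};

static void io_inc_remaining(struct io *io)
{
	/* Publish the chain flag before the raised count is observable. */
	atomic_fetch_or(&io->flags, CHAIN_FLAG);
	atomic_fetch_add(&io->remaining, 1);
}

static bool io_remaining_done(struct io *io)
{
	/* Fast path: never chained, so complete on the first call, no atomic dec. */
	if (!(atomic_load(&io->flags) & CHAIN_FLAG))
		return true;

	if (atomic_fetch_sub(&io->remaining, 1) == 1) {
		atomic_fetch_and(&io->flags, ~CHAIN_FLAG);
		return true;
	}
	return false;
}

int main(void)
{
	struct io io = { .flags = 0, .remaining = 1 };

	io_inc_remaining(&io);			/* chain one child */
	printf("%d\n", io_remaining_done(&io));	/* 0: child still pending */
	printf("%d\n", io_remaining_done(&io));	/* 1: last completion */
	return 0;
}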
+36 -100
@@ -117,7 +117,7 @@ EXPORT_SYMBOL(blk_rq_init);
 static void req_bio_endio(struct request *rq, struct bio *bio,
 			  unsigned int nbytes, int error)
 {
-	if (error)
+	if (error && !(rq->cmd_flags & REQ_CLONE))
 		clear_bit(BIO_UPTODATE, &bio->bi_flags);
 	else if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
 		error = -EIO;
@@ -128,7 +128,8 @@ static void req_bio_endio(struct request *rq, struct bio *bio,
 	bio_advance(bio, nbytes);
 
 	/* don't actually finish bio if it's part of flush sequence */
-	if (bio->bi_iter.bi_size == 0 && !(rq->cmd_flags & REQ_FLUSH_SEQ))
+	if (bio->bi_iter.bi_size == 0 &&
+	    !(rq->cmd_flags & (REQ_FLUSH_SEQ|REQ_CLONE)))
 		bio_endio(bio, error);
 }
 
@@ -285,6 +286,7 @@ inline void __blk_run_queue_uncond(struct request_queue *q)
 	q->request_fn(q);
 	q->request_fn_active--;
 }
+EXPORT_SYMBOL_GPL(__blk_run_queue_uncond);
 
 /**
  * __blk_run_queue - run a single device queue
@@ -1525,7 +1527,8 @@ bool bio_attempt_front_merge(struct request_queue *q, struct request *req,
  * Caller must ensure !blk_queue_nomerges(q) beforehand.
  */
 bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
-			    unsigned int *request_count)
+			    unsigned int *request_count,
+			    struct request **same_queue_rq)
 {
 	struct blk_plug *plug;
 	struct request *rq;
@@ -1545,8 +1548,16 @@ bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
 	list_for_each_entry_reverse(rq, plug_list, queuelist) {
 		int el_ret;
 
-		if (rq->q == q)
+		if (rq->q == q) {
 			(*request_count)++;
+			/*
+			 * Only blk-mq multiple hardware queues case checks the
+			 * rq in the same queue, there should be only one such
+			 * rq in a queue
+			 **/
+			if (same_queue_rq)
+				*same_queue_rq = rq;
+		}
 
 		if (rq->q != q || !blk_rq_merge_ok(rq, bio))
 			continue;
@@ -1611,7 +1622,7 @@ static void blk_queue_bio(struct request_queue *q, struct bio *bio)
 	 * any locks.
 	 */
 	if (!blk_queue_nomerges(q) &&
-	    blk_attempt_plug_merge(q, bio, &request_count))
+	    blk_attempt_plug_merge(q, bio, &request_count, NULL))
 		return;
 
 	spin_lock_irq(q->queue_lock);
@@ -1718,8 +1729,6 @@ static void handle_bad_sector(struct bio *bio)
 			bio->bi_rw,
 			(unsigned long long)bio_end_sector(bio),
 			(long long)(i_size_read(bio->bi_bdev->bd_inode) >> 9));
-
-	set_bit(BIO_EOF, &bio->bi_flags);
 }
 
 #ifdef CONFIG_FAIL_MAKE_REQUEST
@@ -2904,95 +2913,22 @@ int blk_lld_busy(struct request_queue *q)
 }
 EXPORT_SYMBOL_GPL(blk_lld_busy);
 
-/**
- * blk_rq_unprep_clone - Helper function to free all bios in a cloned request
- * @rq: the clone request to be cleaned up
- *
- * Description:
- *     Free all bios in @rq for a cloned request.
- */
-void blk_rq_unprep_clone(struct request *rq)
-{
-	struct bio *bio;
-
-	while ((bio = rq->bio) != NULL) {
-		rq->bio = bio->bi_next;
-
-		bio_put(bio);
-	}
-}
-EXPORT_SYMBOL_GPL(blk_rq_unprep_clone);
-
 /*
  * Copy attributes of the original request to the clone request.
  * The actual data parts (e.g. ->cmd, ->sense) are not copied.
  */
-static void __blk_rq_prep_clone(struct request *dst, struct request *src)
+void blk_rq_prep_clone(struct request *dst, struct request *src)
 {
 	dst->cpu = src->cpu;
-	dst->cmd_flags |= (src->cmd_flags & REQ_CLONE_MASK) | REQ_NOMERGE;
+	dst->cmd_flags |= (src->cmd_flags & REQ_CLONE_MASK);
+	dst->cmd_flags |= REQ_NOMERGE | REQ_CLONE;
 	dst->cmd_type = src->cmd_type;
 	dst->__sector = blk_rq_pos(src);
 	dst->__data_len = blk_rq_bytes(src);
 	dst->nr_phys_segments = src->nr_phys_segments;
 	dst->ioprio = src->ioprio;
 	dst->extra_len = src->extra_len;
-}
-
-/**
- * blk_rq_prep_clone - Helper function to setup clone request
- * @rq: the request to be setup
- * @rq_src: original request to be cloned
- * @bs: bio_set that bios for clone are allocated from
- * @gfp_mask: memory allocation mask for bio
- * @bio_ctr: setup function to be called for each clone bio.
- *           Returns %0 for success, non %0 for failure.
- * @data: private data to be passed to @bio_ctr
- *
- * Description:
- *     Clones bios in @rq_src to @rq, and copies attributes of @rq_src to @rq.
- *     The actual data parts of @rq_src (e.g. ->cmd, ->sense)
- *     are not copied, and copying such parts is the caller's responsibility.
- *     Also, pages which the original bios are pointing to are not copied
- *     and the cloned bios just point same pages.
- *     So cloned bios must be completed before original bios, which means
- *     the caller must complete @rq before @rq_src.
- */
-int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
-		      struct bio_set *bs, gfp_t gfp_mask,
-		      int (*bio_ctr)(struct bio *, struct bio *, void *),
-		      void *data)
-{
-	struct bio *bio, *bio_src;
-
-	if (!bs)
-		bs = fs_bio_set;
-
-	__rq_for_each_bio(bio_src, rq_src) {
-		bio = bio_clone_fast(bio_src, gfp_mask, bs);
-		if (!bio)
-			goto free_and_out;
-
-		if (bio_ctr && bio_ctr(bio, bio_src, data))
-			goto free_and_out;
-
-		if (rq->bio) {
-			rq->biotail->bi_next = bio;
-			rq->biotail = bio;
-		} else
-			rq->bio = rq->biotail = bio;
-	}
-
-	__blk_rq_prep_clone(rq, rq_src);
-
-	return 0;
-
-free_and_out:
-	if (bio)
-		bio_put(bio);
-	blk_rq_unprep_clone(rq);
-
-	return -ENOMEM;
+	dst->bio = src->bio;
+	dst->biotail = src->biotail;
+	dst->cmd = src->cmd;
+	dst->cmd_len = src->cmd_len;
+	dst->sense = src->sense;
 }
 EXPORT_SYMBOL_GPL(blk_rq_prep_clone);
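The replacement blk_rq_prep_clone() above is now a shallow copy: the clone shares the original's bio list rather than cloning each bio, which is why cloned requests must complete before their originals are completed or reused. A toy illustration of that ownership rule, with made-up stand-in types:

#include <stdio.h>

/* Simplified stand-ins for struct request and its bio list. */
struct xbio { struct xbio *next; };

struct xreq {
	struct xbio *bio, *biotail;
	int cmd_flags;
};

/*
 * Shallow clone: copy attributes and share the bio list instead of
 * duplicating it. The clone borrows the bios, so it must be completed
 * before the original.
 */
static void req_prep_clone(struct xreq *dst, const struct xreq *src)
{
	dst->cmd_flags = src->cmd_flags;
	dst->bio = src->bio;		/* shared, not copied */
	dst->biotail = src->biotail;
}

int main(void)
{
	struct xbio b = { 0 };
	struct xreq orig = { .bio = &b, .biotail = &b, .cmd_flags = 1 };
	struct xreq clone;

	req_prep_clone(&clone, &orig);
	printf("same bio list: %d\n", orig.bio == clone.bio);	/* prints 1 */
	return 0;
}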
@@ -3034,21 +2970,20 @@ void blk_start_plug(struct blk_plug *plug)
 {
 	struct task_struct *tsk = current;
 
+	/*
+	 * If this is a nested plug, don't actually assign it.
+	 */
+	if (tsk->plug)
+		return;
+
 	INIT_LIST_HEAD(&plug->list);
 	INIT_LIST_HEAD(&plug->mq_list);
 	INIT_LIST_HEAD(&plug->cb_list);
-
 	/*
-	 * If this is a nested plug, don't actually assign it. It will be
-	 * flushed on its own.
+	 * Store ordering should not be needed here, since a potential
+	 * preempt will imply a full memory barrier
 	 */
-	if (!tsk->plug) {
-		/*
-		 * Store ordering should not be needed here, since a potential
-		 * preempt will imply a full memory barrier
-		 */
-		tsk->plug = plug;
-	}
+	tsk->plug = plug;
 }
 EXPORT_SYMBOL(blk_start_plug);
 
@@ -3195,10 +3130,11 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
 
 void blk_finish_plug(struct blk_plug *plug)
 {
+	if (plug != current->plug)
+		return;
 	blk_flush_plug_list(plug, false);
 
-	if (plug == current->plug)
-		current->plug = NULL;
+	current->plug = NULL;
 }
 EXPORT_SYMBOL(blk_finish_plug);
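Both plug hunks move the nesting check to the entry points: only the outermost blk_start_plug() installs itself on the task, and only that plug flushes in blk_finish_plug(). A compact sketch of the guard, with a file-scope pointer standing in for current->plug:

#include <stdio.h>

struct plug { int pending; };

/* Per-task slot, analogous to current->plug. */
static struct plug *task_plug;

static void start_plug(struct plug *p)
{
	/* Nested plug: the outermost one already owns the slot. */
	if (task_plug)
		return;

	p->pending = 0;
	task_plug = p;
}

static void finish_plug(struct plug *p)
{
	/* Inner plugs were never installed, so there is nothing to flush. */
	if (p != task_plug)
		return;

	printf("flushing %d pending items\n", p->pending);
	task_plug = NULL;
}

int main(void)
{
	struct plug outer, inner;

	start_plug(&outer);
	start_plug(&inner);	/* no-op: nested */
	finish_plug(&inner);	/* no-op: not the installed plug */
	finish_plug(&outer);	/* flushes */
	return 0;
}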
@@ -53,7 +53,6 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
 			   rq_end_io_fn *done)
 {
 	int where = at_head ? ELEVATOR_INSERT_FRONT : ELEVATOR_INSERT_BACK;
-	bool is_pm_resume;
 
 	WARN_ON(irqs_disabled());
 	WARN_ON(rq->cmd_type == REQ_TYPE_FS);
@@ -70,12 +69,6 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
 		return;
 	}
 
-	/*
-	 * need to check this before __blk_run_queue(), because rq can
-	 * be freed before that returns.
-	 */
-	is_pm_resume = rq->cmd_type == REQ_TYPE_PM_RESUME;
-
 	spin_lock_irq(q->queue_lock);
 
 	if (unlikely(blk_queue_dying(q))) {
@@ -88,9 +81,6 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
 
 	__elv_add_request(q, rq, where);
 	__blk_run_queue(q);
-	/* the queue is stopped so it won't be run */
-	if (is_pm_resume)
-		__blk_run_queue_uncond(q);
 	spin_unlock_irq(q->queue_lock);
 }
 EXPORT_SYMBOL_GPL(blk_execute_rq_nowait);
+89 -59
@@ -89,7 +89,8 @@ static int blk_mq_queue_enter(struct request_queue *q, gfp_t gfp)
 			return -EBUSY;
 
 		ret = wait_event_interruptible(q->mq_freeze_wq,
-				!q->mq_freeze_depth || blk_queue_dying(q));
+				!atomic_read(&q->mq_freeze_depth) ||
+				blk_queue_dying(q));
 		if (blk_queue_dying(q))
 			return -ENODEV;
 		if (ret)
@@ -112,13 +113,10 @@ static void blk_mq_usage_counter_release(struct percpu_ref *ref)
 
 void blk_mq_freeze_queue_start(struct request_queue *q)
 {
-	bool freeze;
+	int freeze_depth;
 
-	spin_lock_irq(q->queue_lock);
-	freeze = !q->mq_freeze_depth++;
-	spin_unlock_irq(q->queue_lock);
-
-	if (freeze) {
+	freeze_depth = atomic_inc_return(&q->mq_freeze_depth);
+	if (freeze_depth == 1) {
 		percpu_ref_kill(&q->mq_usage_counter);
 		blk_mq_run_hw_queues(q, false);
 	}
@@ -143,13 +141,11 @@ EXPORT_SYMBOL_GPL(blk_mq_freeze_queue);
 
 void blk_mq_unfreeze_queue(struct request_queue *q)
 {
-	bool wake;
+	int freeze_depth;
 
-	spin_lock_irq(q->queue_lock);
-	wake = !--q->mq_freeze_depth;
-	WARN_ON_ONCE(q->mq_freeze_depth < 0);
-	spin_unlock_irq(q->queue_lock);
-	if (wake) {
+	freeze_depth = atomic_dec_return(&q->mq_freeze_depth);
+	WARN_ON_ONCE(freeze_depth < 0);
+	if (!freeze_depth) {
 		percpu_ref_reinit(&q->mq_usage_counter);
 		wake_up_all(&q->mq_freeze_wq);
 	}
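These three hunks replace a spinlock-protected freeze counter with an atomic one; only the 0 to 1 and 1 to 0 transitions do real work. A sketch of that transition-detection idiom with C11 atomics, where printf stands in for the kill/reinit/wake calls:

#include <stdatomic.h>
#include <stdio.h>

static atomic_int freeze_depth;

static void freeze_start(void)
{
	/* Only the 0 -> 1 transition performs the actual freeze. */
	if (atomic_fetch_add(&freeze_depth, 1) + 1 == 1)
		printf("freezing queue\n");
}

static void unfreeze(void)
{
	int depth = atomic_fetch_sub(&freeze_depth, 1) - 1;

	/* Only the 1 -> 0 transition wakes waiters. */
	if (depth == 0)
		printf("unfreezing queue, waking waiters\n");
}

int main(void)
{
	freeze_start();
	freeze_start();	/* nested freeze: no extra work */
	unfreeze();
	unfreeze();	/* last unfreeze wakes */
	return 0;
}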
@@ -1237,6 +1233,38 @@ static struct request *blk_mq_map_request(struct request_queue *q,
 	return rq;
 }
 
+static int blk_mq_direct_issue_request(struct request *rq)
+{
+	int ret;
+	struct request_queue *q = rq->q;
+	struct blk_mq_hw_ctx *hctx = q->mq_ops->map_queue(q,
+			rq->mq_ctx->cpu);
+	struct blk_mq_queue_data bd = {
+		.rq = rq,
+		.list = NULL,
+		.last = 1
+	};
+
+	/*
+	 * For OK queue, we are done. For error, kill it. Any other
+	 * error (busy), just add it to our list as we previously
+	 * would have done
+	 */
+	ret = q->mq_ops->queue_rq(hctx, &bd);
+	if (ret == BLK_MQ_RQ_QUEUE_OK)
+		return 0;
+	else {
+		__blk_mq_requeue_request(rq);
+
+		if (ret == BLK_MQ_RQ_QUEUE_ERROR) {
+			rq->errors = -EIO;
+			blk_mq_end_request(rq, rq->errors);
+			return 0;
+		}
+		return -1;
+	}
+}
+
 /*
  * Multiple hardware queue variant. This will not use per-process plugs,
  * but will attempt to bypass the hctx queueing if we can go straight to
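blk_mq_direct_issue_request() above is a try-now-or-requeue helper: a hard error is completed immediately, while a busy driver pushes the request back for normal insertion. The control flow, reduced to a sketch with hypothetical stand-ins:

#include <stdio.h>

enum issue_status { ISSUE_OK, ISSUE_ERROR, ISSUE_BUSY };

/* Hypothetical driver hook; returns busy here to exercise the fallback. */
static enum issue_status driver_issue(int rq)
{
	(void)rq;
	return ISSUE_BUSY;
}

static void complete_with_error(int rq) { printf("rq %d failed\n", rq); }
static void insert_for_later(int rq) { printf("rq %d queued\n", rq); }

/*
 * Try to issue directly. Returns 0 when the request's fate is settled
 * (issued, or completed with an error), nonzero when the caller must
 * fall back to normal queueing.
 */
static int direct_issue(int rq)
{
	switch (driver_issue(rq)) {
	case ISSUE_OK:
		return 0;
	case ISSUE_ERROR:
		complete_with_error(rq);	/* settled, albeit with an error */
		return 0;
	default:				/* busy: let the caller queue it */
		return -1;
	}
}

int main(void)
{
	if (direct_issue(42))
		insert_for_later(42);
	return 0;
}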
@@ -1248,6 +1276,9 @@ static void blk_mq_make_request(struct request_queue *q, struct bio *bio)
 	const int is_flush_fua = bio->bi_rw & (REQ_FLUSH | REQ_FUA);
 	struct blk_map_ctx data;
 	struct request *rq;
+	unsigned int request_count = 0;
+	struct blk_plug *plug;
+	struct request *same_queue_rq = NULL;
 
 	blk_queue_bounce(q, &bio);
 
@@ -1256,6 +1287,10 @@ static void blk_mq_make_request(struct request_queue *q, struct bio *bio)
 		return;
 	}
 
+	if (!is_flush_fua && !blk_queue_nomerges(q) &&
+	    blk_attempt_plug_merge(q, bio, &request_count, &same_queue_rq))
+		return;
+
 	rq = blk_mq_map_request(q, bio, &data);
 	if (unlikely(!rq))
 		return;
@@ -1266,38 +1301,42 @@ static void blk_mq_make_request(struct request_queue *q, struct bio *bio)
 		goto run_queue;
 	}
 
+	plug = current->plug;
 	/*
 	 * If the driver supports defer issued based on 'last', then
 	 * queue it up like normal since we can potentially save some
 	 * CPU this way.
 	 */
-	if (is_sync && !(data.hctx->flags & BLK_MQ_F_DEFER_ISSUE)) {
-		struct blk_mq_queue_data bd = {
-			.rq = rq,
-			.list = NULL,
-			.last = 1
-		};
-		int ret;
+	if (((plug && !blk_queue_nomerges(q)) || is_sync) &&
+	    !(data.hctx->flags & BLK_MQ_F_DEFER_ISSUE)) {
+		struct request *old_rq = NULL;
 
 		blk_mq_bio_to_request(rq, bio);
 
 		/*
-		 * For OK queue, we are done. For error, kill it. Any other
-		 * error (busy), just add it to our list as we previously
-		 * would have done
+		 * we do limited pluging. If bio can be merged, do merge.
+		 * Otherwise the existing request in the plug list will be
+		 * issued. So the plug list will have one request at most
 		 */
-		ret = q->mq_ops->queue_rq(data.hctx, &bd);
-		if (ret == BLK_MQ_RQ_QUEUE_OK)
-			goto done;
-		else {
-			__blk_mq_requeue_request(rq);
-
-			if (ret == BLK_MQ_RQ_QUEUE_ERROR) {
-				rq->errors = -EIO;
-				blk_mq_end_request(rq, rq->errors);
-				goto done;
+		if (plug) {
+			/*
+			 * The plug list might get flushed before this. If that
+			 * happens, same_queue_rq is invalid and plug list is empty
+			 **/
+			if (same_queue_rq && !list_empty(&plug->mq_list)) {
+				old_rq = same_queue_rq;
+				list_del_init(&old_rq->queuelist);
 			}
-		}
+			list_add_tail(&rq->queuelist, &plug->mq_list);
+		} else /* is_sync */
+			old_rq = rq;
+		blk_mq_put_ctx(data.ctx);
+		if (!old_rq)
+			return;
+		if (!blk_mq_direct_issue_request(old_rq))
+			return;
+		blk_mq_insert_request(old_rq, false, true, true);
+		return;
 	}
 
 	if (!blk_mq_merge_queue_io(data.hctx, data.ctx, rq, bio)) {
@@ -1310,7 +1349,6 @@ static void blk_mq_make_request(struct request_queue *q, struct bio *bio)
 run_queue:
 		blk_mq_run_hw_queue(data.hctx, !is_sync || is_flush_fua);
 	}
-done:
 	blk_mq_put_ctx(data.ctx);
 }
 
@@ -1322,16 +1360,11 @@ static void blk_sq_make_request(struct request_queue *q, struct bio *bio)
 {
 	const int is_sync = rw_is_sync(bio->bi_rw);
 	const int is_flush_fua = bio->bi_rw & (REQ_FLUSH | REQ_FUA);
-	unsigned int use_plug, request_count = 0;
+	struct blk_plug *plug;
+	unsigned int request_count = 0;
 	struct blk_map_ctx data;
 	struct request *rq;
 
-	/*
-	 * If we have multiple hardware queues, just go directly to
-	 * one of those for sync IO.
-	 */
-	use_plug = !is_flush_fua && !is_sync;
-
 	blk_queue_bounce(q, &bio);
 
 	if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
@@ -1339,8 +1372,8 @@ static void blk_sq_make_request(struct request_queue *q, struct bio *bio)
 		return;
 	}
 
-	if (use_plug && !blk_queue_nomerges(q) &&
-	    blk_attempt_plug_merge(q, bio, &request_count))
+	if (!is_flush_fua && !blk_queue_nomerges(q) &&
+	    blk_attempt_plug_merge(q, bio, &request_count, NULL))
 		return;
 
 	rq = blk_mq_map_request(q, bio, &data);
@@ -1358,21 +1391,18 @@ static void blk_sq_make_request(struct request_queue *q, struct bio *bio)
 	 * utilize that to temporarily store requests until the task is
 	 * either done or scheduled away.
 	 */
-	if (use_plug) {
-		struct blk_plug *plug = current->plug;
-
-		if (plug) {
-			blk_mq_bio_to_request(rq, bio);
-			if (list_empty(&plug->mq_list))
-				trace_block_plug(q);
-			else if (request_count >= BLK_MAX_REQUEST_COUNT) {
-				blk_flush_plug_list(plug, false);
-				trace_block_plug(q);
-			}
-			list_add_tail(&rq->queuelist, &plug->mq_list);
-			blk_mq_put_ctx(data.ctx);
-			return;
+	plug = current->plug;
+	if (plug) {
+		blk_mq_bio_to_request(rq, bio);
+		if (list_empty(&plug->mq_list))
+			trace_block_plug(q);
+		else if (request_count >= BLK_MAX_REQUEST_COUNT) {
+			blk_flush_plug_list(plug, false);
+			trace_block_plug(q);
 		}
+		list_add_tail(&rq->queuelist, &plug->mq_list);
+		blk_mq_put_ctx(data.ctx);
+		return;
 	}
 
 	if (!blk_mq_merge_queue_io(data.hctx, data.ctx, rq, bio)) {
@@ -2052,7 +2082,7 @@ void blk_mq_free_queue(struct request_queue *q)
 /* Basically redo blk_mq_init_queue with queue frozen */
 static void blk_mq_queue_reinit(struct request_queue *q)
 {
-	WARN_ON_ONCE(!q->mq_freeze_depth);
+	WARN_ON_ONCE(!atomic_read(&q->mq_freeze_depth));
 
 	blk_mq_sysfs_unregister(q);
+2 -3
@@ -78,7 +78,8 @@ bool bio_attempt_front_merge(struct request_queue *q, struct request *req,
 bool bio_attempt_back_merge(struct request_queue *q, struct request *req,
 			    struct bio *bio);
 bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
-			    unsigned int *request_count);
+			    unsigned int *request_count,
+			    struct request **same_queue_rq);
 
 void blk_account_io_start(struct request *req, bool new_io);
 void blk_account_io_completion(struct request *req, unsigned int bytes);
@@ -193,8 +194,6 @@ int blk_try_merge(struct request *rq, struct bio *bio);
 
 void blk_queue_congestion_threshold(struct request_queue *q);
 
-void __blk_run_queue_uncond(struct request_queue *q);
-
 int blk_dev_init(void);
@@ -128,9 +128,6 @@ static void bounce_end_io(struct bio *bio, mempool_t *pool, int err)
 	struct bio_vec *bvec, *org_vec;
 	int i;
 
-	if (test_bit(BIO_EOPNOTSUPP, &bio->bi_flags))
-		set_bit(BIO_EOPNOTSUPP, &bio_orig->bi_flags);
-
 	/*
 	 * free up bounce indirect pages used
 	 */
+32 -5
@@ -150,21 +150,48 @@ static int blkpg_ioctl(struct block_device *bdev, struct blkpg_ioctl_arg __user
 	}
 }
 
-static int blkdev_reread_part(struct block_device *bdev)
+/*
+ * This is an exported API for the block driver, and will not
+ * acquire bd_mutex. This API should be used in case that
+ * caller has held bd_mutex already.
+ */
+int __blkdev_reread_part(struct block_device *bdev)
 {
 	struct gendisk *disk = bdev->bd_disk;
-	int res;
 
 	if (!disk_part_scan_enabled(disk) || bdev != bdev->bd_contains)
 		return -EINVAL;
 	if (!capable(CAP_SYS_ADMIN))
 		return -EACCES;
-	if (!mutex_trylock(&bdev->bd_mutex))
-		return -EBUSY;
-	res = rescan_partitions(disk, bdev);
-	mutex_unlock(&bdev->bd_mutex);
+
+	lockdep_assert_held(&bdev->bd_mutex);
+
+	return rescan_partitions(disk, bdev);
+}
+EXPORT_SYMBOL(__blkdev_reread_part);
+
+/*
+ * This is an exported API for the block driver, and will
+ * try to acquire bd_mutex. If bd_mutex has been held already
+ * in current context, please call __blkdev_reread_part().
+ *
+ * Make sure the held locks in current context aren't required
+ * in open()/close() handler and I/O path for avoiding ABBA deadlock:
+ * - bd_mutex is held before calling block driver's open/close
+ *   handler
+ * - reading partition table may submit I/O to the block device
+ */
+int blkdev_reread_part(struct block_device *bdev)
+{
+	int res;
+
+	mutex_lock(&bdev->bd_mutex);
+	res = __blkdev_reread_part(bdev);
+	mutex_unlock(&bdev->bd_mutex);
+
 	return res;
 }
+EXPORT_SYMBOL(blkdev_reread_part);
 
 static int blk_ioctl_discard(struct block_device *bdev, uint64_t start,
 			     uint64_t len, int secure)
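This ioctl change introduces the common locked/unlocked API pairing: a double-underscore variant that asserts the lock is already held, and a wrapper that takes it. A pthread-based sketch of the same convention; the trylock assert is a crude userspace stand-in for lockdep_assert_held():

#include <assert.h>
#include <pthread.h>

static pthread_mutex_t bd_mutex = PTHREAD_MUTEX_INITIALIZER;

/* Lock-free variant: the caller must already hold bd_mutex. */
static int __reread_part(void)
{
	/* Poor man's held-lock assertion: trylock must fail. */
	assert(pthread_mutex_trylock(&bd_mutex) != 0);
	/* ... rescan partitions here ... */
	return 0;
}

/* Convenience variant: acquires the lock around the __ version. */
static int reread_part(void)
{
	int res;

	pthread_mutex_lock(&bd_mutex);
	res = __reread_part();
	pthread_mutex_unlock(&bd_mutex);
	return res;
}

int main(void)
{
	return reread_part();
}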
+24 -26
@@ -230,29 +230,40 @@ static int nbd_send_req(struct nbd_device *nbd, struct request *req)
 	int result, flags;
 	struct nbd_request request;
 	unsigned long size = blk_rq_bytes(req);
+	u32 type;
+
+	if (req->cmd_type == REQ_TYPE_DRV_PRIV)
+		type = NBD_CMD_DISC;
+	else if (req->cmd_flags & REQ_DISCARD)
+		type = NBD_CMD_TRIM;
+	else if (req->cmd_flags & REQ_FLUSH)
+		type = NBD_CMD_FLUSH;
+	else if (rq_data_dir(req) == WRITE)
+		type = NBD_CMD_WRITE;
+	else
+		type = NBD_CMD_READ;
 
 	memset(&request, 0, sizeof(request));
 	request.magic = htonl(NBD_REQUEST_MAGIC);
-	request.type = htonl(nbd_cmd(req));
-
-	if (nbd_cmd(req) != NBD_CMD_FLUSH && nbd_cmd(req) != NBD_CMD_DISC) {
+	request.type = htonl(type);
+	if (type != NBD_CMD_FLUSH && type != NBD_CMD_DISC) {
 		request.from = cpu_to_be64((u64)blk_rq_pos(req) << 9);
 		request.len = htonl(size);
 	}
 	memcpy(request.handle, &req, sizeof(req));
 
 	dev_dbg(nbd_to_dev(nbd), "request %p: sending control (%s@%llu,%uB)\n",
-		req, nbdcmd_to_ascii(nbd_cmd(req)),
+		req, nbdcmd_to_ascii(type),
 		(unsigned long long)blk_rq_pos(req) << 9, blk_rq_bytes(req));
 	result = sock_xmit(nbd, 1, &request, sizeof(request),
-			(nbd_cmd(req) == NBD_CMD_WRITE) ? MSG_MORE : 0);
+			(type == NBD_CMD_WRITE) ? MSG_MORE : 0);
 	if (result <= 0) {
 		dev_err(disk_to_dev(nbd->disk),
 			"Send control failed (result %d)\n", result);
 		return -EIO;
 	}
 
-	if (nbd_cmd(req) == NBD_CMD_WRITE) {
+	if (type == NBD_CMD_WRITE) {
 		struct req_iterator iter;
 		struct bio_vec bvec;
 		/*
@@ -352,7 +363,7 @@ static struct request *nbd_read_stat(struct nbd_device *nbd)
 	}
 
 	dev_dbg(nbd_to_dev(nbd), "request %p: got reply\n", req);
-	if (nbd_cmd(req) == NBD_CMD_READ) {
+	if (rq_data_dir(req) != WRITE) {
 		struct req_iterator iter;
 		struct bio_vec bvec;
 
@@ -452,23 +463,11 @@ static void nbd_handle_req(struct nbd_device *nbd, struct request *req)
 	if (req->cmd_type != REQ_TYPE_FS)
 		goto error_out;
 
-	nbd_cmd(req) = NBD_CMD_READ;
-	if (rq_data_dir(req) == WRITE) {
-		if ((req->cmd_flags & REQ_DISCARD)) {
-			WARN_ON(!(nbd->flags & NBD_FLAG_SEND_TRIM));
-			nbd_cmd(req) = NBD_CMD_TRIM;
-		} else
-			nbd_cmd(req) = NBD_CMD_WRITE;
-		if (nbd->flags & NBD_FLAG_READ_ONLY) {
-			dev_err(disk_to_dev(nbd->disk),
-				"Write on read-only\n");
-			goto error_out;
-		}
-	}
-
-	if (req->cmd_flags & REQ_FLUSH) {
-		BUG_ON(unlikely(blk_rq_sectors(req)));
-		nbd_cmd(req) = NBD_CMD_FLUSH;
+	if (rq_data_dir(req) == WRITE &&
+	    (nbd->flags & NBD_FLAG_READ_ONLY)) {
+		dev_err(disk_to_dev(nbd->disk),
+			"Write on read-only\n");
+		goto error_out;
 	}
 
 	req->errors = 0;
@@ -592,8 +591,7 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
 		fsync_bdev(bdev);
 		mutex_lock(&nbd->tx_lock);
 		blk_rq_init(NULL, &sreq);
-		sreq.cmd_type = REQ_TYPE_SPECIAL;
-		nbd_cmd(&sreq) = NBD_CMD_DISC;
+		sreq.cmd_type = REQ_TYPE_DRV_PRIV;
 
 		/* Check again after getting mutex back. */
 		if (!nbd->sock)
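In nbd_send_req() the wire command is now computed locally from the request's type and flags instead of being stashed in per-request state via nbd_cmd(). The derive-on-demand shape, sketched with invented flag names:

#include <stdbool.h>

enum nbd_cmd { CMD_READ, CMD_WRITE, CMD_FLUSH, CMD_TRIM, CMD_DISC };

/* Stand-in request description; field names are illustrative. */
struct req {
	bool drv_priv;	/* driver-private request (disconnect) */
	bool discard;
	bool flush;
	bool is_write;
};

/*
 * Compute the wire command from the request each time it is needed,
 * instead of writing it into per-request state up front.
 */
static enum nbd_cmd req_to_cmd(const struct req *r)
{
	if (r->drv_priv)
		return CMD_DISC;
	if (r->discard)
		return CMD_TRIM;
	if (r->flush)
		return CMD_FLUSH;
	return r->is_write ? CMD_WRITE : CMD_READ;
}

int main(void)
{
	struct req r = { .is_write = true };

	return req_to_cmd(&r) == CMD_WRITE ? 0 : 1;
}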
@@ -442,7 +442,7 @@ static char *pd_buf; /* buffer for request in progress */
 
 static enum action do_pd_io_start(void)
 {
-	if (pd_req->cmd_type == REQ_TYPE_SPECIAL) {
+	if (pd_req->cmd_type == REQ_TYPE_DRV_PRIV) {
 		phase = pd_special;
 		return pd_special();
 	}
@@ -725,7 +725,7 @@ static int pd_special_command(struct pd_unit *disk,
 	if (IS_ERR(rq))
 		return PTR_ERR(rq);
 
-	rq->cmd_type = REQ_TYPE_SPECIAL;
+	rq->cmd_type = REQ_TYPE_DRV_PRIV;
 	rq->special = func;
 
 	err = blk_execute_rq(disk->gd->queue, disk->gd, rq, 0);
+2 -2
@@ -620,7 +620,7 @@ static int carm_array_info (struct carm_host *host, unsigned int array_idx)
 	spin_unlock_irq(&host->lock);
 
 	DPRINTK("blk_execute_rq_nowait, tag == %u\n", idx);
-	crq->rq->cmd_type = REQ_TYPE_SPECIAL;
+	crq->rq->cmd_type = REQ_TYPE_DRV_PRIV;
 	crq->rq->special = crq;
 	blk_execute_rq_nowait(host->oob_q, NULL, crq->rq, true, NULL);
 
@@ -661,7 +661,7 @@ static int carm_send_special (struct carm_host *host, carm_sspc_t func)
 	crq->msg_bucket = (u32) rc;
 
 	DPRINTK("blk_execute_rq_nowait, tag == %u\n", idx);
-	crq->rq->cmd_type = REQ_TYPE_SPECIAL;
+	crq->rq->cmd_type = REQ_TYPE_DRV_PRIV;
 	crq->rq->special = crq;
 	blk_execute_rq_nowait(host->oob_q, NULL, crq->rq, true, NULL);
@@ -124,7 +124,7 @@ static inline void virtblk_request_done(struct request *req)
 		req->resid_len = virtio32_to_cpu(vblk->vdev, vbr->in_hdr.residual);
 		req->sense_len = virtio32_to_cpu(vblk->vdev, vbr->in_hdr.sense_len);
 		req->errors = virtio32_to_cpu(vblk->vdev, vbr->in_hdr.errors);
-	} else if (req->cmd_type == REQ_TYPE_SPECIAL) {
+	} else if (req->cmd_type == REQ_TYPE_DRV_PRIV) {
 		req->errors = (error != 0);
 	}
 
@@ -188,7 +188,7 @@ static int virtio_queue_rq(struct blk_mq_hw_ctx *hctx,
 		vbr->out_hdr.sector = 0;
 		vbr->out_hdr.ioprio = cpu_to_virtio32(vblk->vdev, req_get_ioprio(vbr->req));
 		break;
-	case REQ_TYPE_SPECIAL:
+	case REQ_TYPE_DRV_PRIV:
 		vbr->out_hdr.type = cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_GET_ID);
 		vbr->out_hdr.sector = 0;
 		vbr->out_hdr.ioprio = cpu_to_virtio32(vblk->vdev, req_get_ioprio(vbr->req));
@@ -251,7 +251,7 @@ static int virtblk_get_id(struct gendisk *disk, char *id_str)
 		return PTR_ERR(req);
 	}
 
-	req->cmd_type = REQ_TYPE_SPECIAL;
+	req->cmd_type = REQ_TYPE_DRV_PRIV;
 	err = blk_execute_rq(vblk->disk->queue, vblk->disk, req, false);
 	blk_put_request(req);
@@ -93,7 +93,7 @@ int ide_queue_pc_tail(ide_drive_t *drive, struct gendisk *disk,
 	int error;
 
 	rq = blk_get_request(drive->queue, READ, __GFP_WAIT);
-	rq->cmd_type = REQ_TYPE_SPECIAL;
+	rq->cmd_type = REQ_TYPE_DRV_PRIV;
 	rq->special = (char *)pc;
 
 	if (buf && bufflen) {
@@ -191,7 +191,7 @@ void ide_prep_sense(ide_drive_t *drive, struct request *rq)
 
 	BUG_ON(sense_len > sizeof(*sense));
 
-	if (rq->cmd_type == REQ_TYPE_SENSE || drive->sense_rq_armed)
+	if (rq->cmd_type == REQ_TYPE_ATA_SENSE || drive->sense_rq_armed)
 		return;
 
 	memset(sense, 0, sizeof(*sense));
@@ -210,7 +210,7 @@ void ide_prep_sense(ide_drive_t *drive, struct request *rq)
 	sense_rq->rq_disk = rq->rq_disk;
 	sense_rq->cmd[0] = GPCMD_REQUEST_SENSE;
 	sense_rq->cmd[4] = cmd_len;
-	sense_rq->cmd_type = REQ_TYPE_SENSE;
+	sense_rq->cmd_type = REQ_TYPE_ATA_SENSE;
 	sense_rq->cmd_flags |= REQ_PREEMPT;
 
 	if (drive->media == ide_tape)
@@ -310,7 +310,7 @@ int ide_cd_get_xferlen(struct request *rq)
 	switch (rq->cmd_type) {
 	case REQ_TYPE_FS:
 		return 32768;
-	case REQ_TYPE_SENSE:
+	case REQ_TYPE_ATA_SENSE:
 	case REQ_TYPE_BLOCK_PC:
 	case REQ_TYPE_ATA_PC:
 		return blk_rq_bytes(rq);
@@ -477,7 +477,7 @@ static ide_startstop_t ide_pc_intr(ide_drive_t *drive)
 	if (uptodate == 0)
 		drive->failed_pc = NULL;
 
-	if (rq->cmd_type == REQ_TYPE_SPECIAL) {
+	if (rq->cmd_type == REQ_TYPE_DRV_PRIV) {
 		rq->errors = 0;
 		error = 0;
 	} else {
@@ -210,7 +210,7 @@ static void cdrom_analyze_sense_data(ide_drive_t *drive,
 static void ide_cd_complete_failed_rq(ide_drive_t *drive, struct request *rq)
 {
 	/*
-	 * For REQ_TYPE_SENSE, "rq->special" points to the original
+	 * For REQ_TYPE_ATA_SENSE, "rq->special" points to the original
 	 * failed request. Also, the sense data should be read
 	 * directly from rq which might be different from the original
 	 * sense buffer if it got copied during mapping.
@@ -285,7 +285,7 @@ static int cdrom_decode_status(ide_drive_t *drive, u8 stat)
 		 "stat 0x%x",
 		 rq->cmd[0], rq->cmd_type, err, stat);
 
-	if (rq->cmd_type == REQ_TYPE_SENSE) {
+	if (rq->cmd_type == REQ_TYPE_ATA_SENSE) {
 		/*
 		 * We got an error trying to get sense info from the drive
 		 * (probably while trying to recover from a former error).
@@ -526,7 +526,7 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)
 	ide_expiry_t *expiry = NULL;
 	int dma_error = 0, dma, thislen, uptodate = 0;
 	int write = (rq_data_dir(rq) == WRITE) ? 1 : 0, rc = 0;
-	int sense = (rq->cmd_type == REQ_TYPE_SENSE);
+	int sense = (rq->cmd_type == REQ_TYPE_ATA_SENSE);
 	unsigned int timeout;
 	u16 len;
 	u8 ireason, stat;
@@ -791,7 +791,7 @@ static ide_startstop_t ide_cd_do_request(ide_drive_t *drive, struct request *rq,
 		if (cdrom_start_rw(drive, rq) == ide_stopped)
 			goto out_end;
 		break;
-	case REQ_TYPE_SENSE:
+	case REQ_TYPE_ATA_SENSE:
 	case REQ_TYPE_BLOCK_PC:
 	case REQ_TYPE_ATA_PC:
 		if (!rq->timeout)
@@ -799,7 +799,7 @@ static ide_startstop_t ide_cd_do_request(ide_drive_t *drive, struct request *rq,
 
 		cdrom_do_block_pc(drive, rq);
 		break;
-	case REQ_TYPE_SPECIAL:
+	case REQ_TYPE_DRV_PRIV:
 		/* right now this can only be a reset... */
 		uptodate = 1;
 		goto out_end;
@@ -304,7 +304,7 @@ int ide_cdrom_reset(struct cdrom_device_info *cdi)
 	int ret;
 
 	rq = blk_get_request(drive->queue, READ, __GFP_WAIT);
-	rq->cmd_type = REQ_TYPE_SPECIAL;
+	rq->cmd_type = REQ_TYPE_DRV_PRIV;
 	rq->cmd_flags = REQ_QUIET;
 	ret = blk_execute_rq(drive->queue, cd->disk, rq, 0);
 	blk_put_request(rq);
@@ -166,7 +166,7 @@ int ide_devset_execute(ide_drive_t *drive, const struct ide_devset *setting,
 		return setting->set(drive, arg);
 
 	rq = blk_get_request(q, READ, __GFP_WAIT);
-	rq->cmd_type = REQ_TYPE_SPECIAL;
+	rq->cmd_type = REQ_TYPE_DRV_PRIV;
 	rq->cmd_len = 5;
 	rq->cmd[0] = REQ_DEVSET_EXEC;
 	*(int *)&rq->cmd[1] = arg;
@@ -129,7 +129,7 @@ ide_startstop_t ide_error(ide_drive_t *drive, const char *msg, u8 stat)
 
 		if (cmd)
 			ide_complete_cmd(drive, cmd, stat, err);
-	} else if (blk_pm_request(rq)) {
+	} else if (ata_pm_request(rq)) {
 		rq->errors = 1;
 		ide_complete_pm_rq(drive, rq);
 		return ide_stopped;
@@ -147,7 +147,7 @@ static inline void ide_complete_drive_reset(ide_drive_t *drive, int err)
 {
 	struct request *rq = drive->hwif->rq;
 
-	if (rq && rq->cmd_type == REQ_TYPE_SPECIAL &&
+	if (rq && rq->cmd_type == REQ_TYPE_DRV_PRIV &&
 	    rq->cmd[0] == REQ_DRIVE_RESET) {
 		if (err <= 0 && rq->errors == 0)
 			rq->errors = -EIO;
@@ -97,7 +97,7 @@ static int ide_floppy_callback(ide_drive_t *drive, int dsc)
 			       "Aborting request!\n");
 	}
 
-	if (rq->cmd_type == REQ_TYPE_SPECIAL)
+	if (rq->cmd_type == REQ_TYPE_DRV_PRIV)
 		rq->errors = uptodate ? 0 : IDE_DRV_ERROR_GENERAL;
 
 	return uptodate;
@@ -246,7 +246,7 @@ static ide_startstop_t ide_floppy_do_request(ide_drive_t *drive,
 		} else
 			printk(KERN_ERR PFX "%s: I/O error\n", drive->name);
 
-		if (rq->cmd_type == REQ_TYPE_SPECIAL) {
+		if (rq->cmd_type == REQ_TYPE_DRV_PRIV) {
 			rq->errors = 0;
 			ide_complete_rq(drive, 0, blk_rq_bytes(rq));
 			return ide_stopped;
@@ -265,8 +265,8 @@ static ide_startstop_t ide_floppy_do_request(ide_drive_t *drive,
 		pc = &floppy->queued_pc;
 		idefloppy_create_rw_cmd(drive, pc, rq, (unsigned long)block);
 		break;
-	case REQ_TYPE_SPECIAL:
-	case REQ_TYPE_SENSE:
+	case REQ_TYPE_DRV_PRIV:
+	case REQ_TYPE_ATA_SENSE:
 		pc = (struct ide_atapi_pc *)rq->special;
 		break;
 	case REQ_TYPE_BLOCK_PC:
@@ -135,7 +135,7 @@ EXPORT_SYMBOL(ide_complete_rq);
 
 void ide_kill_rq(ide_drive_t *drive, struct request *rq)
 {
-	u8 drv_req = (rq->cmd_type == REQ_TYPE_SPECIAL) && rq->rq_disk;
+	u8 drv_req = (rq->cmd_type == REQ_TYPE_DRV_PRIV) && rq->rq_disk;
 	u8 media = drive->media;
 
 	drive->failed_pc = NULL;
@@ -320,7 +320,7 @@ static ide_startstop_t start_request (ide_drive_t *drive, struct request *rq)
 		goto kill_rq;
 	}
 
-	if (blk_pm_request(rq))
+	if (ata_pm_request(rq))
 		ide_check_pm_state(drive, rq);
 
 	drive->hwif->tp_ops->dev_select(drive);
@@ -342,8 +342,8 @@ static ide_startstop_t start_request (ide_drive_t *drive, struct request *rq)
 
 	if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE)
 		return execute_drive_cmd(drive, rq);
-	else if (blk_pm_request(rq)) {
-		struct request_pm_state *pm = rq->special;
+	else if (ata_pm_request(rq)) {
+		struct ide_pm_state *pm = rq->special;
 #ifdef DEBUG_PM
 		printk("%s: start_power_step(step: %d)\n",
 			drive->name, pm->pm_step);
@@ -353,7 +353,7 @@ static ide_startstop_t start_request (ide_drive_t *drive, struct request *rq)
 		    pm->pm_step == IDE_PM_COMPLETED)
 			ide_complete_pm_rq(drive, rq);
 		return startstop;
-	} else if (!rq->rq_disk && rq->cmd_type == REQ_TYPE_SPECIAL)
+	} else if (!rq->rq_disk && rq->cmd_type == REQ_TYPE_DRV_PRIV)
 		/*
 		 * TODO: Once all ULDs have been modified to
 		 * check for specific op codes rather than
@@ -538,7 +538,7 @@ repeat:
 	 * state machine.
 	 */
 	if ((drive->dev_flags & IDE_DFLAG_BLOCKED) &&
-	    blk_pm_request(rq) == 0 &&
+	    ata_pm_request(rq) == 0 &&
 	    (rq->cmd_flags & REQ_PREEMPT) == 0) {
 		/* there should be no pending command at this point */
 		ide_unlock_port(hwif);
@@ -222,7 +222,7 @@ static int generic_drive_reset(ide_drive_t *drive)
 	int ret = 0;
 
 	rq = blk_get_request(drive->queue, READ, __GFP_WAIT);
-	rq->cmd_type = REQ_TYPE_SPECIAL;
+	rq->cmd_type = REQ_TYPE_DRV_PRIV;
 	rq->cmd_len = 1;
 	rq->cmd[0] = REQ_DRIVE_RESET;
 	if (blk_execute_rq(drive->queue, NULL, rq, 1))
Some files were not shown because too many files have changed in this diff.