block: switch bios to blk_status_t
Replace bi_error with a new bi_status to allow for a clear conversion. Note that device mapper overloaded bi_error with a private value, which we'll have to keep around at least for now and thus propagate to a proper blk_status_t value.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@fb.com>
commit 4e4cbee93d
parent fc17b6534e
committed by Jens Axboe
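To make the conversion concrete before the diff, here is a standalone, simplified sketch of the errno <-> blk_status_t mapping pattern the commit message describes. It is illustrative userspace C, not the kernel source; the numeric status values and the exact table entries are assumptions for the example. The idea it demonstrates is real: bios now carry an opaque blk_status_t in bi_status, and code translates to and from errno only at the boundaries through a small lookup table (blk_status_to_errno() and errno_to_blk_status() in the hunks below are the real helpers, and the blk_errors table hunk shows the real entry format).

/*
 * Standalone sketch (not the kernel code) of the errno <-> blk_status_t
 * mapping pattern: an opaque status type, a lookup table, and two
 * conversion helpers used only at layer boundaries.
 */
#include <errno.h>
#include <stdio.h>

typedef unsigned char blk_status_t;	/* the kernel uses a u8-backed type */

#define BLK_STS_OK		0
#define BLK_STS_NOTSUPP		1	/* numeric values here are illustrative */
#define BLK_STS_PROTECTION	2
#define BLK_STS_RESOURCE	3
#define BLK_STS_IOERR		4

static const struct {
	int		errno_val;
	const char	*name;
} blk_errors[] = {
	[BLK_STS_OK]		= { 0,		 "ok" },
	[BLK_STS_NOTSUPP]	= { -EOPNOTSUPP, "operation not supported" },
	[BLK_STS_PROTECTION]	= { -EILSEQ,	 "protection" },
	[BLK_STS_RESOURCE]	= { -ENOMEM,	 "kernel resource" },
	[BLK_STS_IOERR]		= { -EIO,	 "I/O" },
};

/* status -> errno: a plain table lookup */
static int blk_status_to_errno(blk_status_t status)
{
	return blk_errors[status].errno_val;
}

/* errno -> status: reverse scan, falling back to the generic I/O error */
static blk_status_t errno_to_blk_status(int error)
{
	for (unsigned int i = 0; i < sizeof(blk_errors) / sizeof(blk_errors[0]); i++)
		if (blk_errors[i].errno_val == error)
			return (blk_status_t)i;
	return BLK_STS_IOERR;
}

int main(void)
{
	printf("-EIO maps to status %u (%s)\n",
	       (unsigned)errno_to_blk_status(-EIO),
	       blk_errors[errno_to_blk_status(-EIO)].name);
	printf("BLK_STS_RESOURCE maps back to errno %d\n",
	       blk_status_to_errno(BLK_STS_RESOURCE));
	return 0;
}

Keeping the table as the single source of truth is why the device-mapper private value mentioned above needs its own entry (BLK_STS_DM_REQUEUE in the hunk below) rather than leaking a raw errno through bi_status.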
@@ -221,7 +221,7 @@ static inline unsigned int bio_integrity_bytes(struct blk_integrity *bi,
  * @bio:	bio to generate/verify integrity metadata for
  * @proc_fn:	Pointer to the relevant processing function
  */
-static int bio_integrity_process(struct bio *bio,
+static blk_status_t bio_integrity_process(struct bio *bio,
 				 integrity_processing_fn *proc_fn)
 {
 	struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev);
@@ -229,7 +229,7 @@ static int bio_integrity_process(struct bio *bio,
 	struct bvec_iter bviter;
 	struct bio_vec bv;
 	struct bio_integrity_payload *bip = bio_integrity(bio);
-	unsigned int ret = 0;
+	blk_status_t ret = BLK_STS_OK;
 	void *prot_buf = page_address(bip->bip_vec->bv_page) +
 		bip->bip_vec->bv_offset;
 
@@ -366,7 +366,7 @@ static void bio_integrity_verify_fn(struct work_struct *work)
 	struct bio *bio = bip->bip_bio;
 	struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev);
 
-	bio->bi_error = bio_integrity_process(bio, bi->profile->verify_fn);
+	bio->bi_status = bio_integrity_process(bio, bi->profile->verify_fn);
 
 	/* Restore original bio completion handler */
 	bio->bi_end_io = bip->bip_end_io;
@@ -395,7 +395,7 @@ void bio_integrity_endio(struct bio *bio)
 	 * integrity metadata. Restore original bio end_io handler
 	 * and run it.
 	 */
-	if (bio->bi_error) {
+	if (bio->bi_status) {
 		bio->bi_end_io = bip->bip_end_io;
 		bio_endio(bio);
 
@@ -309,8 +309,8 @@ static struct bio *__bio_chain_endio(struct bio *bio)
 {
 	struct bio *parent = bio->bi_private;
 
-	if (!parent->bi_error)
-		parent->bi_error = bio->bi_error;
+	if (!parent->bi_status)
+		parent->bi_status = bio->bi_status;
 	bio_put(bio);
 	return parent;
 }
@@ -918,7 +918,7 @@ static void submit_bio_wait_endio(struct bio *bio)
 {
 	struct submit_bio_ret *ret = bio->bi_private;
 
-	ret->error = bio->bi_error;
+	ret->error = blk_status_to_errno(bio->bi_status);
 	complete(&ret->event);
 }
 
@@ -1818,7 +1818,7 @@ again:
 
 	if (bio->bi_bdev && bio_flagged(bio, BIO_TRACE_COMPLETION)) {
 		trace_block_bio_complete(bdev_get_queue(bio->bi_bdev),
-					 bio, bio->bi_error);
+					 bio, bio->bi_status);
 		bio_clear_flag(bio, BIO_TRACE_COMPLETION);
 	}
 
@@ -144,6 +144,9 @@ static const struct {
 	[BLK_STS_PROTECTION]	= { -EILSEQ,	"protection" },
 	[BLK_STS_RESOURCE]	= { -ENOMEM,	"kernel resource" },
 
+	/* device mapper special case, should not leak out: */
+	[BLK_STS_DM_REQUEUE]	= { -EREMCHG, "dm internal retry" },
+
 	/* everything else not covered above: */
 	[BLK_STS_IOERR]		= { -EIO,	"I/O" },
 };
@@ -188,7 +191,7 @@ static void req_bio_endio(struct request *rq, struct bio *bio,
 			  unsigned int nbytes, blk_status_t error)
 {
-	if (error)
-		bio->bi_error = blk_status_to_errno(error);
+	bio->bi_status = error;
 
 	if (unlikely(rq->rq_flags & RQF_QUIET))
 		bio_set_flag(bio, BIO_QUIET);
@@ -1717,7 +1720,7 @@ static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio)
 	blk_queue_split(q, &bio, q->bio_split);
 
 	if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
-		bio->bi_error = -EIO;
+		bio->bi_status = BLK_STS_IOERR;
 		bio_endio(bio);
 		return BLK_QC_T_NONE;
 	}
@@ -1775,7 +1778,10 @@ get_rq:
 	req = get_request(q, bio->bi_opf, bio, GFP_NOIO);
 	if (IS_ERR(req)) {
 		__wbt_done(q->rq_wb, wb_acct);
-		bio->bi_error = PTR_ERR(req);
+		if (PTR_ERR(req) == -ENOMEM)
+			bio->bi_status = BLK_STS_RESOURCE;
+		else
+			bio->bi_status = BLK_STS_IOERR;
 		bio_endio(bio);
 		goto out_unlock;
 	}
@@ -1930,7 +1936,7 @@ generic_make_request_checks(struct bio *bio)
 {
 	struct request_queue *q;
 	int nr_sectors = bio_sectors(bio);
-	int err = -EIO;
+	blk_status_t status = BLK_STS_IOERR;
 	char b[BDEVNAME_SIZE];
 	struct hd_struct *part;
 
@@ -1973,7 +1979,7 @@ generic_make_request_checks(struct bio *bio)
 	    !test_bit(QUEUE_FLAG_WC, &q->queue_flags)) {
 		bio->bi_opf &= ~(REQ_PREFLUSH | REQ_FUA);
 		if (!nr_sectors) {
-			err = 0;
+			status = BLK_STS_OK;
 			goto end_io;
 		}
 	}
@@ -2025,9 +2031,9 @@ generic_make_request_checks(struct bio *bio)
 	return true;
 
 not_supported:
-	err = -EOPNOTSUPP;
+	status = BLK_STS_NOTSUPP;
 end_io:
-	bio->bi_error = err;
+	bio->bi_status = status;
 	bio_endio(bio);
 	return false;
 }
 
@@ -384,9 +384,9 @@ static struct kobj_type integrity_ktype = {
 	.sysfs_ops	= &integrity_ops,
 };
 
-static int blk_integrity_nop_fn(struct blk_integrity_iter *iter)
+static blk_status_t blk_integrity_nop_fn(struct blk_integrity_iter *iter)
 {
-	return 0;
+	return BLK_STS_OK;
 }
 
 static const struct blk_integrity_profile nop_profile = {
 
@@ -143,7 +143,7 @@ static void bounce_end_io(struct bio *bio, mempool_t *pool)
 		mempool_free(bvec->bv_page, pool);
 	}
 
-	bio_orig->bi_error = bio->bi_error;
+	bio_orig->bi_status = bio->bi_status;
 	bio_endio(bio_orig);
 	bio_put(bio);
 }
@@ -163,7 +163,7 @@ static void __bounce_end_io_read(struct bio *bio, mempool_t *pool)
 {
 	struct bio *bio_orig = bio->bi_private;
 
-	if (!bio->bi_error)
+	if (!bio->bi_status)
 		copy_to_high_bio_irq(bio_orig, bio);
 
 	bounce_end_io(bio, pool);
 
@@ -46,8 +46,8 @@ static __be16 t10_pi_ip_fn(void *data, unsigned int len)
  * 16 bit app tag, 32 bit reference tag. Type 3 does not define the ref
  * tag.
  */
-static int t10_pi_generate(struct blk_integrity_iter *iter, csum_fn *fn,
-		unsigned int type)
+static blk_status_t t10_pi_generate(struct blk_integrity_iter *iter,
+		csum_fn *fn, unsigned int type)
 {
 	unsigned int i;
 
@@ -67,11 +67,11 @@ static int t10_pi_generate(struct blk_integrity_iter *iter, csum_fn *fn,
 		iter->seed++;
 	}
 
-	return 0;
+	return BLK_STS_OK;
 }
 
-static int t10_pi_verify(struct blk_integrity_iter *iter, csum_fn *fn,
-		unsigned int type)
+static blk_status_t t10_pi_verify(struct blk_integrity_iter *iter,
+		csum_fn *fn, unsigned int type)
 {
 	unsigned int i;
 
@@ -108,7 +108,7 @@ static int t10_pi_verify(struct blk_integrity_iter *iter, csum_fn *fn,
 			       "(rcvd %04x, want %04x)\n", iter->disk_name,
 			       (unsigned long long)iter->seed,
 			       be16_to_cpu(pi->guard_tag), be16_to_cpu(csum));
-			return -EILSEQ;
+			return BLK_STS_PROTECTION;
 		}
 
 next:
@@ -117,45 +117,45 @@ next:
 		iter->seed++;
 	}
 
-	return 0;
+	return BLK_STS_OK;
 }
 
-static int t10_pi_type1_generate_crc(struct blk_integrity_iter *iter)
+static blk_status_t t10_pi_type1_generate_crc(struct blk_integrity_iter *iter)
 {
 	return t10_pi_generate(iter, t10_pi_crc_fn, 1);
 }
 
-static int t10_pi_type1_generate_ip(struct blk_integrity_iter *iter)
+static blk_status_t t10_pi_type1_generate_ip(struct blk_integrity_iter *iter)
 {
 	return t10_pi_generate(iter, t10_pi_ip_fn, 1);
 }
 
-static int t10_pi_type1_verify_crc(struct blk_integrity_iter *iter)
+static blk_status_t t10_pi_type1_verify_crc(struct blk_integrity_iter *iter)
 {
 	return t10_pi_verify(iter, t10_pi_crc_fn, 1);
 }
 
-static int t10_pi_type1_verify_ip(struct blk_integrity_iter *iter)
+static blk_status_t t10_pi_type1_verify_ip(struct blk_integrity_iter *iter)
 {
 	return t10_pi_verify(iter, t10_pi_ip_fn, 1);
 }
 
-static int t10_pi_type3_generate_crc(struct blk_integrity_iter *iter)
+static blk_status_t t10_pi_type3_generate_crc(struct blk_integrity_iter *iter)
 {
 	return t10_pi_generate(iter, t10_pi_crc_fn, 3);
 }
 
-static int t10_pi_type3_generate_ip(struct blk_integrity_iter *iter)
+static blk_status_t t10_pi_type3_generate_ip(struct blk_integrity_iter *iter)
 {
 	return t10_pi_generate(iter, t10_pi_ip_fn, 3);
 }
 
-static int t10_pi_type3_verify_crc(struct blk_integrity_iter *iter)
+static blk_status_t t10_pi_type3_verify_crc(struct blk_integrity_iter *iter)
 {
 	return t10_pi_verify(iter, t10_pi_crc_fn, 3);
 }
 
-static int t10_pi_type3_verify_ip(struct blk_integrity_iter *iter)
+static blk_status_t t10_pi_type3_verify_ip(struct blk_integrity_iter *iter)
 {
 	return t10_pi_verify(iter, t10_pi_ip_fn, 3);
 }
 
@@ -1070,7 +1070,7 @@ aoe_end_request(struct aoedev *d, struct request *rq, int fastfail)
 	d->ip.rq = NULL;
 	do {
 		bio = rq->bio;
-		bok = !fastfail && !bio->bi_error;
+		bok = !fastfail && !bio->bi_status;
 	} while (__blk_end_request(rq, bok ? BLK_STS_OK : BLK_STS_IOERR, bio->bi_iter.bi_size));
 
 	/* cf. http://lkml.org/lkml/2006/10/31/28 */
@@ -1131,7 +1131,7 @@ ktiocomplete(struct frame *f)
 			ahout->cmdstat, ahin->cmdstat,
 			d->aoemajor, d->aoeminor);
 noskb:		if (buf)
-			buf->bio->bi_error = -EIO;
+			buf->bio->bi_status = BLK_STS_IOERR;
 		goto out;
 	}
 
@@ -1144,7 +1144,7 @@ noskb: if (buf)
 				"aoe: runt data size in read from",
 				(long) d->aoemajor, d->aoeminor,
 				skb->len, n);
-			buf->bio->bi_error = -EIO;
+			buf->bio->bi_status = BLK_STS_IOERR;
 			break;
 		}
 		if (n > f->iter.bi_size) {
@@ -1152,7 +1152,7 @@ noskb: if (buf)
 				"aoe: too-large data size in read from",
 				(long) d->aoemajor, d->aoeminor,
 				n, f->iter.bi_size);
-			buf->bio->bi_error = -EIO;
+			buf->bio->bi_status = BLK_STS_IOERR;
 			break;
 		}
 		bvcpy(skb, f->buf->bio, f->iter, n);
@@ -1654,7 +1654,7 @@ aoe_failbuf(struct aoedev *d, struct buf *buf)
 	if (buf == NULL)
 		return;
 	buf->iter.bi_size = 0;
-	buf->bio->bi_error = -EIO;
+	buf->bio->bi_status = BLK_STS_IOERR;
 	if (buf->nframesout == 0)
 		aoe_end_buf(d, buf);
 }
 
@@ -170,7 +170,7 @@ aoe_failip(struct aoedev *d)
 	if (rq == NULL)
 		return;
 	while ((bio = d->ip.nxbio)) {
-		bio->bi_error = -EIO;
+		bio->bi_status = BLK_STS_IOERR;
 		d->ip.nxbio = bio->bi_next;
 		n = (unsigned long) rq->special;
 		rq->special = (void *) --n;
 
@@ -178,7 +178,7 @@ static int _drbd_md_sync_page_io(struct drbd_device *device,
 	else
 		submit_bio(bio);
 	wait_until_done_or_force_detached(device, bdev, &device->md_io.done);
-	if (!bio->bi_error)
+	if (!bio->bi_status)
 		err = device->md_io.error;
 
 out:
@@ -959,16 +959,16 @@ static void drbd_bm_endio(struct bio *bio)
 	    !bm_test_page_unchanged(b->bm_pages[idx]))
 		drbd_warn(device, "bitmap page idx %u changed during IO!\n", idx);
 
-	if (bio->bi_error) {
+	if (bio->bi_status) {
 		/* ctx error will hold the completed-last non-zero error code,
 		 * in case error codes differ. */
-		ctx->error = bio->bi_error;
+		ctx->error = blk_status_to_errno(bio->bi_status);
 		bm_set_page_io_err(b->bm_pages[idx]);
 		/* Not identical to on disk version of it.
 		 * Is BM_PAGE_IO_ERROR enough? */
 		if (__ratelimit(&drbd_ratelimit_state))
 			drbd_err(device, "IO ERROR %d on bitmap page idx %u\n",
-					bio->bi_error, idx);
+					bio->bi_status, idx);
 	} else {
 		bm_clear_page_io_err(b->bm_pages[idx]);
 		dynamic_drbd_dbg(device, "bitmap page idx %u completed\n", idx);
@@ -1627,7 +1627,7 @@ static inline void drbd_generic_make_request(struct drbd_device *device,
 	__release(local);
 	if (!bio->bi_bdev) {
 		drbd_err(device, "drbd_generic_make_request: bio->bi_bdev == NULL\n");
-		bio->bi_error = -ENODEV;
+		bio->bi_status = BLK_STS_IOERR;
 		bio_endio(bio);
 		return;
 	}
 
@@ -1229,9 +1229,9 @@ void one_flush_endio(struct bio *bio)
 	struct drbd_device *device = octx->device;
 	struct issue_flush_context *ctx = octx->ctx;
 
-	if (bio->bi_error) {
-		ctx->error = bio->bi_error;
-		drbd_info(device, "local disk FLUSH FAILED with status %d\n", bio->bi_error);
+	if (bio->bi_status) {
+		ctx->error = blk_status_to_errno(bio->bi_status);
+		drbd_info(device, "local disk FLUSH FAILED with status %d\n", bio->bi_status);
 	}
 	kfree(octx);
 	bio_put(bio);
 
@@ -203,7 +203,7 @@ void start_new_tl_epoch(struct drbd_connection *connection)
 void complete_master_bio(struct drbd_device *device,
 		struct bio_and_error *m)
 {
-	m->bio->bi_error = m->error;
+	m->bio->bi_status = errno_to_blk_status(m->error);
 	bio_endio(m->bio);
 	dec_ap_bio(device);
 }
@@ -1157,7 +1157,7 @@ static void drbd_process_discard_req(struct drbd_request *req)
 
 	if (blkdev_issue_zeroout(bdev, req->i.sector, req->i.size >> 9,
 			GFP_NOIO, 0))
-		req->private_bio->bi_error = -EIO;
+		req->private_bio->bi_status = BLK_STS_IOERR;
 	bio_endio(req->private_bio);
 }
 
@@ -1225,7 +1225,7 @@ drbd_request_prepare(struct drbd_device *device, struct bio *bio, unsigned long
 		/* only pass the error to the upper layers.
 		 * if user cannot handle io errors, that's not our business. */
 		drbd_err(device, "could not kmalloc() req\n");
-		bio->bi_error = -ENOMEM;
+		bio->bi_status = BLK_STS_RESOURCE;
 		bio_endio(bio);
 		return ERR_PTR(-ENOMEM);
 	}
 
@@ -63,7 +63,7 @@ void drbd_md_endio(struct bio *bio)
 	struct drbd_device *device;
 
 	device = bio->bi_private;
-	device->md_io.error = bio->bi_error;
+	device->md_io.error = blk_status_to_errno(bio->bi_status);
 
 	/* We grabbed an extra reference in _drbd_md_sync_page_io() to be able
 	 * to timeout on the lower level device, and eventually detach from it.
@@ -177,13 +177,13 @@ void drbd_peer_request_endio(struct bio *bio)
 	bool is_discard = bio_op(bio) == REQ_OP_WRITE_ZEROES ||
 			  bio_op(bio) == REQ_OP_DISCARD;
 
-	if (bio->bi_error && __ratelimit(&drbd_ratelimit_state))
+	if (bio->bi_status && __ratelimit(&drbd_ratelimit_state))
 		drbd_warn(device, "%s: error=%d s=%llus\n",
 			is_write ? (is_discard ? "discard" : "write")
-				 : "read", bio->bi_error,
+				 : "read", bio->bi_status,
 			(unsigned long long)peer_req->i.sector);
 
-	if (bio->bi_error)
+	if (bio->bi_status)
 		set_bit(__EE_WAS_ERROR, &peer_req->flags);
 
 	bio_put(bio); /* no need for the bio anymore */
@@ -243,16 +243,16 @@ void drbd_request_endio(struct bio *bio)
 		if (__ratelimit(&drbd_ratelimit_state))
 			drbd_emerg(device, "delayed completion of aborted local request; disk-timeout may be too aggressive\n");
 
-		if (!bio->bi_error)
+		if (!bio->bi_status)
 			drbd_panic_after_delayed_completion_of_aborted_request(device);
 	}
 
 	/* to avoid recursion in __req_mod */
-	if (unlikely(bio->bi_error)) {
+	if (unlikely(bio->bi_status)) {
 		switch (bio_op(bio)) {
 		case REQ_OP_WRITE_ZEROES:
 		case REQ_OP_DISCARD:
-			if (bio->bi_error == -EOPNOTSUPP)
+			if (bio->bi_status == BLK_STS_NOTSUPP)
 				what = DISCARD_COMPLETED_NOTSUPP;
 			else
 				what = DISCARD_COMPLETED_WITH_ERROR;
@@ -272,7 +272,7 @@ void drbd_request_endio(struct bio *bio)
 	}
 
 	bio_put(req->private_bio);
-	req->private_bio = ERR_PTR(bio->bi_error);
+	req->private_bio = ERR_PTR(blk_status_to_errno(bio->bi_status));
 
 	/* not req_mod(), we need irqsave here! */
 	spin_lock_irqsave(&device->resource->req_lock, flags);
 
@@ -3780,9 +3780,9 @@ static void floppy_rb0_cb(struct bio *bio)
 	struct rb0_cbdata *cbdata = (struct rb0_cbdata *)bio->bi_private;
 	int drive = cbdata->drive;
 
-	if (bio->bi_error) {
+	if (bio->bi_status) {
 		pr_info("floppy: error %d while reading block 0\n",
-			bio->bi_error);
+			bio->bi_status);
 		set_bit(FD_OPEN_SHOULD_FAIL_BIT, &UDRS->flags);
 	}
 	complete(&cbdata->complete);
 
@@ -952,9 +952,9 @@ static void pkt_end_io_read(struct bio *bio)
 
 	pkt_dbg(2, pd, "bio=%p sec0=%llx sec=%llx err=%d\n",
 		bio, (unsigned long long)pkt->sector,
-		(unsigned long long)bio->bi_iter.bi_sector, bio->bi_error);
+		(unsigned long long)bio->bi_iter.bi_sector, bio->bi_status);
 
-	if (bio->bi_error)
+	if (bio->bi_status)
 		atomic_inc(&pkt->io_errors);
 	if (atomic_dec_and_test(&pkt->io_wait)) {
 		atomic_inc(&pkt->run_sm);
@@ -969,7 +969,7 @@ static void pkt_end_io_packet_write(struct bio *bio)
 	struct pktcdvd_device *pd = pkt->pd;
 	BUG_ON(!pd);
 
-	pkt_dbg(2, pd, "id=%d, err=%d\n", pkt->id, bio->bi_error);
+	pkt_dbg(2, pd, "id=%d, err=%d\n", pkt->id, bio->bi_status);
 
 	pd->stats.pkt_ended++;
 
@@ -1305,16 +1305,16 @@ static void pkt_start_write(struct pktcdvd_device *pd, struct packet_data *pkt)
 	pkt_queue_bio(pd, pkt->w_bio);
 }
 
-static void pkt_finish_packet(struct packet_data *pkt, int error)
+static void pkt_finish_packet(struct packet_data *pkt, blk_status_t status)
 {
 	struct bio *bio;
 
-	if (error)
+	if (status)
 		pkt->cache_valid = 0;
 
 	/* Finish all bios corresponding to this packet */
 	while ((bio = bio_list_pop(&pkt->orig_bios))) {
-		bio->bi_error = error;
+		bio->bi_status = status;
 		bio_endio(bio);
 	}
 }
@@ -1349,7 +1349,7 @@ static void pkt_run_state_machine(struct pktcdvd_device *pd, struct packet_data
 		if (atomic_read(&pkt->io_wait) > 0)
 			return;
 
-		if (!pkt->w_bio->bi_error) {
+		if (!pkt->w_bio->bi_status) {
 			pkt_set_state(pkt, PACKET_FINISHED_STATE);
 		} else {
 			pkt_set_state(pkt, PACKET_RECOVERY_STATE);
@@ -1366,7 +1366,7 @@ static void pkt_run_state_machine(struct pktcdvd_device *pd, struct packet_data
 			break;
 
 		case PACKET_FINISHED_STATE:
-			pkt_finish_packet(pkt, pkt->w_bio->bi_error);
+			pkt_finish_packet(pkt, pkt->w_bio->bi_status);
 			return;
 
 		default:
@@ -2301,7 +2301,7 @@ static void pkt_end_io_read_cloned(struct bio *bio)
 	struct packet_stacked_data *psd = bio->bi_private;
 	struct pktcdvd_device *pd = psd->pd;
 
-	psd->bio->bi_error = bio->bi_error;
+	psd->bio->bi_status = bio->bi_status;
 	bio_put(bio);
 	bio_endio(psd->bio);
 	mempool_free(psd, psd_pool);
 
@@ -428,7 +428,7 @@ static void ps3vram_cache_cleanup(struct ps3_system_bus_device *dev)
 	kfree(priv->cache.tags);
 }
 
-static int ps3vram_read(struct ps3_system_bus_device *dev, loff_t from,
+static blk_status_t ps3vram_read(struct ps3_system_bus_device *dev, loff_t from,
 			size_t len, size_t *retlen, u_char *buf)
 {
 	struct ps3vram_priv *priv = ps3_system_bus_get_drvdata(dev);
@@ -438,7 +438,7 @@ static int ps3vram_read(struct ps3_system_bus_device *dev, loff_t from,
 		(unsigned int)from, len);
 
 	if (from >= priv->size)
-		return -EIO;
+		return BLK_STS_IOERR;
 
 	if (len > priv->size - from)
 		len = priv->size - from;
@@ -472,14 +472,14 @@ static int ps3vram_read(struct ps3_system_bus_device *dev, loff_t from,
 	return 0;
 }
 
-static int ps3vram_write(struct ps3_system_bus_device *dev, loff_t to,
+static blk_status_t ps3vram_write(struct ps3_system_bus_device *dev, loff_t to,
 			 size_t len, size_t *retlen, const u_char *buf)
 {
 	struct ps3vram_priv *priv = ps3_system_bus_get_drvdata(dev);
 	unsigned int cached, count;
 
 	if (to >= priv->size)
-		return -EIO;
+		return BLK_STS_IOERR;
 
 	if (len > priv->size - to)
 		len = priv->size - to;
@@ -554,7 +554,7 @@ static struct bio *ps3vram_do_bio(struct ps3_system_bus_device *dev,
 	int write = bio_data_dir(bio) == WRITE;
 	const char *op = write ? "write" : "read";
 	loff_t offset = bio->bi_iter.bi_sector << 9;
-	int error = 0;
+	blk_status_t error = 0;
 	struct bio_vec bvec;
 	struct bvec_iter iter;
 	struct bio *next;
@@ -578,7 +578,7 @@ static struct bio *ps3vram_do_bio(struct ps3_system_bus_device *dev,
 
 		if (retlen != len) {
 			dev_err(&dev->core, "Short %s\n", op);
-			error = -EIO;
+			error = BLK_STS_IOERR;
 			goto out;
 		}
 
@@ -593,7 +593,7 @@ out:
 	next = bio_list_peek(&priv->list);
 	spin_unlock_irq(&priv->lock);
 
-	bio->bi_error = error;
+	bio->bi_status = error;
 	bio_endio(bio);
 	return next;
 }
 
@@ -149,7 +149,7 @@ static blk_qc_t rsxx_make_request(struct request_queue *q, struct bio *bio)
 {
 	struct rsxx_cardinfo *card = q->queuedata;
 	struct rsxx_bio_meta *bio_meta;
-	int st = -EINVAL;
+	blk_status_t st = BLK_STS_IOERR;
 
 	blk_queue_split(q, &bio, q->bio_split);
 
@@ -161,15 +161,11 @@ static blk_qc_t rsxx_make_request(struct request_queue *q, struct bio *bio)
 	if (bio_end_sector(bio) > get_capacity(card->gendisk))
 		goto req_err;
 
-	if (unlikely(card->halt)) {
-		st = -EFAULT;
+	if (unlikely(card->halt))
 		goto req_err;
-	}
 
-	if (unlikely(card->dma_fault)) {
-		st = (-EFAULT);
+	if (unlikely(card->dma_fault))
 		goto req_err;
-	}
 
 	if (bio->bi_iter.bi_size == 0) {
 		dev_err(CARD_TO_DEV(card), "size zero BIO!\n");
@@ -178,7 +174,7 @@ static blk_qc_t rsxx_make_request(struct request_queue *q, struct bio *bio)
 
 	bio_meta = kmem_cache_alloc(bio_meta_pool, GFP_KERNEL);
 	if (!bio_meta) {
-		st = -ENOMEM;
+		st = BLK_STS_RESOURCE;
 		goto req_err;
 	}
 
@@ -205,7 +201,7 @@ queue_err:
 	kmem_cache_free(bio_meta_pool, bio_meta);
 req_err:
 	if (st)
-		bio->bi_error = st;
+		bio->bi_status = st;
 	bio_endio(bio);
 	return BLK_QC_T_NONE;
 }
 
@@ -611,7 +611,7 @@ static void rsxx_schedule_done(struct work_struct *work)
 	mutex_unlock(&ctrl->work_lock);
 }
 
-static int rsxx_queue_discard(struct rsxx_cardinfo *card,
+static blk_status_t rsxx_queue_discard(struct rsxx_cardinfo *card,
 				  struct list_head *q,
 				  unsigned int laddr,
 				  rsxx_dma_cb cb,
@@ -621,7 +621,7 @@ static int rsxx_queue_discard(struct rsxx_cardinfo *card,
 
 	dma = kmem_cache_alloc(rsxx_dma_pool, GFP_KERNEL);
 	if (!dma)
-		return -ENOMEM;
+		return BLK_STS_RESOURCE;
 
 	dma->cmd          = HW_CMD_BLK_DISCARD;
 	dma->laddr        = laddr;
@@ -640,7 +640,7 @@ static int rsxx_queue_discard(struct rsxx_cardinfo *card,
 	return 0;
 }
 
-static int rsxx_queue_dma(struct rsxx_cardinfo *card,
+static blk_status_t rsxx_queue_dma(struct rsxx_cardinfo *card,
 			      struct list_head *q,
 			      int dir,
 			      unsigned int dma_off,
@@ -655,7 +655,7 @@ static int rsxx_queue_dma(struct rsxx_cardinfo *card,
 
 	dma = kmem_cache_alloc(rsxx_dma_pool, GFP_KERNEL);
 	if (!dma)
-		return -ENOMEM;
+		return BLK_STS_RESOURCE;
 
 	dma->cmd          = dir ? HW_CMD_BLK_WRITE : HW_CMD_BLK_READ;
 	dma->laddr        = laddr;
@@ -677,7 +677,7 @@ static int rsxx_queue_dma(struct rsxx_cardinfo *card,
 	return 0;
 }
 
-int rsxx_dma_queue_bio(struct rsxx_cardinfo *card,
+blk_status_t rsxx_dma_queue_bio(struct rsxx_cardinfo *card,
 			   struct bio *bio,
 			   atomic_t *n_dmas,
 			   rsxx_dma_cb cb,
@@ -694,7 +694,7 @@ int rsxx_dma_queue_bio(struct rsxx_cardinfo *card,
 	unsigned int dma_len;
 	int dma_cnt[RSXX_MAX_TARGETS];
 	int tgt;
-	int st;
+	blk_status_t st;
 	int i;
 
 	addr8 = bio->bi_iter.bi_sector << 9; /* sectors are 512 bytes */
@@ -769,7 +769,6 @@ bvec_err:
	for (i = 0; i < card->n_targets; i++)
		rsxx_cleanup_dma_queue(&card->ctrl[i], &dma_list[i],
				       FREE_DMA);
-
 	return st;
 }
 
@@ -391,7 +391,7 @@ int rsxx_dma_cancel(struct rsxx_dma_ctrl *ctrl);
 void rsxx_dma_cleanup(void);
 void rsxx_dma_queue_reset(struct rsxx_cardinfo *card);
 int rsxx_dma_configure(struct rsxx_cardinfo *card);
-int rsxx_dma_queue_bio(struct rsxx_cardinfo *card,
+blk_status_t rsxx_dma_queue_bio(struct rsxx_cardinfo *card,
 			   struct bio *bio,
 			   atomic_t *n_dmas,
 			   rsxx_dma_cb cb,
Some files were not shown because too many files have changed in this diff.