block: Abstract out bvec iterator
Immutable biovecs are going to require an explicit iterator. To implement immutable bvecs, a later patch is going to add a bi_bvec_done member to this struct; for now, this patch effectively just renames things.

Signed-off-by: Kent Overstreet <kmo@daterainc.com>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Geert Uytterhoeven <geert@linux-m68k.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Paul Mackerras <paulus@samba.org>
Cc: "Ed L. Cashin" <ecashin@coraid.com>
Cc: Nick Piggin <npiggin@kernel.dk>
Cc: Lars Ellenberg <drbd-dev@lists.linbit.com>
Cc: Jiri Kosina <jkosina@suse.cz>
Cc: Matthew Wilcox <willy@linux.intel.com>
Cc: Geoff Levand <geoff@infradead.org>
Cc: Yehuda Sadeh <yehuda@inktank.com>
Cc: Sage Weil <sage@inktank.com>
Cc: Alex Elder <elder@inktank.com>
Cc: ceph-devel@vger.kernel.org
Cc: Joshua Morris <josh.h.morris@us.ibm.com>
Cc: Philip Kelleher <pjk1939@linux.vnet.ibm.com>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Cc: "Michael S. Tsirkin" <mst@redhat.com>
Cc: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Cc: Jeremy Fitzhardinge <jeremy@goop.org>
Cc: Neil Brown <neilb@suse.de>
Cc: Alasdair Kergon <agk@redhat.com>
Cc: Mike Snitzer <snitzer@redhat.com>
Cc: dm-devel@redhat.com
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: linux390@de.ibm.com
Cc: Boaz Harrosh <bharrosh@panasas.com>
Cc: Benny Halevy <bhalevy@tonian.com>
Cc: "James E.J. Bottomley" <JBottomley@parallels.com>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: "Nicholas A. Bellinger" <nab@linux-iscsi.org>
Cc: Alexander Viro <viro@zeniv.linux.org.uk>
Cc: Chris Mason <chris.mason@fusionio.com>
Cc: "Theodore Ts'o" <tytso@mit.edu>
Cc: Andreas Dilger <adilger.kernel@dilger.ca>
Cc: Jaegeuk Kim <jaegeuk.kim@samsung.com>
Cc: Steven Whitehouse <swhiteho@redhat.com>
Cc: Dave Kleikamp <shaggy@kernel.org>
Cc: Joern Engel <joern@logfs.org>
Cc: Prasad Joshi <prasadjoshi.linux@gmail.com>
Cc: Trond Myklebust <Trond.Myklebust@netapp.com>
Cc: KONISHI Ryusuke <konishi.ryusuke@lab.ntt.co.jp>
Cc: Mark Fasheh <mfasheh@suse.com>
Cc: Joel Becker <jlbec@evilplan.org>
Cc: Ben Myers <bpm@sgi.com>
Cc: xfs@oss.sgi.com
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Len Brown <len.brown@intel.com>
Cc: Pavel Machek <pavel@ucw.cz>
Cc: "Rafael J. Wysocki" <rjw@sisk.pl>
Cc: Herton Ronaldo Krzesinski <herton.krzesinski@canonical.com>
Cc: Ben Hutchings <ben@decadent.org.uk>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Guo Chao <yan@linux.vnet.ibm.com>
Cc: Tejun Heo <tj@kernel.org>
Cc: Asai Thambi S P <asamymuthupa@micron.com>
Cc: Selvan Mani <smani@micron.com>
Cc: Sam Bradshaw <sbradshaw@micron.com>
Cc: Wei Yongjun <yongjun_wei@trendmicro.com.cn>
Cc: "Roger Pau Monné" <roger.pau@citrix.com>
Cc: Jan Beulich <jbeulich@suse.com>
Cc: Stefano Stabellini <stefano.stabellini@eu.citrix.com>
Cc: Ian Campbell <Ian.Campbell@citrix.com>
Cc: Sebastian Ott <sebott@linux.vnet.ibm.com>
Cc: Christian Borntraeger <borntraeger@de.ibm.com>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Jiang Liu <jiang.liu@huawei.com>
Cc: Nitin Gupta <ngupta@vflare.org>
Cc: Jerome Marchand <jmarchand@redhat.com>
Cc: Joe Perches <joe@perches.com>
Cc: Peng Tao <tao.peng@emc.com>
Cc: Andy Adamson <andros@netapp.com>
Cc: fanchaoting <fanchaoting@cn.fujitsu.com>
Cc: Jie Liu <jeff.liu@oracle.com>
Cc: Sunil Mushran <sunil.mushran@gmail.com>
Cc: "Martin K. Petersen" <martin.petersen@oracle.com>
Cc: Namjae Jeon <namjae.jeon@samsung.com>
Cc: Pankaj Kumar <pankaj.km@samsung.com>
Cc: Dan Magenheimer <dan.magenheimer@oracle.com>
Cc: Mel Gorman <mgorman@suse.de>
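For context, the iterator this patch introduces looks roughly like the sketch below (field comments paraphrased from the diff; sector_t comes from <linux/types.h>, and the bi_bvec_done member mentioned above is not yet present at this point in the series):

    struct bvec_iter {
            sector_t        bi_sector;  /* device address in 512-byte sectors */
            unsigned int    bi_size;    /* residual I/O count, in bytes */
            unsigned int    bi_idx;     /* current index into the bio_vec array */
    };

Every field that used to sit directly in struct bio and change as the bio advanced now lives behind bio->bi_iter.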
@@ -447,14 +447,13 @@ struct bio_vec {
  * main unit of I/O for the block layer and lower layers (ie drivers)
  */
 struct bio {
-       sector_t            bi_sector;
        struct bio          *bi_next;    /* request queue link */
        struct block_device *bi_bdev;    /* target device */
        unsigned long       bi_flags;    /* status, command, etc */
        unsigned long       bi_rw;       /* low bits: r/w, high: priority */

        unsigned int        bi_vcnt;     /* how may bio_vec's */
-       unsigned int        bi_idx;      /* current index into bio_vec array */
+       struct bvec_iter    bi_iter;     /* current index into bio_vec array */

-       unsigned int        bi_size;     /* total size in bytes */
        unsigned short      bi_phys_segments; /* segments after physaddr coalesce*/
@@ -480,7 +479,7 @@ With this multipage bio design:
 - Code that traverses the req list can find all the segments of a bio
   by using rq_for_each_segment. This handles the fact that a request
   has multiple bios, each of which can have multiple segments.
-- Drivers which can't process a large bio in one shot can use the bi_idx
+- Drivers which can't process a large bio in one shot can use the bi_iter
   field to keep track of the next bio_vec entry to process.
   (e.g a 1MB bio_vec needs to be handled in max 128kB chunks for IDE)
 [TBD: Should preferably also have a bi_voffset and bi_vlen to avoid modifying
@@ -589,7 +588,7 @@ driver should not modify these values. The block layer sets up the
 nr_sectors and current_nr_sectors fields (based on the corresponding
 hard_xxx values and the number of bytes transferred) and updates it on
 every transfer that invokes end_that_request_first. It does the same for the
-buffer, bio, bio->bi_idx fields too.
+buffer, bio, bio->bi_iter fields too.

 The buffer field is just a virtual address mapping of the current segment
 of the i/o buffer in cases where the buffer resides in low-memory. For high
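The hunks that follow are uniformly mechanical. As a summary of the mapping, the sketch below uses invented helper functions (only the old/new field paths are taken from this patch); bi_idx moves into the iterator the same way, as bio->bi_iter.bi_idx:

    /* Invented helpers; only the field paths come from this diff. */
    static inline sector_t example_bio_sector(const struct bio *bio)
    {
            return bio->bi_iter.bi_sector;  /* was: bio->bi_sector */
    }

    static inline unsigned int example_bio_bytes(const struct bio *bio)
    {
            return bio->bi_iter.bi_size;    /* was: bio->bi_size */
    }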
@@ -64,7 +64,7 @@ static void nfhd_make_request(struct request_queue *queue, struct bio *bio)
        struct nfhd_device *dev = queue->queuedata;
        struct bio_vec *bvec;
        int i, dir, len, shift;
-       sector_t sec = bio->bi_sector;
+       sector_t sec = bio->bi_iter.bi_sector;

        dir = bio_data_dir(bio);
        shift = dev->bshift;
@@ -113,7 +113,8 @@ axon_ram_make_request(struct request_queue *queue, struct bio *bio)
        unsigned int transfered;
        unsigned short idx;

-       phys_mem = bank->io_addr + (bio->bi_sector << AXON_RAM_SECTOR_SHIFT);
+       phys_mem = bank->io_addr + (bio->bi_iter.bi_sector <<
+                                   AXON_RAM_SECTOR_SHIFT);
        phys_end = bank->io_addr + bank->size;
        transfered = 0;
        bio_for_each_segment(vec, bio, idx) {
+18 -18
@@ -130,7 +130,7 @@ static void req_bio_endio(struct request *rq, struct bio *bio,
        bio_advance(bio, nbytes);

        /* don't actually finish bio if it's part of flush sequence */
-       if (bio->bi_size == 0 && !(rq->cmd_flags & REQ_FLUSH_SEQ))
+       if (bio->bi_iter.bi_size == 0 && !(rq->cmd_flags & REQ_FLUSH_SEQ))
                bio_endio(bio, error);
 }
@@ -1326,7 +1326,7 @@ void blk_add_request_payload(struct request *rq, struct page *page,
        bio->bi_io_vec->bv_offset = 0;
        bio->bi_io_vec->bv_len = len;

-       bio->bi_size = len;
+       bio->bi_iter.bi_size = len;
        bio->bi_vcnt = 1;
        bio->bi_phys_segments = 1;
@@ -1351,7 +1351,7 @@ bool bio_attempt_back_merge(struct request_queue *q, struct request *req,

        req->biotail->bi_next = bio;
        req->biotail = bio;
-       req->__data_len += bio->bi_size;
+       req->__data_len += bio->bi_iter.bi_size;
        req->ioprio = ioprio_best(req->ioprio, bio_prio(bio));

        blk_account_io_start(req, false);
@@ -1380,8 +1380,8 @@ bool bio_attempt_front_merge(struct request_queue *q, struct request *req,
         * not touch req->buffer either...
         */
        req->buffer = bio_data(bio);
-       req->__sector = bio->bi_sector;
-       req->__data_len += bio->bi_size;
+       req->__sector = bio->bi_iter.bi_sector;
+       req->__data_len += bio->bi_iter.bi_size;
        req->ioprio = ioprio_best(req->ioprio, bio_prio(bio));

        blk_account_io_start(req, false);
@@ -1459,7 +1459,7 @@ void init_request_from_bio(struct request *req, struct bio *bio)
                req->cmd_flags |= REQ_FAILFAST_MASK;

        req->errors = 0;
-       req->__sector = bio->bi_sector;
+       req->__sector = bio->bi_iter.bi_sector;
        req->ioprio = bio_prio(bio);
        blk_rq_bio_prep(req->q, req, bio);
 }
@@ -1583,12 +1583,12 @@ static inline void blk_partition_remap(struct bio *bio)
        if (bio_sectors(bio) && bdev != bdev->bd_contains) {
                struct hd_struct *p = bdev->bd_part;

-               bio->bi_sector += p->start_sect;
+               bio->bi_iter.bi_sector += p->start_sect;
                bio->bi_bdev = bdev->bd_contains;

                trace_block_bio_remap(bdev_get_queue(bio->bi_bdev), bio,
                                      bdev->bd_dev,
-                                     bio->bi_sector - p->start_sect);
+                                     bio->bi_iter.bi_sector - p->start_sect);
        }
 }
@@ -1654,7 +1654,7 @@ static inline int bio_check_eod(struct bio *bio, unsigned int nr_sectors)
        /* Test device or partition size, when known. */
        maxsector = i_size_read(bio->bi_bdev->bd_inode) >> 9;
        if (maxsector) {
-               sector_t sector = bio->bi_sector;
+               sector_t sector = bio->bi_iter.bi_sector;

                if (maxsector < nr_sectors || maxsector - nr_sectors < sector) {
                        /*
@@ -1690,7 +1690,7 @@ generic_make_request_checks(struct bio *bio)
                       "generic_make_request: Trying to access "
                       "nonexistent block-device %s (%Lu)\n",
                       bdevname(bio->bi_bdev, b),
-                      (long long) bio->bi_sector);
+                      (long long) bio->bi_iter.bi_sector);
                goto end_io;
        }
@@ -1704,9 +1704,9 @@ generic_make_request_checks(struct bio *bio)
        }

        part = bio->bi_bdev->bd_part;
-       if (should_fail_request(part, bio->bi_size) ||
+       if (should_fail_request(part, bio->bi_iter.bi_size) ||
            should_fail_request(&part_to_disk(part)->part0,
-                               bio->bi_size))
+                               bio->bi_iter.bi_size))
                goto end_io;

        /*
@@ -1865,7 +1865,7 @@ void submit_bio(int rw, struct bio *bio)
                if (rw & WRITE) {
                        count_vm_events(PGPGOUT, count);
                } else {
-                       task_io_account_read(bio->bi_size);
+                       task_io_account_read(bio->bi_iter.bi_size);
                        count_vm_events(PGPGIN, count);
                }
@@ -1874,7 +1874,7 @@ void submit_bio(int rw, struct bio *bio)
                printk(KERN_DEBUG "%s(%d): %s block %Lu on %s (%u sectors)\n",
                        current->comm, task_pid_nr(current),
                        (rw & WRITE) ? "WRITE" : "READ",
-                       (unsigned long long)bio->bi_sector,
+                       (unsigned long long)bio->bi_iter.bi_sector,
                        bdevname(bio->bi_bdev, b),
                        count);
        }
@@ -2007,7 +2007,7 @@ unsigned int blk_rq_err_bytes(const struct request *rq)
        for (bio = rq->bio; bio; bio = bio->bi_next) {
                if ((bio->bi_rw & ff) != ff)
                        break;
-               bytes += bio->bi_size;
+               bytes += bio->bi_iter.bi_size;
        }

        /* this could lead to infinite loop */
@@ -2378,9 +2378,9 @@ bool blk_update_request(struct request *req, int error, unsigned int nr_bytes)
        total_bytes = 0;
        while (req->bio) {
                struct bio *bio = req->bio;
-               unsigned bio_bytes = min(bio->bi_size, nr_bytes);
+               unsigned bio_bytes = min(bio->bi_iter.bi_size, nr_bytes);

-               if (bio_bytes == bio->bi_size)
+               if (bio_bytes == bio->bi_iter.bi_size)
                        req->bio = bio->bi_next;

                req_bio_endio(req, bio, bio_bytes, error);
@@ -2728,7 +2728,7 @@ void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
                rq->nr_phys_segments = bio_phys_segments(q, bio);
                rq->buffer = bio_data(bio);
        }
-       rq->__data_len = bio->bi_size;
+       rq->__data_len = bio->bi_iter.bi_size;
        rq->bio = rq->biotail = bio;

        if (bio->bi_bdev)
+1 -1
@@ -548,7 +548,7 @@ int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask,
         * copied from blk_rq_pos(rq).
         */
        if (error_sector)
-               *error_sector = bio->bi_sector;
+               *error_sector = bio->bi_iter.bi_sector;

        bio_put(bio);
        return ret;
+6 -6
@@ -108,12 +108,12 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
                        req_sects = end_sect - sector;
                }

-               bio->bi_sector = sector;
+               bio->bi_iter.bi_sector = sector;
                bio->bi_end_io = bio_batch_end_io;
                bio->bi_bdev = bdev;
                bio->bi_private = &bb;

-               bio->bi_size = req_sects << 9;
+               bio->bi_iter.bi_size = req_sects << 9;
                nr_sects -= req_sects;
                sector = end_sect;
@@ -174,7 +174,7 @@ int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
                        break;
                }

-               bio->bi_sector = sector;
+               bio->bi_iter.bi_sector = sector;
                bio->bi_end_io = bio_batch_end_io;
                bio->bi_bdev = bdev;
                bio->bi_private = &bb;
@@ -184,11 +184,11 @@ int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
                bio->bi_io_vec->bv_len = bdev_logical_block_size(bdev);

                if (nr_sects > max_write_same_sectors) {
-                       bio->bi_size = max_write_same_sectors << 9;
+                       bio->bi_iter.bi_size = max_write_same_sectors << 9;
                        nr_sects -= max_write_same_sectors;
                        sector += max_write_same_sectors;
                } else {
-                       bio->bi_size = nr_sects << 9;
+                       bio->bi_iter.bi_size = nr_sects << 9;
                        nr_sects = 0;
                }
@@ -240,7 +240,7 @@ int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
                        break;
                }

-               bio->bi_sector = sector;
+               bio->bi_iter.bi_sector = sector;
                bio->bi_bdev = bdev;
                bio->bi_end_io = bio_batch_end_io;
                bio->bi_private = &bb;
+3 -3
@@ -20,7 +20,7 @@ int blk_rq_append_bio(struct request_queue *q, struct request *rq,
                rq->biotail->bi_next = bio;
                rq->biotail = bio;

-               rq->__data_len += bio->bi_size;
+               rq->__data_len += bio->bi_iter.bi_size;
        }
        return 0;
 }
@@ -76,7 +76,7 @@ static int __blk_rq_map_user(struct request_queue *q, struct request *rq,

        ret = blk_rq_append_bio(q, rq, bio);
        if (!ret)
-               return bio->bi_size;
+               return bio->bi_iter.bi_size;

        /* if it was boucned we must call the end io function */
        bio_endio(bio, 0);
@@ -220,7 +220,7 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
        if (IS_ERR(bio))
                return PTR_ERR(bio);

-       if (bio->bi_size != len) {
+       if (bio->bi_iter.bi_size != len) {
                /*
                 * Grab an extra reference to this bio, as bio_unmap_user()
                 * expects to be able to drop it twice as it happens on the
+2 -2
@@ -543,9 +543,9 @@ bool blk_rq_merge_ok(struct request *rq, struct bio *bio)

 int blk_try_merge(struct request *rq, struct bio *bio)
 {
-       if (blk_rq_pos(rq) + blk_rq_sectors(rq) == bio->bi_sector)
+       if (blk_rq_pos(rq) + blk_rq_sectors(rq) == bio->bi_iter.bi_sector)
                return ELEVATOR_BACK_MERGE;
-       else if (blk_rq_pos(rq) - bio_sectors(bio) == bio->bi_sector)
+       else if (blk_rq_pos(rq) - bio_sectors(bio) == bio->bi_iter.bi_sector)
                return ELEVATOR_FRONT_MERGE;
        return ELEVATOR_NO_MERGE;
 }
+1 -1
@@ -301,7 +301,7 @@ void blk_mq_complete_request(struct request *rq, int error)
                struct bio *next = bio->bi_next;

                bio->bi_next = NULL;
-               bytes += bio->bi_size;
+               bytes += bio->bi_iter.bi_size;
                blk_mq_bio_endio(rq, bio, error);
                bio = next;
        }
@@ -877,14 +877,14 @@ static bool tg_with_in_bps_limit(struct throtl_grp *tg, struct bio *bio,
        do_div(tmp, HZ);
        bytes_allowed = tmp;

-       if (tg->bytes_disp[rw] + bio->bi_size <= bytes_allowed) {
+       if (tg->bytes_disp[rw] + bio->bi_iter.bi_size <= bytes_allowed) {
                if (wait)
                        *wait = 0;
                return 1;
        }

        /* Calc approx time to dispatch */
-       extra_bytes = tg->bytes_disp[rw] + bio->bi_size - bytes_allowed;
+       extra_bytes = tg->bytes_disp[rw] + bio->bi_iter.bi_size - bytes_allowed;
        jiffy_wait = div64_u64(extra_bytes * HZ, tg->bps[rw]);

        if (!jiffy_wait)
@@ -987,7 +987,7 @@ static void throtl_charge_bio(struct throtl_grp *tg, struct bio *bio)
        bool rw = bio_data_dir(bio);

        /* Charge the bio to the group */
-       tg->bytes_disp[rw] += bio->bi_size;
+       tg->bytes_disp[rw] += bio->bi_iter.bi_size;
        tg->io_disp[rw]++;

        /*
@@ -1003,8 +1003,8 @@ static void throtl_charge_bio(struct throtl_grp *tg, struct bio *bio)
         */
        if (!(bio->bi_rw & REQ_THROTTLED)) {
                bio->bi_rw |= REQ_THROTTLED;
-               throtl_update_dispatch_stats(tg_to_blkg(tg), bio->bi_size,
-                                            bio->bi_rw);
+               throtl_update_dispatch_stats(tg_to_blkg(tg),
+                                            bio->bi_iter.bi_size, bio->bi_rw);
        }
 }
@@ -1508,7 +1508,7 @@ bool blk_throtl_bio(struct request_queue *q, struct bio *bio)
        if (tg) {
                if (!tg->has_rules[rw]) {
                        throtl_update_dispatch_stats(tg_to_blkg(tg),
-                                       bio->bi_size, bio->bi_rw);
+                                       bio->bi_iter.bi_size, bio->bi_rw);
                        goto out_unlock_rcu;
                }
        }
@@ -1564,7 +1564,7 @@ bool blk_throtl_bio(struct request_queue *q, struct bio *bio)
        /* out-of-limit, queue to @tg */
        throtl_log(sq, "[%c] bio. bdisp=%llu sz=%u bps=%llu iodisp=%u iops=%u queued=%d/%d",
                   rw == READ ? 'R' : 'W',
-                  tg->bytes_disp[rw], bio->bi_size, tg->bps[rw],
+                  tg->bytes_disp[rw], bio->bi_iter.bi_size, tg->bps[rw],
                   tg->io_disp[rw], tg->iops[rw],
                   sq->nr_queued[READ], sq->nr_queued[WRITE]);
+1 -1
@@ -440,7 +440,7 @@ int elv_merge(struct request_queue *q, struct request **req, struct bio *bio)
        /*
         * See if our hash lookup can find a potential backmerge.
         */
-       __rq = elv_rqhash_find(q, bio->bi_sector);
+       __rq = elv_rqhash_find(q, bio->bi_iter.bi_sector);
        if (__rq && elv_rq_merge_ok(__rq, bio)) {
                *req = __rq;
                return ELEVATOR_BACK_MERGE;
@@ -929,8 +929,8 @@ bufinit(struct buf *buf, struct request *rq, struct bio *bio)
        memset(buf, 0, sizeof(*buf));
        buf->rq = rq;
        buf->bio = bio;
-       buf->resid = bio->bi_size;
-       buf->sector = bio->bi_sector;
+       buf->resid = bio->bi_iter.bi_size;
+       buf->sector = bio->bi_iter.bi_sector;
        bio_pageinc(bio);
        buf->bv = bio_iovec(bio);
        buf->bv_resid = buf->bv->bv_len;
@@ -1152,7 +1152,7 @@ aoe_end_request(struct aoedev *d, struct request *rq, int fastfail)
        do {
                bio = rq->bio;
                bok = !fastfail && test_bit(BIO_UPTODATE, &bio->bi_flags);
-       } while (__blk_end_request(rq, bok ? 0 : -EIO, bio->bi_size));
+       } while (__blk_end_request(rq, bok ? 0 : -EIO, bio->bi_iter.bi_size));

        /* cf. http://lkml.org/lkml/2006/10/31/28 */
        if (!fastfail)
+2 -2
@@ -333,13 +333,13 @@ static void brd_make_request(struct request_queue *q, struct bio *bio)
        int i;
        int err = -EIO;

-       sector = bio->bi_sector;
+       sector = bio->bi_iter.bi_sector;
        if (bio_end_sector(bio) > get_capacity(bdev->bd_disk))
                goto out;

        if (unlikely(bio->bi_rw & REQ_DISCARD)) {
                err = 0;
-               discard_from_brd(brd, sector, bio->bi_size);
+               discard_from_brd(brd, sector, bio->bi_iter.bi_size);
                goto out;
        }
@@ -159,7 +159,7 @@ static int _drbd_md_sync_page_io(struct drbd_conf *mdev,

        bio = bio_alloc_drbd(GFP_NOIO);
        bio->bi_bdev = bdev->md_bdev;
-       bio->bi_sector = sector;
+       bio->bi_iter.bi_sector = sector;
        err = -EIO;
        if (bio_add_page(bio, page, size, 0) != size)
                goto out;
@@ -1028,7 +1028,7 @@ static void bm_page_io_async(struct bm_aio_ctx *ctx, int page_nr, int rw) __must
        } else
                page = b->bm_pages[page_nr];
        bio->bi_bdev = mdev->ldev->md_bdev;
-       bio->bi_sector = on_disk_sector;
+       bio->bi_iter.bi_sector = on_disk_sector;
        /* bio_add_page of a single page to an empty bio will always succeed,
         * according to api. Do we want to assert that? */
        bio_add_page(bio, page, len, 0);
@@ -1333,7 +1333,7 @@ next_bio:
                goto fail;
        }
        /* > peer_req->i.sector, unless this is the first bio */
-       bio->bi_sector = sector;
+       bio->bi_iter.bi_sector = sector;
        bio->bi_bdev = mdev->ldev->backing_bdev;
        bio->bi_rw = rw;
        bio->bi_private = peer_req;
@@ -1353,7 +1353,7 @@ next_bio:
                        dev_err(DEV,
                                "bio_add_page failed for len=%u, "
                                "bi_vcnt=0 (bi_sector=%llu)\n",
-                               len, (unsigned long long)bio->bi_sector);
+                               len, (uint64_t)bio->bi_iter.bi_sector);
                        err = -ENOSPC;
                        goto fail;
                }
@@ -1615,7 +1615,7 @@ static int recv_dless_read(struct drbd_conf *mdev, struct drbd_request *req,
        mdev->recv_cnt += data_size>>9;

        bio = req->master_bio;
-       D_ASSERT(sector == bio->bi_sector);
+       D_ASSERT(sector == bio->bi_iter.bi_sector);

        bio_for_each_segment(bvec, bio, i) {
                void *mapped = kmap(bvec->bv_page) + bvec->bv_offset;
@@ -77,8 +77,8 @@ static struct drbd_request *drbd_req_new(struct drbd_conf *mdev,
        req->epoch = 0;

        drbd_clear_interval(&req->i);
-       req->i.sector = bio_src->bi_sector;
-       req->i.size = bio_src->bi_size;
+       req->i.sector = bio_src->bi_iter.bi_sector;
+       req->i.size = bio_src->bi_iter.bi_size;
        req->i.local = true;
        req->i.waiting = false;
@@ -1280,7 +1280,7 @@ void drbd_make_request(struct request_queue *q, struct bio *bio)
        /*
         * what we "blindly" assume:
         */
-       D_ASSERT(IS_ALIGNED(bio->bi_size, 512));
+       D_ASSERT(IS_ALIGNED(bio->bi_iter.bi_size, 512));

        inc_ap_bio(mdev);
        __drbd_make_request(mdev, bio, start_time);
@@ -269,7 +269,7 @@ static inline void drbd_req_make_private_bio(struct drbd_request *req, struct bi

 /* Short lived temporary struct on the stack.
  * We could squirrel the error to be returned into
- * bio->bi_size, or similar. But that would be too ugly. */
+ * bio->bi_iter.bi_size, or similar. But that would be too ugly. */
 struct bio_and_error {
        struct bio *bio;
        int error;
@@ -3775,9 +3775,9 @@ static int __floppy_read_block_0(struct block_device *bdev)
        bio_vec.bv_len = size;
        bio_vec.bv_offset = 0;
        bio.bi_vcnt = 1;
-       bio.bi_size = size;
+       bio.bi_iter.bi_size = size;
        bio.bi_bdev = bdev;
-       bio.bi_sector = 0;
+       bio.bi_iter.bi_sector = 0;
        bio.bi_flags = (1 << BIO_QUIET);
        init_completion(&complete);
        bio.bi_private = &complete;
@@ -415,7 +415,7 @@ static int do_bio_filebacked(struct loop_device *lo, struct bio *bio)
        loff_t pos;
        int ret;

-       pos = ((loff_t) bio->bi_sector << 9) + lo->lo_offset;
+       pos = ((loff_t) bio->bi_iter.bi_sector << 9) + lo->lo_offset;

        if (bio_rw(bio) == WRITE) {
                struct file *file = lo->lo_backing_file;
@@ -444,7 +444,7 @@ static int do_bio_filebacked(struct loop_device *lo, struct bio *bio)
                        goto out;
                }
                ret = file->f_op->fallocate(file, mode, pos,
-                                           bio->bi_size);
+                                           bio->bi_iter.bi_size);
                if (unlikely(ret && ret != -EINVAL &&
                             ret != -EOPNOTSUPP))
                        ret = -EIO;
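The loop.c hunk above shows the most common consumer pattern: converting the starting sector, now held in bio->bi_iter, into a byte offset. Below is a minimal self-contained userspace mock of that arithmetic (the structs are stripped-down stand-ins, not the kernel definitions):

    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t sector_t;

    struct bvec_iter {
            sector_t     bi_sector; /* device address in 512-byte sectors */
            unsigned int bi_size;   /* residual I/O count, in bytes */
            unsigned int bi_idx;    /* current index into the bio_vec array */
    };

    struct bio {
            struct bvec_iter bi_iter;
            /* ...remaining members elided... */
    };

    int main(void)
    {
            struct bio bio = { .bi_iter = { .bi_sector = 2048, .bi_size = 4096 } };

            /* Same computation as the loop.c hunk: sector number << 9 = byte offset. */
            long long pos = (long long)bio.bi_iter.bi_sector << 9;

            printf("offset=%lld bytes, remaining=%u bytes\n", pos, bio.bi_iter.bi_size);
            return 0;
    }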
Some files were not shown because too many files have changed in this diff.