Merge branch 'for-3.10/core' of git://git.kernel.dk/linux-block
Pull block core updates from Jens Axboe:

 - Major bit is Kents prep work for immutable bio vecs.
 - Stable candidate fix for a scheduling-while-atomic in the queue bypass
   operation.
 - Fix for the hang on exceeded rq->datalen 32-bit unsigned when merging
   discard bios.
 - Tejuns changes to convert the writeback thread pool to the generic
   workqueue mechanism.
 - Runtime PM framework, SCSI patches exists on top of these in James'
   tree.
 - A few random fixes.

* 'for-3.10/core' of git://git.kernel.dk/linux-block: (40 commits)
  relay: move remove_buf_file inside relay_close_buf
  partitions/efi.c: replace useless kzalloc's by kmalloc's
  fs/block_dev.c: fix iov_shorten() criteria in blkdev_aio_read()
  block: fix max discard sectors limit
  blkcg: fix "scheduling while atomic" in blk_queue_bypass_start
  Documentation: cfq-iosched: update documentation help for cfq tunables
  writeback: expose the bdi_wq workqueue
  writeback: replace custom worker pool implementation with unbound workqueue
  writeback: remove unused bdi_pending_list
  aoe: Fix unitialized var usage
  bio-integrity: Add explicit field for owner of bip_buf
  block: Add an explicit bio flag for bios that own their bvec
  block: Add bio_alloc_pages()
  block: Convert some code to bio_for_each_segment_all()
  block: Add bio_for_each_segment_all()
  bounce: Refactor __blk_queue_bounce to not use bi_io_vec
  raid1: use bio_copy_data()
  pktcdvd: Use bio_reset() in disabled code to kill bi_idx usage
  pktcdvd: use bio_copy_data()
  block: Add bio_copy_data()
  ...
@@ -5,7 +5,7 @@ The main aim of CFQ scheduler is to provide a fair allocation of the disk
I/O bandwidth for all the processes which requests an I/O operation.

CFQ maintains the per process queue for the processes which request I/O
operation(syncronous requests). In case of asynchronous requests, all the
operation(synchronous requests). In case of asynchronous requests, all the
requests from all the processes are batched together according to their
process's I/O priority.

@@ -66,6 +66,47 @@ This parameter is used to set the timeout of synchronous requests. Default
value of this is 124ms. In case to favor synchronous requests over asynchronous
one, this value should be decreased relative to fifo_expire_async.

group_idle
-----------
This parameter forces idling at the CFQ group level instead of CFQ
queue level. This was introduced after after a bottleneck was observed
in higher end storage due to idle on sequential queue and allow dispatch
from a single queue. The idea with this parameter is that it can be run with
slice_idle=0 and group_idle=8, so that idling does not happen on individual
queues in the group but happens overall on the group and thus still keeps the
IO controller working.
Not idling on individual queues in the group will dispatch requests from
multiple queues in the group at the same time and achieve higher throughput
on higher end storage.

Default value for this parameter is 8ms.

latency
-------
This parameter is used to enable/disable the latency mode of the CFQ
scheduler. If latency mode (called low_latency) is enabled, CFQ tries
to recompute the slice time for each process based on the target_latency set
for the system. This favors fairness over throughput. Disabling low
latency (setting it to 0) ignores target latency, allowing each process in the
system to get a full time slice.

By default low latency mode is enabled.

target_latency
--------------
This parameter is used to calculate the time slice for a process if cfq's
latency mode is enabled. It will ensure that sync requests have an estimated
latency. But if sequential workload is higher(e.g. sequential read),
then to meet the latency constraints, throughput may decrease because of less
time for each process to issue I/O request before the cfq queue is switched.

Though this can be overcome by disabling the latency_mode, it may increase
the read latency for some applications. This parameter allows for changing
target_latency through the sysfs interface which can provide the balanced
throughput and read latency.

Default value for target_latency is 300ms.

slice_async
-----------
This parameter is same as of slice_sync but for asynchronous queue. The

@@ -98,8 +139,8 @@ in the device exceeds this parameter. This parameter is used for synchronous
request.

In case of storage with several disk, this setting can limit the parallel
processing of request. Therefore, increasing the value can imporve the
performace although this can cause the latency of some I/O to increase due
processing of request. Therefore, increasing the value can improve the
performance although this can cause the latency of some I/O to increase due
to more number of requests.

CFQ Group scheduling
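The tunables documented above are exposed per device under /sys/block/<dev>/queue/iosched/. A minimal sketch, not part of the patch, of adjusting them from user space, assuming a disk named sda that is using the cfq scheduler and illustrative values:

/* A minimal sketch only: write CFQ tunables through sysfs from user space.
 * The device name "sda" and the chosen values are illustrative. */
#include <stdio.h>

static int set_cfq_tunable(const char *name, const char *value)
{
	char path[128];
	FILE *f;

	snprintf(path, sizeof(path), "/sys/block/sda/queue/iosched/%s", name);
	f = fopen(path, "w");
	if (!f)
		return -1;
	fprintf(f, "%s\n", value);
	return fclose(f);
}

int main(void)
{
	set_cfq_tunable("slice_idle", "0");	/* idle on the group, not each queue */
	set_cfq_tunable("group_idle", "8");
	set_cfq_tunable("low_latency", "1");	/* favor fairness over throughput */
	set_cfq_tunable("target_latency", "300");
	return 0;
}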
+2 -2
@@ -972,10 +972,10 @@ int blkcg_activate_policy(struct request_queue *q,
	if (!new_blkg)
		return -ENOMEM;

	preloaded = !radix_tree_preload(GFP_KERNEL);

	blk_queue_bypass_start(q);

	preloaded = !radix_tree_preload(GFP_KERNEL);

	/*
	 * Make sure the root blkg exists and count the existing blkgs. As
	 * @q is bypassing at this point, blkg_lookup_create() can't be
+196 -69
@@ -30,6 +30,7 @@
#include <linux/list_sort.h>
#include <linux/delay.h>
#include <linux/ratelimit.h>
#include <linux/pm_runtime.h>

#define CREATE_TRACE_POINTS
#include <trace/events/block.h>

@@ -159,20 +160,10 @@ static void req_bio_endio(struct request *rq, struct bio *bio,
	else if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
		error = -EIO;

	if (unlikely(nbytes > bio->bi_size)) {
		printk(KERN_ERR "%s: want %u bytes done, %u left\n",
		       __func__, nbytes, bio->bi_size);
		nbytes = bio->bi_size;
	}

	if (unlikely(rq->cmd_flags & REQ_QUIET))
		set_bit(BIO_QUIET, &bio->bi_flags);

	bio->bi_size -= nbytes;
	bio->bi_sector += (nbytes >> 9);

	if (bio_integrity(bio))
		bio_integrity_advance(bio, nbytes);
	bio_advance(bio, nbytes);

	/* don't actually finish bio if it's part of flush sequence */
	if (bio->bi_size == 0 && !(rq->cmd_flags & REQ_FLUSH_SEQ))
@@ -1264,6 +1255,16 @@ void part_round_stats(int cpu, struct hd_struct *part)
}
EXPORT_SYMBOL_GPL(part_round_stats);

#ifdef CONFIG_PM_RUNTIME
static void blk_pm_put_request(struct request *rq)
{
	if (rq->q->dev && !(rq->cmd_flags & REQ_PM) && !--rq->q->nr_pending)
		pm_runtime_mark_last_busy(rq->q->dev);
}
#else
static inline void blk_pm_put_request(struct request *rq) {}
#endif

/*
 * queue lock must be held
 */
@@ -1274,6 +1275,8 @@ void __blk_put_request(struct request_queue *q, struct request *req)
	if (unlikely(--req->ref_count))
		return;

	blk_pm_put_request(req);

	elv_completed_request(q, req);

	/* this is a bio leak */

@@ -1597,7 +1600,7 @@ static void handle_bad_sector(struct bio *bio)
	printk(KERN_INFO "%s: rw=%ld, want=%Lu, limit=%Lu\n",
	       bdevname(bio->bi_bdev, b),
	       bio->bi_rw,
	       (unsigned long long)bio->bi_sector + bio_sectors(bio),
	       (unsigned long long)bio_end_sector(bio),
	       (long long)(i_size_read(bio->bi_bdev->bd_inode) >> 9));

	set_bit(BIO_EOF, &bio->bi_flags);
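Many hunks in this merge, starting with handle_bad_sector() above, swap open-coded sector arithmetic for bio accessors. A minimal sketch, not part of the patch, of the equivalences implied by the removed/added line pairs on the 3.10-era struct bio fields; the _sketch names are hypothetical and the real helpers live in include/linux/bio.h:

/* A sketch only: rough equivalents of the accessors used in this merge,
 * inferred from the removed/added pairs. */
static inline unsigned int bio_sectors_sketch(struct bio *bio)
{
	return bio->bi_size >> 9;		/* remaining bytes -> 512-byte sectors */
}

static inline sector_t bio_end_sector_sketch(struct bio *bio)
{
	return bio->bi_sector + bio_sectors_sketch(bio);	/* first sector past the bio */
}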
@@ -2053,6 +2056,28 @@ static void blk_account_io_done(struct request *req)
	}
}

#ifdef CONFIG_PM_RUNTIME
/*
 * Don't process normal requests when queue is suspended
 * or in the process of suspending/resuming
 */
static struct request *blk_pm_peek_request(struct request_queue *q,
					   struct request *rq)
{
	if (q->dev && (q->rpm_status == RPM_SUSPENDED ||
	    (q->rpm_status != RPM_ACTIVE && !(rq->cmd_flags & REQ_PM))))
		return NULL;
	else
		return rq;
}
#else
static inline struct request *blk_pm_peek_request(struct request_queue *q,
						  struct request *rq)
{
	return rq;
}
#endif

/**
 * blk_peek_request - peek at the top of a request queue
 * @q: request queue to peek at
@@ -2075,6 +2100,11 @@ struct request *blk_peek_request(struct request_queue *q)
	int ret;

	while ((rq = __elv_next_request(q)) != NULL) {

		rq = blk_pm_peek_request(q, rq);
		if (!rq)
			break;

		if (!(rq->cmd_flags & REQ_STARTED)) {
			/*
			 * This is the first time the device driver
@@ -2253,8 +2283,7 @@ EXPORT_SYMBOL(blk_fetch_request);
 **/
bool blk_update_request(struct request *req, int error, unsigned int nr_bytes)
{
	int total_bytes, bio_nbytes, next_idx = 0;
	struct bio *bio;
	int total_bytes;

	if (!req->bio)
		return false;

@@ -2300,56 +2329,21 @@ bool blk_update_request(struct request *req, int error, unsigned int nr_bytes)

	blk_account_io_completion(req, nr_bytes);

	total_bytes = bio_nbytes = 0;
	while ((bio = req->bio) != NULL) {
		int nbytes;
	total_bytes = 0;
	while (req->bio) {
		struct bio *bio = req->bio;
		unsigned bio_bytes = min(bio->bi_size, nr_bytes);

		if (nr_bytes >= bio->bi_size) {
		if (bio_bytes == bio->bi_size)
			req->bio = bio->bi_next;
			nbytes = bio->bi_size;
			req_bio_endio(req, bio, nbytes, error);
			next_idx = 0;
			bio_nbytes = 0;
		} else {
			int idx = bio->bi_idx + next_idx;

			if (unlikely(idx >= bio->bi_vcnt)) {
				blk_dump_rq_flags(req, "__end_that");
				printk(KERN_ERR "%s: bio idx %d >= vcnt %d\n",
				       __func__, idx, bio->bi_vcnt);
				break;
			}
		req_bio_endio(req, bio, bio_bytes, error);

			nbytes = bio_iovec_idx(bio, idx)->bv_len;
			BIO_BUG_ON(nbytes > bio->bi_size);
		total_bytes += bio_bytes;
		nr_bytes -= bio_bytes;

			/*
			 * not a complete bvec done
			 */
			if (unlikely(nbytes > nr_bytes)) {
				bio_nbytes += nr_bytes;
				total_bytes += nr_bytes;
				break;
			}

			/*
			 * advance to the next vector
			 */
			next_idx++;
			bio_nbytes += nbytes;
		}

		total_bytes += nbytes;
		nr_bytes -= nbytes;

		bio = req->bio;
		if (bio) {
			/*
			 * end more in this run, or just return 'not-done'
			 */
			if (unlikely(nr_bytes <= 0))
				break;
		}
		if (!nr_bytes)
			break;
	}

	/*
@@ -2365,16 +2359,6 @@ bool blk_update_request(struct request *req, int error, unsigned int nr_bytes)
		return false;
	}

	/*
	 * if the request wasn't completed, update state
	 */
	if (bio_nbytes) {
		req_bio_endio(req, bio, bio_nbytes, error);
		bio->bi_idx += next_idx;
		bio_iovec(bio)->bv_offset += nr_bytes;
		bio_iovec(bio)->bv_len -= nr_bytes;
	}

	req->__data_len -= total_bytes;
	req->buffer = bio_data(req->bio);
@@ -3046,6 +3030,149 @@ void blk_finish_plug(struct blk_plug *plug)
}
EXPORT_SYMBOL(blk_finish_plug);

#ifdef CONFIG_PM_RUNTIME
/**
 * blk_pm_runtime_init - Block layer runtime PM initialization routine
 * @q: the queue of the device
 * @dev: the device the queue belongs to
 *
 * Description:
 *    Initialize runtime-PM-related fields for @q and start auto suspend for
 *    @dev. Drivers that want to take advantage of request-based runtime PM
 *    should call this function after @dev has been initialized, and its
 *    request queue @q has been allocated, and runtime PM for it can not happen
 *    yet(either due to disabled/forbidden or its usage_count > 0). In most
 *    cases, driver should call this function before any I/O has taken place.
 *
 *    This function takes care of setting up using auto suspend for the device,
 *    the autosuspend delay is set to -1 to make runtime suspend impossible
 *    until an updated value is either set by user or by driver. Drivers do
 *    not need to touch other autosuspend settings.
 *
 *    The block layer runtime PM is request based, so only works for drivers
 *    that use request as their IO unit instead of those directly use bio's.
 */
void blk_pm_runtime_init(struct request_queue *q, struct device *dev)
{
	q->dev = dev;
	q->rpm_status = RPM_ACTIVE;
	pm_runtime_set_autosuspend_delay(q->dev, -1);
	pm_runtime_use_autosuspend(q->dev);
}
EXPORT_SYMBOL(blk_pm_runtime_init);

/**
 * blk_pre_runtime_suspend - Pre runtime suspend check
 * @q: the queue of the device
 *
 * Description:
 *    This function will check if runtime suspend is allowed for the device
 *    by examining if there are any requests pending in the queue. If there
 *    are requests pending, the device can not be runtime suspended; otherwise,
 *    the queue's status will be updated to SUSPENDING and the driver can
 *    proceed to suspend the device.
 *
 *    For the not allowed case, we mark last busy for the device so that
 *    runtime PM core will try to autosuspend it some time later.
 *
 *    This function should be called near the start of the device's
 *    runtime_suspend callback.
 *
 * Return:
 *    0		- OK to runtime suspend the device
 *    -EBUSY	- Device should not be runtime suspended
 */
int blk_pre_runtime_suspend(struct request_queue *q)
{
	int ret = 0;

	spin_lock_irq(q->queue_lock);
	if (q->nr_pending) {
		ret = -EBUSY;
		pm_runtime_mark_last_busy(q->dev);
	} else {
		q->rpm_status = RPM_SUSPENDING;
	}
	spin_unlock_irq(q->queue_lock);
	return ret;
}
EXPORT_SYMBOL(blk_pre_runtime_suspend);

/**
 * blk_post_runtime_suspend - Post runtime suspend processing
 * @q: the queue of the device
 * @err: return value of the device's runtime_suspend function
 *
 * Description:
 *    Update the queue's runtime status according to the return value of the
 *    device's runtime suspend function and mark last busy for the device so
 *    that PM core will try to auto suspend the device at a later time.
 *
 *    This function should be called near the end of the device's
 *    runtime_suspend callback.
 */
void blk_post_runtime_suspend(struct request_queue *q, int err)
{
	spin_lock_irq(q->queue_lock);
	if (!err) {
		q->rpm_status = RPM_SUSPENDED;
	} else {
		q->rpm_status = RPM_ACTIVE;
		pm_runtime_mark_last_busy(q->dev);
	}
	spin_unlock_irq(q->queue_lock);
}
EXPORT_SYMBOL(blk_post_runtime_suspend);

/**
 * blk_pre_runtime_resume - Pre runtime resume processing
 * @q: the queue of the device
 *
 * Description:
 *    Update the queue's runtime status to RESUMING in preparation for the
 *    runtime resume of the device.
 *
 *    This function should be called near the start of the device's
 *    runtime_resume callback.
 */
void blk_pre_runtime_resume(struct request_queue *q)
{
	spin_lock_irq(q->queue_lock);
	q->rpm_status = RPM_RESUMING;
	spin_unlock_irq(q->queue_lock);
}
EXPORT_SYMBOL(blk_pre_runtime_resume);

/**
 * blk_post_runtime_resume - Post runtime resume processing
 * @q: the queue of the device
 * @err: return value of the device's runtime_resume function
 *
 * Description:
 *    Update the queue's runtime status according to the return value of the
 *    device's runtime_resume function. If it is successfully resumed, process
 *    the requests that are queued into the device's queue when it is resuming
 *    and then mark last busy and initiate autosuspend for it.
 *
 *    This function should be called near the end of the device's
 *    runtime_resume callback.
 */
void blk_post_runtime_resume(struct request_queue *q, int err)
{
	spin_lock_irq(q->queue_lock);
	if (!err) {
		q->rpm_status = RPM_ACTIVE;
		__blk_run_queue(q);
		pm_runtime_mark_last_busy(q->dev);
		pm_runtime_autosuspend(q->dev);
	} else {
		q->rpm_status = RPM_SUSPENDED;
	}
	spin_unlock_irq(q->queue_lock);
}
EXPORT_SYMBOL(blk_post_runtime_resume);
#endif

int __init blk_dev_init(void)
{
	BUILD_BUG_ON(__REQ_NR_BITS > 8 *
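The kernel-doc above spells out the calling discipline for the new helpers. A minimal sketch, not part of the patch, of how a request-based driver might wire them into its runtime PM callbacks; struct mydev, my_hw_suspend() and my_hw_resume() are hypothetical placeholders for the driver's own data and hardware hooks:

struct mydev {
	struct request_queue *queue;
	/* ... driver-private state ... */
};

static int mydev_runtime_suspend(struct device *dev)
{
	struct mydev *md = dev_get_drvdata(dev);
	int err;

	err = blk_pre_runtime_suspend(md->queue);
	if (err)
		return err;			/* -EBUSY: requests still pending */
	err = my_hw_suspend(md);		/* hypothetical hardware hook */
	blk_post_runtime_suspend(md->queue, err);
	return err;
}

static int mydev_runtime_resume(struct device *dev)
{
	struct mydev *md = dev_get_drvdata(dev);
	int err;

	blk_pre_runtime_resume(md->queue);
	err = my_hw_resume(md);			/* hypothetical hardware hook */
	blk_post_runtime_resume(md->queue, err);
	return err;
}

/*
 * At probe time, once @dev and its request queue exist and before any I/O:
 *
 *	blk_pm_runtime_init(md->queue, dev);
 *	pm_runtime_set_autosuspend_delay(dev, 5000);	(pick a real delay;
 *							 the init leaves it at -1)
 */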
+2 -5
@@ -2270,11 +2270,8 @@ cfq_find_rq_fmerge(struct cfq_data *cfqd, struct bio *bio)
		return NULL;

	cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio));
	if (cfqq) {
		sector_t sector = bio->bi_sector + bio_sectors(bio);

		return elv_rb_find(&cfqq->sort_list, sector);
	}
	if (cfqq)
		return elv_rb_find(&cfqq->sort_list, bio_end_sector(bio));

	return NULL;
}

@@ -132,7 +132,7 @@ deadline_merge(struct request_queue *q, struct request **req, struct bio *bio)
	 * check for front merge
	 */
	if (dd->front_merges) {
		sector_t sector = bio->bi_sector + bio_sectors(bio);
		sector_t sector = bio_end_sector(bio);

		__rq = elv_rb_find(&dd->sort_list[bio_data_dir(bio)], sector);
		if (__rq) {

@@ -34,6 +34,7 @@
#include <linux/blktrace_api.h>
#include <linux/hash.h>
#include <linux/uaccess.h>
#include <linux/pm_runtime.h>

#include <trace/events/block.h>

@@ -536,6 +537,27 @@ void elv_bio_merged(struct request_queue *q, struct request *rq,
		e->type->ops.elevator_bio_merged_fn(q, rq, bio);
}

#ifdef CONFIG_PM_RUNTIME
static void blk_pm_requeue_request(struct request *rq)
{
	if (rq->q->dev && !(rq->cmd_flags & REQ_PM))
		rq->q->nr_pending--;
}

static void blk_pm_add_request(struct request_queue *q, struct request *rq)
{
	if (q->dev && !(rq->cmd_flags & REQ_PM) && q->nr_pending++ == 0 &&
	    (q->rpm_status == RPM_SUSPENDED || q->rpm_status == RPM_SUSPENDING))
		pm_request_resume(q->dev);
}
#else
static inline void blk_pm_requeue_request(struct request *rq) {}
static inline void blk_pm_add_request(struct request_queue *q,
				      struct request *rq)
{
}
#endif

void elv_requeue_request(struct request_queue *q, struct request *rq)
{
	/*
@@ -550,6 +572,8 @@ void elv_requeue_request(struct request_queue *q, struct request *rq)

	rq->cmd_flags &= ~REQ_STARTED;

	blk_pm_requeue_request(rq);

	__elv_add_request(q, rq, ELEVATOR_INSERT_REQUEUE);
}

@@ -572,6 +596,8 @@ void __elv_add_request(struct request_queue *q, struct request *rq, int where)
{
	trace_block_rq_insert(q, rq);

	blk_pm_add_request(q, rq);

	rq->q = q;

	if (rq->cmd_flags & REQ_SOFTBARRIER) {

@@ -238,7 +238,7 @@ static gpt_entry *alloc_read_gpt_entries(struct parsed_partitions *state,
		le32_to_cpu(gpt->sizeof_partition_entry);
	if (!count)
		return NULL;
	pte = kzalloc(count, GFP_KERNEL);
	pte = kmalloc(count, GFP_KERNEL);
	if (!pte)
		return NULL;

@@ -267,7 +267,7 @@ static gpt_header *alloc_read_gpt_header(struct parsed_partitions *state,
	gpt_header *gpt;
	unsigned ssz = bdev_logical_block_size(state->bdev);

	gpt = kzalloc(ssz, GFP_KERNEL);
	gpt = kmalloc(ssz, GFP_KERNEL);
	if (!gpt)
		return NULL;

@@ -928,7 +928,7 @@ bufinit(struct buf *buf, struct request *rq, struct bio *bio)
	buf->resid = bio->bi_size;
	buf->sector = bio->bi_sector;
	bio_pageinc(bio);
	buf->bv = bv = &bio->bi_io_vec[bio->bi_idx];
	buf->bv = bv = bio_iovec(bio);
	buf->bv_resid = bv->bv_len;
	WARN_ON(buf->bv_resid == 0);
}
+1 -2
@@ -334,8 +334,7 @@ static void brd_make_request(struct request_queue *q, struct bio *bio)
	int err = -EIO;

	sector = bio->bi_sector;
	if (sector + (bio->bi_size >> SECTOR_SHIFT) >
	    get_capacity(bdev->bd_disk))
	if (bio_end_sector(bio) > get_capacity(bdev->bd_disk))
		goto out;

	if (unlikely(bio->bi_rw & REQ_DISCARD)) {

@@ -3775,7 +3775,6 @@ static int __floppy_read_block_0(struct block_device *bdev)
	bio_vec.bv_len = size;
	bio_vec.bv_offset = 0;
	bio.bi_vcnt = 1;
	bio.bi_idx = 0;
	bio.bi_size = size;
	bio.bi_bdev = bdev;
	bio.bi_sector = 0;
+23 -79
@@ -901,7 +901,7 @@ static void pkt_iosched_process_queue(struct pktcdvd_device *pd)
			pd->iosched.successive_reads += bio->bi_size >> 10;
		else {
			pd->iosched.successive_reads = 0;
			pd->iosched.last_write = bio->bi_sector + bio_sectors(bio);
			pd->iosched.last_write = bio_end_sector(bio);
		}
		if (pd->iosched.successive_reads >= HI_SPEED_SWITCH) {
			if (pd->read_speed == pd->write_speed) {

@@ -947,31 +947,6 @@ static int pkt_set_segment_merging(struct pktcdvd_device *pd, struct request_que
	}
}

/*
 * Copy CD_FRAMESIZE bytes from src_bio into a destination page
 */
static void pkt_copy_bio_data(struct bio *src_bio, int seg, int offs, struct page *dst_page, int dst_offs)
{
	unsigned int copy_size = CD_FRAMESIZE;

	while (copy_size > 0) {
		struct bio_vec *src_bvl = bio_iovec_idx(src_bio, seg);
		void *vfrom = kmap_atomic(src_bvl->bv_page) +
			src_bvl->bv_offset + offs;
		void *vto = page_address(dst_page) + dst_offs;
		int len = min_t(int, copy_size, src_bvl->bv_len - offs);

		BUG_ON(len < 0);
		memcpy(vto, vfrom, len);
		kunmap_atomic(vfrom);

		seg++;
		offs = 0;
		dst_offs += len;
		copy_size -= len;
	}
}

/*
 * Copy all data for this packet to pkt->pages[], so that
 * a) The number of required segments for the write bio is minimized, which

@@ -1181,16 +1156,15 @@ static int pkt_start_recovery(struct packet_data *pkt)
	new_sector = new_block * (CD_FRAMESIZE >> 9);
	pkt->sector = new_sector;

	bio_reset(pkt->bio);
	pkt->bio->bi_bdev = pd->bdev;
	pkt->bio->bi_rw = REQ_WRITE;
	pkt->bio->bi_sector = new_sector;
	pkt->bio->bi_next = NULL;
	pkt->bio->bi_flags = 1 << BIO_UPTODATE;
	pkt->bio->bi_idx = 0;
	pkt->bio->bi_size = pkt->frames * CD_FRAMESIZE;
	pkt->bio->bi_vcnt = pkt->frames;

	BUG_ON(pkt->bio->bi_rw != REQ_WRITE);
	BUG_ON(pkt->bio->bi_vcnt != pkt->frames);
	BUG_ON(pkt->bio->bi_size != pkt->frames * CD_FRAMESIZE);
	BUG_ON(pkt->bio->bi_end_io != pkt_end_io_packet_write);
	BUG_ON(pkt->bio->bi_private != pkt);
	pkt->bio->bi_end_io = pkt_end_io_packet_write;
	pkt->bio->bi_private = pkt;

	drop_super(sb);
	return 1;

@@ -1325,55 +1299,35 @@ try_next_bio:
 */
static void pkt_start_write(struct pktcdvd_device *pd, struct packet_data *pkt)
{
	struct bio *bio;
	int f;
	int frames_write;
	struct bio_vec *bvec = pkt->w_bio->bi_io_vec;

	bio_reset(pkt->w_bio);
	pkt->w_bio->bi_sector = pkt->sector;
	pkt->w_bio->bi_bdev = pd->bdev;
	pkt->w_bio->bi_end_io = pkt_end_io_packet_write;
	pkt->w_bio->bi_private = pkt;

	/* XXX: locking? */
	for (f = 0; f < pkt->frames; f++) {
		bvec[f].bv_page = pkt->pages[(f * CD_FRAMESIZE) / PAGE_SIZE];
		bvec[f].bv_offset = (f * CD_FRAMESIZE) % PAGE_SIZE;
		if (!bio_add_page(pkt->w_bio, bvec[f].bv_page, CD_FRAMESIZE, bvec[f].bv_offset))
			BUG();
	}
	VPRINTK(DRIVER_NAME": vcnt=%d\n", pkt->w_bio->bi_vcnt);

	/*
	 * Fill-in bvec with data from orig_bios.
	 */
	frames_write = 0;
	spin_lock(&pkt->lock);
	bio_list_for_each(bio, &pkt->orig_bios) {
		int segment = bio->bi_idx;
		int src_offs = 0;
		int first_frame = (bio->bi_sector - pkt->sector) / (CD_FRAMESIZE >> 9);
		int num_frames = bio->bi_size / CD_FRAMESIZE;
		BUG_ON(first_frame < 0);
		BUG_ON(first_frame + num_frames > pkt->frames);
		for (f = first_frame; f < first_frame + num_frames; f++) {
			struct bio_vec *src_bvl = bio_iovec_idx(bio, segment);
	bio_copy_data(pkt->w_bio, pkt->orig_bios.head);

			while (src_offs >= src_bvl->bv_len) {
				src_offs -= src_bvl->bv_len;
				segment++;
				BUG_ON(segment >= bio->bi_vcnt);
				src_bvl = bio_iovec_idx(bio, segment);
			}

			if (src_bvl->bv_len - src_offs >= CD_FRAMESIZE) {
				bvec[f].bv_page = src_bvl->bv_page;
				bvec[f].bv_offset = src_bvl->bv_offset + src_offs;
			} else {
				pkt_copy_bio_data(bio, segment, src_offs,
						  bvec[f].bv_page, bvec[f].bv_offset);
			}
			src_offs += CD_FRAMESIZE;
			frames_write++;
		}
	}
	pkt_set_state(pkt, PACKET_WRITE_WAIT_STATE);
	spin_unlock(&pkt->lock);

	VPRINTK("pkt_start_write: Writing %d frames for zone %llx\n",
		frames_write, (unsigned long long)pkt->sector);
	BUG_ON(frames_write != pkt->write_size);
		pkt->write_size, (unsigned long long)pkt->sector);

	if (test_bit(PACKET_MERGE_SEGS, &pd->flags) || (pkt->write_size < pkt->frames)) {
		pkt_make_local_copy(pkt, bvec);

@@ -1383,16 +1337,6 @@ static void pkt_start_write(struct pktcdvd_device *pd, struct packet_data *pkt)
	}

	/* Start the write request */
	bio_reset(pkt->w_bio);
	pkt->w_bio->bi_sector = pkt->sector;
	pkt->w_bio->bi_bdev = pd->bdev;
	pkt->w_bio->bi_end_io = pkt_end_io_packet_write;
	pkt->w_bio->bi_private = pkt;
	for (f = 0; f < pkt->frames; f++)
		if (!bio_add_page(pkt->w_bio, bvec[f].bv_page, CD_FRAMESIZE, bvec[f].bv_offset))
			BUG();
	VPRINTK(DRIVER_NAME": vcnt=%d\n", pkt->w_bio->bi_vcnt);

	atomic_set(&pkt->io_wait, 1);
	pkt->w_bio->bi_rw = WRITE;
	pkt_queue_bio(pd, pkt->w_bio);

@@ -2431,7 +2375,7 @@ static void pkt_make_request(struct request_queue *q, struct bio *bio)
		cloned_bio->bi_bdev = pd->bdev;
		cloned_bio->bi_private = psd;
		cloned_bio->bi_end_io = pkt_end_io_read_cloned;
		pd->stats.secs_r += bio->bi_size >> 9;
		pd->stats.secs_r += bio_sectors(bio);
		pkt_queue_bio(pd, cloned_bio);
		return;
	}

@@ -2452,7 +2396,7 @@ static void pkt_make_request(struct request_queue *q, struct bio *bio)
	zone = ZONE(bio->bi_sector, pd);
	VPRINTK("pkt_make_request: start = %6llx stop = %6llx\n",
		(unsigned long long)bio->bi_sector,
		(unsigned long long)(bio->bi_sector + bio_sectors(bio)));
		(unsigned long long)bio_end_sector(bio));

	/* Check if we have to split the bio */
	{

@@ -2460,7 +2404,7 @@ static void pkt_make_request(struct request_queue *q, struct bio *bio)
		sector_t last_zone;
		int first_sectors;

		last_zone = ZONE(bio->bi_sector + bio_sectors(bio) - 1, pd);
		last_zone = ZONE(bio_end_sector(bio) - 1, pd);
		if (last_zone != zone) {
			BUG_ON(last_zone != zone + pd->settings.size);
			first_sectors = last_zone - bio->bi_sector;
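The pkt_start_write() conversion above leans on bio_copy_data(), one of the helpers added in this series, to replace the hand-rolled segment walk that was deleted. A minimal sketch, not part of the patch, of the typical call pattern, pairing it with the new bio_alloc_pages(); copy_to_private_pages() is a hypothetical name, and the clone-then-copy pattern is an assumption modelled on the bounce/raid1 conversions in this series:

/* A sketch only: copy a bio's payload into freshly allocated pages. */
static struct bio *copy_to_private_pages(struct bio *src, gfp_t gfp)
{
	struct bio *dst;

	dst = bio_clone(src, gfp);		/* same segment layout as src */
	if (!dst)
		return NULL;
	if (bio_alloc_pages(dst, gfp)) {	/* back every bvec with a fresh page */
		bio_put(dst);
		return NULL;
	}
	bio_copy_data(dst, src);		/* copy src's payload into dst */
	return dst;
}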
+1 -1
@@ -1143,7 +1143,7 @@ static struct bio *bio_clone_range(struct bio *bio_src,
	/* Find first affected segment... */

	resid = offset;
	__bio_for_each_segment(bv, bio_src, idx, 0) {
	bio_for_each_segment(bv, bio_src, idx) {
		if (resid < bv->bv_len)
			break;
		resid -= bv->bv_len;

@@ -858,8 +858,7 @@ static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone)
	unsigned int i;
	struct bio_vec *bv;

	for (i = 0; i < clone->bi_vcnt; i++) {
		bv = bio_iovec_idx(clone, i);
	bio_for_each_segment_all(bv, clone, i) {
		BUG_ON(!bv->bv_page);
		mempool_free(bv->bv_page, cc->page_pool);
		bv->bv_page = NULL;

@@ -458,7 +458,7 @@ static void map_region(struct dm_io_region *io, struct mirror *m,
{
	io->bdev = m->dev->bdev;
	io->sector = map_sector(m, bio);
	io->count = bio->bi_size >> 9;
	io->count = bio_sectors(bio);
}

static void hold_bio(struct mirror_set *ms, struct bio *bio)

@@ -258,7 +258,7 @@ static int stripe_map_range(struct stripe_c *sc, struct bio *bio,
	sector_t begin, end;

	stripe_map_range_sector(sc, bio->bi_sector, target_stripe, &begin);
	stripe_map_range_sector(sc, bio->bi_sector + bio_sectors(bio),
	stripe_map_range_sector(sc, bio_end_sector(bio),
				target_stripe, &end);
	if (begin < end) {
		bio->bi_bdev = sc->stripe[target_stripe].dev->bdev;

@@ -501,7 +501,7 @@ static int verity_map(struct dm_target *ti, struct bio *bio)
		return -EIO;
	}

	if ((bio->bi_sector + bio_sectors(bio)) >>
	if (bio_end_sector(bio) >>
	    (v->data_dev_block_bits - SECTOR_SHIFT) > v->data_blocks) {
		DMERR_LIMIT("io out of range");
		return -EIO;

@@ -519,7 +519,7 @@ static int verity_map(struct dm_target *ti, struct bio *bio)

	bio->bi_end_io = verity_end_io;
	bio->bi_private = io;
	io->io_vec_size = bio->bi_vcnt - bio->bi_idx;
	io->io_vec_size = bio_segments(bio);
	if (io->io_vec_size < DM_VERITY_IO_VEC_INLINE)
		io->io_vec = io->io_vec_inline;
	else
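bio_for_each_segment_all(), used in the dm-crypt hunk above, is another helper introduced by this series: it walks every bvec the bio was created with, starting from index 0 and ignoring bi_idx, so it is only valid for bios the caller owns. A minimal sketch, not part of the patch; free_owned_pages() is a hypothetical helper name:

/* A sketch only: release pages backing a bio that the caller owns. */
static void free_owned_pages(struct bio *bio)
{
	struct bio_vec *bv;
	int i;

	bio_for_each_segment_all(bv, bio, i) {	/* all allocated bvecs, from index 0 */
		if (bv->bv_page)
			__free_page(bv->bv_page);
		bv->bv_page = NULL;
	}
}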
+2 -4
@@ -185,8 +185,7 @@ static void make_request(struct mddev *mddev, struct bio *bio)
			return;
		}

		if (check_sector(conf, bio->bi_sector, bio->bi_sector+(bio->bi_size>>9),
				 WRITE))
		if (check_sector(conf, bio->bi_sector, bio_end_sector(bio), WRITE))
			failit = 1;
		if (check_mode(conf, WritePersistent)) {
			add_sector(conf, bio->bi_sector, WritePersistent);

@@ -196,8 +195,7 @@ static void make_request(struct mddev *mddev, struct bio *bio)
			failit = 1;
	} else {
		/* read request */
		if (check_sector(conf, bio->bi_sector, bio->bi_sector + (bio->bi_size>>9),
				 READ))
		if (check_sector(conf, bio->bi_sector, bio_end_sector(bio), READ))
			failit = 1;
		if (check_mode(conf, ReadTransient))
			failit = 1;

+1 -2
@@ -317,8 +317,7 @@ static void linear_make_request(struct mddev *mddev, struct bio *bio)
		bio_io_error(bio);
		return;
	}
	if (unlikely(bio->bi_sector + (bio->bi_size >> 9) >
		     tmp_dev->end_sector)) {
	if (unlikely(bio_end_sector(bio) > tmp_dev->end_sector)) {
		/* This bio crosses a device boundary, so we have to
		 * split it.
		 */

+4 -13
@@ -197,21 +197,12 @@ void md_trim_bio(struct bio *bio, int offset, int size)
	if (offset == 0 && size == bio->bi_size)
		return;

	bio->bi_sector += offset;
	bio->bi_size = size;
	offset <<= 9;
	clear_bit(BIO_SEG_VALID, &bio->bi_flags);

	while (bio->bi_idx < bio->bi_vcnt &&
	       bio->bi_io_vec[bio->bi_idx].bv_len <= offset) {
		/* remove this whole bio_vec */
		offset -= bio->bi_io_vec[bio->bi_idx].bv_len;
		bio->bi_idx++;
	}
	if (bio->bi_idx < bio->bi_vcnt) {
		bio->bi_io_vec[bio->bi_idx].bv_offset += offset;
		bio->bi_io_vec[bio->bi_idx].bv_len -= offset;
	}
	bio_advance(bio, offset << 9);

	bio->bi_size = size;

	/* avoid any complications with bi_idx being non-zero*/
	if (bio->bi_idx) {
		memmove(bio->bi_io_vec, bio->bi_io_vec+bio->bi_idx,

+4 -5
@@ -502,11 +502,11 @@ static inline int is_io_in_chunk_boundary(struct mddev *mddev,
{
	if (likely(is_power_of_2(chunk_sects))) {
		return chunk_sects >= ((bio->bi_sector & (chunk_sects-1))
					+ (bio->bi_size >> 9));
					+ bio_sectors(bio));
	} else{
		sector_t sector = bio->bi_sector;
		return chunk_sects >= (sector_div(sector, chunk_sects)
				       + (bio->bi_size >> 9));
				       + bio_sectors(bio));
	}
}

@@ -527,8 +527,7 @@ static void raid0_make_request(struct mddev *mddev, struct bio *bio)
	sector_t sector = bio->bi_sector;
	struct bio_pair *bp;
	/* Sanity check -- queue functions should prevent this happening */
	if ((bio->bi_vcnt != 1 && bio->bi_vcnt != 0) ||
	    bio->bi_idx != 0)
	if (bio_segments(bio) > 1)
		goto bad_map;
	/* This is a one page bio that upper layers
	 * refuse to split for us, so we need to split it.

@@ -567,7 +566,7 @@ bad_map:
	printk("md/raid0:%s: make_request bug: can't convert block across chunks"
	       " or bigger than %dk %llu %d\n",
	       mdname(mddev), chunk_sects / 2,
	       (unsigned long long)bio->bi_sector, bio->bi_size >> 10);
	       (unsigned long long)bio->bi_sector, bio_sectors(bio) / 2);

	bio_io_error(bio);
	return;
Some files were not shown because too many files have changed in this diff.