Merge tag 'for-5.18/block-2022-03-18' of git://git.kernel.dk/linux-block
Pull block updates from Jens Axboe:

 - BFQ cleanups and fixes (Yu, Zhang, Yahu, Paolo)

 - blk-rq-qos completion fix (Tejun)

 - blk-cgroup merge fix (Tejun)

 - Add offline error return value to distinguish it from an IO error on
   the device (Song)

 - IO stats fixes (Zhang, Christoph)

 - blkcg refcount fixes (Ming, Yu)

 - Fix for indefinite dispatch loop softlockup (Shin'ichiro)

 - blk-mq hardware queue management improvements (Ming)

 - sbitmap dead code removal (Ming, John)

 - Plugging merge improvements (me)

 - Show blk-crypto capabilities in sysfs (Eric)

 - Multiple delayed queue run improvement (David)

 - Block throttling fixes (Ming)

 - Start deprecating auto module loading based on dev_t (Christoph)

 - bio allocation improvements (Christoph, Chaitanya)

 - Get rid of bio_devname (Christoph)

 - bio clone improvements (Christoph)

 - Block plugging improvements (Christoph)

 - Get rid of genhd.h header (Christoph)

 - Ensure drivers use appropriate flush helpers (Christoph)

 - Refcounting improvements (Christoph)

 - Queue initialization and teardown improvements (Ming, Christoph)

 - Misc fixes/improvements (Barry, Chaitanya, Colin, Dan, Jiapeng,
   Lukas, Nian, Yang, Eric, Chengming)

* tag 'for-5.18/block-2022-03-18' of git://git.kernel.dk/linux-block: (127 commits)
  block: cancel all throttled bios in del_gendisk()
  block: let blkcg_gq grab request queue's refcnt
  block: avoid use-after-free on throttle data
  block: limit request dispatch loop duration
  block/bfq-iosched: Fix spelling mistake "tenative" -> "tentative"
  sr: simplify the local variable initialization in sr_block_open()
  block: don't merge across cgroup boundaries if blkcg is enabled
  block: fix rq-qos breakage from skipping rq_qos_done_bio()
  block: flush plug based on hardware and software queue order
  block: ensure plug merging checks the correct queue at least once
  block: move rq_qos_exit() into disk_release()
  block: do more work in elevator_exit
  block: move blk_exit_queue into disk_release
  block: move q_usage_counter release into blk_queue_release
  block: don't remove hctx debugfs dir from blk_mq_exit_queue
  block: move blkcg initialization/destroy into disk allocation/release handler
  sr: implement ->free_disk to simplify refcounting
  sd: implement ->free_disk to simplify refcounting
  sd: delay calling free_opal_dev
  sd: call sd_zbc_release_disk before releasing the scsi_device reference
  ...
@@ -155,6 +155,55 @@ Description:
 		last zone of the device which may be smaller.
 
 
+What:		/sys/block/<disk>/queue/crypto/
+Date:		February 2022
+Contact:	linux-block@vger.kernel.org
+Description:
+		The presence of this subdirectory of /sys/block/<disk>/queue/
+		indicates that the device supports inline encryption. This
+		subdirectory contains files which describe the inline encryption
+		capabilities of the device. For more information about inline
+		encryption, refer to Documentation/block/inline-encryption.rst.
+
+
+What:		/sys/block/<disk>/queue/crypto/max_dun_bits
+Date:		February 2022
+Contact:	linux-block@vger.kernel.org
+Description:
+		[RO] This file shows the maximum length, in bits, of data unit
+		numbers accepted by the device in inline encryption requests.
+
+
+What:		/sys/block/<disk>/queue/crypto/modes/<mode>
+Date:		February 2022
+Contact:	linux-block@vger.kernel.org
+Description:
+		[RO] For each crypto mode (i.e., encryption/decryption
+		algorithm) the device supports with inline encryption, a file
+		will exist at this location. It will contain a hexadecimal
+		number that is a bitmask of the supported data unit sizes, in
+		bytes, for that crypto mode.
+
+		Currently, the crypto modes that may be supported are:
+
+		* AES-256-XTS
+		* AES-128-CBC-ESSIV
+		* Adiantum
+
+		For example, if a device supports AES-256-XTS inline encryption
+		with data unit sizes of 512 and 4096 bytes, the file
+		/sys/block/<disk>/queue/crypto/modes/AES-256-XTS will exist and
+		will contain "0x1200".
+
+
+What:		/sys/block/<disk>/queue/crypto/num_keyslots
+Date:		February 2022
+Contact:	linux-block@vger.kernel.org
+Description:
+		[RO] This file shows the number of keyslots the device has for
+		use with inline encryption.
+
+
 What:		/sys/block/<disk>/queue/dax
 Date:		June 2016
 Contact:	linux-block@vger.kernel.org
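The "0x1200" value above is just a bitmask with one bit per supported data unit size: bit i set means 1 << i bytes is supported, so 0x200 (512 bytes) | 0x1000 (4096 bytes) = 0x1200. A minimal userspace sketch that decodes such a mask follows; the disk name and mode path are illustrative, not part of this merge:

/* decode_dus.c - print the data unit sizes advertised for one crypto mode */
#include <stdio.h>

int main(void)
{
	/* hypothetical disk; any /sys/block/<disk>/queue/crypto/modes/<mode> */
	const char *path = "/sys/block/sda/queue/crypto/modes/AES-256-XTS";
	FILE *f = fopen(path, "r");
	unsigned long mask;

	if (!f || fscanf(f, "%lx", &mask) != 1) {
		perror(path);
		return 1;
	}
	fclose(f);

	for (int i = 0; i < 8 * (int)sizeof(mask); i++)
		if (mask & (1UL << i))
			printf("data unit size: %lu bytes\n", 1UL << i);
	return 0;
}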
File diff suppressed because it is too large
@@ -7,4 +7,4 @@ This file documents the sysfs file ``block/<disk>/capability``.
 ``capability`` is a bitfield, printed in hexadecimal, indicating which
 capabilities a specific block device supports:
 
-.. kernel-doc:: include/linux/genhd.h
+.. kernel-doc:: include/linux/blkdev.h
@@ -8,7 +8,6 @@ Block
    :maxdepth: 1
 
    bfq-iosched
-   biodoc
    biovecs
    blk-mq
    capability
@@ -3441,6 +3441,7 @@ F: Documentation/ABI/stable/sysfs-block
 F:	Documentation/block/
 F:	block/
 F:	drivers/block/
+F:	include/linux/bio.h
 F:	include/linux/blk*
 F:	kernel/trace/blktrace.c
 F:	lib/sbitmap.c
@@ -30,7 +30,6 @@
 
 #include <linux/types.h>
 #include <linux/kdev_t.h>
-#include <linux/genhd.h>
 #include <linux/sched.h>
 #include <linux/init.h>
 #include <linux/interrupt.h>
@@ -23,7 +23,6 @@
 #include <linux/linkage.h>
 #include <linux/init.h>
 #include <linux/major.h>
-#include <linux/genhd.h>
 #include <linux/rtc.h>
 #include <linux/interrupt.h>
 #include <linux/bcd.h>
@@ -13,7 +13,6 @@
 #include <linux/kernel.h>
 #include <linux/errno.h>
 #include <linux/types.h>
-#include <linux/genhd.h>
 #include <linux/blkdev.h>
 #include <linux/hdreg.h>
 #include <linux/slab.h>
@@ -16,7 +16,6 @@
 #include <linux/interrupt.h>
 #include <linux/fs.h>
 #include <linux/console.h>
-#include <linux/genhd.h>
 #include <linux/errno.h>
 #include <linux/string.h>
 #include <linux/init.h>
@@ -22,7 +22,6 @@
 #include <linux/linkage.h>
 #include <linux/init.h>
 #include <linux/major.h>
-#include <linux/genhd.h>
 #include <linux/rtc.h>
 #include <linux/interrupt.h>
 
@@ -24,7 +24,6 @@
 #include <linux/linkage.h>
 #include <linux/init.h>
 #include <linux/major.h>
-#include <linux/genhd.h>
 #include <linux/rtc.h>
 #include <linux/interrupt.h>
 #include <linux/module.h>
@@ -26,6 +26,16 @@ menuconfig BLOCK
 
 if BLOCK
 
+config BLOCK_LEGACY_AUTOLOAD
+	bool "Legacy autoloading support"
+	default y
+	help
+	  Enable loading modules and creating block device instances based on
+	  accesses through their device special file. This is a historic Linux
+	  feature and makes no sense in a udev world where device files are
+	  created on demand, but scripts that manually create device nodes and
+	  then call losetup might rely on this behavior.
+
 config BLK_RQ_ALLOC_TIME
 	bool
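The help text describes the legacy path this option gates: opening a hand-created device node whose dev_t has no driver bound used to trigger a module load. A hedged userspace sketch of that flow; the node path and device numbers are illustrative:

/* autoload_demo.c - exercise dev_t-based block module autoloading */
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <sys/stat.h>
#include <sys/sysmacros.h>
#include <unistd.h>

int main(void)
{
	/* loop0 is conventionally major 7, minor 0 */
	if (mknod("/tmp/loop0", S_IFBLK | 0600, makedev(7, 0)) && errno != EEXIST)
		perror("mknod");

	/* With BLOCK_LEGACY_AUTOLOAD=y this open may load the loop driver
	 * on demand (and now logs a deprecation warning); with the option
	 * disabled it simply fails when no driver is registered. */
	int fd = open("/tmp/loop0", O_RDONLY);
	if (fd < 0)
		perror("open");
	else
		close(fd);
	return 0;
}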
@@ -218,6 +228,9 @@ config BLK_PM
 config BLOCK_HOLDER_DEPRECATED
 	bool
 
+config BLK_MQ_STACKING
+	bool
+
 source "block/Kconfig.iosched"
 
 endif # BLOCK
@@ -36,6 +36,7 @@ obj-$(CONFIG_BLK_DEBUG_FS) += blk-mq-debugfs.o
 obj-$(CONFIG_BLK_DEBUG_FS_ZONED)+= blk-mq-debugfs-zoned.o
 obj-$(CONFIG_BLK_SED_OPAL)	+= sed-opal.o
 obj-$(CONFIG_BLK_PM)		+= blk-pm.o
-obj-$(CONFIG_BLK_INLINE_ENCRYPTION)	+= blk-crypto.o blk-crypto-profile.o
+obj-$(CONFIG_BLK_INLINE_ENCRYPTION)	+= blk-crypto.o blk-crypto-profile.o \
+					   blk-crypto-sysfs.o
 obj-$(CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK) += blk-crypto-fallback.o
 obj-$(CONFIG_BLOCK_HOLDER_DEPRECATED)	+= holder.o
block/bdev.c
@@ -678,7 +678,7 @@ static int blkdev_get_whole(struct block_device *bdev, fmode_t mode)
 	if (test_bit(GD_NEED_PART_SCAN, &disk->state))
 		bdev_disk_changed(disk, false);
 	bdev->bd_openers++;
-	return 0;;
+	return 0;
 }
 
 static void blkdev_put_whole(struct block_device *bdev, fmode_t mode)
@@ -733,12 +733,15 @@ struct block_device *blkdev_get_no_open(dev_t dev)
 	struct inode *inode;
 
 	inode = ilookup(blockdev_superblock, dev);
-	if (!inode) {
+	if (!inode && IS_ENABLED(CONFIG_BLOCK_LEGACY_AUTOLOAD)) {
 		blk_request_module(dev);
 		inode = ilookup(blockdev_superblock, dev);
-		if (!inode)
-			return NULL;
+		if (inode)
+			pr_warn_ratelimited(
+"block device autoloading is deprecated and will be removed.\n");
 	}
+	if (!inode)
+		return NULL;
 
 	/* switch from the inode reference to a device mode one: */
 	bdev = &BDEV_I(inode)->bdev;
@@ -645,7 +645,21 @@ void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq,
 		   struct bfq_group *bfqg)
 {
 	struct bfq_entity *entity = &bfqq->entity;
+	struct bfq_group *old_parent = bfqq_group(bfqq);
+
+	/*
+	 * No point to move bfqq to the same group, which can happen when
+	 * root group is offlined
+	 */
+	if (old_parent == bfqg)
+		return;
 
+	/*
+	 * oom_bfqq is not allowed to move, oom_bfqq will hold ref to root_group
+	 * until elevator exit.
+	 */
+	if (bfqq == &bfqd->oom_bfqq)
+		return;
 	/*
 	 * Get extra reference to prevent bfqq from being freed in
 	 * next possible expire or deactivate.
@@ -666,7 +680,7 @@ void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq,
 		bfq_deactivate_bfqq(bfqd, bfqq, false, false);
 	else if (entity->on_st_or_in_serv)
 		bfq_put_idle_entity(bfq_entity_service_tree(entity), entity);
-	bfqg_and_blkg_put(bfqq_group(bfqq));
+	bfqg_and_blkg_put(old_parent);
 
 	if (entity->parent &&
 	    entity->parent->last_bfqq_created == bfqq)
@@ -774,7 +774,7 @@ bfq_pos_tree_add_move(struct bfq_data *bfqd, struct bfq_queue *bfqq)
 	if (!bfqq->next_rq)
 		return;
 
-	bfqq->pos_root = &bfq_bfqq_to_bfqg(bfqq)->rq_pos_tree;
+	bfqq->pos_root = &bfqq_group(bfqq)->rq_pos_tree;
 	__bfqq = bfq_rq_pos_tree_lookup(bfqd, bfqq->pos_root,
 			blk_rq_pos(bfqq->next_rq), &parent, &p);
 	if (!__bfqq) {
@@ -2153,7 +2153,7 @@ static void bfq_check_waker(struct bfq_data *bfqd, struct bfq_queue *bfqq,
 		bfqq->waker_detection_started = now_ns;
 		bfq_bfqq_name(bfqq->tentative_waker_bfqq, waker_name,
 			      MAX_BFQQ_NAME_LENGTH);
-		bfq_log_bfqq(bfqd, bfqq, "set tenative waker %s", waker_name);
+		bfq_log_bfqq(bfqd, bfqq, "set tentative waker %s", waker_name);
 	} else /* Same tentative waker queue detected again */
 		bfqq->num_waker_detections++;
 
@@ -2669,7 +2669,7 @@ static struct bfq_queue *bfqq_find_close(struct bfq_data *bfqd,
 					 struct bfq_queue *bfqq,
 					 sector_t sector)
 {
-	struct rb_root *root = &bfq_bfqq_to_bfqg(bfqq)->rq_pos_tree;
+	struct rb_root *root = &bfqq_group(bfqq)->rq_pos_tree;
 	struct rb_node *parent, *node;
 	struct bfq_queue *__bfqq;
 
@@ -2782,6 +2782,15 @@ bfq_setup_merge(struct bfq_queue *bfqq, struct bfq_queue *new_bfqq)
 	 * are likely to increase the throughput.
 	 */
 	bfqq->new_bfqq = new_bfqq;
+	/*
+	 * The above assignment schedules the following redirections:
+	 * each time some I/O for bfqq arrives, the process that
+	 * generated that I/O is disassociated from bfqq and
+	 * associated with new_bfqq. Here we increases new_bfqq->ref
+	 * in advance, adding the number of processes that are
+	 * expected to be associated with new_bfqq as they happen to
+	 * issue I/O.
+	 */
 	new_bfqq->ref += process_refs;
 	return new_bfqq;
 }
@@ -2844,6 +2853,10 @@ bfq_setup_cooperator(struct bfq_data *bfqd, struct bfq_queue *bfqq,
 {
 	struct bfq_queue *in_service_bfqq, *new_bfqq;
 
+	/* if a merge has already been setup, then proceed with that first */
+	if (bfqq->new_bfqq)
+		return bfqq->new_bfqq;
+
 	/*
 	 * Check delayed stable merge for rotational or non-queueing
 	 * devs. For this branch to be executed, bfqq must not be
@@ -2945,9 +2958,6 @@ bfq_setup_cooperator(struct bfq_data *bfqd, struct bfq_queue *bfqq,
 	if (bfq_too_late_for_merging(bfqq))
 		return NULL;
 
-	if (bfqq->new_bfqq)
-		return bfqq->new_bfqq;
-
 	if (!io_struct || unlikely(bfqq == &bfqd->oom_bfqq))
 		return NULL;
 
@@ -5181,7 +5191,7 @@ static struct request *bfq_dispatch_request(struct blk_mq_hw_ctx *hctx)
 	struct bfq_data *bfqd = hctx->queue->elevator->elevator_data;
 	struct request *rq;
 	struct bfq_queue *in_serv_queue;
-	bool waiting_rq, idle_timer_disabled;
+	bool waiting_rq, idle_timer_disabled = false;
 
 	spin_lock_irq(&bfqd->lock);
 
@@ -5189,14 +5199,15 @@ static struct request *bfq_dispatch_request(struct blk_mq_hw_ctx *hctx)
 	waiting_rq = in_serv_queue && bfq_bfqq_wait_request(in_serv_queue);
 
 	rq = __bfq_dispatch_request(hctx);
-
-	idle_timer_disabled =
-		waiting_rq && !bfq_bfqq_wait_request(in_serv_queue);
+	if (in_serv_queue == bfqd->in_service_queue) {
+		idle_timer_disabled =
+			waiting_rq && !bfq_bfqq_wait_request(in_serv_queue);
+	}
 
 	spin_unlock_irq(&bfqd->lock);
-
-	bfq_update_dispatch_stats(hctx->queue, rq, in_serv_queue,
-				  idle_timer_disabled);
+	bfq_update_dispatch_stats(hctx->queue, rq,
+			idle_timer_disabled ? in_serv_queue : NULL,
+			idle_timer_disabled);
 
 	return rq;
 }
@@ -8,7 +8,6 @@
 
 #include <linux/blktrace_api.h>
 #include <linux/hrtimer.h>
-#include <linux/blk-cgroup.h>
 
 #include "blk-cgroup-rwstat.h"
 
@@ -1051,7 +1050,6 @@ extern struct blkcg_policy blkcg_policy_bfq;
 	for (parent = NULL; entity ; entity = parent)
 #endif /* CONFIG_BFQ_GROUP_IOSCHED */
 
-struct bfq_group *bfq_bfqq_to_bfqg(struct bfq_queue *bfqq);
 struct bfq_queue *bfq_entity_to_bfqq(struct bfq_entity *entity);
 unsigned int bfq_tot_busy_queues(struct bfq_data *bfqd);
 struct bfq_service_tree *bfq_entity_service_tree(struct bfq_entity *entity);
@@ -142,16 +142,6 @@ static bool bfq_update_next_in_service(struct bfq_sched_data *sd,
 
 #ifdef CONFIG_BFQ_GROUP_IOSCHED
 
-struct bfq_group *bfq_bfqq_to_bfqg(struct bfq_queue *bfqq)
-{
-	struct bfq_entity *group_entity = bfqq->entity.parent;
-
-	if (!group_entity)
-		group_entity = &bfqq->bfqd->root_group->entity;
-
-	return container_of(group_entity, struct bfq_group, entity);
-}
-
 /*
  * Returns true if this budget changes may let next_in_service->parent
  * become the next_in_service entity for its parent entity.
@@ -230,11 +220,6 @@ static bool bfq_no_longer_next_in_service(struct bfq_entity *entity)
 
 #else /* CONFIG_BFQ_GROUP_IOSCHED */
 
-struct bfq_group *bfq_bfqq_to_bfqg(struct bfq_queue *bfqq)
-{
-	return bfqq->bfqd->root_group;
-}
-
 static bool bfq_update_parent_budget(struct bfq_entity *next_in_service)
 {
 	return false;
@@ -519,7 +504,7 @@ unsigned short bfq_ioprio_to_weight(int ioprio)
 static unsigned short bfq_weight_to_ioprio(int weight)
 {
 	return max_t(int, 0,
-		     IOPRIO_NR_LEVELS * BFQ_WEIGHT_CONVERSION_COEFF - weight);
+		     IOPRIO_NR_LEVELS - weight / BFQ_WEIGHT_CONVERSION_COEFF);
 }
 
 static void bfq_get_entity(struct bfq_entity *entity)
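The fixed formula is the true inverse of bfq_ioprio_to_weight(), which computes (IOPRIO_NR_LEVELS - ioprio) * BFQ_WEIGHT_CONVERSION_COEFF; the old code multiplied where it should have divided. A standalone check, assuming the kernel's current constants (8 levels, coefficient 10):

/* weight_roundtrip.c - verify the ioprio <-> weight conversion fix */
#include <assert.h>

#define IOPRIO_NR_LEVELS		8
#define BFQ_WEIGHT_CONVERSION_COEFF	10

static int ioprio_to_weight(int ioprio)
{
	return (IOPRIO_NR_LEVELS - ioprio) * BFQ_WEIGHT_CONVERSION_COEFF;
}

static int weight_to_ioprio_fixed(int weight)
{
	return IOPRIO_NR_LEVELS - weight / BFQ_WEIGHT_CONVERSION_COEFF;
}

int main(void)
{
	for (int ioprio = 0; ioprio < IOPRIO_NR_LEVELS; ioprio++) {
		int w = ioprio_to_weight(ioprio);

		/* the old formula returned 80 - w, e.g. 40 for ioprio 4 */
		assert(weight_to_ioprio_fixed(w) == ioprio);
	}
	return 0;
}

(The kernel additionally clamps the result to zero with max_t(); the sketch only exercises the round trip.)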
@@ -420,7 +420,6 @@ int bio_integrity_clone(struct bio *bio, struct bio *bio_src,
 
 	return 0;
 }
-EXPORT_SYMBOL(bio_integrity_clone);
 
 int bioset_integrity_create(struct bio_set *bs, int pool_size)
 {
block/bio.c
@@ -15,7 +15,6 @@
 #include <linux/mempool.h>
 #include <linux/workqueue.h>
 #include <linux/cgroup.h>
-#include <linux/blk-cgroup.h>
 #include <linux/highmem.h>
 #include <linux/sched/sysctl.h>
 #include <linux/blk-crypto.h>
@@ -24,6 +23,7 @@
 #include <trace/events/block.h>
 #include "blk.h"
 #include "blk-rq-qos.h"
+#include "blk-cgroup.h"
 
 struct bio_alloc_cache {
 	struct bio *free_list;
@@ -249,12 +249,12 @@ static void bio_free(struct bio *bio)
  * they must remember to pair any call to bio_init() with bio_uninit()
  * when IO has completed, or when the bio is released.
  */
-void bio_init(struct bio *bio, struct bio_vec *table,
-	      unsigned short max_vecs)
+void bio_init(struct bio *bio, struct block_device *bdev, struct bio_vec *table,
+	      unsigned short max_vecs, unsigned int opf)
 {
 	bio->bi_next = NULL;
-	bio->bi_bdev = NULL;
-	bio->bi_opf = 0;
+	bio->bi_bdev = bdev;
+	bio->bi_opf = opf;
 	bio->bi_flags = 0;
 	bio->bi_ioprio = 0;
 	bio->bi_write_hint = 0;
@@ -268,6 +268,8 @@ void bio_init(struct bio *bio, struct bio_vec *table,
 #ifdef CONFIG_BLK_CGROUP
 	bio->bi_blkg = NULL;
 	bio->bi_issue.value = 0;
+	if (bdev)
+		bio_associate_blkg(bio);
 #ifdef CONFIG_BLK_CGROUP_IOCOST
 	bio->bi_iocost_cost = 0;
 #endif
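With the new signature, callers pass the target device and operation at init time instead of poking bi_bdev/bi_opf afterwards, which also lets bio_init() do the blkg association up front. A hedged fragment of the new calling convention; bdev, sector and page stand for whatever the caller is writing:

	struct bio bio;
	struct bio_vec bvec;

	/* on-stack bio, one vector, targeting bdev, as a synchronous write */
	bio_init(&bio, bdev, &bvec, 1, REQ_OP_WRITE | REQ_SYNC);
	bio.bi_iter.bi_sector = sector;
	bio_add_page(&bio, page, PAGE_SIZE, 0);
	submit_bio_wait(&bio);
	bio_uninit(&bio);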
@@ -293,6 +295,8 @@ EXPORT_SYMBOL(bio_init);
 /**
  * bio_reset - reinitialize a bio
  * @bio:	bio to reset
+ * @bdev:	block device to use the bio for
+ * @opf:	operation and flags for bio
  *
  * Description:
  *   After calling bio_reset(), @bio will be in the same state as a freshly
@@ -300,11 +304,15 @@ EXPORT_SYMBOL(bio_init);
  *   preserved are the ones that are initialized by bio_alloc_bioset(). See
  *   comment in struct bio.
  */
-void bio_reset(struct bio *bio)
+void bio_reset(struct bio *bio, struct block_device *bdev, unsigned int opf)
 {
 	bio_uninit(bio);
 	memset(bio, 0, BIO_RESET_BYTES);
 	atomic_set(&bio->__bi_remaining, 1);
+	bio->bi_bdev = bdev;
+	if (bio->bi_bdev)
+		bio_associate_blkg(bio);
+	bio->bi_opf = opf;
 }
 EXPORT_SYMBOL(bio_reset);
 
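The same convention applies to reuse: bio_reset() now re-targets and re-types the bio in one call. A brief sketch, with bdev and next_sector as stand-ins:

	/* reuse the just-completed bio for the next read */
	bio_reset(bio, bdev, REQ_OP_READ);
	bio->bi_iter.bi_sector = next_sector;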
@@ -344,6 +352,20 @@ void bio_chain(struct bio *bio, struct bio *parent)
 }
 EXPORT_SYMBOL(bio_chain);
 
+struct bio *blk_next_bio(struct bio *bio, struct block_device *bdev,
+		unsigned int nr_pages, unsigned int opf, gfp_t gfp)
+{
+	struct bio *new = bio_alloc(bdev, nr_pages, opf, gfp);
+
+	if (bio) {
+		bio_chain(bio, new);
+		submit_bio(bio);
+	}
+
+	return new;
+}
+EXPORT_SYMBOL_GPL(blk_next_bio);
+
 static void bio_alloc_rescue(struct work_struct *work)
 {
 	struct bio_set *bs = container_of(work, struct bio_set, rescue_work);
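blk_next_bio() packages the allocate-chain-submit pattern used by multi-bio operations such as discard and zeroout: the previous bio is chained to the freshly allocated one and submitted, so waiting on the tail implies the whole chain has completed. A hedged sketch of a caller, with bdev, sector and nr_pages as stand-ins:

	struct bio *bio = NULL;

	while (nr_pages) {
		bio = blk_next_bio(bio, bdev, min(nr_pages, BIO_MAX_VECS),
				   REQ_OP_WRITE, GFP_KERNEL);
		bio->bi_iter.bi_sector = sector;
		/* ... add pages, advance sector and nr_pages ... */
	}
	submit_bio_wait(bio);	/* chained parents complete before the tail */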
@@ -400,8 +422,10 @@ static void punt_bios_to_rescuer(struct bio_set *bs)
 
 /**
  * bio_alloc_bioset - allocate a bio for I/O
+ * @bdev:	block device to allocate the bio for (can be %NULL)
+ * @nr_vecs:	number of bvecs to pre-allocate
+ * @opf:	operation and flags for bio
  * @gfp_mask:	the GFP_* mask given to the slab allocator
- * @nr_iovecs:	number of iovecs to pre-allocate
  * @bs:		the bio_set to allocate from.
  *
  * Allocate a bio from the mempools in @bs.
@@ -430,15 +454,16 @@ static void punt_bios_to_rescuer(struct bio_set *bs)
  *
  * Returns: Pointer to new bio on success, NULL on failure.
  */
-struct bio *bio_alloc_bioset(gfp_t gfp_mask, unsigned short nr_iovecs,
+struct bio *bio_alloc_bioset(struct block_device *bdev, unsigned short nr_vecs,
+		unsigned int opf, gfp_t gfp_mask,
 		struct bio_set *bs)
 {
 	gfp_t saved_gfp = gfp_mask;
 	struct bio *bio;
 	void *p;
 
-	/* should not use nobvec bioset for nr_iovecs > 0 */
-	if (WARN_ON_ONCE(!mempool_initialized(&bs->bvec_pool) && nr_iovecs > 0))
+	/* should not use nobvec bioset for nr_vecs > 0 */
+	if (WARN_ON_ONCE(!mempool_initialized(&bs->bvec_pool) && nr_vecs > 0))
 		return NULL;
 
 	/*
@@ -475,23 +500,23 @@ struct bio *bio_alloc_bioset(gfp_t gfp_mask, unsigned short nr_iovecs,
 		return NULL;
 
 	bio = p + bs->front_pad;
-	if (nr_iovecs > BIO_INLINE_VECS) {
+	if (nr_vecs > BIO_INLINE_VECS) {
 		struct bio_vec *bvl = NULL;
 
-		bvl = bvec_alloc(&bs->bvec_pool, &nr_iovecs, gfp_mask);
+		bvl = bvec_alloc(&bs->bvec_pool, &nr_vecs, gfp_mask);
 		if (!bvl && gfp_mask != saved_gfp) {
 			punt_bios_to_rescuer(bs);
 			gfp_mask = saved_gfp;
-			bvl = bvec_alloc(&bs->bvec_pool, &nr_iovecs, gfp_mask);
+			bvl = bvec_alloc(&bs->bvec_pool, &nr_vecs, gfp_mask);
 		}
 		if (unlikely(!bvl))
 			goto err_free;
 
-		bio_init(bio, bvl, nr_iovecs);
-	} else if (nr_iovecs) {
-		bio_init(bio, bio->bi_inline_vecs, BIO_INLINE_VECS);
+		bio_init(bio, bdev, bvl, nr_vecs, opf);
+	} else if (nr_vecs) {
+		bio_init(bio, bdev, bio->bi_inline_vecs, BIO_INLINE_VECS, opf);
 	} else {
-		bio_init(bio, NULL, 0);
+		bio_init(bio, bdev, NULL, 0, opf);
 	}
 
 	bio->bi_pool = bs;
@@ -522,7 +547,8 @@ struct bio *bio_kmalloc(gfp_t gfp_mask, unsigned short nr_iovecs)
 	bio = kmalloc(struct_size(bio, bi_inline_vecs, nr_iovecs), gfp_mask);
 	if (unlikely(!bio))
 		return NULL;
-	bio_init(bio, nr_iovecs ? bio->bi_inline_vecs : NULL, nr_iovecs);
+	bio_init(bio, NULL, nr_iovecs ? bio->bi_inline_vecs : NULL, nr_iovecs,
+		 0);
 	bio->bi_pool = NULL;
 	return bio;
 }
@@ -702,80 +728,84 @@ void bio_put(struct bio *bio)
 }
 EXPORT_SYMBOL(bio_put);
 
-/**
- * __bio_clone_fast - clone a bio that shares the original bio's biovec
- * @bio: destination bio
- * @bio_src: bio to clone
- *
- * Clone a &bio. Caller will own the returned bio, but not
- * the actual data it points to. Reference count of returned
- * bio will be one.
- *
- * Caller must ensure that @bio_src is not freed before @bio.
- */
-void __bio_clone_fast(struct bio *bio, struct bio *bio_src)
+static int __bio_clone(struct bio *bio, struct bio *bio_src, gfp_t gfp)
 {
-	WARN_ON_ONCE(bio->bi_pool && bio->bi_max_vecs);
-
-	/*
-	 * most users will be overriding ->bi_bdev with a new target,
-	 * so we don't set nor calculate new physical/hw segment counts here
-	 */
-	bio->bi_bdev = bio_src->bi_bdev;
 	bio_set_flag(bio, BIO_CLONED);
 	if (bio_flagged(bio_src, BIO_THROTTLED))
 		bio_set_flag(bio, BIO_THROTTLED);
-	if (bio_flagged(bio_src, BIO_REMAPPED))
+	if (bio->bi_bdev == bio_src->bi_bdev &&
+	    bio_flagged(bio_src, BIO_REMAPPED))
 		bio_set_flag(bio, BIO_REMAPPED);
-	bio->bi_opf = bio_src->bi_opf;
 	bio->bi_ioprio = bio_src->bi_ioprio;
 	bio->bi_write_hint = bio_src->bi_write_hint;
 	bio->bi_iter = bio_src->bi_iter;
-	bio->bi_io_vec = bio_src->bi_io_vec;
 
 	bio_clone_blkg_association(bio, bio_src);
 	blkcg_bio_issue_init(bio);
+
+	if (bio_crypt_clone(bio, bio_src, gfp) < 0)
+		return -ENOMEM;
+	if (bio_integrity(bio_src) &&
+	    bio_integrity_clone(bio, bio_src, gfp) < 0)
+		return -ENOMEM;
+	return 0;
 }
-EXPORT_SYMBOL(__bio_clone_fast);
 
 /**
- * bio_clone_fast - clone a bio that shares the original bio's biovec
- * @bio: bio to clone
- * @gfp_mask: allocation priority
- * @bs: bio_set to allocate from
+ * bio_alloc_clone - clone a bio that shares the original bio's biovec
+ * @bdev: block_device to clone onto
+ * @bio_src: bio to clone from
+ * @gfp: allocation priority
+ * @bs: bio_set to allocate from
  *
- * Like __bio_clone_fast, only also allocates the returned bio
+ * Allocate a new bio that is a clone of @bio_src. The caller owns the returned
+ * bio, but not the actual data it points to.
+ *
+ * The caller must ensure that the return bio is not freed before @bio_src.
  */
-struct bio *bio_clone_fast(struct bio *bio, gfp_t gfp_mask, struct bio_set *bs)
+struct bio *bio_alloc_clone(struct block_device *bdev, struct bio *bio_src,
+		gfp_t gfp, struct bio_set *bs)
 {
-	struct bio *b;
+	struct bio *bio;
 
-	b = bio_alloc_bioset(gfp_mask, 0, bs);
-	if (!b)
+	bio = bio_alloc_bioset(bdev, 0, bio_src->bi_opf, gfp, bs);
+	if (!bio)
 		return NULL;
 
-	__bio_clone_fast(b, bio);
+	if (__bio_clone(bio, bio_src, gfp) < 0) {
+		bio_put(bio);
+		return NULL;
+	}
+	bio->bi_io_vec = bio_src->bi_io_vec;
 
-	if (bio_crypt_clone(b, bio, gfp_mask) < 0)
-		goto err_put;
-
-	if (bio_integrity(bio) &&
-	    bio_integrity_clone(b, bio, gfp_mask) < 0)
-		goto err_put;
-
-	return b;
-
-err_put:
-	bio_put(b);
-	return NULL;
+	return bio;
 }
-EXPORT_SYMBOL(bio_clone_fast);
+EXPORT_SYMBOL(bio_alloc_clone);
 
-const char *bio_devname(struct bio *bio, char *buf)
+/**
+ * bio_init_clone - clone a bio that shares the original bio's biovec
+ * @bdev: block_device to clone onto
+ * @bio: bio to clone into
+ * @bio_src: bio to clone from
+ * @gfp: allocation priority
+ *
+ * Initialize a new bio in caller provided memory that is a clone of @bio_src.
+ * The caller owns the returned bio, but not the actual data it points to.
+ *
+ * The caller must ensure that @bio_src is not freed before @bio.
+ */
+int bio_init_clone(struct block_device *bdev, struct bio *bio,
+		struct bio *bio_src, gfp_t gfp)
 {
-	return bdevname(bio->bi_bdev, buf);
+	int ret;
+
+	bio_init(bio, bdev, bio_src->bi_io_vec, 0, bio_src->bi_opf);
+	ret = __bio_clone(bio, bio_src, gfp);
+	if (ret)
+		bio_uninit(bio);
+	return ret;
 }
-EXPORT_SYMBOL(bio_devname);
+EXPORT_SYMBOL(bio_init_clone);
 
 /**
  * bio_full - check if the bio is full
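The split into bio_alloc_clone()/bio_init_clone() matches the two ways stacking drivers consume clones: allocate a fresh bio, or initialize one embedded in caller memory. A hedged sketch of the allocating form redirecting I/O to a lower device; lower_bdev, the endio callback and the use of fs_bio_set are illustrative, not taken from this merge:

static void clone_endio(struct bio *clone)
{
	struct bio *orig = clone->bi_private;

	orig->bi_status = clone->bi_status;
	bio_put(clone);
	bio_endio(orig);
}

static void remap_and_submit(struct block_device *lower_bdev, struct bio *orig)
{
	struct bio *clone;

	/* bi_bdev is set at clone time now; no separate bio_set_dev() */
	clone = bio_alloc_clone(lower_bdev, orig, GFP_NOIO, &fs_bio_set);
	if (!clone) {
		bio_io_error(orig);
		return;
	}
	clone->bi_private = orig;
	clone->bi_end_io = clone_endio;
	submit_bio(clone);
}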
@@ -1054,7 +1084,7 @@ bool bio_add_folio(struct bio *bio, struct folio *folio, size_t len,
 		   size_t off)
 {
 	if (len > UINT_MAX || off > UINT_MAX)
-		return 0;
+		return false;
 	return bio_add_page(bio, &folio->page, len, off) > 0;
 }
 
@@ -1486,8 +1516,7 @@ again:
 	if (!bio_integrity_endio(bio))
 		return;
 
-	if (bio->bi_bdev && bio_flagged(bio, BIO_TRACKED))
-		rq_qos_done_bio(bdev_get_queue(bio->bi_bdev), bio);
+	rq_qos_done_bio(bio);
 
 	if (bio->bi_bdev && bio_flagged(bio, BIO_TRACE_COMPLETION)) {
 		trace_block_bio_complete(bdev_get_queue(bio->bi_bdev), bio);
@@ -1541,7 +1570,7 @@ struct bio *bio_split(struct bio *bio, int sectors,
 	if (WARN_ON_ONCE(bio_op(bio) == REQ_OP_ZONE_APPEND))
 		return NULL;
 
-	split = bio_clone_fast(bio, gfp, bs);
+	split = bio_alloc_clone(bio->bi_bdev, bio, gfp, bs);
 	if (!split)
 		return NULL;
 
@@ -1636,9 +1665,9 @@ EXPORT_SYMBOL(bioset_exit);
  *    Note that the bio must be embedded at the END of that structure always,
  *    or things will break badly.
  *    If %BIOSET_NEED_BVECS is set in @flags, a separate pool will be allocated
- *    for allocating iovecs.  This pool is not needed e.g. for bio_clone_fast().
- *    If %BIOSET_NEED_RESCUER is set, a workqueue is created which can be used to
- *    dispatch queued requests when the mempool runs out of space.
+ *    for allocating iovecs.  This pool is not needed e.g. for bio_init_clone().
+ *    If %BIOSET_NEED_RESCUER is set, a workqueue is created which can be used
+ *    to dispatch queued requests when the mempool runs out of space.
  *
  */
 int bioset_init(struct bio_set *bs,
@@ -1708,7 +1737,9 @@ EXPORT_SYMBOL(bioset_init_from_src);
 /**
  * bio_alloc_kiocb - Allocate a bio from bio_set based on kiocb
  * @kiocb:	kiocb describing the IO
+ * @bdev:	block device to allocate the bio for (can be %NULL)
  * @nr_vecs:	number of iovecs to pre-allocate
+ * @opf:	operation and flags for bio
  * @bs:	bio_set to allocate from
  *
  * Description:
@@ -1719,14 +1750,14 @@ EXPORT_SYMBOL(bioset_init_from_src);
  *    MUST be done from process context, not hard/soft IRQ.
  *
  */
-struct bio *bio_alloc_kiocb(struct kiocb *kiocb, unsigned short nr_vecs,
-		struct bio_set *bs)
+struct bio *bio_alloc_kiocb(struct kiocb *kiocb, struct block_device *bdev,
+		unsigned short nr_vecs, unsigned int opf, struct bio_set *bs)
 {
 	struct bio_alloc_cache *cache;
 	struct bio *bio;
 
 	if (!(kiocb->ki_flags & IOCB_ALLOC_CACHE) || nr_vecs > BIO_INLINE_VECS)
-		return bio_alloc_bioset(GFP_KERNEL, nr_vecs, bs);
+		return bio_alloc_bioset(bdev, nr_vecs, opf, GFP_KERNEL, bs);
 
 	cache = per_cpu_ptr(bs->cache, get_cpu());
 	if (cache->free_list) {
@@ -1734,13 +1765,14 @@ struct bio *bio_alloc_kiocb(struct kiocb *kiocb, unsigned short nr_vecs,
 		cache->free_list = bio->bi_next;
 		cache->nr--;
 		put_cpu();
-		bio_init(bio, nr_vecs ? bio->bi_inline_vecs : NULL, nr_vecs);
+		bio_init(bio, bdev, nr_vecs ? bio->bi_inline_vecs : NULL,
+			 nr_vecs, opf);
 		bio->bi_pool = bs;
 		bio_set_flag(bio, BIO_PERCPU_CACHE);
 		return bio;
 	}
 	put_cpu();
-	bio = bio_alloc_bioset(GFP_KERNEL, nr_vecs, bs);
+	bio = bio_alloc_bioset(bdev, nr_vecs, opf, GFP_KERNEL, bs);
 	bio_set_flag(bio, BIO_PERCPU_CACHE);
 	return bio;
 }
Some files were not shown because too many files have changed in this diff.