mirror of
https://github.com/ukui/kernel.git
synced 2026-03-09 10:07:04 -07:00
Merge tag 'dm-4.4-changes' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm
Pull device mapper updates from Mike Snitzer:
"Smaller set of DM changes for this merge. I've based these changes on
Jens' for-4.4/reservations branch because the associated DM changes
required it.
- Revert a dm-multipath change that caused a regression for
unprivileged users (e.g. kvm guests) that issued ioctls when a
multipath device had no available paths.
- Include Christoph's refactoring of DM's ioctl handling and add
support for passing through persistent reservations with DM
multipath.
- All other changes are very simple cleanups"
* tag 'dm-4.4-changes' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm:
dm switch: simplify conditional in alloc_region_table()
dm delay: document that offsets are specified in sectors
dm delay: capitalize the start of a delay_ctr() error message
dm delay: Use DM_MAPIO macros instead of open-coded equivalents
dm linear: remove redundant target name from error messages
dm persistent data: eliminate unnecessary return values
dm: eliminate unused "bioset" process for each bio-based DM device
dm: convert ffs to __ffs
dm: drop NULL test before kmem_cache_destroy() and mempool_destroy()
dm: add support for passing through persistent reservations
dm: refactor ioctl handling
Revert "dm mpath: fix stalls when handling invalid ioctls"
dm: initialize non-blk-mq queue data before queue is used
This commit is contained in:
@@ -8,6 +8,7 @@ Parameters:
|
||||
<device> <offset> <delay> [<write_device> <write_offset> <write_delay>]
|
||||
|
||||
With separate write parameters, the first set is only used for reads.
|
||||
Offsets are specified in sectors.
|
||||
Delays are specified in milliseconds.
|
||||
|
||||
Example scripts
|
||||
|
||||
@@ -1598,11 +1598,11 @@ struct dm_bufio_client *dm_bufio_client_create(struct block_device *bdev, unsign
|
||||
|
||||
c->bdev = bdev;
|
||||
c->block_size = block_size;
|
||||
c->sectors_per_block_bits = ffs(block_size) - 1 - SECTOR_SHIFT;
|
||||
c->pages_per_block_bits = (ffs(block_size) - 1 >= PAGE_SHIFT) ?
|
||||
ffs(block_size) - 1 - PAGE_SHIFT : 0;
|
||||
c->blocks_per_page_bits = (ffs(block_size) - 1 < PAGE_SHIFT ?
|
||||
PAGE_SHIFT - (ffs(block_size) - 1) : 0);
|
||||
c->sectors_per_block_bits = __ffs(block_size) - SECTOR_SHIFT;
|
||||
c->pages_per_block_bits = (__ffs(block_size) >= PAGE_SHIFT) ?
|
||||
__ffs(block_size) - PAGE_SHIFT : 0;
|
||||
c->blocks_per_page_bits = (__ffs(block_size) < PAGE_SHIFT ?
|
||||
PAGE_SHIFT - __ffs(block_size) : 0);
|
||||
|
||||
c->aux_size = aux_size;
|
||||
c->alloc_callback = alloc_callback;
|
||||
@@ -1861,12 +1861,8 @@ static void __exit dm_bufio_exit(void)
|
||||
cancel_delayed_work_sync(&dm_bufio_work);
|
||||
destroy_workqueue(dm_bufio_wq);
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(dm_bufio_caches); i++) {
|
||||
struct kmem_cache *kc = dm_bufio_caches[i];
|
||||
|
||||
if (kc)
|
||||
kmem_cache_destroy(kc);
|
||||
}
|
||||
for (i = 0; i < ARRAY_SIZE(dm_bufio_caches); i++)
|
||||
kmem_cache_destroy(dm_bufio_caches[i]);
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(dm_bufio_cache_names); i++)
|
||||
kfree(dm_bufio_cache_names[i]);
|
||||
|
||||
@@ -260,7 +260,9 @@ static int __superblock_all_zeroes(struct dm_block_manager *bm, bool *result)
|
||||
}
|
||||
}
|
||||
|
||||
return dm_bm_unlock(b);
|
||||
dm_bm_unlock(b);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void __setup_mapping_info(struct dm_cache_metadata *cmd)
|
||||
@@ -465,7 +467,9 @@ static int __open_metadata(struct dm_cache_metadata *cmd)
|
||||
dm_disk_bitset_init(cmd->tm, &cmd->discard_info);
|
||||
sb_flags = le32_to_cpu(disk_super->flags);
|
||||
cmd->clean_when_opened = test_bit(CLEAN_SHUTDOWN, &sb_flags);
|
||||
return dm_bm_unlock(sblock);
|
||||
dm_bm_unlock(sblock);
|
||||
|
||||
return 0;
|
||||
|
||||
bad:
|
||||
dm_bm_unlock(sblock);
|
||||
|
||||
@@ -83,7 +83,7 @@ static struct list_head *list_pop(struct list_head *q)
|
||||
static int alloc_hash(struct hash *hash, unsigned elts)
|
||||
{
|
||||
hash->nr_buckets = next_power(elts >> 4, 16);
|
||||
hash->hash_bits = ffs(hash->nr_buckets) - 1;
|
||||
hash->hash_bits = __ffs(hash->nr_buckets);
|
||||
hash->table = vzalloc(sizeof(*hash->table) * hash->nr_buckets);
|
||||
|
||||
return hash->table ? 0 : -ENOMEM;
|
||||
|
||||
@@ -1410,7 +1410,7 @@ static struct dm_cache_policy *mq_create(dm_cblock_t cache_size,
|
||||
mq->generation_period = max((unsigned) from_cblock(cache_size), 1024U);
|
||||
|
||||
mq->nr_buckets = next_power(from_cblock(cache_size) / 2, 16);
|
||||
mq->hash_bits = ffs(mq->nr_buckets) - 1;
|
||||
mq->hash_bits = __ffs(mq->nr_buckets);
|
||||
mq->table = vzalloc(sizeof(*mq->table) * mq->nr_buckets);
|
||||
if (!mq->table)
|
||||
goto bad_alloc_table;
|
||||
|
||||
@@ -566,7 +566,7 @@ static int h_init(struct hash_table *ht, struct entry_space *es, unsigned nr_ent
|
||||
|
||||
ht->es = es;
|
||||
nr_buckets = roundup_pow_of_two(max(nr_entries / 4u, 16u));
|
||||
ht->hash_bits = ffs(nr_buckets) - 1;
|
||||
ht->hash_bits = __ffs(nr_buckets);
|
||||
|
||||
ht->buckets = vmalloc(sizeof(*ht->buckets) * nr_buckets);
|
||||
if (!ht->buckets)
|
||||
|
||||
@@ -2309,8 +2309,7 @@ static void destroy(struct cache *cache)
|
||||
{
|
||||
unsigned i;
|
||||
|
||||
if (cache->migration_pool)
|
||||
mempool_destroy(cache->migration_pool);
|
||||
mempool_destroy(cache->migration_pool);
|
||||
|
||||
if (cache->all_io_ds)
|
||||
dm_deferred_set_destroy(cache->all_io_ds);
|
||||
|
||||
@@ -1544,10 +1544,8 @@ static void crypt_dtr(struct dm_target *ti)
|
||||
if (cc->bs)
|
||||
bioset_free(cc->bs);
|
||||
|
||||
if (cc->page_pool)
|
||||
mempool_destroy(cc->page_pool);
|
||||
if (cc->req_pool)
|
||||
mempool_destroy(cc->req_pool);
|
||||
mempool_destroy(cc->page_pool);
|
||||
mempool_destroy(cc->req_pool);
|
||||
|
||||
if (cc->iv_gen_ops && cc->iv_gen_ops->dtr)
|
||||
cc->iv_gen_ops->dtr(cc);
|
||||
|
||||
@@ -122,6 +122,7 @@ static void flush_expired_bios(struct work_struct *work)
|
||||
* <device> <offset> <delay> [<write_device> <write_offset> <write_delay>]
|
||||
*
|
||||
* With separate write parameters, the first set is only used for reads.
|
||||
* Offsets are specified in sectors.
|
||||
* Delays are specified in milliseconds.
|
||||
*/
|
||||
static int delay_ctr(struct dm_target *ti, unsigned int argc, char **argv)
|
||||
@@ -132,7 +133,7 @@ static int delay_ctr(struct dm_target *ti, unsigned int argc, char **argv)
|
||||
int ret;
|
||||
|
||||
if (argc != 3 && argc != 6) {
|
||||
ti->error = "requires exactly 3 or 6 arguments";
|
||||
ti->error = "Requires exactly 3 or 6 arguments";
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
@@ -237,7 +238,7 @@ static int delay_bio(struct delay_c *dc, int delay, struct bio *bio)
|
||||
unsigned long expires = 0;
|
||||
|
||||
if (!delay || !atomic_read(&dc->may_delay))
|
||||
return 1;
|
||||
return DM_MAPIO_REMAPPED;
|
||||
|
||||
delayed = dm_per_bio_data(bio, sizeof(struct dm_delay_info));
|
||||
|
||||
@@ -257,7 +258,7 @@ static int delay_bio(struct delay_c *dc, int delay, struct bio *bio)
|
||||
|
||||
queue_timeout(dc, expires);
|
||||
|
||||
return 0;
|
||||
return DM_MAPIO_SUBMITTED;
|
||||
}
|
||||
|
||||
static void delay_presuspend(struct dm_target *ti)
|
||||
|
||||
@@ -343,7 +343,9 @@ static int superblock_all_zeroes(struct dm_block_manager *bm, bool *result)
|
||||
}
|
||||
}
|
||||
|
||||
return dm_bm_unlock(b);
|
||||
dm_bm_unlock(b);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*----------------------------------------------------------------*/
|
||||
@@ -582,7 +584,9 @@ static int open_metadata(struct era_metadata *md)
|
||||
md->metadata_snap = le64_to_cpu(disk->metadata_snap);
|
||||
md->archived_writesets = true;
|
||||
|
||||
return dm_bm_unlock(sblock);
|
||||
dm_bm_unlock(sblock);
|
||||
|
||||
return 0;
|
||||
|
||||
bad:
|
||||
dm_bm_unlock(sblock);
|
||||
@@ -1046,12 +1050,7 @@ static int metadata_take_snap(struct era_metadata *md)
|
||||
|
||||
md->metadata_snap = dm_block_location(clone);
|
||||
|
||||
r = dm_tm_unlock(md->tm, clone);
|
||||
if (r) {
|
||||
DMERR("%s: couldn't unlock clone", __func__);
|
||||
md->metadata_snap = SUPERBLOCK_LOCATION;
|
||||
return r;
|
||||
}
|
||||
dm_tm_unlock(md->tm, clone);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
@@ -183,7 +183,7 @@ int dm_exception_store_set_chunk_size(struct dm_exception_store *store,
|
||||
|
||||
store->chunk_size = chunk_size;
|
||||
store->chunk_mask = chunk_size - 1;
|
||||
store->chunk_shift = ffs(chunk_size) - 1;
|
||||
store->chunk_shift = __ffs(chunk_size);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
@@ -373,20 +373,20 @@ static void flakey_status(struct dm_target *ti, status_type_t type,
|
||||
}
|
||||
}
|
||||
|
||||
static int flakey_ioctl(struct dm_target *ti, unsigned int cmd, unsigned long arg)
|
||||
static int flakey_prepare_ioctl(struct dm_target *ti,
|
||||
struct block_device **bdev, fmode_t *mode)
|
||||
{
|
||||
struct flakey_c *fc = ti->private;
|
||||
struct dm_dev *dev = fc->dev;
|
||||
int r = 0;
|
||||
|
||||
*bdev = fc->dev->bdev;
|
||||
|
||||
/*
|
||||
* Only pass ioctls through if the device sizes match exactly.
|
||||
*/
|
||||
if (fc->start ||
|
||||
ti->len != i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT)
|
||||
r = scsi_verify_blk_ioctl(NULL, cmd);
|
||||
|
||||
return r ? : __blkdev_driver_ioctl(dev->bdev, dev->mode, cmd, arg);
|
||||
ti->len != i_size_read((*bdev)->bd_inode) >> SECTOR_SHIFT)
|
||||
return 1;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int flakey_iterate_devices(struct dm_target *ti, iterate_devices_callout_fn fn, void *data)
|
||||
@@ -405,7 +405,7 @@ static struct target_type flakey_target = {
|
||||
.map = flakey_map,
|
||||
.end_io = flakey_end_io,
|
||||
.status = flakey_status,
|
||||
.ioctl = flakey_ioctl,
|
||||
.prepare_ioctl = flakey_prepare_ioctl,
|
||||
.iterate_devices = flakey_iterate_devices,
|
||||
};
|
||||
|
||||
|
||||
@@ -65,8 +65,7 @@ struct dm_io_client *dm_io_client_create(void)
|
||||
return client;
|
||||
|
||||
bad:
|
||||
if (client->pool)
|
||||
mempool_destroy(client->pool);
|
||||
mempool_destroy(client->pool);
|
||||
kfree(client);
|
||||
return ERR_PTR(-ENOMEM);
|
||||
}
|
||||
|
||||
@@ -39,20 +39,20 @@ static int linear_ctr(struct dm_target *ti, unsigned int argc, char **argv)
|
||||
|
||||
lc = kmalloc(sizeof(*lc), GFP_KERNEL);
|
||||
if (lc == NULL) {
|
||||
ti->error = "dm-linear: Cannot allocate linear context";
|
||||
ti->error = "Cannot allocate linear context";
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
ret = -EINVAL;
|
||||
if (sscanf(argv[1], "%llu%c", &tmp, &dummy) != 1) {
|
||||
ti->error = "dm-linear: Invalid device sector";
|
||||
ti->error = "Invalid device sector";
|
||||
goto bad;
|
||||
}
|
||||
lc->start = tmp;
|
||||
|
||||
ret = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &lc->dev);
|
||||
if (ret) {
|
||||
ti->error = "dm-linear: Device lookup failed";
|
||||
ti->error = "Device lookup failed";
|
||||
goto bad;
|
||||
}
|
||||
|
||||
@@ -116,21 +116,21 @@ static void linear_status(struct dm_target *ti, status_type_t type,
|
||||
}
|
||||
}
|
||||
|
||||
static int linear_ioctl(struct dm_target *ti, unsigned int cmd,
|
||||
unsigned long arg)
|
||||
static int linear_prepare_ioctl(struct dm_target *ti,
|
||||
struct block_device **bdev, fmode_t *mode)
|
||||
{
|
||||
struct linear_c *lc = (struct linear_c *) ti->private;
|
||||
struct dm_dev *dev = lc->dev;
|
||||
int r = 0;
|
||||
|
||||
*bdev = dev->bdev;
|
||||
|
||||
/*
|
||||
* Only pass ioctls through if the device sizes match exactly.
|
||||
*/
|
||||
if (lc->start ||
|
||||
ti->len != i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT)
|
||||
r = scsi_verify_blk_ioctl(NULL, cmd);
|
||||
|
||||
return r ? : __blkdev_driver_ioctl(dev->bdev, dev->mode, cmd, arg);
|
||||
return 1;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int linear_iterate_devices(struct dm_target *ti,
|
||||
@@ -149,7 +149,7 @@ static struct target_type linear_target = {
|
||||
.dtr = linear_dtr,
|
||||
.map = linear_map,
|
||||
.status = linear_status,
|
||||
.ioctl = linear_ioctl,
|
||||
.prepare_ioctl = linear_prepare_ioctl,
|
||||
.iterate_devices = linear_iterate_devices,
|
||||
};
|
||||
|
||||
|
||||
@@ -313,8 +313,7 @@ static int userspace_ctr(struct dm_dirty_log *log, struct dm_target *ti,
|
||||
out:
|
||||
kfree(devices_rdata);
|
||||
if (r) {
|
||||
if (lc->flush_entry_pool)
|
||||
mempool_destroy(lc->flush_entry_pool);
|
||||
mempool_destroy(lc->flush_entry_pool);
|
||||
kfree(lc);
|
||||
kfree(ctr_str);
|
||||
} else {
|
||||
|
||||
@@ -714,20 +714,19 @@ static void log_writes_status(struct dm_target *ti, status_type_t type,
|
||||
}
|
||||
}
|
||||
|
||||
static int log_writes_ioctl(struct dm_target *ti, unsigned int cmd,
|
||||
unsigned long arg)
|
||||
static int log_writes_prepare_ioctl(struct dm_target *ti,
|
||||
struct block_device **bdev, fmode_t *mode)
|
||||
{
|
||||
struct log_writes_c *lc = ti->private;
|
||||
struct dm_dev *dev = lc->dev;
|
||||
int r = 0;
|
||||
|
||||
*bdev = dev->bdev;
|
||||
/*
|
||||
* Only pass ioctls through if the device sizes match exactly.
|
||||
*/
|
||||
if (ti->len != i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT)
|
||||
r = scsi_verify_blk_ioctl(NULL, cmd);
|
||||
|
||||
return r ? : __blkdev_driver_ioctl(dev->bdev, dev->mode, cmd, arg);
|
||||
return 1;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int log_writes_iterate_devices(struct dm_target *ti,
|
||||
@@ -782,7 +781,7 @@ static struct target_type log_writes_target = {
|
||||
.map = log_writes_map,
|
||||
.end_io = normal_end_io,
|
||||
.status = log_writes_status,
|
||||
.ioctl = log_writes_ioctl,
|
||||
.prepare_ioctl = log_writes_prepare_ioctl,
|
||||
.message = log_writes_message,
|
||||
.iterate_devices = log_writes_iterate_devices,
|
||||
.io_hints = log_writes_io_hints,
|
||||
|
||||
@@ -1533,18 +1533,14 @@ out:
|
||||
return r;
|
||||
}
|
||||
|
||||
static int multipath_ioctl(struct dm_target *ti, unsigned int cmd,
|
||||
unsigned long arg)
|
||||
static int multipath_prepare_ioctl(struct dm_target *ti,
|
||||
struct block_device **bdev, fmode_t *mode)
|
||||
{
|
||||
struct multipath *m = ti->private;
|
||||
struct pgpath *pgpath;
|
||||
struct block_device *bdev;
|
||||
fmode_t mode;
|
||||
unsigned long flags;
|
||||
int r;
|
||||
|
||||
bdev = NULL;
|
||||
mode = 0;
|
||||
r = 0;
|
||||
|
||||
spin_lock_irqsave(&m->lock, flags);
|
||||
@@ -1555,26 +1551,17 @@ static int multipath_ioctl(struct dm_target *ti, unsigned int cmd,
|
||||
pgpath = m->current_pgpath;
|
||||
|
||||
if (pgpath) {
|
||||
bdev = pgpath->path.dev->bdev;
|
||||
mode = pgpath->path.dev->mode;
|
||||
*bdev = pgpath->path.dev->bdev;
|
||||
*mode = pgpath->path.dev->mode;
|
||||
}
|
||||
|
||||
if ((pgpath && m->queue_io) || (!pgpath && m->queue_if_no_path))
|
||||
r = -ENOTCONN;
|
||||
else if (!bdev)
|
||||
else if (!*bdev)
|
||||
r = -EIO;
|
||||
|
||||
spin_unlock_irqrestore(&m->lock, flags);
|
||||
|
||||
/*
|
||||
* Only pass ioctls through if the device sizes match exactly.
|
||||
*/
|
||||
if (!bdev || ti->len != i_size_read(bdev->bd_inode) >> SECTOR_SHIFT) {
|
||||
int err = scsi_verify_blk_ioctl(NULL, cmd);
|
||||
if (err)
|
||||
r = err;
|
||||
}
|
||||
|
||||
if (r == -ENOTCONN && !fatal_signal_pending(current)) {
|
||||
spin_lock_irqsave(&m->lock, flags);
|
||||
if (!m->current_pg) {
|
||||
@@ -1587,7 +1574,12 @@ static int multipath_ioctl(struct dm_target *ti, unsigned int cmd,
|
||||
dm_table_run_md_queue_async(m->ti->table);
|
||||
}
|
||||
|
||||
return r ? : __blkdev_driver_ioctl(bdev, mode, cmd, arg);
|
||||
/*
|
||||
* Only pass ioctls through if the device sizes match exactly.
|
||||
*/
|
||||
if (!r && ti->len != i_size_read((*bdev)->bd_inode) >> SECTOR_SHIFT)
|
||||
return 1;
|
||||
return r;
|
||||
}
|
||||
|
||||
static int multipath_iterate_devices(struct dm_target *ti,
|
||||
@@ -1690,7 +1682,7 @@ out:
|
||||
*---------------------------------------------------------------*/
|
||||
static struct target_type multipath_target = {
|
||||
.name = "multipath",
|
||||
.version = {1, 9, 0},
|
||||
.version = {1, 10, 0},
|
||||
.module = THIS_MODULE,
|
||||
.ctr = multipath_ctr,
|
||||
.dtr = multipath_dtr,
|
||||
@@ -1703,7 +1695,7 @@ static struct target_type multipath_target = {
|
||||
.resume = multipath_resume,
|
||||
.status = multipath_status,
|
||||
.message = multipath_message,
|
||||
.ioctl = multipath_ioctl,
|
||||
.prepare_ioctl = multipath_prepare_ioctl,
|
||||
.iterate_devices = multipath_iterate_devices,
|
||||
.busy = multipath_busy,
|
||||
};
|
||||
|
||||
@@ -193,7 +193,7 @@ struct dm_region_hash *dm_region_hash_create(
|
||||
rh->max_recovery = max_recovery;
|
||||
rh->log = log;
|
||||
rh->region_size = region_size;
|
||||
rh->region_shift = ffs(region_size) - 1;
|
||||
rh->region_shift = __ffs(region_size);
|
||||
rwlock_init(&rh->hash_lock);
|
||||
rh->mask = nr_buckets - 1;
|
||||
rh->nr_buckets = nr_buckets;
|
||||
@@ -249,9 +249,7 @@ void dm_region_hash_destroy(struct dm_region_hash *rh)
|
||||
if (rh->log)
|
||||
dm_dirty_log_destroy(rh->log);
|
||||
|
||||
if (rh->region_pool)
|
||||
mempool_destroy(rh->region_pool);
|
||||
|
||||
mempool_destroy(rh->region_pool);
|
||||
vfree(rh->buckets);
|
||||
kfree(rh);
|
||||
}
|
||||
|
||||
@@ -322,7 +322,7 @@ static int read_header(struct pstore *ps, int *new_snapshot)
|
||||
bdev_logical_block_size(dm_snap_cow(ps->store->snap)->
|
||||
bdev) >> 9);
|
||||
ps->store->chunk_mask = ps->store->chunk_size - 1;
|
||||
ps->store->chunk_shift = ffs(ps->store->chunk_size) - 1;
|
||||
ps->store->chunk_shift = __ffs(ps->store->chunk_size);
|
||||
chunk_size_supplied = 0;
|
||||
}
|
||||
|
||||
|
||||
@@ -99,11 +99,11 @@ static int alloc_region_table(struct dm_target *ti, unsigned nr_paths)
|
||||
if (sector_div(nr_regions, sctx->region_size))
|
||||
nr_regions++;
|
||||
|
||||
sctx->nr_regions = nr_regions;
|
||||
if (sctx->nr_regions != nr_regions || sctx->nr_regions >= ULONG_MAX) {
|
||||
if (nr_regions >= ULONG_MAX) {
|
||||
ti->error = "Region table too large";
|
||||
return -EINVAL;
|
||||
}
|
||||
sctx->nr_regions = nr_regions;
|
||||
|
||||
nr_slots = nr_regions;
|
||||
if (sector_div(nr_slots, sctx->region_entries_per_slot))
|
||||
@@ -511,27 +511,24 @@ static void switch_status(struct dm_target *ti, status_type_t type,
|
||||
*
|
||||
* Passthrough all ioctls to the path for sector 0
|
||||
*/
|
||||
static int switch_ioctl(struct dm_target *ti, unsigned cmd,
|
||||
unsigned long arg)
|
||||
static int switch_prepare_ioctl(struct dm_target *ti,
|
||||
struct block_device **bdev, fmode_t *mode)
|
||||
{
|
||||
struct switch_ctx *sctx = ti->private;
|
||||
struct block_device *bdev;
|
||||
fmode_t mode;
|
||||
unsigned path_nr;
|
||||
int r = 0;
|
||||
|
||||
path_nr = switch_get_path_nr(sctx, 0);
|
||||
|
||||
bdev = sctx->path_list[path_nr].dmdev->bdev;
|
||||
mode = sctx->path_list[path_nr].dmdev->mode;
|
||||
*bdev = sctx->path_list[path_nr].dmdev->bdev;
|
||||
*mode = sctx->path_list[path_nr].dmdev->mode;
|
||||
|
||||
/*
|
||||
* Only pass ioctls through if the device sizes match exactly.
|
||||
*/
|
||||
if (ti->len + sctx->path_list[path_nr].start != i_size_read(bdev->bd_inode) >> SECTOR_SHIFT)
|
||||
r = scsi_verify_blk_ioctl(NULL, cmd);
|
||||
|
||||
return r ? : __blkdev_driver_ioctl(bdev, mode, cmd, arg);
|
||||
if (ti->len + sctx->path_list[path_nr].start !=
|
||||
i_size_read((*bdev)->bd_inode) >> SECTOR_SHIFT)
|
||||
return 1;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int switch_iterate_devices(struct dm_target *ti,
|
||||
@@ -560,7 +557,7 @@ static struct target_type switch_target = {
|
||||
.map = switch_map,
|
||||
.message = switch_message,
|
||||
.status = switch_status,
|
||||
.ioctl = switch_ioctl,
|
||||
.prepare_ioctl = switch_prepare_ioctl,
|
||||
.iterate_devices = switch_iterate_devices,
|
||||
};
|
||||
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user