Merge branch 'for-linus' of git://neil.brown.name/md
* 'for-linus' of git://neil.brown.name/md: (34 commits)
  md: Fix some bugs in recovery_disabled handling.
  md/raid5: fix bug that could result in reads from a failed device.
  lib/raid6: Fix filename emitted in generated code
  md.c: trivial comment fix
  MD: Allow restarting an interrupted incremental recovery.
  md: clear In_sync bit on devices added to an active array.
  md: add proper write-congestion reporting to RAID1 and RAID10.
  md: rename "mdk_personality" to "md_personality"
  md/bitmap remove fault injection options.
  md/raid5: typedef removal: raid5_conf_t -> struct r5conf
  md/raid1: typedef removal: conf_t -> struct r1conf
  md/raid10: typedef removal: conf_t -> struct r10conf
  md/raid0: typedef removal: raid0_conf_t -> struct r0conf
  md/multipath: typedef removal: multipath_conf_t -> struct mpconf
  md/linear: typedef removal: linear_conf_t -> struct linear_conf
  md/faulty: remove typedef: conf_t -> struct faulty_conf
  md/linear: remove typedefs: dev_info_t -> struct dev_info
  md: remove typedefs: mirror_info_t -> struct mirror_info
  md: remove typedefs: r10bio_t -> struct r10bio and r1bio_t -> struct r1bio
  md: remove typedefs: mdk_thread_t -> struct md_thread
  ...
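Most of the commits in this series are mechanical typedef removals. A minimal sketch of the pattern being applied, using cut-down stand-in structures rather than the real md definitions (illustrative only, not taken from the diffs below):

/* Illustrative sketch: 'mddev_s'/'mddev' below are reduced stand-ins,
 * not the kernel structures touched by this merge.
 */

/* Before the series: an opaque typedef hides the struct tag. */
struct mddev_s {
        int raid_disks;
};
typedef struct mddev_s mddev_t;

static int count_disks_old(mddev_t *mddev)
{
        return mddev->raid_disks;
}

/* After the series: the typedef is gone and the struct tag is used directly. */
struct mddev {
        int raid_disks;
};

static int count_disks_new(struct mddev *mddev)
{
        return mddev->raid_disks;
}

int main(void)
{
        struct mddev m = { .raid_disks = 4 };
        struct mddev_s m_old = { .raid_disks = 4 };

        /* Both helpers report 4 disks; only the spelling of the type changed. */
        return count_disks_new(&m) - count_disks_old(&m_old);
}

The same rename is applied to every personality's private configuration (conf_t and friends become struct r1conf, struct r10conf, struct r5conf, and so on), which is what the header diffs below show.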
+77 -111 (file diff suppressed because it is too large)
+6 -6

@@ -193,7 +193,7 @@ struct bitmap {
        unsigned long pages; /* total number of pages in the bitmap */
        unsigned long missing_pages; /* number of pages not yet allocated */

-       mddev_t *mddev; /* the md device that the bitmap is for */
+       struct mddev *mddev; /* the md device that the bitmap is for */

        /* bitmap chunksize -- how much data does each bit represent? */
        unsigned long chunkshift; /* chunksize = 2^chunkshift (for bitops) */
@@ -238,10 +238,10 @@ struct bitmap {
 /* the bitmap API */

 /* these are used only by md/bitmap */
-int bitmap_create(mddev_t *mddev);
-int bitmap_load(mddev_t *mddev);
-void bitmap_flush(mddev_t *mddev);
-void bitmap_destroy(mddev_t *mddev);
+int bitmap_create(struct mddev *mddev);
+int bitmap_load(struct mddev *mddev);
+void bitmap_flush(struct mddev *mddev);
+void bitmap_destroy(struct mddev *mddev);

 void bitmap_print_sb(struct bitmap *bitmap);
 void bitmap_update_sb(struct bitmap *bitmap);
@@ -262,7 +262,7 @@ void bitmap_close_sync(struct bitmap *bitmap);
 void bitmap_cond_end_sync(struct bitmap *bitmap, sector_t sector);

 void bitmap_unplug(struct bitmap *bitmap);
-void bitmap_daemon_work(mddev_t *mddev);
+void bitmap_daemon_work(struct mddev *mddev);
 #endif

 #endif
+14 -14
@@ -37,7 +37,7 @@ struct raid_dev {
|
||||
*/
|
||||
struct dm_dev *meta_dev;
|
||||
struct dm_dev *data_dev;
|
||||
struct mdk_rdev_s rdev;
|
||||
struct md_rdev rdev;
|
||||
};
|
||||
|
||||
/*
|
||||
@@ -57,7 +57,7 @@ struct raid_set {
|
||||
|
||||
uint64_t print_flags;
|
||||
|
||||
struct mddev_s md;
|
||||
struct mddev md;
|
||||
struct raid_type *raid_type;
|
||||
struct dm_target_callbacks callbacks;
|
||||
|
||||
@@ -594,7 +594,7 @@ struct dm_raid_superblock {
|
||||
/* Always set to 0 when writing. */
|
||||
} __packed;
|
||||
|
||||
static int read_disk_sb(mdk_rdev_t *rdev, int size)
|
||||
static int read_disk_sb(struct md_rdev *rdev, int size)
|
||||
{
|
||||
BUG_ON(!rdev->sb_page);
|
||||
|
||||
@@ -611,9 +611,9 @@ static int read_disk_sb(mdk_rdev_t *rdev, int size)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void super_sync(mddev_t *mddev, mdk_rdev_t *rdev)
|
||||
static void super_sync(struct mddev *mddev, struct md_rdev *rdev)
|
||||
{
|
||||
mdk_rdev_t *r, *t;
|
||||
struct md_rdev *r, *t;
|
||||
uint64_t failed_devices;
|
||||
struct dm_raid_superblock *sb;
|
||||
|
||||
@@ -651,7 +651,7 @@ static void super_sync(mddev_t *mddev, mdk_rdev_t *rdev)
|
||||
*
|
||||
* Return: 1 if use rdev, 0 if use refdev, -Exxx otherwise
|
||||
*/
|
||||
static int super_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev)
|
||||
static int super_load(struct md_rdev *rdev, struct md_rdev *refdev)
|
||||
{
|
||||
int ret;
|
||||
struct dm_raid_superblock *sb;
|
||||
@@ -689,7 +689,7 @@ static int super_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev)
|
||||
return (events_sb > events_refsb) ? 1 : 0;
|
||||
}
|
||||
|
||||
static int super_init_validation(mddev_t *mddev, mdk_rdev_t *rdev)
|
||||
static int super_init_validation(struct mddev *mddev, struct md_rdev *rdev)
|
||||
{
|
||||
int role;
|
||||
struct raid_set *rs = container_of(mddev, struct raid_set, md);
|
||||
@@ -698,7 +698,7 @@ static int super_init_validation(mddev_t *mddev, mdk_rdev_t *rdev)
|
||||
struct dm_raid_superblock *sb;
|
||||
uint32_t new_devs = 0;
|
||||
uint32_t rebuilds = 0;
|
||||
mdk_rdev_t *r, *t;
|
||||
struct md_rdev *r, *t;
|
||||
struct dm_raid_superblock *sb2;
|
||||
|
||||
sb = page_address(rdev->sb_page);
|
||||
@@ -809,7 +809,7 @@ static int super_init_validation(mddev_t *mddev, mdk_rdev_t *rdev)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int super_validate(mddev_t *mddev, mdk_rdev_t *rdev)
|
||||
static int super_validate(struct mddev *mddev, struct md_rdev *rdev)
|
||||
{
|
||||
struct dm_raid_superblock *sb = page_address(rdev->sb_page);
|
||||
|
||||
@@ -849,8 +849,8 @@ static int super_validate(mddev_t *mddev, mdk_rdev_t *rdev)
|
||||
static int analyse_superblocks(struct dm_target *ti, struct raid_set *rs)
|
||||
{
|
||||
int ret;
|
||||
mdk_rdev_t *rdev, *freshest, *tmp;
|
||||
mddev_t *mddev = &rs->md;
|
||||
struct md_rdev *rdev, *freshest, *tmp;
|
||||
struct mddev *mddev = &rs->md;
|
||||
|
||||
freshest = NULL;
|
||||
rdev_for_each(rdev, tmp, mddev) {
|
||||
@@ -1004,7 +1004,7 @@ static void raid_dtr(struct dm_target *ti)
|
||||
static int raid_map(struct dm_target *ti, struct bio *bio, union map_info *map_context)
|
||||
{
|
||||
struct raid_set *rs = ti->private;
|
||||
mddev_t *mddev = &rs->md;
|
||||
struct mddev *mddev = &rs->md;
|
||||
|
||||
mddev->pers->make_request(mddev, bio);
|
||||
|
||||
@@ -1097,7 +1097,7 @@ static int raid_status(struct dm_target *ti, status_type_t type,
|
||||
rs->md.bitmap_info.max_write_behind);
|
||||
|
||||
if (rs->print_flags & DMPF_STRIPE_CACHE) {
|
||||
raid5_conf_t *conf = rs->md.private;
|
||||
struct r5conf *conf = rs->md.private;
|
||||
|
||||
/* convert from kiB to sectors */
|
||||
DMEMIT(" stripe_cache %d",
|
||||
@@ -1146,7 +1146,7 @@ static void raid_io_hints(struct dm_target *ti, struct queue_limits *limits)
|
||||
{
|
||||
struct raid_set *rs = ti->private;
|
||||
unsigned chunk_size = rs->md.chunk_sectors << 9;
|
||||
raid5_conf_t *conf = rs->md.private;
|
||||
struct r5conf *conf = rs->md.private;
|
||||
|
||||
blk_limits_io_min(limits, chunk_size);
|
||||
blk_limits_io_opt(limits, chunk_size * (conf->raid_disks - conf->max_degraded));
|
||||
|
||||
+19 -19
@@ -81,16 +81,16 @@ static void faulty_fail(struct bio *bio, int error)
|
||||
bio_io_error(b);
|
||||
}
|
||||
|
||||
typedef struct faulty_conf {
|
||||
struct faulty_conf {
|
||||
int period[Modes];
|
||||
atomic_t counters[Modes];
|
||||
sector_t faults[MaxFault];
|
||||
int modes[MaxFault];
|
||||
int nfaults;
|
||||
mdk_rdev_t *rdev;
|
||||
} conf_t;
|
||||
struct md_rdev *rdev;
|
||||
};
|
||||
|
||||
static int check_mode(conf_t *conf, int mode)
|
||||
static int check_mode(struct faulty_conf *conf, int mode)
|
||||
{
|
||||
if (conf->period[mode] == 0 &&
|
||||
atomic_read(&conf->counters[mode]) <= 0)
|
||||
@@ -105,7 +105,7 @@ static int check_mode(conf_t *conf, int mode)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int check_sector(conf_t *conf, sector_t start, sector_t end, int dir)
|
||||
static int check_sector(struct faulty_conf *conf, sector_t start, sector_t end, int dir)
|
||||
{
|
||||
/* If we find a ReadFixable sector, we fix it ... */
|
||||
int i;
|
||||
@@ -129,7 +129,7 @@ static int check_sector(conf_t *conf, sector_t start, sector_t end, int dir)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void add_sector(conf_t *conf, sector_t start, int mode)
|
||||
static void add_sector(struct faulty_conf *conf, sector_t start, int mode)
|
||||
{
|
||||
int i;
|
||||
int n = conf->nfaults;
|
||||
@@ -169,9 +169,9 @@ static void add_sector(conf_t *conf, sector_t start, int mode)
|
||||
conf->nfaults = n+1;
|
||||
}
|
||||
|
||||
static int make_request(mddev_t *mddev, struct bio *bio)
|
||||
static int make_request(struct mddev *mddev, struct bio *bio)
|
||||
{
|
||||
conf_t *conf = mddev->private;
|
||||
struct faulty_conf *conf = mddev->private;
|
||||
int failit = 0;
|
||||
|
||||
if (bio_data_dir(bio) == WRITE) {
|
||||
@@ -222,9 +222,9 @@ static int make_request(mddev_t *mddev, struct bio *bio)
|
||||
}
|
||||
}
|
||||
|
||||
static void status(struct seq_file *seq, mddev_t *mddev)
|
||||
static void status(struct seq_file *seq, struct mddev *mddev)
|
||||
{
|
||||
conf_t *conf = mddev->private;
|
||||
struct faulty_conf *conf = mddev->private;
|
||||
int n;
|
||||
|
||||
if ((n=atomic_read(&conf->counters[WriteTransient])) != 0)
|
||||
@@ -255,11 +255,11 @@ static void status(struct seq_file *seq, mddev_t *mddev)
|
||||
}
|
||||
|
||||
|
||||
static int reshape(mddev_t *mddev)
|
||||
static int reshape(struct mddev *mddev)
|
||||
{
|
||||
int mode = mddev->new_layout & ModeMask;
|
||||
int count = mddev->new_layout >> ModeShift;
|
||||
conf_t *conf = mddev->private;
|
||||
struct faulty_conf *conf = mddev->private;
|
||||
|
||||
if (mddev->new_layout < 0)
|
||||
return 0;
|
||||
@@ -284,7 +284,7 @@ static int reshape(mddev_t *mddev)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static sector_t faulty_size(mddev_t *mddev, sector_t sectors, int raid_disks)
|
||||
static sector_t faulty_size(struct mddev *mddev, sector_t sectors, int raid_disks)
|
||||
{
|
||||
WARN_ONCE(raid_disks,
|
||||
"%s does not support generic reshape\n", __func__);
|
||||
@@ -295,11 +295,11 @@ static sector_t faulty_size(mddev_t *mddev, sector_t sectors, int raid_disks)
|
||||
return sectors;
|
||||
}
|
||||
|
||||
static int run(mddev_t *mddev)
|
||||
static int run(struct mddev *mddev)
|
||||
{
|
||||
mdk_rdev_t *rdev;
|
||||
struct md_rdev *rdev;
|
||||
int i;
|
||||
conf_t *conf;
|
||||
struct faulty_conf *conf;
|
||||
|
||||
if (md_check_no_bitmap(mddev))
|
||||
return -EINVAL;
|
||||
@@ -325,16 +325,16 @@ static int run(mddev_t *mddev)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int stop(mddev_t *mddev)
|
||||
static int stop(struct mddev *mddev)
|
||||
{
|
||||
conf_t *conf = mddev->private;
|
||||
struct faulty_conf *conf = mddev->private;
|
||||
|
||||
kfree(conf);
|
||||
mddev->private = NULL;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static struct mdk_personality faulty_personality =
|
||||
static struct md_personality faulty_personality =
|
||||
{
|
||||
.name = "faulty",
|
||||
.level = LEVEL_FAULTY,
|
||||
|
||||
+23 -23
@@ -26,10 +26,10 @@
|
||||
/*
|
||||
* find which device holds a particular offset
|
||||
*/
|
||||
static inline dev_info_t *which_dev(mddev_t *mddev, sector_t sector)
|
||||
static inline struct dev_info *which_dev(struct mddev *mddev, sector_t sector)
|
||||
{
|
||||
int lo, mid, hi;
|
||||
linear_conf_t *conf;
|
||||
struct linear_conf *conf;
|
||||
|
||||
lo = 0;
|
||||
hi = mddev->raid_disks - 1;
|
||||
@@ -63,8 +63,8 @@ static int linear_mergeable_bvec(struct request_queue *q,
|
||||
struct bvec_merge_data *bvm,
|
||||
struct bio_vec *biovec)
|
||||
{
|
||||
mddev_t *mddev = q->queuedata;
|
||||
dev_info_t *dev0;
|
||||
struct mddev *mddev = q->queuedata;
|
||||
struct dev_info *dev0;
|
||||
unsigned long maxsectors, bio_sectors = bvm->bi_size >> 9;
|
||||
sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev);
|
||||
|
||||
@@ -89,8 +89,8 @@ static int linear_mergeable_bvec(struct request_queue *q,
|
||||
|
||||
static int linear_congested(void *data, int bits)
|
||||
{
|
||||
mddev_t *mddev = data;
|
||||
linear_conf_t *conf;
|
||||
struct mddev *mddev = data;
|
||||
struct linear_conf *conf;
|
||||
int i, ret = 0;
|
||||
|
||||
if (mddev_congested(mddev, bits))
|
||||
@@ -108,9 +108,9 @@ static int linear_congested(void *data, int bits)
|
||||
return ret;
|
||||
}
|
||||
|
||||
static sector_t linear_size(mddev_t *mddev, sector_t sectors, int raid_disks)
|
||||
static sector_t linear_size(struct mddev *mddev, sector_t sectors, int raid_disks)
|
||||
{
|
||||
linear_conf_t *conf;
|
||||
struct linear_conf *conf;
|
||||
sector_t array_sectors;
|
||||
|
||||
rcu_read_lock();
|
||||
@@ -123,13 +123,13 @@ static sector_t linear_size(mddev_t *mddev, sector_t sectors, int raid_disks)
|
||||
return array_sectors;
|
||||
}
|
||||
|
||||
static linear_conf_t *linear_conf(mddev_t *mddev, int raid_disks)
|
||||
static struct linear_conf *linear_conf(struct mddev *mddev, int raid_disks)
|
||||
{
|
||||
linear_conf_t *conf;
|
||||
mdk_rdev_t *rdev;
|
||||
struct linear_conf *conf;
|
||||
struct md_rdev *rdev;
|
||||
int i, cnt;
|
||||
|
||||
conf = kzalloc (sizeof (*conf) + raid_disks*sizeof(dev_info_t),
|
||||
conf = kzalloc (sizeof (*conf) + raid_disks*sizeof(struct dev_info),
|
||||
GFP_KERNEL);
|
||||
if (!conf)
|
||||
return NULL;
|
||||
@@ -139,7 +139,7 @@ static linear_conf_t *linear_conf(mddev_t *mddev, int raid_disks)
|
||||
|
||||
list_for_each_entry(rdev, &mddev->disks, same_set) {
|
||||
int j = rdev->raid_disk;
|
||||
dev_info_t *disk = conf->disks + j;
|
||||
struct dev_info *disk = conf->disks + j;
|
||||
sector_t sectors;
|
||||
|
||||
if (j < 0 || j >= raid_disks || disk->rdev) {
|
||||
@@ -194,9 +194,9 @@ out:
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static int linear_run (mddev_t *mddev)
|
||||
static int linear_run (struct mddev *mddev)
|
||||
{
|
||||
linear_conf_t *conf;
|
||||
struct linear_conf *conf;
|
||||
|
||||
if (md_check_no_bitmap(mddev))
|
||||
return -EINVAL;
|
||||
@@ -213,7 +213,7 @@ static int linear_run (mddev_t *mddev)
|
||||
return md_integrity_register(mddev);
|
||||
}
|
||||
|
||||
static int linear_add(mddev_t *mddev, mdk_rdev_t *rdev)
|
||||
static int linear_add(struct mddev *mddev, struct md_rdev *rdev)
|
||||
{
|
||||
/* Adding a drive to a linear array allows the array to grow.
|
||||
* It is permitted if the new drive has a matching superblock
|
||||
@@ -223,7 +223,7 @@ static int linear_add(mddev_t *mddev, mdk_rdev_t *rdev)
|
||||
* The current one is never freed until the array is stopped.
|
||||
* This avoids races.
|
||||
*/
|
||||
linear_conf_t *newconf, *oldconf;
|
||||
struct linear_conf *newconf, *oldconf;
|
||||
|
||||
if (rdev->saved_raid_disk != mddev->raid_disks)
|
||||
return -EINVAL;
|
||||
@@ -245,9 +245,9 @@ static int linear_add(mddev_t *mddev, mdk_rdev_t *rdev)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int linear_stop (mddev_t *mddev)
|
||||
static int linear_stop (struct mddev *mddev)
|
||||
{
|
||||
linear_conf_t *conf = mddev->private;
|
||||
struct linear_conf *conf = mddev->private;
|
||||
|
||||
/*
|
||||
* We do not require rcu protection here since
|
||||
@@ -264,9 +264,9 @@ static int linear_stop (mddev_t *mddev)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int linear_make_request (mddev_t *mddev, struct bio *bio)
|
||||
static int linear_make_request (struct mddev *mddev, struct bio *bio)
|
||||
{
|
||||
dev_info_t *tmp_dev;
|
||||
struct dev_info *tmp_dev;
|
||||
sector_t start_sector;
|
||||
|
||||
if (unlikely(bio->bi_rw & REQ_FLUSH)) {
|
||||
@@ -323,14 +323,14 @@ static int linear_make_request (mddev_t *mddev, struct bio *bio)
|
||||
return 1;
|
||||
}
|
||||
|
||||
static void linear_status (struct seq_file *seq, mddev_t *mddev)
|
||||
static void linear_status (struct seq_file *seq, struct mddev *mddev)
|
||||
{
|
||||
|
||||
seq_printf(seq, " %dk rounding", mddev->chunk_sectors / 2);
|
||||
}
|
||||
|
||||
|
||||
static struct mdk_personality linear_personality =
|
||||
static struct md_personality linear_personality =
|
||||
{
|
||||
.name = "linear",
|
||||
.level = LEVEL_LINEAR,
|
||||
|
||||
+3 -9

@@ -2,20 +2,14 @@
 #define _LINEAR_H

 struct dev_info {
-       mdk_rdev_t *rdev;
+       struct md_rdev *rdev;
        sector_t end_sector;
 };

-typedef struct dev_info dev_info_t;
-
-struct linear_private_data
+struct linear_conf
 {
        struct rcu_head rcu;
        sector_t array_sectors;
-       dev_info_t disks[0];
+       struct dev_info disks[0];
 };
-
-
-typedef struct linear_private_data linear_conf_t;
-
 #endif
+320 -311 (file diff suppressed because it is too large)
+74 -77
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
md_k.h : kernel internal structure of the Linux MD driver
|
||||
md.h : kernel internal structure of the Linux MD driver
|
||||
Copyright (C) 1996-98 Ingo Molnar, Gadi Oxman
|
||||
|
||||
This program is free software; you can redistribute it and/or modify
|
||||
@@ -26,9 +26,6 @@
|
||||
|
||||
#define MaxSector (~(sector_t)0)
|
||||
|
||||
typedef struct mddev_s mddev_t;
|
||||
typedef struct mdk_rdev_s mdk_rdev_t;
|
||||
|
||||
/* Bad block numbers are stored sorted in a single page.
|
||||
* 64bits is used for each block or extent.
|
||||
* 54 bits are sector number, 9 bits are extent size,
|
||||
@@ -39,12 +36,11 @@ typedef struct mdk_rdev_s mdk_rdev_t;
|
||||
/*
|
||||
* MD's 'extended' device
|
||||
*/
|
||||
struct mdk_rdev_s
|
||||
{
|
||||
struct md_rdev {
|
||||
struct list_head same_set; /* RAID devices within the same set */
|
||||
|
||||
sector_t sectors; /* Device size (in 512bytes sectors) */
|
||||
mddev_t *mddev; /* RAID array if running */
|
||||
struct mddev *mddev; /* RAID array if running */
|
||||
int last_events; /* IO event timestamp */
|
||||
|
||||
/*
|
||||
@@ -168,7 +164,7 @@ struct mdk_rdev_s
|
||||
|
||||
extern int md_is_badblock(struct badblocks *bb, sector_t s, int sectors,
|
||||
sector_t *first_bad, int *bad_sectors);
|
||||
static inline int is_badblock(mdk_rdev_t *rdev, sector_t s, int sectors,
|
||||
static inline int is_badblock(struct md_rdev *rdev, sector_t s, int sectors,
|
||||
sector_t *first_bad, int *bad_sectors)
|
||||
{
|
||||
if (unlikely(rdev->badblocks.count)) {
|
||||
@@ -181,15 +177,14 @@ static inline int is_badblock(mdk_rdev_t *rdev, sector_t s, int sectors,
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
extern int rdev_set_badblocks(mdk_rdev_t *rdev, sector_t s, int sectors,
|
||||
extern int rdev_set_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
|
||||
int acknowledged);
|
||||
extern int rdev_clear_badblocks(mdk_rdev_t *rdev, sector_t s, int sectors);
|
||||
extern int rdev_clear_badblocks(struct md_rdev *rdev, sector_t s, int sectors);
|
||||
extern void md_ack_all_badblocks(struct badblocks *bb);
|
||||
|
||||
struct mddev_s
|
||||
{
|
||||
struct mddev {
|
||||
void *private;
|
||||
struct mdk_personality *pers;
|
||||
struct md_personality *pers;
|
||||
dev_t unit;
|
||||
int md_minor;
|
||||
struct list_head disks;
|
||||
@@ -256,8 +251,8 @@ struct mddev_s
|
||||
atomic_t plug_cnt; /* If device is expecting
|
||||
* more bios soon.
|
||||
*/
|
||||
struct mdk_thread_s *thread; /* management thread */
|
||||
struct mdk_thread_s *sync_thread; /* doing resync or reconstruct */
|
||||
struct md_thread *thread; /* management thread */
|
||||
struct md_thread *sync_thread; /* doing resync or reconstruct */
|
||||
sector_t curr_resync; /* last block scheduled */
|
||||
/* As resync requests can complete out of order, we cannot easily track
|
||||
* how much resync has been completed. So we occasionally pause until
|
||||
@@ -402,11 +397,11 @@ struct mddev_s
|
||||
atomic_t flush_pending;
|
||||
struct work_struct flush_work;
|
||||
struct work_struct event_work; /* used by dm to report failure event */
|
||||
void (*sync_super)(mddev_t *mddev, mdk_rdev_t *rdev);
|
||||
void (*sync_super)(struct mddev *mddev, struct md_rdev *rdev);
|
||||
};
|
||||
|
||||
|
||||
static inline void rdev_dec_pending(mdk_rdev_t *rdev, mddev_t *mddev)
|
||||
static inline void rdev_dec_pending(struct md_rdev *rdev, struct mddev *mddev)
|
||||
{
|
||||
int faulty = test_bit(Faulty, &rdev->flags);
|
||||
if (atomic_dec_and_test(&rdev->nr_pending) && faulty)
|
||||
@@ -418,35 +413,35 @@ static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sect
|
||||
atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
|
||||
}
|
||||
|
||||
struct mdk_personality
|
||||
struct md_personality
|
||||
{
|
||||
char *name;
|
||||
int level;
|
||||
struct list_head list;
|
||||
struct module *owner;
|
||||
int (*make_request)(mddev_t *mddev, struct bio *bio);
|
||||
int (*run)(mddev_t *mddev);
|
||||
int (*stop)(mddev_t *mddev);
|
||||
void (*status)(struct seq_file *seq, mddev_t *mddev);
|
||||
int (*make_request)(struct mddev *mddev, struct bio *bio);
|
||||
int (*run)(struct mddev *mddev);
|
||||
int (*stop)(struct mddev *mddev);
|
||||
void (*status)(struct seq_file *seq, struct mddev *mddev);
|
||||
/* error_handler must set ->faulty and clear ->in_sync
|
||||
* if appropriate, and should abort recovery if needed
|
||||
*/
|
||||
void (*error_handler)(mddev_t *mddev, mdk_rdev_t *rdev);
|
||||
int (*hot_add_disk) (mddev_t *mddev, mdk_rdev_t *rdev);
|
||||
int (*hot_remove_disk) (mddev_t *mddev, int number);
|
||||
int (*spare_active) (mddev_t *mddev);
|
||||
sector_t (*sync_request)(mddev_t *mddev, sector_t sector_nr, int *skipped, int go_faster);
|
||||
int (*resize) (mddev_t *mddev, sector_t sectors);
|
||||
sector_t (*size) (mddev_t *mddev, sector_t sectors, int raid_disks);
|
||||
int (*check_reshape) (mddev_t *mddev);
|
||||
int (*start_reshape) (mddev_t *mddev);
|
||||
void (*finish_reshape) (mddev_t *mddev);
|
||||
void (*error_handler)(struct mddev *mddev, struct md_rdev *rdev);
|
||||
int (*hot_add_disk) (struct mddev *mddev, struct md_rdev *rdev);
|
||||
int (*hot_remove_disk) (struct mddev *mddev, int number);
|
||||
int (*spare_active) (struct mddev *mddev);
|
||||
sector_t (*sync_request)(struct mddev *mddev, sector_t sector_nr, int *skipped, int go_faster);
|
||||
int (*resize) (struct mddev *mddev, sector_t sectors);
|
||||
sector_t (*size) (struct mddev *mddev, sector_t sectors, int raid_disks);
|
||||
int (*check_reshape) (struct mddev *mddev);
|
||||
int (*start_reshape) (struct mddev *mddev);
|
||||
void (*finish_reshape) (struct mddev *mddev);
|
||||
/* quiesce moves between quiescence states
|
||||
* 0 - fully active
|
||||
* 1 - no new requests allowed
|
||||
* others - reserved
|
||||
*/
|
||||
void (*quiesce) (mddev_t *mddev, int state);
|
||||
void (*quiesce) (struct mddev *mddev, int state);
|
||||
/* takeover is used to transition an array from one
|
||||
* personality to another. The new personality must be able
|
||||
* to handle the data in the current layout.
|
||||
@@ -456,14 +451,14 @@ struct mdk_personality
|
||||
* This needs to be installed and then ->run used to activate the
|
||||
* array.
|
||||
*/
|
||||
void *(*takeover) (mddev_t *mddev);
|
||||
void *(*takeover) (struct mddev *mddev);
|
||||
};
|
||||
|
||||
|
||||
struct md_sysfs_entry {
|
||||
struct attribute attr;
|
||||
ssize_t (*show)(mddev_t *, char *);
|
||||
ssize_t (*store)(mddev_t *, const char *, size_t);
|
||||
ssize_t (*show)(struct mddev *, char *);
|
||||
ssize_t (*store)(struct mddev *, const char *, size_t);
|
||||
};
|
||||
extern struct attribute_group md_bitmap_group;
|
||||
|
||||
@@ -479,19 +474,19 @@ static inline void sysfs_notify_dirent_safe(struct sysfs_dirent *sd)
|
||||
sysfs_notify_dirent(sd);
|
||||
}
|
||||
|
||||
static inline char * mdname (mddev_t * mddev)
|
||||
static inline char * mdname (struct mddev * mddev)
|
||||
{
|
||||
return mddev->gendisk ? mddev->gendisk->disk_name : "mdX";
|
||||
}
|
||||
|
||||
static inline int sysfs_link_rdev(mddev_t *mddev, mdk_rdev_t *rdev)
|
||||
static inline int sysfs_link_rdev(struct mddev *mddev, struct md_rdev *rdev)
|
||||
{
|
||||
char nm[20];
|
||||
sprintf(nm, "rd%d", rdev->raid_disk);
|
||||
return sysfs_create_link(&mddev->kobj, &rdev->kobj, nm);
|
||||
}
|
||||
|
||||
static inline void sysfs_unlink_rdev(mddev_t *mddev, mdk_rdev_t *rdev)
|
||||
static inline void sysfs_unlink_rdev(struct mddev *mddev, struct md_rdev *rdev)
|
||||
{
|
||||
char nm[20];
|
||||
sprintf(nm, "rd%d", rdev->raid_disk);
|
||||
@@ -514,14 +509,14 @@ static inline void sysfs_unlink_rdev(mddev_t *mddev, mdk_rdev_t *rdev)
|
||||
#define rdev_for_each_rcu(rdev, mddev) \
|
||||
list_for_each_entry_rcu(rdev, &((mddev)->disks), same_set)
|
||||
|
||||
typedef struct mdk_thread_s {
|
||||
void (*run) (mddev_t *mddev);
|
||||
mddev_t *mddev;
|
||||
struct md_thread {
|
||||
void (*run) (struct mddev *mddev);
|
||||
struct mddev *mddev;
|
||||
wait_queue_head_t wqueue;
|
||||
unsigned long flags;
|
||||
struct task_struct *tsk;
|
||||
unsigned long timeout;
|
||||
} mdk_thread_t;
|
||||
};
|
||||
|
||||
#define THREAD_WAKEUP 0
|
||||
|
||||
@@ -556,48 +551,50 @@ static inline void safe_put_page(struct page *p)
|
||||
if (p) put_page(p);
|
||||
}
|
||||
|
||||
extern int register_md_personality(struct mdk_personality *p);
|
||||
extern int unregister_md_personality(struct mdk_personality *p);
|
||||
extern mdk_thread_t * md_register_thread(void (*run) (mddev_t *mddev),
|
||||
mddev_t *mddev, const char *name);
|
||||
extern void md_unregister_thread(mdk_thread_t **threadp);
|
||||
extern void md_wakeup_thread(mdk_thread_t *thread);
|
||||
extern void md_check_recovery(mddev_t *mddev);
|
||||
extern void md_write_start(mddev_t *mddev, struct bio *bi);
|
||||
extern void md_write_end(mddev_t *mddev);
|
||||
extern void md_done_sync(mddev_t *mddev, int blocks, int ok);
|
||||
extern void md_error(mddev_t *mddev, mdk_rdev_t *rdev);
|
||||
extern int register_md_personality(struct md_personality *p);
|
||||
extern int unregister_md_personality(struct md_personality *p);
|
||||
extern struct md_thread *md_register_thread(
|
||||
void (*run)(struct mddev *mddev),
|
||||
struct mddev *mddev,
|
||||
const char *name);
|
||||
extern void md_unregister_thread(struct md_thread **threadp);
|
||||
extern void md_wakeup_thread(struct md_thread *thread);
|
||||
extern void md_check_recovery(struct mddev *mddev);
|
||||
extern void md_write_start(struct mddev *mddev, struct bio *bi);
|
||||
extern void md_write_end(struct mddev *mddev);
|
||||
extern void md_done_sync(struct mddev *mddev, int blocks, int ok);
|
||||
extern void md_error(struct mddev *mddev, struct md_rdev *rdev);
|
||||
|
||||
extern int mddev_congested(mddev_t *mddev, int bits);
|
||||
extern void md_flush_request(mddev_t *mddev, struct bio *bio);
|
||||
extern void md_super_write(mddev_t *mddev, mdk_rdev_t *rdev,
|
||||
extern int mddev_congested(struct mddev *mddev, int bits);
|
||||
extern void md_flush_request(struct mddev *mddev, struct bio *bio);
|
||||
extern void md_super_write(struct mddev *mddev, struct md_rdev *rdev,
|
||||
sector_t sector, int size, struct page *page);
|
||||
extern void md_super_wait(mddev_t *mddev);
|
||||
extern int sync_page_io(mdk_rdev_t *rdev, sector_t sector, int size,
|
||||
extern void md_super_wait(struct mddev *mddev);
|
||||
extern int sync_page_io(struct md_rdev *rdev, sector_t sector, int size,
|
||||
struct page *page, int rw, bool metadata_op);
|
||||
extern void md_do_sync(mddev_t *mddev);
|
||||
extern void md_new_event(mddev_t *mddev);
|
||||
extern int md_allow_write(mddev_t *mddev);
|
||||
extern void md_wait_for_blocked_rdev(mdk_rdev_t *rdev, mddev_t *mddev);
|
||||
extern void md_set_array_sectors(mddev_t *mddev, sector_t array_sectors);
|
||||
extern int md_check_no_bitmap(mddev_t *mddev);
|
||||
extern int md_integrity_register(mddev_t *mddev);
|
||||
extern void md_integrity_add_rdev(mdk_rdev_t *rdev, mddev_t *mddev);
|
||||
extern void md_do_sync(struct mddev *mddev);
|
||||
extern void md_new_event(struct mddev *mddev);
|
||||
extern int md_allow_write(struct mddev *mddev);
|
||||
extern void md_wait_for_blocked_rdev(struct md_rdev *rdev, struct mddev *mddev);
|
||||
extern void md_set_array_sectors(struct mddev *mddev, sector_t array_sectors);
|
||||
extern int md_check_no_bitmap(struct mddev *mddev);
|
||||
extern int md_integrity_register(struct mddev *mddev);
|
||||
extern void md_integrity_add_rdev(struct md_rdev *rdev, struct mddev *mddev);
|
||||
extern int strict_strtoul_scaled(const char *cp, unsigned long *res, int scale);
|
||||
extern void restore_bitmap_write_access(struct file *file);
|
||||
|
||||
extern void mddev_init(mddev_t *mddev);
|
||||
extern int md_run(mddev_t *mddev);
|
||||
extern void md_stop(mddev_t *mddev);
|
||||
extern void md_stop_writes(mddev_t *mddev);
|
||||
extern int md_rdev_init(mdk_rdev_t *rdev);
|
||||
extern void mddev_init(struct mddev *mddev);
|
||||
extern int md_run(struct mddev *mddev);
|
||||
extern void md_stop(struct mddev *mddev);
|
||||
extern void md_stop_writes(struct mddev *mddev);
|
||||
extern int md_rdev_init(struct md_rdev *rdev);
|
||||
|
||||
extern void mddev_suspend(mddev_t *mddev);
|
||||
extern void mddev_resume(mddev_t *mddev);
|
||||
extern void mddev_suspend(struct mddev *mddev);
|
||||
extern void mddev_resume(struct mddev *mddev);
|
||||
extern struct bio *bio_clone_mddev(struct bio *bio, gfp_t gfp_mask,
|
||||
mddev_t *mddev);
|
||||
struct mddev *mddev);
|
||||
extern struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs,
|
||||
mddev_t *mddev);
|
||||
extern int mddev_check_plugged(mddev_t *mddev);
|
||||
struct mddev *mddev);
|
||||
extern int mddev_check_plugged(struct mddev *mddev);
|
||||
extern void md_trim_bio(struct bio *bio, int offset, int size);
|
||||
#endif /* _MD_MD_H */
|
||||
|
||||
+32 -32
@@ -31,7 +31,7 @@
|
||||
#define NR_RESERVED_BUFS 32
|
||||
|
||||
|
||||
static int multipath_map (multipath_conf_t *conf)
|
||||
static int multipath_map (struct mpconf *conf)
|
||||
{
|
||||
int i, disks = conf->raid_disks;
|
||||
|
||||
@@ -42,7 +42,7 @@ static int multipath_map (multipath_conf_t *conf)
|
||||
|
||||
rcu_read_lock();
|
||||
for (i = 0; i < disks; i++) {
|
||||
mdk_rdev_t *rdev = rcu_dereference(conf->multipaths[i].rdev);
|
||||
struct md_rdev *rdev = rcu_dereference(conf->multipaths[i].rdev);
|
||||
if (rdev && test_bit(In_sync, &rdev->flags)) {
|
||||
atomic_inc(&rdev->nr_pending);
|
||||
rcu_read_unlock();
|
||||
@@ -58,8 +58,8 @@ static int multipath_map (multipath_conf_t *conf)
|
||||
static void multipath_reschedule_retry (struct multipath_bh *mp_bh)
|
||||
{
|
||||
unsigned long flags;
|
||||
mddev_t *mddev = mp_bh->mddev;
|
||||
multipath_conf_t *conf = mddev->private;
|
||||
struct mddev *mddev = mp_bh->mddev;
|
||||
struct mpconf *conf = mddev->private;
|
||||
|
||||
spin_lock_irqsave(&conf->device_lock, flags);
|
||||
list_add(&mp_bh->retry_list, &conf->retry_list);
|
||||
@@ -76,7 +76,7 @@ static void multipath_reschedule_retry (struct multipath_bh *mp_bh)
|
||||
static void multipath_end_bh_io (struct multipath_bh *mp_bh, int err)
|
||||
{
|
||||
struct bio *bio = mp_bh->master_bio;
|
||||
multipath_conf_t *conf = mp_bh->mddev->private;
|
||||
struct mpconf *conf = mp_bh->mddev->private;
|
||||
|
||||
bio_endio(bio, err);
|
||||
mempool_free(mp_bh, conf->pool);
|
||||
@@ -86,8 +86,8 @@ static void multipath_end_request(struct bio *bio, int error)
|
||||
{
|
||||
int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
|
||||
struct multipath_bh *mp_bh = bio->bi_private;
|
||||
multipath_conf_t *conf = mp_bh->mddev->private;
|
||||
mdk_rdev_t *rdev = conf->multipaths[mp_bh->path].rdev;
|
||||
struct mpconf *conf = mp_bh->mddev->private;
|
||||
struct md_rdev *rdev = conf->multipaths[mp_bh->path].rdev;
|
||||
|
||||
if (uptodate)
|
||||
multipath_end_bh_io(mp_bh, 0);
|
||||
@@ -106,9 +106,9 @@ static void multipath_end_request(struct bio *bio, int error)
|
||||
rdev_dec_pending(rdev, conf->mddev);
|
||||
}
|
||||
|
||||
static int multipath_make_request(mddev_t *mddev, struct bio * bio)
|
||||
static int multipath_make_request(struct mddev *mddev, struct bio * bio)
|
||||
{
|
||||
multipath_conf_t *conf = mddev->private;
|
||||
struct mpconf *conf = mddev->private;
|
||||
struct multipath_bh * mp_bh;
|
||||
struct multipath_info *multipath;
|
||||
|
||||
@@ -140,9 +140,9 @@ static int multipath_make_request(mddev_t *mddev, struct bio * bio)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void multipath_status (struct seq_file *seq, mddev_t *mddev)
|
||||
static void multipath_status (struct seq_file *seq, struct mddev *mddev)
|
||||
{
|
||||
multipath_conf_t *conf = mddev->private;
|
||||
struct mpconf *conf = mddev->private;
|
||||
int i;
|
||||
|
||||
seq_printf (seq, " [%d/%d] [", conf->raid_disks,
|
||||
@@ -156,8 +156,8 @@ static void multipath_status (struct seq_file *seq, mddev_t *mddev)
|
||||
|
||||
static int multipath_congested(void *data, int bits)
|
||||
{
|
||||
mddev_t *mddev = data;
|
||||
multipath_conf_t *conf = mddev->private;
|
||||
struct mddev *mddev = data;
|
||||
struct mpconf *conf = mddev->private;
|
||||
int i, ret = 0;
|
||||
|
||||
if (mddev_congested(mddev, bits))
|
||||
@@ -165,7 +165,7 @@ static int multipath_congested(void *data, int bits)
|
||||
|
||||
rcu_read_lock();
|
||||
for (i = 0; i < mddev->raid_disks ; i++) {
|
||||
mdk_rdev_t *rdev = rcu_dereference(conf->multipaths[i].rdev);
|
||||
struct md_rdev *rdev = rcu_dereference(conf->multipaths[i].rdev);
|
||||
if (rdev && !test_bit(Faulty, &rdev->flags)) {
|
||||
struct request_queue *q = bdev_get_queue(rdev->bdev);
|
||||
|
||||
@@ -183,9 +183,9 @@ static int multipath_congested(void *data, int bits)
|
||||
/*
|
||||
* Careful, this can execute in IRQ contexts as well!
|
||||
*/
|
||||
static void multipath_error (mddev_t *mddev, mdk_rdev_t *rdev)
|
||||
static void multipath_error (struct mddev *mddev, struct md_rdev *rdev)
|
||||
{
|
||||
multipath_conf_t *conf = mddev->private;
|
||||
struct mpconf *conf = mddev->private;
|
||||
char b[BDEVNAME_SIZE];
|
||||
|
||||
if (conf->raid_disks - mddev->degraded <= 1) {
|
||||
@@ -218,7 +218,7 @@ static void multipath_error (mddev_t *mddev, mdk_rdev_t *rdev)
|
||||
conf->raid_disks - mddev->degraded);
|
||||
}
|
||||
|
||||
static void print_multipath_conf (multipath_conf_t *conf)
|
||||
static void print_multipath_conf (struct mpconf *conf)
|
||||
{
|
||||
int i;
|
||||
struct multipath_info *tmp;
|
||||
@@ -242,9 +242,9 @@ static void print_multipath_conf (multipath_conf_t *conf)
|
||||
}
|
||||
|
||||
|
||||
static int multipath_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
|
||||
static int multipath_add_disk(struct mddev *mddev, struct md_rdev *rdev)
|
||||
{
|
||||
multipath_conf_t *conf = mddev->private;
|
||||
struct mpconf *conf = mddev->private;
|
||||
struct request_queue *q;
|
||||
int err = -EEXIST;
|
||||
int path;
|
||||
@@ -291,11 +291,11 @@ static int multipath_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
|
||||
return err;
|
||||
}
|
||||
|
||||
static int multipath_remove_disk(mddev_t *mddev, int number)
|
||||
static int multipath_remove_disk(struct mddev *mddev, int number)
|
||||
{
|
||||
multipath_conf_t *conf = mddev->private;
|
||||
struct mpconf *conf = mddev->private;
|
||||
int err = 0;
|
||||
mdk_rdev_t *rdev;
|
||||
struct md_rdev *rdev;
|
||||
struct multipath_info *p = conf->multipaths + number;
|
||||
|
||||
print_multipath_conf(conf);
|
||||
@@ -335,12 +335,12 @@ abort:
|
||||
* 3. Performs writes following reads for array syncronising.
|
||||
*/
|
||||
|
||||
static void multipathd (mddev_t *mddev)
|
||||
static void multipathd (struct mddev *mddev)
|
||||
{
|
||||
struct multipath_bh *mp_bh;
|
||||
struct bio *bio;
|
||||
unsigned long flags;
|
||||
multipath_conf_t *conf = mddev->private;
|
||||
struct mpconf *conf = mddev->private;
|
||||
struct list_head *head = &conf->retry_list;
|
||||
|
||||
md_check_recovery(mddev);
|
||||
@@ -379,7 +379,7 @@ static void multipathd (mddev_t *mddev)
|
||||
spin_unlock_irqrestore(&conf->device_lock, flags);
|
||||
}
|
||||
|
||||
static sector_t multipath_size(mddev_t *mddev, sector_t sectors, int raid_disks)
|
||||
static sector_t multipath_size(struct mddev *mddev, sector_t sectors, int raid_disks)
|
||||
{
|
||||
WARN_ONCE(sectors || raid_disks,
|
||||
"%s does not support generic reshape\n", __func__);
|
||||
@@ -387,12 +387,12 @@ static sector_t multipath_size(mddev_t *mddev, sector_t sectors, int raid_disks)
|
||||
return mddev->dev_sectors;
|
||||
}
|
||||
|
||||
static int multipath_run (mddev_t *mddev)
|
||||
static int multipath_run (struct mddev *mddev)
|
||||
{
|
||||
multipath_conf_t *conf;
|
||||
struct mpconf *conf;
|
||||
int disk_idx;
|
||||
struct multipath_info *disk;
|
||||
mdk_rdev_t *rdev;
|
||||
struct md_rdev *rdev;
|
||||
int working_disks;
|
||||
|
||||
if (md_check_no_bitmap(mddev))
|
||||
@@ -409,7 +409,7 @@ static int multipath_run (mddev_t *mddev)
|
||||
* should be freed in multipath_stop()]
|
||||
*/
|
||||
|
||||
conf = kzalloc(sizeof(multipath_conf_t), GFP_KERNEL);
|
||||
conf = kzalloc(sizeof(struct mpconf), GFP_KERNEL);
|
||||
mddev->private = conf;
|
||||
if (!conf) {
|
||||
printk(KERN_ERR
|
||||
@@ -510,9 +510,9 @@ out:
|
||||
}
|
||||
|
||||
|
||||
static int multipath_stop (mddev_t *mddev)
|
||||
static int multipath_stop (struct mddev *mddev)
|
||||
{
|
||||
multipath_conf_t *conf = mddev->private;
|
||||
struct mpconf *conf = mddev->private;
|
||||
|
||||
md_unregister_thread(&mddev->thread);
|
||||
blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/
|
||||
@@ -523,7 +523,7 @@ static int multipath_stop (mddev_t *mddev)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static struct mdk_personality multipath_personality =
|
||||
static struct md_personality multipath_personality =
|
||||
{
|
||||
.name = "multipath",
|
||||
.level = LEVEL_MULTIPATH,
|
||||
|
||||
@@ -2,11 +2,11 @@
 #define _MULTIPATH_H

 struct multipath_info {
-       mdk_rdev_t *rdev;
+       struct md_rdev *rdev;
 };

-struct multipath_private_data {
-       mddev_t *mddev;
+struct mpconf {
+       struct mddev *mddev;
        struct multipath_info *multipaths;
        int raid_disks;
        spinlock_t device_lock;
@@ -15,8 +15,6 @@ struct multipath_private_data {
        mempool_t *pool;
 };

-typedef struct multipath_private_data multipath_conf_t;
-
 /*
  * this is our 'private' 'collective' MULTIPATH buffer head.
  * it contains information about what kind of IO operations were started
@@ -24,7 +22,7 @@ typedef struct multipath_private_data multipath_conf_t;
 */

 struct multipath_bh {
-       mddev_t *mddev;
+       struct mddev *mddev;
        struct bio *master_bio;
        struct bio bio;
        int path;
+83 -108
@@ -27,9 +27,9 @@
|
||||
|
||||
static int raid0_congested(void *data, int bits)
|
||||
{
|
||||
mddev_t *mddev = data;
|
||||
raid0_conf_t *conf = mddev->private;
|
||||
mdk_rdev_t **devlist = conf->devlist;
|
||||
struct mddev *mddev = data;
|
||||
struct r0conf *conf = mddev->private;
|
||||
struct md_rdev **devlist = conf->devlist;
|
||||
int raid_disks = conf->strip_zone[0].nb_dev;
|
||||
int i, ret = 0;
|
||||
|
||||
@@ -47,52 +47,53 @@ static int raid0_congested(void *data, int bits)
|
||||
/*
|
||||
* inform the user of the raid configuration
|
||||
*/
|
||||
static void dump_zones(mddev_t *mddev)
|
||||
static void dump_zones(struct mddev *mddev)
|
||||
{
|
||||
int j, k, h;
|
||||
int j, k;
|
||||
sector_t zone_size = 0;
|
||||
sector_t zone_start = 0;
|
||||
char b[BDEVNAME_SIZE];
|
||||
raid0_conf_t *conf = mddev->private;
|
||||
struct r0conf *conf = mddev->private;
|
||||
int raid_disks = conf->strip_zone[0].nb_dev;
|
||||
printk(KERN_INFO "******* %s configuration *********\n",
|
||||
mdname(mddev));
|
||||
h = 0;
|
||||
printk(KERN_INFO "md: RAID0 configuration for %s - %d zone%s\n",
|
||||
mdname(mddev),
|
||||
conf->nr_strip_zones, conf->nr_strip_zones==1?"":"s");
|
||||
for (j = 0; j < conf->nr_strip_zones; j++) {
|
||||
printk(KERN_INFO "zone%d=[", j);
|
||||
printk(KERN_INFO "md: zone%d=[", j);
|
||||
for (k = 0; k < conf->strip_zone[j].nb_dev; k++)
|
||||
printk(KERN_CONT "%s/",
|
||||
printk(KERN_CONT "%s%s", k?"/":"",
|
||||
bdevname(conf->devlist[j*raid_disks
|
||||
+ k]->bdev, b));
|
||||
printk(KERN_CONT "]\n");
|
||||
|
||||
zone_size = conf->strip_zone[j].zone_end - zone_start;
|
||||
printk(KERN_INFO " zone offset=%llukb "
|
||||
"device offset=%llukb size=%llukb\n",
|
||||
printk(KERN_INFO " zone-offset=%10lluKB, "
|
||||
"device-offset=%10lluKB, size=%10lluKB\n",
|
||||
(unsigned long long)zone_start>>1,
|
||||
(unsigned long long)conf->strip_zone[j].dev_start>>1,
|
||||
(unsigned long long)zone_size>>1);
|
||||
zone_start = conf->strip_zone[j].zone_end;
|
||||
}
|
||||
printk(KERN_INFO "**********************************\n\n");
|
||||
printk(KERN_INFO "\n");
|
||||
}
|
||||
|
||||
static int create_strip_zones(mddev_t *mddev, raid0_conf_t **private_conf)
|
||||
static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf)
|
||||
{
|
||||
int i, c, err;
|
||||
sector_t curr_zone_end, sectors;
|
||||
mdk_rdev_t *smallest, *rdev1, *rdev2, *rdev, **dev;
|
||||
struct md_rdev *smallest, *rdev1, *rdev2, *rdev, **dev;
|
||||
struct strip_zone *zone;
|
||||
int cnt;
|
||||
char b[BDEVNAME_SIZE];
|
||||
raid0_conf_t *conf = kzalloc(sizeof(*conf), GFP_KERNEL);
|
||||
char b2[BDEVNAME_SIZE];
|
||||
struct r0conf *conf = kzalloc(sizeof(*conf), GFP_KERNEL);
|
||||
|
||||
if (!conf)
|
||||
return -ENOMEM;
|
||||
list_for_each_entry(rdev1, &mddev->disks, same_set) {
|
||||
printk(KERN_INFO "md/raid0:%s: looking at %s\n",
|
||||
mdname(mddev),
|
||||
bdevname(rdev1->bdev, b));
|
||||
pr_debug("md/raid0:%s: looking at %s\n",
|
||||
mdname(mddev),
|
||||
bdevname(rdev1->bdev, b));
|
||||
c = 0;
|
||||
|
||||
/* round size to chunk_size */
|
||||
@@ -101,16 +102,16 @@ static int create_strip_zones(mddev_t *mddev, raid0_conf_t **private_conf)
|
||||
rdev1->sectors = sectors * mddev->chunk_sectors;
|
||||
|
||||
list_for_each_entry(rdev2, &mddev->disks, same_set) {
|
||||
printk(KERN_INFO "md/raid0:%s: comparing %s(%llu)",
|
||||
mdname(mddev),
|
||||
bdevname(rdev1->bdev,b),
|
||||
(unsigned long long)rdev1->sectors);
|
||||
printk(KERN_CONT " with %s(%llu)\n",
|
||||
bdevname(rdev2->bdev,b),
|
||||
(unsigned long long)rdev2->sectors);
|
||||
pr_debug("md/raid0:%s: comparing %s(%llu)"
|
||||
" with %s(%llu)\n",
|
||||
mdname(mddev),
|
||||
bdevname(rdev1->bdev,b),
|
||||
(unsigned long long)rdev1->sectors,
|
||||
bdevname(rdev2->bdev,b2),
|
||||
(unsigned long long)rdev2->sectors);
|
||||
if (rdev2 == rdev1) {
|
||||
printk(KERN_INFO "md/raid0:%s: END\n",
|
||||
mdname(mddev));
|
||||
pr_debug("md/raid0:%s: END\n",
|
||||
mdname(mddev));
|
||||
break;
|
||||
}
|
||||
if (rdev2->sectors == rdev1->sectors) {
|
||||
@@ -118,30 +119,30 @@ static int create_strip_zones(mddev_t *mddev, raid0_conf_t **private_conf)
|
||||
* Not unique, don't count it as a new
|
||||
* group
|
||||
*/
|
||||
printk(KERN_INFO "md/raid0:%s: EQUAL\n",
|
||||
mdname(mddev));
|
||||
pr_debug("md/raid0:%s: EQUAL\n",
|
||||
mdname(mddev));
|
||||
c = 1;
|
||||
break;
|
||||
}
|
||||
printk(KERN_INFO "md/raid0:%s: NOT EQUAL\n",
|
||||
mdname(mddev));
|
||||
pr_debug("md/raid0:%s: NOT EQUAL\n",
|
||||
mdname(mddev));
|
||||
}
|
||||
if (!c) {
|
||||
printk(KERN_INFO "md/raid0:%s: ==> UNIQUE\n",
|
||||
mdname(mddev));
|
||||
pr_debug("md/raid0:%s: ==> UNIQUE\n",
|
||||
mdname(mddev));
|
||||
conf->nr_strip_zones++;
|
||||
printk(KERN_INFO "md/raid0:%s: %d zones\n",
|
||||
mdname(mddev), conf->nr_strip_zones);
|
||||
pr_debug("md/raid0:%s: %d zones\n",
|
||||
mdname(mddev), conf->nr_strip_zones);
|
||||
}
|
||||
}
|
||||
printk(KERN_INFO "md/raid0:%s: FINAL %d zones\n",
|
||||
mdname(mddev), conf->nr_strip_zones);
|
||||
pr_debug("md/raid0:%s: FINAL %d zones\n",
|
||||
mdname(mddev), conf->nr_strip_zones);
|
||||
err = -ENOMEM;
|
||||
conf->strip_zone = kzalloc(sizeof(struct strip_zone)*
|
||||
conf->nr_strip_zones, GFP_KERNEL);
|
||||
if (!conf->strip_zone)
|
||||
goto abort;
|
||||
conf->devlist = kzalloc(sizeof(mdk_rdev_t*)*
|
||||
conf->devlist = kzalloc(sizeof(struct md_rdev*)*
|
||||
conf->nr_strip_zones*mddev->raid_disks,
|
||||
GFP_KERNEL);
|
||||
if (!conf->devlist)
|
||||
@@ -218,44 +219,45 @@ static int create_strip_zones(mddev_t *mddev, raid0_conf_t **private_conf)
|
||||
zone = conf->strip_zone + i;
|
||||
dev = conf->devlist + i * mddev->raid_disks;
|
||||
|
||||
printk(KERN_INFO "md/raid0:%s: zone %d\n",
|
||||
mdname(mddev), i);
|
||||
pr_debug("md/raid0:%s: zone %d\n", mdname(mddev), i);
|
||||
zone->dev_start = smallest->sectors;
|
||||
smallest = NULL;
|
||||
c = 0;
|
||||
|
||||
for (j=0; j<cnt; j++) {
|
||||
rdev = conf->devlist[j];
|
||||
printk(KERN_INFO "md/raid0:%s: checking %s ...",
|
||||
mdname(mddev),
|
||||
bdevname(rdev->bdev, b));
|
||||
if (rdev->sectors <= zone->dev_start) {
|
||||
printk(KERN_CONT " nope.\n");
|
||||
pr_debug("md/raid0:%s: checking %s ... nope\n",
|
||||
mdname(mddev),
|
||||
bdevname(rdev->bdev, b));
|
||||
continue;
|
||||
}
|
||||
printk(KERN_CONT " contained as device %d\n", c);
|
||||
pr_debug("md/raid0:%s: checking %s ..."
|
||||
" contained as device %d\n",
|
||||
mdname(mddev),
|
||||
bdevname(rdev->bdev, b), c);
|
||||
dev[c] = rdev;
|
||||
c++;
|
||||
if (!smallest || rdev->sectors < smallest->sectors) {
|
||||
smallest = rdev;
|
||||
printk(KERN_INFO "md/raid0:%s: (%llu) is smallest!.\n",
|
||||
mdname(mddev),
|
||||
(unsigned long long)rdev->sectors);
|
||||
pr_debug("md/raid0:%s: (%llu) is smallest!.\n",
|
||||
mdname(mddev),
|
||||
(unsigned long long)rdev->sectors);
|
||||
}
|
||||
}
|
||||
|
||||
zone->nb_dev = c;
|
||||
sectors = (smallest->sectors - zone->dev_start) * c;
|
||||
printk(KERN_INFO "md/raid0:%s: zone->nb_dev: %d, sectors: %llu\n",
|
||||
mdname(mddev),
|
||||
zone->nb_dev, (unsigned long long)sectors);
|
||||
pr_debug("md/raid0:%s: zone->nb_dev: %d, sectors: %llu\n",
|
||||
mdname(mddev),
|
||||
zone->nb_dev, (unsigned long long)sectors);
|
||||
|
||||
curr_zone_end += sectors;
|
||||
zone->zone_end = curr_zone_end;
|
||||
|
||||
printk(KERN_INFO "md/raid0:%s: current zone start: %llu\n",
|
||||
mdname(mddev),
|
||||
(unsigned long long)smallest->sectors);
|
||||
pr_debug("md/raid0:%s: current zone start: %llu\n",
|
||||
mdname(mddev),
|
||||
(unsigned long long)smallest->sectors);
|
||||
}
|
||||
mddev->queue->backing_dev_info.congested_fn = raid0_congested;
|
||||
mddev->queue->backing_dev_info.congested_data = mddev;
|
||||
@@ -275,7 +277,7 @@ static int create_strip_zones(mddev_t *mddev, raid0_conf_t **private_conf)
|
||||
blk_queue_io_opt(mddev->queue,
|
||||
(mddev->chunk_sectors << 9) * mddev->raid_disks);
|
||||
|
||||
printk(KERN_INFO "md/raid0:%s: done.\n", mdname(mddev));
|
||||
pr_debug("md/raid0:%s: done.\n", mdname(mddev));
|
||||
*private_conf = conf;
|
||||
|
||||
return 0;
|
||||
@@ -299,7 +301,7 @@ static int raid0_mergeable_bvec(struct request_queue *q,
|
||||
struct bvec_merge_data *bvm,
|
||||
struct bio_vec *biovec)
|
||||
{
|
||||
mddev_t *mddev = q->queuedata;
|
||||
struct mddev *mddev = q->queuedata;
|
||||
sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev);
|
||||
int max;
|
||||
unsigned int chunk_sectors = mddev->chunk_sectors;
|
||||
@@ -318,10 +320,10 @@ static int raid0_mergeable_bvec(struct request_queue *q,
|
||||
return max;
|
||||
}
|
||||
|
||||
static sector_t raid0_size(mddev_t *mddev, sector_t sectors, int raid_disks)
|
||||
static sector_t raid0_size(struct mddev *mddev, sector_t sectors, int raid_disks)
|
||||
{
|
||||
sector_t array_sectors = 0;
|
||||
mdk_rdev_t *rdev;
|
||||
struct md_rdev *rdev;
|
||||
|
||||
WARN_ONCE(sectors || raid_disks,
|
||||
"%s does not support generic reshape\n", __func__);
|
||||
@@ -332,9 +334,9 @@ static sector_t raid0_size(mddev_t *mddev, sector_t sectors, int raid_disks)
|
||||
return array_sectors;
|
||||
}
|
||||
|
||||
static int raid0_run(mddev_t *mddev)
|
||||
static int raid0_run(struct mddev *mddev)
|
||||
{
|
||||
raid0_conf_t *conf;
|
||||
struct r0conf *conf;
|
||||
int ret;
|
||||
|
||||
if (mddev->chunk_sectors == 0) {
|
||||
@@ -382,9 +384,9 @@ static int raid0_run(mddev_t *mddev)
|
||||
return md_integrity_register(mddev);
|
||||
}
|
||||
|
||||
static int raid0_stop(mddev_t *mddev)
|
||||
static int raid0_stop(struct mddev *mddev)
|
||||
{
|
||||
raid0_conf_t *conf = mddev->private;
|
||||
struct r0conf *conf = mddev->private;
|
||||
|
||||
blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/
|
||||
kfree(conf->strip_zone);
|
||||
@@ -397,7 +399,7 @@ static int raid0_stop(mddev_t *mddev)
|
||||
/* Find the zone which holds a particular offset
|
||||
* Update *sectorp to be an offset in that zone
|
||||
*/
|
||||
static struct strip_zone *find_zone(struct raid0_private_data *conf,
|
||||
static struct strip_zone *find_zone(struct r0conf *conf,
|
||||
sector_t *sectorp)
|
||||
{
|
||||
int i;
|
||||
@@ -417,12 +419,12 @@ static struct strip_zone *find_zone(struct raid0_private_data *conf,
|
||||
* remaps the bio to the target device. we separate two flows.
|
||||
* power 2 flow and a general flow for the sake of perfromance
|
||||
*/
|
||||
static mdk_rdev_t *map_sector(mddev_t *mddev, struct strip_zone *zone,
|
||||
static struct md_rdev *map_sector(struct mddev *mddev, struct strip_zone *zone,
|
||||
sector_t sector, sector_t *sector_offset)
|
||||
{
|
||||
unsigned int sect_in_chunk;
|
||||
sector_t chunk;
|
||||
raid0_conf_t *conf = mddev->private;
|
||||
struct r0conf *conf = mddev->private;
|
||||
int raid_disks = conf->strip_zone[0].nb_dev;
|
||||
unsigned int chunk_sects = mddev->chunk_sectors;
|
||||
|
||||
@@ -453,7 +455,7 @@ static mdk_rdev_t *map_sector(mddev_t *mddev, struct strip_zone *zone,
|
||||
/*
|
||||
* Is io distribute over 1 or more chunks ?
|
||||
*/
|
||||
static inline int is_io_in_chunk_boundary(mddev_t *mddev,
|
||||
static inline int is_io_in_chunk_boundary(struct mddev *mddev,
|
||||
unsigned int chunk_sects, struct bio *bio)
|
||||
{
|
||||
if (likely(is_power_of_2(chunk_sects))) {
|
||||
@@ -466,12 +468,12 @@ static inline int is_io_in_chunk_boundary(mddev_t *mddev,
|
||||
}
|
||||
}
|
||||
|
||||
static int raid0_make_request(mddev_t *mddev, struct bio *bio)
|
||||
static int raid0_make_request(struct mddev *mddev, struct bio *bio)
|
||||
{
|
||||
unsigned int chunk_sects;
|
||||
sector_t sector_offset;
|
||||
struct strip_zone *zone;
|
||||
mdk_rdev_t *tmp_dev;
|
||||
struct md_rdev *tmp_dev;
|
||||
|
||||
if (unlikely(bio->bi_rw & REQ_FLUSH)) {
|
||||
md_flush_request(mddev, bio);
|
||||
@@ -526,43 +528,16 @@ bad_map:
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void raid0_status(struct seq_file *seq, mddev_t *mddev)
|
||||
static void raid0_status(struct seq_file *seq, struct mddev *mddev)
|
||||
{
|
||||
#undef MD_DEBUG
|
||||
#ifdef MD_DEBUG
|
||||
int j, k, h;
|
||||
char b[BDEVNAME_SIZE];
|
||||
raid0_conf_t *conf = mddev->private;
|
||||
int raid_disks = conf->strip_zone[0].nb_dev;
|
||||
|
||||
sector_t zone_size;
|
||||
sector_t zone_start = 0;
|
||||
h = 0;
|
||||
|
||||
for (j = 0; j < conf->nr_strip_zones; j++) {
|
||||
seq_printf(seq, " z%d", j);
|
||||
seq_printf(seq, "=[");
|
||||
for (k = 0; k < conf->strip_zone[j].nb_dev; k++)
|
||||
seq_printf(seq, "%s/", bdevname(
|
||||
conf->devlist[j*raid_disks + k]
|
||||
->bdev, b));
|
||||
|
||||
zone_size = conf->strip_zone[j].zone_end - zone_start;
|
||||
seq_printf(seq, "] ze=%lld ds=%lld s=%lld\n",
|
||||
(unsigned long long)zone_start>>1,
|
||||
(unsigned long long)conf->strip_zone[j].dev_start>>1,
|
||||
(unsigned long long)zone_size>>1);
|
||||
zone_start = conf->strip_zone[j].zone_end;
|
||||
}
|
||||
#endif
|
||||
seq_printf(seq, " %dk chunks", mddev->chunk_sectors / 2);
|
||||
return;
|
||||
}
|
||||
|
||||
static void *raid0_takeover_raid45(mddev_t *mddev)
|
||||
static void *raid0_takeover_raid45(struct mddev *mddev)
|
||||
{
|
||||
mdk_rdev_t *rdev;
|
||||
raid0_conf_t *priv_conf;
|
||||
struct md_rdev *rdev;
|
||||
struct r0conf *priv_conf;
|
||||
|
||||
if (mddev->degraded != 1) {
|
||||
printk(KERN_ERR "md/raid0:%s: raid5 must be degraded! Degraded disks: %d\n",
|
||||
@@ -593,9 +568,9 @@ static void *raid0_takeover_raid45(mddev_t *mddev)
|
||||
return priv_conf;
|
||||
}
|
||||
|
||||
static void *raid0_takeover_raid10(mddev_t *mddev)
|
||||
static void *raid0_takeover_raid10(struct mddev *mddev)
|
||||
{
|
||||
raid0_conf_t *priv_conf;
|
||||
struct r0conf *priv_conf;
|
||||
|
||||
/* Check layout:
|
||||
* - far_copies must be 1
|
||||
@@ -634,9 +609,9 @@ static void *raid0_takeover_raid10(mddev_t *mddev)
|
||||
return priv_conf;
|
||||
}
|
||||
|
||||
static void *raid0_takeover_raid1(mddev_t *mddev)
|
||||
static void *raid0_takeover_raid1(struct mddev *mddev)
|
||||
{
|
||||
raid0_conf_t *priv_conf;
|
||||
struct r0conf *priv_conf;
|
||||
|
||||
/* Check layout:
|
||||
* - (N - 1) mirror drives must be already faulty
|
||||
@@ -660,7 +635,7 @@ static void *raid0_takeover_raid1(mddev_t *mddev)
|
||||
return priv_conf;
|
||||
}
|
||||
|
||||
static void *raid0_takeover(mddev_t *mddev)
|
||||
static void *raid0_takeover(struct mddev *mddev)
|
||||
{
|
||||
/* raid0 can take over:
|
||||
* raid4 - if all data disks are active.
|
||||
@@ -691,11 +666,11 @@ static void *raid0_takeover(mddev_t *mddev)
|
||||
return ERR_PTR(-EINVAL);
|
||||
}
|
||||
|
||||
static void raid0_quiesce(mddev_t *mddev, int state)
|
||||
static void raid0_quiesce(struct mddev *mddev, int state)
|
||||
{
|
||||
}
|
||||
|
||||
static struct mdk_personality raid0_personality=
|
||||
static struct md_personality raid0_personality=
|
||||
{
|
||||
.name = "raid0",
|
||||
.level = 0,
|
||||
|
||||
+3 -7

@@ -1,20 +1,16 @@
 #ifndef _RAID0_H
 #define _RAID0_H

-struct strip_zone
-{
+struct strip_zone {
        sector_t zone_end;      /* Start of the next zone (in sectors) */
        sector_t dev_start;     /* Zone offset in real dev (in sectors) */
        int nb_dev;             /* # of devices attached to the zone */
 };

-struct raid0_private_data
-{
+struct r0conf {
        struct strip_zone *strip_zone;
-       mdk_rdev_t **devlist; /* lists of rdevs, pointed to by strip_zone->dev */
+       struct md_rdev **devlist; /* lists of rdevs, pointed to by strip_zone->dev */
        int nr_strip_zones;
 };

-typedef struct raid0_private_data raid0_conf_t;
-
 #endif
+179 -156 (file diff suppressed because it is too large)
+52 -33
@@ -1,10 +1,8 @@
|
||||
#ifndef _RAID1_H
|
||||
#define _RAID1_H
|
||||
|
||||
typedef struct mirror_info mirror_info_t;
|
||||
|
||||
struct mirror_info {
|
||||
mdk_rdev_t *rdev;
|
||||
struct md_rdev *rdev;
|
||||
sector_t head_position;
|
||||
};
|
||||
|
||||
@@ -17,61 +15,82 @@ struct mirror_info {
|
||||
*/
|
||||
|
||||
struct pool_info {
|
||||
mddev_t *mddev;
|
||||
struct mddev *mddev;
|
||||
int raid_disks;
|
||||
};
|
||||
|
||||
|
||||
typedef struct r1bio_s r1bio_t;
|
||||
|
||||
struct r1_private_data_s {
|
||||
mddev_t *mddev;
|
||||
mirror_info_t *mirrors;
|
||||
struct r1conf {
|
||||
struct mddev *mddev;
|
||||
struct mirror_info *mirrors;
|
||||
int raid_disks;
|
||||
|
||||
/* When choose the best device for a read (read_balance())
|
||||
* we try to keep sequential reads one the same device
|
||||
* using 'last_used' and 'next_seq_sect'
|
||||
*/
|
||||
int last_used;
|
||||
sector_t next_seq_sect;
|
||||
/* During resync, read_balancing is only allowed on the part
|
||||
* of the array that has been resynced. 'next_resync' tells us
|
||||
* where that is.
|
||||
*/
|
||||
sector_t next_resync;
|
||||
|
||||
spinlock_t device_lock;
|
||||
|
||||
/* list of 'struct r1bio' that need to be processed by raid1d,
|
||||
* whether to retry a read, writeout a resync or recovery
|
||||
* block, or anything else.
|
||||
*/
|
||||
struct list_head retry_list;
|
||||
/* queue pending writes and submit them on unplug */
|
||||
|
||||
/* queue pending writes to be submitted on unplug */
|
||||
struct bio_list pending_bio_list;
|
||||
int pending_count;
|
||||
|
||||
/* for use when syncing mirrors: */
|
||||
|
||||
/* for use when syncing mirrors:
|
||||
* We don't allow both normal IO and resync/recovery IO at
|
||||
* the same time - resync/recovery can only happen when there
|
||||
* is no other IO. So when either is active, the other has to wait.
|
||||
* See more details description in raid1.c near raise_barrier().
|
||||
*/
|
||||
wait_queue_head_t wait_barrier;
|
||||
spinlock_t resync_lock;
|
||||
int nr_pending;
|
||||
int nr_waiting;
|
||||
int nr_queued;
|
||||
int barrier;
|
||||
sector_t next_resync;
|
||||
int fullsync; /* set to 1 if a full sync is needed,
|
||||
* (fresh device added).
|
||||
* Cleared when a sync completes.
|
||||
*/
|
||||
int recovery_disabled; /* when the same as
|
||||
* mddev->recovery_disabled
|
||||
* we don't allow recovery
|
||||
* to be attempted as we
|
||||
* expect a read error
|
||||
*/
|
||||
|
||||
wait_queue_head_t wait_barrier;
|
||||
/* Set to 1 if a full sync is needed, (fresh device added).
|
||||
* Cleared when a sync completes.
|
||||
*/
|
||||
int fullsync;
|
||||
|
||||
/* When the same as mddev->recovery_disabled we don't allow
|
||||
* recovery to be attempted as we expect a read error.
|
||||
*/
|
||||
int recovery_disabled;
|
||||
|
||||
|
||||
/* poolinfo contains information about the content of the
|
||||
* mempools - it changes when the array grows or shrinks
|
||||
*/
|
||||
struct pool_info *poolinfo;
|
||||
mempool_t *r1bio_pool;
|
||||
mempool_t *r1buf_pool;
|
||||
|
||||
/* temporary buffer to synchronous IO when attempting to repair
|
||||
* a read error.
|
||||
*/
|
||||
struct page *tmppage;
|
||||
|
||||
mempool_t *r1bio_pool;
|
||||
mempool_t *r1buf_pool;
|
||||
|
||||
/* When taking over an array from a different personality, we store
|
||||
* the new thread here until we fully activate the array.
|
||||
*/
|
||||
struct mdk_thread_s *thread;
|
||||
struct md_thread *thread;
|
||||
};
|
||||
|
||||
typedef struct r1_private_data_s conf_t;
|
||||
|
||||
/*
|
||||
* this is our 'private' RAID1 bio.
|
||||
*
|
||||
@@ -79,7 +98,7 @@ typedef struct r1_private_data_s conf_t;
|
||||
* for this RAID1 operation, and about their status:
|
||||
*/
|
||||
|
||||
struct r1bio_s {
|
||||
struct r1bio {
|
||||
atomic_t remaining; /* 'have we finished' count,
|
||||
* used from IRQ handlers
|
||||
*/
|
||||
@@ -89,7 +108,7 @@ struct r1bio_s {
|
||||
sector_t sector;
|
||||
int sectors;
|
||||
unsigned long state;
|
||||
mddev_t *mddev;
|
||||
struct mddev *mddev;
|
||||
/*
|
||||
* original bio going to /dev/mdx
|
||||
*/
|
||||
@@ -148,6 +167,6 @@ struct r1bio_s {
|
||||
#define R1BIO_MadeGood 7
|
||||
#define R1BIO_WriteError 8
|
||||
|
||||
extern int md_raid1_congested(mddev_t *mddev, int bits);
|
||||
extern int md_raid1_congested(struct mddev *mddev, int bits);
|
||||
|
||||
#endif
|
||||
|
||||
+151 -129 (file diff suppressed because it is too large)
+8 -14

@@ -1,10 +1,8 @@
 #ifndef _RAID10_H
 #define _RAID10_H

-typedef struct mirror_info mirror_info_t;
-
 struct mirror_info {
-       mdk_rdev_t *rdev;
+       struct md_rdev *rdev;
        sector_t head_position;
        int recovery_disabled;          /* matches
                                         * mddev->recovery_disabled
@@ -13,11 +11,9 @@ struct mirror_info {
                                         */
 };

-typedef struct r10bio_s r10bio_t;
-
-struct r10_private_data_s {
-       mddev_t *mddev;
-       mirror_info_t *mirrors;
+struct r10conf {
+       struct mddev *mddev;
+       struct mirror_info *mirrors;
        int raid_disks;
        spinlock_t device_lock;

@@ -46,7 +42,7 @@ struct r10_private_data_s {
        struct list_head retry_list;
        /* queue pending writes and submit them on unplug */
        struct bio_list pending_bio_list;
-
+       int pending_count;

        spinlock_t resync_lock;
        int nr_pending;
@@ -68,11 +64,9 @@ struct r10_private_data_s {
        /* When taking over an array from a different personality, we store
         * the new thread here until we fully activate the array.
         */
-       struct mdk_thread_s *thread;
+       struct md_thread *thread;
 };

-typedef struct r10_private_data_s conf_t;
-
 /*
  * this is our 'private' RAID10 bio.
  *
@@ -80,14 +74,14 @@ typedef struct r10_private_data_s conf_t;
  * for this RAID10 operation, and about their status:
  */

-struct r10bio_s {
+struct r10bio {
        atomic_t remaining;     /* 'have we finished' count,
                                 * used from IRQ handlers
                                 */
        sector_t sector;        /* virtual sector number */
        int sectors;
        unsigned long state;
-       mddev_t *mddev;
+       struct mddev *mddev;
        /*
         * original bio going to /dev/mdx
         */
+158 -205 (file diff suppressed because it is too large)
+9 -11

@@ -197,7 +197,7 @@ enum reconstruct_states {
 struct stripe_head {
        struct hlist_node hash;
        struct list_head lru;           /* inactive_list or handle_list */
-       struct raid5_private_data *raid_conf;
+       struct r5conf *raid_conf;
        short generation;               /* increments with every
                                         * reshape */
        sector_t sector;                /* sector of this row */
@@ -248,7 +248,7 @@ struct stripe_head_state {
        unsigned long ops_request;

        struct bio *return_bi;
-       mdk_rdev_t *blocked_rdev;
+       struct md_rdev *blocked_rdev;
        int handle_bad_blocks;
 };

@@ -344,12 +344,12 @@ enum {


 struct disk_info {
-       mdk_rdev_t *rdev;
+       struct md_rdev *rdev;
 };

-struct raid5_private_data {
+struct r5conf {
        struct hlist_head *stripe_hashtbl;
-       mddev_t *mddev;
+       struct mddev *mddev;
        struct disk_info *spare;
        int chunk_sectors;
        int level, algorithm;
@@ -436,11 +436,9 @@ struct raid5_private_data {
        /* When taking over an array from a different personality, we store
         * the new thread here until we fully activate the array.
         */
-       struct mdk_thread_s *thread;
+       struct md_thread *thread;
 };

-typedef struct raid5_private_data raid5_conf_t;
-
 /*
  * Our supported algorithms
  */
@@ -503,7 +501,7 @@ static inline int algorithm_is_DDF(int layout)
        return layout >= 8 && layout <= 10;
 }

-extern int md_raid5_congested(mddev_t *mddev, int bits);
-extern void md_raid5_kick_device(raid5_conf_t *conf);
-extern int raid5_set_cache_size(mddev_t *mddev, int size);
+extern int md_raid5_congested(struct mddev *mddev, int bits);
+extern void md_raid5_kick_device(struct r5conf *conf);
+extern int raid5_set_cache_size(struct mddev *mddev, int size);
 #endif
+1 -1

@@ -11,7 +11,7 @@
  * ----------------------------------------------------------------------- */

 /*
- * raid6int$#.c
+ * int$#.c
  *
  * $#-way unrolled portable integer math RAID-6 instruction set
  *