Merge branch 'linus' into x86/memory-corruption-check

Author: Ingo Molnar
Date:   2008-10-12 15:05:39 +02:00
3879 changed files with 196194 additions and 93547 deletions
include/linux/Kbuild +3
@@ -126,6 +126,7 @@ header-y += pci_regs.h
header-y += pfkeyv2.h
header-y += pg.h
header-y += phantom.h
header-y += phonet.h
header-y += pkt_cls.h
header-y += pkt_sched.h
header-y += posix_types.h
@@ -180,6 +181,7 @@ unifdef-y += audit.h
unifdef-y += auto_fs.h
unifdef-y += auxvec.h
unifdef-y += binfmts.h
unifdef-y += blktrace_api.h
unifdef-y += capability.h
unifdef-y += capi.h
unifdef-y += cciss_ioctl.h
@@ -232,6 +234,7 @@ unifdef-y += if_fddi.h
unifdef-y += if_frad.h
unifdef-y += if_ltalk.h
unifdef-y += if_link.h
unifdef-y += if_phonet.h
unifdef-y += if_pppol2tp.h
unifdef-y += if_pppox.h
unifdef-y += if_tr.h
include/linux/ata.h +127 -2
@@ -30,6 +30,7 @@
#define __LINUX_ATA_H__
#include <linux/types.h>
#include <asm/byteorder.h>
/* defines only for the constants which don't work well as enums */
#define ATA_DMA_BOUNDARY 0xffffUL
@@ -88,6 +89,7 @@ enum {
ATA_ID_DLF = 128,
ATA_ID_CSFO = 129,
ATA_ID_CFA_POWER = 160,
ATA_ID_ROT_SPEED = 217,
ATA_ID_PIO4 = (1 << 1),
ATA_ID_SERNO_LEN = 20,
@@ -557,6 +559,15 @@ static inline int ata_id_has_flush(const u16 *id)
return id[ATA_ID_COMMAND_SET_2] & (1 << 12);
}
static inline int ata_id_flush_enabled(const u16 *id)
{
if (ata_id_has_flush(id) == 0)
return 0;
if ((id[ATA_ID_CSF_DEFAULT] & 0xC000) != 0x4000)
return 0;
return id[ATA_ID_CFS_ENABLE_2] & (1 << 12);
}
static inline int ata_id_has_flush_ext(const u16 *id)
{
if ((id[ATA_ID_COMMAND_SET_2] & 0xC000) != 0x4000)
@@ -564,6 +575,19 @@ static inline int ata_id_has_flush_ext(const u16 *id)
return id[ATA_ID_COMMAND_SET_2] & (1 << 13);
}
static inline int ata_id_flush_ext_enabled(const u16 *id)
{
if (ata_id_has_flush_ext(id) == 0)
return 0;
if ((id[ATA_ID_CSF_DEFAULT] & 0xC000) != 0x4000)
return 0;
/*
* some Maxtor disks have bit 13 defined incorrectly
* so check bit 10 too
*/
return (id[ATA_ID_CFS_ENABLE_2] & 0x2400) == 0x2400;
}
static inline int ata_id_has_lba48(const u16 *id)
{
if ((id[ATA_ID_COMMAND_SET_2] & 0xC000) != 0x4000)
@@ -573,6 +597,15 @@ static inline int ata_id_has_lba48(const u16 *id)
return id[ATA_ID_COMMAND_SET_2] & (1 << 10);
}
static inline int ata_id_lba48_enabled(const u16 *id)
{
if (ata_id_has_lba48(id) == 0)
return 0;
if ((id[ATA_ID_CSF_DEFAULT] & 0xC000) != 0x4000)
return 0;
return id[ATA_ID_CFS_ENABLE_2] & (1 << 10);
}
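The repeated (id[...] & 0xC000) != 0x4000 test in the *_enabled() helpers above is the standard ATA IDENTIFY "word valid" idiom: a feature word only carries meaningful bits when bit 15 is clear and bit 14 is set, which filters out the reserved all-zeros and all-ones patterns. A minimal user-space sketch of the idiom, with made-up sample words:

#include <stdio.h>

typedef unsigned short u16;

/* same validity test as the ata_id_*_enabled() helpers */
static int word_valid(u16 word)
{
	return (word & 0xC000) == 0x4000;
}

int main(void)
{
	u16 samples[] = { 0x0000, 0xFFFF, 0x4008, 0x8008 };
	unsigned int i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("0x%04X -> %s\n", samples[i],
		       word_valid(samples[i]) ? "valid" : "invalid");
	return 0;
}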
static inline int ata_id_hpa_enabled(const u16 *id)
{
/* Yes children, word 83 valid bits cover word 82 data */
@@ -644,7 +677,15 @@ static inline unsigned int ata_id_major_version(const u16 *id)
static inline int ata_id_is_sata(const u16 *id)
{
return ata_id_major_version(id) >= 5 && id[ATA_ID_HW_CONFIG] == 0;
/*
* See if word 93 is 0 AND drive is at least ATA-5 compatible
* verifying that word 80 by casting it to a signed type --
* this trick allows us to filter out the reserved values of
* 0x0000 and 0xffff along with the earlier ATA revisions...
*/
if (id[ATA_ID_HW_CONFIG] == 0 && (short)id[ATA_ID_MAJOR_VER] >= 0x0020)
return 1;
return 0;
}
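The (short) cast in the rewritten ata_id_is_sata() is the interesting part: reading word 80 (the major-version bitmap) as a signed 16-bit value makes both reserved patterns fail the >= 0x0020 test, since 0xFFFF becomes -1, while any bitmap with the ATA-5 bit or higher set passes. A standalone sketch with hypothetical word values:

#include <stdio.h>

typedef unsigned short u16;

/* mirrors the new check in ata_id_is_sata(): word 80 viewed as signed
 * must be >= 0x0020 (bit 5 => ATA/ATAPI-5 or later) */
static int major_ver_ok(u16 word80)
{
	return (short)word80 >= 0x0020;
}

int main(void)
{
	u16 samples[] = { 0x0000, 0xFFFF, 0x001F, 0x007E };
	unsigned int i;

	for (i = 0; i < 4; i++)
		printf("word80=0x%04X -> %d\n", samples[i],
		       major_ver_ok(samples[i]));
	return 0;
}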
static inline int ata_id_has_tpm(const u16 *id)
@@ -667,6 +708,15 @@ static inline int ata_id_has_dword_io(const u16 *id)
return 0;
}
static inline int ata_id_has_unload(const u16 *id)
{
if (ata_id_major_version(id) >= 7 &&
(id[ATA_ID_CFSSE] & 0xC000) == 0x4000 &&
id[ATA_ID_CFSSE] & (1 << 13))
return 1;
return 0;
}
static inline int ata_id_current_chs_valid(const u16 *id)
{
/* For ATA-1 devices, if the INITIALIZE DEVICE PARAMETERS command
@@ -691,6 +741,11 @@ static inline int ata_id_is_cfa(const u16 *id)
return 0;
}
static inline int ata_id_is_ssd(const u16 *id)
{
return id[ATA_ID_ROT_SPEED] == 0x01;
}
static inline int ata_drive_40wire(const u16 *dev_id)
{
if (ata_id_is_sata(dev_id))
@@ -727,6 +782,76 @@ static inline int atapi_id_dmadir(const u16 *dev_id)
return ata_id_major_version(dev_id) >= 7 && (dev_id[62] & 0x8000);
}
/*
* ata_id_is_lba_capacity_ok() performs a sanity check on
* the claimed LBA capacity value for the device.
*
* Returns 1 if LBA capacity looks sensible, 0 otherwise.
*
* It is called only once for each device.
*/
static inline int ata_id_is_lba_capacity_ok(u16 *id)
{
unsigned long lba_sects, chs_sects, head, tail;
/* No non-LBA info .. so valid! */
if (id[ATA_ID_CYLS] == 0)
return 1;
lba_sects = ata_id_u32(id, ATA_ID_LBA_CAPACITY);
/*
* The ATA spec tells large drives to return
* C/H/S = 16383/16/63 independent of their size.
* Some drives can be jumpered to use 15 heads instead of 16.
* Some drives can be jumpered to use 4092 cyls instead of 16383.
*/
if ((id[ATA_ID_CYLS] == 16383 ||
(id[ATA_ID_CYLS] == 4092 && id[ATA_ID_CUR_CYLS] == 16383)) &&
id[ATA_ID_SECTORS] == 63 &&
(id[ATA_ID_HEADS] == 15 || id[ATA_ID_HEADS] == 16) &&
(lba_sects >= 16383 * 63 * id[ATA_ID_HEADS]))
return 1;
chs_sects = id[ATA_ID_CYLS] * id[ATA_ID_HEADS] * id[ATA_ID_SECTORS];
/* perform a rough sanity check on lba_sects: within 10% is OK */
if (lba_sects - chs_sects < chs_sects/10)
return 1;
/* some drives have the word order reversed */
head = (lba_sects >> 16) & 0xffff;
tail = lba_sects & 0xffff;
lba_sects = head | (tail << 16);
if (lba_sects - chs_sects < chs_sects/10) {
*(__le32 *)&id[ATA_ID_LBA_CAPACITY] = __cpu_to_le32(lba_sects);
return 1; /* LBA capacity is (now) good */
}
return 0; /* LBA capacity value may be bad */
}
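The word-swap fallback in ata_id_is_lba_capacity_ok() handles drives that store the 32-bit LBA capacity with its 16-bit halves reversed; if the swapped value passes the same 10% sanity test against the CHS capacity, the id buffer is fixed up in place. A user-space sketch of the swap-and-retest step (geometry and capacity values are invented):

#include <stdio.h>

typedef unsigned int u32;

int main(void)
{
	u32 chs_sects = 16383u * 16 * 63;	/* hypothetical C/H/S geometry */
	u32 lba_sects = 0xFC1000FBu;		/* same count, halves swapped */
	u32 head, tail;

	if (lba_sects - chs_sects < chs_sects / 10) {
		printf("capacity ok as-is\n");
		return 0;
	}
	/* retry with the 16-bit halves swapped, as in the helper above */
	head = (lba_sects >> 16) & 0xffff;
	tail = lba_sects & 0xffff;
	lba_sects = head | (tail << 16);
	if (lba_sects - chs_sects < chs_sects / 10)
		printf("capacity ok after word swap: %u sectors\n", lba_sects);
	else
		printf("capacity value looks bad\n");
	return 0;
}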
static inline void ata_id_to_hd_driveid(u16 *id)
{
#ifdef __BIG_ENDIAN
/* accessed in struct hd_driveid as 8-bit values */
id[ATA_ID_MAX_MULTSECT] = __cpu_to_le16(id[ATA_ID_MAX_MULTSECT]);
id[ATA_ID_CAPABILITY] = __cpu_to_le16(id[ATA_ID_CAPABILITY]);
id[ATA_ID_OLD_PIO_MODES] = __cpu_to_le16(id[ATA_ID_OLD_PIO_MODES]);
id[ATA_ID_OLD_DMA_MODES] = __cpu_to_le16(id[ATA_ID_OLD_DMA_MODES]);
id[ATA_ID_MULTSECT] = __cpu_to_le16(id[ATA_ID_MULTSECT]);
/* as 32-bit values */
*(u32 *)&id[ATA_ID_LBA_CAPACITY] = ata_id_u32(id, ATA_ID_LBA_CAPACITY);
*(u32 *)&id[ATA_ID_SPG] = ata_id_u32(id, ATA_ID_SPG);
/* as 64-bit value */
*(u64 *)&id[ATA_ID_LBA_CAPACITY_2] =
ata_id_u64(id, ATA_ID_LBA_CAPACITY_2);
#endif
}
static inline int is_multi_taskfile(struct ata_taskfile *tf)
{
return (tf->command == ATA_CMD_READ_MULTI) ||
@@ -745,7 +870,7 @@ static inline int ata_ok(u8 status)
static inline int lba_28_ok(u64 block, u32 n_block)
{
/* check the ending block number */
return ((block + n_block - 1) < ((u64)1 << 28)) && (n_block <= 256);
return ((block + n_block) < ((u64)1 << 28)) && (n_block <= 256);
}
static inline int lba_48_ok(u64 block, u32 n_block)
include/linux/bio.h +55 -53
@@ -26,21 +26,8 @@
#ifdef CONFIG_BLOCK
/* Platforms may set this to teach the BIO layer about IOMMU hardware. */
#include <asm/io.h>
#if defined(BIO_VMERGE_MAX_SIZE) && defined(BIO_VMERGE_BOUNDARY)
#define BIOVEC_VIRT_START_SIZE(x) (bvec_to_phys(x) & (BIO_VMERGE_BOUNDARY - 1))
#define BIOVEC_VIRT_OVERSIZE(x) ((x) > BIO_VMERGE_MAX_SIZE)
#else
#define BIOVEC_VIRT_START_SIZE(x) 0
#define BIOVEC_VIRT_OVERSIZE(x) 0
#endif
#ifndef BIO_VMERGE_BOUNDARY
#define BIO_VMERGE_BOUNDARY 0
#endif
#define BIO_DEBUG
#ifdef BIO_DEBUG
@@ -88,25 +75,14 @@ struct bio {
/* Number of segments in this BIO after
* physical address coalescing is performed.
*/
unsigned short bi_phys_segments;
/* Number of segments after physical and DMA remapping
* hardware coalescing is performed.
*/
unsigned short bi_hw_segments;
unsigned int bi_phys_segments;
unsigned int bi_size; /* residual I/O count */
/*
* To keep track of the max hw size, we account for the
* sizes of the first and last virtually mergeable segments
* in this bio
*/
unsigned int bi_hw_front_size;
unsigned int bi_hw_back_size;
unsigned int bi_max_vecs; /* max bvl_vecs we can hold */
unsigned int bi_comp_cpu; /* completion CPU */
struct bio_vec *bi_io_vec; /* the actual vec list */
bio_end_io_t *bi_end_io;
@@ -126,11 +102,14 @@ struct bio {
#define BIO_UPTODATE 0 /* ok after I/O completion */
#define BIO_RW_BLOCK 1 /* RW_AHEAD set, and read/write would block */
#define BIO_EOF 2 /* out-of-bounds error */
#define BIO_SEG_VALID 3 /* nr_hw_seg valid */
#define BIO_SEG_VALID 3 /* bi_phys_segments valid */
#define BIO_CLONED 4 /* doesn't own data */
#define BIO_BOUNCED 5 /* bio is a bounce bio */
#define BIO_USER_MAPPED 6 /* contains user pages */
#define BIO_EOPNOTSUPP 7 /* not supported */
#define BIO_CPU_AFFINE 8 /* complete bio on same CPU as submitted */
#define BIO_NULL_MAPPED 9 /* contains invalid user pages */
#define BIO_FS_INTEGRITY 10 /* fs owns integrity data, not block layer */
#define bio_flagged(bio, flag) ((bio)->bi_flags & (1 << (flag)))
/*
@@ -144,18 +123,31 @@ struct bio {
/*
* bio bi_rw flags
*
* bit 0 -- read (not set) or write (set)
* bit 0 -- data direction
* If not set, bio is a read from device. If set, it's a write to device.
* bit 1 -- rw-ahead when set
* bit 2 -- barrier
* Insert a serialization point in the IO queue, forcing previously
submitted IO to be completed before this one is issued.
* bit 3 -- fail fast, don't want low level driver retries
* bit 4 -- synchronous I/O hint: the block layer will unplug immediately
* Note that this does NOT indicate that the IO itself is sync, just
* that the block layer will not postpone issue of this IO by plugging.
* bit 5 -- metadata request
* Used for tracing to differentiate metadata and data IO. May also
* get some preferential treatment in the IO scheduler
* bit 6 -- discard sectors
* Informs the lower level device that this range of sectors is no longer
* used by the file system and may thus be freed by the device. Used
* for flash based storage.
*/
#define BIO_RW 0
#define BIO_RW_AHEAD 1
#define BIO_RW 0 /* Must match RW in req flags (blkdev.h) */
#define BIO_RW_AHEAD 1 /* Must match FAILFAST in req flags */
#define BIO_RW_BARRIER 2
#define BIO_RW_FAILFAST 3
#define BIO_RW_SYNC 4
#define BIO_RW_META 5
#define BIO_RW_DISCARD 6
/*
* upper 16 bits of bi_rw define the io priority of this bio
@@ -185,14 +177,15 @@ struct bio {
#define bio_failfast(bio) ((bio)->bi_rw & (1 << BIO_RW_FAILFAST))
#define bio_rw_ahead(bio) ((bio)->bi_rw & (1 << BIO_RW_AHEAD))
#define bio_rw_meta(bio) ((bio)->bi_rw & (1 << BIO_RW_META))
#define bio_empty_barrier(bio) (bio_barrier(bio) && !(bio)->bi_size)
#define bio_discard(bio) ((bio)->bi_rw & (1 << BIO_RW_DISCARD))
#define bio_empty_barrier(bio) (bio_barrier(bio) && !bio_has_data(bio) && !bio_discard(bio))
static inline unsigned int bio_cur_sectors(struct bio *bio)
{
if (bio->bi_vcnt)
return bio_iovec(bio)->bv_len >> 9;
return 0;
else /* dataless requests such as discard */
return bio->bi_size >> 9;
}
static inline void *bio_data(struct bio *bio)
@@ -236,8 +229,6 @@ static inline void *bio_data(struct bio *bio)
((bvec_to_phys((vec1)) + (vec1)->bv_len) == bvec_to_phys((vec2)))
#endif
#define BIOVEC_VIRT_MERGEABLE(vec1, vec2) \
((((bvec_to_phys((vec1)) + (vec1)->bv_len) | bvec_to_phys((vec2))) & (BIO_VMERGE_BOUNDARY - 1)) == 0)
#define __BIO_SEG_BOUNDARY(addr1, addr2, mask) \
(((addr1) | (mask)) == (((addr2) - 1) | (mask)))
#define BIOVEC_SEG_BOUNDARY(q, b1, b2) \
@@ -319,15 +310,14 @@ struct bio_pair {
atomic_t cnt;
int error;
};
extern struct bio_pair *bio_split(struct bio *bi, mempool_t *pool,
int first_sectors);
extern mempool_t *bio_split_pool;
extern struct bio_pair *bio_split(struct bio *bi, int first_sectors);
extern void bio_pair_release(struct bio_pair *dbio);
extern struct bio_set *bioset_create(int, int);
extern void bioset_free(struct bio_set *);
extern struct bio *bio_alloc(gfp_t, int);
extern struct bio *bio_kmalloc(gfp_t, int);
extern struct bio *bio_alloc_bioset(gfp_t, int, struct bio_set *);
extern void bio_put(struct bio *);
extern void bio_free(struct bio *, struct bio_set *);
@@ -335,7 +325,6 @@ extern void bio_free(struct bio *, struct bio_set *);
extern void bio_endio(struct bio *, int);
struct request_queue;
extern int bio_phys_segments(struct request_queue *, struct bio *);
extern int bio_hw_segments(struct request_queue *, struct bio *);
extern void __bio_clone(struct bio *, struct bio *);
extern struct bio *bio_clone(struct bio *, gfp_t);
@@ -346,12 +335,14 @@ extern int bio_add_page(struct bio *, struct page *, unsigned int,unsigned int);
extern int bio_add_pc_page(struct request_queue *, struct bio *, struct page *,
unsigned int, unsigned int);
extern int bio_get_nr_vecs(struct block_device *);
extern sector_t bio_sector_offset(struct bio *, unsigned short, unsigned int);
extern struct bio *bio_map_user(struct request_queue *, struct block_device *,
unsigned long, unsigned int, int);
unsigned long, unsigned int, int, gfp_t);
struct sg_iovec;
struct rq_map_data;
extern struct bio *bio_map_user_iov(struct request_queue *,
struct block_device *,
struct sg_iovec *, int, int);
struct sg_iovec *, int, int, gfp_t);
extern void bio_unmap_user(struct bio *);
extern struct bio *bio_map_kern(struct request_queue *, void *, unsigned int,
gfp_t);
@@ -359,14 +350,24 @@ extern struct bio *bio_copy_kern(struct request_queue *, void *, unsigned int,
gfp_t, int);
extern void bio_set_pages_dirty(struct bio *bio);
extern void bio_check_pages_dirty(struct bio *bio);
extern struct bio *bio_copy_user(struct request_queue *, unsigned long, unsigned int, int);
extern struct bio *bio_copy_user_iov(struct request_queue *, struct sg_iovec *,
int, int);
extern struct bio *bio_copy_user(struct request_queue *, struct rq_map_data *,
unsigned long, unsigned int, int, gfp_t);
extern struct bio *bio_copy_user_iov(struct request_queue *,
struct rq_map_data *, struct sg_iovec *,
int, int, gfp_t);
extern int bio_uncopy_user(struct bio *);
void zero_fill_bio(struct bio *bio);
extern struct bio_vec *bvec_alloc_bs(gfp_t, int, unsigned long *, struct bio_set *);
extern unsigned int bvec_nr_vecs(unsigned short idx);
/*
* Allow queuer to specify a completion CPU for this bio
*/
static inline void bio_set_completion_cpu(struct bio *bio, unsigned int cpu)
{
bio->bi_comp_cpu = cpu;
}
/*
* bio_set is used to allow other portions of the IO system to
* allocate their own private memory pools for bio and iovec structures.
@@ -445,6 +446,14 @@ static inline char *__bio_kmap_irq(struct bio *bio, unsigned short idx,
__bio_kmap_irq((bio), (bio)->bi_idx, (flags))
#define bio_kunmap_irq(buf,flags) __bio_kunmap_irq(buf, flags)
/*
* Check whether this bio carries any data or not. A NULL bio is allowed.
*/
static inline int bio_has_data(struct bio *bio)
{
return bio && bio->bi_io_vec != NULL;
}
#if defined(CONFIG_BLK_DEV_INTEGRITY)
#define bip_vec_idx(bip, idx) (&(bip->bip_vec[(idx)]))
@@ -458,14 +467,7 @@ static inline char *__bio_kmap_irq(struct bio *bio, unsigned short idx,
#define bip_for_each_vec(bvl, bip, i) \
__bip_for_each_vec(bvl, bip, i, (bip)->bip_idx)
static inline int bio_integrity(struct bio *bio)
{
#if defined(CONFIG_BLK_DEV_INTEGRITY)
return bio->bi_integrity != NULL;
#else
return 0;
#endif
}
#define bio_integrity(bio) (bio->bi_integrity != NULL)
extern struct bio_integrity_payload *bio_integrity_alloc_bioset(struct bio *, gfp_t, unsigned int, struct bio_set *);
extern struct bio_integrity_payload *bio_integrity_alloc(struct bio *, gfp_t, unsigned int);
include/linux/blkdev.h +94 -59
@@ -16,7 +16,9 @@
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/stringify.h>
#include <linux/gfp.h>
#include <linux/bsg.h>
#include <linux/smp.h>
#include <asm/scatterlist.h>
@@ -54,7 +56,6 @@ enum rq_cmd_type_bits {
REQ_TYPE_PM_SUSPEND, /* suspend request */
REQ_TYPE_PM_RESUME, /* resume request */
REQ_TYPE_PM_SHUTDOWN, /* shutdown request */
REQ_TYPE_FLUSH, /* flush request */
REQ_TYPE_SPECIAL, /* driver defined type */
REQ_TYPE_LINUX_BLOCK, /* generic block layer message */
/*
@@ -76,19 +77,18 @@ enum rq_cmd_type_bits {
*
*/
enum {
/*
* just examples for now
*/
REQ_LB_OP_EJECT = 0x40, /* eject request */
REQ_LB_OP_FLUSH = 0x41, /* flush device */
REQ_LB_OP_FLUSH = 0x41, /* flush request */
REQ_LB_OP_DISCARD = 0x42, /* discard sectors */
};
/*
* request type modified bits. first three bits match BIO_RW* bits, important
* request type modified bits. first two bits match BIO_RW* bits, important
*/
enum rq_flag_bits {
__REQ_RW, /* not set, read. set, write */
__REQ_FAILFAST, /* no low level driver retries */
__REQ_DISCARD, /* request to discard sectors */
__REQ_SORTED, /* elevator knows about this request */
__REQ_SOFTBARRIER, /* may not be passed by ioscheduler */
__REQ_HARDBARRIER, /* may not be passed by drive either */
@@ -111,6 +111,7 @@ enum rq_flag_bits {
};
#define REQ_RW (1 << __REQ_RW)
#define REQ_DISCARD (1 << __REQ_DISCARD)
#define REQ_FAILFAST (1 << __REQ_FAILFAST)
#define REQ_SORTED (1 << __REQ_SORTED)
#define REQ_SOFTBARRIER (1 << __REQ_SOFTBARRIER)
@@ -140,12 +141,14 @@ enum rq_flag_bits {
*/
struct request {
struct list_head queuelist;
struct list_head donelist;
struct call_single_data csd;
int cpu;
struct request_queue *q;
unsigned int cmd_flags;
enum rq_cmd_type_bits cmd_type;
unsigned long atomic_flags;
/* Maintain bio traversal state for part by part I/O submission.
* hard_* are block layer internals, no driver should touch them!
@@ -190,13 +193,6 @@ struct request {
*/
unsigned short nr_phys_segments;
/* Number of scatter-gather addr+len pairs after
* physical and DMA remapping hardware coalescing is performed.
* This is the number of scatter-gather entries the driver
* will actually have to deal with after DMA mapping is done.
*/
unsigned short nr_hw_segments;
unsigned short ioprio;
void *special;
@@ -220,6 +216,8 @@ struct request {
void *data;
void *sense;
unsigned long deadline;
struct list_head timeout_list;
unsigned int timeout;
int retries;
@@ -233,6 +231,11 @@ struct request {
struct request *next_rq;
};
static inline unsigned short req_get_ioprio(struct request *req)
{
return req->ioprio;
}
/*
* State information carried for REQ_TYPE_PM_SUSPEND and REQ_TYPE_PM_RESUME
* requests. Some step values could eventually be made generic.
@@ -252,6 +255,7 @@ typedef void (request_fn_proc) (struct request_queue *q);
typedef int (make_request_fn) (struct request_queue *q, struct bio *bio);
typedef int (prep_rq_fn) (struct request_queue *, struct request *);
typedef void (unplug_fn) (struct request_queue *);
typedef int (prepare_discard_fn) (struct request_queue *, struct request *);
struct bio_vec;
struct bvec_merge_data {
@@ -265,6 +269,15 @@ typedef int (merge_bvec_fn) (struct request_queue *, struct bvec_merge_data *,
typedef void (prepare_flush_fn) (struct request_queue *, struct request *);
typedef void (softirq_done_fn)(struct request *);
typedef int (dma_drain_needed_fn)(struct request *);
typedef int (lld_busy_fn) (struct request_queue *q);
enum blk_eh_timer_return {
BLK_EH_NOT_HANDLED,
BLK_EH_HANDLED,
BLK_EH_RESET_TIMER,
};
typedef enum blk_eh_timer_return (rq_timed_out_fn)(struct request *);
enum blk_queue_state {
Queue_down,
@@ -307,10 +320,13 @@ struct request_queue
make_request_fn *make_request_fn;
prep_rq_fn *prep_rq_fn;
unplug_fn *unplug_fn;
prepare_discard_fn *prepare_discard_fn;
merge_bvec_fn *merge_bvec_fn;
prepare_flush_fn *prepare_flush_fn;
softirq_done_fn *softirq_done_fn;
rq_timed_out_fn *rq_timed_out_fn;
dma_drain_needed_fn *dma_drain_needed;
lld_busy_fn *lld_busy_fn;
/*
* Dispatch queue sorting
@@ -385,6 +401,10 @@ struct request_queue
unsigned int nr_sorted;
unsigned int in_flight;
unsigned int rq_timeout;
struct timer_list timeout;
struct list_head timeout_list;
/*
* sg stuff
*/
@@ -421,6 +441,10 @@ struct request_queue
#define QUEUE_FLAG_ELVSWITCH 8 /* don't use elevator, just do FIFO */
#define QUEUE_FLAG_BIDI 9 /* queue supports bidi requests */
#define QUEUE_FLAG_NOMERGES 10 /* disable merge attempts */
#define QUEUE_FLAG_SAME_COMP 11 /* force complete on same CPU */
#define QUEUE_FLAG_FAIL_IO 12 /* fake timeout */
#define QUEUE_FLAG_STACKABLE 13 /* supports request stacking */
#define QUEUE_FLAG_NONROT 14 /* non-rotational device (SSD) */
static inline int queue_is_locked(struct request_queue *q)
{
@@ -526,7 +550,10 @@ enum {
#define blk_queue_tagged(q) test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags)
#define blk_queue_stopped(q) test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
#define blk_queue_nomerges(q) test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)
#define blk_queue_nonrot(q) test_bit(QUEUE_FLAG_NONROT, &(q)->queue_flags)
#define blk_queue_flushing(q) ((q)->ordseq)
#define blk_queue_stackable(q) \
test_bit(QUEUE_FLAG_STACKABLE, &(q)->queue_flags)
#define blk_fs_request(rq) ((rq)->cmd_type == REQ_TYPE_FS)
#define blk_pc_request(rq) ((rq)->cmd_type == REQ_TYPE_BLOCK_PC)
@@ -536,16 +563,18 @@ enum {
#define blk_noretry_request(rq) ((rq)->cmd_flags & REQ_FAILFAST)
#define blk_rq_started(rq) ((rq)->cmd_flags & REQ_STARTED)
#define blk_account_rq(rq) (blk_rq_started(rq) && blk_fs_request(rq))
#define blk_account_rq(rq) (blk_rq_started(rq) && (blk_fs_request(rq) || blk_discard_rq(rq)))
#define blk_pm_suspend_request(rq) ((rq)->cmd_type == REQ_TYPE_PM_SUSPEND)
#define blk_pm_resume_request(rq) ((rq)->cmd_type == REQ_TYPE_PM_RESUME)
#define blk_pm_request(rq) \
(blk_pm_suspend_request(rq) || blk_pm_resume_request(rq))
#define blk_rq_cpu_valid(rq) ((rq)->cpu != -1)
#define blk_sorted_rq(rq) ((rq)->cmd_flags & REQ_SORTED)
#define blk_barrier_rq(rq) ((rq)->cmd_flags & REQ_HARDBARRIER)
#define blk_fua_rq(rq) ((rq)->cmd_flags & REQ_FUA)
#define blk_discard_rq(rq) ((rq)->cmd_flags & REQ_DISCARD)
#define blk_bidi_rq(rq) ((rq)->next_rq != NULL)
#define blk_empty_barrier(rq) (blk_barrier_rq(rq) && blk_fs_request(rq) && !(rq)->hard_nr_sectors)
/* rq->queuelist of dequeued request must be list_empty() */
@@ -592,7 +621,8 @@ static inline void blk_clear_queue_full(struct request_queue *q, int rw)
#define RQ_NOMERGE_FLAGS \
(REQ_NOMERGE | REQ_STARTED | REQ_HARDBARRIER | REQ_SOFTBARRIER)
#define rq_mergeable(rq) \
(!((rq)->cmd_flags & RQ_NOMERGE_FLAGS) && blk_fs_request((rq)))
(!((rq)->cmd_flags & RQ_NOMERGE_FLAGS) && \
(blk_discard_rq(rq) || blk_fs_request((rq))))
/*
* q->prep_rq_fn return values
@@ -637,6 +667,12 @@ static inline void blk_queue_bounce(struct request_queue *q, struct bio **bio)
}
#endif /* CONFIG_MMU */
struct rq_map_data {
struct page **pages;
int page_order;
int nr_entries;
};
struct req_iterator {
int i;
struct bio *bio;
@@ -664,6 +700,10 @@ extern void __blk_put_request(struct request_queue *, struct request *);
extern struct request *blk_get_request(struct request_queue *, int, gfp_t);
extern void blk_insert_request(struct request_queue *, struct request *, int, void *);
extern void blk_requeue_request(struct request_queue *, struct request *);
extern int blk_rq_check_limits(struct request_queue *q, struct request *rq);
extern int blk_lld_busy(struct request_queue *q);
extern int blk_insert_cloned_request(struct request_queue *q,
struct request *rq);
extern void blk_plug_device(struct request_queue *);
extern void blk_plug_device_unlocked(struct request_queue *);
extern int blk_remove_plug(struct request_queue *);
@@ -705,11 +745,14 @@ extern void __blk_stop_queue(struct request_queue *q);
extern void __blk_run_queue(struct request_queue *);
extern void blk_run_queue(struct request_queue *);
extern void blk_start_queueing(struct request_queue *);
extern int blk_rq_map_user(struct request_queue *, struct request *, void __user *, unsigned long);
extern int blk_rq_map_user(struct request_queue *, struct request *,
struct rq_map_data *, void __user *, unsigned long,
gfp_t);
extern int blk_rq_unmap_user(struct bio *);
extern int blk_rq_map_kern(struct request_queue *, struct request *, void *, unsigned int, gfp_t);
extern int blk_rq_map_user_iov(struct request_queue *, struct request *,
struct sg_iovec *, int, unsigned int);
struct rq_map_data *, struct sg_iovec *, int,
unsigned int, gfp_t);
extern int blk_execute_rq(struct request_queue *, struct gendisk *,
struct request *, int);
extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
@@ -750,12 +793,15 @@ extern int __blk_end_request(struct request *rq, int error,
extern int blk_end_bidi_request(struct request *rq, int error,
unsigned int nr_bytes, unsigned int bidi_bytes);
extern void end_request(struct request *, int);
extern void end_queued_request(struct request *, int);
extern void end_dequeued_request(struct request *, int);
extern int blk_end_request_callback(struct request *rq, int error,
unsigned int nr_bytes,
int (drv_callback)(struct request *));
extern void blk_complete_request(struct request *);
extern void __blk_complete_request(struct request *);
extern void blk_abort_request(struct request *);
extern void blk_abort_queue(struct request_queue *);
extern void blk_update_request(struct request *rq, int error,
unsigned int nr_bytes);
/*
* blk_end_request() takes bytes instead of sectors as a complete size.
@@ -790,12 +836,16 @@ extern void blk_queue_update_dma_pad(struct request_queue *, unsigned int);
extern int blk_queue_dma_drain(struct request_queue *q,
dma_drain_needed_fn *dma_drain_needed,
void *buf, unsigned int size);
extern void blk_queue_lld_busy(struct request_queue *q, lld_busy_fn *fn);
extern void blk_queue_segment_boundary(struct request_queue *, unsigned long);
extern void blk_queue_prep_rq(struct request_queue *, prep_rq_fn *pfn);
extern void blk_queue_merge_bvec(struct request_queue *, merge_bvec_fn *);
extern void blk_queue_dma_alignment(struct request_queue *, int);
extern void blk_queue_update_dma_alignment(struct request_queue *, int);
extern void blk_queue_softirq_done(struct request_queue *, softirq_done_fn *);
extern void blk_queue_set_discard(struct request_queue *, prepare_discard_fn *);
extern void blk_queue_rq_timed_out(struct request_queue *, rq_timed_out_fn *);
extern void blk_queue_rq_timeout(struct request_queue *, unsigned int);
extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev);
extern int blk_queue_ordered(struct request_queue *, unsigned, prepare_flush_fn *);
extern int blk_do_ordered(struct request_queue *, struct request **);
@@ -837,14 +887,22 @@ static inline struct request *blk_map_queue_find_tag(struct blk_queue_tag *bqt,
}
extern int blkdev_issue_flush(struct block_device *, sector_t *);
extern int blkdev_issue_discard(struct block_device *,
sector_t sector, sector_t nr_sects, gfp_t);
static inline int sb_issue_discard(struct super_block *sb,
sector_t block, sector_t nr_blocks)
{
block <<= (sb->s_blocksize_bits - 9);
nr_blocks <<= (sb->s_blocksize_bits - 9);
return blkdev_issue_discard(sb->s_bdev, block, nr_blocks, GFP_KERNEL);
}
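sb_issue_discard() only converts filesystem blocks into 512-byte sectors before handing off to blkdev_issue_discard(); the shift count is s_blocksize_bits - 9. A quick sketch of that arithmetic, assuming 4 KiB blocks:

#include <stdio.h>

int main(void)
{
	unsigned int blocksize_bits = 12;	/* 4096-byte blocks (assumed) */
	unsigned long long block = 100, nr_blocks = 16;

	/* the same conversion sb_issue_discard() performs */
	unsigned long long sector = block << (blocksize_bits - 9);
	unsigned long long nr_sects = nr_blocks << (blocksize_bits - 9);

	printf("blocks %llu+%llu -> sectors %llu+%llu\n",
	       block, nr_blocks, sector, nr_sects);
	return 0;
}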
/*
* command filter functions
*/
extern int blk_verify_command(struct blk_cmd_filter *filter,
unsigned char *cmd, int has_write_perm);
extern int blk_register_filter(struct gendisk *disk);
extern void blk_unregister_filter(struct gendisk *disk);
extern void blk_set_cmd_filter_defaults(struct blk_cmd_filter *filter);
#define MAX_PHYS_SEGMENTS 128
@@ -876,6 +934,13 @@ static inline int queue_dma_alignment(struct request_queue *q)
return q ? q->dma_alignment : 511;
}
static inline int blk_rq_aligned(struct request_queue *q, void *addr,
unsigned int len)
{
unsigned int alignment = queue_dma_alignment(q) | q->dma_pad_mask;
return !((unsigned long)addr & alignment) && !(len & alignment);
}
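blk_rq_aligned() ORs the queue's DMA alignment with its pad mask and requires both the buffer address and the length to be clean against the result. A sketch of the check with the default 511-byte alignment and an assumed zero pad mask:

#include <stdio.h>

static int rq_aligned(unsigned long addr, unsigned int len,
		      unsigned int dma_alignment, unsigned int dma_pad_mask)
{
	unsigned int alignment = dma_alignment | dma_pad_mask;

	return !(addr & alignment) && !(len & alignment);
}

int main(void)
{
	/* 511 is the default queue_dma_alignment(); pad mask assumed 0 */
	printf("%d\n", rq_aligned(0x1000, 4096, 511, 0));	/* 1 */
	printf("%d\n", rq_aligned(0x1001, 4096, 511, 0));	/* 0: bad addr */
	printf("%d\n", rq_aligned(0x1000, 4097, 511, 0));	/* 0: bad len */
	return 0;
}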
/* assumes size > 256 */
static inline unsigned int blksize_bits(unsigned int size)
{
@@ -902,7 +967,7 @@ static inline void put_dev_sector(Sector p)
}
struct work_struct;
int kblockd_schedule_work(struct work_struct *work);
int kblockd_schedule_work(struct request_queue *q, struct work_struct *work);
void kblockd_flush_work(struct work_struct *work);
#define MODULE_ALIAS_BLOCKDEV(major,minor) \
@@ -947,49 +1012,19 @@ struct blk_integrity {
extern int blk_integrity_register(struct gendisk *, struct blk_integrity *);
extern void blk_integrity_unregister(struct gendisk *);
extern int blk_integrity_compare(struct block_device *, struct block_device *);
extern int blk_integrity_compare(struct gendisk *, struct gendisk *);
extern int blk_rq_map_integrity_sg(struct request *, struct scatterlist *);
extern int blk_rq_count_integrity_sg(struct request *);
static inline unsigned short blk_integrity_tuple_size(struct blk_integrity *bi)
{
if (bi)
return bi->tuple_size;
return 0;
}
static inline struct blk_integrity *bdev_get_integrity(struct block_device *bdev)
static inline
struct blk_integrity *bdev_get_integrity(struct block_device *bdev)
{
return bdev->bd_disk->integrity;
}
static inline unsigned int bdev_get_tag_size(struct block_device *bdev)
static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk)
{
struct blk_integrity *bi = bdev_get_integrity(bdev);
if (bi)
return bi->tag_size;
return 0;
}
static inline int bdev_integrity_enabled(struct block_device *bdev, int rw)
{
struct blk_integrity *bi = bdev_get_integrity(bdev);
if (bi == NULL)
return 0;
if (rw == READ && bi->verify_fn != NULL &&
(bi->flags & INTEGRITY_FLAG_READ))
return 1;
if (rw == WRITE && bi->generate_fn != NULL &&
(bi->flags & INTEGRITY_FLAG_WRITE))
return 1;
return 0;
return disk->integrity;
}
static inline int blk_integrity_rq(struct request *rq)
@@ -1006,7 +1041,7 @@ static inline int blk_integrity_rq(struct request *rq)
#define blk_rq_count_integrity_sg(a) (0)
#define blk_rq_map_integrity_sg(a, b) (0)
#define bdev_get_integrity(a) (0)
#define bdev_get_tag_size(a) (0)
#define blk_get_integrity(a) (0)
#define blk_integrity_compare(a, b) (0)
#define blk_integrity_register(a, b) (0)
#define blk_integrity_unregister(a) do { } while (0);
include/linux/blktrace_api.h +36 -26
@@ -1,8 +1,10 @@
#ifndef BLKTRACE_H
#define BLKTRACE_H
#ifdef __KERNEL__
#include <linux/blkdev.h>
#include <linux/relay.h>
#endif
/*
* Trace categories
@@ -21,6 +23,7 @@ enum blktrace_cat {
BLK_TC_NOTIFY = 1 << 10, /* special message */
BLK_TC_AHEAD = 1 << 11, /* readahead */
BLK_TC_META = 1 << 12, /* metadata */
BLK_TC_DISCARD = 1 << 13, /* discard requests */
BLK_TC_END = 1 << 15, /* only 16-bits, reminder */
};
@@ -47,6 +50,7 @@ enum blktrace_act {
__BLK_TA_SPLIT, /* bio was split */
__BLK_TA_BOUNCE, /* bio was bounced */
__BLK_TA_REMAP, /* bio was remapped */
__BLK_TA_ABORT, /* request aborted */
};
/*
@@ -77,6 +81,7 @@ enum blktrace_notify {
#define BLK_TA_SPLIT (__BLK_TA_SPLIT)
#define BLK_TA_BOUNCE (__BLK_TA_BOUNCE)
#define BLK_TA_REMAP (__BLK_TA_REMAP | BLK_TC_ACT(BLK_TC_QUEUE))
#define BLK_TA_ABORT (__BLK_TA_ABORT | BLK_TC_ACT(BLK_TC_QUEUE))
#define BLK_TN_PROCESS (__BLK_TN_PROCESS | BLK_TC_ACT(BLK_TC_NOTIFY))
#define BLK_TN_TIMESTAMP (__BLK_TN_TIMESTAMP | BLK_TC_ACT(BLK_TC_NOTIFY))
@@ -89,17 +94,17 @@ enum blktrace_notify {
* The trace itself
*/
struct blk_io_trace {
u32 magic; /* MAGIC << 8 | version */
u32 sequence; /* event number */
u64 time; /* in microseconds */
u64 sector; /* disk offset */
u32 bytes; /* transfer length */
u32 action; /* what happened */
u32 pid; /* who did it */
u32 device; /* device number */
u32 cpu; /* on what cpu did it happen */
u16 error; /* completion error */
u16 pdu_len; /* length of data after this trace */
__u32 magic; /* MAGIC << 8 | version */
__u32 sequence; /* event number */
__u64 time; /* in microseconds */
__u64 sector; /* disk offset */
__u32 bytes; /* transfer length */
__u32 action; /* what happened */
__u32 pid; /* who did it */
__u32 device; /* device number */
__u32 cpu; /* on what cpu did it happen */
__u16 error; /* completion error */
__u16 pdu_len; /* length of data after this trace */
};
/*
@@ -117,6 +122,23 @@ enum {
Blktrace_stopped,
};
#define BLKTRACE_BDEV_SIZE 32
/*
* User setup structure passed with BLKTRACESTART
*/
struct blk_user_trace_setup {
char name[BLKTRACE_BDEV_SIZE]; /* output */
__u16 act_mask; /* input */
__u32 buf_size; /* input */
__u32 buf_nr; /* input */
__u64 start_lba;
__u64 end_lba;
__u32 pid;
};
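Moving struct blk_user_trace_setup out of the #ifdef __KERNEL__ block matters because userspace fills it in for the BLKTRACESETUP ioctl. A hedged sketch of that consumer side (device path, act_mask, and buffer sizes are arbitrary choices; requires root and a mounted debugfs):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/fs.h>		/* BLKTRACESETUP and friends */
#include <linux/blktrace_api.h>

int main(void)
{
	struct blk_user_trace_setup buts;
	int fd = open("/dev/sda", O_RDONLY | O_NONBLOCK);	/* assumed device */

	if (fd < 0)
		return 1;
	memset(&buts, 0, sizeof(buts));
	buts.act_mask = 0xffff;		/* all trace categories */
	buts.buf_size = 512 * 1024;	/* relay sub-buffer size */
	buts.buf_nr = 4;		/* number of sub-buffers */

	if (ioctl(fd, BLKTRACESETUP, &buts) == 0 &&
	    ioctl(fd, BLKTRACESTART) == 0) {
		printf("tracing as %s under debugfs\n", buts.name);
		sleep(1);		/* events accumulate here */
		ioctl(fd, BLKTRACESTOP);
		ioctl(fd, BLKTRACETEARDOWN);
	}
	close(fd);
	return 0;
}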
#ifdef __KERNEL__
#if defined(CONFIG_BLK_DEV_IO_TRACE)
struct blk_trace {
int trace_state;
struct rchan *rchan;
@@ -133,21 +155,6 @@ struct blk_trace {
atomic_t dropped;
};
/*
* User setup structure passed with BLKTRACESTART
*/
struct blk_user_trace_setup {
char name[BDEVNAME_SIZE]; /* output */
u16 act_mask; /* input */
u32 buf_size; /* input */
u32 buf_nr; /* input */
u64 start_lba;
u64 end_lba;
u32 pid;
};
#ifdef __KERNEL__
#if defined(CONFIG_BLK_DEV_IO_TRACE)
extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
extern void blk_trace_shutdown(struct request_queue *);
extern void __blk_add_trace(struct blk_trace *, sector_t, int, int, u32, int, int, void *);
@@ -195,6 +202,9 @@ static inline void blk_add_trace_rq(struct request_queue *q, struct request *rq,
if (likely(!bt))
return;
if (blk_discard_rq(rq))
rw |= (1 << BIO_RW_DISCARD);
if (blk_pc_request(rq)) {
what |= BLK_TC_ACT(BLK_TC_PC);
__blk_add_trace(bt, 0, rq->data_len, rw, what, rq->errors, sizeof(rq->cmd), rq->cmd);
include/linux/cnt32_to_63.h +80
@@ -0,0 +1,80 @@
/*
* Extend a 32-bit counter to 63 bits
*
* Author: Nicolas Pitre
* Created: December 3, 2006
* Copyright: MontaVista Software, Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation.
*/
#ifndef __LINUX_CNT32_TO_63_H__
#define __LINUX_CNT32_TO_63_H__
#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/byteorder.h>
/* this is used only to give gcc a clue about good code generation */
union cnt32_to_63 {
struct {
#if defined(__LITTLE_ENDIAN)
u32 lo, hi;
#elif defined(__BIG_ENDIAN)
u32 hi, lo;
#endif
};
u64 val;
};
/**
* cnt32_to_63 - Expand a 32-bit counter to a 63-bit counter
* @cnt_lo: The low part of the counter
*
* Many hardware clock counters are only 32 bits wide and therefore have
* a relatively short period making wrap-arounds rather frequent. This
* is a problem when implementing sched_clock() for example, where a 64-bit
* non-wrapping monotonic value is expected to be returned.
*
* To overcome that limitation, let's extend a 32-bit counter to 63 bits
* in a completely lock free fashion. Bits 0 to 31 of the clock are provided
* by the hardware while bits 32 to 62 are stored in memory. The top bit in
* memory is used to synchronize with the hardware clock half-period. When
* the top bit of both counters (hardware and in memory) differ then the
* memory is updated with a new value, incrementing it when the hardware
* counter wraps around.
*
Because a word store in memory is atomic, the incremented value will
always be in sync with the top bit, indicating to any potential concurrent
reader whether the value in memory is up to date with regard to the
needed increment. And any race in updating the value in memory is harmless,
as the same value would simply be stored more than once.
*
The only restriction for the algorithm to work properly is that this
code must be executed at least once per half period of the 32-bit
counter to properly update the state bit in memory. This is usually not a
problem in practice, but if it is, a kernel timer could be scheduled
to ensure this code is executed often enough.
*
* Note that the top bit (bit 63) in the returned value should be considered
* as garbage. It is not cleared here because callers are likely to use a
* multiplier on the returned value which can get rid of the top bit
* implicitly by making the multiplier even, therefore saving on a runtime
clear-bit instruction. Otherwise the caller must remember to clear the top
bit explicitly.
*/
#define cnt32_to_63(cnt_lo) \
({ \
static volatile u32 __m_cnt_hi; \
union cnt32_to_63 __x; \
__x.hi = __m_cnt_hi; \
__x.lo = (cnt_lo); \
if (unlikely((s32)(__x.hi ^ __x.lo) < 0)) \
__m_cnt_hi = __x.hi = (__x.hi ^ 0x80000000) + (__x.hi >> 31); \
__x.val; \
})
#endif
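A user-space replica of the macro shows the intended call pattern: successive reads of a wrapping 32-bit counter yield a monotonically increasing 63-bit value, provided the call site is visited at least once per half period. The union here assumes little-endian layout, matching the __LITTLE_ENDIAN branch above:

#include <stdio.h>
#include <stdint.h>

union c32to63 {
	struct { uint32_t lo, hi; } s;	/* little-endian layout assumed */
	uint64_t val;
};

#define cnt32_to_63(cnt_lo) \
({ \
	static volatile uint32_t __m_cnt_hi; \
	union c32to63 __x; \
	__x.s.hi = __m_cnt_hi; \
	__x.s.lo = (cnt_lo); \
	if ((int32_t)(__x.s.hi ^ __x.s.lo) < 0) \
		__m_cnt_hi = __x.s.hi = (__x.s.hi ^ 0x80000000) + (__x.s.hi >> 31); \
	__x.val; \
})

int main(void)
{
	/* simulated 32-bit hardware counter stepping across a wrap */
	uint32_t hw[] = { 0x7ffffff0, 0x80000010, 0xfffffff0, 0x00000010 };
	unsigned int i;

	for (i = 0; i < 4; i++)
		printf("lo=%08x -> %016llx\n", hw[i],
		       (unsigned long long)(cnt32_to_63(hw[i]) & ~(1ULL << 63)));
	return 0;
}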
include/linux/compiler.h +3 -1
@@ -190,7 +190,9 @@ extern void __chk_io_ptr(const volatile void __iomem *);
* ACCESS_ONCE() in different C statements.
*
* This macro does absolutely -nothing- to prevent the CPU from reordering,
* merging, or refetching absolutely anything at any time.
* merging, or refetching absolutely anything at any time. Its main intended
* use is to mediate communication between process-level code and irq/NMI
* handlers, all running on the same CPU.
*/
#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
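A user-space illustration of the documented use case, with a signal handler standing in for the irq/NMI side: without the volatile access, an optimizing compiler is free to hoist the load of the flag out of the loop and spin forever. Entirely a sketch; it relies on the GCC typeof extension, just as the kernel macro does:

#include <signal.h>
#include <stdio.h>
#include <unistd.h>

#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))

static int done;	/* deliberately not declared volatile */

static void handler(int sig)
{
	(void)sig;
	done = 1;	/* plays the irq/NMI-handler role from the comment */
}

int main(void)
{
	signal(SIGALRM, handler);
	alarm(1);
	/* without ACCESS_ONCE() the compiler may load 'done' once,
	 * cache it in a register, and never observe the store above */
	while (!ACCESS_ONCE(done))
		;
	printf("flag observed\n");
	return 0;
}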
include/linux/completion.h +41
@@ -10,6 +10,18 @@
#include <linux/wait.h>
/**
* struct completion - structure used to maintain state for a "completion"
*
* This is the opaque structure used to maintain the state for a "completion".
* Completions currently use a FIFO to queue threads that have to wait for
* the "completion" event.
*
* See also: complete(), wait_for_completion() (and friends _timeout,
* _interruptible, _interruptible_timeout, and _killable), init_completion(),
* and macros DECLARE_COMPLETION(), DECLARE_COMPLETION_ONSTACK(), and
* INIT_COMPLETION().
*/
struct completion {
unsigned int done;
wait_queue_head_t wait;
@@ -21,6 +33,14 @@ struct completion {
#define COMPLETION_INITIALIZER_ONSTACK(work) \
({ init_completion(&work); work; })
/**
* DECLARE_COMPLETION: - declare and initialize a completion structure
* @work: identifier for the completion structure
*
* This macro declares and initializes a completion structure. Generally used
* for static declarations. You should use the _ONSTACK variant for automatic
* variables.
*/
#define DECLARE_COMPLETION(work) \
struct completion work = COMPLETION_INITIALIZER(work)
@@ -29,6 +49,13 @@ struct completion {
* completions - so we use the _ONSTACK() variant for those that
* are on the kernel stack:
*/
/**
* DECLARE_COMPLETION_ONSTACK: - declare and initialize a completion structure
* @work: identifier for the completion structure
*
* This macro declares and initializes a completion structure on the kernel
* stack.
*/
#ifdef CONFIG_LOCKDEP
# define DECLARE_COMPLETION_ONSTACK(work) \
struct completion work = COMPLETION_INITIALIZER_ONSTACK(work)
@@ -36,6 +63,13 @@ struct completion {
# define DECLARE_COMPLETION_ONSTACK(work) DECLARE_COMPLETION(work)
#endif
/**
* init_completion: - Initialize a dynamically allocated completion
* @x: completion structure that is to be initialized
*
* This inline function will initialize a dynamically created completion
* structure.
*/
static inline void init_completion(struct completion *x)
{
x->done = 0;
@@ -55,6 +89,13 @@ extern bool completion_done(struct completion *x);
extern void complete(struct completion *);
extern void complete_all(struct completion *);
/**
* INIT_COMPLETION: - reinitialize a completion structure
* @x: completion structure to be reinitialized
*
* This macro should be used to reinitialize a completion structure so it can
* be reused. This is especially important after complete_all() is used.
*/
#define INIT_COMPLETION(x) ((x).done = 0)
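A minimal kernel-module-style sketch tying the documented pieces together: an on-stack completion, a worker kthread that signals it, and a waiter that blocks until then. Names are invented and error handling is omitted:

#include <linux/completion.h>
#include <linux/kthread.h>
#include <linux/module.h>

static int worker_fn(void *data)
{
	struct completion *work_done = data;

	/* ... the actual work would happen here ... */
	complete(work_done);			/* wake the waiter below */
	return 0;
}

static int __init demo_init(void)
{
	DECLARE_COMPLETION_ONSTACK(work_done);

	kthread_run(worker_fn, &work_done, "completion-demo");
	wait_for_completion(&work_done);	/* sleeps until complete() */
	return 0;
}

static void __exit demo_exit(void)
{
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");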
include/linux/cpu.h +1
@@ -69,6 +69,7 @@ static inline void unregister_cpu_notifier(struct notifier_block *nb)
#endif
int cpu_up(unsigned int cpu);
void notify_cpu_starting(unsigned int cpu);
extern void cpu_hotplug_init(void);
extern void cpu_maps_update_begin(void);
extern void cpu_maps_update_done(void);
include/linux/cpufreq.h +5 -2
@@ -187,7 +187,8 @@ extern int __cpufreq_driver_target(struct cpufreq_policy *policy,
unsigned int relation);
extern int __cpufreq_driver_getavg(struct cpufreq_policy *policy);
extern int __cpufreq_driver_getavg(struct cpufreq_policy *policy,
unsigned int cpu);
int cpufreq_register_governor(struct cpufreq_governor *governor);
void cpufreq_unregister_governor(struct cpufreq_governor *governor);
@@ -226,7 +227,9 @@ struct cpufreq_driver {
unsigned int (*get) (unsigned int cpu);
/* optional */
unsigned int (*getavg) (unsigned int cpu);
unsigned int (*getavg) (struct cpufreq_policy *policy,
unsigned int cpu);
int (*exit) (struct cpufreq_policy *policy);
int (*suspend) (struct cpufreq_policy *policy, pm_message_t pmsg);
int (*resume) (struct cpufreq_policy *policy);
include/linux/crypto.h +35
@@ -38,6 +38,7 @@
#define CRYPTO_ALG_TYPE_DIGEST 0x00000008
#define CRYPTO_ALG_TYPE_HASH 0x00000009
#define CRYPTO_ALG_TYPE_AHASH 0x0000000a
#define CRYPTO_ALG_TYPE_RNG 0x0000000c
#define CRYPTO_ALG_TYPE_HASH_MASK 0x0000000e
#define CRYPTO_ALG_TYPE_AHASH_MASK 0x0000000c
@@ -60,6 +61,14 @@
*/
#define CRYPTO_ALG_GENIV 0x00000200
/*
* Set if the algorithm has passed automated run-time testing. Note that
* if there is no run-time testing for a given algorithm it is considered
* to have passed.
*/
#define CRYPTO_ALG_TESTED 0x00000400
/*
* Transform masks and values (for crt_flags).
*/
@@ -105,6 +114,7 @@ struct crypto_aead;
struct crypto_blkcipher;
struct crypto_hash;
struct crypto_ahash;
struct crypto_rng;
struct crypto_tfm;
struct crypto_type;
struct aead_givcrypt_request;
@@ -290,6 +300,15 @@ struct compress_alg {
unsigned int slen, u8 *dst, unsigned int *dlen);
};
struct rng_alg {
int (*rng_make_random)(struct crypto_rng *tfm, u8 *rdata,
unsigned int dlen);
int (*rng_reset)(struct crypto_rng *tfm, u8 *seed, unsigned int slen);
unsigned int seedsize;
};
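struct rng_alg is the provider side; consumers go through the struct crypto_rng handle declared further down, using the companion API from <crypto/rng.h> that entered the tree alongside this change. A hedged sketch of a consumer (the "stdrng" algorithm name is an assumption about what is registered):

#include <crypto/rng.h>
#include <linux/err.h>
#include <linux/module.h>

static int __init rng_demo_init(void)
{
	struct crypto_rng *rng;
	u8 buf[16];
	int err;

	rng = crypto_alloc_rng("stdrng", 0, 0);	/* assumed to be registered */
	if (IS_ERR(rng))
		return PTR_ERR(rng);

	err = crypto_rng_get_bytes(rng, buf, sizeof(buf));
	crypto_free_rng(rng);
	return err;
}

static void __exit rng_demo_exit(void)
{
}

module_init(rng_demo_init);
module_exit(rng_demo_exit);
MODULE_LICENSE("GPL");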
#define cra_ablkcipher cra_u.ablkcipher
#define cra_aead cra_u.aead
#define cra_blkcipher cra_u.blkcipher
@@ -298,6 +317,7 @@ struct compress_alg {
#define cra_hash cra_u.hash
#define cra_ahash cra_u.ahash
#define cra_compress cra_u.compress
#define cra_rng cra_u.rng
struct crypto_alg {
struct list_head cra_list;
@@ -325,6 +345,7 @@ struct crypto_alg {
struct hash_alg hash;
struct ahash_alg ahash;
struct compress_alg compress;
struct rng_alg rng;
} cra_u;
int (*cra_init)(struct crypto_tfm *tfm);
@@ -430,6 +451,12 @@ struct compress_tfm {
u8 *dst, unsigned int *dlen);
};
struct rng_tfm {
int (*rng_gen_random)(struct crypto_rng *tfm, u8 *rdata,
unsigned int dlen);
int (*rng_reset)(struct crypto_rng *tfm, u8 *seed, unsigned int slen);
};
#define crt_ablkcipher crt_u.ablkcipher
#define crt_aead crt_u.aead
#define crt_blkcipher crt_u.blkcipher
@@ -437,6 +464,7 @@ struct compress_tfm {
#define crt_hash crt_u.hash
#define crt_ahash crt_u.ahash
#define crt_compress crt_u.compress
#define crt_rng crt_u.rng
struct crypto_tfm {
@@ -450,6 +478,7 @@ struct crypto_tfm {
struct hash_tfm hash;
struct ahash_tfm ahash;
struct compress_tfm compress;
struct rng_tfm rng;
} crt_u;
struct crypto_alg *__crt_alg;
@@ -481,6 +510,10 @@ struct crypto_hash {
struct crypto_tfm base;
};
struct crypto_rng {
struct crypto_tfm base;
};
enum {
CRYPTOA_UNSPEC,
CRYPTOA_ALG,
@@ -515,6 +548,8 @@ struct crypto_tfm *crypto_alloc_tfm(const char *alg_name, u32 tfm_flags);
struct crypto_tfm *crypto_alloc_base(const char *alg_name, u32 type, u32 mask);
void crypto_free_tfm(struct crypto_tfm *tfm);
int alg_test(const char *driver, const char *alg, u32 type, u32 mask);
/*
* Transform helpers which query the underlying algorithm.
*/
include/linux/device-mapper.h +17 -1
@@ -13,7 +13,6 @@
struct dm_target;
struct dm_table;
struct dm_dev;
struct mapped_device;
struct bio_vec;
@@ -84,6 +83,12 @@ void dm_error(const char *message);
*/
void dm_set_device_limits(struct dm_target *ti, struct block_device *bdev);
struct dm_dev {
struct block_device *bdev;
int mode;
char name[16];
};
/*
* Constructors should call these functions to ensure destination devices
* are opened/closed correctly.
@@ -202,6 +207,7 @@ int dm_copy_name_and_uuid(struct mapped_device *md, char *name, char *uuid);
struct gendisk *dm_disk(struct mapped_device *md);
int dm_suspended(struct mapped_device *md);
int dm_noflush_suspending(struct dm_target *ti);
union map_info *dm_get_mapinfo(struct bio *bio);
/*
* Geometry functions.
@@ -231,6 +237,11 @@ int dm_table_add_target(struct dm_table *t, const char *type,
*/
int dm_table_complete(struct dm_table *t);
/*
* Unplug all devices in a table.
*/
void dm_table_unplug_all(struct dm_table *t);
/*
* Table reference counting.
*/
@@ -256,6 +267,11 @@ void dm_table_event(struct dm_table *t);
*/
int dm_swap_table(struct mapped_device *md, struct dm_table *t);
/*
* A wrapper around vmalloc.
*/
void *dm_vcalloc(unsigned long nmemb, unsigned long elem_size);
/*-----------------------------------------------------------------
* Macros.
*---------------------------------------------------------------*/
include/linux/device.h +13 -1
@@ -199,6 +199,11 @@ struct class {
struct class_private *p;
};
struct class_dev_iter {
struct klist_iter ki;
const struct device_type *type;
};
extern struct kobject *sysfs_dev_block_kobj;
extern struct kobject *sysfs_dev_char_kobj;
extern int __must_check __class_register(struct class *class,
@@ -213,6 +218,13 @@ extern void class_unregister(struct class *class);
__class_register(class, &__key); \
})
extern void class_dev_iter_init(struct class_dev_iter *iter,
struct class *class,
struct device *start,
const struct device_type *type);
extern struct device *class_dev_iter_next(struct class_dev_iter *iter);
extern void class_dev_iter_exit(struct class_dev_iter *iter);
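The iterator is meant to replace open-coded walks of a class's device list; init/next/exit brackets the walk, and the underlying klist reference keeps devices from disappearing mid-iteration. A sketch of the pattern (demo_class is a placeholder for a class the caller owns):

#include <linux/device.h>

/* 'demo_class' stands in for a class the caller owns, e.g. one made
 * with class_create(); the iterator pins each device via its klist
 * node, so devices cannot vanish mid-walk */
static void demo_walk_class(struct class *demo_class)
{
	struct class_dev_iter iter;
	struct device *dev;

	class_dev_iter_init(&iter, demo_class, NULL, NULL);
	while ((dev = class_dev_iter_next(&iter)))
		dev_printk(KERN_INFO, dev, "visited\n");
	class_dev_iter_exit(&iter);
}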
extern int class_for_each_device(struct class *class, struct device *start,
void *data,
int (*fn)(struct device *dev, void *data));
@@ -396,7 +408,7 @@ struct device {
spinlock_t devres_lock;
struct list_head devres_head;
struct list_head node;
struct klist_node knode_class;
struct class *class;
dev_t devt; /* dev_t, creates the sysfs "dev" */
struct attribute_group **groups; /* optional groups */
include/linux/dlm.h +4 -1
@@ -2,7 +2,7 @@
*******************************************************************************
**
** Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
** Copyright (C) 2004-2007 Red Hat, Inc. All rights reserved.
** Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
**
** This copyrighted material is made available to anyone wishing to use,
** modify, copy, or redistribute it subject to the terms and conditions
@@ -65,9 +65,12 @@ struct dlm_lksb {
char * sb_lvbptr;
};
/* dlm_new_lockspace() flags */
#define DLM_LSFL_NODIR 0x00000001
#define DLM_LSFL_TIMEWARN 0x00000002
#define DLM_LSFL_FS 0x00000004
#define DLM_LSFL_NEWEXCL 0x00000008
#ifdef __KERNEL__
include/linux/dlm_device.h +1 -1
@@ -26,7 +26,7 @@
/* Version of the device interface */
#define DLM_DEVICE_VERSION_MAJOR 6
#define DLM_DEVICE_VERSION_MINOR 0
#define DLM_DEVICE_VERSION_PATCH 0
#define DLM_DEVICE_VERSION_PATCH 1
/* struct passed to the lock write */
struct dlm_lock_params {
include/linux/dma-mapping.h +12
@@ -48,6 +48,11 @@ static inline int is_device_dma_capable(struct device *dev)
return dev->dma_mask != NULL && *dev->dma_mask != DMA_MASK_NONE;
}
static inline int is_buffer_dma_capable(u64 mask, dma_addr_t addr, size_t size)
{
return addr + size <= mask;
}
#ifdef CONFIG_HAS_DMA
#include <asm/dma-mapping.h>
#else
@@ -58,6 +63,13 @@ static inline int is_device_dma_capable(struct device *dev)
#define dma_sync_single dma_sync_single_for_cpu
#define dma_sync_sg dma_sync_sg_for_cpu
static inline u64 dma_get_mask(struct device *dev)
{
if (dev && dev->dma_mask && *dev->dma_mask)
return *dev->dma_mask;
return DMA_32BIT_MASK;
}
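dma_get_mask() and is_buffer_dma_capable() combine naturally: a buffer is reachable when it ends at or below the device's mask. A user-space sketch of the arithmetic for an assumed 32-bit-only device (addresses are invented):

#include <stdio.h>
#include <stdint.h>

#define DMA_32BIT_MASK 0x00000000ffffffffULL	/* as defined by the kernel */

/* mirrors is_buffer_dma_capable() above */
static int buffer_dma_capable(uint64_t mask, uint64_t addr, uint64_t size)
{
	return addr + size <= mask;
}

int main(void)
{
	uint64_t mask = DMA_32BIT_MASK;	/* device without a dma_mask set */

	printf("%d\n", buffer_dma_capable(mask, 0xffffe000ULL, 4096)); /* 1 */
	printf("%d\n", buffer_dma_capable(mask, 0xfffff000ULL, 4096)); /* 0: ends past mask */
	return 0;
}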
extern u64 dma_get_required_mask(struct device *dev);
static inline unsigned int dma_get_max_seg_size(struct device *dev)
include/linux/dmar.h +100 -27
@@ -25,9 +25,99 @@
#include <linux/types.h>
#include <linux/msi.h>
#ifdef CONFIG_DMAR
#if defined(CONFIG_DMAR) || defined(CONFIG_INTR_REMAP)
struct intel_iommu;
struct dmar_drhd_unit {
struct list_head list; /* list of drhd units */
struct acpi_dmar_header *hdr; /* ACPI header */
u64 reg_base_addr; /* register base address*/
struct pci_dev **devices; /* target device array */
int devices_cnt; /* target device count */
u8 ignored:1; /* ignore drhd */
u8 include_all:1;
struct intel_iommu *iommu;
};
extern struct list_head dmar_drhd_units;
#define for_each_drhd_unit(drhd) \
list_for_each_entry(drhd, &dmar_drhd_units, list)
extern int dmar_table_init(void);
extern int early_dmar_detect(void);
extern int dmar_dev_scope_init(void);
/* Intel IOMMU detection */
extern void detect_intel_iommu(void);
extern int parse_ioapics_under_ir(void);
extern int alloc_iommu(struct dmar_drhd_unit *);
#else
static inline void detect_intel_iommu(void)
{
return;
}
static inline int dmar_table_init(void)
{
return -ENODEV;
}
#endif /* !CONFIG_DMAR && !CONFIG_INTR_REMAP */
#ifdef CONFIG_INTR_REMAP
extern int intr_remapping_enabled;
extern int enable_intr_remapping(int);
struct irte {
union {
struct {
__u64 present : 1,
fpd : 1,
dst_mode : 1,
redir_hint : 1,
trigger_mode : 1,
dlvry_mode : 3,
avail : 4,
__reserved_1 : 4,
vector : 8,
__reserved_2 : 8,
dest_id : 32;
};
__u64 low;
};
union {
struct {
__u64 sid : 16,
sq : 2,
svt : 2,
__reserved_3 : 44;
};
__u64 high;
};
};
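The two anonymous unions let interrupt-remapping code manipulate named fields while reading back the raw 64-bit halves that actually get written to the remap table. A user-space sketch with a trimmed copy of the layout (field values invented; bitfield ordering matches little-endian GCC, as the kernel assumes here):

#include <stdio.h>

/* trimmed copy of the irte layout above (C11/GNU anonymous members) */
struct irte_demo {
	union {
		struct {
			unsigned long long present : 1, fpd : 1, dst_mode : 1,
				redir_hint : 1, trigger_mode : 1, dlvry_mode : 3,
				avail : 4, rsvd1 : 4, vector : 8, rsvd2 : 8,
				dest_id : 32;
		};
		unsigned long long low;
	};
	union {
		struct {
			unsigned long long sid : 16, sq : 2, svt : 2, rsvd3 : 44;
		};
		unsigned long long high;
	};
};

int main(void)
{
	struct irte_demo e = { 0 };

	e.present = 1;
	e.vector = 0x31;	/* invented vector number */
	e.dest_id = 4;		/* invented destination APIC id */
	/* the raw words are what would be programmed into the table */
	printf("low=0x%016llx high=0x%016llx\n", e.low, e.high);
	return 0;
}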
extern int get_irte(int irq, struct irte *entry);
extern int modify_irte(int irq, struct irte *irte_modified);
extern int alloc_irte(struct intel_iommu *iommu, int irq, u16 count);
extern int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index,
u16 sub_handle);
extern int map_irq_to_irte_handle(int irq, u16 *sub_handle);
extern int clear_irte_irq(int irq, struct intel_iommu *iommu, u16 index);
extern int flush_irte(int irq);
extern int free_irte(int irq);
extern int irq_remapped(int irq);
extern struct intel_iommu *map_dev_to_ir(struct pci_dev *dev);
extern struct intel_iommu *map_ioapic_to_ir(int apic);
#else
#define irq_remapped(irq) (0)
#define enable_intr_remapping(mode) (-1)
#define intr_remapping_enabled (0)
#endif
#ifdef CONFIG_DMAR
extern const char *dmar_get_fault_reason(u8 fault_reason);
/* Can't use the common MSI interrupt functions
@@ -40,47 +130,30 @@ extern void dmar_msi_write(int irq, struct msi_msg *msg);
extern int dmar_set_interrupt(struct intel_iommu *iommu);
extern int arch_setup_dmar_msi(unsigned int irq);
/* Intel IOMMU detection and initialization functions */
extern void detect_intel_iommu(void);
extern int intel_iommu_init(void);
extern int dmar_table_init(void);
extern int early_dmar_detect(void);
extern struct list_head dmar_drhd_units;
extern int iommu_detected, no_iommu;
extern struct list_head dmar_rmrr_units;
struct dmar_drhd_unit {
struct list_head list; /* list of drhd units */
u64 reg_base_addr; /* register base address*/
struct pci_dev **devices; /* target device array */
int devices_cnt; /* target device count */
u8 ignored:1; /* ignore drhd */
u8 include_all:1;
struct intel_iommu *iommu;
};
struct dmar_rmrr_unit {
struct list_head list; /* list of rmrr units */
struct acpi_dmar_header *hdr; /* ACPI header */
u64 base_address; /* reserved base address*/
u64 end_address; /* reserved end address */
struct pci_dev **devices; /* target devices */
int devices_cnt; /* target device count */
};
#define for_each_drhd_unit(drhd) \
list_for_each_entry(drhd, &dmar_drhd_units, list)
#define for_each_rmrr_units(rmrr) \
list_for_each_entry(rmrr, &dmar_rmrr_units, list)
/* Intel DMAR initialization functions */
extern int intel_iommu_init(void);
extern int dmar_disabled;
#else
static inline void detect_intel_iommu(void)
{
return;
}
static inline int intel_iommu_init(void)
{
#ifdef CONFIG_INTR_REMAP
return dmar_dev_scope_init();
#else
return -ENODEV;
#endif
}
#endif /* !CONFIG_DMAR */
#endif /* __DMAR_H__ */
include/linux/elevator.h +5 -4
@@ -112,6 +112,7 @@ extern struct request *elv_latter_request(struct request_queue *, struct request
extern int elv_register_queue(struct request_queue *q);
extern void elv_unregister_queue(struct request_queue *q);
extern int elv_may_queue(struct request_queue *, int);
extern void elv_abort_queue(struct request_queue *);
extern void elv_completed_request(struct request_queue *, struct request *);
extern int elv_set_request(struct request_queue *, struct request *, gfp_t);
extern void elv_put_request(struct request_queue *, struct request *);
@@ -173,15 +174,15 @@ enum {
#define rb_entry_rq(node) rb_entry((node), struct request, rb_node)
/*
* Hack to reuse the donelist list_head as the fifo time holder while
* Hack to reuse the csd.list list_head as the fifo time holder while
* the request is in the io scheduler. Saves an unsigned long in rq.
*/
#define rq_fifo_time(rq) ((unsigned long) (rq)->donelist.next)
#define rq_set_fifo_time(rq,exp) ((rq)->donelist.next = (void *) (exp))
#define rq_fifo_time(rq) ((unsigned long) (rq)->csd.list.next)
#define rq_set_fifo_time(rq,exp) ((rq)->csd.list.next = (void *) (exp))
#define rq_entry_fifo(ptr) list_entry((ptr), struct request, queuelist)
#define rq_fifo_clear(rq) do { \
list_del_init(&(rq)->queuelist); \
INIT_LIST_HEAD(&(rq)->donelist); \
INIT_LIST_HEAD(&(rq)->csd.list); \
} while (0)
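The fifo macros pun an expiry time into csd.list.next, which is dead storage while the request is owned by the io scheduler; that is the "saves an unsigned long in rq" mentioned above. A user-space sketch of the same trick with stand-in types:

#include <stdio.h>

struct list_node { struct list_node *next, *prev; };

struct demo_rq {
	struct list_node queuelist;	/* stands in for rq->queuelist */
	struct list_node csd_list;	/* stands in for rq->csd.list */
};

/* same trick as rq_fifo_time()/rq_set_fifo_time(): while the request
 * sits in the scheduler, csd_list.next is unused, so an unsigned long
 * expiry can be parked in it */
#define rq_fifo_time(rq)	((unsigned long)(rq)->csd_list.next)
#define rq_set_fifo_time(rq, exp) ((rq)->csd_list.next = (void *)(exp))

int main(void)
{
	struct demo_rq rq;

	rq_set_fifo_time(&rq, 12345UL);
	printf("expires at jiffies=%lu\n", rq_fifo_time(&rq));
	return 0;
}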
/*
include/linux/ext3_fs.h +2
@@ -837,6 +837,8 @@ extern void ext3_truncate (struct inode *);
extern void ext3_set_inode_flags(struct inode *);
extern void ext3_get_inode_flags(struct ext3_inode_info *);
extern void ext3_set_aops(struct inode *inode);
extern int ext3_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
u64 start, u64 len);
/* ioctl.c */
extern int ext3_ioctl (struct inode *, struct file *, unsigned int,
include/linux/fd.h +7 -1
@@ -15,10 +15,16 @@ struct floppy_struct {
sect, /* sectors per track */
head, /* nr of heads */
track, /* nr of tracks */
stretch; /* !=0 means double track steps */
stretch; /* bit 0 !=0 means double track steps */
/* bit 1 != 0 means swap sides */
/* bits 2..9 give the first sector */
/* number (the LSB is flipped) */
#define FD_STRETCH 1
#define FD_SWAPSIDES 2
#define FD_ZEROBASED 4
#define FD_SECTBASEMASK 0x3FC
#define FD_MKSECTBASE(s) (((s) ^ 1) << 2)
#define FD_SECTBASE(floppy) ((((floppy)->stretch & FD_SECTBASEMASK) >> 2) ^ 1)
unsigned char gap, /* gap1 size */

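The new encoding packs the first-sector number into bits 2..9 of stretch with its least significant bit flipped, so the common value 1 encodes as 0 and old single-bit stretch values keep working. A quick round-trip check (FD_SECTBASE is adapted here to take the raw stretch value instead of a struct pointer):

#include <stdio.h>

#define FD_STRETCH 1
#define FD_SWAPSIDES 2
#define FD_ZEROBASED 4
#define FD_SECTBASEMASK 0x3FC
#define FD_MKSECTBASE(s) (((s) ^ 1) << 2)
#define FD_SECTBASE(stretch) ((((stretch) & FD_SECTBASEMASK) >> 2) ^ 1)

int main(void)
{
	int s;

	/* round-trip the first-sector number through the stretch field */
	for (s = 0; s <= 3; s++) {
		int stretch = FD_MKSECTBASE(s) | FD_STRETCH;
		printf("sect base %d -> stretch 0x%03x -> %d\n",
		       s, stretch, FD_SECTBASE(stretch));
	}
	return 0;
}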
Some files were not shown because too many files have changed in this diff.