Merge branch 'iommu/fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/linux-2.6-iommu into x86/urgent

Committed by Ingo Molnar on 2009-12-28 09:23:13 +01:00
4277 changed files with 400613 additions and 256458 deletions
+2 -1
@@ -360,6 +360,7 @@ config DEBUG_KMEMLEAK
select DEBUG_FS if SYSFS
select STACKTRACE if STACKTRACE_SUPPORT
select KALLSYMS
select CRC32
help
Say Y here if you want to enable the memory leak
detector. The memory allocation/freeing is traced in a way
@@ -575,7 +576,7 @@ config DEBUG_BUGVERBOSE
depends on BUG
depends on ARM || AVR32 || M32R || M68K || SPARC32 || SPARC64 || \
FRV || SUPERH || GENERIC_BUG || BLACKFIN || MN10300
default !EMBEDDED
default y
help
Say Y here to make BUG() panics output the file name and line number
of the BUG call as well as the EIP and oops trace. This aids
+3 -10
@@ -4,17 +4,10 @@
#include <linux/kernel.h>
#include <linux/ctype.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/module.h>
static const char *skip_sep(const char *cp)
{
while (*cp && isspace(*cp))
cp++;
return cp;
}
static const char *skip_arg(const char *cp)
{
while (*cp && !isspace(*cp))
@@ -28,7 +21,7 @@ static int count_argc(const char *str)
int count = 0;
while (*str) {
str = skip_sep(str);
str = skip_spaces(str);
if (*str) {
count++;
str = skip_arg(str);
@@ -82,7 +75,7 @@ char **argv_split(gfp_t gfp, const char *str, int *argcp)
argvp = argv;
while (*str) {
str = skip_sep(str);
str = skip_spaces(str);
if (*str) {
const char *p = str;
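For reference, the argv_split()/argv_free() pair implemented in this file is used roughly as follows; a minimal sketch, with the command string, error handling and function name being illustrative rather than taken from this commit:

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/string.h>

static int split_example(void)
{
	int argc;
	char **argv;

	/* Splits on whitespace; returns a NULL-terminated vector. */
	argv = argv_split(GFP_KERNEL, "modprobe -q net-pf-10", &argc);
	if (!argv)
		return -ENOMEM;

	pr_info("argc=%d, argv[0]=%s\n", argc, argv[0]);	/* 3, "modprobe" */

	argv_free(argv);	/* frees both the strings and the array */
	return 0;
}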
+81
@@ -271,6 +271,87 @@ int __bitmap_weight(const unsigned long *bitmap, int bits)
}
EXPORT_SYMBOL(__bitmap_weight);
#define BITMAP_FIRST_WORD_MASK(start) (~0UL << ((start) % BITS_PER_LONG))
void bitmap_set(unsigned long *map, int start, int nr)
{
unsigned long *p = map + BIT_WORD(start);
const int size = start + nr;
int bits_to_set = BITS_PER_LONG - (start % BITS_PER_LONG);
unsigned long mask_to_set = BITMAP_FIRST_WORD_MASK(start);
while (nr - bits_to_set >= 0) {
*p |= mask_to_set;
nr -= bits_to_set;
bits_to_set = BITS_PER_LONG;
mask_to_set = ~0UL;
p++;
}
if (nr) {
mask_to_set &= BITMAP_LAST_WORD_MASK(size);
*p |= mask_to_set;
}
}
EXPORT_SYMBOL(bitmap_set);
void bitmap_clear(unsigned long *map, int start, int nr)
{
unsigned long *p = map + BIT_WORD(start);
const int size = start + nr;
int bits_to_clear = BITS_PER_LONG - (start % BITS_PER_LONG);
unsigned long mask_to_clear = BITMAP_FIRST_WORD_MASK(start);
while (nr - bits_to_clear >= 0) {
*p &= ~mask_to_clear;
nr -= bits_to_clear;
bits_to_clear = BITS_PER_LONG;
mask_to_clear = ~0UL;
p++;
}
if (nr) {
mask_to_clear &= BITMAP_LAST_WORD_MASK(size);
*p &= ~mask_to_clear;
}
}
EXPORT_SYMBOL(bitmap_clear);
/*
* bitmap_find_next_zero_area - find a contiguous aligned zero area
* @map: The address to base the search on
* @size: The bitmap size in bits
* @start: The bitnumber to start searching at
* @nr: The number of zeroed bits we're looking for
* @align_mask: Alignment mask for zero area
*
* The @align_mask should be one less than a power of 2; the effect is that
* the bit offsets of all zero areas this function finds are multiples of that
* power of 2. An @align_mask of 0 means no alignment is required.
*/
unsigned long bitmap_find_next_zero_area(unsigned long *map,
unsigned long size,
unsigned long start,
unsigned int nr,
unsigned long align_mask)
{
unsigned long index, end, i;
again:
index = find_next_zero_bit(map, size, start);
/* Align allocation */
index = __ALIGN_MASK(index, align_mask);
end = index + nr;
if (end > size)
return end;
i = find_next_bit(map, end, index);
if (i < end) {
start = i + 1;
goto again;
}
return index;
}
EXPORT_SYMBOL(bitmap_find_next_zero_area);
/*
* Bitmap printing & parsing functions: first version by Bill Irwin,
* second version by Paul Jackson, third by Joe Korty.
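Taken together, the new helpers give allocators a simple pattern: find an aligned run of zero bits, mark it busy with bitmap_set(), and return it later with bitmap_clear(). A minimal sketch of that pattern (the pool size and function names below are illustrative, not from this commit):

#include <linux/bitmap.h>

#define POOL_BITS	128

static DECLARE_BITMAP(pool_map, POOL_BITS);

/* Allocate @nr contiguous slots aligned to @align (a power of two). */
static int pool_alloc(unsigned int nr, unsigned long align)
{
	unsigned long start;

	start = bitmap_find_next_zero_area(pool_map, POOL_BITS, 0,
					   nr, align - 1);
	if (start >= POOL_BITS)
		return -1;			/* no free run large enough */

	bitmap_set(pool_map, start, nr);
	return start;
}

static void pool_free(int start, unsigned int nr)
{
	bitmap_clear(pool_map, start, nr);
}

Both genalloc and the iommu helpers later in this commit are converted to exactly this pattern.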
+8 -6
@@ -37,7 +37,8 @@
#include <asm/byteorder.h>
static inline unsigned short from32to16(unsigned long x)
#ifndef do_csum
static inline unsigned short from32to16(unsigned int x)
{
/* add up 16-bit and 16-bit for 16+c bit */
x = (x & 0xffff) + (x >> 16);
@@ -49,16 +50,16 @@ static inline unsigned short from32to16(unsigned long x)
static unsigned int do_csum(const unsigned char *buff, int len)
{
int odd, count;
unsigned long result = 0;
unsigned int result = 0;
if (len <= 0)
goto out;
odd = 1 & (unsigned long) buff;
if (odd) {
#ifdef __LITTLE_ENDIAN
result = *buff;
#else
result += (*buff << 8);
#else
result = *buff;
#endif
len--;
buff++;
@@ -73,9 +74,9 @@ static unsigned int do_csum(const unsigned char *buff, int len)
}
count >>= 1; /* nr of 32-bit words.. */
if (count) {
unsigned long carry = 0;
unsigned int carry = 0;
do {
unsigned long w = *(unsigned int *) buff;
unsigned int w = *(unsigned int *) buff;
count--;
buff += 4;
result += carry;
@@ -102,6 +103,7 @@ static unsigned int do_csum(const unsigned char *buff, int len)
out:
return result;
}
#endif
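The conversion above only narrows the fold arithmetic from unsigned long to unsigned int; the algorithm is unchanged. To see why from32to16() folds twice, consider the partial sum 0x0001ffff: the first fold yields 0xffff + 0x0001 = 0x10000, which still carries into bit 16, and only the second fold reduces it to 0x0001. A standalone illustration (userspace sketch, not part of the patch):

#include <stdio.h>

static unsigned short from32to16(unsigned int x)
{
	x = (x & 0xffff) + (x >> 16);	/* add low and high halves */
	x = (x & 0xffff) + (x >> 16);	/* fold the carry produced above */
	return x;
}

int main(void)
{
	printf("%#x -> %#x\n", 0x0001ffffu, from32to16(0x0001ffffu));	/* 0x1 */
	printf("%#x -> %#x\n", 0x00020003u, from32to16(0x00020003u));	/* 0x5 */
	return 0;
}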
/*
* This is a version of ip_compute_csum() optimized for IP headers,
+44 -77
@@ -42,6 +42,48 @@ MODULE_AUTHOR("Matt Domsch <Matt_Domsch@dell.com>");
MODULE_DESCRIPTION("Ethernet CRC32 calculations");
MODULE_LICENSE("GPL");
#if CRC_LE_BITS == 8 || CRC_BE_BITS == 8
static inline u32
crc32_body(u32 crc, unsigned char const *buf, size_t len, const u32 *tab)
{
# ifdef __LITTLE_ENDIAN
# define DO_CRC(x) crc = tab[(crc ^ (x)) & 255 ] ^ (crc >> 8)
# else
# define DO_CRC(x) crc = tab[((crc >> 24) ^ (x)) & 255] ^ (crc << 8)
# endif
const u32 *b = (const u32 *)buf;
size_t rem_len;
/* Align it */
if (unlikely((long)b & 3 && len)) {
u8 *p = (u8 *)b;
do {
DO_CRC(*p++);
} while ((--len) && ((long)p)&3);
b = (u32 *)p;
}
rem_len = len & 3;
/* load data 32 bits wide, xor data 32 bits wide. */
len = len >> 2;
for (--b; len; --len) {
crc ^= *++b; /* use pre increment for speed */
DO_CRC(0);
DO_CRC(0);
DO_CRC(0);
DO_CRC(0);
}
len = rem_len;
/* And the last few bytes */
if (len) {
u8 *p = (u8 *)(b + 1) - 1;
do {
DO_CRC(*++p); /* use pre increment for speed */
} while (--len);
}
return crc;
}
#endif
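crc32_body() centralizes the table-driven inner loop that crc32_le() and crc32_be() previously duplicated: one table lookup per byte, with a word-at-a-time main loop once the buffer is aligned. Stripped of the alignment and endianness handling, the per-byte update that DO_CRC() performs on a little-endian machine is the classic reflected CRC-32 step, sketched below in plain userspace C (the table here is generated at run time; the kernel uses the pre-generated crc32table_le instead):

#include <stddef.h>
#include <stdint.h>

static uint32_t crc_table[256];

static void crc32_init_table(void)
{
	const uint32_t poly = 0xedb88320u;	/* reflected Ethernet/AUTODIN II polynomial */
	uint32_t i, j, c;

	for (i = 0; i < 256; i++) {
		c = i;
		for (j = 0; j < 8; j++)
			c = (c & 1) ? (c >> 1) ^ poly : c >> 1;
		crc_table[i] = c;
	}
}

/* Equivalent of DO_CRC(*p) applied byte by byte. */
static uint32_t crc32_update(uint32_t crc, const unsigned char *p, size_t len)
{
	while (len--)
		crc = crc_table[(crc ^ *p++) & 0xff] ^ (crc >> 8);
	return crc;
}

Seeding with ~0 and inverting the result gives the usual Ethernet CRC; the kernel version additionally converts the running crc with __cpu_to_le32()/__le32_to_cpu() around the body so the 32-bit loads in the main loop work on either endianness.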
/**
* crc32_le() - Calculate bitwise little-endian Ethernet AUTODIN II CRC32
* @crc: seed value for computation. ~0 for Ethernet, sometimes 0 for
@@ -72,48 +114,10 @@ u32 __pure crc32_le(u32 crc, unsigned char const *p, size_t len)
u32 __pure crc32_le(u32 crc, unsigned char const *p, size_t len)
{
# if CRC_LE_BITS == 8
const u32 *b =(u32 *)p;
const u32 *tab = crc32table_le;
# ifdef __LITTLE_ENDIAN
# define DO_CRC(x) crc = tab[ (crc ^ (x)) & 255 ] ^ (crc>>8)
# else
# define DO_CRC(x) crc = tab[ ((crc >> 24) ^ (x)) & 255] ^ (crc<<8)
# endif
crc = __cpu_to_le32(crc);
/* Align it */
if(unlikely(((long)b)&3 && len)){
do {
u8 *p = (u8 *)b;
DO_CRC(*p++);
b = (void *)p;
} while ((--len) && ((long)b)&3 );
}
if(likely(len >= 4)){
/* load data 32 bits wide, xor data 32 bits wide. */
size_t save_len = len & 3;
len = len >> 2;
--b; /* use pre increment below(*++b) for speed */
do {
crc ^= *++b;
DO_CRC(0);
DO_CRC(0);
DO_CRC(0);
DO_CRC(0);
} while (--len);
b++; /* point to next byte(s) */
len = save_len;
}
/* And the last few bytes */
if(len){
do {
u8 *p = (u8 *)b;
DO_CRC(*p++);
b = (void *)p;
} while (--len);
}
crc = crc32_body(crc, p, len, tab);
return __le32_to_cpu(crc);
#undef ENDIAN_SHIFT
#undef DO_CRC
@@ -170,47 +174,10 @@ u32 __pure crc32_be(u32 crc, unsigned char const *p, size_t len)
u32 __pure crc32_be(u32 crc, unsigned char const *p, size_t len)
{
# if CRC_BE_BITS == 8
const u32 *b =(u32 *)p;
const u32 *tab = crc32table_be;
# ifdef __LITTLE_ENDIAN
# define DO_CRC(x) crc = tab[ (crc ^ (x)) & 255 ] ^ (crc>>8)
# else
# define DO_CRC(x) crc = tab[ ((crc >> 24) ^ (x)) & 255] ^ (crc<<8)
# endif
crc = __cpu_to_be32(crc);
/* Align it */
if(unlikely(((long)b)&3 && len)){
do {
u8 *p = (u8 *)b;
DO_CRC(*p++);
b = (u32 *)p;
} while ((--len) && ((long)b)&3 );
}
if(likely(len >= 4)){
/* load data 32 bits wide, xor data 32 bits wide. */
size_t save_len = len & 3;
len = len >> 2;
--b; /* use pre increment below(*++b) for speed */
do {
crc ^= *++b;
DO_CRC(0);
DO_CRC(0);
DO_CRC(0);
DO_CRC(0);
} while (--len);
b++; /* point to next byte(s) */
len = save_len;
}
/* And the last few bytes */
if(len){
do {
u8 *p = (u8 *)b;
DO_CRC(*p++);
b = (void *)p;
} while (--len);
}
crc = crc32_body(crc, p, len, tab);
return __be32_to_cpu(crc);
#undef ENDIAN_SHIFT
#undef DO_CRC
+25 -25
@@ -7,30 +7,30 @@
#include <linux/ctype.h>
#include <linux/module.h>
unsigned char _ctype[] = {
_C,_C,_C,_C,_C,_C,_C,_C, /* 0-7 */
_C,_C|_S,_C|_S,_C|_S,_C|_S,_C|_S,_C,_C, /* 8-15 */
_C,_C,_C,_C,_C,_C,_C,_C, /* 16-23 */
_C,_C,_C,_C,_C,_C,_C,_C, /* 24-31 */
_S|_SP,_P,_P,_P,_P,_P,_P,_P, /* 32-39 */
_P,_P,_P,_P,_P,_P,_P,_P, /* 40-47 */
_D,_D,_D,_D,_D,_D,_D,_D, /* 48-55 */
_D,_D,_P,_P,_P,_P,_P,_P, /* 56-63 */
_P,_U|_X,_U|_X,_U|_X,_U|_X,_U|_X,_U|_X,_U, /* 64-71 */
_U,_U,_U,_U,_U,_U,_U,_U, /* 72-79 */
_U,_U,_U,_U,_U,_U,_U,_U, /* 80-87 */
_U,_U,_U,_P,_P,_P,_P,_P, /* 88-95 */
_P,_L|_X,_L|_X,_L|_X,_L|_X,_L|_X,_L|_X,_L, /* 96-103 */
_L,_L,_L,_L,_L,_L,_L,_L, /* 104-111 */
_L,_L,_L,_L,_L,_L,_L,_L, /* 112-119 */
_L,_L,_L,_P,_P,_P,_P,_C, /* 120-127 */
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 128-143 */
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 144-159 */
_S|_SP,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P, /* 160-175 */
_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P, /* 176-191 */
_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U, /* 192-207 */
_U,_U,_U,_U,_U,_U,_U,_P,_U,_U,_U,_U,_U,_U,_U,_L, /* 208-223 */
_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L, /* 224-239 */
_L,_L,_L,_L,_L,_L,_L,_P,_L,_L,_L,_L,_L,_L,_L,_L}; /* 240-255 */
const unsigned char _ctype[] = {
_C,_C,_C,_C,_C,_C,_C,_C, /* 0-7 */
_C,_C|_S,_C|_S,_C|_S,_C|_S,_C|_S,_C,_C, /* 8-15 */
_C,_C,_C,_C,_C,_C,_C,_C, /* 16-23 */
_C,_C,_C,_C,_C,_C,_C,_C, /* 24-31 */
_S|_SP,_P,_P,_P,_P,_P,_P,_P, /* 32-39 */
_P,_P,_P,_P,_P,_P,_P,_P, /* 40-47 */
_D,_D,_D,_D,_D,_D,_D,_D, /* 48-55 */
_D,_D,_P,_P,_P,_P,_P,_P, /* 56-63 */
_P,_U|_X,_U|_X,_U|_X,_U|_X,_U|_X,_U|_X,_U, /* 64-71 */
_U,_U,_U,_U,_U,_U,_U,_U, /* 72-79 */
_U,_U,_U,_U,_U,_U,_U,_U, /* 80-87 */
_U,_U,_U,_P,_P,_P,_P,_P, /* 88-95 */
_P,_L|_X,_L|_X,_L|_X,_L|_X,_L|_X,_L|_X,_L, /* 96-103 */
_L,_L,_L,_L,_L,_L,_L,_L, /* 104-111 */
_L,_L,_L,_L,_L,_L,_L,_L, /* 112-119 */
_L,_L,_L,_P,_P,_P,_P,_C, /* 120-127 */
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 128-143 */
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 144-159 */
_S|_SP,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P, /* 160-175 */
_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P, /* 176-191 */
_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U, /* 192-207 */
_U,_U,_U,_U,_U,_U,_U,_P,_U,_U,_U,_U,_U,_U,_U,_L, /* 208-223 */
_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L, /* 224-239 */
_L,_L,_L,_L,_L,_L,_L,_P,_L,_L,_L,_L,_L,_L,_L,_L}; /* 240-255 */
EXPORT_SYMBOL(_ctype);
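The only change to this table is the added const qualifier; the contents are identical. For context, each entry is a bitmask that the ctype macros test, along the lines of this sketch of the include/linux/ctype.h pattern (flag values as in that header; not part of this diff):

#define _U	0x01	/* upper case letter */
#define _L	0x02	/* lower case letter */
#define _D	0x04	/* digit */
#define _C	0x08	/* control character */
#define _P	0x10	/* punctuation */
#define _S	0x20	/* white space (space/tab/newline) */
#define _X	0x40	/* hex digit */
#define _SP	0x80	/* hard space (0x20) */

#define __ismask(x)	(_ctype[(int)(unsigned char)(x)])

#define isalpha(c)	((__ismask(c) & (_U | _L)) != 0)
#define isdigit(c)	((__ismask(c) & (_D)) != 0)
#define isspace(c)	((__ismask(c) & (_S)) != 0)
#define isxdigit(c)	((__ismask(c) & (_D | _X)) != 0)

So, for example, 'A' (entry 65, _U|_X) satisfies both isalpha() and isxdigit(), while a tab (entry 9, _C|_S) satisfies isspace().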
+37 -37
@@ -26,14 +26,14 @@
struct debug_bucket {
struct hlist_head list;
spinlock_t lock;
raw_spinlock_t lock;
};
static struct debug_bucket obj_hash[ODEBUG_HASH_SIZE];
static struct debug_obj obj_static_pool[ODEBUG_POOL_SIZE] __initdata;
static DEFINE_SPINLOCK(pool_lock);
static DEFINE_RAW_SPINLOCK(pool_lock);
static HLIST_HEAD(obj_pool);
@@ -96,10 +96,10 @@ static int fill_pool(void)
if (!new)
return obj_pool_free;
spin_lock_irqsave(&pool_lock, flags);
raw_spin_lock_irqsave(&pool_lock, flags);
hlist_add_head(&new->node, &obj_pool);
obj_pool_free++;
spin_unlock_irqrestore(&pool_lock, flags);
raw_spin_unlock_irqrestore(&pool_lock, flags);
}
return obj_pool_free;
}
@@ -133,7 +133,7 @@ alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr)
{
struct debug_obj *obj = NULL;
spin_lock(&pool_lock);
raw_spin_lock(&pool_lock);
if (obj_pool.first) {
obj = hlist_entry(obj_pool.first, typeof(*obj), node);
@@ -152,7 +152,7 @@ alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr)
if (obj_pool_free < obj_pool_min_free)
obj_pool_min_free = obj_pool_free;
}
spin_unlock(&pool_lock);
raw_spin_unlock(&pool_lock);
return obj;
}
@@ -165,7 +165,7 @@ static void free_obj_work(struct work_struct *work)
struct debug_obj *obj;
unsigned long flags;
spin_lock_irqsave(&pool_lock, flags);
raw_spin_lock_irqsave(&pool_lock, flags);
while (obj_pool_free > ODEBUG_POOL_SIZE) {
obj = hlist_entry(obj_pool.first, typeof(*obj), node);
hlist_del(&obj->node);
@@ -174,11 +174,11 @@ static void free_obj_work(struct work_struct *work)
* We release pool_lock across kmem_cache_free() to
* avoid contention on pool_lock.
*/
spin_unlock_irqrestore(&pool_lock, flags);
raw_spin_unlock_irqrestore(&pool_lock, flags);
kmem_cache_free(obj_cache, obj);
spin_lock_irqsave(&pool_lock, flags);
raw_spin_lock_irqsave(&pool_lock, flags);
}
spin_unlock_irqrestore(&pool_lock, flags);
raw_spin_unlock_irqrestore(&pool_lock, flags);
}
/*
@@ -190,7 +190,7 @@ static void free_object(struct debug_obj *obj)
unsigned long flags;
int sched = 0;
spin_lock_irqsave(&pool_lock, flags);
raw_spin_lock_irqsave(&pool_lock, flags);
/*
* schedule work when the pool is filled and the cache is
* initialized:
@@ -200,7 +200,7 @@ static void free_object(struct debug_obj *obj)
hlist_add_head(&obj->node, &obj_pool);
obj_pool_free++;
obj_pool_used--;
spin_unlock_irqrestore(&pool_lock, flags);
raw_spin_unlock_irqrestore(&pool_lock, flags);
if (sched)
schedule_work(&debug_obj_work);
}
@@ -221,9 +221,9 @@ static void debug_objects_oom(void)
printk(KERN_WARNING "ODEBUG: Out of memory. ODEBUG disabled\n");
for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
spin_lock_irqsave(&db->lock, flags);
raw_spin_lock_irqsave(&db->lock, flags);
hlist_move_list(&db->list, &freelist);
spin_unlock_irqrestore(&db->lock, flags);
raw_spin_unlock_irqrestore(&db->lock, flags);
/* Now free them */
hlist_for_each_entry_safe(obj, node, tmp, &freelist, node) {
@@ -303,14 +303,14 @@ __debug_object_init(void *addr, struct debug_obj_descr *descr, int onstack)
db = get_bucket((unsigned long) addr);
spin_lock_irqsave(&db->lock, flags);
raw_spin_lock_irqsave(&db->lock, flags);
obj = lookup_object(addr, db);
if (!obj) {
obj = alloc_object(addr, db, descr);
if (!obj) {
debug_objects_enabled = 0;
spin_unlock_irqrestore(&db->lock, flags);
raw_spin_unlock_irqrestore(&db->lock, flags);
debug_objects_oom();
return;
}
@@ -327,7 +327,7 @@ __debug_object_init(void *addr, struct debug_obj_descr *descr, int onstack)
case ODEBUG_STATE_ACTIVE:
debug_print_object(obj, "init");
state = obj->state;
spin_unlock_irqrestore(&db->lock, flags);
raw_spin_unlock_irqrestore(&db->lock, flags);
debug_object_fixup(descr->fixup_init, addr, state);
return;
@@ -338,7 +338,7 @@ __debug_object_init(void *addr, struct debug_obj_descr *descr, int onstack)
break;
}
spin_unlock_irqrestore(&db->lock, flags);
raw_spin_unlock_irqrestore(&db->lock, flags);
}
/**
@@ -385,7 +385,7 @@ void debug_object_activate(void *addr, struct debug_obj_descr *descr)
db = get_bucket((unsigned long) addr);
spin_lock_irqsave(&db->lock, flags);
raw_spin_lock_irqsave(&db->lock, flags);
obj = lookup_object(addr, db);
if (obj) {
@@ -398,7 +398,7 @@ void debug_object_activate(void *addr, struct debug_obj_descr *descr)
case ODEBUG_STATE_ACTIVE:
debug_print_object(obj, "activate");
state = obj->state;
spin_unlock_irqrestore(&db->lock, flags);
raw_spin_unlock_irqrestore(&db->lock, flags);
debug_object_fixup(descr->fixup_activate, addr, state);
return;
@@ -408,11 +408,11 @@ void debug_object_activate(void *addr, struct debug_obj_descr *descr)
default:
break;
}
spin_unlock_irqrestore(&db->lock, flags);
raw_spin_unlock_irqrestore(&db->lock, flags);
return;
}
spin_unlock_irqrestore(&db->lock, flags);
raw_spin_unlock_irqrestore(&db->lock, flags);
/*
* This happens when a static object is activated. We
* let the type specific code decide whether this is
@@ -438,7 +438,7 @@ void debug_object_deactivate(void *addr, struct debug_obj_descr *descr)
db = get_bucket((unsigned long) addr);
spin_lock_irqsave(&db->lock, flags);
raw_spin_lock_irqsave(&db->lock, flags);
obj = lookup_object(addr, db);
if (obj) {
@@ -463,7 +463,7 @@ void debug_object_deactivate(void *addr, struct debug_obj_descr *descr)
debug_print_object(&o, "deactivate");
}
spin_unlock_irqrestore(&db->lock, flags);
raw_spin_unlock_irqrestore(&db->lock, flags);
}
/**
@@ -483,7 +483,7 @@ void debug_object_destroy(void *addr, struct debug_obj_descr *descr)
db = get_bucket((unsigned long) addr);
spin_lock_irqsave(&db->lock, flags);
raw_spin_lock_irqsave(&db->lock, flags);
obj = lookup_object(addr, db);
if (!obj)
@@ -498,7 +498,7 @@ void debug_object_destroy(void *addr, struct debug_obj_descr *descr)
case ODEBUG_STATE_ACTIVE:
debug_print_object(obj, "destroy");
state = obj->state;
spin_unlock_irqrestore(&db->lock, flags);
raw_spin_unlock_irqrestore(&db->lock, flags);
debug_object_fixup(descr->fixup_destroy, addr, state);
return;
@@ -509,7 +509,7 @@ void debug_object_destroy(void *addr, struct debug_obj_descr *descr)
break;
}
out_unlock:
spin_unlock_irqrestore(&db->lock, flags);
raw_spin_unlock_irqrestore(&db->lock, flags);
}
/**
@@ -529,7 +529,7 @@ void debug_object_free(void *addr, struct debug_obj_descr *descr)
db = get_bucket((unsigned long) addr);
spin_lock_irqsave(&db->lock, flags);
raw_spin_lock_irqsave(&db->lock, flags);
obj = lookup_object(addr, db);
if (!obj)
@@ -539,17 +539,17 @@ void debug_object_free(void *addr, struct debug_obj_descr *descr)
case ODEBUG_STATE_ACTIVE:
debug_print_object(obj, "free");
state = obj->state;
spin_unlock_irqrestore(&db->lock, flags);
raw_spin_unlock_irqrestore(&db->lock, flags);
debug_object_fixup(descr->fixup_free, addr, state);
return;
default:
hlist_del(&obj->node);
spin_unlock_irqrestore(&db->lock, flags);
raw_spin_unlock_irqrestore(&db->lock, flags);
free_object(obj);
return;
}
out_unlock:
spin_unlock_irqrestore(&db->lock, flags);
raw_spin_unlock_irqrestore(&db->lock, flags);
}
#ifdef CONFIG_DEBUG_OBJECTS_FREE
@@ -575,7 +575,7 @@ static void __debug_check_no_obj_freed(const void *address, unsigned long size)
repeat:
cnt = 0;
spin_lock_irqsave(&db->lock, flags);
raw_spin_lock_irqsave(&db->lock, flags);
hlist_for_each_entry_safe(obj, node, tmp, &db->list, node) {
cnt++;
oaddr = (unsigned long) obj->object;
@@ -587,7 +587,7 @@ repeat:
debug_print_object(obj, "free");
descr = obj->descr;
state = obj->state;
spin_unlock_irqrestore(&db->lock, flags);
raw_spin_unlock_irqrestore(&db->lock, flags);
debug_object_fixup(descr->fixup_free,
(void *) oaddr, state);
goto repeat;
@@ -597,7 +597,7 @@ repeat:
break;
}
}
spin_unlock_irqrestore(&db->lock, flags);
raw_spin_unlock_irqrestore(&db->lock, flags);
/* Now free them */
hlist_for_each_entry_safe(obj, node, tmp, &freelist, node) {
@@ -783,7 +783,7 @@ check_results(void *addr, enum debug_obj_state state, int fixups, int warnings)
db = get_bucket((unsigned long) addr);
spin_lock_irqsave(&db->lock, flags);
raw_spin_lock_irqsave(&db->lock, flags);
obj = lookup_object(addr, db);
if (!obj && state != ODEBUG_STATE_NONE) {
@@ -807,7 +807,7 @@ check_results(void *addr, enum debug_obj_state state, int fixups, int warnings)
}
res = 0;
out:
spin_unlock_irqrestore(&db->lock, flags);
raw_spin_unlock_irqrestore(&db->lock, flags);
if (res)
debug_objects_enabled = 0;
return res;
@@ -907,7 +907,7 @@ void __init debug_objects_early_init(void)
int i;
for (i = 0; i < ODEBUG_HASH_SIZE; i++)
spin_lock_init(&obj_hash[i].lock);
raw_spin_lock_init(&obj_hash[i].lock);
for (i = 0; i < ODEBUG_POOL_SIZE; i++)
hlist_add_head(&obj_static_pool[i].node, &obj_pool);
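Everything above is a mechanical spinlock_t to raw_spinlock_t conversion (debugobjects must keep spinning even on kernels where ordinary spinlocks can sleep); the debugobjects API itself is unchanged. For orientation, a subsystem that wants its objects tracked registers a descriptor and brackets the object's lifetime with the debug_object_* calls, roughly as in this hedged sketch (the my_timer type and helper names are illustrative; the descriptor structure and entry points are the real API):

#include <linux/debugobjects.h>

struct my_timer {
	unsigned long expires;
	/* ... */
};

static struct debug_obj_descr my_timer_debug_descr = {
	.name = "my_timer",
	/* optionally: .fixup_init, .fixup_activate, .fixup_free, ... */
};

static void my_timer_setup(struct my_timer *t)
{
	debug_object_init(t, &my_timer_debug_descr);
}

static void my_timer_start(struct my_timer *t)
{
	debug_object_activate(t, &my_timer_debug_descr);
	/* ... arm the timer ... */
}

static void my_timer_stop(struct my_timer *t)
{
	/* ... disarm the timer ... */
	debug_object_deactivate(t, &my_timer_debug_descr);
}

static void my_timer_teardown(struct my_timer *t)
{
	debug_object_free(t, &my_timer_debug_descr);
}

Lifetime transitions that do not fit this sequence (for example freeing an object that is still active) are what debug_print_object() reports in the hunks above.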
+5
@@ -676,6 +676,8 @@ static int dma_debug_device_change(struct notifier_block *nb,
struct device *dev = data;
int count;
if (global_disable)
return 0;
switch (action) {
case BUS_NOTIFY_UNBOUND_DRIVER:
@@ -697,6 +699,9 @@ void dma_debug_add_bus(struct bus_type *bus)
{
struct notifier_block *nb;
if (global_disable)
return;
nb = kzalloc(sizeof(struct notifier_block), GFP_KERNEL);
if (nb == NULL) {
pr_err("dma_debug_add_bus: out of memory\n");
+2 -2
@@ -21,6 +21,7 @@
#include <linux/list.h>
#include <linux/sysctl.h>
#include <linux/ctype.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/dynamic_debug.h>
#include <linux/debugfs.h>
@@ -209,8 +210,7 @@ static int ddebug_tokenize(char *buf, char *words[], int maxwords)
char *end;
/* Skip leading whitespace */
while (*buf && isspace(*buf))
buf++;
buf = skip_spaces(buf);
if (!*buf)
break; /* oh, it was trailing whitespace */
+12 -21
@@ -11,6 +11,7 @@
*/
#include <linux/module.h>
#include <linux/bitmap.h>
#include <linux/genalloc.h>
@@ -114,7 +115,7 @@ unsigned long gen_pool_alloc(struct gen_pool *pool, size_t size)
struct gen_pool_chunk *chunk;
unsigned long addr, flags;
int order = pool->min_alloc_order;
int nbits, bit, start_bit, end_bit;
int nbits, start_bit, end_bit;
if (size == 0)
return 0;
@@ -129,29 +130,19 @@ unsigned long gen_pool_alloc(struct gen_pool *pool, size_t size)
end_bit -= nbits + 1;
spin_lock_irqsave(&chunk->lock, flags);
bit = -1;
while (bit + 1 < end_bit) {
bit = find_next_zero_bit(chunk->bits, end_bit, bit + 1);
if (bit >= end_bit)
break;
start_bit = bit;
if (nbits > 1) {
bit = find_next_bit(chunk->bits, bit + nbits,
bit + 1);
if (bit - start_bit < nbits)
continue;
}
addr = chunk->start_addr +
((unsigned long)start_bit << order);
while (nbits--)
__set_bit(start_bit++, chunk->bits);
start_bit = bitmap_find_next_zero_area(chunk->bits, end_bit, 0,
nbits, 0);
if (start_bit >= end_bit) {
spin_unlock_irqrestore(&chunk->lock, flags);
read_unlock(&pool->lock);
return addr;
continue;
}
addr = chunk->start_addr + ((unsigned long)start_bit << order);
bitmap_set(chunk->bits, start_bit, nbits);
spin_unlock_irqrestore(&chunk->lock, flags);
read_unlock(&pool->lock);
return addr;
}
read_unlock(&pool->lock);
return 0;
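The rewrite delegates the search for a free run to bitmap_find_next_zero_area() and the marking to bitmap_set(), replacing the open-coded find_next_zero_bit()/__set_bit() loop. Callers are unaffected; typical gen_pool usage stays as in this hedged sketch (the SRAM numbers and function names are illustrative):

#include <linux/errno.h>
#include <linux/genalloc.h>

static struct gen_pool *sram_pool;

static int sram_pool_setup(unsigned long sram_virt_base)
{
	/* Minimum allocation granule of 2^5 = 32 bytes, no NUMA affinity. */
	sram_pool = gen_pool_create(5, -1);
	if (!sram_pool)
		return -ENOMEM;

	/* Hand 64 KiB of on-chip SRAM to the pool. */
	return gen_pool_add(sram_pool, sram_virt_base, 0x10000, -1);
}

static unsigned long sram_alloc_buffer(void)
{
	/* Returns 0 if no chunk has a large enough free run. */
	return gen_pool_alloc(sram_pool, 512);
}

static void sram_free_buffer(unsigned long addr)
{
	gen_pool_free(sram_pool, addr, 512);
}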
+9 -50
@@ -3,41 +3,7 @@
*/
#include <linux/module.h>
#include <linux/bitops.h>
static unsigned long find_next_zero_area(unsigned long *map,
unsigned long size,
unsigned long start,
unsigned int nr,
unsigned long align_mask)
{
unsigned long index, end, i;
again:
index = find_next_zero_bit(map, size, start);
/* Align allocation */
index = (index + align_mask) & ~align_mask;
end = index + nr;
if (end >= size)
return -1;
for (i = index; i < end; i++) {
if (test_bit(i, map)) {
start = i+1;
goto again;
}
}
return index;
}
void iommu_area_reserve(unsigned long *map, unsigned long i, int len)
{
unsigned long end = i + len;
while (i < end) {
__set_bit(i, map);
i++;
}
}
#include <linux/bitmap.h>
int iommu_is_span_boundary(unsigned int index, unsigned int nr,
unsigned long shift,
@@ -55,31 +21,24 @@ unsigned long iommu_area_alloc(unsigned long *map, unsigned long size,
unsigned long align_mask)
{
unsigned long index;
/* We don't want the last of the limit */
size -= 1;
again:
index = find_next_zero_area(map, size, start, nr, align_mask);
if (index != -1) {
index = bitmap_find_next_zero_area(map, size, start, nr, align_mask);
if (index < size) {
if (iommu_is_span_boundary(index, nr, shift, boundary_size)) {
/* we could do more effectively */
start = index + 1;
goto again;
}
iommu_area_reserve(map, index, nr);
bitmap_set(map, index, nr);
return index;
}
return index;
return -1;
}
EXPORT_SYMBOL(iommu_area_alloc);
void iommu_area_free(unsigned long *map, unsigned long start, unsigned int nr)
{
unsigned long end = start + nr;
while (start < end) {
__clear_bit(start, map);
start++;
}
}
EXPORT_SYMBOL(iommu_area_free);
unsigned long iommu_num_pages(unsigned long addr, unsigned long len,
unsigned long io_page_size)
{
+14 -12
@@ -23,7 +23,7 @@
*
* Don't use in new code.
*/
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(kernel_flag);
static __cacheline_aligned_in_smp DEFINE_RAW_SPINLOCK(kernel_flag);
/*
@@ -36,12 +36,12 @@ static __cacheline_aligned_in_smp DEFINE_SPINLOCK(kernel_flag);
* If it successfully gets the lock, it should increment
* the preemption count like any spinlock does.
*
* (This works on UP too - _raw_spin_trylock will never
* (This works on UP too - do_raw_spin_trylock will never
* return false in that case)
*/
int __lockfunc __reacquire_kernel_lock(void)
{
while (!_raw_spin_trylock(&kernel_flag)) {
while (!do_raw_spin_trylock(&kernel_flag)) {
if (need_resched())
return -EAGAIN;
cpu_relax();
@@ -52,27 +52,27 @@ int __lockfunc __reacquire_kernel_lock(void)
void __lockfunc __release_kernel_lock(void)
{
_raw_spin_unlock(&kernel_flag);
do_raw_spin_unlock(&kernel_flag);
preempt_enable_no_resched();
}
/*
* These are the BKL spinlocks - we try to be polite about preemption.
* If SMP is not on (ie UP preemption), this all goes away because the
* _raw_spin_trylock() will always succeed.
* do_raw_spin_trylock() will always succeed.
*/
#ifdef CONFIG_PREEMPT
static inline void __lock_kernel(void)
{
preempt_disable();
if (unlikely(!_raw_spin_trylock(&kernel_flag))) {
if (unlikely(!do_raw_spin_trylock(&kernel_flag))) {
/*
* If preemption was disabled even before this
* was called, there's nothing we can be polite
* about - just spin.
*/
if (preempt_count() > 1) {
_raw_spin_lock(&kernel_flag);
do_raw_spin_lock(&kernel_flag);
return;
}
@@ -82,10 +82,10 @@ static inline void __lock_kernel(void)
*/
do {
preempt_enable();
while (spin_is_locked(&kernel_flag))
while (raw_spin_is_locked(&kernel_flag))
cpu_relax();
preempt_disable();
} while (!_raw_spin_trylock(&kernel_flag));
} while (!do_raw_spin_trylock(&kernel_flag));
}
}
@@ -96,7 +96,7 @@ static inline void __lock_kernel(void)
*/
static inline void __lock_kernel(void)
{
_raw_spin_lock(&kernel_flag);
do_raw_spin_lock(&kernel_flag);
}
#endif
@@ -106,7 +106,7 @@ static inline void __unlock_kernel(void)
* the BKL is not covered by lockdep, so we open-code the
* unlocking sequence (and thus avoid the dep-chain ops):
*/
_raw_spin_unlock(&kernel_flag);
do_raw_spin_unlock(&kernel_flag);
preempt_enable();
}
@@ -122,8 +122,10 @@ void __lockfunc _lock_kernel(const char *func, const char *file, int line)
trace_lock_kernel(func, file, line);
if (likely(!depth))
if (likely(!depth)) {
might_sleep();
__lock_kernel();
}
current->lock_depth = depth;
}
+6 -1
@@ -263,7 +263,7 @@ long __init lmb_reserve(u64 base, u64 size)
return lmb_add_region(_rgn, base, size);
}
long __init lmb_overlaps_region(struct lmb_region *rgn, u64 base, u64 size)
long lmb_overlaps_region(struct lmb_region *rgn, u64 base, u64 size)
{
unsigned long i;
@@ -493,6 +493,11 @@ int __init lmb_is_reserved(u64 addr)
return 0;
}
int lmb_is_region_reserved(u64 base, u64 size)
{
return lmb_overlaps_region(&lmb.reserved, base, size);
}
/*
* Given a <base, len>, find which memory regions belong to this range.
* Adjust the request and return a contiguous chunk.
+7 -4
@@ -56,13 +56,16 @@ static int match_one(char *s, const char *p, substring_t args[])
args[argc].from = s;
switch (*p++) {
case 's':
if (strlen(s) == 0)
case 's': {
size_t str_len = strlen(s);
if (str_len == 0)
return 0;
else if (len == -1 || len > strlen(s))
len = strlen(s);
if (len == -1 || len > str_len)
len = str_len;
args[argc].to = s + len;
break;
}
case 'd':
simple_strtol(s, &args[argc].to, 0);
goto num;
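match_one() is the worker behind match_token(); the change simply caches strlen(s) instead of computing it up to three times in the %s arm. A typical consumer of this parser, in the style of filesystem mount-option handling, looks like the following hedged sketch (the option table and function are illustrative):

#include <linux/errno.h>
#include <linux/parser.h>

enum { Opt_uid, Opt_ro, Opt_err };

static const match_table_t tokens = {
	{ Opt_uid, "uid=%d" },
	{ Opt_ro,  "ro" },
	{ Opt_err, NULL }
};

static int parse_one_option(char *p, int *uid, int *readonly)
{
	substring_t args[MAX_OPT_ARGS];

	switch (match_token(p, tokens, args)) {
	case Opt_uid:
		/* args[0] delimits the digits matched by %d. */
		return match_int(&args[0], uid);
	case Opt_ro:
		*readonly = 1;
		return 0;
	default:
		return -EINVAL;
	}
}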
+5 -3
@@ -54,9 +54,11 @@ static void plist_check_list(struct list_head *top)
static void plist_check_head(struct plist_head *head)
{
WARN_ON(!head->lock);
if (head->lock)
WARN_ON_SMP(!spin_is_locked(head->lock));
WARN_ON(!head->rawlock && !head->spinlock);
if (head->rawlock)
WARN_ON_SMP(!raw_spin_is_locked(head->rawlock));
if (head->spinlock)
WARN_ON_SMP(!spin_is_locked(head->spinlock));
plist_check_list(&head->prio_list);
plist_check_list(&head->node_list);
}
+14 -9
@@ -17,6 +17,19 @@ struct rwsem_waiter {
#define RWSEM_WAITING_FOR_WRITE 0x00000002
};
int rwsem_is_locked(struct rw_semaphore *sem)
{
int ret = 1;
unsigned long flags;
if (spin_trylock_irqsave(&sem->wait_lock, flags)) {
ret = (sem->activity != 0);
spin_unlock_irqrestore(&sem->wait_lock, flags);
}
return ret;
}
EXPORT_SYMBOL(rwsem_is_locked);
/*
* initialise the semaphore
*/
@@ -34,6 +47,7 @@ void __init_rwsem(struct rw_semaphore *sem, const char *name,
spin_lock_init(&sem->wait_lock);
INIT_LIST_HEAD(&sem->wait_list);
}
EXPORT_SYMBOL(__init_rwsem);
/*
* handle the lock release when processes blocked on it that can now run
@@ -305,12 +319,3 @@ void __downgrade_write(struct rw_semaphore *sem)
spin_unlock_irqrestore(&sem->wait_lock, flags);
}
EXPORT_SYMBOL(__init_rwsem);
EXPORT_SYMBOL(__down_read);
EXPORT_SYMBOL(__down_read_trylock);
EXPORT_SYMBOL(__down_write_nested);
EXPORT_SYMBOL(__down_write);
EXPORT_SYMBOL(__down_write_trylock);
EXPORT_SYMBOL(__up_read);
EXPORT_SYMBOL(__up_write);
EXPORT_SYMBOL(__downgrade_write);
+32 -32
@@ -13,8 +13,8 @@
#include <linux/delay.h>
#include <linux/module.h>
void __spin_lock_init(spinlock_t *lock, const char *name,
struct lock_class_key *key)
void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
struct lock_class_key *key)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
/*
@@ -23,13 +23,13 @@ void __spin_lock_init(spinlock_t *lock, const char *name,
debug_check_no_locks_freed((void *)lock, sizeof(*lock));
lockdep_init_map(&lock->dep_map, name, key, 0);
#endif
lock->raw_lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
lock->raw_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
lock->magic = SPINLOCK_MAGIC;
lock->owner = SPINLOCK_OWNER_INIT;
lock->owner_cpu = -1;
}
EXPORT_SYMBOL(__spin_lock_init);
EXPORT_SYMBOL(__raw_spin_lock_init);
void __rwlock_init(rwlock_t *lock, const char *name,
struct lock_class_key *key)
@@ -41,7 +41,7 @@ void __rwlock_init(rwlock_t *lock, const char *name,
debug_check_no_locks_freed((void *)lock, sizeof(*lock));
lockdep_init_map(&lock->dep_map, name, key, 0);
#endif
lock->raw_lock = (raw_rwlock_t) __RAW_RW_LOCK_UNLOCKED;
lock->raw_lock = (arch_rwlock_t) __ARCH_RW_LOCK_UNLOCKED;
lock->magic = RWLOCK_MAGIC;
lock->owner = SPINLOCK_OWNER_INIT;
lock->owner_cpu = -1;
@@ -49,7 +49,7 @@ void __rwlock_init(rwlock_t *lock, const char *name,
EXPORT_SYMBOL(__rwlock_init);
static void spin_bug(spinlock_t *lock, const char *msg)
static void spin_bug(raw_spinlock_t *lock, const char *msg)
{
struct task_struct *owner = NULL;
@@ -73,7 +73,7 @@ static void spin_bug(spinlock_t *lock, const char *msg)
#define SPIN_BUG_ON(cond, lock, msg) if (unlikely(cond)) spin_bug(lock, msg)
static inline void
debug_spin_lock_before(spinlock_t *lock)
debug_spin_lock_before(raw_spinlock_t *lock)
{
SPIN_BUG_ON(lock->magic != SPINLOCK_MAGIC, lock, "bad magic");
SPIN_BUG_ON(lock->owner == current, lock, "recursion");
@@ -81,16 +81,16 @@ debug_spin_lock_before(spinlock_t *lock)
lock, "cpu recursion");
}
static inline void debug_spin_lock_after(spinlock_t *lock)
static inline void debug_spin_lock_after(raw_spinlock_t *lock)
{
lock->owner_cpu = raw_smp_processor_id();
lock->owner = current;
}
static inline void debug_spin_unlock(spinlock_t *lock)
static inline void debug_spin_unlock(raw_spinlock_t *lock)
{
SPIN_BUG_ON(lock->magic != SPINLOCK_MAGIC, lock, "bad magic");
SPIN_BUG_ON(!spin_is_locked(lock), lock, "already unlocked");
SPIN_BUG_ON(!raw_spin_is_locked(lock), lock, "already unlocked");
SPIN_BUG_ON(lock->owner != current, lock, "wrong owner");
SPIN_BUG_ON(lock->owner_cpu != raw_smp_processor_id(),
lock, "wrong CPU");
@@ -98,7 +98,7 @@ static inline void debug_spin_unlock(spinlock_t *lock)
lock->owner_cpu = -1;
}
static void __spin_lock_debug(spinlock_t *lock)
static void __spin_lock_debug(raw_spinlock_t *lock)
{
u64 i;
u64 loops = loops_per_jiffy * HZ;
@@ -106,7 +106,7 @@ static void __spin_lock_debug(spinlock_t *lock)
for (;;) {
for (i = 0; i < loops; i++) {
if (__raw_spin_trylock(&lock->raw_lock))
if (arch_spin_trylock(&lock->raw_lock))
return;
__delay(1);
}
@@ -125,17 +125,17 @@ static void __spin_lock_debug(spinlock_t *lock)
}
}
void _raw_spin_lock(spinlock_t *lock)
void do_raw_spin_lock(raw_spinlock_t *lock)
{
debug_spin_lock_before(lock);
if (unlikely(!__raw_spin_trylock(&lock->raw_lock)))
if (unlikely(!arch_spin_trylock(&lock->raw_lock)))
__spin_lock_debug(lock);
debug_spin_lock_after(lock);
}
int _raw_spin_trylock(spinlock_t *lock)
int do_raw_spin_trylock(raw_spinlock_t *lock)
{
int ret = __raw_spin_trylock(&lock->raw_lock);
int ret = arch_spin_trylock(&lock->raw_lock);
if (ret)
debug_spin_lock_after(lock);
@@ -148,10 +148,10 @@ int _raw_spin_trylock(spinlock_t *lock)
return ret;
}
void _raw_spin_unlock(spinlock_t *lock)
void do_raw_spin_unlock(raw_spinlock_t *lock)
{
debug_spin_unlock(lock);
__raw_spin_unlock(&lock->raw_lock);
arch_spin_unlock(&lock->raw_lock);
}
static void rwlock_bug(rwlock_t *lock, const char *msg)
@@ -176,7 +176,7 @@ static void __read_lock_debug(rwlock_t *lock)
for (;;) {
for (i = 0; i < loops; i++) {
if (__raw_read_trylock(&lock->raw_lock))
if (arch_read_trylock(&lock->raw_lock))
return;
__delay(1);
}
@@ -193,15 +193,15 @@ static void __read_lock_debug(rwlock_t *lock)
}
#endif
void _raw_read_lock(rwlock_t *lock)
void do_raw_read_lock(rwlock_t *lock)
{
RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
__raw_read_lock(&lock->raw_lock);
arch_read_lock(&lock->raw_lock);
}
int _raw_read_trylock(rwlock_t *lock)
int do_raw_read_trylock(rwlock_t *lock)
{
int ret = __raw_read_trylock(&lock->raw_lock);
int ret = arch_read_trylock(&lock->raw_lock);
#ifndef CONFIG_SMP
/*
@@ -212,10 +212,10 @@ int _raw_read_trylock(rwlock_t *lock)
return ret;
}
void _raw_read_unlock(rwlock_t *lock)
void do_raw_read_unlock(rwlock_t *lock)
{
RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
__raw_read_unlock(&lock->raw_lock);
arch_read_unlock(&lock->raw_lock);
}
static inline void debug_write_lock_before(rwlock_t *lock)
@@ -251,7 +251,7 @@ static void __write_lock_debug(rwlock_t *lock)
for (;;) {
for (i = 0; i < loops; i++) {
if (__raw_write_trylock(&lock->raw_lock))
if (arch_write_trylock(&lock->raw_lock))
return;
__delay(1);
}
@@ -268,16 +268,16 @@ static void __write_lock_debug(rwlock_t *lock)
}
#endif
void _raw_write_lock(rwlock_t *lock)
void do_raw_write_lock(rwlock_t *lock)
{
debug_write_lock_before(lock);
__raw_write_lock(&lock->raw_lock);
arch_write_lock(&lock->raw_lock);
debug_write_lock_after(lock);
}
int _raw_write_trylock(rwlock_t *lock)
int do_raw_write_trylock(rwlock_t *lock)
{
int ret = __raw_write_trylock(&lock->raw_lock);
int ret = arch_write_trylock(&lock->raw_lock);
if (ret)
debug_write_lock_after(lock);
@@ -290,8 +290,8 @@ int _raw_write_trylock(rwlock_t *lock)
return ret;
}
void _raw_write_unlock(rwlock_t *lock)
void do_raw_write_unlock(rwlock_t *lock)
{
debug_write_unlock(lock);
__raw_write_unlock(&lock->raw_lock);
arch_write_unlock(&lock->raw_lock);
}
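The renames above follow the locking hierarchy introduced in this merge window: spinlock_t wraps raw_spinlock_t (which always spins, and is what core code such as debugobjects now uses directly), which in turn wraps the architecture's arch_spinlock_t; the lock-debugging entry points in this file become do_raw_spin_*() and the architecture primitives arch_spin_*(). Schematically, as a simplified sketch of the real header layout with the debug and lockdep fields omitted:

/* Architecture-provided lock word (e.g. a ticket lock on x86). */
typedef struct arch_spinlock {
	unsigned int slock;
} arch_spinlock_t;

/* Always a spinning lock. */
typedef struct raw_spinlock {
	arch_spinlock_t raw_lock;
	/* break_lock, magic, owner, owner_cpu, dep_map in debug configs */
} raw_spinlock_t;

/* The common spinlock_t is now a thin wrapper; on PREEMPT_RT it can be
 * replaced by a sleeping lock without touching raw_spinlock users. */
typedef struct spinlock {
	struct raw_spinlock rlock;
} spinlock_t;

/*
 * Resulting call chain (the middle layer is this file when spinlock
 * debugging is enabled, a simple inline otherwise):
 *
 *   spin_lock(l)         ->  raw_spin_lock(&l->rlock)
 *   raw_spin_lock(l)     ->  do_raw_spin_lock(l)
 *   do_raw_spin_lock(l)  ->  arch_spin_lock(&l->raw_lock)
 */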
+18 -7
@@ -338,20 +338,34 @@ EXPORT_SYMBOL(strnchr);
#endif
/**
* strstrip - Removes leading and trailing whitespace from @s.
* skip_spaces - Removes leading whitespace from @s.
* @s: The string to be stripped.
*
* Returns a pointer to the first non-whitespace character in @s.
*/
char *skip_spaces(const char *str)
{
while (isspace(*str))
++str;
return (char *)str;
}
EXPORT_SYMBOL(skip_spaces);
/**
* strim - Removes leading and trailing whitespace from @s.
* @s: The string to be stripped.
*
* Note that the first trailing whitespace is replaced with a %NUL-terminator
* in the given string @s. Returns a pointer to the first non-whitespace
* character in @s.
*/
char *strstrip(char *s)
char *strim(char *s)
{
size_t size;
char *end;
s = skip_spaces(s);
size = strlen(s);
if (!size)
return s;
@@ -360,12 +374,9 @@ char *strstrip(char *s)
end--;
*(end + 1) = '\0';
while (*s && isspace(*s))
s++;
return s;
}
EXPORT_SYMBOL(strstrip);
EXPORT_SYMBOL(strim);
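skip_spaces() replaces the open-coded "while (isspace(*s)) s++;" loops converted elsewhere in this commit (argv_split, dynamic_debug), and strim() is the renamed strstrip(), which trims both ends in place. Their combined behaviour, as a small hedged sketch:

#include <linux/string.h>

static void trim_example(void)
{
	char buf[] = "   option = value  \n";
	char *p;

	p = skip_spaces(buf);	/* p -> "option = value  \n"; buf unchanged */

	p = strim(buf);		/* trailing "  \n" overwritten with '\0',
				 * p -> "option = value" */
}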
#ifndef __HAVE_ARCH_STRLEN
/**
+2 -2
@@ -549,7 +549,7 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
dma_mask = hwdev->coherent_dma_mask;
ret = (void *)__get_free_pages(flags, order);
if (ret && swiotlb_virt_to_bus(hwdev, ret) + size > dma_mask) {
if (ret && swiotlb_virt_to_bus(hwdev, ret) + size - 1 > dma_mask) {
/*
* The allocated memory isn't reachable by the device.
*/
@@ -571,7 +571,7 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
dev_addr = swiotlb_virt_to_bus(hwdev, ret);
/* Confirm address can be DMA'd by device */
if (dev_addr + size > dma_mask) {
if (dev_addr + size - 1 > dma_mask) {
printk("hwdev DMA mask = 0x%016Lx, dev_addr = 0x%016Lx\n",
(unsigned long long)dma_mask,
(unsigned long long)dev_addr);
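Both hunks fix an off-by-one in the reachability test: the byte that must fall within the device's DMA mask is the last byte of the buffer, dev_addr + size - 1, not the first byte past it. A worked example with illustrative numbers:

/* Device with a 32-bit coherent DMA mask. */
u64 dma_mask = 0xffffffffULL;		/* DMA_BIT_MASK(32) */

/* Buffer ending exactly at the 4 GiB boundary. */
u64 dev_addr = 0xfffff000ULL;		/* bus address of the allocation */
u64 size     = 0x1000;			/* 4 KiB */

/*
 * Old test:  dev_addr + size > dma_mask
 *            0x100000000 > 0xffffffff  -> rejected, although every byte
 *            in 0xfffff000..0xffffffff is reachable by the device.
 *
 * New test:  dev_addr + size - 1 > dma_mask
 *            0xffffffff > 0xffffffff   -> accepted, as it should be.
 */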
+314 -209
File diff suppressed because it is too large.