Merge branch 'locking-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'locking-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (33 commits)
  lockdep: fix deadlock in lockdep_trace_alloc
  lockdep: annotate reclaim context (__GFP_NOFS), fix SLOB
  lockdep: annotate reclaim context (__GFP_NOFS), fix
  lockdep: build fix for !PROVE_LOCKING
  lockstat: warn about disabled lock debugging
  lockdep: use stringify.h
  lockdep: simplify check_prev_add_irq()
  lockdep: get_user_chars() redo
  lockdep: simplify get_user_chars()
  lockdep: add comments to mark_lock_irq()
  lockdep: remove macro usage from mark_held_locks()
  lockdep: fully reduce mark_lock_irq()
  lockdep: merge the !_READ mark_lock_irq() helpers
  lockdep: merge the _READ mark_lock_irq() helpers
  lockdep: simplify mark_lock_irq() helpers #3
  lockdep: further simplify mark_lock_irq() helpers
  lockdep: simplify the mark_lock_irq() helpers
  lockdep: split up mark_lock_irq()
  lockdep: generate usage strings
  lockdep: generate the state bit definitions
  ...
@@ -27,33 +27,37 @@ lock-class.

 State
 -----

-The validator tracks lock-class usage history into 5 separate state bits:
+The validator tracks lock-class usage history into 4n + 1 separate state bits:

-- 'ever held in hardirq context'                    [ == hardirq-safe   ]
-- 'ever held in softirq context'                    [ == softirq-safe   ]
-- 'ever held with hardirqs enabled'                 [ == hardirq-unsafe ]
-- 'ever held with softirqs and hardirqs enabled'    [ == softirq-unsafe ]
+- 'ever held in STATE context'
+- 'ever held as readlock in STATE context'
+- 'ever held with STATE enabled'
+- 'ever held as readlock with STATE enabled'
+
+Where STATE can be one of the following (see kernel/lockdep_states.h):
+
+- hardirq
+- softirq
+- reclaim_fs

 - 'ever used'                                       [ == !unused        ]

-When locking rules are violated, these 4 state bits are presented in the
+When locking rules are violated, these state bits are presented in the
 locking error messages, inside curlies. A contrived example:

    modprobe/2287 is trying to acquire lock:
-    (&sio_locks[i].lock){--..}, at: [<c02867fd>] mutex_lock+0x21/0x24
+    (&sio_locks[i].lock){-.-...}, at: [<c02867fd>] mutex_lock+0x21/0x24

    but task is already holding lock:
-    (&sio_locks[i].lock){--..}, at: [<c02867fd>] mutex_lock+0x21/0x24
+    (&sio_locks[i].lock){-.-...}, at: [<c02867fd>] mutex_lock+0x21/0x24

-The bit position indicates hardirq, softirq, hardirq-read,
-softirq-read respectively, and the character displayed in each
-indicates:
+The bit position indicates STATE, STATE-read, for each of the states listed
+above, and the character displayed in each indicates:

    '.'  acquired while irqs disabled
    '+'  acquired in irq context
    '-'  acquired with irqs enabled
-   '?'  read acquired in irq context with irqs enabled.
+   '?'  acquired in irq context with irqs enabled.

 Unused mutexes cannot be part of the cause of an error.
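To make the new scheme concrete: the six characters in '{-.-...}' are one pair per state, STATE then STATE-read, for hardirq, softirq and reclaim_fs. Below is a small user-space sketch of the character table, purely illustrative and not the kernel's get_usage_chars(); the (used_in, enabled) inputs model the USED_IN_STATE and ENABLED_STATE bits:

    /* Illustrative only: derive one display character from a
     * (used-in-STATE-context, held-with-STATE-enabled) bit pair,
     * per the table above. */
    #include <stdio.h>

    static char usage_char(int used_in, int enabled)
    {
        if (used_in && enabled)
            return '?';    /* acquired in STATE context with STATE enabled */
        if (used_in)
            return '+';    /* acquired in STATE context */
        if (enabled)
            return '-';    /* acquired with STATE enabled */
        return '.';        /* only ever acquired with STATE disabled */
    }

    int main(void)
    {
        /* hypothetical class: only ever taken with hardirqs/softirqs
         * enabled, never inside irq or reclaim context */
        printf("{%c%c%c%c%c%c}\n",
               usage_char(0, 1), usage_char(0, 0),  /* hardirq, hardirq-read */
               usage_char(0, 1), usage_char(0, 0),  /* softirq, softirq-read */
               usage_char(0, 0), usage_char(0, 0)); /* reclaim_fs, -read */
        return 0;    /* prints {-.-...}, matching the example above */
    }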
@@ -20,43 +20,10 @@ struct lockdep_map;

 #include <linux/stacktrace.h>

 /*
- * Lock-class usage-state bits:
+ * We'd rather not expose kernel/lockdep_states.h this wide, but we do need
+ * the total number of states... :-(
  */
-enum lock_usage_bit
-{
-	LOCK_USED = 0,
-	LOCK_USED_IN_HARDIRQ,
-	LOCK_USED_IN_SOFTIRQ,
-	LOCK_ENABLED_SOFTIRQS,
-	LOCK_ENABLED_HARDIRQS,
-	LOCK_USED_IN_HARDIRQ_READ,
-	LOCK_USED_IN_SOFTIRQ_READ,
-	LOCK_ENABLED_SOFTIRQS_READ,
-	LOCK_ENABLED_HARDIRQS_READ,
-	LOCK_USAGE_STATES
-};
-
-/*
- * Usage-state bitmasks:
- */
-#define LOCKF_USED			(1 << LOCK_USED)
-#define LOCKF_USED_IN_HARDIRQ		(1 << LOCK_USED_IN_HARDIRQ)
-#define LOCKF_USED_IN_SOFTIRQ		(1 << LOCK_USED_IN_SOFTIRQ)
-#define LOCKF_ENABLED_HARDIRQS		(1 << LOCK_ENABLED_HARDIRQS)
-#define LOCKF_ENABLED_SOFTIRQS		(1 << LOCK_ENABLED_SOFTIRQS)
-
-#define LOCKF_ENABLED_IRQS (LOCKF_ENABLED_HARDIRQS | LOCKF_ENABLED_SOFTIRQS)
-#define LOCKF_USED_IN_IRQ (LOCKF_USED_IN_HARDIRQ | LOCKF_USED_IN_SOFTIRQ)
-
-#define LOCKF_USED_IN_HARDIRQ_READ	(1 << LOCK_USED_IN_HARDIRQ_READ)
-#define LOCKF_USED_IN_SOFTIRQ_READ	(1 << LOCK_USED_IN_SOFTIRQ_READ)
-#define LOCKF_ENABLED_HARDIRQS_READ	(1 << LOCK_ENABLED_HARDIRQS_READ)
-#define LOCKF_ENABLED_SOFTIRQS_READ	(1 << LOCK_ENABLED_SOFTIRQS_READ)
-
-#define LOCKF_ENABLED_IRQS_READ \
-		(LOCKF_ENABLED_HARDIRQS_READ | LOCKF_ENABLED_SOFTIRQS_READ)
-#define LOCKF_USED_IN_IRQ_READ \
-		(LOCKF_USED_IN_HARDIRQ_READ | LOCKF_USED_IN_SOFTIRQ_READ)
+#define XXX_LOCK_USAGE_STATES		(1+3*4)

 #define MAX_LOCKDEP_SUBCLASSES		8UL
@@ -97,7 +64,7 @@ struct lock_class {
 	 * IRQ/softirq usage tracking bits:
 	 */
 	unsigned long			usage_mask;
-	struct stack_trace		usage_traces[LOCK_USAGE_STATES];
+	struct stack_trace		usage_traces[XXX_LOCK_USAGE_STATES];

 	/*
 	 * These fields represent a directed graph of lock dependencies,
@@ -324,7 +291,11 @@ static inline void lock_set_subclass(struct lockdep_map *lock,
 	lock_set_class(lock, lock->name, lock->key, subclass, ip);
 }

-# define INIT_LOCKDEP				.lockdep_recursion = 0,
+extern void lockdep_set_current_reclaim_state(gfp_t gfp_mask);
+extern void lockdep_clear_current_reclaim_state(void);
+extern void lockdep_trace_alloc(gfp_t mask);
+
+# define INIT_LOCKDEP				.lockdep_recursion = 0, .lockdep_reclaim_gfp = 0,

 #define lockdep_depth(tsk)	(debug_locks ? (tsk)->lockdep_depth : 0)
@@ -342,6 +313,9 @@ static inline void lockdep_on(void)
 # define lock_release(l, n, i)			do { } while (0)
 # define lock_set_class(l, n, k, s, i)		do { } while (0)
 # define lock_set_subclass(l, s, i)		do { } while (0)
+# define lockdep_set_current_reclaim_state(g)	do { } while (0)
+# define lockdep_clear_current_reclaim_state()	do { } while (0)
+# define lockdep_trace_alloc(g)			do { } while (0)
 # define lockdep_init()				do { } while (0)
 # define lockdep_info()				do { } while (0)
 # define lockdep_init_map(lock, name, key, sub) \
@@ -50,8 +50,10 @@ struct mutex {
 	atomic_t		count;
 	spinlock_t		wait_lock;
 	struct list_head	wait_list;
-#ifdef CONFIG_DEBUG_MUTEXES
+#if defined(CONFIG_DEBUG_MUTEXES) || defined(CONFIG_SMP)
 	struct thread_info	*owner;
+#endif
+#ifdef CONFIG_DEBUG_MUTEXES
 	const char 		*name;
 	void			*magic;
 #endif
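(Note the new split: the owner pointer now exists on all SMP builds, because the adaptive-spinning code added to kernel/mutex.c below reads it speculatively to decide whether to spin, while name and magic stay debug-only.)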
@@ -68,7 +70,6 @@ struct mutex_waiter {
 	struct list_head	list;
 	struct task_struct	*task;
 #ifdef CONFIG_DEBUG_MUTEXES
-	struct mutex		*lock;
 	void			*magic;
 #endif
 };
@@ -331,7 +331,9 @@ extern signed long schedule_timeout(signed long timeout);
 extern signed long schedule_timeout_interruptible(signed long timeout);
 extern signed long schedule_timeout_killable(signed long timeout);
 extern signed long schedule_timeout_uninterruptible(signed long timeout);
+asmlinkage void __schedule(void);
 asmlinkage void schedule(void);
+extern int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner);

 struct nsproxy;
 struct user_namespace;

@@ -1334,6 +1336,7 @@ struct task_struct {
 	int lockdep_depth;
 	unsigned int lockdep_recursion;
 	struct held_lock held_locks[MAX_LOCK_DEPTH];
+	gfp_t lockdep_reclaim_gfp;
 #endif

 	/* journalling filesystem info */
@@ -5,6 +5,7 @@
 #include <linux/ktime.h>
 #include <linux/stddef.h>
 #include <linux/debugobjects.h>
+#include <linux/stringify.h>

 struct tvec_base;

@@ -21,52 +22,126 @@ struct timer_list {
 	char start_comm[16];
 	int start_pid;
 #endif
+#ifdef CONFIG_LOCKDEP
+	struct lockdep_map lockdep_map;
+#endif
 };

 extern struct tvec_base boot_tvec_bases;

+#ifdef CONFIG_LOCKDEP
+/*
+ * NB: because we have to copy the lockdep_map, setting the lockdep_map key
+ * (second argument) here is required, otherwise it could be initialised to
+ * the copy of the lockdep_map later! We use the pointer to and the string
+ * "<file>:<line>" as the key resp. the name of the lockdep_map.
+ */
+#define __TIMER_LOCKDEP_MAP_INITIALIZER(_kn)				\
+	.lockdep_map = STATIC_LOCKDEP_MAP_INIT(_kn, &_kn),
+#else
+#define __TIMER_LOCKDEP_MAP_INITIALIZER(_kn)
+#endif
+
 #define TIMER_INITIALIZER(_function, _expires, _data) {		\
 		.entry = { .prev = TIMER_ENTRY_STATIC },	\
 		.function = (_function),			\
 		.expires = (_expires),				\
 		.data = (_data),				\
 		.base = &boot_tvec_bases,			\
+		__TIMER_LOCKDEP_MAP_INITIALIZER(		\
+			__FILE__ ":" __stringify(__LINE__))	\
 	}

 #define DEFINE_TIMER(_name, _function, _expires, _data)		\
 	struct timer_list _name =				\
 		TIMER_INITIALIZER(_function, _expires, _data)

-void init_timer(struct timer_list *timer);
-void init_timer_deferrable(struct timer_list *timer);
+void init_timer_key(struct timer_list *timer,
+		    const char *name,
+		    struct lock_class_key *key);
+void init_timer_deferrable_key(struct timer_list *timer,
+			       const char *name,
+			       struct lock_class_key *key);
+
+#ifdef CONFIG_LOCKDEP
+#define init_timer(timer)						\
+	do {								\
+		static struct lock_class_key __key;			\
+		init_timer_key((timer), #timer, &__key);		\
+	} while (0)
+
+#define init_timer_deferrable(timer)					\
+	do {								\
+		static struct lock_class_key __key;			\
+		init_timer_deferrable_key((timer), #timer, &__key);	\
+	} while (0)
+
+#define init_timer_on_stack(timer)					\
+	do {								\
+		static struct lock_class_key __key;			\
+		init_timer_on_stack_key((timer), #timer, &__key);	\
+	} while (0)
+
+#define setup_timer(timer, fn, data)					\
+	do {								\
+		static struct lock_class_key __key;			\
+		setup_timer_key((timer), #timer, &__key, (fn), (data));\
+	} while (0)
+
+#define setup_timer_on_stack(timer, fn, data)				\
+	do {								\
+		static struct lock_class_key __key;			\
+		setup_timer_on_stack_key((timer), #timer, &__key,	\
+					 (fn), (data));			\
+	} while (0)
+#else
+#define init_timer(timer)\
+	init_timer_key((timer), NULL, NULL)
+#define init_timer_deferrable(timer)\
+	init_timer_deferrable_key((timer), NULL, NULL)
+#define init_timer_on_stack(timer)\
+	init_timer_on_stack_key((timer), NULL, NULL)
+#define setup_timer(timer, fn, data)\
+	setup_timer_key((timer), NULL, NULL, (fn), (data))
+#define setup_timer_on_stack(timer, fn, data)\
+	setup_timer_on_stack_key((timer), NULL, NULL, (fn), (data))
+#endif

 #ifdef CONFIG_DEBUG_OBJECTS_TIMERS
-extern void init_timer_on_stack(struct timer_list *timer);
+extern void init_timer_on_stack_key(struct timer_list *timer,
+				    const char *name,
+				    struct lock_class_key *key);
 extern void destroy_timer_on_stack(struct timer_list *timer);
 #else
 static inline void destroy_timer_on_stack(struct timer_list *timer) { }
-static inline void init_timer_on_stack(struct timer_list *timer)
+static inline void init_timer_on_stack_key(struct timer_list *timer,
+					   const char *name,
+					   struct lock_class_key *key)
 {
-	init_timer(timer);
+	init_timer_key(timer, name, key);
 }
 #endif

-static inline void setup_timer(struct timer_list * timer,
+static inline void setup_timer_key(struct timer_list * timer,
+				   const char *name,
+				   struct lock_class_key *key,
 			       void (*function)(unsigned long),
 			       unsigned long data)
 {
 	timer->function = function;
 	timer->data = data;
-	init_timer(timer);
+	init_timer_key(timer, name, key);
 }

-static inline void setup_timer_on_stack(struct timer_list *timer,
+static inline void setup_timer_on_stack_key(struct timer_list *timer,
+					    const char *name,
+					    struct lock_class_key *key,
 					void (*function)(unsigned long),
 					unsigned long data)
 {
 	timer->function = function;
 	timer->data = data;
-	init_timer_on_stack(timer);
+	init_timer_on_stack_key(timer, name, key);
 }

 /**
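The #timer / &__key pattern above is what gives every init_timer() call site its own lock class: each macro expansion declares its own block-scope static key, and the stringified argument becomes the class name. A stand-alone sketch of the same trick (demo names are made up, not the kernel API):

    #include <stdio.h>

    struct lock_class_key { int dummy; };

    static void init_timer_key_demo(const char *name, struct lock_class_key *key)
    {
        printf("timer '%s' -> class key %p\n", name, (void *)key);
    }

    /* one static __key per textual expansion == one lock class per call site */
    #define init_timer_demo(timer)                          \
        do {                                                \
            static struct lock_class_key __key;             \
            init_timer_key_demo(#timer, &__key);            \
        } while (0)

    int main(void)
    {
        int my_timer = 0, other_timer = 0;

        init_timer_demo(my_timer);      /* distinct key address */
        init_timer_demo(other_timer);   /* another distinct key */
        (void)my_timer; (void)other_timer;
        return 0;
    }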
kernel/lockdep.c: 530 lines changed (diff suppressed because it is too large)
@@ -6,6 +6,45 @@
  * lockdep subsystem internal functions and variables.
  */

+/*
+ * Lock-class usage-state bits:
+ */
+enum lock_usage_bit {
+#define LOCKDEP_STATE(__STATE)		\
+	LOCK_USED_IN_##__STATE,		\
+	LOCK_USED_IN_##__STATE##_READ,	\
+	LOCK_ENABLED_##__STATE,		\
+	LOCK_ENABLED_##__STATE##_READ,
+#include "lockdep_states.h"
+#undef LOCKDEP_STATE
+	LOCK_USED,
+	LOCK_USAGE_STATES
+};
+
+/*
+ * Usage-state bitmasks:
+ */
+#define __LOCKF(__STATE)	LOCKF_##__STATE = (1 << LOCK_##__STATE),
+
+enum {
+#define LOCKDEP_STATE(__STATE)						\
+	__LOCKF(USED_IN_##__STATE)					\
+	__LOCKF(USED_IN_##__STATE##_READ)				\
+	__LOCKF(ENABLED_##__STATE)					\
+	__LOCKF(ENABLED_##__STATE##_READ)
+#include "lockdep_states.h"
+#undef LOCKDEP_STATE
+	__LOCKF(USED)
+};
+
+#define LOCKF_ENABLED_IRQ (LOCKF_ENABLED_HARDIRQ | LOCKF_ENABLED_SOFTIRQ)
+#define LOCKF_USED_IN_IRQ (LOCKF_USED_IN_HARDIRQ | LOCKF_USED_IN_SOFTIRQ)
+
+#define LOCKF_ENABLED_IRQ_READ \
+		(LOCKF_ENABLED_HARDIRQ_READ | LOCKF_ENABLED_SOFTIRQ_READ)
+#define LOCKF_USED_IN_IRQ_READ \
+		(LOCKF_USED_IN_HARDIRQ_READ | LOCKF_USED_IN_SOFTIRQ_READ)
+
 /*
  * MAX_LOCKDEP_ENTRIES is the maximum number of lock dependencies
  * we track.
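This is the X-macro pattern: kernel/lockdep_states.h (the new file later in this diff) lists the states exactly once, and every consumer defines LOCKDEP_STATE() to stamp out its four per-state variants before including it. A self-contained sketch of the expansion, with the state list inlined instead of #included:

    #include <stdio.h>

    #define DEMO_STATES             \
        LOCKDEP_STATE(HARDIRQ)      \
        LOCKDEP_STATE(SOFTIRQ)      \
        LOCKDEP_STATE(RECLAIM_FS)

    enum lock_usage_bit {
    #define LOCKDEP_STATE(s)        \
        LOCK_USED_IN_##s,           \
        LOCK_USED_IN_##s##_READ,    \
        LOCK_ENABLED_##s,           \
        LOCK_ENABLED_##s##_READ,
        DEMO_STATES
    #undef LOCKDEP_STATE
        LOCK_USED,
        LOCK_USAGE_STATES   /* 4 bits per state + LOCK_USED == 13 here */
    };

    int main(void)
    {
        printf("LOCK_USAGE_STATES = %d\n", LOCK_USAGE_STATES);  /* 13 */
        return 0;
    }

Adding a state is then a one-line edit to lockdep_states.h, and every enum, bitmask and usage string grows to match.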
@@ -31,8 +70,10 @@
 extern struct list_head all_lock_classes;
 extern struct lock_chain lock_chains[];

-extern void
-get_usage_chars(struct lock_class *class, char *c1, char *c2, char *c3, char *c4);
+#define LOCK_USAGE_CHARS (1+LOCK_USAGE_STATES/2)
+
+extern void get_usage_chars(struct lock_class *class,
+			    char usage[LOCK_USAGE_CHARS]);

 extern const char * __get_key_name(struct lockdep_subclass_key *key, char *str);
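The arithmetic: each state contributes four bits but only two display characters (a write column and a read column), so with the three states currently listed LOCK_USAGE_STATES is 4*3 + 1 = 13 and LOCK_USAGE_CHARS is 1 + 13/2 = 7: six usage characters plus, presumably, the terminating NUL, matching the six-character '{-.-...}' annotation in the documentation hunk above.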
@@ -84,7 +84,7 @@ static int l_show(struct seq_file *m, void *v)
 {
 	struct lock_class *class = v;
 	struct lock_list *entry;
-	char c1, c2, c3, c4;
+	char usage[LOCK_USAGE_CHARS];

 	if (v == SEQ_START_TOKEN) {
 		seq_printf(m, "all lock classes:\n");
@@ -100,8 +100,8 @@ static int l_show(struct seq_file *m, void *v)
 	seq_printf(m, " BD:%5ld", lockdep_count_backward_deps(class));
 #endif

-	get_usage_chars(class, &c1, &c2, &c3, &c4);
-	seq_printf(m, " %c%c%c%c", c1, c2, c3, c4);
+	get_usage_chars(class, usage);
+	seq_printf(m, " %s", usage);

 	seq_printf(m, ": ");
 	print_name(m, class);
@@ -300,27 +300,27 @@ static int lockdep_stats_show(struct seq_file *m, void *v)
 			nr_uncategorized++;
 		if (class->usage_mask & LOCKF_USED_IN_IRQ)
 			nr_irq_safe++;
-		if (class->usage_mask & LOCKF_ENABLED_IRQS)
+		if (class->usage_mask & LOCKF_ENABLED_IRQ)
 			nr_irq_unsafe++;
 		if (class->usage_mask & LOCKF_USED_IN_SOFTIRQ)
 			nr_softirq_safe++;
-		if (class->usage_mask & LOCKF_ENABLED_SOFTIRQS)
+		if (class->usage_mask & LOCKF_ENABLED_SOFTIRQ)
 			nr_softirq_unsafe++;
 		if (class->usage_mask & LOCKF_USED_IN_HARDIRQ)
 			nr_hardirq_safe++;
-		if (class->usage_mask & LOCKF_ENABLED_HARDIRQS)
+		if (class->usage_mask & LOCKF_ENABLED_HARDIRQ)
 			nr_hardirq_unsafe++;
 		if (class->usage_mask & LOCKF_USED_IN_IRQ_READ)
 			nr_irq_read_safe++;
-		if (class->usage_mask & LOCKF_ENABLED_IRQS_READ)
+		if (class->usage_mask & LOCKF_ENABLED_IRQ_READ)
 			nr_irq_read_unsafe++;
 		if (class->usage_mask & LOCKF_USED_IN_SOFTIRQ_READ)
 			nr_softirq_read_safe++;
-		if (class->usage_mask & LOCKF_ENABLED_SOFTIRQS_READ)
+		if (class->usage_mask & LOCKF_ENABLED_SOFTIRQ_READ)
 			nr_softirq_read_unsafe++;
 		if (class->usage_mask & LOCKF_USED_IN_HARDIRQ_READ)
 			nr_hardirq_read_safe++;
-		if (class->usage_mask & LOCKF_ENABLED_HARDIRQS_READ)
+		if (class->usage_mask & LOCKF_ENABLED_HARDIRQ_READ)
 			nr_hardirq_read_unsafe++;

 #ifdef CONFIG_PROVE_LOCKING
@@ -601,6 +601,10 @@ static void seq_stats(struct seq_file *m, struct lock_stat_data *data)
 static void seq_header(struct seq_file *m)
 {
 	seq_printf(m, "lock_stat version 0.3\n");
+
+	if (unlikely(!debug_locks))
+		seq_printf(m, "*WARNING* lock debugging disabled!! - possibly due to a lockdep warning\n");
+
 	seq_line(m, '-', 0, 40 + 1 + 10 * (14 + 1));
 	seq_printf(m, "%40s %14s %14s %14s %14s %14s %14s %14s %14s "
 			"%14s %14s\n",
kernel/lockdep_states.h: new file, 9 lines
@@ -0,0 +1,9 @@
+/*
+ * Lockdep states,
+ *
+ * please update XXX_LOCK_USAGE_STATES in include/linux/lockdep.h whenever
+ * you add one, or come up with a nice dynamic solution.
+ */
+LOCKDEP_STATE(HARDIRQ)
+LOCKDEP_STATE(SOFTIRQ)
+LOCKDEP_STATE(RECLAIM_FS)
@@ -26,11 +26,6 @@
 /*
  * Must be called with lock->wait_lock held.
  */
-void debug_mutex_set_owner(struct mutex *lock, struct thread_info *new_owner)
-{
-	lock->owner = new_owner;
-}
-
 void debug_mutex_lock_common(struct mutex *lock, struct mutex_waiter *waiter)
 {
 	memset(waiter, MUTEX_DEBUG_INIT, sizeof(*waiter));
@@ -59,7 +54,6 @@ void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,

 	/* Mark the current thread as blocked on the lock: */
 	ti->task->blocked_on = waiter;
-	waiter->lock = lock;
 }

 void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
@@ -82,7 +76,7 @@ void debug_mutex_unlock(struct mutex *lock)
 	DEBUG_LOCKS_WARN_ON(lock->magic != lock);
-	DEBUG_LOCKS_WARN_ON(lock->owner != current_thread_info());
 	DEBUG_LOCKS_WARN_ON(!lock->wait_list.prev && !lock->wait_list.next);
-	DEBUG_LOCKS_WARN_ON(lock->owner != current_thread_info());
+	mutex_clear_owner(lock);
 }

 void debug_mutex_init(struct mutex *lock, const char *name,
@@ -95,7 +89,6 @@ void debug_mutex_init(struct mutex *lock, const char *name,
 	debug_check_no_locks_freed((void *)lock, sizeof(*lock));
 	lockdep_init_map(&lock->dep_map, name, key, 0);
 #endif
-	lock->owner = NULL;
 	lock->magic = lock;
 }
@@ -13,14 +13,6 @@
 /*
  * This must be called with lock->wait_lock held.
  */
-extern void
-debug_mutex_set_owner(struct mutex *lock, struct thread_info *new_owner);
-
-static inline void debug_mutex_clear_owner(struct mutex *lock)
-{
-	lock->owner = NULL;
-}
-
 extern void debug_mutex_lock_common(struct mutex *lock,
 				    struct mutex_waiter *waiter);
 extern void debug_mutex_wake_waiter(struct mutex *lock,
@@ -35,6 +27,16 @@ extern void debug_mutex_unlock(struct mutex *lock);
 extern void debug_mutex_init(struct mutex *lock, const char *name,
 			     struct lock_class_key *key);

+static inline void mutex_set_owner(struct mutex *lock)
+{
+	lock->owner = current_thread_info();
+}
+
+static inline void mutex_clear_owner(struct mutex *lock)
+{
+	lock->owner = NULL;
+}
+
 #define spin_lock_mutex(lock, flags)			\
 	do {						\
 		struct mutex *l = container_of(lock, struct mutex, wait_lock); \
kernel/mutex.c: 121 lines changed
@@ -10,6 +10,11 @@
  * Many thanks to Arjan van de Ven, Thomas Gleixner, Steven Rostedt and
  * David Howells for suggestions and improvements.
  *
+ *  - Adaptive spinning for mutexes by Peter Zijlstra. (Ported to mainline
+ *    from the -rt tree, where it was originally implemented for rtmutexes
+ *    by Steven Rostedt, based on work by Gregory Haskins, Peter Morreale
+ *    and Sven Dietrich.
+ *
  * Also see Documentation/mutex-design.txt.
  */
 #include <linux/mutex.h>
@@ -46,6 +51,7 @@ __mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
 	atomic_set(&lock->count, 1);
 	spin_lock_init(&lock->wait_lock);
 	INIT_LIST_HEAD(&lock->wait_list);
+	mutex_clear_owner(lock);

 	debug_mutex_init(lock, name, key);
 }
@@ -91,6 +97,7 @@ void inline __sched mutex_lock(struct mutex *lock)
 	 * 'unlocked' into 'locked' state.
 	 */
 	__mutex_fastpath_lock(&lock->count, __mutex_lock_slowpath);
+	mutex_set_owner(lock);
 }

 EXPORT_SYMBOL(mutex_lock);
@@ -115,6 +122,14 @@ void __sched mutex_unlock(struct mutex *lock)
 	 * The unlocking fastpath is the 0->1 transition from 'locked'
 	 * into 'unlocked' state:
 	 */
+#ifndef CONFIG_DEBUG_MUTEXES
+	/*
+	 * When debugging is enabled we must not clear the owner before time,
+	 * the slow path will always be taken, and that clears the owner field
+	 * after verifying that it was indeed current.
+	 */
+	mutex_clear_owner(lock);
+#endif
 	__mutex_fastpath_unlock(&lock->count, __mutex_unlock_slowpath);
 }

@@ -129,21 +144,75 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 {
 	struct task_struct *task = current;
 	struct mutex_waiter waiter;
-	unsigned int old_val;
 	unsigned long flags;

+	preempt_disable();
+	mutex_acquire(&lock->dep_map, subclass, 0, ip);
+#if defined(CONFIG_SMP) && !defined(CONFIG_DEBUG_MUTEXES)
+	/*
+	 * Optimistic spinning.
+	 *
+	 * We try to spin for acquisition when we find that there are no
+	 * pending waiters and the lock owner is currently running on a
+	 * (different) CPU.
+	 *
+	 * The rationale is that if the lock owner is running, it is likely to
+	 * release the lock soon.
+	 *
+	 * Since this needs the lock owner, and this mutex implementation
+	 * doesn't track the owner atomically in the lock field, we need to
+	 * track it non-atomically.
+	 *
+	 * We can't do this for DEBUG_MUTEXES because that relies on wait_lock
+	 * to serialize everything.
+	 */
+
+	for (;;) {
+		struct thread_info *owner;
+
+		/*
+		 * If there's an owner, wait for it to either
+		 * release the lock or go to sleep.
+		 */
+		owner = ACCESS_ONCE(lock->owner);
+		if (owner && !mutex_spin_on_owner(lock, owner))
+			break;
+
+		if (atomic_cmpxchg(&lock->count, 1, 0) == 1) {
+			lock_acquired(&lock->dep_map, ip);
+			mutex_set_owner(lock);
+			preempt_enable();
+			return 0;
+		}
+
+		/*
+		 * When there's no owner, we might have preempted between the
+		 * owner acquiring the lock and setting the owner field. If
+		 * we're an RT task that will live-lock because we won't let
+		 * the owner complete.
+		 */
+		if (!owner && (need_resched() || rt_task(task)))
+			break;
+
+		/*
+		 * The cpu_relax() call is a compiler barrier which forces
+		 * everything in this loop to be re-loaded. We don't need
+		 * memory barriers as we'll eventually observe the right
+		 * values at the cost of a few extra spins.
+		 */
+		cpu_relax();
+	}
+#endif
 	spin_lock_mutex(&lock->wait_lock, flags);

 	debug_mutex_lock_common(lock, &waiter);
-	mutex_acquire(&lock->dep_map, subclass, 0, ip);
 	debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));

 	/* add waiting tasks to the end of the waitqueue (FIFO): */
 	list_add_tail(&waiter.list, &lock->wait_list);
 	waiter.task = task;

-	old_val = atomic_xchg(&lock->count, -1);
-	if (old_val == 1)
+	if (atomic_xchg(&lock->count, -1) == 1)
 		goto done;

 	lock_contended(&lock->dep_map, ip);
@@ -158,8 +227,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 		 * that when we release the lock, we properly wake up the
 		 * other waiters:
 		 */
-		old_val = atomic_xchg(&lock->count, -1);
-		if (old_val == 1)
+		if (atomic_xchg(&lock->count, -1) == 1)
 			break;

 		/*
@@ -173,21 +241,22 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 			spin_unlock_mutex(&lock->wait_lock, flags);

 			debug_mutex_free_waiter(&waiter);
+			preempt_enable();
 			return -EINTR;
 		}
 		__set_task_state(task, state);

 		/* didnt get the lock, go to sleep: */
 		spin_unlock_mutex(&lock->wait_lock, flags);
-		schedule();
+		__schedule();
 		spin_lock_mutex(&lock->wait_lock, flags);
 	}

 done:
 	lock_acquired(&lock->dep_map, ip);
 	/* got the lock - rejoice! */
-	mutex_remove_waiter(lock, &waiter, task_thread_info(task));
-	debug_mutex_set_owner(lock, task_thread_info(task));
+	mutex_remove_waiter(lock, &waiter, current_thread_info());
+	mutex_set_owner(lock);

 	/* set it to 0 if there are no waiters left: */
 	if (likely(list_empty(&lock->wait_list)))
@@ -196,6 +265,7 @@ done:
 	spin_unlock_mutex(&lock->wait_lock, flags);

 	debug_mutex_free_waiter(&waiter);
+	preempt_enable();

 	return 0;
 }
@@ -222,7 +292,8 @@ int __sched
 mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
 {
 	might_sleep();
-	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, subclass, _RET_IP_);
+	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE,
+				   subclass, _RET_IP_);
 }

 EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);
@@ -260,8 +331,6 @@ __mutex_unlock_common_slowpath(atomic_t *lock_count, int nested)
 		wake_up_process(waiter->task);
 	}

-	debug_mutex_clear_owner(lock);
-
 	spin_unlock_mutex(&lock->wait_lock, flags);
 }

@@ -298,18 +367,30 @@ __mutex_lock_interruptible_slowpath(atomic_t *lock_count);
  */
 int __sched mutex_lock_interruptible(struct mutex *lock)
 {
+	int ret;
+
 	might_sleep();
-	return __mutex_fastpath_lock_retval
+	ret = __mutex_fastpath_lock_retval
 			(&lock->count, __mutex_lock_interruptible_slowpath);
+	if (!ret)
+		mutex_set_owner(lock);
+
+	return ret;
 }

 EXPORT_SYMBOL(mutex_lock_interruptible);

 int __sched mutex_lock_killable(struct mutex *lock)
 {
+	int ret;
+
 	might_sleep();
-	return __mutex_fastpath_lock_retval
+	ret = __mutex_fastpath_lock_retval
 			(&lock->count, __mutex_lock_killable_slowpath);
+	if (!ret)
+		mutex_set_owner(lock);
+
+	return ret;
 }
 EXPORT_SYMBOL(mutex_lock_killable);

@@ -352,9 +433,10 @@ static inline int __mutex_trylock_slowpath(atomic_t *lock_count)

 	prev = atomic_xchg(&lock->count, -1);
 	if (likely(prev == 1)) {
-		debug_mutex_set_owner(lock, current_thread_info());
+		mutex_set_owner(lock);
 		mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
 	}

 	/* Set it back to 0 if there are no waiters: */
 	if (likely(list_empty(&lock->wait_list)))
 		atomic_set(&lock->count, 0);
@@ -380,8 +462,13 @@ static inline int __mutex_trylock_slowpath(atomic_t *lock_count)
  */
 int __sched mutex_trylock(struct mutex *lock)
 {
-	return __mutex_fastpath_trylock(&lock->count,
-					__mutex_trylock_slowpath);
+	int ret;
+
+	ret = __mutex_fastpath_trylock(&lock->count, __mutex_trylock_slowpath);
+	if (ret)
+		mutex_set_owner(lock);
+
+	return ret;
 }

 EXPORT_SYMBOL(mutex_trylock);
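The count protocol the fastpaths and the new spin loop rely on is unchanged: 1 means unlocked, 0 locked with no waiters, -1 locked with possible waiters. A portable C11 sketch of the two atomic transitions used above (illustrative; the kernel uses arch-specific fastpath helpers):

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_int count = 1;  /* 1 unlocked, 0 locked, -1 maybe waiters */

    static int try_fast_acquire(void)
    {
        int expected = 1;
        /* the spinner's acquisition: 1 -> 0, succeeds only when uncontended */
        return atomic_compare_exchange_strong(&count, &expected, 0);
    }

    static int slowpath_mark_contended(void)
    {
        /* waiter path: force -1, and learn whether the lock was free (== 1) */
        return atomic_exchange(&count, -1) == 1;
    }

    int main(void)
    {
        printf("fast acquire: %d\n", try_fast_acquire());            /* 1 */
        printf("fast acquire: %d\n", try_fast_acquire());            /* 0 */
        printf("slowpath owns lock: %d\n", slowpath_mark_contended()); /* 0 */
        return 0;
    }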
@@ -16,8 +16,26 @@
 #define mutex_remove_waiter(lock, waiter, ti) \
 		__list_del((waiter)->list.prev, (waiter)->list.next)

-#define debug_mutex_set_owner(lock, new_owner)		do { } while (0)
-#define debug_mutex_clear_owner(lock)			do { } while (0)
+#ifdef CONFIG_SMP
+static inline void mutex_set_owner(struct mutex *lock)
+{
+	lock->owner = current_thread_info();
+}
+
+static inline void mutex_clear_owner(struct mutex *lock)
+{
+	lock->owner = NULL;
+}
+#else
+static inline void mutex_set_owner(struct mutex *lock)
+{
+}
+
+static inline void mutex_clear_owner(struct mutex *lock)
+{
+}
+#endif
+
 #define debug_mutex_wake_waiter(lock, waiter)		do { } while (0)
 #define debug_mutex_free_waiter(waiter)			do { } while (0)
 #define debug_mutex_add_waiter(lock, waiter, ti)	do { } while (0)
@@ -4942,15 +4942,13 @@ pick_next_task(struct rq *rq)
 /*
  * schedule() is the main scheduler function.
  */
-asmlinkage void __sched schedule(void)
+asmlinkage void __sched __schedule(void)
 {
 	struct task_struct *prev, *next;
 	unsigned long *switch_count;
 	struct rq *rq;
 	int cpu;

-need_resched:
-	preempt_disable();
 	cpu = smp_processor_id();
 	rq = cpu_rq(cpu);
 	rcu_qsctr_inc(cpu);
@@ -5007,13 +5005,80 @@ need_resched_nonpreemptible:

 	if (unlikely(reacquire_kernel_lock(current) < 0))
 		goto need_resched_nonpreemptible;
+}

+asmlinkage void __sched schedule(void)
+{
+need_resched:
+	preempt_disable();
+	__schedule();
 	preempt_enable_no_resched();
 	if (unlikely(test_thread_flag(TIF_NEED_RESCHED)))
 		goto need_resched;
 }
 EXPORT_SYMBOL(schedule);

+#ifdef CONFIG_SMP
+/*
+ * Look out! "owner" is an entirely speculative pointer
+ * access and not reliable.
+ */
+int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner)
+{
+	unsigned int cpu;
+	struct rq *rq;
+
+	if (!sched_feat(OWNER_SPIN))
+		return 0;
+
+#ifdef CONFIG_DEBUG_PAGEALLOC
+	/*
+	 * Need to access the cpu field knowing that
+	 * DEBUG_PAGEALLOC could have unmapped it if
+	 * the mutex owner just released it and exited.
+	 */
+	if (probe_kernel_address(&owner->cpu, cpu))
+		goto out;
+#else
+	cpu = owner->cpu;
+#endif
+
+	/*
+	 * Even if the access succeeded (likely case),
+	 * the cpu field may no longer be valid.
+	 */
+	if (cpu >= nr_cpumask_bits)
+		goto out;
+
+	/*
+	 * We need to validate that we can do a
+	 * get_cpu() and that we have the percpu area.
+	 */
+	if (!cpu_online(cpu))
+		goto out;
+
+	rq = cpu_rq(cpu);
+
+	for (;;) {
+		/*
+		 * Owner changed, break to re-assess state.
+		 */
+		if (lock->owner != owner)
+			break;
+
+		/*
+		 * Is that owner really running on that cpu?
+		 */
+		if (task_thread_info(rq->curr) != owner || need_resched())
+			return 0;
+
+		cpu_relax();
+	}
+out:
+	return 1;
+}
+#endif
+
 #ifdef CONFIG_PREEMPT
 /*
  * this is the entry point to schedule() from in-kernel preemption
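The split gives the mutex slowpath an entry point that assumes preemption is already disabled: __mutex_lock_common() above keeps a preempt_disable() across the whole acquisition, so it sleeps via __schedule() directly, while plain schedule() retains the old preempt_disable()/preempt_enable_no_resched() wrapping for every other caller.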
@@ -14,3 +14,4 @@ SCHED_FEAT(LB_WAKEUP_UPDATE, 1)
 SCHED_FEAT(ASYM_EFF_LOAD, 1)
 SCHED_FEAT(WAKEUP_OVERLAP, 0)
 SCHED_FEAT(LAST_BUDDY, 1)
+SCHED_FEAT(OWNER_SPIN, 1)
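Like the other SCHED_FEAT flags, OWNER_SPIN defaults to on and, with CONFIG_SCHED_DEBUG, should be toggleable at runtime through the sched_features debugfs file (e.g. echo NO_OWNER_SPIN > /sys/kernel/debug/sched_features), which makes it easy to compare lock_stat numbers with and without adaptive spinning.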
@@ -491,14 +491,18 @@ static inline void debug_timer_free(struct timer_list *timer)
 	debug_object_free(timer, &timer_debug_descr);
 }

-static void __init_timer(struct timer_list *timer);
+static void __init_timer(struct timer_list *timer,
+			 const char *name,
+			 struct lock_class_key *key);

-void init_timer_on_stack(struct timer_list *timer)
+void init_timer_on_stack_key(struct timer_list *timer,
+			     const char *name,
+			     struct lock_class_key *key)
 {
 	debug_object_init_on_stack(timer, &timer_debug_descr);
-	__init_timer(timer);
+	__init_timer(timer, name, key);
 }
-EXPORT_SYMBOL_GPL(init_timer_on_stack);
+EXPORT_SYMBOL_GPL(init_timer_on_stack_key);

 void destroy_timer_on_stack(struct timer_list *timer)
 {
@@ -512,7 +516,9 @@ static inline void debug_timer_activate(struct timer_list *timer) { }
 static inline void debug_timer_deactivate(struct timer_list *timer) { }
 #endif

-static void __init_timer(struct timer_list *timer)
+static void __init_timer(struct timer_list *timer,
+			 const char *name,
+			 struct lock_class_key *key)
 {
 	timer->entry.next = NULL;
 	timer->base = __raw_get_cpu_var(tvec_bases);
@@ -521,6 +527,7 @@ static void __init_timer(struct timer_list *timer)
 	timer->start_pid = -1;
 	memset(timer->start_comm, 0, TASK_COMM_LEN);
 #endif
+	lockdep_init_map(&timer->lockdep_map, name, key, 0);
 }

 /**
@@ -530,19 +537,23 @@ static void __init_timer(struct timer_list *timer)
  * init_timer() must be done to a timer prior calling *any* of the
  * other timer functions.
  */
-void init_timer(struct timer_list *timer)
+void init_timer_key(struct timer_list *timer,
+		    const char *name,
+		    struct lock_class_key *key)
 {
 	debug_timer_init(timer);
-	__init_timer(timer);
+	__init_timer(timer, name, key);
 }
-EXPORT_SYMBOL(init_timer);
+EXPORT_SYMBOL(init_timer_key);

-void init_timer_deferrable(struct timer_list *timer)
+void init_timer_deferrable_key(struct timer_list *timer,
+			       const char *name,
+			       struct lock_class_key *key)
 {
-	init_timer(timer);
+	init_timer_key(timer, name, key);
 	timer_set_deferrable(timer);
 }
-EXPORT_SYMBOL(init_timer_deferrable);
+EXPORT_SYMBOL(init_timer_deferrable_key);

 static inline void detach_timer(struct timer_list *timer,
 				int clear_pending)
@@ -826,6 +837,15 @@ EXPORT_SYMBOL(try_to_del_timer_sync);
  */
 int del_timer_sync(struct timer_list *timer)
 {
+#ifdef CONFIG_LOCKDEP
+	unsigned long flags;
+
+	local_irq_save(flags);
+	lock_map_acquire(&timer->lockdep_map);
+	lock_map_release(&timer->lockdep_map);
+	local_irq_restore(flags);
+#endif
+
 	for (;;) {
 		int ret = try_to_del_timer_sync(timer);
 		if (ret >= 0)
@@ -897,10 +917,36 @@ static inline void __run_timers(struct tvec_base *base)

 			set_running_timer(base, timer);
 			detach_timer(timer, 1);
+
 			spin_unlock_irq(&base->lock);
 			{
 				int preempt_count = preempt_count();
+
+#ifdef CONFIG_LOCKDEP
+				/*
+				 * It is permissible to free the timer from
+				 * inside the function that is called from
+				 * it, this we need to take into account for
+				 * lockdep too. To avoid bogus "held lock
+				 * freed" warnings as well as problems when
+				 * looking into timer->lockdep_map, make a
+				 * copy and use that here.
+				 */
+				struct lockdep_map lockdep_map =
+					timer->lockdep_map;
+#endif
+				/*
+				 * Couple the lock chain with the lock chain at
+				 * del_timer_sync() by acquiring the lock_map
+				 * around the fn() call here and in
+				 * del_timer_sync().
+				 */
+				lock_map_acquire(&lockdep_map);
+
 				fn(data);
+
+				lock_map_release(&lockdep_map);
+
 				if (preempt_count != preempt_count()) {
 					printk(KERN_ERR "huh, entered %p "
 					       "with preempt_count %08x, exited"
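The two acquire/release pairs are the whole point of the timer annotation: del_timer_sync() briefly 'takes' the timer's pseudo-lock, and __run_timers() holds it (via the copy) around fn(data). A path that calls del_timer_sync() while holding a lock the callback also takes therefore shows up to lockdep as an ordinary inversion, and is reported even when the real deadlock never fires.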
@@ -1479,6 +1479,8 @@ __alloc_pages_internal(gfp_t gfp_mask, unsigned int order,
 	unsigned long did_some_progress;
 	unsigned long pages_reclaimed = 0;

+	lockdep_trace_alloc(gfp_mask);
+
 	might_sleep_if(wait);

 	if (should_fail_alloc_page(gfp_mask, order))
@@ -1578,12 +1580,15 @@ nofail_alloc:
 	 */
 	cpuset_update_task_memory_state();
 	p->flags |= PF_MEMALLOC;
+
+	lockdep_set_current_reclaim_state(gfp_mask);
 	reclaim_state.reclaimed_slab = 0;
 	p->reclaim_state = &reclaim_state;

 	did_some_progress = try_to_free_pages(zonelist, order, gfp_mask);

 	p->reclaim_state = NULL;
+	lockdep_clear_current_reclaim_state();
 	p->flags &= ~PF_MEMALLOC;

 	cond_resched();
@@ -3318,6 +3318,8 @@ __cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
 	unsigned long save_flags;
 	void *ptr;

+	lockdep_trace_alloc(flags);
+
 	if (slab_should_failslab(cachep, flags))
 		return NULL;

@@ -3394,6 +3396,8 @@ __cache_alloc(struct kmem_cache *cachep, gfp_t flags, void *caller)
 	unsigned long save_flags;
 	void *objp;

+	lockdep_trace_alloc(flags);
+
 	if (slab_should_failslab(cachep, flags))
 		return NULL;

@@ -475,6 +475,8 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
 	unsigned int *m;
 	int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);

+	lockdep_trace_alloc(gfp);
+
 	if (size < PAGE_SIZE - align) {
 		if (!size)
 			return ZERO_SIZE_PTR;

@@ -1590,6 +1590,7 @@ static __always_inline void *slab_alloc(struct kmem_cache *s,
 	unsigned long flags;
 	unsigned int objsize;

+	lockdep_trace_alloc(gfpflags);
 	might_sleep_if(gfpflags & __GFP_WAIT);

 	if (should_failslab(s->objsize, gfpflags))
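Taken together these mm/ hooks wire up the RECLAIM_FS state from the documentation hunk at the top: lockdep_set_current_reclaim_state() brackets direct reclaim in __alloc_pages_internal(), and every allocator entry point reports its gfp mask through lockdep_trace_alloc(). A deliberately simplified user-space model of the bug class this catches (made-up names; the real logic in kernel/lockdep.c also filters on __GFP_WAIT and PF_MEMALLOC):

    #include <stdio.h>

    #define GFP_FS 0x1  /* toy flag: allocation may recurse into fs reclaim */

    static unsigned int current_reclaim_gfp;  /* ~ task->lockdep_reclaim_gfp */

    struct toy_class {
        const char *name;
        int used_in_reclaim;    /* ~ LOCK_USED_IN_RECLAIM_FS */
        int held_over_fs_alloc; /* ~ LOCK_ENABLED_RECLAIM_FS */
    };

    static void toy_acquire(struct toy_class *c)
    {
        if (current_reclaim_gfp)
            c->used_in_reclaim = 1;
    }

    /* called at allocation time for each lock currently held */
    static void toy_trace_alloc(struct toy_class *held, unsigned int gfp_mask)
    {
        if (!(gfp_mask & GFP_FS))
            return;
        held->held_over_fs_alloc = 1;
        if (held->used_in_reclaim)
            printf("lockdep: %s taken in fs reclaim AND held over a "
                   "GFP_FS allocation -> possible recursion deadlock\n",
                   held->name);
    }

    int main(void)
    {
        struct toy_class inode_lock = { "inode_lock", 0, 0 };

        current_reclaim_gfp = GFP_FS;   /* inside try_to_free_pages() */
        toy_acquire(&inode_lock);       /* reclaim takes the lock */
        current_reclaim_gfp = 0;

        toy_trace_alloc(&inode_lock, GFP_FS);  /* held over GFP_FS alloc */
        return 0;
    }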
Some files were not shown because too many files have changed in this diff.