mirror of https://github.com/linux-apfs/linux-apfs.git
Merge branch 'core-locking-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
* 'core-locking-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (27 commits)
rtmutex: Add missing rcu_read_unlock() in debug_rt_mutex_print_deadlock()
lockdep: Comment all warnings
lib: atomic64: Change the type of local lock to raw_spinlock_t
locking, lib/atomic64: Annotate atomic64_lock::lock as raw
locking, x86, iommu: Annotate qi->q_lock as raw
locking, x86, iommu: Annotate irq_2_ir_lock as raw
locking, x86, iommu: Annotate iommu->register_lock as raw
locking, dma, ipu: Annotate bank_lock as raw
locking, ARM: Annotate low level hw locks as raw
locking, drivers/dca: Annotate dca_lock as raw
locking, powerpc: Annotate uic->lock as raw
locking, x86: mce: Annotate cmci_discover_lock as raw
locking, ACPI: Annotate c3_lock as raw
locking, oprofile: Annotate oprofilefs lock as raw
locking, video: Annotate vga console lock as raw
locking, latencytop: Annotate latency_lock as raw
locking, timer_stats: Annotate table_lock as raw
locking, rwsem: Annotate inner lock as raw
locking, semaphores: Annotate inner lock as raw
locking, sched: Annotate thread_group_cputimer as raw
...
Fix up conflicts in kernel/posix-cpu-timers.c manually: making
cputimer->cputime a raw lock conflicted with the ABBA fix in commit
bcd5cff721 ("cputimer: Cure lock inversion").
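Every commit in this series applies the same transformation: under PREEMPT_RT, spinlock_t can become a sleeping lock, so locks that must keep genuine spinning semantics (taken in atomic context, with interrupts disabled, or beneath the scheduler) are annotated as raw_spinlock_t, whose API mirrors the spinlock one with a raw_ prefix. A minimal sketch of the pattern follows; the names are illustrative and do not come from the patched files.

#include <linux/spinlock.h>

/* Illustrative lock, not from the patched files. */
static DEFINE_RAW_SPINLOCK(example_lock);	/* was: DEFINE_SPINLOCK(example_lock) */

static void example_critical_section(void)
{
	unsigned long flags;

	/* was: spin_lock_irqsave(&example_lock, flags); */
	raw_spin_lock_irqsave(&example_lock, flags);
	/* keep the critical section short and non-sleeping */
	raw_spin_unlock_irqrestore(&example_lock, flags);
}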
lib/atomic64.c  +33 -33

@@ -29,11 +29,11 @@
  * Ensure each lock is in a separate cacheline.
  */
 static union {
-	spinlock_t lock;
+	raw_spinlock_t lock;
 	char pad[L1_CACHE_BYTES];
 } atomic64_lock[NR_LOCKS] __cacheline_aligned_in_smp;
 
-static inline spinlock_t *lock_addr(const atomic64_t *v)
+static inline raw_spinlock_t *lock_addr(const atomic64_t *v)
 {
 	unsigned long addr = (unsigned long) v;
 
@@ -45,12 +45,12 @@ static inline spinlock_t *lock_addr(const atomic64_t *v)
 long long atomic64_read(const atomic64_t *v)
 {
 	unsigned long flags;
-	spinlock_t *lock = lock_addr(v);
+	raw_spinlock_t *lock = lock_addr(v);
 	long long val;
 
-	spin_lock_irqsave(lock, flags);
+	raw_spin_lock_irqsave(lock, flags);
 	val = v->counter;
-	spin_unlock_irqrestore(lock, flags);
+	raw_spin_unlock_irqrestore(lock, flags);
 	return val;
 }
 EXPORT_SYMBOL(atomic64_read);
@@ -58,34 +58,34 @@ EXPORT_SYMBOL(atomic64_read);
 void atomic64_set(atomic64_t *v, long long i)
 {
 	unsigned long flags;
-	spinlock_t *lock = lock_addr(v);
+	raw_spinlock_t *lock = lock_addr(v);
 
-	spin_lock_irqsave(lock, flags);
+	raw_spin_lock_irqsave(lock, flags);
 	v->counter = i;
-	spin_unlock_irqrestore(lock, flags);
+	raw_spin_unlock_irqrestore(lock, flags);
 }
 EXPORT_SYMBOL(atomic64_set);
 
 void atomic64_add(long long a, atomic64_t *v)
 {
 	unsigned long flags;
-	spinlock_t *lock = lock_addr(v);
+	raw_spinlock_t *lock = lock_addr(v);
 
-	spin_lock_irqsave(lock, flags);
+	raw_spin_lock_irqsave(lock, flags);
 	v->counter += a;
-	spin_unlock_irqrestore(lock, flags);
+	raw_spin_unlock_irqrestore(lock, flags);
 }
 EXPORT_SYMBOL(atomic64_add);
 
 long long atomic64_add_return(long long a, atomic64_t *v)
 {
 	unsigned long flags;
-	spinlock_t *lock = lock_addr(v);
+	raw_spinlock_t *lock = lock_addr(v);
 	long long val;
 
-	spin_lock_irqsave(lock, flags);
+	raw_spin_lock_irqsave(lock, flags);
 	val = v->counter += a;
-	spin_unlock_irqrestore(lock, flags);
+	raw_spin_unlock_irqrestore(lock, flags);
 	return val;
 }
 EXPORT_SYMBOL(atomic64_add_return);
@@ -93,23 +93,23 @@ EXPORT_SYMBOL(atomic64_add_return);
 void atomic64_sub(long long a, atomic64_t *v)
 {
 	unsigned long flags;
-	spinlock_t *lock = lock_addr(v);
+	raw_spinlock_t *lock = lock_addr(v);
 
-	spin_lock_irqsave(lock, flags);
+	raw_spin_lock_irqsave(lock, flags);
 	v->counter -= a;
-	spin_unlock_irqrestore(lock, flags);
+	raw_spin_unlock_irqrestore(lock, flags);
 }
 EXPORT_SYMBOL(atomic64_sub);
 
 long long atomic64_sub_return(long long a, atomic64_t *v)
 {
 	unsigned long flags;
-	spinlock_t *lock = lock_addr(v);
+	raw_spinlock_t *lock = lock_addr(v);
 	long long val;
 
-	spin_lock_irqsave(lock, flags);
+	raw_spin_lock_irqsave(lock, flags);
 	val = v->counter -= a;
-	spin_unlock_irqrestore(lock, flags);
+	raw_spin_unlock_irqrestore(lock, flags);
 	return val;
 }
 EXPORT_SYMBOL(atomic64_sub_return);
@@ -117,14 +117,14 @@ EXPORT_SYMBOL(atomic64_sub_return);
 long long atomic64_dec_if_positive(atomic64_t *v)
 {
 	unsigned long flags;
-	spinlock_t *lock = lock_addr(v);
+	raw_spinlock_t *lock = lock_addr(v);
 	long long val;
 
-	spin_lock_irqsave(lock, flags);
+	raw_spin_lock_irqsave(lock, flags);
 	val = v->counter - 1;
 	if (val >= 0)
 		v->counter = val;
-	spin_unlock_irqrestore(lock, flags);
+	raw_spin_unlock_irqrestore(lock, flags);
 	return val;
 }
 EXPORT_SYMBOL(atomic64_dec_if_positive);
@@ -132,14 +132,14 @@ EXPORT_SYMBOL(atomic64_dec_if_positive);
 long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n)
 {
 	unsigned long flags;
-	spinlock_t *lock = lock_addr(v);
+	raw_spinlock_t *lock = lock_addr(v);
 	long long val;
 
-	spin_lock_irqsave(lock, flags);
+	raw_spin_lock_irqsave(lock, flags);
 	val = v->counter;
 	if (val == o)
 		v->counter = n;
-	spin_unlock_irqrestore(lock, flags);
+	raw_spin_unlock_irqrestore(lock, flags);
 	return val;
 }
 EXPORT_SYMBOL(atomic64_cmpxchg);
@@ -147,13 +147,13 @@ EXPORT_SYMBOL(atomic64_cmpxchg);
 long long atomic64_xchg(atomic64_t *v, long long new)
 {
 	unsigned long flags;
-	spinlock_t *lock = lock_addr(v);
+	raw_spinlock_t *lock = lock_addr(v);
 	long long val;
 
-	spin_lock_irqsave(lock, flags);
+	raw_spin_lock_irqsave(lock, flags);
 	val = v->counter;
 	v->counter = new;
-	spin_unlock_irqrestore(lock, flags);
+	raw_spin_unlock_irqrestore(lock, flags);
 	return val;
 }
 EXPORT_SYMBOL(atomic64_xchg);
@@ -161,15 +161,15 @@ EXPORT_SYMBOL(atomic64_xchg);
 int atomic64_add_unless(atomic64_t *v, long long a, long long u)
 {
 	unsigned long flags;
-	spinlock_t *lock = lock_addr(v);
+	raw_spinlock_t *lock = lock_addr(v);
 	int ret = 0;
 
-	spin_lock_irqsave(lock, flags);
+	raw_spin_lock_irqsave(lock, flags);
 	if (v->counter != u) {
 		v->counter += a;
 		ret = 1;
 	}
-	spin_unlock_irqrestore(lock, flags);
+	raw_spin_unlock_irqrestore(lock, flags);
 	return ret;
 }
 EXPORT_SYMBOL(atomic64_add_unless);
@@ -179,7 +179,7 @@ static int init_atomic64_lock(void)
 	int i;
 
 	for (i = 0; i < NR_LOCKS; ++i)
-		spin_lock_init(&atomic64_lock[i].lock);
+		raw_spin_lock_init(&atomic64_lock[i].lock);
 	return 0;
 }
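lib/atomic64.c is the generic fallback for 32-bit architectures without native 64-bit atomic instructions: each atomic64_t hashes to one of NR_LOCKS locks, and every operation runs under that lock. Because atomic64_*() may be called from any context, including with interrupts disabled, the lock must never sleep, hence the raw annotation. A usage sketch with hypothetical names; callers are unaffected by the change:

#include <linux/atomic.h>

static atomic64_t bytes_seen = ATOMIC64_INIT(0);	/* hypothetical counter */

static void note_rx(long long n)
{
	atomic64_add(n, &bytes_seen);	/* takes the raw lock on the fallback path */
}

static long long total_rx(void)
{
	return atomic64_read(&bytes_seen);	/* safe from any context */
}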
lib/percpu_counter.c

@@ -59,13 +59,13 @@ void percpu_counter_set(struct percpu_counter *fbc, s64 amount)
 {
 	int cpu;
 
-	spin_lock(&fbc->lock);
+	raw_spin_lock(&fbc->lock);
 	for_each_possible_cpu(cpu) {
 		s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
 		*pcount = 0;
 	}
 	fbc->count = amount;
-	spin_unlock(&fbc->lock);
+	raw_spin_unlock(&fbc->lock);
 }
 EXPORT_SYMBOL(percpu_counter_set);
 
@@ -76,10 +76,10 @@ void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch)
 	preempt_disable();
 	count = __this_cpu_read(*fbc->counters) + amount;
 	if (count >= batch || count <= -batch) {
-		spin_lock(&fbc->lock);
+		raw_spin_lock(&fbc->lock);
 		fbc->count += count;
 		__this_cpu_write(*fbc->counters, 0);
-		spin_unlock(&fbc->lock);
+		raw_spin_unlock(&fbc->lock);
 	} else {
 		__this_cpu_write(*fbc->counters, count);
 	}
@@ -96,13 +96,13 @@ s64 __percpu_counter_sum(struct percpu_counter *fbc)
 	s64 ret;
 	int cpu;
 
-	spin_lock(&fbc->lock);
+	raw_spin_lock(&fbc->lock);
 	ret = fbc->count;
 	for_each_online_cpu(cpu) {
 		s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
 		ret += *pcount;
 	}
-	spin_unlock(&fbc->lock);
+	raw_spin_unlock(&fbc->lock);
 	return ret;
 }
 EXPORT_SYMBOL(__percpu_counter_sum);
@@ -110,7 +110,7 @@ EXPORT_SYMBOL(__percpu_counter_sum);
 int __percpu_counter_init(struct percpu_counter *fbc, s64 amount,
 			  struct lock_class_key *key)
 {
-	spin_lock_init(&fbc->lock);
+	raw_spin_lock_init(&fbc->lock);
 	lockdep_set_class(&fbc->lock, key);
 	fbc->count = amount;
 	fbc->counters = alloc_percpu(s32);
@@ -173,11 +173,11 @@ static int __cpuinit percpu_counter_hotcpu_callback(struct notifier_block *nb,
 		s32 *pcount;
 		unsigned long flags;
 
-		spin_lock_irqsave(&fbc->lock, flags);
+		raw_spin_lock_irqsave(&fbc->lock, flags);
 		pcount = per_cpu_ptr(fbc->counters, cpu);
 		fbc->count += *pcount;
 		*pcount = 0;
-		spin_unlock_irqrestore(&fbc->lock, flags);
+		raw_spin_unlock_irqrestore(&fbc->lock, flags);
 	}
 	mutex_unlock(&percpu_counters_lock);
 #endif
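percpu_counter accumulates per-CPU deltas and folds them into the shared fbc->count under fbc->lock only when a delta crosses the batch threshold, so the lock is held just for short folds, sums, and resets. A hypothetical usage sketch (the two-argument percpu_counter_init() matches this kernel version):

#include <linux/percpu_counter.h>

static struct percpu_counter nr_widgets;	/* hypothetical counter */

static int widgets_init(void)
{
	return percpu_counter_init(&nr_widgets, 0);
}

static void widget_created(void)
{
	percpu_counter_inc(&nr_widgets);	/* per-CPU fast path, usually lock-free */
}

static s64 widgets_total(void)
{
	return percpu_counter_sum(&nr_widgets);	/* walks CPUs under fbc->lock */
}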
lib/proportions.c  +6 -6

@@ -190,7 +190,7 @@ prop_adjust_shift(int *pl_shift, unsigned long *pl_period, int new_shift)
 
 int prop_local_init_percpu(struct prop_local_percpu *pl)
 {
-	spin_lock_init(&pl->lock);
+	raw_spin_lock_init(&pl->lock);
 	pl->shift = 0;
 	pl->period = 0;
 	return percpu_counter_init(&pl->events, 0);
@@ -226,7 +226,7 @@ void prop_norm_percpu(struct prop_global *pg, struct prop_local_percpu *pl)
 	if (pl->period == global_period)
 		return;
 
-	spin_lock_irqsave(&pl->lock, flags);
+	raw_spin_lock_irqsave(&pl->lock, flags);
 	prop_adjust_shift(&pl->shift, &pl->period, pg->shift);
 
 	/*
@@ -247,7 +247,7 @@ void prop_norm_percpu(struct prop_global *pg, struct prop_local_percpu *pl)
 	percpu_counter_set(&pl->events, 0);
 
 	pl->period = global_period;
-	spin_unlock_irqrestore(&pl->lock, flags);
+	raw_spin_unlock_irqrestore(&pl->lock, flags);
 }
 
 /*
@@ -324,7 +324,7 @@ void prop_fraction_percpu(struct prop_descriptor *pd,
 
 int prop_local_init_single(struct prop_local_single *pl)
 {
-	spin_lock_init(&pl->lock);
+	raw_spin_lock_init(&pl->lock);
 	pl->shift = 0;
 	pl->period = 0;
 	pl->events = 0;
@@ -356,7 +356,7 @@ void prop_norm_single(struct prop_global *pg, struct prop_local_single *pl)
 	if (pl->period == global_period)
 		return;
 
-	spin_lock_irqsave(&pl->lock, flags);
+	raw_spin_lock_irqsave(&pl->lock, flags);
 	prop_adjust_shift(&pl->shift, &pl->period, pg->shift);
 	/*
 	 * For each missed period, we half the local counter.
@@ -367,7 +367,7 @@ void prop_norm_single(struct prop_global *pg, struct prop_local_single *pl)
 	else
 		pl->events = 0;
 	pl->period = global_period;
-	spin_unlock_irqrestore(&pl->lock, flags);
+	raw_spin_unlock_irqrestore(&pl->lock, flags);
 }
 
 /*
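lib/proportions.c tracks each local counter's share of a global event stream, decaying contributions from past periods; pl->lock only covers the short shift/period normalization above. A hedged sketch of the percpu flavor, with invented names and an arbitrarily chosen shift:

#include <linux/proportions.h>

static struct prop_descriptor events;		/* hypothetical global stream */
static struct prop_local_percpu my_share;

static int share_init(void)
{
	int err = prop_descriptor_init(&events, 6);	/* period = 2^6 events */

	return err ? err : prop_local_init_percpu(&my_share);
}

static void event_done(void)
{
	prop_inc_percpu(&events, &my_share);	/* normalizes under pl->lock */
}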
lib/ratelimit.c  +2 -2

@@ -39,7 +39,7 @@ int ___ratelimit(struct ratelimit_state *rs, const char *func)
 	 * in addition to the one that will be printed by
 	 * the entity that is holding the lock already:
 	 */
-	if (!spin_trylock_irqsave(&rs->lock, flags))
+	if (!raw_spin_trylock_irqsave(&rs->lock, flags))
 		return 0;
 
 	if (!rs->begin)
@@ -60,7 +60,7 @@ int ___ratelimit(struct ratelimit_state *rs, const char *func)
 		rs->missed++;
 		ret = 0;
 	}
-	spin_unlock_irqrestore(&rs->lock, flags);
+	raw_spin_unlock_irqrestore(&rs->lock, flags);
 
 	return ret;
 }
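___ratelimit() backs __ratelimit() and printk_ratelimited() and can be called from any context, including paths that already hold the state's lock, which is why it only trylocks and why the lock must stay a spinning one. A usage sketch with an invented rate-limit policy:

#include <linux/kernel.h>
#include <linux/ratelimit.h>

/* at most 10 messages per 5-second window (hypothetical policy) */
static DEFINE_RATELIMIT_STATE(err_rs, 5 * HZ, 10);

static void report_error(int code)
{
	if (__ratelimit(&err_rs))
		printk(KERN_WARNING "example: error %d\n", code);
}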
lib/rwsem-spinlock.c  +19 -19

@@ -22,9 +22,9 @@ int rwsem_is_locked(struct rw_semaphore *sem)
 	int ret = 1;
 	unsigned long flags;
 
-	if (spin_trylock_irqsave(&sem->wait_lock, flags)) {
+	if (raw_spin_trylock_irqsave(&sem->wait_lock, flags)) {
 		ret = (sem->activity != 0);
-		spin_unlock_irqrestore(&sem->wait_lock, flags);
+		raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
 	}
 	return ret;
 }
@@ -44,7 +44,7 @@ void __init_rwsem(struct rw_semaphore *sem, const char *name,
 	lockdep_init_map(&sem->dep_map, name, key, 0);
 #endif
 	sem->activity = 0;
-	spin_lock_init(&sem->wait_lock);
+	raw_spin_lock_init(&sem->wait_lock);
 	INIT_LIST_HEAD(&sem->wait_list);
 }
 EXPORT_SYMBOL(__init_rwsem);
@@ -145,12 +145,12 @@ void __sched __down_read(struct rw_semaphore *sem)
 	struct task_struct *tsk;
 	unsigned long flags;
 
-	spin_lock_irqsave(&sem->wait_lock, flags);
+	raw_spin_lock_irqsave(&sem->wait_lock, flags);
 
 	if (sem->activity >= 0 && list_empty(&sem->wait_list)) {
 		/* granted */
 		sem->activity++;
-		spin_unlock_irqrestore(&sem->wait_lock, flags);
+		raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
 		goto out;
 	}
 
@@ -165,7 +165,7 @@ void __sched __down_read(struct rw_semaphore *sem)
 	list_add_tail(&waiter.list, &sem->wait_list);
 
 	/* we don't need to touch the semaphore struct anymore */
-	spin_unlock_irqrestore(&sem->wait_lock, flags);
+	raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
 
 	/* wait to be given the lock */
 	for (;;) {
@@ -189,7 +189,7 @@ int __down_read_trylock(struct rw_semaphore *sem)
 	int ret = 0;
 
 
-	spin_lock_irqsave(&sem->wait_lock, flags);
+	raw_spin_lock_irqsave(&sem->wait_lock, flags);
 
 	if (sem->activity >= 0 && list_empty(&sem->wait_list)) {
 		/* granted */
@@ -197,7 +197,7 @@ int __down_read_trylock(struct rw_semaphore *sem)
 		ret = 1;
 	}
 
-	spin_unlock_irqrestore(&sem->wait_lock, flags);
+	raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
 
 	return ret;
 }
@@ -212,12 +212,12 @@ void __sched __down_write_nested(struct rw_semaphore *sem, int subclass)
 	struct task_struct *tsk;
 	unsigned long flags;
 
-	spin_lock_irqsave(&sem->wait_lock, flags);
+	raw_spin_lock_irqsave(&sem->wait_lock, flags);
 
 	if (sem->activity == 0 && list_empty(&sem->wait_list)) {
 		/* granted */
 		sem->activity = -1;
-		spin_unlock_irqrestore(&sem->wait_lock, flags);
+		raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
 		goto out;
 	}
 
@@ -232,7 +232,7 @@ void __sched __down_write_nested(struct rw_semaphore *sem, int subclass)
 	list_add_tail(&waiter.list, &sem->wait_list);
 
 	/* we don't need to touch the semaphore struct anymore */
-	spin_unlock_irqrestore(&sem->wait_lock, flags);
+	raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
 
 	/* wait to be given the lock */
 	for (;;) {
@@ -260,7 +260,7 @@ int __down_write_trylock(struct rw_semaphore *sem)
 	unsigned long flags;
 	int ret = 0;
 
-	spin_lock_irqsave(&sem->wait_lock, flags);
+	raw_spin_lock_irqsave(&sem->wait_lock, flags);
 
 	if (sem->activity == 0 && list_empty(&sem->wait_list)) {
 		/* granted */
@@ -268,7 +268,7 @@ int __down_write_trylock(struct rw_semaphore *sem)
 		ret = 1;
 	}
 
-	spin_unlock_irqrestore(&sem->wait_lock, flags);
+	raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
 
 	return ret;
 }
@@ -280,12 +280,12 @@ void __up_read(struct rw_semaphore *sem)
 {
 	unsigned long flags;
 
-	spin_lock_irqsave(&sem->wait_lock, flags);
+	raw_spin_lock_irqsave(&sem->wait_lock, flags);
 
 	if (--sem->activity == 0 && !list_empty(&sem->wait_list))
 		sem = __rwsem_wake_one_writer(sem);
 
-	spin_unlock_irqrestore(&sem->wait_lock, flags);
+	raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
 }
 
 /*
@@ -295,13 +295,13 @@ void __up_write(struct rw_semaphore *sem)
 {
 	unsigned long flags;
 
-	spin_lock_irqsave(&sem->wait_lock, flags);
+	raw_spin_lock_irqsave(&sem->wait_lock, flags);
 
 	sem->activity = 0;
 	if (!list_empty(&sem->wait_list))
 		sem = __rwsem_do_wake(sem, 1);
 
-	spin_unlock_irqrestore(&sem->wait_lock, flags);
+	raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
 }
 
 /*
@@ -312,12 +312,12 @@ void __downgrade_write(struct rw_semaphore *sem)
 {
 	unsigned long flags;
 
-	spin_lock_irqsave(&sem->wait_lock, flags);
+	raw_spin_lock_irqsave(&sem->wait_lock, flags);
 
 	sem->activity = 1;
 	if (!list_empty(&sem->wait_list))
 		sem = __rwsem_do_wake(sem, 0);
 
-	spin_unlock_irqrestore(&sem->wait_lock, flags);
+	raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
 }
lib/rwsem.c  +7 -7

@@ -22,7 +22,7 @@ void __init_rwsem(struct rw_semaphore *sem, const char *name,
 	lockdep_init_map(&sem->dep_map, name, key, 0);
 #endif
 	sem->count = RWSEM_UNLOCKED_VALUE;
-	spin_lock_init(&sem->wait_lock);
+	raw_spin_lock_init(&sem->wait_lock);
 	INIT_LIST_HEAD(&sem->wait_list);
 }
 
@@ -180,7 +180,7 @@ rwsem_down_failed_common(struct rw_semaphore *sem,
 	set_task_state(tsk, TASK_UNINTERRUPTIBLE);
 
 	/* set up my own style of waitqueue */
-	spin_lock_irq(&sem->wait_lock);
+	raw_spin_lock_irq(&sem->wait_lock);
 	waiter.task = tsk;
 	waiter.flags = flags;
 	get_task_struct(tsk);
@@ -204,7 +204,7 @@ rwsem_down_failed_common(struct rw_semaphore *sem,
 	    adjustment == -RWSEM_ACTIVE_WRITE_BIAS)
 		sem = __rwsem_do_wake(sem, RWSEM_WAKE_READ_OWNED);
 
-	spin_unlock_irq(&sem->wait_lock);
+	raw_spin_unlock_irq(&sem->wait_lock);
 
 	/* wait to be given the lock */
 	for (;;) {
@@ -245,13 +245,13 @@ struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem)
 {
 	unsigned long flags;
 
-	spin_lock_irqsave(&sem->wait_lock, flags);
+	raw_spin_lock_irqsave(&sem->wait_lock, flags);
 
 	/* do nothing if list empty */
 	if (!list_empty(&sem->wait_list))
 		sem = __rwsem_do_wake(sem, RWSEM_WAKE_ANY);
 
-	spin_unlock_irqrestore(&sem->wait_lock, flags);
+	raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
 
 	return sem;
 }
@@ -265,13 +265,13 @@ struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem)
 {
 	unsigned long flags;
 
-	spin_lock_irqsave(&sem->wait_lock, flags);
+	raw_spin_lock_irqsave(&sem->wait_lock, flags);
 
 	/* do nothing if list empty */
 	if (!list_empty(&sem->wait_list))
 		sem = __rwsem_do_wake(sem, RWSEM_WAKE_READ_OWNED);
 
-	spin_unlock_irqrestore(&sem->wait_lock, flags);
+	raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
 
 	return sem;
 }
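Both rwsem flavors (the spinlock-based generic implementation above and this atomic-count one) still sleep at the API level; only the internal wait_lock guarding the waiter list, held for a handful of instructions at a time, becomes raw. A usage sketch with hypothetical data, showing that down_read()/down_write() semantics are unchanged:

#include <linux/rwsem.h>

static DECLARE_RWSEM(config_sem);	/* hypothetical */
static int config_value;

static int config_read(void)
{
	int v;

	down_read(&config_sem);		/* may sleep; wait_lock is the raw part */
	v = config_value;
	up_read(&config_sem);
	return v;
}

static void config_write(int v)
{
	down_write(&config_sem);
	config_value = v;
	up_write(&config_sem);
}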