[PATCH] spinlock consolidation
This patch (written by me and also containing many suggestions of Arjan
van de Ven) does a major cleanup of the spinlock code. It does the
following things:

 - consolidates and enhances the spinlock/rwlock debugging code

 - simplifies the asm/spinlock.h files

 - encapsulates the raw spinlock type and moves generic spinlock
   features (such as ->break_lock) into the generic code.

 - cleans up the spinlock code hierarchy to get rid of the spaghetti.

Most notably there's now only a single variant of the debugging code,
located in lib/spinlock_debug.c. (Previously we had one SMP debugging
variant per architecture, plus a separate generic one for UP builds.)

Also, I've enhanced the rwlock debugging facility: it will now track
write-owners. There is new spinlock-owner/CPU-tracking on SMP builds
too. All locks have lockup detection now, which will work for both
soft and hard spin/rwlock lockups.

The arch-level include files now only contain the minimally necessary
subset of the spinlock code - all the rest that can be generalized now
lives in the generic headers:

 include/asm-i386/spinlock_types.h   |   16
 include/asm-x86_64/spinlock_types.h |   16

I have also split up the various spinlock variants into separate files,
making it easier to see which does what. The new layout is:

   SMP                          |  UP
   -----------------------------|-----------------------------------
   asm/spinlock_types_smp.h     |  linux/spinlock_types_up.h
   linux/spinlock_types.h       |  linux/spinlock_types.h
   asm/spinlock_smp.h           |  linux/spinlock_up.h
   linux/spinlock_api_smp.h     |  linux/spinlock_api_up.h
   linux/spinlock.h             |  linux/spinlock.h

/*
 * here's the role of the various spinlock/rwlock related include files:
 *
 * on SMP builds:
 *
 *  asm/spinlock_types.h: contains the raw_spinlock_t/raw_rwlock_t and the
 *                        initializers
 *
 *  linux/spinlock_types.h:
 *                        defines the generic type and initializers
 *
 *  asm/spinlock.h:       contains the __raw_spin_*()/etc. lowlevel
 *                        implementations, mostly inline assembly code
 *
 *   (also included on UP-debug builds:)
 *
 *  linux/spinlock_api_smp.h:
 *                        contains the prototypes for the _spin_*() APIs.
 *
 *  linux/spinlock.h:     builds the final spin_*() APIs.
 *
 * on UP builds:
 *
 *  linux/spinlock_type_up.h:
 *                        contains the generic, simplified UP spinlock type.
 *                        (which is an empty structure on non-debug builds)
 *
 *  linux/spinlock_types.h:
 *                        defines the generic type and initializers
 *
 *  linux/spinlock_up.h:
 *                        contains the __raw_spin_*()/etc. version of UP
 *                        builds. (which are NOPs on non-debug, non-preempt
 *                        builds)
 *
 *   (included on UP-non-debug builds:)
 *
 *  linux/spinlock_api_up.h:
 *                        builds the _spin_*() APIs.
 *
 *  linux/spinlock.h:     builds the final spin_*() APIs.
 */

All SMP and UP architectures are converted by this patch.

arm, i386, ia64, ppc, ppc64, s390/s390x, x64 were build-tested via
crosscompilers. m32r, mips, sh, sparc have not been tested yet, but
should be mostly fine.

From: Grant Grundler <grundler@parisc-linux.org>

  Booted and lightly tested on a500-44 (64-bit, SMP kernel, dual CPU).
  Builds 32-bit SMP kernel (not booted or tested). I did not try to
  build non-SMP kernels. That should be trivial to fix up later if
  necessary.

  I converted bit ops atomic_hash lock to raw_spinlock_t. Doing so
  avoids some ugly nesting of linux/*.h and asm/*.h files. Those
  particular locks are well tested and contained entirely inside arch
  specific code. I do NOT expect any new issues to arise with them.
  If someone does ever need to use debug/metrics with them, then they
  will need to unravel this hairball between spinlocks, atomic ops,
  and bit ops that exist only because parisc has exactly one atomic
  instruction: LDCW (load and clear word).

From: "Luck, Tony" <tony.luck@intel.com>

  ia64 fix

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Arjan van de Ven <arjanv@infradead.org>
Signed-off-by: Grant Grundler <grundler@parisc-linux.org>
Cc: Matthew Wilcox <willy@debian.org>
Signed-off-by: Hirokazu Takata <takata@linux-m32r.org>
Signed-off-by: Mikael Pettersson <mikpe@csd.uu.se>
Signed-off-by: Benoit Boissinot <benoit.boissinot@ens-lyon.org>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
committed by Linus Torvalds
parent 4327edf6b8
commit fb1c8f93d8
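Before the per-file hunks, it may help to see the encapsulation described
above in one place. Below is a sketch of the generic wrapper type that
linux/spinlock_types.h now builds around the arch-supplied raw type, plus
the call path a spin_lock() takes on an SMP build. Field order and config
guards are abridged and meant as illustration, not as an authoritative
quote of the patch:

        /* linux/spinlock_types.h (sketch): generic type wraps the raw one */
        typedef struct {
                raw_spinlock_t raw_lock;        /* arch part, e.g. parisc lock[4] */
        #if defined(CONFIG_PREEMPT) && defined(CONFIG_SMP)
                unsigned int break_lock;        /* generic now, not per-arch */
        #endif
        #ifdef CONFIG_DEBUG_SPINLOCK
                unsigned int magic, owner_cpu;  /* one debug variant for all archs */
                void *owner;
        #endif
        } spinlock_t;

        /*
         * Call path on SMP (sketch):
         *   spin_lock(l)                          linux/spinlock.h
         *     -> _spin_lock(l)                    linux/spinlock_api_smp.h
         *       -> __raw_spin_lock(&l->raw_lock)  asm/spinlock.h (inline asm)
         */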
include/asm-parisc/atomic.h
@@ -24,19 +24,19 @@
 # define ATOMIC_HASH_SIZE 4
 # define ATOMIC_HASH(a) (&(__atomic_hash[ (((unsigned long) a)/L1_CACHE_BYTES) & (ATOMIC_HASH_SIZE-1) ]))
 
-extern spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned;
+extern raw_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned;
 
-/* Can't use _raw_spin_lock_irq because of #include problems, so
+/* Can't use raw_spin_lock_irq because of #include problems, so
  * this is the substitute */
 #define _atomic_spin_lock_irqsave(l,f) do {     \
-        spinlock_t *s = ATOMIC_HASH(l);         \
+        raw_spinlock_t *s = ATOMIC_HASH(l);     \
         local_irq_save(f);                      \
-        _raw_spin_lock(s);                      \
+        __raw_spin_lock(s);                     \
 } while(0)
 
 #define _atomic_spin_unlock_irqrestore(l,f) do {        \
-        spinlock_t *s = ATOMIC_HASH(l);                 \
-        _raw_spin_unlock(s);                            \
+        raw_spinlock_t *s = ATOMIC_HASH(l);             \
+        __raw_spin_unlock(s);                           \
         local_irq_restore(f);                           \
 } while(0)
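For context on how these macros get used (not part of the hunk above):
parisc implements atomic_t by hashing the variable's address to one of the
__atomic_hash locks and taking it with interrupts disabled. A sketch
modeled on the parisc atomic ops of this era, for illustration only:

        static __inline__ int __atomic_add_return(int i, atomic_t *v)
        {
                int ret;
                unsigned long flags;

                _atomic_spin_lock_irqsave(v, flags);    /* hashes v, takes raw lock */
                ret = (v->counter += i);
                _atomic_spin_unlock_irqrestore(v, flags);
                return ret;
        }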
include/asm-parisc/bitops.h
@@ -2,7 +2,7 @@
 #define _PARISC_BITOPS_H
 
 #include <linux/compiler.h>
-#include <asm/system.h>
+#include <asm/spinlock.h>
 #include <asm/byteorder.h>
 #include <asm/atomic.h>
 
include/asm-parisc/cacheflush.h
@@ -3,6 +3,7 @@
 
 #include <linux/config.h>
 #include <linux/mm.h>
+#include <asm/cache.h>          /* for flush_user_dcache_range_asm() proto */
 
 /* The usual comment is "Caches aren't brain-dead on the <architecture>".
  * Unfortunately, that doesn't apply to PA-RISC. */
include/asm-parisc/processor.h
@@ -11,6 +11,7 @@
 #ifndef __ASSEMBLY__
 #include <linux/config.h>
 #include <linux/threads.h>
+#include <linux/spinlock_types.h>
 
 #include <asm/hardware.h>
 #include <asm/page.h>
include/asm-parisc/spinlock.h (+28, -135)
@@ -2,30 +2,25 @@
 #define __ASM_SPINLOCK_H
 
 #include <asm/system.h>
+#include <asm/processor.h>
+#include <asm/spinlock_types.h>
 
 /* Note that PA-RISC has to use `1' to mean unlocked and `0' to mean locked
  * since it only has load-and-zero. Moreover, at least on some PA processors,
  * the semaphore address has to be 16-byte aligned.
  */
 
-#ifndef CONFIG_DEBUG_SPINLOCK
-
-#define __SPIN_LOCK_UNLOCKED    { { 1, 1, 1, 1 } }
-#undef SPIN_LOCK_UNLOCKED
-#define SPIN_LOCK_UNLOCKED (spinlock_t) __SPIN_LOCK_UNLOCKED
-
-#define spin_lock_init(x)       do { *(x) = SPIN_LOCK_UNLOCKED; } while(0)
-
-static inline int spin_is_locked(spinlock_t *x)
+static inline int __raw_spin_is_locked(raw_spinlock_t *x)
 {
         volatile unsigned int *a = __ldcw_align(x);
         return *a == 0;
 }
 
-#define spin_unlock_wait(x)     do { barrier(); } while(spin_is_locked(x))
-#define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock)
+#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
+#define __raw_spin_unlock_wait(x) \
+                do { cpu_relax(); } while (__raw_spin_is_locked(x))
 
-static inline void _raw_spin_lock(spinlock_t *x)
+static inline void __raw_spin_lock(raw_spinlock_t *x)
 {
         volatile unsigned int *a;
 
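The hunk above stops just inside __raw_spin_lock(), so the LDCW-based lock
body itself is not shown. A sketch of what it looks like after this patch,
reconstructed from the surrounding context (barrier placement illustrative):

        static inline void __raw_spin_lock(raw_spinlock_t *x)
        {
                volatile unsigned int *a;

                mb();
                a = __ldcw_align(x);    /* locate the 16-byte-aligned word */
                while (__ldcw(a) == 0)  /* load-and-clear: read 0 => held */
                        while (*a == 0) /* spin on plain loads until free */
                                cpu_relax();
                mb();
        }

Unlocking is just a barrier followed by storing 1 back into the aligned
word, which is why `1' means unlocked here.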
@@ -36,7 +31,7 @@ static inline void _raw_spin_lock(spinlock_t *x)
         mb();
 }
 
-static inline void _raw_spin_unlock(spinlock_t *x)
+static inline void __raw_spin_unlock(raw_spinlock_t *x)
 {
         volatile unsigned int *a;
         mb();
@@ -45,7 +40,7 @@ static inline void _raw_spin_unlock(spinlock_t *x)
         mb();
 }
 
-static inline int _raw_spin_trylock(spinlock_t *x)
+static inline int __raw_spin_trylock(raw_spinlock_t *x)
 {
         volatile unsigned int *a;
         int ret;
@@ -57,131 +52,38 @@ static inline int _raw_spin_trylock(spinlock_t *x)
 
         return ret;
 }
 
-#define spin_lock_own(LOCK, LOCATION)   ((void)0)
-
-#else /* !(CONFIG_DEBUG_SPINLOCK) */
-
-#define SPINLOCK_MAGIC  0x1D244B3C
-
-#define __SPIN_LOCK_UNLOCKED    { { 1, 1, 1, 1 }, SPINLOCK_MAGIC, 10, __FILE__ , NULL, 0, -1, NULL, NULL }
-#undef SPIN_LOCK_UNLOCKED
-#define SPIN_LOCK_UNLOCKED (spinlock_t) __SPIN_LOCK_UNLOCKED
-
-#define spin_lock_init(x)       do { *(x) = SPIN_LOCK_UNLOCKED; } while(0)
-
-#define CHECK_LOCK(x)                                                   \
-        do {                                                            \
-                if (unlikely((x)->magic != SPINLOCK_MAGIC)) {           \
-                        printk(KERN_ERR "%s:%d: spin_is_locked"         \
-                        " on uninitialized spinlock %p.\n",             \
-                                __FILE__, __LINE__, (x));               \
-                }                                                       \
-        } while(0)
-
-#define spin_is_locked(x)                                               \
-        ({                                                              \
-                CHECK_LOCK(x);                                          \
-                volatile unsigned int *a = __ldcw_align(x);             \
-                if (unlikely((*a == 0) && (x)->babble)) {               \
-                        (x)->babble--;                                  \
-                        printk("KERN_WARNING                            \
-                                %s:%d: spin_is_locked(%s/%p) already"   \
-                                " locked by %s:%d in %s at %p(%d)\n",   \
-                                __FILE__,__LINE__, (x)->module, (x),    \
-                                (x)->bfile, (x)->bline, (x)->task->comm,\
-                                (x)->previous, (x)->oncpu);             \
-                }                                                       \
-                *a == 0;                                                \
-        })
-
-#define spin_unlock_wait(x)                                             \
-        do {                                                            \
-                CHECK_LOCK(x);                                          \
-                volatile unsigned int *a = __ldcw_align(x);             \
-                if (unlikely((*a == 0) && (x)->babble)) {               \
-                        (x)->babble--;                                  \
-                        printk("KERN_WARNING                            \
-                                %s:%d: spin_unlock_wait(%s/%p)"         \
-                                " owned by %s:%d in %s at %p(%d)\n",    \
-                                __FILE__,__LINE__, (x)->module, (x),    \
-                                (x)->bfile, (x)->bline, (x)->task->comm,\
-                                (x)->previous, (x)->oncpu);             \
-                }                                                       \
-                barrier();                                              \
-        } while (*((volatile unsigned char *)(__ldcw_align(x))) == 0)
-
-extern void _dbg_spin_lock(spinlock_t *lock, const char *base_file, int line_no);
-extern void _dbg_spin_unlock(spinlock_t *lock, const char *, int);
-extern int _dbg_spin_trylock(spinlock_t * lock, const char *, int);
-
-#define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock)
-
-#define _raw_spin_unlock(lock)  _dbg_spin_unlock(lock, __FILE__, __LINE__)
-#define _raw_spin_lock(lock) _dbg_spin_lock(lock, __FILE__, __LINE__)
-#define _raw_spin_trylock(lock) _dbg_spin_trylock(lock, __FILE__, __LINE__)
-
-/* just in case we need it */
-#define spin_lock_own(LOCK, LOCATION)                                   \
-do {                                                                    \
-        volatile unsigned int *a = __ldcw_align(LOCK);                  \
-        if (!((*a == 0) && ((LOCK)->oncpu == smp_processor_id())))      \
-                printk("KERN_WARNING                                    \
-                        %s: called on %d from %p but lock %s on %d\n",  \
-                        LOCATION, smp_processor_id(),                   \
-                        __builtin_return_address(0),                    \
-                        (*a == 0) ? "taken" : "freed", (LOCK)->on_cpu); \
-} while (0)
-
-#endif /* !(CONFIG_DEBUG_SPINLOCK) */
-
 /*
  * Read-write spinlocks, allowing multiple readers
  * but only one writer.
  */
-typedef struct {
-        spinlock_t lock;
-        volatile int counter;
-#ifdef CONFIG_PREEMPT
-        unsigned int break_lock;
-#endif
-} rwlock_t;
-
-#define RW_LOCK_UNLOCKED (rwlock_t) { __SPIN_LOCK_UNLOCKED, 0 }
-
-#define rwlock_init(lp) do { *(lp) = RW_LOCK_UNLOCKED; } while (0)
-
-#define _raw_read_trylock(lock) generic_raw_read_trylock(lock)
+#define __raw_read_trylock(lock) generic__raw_read_trylock(lock)
 
 /* read_lock, read_unlock are pretty straightforward. Of course it somehow
  * sucks we end up saving/restoring flags twice for read_lock_irqsave aso. */
 
-#ifdef CONFIG_DEBUG_RWLOCK
-extern void _dbg_read_lock(rwlock_t * rw, const char *bfile, int bline);
-#define _raw_read_lock(rw) _dbg_read_lock(rw, __FILE__, __LINE__)
-#else
-static __inline__ void _raw_read_lock(rwlock_t *rw)
+static __inline__ void __raw_read_lock(raw_rwlock_t *rw)
 {
         unsigned long flags;
         local_irq_save(flags);
-        _raw_spin_lock(&rw->lock);
+        __raw_spin_lock(&rw->lock);
 
         rw->counter++;
 
-        _raw_spin_unlock(&rw->lock);
+        __raw_spin_unlock(&rw->lock);
         local_irq_restore(flags);
 }
-#endif /* CONFIG_DEBUG_RWLOCK */
 
-static __inline__ void _raw_read_unlock(rwlock_t *rw)
+static __inline__ void __raw_read_unlock(raw_rwlock_t *rw)
 {
         unsigned long flags;
         local_irq_save(flags);
-        _raw_spin_lock(&rw->lock);
+        __raw_spin_lock(&rw->lock);
 
         rw->counter--;
 
-        _raw_spin_unlock(&rw->lock);
+        __raw_spin_unlock(&rw->lock);
         local_irq_restore(flags);
 }
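The "saving/restoring flags twice" gripe in the comment above comes from
the layering: the generic read_lock_irqsave() path saves flags once, and
parisc's __raw_read_lock() saves and restores them again internally. A
hypothetical expansion, purely illustrative (read_lock_irqsave_expanded is
not a real kernel symbol):

        static inline void read_lock_irqsave_expanded(raw_rwlock_t *rw,
                                                      unsigned long *flags)
        {
                local_irq_save(*flags); /* generic API layer: first save */
                __raw_read_lock(rw);    /* does its own save/restore too */
        }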
@@ -194,20 +96,17 @@ static __inline__ void _raw_read_unlock(rwlock_t *rw)
  * writers) in interrupt handlers someone fucked up and we'd dead-lock
  * sooner or later anyway. prumpf */
 
-#ifdef CONFIG_DEBUG_RWLOCK
-extern void _dbg_write_lock(rwlock_t * rw, const char *bfile, int bline);
-#define _raw_write_lock(rw) _dbg_write_lock(rw, __FILE__, __LINE__)
-#else
-static __inline__ void _raw_write_lock(rwlock_t *rw)
+static __inline__ void __raw_write_lock(raw_rwlock_t *rw)
 {
 retry:
-        _raw_spin_lock(&rw->lock);
+        __raw_spin_lock(&rw->lock);
 
         if(rw->counter != 0) {
                 /* this basically never happens */
-                _raw_spin_unlock(&rw->lock);
+                __raw_spin_unlock(&rw->lock);
 
-                while(rw->counter != 0);
+                while (rw->counter != 0)
+                        cpu_relax();
 
                 goto retry;
         }
@@ -215,26 +114,21 @@ retry:
         /* got it. now leave without unlocking */
         rw->counter = -1; /* remember we are locked */
 }
-#endif /* CONFIG_DEBUG_RWLOCK */
 
 /* write_unlock is absolutely trivial - we don't have to wait for anything */
 
-static __inline__ void _raw_write_unlock(rwlock_t *rw)
+static __inline__ void __raw_write_unlock(raw_rwlock_t *rw)
 {
         rw->counter = 0;
-        _raw_spin_unlock(&rw->lock);
+        __raw_spin_unlock(&rw->lock);
 }
 
-#ifdef CONFIG_DEBUG_RWLOCK
-extern int _dbg_write_trylock(rwlock_t * rw, const char *bfile, int bline);
-#define _raw_write_trylock(rw) _dbg_write_trylock(rw, __FILE__, __LINE__)
-#else
-static __inline__ int _raw_write_trylock(rwlock_t *rw)
+static __inline__ int __raw_write_trylock(raw_rwlock_t *rw)
 {
-        _raw_spin_lock(&rw->lock);
+        __raw_spin_lock(&rw->lock);
         if (rw->counter != 0) {
                 /* this basically never happens */
-                _raw_spin_unlock(&rw->lock);
+                __raw_spin_unlock(&rw->lock);
 
                 return 0;
         }
@@ -243,14 +137,13 @@ static __inline__ int _raw_write_trylock(rwlock_t *rw)
         rw->counter = -1; /* remember we are locked */
         return 1;
 }
-#endif /* CONFIG_DEBUG_RWLOCK */
 
-static __inline__ int is_read_locked(rwlock_t *rw)
+static __inline__ int __raw_is_read_locked(raw_rwlock_t *rw)
 {
         return rw->counter > 0;
 }
 
-static __inline__ int is_write_locked(rwlock_t *rw)
+static __inline__ int __raw_is_write_locked(raw_rwlock_t *rw)
 {
         return rw->counter < 0;
 }
include/asm-parisc/spinlock_types.h (new file)
@@ -0,0 +1,21 @@
+#ifndef __ASM_SPINLOCK_TYPES_H
+#define __ASM_SPINLOCK_TYPES_H
+
+#ifndef __LINUX_SPINLOCK_TYPES_H
+# error "please don't include this file directly"
+#endif
+
+typedef struct {
+        volatile unsigned int lock[4];
+} raw_spinlock_t;
+
+#define __RAW_SPIN_LOCK_UNLOCKED        { { 1, 1, 1, 1 } }
+
+typedef struct {
+        raw_spinlock_t lock;
+        volatile int counter;
+} raw_rwlock_t;
+
+#define __RAW_RW_LOCK_UNLOCKED          { __RAW_SPIN_LOCK_UNLOCKED, 0 }
+
+#endif
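Why lock[4] instead of a single word: on at least some PA processors LDCW
requires a 16-byte-aligned operand (see the comment in asm/spinlock.h
above), and the structure itself is not guaranteed that alignment. Four
consecutive words always contain one that is 16-byte aligned, and
__ldcw_align() picks it out. A sketch of that helper, assuming roughly the
form it has in asm/system.h in this era:

        #define __PA_LDCW_ALIGNMENT     16

        /* round &lock[0] up to the next 16-byte boundary inside lock[4] */
        #define __ldcw_align(a) ({                                      \
                unsigned long __ret = (unsigned long) &(a)->lock[0];    \
                __ret = (__ret + __PA_LDCW_ALIGNMENT - 1)               \
                        & ~(__PA_LDCW_ALIGNMENT - 1);                   \
                (volatile unsigned int *) __ret;                        \
        })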
include/asm-parisc/system.h
@@ -160,29 +160,7 @@ static inline void set_eiem(unsigned long val)
 })
 
 #ifdef CONFIG_SMP
-/*
- * Your basic SMP spinlocks, allowing only a single CPU anywhere
- */
-
-typedef struct {
-        volatile unsigned int lock[4];
-#ifdef CONFIG_DEBUG_SPINLOCK
-        unsigned long magic;
-        volatile unsigned int babble;
-        const char *module;
-        char *bfile;
-        int bline;
-        int oncpu;
-        void *previous;
-        struct task_struct * task;
-#endif
-#ifdef CONFIG_PREEMPT
-        unsigned int break_lock;
-#endif
-} spinlock_t;
-
-#define __lock_aligned __attribute__((__section__(".data.lock_aligned")))
-
+# define __lock_aligned __attribute__((__section__(".data.lock_aligned")))
 #endif
 
 #define KERNEL_START (0x10100000 - 0x1000)