Merge branch 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull locking updates from Ingo Molnar:
"The locking tree was busier in this cycle than the usual pattern - a
couple of major projects happened to coincide.
The main changes are:
- implement the atomic_fetch_{add,sub,and,or,xor}() API natively
across all SMP architectures (Peter Zijlstra)
- add atomic_fetch_{inc/dec}() as well, using the generic primitives
(Davidlohr Bueso)
- optimize various aspects of rwsems (Jason Low, Davidlohr Bueso,
Waiman Long)
- optimize smp_cond_load_acquire() on arm64 and implement LSE based
atomic{,64}_fetch_{add,sub,and,andnot,or,xor}{,_relaxed,_acquire,_release}()
on arm64 (Will Deacon)
- introduce smp_acquire__after_ctrl_dep() and fix various barrier
mis-uses and bugs (Peter Zijlstra)
- after discovering ancient spin_unlock_wait() barrier bugs in its
implementation and usage, strengthen its semantics and update/fix
usage sites (Peter Zijlstra)
- optimize mutex_trylock() fastpath (Peter Zijlstra)
- ... misc fixes and cleanups"
* 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (67 commits)
locking/atomic: Introduce inc/dec variants for the atomic_fetch_$op() API
locking/barriers, arch/arm64: Implement LDXR+WFE based smp_cond_load_acquire()
locking/static_keys: Fix non static symbol Sparse warning
locking/qspinlock: Use __this_cpu_dec() instead of full-blown this_cpu_dec()
locking/atomic, arch/tile: Fix tilepro build
locking/atomic, arch/m68k: Remove comment
locking/atomic, arch/arc: Fix build
locking/Documentation: Clarify limited control-dependency scope
locking/atomic, arch/rwsem: Employ atomic_long_fetch_add()
locking/atomic, arch/qrwlock: Employ atomic_fetch_add_acquire()
locking/atomic, arch/mips: Convert to _relaxed atomics
locking/atomic, arch/alpha: Convert to _relaxed atomics
locking/atomic: Remove the deprecated atomic_{set,clear}_mask() functions
locking/atomic: Remove linux/atomic.h:atomic_fetch_or()
locking/atomic: Implement atomic{,64,_long}_fetch_{add,sub,and,andnot,or,xor}{,_relaxed,_acquire,_release}()
locking/atomic: Fix atomic64_relaxed() bits
locking/atomic, arch/xtensa: Implement atomic_fetch_{add,sub,and,or,xor}()
locking/atomic, arch/x86: Implement atomic{,64}_fetch_{add,sub,and,or,xor}()
locking/atomic, arch/tile: Implement atomic{,64}_fetch_{add,sub,and,or,xor}()
locking/atomic, arch/sparc: Implement atomic{,64}_fetch_{add,sub,and,or,xor}()
...
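
A note on semantics for readers new to the fetch API above: the atomic_fetch_*() operations return the value the atomic variable held before the operation, while the older atomic_*_return() operations return the value after it. A minimal sketch of the contract in plain C (illustrative only — it ignores atomicity and memory ordering, which are the whole point of the real implementations):

	/* Illustrative semantics only -- not the kernel implementation. */
	static long fetch_add(long *v, long a)
	{
		long old = *v;	/* value before the update */
		*v += a;
		return old;	/* fetch_* variants return this */
	}

	static long add_return(long *v, long a)
	{
		*v += a;
		return *v;	/* *_return variants return this */
	}

The old value is exactly what patterns like "set a bit and learn whether it was already set" need, which is why ops such as fetch_or() and fetch_andnot() are part of the new API.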
--- a/lib/atomic64.c
+++ b/lib/atomic64.c
@@ -96,17 +96,41 @@ long long atomic64_##op##_return(long long a, atomic64_t *v) \
 } \
 EXPORT_SYMBOL(atomic64_##op##_return);
 
+#define ATOMIC64_FETCH_OP(op, c_op) \
+long long atomic64_fetch_##op(long long a, atomic64_t *v) \
+{ \
+	unsigned long flags; \
+	raw_spinlock_t *lock = lock_addr(v); \
+	long long val; \
+	\
+	raw_spin_lock_irqsave(lock, flags); \
+	val = v->counter; \
+	v->counter c_op a; \
+	raw_spin_unlock_irqrestore(lock, flags); \
+	return val; \
+} \
+EXPORT_SYMBOL(atomic64_fetch_##op);
+
 #define ATOMIC64_OPS(op, c_op) \
 	ATOMIC64_OP(op, c_op) \
-	ATOMIC64_OP_RETURN(op, c_op)
+	ATOMIC64_OP_RETURN(op, c_op) \
+	ATOMIC64_FETCH_OP(op, c_op)
 
 ATOMIC64_OPS(add, +=)
 ATOMIC64_OPS(sub, -=)
-ATOMIC64_OP(and, &=)
-ATOMIC64_OP(or, |=)
-ATOMIC64_OP(xor, ^=)
+
+#undef ATOMIC64_OPS
+#define ATOMIC64_OPS(op, c_op) \
+	ATOMIC64_OP(op, c_op) \
+	ATOMIC64_OP_RETURN(op, c_op) \
+	ATOMIC64_FETCH_OP(op, c_op)
+
+ATOMIC64_OPS(and, &=)
+ATOMIC64_OPS(or, |=)
+ATOMIC64_OPS(xor, ^=)
 
 #undef ATOMIC64_OPS
+#undef ATOMIC64_FETCH_OP
 #undef ATOMIC64_OP_RETURN
 #undef ATOMIC64_OP
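
To make the macro machinery in the hunk above concrete, here is a hand-derived expansion of ATOMIC64_FETCH_OP(add, +=) for this generic, spinlock-backed fallback (a sketch of what the preprocessor produces, not verbatim output):

	long long atomic64_fetch_add(long long a, atomic64_t *v)
	{
		unsigned long flags;
		raw_spinlock_t *lock = lock_addr(v);	/* hashed spinlock covering this counter */
		long long val;

		raw_spin_lock_irqsave(lock, flags);
		val = v->counter;	/* snapshot the old value... */
		v->counter += a;	/* ...then apply the operation */
		raw_spin_unlock_irqrestore(lock, flags);
		return val;		/* the pre-operation value */
	}
	EXPORT_SYMBOL(atomic64_fetch_add);

Redefining ATOMIC64_OPS to include ATOMIC64_FETCH_OP lets and/or/xor pick up fetch variants through the same one-line instantiations already used for add/sub.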
--- a/lib/atomic64_test.c
+++ b/lib/atomic64_test.c
@@ -53,11 +53,25 @@ do { \
 	BUG_ON(atomic##bit##_read(&v) != r); \
 } while (0)
 
+#define TEST_FETCH(bit, op, c_op, val) \
+do { \
+	atomic##bit##_set(&v, v0); \
+	r = v0; \
+	r c_op val; \
+	BUG_ON(atomic##bit##_##op(val, &v) != v0); \
+	BUG_ON(atomic##bit##_read(&v) != r); \
+} while (0)
+
 #define RETURN_FAMILY_TEST(bit, op, c_op, val) \
 do { \
 	FAMILY_TEST(TEST_RETURN, bit, op, c_op, val); \
 } while (0)
 
+#define FETCH_FAMILY_TEST(bit, op, c_op, val) \
+do { \
+	FAMILY_TEST(TEST_FETCH, bit, op, c_op, val); \
+} while (0)
+
 #define TEST_ARGS(bit, op, init, ret, expect, args...) \
 do { \
 	atomic##bit##_set(&v, init); \
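
Hand-expanding TEST_FETCH for one case shows what is being asserted. With bit left empty, op = fetch_add, c_op = '+=' and an illustrative val of 5, the macro body becomes roughly:

	atomic_set(&v, v0);			/* start from a known value */
	r = v0;
	r += 5;					/* expected post-op value */
	BUG_ON(atomic_fetch_add(5, &v) != v0);	/* must return the OLD value */
	BUG_ON(atomic_read(&v) != r);		/* memory must hold the NEW value */

FETCH_FAMILY_TEST() then feeds TEST_FETCH through FAMILY_TEST() (defined earlier in this file, not visible in the hunk), which repeats the same check for the _relaxed/_acquire/_release variants of the op.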
@@ -114,6 +128,16 @@ static __init void test_atomic(void)
 	RETURN_FAMILY_TEST(, sub_return, -=, onestwos);
 	RETURN_FAMILY_TEST(, sub_return, -=, -one);
 
+	FETCH_FAMILY_TEST(, fetch_add, +=, onestwos);
+	FETCH_FAMILY_TEST(, fetch_add, +=, -one);
+	FETCH_FAMILY_TEST(, fetch_sub, -=, onestwos);
+	FETCH_FAMILY_TEST(, fetch_sub, -=, -one);
+
+	FETCH_FAMILY_TEST(, fetch_or, |=, v1);
+	FETCH_FAMILY_TEST(, fetch_and, &=, v1);
+	FETCH_FAMILY_TEST(, fetch_andnot, &= ~, v1);
+	FETCH_FAMILY_TEST(, fetch_xor, ^=, v1);
+
 	INC_RETURN_FAMILY_TEST(, v0);
 	DEC_RETURN_FAMILY_TEST(, v0);
 
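
One preprocessor subtlety in the calls above: fetch_andnot has no single C operator, so the test passes the two-token sequence '&= ~' as c_op. Inside TEST_FETCH, 'r c_op val' then pastes into an ordinary statement, roughly:

	r &= ~v1;	/* expected result: the v1 bits cleared in r */

so the same expected-value bookkeeping works for andnot without any special casing.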
@@ -154,6 +178,16 @@ static __init void test_atomic64(void)
 	RETURN_FAMILY_TEST(64, sub_return, -=, onestwos);
 	RETURN_FAMILY_TEST(64, sub_return, -=, -one);
 
+	FETCH_FAMILY_TEST(64, fetch_add, +=, onestwos);
+	FETCH_FAMILY_TEST(64, fetch_add, +=, -one);
+	FETCH_FAMILY_TEST(64, fetch_sub, -=, onestwos);
+	FETCH_FAMILY_TEST(64, fetch_sub, -=, -one);
+
+	FETCH_FAMILY_TEST(64, fetch_or, |=, v1);
+	FETCH_FAMILY_TEST(64, fetch_and, &=, v1);
+	FETCH_FAMILY_TEST(64, fetch_andnot, &= ~, v1);
+	FETCH_FAMILY_TEST(64, fetch_xor, ^=, v1);
+
 	INIT(v0);
 	atomic64_inc(&v);
 	r += one;
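
As a closing illustration of why callers want the fetch forms at all, here is a sketch (hypothetical code, not part of this commit) of a classic pattern: using the returned old value to detect the first 0 -> 1 transition of a counter:

	static atomic64_t users = ATOMIC64_INIT(0);

	static void get_user_ref(void)
	{
		/* atomic64_fetch_add() returns the value before the
		 * increment, so a return of 0 means we are the first
		 * user and must perform one-time setup. */
		if (atomic64_fetch_add(1, &users) == 0)
			first_user_setup();	/* hypothetical helper */
	}

With only atomic64_add_return() the test would have to compare against 1 instead, and before this series the and/or/xor ops had no value-returning form at all, as the first hunk shows.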