Merge tag 'locking-core-2023-06-27' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull locking updates from Ingo Molnar:

 - Introduce cmpxchg128() -- aka. the demise of cmpxchg_double()

   The cmpxchg128() family of functions is basically & functionally the
   same as cmpxchg_double(), but with a saner interface.

   Instead of a 6-parameter horror that forced the u128's u64/u64-halves
   layout details onto the interface and exposed users to complexity,
   fragility & bugs, use a natural 3-parameter interface with u128
   types.
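
   A minimal before/after sketch of a call site (hypothetical struct
   and variable names, purely to illustrate the interface change):

	struct pair {
		u64 lo, hi;	/* adjacent and 16-byte aligned */
	} __aligned(16);

	/* old: six parameters, u64 halves, returns a success flag */
	bool ok = cmpxchg_double(&p->lo, &p->hi,
				 old_lo, old_hi, new_lo, new_hi);

	/* new: three parameters, one u128, returns the old value */
	u128 prev = cmpxchg128((u128 *)p, old, new);
	bool ok2 = (prev == old);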

 - Restructure the generated atomic headers, and add kerneldoc comments
   for all of the generic atomic{,64,_long}_t operations.

   The generated definitions are much cleaner now, and come with
   documentation.
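
   The generated comments follow the standard kerneldoc shape; roughly
   like this (paraphrased, not quoted verbatim from the generated
   headers):

	/**
	 * raw_atomic_add_return() - atomic add with full ordering
	 * @i: int value to add
	 * @v: pointer to atomic_t
	 *
	 * Atomically updates @v to (@v + @i) with full ordering.
	 *
	 * Return: the updated value of @v.
	 */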

 - Implement lock_set_cmp_fn() in lockdep, for defining an ordering when
   taking multiple locks of the same type.

   This gets rid of one use of lockdep_set_novalidate_class() in the
   bcache code.
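
   Usage sketch, based on the bcache conversion in this series (field
   and helper names follow bcache and are slightly simplified):

	static int btree_lock_cmp_fn(const struct lockdep_map *_a,
				     const struct lockdep_map *_b)
	{
		const struct btree *a = container_of(_a, struct btree,
						     lock.dep_map);
		const struct btree *b = container_of(_b, struct btree,
						     lock.dep_map);

		/* <0, 0, >0 defines the only permitted nesting order:
		 * here, higher-level btree nodes must be taken first */
		return b->level - a->level;
	}

	/* at btree init time: */
	lock_set_cmp_fn(&btree->lock, btree_lock_cmp_fn,
			btree_lock_print_fn);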

 - Fix a raw_cpu_generic_try_cmpxchg() bug where unintended variable
   shadowing caused Clang to generate garbage code on certain ARM
   builds.
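
   Reduced to its essence, the shadowing looked like this (illustrative
   macro, not the kernel's exact code):

	#define TRY_CMPXCHG(ptr, ovalp, nval)				\
	({								\
		typeof(*(ptr)) __val = *(ptr), __old = *(ovalp);	\
		bool __ret = (__val == __old);				\
		if (__ret)						\
			*(ptr) = (nval);				\
		else							\
			*(ovalp) = __val;				\
		__ret;							\
	})

	/* a caller whose own local is also named '__old': */
	long __old = expected;
	TRY_CMPXCHG(ptr, &__old, newval);

   Inside the macro, '__old = *(ovalp)' expands to '__old = *(&__old)',
   a self-assignment of the freshly declared, uninitialized inner
   '__old', which the compiler is entitled to turn into garbage.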

* tag 'locking-core-2023-06-27' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (43 commits)
  locking/atomic: scripts: fix ${atomic}_dec_if_positive() kerneldoc
  percpu: Fix self-assignment of __old in raw_cpu_generic_try_cmpxchg()
  locking/atomic: treewide: delete arch_atomic_*() kerneldoc
  locking/atomic: docs: Add atomic operations to the driver basic API documentation
  locking/atomic: scripts: generate kerneldoc comments
  docs: scripts: kernel-doc: accept bitwise negation like ~@var
  locking/atomic: scripts: simplify raw_atomic*() definitions
  locking/atomic: scripts: simplify raw_atomic_long*() definitions
  locking/atomic: scripts: split pfx/name/sfx/order
  locking/atomic: scripts: restructure fallback ifdeffery
  locking/atomic: scripts: build raw_atomic_long*() directly
  locking/atomic: treewide: use raw_atomic*_<op>()
  locking/atomic: scripts: add trivial raw_atomic*_<op>()
  locking/atomic: scripts: factor out order template generation
  locking/atomic: scripts: remove leftover "${mult}"
  locking/atomic: scripts: remove bogus order parameter
  locking/atomic: xtensa: add preprocessor symbols
  locking/atomic: x86: add preprocessor symbols
  locking/atomic: sparc: add preprocessor symbols
  locking/atomic: sh: add preprocessor symbols
  ...
Author: Linus Torvalds
Date:   2023-06-27 14:14:30 -07:00

136 changed files with 10107 additions and 4346 deletions


@@ -53,7 +53,6 @@ preemption and interrupts::
this_cpu_add_return(pcp, val)
this_cpu_xchg(pcp, nval)
this_cpu_cmpxchg(pcp, oval, nval)
this_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
this_cpu_sub(pcp, val)
this_cpu_inc(pcp)
this_cpu_dec(pcp)
@@ -242,7 +241,6 @@ safe::
__this_cpu_add_return(pcp, val)
__this_cpu_xchg(pcp, nval)
__this_cpu_cmpxchg(pcp, oval, nval)
__this_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
__this_cpu_sub(pcp, val)
__this_cpu_inc(pcp)
__this_cpu_dec(pcp)


@@ -84,7 +84,13 @@ Reference counting
Atomics
-------
.. kernel-doc:: arch/x86/include/asm/atomic.h
.. kernel-doc:: include/linux/atomic/atomic-instrumented.h
:internal:
.. kernel-doc:: include/linux/atomic/atomic-arch-fallback.h
:internal:
.. kernel-doc:: include/linux/atomic/atomic-long.h
:internal:
Kernel objects manipulation


@@ -200,25 +200,6 @@ ATOMIC_OPS(xor, xor)
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
#define arch_atomic64_cmpxchg(v, old, new) \
(arch_cmpxchg(&((v)->counter), old, new))
#define arch_atomic64_xchg(v, new) \
(arch_xchg(&((v)->counter), new))
#define arch_atomic_cmpxchg(v, old, new) \
(arch_cmpxchg(&((v)->counter), old, new))
#define arch_atomic_xchg(v, new) \
(arch_xchg(&((v)->counter), new))
/**
* arch_atomic_fetch_add_unless - add unless the number is a given value
* @v: pointer of type atomic_t
* @a: the amount to add to v...
* @u: ...unless v is equal to u.
*
* Atomically adds @a to @v, so long as it was not @u.
* Returns the old value of @v.
*/
static __inline__ int arch_atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
int c, new, old;
@@ -242,15 +223,6 @@ static __inline__ int arch_atomic_fetch_add_unless(atomic_t *v, int a, int u)
}
#define arch_atomic_fetch_add_unless arch_atomic_fetch_add_unless
/**
* arch_atomic64_fetch_add_unless - add unless the number is a given value
* @v: pointer of type atomic64_t
* @a: the amount to add to v...
* @u: ...unless v is equal to u.
*
* Atomically adds @a to @v, so long as it was not @u.
* Returns the old value of @v.
*/
static __inline__ s64 arch_atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
{
s64 c, new, old;
@@ -274,13 +246,6 @@ static __inline__ s64 arch_atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u
}
#define arch_atomic64_fetch_add_unless arch_atomic64_fetch_add_unless
/*
* arch_atomic64_dec_if_positive - decrement by 1 if old value positive
* @v: pointer of type atomic_t
*
* The function returns the old value of *v minus 1, even if
* the atomic variable, v, was not decremented.
*/
static inline s64 arch_atomic64_dec_if_positive(atomic64_t *v)
{
s64 old, tmp;


@@ -81,6 +81,11 @@ static inline int arch_atomic_fetch_##op(int i, atomic_t *v) \
ATOMIC_OPS(add, +=, add)
ATOMIC_OPS(sub, -=, sub)
#define arch_atomic_fetch_add arch_atomic_fetch_add
#define arch_atomic_fetch_sub arch_atomic_fetch_sub
#define arch_atomic_add_return arch_atomic_add_return
#define arch_atomic_sub_return arch_atomic_sub_return
#undef ATOMIC_OPS
#define ATOMIC_OPS(op, c_op, asm_op) \
ATOMIC_OP(op, c_op, asm_op) \
@@ -92,7 +97,11 @@ ATOMIC_OPS(or, |=, or)
ATOMIC_OPS(xor, ^=, xor)
#define arch_atomic_andnot arch_atomic_andnot
#define arch_atomic_fetch_and arch_atomic_fetch_and
#define arch_atomic_fetch_andnot arch_atomic_fetch_andnot
#define arch_atomic_fetch_or arch_atomic_fetch_or
#define arch_atomic_fetch_xor arch_atomic_fetch_xor
#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP


@@ -22,30 +22,6 @@
#include <asm/atomic-spinlock.h>
#endif
#define arch_atomic_cmpxchg(v, o, n) \
({ \
arch_cmpxchg(&((v)->counter), (o), (n)); \
})
#ifdef arch_cmpxchg_relaxed
#define arch_atomic_cmpxchg_relaxed(v, o, n) \
({ \
arch_cmpxchg_relaxed(&((v)->counter), (o), (n)); \
})
#endif
#define arch_atomic_xchg(v, n) \
({ \
arch_xchg(&((v)->counter), (n)); \
})
#ifdef arch_xchg_relaxed
#define arch_atomic_xchg_relaxed(v, n) \
({ \
arch_xchg_relaxed(&((v)->counter), (n)); \
})
#endif
/*
* 64-bit atomics
*/


@@ -159,6 +159,7 @@ arch_atomic64_cmpxchg(atomic64_t *ptr, s64 expected, s64 new)
return prev;
}
#define arch_atomic64_cmpxchg arch_atomic64_cmpxchg
static inline s64 arch_atomic64_xchg(atomic64_t *ptr, s64 new)
{
@@ -179,14 +180,7 @@ static inline s64 arch_atomic64_xchg(atomic64_t *ptr, s64 new)
return prev;
}
/**
* arch_atomic64_dec_if_positive - decrement by 1 if old value positive
* @v: pointer of type atomic64_t
*
* The function returns the old value of *v minus 1, even if
* the atomic variable, v, was not decremented.
*/
#define arch_atomic64_xchg arch_atomic64_xchg
static inline s64 arch_atomic64_dec_if_positive(atomic64_t *v)
{
@@ -212,15 +206,6 @@ static inline s64 arch_atomic64_dec_if_positive(atomic64_t *v)
}
#define arch_atomic64_dec_if_positive arch_atomic64_dec_if_positive
/**
* arch_atomic64_fetch_add_unless - add unless the number is a given value
* @v: pointer of type atomic64_t
* @a: the amount to add to v...
* @u: ...unless v is equal to u.
*
* Atomically adds @a to @v, if it was not @u.
* Returns the old value of @v
*/
static inline s64 arch_atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
{
s64 old, temp;


@@ -394,6 +394,23 @@ ALT_UP_B(.L0_\@)
#endif
.endm
/*
* Raw SMP data memory barrier
*/
.macro __smp_dmb mode
#if __LINUX_ARM_ARCH__ >= 7
.ifeqs "\mode","arm"
dmb ish
.else
W(dmb) ish
.endif
#elif __LINUX_ARM_ARCH__ == 6
mcr p15, 0, r0, c7, c10, 5 @ dmb
#else
.error "Incompatible SMP platform"
#endif
.endm
#if defined(CONFIG_CPU_V7M)
/*
* setmode is used to assert to be in svc mode during boot. For v7-M


@@ -197,6 +197,16 @@ static inline int arch_atomic_fetch_##op(int i, atomic_t *v) \
return val; \
}
#define arch_atomic_add_return arch_atomic_add_return
#define arch_atomic_sub_return arch_atomic_sub_return
#define arch_atomic_fetch_add arch_atomic_fetch_add
#define arch_atomic_fetch_sub arch_atomic_fetch_sub
#define arch_atomic_fetch_and arch_atomic_fetch_and
#define arch_atomic_fetch_andnot arch_atomic_fetch_andnot
#define arch_atomic_fetch_or arch_atomic_fetch_or
#define arch_atomic_fetch_xor arch_atomic_fetch_xor
static inline int arch_atomic_cmpxchg(atomic_t *v, int old, int new)
{
int ret;
@@ -210,8 +220,7 @@ static inline int arch_atomic_cmpxchg(atomic_t *v, int old, int new)
return ret;
}
#define arch_atomic_fetch_andnot arch_atomic_fetch_andnot
#define arch_atomic_cmpxchg arch_atomic_cmpxchg
#endif /* __LINUX_ARM_ARCH__ */
@@ -240,8 +249,6 @@ ATOMIC_OPS(xor, ^=, eor)
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
#define arch_atomic_xchg(v, new) (arch_xchg(&((v)->counter), new))
#ifndef CONFIG_GENERIC_ATOMIC64
typedef struct {
s64 counter;


@@ -14,14 +14,35 @@
* ops which are SMP safe even on a UP kernel.
*/
/*
* Unordered
*/
#define sync_set_bit(nr, p) _set_bit(nr, p)
#define sync_clear_bit(nr, p) _clear_bit(nr, p)
#define sync_change_bit(nr, p) _change_bit(nr, p)
#define sync_test_and_set_bit(nr, p) _test_and_set_bit(nr, p)
#define sync_test_and_clear_bit(nr, p) _test_and_clear_bit(nr, p)
#define sync_test_and_change_bit(nr, p) _test_and_change_bit(nr, p)
#define sync_test_bit(nr, addr) test_bit(nr, addr)
#define arch_sync_cmpxchg arch_cmpxchg
/*
* Fully ordered
*/
int _sync_test_and_set_bit(int nr, volatile unsigned long * p);
#define sync_test_and_set_bit(nr, p) _sync_test_and_set_bit(nr, p)
int _sync_test_and_clear_bit(int nr, volatile unsigned long * p);
#define sync_test_and_clear_bit(nr, p) _sync_test_and_clear_bit(nr, p)
int _sync_test_and_change_bit(int nr, volatile unsigned long * p);
#define sync_test_and_change_bit(nr, p) _sync_test_and_change_bit(nr, p)
#define arch_sync_cmpxchg(ptr, old, new) \
({ \
__typeof__(*(ptr)) __ret; \
__smp_mb__before_atomic(); \
__ret = arch_cmpxchg_relaxed((ptr), (old), (new)); \
__smp_mb__after_atomic(); \
__ret; \
})
#endif


@@ -28,7 +28,7 @@ UNWIND( .fnend )
ENDPROC(\name )
.endm
.macro testop, name, instr, store
.macro __testop, name, instr, store, barrier
ENTRY( \name )
UNWIND( .fnstart )
ands ip, r1, #3
@@ -38,7 +38,7 @@ UNWIND( .fnstart )
mov r0, r0, lsr #5
add r1, r1, r0, lsl #2 @ Get word offset
mov r3, r2, lsl r3 @ create mask
smp_dmb
\barrier
#if __LINUX_ARM_ARCH__ >= 7 && defined(CONFIG_SMP)
.arch_extension mp
ALT_SMP(W(pldw) [r1])
@@ -50,13 +50,21 @@ UNWIND( .fnstart )
strex ip, r2, [r1]
cmp ip, #0
bne 1b
smp_dmb
\barrier
cmp r0, #0
movne r0, #1
2: bx lr
UNWIND( .fnend )
ENDPROC(\name )
.endm
.macro testop, name, instr, store
__testop \name, \instr, \store, smp_dmb
.endm
.macro sync_testop, name, instr, store
__testop \name, \instr, \store, __smp_dmb
.endm
#else
.macro bitop, name, instr
ENTRY( \name )


@@ -10,3 +10,7 @@
.text
testop _test_and_change_bit, eor, str
#if __LINUX_ARM_ARCH__ >= 6
sync_testop _sync_test_and_change_bit, eor, str
#endif


@@ -10,3 +10,7 @@
.text
testop _test_and_clear_bit, bicne, strne
#if __LINUX_ARM_ARCH__ >= 6
sync_testop _sync_test_and_clear_bit, bicne, strne
#endif


@@ -10,3 +10,7 @@
.text
testop _test_and_set_bit, orreq, streq
#if __LINUX_ARM_ARCH__ >= 6
sync_testop _sync_test_and_set_bit, orreq, streq
#endif


@@ -142,24 +142,6 @@ static __always_inline long arch_atomic64_dec_if_positive(atomic64_t *v)
#define arch_atomic_fetch_xor_release arch_atomic_fetch_xor_release
#define arch_atomic_fetch_xor arch_atomic_fetch_xor
#define arch_atomic_xchg_relaxed(v, new) \
arch_xchg_relaxed(&((v)->counter), (new))
#define arch_atomic_xchg_acquire(v, new) \
arch_xchg_acquire(&((v)->counter), (new))
#define arch_atomic_xchg_release(v, new) \
arch_xchg_release(&((v)->counter), (new))
#define arch_atomic_xchg(v, new) \
arch_xchg(&((v)->counter), (new))
#define arch_atomic_cmpxchg_relaxed(v, old, new) \
arch_cmpxchg_relaxed(&((v)->counter), (old), (new))
#define arch_atomic_cmpxchg_acquire(v, old, new) \
arch_cmpxchg_acquire(&((v)->counter), (old), (new))
#define arch_atomic_cmpxchg_release(v, old, new) \
arch_cmpxchg_release(&((v)->counter), (old), (new))
#define arch_atomic_cmpxchg(v, old, new) \
arch_cmpxchg(&((v)->counter), (old), (new))
#define arch_atomic_andnot arch_atomic_andnot
/*
@@ -209,16 +191,6 @@ static __always_inline long arch_atomic64_dec_if_positive(atomic64_t *v)
#define arch_atomic64_fetch_xor_release arch_atomic64_fetch_xor_release
#define arch_atomic64_fetch_xor arch_atomic64_fetch_xor
#define arch_atomic64_xchg_relaxed arch_atomic_xchg_relaxed
#define arch_atomic64_xchg_acquire arch_atomic_xchg_acquire
#define arch_atomic64_xchg_release arch_atomic_xchg_release
#define arch_atomic64_xchg arch_atomic_xchg
#define arch_atomic64_cmpxchg_relaxed arch_atomic_cmpxchg_relaxed
#define arch_atomic64_cmpxchg_acquire arch_atomic_cmpxchg_acquire
#define arch_atomic64_cmpxchg_release arch_atomic_cmpxchg_release
#define arch_atomic64_cmpxchg arch_atomic_cmpxchg
#define arch_atomic64_andnot arch_atomic64_andnot
#define arch_atomic64_dec_if_positive arch_atomic64_dec_if_positive


@@ -294,38 +294,46 @@ __CMPXCHG_CASE( , , mb_, 64, dmb ish, , l, "memory", L)
#undef __CMPXCHG_CASE
#define __CMPXCHG_DBL(name, mb, rel, cl) \
static __always_inline long \
__ll_sc__cmpxchg_double##name(unsigned long old1, \
unsigned long old2, \
unsigned long new1, \
unsigned long new2, \
volatile void *ptr) \
union __u128_halves {
u128 full;
struct {
u64 low, high;
};
};
#define __CMPXCHG128(name, mb, rel, cl...) \
static __always_inline u128 \
__ll_sc__cmpxchg128##name(volatile u128 *ptr, u128 old, u128 new) \
{ \
unsigned long tmp, ret; \
union __u128_halves r, o = { .full = (old) }, \
n = { .full = (new) }; \
unsigned int tmp; \
\
asm volatile("// __cmpxchg_double" #name "\n" \
" prfm pstl1strm, %2\n" \
"1: ldxp %0, %1, %2\n" \
" eor %0, %0, %3\n" \
" eor %1, %1, %4\n" \
" orr %1, %0, %1\n" \
" cbnz %1, 2f\n" \
" st" #rel "xp %w0, %5, %6, %2\n" \
" cbnz %w0, 1b\n" \
asm volatile("// __cmpxchg128" #name "\n" \
" prfm pstl1strm, %[v]\n" \
"1: ldxp %[rl], %[rh], %[v]\n" \
" cmp %[rl], %[ol]\n" \
" ccmp %[rh], %[oh], 0, eq\n" \
" b.ne 2f\n" \
" st" #rel "xp %w[tmp], %[nl], %[nh], %[v]\n" \
" cbnz %w[tmp], 1b\n" \
" " #mb "\n" \
"2:" \
: "=&r" (tmp), "=&r" (ret), "+Q" (*(__uint128_t *)ptr) \
: "r" (old1), "r" (old2), "r" (new1), "r" (new2) \
: cl); \
: [v] "+Q" (*(u128 *)ptr), \
[rl] "=&r" (r.low), [rh] "=&r" (r.high), \
[tmp] "=&r" (tmp) \
: [ol] "r" (o.low), [oh] "r" (o.high), \
[nl] "r" (n.low), [nh] "r" (n.high) \
: "cc", ##cl); \
\
return ret; \
return r.full; \
}
__CMPXCHG_DBL( , , , )
__CMPXCHG_DBL(_mb, dmb ish, l, "memory")
__CMPXCHG128( , , )
__CMPXCHG128(_mb, dmb ish, l, "memory")
#undef __CMPXCHG128
#undef __CMPXCHG_DBL
#undef K
#endif /* __ASM_ATOMIC_LL_SC_H */


@@ -281,40 +281,35 @@ __CMPXCHG_CASE(x, , mb_, 64, al, "memory")
#undef __CMPXCHG_CASE
#define __CMPXCHG_DBL(name, mb, cl...) \
static __always_inline long \
__lse__cmpxchg_double##name(unsigned long old1, \
unsigned long old2, \
unsigned long new1, \
unsigned long new2, \
volatile void *ptr) \
#define __CMPXCHG128(name, mb, cl...) \
static __always_inline u128 \
__lse__cmpxchg128##name(volatile u128 *ptr, u128 old, u128 new) \
{ \
unsigned long oldval1 = old1; \
unsigned long oldval2 = old2; \
register unsigned long x0 asm ("x0") = old1; \
register unsigned long x1 asm ("x1") = old2; \
register unsigned long x2 asm ("x2") = new1; \
register unsigned long x3 asm ("x3") = new2; \
union __u128_halves r, o = { .full = (old) }, \
n = { .full = (new) }; \
register unsigned long x0 asm ("x0") = o.low; \
register unsigned long x1 asm ("x1") = o.high; \
register unsigned long x2 asm ("x2") = n.low; \
register unsigned long x3 asm ("x3") = n.high; \
register unsigned long x4 asm ("x4") = (unsigned long)ptr; \
\
asm volatile( \
__LSE_PREAMBLE \
" casp" #mb "\t%[old1], %[old2], %[new1], %[new2], %[v]\n"\
" eor %[old1], %[old1], %[oldval1]\n" \
" eor %[old2], %[old2], %[oldval2]\n" \
" orr %[old1], %[old1], %[old2]" \
: [old1] "+&r" (x0), [old2] "+&r" (x1), \
[v] "+Q" (*(__uint128_t *)ptr) \
[v] "+Q" (*(u128 *)ptr) \
: [new1] "r" (x2), [new2] "r" (x3), [ptr] "r" (x4), \
[oldval1] "r" (oldval1), [oldval2] "r" (oldval2) \
[oldval1] "r" (o.low), [oldval2] "r" (o.high) \
: cl); \
\
return x0; \
r.low = x0; r.high = x1; \
\
return r.full; \
}
__CMPXCHG_DBL( , )
__CMPXCHG_DBL(_mb, al, "memory")
__CMPXCHG128( , )
__CMPXCHG128(_mb, al, "memory")
#undef __CMPXCHG_DBL
#undef __CMPXCHG128
#endif /* __ASM_ATOMIC_LSE_H */


@@ -130,21 +130,18 @@ __CMPXCHG_CASE(mb_, 64)
#undef __CMPXCHG_CASE
#define __CMPXCHG_DBL(name) \
static inline long __cmpxchg_double##name(unsigned long old1, \
unsigned long old2, \
unsigned long new1, \
unsigned long new2, \
volatile void *ptr) \
#define __CMPXCHG128(name) \
static inline u128 __cmpxchg128##name(volatile u128 *ptr, \
u128 old, u128 new) \
{ \
return __lse_ll_sc_body(_cmpxchg_double##name, \
old1, old2, new1, new2, ptr); \
return __lse_ll_sc_body(_cmpxchg128##name, \
ptr, old, new); \
}
__CMPXCHG_DBL( )
__CMPXCHG_DBL(_mb)
__CMPXCHG128( )
__CMPXCHG128(_mb)
#undef __CMPXCHG_DBL
#undef __CMPXCHG128
#define __CMPXCHG_GEN(sfx) \
static __always_inline unsigned long __cmpxchg##sfx(volatile void *ptr, \
@@ -198,34 +195,17 @@ __CMPXCHG_GEN(_mb)
#define arch_cmpxchg64 arch_cmpxchg
#define arch_cmpxchg64_local arch_cmpxchg_local
/* cmpxchg_double */
#define system_has_cmpxchg_double() 1
/* cmpxchg128 */
#define system_has_cmpxchg128() 1
#define __cmpxchg_double_check(ptr1, ptr2) \
#define arch_cmpxchg128(ptr, o, n) \
({ \
if (sizeof(*(ptr1)) != 8) \
BUILD_BUG(); \
VM_BUG_ON((unsigned long *)(ptr2) - (unsigned long *)(ptr1) != 1); \
__cmpxchg128_mb((ptr), (o), (n)); \
})
#define arch_cmpxchg_double(ptr1, ptr2, o1, o2, n1, n2) \
#define arch_cmpxchg128_local(ptr, o, n) \
({ \
int __ret; \
__cmpxchg_double_check(ptr1, ptr2); \
__ret = !__cmpxchg_double_mb((unsigned long)(o1), (unsigned long)(o2), \
(unsigned long)(n1), (unsigned long)(n2), \
ptr1); \
__ret; \
})
#define arch_cmpxchg_double_local(ptr1, ptr2, o1, o2, n1, n2) \
({ \
int __ret; \
__cmpxchg_double_check(ptr1, ptr2); \
__ret = !__cmpxchg_double((unsigned long)(o1), (unsigned long)(o2), \
(unsigned long)(n1), (unsigned long)(n2), \
ptr1); \
__ret; \
__cmpxchg128((ptr), (o), (n)); \
})
#define __CMPWAIT_CASE(w, sfx, sz) \


@@ -140,17 +140,11 @@ PERCPU_RET_OP(add, add, ldadd)
* re-enabling preemption for preemptible kernels, but doing that in a way
* which builds inside a module would mean messing directly with the preempt
* count. If you do this, peterz and tglx will hunt you down.
*
* Not to mention it'll break the actual preemption model for missing a
* preemption point when TIF_NEED_RESCHED gets set while preemption is
* disabled.
*/
#define this_cpu_cmpxchg_double_8(ptr1, ptr2, o1, o2, n1, n2) \
({ \
int __ret; \
preempt_disable_notrace(); \
__ret = cmpxchg_double_local( raw_cpu_ptr(&(ptr1)), \
raw_cpu_ptr(&(ptr2)), \
o1, o2, n1, n2); \
preempt_enable_notrace(); \
__ret; \
})
#define _pcp_protect(op, pcp, ...) \
({ \
@@ -240,6 +234,22 @@ PERCPU_RET_OP(add, add, ldadd)
#define this_cpu_cmpxchg_8(pcp, o, n) \
_pcp_protect_return(cmpxchg_relaxed, pcp, o, n)
#define this_cpu_cmpxchg64(pcp, o, n) this_cpu_cmpxchg_8(pcp, o, n)
#define this_cpu_cmpxchg128(pcp, o, n) \
({ \
typedef typeof(pcp) pcp_op_T__; \
u128 old__, new__, ret__; \
pcp_op_T__ *ptr__; \
old__ = o; \
new__ = n; \
preempt_disable_notrace(); \
ptr__ = raw_cpu_ptr(&(pcp)); \
ret__ = cmpxchg128_local((void *)ptr__, old__, new__); \
preempt_enable_notrace(); \
ret__; \
})
#ifdef __KVM_NVHE_HYPERVISOR__
extern unsigned long __hyp_per_cpu_offset(unsigned int cpu);
#define __per_cpu_offset


@@ -195,41 +195,6 @@ arch_atomic_dec_if_positive(atomic_t *v)
}
#define arch_atomic_dec_if_positive arch_atomic_dec_if_positive
#define ATOMIC_OP() \
static __always_inline \
int arch_atomic_xchg_relaxed(atomic_t *v, int n) \
{ \
return __xchg_relaxed(n, &(v->counter), 4); \
} \
static __always_inline \
int arch_atomic_cmpxchg_relaxed(atomic_t *v, int o, int n) \
{ \
return __cmpxchg_relaxed(&(v->counter), o, n, 4); \
} \
static __always_inline \
int arch_atomic_cmpxchg_acquire(atomic_t *v, int o, int n) \
{ \
return __cmpxchg_acquire(&(v->counter), o, n, 4); \
} \
static __always_inline \
int arch_atomic_cmpxchg(atomic_t *v, int o, int n) \
{ \
return __cmpxchg(&(v->counter), o, n, 4); \
}
#define ATOMIC_OPS() \
ATOMIC_OP()
ATOMIC_OPS()
#define arch_atomic_xchg_relaxed arch_atomic_xchg_relaxed
#define arch_atomic_cmpxchg_relaxed arch_atomic_cmpxchg_relaxed
#define arch_atomic_cmpxchg_acquire arch_atomic_cmpxchg_acquire
#define arch_atomic_cmpxchg arch_atomic_cmpxchg
#undef ATOMIC_OPS
#undef ATOMIC_OP
#else
#include <asm-generic/atomic.h>
#endif


@@ -28,58 +28,8 @@ static inline void arch_atomic_set(atomic_t *v, int new)
#define arch_atomic_set_release(v, i) arch_atomic_set((v), (i))
/**
* arch_atomic_read - reads a word, atomically
* @v: pointer to atomic value
*
* Assumes all word reads on our architecture are atomic.
*/
#define arch_atomic_read(v) READ_ONCE((v)->counter)
/**
* arch_atomic_xchg - atomic
* @v: pointer to memory to change
* @new: new value (technically passed in a register -- see xchg)
*/
#define arch_atomic_xchg(v, new) (arch_xchg(&((v)->counter), (new)))
/**
* arch_atomic_cmpxchg - atomic compare-and-exchange values
* @v: pointer to value to change
* @old: desired old value to match
* @new: new value to put in
*
* Parameters are then pointer, value-in-register, value-in-register,
* and the output is the old value.
*
* Apparently this is complicated for archs that don't support
* the memw_locked like we do (or it's broken or whatever).
*
* Kind of the lynchpin of the rest of the generically defined routines.
* Remember V2 had that bug with dotnew predicate set by memw_locked.
*
* "old" is "expected" old val, __oldval is actual old value
*/
static inline int arch_atomic_cmpxchg(atomic_t *v, int old, int new)
{
int __oldval;
asm volatile(
"1: %0 = memw_locked(%1);\n"
" { P0 = cmp.eq(%0,%2);\n"
" if (!P0.new) jump:nt 2f; }\n"
" memw_locked(%1,P0) = %3;\n"
" if (!P0) jump 1b;\n"
"2:\n"
: "=&r" (__oldval)
: "r" (&v->counter), "r" (old), "r" (new)
: "memory", "p0"
);
return __oldval;
}
#define ATOMIC_OP(op) \
static inline void arch_atomic_##op(int i, atomic_t *v) \
{ \
@@ -135,6 +85,11 @@ static inline int arch_atomic_fetch_##op(int i, atomic_t *v) \
ATOMIC_OPS(add)
ATOMIC_OPS(sub)
#define arch_atomic_add_return arch_atomic_add_return
#define arch_atomic_sub_return arch_atomic_sub_return
#define arch_atomic_fetch_add arch_atomic_fetch_add
#define arch_atomic_fetch_sub arch_atomic_fetch_sub
#undef ATOMIC_OPS
#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_FETCH_OP(op)
@@ -142,21 +97,15 @@ ATOMIC_OPS(and)
ATOMIC_OPS(or)
ATOMIC_OPS(xor)
#define arch_atomic_fetch_and arch_atomic_fetch_and
#define arch_atomic_fetch_or arch_atomic_fetch_or
#define arch_atomic_fetch_xor arch_atomic_fetch_xor
#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
/**
* arch_atomic_fetch_add_unless - add unless the number is a given value
* @v: pointer to value
* @a: amount to add
* @u: unless value is equal to u
*
* Returns old value.
*
*/
static inline int arch_atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
int __oldval;

Some files were not shown because too many files have changed in this diff.