Merge branch 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull locking and atomic updates from Ingo Molnar:
 "Main changes in this cycle are:

   - Extend atomic primitives with coherent logic op primitives
     (atomic_{or,and,xor}()) and deprecate the old partial APIs
     (atomic_{set,clear}_mask())

      The old ops were incoherent, with incompatible signatures across
      architectures and incomplete support.  Now every architecture
      supports the primitives consistently (by Peter Zijlstra); a usage
      sketch follows this list.

   - Generic support for 'relaxed atomics':

       - _acquire/release/relaxed() flavours of xchg(), cmpxchg() and {add,sub}_return()
       - atomic_read_acquire()
       - atomic_set_release()

      This came out of porting qrwlock code to arm64 (by Will Deacon)

   - Clean up the fragile static_key APIs that were causing repeat bugs,
     by introducing a new one:

       DEFINE_STATIC_KEY_TRUE(name);
       DEFINE_STATIC_KEY_FALSE(name);

     which define a key of different types with an initial true/false
     value.

     Then allow:

       static_branch_likely()
       static_branch_unlikely()

     to take a key of either type and emit the right instruction for the
     case.  To be able to know the 'type' of the static key we encode it
     in the jump entry (by Peter Zijlstra)

   - Static key self-tests (by Jason Baron)

   - qrwlock optimizations (by Waiman Long)

   - small futex enhancements (by Davidlohr Bueso)

   - ... and misc other changes"
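
To make the first two items above concrete, here is a minimal sketch of
moving code from the old mask APIs to the new coherent logic ops, and of
the new ordering suffixes. The 'flags' word, the MY_IRQ_ENABLED bit and the
helper names are made up for illustration, not taken from the tree:

	#include <linux/atomic.h>

	static atomic_t flags = ATOMIC_INIT(0);	/* hypothetical flag word */
	#define MY_IRQ_ENABLED	0x1		/* made-up bit */

	static void flag_irq_enabled(void)
	{
		/* was: atomic_set_mask(MY_IRQ_ENABLED, &flags); */
		atomic_or(MY_IRQ_ENABLED, &flags);
	}

	static void flag_irq_disabled(void)
	{
		/* was: atomic_clear_mask(MY_IRQ_ENABLED, &flags); */
		atomic_and(~MY_IRQ_ENABLED, &flags);
	}

	static int bump_stat(atomic_t *ctr)
	{
		/* _relaxed implies no ordering; add explicit barriers if needed */
		return atomic_add_return_relaxed(1, ctr);
	}

The same suffix scheme applies to xchg() and cmpxchg(): xchg_relaxed(),
cmpxchg_acquire(), cmpxchg_release() and friends, while the unsuffixed
names keep their fully ordered semantics.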

* 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (63 commits)
  jump_label/x86: Work around asm build bug on older/backported GCCs
  locking, ARM, atomics: Define our SMP atomics in terms of _relaxed() operations
  locking, include/llist: Use linux/atomic.h instead of asm/cmpxchg.h
  locking/qrwlock: Make use of _{acquire|release|relaxed}() atomics
  locking/qrwlock: Implement queue_write_unlock() using smp_store_release()
  locking/lockref: Remove homebrew cmpxchg64_relaxed() macro definition
  locking, asm-generic: Add _{relaxed|acquire|release}() variants for 'atomic_long_t'
  locking, asm-generic: Rework atomic-long.h to avoid bulk code duplication
  locking/atomics: Add _{acquire|release|relaxed}() variants of some atomic operations
  locking, compiler.h: Cast away attributes in the WRITE_ONCE() magic
  locking/static_keys: Make verify_keys() static
  jump label, locking/static_keys: Update docs
  locking/static_keys: Provide a selftest
  jump_label: Provide a self-test
  s390/uaccess, locking/static_keys: employ static_branch_likely()
  x86, tsc, locking/static_keys: Employ static_branch_likely()
  locking/static_keys: Add selftest
  locking/static_keys: Add a new static_key interface
  locking/static_keys: Rework update logic
  locking/static_keys: Add static_key_{en,dis}able() helpers
  ...
Committed by Linus Torvalds on 2015-09-03 15:46:07 -07:00
139 changed files with 2428 additions and 3588 deletions


@@ -266,7 +266,9 @@ with the given old and new values. Like all atomic_xxx operations,
atomic_cmpxchg will only satisfy its atomicity semantics as long as all
other accesses of *v are performed through atomic_xxx operations.
atomic_cmpxchg must provide explicit memory barriers around the operation.
atomic_cmpxchg must provide explicit memory barriers around the operation,
although if the comparison fails then no memory ordering guarantees are
required.
The semantics for atomic_cmpxchg are the same as those defined for 'cas'
below.
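
The relaxed-on-failure rule matters for the classic compare-and-swap retry
loop; a sketch (plain counter, purely illustrative):

	static void add_to_counter(atomic_t *v, int a)
	{
		int old = atomic_read(v);
		int seen;

		for (;;) {
			seen = atomic_cmpxchg(v, old, old + a);
			if (seen == old)
				break;	/* success: full barriers around the update */
			old = seen;	/* failure: no ordering guaranteed, just retry */
		}
	}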


@@ -15,6 +15,10 @@ o fail_page_alloc
injects page allocation failures. (alloc_pages(), get_free_pages(), ...)
o fail_futex
injects futex deadlock and uaddr fault errors.
o fail_make_request
injects disk IO errors on devices permitted by setting
@@ -113,6 +117,12 @@ configuration of fault-injection capabilities.
specifies the minimum page allocation order to be injected
failures.
- /sys/kernel/debug/fail_futex/ignore-private:
Format: { 'Y' | 'N' }
default is 'N', setting it to 'Y' will disable failure injections
when dealing with private (address space) futexes.
o Boot option
In order to inject faults while debugfs is not available (early boot time),
@@ -121,6 +131,7 @@ use the boot option:
failslab=
fail_page_alloc=
fail_make_request=
fail_futex=
mmc_core.fail_request=<interval>,<probability>,<space>,<times>
How to add new fault injection capability


@@ -2327,9 +2327,7 @@ about the state (old or new) implies an SMP-conditional general memory barrier
explicit lock operations, described later). These include:
xchg();
cmpxchg();
atomic_xchg(); atomic_long_xchg();
atomic_cmpxchg(); atomic_long_cmpxchg();
atomic_inc_return(); atomic_long_inc_return();
atomic_dec_return(); atomic_long_dec_return();
atomic_add_return(); atomic_long_add_return();
@@ -2342,7 +2340,9 @@ explicit lock operations, described later). These include:
test_and_clear_bit();
test_and_change_bit();
/* when succeeds (returns 1) */
/* when succeeds */
cmpxchg();
atomic_cmpxchg(); atomic_long_cmpxchg();
atomic_add_unless(); atomic_long_add_unless();
These are used for such things as implementing ACQUIRE-class and RELEASE-class


@@ -1,7 +1,22 @@
Static Keys
-----------
By: Jason Baron <jbaron@redhat.com>
DEPRECATED API:
The use of 'struct static_key' directly is now DEPRECATED. In addition,
static_key_{true,false}() is also DEPRECATED. I.e., DO NOT use the following:
struct static_key false = STATIC_KEY_INIT_FALSE;
struct static_key true = STATIC_KEY_INIT_TRUE;
static_key_true()
static_key_false()
The updated API replacements are:
DEFINE_STATIC_KEY_TRUE(key);
DEFINE_STATIC_KEY_FALSE(key);
static_branch_likely()
static_branch_unlikely()
0) Abstract
@@ -9,22 +24,22 @@ Static keys allows the inclusion of seldom used features in
performance-sensitive fast-path kernel code, via a GCC feature and a code
patching technique. A quick example:
struct static_key key = STATIC_KEY_INIT_FALSE;
DEFINE_STATIC_KEY_FALSE(key);
...
if (static_key_false(&key))
if (static_branch_unlikely(&key))
do unlikely code
else
do likely code
...
static_key_slow_inc();
static_branch_enable(&key);
...
static_key_slow_inc();
static_branch_disable(&key);
...
The static_key_false() branch will be generated into the code with as little
The static_branch_unlikely() branch will be generated into the code with as little
impact to the likely code path as possible.
@@ -56,7 +71,7 @@ the branch site to change the branch direction.
For example, if we have a simple branch that is disabled by default:
if (static_key_false(&key))
if (static_branch_unlikely(&key))
printk("I am the true branch\n");
Thus, by default the 'printk' will not be emitted. And the code generated will
@@ -75,68 +90,55 @@ the basis for the static keys facility.
In order to make use of this optimization you must first define a key:
struct static_key key;
Which is initialized as:
struct static_key key = STATIC_KEY_INIT_TRUE;
DEFINE_STATIC_KEY_TRUE(key);
or:
struct static_key key = STATIC_KEY_INIT_FALSE;
DEFINE_STATIC_KEY_FALSE(key);
If the key is not initialized, it is default false. The 'struct static_key',
must be a 'global'. That is, it can't be allocated on the stack or dynamically
The key must be global, that is, it can't be allocated on the stack or dynamically
allocated at run-time.
The key is then used in code as:
if (static_key_false(&key))
if (static_branch_unlikely(&key))
do unlikely code
else
do likely code
Or:
if (static_key_true(&key))
if (static_branch_likely(&key))
do likely code
else
do unlikely code
A key that is initialized via 'STATIC_KEY_INIT_FALSE', must be used in a
'static_key_false()' construct. Likewise, a key initialized via
'STATIC_KEY_INIT_TRUE' must be used in a 'static_key_true()' construct. A
single key can be used in many branches, but all the branches must match the
way that the key has been initialized.
Keys defined via DEFINE_STATIC_KEY_TRUE() or DEFINE_STATIC_KEY_FALSE() may
be used in either static_branch_likely() or static_branch_unlikely()
statements.
The branch(es) can then be switched via:
Branch(es) can be set true via:
static_key_slow_inc(&key);
static_branch_enable(&key);
or false via:
static_branch_disable(&key);
The branch(es) can then be switched via reference counts:
static_branch_inc(&key);
...
static_key_slow_dec(&key);
static_branch_dec(&key);
Thus, 'static_key_slow_inc()' means 'make the branch true', and
'static_key_slow_dec()' means 'make the branch false' with appropriate
Thus, 'static_branch_inc()' means 'make the branch true', and
'static_branch_dec()' means 'make the branch false' with appropriate
reference counting. For example, if the key is initialized true, a
static_key_slow_dec(), will switch the branch to false. And a subsequent
static_key_slow_inc(), will change the branch back to true. Likewise, if the
key is initialized false, a 'static_key_slow_inc()', will change the branch to
true. And then a 'static_key_slow_dec()', will again make the branch false.
An example usage in the kernel is the implementation of tracepoints:
static inline void trace_##name(proto) \
{ \
if (static_key_false(&__tracepoint_##name.key)) \
__DO_TRACE(&__tracepoint_##name, \
TP_PROTO(data_proto), \
TP_ARGS(data_args), \
TP_CONDITION(cond)); \
}
Tracepoints are disabled by default, and can be placed in performance critical
pieces of the kernel. Thus, by using a static key, the tracepoints can have
absolutely minimal impact when not in use.
static_branch_dec(), will switch the branch to false. And a subsequent
static_branch_inc(), will change the branch back to true. Likewise, if the
key is initialized false, a 'static_branch_inc()', will change the branch to
true. And then a 'static_branch_dec()', will again make the branch false.
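
Putting the new interface together, a minimal sketch (the key name, the
rare-path function and the knob that flips it are hypothetical):

	#include <linux/jump_label.h>

	DEFINE_STATIC_KEY_FALSE(my_feature_key);

	extern void do_rare_feature_work(void);	/* hypothetical slow path */

	void hot_path(void)
	{
		if (static_branch_unlikely(&my_feature_key))
			do_rare_feature_work();	/* out of line until enabled */
		/* the fast path falls straight through while the key is false */
	}

	/* e.g. called from a sysfs/debugfs write handler */
	void set_my_feature(bool on)
	{
		if (on)
			static_branch_enable(&my_feature_key);
		else
			static_branch_disable(&my_feature_key);
	}
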
4) Architecture level code patching interface, 'jump labels'
@@ -150,9 +152,12 @@ simply fall back to a traditional, load, test, and jump sequence.
* #define JUMP_LABEL_NOP_SIZE, see: arch/x86/include/asm/jump_label.h
* __always_inline bool arch_static_branch(struct static_key *key), see:
* __always_inline bool arch_static_branch(struct static_key *key, bool branch), see:
arch/x86/include/asm/jump_label.h
* __always_inline bool arch_static_branch_jump(struct static_key *key, bool branch),
see: arch/x86/include/asm/jump_label.h
* void arch_jump_label_transform(struct jump_entry *entry, enum jump_label_type type),
see: arch/x86/kernel/jump_label.c
@@ -173,7 +178,7 @@ SYSCALL_DEFINE0(getppid)
{
int pid;
+ if (static_key_false(&key))
+ if (static_branch_unlikely(&key))
+ printk("I am the true branch\n");
rcu_read_lock();


@@ -71,6 +71,12 @@ config JUMP_LABEL
( On 32-bit x86, the necessary options added to the compiler
flags may increase the size of the kernel slightly. )
config STATIC_KEYS_SELFTEST
bool "Static key selftest"
depends on JUMP_LABEL
help
Boot time self-test of the branch patching code.
config OPTPROBES
def_bool y
depends on KPROBES && HAVE_OPTPROBES


@@ -29,13 +29,13 @@
* branch back to restart the operation.
*/
#define ATOMIC_OP(op) \
#define ATOMIC_OP(op, asm_op) \
static __inline__ void atomic_##op(int i, atomic_t * v) \
{ \
unsigned long temp; \
__asm__ __volatile__( \
"1: ldl_l %0,%1\n" \
" " #op "l %0,%2,%0\n" \
" " #asm_op " %0,%2,%0\n" \
" stl_c %0,%1\n" \
" beq %0,2f\n" \
".subsection 2\n" \
@@ -45,15 +45,15 @@ static __inline__ void atomic_##op(int i, atomic_t * v) \
:"Ir" (i), "m" (v->counter)); \
} \
#define ATOMIC_OP_RETURN(op) \
#define ATOMIC_OP_RETURN(op, asm_op) \
static inline int atomic_##op##_return(int i, atomic_t *v) \
{ \
long temp, result; \
smp_mb(); \
__asm__ __volatile__( \
"1: ldl_l %0,%1\n" \
" " #op "l %0,%3,%2\n" \
" " #op "l %0,%3,%0\n" \
" " #asm_op " %0,%3,%2\n" \
" " #asm_op " %0,%3,%0\n" \
" stl_c %0,%1\n" \
" beq %0,2f\n" \
".subsection 2\n" \
@@ -65,13 +65,13 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
return result; \
}
#define ATOMIC64_OP(op) \
#define ATOMIC64_OP(op, asm_op) \
static __inline__ void atomic64_##op(long i, atomic64_t * v) \
{ \
unsigned long temp; \
__asm__ __volatile__( \
"1: ldq_l %0,%1\n" \
" " #op "q %0,%2,%0\n" \
" " #asm_op " %0,%2,%0\n" \
" stq_c %0,%1\n" \
" beq %0,2f\n" \
".subsection 2\n" \
@@ -81,15 +81,15 @@ static __inline__ void atomic64_##op(long i, atomic64_t * v) \
:"Ir" (i), "m" (v->counter)); \
} \
#define ATOMIC64_OP_RETURN(op) \
#define ATOMIC64_OP_RETURN(op, asm_op) \
static __inline__ long atomic64_##op##_return(long i, atomic64_t * v) \
{ \
long temp, result; \
smp_mb(); \
__asm__ __volatile__( \
"1: ldq_l %0,%1\n" \
" " #op "q %0,%3,%2\n" \
" " #op "q %0,%3,%0\n" \
" " #asm_op " %0,%3,%2\n" \
" " #asm_op " %0,%3,%0\n" \
" stq_c %0,%1\n" \
" beq %0,2f\n" \
".subsection 2\n" \
@@ -101,15 +101,27 @@ static __inline__ long atomic64_##op##_return(long i, atomic64_t * v) \
return result; \
}
#define ATOMIC_OPS(opg) \
ATOMIC_OP(opg) \
ATOMIC_OP_RETURN(opg) \
ATOMIC64_OP(opg) \
ATOMIC64_OP_RETURN(opg)
#define ATOMIC_OPS(op) \
ATOMIC_OP(op, op##l) \
ATOMIC_OP_RETURN(op, op##l) \
ATOMIC64_OP(op, op##q) \
ATOMIC64_OP_RETURN(op, op##q)
ATOMIC_OPS(add)
ATOMIC_OPS(sub)
#define atomic_andnot atomic_andnot
#define atomic64_andnot atomic64_andnot
ATOMIC_OP(and, and)
ATOMIC_OP(andnot, bic)
ATOMIC_OP(or, bis)
ATOMIC_OP(xor, xor)
ATOMIC64_OP(and, and)
ATOMIC64_OP(andnot, bic)
ATOMIC64_OP(or, bis)
ATOMIC64_OP(xor, xor)
#undef ATOMIC_OPS
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP
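
The extra asm_op argument exists because the C-level name and the Alpha
mnemonic now differ for some of the new ops ('andnot' is 'bic', 'or' is
'bis'), so ATOMIC_OP(or, bis) generates an atomic_or() whose ll/sc loop
uses the 'bis' instruction. Stripped of the load-locked/store-conditional
details, the semantics every architecture must now provide look roughly
like this (illustrative C model only, not the real implementation):

	static inline void atomic_or_model(int i, atomic_t *v)
	{
		int old, new;

		do {
			old = atomic_read(v);
			new = old | i;
		} while (atomic_cmpxchg(v, old, new) != old);
	}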


@@ -172,9 +172,13 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
ATOMIC_OPS(add, +=, add)
ATOMIC_OPS(sub, -=, sub)
ATOMIC_OP(and, &=, and)
#define atomic_clear_mask(mask, v) atomic_and(~(mask), (v))
#define atomic_andnot atomic_andnot
ATOMIC_OP(and, &=, and)
ATOMIC_OP(andnot, &= ~, bic)
ATOMIC_OP(or, |=, or)
ATOMIC_OP(xor, ^=, xor)
#undef ATOMIC_OPS
#undef ATOMIC_OP_RETURN


@@ -57,12 +57,11 @@ static inline void atomic_##op(int i, atomic_t *v) \
} \
#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
static inline int atomic_##op##_return(int i, atomic_t *v) \
static inline int atomic_##op##_return_relaxed(int i, atomic_t *v) \
{ \
unsigned long tmp; \
int result; \
\
smp_mb(); \
prefetchw(&v->counter); \
\
__asm__ __volatile__("@ atomic_" #op "_return\n" \
@@ -75,17 +74,17 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
: "r" (&v->counter), "Ir" (i) \
: "cc"); \
\
smp_mb(); \
\
return result; \
}
static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
#define atomic_add_return_relaxed atomic_add_return_relaxed
#define atomic_sub_return_relaxed atomic_sub_return_relaxed
static inline int atomic_cmpxchg_relaxed(atomic_t *ptr, int old, int new)
{
int oldval;
unsigned long res;
smp_mb();
prefetchw(&ptr->counter);
do {
@@ -99,10 +98,9 @@ static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
: "cc");
} while (res);
smp_mb();
return oldval;
}
#define atomic_cmpxchg_relaxed atomic_cmpxchg_relaxed
static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
@@ -194,6 +192,13 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
ATOMIC_OPS(add, +=, add)
ATOMIC_OPS(sub, -=, sub)
#define atomic_andnot atomic_andnot
ATOMIC_OP(and, &=, and)
ATOMIC_OP(andnot, &= ~, bic)
ATOMIC_OP(or, |=, orr)
ATOMIC_OP(xor, ^=, eor)
#undef ATOMIC_OPS
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
@@ -290,12 +295,12 @@ static inline void atomic64_##op(long long i, atomic64_t *v) \
} \
#define ATOMIC64_OP_RETURN(op, op1, op2) \
static inline long long atomic64_##op##_return(long long i, atomic64_t *v) \
static inline long long \
atomic64_##op##_return_relaxed(long long i, atomic64_t *v) \
{ \
long long result; \
unsigned long tmp; \
\
smp_mb(); \
prefetchw(&v->counter); \
\
__asm__ __volatile__("@ atomic64_" #op "_return\n" \
@@ -309,8 +314,6 @@ static inline long long atomic64_##op##_return(long long i, atomic64_t *v) \
: "r" (&v->counter), "r" (i) \
: "cc"); \
\
smp_mb(); \
\
return result; \
}
@@ -321,17 +324,26 @@ static inline long long atomic64_##op##_return(long long i, atomic64_t *v) \
ATOMIC64_OPS(add, adds, adc)
ATOMIC64_OPS(sub, subs, sbc)
#define atomic64_add_return_relaxed atomic64_add_return_relaxed
#define atomic64_sub_return_relaxed atomic64_sub_return_relaxed
#define atomic64_andnot atomic64_andnot
ATOMIC64_OP(and, and, and)
ATOMIC64_OP(andnot, bic, bic)
ATOMIC64_OP(or, orr, orr)
ATOMIC64_OP(xor, eor, eor)
#undef ATOMIC64_OPS
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP
static inline long long atomic64_cmpxchg(atomic64_t *ptr, long long old,
long long new)
static inline long long
atomic64_cmpxchg_relaxed(atomic64_t *ptr, long long old, long long new)
{
long long oldval;
unsigned long res;
smp_mb();
prefetchw(&ptr->counter);
do {
@@ -346,17 +358,15 @@ static inline long long atomic64_cmpxchg(atomic64_t *ptr, long long old,
: "cc");
} while (res);
smp_mb();
return oldval;
}
#define atomic64_cmpxchg_relaxed atomic64_cmpxchg_relaxed
static inline long long atomic64_xchg(atomic64_t *ptr, long long new)
static inline long long atomic64_xchg_relaxed(atomic64_t *ptr, long long new)
{
long long result;
unsigned long tmp;
smp_mb();
prefetchw(&ptr->counter);
__asm__ __volatile__("@ atomic64_xchg\n"
@@ -368,10 +378,9 @@ static inline long long atomic64_xchg(atomic64_t *ptr, long long new)
: "r" (&ptr->counter), "r" (new)
: "cc");
smp_mb();
return result;
}
#define atomic64_xchg_relaxed atomic64_xchg_relaxed
static inline long long atomic64_dec_if_positive(atomic64_t *v)
{


@@ -67,12 +67,12 @@
do { \
compiletime_assert_atomic_type(*p); \
smp_mb(); \
ACCESS_ONCE(*p) = (v); \
WRITE_ONCE(*p, v); \
} while (0)
#define smp_load_acquire(p) \
({ \
typeof(*p) ___p1 = ACCESS_ONCE(*p); \
typeof(*p) ___p1 = READ_ONCE(*p); \
compiletime_assert_atomic_type(*p); \
smp_mb(); \
___p1; \
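
With smp_store_release()/smp_load_acquire() now built on WRITE_ONCE() and
READ_ONCE(), the usual pairing looks like this (a sketch with a made-up
message structure):

	struct msg {
		int payload;
		int ready;
	};

	static void produce(struct msg *m, int value)
	{
		m->payload = value;			/* plain store */
		smp_store_release(&m->ready, 1);	/* publish after the payload */
	}

	static int consume(struct msg *m, int *out)
	{
		if (!smp_load_acquire(&m->ready))	/* pairs with the release above */
			return 0;
		*out = m->payload;			/* payload store is guaranteed visible */
		return 1;
	}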


@@ -35,7 +35,6 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
unsigned int tmp;
#endif
smp_mb();
prefetchw((const void *)ptr);
switch (size) {
@@ -98,12 +97,11 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
__bad_xchg(ptr, size), ret = 0;
break;
}
smp_mb();
return ret;
}
#define xchg(ptr, x) ({ \
#define xchg_relaxed(ptr, x) ({ \
(__typeof__(*(ptr)))__xchg((unsigned long)(x), (ptr), \
sizeof(*(ptr))); \
})
@@ -117,6 +115,8 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
#error "SMP is not supported on this platform"
#endif
#define xchg xchg_relaxed
/*
* cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
* them available.
@@ -194,23 +194,11 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
return oldval;
}
static inline unsigned long __cmpxchg_mb(volatile void *ptr, unsigned long old,
unsigned long new, int size)
{
unsigned long ret;
smp_mb();
ret = __cmpxchg(ptr, old, new, size);
smp_mb();
return ret;
}
#define cmpxchg(ptr,o,n) ({ \
(__typeof__(*(ptr)))__cmpxchg_mb((ptr), \
(unsigned long)(o), \
(unsigned long)(n), \
sizeof(*(ptr))); \
#define cmpxchg_relaxed(ptr,o,n) ({ \
(__typeof__(*(ptr)))__cmpxchg((ptr), \
(unsigned long)(o), \
(unsigned long)(n), \
sizeof(*(ptr))); \
})
static inline unsigned long __cmpxchg_local(volatile void *ptr,
@@ -273,25 +261,6 @@ static inline unsigned long long __cmpxchg64(unsigned long long *ptr,
#define cmpxchg64_local(ptr, o, n) cmpxchg64_relaxed((ptr), (o), (n))
static inline unsigned long long __cmpxchg64_mb(unsigned long long *ptr,
unsigned long long old,
unsigned long long new)
{
unsigned long long ret;
smp_mb();
ret = __cmpxchg64(ptr, old, new);
smp_mb();
return ret;
}
#define cmpxchg64(ptr, o, n) ({ \
(__typeof__(*(ptr)))__cmpxchg64_mb((ptr), \
(unsigned long long)(o), \
(unsigned long long)(n)); \
})
#endif /* __LINUX_ARM_ARCH__ >= 6 */
#endif /* __ASM_ARM_CMPXCHG_H */
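
With the architecture now supplying only the _relaxed forms above, the
fully ordered cmpxchg()/xchg() (and the _acquire/_release flavours) come
from the generic layer in <linux/atomic.h>, which wraps the relaxed
operation in barriers. A simplified sketch of that fallback pattern:

	#ifndef cmpxchg
	#define cmpxchg(...)						\
	({								\
		__typeof__(cmpxchg_relaxed(__VA_ARGS__)) __ret;		\
		smp_mb__before_atomic();				\
		__ret = cmpxchg_relaxed(__VA_ARGS__);			\
		smp_mb__after_atomic();					\
		__ret;							\
	})
	#endif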


@@ -4,23 +4,32 @@
#ifndef __ASSEMBLY__
#include <linux/types.h>
#include <asm/unified.h>
#define JUMP_LABEL_NOP_SIZE 4
#ifdef CONFIG_THUMB2_KERNEL
#define JUMP_LABEL_NOP "nop.w"
#else
#define JUMP_LABEL_NOP "nop"
#endif
static __always_inline bool arch_static_branch(struct static_key *key)
static __always_inline bool arch_static_branch(struct static_key *key, bool branch)
{
asm_volatile_goto("1:\n\t"
JUMP_LABEL_NOP "\n\t"
WASM(nop) "\n\t"
".pushsection __jump_table, \"aw\"\n\t"
".word 1b, %l[l_yes], %c0\n\t"
".popsection\n\t"
: : "i" (key) : : l_yes);
: : "i" (&((char *)key)[branch]) : : l_yes);
return false;
l_yes:
return true;
}
static __always_inline bool arch_static_branch_jump(struct static_key *key, bool branch)
{
asm_volatile_goto("1:\n\t"
WASM(b) " %l[l_yes]\n\t"
".pushsection __jump_table, \"aw\"\n\t"
".word 1b, %l[l_yes], %c0\n\t"
".popsection\n\t"
: : "i" (&((char *)key)[branch]) : : l_yes);
return false;
l_yes:


@@ -12,7 +12,7 @@ static void __arch_jump_label_transform(struct jump_entry *entry,
void *addr = (void *)entry->code;
unsigned int insn;
if (type == JUMP_LABEL_ENABLE)
if (type == JUMP_LABEL_JMP)
insn = arm_gen_branch(entry->code, entry->target);
else
insn = arm_gen_nop();


@@ -85,6 +85,13 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
ATOMIC_OPS(add, add)
ATOMIC_OPS(sub, sub)
#define atomic_andnot atomic_andnot
ATOMIC_OP(and, and)
ATOMIC_OP(andnot, bic)
ATOMIC_OP(or, orr)
ATOMIC_OP(xor, eor)
#undef ATOMIC_OPS
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
@@ -183,6 +190,13 @@ static inline long atomic64_##op##_return(long i, atomic64_t *v) \
ATOMIC64_OPS(add, add)
ATOMIC64_OPS(sub, sub)
#define atomic64_andnot atomic64_andnot
ATOMIC64_OP(and, and)
ATOMIC64_OP(andnot, bic)
ATOMIC64_OP(or, orr)
ATOMIC64_OP(xor, eor)
#undef ATOMIC64_OPS
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP


@@ -44,12 +44,12 @@
do { \
compiletime_assert_atomic_type(*p); \
barrier(); \
ACCESS_ONCE(*p) = (v); \
WRITE_ONCE(*p, v); \
} while (0)
#define smp_load_acquire(p) \
({ \
typeof(*p) ___p1 = ACCESS_ONCE(*p); \
typeof(*p) ___p1 = READ_ONCE(*p); \
compiletime_assert_atomic_type(*p); \
barrier(); \
___p1; \


@@ -26,14 +26,28 @@
#define JUMP_LABEL_NOP_SIZE AARCH64_INSN_SIZE
static __always_inline bool arch_static_branch(struct static_key *key)
static __always_inline bool arch_static_branch(struct static_key *key, bool branch)
{
asm goto("1: nop\n\t"
".pushsection __jump_table, \"aw\"\n\t"
".align 3\n\t"
".quad 1b, %l[l_yes], %c0\n\t"
".popsection\n\t"
: : "i"(key) : : l_yes);
: : "i"(&((char *)key)[branch]) : : l_yes);
return false;
l_yes:
return true;
}
static __always_inline bool arch_static_branch_jump(struct static_key *key, bool branch)
{
asm goto("1: b %l[l_yes]\n\t"
".pushsection __jump_table, \"aw\"\n\t"
".align 3\n\t"
".quad 1b, %l[l_yes], %c0\n\t"
".popsection\n\t"
: : "i"(&((char *)key)[branch]) : : l_yes);
return false;
l_yes:


@@ -28,7 +28,7 @@ void arch_jump_label_transform(struct jump_entry *entry,
void *addr = (void *)entry->code;
u32 insn;
if (type == JUMP_LABEL_ENABLE) {
if (type == JUMP_LABEL_JMP) {
insn = aarch64_insn_gen_branch_imm(entry->code,
entry->target,
AARCH64_INSN_BRANCH_NOLINK);
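
The "i" (&((char *)key)[branch]) operand in the jump_label.h hunks above is
what records the branch type: a static key is at least word aligned, so the
low bit of the pointer written into the jump table is free to say whether
the site was compiled as static_branch_likely() or static_branch_unlikely().
The core code then splits the two apart again, roughly like this
(simplified from the kernel/jump_label.c helpers of this cycle):

	static inline struct static_key *jump_entry_key(struct jump_entry *entry)
	{
		/* mask off the type bit to recover the key itself */
		return (struct static_key *)((unsigned long)entry->key & ~1UL);
	}

	static inline bool jump_entry_branch(struct jump_entry *entry)
	{
		/* low bit: which branch flavour this site was compiled as */
		return (unsigned long)entry->key & 1UL;
	}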


@@ -44,6 +44,18 @@ static inline int __atomic_##op##_return(int i, atomic_t *v) \
ATOMIC_OP_RETURN(sub, sub, rKs21)
ATOMIC_OP_RETURN(add, add, r)
#define ATOMIC_OP(op, asm_op) \
ATOMIC_OP_RETURN(op, asm_op, r) \
static inline void atomic_##op(int i, atomic_t *v) \
{ \
(void)__atomic_##op##_return(i, v); \
}
ATOMIC_OP(and, and)
ATOMIC_OP(or, or)
ATOMIC_OP(xor, eor)
#undef ATOMIC_OP
#undef ATOMIC_OP_RETURN
/*


@@ -16,19 +16,21 @@
#include <linux/types.h>
asmlinkage int __raw_uncached_fetch_asm(const volatile int *ptr);
asmlinkage int __raw_atomic_update_asm(volatile int *ptr, int value);
asmlinkage int __raw_atomic_clear_asm(volatile int *ptr, int value);
asmlinkage int __raw_atomic_set_asm(volatile int *ptr, int value);
asmlinkage int __raw_atomic_add_asm(volatile int *ptr, int value);
asmlinkage int __raw_atomic_and_asm(volatile int *ptr, int value);
asmlinkage int __raw_atomic_or_asm(volatile int *ptr, int value);
asmlinkage int __raw_atomic_xor_asm(volatile int *ptr, int value);
asmlinkage int __raw_atomic_test_asm(const volatile int *ptr, int value);
#define atomic_read(v) __raw_uncached_fetch_asm(&(v)->counter)
#define atomic_add_return(i, v) __raw_atomic_update_asm(&(v)->counter, i)
#define atomic_sub_return(i, v) __raw_atomic_update_asm(&(v)->counter, -(i))
#define atomic_add_return(i, v) __raw_atomic_add_asm(&(v)->counter, i)
#define atomic_sub_return(i, v) __raw_atomic_add_asm(&(v)->counter, -(i))
#define atomic_clear_mask(m, v) __raw_atomic_clear_asm(&(v)->counter, m)
#define atomic_set_mask(m, v) __raw_atomic_set_asm(&(v)->counter, m)
#define atomic_or(i, v) (void)__raw_atomic_or_asm(&(v)->counter, i)
#define atomic_and(i, v) (void)__raw_atomic_and_asm(&(v)->counter, i)
#define atomic_xor(i, v) (void)__raw_atomic_xor_asm(&(v)->counter, i)
#endif


@@ -83,11 +83,12 @@ EXPORT_SYMBOL(insl);
EXPORT_SYMBOL(insl_16);
#ifdef CONFIG_SMP
EXPORT_SYMBOL(__raw_atomic_update_asm);
EXPORT_SYMBOL(__raw_atomic_clear_asm);
EXPORT_SYMBOL(__raw_atomic_set_asm);
EXPORT_SYMBOL(__raw_atomic_add_asm);
EXPORT_SYMBOL(__raw_atomic_and_asm);
EXPORT_SYMBOL(__raw_atomic_or_asm);
EXPORT_SYMBOL(__raw_atomic_xor_asm);
EXPORT_SYMBOL(__raw_atomic_test_asm);
EXPORT_SYMBOL(__raw_xchg_1_asm);
EXPORT_SYMBOL(__raw_xchg_2_asm);
EXPORT_SYMBOL(__raw_xchg_4_asm);


@@ -587,10 +587,10 @@ ENDPROC(___raw_write_unlock_asm)
* r0 = ptr
* r1 = value
*
* Add a signed value to a 32bit word and return the new value atomically.
* ADD a signed value to a 32bit word and return the new value atomically.
* Clobbers: r3:0, p1:0
*/
ENTRY(___raw_atomic_update_asm)
ENTRY(___raw_atomic_add_asm)
p1 = r0;
r3 = r1;
[--sp] = rets;
@@ -603,19 +603,19 @@ ENTRY(___raw_atomic_update_asm)
r0 = r3;
rets = [sp++];
rts;
ENDPROC(___raw_atomic_update_asm)
ENDPROC(___raw_atomic_add_asm)
/*
* r0 = ptr
* r1 = mask
*
* Clear the mask bits from a 32bit word and return the old 32bit value
* AND the mask bits from a 32bit word and return the old 32bit value
* atomically.
* Clobbers: r3:0, p1:0
*/
ENTRY(___raw_atomic_clear_asm)
ENTRY(___raw_atomic_and_asm)
p1 = r0;
r3 = ~r1;
r3 = r1;
[--sp] = rets;
call _get_core_lock;
r2 = [p1];
@@ -627,17 +627,17 @@ ENTRY(___raw_atomic_clear_asm)
r0 = r3;
rets = [sp++];
rts;
ENDPROC(___raw_atomic_clear_asm)
ENDPROC(___raw_atomic_and_asm)
/*
* r0 = ptr
* r1 = mask
*
* Set the mask bits into a 32bit word and return the old 32bit value
* OR the mask bits into a 32bit word and return the old 32bit value
* atomically.
* Clobbers: r3:0, p1:0
*/
ENTRY(___raw_atomic_set_asm)
ENTRY(___raw_atomic_or_asm)
p1 = r0;
r3 = r1;
[--sp] = rets;
@@ -651,7 +651,7 @@ ENTRY(___raw_atomic_set_asm)
r0 = r3;
rets = [sp++];
rts;
ENDPROC(___raw_atomic_set_asm)
ENDPROC(___raw_atomic_or_asm)
/*
* r0 = ptr
@@ -787,7 +787,7 @@ ENTRY(___raw_bit_set_asm)
r2 = r1;
r1 = 1;
r1 <<= r2;
jump ___raw_atomic_set_asm
jump ___raw_atomic_or_asm
ENDPROC(___raw_bit_set_asm)
/*
@@ -798,10 +798,10 @@ ENDPROC(___raw_bit_set_asm)
* Clobbers: r3:0, p1:0
*/
ENTRY(___raw_bit_clear_asm)
r2 = r1;
r1 = 1;
r1 <<= r2;
jump ___raw_atomic_clear_asm
r2 = 1;
r2 <<= r1;
r1 = ~r2;
jump ___raw_atomic_and_asm
ENDPROC(___raw_bit_clear_asm)
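
Note the contract change visible in the last two routines: the old
___raw_atomic_clear_asm complemented the mask internally (r3 = ~r1), while
the new ___raw_atomic_and_asm ANDs exactly the value it is given, so the
complement moves to the callers -- atomic_and(~mask, v) at the C level, and
the explicit ~ computation in ___raw_bit_clear_asm above. In C terms the
helper now behaves roughly like this (illustrative model only; the real
code runs under the core lock):

	static int raw_atomic_and_model(volatile int *ptr, int value)
	{
		int old = *ptr;
		*ptr = old & value;	/* no implicit complement any more */
		return old;		/* old value is returned, as before */
	}
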
/*

Some files were not shown because too many files have changed in this diff.