Merge tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux
Pull arm64 updates from Will Deacon:
- Support for new architectural features introduced in ARMv8.1:
* Privileged Access Never (PAN) to catch user pointer dereferences in
the kernel
* Large System Extension (LSE) for building scalable atomics and locks
(depends on locking/arch-atomic from tip, which is included here)
* Hardware Dirty Bit Management (DBM) for updating clean PTEs
automatically
- Move our PSCI implementation out into drivers/firmware/, where it can
be shared with arch/arm/. RMK has also pulled this component branch
and has additional patches moving arch/arm/ over. MAINTAINERS is
updated accordingly.
- Better BUG implementation based on the BRK instruction for trapping
- Leaf TLB invalidation for unmapping user pages
- Support for PROBE_ONLY PCI configurations
- Various cleanups and non-critical fixes, including:
* Always flush FP/SIMD state over exec()
* Restrict memblock additions based on range of linear mapping
* Ensure *(LIST_POISON) generates a fatal fault
* Context-tracking syscall return no longer corrupts return value when
not forced on.
* Alternatives patching synchronisation/stability improvements
* Signed sub-word cmpxchg compare fix (tickled by HAVE_CMPXCHG_LOCAL)
* Force SMP=y
* Hide direct DCC access from userspace
* Fix EFI stub memory allocation when DRAM starts at 0x0
* tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux: (92 commits)
arm64: flush FP/SIMD state correctly after execve()
arm64: makefile: fix perf_callchain.o kconfig dependency
arm64: set MAX_MEMBLOCK_ADDR according to linear region size
of/fdt: make memblock maximum physical address arch configurable
arm64: Fix source code file path in comments
arm64: entry: always restore x0 from the stack on syscall return
arm64: mdscr_el1: avoid exposing DCC to userspace
arm64: kconfig: Move LIST_POISON to a safe value
arm64: Add __exception_irq_entry definition for function graph
arm64: mm: ensure patched kernel text is fetched from PoU
arm64: alternatives: ensure secondary CPUs execute ISB after patching
arm64: make ll/sc __cmpxchg_case_##name asm consistent
arm64: dma-mapping: Simplify pgprot handling
arm64: restore cpu suspend/resume functionality
ARM64: PCI: do not enable resources on PROBE_ONLY systems
arm64: cmpxchg: truncate sub-word signed types before comparison
arm64: alternative: put secondary CPUs into polling loop during patch
arm64/Documentation: clarify wording regarding memory below the Image
arm64: lse: fix lse cmpxchg code indentation
arm64: remove redundant object file list
...
@@ -81,7 +81,7 @@ The decompressed kernel image contains a 64-byte header as follows:

  u64 res3 = 0;			/* reserved */
  u64 res4 = 0;			/* reserved */
  u32 magic = 0x644d5241;	/* Magic number, little endian, "ARM\x64" */
  u32 res5;			/* reserved (used for PE COFF offset) */
  u32 res5;			/* reserved (used for PE COFF offset) */

Header notes:

@@ -103,7 +103,7 @@ Header notes:

- The flags field (introduced in v3.17) is a little-endian 64-bit field
  composed as follows:
  Bit 0:	Kernel endianness.  1 if BE, 0 if LE.
  Bit 0:	Kernel endianness.  1 if BE, 0 if LE.
  Bits 1-63:	Reserved.

- When image_size is zero, a bootloader should attempt to keep as much

@@ -115,11 +115,14 @@ The Image must be placed text_offset bytes from a 2MB aligned base
address near the start of usable system RAM and called there. Memory
below that base address is currently unusable by Linux, and therefore it
is strongly recommended that this location is the start of system RAM.
The region between the 2 MB aligned base address and the start of the
image has no special significance to the kernel, and may be used for
other purposes.
At least image_size bytes from the start of the image must be free for
use by the kernel.

Any memory described to the kernel (even that below the 2MB aligned base
address) which is not marked as reserved from the kernel e.g. with a
Any memory described to the kernel (even that below the start of the
image) which is not marked as reserved from the kernel (e.g., with a
memreserve region in the device tree) will be considered as available to
the kernel.
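As a rough illustration of the placement rule in the hunk above (the DRAM base and text_offset values below are hypothetical, not taken from this patch; 0x80000 is only the traditional arm64 default):

/*
 * Hypothetical illustration of the Image placement rule: DRAM is assumed
 * to start at 0x80000000 and text_offset is assumed to be 0x80000.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t dram_start  = 0x80000000ULL;	/* assumed start of usable RAM */
	uint64_t text_offset = 0x00080000ULL;	/* taken from the 64-byte Image header */

	/* 2 MB aligned base at (or near) the start of usable system RAM */
	uint64_t base = dram_start & ~((1ULL << 21) - 1);

	/* the Image is loaded and entered text_offset bytes above that base */
	uint64_t load_addr = base + text_offset;

	printf("load Image at 0x%llx\n", (unsigned long long)load_addr);
	return 0;
}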
@@ -29,7 +29,7 @@ config ARM64
	select EDAC_SUPPORT
	select GENERIC_ALLOCATOR
	select GENERIC_CLOCKEVENTS
	select GENERIC_CLOCKEVENTS_BROADCAST if SMP
	select GENERIC_CLOCKEVENTS_BROADCAST
	select GENERIC_CPU_AUTOPROBE
	select GENERIC_EARLY_IOREMAP
	select GENERIC_IRQ_PROBE
@@ -54,6 +54,7 @@ config ARM64
	select HAVE_C_RECORDMCOUNT
	select HAVE_CC_STACKPROTECTOR
	select HAVE_CMPXCHG_DOUBLE
	select HAVE_CMPXCHG_LOCAL
	select HAVE_DEBUG_BUGVERBOSE
	select HAVE_DEBUG_KMEMLEAK
	select HAVE_DMA_API_DEBUG
@@ -105,6 +106,10 @@ config NO_IOPORT_MAP
config STACKTRACE_SUPPORT
	def_bool y

config ILLEGAL_POINTER_VALUE
	hex
	default 0xdead000000000000

config LOCKDEP_SUPPORT
	def_bool y

@@ -114,6 +119,14 @@ config TRACE_IRQFLAGS_SUPPORT
config RWSEM_XCHGADD_ALGORITHM
	def_bool y

config GENERIC_BUG
	def_bool y
	depends on BUG

config GENERIC_BUG_RELATIVE_POINTERS
	def_bool y
	depends on GENERIC_BUG

config GENERIC_HWEIGHT
	def_bool y

@@ -138,6 +151,9 @@ config NEED_DMA_MAP_STATE
config NEED_SG_DMA_LENGTH
	def_bool y

config SMP
	def_bool y

config SWIOTLB
	def_bool y

@@ -372,22 +388,8 @@ config CPU_BIG_ENDIAN
	help
	  Say Y if you plan on running a kernel in big-endian mode.

config SMP
	bool "Symmetric Multi-Processing"
	help
	  This enables support for systems with more than one CPU. If
	  you say N here, the kernel will run on single and
	  multiprocessor machines, but will use only one CPU of a
	  multiprocessor machine. If you say Y here, the kernel will run
	  on many, but not all, single processor machines. On a single
	  processor machine, the kernel will run faster if you say N
	  here.

	  If you don't know what to do here, say N.

config SCHED_MC
	bool "Multi-core scheduler support"
	depends on SMP
	help
	  Multi-core scheduler support improves the CPU scheduler's decision
	  making when dealing with multi-core CPU chips at a cost of slightly
@@ -395,7 +397,6 @@ config SCHED_MC

config SCHED_SMT
	bool "SMT scheduler support"
	depends on SMP
	help
	  Improves the CPU scheduler's decision making when dealing with
	  MultiThreading at a cost of slightly increased overhead in some
@@ -404,23 +405,17 @@ config SCHED_SMT
config NR_CPUS
	int "Maximum number of CPUs (2-4096)"
	range 2 4096
	depends on SMP
	# These have to remain sorted largest to smallest
	default "64"

config HOTPLUG_CPU
	bool "Support for hot-pluggable CPUs"
	depends on SMP
	help
	  Say Y here to experiment with turning CPUs off and on. CPUs
	  can be controlled through /sys/devices/system/cpu.

source kernel/Kconfig.preempt

config UP_LATE_INIT
	def_bool y
	depends on !SMP

config HZ
	int
	default 100
@@ -562,6 +557,53 @@ config SETEND_EMULATION
	  If unsure, say Y
endif

menu "ARMv8.1 architectural features"

config ARM64_HW_AFDBM
	bool "Support for hardware updates of the Access and Dirty page flags"
	default y
	help
	  The ARMv8.1 architecture extensions introduce support for
	  hardware updates of the access and dirty information in page
	  table entries. When enabled in TCR_EL1 (HA and HD bits) on
	  capable processors, accesses to pages with PTE_AF cleared will
	  set this bit instead of raising an access flag fault.
	  Similarly, writes to read-only pages with the DBM bit set will
	  clear the read-only bit (AP[2]) instead of raising a
	  permission fault.

	  Kernels built with this configuration option enabled continue
	  to work on pre-ARMv8.1 hardware and the performance impact is
	  minimal. If unsure, say Y.

config ARM64_PAN
	bool "Enable support for Privileged Access Never (PAN)"
	default y
	help
	  Privileged Access Never (PAN; part of the ARMv8.1 Extensions)
	  prevents the kernel or hypervisor from accessing user-space (EL0)
	  memory directly.

	  Choosing this option will cause any unprotected (not using
	  copy_to_user et al) memory access to fail with a permission fault.

	  The feature is detected at runtime, and will remain as a 'nop'
	  instruction if the cpu does not implement the feature.

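To make the ARM64_PAN help text concrete, here is a minimal sketch; the function is hypothetical, only get_user()/copy_*_user() are real kernel interfaces:

#include <linux/errno.h>
#include <linux/uaccess.h>

/* Hypothetical handler illustrating the rule described in the help text. */
static int read_user_value(int __user *uptr, int *out)
{
	int val;

	/*
	 * With CONFIG_ARM64_PAN on PAN-capable hardware, a direct kernel
	 * dereference of an EL0 pointer takes a permission fault:
	 *
	 *	val = *uptr;		// faults
	 */

	/* The uaccess helpers disable PAN around the access, so this works. */
	if (get_user(val, uptr))
		return -EFAULT;

	*out = val;
	return 0;
}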
config ARM64_LSE_ATOMICS
	bool "Atomic instructions"
	help
	  As part of the Large System Extensions, ARMv8.1 introduces new
	  atomic instructions that are designed specifically to scale in
	  very large systems.

	  Say Y here to make use of these instructions for the in-kernel
	  atomic routines. This incurs a small overhead on CPUs that do
	  not support these instructions and requires the kernel to be
	  built with binutils >= 2.25.

endmenu

endmenu

menu "Boot options"
@@ -17,7 +17,18 @@ GZFLAGS :=-9

KBUILD_DEFCONFIG := defconfig

KBUILD_CFLAGS	+= -mgeneral-regs-only
# Check for binutils support for specific extensions
lseinstr := $(call as-instr,.arch_extension lse,-DCONFIG_AS_LSE=1)

ifeq ($(CONFIG_ARM64_LSE_ATOMICS), y)
  ifeq ($(lseinstr),)
$(warning LSE atomics not supported by binutils)
  endif
endif

KBUILD_CFLAGS	+= -mgeneral-regs-only $(lseinstr)
KBUILD_AFLAGS	+= $(lseinstr)

ifeq ($(CONFIG_CPU_BIG_ENDIAN), y)
KBUILD_CPPFLAGS	+= -mbig-endian
AS		+= -EB
@@ -58,7 +69,10 @@ all: $(KBUILD_IMAGE) $(KBUILD_DTBS)

boot := arch/arm64/boot

Image Image.gz: vmlinux
Image: vmlinux
	$(Q)$(MAKE) $(build)=$(boot) $(boot)/$@

Image.%: vmlinux
	$(Q)$(MAKE) $(build)=$(boot) $(boot)/$@

zinstall install: vmlinux
@@ -19,9 +19,21 @@ targets := Image Image.gz

$(obj)/Image: vmlinux FORCE
	$(call if_changed,objcopy)

$(obj)/Image.bz2: $(obj)/Image FORCE
	$(call if_changed,bzip2)

$(obj)/Image.gz: $(obj)/Image FORCE
	$(call if_changed,gzip)

$(obj)/Image.lz4: $(obj)/Image FORCE
	$(call if_changed,lz4)

$(obj)/Image.lzma: $(obj)/Image FORCE
	$(call if_changed,lzma)

$(obj)/Image.lzo: $(obj)/Image FORCE
	$(call if_changed,lzo)

install: $(obj)/Image
	$(CONFIG_SHELL) $(srctree)/$(src)/install.sh $(KERNELRELEASE) \
	$(obj)/Image System.map "$(INSTALL_PATH)"
@@ -3,6 +3,8 @@
|
||||
|
||||
#ifndef __ASSEMBLY__
|
||||
|
||||
#include <linux/init.h>
|
||||
#include <linux/kconfig.h>
|
||||
#include <linux/types.h>
|
||||
#include <linux/stddef.h>
|
||||
#include <linux/stringify.h>
|
||||
@@ -15,7 +17,7 @@ struct alt_instr {
|
||||
u8 alt_len; /* size of new instruction(s), <= orig_len */
|
||||
};
|
||||
|
||||
void apply_alternatives_all(void);
|
||||
void __init apply_alternatives_all(void);
|
||||
void apply_alternatives(void *start, size_t length);
|
||||
void free_alternatives_memory(void);
|
||||
|
||||
@@ -40,7 +42,8 @@ void free_alternatives_memory(void);
|
||||
* be fixed in a binutils release posterior to 2.25.51.0.2 (anything
|
||||
* containing commit 4e4d08cf7399b606 or c1baaddf8861).
|
||||
*/
|
||||
#define ALTERNATIVE(oldinstr, newinstr, feature) \
|
||||
#define __ALTERNATIVE_CFG(oldinstr, newinstr, feature, cfg_enabled) \
|
||||
".if "__stringify(cfg_enabled)" == 1\n" \
|
||||
"661:\n\t" \
|
||||
oldinstr "\n" \
|
||||
"662:\n" \
|
||||
@@ -53,7 +56,11 @@ void free_alternatives_memory(void);
|
||||
"664:\n\t" \
|
||||
".popsection\n\t" \
|
||||
".org . - (664b-663b) + (662b-661b)\n\t" \
|
||||
".org . - (662b-661b) + (664b-663b)\n"
|
||||
".org . - (662b-661b) + (664b-663b)\n" \
|
||||
".endif\n"
|
||||
|
||||
#define _ALTERNATIVE_CFG(oldinstr, newinstr, feature, cfg, ...) \
|
||||
__ALTERNATIVE_CFG(oldinstr, newinstr, feature, IS_ENABLED(cfg))
|
||||
|
||||
#else
|
||||
|
||||
@@ -65,7 +72,8 @@ void free_alternatives_memory(void);
|
||||
.byte \alt_len
|
||||
.endm
|
||||
|
||||
.macro alternative_insn insn1 insn2 cap
|
||||
.macro alternative_insn insn1, insn2, cap, enable = 1
|
||||
.if \enable
|
||||
661: \insn1
|
||||
662: .pushsection .altinstructions, "a"
|
||||
altinstruction_entry 661b, 663f, \cap, 662b-661b, 664f-663f
|
||||
@@ -75,8 +83,70 @@ void free_alternatives_memory(void);
|
||||
664: .popsection
|
||||
.org . - (664b-663b) + (662b-661b)
|
||||
.org . - (662b-661b) + (664b-663b)
|
||||
.endif
|
||||
.endm
|
||||
|
||||
/*
|
||||
* Begin an alternative code sequence.
|
||||
*
|
||||
* The code that follows this macro will be assembled and linked as
|
||||
* normal. There are no restrictions on this code.
|
||||
*/
|
||||
.macro alternative_if_not cap, enable = 1
|
||||
.if \enable
|
||||
.pushsection .altinstructions, "a"
|
||||
altinstruction_entry 661f, 663f, \cap, 662f-661f, 664f-663f
|
||||
.popsection
|
||||
661:
|
||||
.endif
|
||||
.endm
|
||||
|
||||
/*
|
||||
* Provide the alternative code sequence.
|
||||
*
|
||||
* The code that follows this macro is assembled into a special
|
||||
* section to be used for dynamic patching. Code that follows this
|
||||
* macro must:
|
||||
*
|
||||
* 1. Be exactly the same length (in bytes) as the default code
|
||||
* sequence.
|
||||
*
|
||||
* 2. Not contain a branch target that is used outside of the
|
||||
* alternative sequence it is defined in (branches into an
|
||||
* alternative sequence are not fixed up).
|
||||
*/
|
||||
.macro alternative_else, enable = 1
|
||||
.if \enable
|
||||
662: .pushsection .altinstr_replacement, "ax"
|
||||
663:
|
||||
.endif
|
||||
.endm
|
||||
|
||||
/*
|
||||
* Complete an alternative code sequence.
|
||||
*/
|
||||
.macro alternative_endif, enable = 1
|
||||
.if \enable
|
||||
664: .popsection
|
||||
.org . - (664b-663b) + (662b-661b)
|
||||
.org . - (662b-661b) + (664b-663b)
|
||||
.endif
|
||||
.endm
|
||||
|
||||
#define _ALTERNATIVE_CFG(insn1, insn2, cap, cfg, ...) \
|
||||
alternative_insn insn1, insn2, cap, IS_ENABLED(cfg)
|
||||
|
||||
|
||||
#endif /* __ASSEMBLY__ */
|
||||
|
||||
/*
|
||||
* Usage: asm(ALTERNATIVE(oldinstr, newinstr, feature));
|
||||
*
|
||||
* Usage: asm(ALTERNATIVE(oldinstr, newinstr, feature, CONFIG_FOO));
|
||||
* N.B. If CONFIG_FOO is specified, but not selected, the whole block
|
||||
* will be omitted, including oldinstr.
|
||||
*/
|
||||
#define ALTERNATIVE(oldinstr, newinstr, ...) \
|
||||
_ALTERNATIVE_CFG(oldinstr, newinstr, __VA_ARGS__, 1)
|
||||
|
||||
#endif /* __ASM_ALTERNATIVE_H */
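A minimal sketch of how the extended macro is meant to be used, assuming only the capability and config symbols added elsewhere in this series; the choice of instructions is purely illustrative, not the author's actual use:

#include <asm/alternative.h>
#include <asm/cpufeature.h>

/*
 * Purely illustrative: both statements patch the nop with an isb at boot
 * on CPUs that report ARM64_HAS_PAN.  The four-argument form added by
 * this series additionally omits the whole block (oldinstr included)
 * when CONFIG_ARM64_PAN is not selected.
 */
static inline void alternative_usage_example(void)
{
	/* original three-argument form */
	asm(ALTERNATIVE("nop", "isb", ARM64_HAS_PAN));

	/* new config-gated form */
	asm(ALTERNATIVE("nop", "isb", ARM64_HAS_PAN, CONFIG_ARM64_PAN));
}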
|
||||
|
||||
@@ -49,18 +49,6 @@
|
||||
msr daifclr, #2
|
||||
.endm
|
||||
|
||||
/*
|
||||
* Save/disable and restore interrupts.
|
||||
*/
|
||||
.macro save_and_disable_irqs, olddaif
|
||||
mrs \olddaif, daif
|
||||
disable_irq
|
||||
.endm
|
||||
|
||||
.macro restore_irqs, olddaif
|
||||
msr daif, \olddaif
|
||||
.endm
|
||||
|
||||
/*
|
||||
* Enable and disable debug exceptions.
|
||||
*/
|
||||
@@ -103,9 +91,7 @@
|
||||
* SMP data memory barrier
|
||||
*/
|
||||
.macro smp_dmb, opt
|
||||
#ifdef CONFIG_SMP
|
||||
dmb \opt
|
||||
#endif
|
||||
.endm
|
||||
|
||||
#define USER(l, x...) \
|
||||
|
||||
@@ -24,247 +24,72 @@
|
||||
#include <linux/types.h>
|
||||
|
||||
#include <asm/barrier.h>
|
||||
#include <asm/cmpxchg.h>
|
||||
|
||||
#define ATOMIC_INIT(i) { (i) }
|
||||
#include <asm/lse.h>
|
||||
|
||||
#ifdef __KERNEL__
|
||||
|
||||
/*
|
||||
* On ARM, ordinary assignment (str instruction) doesn't clear the local
|
||||
* strex/ldrex monitor on some implementations. The reason we can use it for
|
||||
* atomic_set() is the clrex or dummy strex done on every exception return.
|
||||
*/
|
||||
#define atomic_read(v) ACCESS_ONCE((v)->counter)
|
||||
#define atomic_set(v,i) (((v)->counter) = (i))
|
||||
#define __ARM64_IN_ATOMIC_IMPL
|
||||
|
||||
/*
|
||||
* AArch64 UP and SMP safe atomic ops. We use load exclusive and
|
||||
* store exclusive to ensure that these are atomic. We may loop
|
||||
* to ensure that the update happens.
|
||||
*/
|
||||
#if defined(CONFIG_ARM64_LSE_ATOMICS) && defined(CONFIG_AS_LSE)
|
||||
#include <asm/atomic_lse.h>
|
||||
#else
|
||||
#include <asm/atomic_ll_sc.h>
|
||||
#endif
|
||||
|
||||
#define ATOMIC_OP(op, asm_op) \
|
||||
static inline void atomic_##op(int i, atomic_t *v) \
|
||||
{ \
|
||||
unsigned long tmp; \
|
||||
int result; \
|
||||
#undef __ARM64_IN_ATOMIC_IMPL
|
||||
|
||||
#include <asm/cmpxchg.h>
|
||||
|
||||
#define ___atomic_add_unless(v, a, u, sfx) \
|
||||
({ \
|
||||
typeof((v)->counter) c, old; \
|
||||
\
|
||||
asm volatile("// atomic_" #op "\n" \
|
||||
"1: ldxr %w0, %2\n" \
|
||||
" " #asm_op " %w0, %w0, %w3\n" \
|
||||
" stxr %w1, %w0, %2\n" \
|
||||
" cbnz %w1, 1b" \
|
||||
: "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \
|
||||
: "Ir" (i)); \
|
||||
} \
|
||||
c = atomic##sfx##_read(v); \
|
||||
while (c != (u) && \
|
||||
(old = atomic##sfx##_cmpxchg((v), c, c + (a))) != c) \
|
||||
c = old; \
|
||||
c; \
|
||||
})
|
||||
|
||||
#define ATOMIC_OP_RETURN(op, asm_op) \
|
||||
static inline int atomic_##op##_return(int i, atomic_t *v) \
|
||||
{ \
|
||||
unsigned long tmp; \
|
||||
int result; \
|
||||
\
|
||||
asm volatile("// atomic_" #op "_return\n" \
|
||||
"1: ldxr %w0, %2\n" \
|
||||
" " #asm_op " %w0, %w0, %w3\n" \
|
||||
" stlxr %w1, %w0, %2\n" \
|
||||
" cbnz %w1, 1b" \
|
||||
: "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \
|
||||
: "Ir" (i) \
|
||||
: "memory"); \
|
||||
\
|
||||
smp_mb(); \
|
||||
return result; \
|
||||
}
|
||||
#define ATOMIC_INIT(i) { (i) }
|
||||
|
||||
#define ATOMIC_OPS(op, asm_op) \
|
||||
ATOMIC_OP(op, asm_op) \
|
||||
ATOMIC_OP_RETURN(op, asm_op)
|
||||
#define atomic_read(v) READ_ONCE((v)->counter)
|
||||
#define atomic_set(v, i) (((v)->counter) = (i))
|
||||
#define atomic_xchg(v, new) xchg(&((v)->counter), (new))
|
||||
#define atomic_cmpxchg(v, old, new) cmpxchg(&((v)->counter), (old), (new))
|
||||
|
||||
ATOMIC_OPS(add, add)
|
||||
ATOMIC_OPS(sub, sub)
|
||||
|
||||
#define atomic_andnot atomic_andnot
|
||||
|
||||
ATOMIC_OP(and, and)
|
||||
ATOMIC_OP(andnot, bic)
|
||||
ATOMIC_OP(or, orr)
|
||||
ATOMIC_OP(xor, eor)
|
||||
|
||||
#undef ATOMIC_OPS
|
||||
#undef ATOMIC_OP_RETURN
|
||||
#undef ATOMIC_OP
|
||||
|
||||
static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
|
||||
{
|
||||
unsigned long tmp;
|
||||
int oldval;
|
||||
|
||||
smp_mb();
|
||||
|
||||
asm volatile("// atomic_cmpxchg\n"
|
||||
"1: ldxr %w1, %2\n"
|
||||
" cmp %w1, %w3\n"
|
||||
" b.ne 2f\n"
|
||||
" stxr %w0, %w4, %2\n"
|
||||
" cbnz %w0, 1b\n"
|
||||
"2:"
|
||||
: "=&r" (tmp), "=&r" (oldval), "+Q" (ptr->counter)
|
||||
: "Ir" (old), "r" (new)
|
||||
: "cc");
|
||||
|
||||
smp_mb();
|
||||
return oldval;
|
||||
}
|
||||
|
||||
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
|
||||
|
||||
static inline int __atomic_add_unless(atomic_t *v, int a, int u)
|
||||
{
|
||||
int c, old;
|
||||
|
||||
c = atomic_read(v);
|
||||
while (c != u && (old = atomic_cmpxchg((v), c, c + a)) != c)
|
||||
c = old;
|
||||
return c;
|
||||
}
|
||||
|
||||
#define atomic_inc(v) atomic_add(1, v)
|
||||
#define atomic_dec(v) atomic_sub(1, v)
|
||||
|
||||
#define atomic_inc_and_test(v) (atomic_add_return(1, v) == 0)
|
||||
#define atomic_dec_and_test(v) (atomic_sub_return(1, v) == 0)
|
||||
#define atomic_inc_return(v) (atomic_add_return(1, v))
|
||||
#define atomic_dec_return(v) (atomic_sub_return(1, v))
|
||||
#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
|
||||
|
||||
#define atomic_add_negative(i,v) (atomic_add_return(i, v) < 0)
|
||||
#define atomic_inc(v) atomic_add(1, (v))
|
||||
#define atomic_dec(v) atomic_sub(1, (v))
|
||||
#define atomic_inc_return(v) atomic_add_return(1, (v))
|
||||
#define atomic_dec_return(v) atomic_sub_return(1, (v))
|
||||
#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
|
||||
#define atomic_dec_and_test(v) (atomic_dec_return(v) == 0)
|
||||
#define atomic_sub_and_test(i, v) (atomic_sub_return((i), (v)) == 0)
|
||||
#define atomic_add_negative(i, v) (atomic_add_return((i), (v)) < 0)
|
||||
#define __atomic_add_unless(v, a, u) ___atomic_add_unless(v, a, u,)
|
||||
#define atomic_andnot atomic_andnot
|
||||
|
||||
/*
|
||||
* 64-bit atomic operations.
|
||||
*/
|
||||
#define ATOMIC64_INIT(i) { (i) }
|
||||
#define ATOMIC64_INIT ATOMIC_INIT
|
||||
#define atomic64_read atomic_read
|
||||
#define atomic64_set atomic_set
|
||||
#define atomic64_xchg atomic_xchg
|
||||
#define atomic64_cmpxchg atomic_cmpxchg
|
||||
|
||||
#define atomic64_read(v) ACCESS_ONCE((v)->counter)
|
||||
#define atomic64_set(v,i) (((v)->counter) = (i))
|
||||
|
||||
#define ATOMIC64_OP(op, asm_op) \
|
||||
static inline void atomic64_##op(long i, atomic64_t *v) \
|
||||
{ \
|
||||
long result; \
|
||||
unsigned long tmp; \
|
||||
\
|
||||
asm volatile("// atomic64_" #op "\n" \
|
||||
"1: ldxr %0, %2\n" \
|
||||
" " #asm_op " %0, %0, %3\n" \
|
||||
" stxr %w1, %0, %2\n" \
|
||||
" cbnz %w1, 1b" \
|
||||
: "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \
|
||||
: "Ir" (i)); \
|
||||
} \
|
||||
|
||||
#define ATOMIC64_OP_RETURN(op, asm_op) \
|
||||
static inline long atomic64_##op##_return(long i, atomic64_t *v) \
|
||||
{ \
|
||||
long result; \
|
||||
unsigned long tmp; \
|
||||
\
|
||||
asm volatile("// atomic64_" #op "_return\n" \
|
||||
"1: ldxr %0, %2\n" \
|
||||
" " #asm_op " %0, %0, %3\n" \
|
||||
" stlxr %w1, %0, %2\n" \
|
||||
" cbnz %w1, 1b" \
|
||||
: "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \
|
||||
: "Ir" (i) \
|
||||
: "memory"); \
|
||||
\
|
||||
smp_mb(); \
|
||||
return result; \
|
||||
}
|
||||
|
||||
#define ATOMIC64_OPS(op, asm_op) \
|
||||
ATOMIC64_OP(op, asm_op) \
|
||||
ATOMIC64_OP_RETURN(op, asm_op)
|
||||
|
||||
ATOMIC64_OPS(add, add)
|
||||
ATOMIC64_OPS(sub, sub)
|
||||
|
||||
#define atomic64_andnot atomic64_andnot
|
||||
|
||||
ATOMIC64_OP(and, and)
|
||||
ATOMIC64_OP(andnot, bic)
|
||||
ATOMIC64_OP(or, orr)
|
||||
ATOMIC64_OP(xor, eor)
|
||||
|
||||
#undef ATOMIC64_OPS
|
||||
#undef ATOMIC64_OP_RETURN
|
||||
#undef ATOMIC64_OP
|
||||
|
||||
static inline long atomic64_cmpxchg(atomic64_t *ptr, long old, long new)
|
||||
{
|
||||
long oldval;
|
||||
unsigned long res;
|
||||
|
||||
smp_mb();
|
||||
|
||||
asm volatile("// atomic64_cmpxchg\n"
|
||||
"1: ldxr %1, %2\n"
|
||||
" cmp %1, %3\n"
|
||||
" b.ne 2f\n"
|
||||
" stxr %w0, %4, %2\n"
|
||||
" cbnz %w0, 1b\n"
|
||||
"2:"
|
||||
: "=&r" (res), "=&r" (oldval), "+Q" (ptr->counter)
|
||||
: "Ir" (old), "r" (new)
|
||||
: "cc");
|
||||
|
||||
smp_mb();
|
||||
return oldval;
|
||||
}
|
||||
|
||||
#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
|
||||
|
||||
static inline long atomic64_dec_if_positive(atomic64_t *v)
|
||||
{
|
||||
long result;
|
||||
unsigned long tmp;
|
||||
|
||||
asm volatile("// atomic64_dec_if_positive\n"
|
||||
"1: ldxr %0, %2\n"
|
||||
" subs %0, %0, #1\n"
|
||||
" b.mi 2f\n"
|
||||
" stlxr %w1, %0, %2\n"
|
||||
" cbnz %w1, 1b\n"
|
||||
" dmb ish\n"
|
||||
"2:"
|
||||
: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
|
||||
:
|
||||
: "cc", "memory");
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
|
||||
{
|
||||
long c, old;
|
||||
|
||||
c = atomic64_read(v);
|
||||
while (c != u && (old = atomic64_cmpxchg((v), c, c + a)) != c)
|
||||
c = old;
|
||||
|
||||
return c != u;
|
||||
}
|
||||
|
||||
#define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)
|
||||
#define atomic64_inc(v) atomic64_add(1LL, (v))
|
||||
#define atomic64_inc_return(v) atomic64_add_return(1LL, (v))
|
||||
#define atomic64_inc(v) atomic64_add(1, (v))
|
||||
#define atomic64_dec(v) atomic64_sub(1, (v))
|
||||
#define atomic64_inc_return(v) atomic64_add_return(1, (v))
|
||||
#define atomic64_dec_return(v) atomic64_sub_return(1, (v))
|
||||
#define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
|
||||
#define atomic64_sub_and_test(a, v) (atomic64_sub_return((a), (v)) == 0)
|
||||
#define atomic64_dec(v) atomic64_sub(1LL, (v))
|
||||
#define atomic64_dec_return(v) atomic64_sub_return(1LL, (v))
|
||||
#define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
|
||||
#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
|
||||
#define atomic64_dec_and_test(v) (atomic64_dec_return(v) == 0)
|
||||
#define atomic64_sub_and_test(i, v) (atomic64_sub_return((i), (v)) == 0)
|
||||
#define atomic64_add_negative(i, v) (atomic64_add_return((i), (v)) < 0)
|
||||
#define atomic64_add_unless(v, a, u) (___atomic_add_unless(v, a, u, 64) != u)
|
||||
#define atomic64_andnot atomic64_andnot
|
||||
|
||||
#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
|
||||
|
||||
#endif
|
||||
#endif
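For context, a typical (illustrative) user of the ___atomic_add_unless()-backed helpers defined above; the object and function are hypothetical:

#include <linux/atomic.h>
#include <linux/types.h>

/* Illustrative object with a lookup-side reference count. */
struct obj {
	atomic_t refcount;
};

/*
 * Take a reference only if the object is still live.  atomic_inc_not_zero()
 * is atomic_add_unless(v, 1, 0), i.e. the cmpxchg loop above: re-read the
 * counter, bail out if it reached the forbidden value (0 here), otherwise
 * keep trying to install counter + 1 until the cmpxchg succeeds.
 */
static bool obj_tryget(struct obj *obj)
{
	return atomic_inc_not_zero(&obj->refcount);
}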
|
||||
|
||||
@@ -0,0 +1,247 @@
|
||||
/*
|
||||
* Based on arch/arm/include/asm/atomic.h
|
||||
*
|
||||
* Copyright (C) 1996 Russell King.
|
||||
* Copyright (C) 2002 Deep Blue Solutions Ltd.
|
||||
* Copyright (C) 2012 ARM Ltd.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License version 2 as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#ifndef __ASM_ATOMIC_LL_SC_H
|
||||
#define __ASM_ATOMIC_LL_SC_H
|
||||
|
||||
#ifndef __ARM64_IN_ATOMIC_IMPL
|
||||
#error "please don't include this file directly"
|
||||
#endif
|
||||
|
||||
/*
|
||||
* AArch64 UP and SMP safe atomic ops. We use load exclusive and
|
||||
* store exclusive to ensure that these are atomic. We may loop
|
||||
* to ensure that the update happens.
|
||||
*
|
||||
* NOTE: these functions do *not* follow the PCS and must explicitly
|
||||
* save any clobbered registers other than x0 (regardless of return
|
||||
* value). This is achieved through -fcall-saved-* compiler flags for
|
||||
* this file, which unfortunately don't work on a per-function basis
|
||||
* (the optimize attribute silently ignores these options).
|
||||
*/
|
||||
|
||||
#define ATOMIC_OP(op, asm_op) \
|
||||
__LL_SC_INLINE void \
|
||||
__LL_SC_PREFIX(atomic_##op(int i, atomic_t *v)) \
|
||||
{ \
|
||||
unsigned long tmp; \
|
||||
int result; \
|
||||
\
|
||||
asm volatile("// atomic_" #op "\n" \
|
||||
" prfm pstl1strm, %2\n" \
|
||||
"1: ldxr %w0, %2\n" \
|
||||
" " #asm_op " %w0, %w0, %w3\n" \
|
||||
" stxr %w1, %w0, %2\n" \
|
||||
" cbnz %w1, 1b" \
|
||||
: "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \
|
||||
: "Ir" (i)); \
|
||||
} \
|
||||
__LL_SC_EXPORT(atomic_##op);
|
||||
|
||||
#define ATOMIC_OP_RETURN(op, asm_op) \
|
||||
__LL_SC_INLINE int \
|
||||
__LL_SC_PREFIX(atomic_##op##_return(int i, atomic_t *v)) \
|
||||
{ \
|
||||
unsigned long tmp; \
|
||||
int result; \
|
||||
\
|
||||
asm volatile("// atomic_" #op "_return\n" \
|
||||
" prfm pstl1strm, %2\n" \
|
||||
"1: ldxr %w0, %2\n" \
|
||||
" " #asm_op " %w0, %w0, %w3\n" \
|
||||
" stlxr %w1, %w0, %2\n" \
|
||||
" cbnz %w1, 1b" \
|
||||
: "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \
|
||||
: "Ir" (i) \
|
||||
: "memory"); \
|
||||
\
|
||||
smp_mb(); \
|
||||
return result; \
|
||||
} \
|
||||
__LL_SC_EXPORT(atomic_##op##_return);
|
||||
|
||||
#define ATOMIC_OPS(op, asm_op) \
|
||||
ATOMIC_OP(op, asm_op) \
|
||||
ATOMIC_OP_RETURN(op, asm_op)
|
||||
|
||||
ATOMIC_OPS(add, add)
|
||||
ATOMIC_OPS(sub, sub)
|
||||
|
||||
ATOMIC_OP(and, and)
|
||||
ATOMIC_OP(andnot, bic)
|
||||
ATOMIC_OP(or, orr)
|
||||
ATOMIC_OP(xor, eor)
|
||||
|
||||
#undef ATOMIC_OPS
|
||||
#undef ATOMIC_OP_RETURN
|
||||
#undef ATOMIC_OP
|
||||
|
||||
#define ATOMIC64_OP(op, asm_op) \
|
||||
__LL_SC_INLINE void \
|
||||
__LL_SC_PREFIX(atomic64_##op(long i, atomic64_t *v)) \
|
||||
{ \
|
||||
long result; \
|
||||
unsigned long tmp; \
|
||||
\
|
||||
asm volatile("// atomic64_" #op "\n" \
|
||||
" prfm pstl1strm, %2\n" \
|
||||
"1: ldxr %0, %2\n" \
|
||||
" " #asm_op " %0, %0, %3\n" \
|
||||
" stxr %w1, %0, %2\n" \
|
||||
" cbnz %w1, 1b" \
|
||||
: "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \
|
||||
: "Ir" (i)); \
|
||||
} \
|
||||
__LL_SC_EXPORT(atomic64_##op);
|
||||
|
||||
#define ATOMIC64_OP_RETURN(op, asm_op) \
|
||||
__LL_SC_INLINE long \
|
||||
__LL_SC_PREFIX(atomic64_##op##_return(long i, atomic64_t *v)) \
|
||||
{ \
|
||||
long result; \
|
||||
unsigned long tmp; \
|
||||
\
|
||||
asm volatile("// atomic64_" #op "_return\n" \
|
||||
" prfm pstl1strm, %2\n" \
|
||||
"1: ldxr %0, %2\n" \
|
||||
" " #asm_op " %0, %0, %3\n" \
|
||||
" stlxr %w1, %0, %2\n" \
|
||||
" cbnz %w1, 1b" \
|
||||
: "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \
|
||||
: "Ir" (i) \
|
||||
: "memory"); \
|
||||
\
|
||||
smp_mb(); \
|
||||
return result; \
|
||||
} \
|
||||
__LL_SC_EXPORT(atomic64_##op##_return);
|
||||
|
||||
#define ATOMIC64_OPS(op, asm_op) \
|
||||
ATOMIC64_OP(op, asm_op) \
|
||||
ATOMIC64_OP_RETURN(op, asm_op)
|
||||
|
||||
ATOMIC64_OPS(add, add)
|
||||
ATOMIC64_OPS(sub, sub)
|
||||
|
||||
ATOMIC64_OP(and, and)
|
||||
ATOMIC64_OP(andnot, bic)
|
||||
ATOMIC64_OP(or, orr)
|
||||
ATOMIC64_OP(xor, eor)
|
||||
|
||||
#undef ATOMIC64_OPS
|
||||
#undef ATOMIC64_OP_RETURN
|
||||
#undef ATOMIC64_OP
|
||||
|
||||
__LL_SC_INLINE long
|
||||
__LL_SC_PREFIX(atomic64_dec_if_positive(atomic64_t *v))
|
||||
{
|
||||
long result;
|
||||
unsigned long tmp;
|
||||
|
||||
asm volatile("// atomic64_dec_if_positive\n"
|
||||
" prfm pstl1strm, %2\n"
|
||||
"1: ldxr %0, %2\n"
|
||||
" subs %0, %0, #1\n"
|
||||
" b.lt 2f\n"
|
||||
" stlxr %w1, %0, %2\n"
|
||||
" cbnz %w1, 1b\n"
|
||||
" dmb ish\n"
|
||||
"2:"
|
||||
: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
|
||||
:
|
||||
: "cc", "memory");
|
||||
|
||||
return result;
|
||||
}
|
||||
__LL_SC_EXPORT(atomic64_dec_if_positive);
|
||||
|
||||
#define __CMPXCHG_CASE(w, sz, name, mb, rel, cl) \
|
||||
__LL_SC_INLINE unsigned long \
|
||||
__LL_SC_PREFIX(__cmpxchg_case_##name(volatile void *ptr, \
|
||||
unsigned long old, \
|
||||
unsigned long new)) \
|
||||
{ \
|
||||
unsigned long tmp, oldval; \
|
||||
\
|
||||
asm volatile( \
|
||||
" prfm pstl1strm, %[v]\n" \
|
||||
"1: ldxr" #sz "\t%" #w "[oldval], %[v]\n" \
|
||||
" eor %" #w "[tmp], %" #w "[oldval], %" #w "[old]\n" \
|
||||
" cbnz %" #w "[tmp], 2f\n" \
|
||||
" st" #rel "xr" #sz "\t%w[tmp], %" #w "[new], %[v]\n" \
|
||||
" cbnz %w[tmp], 1b\n" \
|
||||
" " #mb "\n" \
|
||||
" mov %" #w "[oldval], %" #w "[old]\n" \
|
||||
"2:" \
|
||||
: [tmp] "=&r" (tmp), [oldval] "=&r" (oldval), \
|
||||
[v] "+Q" (*(unsigned long *)ptr) \
|
||||
: [old] "Lr" (old), [new] "r" (new) \
|
||||
: cl); \
|
||||
\
|
||||
return oldval; \
|
||||
} \
|
||||
__LL_SC_EXPORT(__cmpxchg_case_##name);
|
||||
|
||||
__CMPXCHG_CASE(w, b, 1, , , )
|
||||
__CMPXCHG_CASE(w, h, 2, , , )
|
||||
__CMPXCHG_CASE(w, , 4, , , )
|
||||
__CMPXCHG_CASE( , , 8, , , )
|
||||
__CMPXCHG_CASE(w, b, mb_1, dmb ish, l, "memory")
|
||||
__CMPXCHG_CASE(w, h, mb_2, dmb ish, l, "memory")
|
||||
__CMPXCHG_CASE(w, , mb_4, dmb ish, l, "memory")
|
||||
__CMPXCHG_CASE( , , mb_8, dmb ish, l, "memory")
|
||||
|
||||
#undef __CMPXCHG_CASE
|
||||
|
||||
#define __CMPXCHG_DBL(name, mb, rel, cl) \
|
||||
__LL_SC_INLINE int \
|
||||
__LL_SC_PREFIX(__cmpxchg_double##name(unsigned long old1, \
|
||||
unsigned long old2, \
|
||||
unsigned long new1, \
|
||||
unsigned long new2, \
|
||||
volatile void *ptr)) \
|
||||
{ \
|
||||
unsigned long tmp, ret; \
|
||||
\
|
||||
asm volatile("// __cmpxchg_double" #name "\n" \
|
||||
" prfm pstl1strm, %2\n" \
|
||||
"1: ldxp %0, %1, %2\n" \
|
||||
" eor %0, %0, %3\n" \
|
||||
" eor %1, %1, %4\n" \
|
||||
" orr %1, %0, %1\n" \
|
||||
" cbnz %1, 2f\n" \
|
||||
" st" #rel "xp %w0, %5, %6, %2\n" \
|
||||
" cbnz %w0, 1b\n" \
|
||||
" " #mb "\n" \
|
||||
"2:" \
|
||||
: "=&r" (tmp), "=&r" (ret), "+Q" (*(unsigned long *)ptr) \
|
||||
: "r" (old1), "r" (old2), "r" (new1), "r" (new2) \
|
||||
: cl); \
|
||||
\
|
||||
return ret; \
|
||||
} \
|
||||
__LL_SC_EXPORT(__cmpxchg_double##name);
|
||||
|
||||
__CMPXCHG_DBL( , , , )
|
||||
__CMPXCHG_DBL(_mb, dmb ish, l, "memory")
|
||||
|
||||
#undef __CMPXCHG_DBL
|
||||
|
||||
#endif /* __ASM_ATOMIC_LL_SC_H */
|
||||
@@ -0,0 +1,391 @@
|
||||
/*
|
||||
* Based on arch/arm/include/asm/atomic.h
|
||||
*
|
||||
* Copyright (C) 1996 Russell King.
|
||||
* Copyright (C) 2002 Deep Blue Solutions Ltd.
|
||||
* Copyright (C) 2012 ARM Ltd.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License version 2 as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#ifndef __ASM_ATOMIC_LSE_H
|
||||
#define __ASM_ATOMIC_LSE_H
|
||||
|
||||
#ifndef __ARM64_IN_ATOMIC_IMPL
|
||||
#error "please don't include this file directly"
|
||||
#endif
|
||||
|
||||
#define __LL_SC_ATOMIC(op) __LL_SC_CALL(atomic_##op)
|
||||
|
||||
static inline void atomic_andnot(int i, atomic_t *v)
|
||||
{
|
||||
register int w0 asm ("w0") = i;
|
||||
register atomic_t *x1 asm ("x1") = v;
|
||||
|
||||
asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC(andnot),
|
||||
" stclr %w[i], %[v]\n")
|
||||
: [i] "+r" (w0), [v] "+Q" (v->counter)
|
||||
: "r" (x1)
|
||||
: "x30");
|
||||
}
|
||||
|
||||
static inline void atomic_or(int i, atomic_t *v)
|
||||
{
|
||||
register int w0 asm ("w0") = i;
|
||||
register atomic_t *x1 asm ("x1") = v;
|
||||
|
||||
asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC(or),
|
||||
" stset %w[i], %[v]\n")
|
||||
: [i] "+r" (w0), [v] "+Q" (v->counter)
|
||||
: "r" (x1)
|
||||
: "x30");
|
||||
}
|
||||
|
||||
static inline void atomic_xor(int i, atomic_t *v)
|
||||
{
|
||||
register int w0 asm ("w0") = i;
|
||||
register atomic_t *x1 asm ("x1") = v;
|
||||
|
||||
asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC(xor),
|
||||
" steor %w[i], %[v]\n")
|
||||
: [i] "+r" (w0), [v] "+Q" (v->counter)
|
||||
: "r" (x1)
|
||||
: "x30");
|
||||
}
|
||||
|
||||
static inline void atomic_add(int i, atomic_t *v)
|
||||
{
|
||||
register int w0 asm ("w0") = i;
|
||||
register atomic_t *x1 asm ("x1") = v;
|
||||
|
||||
asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC(add),
|
||||
" stadd %w[i], %[v]\n")
|
||||
: [i] "+r" (w0), [v] "+Q" (v->counter)
|
||||
: "r" (x1)
|
||||
: "x30");
|
||||
}
|
||||
|
||||
static inline int atomic_add_return(int i, atomic_t *v)
|
||||
{
|
||||
register int w0 asm ("w0") = i;
|
||||
register atomic_t *x1 asm ("x1") = v;
|
||||
|
||||
asm volatile(ARM64_LSE_ATOMIC_INSN(
|
||||
/* LL/SC */
|
||||
" nop\n"
|
||||
__LL_SC_ATOMIC(add_return),
|
||||
/* LSE atomics */
|
||||
" ldaddal %w[i], w30, %[v]\n"
|
||||
" add %w[i], %w[i], w30")
|
||||
: [i] "+r" (w0), [v] "+Q" (v->counter)
|
||||
: "r" (x1)
|
||||
: "x30", "memory");
|
||||
|
||||
return w0;
|
||||
}
|
||||
|
||||
static inline void atomic_and(int i, atomic_t *v)
|
||||
{
|
||||
register int w0 asm ("w0") = i;
|
||||
register atomic_t *x1 asm ("x1") = v;
|
||||
|
||||
asm volatile(ARM64_LSE_ATOMIC_INSN(
|
||||
/* LL/SC */
|
||||
" nop\n"
|
||||
__LL_SC_ATOMIC(and),
|
||||
/* LSE atomics */
|
||||
" mvn %w[i], %w[i]\n"
|
||||
" stclr %w[i], %[v]")
|
||||
: [i] "+r" (w0), [v] "+Q" (v->counter)
|
||||
: "r" (x1)
|
||||
: "x30");
|
||||
}
|
||||
|
||||
static inline void atomic_sub(int i, atomic_t *v)
|
||||
{
|
||||
register int w0 asm ("w0") = i;
|
||||
register atomic_t *x1 asm ("x1") = v;
|
||||
|
||||
asm volatile(ARM64_LSE_ATOMIC_INSN(
|
||||
/* LL/SC */
|
||||
" nop\n"
|
||||
__LL_SC_ATOMIC(sub),
|
||||
/* LSE atomics */
|
||||
" neg %w[i], %w[i]\n"
|
||||
" stadd %w[i], %[v]")
|
||||
: [i] "+r" (w0), [v] "+Q" (v->counter)
|
||||
: "r" (x1)
|
||||
: "x30");
|
||||
}
|
||||
|
||||
static inline int atomic_sub_return(int i, atomic_t *v)
|
||||
{
|
||||
register int w0 asm ("w0") = i;
|
||||
register atomic_t *x1 asm ("x1") = v;
|
||||
|
||||
asm volatile(ARM64_LSE_ATOMIC_INSN(
|
||||
/* LL/SC */
|
||||
" nop\n"
|
||||
__LL_SC_ATOMIC(sub_return)
|
||||
" nop",
|
||||
/* LSE atomics */
|
||||
" neg %w[i], %w[i]\n"
|
||||
" ldaddal %w[i], w30, %[v]\n"
|
||||
" add %w[i], %w[i], w30")
|
||||
: [i] "+r" (w0), [v] "+Q" (v->counter)
|
||||
: "r" (x1)
|
||||
: "x30", "memory");
|
||||
|
||||
return w0;
|
||||
}
|
||||
|
||||
#undef __LL_SC_ATOMIC
|
||||
|
||||
#define __LL_SC_ATOMIC64(op) __LL_SC_CALL(atomic64_##op)
|
||||
|
||||
static inline void atomic64_andnot(long i, atomic64_t *v)
|
||||
{
|
||||
register long x0 asm ("x0") = i;
|
||||
register atomic64_t *x1 asm ("x1") = v;
|
||||
|
||||
asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC64(andnot),
|
||||
" stclr %[i], %[v]\n")
|
||||
: [i] "+r" (x0), [v] "+Q" (v->counter)
|
||||
: "r" (x1)
|
||||
: "x30");
|
||||
}
|
||||
|
||||
static inline void atomic64_or(long i, atomic64_t *v)
|
||||
{
|
||||
register long x0 asm ("x0") = i;
|
||||
register atomic64_t *x1 asm ("x1") = v;
|
||||
|
||||
asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC64(or),
|
||||
" stset %[i], %[v]\n")
|
||||
: [i] "+r" (x0), [v] "+Q" (v->counter)
|
||||
: "r" (x1)
|
||||
: "x30");
|
||||
}
|
||||
|
||||
static inline void atomic64_xor(long i, atomic64_t *v)
|
||||
{
|
||||
register long x0 asm ("x0") = i;
|
||||
register atomic64_t *x1 asm ("x1") = v;
|
||||
|
||||
asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC64(xor),
|
||||
" steor %[i], %[v]\n")
|
||||
: [i] "+r" (x0), [v] "+Q" (v->counter)
|
||||
: "r" (x1)
|
||||
: "x30");
|
||||
}
|
||||
|
||||
static inline void atomic64_add(long i, atomic64_t *v)
|
||||
{
|
||||
register long x0 asm ("x0") = i;
|
||||
register atomic64_t *x1 asm ("x1") = v;
|
||||
|
||||
asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC64(add),
|
||||
" stadd %[i], %[v]\n")
|
||||
: [i] "+r" (x0), [v] "+Q" (v->counter)
|
||||
: "r" (x1)
|
||||
: "x30");
|
||||
}
|
||||
|
||||
static inline long atomic64_add_return(long i, atomic64_t *v)
|
||||
{
|
||||
register long x0 asm ("x0") = i;
|
||||
register atomic64_t *x1 asm ("x1") = v;
|
||||
|
||||
asm volatile(ARM64_LSE_ATOMIC_INSN(
|
||||
/* LL/SC */
|
||||
" nop\n"
|
||||
__LL_SC_ATOMIC64(add_return),
|
||||
/* LSE atomics */
|
||||
" ldaddal %[i], x30, %[v]\n"
|
||||
" add %[i], %[i], x30")
|
||||
: [i] "+r" (x0), [v] "+Q" (v->counter)
|
||||
: "r" (x1)
|
||||
: "x30", "memory");
|
||||
|
||||
return x0;
|
||||
}
|
||||
|
||||
static inline void atomic64_and(long i, atomic64_t *v)
|
||||
{
|
||||
register long x0 asm ("x0") = i;
|
||||
register atomic64_t *x1 asm ("x1") = v;
|
||||
|
||||
asm volatile(ARM64_LSE_ATOMIC_INSN(
|
||||
/* LL/SC */
|
||||
" nop\n"
|
||||
__LL_SC_ATOMIC64(and),
|
||||
/* LSE atomics */
|
||||
" mvn %[i], %[i]\n"
|
||||
" stclr %[i], %[v]")
|
||||
: [i] "+r" (x0), [v] "+Q" (v->counter)
|
||||
: "r" (x1)
|
||||
: "x30");
|
||||
}
|
||||
|
||||
static inline void atomic64_sub(long i, atomic64_t *v)
|
||||
{
|
||||
register long x0 asm ("x0") = i;
|
||||
register atomic64_t *x1 asm ("x1") = v;
|
||||
|
||||
asm volatile(ARM64_LSE_ATOMIC_INSN(
|
||||
/* LL/SC */
|
||||
" nop\n"
|
||||
__LL_SC_ATOMIC64(sub),
|
||||
/* LSE atomics */
|
||||
" neg %[i], %[i]\n"
|
||||
" stadd %[i], %[v]")
|
||||
: [i] "+r" (x0), [v] "+Q" (v->counter)
|
||||
: "r" (x1)
|
||||
: "x30");
|
||||
}
|
||||
|
||||
static inline long atomic64_sub_return(long i, atomic64_t *v)
|
||||
{
|
||||
register long x0 asm ("x0") = i;
|
||||
register atomic64_t *x1 asm ("x1") = v;
|
||||
|
||||
asm volatile(ARM64_LSE_ATOMIC_INSN(
|
||||
/* LL/SC */
|
||||
" nop\n"
|
||||
__LL_SC_ATOMIC64(sub_return)
|
||||
" nop",
|
||||
/* LSE atomics */
|
||||
" neg %[i], %[i]\n"
|
||||
" ldaddal %[i], x30, %[v]\n"
|
||||
" add %[i], %[i], x30")
|
||||
: [i] "+r" (x0), [v] "+Q" (v->counter)
|
||||
: "r" (x1)
|
||||
: "x30", "memory");
|
||||
|
||||
return x0;
|
||||
}
|
||||
|
||||
static inline long atomic64_dec_if_positive(atomic64_t *v)
|
||||
{
|
||||
register long x0 asm ("x0") = (long)v;
|
||||
|
||||
asm volatile(ARM64_LSE_ATOMIC_INSN(
|
||||
/* LL/SC */
|
||||
" nop\n"
|
||||
__LL_SC_ATOMIC64(dec_if_positive)
|
||||
" nop\n"
|
||||
" nop\n"
|
||||
" nop\n"
|
||||
" nop\n"
|
||||
" nop",
|
||||
/* LSE atomics */
|
||||
"1: ldr x30, %[v]\n"
|
||||
" subs %[ret], x30, #1\n"
|
||||
" b.lt 2f\n"
|
||||
" casal x30, %[ret], %[v]\n"
|
||||
" sub x30, x30, #1\n"
|
||||
" sub x30, x30, %[ret]\n"
|
||||
" cbnz x30, 1b\n"
|
||||
"2:")
|
||||
: [ret] "+&r" (x0), [v] "+Q" (v->counter)
|
||||
:
|
||||
: "x30", "cc", "memory");
|
||||
|
||||
return x0;
|
||||
}
|
||||
|
||||
#undef __LL_SC_ATOMIC64
|
||||
|
||||
#define __LL_SC_CMPXCHG(op) __LL_SC_CALL(__cmpxchg_case_##op)
|
||||
|
||||
#define __CMPXCHG_CASE(w, sz, name, mb, cl...) \
|
||||
static inline unsigned long __cmpxchg_case_##name(volatile void *ptr, \
|
||||
unsigned long old, \
|
||||
unsigned long new) \
|
||||
{ \
|
||||
register unsigned long x0 asm ("x0") = (unsigned long)ptr; \
|
||||
register unsigned long x1 asm ("x1") = old; \
|
||||
register unsigned long x2 asm ("x2") = new; \
|
||||
\
|
||||
asm volatile(ARM64_LSE_ATOMIC_INSN( \
|
||||
/* LL/SC */ \
|
||||
" nop\n" \
|
||||
__LL_SC_CMPXCHG(name) \
|
||||
" nop", \
|
||||
/* LSE atomics */ \
|
||||
" mov " #w "30, %" #w "[old]\n" \
|
||||
" cas" #mb #sz "\t" #w "30, %" #w "[new], %[v]\n" \
|
||||
" mov %" #w "[ret], " #w "30") \
|
||||
: [ret] "+r" (x0), [v] "+Q" (*(unsigned long *)ptr) \
|
||||
: [old] "r" (x1), [new] "r" (x2) \
|
||||
: "x30" , ##cl); \
|
||||
\
|
||||
return x0; \
|
||||
}
|
||||
|
||||
__CMPXCHG_CASE(w, b, 1, )
|
||||
__CMPXCHG_CASE(w, h, 2, )
|
||||
__CMPXCHG_CASE(w, , 4, )
|
||||
__CMPXCHG_CASE(x, , 8, )
|
||||
__CMPXCHG_CASE(w, b, mb_1, al, "memory")
|
||||
__CMPXCHG_CASE(w, h, mb_2, al, "memory")
|
||||
__CMPXCHG_CASE(w, , mb_4, al, "memory")
|
||||
__CMPXCHG_CASE(x, , mb_8, al, "memory")
|
||||
|
||||
#undef __LL_SC_CMPXCHG
|
||||
#undef __CMPXCHG_CASE
|
||||
|
||||
#define __LL_SC_CMPXCHG_DBL(op) __LL_SC_CALL(__cmpxchg_double##op)
|
||||
|
||||
#define __CMPXCHG_DBL(name, mb, cl...) \
|
||||
static inline int __cmpxchg_double##name(unsigned long old1, \
|
||||
unsigned long old2, \
|
||||
unsigned long new1, \
|
||||
unsigned long new2, \
|
||||
volatile void *ptr) \
|
||||
{ \
|
||||
unsigned long oldval1 = old1; \
|
||||
unsigned long oldval2 = old2; \
|
||||
register unsigned long x0 asm ("x0") = old1; \
|
||||
register unsigned long x1 asm ("x1") = old2; \
|
||||
register unsigned long x2 asm ("x2") = new1; \
|
||||
register unsigned long x3 asm ("x3") = new2; \
|
||||
register unsigned long x4 asm ("x4") = (unsigned long)ptr; \
|
||||
\
|
||||
asm volatile(ARM64_LSE_ATOMIC_INSN( \
|
||||
/* LL/SC */ \
|
||||
" nop\n" \
|
||||
" nop\n" \
|
||||
" nop\n" \
|
||||
__LL_SC_CMPXCHG_DBL(name), \
|
||||
/* LSE atomics */ \
|
||||
" casp" #mb "\t%[old1], %[old2], %[new1], %[new2], %[v]\n"\
|
||||
" eor %[old1], %[old1], %[oldval1]\n" \
|
||||
" eor %[old2], %[old2], %[oldval2]\n" \
|
||||
" orr %[old1], %[old1], %[old2]") \
|
||||
: [old1] "+r" (x0), [old2] "+r" (x1), \
|
||||
[v] "+Q" (*(unsigned long *)ptr) \
|
||||
: [new1] "r" (x2), [new2] "r" (x3), [ptr] "r" (x4), \
|
||||
[oldval1] "r" (oldval1), [oldval2] "r" (oldval2) \
|
||||
: "x30" , ##cl); \
|
||||
\
|
||||
return x0; \
|
||||
}
|
||||
|
||||
__CMPXCHG_DBL( , )
|
||||
__CMPXCHG_DBL(_mb, al, "memory")
|
||||
|
||||
#undef __LL_SC_CMPXCHG_DBL
|
||||
#undef __CMPXCHG_DBL
|
||||
|
||||
#endif /* __ASM_ATOMIC_LSE_H */
|
||||
@@ -35,28 +35,6 @@
|
||||
#define dma_rmb() dmb(oshld)
|
||||
#define dma_wmb() dmb(oshst)
|
||||
|
||||
#ifndef CONFIG_SMP
|
||||
#define smp_mb() barrier()
|
||||
#define smp_rmb() barrier()
|
||||
#define smp_wmb() barrier()
|
||||
|
||||
#define smp_store_release(p, v) \
|
||||
do { \
|
||||
compiletime_assert_atomic_type(*p); \
|
||||
barrier(); \
|
||||
WRITE_ONCE(*p, v); \
|
||||
} while (0)
|
||||
|
||||
#define smp_load_acquire(p) \
|
||||
({ \
|
||||
typeof(*p) ___p1 = READ_ONCE(*p); \
|
||||
compiletime_assert_atomic_type(*p); \
|
||||
barrier(); \
|
||||
___p1; \
|
||||
})
|
||||
|
||||
#else
|
||||
|
||||
#define smp_mb() dmb(ish)
|
||||
#define smp_rmb() dmb(ishld)
|
||||
#define smp_wmb() dmb(ishst)
|
||||
@@ -109,8 +87,6 @@ do { \
|
||||
___p1; \
|
||||
})
|
||||
|
||||
#endif
|
||||
|
||||
#define read_barrier_depends() do { } while(0)
|
||||
#define smp_read_barrier_depends() do { } while(0)
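A small acquire/release pairing using the macros this hunk touches; the producer/consumer functions are hypothetical:

#include <asm/barrier.h>

/* Hypothetical single-producer/single-consumer handoff. */
static int payload;
static int ready;

static void producer(int value)
{
	payload = value;
	/* publish payload before ready */
	smp_store_release(&ready, 1);
}

static int consumer(void)
{
	/* pairs with the release above: seeing ready == 1 implies payload is visible */
	if (smp_load_acquire(&ready))
		return payload;
	return -1;
}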
|
||||
|
||||
|
||||
@@ -0,0 +1,64 @@
|
||||
/*
|
||||
* Copyright (C) 2015 ARM Limited
|
||||
* Author: Dave Martin <Dave.Martin@arm.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License version 2 as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#ifndef _ARCH_ARM64_ASM_BUG_H
|
||||
#define _ARCH_ARM64_ASM_BUG_H
|
||||
|
||||
#include <asm/debug-monitors.h>
|
||||
|
||||
#ifdef CONFIG_GENERIC_BUG
|
||||
#define HAVE_ARCH_BUG
|
||||
|
||||
#ifdef CONFIG_DEBUG_BUGVERBOSE
|
||||
#define _BUGVERBOSE_LOCATION(file, line) __BUGVERBOSE_LOCATION(file, line)
|
||||
#define __BUGVERBOSE_LOCATION(file, line) \
|
||||
".pushsection .rodata.str,\"aMS\",@progbits,1\n" \
|
||||
"2: .string \"" file "\"\n\t" \
|
||||
".popsection\n\t" \
|
||||
\
|
||||
".long 2b - 0b\n\t" \
|
||||
".short " #line "\n\t"
|
||||
#else
|
||||
#define _BUGVERBOSE_LOCATION(file, line)
|
||||
#endif
|
||||
|
||||
#define _BUG_FLAGS(flags) __BUG_FLAGS(flags)
|
||||
|
||||
#define __BUG_FLAGS(flags) asm volatile ( \
|
||||
".pushsection __bug_table,\"a\"\n\t" \
|
||||
".align 2\n\t" \
|
||||
"0: .long 1f - 0b\n\t" \
|
||||
_BUGVERBOSE_LOCATION(__FILE__, __LINE__) \
|
||||
".short " #flags "\n\t" \
|
||||
".popsection\n" \
|
||||
\
|
||||
"1: brk %[imm]" \
|
||||
:: [imm] "i" (BUG_BRK_IMM) \
|
||||
)
|
||||
|
||||
#define BUG() do { \
|
||||
_BUG_FLAGS(0); \
|
||||
unreachable(); \
|
||||
} while (0)
|
||||
|
||||
#define __WARN_TAINT(taint) _BUG_FLAGS(BUGFLAG_TAINT(taint))
|
||||
|
||||
#endif /* ! CONFIG_GENERIC_BUG */
|
||||
|
||||
#include <asm-generic/bug.h>
|
||||
|
||||
#endif /* ! _ARCH_ARM64_ASM_BUG_H */
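The inline asm above emits one record per BUG()/WARN() site into __bug_table. Read back by the generic code, each record has roughly the following shape; this is a sketch only (the struct name is invented), include/asm-generic/bug.h under GENERIC_BUG_RELATIVE_POINTERS is the authoritative layout:

/* Sketch of one __bug_table record as emitted by __BUG_FLAGS() above. */
struct bug_entry_sketch {
	int		bug_addr_disp;	/* "0: .long 1f - 0b": offset to the brk */
#ifdef CONFIG_DEBUG_BUGVERBOSE
	int		file_disp;	/* ".long 2b - 0b": offset to the file name */
	unsigned short	line;		/* ".short __LINE__" */
#endif
	unsigned short	flags;		/* 0 for BUG(), BUGFLAG_TAINT(...) for WARN */
};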
|
||||
@@ -21,7 +21,9 @@
|
||||
#include <linux/bug.h>
|
||||
#include <linux/mmdebug.h>
|
||||
|
||||
#include <asm/atomic.h>
|
||||
#include <asm/barrier.h>
|
||||
#include <asm/lse.h>
|
||||
|
||||
static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
|
||||
{
|
||||
@@ -29,37 +31,73 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
|
||||
|
||||
switch (size) {
|
||||
case 1:
|
||||
asm volatile("// __xchg1\n"
|
||||
asm volatile(ARM64_LSE_ATOMIC_INSN(
|
||||
/* LL/SC */
|
||||
" prfm pstl1strm, %2\n"
|
||||
"1: ldxrb %w0, %2\n"
|
||||
" stlxrb %w1, %w3, %2\n"
|
||||
" cbnz %w1, 1b\n"
|
||||
" dmb ish",
|
||||
/* LSE atomics */
|
||||
" nop\n"
|
||||
" nop\n"
|
||||
" swpalb %w3, %w0, %2\n"
|
||||
" nop\n"
|
||||
" nop")
|
||||
: "=&r" (ret), "=&r" (tmp), "+Q" (*(u8 *)ptr)
|
||||
: "r" (x)
|
||||
: "memory");
|
||||
break;
|
||||
case 2:
|
||||
asm volatile("// __xchg2\n"
|
||||
asm volatile(ARM64_LSE_ATOMIC_INSN(
|
||||
/* LL/SC */
|
||||
" prfm pstl1strm, %2\n"
|
||||
"1: ldxrh %w0, %2\n"
|
||||
" stlxrh %w1, %w3, %2\n"
|
||||
" cbnz %w1, 1b\n"
|
||||
" dmb ish",
|
||||
/* LSE atomics */
|
||||
" nop\n"
|
||||
" nop\n"
|
||||
" swpalh %w3, %w0, %2\n"
|
||||
" nop\n"
|
||||
" nop")
|
||||
: "=&r" (ret), "=&r" (tmp), "+Q" (*(u16 *)ptr)
|
||||
: "r" (x)
|
||||
: "memory");
|
||||
break;
|
||||
case 4:
|
||||
asm volatile("// __xchg4\n"
|
||||
asm volatile(ARM64_LSE_ATOMIC_INSN(
|
||||
/* LL/SC */
|
||||
" prfm pstl1strm, %2\n"
|
||||
"1: ldxr %w0, %2\n"
|
||||
" stlxr %w1, %w3, %2\n"
|
||||
" cbnz %w1, 1b\n"
|
||||
" dmb ish",
|
||||
/* LSE atomics */
|
||||
" nop\n"
|
||||
" nop\n"
|
||||
" swpal %w3, %w0, %2\n"
|
||||
" nop\n"
|
||||
" nop")
|
||||
: "=&r" (ret), "=&r" (tmp), "+Q" (*(u32 *)ptr)
|
||||
: "r" (x)
|
||||
: "memory");
|
||||
break;
|
||||
case 8:
|
||||
asm volatile("// __xchg8\n"
|
||||
asm volatile(ARM64_LSE_ATOMIC_INSN(
|
||||
/* LL/SC */
|
||||
" prfm pstl1strm, %2\n"
|
||||
"1: ldxr %0, %2\n"
|
||||
" stlxr %w1, %3, %2\n"
|
||||
" cbnz %w1, 1b\n"
|
||||
" dmb ish",
|
||||
/* LSE atomics */
|
||||
" nop\n"
|
||||
" nop\n"
|
||||
" swpal %3, %0, %2\n"
|
||||
" nop\n"
|
||||
" nop")
|
||||
: "=&r" (ret), "=&r" (tmp), "+Q" (*(u64 *)ptr)
|
||||
: "r" (x)
|
||||
: "memory");
|
||||
@@ -68,7 +106,6 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
|
||||
BUILD_BUG();
|
||||
}
|
||||
|
||||
smp_mb();
|
||||
return ret;
|
||||
}
|
||||
|
||||
@@ -83,131 +120,39 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
|
||||
static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
|
||||
unsigned long new, int size)
|
||||
{
|
||||
unsigned long oldval = 0, res;
|
||||
|
||||
switch (size) {
|
||||
case 1:
|
||||
do {
|
||||
asm volatile("// __cmpxchg1\n"
|
||||
" ldxrb %w1, %2\n"
|
||||
" mov %w0, #0\n"
|
||||
" cmp %w1, %w3\n"
|
||||
" b.ne 1f\n"
|
||||
" stxrb %w0, %w4, %2\n"
|
||||
"1:\n"
|
||||
: "=&r" (res), "=&r" (oldval), "+Q" (*(u8 *)ptr)
|
||||
: "Ir" (old), "r" (new)
|
||||
: "cc");
|
||||
} while (res);
|
||||
break;
|
||||
|
||||
return __cmpxchg_case_1(ptr, (u8)old, new);
|
||||
case 2:
|
||||
do {
|
||||
asm volatile("// __cmpxchg2\n"
|
||||
" ldxrh %w1, %2\n"
|
||||
" mov %w0, #0\n"
|
||||
" cmp %w1, %w3\n"
|
||||
" b.ne 1f\n"
|
||||
" stxrh %w0, %w4, %2\n"
|
||||
"1:\n"
|
||||
: "=&r" (res), "=&r" (oldval), "+Q" (*(u16 *)ptr)
|
||||
: "Ir" (old), "r" (new)
|
||||
: "cc");
|
||||
} while (res);
|
||||
break;
|
||||
|
||||
return __cmpxchg_case_2(ptr, (u16)old, new);
|
||||
case 4:
|
||||
do {
|
||||
asm volatile("// __cmpxchg4\n"
|
||||
" ldxr %w1, %2\n"
|
||||
" mov %w0, #0\n"
|
||||
" cmp %w1, %w3\n"
|
||||
" b.ne 1f\n"
|
||||
" stxr %w0, %w4, %2\n"
|
||||
"1:\n"
|
||||
: "=&r" (res), "=&r" (oldval), "+Q" (*(u32 *)ptr)
|
||||
: "Ir" (old), "r" (new)
|
||||
: "cc");
|
||||
} while (res);
|
||||
break;
|
||||
|
||||
return __cmpxchg_case_4(ptr, old, new);
|
||||
case 8:
|
||||
do {
|
||||
asm volatile("// __cmpxchg8\n"
|
||||
" ldxr %1, %2\n"
|
||||
" mov %w0, #0\n"
|
||||
" cmp %1, %3\n"
|
||||
" b.ne 1f\n"
|
||||
" stxr %w0, %4, %2\n"
|
||||
"1:\n"
|
||||
: "=&r" (res), "=&r" (oldval), "+Q" (*(u64 *)ptr)
|
||||
: "Ir" (old), "r" (new)
|
||||
: "cc");
|
||||
} while (res);
|
||||
break;
|
||||
|
||||
return __cmpxchg_case_8(ptr, old, new);
|
||||
default:
|
||||
BUILD_BUG();
|
||||
}
|
||||
|
||||
return oldval;
|
||||
}
|
||||
|
||||
#define system_has_cmpxchg_double() 1
|
||||
|
||||
static inline int __cmpxchg_double(volatile void *ptr1, volatile void *ptr2,
|
||||
unsigned long old1, unsigned long old2,
|
||||
unsigned long new1, unsigned long new2, int size)
|
||||
{
|
||||
unsigned long loop, lost;
|
||||
|
||||
switch (size) {
|
||||
case 8:
|
||||
VM_BUG_ON((unsigned long *)ptr2 - (unsigned long *)ptr1 != 1);
|
||||
do {
|
||||
asm volatile("// __cmpxchg_double8\n"
|
||||
" ldxp %0, %1, %2\n"
|
||||
" eor %0, %0, %3\n"
|
||||
" eor %1, %1, %4\n"
|
||||
" orr %1, %0, %1\n"
|
||||
" mov %w0, #0\n"
|
||||
" cbnz %1, 1f\n"
|
||||
" stxp %w0, %5, %6, %2\n"
|
||||
"1:\n"
|
||||
: "=&r"(loop), "=&r"(lost), "+Q" (*(u64 *)ptr1)
|
||||
: "r" (old1), "r"(old2), "r"(new1), "r"(new2));
|
||||
} while (loop);
|
||||
break;
|
||||
default:
|
||||
BUILD_BUG();
|
||||
}
|
||||
|
||||
return !lost;
|
||||
}
|
||||
|
||||
static inline int __cmpxchg_double_mb(volatile void *ptr1, volatile void *ptr2,
|
||||
unsigned long old1, unsigned long old2,
|
||||
unsigned long new1, unsigned long new2, int size)
|
||||
{
|
||||
int ret;
|
||||
|
||||
smp_mb();
|
||||
ret = __cmpxchg_double(ptr1, ptr2, old1, old2, new1, new2, size);
|
||||
smp_mb();
|
||||
|
||||
return ret;
|
||||
unreachable();
|
||||
}
|
||||
|
||||
static inline unsigned long __cmpxchg_mb(volatile void *ptr, unsigned long old,
|
||||
unsigned long new, int size)
|
||||
{
|
||||
unsigned long ret;
|
||||
switch (size) {
|
||||
case 1:
|
||||
return __cmpxchg_case_mb_1(ptr, (u8)old, new);
|
||||
case 2:
|
||||
return __cmpxchg_case_mb_2(ptr, (u16)old, new);
|
||||
case 4:
|
||||
return __cmpxchg_case_mb_4(ptr, old, new);
|
||||
case 8:
|
||||
return __cmpxchg_case_mb_8(ptr, old, new);
|
||||
default:
|
||||
BUILD_BUG();
|
||||
}
|
||||
|
||||
smp_mb();
|
||||
ret = __cmpxchg(ptr, old, new, size);
|
||||
smp_mb();
|
||||
|
||||
return ret;
|
||||
unreachable();
|
||||
}
|
||||
|
||||
#define cmpxchg(ptr, o, n) \
|
||||
@@ -228,21 +173,32 @@ static inline unsigned long __cmpxchg_mb(volatile void *ptr, unsigned long old,
|
||||
__ret; \
|
||||
})
|
||||
|
||||
#define system_has_cmpxchg_double() 1
|
||||
|
||||
#define __cmpxchg_double_check(ptr1, ptr2) \
|
||||
({ \
|
||||
if (sizeof(*(ptr1)) != 8) \
|
||||
BUILD_BUG(); \
|
||||
VM_BUG_ON((unsigned long *)(ptr2) - (unsigned long *)(ptr1) != 1); \
|
||||
})
|
||||
|
||||
#define cmpxchg_double(ptr1, ptr2, o1, o2, n1, n2) \
|
||||
({\
|
||||
int __ret;\
|
||||
__ret = __cmpxchg_double_mb((ptr1), (ptr2), (unsigned long)(o1), \
|
||||
(unsigned long)(o2), (unsigned long)(n1), \
|
||||
(unsigned long)(n2), sizeof(*(ptr1)));\
|
||||
__cmpxchg_double_check(ptr1, ptr2); \
|
||||
__ret = !__cmpxchg_double_mb((unsigned long)(o1), (unsigned long)(o2), \
|
||||
(unsigned long)(n1), (unsigned long)(n2), \
|
||||
ptr1); \
|
||||
__ret; \
|
||||
})
|
||||
|
||||
#define cmpxchg_double_local(ptr1, ptr2, o1, o2, n1, n2) \
|
||||
({\
|
||||
int __ret;\
|
||||
__ret = __cmpxchg_double((ptr1), (ptr2), (unsigned long)(o1), \
|
||||
(unsigned long)(o2), (unsigned long)(n1), \
|
||||
(unsigned long)(n2), sizeof(*(ptr1)));\
|
||||
__cmpxchg_double_check(ptr1, ptr2); \
|
||||
__ret = !__cmpxchg_double((unsigned long)(o1), (unsigned long)(o2), \
|
||||
(unsigned long)(n1), (unsigned long)(n2), \
|
||||
ptr1); \
|
||||
__ret; \
|
||||
})
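For reference, an illustrative caller of the cmpxchg() wrapper defined in this header; the compiler routes it to one of the __cmpxchg_case_* helpers by operand size. The function below is hypothetical:

#include <linux/atomic.h>
#include <linux/compiler.h>

/* Illustrative lock-free "record the maximum" update built on cmpxchg(). */
static void update_max(unsigned long *max, unsigned long sample)
{
	unsigned long old = READ_ONCE(*max);

	/* retry until we install sample or observe an even larger value */
	while (sample > old) {
		unsigned long prev = cmpxchg(max, old, sample);

		if (prev == old)
			break;		/* our store won */
		old = prev;		/* lost the race; re-evaluate */
	}
}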
|
||||
|
||||
|
||||
@@ -25,15 +25,20 @@
#define ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE 1
#define ARM64_WORKAROUND_845719 2
#define ARM64_HAS_SYSREG_GIC_CPUIF 3
#define ARM64_HAS_PAN 4
#define ARM64_HAS_LSE_ATOMICS 5

#define ARM64_NCAPS 4
#define ARM64_NCAPS 6

#ifndef __ASSEMBLY__

#include <linux/kernel.h>

struct arm64_cpu_capabilities {
	const char *desc;
	u16 capability;
	bool (*matches)(const struct arm64_cpu_capabilities *);
	void (*enable)(void);
	union {
		struct { /* To be used for erratum handling only */
			u32 midr_model;
@@ -41,8 +46,8 @@ struct arm64_cpu_capabilities {
		};

		struct { /* Feature register checking */
			u64 register_mask;
			u64 register_value;
			int field_pos;
			int min_field_value;
		};
	};
};
@@ -70,6 +75,13 @@ static inline void cpus_set_cap(unsigned int num)
	__set_bit(num, cpu_hwcaps);
}

static inline int __attribute_const__ cpuid_feature_extract_field(u64 features,
								   int field)
{
	return (s64)(features << (64 - 4 - field)) >> (64 - 4);
}


void check_cpu_capabilities(const struct arm64_cpu_capabilities *caps,
			    const char *info);
void check_local_cpu_errata(void);

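The new feature-register fields turn capability detection into a table lookup: read the relevant ID register, sign-extract the 4-bit field at field_pos with cpuid_feature_extract_field(), and compare it against min_field_value. A simplified sketch of a matcher and a table entry in that style (the PAN field position and the helper name are illustrative assumptions, not a quote of the in-tree code):

/* Illustrative matcher: "is ID_AA64MMFR1_EL1.<field_pos> >= min_field_value?" */
static bool has_mmfr1_feature_sketch(const struct arm64_cpu_capabilities *entry)
{
	u64 mmfr1 = read_cpuid(ID_AA64MMFR1_EL1);	/* assumed accessor */

	return cpuid_feature_extract_field(mmfr1, entry->field_pos) >=
	       entry->min_field_value;
}

static const struct arm64_cpu_capabilities sketch_features[] = {
	{
		.desc = "Privileged Access Never (sketch entry)",
		.capability = ARM64_HAS_PAN,
		.matches = has_mmfr1_feature_sketch,
		.field_pos = 20,	/* ID_AA64MMFR1_EL1.PAN, assumed */
		.min_field_value = 1,
	},
	{},	/* sentinel */
};
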
@@ -81,9 +81,6 @@
#define ID_AA64MMFR0_BIGEND(mmfr0) \
	(((mmfr0) & ID_AA64MMFR0_BIGEND_MASK) >> ID_AA64MMFR0_BIGEND_SHIFT)

#define SCTLR_EL1_CP15BEN (0x1 << 5)
#define SCTLR_EL1_SED (0x1 << 8)

#ifndef __ASSEMBLY__

/*

@@ -18,6 +18,12 @@

#ifdef __KERNEL__

#include <linux/errno.h>
#include <linux/types.h>
#include <asm/esr.h>
#include <asm/insn.h>
#include <asm/ptrace.h>

/* Low-level stepping controls. */
#define DBG_MDSCR_SS (1 << 0)
#define DBG_SPSR_SS (1 << 21)
@@ -38,12 +44,7 @@
/*
 * Break point instruction encoding
 */
#define BREAK_INSTR_SIZE 4

/*
 * ESR values expected for dynamic and compile time BRK instruction
 */
#define DBG_ESR_VAL_BRK(x) (0xf2000000 | ((x) & 0xfffff))
#define BREAK_INSTR_SIZE AARCH64_INSN_SIZE

/*
 * #imm16 values used for BRK instruction generation
@@ -51,10 +52,12 @@
 * 0x100: for triggering a fault on purpose (reserved)
 * 0x400: for dynamic BRK instruction
 * 0x401: for compile time BRK instruction
 * 0x800: kernel-mode BUG() and WARN() traps
 */
#define FAULT_BRK_IMM 0x100
#define KGDB_DYN_DBG_BRK_IMM 0x400
#define KGDB_COMPILED_DBG_BRK_IMM 0x401
#define BUG_BRK_IMM 0x800

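Each BRK #imm16 value above hands the trap to a different consumer; the new 0x800 slot is what lets BUG() and WARN() be implemented as a single brk instruction instead of a deliberately faulting load. A small illustrative helper (not kernel code) that simply restates the table:

/* Illustrative only: which subsystem owns a given BRK immediate? */
static const char *brk_imm_owner_sketch(unsigned int imm16)
{
	switch (imm16) {
	case FAULT_BRK_IMM:		return "deliberate fault (reserved)";
	case KGDB_DYN_DBG_BRK_IMM:	return "KGDB dynamic breakpoint";
	case KGDB_COMPILED_DBG_BRK_IMM:	return "KGDB compile-time breakpoint";
	case BUG_BRK_IMM:		return "kernel BUG()/WARN() trap";
	default:			return "unallocated";
	}
}
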
/*
 * BRK instruction encoding
@@ -68,25 +71,10 @@
 */
#define AARCH64_BREAK_FAULT (AARCH64_BREAK_MON | (FAULT_BRK_IMM << 5))

/*
 * Extract byte from BRK instruction
 */
#define KGDB_DYN_DBG_BRK_INS_BYTE(x) \
	((((AARCH64_BREAK_MON) & 0xffe0001f) >> (x * 8)) & 0xff)

/*
 * Extract byte from BRK #imm16
 */
#define KGBD_DYN_DBG_BRK_IMM_BYTE(x) \
	(((((KGDB_DYN_DBG_BRK_IMM) & 0xffff) << 5) >> (x * 8)) & 0xff)

#define KGDB_DYN_DBG_BRK_BYTE(x) \
	(KGDB_DYN_DBG_BRK_INS_BYTE(x) | KGBD_DYN_DBG_BRK_IMM_BYTE(x))

#define KGDB_DYN_BRK_INS_BYTE0 KGDB_DYN_DBG_BRK_BYTE(0)
#define KGDB_DYN_BRK_INS_BYTE1 KGDB_DYN_DBG_BRK_BYTE(1)
#define KGDB_DYN_BRK_INS_BYTE2 KGDB_DYN_DBG_BRK_BYTE(2)
#define KGDB_DYN_BRK_INS_BYTE3 KGDB_DYN_DBG_BRK_BYTE(3)
#define AARCH64_BREAK_KGDB_DYN_DBG \
	(AARCH64_BREAK_MON | (KGDB_DYN_DBG_BRK_IMM << 5))
#define KGDB_DYN_BRK_INS_BYTE(x) \
	((AARCH64_BREAK_KGDB_DYN_DBG >> (8 * (x))) & 0xff)

#define CACHE_FLUSH_IS_SAFE 1

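The four per-byte extraction macros collapse into one: AARCH64_BREAK_KGDB_DYN_DBG is the complete 32-bit opcode for the dynamic KGDB breakpoint (BRK #0x400), and KGDB_DYN_BRK_INS_BYTE(x) simply picks out byte x of that word. A hedged sketch that prints the little-endian byte sequence KGDB would patch in (the function name is invented):

/* Illustrative: dump the dynamic KGDB BRK opcode byte by byte (LE order). */
static void dump_kgdb_brk_sketch(void)
{
	pr_info("kgdb brk #0x400 = %02x %02x %02x %02x\n",
		KGDB_DYN_BRK_INS_BYTE(0), KGDB_DYN_BRK_INS_BYTE(1),
		KGDB_DYN_BRK_INS_BYTE(2), KGDB_DYN_BRK_INS_BYTE(3));
}
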
@@ -127,13 +115,13 @@ void unregister_break_hook(struct break_hook *hook);

u8 debug_monitors_arch(void);

enum debug_el {
enum dbg_active_el {
	DBG_ACTIVE_EL0 = 0,
	DBG_ACTIVE_EL1,
};

void enable_debug_monitors(enum debug_el el);
void disable_debug_monitors(enum debug_el el);
void enable_debug_monitors(enum dbg_active_el el);
void disable_debug_monitors(enum dbg_active_el el);

void user_rewind_single_step(struct task_struct *task);
void user_fastforward_single_step(struct task_struct *task);

@@ -18,6 +18,8 @@
#ifndef __ASM_ESR_H
#define __ASM_ESR_H

#include <asm/memory.h>

#define ESR_ELx_EC_UNKNOWN (0x00)
#define ESR_ELx_EC_WFx (0x01)
/* Unallocated EC: 0x02 */
@@ -99,6 +101,13 @@
#define ESR_ELx_WFx_ISS_WFE (UL(1) << 0)
#define ESR_ELx_xVC_IMM_MASK ((1UL << 16) - 1)

/* ESR value templates for specific events */

/* BRK instruction trap from AArch64 state */
#define ESR_ELx_VAL_BRK64(imm) \
	((ESR_ELx_EC_BRK64 << ESR_ELx_EC_SHIFT) | ESR_ELx_IL | \
	 ((imm) & 0xffff))

#ifndef __ASSEMBLY__
#include <asm/types.h>

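ESR_ELx_VAL_BRK64() composes the exact syndrome value the CPU reports for a 64-bit BRK with a given immediate (EC=BRK64, the IL bit, and the imm16 in the low bits), so a handler can match on the whole ESR rather than decode it piecemeal. A minimal hedged sketch, reusing BUG_BRK_IMM from debug-monitors.h above:

/* Illustrative check: was this exception a "brk #0x800", i.e. BUG()/WARN()? */
static bool esr_is_bug_brk_sketch(unsigned int esr)
{
	return esr == ESR_ELx_VAL_BRK64(BUG_BRK_IMM);
}
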
@@ -18,7 +18,13 @@
#ifndef __ASM_EXCEPTION_H
#define __ASM_EXCEPTION_H

#include <linux/ftrace.h>

#define __exception __attribute__((section(".exception.text")))
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
#define __exception_irq_entry __irq_entry
#else
#define __exception_irq_entry __exception
#endif

#endif /* __ASM_EXCEPTION_H */

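__exception_irq_entry exists so the function-graph tracer can recognise IRQ entry points: with CONFIG_FUNCTION_GRAPH_TRACER it expands to __irq_entry and places the function in the tracer's irqentry section, otherwise it falls back to the plain __exception annotation. Usage is just a tag on the low-level handler; a minimal sketch with an invented handler name:

/* Sketch only: annotate the C-level IRQ entry point for the tracer. */
static void __exception_irq_entry sketch_handle_arch_irq(struct pt_regs *regs)
{
	/* ...read the interrupt controller and dispatch the IRQ... */
}
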
@@ -8,7 +8,7 @@
 * Copyright (C) 1998 Ingo Molnar
 * Copyright (C) 2013 Mark Salter <msalter@redhat.com>
 *
 * Adapted from arch/x86_64 version.
 * Adapted from arch/x86 version.
 *
 */

@@ -20,10 +20,17 @@

#include <linux/futex.h>
#include <linux/uaccess.h>

#include <asm/alternative.h>
#include <asm/cpufeature.h>
#include <asm/errno.h>
#include <asm/sysreg.h>

#define __futex_atomic_op(insn, ret, oldval, uaddr, tmp, oparg) \
	asm volatile( \
	ALTERNATIVE("nop", SET_PSTATE_PAN(0), ARM64_HAS_PAN, \
		    CONFIG_ARM64_PAN) \
	" prfm pstl1strm, %2\n" \
	"1: ldxr %w1, %2\n" \
	insn "\n" \
	"2: stlxr %w3, %w0, %2\n" \
@@ -39,6 +46,8 @@
	" .align 3\n" \
	" .quad 1b, 4b, 2b, 4b\n" \
	" .popsection\n" \
	ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_HAS_PAN, \
		    CONFIG_ARM64_PAN) \
	: "=&r" (ret), "=&r" (oldval), "+Q" (*uaddr), "=&r" (tmp) \
	: "r" (oparg), "Ir" (-EFAULT) \
	: "memory")

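The two ALTERNATIVE()s bracket the user access: on PAN-capable CPUs, PSTATE.PAN is cleared just before the exclusive load/store pair touches the user word and set again immediately afterwards, so the rest of the kernel keeps faulting on stray user-pointer dereferences. The macro is instantiated once per FUTEX_OP_* with the arithmetic instruction to splice in; a hedged sketch of the ADD case (the wrapper function and local names are illustrative):

/* Hedged sketch: how a FUTEX_OP_ADD case would expand the template above. */
static int futex_add_sketch(u32 __user *uaddr, u32 oparg)
{
	int ret, oldval, tmp;

	__futex_atomic_op("add %w0, %w1, %w4",		/* new = old + oparg */
			  ret, oldval, uaddr, tmp, oparg);

	return ret ? ret : oldval;
}
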
@@ -112,6 +121,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
		return -EFAULT;

	asm volatile("// futex_atomic_cmpxchg_inatomic\n"
	" prfm pstl1strm, %2\n"
	"1: ldxr %w1, %2\n"
	" sub %w3, %w1, %w4\n"
	" cbnz %w3, 3f\n"

@@ -24,9 +24,7 @@

typedef struct {
	unsigned int __softirq_pending;
#ifdef CONFIG_SMP
	unsigned int ipi_irqs[NR_IPI];
#endif
} ____cacheline_aligned irq_cpustat_t;

#include <linux/irq_cpustat.h> /* Standard mappings for irq_cpustat_t above */
@@ -34,10 +32,8 @@ typedef struct {
#define __inc_irq_stat(cpu, member) __IRQ_STAT(cpu, member)++
#define __get_irq_stat(cpu, member) __IRQ_STAT(cpu, member)

#ifdef CONFIG_SMP
u64 smp_irq_stat_cpu(unsigned int cpu);
#define arch_irq_stat_cpu smp_irq_stat_cpu
#endif

#define __ARCH_IRQ_EXIT_IRQS_DISABLED 1

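irq_cpustat_t is the per-CPU container behind these helpers: __inc_irq_stat() bumps a member for a given CPU and __get_irq_stat() reads it back, which is how the arm64 SMP code accounts IPIs per type. A small hedged sketch of that accounting (function name invented, in the style of arch/arm64/kernel/smp.c):

/* Illustrative per-CPU IPI accounting using the helpers above. */
static void note_ipi_sketch(unsigned int cpu, int ipinr)
{
	__inc_irq_stat(cpu, ipi_irqs[ipinr]);
	pr_debug("CPU%u has now taken %u IPIs of type %d\n",
		 cpu, __get_irq_stat(cpu, ipi_irqs[ipinr]), ipinr);
}
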
Some files were not shown because too many files have changed in this diff.