Merge tag 'for-linus' of git://git.armlinux.org.uk/~rmk/linux-arm
Pull ARM updates from Russell King:
"Updates for IRQ stacks and virtually mapped stack support, and ftrace:
- Support for IRQ and vmap'ed stacks
This covers all the work related to implementing IRQ stacks and
vmap'ed stacks for all 32-bit ARM systems that are currently
supported by the Linux kernel, including RiscPC and Footbridge. It
has been submitted for review in four different waves:
- IRQ stacks support for v7 SMP systems [0]
- vmap'ed stacks support for v7 SMP systems [1]
- extending support for both IRQ stacks and vmap'ed stacks for all
remaining configurations, including v6/v7 SMP multiplatform
kernels and uniprocessor configurations including v7-M [2]
- fixes and updates in [3]
- ftrace fixes and cleanups
Make all flavors of ftrace available on all builds, regardless of
ISA choice, unwinder choice or compiler [4]:
- use ADD not POP where possible
- fix a couple of Thumb2 related issues
- enable HAVE_FUNCTION_GRAPH_FP_TEST for robustness
- enable the graph tracer with the EABI unwinder
- avoid clobbering frame pointer registers to make Clang happy
- Fixes for the above"
[0] https://lore.kernel.org/linux-arm-kernel/20211115084732.3704393-1-ardb@kernel.org/
[1] https://lore.kernel.org/linux-arm-kernel/20211122092816.2865873-1-ardb@kernel.org/
[2] https://lore.kernel.org/linux-arm-kernel/20211206164659.1495084-1-ardb@kernel.org/
[3] https://lore.kernel.org/linux-arm-kernel/20220124174744.1054712-1-ardb@kernel.org/
[4] https://lore.kernel.org/linux-arm-kernel/20220203082204.1176734-1-ardb@kernel.org/
* tag 'for-linus' of git://git.armlinux.org.uk/~rmk/linux-arm: (62 commits)
ARM: fix building NOMMU ARMv4/v5 kernels
ARM: unwind: only permit stack switch when unwinding call_with_stack()
ARM: Revert "unwind: dump exception stack from calling frame"
ARM: entry: fix unwinder problems caused by IRQ stacks
ARM: unwind: set frame.pc correctly for current-thread unwinding
ARM: 9184/1: return_address: disable again for CONFIG_ARM_UNWIND=y
ARM: 9183/1: unwind: avoid spurious warnings on bogus code addresses
Revert "ARM: 9144/1: forbid ftrace with clang and thumb2_kernel"
ARM: mach-bcm: disable ftrace in SMC invocation routines
ARM: cacheflush: avoid clobbering the frame pointer
ARM: kprobes: treat R7 as the frame pointer register in Thumb2 builds
ARM: ftrace: enable the graph tracer with the EABI unwinder
ARM: unwind: track location of LR value in stack frame
ARM: ftrace: enable HAVE_FUNCTION_GRAPH_FP_TEST
ARM: ftrace: avoid unnecessary literal loads
ARM: ftrace: avoid redundant loads or clobbering IP
ARM: ftrace: use trampolines to keep .init.text in branching range
ARM: ftrace: use ADD not POP to counter PUSH at entry
ARM: ftrace: ensure that ADR takes the Thumb bit into account
ARM: make get_current() and __my_cpu_offset() __always_inline
...
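The headline feature above, vmap'ed stacks, boils down to allocating kernel stacks from the vmalloc area so that an overflow hits an unmapped guard page instead of silently corrupting adjacent memory. A rough conceptual sketch follows; it is not the kernel's actual allocation path (that lives in kernel/fork.c behind CONFIG_VMAP_STACK), and the helper names are made up for illustration.

/*
 * Conceptual sketch only: what a "vmap'ed" kernel stack buys you.
 * Not the kernel's real implementation; helper names are hypothetical.
 */
#include <linux/vmalloc.h>
#include <linux/thread_info.h>	/* THREAD_SIZE */

static void *demo_alloc_vmapped_stack(void)
{
	/*
	 * A vmalloc allocation is virtually contiguous and bracketed by
	 * unmapped guard pages, so running off the end of the stack
	 * faults immediately rather than scribbling over a neighbour.
	 */
	return vmalloc(THREAD_SIZE);
}

static void demo_free_vmapped_stack(void *stack)
{
	vfree(stack);
}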
@@ -60,6 +60,7 @@ config ARM
select GENERIC_CPU_AUTOPROBE
select GENERIC_EARLY_IOREMAP
select GENERIC_IDLE_POLL_SETUP
select GENERIC_IRQ_MULTI_HANDLER
select GENERIC_IRQ_PROBE
select GENERIC_IRQ_SHOW
select GENERIC_IRQ_SHOW_LEVEL
@@ -94,8 +95,8 @@ config ARM
select HAVE_EXIT_THREAD
select HAVE_FAST_GUP if ARM_LPAE
select HAVE_FTRACE_MCOUNT_RECORD if !XIP_KERNEL
select HAVE_FUNCTION_GRAPH_TRACER if !THUMB2_KERNEL && !CC_IS_CLANG
select HAVE_FUNCTION_TRACER if !XIP_KERNEL && !(THUMB2_KERNEL && CC_IS_CLANG)
select HAVE_FUNCTION_GRAPH_TRACER
select HAVE_FUNCTION_TRACER if !XIP_KERNEL
select HAVE_GCC_PLUGINS
select HAVE_HW_BREAKPOINT if PERF_EVENTS && (CPU_V6 || CPU_V6K || CPU_V7)
select HAVE_IRQ_TIME_ACCOUNTING
@@ -129,7 +130,8 @@ config ARM
select PERF_USE_VMALLOC
select RTC_LIB
select SYS_SUPPORTS_APM_EMULATION
select THREAD_INFO_IN_TASK if CURRENT_POINTER_IN_TPIDRURO
select THREAD_INFO_IN_TASK
select HAVE_ARCH_VMAP_STACK if MMU && ARM_HAS_GROUP_RELOCS
select TRACE_IRQFLAGS_SUPPORT if !CPU_V7M
# Above selects are sorted alphabetically; please add new ones
# according to that. Thanks.
@@ -141,6 +143,17 @@ config ARM
Europe. There is an ARM Linux project with a web page at
<http://www.arm.linux.org.uk/>.

config ARM_HAS_GROUP_RELOCS
def_bool y
depends on !LD_IS_LLD || LLD_VERSION >= 140000
depends on !COMPILE_TEST
help
Whether or not to use R_ARM_ALU_PC_Gn or R_ARM_LDR_PC_Gn group
relocations, which have been around for a long time, but were not
supported in LLD until version 14. The combined range is -/+ 256 MiB,
which is usually sufficient, but not for allyesconfig, so we disable
this feature when doing compile testing.
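For readers unfamiliar with group relocations, the access pattern this help text refers to is the three-instruction ADD/ADD/LDR sequence that this series later introduces as the LOAD_SYM_ARMV6() macro in asm/insn.h. A minimal standalone sketch of the same idea is shown below; the symbol my_counter and the wrapper are hypothetical, and the sequence assumes ARM (not Thumb) code on a pre-v7 build.

/*
 * Illustrative sketch only, modelled on the LOAD_SYM_ARMV6() macro that
 * appears later in this diff. 'my_counter' and the helper are made up.
 * Each instruction carries one group relocation, so the assembled
 * sequence can reach a symbol within -/+ 256 MiB of the program counter
 * without needing a literal pool load.
 */
extern unsigned long my_counter;

static inline unsigned long load_my_counter(void)
{
	unsigned long val;

	asm("	.globl	my_counter				\n\t"
	    "	.reloc	10f, R_ARM_ALU_PC_G0_NC, my_counter	\n\t"
	    "	.reloc	11f, R_ARM_ALU_PC_G1_NC, my_counter	\n\t"
	    "	.reloc	12f, R_ARM_LDR_PC_G2, my_counter	\n\t"
	    "10:	sub	%0, pc, #8	\n\t"	/* first group of offset bits */
	    "11:	sub	%0, %0, #4	\n\t"	/* second group */
	    "12:	ldr	%0, [%0, #0]	\n\t"	/* final group plus the load */
	    : "=r" (val));
	return val;
}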
config ARM_HAS_SG_CHAIN
bool

@@ -229,9 +242,6 @@ config GENERIC_ISA_DMA
config FIQ
bool

config NEED_RET_TO_USER
bool

config ARCH_MTD_XIP
bool

@@ -325,7 +335,6 @@ config ARCH_MULTIPLATFORM
select AUTO_ZRELADDR
select TIMER_OF
select COMMON_CLK
select GENERIC_IRQ_MULTI_HANDLER
select HAVE_PCI
select PCI_DOMAINS_GENERIC if PCI
select SPARSE_IRQ
@@ -349,7 +358,6 @@ config ARCH_EP93XX
select ARM_AMBA
imply ARM_PATCH_PHYS_VIRT
select ARM_VIC
select GENERIC_IRQ_MULTI_HANDLER
select AUTO_ZRELADDR
select CLKSRC_MMIO
select CPU_ARM920T
@@ -374,7 +382,6 @@ config ARCH_IOP32X
select CPU_XSCALE
select GPIO_IOP
select GPIOLIB
select NEED_RET_TO_USER
select FORCE_PCI
select PLAT_IOP
help
@@ -388,7 +395,6 @@ config ARCH_IXP4XX
select ARCH_SUPPORTS_BIG_ENDIAN
select CPU_XSCALE
select DMABOUNCE if PCI
select GENERIC_IRQ_MULTI_HANDLER
select GPIO_IXP4XX
select GPIOLIB
select HAVE_PCI
@@ -404,7 +410,6 @@ config ARCH_IXP4XX
config ARCH_DOVE
bool "Marvell Dove"
select CPU_PJ4
select GENERIC_IRQ_MULTI_HANDLER
select GPIOLIB
select HAVE_PCI
select MVEBU_MBUS
@@ -427,7 +432,6 @@ config ARCH_PXA
select CLKSRC_MMIO
select TIMER_OF
select CPU_XSCALE if !CPU_XSC3
select GENERIC_IRQ_MULTI_HANDLER
select GPIO_PXA
select GPIOLIB
select IRQ_DOMAIN
@@ -466,7 +470,6 @@ config ARCH_SA1100
select COMMON_CLK
select CPU_FREQ
select CPU_SA1100
select GENERIC_IRQ_MULTI_HANDLER
select GPIOLIB
select IRQ_DOMAIN
select ISA
@@ -481,7 +484,6 @@ config ARCH_S3C24XX
select CLKSRC_SAMSUNG_PWM
select GPIO_SAMSUNG
select GPIOLIB
select GENERIC_IRQ_MULTI_HANDLER
select NEED_MACH_IO_H
select S3C2410_WATCHDOG
select SAMSUNG_ATAGS
@@ -499,7 +501,6 @@ config ARCH_OMAP1
select ARCH_OMAP
select CLKSRC_MMIO
select GENERIC_IRQ_CHIP
select GENERIC_IRQ_MULTI_HANDLER
select GPIOLIB
select HAVE_LEGACY_CLK
select IRQ_DOMAIN
@@ -1166,7 +1167,12 @@ config SMP_ON_UP

config CURRENT_POINTER_IN_TPIDRURO
def_bool y
depends on SMP && CPU_32v6K && !CPU_V6
depends on CPU_32v6K && !CPU_V6

config IRQSTACKS
def_bool y
select HAVE_IRQ_EXIT_ON_IRQ_STACK
select HAVE_SOFTIRQ_ON_OWN_STACK

config ARM_CPU_TOPOLOGY
bool "Support cpu topology definition"
@@ -1607,10 +1613,14 @@ config XEN
help
Say Y if you want to run Linux in a Virtual Machine on Xen on ARM.

config CC_HAVE_STACKPROTECTOR_TLS
def_bool $(cc-option,-mtp=cp15 -mstack-protector-guard=tls -mstack-protector-guard-offset=0)

config STACKPROTECTOR_PER_TASK
bool "Use a unique stack canary value for each task"
depends on GCC_PLUGINS && STACKPROTECTOR && THREAD_INFO_IN_TASK && !XIP_DEFLATED_DATA
select GCC_PLUGIN_ARM_SSP_PER_TASK
depends on STACKPROTECTOR && CURRENT_POINTER_IN_TPIDRURO && !XIP_DEFLATED_DATA
depends on GCC_PLUGINS || CC_HAVE_STACKPROTECTOR_TLS
select GCC_PLUGIN_ARM_SSP_PER_TASK if !CC_HAVE_STACKPROTECTOR_TLS
default y
help
Due to the fact that GCC uses an ordinary symbol reference from
@@ -65,7 +65,7 @@ config UNWINDER_FRAME_POINTER

config UNWINDER_ARM
bool "ARM EABI stack unwinder"
depends on AEABI && !FUNCTION_GRAPH_TRACER
depends on AEABI
select ARM_UNWIND
help
This option enables stack unwinding support in the kernel
@@ -275,6 +275,14 @@ endif

ifeq ($(CONFIG_STACKPROTECTOR_PER_TASK),y)
prepare: stack_protector_prepare
ifeq ($(CONFIG_CC_HAVE_STACKPROTECTOR_TLS),y)
stack_protector_prepare: prepare0
$(eval KBUILD_CFLAGS += \
-mstack-protector-guard=tls \
-mstack-protector-guard-offset=$(shell \
awk '{if ($$2 == "TSK_STACK_CANARY") print $$3;}'\
include/generated/asm-offsets.h))
else
stack_protector_prepare: prepare0
$(eval SSP_PLUGIN_CFLAGS := \
-fplugin-arg-arm_ssp_per_task_plugin-offset=$(shell \
@@ -283,6 +291,7 @@ stack_protector_prepare: prepare0
$(eval KBUILD_CFLAGS += $(SSP_PLUGIN_CFLAGS))
$(eval GCC_PLUGINS_CFLAGS += $(SSP_PLUGIN_CFLAGS))
endif
endif

all: $(notdir $(KBUILD_IMAGE))
@@ -92,17 +92,13 @@ ifeq ($(CONFIG_USE_OF),y)
OBJS += $(libfdt_objs) fdt_check_mem_start.o
endif

# -fstack-protector-strong triggers protection checks in this code,
# but it is being used too early to link to meaningful stack_chk logic.
$(foreach o, $(libfdt_objs) atags_to_fdt.o fdt_check_mem_start.o, \
$(eval CFLAGS_$(o) := -I $(srctree)/scripts/dtc/libfdt -fno-stack-protector))

targets := vmlinux vmlinux.lds piggy_data piggy.o \
head.o $(OBJS)

KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING

ccflags-y := -fpic $(call cc-option,-mno-single-pic-base,) -fno-builtin \
-I$(srctree)/scripts/dtc/libfdt -fno-stack-protector \
-I$(obj) $(DISABLE_ARM_SSP_PER_TASK_PLUGIN)
ccflags-remove-$(CONFIG_FUNCTION_TRACER) += -pg
asflags-y := -DZIMAGE
@@ -128,13 +128,6 @@ asmlinkage void __div0(void)
error("Attempting division by 0!");
}

const unsigned long __stack_chk_guard = 0x000a0dff;

void __stack_chk_fail(void)
{
error("stack-protector: Kernel stack is corrupted\n");
}

extern int do_decompress(u8 *input, int len, u8 *output, void (*error)(char *x));
@@ -86,6 +86,10 @@

#define IMM12_MASK 0xfff

/* the frame pointer used for stack unwinding */
ARM( fpreg .req r11 )
THUMB( fpreg .req r7 )

/*
 * Enable and disable interrupts
 */
@@ -209,43 +213,12 @@
.endm
.endr

.macro get_current, rd
#ifdef CONFIG_CURRENT_POINTER_IN_TPIDRURO
mrc p15, 0, \rd, c13, c0, 3 @ get TPIDRURO register
#else
get_thread_info \rd
ldr \rd, [\rd, #TI_TASK]
#endif
.endm

.macro set_current, rn
#ifdef CONFIG_CURRENT_POINTER_IN_TPIDRURO
mcr p15, 0, \rn, c13, c0, 3 @ set TPIDRURO register
#endif
.endm

.macro reload_current, t1:req, t2:req
#ifdef CONFIG_CURRENT_POINTER_IN_TPIDRURO
adr_l \t1, __entry_task @ get __entry_task base address
mrc p15, 0, \t2, c13, c0, 4 @ get per-CPU offset
ldr \t1, [\t1, \t2] @ load variable
mcr p15, 0, \t1, c13, c0, 3 @ store in TPIDRURO
#endif
.endm

/*
 * Get current thread_info.
 */
.macro get_thread_info, rd
#ifdef CONFIG_THREAD_INFO_IN_TASK
/* thread_info is the first member of struct task_struct */
get_current \rd
#else
ARM( mov \rd, sp, lsr #THREAD_SIZE_ORDER + PAGE_SHIFT )
THUMB( mov \rd, sp )
THUMB( lsr \rd, \rd, #THREAD_SIZE_ORDER + PAGE_SHIFT )
mov \rd, \rd, lsl #THREAD_SIZE_ORDER + PAGE_SHIFT
#endif
.endm

/*
@@ -320,6 +293,80 @@
#define ALT_UP_B(label) b label
#endif

/*
 * this_cpu_offset - load the per-CPU offset of this CPU into
 * register 'rd'
 */
.macro this_cpu_offset, rd:req
#ifdef CONFIG_SMP
ALT_SMP(mrc p15, 0, \rd, c13, c0, 4)
#ifdef CONFIG_CPU_V6
ALT_UP_B(.L1_\@)
.L0_\@:
.subsection 1
.L1_\@: ldr_va \rd, __per_cpu_offset
b .L0_\@
.previous
#endif
#else
mov \rd, #0
#endif
.endm

/*
 * set_current - store the task pointer of this CPU's current task
 */
.macro set_current, rn:req, tmp:req
#if defined(CONFIG_CURRENT_POINTER_IN_TPIDRURO) || defined(CONFIG_SMP)
9998: mcr p15, 0, \rn, c13, c0, 3 @ set TPIDRURO register
#ifdef CONFIG_CPU_V6
ALT_UP_B(.L0_\@)
.subsection 1
.L0_\@: str_va \rn, __current, \tmp
b .L1_\@
.previous
.L1_\@:
#endif
#else
str_va \rn, __current, \tmp
#endif
.endm

/*
 * get_current - load the task pointer of this CPU's current task
 */
.macro get_current, rd:req
#if defined(CONFIG_CURRENT_POINTER_IN_TPIDRURO) || defined(CONFIG_SMP)
9998: mrc p15, 0, \rd, c13, c0, 3 @ get TPIDRURO register
#ifdef CONFIG_CPU_V6
ALT_UP_B(.L0_\@)
.subsection 1
.L0_\@: ldr_va \rd, __current
b .L1_\@
.previous
.L1_\@:
#endif
#else
ldr_va \rd, __current
#endif
.endm

/*
 * reload_current - reload the task pointer of this CPU's current task
 * into the TLS register
 */
.macro reload_current, t1:req, t2:req
#if defined(CONFIG_CURRENT_POINTER_IN_TPIDRURO) || defined(CONFIG_SMP)
#ifdef CONFIG_CPU_V6
ALT_SMP(nop)
ALT_UP_B(.L0_\@)
#endif
ldr_this_cpu \t1, __entry_task, \t1, \t2
mcr p15, 0, \t1, c13, c0, 3 @ store in TPIDRURO
.L0_\@:
#endif
.endm

/*
 * Instruction barrier
 */
@@ -576,12 +623,12 @@ THUMB( orr \reg , \reg , #PSR_T_BIT )
/*
 * mov_l - move a constant value or [relocated] address into a register
 */
.macro mov_l, dst:req, imm:req
.macro mov_l, dst:req, imm:req, cond
.if __LINUX_ARM_ARCH__ < 7
ldr \dst, =\imm
ldr\cond \dst, =\imm
.else
movw \dst, #:lower16:\imm
movt \dst, #:upper16:\imm
movw\cond \dst, #:lower16:\imm
movt\cond \dst, #:upper16:\imm
.endif
.endm

@@ -619,6 +666,78 @@ THUMB( orr \reg , \reg , #PSR_T_BIT )
__adldst_l str, \src, \sym, \tmp, \cond
.endm

.macro __ldst_va, op, reg, tmp, sym, cond
#if __LINUX_ARM_ARCH__ >= 7 || \
!defined(CONFIG_ARM_HAS_GROUP_RELOCS) || \
(defined(MODULE) && defined(CONFIG_ARM_MODULE_PLTS))
mov_l \tmp, \sym, \cond
\op\cond \reg, [\tmp]
#else
/*
 * Avoid a literal load, by emitting a sequence of ADD/LDR instructions
 * with the appropriate relocations. The combined sequence has a range
 * of -/+ 256 MiB, which should be sufficient for the core kernel and
 * for modules loaded into the module region.
 */
.globl \sym
.reloc .L0_\@, R_ARM_ALU_PC_G0_NC, \sym
.reloc .L1_\@, R_ARM_ALU_PC_G1_NC, \sym
.reloc .L2_\@, R_ARM_LDR_PC_G2, \sym
.L0_\@: sub\cond \tmp, pc, #8
.L1_\@: sub\cond \tmp, \tmp, #4
.L2_\@: \op\cond \reg, [\tmp, #0]
#endif
.endm

/*
 * ldr_va - load a 32-bit word from the virtual address of \sym
 */
.macro ldr_va, rd:req, sym:req, cond
__ldst_va ldr, \rd, \rd, \sym, \cond
.endm

/*
 * str_va - store a 32-bit word to the virtual address of \sym
 */
.macro str_va, rn:req, sym:req, tmp:req, cond
__ldst_va str, \rn, \tmp, \sym, \cond
.endm

/*
 * ldr_this_cpu_armv6 - Load a 32-bit word from the per-CPU variable 'sym',
 * without using a temp register. Supported in ARM mode
 * only.
 */
.macro ldr_this_cpu_armv6, rd:req, sym:req
this_cpu_offset \rd
.globl \sym
.reloc .L0_\@, R_ARM_ALU_PC_G0_NC, \sym
.reloc .L1_\@, R_ARM_ALU_PC_G1_NC, \sym
.reloc .L2_\@, R_ARM_LDR_PC_G2, \sym
add \rd, \rd, pc
.L0_\@: sub \rd, \rd, #4
.L1_\@: sub \rd, \rd, #0
.L2_\@: ldr \rd, [\rd, #4]
.endm

/*
 * ldr_this_cpu - Load a 32-bit word from the per-CPU variable 'sym'
 * into register 'rd', which may be the stack pointer,
 * using 't1' and 't2' as general temp registers. These
 * are permitted to overlap with 'rd' if != sp
 */
.macro ldr_this_cpu, rd:req, sym:req, t1:req, t2:req
#if __LINUX_ARM_ARCH__ >= 7 || \
!defined(CONFIG_ARM_HAS_GROUP_RELOCS) || \
(defined(MODULE) && defined(CONFIG_ARM_MODULE_PLTS))
this_cpu_offset \t1
mov_l \t2, \sym
ldr \rd, [\t1, \t2]
#else
ldr_this_cpu_armv6 \rd, \sym
#endif
.endm

/*
 * rev_l - byte-swap a 32-bit value
 *
@@ -636,4 +755,19 @@ THUMB( orr \reg , \reg , #PSR_T_BIT )
.endif
.endm

/*
 * bl_r - branch and link to register
 *
 * @dst: target to branch to
 * @c: conditional opcode suffix
 */
.macro bl_r, dst:req, c
.if __LINUX_ARM_ARCH__ < 6
mov\c lr, pc
mov\c pc, \dst
.else
blx\c \dst
.endif
.endm

#endif /* __ASM_ASSEMBLER_H__ */
@@ -445,15 +445,10 @@ static inline void __sync_cache_range_r(volatile void *p, size_t size)
 * however some exceptions may exist. Caveat emptor.
 *
 * - The clobber list is dictated by the call to v7_flush_dcache_*.
 * fp is preserved to the stack explicitly prior disabling the cache
 * since adding it to the clobber list is incompatible with having
 * CONFIG_FRAME_POINTER=y. ip is saved as well if ever r12-clobbering
 * trampoline are inserted by the linker and to keep sp 64-bit aligned.
 */
#define v7_exit_coherency_flush(level) \
asm volatile( \
".arch armv7-a \n\t" \
"stmfd sp!, {fp, ip} \n\t" \
"mrc p15, 0, r0, c1, c0, 0 @ get SCTLR \n\t" \
"bic r0, r0, #"__stringify(CR_C)" \n\t" \
"mcr p15, 0, r0, c1, c0, 0 @ set SCTLR \n\t" \
@@ -463,10 +458,9 @@ static inline void __sync_cache_range_r(volatile void *p, size_t size)
"bic r0, r0, #(1 << 6) @ disable local coherency \n\t" \
"mcr p15, 0, r0, c1, c0, 1 @ set ACTLR \n\t" \
"isb \n\t" \
"dsb \n\t" \
"ldmfd sp!, {fp, ip}" \
: : : "r0","r1","r2","r3","r4","r5","r6","r7", \
"r9","r10","lr","memory" )
"dsb" \
: : : "r0","r1","r2","r3","r4","r5","r6", \
"r9","r10","ip","lr","memory" )

void flush_uprobe_xol_access(struct page *page, unsigned long uaddr,
void *kaddr, unsigned long len);
@@ -8,25 +8,18 @@
#define _ASM_ARM_CURRENT_H

#ifndef __ASSEMBLY__
#include <asm/insn.h>

struct task_struct;

static inline void set_current(struct task_struct *cur)
{
if (!IS_ENABLED(CONFIG_CURRENT_POINTER_IN_TPIDRURO))
return;
extern struct task_struct *__current;

/* Set TPIDRURO */
asm("mcr p15, 0, %0, c13, c0, 3" :: "r"(cur) : "memory");
}

#ifdef CONFIG_CURRENT_POINTER_IN_TPIDRURO

static inline struct task_struct *get_current(void)
static __always_inline __attribute_const__ struct task_struct *get_current(void)
{
struct task_struct *cur;

#if __has_builtin(__builtin_thread_pointer) && \
defined(CONFIG_CURRENT_POINTER_IN_TPIDRURO) && \
!(defined(CONFIG_THUMB2_KERNEL) && \
defined(CONFIG_CC_IS_CLANG) && CONFIG_CLANG_VERSION < 130001)
/*
@@ -39,16 +32,39 @@ static inline struct task_struct *get_current(void)
 * https://github.com/ClangBuiltLinux/linux/issues/1485
 */
cur = __builtin_thread_pointer();
#elif defined(CONFIG_CURRENT_POINTER_IN_TPIDRURO) || defined(CONFIG_SMP)
asm("0: mrc p15, 0, %0, c13, c0, 3 \n\t"
#ifdef CONFIG_CPU_V6
"1: \n\t"
" .subsection 1 \n\t"
#if defined(CONFIG_ARM_HAS_GROUP_RELOCS) && \
!(defined(MODULE) && defined(CONFIG_ARM_MODULE_PLTS))
"2: " LOAD_SYM_ARMV6(%0, __current) " \n\t"
" b 1b \n\t"
#else
asm("mrc p15, 0, %0, c13, c0, 3" : "=r"(cur));
"2: ldr %0, 3f \n\t"
" ldr %0, [%0] \n\t"
" b 1b \n\t"
"3: .long __current \n\t"
#endif
" .previous \n\t"
" .pushsection \".alt.smp.init\", \"a\" \n\t"
" .long 0b - . \n\t"
" b . + (2b - 0b) \n\t"
" .popsection \n\t"
#endif
: "=r"(cur));
#elif __LINUX_ARM_ARCH__ >= 7 || \
!defined(CONFIG_ARM_HAS_GROUP_RELOCS) || \
(defined(MODULE) && defined(CONFIG_ARM_MODULE_PLTS))
cur = __current;
#else
asm(LOAD_SYM_ARMV6(%0, __current) : "=r"(cur));
#endif
return cur;
}

#define current get_current()
#else
#include <asm-generic/current.h>
#endif /* CONFIG_CURRENT_POINTER_IN_TPIDRURO */

#endif /* __ASSEMBLY__ */
@@ -61,6 +61,9 @@ typedef struct user_fp elf_fpregset_t;
#define R_ARM_MOVT_ABS 44
#define R_ARM_MOVW_PREL_NC 45
#define R_ARM_MOVT_PREL 46
#define R_ARM_ALU_PC_G0_NC 57
#define R_ARM_ALU_PC_G1_NC 59
#define R_ARM_LDR_PC_G2 63

#define R_ARM_THM_CALL 10
#define R_ARM_THM_JUMP24 30
@@ -1,40 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
#include <asm/assembler.h>

/*
 * Interrupt handling. Preserves r7, r8, r9
 */
.macro arch_irq_handler_default
get_irqnr_preamble r6, lr
1: get_irqnr_and_base r0, r2, r6, lr
movne r1, sp
@
@ routine called with r0 = irq number, r1 = struct pt_regs *
@
badrne lr, 1b
bne asm_do_IRQ

#ifdef CONFIG_SMP
/*
 * XXX
 *
 * this macro assumes that irqstat (r2) and base (r6) are
 * preserved from get_irqnr_and_base above
 */
ALT_SMP(test_for_ipi r0, r2, r6, lr)
ALT_UP_B(9997f)
movne r1, sp
badrne lr, 1b
bne do_IPI
#endif
9997:
.endm

.macro arch_irq_handler, symbol_name
.align 5
.global \symbol_name
\symbol_name:
mov r8, lr
arch_irq_handler_default
ret r8
.endm
@@ -2,6 +2,8 @@
#ifndef _ASM_ARM_FTRACE
#define _ASM_ARM_FTRACE

#define HAVE_FUNCTION_GRAPH_FP_TEST

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
#define ARCH_SUPPORTS_FTRACE_OPS 1
#endif
@@ -48,7 +50,7 @@ void *return_address(unsigned int);

static inline void *return_address(unsigned int level)
{
return NULL;
return NULL;
}

#endif
@@ -1,131 +0,0 @@
/*
 * arch/arm/include/asm/hardware/entry-macro-iomd.S
 *
 * Low-level IRQ helper macros for IOC/IOMD based platforms
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */

/* IOC / IOMD based hardware */
#include <asm/hardware/iomd.h>

.macro get_irqnr_and_base, irqnr, irqstat, base, tmp
ldrb \irqstat, [\base, #IOMD_IRQREQB] @ get high priority first
ldr \tmp, =irq_prio_h
teq \irqstat, #0
#ifdef IOMD_BASE
ldrbeq \irqstat, [\base, #IOMD_DMAREQ] @ get dma
addeq \tmp, \tmp, #256 @ irq_prio_h table size
teqeq \irqstat, #0
bne 2406f
#endif
ldrbeq \irqstat, [\base, #IOMD_IRQREQA] @ get low priority
addeq \tmp, \tmp, #256 @ irq_prio_d table size
teqeq \irqstat, #0
#ifdef IOMD_IRQREQC
ldrbeq \irqstat, [\base, #IOMD_IRQREQC]
addeq \tmp, \tmp, #256 @ irq_prio_l table size
teqeq \irqstat, #0
#endif
#ifdef IOMD_IRQREQD
ldrbeq \irqstat, [\base, #IOMD_IRQREQD]
addeq \tmp, \tmp, #256 @ irq_prio_lc table size
teqeq \irqstat, #0
#endif
2406: ldrbne \irqnr, [\tmp, \irqstat] @ get IRQ number
.endm

/*
 * Interrupt table (incorporates priority). Please note that we
 * rely on the order of these tables (see above code).
 */
.align 5
irq_prio_h: .byte 0, 8, 9, 8,10,10,10,10,11,11,11,11,10,10,10,10
.byte 12, 8, 9, 8,10,10,10,10,11,11,11,11,10,10,10,10
.byte 13,13,13,13,10,10,10,10,11,11,11,11,10,10,10,10
.byte 13,13,13,13,10,10,10,10,11,11,11,11,10,10,10,10
.byte 14,14,14,14,10,10,10,10,11,11,11,11,10,10,10,10
.byte 14,14,14,14,10,10,10,10,11,11,11,11,10,10,10,10
.byte 13,13,13,13,10,10,10,10,11,11,11,11,10,10,10,10
.byte 13,13,13,13,10,10,10,10,11,11,11,11,10,10,10,10
.byte 15,15,15,15,10,10,10,10,11,11,11,11,10,10,10,10
.byte 15,15,15,15,10,10,10,10,11,11,11,11,10,10,10,10
.byte 13,13,13,13,10,10,10,10,11,11,11,11,10,10,10,10
.byte 13,13,13,13,10,10,10,10,11,11,11,11,10,10,10,10
.byte 15,15,15,15,10,10,10,10,11,11,11,11,10,10,10,10
.byte 15,15,15,15,10,10,10,10,11,11,11,11,10,10,10,10
.byte 13,13,13,13,10,10,10,10,11,11,11,11,10,10,10,10
.byte 13,13,13,13,10,10,10,10,11,11,11,11,10,10,10,10
#ifdef IOMD_BASE
irq_prio_d: .byte 0,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16
.byte 20,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16
.byte 21,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16
.byte 21,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16
.byte 22,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16
.byte 22,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16
.byte 21,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16
.byte 21,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16
.byte 23,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16
.byte 23,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16
.byte 21,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16
.byte 21,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16
.byte 22,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16
.byte 22,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16
.byte 21,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16
.byte 21,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16
#endif
irq_prio_l: .byte 0, 0, 1, 0, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3
.byte 4, 0, 1, 0, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3
.byte 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5
.byte 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5
.byte 6, 6, 6, 6, 6, 6, 6, 6, 3, 3, 3, 3, 3, 3, 3, 3
.byte 6, 6, 6, 6, 6, 6, 6, 6, 3, 3, 3, 3, 3, 3, 3, 3
.byte 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5
.byte 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5
.byte 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7
.byte 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7
.byte 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7
.byte 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7
.byte 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7
.byte 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7
.byte 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7
.byte 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7
#ifdef IOMD_IRQREQC
irq_prio_lc: .byte 24,24,25,24,26,26,26,26,27,27,27,27,27,27,27,27
.byte 28,24,25,24,26,26,26,26,27,27,27,27,27,27,27,27
.byte 29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29
.byte 29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29
.byte 30,30,30,30,30,30,30,30,27,27,27,27,27,27,27,27
.byte 30,30,30,30,30,30,30,30,27,27,27,27,27,27,27,27
.byte 29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29
.byte 29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29
.byte 31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31
.byte 31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31
.byte 31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31
.byte 31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31
.byte 31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31
.byte 31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31
.byte 31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31
.byte 31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31
#endif
#ifdef IOMD_IRQREQD
irq_prio_ld: .byte 40,40,41,40,42,42,42,42,43,43,43,43,43,43,43,43
.byte 44,40,41,40,42,42,42,42,43,43,43,43,43,43,43,43
.byte 45,45,45,45,45,45,45,45,45,45,45,45,45,45,45,45
.byte 45,45,45,45,45,45,45,45,45,45,45,45,45,45,45,45
.byte 46,46,46,46,46,46,46,46,43,43,43,43,43,43,43,43
.byte 46,46,46,46,46,46,46,46,43,43,43,43,43,43,43,43
.byte 45,45,45,45,45,45,45,45,45,45,45,45,45,45,45,45
.byte 45,45,45,45,45,45,45,45,45,45,45,45,45,45,45,45
.byte 47,47,47,47,47,47,47,47,47,47,47,47,47,47,47,47
.byte 47,47,47,47,47,47,47,47,47,47,47,47,47,47,47,47
.byte 47,47,47,47,47,47,47,47,47,47,47,47,47,47,47,47
.byte 47,47,47,47,47,47,47,47,47,47,47,47,47,47,47,47
.byte 47,47,47,47,47,47,47,47,47,47,47,47,47,47,47,47
.byte 47,47,47,47,47,47,47,47,47,47,47,47,47,47,47,47
.byte 47,47,47,47,47,47,47,47,47,47,47,47,47,47,47,47
.byte 47,47,47,47,47,47,47,47,47,47,47,47,47,47,47,47
#endif
@@ -2,6 +2,23 @@
#ifndef __ASM_ARM_INSN_H
#define __ASM_ARM_INSN_H

#include <linux/types.h>

/*
 * Avoid a literal load by emitting a sequence of ADD/LDR instructions with the
 * appropriate relocations. The combined sequence has a range of -/+ 256 MiB,
 * which should be sufficient for the core kernel as well as modules loaded
 * into the module region. (Not supported by LLD before release 14)
 */
#define LOAD_SYM_ARMV6(reg, sym) \
" .globl " #sym " \n\t" \
" .reloc 10f, R_ARM_ALU_PC_G0_NC, " #sym " \n\t" \
" .reloc 11f, R_ARM_ALU_PC_G1_NC, " #sym " \n\t" \
" .reloc 12f, R_ARM_LDR_PC_G2, " #sym " \n\t" \
"10: sub " #reg ", pc, #8 \n\t" \
"11: sub " #reg ", " #reg ", #4 \n\t" \
"12: ldr " #reg ", [" #reg ", #0] \n\t"

static inline unsigned long
arm_gen_nop(void)
{
@@ -26,7 +26,6 @@
struct irqaction;
struct pt_regs;

extern void asm_do_IRQ(unsigned int, struct pt_regs *);
void handle_IRQ(unsigned int, struct pt_regs *);
void init_IRQ(void);

@@ -56,9 +56,7 @@ struct machine_desc {
void (*init_time)(void);
void (*init_machine)(void);
void (*init_late)(void);
#ifdef CONFIG_GENERIC_IRQ_MULTI_HANDLER
void (*handle_irq)(struct pt_regs *);
#endif
void (*restart)(enum reboot_mode, const char *);
};
@@ -10,7 +10,7 @@ typedef struct {
#else
int switch_pending;
#endif
unsigned int vmalloc_seq;
atomic_t vmalloc_seq;
unsigned long sigpage;
#ifdef CONFIG_VDSO
unsigned long vdso;

@@ -23,6 +23,16 @@

void __check_vmalloc_seq(struct mm_struct *mm);

#ifdef CONFIG_MMU
static inline void check_vmalloc_seq(struct mm_struct *mm)
{
if (!IS_ENABLED(CONFIG_ARM_LPAE) &&
unlikely(atomic_read(&mm->context.vmalloc_seq) !=
atomic_read(&init_mm.context.vmalloc_seq)))
__check_vmalloc_seq(mm);
}
#endif

#ifdef CONFIG_CPU_HAS_ASID

void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk);
@@ -52,8 +62,7 @@ static inline void a15_erratum_get_cpumask(int this_cpu, struct mm_struct *mm,
static inline void check_and_switch_context(struct mm_struct *mm,
struct task_struct *tsk)
{
if (unlikely(mm->context.vmalloc_seq != init_mm.context.vmalloc_seq))
__check_vmalloc_seq(mm);
check_vmalloc_seq(mm);

if (irqs_disabled())
/*
@@ -129,6 +138,15 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next,
#endif
}

#ifdef CONFIG_VMAP_STACK
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
if (mm != &init_mm)
check_vmalloc_seq(mm);
}
#define enter_lazy_tlb enter_lazy_tlb
#endif

#include <asm-generic/mmu_context.h>

#endif

@@ -147,6 +147,9 @@ extern void copy_page(void *to, const void *from);
#include <asm/pgtable-3level-types.h>
#else
#include <asm/pgtable-2level-types.h>
#ifdef CONFIG_VMAP_STACK
#define ARCH_PAGE_TABLE_SYNC_MASK PGTBL_PMD_MODIFIED
#endif
#endif

#endif /* CONFIG_MMU */
@@ -5,20 +5,27 @@
#ifndef _ASM_ARM_PERCPU_H_
#define _ASM_ARM_PERCPU_H_

#include <asm/insn.h>

register unsigned long current_stack_pointer asm ("sp");

/*
 * Same as asm-generic/percpu.h, except that we store the per cpu offset
 * in the TPIDRPRW. TPIDRPRW only exists on V6K and V7
 */
#if defined(CONFIG_SMP) && !defined(CONFIG_CPU_V6)
#ifdef CONFIG_SMP
static inline void set_my_cpu_offset(unsigned long off)
{
extern unsigned int smp_on_up;

if (IS_ENABLED(CONFIG_CPU_V6) && !smp_on_up)
return;

/* Set TPIDRPRW */
asm volatile("mcr p15, 0, %0, c13, c0, 4" : : "r" (off) : "memory");
}

static inline unsigned long __my_cpu_offset(void)
static __always_inline unsigned long __my_cpu_offset(void)
{
unsigned long off;

@@ -27,8 +34,28 @@ static inline unsigned long __my_cpu_offset(void)
 * We want to allow caching the value, so avoid using volatile and
 * instead use a fake stack read to hazard against barrier().
 */
asm("mrc p15, 0, %0, c13, c0, 4" : "=r" (off)
: "Q" (*(const unsigned long *)current_stack_pointer));
asm("0: mrc p15, 0, %0, c13, c0, 4 \n\t"
#ifdef CONFIG_CPU_V6
"1: \n\t"
" .subsection 1 \n\t"
#if defined(CONFIG_ARM_HAS_GROUP_RELOCS) && \
!(defined(MODULE) && defined(CONFIG_ARM_MODULE_PLTS))
"2: " LOAD_SYM_ARMV6(%0, __per_cpu_offset) " \n\t"
" b 1b \n\t"
#else
"2: ldr %0, 3f \n\t"
" ldr %0, [%0] \n\t"
" b 1b \n\t"
"3: .long __per_cpu_offset \n\t"
#endif
" .previous \n\t"
" .pushsection \".alt.smp.init\", \"a\" \n\t"
" .long 0b - . \n\t"
" b . + (2b - 0b) \n\t"
" .popsection \n\t"
#endif
: "=r" (off)
: "Q" (*(const unsigned long *)current_stack_pointer));

return off;
}

@@ -24,11 +24,6 @@ struct seq_file;
 */
extern void show_ipi_list(struct seq_file *, int);

/*
 * Called from assembly code, this handles an IPI.
 */
asmlinkage void do_IPI(int ipinr, struct pt_regs *regs);

/*
 * Called from C code, this handles an IPI.
 */
Some files were not shown because too many files have changed in this diff.