Mirror of https://github.com/armbian/linux-rockchip.git
Merge tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux
Pull arm64 updates from Catalin Marinas:
"Apart from the core arm64 and perf changes, the Spectre v4 mitigation
touches the arm KVM code and the ACPI PPTT support touches drivers/
(acpi and cacheinfo). I should have the maintainers' acks in place.
Summary:
- Spectre v4 mitigation (Speculative Store Bypass Disable) support
for arm64 using SMC firmware call to set a hardware chicken bit
- ACPI PPTT (Processor Properties Topology Table) parsing support and
enable the feature for arm64
- Report signal frame size to user via auxv (AT_MINSIGSTKSZ). The
primary motivation is Scalable Vector Extensions which requires
more space on the signal frame than the currently defined
MINSIGSTKSZ
- ARM perf patches: allow building arm-cci as module, demote
dev_warn() to dev_dbg() in arm-ccn event_init(), miscellaneous
cleanups
- cmpwait() WFE optimisation to avoid some spurious wakeups
- L1_CACHE_BYTES reverted back to 64 (for performance reasons that
have to do with some network allocations) while keeping
ARCH_DMA_MINALIGN to 128. cache_line_size() returns the actual
hardware Cache Writeback Granule
- Turn LSE atomics on by default in Kconfig
- Kernel fault reporting tidying
- Some #include and miscellaneous cleanups"
* tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux: (53 commits)
arm64: Fix syscall restarting around signal suppressed by tracer
arm64: topology: Avoid checking numa mask for scheduler MC selection
ACPI / PPTT: fix build when CONFIG_ACPI_PPTT is not enabled
arm64: cpu_errata: include required headers
arm64: KVM: Move VCPU_WORKAROUND_2_FLAG macros to the top of the file
arm64: signal: Report signal frame size to userspace via auxv
arm64/sve: Thin out initialisation sanity-checks for sve_max_vl
arm64: KVM: Add ARCH_WORKAROUND_2 discovery through ARCH_FEATURES_FUNC_ID
arm64: KVM: Handle guest's ARCH_WORKAROUND_2 requests
arm64: KVM: Add ARCH_WORKAROUND_2 support for guests
arm64: KVM: Add HYP per-cpu accessors
arm64: ssbd: Add prctl interface for per-thread mitigation
arm64: ssbd: Introduce thread flag to control userspace mitigation
arm64: ssbd: Restore mitigation status on CPU resume
arm64: ssbd: Skip apply_ssbd if not using dynamic mitigation
arm64: ssbd: Add global mitigation state accessor
arm64: Add 'ssbd' command-line option
arm64: Add ARCH_WORKAROUND_2 probing
arm64: Add per-cpu infrastructure to call ARCH_WORKAROUND_2
arm64: Call ARCH_WORKAROUND_2 on transitions between EL0 and EL1
...
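For orientation, the ssbd/ARCH_WORKAROUND_2 commits listed above ultimately toggle the mitigation through an SMCCC 1.1 firmware call, issued from the apply_ssbd entry macro and the CPU errata code. Below is a simplified sketch using the kernel's SMCCC helpers; it illustrates the mechanism and is not the literal code added by these commits.

/* Simplified sketch: flip the firmware SSBD state via the SMCCC 1.1
 * ARCH_WORKAROUND_2 call, using whichever conduit (HVC or SMC) PSCI
 * advertises. Illustration only; the real series also tracks a per-cpu
 * "callback required" flag and the per-task TIF_SSBD bit. */
#include <linux/arm-smccc.h>
#include <linux/psci.h>
#include <linux/types.h>

static void ssbd_firmware_toggle(bool enable)
{
        switch (psci_ops.conduit) {
        case PSCI_CONDUIT_HVC:
                arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_2, enable, NULL);
                break;
        case PSCI_CONDUIT_SMC:
                arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_2, enable, NULL);
                break;
        default:
                /* No conduit available: nothing to call. */
                break;
        }
}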
@@ -4106,6 +4106,23 @@
			expediting. Set to zero to disable automatic
			expediting.

	ssbd=		[ARM64,HW]
			Speculative Store Bypass Disable control

			On CPUs that are vulnerable to the Speculative
			Store Bypass vulnerability and offer a
			firmware based mitigation, this parameter
			indicates how the mitigation should be used:

			force-on:  Unconditionally enable mitigation for
				   for both kernel and userspace
			force-off: Unconditionally disable mitigation for
				   for both kernel and userspace
			kernel:    Always enable mitigation in the
				   kernel, and offer a prctl interface
				   to allow userspace to register its
				   interest in being mitigated too.

	stack_guard_gap=	[MM]
			override the default stack gap protection. The value
			is in page units and it defines how many pages prior
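The "kernel" mode documented above defers the userspace decision to the speculation-control prctl() mentioned in the text. A rough userspace sketch of that interface follows (standard PR_SET_SPECULATION_CTRL / PR_SPEC_STORE_BYPASS usage, not code taken from this diff):

/* Hypothetical userspace sketch: opt the current task into the SSB
 * mitigation through the speculation-control prctl ("kernel" mode).
 * Illustration only; not part of this commit. */
#include <stdio.h>
#include <sys/prctl.h>
#include <linux/prctl.h>

int main(void)
{
        /* Ask the kernel to disable Speculative Store Bypass for this task. */
        if (prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS,
                  PR_SPEC_DISABLE, 0, 0))
                perror("PR_SET_SPECULATION_CTRL");

        /* Read back the per-task state as a PR_SPEC_* flag mask. */
        int state = prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, 0, 0, 0);
        if (state < 0)
                perror("PR_GET_SPECULATION_CTRL");
        else
                printf("SSB speculation control flags: 0x%x\n", (unsigned)state);
        return 0;
}

A task that never issues the prctl keeps the default policy chosen by ssbd= at boot.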
@@ -9,6 +9,7 @@
 * published by the Free Software Foundation.
 */

#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/irqflags.h>
@@ -174,6 +175,7 @@ bool mcpm_is_available(void)
{
	return (platform_ops) ? true : false;
}
EXPORT_SYMBOL_GPL(mcpm_is_available);

/*
 * We can't use regular spinlocks. In the switcher case, it is possible
@@ -325,6 +325,18 @@ static inline bool kvm_arm_harden_branch_predictor(void)
	}
}

#define KVM_SSBD_UNKNOWN		-1
#define KVM_SSBD_FORCE_DISABLE		0
#define KVM_SSBD_KERNEL			1
#define KVM_SSBD_FORCE_ENABLE		2
#define KVM_SSBD_MITIGATED		3

static inline int kvm_arm_have_ssbd(void)
{
	/* No way to detect it yet, pretend it is not there. */
	return KVM_SSBD_UNKNOWN;
}

static inline void kvm_vcpu_load_sysregs(struct kvm_vcpu *vcpu) {}
static inline void kvm_vcpu_put_sysregs(struct kvm_vcpu *vcpu) {}
@@ -356,6 +356,11 @@ static inline int kvm_map_vectors(void)
	return 0;
}

static inline int hyp_map_aux_data(void)
{
	return 0;
}

#define kvm_phys_to_vttbr(addr)		(addr)

#endif	/* !__ASSEMBLY__ */
@@ -303,12 +303,10 @@ static void armv6pmu_enable_event(struct perf_event *event)
}

static irqreturn_t
armv6pmu_handle_irq(int irq_num,
		    void *dev)
armv6pmu_handle_irq(struct arm_pmu *cpu_pmu)
{
	unsigned long pmcr = armv6_pmcr_read();
	struct perf_sample_data data;
	struct arm_pmu *cpu_pmu = (struct arm_pmu *)dev;
	struct pmu_hw_events *cpuc = this_cpu_ptr(cpu_pmu->hw_events);
	struct pt_regs *regs;
	int idx;
@@ -946,11 +946,10 @@ static void armv7pmu_disable_event(struct perf_event *event)
	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
static irqreturn_t armv7pmu_handle_irq(struct arm_pmu *cpu_pmu)
{
	u32 pmnc;
	struct perf_sample_data data;
	struct arm_pmu *cpu_pmu = (struct arm_pmu *)dev;
	struct pmu_hw_events *cpuc = this_cpu_ptr(cpu_pmu->hw_events);
	struct pt_regs *regs;
	int idx;
@@ -142,11 +142,10 @@ xscale1_pmnc_counter_has_overflowed(unsigned long pmnc,
}

static irqreturn_t
xscale1pmu_handle_irq(int irq_num, void *dev)
xscale1pmu_handle_irq(struct arm_pmu *cpu_pmu)
{
	unsigned long pmnc;
	struct perf_sample_data data;
	struct arm_pmu *cpu_pmu = (struct arm_pmu *)dev;
	struct pmu_hw_events *cpuc = this_cpu_ptr(cpu_pmu->hw_events);
	struct pt_regs *regs;
	int idx;
@@ -489,11 +488,10 @@ xscale2_pmnc_counter_has_overflowed(unsigned long of_flags,
}

static irqreturn_t
xscale2pmu_handle_irq(int irq_num, void *dev)
xscale2pmu_handle_irq(struct arm_pmu *cpu_pmu)
{
	unsigned long pmnc, of_flags;
	struct perf_sample_data data;
	struct arm_pmu *cpu_pmu = (struct arm_pmu *)dev;
	struct pmu_hw_events *cpuc = this_cpu_ptr(cpu_pmu->hw_events);
	struct pt_regs *regs;
	int idx;
@@ -7,11 +7,13 @@ config ARM64
	select ACPI_REDUCED_HARDWARE_ONLY if ACPI
	select ACPI_MCFG if ACPI
	select ACPI_SPCR_TABLE if ACPI
	select ACPI_PPTT if ACPI
	select ARCH_CLOCKSOURCE_DATA
	select ARCH_HAS_DEBUG_VIRTUAL
	select ARCH_HAS_DEVMEM_IS_ALLOWED
	select ARCH_HAS_ACPI_TABLE_UPGRADE if ACPI
	select ARCH_HAS_ELF_RANDOMIZE
	select ARCH_HAS_FAST_MULTIPLIER
	select ARCH_HAS_FORTIFY_SOURCE
	select ARCH_HAS_GCOV_PROFILE_ALL
	select ARCH_HAS_GIGANTIC_PAGE if (MEMORY_ISOLATION && COMPACTION) || CMA
@@ -923,6 +925,15 @@ config HARDEN_EL2_VECTORS

	  If unsure, say Y.

config ARM64_SSBD
	bool "Speculative Store Bypass Disable" if EXPERT
	default y
	help
	  This enables mitigation of the bypassing of previous stores
	  by speculative loads.

	  If unsure, say Y.

menuconfig ARMV8_DEPRECATED
	bool "Emulate deprecated/obsolete ARMv8 instructions"
	depends on COMPAT
@@ -1034,6 +1045,7 @@ config ARM64_PAN

config ARM64_LSE_ATOMICS
	bool "Atomic instructions"
	default y
	help
	  As part of the Large System Extensions, ARMv8.1 introduces new
	  atomic instructions that are designed specifically to scale in
@@ -1042,7 +1054,8 @@ config ARM64_LSE_ATOMICS
	  Say Y here to make use of these instructions for the in-kernel
	  atomic routines. This incurs a small overhead on CPUs that do
	  not support these instructions and requires the kernel to be
	  built with binutils >= 2.25.
	  built with binutils >= 2.25 in order for the new instructions
	  to be used.

config ARM64_VHE
	bool "Enable support for Virtualization Host Extensions (VHE)"
@@ -86,6 +86,10 @@ static inline bool acpi_has_cpu_in_madt(void)
}

struct acpi_madt_generic_interrupt *acpi_cpu_get_madt_gicc(int cpu);
static inline u32 get_acpi_id_for_cpu(unsigned int cpu)
{
	return acpi_cpu_get_madt_gicc(cpu)->uid;
}

static inline void arch_fix_phys_package_id(int num, u32 slot) { }
void __init acpi_init_cpus(void);
@@ -33,7 +33,7 @@
#define ICACHE_POLICY_VIPT	2
#define ICACHE_POLICY_PIPT	3

#define L1_CACHE_SHIFT		7
#define L1_CACHE_SHIFT		(6)
#define L1_CACHE_BYTES		(1 << L1_CACHE_SHIFT)

/*
@@ -43,7 +43,7 @@
 * cache before the transfer is done, causing old data to be seen by
 * the CPU.
 */
#define ARCH_DMA_MINALIGN	L1_CACHE_BYTES
#define ARCH_DMA_MINALIGN	(128)

#ifndef __ASSEMBLY__
@@ -77,7 +77,7 @@ static inline u32 cache_type_cwg(void)
static inline int cache_line_size(void)
{
	u32 cwg = cache_type_cwg();
	return cwg ? 4 << cwg : L1_CACHE_BYTES;
	return cwg ? 4 << cwg : ARCH_DMA_MINALIGN;
}

#endif	/* __ASSEMBLY__ */
@@ -204,7 +204,9 @@ static inline void __cmpwait_case_##name(volatile void *ptr, \
	unsigned long tmp;						\
									\
	asm volatile(							\
	"	ldxr" #sz "\t%" #w "[tmp], %[v]\n"			\
	"	sevl\n"							\
	"	wfe\n"							\
	"	ldxr" #sz "\t%" #w "[tmp], %[v]\n"			\
	"	eor	%" #w "[tmp], %" #w "[tmp], %" #w "[val]\n"	\
	"	cbnz	%" #w "[tmp], 1f\n"				\
	"	wfe\n"							\
@@ -48,7 +48,8 @@
#define ARM64_HAS_CACHE_IDC		27
#define ARM64_HAS_CACHE_DIC		28
#define ARM64_HW_DBM			29
#define ARM64_SSBD			30

#define ARM64_NCAPS			30
#define ARM64_NCAPS			31

#endif /* __ASM_CPUCAPS_H */
@@ -537,6 +537,28 @@ static inline u64 read_zcr_features(void)
	return zcr;
}

#define ARM64_SSBD_UNKNOWN		-1
#define ARM64_SSBD_FORCE_DISABLE	0
#define ARM64_SSBD_KERNEL		1
#define ARM64_SSBD_FORCE_ENABLE		2
#define ARM64_SSBD_MITIGATED		3

static inline int arm64_get_ssbd_state(void)
{
#ifdef CONFIG_ARM64_SSBD
	extern int ssbd_state;
	return ssbd_state;
#else
	return ARM64_SSBD_UNKNOWN;
#endif
}

#ifdef CONFIG_ARM64_SSBD
void arm64_set_ssbd_mitigation(bool state);
#else
static inline void arm64_set_ssbd_mitigation(bool state) {}
#endif

#endif /* __ASSEMBLY__ */

#endif
@@ -121,6 +121,9 @@

#ifndef __ASSEMBLY__

#include <linux/bug.h>
#include <asm/processor.h> /* for signal_minsigstksz, used by ARCH_DLINFO */

typedef unsigned long elf_greg_t;

#define ELF_NGREG (sizeof(struct user_pt_regs) / sizeof(elf_greg_t))
@@ -148,6 +151,16 @@ typedef struct user_fpsimd_state elf_fpregset_t;
do {									\
	NEW_AUX_ENT(AT_SYSINFO_EHDR,					\
		    (elf_addr_t)current->mm->context.vdso);		\
									\
	/*								\
	 * Should always be nonzero unless there's a kernel bug.	\
	 * If we haven't determined a sensible value to give to	\
	 * userspace, omit the entry:					\
	 */								\
	if (likely(signal_minsigstksz))					\
		NEW_AUX_ENT(AT_MINSIGSTKSZ, signal_minsigstksz);	\
	else								\
		NEW_AUX_ENT(AT_IGNORE, 0);				\
} while (0)

#define ARCH_HAS_SETUP_ADDITIONAL_PAGES
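The ARCH_DLINFO hunk above is what exposes AT_MINSIGSTKSZ to userspace. A rough userspace sketch of consuming it when sizing an alternate signal stack follows; AT_MINSIGSTKSZ may be missing from older libc headers, so the fallback define (51, the auxv tag value used on arm64 by this series) is included for illustration only.

/* Hypothetical userspace sketch: size sigaltstack() from AT_MINSIGSTKSZ,
 * falling back to the legacy MINSIGSTKSZ constant when the aux entry is
 * absent (getauxval() returns 0 for missing or AT_IGNORE entries). */
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/auxv.h>

#ifndef AT_MINSIGSTKSZ
#define AT_MINSIGSTKSZ 51
#endif

int main(void)
{
        unsigned long min = getauxval(AT_MINSIGSTKSZ);
        size_t sz = min ? (size_t)min : (size_t)MINSIGSTKSZ;

        stack_t ss = {
                .ss_sp = malloc(sz),
                .ss_size = sz,
                .ss_flags = 0,
        };
        if (!ss.ss_sp || sigaltstack(&ss, NULL))
                perror("sigaltstack");
        else
                printf("alternate signal stack: %zu bytes\n", sz);
        return 0;
}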
@@ -207,12 +207,14 @@
	str	w\nxtmp, [\xpfpsr, #4]
.endm

.macro sve_load nxbase, xpfpsr, xvqminus1, nxtmp
.macro sve_load nxbase, xpfpsr, xvqminus1, nxtmp, xtmp2
	mrs_s	x\nxtmp, SYS_ZCR_EL1
	bic	x\nxtmp, x\nxtmp, ZCR_ELx_LEN_MASK
	orr	x\nxtmp, x\nxtmp, \xvqminus1
	msr_s	SYS_ZCR_EL1, x\nxtmp	// self-synchronising

	bic	\xtmp2, x\nxtmp, ZCR_ELx_LEN_MASK
	orr	\xtmp2, \xtmp2, \xvqminus1
	cmp	\xtmp2, x\nxtmp
	b.eq	921f
	msr_s	SYS_ZCR_EL1, \xtmp2	// self-synchronising
921:
	_for n, 0, 31, _sve_ldr_v \n, \nxbase, \n - 34
	_sve_ldr_p	0, \nxbase
	_sve_wrffr	0
@@ -20,6 +20,9 @@

#include <asm/virt.h>

#define VCPU_WORKAROUND_2_FLAG_SHIFT	0
#define VCPU_WORKAROUND_2_FLAG		(_AC(1, UL) << VCPU_WORKAROUND_2_FLAG_SHIFT)

#define ARM_EXIT_WITH_SERROR_BIT	31
#define ARM_EXCEPTION_CODE(x)		((x) & ~(1U << ARM_EXIT_WITH_SERROR_BIT))
#define ARM_SERROR_PENDING(x)		!!((x) & (1U << ARM_EXIT_WITH_SERROR_BIT))
@@ -71,14 +74,37 @@ extern u32 __kvm_get_mdcr_el2(void);

extern u32 __init_stage2_translation(void);

/* Home-grown __this_cpu_{ptr,read} variants that always work at HYP */
#define __hyp_this_cpu_ptr(sym)						\
	({								\
		void *__ptr = hyp_symbol_addr(sym);			\
		__ptr += read_sysreg(tpidr_el2);			\
		(typeof(&sym))__ptr;					\
	})

#define __hyp_this_cpu_read(sym)					\
	({								\
		*__hyp_this_cpu_ptr(sym);				\
	})

#else /* __ASSEMBLY__ */

.macro get_host_ctxt reg, tmp
	adr_l	\reg, kvm_host_cpu_state
.macro hyp_adr_this_cpu reg, sym, tmp
	adr_l	\reg, \sym
	mrs	\tmp, tpidr_el2
	add	\reg, \reg, \tmp
.endm

.macro hyp_ldr_this_cpu reg, sym, tmp
	adr_l	\reg, \sym
	mrs	\tmp, tpidr_el2
	ldr	\reg, [\reg, \tmp]
.endm

.macro get_host_ctxt reg, tmp
	hyp_adr_this_cpu \reg, kvm_host_cpu_state, \tmp
.endm

.macro get_vcpu_ptr vcpu, ctxt
	get_host_ctxt \ctxt, \vcpu
	ldr	\vcpu, [\ctxt, #HOST_CONTEXT_VCPU]
@@ -216,6 +216,9 @@ struct kvm_vcpu_arch {
	/* Exception Information */
	struct kvm_vcpu_fault_info fault;

	/* State of various workarounds, see kvm_asm.h for bit assignment */
	u64 workaround_flags;

	/* Guest debug state */
	u64 debug_flags;
@@ -452,6 +455,29 @@ static inline bool kvm_arm_harden_branch_predictor(void)
	return cpus_have_const_cap(ARM64_HARDEN_BRANCH_PREDICTOR);
}

#define KVM_SSBD_UNKNOWN		-1
#define KVM_SSBD_FORCE_DISABLE		0
#define KVM_SSBD_KERNEL			1
#define KVM_SSBD_FORCE_ENABLE		2
#define KVM_SSBD_MITIGATED		3

static inline int kvm_arm_have_ssbd(void)
{
	switch (arm64_get_ssbd_state()) {
	case ARM64_SSBD_FORCE_DISABLE:
		return KVM_SSBD_FORCE_DISABLE;
	case ARM64_SSBD_KERNEL:
		return KVM_SSBD_KERNEL;
	case ARM64_SSBD_FORCE_ENABLE:
		return KVM_SSBD_FORCE_ENABLE;
	case ARM64_SSBD_MITIGATED:
		return KVM_SSBD_MITIGATED;
	case ARM64_SSBD_UNKNOWN:
	default:
		return KVM_SSBD_UNKNOWN;
	}
}

void kvm_vcpu_load_sysregs(struct kvm_vcpu *vcpu);
void kvm_vcpu_put_sysregs(struct kvm_vcpu *vcpu);
@@ -72,7 +72,6 @@
#ifdef __ASSEMBLY__

#include <asm/alternative.h>
#include <asm/cpufeature.h>

/*
 * Convert a kernel VA into a HYP VA.
@@ -473,6 +472,30 @@ static inline int kvm_map_vectors(void)
}
#endif

#ifdef CONFIG_ARM64_SSBD
DECLARE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required);

static inline int hyp_map_aux_data(void)
{
	int cpu, err;

	for_each_possible_cpu(cpu) {
		u64 *ptr;

		ptr = per_cpu_ptr(&arm64_ssbd_callback_required, cpu);
		err = create_hyp_mappings(ptr, ptr + 1, PAGE_HYP);
		if (err)
			return err;
	}
	return 0;
}
#else
static inline int hyp_map_aux_data(void)
{
	return 0;
}
#endif

#define kvm_phys_to_vttbr(addr)		phys_to_ttbr(addr)

#endif /* __ASSEMBLY__ */
@@ -35,6 +35,8 @@
#ifdef __KERNEL__

#include <linux/build_bug.h>
#include <linux/cache.h>
#include <linux/init.h>
#include <linux/stddef.h>
#include <linux/string.h>
@@ -244,6 +246,9 @@ void cpu_enable_pan(const struct arm64_cpu_capabilities *__unused);
void cpu_enable_cache_maint_trap(const struct arm64_cpu_capabilities *__unused);
void cpu_clear_disr(const struct arm64_cpu_capabilities *__unused);

extern unsigned long __ro_after_init signal_minsigstksz; /* sigframe size */
extern void __init minsigstksz_setup(void);

/* Userspace interface for PR_SVE_{SET,GET}_VL prctl()s: */
#define SVE_SET_VL(arg)	sve_set_current_vl(arg)
#define SVE_GET_VL()	sve_get_current_vl()
@@ -94,6 +94,7 @@ void arch_release_task_struct(struct task_struct *tsk);
#define TIF_32BIT		22	/* 32bit process */
#define TIF_SVE			23	/* Scalable Vector Extension in use */
#define TIF_SVE_VL_INHERIT	24	/* Inherit sve_vl_onexec across exec */
#define TIF_SSBD		25	/* Wants SSB mitigation */

#define _TIF_SIGPENDING		(1 << TIF_SIGPENDING)
#define _TIF_NEED_RESCHED	(1 << TIF_NEED_RESCHED)
Some files were not shown because too many files have changed in this diff.