Mirror of https://github.com/armbian/linux-cix.git, synced 2026-01-06 12:30:45 -08:00
Merge branches 'for-next/doc', 'for-next/sve', 'for-next/sysreg', 'for-next/gettimeofday', 'for-next/stacktrace', 'for-next/atomics', 'for-next/el1-exceptions', 'for-next/a510-erratum-2658417', 'for-next/defconfig', 'for-next/tpidr2_el0' and 'for-next/ftrace', remote-tracking branch 'arm64/for-next/perf' into for-next/core
* arm64/for-next/perf:
  arm64: asm/perf_regs.h: Avoid C++-style comment in UAPI header
  arm64/sve: Add Perf extensions documentation
  perf: arm64: Add SVE vector granule register to user regs
  MAINTAINERS: add maintainers for Alibaba's T-Head PMU driver
  drivers/perf: add DDR Sub-System Driveway PMU driver for Yitian 710 SoC
  docs: perf: Add description for Alibaba's T-Head PMU driver

* for-next/doc:
  : Documentation/arm64 updates
  arm64/sve: Document our actual ABI for clearing registers on syscall

* for-next/sve:
  : SVE updates
  arm64/sysreg: Add hwcap for SVE EBF16

* for-next/sysreg: (35 commits)
  : arm64 system registers generation (more conversions)
  arm64/sysreg: Fix a few missed conversions
  arm64/sysreg: Convert ID_AA64AFRn_EL1 to automatic generation
  arm64/sysreg: Convert ID_AA64DFR1_EL1 to automatic generation
  arm64/sysreg: Convert ID_AA64DFR0_EL1 to automatic generation
  arm64/sysreg: Use feature numbering for PMU and SPE revisions
  arm64/sysreg: Add _EL1 into ID_AA64DFR0_EL1 definition names
  arm64/sysreg: Align field names in ID_AA64DFR0_EL1 with architecture
  arm64/sysreg: Add definition for ALLINT
  arm64/sysreg: Convert SCXTNUM_EL1 to automatic generation
  arm64/sysreg: Convert TPIDR_EL1 to automatic generation
  arm64/sysreg: Convert ID_AA64PFR1_EL1 to automatic generation
  arm64/sysreg: Convert ID_AA64PFR0_EL1 to automatic generation
  arm64/sysreg: Convert ID_AA64MMFR2_EL1 to automatic generation
  arm64/sysreg: Convert ID_AA64MMFR1_EL1 to automatic generation
  arm64/sysreg: Convert ID_AA64MMFR0_EL1 to automatic generation
  arm64/sysreg: Convert HCRX_EL2 to automatic generation
  arm64/sysreg: Standardise naming of ID_AA64PFR1_EL1 SME enumeration
  arm64/sysreg: Standardise naming of ID_AA64PFR1_EL1 BTI enumeration
  arm64/sysreg: Standardise naming of ID_AA64PFR1_EL1 fractional version fields
  arm64/sysreg: Standardise naming for MTE feature enumeration
  ...
* for-next/gettimeofday:
  : Use self-synchronising counter access in gettimeofday() (if FEAT_ECV)
  arm64: vdso: use SYS_CNTVCTSS_EL0 for gettimeofday
  arm64: alternative: patch alternatives in the vDSO
  arm64: module: move find_section to header

* for-next/stacktrace:
  : arm64 stacktrace cleanups and improvements
  arm64: stacktrace: track hyp stacks in unwinder's address space
  arm64: stacktrace: track all stack boundaries explicitly
  arm64: stacktrace: remove stack type from fp translator
  arm64: stacktrace: rework stack boundary discovery
  arm64: stacktrace: add stackinfo_on_stack() helper
  arm64: stacktrace: move SDEI stack helpers to stacktrace code
  arm64: stacktrace: rename unwind_next_common() -> unwind_next_frame_record()
  arm64: stacktrace: simplify unwind_next_common()
  arm64: stacktrace: fix kerneldoc comments

* for-next/atomics:
  : arm64 atomics improvements
  arm64: atomic: always inline the assembly
  arm64: atomics: remove LL/SC trampolines

* for-next/el1-exceptions:
  : Improve the reporting of EL1 exceptions
  arm64: rework BTI exception handling
  arm64: rework FPAC exception handling
  arm64: consistently pass ESR_ELx to die()
  arm64: die(): pass 'err' as long
  arm64: report EL1 UNDEFs better

* for-next/a510-erratum-2658417:
  : Cortex-A510: 2658417: remove BF16 support due to incorrect result
  arm64: errata: remove BF16 HWCAP due to incorrect result on Cortex-A510
  arm64: cpufeature: Expose get_arm64_ftr_reg() outside cpufeature.c
  arm64: cpufeature: Force HWCAP to be based on the sysreg visible to user-space

* for-next/defconfig:
  : arm64 defconfig updates
  arm64: defconfig: Add Coresight as module
  arm64: Enable docker support in defconfig
  arm64: defconfig: Enable memory hotplug and hotremove config
  arm64: configs: Enable all PMUs provided by Arm

* for-next/tpidr2_el0:
  : arm64 ptrace() support for TPIDR2_EL0
  kselftest/arm64: Add coverage of TPIDR2_EL0 ptrace interface
  arm64/ptrace: Support access to TPIDR2_EL0
  arm64/ptrace: Document extension of NT_ARM_TLS to cover TPIDR2_EL0
  kselftest/arm64: Add test coverage for NT_ARM_TLS

* for-next/ftrace:
  : arm64 ftrace updates/fixes
  arm64: ftrace: fix module PLTs with mcount
  arm64: module: Remove unused plt_entry_is_initialized()
  arm64: module: Make plt_equals_entry() static
@@ -272,6 +272,9 @@ HWCAP2_WFXT
 HWCAP2_EBF16
     Functionality implied by ID_AA64ISAR1_EL1.BF16 == 0b0010.
 
+HWCAP2_SVE_EBF16
+    Functionality implied by ID_AA64ZFR0_EL1.BF16 == 0b0010.
+
 4. Unused AT_HWCAP bits
 -----------------------
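A userspace consumer checks these flags with getauxval(3) before taking a BFMMLA/extended-BF16 code path. A minimal sketch: the two bit values come from the kernel's uapi <asm/hwcap.h> as of this merge, and are repeated here only so the example stands alone on older headers.

#include <stdio.h>
#include <sys/auxv.h>

#ifndef HWCAP2_EBF16
#define HWCAP2_EBF16     (1UL << 32)
#endif
#ifndef HWCAP2_SVE_EBF16
#define HWCAP2_SVE_EBF16 (1UL << 33)
#endif

int main(void)
{
        unsigned long hwcap2 = getauxval(AT_HWCAP2);

        printf("EBF16:     %s\n", (hwcap2 & HWCAP2_EBF16) ? "yes" : "no");
        printf("SVE_EBF16: %s\n", (hwcap2 & HWCAP2_SVE_EBF16) ? "yes" : "no");
        return 0;
}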
@@ -110,6 +110,8 @@ stable kernels.
 +----------------+-----------------+-----------------+-----------------------------+
 | ARM            | Cortex-A510     | #2441009        | ARM64_ERRATUM_2441009       |
 +----------------+-----------------+-----------------+-----------------------------+
+| ARM            | Cortex-A510     | #2658417        | ARM64_ERRATUM_2658417       |
++----------------+-----------------+-----------------+-----------------------------+
 | ARM            | Cortex-A710     | #2119858        | ARM64_ERRATUM_2119858       |
 +----------------+-----------------+-----------------+-----------------------------+
 | ARM            | Cortex-A710     | #2054223        | ARM64_ERRATUM_2054223       |
@@ -331,6 +331,9 @@ The regset data starts with struct user_za_header, containing:
   been read if a PTRACE_GETREGSET of NT_ARM_ZA were executed for each thread
   when the coredump was generated.
 
+* The NT_ARM_TLS note will be extended to two registers, the second register
+  will contain TPIDR2_EL0 on systems that support SME and will be read as
+  zero with writes ignored otherwise.
 
 9. System runtime configuration
 --------------------------------
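The extended regset is reached through the usual PTRACE_GETREGSET interface. A hedged sketch of a tracer reading it (the helper name and error handling are illustrative, not part of this series):

#include <stdint.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/uio.h>
#include <linux/elf.h>  /* NT_ARM_TLS */

/* Illustrative helper: tls[0] = TPIDR_EL0, tls[1] = TPIDR2_EL0 (SME only). */
static long read_tls_regs(pid_t tid, uint64_t tls[2], size_t *nread)
{
        struct iovec iov = { .iov_base = tls, .iov_len = 2 * sizeof(uint64_t) };
        long ret = ptrace(PTRACE_GETREGSET, tid, (void *)NT_ARM_TLS, &iov);

        /* The kernel trims iov_len, so getting one u64 back means the
         * target has no TPIDR2_EL0 (pre-SME kernel or hardware). */
        *nread = iov.iov_len / sizeof(uint64_t);
        return ret;
}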
@@ -111,7 +111,7 @@ the SVE instruction set architecture.
 
 * On syscall, V0..V31 are preserved (as without SVE). Thus, bits [127:0] of
   Z0..Z31 are preserved. All other bits of Z0..Z31, and all of P0..P15 and FFR
-  become unspecified on return from a syscall.
+  become zero on return from a syscall.
 
 * The SVE registers are not used to pass arguments to or receive results from
   any syscall.
@@ -733,6 +733,19 @@ config ARM64_ERRATUM_2077057
 
           If unsure, say Y.
 
+config ARM64_ERRATUM_2658417
+        bool "Cortex-A510: 2658417: remove BF16 support due to incorrect result"
+        default y
+        help
+          This option adds the workaround for ARM Cortex-A510 erratum 2658417.
+          Affected Cortex-A510 (r0p0 to r1p1) may produce the wrong result for
+          BFMMLA or VMMLA instructions in rare circumstances when a pair of
+          A510 CPUs are using shared neon hardware. As the sharing is not
+          discoverable by the kernel, hide the BF16 HWCAP to indicate that
+          user-space should not be using these instructions.
+
+          If unsure, say Y.
+
 config ARM64_ERRATUM_2119858
         bool "Cortex-A710/X2: 2119858: workaround TRBE overwriting trace data in FILL mode"
         default y
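The workaround itself (elsewhere in the a510-erratum branch, not all of it visible in this excerpt) traps nothing at runtime: it clears the BF16 field in the copy of ID_AA64ISAR1_EL1 that userspace sees, so the BF16 hwcap is simply never advertised. A rough sketch of that idea, not the kernel's exact code; get_arm64_ftr_reg() is the helper this merge exposes for exactly this purpose:

/* Sketch only: hide the user-visible BF16 field on affected parts. */
static void cpu_clear_bf16_from_user_emulation(const struct arm64_cpu_capabilities *unused)
{
        struct arm64_ftr_reg *regp = get_arm64_ftr_reg(SYS_ID_AA64ISAR1_EL1);

        if (regp)
                regp->user_mask &= ~ID_AA64ISAR1_EL1_BF16_MASK;
}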
@@ -18,6 +18,7 @@ CONFIG_NUMA_BALANCING=y
 CONFIG_MEMCG=y
 CONFIG_BLK_CGROUP=y
+CONFIG_CGROUP_PIDS=y
 CONFIG_CGROUP_FREEZER=y
 CONFIG_CGROUP_HUGETLB=y
 CONFIG_CPUSETS=y
 CONFIG_CGROUP_DEVICE=y
@@ -102,6 +103,8 @@ CONFIG_ARM_SCMI_CPUFREQ=y
 CONFIG_ARM_TEGRA186_CPUFREQ=y
 CONFIG_QORIQ_CPUFREQ=y
 CONFIG_ACPI=y
+CONFIG_ACPI_HOTPLUG_MEMORY=y
+CONFIG_ACPI_HMAT=y
 CONFIG_ACPI_APEI=y
 CONFIG_ACPI_APEI_GHES=y
 CONFIG_ACPI_APEI_PCIEAER=y
@@ -126,6 +129,8 @@ CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
 # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
 # CONFIG_COMPAT_BRK is not set
+CONFIG_MEMORY_HOTPLUG=y
+CONFIG_MEMORY_HOTREMOVE=y
 CONFIG_KSM=y
 CONFIG_MEMORY_FAILURE=y
 CONFIG_TRANSPARENT_HUGEPAGE=y
@@ -139,12 +144,16 @@ CONFIG_IP_PNP_DHCP=y
 CONFIG_IP_PNP_BOOTP=y
 CONFIG_IPV6=m
 CONFIG_NETFILTER=y
+CONFIG_BRIDGE_NETFILTER=m
 CONFIG_NF_CONNTRACK=m
 CONFIG_NF_CONNTRACK_EVENTS=y
+CONFIG_NETFILTER_XT_MARK=m
 CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
 CONFIG_NETFILTER_XT_TARGET_LOG=m
 CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m
 CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
+CONFIG_NETFILTER_XT_MATCH_IPVS=m
+CONFIG_IP_VS=m
 CONFIG_IP_NF_IPTABLES=m
 CONFIG_IP_NF_FILTER=m
 CONFIG_IP_NF_TARGET_REJECT=m
@@ -1229,8 +1238,14 @@ CONFIG_PHY_UNIPHIER_USB3=y
 CONFIG_PHY_TEGRA_XUSB=y
 CONFIG_PHY_AM654_SERDES=m
 CONFIG_PHY_J721E_WIZ=m
+CONFIG_ARM_CCI_PMU=m
+CONFIG_ARM_CCN=m
+CONFIG_ARM_CMN=m
+CONFIG_ARM_SMMU_V3_PMU=m
+CONFIG_ARM_DSU_PMU=m
 CONFIG_FSL_IMX8_DDR_PMU=m
 CONFIG_ARM_SPE_PMU=m
+CONFIG_ARM_DMC620_PMU=m
 CONFIG_QCOM_L2_PMU=y
 CONFIG_QCOM_L3_PMU=y
 CONFIG_HISI_PMU=y
@@ -1325,4 +1340,12 @@ CONFIG_DEBUG_FS=y
 # CONFIG_SCHED_DEBUG is not set
 # CONFIG_DEBUG_PREEMPT is not set
 # CONFIG_FTRACE is not set
+CONFIG_CORESIGHT=m
+CONFIG_CORESIGHT_LINK_AND_SINK_TMC=m
+CONFIG_CORESIGHT_CATU=m
+CONFIG_CORESIGHT_SINK_TPIU=m
+CONFIG_CORESIGHT_SINK_ETBV10=m
+CONFIG_CORESIGHT_STM=m
+CONFIG_CORESIGHT_CPU_DEBUG=m
+CONFIG_CORESIGHT_CTI=m
 CONFIG_MEMTEST=y
@@ -384,8 +384,8 @@ alternative_cb_end
 .macro tcr_compute_pa_size, tcr, pos, tmp0, tmp1
         mrs \tmp0, ID_AA64MMFR0_EL1
         // Narrow PARange to fit the PS field in TCR_ELx
-        ubfx \tmp0, \tmp0, #ID_AA64MMFR0_PARANGE_SHIFT, #3
-        mov \tmp1, #ID_AA64MMFR0_PARANGE_MAX
+        ubfx \tmp0, \tmp0, #ID_AA64MMFR0_EL1_PARANGE_SHIFT, #3
+        mov \tmp1, #ID_AA64MMFR0_EL1_PARANGE_MAX
         cmp \tmp0, \tmp1
         csel \tmp0, \tmp1, \tmp0, hi
         bfi \tcr, \tmp0, \pos, #3
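This rename, and the long run of identical ones in the hunks below, is fallout from the for-next/sysreg branch: the ID register field constants are now generated from the descriptions in arch/arm64/tools/sysreg rather than hand-maintained, so every field carries the register's full name (including its _EL1 suffix) and the architecture's own spelling (PMUVer, PMSVer, HAFDBS, VARange, TraceBuffer, VMIDBits, ...). For a field such as ID_AA64MMFR0_EL1.PARange (bits [3:0]), the generated header contains roughly the following; this is an illustrative excerpt, not the verbatim generated file:

/* Sketch of the generated definitions for ID_AA64MMFR0_EL1.PARange */
#define ID_AA64MMFR0_EL1_PARANGE_SHIFT  0
#define ID_AA64MMFR0_EL1_PARANGE_MASK   GENMASK(3, 0)
#define ID_AA64MMFR0_EL1_PARANGE_32     0x0
#define ID_AA64MMFR0_EL1_PARANGE_36     0x1
#define ID_AA64MMFR0_EL1_PARANGE_40     0x2
#define ID_AA64MMFR0_EL1_PARANGE_42     0x3
#define ID_AA64MMFR0_EL1_PARANGE_44     0x4
#define ID_AA64MMFR0_EL1_PARANGE_48     0x5
#define ID_AA64MMFR0_EL1_PARANGE_52     0x6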
@@ -512,7 +512,7 @@ alternative_endif
  */
 .macro reset_pmuserenr_el0, tmpreg
         mrs \tmpreg, id_aa64dfr0_el1
-        sbfx \tmpreg, \tmpreg, #ID_AA64DFR0_PMUVER_SHIFT, #4
+        sbfx \tmpreg, \tmpreg, #ID_AA64DFR0_EL1_PMUVer_SHIFT, #4
         cmp \tmpreg, #1                 // Skip if no PMU present
         b.lt 9000f
         msr pmuserenr_el0, xzr          // Disable PMU access from EL0
@@ -524,7 +524,7 @@ alternative_endif
  */
 .macro reset_amuserenr_el0, tmpreg
         mrs \tmpreg, id_aa64pfr0_el1    // Check ID_AA64PFR0_EL1
-        ubfx \tmpreg, \tmpreg, #ID_AA64PFR0_AMU_SHIFT, #4
+        ubfx \tmpreg, \tmpreg, #ID_AA64PFR0_EL1_AMU_SHIFT, #4
         cbz \tmpreg, .Lskip_\@          // Skip if no AMU present
         msr_s SYS_AMUSERENR_EL0, xzr    // Disable AMU access from EL0
 .Lskip_\@:
@@ -612,7 +612,7 @@ alternative_endif
 .macro offset_ttbr1, ttbr, tmp
 #ifdef CONFIG_ARM64_VA_BITS_52
         mrs_s \tmp, SYS_ID_AA64MMFR2_EL1
-        and \tmp, \tmp, #(0xf << ID_AA64MMFR2_LVA_SHIFT)
+        and \tmp, \tmp, #(0xf << ID_AA64MMFR2_EL1_VARange_SHIFT)
         cbnz \tmp, .Lskipoffs_\@
         orr \ttbr, \ttbr, #TTBR1_BADDR_4852_OFFSET
 .Lskipoffs_\@ :
@@ -12,19 +12,6 @@
 
 #include <linux/stringify.h>
 
-#ifdef CONFIG_ARM64_LSE_ATOMICS
-#define __LL_SC_FALLBACK(asm_ops) \
-" b 3f\n" \
-" .subsection 1\n" \
-"3:\n" \
-asm_ops "\n" \
-" b 4f\n" \
-" .previous\n" \
-"4:\n"
-#else
-#define __LL_SC_FALLBACK(asm_ops) asm_ops
-#endif
-
 #ifndef CONFIG_CC_HAS_K_CONSTRAINT
 #define K
 #endif
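To see what just got deleted: with CONFIG_ARM64_LSE_ATOMICS=y, the wrapper pushed each LL/SC loop into a subsection, branching in and out of it so the loop stayed off the LSE fast path. Expanded by hand for a hypothetical atomic add, a sketch derived from the macro above (not code from this series):

/* Sketch: the asm that __LL_SC_FALLBACK used to emit for an LL/SC add. */
static inline void ll_sc_atomic_add_expanded(int i, atomic_t *v)
{
        unsigned long tmp;
        int result;

        asm volatile("// atomic_add\n"
        " b 3f\n"               // skip the out-of-line body...
        " .subsection 1\n"      // ...which is emitted in a subsection
        "3: prfm pstl1strm, %2\n"
        "1: ldxr %w0, %2\n"
        " add %w0, %w0, %w3\n"
        " stxr %w1, %w0, %2\n"
        " cbnz %w1, 1b\n"
        " b 4f\n"               // branch back to straight-line code
        " .previous\n"
        "4:\n"
        : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
        : "r" (i));
}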
@@ -36,38 +23,36 @@ asm_ops "\n" \
  */
 
 #define ATOMIC_OP(op, asm_op, constraint) \
-static inline void \
+static __always_inline void \
 __ll_sc_atomic_##op(int i, atomic_t *v) \
 { \
         unsigned long tmp; \
         int result; \
 \
         asm volatile("// atomic_" #op "\n" \
-        __LL_SC_FALLBACK( \
         " prfm pstl1strm, %2\n" \
         "1: ldxr %w0, %2\n" \
         " " #asm_op " %w0, %w0, %w3\n" \
         " stxr %w1, %w0, %2\n" \
-        " cbnz %w1, 1b\n") \
+        " cbnz %w1, 1b\n" \
         : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \
         : __stringify(constraint) "r" (i)); \
 }
 
 #define ATOMIC_OP_RETURN(name, mb, acq, rel, cl, op, asm_op, constraint)\
-static inline int \
+static __always_inline int \
 __ll_sc_atomic_##op##_return##name(int i, atomic_t *v) \
 { \
         unsigned long tmp; \
         int result; \
 \
         asm volatile("// atomic_" #op "_return" #name "\n" \
-        __LL_SC_FALLBACK( \
         " prfm pstl1strm, %2\n" \
         "1: ld" #acq "xr %w0, %2\n" \
         " " #asm_op " %w0, %w0, %w3\n" \
         " st" #rel "xr %w1, %w0, %2\n" \
         " cbnz %w1, 1b\n" \
-        " " #mb ) \
+        " " #mb \
         : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \
         : __stringify(constraint) "r" (i) \
         : cl); \
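The other half of the atomics branch, visible in this and every following hunk, swaps plain inline for __always_inline so the compiler can never emit an out-of-line copy of these asm bodies (plain inline is only a hint, e.g. at -O0 or under instrumentation). For reference, the kernel defines the attribute in include/linux/compiler_types.h essentially as:

#define __always_inline inline __attribute__((__always_inline__))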
@@ -76,20 +61,19 @@ __ll_sc_atomic_##op##_return##name(int i, atomic_t *v) \
 }
 
 #define ATOMIC_FETCH_OP(name, mb, acq, rel, cl, op, asm_op, constraint) \
-static inline int \
+static __always_inline int \
 __ll_sc_atomic_fetch_##op##name(int i, atomic_t *v) \
 { \
         unsigned long tmp; \
         int val, result; \
 \
         asm volatile("// atomic_fetch_" #op #name "\n" \
-        __LL_SC_FALLBACK( \
         " prfm pstl1strm, %3\n" \
         "1: ld" #acq "xr %w0, %3\n" \
         " " #asm_op " %w1, %w0, %w4\n" \
         " st" #rel "xr %w2, %w1, %3\n" \
         " cbnz %w2, 1b\n" \
-        " " #mb ) \
+        " " #mb \
         : "=&r" (result), "=&r" (val), "=&r" (tmp), "+Q" (v->counter) \
         : __stringify(constraint) "r" (i) \
         : cl); \
@@ -135,38 +119,36 @@ ATOMIC_OPS(andnot, bic, )
 #undef ATOMIC_OP
 
 #define ATOMIC64_OP(op, asm_op, constraint) \
-static inline void \
+static __always_inline void \
 __ll_sc_atomic64_##op(s64 i, atomic64_t *v) \
 { \
         s64 result; \
         unsigned long tmp; \
 \
         asm volatile("// atomic64_" #op "\n" \
-        __LL_SC_FALLBACK( \
         " prfm pstl1strm, %2\n" \
         "1: ldxr %0, %2\n" \
         " " #asm_op " %0, %0, %3\n" \
         " stxr %w1, %0, %2\n" \
-        " cbnz %w1, 1b") \
+        " cbnz %w1, 1b" \
         : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \
         : __stringify(constraint) "r" (i)); \
 }
 
 #define ATOMIC64_OP_RETURN(name, mb, acq, rel, cl, op, asm_op, constraint)\
-static inline long \
+static __always_inline long \
 __ll_sc_atomic64_##op##_return##name(s64 i, atomic64_t *v) \
 { \
         s64 result; \
         unsigned long tmp; \
 \
         asm volatile("// atomic64_" #op "_return" #name "\n" \
-        __LL_SC_FALLBACK( \
         " prfm pstl1strm, %2\n" \
         "1: ld" #acq "xr %0, %2\n" \
         " " #asm_op " %0, %0, %3\n" \
         " st" #rel "xr %w1, %0, %2\n" \
         " cbnz %w1, 1b\n" \
-        " " #mb ) \
+        " " #mb \
         : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \
         : __stringify(constraint) "r" (i) \
         : cl); \
@@ -175,20 +157,19 @@ __ll_sc_atomic64_##op##_return##name(s64 i, atomic64_t *v) \
 }
 
 #define ATOMIC64_FETCH_OP(name, mb, acq, rel, cl, op, asm_op, constraint)\
-static inline long \
+static __always_inline long \
 __ll_sc_atomic64_fetch_##op##name(s64 i, atomic64_t *v) \
 { \
         s64 result, val; \
         unsigned long tmp; \
 \
         asm volatile("// atomic64_fetch_" #op #name "\n" \
-        __LL_SC_FALLBACK( \
         " prfm pstl1strm, %3\n" \
         "1: ld" #acq "xr %0, %3\n" \
         " " #asm_op " %1, %0, %4\n" \
         " st" #rel "xr %w2, %1, %3\n" \
         " cbnz %w2, 1b\n" \
-        " " #mb ) \
+        " " #mb \
         : "=&r" (result), "=&r" (val), "=&r" (tmp), "+Q" (v->counter) \
         : __stringify(constraint) "r" (i) \
         : cl); \
@@ -233,14 +214,13 @@ ATOMIC64_OPS(andnot, bic, )
 #undef ATOMIC64_OP_RETURN
 #undef ATOMIC64_OP
 
-static inline s64
+static __always_inline s64
 __ll_sc_atomic64_dec_if_positive(atomic64_t *v)
 {
         s64 result;
         unsigned long tmp;
 
         asm volatile("// atomic64_dec_if_positive\n"
-        __LL_SC_FALLBACK(
         " prfm pstl1strm, %2\n"
         "1: ldxr %0, %2\n"
         " subs %0, %0, #1\n"
@@ -248,7 +228,7 @@ __ll_sc_atomic64_dec_if_positive(atomic64_t *v)
         " stlxr %w1, %0, %2\n"
         " cbnz %w1, 1b\n"
         " dmb ish\n"
-        "2:")
+        "2:"
         : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
         :
         : "cc", "memory");
@@ -257,7 +237,7 @@ __ll_sc_atomic64_dec_if_positive(atomic64_t *v)
 }
 
 #define __CMPXCHG_CASE(w, sfx, name, sz, mb, acq, rel, cl, constraint) \
-static inline u##sz \
+static __always_inline u##sz \
 __ll_sc__cmpxchg_case_##name##sz(volatile void *ptr, \
                                          unsigned long old, \
                                          u##sz new) \
@@ -274,7 +254,6 @@ __ll_sc__cmpxchg_case_##name##sz(volatile void *ptr, \
         old = (u##sz)old; \
 \
         asm volatile( \
-        __LL_SC_FALLBACK( \
         " prfm pstl1strm, %[v]\n" \
         "1: ld" #acq "xr" #sfx "\t%" #w "[oldval], %[v]\n" \
         " eor %" #w "[tmp], %" #w "[oldval], %" #w "[old]\n" \
@@ -282,7 +261,7 @@ __ll_sc__cmpxchg_case_##name##sz(volatile void *ptr, \
         " st" #rel "xr" #sfx "\t%w[tmp], %" #w "[new], %[v]\n" \
         " cbnz %w[tmp], 1b\n" \
         " " #mb "\n" \
-        "2:") \
+        "2:" \
         : [tmp] "=&r" (tmp), [oldval] "=&r" (oldval), \
           [v] "+Q" (*(u##sz *)ptr) \
         : [old] __stringify(constraint) "r" (old), [new] "r" (new) \
@@ -316,7 +295,7 @@ __CMPXCHG_CASE( ,  , mb_, 64, dmb ish,  , l, "memory", L)
 #undef __CMPXCHG_CASE
 
 #define __CMPXCHG_DBL(name, mb, rel, cl) \
-static inline long \
+static __always_inline long \
 __ll_sc__cmpxchg_double##name(unsigned long old1, \
                                       unsigned long old2, \
                                       unsigned long new1, \
@@ -326,7 +305,6 @@ __ll_sc__cmpxchg_double##name(unsigned long old1, \
         unsigned long tmp, ret; \
 \
         asm volatile("// __cmpxchg_double" #name "\n" \
-        __LL_SC_FALLBACK( \
         " prfm pstl1strm, %2\n" \
         "1: ldxp %0, %1, %2\n" \
         " eor %0, %0, %3\n" \
@@ -336,7 +314,7 @@ __ll_sc__cmpxchg_double##name(unsigned long old1, \
         " st" #rel "xp %w0, %5, %6, %2\n" \
         " cbnz %w0, 1b\n" \
         " " #mb "\n" \
-        "2:") \
+        "2:" \
         : "=&r" (tmp), "=&r" (ret), "+Q" (*(unsigned long *)ptr) \
         : "r" (old1), "r" (old2), "r" (new1), "r" (new2) \
         : cl); \
@@ -11,7 +11,8 @@
 #define __ASM_ATOMIC_LSE_H
 
 #define ATOMIC_OP(op, asm_op) \
-static inline void __lse_atomic_##op(int i, atomic_t *v) \
+static __always_inline void \
+__lse_atomic_##op(int i, atomic_t *v) \
 { \
         asm volatile( \
         __LSE_PREAMBLE \
@@ -25,7 +26,7 @@ ATOMIC_OP(or, stset)
 ATOMIC_OP(xor, steor)
 ATOMIC_OP(add, stadd)
 
-static inline void __lse_atomic_sub(int i, atomic_t *v)
+static __always_inline void __lse_atomic_sub(int i, atomic_t *v)
 {
         __lse_atomic_add(-i, v);
 }
@@ -33,7 +34,8 @@ static inline void __lse_atomic_sub(int i, atomic_t *v)
 #undef ATOMIC_OP
 
 #define ATOMIC_FETCH_OP(name, mb, op, asm_op, cl...) \
-static inline int __lse_atomic_fetch_##op##name(int i, atomic_t *v) \
+static __always_inline int \
+__lse_atomic_fetch_##op##name(int i, atomic_t *v) \
 { \
         int old; \
 \
@@ -63,7 +65,8 @@ ATOMIC_FETCH_OPS(add, ldadd)
 #undef ATOMIC_FETCH_OPS
 
 #define ATOMIC_FETCH_OP_SUB(name) \
-static inline int __lse_atomic_fetch_sub##name(int i, atomic_t *v) \
+static __always_inline int \
+__lse_atomic_fetch_sub##name(int i, atomic_t *v) \
 { \
         return __lse_atomic_fetch_add##name(-i, v); \
 }
@@ -76,12 +79,14 @@ ATOMIC_FETCH_OP_SUB( )
 #undef ATOMIC_FETCH_OP_SUB
 
 #define ATOMIC_OP_ADD_SUB_RETURN(name) \
-static inline int __lse_atomic_add_return##name(int i, atomic_t *v) \
+static __always_inline int \
+__lse_atomic_add_return##name(int i, atomic_t *v) \
 { \
         return __lse_atomic_fetch_add##name(i, v) + i; \
 } \
 \
-static inline int __lse_atomic_sub_return##name(int i, atomic_t *v) \
+static __always_inline int \
+__lse_atomic_sub_return##name(int i, atomic_t *v) \
 { \
         return __lse_atomic_fetch_sub(i, v) - i; \
 }
@@ -93,13 +98,14 @@ ATOMIC_OP_ADD_SUB_RETURN( )
 
 #undef ATOMIC_OP_ADD_SUB_RETURN
 
-static inline void __lse_atomic_and(int i, atomic_t *v)
+static __always_inline void __lse_atomic_and(int i, atomic_t *v)
 {
         return __lse_atomic_andnot(~i, v);
 }
 
 #define ATOMIC_FETCH_OP_AND(name, mb, cl...) \
-static inline int __lse_atomic_fetch_and##name(int i, atomic_t *v) \
+static __always_inline int \
+__lse_atomic_fetch_and##name(int i, atomic_t *v) \
 { \
         return __lse_atomic_fetch_andnot##name(~i, v); \
 }
@@ -112,7 +118,8 @@ ATOMIC_FETCH_OP_AND( , al, "memory")
 #undef ATOMIC_FETCH_OP_AND
 
 #define ATOMIC64_OP(op, asm_op) \
-static inline void __lse_atomic64_##op(s64 i, atomic64_t *v) \
+static __always_inline void \
+__lse_atomic64_##op(s64 i, atomic64_t *v) \
 { \
         asm volatile( \
         __LSE_PREAMBLE \
@@ -126,7 +133,7 @@ ATOMIC64_OP(or, stset)
 ATOMIC64_OP(xor, steor)
 ATOMIC64_OP(add, stadd)
 
-static inline void __lse_atomic64_sub(s64 i, atomic64_t *v)
+static __always_inline void __lse_atomic64_sub(s64 i, atomic64_t *v)
 {
         __lse_atomic64_add(-i, v);
 }
@@ -134,7 +141,8 @@ static inline void __lse_atomic64_sub(s64 i, atomic64_t *v)
 #undef ATOMIC64_OP
 
 #define ATOMIC64_FETCH_OP(name, mb, op, asm_op, cl...) \
-static inline long __lse_atomic64_fetch_##op##name(s64 i, atomic64_t *v)\
+static __always_inline long \
+__lse_atomic64_fetch_##op##name(s64 i, atomic64_t *v) \
 { \
         s64 old; \
 \
@@ -164,7 +172,8 @@ ATOMIC64_FETCH_OPS(add, ldadd)
 #undef ATOMIC64_FETCH_OPS
 
 #define ATOMIC64_FETCH_OP_SUB(name) \
-static inline long __lse_atomic64_fetch_sub##name(s64 i, atomic64_t *v) \
+static __always_inline long \
+__lse_atomic64_fetch_sub##name(s64 i, atomic64_t *v) \
 { \
         return __lse_atomic64_fetch_add##name(-i, v); \
 }
@@ -177,12 +186,14 @@ ATOMIC64_FETCH_OP_SUB( )
 #undef ATOMIC64_FETCH_OP_SUB
 
 #define ATOMIC64_OP_ADD_SUB_RETURN(name) \
-static inline long __lse_atomic64_add_return##name(s64 i, atomic64_t *v)\
+static __always_inline long \
+__lse_atomic64_add_return##name(s64 i, atomic64_t *v) \
 { \
         return __lse_atomic64_fetch_add##name(i, v) + i; \
 } \
 \
-static inline long __lse_atomic64_sub_return##name(s64 i, atomic64_t *v)\
+static __always_inline long \
+__lse_atomic64_sub_return##name(s64 i, atomic64_t *v) \
 { \
         return __lse_atomic64_fetch_sub##name(i, v) - i; \
 }
@@ -194,13 +205,14 @@ ATOMIC64_OP_ADD_SUB_RETURN( )
 
 #undef ATOMIC64_OP_ADD_SUB_RETURN
 
-static inline void __lse_atomic64_and(s64 i, atomic64_t *v)
+static __always_inline void __lse_atomic64_and(s64 i, atomic64_t *v)
 {
         return __lse_atomic64_andnot(~i, v);
 }
 
 #define ATOMIC64_FETCH_OP_AND(name, mb, cl...) \
-static inline long __lse_atomic64_fetch_and##name(s64 i, atomic64_t *v) \
+static __always_inline long \
+__lse_atomic64_fetch_and##name(s64 i, atomic64_t *v) \
 { \
         return __lse_atomic64_fetch_andnot##name(~i, v); \
 }
@@ -212,7 +224,7 @@ ATOMIC64_FETCH_OP_AND( , al, "memory")
 
 #undef ATOMIC64_FETCH_OP_AND
 
-static inline s64 __lse_atomic64_dec_if_positive(atomic64_t *v)
+static __always_inline s64 __lse_atomic64_dec_if_positive(atomic64_t *v)
 {
         unsigned long tmp;
 
@@ -45,10 +45,6 @@ static inline unsigned int arch_slab_minalign(void)
 #define arch_slab_minalign() arch_slab_minalign()
 #endif
 
-#define CTR_CACHE_MINLINE_MASK \
-        (0xf << CTR_EL0_DMINLINE_SHIFT | \
-         CTR_EL0_IMINLINE_MASK << CTR_EL0_IMINLINE_SHIFT)
-
 #define CTR_L1IP(ctr) SYS_FIELD_GET(CTR_EL0, L1Ip, ctr)
 
 #define ICACHEF_ALIASING 0
@@ -553,7 +553,7 @@ cpuid_feature_cap_perfmon_field(u64 features, int field, u64 cap)
         u64 mask = GENMASK_ULL(field + 3, field);
 
         /* Treat IMPLEMENTATION DEFINED functionality as unimplemented */
-        if (val == ID_AA64DFR0_PMUVER_IMP_DEF)
+        if (val == ID_AA64DFR0_EL1_PMUVer_IMP_DEF)
                 val = 0;
 
         if (val > cap) {
@@ -597,43 +597,43 @@ static inline s64 arm64_ftr_value(const struct arm64_ftr_bits *ftrp, u64 val)
 
 static inline bool id_aa64mmfr0_mixed_endian_el0(u64 mmfr0)
 {
-        return cpuid_feature_extract_unsigned_field(mmfr0, ID_AA64MMFR0_BIGENDEL_SHIFT) == 0x1 ||
-                cpuid_feature_extract_unsigned_field(mmfr0, ID_AA64MMFR0_BIGENDEL0_SHIFT) == 0x1;
+        return cpuid_feature_extract_unsigned_field(mmfr0, ID_AA64MMFR0_EL1_BIGEND_SHIFT) == 0x1 ||
+                cpuid_feature_extract_unsigned_field(mmfr0, ID_AA64MMFR0_EL1_BIGENDEL0_SHIFT) == 0x1;
 }
 
 static inline bool id_aa64pfr0_32bit_el1(u64 pfr0)
 {
-        u32 val = cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_EL1_SHIFT);
+        u32 val = cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_EL1_EL1_SHIFT);
 
-        return val == ID_AA64PFR0_ELx_32BIT_64BIT;
+        return val == ID_AA64PFR0_EL1_ELx_32BIT_64BIT;
 }
 
 static inline bool id_aa64pfr0_32bit_el0(u64 pfr0)
 {
-        u32 val = cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_EL0_SHIFT);
+        u32 val = cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_EL1_EL0_SHIFT);
 
-        return val == ID_AA64PFR0_ELx_32BIT_64BIT;
+        return val == ID_AA64PFR0_EL1_ELx_32BIT_64BIT;
 }
 
 static inline bool id_aa64pfr0_sve(u64 pfr0)
 {
-        u32 val = cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_SVE_SHIFT);
+        u32 val = cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_EL1_SVE_SHIFT);
 
         return val > 0;
 }
 
 static inline bool id_aa64pfr1_sme(u64 pfr1)
 {
-        u32 val = cpuid_feature_extract_unsigned_field(pfr1, ID_AA64PFR1_SME_SHIFT);
+        u32 val = cpuid_feature_extract_unsigned_field(pfr1, ID_AA64PFR1_EL1_SME_SHIFT);
 
         return val > 0;
 }
 
 static inline bool id_aa64pfr1_mte(u64 pfr1)
 {
-        u32 val = cpuid_feature_extract_unsigned_field(pfr1, ID_AA64PFR1_MTE_SHIFT);
+        u32 val = cpuid_feature_extract_unsigned_field(pfr1, ID_AA64PFR1_EL1_MTE_SHIFT);
 
-        return val >= ID_AA64PFR1_MTE;
+        return val >= ID_AA64PFR1_EL1_MTE_MTE2;
 }
 
 void __init setup_cpu_features(void);
@@ -659,7 +659,7 @@ static inline bool supports_csv2p3(int scope)
         pfr0 = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
 
         csv2_val = cpuid_feature_extract_unsigned_field(pfr0,
-                                                        ID_AA64PFR0_CSV2_SHIFT);
+                                                        ID_AA64PFR0_EL1_CSV2_SHIFT);
         return csv2_val == 3;
 }
 
@@ -694,10 +694,10 @@ static inline bool system_supports_4kb_granule(void)
 
         mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
         val = cpuid_feature_extract_unsigned_field(mmfr0,
-                                                ID_AA64MMFR0_TGRAN4_SHIFT);
+                                                ID_AA64MMFR0_EL1_TGRAN4_SHIFT);
 
-        return (val >= ID_AA64MMFR0_TGRAN4_SUPPORTED_MIN) &&
-               (val <= ID_AA64MMFR0_TGRAN4_SUPPORTED_MAX);
+        return (val >= ID_AA64MMFR0_EL1_TGRAN4_SUPPORTED_MIN) &&
+               (val <= ID_AA64MMFR0_EL1_TGRAN4_SUPPORTED_MAX);
 }
 
 static inline bool system_supports_64kb_granule(void)
@@ -707,10 +707,10 @@ static inline bool system_supports_64kb_granule(void)
 
         mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
         val = cpuid_feature_extract_unsigned_field(mmfr0,
-                                                ID_AA64MMFR0_TGRAN64_SHIFT);
+                                                ID_AA64MMFR0_EL1_TGRAN64_SHIFT);
 
-        return (val >= ID_AA64MMFR0_TGRAN64_SUPPORTED_MIN) &&
-               (val <= ID_AA64MMFR0_TGRAN64_SUPPORTED_MAX);
+        return (val >= ID_AA64MMFR0_EL1_TGRAN64_SUPPORTED_MIN) &&
+               (val <= ID_AA64MMFR0_EL1_TGRAN64_SUPPORTED_MAX);
 }
 
 static inline bool system_supports_16kb_granule(void)
@@ -720,10 +720,10 @@ static inline bool system_supports_16kb_granule(void)
 
         mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
         val = cpuid_feature_extract_unsigned_field(mmfr0,
-                                                ID_AA64MMFR0_TGRAN16_SHIFT);
+                                                ID_AA64MMFR0_EL1_TGRAN16_SHIFT);
 
-        return (val >= ID_AA64MMFR0_TGRAN16_SUPPORTED_MIN) &&
-               (val <= ID_AA64MMFR0_TGRAN16_SUPPORTED_MAX);
+        return (val >= ID_AA64MMFR0_EL1_TGRAN16_SUPPORTED_MIN) &&
+               (val <= ID_AA64MMFR0_EL1_TGRAN16_SUPPORTED_MAX);
 }
 
 static inline bool system_supports_mixed_endian_el0(void)
@@ -738,7 +738,7 @@ static inline bool system_supports_mixed_endian(void)
 
         mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
         val = cpuid_feature_extract_unsigned_field(mmfr0,
-                                                ID_AA64MMFR0_BIGENDEL_SHIFT);
+                                                ID_AA64MMFR0_EL1_BIGEND_SHIFT);
 
         return val == 0x1;
 }
@@ -840,13 +840,13 @@ extern int do_emulate_mrs(struct pt_regs *regs, u32 sys_reg, u32 rt);
 static inline u32 id_aa64mmfr0_parange_to_phys_shift(int parange)
 {
         switch (parange) {
-        case ID_AA64MMFR0_PARANGE_32: return 32;
-        case ID_AA64MMFR0_PARANGE_36: return 36;
-        case ID_AA64MMFR0_PARANGE_40: return 40;
-        case ID_AA64MMFR0_PARANGE_42: return 42;
-        case ID_AA64MMFR0_PARANGE_44: return 44;
-        case ID_AA64MMFR0_PARANGE_48: return 48;
-        case ID_AA64MMFR0_PARANGE_52: return 52;
+        case ID_AA64MMFR0_EL1_PARANGE_32: return 32;
+        case ID_AA64MMFR0_EL1_PARANGE_36: return 36;
+        case ID_AA64MMFR0_EL1_PARANGE_40: return 40;
+        case ID_AA64MMFR0_EL1_PARANGE_42: return 42;
+        case ID_AA64MMFR0_EL1_PARANGE_44: return 44;
+        case ID_AA64MMFR0_EL1_PARANGE_48: return 48;
+        case ID_AA64MMFR0_EL1_PARANGE_52: return 52;
         /*
          * A future PE could use a value unknown to the kernel.
          * However, by the "D10.1.4 Principles of the ID scheme
@@ -868,14 +868,14 @@ static inline bool cpu_has_hw_af(void)
 
         mmfr1 = read_cpuid(ID_AA64MMFR1_EL1);
         return cpuid_feature_extract_unsigned_field(mmfr1,
-                                                ID_AA64MMFR1_HADBS_SHIFT);
+                                                ID_AA64MMFR1_EL1_HAFDBS_SHIFT);
 }
 
 static inline bool cpu_has_pan(void)
 {
         u64 mmfr1 = read_cpuid(ID_AA64MMFR1_EL1);
         return cpuid_feature_extract_unsigned_field(mmfr1,
-                                                ID_AA64MMFR1_PAN_SHIFT);
+                                                ID_AA64MMFR1_EL1_PAN_SHIFT);
 }
 
 #ifdef CONFIG_ARM64_AMU_EXTN
@@ -896,8 +896,8 @@ static inline unsigned int get_vmid_bits(u64 mmfr1)
         int vmid_bits;
 
         vmid_bits = cpuid_feature_extract_unsigned_field(mmfr1,
-                                                ID_AA64MMFR1_VMIDBITS_SHIFT);
-        if (vmid_bits == ID_AA64MMFR1_VMIDBITS_16)
+                                                ID_AA64MMFR1_EL1_VMIDBits_SHIFT);
+        if (vmid_bits == ID_AA64MMFR1_EL1_VMIDBits_16)
                 return 16;
 
         /*
@@ -907,6 +907,8 @@ static inline unsigned int get_vmid_bits(u64 mmfr1)
         return 8;
 }
 
+struct arm64_ftr_reg *get_arm64_ftr_reg(u32 sys_id);
+
 extern struct arm64_ftr_override id_aa64mmfr1_override;
 extern struct arm64_ftr_override id_aa64pfr0_override;
 extern struct arm64_ftr_override id_aa64pfr1_override;
@@ -40,7 +40,7 @@
 
 .macro __init_el2_debug
         mrs x1, id_aa64dfr0_el1
-        sbfx x0, x1, #ID_AA64DFR0_PMUVER_SHIFT, #4
+        sbfx x0, x1, #ID_AA64DFR0_EL1_PMUVer_SHIFT, #4
         cmp x0, #1
         b.lt .Lskip_pmu_\@              // Skip if no PMU present
         mrs x0, pmcr_el0                // Disable debug access traps
@@ -49,7 +49,7 @@
         csel x2, xzr, x0, lt            // all PMU counters from EL1
 
         /* Statistical profiling */
-        ubfx x0, x1, #ID_AA64DFR0_PMSVER_SHIFT, #4
+        ubfx x0, x1, #ID_AA64DFR0_EL1_PMSVer_SHIFT, #4
         cbz x0, .Lskip_spe_\@           // Skip if SPE not present
 
         mrs_s x0, SYS_PMBIDR_EL1        // If SPE available at EL2,
@@ -65,7 +65,7 @@
 
 .Lskip_spe_\@:
         /* Trace buffer */
-        ubfx x0, x1, #ID_AA64DFR0_TRBE_SHIFT, #4
+        ubfx x0, x1, #ID_AA64DFR0_EL1_TraceBuffer_SHIFT, #4
         cbz x0, .Lskip_trace_\@         // Skip if TraceBuffer is not present
 
         mrs_s x0, SYS_TRBIDR_EL1
@@ -83,7 +83,7 @@
 /* LORegions */
 .macro __init_el2_lor
         mrs x1, id_aa64mmfr1_el1
-        ubfx x0, x1, #ID_AA64MMFR1_LOR_SHIFT, 4
+        ubfx x0, x1, #ID_AA64MMFR1_EL1_LO_SHIFT, 4
         cbz x0, .Lskip_lor_\@
         msr_s SYS_LORC_EL1, xzr
 .Lskip_lor_\@:
@@ -97,7 +97,7 @@
 /* GICv3 system register access */
 .macro __init_el2_gicv3
         mrs x0, id_aa64pfr0_el1
-        ubfx x0, x0, #ID_AA64PFR0_GIC_SHIFT, #4
+        ubfx x0, x0, #ID_AA64PFR0_EL1_GIC_SHIFT, #4
         cbz x0, .Lskip_gicv3_\@
 
         mrs_s x0, SYS_ICC_SRE_EL2
@@ -132,12 +132,12 @@
 /* Disable any fine grained traps */
 .macro __init_el2_fgt
         mrs x1, id_aa64mmfr0_el1
-        ubfx x1, x1, #ID_AA64MMFR0_FGT_SHIFT, #4
+        ubfx x1, x1, #ID_AA64MMFR0_EL1_FGT_SHIFT, #4
         cbz x1, .Lskip_fgt_\@
 
         mov x0, xzr
         mrs x1, id_aa64dfr0_el1
-        ubfx x1, x1, #ID_AA64DFR0_PMSVER_SHIFT, #4
+        ubfx x1, x1, #ID_AA64DFR0_EL1_PMSVer_SHIFT, #4
         cmp x1, #3
         b.lt .Lset_debug_fgt_\@
         /* Disable PMSNEVFR_EL1 read and write traps */
@@ -149,7 +149,7 @@
 
         mov x0, xzr
         mrs x1, id_aa64pfr1_el1
-        ubfx x1, x1, #ID_AA64PFR1_SME_SHIFT, #4
+        ubfx x1, x1, #ID_AA64PFR1_EL1_SME_SHIFT, #4
         cbz x1, .Lset_fgt_\@
 
         /* Disable nVHE traps of TPIDR2 and SMPRI */
@@ -162,7 +162,7 @@
         msr_s SYS_HFGITR_EL2, xzr
 
         mrs x1, id_aa64pfr0_el1         // AMU traps UNDEF without AMU
-        ubfx x1, x1, #ID_AA64PFR0_AMU_SHIFT, #4
+        ubfx x1, x1, #ID_AA64PFR0_EL1_AMU_SHIFT, #4
         cbz x1, .Lskip_fgt_\@
 
         msr_s SYS_HAFGRTR_EL2, xzr
@@ -58,8 +58,9 @@ asmlinkage void call_on_irq_stack(struct pt_regs *regs,
 asmlinkage void asm_exit_to_user_mode(struct pt_regs *regs);
 
 void do_mem_abort(unsigned long far, unsigned long esr, struct pt_regs *regs);
-void do_undefinstr(struct pt_regs *regs);
-void do_bti(struct pt_regs *regs);
+void do_undefinstr(struct pt_regs *regs, unsigned long esr);
+void do_el0_bti(struct pt_regs *regs);
+void do_el1_bti(struct pt_regs *regs, unsigned long esr);
 void do_debug_exception(unsigned long addr_if_watchpoint, unsigned long esr,
                         struct pt_regs *regs);
 void do_fpsimd_acc(unsigned long esr, struct pt_regs *regs);
@@ -72,7 +73,8 @@ void bad_el0_sync(struct pt_regs *regs, int reason, unsigned long esr);
 void do_cp15instr(unsigned long esr, struct pt_regs *regs);
 void do_el0_svc(struct pt_regs *regs);
 void do_el0_svc_compat(struct pt_regs *regs);
-void do_ptrauth_fault(struct pt_regs *regs, unsigned long esr);
+void do_el0_fpac(struct pt_regs *regs, unsigned long esr);
+void do_el1_fpac(struct pt_regs *regs, unsigned long esr);
 void do_serror(struct pt_regs *regs, unsigned long esr);
 void do_notify_resume(struct pt_regs *regs, unsigned long thread_flags);
@@ -142,7 +142,7 @@ static inline int get_num_brps(void)
         u64 dfr0 = read_sanitised_ftr_reg(SYS_ID_AA64DFR0_EL1);
         return 1 +
                 cpuid_feature_extract_unsigned_field(dfr0,
-                                                ID_AA64DFR0_BRPS_SHIFT);
+                                                ID_AA64DFR0_EL1_BRPs_SHIFT);
 }
 
 /* Determine number of WRP registers available. */
@@ -151,7 +151,7 @@ static inline int get_num_wrps(void)
         u64 dfr0 = read_sanitised_ftr_reg(SYS_ID_AA64DFR0_EL1);
         return 1 +
                 cpuid_feature_extract_unsigned_field(dfr0,
-                                                ID_AA64DFR0_WRPS_SHIFT);
+                                                ID_AA64DFR0_EL1_WRPs_SHIFT);
 }
 
 #endif  /* __ASM_BREAKPOINT_H */
@@ -119,6 +119,7 @@
 #define KERNEL_HWCAP_SME_FA64           __khwcap2_feature(SME_FA64)
 #define KERNEL_HWCAP_WFXT               __khwcap2_feature(WFXT)
 #define KERNEL_HWCAP_EBF16              __khwcap2_feature(EBF16)
+#define KERNEL_HWCAP_SVE_EBF16          __khwcap2_feature(SVE_EBF16)
 
 /*
  * This yields a mask that user programs can use to figure out what
@@ -16,9 +16,9 @@
 static inline u64 kvm_get_parange(u64 mmfr0)
 {
         u64 parange = cpuid_feature_extract_unsigned_field(mmfr0,
-                                ID_AA64MMFR0_PARANGE_SHIFT);
-        if (parange > ID_AA64MMFR0_PARANGE_MAX)
-                parange = ID_AA64MMFR0_PARANGE_MAX;
+                                ID_AA64MMFR0_EL1_PARANGE_SHIFT);
+        if (parange > ID_AA64MMFR0_EL1_PARANGE_MAX)
+                parange = ID_AA64MMFR0_EL1_PARANGE_MAX;
 
         return parange;
 }
@@ -58,11 +58,20 @@ static inline bool is_forbidden_offset_for_adrp(void *place)
 }
 
 struct plt_entry get_plt_entry(u64 dst, void *pc);
 bool plt_entries_equal(const struct plt_entry *a, const struct plt_entry *b);
 
-static inline bool plt_entry_is_initialized(const struct plt_entry *e)
+static inline const Elf_Shdr *find_section(const Elf_Ehdr *hdr,
+                                           const Elf_Shdr *sechdrs,
+                                           const char *name)
 {
-        return e->adrp || e->add || e->br;
+        const Elf_Shdr *s, *se;
+        const char *secstrs = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
+
+        for (s = sechdrs, se = sechdrs + hdr->e_shnum; s < se; s++) {
+                if (strcmp(name, secstrs + s->sh_name) == 0)
+                        return s;
+        }
+
+        return NULL;
 }
 
 #endif /* __ASM_MODULE_H */
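find_section() moves into the header (per the gettimeofday branch's "arm64: module: move find_section to header") so that code outside the module loader, such as the new vDSO alternatives patching, can reuse it. A hedged usage sketch; the wrapper and its error handling are illustrative, only the find_section() call itself comes from this diff:

/* Sketch: locate a named section of an ELF image before patching it. */
static const Elf_Shdr *lookup_altinstructions(const Elf_Ehdr *hdr,
                                              const Elf_Shdr *sechdrs)
{
        /* Returns NULL when the image carries no alternatives. */
        return find_section(hdr, sechdrs, ".altinstructions");
}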
@@ -410,7 +410,7 @@ long get_tagged_addr_ctrl(struct task_struct *task);
  * The top of the current task's task stack
  */
 #define current_top_of_stack() ((unsigned long)current->stack + THREAD_SIZE)
-#define on_thread_stack() (on_task_stack(current, current_stack_pointer, 1, NULL))
+#define on_thread_stack() (on_task_stack(current, current_stack_pointer, 1))
 
 #endif /* __ASSEMBLY__ */
 #endif /* __ASM_PROCESSOR_H */
@@ -43,22 +43,5 @@ unsigned long do_sdei_event(struct pt_regs *regs,
 unsigned long sdei_arch_get_entry_point(int conduit);
 #define sdei_arch_get_entry_point(x) sdei_arch_get_entry_point(x)
 
-struct stack_info;
-
-bool _on_sdei_stack(unsigned long sp, unsigned long size,
-                    struct stack_info *info);
-static inline bool on_sdei_stack(unsigned long sp, unsigned long size,
-                                 struct stack_info *info)
-{
-        if (!IS_ENABLED(CONFIG_VMAP_STACK))
-                return false;
-        if (!IS_ENABLED(CONFIG_ARM_SDE_INTERFACE))
-                return false;
-        if (in_nmi())
-                return _on_sdei_stack(sp, size, info);
-
-        return false;
-}
-
 #endif /* __ASSEMBLY__ */
 #endif /* __ASM_SDEI_H */
@@ -22,39 +22,86 @@ extern void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk,
 
 DECLARE_PER_CPU(unsigned long *, irq_stack_ptr);
 
-static inline bool on_irq_stack(unsigned long sp, unsigned long size,
-                                struct stack_info *info)
+static inline struct stack_info stackinfo_get_irq(void)
 {
         unsigned long low = (unsigned long)raw_cpu_read(irq_stack_ptr);
         unsigned long high = low + IRQ_STACK_SIZE;
 
-        return on_stack(sp, size, low, high, STACK_TYPE_IRQ, info);
+        return (struct stack_info) {
+                .low = low,
+                .high = high,
+        };
 }
 
-static inline bool on_task_stack(const struct task_struct *tsk,
-                                 unsigned long sp, unsigned long size,
-                                 struct stack_info *info)
+static inline bool on_irq_stack(unsigned long sp, unsigned long size)
+{
+        struct stack_info info = stackinfo_get_irq();
+        return stackinfo_on_stack(&info, sp, size);
+}
+
+static inline struct stack_info stackinfo_get_task(const struct task_struct *tsk)
 {
         unsigned long low = (unsigned long)task_stack_page(tsk);
         unsigned long high = low + THREAD_SIZE;
 
-        return on_stack(sp, size, low, high, STACK_TYPE_TASK, info);
+        return (struct stack_info) {
+                .low = low,
+                .high = high,
+        };
+}
+
+static inline bool on_task_stack(const struct task_struct *tsk,
+                                 unsigned long sp, unsigned long size)
+{
+        struct stack_info info = stackinfo_get_task(tsk);
+        return stackinfo_on_stack(&info, sp, size);
 }
 
 #ifdef CONFIG_VMAP_STACK
 DECLARE_PER_CPU(unsigned long [OVERFLOW_STACK_SIZE/sizeof(long)], overflow_stack);
 
-static inline bool on_overflow_stack(unsigned long sp, unsigned long size,
-                        struct stack_info *info)
+static inline struct stack_info stackinfo_get_overflow(void)
 {
         unsigned long low = (unsigned long)raw_cpu_ptr(overflow_stack);
         unsigned long high = low + OVERFLOW_STACK_SIZE;
 
-        return on_stack(sp, size, low, high, STACK_TYPE_OVERFLOW, info);
+        return (struct stack_info) {
+                .low = low,
+                .high = high,
+        };
 }
 #else
-static inline bool on_overflow_stack(unsigned long sp, unsigned long size,
-                        struct stack_info *info) { return false; }
+#define stackinfo_get_overflow() stackinfo_get_unknown()
 #endif
 
 #if defined(CONFIG_ARM_SDE_INTERFACE) && defined(CONFIG_VMAP_STACK)
 DECLARE_PER_CPU(unsigned long *, sdei_stack_normal_ptr);
 DECLARE_PER_CPU(unsigned long *, sdei_stack_critical_ptr);
 
+static inline struct stack_info stackinfo_get_sdei_normal(void)
+{
+        unsigned long low = (unsigned long)raw_cpu_read(sdei_stack_normal_ptr);
+        unsigned long high = low + SDEI_STACK_SIZE;
+
+        return (struct stack_info) {
+                .low = low,
+                .high = high,
+        };
+}
+
+static inline struct stack_info stackinfo_get_sdei_critical(void)
+{
+        unsigned long low = (unsigned long)raw_cpu_read(sdei_stack_critical_ptr);
+        unsigned long high = low + SDEI_STACK_SIZE;
+
+        return (struct stack_info) {
+                .low = low,
+                .high = high,
+        };
+}
+#else
+#define stackinfo_get_sdei_normal() stackinfo_get_unknown()
+#define stackinfo_get_sdei_critical() stackinfo_get_unknown()
+#endif
 
 #endif /* __ASM_STACKTRACE_H */
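The stackinfo_on_stack() helper these new wrappers call is added by another patch in the stacktrace branch ("arm64: stacktrace: add stackinfo_on_stack() helper") and is not part of this excerpt. Judging from its use above, it is an overflow-safe bounds check, roughly:

/* Sketch of the helper (not shown in this diff): does [sp, sp + size)
 * fit entirely inside the stack described by info? */
static inline bool stackinfo_on_stack(const struct stack_info *info,
                                      unsigned long sp, unsigned long size)
{
        if (!info->low)
                return false;   /* stackinfo_get_unknown() yields { 0, 0 } */

        if (sp < info->low || sp + size < sp || sp + size > info->high)
                return false;

        return true;
}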
Some files were not shown because too many files have changed in this diff.