Merge tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux
Pull arm64 updates from Will Deacon:
"A bunch of good stuff in here. Worth noting is that we've pulled in
the x86/mm branch from -tip so that we can make use of the core
ioremap changes which allow us to put down huge mappings in the
vmalloc area without screwing up the TLB. Much of the positive
diffstat is because of the rseq selftest for arm64.
Summary:
- Wire up support for qspinlock, replacing our trusty ticket lock
code
- Add an IPI to flush_icache_range() to ensure that stale
instructions fetched into the pipeline are discarded along with the
I-cache lines
- Support for the GCC "stackleak" plugin
- Support for restartable sequences, plus an arm64 port for the
selftest
- Kexec/kdump support on systems booting with ACPI
- Rewrite of our syscall entry code in C, which allows us to zero the
GPRs on entry from userspace
- Support for chained PMU counters, allowing 64-bit event counters to
be constructed on current CPUs
- Ensure scheduler topology information is kept up-to-date with CPU
hotplug events
- Re-enable support for huge vmalloc/IO mappings now that the core
code has the correct hooks to use break-before-make sequences
- Miscellaneous, non-critical fixes and cleanups"
* tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux: (90 commits)
arm64: alternative: Use true and false for boolean values
arm64: kexec: Add comment to explain use of __flush_icache_range()
arm64: sdei: Mark sdei stack helper functions as static
arm64, kaslr: export offset in VMCOREINFO ELF notes
arm64: perf: Add cap_user_time aarch64
efi/libstub: Only disable stackleak plugin for arm64
arm64: drop unused kernel_neon_begin_partial() macro
arm64: kexec: machine_kexec should call __flush_icache_range
arm64: svc: Ensure hardirq tracing is updated before return
arm64: mm: Export __sync_icache_dcache() for xen-privcmd
drivers/perf: arm-ccn: Use devm_ioremap_resource() to map memory
arm64: Add support for STACKLEAK gcc plugin
arm64: Add stack information to on_accessible_stack
drivers/perf: hisi: update the sccl_id/ccl_id when MT is supported
arm64: fix ACPI dependencies
rseq/selftests: Add support for arm64
arm64: acpi: fix alignment fault in accessing ACPI
efi/arm: map UEFI memory map even w/o runtime services enabled
efi/arm: preserve early mapping of UEFI memory map longer for BGRT
drivers: acpi: add dependency of EFI for arm64
...
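An aside on the qspinlock switch called out above: the ticket lock being retired hands out tickets in FIFO order and spins until a ticket is "called". Below is a minimal userspace sketch of that scheme using C11 atomics and C11 threads — an illustration only, not the kernel's implementation (all names here, such as struct ticket_lock, are made up for the example):

```c
/* Minimal ticket-lock sketch in C11 -- illustrates the scheme arm64 is
 * retiring in favour of qspinlock.  Not the kernel's implementation. */
#include <stdatomic.h>
#include <stdio.h>
#include <threads.h>

struct ticket_lock {
	atomic_uint next;	/* next ticket to hand out */
	atomic_uint owner;	/* ticket currently being served */
};

static void ticket_lock(struct ticket_lock *l)
{
	/* Take a ticket, then spin until it is called. */
	unsigned int me = atomic_fetch_add_explicit(&l->next, 1,
						    memory_order_relaxed);
	while (atomic_load_explicit(&l->owner, memory_order_acquire) != me)
		;
}

static void ticket_unlock(struct ticket_lock *l)
{
	atomic_fetch_add_explicit(&l->owner, 1, memory_order_release);
}

static struct ticket_lock lock;
static long counter;

static int worker(void *arg)
{
	(void)arg;
	for (int i = 0; i < 100000; i++) {
		ticket_lock(&lock);
		counter++;	/* protected by the lock */
		ticket_unlock(&lock);
	}
	return 0;
}

int main(void)
{
	thrd_t t[4];

	for (int i = 0; i < 4; i++)
		thrd_create(&t[i], worker, NULL);
	for (int i = 0; i < 4; i++)
		thrd_join(t[i], NULL);
	printf("counter = %ld\n", counter);	/* expect 400000 */
	return 0;
}
```

Ticket locks are fair but make every waiter hammer the same cache line; qspinlock keeps the FIFO property while queueing waiters on per-CPU nodes, which is why it scales better on larger systems.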
@@ -2270,6 +2270,7 @@ L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux.git
 S:	Maintained
 F:	arch/arm64/
+X:	arch/arm64/boot/dts/
 F:	Documentation/arm64/
 
 AS3645A LED FLASH CONTROLLER DRIVER
@@ -26,13 +26,13 @@
 #include <asm/cputype.h>
 
 /* arm64 compatibility macros */
-#define COMPAT_PSR_MODE_ABT	ABT_MODE
-#define COMPAT_PSR_MODE_UND	UND_MODE
-#define COMPAT_PSR_T_BIT	PSR_T_BIT
-#define COMPAT_PSR_I_BIT	PSR_I_BIT
-#define COMPAT_PSR_A_BIT	PSR_A_BIT
-#define COMPAT_PSR_E_BIT	PSR_E_BIT
-#define COMPAT_PSR_IT_MASK	PSR_IT_MASK
+#define PSR_AA32_MODE_ABT	ABT_MODE
+#define PSR_AA32_MODE_UND	UND_MODE
+#define PSR_AA32_T_BIT		PSR_T_BIT
+#define PSR_AA32_I_BIT		PSR_I_BIT
+#define PSR_AA32_A_BIT		PSR_A_BIT
+#define PSR_AA32_E_BIT		PSR_E_BIT
+#define PSR_AA32_IT_MASK	PSR_IT_MASK
 
 unsigned long *vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num);
@@ -233,7 +233,7 @@ armv6_pmcr_counter_has_overflowed(unsigned long pmcr,
 	return ret;
 }
 
-static inline u32 armv6pmu_read_counter(struct perf_event *event)
+static inline u64 armv6pmu_read_counter(struct perf_event *event)
 {
 	struct hw_perf_event *hwc = &event->hw;
 	int counter = hwc->idx;
@@ -251,7 +251,7 @@ static inline u32 armv6pmu_read_counter(struct perf_event *event)
 	return value;
 }
 
-static inline void armv6pmu_write_counter(struct perf_event *event, u32 value)
+static inline void armv6pmu_write_counter(struct perf_event *event, u64 value)
 {
 	struct hw_perf_event *hwc = &event->hw;
 	int counter = hwc->idx;
@@ -411,6 +411,12 @@ armv6pmu_get_event_idx(struct pmu_hw_events *cpuc,
 	}
 }
 
+static void armv6pmu_clear_event_idx(struct pmu_hw_events *cpuc,
+				     struct perf_event *event)
+{
+	clear_bit(event->hw.idx, cpuc->used_mask);
+}
+
 static void armv6pmu_disable_event(struct perf_event *event)
 {
 	unsigned long val, mask, evt, flags;
@@ -491,11 +497,11 @@ static void armv6pmu_init(struct arm_pmu *cpu_pmu)
 	cpu_pmu->read_counter	= armv6pmu_read_counter;
 	cpu_pmu->write_counter	= armv6pmu_write_counter;
 	cpu_pmu->get_event_idx	= armv6pmu_get_event_idx;
+	cpu_pmu->clear_event_idx = armv6pmu_clear_event_idx;
 	cpu_pmu->start		= armv6pmu_start;
 	cpu_pmu->stop		= armv6pmu_stop;
 	cpu_pmu->map_event	= armv6_map_event;
 	cpu_pmu->num_events	= 3;
-	cpu_pmu->max_period	= (1LLU << 32) - 1;
 }
 
 static int armv6_1136_pmu_init(struct arm_pmu *cpu_pmu)
@@ -542,11 +548,11 @@ static int armv6mpcore_pmu_init(struct arm_pmu *cpu_pmu)
 	cpu_pmu->read_counter	= armv6pmu_read_counter;
 	cpu_pmu->write_counter	= armv6pmu_write_counter;
 	cpu_pmu->get_event_idx	= armv6pmu_get_event_idx;
+	cpu_pmu->clear_event_idx = armv6pmu_clear_event_idx;
 	cpu_pmu->start		= armv6pmu_start;
 	cpu_pmu->stop		= armv6pmu_stop;
 	cpu_pmu->map_event	= armv6mpcore_map_event;
 	cpu_pmu->num_events	= 3;
-	cpu_pmu->max_period	= (1LLU << 32) - 1;
 
 	return 0;
 }
@@ -743,7 +743,7 @@ static inline void armv7_pmnc_select_counter(int idx)
 	isb();
 }
 
-static inline u32 armv7pmu_read_counter(struct perf_event *event)
+static inline u64 armv7pmu_read_counter(struct perf_event *event)
 {
 	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
 	struct hw_perf_event *hwc = &event->hw;
@@ -763,7 +763,7 @@ static inline u32 armv7pmu_read_counter(struct perf_event *event)
 	return value;
 }
 
-static inline void armv7pmu_write_counter(struct perf_event *event, u32 value)
+static inline void armv7pmu_write_counter(struct perf_event *event, u64 value)
 {
 	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
 	struct hw_perf_event *hwc = &event->hw;
@@ -1058,6 +1058,12 @@ static int armv7pmu_get_event_idx(struct pmu_hw_events *cpuc,
 	return -EAGAIN;
 }
 
+static void armv7pmu_clear_event_idx(struct pmu_hw_events *cpuc,
+				     struct perf_event *event)
+{
+	clear_bit(event->hw.idx, cpuc->used_mask);
+}
+
 /*
  * Add an event filter to a given event. This will only work for PMUv2 PMUs.
  */
@@ -1167,10 +1173,10 @@ static void armv7pmu_init(struct arm_pmu *cpu_pmu)
 	cpu_pmu->read_counter	= armv7pmu_read_counter;
 	cpu_pmu->write_counter	= armv7pmu_write_counter;
 	cpu_pmu->get_event_idx	= armv7pmu_get_event_idx;
+	cpu_pmu->clear_event_idx = armv7pmu_clear_event_idx;
 	cpu_pmu->start		= armv7pmu_start;
 	cpu_pmu->stop		= armv7pmu_stop;
 	cpu_pmu->reset		= armv7pmu_reset;
-	cpu_pmu->max_period	= (1LLU << 32) - 1;
 };
 
 static void armv7_read_num_pmnc_events(void *info)
@@ -1638,6 +1644,7 @@ static void krait_pmu_clear_event_idx(struct pmu_hw_events *cpuc,
 	bool venum_event = EVENT_VENUM(hwc->config_base);
 	bool krait_event = EVENT_CPU(hwc->config_base);
 
+	armv7pmu_clear_event_idx(cpuc, event);
 	if (venum_event || krait_event) {
 		bit = krait_event_to_bit(event, region, group);
 		clear_bit(bit, cpuc->used_mask);
@@ -1967,6 +1974,7 @@ static void scorpion_pmu_clear_event_idx(struct pmu_hw_events *cpuc,
 	bool venum_event = EVENT_VENUM(hwc->config_base);
 	bool scorpion_event = EVENT_CPU(hwc->config_base);
 
+	armv7pmu_clear_event_idx(cpuc, event);
 	if (venum_event || scorpion_event) {
 		bit = scorpion_event_to_bit(event, region, group);
 		clear_bit(bit, cpuc->used_mask);
@@ -2030,6 +2038,7 @@ static struct platform_driver armv7_pmu_driver = {
 	.driver		= {
 		.name	= "armv7-pmu",
 		.of_match_table = armv7_pmu_of_device_ids,
+		.suppress_bind_attrs = true,
 	},
 	.probe		= armv7_pmu_device_probe,
 };
@@ -292,6 +292,12 @@ xscale1pmu_get_event_idx(struct pmu_hw_events *cpuc,
 	}
 }
 
+static void xscalepmu_clear_event_idx(struct pmu_hw_events *cpuc,
+				      struct perf_event *event)
+{
+	clear_bit(event->hw.idx, cpuc->used_mask);
+}
+
 static void xscale1pmu_start(struct arm_pmu *cpu_pmu)
 {
 	unsigned long flags, val;
@@ -316,7 +322,7 @@ static void xscale1pmu_stop(struct arm_pmu *cpu_pmu)
 	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }
 
-static inline u32 xscale1pmu_read_counter(struct perf_event *event)
+static inline u64 xscale1pmu_read_counter(struct perf_event *event)
 {
 	struct hw_perf_event *hwc = &event->hw;
 	int counter = hwc->idx;
@@ -337,7 +343,7 @@ static inline u32 xscale1pmu_read_counter(struct perf_event *event)
 	return val;
 }
 
-static inline void xscale1pmu_write_counter(struct perf_event *event, u32 val)
+static inline void xscale1pmu_write_counter(struct perf_event *event, u64 val)
 {
 	struct hw_perf_event *hwc = &event->hw;
 	int counter = hwc->idx;
@@ -370,11 +376,11 @@ static int xscale1pmu_init(struct arm_pmu *cpu_pmu)
 	cpu_pmu->read_counter	= xscale1pmu_read_counter;
 	cpu_pmu->write_counter	= xscale1pmu_write_counter;
 	cpu_pmu->get_event_idx	= xscale1pmu_get_event_idx;
+	cpu_pmu->clear_event_idx = xscalepmu_clear_event_idx;
 	cpu_pmu->start		= xscale1pmu_start;
 	cpu_pmu->stop		= xscale1pmu_stop;
 	cpu_pmu->map_event	= xscale_map_event;
 	cpu_pmu->num_events	= 3;
-	cpu_pmu->max_period	= (1LLU << 32) - 1;
 
 	return 0;
 }
@@ -679,7 +685,7 @@ static void xscale2pmu_stop(struct arm_pmu *cpu_pmu)
 	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }
 
-static inline u32 xscale2pmu_read_counter(struct perf_event *event)
+static inline u64 xscale2pmu_read_counter(struct perf_event *event)
 {
 	struct hw_perf_event *hwc = &event->hw;
 	int counter = hwc->idx;
@@ -706,7 +712,7 @@ static inline u32 xscale2pmu_read_counter(struct perf_event *event)
 	return val;
 }
 
-static inline void xscale2pmu_write_counter(struct perf_event *event, u32 val)
+static inline void xscale2pmu_write_counter(struct perf_event *event, u64 val)
 {
 	struct hw_perf_event *hwc = &event->hw;
 	int counter = hwc->idx;
@@ -739,11 +745,11 @@ static int xscale2pmu_init(struct arm_pmu *cpu_pmu)
 	cpu_pmu->read_counter	= xscale2pmu_read_counter;
 	cpu_pmu->write_counter	= xscale2pmu_write_counter;
 	cpu_pmu->get_event_idx	= xscale2pmu_get_event_idx;
+	cpu_pmu->clear_event_idx = xscalepmu_clear_event_idx;
 	cpu_pmu->start		= xscale2pmu_start;
 	cpu_pmu->stop		= xscale2pmu_stop;
 	cpu_pmu->map_event	= xscale_map_event;
 	cpu_pmu->num_events	= 5;
-	cpu_pmu->max_period	= (1LLU << 32) - 1;
 
 	return 0;
 }
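All three PMU back ends above widen read_counter/write_counter from u32 to u64 — groundwork for the chained-counter support noted in the merge summary, which glues two 32-bit hardware counters into one 64-bit event. From userspace the counter width is invisible: perf always hands back a u64. A minimal sketch using perf_event_open(2) (Linux-specific; there is no glibc wrapper, hence the raw syscall):

```c
/* Count CPU cycles over a short busy loop.  The value is read as a
 * 64-bit quantity regardless of the underlying counter width -- the
 * kernel handles the widening. */
#define _GNU_SOURCE
#include <linux/perf_event.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	struct perf_event_attr attr;
	uint64_t count;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.type = PERF_TYPE_HARDWARE;
	attr.size = sizeof(attr);
	attr.config = PERF_COUNT_HW_CPU_CYCLES;
	attr.disabled = 1;
	attr.exclude_kernel = 1;

	/* Measure the calling thread on any CPU. */
	fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}

	ioctl(fd, PERF_EVENT_IOC_RESET, 0);
	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
	for (volatile int i = 0; i < 1000000; i++)
		;
	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

	if (read(fd, &count, sizeof(count)) == sizeof(count))
		printf("cycles: %llu\n", (unsigned long long)count);
	close(fd);
	return 0;
}
```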
@@ -24,6 +24,7 @@ config ARM64
 	select ARCH_HAS_SG_CHAIN
 	select ARCH_HAS_STRICT_KERNEL_RWX
 	select ARCH_HAS_STRICT_MODULE_RWX
+	select ARCH_HAS_SYSCALL_WRAPPER
 	select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
 	select ARCH_HAVE_NMI_SAFE_CMPXCHG
 	select ARCH_INLINE_READ_LOCK if !PREEMPT
@@ -42,8 +43,19 @@ config ARM64
 	select ARCH_INLINE_WRITE_UNLOCK_BH if !PREEMPT
 	select ARCH_INLINE_WRITE_UNLOCK_IRQ if !PREEMPT
 	select ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE if !PREEMPT
+	select ARCH_INLINE_SPIN_TRYLOCK if !PREEMPT
+	select ARCH_INLINE_SPIN_TRYLOCK_BH if !PREEMPT
+	select ARCH_INLINE_SPIN_LOCK if !PREEMPT
+	select ARCH_INLINE_SPIN_LOCK_BH if !PREEMPT
+	select ARCH_INLINE_SPIN_LOCK_IRQ if !PREEMPT
+	select ARCH_INLINE_SPIN_LOCK_IRQSAVE if !PREEMPT
+	select ARCH_INLINE_SPIN_UNLOCK if !PREEMPT
+	select ARCH_INLINE_SPIN_UNLOCK_BH if !PREEMPT
+	select ARCH_INLINE_SPIN_UNLOCK_IRQ if !PREEMPT
+	select ARCH_INLINE_SPIN_UNLOCK_IRQRESTORE if !PREEMPT
 	select ARCH_USE_CMPXCHG_LOCKREF
 	select ARCH_USE_QUEUED_RWLOCKS
+	select ARCH_USE_QUEUED_SPINLOCKS
 	select ARCH_SUPPORTS_MEMORY_FAILURE
 	select ARCH_SUPPORTS_ATOMIC_RMW
 	select ARCH_SUPPORTS_INT128 if GCC_VERSION >= 50000 || CC_IS_CLANG
@@ -97,6 +109,7 @@ config ARM64
 	select HAVE_ARCH_MMAP_RND_BITS
 	select HAVE_ARCH_MMAP_RND_COMPAT_BITS if COMPAT
 	select HAVE_ARCH_SECCOMP_FILTER
+	select HAVE_ARCH_STACKLEAK
 	select HAVE_ARCH_THREAD_STRUCT_WHITELIST
 	select HAVE_ARCH_TRACEHOOK
 	select HAVE_ARCH_TRANSPARENT_HUGEPAGE
@@ -128,6 +141,7 @@ config ARM64
 	select HAVE_PERF_USER_STACK_DUMP
 	select HAVE_REGS_AND_STACK_ACCESS_API
 	select HAVE_RCU_TABLE_FREE
+	select HAVE_RSEQ
 	select HAVE_STACKPROTECTOR
 	select HAVE_SYSCALL_TRACEPOINTS
 	select HAVE_KPROBES
@@ -773,6 +787,9 @@ config ARCH_SPARSEMEM_DEFAULT
 config ARCH_SELECT_MEMORY_MODEL
 	def_bool ARCH_SPARSEMEM_ENABLE
 
+config ARCH_FLATMEM_ENABLE
+	def_bool !NUMA
+
 config HAVE_ARCH_PFN_VALID
 	def_bool ARCH_HAS_HOLES_MEMORYMODEL || !SPARSEMEM
 
@@ -1244,6 +1261,7 @@ config EFI
 	bool "UEFI runtime support"
 	depends on OF && !CPU_BIG_ENDIAN
 	depends on KERNEL_MODE_NEON
+	select ARCH_SUPPORTS_ACPI
 	select LIBFDT
 	select UCS2_STRING
 	select EFI_PARAMS_FROM_FDT
@@ -60,15 +60,16 @@ ifeq ($(CONFIG_CPU_BIG_ENDIAN), y)
 KBUILD_CPPFLAGS	+= -mbig-endian
 CHECKFLAGS	+= -D__AARCH64EB__
 AS		+= -EB
-# We must use the linux target here, since distributions don't tend to package
-# the ELF linker scripts with binutils, and this results in a build failure.
-LDFLAGS		+= -EB -maarch64linuxb
+# Prefer the baremetal ELF build target, but not all toolchains include
+# it so fall back to the standard linux version if needed.
+LDFLAGS		+= -EB $(call ld-option, -maarch64elfb, -maarch64linuxb)
 UTS_MACHINE	:= aarch64_be
 else
 KBUILD_CPPFLAGS	+= -mlittle-endian
 CHECKFLAGS	+= -D__AARCH64EL__
 AS		+= -EL
-LDFLAGS		+= -EL -maarch64linux # See comment above
+# Same as above, prefer ELF but fall back to linux target if needed.
+LDFLAGS		+= -EL $(call ld-option, -maarch64elf, -maarch64linux)
 UTS_MACHINE	:= aarch64
 endif
@@ -16,6 +16,7 @@ generic-y += mm-arch-hooks.h
 generic-y += msi.h
 generic-y += preempt.h
 generic-y += qrwlock.h
+generic-y += qspinlock.h
 generic-y += rwsem.h
 generic-y += segment.h
 generic-y += serial.h
@@ -12,10 +12,12 @@
 #ifndef _ASM_ACPI_H
 #define _ASM_ACPI_H
 
+#include <linux/efi.h>
 #include <linux/memblock.h>
 #include <linux/psci.h>
 
 #include <asm/cputype.h>
+#include <asm/io.h>
 #include <asm/smp_plat.h>
 #include <asm/tlbflush.h>
 
@@ -29,18 +31,22 @@
 
 /* Basic configuration for ACPI */
 #ifdef	CONFIG_ACPI
+pgprot_t __acpi_get_mem_attribute(phys_addr_t addr);
+
 /* ACPI table mapping after acpi_permanent_mmap is set */
 static inline void __iomem *acpi_os_ioremap(acpi_physical_address phys,
 					    acpi_size size)
 {
-	/*
-	 * EFI's reserve_regions() call adds memory with the WB attribute
-	 * to memblock via early_init_dt_add_memory_arch().
-	 */
-	if (!memblock_is_memory(phys))
-		return ioremap(phys, size);
+	/* For normal memory we already have a cacheable mapping. */
+	if (memblock_is_map_memory(phys))
+		return (void __iomem *)__phys_to_virt(phys);
 
-	return ioremap_cache(phys, size);
+	/*
+	 * We should still honor the memory's attribute here because
+	 * crash dump kernel possibly excludes some ACPI (reclaim)
+	 * regions from memblock list.
+	 */
+	return __ioremap(phys, size, __acpi_get_mem_attribute(phys));
 }
 #define acpi_os_ioremap acpi_os_ioremap
 
@@ -129,15 +135,20 @@ static inline const char *acpi_get_enable_method(int cpu)
  * for compatibility.
  */
 #define acpi_disable_cmcff 1
-pgprot_t arch_apei_get_mem_attribute(phys_addr_t addr);
+static inline pgprot_t arch_apei_get_mem_attribute(phys_addr_t addr)
+{
+	return __acpi_get_mem_attribute(addr);
+}
 #endif /* CONFIG_ACPI_APEI */
 
 #ifdef CONFIG_ACPI_NUMA
 int arm64_acpi_numa_init(void);
-int acpi_numa_get_nid(unsigned int cpu, u64 hwid);
+int acpi_numa_get_nid(unsigned int cpu);
+void acpi_map_cpus_to_nodes(void);
 #else
 static inline int arm64_acpi_numa_init(void) { return -ENOSYS; }
-static inline int acpi_numa_get_nid(unsigned int cpu, u64 hwid) { return NUMA_NO_NODE; }
+static inline int acpi_numa_get_nid(unsigned int cpu) { return NUMA_NO_NODE; }
+static inline void acpi_map_cpus_to_nodes(void) { }
 #endif /* CONFIG_ACPI_NUMA */
 
 #define ACPI_TABLE_UPGRADE_MAX_PHYS MEMBLOCK_ALLOC_ACCESSIBLE
@@ -128,6 +128,19 @@ do {									\
 	__u.__val;							\
 })
 
+#define smp_cond_load_relaxed(ptr, cond_expr)				\
+({									\
+	typeof(ptr) __PTR = (ptr);					\
+	typeof(*ptr) VAL;						\
+	for (;;) {							\
+		VAL = READ_ONCE(*__PTR);				\
+		if (cond_expr)						\
+			break;						\
+		__cmpwait_relaxed(__PTR, VAL);				\
+	}								\
+	VAL;								\
+})
+
 #define smp_cond_load_acquire(ptr, cond_expr)				\
 ({									\
 	typeof(ptr) __PTR = (ptr);					\
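The new smp_cond_load_relaxed() (which the queued-spinlock code relies on) re-reads a location with relaxed ordering until the caller's condition holds, parking the CPU in WFE via __cmpwait_relaxed() between polls. A rough userspace analogue with C11 atomics — minus the WFE part, for which portable C has no equivalent, so this sketch simply busy-waits:

```c
/* Userspace analogue of the smp_cond_load_relaxed() pattern above:
 * re-read a location with relaxed ordering until a condition holds. */
#include <stdatomic.h>

static inline unsigned int
cond_load_relaxed(atomic_uint *ptr, unsigned int want)
{
	unsigned int val;

	for (;;) {
		val = atomic_load_explicit(ptr, memory_order_relaxed);
		if (val == want)
			break;
		/* kernel version would park in WFE here */
	}
	return val;
}
```

The kernel macro takes an arbitrary condition expression rather than a fixed comparison; the fixed `want` value here just keeps the sketch a plain function.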
@@ -21,12 +21,16 @@
 #define CTR_L1IP_SHIFT		14
 #define CTR_L1IP_MASK		3
+#define CTR_DMINLINE_SHIFT	16
+#define CTR_IMINLINE_SHIFT	0
 #define CTR_ERG_SHIFT		20
 #define CTR_CWG_SHIFT		24
 #define CTR_CWG_MASK		15
 #define CTR_IDC_SHIFT		28
 #define CTR_DIC_SHIFT		29
 
+#define CTR_CACHE_MINLINE_MASK	\
+	(0xf << CTR_DMINLINE_SHIFT | 0xf << CTR_IMINLINE_SHIFT)
+
 #define CTR_L1IP(ctr)		(((ctr) >> CTR_L1IP_SHIFT) & CTR_L1IP_MASK)
 
 #define ICACHE_POLICY_VPIPT	0
@@ -19,6 +19,7 @@
 #ifndef __ASM_CACHEFLUSH_H
 #define __ASM_CACHEFLUSH_H
 
+#include <linux/kgdb.h>
 #include <linux/mm.h>
 
 /*
@@ -71,7 +72,7 @@
  *		- kaddr  - page address
  *		- size   - region size
  */
-extern void flush_icache_range(unsigned long start, unsigned long end);
+extern void __flush_icache_range(unsigned long start, unsigned long end);
 extern int  invalidate_icache_range(unsigned long start, unsigned long end);
 extern void __flush_dcache_area(void *addr, size_t len);
 extern void __inval_dcache_area(void *addr, size_t len);
@@ -81,6 +82,30 @@ extern void __clean_dcache_area_pou(void *addr, size_t len);
 extern long __flush_cache_user_range(unsigned long start, unsigned long end);
 extern void sync_icache_aliases(void *kaddr, unsigned long len);
 
+static inline void flush_icache_range(unsigned long start, unsigned long end)
+{
+	__flush_icache_range(start, end);
+
+	/*
+	 * IPI all online CPUs so that they undergo a context synchronization
+	 * event and are forced to refetch the new instructions.
+	 */
+#ifdef CONFIG_KGDB
+	/*
+	 * KGDB performs cache maintenance with interrupts disabled, so we
+	 * will deadlock trying to IPI the secondary CPUs. In theory, we can
+	 * set CACHE_FLUSH_IS_SAFE to 0 to avoid this known issue, but that
+	 * just means that KGDB will elide the maintenance altogether! As it
+	 * turns out, KGDB uses IPIs to round-up the secondary CPUs during
+	 * the patching operation, so we don't need extra IPIs here anyway.
+	 * In which case, add a KGDB-specific bodge and return early.
+	 */
+	if (kgdb_connected && irqs_disabled())
+		return;
+#endif
+	kick_all_cpus_sync();
+}
+
 static inline void flush_cache_mm(struct mm_struct *mm)
 {
 }
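The hunk above is the IPI change from the merge summary: after the cache maintenance, kick_all_cpus_sync() forces every online CPU through a context synchronization event so stale pipeline contents are discarded, with the KGDB special case documented in the comment. The userspace cousin of this maintenance is what a JIT must perform after writing instructions; GCC and Clang expose it as __builtin___clear_cache(). A hedged sketch — AArch64-only (the two encoded words are mov w0, #42 and ret) and assuming the platform permits an RWX mapping:

```c
/* Write two AArch64 instructions into an executable buffer, make them
 * visible to the instruction stream, then call them. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
	uint32_t code[] = {
		0x52800540,	/* mov w0, #42 */
		0xd65f03c0,	/* ret */
	};
	void *buf = mmap(NULL, 4096, PROT_READ | PROT_WRITE | PROT_EXEC,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (buf == MAP_FAILED)
		return 1;

	memcpy(buf, code, sizeof(code));
	/* D-cache clean + I-cache invalidate over the written range */
	__builtin___clear_cache((char *)buf, (char *)buf + sizeof(code));

	int (*fn)(void) = (int (*)(void))buf;
	printf("%d\n", fn());	/* prints 42 on AArch64 */
	return 0;
}
```

Userspace only has to worry about its own thread migrating; the kernel variant additionally has to chase instructions already fetched into other CPUs' pipelines, which is exactly what the new IPI handles.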
@@ -49,7 +49,8 @@
 #define ARM64_HAS_CACHE_DIC		28
 #define ARM64_HW_DBM			29
 #define ARM64_SSBD			30
+#define ARM64_MISMATCHED_CACHE_TYPE	31
 
-#define ARM64_NCAPS			31
+#define ARM64_NCAPS			32
 
 #endif /* __ASM_CPUCAPS_H */
@@ -16,13 +16,15 @@
 #ifndef __ASM_FP_H
 #define __ASM_FP_H
 
-#include <asm/ptrace.h>
+#include <asm/errno.h>
+#include <asm/ptrace.h>
+#include <asm/processor.h>
 #include <asm/sigcontext.h>
 #include <asm/sysreg.h>
 
 #ifndef __ASSEMBLY__
 
 #include <linux/build_bug.h>
 #include <linux/cache.h>
 #include <linux/init.h>
 #include <linux/stddef.h>
@@ -102,6 +104,16 @@ extern int sve_set_vector_length(struct task_struct *task,
 extern int sve_set_current_vl(unsigned long arg);
 extern int sve_get_current_vl(void);
 
+static inline void sve_user_disable(void)
+{
+	sysreg_clear_set(cpacr_el1, CPACR_EL1_ZEN_EL0EN, 0);
+}
+
+static inline void sve_user_enable(void)
+{
+	sysreg_clear_set(cpacr_el1, 0, CPACR_EL1_ZEN_EL0EN);
+}
+
 /*
  * Probing and setup functions.
  * Calls to these functions must be serialised with one another.
@@ -128,6 +140,9 @@ static inline int sve_get_current_vl(void)
 	return -EINVAL;
 }
 
+static inline void sve_user_disable(void) { BUILD_BUG(); }
+static inline void sve_user_enable(void) { BUILD_BUG(); }
+
 static inline void sve_init_vq_map(void) { }
 static inline void sve_update_vq_map(void) { }
 static inline int sve_verify_vq_map(void) { return 0; }
@@ -446,8 +446,6 @@ u32 aarch64_insn_gen_prefetch(enum aarch64_insn_register base,
 s32 aarch64_get_branch_offset(u32 insn);
 u32 aarch64_set_branch_offset(u32 insn, s32 offset);
 
-bool aarch64_insn_hotpatch_safe(u32 old_insn, u32 new_insn);
-
 int aarch64_insn_patch_text_nosync(void *addr, u32 insn);
 int aarch64_insn_patch_text(void *addrs[], u32 insns[], int cnt);
@@ -140,7 +140,7 @@ static inline void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr)
 
 static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu)
 {
-	*vcpu_cpsr(vcpu) |= COMPAT_PSR_T_BIT;
+	*vcpu_cpsr(vcpu) |= PSR_AA32_T_BIT;
 }
 
 /*
@@ -190,8 +190,8 @@ static inline bool vcpu_mode_priv(const struct kvm_vcpu *vcpu)
 	u32 mode;
 
 	if (vcpu_mode_is_32bit(vcpu)) {
-		mode = *vcpu_cpsr(vcpu) & COMPAT_PSR_MODE_MASK;
-		return mode > COMPAT_PSR_MODE_USR;
+		mode = *vcpu_cpsr(vcpu) & PSR_AA32_MODE_MASK;
+		return mode > PSR_AA32_MODE_USR;
 	}
 
 	mode = *vcpu_cpsr(vcpu) & PSR_MODE_MASK;
@@ -329,7 +329,7 @@ static inline unsigned long kvm_vcpu_get_mpidr_aff(struct kvm_vcpu *vcpu)
 static inline void kvm_vcpu_set_be(struct kvm_vcpu *vcpu)
 {
 	if (vcpu_mode_is_32bit(vcpu)) {
-		*vcpu_cpsr(vcpu) |= COMPAT_PSR_E_BIT;
+		*vcpu_cpsr(vcpu) |= PSR_AA32_E_BIT;
 	} else {
 		u64 sctlr = vcpu_read_sys_reg(vcpu, SCTLR_EL1);
 		sctlr |= (1 << 25);
@@ -340,7 +340,7 @@ static inline void kvm_vcpu_set_be(struct kvm_vcpu *vcpu)
 static inline bool kvm_vcpu_is_be(struct kvm_vcpu *vcpu)
 {
 	if (vcpu_mode_is_32bit(vcpu))
-		return !!(*vcpu_cpsr(vcpu) & COMPAT_PSR_E_BIT);
+		return !!(*vcpu_cpsr(vcpu) & PSR_AA32_E_BIT);
 
 	return !!(vcpu_read_sys_reg(vcpu, SCTLR_EL1) & (1 << 25));
 }
@@ -19,11 +19,4 @@
 void kernel_neon_begin(void);
 void kernel_neon_end(void);
 
-/*
- * Temporary macro to allow the crypto code to compile. Note that the
- * semantics of kernel_neon_begin_partial() are now different from the
- * original as it does not allow being called in an interrupt context.
- */
-#define kernel_neon_begin_partial(num_regs) kernel_neon_begin()
-
 #endif /* ! __ASM_NEON_H */
@@ -35,10 +35,14 @@ void __init numa_set_distance(int from, int to, int distance);
 void __init numa_free_distance(void);
 void __init early_map_cpu_to_node(unsigned int cpu, int nid);
 void numa_store_cpu_info(unsigned int cpu);
+void numa_add_cpu(unsigned int cpu);
+void numa_remove_cpu(unsigned int cpu);
 
 #else	/* CONFIG_NUMA */
 
 static inline void numa_store_cpu_info(unsigned int cpu) { }
+static inline void numa_add_cpu(unsigned int cpu) { }
+static inline void numa_remove_cpu(unsigned int cpu) { }
 static inline void arm64_numa_init(void) { }
 static inline void early_map_cpu_to_node(unsigned int cpu, int nid) { }
@@ -182,12 +182,12 @@ static inline void compat_start_thread(struct pt_regs *regs, unsigned long pc,
 				       unsigned long sp)
 {
 	start_thread_common(regs, pc);
-	regs->pstate = COMPAT_PSR_MODE_USR;
+	regs->pstate = PSR_AA32_MODE_USR;
 	if (pc & 1)
-		regs->pstate |= COMPAT_PSR_T_BIT;
+		regs->pstate |= PSR_AA32_T_BIT;
 
 #ifdef __AARCH64EB__
-	regs->pstate |= COMPAT_PSR_E_BIT;
+	regs->pstate |= PSR_AA32_E_BIT;
 #endif
 
 	regs->compat_sp = sp;
@@ -266,5 +266,20 @@ extern void __init minsigstksz_setup(void);
 #define SVE_SET_VL(arg)	sve_set_current_vl(arg)
 #define SVE_GET_VL()	sve_get_current_vl()
 
+/*
+ * For CONFIG_GCC_PLUGIN_STACKLEAK
+ *
+ * These need to be macros because otherwise we get stuck in a nightmare
+ * of header definitions for the use of task_stack_page.
+ */
+
+#define current_top_of_stack()							\
+({										\
+	struct stack_info _info;						\
+	BUG_ON(!on_accessible_stack(current, current_stack_pointer, &_info));	\
+	_info.high;								\
+})
+#define on_thread_stack()	(on_task_stack(current, current_stack_pointer, NULL))
+
 #endif /* __ASSEMBLY__ */
 #endif /* __ASM_PROCESSOR_H */
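current_top_of_stack() hands the STACKLEAK plugin the high watermark it scrubs up to on syscall exit. For intuition, a userspace analogue that locates the current thread's stack bounds — glibc-specific (pthread_getattr_np()), and the variable names are mine:

```c
/* Locate the current thread's stack; the "top" in grows-down terms is
 * the high address, analogous to current_top_of_stack() above. */
#define _GNU_SOURCE
#include <pthread.h>
#include <stdio.h>

int main(void)
{
	pthread_attr_t attr;
	void *stack_low;
	size_t stack_size;

	if (pthread_getattr_np(pthread_self(), &attr))
		return 1;
	pthread_attr_getstack(&attr, &stack_low, &stack_size);
	pthread_attr_destroy(&attr);

	printf("stack: %p .. %p (%zu bytes)\n",
	       stack_low, (void *)((char *)stack_low + stack_size),
	       stack_size);
	return 0;
}
```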
@@ -35,36 +35,39 @@
 #define COMPAT_PTRACE_GETHBPREGS	29
 #define COMPAT_PTRACE_SETHBPREGS	30
 
-/* AArch32 CPSR bits */
-#define COMPAT_PSR_MODE_MASK	0x0000001f
-#define COMPAT_PSR_MODE_USR	0x00000010
-#define COMPAT_PSR_MODE_FIQ	0x00000011
-#define COMPAT_PSR_MODE_IRQ	0x00000012
-#define COMPAT_PSR_MODE_SVC	0x00000013
-#define COMPAT_PSR_MODE_ABT	0x00000017
-#define COMPAT_PSR_MODE_HYP	0x0000001a
-#define COMPAT_PSR_MODE_UND	0x0000001b
-#define COMPAT_PSR_MODE_SYS	0x0000001f
-#define COMPAT_PSR_T_BIT	0x00000020
-#define COMPAT_PSR_F_BIT	0x00000040
-#define COMPAT_PSR_I_BIT	0x00000080
-#define COMPAT_PSR_A_BIT	0x00000100
-#define COMPAT_PSR_E_BIT	0x00000200
-#define COMPAT_PSR_J_BIT	0x01000000
-#define COMPAT_PSR_Q_BIT	0x08000000
-#define COMPAT_PSR_V_BIT	0x10000000
-#define COMPAT_PSR_C_BIT	0x20000000
-#define COMPAT_PSR_Z_BIT	0x40000000
-#define COMPAT_PSR_N_BIT	0x80000000
-#define COMPAT_PSR_IT_MASK	0x0600fc00	/* If-Then execution state mask */
-#define COMPAT_PSR_GE_MASK	0x000f0000
+/* SPSR_ELx bits for exceptions taken from AArch32 */
+#define PSR_AA32_MODE_MASK	0x0000001f
+#define PSR_AA32_MODE_USR	0x00000010
+#define PSR_AA32_MODE_FIQ	0x00000011
+#define PSR_AA32_MODE_IRQ	0x00000012
+#define PSR_AA32_MODE_SVC	0x00000013
+#define PSR_AA32_MODE_ABT	0x00000017
+#define PSR_AA32_MODE_HYP	0x0000001a
+#define PSR_AA32_MODE_UND	0x0000001b
+#define PSR_AA32_MODE_SYS	0x0000001f
+#define PSR_AA32_T_BIT		0x00000020
+#define PSR_AA32_F_BIT		0x00000040
+#define PSR_AA32_I_BIT		0x00000080
+#define PSR_AA32_A_BIT		0x00000100
+#define PSR_AA32_E_BIT		0x00000200
+#define PSR_AA32_DIT_BIT	0x01000000
+#define PSR_AA32_Q_BIT		0x08000000
+#define PSR_AA32_V_BIT		0x10000000
+#define PSR_AA32_C_BIT		0x20000000
+#define PSR_AA32_Z_BIT		0x40000000
+#define PSR_AA32_N_BIT		0x80000000
+#define PSR_AA32_IT_MASK	0x0600fc00	/* If-Then execution state mask */
+#define PSR_AA32_GE_MASK	0x000f0000
 
 #ifdef CONFIG_CPU_BIG_ENDIAN
-#define COMPAT_PSR_ENDSTATE	COMPAT_PSR_E_BIT
+#define PSR_AA32_ENDSTATE	PSR_AA32_E_BIT
 #else
-#define COMPAT_PSR_ENDSTATE	0
+#define PSR_AA32_ENDSTATE	0
 #endif
 
+/* AArch32 CPSR bits, as seen in AArch32 */
+#define COMPAT_PSR_DIT_BIT	0x00200000
+
 /*
  * These are 'magic' values for PTRACE_PEEKUSR that return info about where a
  * process is located in memory.
@@ -111,6 +114,30 @@
 #define compat_sp_fiq	regs[29]
 #define compat_lr_fiq	regs[30]
 
+static inline unsigned long compat_psr_to_pstate(const unsigned long psr)
+{
+	unsigned long pstate;
+
+	pstate = psr & ~COMPAT_PSR_DIT_BIT;
+
+	if (psr & COMPAT_PSR_DIT_BIT)
+		pstate |= PSR_AA32_DIT_BIT;
+
+	return pstate;
+}
+
+static inline unsigned long pstate_to_compat_psr(const unsigned long pstate)
+{
+	unsigned long psr;
+
+	psr = pstate & ~PSR_AA32_DIT_BIT;
+
+	if (pstate & PSR_AA32_DIT_BIT)
+		psr |= COMPAT_PSR_DIT_BIT;
+
+	return psr;
+}
+
 /*
  * This struct defines the way the registers are stored on the stack during an
  * exception. Note that sizeof(struct pt_regs) has to be a multiple of 16 (for
@@ -156,7 +183,7 @@ static inline void forget_syscall(struct pt_regs *regs)
 
 #ifdef CONFIG_COMPAT
 #define compat_thumb_mode(regs) \
-	(((regs)->pstate & COMPAT_PSR_T_BIT))
+	(((regs)->pstate & PSR_AA32_T_BIT))
 #else
 #define compat_thumb_mode(regs) (0)
 #endif
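The DIT bit sits at bit 21 in the AArch32 CPSR but at bit 24 in SPSR_ELx, which is why the two helpers above shuffle it when converting between the formats. A standalone restatement using the constants from this patch, with a round-trip check (the test value is made up):

```c
/* Restates compat_psr_to_pstate()/pstate_to_compat_psr() from the
 * hunk above, using the bit positions defined in this patch. */
#include <assert.h>
#include <stdio.h>

#define COMPAT_PSR_DIT_BIT	0x00200000UL	/* DIT as seen in AArch32 */
#define PSR_AA32_DIT_BIT	0x01000000UL	/* DIT as seen in SPSR_ELx */

static unsigned long compat_psr_to_pstate(unsigned long psr)
{
	unsigned long pstate = psr & ~COMPAT_PSR_DIT_BIT;

	if (psr & COMPAT_PSR_DIT_BIT)
		pstate |= PSR_AA32_DIT_BIT;
	return pstate;
}

static unsigned long pstate_to_compat_psr(unsigned long pstate)
{
	unsigned long psr = pstate & ~PSR_AA32_DIT_BIT;

	if (pstate & PSR_AA32_DIT_BIT)
		psr |= COMPAT_PSR_DIT_BIT;
	return psr;
}

int main(void)
{
	unsigned long psr = 0x00200010UL;	/* USR mode, DIT set */

	assert(pstate_to_compat_psr(compat_psr_to_pstate(psr)) == psr);
	printf("pstate: %#lx\n", compat_psr_to_pstate(psr));	/* 0x1000010 */
	return 0;
}
```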
Some files were not shown because too many files have changed in this diff.