Merge branch 'for-linus' of git://ftp.arm.linux.org.uk/~rmk/linux-arm
Pull ARM updates from Russell King:
"Bigger items included in this update are:
- A series of updates from Arnd for ARM randconfig build failures
- Updates from Dmitry for StrongARM SA-1100 to move IRQ handling to
drivers/irqchip/
- Move ARM's SP804 timer to drivers/clocksource/
- Perf updates from Mark Rutland in preparation to move the ARM perf
code into drivers/ so it can be shared with ARM64.
- MCPM updates from Nicolas
- Add support for taking platform serial number from DT
- Re-implement Keystone2 physical address space switch to conform to
architecture requirements
- Clean up ARMv7 LPAE code, which goes in hand with the Keystone2
changes.
- L2C cleanups to avoid unlocking caches when the secure support
prevents us from unlocking.
- Avoid cleaning a potentially dirty cache containing stale data on
CPU initialisation
- Add ARM-only entry point for secondary startup (for machines that
can only call into a Thumb kernel in ARM mode). Same thing is also
done for the resume entry point.
- Provide arch_irqs_disabled via asm-generic
- Enlarge ARMv7M vector table
- Always use BFD linker for VDSO, as gold doesn't accept some of the
options we need.
- Fix an incorrect BSYM (for Thumb symbols) usage, and convert all
BSYM compiler macros to a "badr" (for branch address).
- Shut up compiler warnings provoked by our cmpxchg() implementation.
- Ensure bad xchg sizes fail to link"
* 'for-linus' of git://ftp.arm.linux.org.uk/~rmk/linux-arm: (75 commits)
ARM: Fix build if CLKDEV_LOOKUP is not configured
ARM: fix new BSYM() usage introduced via for-arm-soc branch
ARM: 8383/1: nommu: avoid deprecated source register on mov
ARM: 8391/1: l2c: add options to overwrite prefetching behavior
ARM: 8390/1: irqflags: Get arch_irqs_disabled from asm-generic
ARM: 8387/1: arm/mm/dma-mapping.c: Add arm_coherent_dma_mmap
ARM: 8388/1: tcm: Don't crash when TCM banks are protected by TrustZone
ARM: 8384/1: VDSO: force use of BFD linker
ARM: 8385/1: VDSO: group link options
ARM: cmpxchg: avoid warnings from macro-ized cmpxchg() implementations
ARM: remove __bad_xchg definition
ARM: 8369/1: ARMv7M: define size of vector table for Vybrid
ARM: 8382/1: clocksource: make ARM_TIMER_SP804 depend on GENERIC_SCHED_CLOCK
ARM: 8366/1: move Dual-Timer SP804 driver to drivers/clocksource
ARM: 8365/1: introduce sp804_timer_disable and remove arm_timer.h inclusion
ARM: 8364/1: fix BE32 module loading
ARM: 8360/1: add secondary_startup_arm prototype in header file
ARM: 8359/1: correct secondary_startup_arm mode
ARM: proc-v7: sanitise and document registers around errata
ARM: proc-v7: clean up MIDR access
...
@@ -67,6 +67,11 @@ Optional properties:
   disable if zero.
 - arm,prefetch-offset : Override prefetch offset value. Valid values are
   0-7, 15, 23, and 31.
+- prefetch-data : Data prefetch. Value: <0> (forcibly disable), <1>
+  (forcibly enable), property absent (retain settings set by firmware)
+- prefetch-instr : Instruction prefetch. Value: <0> (forcibly disable),
+  <1> (forcibly enable), property absent (retain settings set by
+  firmware)
 
 Example:
@@ -856,6 +856,10 @@ address which can extend beyond that limit.
   name may clash with standard defined ones, you prefix them with your
   vendor name and a comma.
 
+Additional properties for the root node:
+
+- serial-number : a string representing the device's serial number
+
 b) The /cpus node
 
 This node is the parent of all individual CPU nodes. It doesn't
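Once the root node carries serial-number, a DT-booted Linux system exposes it back to userspace: properties of the root node appear under /proc/device-tree (a symlink to /sys/firmware/devicetree/base). A small hedged sketch reading it back; it assumes it runs on such a system and that the property is present:

	/* Sketch only: reads the DT root serial-number property from procfs.
	 * Path and availability depend on the running system. */
	#include <stdio.h>

	int main(void)
	{
		char buf[64] = "";
		FILE *f = fopen("/proc/device-tree/serial-number", "r");

		if (!f) {
			perror("serial-number");
			return 1;
		}
		fgets(buf, sizeof(buf), f);	/* property is a NUL-terminated string */
		fclose(f);
		printf("serial: %s\n", buf);
		return 0;
	}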
@@ -33,8 +33,8 @@ config ARM
 	select HARDIRQS_SW_RESEND
 	select HAVE_ARCH_AUDITSYSCALL if (AEABI && !OABI_COMPAT)
 	select HAVE_ARCH_BITREVERSE if (CPU_32v7M || CPU_32v7) && !CPU_32v6
-	select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL
-	select HAVE_ARCH_KGDB
+	select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL && !CPU_ENDIAN_BE32
+	select HAVE_ARCH_KGDB if !CPU_ENDIAN_BE32
 	select HAVE_ARCH_SECCOMP_FILTER if (AEABI && !OABI_COMPAT)
 	select HAVE_ARCH_TRACEHOOK
 	select HAVE_BPF_JIT
@@ -45,7 +45,7 @@ config ARM
 	select HAVE_DMA_API_DEBUG
 	select HAVE_DMA_ATTRS
 	select HAVE_DMA_CONTIGUOUS if MMU
-	select HAVE_DYNAMIC_FTRACE if (!XIP_KERNEL)
+	select HAVE_DYNAMIC_FTRACE if (!XIP_KERNEL) && !CPU_ENDIAN_BE32
 	select HAVE_EFFICIENT_UNALIGNED_ACCESS if (CPU_V6 || CPU_V6K || CPU_V7) && MMU
 	select HAVE_FTRACE_MCOUNT_RECORD if (!XIP_KERNEL)
 	select HAVE_FUNCTION_GRAPH_TRACER if (!THUMB2_KERNEL)
@@ -59,10 +59,10 @@ config ARM
 	select HAVE_KERNEL_LZMA
 	select HAVE_KERNEL_LZO
 	select HAVE_KERNEL_XZ
-	select HAVE_KPROBES if !XIP_KERNEL
+	select HAVE_KPROBES if !XIP_KERNEL && !CPU_ENDIAN_BE32 && !CPU_V7M
 	select HAVE_KRETPROBES if (HAVE_KPROBES)
 	select HAVE_MEMBLOCK
-	select HAVE_MOD_ARCH_SPECIFIC if ARM_UNWIND
+	select HAVE_MOD_ARCH_SPECIFIC
 	select HAVE_OPROFILE if (HAVE_PERF_EVENTS)
 	select HAVE_OPTPROBES if !THUMB2_KERNEL
 	select HAVE_PERF_EVENTS
@@ -173,7 +173,7 @@ config LOCKDEP_SUPPORT
 
 config TRACE_IRQFLAGS_SUPPORT
 	bool
-	default y
+	default !CPU_V7M
 
 config RWSEM_XCHGADD_ALGORITHM
 	bool
@@ -1010,11 +1010,6 @@ config PLAT_PXA
 config PLAT_VERSATILE
 	bool
 
-config ARM_TIMER_SP804
-	bool
-	select CLKSRC_MMIO
-	select CLKSRC_OF if OF
-
 source "arch/arm/firmware/Kconfig"
 
 source arch/arm/mm/Kconfig
@@ -1342,6 +1337,7 @@ config SMP
 	depends on GENERIC_CLOCKEVENTS
 	depends on HAVE_SMP
 	depends on MMU || ARM_MPU
+	select IRQ_WORK
 	help
 	  This enables support for systems with more than one CPU. If you have
 	  a system with only one CPU, say N. If you have a system with more
@@ -1717,6 +1713,21 @@ config HAVE_ARCH_TRANSPARENT_HUGEPAGE
 config ARCH_WANT_GENERAL_HUGETLB
 	def_bool y
 
+config ARM_MODULE_PLTS
+	bool "Use PLTs to allow module memory to spill over into vmalloc area"
+	depends on MODULES
+	help
+	  Allocate PLTs when loading modules so that jumps and calls whose
+	  targets are too far away for their relative offsets to be encoded
+	  in the instructions themselves can be bounced via veneers in the
+	  module's PLT. This allows modules to be allocated in the generic
+	  vmalloc area after the dedicated module memory area has been
+	  exhausted. The modules will use slightly more memory, but after
+	  rounding up to page size, the actual memory footprint is usually
+	  the same.
+
+	  Say y if you are getting out of memory errors while loading modules
+
 source "mm/Kconfig"
 
 config FORCE_MAX_ZONEORDER
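A note on the mechanism this help text describes: an ARM b/bl instruction encodes a signed 26-bit byte offset, so a direct branch reaches only about ±32 MB; a module loaded far away in vmalloc space must have its calls bounced through a nearby PLT veneer that loads the full 32-bit target. A hedged, host-runnable sketch of just the range check — every address below is invented for illustration:

	/* Sketch: decide whether a call can be a direct b/bl or needs a PLT
	 * veneer.  Not kernel code; addresses are made up. */
	#include <stdio.h>
	#include <stdint.h>

	static int in_branch_range(uint32_t from, uint32_t to)
	{
		int64_t off = (int64_t)to - (int64_t)from;

		return off >= -(1 << 25) && off < (1 << 25);	/* +/- 32 MB */
	}

	int main(void)
	{
		uint32_t callee = 0xc0008000;	/* kernel text (example) */
		uint32_t near_mod = 0xbf000000;	/* module area (example) */
		uint32_t far_mod = 0xf0000000;	/* vmalloc spill (example) */

		printf("near: %s\n", in_branch_range(near_mod, callee) ? "direct" : "PLT");
		printf("far:  %s\n", in_branch_range(far_mod, callee) ? "direct" : "PLT");
		return 0;
	}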
@@ -1987,6 +1998,7 @@ config XIP_PHYS_ADDR
 config KEXEC
 	bool "Kexec system call (EXPERIMENTAL)"
 	depends on (!SMP || PM_SLEEP_SMP)
+	depends on !CPU_V7M
 	help
 	  kexec is a system call that implements the ability to shutdown your
 	  current kernel, and to start another kernel. It is like a reboot
@@ -5,6 +5,7 @@ source "lib/Kconfig.debug"
|
||||
config ARM_PTDUMP
|
||||
bool "Export kernel pagetable layout to userspace via debugfs"
|
||||
depends on DEBUG_KERNEL
|
||||
depends on MMU
|
||||
select DEBUG_FS
|
||||
---help---
|
||||
Say Y here if you want to show the kernel pagetable layout in a
|
||||
|
||||
@@ -19,6 +19,10 @@ LDFLAGS_vmlinux += --be8
 LDFLAGS_MODULE	+= --be8
 endif
 
+ifeq ($(CONFIG_ARM_MODULE_PLTS),y)
+LDFLAGS_MODULE	+= -T $(srctree)/arch/arm/kernel/module.lds
+endif
+
 OBJCOPYFLAGS	:=-O binary -R .comment -S
 GZFLAGS		:=-9
 #KBUILD_CFLAGS	+=-pipe
@@ -103,6 +103,8 @@ extra-y += piggy.gzip piggy.lzo piggy.lzma piggy.xzkern piggy.lz4 \
 		lib1funcs.S ashldi3.S bswapsdi2.S $(libfdt) $(libfdt_hdrs) \
 		hyp-stub.S
 
+KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING
+
 ifeq ($(CONFIG_FUNCTION_TRACER),y)
 ORIG_CFLAGS := $(KBUILD_CFLAGS)
 KBUILD_CFLAGS = $(subst -pg, , $(ORIG_CFLAGS))
@@ -130,7 +130,7 @@ start:
 		.endr
  ARM(		mov	r0, r0		)
  ARM(		b	1f		)
- THUMB(		adr	r12, BSYM(1f)	)
+ THUMB(		badr	r12, 1f		)
  THUMB(		bx	r12		)
 
 		.word	_magic_sig	@ Magic numbers to help the loader
@@ -447,7 +447,7 @@ dtb_check_done:
 
 		bl	cache_clean_flush
 
-		adr	r0, BSYM(restart)
+		badr	r0, restart
 		add	r0, r0, r6
 		mov	pc, r0
 
@@ -11,7 +11,6 @@ obj-$(CONFIG_SHARP_LOCOMO) += locomo.o
 obj-$(CONFIG_SHARP_PARAM)	+= sharpsl_param.o
 obj-$(CONFIG_SHARP_SCOOP)	+= scoop.o
 obj-$(CONFIG_PCI_HOST_ITE8152)	+= it8152.o
-obj-$(CONFIG_ARM_TIMER_SP804)	+= timer-sp.o
 obj-$(CONFIG_MCPM)		+= mcpm_head.o mcpm_entry.o mcpm_platsmp.o vlock.o
 CFLAGS_REMOVE_mcpm_entry.o	= -pg
 AFLAGS_mcpm_head.o		:= -march=armv7-a
@@ -20,6 +20,126 @@
 #include <asm/cputype.h>
 #include <asm/suspend.h>
 
+/*
+ * The public API for this code is documented in arch/arm/include/asm/mcpm.h.
+ * For a comprehensive description of the main algorithm used here, please
+ * see Documentation/arm/cluster-pm-race-avoidance.txt.
+ */
+
+struct sync_struct mcpm_sync;
+
+/*
+ * __mcpm_cpu_going_down: Indicates that the cpu is being torn down.
+ * This must be called at the point of committing to teardown of a CPU.
+ * The CPU cache (SCTRL.C bit) is expected to still be active.
+ */
+static void __mcpm_cpu_going_down(unsigned int cpu, unsigned int cluster)
+{
+	mcpm_sync.clusters[cluster].cpus[cpu].cpu = CPU_GOING_DOWN;
+	sync_cache_w(&mcpm_sync.clusters[cluster].cpus[cpu].cpu);
+}
+
+/*
+ * __mcpm_cpu_down: Indicates that cpu teardown is complete and that the
+ * cluster can be torn down without disrupting this CPU.
+ * To avoid deadlocks, this must be called before a CPU is powered down.
+ * The CPU cache (SCTRL.C bit) is expected to be off.
+ * However L2 cache might or might not be active.
+ */
+static void __mcpm_cpu_down(unsigned int cpu, unsigned int cluster)
+{
+	dmb();
+	mcpm_sync.clusters[cluster].cpus[cpu].cpu = CPU_DOWN;
+	sync_cache_w(&mcpm_sync.clusters[cluster].cpus[cpu].cpu);
+	sev();
+}
+
+/*
+ * __mcpm_outbound_leave_critical: Leave the cluster teardown critical section.
+ * @state: the final state of the cluster:
+ *     CLUSTER_UP: no destructive teardown was done and the cluster has been
+ *         restored to the previous state (CPU cache still active); or
+ *     CLUSTER_DOWN: the cluster has been torn-down, ready for power-off
+ *         (CPU cache disabled, L2 cache either enabled or disabled).
+ */
+static void __mcpm_outbound_leave_critical(unsigned int cluster, int state)
+{
+	dmb();
+	mcpm_sync.clusters[cluster].cluster = state;
+	sync_cache_w(&mcpm_sync.clusters[cluster].cluster);
+	sev();
+}
+
+/*
+ * __mcpm_outbound_enter_critical: Enter the cluster teardown critical section.
+ * This function should be called by the last man, after local CPU teardown
+ * is complete.  CPU cache expected to be active.
+ *
+ * Returns:
+ *     false: the critical section was not entered because an inbound CPU was
+ *         observed, or the cluster is already being set up;
+ *     true: the critical section was entered: it is now safe to tear down the
+ *         cluster.
+ */
+static bool __mcpm_outbound_enter_critical(unsigned int cpu, unsigned int cluster)
+{
+	unsigned int i;
+	struct mcpm_sync_struct *c = &mcpm_sync.clusters[cluster];
+
+	/* Warn inbound CPUs that the cluster is being torn down: */
+	c->cluster = CLUSTER_GOING_DOWN;
+	sync_cache_w(&c->cluster);
+
+	/* Back out if the inbound cluster is already in the critical region: */
+	sync_cache_r(&c->inbound);
+	if (c->inbound == INBOUND_COMING_UP)
+		goto abort;
+
+	/*
+	 * Wait for all CPUs to get out of the GOING_DOWN state, so that local
+	 * teardown is complete on each CPU before tearing down the cluster.
+	 *
+	 * If any CPU has been woken up again from the DOWN state, then we
+	 * shouldn't be taking the cluster down at all: abort in that case.
+	 */
+	sync_cache_r(&c->cpus);
+	for (i = 0; i < MAX_CPUS_PER_CLUSTER; i++) {
+		int cpustate;
+
+		if (i == cpu)
+			continue;
+
+		while (1) {
+			cpustate = c->cpus[i].cpu;
+			if (cpustate != CPU_GOING_DOWN)
+				break;
+
+			wfe();
+			sync_cache_r(&c->cpus[i].cpu);
+		}
+
+		switch (cpustate) {
+		case CPU_DOWN:
+			continue;
+
+		default:
+			goto abort;
+		}
+	}
+
+	return true;
+
+abort:
+	__mcpm_outbound_leave_critical(cluster, CLUSTER_UP);
+	return false;
+}
+
+static int __mcpm_cluster_state(unsigned int cluster)
+{
+	sync_cache_r(&mcpm_sync.clusters[cluster].cluster);
+	return mcpm_sync.clusters[cluster].cluster;
+}
+
 extern unsigned long mcpm_entry_vectors[MAX_NR_CLUSTERS][MAX_CPUS_PER_CLUSTER];
 
 void mcpm_set_entry_vector(unsigned cpu, unsigned cluster, void *ptr)
@@ -78,16 +198,11 @@ int mcpm_cpu_power_up(unsigned int cpu, unsigned int cluster)
 	bool cpu_is_down, cluster_is_down;
 	int ret = 0;
 
+	pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
 	if (!platform_ops)
 		return -EUNATCH; /* try not to shadow power_up errors */
 	might_sleep();
 
-	/* backward compatibility callback */
-	if (platform_ops->power_up)
-		return platform_ops->power_up(cpu, cluster);
-
-	pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
-
 	/*
 	 * Since this is called with IRQs enabled, and no arch_spin_lock_irq
 	 * variant exists, we need to disable IRQs manually here.
@@ -128,29 +243,17 @@ void mcpm_cpu_power_down(void)
 	bool cpu_going_down, last_man;
 	phys_reset_t phys_reset;
 
-	if (WARN_ON_ONCE(!platform_ops))
-		return;
-	BUG_ON(!irqs_disabled());
-
-	/*
-	 * Do this before calling into the power_down method,
-	 * as it might not always be safe to do afterwards.
-	 */
-	setup_mm_for_reboot();
-
-	/* backward compatibility callback */
-	if (platform_ops->power_down) {
-		platform_ops->power_down();
-		goto not_dead;
-	}
-
 	mpidr = read_cpuid_mpidr();
 	cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
 	cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
 	pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
+	if (WARN_ON_ONCE(!platform_ops))
+		return;
+	BUG_ON(!irqs_disabled());
+
+	setup_mm_for_reboot();
 
 	__mcpm_cpu_going_down(cpu, cluster);
 
 	arch_spin_lock(&mcpm_lock);
 	BUG_ON(__mcpm_cluster_state(cluster) != CLUSTER_UP);
@@ -187,7 +290,6 @@ void mcpm_cpu_power_down(void)
 	if (cpu_going_down)
 		wfi();
 
-not_dead:
 	/*
 	 * It is possible for a power_up request to happen concurrently
 	 * with a power_down request for the same CPU. In this case the
@@ -219,22 +321,11 @@ int mcpm_wait_for_cpu_powerdown(unsigned int cpu, unsigned int cluster)
 	return ret;
 }
 
-void mcpm_cpu_suspend(u64 expected_residency)
+void mcpm_cpu_suspend(void)
 {
 	if (WARN_ON_ONCE(!platform_ops))
 		return;
 
-	/* backward compatibility callback */
-	if (platform_ops->suspend) {
-		phys_reset_t phys_reset;
-		BUG_ON(!irqs_disabled());
-		setup_mm_for_reboot();
-		platform_ops->suspend(expected_residency);
-		phys_reset = (phys_reset_t)(unsigned long)virt_to_phys(cpu_reset);
-		phys_reset(virt_to_phys(mcpm_entry_point));
-		BUG();
-	}
-
 	/* Some platforms might have to enable special resume modes, etc. */
 	if (platform_ops->cpu_suspend_prepare) {
 		unsigned int mpidr = read_cpuid_mpidr();
@@ -256,12 +347,6 @@ int mcpm_cpu_powered_up(void)
 	if (!platform_ops)
 		return -EUNATCH;
 
-	/* backward compatibility callback */
-	if (platform_ops->powered_up) {
-		platform_ops->powered_up();
-		return 0;
-	}
-
 	mpidr = read_cpuid_mpidr();
 	cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
 	cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
@@ -334,120 +419,6 @@ int __init mcpm_loopback(void (*cache_disable)(void))
 
 #endif
 
-struct sync_struct mcpm_sync;
-
-/*
- * __mcpm_cpu_going_down: Indicates that the cpu is being torn down.
- * This must be called at the point of committing to teardown of a CPU.
- * The CPU cache (SCTRL.C bit) is expected to still be active.
- */
-void __mcpm_cpu_going_down(unsigned int cpu, unsigned int cluster)
-{
-	mcpm_sync.clusters[cluster].cpus[cpu].cpu = CPU_GOING_DOWN;
-	sync_cache_w(&mcpm_sync.clusters[cluster].cpus[cpu].cpu);
-}
-
-/*
- * __mcpm_cpu_down: Indicates that cpu teardown is complete and that the
- * cluster can be torn down without disrupting this CPU.
- * To avoid deadlocks, this must be called before a CPU is powered down.
- * The CPU cache (SCTRL.C bit) is expected to be off.
- * However L2 cache might or might not be active.
- */
-void __mcpm_cpu_down(unsigned int cpu, unsigned int cluster)
-{
-	dmb();
-	mcpm_sync.clusters[cluster].cpus[cpu].cpu = CPU_DOWN;
-	sync_cache_w(&mcpm_sync.clusters[cluster].cpus[cpu].cpu);
-	sev();
-}
-
-/*
- * __mcpm_outbound_leave_critical: Leave the cluster teardown critical section.
- * @state: the final state of the cluster:
- *     CLUSTER_UP: no destructive teardown was done and the cluster has been
- *         restored to the previous state (CPU cache still active); or
- *     CLUSTER_DOWN: the cluster has been torn-down, ready for power-off
- *         (CPU cache disabled, L2 cache either enabled or disabled).
- */
-void __mcpm_outbound_leave_critical(unsigned int cluster, int state)
-{
-	dmb();
-	mcpm_sync.clusters[cluster].cluster = state;
-	sync_cache_w(&mcpm_sync.clusters[cluster].cluster);
-	sev();
-}
-
-/*
- * __mcpm_outbound_enter_critical: Enter the cluster teardown critical section.
- * This function should be called by the last man, after local CPU teardown
- * is complete.  CPU cache expected to be active.
- *
- * Returns:
- *     false: the critical section was not entered because an inbound CPU was
- *         observed, or the cluster is already being set up;
- *     true: the critical section was entered: it is now safe to tear down the
- *         cluster.
- */
-bool __mcpm_outbound_enter_critical(unsigned int cpu, unsigned int cluster)
-{
-	unsigned int i;
-	struct mcpm_sync_struct *c = &mcpm_sync.clusters[cluster];
-
-	/* Warn inbound CPUs that the cluster is being torn down: */
-	c->cluster = CLUSTER_GOING_DOWN;
-	sync_cache_w(&c->cluster);
-
-	/* Back out if the inbound cluster is already in the critical region: */
-	sync_cache_r(&c->inbound);
-	if (c->inbound == INBOUND_COMING_UP)
-		goto abort;
-
-	/*
-	 * Wait for all CPUs to get out of the GOING_DOWN state, so that local
-	 * teardown is complete on each CPU before tearing down the cluster.
-	 *
-	 * If any CPU has been woken up again from the DOWN state, then we
-	 * shouldn't be taking the cluster down at all: abort in that case.
-	 */
-	sync_cache_r(&c->cpus);
-	for (i = 0; i < MAX_CPUS_PER_CLUSTER; i++) {
-		int cpustate;
-
-		if (i == cpu)
-			continue;
-
-		while (1) {
-			cpustate = c->cpus[i].cpu;
-			if (cpustate != CPU_GOING_DOWN)
-				break;
-
-			wfe();
-			sync_cache_r(&c->cpus[i].cpu);
-		}
-
-		switch (cpustate) {
-		case CPU_DOWN:
-			continue;
-
-		default:
-			goto abort;
-		}
-	}
-
-	return true;
-
-abort:
-	__mcpm_outbound_leave_critical(cluster, CLUSTER_UP);
-	return false;
-}
-
-int __mcpm_cluster_state(unsigned int cluster)
-{
-	sync_cache_r(&mcpm_sync.clusters[cluster].cluster);
-	return mcpm_sync.clusters[cluster].cluster;
-}
-
 extern unsigned long mcpm_power_up_setup_phys;
 
 int __init mcpm_sync_init(
@@ -49,7 +49,7 @@
 ENTRY(mcpm_entry_point)
 
  ARM_BE8(setend	be)
- THUMB(	adr	r12, BSYM(1f)	)
+ THUMB(	badr	r12, 1f		)
  THUMB(	bx	r12		)
  THUMB(	.thumb			)
 1:
@@ -177,6 +177,21 @@
 	restore_irqs_notrace \oldcpsr
 	.endm
 
+/*
+ * Assembly version of "adr rd, BSYM(sym)".  This should only be used to
+ * reference local symbols in the same assembly file which are to be
+ * resolved by the assembler.  Other usage is undefined.
+ */
+	.irp	c,,eq,ne,cs,cc,mi,pl,vs,vc,hi,ls,ge,lt,gt,le,hs,lo
+	.macro	badr\c, rd, sym
+#ifdef CONFIG_THUMB2_KERNEL
+	adr\c	\rd, \sym + 1
+#else
+	adr\c	\rd, \sym
+#endif
+	.endm
+	.endr
+
 /*
  * Get current thread_info.
  */
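For readers following the BSYM-to-badr conversion in this series: the Thumb-2 branch of the macro simply references "sym + 1", because a bx/blx to an address with bit 0 set switches the CPU into Thumb state. A minimal host-runnable C model of that address fixup — illustrative only, not kernel code:

	/* Sketch: the "+1" a Thumb-2 kernel applies to branch-target
	 * addresses, which is all that badr adds over plain adr. */
	#include <stdint.h>
	#include <stdio.h>

	static uintptr_t badr(uintptr_t sym, int thumb2_kernel)
	{
		/* bit 0 set => processor enters Thumb state on bx/blx */
		return thumb2_kernel ? sym + 1 : sym;
	}

	int main(void)
	{
		uintptr_t label = 0x8000;	/* made-up symbol address */

		printf("ARM kernel:     %#lx\n", (unsigned long)badr(label, 0));
		printf("Thumb-2 kernel: %#lx\n", (unsigned long)badr(label, 1));
		return 0;
	}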
@@ -326,7 +341,7 @@
 THUMB(	orr	\reg , \reg , #PSR_T_BIT	)
 	bne	1f
 	orr	\reg, \reg, #PSR_A_BIT
-	adr	lr, BSYM(2f)
+	badr	lr, 2f
 	msr	spsr_cxsf, \reg
 	__MSR_ELR_HYP(14)
 	__ERET
@@ -482,10 +482,17 @@ static inline void __sync_cache_range_r(volatile void *p, size_t size)
 	: : : "r0","r1","r2","r3","r4","r5","r6","r7", \
 	      "r9","r10","lr","memory" )
 
+#ifdef CONFIG_MMU
 int set_memory_ro(unsigned long addr, int numpages);
 int set_memory_rw(unsigned long addr, int numpages);
 int set_memory_x(unsigned long addr, int numpages);
 int set_memory_nx(unsigned long addr, int numpages);
+#else
+static inline int set_memory_ro(unsigned long addr, int numpages) { return 0; }
+static inline int set_memory_rw(unsigned long addr, int numpages) { return 0; }
+static inline int set_memory_x(unsigned long addr, int numpages) { return 0; }
+static inline int set_memory_nx(unsigned long addr, int numpages) { return 0; }
+#endif
 
 #ifdef CONFIG_DEBUG_RODATA
 void mark_rodata_ro(void);
@@ -94,6 +94,7 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
 		break;
 #endif
 	default:
+		/* Cause a link-time error, the xchg() size is not supported */
 		__bad_xchg(ptr, size), ret = 0;
 		break;
 	}
@@ -102,8 +103,10 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
 	return ret;
 }
 
-#define xchg(ptr,x) \
-	((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
+#define xchg(ptr, x) ({ \
+	(__typeof__(*(ptr)))__xchg((unsigned long)(x), (ptr), \
+				   sizeof(*(ptr))); \
+})
 
 #include <asm-generic/cmpxchg-local.h>
 
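This hunk is part of the "shut up compiler warnings provoked by our cmpxchg() implementation" item from the pull message: when the result of a cast-expression macro is discarded, GCC can emit "value computed is not used", whereas a GCC statement expression ({ ... }) evaluated as a statement does not warn. A standalone sketch with a toy __xchg stand-in (the real one is architecture-specific assembly); compile with gcc -Wall to see the difference:

	#include <stdio.h>

	/* Toy stand-in for the kernel's __xchg; assumption: illustration only,
	 * not atomic and not the real implementation. */
	static unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
	{
		unsigned long old = *(volatile unsigned long *)ptr;

		*(volatile unsigned long *)ptr = x;
		(void)size;
		return old;
	}

	/* Old style: a discarded result trips -Wunused-value. */
	#define xchg_cast(ptr, x) \
		((__typeof__(*(ptr)))__xchg((unsigned long)(x), (ptr), sizeof(*(ptr))))

	/* New style: statement expression, no warning when the result is unused. */
	#define xchg_stmtexpr(ptr, x) ({ \
		(__typeof__(*(ptr)))__xchg((unsigned long)(x), (ptr), \
					   sizeof(*(ptr))); \
	})

	int main(void)
	{
		volatile unsigned long v = 1;

		xchg_stmtexpr(&v, 2UL);		/* result ignored: no warning */
		/* xchg_cast(&v, 3UL); */	/* would warn under -Wall */
		printf("%lu\n", (unsigned long)v);
		return 0;
	}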
@@ -118,14 +121,16 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
  * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
  * them available.
  */
-#define cmpxchg_local(ptr, o, n) \
-	((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), (unsigned long)(o),\
-			(unsigned long)(n), sizeof(*(ptr))))
+#define cmpxchg_local(ptr, o, n) ({ \
+	(__typeof(*ptr))__cmpxchg_local_generic((ptr), \
+						(unsigned long)(o), \
+						(unsigned long)(n), \
+						sizeof(*(ptr))); \
+})
 
 #define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
 
 #ifndef CONFIG_SMP
 #include <asm-generic/cmpxchg.h>
 #endif
 
 #else	/* min ARCH >= ARMv6 */
@@ -201,11 +206,12 @@ static inline unsigned long __cmpxchg_mb(volatile void *ptr, unsigned long old,
 	return ret;
 }
 
-#define cmpxchg(ptr,o,n) \
-	((__typeof__(*(ptr)))__cmpxchg_mb((ptr), \
+#define cmpxchg(ptr,o,n) ({ \
+	(__typeof__(*(ptr)))__cmpxchg_mb((ptr), \
 					 (unsigned long)(o), \
 					 (unsigned long)(n), \
-					 sizeof(*(ptr))))
+					 sizeof(*(ptr))); \
+})
 
 static inline unsigned long __cmpxchg_local(volatile void *ptr,
 					    unsigned long old,
@@ -227,6 +233,13 @@ static inline unsigned long __cmpxchg_local(volatile void *ptr,
 	return ret;
 }
 
+#define cmpxchg_local(ptr, o, n) ({ \
+	(__typeof(*ptr))__cmpxchg_local((ptr), \
+					(unsigned long)(o), \
+					(unsigned long)(n), \
+					sizeof(*(ptr))); \
+})
+
 static inline unsigned long long __cmpxchg64(unsigned long long *ptr,
 					     unsigned long long old,
 					     unsigned long long new)
@@ -252,6 +265,14 @@ static inline unsigned long long __cmpxchg64(unsigned long long *ptr,
 	return oldval;
 }
 
+#define cmpxchg64_relaxed(ptr, o, n) ({ \
+	(__typeof__(*(ptr)))__cmpxchg64((ptr), \
+					(unsigned long long)(o), \
+					(unsigned long long)(n)); \
+})
+
+#define cmpxchg64_local(ptr, o, n) cmpxchg64_relaxed((ptr), (o), (n))
+
 static inline unsigned long long __cmpxchg64_mb(unsigned long long *ptr,
 						unsigned long long old,
 						unsigned long long new)
@@ -265,23 +286,11 @@ static inline unsigned long long __cmpxchg64_mb(unsigned long long *ptr,
 	return ret;
 }
 
-#define cmpxchg_local(ptr,o,n) \
-	((__typeof__(*(ptr)))__cmpxchg_local((ptr), \
-					     (unsigned long)(o), \
-					     (unsigned long)(n), \
-					     sizeof(*(ptr))))
-
-#define cmpxchg64(ptr, o, n) \
-	((__typeof__(*(ptr)))__cmpxchg64_mb((ptr), \
+#define cmpxchg64(ptr, o, n) ({ \
+	(__typeof__(*(ptr)))__cmpxchg64_mb((ptr), \
 					(unsigned long long)(o), \
-					(unsigned long long)(n)))
-
-#define cmpxchg64_relaxed(ptr, o, n) \
-	((__typeof__(*(ptr)))__cmpxchg64((ptr), \
-					(unsigned long long)(o), \
-					(unsigned long long)(n)))
-
-#define cmpxchg64_local(ptr, o, n) cmpxchg64_relaxed((ptr), (o), (n))
+					(unsigned long long)(n)); \
+})
 
 #endif	/* __LINUX_ARM_ARCH__ >= 6 */
 
@@ -10,7 +10,7 @@
 	@
 	@ routine called with r0 = irq number, r1 = struct pt_regs *
 	@
-	adrne	lr, BSYM(1b)
+	badrne	lr, 1b
 	bne	asm_do_IRQ
 
 #ifdef CONFIG_SMP
@@ -23,7 +23,7 @@
 	ALT_SMP(test_for_ipi r0, r2, r6, lr)
 	ALT_UP_B(9997f)
 	movne	r1, sp
-	adrne	lr, BSYM(1b)
+	badrne	lr, 1b
 	bne	do_IPI
 #endif
 9997:
@@ -23,6 +23,7 @@
 
 #ifdef __KERNEL__
 
+#include <linux/string.h>
 #include <linux/types.h>
 #include <linux/blk_types.h>
 #include <asm/byteorder.h>
@@ -73,17 +74,16 @@ void __raw_readsl(const volatile void __iomem *addr, void *data, int longlen);
 static inline void __raw_writew(u16 val, volatile void __iomem *addr)
 {
 	asm volatile("strh %1, %0"
-		     : "+Q" (*(volatile u16 __force *)addr)
-		     : "r" (val));
+		     : : "Q" (*(volatile u16 __force *)addr), "r" (val));
 }
 
 #define __raw_readw __raw_readw
 static inline u16 __raw_readw(const volatile void __iomem *addr)
 {
 	u16 val;
-	asm volatile("ldrh %1, %0"
-		     : "+Q" (*(volatile u16 __force *)addr),
-		       "=r" (val));
+	asm volatile("ldrh %0, %1"
+		     : "=r" (val)
+		     : "Q" (*(volatile u16 __force *)addr));
 	return val;
 }
 #endif
@@ -92,25 +92,23 @@ static inline u16 __raw_readw(const volatile void __iomem *addr)
 static inline void __raw_writeb(u8 val, volatile void __iomem *addr)
 {
 	asm volatile("strb %1, %0"
-		     : "+Qo" (*(volatile u8 __force *)addr)
-		     : "r" (val));
+		     : : "Qo" (*(volatile u8 __force *)addr), "r" (val));
 }
 
 #define __raw_writel __raw_writel
 static inline void __raw_writel(u32 val, volatile void __iomem *addr)
 {
 	asm volatile("str %1, %0"
-		     : "+Qo" (*(volatile u32 __force *)addr)
-		     : "r" (val));
+		     : : "Qo" (*(volatile u32 __force *)addr), "r" (val));
 }
 
 #define __raw_readb __raw_readb
 static inline u8 __raw_readb(const volatile void __iomem *addr)
 {
 	u8 val;
-	asm volatile("ldrb %1, %0"
-		     : "+Qo" (*(volatile u8 __force *)addr),
-		       "=r" (val));
+	asm volatile("ldrb %0, %1"
+		     : "=r" (val)
+		     : "Qo" (*(volatile u8 __force *)addr));
 	return val;
 }
 
@@ -118,9 +116,9 @@ static inline u8 __raw_readb(const volatile void __iomem *addr)
 static inline u32 __raw_readl(const volatile void __iomem *addr)
 {
 	u32 val;
-	asm volatile("ldr %1, %0"
-		     : "+Qo" (*(volatile u32 __force *)addr),
-		       "=r" (val));
+	asm volatile("ldr %0, %1"
+		     : "=r" (val)
+		     : "Qo" (*(volatile u32 __force *)addr));
 	return val;
 }
 
@@ -319,9 +317,33 @@ extern void _memset_io(volatile void __iomem *, int, size_t);
 #define writesw(p,d,l)		__raw_writesw(p,d,l)
 #define writesl(p,d,l)		__raw_writesl(p,d,l)
 
+#ifndef __ARMBE__
+static inline void memset_io(volatile void __iomem *dst, unsigned c,
+	size_t count)
+{
+	memset((void __force *)dst, c, count);
+}
+#define memset_io(dst,c,count) memset_io(dst,c,count)
+
+static inline void memcpy_fromio(void *to, const volatile void __iomem *from,
+	size_t count)
+{
+	memcpy(to, (const void __force *)from, count);
+}
+#define memcpy_fromio(to,from,count) memcpy_fromio(to,from,count)
+
+static inline void memcpy_toio(volatile void __iomem *to, const void *from,
+	size_t count)
+{
+	memcpy((void __force *)to, from, count);
+}
+#define memcpy_toio(to,from,count) memcpy_toio(to,from,count)
+
+#else
 #define memset_io(c,v,l)	_memset_io(c,(v),(l))
 #define memcpy_fromio(a,c,l)	_memcpy_fromio((a),c,(l))
 #define memcpy_toio(c,a,l)	_memcpy_toio(c,(a),(l))
+#endif
 
 #endif	/* readl */
 
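The "#define memset_io(dst,c,count) memset_io(dst,c,count)" lines above use a common header idiom: defining a macro to its own name marks the interface as provided, so a generic header can test for it with #ifndef before installing a fallback, while uses of the name still resolve to the inline function. A minimal sketch of the idiom with a hypothetical my_op() (name invented for illustration):

	#include <stdio.h>

	static inline int my_op(int x) { return x + 1; }
	#define my_op my_op		/* marks my_op as already provided */

	/* What a generic header could do: only define a fallback if the
	 * "arch" did not provide one. Skipped here, since my_op is defined. */
	#ifndef my_op
	#define my_op(x) (x)
	#endif

	int main(void)
	{
		printf("%d\n", my_op(41));	/* calls the inline function: 42 */
		return 0;
	}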
@@ -20,6 +20,7 @@
 
 #if __LINUX_ARM_ARCH__ >= 6
 
+#define arch_local_irq_save arch_local_irq_save
 static inline unsigned long arch_local_irq_save(void)
 {
 	unsigned long flags;
@@ -31,6 +32,7 @@ static inline unsigned long arch_local_irq_save(void)
 	return flags;
 }
 
+#define arch_local_irq_enable arch_local_irq_enable
 static inline void arch_local_irq_enable(void)
 {
 	asm volatile(
@@ -40,6 +42,7 @@ static inline void arch_local_irq_enable(void)
 		: "memory", "cc");
 }
 
+#define arch_local_irq_disable arch_local_irq_disable
 static inline void arch_local_irq_disable(void)
 {
 	asm volatile(
@@ -56,6 +59,7 @@ static inline void arch_local_irq_disable(void)
 /*
  * Save the current interrupt enable state & disable IRQs
 */
+#define arch_local_irq_save arch_local_irq_save
 static inline unsigned long arch_local_irq_save(void)
 {
 	unsigned long flags, temp;
@@ -73,6 +77,7 @@ static inline unsigned long arch_local_irq_save(void)
 /*
  * Enable IRQs
 */
+#define arch_local_irq_enable arch_local_irq_enable
 static inline void arch_local_irq_enable(void)
 {
 	unsigned long temp;
@@ -88,6 +93,7 @@ static inline void arch_local_irq_enable(void)
 /*
  * Disable IRQs
 */
+#define arch_local_irq_disable arch_local_irq_disable
 static inline void arch_local_irq_disable(void)
 {
 	unsigned long temp;
@@ -135,6 +141,7 @@ static inline void arch_local_irq_disable(void)
 /*
  * Save the current interrupt enable state.
 */
+#define arch_local_save_flags arch_local_save_flags
 static inline unsigned long arch_local_save_flags(void)
 {
 	unsigned long flags;
@@ -147,6 +154,7 @@ static inline unsigned long arch_local_save_flags(void)
 /*
  * restore saved IRQ & FIQ state
 */
+#define arch_local_irq_restore arch_local_irq_restore
 static inline void arch_local_irq_restore(unsigned long flags)
 {
 	asm volatile(
@@ -156,10 +164,13 @@ static inline void arch_local_irq_restore(unsigned long flags)
 		: "memory", "cc");
 }
 
+#define arch_irqs_disabled_flags arch_irqs_disabled_flags
 static inline int arch_irqs_disabled_flags(unsigned long flags)
 {
 	return flags & IRQMASK_I_BIT;
 }
 
+#include <asm-generic/irqflags.h>
+
 #endif /* ifdef __KERNEL__ */
 #endif /* ifndef __ASM_ARM_IRQFLAGS_H */
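The "#define arch_local_irq_save arch_local_irq_save" markers added above are what makes the new #include <asm-generic/irqflags.h> work: the generic header supplies only the helpers the architecture did not mark as defined, which is how ARM now gets arch_irqs_disabled() for free ("Provide arch_irqs_disabled via asm-generic" in the pull message). A simplified, host-runnable model of that composition, with a plain variable standing in for the CPSR:

	#include <stdio.h>

	#define IRQMASK_I_BIT	0x80	/* CPSR I bit on 32-bit ARM */

	static unsigned long cpsr = IRQMASK_I_BIT;	/* pretend IRQs are masked */

	/* "arch-provided" pieces, modelled without inline asm: */
	static unsigned long arch_local_save_flags(void) { return cpsr; }
	static int arch_irqs_disabled_flags(unsigned long f) { return f & IRQMASK_I_BIT; }

	/* what asm-generic/irqflags.h composes when the arch doesn't define it: */
	static int arch_irqs_disabled(void)
	{
		return arch_irqs_disabled_flags(arch_local_save_flags());
	}

	int main(void)
	{
		printf("irqs disabled: %s\n", arch_irqs_disabled() ? "yes" : "no");
		return 0;
	}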
@@ -51,7 +51,7 @@ struct machine_desc {
 	bool			(*smp_init)(void);
 	void			(*fixup)(struct tag *, char **);
 	void			(*dt_fixup)(void);
-	void			(*init_meminfo)(void);
+	long long		(*pv_fixup)(void);
 	void			(*reserve)(void);/* reserve mem blocks	*/
 	void			(*map_io)(void);/* IO mapping function	*/
 	void			(*init_early)(void);
@@ -137,17 +137,12 @@ int mcpm_wait_for_cpu_powerdown(unsigned int cpu, unsigned int cluster);
 /**
  * mcpm_cpu_suspend - bring the calling CPU in a suspended state
  *
- * @expected_residency: duration in microseconds the CPU is expected
- *		to remain suspended, or 0 if unknown/infinity.
- *
- * The calling CPU is suspended.  The expected residency argument is used
- * as a hint by the platform specific backend to implement the appropriate
- * sleep state level according to the knowledge it has on wake-up latency
- * for the given hardware.
+ * The calling CPU is suspended.  This is similar to mcpm_cpu_power_down()
+ * except for possible extra platform specific configuration steps to allow
+ * an asynchronous wake-up e.g. with a pending interrupt.
  *
  * If this CPU is found to be the "last man standing" in the cluster
- * then the cluster may be prepared for power-down too, if the expected
- * residency makes it worthwhile.
+ * then the cluster may be prepared for power-down too.
  *
  * This must be called with interrupts disabled.
  *
@@ -157,7 +152,7 @@ int mcpm_wait_for_cpu_powerdown(unsigned int cpu, unsigned int cluster);
  * This will return if mcpm_platform_register() has not been called
  * previously in which case the caller should take appropriate action.
  */
-void mcpm_cpu_suspend(u64 expected_residency);
+void mcpm_cpu_suspend(void);
 
 /**
  * mcpm_cpu_powered_up - housekeeping work after a CPU has been powered up
@@ -234,12 +229,6 @@ struct mcpm_platform_ops {
 	void (*cpu_is_up)(unsigned int cpu, unsigned int cluster);
 	void (*cluster_is_up)(unsigned int cluster);
 	int (*wait_for_powerdown)(unsigned int cpu, unsigned int cluster);
-
-	/* deprecated callbacks */
-	int (*power_up)(unsigned int cpu, unsigned int cluster);
-	void (*power_down)(void);
-	void (*suspend)(u64);
-	void (*powered_up)(void);
 };
 
 /**
@@ -251,35 +240,6 @@ struct mcpm_platform_ops {
  */
 int __init mcpm_platform_register(const struct mcpm_platform_ops *ops);
 
-/* Synchronisation structures for coordinating safe cluster setup/teardown: */
-
-/*
- * When modifying this structure, make sure you update the MCPM_SYNC_ defines
- * to match.
- */
-struct mcpm_sync_struct {
-	/* individual CPU states */
-	struct {
-		s8 cpu __aligned(__CACHE_WRITEBACK_GRANULE);
-	} cpus[MAX_CPUS_PER_CLUSTER];
-
-	/* cluster state */
-	s8 cluster __aligned(__CACHE_WRITEBACK_GRANULE);
-
-	/* inbound-side state */
-	s8 inbound __aligned(__CACHE_WRITEBACK_GRANULE);
-};
-
-struct sync_struct {
-	struct mcpm_sync_struct clusters[MAX_NR_CLUSTERS];
-};
-
-void __mcpm_cpu_going_down(unsigned int cpu, unsigned int cluster);
-void __mcpm_cpu_down(unsigned int cpu, unsigned int cluster);
-void __mcpm_outbound_leave_critical(unsigned int cluster, int state);
-bool __mcpm_outbound_enter_critical(unsigned int this_cpu, unsigned int cluster);
-int __mcpm_cluster_state(unsigned int cluster);
-
 /**
  * mcpm_sync_init - Initialize the cluster synchronization support
  *
@@ -318,6 +278,29 @@ int __init mcpm_loopback(void (*cache_disable)(void));
 
 void __init mcpm_smp_set_ops(void);
 
+/*
+ * Synchronisation structures for coordinating safe cluster setup/teardown.
+ * This is private to the MCPM core code and shared between C and assembly.
+ * When modifying this structure, make sure you update the MCPM_SYNC_ defines
+ * to match.
+ */
+struct mcpm_sync_struct {
+	/* individual CPU states */
+	struct {
+		s8 cpu __aligned(__CACHE_WRITEBACK_GRANULE);
+	} cpus[MAX_CPUS_PER_CLUSTER];
+
+	/* cluster state */
+	s8 cluster __aligned(__CACHE_WRITEBACK_GRANULE);
+
+	/* inbound-side state */
+	s8 inbound __aligned(__CACHE_WRITEBACK_GRANULE);
+};
+
+struct sync_struct {
+	struct mcpm_sync_struct clusters[MAX_NR_CLUSTERS];
+};
+
 #else
 
 /*
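On the __aligned(__CACHE_WRITEBACK_GRANULE) annotations in this structure: each state byte must live in its own writeback granule so that cache maintenance on one CPU's state line cannot evict or overwrite a neighbour's state written with different cacheability. A host-runnable sketch assuming a 64-byte granule (the real value depends on the kernel configuration):

	#include <stdio.h>

	#define CACHE_WRITEBACK_GRANULE	64	/* assumption for illustration */
	#define MAX_CPUS_PER_CLUSTER	4
	#define MAX_NR_CLUSTERS		2

	struct mcpm_sync_struct {
		/* one granule per CPU state byte */
		struct {
			signed char cpu __attribute__((aligned(CACHE_WRITEBACK_GRANULE)));
		} cpus[MAX_CPUS_PER_CLUSTER];

		signed char cluster __attribute__((aligned(CACHE_WRITEBACK_GRANULE)));
		signed char inbound __attribute__((aligned(CACHE_WRITEBACK_GRANULE)));
	};

	int main(void)
	{
		/* each cpus[] element pads out to a full granule: 64 bytes */
		printf("per-CPU stride: %zu\n",
		       sizeof ((struct mcpm_sync_struct){0}.cpus[0]));
		printf("struct size:    %zu\n", sizeof(struct mcpm_sync_struct));
		return 0;
	}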
@@ -18,8 +18,6 @@
 #include <linux/types.h>
 #include <linux/sizes.h>
 
-#include <asm/cache.h>
-
 #ifdef CONFIG_NEED_MACH_MEMORY_H
 #include <mach/memory.h>
 #endif
@@ -132,20 +130,6 @@
 #define page_to_phys(page)	(__pfn_to_phys(page_to_pfn(page)))
 #define phys_to_page(phys)	(pfn_to_page(__phys_to_pfn(phys)))
 
-/*
- * Minimum guaranted alignment in pgd_alloc().  The page table pointers passed
- * around in head.S and proc-*.S are shifted by this amount, in order to
- * leave spare high bits for systems with physical address extension.  This
- * does not fully accomodate the 40-bit addressing capability of ARM LPAE, but
- * gives us about 38-bits or so.
- */
-#ifdef CONFIG_ARM_LPAE
-#define ARCH_PGD_SHIFT		L1_CACHE_SHIFT
-#else
-#define ARCH_PGD_SHIFT		0
-#endif
-#define ARCH_PGD_MASK		((1 << ARCH_PGD_SHIFT) - 1)
-
 /*
  * PLAT_PHYS_OFFSET is the offset (from zero) of the start of physical
  * memory.  This is used for XIP and NoMMU kernels, and on platforms that don't
@@ -16,11 +16,21 @@ enum {
 	ARM_SEC_UNLIKELY,
 	ARM_SEC_MAX,
 };
+#endif
 
 struct mod_arch_specific {
+#ifdef CONFIG_ARM_UNWIND
 	struct unwind_table *unwind[ARM_SEC_MAX];
-};
 #endif
+#ifdef CONFIG_ARM_MODULE_PLTS
+	struct elf32_shdr   *core_plt;
+	struct elf32_shdr   *init_plt;
+	int		    core_plt_count;
+	int		    init_plt_count;
+#endif
+};
+
+u32 get_module_plt(struct module *mod, unsigned long loc, Elf32_Addr val);
 
 /*
  * Add the ARM architecture version to the version magic string
Some files were not shown because too many files have changed in this diff.