Mirror of https://github.com/armbian/linux.git (synced 2026-01-06 10:13:00 -08:00)
Merge branch 'devel-stable' into devel
Conflicts:
	arch/arm/mach-pxa/clock.c
	arch/arm/mach-pxa/clock.h
@@ -364,6 +364,14 @@ config ARCH_MXC
 	help
 	  Support for Freescale MXC/iMX-based family of processors

+config ARCH_MXS
+	bool "Freescale MXS-based"
+	select GENERIC_CLOCKEVENTS
+	select ARCH_REQUIRE_GPIOLIB
+	select COMMON_CLKDEV
+	help
+	  Support for Freescale MXS-based family of processors
+
 config ARCH_STMP3XXX
 	bool "Freescale STMP3xxx"
 	select CPU_ARM926T
@@ -817,6 +825,7 @@ config ARCH_U8500
 	select GENERIC_CLOCKEVENTS
 	select CLKDEV_LOOKUP
 	select ARCH_REQUIRE_GPIOLIB
+	select ARCH_HAS_CPUFREQ
 	help
 	  Support for ST-Ericsson's Ux500 architecture

@@ -923,6 +932,8 @@ source "arch/arm/mach-mv78xx0/Kconfig"

 source "arch/arm/plat-mxc/Kconfig"

+source "arch/arm/mach-mxs/Kconfig"
+
 source "arch/arm/mach-netx/Kconfig"

 source "arch/arm/mach-nomadik/Kconfig"
@@ -1022,8 +1033,8 @@ source arch/arm/mm/Kconfig

 config IWMMXT
 	bool "Enable iWMMXt support"
-	depends on CPU_XSCALE || CPU_XSC3 || CPU_MOHAWK
-	default y if PXA27x || PXA3xx || ARCH_MMP
+	depends on CPU_XSCALE || CPU_XSC3 || CPU_MOHAWK || CPU_PJ4
+	default y if PXA27x || PXA3xx || PXA95x || ARCH_MMP
 	help
 	  Enable support for iWMMXt context switching at run time if
 	  running on a CPU that supports it.
@@ -154,10 +154,11 @@ machine-$(CONFIG_ARCH_MSM) := msm
 machine-$(CONFIG_ARCH_MV78XX0) := mv78xx0
 machine-$(CONFIG_ARCH_MX1) := imx
 machine-$(CONFIG_ARCH_MX2) := imx
-machine-$(CONFIG_ARCH_MX25) := mx25
+machine-$(CONFIG_ARCH_MX25) := imx
 machine-$(CONFIG_ARCH_MX3) := mx3
 machine-$(CONFIG_ARCH_MX5) := mx5
 machine-$(CONFIG_ARCH_MXC91231) := mxc91231
+machine-$(CONFIG_ARCH_MXS) := mxs
 machine-$(CONFIG_ARCH_NETX) := netx
 machine-$(CONFIG_ARCH_NOMADIK) := nomadik
 machine-$(CONFIG_ARCH_NS9XXX) := ns9xxx
@@ -84,6 +84,7 @@ CONFIG_SERIAL_IMX_CONSOLE=y
 CONFIG_I2C=y
 CONFIG_I2C_CHARDEV=y
 CONFIG_I2C_IMX=y
+CONFIG_SPI=y
 CONFIG_W1=y
 CONFIG_W1_MASTER_MXC=y
 CONFIG_W1_SLAVE_THERM=y
@@ -20,8 +20,8 @@ struct arch_hw_breakpoint_ctrl {
 struct arch_hw_breakpoint {
 	u32 address;
 	u32 trigger;
-	struct perf_event *suspended_wp;
-	struct arch_hw_breakpoint_ctrl ctrl;
+	struct arch_hw_breakpoint_ctrl step_ctrl;
+	struct arch_hw_breakpoint_ctrl ctrl;
 };

 static inline u32 encode_ctrl_reg(struct arch_hw_breakpoint_ctrl ctrl)
@@ -54,6 +54,7 @@ AFLAGS_crunch-bits.o := -Wa,-mcpu=ep9312
 obj-$(CONFIG_CPU_XSCALE) += xscale-cp0.o
 obj-$(CONFIG_CPU_XSC3) += xscale-cp0.o
 obj-$(CONFIG_CPU_MOHAWK) += xscale-cp0.o
+obj-$(CONFIG_CPU_PJ4) += pj4-cp0.o
 obj-$(CONFIG_IWMMXT) += iwmmxt.o
 obj-$(CONFIG_CPU_HAS_PMU) += pmu.o
 obj-$(CONFIG_HW_PERF_EVENTS) += perf_event.o
@@ -178,6 +178,7 @@ __dabt_svc:
 	@
 	@ set desired IRQ state, then call main handler
 	@
+	debug_entry r1
 	msr	cpsr_c, r9
 	mov	r2, sp
 	bl	do_DataAbort
@@ -304,6 +305,7 @@ __pabt_svc:
 #else
 	bl	CPU_PABORT_HANDLER
 #endif
+	debug_entry r1
 	msr	cpsr_c, r9			@ Maybe enable interrupts
 	mov	r2, sp				@ regs
 	bl	do_PrefetchAbort		@ call abort handler
@@ -419,6 +421,7 @@ __dabt_usr:
 	@
 	@ IRQs on, then call the main handler
 	@
+	debug_entry r1
 	enable_irq
 	mov	r2, sp
 	adr	lr, BSYM(ret_from_exception)
@@ -683,6 +686,7 @@ __pabt_usr:
 #else
 	bl	CPU_PABORT_HANDLER
 #endif
+	debug_entry r1
 	enable_irq				@ Enable interrupts
 	mov	r2, sp				@ regs
 	bl	do_PrefetchAbort		@ call abort handler
@@ -165,6 +165,25 @@
 	.endm
 #endif	/* !CONFIG_THUMB2_KERNEL */

+	@
+	@ Debug exceptions are taken as prefetch or data aborts.
+	@ We must disable preemption during the handler so that
+	@ we can access the debug registers safely.
+	@
+	.macro	debug_entry, fsr
+#if defined(CONFIG_HAVE_HW_BREAKPOINT) && defined(CONFIG_PREEMPT)
+	ldr	r4, =0x40f		@ mask out fsr.fs
+	and	r5, r4, \fsr
+	cmp	r5, #2			@ debug exception
+	bne	1f
+	get_thread_info r10
+	ldr	r6, [r10, #TI_PREEMPT]	@ get preempt count
+	add	r11, r6, #1		@ increment it
+	str	r11, [r10, #TI_PREEMPT]
+1:
+#endif
+	.endm
+
 /*
  * These are the registers used in the syscall handler, and allow us to
  * have in theory up to 7 arguments to a function - r0 to r6.
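The debug_entry macro added above bumps the thread's preempt count directly from the exception entry path so the hw_breakpoint code can touch the debug registers on the CPU that took the exception. As a rough, hedged C-level sketch (illustrative only, not code from this commit), the test it performs is equivalent to:

/*
 * Illustrative sketch only: the real debug_entry macro adjusts TI_PREEMPT
 * straight from assembly.  The FSR test mirrors the ldr/and/cmp sequence
 * above: mask the fault-status bits with 0x40f and compare against the
 * debug-event encoding (2).
 */
#include <linux/preempt.h>

static inline void debug_entry_sketch(unsigned int fsr)
{
	if ((fsr & 0x40f) == 2)		/* debug exception? */
		preempt_disable();	/* balanced again on the exit path */
}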
(File diff suppressed because it is too large.)
@@ -19,6 +19,14 @@
 #include <asm/thread_info.h>
 #include <asm/asm-offsets.h>

+#if defined(CONFIG_CPU_PJ4)
+#define PJ4(code...)	code
+#define XSC(code...)
+#else
+#define PJ4(code...)
+#define XSC(code...)	code
+#endif
+
 #define MMX_WR0 (0x00)
 #define MMX_WR1 (0x08)
 #define MMX_WR2 (0x10)
@@ -58,11 +66,17 @@

 ENTRY(iwmmxt_task_enable)

-	mrc	p15, 0, r2, c15, c1, 0
-	tst	r2, #0x3			@ CP0 and CP1 accessible?
+	XSC(mrc	p15, 0, r2, c15, c1, 0)
+	PJ4(mrc	p15, 0, r2, c1, c0, 2)
+	@ CP0 and CP1 accessible?
+	XSC(tst	r2, #0x3)
+	PJ4(tst	r2, #0xf)
 	movne	pc, lr				@ if so no business here
-	orr	r2, r2, #0x3			@ enable access to CP0 and CP1
-	mcr	p15, 0, r2, c15, c1, 0
+	@ enable access to CP0 and CP1
+	XSC(orr	r2, r2, #0x3)
+	XSC(mcr	p15, 0, r2, c15, c1, 0)
+	PJ4(orr	r2, r2, #0xf)
+	PJ4(mcr	p15, 0, r2, c1, c0, 2)

 	ldr	r3, =concan_owner
 	add	r0, r10, #TI_IWMMXT_STATE	@ get task Concan save area
@@ -179,17 +193,26 @@ ENTRY(iwmmxt_task_disable)
 	teqne	r1, r2				@ or specified one?
 	bne	1f				@ no: quit

-	mrc	p15, 0, r4, c15, c1, 0
-	orr	r4, r4, #0x3			@ enable access to CP0 and CP1
-	mcr	p15, 0, r4, c15, c1, 0
+	@ enable access to CP0 and CP1
+	XSC(mrc	p15, 0, r4, c15, c1, 0)
+	XSC(orr	r4, r4, #0xf)
+	XSC(mcr	p15, 0, r4, c15, c1, 0)
+	PJ4(mrc	p15, 0, r4, c1, c0, 2)
+	PJ4(orr	r4, r4, #0x3)
+	PJ4(mcr	p15, 0, r4, c1, c0, 2)

 	mov	r0, #0				@ nothing to load
 	str	r0, [r3]			@ no more current owner
 	mrc	p15, 0, r2, c2, c0, 0
 	mov	r2, r2				@ cpwait
 	bl	concan_save

-	bic	r4, r4, #0x3			@ disable access to CP0 and CP1
-	mcr	p15, 0, r4, c15, c1, 0
+	@ disable access to CP0 and CP1
+	XSC(bic	r4, r4, #0x3)
+	XSC(mcr	p15, 0, r4, c15, c1, 0)
+	PJ4(bic	r4, r4, #0xf)
+	PJ4(mcr	p15, 0, r4, c1, c0, 2)

 	mrc	p15, 0, r2, c2, c0, 0
 	mov	r2, r2				@ cpwait

@@ -277,8 +300,11 @@ ENTRY(iwmmxt_task_restore)
 	*/
 ENTRY(iwmmxt_task_switch)

-	mrc	p15, 0, r1, c15, c1, 0
-	tst	r1, #0x3			@ CP0 and CP1 accessible?
+	XSC(mrc	p15, 0, r1, c15, c1, 0)
+	PJ4(mrc	p15, 0, r1, c1, c0, 2)
+	@ CP0 and CP1 accessible?
+	XSC(tst	r1, #0x3)
+	PJ4(tst	r1, #0xf)
 	bne	1f				@ yes: block them for next task

 	ldr	r2, =concan_owner
@@ -287,8 +313,11 @@ ENTRY(iwmmxt_task_switch)
 	teq	r2, r3				@ next task owns it?
 	movne	pc, lr				@ no: leave Concan disabled

-1:	eor	r1, r1, #3			@ flip Concan access
-	mcr	p15, 0, r1, c15, c1, 0
+1:	@ flip Conan access
+	XSC(eor	r1, r1, #0x3)
+	XSC(mcr	p15, 0, r1, c15, c1, 0)
+	PJ4(eor	r1, r1, #0xf)
+	PJ4(mcr	p15, 0, r1, c1, c0, 2)

 	mrc	p15, 0, r1, c2, c0, 0
 	sub	pc, lr, r1, lsr #32		@ cpwait and return
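The XSC()/PJ4() wrappers introduced above let one assembly file carry both coprocessor-access encodings. A small hedged illustration of the same preprocessor idiom in plain C (printf stands in for the mrc/mcr instructions, and CONFIG_CPU_PJ4 would normally come from the build system):

#include <stdio.h>

/* Keep or drop a code fragment depending on the target CPU, exactly as the
 * XSC()/PJ4() macros in iwmmxt.S do for the two coprocessor encodings. */
#ifdef CONFIG_CPU_PJ4
#define PJ4(code...)	code
#define XSC(code...)
#else
#define PJ4(code...)
#define XSC(code...)	code
#endif

int main(void)
{
	XSC(printf("XScale access register: cp15, c15, c1, 0\n");)
	PJ4(printf("PJ4 access register:    cp15, c1, c0, 2\n");)
	return 0;
}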
@@ -32,7 +32,7 @@ static struct platform_device *pmu_device;
  * Hardware lock to serialize accesses to PMU registers. Needed for the
  * read/modify/write sequences.
  */
-DEFINE_SPINLOCK(pmu_lock);
+static DEFINE_RAW_SPINLOCK(pmu_lock);

 /*
  * ARMv6 supports a maximum of 3 events, starting from index 1. If we add
@@ -65,7 +65,7 @@ struct cpu_hw_events {
 	 */
 	unsigned long active_mask[BITS_TO_LONGS(ARMPMU_MAX_HWEVENTS)];
 };
-DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events);
+static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events);

 struct arm_pmu {
 	enum arm_perf_pmu_ids id;
@@ -673,17 +673,17 @@ arch_initcall(init_hw_perf_events);
  * This code has been adapted from the ARM OProfile support.
  */
 struct frame_tail {
-	struct frame_tail *fp;
-	unsigned long sp;
-	unsigned long lr;
+	struct frame_tail __user *fp;
+	unsigned long sp;
+	unsigned long lr;
 } __attribute__((packed));

 /*
  * Get the return address for a single stackframe and return a pointer to the
  * next frame tail.
  */
-static struct frame_tail *
-user_backtrace(struct frame_tail *tail,
+static struct frame_tail __user *
+user_backtrace(struct frame_tail __user *tail,
 	       struct perf_callchain_entry *entry)
 {
 	struct frame_tail buftail;
@@ -709,10 +709,10 @@ user_backtrace(struct frame_tail *tail,
 void
 perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
 {
-	struct frame_tail *tail;
+	struct frame_tail __user *tail;


-	tail = (struct frame_tail *)regs->ARM_fp - 1;
+	tail = (struct frame_tail __user *)regs->ARM_fp - 1;

 	while (tail && !((unsigned long)tail & 0x3))
 		tail = user_backtrace(tail, entry);
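The __user annotations above mark the frame-pointer chain as an untrusted userspace address. A hedged sketch of the access pattern that annotation enforces (not the function from this diff; the names are illustrative, and the 3-argument access_ok() form matches kernels of this vintage):

#include <linux/uaccess.h>

struct example_frame_tail {
	struct example_frame_tail __user *fp;
	unsigned long sp;
	unsigned long lr;
} __attribute__((packed));

/* Copy one stack frame in from userspace instead of dereferencing the
 * __user pointer directly; sparse will flag any direct dereference. */
static struct example_frame_tail __user *
example_read_frame(struct example_frame_tail __user *tail, unsigned long *lr)
{
	struct example_frame_tail buf;

	if (!access_ok(VERIFY_READ, tail, sizeof(buf)))
		return NULL;
	if (__copy_from_user_inatomic(&buf, tail, sizeof(buf)))
		return NULL;

	*lr = buf.lr;
	return buf.fp;			/* caller decides how to step to the next frame */
}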
@@ -400,7 +400,7 @@ armv6pmu_write_counter(int counter,
 		WARN_ONCE(1, "invalid counter number (%d)\n", counter);
 }

-void
+static void
 armv6pmu_enable_event(struct hw_perf_event *hwc,
 		      int idx)
 {
@@ -426,12 +426,12 @@ armv6pmu_enable_event(struct hw_perf_event *hwc,
 	 * Mask out the current event and set the counter to count the event
 	 * that we're interested in.
 	 */
-	spin_lock_irqsave(&pmu_lock, flags);
+	raw_spin_lock_irqsave(&pmu_lock, flags);
 	val = armv6_pmcr_read();
 	val &= ~mask;
 	val |= evt;
 	armv6_pmcr_write(val);
-	spin_unlock_irqrestore(&pmu_lock, flags);
+	raw_spin_unlock_irqrestore(&pmu_lock, flags);
 }

 static irqreturn_t
@@ -500,11 +500,11 @@ armv6pmu_start(void)
 {
 	unsigned long flags, val;

-	spin_lock_irqsave(&pmu_lock, flags);
+	raw_spin_lock_irqsave(&pmu_lock, flags);
 	val = armv6_pmcr_read();
 	val |= ARMV6_PMCR_ENABLE;
 	armv6_pmcr_write(val);
-	spin_unlock_irqrestore(&pmu_lock, flags);
+	raw_spin_unlock_irqrestore(&pmu_lock, flags);
 }

 static void
@@ -512,11 +512,11 @@ armv6pmu_stop(void)
 {
 	unsigned long flags, val;

-	spin_lock_irqsave(&pmu_lock, flags);
+	raw_spin_lock_irqsave(&pmu_lock, flags);
 	val = armv6_pmcr_read();
 	val &= ~ARMV6_PMCR_ENABLE;
 	armv6_pmcr_write(val);
-	spin_unlock_irqrestore(&pmu_lock, flags);
+	raw_spin_unlock_irqrestore(&pmu_lock, flags);
 }

 static int
@@ -570,12 +570,12 @@ armv6pmu_disable_event(struct hw_perf_event *hwc,
 	 * of ETM bus signal assertion cycles. The external reporting should
 	 * be disabled and so this should never increment.
 	 */
-	spin_lock_irqsave(&pmu_lock, flags);
+	raw_spin_lock_irqsave(&pmu_lock, flags);
 	val = armv6_pmcr_read();
 	val &= ~mask;
 	val |= evt;
 	armv6_pmcr_write(val);
-	spin_unlock_irqrestore(&pmu_lock, flags);
+	raw_spin_unlock_irqrestore(&pmu_lock, flags);
 }

 static void
@@ -599,12 +599,12 @@ armv6mpcore_pmu_disable_event(struct hw_perf_event *hwc,
 	 * Unlike UP ARMv6, we don't have a way of stopping the counters. We
 	 * simply disable the interrupt reporting.
 	 */
-	spin_lock_irqsave(&pmu_lock, flags);
+	raw_spin_lock_irqsave(&pmu_lock, flags);
 	val = armv6_pmcr_read();
 	val &= ~mask;
 	val |= evt;
 	armv6_pmcr_write(val);
-	spin_unlock_irqrestore(&pmu_lock, flags);
+	raw_spin_unlock_irqrestore(&pmu_lock, flags);
 }

 static const struct arm_pmu armv6pmu = {
@@ -625,7 +625,7 @@ static const struct arm_pmu armv6pmu = {
 	.max_period = (1LLU << 32) - 1,
 };

-const struct arm_pmu *__init armv6pmu_init(void)
+static const struct arm_pmu *__init armv6pmu_init(void)
 {
 	return &armv6pmu;
 }
@@ -655,17 +655,17 @@ static const struct arm_pmu armv6mpcore_pmu = {
 	.max_period = (1LLU << 32) - 1,
 };

-const struct arm_pmu *__init armv6mpcore_pmu_init(void)
+static const struct arm_pmu *__init armv6mpcore_pmu_init(void)
 {
 	return &armv6mpcore_pmu;
 }
 #else
-const struct arm_pmu *__init armv6pmu_init(void)
+static const struct arm_pmu *__init armv6pmu_init(void)
 {
 	return NULL;
 }

-const struct arm_pmu *__init armv6mpcore_pmu_init(void)
+static const struct arm_pmu *__init armv6mpcore_pmu_init(void)
 {
 	return NULL;
 }
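The spin_lock to raw_spin_lock conversions in these PMU hunks keep the lock a genuine busy-wait lock even in configurations where the ordinary spinlock type can sleep (as under PREEMPT_RT). A minimal hedged sketch of the resulting pattern, with a stand-in variable instead of the real PMCR accessors:

#include <linux/spinlock.h>

static DEFINE_RAW_SPINLOCK(example_pmu_lock);
static u32 example_pmcr;		/* stand-in for the hardware register */

/* Read-modify-write of a PMU control word with IRQs off, the same shape as
 * the armv6/armv7/xscale enable/disable paths above. */
static void example_pmu_update(u32 mask, u32 bits)
{
	unsigned long flags;
	u32 val;

	raw_spin_lock_irqsave(&example_pmu_lock, flags);
	val = example_pmcr;
	val &= ~mask;
	val |= bits;
	example_pmcr = val;
	raw_spin_unlock_irqrestore(&example_pmu_lock, flags);
}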
@@ -681,7 +681,7 @@ static void armv7_pmnc_dump_regs(void)
 }
 #endif

-void armv7pmu_enable_event(struct hw_perf_event *hwc, int idx)
+static void armv7pmu_enable_event(struct hw_perf_event *hwc, int idx)
 {
 	unsigned long flags;

@@ -689,7 +689,7 @@ void armv7pmu_enable_event(struct hw_perf_event *hwc, int idx)
 	 * Enable counter and interrupt, and set the counter to count
 	 * the event that we're interested in.
 	 */
-	spin_lock_irqsave(&pmu_lock, flags);
+	raw_spin_lock_irqsave(&pmu_lock, flags);

 	/*
 	 * Disable counter
@@ -713,7 +713,7 @@ void armv7pmu_enable_event(struct hw_perf_event *hwc, int idx)
 	 */
 	armv7_pmnc_enable_counter(idx);

-	spin_unlock_irqrestore(&pmu_lock, flags);
+	raw_spin_unlock_irqrestore(&pmu_lock, flags);
 }

 static void armv7pmu_disable_event(struct hw_perf_event *hwc, int idx)
@@ -723,7 +723,7 @@ static void armv7pmu_disable_event(struct hw_perf_event *hwc, int idx)
 	/*
 	 * Disable counter and interrupt
 	 */
-	spin_lock_irqsave(&pmu_lock, flags);
+	raw_spin_lock_irqsave(&pmu_lock, flags);

 	/*
 	 * Disable counter
@@ -735,7 +735,7 @@ static void armv7pmu_disable_event(struct hw_perf_event *hwc, int idx)
 	 */
 	armv7_pmnc_disable_intens(idx);

-	spin_unlock_irqrestore(&pmu_lock, flags);
+	raw_spin_unlock_irqrestore(&pmu_lock, flags);
 }

 static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
@@ -805,20 +805,20 @@ static void armv7pmu_start(void)
 {
 	unsigned long flags;

-	spin_lock_irqsave(&pmu_lock, flags);
+	raw_spin_lock_irqsave(&pmu_lock, flags);
 	/* Enable all counters */
 	armv7_pmnc_write(armv7_pmnc_read() | ARMV7_PMNC_E);
-	spin_unlock_irqrestore(&pmu_lock, flags);
+	raw_spin_unlock_irqrestore(&pmu_lock, flags);
 }

 static void armv7pmu_stop(void)
 {
 	unsigned long flags;

-	spin_lock_irqsave(&pmu_lock, flags);
+	raw_spin_lock_irqsave(&pmu_lock, flags);
 	/* Disable all counters */
 	armv7_pmnc_write(armv7_pmnc_read() & ~ARMV7_PMNC_E);
-	spin_unlock_irqrestore(&pmu_lock, flags);
+	raw_spin_unlock_irqrestore(&pmu_lock, flags);
 }

 static int armv7pmu_get_event_idx(struct cpu_hw_events *cpuc,
@@ -874,7 +874,7 @@ static u32 __init armv7_reset_read_pmnc(void)
 	return nb_cnt + 1;
 }

-const struct arm_pmu *__init armv7_a8_pmu_init(void)
+static const struct arm_pmu *__init armv7_a8_pmu_init(void)
 {
 	armv7pmu.id = ARM_PERF_PMU_ID_CA8;
 	armv7pmu.name = "ARMv7 Cortex-A8";
@@ -884,7 +884,7 @@ const struct arm_pmu *__init armv7_a8_pmu_init(void)
 	return &armv7pmu;
 }

-const struct arm_pmu *__init armv7_a9_pmu_init(void)
+static const struct arm_pmu *__init armv7_a9_pmu_init(void)
 {
 	armv7pmu.id = ARM_PERF_PMU_ID_CA9;
 	armv7pmu.name = "ARMv7 Cortex-A9";
@@ -894,12 +894,12 @@ const struct arm_pmu *__init armv7_a9_pmu_init(void)
 	return &armv7pmu;
 }
 #else
-const struct arm_pmu *__init armv7_a8_pmu_init(void)
+static const struct arm_pmu *__init armv7_a8_pmu_init(void)
 {
 	return NULL;
 }

-const struct arm_pmu *__init armv7_a9_pmu_init(void)
+static const struct arm_pmu *__init armv7_a9_pmu_init(void)
 {
 	return NULL;
 }
@@ -291,12 +291,12 @@ xscale1pmu_enable_event(struct hw_perf_event *hwc, int idx)
 		return;
 	}

-	spin_lock_irqsave(&pmu_lock, flags);
+	raw_spin_lock_irqsave(&pmu_lock, flags);
 	val = xscale1pmu_read_pmnc();
 	val &= ~mask;
 	val |= evt;
 	xscale1pmu_write_pmnc(val);
-	spin_unlock_irqrestore(&pmu_lock, flags);
+	raw_spin_unlock_irqrestore(&pmu_lock, flags);
 }

 static void
@@ -322,12 +322,12 @@ xscale1pmu_disable_event(struct hw_perf_event *hwc, int idx)
 		return;
 	}

-	spin_lock_irqsave(&pmu_lock, flags);
+	raw_spin_lock_irqsave(&pmu_lock, flags);
 	val = xscale1pmu_read_pmnc();
 	val &= ~mask;
 	val |= evt;
 	xscale1pmu_write_pmnc(val);
-	spin_unlock_irqrestore(&pmu_lock, flags);
+	raw_spin_unlock_irqrestore(&pmu_lock, flags);
 }

 static int
@@ -355,11 +355,11 @@ xscale1pmu_start(void)
 {
 	unsigned long flags, val;

-	spin_lock_irqsave(&pmu_lock, flags);
+	raw_spin_lock_irqsave(&pmu_lock, flags);
 	val = xscale1pmu_read_pmnc();
 	val |= XSCALE_PMU_ENABLE;
 	xscale1pmu_write_pmnc(val);
-	spin_unlock_irqrestore(&pmu_lock, flags);
+	raw_spin_unlock_irqrestore(&pmu_lock, flags);
 }

 static void
@@ -367,11 +367,11 @@ xscale1pmu_stop(void)
 {
 	unsigned long flags, val;

-	spin_lock_irqsave(&pmu_lock, flags);
+	raw_spin_lock_irqsave(&pmu_lock, flags);
 	val = xscale1pmu_read_pmnc();
 	val &= ~XSCALE_PMU_ENABLE;
 	xscale1pmu_write_pmnc(val);
-	spin_unlock_irqrestore(&pmu_lock, flags);
+	raw_spin_unlock_irqrestore(&pmu_lock, flags);
 }

 static inline u32
@@ -428,7 +428,7 @@ static const struct arm_pmu xscale1pmu = {
 	.max_period = (1LLU << 32) - 1,
 };

-const struct arm_pmu *__init xscale1pmu_init(void)
+static const struct arm_pmu *__init xscale1pmu_init(void)
 {
 	return &xscale1pmu;
 }
@@ -635,10 +635,10 @@ xscale2pmu_enable_event(struct hw_perf_event *hwc, int idx)
 		return;
 	}

-	spin_lock_irqsave(&pmu_lock, flags);
+	raw_spin_lock_irqsave(&pmu_lock, flags);
 	xscale2pmu_write_event_select(evtsel);
 	xscale2pmu_write_int_enable(ien);
-	spin_unlock_irqrestore(&pmu_lock, flags);
+	raw_spin_unlock_irqrestore(&pmu_lock, flags);
 }

 static void
@@ -678,10 +678,10 @@ xscale2pmu_disable_event(struct hw_perf_event *hwc, int idx)
 		return;
 	}

-	spin_lock_irqsave(&pmu_lock, flags);
+	raw_spin_lock_irqsave(&pmu_lock, flags);
 	xscale2pmu_write_event_select(evtsel);
 	xscale2pmu_write_int_enable(ien);
-	spin_unlock_irqrestore(&pmu_lock, flags);
+	raw_spin_unlock_irqrestore(&pmu_lock, flags);
 }

 static int
@@ -705,11 +705,11 @@ xscale2pmu_start(void)
 {
 	unsigned long flags, val;

-	spin_lock_irqsave(&pmu_lock, flags);
+	raw_spin_lock_irqsave(&pmu_lock, flags);
 	val = xscale2pmu_read_pmnc() & ~XSCALE_PMU_CNT64;
 	val |= XSCALE_PMU_ENABLE;
 	xscale2pmu_write_pmnc(val);
-	spin_unlock_irqrestore(&pmu_lock, flags);
+	raw_spin_unlock_irqrestore(&pmu_lock, flags);
 }

 static void
@@ -717,11 +717,11 @@ xscale2pmu_stop(void)
 {
 	unsigned long flags, val;

-	spin_lock_irqsave(&pmu_lock, flags);
+	raw_spin_lock_irqsave(&pmu_lock, flags);
 	val = xscale2pmu_read_pmnc();
 	val &= ~XSCALE_PMU_ENABLE;
 	xscale2pmu_write_pmnc(val);
-	spin_unlock_irqrestore(&pmu_lock, flags);
+	raw_spin_unlock_irqrestore(&pmu_lock, flags);
 }

 static inline u32
@@ -790,17 +790,17 @@ static const struct arm_pmu xscale2pmu = {
 	.max_period = (1LLU << 32) - 1,
 };

-const struct arm_pmu *__init xscale2pmu_init(void)
+static const struct arm_pmu *__init xscale2pmu_init(void)
 {
 	return &xscale2pmu;
 }
 #else
-const struct arm_pmu *__init xscale1pmu_init(void)
+static const struct arm_pmu *__init xscale1pmu_init(void)
 {
 	return NULL;
 }

-const struct arm_pmu *__init xscale2pmu_init(void)
+static const struct arm_pmu *__init xscale2pmu_init(void)
 {
 	return NULL;
 }
arch/arm/kernel/pj4-cp0.c (new file, 94 lines)
@@ -0,0 +1,94 @@
+/*
+ * linux/arch/arm/kernel/pj4-cp0.c
+ *
+ * PJ4 iWMMXt coprocessor context switching and handling
+ *
+ * Copyright (c) 2010 Marvell International Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <asm/thread_notify.h>
+
+static int iwmmxt_do(struct notifier_block *self, unsigned long cmd, void *t)
+{
+	struct thread_info *thread = t;
+
+	switch (cmd) {
+	case THREAD_NOTIFY_FLUSH:
+		/*
+		 * flush_thread() zeroes thread->fpstate, so no need
+		 * to do anything here.
+		 *
+		 * FALLTHROUGH: Ensure we don't try to overwrite our newly
+		 * initialised state information on the first fault.
+		 */
+
+	case THREAD_NOTIFY_EXIT:
+		iwmmxt_task_release(thread);
+		break;
+
+	case THREAD_NOTIFY_SWITCH:
+		iwmmxt_task_switch(thread);
+		break;
+	}
+
+	return NOTIFY_DONE;
+}
+
+static struct notifier_block iwmmxt_notifier_block = {
+	.notifier_call = iwmmxt_do,
+};
+
+
+static u32 __init pj4_cp_access_read(void)
+{
+	u32 value;
+
+	__asm__ __volatile__ (
+		"mrc p15, 0, %0, c1, c0, 2\n\t"
+		: "=r" (value));
+	return value;
+}
+
+static void __init pj4_cp_access_write(u32 value)
+{
+	u32 temp;
+
+	__asm__ __volatile__ (
+		"mcr p15, 0, %1, c1, c0, 2\n\t"
+		"mrc p15, 0, %0, c1, c0, 2\n\t"
+		"mov %0, %0\n\t"
+		"sub pc, pc, #4\n\t"
+		: "=r" (temp) : "r" (value));
+}
+
+
+/*
+ * Disable CP0/CP1 on boot, and let call_fpe() and the iWMMXt lazy
+ * switch code handle iWMMXt context switching.
+ */
+static int __init pj4_cp0_init(void)
+{
+	u32 cp_access;
+
+	cp_access = pj4_cp_access_read() & ~0xf;
+	pj4_cp_access_write(cp_access);
+
+	printk(KERN_INFO "PJ4 iWMMXt coprocessor enabled.\n");
+	elf_hwcap |= HWCAP_IWMMXT;
+	thread_register_notifier(&iwmmxt_notifier_block);
+
+	return 0;
+}
+
+late_initcall(pj4_cp0_init);
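pj4-cp0.c sets HWCAP_IWMMXT in elf_hwcap, so userspace can detect the unit through the ELF auxiliary vector. A hedged userspace example (not part of this commit; assumes an ARM toolchain where <asm/hwcap.h> provides HWCAP_IWMMXT and a libc with getauxval()):

#include <stdio.h>
#include <sys/auxv.h>
#include <asm/hwcap.h>

int main(void)
{
	unsigned long hwcap = getauxval(AT_HWCAP);

	/* The bit is set once the kernel has registered iWMMXt support. */
	printf("iWMMXt: %s\n", (hwcap & HWCAP_IWMMXT) ? "present" : "absent");
	return 0;
}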
@@ -1060,8 +1060,8 @@ static int ptrace_sethbpregs(struct task_struct *tsk, long num,
 		goto out;

 	if ((gen_type & implied_type) != gen_type) {
-			ret = -EINVAL;
-			goto out;
+		ret = -EINVAL;
+		goto out;
 	}

 	attr.bp_len = gen_len;
@@ -17,6 +17,7 @@
 #include <linux/kernel.h>
 #include <linux/compiler.h>
 #include <linux/io.h>
+#include <linux/dma-mapping.h>
 #include <linux/serial_core.h>
 #include <linux/serial_8250.h>
 #include <linux/platform_device.h>
@@ -107,11 +108,64 @@ static void __init cns3420_early_serial_setup(void)
 #endif
 }

+/*
+ * USB
+ */
+static struct resource cns3xxx_usb_ehci_resources[] = {
+	[0] = {
+		.start = CNS3XXX_USB_BASE,
+		.end   = CNS3XXX_USB_BASE + SZ_16M - 1,
+		.flags = IORESOURCE_MEM,
+	},
+	[1] = {
+		.start = IRQ_CNS3XXX_USB_EHCI,
+		.flags = IORESOURCE_IRQ,
+	},
+};
+
+static u64 cns3xxx_usb_ehci_dma_mask = DMA_BIT_MASK(32);
+
+static struct platform_device cns3xxx_usb_ehci_device = {
+	.name          = "cns3xxx-ehci",
+	.num_resources = ARRAY_SIZE(cns3xxx_usb_ehci_resources),
+	.resource      = cns3xxx_usb_ehci_resources,
+	.dev           = {
+		.dma_mask          = &cns3xxx_usb_ehci_dma_mask,
+		.coherent_dma_mask = DMA_BIT_MASK(32),
+	},
+};
+
+static struct resource cns3xxx_usb_ohci_resources[] = {
+	[0] = {
+		.start = CNS3XXX_USB_OHCI_BASE,
+		.end   = CNS3XXX_USB_OHCI_BASE + SZ_16M - 1,
+		.flags = IORESOURCE_MEM,
+	},
+	[1] = {
+		.start = IRQ_CNS3XXX_USB_OHCI,
+		.flags = IORESOURCE_IRQ,
+	},
+};
+
+static u64 cns3xxx_usb_ohci_dma_mask = DMA_BIT_MASK(32);
+
+static struct platform_device cns3xxx_usb_ohci_device = {
+	.name          = "cns3xxx-ohci",
+	.num_resources = ARRAY_SIZE(cns3xxx_usb_ohci_resources),
+	.resource      = cns3xxx_usb_ohci_resources,
+	.dev           = {
+		.dma_mask          = &cns3xxx_usb_ohci_dma_mask,
+		.coherent_dma_mask = DMA_BIT_MASK(32),
+	},
+};
+
 /*
  * Initialization
  */
 static struct platform_device *cns3420_pdevs[] __initdata = {
 	&cns3420_nor_pdev,
+	&cns3xxx_usb_ehci_device,
+	&cns3xxx_usb_ohci_device,
 };

 static void __init cns3420_init(void)
@@ -16,7 +16,5 @@ extern struct sys_timer cns3xxx_timer;
 void __init cns3xxx_map_io(void);
 void __init cns3xxx_init_irq(void);
 void cns3xxx_power_off(void);
-void cns3xxx_pwr_power_up(unsigned int block);
-void cns3xxx_pwr_power_down(unsigned int block);

 #endif /* __CNS3XXX_CORE_H */
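The cns3420_pdevs[] array above follows the usual board-file pattern: static platform devices gathered in an __initdata table and registered in one call from the machine init hook. A hedged sketch of that registration step (cns3420_init() itself is not shown in this diff; the names below are illustrative):

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>

static struct platform_device example_dev = {
	.name	= "example-dev",
	.id	= -1,
};

static struct platform_device *example_pdevs[] __initdata = {
	&example_dev,
	/* ... more devices, e.g. the EHCI/OHCI entries added above ... */
};

static void __init example_board_init(void)
{
	/* Registers every device in the table with the platform bus. */
	platform_add_devices(example_pdevs, ARRAY_SIZE(example_pdevs));
}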
@@ -18,6 +18,7 @@
 #include <linux/platform_device.h>
 #include <mach/cns3xxx.h>
 #include <mach/irqs.h>
+#include <mach/pm.h>
 #include "core.h"
 #include "devices.h"

@@ -165,7 +165,6 @@
 #define CNS3XXX_USBOTG_BASE_VIRT 0xFFF15000

 #define CNS3XXX_USB_BASE 0x82000000 /* USB Host Control */
-#define CNS3XXX_USB_BASE_VIRT 0xFFF16000

 #define CNS3XXX_SATA2_BASE 0x83000000 /* SATA */
 #define CNS3XXX_SATA2_SIZE SZ_16M
@@ -184,7 +183,6 @@
 #define CNS3XXX_2DG_BASE_VIRT 0xFFF1B000

 #define CNS3XXX_USB_OHCI_BASE 0x88000000 /* USB OHCI */
-#define CNS3XXX_USB_OHCI_BASE_VIRT 0xFFF1C000

 #define CNS3XXX_L2C_BASE 0x92000000 /* L2 Cache Control */
 #define CNS3XXX_L2C_BASE_VIRT 0xFFF27000
arch/arm/mach-cns3xxx/include/mach/pm.h (new file, 23 lines)
@@ -0,0 +1,23 @@
+/*
+ * Copyright 2000 Deep Blue Solutions Ltd
+ * Copyright 2004 ARM Limited
+ * Copyright 2008 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __CNS3XXX_PM_H
+#define __CNS3XXX_PM_H
+
+#include <asm/atomic.h>
+
+void cns3xxx_pwr_clk_en(unsigned int block);
+void cns3xxx_pwr_clk_dis(unsigned int block);
+void cns3xxx_pwr_power_up(unsigned int block);
+void cns3xxx_pwr_power_down(unsigned int block);
+
+extern atomic_t usb_pwr_ref;
+
+#endif /* __CNS3XXX_PM_H */
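The shared usb_pwr_ref counter declared in pm.h suggests the intended usage: whichever USB host driver comes up first powers the block, and later users just take a reference. A hedged sketch of that idea (BLOCK_USB is a placeholder bit, not a real cns3xxx register definition, and this code is not part of the diff):

#include <asm/atomic.h>
#include <mach/pm.h>

#define BLOCK_USB	(1 << 0)	/* placeholder power-control bit */

static void example_usb_power_on(void)
{
	/* Only the first user actually powers up and ungates the clock. */
	if (atomic_inc_return(&usb_pwr_ref) == 1) {
		cns3xxx_pwr_power_up(BLOCK_USB);
		cns3xxx_pwr_clk_en(BLOCK_USB);
	}
}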
Some files were not shown because too many files have changed in this diff.