Merge branch 'devel-stable' into for-linus
@@ -0,0 +1,77 @@
+* ARM CPUs binding description
+
+The device tree allows to describe the layout of CPUs in a system through
+the "cpus" node, which in turn contains a number of subnodes (ie "cpu")
+defining properties for every cpu.
+
+Bindings for CPU nodes follow the ePAPR standard, available from:
+
+http://devicetree.org
+
+For the ARM architecture every CPU node must contain the following properties:
+
+- device_type:	must be "cpu"
+- reg:		property matching the CPU MPIDR[23:0] register bits
+		reg[31:24] bits must be set to 0
+- compatible:	should be one of:
+		"arm,arm1020"
+		"arm,arm1020e"
+		"arm,arm1022"
+		"arm,arm1026"
+		"arm,arm720"
+		"arm,arm740"
+		"arm,arm7tdmi"
+		"arm,arm920"
+		"arm,arm922"
+		"arm,arm925"
+		"arm,arm926"
+		"arm,arm940"
+		"arm,arm946"
+		"arm,arm9tdmi"
+		"arm,cortex-a5"
+		"arm,cortex-a7"
+		"arm,cortex-a8"
+		"arm,cortex-a9"
+		"arm,cortex-a15"
+		"arm,arm1136"
+		"arm,arm1156"
+		"arm,arm1176"
+		"arm,arm11mpcore"
+		"faraday,fa526"
+		"intel,sa110"
+		"intel,sa1100"
+		"marvell,feroceon"
+		"marvell,mohawk"
+		"marvell,xsc3"
+		"marvell,xscale"
+
+Example:
+
+	cpus {
+		#size-cells = <0>;
+		#address-cells = <1>;
+
+		CPU0: cpu@0 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a15";
+			reg = <0x0>;
+		};
+
+		CPU1: cpu@1 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a15";
+			reg = <0x1>;
+		};
+
+		CPU2: cpu@100 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a7";
+			reg = <0x100>;
+		};
+
+		CPU3: cpu@101 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a7";
+			reg = <0x101>;
+		};
+	};
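[Editor's note — illustrative sketch, not part of the commit] The reg value is simply MPIDR[23:0] with the top byte forced to zero, which is why reg[31:24] must read as 0. A minimal bare-metal sketch (ARMv7, CP15 read; compiles only for an ARM target and assumes privileged execution) of deriving a CPU node's reg from the running core:

	#include <stdint.h>

	/* MPIDR lives in CP15 c0/c0/5. */
	static inline uint32_t read_mpidr(void)
	{
		uint32_t mpidr;
		asm volatile("mrc p15, 0, %0, c0, c0, 5" : "=r" (mpidr));
		return mpidr;
	}

	/* The value a DT generator would place in this CPU's "reg". */
	static uint32_t cpu_node_reg(void)
	{
		return read_mpidr() & 0x00ffffff;	/* MPIDR[23:0] */
	}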
@@ -69,6 +69,14 @@ struct gic_chip_data {
 
 static DEFINE_RAW_SPINLOCK(irq_controller_lock);
 
+/*
+ * The GIC mapping of CPU interfaces does not necessarily match
+ * the logical CPU numbering. Let's use a mapping as returned
+ * by the GIC itself.
+ */
+#define NR_GIC_CPU_IF 8
+static u8 gic_cpu_map[NR_GIC_CPU_IF] __read_mostly;
+
 /*
  * Supported arch specific GIC irq extension.
  * Default make them NULL.
@@ -238,11 +246,11 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
 	unsigned int cpu = cpumask_any_and(mask_val, cpu_online_mask);
 	u32 val, mask, bit;
 
-	if (cpu >= 8 || cpu >= nr_cpu_ids)
+	if (cpu >= NR_GIC_CPU_IF || cpu >= nr_cpu_ids)
 		return -EINVAL;
 
 	mask = 0xff << shift;
-	bit = 1 << (cpu_logical_map(cpu) + shift);
+	bit = gic_cpu_map[cpu] << shift;
 
 	raw_spin_lock(&irq_controller_lock);
 	val = readl_relaxed(reg) & ~mask;
@@ -349,11 +357,6 @@ static void __init gic_dist_init(struct gic_chip_data *gic)
 	u32 cpumask;
 	unsigned int gic_irqs = gic->gic_irqs;
 	void __iomem *base = gic_data_dist_base(gic);
-	u32 cpu = cpu_logical_map(smp_processor_id());
-
-	cpumask = 1 << cpu;
-	cpumask |= cpumask << 8;
-	cpumask |= cpumask << 16;
 
 	writel_relaxed(0, base + GIC_DIST_CTRL);
 
@@ -366,6 +369,7 @@ static void __init gic_dist_init(struct gic_chip_data *gic)
 	/*
 	 * Set all global interrupts to this CPU only.
 	 */
+	cpumask = readl_relaxed(base + GIC_DIST_TARGET + 0);
 	for (i = 32; i < gic_irqs; i += 4)
 		writel_relaxed(cpumask, base + GIC_DIST_TARGET + i * 4 / 4);
 
@@ -389,8 +393,24 @@ static void __cpuinit gic_cpu_init(struct gic_chip_data *gic)
 {
 	void __iomem *dist_base = gic_data_dist_base(gic);
 	void __iomem *base = gic_data_cpu_base(gic);
+	unsigned int cpu_mask, cpu = smp_processor_id();
 	int i;
 
 	/*
+	 * Get what the GIC says our CPU mask is.
+	 */
+	BUG_ON(cpu >= NR_GIC_CPU_IF);
+	cpu_mask = readl_relaxed(dist_base + GIC_DIST_TARGET + 0);
+	gic_cpu_map[cpu] = cpu_mask;
+
+	/*
+	 * Clear our mask from the other map entries in case they're
+	 * still undefined.
+	 */
+	for (i = 0; i < NR_GIC_CPU_IF; i++)
+		if (i != cpu)
+			gic_cpu_map[i] &= ~cpu_mask;
+
+	/*
 	 * Deal with the banked PPI and SGI interrupts - disable all
 	 * PPI interrupts, ensure all SGI interrupts are enabled.
@@ -646,7 +666,7 @@ void __init gic_init_bases(unsigned int gic_nr, int irq_start,
 {
 	irq_hw_number_t hwirq_base;
 	struct gic_chip_data *gic;
-	int gic_irqs, irq_base;
+	int gic_irqs, irq_base, i;
 
 	BUG_ON(gic_nr >= MAX_GIC_NR);
 
@@ -682,6 +702,13 @@ void __init gic_init_bases(unsigned int gic_nr, int irq_start,
 		gic_set_base_accessor(gic, gic_get_common_base);
 	}
 
+	/*
+	 * Initialize the CPU interface map to all CPUs.
+	 * It will be refined as each CPU probes its ID.
+	 */
+	for (i = 0; i < NR_GIC_CPU_IF; i++)
+		gic_cpu_map[i] = 0xff;
+
 	/*
 	 * For primary GICs, skip over SGIs.
 	 * For secondary GICs, skip over PPIs, too.
@@ -737,7 +764,7 @@ void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
 
 	/* Convert our logical CPU mask into a physical one. */
 	for_each_cpu(cpu, mask)
-		map |= 1 << cpu_logical_map(cpu);
+		map |= gic_cpu_map[cpu];
 
 	/*
 	 * Ensure that stores to Normal memory are visible to the
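[Editor's note — illustrative sketch, not part of the commit] gic_cpu_map[] decouples Linux logical CPU numbers from GIC CPU interface numbers: each CPU records the interface mask the GIC reports for it, and gic_raise_softirq() ORs those masks together instead of shifting by cpu_logical_map(). A user-space model of that fold (the per-CPU masks are invented for the demo):

	#include <stdint.h>
	#include <stdio.h>

	#define NR_GIC_CPU_IF 8

	/* Hypothetical per-CPU masks, as each CPU would have read them
	 * from GIC_DIST_TARGET + 0 in gic_cpu_init(). */
	static uint8_t gic_cpu_map[NR_GIC_CPU_IF] = { 0x02, 0x01, 0x04, 0x08 };

	int main(void)
	{
		unsigned long mask = 0x3;	/* logical CPUs 0 and 1 */
		unsigned long map = 0;
		int cpu;

		for (cpu = 0; cpu < NR_GIC_CPU_IF; cpu++)
			if (mask & (1UL << cpu))
				map |= gic_cpu_map[cpu];

		printf("SGI target list = 0x%lx\n", map);	/* prints 0x3 */
		return 0;
	}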
@@ -16,7 +16,6 @@ generic-y += local64.h
 generic-y += msgbuf.h
 generic-y += param.h
 generic-y += parport.h
-generic-y += percpu.h
 generic-y += poll.h
 generic-y += resource.h
 generic-y += sections.h
@@ -15,6 +15,7 @@
 
 struct cpuinfo_arm {
 	struct cpu	cpu;
+	u32		cpuid;
 #ifdef CONFIG_SMP
 	unsigned int	loops_per_jiffy;
 #endif
@@ -25,6 +25,19 @@
 #define CPUID_EXT_ISAR4	"c2, 4"
 #define CPUID_EXT_ISAR5	"c2, 5"
 
+#define MPIDR_SMP_BITMASK (0x3 << 30)
+#define MPIDR_SMP_VALUE (0x2 << 30)
+
+#define MPIDR_MT_BITMASK (0x1 << 24)
+
+#define MPIDR_HWID_BITMASK 0xFFFFFF
+
+#define MPIDR_LEVEL_BITS 8
+#define MPIDR_LEVEL_MASK ((1 << MPIDR_LEVEL_BITS) - 1)
+
+#define MPIDR_AFFINITY_LEVEL(mpidr, level) \
+	((mpidr >> (MPIDR_LEVEL_BITS * level)) & MPIDR_LEVEL_MASK)
+
 extern unsigned int processor_id;
 
 #ifdef CONFIG_CPU_CP15
 
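[Editor's note — illustrative sketch, not part of the commit] MPIDR_AFFINITY_LEVEL() just selects one 8-bit affinity field. A worked example matching the cpus binding above, where cpu@101 is core 1 in cluster 1:

	#include <stdint.h>
	#include <stdio.h>

	#define MPIDR_LEVEL_BITS 8
	#define MPIDR_LEVEL_MASK ((1 << MPIDR_LEVEL_BITS) - 1)
	#define MPIDR_AFFINITY_LEVEL(mpidr, level) \
		((mpidr >> (MPIDR_LEVEL_BITS * level)) & MPIDR_LEVEL_MASK)

	int main(void)
	{
		uint32_t mpidr = 0x000101;	/* MPIDR[23:0] of cpu@101 */

		printf("Aff0 (core)    = %u\n", MPIDR_AFFINITY_LEVEL(mpidr, 0)); /* 1 */
		printf("Aff1 (cluster) = %u\n", MPIDR_AFFINITY_LEVEL(mpidr, 1)); /* 1 */
		printf("Aff2           = %u\n", MPIDR_AFFINITY_LEVEL(mpidr, 2)); /* 0 */
		return 0;
	}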
@@ -146,15 +146,7 @@ static inline void cti_irq_ack(struct cti *cti)
  */
 static inline void cti_unlock(struct cti *cti)
 {
-	void __iomem *base = cti->base;
-	unsigned long val;
-
-	val = __raw_readl(base + LOCKSTATUS);
-
-	if (val & 1) {
-		val = LOCKCODE;
-		__raw_writel(val, base + LOCKACCESS);
-	}
+	__raw_writel(LOCKCODE, cti->base + LOCKACCESS);
 }
 
 /**
@@ -166,14 +158,6 @@ static inline void cti_unlock(struct cti *cti)
  */
 static inline void cti_lock(struct cti *cti)
 {
-	void __iomem *base = cti->base;
-	unsigned long val;
-
-	val = __raw_readl(base + LOCKSTATUS);
-
-	if (!(val & 1)) {
-		val = ~LOCKCODE;
-		__raw_writel(val, base + LOCKACCESS);
-	}
+	__raw_writel(~LOCKCODE, cti->base + LOCKACCESS);
 }
 #endif
@@ -98,12 +98,12 @@ static inline void decode_ctrl_reg(u32 reg,
 #define ARM_BASE_WCR		112
 
 /* Accessor macros for the debug registers. */
-#define ARM_DBG_READ(M, OP2, VAL) do {\
-	asm volatile("mrc p14, 0, %0, c0," #M ", " #OP2 : "=r" (VAL));\
+#define ARM_DBG_READ(N, M, OP2, VAL) do {\
+	asm volatile("mrc p14, 0, %0, " #N "," #M ", " #OP2 : "=r" (VAL));\
 } while (0)
 
-#define ARM_DBG_WRITE(M, OP2, VAL) do {\
-	asm volatile("mcr p14, 0, %0, c0," #M ", " #OP2 : : "r" (VAL));\
+#define ARM_DBG_WRITE(N, M, OP2, VAL) do {\
+	asm volatile("mcr p14, 0, %0, " #N "," #M ", " #OP2 : : "r" (VAL));\
 } while (0)
 
 struct notifier_block;
@@ -5,18 +5,15 @@
 
 typedef struct {
 #ifdef CONFIG_CPU_HAS_ASID
-	unsigned int id;
-	raw_spinlock_t id_lock;
+	u64 id;
 #endif
-	unsigned int kvm_seq;
+	unsigned int vmalloc_seq;
 } mm_context_t;
 
 #ifdef CONFIG_CPU_HAS_ASID
-#define ASID(mm)	((mm)->context.id & 255)
-
-/* init_mm.context.id_lock should be initialized. */
-#define INIT_MM_CONTEXT(name)	\
-	.context.id_lock = __RAW_SPIN_LOCK_UNLOCKED(name.context.id_lock),
+#define ASID_BITS	8
+#define ASID_MASK	((~0ULL) << ASID_BITS)
+#define ASID(mm)	((mm)->context.id & ~ASID_MASK)
 #else
 #define ASID(mm)	(0)
 #endif
@@ -20,88 +20,12 @@
 #include <asm/proc-fns.h>
 #include <asm-generic/mm_hooks.h>
 
-void __check_kvm_seq(struct mm_struct *mm);
+void __check_vmalloc_seq(struct mm_struct *mm);
 
 #ifdef CONFIG_CPU_HAS_ASID
 
-/*
- * On ARMv6, we have the following structure in the Context ID:
- *
- * 31                         7          0
- * +-------------------------+-----------+
- * |      process ID         |   ASID    |
- * +-------------------------+-----------+
- * |              context ID             |
- * +-------------------------------------+
- *
- * The ASID is used to tag entries in the CPU caches and TLBs.
- * The context ID is used by debuggers and trace logic, and
- * should be unique within all running processes.
- */
-#define ASID_BITS		8
-#define ASID_MASK		((~0) << ASID_BITS)
-#define ASID_FIRST_VERSION	(1 << ASID_BITS)
-
-extern unsigned int cpu_last_asid;
-
-void __init_new_context(struct task_struct *tsk, struct mm_struct *mm);
-void __new_context(struct mm_struct *mm);
-void cpu_set_reserved_ttbr0(void);
-
-static inline void switch_new_context(struct mm_struct *mm)
-{
-	unsigned long flags;
-
-	__new_context(mm);
-
-	local_irq_save(flags);
-	cpu_switch_mm(mm->pgd, mm);
-	local_irq_restore(flags);
-}
-
-static inline void check_and_switch_context(struct mm_struct *mm,
-					    struct task_struct *tsk)
-{
-	if (unlikely(mm->context.kvm_seq != init_mm.context.kvm_seq))
-		__check_kvm_seq(mm);
-
-	/*
-	 * Required during context switch to avoid speculative page table
-	 * walking with the wrong TTBR.
-	 */
-	cpu_set_reserved_ttbr0();
-
-	if (!((mm->context.id ^ cpu_last_asid) >> ASID_BITS))
-		/*
-		 * The ASID is from the current generation, just switch to the
-		 * new pgd. This condition is only true for calls from
-		 * context_switch() and interrupts are already disabled.
-		 */
-		cpu_switch_mm(mm->pgd, mm);
-	else if (irqs_disabled())
-		/*
-		 * Defer the new ASID allocation until after the context
-		 * switch critical region since __new_context() cannot be
-		 * called with interrupts disabled (it sends IPIs).
-		 */
-		set_ti_thread_flag(task_thread_info(tsk), TIF_SWITCH_MM);
-	else
-		/*
-		 * That is a direct call to switch_mm() or activate_mm() with
-		 * interrupts enabled and a new context.
-		 */
-		switch_new_context(mm);
-}
-
-#define init_new_context(tsk,mm)	(__init_new_context(tsk,mm),0)
-
-#define finish_arch_post_lock_switch \
-	finish_arch_post_lock_switch
-static inline void finish_arch_post_lock_switch(void)
-{
-	if (test_and_clear_thread_flag(TIF_SWITCH_MM))
-		switch_new_context(current->mm);
-}
+void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk);
+#define init_new_context(tsk,mm)	({ mm->context.id = 0; })
 
 #else	/* !CONFIG_CPU_HAS_ASID */
 
@@ -110,8 +34,8 @@ static inline void finish_arch_post_lock_switch(void)
 static inline void check_and_switch_context(struct mm_struct *mm,
 					    struct task_struct *tsk)
 {
-	if (unlikely(mm->context.kvm_seq != init_mm.context.kvm_seq))
-		__check_kvm_seq(mm);
+	if (unlikely(mm->context.vmalloc_seq != init_mm.context.vmalloc_seq))
+		__check_vmalloc_seq(mm);
 
 	if (irqs_disabled())
 		/*
@@ -143,6 +67,7 @@ static inline void finish_arch_post_lock_switch(void)
 #endif	/* CONFIG_CPU_HAS_ASID */
 
 #define destroy_context(mm)		do { } while(0)
+#define activate_mm(prev,next)		switch_mm(prev, next, NULL)
 
 /*
  * This is called when "tsk" is about to enter lazy TLB mode.
@@ -186,6 +111,5 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next,
 }
 
 #define deactivate_mm(tsk,mm)	do { } while (0)
-#define activate_mm(prev,next)	switch_mm(prev, next, NULL)
 
 #endif
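[Editor's note — illustrative sketch, not part of the commit] The context id now packs a rollover generation above the 8-bit ASID in one u64; the removed inline fast path compared generations with ((id ^ cpu_last_asid) >> ASID_BITS), and the replacement logic moves out of line into the mm context code (not shown in this diff). A toy model of the packing:

	#include <stdint.h>
	#include <stdio.h>

	#define ASID_BITS 8
	#define ASID_MASK ((~0ULL) << ASID_BITS)

	int main(void)
	{
		/* generation 3, ASID 0x2a */
		uint64_t id = (3ULL << ASID_BITS) | 0x2a;
		uint64_t cpu_last_asid = (3ULL << ASID_BITS) | 0x07;

		printf("ASID       = 0x%llx\n", (unsigned long long)(id & ~ASID_MASK));
		printf("generation = %llu\n", (unsigned long long)(id >> ASID_BITS));
		/* zero iff both ids belong to the same generation */
		printf("stale?     = %d\n", (int)(((id ^ cpu_last_asid) >> ASID_BITS) != 0));
		return 0;
	}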
@@ -0,0 +1,45 @@
+/*
+ * Copyright 2012 Calxeda, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef _ASM_ARM_PERCPU_H_
+#define _ASM_ARM_PERCPU_H_
+
+/*
+ * Same as asm-generic/percpu.h, except that we store the per cpu offset
+ * in the TPIDRPRW. TPIDRPRW only exists on V6K and V7
+ */
+#if defined(CONFIG_SMP) && !defined(CONFIG_CPU_V6)
+static inline void set_my_cpu_offset(unsigned long off)
+{
+	/* Set TPIDRPRW */
+	asm volatile("mcr p15, 0, %0, c13, c0, 4" : : "r" (off) : "memory");
+}
+
+static inline unsigned long __my_cpu_offset(void)
+{
+	unsigned long off;
+	/* Read TPIDRPRW */
+	asm("mrc p15, 0, %0, c13, c0, 4" : "=r" (off) : : "memory");
+	return off;
+}
+#define __my_cpu_offset __my_cpu_offset()
+#else
+#define set_my_cpu_offset(x)	do {} while(0)
+
+#endif	/* CONFIG_SMP */
+
+#include <asm-generic/percpu.h>
+
+#endif /* _ASM_ARM_PERCPU_H_ */
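[Editor's note — illustrative sketch, not part of the commit] Keeping the per-CPU offset in TPIDRPRW turns every this_cpu access into a single coprocessor read instead of an smp_processor_id()-indexed table load. A user-space model of the access pattern (GNU C typeof; the register is stood in by a plain global so it runs anywhere):

	#include <stdint.h>
	#include <stdio.h>

	static uintptr_t tpidrprw;	/* stand-in for the per-CPU offset register */

	static void set_my_cpu_offset(uintptr_t off) { tpidrprw = off; }
	static uintptr_t my_cpu_offset(void) { return tpidrprw; }

	/* Simplified this_cpu_ptr(): base address plus the stashed offset. */
	#define this_cpu_ptr(var) \
		((typeof(var) *)((char *)&(var) + my_cpu_offset()))

	static long counter[2];		/* one slot per CPU in this demo */

	int main(void)
	{
		set_my_cpu_offset(1 * sizeof(long));	/* pretend we are CPU 1 */
		++*this_cpu_ptr(counter[0]);
		printf("cpu1 counter = %ld\n", counter[1]);	/* prints 1 */
		return 0;
	}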
@@ -21,4 +21,11 @@
 #define C(_x)			PERF_COUNT_HW_CACHE_##_x
 #define CACHE_OP_UNSUPPORTED	0xFFFF
 
+#ifdef CONFIG_HW_PERF_EVENTS
+struct pt_regs;
+extern unsigned long perf_instruction_pointer(struct pt_regs *regs);
+extern unsigned long perf_misc_flags(struct pt_regs *regs);
+#define perf_misc_flags(regs)	perf_misc_flags(regs)
+#endif
+
 #endif /* __ARM_PERF_EVENT_H__ */
@@ -115,6 +115,7 @@
 * The PTE table pointer refers to the hardware entries; the "Linux"
 * entries are stored 1024 bytes below.
 */
+#define L_PTE_VALID		(_AT(pteval_t, 1) << 0)		/* Valid */
 #define L_PTE_PRESENT		(_AT(pteval_t, 1) << 0)
 #define L_PTE_YOUNG		(_AT(pteval_t, 1) << 1)
 #define L_PTE_FILE		(_AT(pteval_t, 1) << 2)	/* only when !PRESENT */
@@ -123,6 +124,7 @@
 #define L_PTE_USER		(_AT(pteval_t, 1) << 8)
 #define L_PTE_XN		(_AT(pteval_t, 1) << 9)
 #define L_PTE_SHARED		(_AT(pteval_t, 1) << 10)	/* shared(v6), coherent(xsc3) */
+#define L_PTE_NONE		(_AT(pteval_t, 1) << 11)
 
 /*
  * These are the memory types, defined to be compatible with
@@ -67,7 +67,8 @@
 * These bits overlap with the hardware bits but the naming is preserved for
 * consistency with the classic page table format.
 */
-#define L_PTE_PRESENT		(_AT(pteval_t, 3) << 0)		/* Valid */
+#define L_PTE_VALID		(_AT(pteval_t, 1) << 0)		/* Valid */
+#define L_PTE_PRESENT		(_AT(pteval_t, 3) << 0)		/* Present */
 #define L_PTE_FILE		(_AT(pteval_t, 1) << 2)		/* only when !PRESENT */
 #define L_PTE_USER		(_AT(pteval_t, 1) << 6)		/* AP[1] */
 #define L_PTE_RDONLY		(_AT(pteval_t, 1) << 7)		/* AP[2] */
@@ -76,6 +77,7 @@
 #define L_PTE_XN		(_AT(pteval_t, 1) << 54)	/* XN */
 #define L_PTE_DIRTY		(_AT(pteval_t, 1) << 55)	/* unused */
 #define L_PTE_SPECIAL		(_AT(pteval_t, 1) << 56)	/* unused */
+#define L_PTE_NONE		(_AT(pteval_t, 1) << 57)	/* PROT_NONE */
 
 /*
  * To be used in assembly code with the upper page attributes.
@@ -73,7 +73,7 @@ extern pgprot_t pgprot_kernel;
 
 #define _MOD_PROT(p, b)	__pgprot(pgprot_val(p) | (b))
 
-#define PAGE_NONE		_MOD_PROT(pgprot_user, L_PTE_XN | L_PTE_RDONLY)
+#define PAGE_NONE		_MOD_PROT(pgprot_user, L_PTE_XN | L_PTE_RDONLY | L_PTE_NONE)
 #define PAGE_SHARED		_MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_XN)
 #define PAGE_SHARED_EXEC	_MOD_PROT(pgprot_user, L_PTE_USER)
 #define PAGE_COPY		_MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY | L_PTE_XN)
@@ -83,7 +83,7 @@ extern pgprot_t pgprot_kernel;
 #define PAGE_KERNEL		_MOD_PROT(pgprot_kernel, L_PTE_XN)
 #define PAGE_KERNEL_EXEC	pgprot_kernel
 
-#define __PAGE_NONE		__pgprot(_L_PTE_DEFAULT | L_PTE_RDONLY | L_PTE_XN)
+#define __PAGE_NONE		__pgprot(_L_PTE_DEFAULT | L_PTE_RDONLY | L_PTE_XN | L_PTE_NONE)
 #define __PAGE_SHARED		__pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_XN)
 #define __PAGE_SHARED_EXEC	__pgprot(_L_PTE_DEFAULT | L_PTE_USER)
 #define __PAGE_COPY		__pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_RDONLY | L_PTE_XN)
@@ -203,9 +203,7 @@ static inline pte_t *pmd_page_vaddr(pmd_t pmd)
 #define pte_exec(pte)		(!(pte_val(pte) & L_PTE_XN))
 #define pte_special(pte)	(0)
 
-#define pte_present_user(pte) \
-	((pte_val(pte) & (L_PTE_PRESENT | L_PTE_USER)) == \
-	 (L_PTE_PRESENT | L_PTE_USER))
+#define pte_present_user(pte)  (pte_present(pte) && (pte_val(pte) & L_PTE_USER))
 
 #if __LINUX_ARM_ARCH__ < 6
 static inline void __sync_icache_dcache(pte_t pteval)
@@ -242,7 +240,7 @@ static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
 
 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 {
-	const pteval_t mask = L_PTE_XN | L_PTE_RDONLY | L_PTE_USER;
+	const pteval_t mask = L_PTE_XN | L_PTE_RDONLY | L_PTE_USER | L_PTE_NONE;
 	pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
 	return pte;
 }
@@ -67,19 +67,19 @@ struct arm_pmu {
 	cpumask_t	active_irqs;
 	char		*name;
 	irqreturn_t	(*handle_irq)(int irq_num, void *dev);
-	void		(*enable)(struct hw_perf_event *evt, int idx);
-	void		(*disable)(struct hw_perf_event *evt, int idx);
+	void		(*enable)(struct perf_event *event);
+	void		(*disable)(struct perf_event *event);
 	int		(*get_event_idx)(struct pmu_hw_events *hw_events,
-					 struct hw_perf_event *hwc);
+					 struct perf_event *event);
 	int		(*set_event_filter)(struct hw_perf_event *evt,
 					    struct perf_event_attr *attr);
-	u32		(*read_counter)(int idx);
-	void		(*write_counter)(int idx, u32 val);
-	void		(*start)(void);
-	void		(*stop)(void);
+	u32		(*read_counter)(struct perf_event *event);
+	void		(*write_counter)(struct perf_event *event, u32 val);
+	void		(*start)(struct arm_pmu *);
+	void		(*stop)(struct arm_pmu *);
 	void		(*reset)(void *);
-	int		(*request_irq)(irq_handler_t handler);
-	void		(*free_irq)(void);
+	int		(*request_irq)(struct arm_pmu *, irq_handler_t handler);
+	void		(*free_irq)(struct arm_pmu *);
 	int		(*map_event)(struct perf_event *event);
 	int		num_events;
 	atomic_t	active_events;
@@ -93,15 +93,11 @@ struct arm_pmu {
 
 extern const struct dev_pm_ops armpmu_dev_pm_ops;
 
-int armpmu_register(struct arm_pmu *armpmu, char *name, int type);
+int armpmu_register(struct arm_pmu *armpmu, int type);
 
-u64 armpmu_event_update(struct perf_event *event,
-			struct hw_perf_event *hwc,
-			int idx);
+u64 armpmu_event_update(struct perf_event *event);
 
-int armpmu_event_set_period(struct perf_event *event,
-			    struct hw_perf_event *hwc,
-			    int idx);
+int armpmu_event_set_period(struct perf_event *event);
 
 int armpmu_map_event(struct perf_event *event,
 		     const unsigned (*event_map)[PERF_COUNT_HW_MAX],
@@ -15,6 +15,7 @@
 
 extern struct machine_desc *setup_machine_fdt(unsigned int dt_phys);
 extern void arm_dt_memblock_reserve(void);
+extern void __init arm_dt_init_cpu_maps(void);
 
 #else /* CONFIG_OF */
 
@@ -24,6 +25,7 @@ static inline struct machine_desc *setup_machine_fdt(unsigned int dt_phys)
 }
 
 static inline void arm_dt_memblock_reserve(void) { }
+static inline void arm_dt_init_cpu_maps(void) { }
 
 #endif /* CONFIG_OF */
 #endif /* ASMARM_PROM_H */
@@ -5,6 +5,9 @@
 #ifndef __ASMARM_SMP_PLAT_H
 #define __ASMARM_SMP_PLAT_H
 
+#include <linux/cpumask.h>
+#include <linux/err.h>
+
 #include <asm/cputype.h>
 
 /*
@@ -48,5 +51,19 @@ static inline int cache_ops_need_broadcast(void)
  */
 extern int __cpu_logical_map[];
 #define cpu_logical_map(cpu)	__cpu_logical_map[cpu]
+/*
+ * Retrieve logical cpu index corresponding to a given MPIDR[23:0]
+ *  - mpidr: MPIDR[23:0] to be used for the look-up
+ *
+ * Returns the cpu logical index or -EINVAL on look-up error
+ */
+static inline int get_logical_index(u32 mpidr)
+{
+	int cpu;
+	for (cpu = 0; cpu < nr_cpu_ids; cpu++)
+		if (cpu_logical_map(cpu) == mpidr)
+			return cpu;
+	return -EINVAL;
+}
 
 #endif
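[Editor's note — illustrative sketch, not part of the commit] get_logical_index() is the inverse of cpu_logical_map(). A user-space rerun of the lookup (map contents invented, matching the big.LITTLE example above):

	#include <stdio.h>

	#define NR_CPUS 4
	static unsigned int cpu_logical_map[NR_CPUS] = { 0x100, 0x101, 0x000, 0x001 };

	/* Mirrors get_logical_index(): linear scan of the logical map. */
	static int get_logical_index(unsigned int mpidr)
	{
		int cpu;

		for (cpu = 0; cpu < NR_CPUS; cpu++)
			if (cpu_logical_map[cpu] == mpidr)
				return cpu;
		return -1;	/* -EINVAL in the kernel */
	}

	int main(void)
	{
		printf("0x100 -> %d\n", get_logical_index(0x100));	/* 0 (boot CPU) */
		printf("0x000 -> %d\n", get_logical_index(0x000));	/* 2 */
		return 0;
	}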
@@ -19,8 +19,10 @@
 #include <linux/of_irq.h>
 #include <linux/of_platform.h>
 
+#include <asm/cputype.h>
 #include <asm/setup.h>
 #include <asm/page.h>
+#include <asm/smp_plat.h>
 #include <asm/mach/arch.h>
 #include <asm/mach-types.h>
 
@@ -61,6 +63,108 @@ void __init arm_dt_memblock_reserve(void)
 	}
 }
 
+/*
+ * arm_dt_init_cpu_maps - Function retrieves cpu nodes from the device tree
+ * and builds the cpu logical map array containing MPIDR values related to
+ * logical cpus
+ *
+ * Updates the cpu possible mask with the number of parsed cpu nodes
+ */
+void __init arm_dt_init_cpu_maps(void)
+{
+	/*
+	 * Temp logical map is initialized with UINT_MAX values that are
+	 * considered invalid logical map entries since the logical map must
+	 * contain a list of MPIDR[23:0] values where MPIDR[31:24] must
+	 * read as 0.
+	 */
+	struct device_node *cpu, *cpus;
+	u32 i, j, cpuidx = 1;
+	u32 mpidr = is_smp() ? read_cpuid_mpidr() & MPIDR_HWID_BITMASK : 0;
+
+	u32 tmp_map[NR_CPUS] = { [0 ... NR_CPUS-1] = UINT_MAX };
+	bool bootcpu_valid = false;
+	cpus = of_find_node_by_path("/cpus");
+
+	if (!cpus)
+		return;
+
+	for_each_child_of_node(cpus, cpu) {
+		u32 hwid;
+
+		pr_debug(" * %s...\n", cpu->full_name);
+		/*
+		 * A device tree containing CPU nodes with missing "reg"
+		 * properties is considered invalid to build the
+		 * cpu_logical_map.
+		 */
+		if (of_property_read_u32(cpu, "reg", &hwid)) {
+			pr_debug(" * %s missing reg property\n",
+				     cpu->full_name);
+			return;
+		}
+
+		/*
+		 * 8 MSBs must be set to 0 in the DT since the reg property
+		 * defines the MPIDR[23:0].
+		 */
+		if (hwid & ~MPIDR_HWID_BITMASK)
+			return;
+
+		/*
+		 * Duplicate MPIDRs are a recipe for disaster.
+		 * Scan all initialized entries and check for
+		 * duplicates. If any is found just bail out.
+		 * temp values were initialized to UINT_MAX
+		 * to avoid matching valid MPIDR[23:0] values.
+		 */
+		for (j = 0; j < cpuidx; j++)
+			if (WARN(tmp_map[j] == hwid, "Duplicate /cpu reg "
+						     "properties in the DT\n"))
+				return;
+
+		/*
+		 * Build a stashed array of MPIDR values. Numbering scheme
+		 * requires that if detected the boot CPU must be assigned
+		 * logical id 0. Other CPUs get sequential indexes starting
+		 * from 1. If a CPU node with a reg property matching the
+		 * boot CPU MPIDR is detected, this is recorded so that the
+		 * logical map built from DT is validated and can be used
+		 * to override the map created in smp_setup_processor_id().
+		 */
+		if (hwid == mpidr) {
+			i = 0;
+			bootcpu_valid = true;
+		} else {
+			i = cpuidx++;
+		}
+
+		if (WARN(cpuidx > nr_cpu_ids, "DT /cpu %u nodes greater than "
+					      "max cores %u, capping them\n",
+					      cpuidx, nr_cpu_ids)) {
+			cpuidx = nr_cpu_ids;
+			break;
+		}
+
+		tmp_map[i] = hwid;
+	}
+
+	if (WARN(!bootcpu_valid, "DT missing boot CPU MPIDR[23:0], "
+				 "fall back to default cpu_logical_map\n"))
+		return;
+
+	/*
+	 * Since the boot CPU node contains proper data, and all nodes have
+	 * a reg property, the DT CPU list can be considered valid and the
+	 * logical map created in smp_setup_processor_id() can be overridden
+	 */
+	for (i = 0; i < cpuidx; i++) {
+		set_cpu_possible(i, true);
+		cpu_logical_map(i) = tmp_map[i];
+		pr_debug("cpu logical map 0x%x\n", cpu_logical_map(i));
+	}
+}
+
 /**
  * setup_machine_fdt - Machine setup when an dtb was passed to the kernel
  * @dt_phys: physical address of dt blob
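[Editor's note — illustrative sketch, not part of the commit] With the DT from the cpus binding above and a boot CPU whose MPIDR[23:0] reads 0x100, the loop yields logical ids 0 -> 0x100, 1 -> 0x000, 2 -> 0x001, 3 -> 0x101. A user-space rerun of the numbering scheme:

	#include <stdio.h>

	int main(void)
	{
		/* "reg" values in DT node order (cpu@0, cpu@1, cpu@100, cpu@101) */
		unsigned int dt_regs[] = { 0x000, 0x001, 0x100, 0x101 };
		unsigned int boot_mpidr = 0x100;	/* assumed boot CPU */
		unsigned int map[4], cpuidx = 1, i, n;

		/* Boot CPU is pinned to logical 0; the rest follow DT order. */
		for (n = 0; n < 4; n++) {
			i = (dt_regs[n] == boot_mpidr) ? 0 : cpuidx++;
			map[i] = dt_regs[n];
		}

		for (i = 0; i < 4; i++)
			printf("logical cpu %u -> MPIDR[23:0] 0x%03x\n", i, map[i]);
		return 0;
	}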
@@ -52,14 +52,14 @@ static u8 debug_arch;
 /* Maximum supported watchpoint length. */
 static u8 max_watchpoint_len;
 
-#define READ_WB_REG_CASE(OP2, M, VAL)		\
-	case ((OP2 << 4) + M):			\
-		ARM_DBG_READ(c ## M, OP2, VAL); \
+#define READ_WB_REG_CASE(OP2, M, VAL)			\
+	case ((OP2 << 4) + M):				\
+		ARM_DBG_READ(c0, c ## M, OP2, VAL);	\
 		break
 
-#define WRITE_WB_REG_CASE(OP2, M, VAL)		\
-	case ((OP2 << 4) + M):			\
-		ARM_DBG_WRITE(c ## M, OP2, VAL);\
+#define WRITE_WB_REG_CASE(OP2, M, VAL)			\
+	case ((OP2 << 4) + M):				\
+		ARM_DBG_WRITE(c0, c ## M, OP2, VAL);	\
 		break
 
 #define GEN_READ_WB_REG_CASES(OP2, VAL)	\
@@ -136,12 +136,12 @@ static u8 get_debug_arch(void)
 
 	/* Do we implement the extended CPUID interface? */
 	if (((read_cpuid_id() >> 16) & 0xf) != 0xf) {
-		pr_warning("CPUID feature registers not supported. "
-			   "Assuming v6 debug is present.\n");
+		pr_warn_once("CPUID feature registers not supported. "
+			     "Assuming v6 debug is present.\n");
 		return ARM_DEBUG_ARCH_V6;
 	}
 
-	ARM_DBG_READ(c0, 0, didr);
+	ARM_DBG_READ(c0, c0, 0, didr);
 	return (didr >> 16) & 0xf;
 }
@@ -169,7 +169,7 @@ static int debug_exception_updates_fsr(void)
 static int get_num_wrp_resources(void)
 {
 	u32 didr;
-	ARM_DBG_READ(c0, 0, didr);
+	ARM_DBG_READ(c0, c0, 0, didr);
 	return ((didr >> 28) & 0xf) + 1;
 }
@@ -177,7 +177,7 @@ static int get_num_wrp_resources(void)
 static int get_num_brp_resources(void)
 {
 	u32 didr;
-	ARM_DBG_READ(c0, 0, didr);
+	ARM_DBG_READ(c0, c0, 0, didr);
 	return ((didr >> 24) & 0xf) + 1;
 }
@@ -228,19 +228,17 @@ static int get_num_brps(void)
 * be put into halting debug mode at any time by an external debugger
 * but there is nothing we can do to prevent that.
 */
+static int monitor_mode_enabled(void)
+{
+	u32 dscr;
+	ARM_DBG_READ(c0, c1, 0, dscr);
+	return !!(dscr & ARM_DSCR_MDBGEN);
+}
+
 static int enable_monitor_mode(void)
 {
 	u32 dscr;
-	int ret = 0;
-
-	ARM_DBG_READ(c1, 0, dscr);
-
-	/* Ensure that halting mode is disabled. */
-	if (WARN_ONCE(dscr & ARM_DSCR_HDBGEN,
-		      "halting debug mode enabled. Unable to access hardware resources.\n")) {
-		ret = -EPERM;
-		goto out;
-	}
+	ARM_DBG_READ(c0, c1, 0, dscr);
 
 	/* If monitor mode is already enabled, just return. */
 	if (dscr & ARM_DSCR_MDBGEN)
@@ -250,24 +248,27 @@ static int enable_monitor_mode(void)
 	switch (get_debug_arch()) {
 	case ARM_DEBUG_ARCH_V6:
 	case ARM_DEBUG_ARCH_V6_1:
-		ARM_DBG_WRITE(c1, 0, (dscr | ARM_DSCR_MDBGEN));
+		ARM_DBG_WRITE(c0, c1, 0, (dscr | ARM_DSCR_MDBGEN));
 		break;
 	case ARM_DEBUG_ARCH_V7_ECP14:
 	case ARM_DEBUG_ARCH_V7_1:
-		ARM_DBG_WRITE(c2, 2, (dscr | ARM_DSCR_MDBGEN));
+		ARM_DBG_WRITE(c0, c2, 2, (dscr | ARM_DSCR_MDBGEN));
 		isb();
 		break;
 	default:
-		ret = -ENODEV;
-		goto out;
+		return -ENODEV;
 	}
 
 	/* Check that the write made it through. */
-	ARM_DBG_READ(c1, 0, dscr);
-	if (!(dscr & ARM_DSCR_MDBGEN))
-		ret = -EPERM;
+	ARM_DBG_READ(c0, c1, 0, dscr);
+	if (!(dscr & ARM_DSCR_MDBGEN)) {
+		pr_warn_once("Failed to enable monitor mode on CPU %d.\n",
+				smp_processor_id());
+		return -EPERM;
+	}
 
-out:
-	return ret;
+	return 0;
 }
 
 int hw_breakpoint_slots(int type)
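[Editor's note — illustrative, not part of the commit] The new first parameter lets the accessor macros name CRn as well as CRm, covering debug registers outside c0. For example, ARM_DBG_READ(c0, c1, 0, dscr) now expands (after stringification and string pasting) to:

	asm volatile("mrc p14, 0, %0, c0,c1, 0" : "=r" (dscr));

which reads the DSCR, while the old two-parameter form could only ever generate c0-based accesses.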
@@ -328,14 +329,9 @@ int arch_install_hw_breakpoint(struct perf_event *bp)
 {
 	struct arch_hw_breakpoint *info = counter_arch_bp(bp);
 	struct perf_event **slot, **slots;
-	int i, max_slots, ctrl_base, val_base, ret = 0;
+	int i, max_slots, ctrl_base, val_base;
 	u32 addr, ctrl;
 
-	/* Ensure that we are in monitor mode and halting mode is disabled. */
-	ret = enable_monitor_mode();
-	if (ret)
-		goto out;
-
 	addr = info->address;
 	ctrl = encode_ctrl_reg(info->ctrl) | 0x1;
 
@@ -362,9 +358,9 @@ int arch_install_hw_breakpoint(struct perf_event *bp)
 		}
 	}
 
-	if (WARN_ONCE(i == max_slots, "Can't find any breakpoint slot\n")) {
-		ret = -EBUSY;
-		goto out;
+	if (i == max_slots) {
+		pr_warning("Can't find any breakpoint slot\n");
+		return -EBUSY;
 	}
 
 	/* Override the breakpoint data with the step data. */
@@ -383,9 +379,7 @@ int arch_install_hw_breakpoint(struct perf_event *bp)
 
 	/* Setup the control register. */
 	write_wb_reg(ctrl_base + i, ctrl);
-
-out:
-	return ret;
+	return 0;
 }
 
 void arch_uninstall_hw_breakpoint(struct perf_event *bp)
@@ -416,8 +410,10 @@ void arch_uninstall_hw_breakpoint(struct perf_event *bp)
 		}
 	}
 
-	if (WARN_ONCE(i == max_slots, "Can't find any breakpoint slot\n"))
+	if (i == max_slots) {
+		pr_warning("Can't find any breakpoint slot\n");
 		return;
+	}
 
 	/* Ensure that we disable the mismatch breakpoint. */
 	if (info->ctrl.type != ARM_BREAKPOINT_EXECUTE &&
@@ -596,6 +592,10 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp)
 	int ret = 0;
 	u32 offset, alignment_mask = 0x3;
 
+	/* Ensure that we are in monitor debug mode. */
+	if (!monitor_mode_enabled())
+		return -ENODEV;
+
 	/* Build the arch_hw_breakpoint. */
 	ret = arch_build_bp_info(bp);
 	if (ret)
@@ -858,7 +858,7 @@ static int hw_breakpoint_pending(unsigned long addr, unsigned int fsr,
 	local_irq_enable();
 
 	/* We only handle watchpoints and hardware breakpoints. */
-	ARM_DBG_READ(c1, 0, dscr);
+	ARM_DBG_READ(c0, c1, 0, dscr);
 
 	/* Perform perf callbacks. */
 	switch (ARM_DSCR_MOE(dscr)) {
@@ -906,7 +906,7 @@ static struct undef_hook debug_reg_hook = {
 static void reset_ctrl_regs(void *unused)
 {
 	int i, raw_num_brps, err = 0, cpu = smp_processor_id();
-	u32 dbg_power;
+	u32 val;
 
 	/*
 	 * v7 debug contains save and restore registers so that debug state
@@ -919,23 +919,30 @@ static void reset_ctrl_regs(void *unused)
 	switch (debug_arch) {
 	case ARM_DEBUG_ARCH_V6:
 	case ARM_DEBUG_ARCH_V6_1:
-		/* ARMv6 cores just need to reset the registers. */
-		goto reset_regs;
+		/* ARMv6 cores clear the registers out of reset. */
+		goto out_mdbgen;
 	case ARM_DEBUG_ARCH_V7_ECP14:
 		/*
 		 * Ensure sticky power-down is clear (i.e. debug logic is
 		 * powered up).
 		 */
-		asm volatile("mrc p14, 0, %0, c1, c5, 4" : "=r" (dbg_power));
-		if ((dbg_power & 0x1) == 0)
+		ARM_DBG_READ(c1, c5, 4, val);
+		if ((val & 0x1) == 0)
 			err = -EPERM;
+
+		/*
+		 * Check whether we implement OS save and restore.
+		 */
+		ARM_DBG_READ(c1, c1, 4, val);
+		if ((val & 0x9) == 0)
+			goto clear_vcr;
 		break;
 	case ARM_DEBUG_ARCH_V7_1:
 		/*
 		 * Ensure the OS double lock is clear.
 		 */
-		asm volatile("mrc p14, 0, %0, c1, c3, 4" : "=r" (dbg_power));
-		if ((dbg_power & 0x1) == 1)
+		ARM_DBG_READ(c1, c3, 4, val);
+		if ((val & 0x1) == 1)
 			err = -EPERM;
 		break;
 	}
@@ -947,24 +954,29 @@ static void reset_ctrl_regs(void *unused)
 	}
 
 	/*
-	 * Unconditionally clear the lock by writing a value
+	 * Unconditionally clear the OS lock by writing a value
 	 * other than 0xC5ACCE55 to the access register.
 	 */
-	asm volatile("mcr p14, 0, %0, c1, c0, 4" : : "r" (0));
+	ARM_DBG_WRITE(c1, c0, 4, 0);
 	isb();
 
 	/*
 	 * Clear any configured vector-catch events before
 	 * enabling monitor mode.
 	 */
-	asm volatile("mcr p14, 0, %0, c0, c7, 0" : : "r" (0));
+clear_vcr:
+	ARM_DBG_WRITE(c0, c7, 0, 0);
 	isb();
 
-reset_regs:
-	if (enable_monitor_mode())
+	if (cpumask_intersects(&debug_err_mask, cpumask_of(cpu))) {
+		pr_warning("CPU %d failed to disable vector catch\n", cpu);
 		return;
+	}
 
-	/* We must also reset any reserved registers. */
+	/*
+	 * The control/value register pairs are UNKNOWN out of reset so
+	 * clear them to avoid spurious debug events.
+	 */
 	raw_num_brps = get_num_brp_resources();
 	for (i = 0; i < raw_num_brps; ++i) {
 		write_wb_reg(ARM_BASE_BCR + i, 0UL);
@@ -975,6 +987,19 @@ reset_regs:
 		write_wb_reg(ARM_BASE_WCR + i, 0UL);
 		write_wb_reg(ARM_BASE_WVR + i, 0UL);
 	}
+
+	if (cpumask_intersects(&debug_err_mask, cpumask_of(cpu))) {
+		pr_warning("CPU %d failed to clear debug register pairs\n", cpu);
+		return;
+	}
+
+	/*
+	 * Have a crack at enabling monitor mode. We don't actually need
+	 * it yet, but reporting an error early is useful if it fails.
+	 */
+out_mdbgen:
+	if (enable_monitor_mode())
+		cpumask_or(&debug_err_mask, &debug_err_mask, cpumask_of(cpu));
 }
 
 static int __cpuinit dbg_reset_notify(struct notifier_block *self,
@@ -992,8 +1017,6 @@ static struct notifier_block __cpuinitdata dbg_reset_nb = {
 
 static int __init arch_hw_breakpoint_init(void)
 {
-	u32 dscr;
-
 	debug_arch = get_debug_arch();
 
 	if (!debug_arch_supported()) {
@@ -1028,17 +1051,10 @@
 		core_num_brps, core_has_mismatch_brps() ? "(+1 reserved) " :
 		"", core_num_wrps);
 
-	ARM_DBG_READ(c1, 0, dscr);
-	if (dscr & ARM_DSCR_HDBGEN) {
-		max_watchpoint_len = 4;
-		pr_warning("halting debug mode enabled. Assuming maximum watchpoint size of %u bytes.\n",
-			   max_watchpoint_len);
-	} else {
-		/* Work out the maximum supported watchpoint length. */
-		max_watchpoint_len = get_max_wp_len();
-		pr_info("maximum watchpoint size is %u bytes.\n",
-			max_watchpoint_len);
-	}
+	/* Work out the maximum supported watchpoint length. */
+	max_watchpoint_len = get_max_wp_len();
+	pr_info("maximum watchpoint size is %u bytes.\n",
+		max_watchpoint_len);
 
 	/* Register debug fault handler. */
 	hook_fault_code(FAULT_CODE_DEBUG, hw_breakpoint_pending, SIGTRAP,
@@ -86,12 +86,10 @@ armpmu_map_event(struct perf_event *event,
 	return -ENOENT;
 }
 
-int
-armpmu_event_set_period(struct perf_event *event,
-			struct hw_perf_event *hwc,
-			int idx)
+int armpmu_event_set_period(struct perf_event *event)
 {
 	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
+	struct hw_perf_event *hwc = &event->hw;
 	s64 left = local64_read(&hwc->period_left);
 	s64 period = hwc->sample_period;
 	int ret = 0;
@@ -119,24 +117,22 @@ armpmu_event_set_period(struct perf_event *event,
 
 	local64_set(&hwc->prev_count, (u64)-left);
 
-	armpmu->write_counter(idx, (u64)(-left) & 0xffffffff);
+	armpmu->write_counter(event, (u64)(-left) & 0xffffffff);
 
 	perf_event_update_userpage(event);
 
 	return ret;
 }
 
-u64
-armpmu_event_update(struct perf_event *event,
-		    struct hw_perf_event *hwc,
-		    int idx)
+u64 armpmu_event_update(struct perf_event *event)
 {
 	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
+	struct hw_perf_event *hwc = &event->hw;
 	u64 delta, prev_raw_count, new_raw_count;
 
 again:
 	prev_raw_count = local64_read(&hwc->prev_count);
-	new_raw_count = armpmu->read_counter(idx);
+	new_raw_count = armpmu->read_counter(event);
 
 	if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
 			    new_raw_count) != prev_raw_count)
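[Editor's note — illustrative sketch, not part of the commit] Passing only the perf_event is enough since hwc and idx both hang off it. The counter programming itself is unchanged: write -left so the 32-bit counter overflows after exactly left events. A toy check of that arithmetic:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		int64_t left = 1000;	/* events until the next sample */
		uint32_t programmed = (uint64_t)(-left) & 0xffffffff;

		printf("programmed value   = 0x%08x\n", programmed);	/* 0xfffffc18 */
		printf("events to overflow = %u\n",
		       (unsigned)(0x100000000ULL - programmed));	/* 1000 */
		return 0;
	}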
@@ -159,7 +155,7 @@ armpmu_read(struct perf_event *event)
 	if (hwc->idx < 0)
 		return;
 
-	armpmu_event_update(event, hwc, hwc->idx);
+	armpmu_event_update(event);
 }
 
 static void
@@ -173,14 +169,13 @@ armpmu_stop(struct perf_event *event, int flags)
 	 * PERF_EF_UPDATE, see comments in armpmu_start().
	 */
 	if (!(hwc->state & PERF_HES_STOPPED)) {
-		armpmu->disable(hwc, hwc->idx);
-		armpmu_event_update(event, hwc, hwc->idx);
+		armpmu->disable(event);
+		armpmu_event_update(event);
 		hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
 	}
 }
 
-static void
-armpmu_start(struct perf_event *event, int flags)
+static void armpmu_start(struct perf_event *event, int flags)
 {
 	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
 	struct hw_perf_event *hwc = &event->hw;
@@ -200,8 +195,8 @@ armpmu_start(struct perf_event *event, int flags)
 	 * get an interrupt too soon or *way* too late if the overflow has
 	 * happened since disabling.
 	 */
-	armpmu_event_set_period(event, hwc, hwc->idx);
-	armpmu->enable(hwc, hwc->idx);
+	armpmu_event_set_period(event);
+	armpmu->enable(event);
 }
 
 static void
@@ -233,7 +228,7 @@ armpmu_add(struct perf_event *event, int flags)
 	perf_pmu_disable(event->pmu);
 
 	/* If we don't have a space for the counter then finish early. */
-	idx = armpmu->get_event_idx(hw_events, hwc);
+	idx = armpmu->get_event_idx(hw_events, event);
 	if (idx < 0) {
 		err = idx;
 		goto out;
@@ -244,7 +239,7 @@ armpmu_add(struct perf_event *event, int flags)
 	 * sure it is disabled.
 	 */
 	event->hw.idx = idx;
-	armpmu->disable(hwc, idx);
+	armpmu->disable(event);
 	hw_events->events[idx] = event;
 
 	hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
@@ -264,13 +259,12 @@ validate_event(struct pmu_hw_events *hw_events,
 	       struct perf_event *event)
 {
 	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
-	struct hw_perf_event fake_event = event->hw;
 	struct pmu *leader_pmu = event->group_leader->pmu;
 
 	if (event->pmu != leader_pmu || event->state <= PERF_EVENT_STATE_OFF)
 		return 1;
 
-	return armpmu->get_event_idx(hw_events, &fake_event) >= 0;
+	return armpmu->get_event_idx(hw_events, event) >= 0;
 }
 
 static int
@@ -316,7 +310,7 @@ static irqreturn_t armpmu_dispatch_irq(int irq, void *dev)
 static void
 armpmu_release_hardware(struct arm_pmu *armpmu)
 {
-	armpmu->free_irq();
+	armpmu->free_irq(armpmu);
 	pm_runtime_put_sync(&armpmu->plat_device->dev);
 }
 
@@ -330,7 +324,7 @@ armpmu_reserve_hardware(struct arm_pmu *armpmu)
 		return -ENODEV;
 
 	pm_runtime_get_sync(&pmu_device->dev);
-	err = armpmu->request_irq(armpmu_dispatch_irq);
+	err = armpmu->request_irq(armpmu, armpmu_dispatch_irq);
 	if (err) {
 		armpmu_release_hardware(armpmu);
 		return err;
@@ -465,13 +459,13 @@ static void armpmu_enable(struct pmu *pmu)
 	int enabled = bitmap_weight(hw_events->used_mask, armpmu->num_events);
 
 	if (enabled)
-		armpmu->start();
+		armpmu->start(armpmu);
 }
 
 static void armpmu_disable(struct pmu *pmu)
 {
 	struct arm_pmu *armpmu = to_arm_pmu(pmu);
-	armpmu->stop();
+	armpmu->stop(armpmu);
 }
 
 #ifdef CONFIG_PM_RUNTIME
@@ -517,12 +511,13 @@ static void __init armpmu_init(struct arm_pmu *armpmu)
 	};
 }
 
-int armpmu_register(struct arm_pmu *armpmu, char *name, int type)
+int armpmu_register(struct arm_pmu *armpmu, int type)
 {
 	armpmu_init(armpmu);
+	pm_runtime_enable(&armpmu->plat_device->dev);
+	pr_info("enabled with %s PMU driver, %d counters available\n",
+			armpmu->name, armpmu->num_events);
-	return perf_pmu_register(&armpmu->pmu, name, type);
+	return perf_pmu_register(&armpmu->pmu, armpmu->name, type);
 }
 
 /*
@@ -576,6 +571,10 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
 {
 	struct frame_tail __user *tail;
 
+	if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
+		/* We don't support guest os callchain now */
+		return;
+	}
+
 	tail = (struct frame_tail __user *)regs->ARM_fp - 1;
 
@@ -603,9 +602,41 @@ perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
 {
 	struct stackframe fr;
 
+	if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
+		/* We don't support guest os callchain now */
+		return;
+	}
+
 	fr.fp = regs->ARM_fp;
 	fr.sp = regs->ARM_sp;
 	fr.lr = regs->ARM_lr;
 	fr.pc = regs->ARM_pc;
 	walk_stackframe(&fr, callchain_trace, entry);
 }
+
+unsigned long perf_instruction_pointer(struct pt_regs *regs)
+{
+	if (perf_guest_cbs && perf_guest_cbs->is_in_guest())
+		return perf_guest_cbs->get_guest_ip();
+
+	return instruction_pointer(regs);
+}
+
+unsigned long perf_misc_flags(struct pt_regs *regs)
+{
+	int misc = 0;
+
+	if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
+		if (perf_guest_cbs->is_user_mode())
+			misc |= PERF_RECORD_MISC_GUEST_USER;
+		else
+			misc |= PERF_RECORD_MISC_GUEST_KERNEL;
+	} else {
+		if (user_mode(regs))
+			misc |= PERF_RECORD_MISC_USER;
+		else
+			misc |= PERF_RECORD_MISC_KERNEL;
+	}
+
+	return misc;
+}
Some files were not shown because too many files have changed in this diff.