Merge branch 'linux-linaro-lsk' into linux-linaro-lsk-android

Conflicts:
	drivers/of/fdt.c
Mark Brown
2014-07-24 23:01:03 +01:00
42 changed files with 935 additions and 291 deletions

Documentation/arm64/booting.txt

@@ -111,8 +111,14 @@ Before jumping into the kernel, the following conditions must be met:
 - Caches, MMUs
   The MMU must be off.
   Instruction cache may be on or off.
-  Data cache must be off and invalidated.
-  External caches (if present) must be configured and disabled.
+  The address range corresponding to the loaded kernel image must be
+  cleaned to the PoC. In the presence of a system cache or other
+  coherent masters with caches enabled, this will typically require
+  cache maintenance by VA rather than set/way operations.
+  System caches which respect the architected cache maintenance by VA
+  operations must be configured and may be enabled.
+  System caches which do not respect architected cache maintenance by VA
+  operations (not recommended) must be configured and disabled.
 
 - Architected timers
   CNTFRQ must be programmed with the timer frequency.
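The relaxed requirement boils down to: clean the loaded image range to the PoC using cache maintenance by VA. As a rough illustration of what a loader now has to do — a minimal C sketch with inline assembly; the helper name and the fixed 64-byte line size are assumptions, not part of this commit (real code should derive the line size from CTR_EL0):

	#include <stdint.h>

	/* Clean [start, end) to the Point of Coherency, one line at a time. */
	static void clean_image_to_poc(uintptr_t start, uintptr_t end)
	{
		const uintptr_t line = 64;		/* assumed D-cache line size */
		uintptr_t addr;

		for (addr = start & ~(line - 1); addr < end; addr += line)
			asm volatile("dc cvac, %0" : : "r" (addr) : "memory");
		asm volatile("dsb sy" : : : "memory");	/* complete the maintenance */
	}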

arch/arc/kernel/devtree.c

@@ -40,7 +40,7 @@ struct machine_desc * __init setup_machine_fdt(void *dt)
 	const char *model, *compat;
 	void *clk;
 	char manufacturer[16];
-	unsigned long len;
+	int len;
 
 	/* check device tree validity */
 	if (be32_to_cpu(devtree->magic) != OF_DT_HEADER)

arch/arm/kernel/devtree.c

@@ -212,7 +212,7 @@ struct machine_desc * __init setup_machine_fdt(unsigned int dt_phys)
 	}
 
 	if (!mdesc_best) {
 		const char *prop;
-		long size;
+		int size;
 
 		early_print("\nError: unrecognized/unsupported "
 			    "device tree compatible list:\n[ ");

arch/arm/mach-vexpress/platsmp.c

@@ -53,7 +53,7 @@ static int __init vexpress_dt_find_scu(unsigned long node,
 {
 	if (of_flat_dt_match(node, vexpress_dt_cortex_a9_match)) {
 		phys_addr_t phys_addr;
-		__be32 *reg = of_get_flat_dt_prop(node, "reg", NULL);
+		const __be32 *reg = of_get_flat_dt_prop(node, "reg", NULL);
 
 		if (WARN_ON(!reg))
 			return -EINVAL;

arch/arm/plat-samsung/s5p-dev-mfc.c

@@ -116,8 +116,8 @@ device_initcall(s5p_mfc_memory_init);
 int __init s5p_fdt_find_mfc_mem(unsigned long node, const char *uname,
 				int depth, void *data)
 {
-	__be32 *prop;
-	unsigned long len;
+	const __be32 *prop;
+	int len;
 	struct s5p_mfc_dt_meminfo *mfc_mem = data;
 
 	if (!data)
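The four hunks above are all fallout from the same of/fdt interface change carried by this merge: of_get_flat_dt_prop() now returns const data and reports the property length through an int rather than an unsigned long. A sketch of the updated calling convention — the callback and its "reg" parsing are illustrative, not taken from this diff:

	#include <linux/of_fdt.h>

	static int __init example_scan_cb(unsigned long node, const char *uname,
					  int depth, void *data)
	{
		const __be32 *reg;	/* const: flat-DT data is read-only */
		int len;		/* int, not unsigned long */

		reg = of_get_flat_dt_prop(node, "reg", &len);
		if (!reg || len < (int)(2 * sizeof(__be32)))
			return 0;	/* not found here; keep scanning */

		*(u64 *)data = ((u64)be32_to_cpu(reg[0]) << 32) | be32_to_cpu(reg[1]);
		return 1;		/* found; stop the scan */
	}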

arch/arm64/boot/dts/apm-mustang.dts

@@ -24,3 +24,7 @@
 		reg = < 0x1 0x00000000 0x0 0x80000000 >; /* Updated by bootloader */
 	};
 };
+
+&serial0 {
+	status = "ok";
+};

arch/arm64/boot/dts/apm-storm.dtsi

@@ -273,8 +273,9 @@
 		};
 
 		serial0: serial@1c020000 {
+			status = "disabled";
 			device_type = "serial";
-			compatible = "ns16550";
+			compatible = "ns16550a";
 			reg = <0 0x1c020000 0x0 0x1000>;
 			reg-shift = <2>;
 			clock-frequency = <10000000>;	/* Updated by bootloader */
@@ -282,6 +283,39 @@
 			interrupts = <0x0 0x4c 0x4>;
 		};
 
+		serial1: serial@1c021000 {
+			status = "disabled";
+			device_type = "serial";
+			compatible = "ns16550a";
+			reg = <0 0x1c021000 0x0 0x1000>;
+			reg-shift = <2>;
+			clock-frequency = <10000000>;	/* Updated by bootloader */
+			interrupt-parent = <&gic>;
+			interrupts = <0x0 0x4d 0x4>;
+		};
+
+		serial2: serial@1c022000 {
+			status = "disabled";
+			device_type = "serial";
+			compatible = "ns16550a";
+			reg = <0 0x1c022000 0x0 0x1000>;
+			reg-shift = <2>;
+			clock-frequency = <10000000>;	/* Updated by bootloader */
+			interrupt-parent = <&gic>;
+			interrupts = <0x0 0x4e 0x4>;
+		};
+
+		serial3: serial@1c023000 {
+			status = "disabled";
+			device_type = "serial";
+			compatible = "ns16550a";
+			reg = <0 0x1c023000 0x0 0x1000>;
+			reg-shift = <2>;
+			clock-frequency = <10000000>;	/* Updated by bootloader */
+			interrupt-parent = <&gic>;
+			interrupts = <0x0 0x4f 0x4>;
+		};
+
 		phy1: phy@1f21a000 {
 			compatible = "apm,xgene-phy";
 			reg = <0x0 0x1f21a000 0x0 0x100>;

arch/arm64/include/asm/io.h

@@ -225,6 +225,7 @@ extern void __memset_io(volatile void __iomem *, int, size_t);
  */
 extern void __iomem *__ioremap(phys_addr_t phys_addr, size_t size, pgprot_t prot);
 extern void __iounmap(volatile void __iomem *addr);
+extern void __iomem *ioremap_cache(phys_addr_t phys_addr, size_t size);
 
 #define ioremap(addr, size)		__ioremap((addr), (size), __pgprot(PROT_DEVICE_nGnRE))
 #define ioremap_nocache(addr, size)	__ioremap((addr), (size), __pgprot(PROT_DEVICE_nGnRE))
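ioremap_cache() yields a Normal (cacheable) mapping, unlike the Device-nGnRE mapping produced by the ioremap()/ioremap_nocache() macros above. A hypothetical use, e.g. for a RAM-resident firmware table — the function and parameter names are made up for illustration:

	#include <linux/io.h>

	static int __init map_fw_table(phys_addr_t table_phys, size_t table_size)
	{
		void __iomem *tbl = ioremap_cache(table_phys, table_size);

		if (!tbl)
			return -ENOMEM;

		/* ... parse the table through cacheable accesses ... */

		iounmap(tbl);
		return 0;
	}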

arch/arm64/include/asm/memory.h

@@ -97,6 +97,12 @@
 #define MT_NORMAL_NC		3
 #define MT_NORMAL		4
 
+/*
+ * Memory types for Stage-2 translation
+ */
+#define MT_S2_NORMAL		0xf
+#define MT_S2_DEVICE_nGnRE	0x1
+
 #ifndef __ASSEMBLY__
 
 extern phys_addr_t memstart_addr;

arch/arm64/include/asm/mmu.h

@@ -31,5 +31,7 @@ extern void paging_init(void);
 extern void setup_mm_for_reboot(void);
 extern void __iomem *early_io_map(phys_addr_t phys, unsigned long virt);
 extern void init_mem_pgprot(void);
+/* create an identity mapping for memory (or io if map_io is true) */
+extern void create_id_mapping(phys_addr_t addr, phys_addr_t size, int map_io);
 
 #endif
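A hypothetical caller of the new interface (not from this diff): identity-map a platform region so it remains reachable across an MMU transition; map_io selects device rather than normal-memory attributes.

	#include <linux/types.h>
	#include <asm/mmu.h>

	static void __init idmap_platform_region(phys_addr_t base, phys_addr_t size,
						 bool is_device)
	{
		/* map_io != 0 requests device (nGnRE) attributes */
		create_id_mapping(base, size, is_device ? 1 : 0);
	}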

arch/arm64/include/asm/page.h

@@ -31,6 +31,15 @@
 /* We do define AT_SYSINFO_EHDR but don't use the gate mechanism */
 #define __HAVE_ARCH_GATE_AREA		1
 
+/*
+ * The idmap and swapper page tables need some space reserved in the kernel
+ * image. The idmap only requires a pgd and a next level table to (section) map
+ * the kernel, while the swapper also maps the FDT and requires an additional
+ * table to map an early UART. See __create_page_tables for more information.
+ */
+#define SWAPPER_DIR_SIZE	(3 * PAGE_SIZE)
+#define IDMAP_DIR_SIZE		(2 * PAGE_SIZE)
+
 #ifndef __ASSEMBLY__
 
 #ifdef CONFIG_ARM64_64K_PAGES
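For reference, with a 4KB granule these constants work out to 12KB for the swapper tables (pgd + kernel/FDT table + early-UART table) and 8KB for the idmap (pgd + one next-level table); a trivial sketch of the total, purely illustrative:

	/* Illustration only (not in the diff): 5 * PAGE_SIZE = 20KB with 4KB pages */
	#define EXAMPLE_PGTABLE_FOOTPRINT	(SWAPPER_DIR_SIZE + IDMAP_DIR_SIZE)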

arch/arm64/include/asm/pgtable-hwdef.h

@@ -79,6 +79,24 @@
 #define PTE_ATTRINDX(t)		(_AT(pteval_t, (t)) << 2)
 #define PTE_ATTRINDX_MASK	(_AT(pteval_t, 7) << 2)
 
+/*
+ * 2nd stage PTE definitions
+ */
+#define PTE_S2_RDONLY		(_AT(pteval_t, 1) << 6)   /* HAP[2:1] */
+#define PTE_S2_RDWR		(_AT(pteval_t, 3) << 6)   /* HAP[2:1] */
+
+/*
+ * Memory Attribute override for Stage-2 (MemAttr[3:0])
+ */
+#define PTE_S2_MEMATTR(t)	(_AT(pteval_t, (t)) << 2)
+#define PTE_S2_MEMATTR_MASK	(_AT(pteval_t, 0xf) << 2)
+
+/*
+ * EL2/HYP PTE/PMD definitions
+ */
+#define PMD_HYP			PMD_SECT_USER
+#define PTE_HYP			PTE_USER
+
 /*
  * 40-bit physical address supported.
  */
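Combining the stage-2 definitions from this header with the MT_S2_* indices added to memory.h above, the attribute bits for a writable stage-2 mapping of normal memory would compose as follows — illustrative only; real KVM code also sets the descriptor type, validity and access-flag bits:

	#include <asm/page.h>
	#include <asm/memory.h>
	#include <asm/pgtable-hwdef.h>

	/* Writable, Normal-memory stage-2 attributes (sketch) */
	static const pteval_t example_s2_prot =
		PTE_S2_MEMATTR(MT_S2_NORMAL) | PTE_S2_RDWR;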

arch/arm64/include/asm/pgtable.h

@@ -288,6 +288,12 @@ extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
 #define pmd_bad(pmd)		(!(pmd_val(pmd) & 2))
 
+#define pmd_table(pmd)		((pmd_val(pmd) & PMD_TYPE_MASK) == \
+				 PMD_TYPE_TABLE)
+#define pmd_sect(pmd)		((pmd_val(pmd) & PMD_TYPE_MASK) == \
+				 PMD_TYPE_SECT)
+
 static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
 {
 	*pmdp = pmd;
@@ -372,9 +378,6 @@ static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
 extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
 extern pgd_t idmap_pg_dir[PTRS_PER_PGD];
 
-#define SWAPPER_DIR_SIZE	(3 * PAGE_SIZE)
-#define IDMAP_DIR_SIZE		(2 * PAGE_SIZE)
-
 /*
  * Encode and decode a swap entry:
  * bits 0-1: present (must be zero)
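A sketch of how the new predicates classify a pmd entry — the helper is hypothetical, not part of this diff:

	/* A pmd is either a block (section) mapping, a table pointer, or invalid. */
	static int example_pmd_kind(pmd_t pmd)
	{
		if (pmd_sect(pmd))
			return 1;	/* block mapping: no pte level below */
		if (pmd_table(pmd))
			return 2;	/* points to a next-level pte table */
		return 0;		/* invalid / none */
	}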

arch/arm64/kernel/head.S

@@ -26,6 +26,7 @@
 #include <asm/assembler.h>
 #include <asm/ptrace.h>
 #include <asm/asm-offsets.h>
+#include <asm/cache.h>
 #include <asm/cputype.h>
 #include <asm/memory.h>
 #include <asm/thread_info.h>
@@ -34,29 +35,17 @@
 #include <asm/page.h>
 #include <asm/virt.h>
 
-/*
- * swapper_pg_dir is the virtual address of the initial page table. We place
- * the page tables 3 * PAGE_SIZE below KERNEL_RAM_VADDR. The idmap_pg_dir has
- * 2 pages and is placed below swapper_pg_dir.
- */
 #define KERNEL_RAM_VADDR	(PAGE_OFFSET + TEXT_OFFSET)
 
 #if (KERNEL_RAM_VADDR & 0xfffff) != 0x80000
 #error KERNEL_RAM_VADDR must start at 0xXXX80000
 #endif
 
-#define SWAPPER_DIR_SIZE	(3 * PAGE_SIZE)
-#define IDMAP_DIR_SIZE		(2 * PAGE_SIZE)
-
-	.globl	swapper_pg_dir
-	.equ	swapper_pg_dir, KERNEL_RAM_VADDR - SWAPPER_DIR_SIZE
-
-	.globl	idmap_pg_dir
-	.equ	idmap_pg_dir, swapper_pg_dir - IDMAP_DIR_SIZE
-
-	.macro	pgtbl, ttb0, ttb1, phys
-	add	\ttb1, \phys, #TEXT_OFFSET - SWAPPER_DIR_SIZE
-	sub	\ttb0, \ttb1, #IDMAP_DIR_SIZE
+	.macro	pgtbl, ttb0, ttb1, virt_to_phys
+	ldr	\ttb1, =swapper_pg_dir
+	ldr	\ttb0, =idmap_pg_dir
+	add	\ttb1, \ttb1, \virt_to_phys
+	add	\ttb0, \ttb0, \virt_to_phys
 	.endm
 
 #ifdef CONFIG_ARM64_64K_PAGES
@@ -240,8 +229,9 @@ ENDPROC(set_cpu_boot_mode_flag)
  * This is not in .bss, because we set it sufficiently early that the boot-time
  * zeroing of .bss would clobber it.
  */
-	.pushsection	.data
+	.pushsection	.data..cacheline_aligned
 ENTRY(__boot_cpu_mode)
+	.align	L1_CACHE_SHIFT
 	.long	BOOT_CPU_MODE_EL2
 	.long	0
 	.popsection
@@ -297,7 +287,7 @@ ENTRY(secondary_startup)
 	mov	x23, x0				// x23=current cpu_table
 	cbz	x23, __error_p			// invalid processor (x23=0)?
-	pgtbl	x25, x26, x24			// x25=TTBR0, x26=TTBR1
+	pgtbl	x25, x26, x28			// x25=TTBR0, x26=TTBR1
 	ldr	x12, [x23, #CPU_INFO_SETUP]
 	add	x12, x12, x28			// __virt_to_phys
 	blr	x12				// initialise processor
@@ -339,8 +329,13 @@ ENDPROC(__enable_mmu)
  * x27 = *virtual* address to jump to upon completion
 *
 * other registers depend on the function called upon completion
+ *
+ * We align the entire function to the smallest power of two larger than it to
+ * ensure it fits within a single block map entry. Otherwise were PHYS_OFFSET
+ * close to the end of a 512MB or 1GB block we might require an additional
+ * table to map the entire function.
 */
-	.align	6
+	.align	4
 __turn_mmu_on:
 	msr	sctlr_el1, x0
 	isb
@@ -406,7 +401,16 @@ ENDPROC(__calc_phys_offset)
 * - pgd entry for fixed mappings (TTBR1)
 */
 __create_page_tables:
-	pgtbl	x25, x26, x24			// idmap_pg_dir and swapper_pg_dir addresses
+	pgtbl	x25, x26, x28			// idmap_pg_dir and swapper_pg_dir addresses
 	mov	x27, lr
+
+	/*
+	 * Invalidate the idmap and swapper page tables to avoid potential
+	 * dirty cache lines being evicted.
+	 */
+	mov	x0, x25
+	add	x1, x26, #SWAPPER_DIR_SIZE
+	bl	__inval_cache_range
+
 	/*
 	 * Clear the idmap and swapper page tables.
@@ -466,6 +470,17 @@ __create_page_tables:
 	ldr	x5, =FIXADDR_TOP		// Fixed mapping virtual address
 	add	x0, x26, #2 * PAGE_SIZE		// section table address
 	create_pgd_entry x26, x0, x5, x6, x7
+
+	/*
+	 * Since the page tables have been populated with non-cacheable
+	 * accesses (MMU disabled), invalidate the idmap and swapper page
+	 * tables again to remove any speculatively loaded cache lines.
+	 */
+	mov	x0, x25
+	add	x1, x26, #SWAPPER_DIR_SIZE
+	bl	__inval_cache_range
+
 	mov	lr, x27
 	ret
 ENDPROC(__create_page_tables)
 	.ltorg
@@ -475,7 +490,7 @@ ENDPROC(__create_page_tables)
 __switch_data:
 	.quad	__mmap_switched
 	.quad	__bss_start			// x6
-	.quad	_end				// x7
+	.quad	__bss_stop			// x7
 	.quad	processor_id			// x4
 	.quad	__fdt_pointer			// x5
 	.quad	memstart_addr			// x6
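In C terms, the reworked pgtbl macro now derives the physical addresses of the page tables from the linker-provided symbols plus the virt-to-phys delta that __calc_phys_offset leaves in x28, instead of hard-coding a fixed offset below the kernel text. A rough equivalent, purely illustrative:

	extern pgd_t idmap_pg_dir[], swapper_pg_dir[];

	static void pgtbl_equivalent(unsigned long virt_to_phys_delta,	/* x28 */
				     unsigned long *ttb0, unsigned long *ttb1)
	{
		*ttb1 = (unsigned long)swapper_pg_dir + virt_to_phys_delta;	/* x26 */
		*ttb0 = (unsigned long)idmap_pg_dir + virt_to_phys_delta;	/* x25 */
	}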

arch/arm64/kernel/vdso.c

@@ -138,11 +138,12 @@ int arch_setup_additional_pages(struct linux_binprm *bprm,
 				int uses_interp)
 {
 	struct mm_struct *mm = current->mm;
-	unsigned long vdso_base, vdso_mapping_len;
+	unsigned long vdso_base, vdso_text_len, vdso_mapping_len;
 	int ret;
 
+	vdso_text_len = vdso_pages << PAGE_SHIFT;
 	/* Be sure to map the data page */
-	vdso_mapping_len = (vdso_pages + 1) << PAGE_SHIFT;
+	vdso_mapping_len = vdso_text_len + PAGE_SIZE;
 
 	down_write(&mm->mmap_sem);
 	vdso_base = get_unmapped_area(NULL, 0, vdso_mapping_len, 0, 0);
@@ -152,35 +153,52 @@ int arch_setup_additional_pages(struct linux_binprm *bprm,
 	}
 	mm->context.vdso = (void *)vdso_base;
 
-	ret = install_special_mapping(mm, vdso_base, vdso_mapping_len,
+	ret = install_special_mapping(mm, vdso_base, vdso_text_len,
 				      VM_READ|VM_EXEC|
 				      VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
 				      vdso_pagelist);
-	if (ret) {
-		mm->context.vdso = NULL;
+	if (ret)
 		goto up_fail;
-	}
+
+	vdso_base += vdso_text_len;
+	ret = install_special_mapping(mm, vdso_base, PAGE_SIZE,
+				      VM_READ|VM_MAYREAD,
+				      vdso_pagelist + vdso_pages);
+	if (ret)
+		goto up_fail;
 
 	up_write(&mm->mmap_sem);
 	return 0;
 
 up_fail:
+	mm->context.vdso = NULL;
 	up_write(&mm->mmap_sem);
 	return ret;
 }
 
 const char *arch_vma_name(struct vm_area_struct *vma)
 {
+	unsigned long vdso_text;
+
+	if (!vma->vm_mm)
+		return NULL;
+
+	vdso_text = (unsigned long)vma->vm_mm->context.vdso;
+
 	/*
 	 * We can re-use the vdso pointer in mm_context_t for identifying
 	 * the vectors page for compat applications. The vDSO will always
 	 * sit above TASK_UNMAPPED_BASE and so we don't need to worry about
 	 * it conflicting with the vectors base.
	 */
-	if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso) {
+	if (vma->vm_start == vdso_text) {
 #ifdef CONFIG_COMPAT
 		if (vma->vm_start == AARCH32_VECTORS_BASE)
 			return "[vectors]";
 #endif
 		return "[vdso]";
+	} else if (vma->vm_start == (vdso_text + (vdso_pages << PAGE_SHIFT))) {
+		return "[vvar]";
 	}
 
 	return NULL;
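With this change the vDSO text and its data page live in two separate VMAs, and arch_vma_name() reports the trailing data page as [vvar]. A quick userspace spot-check — illustrative, not part of the diff:

	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		char line[256];
		FILE *f = fopen("/proc/self/maps", "r");

		if (!f)
			return 1;
		while (fgets(line, sizeof(line), f))
			if (strstr(line, "[vdso]") || strstr(line, "[vvar]"))
				fputs(line, stdout);	/* expect two adjacent VMAs */
		fclose(f);
		return 0;
	}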

arch/arm64/kernel/vdso/Makefile

@@ -47,9 +47,9 @@ $(obj-vdso): %.o: %.S
 	$(call if_changed_dep,vdsoas)
 
 # Actual build commands
-quiet_cmd_vdsold = VDSOL $@
+quiet_cmd_vdsold = VDSOL   $@
       cmd_vdsold = $(CC) $(c_flags) -Wl,-n -Wl,-T $^ -o $@
-quiet_cmd_vdsoas = VDSOA $@
+quiet_cmd_vdsoas = VDSOA   $@
      cmd_vdsoas = $(CC) $(a_flags) -c -o $@ $<
# Install commands for the unstripped file

arch/arm64/kernel/vmlinux.lds.S

@@ -90,6 +90,13 @@ SECTIONS
 	_edata = .;
 
 	BSS_SECTION(0, 0, 0)
+
+	. = ALIGN(PAGE_SIZE);
+	idmap_pg_dir = .;
+	. += IDMAP_DIR_SIZE;
+	swapper_pg_dir = .;
+	. += SWAPPER_DIR_SIZE;
+
 	_end = .;
 
 	STABS_DEBUG
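The linker script now carves the idmap and swapper tables out of the image itself, between the end of BSS and _end. A hypothetical sanity check of that invariant — the symbols come from the script, but the check itself is not in the diff:

	#include <linux/bug.h>
	#include <asm/page.h>

	extern char __bss_stop[], _end[];
	extern pgd_t idmap_pg_dir[], swapper_pg_dir[];

	static void __init check_pgdir_placement(void)
	{
		/* tables start page-aligned after BSS and end exactly at _end */
		BUG_ON((char *)idmap_pg_dir < __bss_stop);
		BUG_ON((char *)swapper_pg_dir + SWAPPER_DIR_SIZE != _end);
	}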

arch/arm64/mm/cache.S

@@ -167,6 +167,14 @@ ENTRY(__flush_dcache_area)
 	ret
 ENDPROC(__flush_dcache_area)
 
+/*
+ *	__inval_cache_range(start, end)
+ *	- start   - start address of region
+ *	- end     - end address of region
+ */
+ENTRY(__inval_cache_range)
+	/* FALLTHROUGH */
+
 /*
  *	__dma_inv_range(start, end)
  *	- start   - virtual start address of region
@@ -190,6 +198,7 @@ __dma_inv_range:
 	b.lo	2b
 	dsb	sy
 	ret
+ENDPROC(__inval_cache_range)
 ENDPROC(__dma_inv_range)
 
 /*
/*

arch/arm64/mm/copypage.c

@@ -27,8 +27,10 @@ void __cpu_copy_user_page(void *kto, const void *kfrom, unsigned long vaddr)
 	copy_page(kto, kfrom);
 	__flush_dcache_area(kto, PAGE_SIZE);
 }
+EXPORT_SYMBOL_GPL(__cpu_copy_user_page);
 
 void __cpu_clear_user_page(void *kaddr, unsigned long vaddr)
 {
 	clear_page(kaddr);
 }
+EXPORT_SYMBOL_GPL(__cpu_clear_user_page);

arch/arm64/mm/init.c

@@ -136,7 +136,10 @@ void __init arm64_memblock_init(void)
 {
 	u64 *reserve_map, base, size;
 
-	/* Register the kernel text, kernel data and initrd with memblock */
+	/*
+	 * Register the kernel text, kernel data, initrd, and initial
+	 * pagetables with memblock.
+	 */
 	memblock_reserve(__pa(_text), _end - _text);
 #ifdef CONFIG_BLK_DEV_INITRD
 	if (phys_initrd_size) {
@@ -148,13 +151,6 @@ void __init arm64_memblock_init(void)
 	}
 #endif
 
-	/*
-	 * Reserve the page tables.  These are already in use,
-	 * and can only be in node 0.
-	 */
-	memblock_reserve(__pa(swapper_pg_dir), SWAPPER_DIR_SIZE);
-	memblock_reserve(__pa(idmap_pg_dir), IDMAP_DIR_SIZE);
-
 	/* Reserve the dtb region */
 	memblock_reserve(virt_to_phys(initial_boot_params),
 			 be32_to_cpu(initial_boot_params->totalsize));

Some files were not shown because too many files have changed in this diff.