Mirror of https://github.com/Dasharo/linux.git, synced 2026-03-06 15:25:10 -08:00
Merge branch 'core-memblock-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'core-memblock-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (74 commits)
  x86-64: Only set max_pfn_mapped to 512 MiB if we enter via head_64.S
  xen: Cope with unmapped pages when initializing kernel pagetable
  memblock, bootmem: Round pfn properly for memory and reserved regions
  memblock: Annotate memblock functions with __init_memblock
  memblock: Allow memblock_init to be called early
  memblock/arm: Fix memblock_region_is_memory() typo
  x86, memblock: Remove __memblock_x86_find_in_range_size()
  memblock: Fix wraparound in find_region()
  x86-32, memblock: Make add_highpages honor early reserved ranges
  x86, memblock: Fix crashkernel allocation
  arm, memblock: Fix the sparsemem build
  memblock: Fix section mismatch warnings
  powerpc, memblock: Fix memblock API change fallout
  memblock, microblaze: Fix memblock API change fallout
  x86: Remove old bootmem code
  x86, memblock: Use memblock_memory_size()/memblock_free_memory_size() to get correct dma_reserve
  x86: Remove not used early_res code
  x86, memblock: Replace e820_/_early string with memblock_
  x86: Use memblock to replace early_res
  x86, memblock: Use memblock_debug to control debug message print out
  ...

Fix up trivial conflicts in arch/x86/kernel/setup.c and kernel/Makefile
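Taken together, the hunks below repeat one mechanical change: callers stop reaching into memblock's region arrays (memblock.memory.region[i], .cnt, memblock_size_bytes()) and use the new for_each_memblock() iterator with per-region accessors instead. The standalone sketch below contrasts the two styles against a simplified model of the 2.6.37-era structures; the struct layout and macro shape follow the kernel, but the sample regions and everything else are invented for the demo.

#include <stdio.h>

typedef unsigned long long phys_addr_t;

/* Simplified model of the 2.6.37-era structures; in the old API the
 * per-range type was called memblock_property and the array field
 * region[], which is exactly what the removed lines below index. */
struct memblock_region {
	phys_addr_t base;
	phys_addr_t size;
};

struct memblock_type {
	unsigned long cnt;
	struct memblock_region regions[8];
};

struct memblock {
	struct memblock_type memory;
	struct memblock_type reserved;
};

static struct memblock memblock = {
	.memory = { .cnt = 2, .regions = {
		{ 0x00000000ULL, 0x10000000ULL },  /* 256 MB at 0 (made up) */
		{ 0x20000000ULL, 0x10000000ULL },  /* 256 MB at 512 MB (made up) */
	} },
};

/* Same shape as the kernel macro this merge introduces: walk a region
 * array without exposing cnt or the array itself at the call site. */
#define for_each_memblock(memblock_type, region)			\
	for (region = memblock.memblock_type.regions;			\
	     region < (memblock.memblock_type.regions +			\
		       memblock.memblock_type.cnt);			\
	     region++)

int main(void)
{
	struct memblock_region *reg;
	unsigned long i;

	/* Old style, as removed throughout the diff: */
	for (i = 0; i < memblock.memory.cnt; i++)
		printf("old: base=%#llx size=%#llx\n",
		       memblock.memory.regions[i].base,
		       memblock.memory.regions[i].size);

	/* New style, as added throughout the diff: */
	for_each_memblock(memory, reg)
		printf("new: base=%#llx size=%#llx\n", reg->base, reg->size);

	return 0;
}

The iterator keeps callers independent of the array layout, which is what the per-architecture fallout fixes in this series are converging on.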
@@ -150,6 +150,7 @@ static void __init find_limits(struct meminfo *mi,
 static void __init arm_bootmem_init(struct meminfo *mi,
 	unsigned long start_pfn, unsigned long end_pfn)
 {
+	struct memblock_region *reg;
 	unsigned int boot_pages;
 	phys_addr_t bitmap;
 	pg_data_t *pgdat;
@@ -180,13 +181,13 @@ static void __init arm_bootmem_init(struct meminfo *mi,
 	/*
 	 * Reserve the memblock reserved regions in bootmem.
 	 */
-	for (i = 0; i < memblock.reserved.cnt; i++) {
-		phys_addr_t start = memblock_start_pfn(&memblock.reserved, i);
-		if (start >= start_pfn &&
-		    memblock_end_pfn(&memblock.reserved, i) <= end_pfn)
+	for_each_memblock(reserved, reg) {
+		phys_addr_t start = memblock_region_reserved_base_pfn(reg);
+		phys_addr_t end = memblock_region_reserved_end_pfn(reg);
+		if (start >= start_pfn && end <= end_pfn)
 			reserve_bootmem_node(pgdat, __pfn_to_phys(start),
-					     memblock_size_bytes(&memblock.reserved, i),
-					     BOOTMEM_DEFAULT);
+					     (end - start) << PAGE_SHIFT,
+					     BOOTMEM_DEFAULT);
 	}
 }

@@ -237,20 +238,7 @@ static void __init arm_bootmem_free(struct meminfo *mi, unsigned long min,
 #ifndef CONFIG_SPARSEMEM
 int pfn_valid(unsigned long pfn)
 {
-	struct memblock_region *mem = &memblock.memory;
-	unsigned int left = 0, right = mem->cnt;
-
-	do {
-		unsigned int mid = (right + left) / 2;
-
-		if (pfn < memblock_start_pfn(mem, mid))
-			right = mid;
-		else if (pfn >= memblock_end_pfn(mem, mid))
-			left = mid + 1;
-		else
-			return 1;
-	} while (left < right);
-	return 0;
+	return memblock_is_memory(pfn << PAGE_SHIFT);
 }
 EXPORT_SYMBOL(pfn_valid);

@@ -260,10 +248,11 @@ static void arm_memory_present(void)
 #else
 static void arm_memory_present(void)
 {
-	int i;
-	for (i = 0; i < memblock.memory.cnt; i++)
-		memory_present(0, memblock_start_pfn(&memblock.memory, i),
-			       memblock_end_pfn(&memblock.memory, i));
+	struct memblock_region *reg;
+
+	for_each_memblock(memory, reg)
+		memory_present(0, memblock_region_memory_base_pfn(reg),
+			       memblock_region_memory_end_pfn(reg));
 }
 #endif

@@ -173,11 +173,7 @@ static int check_fbmem_region(int region_idx, struct omapfb_mem_region *rg,

 static int valid_sdram(unsigned long addr, unsigned long size)
 {
-	struct memblock_property res;
-
-	res.base = addr;
-	res.size = size;
-	return !memblock_find(&res) && res.base == addr && res.size == size;
+	return memblock_is_region_memory(addr, size);
 }

 static int reserve_sdram(unsigned long addr, unsigned long size)
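The fourteen-line binary search in the old ARM pfn_valid() collapses into one call. Below is a rough model of what the memblock_is_memory() predicate checks, assuming its era semantics of address containment in any registered memory region; all names and values here are invented for the demo.

#include <stdbool.h>

typedef unsigned long long phys_addr_t;

struct region {
	phys_addr_t base;
	phys_addr_t size;
};

/* Rough model of the memblock_is_memory() predicate behind the new
 * pfn_valid(): true when the address falls inside any registered
 * memory region. */
static bool is_memory(const struct region *regs, int nr, phys_addr_t addr)
{
	int i;

	for (i = 0; i < nr; i++)
		if (addr >= regs[i].base && addr < regs[i].base + regs[i].size)
			return true;
	return false;
}

int main(void)
{
	struct region mem[] = { { 0x00000000ULL, 0x10000000ULL } };

	return !is_memory(mem, 1, 0x08000000ULL);	/* inside, exits 0 */
}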
@@ -9,9 +9,6 @@
 #ifndef _ASM_MICROBLAZE_MEMBLOCK_H
 #define _ASM_MICROBLAZE_MEMBLOCK_H

-/* MEMBLOCK limit is OFF */
-#define MEMBLOCK_REAL_LIMIT	0xFFFFFFFF
-
 #endif /* _ASM_MICROBLAZE_MEMBLOCK_H */

@@ -70,16 +70,16 @@ static void __init paging_init(void)

 void __init setup_memory(void)
 {
 	int i;
 	unsigned long map_size;
+	struct memblock_region *reg;
+
 #ifndef CONFIG_MMU
 	u32 kernel_align_start, kernel_align_size;

 	/* Find main memory where is the kernel */
-	for (i = 0; i < memblock.memory.cnt; i++) {
-		memory_start = (u32) memblock.memory.region[i].base;
-		memory_end = (u32) memblock.memory.region[i].base
-				+ (u32) memblock.memory.region[i].size;
+	for_each_memblock(memory, reg) {
+		memory_start = (u32)reg->base;
+		memory_end = (u32) reg->base + reg->size;
 		if ((memory_start <= (u32)_text) &&
 			((u32)_text <= memory_end)) {
 			memory_size = memory_end - memory_start;
@@ -142,12 +142,10 @@ void __init setup_memory(void)
 	free_bootmem(memory_start, memory_size);

 	/* reserve allocate blocks */
-	for (i = 0; i < memblock.reserved.cnt; i++) {
-		pr_debug("reserved %d - 0x%08x-0x%08x\n", i,
-			 (u32) memblock.reserved.region[i].base,
-			 (u32) memblock_size_bytes(&memblock.reserved, i));
-		reserve_bootmem(memblock.reserved.region[i].base,
-			memblock_size_bytes(&memblock.reserved, i) - 1, BOOTMEM_DEFAULT);
+	for_each_memblock(reserved, reg) {
+		pr_debug("reserved - 0x%08x-0x%08x\n",
+			 (u32) reg->base, (u32) reg->size);
+		reserve_bootmem(reg->base, reg->size, BOOTMEM_DEFAULT);
 	}
 #ifdef CONFIG_MMU
 	init_bootmem_done = 1;
@@ -230,7 +228,7 @@ static void mm_cmdline_setup(void)
 		if (maxmem && memory_size > maxmem) {
 			memory_size = maxmem;
 			memory_end = memory_start + memory_size;
-			memblock.memory.region[0].size = memory_size;
+			memblock.memory.regions[0].size = memory_size;
 		}
 	}
 }
@@ -273,14 +271,14 @@ asmlinkage void __init mmu_init(void)
 		machine_restart(NULL);
 	}

-	if ((u32) memblock.memory.region[0].size < 0x1000000) {
+	if ((u32) memblock.memory.regions[0].size < 0x1000000) {
 		printk(KERN_EMERG "Memory must be greater than 16MB\n");
 		machine_restart(NULL);
 	}
 	/* Find main memory where the kernel is */
-	memory_start = (u32) memblock.memory.region[0].base;
-	memory_end = (u32) memblock.memory.region[0].base +
-			(u32) memblock.memory.region[0].size;
+	memory_start = (u32) memblock.memory.regions[0].base;
+	memory_end = (u32) memblock.memory.regions[0].base +
+			(u32) memblock.memory.regions[0].size;
 	memory_size = memory_end - memory_start;

 	mm_cmdline_setup(); /* FIXME parse args from command line - not used */
@@ -5,11 +5,4 @@

 #define MEMBLOCK_DBG(fmt...) udbg_printf(fmt)

-#ifdef CONFIG_PPC32
-extern phys_addr_t lowmem_end_addr;
-#define MEMBLOCK_REAL_LIMIT	lowmem_end_addr
-#else
-#define MEMBLOCK_REAL_LIMIT	0
-#endif
-
 #endif /* _ASM_POWERPC_MEMBLOCK_H */

@@ -2,6 +2,8 @@
 #define _ASM_POWERPC_MMU_H_
 #ifdef __KERNEL__

+#include <linux/types.h>
+
 #include <asm/asm-compat.h>
 #include <asm/feature-fixups.h>

@@ -82,6 +84,16 @@ extern unsigned int __start___mmu_ftr_fixup, __stop___mmu_ftr_fixup;
 extern void early_init_mmu(void);
 extern void early_init_mmu_secondary(void);

+extern void setup_initial_memory_limit(phys_addr_t first_memblock_base,
+				       phys_addr_t first_memblock_size);
+
+#ifdef CONFIG_PPC64
+/* This is our real memory area size on ppc64 server, on embedded, we
+ * make it match the size of our bolted TLB area
+ */
+extern u64 ppc64_rma_size;
+#endif /* CONFIG_PPC64 */
+
 #endif /* !__ASSEMBLY__ */

 /* The kernel uses the constants below to index in the page sizes array.

@@ -923,11 +923,7 @@ initial_mmu:
 	mtspr	SPRN_PID,r0
 	sync

-	/* Configure and load two entries into TLB slots 62 and 63.
-	 * In case we are pinning TLBs, these are reserved by the
-	 * other TLB functions. If not reserving, then it doesn't
-	 * matter where they are loaded.
-	 */
+	/* Configure and load one entry into TLB slot 63 */
 	clrrwi	r4,r4,10		/* Mask off the real page number */
 	ori	r4,r4,(TLB_WR | TLB_EX)	/* Set the write and execute bits */
@@ -127,7 +127,7 @@ void __init allocate_pacas(void)
 	 * the first segment. On iSeries they must be within the area mapped
 	 * by the HV, which is HvPagesToMap * HVPAGESIZE bytes.
 	 */
-	limit = min(0x10000000ULL, memblock.rmo_size);
+	limit = min(0x10000000ULL, ppc64_rma_size);
 	if (firmware_has_feature(FW_FEATURE_ISERIES))
 		limit = min(limit, HvPagesToMap * HVPAGESIZE);

@@ -66,6 +66,7 @@
 int __initdata iommu_is_off;
 int __initdata iommu_force_on;
 unsigned long tce_alloc_start, tce_alloc_end;
+u64 ppc64_rma_size;
 #endif

 static int __init early_parse_mem(char *p)
@@ -98,7 +99,7 @@ static void __init move_device_tree(void)

 	if ((memory_limit && (start + size) > memory_limit) ||
 			overlaps_crashkernel(start, size)) {
-		p = __va(memblock_alloc_base(size, PAGE_SIZE, memblock.rmo_size));
+		p = __va(memblock_alloc(size, PAGE_SIZE));
 		memcpy(p, initial_boot_params, size);
 		initial_boot_params = (struct boot_param_header *)p;
 		DBG("Moved device tree to 0x%p\n", p);
@@ -492,7 +493,7 @@ static int __init early_init_dt_scan_memory_ppc(unsigned long node,

 void __init early_init_dt_add_memory_arch(u64 base, u64 size)
 {
-#if defined(CONFIG_PPC64)
+#ifdef CONFIG_PPC64
 	if (iommu_is_off) {
 		if (base >= 0x80000000ul)
 			return;
@@ -501,9 +502,13 @@ void __init early_init_dt_add_memory_arch(u64 base, u64 size)
 	}
 #endif

+	memblock_add(base, size);
+
+	/* First MEMBLOCK added, do some special initializations */
+	if (memstart_addr == ~(phys_addr_t)0)
+		setup_initial_memory_limit(base, size);
 	memstart_addr = min((u64)memstart_addr, base);
-
-	/* Add the chunk to the MEMBLOCK list */
-	memblock_add(base, size);
 }

 u64 __init early_init_dt_alloc_memory_arch(u64 size, u64 align)
@@ -655,7 +660,6 @@ static void __init phyp_dump_reserve_mem(void)
 static inline void __init phyp_dump_reserve_mem(void) {}
 #endif /* CONFIG_PHYP_DUMP && CONFIG_PPC_RTAS */

-
 void __init early_init_devtree(void *params)
 {
 	phys_addr_t limit;
@@ -683,6 +687,7 @@ void __init early_init_devtree(void *params)

 	/* Scan memory nodes and rebuild MEMBLOCKs */
+	memblock_init();

 	of_scan_flat_dt(early_init_dt_scan_root, NULL);
 	of_scan_flat_dt(early_init_dt_scan_memory_ppc, NULL);

@@ -969,7 +969,7 @@ void __init rtas_initialize(void)
 	 */
 #ifdef CONFIG_PPC64
 	if (machine_is(pseries) && firmware_has_feature(FW_FEATURE_LPAR)) {
-		rtas_region = min(memblock.rmo_size, RTAS_INSTANTIATE_MAX);
+		rtas_region = min(ppc64_rma_size, RTAS_INSTANTIATE_MAX);
 		ibm_suspend_me_token = rtas_token("ibm,suspend-me");
 	}
 #endif

@@ -246,7 +246,7 @@ static void __init irqstack_early_init(void)
 	unsigned int i;

 	/* interrupt stacks must be in lowmem, we get that for free on ppc32
-	 * as the memblock is limited to lowmem by MEMBLOCK_REAL_LIMIT */
+	 * as the memblock is limited to lowmem by default */
 	for_each_possible_cpu(i) {
 		softirq_ctx[i] = (struct thread_info *)
 			__va(memblock_alloc(THREAD_SIZE, THREAD_SIZE));

@@ -486,7 +486,7 @@ static void __init emergency_stack_init(void)
 	 * bringup, we need to get at them in real mode. This means they
 	 * must also be within the RMO region.
 	 */
-	limit = min(slb0_limit(), memblock.rmo_size);
+	limit = min(slb0_limit(), ppc64_rma_size);

 	for_each_possible_cpu(i) {
 		unsigned long sp;
@@ -35,6 +35,7 @@
 #include <linux/init.h>
 #include <linux/delay.h>
 #include <linux/highmem.h>
+#include <linux/memblock.h>

 #include <asm/pgalloc.h>
 #include <asm/prom.h>
@@ -47,6 +48,7 @@
 #include <asm/bootx.h>
 #include <asm/machdep.h>
+#include <asm/setup.h>

 #include "mmu_decl.h"

 extern int __map_without_ltlbs;
@@ -139,8 +141,19 @@ unsigned long __init mmu_mapin_ram(unsigned long top)
 	 * coverage with normal-sized pages (or other reasons) do not
 	 * attempt to allocate outside the allowed range.
 	 */
-	__initial_memory_limit_addr = memstart_addr + mapped;
+	memblock_set_current_limit(mapped);

 	return mapped;
 }
+
+void setup_initial_memory_limit(phys_addr_t first_memblock_base,
+				phys_addr_t first_memblock_size)
+{
+	/* We don't currently support the first MEMBLOCK not mapping 0
+	 * physical on those processors
+	 */
+	BUG_ON(first_memblock_base != 0);
+
+	/* 40x can only access 16MB at the moment (see head_40x.S) */
+	memblock_set_current_limit(min_t(u64, first_memblock_size, 0x00800000));
+}

@@ -24,6 +24,8 @@
  */

 #include <linux/init.h>
+#include <linux/memblock.h>
+
 #include <asm/mmu.h>
 #include <asm/system.h>
 #include <asm/page.h>
@@ -213,6 +215,18 @@ unsigned long __init mmu_mapin_ram(unsigned long top)
 	return total_lowmem;
 }

+void setup_initial_memory_limit(phys_addr_t first_memblock_base,
+				phys_addr_t first_memblock_size)
+{
+	/* We don't currently support the first MEMBLOCK not mapping 0
+	 * physical on those processors
+	 */
+	BUG_ON(first_memblock_base != 0);
+
+	/* 44x has a 256M TLB entry pinned at boot */
+	memblock_set_current_limit(min_t(u64, first_memblock_size, PPC_PIN_SIZE));
+}
+
 #ifdef CONFIG_SMP
 void __cpuinit mmu_init_secondary(int cpu)
 {

@@ -40,6 +40,7 @@
 #include <linux/init.h>
 #include <linux/delay.h>
 #include <linux/highmem.h>
+#include <linux/memblock.h>

 #include <asm/pgalloc.h>
 #include <asm/prom.h>
@@ -213,5 +214,14 @@ void __init adjust_total_lowmem(void)
 	pr_cont("%lu Mb, residual: %dMb\n", tlbcam_sz(tlbcam_index - 1) >> 20,
 	        (unsigned int)((total_lowmem - __max_low_memory) >> 20));

-	__initial_memory_limit_addr = memstart_addr + __max_low_memory;
+	memblock_set_current_limit(memstart_addr + __max_low_memory);
 }
+
+void setup_initial_memory_limit(phys_addr_t first_memblock_base,
+				phys_addr_t first_memblock_size)
+{
+	phys_addr_t limit = first_memblock_base + first_memblock_size;
+
+	/* 64M mapped initially according to head_fsl_booke.S */
+	memblock_set_current_limit(min_t(u64, limit, 0x04000000));
+}

@@ -588,7 +588,7 @@ static void __init htab_initialize(void)
 	unsigned long pteg_count;
 	unsigned long prot;
 	unsigned long base = 0, size = 0, limit;
-	int i;
+	struct memblock_region *reg;

 	DBG(" -> htab_initialize()\n");

@@ -625,7 +625,7 @@ static void __init htab_initialize(void)
 		if (machine_is(cell))
 			limit = 0x80000000;
 		else
-			limit = 0;
+			limit = MEMBLOCK_ALLOC_ANYWHERE;

 		table = memblock_alloc_base(htab_size_bytes, htab_size_bytes, limit);

@@ -649,7 +649,7 @@ static void __init htab_initialize(void)
 #ifdef CONFIG_DEBUG_PAGEALLOC
 	linear_map_hash_count = memblock_end_of_DRAM() >> PAGE_SHIFT;
 	linear_map_hash_slots = __va(memblock_alloc_base(linear_map_hash_count,
-						   1, memblock.rmo_size));
+						   1, ppc64_rma_size));
 	memset(linear_map_hash_slots, 0, linear_map_hash_count);
 #endif /* CONFIG_DEBUG_PAGEALLOC */

@@ -659,9 +659,9 @@ static void __init htab_initialize(void)
 	 */

 	/* create bolted the linear mapping in the hash table */
-	for (i=0; i < memblock.memory.cnt; i++) {
-		base = (unsigned long)__va(memblock.memory.region[i].base);
-		size = memblock.memory.region[i].size;
+	for_each_memblock(memory, reg) {
+		base = (unsigned long)__va(reg->base);
+		size = reg->size;

 		DBG("creating mapping for region: %lx..%lx (prot: %lx)\n",
 		    base, size, prot);
@@ -696,7 +696,8 @@ static void __init htab_initialize(void)
 #endif /* CONFIG_U3_DART */
 		BUG_ON(htab_bolt_mapping(base, base + size, __pa(base),
 				prot, mmu_linear_psize, mmu_kernel_ssize));
 	}
 }
+	memblock_set_current_limit(MEMBLOCK_ALLOC_ANYWHERE);

 	/*
 	 * If we have a memory_limit and we've allocated TCEs then we need to
@@ -1247,3 +1248,23 @@ void kernel_map_pages(struct page *page, int numpages, int enable)
 	local_irq_restore(flags);
 }
 #endif /* CONFIG_DEBUG_PAGEALLOC */
+
+void setup_initial_memory_limit(phys_addr_t first_memblock_base,
+				phys_addr_t first_memblock_size)
+{
+	/* We don't currently support the first MEMBLOCK not mapping 0
+	 * physical on those processors
+	 */
+	BUG_ON(first_memblock_base != 0);
+
+	/* On LPAR systems, the first entry is our RMA region,
+	 * non-LPAR 64-bit hash MMU systems don't have a limitation
+	 * on real mode access, but using the first entry works well
+	 * enough. We also clamp it to 1G to avoid some funky things
+	 * such as RTAS bugs etc...
+	 */
+	ppc64_rma_size = min_t(u64, first_memblock_size, 0x40000000);
+
+	/* Finally limit subsequent allocations */
+	memblock_set_current_limit(ppc64_rma_size);
+}
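The same setup_initial_memory_limit() pattern recurs in the 40x, 44x, FSL BookE, and hash MMU hunks above: when the first memory block is registered, clamp memblock's allocation limit to whatever the early MMU mapping can actually reach. A minimal standalone model of that clamp follows, with memblock_set_current_limit() stubbed out and the 40x constant reused purely for illustration; the sample sizes are invented.

#include <stdint.h>
#include <stdio.h>

typedef uint64_t u64;
typedef u64 phys_addr_t;

#define min_t(t, a, b) ((t)(a) < (t)(b) ? (t)(a) : (t)(b))

static phys_addr_t memblock_current_limit;

/* Stand-in for the kernel's memblock_set_current_limit(): early
 * allocations will not be served above this address. */
static void memblock_set_current_limit(phys_addr_t limit)
{
	memblock_current_limit = limit;
}

/* Mirrors the 40x variant above: however much RAM the first region
 * reports, early allocations stay inside what head_40x.S has mapped. */
static void setup_initial_memory_limit(phys_addr_t first_memblock_base,
				       phys_addr_t first_memblock_size)
{
	memblock_set_current_limit(min_t(u64, first_memblock_size, 0x00800000));
}

int main(void)
{
	setup_initial_memory_limit(0, 0x10000000);	/* 256 MB of RAM... */
	printf("limit = %#llx\n",			/* ...clamped to 8 MB */
	       (unsigned long long)memblock_current_limit);
	return 0;
}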
@@ -91,12 +91,6 @@ int __allow_ioremap_reserved;
 /* max amount of low RAM to map in */
 unsigned long __max_low_memory = MAX_LOW_MEM;

-/*
- * address of the limit of what is accessible with initial MMU setup -
- * 256MB usually, but only 16MB on 601.
- */
-phys_addr_t __initial_memory_limit_addr = (phys_addr_t)0x10000000;
-
 /*
  * Check for command-line options that affect what MMU_init will do.
  */
@@ -126,13 +120,6 @@ void __init MMU_init(void)
 	if (ppc_md.progress)
 		ppc_md.progress("MMU:enter", 0x111);

-	/* 601 can only access 16MB at the moment */
-	if (PVR_VER(mfspr(SPRN_PVR)) == 1)
-		__initial_memory_limit_addr = 0x01000000;
-	/* 8xx can only access 8MB at the moment */
-	if (PVR_VER(mfspr(SPRN_PVR)) == 0x50)
-		__initial_memory_limit_addr = 0x00800000;
-
 	/* parse args from command line */
 	MMU_setup();

@@ -190,20 +177,18 @@ void __init MMU_init(void)
 #ifdef CONFIG_BOOTX_TEXT
 	btext_unmap();
 #endif

+	/* Shortly after that, the entire linear mapping will be available */
+	memblock_set_current_limit(lowmem_end_addr);
 }

 /* This is only called until mem_init is done. */
 void __init *early_get_page(void)
 {
-	void *p;
-
-	if (init_bootmem_done) {
-		p = alloc_bootmem_pages(PAGE_SIZE);
-	} else {
-		p = __va(memblock_alloc_base(PAGE_SIZE, PAGE_SIZE,
-					__initial_memory_limit_addr));
-	}
-	return p;
+	if (init_bootmem_done)
+		return alloc_bootmem_pages(PAGE_SIZE);
+	else
+		return __va(memblock_alloc(PAGE_SIZE, PAGE_SIZE));
 }

 /* Free up now-unused memory */
@@ -252,3 +237,17 @@ void free_initrd_mem(unsigned long start, unsigned long end)
 }
 #endif

+
+#ifdef CONFIG_8xx /* No 8xx specific .c file to put that in ... */
+void setup_initial_memory_limit(phys_addr_t first_memblock_base,
+				phys_addr_t first_memblock_size)
+{
+	/* We don't currently support the first MEMBLOCK not mapping 0
+	 * physical on those processors
+	 */
+	BUG_ON(first_memblock_base != 0);
+
+	/* 8xx can only access 8MB at the moment */
+	memblock_set_current_limit(min_t(u64, first_memblock_size, 0x00800000));
+}
+#endif /* CONFIG_8xx */
@@ -330,3 +330,4 @@ int __meminit vmemmap_populate(struct page *start_page,
 	return 0;
 }
 #endif /* CONFIG_SPARSEMEM_VMEMMAP */
+
@@ -82,18 +82,11 @@ int page_is_ram(unsigned long pfn)
 	return pfn < max_pfn;
 #else
 	unsigned long paddr = (pfn << PAGE_SHIFT);
-	int i;
-	for (i=0; i < memblock.memory.cnt; i++) {
-		unsigned long base;
+	struct memblock_region *reg;

-		base = memblock.memory.region[i].base;
-
-		if ((paddr >= base) &&
-			(paddr < (base + memblock.memory.region[i].size))) {
+	for_each_memblock(memory, reg)
+		if (paddr >= reg->base && paddr < (reg->base + reg->size))
 			return 1;
-		}
-	}

 	return 0;
 #endif
 }
@@ -149,23 +142,19 @@ int
 walk_system_ram_range(unsigned long start_pfn, unsigned long nr_pages,
 		void *arg, int (*func)(unsigned long, unsigned long, void *))
 {
-	struct memblock_property res;
-	unsigned long pfn, len;
-	u64 end;
+	struct memblock_region *reg;
+	unsigned long end_pfn = start_pfn + nr_pages;
+	unsigned long tstart, tend;
 	int ret = -1;

-	res.base = (u64) start_pfn << PAGE_SHIFT;
-	res.size = (u64) nr_pages << PAGE_SHIFT;
-
-	end = res.base + res.size - 1;
-	while ((res.base < end) && (memblock_find(&res) >= 0)) {
-		pfn = (unsigned long)(res.base >> PAGE_SHIFT);
-		len = (unsigned long)(res.size >> PAGE_SHIFT);
-		ret = (*func)(pfn, len, arg);
+	for_each_memblock(memory, reg) {
+		tstart = max(start_pfn, memblock_region_memory_base_pfn(reg));
+		tend = min(end_pfn, memblock_region_memory_end_pfn(reg));
+		if (tstart >= tend)
+			continue;
+		ret = (*func)(tstart, tend - tstart, arg);
 		if (ret)
 			break;
-		res.base += (res.size + 1);
-		res.size = (end - res.base + 1);
 	}
 	return ret;
 }
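The rewritten walk_system_ram_range() above replaces the memblock_find() probing loop with a plain intersection of the caller's pfn window against each memory region. Below is a standalone sketch of that clamp-and-skip idiom; the region values and function names are invented for the demonstration.

#include <stdio.h>

struct region { unsigned long base_pfn, end_pfn; };

/* Intersect [start_pfn, end_pfn) with each region; skip empty
 * intersections, call func() on the rest, stop on a nonzero return.
 * This is the same control flow as the new walk_system_ram_range(). */
static int walk(const struct region *regs, int nr,
		unsigned long start_pfn, unsigned long end_pfn,
		int (*func)(unsigned long pfn, unsigned long nr_pages))
{
	int i, ret = -1;

	for (i = 0; i < nr; i++) {
		unsigned long tstart = start_pfn > regs[i].base_pfn ?
				       start_pfn : regs[i].base_pfn;
		unsigned long tend = end_pfn < regs[i].end_pfn ?
				     end_pfn : regs[i].end_pfn;
		if (tstart >= tend)
			continue;	/* no overlap with this region */
		ret = func(tstart, tend - tstart);
		if (ret)
			break;
	}
	return ret;
}

static int show(unsigned long pfn, unsigned long nr_pages)
{
	printf("pfns %lu..%lu\n", pfn, pfn + nr_pages);
	return 0;
}

int main(void)
{
	struct region mem[] = { { 0, 100 }, { 200, 300 } };

	return walk(mem, 2, 50, 250, show);	/* visits 50..100 and 200..250 */
}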
@@ -179,9 +168,9 @@ EXPORT_SYMBOL_GPL(walk_system_ram_range);
 #ifndef CONFIG_NEED_MULTIPLE_NODES
 void __init do_init_bootmem(void)
 {
-	unsigned long i;
 	unsigned long start, bootmap_pages;
 	unsigned long total_pages;
+	struct memblock_region *reg;
 	int boot_mapsize;

 	max_low_pfn = max_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
@@ -204,10 +193,10 @@ void __init do_init_bootmem(void)
 	boot_mapsize = init_bootmem_node(NODE_DATA(0), start >> PAGE_SHIFT, min_low_pfn, max_low_pfn);

 	/* Add active regions with valid PFNs */
-	for (i = 0; i < memblock.memory.cnt; i++) {
+	for_each_memblock(memory, reg) {
 		unsigned long start_pfn, end_pfn;
-		start_pfn = memblock.memory.region[i].base >> PAGE_SHIFT;
-		end_pfn = start_pfn + memblock_size_pages(&memblock.memory, i);
+		start_pfn = memblock_region_memory_base_pfn(reg);
+		end_pfn = memblock_region_memory_end_pfn(reg);
 		add_active_range(0, start_pfn, end_pfn);
 	}

@@ -218,29 +207,21 @@ void __init do_init_bootmem(void)
 	free_bootmem_with_active_regions(0, lowmem_end_addr >> PAGE_SHIFT);

 	/* reserve the sections we're already using */
-	for (i = 0; i < memblock.reserved.cnt; i++) {
-		unsigned long addr = memblock.reserved.region[i].base +
-				memblock_size_bytes(&memblock.reserved, i) - 1;
-		if (addr < lowmem_end_addr)
-			reserve_bootmem(memblock.reserved.region[i].base,
-					memblock_size_bytes(&memblock.reserved, i),
-					BOOTMEM_DEFAULT);
-		else if (memblock.reserved.region[i].base < lowmem_end_addr) {
-			unsigned long adjusted_size = lowmem_end_addr -
-				      memblock.reserved.region[i].base;
-			reserve_bootmem(memblock.reserved.region[i].base,
-					adjusted_size, BOOTMEM_DEFAULT);
+	for_each_memblock(reserved, reg) {
+		unsigned long top = reg->base + reg->size - 1;
+		if (top < lowmem_end_addr)
+			reserve_bootmem(reg->base, reg->size, BOOTMEM_DEFAULT);
+		else if (reg->base < lowmem_end_addr) {
+			unsigned long trunc_size = lowmem_end_addr - reg->base;
+			reserve_bootmem(reg->base, trunc_size, BOOTMEM_DEFAULT);
 		}
 	}
 #else
 	free_bootmem_with_active_regions(0, max_pfn);

 	/* reserve the sections we're already using */
-	for (i = 0; i < memblock.reserved.cnt; i++)
-		reserve_bootmem(memblock.reserved.region[i].base,
-				memblock_size_bytes(&memblock.reserved, i),
-				BOOTMEM_DEFAULT);
-
+	for_each_memblock(reserved, reg)
+		reserve_bootmem(reg->base, reg->size, BOOTMEM_DEFAULT);
 #endif
 	/* XXX need to clip this if using highmem? */
 	sparse_memory_present_with_active_regions(0);
@@ -251,22 +232,15 @@ void __init do_init_bootmem(void)
 /* mark pages that don't exist as nosave */
 static int __init mark_nonram_nosave(void)
 {
-	unsigned long memblock_next_region_start_pfn,
-		      memblock_region_max_pfn;
-	int i;
+	struct memblock_region *reg, *prev = NULL;

-	for (i = 0; i < memblock.memory.cnt - 1; i++) {
-		memblock_region_max_pfn =
-			(memblock.memory.region[i].base >> PAGE_SHIFT) +
-			(memblock.memory.region[i].size >> PAGE_SHIFT);
-		memblock_next_region_start_pfn =
-			memblock.memory.region[i+1].base >> PAGE_SHIFT;
-
-		if (memblock_region_max_pfn < memblock_next_region_start_pfn)
-			register_nosave_region(memblock_region_max_pfn,
-					       memblock_next_region_start_pfn);
+	for_each_memblock(memory, reg) {
+		if (prev &&
+		    memblock_region_memory_end_pfn(prev) < memblock_region_memory_base_pfn(reg))
+			register_nosave_region(memblock_region_memory_end_pfn(prev),
+					       memblock_region_memory_base_pfn(reg));
+		prev = reg;
 	}

 	return 0;
 }
@@ -327,7 +301,7 @@ void __init mem_init(void)
 	swiotlb_init(1);
 #endif

-	num_physpages = memblock.memory.size >> PAGE_SHIFT;
+	num_physpages = memblock_phys_mem_size() >> PAGE_SHIFT;
 	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);

 #ifdef CONFIG_NEED_MULTIPLE_NODES

@@ -802,16 +802,17 @@ static void __init setup_nonnuma(void)
 	unsigned long top_of_ram = memblock_end_of_DRAM();
 	unsigned long total_ram = memblock_phys_mem_size();
 	unsigned long start_pfn, end_pfn;
-	unsigned int i, nid = 0;
+	unsigned int nid = 0;
+	struct memblock_region *reg;

 	printk(KERN_DEBUG "Top of RAM: 0x%lx, Total RAM: 0x%lx\n",
 	       top_of_ram, total_ram);
 	printk(KERN_DEBUG "Memory hole size: %ldMB\n",
 	       (top_of_ram - total_ram) >> 20);

-	for (i = 0; i < memblock.memory.cnt; ++i) {
-		start_pfn = memblock.memory.region[i].base >> PAGE_SHIFT;
-		end_pfn = start_pfn + memblock_size_pages(&memblock.memory, i);
+	for_each_memblock(memory, reg) {
+		start_pfn = memblock_region_memory_base_pfn(reg);
+		end_pfn = memblock_region_memory_end_pfn(reg);

 		fake_numa_create_new_node(end_pfn, &nid);
 		add_active_range(nid, start_pfn, end_pfn);
@@ -947,11 +948,11 @@ static struct notifier_block __cpuinitdata ppc64_numa_nb = {
 static void mark_reserved_regions_for_nid(int nid)
 {
 	struct pglist_data *node = NODE_DATA(nid);
-	int i;
+	struct memblock_region *reg;

-	for (i = 0; i < memblock.reserved.cnt; i++) {
-		unsigned long physbase = memblock.reserved.region[i].base;
-		unsigned long size = memblock.reserved.region[i].size;
+	for_each_memblock(reserved, reg) {
+		unsigned long physbase = reg->base;
+		unsigned long size = reg->size;
 		unsigned long start_pfn = physbase >> PAGE_SHIFT;
 		unsigned long end_pfn = PFN_UP(physbase + size);
 		struct node_active_region node_ar;
Some files were not shown because too many files have changed in this diff.