Merge branch 'tj-percpu' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/misc into core/percpu
Conflicts:
	arch/x86/include/asm/pgtable.h
+13 -7
@@ -189,9 +189,21 @@ callback_init(void * kernel_end)
 	if (alpha_using_srm) {
 		static struct vm_struct console_remap_vm;
-		unsigned long vaddr = VMALLOC_START;
+		unsigned long nr_pages = 0;
+		unsigned long vaddr;
 		unsigned long i, j;
 
+		/* calculate needed size */
+		for (i = 0; i < crb->map_entries; ++i)
+			nr_pages += crb->map[i].count;
+
+		/* register the vm area */
+		console_remap_vm.flags = VM_ALLOC;
+		console_remap_vm.size = nr_pages << PAGE_SHIFT;
+		vm_area_register_early(&console_remap_vm, PAGE_SIZE);
+
+		vaddr = (unsigned long)console_remap_vm.addr;
+
 		/* Set up the third level PTEs and update the virtual
 		   addresses of the CRB entries. */
 		for (i = 0; i < crb->map_entries; ++i) {
@@ -213,12 +225,6 @@ callback_init(void * kernel_end)
 			vaddr += PAGE_SIZE;
 		}
 	}
 
-	/* Let vmalloc know that we've allocated some space. */
-	console_remap_vm.flags = VM_ALLOC;
-	console_remap_vm.addr = (void *) VMALLOC_START;
-	console_remap_vm.size = vaddr - VMALLOC_START;
-	vmlist = &console_remap_vm;
-
 	callback_init_done = 1;
+1 -1
@@ -181,7 +181,7 @@ source "kernel/Kconfig.preempt"
 config QUICKLIST
 	def_bool y
 
-config HAVE_ARCH_BOOTMEM_NODE
+config HAVE_ARCH_BOOTMEM
 	def_bool n
 
 config ARCH_HAVE_MEMORY_PRESENT
+4 -1
@@ -135,6 +135,9 @@ config ARCH_HAS_CACHE_LINE_SIZE
 config HAVE_SETUP_PER_CPU_AREA
 	def_bool y
 
+config HAVE_DYNAMIC_PER_CPU_AREA
+	def_bool y
+
 config HAVE_CPUMASK_OF_CPU_MAP
 	def_bool X86_64_SMP
 
@@ -1122,7 +1125,7 @@ config NODES_SHIFT
 	  Specify the maximum number of NUMA Nodes available on the target
 	  system.  Increases memory reserved to accomodate various tables.
 
-config HAVE_ARCH_BOOTMEM_NODE
+config HAVE_ARCH_BOOTMEM
 	def_bool y
 	depends on X86_32 && NUMA
@@ -91,45 +91,12 @@ static inline int pfn_valid(int pfn)
 #endif /* CONFIG_DISCONTIGMEM */
 
 #ifdef CONFIG_NEED_MULTIPLE_NODES
-
-/*
- * Following are macros that are specific to this numa platform.
- */
-#define reserve_bootmem(addr, size, flags) \
-	reserve_bootmem_node(NODE_DATA(0), (addr), (size), (flags))
-#define alloc_bootmem(x) \
-	__alloc_bootmem_node(NODE_DATA(0), (x), SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS))
-#define alloc_bootmem_nopanic(x) \
-	__alloc_bootmem_node_nopanic(NODE_DATA(0), (x), SMP_CACHE_BYTES, \
-				__pa(MAX_DMA_ADDRESS))
-#define alloc_bootmem_low(x) \
-	__alloc_bootmem_node(NODE_DATA(0), (x), SMP_CACHE_BYTES, 0)
-#define alloc_bootmem_pages(x) \
-	__alloc_bootmem_node(NODE_DATA(0), (x), PAGE_SIZE, __pa(MAX_DMA_ADDRESS))
-#define alloc_bootmem_pages_nopanic(x) \
-	__alloc_bootmem_node_nopanic(NODE_DATA(0), (x), PAGE_SIZE, \
-				__pa(MAX_DMA_ADDRESS))
-#define alloc_bootmem_low_pages(x) \
-	__alloc_bootmem_node(NODE_DATA(0), (x), PAGE_SIZE, 0)
-#define alloc_bootmem_node(pgdat, x) \
-({ \
-	struct pglist_data __maybe_unused \
-			*__alloc_bootmem_node__pgdat = (pgdat); \
-	__alloc_bootmem_node(NODE_DATA(0), (x), SMP_CACHE_BYTES, \
-				__pa(MAX_DMA_ADDRESS)); \
-})
-#define alloc_bootmem_pages_node(pgdat, x) \
-({ \
-	struct pglist_data __maybe_unused \
-			*__alloc_bootmem_node__pgdat = (pgdat); \
-	__alloc_bootmem_node(NODE_DATA(0), (x), PAGE_SIZE, \
-				__pa(MAX_DMA_ADDRESS)); \
-})
-#define alloc_bootmem_low_pages_node(pgdat, x) \
-({ \
-	struct pglist_data __maybe_unused \
-			*__alloc_bootmem_node__pgdat = (pgdat); \
-	__alloc_bootmem_node(NODE_DATA(0), (x), PAGE_SIZE, 0); \
-})
+/* always use node 0 for bootmem on this numa platform */
+#define alloc_bootmem_core(__bdata, size, align, goal, limit)		\
+({									\
+	bootmem_data_t __maybe_unused * __abm_bdata_dummy = (__bdata);	\
+	__alloc_bootmem_core(NODE_DATA(0)->bdata,			\
+			     (size), (align), (goal), (limit));		\
+})
 #endif /* CONFIG_NEED_MULTIPLE_NODES */
@@ -43,6 +43,14 @@
 #else /* ...!ASSEMBLY */
 
 #include <linux/stringify.h>
+#include <asm/sections.h>
+
+#define __addr_to_pcpu_ptr(addr)					\
+	(void *)((unsigned long)(addr) - (unsigned long)pcpu_base_addr	\
+		 + (unsigned long)__per_cpu_start)
+#define __pcpu_ptr_to_addr(ptr)						\
+	(void *)((unsigned long)(ptr) + (unsigned long)pcpu_base_addr	\
+		 - (unsigned long)__per_cpu_start)
 
 #ifdef CONFIG_SMP
 #define __percpu_arg(x)		"%%"__stringify(__percpu_seg)":%P" #x
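
The two macros added above convert between a chunk's vmalloc address and the canonical percpu pointer, which is kept relative to __per_cpu_start so that static and dynamically allocated percpu objects can share the same per_cpu_offset() arithmetic. A round-trip illustration (the address value is made up, not from this diff):

	/* sketch only: the two macros are exact inverses */
	void *addr = (void *)0xffffe20000002000UL;	/* address inside a chunk */
	void *ptr  = __addr_to_pcpu_ptr(addr);		/* rebased to __per_cpu_start */

	BUG_ON(__pcpu_ptr_to_addr(ptr) != addr);	/* round-trips exactly */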
@@ -288,6 +288,8 @@ static inline int is_new_memtype_allowed(unsigned long flags,
 	return 1;
 }
 
+pmd_t *populate_extra_pmd(unsigned long vaddr);
+pte_t *populate_extra_pte(unsigned long vaddr);
 #endif /* __ASSEMBLY__ */
 
 #ifdef CONFIG_X86_32
@@ -601,7 +601,7 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
 	if (!data)
 		return -ENOMEM;
 
-	data->acpi_data = percpu_ptr(acpi_perf_data, cpu);
+	data->acpi_data = per_cpu_ptr(acpi_perf_data, cpu);
 	per_cpu(drv_data, cpu) = data;
 
 	if (cpu_has(c, X86_FEATURE_CONSTANT_TSC))
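
This percpu_ptr() → per_cpu_ptr() rename recurs below (processor_perflib.c, sched.c, stop_machine.c): with the dynamic percpu allocator there is a single accessor for both static and runtime-allocated percpu objects. A minimal sketch of the resulting usage (hypothetical counter, not from this diff):

	int *hits = alloc_percpu(int);		/* runtime-allocated percpu int */
	int cpu, total = 0;

	for_each_possible_cpu(cpu)
		total += *per_cpu_ptr(hits, cpu);	/* same accessor everywhere */
	free_percpu(hits);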
+15 -14
@@ -16,6 +16,7 @@
 #include <linux/cpu.h>
 #include <linux/delay.h>
 #include <linux/uaccess.h>
+#include <linux/percpu.h>
 
 #include <asm/apic.h>
 
@@ -55,13 +56,13 @@ static inline void print_stack_overflow(void) { }
 union irq_ctx {
 	struct thread_info	tinfo;
 	u32			stack[THREAD_SIZE/sizeof(u32)];
-};
+} __attribute__((aligned(PAGE_SIZE)));
 
-static union irq_ctx *hardirq_ctx[NR_CPUS] __read_mostly;
-static union irq_ctx *softirq_ctx[NR_CPUS] __read_mostly;
+static DEFINE_PER_CPU(union irq_ctx *, hardirq_ctx);
+static DEFINE_PER_CPU(union irq_ctx *, softirq_ctx);
 
-static char softirq_stack[NR_CPUS * THREAD_SIZE] __page_aligned_bss;
-static char hardirq_stack[NR_CPUS * THREAD_SIZE] __page_aligned_bss;
+static DEFINE_PER_CPU_PAGE_ALIGNED(union irq_ctx, hardirq_stack);
+static DEFINE_PER_CPU_PAGE_ALIGNED(union irq_ctx, softirq_stack);
 
 static void call_on_stack(void *func, void *stack)
 {
@@ -81,7 +82,7 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
 	u32 *isp, arg1, arg2;
 
 	curctx = (union irq_ctx *) current_thread_info();
-	irqctx = hardirq_ctx[smp_processor_id()];
+	irqctx = __get_cpu_var(hardirq_ctx);
 
 	/*
 	 * this is where we switch to the IRQ stack. However, if we are
@@ -125,34 +126,34 @@ void __cpuinit irq_ctx_init(int cpu)
 {
 	union irq_ctx *irqctx;
 
-	if (hardirq_ctx[cpu])
+	if (per_cpu(hardirq_ctx, cpu))
 		return;
 
-	irqctx = (union irq_ctx*) &hardirq_stack[cpu*THREAD_SIZE];
+	irqctx = &per_cpu(hardirq_stack, cpu);
 	irqctx->tinfo.task		= NULL;
 	irqctx->tinfo.exec_domain	= NULL;
 	irqctx->tinfo.cpu		= cpu;
 	irqctx->tinfo.preempt_count	= HARDIRQ_OFFSET;
 	irqctx->tinfo.addr_limit	= MAKE_MM_SEG(0);
 
-	hardirq_ctx[cpu] = irqctx;
+	per_cpu(hardirq_ctx, cpu) = irqctx;
 
-	irqctx = (union irq_ctx *) &softirq_stack[cpu*THREAD_SIZE];
+	irqctx = &per_cpu(softirq_stack, cpu);
 	irqctx->tinfo.task		= NULL;
 	irqctx->tinfo.exec_domain	= NULL;
 	irqctx->tinfo.cpu		= cpu;
 	irqctx->tinfo.preempt_count	= 0;
 	irqctx->tinfo.addr_limit	= MAKE_MM_SEG(0);
 
-	softirq_ctx[cpu] = irqctx;
+	per_cpu(softirq_ctx, cpu) = irqctx;
 
 	printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
-	       cpu, hardirq_ctx[cpu], softirq_ctx[cpu]);
+	       cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
 }
 
 void irq_ctx_exit(int cpu)
 {
-	hardirq_ctx[cpu] = NULL;
+	per_cpu(hardirq_ctx, cpu) = NULL;
 }
 
 asmlinkage void do_softirq(void)
@@ -169,7 +170,7 @@ asmlinkage void do_softirq(void)
 
 	if (local_softirq_pending()) {
 		curctx = current_thread_info();
-		irqctx = softirq_ctx[smp_processor_id()];
+		irqctx = __get_cpu_var(softirq_ctx);
 		irqctx->tinfo.task = curctx->task;
 		irqctx->tinfo.previous_esp = current_stack_pointer;
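
The conversion pattern used throughout irq_32.c: NR_CPUS-sized arrays and manually strided per-CPU stacks become percpu variables, so layout and striding are the allocator's job. Schematically (illustration, not part of the diff):

	/* before: explicit arrays indexed by CPU number */
	irqctx = hardirq_ctx[smp_processor_id()];
	irqctx = (union irq_ctx *) &hardirq_stack[cpu * THREAD_SIZE];

	/* after: percpu accessors */
	irqctx = __get_cpu_var(hardirq_ctx);	/* current CPU */
	irqctx = &per_cpu(hardirq_stack, cpu);	/* explicit CPU */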
+339 -26
@@ -7,6 +7,7 @@
 #include <linux/crash_dump.h>
 #include <linux/smp.h>
 #include <linux/topology.h>
+#include <linux/pfn.h>
 #include <asm/sections.h>
 #include <asm/processor.h>
 #include <asm/setup.h>
@@ -41,6 +42,321 @@ unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
 };
 EXPORT_SYMBOL(__per_cpu_offset);
 
+/**
+ * pcpu_need_numa - determine percpu allocation needs to consider NUMA
+ *
+ * If NUMA is not configured or there is only one NUMA node available,
+ * there is no reason to consider NUMA.  This function determines
+ * whether percpu allocation should consider NUMA or not.
+ *
+ * RETURNS:
+ * true if NUMA should be considered; otherwise, false.
+ */
+static bool __init pcpu_need_numa(void)
+{
+#ifdef CONFIG_NEED_MULTIPLE_NODES
+	pg_data_t *last = NULL;
+	unsigned int cpu;
+
+	for_each_possible_cpu(cpu) {
+		int node = early_cpu_to_node(cpu);
+
+		if (node_online(node) && NODE_DATA(node) &&
+		    last && last != NODE_DATA(node))
+			return true;
+
+		last = NODE_DATA(node);
+	}
+#endif
+	return false;
+}
+
+/**
+ * pcpu_alloc_bootmem - NUMA friendly alloc_bootmem wrapper for percpu
+ * @cpu: cpu to allocate for
+ * @size: size allocation in bytes
+ * @align: alignment
+ *
+ * Allocate @size bytes aligned at @align for cpu @cpu.  This wrapper
+ * does the right thing for NUMA regardless of the current
+ * configuration.
+ *
+ * RETURNS:
+ * Pointer to the allocated area on success, NULL on failure.
+ */
+static void * __init pcpu_alloc_bootmem(unsigned int cpu, unsigned long size,
+					unsigned long align)
+{
+	const unsigned long goal = __pa(MAX_DMA_ADDRESS);
+#ifdef CONFIG_NEED_MULTIPLE_NODES
+	int node = early_cpu_to_node(cpu);
+	void *ptr;
+
+	if (!node_online(node) || !NODE_DATA(node)) {
+		ptr = __alloc_bootmem_nopanic(size, align, goal);
+		pr_info("cpu %d has no node %d or node-local memory\n",
+			cpu, node);
+		pr_debug("per cpu data for cpu%d %lu bytes at %016lx\n",
+			 cpu, size, __pa(ptr));
+	} else {
+		ptr = __alloc_bootmem_node_nopanic(NODE_DATA(node),
+						   size, align, goal);
+		pr_debug("per cpu data for cpu%d %lu bytes on node%d at "
+			 "%016lx\n", cpu, size, node, __pa(ptr));
+	}
+	return ptr;
+#else
+	return __alloc_bootmem_nopanic(size, align, goal);
+#endif
+}
+
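Note that both branches use the _nopanic bootmem variants: the first-chunk allocators below check for NULL and return an error instead of panicking, which is what lets setup_per_cpu_areas() fall back from one strategy to the next.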
+/*
+ * Remap allocator
+ *
+ * This allocator uses PMD page as unit.  A PMD page is allocated for
+ * each cpu and each is remapped into vmalloc area using PMD mapping.
+ * As PMD page is quite large, only part of it is used for the first
+ * chunk.  Unused part is returned to the bootmem allocator.
+ *
+ * So, the PMD pages are mapped twice - once to the physical mapping
+ * and to the vmalloc area for the first percpu chunk.  The double
+ * mapping does add one more PMD TLB entry pressure but still is much
+ * better than only using 4k mappings while still being NUMA friendly.
+ */
+#ifdef CONFIG_NEED_MULTIPLE_NODES
+static size_t pcpur_size __initdata;
+static void **pcpur_ptrs __initdata;
+
+static struct page * __init pcpur_get_page(unsigned int cpu, int pageno)
+{
+	size_t off = (size_t)pageno << PAGE_SHIFT;
+
+	if (off >= pcpur_size)
+		return NULL;
+
+	return virt_to_page(pcpur_ptrs[cpu] + off);
+}
+
+static ssize_t __init setup_pcpu_remap(size_t static_size)
+{
+	static struct vm_struct vm;
+	pg_data_t *last;
+	size_t ptrs_size;
+	unsigned int cpu;
+	ssize_t ret;
+
+	/*
+	 * If large page isn't supported, there's no benefit in doing
+	 * this.  Also, on non-NUMA, embedding is better.
+	 */
+	if (!cpu_has_pse || pcpu_need_numa())
+		return -EINVAL;
+
+	last = NULL;
+	for_each_possible_cpu(cpu) {
+		int node = early_cpu_to_node(cpu);
+
+		if (node_online(node) && NODE_DATA(node) &&
+		    last && last != NODE_DATA(node))
+			goto proceed;
+
+		last = NODE_DATA(node);
+	}
+	return -EINVAL;
+
+proceed:
+	/*
+	 * Currently supports only single page.  Supporting multiple
+	 * pages won't be too difficult if it ever becomes necessary.
+	 */
+	pcpur_size = PFN_ALIGN(static_size + PERCPU_DYNAMIC_RESERVE);
+	if (pcpur_size > PMD_SIZE) {
+		pr_warning("PERCPU: static data is larger than large page, "
+			   "can't use large page\n");
+		return -EINVAL;
+	}
+
+	/* allocate pointer array and alloc large pages */
+	ptrs_size = PFN_ALIGN(num_possible_cpus() * sizeof(pcpur_ptrs[0]));
+	pcpur_ptrs = alloc_bootmem(ptrs_size);
+
+	for_each_possible_cpu(cpu) {
+		pcpur_ptrs[cpu] = pcpu_alloc_bootmem(cpu, PMD_SIZE, PMD_SIZE);
+		if (!pcpur_ptrs[cpu])
+			goto enomem;
+
+		/*
+		 * Only use pcpur_size bytes and give back the rest.
+		 *
+		 * Ingo: The 2MB up-rounding bootmem is needed to make
+		 * sure the partial 2MB page is still fully RAM - it's
+		 * not well-specified to have a PAT-incompatible area
+		 * (unmapped RAM, device memory, etc.) in that hole.
+		 */
+		free_bootmem(__pa(pcpur_ptrs[cpu] + pcpur_size),
+			     PMD_SIZE - pcpur_size);
+
+		memcpy(pcpur_ptrs[cpu], __per_cpu_load, static_size);
+	}
+
+	/* allocate address and map */
+	vm.flags = VM_ALLOC;
+	vm.size = num_possible_cpus() * PMD_SIZE;
+	vm_area_register_early(&vm, PMD_SIZE);
+
+	for_each_possible_cpu(cpu) {
+		pmd_t *pmd;
+
+		pmd = populate_extra_pmd((unsigned long)vm.addr
+					 + cpu * PMD_SIZE);
+		set_pmd(pmd, pfn_pmd(page_to_pfn(virt_to_page(pcpur_ptrs[cpu])),
+				     PAGE_KERNEL_LARGE));
+	}
+
+	/* we're ready, commit */
+	pr_info("PERCPU: Remapped at %p with large pages, static data "
+		"%zu bytes\n", vm.addr, static_size);
+
+	ret = pcpu_setup_first_chunk(pcpur_get_page, static_size, PMD_SIZE,
+				     pcpur_size - static_size, vm.addr, NULL);
+	goto out_free_ar;
+
+enomem:
+	for_each_possible_cpu(cpu)
+		if (pcpur_ptrs[cpu])
+			free_bootmem(__pa(pcpur_ptrs[cpu]), PMD_SIZE);
+	ret = -ENOMEM;
+out_free_ar:
+	free_bootmem(__pa(pcpur_ptrs), ptrs_size);
+	return ret;
+}
+#else
+static ssize_t __init setup_pcpu_remap(size_t static_size)
+{
+	return -EINVAL;
+}
+#endif
+
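To make the sizing concrete: on x86_64 with modules, PERCPU_DYNAMIC_RESERVE is 6 << PAGE_SHIFT = 24 KiB (see the percpu.h hunk below), so with, say, 108 KiB of static percpu data, pcpur_size = PFN_ALIGN(108 KiB + 24 KiB) = 132 KiB. That is well under PMD_SIZE (2 MiB), so each CPU gets one 2 MiB page, of which 132 KiB is kept and the remaining 1916 KiB is handed back by the free_bootmem() call above. (The 108 KiB figure is illustrative, not taken from this diff.)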
+/*
+ * Embedding allocator
+ *
+ * The first chunk is sized to just contain the static area plus
+ * PERCPU_DYNAMIC_RESERVE and allocated as a contiguous area using
+ * bootmem allocator and used as-is without being mapped into vmalloc
+ * area.  This enables the first chunk to piggy back on the linear
+ * physical PMD mapping and doesn't add any additional pressure to
+ * TLB.
+ */
+static void *pcpue_ptr __initdata;
+static size_t pcpue_unit_size __initdata;
+
+static struct page * __init pcpue_get_page(unsigned int cpu, int pageno)
+{
+	return virt_to_page(pcpue_ptr + cpu * pcpue_unit_size
+			    + ((size_t)pageno << PAGE_SHIFT));
+}
+
+static ssize_t __init setup_pcpu_embed(size_t static_size)
+{
+	unsigned int cpu;
+
+	/*
+	 * If large page isn't supported, there's no benefit in doing
+	 * this.  Also, embedding allocation doesn't play well with
+	 * NUMA.
+	 */
+	if (!cpu_has_pse || pcpu_need_numa())
+		return -EINVAL;
+
+	/* allocate and copy */
+	pcpue_unit_size = PFN_ALIGN(static_size + PERCPU_DYNAMIC_RESERVE);
+	pcpue_unit_size = max(pcpue_unit_size, PCPU_MIN_UNIT_SIZE);
+	pcpue_ptr = pcpu_alloc_bootmem(0, num_possible_cpus() * pcpue_unit_size,
+				       PAGE_SIZE);
+	if (!pcpue_ptr)
+		return -ENOMEM;
+
+	for_each_possible_cpu(cpu)
+		memcpy(pcpue_ptr + cpu * pcpue_unit_size, __per_cpu_load,
+		       static_size);
+
+	/* we're ready, commit */
+	pr_info("PERCPU: Embedded %zu pages at %p, static data %zu bytes\n",
+		pcpue_unit_size >> PAGE_SHIFT, pcpue_ptr, static_size);
+
+	return pcpu_setup_first_chunk(pcpue_get_page, static_size,
+				      pcpue_unit_size,
+				      pcpue_unit_size - static_size, pcpue_ptr,
+				      NULL);
+}
+
+/*
+ * 4k page allocator
+ *
+ * This is the basic allocator.  Static percpu area is allocated
+ * page-by-page and most of initialization is done by the generic
+ * setup function.
+ */
+static struct page **pcpu4k_pages __initdata;
+static int pcpu4k_nr_static_pages __initdata;
+
+static struct page * __init pcpu4k_get_page(unsigned int cpu, int pageno)
+{
+	if (pageno < pcpu4k_nr_static_pages)
+		return pcpu4k_pages[cpu * pcpu4k_nr_static_pages + pageno];
+	return NULL;
+}
+
+static void __init pcpu4k_populate_pte(unsigned long addr)
+{
+	populate_extra_pte(addr);
+}
+
+static ssize_t __init setup_pcpu_4k(size_t static_size)
+{
+	size_t pages_size;
+	unsigned int cpu;
+	int i, j;
+	ssize_t ret;
+
+	pcpu4k_nr_static_pages = PFN_UP(static_size);
+
+	/* unaligned allocations can't be freed, round up to page size */
+	pages_size = PFN_ALIGN(pcpu4k_nr_static_pages * num_possible_cpus()
+			       * sizeof(pcpu4k_pages[0]));
+	pcpu4k_pages = alloc_bootmem(pages_size);
+
+	/* allocate and copy */
+	j = 0;
+	for_each_possible_cpu(cpu)
+		for (i = 0; i < pcpu4k_nr_static_pages; i++) {
+			void *ptr;
+
+			ptr = pcpu_alloc_bootmem(cpu, PAGE_SIZE, PAGE_SIZE);
+			if (!ptr)
+				goto enomem;
+
+			memcpy(ptr, __per_cpu_load + i * PAGE_SIZE, PAGE_SIZE);
+			pcpu4k_pages[j++] = virt_to_page(ptr);
+		}
+
+	/* we're ready, commit */
+	pr_info("PERCPU: Allocated %d 4k pages, static data %zu bytes\n",
+		pcpu4k_nr_static_pages, static_size);
+
+	ret = pcpu_setup_first_chunk(pcpu4k_get_page, static_size, 0, 0, NULL,
+				     pcpu4k_populate_pte);
+	goto out_free_ar;
+
+enomem:
+	while (--j >= 0)
+		free_bootmem(__pa(page_address(pcpu4k_pages[j])), PAGE_SIZE);
+	ret = -ENOMEM;
+out_free_ar:
+	free_bootmem(__pa(pcpu4k_pages), pages_size);
+	return ret;
+}
+
 static inline void setup_percpu_segment(int cpu)
 {
 #ifdef CONFIG_X86_32
@@ -61,38 +377,35 @@ static inline void setup_percpu_segment(int cpu)
  */
 void __init setup_per_cpu_areas(void)
 {
-	ssize_t size;
-	char *ptr;
-	int cpu;
-
-	/* Copy section for each CPU (we discard the original) */
-	size = roundup(PERCPU_ENOUGH_ROOM, PAGE_SIZE);
+	size_t static_size = __per_cpu_end - __per_cpu_start;
+	unsigned int cpu;
+	unsigned long delta;
+	size_t pcpu_unit_size;
+	ssize_t ret;
 
 	pr_info("NR_CPUS:%d nr_cpumask_bits:%d nr_cpu_ids:%d nr_node_ids:%d\n",
 		NR_CPUS, nr_cpumask_bits, nr_cpu_ids, nr_node_ids);
 
-	pr_info("PERCPU: Allocating %zd bytes of per cpu data\n", size);
+	/*
+	 * Allocate percpu area.  If PSE is supported, try to make use
+	 * of large page mappings.  Please read comments on top of
+	 * each allocator for details.
+	 */
+	ret = setup_pcpu_remap(static_size);
+	if (ret < 0)
+		ret = setup_pcpu_embed(static_size);
+	if (ret < 0)
+		ret = setup_pcpu_4k(static_size);
+	if (ret < 0)
+		panic("cannot allocate static percpu area (%zu bytes, err=%zd)",
+		      static_size, ret);
 
+	pcpu_unit_size = ret;
+
+	/* alrighty, percpu areas up and running */
+	delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
 	for_each_possible_cpu(cpu) {
-#ifndef CONFIG_NEED_MULTIPLE_NODES
-		ptr = alloc_bootmem_pages(size);
-#else
-		int node = early_cpu_to_node(cpu);
-
-		if (!node_online(node) || !NODE_DATA(node)) {
-			ptr = alloc_bootmem_pages(size);
-			pr_info("cpu %d has no node %d or node-local memory\n",
-				cpu, node);
-			pr_debug("per cpu data for cpu%d at %016lx\n",
-				 cpu, __pa(ptr));
-		} else {
-			ptr = alloc_bootmem_pages_node(NODE_DATA(node), size);
-			pr_debug("per cpu data for cpu%d on node%d at %016lx\n",
-				cpu, node, __pa(ptr));
-		}
-#endif
-
-		memcpy(ptr, __per_cpu_load, __per_cpu_end - __per_cpu_start);
-		per_cpu_offset(cpu) = ptr - __per_cpu_start;
+		per_cpu_offset(cpu) = delta + cpu * pcpu_unit_size;
 		per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
 		per_cpu(cpu_number, cpu) = cpu;
 		setup_percpu_segment(cpu);
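
The offset assignment above is what keeps static percpu references working after the move to chunks: delta rebases __per_cpu_start-relative addresses onto the first chunk. Schematically, for a hypothetical DEFINE_PER_CPU(int, foo):

	/*
	 * foo sits at some offset O from __per_cpu_start in the static image.
	 * CPU n's copy lives in unit n of the first chunk, i.e. at
	 *
	 *	pcpu_base_addr + n * pcpu_unit_size + O
	 *
	 * which equals  &foo + per_cpu_offset(n)  given
	 * per_cpu_offset(n) = delta + n * pcpu_unit_size  as set above.
	 */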
@@ -137,6 +137,23 @@ static pte_t * __init one_page_table_init(pmd_t *pmd)
 	return pte_offset_kernel(pmd, 0);
 }
 
+pmd_t * __init populate_extra_pmd(unsigned long vaddr)
+{
+	int pgd_idx = pgd_index(vaddr);
+	int pmd_idx = pmd_index(vaddr);
+
+	return one_md_table_init(swapper_pg_dir + pgd_idx) + pmd_idx;
+}
+
+pte_t * __init populate_extra_pte(unsigned long vaddr)
+{
+	int pte_idx = pte_index(vaddr);
+	pmd_t *pmd;
+
+	pmd = populate_extra_pmd(vaddr);
+	return one_page_table_init(pmd) + pte_idx;
+}
+
 static pte_t *__init page_table_kmap_check(pte_t *pte, pmd_t *pmd,
 					   unsigned long vaddr, pte_t *lastpte)
 {
+57 -23
@@ -168,34 +168,51 @@ static __ref void *spp_getpage(void)
 	return ptr;
 }
 
-void
-set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte)
+static pud_t * __init fill_pud(pgd_t *pgd, unsigned long vaddr)
+{
+	if (pgd_none(*pgd)) {
+		pud_t *pud = (pud_t *)spp_getpage();
+		pgd_populate(&init_mm, pgd, pud);
+		if (pud != pud_offset(pgd, 0))
+			printk(KERN_ERR "PAGETABLE BUG #00! %p <-> %p\n",
+			       pud, pud_offset(pgd, 0));
+	}
+	return pud_offset(pgd, vaddr);
+}
+
+static pmd_t * __init fill_pmd(pud_t *pud, unsigned long vaddr)
+{
+	if (pud_none(*pud)) {
+		pmd_t *pmd = (pmd_t *) spp_getpage();
+		pud_populate(&init_mm, pud, pmd);
+		if (pmd != pmd_offset(pud, 0))
+			printk(KERN_ERR "PAGETABLE BUG #01! %p <-> %p\n",
+			       pmd, pmd_offset(pud, 0));
+	}
+	return pmd_offset(pud, vaddr);
+}
+
+static pte_t * __init fill_pte(pmd_t *pmd, unsigned long vaddr)
+{
+	if (pmd_none(*pmd)) {
+		pte_t *pte = (pte_t *) spp_getpage();
+		pmd_populate_kernel(&init_mm, pmd, pte);
+		if (pte != pte_offset_kernel(pmd, 0))
+			printk(KERN_ERR "PAGETABLE BUG #02!\n");
+	}
+	return pte_offset_kernel(pmd, vaddr);
+}
+
+void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte)
 {
 	pud_t *pud;
 	pmd_t *pmd;
 	pte_t *pte;
 
 	pud = pud_page + pud_index(vaddr);
-	if (pud_none(*pud)) {
-		pmd = (pmd_t *) spp_getpage();
-		pud_populate(&init_mm, pud, pmd);
-		if (pmd != pmd_offset(pud, 0)) {
-			printk(KERN_ERR "PAGETABLE BUG #01! %p <-> %p\n",
-			       pmd, pmd_offset(pud, 0));
-			return;
-		}
-	}
-	pmd = pmd_offset(pud, vaddr);
-	if (pmd_none(*pmd)) {
-		pte = (pte_t *) spp_getpage();
-		pmd_populate_kernel(&init_mm, pmd, pte);
-		if (pte != pte_offset_kernel(pmd, 0)) {
-			printk(KERN_ERR "PAGETABLE BUG #02!\n");
-			return;
-		}
-	}
-	pte = pte_offset_kernel(pmd, vaddr);
+	pmd = fill_pmd(pud, vaddr);
+	pte = fill_pte(pmd, vaddr);
 
 	set_pte(pte, new_pte);
 
 	/*
@@ -205,8 +222,7 @@ set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte)
 	__flush_tlb_one(vaddr);
 }
 
-void
-set_pte_vaddr(unsigned long vaddr, pte_t pteval)
+void set_pte_vaddr(unsigned long vaddr, pte_t pteval)
 {
 	pgd_t *pgd;
 	pud_t *pud_page;
@@ -223,6 +239,24 @@ set_pte_vaddr(unsigned long vaddr, pte_t pteval)
 	set_pte_vaddr_pud(pud_page, vaddr, pteval);
 }
 
+pmd_t * __init populate_extra_pmd(unsigned long vaddr)
+{
+	pgd_t *pgd;
+	pud_t *pud;
+
+	pgd = pgd_offset_k(vaddr);
+	pud = fill_pud(pgd, vaddr);
+	return fill_pmd(pud, vaddr);
+}
+
+pte_t * __init populate_extra_pte(unsigned long vaddr)
+{
+	pmd_t *pmd;
+
+	pmd = populate_extra_pmd(vaddr);
+	return fill_pte(pmd, vaddr);
+}
+
 /*
  * Create large page table mappings for a range of physical addresses.
  */
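The net effect of this refactor: the allocate-if-empty-then-offset ladder that used to be open-coded in set_pte_vaddr_pud() now lives in fill_pud()/fill_pmd()/fill_pte(), so the new populate_extra_pmd()/populate_extra_pte() hooks (the ones the percpu remap and 4k allocators call above) reuse it instead of duplicating it.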
+1 -1
@@ -363,7 +363,7 @@ int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
 	if (!bt->sequence)
 		goto err;
 
-	bt->msg_data = __alloc_percpu(BLK_TN_MAX_MSG);
+	bt->msg_data = __alloc_percpu(BLK_TN_MAX_MSG, __alignof__(char));
 	if (!bt->msg_data)
 		goto err;
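
__alloc_percpu() grows an explicit alignment parameter in this merge, so raw callers like this one now state the alignment they need. The common case goes through the alloc_percpu() macro (see the percpu.h hunk below), which supplies the type's natural alignment. An illustration with a hypothetical struct (not from this diff):

	struct blk_stats *st = alloc_percpu(struct blk_stats);
	/* ...is equivalent to... */
	struct blk_stats *st2 = (struct blk_stats *)
		__alloc_percpu(sizeof(struct blk_stats),
			       __alignof__(struct blk_stats));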
@@ -516,12 +516,12 @@ int acpi_processor_preregister_performance(
 			continue;
 		}
 
-		if (!performance || !percpu_ptr(performance, i)) {
+		if (!performance || !per_cpu_ptr(performance, i)) {
 			retval = -EINVAL;
 			continue;
 		}
 
-		pr->performance = percpu_ptr(performance, i);
+		pr->performance = per_cpu_ptr(performance, i);
 		cpumask_set_cpu(i, pr->performance->shared_cpu_map);
 		if (acpi_processor_get_psd(pr)) {
 			retval = -EINVAL;
+19 -17
@@ -65,23 +65,20 @@ extern void free_bootmem(unsigned long addr, unsigned long size);
 #define BOOTMEM_DEFAULT		0
 #define BOOTMEM_EXCLUSIVE	(1<<0)
 
+extern int reserve_bootmem(unsigned long addr,
+			   unsigned long size,
+			   int flags);
 extern int reserve_bootmem_node(pg_data_t *pgdat,
 				unsigned long physaddr,
 				unsigned long size,
 				int flags);
-#ifndef CONFIG_HAVE_ARCH_BOOTMEM_NODE
-extern int reserve_bootmem(unsigned long addr, unsigned long size, int flags);
-#endif
 
-extern void *__alloc_bootmem_nopanic(unsigned long size,
+extern void *__alloc_bootmem(unsigned long size,
 				     unsigned long align,
 				     unsigned long goal);
-extern void *__alloc_bootmem(unsigned long size,
+extern void *__alloc_bootmem_nopanic(unsigned long size,
 				     unsigned long align,
 				     unsigned long goal);
-extern void *__alloc_bootmem_low(unsigned long size,
-				 unsigned long align,
-				 unsigned long goal);
 extern void *__alloc_bootmem_node(pg_data_t *pgdat,
 				  unsigned long size,
 				  unsigned long align,
@@ -90,30 +87,35 @@ extern void *__alloc_bootmem_node_nopanic(pg_data_t *pgdat,
 					  unsigned long size,
 					  unsigned long align,
 					  unsigned long goal);
+extern void *__alloc_bootmem_low(unsigned long size,
+				 unsigned long align,
+				 unsigned long goal);
 extern void *__alloc_bootmem_low_node(pg_data_t *pgdat,
 				      unsigned long size,
 				      unsigned long align,
 				      unsigned long goal);
-#ifndef CONFIG_HAVE_ARCH_BOOTMEM_NODE
+
 #define alloc_bootmem(x) \
 	__alloc_bootmem(x, SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS))
 #define alloc_bootmem_nopanic(x) \
 	__alloc_bootmem_nopanic(x, SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS))
-#define alloc_bootmem_low(x) \
-	__alloc_bootmem_low(x, SMP_CACHE_BYTES, 0)
 #define alloc_bootmem_pages(x) \
 	__alloc_bootmem(x, PAGE_SIZE, __pa(MAX_DMA_ADDRESS))
 #define alloc_bootmem_pages_nopanic(x) \
 	__alloc_bootmem_nopanic(x, PAGE_SIZE, __pa(MAX_DMA_ADDRESS))
-#define alloc_bootmem_low_pages(x) \
-	__alloc_bootmem_low(x, PAGE_SIZE, 0)
 #define alloc_bootmem_node(pgdat, x) \
 	__alloc_bootmem_node(pgdat, x, SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS))
 #define alloc_bootmem_pages_node(pgdat, x) \
 	__alloc_bootmem_node(pgdat, x, PAGE_SIZE, __pa(MAX_DMA_ADDRESS))
+#define alloc_bootmem_pages_node_nopanic(pgdat, x) \
+	__alloc_bootmem_node_nopanic(pgdat, x, PAGE_SIZE, __pa(MAX_DMA_ADDRESS))
+
+#define alloc_bootmem_low(x) \
+	__alloc_bootmem_low(x, SMP_CACHE_BYTES, 0)
+#define alloc_bootmem_low_pages(x) \
+	__alloc_bootmem_low(x, PAGE_SIZE, 0)
 #define alloc_bootmem_low_pages_node(pgdat, x) \
 	__alloc_bootmem_low_node(pgdat, x, PAGE_SIZE, 0)
-#endif /* !CONFIG_HAVE_ARCH_BOOTMEM_NODE */
 
 extern int reserve_bootmem_generic(unsigned long addr, unsigned long size,
 				   int flags);
+73 -27
@@ -76,52 +76,98 @@
 
 #ifdef CONFIG_SMP
 
+#ifdef CONFIG_HAVE_DYNAMIC_PER_CPU_AREA
+
+/* minimum unit size, also is the maximum supported allocation size */
+#define PCPU_MIN_UNIT_SIZE		(16UL << PAGE_SHIFT)
+
+/*
+ * PERCPU_DYNAMIC_RESERVE indicates the amount of free area to piggy
+ * back on the first chunk if arch is manually allocating and mapping
+ * it for faster access (as a part of large page mapping for example).
+ * Note that dynamic percpu allocator covers both static and dynamic
+ * areas, so these values are bigger than PERCPU_MODULE_RESERVE.
+ *
+ * On typical configuration with modules, the following values leave
+ * about 8k of free space on the first chunk after boot on both x86_32
+ * and 64 when module support is enabled.  When module support is
+ * disabled, it's much tighter.
+ */
+#ifndef PERCPU_DYNAMIC_RESERVE
+#  if BITS_PER_LONG > 32
+#    ifdef CONFIG_MODULES
+#      define PERCPU_DYNAMIC_RESERVE	(6 << PAGE_SHIFT)
+#    else
+#      define PERCPU_DYNAMIC_RESERVE	(4 << PAGE_SHIFT)
+#    endif
+#  else
+#    ifdef CONFIG_MODULES
+#      define PERCPU_DYNAMIC_RESERVE	(4 << PAGE_SHIFT)
+#    else
+#      define PERCPU_DYNAMIC_RESERVE	(2 << PAGE_SHIFT)
+#    endif
+#  endif
+#endif	/* PERCPU_DYNAMIC_RESERVE */
+
+extern void *pcpu_base_addr;
+
+typedef struct page * (*pcpu_get_page_fn_t)(unsigned int cpu, int pageno);
+typedef void (*pcpu_populate_pte_fn_t)(unsigned long addr);
+
+extern size_t __init pcpu_setup_first_chunk(pcpu_get_page_fn_t get_page_fn,
+				size_t static_size, size_t unit_size,
+				size_t free_size, void *base_addr,
+				pcpu_populate_pte_fn_t populate_pte_fn);
+
+/*
+ * Use this to get to a cpu's version of the per-cpu object
+ * dynamically allocated.  Non-atomic access to the current CPU's
+ * version should probably be combined with get_cpu()/put_cpu().
+ */
+#define per_cpu_ptr(ptr, cpu)	SHIFT_PERCPU_PTR((ptr), per_cpu_offset((cpu)))
+
+#else /* CONFIG_HAVE_DYNAMIC_PER_CPU_AREA */
+
 struct percpu_data {
 	void *ptrs[1];
 };
 
 #define __percpu_disguise(pdata) (struct percpu_data *)~(unsigned long)(pdata)
+
 /*
  * Use this to get to a cpu's version of the per-cpu object dynamically
  * allocated. Non-atomic access to the current CPU's version should
  * probably be combined with get_cpu()/put_cpu().
  */
-#define percpu_ptr(ptr, cpu)						\
+#define per_cpu_ptr(ptr, cpu)						\
 ({									\
 	struct percpu_data *__p = __percpu_disguise(ptr);		\
 	(__typeof__(ptr))__p->ptrs[(cpu)];				\
 })
 
-extern void *__percpu_alloc_mask(size_t size, gfp_t gfp, cpumask_t *mask);
-extern void percpu_free(void *__pdata);
+#endif /* CONFIG_HAVE_DYNAMIC_PER_CPU_AREA */
+
+extern void *__alloc_percpu(size_t size, size_t align);
+extern void free_percpu(void *__pdata);
 
 #else /* CONFIG_SMP */
 
-#define percpu_ptr(ptr, cpu) ({ (void)(cpu); (ptr); })
+#define per_cpu_ptr(ptr, cpu) ({ (void)(cpu); (ptr); })
 
-static __always_inline void *__percpu_alloc_mask(size_t size, gfp_t gfp, cpumask_t *mask)
+static inline void *__alloc_percpu(size_t size, size_t align)
 {
-	return kzalloc(size, gfp);
+	/*
+	 * Can't easily make larger alignment work with kmalloc.  WARN
+	 * on it.  Larger alignment should only be used for module
+	 * percpu sections on SMP for which this path isn't used.
+	 */
+	WARN_ON_ONCE(align > __alignof__(unsigned long long));
+	return kzalloc(size, GFP_KERNEL);
 }
 
-static inline void percpu_free(void *__pdata)
+static inline void free_percpu(void *p)
 {
-	kfree(__pdata);
+	kfree(p);
 }
 
 #endif /* CONFIG_SMP */
 
-#define percpu_alloc_mask(size, gfp, mask) \
-	__percpu_alloc_mask((size), (gfp), &(mask))
-
-#define percpu_alloc(size, gfp) percpu_alloc_mask((size), (gfp), cpu_online_map)
-
-/* (legacy) interface for use without CPU hotplug handling */
-
-#define __alloc_percpu(size)	percpu_alloc_mask((size), GFP_KERNEL, \
-						  cpu_possible_map)
-#define alloc_percpu(type)	(type *)__alloc_percpu(sizeof(type))
-#define free_percpu(ptr)	percpu_free((ptr))
-#define per_cpu_ptr(ptr, cpu)	percpu_ptr((ptr), (cpu))
+#define alloc_percpu(type)	(type *)__alloc_percpu(sizeof(type), \
+						       __alignof__(type))
 
 #endif /* __LINUX_PERCPU_H */
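
Taken together, the interface after this hunk is __alloc_percpu(size, align) / free_percpu() in both SMP and UP builds, with per_cpu_ptr() as the single accessor. A minimal usage sketch (hypothetical stats struct, not from this diff):

	struct my_stats {
		u64	packets;
		u64	bytes;
	};

	static struct my_stats *stats;

	int init_stats(void)
	{
		stats = alloc_percpu(struct my_stats);	/* sized & aligned per type */
		if (!stats)
			return -ENOMEM;
		return 0;
	}

	void account(unsigned int len)
	{
		struct my_stats *s = per_cpu_ptr(stats, get_cpu());

		s->packets++;			/* non-atomic, but CPU-local */
		s->bytes += len;
		put_cpu();
	}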
@@ -95,6 +95,9 @@ extern struct vm_struct *remove_vm_area(const void *addr);
 
 extern int map_vm_area(struct vm_struct *area, pgprot_t prot,
 			struct page ***pages);
+extern int map_kernel_range_noflush(unsigned long start, unsigned long size,
+				    pgprot_t prot, struct page **pages);
+extern void unmap_kernel_range_noflush(unsigned long addr, unsigned long size);
 extern void unmap_kernel_range(unsigned long addr, unsigned long size);
 
 /* Allocate/destroy a 'vmalloc' VM area. */
@@ -110,5 +113,6 @@ extern long vwrite(char *buf, char *addr, unsigned long count);
 */
 extern rwlock_t vmlist_lock;
 extern struct vm_struct *vmlist;
+extern __init void vm_area_register_early(struct vm_struct *vm, size_t align);
 
 #endif /* _LINUX_VMALLOC_H */
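
vm_area_register_early() is what lets boot-time code (the alpha console remap above, the percpu remap allocator) claim vmalloc address space before vmalloc itself is initialized: the caller fills in flags and size, and the function assigns addr and links the area onto vmlist. A sketch following the pattern visible in both call sites (the helper name is hypothetical):

	static struct vm_struct early_vm;	/* must persist: linked into vmlist */

	void __init reserve_early_range(unsigned long nr_pages)
	{
		early_vm.flags = VM_ALLOC;
		early_vm.size  = nr_pages << PAGE_SHIFT;  /* address space needed */
		vm_area_register_early(&early_vm, PAGE_SIZE);
		/* early_vm.addr now holds a reserved, not-yet-mapped range */
	}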
+49 -15
@@ -51,6 +51,7 @@
 #include <linux/tracepoint.h>
 #include <linux/ftrace.h>
 #include <linux/async.h>
+#include <linux/percpu.h>
 
 #if 0
 #define DEBUGP printk
@@ -366,6 +367,34 @@ static struct module *find_module(const char *name)
 }
 
 #ifdef CONFIG_SMP
+
+#ifdef CONFIG_HAVE_DYNAMIC_PER_CPU_AREA
+
+static void *percpu_modalloc(unsigned long size, unsigned long align,
+			     const char *name)
+{
+	void *ptr;
+
+	if (align > PAGE_SIZE) {
+		printk(KERN_WARNING "%s: per-cpu alignment %li > %li\n",
+		       name, align, PAGE_SIZE);
+		align = PAGE_SIZE;
+	}
+
+	ptr = __alloc_percpu(size, align);
+	if (!ptr)
+		printk(KERN_WARNING
+		       "Could not allocate %lu bytes percpu data\n", size);
+	return ptr;
+}
+
+static void percpu_modfree(void *freeme)
+{
+	free_percpu(freeme);
+}
+
+#else /* ... !CONFIG_HAVE_DYNAMIC_PER_CPU_AREA */
+
 /* Number of blocks used and allocated. */
 static unsigned int pcpu_num_used, pcpu_num_allocated;
 /* Size of each block.  -ve means used. */
@@ -480,21 +509,6 @@ static void percpu_modfree(void *freeme)
 	}
 }
 
-static unsigned int find_pcpusec(Elf_Ehdr *hdr,
-				 Elf_Shdr *sechdrs,
-				 const char *secstrings)
-{
-	return find_sec(hdr, sechdrs, secstrings, ".data.percpu");
-}
-
-static void percpu_modcopy(void *pcpudest, const void *from, unsigned long size)
-{
-	int cpu;
-
-	for_each_possible_cpu(cpu)
-		memcpy(pcpudest + per_cpu_offset(cpu), from, size);
-}
-
 static int percpu_modinit(void)
 {
 	pcpu_num_used = 2;
@@ -513,7 +527,26 @@ static int percpu_modinit(void)
 	return 0;
 }
 __initcall(percpu_modinit);
+
+#endif /* CONFIG_HAVE_DYNAMIC_PER_CPU_AREA */
+
+static unsigned int find_pcpusec(Elf_Ehdr *hdr,
+				 Elf_Shdr *sechdrs,
+				 const char *secstrings)
+{
+	return find_sec(hdr, sechdrs, secstrings, ".data.percpu");
+}
+
+static void percpu_modcopy(void *pcpudest, const void *from, unsigned long size)
+{
+	int cpu;
+
+	for_each_possible_cpu(cpu)
+		memcpy(pcpudest + per_cpu_offset(cpu), from, size);
+}
+
 #else /* ... !CONFIG_SMP */
+
 static inline void *percpu_modalloc(unsigned long size, unsigned long align,
 				    const char *name)
 {
@@ -535,6 +568,7 @@ static inline void percpu_modcopy(void *pcpudst, const void *src,
 	/* pcpusec should be 0, and size of that section should be 0. */
 	BUG_ON(size != 0);
 }
+
 #endif /* CONFIG_SMP */
 
 #define MODINFO_ATTR(field) \
+3 -3
@@ -9476,7 +9476,7 @@ cpuacct_destroy(struct cgroup_subsys *ss, struct cgroup *cgrp)
 
 static u64 cpuacct_cpuusage_read(struct cpuacct *ca, int cpu)
 {
-	u64 *cpuusage = percpu_ptr(ca->cpuusage, cpu);
+	u64 *cpuusage = per_cpu_ptr(ca->cpuusage, cpu);
 	u64 data;
 
 #ifndef CONFIG_64BIT
@@ -9495,7 +9495,7 @@ static u64 cpuacct_cpuusage_read(struct cpuacct *ca, int cpu)
 
 static void cpuacct_cpuusage_write(struct cpuacct *ca, int cpu, u64 val)
 {
-	u64 *cpuusage = percpu_ptr(ca->cpuusage, cpu);
+	u64 *cpuusage = per_cpu_ptr(ca->cpuusage, cpu);
 
 #ifndef CONFIG_64BIT
 	/*
@@ -9591,7 +9591,7 @@ static void cpuacct_charge(struct task_struct *tsk, u64 cputime)
 	ca = task_ca(tsk);
 
 	for (; ca; ca = ca->parent) {
-		u64 *cpuusage = percpu_ptr(ca->cpuusage, cpu);
+		u64 *cpuusage = per_cpu_ptr(ca->cpuusage, cpu);
 		*cpuusage += cputime;
 	}
 }
@@ -170,7 +170,7 @@ int __stop_machine(int (*fn)(void *), void *data, const struct cpumask *cpus)
 	 * doesn't hit this CPU until we're ready. */
 	get_cpu();
 	for_each_online_cpu(i) {
-		sm_work = percpu_ptr(stop_machine_work, i);
+		sm_work = per_cpu_ptr(stop_machine_work, i);
 		INIT_WORK(sm_work, stop_cpu);
 		queue_work_on(i, stop_machine_wq, sm_work);
 	}
@@ -30,6 +30,10 @@ obj-$(CONFIG_FAILSLAB) += failslab.o
 obj-$(CONFIG_MEMORY_HOTPLUG) += memory_hotplug.o
 obj-$(CONFIG_FS_XIP) += filemap_xip.o
 obj-$(CONFIG_MIGRATION) += migrate.o
+ifdef CONFIG_HAVE_DYNAMIC_PER_CPU_AREA
+obj-$(CONFIG_SMP) += percpu.o
+else
 obj-$(CONFIG_SMP) += allocpercpu.o
+endif
 obj-$(CONFIG_QUICKLIST) += quicklist.o
 obj-$(CONFIG_CGROUP_MEM_RES_CTLR) += memcontrol.o page_cgroup.o
Some files were not shown because too many files have changed in this diff.