Merge tag 'percpu-for-6.8' of git://git.kernel.org/pub/scm/linux/kernel/git/dennis/percpu
Pull percpu updates from Dennis Zhou:
 "Enable percpu page allocator for RISC-V. There are RISC-V
  configurations with sparse NUMA configurations and small vmalloc
  space causing dynamic percpu allocations to fail as the backing
  chunk stride is too far apart"

* tag 'percpu-for-6.8' of git://git.kernel.org/pub/scm/linux/kernel/git/dennis/percpu:
  riscv: Enable pcpu page first chunk allocator
  mm: Introduce flush_cache_vmap_early()
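For context on the failure mode the message describes (a sketch from general
percpu behavior, not text from this commit): the embed first-chunk allocator
places each NUMA group's per-cpu unit in the linear map and then requires every
later dynamic chunk to occupy vmalloc space with the same inter-group stride,
so nodes that are far apart physically can demand more vmalloc space per chunk
than exists. An illustrative calculation, standalone C with invented addresses:

    /* Illustrative only: all addresses and sizes are invented. */
    #include <stdio.h>

    int main(void)
    {
        /* linear-map addresses of two nodes' first-chunk units */
        unsigned long long node0_unit = 0x0080000000ULL; /*   2 GiB */
        unsigned long long node1_unit = 0x2000000000ULL; /* 128 GiB */

        /* embed allocator: every dynamic chunk repeats this stride */
        unsigned long long stride = node1_unit - node0_unit;

        /* a small vmalloc area cannot hold even one such chunk */
        unsigned long long vmalloc_area = 64ULL << 30;   /* 64 GiB */
        printf("stride: %llu GiB, chunks that fit: %llu\n",
               stride >> 30, vmalloc_area / stride);
        return 0;
    }

The page first-chunk allocator sidesteps the stride by building the first
chunk out of individually vmapped pages, which is what the RISC-V enablement
below switches on.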
@@ -40,6 +40,7 @@ void dma_cache_wback(phys_addr_t start, unsigned long sz);
 
 /* TBD: optimize this */
 #define flush_cache_vmap(start, end)            flush_cache_all()
+#define flush_cache_vmap_early(start, end)      do { } while (0)
 #define flush_cache_vunmap(start, end)          flush_cache_all()
 
 #define flush_cache_dup_mm(mm)                  /* called on fork (VIVT only) */

@@ -340,6 +340,8 @@ static inline void flush_cache_vmap(unsigned long start, unsigned long end)
 		dsb(ishst);
 }
 
+#define flush_cache_vmap_early(start, end)      do { } while (0)
+
 static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
 {
 	if (!cache_is_vipt_nonaliasing())

@@ -43,6 +43,7 @@ static inline void flush_anon_page(struct vm_area_struct *vma,
  */
 extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
 #define flush_cache_vmap(start, end)            cache_wbinv_all()
+#define flush_cache_vmap_early(start, end)      do { } while (0)
 #define flush_cache_vunmap(start, end)          cache_wbinv_all()
 
 #define flush_icache_range(start, end)          cache_wbinv_range(start, end)

@@ -41,6 +41,7 @@ void flush_icache_mm_range(struct mm_struct *mm,
 void flush_icache_deferred(struct mm_struct *mm);
 
 #define flush_cache_vmap(start, end)            do { } while (0)
+#define flush_cache_vmap_early(start, end)      do { } while (0)
 #define flush_cache_vunmap(start, end)          do { } while (0)
 
 #define copy_to_user_page(vma, page, vaddr, dst, src, len) \

@@ -191,6 +191,7 @@ extern void cache_push_v(unsigned long vaddr, int len);
 #define flush_cache_all() __flush_cache_all()
 
 #define flush_cache_vmap(start, end)            flush_cache_all()
+#define flush_cache_vmap_early(start, end)      do { } while (0)
 #define flush_cache_vunmap(start, end)          flush_cache_all()
 
 static inline void flush_cache_mm(struct mm_struct *mm)

@@ -97,6 +97,8 @@ static inline void flush_cache_vmap(unsigned long start, unsigned long end)
 		__flush_cache_vmap();
 }
 
+#define flush_cache_vmap_early(start, end)      do { } while (0)
+
 extern void (*__flush_cache_vunmap)(void);
 
 static inline void flush_cache_vunmap(unsigned long start, unsigned long end)

@@ -38,6 +38,7 @@ void flush_icache_pages(struct vm_area_struct *vma, struct page *page,
 #define flush_icache_pages flush_icache_pages
 
 #define flush_cache_vmap(start, end)            flush_dcache_range(start, end)
+#define flush_cache_vmap_early(start, end)      do { } while (0)
 #define flush_cache_vunmap(start, end)          flush_dcache_range(start, end)
 
 extern void copy_to_user_page(struct vm_area_struct *vma, struct page *page,

@@ -41,6 +41,7 @@ void flush_kernel_vmap_range(void *vaddr, int size);
 void invalidate_kernel_vmap_range(void *vaddr, int size);
 
 #define flush_cache_vmap(start, end)            flush_cache_all()
+#define flush_cache_vmap_early(start, end)      do { } while (0)
 #define flush_cache_vunmap(start, end)          flush_cache_all()
 
 void flush_dcache_folio(struct folio *folio);

@@ -416,7 +416,9 @@ config NUMA
 	depends on SMP && MMU
 	select ARCH_SUPPORTS_NUMA_BALANCING
 	select GENERIC_ARCH_NUMA
+	select HAVE_SETUP_PER_CPU_AREA
 	select NEED_PER_CPU_EMBED_FIRST_CHUNK
+	select NEED_PER_CPU_PAGE_FIRST_CHUNK
 	select OF_NUMA
 	select USE_PERCPU_NUMA_NODE_ID
 	help

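Background on the two selects added here (kernel-wide behavior, summarized
rather than quoted from this series): HAVE_SETUP_PER_CPU_AREA opts RISC-V into
the NUMA-aware setup_per_cpu_areas() shared via GENERIC_ARCH_NUMA, and
NEED_PER_CPU_PAGE_FIRST_CHUNK builds pcpu_page_first_chunk(), so that setup
can fall back to page-granular first-chunk mapping when embedding fails, or be
forced to it with the existing percpu_alloc=page boot parameter.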
@@ -37,7 +37,8 @@ static inline void flush_dcache_page(struct page *page)
 	flush_icache_mm(vma->vm_mm, 0)
 
 #ifdef CONFIG_64BIT
-#define flush_cache_vmap(start, end)    flush_tlb_kernel_range(start, end)
+#define flush_cache_vmap(start, end)            flush_tlb_kernel_range(start, end)
+#define flush_cache_vmap_early(start, end)      local_flush_tlb_kernel_range(start, end)
 #endif
 
 #ifndef CONFIG_SMP

@@ -41,6 +41,7 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr);
 void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
 		     unsigned long end);
 void flush_tlb_kernel_range(unsigned long start, unsigned long end);
+void local_flush_tlb_kernel_range(unsigned long start, unsigned long end);
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 #define __HAVE_ARCH_FLUSH_PMD_TLB_RANGE
 void flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start,

@@ -441,6 +441,14 @@ static void __init kasan_shallow_populate(void *start, void *end)
 	kasan_shallow_populate_pgd(vaddr, vend);
 }
 
+#ifdef CONFIG_KASAN_VMALLOC
+void __init kasan_populate_early_vm_area_shadow(void *start, unsigned long size)
+{
+	kasan_populate(kasan_mem_to_shadow(start),
+		       kasan_mem_to_shadow(start + size));
+}
+#endif
+
 static void __init create_tmp_mapping(void)
 {
 	void *ptr;

@@ -66,6 +66,11 @@ static inline void local_flush_tlb_range_asid(unsigned long start,
 		local_flush_tlb_range_threshold_asid(start, size, stride, asid);
 }
 
+void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
+{
+	local_flush_tlb_range_asid(start, end, PAGE_SIZE, FLUSH_TLB_NO_ASID);
+}
+
 static void __ipi_flush_tlb_all(void *info)
 {
 	local_flush_tlb_all();

@@ -90,6 +90,7 @@ extern void copy_from_user_page(struct vm_area_struct *vma,
 				unsigned long len);
 
 #define flush_cache_vmap(start, end)            local_flush_cache_all(NULL)
+#define flush_cache_vmap_early(start, end)      do { } while (0)
 #define flush_cache_vunmap(start, end)          local_flush_cache_all(NULL)
 
 #define flush_dcache_mmap_lock(mapping)         do { } while (0)

@@ -48,6 +48,7 @@ static inline void flush_dcache_page(struct page *page)
 #define flush_dcache_mmap_unlock(mapping)       do { } while (0)
 
 #define flush_cache_vmap(start, end)            flush_cache_all()
+#define flush_cache_vmap_early(start, end)      do { } while (0)
 #define flush_cache_vunmap(start, end)          flush_cache_all()
 
 /* When a context switch happens we must flush all user windows so that

@@ -75,6 +75,7 @@ void flush_ptrace_access(struct vm_area_struct *, struct page *,
 #define flush_dcache_mmap_unlock(mapping)       do { } while (0)
 
 #define flush_cache_vmap(start, end)            do { } while (0)
+#define flush_cache_vmap_early(start, end)      do { } while (0)
 #define flush_cache_vunmap(start, end)          do { } while (0)
 
 #endif /* !__ASSEMBLY__ */

@@ -116,8 +116,9 @@ void flush_cache_page(struct vm_area_struct*,
 #define flush_cache_mm(mm)              flush_cache_all()
 #define flush_cache_dup_mm(mm)          flush_cache_mm(mm)
 
-#define flush_cache_vmap(start,end)     flush_cache_all()
-#define flush_cache_vunmap(start,end)   flush_cache_all()
+#define flush_cache_vmap(start,end)             flush_cache_all()
+#define flush_cache_vmap_early(start,end)       do { } while (0)
+#define flush_cache_vunmap(start,end)           flush_cache_all()
 
 void flush_dcache_folio(struct folio *folio);
 #define flush_dcache_folio flush_dcache_folio

@@ -140,6 +141,7 @@ void local_flush_cache_page(struct vm_area_struct *vma,
 #define flush_cache_dup_mm(mm)                  do { } while (0)
 
 #define flush_cache_vmap(start,end)             do { } while (0)
+#define flush_cache_vmap_early(start,end)       do { } while (0)
 #define flush_cache_vunmap(start,end)           do { } while (0)
 
 #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE	0

@@ -91,6 +91,12 @@ static inline void flush_cache_vmap(unsigned long start, unsigned long end)
 }
 #endif
 
+#ifndef flush_cache_vmap_early
+static inline void flush_cache_vmap_early(unsigned long start, unsigned long end)
+{
+}
+#endif
+
 #ifndef flush_cache_vunmap
 static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
 {

@@ -3333,13 +3333,7 @@ int __init pcpu_page_first_chunk(size_t reserved_size, pcpu_fc_cpu_to_node_fn_t
 		if (rc < 0)
 			panic("failed to map percpu area, err=%d\n", rc);
 
-		/*
-		 * FIXME: Archs with virtual cache should flush local
-		 * cache for the linear mapping here - something
-		 * equivalent to flush_cache_vmap() on the local cpu.
-		 * flush_cache_vmap() can't be used as most supporting
-		 * data structures are not set up yet.
-		 */
+		flush_cache_vmap_early(unit_addr, unit_addr + ai->unit_size);
 
 		/* copy static data */
 		memcpy((void *)unit_addr, __per_cpu_load, ai->static_size);
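The deleted FIXME also documents why a new hook was needed at all:
pcpu_page_first_chunk() runs from setup_per_cpu_areas(), before secondary CPUs
are online and before the data structures behind flush_cache_vmap() exist, so
flush_cache_vmap_early() must stay strictly CPU-local. In this series riscv
wires it to local_flush_tlb_kernel_range(), mirroring its flush_cache_vmap();
every other architecture defines it as a no-op.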