Merge branch 'akpm' (patches from Andrew)
Merge misc updates from Andrew Morton:

 - a few misc things

 - ocfs2 updates

 - most of MM

* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (159 commits)
  tools/testing/selftests/proc/proc-self-syscall.c: remove duplicate include
  proc: more robust bulk read test
  proc: test /proc/*/maps, smaps, smaps_rollup, statm
  proc: use seq_puts() everywhere
  proc: read kernel cpu stat pointer once
  proc: remove unused argument in proc_pid_lookup()
  fs/proc/thread_self.c: code cleanup for proc_setup_thread_self()
  fs/proc/self.c: code cleanup for proc_setup_self()
  proc: return exit code 4 for skipped tests
  mm,mremap: bail out earlier in mremap_to under map pressure
  mm/sparse: fix a bad comparison
  mm/memory.c: do_fault: avoid usage of stale vm_area_struct
  writeback: fix inode cgroup switching comment
  mm/huge_memory.c: fix "orig_pud" set but not used
  mm/hotplug: fix an imbalance with DEBUG_PAGEALLOC
  mm/memcontrol.c: fix bad line in comment
  mm/cma.c: cma_declare_contiguous: correct err handling
  mm/page_ext.c: fix an imbalance with kmemleak
  mm/compaction: pass pgdat to too_many_isolated() instead of zone
  mm: remove zone_lru_lock() function, access ->lru_lock directly
  ...
@@ -1189,6 +1189,10 @@ PAGE_SIZE multiple when read back.
 		Amount of cached filesystem data that was modified and
 		is currently being written back to disk
 
+	  anon_thp
+		Amount of memory used in anonymous mappings backed by
+		transparent hugepages
+
 	  inactive_anon, active_anon, inactive_file, active_file, unevictable
 		Amount of memory, swap-backed and filesystem-backed,
 		on the internal memory management lists used by the
@@ -1248,6 +1252,18 @@ PAGE_SIZE multiple when read back.
 
 		Amount of reclaimed lazyfree pages
 
+	  thp_fault_alloc
+
+		Number of transparent hugepages which were allocated to satisfy
+		a page fault, including COW faults. This counter is not present
+		when CONFIG_TRANSPARENT_HUGEPAGE is not set.
+
+	  thp_collapse_alloc
+
+		Number of transparent hugepages which were allocated to allow
+		collapsing an existing range of pages. This counter is not
+		present when CONFIG_TRANSPARENT_HUGEPAGE is not set.
+
   memory.swap.current
 	A read-only single value file which exists on non-root
 	cgroups.
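Both new entries surface as plain "name value" lines in memory.stat, so user space can read them with a trivial parser. A minimal sketch in C; the cgroup path "mygroup" is a hypothetical example, not something from this patch:

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            /* hypothetical cgroup; substitute a real v2 cgroup directory */
            FILE *f = fopen("/sys/fs/cgroup/mygroup/memory.stat", "r");
            char key[64];
            unsigned long long val;

            if (!f)
                    return 1;
            while (fscanf(f, "%63s %llu", key, &val) == 2) {
                    if (!strcmp(key, "thp_fault_alloc") ||
                        !strcmp(key, "thp_collapse_alloc"))
                            printf("%s = %llu\n", key, val);
            }
            fclose(f);
            return 0;
    }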
@@ -75,9 +75,10 @@ number of times a page is mapped.
     20. NOPAGE
     21. KSM
     22. THP
-    23. BALLOON
+    23. OFFLINE
     24. ZERO_PAGE
     25. IDLE
+    26. PGTABLE
 
  * ``/proc/kpagecgroup``.  This file contains a 64-bit inode number of the
    memory cgroup each page is charged to, indexed by PFN. Only available when
@@ -118,8 +119,8 @@ Short descriptions to the page flags
     identical memory pages dynamically shared between one or more processes
 22 - THP
     contiguous pages which construct transparent hugepages
-23 - BALLOON
-    balloon compaction page
+23 - OFFLINE
+    page is logically offline
 24 - ZERO_PAGE
     zero page for pfn_zero or huge_zero page
 25 - IDLE
@@ -128,6 +129,8 @@ Short descriptions to the page flags
     Note that this flag may be stale in case the page was accessed via
     a PTE. To make sure the flag is up-to-date one has to read
     ``/sys/kernel/mm/page_idle/bitmap`` first.
+26 - PGTABLE
+    page is in use as a page table
 
 IO related page flags
 ---------------------
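Since /proc/kpageflags is an array of little-endian 64-bit flag words indexed by PFN, the two new bits can be tested directly. A hedged sketch; the PFN is an arbitrary example and reading this file requires root:

    #include <stdio.h>
    #include <stdint.h>
    #include <fcntl.h>
    #include <unistd.h>

    #define KPF_OFFLINE 23  /* bit numbers from the table above */
    #define KPF_PGTABLE 26

    int main(void)
    {
            uint64_t pfn = 0x1000; /* arbitrary example PFN */
            uint64_t flags;
            int fd = open("/proc/kpageflags", O_RDONLY);

            if (fd < 0 || pread(fd, &flags, 8, pfn * 8) != 8)
                    return 1;
            printf("pfn 0x%llx: offline=%d pgtable=%d\n",
                   (unsigned long long)pfn,
                   !!(flags & (1ULL << KPF_OFFLINE)),
                   !!(flags & (1ULL << KPF_PGTABLE)));
            close(fd);
            return 0;
    }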
@@ -107,9 +107,9 @@ Under below explanation, we assume CONFIG_MEM_RES_CTRL_SWAP=y.
 
 8. LRU
         Each memcg has its own private LRU. Now, its handling is under global
-	VM's control (means that it's handled under global zone_lru_lock).
+	VM's control (means that it's handled under global pgdat->lru_lock).
 	Almost all routines around memcg's LRU is called by global LRU's
-	list management functions under zone_lru_lock().
+	list management functions under pgdat->lru_lock.
 
 	A special function is mem_cgroup_isolate_pages(). This scans
 	memcg's private LRU and call __isolate_lru_page() to extract a page
@@ -267,11 +267,11 @@ When oom event notifier is registered, event will be delivered.
 	Other lock order is following:
 	PG_locked.
 	mm->page_table_lock
-	    zone_lru_lock
+	    pgdat->lru_lock
 	  lock_page_cgroup.
 	In many cases, just lock_page_cgroup() is called.
 	per-zone-per-cgroup LRU (cgroup's private LRU) is just guarded by
-	zone_lru_lock, it has no lock of its own.
+	pgdat->lru_lock, it has no lock of its own.
 
 2.7 Kernel Memory Extension (CONFIG_MEMCG_KMEM)
 
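To illustrate that ordering, a kernel-style sketch (the function name is invented, and this is not code from the patch): take the page lock first, then the node's LRU lock, and release in reverse order.

    #include <linux/mmzone.h>
    #include <linux/pagemap.h>

    /* Honor the documented order: PG_locked first, then pgdat->lru_lock. */
    static void lru_update_sketch(struct page *page, struct pglist_data *pgdat)
    {
            lock_page(page);                        /* PG_locked */
            spin_lock_irq(&pgdat->lru_lock);        /* node-wide LRU lock */
            /* ... manipulate the page's LRU linkage here ... */
            spin_unlock_irq(&pgdat->lru_lock);
            unlock_page(page);
    }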
@@ -9835,6 +9835,14 @@ F:	kernel/sched/membarrier.c
 F:	include/uapi/linux/membarrier.h
 F:	arch/powerpc/include/asm/membarrier.h
 
+MEMBLOCK
+M:	Mike Rapoport <rppt@linux.ibm.com>
+L:	linux-mm@kvack.org
+S:	Maintained
+F:	include/linux/memblock.h
+F:	mm/memblock.c
+F:	Documentation/core-api/boot-time-mm.rst
+
 MEMORY MANAGEMENT
 L:	linux-mm@kvack.org
 W:	http://www.linux-mm.org
@@ -4,6 +4,7 @@
 
 #include <linux/smp.h>
 #include <linux/threads.h>
+#include <linux/numa.h>
 #include <asm/machvec.h>
 
 #ifdef CONFIG_NUMA
@@ -29,7 +30,7 @@ static const struct cpumask *cpumask_of_node(int node)
 {
 	int cpu;
 
-	if (node == -1)
+	if (node == NUMA_NO_NODE)
 		return cpu_all_mask;
 
 	cpumask_clear(&node_to_cpumask_map[node]);
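NUMA_NO_NODE is just the symbolic name for the same -1 sentinel; as far as I recall, include/linux/numa.h defines it as:

    #define NUMA_NO_NODE	(-1)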
@@ -1467,6 +1467,10 @@ config SYSVIPC_COMPAT
 	def_bool y
 	depends on COMPAT && SYSVIPC
 
+config ARCH_ENABLE_HUGEPAGE_MIGRATION
+	def_bool y
+	depends on HUGETLB_PAGE && MIGRATION
+
 menu "Power management options"
 
 source "kernel/power/Kconfig"
@@ -20,6 +20,11 @@
 
 #include <asm/page.h>
 
+#ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
+#define arch_hugetlb_migration_supported arch_hugetlb_migration_supported
+extern bool arch_hugetlb_migration_supported(struct hstate *h);
+#endif
+
 #define __HAVE_ARCH_HUGE_PTEP_GET
 static inline pte_t huge_ptep_get(pte_t *ptep)
 {
@@ -80,11 +80,7 @@
  */
 #ifdef CONFIG_KASAN
 #define KASAN_SHADOW_SIZE	(UL(1) << (VA_BITS - KASAN_SHADOW_SCALE_SHIFT))
-#ifdef CONFIG_KASAN_EXTRA
-#define KASAN_THREAD_SHIFT	2
-#else
 #define KASAN_THREAD_SHIFT	1
-#endif /* CONFIG_KASAN_EXTRA */
 #else
 #define KASAN_SHADOW_SIZE	(0)
 #define KASAN_THREAD_SHIFT	0
@@ -321,7 +321,7 @@ void crash_post_resume(void)
  * but does not hold any data of loaded kernel image.
  *
  * Note that all the pages in crash dump kernel memory have been initially
- * marked as Reserved in kexec_reserve_crashkres_pages().
+ * marked as Reserved as memory was allocated via memblock_reserve().
  *
  * In hibernation, the pages which are Reserved and yet "nosave" are excluded
  * from the hibernation image. crash_is_nosave() does this check for crash
@@ -361,7 +361,6 @@ void crash_free_reserved_phys_range(unsigned long begin, unsigned long end)
 
 	for (addr = begin; addr < end; addr += PAGE_SIZE) {
 		page = phys_to_page(addr);
-		ClearPageReserved(page);
 		free_reserved_page(page);
 	}
 }
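The dropped ClearPageReserved() was redundant: free_reserved_page() clears the flag itself before handing the page back to the allocator. Roughly, quoting include/linux/mm.h of this era from memory (hedged):

    static inline void free_reserved_page(struct page *page)
    {
            ClearPageReserved(page);
            init_page_count(page);
            __free_page(page);
            adjust_managed_page_count(page, 1);
    }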
@@ -27,6 +27,26 @@
 #include <asm/tlbflush.h>
 #include <asm/pgalloc.h>
 
+#ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
+bool arch_hugetlb_migration_supported(struct hstate *h)
+{
+	size_t pagesize = huge_page_size(h);
+
+	switch (pagesize) {
+#ifdef CONFIG_ARM64_4K_PAGES
+	case PUD_SIZE:
+#endif
+	case PMD_SIZE:
+	case CONT_PMD_SIZE:
+	case CONT_PTE_SIZE:
+		return true;
+	}
+	pr_warn("%s: unrecognized huge page size 0x%lx\n",
+			__func__, pagesize);
+	return false;
+}
+#endif
+
 int pmd_huge(pmd_t pmd)
 {
 	return pmd_val(pmd) && !(pmd_val(pmd) & PMD_TABLE_BIT);
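For context, this arch hook pairs with a generic fallback. From memory, the generic side in include/linux/hugetlb.h is shaped roughly like this (a hedged sketch, not quoted from the patch): an architecture that enables the Kconfig option may override the predicate, and everyone else reports huge page migration as unsupported.

    #ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
    #ifndef arch_hugetlb_migration_supported
    static inline bool arch_hugetlb_migration_supported(struct hstate *h)
    {
            return true;    /* arch opted in and imposes no size restrictions */
    }
    #endif
    #else
    static inline bool arch_hugetlb_migration_supported(struct hstate *h)
    {
            return false;   /* arch never enabled huge page migration */
    }
    #endif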
@@ -118,35 +118,10 @@ static void __init reserve_crashkernel(void)
 	crashk_res.start = crash_base;
 	crashk_res.end = crash_base + crash_size - 1;
 }
-
-static void __init kexec_reserve_crashkres_pages(void)
-{
-#ifdef CONFIG_HIBERNATION
-	phys_addr_t addr;
-	struct page *page;
-
-	if (!crashk_res.end)
-		return;
-
-	/*
-	 * To reduce the size of hibernation image, all the pages are
-	 * marked as Reserved initially.
-	 */
-	for (addr = crashk_res.start; addr < (crashk_res.end + 1);
-	     addr += PAGE_SIZE) {
-		page = phys_to_page(addr);
-		SetPageReserved(page);
-	}
-#endif
-}
 #else
 static void __init reserve_crashkernel(void)
 {
 }
-
-static void __init kexec_reserve_crashkres_pages(void)
-{
-}
 #endif /* CONFIG_KEXEC_CORE */
 
 #ifdef CONFIG_CRASH_DUMP
@@ -586,8 +561,6 @@ void __init mem_init(void)
 	/* this will put all unused low memory onto the freelists */
 	memblock_free_all();
 
-	kexec_reserve_crashkres_pages();
-
 	mem_init_print_info(NULL);
 
 	/*
@@ -120,7 +120,7 @@ static void __init setup_node_to_cpumask_map(void)
 	}
 
 	/* cpumask_of_node() will now work */
-	pr_debug("Node to cpumask map for %d nodes\n", nr_node_ids);
+	pr_debug("Node to cpumask map for %u nodes\n", nr_node_ids);
 }
 
 /*
@@ -74,7 +74,7 @@ void __init build_cpu_to_node_map(void)
 		cpumask_clear(&node_to_cpu_mask[node]);
 
 	for_each_possible_early_cpu(cpu) {
-		node = -1;
+		node = NUMA_NO_NODE;
 		for (i = 0; i < NR_CPUS; ++i)
 			if (cpu_physical_id(cpu) == node_cpuid[i].phys_id) {
 				node = node_cpuid[i].nid;
@@ -583,17 +583,6 @@ pfm_put_task(struct task_struct *task)
 	if (task != current) put_task_struct(task);
 }
 
-static inline void
-pfm_reserve_page(unsigned long a)
-{
-	SetPageReserved(vmalloc_to_page((void *)a));
-}
-static inline void
-pfm_unreserve_page(unsigned long a)
-{
-	ClearPageReserved(vmalloc_to_page((void*)a));
-}
-
 static inline unsigned long
 pfm_protect_ctx_ctxsw(pfm_context_t *x)
 {
@@ -816,44 +805,6 @@ pfm_reset_msgq(pfm_context_t *ctx)
 	DPRINT(("ctx=%p msgq reset\n", ctx));
 }
 
-static void *
-pfm_rvmalloc(unsigned long size)
-{
-	void *mem;
-	unsigned long addr;
-
-	size = PAGE_ALIGN(size);
-	mem = vzalloc(size);
-	if (mem) {
-		//printk("perfmon: CPU%d pfm_rvmalloc(%ld)=%p\n", smp_processor_id(), size, mem);
-		addr = (unsigned long)mem;
-		while (size > 0) {
-			pfm_reserve_page(addr);
-			addr+=PAGE_SIZE;
-			size-=PAGE_SIZE;
-		}
-	}
-	return mem;
-}
-
-static void
-pfm_rvfree(void *mem, unsigned long size)
-{
-	unsigned long addr;
-
-	if (mem) {
-		DPRINT(("freeing physical buffer @%p size=%lu\n", mem, size));
-		addr = (unsigned long) mem;
-		while ((long) size > 0) {
-			pfm_unreserve_page(addr);
-			addr+=PAGE_SIZE;
-			size-=PAGE_SIZE;
-		}
-		vfree(mem);
-	}
-	return;
-}
-
 static pfm_context_t *
 pfm_context_alloc(int ctx_flags)
 {
@@ -1498,7 +1449,7 @@ pfm_free_smpl_buffer(pfm_context_t *ctx)
 	/*
 	 * free the buffer
 	 */
-	pfm_rvfree(ctx->ctx_smpl_hdr, ctx->ctx_smpl_size);
+	vfree(ctx->ctx_smpl_hdr);
 
 	ctx->ctx_smpl_hdr = NULL;
 	ctx->ctx_smpl_size = 0UL;
@@ -2137,7 +2088,7 @@ doit:
 	 * All memory free operations (especially for vmalloc'ed memory)
 	 * MUST be done with interrupts ENABLED.
 	 */
-	if (smpl_buf_addr) pfm_rvfree(smpl_buf_addr, smpl_buf_size);
+	vfree(smpl_buf_addr);
 
 	/*
 	 * return the memory used by the context
@@ -2266,10 +2217,8 @@ pfm_smpl_buffer_alloc(struct task_struct *task, struct file *filp, pfm_context_t
 
 	/*
 	 * We do the easy to undo allocations first.
-	 *
-	 * pfm_rvmalloc(), clears the buffer, so there is no leak
 	 */
-	smpl_buf = pfm_rvmalloc(size);
+	smpl_buf = vzalloc(size);
 	if (smpl_buf == NULL) {
 		DPRINT(("Can't allocate sampling buffer\n"));
 		return -ENOMEM;
@@ -2346,7 +2295,7 @@ pfm_smpl_buffer_alloc(struct task_struct *task, struct file *filp, pfm_context_t
 error:
 	vm_area_free(vma);
 error_kmem:
-	pfm_rvfree(smpl_buf, size);
+	vfree(smpl_buf);
 
 	return -ENOMEM;
 }
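Taken together, these perfmon hunks replace the hand-rolled pfm_rvmalloc()/pfm_rvfree() pair with plain vzalloc()/vfree(): vzalloc() already returns zeroed, virtually contiguous memory, and vmalloc-backed buffers no longer need each page marked PG_reserved to be remapped to user space. A minimal sketch of the resulting pattern (illustrative helper names, not from the patch):

    #include <linux/mm.h>
    #include <linux/vmalloc.h>

    static void *sampling_buf_alloc(unsigned long size)
    {
            return vzalloc(PAGE_ALIGN(size)); /* zeroed on success, NULL on failure */
    }

    static void sampling_buf_free(void *buf)
    {
            vfree(buf);                       /* vfree(NULL) is a no-op */
    }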
@@ -227,7 +227,7 @@ void __init setup_per_cpu_areas(void)
 	 * CPUs are put into groups according to node.  Walk cpu_map
 	 * and create new groups at node boundaries.
 	 */
-	prev_node = -1;
+	prev_node = NUMA_NO_NODE;
 	ai->nr_groups = 0;
 	for (unit = 0; unit < nr_units; unit++) {
 		cpu = cpu_map[unit];
@@ -435,7 +435,7 @@ static void __init *memory_less_node_alloc(int nid, unsigned long pernodesize)
 {
 	void *ptr = NULL;
 	u8 best = 0xff;
-	int bestnode = -1, node, anynode = 0;
+	int bestnode = NUMA_NO_NODE, node, anynode = 0;
 
 	for_each_online_node(node) {
 		if (node_isset(node, memory_less_mask))
@@ -447,7 +447,7 @@ static void __init *memory_less_node_alloc(int nid, unsigned long pernodesize)
 		anynode = node;
 	}
 
-	if (bestnode == -1)
+	if (bestnode == NUMA_NO_NODE)
 		bestnode = anynode;
 
 	ptr = memblock_alloc_try_nid(pernodesize, PERCPU_PAGE_SIZE,
@@ -51,7 +51,7 @@ void __init init_pointer_table(unsigned long ptable)
 	pr_debug("init_pointer_table: %lx, %x\n", ptable, PD_MARKBITS(dp));
 
 	/* unreserve the page so it's possible to free that page */
-	PD_PAGE(dp)->flags &= ~(1 << PG_reserved);
+	__ClearPageReserved(PD_PAGE(dp));
 	init_page_count(PD_PAGE(dp));
 
 	return;
@@ -13,6 +13,10 @@ radix__hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
 					 unsigned long len, unsigned long pgoff,
 					 unsigned long flags);
 
+extern void radix__huge_ptep_modify_prot_commit(struct vm_area_struct *vma,
+						unsigned long addr, pte_t *ptep,
+						pte_t old_pte, pte_t pte);
+
 static inline int hstate_get_psize(struct hstate *hstate)
 {
 	unsigned long shift;
@@ -42,4 +46,12 @@ static inline bool gigantic_page_supported(void)
 /* hugepd entry valid bit */
 #define HUGEPD_VAL_BITS		(0x8000000000000000UL)
 
+#define huge_ptep_modify_prot_start huge_ptep_modify_prot_start
+extern pte_t huge_ptep_modify_prot_start(struct vm_area_struct *vma,
+					 unsigned long addr, pte_t *ptep);
+
+#define huge_ptep_modify_prot_commit huge_ptep_modify_prot_commit
+extern void huge_ptep_modify_prot_commit(struct vm_area_struct *vma,
+					 unsigned long addr, pte_t *ptep,
+					 pte_t old_pte, pte_t new_pte);
 #endif
@@ -1306,6 +1306,24 @@ static inline int pud_pfn(pud_t pud)
 	BUILD_BUG();
 	return 0;
 }
+#define __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
+pte_t ptep_modify_prot_start(struct vm_area_struct *, unsigned long, pte_t *);
+void ptep_modify_prot_commit(struct vm_area_struct *, unsigned long,
+			     pte_t *, pte_t, pte_t);
+
+/*
+ * Returns true for a R -> RW upgrade of pte
+ */
+static inline bool is_pte_rw_upgrade(unsigned long old_val, unsigned long new_val)
+{
+	if (!(old_val & _PAGE_READ))
+		return false;
+
+	if ((!(old_val & _PAGE_WRITE)) && (new_val & _PAGE_WRITE))
+		return true;
+
+	return false;
+}
 
 #endif /* __ASSEMBLY__ */
 #endif /* _ASM_POWERPC_BOOK3S_64_PGTABLE_H_ */
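A sketch of how generic code drives this transaction, loosely following mm/mprotect.c's change_pte_range() in this series (the wrapper function here is hypothetical): start logically clears the PTE and returns the old value, the caller computes the new protection, and commit installs it with both values visible, so an architecture can, for example, skip a TLB flush on the R -> RW upgrade tested above.

    static void change_pte_sketch(struct vm_area_struct *vma, unsigned long addr,
                                  pte_t *ptep, pgprot_t newprot)
    {
            pte_t oldpte, ptent;

            oldpte = ptep_modify_prot_start(vma, addr, ptep);        /* begin transaction */
            ptent = pte_modify(oldpte, newprot);                     /* apply new protection */
            ptep_modify_prot_commit(vma, addr, ptep, oldpte, ptent); /* install new PTE */
    }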
@@ -127,6 +127,10 @@ extern void radix__ptep_set_access_flags(struct vm_area_struct *vma, pte_t *ptep
 					  pte_t entry, unsigned long address,
 					  int psize);
 
+extern void radix__ptep_modify_prot_commit(struct vm_area_struct *vma,
+					   unsigned long addr, pte_t *ptep,
+					   pte_t old_pte, pte_t pte);
+
 static inline unsigned long __radix_pte_update(pte_t *ptep, unsigned long clr,
 					       unsigned long set)
 {