Mirror of https://github.com/armbian/linux-cix.git (synced 2026-01-06 12:30:45 -08:00)
mm: remove the vma linked list
Replace any vm_next use with vma_find().

Update free_pgtables(), unmap_vmas(), and zap_page_range() to use the
maple tree.

Use the new free_pgtables() and unmap_vmas() in do_mas_align_munmap().
At the same time, alter the loop to be more compact.

Now that free_pgtables() and unmap_vmas() take a maple tree as an
argument, rearrange do_mas_align_munmap() to use the new tree to hold
the vmas to remove.

Remove __vma_link_list() and __vma_unlink_list() as they are exclusively
used to update the linked list.

Drop linked list update from __insert_vm_struct().

Rework validation of tree as it was depending on the linked list.

[yang.lee@linux.alibaba.com: fix one kernel-doc comment]
  Link: https://bugzilla.openanolis.cn/show_bug.cgi?id=1949
  Link: https://lkml.kernel.org/r/20220824021918.94116-1-yang.lee@linux.alibaba.com
Link: https://lkml.kernel.org/r/20220906194824.2110408-69-Liam.Howlett@oracle.com
Signed-off-by: Liam R. Howlett <Liam.Howlett@Oracle.com>
Signed-off-by: Yang Li <yang.lee@linux.alibaba.com>
Tested-by: Yu Zhao <yuzhao@google.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: David Howells <dhowells@redhat.com>
Cc: Davidlohr Bueso <dave@stgolabs.net>
Cc: "Matthew Wilcox (Oracle)" <willy@infradead.org>
Cc: SeongJae Park <sj@kernel.org>
Cc: Sven Schnelle <svens@linux.ibm.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Will Deacon <will@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
commit 763ecb0350
parent 78ba531ff3
committed by Andrew Morton
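Every hunk below follows one pattern: a consumer of mm->mmap / vm_next / vm_prev is converted to walk mm->mm_mt instead. A minimal before/after sketch, assuming the mmap_lock is held; do_something() is only a placeholder, not anything from this commit:

	#include <linux/mm.h>
	#include <linux/mm_types.h>
	#include <linux/maple_tree.h>

	static void do_something(struct vm_area_struct *vma);	/* placeholder */

	/* Before: follow the per-task linked list that this commit deletes. */
	static void walk_vmas_list(struct mm_struct *mm)
	{
		struct vm_area_struct *vma;

		for (vma = mm->mmap; vma; vma = vma->vm_next)
			do_something(vma);
	}

	/* After: the maple tree is the only VMA index; walk it with a cursor. */
	static void walk_vmas_mt(struct mm_struct *mm)
	{
		MA_STATE(mas, &mm->mm_mt, 0, 0);
		struct vm_area_struct *vma;

		mas_for_each(&mas, vma, ULONG_MAX)
			do_something(vma);
	}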
include/linux/mm.h

@@ -1857,8 +1857,9 @@ void zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
 		  unsigned long size);
 void zap_page_range(struct vm_area_struct *vma, unsigned long address,
 		    unsigned long size);
-void unmap_vmas(struct mmu_gather *tlb, struct vm_area_struct *start_vma,
-		unsigned long start, unsigned long end);
+void unmap_vmas(struct mmu_gather *tlb, struct maple_tree *mt,
+		struct vm_area_struct *start_vma, unsigned long start,
+		unsigned long end);
 
 struct mmu_notifier_range;
 
include/linux/mm_types.h

@@ -408,8 +408,6 @@ struct vm_area_struct {
 	unsigned long vm_end;		/* The first byte after our end address
 					   within vm_mm. */
 
-	/* linked list of VM areas per task, sorted by address */
-	struct vm_area_struct *vm_next, *vm_prev;
 	struct mm_struct *vm_mm;	/* The address space we belong to. */
 
 	/*
@@ -473,7 +471,6 @@ struct vm_area_struct {
 struct kioctx_table;
 struct mm_struct {
 	struct {
-		struct vm_area_struct *mmap;		/* list of VMAs */
 		struct maple_tree mm_mt;
 #ifdef CONFIG_MMU
 		unsigned long (*get_unmapped_area) (struct file *filp,
@@ -488,7 +485,6 @@ struct mm_struct {
 		unsigned long mmap_compat_legacy_base;
 #endif
 		unsigned long task_size;	/* size of task vm space */
-		unsigned long highest_vm_end;	/* highest vma end address */
 		pgd_t * pgd;
 
 #ifdef CONFIG_MEMBARRIER
kernel/fork.c

@@ -474,7 +474,6 @@ struct vm_area_struct *vm_area_dup(struct vm_area_struct *orig)
 		 */
 		*new = data_race(*orig);
 		INIT_LIST_HEAD(&new->anon_vma_chain);
-		new->vm_next = new->vm_prev = NULL;
 		dup_anon_vma_name(orig, new);
 	}
 	return new;
@@ -579,7 +578,7 @@ static void dup_mm_exe_file(struct mm_struct *mm, struct mm_struct *oldmm)
 static __latent_entropy int dup_mmap(struct mm_struct *mm,
 					struct mm_struct *oldmm)
 {
-	struct vm_area_struct *mpnt, *tmp, *prev, **pprev;
+	struct vm_area_struct *mpnt, *tmp;
 	int retval;
 	unsigned long charge = 0;
 	LIST_HEAD(uf);
@@ -606,18 +605,11 @@ static __latent_entropy int dup_mmap(struct mm_struct *mm,
 	mm->exec_vm = oldmm->exec_vm;
 	mm->stack_vm = oldmm->stack_vm;
 
-	pprev = &mm->mmap;
 	retval = ksm_fork(mm, oldmm);
 	if (retval)
 		goto out;
 	khugepaged_fork(mm, oldmm);
 
-	retval = mas_expected_entries(&mas, oldmm->map_count);
-	if (retval)
-		goto out;
-
-	prev = NULL;
-
 	retval = mas_expected_entries(&mas, oldmm->map_count);
 	if (retval)
 		goto out;
@@ -689,14 +681,6 @@ static __latent_entropy int dup_mmap(struct mm_struct *mm,
 		if (is_vm_hugetlb_page(tmp))
 			reset_vma_resv_huge_pages(tmp);
 
-		/*
-		 * Link in the new vma and copy the page table entries.
-		 */
-		*pprev = tmp;
-		pprev = &tmp->vm_next;
-		tmp->vm_prev = prev;
-		prev = tmp;
-
 		/* Link the vma into the MT */
 		mas.index = tmp->vm_start;
 		mas.last = tmp->vm_end - 1;
@@ -1124,7 +1108,6 @@ static void mm_init_uprobes_state(struct mm_struct *mm)
 static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p,
 	struct user_namespace *user_ns)
 {
-	mm->mmap = NULL;
 	mt_init_flags(&mm->mm_mt, MM_MT_FLAGS);
 	mt_set_external_lock(&mm->mm_mt, &mm->mmap_lock);
 	atomic_set(&mm->mm_users, 1);
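Taken together, the dup_mmap() hunks above reduce the fork-time copy to a pure tree-to-tree operation: preallocate nodes for the expected number of VMAs, then store each duplicate under its [vm_start, vm_end - 1] range. Roughly, as a sketch with error handling, accounting and page-table copying elided (dup_mmap_mt_sketch() is an illustrative name, not the real function):

	#include <linux/mm.h>
	#include <linux/errno.h>
	#include <linux/maple_tree.h>

	static int dup_mmap_mt_sketch(struct mm_struct *mm, struct mm_struct *oldmm)
	{
		MA_STATE(old_mas, &oldmm->mm_mt, 0, 0);
		MA_STATE(mas, &mm->mm_mt, 0, 0);
		struct vm_area_struct *mpnt;
		int retval;

		/* Reserve enough maple nodes up front for map_count stores. */
		retval = mas_expected_entries(&mas, oldmm->map_count);
		if (retval)
			return retval;

		mas_for_each(&old_mas, mpnt, ULONG_MAX) {
			struct vm_area_struct *tmp = vm_area_dup(mpnt);

			if (!tmp) {
				retval = -ENOMEM;
				break;
			}

			/* Link the vma into the MT; there is no list left to update. */
			mas.index = tmp->vm_start;
			mas.last = tmp->vm_end - 1;
			mas_store(&mas, tmp);
		}

		mas_destroy(&mas);	/* free any unused preallocated nodes */
		return retval;
	}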
mm/debug.c (14 changed lines)
@@ -139,13 +139,11 @@ EXPORT_SYMBOL(dump_page);
 
 void dump_vma(const struct vm_area_struct *vma)
 {
-	pr_emerg("vma %px start %px end %px\n"
-		"next %px prev %px mm %px\n"
+	pr_emerg("vma %px start %px end %px mm %px\n"
 		"prot %lx anon_vma %px vm_ops %px\n"
 		"pgoff %lx file %px private_data %px\n"
 		"flags: %#lx(%pGv)\n",
-		vma, (void *)vma->vm_start, (void *)vma->vm_end, vma->vm_next,
-		vma->vm_prev, vma->vm_mm,
+		vma, (void *)vma->vm_start, (void *)vma->vm_end, vma->vm_mm,
 		(unsigned long)pgprot_val(vma->vm_page_prot),
 		vma->anon_vma, vma->vm_ops, vma->vm_pgoff,
 		vma->vm_file, vma->vm_private_data,
@@ -155,11 +153,11 @@ EXPORT_SYMBOL(dump_vma);
 
 void dump_mm(const struct mm_struct *mm)
 {
-	pr_emerg("mm %px mmap %px task_size %lu\n"
+	pr_emerg("mm %px task_size %lu\n"
 #ifdef CONFIG_MMU
 		"get_unmapped_area %px\n"
 #endif
-		"mmap_base %lu mmap_legacy_base %lu highest_vm_end %lu\n"
+		"mmap_base %lu mmap_legacy_base %lu\n"
 		"pgd %px mm_users %d mm_count %d pgtables_bytes %lu map_count %d\n"
 		"hiwater_rss %lx hiwater_vm %lx total_vm %lx locked_vm %lx\n"
 		"pinned_vm %llx data_vm %lx exec_vm %lx stack_vm %lx\n"
@@ -183,11 +181,11 @@ void dump_mm(const struct mm_struct *mm)
 		"tlb_flush_pending %d\n"
 		"def_flags: %#lx(%pGv)\n",
 
-		mm, mm->mmap, mm->task_size,
+		mm, mm->task_size,
 #ifdef CONFIG_MMU
 		mm->get_unmapped_area,
 #endif
-		mm->mmap_base, mm->mmap_legacy_base, mm->highest_vm_end,
+		mm->mmap_base, mm->mmap_legacy_base,
 		mm->pgd, atomic_read(&mm->mm_users),
 		atomic_read(&mm->mm_count),
 		mm_pgtables_bytes(mm),
mm/internal.h

@@ -85,8 +85,9 @@ bool __folio_end_writeback(struct folio *folio);
 void deactivate_file_folio(struct folio *folio);
 void folio_activate(struct folio *folio);
 
-void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *start_vma,
-		unsigned long floor, unsigned long ceiling);
+void free_pgtables(struct mmu_gather *tlb, struct maple_tree *mt,
+		   struct vm_area_struct *start_vma, unsigned long floor,
+		   unsigned long ceiling);
 void pmd_install(struct mm_struct *mm, pmd_t *pmd, pgtable_t *pte);
 
 struct zap_details;
@@ -480,9 +481,6 @@ static inline bool is_data_mapping(vm_flags_t flags)
 }
 
 /* mm/util.c */
-void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
-		struct vm_area_struct *prev);
-void __vma_unlink_list(struct mm_struct *mm, struct vm_area_struct *vma);
 struct anon_vma *folio_anon_vma(struct folio *folio);
 
 #ifdef CONFIG_MMU
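With both declarations now carrying a struct maple_tree * argument, a teardown path hands the mm's tree to unmap_vmas() and free_pgtables() explicitly. A hedged sketch of a caller in the shape of exit_mmap() (mmap_lock handling and the actual removal of the VMAs from the tree are elided; teardown_range_sketch() is an illustrative name, and the internal.h include means this only builds inside mm/):

	#include <linux/mm.h>
	#include <linux/swap.h>
	#include <asm/tlb.h>
	#include "internal.h"	/* free_pgtables(), USER_PGTABLES_CEILING */

	static void teardown_range_sketch(struct mm_struct *mm,
					  struct vm_area_struct *first_vma,
					  unsigned long start, unsigned long end)
	{
		struct mmu_gather tlb;

		lru_add_drain();
		tlb_gather_mmu(&tlb, mm);
		update_hiwater_rss(mm);

		/* Zap the user pages of every VMA the tree holds in [start, end). */
		unmap_vmas(&tlb, &mm->mm_mt, first_vma, start, end);

		/* Then free the now-unused page tables behind those VMAs. */
		free_pgtables(&tlb, &mm->mm_mt, first_vma, FIRST_USER_ADDRESS,
			      USER_PGTABLES_CEILING);

		tlb_finish_mmu(&tlb);
	}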
mm/memory.c (34 changed lines)
@@ -392,12 +392,21 @@ void free_pgd_range(struct mmu_gather *tlb,
 	} while (pgd++, addr = next, addr != end);
 }
 
-void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *vma,
-		unsigned long floor, unsigned long ceiling)
+void free_pgtables(struct mmu_gather *tlb, struct maple_tree *mt,
+		   struct vm_area_struct *vma, unsigned long floor,
+		   unsigned long ceiling)
 {
-	while (vma) {
-		struct vm_area_struct *next = vma->vm_next;
+	MA_STATE(mas, mt, vma->vm_end, vma->vm_end);
+
+	do {
 		unsigned long addr = vma->vm_start;
+		struct vm_area_struct *next;
+
+		/*
+		 * Note: USER_PGTABLES_CEILING may be passed as ceiling and may
+		 * be 0.  This will underflow and is okay.
+		 */
+		next = mas_find(&mas, ceiling - 1);
 
 		/*
 		 * Hide vma from rmap and truncate_pagecache before freeing
@@ -416,7 +425,7 @@ void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *vma,
 			while (next && next->vm_start <= vma->vm_end + PMD_SIZE
 			       && !is_vm_hugetlb_page(next)) {
 				vma = next;
-				next = vma->vm_next;
+				next = mas_find(&mas, ceiling - 1);
 				unlink_anon_vmas(vma);
 				unlink_file_vma(vma);
 			}
@@ -424,7 +433,7 @@ void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *vma,
 				floor, next ? next->vm_start : ceiling);
 		}
 		vma = next;
-	}
+	} while (vma);
 }
 
 void pmd_install(struct mm_struct *mm, pmd_t *pmd, pgtable_t *pte)
@@ -1688,6 +1697,7 @@ static void unmap_single_vma(struct mmu_gather *tlb,
 /**
  * unmap_vmas - unmap a range of memory covered by a list of vma's
  * @tlb: address of the caller's struct mmu_gather
+ * @mt: the maple tree
  * @vma: the starting vma
  * @start_addr: virtual address at which to start unmapping
  * @end_addr: virtual address at which to end unmapping
@@ -1703,7 +1713,7 @@ static void unmap_single_vma(struct mmu_gather *tlb,
 * ensure that any thus-far unmapped pages are flushed before unmap_vmas()
 * drops the lock and schedules.
 */
-void unmap_vmas(struct mmu_gather *tlb,
+void unmap_vmas(struct mmu_gather *tlb, struct maple_tree *mt,
 		struct vm_area_struct *vma, unsigned long start_addr,
 		unsigned long end_addr)
 {
@@ -1713,12 +1723,14 @@ void unmap_vmas(struct mmu_gather *tlb,
 		/* Careful - we need to zap private pages too! */
 		.even_cows = true,
 	};
+	MA_STATE(mas, mt, vma->vm_end, vma->vm_end);
 
 	mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, vma, vma->vm_mm,
 				start_addr, end_addr);
 	mmu_notifier_invalidate_range_start(&range);
-	for ( ; vma && vma->vm_start < end_addr; vma = vma->vm_next)
+	do {
 		unmap_single_vma(tlb, vma, start_addr, end_addr, &details);
+	} while ((vma = mas_find(&mas, end_addr - 1)) != NULL);
 	mmu_notifier_invalidate_range_end(&range);
 }
 
@@ -1733,8 +1745,11 @@ void unmap_vmas(struct mmu_gather *tlb,
 void zap_page_range(struct vm_area_struct *vma, unsigned long start,
 		unsigned long size)
 {
+	struct maple_tree *mt = &vma->vm_mm->mm_mt;
+	unsigned long end = start + size;
 	struct mmu_notifier_range range;
 	struct mmu_gather tlb;
+	MA_STATE(mas, mt, vma->vm_end, vma->vm_end);
 
 	lru_add_drain();
 	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm,
@@ -1742,8 +1757,9 @@ void zap_page_range(struct vm_area_struct *vma, unsigned long start,
 	tlb_gather_mmu(&tlb, vma->vm_mm);
 	update_hiwater_rss(vma->vm_mm);
 	mmu_notifier_invalidate_range_start(&range);
-	for ( ; vma && vma->vm_start < range.end; vma = vma->vm_next)
+	do {
 		unmap_single_vma(&tlb, vma, start, range.end, NULL);
+	} while ((vma = mas_find(&mas, end - 1)) != NULL);
 	mmu_notifier_invalidate_range_end(&range);
 	tlb_finish_mmu(&tlb);
 }
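The recurring MA_STATE(mas, mt, vma->vm_end, vma->vm_end) initialisation above parks the cursor at the end of the current VMA, so the first mas_find() returns the following VMA bounded by the supplied limit, which is the role vm_next used to play. A small illustrative helper (next_vma_below() is a made-up name, not an API added by this commit):

	#include <linux/mm_types.h>
	#include <linux/maple_tree.h>

	/* Return the VMA following @vma in the tree, or NULL if there is none
	 * below @limit (e.g. ceiling - 1 or end_addr - 1 in the hunks above). */
	static struct vm_area_struct *next_vma_below(struct maple_tree *mt,
						     struct vm_area_struct *vma,
						     unsigned long limit)
	{
		MA_STATE(mas, mt, vma->vm_end, vma->vm_end);

		return mas_find(&mas, limit);
	}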
mm/nommu.c

@@ -584,17 +584,12 @@ static void setup_vma_to_mm(struct vm_area_struct *vma, struct mm_struct *mm)
 static void mas_add_vma_to_mm(struct ma_state *mas, struct mm_struct *mm,
 				struct vm_area_struct *vma)
 {
-	struct vm_area_struct *prev;
-
 	BUG_ON(!vma->vm_region);
 
 	setup_vma_to_mm(vma, mm);
 
-	prev = mas_prev(mas, 0);
-	mas_reset(mas);
 	/* add the VMA to the tree */
 	vma_mas_store(vma, mas);
-	__vma_link_list(mm, vma, prev);
 }
 
 /*
@@ -647,7 +642,6 @@ static int delete_vma_from_mm(struct vm_area_struct *vma)
 
 	/* remove from the MM's tree and list */
 	vma_mas_remove(vma, &mas);
-	__vma_unlink_list(vma->vm_mm, vma);
 	return 0;
 }
 
mm/util.c (40 changed lines)
@@ -272,46 +272,6 @@ void *memdup_user_nul(const void __user *src, size_t len)
 }
 EXPORT_SYMBOL(memdup_user_nul);
 
-void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
-		struct vm_area_struct *prev)
-{
-	struct vm_area_struct *next;
-
-	vma->vm_prev = prev;
-	if (prev) {
-		next = prev->vm_next;
-		prev->vm_next = vma;
-	} else {
-		next = mm->mmap;
-		mm->mmap = vma;
-	}
-	vma->vm_next = next;
-	if (next)
-		next->vm_prev = vma;
-	else
-		mm->highest_vm_end = vm_end_gap(vma);
-}
-
-void __vma_unlink_list(struct mm_struct *mm, struct vm_area_struct *vma)
-{
-	struct vm_area_struct *prev, *next;
-
-	next = vma->vm_next;
-	prev = vma->vm_prev;
-	if (prev)
-		prev->vm_next = next;
-	else
-		mm->mmap = next;
-	if (next) {
-		next->vm_prev = prev;
-	} else {
-		if (prev)
-			mm->highest_vm_end = vm_end_gap(prev);
-		else
-			mm->highest_vm_end = 0;
-	}
-}
-
 /* Check if the vma is being used as a stack by this task */
 int vma_is_stack_for_current(struct vm_area_struct *vma)
 {
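With __vma_link_list() and __vma_unlink_list() gone, "linking" a VMA no longer means stitching vm_next/vm_prev pointers and maintaining highest_vm_end; it is a single range store into the tree, and unlinking is a store of NULL over the same range (the nommu.c hunks above do this through the vma_mas_store()/vma_mas_remove() helpers, which preallocate). A minimal sketch with illustrative names, assuming the mmap_lock is held for writing:

	#include <linux/mm_types.h>
	#include <linux/maple_tree.h>
	#include <linux/gfp.h>

	/* Insert @vma into the tree over [vm_start, vm_end - 1]. */
	static int vma_tree_link_sketch(struct mm_struct *mm,
					struct vm_area_struct *vma)
	{
		MA_STATE(mas, &mm->mm_mt, vma->vm_start, vma->vm_end - 1);

		return mas_store_gfp(&mas, vma, GFP_KERNEL);
	}

	/* Erase @vma by overwriting its range with NULL. */
	static int vma_tree_unlink_sketch(struct mm_struct *mm,
					  struct vm_area_struct *vma)
	{
		MA_STATE(mas, &mm->mm_mt, vma->vm_start, vma->vm_end - 1);

		return mas_store_gfp(&mas, NULL, GFP_KERNEL);
	}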