You've already forked linux-apfs
mirror of
https://github.com/linux-apfs/linux-apfs.git
synced 2026-05-01 15:00:59 -07:00
[PATCH] freepgt: remove MM_VM_SIZE(mm)
There's only one usage of MM_VM_SIZE(mm) left, and it's a troublesome macro because mm doesn't contain the (32-bit emulation?) info needed. But it too is only needed because we ignore the end from the vma list. We could make flush_pgtables return that end, or unmap_vmas. Choose the latter, since it's a natural fit with unmap_mapping_range_vma needing to know its restart addr. This does make more than minimal change, but if unmap_vmas had returned the end before, this is how we'd have done it, rather than storing the break_addr in zap_details. unmap_vmas used to return count of vmas scanned, but that's just debug which hasn't been useful in a while; and if we want the map_count 0 on exit check back, it can easily come from the final remove_vm_struct loop. Signed-off-by: Hugh Dickins <hugh@veritas.com> Signed-off-by: Andrew Morton <akpm@osdl.org> Signed-off-by: Linus Torvalds <torvalds@osdl.org>
This commit is contained in:
committed by
Linus Torvalds
parent
e0da382c92
commit
ee39b37b23
@@ -42,14 +42,6 @@
  */
 #define TASK_SIZE              (current->thread.task_size)
 
-/*
- * MM_VM_SIZE(mm) gives the maximum address (plus 1) which may contain a mapping for
- * address-space MM. Note that with 32-bit tasks, this is still DEFAULT_TASK_SIZE,
- * because the kernel may have installed helper-mappings above TASK_SIZE. For example,
- * for x86 emulation, the LDT and GDT are mapped above TASK_SIZE.
- */
-#define MM_VM_SIZE(mm)         DEFAULT_TASK_SIZE
-
 /*
  * This decides where the kernel will search for a free chunk of vm
  * space during mmap's.
@@ -542,10 +542,6 @@ extern struct task_struct *last_task_used_altivec;
 #define TASK_SIZE (test_thread_flag(TIF_32BIT) ? \
 		TASK_SIZE_USER32 : TASK_SIZE_USER64)
 
-/* We can't actually tell the TASK_SIZE given just the mm, but default
- * to the 64-bit case to make sure that enough gets cleaned up. */
-#define MM_VM_SIZE(mm) TASK_SIZE_USER64
-
 /* This decides where the kernel will search for a free chunk of vm
  * space during mmap's.
  */
@@ -74,8 +74,6 @@ extern struct task_struct *last_task_used_math;
 
 #endif /* __s390x__ */
 
-#define MM_VM_SIZE(mm)	DEFAULT_TASK_SIZE
-
 #define HAVE_ARCH_PICK_MMAP_LAYOUT
 
 typedef struct {
+2 −7
@@ -37,10 +37,6 @@ extern int sysctl_legacy_va_layout;
 #include <asm/processor.h>
 #include <asm/atomic.h>
 
-#ifndef MM_VM_SIZE
-#define MM_VM_SIZE(mm)	((TASK_SIZE + PGDIR_SIZE - 1) & PGDIR_MASK)
-#endif
-
 #define nth_page(page,n) pfn_to_page(page_to_pfn((page)) + (n))
 
 /*
@@ -582,13 +578,12 @@ struct zap_details {
 	pgoff_t first_index;			/* Lowest page->index to unmap */
 	pgoff_t last_index;			/* Highest page->index to unmap */
 	spinlock_t *i_mmap_lock;		/* For unmap_mapping_range: */
-	unsigned long break_addr;		/* Where unmap_vmas stopped */
 	unsigned long truncate_count;		/* Compare vm_truncate_count */
 };
 
-void zap_page_range(struct vm_area_struct *vma, unsigned long address,
+unsigned long zap_page_range(struct vm_area_struct *vma, unsigned long address,
 		unsigned long size, struct zap_details *);
-int unmap_vmas(struct mmu_gather **tlbp, struct mm_struct *mm,
+unsigned long unmap_vmas(struct mmu_gather **tlb, struct mm_struct *mm,
 		struct vm_area_struct *start_vma, unsigned long start_addr,
 		unsigned long end_addr, unsigned long *nr_accounted,
 		struct zap_details *);
Reference in New Issue
Block a user