Merge branch 'x86-mm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 mm updates from Ingo Molnar:
"The main x86 MM changes in this cycle were:
- continued native kernel PCID support preparation patches to the TLB
flushing code (Andy Lutomirski)
 - various fixes for 32-bit compat syscalls returning addresses above
   4 GB to applications launched from 64-bit binaries, motivated by
   C/R frameworks such as Virtuozzo (Dmitry Safonov)
- continued Intel 5-level paging enablement: in particular the
conversion of x86 GUP to the generic GUP code. (Kirill A. Shutemov)
- x86/mpx ABI corner case fixes/enhancements (Joerg Roedel)
- ... plus misc updates, fixes and cleanups"
* 'x86-mm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (62 commits)
mm, zone_device: Replace {get, put}_zone_device_page() with a single reference to fix pmem crash
x86/mm: Fix flush_tlb_page() on Xen
x86/mm: Make flush_tlb_mm_range() more predictable
x86/mm: Remove flush_tlb() and flush_tlb_current_task()
x86/vm86/32: Switch to flush_tlb_mm_range() in mark_screen_rdonly()
x86/mm/64: Fix crash in remove_pagetable()
Revert "x86/mm/gup: Switch GUP to the generic get_user_page_fast() implementation"
x86/boot/e820: Remove a redundant self assignment
x86/mm: Fix dump pagetables for 4 levels of page tables
x86/mpx, selftests: Only check bounds-vs-shadow when we keep shadow
x86/mpx: Correctly report do_mpx_bt_fault() failures to user-space
Revert "x86/mm/numa: Remove numa_nodemask_from_meminfo()"
x86/espfix: Add support for 5-level paging
x86/kasan: Extend KASAN to support 5-level paging
x86/mm: Add basic defines/helpers for CONFIG_X86_5LEVEL=y
x86/paravirt: Add 5-level support to the paravirt code
x86/mm: Define virtual memory map for 5-level paging
x86/asm: Remove __VIRTUAL_MASK_SHIFT==47 assert
x86/boot: Detect 5-level paging support
x86/mm/numa: Remove numa_nodemask_from_meminfo()
...
@@ -4,7 +4,7 @@
 Virtual memory map with 4 level page tables:
 
 0000000000000000 - 00007fffffffffff (=47 bits) user space, different per mm
-hole caused by [48:63] sign extension
+hole caused by [47:63] sign extension
 ffff800000000000 - ffff87ffffffffff (=43 bits) guard hole, reserved for hypervisor
 ffff880000000000 - ffffc7ffffffffff (=64 TB) direct mapping of all phys. memory
 ffffc80000000000 - ffffc8ffffffffff (=40 bits) hole
@@ -19,16 +19,43 @@ ffffff0000000000 - ffffff7fffffffff (=39 bits) %esp fixup stacks
 ffffffef00000000 - fffffffeffffffff (=64 GB) EFI region mapping space
 ... unused hole ...
 ffffffff80000000 - ffffffff9fffffff (=512 MB) kernel text mapping, from phys 0
-ffffffffa0000000 - ffffffffff5fffff (=1526 MB) module mapping space
+ffffffffa0000000 - ffffffffff5fffff (=1526 MB) module mapping space (variable)
 ffffffffff600000 - ffffffffffdfffff (=8 MB) vsyscalls
 ffffffffffe00000 - ffffffffffffffff (=2 MB) unused hole
 
+Virtual memory map with 5 level page tables:
+
+0000000000000000 - 00ffffffffffffff (=56 bits) user space, different per mm
+hole caused by [56:63] sign extension
+ff00000000000000 - ff0fffffffffffff (=52 bits) guard hole, reserved for hypervisor
+ff10000000000000 - ff8fffffffffffff (=55 bits) direct mapping of all phys. memory
+ff90000000000000 - ff91ffffffffffff (=49 bits) hole
+ff92000000000000 - ffd1ffffffffffff (=54 bits) vmalloc/ioremap space
+ffd2000000000000 - ffd3ffffffffffff (=49 bits) hole
+ffd4000000000000 - ffd5ffffffffffff (=49 bits) virtual memory map (512TB)
+... unused hole ...
+ffd8000000000000 - fff7ffffffffffff (=53 bits) kasan shadow memory (8PB)
+... unused hole ...
+ffffff0000000000 - ffffff7fffffffff (=39 bits) %esp fixup stacks
+... unused hole ...
+ffffffef00000000 - fffffffeffffffff (=64 GB) EFI region mapping space
+... unused hole ...
+ffffffff80000000 - ffffffff9fffffff (=512 MB) kernel text mapping, from phys 0
+ffffffffa0000000 - ffffffffff5fffff (=1526 MB) module mapping space
+ffffffffff600000 - ffffffffffdfffff (=8 MB) vsyscalls
+ffffffffffe00000 - ffffffffffffffff (=2 MB) unused hole
+
+Architecture defines a 64-bit virtual address. Implementations can support
+less. Currently supported are 48- and 57-bit virtual addresses. Bits 63
+through to the most-significant implemented bit are set to either all ones
+or all zero. This causes hole between user space and kernel addresses.
+
 The direct mapping covers all memory in the system up to the highest
 memory address (this means in some cases it can also include PCI memory
 holes).
 
-vmalloc space is lazily synchronized into the different PML4 pages of
-the processes using the page fault handler, with init_level4_pgt as
+vmalloc space is lazily synchronized into the different PML4/PML5 pages of
+the processes using the page fault handler, with init_top_pgt as
 reference.
 
 Current X86-64 implementations support up to 46 bits of address space (64 TB),
@@ -39,6 +66,9 @@ memory window (this size is arbitrary, it can be raised later if needed).
 The mappings are not part of any other kernel PGD and are only available
 during EFI runtime calls.
 
+The module mapping space size changes based on the CONFIG requirements for the
+following fixmap section.
+
 Note that if CONFIG_RANDOMIZE_MEMORY is enabled, the direct mapping of all
 physical memory, vmalloc/ioremap space and virtual memory map are randomized.
 Their order is preserved but their base will be offset early at boot time.
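The sign-extension holes above follow from the canonical-address rule: bits
63 down to the most-significant implemented bit must all equal that bit. A
minimal user-space sketch (the helper name is ours, not the kernel's) that
checks canonicality for 48-bit (4-level) and 57-bit (5-level) addresses:

    #include <stdbool.h>
    #include <stdint.h>

    /* An address is canonical when bits [63:vaddr_bits-1] are all copies
     * of bit (vaddr_bits - 1): [47:63] for 48-bit, [56:63] for 57-bit. */
    static bool is_canonical(uint64_t addr, unsigned int vaddr_bits)
    {
            /* Sign-extend from the top implemented bit and compare. */
            int64_t sext = (int64_t)(addr << (64 - vaddr_bits)) >> (64 - vaddr_bits);
            return (uint64_t)sext == addr;
    }

For example, is_canonical(0x00007fffffffffff, 48) holds while
is_canonical(0x0000800000000000, 48) does not; the same shift pair appears
in the entry_64.S hunk further down.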
@@ -700,6 +700,13 @@ config ARCH_MMAP_RND_COMPAT_BITS
 	  This value can be changed after boot using the
 	  /proc/sys/vm/mmap_rnd_compat_bits tunable
 
+config HAVE_ARCH_COMPAT_MMAP_BASES
+	bool
+	help
+	  This allows 64bit applications to invoke 32-bit mmap() syscall
+	  and vice-versa 32-bit applications to call 64-bit mmap().
+	  Required for applications doing different bitness syscalls.
+
 config HAVE_COPY_THREAD_TLS
 	bool
 	help
@@ -163,11 +163,5 @@ static inline bool arch_vma_access_permitted(struct vm_area_struct *vma,
 	/* by default, allow everything */
 	return true;
 }
-
-static inline bool arch_pte_access_permitted(pte_t pte, bool write)
-{
-	/* by default, allow everything */
-	return true;
-}
 #endif /* __KERNEL__ */
 #endif /* __ASM_POWERPC_MMU_CONTEXT_H */
@@ -156,10 +156,4 @@ static inline bool arch_vma_access_permitted(struct vm_area_struct *vma,
 	/* by default, allow everything */
 	return true;
 }
-
-static inline bool arch_pte_access_permitted(pte_t pte, bool write)
-{
-	/* by default, allow everything */
-	return true;
-}
 #endif /* __S390_MMU_CONTEXT_H */
@@ -37,12 +37,6 @@ static inline bool arch_vma_access_permitted(struct vm_area_struct *vma,
 	return true;
 }
 
-static inline bool arch_pte_access_permitted(pte_t pte, bool write)
-{
-	/* by default, allow everything */
-	return true;
-}
-
 /*
  * end asm-generic/mm_hooks.h functions
  */
@@ -103,10 +103,4 @@ static inline bool arch_vma_access_permitted(struct vm_area_struct *vma,
 	/* by default, allow everything */
 	return true;
 }
-
-static inline bool arch_pte_access_permitted(pte_t pte, bool write)
-{
-	/* by default, allow everything */
-	return true;
-}
 #endif
@@ -105,6 +105,7 @@ config X86
 	select HAVE_ARCH_KMEMCHECK
 	select HAVE_ARCH_MMAP_RND_BITS		if MMU
 	select HAVE_ARCH_MMAP_RND_COMPAT_BITS	if MMU && COMPAT
+	select HAVE_ARCH_COMPAT_MMAP_BASES	if MMU && COMPAT
 	select HAVE_ARCH_SECCOMP_FILTER
 	select HAVE_ARCH_TRACEHOOK
 	select HAVE_ARCH_TRANSPARENT_HUGEPAGE
@@ -289,6 +290,7 @@ config ARCH_SUPPORTS_DEBUG_PAGEALLOC
 config KASAN_SHADOW_OFFSET
 	hex
 	depends on KASAN
+	default 0xdff8000000000000 if X86_5LEVEL
 	default 0xdffffc0000000000
 
 config HAVE_INTEL_TXT
@@ -44,6 +44,15 @@ static const u32 req_flags[NCAPINTS] =
 	0, /* REQUIRED_MASK5 not implemented in this file */
 	REQUIRED_MASK6,
 	0, /* REQUIRED_MASK7 not implemented in this file */
+	0, /* REQUIRED_MASK8 not implemented in this file */
+	0, /* REQUIRED_MASK9 not implemented in this file */
+	0, /* REQUIRED_MASK10 not implemented in this file */
+	0, /* REQUIRED_MASK11 not implemented in this file */
+	0, /* REQUIRED_MASK12 not implemented in this file */
+	0, /* REQUIRED_MASK13 not implemented in this file */
+	0, /* REQUIRED_MASK14 not implemented in this file */
+	0, /* REQUIRED_MASK15 not implemented in this file */
+	REQUIRED_MASK16,
 };
 
 #define A32(a, b, c, d) (((d) << 24)+((c) << 16)+((b) << 8)+(a))
@@ -70,16 +70,19 @@ int has_eflag(unsigned long mask)
 # define EBX_REG "=b"
 #endif
 
-static inline void cpuid(u32 id, u32 *a, u32 *b, u32 *c, u32 *d)
+static inline void cpuid_count(u32 id, u32 count,
+			       u32 *a, u32 *b, u32 *c, u32 *d)
 {
 	asm volatile(".ifnc %%ebx,%3 ; movl %%ebx,%3 ; .endif \n\t"
 		     "cpuid \n\t"
 		     ".ifnc %%ebx,%3 ; xchgl %%ebx,%3 ; .endif \n\t"
 		    : "=a" (*a), "=c" (*c), "=d" (*d), EBX_REG (*b)
-		    : "a" (id)
+		    : "a" (id), "c" (count)
 	);
 }
 
+#define cpuid(id, a, b, c, d) cpuid_count(id, 0, a, b, c, d)
+
 void get_cpuflags(void)
 {
 	u32 max_intel_level, max_amd_level;
@@ -108,6 +111,11 @@ void get_cpuflags(void)
 		cpu.model += ((tfms >> 16) & 0xf) << 4;
 	}
 
+	if (max_intel_level >= 0x00000007) {
+		cpuid_count(0x00000007, 0, &ignored, &ignored,
+			    &cpu.flags[16], &ignored);
+	}
+
 	cpuid(0x80000000, &max_amd_level, &ignored, &ignored,
 	      &ignored);
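The new cpuid_count() helper exists because leaf 0x7 is subleaf-indexed:
ECX selects the subleaf, so plain CPUID with stale ECX contents can return
the wrong word. The same query works from user space; a sketch assuming a
toolchain whose <cpuid.h> provides __get_cpuid_count():

    #include <cpuid.h>
    #include <stdio.h>

    int main(void)
    {
            unsigned int eax, ebx, ecx, edx;

            /* Leaf 7, subleaf 0: ECX bit 16 is LA57 (5-level paging). */
            if (__get_cpuid_count(7, 0, &eax, &ebx, &ecx, &edx))
                    printf("LA57: %s\n", (ecx & (1u << 16)) ? "yes" : "no");
            return 0;
    }

cpu.flags[16] in the hunk above receives that same leaf-7 ECX word, which
is where X86_FEATURE_LA57 lives.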
@@ -265,12 +265,9 @@ return_from_SYSCALL_64:
 	 *
 	 * If width of "canonical tail" ever becomes variable, this will need
 	 * to be updated to remain correct on both old and new CPUs.
+	 *
+	 * Change top 16 bits to be the sign-extension of 47th bit
 	 */
-	.ifne __VIRTUAL_MASK_SHIFT - 47
-	.error "virtual address width changed -- SYSRET checks need update"
-	.endif
-
-	/* Change top 16 bits to be the sign-extension of 47th bit */
 	shl	$(64 - (__VIRTUAL_MASK_SHIFT+1)), %rcx
 	sar	$(64 - (__VIRTUAL_MASK_SHIFT+1)), %rcx
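The shl/sar pair canonicalizes the return address by sign-extending the top
implemented bit, which is why the fixed-width assert can go: the arithmetic
already scales with __VIRTUAL_MASK_SHIFT. A worked C equivalent (a sketch,
our function name):

    #include <stdint.h>

    static uint64_t canonicalize(uint64_t rcx, unsigned int vmask_shift)
    {
            unsigned int s = 64 - (vmask_shift + 1);   /* 16 when shift == 47 */
            /* shl then arithmetic sar replicates bit vmask_shift upward */
            return (uint64_t)((int64_t)(rcx << s) >> s);
    }

    /* canonicalize(0x0000800000000000, 47) == 0xffff800000000000 */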
@@ -361,7 +361,7 @@ static void vgetcpu_cpu_init(void *arg)
 	d.p = 1;		/* Present */
 	d.d = 1;		/* 32-bit */
 
-	write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_PER_CPU, &d, DESCTYPE_S);
+	write_gdt_entry(get_cpu_gdt_rw(cpu), GDT_ENTRY_PER_CPU, &d, DESCTYPE_S);
 }
 
 static int vgetcpu_online(unsigned int cpu)
@@ -4,6 +4,7 @@
 #include <asm/desc_defs.h>
 #include <asm/ldt.h>
 #include <asm/mmu.h>
+#include <asm/fixmap.h>
 
 #include <linux/smp.h>
 #include <linux/percpu.h>
@@ -45,11 +46,43 @@ struct gdt_page {
 
 DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);
 
-static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
+/* Provide the original GDT */
+static inline struct desc_struct *get_cpu_gdt_rw(unsigned int cpu)
 {
 	return per_cpu(gdt_page, cpu).gdt;
 }
 
+/* Provide the current original GDT */
+static inline struct desc_struct *get_current_gdt_rw(void)
+{
+	return this_cpu_ptr(&gdt_page)->gdt;
+}
+
+/* Get the fixmap index for a specific processor */
+static inline unsigned int get_cpu_gdt_ro_index(int cpu)
+{
+	return FIX_GDT_REMAP_BEGIN + cpu;
+}
+
+/* Provide the fixmap address of the remapped GDT */
+static inline struct desc_struct *get_cpu_gdt_ro(int cpu)
+{
+	unsigned int idx = get_cpu_gdt_ro_index(cpu);
+	return (struct desc_struct *)__fix_to_virt(idx);
+}
+
+/* Provide the current read-only GDT */
+static inline struct desc_struct *get_current_gdt_ro(void)
+{
+	return get_cpu_gdt_ro(smp_processor_id());
+}
+
+/* Provide the physical address of the GDT page. */
+static inline phys_addr_t get_cpu_gdt_paddr(unsigned int cpu)
+{
+	return per_cpu_ptr_to_phys(get_cpu_gdt_rw(cpu));
+}
+
 #ifdef CONFIG_X86_64
 
 static inline void pack_gate(gate_desc *gate, unsigned type, unsigned long func,
@@ -174,7 +207,7 @@ static inline void set_tssldt_descriptor(void *d, unsigned long addr, unsigned t
 
 static inline void __set_tss_desc(unsigned cpu, unsigned int entry, void *addr)
 {
-	struct desc_struct *d = get_cpu_gdt_table(cpu);
+	struct desc_struct *d = get_cpu_gdt_rw(cpu);
 	tss_desc tss;
 
 	set_tssldt_descriptor(&tss, (unsigned long)addr, DESC_TSS,
@@ -194,22 +227,90 @@ static inline void native_set_ldt(const void *addr, unsigned int entries)
 
 		set_tssldt_descriptor(&ldt, (unsigned long)addr, DESC_LDT,
 				      entries * LDT_ENTRY_SIZE - 1);
-		write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_LDT,
+		write_gdt_entry(get_cpu_gdt_rw(cpu), GDT_ENTRY_LDT,
 				&ldt, DESC_LDT);
 		asm volatile("lldt %w0"::"q" (GDT_ENTRY_LDT*8));
 	}
 }
 
+static inline void native_load_gdt(const struct desc_ptr *dtr)
+{
+	asm volatile("lgdt %0"::"m" (*dtr));
+}
+
+static inline void native_load_idt(const struct desc_ptr *dtr)
+{
+	asm volatile("lidt %0"::"m" (*dtr));
+}
+
+static inline void native_store_gdt(struct desc_ptr *dtr)
+{
+	asm volatile("sgdt %0":"=m" (*dtr));
+}
+
+static inline void native_store_idt(struct desc_ptr *dtr)
+{
+	asm volatile("sidt %0":"=m" (*dtr));
+}
+
+/*
+ * The LTR instruction marks the TSS GDT entry as busy. On 64-bit, the GDT is
+ * a read-only remapping. To prevent a page fault, the GDT is switched to the
+ * original writeable version when needed.
+ */
+#ifdef CONFIG_X86_64
+static inline void native_load_tr_desc(void)
+{
+	struct desc_ptr gdt;
+	int cpu = raw_smp_processor_id();
+	bool restore = 0;
+	struct desc_struct *fixmap_gdt;
+
+	native_store_gdt(&gdt);
+	fixmap_gdt = get_cpu_gdt_ro(cpu);
+
+	/*
+	 * If the current GDT is the read-only fixmap, swap to the original
+	 * writeable version. Swap back at the end.
+	 */
+	if (gdt.address == (unsigned long)fixmap_gdt) {
+		load_direct_gdt(cpu);
+		restore = 1;
+	}
+	asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
+	if (restore)
+		load_fixmap_gdt(cpu);
+}
+#else
+static inline void native_load_tr_desc(void)
+{
+	asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
+}
+#endif
+
+static inline unsigned long native_store_tr(void)
+{
+	unsigned long tr;
+
+	asm volatile("str %0":"=r" (tr));
+
+	return tr;
+}
+
+static inline void native_load_tls(struct thread_struct *t, unsigned int cpu)
+{
+	struct desc_struct *gdt = get_cpu_gdt_rw(cpu);
+	unsigned int i;
+
+	for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
+		gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
+}
+
 DECLARE_PER_CPU(bool, __tss_limit_invalid);
 
 static inline void force_reload_TR(void)
 {
-	struct desc_struct *d = get_cpu_gdt_table(smp_processor_id());
+	struct desc_struct *d = get_current_gdt_rw();
 	tss_desc tss;
 
 	memcpy(&tss, &d[GDT_ENTRY_TSS], sizeof(tss_desc));
@@ -257,44 +358,6 @@ static inline void invalidate_tss_limit(void)
 	this_cpu_write(__tss_limit_invalid, true);
 }
 
-static inline void native_load_gdt(const struct desc_ptr *dtr)
-{
-	asm volatile("lgdt %0"::"m" (*dtr));
-}
-
-static inline void native_load_idt(const struct desc_ptr *dtr)
-{
-	asm volatile("lidt %0"::"m" (*dtr));
-}
-
-static inline void native_store_gdt(struct desc_ptr *dtr)
-{
-	asm volatile("sgdt %0":"=m" (*dtr));
-}
-
-static inline void native_store_idt(struct desc_ptr *dtr)
-{
-	asm volatile("sidt %0":"=m" (*dtr));
-}
-
-static inline unsigned long native_store_tr(void)
-{
-	unsigned long tr;
-
-	asm volatile("str %0":"=r" (tr));
-
-	return tr;
-}
-
-static inline void native_load_tls(struct thread_struct *t, unsigned int cpu)
-{
-	struct desc_struct *gdt = get_cpu_gdt_table(cpu);
-	unsigned int i;
-
-	for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
-		gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
-}
-
 /* This intentionally ignores lm, since 32-bit apps don't have that field. */
 #define LDT_empty(info)	\
 	((info)->base_addr		== 0	&&	\
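The comment in the hunk is the whole story: LTR is a store, not just a
load, because the CPU sets the "busy" bit in the TSS descriptor it loads.
A sketch of that type-field transition (illustrative only; byte 5 of an
8-byte descriptor holds the P/DPL/S/type bits):

    #include <stdint.h>

    /* LTR atomically turns an available 64-bit TSS (type 0x9) into a
     * busy one (type 0xB) by setting bit 1 of the type field. Against
     * a read-only GDT mapping that write page-faults, hence the swap
     * to the writeable GDT around "ltr" above. */
    static void mark_tss_busy(uint8_t desc[8])
    {
            desc[5] |= 0x02;        /* e.g. 0x89 -> 0x8B */
    }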
@@ -36,6 +36,12 @@
 # define DISABLE_OSPKE		(1<<(X86_FEATURE_OSPKE & 31))
 #endif /* CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS */
 
+#ifdef CONFIG_X86_5LEVEL
+# define DISABLE_LA57	0
+#else
+# define DISABLE_LA57	(1<<(X86_FEATURE_LA57 & 31))
+#endif
+
 /*
  * Make sure to add features to the correct mask
  */
@@ -55,7 +61,7 @@
 #define DISABLED_MASK13	0
 #define DISABLED_MASK14	0
 #define DISABLED_MASK15	0
-#define DISABLED_MASK16	(DISABLE_PKU|DISABLE_OSPKE)
+#define DISABLED_MASK16	(DISABLE_PKU|DISABLE_OSPKE|DISABLE_LA57)
 #define DISABLED_MASK17	0
 #define DISABLED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 18)
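DISABLE_LA57 feeds the disabled-features machinery, which lets feature
checks constant-fold away at build time. A simplified, self-contained
sketch of the idea (not the kernel's exact macro chain;
boot_cpu_has_la57() stands in for the real runtime check):

    #define DISABLE_PKU	0
    #define DISABLE_OSPKE	0
    #ifdef CONFIG_X86_5LEVEL
    # define DISABLE_LA57	0
    #else
    # define DISABLE_LA57	(1u << 16)	/* X86_FEATURE_LA57 & 31 == 16 */
    #endif
    #define DISABLED_MASK16	(DISABLE_PKU|DISABLE_OSPKE|DISABLE_LA57)

    int boot_cpu_has_la57(void);	/* stand-in for boot_cpu_has() */

    static inline int la57_enabled(void)
    {
            /* Constant-folds to 0 when CONFIG_X86_5LEVEL is not set. */
            return !(DISABLED_MASK16 & (1u << 16)) && boot_cpu_has_la57();
    }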
@@ -293,8 +293,23 @@ do { \
 } \
 } while (0)
 
+/*
+ * True on X86_32 or when emulating IA32 on X86_64
+ */
+static inline int mmap_is_ia32(void)
+{
+	return IS_ENABLED(CONFIG_X86_32) ||
+	       (IS_ENABLED(CONFIG_COMPAT) &&
+		test_thread_flag(TIF_ADDR32));
+}
+
+extern unsigned long tasksize_32bit(void);
+extern unsigned long tasksize_64bit(void);
+extern unsigned long get_mmap_base(int is_legacy);
+
 #ifdef CONFIG_X86_32
 
+#define __STACK_RND_MASK(is32bit) (0x7ff)
 #define STACK_RND_MASK (0x7ff)
 
 #define ARCH_DLINFO		ARCH_DLINFO_IA32
@@ -304,7 +319,8 @@ do { \
 #else /* CONFIG_X86_32 */
 
 /* 1GB for 64bit, 8MB for 32bit */
-#define STACK_RND_MASK (test_thread_flag(TIF_ADDR32) ? 0x7ff : 0x3fffff)
+#define __STACK_RND_MASK(is32bit) ((is32bit) ? 0x7ff : 0x3fffff)
+#define STACK_RND_MASK __STACK_RND_MASK(mmap_is_ia32())
 
 #define ARCH_DLINFO							\
 do {									\
@@ -348,16 +364,6 @@ extern int compat_arch_setup_additional_pages(struct linux_binprm *bprm,
 				       int uses_interp);
 #define compat_arch_setup_additional_pages compat_arch_setup_additional_pages
 
-/*
- * True on X86_32 or when emulating IA32 on X86_64
- */
-static inline int mmap_is_ia32(void)
-{
-	return IS_ENABLED(CONFIG_X86_32) ||
-	       (IS_ENABLED(CONFIG_COMPAT) &&
-		test_thread_flag(TIF_ADDR32));
-}
-
 /* Do not change the values. See get_align_mask() */
 enum align_flags {
 	ALIGN_VA_32	= BIT(0),
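Splitting STACK_RND_MASK into __STACK_RND_MASK(is32bit) lets a 64-bit
kernel pick the 32-bit mask for a compat task at runtime rather than
keying off the current thread's flags at every expansion site. The mask
counts pages; roughly how it is consumed (a sketch modeled on the generic
randomize_stack_top() logic, with get_random_long() standing in for the
kernel RNG):

    #define PAGE_SHIFT	12
    #define __STACK_RND_MASK(is32bit)	((is32bit) ? 0x7ff : 0x3fffff)

    extern unsigned long get_random_long(void);

    static unsigned long randomize_stack_top(unsigned long stack_top,
                                             int is32bit)
    {
            unsigned long rnd = get_random_long() & __STACK_RND_MASK(is32bit);
            /* 0x7ff pages << 12 gives an ~8 MiB randomization span */
            return stack_top - (rnd << PAGE_SHIFT);
    }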
@@ -100,6 +100,10 @@ enum fixed_addresses {
 #ifdef CONFIG_X86_INTEL_MID
 	FIX_LNW_VRTC,
 #endif
+	/* Fixmap entries to remap the GDTs, one per processor. */
+	FIX_GDT_REMAP_BEGIN,
+	FIX_GDT_REMAP_END = FIX_GDT_REMAP_BEGIN + NR_CPUS - 1,
+
 	__end_of_permanent_fixed_addresses,
 
 	/*
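Fixmap slots are fixed at compile time and allocated downward from the top
of the kernel address space, so one slot per possible CPU gives each
processor a stable read-only alias for its GDT. The index-to-address math
(the FIXADDR_TOP value is illustrative; it differs per configuration):

    #define PAGE_SHIFT	12
    #define FIXADDR_TOP	0xffffffffff601000UL	/* illustrative x86_64 value */

    #define __fix_to_virt(x)  (FIXADDR_TOP - ((unsigned long)(x) << PAGE_SHIFT))

    /* get_cpu_gdt_ro(cpu) from the desc.h hunk above is then just
     * (struct desc_struct *)__fix_to_virt(FIX_GDT_REMAP_BEGIN + cpu). */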
@@ -11,9 +11,12 @@
  * 'kernel address space start' >> KASAN_SHADOW_SCALE_SHIFT
  */
 #define KASAN_SHADOW_START	(KASAN_SHADOW_OFFSET + \
-					(0xffff800000000000ULL >> 3))
-/* 47 bits for kernel address -> (47 - 3) bits for shadow */
-#define KASAN_SHADOW_END	(KASAN_SHADOW_START + (1ULL << (47 - 3)))
+					((-1UL << __VIRTUAL_MASK_SHIFT) >> 3))
+/*
+ * 47 bits for kernel address -> (47 - 3) bits for shadow
+ * 56 bits for kernel address -> (56 - 3) bits for shadow
+ */
+#define KASAN_SHADOW_END	(KASAN_SHADOW_START + (1ULL << (__VIRTUAL_MASK_SHIFT - 3)))
 
 #ifndef __ASSEMBLY__
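KASAN maps every 8 bytes of kernel address space to one shadow byte
(shadow = KASAN_SHADOW_OFFSET + (addr >> 3)), so the new expressions can be
verified by plugging the constants in; the results reproduce the kasan
regions in the mm.txt hunk above. A self-contained check (sketch):

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
            /* 4-level: KASAN_SHADOW_OFFSET = 0xdffffc0000000000 */
            uint64_t s4 = 0xdffffc0000000000ULL + (0xffff800000000000ULL >> 3);
            assert(s4 == 0xffffec0000000000ULL);                 /* 16 TB */
            assert(s4 + (1ULL << (47 - 3)) == 0xfffffc0000000000ULL);

            /* 5-level: KASAN_SHADOW_OFFSET = 0xdff8000000000000 */
            uint64_t s5 = 0xdff8000000000000ULL + ((~0ULL << 56) >> 3);
            assert(s5 == 0xffd8000000000000ULL);                 /* 8 PB */
            assert(s5 + (1ULL << (56 - 3)) == 0xfff8000000000000ULL);
            return 0;
    }

The 5-level result matches the "ffd8000000000000 - fff7ffffffffffff
(=53 bits) kasan shadow memory (8PB)" line documented earlier, and the two
offsets are exactly the KASAN_SHADOW_OFFSET defaults added in the Kconfig
hunk above.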
@@ -164,6 +164,7 @@ struct kimage_arch {
 };
 #else
 struct kimage_arch {
+	p4d_t *p4d;
 	pud_t *pud;
 	pmd_t *pmd;
 	pte_t *pte;
@@ -268,8 +268,4 @@ static inline bool arch_vma_access_permitted(struct vm_area_struct *vma,
 	return __pkru_allows_pkey(vma_pkey(vma), write);
 }
 
-static inline bool arch_pte_access_permitted(pte_t pte, bool write)
-{
-	return __pkru_allows_pkey(pte_flags_pkey(pte_flags(pte)), write);
-}
 #endif /* _ASM_X86_MMU_CONTEXT_H */
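The removed helper duplicated pkey logic that is already applied at the
VMA level. For reference, PKRU packs two bits per protection key: an
access-disable bit and a write-disable bit. A sketch of the check that
__pkru_allows_pkey() performs (simplified, our function name):

    #include <stdbool.h>

    static bool pkru_allows_pkey(unsigned int pkru, int pkey, bool write)
    {
            bool ad = pkru & (1u << (2 * pkey));            /* access disable */
            bool wd = pkru & (1u << (2 * pkey + 1));        /* write disable */

            if (ad)
                    return false;
            return write ? !wd : true;
    }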
@@ -36,7 +36,12 @@
  * hypervisor to fit. Choosing 16 slots here is arbitrary, but it's
  * what Xen requires.
  */
+#ifdef CONFIG_X86_5LEVEL
+#define __PAGE_OFFSET_BASE	_AC(0xff10000000000000, UL)
+#else
 #define __PAGE_OFFSET_BASE	_AC(0xffff880000000000, UL)
+#endif
 
 #ifdef CONFIG_RANDOMIZE_MEMORY
 #define __PAGE_OFFSET		page_offset_base
 #else
@@ -46,8 +51,13 @@
 #define __START_KERNEL_map	_AC(0xffffffff80000000, UL)
 
 /* See Documentation/x86/x86_64/mm.txt for a description of the memory map. */
+#ifdef CONFIG_X86_5LEVEL
+#define __PHYSICAL_MASK_SHIFT	52
+#define __VIRTUAL_MASK_SHIFT	56
+#else
 #define __PHYSICAL_MASK_SHIFT	46
 #define __VIRTUAL_MASK_SHIFT	47
+#endif
 
 /*
  * Kernel image size is limited to 1GiB due to the fixmap living in the
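__PAGE_OFFSET_BASE is where the direct mapping of all physical memory
starts (0xffff880000000000 with 4-level paging, 0xff10000000000000 with
5-level, matching the mm.txt maps above), so phys-to-virt translation for
direct-mapped memory is a constant offset. A simplified sketch; the real
__pa() also special-cases the kernel text mapping:

    #ifdef CONFIG_X86_5LEVEL
    # define PAGE_OFFSET	0xff10000000000000UL
    #else
    # define PAGE_OFFSET	0xffff880000000000UL
    #endif

    #define __va(paddr)	((void *)((unsigned long)(paddr) + PAGE_OFFSET))
    #define __pa_directmap(vaddr)	((unsigned long)(vaddr) - PAGE_OFFSET)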
@@ -357,6 +357,16 @@ static inline void paravirt_release_pud(unsigned long pfn)
 	PVOP_VCALL1(pv_mmu_ops.release_pud, pfn);
 }
 
+static inline void paravirt_alloc_p4d(struct mm_struct *mm, unsigned long pfn)
+{
+	PVOP_VCALL2(pv_mmu_ops.alloc_p4d, mm, pfn);
+}
+
+static inline void paravirt_release_p4d(unsigned long pfn)
+{
+	PVOP_VCALL1(pv_mmu_ops.release_p4d, pfn);
+}
+
 static inline void pte_update(struct mm_struct *mm, unsigned long addr,
 			      pte_t *ptep)
 {
@@ -536,7 +546,7 @@ static inline void set_pud(pud_t *pudp, pud_t pud)
 		PVOP_VCALL2(pv_mmu_ops.set_pud, pudp,
 			    val);
 }
-#if CONFIG_PGTABLE_LEVELS == 4
+#if CONFIG_PGTABLE_LEVELS >= 4
 static inline pud_t __pud(pudval_t val)
 {
 	pudval_t ret;
@@ -565,16 +575,42 @@ static inline pudval_t pud_val(pud_t pud)
 	return ret;
 }
 
+static inline void pud_clear(pud_t *pudp)
+{
+	set_pud(pudp, __pud(0));
+}
+
+static inline void set_p4d(p4d_t *p4dp, p4d_t p4d)
+{
+	p4dval_t val = native_p4d_val(p4d);
+
+	if (sizeof(p4dval_t) > sizeof(long))
+		PVOP_VCALL3(pv_mmu_ops.set_p4d, p4dp,
+			    val, (u64)val >> 32);
+	else
+		PVOP_VCALL2(pv_mmu_ops.set_p4d, p4dp,
+			    val);
+}
+
+#if CONFIG_PGTABLE_LEVELS >= 5
+
+static inline p4d_t __p4d(p4dval_t val)
+{
+	p4dval_t ret = PVOP_CALLEE1(p4dval_t, pv_mmu_ops.make_p4d, val);
+
+	return (p4d_t) { ret };
+}
+
+static inline p4dval_t p4d_val(p4d_t p4d)
+{
+	return PVOP_CALLEE1(p4dval_t, pv_mmu_ops.p4d_val, p4d.p4d);
+}
+
 static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
 {
 	pgdval_t val = native_pgd_val(pgd);
 
 	if (sizeof(pgdval_t) > sizeof(long))
 		PVOP_VCALL3(pv_mmu_ops.set_pgd, pgdp,
 			    val, (u64)val >> 32);
 	else
-		PVOP_VCALL2(pv_mmu_ops.set_pgd, pgdp,
-			    val);
+		PVOP_VCALL2(pv_mmu_ops.set_pgd, pgdp, val);
 }
 
 static inline void pgd_clear(pgd_t *pgdp)
@@ -582,9 +618,11 @@ static inline void pgd_clear(pgd_t *pgdp)
 	set_pgd(pgdp, __pgd(0));
 }
 
-static inline void pud_clear(pud_t *pudp)
+#endif  /* CONFIG_PGTABLE_LEVELS == 5 */
+
+static inline void p4d_clear(p4d_t *p4dp)
 {
-	set_pud(pudp, __pud(0));
+	set_p4d(p4dp, __p4d(0));
 }
 
 #endif	/* CONFIG_PGTABLE_LEVELS == 4 */
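With the p4d level in place, a full software page-table walk gains one
step; on kernels built with fewer than five levels, p4d_offset() folds
into the pgd entry and the same code compiles for both. A sketch of the
lookup order using the post-series accessor names (kernel context assumed;
none/bad checks and huge-page handling omitted):

    /* pgd -> p4d -> pud -> pmd -> pte */
    static pte_t *lookup_pte(struct mm_struct *mm, unsigned long addr)
    {
            pgd_t *pgd = pgd_offset(mm, addr);
            p4d_t *p4d = p4d_offset(pgd, addr);     /* new with 5-level */
            pud_t *pud = pud_offset(p4d, addr);
            pmd_t *pmd = pmd_offset(pud, addr);

            return pte_offset_kernel(pmd, addr);    /* sketch: no locking */
    }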
Some files were not shown because too many files have changed in this diff.