Merge branch 'akpm' (patches from Andrew)

Merge second patch-bomb from Andrew Morton:

 - more MM stuff:

    - Kirill's page-flags rework

    - Kirill's now-allegedly-fixed THP rework

    - MADV_FREE implementation

    - DAX feature work (msync/fsync).  This isn't quite complete but DAX
      is new and it's good enough and the guys have a handle on what
      needs to be done - I expect this to be wrapped in the next week or
      two.

 - some vsprintf maintenance work

 - various other misc bits

* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (145 commits)
  printk: change recursion_bug type to bool
  lib/vsprintf: factor out %pN[F] handler as netdev_bits()
  lib/vsprintf: refactor duplicate code to special_hex_number()
  printk-formats.txt: remove unimplemented %pT
  printk: help pr_debug and pr_devel to optimize out arguments
  lib/test_printf.c: test dentry printing
  lib/test_printf.c: add test for large bitmaps
  lib/test_printf.c: account for kvasprintf tests
  lib/test_printf.c: add a few number() tests
  lib/test_printf.c: test precision quirks
  lib/test_printf.c: check for out-of-bound writes
  lib/test_printf.c: don't BUG
  lib/kasprintf.c: add sanity check to kvasprintf
  lib/vsprintf.c: warn about too large precisions and field widths
  lib/vsprintf.c: help gcc make number() smaller
  lib/vsprintf.c: expand field_width to 24 bits
  lib/vsprintf.c: eliminate potential race in string()
  lib/vsprintf.c: move string() below widen_string()
  lib/vsprintf.c: pull out padding code from dentry_name()
  printk: do cond_resched() between lines while outputting to consoles
  ...
Author: Linus Torvalds
Date:   2016-01-17 12:58:52 -08:00

 189 files changed, 4373 insertions(+), 2902 deletions(-)
@@ -1,40 +0,0 @@
-#
-# Feature name:          pmdp_splitting_flush
-#         Kconfig:       __HAVE_ARCH_PMDP_SPLITTING_FLUSH
-#         description:   arch supports the pmdp_splitting_flush() VM API
-#
-    -----------------------
-    |         arch |status|
-    -----------------------
-    |       alpha: | TODO |
-    |         arc: | TODO |
-    |         arm: |  ok  |
-    |       arm64: |  ok  |
-    |       avr32: | TODO |
-    |    blackfin: | TODO |
-    |         c6x: | TODO |
-    |        cris: | TODO |
-    |         frv: | TODO |
-    |       h8300: | TODO |
-    |     hexagon: | TODO |
-    |        ia64: | TODO |
-    |        m32r: | TODO |
-    |        m68k: | TODO |
-    |       metag: | TODO |
-    |  microblaze: | TODO |
-    |        mips: |  ok  |
-    |     mn10300: | TODO |
-    |       nios2: | TODO |
-    |    openrisc: | TODO |
-    |      parisc: | TODO |
-    |     powerpc: |  ok  |
-    |        s390: |  ok  |
-    |       score: | TODO |
-    |          sh: | TODO |
-    |       sparc: | TODO |
-    |        tile: | TODO |
-    |          um: | TODO |
-    |   unicore32: | TODO |
-    |         x86: |  ok  |
-    |      xtensa: | TODO |
-    -----------------------
-9
@@ -306,15 +306,6 @@ Network device features:
 	Passed by reference.
 
-Command from struct task_struct
-
-	%pT	ls
-
-	For printing executable name excluding path from struct
-	task_struct.
-
-	Passed by reference.
-
 If you add other %p extensions, please extend lib/test_printf.c with
 one or more test cases, if at all feasible.
+95 -54
@@ -35,10 +35,10 @@ miss is going to run faster.
 == Design ==
 
-- "graceful fallback": mm components which don't have transparent
-  hugepage knowledge fall back to breaking a transparent hugepage and
-  working on the regular pages and their respective regular pmd/pte
-  mappings
+- "graceful fallback": mm components which don't have transparent hugepage
+  knowledge fall back to breaking a huge pmd mapping into a table of ptes
+  and, if necessary, splitting a transparent hugepage. Therefore these
+  components can continue working on the regular pages or pte mappings.
 
 - if a hugepage allocation fails because of memory fragmentation,
   regular pages should be gracefully allocated instead and mixed in
@@ -221,9 +221,18 @@ thp_collapse_alloc_failed is incremented if khugepaged found a range
 	of pages that should be collapsed into one huge page but failed
 	the allocation.
 
-thp_split is incremented every time a huge page is split into base
+thp_split_page is incremented every time a huge page is split into base
 	pages. This can happen for a variety of reasons but a common
 	reason is that a huge page is old and is being reclaimed.
+	This action implies splitting all PMDs the page is mapped with.
+
+thp_split_page_failed is incremented if the kernel fails to split a huge
+	page. This can happen if the page was pinned by somebody.
+
+thp_split_pmd is incremented every time a PMD is split into a table of PTEs.
+	This can happen, for instance, when an application calls mprotect() or
+	munmap() on part of a huge page. It doesn't split the huge page, only
+	the page table entry.
 
 thp_zero_page_alloc is incremented every time a huge zero page is
 	successfully allocated. It includes allocations which were
@@ -274,10 +283,8 @@ is complete, so they won't ever notice the fact the page is huge. But
 if any driver is going to mangle over the page structure of the tail
 page (like for checking page->mapping or other bits that are relevant
 for the head page and not the tail page), it should be updated to jump
-to check head page instead (while serializing properly against
-split_huge_page() to avoid the head and tail pages to disappear from
-under it, see the futex code to see an example of that, hugetlbfs also
-needed special handling in futex code for similar reasons).
+to check the head page instead. Taking a reference on any head/tail
+page would prevent the page from being split by anyone.
 
 NOTE: these aren't new constraints to the GUP API, and they match the
 same constraints that apply to hugetlbfs too, so any driver capable
@@ -312,9 +319,9 @@ unaffected. libhugetlbfs will also work fine as usual.
 == Graceful fallback ==
 
 Code walking pagetables but unaware about huge pmds can simply call
-split_huge_page_pmd(vma, addr, pmd) where the pmd is the one returned by
+split_huge_pmd(vma, pmd, addr) where the pmd is the one returned by
 pmd_offset. It's trivial to make the code transparent hugepage aware
-by just grepping for "pmd_offset" and adding split_huge_page_pmd where
+by just grepping for "pmd_offset" and adding split_huge_pmd where
 missing after pmd_offset returns the pmd. Thanks to the graceful
 fallback design, with a one-liner change, you can avoid writing
 hundreds if not thousands of lines of complex code to make your code
@@ -323,7 +330,8 @@ hugepage aware.
 If you're not walking pagetables but you run into a physical hugepage
 but you can't handle it natively in your code, you can split it by
 calling split_huge_page(page). This is what the Linux VM does before
-it tries to swapout the hugepage for example.
+it tries to swapout the hugepage for example. split_huge_page() can fail
+if the page is pinned and you must handle this correctly.
 
 Example to make mremap.c transparent hugepage aware with a one liner
 change:
@@ -335,14 +343,14 @@ diff --git a/mm/mremap.c b/mm/mremap.c
 		return NULL;
 
 	pmd = pmd_offset(pud, addr);
-+	split_huge_page_pmd(vma, addr, pmd);
++	split_huge_pmd(vma, pmd, addr);
 	if (pmd_none_or_clear_bad(pmd))
 		return NULL;
 
 == Locking in hugepage aware code ==
 
 We want as much code as possible hugepage aware, as calling
-split_huge_page() or split_huge_page_pmd() has a cost.
+split_huge_page() or split_huge_pmd() has a cost.
 
 To make pagetable walks huge pmd aware, all you need to do is to call
 pmd_trans_huge() on the pmd returned by pmd_offset. You must hold the
@@ -351,47 +359,80 @@ created from under you by khugepaged (khugepaged collapse_huge_page
 takes the mmap_sem in write mode in addition to the anon_vma lock). If
 pmd_trans_huge returns false, you just fallback in the old code
 paths. If instead pmd_trans_huge returns true, you have to take the
-mm->page_table_lock and re-run pmd_trans_huge. Taking the
-page_table_lock will prevent the huge pmd to be converted into a
-regular pmd from under you (split_huge_page can run in parallel to the
+page table lock (pmd_lock()) and re-run pmd_trans_huge. Taking the
+page table lock will prevent the huge pmd from being converted into a
+regular pmd from under you (split_huge_pmd can run in parallel to the
 pagetable walk). If the second pmd_trans_huge returns false, you
-should just drop the page_table_lock and fallback to the old code as
-before. Otherwise you should run pmd_trans_splitting on the pmd. In
-case pmd_trans_splitting returns true, it means split_huge_page is
-already in the middle of splitting the page. So if pmd_trans_splitting
-returns true it's enough to drop the page_table_lock and call
-wait_split_huge_page and then fallback the old code paths. You are
-guaranteed by the time wait_split_huge_page returns, the pmd isn't
-huge anymore. If pmd_trans_splitting returns false, you can proceed to
-process the huge pmd and the hugepage natively. Once finished you can
-drop the page_table_lock.
+should just drop the page table lock and fall back to the old code as
+before. Otherwise you can proceed to process the huge pmd and the
+hugepage natively. Once finished you can drop the page table lock.
 
-== compound_lock, get_user_pages and put_page ==
+== Refcounts and transparent huge pages ==
 
+Refcounting on THP is mostly consistent with refcounting on other compound
+pages:
+
+  - get_page()/put_page() and GUP operate on the head page's ->_count.
+
+  - ->_count in tail pages is always zero: get_page_unless_zero() never
+    succeeds on tail pages.
+
+  - map/unmap of a page with a PTE entry increments/decrements ->_mapcount
+    on the relevant sub-page of the compound page.
+
+  - map/unmap of the whole compound page is accounted in compound_mapcount
+    (stored in the first tail page).
+
+PageDoubleMap() indicates that ->_mapcount in all subpages is offset up by one.
+This additional reference is required for race-free detection of unmap of
+subpages when we have them mapped with both PMDs and PTEs.
+
+This is an optimization required to lower the overhead of per-subpage mapcount
+tracking. The alternative is to alter ->_mapcount in all subpages on each
+map/unmap of the whole compound page.
+
+We set PG_double_map when a PMD of the page gets split for the first time
+while the page still has a PMD mapping. The additional references go away
+with the last compound_mapcount.
+
 split_huge_page internally has to distribute the refcounts in the head
-page to the tail pages before clearing all PG_head/tail bits from the
-page structures. It can do that easily for refcounts taken by huge pmd
-mappings. But the GUI API as created by hugetlbfs (that returns head
-and tail pages if running get_user_pages on an address backed by any
-hugepage), requires the refcount to be accounted on the tail pages and
-not only in the head pages, if we want to be able to run
-split_huge_page while there are gup pins established on any tail
-page. Failure to be able to run split_huge_page if there's any gup pin
-on any tail page, would mean having to split all hugepages upfront in
-get_user_pages which is unacceptable as too many gup users are
-performance critical and they must work natively on hugepages like
-they work natively on hugetlbfs already (hugetlbfs is simpler because
-hugetlbfs pages cannot be split so there wouldn't be requirement of
-accounting the pins on the tail pages for hugetlbfs). If we wouldn't
-account the gup refcounts on the tail pages during gup, we won't know
-anymore which tail page is pinned by gup and which is not while we run
-split_huge_page. But we still have to add the gup pin to the head page
-too, to know when we can free the compound page in case it's never
-split during its lifetime. That requires changing not just
-get_page, but put_page as well so that when put_page runs on a tail
-page (and only on a tail page) it will find its respective head page,
-and then it will decrease the head page refcount in addition to the
-tail page refcount. To obtain a head page reliably and to decrease its
-refcount without race conditions, put_page has to serialize against
-__split_huge_page_refcount using a special per-page lock called
-compound_lock.
+page to the tail pages before clearing all PG_head/tail bits from the page
+structures. It can be done easily for refcounts taken by page table
+entries, but we don't have enough information on how to distribute any
+additional pins (i.e. from get_user_pages). split_huge_page() fails any
+request to split a pinned huge page: it expects the page count to be equal
+to the sum of the mapcounts of all sub-pages plus one (the split_huge_page
+caller must hold a reference on the head page).
+
+split_huge_page uses migration entries to stabilize page->_count and
+page->_mapcount.
+
+We are safe against physical memory scanners too: the only legitimate way a
+scanner can get a reference to a page is get_page_unless_zero().
+
+All tail pages have zero ->_count until the atomic_add(). This prevents a
+scanner from getting a reference to a tail page up to that point. After the
+atomic_add() we don't care about the ->_count value: we already know how
+many references should be uncharged from the head page.
+
+For the head page get_page_unless_zero() will succeed and we don't mind. It's
+clear where the reference should go after the split: it will stay on the head
+page.
+
+Note that split_huge_pmd() doesn't have any limitation on refcounting:
+a pmd can be split at any point and never fails.
+
+== Partial unmap and deferred_split_huge_page() ==
+
+Unmapping part of a THP (with munmap() or another way) is not going to free
+memory immediately. Instead, we detect that a subpage of a THP is no longer
+in use in page_remove_rmap() and queue the THP for splitting if memory
+pressure comes. Splitting will free up the unused subpages.
+
+Splitting the page right away is not an option due to the locking context in
+the place where we can detect partial unmap. It also might be
+counterproductive, since in many cases partial unmap happens during exit(2)
+when a THP crosses a VMA boundary.
+
+The function deferred_split_huge_page() is used to queue a page for
+splitting. The splitting itself will happen when we get memory pressure via
+the shrinker interface.
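
To make the locking recipe above concrete, a minimal sketch (not part of the
patch), assuming the kernel APIs the text names (pmd_trans_huge(),
pmd_lock()); walk_huge_pmd() and walk_ptes() are hypothetical stand-ins for
the caller's own handlers:

	/* Hypothetical walker following "Locking in hugepage aware code". */
	static int walk_one_pmd(struct mm_struct *mm, struct vm_area_struct *vma,
				unsigned long addr, pmd_t *pmd)
	{
		spinlock_t *ptl;

		if (pmd_trans_huge(*pmd)) {
			ptl = pmd_lock(mm, pmd);	/* no split under us now */
			if (pmd_trans_huge(*pmd)) {
				int ret = walk_huge_pmd(vma, addr, pmd);

				spin_unlock(ptl);
				return ret;
			}
			/* split_huge_pmd() ran in parallel: use the pte path */
			spin_unlock(ptl);
		}
		return walk_ptes(vma, addr, pmd);
	}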
+2
@@ -47,8 +47,10 @@
 #define MADV_WILLNEED	3		/* will need these pages */
 #define MADV_SPACEAVAIL	5		/* ensure resources are available */
 #define MADV_DONTNEED	6		/* don't need these pages */
+#define MADV_FREE	7		/* free pages only if memory pressure */
 
 /* common/generic parameters */
+#define MADV_FREE	8		/* free pages only if memory pressure */
 #define MADV_REMOVE	9		/* remove these pages & resources */
 #define MADV_DONTFORK	10		/* don't inherit across fork */
 #define MADV_DOFORK	11		/* do inherit across fork */
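
As a userspace illustration of the new advice (not part of this diff), a
minimal sketch; the fallback define mirrors the common/generic value 8 seen
above and is only needed where libc headers predate MADV_FREE:

	#include <string.h>
	#include <sys/mman.h>

	#ifndef MADV_FREE
	#define MADV_FREE 8		/* common/generic value, per the hunk above */
	#endif

	int main(void)
	{
		size_t len = 1 << 20;
		char *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
				 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

		if (buf == MAP_FAILED)
			return 1;
		memset(buf, 0xaa, len);		/* dirty the pages */

		/*
		 * Lazy free: the kernel may reclaim these pages under
		 * memory pressure; the mapping itself stays valid.
		 */
		madvise(buf, len, MADV_FREE);

		buf[0] = 1;			/* writing again cancels the free */
		return 0;
	}

Unlike MADV_DONTNEED, nothing is dropped unless reclaim actually needs the
memory, which is what makes this attractive for allocators.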
-3
@@ -73,9 +73,6 @@ config STACKTRACE_SUPPORT
 	def_bool y
 	select STACKTRACE
 
-config HAVE_LATENCYTOP_SUPPORT
-	def_bool y
-
 config HAVE_ARCH_TRANSPARENT_HUGEPAGE
 	def_bool y
 	depends on ARC_MMU_V4
+2 -2
@@ -617,7 +617,7 @@ void flush_dcache_page(struct page *page)
 	 */
 	if (!mapping_mapped(mapping)) {
 		clear_bit(PG_dc_clean, &page->flags);
-	} else if (page_mapped(page)) {
+	} else if (page_mapcount(page)) {
 
 		/* kernel reading from page with U-mapping */
 		phys_addr_t paddr = (unsigned long)page_address(page);
@@ -857,7 +857,7 @@ void copy_user_highpage(struct page *to, struct page *from,
 	 * For !VIPT cache, all of this gets compiled out as
 	 * addr_not_cache_congruent() is 0
 	 */
-	if (page_mapped(from) && addr_not_cache_congruent(kfrom, u_vaddr)) {
+	if (page_mapcount(from) && addr_not_cache_congruent(kfrom, u_vaddr)) {
 		__flush_dcache_page((unsigned long)kfrom, u_vaddr);
 		clean_src_k_mappings = 1;
 	}
-5
@@ -168,11 +168,6 @@ config STACKTRACE_SUPPORT
 	bool
 	default y
 
-config HAVE_LATENCYTOP_SUPPORT
-	bool
-	depends on !SMP
-	default y
-
 config LOCKDEP_SUPPORT
 	bool
 	default y
+3 -2
@@ -182,7 +182,8 @@ static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu)
 	return (vcpu->arch.cp15[c1_SCTLR] & 0b101) == 0b101;
 }
 
-static inline void __coherent_cache_guest_page(struct kvm_vcpu *vcpu, pfn_t pfn,
+static inline void __coherent_cache_guest_page(struct kvm_vcpu *vcpu,
+					       kvm_pfn_t pfn,
 					       unsigned long size,
 					       bool ipa_uncached)
 {
@@ -246,7 +247,7 @@ static inline void __kvm_flush_dcache_pte(pte_t pte)
 static inline void __kvm_flush_dcache_pmd(pmd_t pmd)
 {
 	unsigned long size = PMD_SIZE;
-	pfn_t pfn = pmd_pfn(pmd);
+	kvm_pfn_t pfn = pmd_pfn(pmd);
 
 	while (size) {
 		void *va = kmap_atomic_pfn(pfn);
+1 -9
@@ -88,7 +88,6 @@
 #define L_PMD_SECT_VALID	(_AT(pmdval_t, 1) << 0)
 #define L_PMD_SECT_DIRTY	(_AT(pmdval_t, 1) << 55)
-#define L_PMD_SECT_SPLITTING	(_AT(pmdval_t, 1) << 56)
 #define L_PMD_SECT_NONE		(_AT(pmdval_t, 1) << 57)
 #define L_PMD_SECT_RDONLY	(_AT(pteval_t, 1) << 58)
@@ -232,13 +231,6 @@ static inline pte_t pte_mkspecial(pte_t pte)
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 #define pmd_trans_huge(pmd)	(pmd_val(pmd) && !pmd_table(pmd))
-#define pmd_trans_splitting(pmd) (pmd_isset((pmd), L_PMD_SECT_SPLITTING))
-
-#ifdef CONFIG_HAVE_RCU_TABLE_FREE
-#define __HAVE_ARCH_PMDP_SPLITTING_FLUSH
-void pmdp_splitting_flush(struct vm_area_struct *vma, unsigned long address,
-			  pmd_t *pmdp);
-#endif
 #endif
 
 #define PMD_BIT_FUNC(fn,op) \
@@ -246,9 +238,9 @@ static inline pmd_t pmd_##fn(pmd_t pmd) { pmd_val(pmd) op; return pmd; }
 
 PMD_BIT_FUNC(wrprotect,	|= L_PMD_SECT_RDONLY);
 PMD_BIT_FUNC(mkold,	&= ~PMD_SECT_AF);
-PMD_BIT_FUNC(mksplitting, |= L_PMD_SECT_SPLITTING);
 PMD_BIT_FUNC(mkwrite,	&= ~L_PMD_SECT_RDONLY);
 PMD_BIT_FUNC(mkdirty,	|= L_PMD_SECT_DIRTY);
+PMD_BIT_FUNC(mkclean,	&= ~L_PMD_SECT_DIRTY);
 PMD_BIT_FUNC(mkyoung,	|= PMD_SECT_AF);
 
 #define pmd_mkhuge(pmd)		(__pmd(pmd_val(pmd) & ~PMD_TABLE_BIT))
+5 -5
@@ -992,9 +992,9 @@ out:
 	return ret;
 }
 
-static bool transparent_hugepage_adjust(pfn_t *pfnp, phys_addr_t *ipap)
+static bool transparent_hugepage_adjust(kvm_pfn_t *pfnp, phys_addr_t *ipap)
 {
-	pfn_t pfn = *pfnp;
+	kvm_pfn_t pfn = *pfnp;
 	gfn_t gfn = *ipap >> PAGE_SHIFT;
 
 	if (PageTransCompound(pfn_to_page(pfn))) {
@@ -1201,7 +1201,7 @@ void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
 	kvm_mmu_write_protect_pt_masked(kvm, slot, gfn_offset, mask);
 }
 
-static void coherent_cache_guest_page(struct kvm_vcpu *vcpu, pfn_t pfn,
+static void coherent_cache_guest_page(struct kvm_vcpu *vcpu, kvm_pfn_t pfn,
 				      unsigned long size, bool uncached)
 {
 	__coherent_cache_guest_page(vcpu, pfn, size, uncached);
@@ -1218,7 +1218,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 	struct kvm *kvm = vcpu->kvm;
 	struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache;
 	struct vm_area_struct *vma;
-	pfn_t pfn;
+	kvm_pfn_t pfn;
 	pgprot_t mem_type = PAGE_S2;
 	bool fault_ipa_uncached;
 	bool logging_active = memslot_is_logging(memslot);
@@ -1346,7 +1346,7 @@ static void handle_access_fault(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa)
 {
 	pmd_t *pmd;
 	pte_t *pte;
-	pfn_t pfn;
+	kvm_pfn_t pfn;
 	bool pfn_valid = false;
 
 	trace_kvm_access_fault(fault_ipa);
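
The pfn_t -> kvm_pfn_t churn in this file and the kvm_mmu.h headers above
frees up the pfn_t name for the new DAX-aware type introduced elsewhere in
this patch-bomb. For orientation, roughly (paraphrased from memory; treat the
exact definitions as assumptions, not part of this diff):

	/* KVM's pfn stays a plain integer, now under its own name */
	typedef u64 kvm_pfn_t;

	/* the reclaimed generic name carries a value plus flag bits
	 * (PFN_DEV etc.) for device/DAX memory */
	typedef struct {
		u64 val;
	} pfn_t;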
+2 -3
@@ -52,14 +52,13 @@ pin_page_for_write(const void __user *_addr, pte_t **ptep, spinlock_t **ptlp)
 	 *
 	 * Lock the page table for the destination and check
 	 * to see that it's still huge and whether or not we will
-	 * need to fault on write, or if we have a splitting THP.
+	 * need to fault on write.
 	 */
 	if (unlikely(pmd_thp_or_huge(*pmd))) {
 		ptl = &current->mm->page_table_lock;
 		spin_lock(ptl);
 		if (unlikely(!pmd_thp_or_huge(*pmd)
-			|| pmd_hugewillfault(*pmd)
-			|| pmd_trans_splitting(*pmd))) {
+			|| pmd_hugewillfault(*pmd))) {
 			spin_unlock(ptl);
 			return 0;
 		}
+1 -16
@@ -330,7 +330,7 @@ void flush_dcache_page(struct page *page)
 	mapping = page_mapping(page);
 
 	if (!cache_ops_need_broadcast() &&
-	    mapping && !page_mapped(page))
+	    mapping && !page_mapcount(page))
 		clear_bit(PG_dcache_clean, &page->flags);
 	else {
 		__flush_dcache_page(mapping, page);
@@ -415,18 +415,3 @@ void __flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned l
 	 */
 	__cpuc_flush_dcache_area(page_address(page), PAGE_SIZE);
 }
-
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-#ifdef CONFIG_HAVE_RCU_TABLE_FREE
-void pmdp_splitting_flush(struct vm_area_struct *vma, unsigned long address,
-			  pmd_t *pmdp)
-{
-	pmd_t pmd = pmd_mksplitting(*pmdp);
-	VM_BUG_ON(address & ~PMD_MASK);
-	set_pmd_at(vma->vm_mm, address, pmdp, pmd);
-
-	/* dummy IPI to serialise against fast_gup */
-	kick_all_cpus_sync();
-}
-#endif /* CONFIG_HAVE_RCU_TABLE_FREE */
-#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
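
On the page_mapped() -> page_mapcount() switch seen here and in the arc cache
code earlier: after the THP refcounting rework, page_mapped() on a compound
page has to look beyond a single struct page, while these cache-maintenance
paths only ask whether this exact page has a user mapping. A hypothetical
helper to spell out the intent (not from this commit):

	/* Cheap per-page test: reads only this page's _mapcount and never
	 * walks a compound page's tail pages. Illustrative only. */
	static inline bool page_has_user_mapping(struct page *page)
	{
		return page_mapcount(page) > 0;
	}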
+2 -1
@@ -230,7 +230,8 @@ static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu)
 	return (vcpu_sys_reg(vcpu, SCTLR_EL1) & 0b101) == 0b101;
 }
 
-static inline void __coherent_cache_guest_page(struct kvm_vcpu *vcpu, pfn_t pfn,
+static inline void __coherent_cache_guest_page(struct kvm_vcpu *vcpu,
+					       kvm_pfn_t pfn,
 					       unsigned long size,
 					       bool ipa_uncached)
 {
+1 -8
@@ -353,21 +353,14 @@ static inline pgprot_t mk_sect_prot(pgprot_t prot)
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 #define pmd_trans_huge(pmd)	(pmd_val(pmd) && !(pmd_val(pmd) & PMD_TABLE_BIT))
-#define pmd_trans_splitting(pmd)	pte_special(pmd_pte(pmd))
-
-#ifdef CONFIG_HAVE_RCU_TABLE_FREE
-#define __HAVE_ARCH_PMDP_SPLITTING_FLUSH
-struct vm_area_struct;
-void pmdp_splitting_flush(struct vm_area_struct *vma, unsigned long address,
-			  pmd_t *pmdp);
-#endif /* CONFIG_HAVE_RCU_TABLE_FREE */
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 
 #define pmd_dirty(pmd)		pte_dirty(pmd_pte(pmd))
 #define pmd_young(pmd)		pte_young(pmd_pte(pmd))
 #define pmd_wrprotect(pmd)	pte_pmd(pte_wrprotect(pmd_pte(pmd)))
-#define pmd_mksplitting(pmd)	pte_pmd(pte_mkspecial(pmd_pte(pmd)))
 #define pmd_mkold(pmd)		pte_pmd(pte_mkold(pmd_pte(pmd)))
 #define pmd_mkwrite(pmd)	pte_pmd(pte_mkwrite(pmd_pte(pmd)))
+#define pmd_mkclean(pmd)	pte_pmd(pte_mkclean(pmd_pte(pmd)))
 #define pmd_mkdirty(pmd)	pte_pmd(pte_mkdirty(pmd_pte(pmd)))
 #define pmd_mkyoung(pmd)	pte_pmd(pte_mkyoung(pmd_pte(pmd)))
 #define pmd_mknotpresent(pmd)	(__pmd(pmd_val(pmd) & ~PMD_TYPE_MASK))
-16
@@ -102,19 +102,3 @@ EXPORT_SYMBOL(flush_dcache_page);
  * Additional functions defined in assembly.
  */
 EXPORT_SYMBOL(flush_icache_range);
-
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-#ifdef CONFIG_HAVE_RCU_TABLE_FREE
-void pmdp_splitting_flush(struct vm_area_struct *vma, unsigned long address,
-			  pmd_t *pmdp)
-{
-	pmd_t pmd = pmd_mksplitting(*pmdp);
-
-	VM_BUG_ON(address & ~PMD_MASK);
-	set_pmd_at(vma->vm_mm, address, pmdp, pmd);
-
-	/* dummy IPI to serialise against fast_gup */
-	kick_all_cpus_sync();
-}
-#endif /* CONFIG_HAVE_RCU_TABLE_FREE */
-#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+4 -4
@@ -83,11 +83,9 @@ static inline int get_order(unsigned long size)
 #ifndef CONFIG_NEED_MULTIPLE_NODES
 
-#define PHYS_PFN_OFFSET	(CONFIG_PHYS_OFFSET >> PAGE_SHIFT)
-#define pfn_to_page(pfn)	(mem_map + ((pfn) - PHYS_PFN_OFFSET))
-#define page_to_pfn(page)	((unsigned long)((page) - mem_map) + PHYS_PFN_OFFSET)
-#define pfn_valid(pfn)	((pfn) >= PHYS_PFN_OFFSET && (pfn) < (PHYS_PFN_OFFSET + max_mapnr))
+#define ARCH_PFN_OFFSET	(CONFIG_PHYS_OFFSET >> PAGE_SHIFT)
+#define pfn_valid(pfn)	((pfn) >= ARCH_PFN_OFFSET && (pfn) < (ARCH_PFN_OFFSET + max_mapnr))
 #endif /* CONFIG_NEED_MULTIPLE_NODES */
 
 #define virt_to_page(kaddr)	pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
@@ -101,4 +99,6 @@ static inline int get_order(unsigned long size)
  */
 #define HIGHMEM_START	0x20000000UL
 
+#include <asm-generic/memory_model.h>
+
 #endif /* __ASM_AVR32_PAGE_H */
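
With ARCH_PFN_OFFSET defined, the header above can drop its private
pfn_to_page()/page_to_pfn() and include the generic implementation. For
reference, the FLATMEM helpers in asm-generic/memory_model.h reduce to
roughly the same definitions just removed (paraphrased from memory):

	/* asm-generic/memory_model.h, FLATMEM case (sketch) */
	#define __pfn_to_page(pfn)	(mem_map + ((pfn) - ARCH_PFN_OFFSET))
	#define __page_to_pfn(page)	((unsigned long)((page) - mem_map) + \
					 ARCH_PFN_OFFSET)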
+1 -1
@@ -34,7 +34,7 @@ typedef struct page *pgtable_t;
 #define pgprot_val(x)	((x).pgprot)
 
 #define __pte(x)	((pte_t) { (x) } )
-#define __pmd(x)	((pmd_t) { (x) } )
+#define __pmd(x)	((pmd_t) { { (x) } } )
 #define __pud(x)	((pud_t) { (x) } )
 #define __pgd(x)	((pgd_t) { (x) } )
 #define __pgprot(x)	((pgprot_t) { (x) } )
+1
@@ -105,6 +105,7 @@ extern struct page *vmem_map;
 #ifdef CONFIG_DISCONTIGMEM
 # define page_to_pfn(page)	((unsigned long) (page - vmem_map))
 # define pfn_to_page(pfn)	(vmem_map + (pfn))
+# define __pfn_to_phys(pfn)	PFN_PHYS(pfn)
 #else
 # include <asm-generic/memory_model.h>
 #endif
-3
@@ -36,9 +36,6 @@ config STACKTRACE_SUPPORT
 config LOCKDEP_SUPPORT
 	def_bool y
 
-config HAVE_LATENCYTOP_SUPPORT
-	def_bool y
-
 config RWSEM_GENERIC_SPINLOCK
 	def_bool y
-3
@@ -67,9 +67,6 @@ config STACKTRACE_SUPPORT
 config LOCKDEP_SUPPORT
 	def_bool y
 
-config HAVE_LATENCYTOP_SUPPORT
-	def_bool y
-
 source "init/Kconfig"
 source "kernel/Kconfig.freezer"

Some files were not shown because too many files have changed in this diff.