Merge branch 'akpm' (patches from Andrew)
Merge updates from Andrew Morton:

 - fsnotify fix
 - poll() timeout fix
 - a few scripts/ tweaks
 - debugobjects updates
 - the (small) ocfs2 queue
 - Minor fixes to kernel/padata.c
 - Maybe half of the MM queue

* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (117 commits)
  mm, page_alloc: restore the original nodemask if the fast path allocation failed
  mm, page_alloc: uninline the bad page part of check_new_page()
  mm, page_alloc: don't duplicate code in free_pcp_prepare
  mm, page_alloc: defer debugging checks of pages allocated from the PCP
  mm, page_alloc: defer debugging checks of freed pages until a PCP drain
  cpuset: use static key better and convert to new API
  mm, page_alloc: inline pageblock lookup in page free fast paths
  mm, page_alloc: remove unnecessary variable from free_pcppages_bulk
  mm, page_alloc: pull out side effects from free_pages_check
  mm, page_alloc: un-inline the bad part of free_pages_check
  mm, page_alloc: check multiple page fields with a single branch
  mm, page_alloc: remove field from alloc_context
  mm, page_alloc: avoid looking up the first zone in a zonelist twice
  mm, page_alloc: shortcut watermark checks for order-0 pages
  mm, page_alloc: reduce cost of fair zone allocation policy retry
  mm, page_alloc: shorten the page allocator fast path
  mm, page_alloc: check once if a zone has isolated pageblocks
  mm, page_alloc: move __GFP_HARDWALL modifications out of the fastpath
  mm, page_alloc: simplify last cpupid reset
  mm, page_alloc: remove unnecessary initialisation from __alloc_pages_nodemask()
  ...
@@ -316,8 +316,8 @@
 </itemizedlist>
 </para>
 <para>
-The function returns 1 when the fixup was successful,
-otherwise 0. The return value is used to update the
+The function returns true when the fixup was successful,
+otherwise false. The return value is used to update the
 statistics.
 </para>
 <para>

@@ -341,8 +341,8 @@
 </itemizedlist>
 </para>
 <para>
-The function returns 1 when the fixup was successful,
-otherwise 0. The return value is used to update the
+The function returns true when the fixup was successful,
+otherwise false. The return value is used to update the
 statistics.
 </para>
 <para>

@@ -359,7 +359,8 @@
 statically initialized object or not. In case it is it calls
 debug_object_init() and debug_object_activate() to make the
 object known to the tracker and marked active. In this case
-the function should return 0 because this is not a real fixup.
+the function should return false because this is not a real
+fixup.
 </para>
 </sect1>
 

@@ -376,8 +377,8 @@
 </itemizedlist>
 </para>
 <para>
-The function returns 1 when the fixup was successful,
-otherwise 0. The return value is used to update the
+The function returns true when the fixup was successful,
+otherwise false. The return value is used to update the
 statistics.
 </para>
 </sect1>

@@ -397,8 +398,8 @@
 </itemizedlist>
 </para>
 <para>
-The function returns 1 when the fixup was successful,
-otherwise 0. The return value is used to update the
+The function returns true when the fixup was successful,
+otherwise false. The return value is used to update the
 statistics.
 </para>
 </sect1>

@@ -414,8 +415,8 @@
 debug bucket.
 </para>
 <para>
-The function returns 1 when the fixup was successful,
-otherwise 0. The return value is used to update the
+The function returns true when the fixup was successful,
+otherwise false. The return value is used to update the
 statistics.
 </para>
 <para>

@@ -427,7 +428,8 @@
 case. The fixup function should check if this is a legitimate
 case of a statically initialized object or not. In this case only
 debug_object_init() should be called to make the object known to
-the tracker. Then the function should return 0 because this is not
+the tracker. Then the function should return false because this
+is not
 a real fixup.
 </para>
 </sect1>
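The DocBook hunks above track the debugobjects change that makes the fixup callbacks return bool instead of int. As a minimal sketch of a callback under the new convention (my_obj, my_obj_cancel and the descriptor are hypothetical; only the callback signature and the true/false meaning come from the documentation above):

#include <linux/debugobjects.h>

struct my_obj;					/* hypothetical tracked object */
extern void my_obj_cancel(struct my_obj *obj);	/* hypothetical helper */

static struct debug_obj_descr my_obj_debug_descr;

/*
 * fixup_init: called when debug_object_init() finds the object in an
 * unexpected state.  Return true only if a real fixup was performed,
 * so the debugobjects statistics stay meaningful.
 */
static bool my_obj_fixup_init(void *addr, enum debug_obj_state state)
{
	struct my_obj *obj = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		/* Object is still active: stop it, then re-initialize. */
		my_obj_cancel(obj);
		debug_object_init(obj, &my_obj_debug_descr);
		return true;		/* a real fixup happened */
	default:
		return false;		/* nothing to fix up */
	}
}

static struct debug_obj_descr my_obj_debug_descr = {
	.name		= "my_obj",
	.fixup_init	= my_obj_fixup_init,
};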
@@ -2168,6 +2168,14 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 			[KNL,SH] Allow user to override the default size for
 			per-device physically contiguous DMA buffers.
 
+	memhp_default_state=online/offline
+			[KNL] Set the initial state for the memory hotplug
+			onlining policy. If not specified, the default value is
+			set according to the
+			CONFIG_MEMORY_HOTPLUG_DEFAULT_ONLINE kernel config
+			option.
+			See Documentation/memory-hotplug.txt.
+
 	memmap=exactmap	[KNL,X86] Enable setting of an exact
 			E820 memory map, as specified by the user.
 			Such memmap=exactmap lines can be constructed based on
@@ -261,10 +261,11 @@ it according to the policy which can be read from "auto_online_blocks" file:
 
 % cat /sys/devices/system/memory/auto_online_blocks
 
-The default is "offline" which means the newly added memory is not in a
-ready-to-use state and you have to "online" the newly added memory blocks
-manually. Automatic onlining can be requested by writing "online" to
-"auto_online_blocks" file:
+The default depends on the CONFIG_MEMORY_HOTPLUG_DEFAULT_ONLINE kernel config
+option. If it is disabled the default is "offline" which means the newly added
+memory is not in a ready-to-use state and you have to "online" the newly added
+memory blocks manually. Automatic onlining can be requested by writing "online"
+to "auto_online_blocks" file:
 
 % echo online > /sys/devices/system/memory/auto_online_blocks
 
@@ -57,6 +57,7 @@ Currently, these files are in /proc/sys/vm:
 - panic_on_oom
 - percpu_pagelist_fraction
 - stat_interval
+- stat_refresh
 - swappiness
 - user_reserve_kbytes
 - vfs_cache_pressure

@@ -755,6 +756,19 @@ is 1 second.
 
 ==============================================================
 
+stat_refresh
+
+Any read or write (by root only) flushes all the per-cpu vm statistics
+into their global totals, for more accurate reports when testing
+e.g. cat /proc/sys/vm/stat_refresh /proc/meminfo
+
+As a side-effect, it also checks for negative totals (elsewhere reported
+as 0) and "fails" with EINVAL if any are found, with a warning in dmesg.
+(At time of writing, a few stats are known sometimes to be found negative,
+with no ill effects: errors and warnings on these stats are suppressed.)
+
+==============================================================
+
 swappiness
 
 This control is used to define how aggressive the kernel will swap
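A minimal userspace sketch of the stat_refresh behaviour documented above; the /proc/sys/vm/stat_refresh path and the EINVAL side effect come from the text, while the program structure and messages are only illustrative:

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[8];
	int fd = open("/proc/sys/vm/stat_refresh", O_RDONLY);

	if (fd < 0) {
		perror("open /proc/sys/vm/stat_refresh");
		return 1;
	}
	/* Any read (as root) folds the per-cpu deltas into the global
	 * counters; reading /proc/meminfo afterwards is then more accurate. */
	if (read(fd, buf, sizeof(buf)) < 0 && errno == EINVAL)
		fprintf(stderr, "negative vmstat total detected, see dmesg\n");
	close(fd);
	return 0;
}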
@@ -394,9 +394,9 @@ hugepage natively. Once finished you can drop the page table lock.
 Refcounting on THP is mostly consistent with refcounting on other compound
 pages:
 
-  - get_page()/put_page() and GUP operate in head page's ->_count.
+  - get_page()/put_page() and GUP operate in head page's ->_refcount.
 
-  - ->_count in tail pages is always zero: get_page_unless_zero() never
+  - ->_refcount in tail pages is always zero: get_page_unless_zero() never
     succeed on tail pages.
 
   - map/unmap of the pages with PTE entry increment/decrement ->_mapcount

@@ -426,15 +426,15 @@ requests to split pinned huge page: it expects page count to be equal to
 sum of mapcount of all sub-pages plus one (split_huge_page caller must
 have reference for head page).
 
-split_huge_page uses migration entries to stabilize page->_count and
+split_huge_page uses migration entries to stabilize page->_refcount and
 page->_mapcount.
 
 We safe against physical memory scanners too: the only legitimate way
 scanner can get reference to a page is get_page_unless_zero().
 
-All tail pages has zero ->_count until atomic_add(). It prevent scanner
+All tail pages has zero ->_refcount until atomic_add(). It prevent scanner
 from geting reference to tail page up to the point. After the atomic_add()
-we don't care about ->_count value. We already known how many references
+we don't care about ->_refcount value. We already known how many references
 with should uncharge from head page.
 
 For head page get_page_unless_zero() will succeed and we don't mind. It's
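To make the ->_refcount rules above concrete, here is a sketch of the only legitimate way a physical memory scanner can pin an arbitrary page; scanner_try_pin is a hypothetical helper, while pfn_valid(), pfn_to_page(), get_page_unless_zero() and put_page() are the interfaces the text refers to:

#include <linux/mm.h>

/*
 * Hypothetical scanner helper: try to take a reference on an arbitrary
 * pfn.  THP tail pages keep _refcount at zero, so the attempt fails and
 * the scanner simply skips them.
 */
static struct page *scanner_try_pin(unsigned long pfn)
{
	struct page *page;

	if (!pfn_valid(pfn))
		return NULL;

	page = pfn_to_page(pfn);
	if (!get_page_unless_zero(page))
		return NULL;		/* tail page, or a page being freed */

	return page;			/* caller must put_page() when done */
}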
@@ -61,8 +61,6 @@ static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
 extern void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
 				 pmd_t *pmd);
 
-#define has_transparent_hugepage() 1
-
 /* Generic variants assume pgtable_t is struct page *, hence need for these */
 #define __HAVE_ARCH_PGTABLE_DEPOSIT
 extern void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,

@@ -281,11 +281,6 @@ static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
 	flush_pmd_entry(pmdp);
 }
 
-static inline int has_transparent_hugepage(void)
-{
-	return 1;
-}
-
 #endif /* __ASSEMBLY__ */
 
 #endif /* _ASM_PGTABLE_3LEVEL_H */

@@ -316,11 +316,6 @@ static inline int pmd_protnone(pmd_t pmd)
 
 #define set_pmd_at(mm, addr, pmdp, pmd)	set_pte_at(mm, addr, (pte_t *)pmdp, pmd_pte(pmd))
 
-static inline int has_transparent_hugepage(void)
-{
-	return 1;
-}
-
 #define __pgprot_modify(prot,mask,bits) \
 	__pgprot((pgprot_val(prot) & ~(mask)) | (bits))
 
@@ -307,6 +307,7 @@ static __init int setup_hugepagesz(char *opt)
 	} else if (ps == PUD_SIZE) {
 		hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);
 	} else {
+		hugetlb_bad_size();
 		pr_err("hugepagesz: Unsupported page size %lu K\n", ps >> 10);
 		return 0;
 	}
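Several hunks in this series add a hugetlb_bad_size() call before the "Unsupported page size" error; its definition is not among the hunks shown here. A sketch of the mechanism it is assumed to provide (the names follow the hugetlb command-line parsing code, but treat the details as illustrative): an invalid hugepagesz= poisons the parse state so that a following hugepages= count is not credited to the wrong hstate.

#include <linux/init.h>
#include <linux/printk.h>
#include <linux/types.h>

/* Sketch only: assumed parse-state flag behind the hugetlb_bad_size()
 * calls added above. */
static bool parsed_valid_hugepagesz = true;

void hugetlb_bad_size(void)
{
	parsed_valid_hugepagesz = false;
}

static int __init hugetlb_nrpages_setup(char *s)
{
	if (!parsed_valid_hugepagesz) {
		pr_warn("hugepages=%s ignored: it follows an unsupported hugepagesz\n", s);
		parsed_valid_hugepagesz = true;	/* re-arm for later options */
		return 1;
	}
	/* ... normal parsing of the per-hstate page count ... */
	return 1;
}
__setup("hugepages=", hugetlb_nrpages_setup);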
@@ -239,6 +239,7 @@ static __init int setup_hugepagesz(char *opt)
 	if (ps == (1 << HPAGE_SHIFT)) {
 		hugetlb_add_hstate(HPAGE_SHIFT - PAGE_SHIFT);
 	} else {
+		hugetlb_bad_size();
 		pr_err("hugepagesz: Unsupported page size %lu M\n",
 		       ps >> 20);
 		return 0;

@@ -533,6 +533,7 @@ static inline int io_remap_pfn_range(struct vm_area_struct *vma,
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 
+#define has_transparent_hugepage has_transparent_hugepage
 extern int has_transparent_hugepage(void);
 
 static inline int pmd_trans_huge(pmd_t pmd)
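Several header hunks in this series add "#define has_transparent_hugepage has_transparent_hugepage" in front of an architecture's own declaration. The generic fallback that this macro suppresses is not among the hunks shown in this truncated diff; the pattern is assumed to look like the following sketch, where defining the macro makes the generic header skip its constant default:

/* Assumed generic-header fallback (not shown in this diff): used only
 * when the architecture did not define the macro above. */
#ifndef has_transparent_hugepage
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define has_transparent_hugepage() 1
#else
#define has_transparent_hugepage() 0
#endif
#endif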
@@ -405,19 +405,20 @@ void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 
-int __init has_transparent_hugepage(void)
+int has_transparent_hugepage(void)
 {
-	unsigned int mask;
-	unsigned long flags;
+	static unsigned int mask = -1;
 
-	local_irq_save(flags);
-	write_c0_pagemask(PM_HUGE_MASK);
-	back_to_back_c0_hazard();
-	mask = read_c0_pagemask();
-	write_c0_pagemask(PM_DEFAULT_MASK);
-
-	local_irq_restore(flags);
+	if (mask == -1) {	/* first call comes during __init */
+		unsigned long flags;
+
+		local_irq_save(flags);
+		write_c0_pagemask(PM_HUGE_MASK);
+		back_to_back_c0_hazard();
+		mask = read_c0_pagemask();
+		write_c0_pagemask(PM_DEFAULT_MASK);
+		local_irq_restore(flags);
+	}
 
 	return mask == PM_HUGE_MASK;
 }
 
@@ -219,6 +219,7 @@ extern void set_pmd_at(struct mm_struct *mm, unsigned long addr,
 		       pmd_t *pmdp, pmd_t pmd);
 extern void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
 				 pmd_t *pmd);
+#define has_transparent_hugepage has_transparent_hugepage
 extern int has_transparent_hugepage(void);
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 

@@ -65,7 +65,6 @@ extern int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr,
 		     struct page **pages, int *nr);
 #ifndef CONFIG_TRANSPARENT_HUGEPAGE
 #define pmd_large(pmd)		0
-#define has_transparent_hugepage()	0
 #endif
 pte_t *__find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea,
 				   bool *is_thp, unsigned *shift);

@@ -772,8 +772,10 @@ static int __init hugepage_setup_sz(char *str)
 
 	size = memparse(str, &str);
 
-	if (add_huge_page_size(size) != 0)
-		printk(KERN_WARNING "Invalid huge page size specified(%llu)\n", size);
+	if (add_huge_page_size(size) != 0) {
+		hugetlb_bad_size();
+		pr_err("Invalid huge page size specified(%llu)\n", size);
+	}
 
 	return 1;
 }
@@ -1223,6 +1223,7 @@ static inline int pmd_trans_huge(pmd_t pmd)
 	return pmd_val(pmd) & _SEGMENT_ENTRY_LARGE;
 }
 
+#define has_transparent_hugepage has_transparent_hugepage
 static inline int has_transparent_hugepage(void)
 {
 	return MACHINE_HAS_HPAGE ? 1 : 0;

@@ -681,8 +681,6 @@ static inline unsigned long pmd_trans_huge(pmd_t pmd)
 	return pte_val(pte) & _PAGE_PMD_HUGE;
 }
 
-#define has_transparent_hugepage() 1
-
 static inline pmd_t pmd_mkold(pmd_t pmd)
 {
 	pte_t pte = __pte(pmd_val(pmd));

@@ -487,7 +487,6 @@ static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
 }
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
-#define has_transparent_hugepage() 1
 #define pmd_trans_huge pmd_huge_page
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 
@@ -962,9 +962,7 @@ static void __init setup_numa_mapping(void)
 		cpumask_set_cpu(best_cpu, &node_2_cpu_mask[node]);
 		cpu_2_node[best_cpu] = node;
 		cpumask_clear_cpu(best_cpu, &unbound_cpus);
-		node = next_node(node, default_nodes);
-		if (node == MAX_NUMNODES)
-			node = first_node(default_nodes);
+		node = next_node_in(node, default_nodes);
 	}
 
 	/* Print out node assignments and set defaults for disabled cpus */
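The next_node_in() helper used above is introduced elsewhere in this series and is not shown in this diff; assuming it keeps the semantics of the three lines it replaces, a sketch of the behaviour:

#include <linux/nodemask.h>

/*
 * Sketch of the assumed semantics of next_node_in(), inferred from the
 * open-coded sequence it replaces: advance to the next node set in the
 * mask, wrapping around to the first node instead of returning
 * MAX_NUMNODES.
 */
static int next_node_in_sketch(int node, nodemask_t mask)
{
	node = next_node(node, mask);
	if (node == MAX_NUMNODES)
		node = first_node(mask);
	return node;
}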
@@ -308,11 +308,16 @@ static bool saw_hugepagesz;
 
 static __init int setup_hugepagesz(char *opt)
 {
+	int rc;
+
 	if (!saw_hugepagesz) {
 		saw_hugepagesz = true;
 		memset(huge_shift, 0, sizeof(huge_shift));
 	}
-	return __setup_hugepagesz(memparse(opt, NULL));
+	rc = __setup_hugepagesz(memparse(opt, NULL));
+	if (rc)
+		hugetlb_bad_size();
+	return rc;
 }
 __setup("hugepagesz=", setup_hugepagesz);
 
Some files were not shown because too many files have changed in this diff.