shmem: add huge pages support
Here's a basic implementation of huge pages support for shmem/tmpfs.

It's all pretty straightforward:

- shmem_getpage() allocates a huge page if it can, and tries to insert it
  into the radix tree with shmem_add_to_page_cache();

- shmem_add_to_page_cache() puts the page onto the radix tree if there's
  space for it;

- shmem_undo_range() removes huge pages if they lie fully within the range.

  A partial truncate of a huge page only zeroes out that part of the THP.

  This has a visible effect on fallocate(FALLOC_FL_PUNCH_HOLE) behaviour:
  as we don't really create a hole in this case, lseek(SEEK_HOLE) may give
  inconsistent results depending on which pages happened to be allocated
  (see the sketch after this list);

- no need to change shmem_fault(): core-mm will map a compound page as
  huge if the VMA is suitable.
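For illustration, here is a minimal userspace sketch (not part of the patch) that probes the lseek(SEEK_HOLE) behaviour described above. It assumes /dev/shm is a tmpfs mount and that shmem huge pages are enabled for it (via the mount option / sysfs knob added elsewhere in this series); the file name, sizes and offsets are arbitrary.

/*
 * Sketch: punch a hole in a tmpfs file and see where SEEK_HOLE lands.
 * With small pages the punched range becomes a real hole; when it falls
 * inside a huge page, the range is only zeroed, so SEEK_HOLE may report
 * end-of-file instead.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	char path[] = "/dev/shm/thp-hole-XXXXXX";	/* assumed tmpfs mount */
	int fd = mkstemp(path);
	if (fd < 0) { perror("mkstemp"); return 1; }
	unlink(path);

	/* Fill a 4MB file so it can be backed by huge pages if enabled. */
	const off_t size = 4UL << 20;
	char buf[4096];
	memset(buf, 'x', sizeof(buf));
	for (off_t off = 0; off < size; off += sizeof(buf))
		if (pwrite(fd, buf, sizeof(buf), off) != (ssize_t)sizeof(buf)) {
			perror("pwrite");
			return 1;
		}

	/* Punch 64kB in the middle of the first 2MB of the file. */
	if (fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
		      1UL << 20, 64UL << 10))
		perror("fallocate");

	/* Where the first hole is reported depends on the page size used. */
	off_t hole = lseek(fd, 0, SEEK_HOLE);
	printf("first hole reported at %lld (file size %lld)\n",
	       (long long)hole, (long long)size);

	close(fd);
	return 0;
}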
Link: http://lkml.kernel.org/r/1466021202-61880-30-git-send-email-kirill.shutemov@linux.intel.com
Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Commit 800d8c63b2 (parent c01d5b3007), committed by Linus Torvalds.
@@ -156,6 +156,8 @@ void put_huge_zero_page(void);

 #define transparent_hugepage_enabled(__vma) 0

+static inline void prep_transhuge_page(struct page *page) {}
+
 #define transparent_hugepage_flags 0UL
 static inline int
 split_huge_page_to_list(struct page *page, struct list_head *list)
@@ -71,6 +71,9 @@ static inline struct page *shmem_read_mapping_page(
					mapping_gfp_mask(mapping));
 }

+extern bool shmem_charge(struct inode *inode, long pages);
+extern void shmem_uncharge(struct inode *inode, long pages);
+
 #ifdef CONFIG_TMPFS

 extern int shmem_add_seals(struct file *file, unsigned int seals);
@@ -219,8 +219,13 @@ void __delete_from_page_cache(struct page *page, void *shadow)
	/* hugetlb pages do not participate in page cache accounting. */
	if (!PageHuge(page))
		__mod_zone_page_state(page_zone(page), NR_FILE_PAGES, -nr);
-	if (PageSwapBacked(page))
+	if (PageSwapBacked(page)) {
		__mod_zone_page_state(page_zone(page), NR_SHMEM, -nr);
+		if (PageTransHuge(page))
+			__dec_zone_page_state(page, NR_SHMEM_THPS);
+	} else {
+		VM_BUG_ON_PAGE(PageTransHuge(page) && !PageHuge(page), page);
+	}

	/*
	 * At this point page must be either written or cleaned by truncate.
@@ -3316,6 +3316,8 @@ static void __split_huge_page(struct page *page, struct list_head *list,
		if (head[i].index >= end) {
			__ClearPageDirty(head + i);
			__delete_from_page_cache(head + i, NULL);
+			if (IS_ENABLED(CONFIG_SHMEM) && PageSwapBacked(head))
+				shmem_uncharge(head->mapping->host, 1);
			put_page(head + i);
		}
	}
@@ -1142,7 +1142,7 @@ again:
				 * unmap shared but keep private pages.
				 */
				if (details->check_mapping &&
-				    details->check_mapping != page->mapping)
+				    details->check_mapping != page_rmapping(page))
					continue;
			}
			ptent = ptep_get_and_clear_full(mm, addr, pte,
@@ -531,7 +531,7 @@ retry:
		nid = page_to_nid(page);
		if (node_isset(nid, *qp->nmask) == !!(flags & MPOL_MF_INVERT))
			continue;
-		if (PageTransCompound(page) && PageAnon(page)) {
+		if (PageTransCompound(page)) {
			get_page(page);
			pte_unmap_unlock(pte, ptl);
			lock_page(page);
@@ -2563,6 +2563,7 @@ int set_page_dirty(struct page *page)
 {
	struct address_space *mapping = page_mapping(page);

+	page = compound_head(page);
	if (likely(mapping)) {
		int (*spd)(struct page *) = mapping->a_ops->set_page_dirty;
		/*
mm/shmem.c (380): file diff suppressed because it is too large.
@@ -292,6 +292,7 @@ static bool need_activate_page_drain(int cpu)

 void activate_page(struct page *page)
 {
+	page = compound_head(page);
	if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
		struct pagevec *pvec = &get_cpu_var(activate_page_pvecs);
@@ -316,6 +317,7 @@ void activate_page(struct page *page)
 {
	struct zone *zone = page_zone(page);

+	page = compound_head(page);
	spin_lock_irq(&zone->lru_lock);
	__activate_page(page, mem_cgroup_page_lruvec(page, zone), NULL);
	spin_unlock_irq(&zone->lru_lock);