NOTE (reconstruction): this patch text was whitespace-mangled — newlines were
collapsed and the contents of angle-bracket includes ("#include <...>") were
stripped. Line breaks and hunk layout below are reconstructed to match the
@@ hunk line counts. The header names in the mm/shmem.c include hunk could not
be recovered from the mangled text and are inferred from mainline mm/shmem.c
(hugetlb.h / frontswap.h / fs_parser.h context, mm_inline.h added) — verify
against the original patch before applying.

diff --git a/include/linux/mm.h b/include/linux/mm.h
index f9f8d7f1058e..6d7e7641fc9d 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -3355,6 +3355,8 @@ unsigned long wp_shared_mapping_range(struct address_space *mapping,
 
 extern int sysctl_nr_trim_pages;
 extern bool pte_map_lock_addr(struct vm_fault *vmf, unsigned long addr);
+extern int reclaim_shmem_address_space(struct address_space *mapping);
+extern int reclaim_pages_from_list(struct list_head *page_list);
 
 /**
  * seal_check_future_write - Check for F_SEAL_FUTURE_WRITE flag and handle it
diff --git a/mm/shmem.c b/mm/shmem.c
index a4b5012b1267..d915b0ab010d 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -38,6 +38,7 @@
 #include <linux/hugetlb.h>
 #include <linux/frontswap.h>
 #include <linux/fs_parser.h>
+#include <linux/mm_inline.h>
 
 #include <asm/tlbflush.h> /* for arch/microblaze update_mmu_cache() */
 
@@ -4290,3 +4291,41 @@ void shmem_mark_page_lazyfree(struct page *page)
 	mark_page_lazyfree_movetail(page);
 }
 EXPORT_SYMBOL_GPL(shmem_mark_page_lazyfree);
+
+int reclaim_shmem_address_space(struct address_space *mapping)
+{
+	pgoff_t start = 0;
+	struct page *page;
+	LIST_HEAD(page_list);
+	int reclaimed;
+	XA_STATE(xas, &mapping->i_pages, start);
+
+	if (!shmem_mapping(mapping))
+		return -EINVAL;
+
+	lru_add_drain();
+
+	rcu_read_lock();
+	xas_for_each(&xas, page, ULONG_MAX) {
+		if (xas_retry(&xas, page))
+			continue;
+		if (xa_is_value(page))
+			continue;
+		if (isolate_lru_page(page))
+			continue;
+
+		list_add(&page->lru, &page_list);
+		inc_node_page_state(page, NR_ISOLATED_ANON +
+				page_is_file_lru(page));
+
+		if (need_resched()) {
+			xas_pause(&xas);
+			cond_resched_rcu();
+		}
+	}
+	rcu_read_unlock();
+	reclaimed = reclaim_pages_from_list(&page_list);
+
+	return reclaimed;
+}
+EXPORT_SYMBOL_GPL(reclaim_shmem_address_space);
diff --git a/mm/vmscan.c b/mm/vmscan.c
index c5eecab35226..b2fd7a513b99 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1562,6 +1562,36 @@ unsigned int reclaim_clean_pages_from_list(struct zone *zone,
 	return nr_reclaimed;
 }
 
+int reclaim_pages_from_list(struct list_head *page_list)
+{
+	struct scan_control sc = {
+		.gfp_mask = GFP_KERNEL,
+		.priority = DEF_PRIORITY,
+		.may_writepage = 1,
+		.may_unmap = 1,
+		.may_swap = 1,
+	};
+	unsigned long nr_reclaimed;
+	struct reclaim_stat dummy_stat;
+	struct page *page;
+
+	list_for_each_entry(page, page_list, lru)
+		ClearPageActive(page);
+
+	nr_reclaimed = shrink_page_list(page_list, NULL, &sc,
+					&dummy_stat, false);
+	while (!list_empty(page_list)) {
+
+		page = lru_to_page(page_list);
+		list_del(&page->lru);
+		dec_node_page_state(page, NR_ISOLATED_ANON +
+				page_is_file_lru(page));
+		putback_lru_page(page);
+	}
+
+	return nr_reclaimed;
+}
+
 /*
  * Attempt to remove the specified page from its LRU. Only take this page
  * if it is of the appropriate PageActive status. Pages which are being