/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_MIGRATE_H
#define _LINUX_MIGRATE_H

#include <linux/mm.h>
#include <linux/mempolicy.h>
#include <linux/migrate_mode.h>
#include <linux/hugetlb.h>

typedef struct page *new_page_t(struct page *page, unsigned long private);
typedef void free_page_t(struct page *page, unsigned long private);

struct migration_target_control;

/*
 * Return values from address_space_operations.migratepage():
 *   - negative errno on page migration failure;
 *   - zero on page migration success;
 */
#define MIGRATEPAGE_SUCCESS 0
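/*
 * Illustrative sketch (an assumption, not something this header defines):
 * a filesystem that only needs the generic behaviour can honour the return
 * convention above by wiring migrate_page() straight into its
 * address_space_operations, since migrate_page() returns either
 * MIGRATEPAGE_SUCCESS or a negative errno:
 *
 *	static const struct address_space_operations example_aops = {
 *		.migratepage	= migrate_page,
 *	};
 */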
/* Defined in mm/debug.c: */
extern const char *migrate_reason_names[MR_TYPES];

#ifdef CONFIG_MIGRATION

extern void putback_movable_pages(struct list_head *l);
extern int migrate_page(struct address_space *mapping,
			struct page *newpage, struct page *page,
			enum migrate_mode mode);
extern int migrate_pages(struct list_head *l, new_page_t new, free_page_t free,
		unsigned long private, enum migrate_mode mode, int reason,
		unsigned int *ret_succeeded);
extern struct page *alloc_migration_target(struct page *page, unsigned long private);
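/*
 * Minimal usage sketch (hedged: 'target_nid' is hypothetical, and struct
 * migration_target_control is defined in mm/internal.h, not here): callers
 * isolate pages onto a private list, then let migrate_pages() move them,
 * passing alloc_migration_target() as the new_page_t callback with a pointer
 * to the target control encoded in 'private'. Pages still on the list after
 * a failure must be put back:
 *
 *	LIST_HEAD(pagelist);
 *	struct migration_target_control mtc = {
 *		.nid		= target_nid,
 *		.gfp_mask	= GFP_USER,
 *	};
 *
 *	// ... isolate pages onto &pagelist ...
 *	if (migrate_pages(&pagelist, alloc_migration_target, NULL,
 *			  (unsigned long)&mtc, MIGRATE_SYNC,
 *			  MR_MEMORY_HOTPLUG, NULL))
 *		putback_movable_pages(&pagelist);
 */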
extern int isolate_movable_page(struct page *page, isolate_mode_t mode);

extern void migrate_page_states(struct page *newpage, struct page *page);
extern void migrate_page_copy(struct page *newpage, struct page *page);
extern int migrate_huge_page_move_mapping(struct address_space *mapping,
				  struct page *newpage, struct page *page);
extern int migrate_page_move_mapping(struct address_space *mapping,
		struct page *newpage, struct page *page, int extra_count);
void migration_entry_wait_on_locked(swp_entry_t entry, pte_t *ptep,
				spinlock_t *ptl);
void folio_migrate_flags(struct folio *newfolio, struct folio *folio);
void folio_migrate_copy(struct folio *newfolio, struct folio *folio);
int folio_migrate_mapping(struct address_space *mapping,
		struct folio *newfolio, struct folio *folio, int extra_count);

extern bool numa_demotion_enabled;
#else

static inline void putback_movable_pages(struct list_head *l) {}
static inline int migrate_pages(struct list_head *l, new_page_t new,
		free_page_t free, unsigned long private, enum migrate_mode mode,
		int reason, unsigned int *ret_succeeded)
	{ return -ENOSYS; }
static inline struct page *alloc_migration_target(struct page *page,
		unsigned long private)
	{ return NULL; }
static inline int isolate_movable_page(struct page *page, isolate_mode_t mode)
	{ return -EBUSY; }

static inline void migrate_page_states(struct page *newpage, struct page *page)
{
}

static inline void migrate_page_copy(struct page *newpage,
				     struct page *page) {}

static inline int migrate_huge_page_move_mapping(struct address_space *mapping,
				  struct page *newpage, struct page *page)
{
	return -ENOSYS;
}

#define numa_demotion_enabled false
#endif /* CONFIG_MIGRATION */

#ifdef CONFIG_COMPACTION
extern int PageMovable(struct page *page);
extern void __SetPageMovable(struct page *page, struct address_space *mapping);
extern void __ClearPageMovable(struct page *page);
#else
static inline int PageMovable(struct page *page) { return 0; }
static inline void __SetPageMovable(struct page *page,
				struct address_space *mapping)
{
}
static inline void __ClearPageMovable(struct page *page)
{
}
#endif

#ifdef CONFIG_NUMA_BALANCING
extern int migrate_misplaced_page(struct page *page,
				  struct vm_area_struct *vma, int node);
#else
static inline int migrate_misplaced_page(struct page *page,
					 struct vm_area_struct *vma, int node)
{
	return -EAGAIN; /* can't migrate now */
}
#endif /* CONFIG_NUMA_BALANCING */

#ifdef CONFIG_MIGRATION

/*
 * Watch out for PAE architectures, where a 32-bit unsigned long might not
 * have enough bits to store both the physical address and the flags below.
 * So far we have enough room for all our flags.
 */
#define MIGRATE_PFN_VALID (1UL << 0)
#define MIGRATE_PFN_MIGRATE (1UL << 1)
#define MIGRATE_PFN_WRITE (1UL << 3)
#define MIGRATE_PFN_SHIFT 6

static inline struct page *migrate_pfn_to_page(unsigned long mpfn)
{
	if (!(mpfn & MIGRATE_PFN_VALID))
		return NULL;
	return pfn_to_page(mpfn >> MIGRATE_PFN_SHIFT);
}

static inline unsigned long migrate_pfn(unsigned long pfn)
{
	return (pfn << MIGRATE_PFN_SHIFT) | MIGRATE_PFN_VALID;
}
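/*
 * Example round trip through the encoding above (illustrative only; 'page'
 * stands in for any valid struct page): migrate_pfn() shifts the pfn up to
 * leave room for the flag bits, and migrate_pfn_to_page() shifts them back
 * off:
 *
 *	unsigned long mpfn = migrate_pfn(page_to_pfn(page)) |
 *			     MIGRATE_PFN_MIGRATE;
 *	// migrate_pfn_to_page(mpfn) == page, as MIGRATE_PFN_VALID is set
 *	// mpfn & MIGRATE_PFN_MIGRATE indicates the page may be migrated
 */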
enum migrate_vma_direction {
	MIGRATE_VMA_SELECT_SYSTEM = 1 << 0,
	MIGRATE_VMA_SELECT_DEVICE_PRIVATE = 1 << 1,
};

struct migrate_vma {
	struct vm_area_struct	*vma;
	/*
	 * Both src and dst array must be big enough for
	 * (end - start) >> PAGE_SHIFT entries.
	 *
	 * The src array must not be modified by the caller after
	 * migrate_vma_setup(), and the caller must not change the dst array
	 * after migrate_vma_pages() returns.
	 */
	unsigned long		*dst;
	unsigned long		*src;
	unsigned long		cpages;
	unsigned long		npages;
	unsigned long		start;
	unsigned long		end;

	/*
	 * Set to the owner value also stored in page->pgmap->owner for
	 * migrating out of device private memory. The flags also need to
	 * be set to MIGRATE_VMA_SELECT_DEVICE_PRIVATE.
	 * The caller should always set this field when using mmu notifier
	 * callbacks to avoid device MMU invalidations for device private
	 * pages that are not being migrated.
	 */
	void			*pgmap_owner;
	unsigned long		flags;
};

int migrate_vma_setup(struct migrate_vma *args);
void migrate_vma_pages(struct migrate_vma *migrate);
void migrate_vma_finalize(struct migrate_vma *migrate);
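/*
 * Typical calling sequence, as a sketch under assumptions ('NPAGES', 'dev'
 * and the copy step are hypothetical, not part of this API): the caller
 * fills in struct migrate_vma, lets migrate_vma_setup() collect and unmap
 * the source pages, supplies destination pages, then commits:
 *
 *	unsigned long src[NPAGES], dst[NPAGES];
 *	struct migrate_vma args = {
 *		.vma		= vma,
 *		.src		= src,
 *		.dst		= dst,
 *		.start		= start,
 *		.end		= start + NPAGES * PAGE_SIZE,
 *		.pgmap_owner	= dev,
 *		.flags		= MIGRATE_VMA_SELECT_DEVICE_PRIVATE,
 *	};
 *
 *	if (!migrate_vma_setup(&args)) {
 *		// for each src[i] with MIGRATE_PFN_MIGRATE set: allocate a
 *		// destination page, copy the data, and set dst[i] =
 *		// migrate_pfn(page_to_pfn(newpage))
 *		migrate_vma_pages(&args);
 *		migrate_vma_finalize(&args);
 *	}
 */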
int next_demotion_node(int node);

#else /* CONFIG_MIGRATION disabled: */

static inline int next_demotion_node(int node)
{
	return NUMA_NO_NODE;
}
#endif /* CONFIG_MIGRATION */

#endif /* _LINUX_MIGRATE_H */