Mirror of https://github.com/armbian/linux-cix.git (synced 2026-01-06 12:30:45 -08:00)
merge mm-hotfixes-stable into mm-stable to pick up depended-upon changes
@@ -12474,6 +12474,7 @@ F: net/mctp/
 
 MAPLE TREE
 M: Liam R. Howlett <Liam.Howlett@oracle.com>
+L: maple-tree@lists.infradead.org
 L: linux-mm@kvack.org
 S: Supported
 F: Documentation/core-api/maple_tree.rst
@@ -145,6 +145,7 @@ static int subpage_walk_pmd_entry(pmd_t *pmd, unsigned long addr,
 
 static const struct mm_walk_ops subpage_walk_ops = {
     .pmd_entry = subpage_walk_pmd_entry,
+    .walk_lock = PGWALK_WRLOCK_VERIFY,
 };
 
 static void subpage_mark_vma_nohuge(struct mm_struct *mm, unsigned long addr,
@@ -102,6 +102,7 @@ static const struct mm_walk_ops pageattr_ops = {
     .pmd_entry = pageattr_pmd_entry,
     .pte_entry = pageattr_pte_entry,
     .pte_hole = pageattr_pte_hole,
+    .walk_lock = PGWALK_RDLOCK,
 };
 
 static int __set_memory(unsigned long addr, int numpages, pgprot_t set_mask,
@@ -2514,6 +2514,7 @@ static int thp_split_walk_pmd_entry(pmd_t *pmd, unsigned long addr,
 
 static const struct mm_walk_ops thp_split_walk_ops = {
     .pmd_entry = thp_split_walk_pmd_entry,
+    .walk_lock = PGWALK_WRLOCK_VERIFY,
 };
 
 static inline void thp_split_mm(struct mm_struct *mm)
@@ -2565,6 +2566,7 @@ static int __zap_zero_pages(pmd_t *pmd, unsigned long start,
 
 static const struct mm_walk_ops zap_zero_walk_ops = {
     .pmd_entry = __zap_zero_pages,
+    .walk_lock = PGWALK_WRLOCK,
 };
 
 /*
@@ -2655,6 +2657,7 @@ static const struct mm_walk_ops enable_skey_walk_ops = {
     .hugetlb_entry = __s390_enable_skey_hugetlb,
     .pte_entry = __s390_enable_skey_pte,
     .pmd_entry = __s390_enable_skey_pmd,
+    .walk_lock = PGWALK_WRLOCK,
 };
 
 int s390_enable_skey(void)
@@ -2692,6 +2695,7 @@ static int __s390_reset_cmma(pte_t *pte, unsigned long addr,
 
 static const struct mm_walk_ops reset_cmma_walk_ops = {
     .pte_entry = __s390_reset_cmma,
+    .walk_lock = PGWALK_WRLOCK,
 };
 
 void s390_reset_cmma(struct mm_struct *mm)
@@ -2728,6 +2732,7 @@ static int s390_gather_pages(pte_t *ptep, unsigned long addr,
 
 static const struct mm_walk_ops gather_pages_ops = {
     .pte_entry = s390_gather_pages,
+    .walk_lock = PGWALK_RDLOCK,
 };
 
 /*
@@ -1101,9 +1101,17 @@ int nilfs_set_file_dirty(struct inode *inode, unsigned int nr_dirty)
 
 int __nilfs_mark_inode_dirty(struct inode *inode, int flags)
 {
+    struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
     struct buffer_head *ibh;
     int err;
 
+    /*
+     * Do not dirty inodes after the log writer has been detached
+     * and its nilfs_root struct has been freed.
+     */
+    if (unlikely(nilfs_purging(nilfs)))
+        return 0;
+
     err = nilfs_load_inode_block(inode, &ibh);
     if (unlikely(err)) {
         nilfs_warn(inode->i_sb,
@@ -725,6 +725,11 @@ static size_t nilfs_lookup_dirty_data_buffers(struct inode *inode,
         struct folio *folio = fbatch.folios[i];
 
         folio_lock(folio);
+        if (unlikely(folio->mapping != mapping)) {
+            /* Exclude folios removed from the address space */
+            folio_unlock(folio);
+            continue;
+        }
         head = folio_buffers(folio);
         if (!head) {
             create_empty_buffers(&folio->page, i_blocksize(inode), 0);
@@ -2845,6 +2850,7 @@ void nilfs_detach_log_writer(struct super_block *sb)
         nilfs_segctor_destroy(nilfs->ns_writer);
         nilfs->ns_writer = NULL;
     }
+    set_nilfs_purging(nilfs);
 
     /* Force to free the list of dirty files */
     spin_lock(&nilfs->ns_inode_lock);
@@ -2857,4 +2863,5 @@ void nilfs_detach_log_writer(struct super_block *sb)
     up_write(&nilfs->ns_segctor_sem);
 
     nilfs_dispose_list(nilfs, &garbage_list, 1);
+    clear_nilfs_purging(nilfs);
 }
@@ -29,6 +29,7 @@ enum {
     THE_NILFS_DISCONTINUED, /* 'next' pointer chain has broken */
     THE_NILFS_GC_RUNNING,   /* gc process is running */
     THE_NILFS_SB_DIRTY,     /* super block is dirty */
+    THE_NILFS_PURGING,      /* disposing dirty files for cleanup */
 };
 
 /**
@@ -208,6 +209,7 @@ THE_NILFS_FNS(INIT, init)
 THE_NILFS_FNS(DISCONTINUED, discontinued)
 THE_NILFS_FNS(GC_RUNNING, gc_running)
 THE_NILFS_FNS(SB_DIRTY, sb_dirty)
+THE_NILFS_FNS(PURGING, purging)
 
 /*
  * Mount option operations
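The new PURGING bit is only ever used through the helpers that THE_NILFS_FNS() generates: the set_nilfs_purging()/clear_nilfs_purging() pair bracketing the dirty-file disposal in nilfs_detach_log_writer() above, and the nilfs_purging() test guarding __nilfs_mark_inode_dirty(). As a rough sketch of what THE_NILFS_FNS(PURGING, purging) provides (the ns_flags field name is an assumption here, inferred from how the neighbouring flags are used, and this is not the verbatim upstream macro expansion):

/* Sketch of the helpers generated by THE_NILFS_FNS(PURGING, purging). */
static inline void set_nilfs_purging(struct the_nilfs *nilfs)
{
    set_bit(THE_NILFS_PURGING, &nilfs->ns_flags);   /* assumed flag word */
}

static inline void clear_nilfs_purging(struct the_nilfs *nilfs)
{
    clear_bit(THE_NILFS_PURGING, &nilfs->ns_flags);
}

static inline int nilfs_purging(struct the_nilfs *nilfs)
{
    return test_bit(THE_NILFS_PURGING, &nilfs->ns_flags);
}

With these helpers, a racing __nilfs_mark_inode_dirty() sees the flag set for the whole window in which the log writer disposes of dirty files, which is exactly what the inode.c hunk relies on.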
@@ -309,6 +309,8 @@ static void append_kcore_note(char *notes, size_t *i, const char *name,
 
 static ssize_t read_kcore_iter(struct kiocb *iocb, struct iov_iter *iter)
 {
+    struct file *file = iocb->ki_filp;
+    char *buf = file->private_data;
     loff_t *fpos = &iocb->ki_pos;
     size_t phdrs_offset, notes_offset, data_offset;
     size_t page_offline_frozen = 1;
@@ -555,10 +557,21 @@ static ssize_t read_kcore_iter(struct kiocb *iocb, struct iov_iter *iter)
         case KCORE_VMEMMAP:
         case KCORE_TEXT:
             /*
-             * We use _copy_to_iter() to bypass usermode hardening
-             * which would otherwise prevent this operation.
+             * Sadly we must use a bounce buffer here to be able to
+             * make use of copy_from_kernel_nofault(), as these
+             * memory regions might not always be mapped on all
+             * architectures.
              */
-            if (_copy_to_iter((char *)start, tsz, iter) != tsz) {
+            if (copy_from_kernel_nofault(buf, (void *)start, tsz)) {
+                if (iov_iter_zero(tsz, iter) != tsz) {
+                    ret = -EFAULT;
+                    goto out;
+                }
+            /*
+             * We know the bounce buffer is safe to copy from, so
+             * use _copy_to_iter() directly.
+             */
+            } else if (_copy_to_iter(buf, tsz, iter) != tsz) {
                 ret = -EFAULT;
                 goto out;
             }
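The comments in the hunk describe the whole pattern: copy_from_kernel_nofault() can probe kernel ranges that may not be mapped, but it needs a real kernel destination, so the data is staged in the per-open bounce buffer before _copy_to_iter() pushes it to the user iterator. A minimal sketch of that pattern on its own, assuming a caller-supplied buffer; the helper name is invented for illustration and is not part of the patch:

#include <linux/types.h>
#include <linux/uaccess.h>  /* copy_from_kernel_nofault() */
#include <linux/uio.h>      /* struct iov_iter, _copy_to_iter(), iov_iter_zero() */

/*
 * Stage 'len' bytes starting at kernel address 'src' in the bounce buffer
 * 'buf' (at least 'len' bytes), then hand them to 'iter'.  Unreadable
 * source ranges are replaced by zeroes instead of failing the whole read.
 */
static ssize_t bounce_kernel_range_to_iter(void *buf, const void *src,
                                           size_t len, struct iov_iter *iter)
{
    if (copy_from_kernel_nofault(buf, src, len)) {
        /* Source not mapped or not readable: emit zeroes instead. */
        if (iov_iter_zero(len, iter) != len)
            return -EFAULT;
        return len;
    }
    /* The bounce buffer is known-good kernel memory. */
    if (_copy_to_iter(buf, len, iter) != len)
        return -EFAULT;
    return len;
}

read_kcore_iter() follows the same structure: a failed nofault copy degrades to zero-filling the output rather than returning -EFAULT for the whole read.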
@@ -595,6 +608,10 @@ static int open_kcore(struct inode *inode, struct file *filp)
     if (ret)
         return ret;
 
+    filp->private_data = kmalloc(PAGE_SIZE, GFP_KERNEL);
+    if (!filp->private_data)
+        return -ENOMEM;
+
     if (kcore_need_update)
         kcore_update_ram();
     if (i_size_read(inode) != proc_root_kcore->size) {
@@ -605,9 +622,16 @@ static int open_kcore(struct inode *inode, struct file *filp)
     return 0;
 }
 
+static int release_kcore(struct inode *inode, struct file *file)
+{
+    kfree(file->private_data);
+    return 0;
+}
+
 static const struct proc_ops kcore_proc_ops = {
     .proc_read_iter = read_kcore_iter,
     .proc_open = open_kcore,
+    .proc_release = release_kcore,
     .proc_lseek = default_llseek,
 };
@@ -571,8 +571,7 @@ static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
     bool migration = false;
 
     if (pmd_present(*pmd)) {
-        /* FOLL_DUMP will return -EFAULT on huge zero page */
-        page = follow_trans_huge_pmd(vma, addr, pmd, FOLL_DUMP);
+        page = vm_normal_page_pmd(vma, addr, *pmd);
     } else if (unlikely(thp_migration_supported() && is_swap_pmd(*pmd))) {
         swp_entry_t entry = pmd_to_swp_entry(*pmd);
 
@@ -742,12 +741,14 @@ static int smaps_hugetlb_range(pte_t *pte, unsigned long hmask,
 static const struct mm_walk_ops smaps_walk_ops = {
     .pmd_entry = smaps_pte_range,
     .hugetlb_entry = smaps_hugetlb_range,
+    .walk_lock = PGWALK_RDLOCK,
 };
 
 static const struct mm_walk_ops smaps_shmem_walk_ops = {
     .pmd_entry = smaps_pte_range,
     .hugetlb_entry = smaps_hugetlb_range,
     .pte_hole = smaps_pte_hole,
+    .walk_lock = PGWALK_RDLOCK,
 };
 
 /*
@@ -1229,6 +1230,7 @@ static int clear_refs_test_walk(unsigned long start, unsigned long end,
 static const struct mm_walk_ops clear_refs_walk_ops = {
     .pmd_entry = clear_refs_pte_range,
     .test_walk = clear_refs_test_walk,
+    .walk_lock = PGWALK_WRLOCK,
 };
 
 static ssize_t clear_refs_write(struct file *file, const char __user *buf,
@@ -1606,6 +1608,7 @@ static const struct mm_walk_ops pagemap_ops = {
     .pmd_entry = pagemap_pmd_range,
     .pte_hole = pagemap_pte_hole,
     .hugetlb_entry = pagemap_hugetlb_range,
+    .walk_lock = PGWALK_RDLOCK,
 };
 
 /*
@@ -1919,6 +1922,7 @@ static int gather_hugetlb_stats(pte_t *pte, unsigned long hmask,
 static const struct mm_walk_ops show_numa_ops = {
     .hugetlb_entry = gather_hugetlb_stats,
     .pmd_entry = gather_pte_stats,
+    .walk_lock = PGWALK_RDLOCK,
 };
 
 /*
@@ -25,9 +25,6 @@ static inline void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud)
 #endif
 
 vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf);
-struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
-                                   unsigned long addr, pmd_t *pmd,
-                                   unsigned int flags);
 bool madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
                            pmd_t *pmd, unsigned long addr, unsigned long next);
 int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, pmd_t *pmd,
@@ -3496,15 +3496,24 @@ static inline int vm_fault_to_errno(vm_fault_t vm_fault, int foll_flags)
  * Indicates whether GUP can follow a PROT_NONE mapped page, or whether
  * a (NUMA hinting) fault is required.
  */
-static inline bool gup_can_follow_protnone(unsigned int flags)
+static inline bool gup_can_follow_protnone(struct vm_area_struct *vma,
+                                           unsigned int flags)
 {
     /*
-     * FOLL_FORCE has to be able to make progress even if the VMA is
-     * inaccessible. Further, FOLL_FORCE access usually does not represent
-     * application behaviour and we should avoid triggering NUMA hinting
-     * faults.
+     * If callers don't want to honor NUMA hinting faults, no need to
+     * determine if we would actually have to trigger a NUMA hinting fault.
      */
-    return flags & FOLL_FORCE;
+    if (!(flags & FOLL_HONOR_NUMA_FAULT))
+        return true;
+
+    /*
+     * NUMA hinting faults don't apply in inaccessible (PROT_NONE) VMAs.
+     *
+     * Requiring a fault here even for inaccessible VMAs would mean that
+     * FOLL_FORCE cannot make any progress, because handle_mm_fault()
+     * refuses to process NUMA hinting faults in inaccessible VMAs.
+     */
+    return !vma_is_accessible(vma);
 }
 
 typedef int (*pte_fn_t)(pte_t *pte, unsigned long addr, void *data);
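Read as a decision table, the rewrite says: if the caller did not ask to honor NUMA hinting faults, GUP may always follow a PROT_NONE-mapped page; if it did ask, GUP may follow it only when the whole VMA is inaccessible, because then PROT_NONE is the VMA's protection rather than a NUMA hint. A standalone plain-C model of that decision (names and the test harness are invented for illustration, not kernel code):

#include <stdbool.h>
#include <stdio.h>

/* Mirrors the new gup_can_follow_protnone() logic as plain booleans. */
static bool can_follow_protnone(bool honor_numa_fault, bool vma_accessible)
{
    if (!honor_numa_fault)
        return true;            /* caller ignores NUMA hinting */
    return !vma_accessible;     /* PROT_NONE VMA: not a NUMA hint */
}

int main(void)
{
    printf("%d %d %d %d\n",
           can_follow_protnone(false, false),   /* 1 */
           can_follow_protnone(false, true),    /* 1 */
           can_follow_protnone(true, false),    /* 1 */
           can_follow_protnone(true, true));    /* 0: must take the fault */
    return 0;
}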
@@ -1356,6 +1356,15 @@ enum {
     FOLL_PCI_P2PDMA = 1 << 10,
     /* allow interrupts from generic signals */
     FOLL_INTERRUPTIBLE = 1 << 11,
+    /*
+     * Always honor (trigger) NUMA hinting faults.
+     *
+     * FOLL_WRITE implicitly honors NUMA hinting faults because a
+     * PROT_NONE-mapped page is not writable (exceptions with FOLL_FORCE
+     * apply). get_user_pages_fast_only() always implicitly honors NUMA
+     * hinting faults.
+     */
+    FOLL_HONOR_NUMA_FAULT = 1 << 12,
 
     /* See also internal only FOLL flags in mm/internal.h */
 };
@@ -6,6 +6,16 @@
 
 struct mm_walk;
 
+/* Locking requirement during a page walk. */
+enum page_walk_lock {
+    /* mmap_lock should be locked for read to stabilize the vma tree */
+    PGWALK_RDLOCK = 0,
+    /* vma will be write-locked during the walk */
+    PGWALK_WRLOCK = 1,
+    /* vma is expected to be already write-locked during the walk */
+    PGWALK_WRLOCK_VERIFY = 2,
+};
+
 /**
  * struct mm_walk_ops - callbacks for walk_page_range
  * @pgd_entry: if set, called for each non-empty PGD (top-level) entry
@@ -66,6 +76,7 @@ struct mm_walk_ops {
     int (*pre_vma)(unsigned long start, unsigned long end,
                    struct mm_walk *walk);
     void (*post_vma)(struct mm_walk *walk);
+    enum page_walk_lock walk_lock;
 };
 
 /*
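Taken together, the two pagewalk.h hunks mean every mm_walk_ops instance now declares its locking expectation, and the page walk core can take or assert that lock itself. A minimal sketch of a read-locked walker using the new field (the callback, ops, and function names here are invented for illustration; only walk_page_range(), struct mm_walk_ops, and the PGWALK_* values come from the header shown above):

#include <linux/pagewalk.h>
#include <linux/pgtable.h>
#include <linux/mmap_lock.h>

/* Count non-empty PMD entries in [start, end); purely illustrative. */
static int count_pmd_entry(pmd_t *pmd, unsigned long addr,
                           unsigned long next, struct mm_walk *walk)
{
    unsigned long *count = walk->private;

    if (!pmd_none(*pmd))
        (*count)++;
    return 0;
}

static const struct mm_walk_ops count_pmd_ops = {
    .pmd_entry = count_pmd_entry,
    /* The walk only needs the VMA tree stable: mmap_lock held for read. */
    .walk_lock = PGWALK_RDLOCK,
};

static unsigned long count_pmds(struct mm_struct *mm, unsigned long start,
                                unsigned long end)
{
    unsigned long count = 0;

    mmap_read_lock(mm);
    walk_page_range(mm, start, end, &count_pmd_ops, &count);
    mmap_read_unlock(mm);
    return count;
}

PGWALK_WRLOCK and PGWALK_WRLOCK_VERIFY are the variants used by the write-side walkers updated earlier in this diff (clear_refs_walk_ops, the s390 gmap walkers, and so on).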
@@ -1136,7 +1136,6 @@ static void set_iter_tags(struct radix_tree_iter *iter,
 void __rcu **radix_tree_iter_resume(void __rcu **slot,
                                     struct radix_tree_iter *iter)
 {
-    slot++;
     iter->index = __radix_tree_iter_add(iter, 1);
     iter->next_index = iter->index;
     iter->tags = 0;
@@ -1148,7 +1148,7 @@ static ssize_t extract_user_to_sg(struct iov_iter *iter,
 
 failed:
     while (sgtable->nents > sgtable->orig_nents)
-        put_page(sg_page(&sgtable->sgl[--sgtable->nents]));
+        unpin_user_page(sg_page(&sgtable->sgl[--sgtable->nents]));
     return res;
 }
@@ -933,11 +933,12 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
 
         /*
          * Check if the pageblock has already been marked skipped.
-         * Only the aligned PFN is checked as the caller isolates
+         * Only the first PFN is checked as the caller isolates
          * COMPACT_CLUSTER_MAX at a time so the second call must
          * not falsely conclude that the block should be skipped.
          */
-        if (!valid_page && pageblock_aligned(low_pfn)) {
+        if (!valid_page && (pageblock_aligned(low_pfn) ||
+                            low_pfn == cc->zone->zone_start_pfn)) {
             if (!isolation_suitable(cc, page)) {
                 low_pfn = end_pfn;
                 folio = NULL;
@@ -2030,7 +2031,8 @@ static isolate_migrate_t isolate_migratepages(struct compact_control *cc)
          * before making it "skip" so other compaction instances do
          * not scan the same block.
          */
-        if (pageblock_aligned(low_pfn) &&
+        if ((pageblock_aligned(low_pfn) ||
+             low_pfn == cc->zone->zone_start_pfn) &&
             !fast_find_block && !isolation_suitable(cc, page))
             continue;
 
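The zone-start special case matters because pageblock_aligned() is a pure alignment test: when a zone starts in the middle of a pageblock, the scanner never sees an aligned low_pfn for that first, partial block, so under the old condition its skip state was neither consulted nor updated there. A standalone arithmetic sketch of the two conditions (the pageblock size and zone start PFN are made-up values, and pageblock_aligned() is modelled as a plain modulo check):

#include <stdbool.h>
#include <stdio.h>

#define PAGEBLOCK_NR_PAGES 512UL   /* illustrative; arch/config specific */

static bool pageblock_aligned(unsigned long pfn)
{
    return (pfn % PAGEBLOCK_NR_PAGES) == 0;
}

int main(void)
{
    unsigned long zone_start_pfn = 0x10080;  /* not a pageblock boundary */
    unsigned long low_pfn = zone_start_pfn;  /* first PFN the scanner sees */

    /* Old condition: never true for the zone's first, partial pageblock. */
    bool old_check = pageblock_aligned(low_pfn);
    /* New condition: also accepts the very first PFN of the zone. */
    bool new_check = pageblock_aligned(low_pfn) ||
                     low_pfn == zone_start_pfn;

    printf("old=%d new=%d\n", old_check, new_check);  /* old=0 new=1 */
    return 0;
}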
@@ -273,6 +273,7 @@ struct damos_filter *damos_new_filter(enum damos_filter_type type,
         return NULL;
     filter->type = type;
     filter->matching = matching;
+    INIT_LIST_HEAD(&filter->list);
     return filter;
 }
 
@@ -389,6 +389,7 @@ out:
 static const struct mm_walk_ops damon_mkold_ops = {
     .pmd_entry = damon_mkold_pmd_entry,
     .hugetlb_entry = damon_mkold_hugetlb_entry,
+    .walk_lock = PGWALK_RDLOCK,
 };
 
 static void damon_va_mkold(struct mm_struct *mm, unsigned long addr)
@@ -532,6 +533,7 @@ out:
 static const struct mm_walk_ops damon_young_ops = {
     .pmd_entry = damon_young_pmd_entry,
     .hugetlb_entry = damon_young_hugetlb_entry,
+    .walk_lock = PGWALK_RDLOCK,
 };
 
 static bool damon_va_young(struct mm_struct *mm, unsigned long addr,
mm/gup.c | 30
@@ -597,7 +597,7 @@ static struct page *follow_page_pte(struct vm_area_struct *vma,
     pte = ptep_get(ptep);
     if (!pte_present(pte))
         goto no_page;
-    if (pte_protnone(pte) && !gup_can_follow_protnone(flags))
+    if (pte_protnone(pte) && !gup_can_follow_protnone(vma, flags))
         goto no_page;
 
     page = vm_normal_page(vma, address, pte);
@@ -714,7 +714,7 @@ static struct page *follow_pmd_mask(struct vm_area_struct *vma,
     if (likely(!pmd_trans_huge(pmdval)))
         return follow_page_pte(vma, address, pmd, flags, &ctx->pgmap);
 
-    if (pmd_protnone(pmdval) && !gup_can_follow_protnone(flags))
+    if (pmd_protnone(pmdval) && !gup_can_follow_protnone(vma, flags))
         return no_page_table(vma, flags);
 
     ptl = pmd_lock(mm, pmd);
@@ -844,6 +844,10 @@ struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
     if (WARN_ON_ONCE(foll_flags & FOLL_PIN))
         return NULL;
 
+    /*
+     * We never set FOLL_HONOR_NUMA_FAULT because callers don't expect
+     * to fail on PROT_NONE-mapped pages.
+     */
     page = follow_page_mask(vma, address, foll_flags, &ctx);
     if (ctx.pgmap)
         put_dev_pagemap(ctx.pgmap);
@@ -2240,6 +2244,13 @@ static bool is_valid_gup_args(struct page **pages, int *locked,
         gup_flags |= FOLL_UNLOCKABLE;
     }
 
+    /*
+     * For now, always trigger NUMA hinting faults. Some GUP users like
+     * KVM require the hint to be as the calling context of GUP is
+     * functionally similar to a memory reference from task context.
+     */
+    gup_flags |= FOLL_HONOR_NUMA_FAULT;
+
     /* FOLL_GET and FOLL_PIN are mutually exclusive. */
     if (WARN_ON_ONCE((gup_flags & (FOLL_PIN | FOLL_GET)) ==
                      (FOLL_PIN | FOLL_GET)))
@@ -2564,7 +2575,14 @@ static int gup_pte_range(pmd_t pmd, pmd_t *pmdp, unsigned long addr,
         struct page *page;
         struct folio *folio;
 
-        if (pte_protnone(pte) && !gup_can_follow_protnone(flags))
+        /*
+         * Always fallback to ordinary GUP on PROT_NONE-mapped pages:
+         * pte_access_permitted() better should reject these pages
+         * either way: otherwise, GUP-fast might succeed in
+         * cases where ordinary GUP would fail due to VMA access
+         * permissions.
+         */
+        if (pte_protnone(pte))
             goto pte_unmap;
 
         if (!pte_access_permitted(pte, flags & FOLL_WRITE))
@@ -2983,8 +3001,8 @@ static int gup_pmd_range(pud_t *pudp, pud_t pud, unsigned long addr, unsigned lo
 
         if (unlikely(pmd_trans_huge(pmd) || pmd_huge(pmd) ||
                      pmd_devmap(pmd))) {
-            if (pmd_protnone(pmd) &&
-                !gup_can_follow_protnone(flags))
+            /* See gup_pte_range() */
+            if (pmd_protnone(pmd))
                 return 0;
 
             if (!gup_huge_pmd(pmd, pmdp, addr, next, flags,
@@ -3164,7 +3182,7 @@ static int internal_get_user_pages_fast(unsigned long start,
     if (WARN_ON_ONCE(gup_flags & ~(FOLL_WRITE | FOLL_LONGTERM |
                                    FOLL_FORCE | FOLL_PIN | FOLL_GET |
                                    FOLL_FAST_ONLY | FOLL_NOFAULT |
-                                   FOLL_PCI_P2PDMA)))
+                                   FOLL_PCI_P2PDMA | FOLL_HONOR_NUMA_FAULT)))
         return -EINVAL;
 
     if (gup_flags & FOLL_PIN)
mm/hmm.c | 1
@@ -562,6 +562,7 @@ static const struct mm_walk_ops hmm_walk_ops = {
     .pte_hole = hmm_vma_walk_hole,
     .hugetlb_entry = hmm_vma_walk_hugetlb_entry,
     .test_walk = hmm_vma_walk_test,
+    .walk_lock = PGWALK_RDLOCK,
 };
 
 /**
Some files were not shown because too many files have changed in this diff.