ANDROID: mm/khugepaged: add missing vm_write_{begin|end}

The speculative page fault handler needs to detect concurrent pmd changes
and relies on the vma seqcount for that. pmdp_collapse_flush(),
set_huge_pmd(), and collapse_and_free_pmd() can modify a pmd.
vm_write_{begin|end} calls are needed in the paths which can invoke these
functions so that the page fault handler can detect pmd changes.

Bug: 257443051
Change-Id: Ieb784b5f44901b66a594f61b9e7c91190ff97f80
Signed-off-by: Suren Baghdasaryan <surenb@google.com>
This commit is contained in:
Suren Baghdasaryan
2022-11-21 12:15:43 -08:00
parent 59d4d125b7
commit 5ed391bd8a

View File

@@ -1472,6 +1472,7 @@ void collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr)
if (!pmd)
goto drop_hpage;
vm_write_begin(vma);
start_pte = pte_offset_map_lock(mm, pmd, haddr, &ptl);
/* step 1: check all mapped PTEs are to the right huge page */
@@ -1521,6 +1522,7 @@ void collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr)
ptl = pmd_lock(vma->vm_mm, pmd);
_pmd = pmdp_collapse_flush(vma, haddr, pmd);
spin_unlock(ptl);
vm_write_end(vma);
mm_dec_nr_ptes(mm);
pte_free(mm, pmd_pgtable(_pmd));
@@ -1531,6 +1533,7 @@ drop_hpage:
abort:
pte_unmap_unlock(start_pte, ptl);
vm_write_end(vma);
goto drop_hpage;
}
@@ -1602,10 +1605,12 @@ static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
*/
if (mmap_write_trylock(mm)) {
if (!khugepaged_test_exit(mm)) {
vm_write_begin(vma);
spinlock_t *ptl = pmd_lock(mm, pmd);
/* assume page table is clear */
_pmd = pmdp_collapse_flush(vma, addr, pmd);
spin_unlock(ptl);
vm_write_end(vma);
mm_dec_nr_ptes(mm);
pte_free(mm, pmd_pgtable(_pmd));
}