Mirror of https://github.com/armbian/linux-rockchip.git (synced 2026-01-06 11:08:10 -08:00)
ANDROID: mm: remove sequence counting when mmap_lock is not exclusively owned
In a number of cases vm_write_{begin|end} is called while mmap_lock is
not owned exclusively. This is unnecessary and can affect correctness of
the sequence counting protecting speculative page fault handlers. Remove
extra calls.
Bug: 257443051
Change-Id: I1278638a0794448e22fbdab5601212b3b2eaebdc
Signed-off-by: Suren Baghdasaryan <surenb@google.com>
This commit is contained in:
@@ -506,11 +506,9 @@ static void madvise_cold_page_range(struct mmu_gather *tlb,
 		.tlb = tlb,
 	};
 
-	vm_write_begin(vma);
 	tlb_start_vma(tlb, vma);
 	walk_page_range(vma->vm_mm, addr, end, &cold_walk_ops, &walk_private);
 	tlb_end_vma(tlb, vma);
-	vm_write_end(vma);
 }
 
 static long madvise_cold(struct vm_area_struct *vma,
@@ -543,11 +541,9 @@ static void madvise_pageout_page_range(struct mmu_gather *tlb,
 		.can_pageout_file = can_pageout_file,
 	};
 
-	vm_write_begin(vma);
 	tlb_start_vma(tlb, vma);
 	walk_page_range(vma->vm_mm, addr, end, &cold_walk_ops, &walk_private);
 	tlb_end_vma(tlb, vma);
-	vm_write_end(vma);
 }
 
 static inline bool can_do_file_pageout(struct vm_area_struct *vma)
@@ -754,12 +750,10 @@ static int madvise_free_single_vma(struct vm_area_struct *vma,
 	update_hiwater_rss(mm);
 
 	mmu_notifier_invalidate_range_start(&range);
-	vm_write_begin(vma);
 	tlb_start_vma(&tlb, vma);
 	walk_page_range(vma->vm_mm, range.start, range.end,
 			&madvise_free_walk_ops, &tlb);
 	tlb_end_vma(&tlb, vma);
-	vm_write_end(vma);
 	mmu_notifier_invalidate_range_end(&range);
 	tlb_finish_mmu(&tlb, range.start, range.end);
 
@@ -1495,7 +1495,6 @@ void unmap_page_range(struct mmu_gather *tlb,
 	unsigned long next;
 
 	BUG_ON(addr >= end);
-	vm_write_begin(vma);
 	tlb_start_vma(tlb, vma);
 	pgd = pgd_offset(vma->vm_mm, addr);
 	do {
@@ -1505,7 +1504,6 @@ void unmap_page_range(struct mmu_gather *tlb,
 		next = zap_p4d_range(tlb, vma, pgd, addr, next, details);
 	} while (pgd++, addr = next, addr != end);
 	tlb_end_vma(tlb, vma);
-	vm_write_end(vma);
 }
 
@@ -657,11 +657,9 @@ unsigned long change_prot_numa(struct vm_area_struct *vma,
 {
 	int nr_updated;
 
-	vm_write_begin(vma);
 	nr_updated = change_protection(vma, addr, end, PAGE_NONE, MM_CP_PROT_NUMA);
 	if (nr_updated)
 		count_vm_numa_events(NUMA_PTE_UPDATES, nr_updated);
-	vm_write_end(vma);
 
 	return nr_updated;
 }
Reference in New Issue
Block a user