mirror of https://github.com/armbian/linux-rockchip.git
BACKPORT: FROMLIST: mm: implement speculative handling in filemap_fault()
Extend filemap_fault() to handle speculative faults. In the speculative
case, we will only be fishing existing pages out of the page cache. The
logic we use mirrors what is done in the non-speculative case, assuming
that pages are found in the page cache, are up to date and not already
locked, and that readahead is not necessary at this time. In all other
cases, the fault is aborted to be handled non-speculatively.

Signed-off-by: Michel Lespinasse <michel@lespinasse.org>
Link: https://lore.kernel.org/all/20210407014502.24091-26-michel@lespinasse.org/

Conflicts:
	mm/filemap.c

1. Added back file_ra_state variable used by SPF path.
2. Updated comment for filemap_fault to reflect SPF locking rules.

Bug: 161210518
Signed-off-by: Suren Baghdasaryan <surenb@google.com>
Change-Id: I82eba7fcfc81876245c2e65bc5ae3d33ddfcc368
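The flow the message describes (a lock-free lookup in the page cache, a trylock, revalidation of the page's identity and state under that lock, and an abort to the slow path on any surprise) is a general optimistic-fastpath pattern. The sketch below shows the same shape in plain userspace C with pthreads; it is an illustrative analogue only, and every name in it (struct slot, lookup_speculative, lookup_blocking, want_gen) is invented for this sketch, not taken from the kernel.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-ins, not kernel structures. */
struct slot {
        pthread_mutex_t lock;
        bool uptodate;          /* contents valid yet?        */
        int generation;         /* bumped when slot is reused */
        int data;
};

/*
 * Fast path, shaped like the FAULT_FLAG_SPECULATIVE block in the
 * patch: never block, revalidate everything, and return false (this
 * sketch's VM_FAULT_RETRY) on any surprise.
 */
static bool lookup_speculative(struct slot *s, int want_gen, int *out)
{
        if (pthread_mutex_trylock(&s->lock) != 0)
                return false;           /* contended: bail out */

        /* Recheck identity and state under the lock, as the patch
         * rechecks page->mapping, the page offset and PageUptodate. */
        if (s->generation != want_gen || !s->uptodate) {
                pthread_mutex_unlock(&s->lock);
                return false;
        }

        *out = s->data;
        pthread_mutex_unlock(&s->lock);
        return true;
}

/* Slow path: free to block and to populate the slot ("do I/O"). */
static void lookup_blocking(struct slot *s, int *out)
{
        pthread_mutex_lock(&s->lock);
        if (!s->uptodate) {
                s->data = 42;           /* simulate reading it in */
                s->uptodate = true;
        }
        *out = s->data;
        pthread_mutex_unlock(&s->lock);
}

int main(void)
{
        struct slot s = { PTHREAD_MUTEX_INITIALIZER, false, 1, 0 };
        int v;

        /* Try speculatively first; on failure, retry the slow way,
         * as a fault is retried without FAULT_FLAG_SPECULATIVE. */
        if (!lookup_speculative(&s, 1, &v))
                lookup_blocking(&s, &v);
        printf("value = %d\n", v);
        return 0;
}

Build with cc -pthread. The design point mirrored here is that the fast path never sleeps: any contention or staleness is reported upward, and a single blocking slow path handles every hard case.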
commit 59d4d125b7
parent 2bb39b9121
committed by Suren Baghdasaryan
 mm/filemap.c | 45 ++++++++++++++++++++++++++++++++++++++++++++++-
 1 file changed, 44 insertions(+), 1 deletion(-)
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -2706,7 +2706,9 @@ static struct file *do_async_mmap_readahead(struct vm_fault *vmf,
  * it in the page cache, and handles the special cases reasonably without
  * having a lot of duplicated code.
  *
- * vma->vm_mm->mmap_lock must be held on entry.
+ * If FAULT_FLAG_SPECULATIVE is set, this function runs with elevated vma
+ * refcount and with mmap lock not held.
+ * Otherwise, vma->vm_mm->mmap_lock must be held on entry.
  *
  * If our return value has VM_FAULT_RETRY set, it's because the mmap_lock
  * may be dropped before doing I/O or by lock_page_maybe_drop_mmap().
@@ -2732,6 +2734,47 @@ vm_fault_t filemap_fault(struct vm_fault *vmf)
 	vm_fault_t ret = 0;
 	bool retry = false;
 
+	if (vmf->flags & FAULT_FLAG_SPECULATIVE) {
+		page = find_get_page(mapping, offset);
+		if (unlikely(!page) || unlikely(PageReadahead(page)))
+			return VM_FAULT_RETRY;
+
+		if (!trylock_page(page))
+			return VM_FAULT_RETRY;
+
+		if (unlikely(compound_head(page)->mapping != mapping))
+			goto page_unlock;
+		VM_BUG_ON_PAGE(page_to_pgoff(page) != offset, page);
+		if (unlikely(!PageUptodate(page)))
+			goto page_unlock;
+
+		max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
+		if (unlikely(offset >= max_off))
+			goto page_unlock;
+
+		/*
+		 * Update readahead mmap_miss statistic.
+		 *
+		 * Note that we are not sure if finish_fault() will
+		 * manage to complete the transaction. If it fails,
+		 * we'll come back to filemap_fault() non-speculative
+		 * case which will update mmap_miss a second time.
+		 * This is not ideal, we would prefer to guarantee the
+		 * update will happen exactly once.
+		 */
+		if (!(vmf->vma->vm_flags & VM_RAND_READ) && ra->ra_pages) {
+			unsigned int mmap_miss = READ_ONCE(ra->mmap_miss);
+			if (mmap_miss)
+				WRITE_ONCE(ra->mmap_miss, --mmap_miss);
+		}
+
+		vmf->page = page;
+		return VM_FAULT_LOCKED;
+page_unlock:
+		unlock_page(page);
+		return VM_FAULT_RETRY;
+	}
+
 	max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
 	if (unlikely(offset >= max_off))
 		return VM_FAULT_SIGBUS;
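One detail in the second hunk is worth flagging: ra->mmap_miss is updated with READ_ONCE()/WRITE_ONCE() rather than an atomic read-modify-write, so two racing faults can lose a decrement, and the in-line comment likewise accepts that the update may not happen exactly once. A userspace analogue of that deliberately approximate counter is a relaxed atomic load followed by a relaxed store, as in the sketch below (the counter and function names are invented for illustration):

#include <stdatomic.h>
#include <stdio.h>

/* Approximate statistic: lost updates are tolerated, in the same
 * spirit as the READ_ONCE()/WRITE_ONCE() pair on ra->mmap_miss. */
static _Atomic unsigned int mmap_miss_stat = 10;

static void note_cache_hit(void)
{
        /* Relaxed load: the READ_ONCE() analogue. */
        unsigned int m = atomic_load_explicit(&mmap_miss_stat,
                                              memory_order_relaxed);
        if (m)
                /* Relaxed store of m - 1: the WRITE_ONCE() analogue.
                 * Two threads that both read the same m will both
                 * store m - 1, losing one decrement; acceptable for
                 * a heuristic. */
                atomic_store_explicit(&mmap_miss_stat, m - 1,
                                      memory_order_relaxed);
}

int main(void)
{
        note_cache_hit();
        printf("mmap_miss_stat is now %u\n",
               atomic_load_explicit(&mmap_miss_stat,
                                    memory_order_relaxed));
        return 0;
}

Replacing the load/store pair with atomic_fetch_sub() would make the count exact, at the cost of a read-modify-write on every page-cache hit; for a readahead heuristic, the cheaper approximate update is a reasonable trade.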