You've already forked linux-apfs
mirror of
https://github.com/linux-apfs/linux-apfs.git
synced 2026-05-01 15:00:59 -07:00
KVM: Drop FOLL_GET in GUP when doing async page fault
Page pinning is not mandatory in kvm async page fault processing since after async page fault event is delivered to a guest it accesses page once again and does its own GUP. Drop the FOLL_GET flag in GUP in async_pf code, and do some simplifying in check/clear processing. Suggested-by: Gleb Natapov <gleb@redhat.com> Signed-off-by: Gu zheng <guz.fnst@cn.fujitsu.com> Signed-off-by: chai wen <chaiw.fnst@cn.fujitsu.com> Signed-off-by: Gleb Natapov <gleb@redhat.com>
This commit is contained in:
+5
-12
@@ -56,7 +56,6 @@ void kvm_async_pf_vcpu_init(struct kvm_vcpu *vcpu)
 
 static void async_pf_execute(struct work_struct *work)
 {
-	struct page *page = NULL;
 	struct kvm_async_pf *apf =
 		container_of(work, struct kvm_async_pf, work);
 	struct mm_struct *mm = apf->mm;
@@ -68,13 +67,12 @@ static void async_pf_execute(struct work_struct *work)
 
 	use_mm(mm);
 	down_read(&mm->mmap_sem);
-	get_user_pages(current, mm, addr, 1, 1, 0, &page, NULL);
+	get_user_pages(current, mm, addr, 1, 1, 0, NULL, NULL);
 	up_read(&mm->mmap_sem);
 	unuse_mm(mm);
 
 	spin_lock(&vcpu->async_pf.lock);
 	list_add_tail(&apf->link, &vcpu->async_pf.done);
-	apf->page = page;
 	spin_unlock(&vcpu->async_pf.lock);
 
 	/*
@@ -82,7 +80,7 @@ static void async_pf_execute(struct work_struct *work)
 	 * this point
 	 */
 
-	trace_kvm_async_pf_completed(addr, page, gva);
+	trace_kvm_async_pf_completed(addr, gva);
 
 	if (waitqueue_active(&vcpu->wq))
 		wake_up_interruptible(&vcpu->wq);
@@ -112,8 +110,6 @@ void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu)
 			list_entry(vcpu->async_pf.done.next,
 				   typeof(*work), link);
 		list_del(&work->link);
-		if (!is_error_page(work->page))
-			kvm_release_page_clean(work->page);
 		kmem_cache_free(async_pf_cache, work);
 	}
 	spin_unlock(&vcpu->async_pf.lock);
@@ -133,14 +129,11 @@ void kvm_check_async_pf_completion(struct kvm_vcpu *vcpu)
 		list_del(&work->link);
 		spin_unlock(&vcpu->async_pf.lock);
 
-		if (work->page)
-			kvm_arch_async_page_ready(vcpu, work);
+		kvm_arch_async_page_ready(vcpu, work);
 		kvm_arch_async_page_present(vcpu, work);
 
 		list_del(&work->queue);
 		vcpu->async_pf.queued--;
-		if (!is_error_page(work->page))
-			kvm_release_page_clean(work->page);
 		kmem_cache_free(async_pf_cache, work);
 	}
 }
@@ -163,7 +156,7 @@ int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn,
 	if (!work)
 		return 0;
 
-	work->page = NULL;
+	work->wakeup_all = false;
 	work->vcpu = vcpu;
 	work->gva = gva;
 	work->addr = gfn_to_hva(vcpu->kvm, gfn);
@@ -203,7 +196,7 @@ int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu)
 	if (!work)
 		return -ENOMEM;
 
-	work->page = KVM_ERR_PTR_BAD_PAGE;
+	work->wakeup_all = true;
 	INIT_LIST_HEAD(&work->queue); /* for list_del to work */
 
 	spin_lock(&vcpu->async_pf.lock);
Reference in New Issue
Block a user