KVM: Use cpumask_available() to check for NULL cpumask when kicking vCPUs
[ Upstream commit 0bbc2ca851 ]
Check for a NULL cpumask_var_t when kicking multiple vCPUs via
cpumask_available(), which performs a !NULL check if and only if cpumasks
are configured to be allocated off-stack. This is a meaningless
optimization, e.g. avoids a TEST+Jcc and TEST+CMOV on x86, but more
importantly helps document that the NULL check is necessary even though
all callers pass in a local variable.
No functional change intended.
Cc: Lai Jiangshan <jiangshanlai@gmail.com>
Signed-off-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Vitaly Kuznetsov <vkuznets@redhat.com>
Message-Id: <20210827092516.1027264-3-vkuznets@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Stable-dep-of: 2b0128127373 ("KVM: Register /dev/kvm as the _very_ last thing during initialization")
Signed-off-by: Sasha Levin <sashal@kernel.org>
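For context on the claim that cpumask_available() "performs a !NULL check if and only if cpumasks are configured to be allocated off-stack": the behaviour hinges on CONFIG_CPUMASK_OFFSTACK. Below is a trimmed, paraphrased sketch of the two relevant definitions from include/linux/cpumask.h, not the verbatim kernel source:

/*
 * Paraphrased sketch of the cpumask_var_t plumbing (details trimmed;
 * see include/linux/cpumask.h in the kernel tree for the real thing).
 */
#ifdef CONFIG_CPUMASK_OFFSTACK
/* Off-stack: cpumask_var_t is a pointer that alloc_cpumask_var() may
 * leave NULL on allocation failure, so "available" means non-NULL. */
typedef struct cpumask *cpumask_var_t;

static inline bool cpumask_available(cpumask_var_t mask)
{
	return mask != NULL;
}
#else
/* On-stack: cpumask_var_t is a one-element array embedded in the
 * caller's frame; it can never be NULL, so the check is constant-true
 * and the compiler drops the branch entirely. */
typedef struct cpumask cpumask_var_t[1];

static inline bool cpumask_available(cpumask_var_t mask)
{
	return true;
}
#endif

With CONFIG_CPUMASK_OFFSTACK=n the check folds to a constant, which is why the message calls the codegen effect meaningless while still valuing the check as documentation.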
Committed-by: Greg Kroah-Hartman
Parent: 4cc54f6ae5
Commit: ad120bc869
virt/kvm/kvm_main.c
@@ -248,9 +248,13 @@ static void ack_flush(void *_completed)
 {
 }
 
-static inline bool kvm_kick_many_cpus(const struct cpumask *cpus, bool wait)
+static inline bool kvm_kick_many_cpus(cpumask_var_t tmp, bool wait)
 {
-	if (unlikely(!cpus))
+	const struct cpumask *cpus;
+
+	if (likely(cpumask_available(tmp)))
+		cpus = tmp;
+	else
 		cpus = cpu_online_mask;
 
 	if (cpumask_empty(cpus))
@@ -280,6 +284,14 @@ bool kvm_make_vcpus_request_mask(struct kvm *kvm, unsigned int req,
 		if (!(req & KVM_REQUEST_NO_WAKEUP) && kvm_vcpu_wake_up(vcpu))
 			continue;
 
+		/*
+		 * tmp can be "unavailable" if cpumasks are allocated off stack
+		 * as allocation of the mask is deliberately not fatal and is
+		 * handled by falling back to kicking all online CPUs.
+		 */
+		if (!cpumask_available(tmp))
+			continue;
+
 		/*
 		 * Note, the vCPU could get migrated to a different pCPU at any
 		 * point after kvm_request_needs_ipi(), which could result in
@@ -291,7 +303,7 @@ bool kvm_make_vcpus_request_mask(struct kvm *kvm, unsigned int req,
 		 * were reading SPTEs _before_ any changes were finalized. See
 		 * kvm_vcpu_kick() for more details on handling requests.
 		 */
-		if (tmp != NULL && kvm_request_needs_ipi(vcpu, req)) {
+		if (kvm_request_needs_ipi(vcpu, req)) {
 			cpu = READ_ONCE(vcpu->cpu);
 			if (cpu != -1 && cpu != me)
 				__cpumask_set_cpu(cpu, tmp);
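As a usage note, the reason the NULL check is needed even though "all callers pass in a local variable" is that, with CONFIG_CPUMASK_OFFSTACK=y, the local cpumask_var_t is only a pointer that the caller fills with zalloc_cpumask_var(..., GFP_ATOMIC), and that allocation is allowed to fail. A minimal, hypothetical sketch of the caller pattern follows; the function name and body are illustrative, not the exact kernel caller:

/* Illustrative sketch (hypothetical helper, not the exact kernel code):
 * why a local cpumask_var_t can still be NULL at the point of use. */
#include <linux/cpumask.h>
#include <linux/gfp.h>

static void example_kick_with_local_mask(void)
{
	cpumask_var_t tmp;	/* a pointer when CONFIG_CPUMASK_OFFSTACK=y */

	if (!zalloc_cpumask_var(&tmp, GFP_ATOMIC)) {
		/* Deliberately not fatal: tmp stays NULL and the
		 * fallback below handles it. */
	}

	if (cpumask_available(tmp)) {
		/* normal path: collect the target CPUs into tmp */
	} else {
		/* fallback path: kick every online CPU instead */
	}

	free_cpumask_var(tmp);
}

Because the allocation failure is tolerated rather than propagated, the callee must treat an unavailable mask as "kick all online CPUs", which is exactly what the hunks above implement.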