Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm
Pull kvm fixes from Paolo Bonzini:
 "x86:
   - Fixes for the new scalable MMU
   - Fixes for migration of nested hypervisors on AMD
   - Fix for clang integrated assembler
   - Fix for left shift by 64 (UBSAN)
   - Small cleanups
   - Straggler SEV-ES patch

  ARM:
   - VM init cleanups
   - PSCI relay cleanups
   - Kill CONFIG_KVM_ARM_PMU
   - Fixup __init annotations
   - Fixup reg_to_encoding()
   - Fix spurious PMCR_EL0 access

  Misc:
   - selftests cleanups"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm: (38 commits)
  KVM: x86: __kvm_vcpu_halt can be static
  KVM: SVM: Add support for booting APs in an SEV-ES guest
  KVM: nSVM: cancel KVM_REQ_GET_NESTED_STATE_PAGES on nested vmexit
  KVM: nSVM: mark vmcb as dirty when forcingly leaving the guest mode
  KVM: nSVM: correctly restore nested_run_pending on migration
  KVM: x86/mmu: Clarify TDP MMU page list invariants
  KVM: x86/mmu: Ensure TDP MMU roots are freed after yield
  kvm: check tlbs_dirty directly
  KVM: x86: change in pv_eoi_get_pending() to make code more readable
  MAINTAINERS: Really update email address for Sean Christopherson
  KVM: x86: fix shift out of bounds reported by UBSAN
  KVM: selftests: Implement perf_test_util more conventionally
  KVM: selftests: Use vm_create_with_vcpus in create_vm
  KVM: selftests: Factor out guest mode code
  KVM/SVM: Remove leftover __svm_vcpu_run prototype from svm.c
  KVM: SVM: Add register operand to vmsave call in sev_es_vcpu_load
  KVM: x86/mmu: Optimize not-present/MMIO SPTE check in get_mmio_spte()
  KVM: x86/mmu: Use raw level to index into MMIO walks' sptes array
  KVM: x86/mmu: Get root level from walkers when retrieving MMIO SPTE
  KVM: x86/mmu: Use -1 to flag an undefined spte in get_mmio_spte()
  ...
@@ -392,9 +392,14 @@ This ioctl is obsolete and has been removed.
 
 Errors:
 
-  ===== =============================
-  EINTR an unmasked signal is pending
-  ===== =============================
+  ======= ==============================================================
+  EINTR   an unmasked signal is pending
+  ENOEXEC the vcpu hasn't been initialized or the guest tried to execute
+          instructions from device memory (arm64)
+  ENOSYS  data abort outside memslots with no syndrome info and
+          KVM_CAP_ARM_NISV_TO_USER not enabled (arm64)
+  EPERM   SVE feature set but not finalized (arm64)
+  ======= ==============================================================
 
 This ioctl is used to run a guest virtual cpu. While there are no
 explicit parameters, there is an implicit parameter block that can be
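[Aside, not part of the diff: a minimal userspace sketch of how the KVM_RUN error codes documented above surface through errno. The vcpu_fd is assumed to come from KVM_CREATE_VCPU, and the setup of /dev/kvm and the VM is elided.]

    #include <errno.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    /* vcpu_fd is assumed to have been obtained via ioctl(vm_fd, KVM_CREATE_VCPU, 0). */
    static int run_vcpu_once(int vcpu_fd)
    {
            if (ioctl(vcpu_fd, KVM_RUN, 0) < 0) {
                    switch (errno) {
                    case EINTR:     /* unmasked signal pending; usually just retried */
                            return 0;
                    case ENOEXEC:   /* vcpu not initialized, or insn fetch from device memory (arm64) */
                    case ENOSYS:    /* no-syndrome data abort outside memslots, NISV_TO_USER off (arm64) */
                    case EPERM:     /* SVE feature set but not finalized (arm64) */
                    default:
                            fprintf(stderr, "KVM_RUN failed: %s\n", strerror(errno));
                            return -1;
                    }
            }
            /* On success, the mmap'ed struct kvm_run describes the exit reason. */
            return 0;
    }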
@@ -9776,7 +9776,7 @@ F: tools/testing/selftests/kvm/s390x/
 
 KERNEL VIRTUAL MACHINE FOR X86 (KVM/x86)
 M: Paolo Bonzini <pbonzini@redhat.com>
-R: Sean Christopherson <sean.j.christopherson@intel.com>
+R: Sean Christopherson <seanjc@google.com>
 R: Vitaly Kuznetsov <vkuznets@redhat.com>
 R: Wanpeng Li <wanpengli@tencent.com>
 R: Jim Mattson <jmattson@google.com>
@@ -17,6 +17,7 @@
 #include <linux/jump_label.h>
 #include <linux/kvm_types.h>
 #include <linux/percpu.h>
+#include <linux/psci.h>
 #include <asm/arch_gicv3.h>
 #include <asm/barrier.h>
 #include <asm/cpufeature.h>
@@ -240,6 +241,28 @@ struct kvm_host_data {
 	struct kvm_pmu_events pmu_events;
 };
 
+struct kvm_host_psci_config {
+	/* PSCI version used by host. */
+	u32 version;
+
+	/* Function IDs used by host if version is v0.1. */
+	struct psci_0_1_function_ids function_ids_0_1;
+
+	bool psci_0_1_cpu_suspend_implemented;
+	bool psci_0_1_cpu_on_implemented;
+	bool psci_0_1_cpu_off_implemented;
+	bool psci_0_1_migrate_implemented;
+};
+
+extern struct kvm_host_psci_config kvm_nvhe_sym(kvm_host_psci_config);
+#define kvm_host_psci_config CHOOSE_NVHE_SYM(kvm_host_psci_config)
+
+extern s64 kvm_nvhe_sym(hyp_physvirt_offset);
+#define hyp_physvirt_offset CHOOSE_NVHE_SYM(hyp_physvirt_offset)
+
+extern u64 kvm_nvhe_sym(hyp_cpu_logical_map)[NR_CPUS];
+#define hyp_cpu_logical_map CHOOSE_NVHE_SYM(hyp_cpu_logical_map)
+
 struct vcpu_reset_state {
 	unsigned long pc;
 	unsigned long r0;
@@ -2568,7 +2568,7 @@ static void verify_hyp_capabilities(void)
 	int parange, ipa_max;
 	unsigned int safe_vmid_bits, vmid_bits;
 
-	if (!IS_ENABLED(CONFIG_KVM) || !IS_ENABLED(CONFIG_KVM_ARM_HOST))
+	if (!IS_ENABLED(CONFIG_KVM))
 		return;
 
 	safe_mmfr1 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);
@@ -434,7 +434,7 @@ static void __init hyp_mode_check(void)
 			   "CPU: CPUs started in inconsistent modes");
 	else
 		pr_info("CPU: All CPU(s) started at EL1\n");
-	if (IS_ENABLED(CONFIG_KVM))
+	if (IS_ENABLED(CONFIG_KVM) && !is_kernel_in_hyp_mode())
 		kvm_compute_layout();
 }
 
@@ -49,14 +49,6 @@ if KVM
 
 source "virt/kvm/Kconfig"
 
-config KVM_ARM_PMU
-	bool "Virtual Performance Monitoring Unit (PMU) support"
-	depends on HW_PERF_EVENTS
-	default y
-	help
-	  Adds support for a virtual Performance Monitoring Unit (PMU) in
-	  virtual machines.
-
 endif # KVM
 
 endif # VIRTUALIZATION
@@ -24,4 +24,4 @@ kvm-y := $(KVM)/kvm_main.o $(KVM)/coalesced_mmio.o $(KVM)/eventfd.o \
 	 vgic/vgic-mmio-v3.o vgic/vgic-kvm-device.o \
 	 vgic/vgic-its.o vgic/vgic-debug.o
 
-kvm-$(CONFIG_KVM_ARM_PMU) += pmu-emul.o
+kvm-$(CONFIG_HW_PERF_EVENTS) += pmu-emul.o
@@ -1129,9 +1129,10 @@ int kvm_timer_enable(struct kvm_vcpu *vcpu)
 	if (!irqchip_in_kernel(vcpu->kvm))
 		goto no_vgic;
 
-	if (!vgic_initialized(vcpu->kvm))
-		return -ENODEV;
-
+	/*
+	 * At this stage, we have the guarantee that the vgic is both
+	 * available and initialized.
+	 */
 	if (!timer_irqs_are_valid(vcpu)) {
 		kvm_debug("incorrectly configured timer irqs\n");
 		return -EINVAL;
@@ -65,10 +65,6 @@ static bool vgic_present;
 static DEFINE_PER_CPU(unsigned char, kvm_arm_hardware_enabled);
 DEFINE_STATIC_KEY_FALSE(userspace_irqchip_in_use);
 
-extern u64 kvm_nvhe_sym(__cpu_logical_map)[NR_CPUS];
-extern u32 kvm_nvhe_sym(kvm_host_psci_version);
-extern struct psci_0_1_function_ids kvm_nvhe_sym(kvm_host_psci_0_1_function_ids);
-
 int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
 {
 	return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE;
@@ -584,11 +580,9 @@ static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu)
 		 * Map the VGIC hardware resources before running a vcpu the
 		 * first time on this VM.
 		 */
-		if (unlikely(!vgic_ready(kvm))) {
-			ret = kvm_vgic_map_resources(kvm);
-			if (ret)
-				return ret;
-		}
+		ret = kvm_vgic_map_resources(kvm);
+		if (ret)
+			return ret;
 	} else {
 		/*
 		 * Tell the rest of the code that there are userspace irqchip
@@ -1574,12 +1568,12 @@ static struct notifier_block hyp_init_cpu_pm_nb = {
 	.notifier_call = hyp_init_cpu_pm_notifier,
 };
 
-static void __init hyp_cpu_pm_init(void)
+static void hyp_cpu_pm_init(void)
 {
 	if (!is_protected_kvm_enabled())
 		cpu_pm_register_notifier(&hyp_init_cpu_pm_nb);
 }
-static void __init hyp_cpu_pm_exit(void)
+static void hyp_cpu_pm_exit(void)
 {
 	if (!is_protected_kvm_enabled())
 		cpu_pm_unregister_notifier(&hyp_init_cpu_pm_nb);
@@ -1604,9 +1598,12 @@ static void init_cpu_logical_map(void)
 	 * allow any other CPUs from the `possible` set to boot.
 	 */
 	for_each_online_cpu(cpu)
-		kvm_nvhe_sym(__cpu_logical_map)[cpu] = cpu_logical_map(cpu);
+		hyp_cpu_logical_map[cpu] = cpu_logical_map(cpu);
 }
 
+#define init_psci_0_1_impl_state(config, what) \
+	config.psci_0_1_ ## what ## _implemented = psci_ops.what
+
 static bool init_psci_relay(void)
 {
 	/*
@@ -1618,8 +1615,15 @@ static bool init_psci_relay(void)
 		return false;
 	}
 
-	kvm_nvhe_sym(kvm_host_psci_version) = psci_ops.get_version();
-	kvm_nvhe_sym(kvm_host_psci_0_1_function_ids) = get_psci_0_1_function_ids();
+	kvm_host_psci_config.version = psci_ops.get_version();
+
+	if (kvm_host_psci_config.version == PSCI_VERSION(0, 1)) {
+		kvm_host_psci_config.function_ids_0_1 = get_psci_0_1_function_ids();
+		init_psci_0_1_impl_state(kvm_host_psci_config, cpu_suspend);
+		init_psci_0_1_impl_state(kvm_host_psci_config, cpu_on);
+		init_psci_0_1_impl_state(kvm_host_psci_config, cpu_off);
+		init_psci_0_1_impl_state(kvm_host_psci_config, migrate);
+	}
+
 	return true;
 }
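[Aside, not part of the diff: the token-pasting helper used above is easiest to read through its expansion.]

    /* Illustrative expansion: init_psci_0_1_impl_state(kvm_host_psci_config, cpu_on) becomes */
    kvm_host_psci_config.psci_0_1_cpu_on_implemented = psci_ops.cpu_on;

Each flag simply records whether the host's PSCI 0.1 firmware provides that call, so the EL2 relay only matches function IDs the host actually registered.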
@@ -59,4 +59,13 @@ static inline void __adjust_pc(struct kvm_vcpu *vcpu)
 	}
 }
 
+/*
+ * Skip an instruction while host sysregs are live.
+ * Assumes host is always 64-bit.
+ */
+static inline void kvm_skip_host_instr(void)
+{
+	write_sysreg_el2(read_sysreg_el2(SYS_ELR) + 4, SYS_ELR);
+}
+
 #endif
@@ -157,11 +157,6 @@ static void default_host_smc_handler(struct kvm_cpu_context *host_ctxt)
 	__kvm_hyp_host_forward_smc(host_ctxt);
 }
 
-static void skip_host_instruction(void)
-{
-	write_sysreg_el2(read_sysreg_el2(SYS_ELR) + 4, SYS_ELR);
-}
-
 static void handle_host_smc(struct kvm_cpu_context *host_ctxt)
 {
 	bool handled;
@@ -170,11 +165,8 @@ static void handle_host_smc(struct kvm_cpu_context *host_ctxt)
 	if (!handled)
 		default_host_smc_handler(host_ctxt);
 
-	/*
-	 * Unlike HVC, the return address of an SMC is the instruction's PC.
-	 * Move the return address past the instruction.
-	 */
-	skip_host_instruction();
+	/* SMC was trapped, move ELR past the current PC. */
+	kvm_skip_host_instr();
 }
 
 void handle_trap(struct kvm_cpu_context *host_ctxt)
@@ -14,14 +14,14 @@
  * Other CPUs should not be allowed to boot because their features were
  * not checked against the finalized system capabilities.
  */
-u64 __ro_after_init __cpu_logical_map[NR_CPUS] = { [0 ... NR_CPUS-1] = INVALID_HWID };
+u64 __ro_after_init hyp_cpu_logical_map[NR_CPUS] = { [0 ... NR_CPUS-1] = INVALID_HWID };
 
 u64 cpu_logical_map(unsigned int cpu)
 {
-	if (cpu >= ARRAY_SIZE(__cpu_logical_map))
+	if (cpu >= ARRAY_SIZE(hyp_cpu_logical_map))
 		hyp_panic();
 
-	return __cpu_logical_map[cpu];
+	return hyp_cpu_logical_map[cpu];
 }
 
 unsigned long __hyp_per_cpu_offset(unsigned int cpu)
@@ -7,11 +7,8 @@
 #include <asm/kvm_asm.h>
 #include <asm/kvm_hyp.h>
 #include <asm/kvm_mmu.h>
-#include <kvm/arm_hypercalls.h>
 #include <linux/arm-smccc.h>
 #include <linux/kvm_host.h>
-#include <linux/psci.h>
-#include <kvm/arm_psci.h>
 #include <uapi/linux/psci.h>
 
 #include <nvhe/trap_handler.h>
@@ -22,9 +19,8 @@ void kvm_hyp_cpu_resume(unsigned long r0);
 void __noreturn __host_enter(struct kvm_cpu_context *host_ctxt);
 
 /* Config options set by the host. */
-__ro_after_init u32 kvm_host_psci_version;
-__ro_after_init struct psci_0_1_function_ids kvm_host_psci_0_1_function_ids;
-__ro_after_init s64 hyp_physvirt_offset;
+struct kvm_host_psci_config __ro_after_init kvm_host_psci_config;
+s64 __ro_after_init hyp_physvirt_offset;
 
 #define __hyp_pa(x) ((phys_addr_t)((x)) + hyp_physvirt_offset)
 
@@ -47,19 +43,16 @@ struct psci_boot_args {
 static DEFINE_PER_CPU(struct psci_boot_args, cpu_on_args) = PSCI_BOOT_ARGS_INIT;
 static DEFINE_PER_CPU(struct psci_boot_args, suspend_args) = PSCI_BOOT_ARGS_INIT;
 
-static u64 get_psci_func_id(struct kvm_cpu_context *host_ctxt)
-{
-	DECLARE_REG(u64, func_id, host_ctxt, 0);
-
-	return func_id;
-}
+#define is_psci_0_1(what, func_id) \
+	(kvm_host_psci_config.psci_0_1_ ## what ## _implemented && \
+	 (func_id) == kvm_host_psci_config.function_ids_0_1.what)
 
 static bool is_psci_0_1_call(u64 func_id)
 {
-	return (func_id == kvm_host_psci_0_1_function_ids.cpu_suspend) ||
-	       (func_id == kvm_host_psci_0_1_function_ids.cpu_on) ||
-	       (func_id == kvm_host_psci_0_1_function_ids.cpu_off) ||
-	       (func_id == kvm_host_psci_0_1_function_ids.migrate);
+	return (is_psci_0_1(cpu_suspend, func_id) ||
+		is_psci_0_1(cpu_on, func_id) ||
+		is_psci_0_1(cpu_off, func_id) ||
+		is_psci_0_1(migrate, func_id));
 }
 
 static bool is_psci_0_2_call(u64 func_id)
@@ -69,16 +62,6 @@ static bool is_psci_0_2_call(u64 func_id)
 	       (PSCI_0_2_FN64(0) <= func_id && func_id <= PSCI_0_2_FN64(31));
 }
 
-static bool is_psci_call(u64 func_id)
-{
-	switch (kvm_host_psci_version) {
-	case PSCI_VERSION(0, 1):
-		return is_psci_0_1_call(func_id);
-	default:
-		return is_psci_0_2_call(func_id);
-	}
-}
-
 static unsigned long psci_call(unsigned long fn, unsigned long arg0,
 			       unsigned long arg1, unsigned long arg2)
 {
@@ -248,15 +231,14 @@ asmlinkage void __noreturn kvm_host_psci_cpu_entry(bool is_cpu_on)
 
 static unsigned long psci_0_1_handler(u64 func_id, struct kvm_cpu_context *host_ctxt)
 {
-	if ((func_id == kvm_host_psci_0_1_function_ids.cpu_off) ||
-	    (func_id == kvm_host_psci_0_1_function_ids.migrate))
+	if (is_psci_0_1(cpu_off, func_id) || is_psci_0_1(migrate, func_id))
 		return psci_forward(host_ctxt);
-	else if (func_id == kvm_host_psci_0_1_function_ids.cpu_on)
+	if (is_psci_0_1(cpu_on, func_id))
 		return psci_cpu_on(func_id, host_ctxt);
-	else if (func_id == kvm_host_psci_0_1_function_ids.cpu_suspend)
+	if (is_psci_0_1(cpu_suspend, func_id))
 		return psci_cpu_suspend(func_id, host_ctxt);
-	else
-		return PSCI_RET_NOT_SUPPORTED;
+
+	return PSCI_RET_NOT_SUPPORTED;
 }
static unsigned long psci_0_2_handler(u64 func_id, struct kvm_cpu_context *host_ctxt)
|
||||
@@ -298,20 +280,23 @@ static unsigned long psci_1_0_handler(u64 func_id, struct kvm_cpu_context *host_
|
||||
|
||||
bool kvm_host_psci_handler(struct kvm_cpu_context *host_ctxt)
|
||||
{
|
||||
u64 func_id = get_psci_func_id(host_ctxt);
|
||||
DECLARE_REG(u64, func_id, host_ctxt, 0);
|
||||
unsigned long ret;
|
||||
|
||||
if (!is_psci_call(func_id))
|
||||
return false;
|
||||
|
||||
switch (kvm_host_psci_version) {
|
||||
switch (kvm_host_psci_config.version) {
|
||||
case PSCI_VERSION(0, 1):
|
||||
if (!is_psci_0_1_call(func_id))
|
||||
return false;
|
||||
ret = psci_0_1_handler(func_id, host_ctxt);
|
||||
break;
|
||||
case PSCI_VERSION(0, 2):
|
||||
if (!is_psci_0_2_call(func_id))
|
||||
return false;
|
||||
ret = psci_0_2_handler(func_id, host_ctxt);
|
||||
break;
|
||||
default:
|
||||
if (!is_psci_0_2_call(func_id))
|
||||
return false;
|
||||
ret = psci_1_0_handler(func_id, host_ctxt);
|
||||
break;
|
||||
}
|
||||
|
||||
@@ -850,8 +850,6 @@ int kvm_arm_pmu_v3_enable(struct kvm_vcpu *vcpu)
 		return -EINVAL;
 	}
 
-	kvm_pmu_vcpu_reset(vcpu);
-
 	return 0;
 }
 
@@ -594,6 +594,10 @@ static void reset_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
 {
 	u64 pmcr, val;
 
+	/* No PMU available, PMCR_EL0 may UNDEF... */
+	if (!kvm_arm_support_pmu_v3())
+		return;
+
 	pmcr = read_sysreg(pmcr_el0);
 	/*
 	 * Writable bits of PMCR_EL0 (ARMV8_PMU_PMCR_MASK) are reset to UNKNOWN
@@ -919,7 +923,7 @@ static bool access_pmuserenr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 
 #define reg_to_encoding(x)						\
 	sys_reg((u32)(x)->Op0, (u32)(x)->Op1,				\
-		(u32)(x)->CRn, (u32)(x)->CRm, (u32)(x)->Op2);
+		(u32)(x)->CRn, (u32)(x)->CRm, (u32)(x)->Op2)
 
 /* Silly macro to expand the DBG{BCR,BVR,WVR,WCR}n_EL1 registers in one go */
 #define DBG_BCR_BVR_WCR_WVR_EL1(n)					\
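[Aside, not from the patch: the only change in this hunk is dropping a stray trailing semicolon, which matters because the macro is used in expression context. A toy illustration with made-up macro names:]

    /* A trailing ';' baked into a macro body breaks expression use. */
    #define ENC_BAD(x)	((x) + 1);	/* shaped like the old reg_to_encoding() */
    #define ENC_GOOD(x)	((x) + 1)	/* shaped like the fixed one */

    int uses_encoding(int r)
    {
    	return ENC_GOOD(r) == 2;	/* fine */
    	/* return ENC_BAD(r) == 2;	   would expand to "((r) + 1); == 2" -> syntax error */
    }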
@@ -34,17 +34,16 @@ static u64 __early_kern_hyp_va(u64 addr)
 }
 
 /*
- * Store a hyp VA <-> PA offset into a hyp-owned variable.
+ * Store a hyp VA <-> PA offset into a EL2-owned variable.
  */
 static void init_hyp_physvirt_offset(void)
 {
-	extern s64 kvm_nvhe_sym(hyp_physvirt_offset);
 	u64 kern_va, hyp_va;
 
 	/* Compute the offset from the hyp VA and PA of a random symbol. */
-	kern_va = (u64)kvm_ksym_ref(__hyp_text_start);
+	kern_va = (u64)lm_alias(__hyp_text_start);
 	hyp_va = __early_kern_hyp_va(kern_va);
-	CHOOSE_NVHE_SYM(hyp_physvirt_offset) = (s64)__pa(kern_va) - (s64)hyp_va;
+	hyp_physvirt_offset = (s64)__pa(kern_va) - (s64)hyp_va;
 }
 
 /*
@@ -419,7 +419,8 @@ int vgic_lazy_init(struct kvm *kvm)
  * Map the MMIO regions depending on the VGIC model exposed to the guest
  * called on the first VCPU run.
  * Also map the virtual CPU interface into the VM.
- * v2/v3 derivatives call vgic_init if not already done.
+ * v2 calls vgic_init() if not already done.
+ * v3 and derivatives return an error if the VGIC is not initialized.
  * vgic_ready() returns true if this function has succeeded.
  * @kvm: kvm struct pointer
  */
@@ -428,7 +429,13 @@ int kvm_vgic_map_resources(struct kvm *kvm)
 	struct vgic_dist *dist = &kvm->arch.vgic;
 	int ret = 0;
 
+	if (likely(vgic_ready(kvm)))
+		return 0;
+
 	mutex_lock(&kvm->lock);
+
+	if (vgic_ready(kvm))
+		goto out;
+
 	if (!irqchip_in_kernel(kvm))
 		goto out;
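[Aside, not part of the diff: the hunk above is the classic double-checked locking shape — a cheap lockless test, then a re-check under the lock — so that concurrent first-run vCPUs perform the mapping only once. A userspace-flavoured sketch with made-up names:]

    #include <pthread.h>
    #include <stdbool.h>

    static bool resources_ready;
    static pthread_mutex_t resources_lock = PTHREAD_MUTEX_INITIALIZER;

    static int do_expensive_mapping(void) { return 0; }	/* stand-in for the real setup work */

    int map_resources_once(void)
    {
    	int ret = 0;

    	if (resources_ready)			/* fast path: already done, no lock taken */
    		return 0;

    	pthread_mutex_lock(&resources_lock);
    	if (resources_ready)			/* lost the race: someone else did it */
    		goto out;

    	ret = do_expensive_mapping();
    	if (!ret)
    		resources_ready = true;
    out:
    	pthread_mutex_unlock(&resources_lock);
    	return ret;
    }

The kernel version keys the fast path with likely() and serializes on kvm->lock; memory-ordering details of the ready flag are glossed over in this sketch.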
@@ -439,6 +446,8 @@ int kvm_vgic_map_resources(struct kvm *kvm)
 
 	if (ret)
 		__kvm_vgic_destroy(kvm);
+	else
+		dist->ready = true;
 
 out:
 	mutex_unlock(&kvm->lock);
@@ -306,20 +306,15 @@ int vgic_v2_map_resources(struct kvm *kvm)
 	struct vgic_dist *dist = &kvm->arch.vgic;
 	int ret = 0;
 
-	if (vgic_ready(kvm))
-		goto out;
-
 	if (IS_VGIC_ADDR_UNDEF(dist->vgic_dist_base) ||
 	    IS_VGIC_ADDR_UNDEF(dist->vgic_cpu_base)) {
 		kvm_err("Need to set vgic cpu and dist addresses first\n");
-		ret = -ENXIO;
-		goto out;
+		return -ENXIO;
 	}
 
 	if (!vgic_v2_check_base(dist->vgic_dist_base, dist->vgic_cpu_base)) {
 		kvm_err("VGIC CPU and dist frames overlap\n");
-		ret = -EINVAL;
-		goto out;
+		return -EINVAL;
 	}
 
 	/*
@@ -329,13 +324,13 @@ int vgic_v2_map_resources(struct kvm *kvm)
 	ret = vgic_init(kvm);
 	if (ret) {
 		kvm_err("Unable to initialize VGIC dynamic data structures\n");
-		goto out;
+		return ret;
 	}
 
 	ret = vgic_register_dist_iodev(kvm, dist->vgic_dist_base, VGIC_V2);
 	if (ret) {
 		kvm_err("Unable to register VGIC MMIO regions\n");
-		goto out;
+		return ret;
 	}
 
 	if (!static_branch_unlikely(&vgic_v2_cpuif_trap)) {
@@ -344,14 +339,11 @@ int vgic_v2_map_resources(struct kvm *kvm)
 					    KVM_VGIC_V2_CPU_SIZE, true);
 		if (ret) {
 			kvm_err("Unable to remap VGIC CPU to VCPU\n");
-			goto out;
+			return ret;
 		}
 	}
 
-	dist->ready = true;
-
-out:
-	return ret;
+	return 0;
 }
 
 DEFINE_STATIC_KEY_FALSE(vgic_v2_cpuif_trap);
@@ -500,29 +500,23 @@ int vgic_v3_map_resources(struct kvm *kvm)
 	int ret = 0;
 	int c;
 
-	if (vgic_ready(kvm))
-		goto out;
-
 	kvm_for_each_vcpu(c, vcpu, kvm) {
 		struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
 
 		if (IS_VGIC_ADDR_UNDEF(vgic_cpu->rd_iodev.base_addr)) {
 			kvm_debug("vcpu %d redistributor base not set\n", c);
-			ret = -ENXIO;
-			goto out;
+			return -ENXIO;
 		}
 	}
 
 	if (IS_VGIC_ADDR_UNDEF(dist->vgic_dist_base)) {
 		kvm_err("Need to set vgic distributor addresses first\n");
-		ret = -ENXIO;
-		goto out;
+		return -ENXIO;
 	}
 
 	if (!vgic_v3_check_base(kvm)) {
 		kvm_err("VGIC redist and dist frames overlap\n");
-		ret = -EINVAL;
-		goto out;
+		return -EINVAL;
 	}
 
 	/*
@@ -530,22 +524,19 @@ int vgic_v3_map_resources(struct kvm *kvm)
 	 * the VGIC before we need to use it.
 	 */
 	if (!vgic_initialized(kvm)) {
-		ret = -EBUSY;
-		goto out;
+		return -EBUSY;
 	}
 
 	ret = vgic_register_dist_iodev(kvm, dist->vgic_dist_base, VGIC_V3);
 	if (ret) {
 		kvm_err("Unable to register VGICv3 dist MMIO regions\n");
-		goto out;
+		return ret;
 	}
 
 	if (kvm_vgic_global_state.has_gicv4_1)
 		vgic_v4_configure_vsgis(kvm);
-	dist->ready = true;
 
-out:
-	return ret;
+	return 0;
 }
 
 DEFINE_STATIC_KEY_FALSE(vgic_v3_cpuif_trap);
@@ -1010,9 +1010,21 @@ struct kvm_arch {
 	 */
 	bool tdp_mmu_enabled;
 
-	/* List of struct tdp_mmu_pages being used as roots */
+	/*
+	 * List of struct kvmp_mmu_pages being used as roots.
+	 * All struct kvm_mmu_pages in the list should have
+	 * tdp_mmu_page set.
+	 * All struct kvm_mmu_pages in the list should have a positive
+	 * root_count except when a thread holds the MMU lock and is removing
+	 * an entry from the list.
+	 */
 	struct list_head tdp_mmu_roots;
-	/* List of struct tdp_mmu_pages not being used as roots */
+
+	/*
+	 * List of struct kvmp_mmu_pages not being used as roots.
+	 * All struct kvm_mmu_pages in the list should have
+	 * tdp_mmu_page set and a root_count of 0.
+	 */
 	struct list_head tdp_mmu_pages;
 };
@@ -1287,6 +1299,8 @@ struct kvm_x86_ops {
 	void (*migrate_timers)(struct kvm_vcpu *vcpu);
 	void (*msr_filter_changed)(struct kvm_vcpu *vcpu);
 	int (*complete_emulated_msr)(struct kvm_vcpu *vcpu, int err);
+
+	void (*vcpu_deliver_sipi_vector)(struct kvm_vcpu *vcpu, u8 vector);
 };
 
 struct kvm_x86_nested_ops {
@@ -1468,6 +1482,7 @@ int kvm_fast_pio(struct kvm_vcpu *vcpu, int size, unsigned short port, int in);
 int kvm_emulate_cpuid(struct kvm_vcpu *vcpu);
 int kvm_emulate_halt(struct kvm_vcpu *vcpu);
 int kvm_vcpu_halt(struct kvm_vcpu *vcpu);
+int kvm_emulate_ap_reset_hold(struct kvm_vcpu *vcpu);
 int kvm_emulate_wbinvd(struct kvm_vcpu *vcpu);
 
 void kvm_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);