Merge branch 'kvm-older-features' into HEAD
Merge branch for features that did not make it into 5.18:

* New ioctls to get/set TSC frequency for a whole VM
* Allow userspace to opt out of hypercall patching

Nested virtualization improvements for AMD:

* Support for "nested nested" optimizations (nested vVMLOAD/VMSAVE, nested vGIF)
* Allow AVIC to co-exist with a nested guest running
* Fixes for LBR virtualization when a nested guest is running, and nested LBR virtualization support
* PAUSE filtering for nested hypervisors

Guest support:

* Decoupling of vcpu_is_preempted from PV spinlocks

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
@@ -982,12 +982,22 @@ memory.
	__u8 pad2[30];
  };

If the KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL flag is returned from the
KVM_CAP_XEN_HVM check, it may be set in the flags field of this ioctl.
This requests KVM to generate the contents of the hypercall page
automatically; hypercalls will be intercepted and passed to userspace
through KVM_EXIT_XEN. In this case, all of the blob size and address
fields must be zero.

If certain flags are returned from the KVM_CAP_XEN_HVM check, they may
be set in the flags field of this ioctl:

The KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL flag requests KVM to generate
the contents of the hypercall page automatically; hypercalls will be
intercepted and passed to userspace through KVM_EXIT_XEN. In this
case, all of the blob size and address fields must be zero.

The KVM_XEN_HVM_CONFIG_EVTCHN_SEND flag indicates to KVM that userspace
will always use the KVM_XEN_HVM_EVTCHN_SEND ioctl to deliver event
channel interrupts rather than manipulating the guest's shared_info
structures directly. This, in turn, may allow KVM to enable features
such as intercepting the SCHEDOP_poll hypercall to accelerate PV
spinlock operation for the guest. Userspace may still use the ioctl
to deliver events if it was advertised, even if userspace does not
send this indication that it will always do so.

No other flags are currently valid in the struct kvm_xen_hvm_config.
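As a rough userspace sketch (not part of this patch), enabling hypercall
interception when the flag is advertised could look like the following; it
assumes an already-created VM file descriptor and elides most error handling::

  #include <string.h>
  #include <sys/ioctl.h>
  #include <linux/kvm.h>

  static int enable_xen_hcall_intercept(int vm_fd)
  {
          struct kvm_xen_hvm_config cfg;
          int xen_caps = ioctl(vm_fd, KVM_CHECK_EXTENSION, KVM_CAP_XEN_HVM);

          if (xen_caps < 0 || !(xen_caps & KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL))
                  return -1;      /* not advertised; fall back to the MSR-based page */

          /* All blob size and address fields must remain zero. */
          memset(&cfg, 0, sizeof(cfg));
          cfg.flags = KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL;
          return ioctl(vm_fd, KVM_XEN_HVM_CONFIG, &cfg);
  }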
@@ -1887,22 +1897,25 @@ the future.
4.55 KVM_SET_TSC_KHZ
--------------------

:Capability: KVM_CAP_TSC_CONTROL
:Capability: KVM_CAP_TSC_CONTROL / KVM_CAP_VM_TSC_CONTROL
:Architectures: x86
:Type: vcpu ioctl
:Type: vcpu ioctl / vm ioctl
:Parameters: virtual tsc_khz
:Returns: 0 on success, -1 on error

Specifies the tsc frequency for the virtual machine. The unit of the
frequency is KHz.

If the KVM_CAP_VM_TSC_CONTROL capability is advertised, this can also
be used as a vm ioctl to set the initial tsc frequency of subsequently
created vCPUs.

4.56 KVM_GET_TSC_KHZ
--------------------

:Capability: KVM_CAP_GET_TSC_KHZ
:Capability: KVM_CAP_GET_TSC_KHZ / KVM_CAP_VM_TSC_CONTROL
:Architectures: x86
:Type: vcpu ioctl
:Type: vcpu ioctl / vm ioctl
:Parameters: none
:Returns: virtual tsc-khz on success, negative value on error
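A minimal sketch of the new VM-wide usage (assuming KVM_CAP_VM_TSC_CONTROL is
advertised; the frequency is passed directly as the ioctl argument, as with the
vcpu variant, and error handling is elided)::

  static int set_vm_tsc_khz(int vm_fd, unsigned long khz)
  {
          if (ioctl(vm_fd, KVM_CHECK_EXTENSION, KVM_CAP_VM_TSC_CONTROL) <= 0)
                  return -1;      /* fall back to per-vCPU KVM_SET_TSC_KHZ */

          /* Set the default TSC frequency before any vCPUs are created. */
          return ioctl(vm_fd, KVM_SET_TSC_KHZ, khz);
  }

  /* e.g. set_vm_tsc_khz(vm_fd, 2000000) pins subsequently created vCPUs to 2 GHz */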
@@ -5216,7 +5229,25 @@ have deterministic behavior.
		struct {
			__u64 gfn;
		} shared_info;
		__u64 pad[4];
		struct {
			__u32 send_port;
			__u32 type; /* EVTCHNSTAT_ipi / EVTCHNSTAT_interdomain */
			__u32 flags;
			union {
				struct {
					__u32 port;
					__u32 vcpu;
					__u32 priority;
				} port;
				struct {
					__u32 port; /* Zero for eventfd */
					__s32 fd;
				} eventfd;
				__u32 padding[4];
			} deliver;
		} evtchn;
		__u32 xen_version;
		__u64 pad[8];
	} u;
  };
@@ -5247,6 +5278,30 @@ KVM_XEN_ATTR_TYPE_SHARED_INFO

KVM_XEN_ATTR_TYPE_UPCALL_VECTOR
  Sets the exception vector used to deliver Xen event channel upcalls.
  This is the HVM-wide vector injected directly by the hypervisor
  (not through the local APIC), typically configured by a guest via
  HVM_PARAM_CALLBACK_IRQ.

KVM_XEN_ATTR_TYPE_EVTCHN
  This attribute is available when the KVM_CAP_XEN_HVM ioctl indicates
  support for KVM_XEN_HVM_CONFIG_EVTCHN_SEND features. It configures
  an outbound port number for interception of EVTCHNOP_send requests
  from the guest. A given sending port number may be directed back
  to a specified vCPU (by APIC ID) / port / priority on the guest,
  or to trigger events on an eventfd. The vCPU and priority can be
  changed by setting KVM_XEN_EVTCHN_UPDATE in a subsequent call,
  but other fields cannot change for a given sending port. A port
  mapping is removed by using KVM_XEN_EVTCHN_DEASSIGN in the flags
  field.
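A rough sketch of configuring such an interception (not part of this patch;
the EVTCHNSTAT_* values come from the Xen interface headers, and the priority
constant assumes 2-level delivery as described for KVM_IRQ_ROUTING_XEN_EVTCHN)::

  struct kvm_xen_hvm_attr attr = {
          .type = KVM_XEN_ATTR_TYPE_EVTCHN,
          .u.evtchn = {
                  .send_port = 5,                 /* guest port being intercepted */
                  .type = EVTCHNSTAT_interdomain,
                  .deliver.port = {
                          .port = 7,              /* deliver back on this port */
                          .vcpu = 0,              /* target Xen vCPU */
                          .priority = KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL,
                  },
          },
  };

  if (ioctl(vm_fd, KVM_XEN_HVM_SET_ATTR, &attr))
          perror("KVM_XEN_HVM_SET_ATTR");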
KVM_XEN_ATTR_TYPE_XEN_VERSION
  This attribute is available when the KVM_CAP_XEN_HVM ioctl indicates
  support for KVM_XEN_HVM_CONFIG_EVTCHN_SEND features. It configures
  the 32-bit version code returned to the guest when it invokes the
  XENVER_version call; typically (XEN_MAJOR << 16 | XEN_MINOR). PV
  Xen guests will often use this as a dummy hypercall to trigger
  event channel delivery, so responding within the kernel without
  exiting to userspace is beneficial.

4.127 KVM_XEN_HVM_GET_ATTR
--------------------------
@@ -5258,7 +5313,8 @@ KVM_XEN_ATTR_TYPE_UPCALL_VECTOR
:Returns: 0 on success, < 0 on error

Allows Xen VM attributes to be read. For the structure and types,
see KVM_XEN_HVM_SET_ATTR above.
see KVM_XEN_HVM_SET_ATTR above. The KVM_XEN_ATTR_TYPE_EVTCHN
attribute cannot be read.

4.128 KVM_XEN_VCPU_SET_ATTR
---------------------------
@@ -5285,6 +5341,13 @@ see KVM_XEN_HVM_SET_ATTR above.
			__u64 time_blocked;
			__u64 time_offline;
		} runstate;
		__u32 vcpu_id;
		struct {
			__u32 port;
			__u32 priority;
			__u64 expires_ns;
		} timer;
		__u8 vector;
	} u;
  };
@@ -5326,6 +5389,27 @@ KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_ADJUST
  or RUNSTATE_offline) to set the current accounted state as of the
  adjusted state_entry_time.

KVM_XEN_VCPU_ATTR_TYPE_VCPU_ID
  This attribute is available when the KVM_CAP_XEN_HVM ioctl indicates
  support for KVM_XEN_HVM_CONFIG_EVTCHN_SEND features. It sets the Xen
  vCPU ID of the given vCPU, to allow timer-related VCPU operations to
  be intercepted by KVM.

KVM_XEN_VCPU_ATTR_TYPE_TIMER
  This attribute is available when the KVM_CAP_XEN_HVM ioctl indicates
  support for KVM_XEN_HVM_CONFIG_EVTCHN_SEND features. It sets the
  event channel port/priority for the VIRQ_TIMER of the vCPU, as well
  as allowing a pending timer to be saved/restored.

KVM_XEN_VCPU_ATTR_TYPE_UPCALL_VECTOR
  This attribute is available when the KVM_CAP_XEN_HVM ioctl indicates
  support for KVM_XEN_HVM_CONFIG_EVTCHN_SEND features. It sets the
  per-vCPU local APIC upcall vector, configured by a Xen guest with
  the HVMOP_set_evtchn_upcall_vector hypercall. This is typically
  used by Windows guests, and is distinct from the HVM-wide upcall
  vector configured with HVM_PARAM_CALLBACK_IRQ.
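As an illustration of the KVM_XEN_VCPU_ATTR_TYPE_TIMER attribute above (a
sketch, not part of this patch; it reuses the timer fields of struct
kvm_xen_vcpu_attr shown earlier and assumes a 2-level event channel)::

  struct kvm_xen_vcpu_attr va = {
          .type = KVM_XEN_VCPU_ATTR_TYPE_TIMER,
          .u.timer = {
                  .port = 3,              /* guest's VIRQ_TIMER event channel port */
                  .priority = KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL,
                  .expires_ns = 0,        /* no pending timer being restored */
          },
  };

  if (ioctl(vcpu_fd, KVM_XEN_VCPU_SET_ATTR, &va))
          perror("KVM_XEN_VCPU_SET_ATTR");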
4.129 KVM_XEN_VCPU_GET_ATTR
---------------------------
@@ -5645,6 +5729,25 @@ enabled with ``arch_prctl()``, but this may change in the future.
The offsets of the state save areas in struct kvm_xsave follow the contents
of CPUID leaf 0xD on the host.

4.135 KVM_XEN_HVM_EVTCHN_SEND
-----------------------------

:Capability: KVM_CAP_XEN_HVM / KVM_XEN_HVM_CONFIG_EVTCHN_SEND
:Architectures: x86
:Type: vm ioctl
:Parameters: struct kvm_irq_routing_xen_evtchn
:Returns: 0 on success, < 0 on error

::

  struct kvm_irq_routing_xen_evtchn {
	__u32 port;
	__u32 vcpu;
	__u32 priority;
  };

This ioctl injects an event channel interrupt directly to the guest vCPU.
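For example (a sketch assuming the VM advertised KVM_XEN_HVM_CONFIG_EVTCHN_SEND
and that the port uses 2-level delivery; error handling elided)::

  struct kvm_irq_routing_xen_evtchn uxe = {
          .port = 3,                      /* guest event channel port */
          .vcpu = 0,                      /* target Xen vCPU ID */
          .priority = KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL,
  };

  if (ioctl(vm_fd, KVM_XEN_HVM_EVTCHN_SEND, &uxe))
          perror("KVM_XEN_HVM_EVTCHN_SEND");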
5. The kvm_run structure
========================
@@ -7135,6 +7238,15 @@ The valid bits in cap.args[0] are:

                                   Additionally, when this quirk is disabled,
                                   KVM clears CPUID.01H:ECX[bit 3] if
                                   IA32_MISC_ENABLE[bit 18] is cleared.

KVM_X86_QUIRK_FIX_HYPERCALL_INSN   By default, KVM rewrites guest
                                   VMMCALL/VMCALL instructions to match the
                                   vendor's hypercall instruction for the
                                   system. When this quirk is disabled, KVM
                                   will no longer rewrite invalid guest
                                   hypercall instructions. Executing the
                                   incorrect hypercall instruction will
                                   generate a #UD within the guest.
=================================== ============================================
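A sketch of opting out of the new quirk from userspace, assuming it is exposed
through the KVM_CAP_DISABLE_QUIRKS2 mechanism that this table documents (error
handling elided)::

  struct kvm_enable_cap cap = {
          .cap = KVM_CAP_DISABLE_QUIRKS2,
          .args = { KVM_X86_QUIRK_FIX_HYPERCALL_INSN },
  };

  /* After this, a mismatched VMCALL/VMMCALL raises #UD in the guest
   * instead of being rewritten by KVM. */
  if (ioctl(vm_fd, KVM_ENABLE_CAP, &cap))
          perror("KVM_ENABLE_CAP");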
8. Other capabilities.
@@ -7612,8 +7724,9 @@ PVHVM guests. Valid flags are::

  #define KVM_XEN_HVM_CONFIG_HYPERCALL_MSR	(1 << 0)
  #define KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL	(1 << 1)
  #define KVM_XEN_HVM_CONFIG_SHARED_INFO	(1 << 2)
  #define KVM_XEN_HVM_CONFIG_RUNSTATE		(1 << 2)
  #define KVM_XEN_HVM_CONFIG_EVTCHN_2LEVEL	(1 << 3)
  #define KVM_XEN_HVM_CONFIG_RUNSTATE		(1 << 3)
  #define KVM_XEN_HVM_CONFIG_EVTCHN_2LEVEL	(1 << 4)
  #define KVM_XEN_HVM_CONFIG_EVTCHN_SEND	(1 << 5)

The KVM_XEN_HVM_CONFIG_HYPERCALL_MSR flag indicates that the KVM_XEN_HVM_CONFIG
ioctl is available, for the guest to set its hypercall page.

@@ -7637,6 +7750,14 @@ The KVM_XEN_HVM_CONFIG_EVTCHN_2LEVEL flag indicates that IRQ routing entries
of the type KVM_IRQ_ROUTING_XEN_EVTCHN are supported, with the priority
field set to indicate 2 level event channel delivery.

The KVM_XEN_HVM_CONFIG_EVTCHN_SEND flag indicates that KVM supports
injecting event channel events directly into the guest with the
KVM_XEN_HVM_EVTCHN_SEND ioctl. It also indicates support for the
KVM_XEN_ATTR_TYPE_EVTCHN/XEN_VERSION HVM attributes and the
KVM_XEN_VCPU_ATTR_TYPE_VCPU_ID/TIMER/UPCALL_VECTOR vCPU attributes
related to event channel delivery, timers, and the XENVER_version
interception.

8.31 KVM_CAP_PPC_MULTITCE
-------------------------
@@ -126,6 +126,7 @@ KVM_X86_OP_OPTIONAL(migrate_timers)
|
||||
KVM_X86_OP(msr_filter_changed)
|
||||
KVM_X86_OP(complete_emulated_msr)
|
||||
KVM_X86_OP(vcpu_deliver_sipi_vector)
|
||||
KVM_X86_OP_OPTIONAL_RET0(vcpu_get_apicv_inhibit_reasons);
|
||||
|
||||
#undef KVM_X86_OP
|
||||
#undef KVM_X86_OP_OPTIONAL
|
||||
|
||||
@@ -607,16 +607,21 @@ struct kvm_vcpu_hv {
|
||||
struct kvm_vcpu_xen {
|
||||
u64 hypercall_rip;
|
||||
u32 current_runstate;
|
||||
bool vcpu_info_set;
|
||||
bool vcpu_time_info_set;
|
||||
bool runstate_set;
|
||||
struct gfn_to_hva_cache vcpu_info_cache;
|
||||
struct gfn_to_hva_cache vcpu_time_info_cache;
|
||||
struct gfn_to_hva_cache runstate_cache;
|
||||
u8 upcall_vector;
|
||||
struct gfn_to_pfn_cache vcpu_info_cache;
|
||||
struct gfn_to_pfn_cache vcpu_time_info_cache;
|
||||
struct gfn_to_pfn_cache runstate_cache;
|
||||
u64 last_steal;
|
||||
u64 runstate_entry_time;
|
||||
u64 runstate_times[4];
|
||||
unsigned long evtchn_pending_sel;
|
||||
u32 vcpu_id; /* The Xen / ACPI vCPU ID */
|
||||
u32 timer_virq;
|
||||
u64 timer_expires; /* In guest epoch */
|
||||
atomic_t timer_pending;
|
||||
struct hrtimer timer;
|
||||
int poll_evtchn;
|
||||
struct timer_list poll_timer;
|
||||
};
|
||||
|
||||
struct kvm_vcpu_arch {
|
||||
@@ -753,8 +758,7 @@ struct kvm_vcpu_arch {
|
||||
gpa_t time;
|
||||
struct pvclock_vcpu_time_info hv_clock;
|
||||
unsigned int hw_tsc_khz;
|
||||
struct gfn_to_hva_cache pv_time;
|
||||
bool pv_time_enabled;
|
||||
struct gfn_to_pfn_cache pv_time;
|
||||
/* set guest stopped flag in pvclock flags field */
|
||||
bool pvclock_set_guest_stopped_request;
|
||||
|
||||
@@ -1024,9 +1028,12 @@ struct msr_bitmap_range {
|
||||
|
||||
/* Xen emulation context */
|
||||
struct kvm_xen {
|
||||
u32 xen_version;
|
||||
bool long_mode;
|
||||
u8 upcall_vector;
|
||||
struct gfn_to_pfn_cache shinfo_cache;
|
||||
struct idr evtchn_ports;
|
||||
unsigned long poll_mask[BITS_TO_LONGS(KVM_MAX_VCPUS)];
|
||||
};
|
||||
|
||||
enum kvm_irqchip_mode {
|
||||
@@ -1119,6 +1126,8 @@ struct kvm_arch {
|
||||
u64 cur_tsc_generation;
|
||||
int nr_vcpus_matched_tsc;
|
||||
|
||||
u32 default_tsc_khz;
|
||||
|
||||
seqcount_raw_spinlock_t pvclock_sc;
|
||||
bool use_master_clock;
|
||||
u64 master_kernel_ns;
|
||||
@@ -1498,6 +1507,11 @@ struct kvm_x86_ops {
|
||||
int (*complete_emulated_msr)(struct kvm_vcpu *vcpu, int err);
|
||||
|
||||
void (*vcpu_deliver_sipi_vector)(struct kvm_vcpu *vcpu, u8 vector);
|
||||
|
||||
/*
|
||||
* Returns vCPU specific APICv inhibit reasons
|
||||
*/
|
||||
unsigned long (*vcpu_get_apicv_inhibit_reasons)(struct kvm_vcpu *vcpu);
|
||||
};
|
||||
|
||||
struct kvm_x86_nested_ops {
|
||||
@@ -1799,6 +1813,7 @@ gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva,
|
||||
struct x86_exception *exception);
|
||||
|
||||
bool kvm_apicv_activated(struct kvm *kvm);
|
||||
bool kvm_vcpu_apicv_activated(struct kvm_vcpu *vcpu);
|
||||
void kvm_vcpu_update_apicv(struct kvm_vcpu *vcpu);
|
||||
void __kvm_set_or_clear_apicv_inhibit(struct kvm *kvm,
|
||||
enum kvm_apicv_inhibit reason, bool set);
|
||||
@@ -1988,6 +2003,7 @@ int memslot_rmap_alloc(struct kvm_memory_slot *slot, unsigned long npages);
|
||||
KVM_X86_QUIRK_CD_NW_CLEARED | \
|
||||
KVM_X86_QUIRK_LAPIC_MMIO_HOLE | \
|
||||
KVM_X86_QUIRK_OUT_7E_INC_RIP | \
|
||||
KVM_X86_QUIRK_MISC_ENABLE_NO_MWAIT)
|
||||
KVM_X86_QUIRK_MISC_ENABLE_NO_MWAIT | \
|
||||
KVM_X86_QUIRK_FIX_HYPERCALL_INSN)
|
||||
|
||||
#endif /* _ASM_X86_KVM_HOST_H */
|
||||
|
||||
@@ -428,11 +428,12 @@ struct kvm_sync_regs {
|
||||
struct kvm_vcpu_events events;
|
||||
};
|
||||
|
||||
#define KVM_X86_QUIRK_LINT0_REENABLED (1 << 0)
|
||||
#define KVM_X86_QUIRK_CD_NW_CLEARED (1 << 1)
|
||||
#define KVM_X86_QUIRK_LAPIC_MMIO_HOLE (1 << 2)
|
||||
#define KVM_X86_QUIRK_OUT_7E_INC_RIP (1 << 3)
|
||||
#define KVM_X86_QUIRK_MISC_ENABLE_NO_MWAIT (1 << 4)
|
||||
#define KVM_X86_QUIRK_LINT0_REENABLED (1 << 0)
|
||||
#define KVM_X86_QUIRK_CD_NW_CLEARED (1 << 1)
|
||||
#define KVM_X86_QUIRK_LAPIC_MMIO_HOLE (1 << 2)
|
||||
#define KVM_X86_QUIRK_OUT_7E_INC_RIP (1 << 3)
|
||||
#define KVM_X86_QUIRK_MISC_ENABLE_NO_MWAIT (1 << 4)
|
||||
#define KVM_X86_QUIRK_FIX_HYPERCALL_INSN (1 << 5)
|
||||
|
||||
#define KVM_STATE_NESTED_FORMAT_VMX 0
|
||||
#define KVM_STATE_NESTED_FORMAT_SVM 1
|
||||
|
||||
@@ -5,7 +5,7 @@
|
||||
|
||||
#include <asm/ia32.h>
|
||||
|
||||
#if defined(CONFIG_KVM_GUEST) && defined(CONFIG_PARAVIRT_SPINLOCKS)
|
||||
#if defined(CONFIG_KVM_GUEST)
|
||||
#include <asm/kvm_para.h>
|
||||
#endif
|
||||
|
||||
@@ -20,7 +20,7 @@ int main(void)
|
||||
BLANK();
|
||||
#endif
|
||||
|
||||
#if defined(CONFIG_KVM_GUEST) && defined(CONFIG_PARAVIRT_SPINLOCKS)
|
||||
#if defined(CONFIG_KVM_GUEST)
|
||||
OFFSET(KVM_STEAL_TIME_preempted, kvm_steal_time, preempted);
|
||||
BLANK();
|
||||
#endif
|
||||
|
||||
@@ -752,6 +752,42 @@ static void kvm_crash_shutdown(struct pt_regs *regs)
|
||||
}
|
||||
#endif
|
||||
|
||||
#if defined(CONFIG_X86_32) || !defined(CONFIG_SMP)
|
||||
bool __kvm_vcpu_is_preempted(long cpu);
|
||||
|
||||
__visible bool __kvm_vcpu_is_preempted(long cpu)
|
||||
{
|
||||
struct kvm_steal_time *src = &per_cpu(steal_time, cpu);
|
||||
|
||||
return !!(src->preempted & KVM_VCPU_PREEMPTED);
|
||||
}
|
||||
PV_CALLEE_SAVE_REGS_THUNK(__kvm_vcpu_is_preempted);
|
||||
|
||||
#else
|
||||
|
||||
#include <asm/asm-offsets.h>
|
||||
|
||||
extern bool __raw_callee_save___kvm_vcpu_is_preempted(long);
|
||||
|
||||
/*
|
||||
* Hand-optimize version for x86-64 to avoid 8 64-bit register saving and
|
||||
* restoring to/from the stack.
|
||||
*/
|
||||
asm(
|
||||
".pushsection .text;"
|
||||
".global __raw_callee_save___kvm_vcpu_is_preempted;"
|
||||
".type __raw_callee_save___kvm_vcpu_is_preempted, @function;"
|
||||
"__raw_callee_save___kvm_vcpu_is_preempted:"
|
||||
ASM_ENDBR
|
||||
"movq __per_cpu_offset(,%rdi,8), %rax;"
|
||||
"cmpb $0, " __stringify(KVM_STEAL_TIME_preempted) "+steal_time(%rax);"
|
||||
"setne %al;"
|
||||
ASM_RET
|
||||
".size __raw_callee_save___kvm_vcpu_is_preempted, .-__raw_callee_save___kvm_vcpu_is_preempted;"
|
||||
".popsection");
|
||||
|
||||
#endif
|
||||
|
||||
static void __init kvm_guest_init(void)
|
||||
{
|
||||
int i;
|
||||
@@ -764,6 +800,9 @@ static void __init kvm_guest_init(void)
|
||||
if (kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) {
|
||||
has_steal_clock = 1;
|
||||
static_call_update(pv_steal_clock, kvm_steal_clock);
|
||||
|
||||
pv_ops.lock.vcpu_is_preempted =
|
||||
PV_CALLEE_SAVE(__kvm_vcpu_is_preempted);
|
||||
}
|
||||
|
||||
if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
|
||||
@@ -1005,40 +1044,6 @@ static void kvm_wait(u8 *ptr, u8 val)
|
||||
}
|
||||
}
|
||||
|
||||
#ifdef CONFIG_X86_32
|
||||
__visible bool __kvm_vcpu_is_preempted(long cpu)
|
||||
{
|
||||
struct kvm_steal_time *src = &per_cpu(steal_time, cpu);
|
||||
|
||||
return !!(src->preempted & KVM_VCPU_PREEMPTED);
|
||||
}
|
||||
PV_CALLEE_SAVE_REGS_THUNK(__kvm_vcpu_is_preempted);
|
||||
|
||||
#else
|
||||
|
||||
#include <asm/asm-offsets.h>
|
||||
|
||||
extern bool __raw_callee_save___kvm_vcpu_is_preempted(long);
|
||||
|
||||
/*
|
||||
* Hand-optimize version for x86-64 to avoid 8 64-bit register saving and
|
||||
* restoring to/from the stack.
|
||||
*/
|
||||
asm(
|
||||
".pushsection .text;"
|
||||
".global __raw_callee_save___kvm_vcpu_is_preempted;"
|
||||
".type __raw_callee_save___kvm_vcpu_is_preempted, @function;"
|
||||
"__raw_callee_save___kvm_vcpu_is_preempted:"
|
||||
ASM_ENDBR
|
||||
"movq __per_cpu_offset(,%rdi,8), %rax;"
|
||||
"cmpb $0, " __stringify(KVM_STEAL_TIME_preempted) "+steal_time(%rax);"
|
||||
"setne %al;"
|
||||
ASM_RET
|
||||
".size __raw_callee_save___kvm_vcpu_is_preempted, .-__raw_callee_save___kvm_vcpu_is_preempted;"
|
||||
".popsection");
|
||||
|
||||
#endif
|
||||
|
||||
/*
|
||||
* Setup pv_lock_ops to exploit KVM_FEATURE_PV_UNHALT if present.
|
||||
*/
|
||||
@@ -1082,10 +1087,6 @@ void __init kvm_spinlock_init(void)
|
||||
pv_ops.lock.wait = kvm_wait;
|
||||
pv_ops.lock.kick = kvm_kick_cpu;
|
||||
|
||||
if (kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) {
|
||||
pv_ops.lock.vcpu_is_preempted =
|
||||
PV_CALLEE_SAVE(__kvm_vcpu_is_preempted);
|
||||
}
|
||||
/*
|
||||
* When PV spinlock is enabled which is preferred over
|
||||
* virt_spin_lock(), virt_spin_lock_key's value is meaningless.
|
||||
|
||||
@@ -252,7 +252,6 @@ int kvm_pic_read_irq(struct kvm *kvm)
|
||||
*/
|
||||
irq2 = 7;
|
||||
intno = s->pics[1].irq_base + irq2;
|
||||
irq = irq2 + 8;
|
||||
} else
|
||||
intno = s->pics[0].irq_base + irq;
|
||||
} else {
|
||||
|
||||
@@ -22,10 +22,14 @@
|
||||
*/
|
||||
int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
if (lapic_in_kernel(vcpu))
|
||||
return apic_has_pending_timer(vcpu);
|
||||
int r = 0;
|
||||
|
||||
return 0;
|
||||
if (lapic_in_kernel(vcpu))
|
||||
r = apic_has_pending_timer(vcpu);
|
||||
if (kvm_xen_timer_enabled(vcpu))
|
||||
r += kvm_xen_has_pending_timer(vcpu);
|
||||
|
||||
return r;
|
||||
}
|
||||
EXPORT_SYMBOL(kvm_cpu_has_pending_timer);
|
||||
|
||||
@@ -143,6 +147,8 @@ void kvm_inject_pending_timer_irqs(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
if (lapic_in_kernel(vcpu))
|
||||
kvm_inject_apic_timer_irqs(vcpu);
|
||||
if (kvm_xen_timer_enabled(vcpu))
|
||||
kvm_xen_inject_timer_irqs(vcpu);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(kvm_inject_pending_timer_irqs);
|
||||
|
||||
|
||||
@@ -181,7 +181,7 @@ int kvm_arch_set_irq_inatomic(struct kvm_kernel_irq_routing_entry *e,
|
||||
if (!level)
|
||||
return -1;
|
||||
|
||||
return kvm_xen_set_evtchn_fast(e, kvm);
|
||||
return kvm_xen_set_evtchn_fast(&e->xen_evtchn, kvm);
|
||||
#endif
|
||||
default:
|
||||
break;
|
||||
|
||||
@@ -1866,17 +1866,14 @@ static void kvm_mmu_commit_zap_page(struct kvm *kvm,
|
||||
&(_kvm)->arch.mmu_page_hash[kvm_page_table_hashfn(_gfn)]) \
|
||||
if ((_sp)->gfn != (_gfn) || (_sp)->role.direct) {} else
|
||||
|
||||
static bool kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
|
||||
static int kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
|
||||
struct list_head *invalid_list)
|
||||
{
|
||||
int ret = vcpu->arch.mmu->sync_page(vcpu, sp);
|
||||
|
||||
if (ret < 0) {
|
||||
if (ret < 0)
|
||||
kvm_mmu_prepare_zap_page(vcpu->kvm, sp, invalid_list);
|
||||
return false;
|
||||
}
|
||||
|
||||
return !!ret;
|
||||
return ret;
|
||||
}
|
||||
|
||||
static bool kvm_mmu_remote_flush_or_zap(struct kvm *kvm,
|
||||
@@ -1998,7 +1995,7 @@ static int mmu_sync_children(struct kvm_vcpu *vcpu,
|
||||
|
||||
for_each_sp(pages, sp, parents, i) {
|
||||
kvm_unlink_unsync_page(vcpu->kvm, sp);
|
||||
flush |= kvm_sync_page(vcpu, sp, &invalid_list);
|
||||
flush |= kvm_sync_page(vcpu, sp, &invalid_list) > 0;
|
||||
mmu_pages_clear_parents(&parents);
|
||||
}
|
||||
if (need_resched() || rwlock_needbreak(&vcpu->kvm->mmu_lock)) {
|
||||
@@ -2039,6 +2036,7 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
|
||||
struct hlist_head *sp_list;
|
||||
unsigned quadrant;
|
||||
struct kvm_mmu_page *sp;
|
||||
int ret;
|
||||
int collisions = 0;
|
||||
LIST_HEAD(invalid_list);
|
||||
|
||||
@@ -2091,11 +2089,13 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
|
||||
* If the sync fails, the page is zapped. If so, break
|
||||
* in order to rebuild it.
|
||||
*/
|
||||
if (!kvm_sync_page(vcpu, sp, &invalid_list))
|
||||
ret = kvm_sync_page(vcpu, sp, &invalid_list);
|
||||
if (ret < 0)
|
||||
break;
|
||||
|
||||
WARN_ON(!list_empty(&invalid_list));
|
||||
kvm_flush_remote_tlbs(vcpu->kvm);
|
||||
if (ret > 0)
|
||||
kvm_flush_remote_tlbs(vcpu->kvm);
|
||||
}
|
||||
|
||||
__clear_sp_write_flooding_count(sp);
|
||||
|
||||
@@ -165,9 +165,8 @@ free_avic:
|
||||
return err;
|
||||
}
|
||||
|
||||
void avic_init_vmcb(struct vcpu_svm *svm)
|
||||
void avic_init_vmcb(struct vcpu_svm *svm, struct vmcb *vmcb)
|
||||
{
|
||||
struct vmcb *vmcb = svm->vmcb;
|
||||
struct kvm_svm *kvm_svm = to_kvm_svm(svm->vcpu.kvm);
|
||||
phys_addr_t bpa = __sme_set(page_to_phys(svm->avic_backing_page));
|
||||
phys_addr_t lpa = __sme_set(page_to_phys(kvm_svm->avic_logical_id_table_page));
|
||||
@@ -357,6 +356,13 @@ int avic_incomplete_ipi_interception(struct kvm_vcpu *vcpu)
|
||||
return 1;
|
||||
}
|
||||
|
||||
unsigned long avic_vcpu_get_apicv_inhibit_reasons(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
if (is_guest_mode(vcpu))
|
||||
return APICV_INHIBIT_REASON_NESTED;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static u32 *avic_get_logical_id_entry(struct kvm_vcpu *vcpu, u32 ldr, bool flat)
|
||||
{
|
||||
struct kvm_svm *kvm_svm = to_kvm_svm(vcpu->kvm);
|
||||
|
||||
File diff suppressed because it is too large
@@ -62,8 +62,6 @@ MODULE_DEVICE_TABLE(x86cpu, svm_cpu_id);
|
||||
#define SEG_TYPE_LDT 2
|
||||
#define SEG_TYPE_BUSY_TSS16 3
|
||||
|
||||
#define DEBUGCTL_RESERVED_BITS (~(0x3fULL))
|
||||
|
||||
static bool erratum_383_found __read_mostly;
|
||||
|
||||
u32 msrpm_offsets[MSRPM_OFFSETS] __read_mostly;
|
||||
@@ -172,7 +170,7 @@ static int vls = true;
|
||||
module_param(vls, int, 0444);
|
||||
|
||||
/* enable/disable Virtual GIF */
|
||||
static int vgif = true;
|
||||
int vgif = true;
|
||||
module_param(vgif, int, 0444);
|
||||
|
||||
/* enable/disable LBR virtualization */
|
||||
@@ -189,6 +187,9 @@ module_param(tsc_scaling, int, 0444);
|
||||
static bool avic;
|
||||
module_param(avic, bool, 0444);
|
||||
|
||||
static bool force_avic;
|
||||
module_param_unsafe(force_avic, bool, 0444);
|
||||
|
||||
bool __read_mostly dump_invalid_vmcb;
|
||||
module_param(dump_invalid_vmcb, bool, 0644);
|
||||
|
||||
@@ -790,6 +791,17 @@ static void init_msrpm_offsets(void)
|
||||
}
|
||||
}
|
||||
|
||||
void svm_copy_lbrs(struct vmcb *to_vmcb, struct vmcb *from_vmcb)
|
||||
{
|
||||
to_vmcb->save.dbgctl = from_vmcb->save.dbgctl;
|
||||
to_vmcb->save.br_from = from_vmcb->save.br_from;
|
||||
to_vmcb->save.br_to = from_vmcb->save.br_to;
|
||||
to_vmcb->save.last_excp_from = from_vmcb->save.last_excp_from;
|
||||
to_vmcb->save.last_excp_to = from_vmcb->save.last_excp_to;
|
||||
|
||||
vmcb_mark_dirty(to_vmcb, VMCB_LBR);
|
||||
}
|
||||
|
||||
static void svm_enable_lbrv(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
struct vcpu_svm *svm = to_svm(vcpu);
|
||||
@@ -799,6 +811,10 @@ static void svm_enable_lbrv(struct kvm_vcpu *vcpu)
|
||||
set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTBRANCHTOIP, 1, 1);
|
||||
set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTINTFROMIP, 1, 1);
|
||||
set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTINTTOIP, 1, 1);
|
||||
|
||||
/* Move the LBR msrs to the vmcb02 so that the guest can see them. */
|
||||
if (is_guest_mode(vcpu))
|
||||
svm_copy_lbrs(svm->vmcb, svm->vmcb01.ptr);
|
||||
}
|
||||
|
||||
static void svm_disable_lbrv(struct kvm_vcpu *vcpu)
|
||||
@@ -810,6 +826,67 @@ static void svm_disable_lbrv(struct kvm_vcpu *vcpu)
|
||||
set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTBRANCHTOIP, 0, 0);
|
||||
set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTINTFROMIP, 0, 0);
|
||||
set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTINTTOIP, 0, 0);
|
||||
|
||||
/*
|
||||
* Move the LBR msrs back to the vmcb01 to avoid copying them
|
||||
* on nested guest entries.
|
||||
*/
|
||||
if (is_guest_mode(vcpu))
|
||||
svm_copy_lbrs(svm->vmcb01.ptr, svm->vmcb);
|
||||
}
|
||||
|
||||
static int svm_get_lbr_msr(struct vcpu_svm *svm, u32 index)
|
||||
{
|
||||
/*
|
||||
* If the LBR virtualization is disabled, the LBR msrs are always
|
||||
* kept in the vmcb01 to avoid copying them on nested guest entries.
|
||||
*
|
||||
* If nested, and the LBR virtualization is enabled/disabled, the msrs
|
||||
* are moved between the vmcb01 and vmcb02 as needed.
|
||||
*/
|
||||
struct vmcb *vmcb =
|
||||
(svm->vmcb->control.virt_ext & LBR_CTL_ENABLE_MASK) ?
|
||||
svm->vmcb : svm->vmcb01.ptr;
|
||||
|
||||
switch (index) {
|
||||
case MSR_IA32_DEBUGCTLMSR:
|
||||
return vmcb->save.dbgctl;
|
||||
case MSR_IA32_LASTBRANCHFROMIP:
|
||||
return vmcb->save.br_from;
|
||||
case MSR_IA32_LASTBRANCHTOIP:
|
||||
return vmcb->save.br_to;
|
||||
case MSR_IA32_LASTINTFROMIP:
|
||||
return vmcb->save.last_excp_from;
|
||||
case MSR_IA32_LASTINTTOIP:
|
||||
return vmcb->save.last_excp_to;
|
||||
default:
|
||||
KVM_BUG(false, svm->vcpu.kvm,
|
||||
"%s: Unknown MSR 0x%x", __func__, index);
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
void svm_update_lbrv(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
struct vcpu_svm *svm = to_svm(vcpu);
|
||||
|
||||
bool enable_lbrv = svm_get_lbr_msr(svm, MSR_IA32_DEBUGCTLMSR) &
|
||||
DEBUGCTLMSR_LBR;
|
||||
|
||||
bool current_enable_lbrv = !!(svm->vmcb->control.virt_ext &
|
||||
LBR_CTL_ENABLE_MASK);
|
||||
|
||||
if (unlikely(is_guest_mode(vcpu) && svm->lbrv_enabled))
|
||||
if (unlikely(svm->nested.ctl.virt_ext & LBR_CTL_ENABLE_MASK))
|
||||
enable_lbrv = true;
|
||||
|
||||
if (enable_lbrv == current_enable_lbrv)
|
||||
return;
|
||||
|
||||
if (enable_lbrv)
|
||||
svm_enable_lbrv(vcpu);
|
||||
else
|
||||
svm_disable_lbrv(vcpu);
|
||||
}
|
||||
|
||||
void disable_nmi_singlestep(struct vcpu_svm *svm)
|
||||
@@ -831,6 +908,9 @@ static void grow_ple_window(struct kvm_vcpu *vcpu)
|
||||
struct vmcb_control_area *control = &svm->vmcb->control;
|
||||
int old = control->pause_filter_count;
|
||||
|
||||
if (kvm_pause_in_guest(vcpu->kvm) || !old)
|
||||
return;
|
||||
|
||||
control->pause_filter_count = __grow_ple_window(old,
|
||||
pause_filter_count,
|
||||
pause_filter_count_grow,
|
||||
@@ -849,6 +929,9 @@ static void shrink_ple_window(struct kvm_vcpu *vcpu)
|
||||
struct vmcb_control_area *control = &svm->vmcb->control;
|
||||
int old = control->pause_filter_count;
|
||||
|
||||
if (kvm_pause_in_guest(vcpu->kvm) || !old)
|
||||
return;
|
||||
|
||||
control->pause_filter_count =
|
||||
__shrink_ple_window(old,
|
||||
pause_filter_count,
|
||||
@@ -960,6 +1043,8 @@ static inline void init_vmcb_after_set_cpuid(struct kvm_vcpu *vcpu)
|
||||
|
||||
set_msr_interception(vcpu, svm->msrpm, MSR_IA32_SYSENTER_EIP, 0, 0);
|
||||
set_msr_interception(vcpu, svm->msrpm, MSR_IA32_SYSENTER_ESP, 0, 0);
|
||||
|
||||
svm->v_vmload_vmsave_enabled = false;
|
||||
} else {
|
||||
/*
|
||||
* If hardware supports Virtual VMLOAD VMSAVE then enable it
|
||||
@@ -979,8 +1064,9 @@ static inline void init_vmcb_after_set_cpuid(struct kvm_vcpu *vcpu)
|
||||
static void init_vmcb(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
struct vcpu_svm *svm = to_svm(vcpu);
|
||||
struct vmcb_control_area *control = &svm->vmcb->control;
|
||||
struct vmcb_save_area *save = &svm->vmcb->save;
|
||||
struct vmcb *vmcb = svm->vmcb01.ptr;
|
||||
struct vmcb_control_area *control = &vmcb->control;
|
||||
struct vmcb_save_area *save = &vmcb->save;
|
||||
|
||||
svm_set_intercept(svm, INTERCEPT_CR0_READ);
|
||||
svm_set_intercept(svm, INTERCEPT_CR3_READ);
|
||||
@@ -1104,7 +1190,7 @@ static void init_vmcb(struct kvm_vcpu *vcpu)
|
||||
set_msr_interception(vcpu, svm->msrpm, MSR_IA32_SPEC_CTRL, 1, 1);
|
||||
|
||||
if (kvm_vcpu_apicv_active(vcpu))
|
||||
avic_init_vmcb(svm);
|
||||
avic_init_vmcb(svm, vmcb);
|
||||
|
||||
if (vgif) {
|
||||
svm_clr_intercept(svm, INTERCEPT_STGI);
|
||||
@@ -1122,10 +1208,10 @@ static void init_vmcb(struct kvm_vcpu *vcpu)
|
||||
}
|
||||
}
|
||||
|
||||
svm_hv_init_vmcb(svm->vmcb);
|
||||
svm_hv_init_vmcb(vmcb);
|
||||
init_vmcb_after_set_cpuid(vcpu);
|
||||
|
||||
vmcb_mark_all_dirty(svm->vmcb);
|
||||
vmcb_mark_all_dirty(vmcb);
|
||||
|
||||
enable_gif(svm);
|
||||
}
|
||||
@@ -1380,7 +1466,7 @@ static void svm_set_vintr(struct vcpu_svm *svm)
|
||||
/*
|
||||
* The following fields are ignored when AVIC is enabled
|
||||
*/
|
||||
WARN_ON(kvm_apicv_activated(svm->vcpu.kvm));
|
||||
WARN_ON(kvm_vcpu_apicv_activated(&svm->vcpu));
|
||||
|
||||
svm_set_intercept(svm, INTERCEPT_VINTR);
|
||||
|
||||
@@ -2142,7 +2228,7 @@ void svm_set_gif(struct vcpu_svm *svm, bool value)
|
||||
* Likewise, clear the VINTR intercept, we will set it
|
||||
* again while processing KVM_REQ_EVENT if needed.
|
||||
*/
|
||||
if (vgif_enabled(svm))
|
||||
if (vgif)
|
||||
svm_clr_intercept(svm, INTERCEPT_STGI);
|
||||
if (svm_is_intercept(svm, INTERCEPT_VINTR))
|
||||
svm_clear_vintr(svm);
|
||||
@@ -2160,7 +2246,7 @@ void svm_set_gif(struct vcpu_svm *svm, bool value)
|
||||
* in use, we still rely on the VINTR intercept (rather than
|
||||
* STGI) to detect an open interrupt window.
|
||||
*/
|
||||
if (!vgif_enabled(svm))
|
||||
if (!vgif)
|
||||
svm_clear_vintr(svm);
|
||||
}
|
||||
}
|
||||
@@ -2575,25 +2661,12 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
|
||||
case MSR_TSC_AUX:
|
||||
msr_info->data = svm->tsc_aux;
|
||||
break;
|
||||
/*
|
||||
* Nobody will change the following 5 values in the VMCB so we can
|
||||
* safely return them on rdmsr. They will always be 0 until LBRV is
|
||||
* implemented.
|
||||
*/
|
||||
case MSR_IA32_DEBUGCTLMSR:
|
||||
msr_info->data = svm->vmcb->save.dbgctl;
|
||||
break;
|
||||
case MSR_IA32_LASTBRANCHFROMIP:
|
||||
msr_info->data = svm->vmcb->save.br_from;
|
||||
break;
|
||||
case MSR_IA32_LASTBRANCHTOIP:
|
||||
msr_info->data = svm->vmcb->save.br_to;
|
||||
break;
|
||||
case MSR_IA32_LASTINTFROMIP:
|
||||
msr_info->data = svm->vmcb->save.last_excp_from;
|
||||
break;
|
||||
case MSR_IA32_LASTINTTOIP:
|
||||
msr_info->data = svm->vmcb->save.last_excp_to;
|
||||
msr_info->data = svm_get_lbr_msr(svm, msr_info->index);
|
||||
break;
|
||||
case MSR_VM_HSAVE_PA:
|
||||
msr_info->data = svm->nested.hsave_msr;
|
||||
@@ -2839,12 +2912,13 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
|
||||
if (data & DEBUGCTL_RESERVED_BITS)
|
||||
return 1;
|
||||
|
||||
svm->vmcb->save.dbgctl = data;
|
||||
vmcb_mark_dirty(svm->vmcb, VMCB_LBR);
|
||||
if (data & (1ULL<<0))
|
||||
svm_enable_lbrv(vcpu);
|
||||
if (svm->vmcb->control.virt_ext & LBR_CTL_ENABLE_MASK)
|
||||
svm->vmcb->save.dbgctl = data;
|
||||
else
|
||||
svm_disable_lbrv(vcpu);
|
||||
svm->vmcb01.ptr->save.dbgctl = data;
|
||||
|
||||
svm_update_lbrv(vcpu);
|
||||
|
||||
break;
|
||||
case MSR_VM_HSAVE_PA:
|
||||
/*
|
||||
@@ -2901,9 +2975,16 @@ static int interrupt_window_interception(struct kvm_vcpu *vcpu)
|
||||
svm_clear_vintr(to_svm(vcpu));
|
||||
|
||||
/*
|
||||
* For AVIC, the only reason to end up here is ExtINTs.
|
||||
* If not running nested, for AVIC, the only reason to end up here is ExtINTs.
|
||||
* In this case AVIC was temporarily disabled for
|
||||
* requesting the IRQ window and we have to re-enable it.
|
||||
*
|
||||
* If running nested, still remove the VM wide AVIC inhibit to
|
||||
* support case in which the interrupt window was requested when the
|
||||
* vCPU was not running nested.
|
||||
|
||||
* All vCPUs which run still run nested, will remain to have their
|
||||
* AVIC still inhibited due to per-cpu AVIC inhibition.
|
||||
*/
|
||||
kvm_clear_apicv_inhibit(vcpu->kvm, APICV_INHIBIT_REASON_IRQWIN);
|
||||
|
||||
@@ -2914,7 +2995,6 @@ static int interrupt_window_interception(struct kvm_vcpu *vcpu)
|
||||
static int pause_interception(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
bool in_kernel;
|
||||
|
||||
/*
|
||||
* CPL is not made available for an SEV-ES guest, therefore
|
||||
* vcpu->arch.preempted_in_kernel can never be true. Just
|
||||
@@ -2922,8 +3002,7 @@ static int pause_interception(struct kvm_vcpu *vcpu)
|
||||
*/
|
||||
in_kernel = !sev_es_guest(vcpu->kvm) && svm_get_cpl(vcpu) == 0;
|
||||
|
||||
if (!kvm_pause_in_guest(vcpu->kvm))
|
||||
grow_ple_window(vcpu);
|
||||
grow_ple_window(vcpu);
|
||||
|
||||
kvm_vcpu_on_spin(vcpu, in_kernel);
|
||||
return kvm_skip_emulated_instruction(vcpu);
|
||||
@@ -3496,14 +3575,20 @@ static void svm_enable_irq_window(struct kvm_vcpu *vcpu)
|
||||
* enabled, the STGI interception will not occur. Enable the irq
|
||||
* window under the assumption that the hardware will set the GIF.
|
||||
*/
|
||||
if (vgif_enabled(svm) || gif_set(svm)) {
|
||||
if (vgif || gif_set(svm)) {
|
||||
/*
|
||||
* IRQ window is not needed when AVIC is enabled,
|
||||
* unless we have pending ExtINT since it cannot be injected
|
||||
* via AVIC. In such case, we need to temporarily disable AVIC,
|
||||
* via AVIC. In such case, KVM needs to temporarily disable AVIC,
|
||||
* and fallback to injecting IRQ via V_IRQ.
|
||||
*
|
||||
* If running nested, AVIC is already locally inhibited
|
||||
* on this vCPU, therefore there is no need to request
|
||||
* the VM wide AVIC inhibition.
|
||||
*/
|
||||
kvm_set_apicv_inhibit(vcpu->kvm, APICV_INHIBIT_REASON_IRQWIN);
|
||||
if (!is_guest_mode(vcpu))
|
||||
kvm_set_apicv_inhibit(vcpu->kvm, APICV_INHIBIT_REASON_IRQWIN);
|
||||
|
||||
svm_set_vintr(svm);
|
||||
}
|
||||
}
|
||||
@@ -3516,7 +3601,7 @@ static void svm_enable_nmi_window(struct kvm_vcpu *vcpu)
|
||||
return; /* IRET will cause a vm exit */
|
||||
|
||||
if (!gif_set(svm)) {
|
||||
if (vgif_enabled(svm))
|
||||
if (vgif)
|
||||
svm_set_intercept(svm, INTERCEPT_STGI);
|
||||
return; /* STGI will cause a vm exit */
|
||||
}
|
||||
@@ -3946,6 +4031,17 @@ static void svm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
|
||||
guest_cpuid_has(vcpu, X86_FEATURE_NRIPS);
|
||||
|
||||
svm->tsc_scaling_enabled = tsc_scaling && guest_cpuid_has(vcpu, X86_FEATURE_TSCRATEMSR);
|
||||
svm->lbrv_enabled = lbrv && guest_cpuid_has(vcpu, X86_FEATURE_LBRV);
|
||||
|
||||
svm->v_vmload_vmsave_enabled = vls && guest_cpuid_has(vcpu, X86_FEATURE_V_VMSAVE_VMLOAD);
|
||||
|
||||
svm->pause_filter_enabled = kvm_cpu_cap_has(X86_FEATURE_PAUSEFILTER) &&
|
||||
guest_cpuid_has(vcpu, X86_FEATURE_PAUSEFILTER);
|
||||
|
||||
svm->pause_threshold_enabled = kvm_cpu_cap_has(X86_FEATURE_PFTHRESHOLD) &&
|
||||
guest_cpuid_has(vcpu, X86_FEATURE_PFTHRESHOLD);
|
||||
|
||||
svm->vgif_enabled = vgif && guest_cpuid_has(vcpu, X86_FEATURE_VGIF);
|
||||
|
||||
svm_recalc_instruction_intercepts(vcpu, svm);
|
||||
|
||||
@@ -3963,13 +4059,6 @@ static void svm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
|
||||
*/
|
||||
if (guest_cpuid_has(vcpu, X86_FEATURE_X2APIC))
|
||||
kvm_set_apicv_inhibit(kvm, APICV_INHIBIT_REASON_X2APIC);
|
||||
|
||||
/*
|
||||
* Currently, AVIC does not work with nested virtualization.
|
||||
* So, we disable AVIC when cpuid for SVM is set in the L1 guest.
|
||||
*/
|
||||
if (nested && guest_cpuid_has(vcpu, X86_FEATURE_SVM))
|
||||
kvm_set_apicv_inhibit(kvm, APICV_INHIBIT_REASON_NESTED);
|
||||
}
|
||||
init_vmcb_after_set_cpuid(vcpu);
|
||||
}
|
||||
@@ -4224,7 +4313,7 @@ static int svm_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
|
||||
svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP];
|
||||
svm->vmcb->save.rip = vcpu->arch.regs[VCPU_REGS_RIP];
|
||||
|
||||
ret = nested_svm_vmexit(svm);
|
||||
ret = nested_svm_simple_vmexit(svm, SVM_EXIT_SW);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
@@ -4321,7 +4410,7 @@ static void svm_enable_smi_window(struct kvm_vcpu *vcpu)
|
||||
struct vcpu_svm *svm = to_svm(vcpu);
|
||||
|
||||
if (!gif_set(svm)) {
|
||||
if (vgif_enabled(svm))
|
||||
if (vgif)
|
||||
svm_set_intercept(svm, INTERCEPT_STGI);
|
||||
/* STGI will cause a vm exit */
|
||||
} else {
|
||||
@@ -4632,6 +4721,7 @@ static struct kvm_x86_ops svm_x86_ops __initdata = {
|
||||
.complete_emulated_msr = svm_complete_emulated_msr,
|
||||
|
||||
.vcpu_deliver_sipi_vector = svm_vcpu_deliver_sipi_vector,
|
||||
.vcpu_get_apicv_inhibit_reasons = avic_vcpu_get_apicv_inhibit_reasons,
|
||||
};
|
||||
|
||||
/*
|
||||
@@ -4695,6 +4785,20 @@ static __init void svm_set_cpu_caps(void)
|
||||
if (tsc_scaling)
|
||||
kvm_cpu_cap_set(X86_FEATURE_TSCRATEMSR);
|
||||
|
||||
if (vls)
|
||||
kvm_cpu_cap_set(X86_FEATURE_V_VMSAVE_VMLOAD);
|
||||
if (lbrv)
|
||||
kvm_cpu_cap_set(X86_FEATURE_LBRV);
|
||||
|
||||
if (boot_cpu_has(X86_FEATURE_PAUSEFILTER))
|
||||
kvm_cpu_cap_set(X86_FEATURE_PAUSEFILTER);
|
||||
|
||||
if (boot_cpu_has(X86_FEATURE_PFTHRESHOLD))
|
||||
kvm_cpu_cap_set(X86_FEATURE_PFTHRESHOLD);
|
||||
|
||||
if (vgif)
|
||||
kvm_cpu_cap_set(X86_FEATURE_VGIF);
|
||||
|
||||
/* Nested VM can receive #VMEXIT instead of triggering #GP */
|
||||
kvm_cpu_cap_set(X86_FEATURE_SVME_ADDR_CHK);
|
||||
}
|
||||
@@ -4806,15 +4910,20 @@ static __init int svm_hardware_setup(void)
|
||||
nrips = false;
|
||||
}
|
||||
|
||||
enable_apicv = avic = avic && npt_enabled && boot_cpu_has(X86_FEATURE_AVIC);
|
||||
enable_apicv = avic = avic && npt_enabled && (boot_cpu_has(X86_FEATURE_AVIC) || force_avic);
|
||||
|
||||
if (enable_apicv) {
|
||||
pr_info("AVIC enabled\n");
|
||||
if (!boot_cpu_has(X86_FEATURE_AVIC)) {
|
||||
pr_warn("AVIC is not supported in CPUID but force enabled");
|
||||
pr_warn("Your system might crash and burn");
|
||||
} else
|
||||
pr_info("AVIC enabled\n");
|
||||
|
||||
amd_iommu_register_ga_log_notifier(&avic_ga_log_notifier);
|
||||
} else {
|
||||
svm_x86_ops.vcpu_blocking = NULL;
|
||||
svm_x86_ops.vcpu_unblocking = NULL;
|
||||
svm_x86_ops.vcpu_get_apicv_inhibit_reasons = NULL;
|
||||
}
|
||||
|
||||
if (vls) {
|
||||
|
||||
@@ -33,6 +33,7 @@
|
||||
#define MSRPM_OFFSETS 16
|
||||
extern u32 msrpm_offsets[MSRPM_OFFSETS] __read_mostly;
|
||||
extern bool npt_enabled;
|
||||
extern int vgif;
|
||||
extern bool intercept_smi;
|
||||
|
||||
/*
|
||||
@@ -231,9 +232,14 @@ struct vcpu_svm {
|
||||
unsigned int3_injected;
|
||||
unsigned long int3_rip;
|
||||
|
||||
/* cached guest cpuid flags for faster access */
|
||||
/* optional nested SVM features that are enabled for this guest */
|
||||
bool nrips_enabled : 1;
|
||||
bool tsc_scaling_enabled : 1;
|
||||
bool v_vmload_vmsave_enabled : 1;
|
||||
bool lbrv_enabled : 1;
|
||||
bool pause_filter_enabled : 1;
|
||||
bool pause_threshold_enabled : 1;
|
||||
bool vgif_enabled : 1;
|
||||
|
||||
u32 ldr_reg;
|
||||
u32 dfr_reg;
|
||||
@@ -452,44 +458,70 @@ static inline bool svm_is_intercept(struct vcpu_svm *svm, int bit)
|
||||
return vmcb_is_intercept(&svm->vmcb->control, bit);
|
||||
}
|
||||
|
||||
static inline bool vgif_enabled(struct vcpu_svm *svm)
|
||||
static inline bool nested_vgif_enabled(struct vcpu_svm *svm)
|
||||
{
|
||||
return !!(svm->vmcb->control.int_ctl & V_GIF_ENABLE_MASK);
|
||||
return svm->vgif_enabled && (svm->nested.ctl.int_ctl & V_GIF_ENABLE_MASK);
|
||||
}
|
||||
|
||||
static inline struct vmcb *get_vgif_vmcb(struct vcpu_svm *svm)
|
||||
{
|
||||
if (!vgif)
|
||||
return NULL;
|
||||
|
||||
if (is_guest_mode(&svm->vcpu) && !nested_vgif_enabled(svm))
|
||||
return svm->nested.vmcb02.ptr;
|
||||
else
|
||||
return svm->vmcb01.ptr;
|
||||
}
|
||||
|
||||
static inline void enable_gif(struct vcpu_svm *svm)
|
||||
{
|
||||
if (vgif_enabled(svm))
|
||||
svm->vmcb->control.int_ctl |= V_GIF_MASK;
|
||||
struct vmcb *vmcb = get_vgif_vmcb(svm);
|
||||
|
||||
if (vmcb)
|
||||
vmcb->control.int_ctl |= V_GIF_MASK;
|
||||
else
|
||||
svm->vcpu.arch.hflags |= HF_GIF_MASK;
|
||||
}
|
||||
|
||||
static inline void disable_gif(struct vcpu_svm *svm)
|
||||
{
|
||||
if (vgif_enabled(svm))
|
||||
svm->vmcb->control.int_ctl &= ~V_GIF_MASK;
|
||||
struct vmcb *vmcb = get_vgif_vmcb(svm);
|
||||
|
||||
if (vmcb)
|
||||
vmcb->control.int_ctl &= ~V_GIF_MASK;
|
||||
else
|
||||
svm->vcpu.arch.hflags &= ~HF_GIF_MASK;
|
||||
}
|
||||
|
||||
static inline bool gif_set(struct vcpu_svm *svm)
|
||||
{
|
||||
if (vgif_enabled(svm))
|
||||
return !!(svm->vmcb->control.int_ctl & V_GIF_MASK);
|
||||
struct vmcb *vmcb = get_vgif_vmcb(svm);
|
||||
|
||||
if (vmcb)
|
||||
return !!(vmcb->control.int_ctl & V_GIF_MASK);
|
||||
else
|
||||
return !!(svm->vcpu.arch.hflags & HF_GIF_MASK);
|
||||
}
|
||||
|
||||
static inline bool nested_npt_enabled(struct vcpu_svm *svm)
|
||||
{
|
||||
return svm->nested.ctl.nested_ctl & SVM_NESTED_CTL_NP_ENABLE;
|
||||
}
|
||||
|
||||
/* svm.c */
|
||||
#define MSR_INVALID 0xffffffffU
|
||||
|
||||
#define DEBUGCTL_RESERVED_BITS (~(0x3fULL))
|
||||
|
||||
extern bool dump_invalid_vmcb;
|
||||
|
||||
u32 svm_msrpm_offset(u32 msr);
|
||||
u32 *svm_vcpu_alloc_msrpm(void);
|
||||
void svm_vcpu_init_msrpm(struct kvm_vcpu *vcpu, u32 *msrpm);
|
||||
void svm_vcpu_free_msrpm(u32 *msrpm);
|
||||
void svm_copy_lbrs(struct vmcb *to_vmcb, struct vmcb *from_vmcb);
|
||||
void svm_update_lbrv(struct kvm_vcpu *vcpu);
|
||||
|
||||
int svm_set_efer(struct kvm_vcpu *vcpu, u64 efer);
|
||||
void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
|
||||
@@ -574,7 +606,7 @@ extern struct kvm_x86_nested_ops svm_nested_ops;
|
||||
int avic_ga_log_notifier(u32 ga_tag);
|
||||
void avic_vm_destroy(struct kvm *kvm);
|
||||
int avic_vm_init(struct kvm *kvm);
|
||||
void avic_init_vmcb(struct vcpu_svm *svm);
|
||||
void avic_init_vmcb(struct vcpu_svm *svm, struct vmcb *vmcb);
|
||||
int avic_incomplete_ipi_interception(struct kvm_vcpu *vcpu);
|
||||
int avic_unaccelerated_access_interception(struct kvm_vcpu *vcpu);
|
||||
int avic_init_vcpu(struct vcpu_svm *svm);
|
||||
@@ -592,6 +624,7 @@ int avic_pi_update_irte(struct kvm *kvm, unsigned int host_irq,
|
||||
void avic_vcpu_blocking(struct kvm_vcpu *vcpu);
|
||||
void avic_vcpu_unblocking(struct kvm_vcpu *vcpu);
|
||||
void avic_ring_doorbell(struct kvm_vcpu *vcpu);
|
||||
unsigned long avic_vcpu_get_apicv_inhibit_reasons(struct kvm_vcpu *vcpu);
|
||||
|
||||
/* sev.c */
|
||||
|
||||
|
||||
@@ -4380,7 +4380,7 @@ static void init_vmcs(struct vcpu_vmx *vmx)
|
||||
if (cpu_has_secondary_exec_ctrls())
|
||||
secondary_exec_controls_set(vmx, vmx_secondary_exec_control(vmx));
|
||||
|
||||
if (kvm_vcpu_apicv_active(&vmx->vcpu)) {
|
||||
if (enable_apicv && lapic_in_kernel(&vmx->vcpu)) {
|
||||
vmcs_write64(EOI_EXIT_BITMAP0, 0);
|
||||
vmcs_write64(EOI_EXIT_BITMAP1, 0);
|
||||
vmcs_write64(EOI_EXIT_BITMAP2, 0);
|
||||
|
||||
@@ -961,11 +961,13 @@ void kvm_load_guest_xsave_state(struct kvm_vcpu *vcpu)
|
||||
wrmsrl(MSR_IA32_XSS, vcpu->arch.ia32_xss);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
|
||||
if (static_cpu_has(X86_FEATURE_PKU) &&
|
||||
(kvm_read_cr4_bits(vcpu, X86_CR4_PKE) ||
|
||||
(vcpu->arch.xcr0 & XFEATURE_MASK_PKRU)) &&
|
||||
vcpu->arch.pkru != vcpu->arch.host_pkru)
|
||||
vcpu->arch.pkru != vcpu->arch.host_pkru &&
|
||||
((vcpu->arch.xcr0 & XFEATURE_MASK_PKRU) ||
|
||||
kvm_read_cr4_bits(vcpu, X86_CR4_PKE)))
|
||||
write_pkru(vcpu->arch.pkru);
|
||||
#endif /* CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS */
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(kvm_load_guest_xsave_state);
|
||||
|
||||
@@ -974,13 +976,15 @@ void kvm_load_host_xsave_state(struct kvm_vcpu *vcpu)
|
||||
if (vcpu->arch.guest_state_protected)
|
||||
return;
|
||||
|
||||
#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
|
||||
if (static_cpu_has(X86_FEATURE_PKU) &&
|
||||
(kvm_read_cr4_bits(vcpu, X86_CR4_PKE) ||
|
||||
(vcpu->arch.xcr0 & XFEATURE_MASK_PKRU))) {
|
||||
((vcpu->arch.xcr0 & XFEATURE_MASK_PKRU) ||
|
||||
kvm_read_cr4_bits(vcpu, X86_CR4_PKE))) {
|
||||
vcpu->arch.pkru = rdpkru();
|
||||
if (vcpu->arch.pkru != vcpu->arch.host_pkru)
|
||||
write_pkru(vcpu->arch.host_pkru);
|
||||
}
|
||||
#endif /* CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS */
|
||||
|
||||
if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE)) {
|
||||
|
||||
@@ -2249,14 +2253,13 @@ static void kvm_write_system_time(struct kvm_vcpu *vcpu, gpa_t system_time,
|
||||
kvm_make_request(KVM_REQ_GLOBAL_CLOCK_UPDATE, vcpu);
|
||||
|
||||
/* we verify if the enable bit is set... */
|
||||
vcpu->arch.pv_time_enabled = false;
|
||||
if (!(system_time & 1))
|
||||
return;
|
||||
|
||||
if (!kvm_gfn_to_hva_cache_init(vcpu->kvm,
|
||||
&vcpu->arch.pv_time, system_time & ~1ULL,
|
||||
sizeof(struct pvclock_vcpu_time_info)))
|
||||
vcpu->arch.pv_time_enabled = true;
|
||||
if (system_time & 1) {
|
||||
kvm_gfn_to_pfn_cache_init(vcpu->kvm, &vcpu->arch.pv_time, vcpu,
|
||||
KVM_HOST_USES_PFN, system_time & ~1ULL,
|
||||
sizeof(struct pvclock_vcpu_time_info));
|
||||
} else {
|
||||
kvm_gfn_to_pfn_cache_destroy(vcpu->kvm, &vcpu->arch.pv_time);
|
||||
}
|
||||
|
||||
return;
|
||||
}
|
||||
@@ -2961,63 +2964,55 @@ u64 get_kvmclock_ns(struct kvm *kvm)
|
||||
return data.clock;
|
||||
}
|
||||
|
||||
static void kvm_setup_pvclock_page(struct kvm_vcpu *v,
|
||||
struct gfn_to_hva_cache *cache,
|
||||
unsigned int offset)
|
||||
static void kvm_setup_guest_pvclock(struct kvm_vcpu *v,
|
||||
struct gfn_to_pfn_cache *gpc,
|
||||
unsigned int offset)
|
||||
{
|
||||
struct kvm_vcpu_arch *vcpu = &v->arch;
|
||||
struct pvclock_vcpu_time_info guest_hv_clock;
|
||||
struct pvclock_vcpu_time_info *guest_hv_clock;
|
||||
unsigned long flags;
|
||||
|
||||
if (unlikely(kvm_read_guest_offset_cached(v->kvm, cache,
|
||||
&guest_hv_clock, offset, sizeof(guest_hv_clock))))
|
||||
return;
|
||||
read_lock_irqsave(&gpc->lock, flags);
|
||||
while (!kvm_gfn_to_pfn_cache_check(v->kvm, gpc, gpc->gpa,
|
||||
offset + sizeof(*guest_hv_clock))) {
|
||||
read_unlock_irqrestore(&gpc->lock, flags);
|
||||
|
||||
/* This VCPU is paused, but it's legal for a guest to read another
|
||||
if (kvm_gfn_to_pfn_cache_refresh(v->kvm, gpc, gpc->gpa,
|
||||
offset + sizeof(*guest_hv_clock)))
|
||||
return;
|
||||
|
||||
read_lock_irqsave(&gpc->lock, flags);
|
||||
}
|
||||
|
||||
guest_hv_clock = (void *)(gpc->khva + offset);
|
||||
|
||||
/*
|
||||
* This VCPU is paused, but it's legal for a guest to read another
|
||||
* VCPU's kvmclock, so we really have to follow the specification where
|
||||
* it says that version is odd if data is being modified, and even after
|
||||
* it is consistent.
|
||||
*
|
||||
* Version field updates must be kept separate. This is because
|
||||
* kvm_write_guest_cached might use a "rep movs" instruction, and
|
||||
* writes within a string instruction are weakly ordered. So there
|
||||
* are three writes overall.
|
||||
*
|
||||
* As a small optimization, only write the version field in the first
|
||||
* and third write. The vcpu->pv_time cache is still valid, because the
|
||||
* version field is the first in the struct.
|
||||
*/
|
||||
BUILD_BUG_ON(offsetof(struct pvclock_vcpu_time_info, version) != 0);
|
||||
|
||||
if (guest_hv_clock.version & 1)
|
||||
++guest_hv_clock.version; /* first time write, random junk */
|
||||
|
||||
vcpu->hv_clock.version = guest_hv_clock.version + 1;
|
||||
kvm_write_guest_offset_cached(v->kvm, cache,
|
||||
&vcpu->hv_clock, offset,
|
||||
sizeof(vcpu->hv_clock.version));
|
||||
|
||||
guest_hv_clock->version = vcpu->hv_clock.version = (guest_hv_clock->version + 1) | 1;
|
||||
smp_wmb();
|
||||
|
||||
/* retain PVCLOCK_GUEST_STOPPED if set in guest copy */
|
||||
vcpu->hv_clock.flags |= (guest_hv_clock.flags & PVCLOCK_GUEST_STOPPED);
|
||||
vcpu->hv_clock.flags |= (guest_hv_clock->flags & PVCLOCK_GUEST_STOPPED);
|
||||
|
||||
if (vcpu->pvclock_set_guest_stopped_request) {
|
||||
vcpu->hv_clock.flags |= PVCLOCK_GUEST_STOPPED;
|
||||
vcpu->pvclock_set_guest_stopped_request = false;
|
||||
}
|
||||
|
||||
trace_kvm_pvclock_update(v->vcpu_id, &vcpu->hv_clock);
|
||||
|
||||
kvm_write_guest_offset_cached(v->kvm, cache,
|
||||
&vcpu->hv_clock, offset,
|
||||
sizeof(vcpu->hv_clock));
|
||||
|
||||
memcpy(guest_hv_clock, &vcpu->hv_clock, sizeof(*guest_hv_clock));
|
||||
smp_wmb();
|
||||
|
||||
vcpu->hv_clock.version++;
|
||||
kvm_write_guest_offset_cached(v->kvm, cache,
|
||||
&vcpu->hv_clock, offset,
|
||||
sizeof(vcpu->hv_clock.version));
|
||||
guest_hv_clock->version = ++vcpu->hv_clock.version;
|
||||
|
||||
mark_page_dirty_in_slot(v->kvm, gpc->memslot, gpc->gpa >> PAGE_SHIFT);
|
||||
read_unlock_irqrestore(&gpc->lock, flags);
|
||||
|
||||
trace_kvm_pvclock_update(v->vcpu_id, &vcpu->hv_clock);
|
||||
}
|
||||
|
||||
static int kvm_guest_time_update(struct kvm_vcpu *v)
|
||||
@@ -3106,13 +3101,13 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
|
||||
|
||||
vcpu->hv_clock.flags = pvclock_flags;
|
||||
|
||||
if (vcpu->pv_time_enabled)
|
||||
kvm_setup_pvclock_page(v, &vcpu->pv_time, 0);
|
||||
if (vcpu->xen.vcpu_info_set)
|
||||
kvm_setup_pvclock_page(v, &vcpu->xen.vcpu_info_cache,
|
||||
offsetof(struct compat_vcpu_info, time));
|
||||
if (vcpu->xen.vcpu_time_info_set)
|
||||
kvm_setup_pvclock_page(v, &vcpu->xen.vcpu_time_info_cache, 0);
|
||||
if (vcpu->pv_time.active)
|
||||
kvm_setup_guest_pvclock(v, &vcpu->pv_time, 0);
|
||||
if (vcpu->xen.vcpu_info_cache.active)
|
||||
kvm_setup_guest_pvclock(v, &vcpu->xen.vcpu_info_cache,
|
||||
offsetof(struct compat_vcpu_info, time));
|
||||
if (vcpu->xen.vcpu_time_info_cache.active)
|
||||
kvm_setup_guest_pvclock(v, &vcpu->xen.vcpu_time_info_cache, 0);
|
||||
kvm_hv_setup_tsc_page(v->kvm, &vcpu->hv_clock);
|
||||
return 0;
|
||||
}
|
||||
@@ -3300,7 +3295,7 @@ static int kvm_pv_enable_async_pf_int(struct kvm_vcpu *vcpu, u64 data)
|
||||
|
||||
static void kvmclock_reset(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
vcpu->arch.pv_time_enabled = false;
|
||||
kvm_gfn_to_pfn_cache_destroy(vcpu->kvm, &vcpu->arch.pv_time);
|
||||
vcpu->arch.time = 0;
|
||||
}
|
||||
|
||||
@@ -4284,7 +4279,8 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
|
||||
r = KVM_XEN_HVM_CONFIG_HYPERCALL_MSR |
|
||||
KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL |
|
||||
KVM_XEN_HVM_CONFIG_SHARED_INFO |
|
||||
KVM_XEN_HVM_CONFIG_EVTCHN_2LEVEL;
|
||||
KVM_XEN_HVM_CONFIG_EVTCHN_2LEVEL |
|
||||
KVM_XEN_HVM_CONFIG_EVTCHN_SEND;
|
||||
if (sched_info_on())
|
||||
r |= KVM_XEN_HVM_CONFIG_RUNSTATE;
|
||||
break;
|
||||
@@ -4331,6 +4327,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
|
||||
r = boot_cpu_has(X86_FEATURE_XSAVE);
|
||||
break;
|
||||
case KVM_CAP_TSC_CONTROL:
|
||||
case KVM_CAP_VM_TSC_CONTROL:
|
||||
r = kvm_has_tsc_control;
|
||||
break;
|
||||
case KVM_CAP_X2APIC_API:
|
||||
@@ -5102,7 +5099,7 @@ static int kvm_vcpu_ioctl_x86_set_xcrs(struct kvm_vcpu *vcpu,
|
||||
*/
|
||||
static int kvm_set_guest_paused(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
if (!vcpu->arch.pv_time_enabled)
|
||||
if (!vcpu->arch.pv_time.active)
|
||||
return -EINVAL;
|
||||
vcpu->arch.pvclock_set_guest_stopped_request = true;
|
||||
kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
|
||||
@@ -6186,7 +6183,7 @@ static int kvm_arch_suspend_notifier(struct kvm *kvm)
|
||||
|
||||
mutex_lock(&kvm->lock);
|
||||
kvm_for_each_vcpu(i, vcpu, kvm) {
|
||||
if (!vcpu->arch.pv_time_enabled)
|
||||
if (!vcpu->arch.pv_time.active)
|
||||
continue;
|
||||
|
||||
ret = kvm_set_guest_paused(vcpu);
|
||||
@@ -6513,6 +6510,15 @@ set_pit2_out:
|
||||
r = kvm_xen_hvm_set_attr(kvm, &xha);
|
||||
break;
|
||||
}
|
||||
case KVM_XEN_HVM_EVTCHN_SEND: {
|
||||
struct kvm_irq_routing_xen_evtchn uxe;
|
||||
|
||||
r = -EFAULT;
|
||||
if (copy_from_user(&uxe, argp, sizeof(uxe)))
|
			goto out;
		r = kvm_xen_hvm_evtchn_send(kvm, &uxe);
		break;
	}
#endif
	case KVM_SET_CLOCK:
		r = kvm_vm_ioctl_set_clock(kvm, argp);
@@ -6520,6 +6526,28 @@ set_pit2_out:
	case KVM_GET_CLOCK:
		r = kvm_vm_ioctl_get_clock(kvm, argp);
		break;
	case KVM_SET_TSC_KHZ: {
		u32 user_tsc_khz;

		r = -EINVAL;
		user_tsc_khz = (u32)arg;

		if (kvm_has_tsc_control &&
		    user_tsc_khz >= kvm_max_guest_tsc_khz)
			goto out;

		if (user_tsc_khz == 0)
			user_tsc_khz = tsc_khz;

		WRITE_ONCE(kvm->arch.default_tsc_khz, user_tsc_khz);
		r = 0;

		goto out;
	}
	case KVM_GET_TSC_KHZ: {
		r = READ_ONCE(kvm->arch.default_tsc_khz);
		goto out;
	}
	case KVM_MEMORY_ENCRYPT_OP: {
		r = -ENOTTY;
		if (!kvm_x86_ops.mem_enc_ioctl)
@@ -8789,22 +8817,22 @@ static int kvmclock_cpu_online(unsigned int cpu)

static void kvm_timer_init(void)
{
	max_tsc_khz = tsc_khz;

	if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) {
#ifdef CONFIG_CPU_FREQ
		struct cpufreq_policy *policy;
		int cpu;
		max_tsc_khz = tsc_khz;

		cpu = get_cpu();
		policy = cpufreq_cpu_get(cpu);
		if (policy) {
			if (policy->cpuinfo.max_freq)
				max_tsc_khz = policy->cpuinfo.max_freq;
			cpufreq_cpu_put(policy);
		if (IS_ENABLED(CONFIG_CPU_FREQ)) {
			struct cpufreq_policy *policy;
			int cpu;

			cpu = get_cpu();
			policy = cpufreq_cpu_get(cpu);
			if (policy) {
				if (policy->cpuinfo.max_freq)
					max_tsc_khz = policy->cpuinfo.max_freq;
				cpufreq_cpu_put(policy);
			}
			put_cpu();
		}
		put_cpu();
#endif
		cpufreq_register_notifier(&kvmclock_cpufreq_notifier_block,
					  CPUFREQ_TRANSITION_NOTIFIER);
	}
@@ -9089,6 +9117,14 @@ bool kvm_apicv_activated(struct kvm *kvm)
}
EXPORT_SYMBOL_GPL(kvm_apicv_activated);

bool kvm_vcpu_apicv_activated(struct kvm_vcpu *vcpu)
{
	ulong vm_reasons = READ_ONCE(vcpu->kvm->arch.apicv_inhibit_reasons);
	ulong vcpu_reasons = static_call(kvm_x86_vcpu_get_apicv_inhibit_reasons)(vcpu);

	return (vm_reasons | vcpu_reasons) == 0;
}
EXPORT_SYMBOL_GPL(kvm_vcpu_apicv_activated);

static void set_or_clear_apicv_inhibit(unsigned long *inhibits,
				       enum kvm_apicv_inhibit reason, bool set)
@@ -9266,6 +9302,17 @@ static int emulator_fix_hypercall(struct x86_emulate_ctxt *ctxt)
	char instruction[3];
	unsigned long rip = kvm_rip_read(vcpu);

	/*
	 * If the quirk is disabled, synthesize a #UD and let the guest pick up
	 * the pieces.
	 */
	if (!kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_FIX_HYPERCALL_INSN)) {
		ctxt->exception.error_code_valid = false;
		ctxt->exception.vector = UD_VECTOR;
		ctxt->have_exception = true;
		return X86EMUL_PROPAGATE_FAULT;
	}

	static_call(kvm_x86_patch_hypercall)(vcpu, instruction);

	return emulator_write_emulated(ctxt, rip, instruction, 3,
@@ -9763,7 +9810,8 @@ void kvm_vcpu_update_apicv(struct kvm_vcpu *vcpu)

	down_read(&vcpu->kvm->arch.apicv_update_lock);

	activate = kvm_apicv_activated(vcpu->kvm);
	activate = kvm_vcpu_apicv_activated(vcpu);

	if (vcpu->arch.apicv_active == activate)
		goto out;

@@ -10164,7 +10212,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
	 * per-VM state, and responsing vCPUs must wait for the update
	 * to complete before servicing KVM_REQ_APICV_UPDATE.
	 */
	WARN_ON_ONCE(kvm_apicv_activated(vcpu->kvm) != kvm_vcpu_apicv_active(vcpu));
	WARN_ON_ONCE(kvm_vcpu_apicv_activated(vcpu) != kvm_vcpu_apicv_active(vcpu));

	exit_fastpath = static_call(kvm_x86_vcpu_run)(vcpu);
	if (likely(exit_fastpath != EXIT_FASTPATH_REENTER_GUEST))
@@ -10362,6 +10410,9 @@ static int vcpu_run(struct kvm_vcpu *vcpu)
			break;

		kvm_clear_request(KVM_REQ_UNBLOCK, vcpu);
		if (kvm_xen_has_pending_events(vcpu))
			kvm_xen_inject_pending_events(vcpu);

		if (kvm_cpu_has_pending_timer(vcpu))
			kvm_inject_pending_timer_irqs(vcpu);

@@ -11247,9 +11298,10 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)

	vcpu->arch.arch_capabilities = kvm_get_arch_capabilities();
	vcpu->arch.msr_platform_info = MSR_PLATFORM_INFO_CPUID_FAULT;
	kvm_xen_init_vcpu(vcpu);
	kvm_vcpu_mtrr_init(vcpu);
	vcpu_load(vcpu);
	kvm_set_tsc_khz(vcpu, max_tsc_khz);
	kvm_set_tsc_khz(vcpu, vcpu->kvm->arch.default_tsc_khz);
	kvm_vcpu_reset(vcpu, false);
	kvm_init_mmu(vcpu);
	vcpu_put(vcpu);
@@ -11304,6 +11356,7 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
	free_cpumask_var(vcpu->arch.wbinvd_dirty_mask);
	fpu_free_guest_fpstate(&vcpu->arch.guest_fpu);

	kvm_xen_destroy_vcpu(vcpu);
	kvm_hv_vcpu_uninit(vcpu);
	kvm_pmu_destroy(vcpu);
	kfree(vcpu->arch.mce_banks);
@@ -11696,6 +11749,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
	pvclock_update_vm_gtod_copy(kvm);
	raw_spin_unlock_irqrestore(&kvm->arch.tsc_write_lock, flags);

	kvm->arch.default_tsc_khz = max_tsc_khz ? : tsc_khz;
	kvm->arch.guest_can_read_msr_platform_info = true;
	kvm->arch.enable_pmu = enable_pmu;

@@ -12173,6 +12227,9 @@ static inline bool kvm_vcpu_has_events(struct kvm_vcpu *vcpu)
	    kvm_x86_ops.nested_ops->hv_timer_pending(vcpu))
		return true;

	if (kvm_xen_has_pending_events(vcpu))
		return true;

	return false;
}
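The hunks above teach kvm_arch_vm_ioctl() about KVM_SET_TSC_KHZ / KVM_GET_TSC_KHZ and store the value in kvm->arch.default_tsc_khz, which kvm_arch_vcpu_create() then applies to every subsequently created vCPU. A minimal userspace sketch of the VM-scoped usage, assuming a <linux/kvm.h> new enough to define KVM_CAP_VM_TSC_CONTROL and a vm_fd obtained from KVM_CREATE_VM; the helper name is ours, not part of the diff:

#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Hypothetical helper: vm_fd comes from a prior KVM_CREATE_VM. */
static int set_vm_default_tsc_khz(int vm_fd, unsigned long khz)
{
	/* New capability added by this series; 0 means the kernel lacks it. */
	if (ioctl(vm_fd, KVM_CHECK_EXTENSION, KVM_CAP_VM_TSC_CONTROL) <= 0)
		return -1;	/* fall back to per-vCPU KVM_SET_TSC_KHZ */

	/* Same ioctl numbers as the vCPU variants, issued on the VM fd. */
	if (ioctl(vm_fd, KVM_SET_TSC_KHZ, khz) < 0)
		return -1;

	/* Read back the default that newly created vCPUs will inherit. */
	printf("VM default TSC: %d kHz\n", ioctl(vm_fd, KVM_GET_TSC_KHZ));
	return 0;
}

Calling this before creating any vCPUs avoids issuing the ioctl once per vCPU during restore of a migrated guest.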
arch/x86/kvm/xen.c (1246 changes)
File diff suppressed because it is too large
@@ -15,16 +15,19 @@
extern struct static_key_false_deferred kvm_xen_enabled;

int __kvm_xen_has_interrupt(struct kvm_vcpu *vcpu);
void kvm_xen_inject_pending_events(struct kvm_vcpu *vcpu);
int kvm_xen_vcpu_set_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data);
int kvm_xen_vcpu_get_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data);
int kvm_xen_hvm_set_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data);
int kvm_xen_hvm_get_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data);
int kvm_xen_hvm_evtchn_send(struct kvm *kvm, struct kvm_irq_routing_xen_evtchn *evt);
int kvm_xen_write_hypercall_page(struct kvm_vcpu *vcpu, u64 data);
int kvm_xen_hvm_config(struct kvm *kvm, struct kvm_xen_hvm_config *xhc);
void kvm_xen_init_vm(struct kvm *kvm);
void kvm_xen_destroy_vm(struct kvm *kvm);

int kvm_xen_set_evtchn_fast(struct kvm_kernel_irq_routing_entry *e,
void kvm_xen_init_vcpu(struct kvm_vcpu *vcpu);
void kvm_xen_destroy_vcpu(struct kvm_vcpu *vcpu);
int kvm_xen_set_evtchn_fast(struct kvm_xen_evtchn *xe,
			    struct kvm *kvm);
int kvm_xen_setup_evtchn(struct kvm *kvm,
			 struct kvm_kernel_irq_routing_entry *e,
@@ -46,11 +49,33 @@ static inline bool kvm_xen_hypercall_enabled(struct kvm *kvm)
static inline int kvm_xen_has_interrupt(struct kvm_vcpu *vcpu)
{
	if (static_branch_unlikely(&kvm_xen_enabled.key) &&
	    vcpu->arch.xen.vcpu_info_set && vcpu->kvm->arch.xen.upcall_vector)
	    vcpu->arch.xen.vcpu_info_cache.active &&
	    vcpu->kvm->arch.xen.upcall_vector)
		return __kvm_xen_has_interrupt(vcpu);

	return 0;
}

static inline bool kvm_xen_has_pending_events(struct kvm_vcpu *vcpu)
{
	return static_branch_unlikely(&kvm_xen_enabled.key) &&
	       vcpu->arch.xen.evtchn_pending_sel;
}

static inline bool kvm_xen_timer_enabled(struct kvm_vcpu *vcpu)
{
	return !!vcpu->arch.xen.timer_virq;
}

static inline int kvm_xen_has_pending_timer(struct kvm_vcpu *vcpu)
{
	if (kvm_xen_hypercall_enabled(vcpu->kvm) && kvm_xen_timer_enabled(vcpu))
		return atomic_read(&vcpu->arch.xen.timer_pending);

	return 0;
}

void kvm_xen_inject_timer_irqs(struct kvm_vcpu *vcpu);
#else
static inline int kvm_xen_write_hypercall_page(struct kvm_vcpu *vcpu, u64 data)
{
@@ -65,6 +90,14 @@ static inline void kvm_xen_destroy_vm(struct kvm *kvm)
{
}

static inline void kvm_xen_init_vcpu(struct kvm_vcpu *vcpu)
{
}

static inline void kvm_xen_destroy_vcpu(struct kvm_vcpu *vcpu)
{
}

static inline bool kvm_xen_msr_enabled(struct kvm *kvm)
{
	return false;
@@ -79,6 +112,29 @@ static inline int kvm_xen_has_interrupt(struct kvm_vcpu *vcpu)
{
	return 0;
}

static inline void kvm_xen_inject_pending_events(struct kvm_vcpu *vcpu)
{
}

static inline bool kvm_xen_has_pending_events(struct kvm_vcpu *vcpu)
{
	return false;
}

static inline int kvm_xen_has_pending_timer(struct kvm_vcpu *vcpu)
{
	return 0;
}

static inline void kvm_xen_inject_timer_irqs(struct kvm_vcpu *vcpu)
{
}

static inline bool kvm_xen_timer_enabled(struct kvm_vcpu *vcpu)
{
	return false;
}
#endif

int kvm_xen_hypercall(struct kvm_vcpu *vcpu);

@@ -611,7 +611,8 @@ struct kvm_hv_sint {

struct kvm_xen_evtchn {
	u32 port;
	u32 vcpu;
	u32 vcpu_id;
	int vcpu_idx;
	u32 priority;
};

@@ -1144,6 +1144,7 @@ struct kvm_ppc_resize_hpt {
#define KVM_CAP_S390_MEM_OP_EXTENSION 211
#define KVM_CAP_PMU_CAPABILITY 212
#define KVM_CAP_DISABLE_QUIRKS2 213
#define KVM_CAP_VM_TSC_CONTROL 214

#ifdef KVM_CAP_IRQ_ROUTING

@@ -1232,6 +1233,7 @@ struct kvm_x86_mce {
#define KVM_XEN_HVM_CONFIG_SHARED_INFO (1 << 2)
#define KVM_XEN_HVM_CONFIG_RUNSTATE (1 << 3)
#define KVM_XEN_HVM_CONFIG_EVTCHN_2LEVEL (1 << 4)
#define KVM_XEN_HVM_CONFIG_EVTCHN_SEND (1 << 5)

struct kvm_xen_hvm_config {
	__u32 flags;
@@ -1470,7 +1472,8 @@ struct kvm_s390_ucas_mapping {
#define KVM_SET_PIT2 _IOW(KVMIO, 0xa0, struct kvm_pit_state2)
/* Available with KVM_CAP_PPC_GET_PVINFO */
#define KVM_PPC_GET_PVINFO _IOW(KVMIO, 0xa1, struct kvm_ppc_pvinfo)
/* Available with KVM_CAP_TSC_CONTROL */
/* Available with KVM_CAP_TSC_CONTROL for a vCPU, or with
 * KVM_CAP_VM_TSC_CONTROL to set defaults for a VM */
#define KVM_SET_TSC_KHZ _IO(KVMIO, 0xa2)
#define KVM_GET_TSC_KHZ _IO(KVMIO, 0xa3)
/* Available with KVM_CAP_PCI_2_3 */
@@ -1686,6 +1689,32 @@ struct kvm_xen_hvm_attr {
		struct {
			__u64 gfn;
		} shared_info;
		struct {
			__u32 send_port;
			__u32 type; /* EVTCHNSTAT_ipi / EVTCHNSTAT_interdomain */
			__u32 flags;
#define KVM_XEN_EVTCHN_DEASSIGN	(1 << 0)
#define KVM_XEN_EVTCHN_UPDATE	(1 << 1)
#define KVM_XEN_EVTCHN_RESET	(1 << 2)
			/*
			 * Events sent by the guest are either looped back to
			 * the guest itself (potentially on a different port#)
			 * or signalled via an eventfd.
			 */
			union {
				struct {
					__u32 port;
					__u32 vcpu;
					__u32 priority;
				} port;
				struct {
					__u32 port; /* Zero for eventfd */
					__s32 fd;
				} eventfd;
				__u32 padding[4];
			} deliver;
		} evtchn;
		__u32 xen_version;
		__u64 pad[8];
	} u;
};
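The new evtchn member of the kvm_xen_hvm_attr union describes where an event channel sent by the guest should be delivered: looped back to a guest port, or signalled through an eventfd. A hedged sketch of assigning a loopback port with KVM_XEN_HVM_SET_ATTR and the KVM_XEN_ATTR_TYPE_EVTCHN type defined in the next hunk; the helper name and port numbers are illustrative, and the EVTCHNSTAT value is taken from Xen's public headers rather than this diff:

#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

#ifndef EVTCHNSTAT_interdomain
#define EVTCHNSTAT_interdomain 2	/* assumed value, from Xen's public event_channel.h */
#endif

/* Illustrative: route guest sends on port 5 to port 6 on vCPU 0. */
static int assign_loopback_evtchn(int vm_fd)
{
	struct kvm_xen_hvm_attr ha;

	memset(&ha, 0, sizeof(ha));
	ha.type = KVM_XEN_ATTR_TYPE_EVTCHN;
	ha.u.evtchn.send_port = 5;
	ha.u.evtchn.type = EVTCHNSTAT_interdomain;
	ha.u.evtchn.flags = 0;		/* assign; not DEASSIGN/UPDATE/RESET */
	ha.u.evtchn.deliver.port.port = 6;
	ha.u.evtchn.deliver.port.vcpu = 0;
	ha.u.evtchn.deliver.port.priority = KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL;

	return ioctl(vm_fd, KVM_XEN_HVM_SET_ATTR, &ha);
}

Filling the eventfd side of the deliver union instead would hand the intercepted event to userspace through a file descriptor rather than re-injecting it into the guest.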
@@ -1694,11 +1723,17 @@ struct kvm_xen_hvm_attr {
#define KVM_XEN_ATTR_TYPE_LONG_MODE 0x0
#define KVM_XEN_ATTR_TYPE_SHARED_INFO 0x1
#define KVM_XEN_ATTR_TYPE_UPCALL_VECTOR 0x2
/* Available with KVM_CAP_XEN_HVM / KVM_XEN_HVM_CONFIG_EVTCHN_SEND */
#define KVM_XEN_ATTR_TYPE_EVTCHN 0x3
#define KVM_XEN_ATTR_TYPE_XEN_VERSION 0x4

/* Per-vCPU Xen attributes */
#define KVM_XEN_VCPU_GET_ATTR _IOWR(KVMIO, 0xca, struct kvm_xen_vcpu_attr)
#define KVM_XEN_VCPU_SET_ATTR _IOW(KVMIO, 0xcb, struct kvm_xen_vcpu_attr)

/* Available with KVM_CAP_XEN_HVM / KVM_XEN_HVM_CONFIG_EVTCHN_SEND */
#define KVM_XEN_HVM_EVTCHN_SEND _IOW(KVMIO, 0xd0, struct kvm_irq_routing_xen_evtchn)

#define KVM_GET_SREGS2 _IOR(KVMIO, 0xcc, struct kvm_sregs2)
#define KVM_SET_SREGS2 _IOW(KVMIO, 0xcd, struct kvm_sregs2)
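With KVM_XEN_HVM_CONFIG_EVTCHN_SEND enabled in the flags of struct kvm_xen_hvm_config, the VMM can inject event channel interrupts through the new KVM_XEN_HVM_EVTCHN_SEND ioctl instead of writing the guest's shared_info pages itself. A rough sketch; the helper name and the port/vCPU values are placeholders:

#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Raise event channel port 6, targeting vCPU 0 of the guest. */
static int kick_guest_evtchn(int vm_fd)
{
	struct kvm_irq_routing_xen_evtchn evt = {
		.port     = 6,
		.vcpu     = 0,
		.priority = KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL,
	};

	/* Advertised when KVM_CAP_XEN_HVM reports KVM_XEN_HVM_CONFIG_EVTCHN_SEND. */
	return ioctl(vm_fd, KVM_XEN_HVM_EVTCHN_SEND, &evt);
}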
@@ -1716,6 +1751,13 @@ struct kvm_xen_vcpu_attr {
			__u64 time_blocked;
			__u64 time_offline;
		} runstate;
		__u32 vcpu_id;
		struct {
			__u32 port;
			__u32 priority;
			__u64 expires_ns;
		} timer;
		__u8 vector;
	} u;
};

@@ -1726,6 +1768,10 @@ struct kvm_xen_vcpu_attr {
#define KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_CURRENT 0x3
#define KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_DATA 0x4
#define KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_ADJUST 0x5
/* Available with KVM_CAP_XEN_HVM / KVM_XEN_HVM_CONFIG_EVTCHN_SEND */
#define KVM_XEN_VCPU_ATTR_TYPE_VCPU_ID 0x6
#define KVM_XEN_VCPU_ATTR_TYPE_TIMER 0x7
#define KVM_XEN_VCPU_ATTR_TYPE_UPCALL_VECTOR 0x8

/* Secure Encrypted Virtualization command */
enum sev_cmd_id {
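The per-vCPU attribute types above expose the Xen vCPU ID and the emulated VIRQ timer so a VMM can save and restore them across migration. A hedged sketch of restoring a timer through KVM_XEN_VCPU_SET_ATTR; the helper name is ours and the port/expiry values would come from previously saved state:

#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Illustrative: re-arm the guest's Xen timer on one vCPU after migration. */
static int restore_xen_timer(int vcpu_fd, __u32 virq_port, __u64 expires_ns)
{
	struct kvm_xen_vcpu_attr va;

	memset(&va, 0, sizeof(va));
	va.type = KVM_XEN_VCPU_ATTR_TYPE_TIMER;
	va.u.timer.port = virq_port;		/* event channel bound to the guest's VIRQ_TIMER */
	va.u.timer.priority = KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL;
	va.u.timer.expires_ns = expires_ns;	/* expiry in guest nanoseconds */

	return ioctl(vcpu_fd, KVM_XEN_VCPU_SET_ATTR, &va);
}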
Some files were not shown because too many files have changed in this diff.