Merge branch 'kvm-updates/2.6.30' of git://git.kernel.org/pub/scm/virt/kvm/kvm
* 'kvm-updates/2.6.30' of git://git.kernel.org/pub/scm/virt/kvm/kvm: (113 commits)
  KVM: VMX: Don't allow uninhibited access to EFER on i386
  KVM: Correct deassign device ioctl to IOW
  KVM: ppc: e500: Fix the bug that KVM is unstable in SMP
  KVM: ppc: e500: Fix the bug that mas0 update to wrong value when read TLB entry
  KVM: Fix missing smp tlb flush in invlpg
  KVM: Get support IRQ routing entry counts
  KVM: fix sparse warnings: Should it be static?
  KVM: fix sparse warnings: context imbalance
  KVM: is_long_mode() should check for EFER.LMA
  KVM: VMX: Update necessary state when guest enters long mode
  KVM: ia64: Fix the build errors due to lack of macros related to MSI.
  ia64: Move the macro definitions related to MSI to one header file.
  KVM: fix kvm_vm_ioctl_deassign_device
  KVM: define KVM_CAP_DEVICE_DEASSIGNMENT
  KVM: ppc: Add emulation of E500 register mmucsr0
  KVM: Report IRQ injection status for MSI delivered interrupts
  KVM: MMU: Fix another largepage memory leak
  KVM: SVM: set accessed bit for VMCB segment selectors
  KVM: Report IRQ injection status to userspace.
  KVM: MMU: remove assertion in kvm_mmu_alloc_page
  ...
@@ -166,7 +166,40 @@ struct saved_vpd {
 	unsigned long vcpuid[5];
 	unsigned long vpsr;
 	unsigned long vpr;
-	unsigned long vcr[128];
+	union {
+		unsigned long vcr[128];
+		struct {
+			unsigned long dcr;
+			unsigned long itm;
+			unsigned long iva;
+			unsigned long rsv1[5];
+			unsigned long pta;
+			unsigned long rsv2[7];
+			unsigned long ipsr;
+			unsigned long isr;
+			unsigned long rsv3;
+			unsigned long iip;
+			unsigned long ifa;
+			unsigned long itir;
+			unsigned long iipa;
+			unsigned long ifs;
+			unsigned long iim;
+			unsigned long iha;
+			unsigned long rsv4[38];
+			unsigned long lid;
+			unsigned long ivr;
+			unsigned long tpr;
+			unsigned long eoi;
+			unsigned long irr[4];
+			unsigned long itv;
+			unsigned long pmv;
+			unsigned long cmcv;
+			unsigned long rsv5[5];
+			unsigned long lrr0;
+			unsigned long lrr1;
+			unsigned long rsv6[46];
+		};
+	};
 };

 struct kvm_regs {
@@ -214,4 +247,18 @@ struct kvm_sregs {
 struct kvm_fpu {
 };

+#define KVM_IA64_VCPU_STACK_SHIFT	16
+#define KVM_IA64_VCPU_STACK_SIZE	(1UL << KVM_IA64_VCPU_STACK_SHIFT)
+
+struct kvm_ia64_vcpu_stack {
+	unsigned char stack[KVM_IA64_VCPU_STACK_SIZE];
+};
+
+struct kvm_debug_exit_arch {
+};
+
+/* for KVM_SET_GUEST_DEBUG */
+struct kvm_guest_debug_arch {
+};
+
 #endif
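Editor's note (not part of the commit): the saved_vpd hunk overlays the flat vcr[128] array with an anonymous struct, so the same storage can be read either by index or by control-register name. A minimal standalone sketch of the pattern, with made-up register names:

    #include <stdio.h>

    struct regs {
        union {
            unsigned long raw[4];       /* indexed view */
            struct {
                unsigned long dcr;      /* aliases raw[0] */
                unsigned long itm;      /* aliases raw[1] */
                unsigned long iva;      /* aliases raw[2] */
                unsigned long pta;      /* aliases raw[3] */
            };                          /* named view of the same bytes */
        };
    };

    int main(void)
    {
        struct regs r = { .raw = { 1, 2, 3, 4 } };
        printf("%lu %lu\n", r.raw[1], r.itm);   /* prints "2 2" */
        return 0;
    }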
@@ -112,7 +112,11 @@
 #define VCPU_STRUCT_SHIFT	16
 #define VCPU_STRUCT_SIZE	(__IA64_UL_CONST(1) << VCPU_STRUCT_SHIFT)

-#define KVM_STK_OFFSET		VCPU_STRUCT_SIZE
+/*
+ * This must match KVM_IA64_VCPU_STACK_{SHIFT,SIZE} arch/ia64/include/asm/kvm.h
+ */
+#define KVM_STK_SHIFT		16
+#define KVM_STK_OFFSET		(__IA64_UL_CONST(1)<< KVM_STK_SHIFT)

 #define KVM_VM_STRUCT_SHIFT	19
 #define KVM_VM_STRUCT_SIZE	(__IA64_UL_CONST(1) << KVM_VM_STRUCT_SHIFT)
@@ -153,10 +157,10 @@ struct kvm_vm_data {
 	struct kvm_vcpu_data vcpu_data[KVM_MAX_VCPUS];
 };

-#define VCPU_BASE(n)	KVM_VM_DATA_BASE + \
-				offsetof(struct kvm_vm_data, vcpu_data[n])
-#define VM_BASE		KVM_VM_DATA_BASE + \
-				offsetof(struct kvm_vm_data, kvm_vm_struct)
+#define VCPU_BASE(n)	(KVM_VM_DATA_BASE + \
+				offsetof(struct kvm_vm_data, vcpu_data[n]))
+#define KVM_VM_BASE	(KVM_VM_DATA_BASE + \
+				offsetof(struct kvm_vm_data, kvm_vm_struct))
 #define KVM_MEM_DIRTY_LOG_BASE	KVM_VM_DATA_BASE + \
 				offsetof(struct kvm_vm_data, kvm_mem_dirty_log)

@@ -235,8 +239,6 @@ struct kvm_vm_data {

 struct kvm;
 struct kvm_vcpu;

-struct kvm_guest_debug{
-};

 struct kvm_mmio_req {
 	uint64_t addr;          /*  physical address */
@@ -462,6 +464,8 @@ struct kvm_arch {
 	unsigned long	metaphysical_rr4;
 	unsigned long	vmm_init_rr;

+	int		online_vcpus;
+
 	struct kvm_ioapic *vioapic;
 	struct kvm_vm_stat stat;
 	struct kvm_sal_data rdv_sal_data;
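Editor's note: the VCPU_BASE/KVM_VM_BASE hunk above is a macro-hygiene fix, and vcpu.c later in this diff relies on it when it casts (struct kvm *)KVM_VM_BASE. Without outer parentheses, an operator adjacent to the macro binds to only part of the expansion; a minimal illustration with made-up values:

    #define BAD_BASE    0x1000 + 0x10       /* unparenthesized */
    #define GOOD_BASE   (0x1000 + 0x10)

    /* 2 * BAD_BASE  expands to 2 * 0x1000 + 0x10   == 0x2010  (wrong) */
    /* 2 * GOOD_BASE expands to 2 * (0x1000 + 0x10) == 0x2020  (right) */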
@@ -0,0 +1,42 @@
+#ifndef _IA64_MSI_DEF_H
+#define _IA64_MSI_DEF_H
+
+/*
+ * Shifts for APIC-based data
+ */
+
+#define	MSI_DATA_VECTOR_SHIFT		0
+#define	MSI_DATA_VECTOR(v)		(((u8)v) << MSI_DATA_VECTOR_SHIFT)
+#define	MSI_DATA_VECTOR_MASK		0xffffff00
+
+#define	MSI_DATA_DELIVERY_MODE_SHIFT	8
+#define	MSI_DATA_DELIVERY_FIXED		(0 << MSI_DATA_DELIVERY_MODE_SHIFT)
+#define	MSI_DATA_DELIVERY_LOWPRI	(1 << MSI_DATA_DELIVERY_MODE_SHIFT)
+
+#define	MSI_DATA_LEVEL_SHIFT		14
+#define	MSI_DATA_LEVEL_DEASSERT		(0 << MSI_DATA_LEVEL_SHIFT)
+#define	MSI_DATA_LEVEL_ASSERT		(1 << MSI_DATA_LEVEL_SHIFT)
+
+#define	MSI_DATA_TRIGGER_SHIFT		15
+#define	MSI_DATA_TRIGGER_EDGE		(0 << MSI_DATA_TRIGGER_SHIFT)
+#define	MSI_DATA_TRIGGER_LEVEL		(1 << MSI_DATA_TRIGGER_SHIFT)
+
+/*
+ * Shift/mask fields for APIC-based bus address
+ */
+
+#define	MSI_ADDR_DEST_ID_SHIFT		4
+#define	MSI_ADDR_HEADER			0xfee00000
+
+#define	MSI_ADDR_DEST_ID_MASK		0xfff0000f
+#define	MSI_ADDR_DEST_ID_CPU(cpu)	((cpu) << MSI_ADDR_DEST_ID_SHIFT)
+
+#define	MSI_ADDR_DEST_MODE_SHIFT	2
+#define	MSI_ADDR_DEST_MODE_PHYS		(0 << MSI_ADDR_DEST_MODE_SHIFT)
+#define	MSI_ADDR_DEST_MODE_LOGIC	(1 << MSI_ADDR_DEST_MODE_SHIFT)
+
+#define	MSI_ADDR_REDIRECTION_SHIFT	3
+#define	MSI_ADDR_REDIRECTION_CPU	(0 << MSI_ADDR_REDIRECTION_SHIFT)
+#define	MSI_ADDR_REDIRECTION_LOWPRI	(1 << MSI_ADDR_REDIRECTION_SHIFT)
+
+#endif/* _IA64_MSI_DEF_H */
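Editor's note: this new header packs the APIC MSI address/data pair. A hedged sketch of composing a message from these fields, assuming the macros above and the kernel u32 type are in scope; the vector and destination id are invented for illustration (the real composition is in ia64_setup_msi_irq in the next file):

    u32 addr = MSI_ADDR_HEADER |            /* 0xfee00000 window */
               MSI_ADDR_DEST_MODE_PHYS |    /* physical destination mode */
               MSI_ADDR_REDIRECTION_CPU |   /* direct to one cpu */
               MSI_ADDR_DEST_ID_CPU(3);     /* example target: cpu id 3 */

    u32 data = MSI_DATA_TRIGGER_EDGE |      /* edge triggered */
               MSI_DATA_LEVEL_ASSERT |      /* assert */
               MSI_DATA_DELIVERY_FIXED |    /* fixed delivery mode */
               MSI_DATA_VECTOR(0x42);       /* example vector */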
@@ -7,44 +7,7 @@
 #include <linux/msi.h>
 #include <linux/dmar.h>
 #include <asm/smp.h>
-
-/*
- * Shifts for APIC-based data
- */
-
-#define	MSI_DATA_VECTOR_SHIFT		0
-#define	MSI_DATA_VECTOR(v)		(((u8)v) << MSI_DATA_VECTOR_SHIFT)
-#define	MSI_DATA_VECTOR_MASK		0xffffff00
-
-#define	MSI_DATA_DELIVERY_SHIFT		8
-#define	MSI_DATA_DELIVERY_FIXED		(0 << MSI_DATA_DELIVERY_SHIFT)
-#define	MSI_DATA_DELIVERY_LOWPRI	(1 << MSI_DATA_DELIVERY_SHIFT)
-
-#define	MSI_DATA_LEVEL_SHIFT		14
-#define	MSI_DATA_LEVEL_DEASSERT		(0 << MSI_DATA_LEVEL_SHIFT)
-#define	MSI_DATA_LEVEL_ASSERT		(1 << MSI_DATA_LEVEL_SHIFT)
-
-#define	MSI_DATA_TRIGGER_SHIFT		15
-#define	MSI_DATA_TRIGGER_EDGE		(0 << MSI_DATA_TRIGGER_SHIFT)
-#define	MSI_DATA_TRIGGER_LEVEL		(1 << MSI_DATA_TRIGGER_SHIFT)
-
-/*
- * Shift/mask fields for APIC-based bus address
- */
-
-#define	MSI_TARGET_CPU_SHIFT		4
-#define	MSI_ADDR_HEADER			0xfee00000
-
-#define	MSI_ADDR_DESTID_MASK		0xfff0000f
-#define	MSI_ADDR_DESTID_CPU(cpu)	((cpu) << MSI_TARGET_CPU_SHIFT)
-
-#define	MSI_ADDR_DESTMODE_SHIFT		2
-#define	MSI_ADDR_DESTMODE_PHYS		(0 << MSI_ADDR_DESTMODE_SHIFT)
-#define	MSI_ADDR_DESTMODE_LOGIC		(1 << MSI_ADDR_DESTMODE_SHIFT)
-
-#define	MSI_ADDR_REDIRECTION_SHIFT	3
-#define	MSI_ADDR_REDIRECTION_CPU	(0 << MSI_ADDR_REDIRECTION_SHIFT)
-#define	MSI_ADDR_REDIRECTION_LOWPRI	(1 << MSI_ADDR_REDIRECTION_SHIFT)
+#include <asm/msidef.h>

 static struct irq_chip ia64_msi_chip;

@@ -65,8 +28,8 @@ static void ia64_set_msi_irq_affinity(unsigned int irq,
 	read_msi_msg(irq, &msg);

 	addr = msg.address_lo;
-	addr &= MSI_ADDR_DESTID_MASK;
-	addr |= MSI_ADDR_DESTID_CPU(cpu_physical_id(cpu));
+	addr &= MSI_ADDR_DEST_ID_MASK;
+	addr |= MSI_ADDR_DEST_ID_CPU(cpu_physical_id(cpu));
 	msg.address_lo = addr;

 	data = msg.data;
@@ -98,9 +61,9 @@ int ia64_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc)
 	msg.address_hi = 0;
 	msg.address_lo =
 		MSI_ADDR_HEADER |
-		MSI_ADDR_DESTMODE_PHYS |
+		MSI_ADDR_DEST_MODE_PHYS |
 		MSI_ADDR_REDIRECTION_CPU |
-		MSI_ADDR_DESTID_CPU(dest_phys_id);
+		MSI_ADDR_DEST_ID_CPU(dest_phys_id);

 	msg.data =
 		MSI_DATA_TRIGGER_EDGE |
@@ -183,8 +146,8 @@ static void dmar_msi_set_affinity(unsigned int irq, const struct cpumask *mask)

 	msg.data &= ~MSI_DATA_VECTOR_MASK;
 	msg.data |= MSI_DATA_VECTOR(cfg->vector);
-	msg.address_lo &= ~MSI_ADDR_DESTID_MASK;
-	msg.address_lo |= MSI_ADDR_DESTID_CPU(cpu_physical_id(cpu));
+	msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
+	msg.address_lo |= MSI_ADDR_DEST_ID_CPU(cpu_physical_id(cpu));

 	dmar_msi_write(irq, &msg);
 	irq_desc[irq].affinity = *mask;
@@ -215,9 +178,9 @@ msi_compose_msg(struct pci_dev *pdev, unsigned int irq, struct msi_msg *msg)
 	msg->address_hi = 0;
 	msg->address_lo =
 		MSI_ADDR_HEADER |
-		MSI_ADDR_DESTMODE_PHYS |
+		MSI_ADDR_DEST_MODE_PHYS |
 		MSI_ADDR_REDIRECTION_CPU |
-		MSI_ADDR_DESTID_CPU(dest);
+		MSI_ADDR_DEST_ID_CPU(dest);

 	msg->data =
 		MSI_DATA_TRIGGER_EDGE |
@@ -4,6 +4,10 @@
 config HAVE_KVM
 	bool

+config HAVE_KVM_IRQCHIP
+	bool
+	default y
+
 menuconfig VIRTUALIZATION
 	bool "Virtualization"
 	depends on HAVE_KVM || IA64
@@ -23,6 +23,8 @@
 #ifndef __IRQ_H
 #define __IRQ_H

+#include "lapic.h"
+
 static inline int irqchip_in_kernel(struct kvm *kvm)
 {
 	return 1;
+114 -11
@@ -182,7 +182,7 @@ int kvm_dev_ioctl_check_extension(long ext)
 	switch (ext) {
 	case KVM_CAP_IRQCHIP:
 	case KVM_CAP_MP_STATE:
-
+	case KVM_CAP_IRQ_INJECT_STATUS:
 		r = 1;
 		break;
 	case KVM_CAP_COALESCED_MMIO:
@@ -314,7 +314,7 @@ static struct kvm_vcpu *lid_to_vcpu(struct kvm *kvm, unsigned long id,
 	union ia64_lid lid;
 	int i;

-	for (i = 0; i < KVM_MAX_VCPUS; i++) {
+	for (i = 0; i < kvm->arch.online_vcpus; i++) {
 		if (kvm->vcpus[i]) {
 			lid.val = VCPU_LID(kvm->vcpus[i]);
 			if (lid.id == id && lid.eid == eid)
@@ -388,7 +388,7 @@ static int handle_global_purge(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 	call_data.ptc_g_data = p->u.ptc_g_data;

-	for (i = 0; i < KVM_MAX_VCPUS; i++) {
+	for (i = 0; i < kvm->arch.online_vcpus; i++) {
 		if (!kvm->vcpus[i] || kvm->vcpus[i]->arch.mp_state ==
 						KVM_MP_STATE_UNINITIALIZED ||
 					vcpu == kvm->vcpus[i])
@@ -788,6 +788,8 @@ struct kvm *kvm_arch_create_vm(void)
 		return ERR_PTR(-ENOMEM);
 	kvm_init_vm(kvm);

+	kvm->arch.online_vcpus = 0;
+
 	return kvm;

 }
@@ -919,7 +921,13 @@ long kvm_arch_vm_ioctl(struct file *filp,
 		r = kvm_ioapic_init(kvm);
 		if (r)
 			goto out;
+		r = kvm_setup_default_irq_routing(kvm);
+		if (r) {
+			kfree(kvm->arch.vioapic);
+			goto out;
+		}
 		break;
+	case KVM_IRQ_LINE_STATUS:
 	case KVM_IRQ_LINE: {
 		struct kvm_irq_level irq_event;

@@ -927,10 +935,17 @@ long kvm_arch_vm_ioctl(struct file *filp,
 		if (copy_from_user(&irq_event, argp, sizeof irq_event))
 			goto out;
 		if (irqchip_in_kernel(kvm)) {
+			__s32 status;
 			mutex_lock(&kvm->lock);
-			kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
+			status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
 				    irq_event.irq, irq_event.level);
 			mutex_unlock(&kvm->lock);
+			if (ioctl == KVM_IRQ_LINE_STATUS) {
+				irq_event.status = status;
+				if (copy_to_user(argp, &irq_event,
+							sizeof irq_event))
+					goto out;
+			}
 			r = 0;
 		}
 		break;
@@ -1149,7 +1164,7 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
 	/*Initialize itc offset for vcpus*/
 	itc_offset = 0UL - ia64_getreg(_IA64_REG_AR_ITC);
-	for (i = 0; i < KVM_MAX_VCPUS; i++) {
+	for (i = 0; i < kvm->arch.online_vcpus; i++) {
 		v = (struct kvm_vcpu *)((char *)vcpu +
 				sizeof(struct kvm_vcpu_data) * i);
 		v->arch.itc_offset = itc_offset;
@@ -1283,6 +1298,8 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
 		goto fail;
 	}

+	kvm->arch.online_vcpus++;
+
 	return vcpu;
 fail:
 	return ERR_PTR(r);
@@ -1303,8 +1320,8 @@ int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
 	return -EINVAL;
 }

-int kvm_arch_vcpu_ioctl_debug_guest(struct kvm_vcpu *vcpu,
-		struct kvm_debug_guest *dbg)
+int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
+					struct kvm_guest_debug *dbg)
 {
 	return -EINVAL;
 }
@@ -1421,6 +1438,23 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
 	return 0;
 }

+int kvm_arch_vcpu_ioctl_get_stack(struct kvm_vcpu *vcpu,
+				  struct kvm_ia64_vcpu_stack *stack)
+{
+	memcpy(stack, vcpu, sizeof(struct kvm_ia64_vcpu_stack));
+	return 0;
+}
+
+int kvm_arch_vcpu_ioctl_set_stack(struct kvm_vcpu *vcpu,
+				  struct kvm_ia64_vcpu_stack *stack)
+{
+	memcpy(vcpu + 1, &stack->stack[0] + sizeof(struct kvm_vcpu),
+	       sizeof(struct kvm_ia64_vcpu_stack) - sizeof(struct kvm_vcpu));
+
+	vcpu->arch.exit_data = ((struct kvm_vcpu *)stack)->arch.exit_data;
+	return 0;
+}
+
 void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
 {

@@ -1430,9 +1464,78 @@ void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)


 long kvm_arch_vcpu_ioctl(struct file *filp,
-		unsigned int ioctl, unsigned long arg)
+			 unsigned int ioctl, unsigned long arg)
 {
-	return -EINVAL;
+	struct kvm_vcpu *vcpu = filp->private_data;
+	void __user *argp = (void __user *)arg;
+	struct kvm_ia64_vcpu_stack *stack = NULL;
+	long r;
+
+	switch (ioctl) {
+	case KVM_IA64_VCPU_GET_STACK: {
+		struct kvm_ia64_vcpu_stack __user *user_stack;
+		void __user *first_p = argp;
+
+		r = -EFAULT;
+		if (copy_from_user(&user_stack, first_p, sizeof(void *)))
+			goto out;
+
+		if (!access_ok(VERIFY_WRITE, user_stack,
+			       sizeof(struct kvm_ia64_vcpu_stack))) {
+			printk(KERN_INFO "KVM_IA64_VCPU_GET_STACK: "
+			       "Illegal user destination address for stack\n");
+			goto out;
+		}
+		stack = kzalloc(sizeof(struct kvm_ia64_vcpu_stack), GFP_KERNEL);
+		if (!stack) {
+			r = -ENOMEM;
+			goto out;
+		}
+
+		r = kvm_arch_vcpu_ioctl_get_stack(vcpu, stack);
+		if (r)
+			goto out;
+
+		if (copy_to_user(user_stack, stack,
+				 sizeof(struct kvm_ia64_vcpu_stack)))
+			goto out;
+
+		break;
+	}
+	case KVM_IA64_VCPU_SET_STACK: {
+		struct kvm_ia64_vcpu_stack __user *user_stack;
+		void __user *first_p = argp;
+
+		r = -EFAULT;
+		if (copy_from_user(&user_stack, first_p, sizeof(void *)))
+			goto out;
+
+		if (!access_ok(VERIFY_READ, user_stack,
+			       sizeof(struct kvm_ia64_vcpu_stack))) {
+			printk(KERN_INFO "KVM_IA64_VCPU_SET_STACK: "
+			       "Illegal user address for stack\n");
+			goto out;
+		}
+		stack = kmalloc(sizeof(struct kvm_ia64_vcpu_stack), GFP_KERNEL);
+		if (!stack) {
+			r = -ENOMEM;
+			goto out;
+		}
+		if (copy_from_user(stack, user_stack,
+				   sizeof(struct kvm_ia64_vcpu_stack)))
+			goto out;
+
+		r = kvm_arch_vcpu_ioctl_set_stack(vcpu, stack);
+		break;
+	}
+
+	default:
+		r = -EINVAL;
+	}
+
+out:
+	kfree(stack);
+	return r;
 }

 int kvm_arch_set_memory_region(struct kvm *kvm,
@@ -1472,7 +1575,7 @@ void kvm_arch_flush_shadow(struct kvm *kvm)
 }

 long kvm_arch_dev_ioctl(struct file *filp,
-		unsigned int ioctl, unsigned long arg)
+			unsigned int ioctl, unsigned long arg)
 {
 	return -EINVAL;
 }
@@ -1737,7 +1840,7 @@ struct kvm_vcpu *kvm_get_lowest_prio_vcpu(struct kvm *kvm, u8 vector,
 	struct kvm_vcpu *lvcpu = kvm->vcpus[0];
 	int i;

-	for (i = 1; i < KVM_MAX_VCPUS; i++) {
+	for (i = 1; i < kvm->arch.online_vcpus; i++) {
 		if (!kvm->vcpus[i])
 			continue;
 		if (lvcpu->arch.xtp > kvm->vcpus[i]->arch.xtp)
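Editor's note: the KVM_IRQ_LINE_STATUS hunks above let userspace learn whether an injected interrupt was actually delivered or coalesced. A hedged userspace sketch against the 2.6.30-era uapi, where the status field shares a union with irq in struct kvm_irq_level; the GSI number is invented:

    #include <linux/kvm.h>
    #include <sys/ioctl.h>

    int set_irq_with_status(int vm_fd)
    {
        struct kvm_irq_level irq_event = { .irq = 10, .level = 1 };

        if (ioctl(vm_fd, KVM_IRQ_LINE_STATUS, &irq_event) < 0)
            return -1;
        /* on return, irq_event.status reports the injection result */
        return irq_event.status;
    }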
+148 -3
@@ -227,6 +227,18 @@ static struct ia64_pal_retval pal_proc_get_features(struct kvm_vcpu *vcpu)
 	return result;
 }

+static struct ia64_pal_retval pal_register_info(struct kvm_vcpu *vcpu)
+{
+
+	struct ia64_pal_retval result = {0, 0, 0, 0};
+	long in0, in1, in2, in3;
+
+	kvm_get_pal_call_data(vcpu, &in0, &in1, &in2, &in3);
+	result.status = ia64_pal_register_info(in1, &result.v1, &result.v2);
+
+	return result;
+}
+
 static struct ia64_pal_retval pal_cache_info(struct kvm_vcpu *vcpu)
 {

@@ -268,8 +280,12 @@ static struct ia64_pal_retval pal_vm_summary(struct kvm_vcpu *vcpu)
 static struct ia64_pal_retval pal_vm_info(struct kvm_vcpu *vcpu)
 {
 	struct ia64_pal_retval result;
+	unsigned long in0, in1, in2, in3;

 	INIT_PAL_STATUS_UNIMPLEMENTED(result);
+	kvm_get_pal_call_data(vcpu, &in0, &in1, &in2, &in3);
+
+	result.status = ia64_pal_vm_info(in1, in2,
+			(pal_tc_info_u_t *)&result.v1, &result.v2);

 	return result;
 }
@@ -292,6 +308,108 @@ static void prepare_for_halt(struct kvm_vcpu *vcpu)
 	vcpu->arch.timer_fired = 0;
 }

+static struct ia64_pal_retval pal_perf_mon_info(struct kvm_vcpu *vcpu)
+{
+	long status;
+	unsigned long in0, in1, in2, in3, r9;
+	unsigned long pm_buffer[16];
+
+	kvm_get_pal_call_data(vcpu, &in0, &in1, &in2, &in3);
+	status = ia64_pal_perf_mon_info(pm_buffer,
+				(pal_perf_mon_info_u_t *) &r9);
+	if (status != 0) {
+		printk(KERN_DEBUG"PAL_PERF_MON_INFO fails ret=%ld\n", status);
+	} else {
+		if (in1)
+			memcpy((void *)in1, pm_buffer, sizeof(pm_buffer));
+		else {
+			status = PAL_STATUS_EINVAL;
+			printk(KERN_WARNING"Invalid parameters "
+				"for PAL call:0x%lx!\n", in0);
+		}
+	}
+	return (struct ia64_pal_retval){status, r9, 0, 0};
+}
+
+static struct ia64_pal_retval pal_halt_info(struct kvm_vcpu *vcpu)
+{
+	unsigned long in0, in1, in2, in3;
+	long status;
+	unsigned long res = 1000UL | (1000UL << 16) | (10UL << 32)
+					| (1UL << 61) | (1UL << 60);
+
+	kvm_get_pal_call_data(vcpu, &in0, &in1, &in2, &in3);
+	if (in1) {
+		memcpy((void *)in1, &res, sizeof(res));
+		status = 0;
+	} else{
+		status = PAL_STATUS_EINVAL;
+		printk(KERN_WARNING"Invalid parameters "
+			"for PAL call:0x%lx!\n", in0);
+	}
+
+	return (struct ia64_pal_retval){status, 0, 0, 0};
+}
+
+static struct ia64_pal_retval pal_mem_attrib(struct kvm_vcpu *vcpu)
+{
+	unsigned long r9;
+	long status;
+
+	status = ia64_pal_mem_attrib(&r9);
+
+	return (struct ia64_pal_retval){status, r9, 0, 0};
+}
+
+static void remote_pal_prefetch_visibility(void *v)
+{
+	s64 trans_type = (s64)v;
+	ia64_pal_prefetch_visibility(trans_type);
+}
+
+static struct ia64_pal_retval pal_prefetch_visibility(struct kvm_vcpu *vcpu)
+{
+	struct ia64_pal_retval result = {0, 0, 0, 0};
+	unsigned long in0, in1, in2, in3;
+	kvm_get_pal_call_data(vcpu, &in0, &in1, &in2, &in3);
+	result.status = ia64_pal_prefetch_visibility(in1);
+	if (result.status == 0) {
+		/* Must be performed on all remote processors
+		   in the coherence domain. */
+		smp_call_function(remote_pal_prefetch_visibility,
+					(void *)in1, 1);
+		/* Unnecessary on remote processor for other vcpus!*/
+		result.status = 1;
+	}
+	return result;
+}
+
+static void remote_pal_mc_drain(void *v)
+{
+	ia64_pal_mc_drain();
+}
+
+static struct ia64_pal_retval pal_get_brand_info(struct kvm_vcpu *vcpu)
+{
+	struct ia64_pal_retval result = {0, 0, 0, 0};
+	unsigned long in0, in1, in2, in3;
+
+	kvm_get_pal_call_data(vcpu, &in0, &in1, &in2, &in3);
+
+	if (in1 == 0 && in2) {
+		char brand_info[128];
+		result.status = ia64_pal_get_brand_info(brand_info);
+		if (result.status == PAL_STATUS_SUCCESS)
+			memcpy((void *)in2, brand_info, 128);
+	} else {
+		result.status = PAL_STATUS_REQUIRES_MEMORY;
+		printk(KERN_WARNING"Invalid parameters for "
+			"PAL call:0x%lx!\n", in0);
+	}
+
+	return result;
+}
+
 int kvm_pal_emul(struct kvm_vcpu *vcpu, struct kvm_run *run)
 {

@@ -300,14 +418,22 @@ int kvm_pal_emul(struct kvm_vcpu *vcpu, struct kvm_run *run)
 	int ret = 1;

 	gr28 = kvm_get_pal_call_index(vcpu);
-	/*printk("pal_call index:%lx\n",gr28);*/
 	switch (gr28) {
 	case PAL_CACHE_FLUSH:
 		result = pal_cache_flush(vcpu);
 		break;
+	case PAL_MEM_ATTRIB:
+		result = pal_mem_attrib(vcpu);
+		break;
 	case PAL_CACHE_SUMMARY:
 		result = pal_cache_summary(vcpu);
 		break;
+	case PAL_PERF_MON_INFO:
+		result = pal_perf_mon_info(vcpu);
+		break;
+	case PAL_HALT_INFO:
+		result = pal_halt_info(vcpu);
+		break;
 	case PAL_HALT_LIGHT:
 	{
 		INIT_PAL_STATUS_SUCCESS(result);
@@ -317,6 +443,16 @@ int kvm_pal_emul(struct kvm_vcpu *vcpu, struct kvm_run *run)
 	}
 		break;

+	case PAL_PREFETCH_VISIBILITY:
+		result = pal_prefetch_visibility(vcpu);
+		break;
+	case PAL_MC_DRAIN:
+		result.status = ia64_pal_mc_drain();
+		/* FIXME: All vcpus likely call PAL_MC_DRAIN.
+		   That causes the congestion. */
+		smp_call_function(remote_pal_mc_drain, NULL, 1);
+		break;
+
 	case PAL_FREQ_RATIOS:
 		result = pal_freq_ratios(vcpu);
 		break;
@@ -346,6 +482,9 @@ int kvm_pal_emul(struct kvm_vcpu *vcpu, struct kvm_run *run)
 		INIT_PAL_STATUS_SUCCESS(result);
 		result.v1 = (1L << 32) | 1L;
 		break;
+	case PAL_REGISTER_INFO:
+		result = pal_register_info(vcpu);
+		break;
 	case PAL_VM_PAGE_SIZE:
 		result.status = ia64_pal_vm_page_size(&result.v0,
 							&result.v1);
@@ -365,12 +504,18 @@ int kvm_pal_emul(struct kvm_vcpu *vcpu, struct kvm_run *run)
 		result.status = ia64_pal_version(
 				(pal_version_u_t *)&result.v0,
 				(pal_version_u_t *)&result.v1);
-
 		break;
 	case PAL_FIXED_ADDR:
 		result.status = PAL_STATUS_SUCCESS;
 		result.v0 = vcpu->vcpu_id;
 		break;
+	case PAL_BRAND_INFO:
+		result = pal_get_brand_info(vcpu);
+		break;
+	case PAL_GET_PSTATE:
+	case PAL_CACHE_SHARED_INFO:
+		INIT_PAL_STATUS_UNIMPLEMENTED(result);
+		break;
 	default:
 		INIT_PAL_STATUS_UNIMPLEMENTED(result);
 		printk(KERN_WARNING"kvm: Unsupported pal call,"
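Editor's note: several of the new PAL handlers above return their aggregate via a C99 compound literal, e.g. return (struct ia64_pal_retval){status, r9, 0, 0};. A standalone reduction of that idiom:

    struct retval { long status; unsigned long v0, v1, v2; };

    static struct retval make_result(long status, unsigned long v0)
    {
        /* build and return the aggregate in a single expression */
        return (struct retval){ status, v0, 0, 0 };
    }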
+49 -22
@@ -167,7 +167,6 @@ static u64 vcpu_get_itir_on_fault(struct kvm_vcpu *vcpu, u64 ifa)
 	return (rr1.val);
 }

-
 /*
  * Set vIFA & vITIR & vIHA, when vPSR.ic =1
  * Parameter:
@@ -222,8 +221,6 @@ void itlb_fault(struct kvm_vcpu *vcpu, u64 vadr)
 	inject_guest_interruption(vcpu, IA64_INST_TLB_VECTOR);
 }

-
-
 /*
  * Data Nested TLB Fault
  * @ Data Nested TLB Vector
@@ -245,7 +242,6 @@ void alt_dtlb(struct kvm_vcpu *vcpu, u64 vadr)
 	inject_guest_interruption(vcpu, IA64_ALT_DATA_TLB_VECTOR);
 }

-
 /*
  * Data TLB Fault
  * @ Data TLB vector
@@ -265,8 +261,6 @@ static void _vhpt_fault(struct kvm_vcpu *vcpu, u64 vadr)
 	/* If vPSR.ic, IFA, ITIR, IHA*/
 	set_ifa_itir_iha(vcpu, vadr, 1, 1, 1);
 	inject_guest_interruption(vcpu, IA64_VHPT_TRANS_VECTOR);
-
-
 }

 /*
@@ -279,7 +273,6 @@ void ivhpt_fault(struct kvm_vcpu *vcpu, u64 vadr)
 	_vhpt_fault(vcpu, vadr);
 }

-
 /*
  * VHPT Data Fault
  * @ VHPT Translation vector
@@ -290,8 +283,6 @@ void dvhpt_fault(struct kvm_vcpu *vcpu, u64 vadr)
 	_vhpt_fault(vcpu, vadr);
 }

-
-
 /*
  * Deal with:
  *  General Exception vector
@@ -301,7 +292,6 @@ void _general_exception(struct kvm_vcpu *vcpu)
 	inject_guest_interruption(vcpu, IA64_GENEX_VECTOR);
 }

-
 /*
  * Illegal Operation Fault
  * @ General Exception Vector
@@ -419,19 +409,16 @@ static void __page_not_present(struct kvm_vcpu *vcpu, u64 vadr)
 	inject_guest_interruption(vcpu, IA64_PAGE_NOT_PRESENT_VECTOR);
 }

-
 void data_page_not_present(struct kvm_vcpu *vcpu, u64 vadr)
 {
 	__page_not_present(vcpu, vadr);
 }

-
 void inst_page_not_present(struct kvm_vcpu *vcpu, u64 vadr)
 {
 	__page_not_present(vcpu, vadr);
 }

-
 /* Deal with
  * Data access rights vector
  */
@@ -563,22 +550,64 @@ void reflect_interruption(u64 ifa, u64 isr, u64 iim,
 	inject_guest_interruption(vcpu, vector);
 }

+static unsigned long kvm_trans_pal_call_args(struct kvm_vcpu *vcpu,
+						unsigned long arg)
+{
+	struct thash_data *data;
+	unsigned long gpa, poff;
+
+	if (!is_physical_mode(vcpu)) {
+		/* Depends on caller to provide the DTR or DTC mapping.*/
+		data = vtlb_lookup(vcpu, arg, D_TLB);
+		if (data)
+			gpa = data->page_flags & _PAGE_PPN_MASK;
+		else {
+			data = vhpt_lookup(arg);
+			if (!data)
+				return 0;
+			gpa = data->gpaddr & _PAGE_PPN_MASK;
+		}
+
+		poff = arg & (PSIZE(data->ps) - 1);
+		arg = PAGEALIGN(gpa, data->ps) | poff;
+	}
+	arg = kvm_gpa_to_mpa(arg << 1 >> 1);
+
+	return (unsigned long)__va(arg);
+}
+
 static void set_pal_call_data(struct kvm_vcpu *vcpu)
 {
 	struct exit_ctl_data *p = &vcpu->arch.exit_data;
+	unsigned long gr28 = vcpu_get_gr(vcpu, 28);
+	unsigned long gr29 = vcpu_get_gr(vcpu, 29);
+	unsigned long gr30 = vcpu_get_gr(vcpu, 30);

 	/*FIXME:For static and stacked convention, firmware
 	 * has put the parameters in gr28-gr31 before
 	 * break to vmm  !!*/

-	p->u.pal_data.gr28 = vcpu_get_gr(vcpu, 28);
-	p->u.pal_data.gr29 = vcpu_get_gr(vcpu, 29);
-	p->u.pal_data.gr30 = vcpu_get_gr(vcpu, 30);
+	switch (gr28) {
+	case PAL_PERF_MON_INFO:
+	case PAL_HALT_INFO:
+		p->u.pal_data.gr29 = kvm_trans_pal_call_args(vcpu, gr29);
+		p->u.pal_data.gr30 = vcpu_get_gr(vcpu, 30);
+		break;
+	case PAL_BRAND_INFO:
+		p->u.pal_data.gr29 = gr29;;
+		p->u.pal_data.gr30 = kvm_trans_pal_call_args(vcpu, gr30);
+		break;
+	default:
+		p->u.pal_data.gr29 = gr29;;
+		p->u.pal_data.gr30 = vcpu_get_gr(vcpu, 30);
+	}
+	p->u.pal_data.gr28 = gr28;
 	p->u.pal_data.gr31 = vcpu_get_gr(vcpu, 31);
+
 	p->exit_reason = EXIT_REASON_PAL_CALL;
 }

-static void set_pal_call_result(struct kvm_vcpu *vcpu)
+static void get_pal_call_result(struct kvm_vcpu *vcpu)
 {
 	struct exit_ctl_data *p = &vcpu->arch.exit_data;

@@ -606,7 +635,7 @@ static void set_sal_call_data(struct kvm_vcpu *vcpu)
 	p->exit_reason = EXIT_REASON_SAL_CALL;
 }

-static void set_sal_call_result(struct kvm_vcpu *vcpu)
+static void get_sal_call_result(struct kvm_vcpu *vcpu)
 {
 	struct exit_ctl_data *p = &vcpu->arch.exit_data;

@@ -629,13 +658,13 @@ void kvm_ia64_handle_break(unsigned long ifa, struct kvm_pt_regs *regs,
 	if (iim == DOMN_PAL_REQUEST) {
 		set_pal_call_data(v);
 		vmm_transition(v);
-		set_pal_call_result(v);
+		get_pal_call_result(v);
 		vcpu_increment_iip(v);
 		return;
 	} else if (iim == DOMN_SAL_REQUEST) {
 		set_sal_call_data(v);
 		vmm_transition(v);
-		set_sal_call_result(v);
+		get_sal_call_result(v);
 		vcpu_increment_iip(v);
 		return;
 	}
@@ -703,7 +732,6 @@ void vhpi_detection(struct kvm_vcpu *vcpu)
 	}
 }

-
 void leave_hypervisor_tail(void)
 {
 	struct kvm_vcpu *v = current_vcpu;
@@ -737,7 +765,6 @@ void leave_hypervisor_tail(void)
 	}
 }

-
 static inline void handle_lds(struct kvm_pt_regs *regs)
 {
 	regs->cr_ipsr |= IA64_PSR_ED;
+6 -38
@@ -112,7 +112,6 @@ void switch_to_physical_rid(struct kvm_vcpu *vcpu)
 	return;
 }

-
 void switch_to_virtual_rid(struct kvm_vcpu *vcpu)
 {
 	unsigned long psr;
@@ -166,8 +165,6 @@ void switch_mm_mode(struct kvm_vcpu *vcpu, struct ia64_psr old_psr,
 	return;
 }

-
-
 /*
  * In physical mode, insert tc/tr for region 0 and 4 uses
  * RID[0] and RID[4] which is for physical mode emulation.
@@ -269,7 +266,6 @@ static inline unsigned long fph_index(struct kvm_pt_regs *regs,
 	return rotate_reg(96, rrb_fr, (regnum - IA64_FIRST_ROTATING_FR));
 }

-
 /*
  * The inverse of the above: given bspstore and the number of
  * registers, calculate ar.bsp.
@@ -811,12 +807,15 @@ static inline void vcpu_set_itm(struct kvm_vcpu *vcpu, u64 val);
 static void vcpu_set_itc(struct kvm_vcpu *vcpu, u64 val)
 {
 	struct kvm_vcpu *v;
+	struct kvm *kvm;
 	int i;
 	long itc_offset = val - ia64_getreg(_IA64_REG_AR_ITC);
 	unsigned long vitv = VCPU(vcpu, itv);

+	kvm = (struct kvm *)KVM_VM_BASE;
+
 	if (vcpu->vcpu_id == 0) {
-		for (i = 0; i < KVM_MAX_VCPUS; i++) {
+		for (i = 0; i < kvm->arch.online_vcpus; i++) {
 			v = (struct kvm_vcpu *)((char *)vcpu +
 					sizeof(struct kvm_vcpu_data) * i);
 			VMX(v, itc_offset) = itc_offset;
@@ -1039,8 +1038,6 @@ u64 vcpu_tak(struct kvm_vcpu *vcpu, u64 vadr)
 	return key;
 }

-
-
 void kvm_thash(struct kvm_vcpu *vcpu, INST64 inst)
 {
 	unsigned long thash, vadr;
@@ -1050,7 +1047,6 @@ void kvm_thash(struct kvm_vcpu *vcpu, INST64 inst)
 	vcpu_set_gr(vcpu, inst.M46.r1, thash, 0);
 }

-
 void kvm_ttag(struct kvm_vcpu *vcpu, INST64 inst)
 {
 	unsigned long tag, vadr;
@@ -1131,7 +1127,6 @@ int vcpu_tpa(struct kvm_vcpu *vcpu, u64 vadr, u64 *padr)
 	return IA64_NO_FAULT;
 }

-
 int kvm_tpa(struct kvm_vcpu *vcpu, INST64 inst)
 {
 	unsigned long r1, r3;
@@ -1154,7 +1149,6 @@ void kvm_tak(struct kvm_vcpu *vcpu, INST64 inst)
 	vcpu_set_gr(vcpu, inst.M46.r1, r1, 0);
 }

-
 /************************************
  * Insert/Purge translation register/cache
  ************************************/
@@ -1385,7 +1379,6 @@ void kvm_mov_to_ar_reg(struct kvm_vcpu *vcpu, INST64 inst)
 	vcpu_set_itc(vcpu, r2);
 }

-
 void kvm_mov_from_ar_reg(struct kvm_vcpu *vcpu, INST64 inst)
 {
 	unsigned long r1;
@@ -1393,8 +1386,9 @@ void kvm_mov_from_ar_reg(struct kvm_vcpu *vcpu, INST64 inst)
 	r1 = vcpu_get_itc(vcpu);
 	vcpu_set_gr(vcpu, inst.M31.r1, r1, 0);
 }
+
 /**************************************************************************
-  struct kvm_vcpu*protection key register access routines
+  struct kvm_vcpu protection key register access routines
 **************************************************************************/

 unsigned long vcpu_get_pkr(struct kvm_vcpu *vcpu, unsigned long reg)
@@ -1407,20 +1401,6 @@ void vcpu_set_pkr(struct kvm_vcpu *vcpu, unsigned long reg, unsigned long val)
 	ia64_set_pkr(reg, val);
 }

-
-unsigned long vcpu_get_itir_on_fault(struct kvm_vcpu *vcpu, unsigned long ifa)
-{
-	union ia64_rr rr, rr1;
-
-	rr.val = vcpu_get_rr(vcpu, ifa);
-	rr1.val = 0;
-	rr1.ps = rr.ps;
-	rr1.rid = rr.rid;
-	return (rr1.val);
-}
-
-
 /********************************
  * Moves to privileged registers
  ********************************/
@@ -1464,8 +1444,6 @@ unsigned long vcpu_set_rr(struct kvm_vcpu *vcpu, unsigned long reg,
 	return (IA64_NO_FAULT);
 }

-
-
 void kvm_mov_to_rr(struct kvm_vcpu *vcpu, INST64 inst)
 {
 	unsigned long r3, r2;
@@ -1510,8 +1488,6 @@ void kvm_mov_to_pkr(struct kvm_vcpu *vcpu, INST64 inst)
 	vcpu_set_pkr(vcpu, r3, r2);
 }

-
-
 void kvm_mov_from_rr(struct kvm_vcpu *vcpu, INST64 inst)
 {
 	unsigned long r3, r1;
@@ -1557,7 +1533,6 @@ void kvm_mov_from_pmc(struct kvm_vcpu *vcpu, INST64 inst)
 	vcpu_set_gr(vcpu, inst.M43.r1, r1, 0);
 }

-
 unsigned long vcpu_get_cpuid(struct kvm_vcpu *vcpu, unsigned long reg)
 {
 	/* FIXME: This could get called as a result of a rsvd-reg fault */
@@ -1609,7 +1584,6 @@ unsigned long kvm_mov_to_cr(struct kvm_vcpu *vcpu, INST64 inst)
 	return 0;
 }

-
 unsigned long kvm_mov_from_cr(struct kvm_vcpu *vcpu, INST64 inst)
 {
 	unsigned long tgt = inst.M33.r1;
@@ -1633,8 +1607,6 @@ unsigned long kvm_mov_from_cr(struct kvm_vcpu *vcpu, INST64 inst)
 	return 0;
 }

-
-
 void vcpu_set_psr(struct kvm_vcpu *vcpu, unsigned long val)
 {

@@ -1776,9 +1748,6 @@ void vcpu_bsw1(struct kvm_vcpu *vcpu)
 	}
 }

-
-
-
 void vcpu_rfi(struct kvm_vcpu *vcpu)
 {
 	unsigned long ifs, psr;
@@ -1796,7 +1765,6 @@ void vcpu_rfi(struct kvm_vcpu *vcpu)
 	regs->cr_iip = VCPU(vcpu, iip);
 }

-
 /*
    VPSR can't keep track of below bits of guest PSR
    This function gets guest PSR
@@ -703,7 +703,7 @@ extern u64 guest_vhpt_lookup(u64 iha, u64 *pte);
 extern void thash_purge_entries(struct kvm_vcpu *v, u64 va, u64 ps);
 extern void thash_purge_entries_remote(struct kvm_vcpu *v, u64 va, u64 ps);
 extern u64 translate_phy_pte(u64 *pte, u64 itir, u64 va);
-extern int thash_purge_and_insert(struct kvm_vcpu *v, u64 pte,
+extern void thash_purge_and_insert(struct kvm_vcpu *v, u64 pte,
 		u64 itir, u64 ifa, int type);
 extern void thash_purge_all(struct kvm_vcpu *v);
 extern struct thash_data *vtlb_lookup(struct kvm_vcpu *v,
@@ -738,7 +738,7 @@ void kvm_init_vhpt(struct kvm_vcpu *v);
 void thash_init(struct thash_cb *hcb, u64 sz);

 void panic_vm(struct kvm_vcpu *v, const char *fmt, ...);
-
+u64 kvm_gpa_to_mpa(u64 gpa);

 extern u64 ia64_call_vsa(u64 proc, u64 arg1, u64 arg2, u64 arg3,
 		u64 arg4, u64 arg5, u64 arg6, u64 arg7);
+18 -28
@@ -164,11 +164,11 @@ static void vhpt_insert(u64 pte, u64 itir, u64 ifa, u64 gpte)
 	unsigned long ps, gpaddr;

 	ps = itir_ps(itir);
-	gpaddr = ((gpte & _PAGE_PPN_MASK) >> ps << ps) |
-				(ifa & ((1UL << ps) - 1));
-
 	rr.val = ia64_get_rr(ifa);

+	gpaddr = ((gpte & _PAGE_PPN_MASK) >> ps << ps) |
+				(ifa & ((1UL << ps) - 1));
+
 	head = (struct thash_data *)ia64_thash(ifa);
 	head->etag = INVALID_TI_TAG;
 	ia64_mf();
@@ -412,16 +412,14 @@ u64 translate_phy_pte(u64 *pte, u64 itir, u64 va)

 /*
  * Purge overlap TCs and then insert the new entry to emulate itc ops.
- * Notes: Only TC entry can purge and insert.
- * 1 indicates this is MMIO
+ * Notes: Only TC entry can purge and insert.
  */
-int thash_purge_and_insert(struct kvm_vcpu *v, u64 pte, u64 itir,
+void thash_purge_and_insert(struct kvm_vcpu *v, u64 pte, u64 itir,
 						u64 ifa, int type)
 {
 	u64 ps;
 	u64 phy_pte, io_mask, index;
 	union ia64_rr vrr, mrr;
-	int ret = 0;

 	ps = itir_ps(itir);
 	vrr.val = vcpu_get_rr(v, ifa);
@@ -441,25 +439,19 @@ int thash_purge_and_insert(struct kvm_vcpu *v, u64 pte, u64 itir,
 		phy_pte &= ~_PAGE_MA_MASK;
 	}

-	if (pte & VTLB_PTE_IO)
-		ret = 1;
-
 	vtlb_purge(v, ifa, ps);
 	vhpt_purge(v, ifa, ps);

-	if (ps == mrr.ps) {
-		if (!(pte&VTLB_PTE_IO)) {
-			vhpt_insert(phy_pte, itir, ifa, pte);
-		} else {
-			vtlb_insert(v, pte, itir, ifa);
-			vcpu_quick_region_set(VMX(v, tc_regions), ifa);
-		}
-	} else if (ps > mrr.ps) {
+	if ((ps != mrr.ps) || (pte & VTLB_PTE_IO)) {
 		vtlb_insert(v, pte, itir, ifa);
 		vcpu_quick_region_set(VMX(v, tc_regions), ifa);
-		if (!(pte&VTLB_PTE_IO))
-			vhpt_insert(phy_pte, itir, ifa, pte);
-	} else {
+	}
+	if (pte & VTLB_PTE_IO)
+		return;
+
+	if (ps >= mrr.ps)
+		vhpt_insert(phy_pte, itir, ifa, pte);
+	else {
 		u64 psr;
 		phy_pte &= ~PAGE_FLAGS_RV_MASK;
 		psr = ia64_clear_ic();
@@ -469,7 +461,6 @@ int thash_purge_and_insert(struct kvm_vcpu *v, u64 pte, u64 itir,
 	if (!(pte&VTLB_PTE_IO))
 		mark_pages_dirty(v, pte, ps);

-	return ret;
 }

 /*
@@ -509,7 +500,6 @@ void thash_purge_all(struct kvm_vcpu *v)
 	local_flush_tlb_all();
 }

-
 /*
  * Lookup the hash table and its collision chain to find an entry
  * covering this address rid:va or the entry.
@@ -517,7 +507,6 @@ void thash_purge_all(struct kvm_vcpu *v)
  * INPUT:
  *  in: TLB format for both VHPT & TLB.
  */
-
 struct thash_data *vtlb_lookup(struct kvm_vcpu *v, u64 va, int is_data)
 {
 	struct thash_data *cch;
@@ -547,7 +536,6 @@ struct thash_data *vtlb_lookup(struct kvm_vcpu *v, u64 va, int is_data)
 	return NULL;
 }

-
 /*
  * Initialize internal control data before service.
  */
@@ -573,6 +561,10 @@ void thash_init(struct thash_cb *hcb, u64 sz)
 u64 kvm_get_mpt_entry(u64 gpfn)
 {
 	u64 *base = (u64 *) KVM_P2M_BASE;
+
+	if (gpfn >= (KVM_P2M_SIZE >> 3))
+		panic_vm(current_vcpu, "Invalid gpfn =%lx\n", gpfn);
+
 	return *(base + gpfn);
 }

@@ -589,7 +581,6 @@ u64 kvm_gpa_to_mpa(u64 gpa)
 	return (pte >> PAGE_SHIFT << PAGE_SHIFT) | (gpa & ~PAGE_MASK);
 }

-
 /*
  * Fetch guest bundle code.
  * INPUT:
@@ -631,7 +622,6 @@ int fetch_code(struct kvm_vcpu *vcpu, u64 gip, IA64_BUNDLE *pbundle)
 	return IA64_NO_FAULT;
 }

-
 void kvm_init_vhpt(struct kvm_vcpu *v)
 {
 	v->arch.vhpt.num = VHPT_NUM_ENTRIES;
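Editor's note: the thash_purge_and_insert() rewrite above is a guard-clause refactor; the tracked ret flag (whose value callers ignored) becomes an early return, so the function drops to void. The shape of the transformation, reduced to a standalone sketch:

    static int process_flag_style(int io)       /* before */
    {
        int ret = 0;
        if (io)
            ret = 1;
        /* ... work interleaved with flag checks ... */
        return ret;
    }

    static void process_guard_style(int io)     /* after */
    {
        if (io)
            return;     /* callers ignored the flag, so drop it */
        /* ... straight-line work ... */
    }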
@@ -52,4 +52,11 @@ struct kvm_fpu {
 	__u64 fpr[32];
 };

+struct kvm_debug_exit_arch {
+};
+
+/* for KVM_SET_GUEST_DEBUG */
+struct kvm_guest_debug_arch {
+};
+
 #endif /* __LINUX_KVM_POWERPC_H */
@@ -28,6 +28,13 @@
  * need to find some way of advertising it. */
 #define KVM44x_GUEST_TLB_SIZE 64

+struct kvmppc_44x_tlbe {
+	u32 tid; /* Only the low 8 bits are used. */
+	u32 word0;
+	u32 word1;
+	u32 word2;
+};
+
 struct kvmppc_44x_shadow_ref {
 	struct page *page;
 	u16 gtlb_index;
@@ -42,7 +42,12 @@
 #define BOOKE_INTERRUPT_DTLB_MISS 13
 #define BOOKE_INTERRUPT_ITLB_MISS 14
 #define BOOKE_INTERRUPT_DEBUG 15
-#define BOOKE_MAX_INTERRUPT 15
+
+/* E500 */
+#define BOOKE_INTERRUPT_SPE_UNAVAIL 32
+#define BOOKE_INTERRUPT_SPE_FP_DATA 33
+#define BOOKE_INTERRUPT_SPE_FP_ROUND 34
+#define BOOKE_INTERRUPT_PERFORMANCE_MONITOR 35

 #define RESUME_FLAG_NV          (1<<0)  /* Reload guest nonvolatile state? */
 #define RESUME_FLAG_HOST        (1<<1)  /* Resume host? */
@@ -0,0 +1,67 @@
+/*
+ * Copyright (C) 2008 Freescale Semiconductor, Inc. All rights reserved.
+ *
+ * Author: Yu Liu, <yu.liu@freescale.com>
+ *
+ * Description:
+ * This file is derived from arch/powerpc/include/asm/kvm_44x.h,
+ * by Hollis Blanchard <hollisb@us.ibm.com>.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ASM_KVM_E500_H__
+#define __ASM_KVM_E500_H__
+
+#include <linux/kvm_host.h>
+
+#define BOOKE_INTERRUPT_SIZE 36
+
+#define E500_PID_NUM   3
+#define E500_TLB_NUM   2
+
+struct tlbe{
+	u32 mas1;
+	u32 mas2;
+	u32 mas3;
+	u32 mas7;
+};
+
+struct kvmppc_vcpu_e500 {
+	/* Unmodified copy of the guest's TLB. */
+	struct tlbe *guest_tlb[E500_TLB_NUM];
+	/* TLB that's actually used when the guest is running. */
+	struct tlbe *shadow_tlb[E500_TLB_NUM];
+	/* Pages which are referenced in the shadow TLB. */
+	struct page **shadow_pages[E500_TLB_NUM];
+
+	unsigned int guest_tlb_size[E500_TLB_NUM];
+	unsigned int shadow_tlb_size[E500_TLB_NUM];
+	unsigned int guest_tlb_nv[E500_TLB_NUM];
+
+	u32 host_pid[E500_PID_NUM];
+	u32 pid[E500_PID_NUM];
+
+	u32 mas0;
+	u32 mas1;
+	u32 mas2;
+	u32 mas3;
+	u32 mas4;
+	u32 mas5;
+	u32 mas6;
+	u32 mas7;
+	u32 l1csr1;
+	u32 hid0;
+	u32 hid1;
+
+	struct kvm_vcpu vcpu;
+};
+
+static inline struct kvmppc_vcpu_e500 *to_e500(struct kvm_vcpu *vcpu)
+{
+	return container_of(vcpu, struct kvmppc_vcpu_e500, vcpu);
+}
+
+#endif /* __ASM_KVM_E500_H__ */
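Editor's note: to_e500() above recovers the core-specific wrapper from the generic struct kvm_vcpu embedded inside it. A standalone userspace rendition of the container_of idiom (the kernel's own macro adds extra type checking):

    #include <stddef.h>

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct outer {
        int core_specific_state;
        struct { int id; } vcpu;    /* embedded generic part */
    };

    static struct outer *to_outer(void *vcpu_ptr)
    {
        /* walk back from the embedded member to its enclosing object */
        return container_of(vcpu_ptr, struct outer, vcpu);
    }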
@@ -64,13 +64,6 @@ struct kvm_vcpu_stat {
 	u32 halt_wakeup;
 };

-struct kvmppc_44x_tlbe {
-	u32 tid; /* Only the low 8 bits are used. */
-	u32 word0;
-	u32 word1;
-	u32 word2;
-};
-
 enum kvm_exit_types {
 	MMIO_EXITS,
 	DCR_EXITS,
@@ -118,11 +111,6 @@ struct kvm_arch {
 struct kvm_vcpu_arch {
 	u32 host_stack;
 	u32 host_pid;
-	u32 host_dbcr0;
-	u32 host_dbcr1;
-	u32 host_dbcr2;
-	u32 host_iac[4];
-	u32 host_msr;

 	u64 fpr[32];
 	ulong gpr[32];
@@ -157,7 +145,7 @@ struct kvm_vcpu_arch {
 	u32 tbu;
 	u32 tcr;
 	u32 tsr;
-	u32 ivor[16];
+	u32 ivor[64];
 	ulong ivpr;
 	u32 pir;

@@ -170,6 +158,7 @@ struct kvm_vcpu_arch {
 	u32 ccr1;
 	u32 dbcr0;
 	u32 dbcr1;
+	u32 dbsr;

 #ifdef CONFIG_KVM_EXIT_TIMING
 	struct kvmppc_exit_timing timing_exit;
@@ -200,10 +189,4 @@ struct kvm_vcpu_arch {
 	unsigned long pending_exceptions;
 };

-struct kvm_guest_debug {
-	int enabled;
-	unsigned long bp[4];
-	int singlestep;
-};
-
 #endif /* __POWERPC_KVM_HOST_H__ */
@@ -52,13 +52,19 @@ extern int kvmppc_emulate_instruction(struct kvm_run *run,
 extern int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu);
 extern void kvmppc_emulate_dec(struct kvm_vcpu *vcpu);

-/* Core-specific hooks */
-
 extern void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gpa_t gpaddr,
                            u64 asid, u32 flags, u32 max_bytes,
                            unsigned int gtlb_idx);
 extern void kvmppc_mmu_priv_switch(struct kvm_vcpu *vcpu, int usermode);
 extern void kvmppc_mmu_switch_pid(struct kvm_vcpu *vcpu, u32 pid);
+
+/* Core-specific hooks */
+
 extern void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu);
+extern int kvmppc_mmu_dtlb_index(struct kvm_vcpu *vcpu, gva_t eaddr);
+extern int kvmppc_mmu_itlb_index(struct kvm_vcpu *vcpu, gva_t eaddr);
+extern gpa_t kvmppc_mmu_xlate(struct kvm_vcpu *vcpu, unsigned int gtlb_index,
+                              gva_t eaddr);
+extern void kvmppc_mmu_dtlb_miss(struct kvm_vcpu *vcpu);
+extern void kvmppc_mmu_itlb_miss(struct kvm_vcpu *vcpu);

 extern struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm,
                                                 unsigned int id);
@@ -71,9 +77,6 @@ extern int kvmppc_core_vcpu_translate(struct kvm_vcpu *vcpu,
 extern void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
 extern void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu);

-extern void kvmppc_core_load_guest_debugstate(struct kvm_vcpu *vcpu);
-extern void kvmppc_core_load_host_debugstate(struct kvm_vcpu *vcpu);
-
 extern void kvmppc_core_deliver_interrupts(struct kvm_vcpu *vcpu);
 extern int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu);
 extern void kvmppc_core_queue_program(struct kvm_vcpu *vcpu);
@@ -75,6 +75,8 @@

 #ifndef __ASSEMBLY__

+extern unsigned int tlbcam_index;
+
 typedef struct {
 	unsigned int	id;
 	unsigned int	active;
@@ -49,7 +49,7 @@
 #include <asm/iseries/alpaca.h>
 #endif
 #ifdef CONFIG_KVM
-#include <asm/kvm_44x.h>
+#include <linux/kvm_host.h>
 #endif

 #if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
@@ -361,8 +361,6 @@ int main(void)
 	DEFINE(PTE_SIZE, sizeof(pte_t));

 #ifdef CONFIG_KVM
-	DEFINE(TLBE_BYTES, sizeof(struct kvmppc_44x_tlbe));
-
 	DEFINE(VCPU_HOST_STACK, offsetof(struct kvm_vcpu, arch.host_stack));
 	DEFINE(VCPU_HOST_PID, offsetof(struct kvm_vcpu, arch.host_pid));
 	DEFINE(VCPU_GPRS, offsetof(struct kvm_vcpu, arch.gpr));
[Diff truncated: some files in this merge are not shown because too many files changed.]