Merge tag 'kvm-arm-for-3.17' of git://git.kernel.org/pub/scm/linux/kernel/git/kvmarm/kvmarm into kvm

KVM/ARM New features for 3.17 include:
 - Fixes and code refactoring for stage2 kvm MMU unmap_range
 - Support unmapping IPAs on deleting memslots for arm and arm64
 - Support MMIO mappings in stage2 faults
 - KVM VGIC v2 emulation on GICv3 hardware
 - Big-Endian support for arm/arm64 (guest and host)
 - Debug Architecture support for arm64 (arm32 is on Christoffer's todo list)

Conflicts:
	virt/kvm/arm/vgic.c [last minute cherry-pick from 3.17 to 3.16]
Committed by Paolo Bonzini on 2014-08-05 09:47:45 +02:00
39 changed files with 2861 additions and 579 deletions
+8
@@ -168,6 +168,14 @@ Before jumping into the kernel, the following conditions must be met:
the kernel image will be entered must be initialised by software at a
higher exception level to prevent execution in an UNKNOWN state.
For systems with a GICv3 interrupt controller:
- If EL3 is present:
ICC_SRE_EL3.Enable (bit 3) must be initialised to 0b1.
ICC_SRE_EL3.SRE (bit 0) must be initialised to 0b1.
- If the kernel is entered at EL1:
ICC_SRE_EL2.Enable (bit 3) must be initialised to 0b1.
ICC_SRE_EL2.SRE (bit 0) must be initialised to 0b1.
The requirements described above for CPU mode, caches, MMUs, architected
timers, coherency and system registers apply to all CPUs. All CPUs must
enter the kernel in the same exception level.
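As an aside, here is a minimal C-style sketch of the ICC_SRE requirement listed in the hunk above; the bit positions come straight from the text (bit 0 = SRE, bit 3 = Enable), but the macro and helper names are purely illustrative and not kernel code:

#include <stdbool.h>
#include <stdint.h>

#define ICC_SRE_ELx_SRE		(1u << 0)	/* System Register Enable */
#define ICC_SRE_ELx_ENABLE	(1u << 3)	/* lower-EL SRE access enable */

/*
 * Firmware at a higher exception level must program both bits to 0b1
 * before the kernel image is entered: in ICC_SRE_EL3 if EL3 is present,
 * and in ICC_SRE_EL2 when the kernel is entered at EL1.
 */
static bool icc_sre_meets_boot_requirements(uint32_t icc_sre)
{
	const uint32_t required = ICC_SRE_ELx_SRE | ICC_SRE_ELx_ENABLE;

	return (icc_sre & required) == required;
}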
@@ -0,0 +1,79 @@
* ARM Generic Interrupt Controller, version 3
AArch64 SMP cores are often associated with a GICv3, providing Private
Peripheral Interrupts (PPI), Shared Peripheral Interrupts (SPI),
Software Generated Interrupts (SGI), and Locality-specific Peripheral
Interrupts (LPI).
Main node required properties:
- compatible : should at least contain "arm,gic-v3".
- interrupt-controller : Identifies the node as an interrupt controller
- #interrupt-cells : Specifies the number of cells needed to encode an
interrupt source. Must be a single cell with a value of at least 3.
The 1st cell is the interrupt type; 0 for SPI interrupts, 1 for PPI
interrupts. Other values are reserved for future use.
The 2nd cell contains the interrupt number for the interrupt type.
SPI interrupts are in the range [0-987]. PPI interrupts are in the
range [0-15].
The 3rd cell is the flags, encoded as follows:
bits[3:0] trigger type and level flags.
1 = edge triggered
4 = level triggered
Cells 4 and beyond are reserved for future use. When the 1st cell
has a value of 0 or 1, cells 4 and beyond act as padding, and may be
ignored. It is recommended that padding cells have a value of 0.
- reg : Specifies base physical address(es) and size of the GIC
registers, in the following order:
- GIC Distributor interface (GICD)
- GIC Redistributors (GICR), one range per redistributor region
- GIC CPU interface (GICC)
- GIC Hypervisor interface (GICH)
- GIC Virtual CPU interface (GICV)
GICC, GICH and GICV are optional.
- interrupts : Interrupt source of the VGIC maintenance interrupt.
Optional properties:
- redistributor-stride : If using padding pages, specifies the stride
of consecutive redistributors. Must be a multiple of 64kB.
- #redistributor-regions: The number of independent contiguous regions
occupied by the redistributors. Required if more than one such
region is present.
Examples:
gic: interrupt-controller@2cf00000 {
compatible = "arm,gic-v3";
#interrupt-cells = <3>;
interrupt-controller;
reg = <0x0 0x2f000000 0 0x10000>, // GICD
<0x0 0x2f100000 0 0x200000>, // GICR
<0x0 0x2c000000 0 0x2000>, // GICC
<0x0 0x2c010000 0 0x2000>, // GICH
<0x0 0x2c020000 0 0x2000>; // GICV
interrupts = <1 9 4>;
};
gic: interrupt-controller@2c010000 {
compatible = "arm,gic-v3";
#interrupt-cells = <3>;
interrupt-controller;
redistributor-stride = <0x0 0x40000>; // 256kB stride
#redistributor-regions = <2>;
reg = <0x0 0x2c010000 0 0x10000>, // GICD
<0x0 0x2d000000 0 0x800000>, // GICR 1: CPUs 0-31
<0x0 0x2e000000 0 0x800000>, // GICR 2: CPUs 32-63
<0x0 0x2c040000 0 0x2000>, // GICC
<0x0 0x2c060000 0 0x2000>, // GICH
<0x0 0x2c080000 0 0x2000>; // GICV
interrupts = <1 9 4>;
};
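For reference, a small C decoder for the three-cell interrupt specifier described by this binding (cell 1: 0 = SPI, 1 = PPI; cell 2: interrupt number; cell 3 bits[3:0]: 1 = edge, 4 = level). It only restates the encoding above and is not a devicetree parsing API:

#include <stdio.h>
#include <stdint.h>

static void decode_gic_v3_spec(const uint32_t cells[3])
{
	const char *type = cells[0] == 0 ? "SPI" :
			   cells[0] == 1 ? "PPI" : "reserved";
	const char *trigger = (cells[2] & 0xf) == 1 ? "edge" :
			      (cells[2] & 0xf) == 4 ? "level" : "unknown";

	printf("%s %u, %s triggered\n", type, cells[1], trigger);
}

int main(void)
{
	/* the VGIC maintenance interrupt from the examples above: <1 9 4> */
	uint32_t maint[3] = { 1, 9, 4 };

	decode_gic_v3_spec(maint);	/* prints: PPI 9, level triggered */
	return 0;
}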
+18
@@ -61,6 +61,24 @@
#define ARM_EXCEPTION_FIQ 6
#define ARM_EXCEPTION_HVC 7
/*
* The rr_lo_hi macro swaps a pair of registers depending on
* current endianness. It is used in conjunction with ldrd and strd
* instructions that load/store a 64-bit value from/to memory to/from
* a pair of registers which are used with the mrrc and mcrr instructions.
* If used with the ldrd/strd instructions, the a1 parameter is the first
* source/destination register and the a2 parameter is the second
* source/destination register. Note that the ldrd/strd instructions
* already swap the bytes within the words correctly according to the
endianness setting, but the order of the registers needs to be effectively
* swapped when used with the mrrc/mcrr instructions.
*/
#ifdef CONFIG_CPU_ENDIAN_BE8
#define rr_lo_hi(a1, a2) a2, a1
#else
#define rr_lo_hi(a1, a2) a1, a2
#endif
#ifndef __ASSEMBLY__
struct kvm;
struct kvm_vcpu;
+18 -4
@@ -185,9 +185,16 @@ static inline unsigned long vcpu_data_guest_to_host(struct kvm_vcpu *vcpu,
default:
return be32_to_cpu(data);
}
} else {
switch (len) {
case 1:
return data & 0xff;
case 2:
return le16_to_cpu(data & 0xffff);
default:
return le32_to_cpu(data);
}
}
return data; /* Leave LE untouched */
}
static inline unsigned long vcpu_data_host_to_guest(struct kvm_vcpu *vcpu,
@@ -203,9 +210,16 @@ static inline unsigned long vcpu_data_host_to_guest(struct kvm_vcpu *vcpu,
default:
return cpu_to_be32(data);
}
} else {
switch (len) {
case 1:
return data & 0xff;
case 2:
return cpu_to_le16(data & 0xffff);
default:
return cpu_to_le32(data);
}
}
return data; /* Leave LE untouched */
}
#endif /* __ARM_KVM_EMULATE_H__ */
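A standalone model of the conversion above may help: it assumes a little-endian host (where be16_to_cpu()/be32_to_cpu() reduce to byte swaps) and uses an illustrative guest_is_be flag in place of the real vcpu check; it is a sketch, not the kernel code:

#include <stdbool.h>
#include <byteswap.h>

/*
 * Model of vcpu_data_guest_to_host(): data written by a big-endian guest
 * is byte-swapped to the host representation according to the access
 * width; data from a little-endian guest is left untouched.
 */
static unsigned long guest_to_host(bool guest_is_be, unsigned long data,
				   unsigned int len)
{
	if (!guest_is_be)
		return data;			/* leave LE untouched */

	switch (len) {
	case 1:
		return data & 0xff;		/* single bytes have no order */
	case 2:
		return bswap_16(data & 0xffff);
	default:
		return bswap_32(data);
	}
}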
+5 -3
@@ -225,10 +225,12 @@ static inline int kvm_arch_dev_ioctl_check_extension(long ext)
return 0;
}
static inline void vgic_arch_setup(const struct vgic_params *vgic)
{
BUG_ON(vgic->type != VGIC_V2);
}
int kvm_perf_init(void);
int kvm_perf_teardown(void);
u64 kvm_arm_timer_get_reg(struct kvm_vcpu *, u64 regid);
int kvm_arm_timer_set_reg(struct kvm_vcpu *, u64 regid, u64 value);
#endif /* __ARM_KVM_HOST_H__ */
+12
@@ -127,6 +127,18 @@ static inline void kvm_set_s2pmd_writable(pmd_t *pmd)
(__boundary - 1 < (end) - 1)? __boundary: (end); \
})
static inline bool kvm_page_empty(void *ptr)
{
struct page *ptr_page = virt_to_page(ptr);
return page_count(ptr_page) == 1;
}
#define kvm_pte_table_empty(ptep) kvm_page_empty(ptep)
#define kvm_pmd_table_empty(pmdp) kvm_page_empty(pmdp)
#define kvm_pud_table_empty(pudp) (0)
struct kvm;
#define kvm_flush_dcache_to_poc(a,l) __cpuc_flush_dcache_area((a), (l))
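The emptiness check above works because the stage-2 mapping code takes a page reference for every entry installed in a table page and drops it when the entry is cleared (see the put_page() calls in the mmu.c hunk further down), so an empty table page is back to a page count of exactly one. Below is a rough userspace model of that accounting, with a plain counter standing in for struct page's reference count; the names are illustrative only:

#include <assert.h>

struct table_page {
	int refcount;		/* stands in for page_count() */
};

static void table_alloc(struct table_page *p)    { p->refcount = 1; }
static void entry_install(struct table_page *p)  { p->refcount++; }	/* get_page() on map */
static void entry_clear(struct table_page *p)    { p->refcount--; }	/* put_page() on unmap */
static int  table_empty(struct table_page *p)    { return p->refcount == 1; }

int main(void)
{
	struct table_page pte_page;

	table_alloc(&pte_page);
	entry_install(&pte_page);		/* map one PTE */
	assert(!table_empty(&pte_page));
	entry_clear(&pte_page);			/* unmap it again */
	assert(table_empty(&pte_page));		/* table can now be freed */
	return 0;
}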
+7 -7
@@ -182,13 +182,13 @@ int main(void)
DEFINE(VCPU_HYP_PC, offsetof(struct kvm_vcpu, arch.fault.hyp_pc));
#ifdef CONFIG_KVM_ARM_VGIC
DEFINE(VCPU_VGIC_CPU, offsetof(struct kvm_vcpu, arch.vgic_cpu));
DEFINE(VGIC_CPU_HCR, offsetof(struct vgic_cpu, vgic_hcr));
DEFINE(VGIC_CPU_VMCR, offsetof(struct vgic_cpu, vgic_vmcr));
DEFINE(VGIC_CPU_MISR, offsetof(struct vgic_cpu, vgic_misr));
DEFINE(VGIC_CPU_EISR, offsetof(struct vgic_cpu, vgic_eisr));
DEFINE(VGIC_CPU_ELRSR, offsetof(struct vgic_cpu, vgic_elrsr));
DEFINE(VGIC_CPU_APR, offsetof(struct vgic_cpu, vgic_apr));
DEFINE(VGIC_CPU_LR, offsetof(struct vgic_cpu, vgic_lr));
DEFINE(VGIC_V2_CPU_HCR, offsetof(struct vgic_cpu, vgic_v2.vgic_hcr));
DEFINE(VGIC_V2_CPU_VMCR, offsetof(struct vgic_cpu, vgic_v2.vgic_vmcr));
DEFINE(VGIC_V2_CPU_MISR, offsetof(struct vgic_cpu, vgic_v2.vgic_misr));
DEFINE(VGIC_V2_CPU_EISR, offsetof(struct vgic_cpu, vgic_v2.vgic_eisr));
DEFINE(VGIC_V2_CPU_ELRSR, offsetof(struct vgic_cpu, vgic_v2.vgic_elrsr));
DEFINE(VGIC_V2_CPU_APR, offsetof(struct vgic_cpu, vgic_v2.vgic_apr));
DEFINE(VGIC_V2_CPU_LR, offsetof(struct vgic_cpu, vgic_v2.vgic_lr));
DEFINE(VGIC_CPU_NR_LR, offsetof(struct vgic_cpu, nr_lr));
#ifdef CONFIG_KVM_ARM_TIMER
DEFINE(VCPU_TIMER_CNTV_CTL, offsetof(struct kvm_vcpu, arch.timer_cpu.cntv_ctl));
+1 -3
@@ -134,9 +134,7 @@ ENTRY(__hyp_stub_install_secondary)
mcr p15, 4, r7, c1, c1, 3 @ HSTR
THUMB( orr r7, #(1 << 30) ) @ HSCTLR.TE
#ifdef CONFIG_CPU_BIG_ENDIAN
orr r7, #(1 << 9) @ HSCTLR.EE
#endif
ARM_BE8(orr r7, r7, #(1 << 25)) @ HSCTLR.EE
mcr p15, 4, r7, c1, c0, 0 @ HSCTLR
mrc p15, 4, r7, c1, c1, 1 @ HDCR
+1 -1
@@ -23,7 +23,7 @@ config KVM
select HAVE_KVM_CPU_RELAX_INTERCEPT
select KVM_MMIO
select KVM_ARM_HOST
depends on ARM_VIRT_EXT && ARM_LPAE && !CPU_BIG_ENDIAN
depends on ARM_VIRT_EXT && ARM_LPAE
---help---
Support hosting virtualized guest machines. You will also
need to select one or more of the processor modules below.
+1
@@ -21,4 +21,5 @@ obj-y += kvm-arm.o init.o interrupts.o
obj-y += arm.o handle_exit.o guest.o mmu.o emulate.o reset.o
obj-y += coproc.o coproc_a15.o coproc_a7.o mmio.o psci.o perf.o
obj-$(CONFIG_KVM_ARM_VGIC) += $(KVM)/arm/vgic.o
obj-$(CONFIG_KVM_ARM_VGIC) += $(KVM)/arm/vgic-v2.o
obj-$(CONFIG_KVM_ARM_TIMER) += $(KVM)/arm/arch_timer.o
-37
@@ -155,16 +155,6 @@ int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
return VM_FAULT_SIGBUS;
}
void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
struct kvm_memory_slot *dont)
{
}
int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
unsigned long npages)
{
return 0;
}
/**
* kvm_arch_destroy_vm - destroy the VM data structure
@@ -225,33 +215,6 @@ long kvm_arch_dev_ioctl(struct file *filp,
return -EINVAL;
}
void kvm_arch_memslots_updated(struct kvm *kvm)
{
}
int kvm_arch_prepare_memory_region(struct kvm *kvm,
struct kvm_memory_slot *memslot,
struct kvm_userspace_memory_region *mem,
enum kvm_mr_change change)
{
return 0;
}
void kvm_arch_commit_memory_region(struct kvm *kvm,
struct kvm_userspace_memory_region *mem,
const struct kvm_memory_slot *old,
enum kvm_mr_change change)
{
}
void kvm_arch_flush_shadow_all(struct kvm *kvm)
{
}
void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
struct kvm_memory_slot *slot)
{
}
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
{
+79 -9
@@ -44,6 +44,31 @@ static u32 cache_levels;
/* CSSELR values; used to index KVM_REG_ARM_DEMUX_ID_CCSIDR */
#define CSSELR_MAX 12
/*
* kvm_vcpu_arch.cp15 holds cp15 registers as an array of u32, but some
* of the cp15 registers can be viewed either as a pair of u32 registers
* or as one u64 register. The current u64 encoding places the least
* significant u32 word first, followed by the most significant u32 word.
*/
static inline void vcpu_cp15_reg64_set(struct kvm_vcpu *vcpu,
const struct coproc_reg *r,
u64 val)
{
vcpu->arch.cp15[r->reg] = val & 0xffffffff;
vcpu->arch.cp15[r->reg + 1] = val >> 32;
}
static inline u64 vcpu_cp15_reg64_get(struct kvm_vcpu *vcpu,
const struct coproc_reg *r)
{
u64 val;
val = vcpu->arch.cp15[r->reg + 1];
val = val << 32;
val = val | vcpu->arch.cp15[r->reg];
return val;
}
int kvm_handle_cp10_id(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
kvm_inject_undefined(vcpu);
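A quick round-trip sketch of the vcpu_cp15_reg64_set()/vcpu_cp15_reg64_get() helpers above, using a bare u32 array in place of vcpu->arch.cp15 (standalone and illustrative only); it shows the low word landing at index reg and the high word at reg + 1:

#include <assert.h>
#include <stdint.h>

static uint32_t cp15[32];	/* stand-in for vcpu->arch.cp15[] */

static void reg64_set(unsigned int reg, uint64_t val)
{
	cp15[reg]     = val & 0xffffffff;	/* least significant word */
	cp15[reg + 1] = val >> 32;		/* most significant word  */
}

static uint64_t reg64_get(unsigned int reg)
{
	return ((uint64_t)cp15[reg + 1] << 32) | cp15[reg];
}

int main(void)
{
	reg64_set(4, 0x1122334455667788ULL);
	assert(cp15[4] == 0x55667788u);
	assert(cp15[5] == 0x11223344u);
	assert(reg64_get(4) == 0x1122334455667788ULL);
	return 0;
}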
@@ -682,17 +707,23 @@ static struct coproc_reg invariant_cp15[] = {
{ CRn( 0), CRm( 0), Op1( 1), Op2( 7), is32, NULL, get_AIDR },
};
/*
* Reads a register value from a userspace address to a kernel
* variable. Make sure that register size matches sizeof(*__val).
*/
static int reg_from_user(void *val, const void __user *uaddr, u64 id)
{
/* This Just Works because we are little endian. */
if (copy_from_user(val, uaddr, KVM_REG_SIZE(id)) != 0)
return -EFAULT;
return 0;
}
/*
* Writes a register value to a userspace address from a kernel variable.
* Make sure that register size matches sizeof(*__val).
*/
static int reg_to_user(void __user *uaddr, const void *val, u64 id)
{
/* This Just Works because we are little endian. */
if (copy_to_user(uaddr, val, KVM_REG_SIZE(id)) != 0)
return -EFAULT;
return 0;
@@ -702,6 +733,7 @@ static int get_invariant_cp15(u64 id, void __user *uaddr)
{
struct coproc_params params;
const struct coproc_reg *r;
int ret;
if (!index_to_params(id, &params))
return -ENOENT;
@@ -710,7 +742,15 @@ static int get_invariant_cp15(u64 id, void __user *uaddr)
if (!r)
return -ENOENT;
return reg_to_user(uaddr, &r->val, id);
ret = -ENOENT;
if (KVM_REG_SIZE(id) == 4) {
u32 val = r->val;
ret = reg_to_user(uaddr, &val, id);
} else if (KVM_REG_SIZE(id) == 8) {
ret = reg_to_user(uaddr, &r->val, id);
}
return ret;
}
static int set_invariant_cp15(u64 id, void __user *uaddr)
@@ -718,7 +758,7 @@ static int set_invariant_cp15(u64 id, void __user *uaddr)
struct coproc_params params;
const struct coproc_reg *r;
int err;
u64 val = 0; /* Make sure high bits are 0 for 32-bit regs */
u64 val;
if (!index_to_params(id, &params))
return -ENOENT;
@@ -726,7 +766,16 @@ static int set_invariant_cp15(u64 id, void __user *uaddr)
if (!r)
return -ENOENT;
err = reg_from_user(&val, uaddr, id);
err = -ENOENT;
if (KVM_REG_SIZE(id) == 4) {
u32 val32;
err = reg_from_user(&val32, uaddr, id);
if (!err)
val = val32;
} else if (KVM_REG_SIZE(id) == 8) {
err = reg_from_user(&val, uaddr, id);
}
if (err)
return err;
@@ -1004,6 +1053,7 @@ int kvm_arm_coproc_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
const struct coproc_reg *r;
void __user *uaddr = (void __user *)(long)reg->addr;
int ret;
if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
return demux_c15_get(reg->id, uaddr);
@@ -1015,14 +1065,24 @@ int kvm_arm_coproc_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
if (!r)
return get_invariant_cp15(reg->id, uaddr);
/* Note: copies two regs if size is 64 bit. */
return reg_to_user(uaddr, &vcpu->arch.cp15[r->reg], reg->id);
ret = -ENOENT;
if (KVM_REG_SIZE(reg->id) == 8) {
u64 val;
val = vcpu_cp15_reg64_get(vcpu, r);
ret = reg_to_user(uaddr, &val, reg->id);
} else if (KVM_REG_SIZE(reg->id) == 4) {
ret = reg_to_user(uaddr, &vcpu->arch.cp15[r->reg], reg->id);
}
return ret;
}
int kvm_arm_coproc_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
const struct coproc_reg *r;
void __user *uaddr = (void __user *)(long)reg->addr;
int ret;
if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
return demux_c15_set(reg->id, uaddr);
@@ -1034,8 +1094,18 @@ int kvm_arm_coproc_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
if (!r)
return set_invariant_cp15(reg->id, uaddr);
/* Note: copies two regs if size is 64 bit */
return reg_from_user(&vcpu->arch.cp15[r->reg], uaddr, reg->id);
ret = -ENOENT;
if (KVM_REG_SIZE(reg->id) == 8) {
u64 val;
ret = reg_from_user(&val, uaddr, reg->id);
if (!ret)
vcpu_cp15_reg64_set(vcpu, r, val);
} else if (KVM_REG_SIZE(reg->id) == 4) {
ret = reg_from_user(&vcpu->arch.cp15[r->reg], uaddr, reg->id);
}
return ret;
}
static unsigned int num_demux_regs(void)
-10
@@ -124,16 +124,6 @@ static bool is_timer_reg(u64 index)
return false;
}
int kvm_arm_timer_set_reg(struct kvm_vcpu *vcpu, u64 regid, u64 value)
{
return 0;
}
u64 kvm_arm_timer_get_reg(struct kvm_vcpu *vcpu, u64 regid)
{
return 0;
}
#else
#define NUM_TIMER_REGS 3
+2 -2
@@ -71,7 +71,7 @@ __do_hyp_init:
bne phase2 @ Yes, second stage init
@ Set the HTTBR to point to the hypervisor PGD pointer passed
mcrr p15, 4, r2, r3, c2
mcrr p15, 4, rr_lo_hi(r2, r3), c2
@ Set the HTCR and VTCR to the same shareability and cacheability
@ settings as the non-secure TTBCR and with T0SZ == 0.
@@ -137,7 +137,7 @@ phase2:
mov pc, r0
target: @ We're now in the trampoline code, switch page tables
mcrr p15, 4, r2, r3, c2
mcrr p15, 4, rr_lo_hi(r2, r3), c2
isb
@ Invalidate the old TLBs
+7 -2
@@ -52,7 +52,7 @@ ENTRY(__kvm_tlb_flush_vmid_ipa)
dsb ishst
add r0, r0, #KVM_VTTBR
ldrd r2, r3, [r0]
mcrr p15, 6, r2, r3, c2 @ Write VTTBR
mcrr p15, 6, rr_lo_hi(r2, r3), c2 @ Write VTTBR
isb
mcr p15, 0, r0, c8, c3, 0 @ TLBIALLIS (rt ignored)
dsb ish
@@ -135,7 +135,7 @@ ENTRY(__kvm_vcpu_run)
ldr r1, [vcpu, #VCPU_KVM]
add r1, r1, #KVM_VTTBR
ldrd r2, r3, [r1]
mcrr p15, 6, r2, r3, c2 @ Write VTTBR
mcrr p15, 6, rr_lo_hi(r2, r3), c2 @ Write VTTBR
@ We're all done, just restore the GPRs and go to the guest
restore_guest_regs
@@ -199,8 +199,13 @@ after_vfp_restore:
restore_host_regs
clrex @ Clear exclusive monitor
#ifndef CONFIG_CPU_ENDIAN_BE8
mov r0, r1 @ Return the return code
mov r1, #0 @ Clear upper bits in return value
#else
@ r1 already has return code
mov r0, #0 @ Clear upper bits in return value
#endif /* CONFIG_CPU_ENDIAN_BE8 */
bx lr @ return to IOCTL
/********************************************************************
+30 -16
@@ -1,4 +1,5 @@
#include <linux/irqchip/arm-gic.h>
#include <asm/assembler.h>
#define VCPU_USR_REG(_reg_nr) (VCPU_USR_REGS + (_reg_nr * 4))
#define VCPU_USR_SP (VCPU_USR_REG(13))
@@ -420,15 +421,23 @@ vcpu .req r0 @ vcpu pointer always in r0
ldr r8, [r2, #GICH_ELRSR0]
ldr r9, [r2, #GICH_ELRSR1]
ldr r10, [r2, #GICH_APR]
ARM_BE8(rev r3, r3 )
ARM_BE8(rev r4, r4 )
ARM_BE8(rev r5, r5 )
ARM_BE8(rev r6, r6 )
ARM_BE8(rev r7, r7 )
ARM_BE8(rev r8, r8 )
ARM_BE8(rev r9, r9 )
ARM_BE8(rev r10, r10 )
str r3, [r11, #VGIC_CPU_HCR]
str r4, [r11, #VGIC_CPU_VMCR]
str r5, [r11, #VGIC_CPU_MISR]
str r6, [r11, #VGIC_CPU_EISR]
str r7, [r11, #(VGIC_CPU_EISR + 4)]
str r8, [r11, #VGIC_CPU_ELRSR]
str r9, [r11, #(VGIC_CPU_ELRSR + 4)]
str r10, [r11, #VGIC_CPU_APR]
str r3, [r11, #VGIC_V2_CPU_HCR]
str r4, [r11, #VGIC_V2_CPU_VMCR]
str r5, [r11, #VGIC_V2_CPU_MISR]
str r6, [r11, #VGIC_V2_CPU_EISR]
str r7, [r11, #(VGIC_V2_CPU_EISR + 4)]
str r8, [r11, #VGIC_V2_CPU_ELRSR]
str r9, [r11, #(VGIC_V2_CPU_ELRSR + 4)]
str r10, [r11, #VGIC_V2_CPU_APR]
/* Clear GICH_HCR */
mov r5, #0
@@ -436,9 +445,10 @@ vcpu .req r0 @ vcpu pointer always in r0
/* Save list registers */
add r2, r2, #GICH_LR0
add r3, r11, #VGIC_CPU_LR
add r3, r11, #VGIC_V2_CPU_LR
ldr r4, [r11, #VGIC_CPU_NR_LR]
1: ldr r6, [r2], #4
ARM_BE8(rev r6, r6 )
str r6, [r3], #4
subs r4, r4, #1
bne 1b
@@ -463,9 +473,12 @@ vcpu .req r0 @ vcpu pointer always in r0
add r11, vcpu, #VCPU_VGIC_CPU
/* We only restore a minimal set of registers */
ldr r3, [r11, #VGIC_CPU_HCR]
ldr r4, [r11, #VGIC_CPU_VMCR]
ldr r8, [r11, #VGIC_CPU_APR]
ldr r3, [r11, #VGIC_V2_CPU_HCR]
ldr r4, [r11, #VGIC_V2_CPU_VMCR]
ldr r8, [r11, #VGIC_V2_CPU_APR]
ARM_BE8(rev r3, r3 )
ARM_BE8(rev r4, r4 )
ARM_BE8(rev r8, r8 )
str r3, [r2, #GICH_HCR]
str r4, [r2, #GICH_VMCR]
@@ -473,9 +486,10 @@ vcpu .req r0 @ vcpu pointer always in r0
/* Restore list registers */
add r2, r2, #GICH_LR0
add r3, r11, #VGIC_CPU_LR
add r3, r11, #VGIC_V2_CPU_LR
ldr r4, [r11, #VGIC_CPU_NR_LR]
1: ldr r6, [r3], #4
ARM_BE8(rev r6, r6 )
str r6, [r2], #4
subs r4, r4, #1
bne 1b
@@ -506,7 +520,7 @@ vcpu .req r0 @ vcpu pointer always in r0
mcr p15, 0, r2, c14, c3, 1 @ CNTV_CTL
isb
mrrc p15, 3, r2, r3, c14 @ CNTV_CVAL
mrrc p15, 3, rr_lo_hi(r2, r3), c14 @ CNTV_CVAL
ldr r4, =VCPU_TIMER_CNTV_CVAL
add r5, vcpu, r4
strd r2, r3, [r5]
@@ -546,12 +560,12 @@ vcpu .req r0 @ vcpu pointer always in r0
ldr r2, [r4, #KVM_TIMER_CNTVOFF]
ldr r3, [r4, #(KVM_TIMER_CNTVOFF + 4)]
mcrr p15, 4, r2, r3, c14 @ CNTVOFF
mcrr p15, 4, rr_lo_hi(r2, r3), c14 @ CNTVOFF
ldr r4, =VCPU_TIMER_CNTV_CVAL
add r5, vcpu, r4
ldrd r2, r3, [r5]
mcrr p15, 3, r2, r3, c14 @ CNTV_CVAL
mcrr p15, 3, rr_lo_hi(r2, r3), c14 @ CNTV_CVAL
isb
ldr r2, [vcpu, #VCPU_TIMER_CNTV_CTL]
+142 -80
@@ -90,104 +90,115 @@ static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc)
return p;
}
static bool page_empty(void *ptr)
static void clear_pgd_entry(struct kvm *kvm, pgd_t *pgd, phys_addr_t addr)
{
struct page *ptr_page = virt_to_page(ptr);
return page_count(ptr_page) == 1;
pud_t *pud_table __maybe_unused = pud_offset(pgd, 0);
pgd_clear(pgd);
kvm_tlb_flush_vmid_ipa(kvm, addr);
pud_free(NULL, pud_table);
put_page(virt_to_page(pgd));
}
static void clear_pud_entry(struct kvm *kvm, pud_t *pud, phys_addr_t addr)
{
if (pud_huge(*pud)) {
pud_clear(pud);
kvm_tlb_flush_vmid_ipa(kvm, addr);
} else {
pmd_t *pmd_table = pmd_offset(pud, 0);
pud_clear(pud);
kvm_tlb_flush_vmid_ipa(kvm, addr);
pmd_free(NULL, pmd_table);
}
pmd_t *pmd_table = pmd_offset(pud, 0);
VM_BUG_ON(pud_huge(*pud));
pud_clear(pud);
kvm_tlb_flush_vmid_ipa(kvm, addr);
pmd_free(NULL, pmd_table);
put_page(virt_to_page(pud));
}
static void clear_pmd_entry(struct kvm *kvm, pmd_t *pmd, phys_addr_t addr)
{
if (kvm_pmd_huge(*pmd)) {
pmd_clear(pmd);
kvm_tlb_flush_vmid_ipa(kvm, addr);
} else {
pte_t *pte_table = pte_offset_kernel(pmd, 0);
pmd_clear(pmd);
kvm_tlb_flush_vmid_ipa(kvm, addr);
pte_free_kernel(NULL, pte_table);
}
pte_t *pte_table = pte_offset_kernel(pmd, 0);
VM_BUG_ON(kvm_pmd_huge(*pmd));
pmd_clear(pmd);
kvm_tlb_flush_vmid_ipa(kvm, addr);
pte_free_kernel(NULL, pte_table);
put_page(virt_to_page(pmd));
}
static void clear_pte_entry(struct kvm *kvm, pte_t *pte, phys_addr_t addr)
static void unmap_ptes(struct kvm *kvm, pmd_t *pmd,
phys_addr_t addr, phys_addr_t end)
{
if (pte_present(*pte)) {
kvm_set_pte(pte, __pte(0));
put_page(virt_to_page(pte));
kvm_tlb_flush_vmid_ipa(kvm, addr);
}
phys_addr_t start_addr = addr;
pte_t *pte, *start_pte;
start_pte = pte = pte_offset_kernel(pmd, addr);
do {
if (!pte_none(*pte)) {
kvm_set_pte(pte, __pte(0));
put_page(virt_to_page(pte));
kvm_tlb_flush_vmid_ipa(kvm, addr);
}
} while (pte++, addr += PAGE_SIZE, addr != end);
if (kvm_pte_table_empty(start_pte))
clear_pmd_entry(kvm, pmd, start_addr);
}
static void unmap_range(struct kvm *kvm, pgd_t *pgdp,
unsigned long long start, u64 size)
static void unmap_pmds(struct kvm *kvm, pud_t *pud,
phys_addr_t addr, phys_addr_t end)
{
pgd_t *pgd;
pud_t *pud;
pmd_t *pmd;
pte_t *pte;
unsigned long long addr = start, end = start + size;
u64 next;
phys_addr_t next, start_addr = addr;
pmd_t *pmd, *start_pmd;
while (addr < end) {
pgd = pgdp + pgd_index(addr);
pud = pud_offset(pgd, addr);
pte = NULL;
if (pud_none(*pud)) {
addr = kvm_pud_addr_end(addr, end);
continue;
}
if (pud_huge(*pud)) {
/*
* If we are dealing with a huge pud, just clear it and
* move on.
*/
clear_pud_entry(kvm, pud, addr);
addr = kvm_pud_addr_end(addr, end);
continue;
}
pmd = pmd_offset(pud, addr);
if (pmd_none(*pmd)) {
addr = kvm_pmd_addr_end(addr, end);
continue;
}
if (!kvm_pmd_huge(*pmd)) {
pte = pte_offset_kernel(pmd, addr);
clear_pte_entry(kvm, pte, addr);
next = addr + PAGE_SIZE;
}
/*
* If the pmd entry is to be cleared, walk back up the ladder
*/
if (kvm_pmd_huge(*pmd) || (pte && page_empty(pte))) {
clear_pmd_entry(kvm, pmd, addr);
next = kvm_pmd_addr_end(addr, end);
if (page_empty(pmd) && !page_empty(pud)) {
clear_pud_entry(kvm, pud, addr);
next = kvm_pud_addr_end(addr, end);
start_pmd = pmd = pmd_offset(pud, addr);
do {
next = kvm_pmd_addr_end(addr, end);
if (!pmd_none(*pmd)) {
if (kvm_pmd_huge(*pmd)) {
pmd_clear(pmd);
kvm_tlb_flush_vmid_ipa(kvm, addr);
put_page(virt_to_page(pmd));
} else {
unmap_ptes(kvm, pmd, addr, next);
}
}
} while (pmd++, addr = next, addr != end);
addr = next;
}
if (kvm_pmd_table_empty(start_pmd))
clear_pud_entry(kvm, pud, start_addr);
}
static void unmap_puds(struct kvm *kvm, pgd_t *pgd,
phys_addr_t addr, phys_addr_t end)
{
phys_addr_t next, start_addr = addr;
pud_t *pud, *start_pud;
start_pud = pud = pud_offset(pgd, addr);
do {
next = kvm_pud_addr_end(addr, end);
if (!pud_none(*pud)) {
if (pud_huge(*pud)) {
pud_clear(pud);
kvm_tlb_flush_vmid_ipa(kvm, addr);
put_page(virt_to_page(pud));
} else {
unmap_pmds(kvm, pud, addr, next);
}
}
} while (pud++, addr = next, addr != end);
if (kvm_pud_table_empty(start_pud))
clear_pgd_entry(kvm, pgd, start_addr);
}
static void unmap_range(struct kvm *kvm, pgd_t *pgdp,
phys_addr_t start, u64 size)
{
pgd_t *pgd;
phys_addr_t addr = start, end = start + size;
phys_addr_t next;
pgd = pgdp + pgd_index(addr);
do {
next = kvm_pgd_addr_end(addr, end);
unmap_puds(kvm, pgd, addr, next);
} while (pgd++, addr = next, addr != end);
}
static void stage2_flush_ptes(struct kvm *kvm, pmd_t *pmd,
@@ -748,6 +759,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache;
struct vm_area_struct *vma;
pfn_t pfn;
pgprot_t mem_type = PAGE_S2;
write_fault = kvm_is_write_fault(kvm_vcpu_get_hsr(vcpu));
if (fault_status == FSC_PERM && !write_fault) {
@@ -798,6 +810,9 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
if (is_error_pfn(pfn))
return -EFAULT;
if (kvm_is_mmio_pfn(pfn))
mem_type = PAGE_S2_DEVICE;
spin_lock(&kvm->mmu_lock);
if (mmu_notifier_retry(kvm, mmu_seq))
goto out_unlock;
@@ -805,7 +820,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
hugetlb = transparent_hugepage_adjust(&pfn, &fault_ipa);
if (hugetlb) {
pmd_t new_pmd = pfn_pmd(pfn, PAGE_S2);
pmd_t new_pmd = pfn_pmd(pfn, mem_type);
new_pmd = pmd_mkhuge(new_pmd);
if (writable) {
kvm_set_s2pmd_writable(&new_pmd);
@@ -814,13 +829,14 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
coherent_cache_guest_page(vcpu, hva & PMD_MASK, PMD_SIZE);
ret = stage2_set_pmd_huge(kvm, memcache, fault_ipa, &new_pmd);
} else {
pte_t new_pte = pfn_pte(pfn, PAGE_S2);
pte_t new_pte = pfn_pte(pfn, mem_type);
if (writable) {
kvm_set_s2pte_writable(&new_pte);
kvm_set_pfn_dirty(pfn);
}
coherent_cache_guest_page(vcpu, hva, PAGE_SIZE);
ret = stage2_set_pte(kvm, memcache, fault_ipa, &new_pte, false);
ret = stage2_set_pte(kvm, memcache, fault_ipa, &new_pte,
mem_type == PAGE_S2_DEVICE);
}
@@ -1100,3 +1116,49 @@ out:
free_hyp_pgds();
return err;
}
void kvm_arch_commit_memory_region(struct kvm *kvm,
struct kvm_userspace_memory_region *mem,
const struct kvm_memory_slot *old,
enum kvm_mr_change change)
{
gpa_t gpa = old->base_gfn << PAGE_SHIFT;
phys_addr_t size = old->npages << PAGE_SHIFT;
if (change == KVM_MR_DELETE || change == KVM_MR_MOVE) {
spin_lock(&kvm->mmu_lock);
unmap_stage2_range(kvm, gpa, size);
spin_unlock(&kvm->mmu_lock);
}
}
int kvm_arch_prepare_memory_region(struct kvm *kvm,
struct kvm_memory_slot *memslot,
struct kvm_userspace_memory_region *mem,
enum kvm_mr_change change)
{
return 0;
}
void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
struct kvm_memory_slot *dont)
{
}
int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
unsigned long npages)
{
return 0;
}
void kvm_arch_memslots_updated(struct kvm *kvm)
{
}
void kvm_arch_flush_shadow_all(struct kvm *kvm)
{
}
void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
struct kvm_memory_slot *slot)
{
}
+14 -5
@@ -18,6 +18,15 @@
#ifdef __KERNEL__
/* Low-level stepping controls. */
#define DBG_MDSCR_SS (1 << 0)
#define DBG_SPSR_SS (1 << 21)
/* MDSCR_EL1 enabling bits */
#define DBG_MDSCR_KDE (1 << 13)
#define DBG_MDSCR_MDE (1 << 15)
#define DBG_MDSCR_MASK ~(DBG_MDSCR_KDE | DBG_MDSCR_MDE)
#define DBG_ESR_EVT(x) (((x) >> 27) & 0x7)
/* AArch64 */
@@ -73,11 +82,6 @@
#define CACHE_FLUSH_IS_SAFE 1
enum debug_el {
DBG_ACTIVE_EL0 = 0,
DBG_ACTIVE_EL1,
};
/* AArch32 */
#define DBG_ESR_EVT_BKPT 0x4
#define DBG_ESR_EVT_VECC 0x5
@@ -115,6 +119,11 @@ void unregister_break_hook(struct break_hook *hook);
u8 debug_monitors_arch(void);
enum debug_el {
DBG_ACTIVE_EL0 = 0,
DBG_ACTIVE_EL1,
};
void enable_debug_monitors(enum debug_el el);
void disable_debug_monitors(enum debug_el el);
+3 -2
@@ -76,9 +76,10 @@
*/
#define HCR_GUEST_FLAGS (HCR_TSC | HCR_TSW | HCR_TWE | HCR_TWI | HCR_VM | \
HCR_TVM | HCR_BSU_IS | HCR_FB | HCR_TAC | \
HCR_AMO | HCR_IMO | HCR_FMO | \
HCR_SWIO | HCR_TIDCP | HCR_RW)
HCR_AMO | HCR_SWIO | HCR_TIDCP | HCR_RW)
#define HCR_VIRT_EXCP_MASK (HCR_VA | HCR_VI | HCR_VF)
#define HCR_INT_OVERRIDE (HCR_FMO | HCR_IMO)
/* Hyp System Control Register (SCTLR_EL2) bits */
#define SCTLR_EL2_EE (1 << 25)
+43 -10
@@ -18,6 +18,8 @@
#ifndef __ARM_KVM_ASM_H__
#define __ARM_KVM_ASM_H__
#include <asm/virt.h>
/*
* 0 is reserved as an invalid value.
* Order *must* be kept in sync with the hyp switch code.
@@ -43,14 +45,25 @@
#define AMAIR_EL1 19 /* Aux Memory Attribute Indirection Register */
#define CNTKCTL_EL1 20 /* Timer Control Register (EL1) */
#define PAR_EL1 21 /* Physical Address Register */
#define MDSCR_EL1 22 /* Monitor Debug System Control Register */
#define DBGBCR0_EL1 23 /* Debug Breakpoint Control Registers (0-15) */
#define DBGBCR15_EL1 38
#define DBGBVR0_EL1 39 /* Debug Breakpoint Value Registers (0-15) */
#define DBGBVR15_EL1 54
#define DBGWCR0_EL1 55 /* Debug Watchpoint Control Registers (0-15) */
#define DBGWCR15_EL1 70
#define DBGWVR0_EL1 71 /* Debug Watchpoint Value Registers (0-15) */
#define DBGWVR15_EL1 86
#define MDCCINT_EL1 87 /* Monitor Debug Comms Channel Interrupt Enable Reg */
/* 32bit specific registers. Keep them at the end of the range */
#define DACR32_EL2 22 /* Domain Access Control Register */
#define IFSR32_EL2 23 /* Instruction Fault Status Register */
#define FPEXC32_EL2 24 /* Floating-Point Exception Control Register */
#define DBGVCR32_EL2 25 /* Debug Vector Catch Register */
#define TEECR32_EL1 26 /* ThumbEE Configuration Register */
#define TEEHBR32_EL1 27 /* ThumbEE Handler Base Register */
#define NR_SYS_REGS 28
#define DACR32_EL2 88 /* Domain Access Control Register */
#define IFSR32_EL2 89 /* Instruction Fault Status Register */
#define FPEXC32_EL2 90 /* Floating-Point Exception Control Register */
#define DBGVCR32_EL2 91 /* Debug Vector Catch Register */
#define TEECR32_EL1 92 /* ThumbEE Configuration Register */
#define TEEHBR32_EL1 93 /* ThumbEE Handler Base Register */
#define NR_SYS_REGS 94
/* 32bit mapping */
#define c0_MPIDR (MPIDR_EL1 * 2) /* MultiProcessor ID Register */
@@ -82,11 +95,23 @@
#define c10_AMAIR0 (AMAIR_EL1 * 2) /* Aux Memory Attr Indirection Reg */
#define c10_AMAIR1 (c10_AMAIR0 + 1)/* Aux Memory Attr Indirection Reg */
#define c14_CNTKCTL (CNTKCTL_EL1 * 2) /* Timer Control Register (PL1) */
#define NR_CP15_REGS (NR_SYS_REGS * 2)
#define cp14_DBGDSCRext (MDSCR_EL1 * 2)
#define cp14_DBGBCR0 (DBGBCR0_EL1 * 2)
#define cp14_DBGBVR0 (DBGBVR0_EL1 * 2)
#define cp14_DBGBXVR0 (cp14_DBGBVR0 + 1)
#define cp14_DBGWCR0 (DBGWCR0_EL1 * 2)
#define cp14_DBGWVR0 (DBGWVR0_EL1 * 2)
#define cp14_DBGDCCINT (MDCCINT_EL1 * 2)
#define NR_COPRO_REGS (NR_SYS_REGS * 2)
#define ARM_EXCEPTION_IRQ 0
#define ARM_EXCEPTION_TRAP 1
#define KVM_ARM64_DEBUG_DIRTY_SHIFT 0
#define KVM_ARM64_DEBUG_DIRTY (1 << KVM_ARM64_DEBUG_DIRTY_SHIFT)
#ifndef __ASSEMBLY__
struct kvm;
struct kvm_vcpu;
@@ -96,13 +121,21 @@ extern char __kvm_hyp_init_end[];
extern char __kvm_hyp_vector[];
extern char __kvm_hyp_code_start[];
extern char __kvm_hyp_code_end[];
#define __kvm_hyp_code_start __hyp_text_start
#define __kvm_hyp_code_end __hyp_text_end
extern void __kvm_flush_vm_context(void);
extern void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa);
extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu);
extern u64 __vgic_v3_get_ich_vtr_el2(void);
extern char __save_vgic_v2_state[];
extern char __restore_vgic_v2_state[];
extern char __save_vgic_v3_state[];
extern char __restore_vgic_v3_state[];
#endif
#endif /* __ARM_KVM_ASM_H__ */

Some files were not shown because too many files have changed in this diff.