Merge branch 'kvm-ppc-next' of git://github.com/agraf/linux-2.6 into kvm-queue

Conflicts:
	arch/powerpc/kvm/book3s_hv_rmhandlers.S
	arch/powerpc/kvm/booke.c
@@ -1838,6 +1838,7 @@ registers, find a list below:

  PPC   | KVM_REG_PPC_LPCR        | 64
  PPC   | KVM_REG_PPC_PPR         | 64
  PPC   | KVM_REG_PPC_ARCH_COMPAT | 32
  PPC   | KVM_REG_PPC_DABRX       | 32
  PPC   | KVM_REG_PPC_TM_GPR0     | 64
          ...
  PPC   | KVM_REG_PPC_TM_GPR31    | 64
@@ -343,6 +343,8 @@ config PPC_TRANSACTIONAL_MEM
	bool "Transactional Memory support for POWERPC"
	depends on PPC_BOOK3S_64
	depends on SMP
	select ALTIVEC
	select VSX
	default n
	---help---
	  Support user-mode Transactional Memory on POWERPC.
@@ -460,5 +460,116 @@ static inline unsigned int ev_idle(void)

	return r3;
}

#ifdef CONFIG_EPAPR_PARAVIRT
static inline unsigned long epapr_hypercall(unsigned long *in,
					    unsigned long *out,
					    unsigned long nr)
{
	unsigned long register r0 asm("r0");
	unsigned long register r3 asm("r3") = in[0];
	unsigned long register r4 asm("r4") = in[1];
	unsigned long register r5 asm("r5") = in[2];
	unsigned long register r6 asm("r6") = in[3];
	unsigned long register r7 asm("r7") = in[4];
	unsigned long register r8 asm("r8") = in[5];
	unsigned long register r9 asm("r9") = in[6];
	unsigned long register r10 asm("r10") = in[7];
	unsigned long register r11 asm("r11") = nr;
	unsigned long register r12 asm("r12");

	asm volatile("bl epapr_hypercall_start"
		     : "=r"(r0), "=r"(r3), "=r"(r4), "=r"(r5), "=r"(r6),
		       "=r"(r7), "=r"(r8), "=r"(r9), "=r"(r10), "=r"(r11),
		       "=r"(r12)
		     : "r"(r3), "r"(r4), "r"(r5), "r"(r6), "r"(r7), "r"(r8),
		       "r"(r9), "r"(r10), "r"(r11)
		     : "memory", "cc", "xer", "ctr", "lr");

	out[0] = r4;
	out[1] = r5;
	out[2] = r6;
	out[3] = r7;
	out[4] = r8;
	out[5] = r9;
	out[6] = r10;
	out[7] = r11;

	return r3;
}
#else
static unsigned long epapr_hypercall(unsigned long *in,
				     unsigned long *out,
				     unsigned long nr)
{
	return EV_UNIMPLEMENTED;
}
#endif

static inline long epapr_hypercall0_1(unsigned int nr, unsigned long *r2)
{
	unsigned long in[8];
	unsigned long out[8];
	unsigned long r;

	r = epapr_hypercall(in, out, nr);
	*r2 = out[0];

	return r;
}

static inline long epapr_hypercall0(unsigned int nr)
{
	unsigned long in[8];
	unsigned long out[8];

	return epapr_hypercall(in, out, nr);
}

static inline long epapr_hypercall1(unsigned int nr, unsigned long p1)
{
	unsigned long in[8];
	unsigned long out[8];

	in[0] = p1;
	return epapr_hypercall(in, out, nr);
}

static inline long epapr_hypercall2(unsigned int nr, unsigned long p1,
				    unsigned long p2)
{
	unsigned long in[8];
	unsigned long out[8];

	in[0] = p1;
	in[1] = p2;
	return epapr_hypercall(in, out, nr);
}

static inline long epapr_hypercall3(unsigned int nr, unsigned long p1,
				    unsigned long p2, unsigned long p3)
{
	unsigned long in[8];
	unsigned long out[8];

	in[0] = p1;
	in[1] = p2;
	in[2] = p3;
	return epapr_hypercall(in, out, nr);
}

static inline long epapr_hypercall4(unsigned int nr, unsigned long p1,
				    unsigned long p2, unsigned long p3,
				    unsigned long p4)
{
	unsigned long in[8];
	unsigned long out[8];

	in[0] = p1;
	in[1] = p2;
	in[2] = p3;
	in[3] = p4;
	return epapr_hypercall(in, out, nr);
}
#endif /* !__ASSEMBLY__ */
#endif /* _EPAPR_HCALLS_H */
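A minimal usage sketch of the wrappers added above (not part of the patch): a caller fills in only the arguments it needs and checks the ePAPR status returned in r3. EXAMPLE_HCALL_NR is a hypothetical token; real callers build one with KVM_HCALL_TOKEN() or the ePAPR vendor macros.

	/* Illustration only: EXAMPLE_HCALL_NR is a made-up hypercall token. */
	static int example_issue_hypercall(unsigned long arg)
	{
		long rc = epapr_hypercall1(EXAMPLE_HCALL_NR, arg);

		return (rc == EV_SUCCESS) ? 0 : -EINVAL;
	}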
@@ -91,14 +91,17 @@
#define BOOK3S_INTERRUPT_FP_UNAVAIL 0x800
#define BOOK3S_INTERRUPT_DECREMENTER 0x900
#define BOOK3S_INTERRUPT_HV_DECREMENTER 0x980
#define BOOK3S_INTERRUPT_DOORBELL 0xa00
#define BOOK3S_INTERRUPT_SYSCALL 0xc00
#define BOOK3S_INTERRUPT_TRACE 0xd00
#define BOOK3S_INTERRUPT_H_DATA_STORAGE 0xe00
#define BOOK3S_INTERRUPT_H_INST_STORAGE 0xe20
#define BOOK3S_INTERRUPT_H_EMUL_ASSIST 0xe40
#define BOOK3S_INTERRUPT_H_DOORBELL 0xe80
#define BOOK3S_INTERRUPT_PERFMON 0xf00
#define BOOK3S_INTERRUPT_ALTIVEC 0xf20
#define BOOK3S_INTERRUPT_VSX 0xf40
#define BOOK3S_INTERRUPT_H_FAC_UNAVAIL 0xf80

#define BOOK3S_IRQPRIO_SYSTEM_RESET 0
#define BOOK3S_IRQPRIO_DATA_SEGMENT 1
@@ -186,9 +186,6 @@ extern void kvmppc_update_lpcr(struct kvm *kvm, unsigned long lpcr,

extern void kvmppc_entry_trampoline(void);
extern void kvmppc_hv_entry_trampoline(void);
extern void kvmppc_load_up_fpu(void);
extern void kvmppc_load_up_altivec(void);
extern void kvmppc_load_up_vsx(void);
extern u32 kvmppc_alignment_dsisr(struct kvm_vcpu *vcpu, unsigned int inst);
extern ulong kvmppc_alignment_dar(struct kvm_vcpu *vcpu, unsigned int inst);
extern int kvmppc_h_pr(struct kvm_vcpu *vcpu, unsigned long cmd);
@@ -271,16 +268,25 @@ static inline ulong kvmppc_get_pc(struct kvm_vcpu *vcpu)
	return vcpu->arch.pc;
}

static inline u32 kvmppc_get_last_inst(struct kvm_vcpu *vcpu)
static inline bool kvmppc_need_byteswap(struct kvm_vcpu *vcpu)
{
	ulong pc = kvmppc_get_pc(vcpu);
	return (vcpu->arch.shared->msr & MSR_LE) != (MSR_KERNEL & MSR_LE);
}

static inline u32 kvmppc_get_last_inst_internal(struct kvm_vcpu *vcpu, ulong pc)
{
	/* Load the instruction manually if it failed to do so in the
	 * exit path */
	if (vcpu->arch.last_inst == KVM_INST_FETCH_FAILED)
		kvmppc_ld(vcpu, &pc, sizeof(u32), &vcpu->arch.last_inst, false);

	return vcpu->arch.last_inst;
	return kvmppc_need_byteswap(vcpu) ? swab32(vcpu->arch.last_inst) :
		vcpu->arch.last_inst;
}

static inline u32 kvmppc_get_last_inst(struct kvm_vcpu *vcpu)
{
	return kvmppc_get_last_inst_internal(vcpu, kvmppc_get_pc(vcpu));
}

/*
@@ -290,14 +296,7 @@ static inline u32 kvmppc_get_last_inst(struct kvm_vcpu *vcpu)
 */
static inline u32 kvmppc_get_last_sc(struct kvm_vcpu *vcpu)
{
	ulong pc = kvmppc_get_pc(vcpu) - 4;

	/* Load the instruction manually if it failed to do so in the
	 * exit path */
	if (vcpu->arch.last_inst == KVM_INST_FETCH_FAILED)
		kvmppc_ld(vcpu, &pc, sizeof(u32), &vcpu->arch.last_inst, false);

	return vcpu->arch.last_inst;
	return kvmppc_get_last_inst_internal(vcpu, kvmppc_get_pc(vcpu) - 4);
}

static inline ulong kvmppc_get_fault_dar(struct kvm_vcpu *vcpu)
@@ -88,6 +88,7 @@ struct kvmppc_host_state {
	u8 hwthread_req;
	u8 hwthread_state;
	u8 host_ipi;
	u8 ptid;
	struct kvm_vcpu *kvm_vcpu;
	struct kvmppc_vcore *kvm_vcore;
	unsigned long xics_phys;
@@ -63,6 +63,12 @@ static inline u32 kvmppc_get_xer(struct kvm_vcpu *vcpu)
	return vcpu->arch.xer;
}

static inline bool kvmppc_need_byteswap(struct kvm_vcpu *vcpu)
{
	/* XXX Would need to check TLB entry */
	return false;
}

static inline u32 kvmppc_get_last_inst(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.last_inst;
@@ -288,6 +288,7 @@ struct kvmppc_vcore {
	int n_woken;
	int nap_count;
	int napping_threads;
	int first_vcpuid;
	u16 pcpu;
	u16 last_cpu;
	u8 vcore_state;
@@ -298,10 +299,12 @@ struct kvmppc_vcore {
	u64 stolen_tb;
	u64 preempt_tb;
	struct kvm_vcpu *runner;
	struct kvm *kvm;
	u64 tb_offset;		/* guest timebase - host timebase */
	ulong lpcr;
	u32 arch_compat;
	ulong pcr;
	ulong dpdes;		/* doorbell state (POWER8) */
};

#define VCORE_ENTRY_COUNT(vc)	((vc)->entry_exit_count & 0xff)

@@ -410,8 +413,7 @@ struct kvm_vcpu_arch {

	ulong gpr[32];

	u64 fpr[32];
	u64 fpscr;
	struct thread_fp_state fp;

#ifdef CONFIG_SPE
	ulong evr[32];
@@ -420,12 +422,7 @@ struct kvm_vcpu_arch {
	u64 acc;
#endif
#ifdef CONFIG_ALTIVEC
	vector128 vr[32];
	vector128 vscr;
#endif

#ifdef CONFIG_VSX
	u64 vsr[64];
	struct thread_vr_state vr;
#endif

#ifdef CONFIG_KVM_BOOKE_HV
@@ -452,6 +449,7 @@ struct kvm_vcpu_arch {
	ulong pc;
	ulong ctr;
	ulong lr;
	ulong tar;

	ulong xer;
	u32 cr;
@@ -461,13 +459,30 @@ struct kvm_vcpu_arch {
	ulong guest_owned_ext;
	ulong purr;
	ulong spurr;
	ulong ic;
	ulong vtb;
	ulong dscr;
	ulong amr;
	ulong uamor;
	ulong iamr;
	u32 ctrl;
	u32 dabrx;
	ulong dabr;
	ulong dawr;
	ulong dawrx;
	ulong ciabr;
	ulong cfar;
	ulong ppr;
	ulong pspb;
	ulong fscr;
	ulong ebbhr;
	ulong ebbrr;
	ulong bescr;
	ulong csigr;
	ulong tacr;
	ulong tcscr;
	ulong acop;
	ulong wort;
	ulong shadow_srr1;
#endif
	u32 vrsave; /* also USPRG0 */
@@ -502,10 +517,33 @@ struct kvm_vcpu_arch {
	u32 ccr1;
	u32 dbsr;

	u64 mmcr[3];
	u64 mmcr[5];
	u32 pmc[8];
	u32 spmc[2];
	u64 siar;
	u64 sdar;
	u64 sier;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	u64 tfhar;
	u64 texasr;
	u64 tfiar;

	u32 cr_tm;
	u64 lr_tm;
	u64 ctr_tm;
	u64 amr_tm;
	u64 ppr_tm;
	u64 dscr_tm;
	u64 tar_tm;

	ulong gpr_tm[32];

	struct thread_fp_state fp_tm;

	struct thread_vr_state vr_tm;
	u32 vrsave_tm; /* also USPRG0 */

#endif

#ifdef CONFIG_KVM_EXIT_TIMING
	struct mutex exit_timing_lock;
@@ -546,6 +584,7 @@ struct kvm_vcpu_arch {
#endif
	gpa_t paddr_accessed;
	gva_t vaddr_accessed;
	pgd_t *pgdir;

	u8 io_gpr; /* GPR used as IO source/target */
	u8 mmio_is_bigendian;
@@ -603,7 +642,6 @@ struct kvm_vcpu_arch {
	struct list_head run_list;
	struct task_struct *run_task;
	struct kvm_run *kvm_run;
	pgd_t *pgdir;

	spinlock_t vpa_update_lock;
	struct kvmppc_vpa vpa;
@@ -616,9 +654,12 @@ struct kvm_vcpu_arch {
	spinlock_t tbacct_lock;
	u64 busy_stolen;
	u64 busy_preempt;
	unsigned long intr_msr;
#endif
};

#define VCPU_FPR(vcpu, i) (vcpu)->arch.fp.fpr[i][TS_FPROFFSET]

/* Values for vcpu->arch.state */
#define KVMPPC_VCPU_NOTREADY 0
#define KVMPPC_VCPU_RUNNABLE 1
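Note on the reworked FP/VSX layout above: guest floating-point state now lives in a struct thread_fp_state, and VCPU_FPR() is the accessor the rest of this series uses (later hunks in book3s.c switch to it). A minimal sketch, assuming only what the macro itself encodes:

	/* Read guest FPR i through the shared thread_fp_state layout. */
	static inline u64 example_read_guest_fpr(struct kvm_vcpu *vcpu, int i)
	{
		return VCPU_FPR(vcpu, i);	/* fp.fpr[i][TS_FPROFFSET] */
	}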
@@ -39,10 +39,6 @@ static inline int kvm_para_available(void)
	return 1;
}

extern unsigned long kvm_hypercall(unsigned long *in,
				   unsigned long *out,
				   unsigned long nr);

#else

static inline int kvm_para_available(void)
@@ -50,82 +46,8 @@ static inline int kvm_para_available(void)
	return 0;
}

static unsigned long kvm_hypercall(unsigned long *in,
				   unsigned long *out,
				   unsigned long nr)
{
	return EV_UNIMPLEMENTED;
}

#endif

static inline long kvm_hypercall0_1(unsigned int nr, unsigned long *r2)
{
	unsigned long in[8];
	unsigned long out[8];
	unsigned long r;

	r = kvm_hypercall(in, out, KVM_HCALL_TOKEN(nr));
	*r2 = out[0];

	return r;
}

static inline long kvm_hypercall0(unsigned int nr)
{
	unsigned long in[8];
	unsigned long out[8];

	return kvm_hypercall(in, out, KVM_HCALL_TOKEN(nr));
}

static inline long kvm_hypercall1(unsigned int nr, unsigned long p1)
{
	unsigned long in[8];
	unsigned long out[8];

	in[0] = p1;
	return kvm_hypercall(in, out, KVM_HCALL_TOKEN(nr));
}

static inline long kvm_hypercall2(unsigned int nr, unsigned long p1,
				  unsigned long p2)
{
	unsigned long in[8];
	unsigned long out[8];

	in[0] = p1;
	in[1] = p2;
	return kvm_hypercall(in, out, KVM_HCALL_TOKEN(nr));
}

static inline long kvm_hypercall3(unsigned int nr, unsigned long p1,
				  unsigned long p2, unsigned long p3)
{
	unsigned long in[8];
	unsigned long out[8];

	in[0] = p1;
	in[1] = p2;
	in[2] = p3;
	return kvm_hypercall(in, out, KVM_HCALL_TOKEN(nr));
}

static inline long kvm_hypercall4(unsigned int nr, unsigned long p1,
				  unsigned long p2, unsigned long p3,
				  unsigned long p4)
{
	unsigned long in[8];
	unsigned long out[8];

	in[0] = p1;
	in[1] = p2;
	in[2] = p3;
	in[3] = p4;
	return kvm_hypercall(in, out, KVM_HCALL_TOKEN(nr));
}


static inline unsigned int kvm_arch_para_features(void)
{
	unsigned long r;
@@ -133,7 +55,7 @@ static inline unsigned int kvm_arch_para_features(void)
	if (!kvm_para_available())
		return 0;

	if (kvm_hypercall0_1(KVM_HC_FEATURES, &r))
	if (epapr_hypercall0_1(KVM_HCALL_TOKEN(KVM_HC_FEATURES), &r))
		return 0;

	return r;
@@ -54,12 +54,13 @@ extern void kvmppc_handler_highmem(void);
extern void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu);
extern int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
			      unsigned int rt, unsigned int bytes,
			      int is_bigendian);
			      int is_default_endian);
extern int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu,
			       unsigned int rt, unsigned int bytes,
			       int is_bigendian);
			       int is_default_endian);
extern int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
			       u64 val, unsigned int bytes, int is_bigendian);
			       u64 val, unsigned int bytes,
			       int is_default_endian);

extern int kvmppc_emulate_instruction(struct kvm_run *run,
				      struct kvm_vcpu *vcpu);
@@ -455,6 +456,12 @@ static inline void kvmppc_fix_ee_before_entry(void)
	trace_hardirqs_on();

#ifdef CONFIG_PPC64
	/*
	 * To avoid races, the caller must have gone directly from having
	 * interrupts fully-enabled to hard-disabled.
	 */
	WARN_ON(local_paca->irq_happened != PACA_IRQ_HARD_DIS);

	/* Only need to enable IRQs by hard enabling them after this */
	local_paca->irq_happened = 0;
	local_paca->soft_enabled = 1;
@@ -223,6 +223,27 @@ extern int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr,
#endif
pte_t *find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea,
				 unsigned *shift);

static inline pte_t *lookup_linux_ptep(pgd_t *pgdir, unsigned long hva,
				       unsigned long *pte_sizep)
{
	pte_t *ptep;
	unsigned long ps = *pte_sizep;
	unsigned int shift;

	ptep = find_linux_pte_or_hugepte(pgdir, hva, &shift);
	if (!ptep)
		return NULL;
	if (shift)
		*pte_sizep = 1ul << shift;
	else
		*pte_sizep = PAGE_SIZE;

	if (ps > *pte_sizep)
		return NULL;

	return ptep;
}
#endif /* __ASSEMBLY__ */

#endif /* __KERNEL__ */
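A short usage sketch for the lookup_linux_ptep() helper above (illustration only; the surrounding variables are assumptions, not taken from the patch): the caller passes in the page size it needs covered, the helper returns NULL when no PTE exists or the backing page is smaller than that, and otherwise writes back the actual page size and returns the PTE.

	/* Hypothetical caller that needs at least a 4K mapping for hva. */
	unsigned long pte_size = PAGE_SIZE;
	pte_t *ptep = lookup_linux_ptep(pgdir, hva, &pte_size);
	if (!ptep)
		return NULL;	/* unmapped, or page smaller than requested */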
@@ -223,17 +223,26 @@
#define CTRL_TE 0x00c00000 /* thread enable */
#define CTRL_RUNLATCH 0x1
#define SPRN_DAWR 0xB4
#define SPRN_CIABR 0xBB
#define CIABR_PRIV 0x3
#define CIABR_PRIV_USER 1
#define CIABR_PRIV_SUPER 2
#define CIABR_PRIV_HYPER 3
#define SPRN_DAWRX 0xBC
#define DAWRX_USER (1UL << 0)
#define DAWRX_KERNEL (1UL << 1)
#define DAWRX_HYP (1UL << 2)
#define DAWRX_USER __MASK(0)
#define DAWRX_KERNEL __MASK(1)
#define DAWRX_HYP __MASK(2)
#define DAWRX_WTI __MASK(3)
#define DAWRX_WT __MASK(4)
#define DAWRX_DR __MASK(5)
#define DAWRX_DW __MASK(6)
#define SPRN_DABR 0x3F5 /* Data Address Breakpoint Register */
#define SPRN_DABR2 0x13D /* e300 */
#define SPRN_DABRX 0x3F7 /* Data Address Breakpoint Register Extension */
#define DABRX_USER (1UL << 0)
#define DABRX_KERNEL (1UL << 1)
#define DABRX_HYP (1UL << 2)
#define DABRX_BTI (1UL << 3)
#define DABRX_USER __MASK(0)
#define DABRX_KERNEL __MASK(1)
#define DABRX_HYP __MASK(2)
#define DABRX_BTI __MASK(3)
#define DABRX_ALL (DABRX_BTI | DABRX_HYP | DABRX_KERNEL | DABRX_USER)
#define SPRN_DAR 0x013 /* Data Address Register */
#define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */
@@ -260,6 +269,8 @@
#define SPRN_HRMOR 0x139 /* Real mode offset register */
#define SPRN_HSRR0 0x13A /* Hypervisor Save/Restore 0 */
#define SPRN_HSRR1 0x13B /* Hypervisor Save/Restore 1 */
#define SPRN_IC 0x350 /* Virtual Instruction Count */
#define SPRN_VTB 0x351 /* Virtual Time Base */
/* HFSCR and FSCR bit numbers are the same */
#define FSCR_TAR_LG 8 /* Enable Target Address Register */
#define FSCR_EBB_LG 7 /* Enable Event Based Branching */
@@ -298,9 +309,13 @@
#define LPCR_RMLS 0x1C000000 /* impl dependent rmo limit sel */
#define LPCR_RMLS_SH (63-37)
#define LPCR_ILE 0x02000000 /* !HV irqs set MSR:LE */
#define LPCR_AIL 0x01800000 /* Alternate interrupt location */
#define LPCR_AIL_0 0x00000000 /* MMU off exception offset 0x0 */
#define LPCR_AIL_3 0x01800000 /* MMU on exception offset 0xc00...4xxx */
#define LPCR_PECE 0x00007000 /* powersave exit cause enable */
#define LPCR_ONL 0x00040000 /* online - PURR/SPURR count */
#define LPCR_PECE 0x0001f000 /* powersave exit cause enable */
#define LPCR_PECEDP 0x00010000 /* directed priv dbells cause exit */
#define LPCR_PECEDH 0x00008000 /* directed hyp dbells cause exit */
#define LPCR_PECE0 0x00004000 /* ext. exceptions can cause exit */
#define LPCR_PECE1 0x00002000 /* decrementer can cause exit */
#define LPCR_PECE2 0x00001000 /* machine check etc can cause exit */
@@ -322,6 +337,8 @@
#define SPRN_PCR 0x152 /* Processor compatibility register */
#define PCR_VEC_DIS (1ul << (63-0)) /* Vec. disable (bit NA since POWER8) */
#define PCR_VSX_DIS (1ul << (63-1)) /* VSX disable (bit NA since POWER8) */
#define PCR_TM_DIS (1ul << (63-2)) /* Trans. memory disable (POWER8) */
#define PCR_ARCH_206 0x4 /* Architecture 2.06 */
#define PCR_ARCH_205 0x2 /* Architecture 2.05 */
#define SPRN_HEIR 0x153 /* Hypervisor Emulated Instruction Register */
#define SPRN_TLBINDEXR 0x154 /* P7 TLB control register */
@@ -368,6 +385,8 @@
#define DER_EBRKE 0x00000002 /* External Breakpoint Interrupt */
#define DER_DPIE 0x00000001 /* Dev. Port Nonmaskable Request */
#define SPRN_DMISS 0x3D0 /* Data TLB Miss Register */
#define SPRN_DHDES 0x0B1 /* Directed Hyp. Doorbell Exc. State */
#define SPRN_DPDES 0x0B0 /* Directed Priv. Doorbell Exc. State */
#define SPRN_EAR 0x11A /* External Address Register */
#define SPRN_HASH1 0x3D2 /* Primary Hash Address Register */
#define SPRN_HASH2 0x3D3 /* Secondary Hash Address Resgister */
@@ -427,6 +446,7 @@
#define SPRN_IABR 0x3F2 /* Instruction Address Breakpoint Register */
#define SPRN_IABR2 0x3FA /* 83xx */
#define SPRN_IBCR 0x135 /* 83xx Insn Breakpoint Control Reg */
#define SPRN_IAMR 0x03D /* Instr. Authority Mask Reg */
#define SPRN_HID4 0x3F4 /* 970 HID4 */
#define HID4_LPES0 (1ul << (63-0)) /* LPAR env. sel. bit 0 */
#define HID4_RMLS2_SH (63 - 2) /* Real mode limit bottom 2 bits */
@@ -541,6 +561,7 @@
#define SPRN_PIR 0x3FF /* Processor Identification Register */
#endif
#define SPRN_TIR 0x1BE /* Thread Identification Register */
#define SPRN_PSPB 0x09F /* Problem State Priority Boost reg */
#define SPRN_PTEHI 0x3D5 /* 981 7450 PTE HI word (S/W TLB load) */
#define SPRN_PTELO 0x3D6 /* 982 7450 PTE LO word (S/W TLB load) */
#define SPRN_PURR 0x135 /* Processor Utilization of Resources Reg */
@@ -682,6 +703,7 @@
#define SPRN_EBBHR 804 /* Event based branch handler register */
#define SPRN_EBBRR 805 /* Event based branch return register */
#define SPRN_BESCR 806 /* Branch event status and control register */
#define SPRN_WORT 895 /* Workload optimization register - thread */

#define SPRN_PMC1 787
#define SPRN_PMC2 788
@@ -698,6 +720,11 @@
#define SIER_SIHV 0x1000000 /* Sampled MSR_HV */
#define SIER_SIAR_VALID 0x0400000 /* SIAR contents valid */
#define SIER_SDAR_VALID 0x0200000 /* SDAR contents valid */
#define SPRN_TACR 888
#define SPRN_TCSCR 889
#define SPRN_CSIGR 890
#define SPRN_SPMC1 892
#define SPRN_SPMC2 893

/* When EBB is enabled, some of MMCR0/MMCR2/SIER are user accessible */
#define MMCR0_USER_MASK (MMCR0_FC | MMCR0_PMXE | MMCR0_PMAO)
@@ -25,10 +25,8 @@ static inline void save_tar(struct thread_struct *prev)
static inline void save_tar(struct thread_struct *prev) {}
#endif

extern void load_up_fpu(void);
extern void enable_kernel_fp(void);
extern void enable_kernel_altivec(void);
extern void load_up_altivec(struct task_struct *);
extern int emulate_altivec(struct pt_regs *);
extern void __giveup_vsx(struct task_struct *);
extern void giveup_vsx(struct task_struct *);
@@ -545,6 +545,7 @@ struct kvm_get_htab_header {
#define KVM_REG_PPC_TCSCR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xb1)
#define KVM_REG_PPC_PID (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xb2)
#define KVM_REG_PPC_ACOP (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xb3)
#define KVM_REG_PPC_WORT (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xb4)

#define KVM_REG_PPC_VRSAVE (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xb4)
#define KVM_REG_PPC_LPCR (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xb5)
@@ -553,6 +554,8 @@ struct kvm_get_htab_header {
/* Architecture compatibility level */
#define KVM_REG_PPC_ARCH_COMPAT (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xb7)

#define KVM_REG_PPC_DABRX (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xb8)

/* Transactional Memory checkpointed state:
 * This is all GPRs, all VSX regs and a subset of SPRs
 */
@@ -6,6 +6,8 @@
 * the failure is persistent. PAPR saves 0xff-0xe0 for the hypervisor.
 */
#define TM_CAUSE_PERSISTENT 0x01
#define TM_CAUSE_KVM_RESCHED 0xe0 /* From PAPR */
#define TM_CAUSE_KVM_FAC_UNAV 0xe2 /* From PAPR */
#define TM_CAUSE_RESCHED 0xde
#define TM_CAUSE_TLBI 0xdc
#define TM_CAUSE_FAC_UNAV 0xda
@@ -425,18 +425,14 @@ int main(void)
	DEFINE(VCPU_GUEST_PID, offsetof(struct kvm_vcpu, arch.pid));
	DEFINE(VCPU_GPRS, offsetof(struct kvm_vcpu, arch.gpr));
	DEFINE(VCPU_VRSAVE, offsetof(struct kvm_vcpu, arch.vrsave));
	DEFINE(VCPU_FPRS, offsetof(struct kvm_vcpu, arch.fpr));
	DEFINE(VCPU_FPSCR, offsetof(struct kvm_vcpu, arch.fpscr));
	DEFINE(VCPU_FPRS, offsetof(struct kvm_vcpu, arch.fp.fpr));
#ifdef CONFIG_ALTIVEC
	DEFINE(VCPU_VRS, offsetof(struct kvm_vcpu, arch.vr));
	DEFINE(VCPU_VSCR, offsetof(struct kvm_vcpu, arch.vscr));
#endif
#ifdef CONFIG_VSX
	DEFINE(VCPU_VSRS, offsetof(struct kvm_vcpu, arch.vsr));
	DEFINE(VCPU_VRS, offsetof(struct kvm_vcpu, arch.vr.vr));
#endif
	DEFINE(VCPU_XER, offsetof(struct kvm_vcpu, arch.xer));
	DEFINE(VCPU_CTR, offsetof(struct kvm_vcpu, arch.ctr));
	DEFINE(VCPU_LR, offsetof(struct kvm_vcpu, arch.lr));
	DEFINE(VCPU_TAR, offsetof(struct kvm_vcpu, arch.tar));
	DEFINE(VCPU_CR, offsetof(struct kvm_vcpu, arch.cr));
	DEFINE(VCPU_PC, offsetof(struct kvm_vcpu, arch.pc));
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
@@ -484,16 +480,24 @@ int main(void)
	DEFINE(VCPU_DAR, offsetof(struct kvm_vcpu, arch.shregs.dar));
	DEFINE(VCPU_VPA, offsetof(struct kvm_vcpu, arch.vpa.pinned_addr));
	DEFINE(VCPU_VPA_DIRTY, offsetof(struct kvm_vcpu, arch.vpa.dirty));
	DEFINE(VCPU_INTR_MSR, offsetof(struct kvm_vcpu, arch.intr_msr));
#endif
#ifdef CONFIG_PPC_BOOK3S
	DEFINE(VCPU_VCPUID, offsetof(struct kvm_vcpu, vcpu_id));
	DEFINE(VCPU_PURR, offsetof(struct kvm_vcpu, arch.purr));
	DEFINE(VCPU_SPURR, offsetof(struct kvm_vcpu, arch.spurr));
	DEFINE(VCPU_IC, offsetof(struct kvm_vcpu, arch.ic));
	DEFINE(VCPU_VTB, offsetof(struct kvm_vcpu, arch.vtb));
	DEFINE(VCPU_DSCR, offsetof(struct kvm_vcpu, arch.dscr));
	DEFINE(VCPU_AMR, offsetof(struct kvm_vcpu, arch.amr));
	DEFINE(VCPU_UAMOR, offsetof(struct kvm_vcpu, arch.uamor));
	DEFINE(VCPU_IAMR, offsetof(struct kvm_vcpu, arch.iamr));
	DEFINE(VCPU_CTRL, offsetof(struct kvm_vcpu, arch.ctrl));
	DEFINE(VCPU_DABR, offsetof(struct kvm_vcpu, arch.dabr));
	DEFINE(VCPU_DABRX, offsetof(struct kvm_vcpu, arch.dabrx));
	DEFINE(VCPU_DAWR, offsetof(struct kvm_vcpu, arch.dawr));
	DEFINE(VCPU_DAWRX, offsetof(struct kvm_vcpu, arch.dawrx));
	DEFINE(VCPU_CIABR, offsetof(struct kvm_vcpu, arch.ciabr));
	DEFINE(VCPU_HFLAGS, offsetof(struct kvm_vcpu, arch.hflags));
	DEFINE(VCPU_DEC, offsetof(struct kvm_vcpu, arch.dec));
	DEFINE(VCPU_DEC_EXPIRES, offsetof(struct kvm_vcpu, arch.dec_expires));
@@ -502,8 +506,10 @@ int main(void)
	DEFINE(VCPU_PRODDED, offsetof(struct kvm_vcpu, arch.prodded));
	DEFINE(VCPU_MMCR, offsetof(struct kvm_vcpu, arch.mmcr));
	DEFINE(VCPU_PMC, offsetof(struct kvm_vcpu, arch.pmc));
	DEFINE(VCPU_SPMC, offsetof(struct kvm_vcpu, arch.spmc));
	DEFINE(VCPU_SIAR, offsetof(struct kvm_vcpu, arch.siar));
	DEFINE(VCPU_SDAR, offsetof(struct kvm_vcpu, arch.sdar));
	DEFINE(VCPU_SIER, offsetof(struct kvm_vcpu, arch.sier));
	DEFINE(VCPU_SLB, offsetof(struct kvm_vcpu, arch.slb));
	DEFINE(VCPU_SLB_MAX, offsetof(struct kvm_vcpu, arch.slb_max));
	DEFINE(VCPU_SLB_NR, offsetof(struct kvm_vcpu, arch.slb_nr));
@@ -511,20 +517,47 @@ int main(void)
	DEFINE(VCPU_FAULT_DAR, offsetof(struct kvm_vcpu, arch.fault_dar));
	DEFINE(VCPU_LAST_INST, offsetof(struct kvm_vcpu, arch.last_inst));
	DEFINE(VCPU_TRAP, offsetof(struct kvm_vcpu, arch.trap));
	DEFINE(VCPU_PTID, offsetof(struct kvm_vcpu, arch.ptid));
	DEFINE(VCPU_CFAR, offsetof(struct kvm_vcpu, arch.cfar));
	DEFINE(VCPU_PPR, offsetof(struct kvm_vcpu, arch.ppr));
	DEFINE(VCPU_FSCR, offsetof(struct kvm_vcpu, arch.fscr));
	DEFINE(VCPU_PSPB, offsetof(struct kvm_vcpu, arch.pspb));
	DEFINE(VCPU_EBBHR, offsetof(struct kvm_vcpu, arch.ebbhr));
	DEFINE(VCPU_EBBRR, offsetof(struct kvm_vcpu, arch.ebbrr));
	DEFINE(VCPU_BESCR, offsetof(struct kvm_vcpu, arch.bescr));
	DEFINE(VCPU_CSIGR, offsetof(struct kvm_vcpu, arch.csigr));
	DEFINE(VCPU_TACR, offsetof(struct kvm_vcpu, arch.tacr));
	DEFINE(VCPU_TCSCR, offsetof(struct kvm_vcpu, arch.tcscr));
	DEFINE(VCPU_ACOP, offsetof(struct kvm_vcpu, arch.acop));
	DEFINE(VCPU_WORT, offsetof(struct kvm_vcpu, arch.wort));
	DEFINE(VCPU_SHADOW_SRR1, offsetof(struct kvm_vcpu, arch.shadow_srr1));
	DEFINE(VCORE_ENTRY_EXIT, offsetof(struct kvmppc_vcore, entry_exit_count));
	DEFINE(VCORE_NAP_COUNT, offsetof(struct kvmppc_vcore, nap_count));
	DEFINE(VCORE_IN_GUEST, offsetof(struct kvmppc_vcore, in_guest));
	DEFINE(VCORE_NAPPING_THREADS, offsetof(struct kvmppc_vcore, napping_threads));
	DEFINE(VCORE_KVM, offsetof(struct kvmppc_vcore, kvm));
	DEFINE(VCORE_TB_OFFSET, offsetof(struct kvmppc_vcore, tb_offset));
	DEFINE(VCORE_LPCR, offsetof(struct kvmppc_vcore, lpcr));
	DEFINE(VCORE_PCR, offsetof(struct kvmppc_vcore, pcr));
	DEFINE(VCORE_DPDES, offsetof(struct kvmppc_vcore, dpdes));
	DEFINE(VCPU_SLB_E, offsetof(struct kvmppc_slb, orige));
	DEFINE(VCPU_SLB_V, offsetof(struct kvmppc_slb, origv));
	DEFINE(VCPU_SLB_SIZE, sizeof(struct kvmppc_slb));
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	DEFINE(VCPU_TFHAR, offsetof(struct kvm_vcpu, arch.tfhar));
	DEFINE(VCPU_TFIAR, offsetof(struct kvm_vcpu, arch.tfiar));
	DEFINE(VCPU_TEXASR, offsetof(struct kvm_vcpu, arch.texasr));
	DEFINE(VCPU_GPR_TM, offsetof(struct kvm_vcpu, arch.gpr_tm));
	DEFINE(VCPU_FPRS_TM, offsetof(struct kvm_vcpu, arch.fp_tm.fpr));
	DEFINE(VCPU_VRS_TM, offsetof(struct kvm_vcpu, arch.vr_tm.vr));
	DEFINE(VCPU_VRSAVE_TM, offsetof(struct kvm_vcpu, arch.vrsave_tm));
	DEFINE(VCPU_CR_TM, offsetof(struct kvm_vcpu, arch.cr_tm));
	DEFINE(VCPU_LR_TM, offsetof(struct kvm_vcpu, arch.lr_tm));
	DEFINE(VCPU_CTR_TM, offsetof(struct kvm_vcpu, arch.ctr_tm));
	DEFINE(VCPU_AMR_TM, offsetof(struct kvm_vcpu, arch.amr_tm));
	DEFINE(VCPU_PPR_TM, offsetof(struct kvm_vcpu, arch.ppr_tm));
	DEFINE(VCPU_DSCR_TM, offsetof(struct kvm_vcpu, arch.dscr_tm));
	DEFINE(VCPU_TAR_TM, offsetof(struct kvm_vcpu, arch.tar_tm));
#endif

#ifdef CONFIG_PPC_BOOK3S_64
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
@@ -589,6 +622,7 @@ int main(void)
	HSTATE_FIELD(HSTATE_XICS_PHYS, xics_phys);
	HSTATE_FIELD(HSTATE_SAVED_XIRR, saved_xirr);
	HSTATE_FIELD(HSTATE_HOST_IPI, host_ipi);
	HSTATE_FIELD(HSTATE_PTID, ptid);
	HSTATE_FIELD(HSTATE_MMCR, host_mmcr);
	HSTATE_FIELD(HSTATE_PMC, host_pmc);
	HSTATE_FIELD(HSTATE_PURR, host_purr);
@@ -413,13 +413,13 @@ static void kvm_map_magic_page(void *data)
{
	u32 *features = data;

	ulong in[8];
	ulong in[8] = {0};
	ulong out[8];

	in[0] = KVM_MAGIC_PAGE;
	in[1] = KVM_MAGIC_PAGE;

	kvm_hypercall(in, out, KVM_HCALL_TOKEN(KVM_HC_PPC_MAP_MAGIC_PAGE));
	epapr_hypercall(in, out, KVM_HCALL_TOKEN(KVM_HC_PPC_MAP_MAGIC_PAGE));

	*features = out[0];
}
@@ -711,43 +711,6 @@ static void kvm_use_magic_page(void)
		      kvm_patching_worked ? "worked" : "failed");
}

unsigned long kvm_hypercall(unsigned long *in,
			    unsigned long *out,
			    unsigned long nr)
{
	unsigned long register r0 asm("r0");
	unsigned long register r3 asm("r3") = in[0];
	unsigned long register r4 asm("r4") = in[1];
	unsigned long register r5 asm("r5") = in[2];
	unsigned long register r6 asm("r6") = in[3];
	unsigned long register r7 asm("r7") = in[4];
	unsigned long register r8 asm("r8") = in[5];
	unsigned long register r9 asm("r9") = in[6];
	unsigned long register r10 asm("r10") = in[7];
	unsigned long register r11 asm("r11") = nr;
	unsigned long register r12 asm("r12");

	asm volatile("bl epapr_hypercall_start"
		     : "=r"(r0), "=r"(r3), "=r"(r4), "=r"(r5), "=r"(r6),
		       "=r"(r7), "=r"(r8), "=r"(r9), "=r"(r10), "=r"(r11),
		       "=r"(r12)
		     : "r"(r3), "r"(r4), "r"(r5), "r"(r6), "r"(r7), "r"(r8),
		       "r"(r9), "r"(r10), "r"(r11)
		     : "memory", "cc", "xer", "ctr", "lr");

	out[0] = r4;
	out[1] = r5;
	out[2] = r6;
	out[3] = r7;
	out[4] = r8;
	out[5] = r9;
	out[6] = r10;
	out[7] = r11;

	return r3;
}
EXPORT_SYMBOL_GPL(kvm_hypercall);

static __init void kvm_free_tmp(void)
{
	free_reserved_area(&kvm_tmp[kvm_tmp_index],
@@ -21,6 +21,8 @@
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/module.h>
#include <linux/miscdevice.h>

#include <asm/reg.h>
#include <asm/cputable.h>
@@ -231,3 +233,5 @@ static void __exit kvmppc_44x_exit(void)

module_init(kvmppc_44x_init);
module_exit(kvmppc_44x_exit);
MODULE_ALIAS_MISCDEV(KVM_MINOR);
MODULE_ALIAS("devname:kvm");
@@ -18,6 +18,8 @@
#include <linux/err.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/miscdevice.h>

#include <asm/reg.h>
#include <asm/cputable.h>
@@ -575,10 +577,10 @@ int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
		break;
	case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31:
		i = reg->id - KVM_REG_PPC_FPR0;
		val = get_reg_val(reg->id, vcpu->arch.fpr[i]);
		val = get_reg_val(reg->id, VCPU_FPR(vcpu, i));
		break;
	case KVM_REG_PPC_FPSCR:
		val = get_reg_val(reg->id, vcpu->arch.fpscr);
		val = get_reg_val(reg->id, vcpu->arch.fp.fpscr);
		break;
#ifdef CONFIG_ALTIVEC
	case KVM_REG_PPC_VR0 ... KVM_REG_PPC_VR31:
@@ -586,19 +588,30 @@ int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
			r = -ENXIO;
			break;
		}
		val.vval = vcpu->arch.vr[reg->id - KVM_REG_PPC_VR0];
		val.vval = vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0];
		break;
	case KVM_REG_PPC_VSCR:
		if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
			r = -ENXIO;
			break;
		}
		val = get_reg_val(reg->id, vcpu->arch.vscr.u[3]);
		val = get_reg_val(reg->id, vcpu->arch.vr.vscr.u[3]);
		break;
	case KVM_REG_PPC_VRSAVE:
		val = get_reg_val(reg->id, vcpu->arch.vrsave);
		break;
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_VSX
	case KVM_REG_PPC_VSR0 ... KVM_REG_PPC_VSR31:
		if (cpu_has_feature(CPU_FTR_VSX)) {
			long int i = reg->id - KVM_REG_PPC_VSR0;
			val.vsxval[0] = vcpu->arch.fp.fpr[i][0];
			val.vsxval[1] = vcpu->arch.fp.fpr[i][1];
		} else {
			r = -ENXIO;
		}
		break;
#endif /* CONFIG_VSX */
	case KVM_REG_PPC_DEBUG_INST: {
		u32 opcode = INS_TW;
		r = copy_to_user((u32 __user *)(long)reg->addr,
@@ -654,10 +667,10 @@ int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
		break;
	case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31:
		i = reg->id - KVM_REG_PPC_FPR0;
		vcpu->arch.fpr[i] = set_reg_val(reg->id, val);
		VCPU_FPR(vcpu, i) = set_reg_val(reg->id, val);
		break;
	case KVM_REG_PPC_FPSCR:
		vcpu->arch.fpscr = set_reg_val(reg->id, val);
		vcpu->arch.fp.fpscr = set_reg_val(reg->id, val);
		break;
#ifdef CONFIG_ALTIVEC
	case KVM_REG_PPC_VR0 ... KVM_REG_PPC_VR31:
@@ -665,14 +678,14 @@ int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
			r = -ENXIO;
			break;
		}
		vcpu->arch.vr[reg->id - KVM_REG_PPC_VR0] = val.vval;
		vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0] = val.vval;
		break;
	case KVM_REG_PPC_VSCR:
		if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
			r = -ENXIO;
			break;
		}
		vcpu->arch.vscr.u[3] = set_reg_val(reg->id, val);
		vcpu->arch.vr.vscr.u[3] = set_reg_val(reg->id, val);
		break;
	case KVM_REG_PPC_VRSAVE:
		if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
@@ -682,6 +695,17 @@ int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
		vcpu->arch.vrsave = set_reg_val(reg->id, val);
		break;
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_VSX
	case KVM_REG_PPC_VSR0 ... KVM_REG_PPC_VSR31:
		if (cpu_has_feature(CPU_FTR_VSX)) {
			long int i = reg->id - KVM_REG_PPC_VSR0;
			vcpu->arch.fp.fpr[i][0] = val.vsxval[0];
			vcpu->arch.fp.fpr[i][1] = val.vsxval[1];
		} else {
			r = -ENXIO;
		}
		break;
#endif /* CONFIG_VSX */
#ifdef CONFIG_KVM_XICS
	case KVM_REG_PPC_ICP_STATE:
		if (!vcpu->arch.icp) {
@@ -879,3 +903,9 @@ static void kvmppc_book3s_exit(void)

module_init(kvmppc_book3s_init);
module_exit(kvmppc_book3s_exit);

/* On 32bit this is our one and only kernel module */
#ifdef CONFIG_KVM_BOOK3S_32
MODULE_ALIAS_MISCDEV(KVM_MINOR);
MODULE_ALIAS("devname:kvm");
#endif
@@ -243,6 +243,11 @@ next_pteg:
	/* Now tell our Shadow PTE code about the new page */

	pte = kvmppc_mmu_hpte_cache_next(vcpu);
	if (!pte) {
		kvm_release_pfn_clean(hpaddr >> PAGE_SHIFT);
		r = -EAGAIN;
		goto out;
	}

	dprintk_mmu("KVM: %c%c Map 0x%llx: [%lx] 0x%llx (0x%llx) -> %lx\n",
		    orig_pte->may_write ? 'w' : '-',