KVM: PPC: Use PACA backed shadow vcpu

We're being horribly racy right now. All the entry and exit code hijacks
random fields from the PACA that could easily be used by different code if
we get interrupted, for example by a machine check (#MC) or even a page fault.

After discussing this with Ben, we figured it's best to reserve some more
space in the PACA and just stash some of the vcpu state there.

That way we can drastically improve the readability of the code and make it
both less racy and less complex.

Signed-off-by: Alexander Graf <agraf@suse.de>
Signed-off-by: Avi Kivity <avi@redhat.com>

Authored by Alexander Graf on 2010-01-08 02:58:03 +01:00; committed by Marcelo Tosatti
commit 7e57cba060 (parent 992b5b29b5)
10 changed files with 248 additions and 236 deletions
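
To make the mechanism concrete (an illustrative summary, not part of the patch): the PACA is the per-CPU structure that 64-bit PowerPC code always reaches through r13, so giving KVM a dedicated shadow_vcpu slot in it lets the entry/exit path keep its state in fields nothing else touches, instead of borrowing scratch space that an intervening exception handler might also use. A minimal C sketch of the resulting access pattern, reusing the shadow_vcpu/gpr names introduced by the diffs below; the helper itself is hypothetical:

#include <asm/paca.h>		/* get_paca(), struct paca_struct */

/* Hypothetical helper, for illustration only: stash a guest GPR in the
 * dedicated per-CPU shadow area rather than in a borrowed PACA field. */
static inline void stash_guest_gpr(int num, unsigned long val)
{
	/* shadow_vcpu is the new PACA member added by this patch; it is
	 * reserved for KVM, so other interrupt paths have no reason to
	 * overwrite it behind our back. */
	get_paca()->shadow_vcpu.gpr[num] = val;
}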

arch/powerpc/include/asm/kvm_book3s.h

@@ -23,6 +23,7 @@
 #include <linux/types.h>
 #include <linux/kvm_host.h>
 #include <asm/kvm_ppc.h>
+#include <asm/kvm_book3s_64_asm.h>
 
 struct kvmppc_slb {
 	u64 esid;
@@ -69,6 +70,7 @@ struct kvmppc_sid_map {
 
 struct kvmppc_vcpu_book3s {
 	struct kvm_vcpu vcpu;
+	struct kvmppc_book3s_shadow_vcpu shadow_vcpu;
 	struct kvmppc_sid_map sid_map[SID_MAP_NUM];
 	struct kvmppc_slb slb[64];
 	struct {

arch/powerpc/include/asm/kvm_book3s_64_asm.h

@@ -20,6 +20,8 @@
 #ifndef __ASM_KVM_BOOK3S_ASM_H__
 #define __ASM_KVM_BOOK3S_ASM_H__
 
+#ifdef __ASSEMBLY__
+
 #ifdef CONFIG_KVM_BOOK3S_64_HANDLER
 
 #include <asm/kvm_asm.h>
@@ -55,4 +57,21 @@ kvmppc_resume_\intno:
 #endif /* CONFIG_KVM_BOOK3S_64_HANDLER */
 
+#else  /*__ASSEMBLY__ */
+
+struct kvmppc_book3s_shadow_vcpu {
+	ulong gpr[14];
+	u32 cr;
+	u32 xer;
+	ulong host_r1;
+	ulong host_r2;
+	ulong handler;
+	ulong scratch0;
+	ulong scratch1;
+	ulong vmhandler;
+	ulong rmhandler;
+};
+
+#endif /*__ASSEMBLY__ */
+
 #endif /* __ASM_KVM_BOOK3S_ASM_H__ */
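
A side benefit of placing this struct inside the PACA (see the paca.h hunk below) is that real-mode assembly can address every field at a constant offset from r13. A sketch of how such offsets are usually generated through asm-offsets; the constant names here are illustrative only, and the actual offset generation in arch/powerpc/kernel/asm-offsets.c is not shown in this excerpt:

/* asm-offsets.c style sketch (illustrative names): emit assembler
 * constants so that .S code can do e.g.
 *	std r1, SHADOW_VCPU_HOST_R1(r13)
 * without knowing the structure layout at assembly time. */
#include <linux/stddef.h>	/* offsetof() */
#include <linux/kbuild.h>	/* DEFINE() */
#include <asm/paca.h>

int main(void)
{
	DEFINE(SHADOW_VCPU_HOST_R1,
	       offsetof(struct paca_struct, shadow_vcpu.host_r1));
	DEFINE(SHADOW_VCPU_SCRATCH0,
	       offsetof(struct paca_struct, shadow_vcpu.scratch0));
	return 0;
}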

arch/powerpc/include/asm/kvm_host.h

@@ -175,10 +175,13 @@ struct kvm_vcpu_arch {
 	ulong gpr[32];
 
 	ulong pc;
-	u32 cr;
 	ulong ctr;
 	ulong lr;
+
+#ifdef CONFIG_BOOKE
 	ulong xer;
+	u32 cr;
+#endif
 
 	ulong msr;
 #ifdef CONFIG_PPC64

arch/powerpc/include/asm/kvm_ppc.h

@@ -98,34 +98,42 @@ extern void kvmppc_core_destroy_mmu(struct kvm_vcpu *vcpu);
 #ifdef CONFIG_PPC_BOOK3S
 
+/* We assume we're always acting on the current vcpu */
+
 static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val)
 {
-	vcpu->arch.gpr[num] = val;
+	if ( num < 14 )
+		get_paca()->shadow_vcpu.gpr[num] = val;
+	else
+		vcpu->arch.gpr[num] = val;
 }
 
 static inline ulong kvmppc_get_gpr(struct kvm_vcpu *vcpu, int num)
 {
-	return vcpu->arch.gpr[num];
+	if ( num < 14 )
+		return get_paca()->shadow_vcpu.gpr[num];
+	else
+		return vcpu->arch.gpr[num];
 }
 
 static inline void kvmppc_set_cr(struct kvm_vcpu *vcpu, u32 val)
 {
-	vcpu->arch.cr = val;
+	get_paca()->shadow_vcpu.cr = val;
 }
 
 static inline u32 kvmppc_get_cr(struct kvm_vcpu *vcpu)
 {
-	return vcpu->arch.cr;
+	return get_paca()->shadow_vcpu.cr;
 }
 
 static inline void kvmppc_set_xer(struct kvm_vcpu *vcpu, u32 val)
 {
-	vcpu->arch.xer = val;
+	get_paca()->shadow_vcpu.xer = val;
 }
 
 static inline u32 kvmppc_get_xer(struct kvm_vcpu *vcpu)
 {
-	return vcpu->arch.xer;
+	return get_paca()->shadow_vcpu.xer;
 }
 
 #else
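
Callers are unaffected by this switch: emulation and MMU code keeps going through the accessors, and only the backing storage moves. A small usage sketch; the surrounding function is hypothetical:

/* Hypothetical emulation snippet, for illustration only. */
#include <asm/kvm_ppc.h>	/* kvmppc_get_gpr()/kvmppc_set_gpr() */

static void demo_gpr_access(struct kvm_vcpu *vcpu)
{
	ulong r3;

	kvmppc_set_gpr(vcpu, 3, 0);		/* num < 14: lands in get_paca()->shadow_vcpu */
	kvmppc_set_gpr(vcpu, 30, 0xc0ffee);	/* num >= 14: stays in vcpu->arch.gpr[] */
	r3 = kvmppc_get_gpr(vcpu, 3);		/* read back from the per-CPU shadow copy */
	(void)r3;
}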

arch/powerpc/include/asm/paca.h

@@ -19,6 +19,9 @@
 #include <asm/mmu.h>
 #include <asm/page.h>
 #include <asm/exception-64e.h>
+#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
+#include <asm/kvm_book3s_64_asm.h>
+#endif
 
 register struct paca_struct *local_paca asm("r13");
@@ -135,6 +138,8 @@ struct paca_struct {
 		u64 esid;
 		u64 vsid;
 	} kvm_slb[64];			/* guest SLB */
+	/* We use this to store guest state in */
+	struct kvmppc_book3s_shadow_vcpu shadow_vcpu;
 	u8 kvm_slb_max;			/* highest used guest slb entry */
 	u8 kvm_in_guest;		/* are we inside the guest? */
 #endif