Merge branch 'kvm-updates/3.4' of git://git.kernel.org/pub/scm/virt/kvm/kvm
Pull kvm updates from Avi Kivity:
 "Changes include timekeeping improvements, support for assigning host
  PCI devices that share interrupt lines, s390 user-controlled guests, a
  large ppc update, and random fixes."

This is with the sign-offs fixed; hopefully next merge window we won't
have rebased commits.

* 'kvm-updates/3.4' of git://git.kernel.org/pub/scm/virt/kvm/kvm: (130 commits)
  KVM: Convert intx_mask_lock to spin lock
  KVM: x86: fix kvm_write_tsc() TSC matching thinko
  x86: kvmclock: abstract save/restore sched_clock_state
  KVM: nVMX: Fix erroneous exception bitmap check
  KVM: Ignore the writes to MSR_K7_HWCR(3)
  KVM: MMU: make use of ->root_level in reset_rsvds_bits_mask
  KVM: PMU: add proper support for fixed counter 2
  KVM: PMU: Fix raw event check
  KVM: PMU: warn when pin control is set in eventsel msr
  KVM: VMX: Fix delayed load of shared MSRs
  KVM: use correct tlbs dirty type in cmpxchg
  KVM: Allow host IRQ sharing for assigned PCI 2.3 devices
  KVM: Ensure all vcpus are consistent with in-kernel irqchip settings
  KVM: x86 emulator: Allow PM/VM86 switch during task switch
  KVM: SVM: Fix CPL updates
  KVM: x86 emulator: VM86 segments must have DPL 3
  KVM: x86 emulator: Fix task switch privilege checks
  arch/powerpc/kvm/book3s_hv.c: included linux/sched.h twice
  KVM: x86 emulator: correctly mask pmc index bits in RDPMC instruction emulation
  KVM: mmu_notifier: Flush TLBs before releasing mmu_lock
  ...
@@ -265,12 +265,9 @@ struct kvm_debug_exit_arch {
struct kvm_guest_debug_arch {
};

#define KVM_REG_MASK 0x001f
#define KVM_REG_EXT_MASK 0xffe0
#define KVM_REG_GPR 0x0000
#define KVM_REG_FPR 0x0020
#define KVM_REG_QPR 0x0040
#define KVM_REG_FQPR 0x0060
/* definition of registers in kvm_run */
struct kvm_sync_regs {
};

#define KVM_INTERRUPT_SET -1U
#define KVM_INTERRUPT_UNSET -2U
@@ -292,4 +289,41 @@ struct kvm_allocate_rma {
	__u64 rma_size;
};

struct kvm_book3e_206_tlb_entry {
	__u32 mas8;
	__u32 mas1;
	__u64 mas2;
	__u64 mas7_3;
};

struct kvm_book3e_206_tlb_params {
	/*
	 * For mmu types KVM_MMU_FSL_BOOKE_NOHV and KVM_MMU_FSL_BOOKE_HV:
	 *
	 * - The number of ways of TLB0 must be a power of two between 2 and
	 *   16.
	 * - TLB1 must be fully associative.
	 * - The size of TLB0 must be a multiple of the number of ways, and
	 *   the number of sets must be a power of two.
	 * - The size of TLB1 may not exceed 64 entries.
	 * - TLB0 supports 4 KiB pages.
	 * - The page sizes supported by TLB1 are as indicated by
	 *   TLB1CFG (if MMUCFG[MAVN] = 0) or TLB1PS (if MMUCFG[MAVN] = 1)
	 *   as returned by KVM_GET_SREGS.
	 * - TLB2 and TLB3 are reserved, and their entries in tlb_sizes[]
	 *   and tlb_ways[] must be zero.
	 *
	 * tlb_ways[n] = tlb_sizes[n] means the array is fully associative.
	 *
	 * KVM will adjust TLBnCFG based on the sizes configured here,
	 * though arrays greater than 2048 entries will have TLBnCFG[NENTRY]
	 * set to zero.
	 */
	__u32 tlb_sizes[4];
	__u32 tlb_ways[4];
	__u32 reserved[8];
};

#define KVM_REG_PPC_HIOR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x1)

#endif /* __LINUX_KVM_POWERPC_H */

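For illustration only (not part of the diff): a userspace sketch of a geometry that satisfies the constraints documented in kvm_book3e_206_tlb_params above, assuming a 512-entry 4-way TLB0 and a 64-entry fully associative TLB1. The helper name is hypothetical.

#include <string.h>
#include <linux/types.h>

/* Hypothetical helper; relies on the struct declared in the header above. */
static void example_tlb_geometry(struct kvm_book3e_206_tlb_params *p)
{
	memset(p, 0, sizeof(*p));	/* TLB2/TLB3 entries must stay zero */
	p->tlb_sizes[0] = 512;		/* TLB0: 512 entries total */
	p->tlb_ways[0] = 4;		/* 4 ways -> 128 sets, a power of two */
	p->tlb_sizes[1] = 64;		/* TLB1: at most 64 entries */
	p->tlb_ways[1] = 64;		/* ways == size -> fully associative */
}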
@@ -90,6 +90,8 @@ struct kvmppc_vcpu_book3s {
#endif
	int context_id[SID_CONTEXTS];

	bool hior_explicit; /* HIOR is set by ioctl, not PVR */

	struct hlist_head hpte_hash_pte[HPTEG_HASH_NUM_PTE];
	struct hlist_head hpte_hash_pte_long[HPTEG_HASH_NUM_PTE_LONG];
	struct hlist_head hpte_hash_vpte[HPTEG_HASH_NUM_VPTE];
@@ -119,6 +121,11 @@ extern void kvmppc_mmu_book3s_hv_init(struct kvm_vcpu *vcpu);
extern int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte);
extern int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr);
extern void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu);
extern int kvmppc_book3s_hv_page_fault(struct kvm_run *run,
			struct kvm_vcpu *vcpu, unsigned long addr,
			unsigned long status);
extern long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr,
			unsigned long slb_v, unsigned long valid);

extern void kvmppc_mmu_hpte_cache_map(struct kvm_vcpu *vcpu, struct hpte_cache *pte);
extern struct hpte_cache *kvmppc_mmu_hpte_cache_next(struct kvm_vcpu *vcpu);
@@ -138,6 +145,21 @@ extern void kvmppc_set_bat(struct kvm_vcpu *vcpu, struct kvmppc_bat *bat,
extern void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr);
extern int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu);
extern pfn_t kvmppc_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn);
extern void kvmppc_add_revmap_chain(struct kvm *kvm, struct revmap_entry *rev,
			unsigned long *rmap, long pte_index, int realmode);
extern void kvmppc_invalidate_hpte(struct kvm *kvm, unsigned long *hptep,
			unsigned long pte_index);
void kvmppc_clear_ref_hpte(struct kvm *kvm, unsigned long *hptep,
			unsigned long pte_index);
extern void *kvmppc_pin_guest_page(struct kvm *kvm, unsigned long addr,
			unsigned long *nb_ret);
extern void kvmppc_unpin_guest_page(struct kvm *kvm, void *addr);
extern long kvmppc_virtmode_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
			long pte_index, unsigned long pteh, unsigned long ptel);
extern long kvmppc_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
			long pte_index, unsigned long pteh, unsigned long ptel);
extern long kvmppc_hv_get_dirty_log(struct kvm *kvm,
			struct kvm_memory_slot *memslot);

extern void kvmppc_entry_trampoline(void);
extern void kvmppc_hv_entry_trampoline(void);
@@ -183,7 +205,9 @@ static inline void kvmppc_update_int_pending(struct kvm_vcpu *vcpu,
static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val)
{
	if ( num < 14 ) {
		to_svcpu(vcpu)->gpr[num] = val;
		struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
		svcpu->gpr[num] = val;
		svcpu_put(svcpu);
		to_book3s(vcpu)->shadow_vcpu->gpr[num] = val;
	} else
		vcpu->arch.gpr[num] = val;
@@ -191,80 +215,120 @@ static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val)

static inline ulong kvmppc_get_gpr(struct kvm_vcpu *vcpu, int num)
{
	if ( num < 14 )
		return to_svcpu(vcpu)->gpr[num];
	else
	if ( num < 14 ) {
		struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
		ulong r = svcpu->gpr[num];
		svcpu_put(svcpu);
		return r;
	} else
		return vcpu->arch.gpr[num];
}

static inline void kvmppc_set_cr(struct kvm_vcpu *vcpu, u32 val)
{
	to_svcpu(vcpu)->cr = val;
	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
	svcpu->cr = val;
	svcpu_put(svcpu);
	to_book3s(vcpu)->shadow_vcpu->cr = val;
}

static inline u32 kvmppc_get_cr(struct kvm_vcpu *vcpu)
{
	return to_svcpu(vcpu)->cr;
	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
	u32 r;
	r = svcpu->cr;
	svcpu_put(svcpu);
	return r;
}

static inline void kvmppc_set_xer(struct kvm_vcpu *vcpu, u32 val)
{
	to_svcpu(vcpu)->xer = val;
	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
	svcpu->xer = val;
	to_book3s(vcpu)->shadow_vcpu->xer = val;
	svcpu_put(svcpu);
}

static inline u32 kvmppc_get_xer(struct kvm_vcpu *vcpu)
{
	return to_svcpu(vcpu)->xer;
	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
	u32 r;
	r = svcpu->xer;
	svcpu_put(svcpu);
	return r;
}

static inline void kvmppc_set_ctr(struct kvm_vcpu *vcpu, ulong val)
{
	to_svcpu(vcpu)->ctr = val;
	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
	svcpu->ctr = val;
	svcpu_put(svcpu);
}

static inline ulong kvmppc_get_ctr(struct kvm_vcpu *vcpu)
{
	return to_svcpu(vcpu)->ctr;
	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
	ulong r;
	r = svcpu->ctr;
	svcpu_put(svcpu);
	return r;
}

static inline void kvmppc_set_lr(struct kvm_vcpu *vcpu, ulong val)
{
	to_svcpu(vcpu)->lr = val;
	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
	svcpu->lr = val;
	svcpu_put(svcpu);
}

static inline ulong kvmppc_get_lr(struct kvm_vcpu *vcpu)
{
	return to_svcpu(vcpu)->lr;
	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
	ulong r;
	r = svcpu->lr;
	svcpu_put(svcpu);
	return r;
}

static inline void kvmppc_set_pc(struct kvm_vcpu *vcpu, ulong val)
{
	to_svcpu(vcpu)->pc = val;
	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
	svcpu->pc = val;
	svcpu_put(svcpu);
}

static inline ulong kvmppc_get_pc(struct kvm_vcpu *vcpu)
{
	return to_svcpu(vcpu)->pc;
	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
	ulong r;
	r = svcpu->pc;
	svcpu_put(svcpu);
	return r;
}

static inline u32 kvmppc_get_last_inst(struct kvm_vcpu *vcpu)
{
	ulong pc = kvmppc_get_pc(vcpu);
	struct kvmppc_book3s_shadow_vcpu *svcpu = to_svcpu(vcpu);
	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
	u32 r;

	/* Load the instruction manually if it failed to do so in the
	 * exit path */
	if (svcpu->last_inst == KVM_INST_FETCH_FAILED)
		kvmppc_ld(vcpu, &pc, sizeof(u32), &svcpu->last_inst, false);

	return svcpu->last_inst;
	r = svcpu->last_inst;
	svcpu_put(svcpu);
	return r;
}

static inline ulong kvmppc_get_fault_dar(struct kvm_vcpu *vcpu)
{
	return to_svcpu(vcpu)->fault_dar;
	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
	ulong r;
	r = svcpu->fault_dar;
	svcpu_put(svcpu);
	return r;
}

static inline bool kvmppc_critical_section(struct kvm_vcpu *vcpu)

@@ -20,11 +20,15 @@
#ifndef __ASM_KVM_BOOK3S_32_H__
#define __ASM_KVM_BOOK3S_32_H__

static inline struct kvmppc_book3s_shadow_vcpu *to_svcpu(struct kvm_vcpu *vcpu)
static inline struct kvmppc_book3s_shadow_vcpu *svcpu_get(struct kvm_vcpu *vcpu)
{
	return to_book3s(vcpu)->shadow_vcpu;
}

static inline void svcpu_put(struct kvmppc_book3s_shadow_vcpu *svcpu)
{
}

#define PTE_SIZE 12
#define VSID_ALL 0
#define SR_INVALID 0x00000001 /* VSID 1 should always be unused */

@@ -21,14 +21,56 @@
#define __ASM_KVM_BOOK3S_64_H__

#ifdef CONFIG_KVM_BOOK3S_PR
static inline struct kvmppc_book3s_shadow_vcpu *to_svcpu(struct kvm_vcpu *vcpu)
static inline struct kvmppc_book3s_shadow_vcpu *svcpu_get(struct kvm_vcpu *vcpu)
{
	preempt_disable();
	return &get_paca()->shadow_vcpu;
}

static inline void svcpu_put(struct kvmppc_book3s_shadow_vcpu *svcpu)
{
	preempt_enable();
}
#endif

#define SPAPR_TCE_SHIFT 12

#ifdef CONFIG_KVM_BOOK3S_64_HV
/* For now use fixed-size 16MB page table */
#define HPT_ORDER 24
#define HPT_NPTEG (1ul << (HPT_ORDER - 7)) /* 128B per pteg */
#define HPT_NPTE (HPT_NPTEG << 3) /* 8 PTEs per PTEG */
#define HPT_HASH_MASK (HPT_NPTEG - 1)
#endif

#define VRMA_VSID 0x1ffffffUL /* 1TB VSID reserved for VRMA */

/*
 * We use a lock bit in HPTE dword 0 to synchronize updates and
 * accesses to each HPTE, and another bit to indicate non-present
 * HPTEs.
 */
#define HPTE_V_HVLOCK 0x40UL
#define HPTE_V_ABSENT 0x20UL

static inline long try_lock_hpte(unsigned long *hpte, unsigned long bits)
{
	unsigned long tmp, old;

	asm volatile(" ldarx %0,0,%2\n"
		     " and. %1,%0,%3\n"
		     " bne 2f\n"
		     " ori %0,%0,%4\n"
		     " stdcx. %0,0,%2\n"
		     " beq+ 2f\n"
		     " li %1,%3\n"
		     "2: isync"
		     : "=&r" (tmp), "=&r" (old)
		     : "r" (hpte), "r" (bits), "i" (HPTE_V_HVLOCK)
		     : "cc", "memory");
	return old == 0;
}

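A minimal usage sketch, not taken from the patch: callers are expected to spin on try_lock_hpte() and release the HPTE by clearing HPTE_V_HVLOCK again (the real unlock paths also order the store with barriers, omitted here).

/* Illustrative only -- lock an HPTE, work on it, then unlock it. */
static void example_hpte_update(unsigned long *hptep)
{
	while (!try_lock_hpte(hptep, HPTE_V_HVLOCK))
		cpu_relax();		/* spin until we own the HPTE */

	/* ... read or modify the HPTE under the lock ... */

	hptep[0] &= ~HPTE_V_HVLOCK;	/* drop the lock bit (barriers omitted) */
}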
static inline unsigned long compute_tlbie_rb(unsigned long v, unsigned long r,
					     unsigned long pte_index)
{
@@ -62,4 +104,140 @@ static inline unsigned long compute_tlbie_rb(unsigned long v, unsigned long r,
	return rb;
}

static inline unsigned long hpte_page_size(unsigned long h, unsigned long l)
{
	/* only handle 4k, 64k and 16M pages for now */
	if (!(h & HPTE_V_LARGE))
		return 1ul << 12; /* 4k page */
	if ((l & 0xf000) == 0x1000 && cpu_has_feature(CPU_FTR_ARCH_206))
		return 1ul << 16; /* 64k page */
	if ((l & 0xff000) == 0)
		return 1ul << 24; /* 16M page */
	return 0; /* error */
}

static inline unsigned long hpte_rpn(unsigned long ptel, unsigned long psize)
{
	return ((ptel & HPTE_R_RPN) & ~(psize - 1)) >> PAGE_SHIFT;
}

static inline int hpte_is_writable(unsigned long ptel)
{
	unsigned long pp = ptel & (HPTE_R_PP0 | HPTE_R_PP);

	return pp != PP_RXRX && pp != PP_RXXX;
}

static inline unsigned long hpte_make_readonly(unsigned long ptel)
{
	if ((ptel & HPTE_R_PP0) || (ptel & HPTE_R_PP) == PP_RWXX)
		ptel = (ptel & ~HPTE_R_PP) | PP_RXXX;
	else
		ptel |= PP_RXRX;
	return ptel;
}

static inline int hpte_cache_flags_ok(unsigned long ptel, unsigned long io_type)
{
	unsigned int wimg = ptel & HPTE_R_WIMG;

	/* Handle SAO */
	if (wimg == (HPTE_R_W | HPTE_R_I | HPTE_R_M) &&
	    cpu_has_feature(CPU_FTR_ARCH_206))
		wimg = HPTE_R_M;

	if (!io_type)
		return wimg == HPTE_R_M;

	return (wimg & (HPTE_R_W | HPTE_R_I)) == io_type;
}

/*
 * Lock and read a linux PTE.  If it's present and writable, atomically
 * set dirty and referenced bits and return the PTE, otherwise return 0.
 */
static inline pte_t kvmppc_read_update_linux_pte(pte_t *p, int writing)
{
	pte_t pte, tmp;

	/* wait until _PAGE_BUSY is clear then set it atomically */
	__asm__ __volatile__ (
		"1: ldarx %0,0,%3\n"
		"   andi. %1,%0,%4\n"
		"   bne- 1b\n"
		"   ori %1,%0,%4\n"
		"   stdcx. %1,0,%3\n"
		"   bne- 1b"
		: "=&r" (pte), "=&r" (tmp), "=m" (*p)
		: "r" (p), "i" (_PAGE_BUSY)
		: "cc");

	if (pte_present(pte)) {
		pte = pte_mkyoung(pte);
		if (writing && pte_write(pte))
			pte = pte_mkdirty(pte);
	}

	*p = pte;	/* clears _PAGE_BUSY */

	return pte;
}

/* Return HPTE cache control bits corresponding to Linux pte bits */
static inline unsigned long hpte_cache_bits(unsigned long pte_val)
{
#if _PAGE_NO_CACHE == HPTE_R_I && _PAGE_WRITETHRU == HPTE_R_W
	return pte_val & (HPTE_R_W | HPTE_R_I);
#else
	return ((pte_val & _PAGE_NO_CACHE) ? HPTE_R_I : 0) +
		((pte_val & _PAGE_WRITETHRU) ? HPTE_R_W : 0);
#endif
}

static inline bool hpte_read_permission(unsigned long pp, unsigned long key)
{
	if (key)
		return PP_RWRX <= pp && pp <= PP_RXRX;
	return 1;
}

static inline bool hpte_write_permission(unsigned long pp, unsigned long key)
{
	if (key)
		return pp == PP_RWRW;
	return pp <= PP_RWRW;
}

static inline int hpte_get_skey_perm(unsigned long hpte_r, unsigned long amr)
{
	unsigned long skey;

	skey = ((hpte_r & HPTE_R_KEY_HI) >> 57) |
		((hpte_r & HPTE_R_KEY_LO) >> 9);
	return (amr >> (62 - 2 * skey)) & 3;
}

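To make the AMR arithmetic above concrete: the AMR holds one 2-bit field per storage key, with key n sitting at bit position 62 - 2n, so key 5 lives at bits 52-53. A self-contained sketch (sample values are made up, not from the patch):

#include <stdio.h>

int main(void)
{
	unsigned long skey = 5;				/* storage key taken from the HPTE */
	unsigned long amr = 3UL << (62 - 2 * skey);	/* deny read and write for key 5 */

	/* Same extraction as the last line of hpte_get_skey_perm() above. */
	unsigned long perm = (amr >> (62 - 2 * skey)) & 3;
	printf("AMR field for key %lu = %lu\n", skey, perm);	/* prints 3 */
	return 0;
}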
static inline void lock_rmap(unsigned long *rmap)
{
	do {
		while (test_bit(KVMPPC_RMAP_LOCK_BIT, rmap))
			cpu_relax();
	} while (test_and_set_bit_lock(KVMPPC_RMAP_LOCK_BIT, rmap));
}

static inline void unlock_rmap(unsigned long *rmap)
{
	__clear_bit_unlock(KVMPPC_RMAP_LOCK_BIT, rmap);
}

static inline bool slot_is_aligned(struct kvm_memory_slot *memslot,
				   unsigned long pagesize)
{
	unsigned long mask = (pagesize >> PAGE_SHIFT) - 1;

	if (pagesize <= PAGE_SIZE)
		return 1;
	return !(memslot->base_gfn & mask) && !(memslot->npages & mask);
}

#endif /* __ASM_KVM_BOOK3S_64_H__ */

@@ -22,46 +22,55 @@
#define E500_PID_NUM 3
#define E500_TLB_NUM 2

struct tlbe{
	u32 mas1;
	u32 mas2;
	u32 mas3;
	u32 mas7;
};

#define E500_TLB_VALID 1
#define E500_TLB_DIRTY 2

struct tlbe_priv {
struct tlbe_ref {
	pfn_t pfn;
	unsigned int flags; /* E500_TLB_* */
};

struct tlbe_priv {
	struct tlbe_ref ref; /* TLB0 only -- TLB1 uses tlb_refs */
};

struct vcpu_id_table;

struct kvmppc_e500_tlb_params {
	int entries, ways, sets;
};

struct kvmppc_vcpu_e500 {
	/* Unmodified copy of the guest's TLB. */
	struct tlbe *gtlb_arch[E500_TLB_NUM];
	/* Unmodified copy of the guest's TLB -- shared with host userspace. */
	struct kvm_book3e_206_tlb_entry *gtlb_arch;

	/* Starting entry number in gtlb_arch[] */
	int gtlb_offset[E500_TLB_NUM];

	/* KVM internal information associated with each guest TLB entry */
	struct tlbe_priv *gtlb_priv[E500_TLB_NUM];

	unsigned int gtlb_size[E500_TLB_NUM];
	struct kvmppc_e500_tlb_params gtlb_params[E500_TLB_NUM];

	unsigned int gtlb_nv[E500_TLB_NUM];

	/*
	 * information associated with each host TLB entry --
	 * TLB1 only for now.  If/when guest TLB1 entries can be
	 * mapped with host TLB0, this will be used for that too.
	 *
	 * We don't want to use this for guest TLB0 because then we'd
	 * have the overhead of doing the translation again even if
	 * the entry is still in the guest TLB (e.g. we swapped out
	 * and back, and our host TLB entries got evicted).
	 */
	struct tlbe_ref *tlb_refs[E500_TLB_NUM];
	unsigned int host_tlb1_nv;

	u32 host_pid[E500_PID_NUM];
	u32 pid[E500_PID_NUM];
	u32 svr;

	u32 mas0;
	u32 mas1;
	u32 mas2;
	u32 mas3;
	u32 mas4;
	u32 mas5;
	u32 mas6;
	u32 mas7;

	/* vcpu id table */
	struct vcpu_id_table *idt;

@@ -73,6 +82,9 @@ struct kvmppc_vcpu_e500 {
	u32 tlb1cfg;
	u64 mcar;

	struct page **shared_tlb_pages;
	int num_shared_tlb_pages;

	struct kvm_vcpu vcpu;
};

@@ -32,17 +32,32 @@
#include <linux/atomic.h>
#include <asm/kvm_asm.h>
#include <asm/processor.h>
#include <asm/page.h>

#define KVM_MAX_VCPUS NR_CPUS
#define KVM_MAX_VCORES NR_CPUS
#define KVM_MEMORY_SLOTS 32
/* memory slots that does not exposed to userspace */
#define KVM_PRIVATE_MEM_SLOTS 4
#define KVM_MEM_SLOTS_NUM (KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS)

#ifdef CONFIG_KVM_MMIO
#define KVM_COALESCED_MMIO_PAGE_OFFSET 1
#endif

#ifdef CONFIG_KVM_BOOK3S_64_HV
#include <linux/mmu_notifier.h>

#define KVM_ARCH_WANT_MMU_NOTIFIER

struct kvm;
extern int kvm_unmap_hva(struct kvm *kvm, unsigned long hva);
extern int kvm_age_hva(struct kvm *kvm, unsigned long hva);
extern int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
extern void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);

#endif

/* We don't currently support large pages. */
#define KVM_HPAGE_GFN_SHIFT(x) 0
#define KVM_NR_PAGE_SIZES 1
@@ -158,34 +173,72 @@ struct kvmppc_spapr_tce_table {
	struct page *pages[0];
};

struct kvmppc_rma_info {
struct kvmppc_linear_info {
	void *base_virt;
	unsigned long base_pfn;
	unsigned long npages;
	struct list_head list;
	atomic_t use_count;
	atomic_t use_count;
	int type;
};

/*
 * The reverse mapping array has one entry for each HPTE,
 * which stores the guest's view of the second word of the HPTE
 * (including the guest physical address of the mapping),
 * plus forward and backward pointers in a doubly-linked ring
 * of HPTEs that map the same host page.  The pointers in this
 * ring are 32-bit HPTE indexes, to save space.
 */
struct revmap_entry {
	unsigned long guest_rpte;
	unsigned int forw, back;
};

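A hedged sketch of what the comment above describes, assuming only the struct just shown: each forw field is a 32-bit HPTE index rather than a pointer, so visiting every HPTE that maps the same host page is a simple ring walk.

/* Illustrative only: visit each HPTE in the ring starting at index i. */
static void walk_revmap_ring(struct revmap_entry *rev, unsigned int i)
{
	unsigned int j = i;

	do {
		/* rev[j].guest_rpte holds the guest view of HPTE dword 1 */
		j = rev[j].forw;	/* follow the 32-bit index */
	} while (j != i);		/* the doubly-linked ring closes on itself */
}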
/*
 * We use the top bit of each memslot->rmap entry as a lock bit,
 * and bit 32 as a present flag.  The bottom 32 bits are the
 * index in the guest HPT of a HPTE that points to the page.
 */
#define KVMPPC_RMAP_LOCK_BIT 63
#define KVMPPC_RMAP_RC_SHIFT 32
#define KVMPPC_RMAP_REFERENCED (HPTE_R_R << KVMPPC_RMAP_RC_SHIFT)
#define KVMPPC_RMAP_CHANGED (HPTE_R_C << KVMPPC_RMAP_RC_SHIFT)
#define KVMPPC_RMAP_PRESENT 0x100000000ul
#define KVMPPC_RMAP_INDEX 0xfffffffful

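Decoding one rmap word under this layout is a couple of mask operations; a self-contained sketch (the sample value is made up, and a 64-bit unsigned long is assumed):

#include <stdio.h>

#define RMAP_PRESENT 0x100000000ul	/* mirrors KVMPPC_RMAP_PRESENT */
#define RMAP_INDEX   0xfffffffful	/* mirrors KVMPPC_RMAP_INDEX */

int main(void)
{
	unsigned long rmap = RMAP_PRESENT | 0x1234;	/* present, HPT index 0x1234 */

	if (rmap & RMAP_PRESENT)
		printf("guest HPT index = 0x%lx\n", rmap & RMAP_INDEX);
	return 0;
}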
/* Low-order bits in kvm->arch.slot_phys[][] */
#define KVMPPC_PAGE_ORDER_MASK 0x1f
#define KVMPPC_PAGE_NO_CACHE HPTE_R_I /* 0x20 */
#define KVMPPC_PAGE_WRITETHRU HPTE_R_W /* 0x40 */
#define KVMPPC_GOT_PAGE 0x80

struct kvm_arch_memory_slot {
};

struct kvm_arch {
#ifdef CONFIG_KVM_BOOK3S_64_HV
	unsigned long hpt_virt;
	unsigned long ram_npages;
	unsigned long ram_psize;
	unsigned long ram_porder;
	struct kvmppc_pginfo *ram_pginfo;
	struct revmap_entry *revmap;
	unsigned int lpid;
	unsigned int host_lpid;
	unsigned long host_lpcr;
	unsigned long sdr1;
	unsigned long host_sdr1;
	int tlbie_lock;
	int n_rma_pages;
	unsigned long lpcr;
	unsigned long rmor;
	struct kvmppc_rma_info *rma;
	struct kvmppc_linear_info *rma;
	unsigned long vrma_slb_v;
	int rma_setup_done;
	int using_mmu_notifiers;
	struct list_head spapr_tce_tables;
	spinlock_t slot_phys_lock;
	unsigned long *slot_phys[KVM_MEM_SLOTS_NUM];
	int slot_npages[KVM_MEM_SLOTS_NUM];
	unsigned short last_vcpu[NR_CPUS];
	struct kvmppc_vcore *vcores[KVM_MAX_VCORES];
	struct kvmppc_linear_info *hpt_li;
#endif /* CONFIG_KVM_BOOK3S_64_HV */
};

@@ -318,10 +371,6 @@ struct kvm_vcpu_arch {
	u32 vrsave; /* also USPRG0 */
	u32 mmucr;
	ulong shadow_msr;
	ulong sprg4;
	ulong sprg5;
	ulong sprg6;
	ulong sprg7;
	ulong csrr0;
	ulong csrr1;
	ulong dsrr0;
@@ -329,16 +378,14 @@ struct kvm_vcpu_arch {
	ulong mcsrr0;
	ulong mcsrr1;
	ulong mcsr;
	ulong esr;
	u32 dec;
	u32 decar;
	u32 tbl;
	u32 tbu;
	u32 tcr;
	u32 tsr;
	ulong tsr; /* we need to perform set/clr_bits() which requires ulong */
	u32 ivor[64];
	ulong ivpr;
	u32 pir;
	u32 pvr;

	u32 shadow_pid;
@@ -427,9 +474,14 @@ struct kvm_vcpu_arch {
#ifdef CONFIG_KVM_BOOK3S_64_HV
	struct kvm_vcpu_arch_shared shregs;

	unsigned long pgfault_addr;
	long pgfault_index;
	unsigned long pgfault_hpte[2];

	struct list_head run_list;
	struct task_struct *run_task;
	struct kvm_run *kvm_run;
	pgd_t *pgdir;
#endif
};

@@ -438,4 +490,12 @@ struct kvm_vcpu_arch {
#define KVMPPC_VCPU_BUSY_IN_HOST 1
#define KVMPPC_VCPU_RUNNABLE 2

/* Values for vcpu->arch.io_gpr */
#define KVM_MMIO_REG_MASK 0x001f
#define KVM_MMIO_REG_EXT_MASK 0xffe0
#define KVM_MMIO_REG_GPR 0x0000
#define KVM_MMIO_REG_FPR 0x0020
#define KVM_MMIO_REG_QPR 0x0040
#define KVM_MMIO_REG_FQPR 0x0060

#endif /* __POWERPC_KVM_HOST_H__ */

@@ -22,6 +22,16 @@

#include <linux/types.h>

/*
 * Additions to this struct must only occur at the end, and should be
 * accompanied by a KVM_MAGIC_FEAT flag to advertise that they are present
 * (albeit not necessarily relevant to the current target hardware platform).
 *
 * Struct fields are always 32 or 64 bit aligned, depending on them being 32
 * or 64 bit wide respectively.
 *
 * See Documentation/virtual/kvm/ppc-pv.txt
 */
struct kvm_vcpu_arch_shared {
	__u64 scratch1;
	__u64 scratch2;
@@ -33,11 +43,35 @@ struct kvm_vcpu_arch_shared {
	__u64 sprg3;
	__u64 srr0;
	__u64 srr1;
	__u64 dar;
	__u64 dar; /* dear on BookE */
	__u64 msr;
	__u32 dsisr;
	__u32 int_pending; /* Tells the guest if we have an interrupt */
	__u32 sr[16];
	__u32 mas0;
	__u32 mas1;
	__u64 mas7_3;
	__u64 mas2;
	__u32 mas4;
	__u32 mas6;
	__u32 esr;
	__u32 pir;

	/*
	 * SPRG4-7 are user-readable, so we can only keep these consistent
	 * between the shared area and the real registers when there's an
	 * intervening exit to KVM.  This also applies to SPRG3 on some
	 * chips.
	 *
	 * This suffices for access by guest userspace, since in PR-mode
	 * KVM, an exit must occur when changing the guest's MSR[PR].
	 * If the guest kernel writes to SPRG3-7 via the shared area, it
	 * must also use the shared area for reading while in kernel space.
	 */
	__u64 sprg4;
	__u64 sprg5;
	__u64 sprg6;
	__u64 sprg7;
};

#define KVM_SC_MAGIC_R0 0x4b564d21 /* "KVM!" */
@@ -47,7 +81,10 @@ struct kvm_vcpu_arch_shared {

#define KVM_FEATURE_MAGIC_PAGE 1

#define KVM_MAGIC_FEAT_SR (1 << 0)
#define KVM_MAGIC_FEAT_SR (1 << 0)

/* MASn, ESR, PIR, and high SPRGs */
#define KVM_MAGIC_FEAT_MAS0_TO_SPRG7 (1 << 1)

#ifdef __KERNEL__

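A hedged guest-side sketch, not from the patch: before trusting the new fields, a guest would check the KVM_MAGIC_FEAT_MAS0_TO_SPRG7 bit advertised by the hypervisor. Here 'magic' and 'features' are assumed to come from the guest's magic-page setup.

/* Illustrative only: read SPRG4 via the shared area when advertised. */
static unsigned long guest_read_sprg4(struct kvm_vcpu_arch_shared *magic,
				      unsigned long features)
{
	if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
		return magic->sprg4;	/* consistent as of the last exit to KVM */
	return 0;	/* a real guest would fall back to mfspr here */
}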
@@ -66,6 +66,7 @@ extern int kvmppc_emulate_instruction(struct kvm_run *run,
extern int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu);
extern void kvmppc_emulate_dec(struct kvm_vcpu *vcpu);
extern u32 kvmppc_get_dec(struct kvm_vcpu *vcpu, u64 tb);
extern void kvmppc_decrementer_func(unsigned long data);
extern int kvmppc_sanity_check(struct kvm_vcpu *vcpu);

/* Core-specific hooks */
@@ -94,7 +95,7 @@ extern int kvmppc_core_vcpu_translate(struct kvm_vcpu *vcpu,
extern void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
extern void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu);

extern void kvmppc_core_deliver_interrupts(struct kvm_vcpu *vcpu);
extern void kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu);
extern int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong flags);
extern void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu);
@@ -120,15 +121,17 @@ extern long kvmppc_alloc_hpt(struct kvm *kvm);
extern void kvmppc_free_hpt(struct kvm *kvm);
extern long kvmppc_prepare_vrma(struct kvm *kvm,
			struct kvm_userspace_memory_region *mem);
extern void kvmppc_map_vrma(struct kvm *kvm,
			struct kvm_userspace_memory_region *mem);
extern void kvmppc_map_vrma(struct kvm_vcpu *vcpu,
			struct kvm_memory_slot *memslot, unsigned long porder);
extern int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu);
extern long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
			struct kvm_create_spapr_tce *args);
extern long kvm_vm_ioctl_allocate_rma(struct kvm *kvm,
			struct kvm_allocate_rma *rma);
extern struct kvmppc_rma_info *kvm_alloc_rma(void);
extern void kvm_release_rma(struct kvmppc_rma_info *ri);
extern struct kvmppc_linear_info *kvm_alloc_rma(void);
extern void kvm_release_rma(struct kvmppc_linear_info *ri);
extern struct kvmppc_linear_info *kvm_alloc_hpt(void);
extern void kvm_release_hpt(struct kvmppc_linear_info *li);
extern int kvmppc_core_init_vm(struct kvm *kvm);
extern void kvmppc_core_destroy_vm(struct kvm *kvm);
extern int kvmppc_core_prepare_memory_region(struct kvm *kvm,
@@ -175,6 +178,9 @@ int kvmppc_core_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
void kvmppc_get_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
int kvmppc_set_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);

int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg);
int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg);

void kvmppc_set_pid(struct kvm_vcpu *vcpu, u32 pid);

#ifdef CONFIG_KVM_BOOK3S_64_HV
@@ -183,14 +189,19 @@ static inline void kvmppc_set_xics_phys(int cpu, unsigned long addr)
	paca[cpu].kvm_hstate.xics_phys = addr;
}

extern void kvm_rma_init(void);
extern void kvm_linear_init(void);

#else
static inline void kvmppc_set_xics_phys(int cpu, unsigned long addr)
{}

static inline void kvm_rma_init(void)
static inline void kvm_linear_init(void)
{}
#endif

int kvm_vcpu_ioctl_config_tlb(struct kvm_vcpu *vcpu,
			      struct kvm_config_tlb *cfg);
int kvm_vcpu_ioctl_dirty_tlb(struct kvm_vcpu *vcpu,
			     struct kvm_dirty_tlb *cfg);

#endif /* __POWERPC_KVM_PPC_H__ */

@@ -41,9 +41,10 @@
/* MAS registers bit definitions */

#define MAS0_TLBSEL(x) (((x) << 28) & 0x30000000)
#define MAS0_ESEL(x) (((x) << 16) & 0x0FFF0000)
#define MAS0_NV(x) ((x) & 0x00000FFF)
#define MAS0_ESEL_MASK 0x0FFF0000
#define MAS0_ESEL_SHIFT 16
#define MAS0_ESEL(x) (((x) << MAS0_ESEL_SHIFT) & MAS0_ESEL_MASK)
#define MAS0_NV(x) ((x) & 0x00000FFF)
#define MAS0_HES 0x00004000
#define MAS0_WQ_ALLWAYS 0x00000000
#define MAS0_WQ_COND 0x00001000
@@ -167,6 +168,7 @@
#define TLBnCFG_MAXSIZE 0x000f0000 /* Maximum Page Size (v1.0) */
#define TLBnCFG_MAXSIZE_SHIFT 16
#define TLBnCFG_ASSOC 0xff000000 /* Associativity */
#define TLBnCFG_ASSOC_SHIFT 24

/* TLBnPS encoding */
#define TLBnPS_4K 0x00000004

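The point of splitting out MAS0_ESEL_MASK and MAS0_ESEL_SHIFT is that callers can now decode the field as well as encode it. A small self-contained sketch (macros copied from above):

#include <stdio.h>

#define MAS0_ESEL_MASK 0x0FFF0000
#define MAS0_ESEL_SHIFT 16
#define MAS0_ESEL(x) (((x) << MAS0_ESEL_SHIFT) & MAS0_ESEL_MASK)

int main(void)
{
	unsigned int mas0 = MAS0_ESEL(42);	/* encode entry select 42 */
	unsigned int esel = (mas0 & MAS0_ESEL_MASK) >> MAS0_ESEL_SHIFT;

	printf("esel = %u\n", esel);		/* prints 42 */
	return 0;
}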
@@ -108,11 +108,11 @@ extern char initial_stab[];
#define HPTE_V_VRMA_MASK ASM_CONST(0x4001ffffff000000)

/* Values for PP (assumes Ks=0, Kp=1) */
/* pp0 will always be 0 for linux */
#define PP_RWXX 0 /* Supervisor read/write, User none */
#define PP_RWRX 1 /* Supervisor read/write, User read */
#define PP_RWRW 2 /* Supervisor read/write, User read/write */
#define PP_RXRX 3 /* Supervisor read, User read */
#define PP_RXXX (HPTE_R_PP0 | 2) /* Supervisor read, user none */

#ifndef __ASSEMBLY__

@@ -45,6 +45,7 @@
#define PPC_INST_MFSPR_DSCR_MASK 0xfc1fffff
#define PPC_INST_MTSPR_DSCR 0x7c1103a6
#define PPC_INST_MTSPR_DSCR_MASK 0xfc1fffff
#define PPC_INST_SLBFEE 0x7c0007a7

#define PPC_INST_STRING 0x7c00042a
#define PPC_INST_STRING_MASK 0xfc0007fe
@@ -183,7 +184,8 @@
					__PPC_RS(t) | __PPC_RA(a) | __PPC_RB(b))
#define PPC_ERATSX_DOT(t, a, w)	stringify_in_c(.long PPC_INST_ERATSX_DOT | \
					__PPC_RS(t) | __PPC_RA(a) | __PPC_RB(b))

#define PPC_SLBFEE_DOT(t, b)	stringify_in_c(.long PPC_INST_SLBFEE | \
					__PPC_RT(t) | __PPC_RB(b))

/*
 * Define what the VSX XX1 form instructions will look like, then add

@@ -216,6 +216,7 @@
#define DSISR_ISSTORE 0x02000000 /* access was a store */
#define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */
#define DSISR_NOSEGMENT 0x00200000 /* STAB/SLB miss */
#define DSISR_KEYFAULT 0x00200000 /* Key fault */
#define SPRN_TBRL 0x10C /* Time Base Read Lower Register (user, R/O) */
#define SPRN_TBRU 0x10D /* Time Base Read Upper Register (user, R/O) */
#define SPRN_TBWL 0x11C /* Time Base Lower Register (super, R/W) */
@@ -237,6 +238,7 @@
#define LPCR_ISL (1ul << (63-2))
#define LPCR_VC_SH (63-2)
#define LPCR_DPFD_SH (63-11)
#define LPCR_VRMASD (0x1ful << (63-16))
#define LPCR_VRMA_L (1ul << (63-12))
#define LPCR_VRMA_LP0 (1ul << (63-15))
#define LPCR_VRMA_LP1 (1ul << (63-16))
@@ -493,6 +495,9 @@
#define SPRN_SPRG7 0x117 /* Special Purpose Register General 7 */
#define SPRN_SRR0 0x01A /* Save/Restore Register 0 */
#define SPRN_SRR1 0x01B /* Save/Restore Register 1 */
#define SRR1_ISI_NOPT 0x40000000 /* ISI: Not found in hash */
#define SRR1_ISI_N_OR_G 0x10000000 /* ISI: Access is no-exec or G */
#define SRR1_ISI_PROT 0x08000000 /* ISI: Other protection fault */
#define SRR1_WAKEMASK 0x00380000 /* reason for wakeup */
#define SRR1_WAKESYSERR 0x00300000 /* System error */
#define SRR1_WAKEEE 0x00200000 /* External interrupt */