Merge commit 'origin/master' into next
@@ -35,3 +35,4 @@ unifdef-y += spu_info.h
 unifdef-y += termios.h
 unifdef-y += types.h
 unifdef-y += unistd.h
+unifdef-y += swab.h
@@ -5,7 +5,7 @@
  * PowerPC atomic operations
  */

-typedef struct { int counter; } atomic_t;
+#include <linux/types.h>

 #ifdef __KERNEL__
 #include <linux/compiler.h>
@@ -251,8 +251,6 @@ static __inline__ int atomic_dec_if_positive(atomic_t *v)

 #ifdef __powerpc64__

-typedef struct { long counter; } atomic64_t;
-
 #define ATOMIC64_INIT(i)	{ (i) }

 static __inline__ long atomic64_read(const atomic64_t *v)
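Both atomic.h hunks drop the powerpc-local atomic_t/atomic64_t typedefs in favour of the shared definitions now pulled in through <linux/types.h>. For orientation only — this is an assumption about the common header, not part of the diff itself — the consolidated definitions are expected to look roughly like this:

/* Assumed shape of the consolidated typedefs in <linux/types.h>;
 * shown only to explain why the arch-local typedefs can go away. */
typedef struct {
	int counter;
} atomic_t;

#ifdef CONFIG_64BIT
typedef struct {
	long counter;
} atomic64_t;
#endif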
@@ -8,86 +8,7 @@
  * 2 of the License, or (at your option) any later version.
  */

-#include <asm/types.h>
-#include <linux/compiler.h>
-
-#define __BIG_ENDIAN
-
-#ifdef __GNUC__
-#ifdef __KERNEL__
-
-static __inline__ __u16 ld_le16(const volatile __u16 *addr)
-{
-	__u16 val;
-
-	__asm__ __volatile__ ("lhbrx %0,0,%1" : "=r" (val) : "r" (addr), "m" (*addr));
-	return val;
-}
-#define __arch_swab16p ld_le16
-
-static __inline__ void st_le16(volatile __u16 *addr, const __u16 val)
-{
-	__asm__ __volatile__ ("sthbrx %1,0,%2" : "=m" (*addr) : "r" (val), "r" (addr));
-}
-
-static inline void __arch_swab16s(__u16 *addr)
-{
-	st_le16(addr, *addr);
-}
-#define __arch_swab16s __arch_swab16s
-
-static __inline__ __u32 ld_le32(const volatile __u32 *addr)
-{
-	__u32 val;
-
-	__asm__ __volatile__ ("lwbrx %0,0,%1" : "=r" (val) : "r" (addr), "m" (*addr));
-	return val;
-}
-#define __arch_swab32p ld_le32
-
-static __inline__ void st_le32(volatile __u32 *addr, const __u32 val)
-{
-	__asm__ __volatile__ ("stwbrx %1,0,%2" : "=m" (*addr) : "r" (val), "r" (addr));
-}
-
-static inline void __arch_swab32s(__u32 *addr)
-{
-	st_le32(addr, *addr);
-}
-#define __arch_swab32s __arch_swab32s
-
-static inline __attribute_const__ __u16 __arch_swab16(__u16 value)
-{
-	__u16 result;
-
-	__asm__("rlwimi %0,%1,8,16,23"
-	    : "=r" (result)
-	    : "r" (value), "0" (value >> 8));
-	return result;
-}
-#define __arch_swab16 __arch_swab16
-
-static inline __attribute_const__ __u32 __arch_swab32(__u32 value)
-{
-	__u32 result;
-
-	__asm__("rlwimi %0,%1,24,16,23\n\t"
-	    "rlwimi %0,%1,8,8,15\n\t"
-	    "rlwimi %0,%1,24,0,7"
-	    : "=r" (result)
-	    : "r" (value), "0" (value >> 24));
-	return result;
-}
-#define __arch_swab32 __arch_swab32
-
-#endif /* __KERNEL__ */
-
-#ifndef __powerpc64__
-#define __SWAB_64_THRU_32__
-#endif /* __powerpc64__ */
-
-#endif /* __GNUC__ */
-
-#include <linux/byteorder.h>
+#include <asm/swab.h>
+#include <linux/byteorder/big_endian.h>

 #endif /* _ASM_POWERPC_BYTEORDER_H */
arch/powerpc/include/asm/disassemble.h (new file, 80 lines)
@@ -0,0 +1,80 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright IBM Corp. 2008
+ *
+ * Authors: Hollis Blanchard <hollisb@us.ibm.com>
+ */
+
+#ifndef __ASM_PPC_DISASSEMBLE_H__
+#define __ASM_PPC_DISASSEMBLE_H__
+
+#include <linux/types.h>
+
+static inline unsigned int get_op(u32 inst)
+{
+	return inst >> 26;
+}
+
+static inline unsigned int get_xop(u32 inst)
+{
+	return (inst >> 1) & 0x3ff;
+}
+
+static inline unsigned int get_sprn(u32 inst)
+{
+	return ((inst >> 16) & 0x1f) | ((inst >> 6) & 0x3e0);
+}
+
+static inline unsigned int get_dcrn(u32 inst)
+{
+	return ((inst >> 16) & 0x1f) | ((inst >> 6) & 0x3e0);
+}
+
+static inline unsigned int get_rt(u32 inst)
+{
+	return (inst >> 21) & 0x1f;
+}
+
+static inline unsigned int get_rs(u32 inst)
+{
+	return (inst >> 21) & 0x1f;
+}
+
+static inline unsigned int get_ra(u32 inst)
+{
+	return (inst >> 16) & 0x1f;
+}
+
+static inline unsigned int get_rb(u32 inst)
+{
+	return (inst >> 11) & 0x1f;
+}
+
+static inline unsigned int get_rc(u32 inst)
+{
+	return inst & 0x1;
+}
+
+static inline unsigned int get_ws(u32 inst)
+{
+	return (inst >> 11) & 0x1f;
+}
+
+static inline unsigned int get_d(u32 inst)
+{
+	return inst & 0xffff;
+}
+
+#endif /* __ASM_PPC_DISASSEMBLE_H__ */
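The helpers in disassemble.h simply mask fixed fields out of a 32-bit PowerPC instruction word: the primary opcode sits in the top six bits, the register operands at bit offsets 21/16/11, the extended opcode in bits 1-10, and get_sprn/get_dcrn additionally reassemble the split SPR/DCR number. The stand-alone check below is not part of the patch; it is a hypothetical test that builds an instruction word from those fields and confirms the same shifts and masks recover them.

#include <assert.h>
#include <stdint.h>

/* Hypothetical stand-alone check of the field layout the helpers assume:
 * op in bits 31..26, rt 25..21, ra 20..16, rb 15..11, xop 10..1, rc 0. */
static uint32_t make_inst(uint32_t op, uint32_t rt, uint32_t ra,
			  uint32_t rb, uint32_t xop, uint32_t rc)
{
	return (op << 26) | (rt << 21) | (ra << 16) | (rb << 11) |
	       (xop << 1) | rc;
}

int main(void)
{
	uint32_t inst = make_inst(31, 3, 4, 5, 339, 0);

	assert((inst >> 26) == 31);		/* get_op()  */
	assert(((inst >> 21) & 0x1f) == 3);	/* get_rt()  */
	assert(((inst >> 16) & 0x1f) == 4);	/* get_ra()  */
	assert(((inst >> 11) & 0x1f) == 5);	/* get_rb()  */
	assert(((inst >> 1) & 0x3ff) == 339);	/* get_xop() */
	assert((inst & 0x1) == 0);		/* get_rc()  */
	return 0;
}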
@@ -17,6 +17,12 @@ void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
 pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
 			      pte_t *ptep);

+/*
+ * The version of vma_mmu_pagesize() in arch/powerpc/mm/hugetlbpage.c needs
+ * to override the version in mm/hugetlb.c
+ */
+#define vma_mmu_pagesize vma_mmu_pagesize
+
 /*
  * If the arch doesn't supply something else, assume that hugepage
  * size aligned regions are ok without further preparation.
arch/powerpc/include/asm/kvm_44x.h (new file, 61 lines)
@@ -0,0 +1,61 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright IBM Corp. 2008
+ *
+ * Authors: Hollis Blanchard <hollisb@us.ibm.com>
+ */
+
+#ifndef __ASM_44X_H__
+#define __ASM_44X_H__
+
+#include <linux/kvm_host.h>
+
+#define PPC44x_TLB_SIZE 64
+
+/* If the guest is expecting it, this can be as large as we like; we'd just
+ * need to find some way of advertising it. */
+#define KVM44x_GUEST_TLB_SIZE 64
+
+struct kvmppc_44x_shadow_ref {
+	struct page *page;
+	u16 gtlb_index;
+	u8 writeable;
+	u8 tid;
+};
+
+struct kvmppc_vcpu_44x {
+	/* Unmodified copy of the guest's TLB. */
+	struct kvmppc_44x_tlbe guest_tlb[KVM44x_GUEST_TLB_SIZE];
+
+	/* References to guest pages in the hardware TLB. */
+	struct kvmppc_44x_shadow_ref shadow_refs[PPC44x_TLB_SIZE];
+
+	/* State of the shadow TLB at guest context switch time. */
+	struct kvmppc_44x_tlbe shadow_tlb[PPC44x_TLB_SIZE];
+	u8 shadow_tlb_mod[PPC44x_TLB_SIZE];
+
+	struct kvm_vcpu vcpu;
+};
+
+static inline struct kvmppc_vcpu_44x *to_44x(struct kvm_vcpu *vcpu)
+{
+	return container_of(vcpu, struct kvmppc_vcpu_44x, vcpu);
+}
+
+void kvmppc_set_pid(struct kvm_vcpu *vcpu, u32 new_pid);
+void kvmppc_44x_tlb_put(struct kvm_vcpu *vcpu);
+void kvmppc_44x_tlb_load(struct kvm_vcpu *vcpu);
+
+#endif /* __ASM_44X_H__ */
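to_44x() above is the usual container_of idiom: the generic struct kvm_vcpu is embedded inside the 440-specific vcpu structure, so subtracting the member's offset from a pointer to the member recovers the containing structure. A minimal stand-alone illustration with made-up types (not kernel code) follows.

#include <stddef.h>
#include <stdio.h>

/* Hypothetical stand-in types; the real structs live in the headers above. */
struct vcpu_generic { int id; };

struct vcpu_44x {
	int guest_tlb_entries;
	struct vcpu_generic vcpu;	/* embedded generic part */
};

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static struct vcpu_44x *to_44x(struct vcpu_generic *v)
{
	return container_of(v, struct vcpu_44x, vcpu);
}

int main(void)
{
	struct vcpu_44x x = { .guest_tlb_entries = 64, .vcpu = { .id = 1 } };
	struct vcpu_generic *g = &x.vcpu;

	/* Recovers &x from a pointer to its embedded member. */
	printf("%d\n", to_44x(g)->guest_tlb_entries);
	return 0;
}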
@@ -64,27 +64,58 @@ struct kvm_vcpu_stat {
 	u32 halt_wakeup;
 };

-struct tlbe {
+struct kvmppc_44x_tlbe {
 	u32 tid; /* Only the low 8 bits are used. */
 	u32 word0;
 	u32 word1;
 	u32 word2;
 };

+enum kvm_exit_types {
+	MMIO_EXITS,
+	DCR_EXITS,
+	SIGNAL_EXITS,
+	ITLB_REAL_MISS_EXITS,
+	ITLB_VIRT_MISS_EXITS,
+	DTLB_REAL_MISS_EXITS,
+	DTLB_VIRT_MISS_EXITS,
+	SYSCALL_EXITS,
+	ISI_EXITS,
+	DSI_EXITS,
+	EMULATED_INST_EXITS,
+	EMULATED_MTMSRWE_EXITS,
+	EMULATED_WRTEE_EXITS,
+	EMULATED_MTSPR_EXITS,
+	EMULATED_MFSPR_EXITS,
+	EMULATED_MTMSR_EXITS,
+	EMULATED_MFMSR_EXITS,
+	EMULATED_TLBSX_EXITS,
+	EMULATED_TLBWE_EXITS,
+	EMULATED_RFI_EXITS,
+	DEC_EXITS,
+	EXT_INTR_EXITS,
+	HALT_WAKEUP,
+	USR_PR_INST,
+	FP_UNAVAIL,
+	DEBUG_EXITS,
+	TIMEINGUEST,
+	__NUMBER_OF_KVM_EXIT_TYPES
+};
+
+/* allow access to big endian 32bit upper/lower parts and 64bit var */
+struct kvmppc_exit_timing {
+	union {
+		u64 tv64;
+		struct {
+			u32 tbu, tbl;
+		} tv32;
+	};
+};
+
 struct kvm_arch {
 };

 struct kvm_vcpu_arch {
-	/* Unmodified copy of the guest's TLB. */
-	struct tlbe guest_tlb[PPC44x_TLB_SIZE];
-	/* TLB that's actually used when the guest is running. */
-	struct tlbe shadow_tlb[PPC44x_TLB_SIZE];
-	/* Pages which are referenced in the shadow TLB. */
-	struct page *shadow_pages[PPC44x_TLB_SIZE];
-
-	/* Track which TLB entries we've modified in the current exit. */
-	u8 shadow_tlb_mod[PPC44x_TLB_SIZE];
-
 	u32 host_stack;
 	u32 host_pid;
 	u32 host_dbcr0;
@@ -94,32 +125,32 @@ struct kvm_vcpu_arch {
 	u32 host_msr;

 	u64 fpr[32];
-	u32 gpr[32];
+	ulong gpr[32];

-	u32 pc;
+	ulong pc;
 	u32 cr;
-	u32 ctr;
-	u32 lr;
-	u32 xer;
+	ulong ctr;
+	ulong lr;
+	ulong xer;

-	u32 msr;
+	ulong msr;
 	u32 mmucr;
-	u32 sprg0;
-	u32 sprg1;
-	u32 sprg2;
-	u32 sprg3;
-	u32 sprg4;
-	u32 sprg5;
-	u32 sprg6;
-	u32 sprg7;
-	u32 srr0;
-	u32 srr1;
-	u32 csrr0;
-	u32 csrr1;
-	u32 dsrr0;
-	u32 dsrr1;
-	u32 dear;
-	u32 esr;
+	ulong sprg0;
+	ulong sprg1;
+	ulong sprg2;
+	ulong sprg3;
+	ulong sprg4;
+	ulong sprg5;
+	ulong sprg6;
+	ulong sprg7;
+	ulong srr0;
+	ulong srr1;
+	ulong csrr0;
+	ulong csrr1;
+	ulong dsrr0;
+	ulong dsrr1;
+	ulong dear;
+	ulong esr;
 	u32 dec;
 	u32 decar;
 	u32 tbl;
@@ -127,7 +158,7 @@ struct kvm_vcpu_arch {
 	u32 tcr;
 	u32 tsr;
 	u32 ivor[16];
-	u32 ivpr;
+	ulong ivpr;
 	u32 pir;

 	u32 shadow_pid;
@@ -140,9 +171,22 @@ struct kvm_vcpu_arch {
 	u32 dbcr0;
 	u32 dbcr1;

+#ifdef CONFIG_KVM_EXIT_TIMING
+	struct kvmppc_exit_timing timing_exit;
+	struct kvmppc_exit_timing timing_last_enter;
+	u32 last_exit_type;
+	u32 timing_count_type[__NUMBER_OF_KVM_EXIT_TYPES];
+	u64 timing_sum_duration[__NUMBER_OF_KVM_EXIT_TYPES];
+	u64 timing_sum_quad_duration[__NUMBER_OF_KVM_EXIT_TYPES];
+	u64 timing_min_duration[__NUMBER_OF_KVM_EXIT_TYPES];
+	u64 timing_max_duration[__NUMBER_OF_KVM_EXIT_TYPES];
+	u64 timing_last_exit;
+	struct dentry *debugfs_exit_timing;
+#endif
+
 	u32 last_inst;
-	u32 fault_dear;
-	u32 fault_esr;
+	ulong fault_dear;
+	ulong fault_esr;
 	gpa_t paddr_accessed;

 	u8 io_gpr; /* GPR used as IO source/target */
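The kvmppc_exit_timing structure added in the first kvm_host.h hunk overlays one 64-bit timebase snapshot with its 32-bit upper/lower words, which works as its comment says only because the platform is big-endian: tbu lands in the most significant half of tv64. Below is a small stand-alone sketch of that layout using plain stdint types; it is illustrative only, and the combined value comes out as shown only when compiled for a big-endian target.

#include <stdint.h>
#include <stdio.h>

/* Sketch of the tv32/tv64 overlay used for exit timing. */
union exit_timing {
	uint64_t tv64;
	struct {
		uint32_t tbu, tbl;	/* timebase upper, lower */
	} tv32;
};

int main(void)
{
	union exit_timing t;

	t.tv32.tbu = 0x00000001;	/* upper timebase word */
	t.tv32.tbl = 0x00000002;	/* lower timebase word */

	/* On a big-endian target this prints 0x0000000100000002;
	 * on a little-endian host the halves land the other way around. */
	printf("tv64 = 0x%016llx\n", (unsigned long long)t.tv64);
	return 0;
}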
@@ -29,11 +29,6 @@
 #include <linux/kvm_types.h>
 #include <linux/kvm_host.h>

-struct kvm_tlb {
-	struct tlbe guest_tlb[PPC44x_TLB_SIZE];
-	struct tlbe shadow_tlb[PPC44x_TLB_SIZE];
-};
-
 enum emulation_result {
 	EMULATE_DONE,		/* no further processing */
 	EMULATE_DO_MMIO,	/* kvm_run filled with MMIO request */
@@ -41,9 +36,6 @@ enum emulation_result {
 	EMULATE_FAIL,		/* can't emulate this instruction */
 };

-extern const unsigned char exception_priority[];
-extern const unsigned char priority_exception[];
-
 extern int __kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu);
 extern char kvmppc_handlers_start[];
 extern unsigned long kvmppc_handler_len;
@@ -58,51 +50,44 @@ extern int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
 extern int kvmppc_emulate_instruction(struct kvm_run *run,
                                       struct kvm_vcpu *vcpu);
 extern int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu);
 extern void kvmppc_emulate_dec(struct kvm_vcpu *vcpu);

-extern void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gfn_t gfn,
-                           u64 asid, u32 flags);
-extern void kvmppc_mmu_invalidate(struct kvm_vcpu *vcpu, gva_t eaddr,
-                                  gva_t eend, u32 asid);
+extern void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gpa_t gpaddr,
+                           u64 asid, u32 flags, u32 max_bytes,
+                           unsigned int gtlb_idx);
 extern void kvmppc_mmu_priv_switch(struct kvm_vcpu *vcpu, int usermode);
 extern void kvmppc_mmu_switch_pid(struct kvm_vcpu *vcpu, u32 pid);

-/* XXX Book E specific */
-extern void kvmppc_tlbe_set_modified(struct kvm_vcpu *vcpu, unsigned int i);
+/* Core-specific hooks */

-extern void kvmppc_check_and_deliver_interrupts(struct kvm_vcpu *vcpu);
+extern struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm,
+                                                unsigned int id);
+extern void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu);
+extern int kvmppc_core_vcpu_setup(struct kvm_vcpu *vcpu);
+extern int kvmppc_core_check_processor_compat(void);
+extern int kvmppc_core_vcpu_translate(struct kvm_vcpu *vcpu,
+                                      struct kvm_translation *tr);

-static inline void kvmppc_queue_exception(struct kvm_vcpu *vcpu, int exception)
-{
-	unsigned int priority = exception_priority[exception];
-	set_bit(priority, &vcpu->arch.pending_exceptions);
-}
+extern void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
+extern void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu);

-static inline void kvmppc_clear_exception(struct kvm_vcpu *vcpu, int exception)
-{
-	unsigned int priority = exception_priority[exception];
-	clear_bit(priority, &vcpu->arch.pending_exceptions);
-}
+extern void kvmppc_core_load_guest_debugstate(struct kvm_vcpu *vcpu);
+extern void kvmppc_core_load_host_debugstate(struct kvm_vcpu *vcpu);

-/* Helper function for "full" MSR writes. No need to call this if only EE is
- * changing. */
-static inline void kvmppc_set_msr(struct kvm_vcpu *vcpu, u32 new_msr)
-{
-	if ((new_msr & MSR_PR) != (vcpu->arch.msr & MSR_PR))
-		kvmppc_mmu_priv_switch(vcpu, new_msr & MSR_PR);
+extern void kvmppc_core_deliver_interrupts(struct kvm_vcpu *vcpu);
+extern int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu);
+extern void kvmppc_core_queue_program(struct kvm_vcpu *vcpu);
+extern void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu);
+extern void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
+                                       struct kvm_interrupt *irq);

-	vcpu->arch.msr = new_msr;
+extern int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
+                                  unsigned int op, int *advance);
+extern int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs);
+extern int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt);

-	if (vcpu->arch.msr & MSR_WE)
-		kvm_vcpu_block(vcpu);
-}
-
-static inline void kvmppc_set_pid(struct kvm_vcpu *vcpu, u32 new_pid)
-{
-	if (vcpu->arch.pid != new_pid) {
-		vcpu->arch.pid = new_pid;
-		vcpu->arch.swap_pid = 1;
-	}
-}
+extern int kvmppc_booke_init(void);
+extern void kvmppc_booke_exit(void);

 extern void kvmppc_core_destroy_mmu(struct kvm_vcpu *vcpu);
@@ -56,6 +56,7 @@
 #ifndef __ASSEMBLY__

 extern unsigned int tlb_44x_hwater;
+extern unsigned int tlb_44x_index;

 typedef struct {
 	unsigned int	id;
arch/powerpc/include/asm/swab.h (new file, 90 lines)
@@ -0,0 +1,90 @@
+#ifndef _ASM_POWERPC_SWAB_H
+#define _ASM_POWERPC_SWAB_H
+
+/*
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <asm/types.h>
+#include <linux/compiler.h>
+
+#ifdef __GNUC__
+
+#ifndef __powerpc64__
+#define __SWAB_64_THRU_32__
+#endif /* __powerpc64__ */
+
+#ifdef __KERNEL__
+
+static __inline__ __u16 ld_le16(const volatile __u16 *addr)
+{
+	__u16 val;
+
+	__asm__ __volatile__ ("lhbrx %0,0,%1" : "=r" (val) : "r" (addr), "m" (*addr));
+	return val;
+}
+#define __arch_swab16p ld_le16
+
+static __inline__ void st_le16(volatile __u16 *addr, const __u16 val)
+{
+	__asm__ __volatile__ ("sthbrx %1,0,%2" : "=m" (*addr) : "r" (val), "r" (addr));
+}
+
+static inline void __arch_swab16s(__u16 *addr)
+{
+	st_le16(addr, *addr);
+}
+#define __arch_swab16s __arch_swab16s
+
+static __inline__ __u32 ld_le32(const volatile __u32 *addr)
+{
+	__u32 val;
+
+	__asm__ __volatile__ ("lwbrx %0,0,%1" : "=r" (val) : "r" (addr), "m" (*addr));
+	return val;
+}
+#define __arch_swab32p ld_le32
+
+static __inline__ void st_le32(volatile __u32 *addr, const __u32 val)
+{
+	__asm__ __volatile__ ("stwbrx %1,0,%2" : "=m" (*addr) : "r" (val), "r" (addr));
+}
+
+static inline void __arch_swab32s(__u32 *addr)
+{
+	st_le32(addr, *addr);
+}
+#define __arch_swab32s __arch_swab32s
+
+static inline __attribute_const__ __u16 __arch_swab16(__u16 value)
+{
+	__u16 result;
+
+	__asm__("rlwimi %0,%1,8,16,23"
+	    : "=r" (result)
+	    : "r" (value), "0" (value >> 8));
+	return result;
+}
+#define __arch_swab16 __arch_swab16
+
+static inline __attribute_const__ __u32 __arch_swab32(__u32 value)
+{
+	__u32 result;
+
+	__asm__("rlwimi %0,%1,24,16,23\n\t"
+	    "rlwimi %0,%1,8,8,15\n\t"
+	    "rlwimi %0,%1,24,0,7"
+	    : "=r" (result)
+	    : "r" (value), "0" (value >> 24));
+	return result;
+}
+#define __arch_swab32 __arch_swab32
+
+#endif /* __KERNEL__ */
+
+#endif /* __GNUC__ */
+
+#endif /* _ASM_POWERPC_SWAB_H */
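The rlwimi sequences in __arch_swab16/__arch_swab32 above perform the byte swaps entirely in registers. For reference, the portable C equivalents below show the intended result of that asm; they are illustrative only and not what the kernel actually compiles on powerpc.

#include <assert.h>
#include <stdint.h>

/* Portable equivalents of the rlwimi-based swaps, for checking intent. */
static uint16_t swab16_c(uint16_t x)
{
	return (uint16_t)((x >> 8) | (x << 8));
}

static uint32_t swab32_c(uint32_t x)
{
	return ((x >> 24) & 0x000000ff) |
	       ((x >>  8) & 0x0000ff00) |
	       ((x <<  8) & 0x00ff0000) |
	       ((x << 24) & 0xff000000);
}

int main(void)
{
	assert(swab16_c(0x1234) == 0x3412);
	assert(swab32_c(0x12345678) == 0x78563412);
	return 0;
}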
@@ -22,11 +22,11 @@ static inline cpumask_t node_to_cpumask(int node)
 	return numa_cpumask_lookup_table[node];
 }

+#define cpumask_of_node(node) (&numa_cpumask_lookup_table[node])
+
 static inline int node_to_first_cpu(int node)
 {
-	cpumask_t tmp;
-	tmp = node_to_cpumask(node);
-	return first_cpu(tmp);
+	return cpumask_first(cpumask_of_node(node));
 }

 int of_node_to_nid(struct device_node *device);
@@ -46,9 +46,12 @@ static inline int pcibus_to_node(struct pci_bus *bus)
 				 node_to_cpumask(pcibus_to_node(bus)) \
 				)

+#define cpumask_of_pcibus(bus)	(pcibus_to_node(bus) == -1 ?		\
+				 cpu_all_mask :				\
+				 cpumask_of_node(pcibus_to_node(bus)))
+
 /* sched_domains SD_NODE_INIT for PPC64 machines */
 #define SD_NODE_INIT (struct sched_domain) {		\
 	.span			= CPU_MASK_NONE,	\
 	.parent			= NULL,			\
 	.child			= NULL,			\
 	.groups			= NULL,			\
@@ -109,6 +112,8 @@ static inline void sysfs_remove_device_from_node(struct sys_device *dev,

 #define topology_thread_siblings(cpu)	(per_cpu(cpu_sibling_map, cpu))
 #define topology_core_siblings(cpu)	(per_cpu(cpu_core_map, cpu))
+#define topology_thread_cpumask(cpu)	(&per_cpu(cpu_sibling_map, cpu))
+#define topology_core_cpumask(cpu)	(&per_cpu(cpu_core_map, cpu))
 #define topology_core_id(cpu)		(cpu_to_core_id(cpu))
 #endif
 #endif
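The topology.h changes move from returning a cpumask_t by value (and scanning the copy with first_cpu()) to scanning through the pointer returned by the new cpumask_of_node() macro, which avoids a potentially large on-stack copy. The toy sketch below shows the pointer-based idiom with a stand-in bitmap type; it is not the real struct cpumask / cpumask_first() from <linux/cpumask.h>, only an illustration of the pattern.

#include <stdio.h>

/* Stand-in for a per-node CPU bitmap; one word is enough for the sketch. */
struct fake_cpumask { unsigned long bits; };

static int first_cpu_of(const struct fake_cpumask *m)
{
	/* Lowest set bit, GCC builtin; -1 when the mask is empty. */
	return m->bits ? __builtin_ctzl(m->bits) : -1;
}

int main(void)
{
	/* Node 0 owns CPUs 2 and 3 in this made-up lookup table. */
	static const struct fake_cpumask node_masks[] = { { 0xcUL } };

	/* Works through a pointer; no by-value mask copy on the stack. */
	printf("first cpu of node 0: %d\n", first_cpu_of(&node_masks[0]));
	return 0;
}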