Merge branch 'kvm-updates/3.4' of git://git.kernel.org/pub/scm/virt/kvm/kvm
Pull kvm updates from Avi Kivity:
 "Changes include timekeeping improvements, support for assigning host
  PCI devices that share interrupt lines, s390 user-controlled guests, a
  large ppc update, and random fixes."

This is with the sign-offs fixed; hopefully next merge window we won't
have rebased commits.

* 'kvm-updates/3.4' of git://git.kernel.org/pub/scm/virt/kvm/kvm: (130 commits)
  KVM: Convert intx_mask_lock to spin lock
  KVM: x86: fix kvm_write_tsc() TSC matching thinko
  x86: kvmclock: abstract save/restore sched_clock_state
  KVM: nVMX: Fix erroneous exception bitmap check
  KVM: Ignore the writes to MSR_K7_HWCR(3)
  KVM: MMU: make use of ->root_level in reset_rsvds_bits_mask
  KVM: PMU: add proper support for fixed counter 2
  KVM: PMU: Fix raw event check
  KVM: PMU: warn when pin control is set in eventsel msr
  KVM: VMX: Fix delayed load of shared MSRs
  KVM: use correct tlbs dirty type in cmpxchg
  KVM: Allow host IRQ sharing for assigned PCI 2.3 devices
  KVM: Ensure all vcpus are consistent with in-kernel irqchip settings
  KVM: x86 emulator: Allow PM/VM86 switch during task switch
  KVM: SVM: Fix CPL updates
  KVM: x86 emulator: VM86 segments must have DPL 3
  KVM: x86 emulator: Fix task switch privilege checks
  arch/powerpc/kvm/book3s_hv.c: included linux/sched.h twice
  KVM: x86 emulator: correctly mask pmc index bits in RDPMC instruction emulation
  KVM: mmu_notifier: Flush TLBs before releasing mmu_lock
  ...
--- a/arch/x86/include/asm/kvm.h
+++ b/arch/x86/include/asm/kvm.h
@@ -321,4 +321,8 @@ struct kvm_xcrs {
 	__u64 padding[16];
 };
 
+/* definition of registers in kvm_run */
+struct kvm_sync_regs {
+};
+
 #endif /* _ASM_X86_KVM_H */
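The empty kvm_sync_regs above is the x86 side of the new synchronous register sharing: an architecture can expose frequently-used registers in the kvm_run page so userspace avoids extra ioctls (the s390 patches in this pull are the first user; x86 keeps the struct empty for now). A minimal userspace sketch, assuming the kvm_valid_regs/kvm_dirty_regs fields and the run->s union this series adds to struct kvm_run:

    /* Illustrative only, not from this diff: check which register sets
     * the kernel synced into run->s.regs after KVM_RUN returns. */
    #include <linux/kvm.h>
    #include <sys/ioctl.h>

    static int run_vcpu(int vcpu_fd, struct kvm_run *run)
    {
            if (ioctl(vcpu_fd, KVM_RUN, 0) < 0)
                    return -1;
            if (run->kvm_valid_regs)        /* contents are arch-specific */
                    /* inspect run->s.regs here */;
            return 0;
    }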
--- a/arch/x86/include/asm/kvm_emulate.h
+++ b/arch/x86/include/asm/kvm_emulate.h
@@ -176,6 +176,7 @@ struct x86_emulate_ops {
 	void (*set_idt)(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt);
 	ulong (*get_cr)(struct x86_emulate_ctxt *ctxt, int cr);
 	int (*set_cr)(struct x86_emulate_ctxt *ctxt, int cr, ulong val);
+	void (*set_rflags)(struct x86_emulate_ctxt *ctxt, ulong val);
 	int (*cpl)(struct x86_emulate_ctxt *ctxt);
 	int (*get_dr)(struct x86_emulate_ctxt *ctxt, int dr, ulong *dest);
 	int (*set_dr)(struct x86_emulate_ctxt *ctxt, int dr, ulong value);
@@ -388,7 +389,7 @@ bool x86_page_table_writing_insn(struct x86_emulate_ctxt *ctxt);
 #define EMULATION_INTERCEPTED 2
 int x86_emulate_insn(struct x86_emulate_ctxt *ctxt);
 int emulator_task_switch(struct x86_emulate_ctxt *ctxt,
-			 u16 tss_selector, int reason,
+			 u16 tss_selector, int idt_index, int reason,
 			 bool has_error_code, u32 error_code);
 int emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq);
 #endif /* _ASM_X86_KVM_X86_EMULATE_H */
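The new idt_index parameter is what enables the task switch privilege checks fixed later in this series: a switch raised through an IDT gate carries its vector so the gate's DPL can be validated, while a direct CALL/JMP passes a sentinel and skips that check. A hedged sketch of a call site (TASK_SWITCH_GATE, EMULATION_OK and EMULATE_FAIL are real constants; the surrounding logic is illustrative):

    /* Illustrative only: dispatch a task switch through a task gate,
     * letting the emulator check the gate's DPL against CPL/RPL. */
    rc = emulator_task_switch(ctxt, tss_selector, idt_index,
                              TASK_SWITCH_GATE, has_error_code, error_code);
    if (rc != EMULATION_OK)
            return EMULATE_FAIL;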
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -29,7 +29,7 @@
 #include <asm/msr-index.h>
 
 #define KVM_MAX_VCPUS 254
-#define KVM_SOFT_MAX_VCPUS 64
+#define KVM_SOFT_MAX_VCPUS 160
 #define KVM_MEMORY_SLOTS 32
 /* memory slots that does not exposed to userspace */
 #define KVM_PRIVATE_MEM_SLOTS 4
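KVM_SOFT_MAX_VCPUS is the tested and recommended ceiling, distinct from the hard KVM_MAX_VCPUS ABI limit; raising it to 160 reflects the range being validated at the time. A sketch of the policy this split enables (the warning text and exact call site are assumptions, not taken from this diff):

    /* Creating vcpus past the soft limit still works, but is outside
     * the tested range, so warn once rather than fail. */
    if (atomic_read(&kvm->online_vcpus) >= KVM_SOFT_MAX_VCPUS)
            pr_warn_once("kvm: exceeding recommended vcpu limit (%d)\n",
                         KVM_SOFT_MAX_VCPUS);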
@@ -181,13 +181,6 @@ struct kvm_mmu_memory_cache {
 	void *objects[KVM_NR_MEM_OBJS];
 };
 
-#define NR_PTE_CHAIN_ENTRIES 5
-
-struct kvm_pte_chain {
-	u64 *parent_ptes[NR_PTE_CHAIN_ENTRIES];
-	struct hlist_node link;
-};
-
 /*
  * kvm_mmu_page_role, below, is defined as:
  *
@@ -427,12 +420,16 @@ struct kvm_vcpu_arch {
 
 	u64 last_guest_tsc;
 	u64 last_kernel_ns;
-	u64 last_tsc_nsec;
-	u64 last_tsc_write;
-	u32 virtual_tsc_khz;
+	u64 last_host_tsc;
+	u64 tsc_offset_adjustment;
+	u64 this_tsc_nsec;
+	u64 this_tsc_write;
+	u8 this_tsc_generation;
 	bool tsc_catchup;
-	u32 tsc_catchup_mult;
-	s8 tsc_catchup_shift;
+	bool tsc_always_catchup;
+	s8 virtual_tsc_shift;
+	u32 virtual_tsc_mult;
+	u32 virtual_tsc_khz;
 
 	atomic_t nmi_queued;  /* unprocessed asynchronous NMIs */
 	unsigned nmi_pending; /* NMI queued after currently running handler */
@@ -478,6 +475,21 @@ struct kvm_vcpu_arch {
 		u32 id;
 		bool send_user_only;
 	} apf;
+
+	/* OSVW MSRs (AMD only) */
+	struct {
+		u64 length;
+		u64 status;
+	} osvw;
 };
 
+struct kvm_lpage_info {
+	unsigned long rmap_pde;
+	int write_count;
+};
+
+struct kvm_arch_memory_slot {
+	struct kvm_lpage_info *lpage_info[KVM_NR_PAGE_SIZES - 1];
+};
+
 struct kvm_arch {
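The osvw pair backs AMD's OS Visible Workaround MSRs, letting a guest discover erratum status without touching real hardware state. A hedged sketch of the MSR plumbing these fields support (the MSR_AMD64_OSVW_* names are real msr-index.h constants; the handler shape is condensed and illustrative):

    /* In the rdmsr path, roughly: */
    case MSR_AMD64_OSVW_ID_LENGTH:
            data = vcpu->arch.osvw.length;
            break;
    case MSR_AMD64_OSVW_STATUS:
            data = vcpu->arch.osvw.status;
            break;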
@@ -511,8 +523,12 @@ struct kvm_arch {
 	s64 kvmclock_offset;
 	raw_spinlock_t tsc_write_lock;
 	u64 last_tsc_nsec;
-	u64 last_tsc_offset;
 	u64 last_tsc_write;
+	u32 last_tsc_khz;
+	u64 cur_tsc_nsec;
+	u64 cur_tsc_write;
+	u64 cur_tsc_offset;
+	u8 cur_tsc_generation;
 
 	struct kvm_xen_hvm_config xen_hvm_config;
 
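Together with the per-vcpu this_tsc_* fields above, the cur_tsc_* values implement generation-tracked TSC writes: vcpus whose TSCs are written to (nearly) the same value close together in time are folded into one generation and share a single (nsec, value) timeline, keeping SMP guests synchronized. A rough sketch of the matching in kvm_write_tsc(), assuming a hypothetical helper tsc_delta_within_one_tick() standing in for the real elapsed-time comparison:

    /* Illustrative: join the current generation on a match,
     * otherwise start a new timeline. */
    if (data == kvm->arch.last_tsc_write &&
        tsc_delta_within_one_tick(ns - kvm->arch.last_tsc_nsec)) {
            vcpu->arch.this_tsc_generation = kvm->arch.cur_tsc_generation;
    } else {
            kvm->arch.cur_tsc_generation++;
            kvm->arch.cur_tsc_nsec = ns;
            kvm->arch.cur_tsc_write = data;
    }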
@@ -644,7 +660,7 @@ struct kvm_x86_ops {
 	u64 (*get_mt_mask)(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio);
 	int (*get_lpage_level)(void);
 	bool (*rdtscp_supported)(void);
-	void (*adjust_tsc_offset)(struct kvm_vcpu *vcpu, s64 adjustment);
+	void (*adjust_tsc_offset)(struct kvm_vcpu *vcpu, s64 adjustment, bool host);
 
 	void (*set_tdp_cr3)(struct kvm_vcpu *vcpu, unsigned long cr3);
 
@@ -652,7 +668,7 @@ struct kvm_x86_ops {
 
 	bool (*has_wbinvd_exit)(void);
 
-	void (*set_tsc_khz)(struct kvm_vcpu *vcpu, u32 user_tsc_khz);
+	void (*set_tsc_khz)(struct kvm_vcpu *vcpu, u32 user_tsc_khz, bool scale);
 	void (*write_tsc_offset)(struct kvm_vcpu *vcpu, u64 offset);
 
 	u64 (*compute_tsc_offset)(struct kvm_vcpu *vcpu, u64 target_tsc);
@@ -674,6 +690,17 @@ struct kvm_arch_async_pf {
 
 extern struct kvm_x86_ops *kvm_x86_ops;
 
+static inline void adjust_tsc_offset_guest(struct kvm_vcpu *vcpu,
+					   s64 adjustment)
+{
+	kvm_x86_ops->adjust_tsc_offset(vcpu, adjustment, false);
+}
+
+static inline void adjust_tsc_offset_host(struct kvm_vcpu *vcpu, s64 adjustment)
+{
+	kvm_x86_ops->adjust_tsc_offset(vcpu, adjustment, true);
+}
+
 int kvm_mmu_module_init(void);
 void kvm_mmu_module_exit(void);
 
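The guest/host wrappers make the unit of the adjustment explicit: a value in guest cycles is applied directly, while a host-cycle value must first be scaled when the vcpu runs at a non-native TSC frequency. A hedged sketch of a backend honouring the flag (svm_scale_tsc() is the SVM scaling helper from this series; the body is condensed from memory, not a verbatim copy):

    static void svm_adjust_tsc_offset(struct kvm_vcpu *vcpu,
                                      s64 adjustment, bool host)
    {
            struct vcpu_svm *svm = to_svm(vcpu);

            if (host)       /* convert host cycles to guest cycles */
                    adjustment = svm_scale_tsc(vcpu, adjustment);
            svm->vmcb->control.tsc_offset += adjustment;
    }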
@@ -741,8 +768,8 @@ int kvm_emulate_wbinvd(struct kvm_vcpu *vcpu);
 void kvm_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
 int kvm_load_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector, int seg);
 
-int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason,
-		    bool has_error_code, u32 error_code);
+int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int idt_index,
+		    int reason, bool has_error_code, u32 error_code);
 
 int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
 int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3);
--- a/arch/x86/include/asm/perf_event.h
+++ b/arch/x86/include/asm/perf_event.h
@@ -23,6 +23,7 @@
 #define ARCH_PERFMON_EVENTSEL_USR			(1ULL << 16)
 #define ARCH_PERFMON_EVENTSEL_OS			(1ULL << 17)
 #define ARCH_PERFMON_EVENTSEL_EDGE			(1ULL << 18)
+#define ARCH_PERFMON_EVENTSEL_PIN_CONTROL		(1ULL << 19)
 #define ARCH_PERFMON_EVENTSEL_INT			(1ULL << 20)
 #define ARCH_PERFMON_EVENTSEL_ANY			(1ULL << 21)
 #define ARCH_PERFMON_EVENTSEL_ENABLE			(1ULL << 22)
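Bit 19 (pin control) would pulse a physical pin on counter events, which a virtual PMU cannot emulate; the new define exists so KVM's PMU emulation can detect and flag it, per the "warn when pin control is set in eventsel msr" commit in this pull. The check it enables looks roughly like:

    if (eventsel & ARCH_PERFMON_EVENTSEL_PIN_CONTROL)
            printk_once("kvm pmu: pin control bit is ignored\n");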
--- a/arch/x86/include/asm/tsc.h
+++ b/arch/x86/include/asm/tsc.h
@@ -61,7 +61,7 @@ extern void check_tsc_sync_source(int cpu);
 extern void check_tsc_sync_target(void);
 
 extern int notsc_setup(char *);
-extern void save_sched_clock_state(void);
-extern void restore_sched_clock_state(void);
+extern void tsc_save_sched_clock_state(void);
+extern void tsc_restore_sched_clock_state(void);
 
 #endif /* _ASM_X86_TSC_H */
--- a/arch/x86/include/asm/x86_init.h
+++ b/arch/x86/include/asm/x86_init.h
@@ -145,9 +145,11 @@ struct x86_init_ops {
 /**
  * struct x86_cpuinit_ops - platform specific cpu hotplug setups
  * @setup_percpu_clockev:	set up the per cpu clock event device
+ * @early_percpu_clock_init:	early init of the per cpu clock event device
  */
 struct x86_cpuinit_ops {
 	void (*setup_percpu_clockev)(void);
+	void (*early_percpu_clock_init)(void);
 	void (*fixup_cpu_id)(struct cpuinfo_x86 *c, int node);
 };
 
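The new early_percpu_clock_init hook runs earlier in secondary-CPU bringup than setup_percpu_clockev, so a paravirtual clock can be registered before anything on the new CPU calls sched_clock(). The kvmclock patch in this series wires it up roughly as below (the handler name is recalled from that patch, not quoted from this diff):

    /* In kvmclock's init path, roughly: */
    x86_cpuinit.early_percpu_clock_init = kvm_setup_secondary_clock;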
@@ -160,6 +162,8 @@ struct x86_cpuinit_ops {
  * @is_untracked_pat_range	exclude from PAT logic
  * @nmi_init			enable NMI on cpus
  * @i8042_detect		pre-detect if i8042 controller exists
+ * @save_sched_clock_state:	save state for sched_clock() on suspend
+ * @restore_sched_clock_state:	restore state for sched_clock() on resume
  */
 struct x86_platform_ops {
 	unsigned long (*calibrate_tsc)(void);
@@ -171,6 +175,8 @@ struct x86_platform_ops {
 	void (*nmi_init)(void);
 	unsigned char (*get_nmi_reason)(void);
 	int (*i8042_detect)(void);
+	void (*save_sched_clock_state)(void);
+	void (*restore_sched_clock_state)(void);
 };
 
 struct pci_dev;
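The tsc.h rename above and these two x86_platform hooks are halves of one change: the TSC implementations become the default save/restore handlers, and a kvmclock guest can override them so sched_clock() state survives suspend/resume. The default wiring, roughly as done in x86_init.c:

    struct x86_platform_ops x86_platform = {
            /* ... */
            .save_sched_clock_state         = tsc_save_sched_clock_state,
            .restore_sched_clock_state      = tsc_restore_sched_clock_state,
    };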