Merge branches 'x86/alternatives', 'x86/cleanups', 'x86/commandline', 'x86/crashdump', 'x86/debug', 'x86/defconfig', 'x86/doc', 'x86/exports', 'x86/fpu', 'x86/gart', 'x86/idle', 'x86/mm', 'x86/mtrr', 'x86/nmi-watchdog', 'x86/oprofile', 'x86/paravirt', 'x86/reboot', 'x86/sparse-fixes', 'x86/tsc', 'x86/urgent' and 'x86/vmalloc' into x86-v28-for-linus-phase1
@@ -81,9 +81,7 @@ extern int get_physical_broadcast(void);
 static inline void ack_APIC_irq(void)
 {
 	/*
-	 * ack_APIC_irq() actually gets compiled as a single instruction:
-	 *  - a single rmw on Pentium/82489DX
-	 *  - a single write on P6+ cores (CONFIG_X86_GOOD_APIC)
+	 * ack_APIC_irq() actually gets compiled as a single instruction
 	 * ... yummie.
 	 */
@@ -20,17 +20,22 @@

 #define _ASM_PTR __ASM_SEL(.long, .quad)
 #define _ASM_ALIGN __ASM_SEL(.balign 4, .balign 8)
-#define _ASM_MOV_UL __ASM_SIZE(mov)

+#define _ASM_MOV __ASM_SIZE(mov)
 #define _ASM_INC __ASM_SIZE(inc)
 #define _ASM_DEC __ASM_SIZE(dec)
 #define _ASM_ADD __ASM_SIZE(add)
 #define _ASM_SUB __ASM_SIZE(sub)
 #define _ASM_XADD __ASM_SIZE(xadd)
+
 #define _ASM_AX __ASM_REG(ax)
 #define _ASM_BX __ASM_REG(bx)
 #define _ASM_CX __ASM_REG(cx)
 #define _ASM_DX __ASM_REG(dx)
+#define _ASM_SP __ASM_REG(sp)
+#define _ASM_BP __ASM_REG(bp)
+#define _ASM_SI __ASM_REG(si)
+#define _ASM_DI __ASM_REG(di)

 /* Exception table entry */
 # define _ASM_EXTABLE(from,to) \
@@ -148,8 +148,9 @@ do { \

 static inline void start_ia32_thread(struct pt_regs *regs, u32 ip, u32 sp)
 {
-	asm volatile("movl %0,%%fs" :: "r" (0));
-	asm volatile("movl %0,%%es; movl %0,%%ds" : : "r" (__USER32_DS));
+	loadsegment(fs, 0);
+	loadsegment(ds, __USER32_DS);
+	loadsegment(es, __USER32_DS);
 	load_gs_index(0);
 	regs->ip = ip;
 	regs->sp = sp;
@@ -25,7 +25,7 @@
 	asm volatile("1:\tmovl %2, %0\n" \
 		     "\tmovl\t%0, %3\n" \
 		     "\t" insn "\n" \
-		     "2:\tlock; cmpxchgl %3, %2\n" \
+		     "2:\t" LOCK_PREFIX "cmpxchgl %3, %2\n" \
 		     "\tjnz\t1b\n" \
 		     "3:\t.section .fixup,\"ax\"\n" \
 		     "4:\tmov\t%5, %1\n" \
@@ -64,7 +64,7 @@ static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
 		__futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
 		break;
 	case FUTEX_OP_ADD:
-		__futex_atomic_op1("lock; xaddl %0, %2", ret, oldval,
+		__futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval,
 				   uaddr, oparg);
 		break;
 	case FUTEX_OP_OR:
@@ -122,7 +122,7 @@ static inline int futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval,
 	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
 		return -EFAULT;

-	asm volatile("1:\tlock; cmpxchgl %3, %1\n"
+	asm volatile("1:\t" LOCK_PREFIX "cmpxchgl %3, %1\n"
 		     "2:\t.section .fixup, \"ax\"\n"
 		     "3:\tmov %2, %0\n"
 		     "\tjmp 2b\n"
@@ -52,15 +52,15 @@ static inline int aperture_valid(u64 aper_base, u32 aper_size, u32 min_size)
 		return 0;

 	if (aper_base + aper_size > 0x100000000ULL) {
-		printk(KERN_ERR "Aperture beyond 4GB. Ignoring.\n");
+		printk(KERN_INFO "Aperture beyond 4GB. Ignoring.\n");
 		return 0;
 	}
 	if (e820_any_mapped(aper_base, aper_base + aper_size, E820_RAM)) {
-		printk(KERN_ERR "Aperture pointing to e820 RAM. Ignoring.\n");
+		printk(KERN_INFO "Aperture pointing to e820 RAM. Ignoring.\n");
 		return 0;
 	}
 	if (aper_size < min_size) {
-		printk(KERN_ERR "Aperture too small (%d MB) than (%d MB)\n",
+		printk(KERN_INFO "Aperture too small (%d MB) than (%d MB)\n",
 			aper_size>>20, min_size>>20);
 		return 0;
 	}
@@ -1,6 +1,8 @@
 #ifndef ASM_X86__MACH_RDC321X__GPIO_H
 #define ASM_X86__MACH_RDC321X__GPIO_H

+#include <linux/kernel.h>
+
 extern int rdc_gpio_get_value(unsigned gpio);
 extern void rdc_gpio_set_value(unsigned gpio, int value);
 extern int rdc_gpio_direction_input(unsigned gpio);
@@ -18,6 +20,7 @@ static inline int gpio_request(unsigned gpio, const char *label)

 static inline void gpio_free(unsigned gpio)
 {
+	might_sleep();
 	rdc_gpio_free(gpio);
 }

@@ -7,14 +7,9 @@
 /*
  * The x86 doesn't have a mmu context, but
  * we put the segment information here.
- *
- * cpu_vm_mask is used to optimize ldt flushing.
  */
 typedef struct {
 	void *ldt;
-#ifdef CONFIG_X86_64
-	rwlock_t ldtlock;
-#endif
 	int size;
 	struct mutex lock;
 	void *vdso;
@@ -63,6 +63,22 @@ static inline unsigned long long native_read_msr_safe(unsigned int msr,
 	return EAX_EDX_VAL(val, low, high);
 }

+static inline unsigned long long native_read_msr_amd_safe(unsigned int msr,
+					int *err)
+{
+	DECLARE_ARGS(val, low, high);
+
+	asm volatile("2: rdmsr ; xor %0,%0\n"
+		     "1:\n\t"
+		     ".section .fixup,\"ax\"\n\t"
+		     "3: mov %3,%0 ; jmp 1b\n\t"
+		     ".previous\n\t"
+		     _ASM_EXTABLE(2b, 3b)
+		     : "=r" (*err), EAX_EDX_RET(val, low, high)
+		     : "c" (msr), "D" (0x9c5a203a), "i" (-EFAULT));
+	return EAX_EDX_VAL(val, low, high);
+}
+
 static inline void native_write_msr(unsigned int msr,
 				    unsigned low, unsigned high)
 {
@@ -158,6 +174,13 @@ static inline int rdmsrl_safe(unsigned msr, unsigned long long *p)
 	*p = native_read_msr_safe(msr, &err);
 	return err;
 }
+static inline int rdmsrl_amd_safe(unsigned msr, unsigned long long *p)
+{
+	int err;
+
+	*p = native_read_msr_amd_safe(msr, &err);
+	return err;
+}

 #define rdtscl(low) \
 	((low) = (u32)native_read_tsc())
@@ -34,6 +34,7 @@ extern void stop_apic_nmi_watchdog(void *);
 extern void disable_timer_nmi_watchdog(void);
 extern void enable_timer_nmi_watchdog(void);
 extern int nmi_watchdog_tick(struct pt_regs *regs, unsigned reason);
+extern void cpu_nmi_set_wd_enabled(void);

 extern atomic_t nmi_active;
 extern unsigned int nmi_watchdog;
@@ -89,9 +89,6 @@ extern int nx_enabled;
 extern unsigned int __VMALLOC_RESERVE;
 extern int sysctl_legacy_va_layout;

-#define VMALLOC_RESERVE ((unsigned long)__VMALLOC_RESERVE)
-#define MAXMEM (-__PAGE_OFFSET - __VMALLOC_RESERVE)
-
 extern void find_low_pfn_range(void);
 extern unsigned long init_memory_mapping(unsigned long start,
 					 unsigned long end);
@@ -137,6 +137,7 @@ struct pv_cpu_ops {

 	/* MSR, PMC and TSR operations.
 	   err = 0/-EFAULT. wrmsr returns 0/-EFAULT. */
+	u64 (*read_msr_amd)(unsigned int msr, int *err);
 	u64 (*read_msr)(unsigned int msr, int *err);
 	int (*write_msr)(unsigned int msr, unsigned low, unsigned high);

@@ -257,13 +258,13 @@ struct pv_mmu_ops {
 	 * Hooks for allocating/releasing pagetable pages when they're
 	 * attached to a pagetable
 	 */
-	void (*alloc_pte)(struct mm_struct *mm, u32 pfn);
-	void (*alloc_pmd)(struct mm_struct *mm, u32 pfn);
-	void (*alloc_pmd_clone)(u32 pfn, u32 clonepfn, u32 start, u32 count);
-	void (*alloc_pud)(struct mm_struct *mm, u32 pfn);
-	void (*release_pte)(u32 pfn);
-	void (*release_pmd)(u32 pfn);
-	void (*release_pud)(u32 pfn);
+	void (*alloc_pte)(struct mm_struct *mm, unsigned long pfn);
+	void (*alloc_pmd)(struct mm_struct *mm, unsigned long pfn);
+	void (*alloc_pmd_clone)(unsigned long pfn, unsigned long clonepfn, unsigned long start, unsigned long count);
+	void (*alloc_pud)(struct mm_struct *mm, unsigned long pfn);
+	void (*release_pte)(unsigned long pfn);
+	void (*release_pmd)(unsigned long pfn);
+	void (*release_pud)(unsigned long pfn);

 	/* Pagetable manipulation functions */
 	void (*set_pte)(pte_t *ptep, pte_t pteval);
@@ -726,6 +727,10 @@ static inline u64 paravirt_read_msr(unsigned msr, int *err)
 {
 	return PVOP_CALL2(u64, pv_cpu_ops.read_msr, msr, err);
 }
+static inline u64 paravirt_read_msr_amd(unsigned msr, int *err)
+{
+	return PVOP_CALL2(u64, pv_cpu_ops.read_msr_amd, msr, err);
+}
 static inline int paravirt_write_msr(unsigned msr, unsigned low, unsigned high)
 {
 	return PVOP_CALL3(int, pv_cpu_ops.write_msr, msr, low, high);
@@ -771,6 +776,13 @@ static inline int rdmsrl_safe(unsigned msr, unsigned long long *p)
 	*p = paravirt_read_msr(msr, &err);
 	return err;
 }
+static inline int rdmsrl_amd_safe(unsigned msr, unsigned long long *p)
+{
+	int err;
+
+	*p = paravirt_read_msr_amd(msr, &err);
+	return err;
+}

 static inline u64 paravirt_read_tsc(void)
 {
@@ -993,35 +1005,35 @@ static inline void paravirt_pgd_free(struct mm_struct *mm, pgd_t *pgd)
 	PVOP_VCALL2(pv_mmu_ops.pgd_free, mm, pgd);
 }

-static inline void paravirt_alloc_pte(struct mm_struct *mm, unsigned pfn)
+static inline void paravirt_alloc_pte(struct mm_struct *mm, unsigned long pfn)
 {
 	PVOP_VCALL2(pv_mmu_ops.alloc_pte, mm, pfn);
 }
-static inline void paravirt_release_pte(unsigned pfn)
+static inline void paravirt_release_pte(unsigned long pfn)
 {
 	PVOP_VCALL1(pv_mmu_ops.release_pte, pfn);
 }

-static inline void paravirt_alloc_pmd(struct mm_struct *mm, unsigned pfn)
+static inline void paravirt_alloc_pmd(struct mm_struct *mm, unsigned long pfn)
 {
 	PVOP_VCALL2(pv_mmu_ops.alloc_pmd, mm, pfn);
 }

-static inline void paravirt_alloc_pmd_clone(unsigned pfn, unsigned clonepfn,
-					    unsigned start, unsigned count)
+static inline void paravirt_alloc_pmd_clone(unsigned long pfn, unsigned long clonepfn,
+					    unsigned long start, unsigned long count)
 {
 	PVOP_VCALL4(pv_mmu_ops.alloc_pmd_clone, pfn, clonepfn, start, count);
 }
-static inline void paravirt_release_pmd(unsigned pfn)
+static inline void paravirt_release_pmd(unsigned long pfn)
 {
 	PVOP_VCALL1(pv_mmu_ops.release_pmd, pfn);
 }

-static inline void paravirt_alloc_pud(struct mm_struct *mm, unsigned pfn)
+static inline void paravirt_alloc_pud(struct mm_struct *mm, unsigned long pfn)
 {
 	PVOP_VCALL2(pv_mmu_ops.alloc_pud, mm, pfn);
 }
-static inline void paravirt_release_pud(unsigned pfn)
+static inline void paravirt_release_pud(unsigned long pfn)
 {
 	PVOP_VCALL1(pv_mmu_ops.release_pud, pfn);
 }
@@ -53,9 +53,7 @@ static inline pte_t native_ptep_get_and_clear(pte_t *xp)
 #define native_ptep_get_and_clear(xp) native_local_ptep_get_and_clear(xp)
 #endif

-#define pte_page(x) pfn_to_page(pte_pfn(x))
 #define pte_none(x) (!(x).pte_low)
-#define pte_pfn(x) (pte_val(x) >> PAGE_SHIFT)

 /*
  * Bits 0, 6 and 7 are taken, split up the 29 bits of offset
@@ -151,18 +151,11 @@ static inline int pte_same(pte_t a, pte_t b)
 	return a.pte_low == b.pte_low && a.pte_high == b.pte_high;
 }

-#define pte_page(x) pfn_to_page(pte_pfn(x))
-
 static inline int pte_none(pte_t pte)
 {
 	return !pte.pte_low && !pte.pte_high;
 }

-static inline unsigned long pte_pfn(pte_t pte)
-{
-	return (pte_val(pte) & PTE_PFN_MASK) >> PAGE_SHIFT;
-}
-
 /*
  * Bits 0, 6 and 7 are taken in the low part of the pte,
  * put the 32 bits of offset into the high part.
@@ -186,6 +186,13 @@ static inline int pte_special(pte_t pte)
 	return pte_val(pte) & _PAGE_SPECIAL;
 }

+static inline unsigned long pte_pfn(pte_t pte)
+{
+	return (pte_val(pte) & PTE_PFN_MASK) >> PAGE_SHIFT;
+}
+
+#define pte_page(pte) pfn_to_page(pte_pfn(pte))
+
 static inline int pmd_large(pmd_t pte)
 {
 	return (pmd_val(pte) & (_PAGE_PSE | _PAGE_PRESENT)) ==
@@ -57,8 +57,7 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
  * area for the same reason. ;)
  */
 #define VMALLOC_OFFSET (8 * 1024 * 1024)
-#define VMALLOC_START (((unsigned long)high_memory + 2 * VMALLOC_OFFSET - 1) \
-			& ~(VMALLOC_OFFSET - 1))
+#define VMALLOC_START ((unsigned long)high_memory + VMALLOC_OFFSET)
 #ifdef CONFIG_X86_PAE
 #define LAST_PKMAP 512
 #else
@@ -74,6 +73,8 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
 # define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE)
 #endif

+#define MAXMEM (VMALLOC_END - PAGE_OFFSET - __VMALLOC_RESERVE)
+
 /*
  * Define this if things work differently on an i386 and an i486:
  * it will (on an i486) warn about kernel memory accesses that are
@@ -175,8 +175,6 @@ static inline int pmd_bad(pmd_t pmd)
 #define pte_present(x) (pte_val((x)) & (_PAGE_PRESENT | _PAGE_PROTNONE))

 #define pages_to_mb(x) ((x) >> (20 - PAGE_SHIFT)) /* FIXME: is this right? */
-#define pte_page(x) pfn_to_page(pte_pfn((x)))
-#define pte_pfn(x) ((pte_val((x)) & __PHYSICAL_MASK) >> PAGE_SHIFT)

 /*
  * Macro to mark a page protection value as "uncacheable".
@@ -7,7 +7,7 @@
 do { \
 	if (pm_trace_enabled) { \
 		const void *tracedata; \
-		asm volatile(_ASM_MOV_UL " $1f,%0\n" \
+		asm volatile(_ASM_MOV " $1f,%0\n" \
 			".section .tracedata,\"a\"\n" \
 			"1:\t.word %c1\n\t" \
 			_ASM_PTR " %c2\n" \
@@ -97,7 +97,7 @@ static __always_inline int __ticket_spin_trylock(raw_spinlock_t *lock)
 		     "jne 1f\n\t"
 		     "movw %w0,%w1\n\t"
 		     "incb %h1\n\t"
-		     "lock ; cmpxchgw %w1,%2\n\t"
+		     LOCK_PREFIX "cmpxchgw %w1,%2\n\t"
 		     "1:"
 		     "sete %b1\n\t"
 		     "movzbl %b1,%0\n\t"
@@ -135,7 +135,7 @@ static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock)
 	int inc = 0x00010000;
 	int tmp;

-	asm volatile("lock ; xaddl %0, %1\n"
+	asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
 		     "movzwl %w0, %2\n\t"
 		     "shrl $16, %0\n\t"
 		     "1:\t"
@@ -162,7 +162,7 @@ static __always_inline int __ticket_spin_trylock(raw_spinlock_t *lock)
 		     "cmpl %0,%1\n\t"
 		     "jne 1f\n\t"
 		     "addl $0x00010000, %1\n\t"
-		     "lock ; cmpxchgl %1,%2\n\t"
+		     LOCK_PREFIX "cmpxchgl %1,%2\n\t"
 		     "1:"
 		     "sete %b1\n\t"
 		     "movzbl %b1,%0\n\t"