Merge branches 'x86/numa-fixes', 'x86/apic', 'x86/apm', 'x86/bitops', 'x86/build', 'x86/cleanups', 'x86/cpa', 'x86/cpu', 'x86/defconfig', 'x86/gart', 'x86/i8259', 'x86/intel', 'x86/irqstats', 'x86/kconfig', 'x86/ldt', 'x86/mce', 'x86/memtest', 'x86/pat', 'x86/ptemask', 'x86/resumetrace', 'x86/threadinfo', 'x86/timers', 'x86/vdso' and 'x86/xen' into x86/devel
@@ -1,6 +1,6 @@
 header-y += kvm.h
 
-ifeq ($(wildcard include/asm-$(SRCARCH)/a.out.h),include/asm-$(SRCARCH)/a.out.h)
+ifneq ($(wildcard $(srctree)/include/asm-$(SRCARCH)/a.out.h),)
 unifdef-y += a.out.h
 endif
 unifdef-y += auxvec.h
@@ -2,7 +2,7 @@
 #define _ASM_GENERIC_ATOMIC_H
 /*
  * Copyright (C) 2005 Silicon Graphics, Inc.
- * Christoph Lameter <clameter@sgi.com>
+ * Christoph Lameter
  *
  * Allows to provide arch independent atomic definitions without the need to
  * edit all arch specific atomic.h files.
@@ -1,8 +1,12 @@
 #ifndef _ASM_GENERIC_GPIO_H
 #define _ASM_GENERIC_GPIO_H
 
+#include <linux/types.h>
+
 #ifdef CONFIG_HAVE_GPIO_LIB
 
+#include <linux/compiler.h>
+
 /* Platforms may implement their GPIO interface with library code,
  * at a small performance cost for non-inlined operations and some
  * extra memory (for code and for per-GPIO table entries).
@@ -74,7 +78,7 @@ struct gpio_chip {
 
 extern const char *gpiochip_is_requested(struct gpio_chip *chip,
 			unsigned offset);
-extern int __init __must_check gpiochip_reserve(int start, int ngpio);
+extern int __must_check gpiochip_reserve(int start, int ngpio);
 
 /* add/remove chips */
 extern int gpiochip_add(struct gpio_chip *chip);
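An aside on the gpiochip_reserve() declaration above: __must_check obliges callers to consume the return value. A minimal sketch of how board setup code might honor that contract follows; board_reserve_gpios() and the 96/32 range are hypothetical illustrations, not part of this patch.

/* Hypothetical platform-init sketch: reserve a GPIO number range
 * before the gpio_chip that provides it is registered, acting on
 * the __must_check'd return value. */
static int __init board_reserve_gpios(void)
{
	int err;

	/* Keep GPIOs 96..127 out of dynamic allocation until the
	 * expander chip backing them is probed later. */
	err = gpiochip_reserve(96, 32);
	if (err < 0)
		pr_err("cannot reserve GPIO range: %d\n", err);

	return err;
}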
@@ -197,6 +197,63 @@ static inline int pmd_none_or_clear_bad(pmd_t *pmd)
 }
 #endif /* CONFIG_MMU */
 
+static inline pte_t __ptep_modify_prot_start(struct mm_struct *mm,
+					     unsigned long addr,
+					     pte_t *ptep)
+{
+	/*
+	 * Get the current pte state, but zero it out to make it
+	 * non-present, preventing the hardware from asynchronously
+	 * updating it.
+	 */
+	return ptep_get_and_clear(mm, addr, ptep);
+}
+
+static inline void __ptep_modify_prot_commit(struct mm_struct *mm,
+					     unsigned long addr,
+					     pte_t *ptep, pte_t pte)
+{
+	/*
+	 * The pte is non-present, so there's no hardware state to
+	 * preserve.
+	 */
+	set_pte_at(mm, addr, ptep, pte);
+}
+
+#ifndef __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
+/*
+ * Start a pte protection read-modify-write transaction, which
+ * protects against asynchronous hardware modifications to the pte.
+ * The intention is not to prevent the hardware from making pte
+ * updates, but to prevent any updates it may make from being lost.
+ *
+ * This does not protect against other software modifications of the
+ * pte; the appropriate pte lock must be held over the transaction.
+ *
+ * Note that this interface is intended to be batchable, meaning that
+ * ptep_modify_prot_commit may not actually update the pte, but merely
+ * queue the update to be done at some later time.  The update must be
+ * actually committed before the pte lock is released, however.
+ */
+static inline pte_t ptep_modify_prot_start(struct mm_struct *mm,
+					   unsigned long addr,
+					   pte_t *ptep)
+{
+	return __ptep_modify_prot_start(mm, addr, ptep);
+}
+
+/*
+ * Commit an update to a pte, leaving any hardware-controlled bits in
+ * the PTE unmodified.
+ */
+static inline void ptep_modify_prot_commit(struct mm_struct *mm,
+					   unsigned long addr,
+					   pte_t *ptep, pte_t pte)
+{
+	__ptep_modify_prot_commit(mm, addr, ptep, pte);
+}
+#endif /* __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION */
+
 /*
  * A facility to provide lazy MMU batching.  This allows PTE updates and
  * page invalidations to be delayed until a call to leave lazy MMU mode
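For context, a sketch of the calling pattern this transaction is designed for, modeled on an mprotect-style pte walk: start the transaction, recompute the protection bits, then commit, all under the pte lock as the comment above requires. The function change_prot_range() is a hypothetical name for illustration, not part of this patch.

/* Hedged sketch of a protection-change loop over one pmd's ptes.
 * The start/commit pair brackets the read-modify-write so hardware
 * A/D-bit updates cannot be lost, and the pte lock is held across
 * the whole transaction (and any arch-side batching). */
static void change_prot_range(struct mm_struct *mm, pmd_t *pmd,
			      unsigned long addr, unsigned long end,
			      pgprot_t newprot)
{
	pte_t *pte;
	spinlock_t *ptl;

	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
	do {
		if (pte_present(*pte)) {
			pte_t ptent;

			/* Zaps the pte; hardware can no longer touch it. */
			ptent = ptep_modify_prot_start(mm, addr, pte);
			ptent = pte_modify(ptent, newprot);
			/* Reinstalls the pte; the arch may queue this,
			 * but must flush before the lock is dropped. */
			ptep_modify_prot_commit(mm, addr, pte, ptent);
		}
	} while (pte++, addr += PAGE_SIZE, addr != end);
	pte_unmap_unlock(pte - 1, ptl);
}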
@@ -93,6 +93,8 @@
 			VMLINUX_SYMBOL(__end_rio_route_ops) = .;	\
 	}								\
 									\
+	TRACEDATA							\
+									\
 	/* Kernel symbol table: Normal symbols */			\
 	__ksymtab : AT(ADDR(__ksymtab) - LOAD_OFFSET) {			\
 		VMLINUX_SYMBOL(__start___ksymtab) = .;			\
@@ -318,6 +320,18 @@
 		__stop___bug_table = .;					\
 	}
 
+#ifdef CONFIG_PM_TRACE
+#define TRACEDATA							\
+	. = ALIGN(4);							\
+	.tracedata : AT(ADDR(.tracedata) - LOAD_OFFSET) {		\
+		__tracedata_start = .;					\
+		*(.tracedata)						\
+		__tracedata_end = .;					\
+	}
+#else
+#define TRACEDATA
+#endif
+
 #define NOTES								\
 	.notes : AT(ADDR(.notes) - LOAD_OFFSET) {			\
 		VMLINUX_SYMBOL(__start_notes) = .;			\
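A note on the TRACEDATA macro above: the __tracedata_start/__tracedata_end symbols delimit every *(.tracedata) input section the linker collects, giving the resume-trace code a walkable region. A hedged sketch of that section-walking pattern follows; the entry layout and walk_tracedata() are illustrative assumptions, not the kernel's actual resume-trace format (the real entries are emitted by arch-specific asm).

/* Hedged sketch: consuming a linker-delimited section like the
 * .tracedata block above.  The start/end symbols come from the
 * linker script; the entry layout here is an assumption made for
 * illustration. */
extern char __tracedata_start[], __tracedata_end[];

struct tracedata_entry {		/* hypothetical layout */
	unsigned short line;		/* source line of the trace point */
	const char *file;		/* source file of the trace point */
} __attribute__((packed));

static void walk_tracedata(void (*fn)(const char *file, unsigned line))
{
	struct tracedata_entry *t;

	/* Everything between the two symbols is trace-point data,
	 * so this visits every recorded trace point in turn. */
	for (t = (struct tracedata_entry *)__tracedata_start;
	     (char *)(t + 1) <= __tracedata_end; t++)
		fn(t->file, t->line);
}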