Merge branch 'master' into gfs2
This commit is contained in:
24
include/linux/aer.h
Normal file
24
include/linux/aer.h
Normal file
@@ -0,0 +1,24 @@
|
||||
/*
|
||||
* Copyright (C) 2006 Intel Corp.
|
||||
* Tom Long Nguyen (tom.l.nguyen@intel.com)
|
||||
* Zhang Yanmin (yanmin.zhang@intel.com)
|
||||
*/
|
||||
|
||||
#ifndef _AER_H_
|
||||
#define _AER_H_
|
||||
|
||||
#if defined(CONFIG_PCIEAER)
|
||||
/* pci-e port driver needs this function to enable aer */
|
||||
extern int pci_enable_pcie_error_reporting(struct pci_dev *dev);
|
||||
extern int pci_find_aer_capability(struct pci_dev *dev);
|
||||
extern int pci_disable_pcie_error_reporting(struct pci_dev *dev);
|
||||
extern int pci_cleanup_aer_uncorrect_error_status(struct pci_dev *dev);
|
||||
#else
|
||||
#define pci_enable_pcie_error_reporting(dev) do { } while (0)
|
||||
#define pci_find_aer_capability(dev) do { } while (0)
|
||||
#define pci_disable_pcie_error_reporting(dev) do { } while (0)
|
||||
#define pci_cleanup_aer_uncorrect_error_status(dev) do { } while (0)
|
||||
#endif
|
||||
|
||||
#endif //_AER_H_
|
||||
|
||||
@@ -4,11 +4,8 @@
|
||||
#ifndef _LINUX_BOOTMEM_H
|
||||
#define _LINUX_BOOTMEM_H
|
||||
|
||||
#include <asm/pgtable.h>
|
||||
#include <asm/dma.h>
|
||||
#include <linux/cache.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/mmzone.h>
|
||||
#include <asm/dma.h>
|
||||
|
||||
/*
|
||||
* simple boot-time physical memory area allocator.
|
||||
@@ -41,45 +38,64 @@ typedef struct bootmem_data {
|
||||
struct list_head list;
|
||||
} bootmem_data_t;
|
||||
|
||||
extern unsigned long __init bootmem_bootmap_pages (unsigned long);
|
||||
extern unsigned long __init init_bootmem (unsigned long addr, unsigned long memend);
|
||||
extern void __init free_bootmem (unsigned long addr, unsigned long size);
|
||||
extern void * __init __alloc_bootmem (unsigned long size, unsigned long align, unsigned long goal);
|
||||
extern void * __init __alloc_bootmem_nopanic (unsigned long size, unsigned long align, unsigned long goal);
|
||||
extern void * __init __alloc_bootmem_low(unsigned long size,
|
||||
unsigned long align,
|
||||
unsigned long goal);
|
||||
extern void * __init __alloc_bootmem_low_node(pg_data_t *pgdat,
|
||||
unsigned long size,
|
||||
unsigned long align,
|
||||
unsigned long goal);
|
||||
extern void * __init __alloc_bootmem_core(struct bootmem_data *bdata,
|
||||
unsigned long size, unsigned long align, unsigned long goal,
|
||||
unsigned long limit);
|
||||
extern unsigned long bootmem_bootmap_pages(unsigned long);
|
||||
extern unsigned long init_bootmem(unsigned long addr, unsigned long memend);
|
||||
extern void free_bootmem(unsigned long addr, unsigned long size);
|
||||
extern void *__alloc_bootmem(unsigned long size,
|
||||
unsigned long align,
|
||||
unsigned long goal);
|
||||
extern void *__alloc_bootmem_nopanic(unsigned long size,
|
||||
unsigned long align,
|
||||
unsigned long goal);
|
||||
extern void *__alloc_bootmem_low(unsigned long size,
|
||||
unsigned long align,
|
||||
unsigned long goal);
|
||||
extern void *__alloc_bootmem_low_node(pg_data_t *pgdat,
|
||||
unsigned long size,
|
||||
unsigned long align,
|
||||
unsigned long goal);
|
||||
extern void *__alloc_bootmem_core(struct bootmem_data *bdata,
|
||||
unsigned long size,
|
||||
unsigned long align,
|
||||
unsigned long goal,
|
||||
unsigned long limit);
|
||||
|
||||
#ifndef CONFIG_HAVE_ARCH_BOOTMEM_NODE
|
||||
extern void __init reserve_bootmem (unsigned long addr, unsigned long size);
|
||||
extern void reserve_bootmem(unsigned long addr, unsigned long size);
|
||||
#define alloc_bootmem(x) \
|
||||
__alloc_bootmem((x), SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS))
|
||||
__alloc_bootmem(x, SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS))
|
||||
#define alloc_bootmem_low(x) \
|
||||
__alloc_bootmem_low((x), SMP_CACHE_BYTES, 0)
|
||||
__alloc_bootmem_low(x, SMP_CACHE_BYTES, 0)
|
||||
#define alloc_bootmem_pages(x) \
|
||||
__alloc_bootmem((x), PAGE_SIZE, __pa(MAX_DMA_ADDRESS))
|
||||
__alloc_bootmem(x, PAGE_SIZE, __pa(MAX_DMA_ADDRESS))
|
||||
#define alloc_bootmem_low_pages(x) \
|
||||
__alloc_bootmem_low((x), PAGE_SIZE, 0)
|
||||
__alloc_bootmem_low(x, PAGE_SIZE, 0)
|
||||
#endif /* !CONFIG_HAVE_ARCH_BOOTMEM_NODE */
|
||||
extern unsigned long __init free_all_bootmem (void);
|
||||
extern void * __init __alloc_bootmem_node (pg_data_t *pgdat, unsigned long size, unsigned long align, unsigned long goal);
|
||||
extern unsigned long __init init_bootmem_node (pg_data_t *pgdat, unsigned long freepfn, unsigned long startpfn, unsigned long endpfn);
|
||||
extern void __init reserve_bootmem_node (pg_data_t *pgdat, unsigned long physaddr, unsigned long size);
|
||||
extern void __init free_bootmem_node (pg_data_t *pgdat, unsigned long addr, unsigned long size);
|
||||
extern unsigned long __init free_all_bootmem_node (pg_data_t *pgdat);
|
||||
|
||||
extern unsigned long free_all_bootmem(void);
|
||||
extern unsigned long free_all_bootmem_node(pg_data_t *pgdat);
|
||||
extern void *__alloc_bootmem_node(pg_data_t *pgdat,
|
||||
unsigned long size,
|
||||
unsigned long align,
|
||||
unsigned long goal);
|
||||
extern unsigned long init_bootmem_node(pg_data_t *pgdat,
|
||||
unsigned long freepfn,
|
||||
unsigned long startpfn,
|
||||
unsigned long endpfn);
|
||||
extern void reserve_bootmem_node(pg_data_t *pgdat,
|
||||
unsigned long physaddr,
|
||||
unsigned long size);
|
||||
extern void free_bootmem_node(pg_data_t *pgdat,
|
||||
unsigned long addr,
|
||||
unsigned long size);
|
||||
|
||||
#ifndef CONFIG_HAVE_ARCH_BOOTMEM_NODE
|
||||
#define alloc_bootmem_node(pgdat, x) \
|
||||
__alloc_bootmem_node((pgdat), (x), SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS))
|
||||
__alloc_bootmem_node(pgdat, x, SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS))
|
||||
#define alloc_bootmem_pages_node(pgdat, x) \
|
||||
__alloc_bootmem_node((pgdat), (x), PAGE_SIZE, __pa(MAX_DMA_ADDRESS))
|
||||
__alloc_bootmem_node(pgdat, x, PAGE_SIZE, __pa(MAX_DMA_ADDRESS))
|
||||
#define alloc_bootmem_low_pages_node(pgdat, x) \
|
||||
__alloc_bootmem_low_node((pgdat), (x), PAGE_SIZE, 0)
|
||||
__alloc_bootmem_low_node(pgdat, x, PAGE_SIZE, 0)
|
||||
#endif /* !CONFIG_HAVE_ARCH_BOOTMEM_NODE */
|
||||
|
||||
#ifdef CONFIG_HAVE_ARCH_ALLOC_REMAP
|
||||
@@ -89,19 +105,19 @@ static inline void *alloc_remap(int nid, unsigned long size)
|
||||
{
|
||||
return NULL;
|
||||
}
|
||||
#endif
|
||||
#endif /* CONFIG_HAVE_ARCH_ALLOC_REMAP */
|
||||
|
||||
extern unsigned long __meminitdata nr_kernel_pages;
|
||||
extern unsigned long nr_all_pages;
|
||||
|
||||
extern void *__init alloc_large_system_hash(const char *tablename,
|
||||
unsigned long bucketsize,
|
||||
unsigned long numentries,
|
||||
int scale,
|
||||
int flags,
|
||||
unsigned int *_hash_shift,
|
||||
unsigned int *_hash_mask,
|
||||
unsigned long limit);
|
||||
extern void *alloc_large_system_hash(const char *tablename,
|
||||
unsigned long bucketsize,
|
||||
unsigned long numentries,
|
||||
int scale,
|
||||
int flags,
|
||||
unsigned int *_hash_shift,
|
||||
unsigned int *_hash_mask,
|
||||
unsigned long limit);
|
||||
|
||||
#define HASH_HIGHMEM 0x00000001 /* Consider highmem? */
|
||||
#define HASH_EARLY 0x00000002 /* Allocating during early boot? */
|
||||
|
||||
@@ -23,5 +23,7 @@ void cdev_del(struct cdev *);
|
||||
|
||||
void cd_forget(struct inode *);
|
||||
|
||||
extern struct backing_dev_info directly_mappable_cdev_bdi;
|
||||
|
||||
#endif
|
||||
#endif
|
||||
|
||||
@@ -99,6 +99,11 @@ extern void __chk_io_ptr(void __iomem *);
|
||||
#define __must_check
|
||||
#endif
|
||||
|
||||
#ifndef CONFIG_ENABLE_MUST_CHECK
|
||||
#undef __must_check
|
||||
#define __must_check
|
||||
#endif
|
||||
|
||||
/*
|
||||
* Allow us to avoid 'defined but not used' warnings on functions and data,
|
||||
* as well as force them to be emitted to the assembly file.
|
||||
|
||||
@@ -120,9 +120,14 @@ extern void console_stop(struct console *);
|
||||
extern void console_start(struct console *);
|
||||
extern int is_console_locked(void);
|
||||
|
||||
#ifndef CONFIG_DISABLE_CONSOLE_SUSPEND
|
||||
/* Suspend and resume console messages over PM events */
|
||||
extern void suspend_console(void);
|
||||
extern void resume_console(void);
|
||||
#else
|
||||
static inline void suspend_console(void) {}
|
||||
static inline void resume_console(void) {}
|
||||
#endif /* CONFIG_DISABLE_CONSOLE_SUSPEND */
|
||||
|
||||
/* Some debug stub to catch some of the obvious races in the VT code */
|
||||
#if 1
|
||||
|
||||
@@ -89,4 +89,12 @@ int cpu_down(unsigned int cpu);
|
||||
static inline int cpu_is_offline(int cpu) { return 0; }
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_SUSPEND_SMP
|
||||
extern int disable_nonboot_cpus(void);
|
||||
extern void enable_nonboot_cpus(void);
|
||||
#else
|
||||
static inline int disable_nonboot_cpus(void) { return 0; }
|
||||
static inline void enable_nonboot_cpus(void) {}
|
||||
#endif
|
||||
|
||||
#endif /* _LINUX_CPU_H_ */
|
||||
|
||||
@@ -169,6 +169,12 @@ enum {
|
||||
DCCPO_MAX_CCID_SPECIFIC = 255,
|
||||
};
|
||||
|
||||
/* DCCP CCIDS */
|
||||
enum {
|
||||
DCCPC_CCID2 = 2,
|
||||
DCCPC_CCID3 = 3,
|
||||
};
|
||||
|
||||
/* DCCP features */
|
||||
enum {
|
||||
DCCPF_RESERVED = 0,
|
||||
@@ -320,7 +326,7 @@ static inline unsigned int dccp_hdr_len(const struct sk_buff *skb)
|
||||
/* initial values for each feature */
|
||||
#define DCCPF_INITIAL_SEQUENCE_WINDOW 100
|
||||
#define DCCPF_INITIAL_ACK_RATIO 2
|
||||
#define DCCPF_INITIAL_CCID 2
|
||||
#define DCCPF_INITIAL_CCID DCCPC_CCID2
|
||||
#define DCCPF_INITIAL_SEND_ACK_VECTOR 1
|
||||
/* FIXME: for now we're default to 1 but it should really be 0 */
|
||||
#define DCCPF_INITIAL_SEND_NDP_COUNT 1
|
||||
@@ -404,6 +410,7 @@ struct dccp_service_list {
|
||||
};
|
||||
|
||||
#define DCCP_SERVICE_INVALID_VALUE htonl((__u32)-1)
|
||||
#define DCCP_SERVICE_CODE_IS_ABSENT 0
|
||||
|
||||
static inline int dccp_list_has_service(const struct dccp_service_list *sl,
|
||||
const __be32 service)
|
||||
@@ -484,11 +491,6 @@ static inline struct dccp_minisock *dccp_msk(const struct sock *sk)
|
||||
return (struct dccp_minisock *)&dccp_sk(sk)->dccps_minisock;
|
||||
}
|
||||
|
||||
static inline int dccp_service_not_initialized(const struct sock *sk)
|
||||
{
|
||||
return dccp_sk(sk)->dccps_service == DCCP_SERVICE_INVALID_VALUE;
|
||||
}
|
||||
|
||||
static inline const char *dccp_role(const struct sock *sk)
|
||||
{
|
||||
switch (dccp_sk(sk)->dccps_role) {
|
||||
|
||||
@@ -15,6 +15,7 @@
|
||||
#include <linux/kobject.h>
|
||||
#include <linux/klist.h>
|
||||
#include <linux/list.h>
|
||||
#include <linux/compiler.h>
|
||||
#include <linux/types.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/pm.h>
|
||||
@@ -51,14 +52,17 @@ struct bus_type {
|
||||
int (*probe)(struct device * dev);
|
||||
int (*remove)(struct device * dev);
|
||||
void (*shutdown)(struct device * dev);
|
||||
int (*suspend)(struct device * dev, pm_message_t state);
|
||||
int (*resume)(struct device * dev);
|
||||
|
||||
int (*suspend)(struct device * dev, pm_message_t state);
|
||||
int (*suspend_late)(struct device * dev, pm_message_t state);
|
||||
int (*resume_early)(struct device * dev);
|
||||
int (*resume)(struct device * dev);
|
||||
};
|
||||
|
||||
extern int bus_register(struct bus_type * bus);
|
||||
extern int __must_check bus_register(struct bus_type * bus);
|
||||
extern void bus_unregister(struct bus_type * bus);
|
||||
|
||||
extern void bus_rescan_devices(struct bus_type * bus);
|
||||
extern int __must_check bus_rescan_devices(struct bus_type * bus);
|
||||
|
||||
/* iterator helpers for buses */
|
||||
|
||||
@@ -67,9 +71,9 @@ int bus_for_each_dev(struct bus_type * bus, struct device * start, void * data,
|
||||
struct device * bus_find_device(struct bus_type *bus, struct device *start,
|
||||
void *data, int (*match)(struct device *, void *));
|
||||
|
||||
int bus_for_each_drv(struct bus_type * bus, struct device_driver * start,
|
||||
void * data, int (*fn)(struct device_driver *, void *));
|
||||
|
||||
int __must_check bus_for_each_drv(struct bus_type *bus,
|
||||
struct device_driver *start, void *data,
|
||||
int (*fn)(struct device_driver *, void *));
|
||||
|
||||
/* driverfs interface for exporting bus attributes */
|
||||
|
||||
@@ -82,7 +86,8 @@ struct bus_attribute {
|
||||
#define BUS_ATTR(_name,_mode,_show,_store) \
|
||||
struct bus_attribute bus_attr_##_name = __ATTR(_name,_mode,_show,_store)
|
||||
|
||||
extern int bus_create_file(struct bus_type *, struct bus_attribute *);
|
||||
extern int __must_check bus_create_file(struct bus_type *,
|
||||
struct bus_attribute *);
|
||||
extern void bus_remove_file(struct bus_type *, struct bus_attribute *);
|
||||
|
||||
struct device_driver {
|
||||
@@ -101,16 +106,18 @@ struct device_driver {
|
||||
void (*shutdown) (struct device * dev);
|
||||
int (*suspend) (struct device * dev, pm_message_t state);
|
||||
int (*resume) (struct device * dev);
|
||||
|
||||
unsigned int multithread_probe:1;
|
||||
};
|
||||
|
||||
|
||||
extern int driver_register(struct device_driver * drv);
|
||||
extern int __must_check driver_register(struct device_driver * drv);
|
||||
extern void driver_unregister(struct device_driver * drv);
|
||||
|
||||
extern struct device_driver * get_driver(struct device_driver * drv);
|
||||
extern void put_driver(struct device_driver * drv);
|
||||
extern struct device_driver *driver_find(const char *name, struct bus_type *bus);
|
||||
|
||||
extern int driver_probe_done(void);
|
||||
|
||||
/* driverfs interface for exporting driver attributes */
|
||||
|
||||
@@ -123,16 +130,17 @@ struct driver_attribute {
|
||||
#define DRIVER_ATTR(_name,_mode,_show,_store) \
|
||||
struct driver_attribute driver_attr_##_name = __ATTR(_name,_mode,_show,_store)
|
||||
|
||||
extern int driver_create_file(struct device_driver *, struct driver_attribute *);
|
||||
extern int __must_check driver_create_file(struct device_driver *,
|
||||
struct driver_attribute *);
|
||||
extern void driver_remove_file(struct device_driver *, struct driver_attribute *);
|
||||
|
||||
extern int driver_for_each_device(struct device_driver * drv, struct device * start,
|
||||
void * data, int (*fn)(struct device *, void *));
|
||||
extern int __must_check driver_for_each_device(struct device_driver * drv,
|
||||
struct device *start, void *data,
|
||||
int (*fn)(struct device *, void *));
|
||||
struct device * driver_find_device(struct device_driver *drv,
|
||||
struct device *start, void *data,
|
||||
int (*match)(struct device *, void *));
|
||||
|
||||
|
||||
/*
|
||||
* device classes
|
||||
*/
|
||||
@@ -146,17 +154,26 @@ struct class {
|
||||
struct list_head interfaces;
|
||||
struct semaphore sem; /* locks both the children and interfaces lists */
|
||||
|
||||
struct kobject *virtual_dir;
|
||||
|
||||
struct class_attribute * class_attrs;
|
||||
struct class_device_attribute * class_dev_attrs;
|
||||
struct device_attribute * dev_attrs;
|
||||
|
||||
int (*uevent)(struct class_device *dev, char **envp,
|
||||
int num_envp, char *buffer, int buffer_size);
|
||||
int (*dev_uevent)(struct device *dev, char **envp, int num_envp,
|
||||
char *buffer, int buffer_size);
|
||||
|
||||
void (*release)(struct class_device *dev);
|
||||
void (*class_release)(struct class *class);
|
||||
void (*dev_release)(struct device *dev);
|
||||
|
||||
int (*suspend)(struct device *, pm_message_t state);
|
||||
int (*resume)(struct device *);
|
||||
};
|
||||
|
||||
extern int class_register(struct class *);
|
||||
extern int __must_check class_register(struct class *);
|
||||
extern void class_unregister(struct class *);
|
||||
|
||||
|
||||
@@ -169,7 +186,8 @@ struct class_attribute {
|
||||
#define CLASS_ATTR(_name,_mode,_show,_store) \
|
||||
struct class_attribute class_attr_##_name = __ATTR(_name,_mode,_show,_store)
|
||||
|
||||
extern int class_create_file(struct class *, const struct class_attribute *);
|
||||
extern int __must_check class_create_file(struct class *,
|
||||
const struct class_attribute *);
|
||||
extern void class_remove_file(struct class *, const struct class_attribute *);
|
||||
|
||||
struct class_device_attribute {
|
||||
@@ -182,7 +200,7 @@ struct class_device_attribute {
|
||||
struct class_device_attribute class_device_attr_##_name = \
|
||||
__ATTR(_name,_mode,_show,_store)
|
||||
|
||||
extern int class_device_create_file(struct class_device *,
|
||||
extern int __must_check class_device_create_file(struct class_device *,
|
||||
const struct class_device_attribute *);
|
||||
|
||||
/**
|
||||
@@ -242,10 +260,10 @@ class_set_devdata (struct class_device *dev, void *data)
|
||||
}
|
||||
|
||||
|
||||
extern int class_device_register(struct class_device *);
|
||||
extern int __must_check class_device_register(struct class_device *);
|
||||
extern void class_device_unregister(struct class_device *);
|
||||
extern void class_device_initialize(struct class_device *);
|
||||
extern int class_device_add(struct class_device *);
|
||||
extern int __must_check class_device_add(struct class_device *);
|
||||
extern void class_device_del(struct class_device *);
|
||||
|
||||
extern int class_device_rename(struct class_device *, char *);
|
||||
@@ -255,7 +273,7 @@ extern void class_device_put(struct class_device *);
|
||||
|
||||
extern void class_device_remove_file(struct class_device *,
|
||||
const struct class_device_attribute *);
|
||||
extern int class_device_create_bin_file(struct class_device *,
|
||||
extern int __must_check class_device_create_bin_file(struct class_device *,
|
||||
struct bin_attribute *);
|
||||
extern void class_device_remove_bin_file(struct class_device *,
|
||||
struct bin_attribute *);
|
||||
@@ -266,22 +284,23 @@ struct class_interface {
|
||||
|
||||
int (*add) (struct class_device *, struct class_interface *);
|
||||
void (*remove) (struct class_device *, struct class_interface *);
|
||||
int (*add_dev) (struct device *, struct class_interface *);
|
||||
void (*remove_dev) (struct device *, struct class_interface *);
|
||||
};
|
||||
|
||||
extern int class_interface_register(struct class_interface *);
|
||||
extern int __must_check class_interface_register(struct class_interface *);
|
||||
extern void class_interface_unregister(struct class_interface *);
|
||||
|
||||
extern struct class *class_create(struct module *owner, char *name);
|
||||
extern struct class *class_create(struct module *owner, const char *name);
|
||||
extern void class_destroy(struct class *cls);
|
||||
extern struct class_device *class_device_create(struct class *cls,
|
||||
struct class_device *parent,
|
||||
dev_t devt,
|
||||
struct device *device,
|
||||
char *fmt, ...)
|
||||
const char *fmt, ...)
|
||||
__attribute__((format(printf,5,6)));
|
||||
extern void class_device_destroy(struct class *cls, dev_t devt);
|
||||
|
||||
|
||||
/* interface for exporting device attributes */
|
||||
struct device_attribute {
|
||||
struct attribute attr;
|
||||
@@ -294,8 +313,13 @@ struct device_attribute {
|
||||
#define DEVICE_ATTR(_name,_mode,_show,_store) \
|
||||
struct device_attribute dev_attr_##_name = __ATTR(_name,_mode,_show,_store)
|
||||
|
||||
extern int device_create_file(struct device *device, struct device_attribute * entry);
|
||||
extern int __must_check device_create_file(struct device *device,
|
||||
struct device_attribute * entry);
|
||||
extern void device_remove_file(struct device * dev, struct device_attribute * attr);
|
||||
extern int __must_check device_create_bin_file(struct device *dev,
|
||||
struct bin_attribute *attr);
|
||||
extern void device_remove_bin_file(struct device *dev,
|
||||
struct bin_attribute *attr);
|
||||
struct device {
|
||||
struct klist klist_children;
|
||||
struct klist_node knode_parent; /* node in sibling list */
|
||||
@@ -305,6 +329,7 @@ struct device {
|
||||
|
||||
struct kobject kobj;
|
||||
char bus_id[BUS_ID_SIZE]; /* position on parent bus */
|
||||
unsigned is_registered:1;
|
||||
struct device_attribute uevent_attr;
|
||||
struct device_attribute *devt_attr;
|
||||
|
||||
@@ -338,6 +363,7 @@ struct device {
|
||||
struct list_head node;
|
||||
struct class *class; /* optional*/
|
||||
dev_t devt; /* dev_t, creates the sysfs "dev" */
|
||||
struct attribute_group **groups; /* optional groups */
|
||||
|
||||
void (*release)(struct device * dev);
|
||||
};
|
||||
@@ -356,38 +382,41 @@ dev_set_drvdata (struct device *dev, void *data)
|
||||
|
||||
static inline int device_is_registered(struct device *dev)
|
||||
{
|
||||
return klist_node_attached(&dev->knode_bus);
|
||||
return dev->is_registered;
|
||||
}
|
||||
|
||||
/*
|
||||
* High level routines for use by the bus drivers
|
||||
*/
|
||||
extern int device_register(struct device * dev);
|
||||
extern int __must_check device_register(struct device * dev);
|
||||
extern void device_unregister(struct device * dev);
|
||||
extern void device_initialize(struct device * dev);
|
||||
extern int device_add(struct device * dev);
|
||||
extern int __must_check device_add(struct device * dev);
|
||||
extern void device_del(struct device * dev);
|
||||
extern int device_for_each_child(struct device *, void *,
|
||||
extern int __must_check device_for_each_child(struct device *, void *,
|
||||
int (*fn)(struct device *, void *));
|
||||
extern int device_rename(struct device *dev, char *new_name);
|
||||
|
||||
/*
|
||||
* Manual binding of a device to driver. See drivers/base/bus.c
|
||||
* for information on use.
|
||||
*/
|
||||
extern void device_bind_driver(struct device * dev);
|
||||
extern int __must_check device_bind_driver(struct device *dev);
|
||||
extern void device_release_driver(struct device * dev);
|
||||
extern int device_attach(struct device * dev);
|
||||
extern void driver_attach(struct device_driver * drv);
|
||||
extern void device_reprobe(struct device *dev);
|
||||
extern int __must_check device_attach(struct device * dev);
|
||||
extern int __must_check driver_attach(struct device_driver *drv);
|
||||
extern int __must_check device_reprobe(struct device *dev);
|
||||
|
||||
/*
|
||||
* Easy functions for dynamically creating devices on the fly
|
||||
*/
|
||||
extern struct device *device_create(struct class *cls, struct device *parent,
|
||||
dev_t devt, char *fmt, ...)
|
||||
dev_t devt, const char *fmt, ...)
|
||||
__attribute__((format(printf,4,5)));
|
||||
extern void device_destroy(struct class *cls, dev_t devt);
|
||||
|
||||
extern int virtual_device_parent(struct device *dev);
|
||||
|
||||
/*
|
||||
* Platform "fixup" functions - allow the platform to have their say
|
||||
* about devices and actions that the general device layer doesn't
|
||||
@@ -412,7 +441,7 @@ extern void device_shutdown(void);
|
||||
|
||||
|
||||
/* drivers/base/firmware.c */
|
||||
extern int firmware_register(struct subsystem *);
|
||||
extern int __must_check firmware_register(struct subsystem *);
|
||||
extern void firmware_unregister(struct subsystem *);
|
||||
|
||||
/* debugging and troubleshooting/diagnostic helpers. */
|
||||
|
||||
@@ -52,6 +52,7 @@
|
||||
#define EDD_CL_EQUALS 0x3d646465 /* "edd=" */
|
||||
#define EDD_CL_OFF 0x666f /* "of" for off */
|
||||
#define EDD_CL_SKIP 0x6b73 /* "sk" for skipmbr */
|
||||
#define EDD_CL_ON 0x6e6f /* "on" for on */
|
||||
|
||||
#ifndef __ASSEMBLY__
|
||||
|
||||
|
||||
@@ -3,8 +3,8 @@
|
||||
|
||||
#include <linux/ioport.h>
|
||||
#include <linux/device.h>
|
||||
#include <linux/mod_devicetable.h>
|
||||
|
||||
#define EISA_SIG_LEN 8
|
||||
#define EISA_MAX_SLOTS 8
|
||||
|
||||
#define EISA_MAX_RESOURCES 4
|
||||
@@ -27,12 +27,6 @@
|
||||
#define EISA_CONFIG_ENABLED 1
|
||||
#define EISA_CONFIG_FORCED 2
|
||||
|
||||
/* The EISA signature, in ASCII form, null terminated */
|
||||
struct eisa_device_id {
|
||||
char sig[EISA_SIG_LEN];
|
||||
unsigned long driver_data;
|
||||
};
|
||||
|
||||
/* There is not much we can say about an EISA device, apart from
|
||||
* signature, slot number, and base address. dma_mask is set by
|
||||
* default to parent device mask..*/
|
||||
|
||||
@@ -31,6 +31,7 @@
|
||||
#define EM_M32R 88 /* Renesas M32R */
|
||||
#define EM_H8_300 46 /* Renesas H8/300,300H,H8S */
|
||||
#define EM_FRV 0x5441 /* Fujitsu FR-V */
|
||||
#define EM_AVR32 0x18ad /* Atmel AVR32 */
|
||||
|
||||
/*
|
||||
* This is an interim value that we will use until the committee comes
|
||||
|
||||
90
include/linux/elfnote.h
Normal file
90
include/linux/elfnote.h
Normal file
@@ -0,0 +1,90 @@
|
||||
#ifndef _LINUX_ELFNOTE_H
|
||||
#define _LINUX_ELFNOTE_H
|
||||
/*
|
||||
* Helper macros to generate ELF Note structures, which are put into a
|
||||
* PT_NOTE segment of the final vmlinux image. These are useful for
|
||||
* including name-value pairs of metadata into the kernel binary (or
|
||||
* modules?) for use by external programs.
|
||||
*
|
||||
* Each note has three parts: a name, a type and a desc. The name is
|
||||
* intended to distinguish the note's originator, so it would be a
|
||||
* company, project, subsystem, etc; it must be in a suitable form for
|
||||
* use in a section name. The type is an integer which is used to tag
|
||||
* the data, and is considered to be within the "name" namespace (so
|
||||
* "FooCo"'s type 42 is distinct from "BarProj"'s type 42). The
|
||||
* "desc" field is the actual data. There are no constraints on the
|
||||
* desc field's contents, though typically they're fairly small.
|
||||
*
|
||||
* All notes from a given NAME are put into a section named
|
||||
* .note.NAME. When the kernel image is finally linked, all the notes
|
||||
* are packed into a single .notes section, which is mapped into the
|
||||
* PT_NOTE segment. Because notes for a given name are grouped into
|
||||
* the same section, they'll all be adjacent the output file.
|
||||
*
|
||||
* This file defines macros for both C and assembler use. Their
|
||||
* syntax is slightly different, but they're semantically similar.
|
||||
*
|
||||
* See the ELF specification for more detail about ELF notes.
|
||||
*/
|
||||
|
||||
#ifdef __ASSEMBLER__
|
||||
/*
|
||||
* Generate a structure with the same shape as Elf{32,64}_Nhdr (which
|
||||
* turn out to be the same size and shape), followed by the name and
|
||||
* desc data with appropriate padding. The 'desctype' argument is the
|
||||
* assembler pseudo op defining the type of the data e.g. .asciz while
|
||||
* 'descdata' is the data itself e.g. "hello, world".
|
||||
*
|
||||
* e.g. ELFNOTE(XYZCo, 42, .asciz, "forty-two")
|
||||
* ELFNOTE(XYZCo, 12, .long, 0xdeadbeef)
|
||||
*/
|
||||
#define ELFNOTE(name, type, desctype, descdata) \
|
||||
.pushsection .note.name ; \
|
||||
.align 4 ; \
|
||||
.long 2f - 1f /* namesz */ ; \
|
||||
.long 4f - 3f /* descsz */ ; \
|
||||
.long type ; \
|
||||
1:.asciz "name" ; \
|
||||
2:.align 4 ; \
|
||||
3:desctype descdata ; \
|
||||
4:.align 4 ; \
|
||||
.popsection ;
|
||||
#else /* !__ASSEMBLER__ */
|
||||
#include <linux/elf.h>
|
||||
/*
|
||||
* Use an anonymous structure which matches the shape of
|
||||
* Elf{32,64}_Nhdr, but includes the name and desc data. The size and
|
||||
* type of name and desc depend on the macro arguments. "name" must
|
||||
* be a literal string, and "desc" must be passed by value. You may
|
||||
* only define one note per line, since __LINE__ is used to generate
|
||||
* unique symbols.
|
||||
*/
|
||||
#define _ELFNOTE_PASTE(a,b) a##b
|
||||
#define _ELFNOTE(size, name, unique, type, desc) \
|
||||
static const struct { \
|
||||
struct elf##size##_note _nhdr; \
|
||||
unsigned char _name[sizeof(name)] \
|
||||
__attribute__((aligned(sizeof(Elf##size##_Word)))); \
|
||||
typeof(desc) _desc \
|
||||
__attribute__((aligned(sizeof(Elf##size##_Word)))); \
|
||||
} _ELFNOTE_PASTE(_note_, unique) \
|
||||
__attribute_used__ \
|
||||
__attribute__((section(".note." name), \
|
||||
aligned(sizeof(Elf##size##_Word)), \
|
||||
unused)) = { \
|
||||
{ \
|
||||
sizeof(name), \
|
||||
sizeof(desc), \
|
||||
type, \
|
||||
}, \
|
||||
name, \
|
||||
desc \
|
||||
}
|
||||
#define ELFNOTE(size, name, type, desc) \
|
||||
_ELFNOTE(size, name, __LINE__, type, desc)
|
||||
|
||||
#define ELFNOTE32(name, type, desc) ELFNOTE(32, name, type, desc)
|
||||
#define ELFNOTE64(name, type, desc) ELFNOTE(64, name, type, desc)
|
||||
#endif /* __ASSEMBLER__ */
|
||||
|
||||
#endif /* _LINUX_ELFNOTE_H */
|
||||
@@ -15,6 +15,8 @@
|
||||
*/
|
||||
#define MAX_ERRNO 4095
|
||||
|
||||
#ifndef __ASSEMBLY__
|
||||
|
||||
#define IS_ERR_VALUE(x) unlikely((x) >= (unsigned long)-MAX_ERRNO)
|
||||
|
||||
static inline void *ERR_PTR(long error)
|
||||
@@ -32,4 +34,6 @@ static inline long IS_ERR(const void *ptr)
|
||||
return IS_ERR_VALUE((unsigned long)ptr);
|
||||
}
|
||||
|
||||
#endif
|
||||
|
||||
#endif /* _LINUX_ERR_H */
|
||||
|
||||
@@ -460,7 +460,7 @@ struct ext3_super_block {
|
||||
*/
|
||||
__u8 s_prealloc_blocks; /* Nr of blocks to try to preallocate*/
|
||||
__u8 s_prealloc_dir_blocks; /* Nr to preallocate for dirs */
|
||||
__u16 s_reserved_gdt_blocks; /* Per group desc for online growth */
|
||||
__le16 s_reserved_gdt_blocks; /* Per group desc for online growth */
|
||||
/*
|
||||
* Journaling support valid if EXT3_FEATURE_COMPAT_HAS_JOURNAL set.
|
||||
*/
|
||||
@@ -473,7 +473,7 @@ struct ext3_super_block {
|
||||
__u8 s_reserved_char_pad;
|
||||
__u16 s_reserved_word_pad;
|
||||
__le32 s_default_mount_opts;
|
||||
__le32 s_first_meta_bg; /* First metablock block group */
|
||||
__le32 s_first_meta_bg; /* First metablock block group */
|
||||
__u32 s_reserved[190]; /* Padding to the end of the block */
|
||||
};
|
||||
|
||||
|
||||
@@ -35,7 +35,7 @@ struct ext3_reserve_window {
|
||||
};
|
||||
|
||||
struct ext3_reserve_window_node {
|
||||
struct rb_node rsv_node;
|
||||
struct rb_node rsv_node;
|
||||
__u32 rsv_goal_size;
|
||||
__u32 rsv_alloc_hit;
|
||||
struct ext3_reserve_window rsv_window;
|
||||
|
||||
@@ -23,7 +23,7 @@
|
||||
|
||||
/* Define the number of blocks we need to account to a transaction to
|
||||
* modify one block of data.
|
||||
*
|
||||
*
|
||||
* We may have to touch one inode, one bitmap buffer, up to three
|
||||
* indirection blocks, the group and superblock summaries, and the data
|
||||
* block to complete the transaction. */
|
||||
@@ -88,16 +88,16 @@
|
||||
#endif
|
||||
|
||||
int
|
||||
ext3_mark_iloc_dirty(handle_t *handle,
|
||||
ext3_mark_iloc_dirty(handle_t *handle,
|
||||
struct inode *inode,
|
||||
struct ext3_iloc *iloc);
|
||||
|
||||
/*
|
||||
/*
|
||||
* On success, We end up with an outstanding reference count against
|
||||
* iloc->bh. This _must_ be cleaned up later.
|
||||
* iloc->bh. This _must_ be cleaned up later.
|
||||
*/
|
||||
|
||||
int ext3_reserve_inode_write(handle_t *handle, struct inode *inode,
|
||||
int ext3_reserve_inode_write(handle_t *handle, struct inode *inode,
|
||||
struct ext3_iloc *iloc);
|
||||
|
||||
int ext3_mark_inode_dirty(handle_t *handle, struct inode *inode);
|
||||
|
||||
@@ -512,7 +512,6 @@ struct inode {
|
||||
struct timespec i_mtime;
|
||||
struct timespec i_ctime;
|
||||
unsigned int i_blkbits;
|
||||
unsigned long i_blksize;
|
||||
unsigned long i_version;
|
||||
blkcnt_t i_blocks;
|
||||
unsigned short i_bytes;
|
||||
@@ -528,11 +527,12 @@ struct inode {
|
||||
#ifdef CONFIG_QUOTA
|
||||
struct dquot *i_dquot[MAXQUOTAS];
|
||||
#endif
|
||||
/* These three should probably be a union */
|
||||
struct list_head i_devices;
|
||||
struct pipe_inode_info *i_pipe;
|
||||
struct block_device *i_bdev;
|
||||
struct cdev *i_cdev;
|
||||
union {
|
||||
struct pipe_inode_info *i_pipe;
|
||||
struct block_device *i_bdev;
|
||||
struct cdev *i_cdev;
|
||||
};
|
||||
int i_cindex;
|
||||
|
||||
__u32 i_generation;
|
||||
@@ -554,9 +554,7 @@ struct inode {
|
||||
|
||||
atomic_t i_writecount;
|
||||
void *i_security;
|
||||
union {
|
||||
void *generic_ip;
|
||||
} u;
|
||||
void *i_private; /* fs or device private pointer */
|
||||
#ifdef __NEED_I_SIZE_ORDERED
|
||||
seqcount_t i_size_seqcount;
|
||||
#endif
|
||||
|
||||
16
include/linux/getcpu.h
Normal file
16
include/linux/getcpu.h
Normal file
@@ -0,0 +1,16 @@
|
||||
#ifndef _LINUX_GETCPU_H
|
||||
#define _LINUX_GETCPU_H 1
|
||||
|
||||
/* Cache for getcpu() to speed it up. Results might be upto a jiffie
|
||||
out of date, but will be faster.
|
||||
User programs should not refer to the contents of this structure.
|
||||
It is only a cache for vgetcpu(). It might change in future kernels.
|
||||
The user program must store this information per thread (__thread)
|
||||
If you want 100% accurate information pass NULL instead. */
|
||||
struct getcpu_cache {
|
||||
unsigned long t0;
|
||||
unsigned long t1;
|
||||
unsigned long res[4];
|
||||
};
|
||||
|
||||
#endif
|
||||
@@ -9,17 +9,16 @@ struct vm_area_struct;
|
||||
|
||||
/*
|
||||
* GFP bitmasks..
|
||||
*
|
||||
* Zone modifiers (see linux/mmzone.h - low three bits)
|
||||
*
|
||||
* Do not put any conditional on these. If necessary modify the definitions
|
||||
* without the underscores and use the consistently. The definitions here may
|
||||
* be used in bit comparisons.
|
||||
*/
|
||||
/* Zone modifiers in GFP_ZONEMASK (see linux/mmzone.h - low three bits) */
|
||||
#define __GFP_DMA ((__force gfp_t)0x01u)
|
||||
#define __GFP_HIGHMEM ((__force gfp_t)0x02u)
|
||||
#ifdef CONFIG_DMA_IS_DMA32
|
||||
#define __GFP_DMA32 ((__force gfp_t)0x01) /* ZONE_DMA is ZONE_DMA32 */
|
||||
#elif BITS_PER_LONG < 64
|
||||
#define __GFP_DMA32 ((__force gfp_t)0x00) /* ZONE_NORMAL is ZONE_DMA32 */
|
||||
#else
|
||||
#define __GFP_DMA32 ((__force gfp_t)0x04) /* Has own ZONE_DMA32 */
|
||||
#endif
|
||||
#define __GFP_DMA32 ((__force gfp_t)0x04u)
|
||||
|
||||
/*
|
||||
* Action modifiers - doesn't change the zoning
|
||||
@@ -46,6 +45,7 @@ struct vm_area_struct;
|
||||
#define __GFP_ZERO ((__force gfp_t)0x8000u)/* Return zeroed page on success */
|
||||
#define __GFP_NOMEMALLOC ((__force gfp_t)0x10000u) /* Don't use emergency reserves */
|
||||
#define __GFP_HARDWALL ((__force gfp_t)0x20000u) /* Enforce hardwall cpuset memory allocs */
|
||||
#define __GFP_THISNODE ((__force gfp_t)0x40000u)/* No fallback, no policies */
|
||||
|
||||
#define __GFP_BITS_SHIFT 20 /* Room for 20 __GFP_FOO bits */
|
||||
#define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))
|
||||
@@ -54,7 +54,7 @@ struct vm_area_struct;
|
||||
#define GFP_LEVEL_MASK (__GFP_WAIT|__GFP_HIGH|__GFP_IO|__GFP_FS| \
|
||||
__GFP_COLD|__GFP_NOWARN|__GFP_REPEAT| \
|
||||
__GFP_NOFAIL|__GFP_NORETRY|__GFP_NO_GROW|__GFP_COMP| \
|
||||
__GFP_NOMEMALLOC|__GFP_HARDWALL)
|
||||
__GFP_NOMEMALLOC|__GFP_HARDWALL|__GFP_THISNODE)
|
||||
|
||||
/* This equals 0, but use constants in case they ever change */
|
||||
#define GFP_NOWAIT (GFP_ATOMIC & ~__GFP_HIGH)
|
||||
@@ -67,6 +67,13 @@ struct vm_area_struct;
|
||||
#define GFP_HIGHUSER (__GFP_WAIT | __GFP_IO | __GFP_FS | __GFP_HARDWALL | \
|
||||
__GFP_HIGHMEM)
|
||||
|
||||
#ifdef CONFIG_NUMA
|
||||
#define GFP_THISNODE (__GFP_THISNODE | __GFP_NOWARN | __GFP_NORETRY)
|
||||
#else
|
||||
#define GFP_THISNODE 0
|
||||
#endif
|
||||
|
||||
|
||||
/* Flag - indicates that the buffer will be suitable for DMA. Ignored on some
|
||||
platforms, used as appropriate on others */
|
||||
|
||||
@@ -76,11 +83,19 @@ struct vm_area_struct;
|
||||
#define GFP_DMA32 __GFP_DMA32
|
||||
|
||||
|
||||
static inline int gfp_zone(gfp_t gfp)
|
||||
static inline enum zone_type gfp_zone(gfp_t flags)
|
||||
{
|
||||
int zone = GFP_ZONEMASK & (__force int) gfp;
|
||||
BUG_ON(zone >= GFP_ZONETYPES);
|
||||
return zone;
|
||||
if (flags & __GFP_DMA)
|
||||
return ZONE_DMA;
|
||||
#ifdef CONFIG_ZONE_DMA32
|
||||
if (flags & __GFP_DMA32)
|
||||
return ZONE_DMA32;
|
||||
#endif
|
||||
#ifdef CONFIG_HIGHMEM
|
||||
if (flags & __GFP_HIGHMEM)
|
||||
return ZONE_HIGHMEM;
|
||||
#endif
|
||||
return ZONE_NORMAL;
|
||||
}
|
||||
|
||||
/*
|
||||
|
||||
@@ -11,95 +11,46 @@
|
||||
#ifndef __HDLC_H
|
||||
#define __HDLC_H
|
||||
|
||||
#define GENERIC_HDLC_VERSION 4 /* For synchronization with sethdlc utility */
|
||||
|
||||
#define CLOCK_DEFAULT 0 /* Default setting */
|
||||
#define CLOCK_EXT 1 /* External TX and RX clock - DTE */
|
||||
#define CLOCK_INT 2 /* Internal TX and RX clock - DCE */
|
||||
#define CLOCK_TXINT 3 /* Internal TX and external RX clock */
|
||||
#define CLOCK_TXFROMRX 4 /* TX clock derived from external RX clock */
|
||||
|
||||
|
||||
#define ENCODING_DEFAULT 0 /* Default setting */
|
||||
#define ENCODING_NRZ 1
|
||||
#define ENCODING_NRZI 2
|
||||
#define ENCODING_FM_MARK 3
|
||||
#define ENCODING_FM_SPACE 4
|
||||
#define ENCODING_MANCHESTER 5
|
||||
|
||||
|
||||
#define PARITY_DEFAULT 0 /* Default setting */
|
||||
#define PARITY_NONE 1 /* No parity */
|
||||
#define PARITY_CRC16_PR0 2 /* CRC16, initial value 0x0000 */
|
||||
#define PARITY_CRC16_PR1 3 /* CRC16, initial value 0xFFFF */
|
||||
#define PARITY_CRC16_PR0_CCITT 4 /* CRC16, initial 0x0000, ITU-T version */
|
||||
#define PARITY_CRC16_PR1_CCITT 5 /* CRC16, initial 0xFFFF, ITU-T version */
|
||||
#define PARITY_CRC32_PR0_CCITT 6 /* CRC32, initial value 0x00000000 */
|
||||
#define PARITY_CRC32_PR1_CCITT 7 /* CRC32, initial value 0xFFFFFFFF */
|
||||
|
||||
#define LMI_DEFAULT 0 /* Default setting */
|
||||
#define LMI_NONE 1 /* No LMI, all PVCs are static */
|
||||
#define LMI_ANSI 2 /* ANSI Annex D */
|
||||
#define LMI_CCITT 3 /* ITU-T Annex A */
|
||||
#define LMI_CISCO 4 /* The "original" LMI, aka Gang of Four */
|
||||
|
||||
#define HDLC_MAX_MTU 1500 /* Ethernet 1500 bytes */
|
||||
#if 0
|
||||
#define HDLC_MAX_MRU (HDLC_MAX_MTU + 10 + 14 + 4) /* for ETH+VLAN over FR */
|
||||
#else
|
||||
#define HDLC_MAX_MRU 1600 /* as required for FR network */
|
||||
#endif
|
||||
|
||||
|
||||
#ifdef __KERNEL__
|
||||
|
||||
#include <linux/skbuff.h>
|
||||
#include <linux/netdevice.h>
|
||||
#include <net/syncppp.h>
|
||||
#include <linux/hdlc/ioctl.h>
|
||||
|
||||
|
||||
typedef struct { /* Used in Cisco and PPP mode */
|
||||
u8 address;
|
||||
u8 control;
|
||||
u16 protocol;
|
||||
}__attribute__ ((packed)) hdlc_header;
|
||||
|
||||
|
||||
|
||||
typedef struct {
|
||||
u32 type; /* code */
|
||||
u32 par1;
|
||||
u32 par2;
|
||||
u16 rel; /* reliability */
|
||||
u32 time;
|
||||
}__attribute__ ((packed)) cisco_packet;
|
||||
#define CISCO_PACKET_LEN 18
|
||||
#define CISCO_BIG_PACKET_LEN 20
|
||||
|
||||
|
||||
|
||||
typedef struct pvc_device_struct {
|
||||
struct net_device *master;
|
||||
struct net_device *main;
|
||||
struct net_device *ether; /* bridged Ethernet interface */
|
||||
struct pvc_device_struct *next; /* Sorted in ascending DLCI order */
|
||||
int dlci;
|
||||
int open_count;
|
||||
|
||||
struct {
|
||||
unsigned int new: 1;
|
||||
unsigned int active: 1;
|
||||
unsigned int exist: 1;
|
||||
unsigned int deleted: 1;
|
||||
unsigned int fecn: 1;
|
||||
unsigned int becn: 1;
|
||||
unsigned int bandwidth; /* Cisco LMI reporting only */
|
||||
}state;
|
||||
}pvc_device;
|
||||
|
||||
|
||||
|
||||
typedef struct hdlc_device_struct {
|
||||
/* To be initialized by hardware driver */
|
||||
/* Used by all network devices here, pointed to by netdev_priv(dev) */
|
||||
struct hdlc_device_desc {
|
||||
int (*netif_rx)(struct sk_buff *skb);
|
||||
struct net_device_stats stats;
|
||||
};
|
||||
|
||||
/* This structure is a private property of HDLC protocols.
|
||||
Hardware drivers have no interest here */
|
||||
|
||||
struct hdlc_proto {
|
||||
int (*open)(struct net_device *dev);
|
||||
void (*close)(struct net_device *dev);
|
||||
void (*start)(struct net_device *dev); /* if open & DCD */
|
||||
void (*stop)(struct net_device *dev); /* if open & !DCD */
|
||||
void (*detach)(struct net_device *dev);
|
||||
int (*ioctl)(struct net_device *dev, struct ifreq *ifr);
|
||||
unsigned short (*type_trans)(struct sk_buff *skb,
|
||||
struct net_device *dev);
|
||||
struct module *module;
|
||||
struct hdlc_proto *next; /* next protocol in the list */
|
||||
};
|
||||
|
||||
|
||||
typedef struct hdlc_device {
|
||||
/* used by HDLC layer to take control over HDLC device from hw driver*/
|
||||
int (*attach)(struct net_device *dev,
|
||||
unsigned short encoding, unsigned short parity);
|
||||
@@ -107,82 +58,18 @@ typedef struct hdlc_device_struct {
|
||||
/* hardware driver must handle this instead of dev->hard_start_xmit */
|
||||
int (*xmit)(struct sk_buff *skb, struct net_device *dev);
|
||||
|
||||
|
||||
/* Things below are for HDLC layer internal use only */
|
||||
struct {
|
||||
int (*open)(struct net_device *dev);
|
||||
void (*close)(struct net_device *dev);
|
||||
|
||||
/* if open & DCD */
|
||||
void (*start)(struct net_device *dev);
|
||||
/* if open & !DCD */
|
||||
void (*stop)(struct net_device *dev);
|
||||
|
||||
void (*detach)(struct hdlc_device_struct *hdlc);
|
||||
int (*netif_rx)(struct sk_buff *skb);
|
||||
unsigned short (*type_trans)(struct sk_buff *skb,
|
||||
struct net_device *dev);
|
||||
int id; /* IF_PROTO_HDLC/CISCO/FR/etc. */
|
||||
}proto;
|
||||
|
||||
const struct hdlc_proto *proto;
|
||||
int carrier;
|
||||
int open;
|
||||
spinlock_t state_lock;
|
||||
|
||||
union {
|
||||
struct {
|
||||
fr_proto settings;
|
||||
pvc_device *first_pvc;
|
||||
int dce_pvc_count;
|
||||
|
||||
struct timer_list timer;
|
||||
unsigned long last_poll;
|
||||
int reliable;
|
||||
int dce_changed;
|
||||
int request;
|
||||
int fullrep_sent;
|
||||
u32 last_errors; /* last errors bit list */
|
||||
u8 n391cnt;
|
||||
u8 txseq; /* TX sequence number */
|
||||
u8 rxseq; /* RX sequence number */
|
||||
}fr;
|
||||
|
||||
struct {
|
||||
cisco_proto settings;
|
||||
|
||||
struct timer_list timer;
|
||||
unsigned long last_poll;
|
||||
int up;
|
||||
int request_sent;
|
||||
u32 txseq; /* TX sequence number */
|
||||
u32 rxseq; /* RX sequence number */
|
||||
}cisco;
|
||||
|
||||
struct {
|
||||
raw_hdlc_proto settings;
|
||||
}raw_hdlc;
|
||||
|
||||
struct {
|
||||
struct ppp_device pppdev;
|
||||
struct ppp_device *syncppp_ptr;
|
||||
int (*old_change_mtu)(struct net_device *dev,
|
||||
int new_mtu);
|
||||
}ppp;
|
||||
}state;
|
||||
void *state;
|
||||
void *priv;
|
||||
}hdlc_device;
|
||||
|
||||
|
||||
|
||||
int hdlc_raw_ioctl(struct net_device *dev, struct ifreq *ifr);
|
||||
int hdlc_raw_eth_ioctl(struct net_device *dev, struct ifreq *ifr);
|
||||
int hdlc_cisco_ioctl(struct net_device *dev, struct ifreq *ifr);
|
||||
int hdlc_ppp_ioctl(struct net_device *dev, struct ifreq *ifr);
|
||||
int hdlc_fr_ioctl(struct net_device *dev, struct ifreq *ifr);
|
||||
int hdlc_x25_ioctl(struct net_device *dev, struct ifreq *ifr);
|
||||
|
||||
|
||||
/* Exported from hdlc.o */
|
||||
/* Exported from hdlc module */
|
||||
|
||||
/* Called by hardware driver when a user requests HDLC service */
|
||||
int hdlc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
|
||||
@@ -191,17 +78,21 @@ int hdlc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
|
||||
#define register_hdlc_device(dev) register_netdev(dev)
|
||||
void unregister_hdlc_device(struct net_device *dev);
|
||||
|
||||
|
||||
void register_hdlc_protocol(struct hdlc_proto *proto);
|
||||
void unregister_hdlc_protocol(struct hdlc_proto *proto);
|
||||
|
||||
struct net_device *alloc_hdlcdev(void *priv);
|
||||
|
||||
static __inline__ hdlc_device* dev_to_hdlc(struct net_device *dev)
|
||||
|
||||
static __inline__ struct hdlc_device_desc* dev_to_desc(struct net_device *dev)
|
||||
{
|
||||
return netdev_priv(dev);
|
||||
}
|
||||
|
||||
|
||||
static __inline__ pvc_device* dev_to_pvc(struct net_device *dev)
|
||||
static __inline__ hdlc_device* dev_to_hdlc(struct net_device *dev)
|
||||
{
|
||||
return (pvc_device*)dev->priv;
|
||||
return netdev_priv(dev) + sizeof(struct hdlc_device_desc);
|
||||
}
|
||||
|
||||
|
||||
@@ -225,18 +116,14 @@ int hdlc_open(struct net_device *dev);
|
||||
/* Must be called by hardware driver when HDLC device is being closed */
|
||||
void hdlc_close(struct net_device *dev);
|
||||
|
||||
int attach_hdlc_protocol(struct net_device *dev, struct hdlc_proto *proto,
|
||||
int (*rx)(struct sk_buff *skb), size_t size);
|
||||
/* May be used by hardware driver to gain control over HDLC device */
|
||||
static __inline__ void hdlc_proto_detach(hdlc_device *hdlc)
|
||||
{
|
||||
if (hdlc->proto.detach)
|
||||
hdlc->proto.detach(hdlc);
|
||||
hdlc->proto.detach = NULL;
|
||||
}
|
||||
|
||||
void detach_hdlc_protocol(struct net_device *dev);
|
||||
|
||||
static __inline__ struct net_device_stats *hdlc_stats(struct net_device *dev)
|
||||
{
|
||||
return &dev_to_hdlc(dev)->stats;
|
||||
return &dev_to_desc(dev)->stats;
|
||||
}
|
||||
|
||||
|
||||
@@ -248,8 +135,8 @@ static __inline__ __be16 hdlc_type_trans(struct sk_buff *skb,
|
||||
skb->mac.raw = skb->data;
|
||||
skb->dev = dev;
|
||||
|
||||
if (hdlc->proto.type_trans)
|
||||
return hdlc->proto.type_trans(skb, dev);
|
||||
if (hdlc->proto->type_trans)
|
||||
return hdlc->proto->type_trans(skb, dev);
|
||||
else
|
||||
return htons(ETH_P_HDLC);
|
||||
}
|
||||
|
||||
@@ -1,6 +1,39 @@
|
||||
#ifndef __HDLC_IOCTL_H__
|
||||
#define __HDLC_IOCTL_H__
|
||||
|
||||
|
||||
#define GENERIC_HDLC_VERSION 4 /* For synchronization with sethdlc utility */
|
||||
|
||||
#define CLOCK_DEFAULT 0 /* Default setting */
|
||||
#define CLOCK_EXT 1 /* External TX and RX clock - DTE */
|
||||
#define CLOCK_INT 2 /* Internal TX and RX clock - DCE */
|
||||
#define CLOCK_TXINT 3 /* Internal TX and external RX clock */
|
||||
#define CLOCK_TXFROMRX 4 /* TX clock derived from external RX clock */
|
||||
|
||||
|
||||
#define ENCODING_DEFAULT 0 /* Default setting */
|
||||
#define ENCODING_NRZ 1
|
||||
#define ENCODING_NRZI 2
|
||||
#define ENCODING_FM_MARK 3
|
||||
#define ENCODING_FM_SPACE 4
|
||||
#define ENCODING_MANCHESTER 5
|
||||
|
||||
|
||||
#define PARITY_DEFAULT 0 /* Default setting */
|
||||
#define PARITY_NONE 1 /* No parity */
|
||||
#define PARITY_CRC16_PR0 2 /* CRC16, initial value 0x0000 */
|
||||
#define PARITY_CRC16_PR1 3 /* CRC16, initial value 0xFFFF */
|
||||
#define PARITY_CRC16_PR0_CCITT 4 /* CRC16, initial 0x0000, ITU-T version */
|
||||
#define PARITY_CRC16_PR1_CCITT 5 /* CRC16, initial 0xFFFF, ITU-T version */
|
||||
#define PARITY_CRC32_PR0_CCITT 6 /* CRC32, initial value 0x00000000 */
|
||||
#define PARITY_CRC32_PR1_CCITT 7 /* CRC32, initial value 0xFFFFFFFF */
|
||||
|
||||
#define LMI_DEFAULT 0 /* Default setting */
|
||||
#define LMI_NONE 1 /* No LMI, all PVCs are static */
|
||||
#define LMI_ANSI 2 /* ANSI Annex D */
|
||||
#define LMI_CCITT 3 /* ITU-T Annex A */
|
||||
#define LMI_CISCO 4 /* The "original" LMI, aka Gang of Four */
|
||||
|
||||
typedef struct {
|
||||
unsigned int clock_rate; /* bits per second */
|
||||
unsigned int clock_type; /* internal, external, TX-internal etc. */
|
||||
|
||||
@@ -24,11 +24,15 @@ static inline void flush_kernel_dcache_page(struct page *page)
|
||||
|
||||
/* declarations for linux/mm/highmem.c */
|
||||
unsigned int nr_free_highpages(void);
|
||||
extern unsigned long totalhigh_pages;
|
||||
|
||||
#else /* CONFIG_HIGHMEM */
|
||||
|
||||
static inline unsigned int nr_free_highpages(void) { return 0; }
|
||||
|
||||
#define totalhigh_pages 0
|
||||
|
||||
#ifndef ARCH_HAS_KMAP
|
||||
static inline void *kmap(struct page *page)
|
||||
{
|
||||
might_sleep();
|
||||
@@ -41,6 +45,7 @@ static inline void *kmap(struct page *page)
|
||||
#define kunmap_atomic(addr, idx) do { } while (0)
|
||||
#define kmap_atomic_pfn(pfn, idx) page_address(pfn_to_page(pfn))
|
||||
#define kmap_atomic_to_page(ptr) virt_to_page(ptr)
|
||||
#endif
|
||||
|
||||
#endif /* CONFIG_HIGHMEM */
|
||||
|
||||
|
||||
@@ -40,7 +40,6 @@ struct i2c_algo_bit_data {
|
||||
/* local settings */
|
||||
int udelay; /* half-clock-cycle time in microsecs */
|
||||
/* i.e. clock is (500 / udelay) KHz */
|
||||
int mdelay; /* in millisecs, unused */
|
||||
int timeout; /* in jiffies */
|
||||
};
|
||||
|
||||
|
||||
@@ -35,7 +35,6 @@ struct i2c_algo_pcf_data {
|
||||
|
||||
/* local settings */
|
||||
int udelay;
|
||||
int mdelay;
|
||||
int timeout;
|
||||
};
|
||||
|
||||
|
||||
@@ -1,33 +0,0 @@
|
||||
/*
|
||||
* Copyright (C) 2001,2002,2003 Broadcom Corporation
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation; either version 2
|
||||
* of the License, or (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
|
||||
*/
|
||||
|
||||
#ifndef I2C_ALGO_SIBYTE_H
|
||||
#define I2C_ALGO_SIBYTE_H 1
|
||||
|
||||
#include <linux/i2c.h>
|
||||
|
||||
struct i2c_algo_sibyte_data {
|
||||
void *data; /* private data */
|
||||
int bus; /* which bus */
|
||||
void *reg_base; /* CSR base */
|
||||
};
|
||||
|
||||
int i2c_sibyte_add_bus(struct i2c_adapter *, int speed);
|
||||
int i2c_sibyte_del_bus(struct i2c_adapter *);
|
||||
|
||||
#endif /* I2C_ALGO_SIBYTE_H */
|
||||
@@ -64,14 +64,6 @@ extern int i2c_master_recv(struct i2c_client *,char* ,int);
|
||||
*/
|
||||
extern int i2c_transfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num);
|
||||
|
||||
/*
|
||||
* Some adapter types (i.e. PCF 8584 based ones) may support slave behaviuor.
|
||||
* This is not tested/implemented yet and will change in the future.
|
||||
*/
|
||||
extern int i2c_slave_send(struct i2c_client *,char*,int);
|
||||
extern int i2c_slave_recv(struct i2c_client *,char*,int);
|
||||
|
||||
|
||||
|
||||
/* This is the very generalized SMBus access routine. You probably do not
|
||||
want to use this, though; one of the functions below may be much easier,
|
||||
@@ -201,10 +193,6 @@ struct i2c_algorithm {
|
||||
unsigned short flags, char read_write,
|
||||
u8 command, int size, union i2c_smbus_data * data);
|
||||
|
||||
/* --- these optional/future use for some adapter types.*/
|
||||
int (*slave_send)(struct i2c_adapter *,char*,int);
|
||||
int (*slave_recv)(struct i2c_adapter *,char*,int);
|
||||
|
||||
/* --- ioctl like call to set div. parameters. */
|
||||
int (*algo_control)(struct i2c_adapter *, unsigned int, unsigned long);
|
||||
|
||||
@@ -220,7 +208,7 @@ struct i2c_adapter {
|
||||
struct module *owner;
|
||||
unsigned int id;
|
||||
unsigned int class;
|
||||
struct i2c_algorithm *algo;/* the algorithm to access the bus */
|
||||
const struct i2c_algorithm *algo; /* the algorithm to access the bus */
|
||||
void *algo_data;
|
||||
|
||||
/* --- administration stuff. */
|
||||
|
||||
@@ -59,6 +59,8 @@
|
||||
#define IFF_SLAVE_INACTIVE 0x4 /* bonding slave not the curr. active */
|
||||
#define IFF_MASTER_8023AD 0x8 /* bonding master, 802.3ad. */
|
||||
#define IFF_MASTER_ALB 0x10 /* bonding master, balance-alb. */
|
||||
#define IFF_BONDING 0x20 /* bonding master or slave */
|
||||
#define IFF_SLAVE_NEEDARP 0x40 /* need ARPs for validation */
|
||||
|
||||
#define IF_GET_IFACE 0x0001 /* for querying only */
|
||||
#define IF_GET_PROTO 0x0002
|
||||
|
||||
@@ -68,6 +68,7 @@ extern initcall_t __security_initcall_start[], __security_initcall_end[];
|
||||
|
||||
/* Defined in init/main.c */
|
||||
extern char saved_command_line[];
|
||||
extern unsigned int reset_devices;
|
||||
|
||||
/* used by init/main.c */
|
||||
extern void setup_arch(char **);
|
||||
|
||||
@@ -320,7 +320,9 @@ handle_irq_name(void fastcall (*handle)(unsigned int, struct irq_desc *,
|
||||
* Monolithic do_IRQ implementation.
|
||||
* (is an explicit fastcall, because i386 4KSTACKS calls it from assembly)
|
||||
*/
|
||||
#ifndef CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ
|
||||
extern fastcall unsigned int __do_IRQ(unsigned int irq, struct pt_regs *regs);
|
||||
#endif
|
||||
|
||||
/*
|
||||
* Architectures call this to let the generic IRQ layer
|
||||
@@ -332,10 +334,14 @@ static inline void generic_handle_irq(unsigned int irq, struct pt_regs *regs)
|
||||
{
|
||||
struct irq_desc *desc = irq_desc + irq;
|
||||
|
||||
#ifdef CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ
|
||||
desc->handle_irq(irq, desc, regs);
|
||||
#else
|
||||
if (likely(desc->handle_irq))
|
||||
desc->handle_irq(irq, desc, regs);
|
||||
else
|
||||
__do_IRQ(irq, regs);
|
||||
#endif
|
||||
}
|
||||
|
||||
/* Handling of unhandled and spurious interrupts: */
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
/*
|
||||
* linux/include/linux/jbd.h
|
||||
*
|
||||
*
|
||||
* Written by Stephen C. Tweedie <sct@redhat.com>
|
||||
*
|
||||
* Copyright 1998-2000 Red Hat, Inc --- All Rights Reserved
|
||||
@@ -64,7 +64,7 @@ extern int journal_enable_debug;
|
||||
if ((n) <= journal_enable_debug) { \
|
||||
printk (KERN_DEBUG "(%s, %d): %s: ", \
|
||||
__FILE__, __LINE__, __FUNCTION__); \
|
||||
printk (f, ## a); \
|
||||
printk (f, ## a); \
|
||||
} \
|
||||
} while (0)
|
||||
#else
|
||||
@@ -97,8 +97,8 @@ extern void jbd_slab_free(void *ptr, size_t size);
|
||||
* number of outstanding buffers possible at any time. When the
|
||||
* operation completes, any buffer credits not used are credited back to
|
||||
* the transaction, so that at all times we know how many buffers the
|
||||
* outstanding updates on a transaction might possibly touch.
|
||||
*
|
||||
* outstanding updates on a transaction might possibly touch.
|
||||
*
|
||||
* This is an opaque datatype.
|
||||
**/
|
||||
typedef struct handle_s handle_t; /* Atomic operation type */
|
||||
@@ -108,7 +108,7 @@ typedef struct handle_s handle_t; /* Atomic operation type */
|
||||
* typedef journal_t - The journal_t maintains all of the journaling state information for a single filesystem.
|
||||
*
|
||||
* journal_t is linked to from the fs superblock structure.
|
||||
*
|
||||
*
|
||||
* We use the journal_t to keep track of all outstanding transaction
|
||||
* activity on the filesystem, and to manage the state of the log
|
||||
* writing process.
|
||||
@@ -128,7 +128,7 @@ typedef struct journal_s journal_t; /* Journal control structure */
|
||||
* On-disk structures
|
||||
*/
|
||||
|
||||
/*
|
||||
/*
|
||||
* Descriptor block types:
|
||||
*/
|
||||
|
||||
@@ -149,8 +149,8 @@ typedef struct journal_header_s
|
||||
} journal_header_t;
|
||||
|
||||
|
||||
/*
|
||||
* The block tag: used to describe a single buffer in the journal
|
||||
/*
|
||||
* The block tag: used to describe a single buffer in the journal
|
||||
*/
|
||||
typedef struct journal_block_tag_s
|
||||
{
|
||||
@@ -158,9 +158,9 @@ typedef struct journal_block_tag_s
|
||||
__be32 t_flags; /* See below */
|
||||
} journal_block_tag_t;
|
||||
|
||||
/*
|
||||
/*
|
||||
* The revoke descriptor: used on disk to describe a series of blocks to
|
||||
* be revoked from the log
|
||||
* be revoked from the log
|
||||
*/
|
||||
typedef struct journal_revoke_header_s
|
||||
{
|
||||
@@ -201,9 +201,9 @@ typedef struct journal_superblock_s
|
||||
|
||||
/* 0x0024 */
|
||||
/* Remaining fields are only valid in a version-2 superblock */
|
||||
__be32 s_feature_compat; /* compatible feature set */
|
||||
__be32 s_feature_incompat; /* incompatible feature set */
|
||||
__be32 s_feature_ro_compat; /* readonly-compatible feature set */
|
||||
__be32 s_feature_compat; /* compatible feature set */
|
||||
__be32 s_feature_incompat; /* incompatible feature set */
|
||||
__be32 s_feature_ro_compat; /* readonly-compatible feature set */
|
||||
/* 0x0030 */
|
||||
__u8 s_uuid[16]; /* 128-bit uuid for journal */
|
||||
|
||||
@@ -374,10 +374,10 @@ struct jbd_revoke_table_s;
|
||||
**/
|
||||
|
||||
/* Docbook can't yet cope with the bit fields, but will leave the documentation
|
||||
* in so it can be fixed later.
|
||||
* in so it can be fixed later.
|
||||
*/
|
||||
|
||||
struct handle_s
|
||||
struct handle_s
|
||||
{
|
||||
/* Which compound transaction is this update a part of? */
|
||||
transaction_t *h_transaction;
|
||||
@@ -435,7 +435,7 @@ struct handle_s
|
||||
*
|
||||
*/
|
||||
|
||||
struct transaction_s
|
||||
struct transaction_s
|
||||
{
|
||||
/* Pointer to the journal for this transaction. [no locking] */
|
||||
journal_t *t_journal;
|
||||
@@ -455,7 +455,7 @@ struct transaction_s
|
||||
T_RUNDOWN,
|
||||
T_FLUSH,
|
||||
T_COMMIT,
|
||||
T_FINISHED
|
||||
T_FINISHED
|
||||
} t_state;
|
||||
|
||||
/*
|
||||
@@ -569,7 +569,7 @@ struct transaction_s
|
||||
* journal_t.
|
||||
* @j_flags: General journaling state flags
|
||||
* @j_errno: Is there an outstanding uncleared error on the journal (from a
|
||||
* prior abort)?
|
||||
* prior abort)?
|
||||
* @j_sb_buffer: First part of superblock buffer
|
||||
* @j_superblock: Second part of superblock buffer
|
||||
* @j_format_version: Version of the superblock format
|
||||
@@ -583,7 +583,7 @@ struct transaction_s
|
||||
* @j_wait_transaction_locked: Wait queue for waiting for a locked transaction
|
||||
* to start committing, or for a barrier lock to be released
|
||||
* @j_wait_logspace: Wait queue for waiting for checkpointing to complete
|
||||
* @j_wait_done_commit: Wait queue for waiting for commit to complete
|
||||
* @j_wait_done_commit: Wait queue for waiting for commit to complete
|
||||
* @j_wait_checkpoint: Wait queue to trigger checkpointing
|
||||
* @j_wait_commit: Wait queue to trigger commit
|
||||
* @j_wait_updates: Wait queue to wait for updates to complete
|
||||
@@ -592,7 +592,7 @@ struct transaction_s
|
||||
* @j_tail: Journal tail - identifies the oldest still-used block in the
|
||||
* journal.
|
||||
* @j_free: Journal free - how many free blocks are there in the journal?
|
||||
* @j_first: The block number of the first usable block
|
||||
* @j_first: The block number of the first usable block
|
||||
* @j_last: The block number one beyond the last usable block
|
||||
* @j_dev: Device where we store the journal
|
||||
* @j_blocksize: blocksize for the location where we store the journal.
|
||||
@@ -604,12 +604,12 @@ struct transaction_s
|
||||
* @j_list_lock: Protects the buffer lists and internal buffer state.
|
||||
* @j_inode: Optional inode where we store the journal. If present, all journal
|
||||
* block numbers are mapped into this inode via bmap().
|
||||
* @j_tail_sequence: Sequence number of the oldest transaction in the log
|
||||
* @j_tail_sequence: Sequence number of the oldest transaction in the log
|
||||
* @j_transaction_sequence: Sequence number of the next transaction to grant
|
||||
* @j_commit_sequence: Sequence number of the most recently committed
|
||||
* transaction
|
||||
* @j_commit_request: Sequence number of the most recent transaction wanting
|
||||
* commit
|
||||
* commit
|
||||
* @j_uuid: Uuid of client object.
|
||||
* @j_task: Pointer to the current commit thread for this journal
|
||||
* @j_max_transaction_buffers: Maximum number of metadata buffers to allow in a
|
||||
@@ -699,7 +699,7 @@ struct journal_s
|
||||
wait_queue_head_t j_wait_updates;
|
||||
|
||||
/* Semaphore for locking against concurrent checkpoints */
|
||||
struct mutex j_checkpoint_mutex;
|
||||
struct mutex j_checkpoint_mutex;
|
||||
|
||||
/*
|
||||
* Journal head: identifies the first unused block in the journal.
|
||||
@@ -732,7 +732,7 @@ struct journal_s
|
||||
*/
|
||||
struct block_device *j_dev;
|
||||
int j_blocksize;
|
||||
unsigned int j_blk_offset;
|
||||
unsigned long j_blk_offset;
|
||||
|
||||
/*
|
||||
* Device which holds the client fs. For internal journal this will be
|
||||
@@ -823,8 +823,8 @@ struct journal_s
|
||||
void *j_private;
|
||||
};
|
||||
|
||||
/*
|
||||
* Journal flag definitions
|
||||
/*
|
||||
* Journal flag definitions
|
||||
*/
|
||||
#define JFS_UNMOUNT 0x001 /* Journal thread is being destroyed */
|
||||
#define JFS_ABORT 0x002 /* Journaling has been aborted for errors. */
|
||||
@@ -833,7 +833,7 @@ struct journal_s
|
||||
#define JFS_LOADED 0x010 /* The journal superblock has been loaded */
|
||||
#define JFS_BARRIER 0x020 /* Use IDE barriers */
|
||||
|
||||
/*
|
||||
/*
|
||||
* Function declarations for the journaling transaction and buffer
|
||||
* management
|
||||
*/
|
||||
@@ -862,11 +862,11 @@ int __journal_remove_checkpoint(struct journal_head *);
|
||||
void __journal_insert_checkpoint(struct journal_head *, transaction_t *);
|
||||
|
||||
/* Buffer IO */
|
||||
extern int
|
||||
extern int
|
||||
journal_write_metadata_buffer(transaction_t *transaction,
|
||||
struct journal_head *jh_in,
|
||||
struct journal_head **jh_out,
|
||||
int blocknr);
|
||||
unsigned long blocknr);
|
||||
|
||||
/* Transaction locking */
|
||||
extern void __wait_on_journal (journal_t *);
|
||||
@@ -890,7 +890,7 @@ static inline handle_t *journal_current_handle(void)
|
||||
/* The journaling code user interface:
|
||||
*
|
||||
* Create and destroy handles
|
||||
* Register buffer modifications against the current transaction.
|
||||
* Register buffer modifications against the current transaction.
|
||||
*/
|
||||
|
||||
extern handle_t *journal_start(journal_t *, int nblocks);
|
||||
@@ -917,11 +917,11 @@ extern journal_t * journal_init_dev(struct block_device *bdev,
|
||||
int start, int len, int bsize);
|
||||
extern journal_t * journal_init_inode (struct inode *);
|
||||
extern int journal_update_format (journal_t *);
|
||||
extern int journal_check_used_features
|
||||
extern int journal_check_used_features
|
||||
(journal_t *, unsigned long, unsigned long, unsigned long);
|
||||
extern int journal_check_available_features
|
||||
extern int journal_check_available_features
|
||||
(journal_t *, unsigned long, unsigned long, unsigned long);
|
||||
extern int journal_set_features
|
||||
extern int journal_set_features
|
||||
(journal_t *, unsigned long, unsigned long, unsigned long);
|
||||
extern int journal_create (journal_t *);
|
||||
extern int journal_load (journal_t *journal);
|
||||
@@ -1015,7 +1015,7 @@ do { \
|
||||
* bit, when set, indicates that we have had a fatal error somewhere,
|
||||
* either inside the journaling layer or indicated to us by the client
|
||||
* (eg. ext3), and that we and should not commit any further
|
||||
* transactions.
|
||||
* transactions.
|
||||
*/
|
||||
|
||||
static inline int is_journal_aborted(journal_t *journal)
|
||||
@@ -1082,7 +1082,7 @@ static inline int jbd_space_needed(journal_t *journal)
|
||||
#define BJ_Reserved 7 /* Buffer is reserved for access by journal */
|
||||
#define BJ_Locked 8 /* Locked for I/O during commit */
|
||||
#define BJ_Types 9
|
||||
|
||||
|
||||
extern int jbd_blocks_per_page(struct inode *inode);
|
||||
|
||||
#ifdef __KERNEL__
|
||||
|
||||
@@ -115,6 +115,21 @@ static inline u64 get_jiffies_64(void)
	 ((long)(a) - (long)(b) >= 0))
#define time_before_eq(a,b)	time_after_eq(b,a)

/* Same as above, but does so with platform independent 64bit types.
 * These must be used when utilizing jiffies_64 (i.e. return value of
 * get_jiffies_64() */
#define time_after64(a,b)	\
	(typecheck(__u64, a) &&	\
	 typecheck(__u64, b) && \
	 ((__s64)(b) - (__s64)(a) < 0))
#define time_before64(a,b)	time_after64(b,a)

#define time_after_eq64(a,b)	\
	(typecheck(__u64, a) && \
	 typecheck(__u64, b) && \
	 ((__s64)(a) - (__s64)(b) >= 0))
#define time_before_eq64(a,b)	time_after_eq64(b,a)

/*
 * Have the 32 bit jiffies value wrap 5 minutes after boot
 * so jiffies wrap bugs show up earlier.

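The new 64-bit comparison macros behave like their 32-bit counterparts but operate on jiffies_64 values. A brief usage sketch (hypothetical caller, not part of the patch):

/* Returns non-zero once the 64-bit jiffies clock has passed @deadline. */
static inline int example_deadline_expired(u64 deadline)
{
	return time_after64(get_jiffies_64(), deadline);
}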
@@ -187,6 +187,7 @@ extern void bust_spinlocks(int yes);
extern int oops_in_progress;		/* If set, an oops, panic(), BUG() or die() is in progress */
extern int panic_timeout;
extern int panic_on_oops;
extern int panic_on_unrecovered_nmi;
extern int tainted;
extern const char *print_tainted(void);
extern void add_taint(unsigned);
@@ -349,4 +350,11 @@ struct sysinfo {
/* Trap pasters of __FUNCTION__ at compile-time */
#define __FUNCTION__ (__func__)

/* This helps us to avoid #ifdef CONFIG_NUMA */
#ifdef CONFIG_NUMA
#define NUMA_BUILD 1
#else
#define NUMA_BUILD 0
#endif

#endif

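NUMA_BUILD is meant to replace preprocessor conditionals with ordinary C conditions that the compiler can discard on non-NUMA kernels. A hypothetical example:

/* The whole branch is compiled away when NUMA_BUILD is 0. */
static void example_report_node(int nid)
{
	if (NUMA_BUILD && nid >= 0)
		printk(KERN_DEBUG "served from node %d\n", nid);
}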
@@ -20,6 +20,7 @@
#include <linux/types.h>
#include <linux/list.h>
#include <linux/sysfs.h>
#include <linux/compiler.h>
#include <linux/spinlock.h>
#include <linux/rwsem.h>
#include <linux/kref.h>
@@ -71,12 +72,12 @@ static inline const char * kobject_name(const struct kobject * kobj)
extern void kobject_init(struct kobject *);
extern void kobject_cleanup(struct kobject *);

extern int kobject_add(struct kobject *);
extern int __must_check kobject_add(struct kobject *);
extern void kobject_del(struct kobject *);

extern int kobject_rename(struct kobject *, const char *new_name);
extern int __must_check kobject_rename(struct kobject *, const char *new_name);

extern int kobject_register(struct kobject *);
extern int __must_check kobject_register(struct kobject *);
extern void kobject_unregister(struct kobject *);

extern struct kobject * kobject_get(struct kobject *);
@@ -128,8 +129,8 @@ struct kset {


extern void kset_init(struct kset * k);
extern int kset_add(struct kset * k);
extern int kset_register(struct kset * k);
extern int __must_check kset_add(struct kset * k);
extern int __must_check kset_register(struct kset * k);
extern void kset_unregister(struct kset * k);

static inline struct kset * to_kset(struct kobject * kobj)
@@ -239,7 +240,7 @@ extern struct subsystem hypervisor_subsys;
	(obj)->subsys.kset.kobj.kset = &(_subsys).kset

extern void subsystem_init(struct subsystem *);
extern int subsystem_register(struct subsystem *);
extern int __must_check subsystem_register(struct subsystem *);
extern void subsystem_unregister(struct subsystem *);

static inline struct subsystem * subsys_get(struct subsystem * s)
@@ -258,7 +259,8 @@ struct subsys_attribute {
	ssize_t (*store)(struct subsystem *, const char *, size_t);
};

extern int subsys_create_file(struct subsystem * , struct subsys_attribute *);
extern int __must_check subsys_create_file(struct subsystem * ,
					   struct subsys_attribute *);

#if defined(CONFIG_HOTPLUG)
void kobject_uevent(struct kobject *kobj, enum kobject_action action);

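With __must_check on the kobject/kset/subsystem registration helpers, callers that ignore the return value now get a compiler warning. A hedged caller sketch (function name is illustrative):

/* Hypothetical caller: propagate the error instead of ignoring it. */
static int example_register(struct kobject *kobj)
{
	int error = kobject_register(kobj);

	if (error)
		return error;
	return 0;
}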
@@ -36,7 +36,15 @@
#include <linux/workqueue.h>
#include <scsi/scsi_host.h>

/*
 * Define if arch has non-standard setup.  This is a _PCI_ standard
 * not a legacy or ISA standard.
 */
#ifdef CONFIG_ATA_NONSTANDARD
#include <asm/libata-portmap.h>
#else
#include <asm-generic/libata-portmap.h>
#endif

/*
 * compile-time options: to be removed as soon as all the drivers are

@@ -35,9 +35,13 @@
#endif

#define KPROBE_ENTRY(name) \
  .section .kprobes.text, "ax"; \
  .pushsection .kprobes.text, "ax"; \
  ENTRY(name)

#define KPROBE_END(name) \
  END(name);		 \
  .popsection

#ifndef END
#define END(name) \
  .size name, .-name

@@ -162,9 +162,9 @@ extern struct zonelist *huge_zonelist(struct vm_area_struct *vma,
		unsigned long addr);
extern unsigned slab_node(struct mempolicy *policy);

extern int policy_zone;
extern enum zone_type policy_zone;

static inline void check_highest_zone(int k)
static inline void check_highest_zone(enum zone_type k)
{
	if (k > policy_zone)
		policy_zone = k;

@@ -15,6 +15,8 @@
|
||||
#include <linux/fs.h>
|
||||
#include <linux/mutex.h>
|
||||
#include <linux/debug_locks.h>
|
||||
#include <linux/backing-dev.h>
|
||||
#include <linux/mm_types.h>
|
||||
|
||||
struct mempolicy;
|
||||
struct anon_vma;
|
||||
@@ -197,6 +199,7 @@ struct vm_operations_struct {
|
||||
void (*open)(struct vm_area_struct * area);
|
||||
void (*close)(struct vm_area_struct * area);
|
||||
struct page * (*nopage)(struct vm_area_struct * area, unsigned long address, int *type);
|
||||
unsigned long (*nopfn)(struct vm_area_struct * area, unsigned long address);
|
||||
int (*populate)(struct vm_area_struct * area, unsigned long address, unsigned long len, pgprot_t prot, unsigned long pgoff, int nonblock);
|
||||
|
||||
/* notification that a previously read-only page is about to become
|
||||
@@ -214,61 +217,6 @@ struct vm_operations_struct {
|
||||
struct mmu_gather;
|
||||
struct inode;
|
||||
|
||||
/*
|
||||
* Each physical page in the system has a struct page associated with
|
||||
* it to keep track of whatever it is we are using the page for at the
|
||||
* moment. Note that we have no way to track which tasks are using
|
||||
* a page.
|
||||
*/
|
||||
struct page {
|
||||
unsigned long flags; /* Atomic flags, some possibly
|
||||
* updated asynchronously */
|
||||
atomic_t _count; /* Usage count, see below. */
|
||||
atomic_t _mapcount; /* Count of ptes mapped in mms,
|
||||
* to show when page is mapped
|
||||
* & limit reverse map searches.
|
||||
*/
|
||||
union {
|
||||
struct {
|
||||
unsigned long private; /* Mapping-private opaque data:
|
||||
* usually used for buffer_heads
|
||||
* if PagePrivate set; used for
|
||||
* swp_entry_t if PageSwapCache;
|
||||
* indicates order in the buddy
|
||||
* system if PG_buddy is set.
|
||||
*/
|
||||
struct address_space *mapping; /* If low bit clear, points to
|
||||
* inode address_space, or NULL.
|
||||
* If page mapped as anonymous
|
||||
* memory, low bit is set, and
|
||||
* it points to anon_vma object:
|
||||
* see PAGE_MAPPING_ANON below.
|
||||
*/
|
||||
};
|
||||
#if NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS
|
||||
spinlock_t ptl;
|
||||
#endif
|
||||
};
|
||||
pgoff_t index; /* Our offset within mapping. */
|
||||
struct list_head lru; /* Pageout list, eg. active_list
|
||||
* protected by zone->lru_lock !
|
||||
*/
|
||||
/*
|
||||
* On machines where all RAM is mapped into kernel address space,
|
||||
* we can simply calculate the virtual address. On machines with
|
||||
* highmem some memory is mapped into kernel virtual memory
|
||||
* dynamically, so we need a place to store that address.
|
||||
* Note that this field could be 16 bits on x86 ... ;)
|
||||
*
|
||||
* Architectures with slow multiplication can define
|
||||
* WANT_PAGE_VIRTUAL in asm/page.h
|
||||
*/
|
||||
#if defined(WANT_PAGE_VIRTUAL)
|
||||
void *virtual; /* Kernel virtual address (NULL if
|
||||
not kmapped, ie. highmem) */
|
||||
#endif /* WANT_PAGE_VIRTUAL */
|
||||
};
|
||||
|
||||
#define page_private(page) ((page)->private)
|
||||
#define set_page_private(page, v) ((page)->private = (v))
|
||||
|
||||
@@ -278,6 +226,12 @@ struct page {
|
||||
*/
|
||||
#include <linux/page-flags.h>
|
||||
|
||||
#ifdef CONFIG_DEBUG_VM
|
||||
#define VM_BUG_ON(cond) BUG_ON(cond)
|
||||
#else
|
||||
#define VM_BUG_ON(condition) do { } while(0)
|
||||
#endif
|
||||
|
||||
/*
|
||||
* Methods to modify the page usage count.
|
||||
*
|
||||
@@ -292,12 +246,11 @@ struct page {
|
||||
*/
|
||||
|
||||
/*
|
||||
* Drop a ref, return true if the logical refcount fell to zero (the page has
|
||||
* no users)
|
||||
* Drop a ref, return true if the refcount fell to zero (the page has no users)
|
||||
*/
|
||||
static inline int put_page_testzero(struct page *page)
|
||||
{
|
||||
BUG_ON(atomic_read(&page->_count) == 0);
|
||||
VM_BUG_ON(atomic_read(&page->_count) == 0);
|
||||
return atomic_dec_and_test(&page->_count);
|
||||
}
|
||||
|
||||
@@ -307,11 +260,10 @@ static inline int put_page_testzero(struct page *page)
|
||||
*/
|
||||
static inline int get_page_unless_zero(struct page *page)
|
||||
{
|
||||
VM_BUG_ON(PageCompound(page));
|
||||
return atomic_inc_not_zero(&page->_count);
|
||||
}
|
||||
|
||||
extern void FASTCALL(__page_cache_release(struct page *));
|
||||
|
||||
static inline int page_count(struct page *page)
|
||||
{
|
||||
if (unlikely(PageCompound(page)))
|
||||
@@ -323,6 +275,7 @@ static inline void get_page(struct page *page)
|
||||
{
|
||||
if (unlikely(PageCompound(page)))
|
||||
page = (struct page *)page_private(page);
|
||||
VM_BUG_ON(atomic_read(&page->_count) == 0);
|
||||
atomic_inc(&page->_count);
|
||||
}
|
||||
|
||||
@@ -349,43 +302,55 @@ void split_page(struct page *page, unsigned int order);
|
||||
* For the non-reserved pages, page_count(page) denotes a reference count.
|
||||
* page_count() == 0 means the page is free. page->lru is then used for
|
||||
* freelist management in the buddy allocator.
|
||||
* page_count() == 1 means the page is used for exactly one purpose
|
||||
* (e.g. a private data page of one process).
|
||||
* page_count() > 0 means the page has been allocated.
|
||||
*
|
||||
* A page may be used for kmalloc() or anyone else who does a
|
||||
* __get_free_page(). In this case the page_count() is at least 1, and
|
||||
* all other fields are unused but should be 0 or NULL. The
|
||||
* management of this page is the responsibility of the one who uses
|
||||
* it.
|
||||
* Pages are allocated by the slab allocator in order to provide memory
|
||||
* to kmalloc and kmem_cache_alloc. In this case, the management of the
|
||||
* page, and the fields in 'struct page' are the responsibility of mm/slab.c
|
||||
* unless a particular usage is carefully commented. (the responsibility of
|
||||
* freeing the kmalloc memory is the caller's, of course).
|
||||
*
|
||||
* The other pages (we may call them "process pages") are completely
|
||||
* A page may be used by anyone else who does a __get_free_page().
|
||||
* In this case, page_count still tracks the references, and should only
|
||||
* be used through the normal accessor functions. The top bits of page->flags
|
||||
* and page->virtual store page management information, but all other fields
|
||||
* are unused and could be used privately, carefully. The management of this
|
||||
* page is the responsibility of the one who allocated it, and those who have
|
||||
* subsequently been given references to it.
|
||||
*
|
||||
* The other pages (we may call them "pagecache pages") are completely
|
||||
* managed by the Linux memory manager: I/O, buffers, swapping etc.
|
||||
* The following discussion applies only to them.
|
||||
*
|
||||
* A page may belong to an inode's memory mapping. In this case,
|
||||
* page->mapping is the pointer to the inode, and page->index is the
|
||||
* file offset of the page, in units of PAGE_CACHE_SIZE.
|
||||
* A pagecache page contains an opaque `private' member, which belongs to the
|
||||
* page's address_space. Usually, this is the address of a circular list of
|
||||
* the page's disk buffers. PG_private must be set to tell the VM to call
|
||||
* into the filesystem to release these pages.
|
||||
*
|
||||
* A page contains an opaque `private' member, which belongs to the
|
||||
* page's address_space. Usually, this is the address of a circular
|
||||
* list of the page's disk buffers.
|
||||
* A page may belong to an inode's memory mapping. In this case, page->mapping
|
||||
* is the pointer to the inode, and page->index is the file offset of the page,
|
||||
* in units of PAGE_CACHE_SIZE.
|
||||
*
|
||||
* For pages belonging to inodes, the page_count() is the number of
|
||||
* attaches, plus 1 if `private' contains something, plus one for
|
||||
* the page cache itself.
|
||||
* If pagecache pages are not associated with an inode, they are said to be
|
||||
* anonymous pages. These may become associated with the swapcache, and in that
|
||||
* case PG_swapcache is set, and page->private is an offset into the swapcache.
|
||||
*
|
||||
* Instead of keeping dirty/clean pages in per address-space lists, we instead
|
||||
* now tag pages as dirty/under writeback in the radix tree.
|
||||
* In either case (swapcache or inode backed), the pagecache itself holds one
|
||||
* reference to the page. Setting PG_private should also increment the
|
||||
* refcount. The each user mapping also has a reference to the page.
|
||||
*
|
||||
* There is also a per-mapping radix tree mapping index to the page
|
||||
* in memory if present. The tree is rooted at mapping->root.
|
||||
* The pagecache pages are stored in a per-mapping radix tree, which is
|
||||
* rooted at mapping->page_tree, and indexed by offset.
|
||||
* Where 2.4 and early 2.6 kernels kept dirty/clean pages in per-address_space
|
||||
* lists, we instead now tag pages as dirty/writeback in the radix tree.
|
||||
*
|
||||
* All process pages can do I/O:
|
||||
* All pagecache pages may be subject to I/O:
|
||||
* - inode pages may need to be read from disk,
|
||||
* - inode pages which have been modified and are MAP_SHARED may need
|
||||
* to be written to disk,
|
||||
* - private pages which have been modified may need to be swapped out
|
||||
* to swap space and (later) to be read back into memory.
|
||||
* to be written back to the inode on disk,
|
||||
* - anonymous pages (including MAP_PRIVATE file mappings) which have been
|
||||
* modified may need to be swapped out to swap space and (later) to be read
|
||||
* back into memory.
|
||||
*/
|
||||
|
||||
/*
|
||||
@@ -463,7 +428,7 @@ void split_page(struct page *page, unsigned int order);
|
||||
#define SECTIONS_MASK ((1UL << SECTIONS_WIDTH) - 1)
|
||||
#define ZONETABLE_MASK ((1UL << ZONETABLE_SHIFT) - 1)
|
||||
|
||||
static inline unsigned long page_zonenum(struct page *page)
|
||||
static inline enum zone_type page_zonenum(struct page *page)
|
||||
{
|
||||
return (page->flags >> ZONES_PGSHIFT) & ZONES_MASK;
|
||||
}
|
||||
@@ -480,23 +445,33 @@ static inline struct zone *page_zone(struct page *page)
|
||||
return zone_table[page_zone_id(page)];
|
||||
}
|
||||
|
||||
static inline unsigned long zone_to_nid(struct zone *zone)
|
||||
{
|
||||
#ifdef CONFIG_NUMA
|
||||
return zone->node;
|
||||
#else
|
||||
return 0;
|
||||
#endif
|
||||
}
|
||||
|
||||
static inline unsigned long page_to_nid(struct page *page)
|
||||
{
|
||||
if (FLAGS_HAS_NODE)
|
||||
return (page->flags >> NODES_PGSHIFT) & NODES_MASK;
|
||||
else
|
||||
return page_zone(page)->zone_pgdat->node_id;
|
||||
return zone_to_nid(page_zone(page));
|
||||
}
|
||||
static inline unsigned long page_to_section(struct page *page)
|
||||
{
|
||||
return (page->flags >> SECTIONS_PGSHIFT) & SECTIONS_MASK;
|
||||
}
|
||||
|
||||
static inline void set_page_zone(struct page *page, unsigned long zone)
|
||||
static inline void set_page_zone(struct page *page, enum zone_type zone)
|
||||
{
|
||||
page->flags &= ~(ZONES_MASK << ZONES_PGSHIFT);
|
||||
page->flags |= (zone & ZONES_MASK) << ZONES_PGSHIFT;
|
||||
}
|
||||
|
||||
static inline void set_page_node(struct page *page, unsigned long node)
|
||||
{
|
||||
page->flags &= ~(NODES_MASK << NODES_PGSHIFT);
|
||||
@@ -508,7 +483,7 @@ static inline void set_page_section(struct page *page, unsigned long section)
|
||||
page->flags |= (section & SECTIONS_MASK) << SECTIONS_PGSHIFT;
|
||||
}
|
||||
|
||||
static inline void set_page_links(struct page *page, unsigned long zone,
|
||||
static inline void set_page_links(struct page *page, enum zone_type zone,
|
||||
unsigned long node, unsigned long pfn)
|
||||
{
|
||||
set_page_zone(page, zone);
|
||||
@@ -521,11 +496,6 @@ static inline void set_page_links(struct page *page, unsigned long zone,
|
||||
*/
|
||||
#include <linux/vmstat.h>
|
||||
|
||||
#ifndef CONFIG_DISCONTIGMEM
|
||||
/* The array of struct pages - for discontigmem use pgdat->lmem_map */
|
||||
extern struct page *mem_map;
|
||||
#endif
|
||||
|
||||
static __always_inline void *lowmem_page_address(struct page *page)
|
||||
{
|
||||
return __va(page_to_pfn(page) << PAGE_SHIFT);
|
||||
@@ -624,6 +594,12 @@ static inline int page_mapped(struct page *page)
|
||||
#define NOPAGE_SIGBUS (NULL)
|
||||
#define NOPAGE_OOM ((struct page *) (-1))
|
||||
|
||||
/*
|
||||
* Error return values for the *_nopfn functions
|
||||
*/
|
||||
#define NOPFN_SIGBUS ((unsigned long) -1)
|
||||
#define NOPFN_OOM ((unsigned long) -2)
|
||||
|
||||
/*
|
||||
* Different kinds of faults, as returned by handle_mm_fault().
|
||||
* Used to decide whether a process gets delivered SIGBUS or
|
||||
@@ -802,6 +778,39 @@ struct shrinker;
extern struct shrinker *set_shrinker(int, shrinker_t);
extern void remove_shrinker(struct shrinker *shrinker);

/*
 * Some shared mappings will want the pages marked read-only
 * to track write events. If so, we'll downgrade vm_page_prot
 * to the private version (using protection_map[] without the
 * VM_SHARED bit).
 */
static inline int vma_wants_writenotify(struct vm_area_struct *vma)
{
	unsigned int vm_flags = vma->vm_flags;

	/* If it was private or non-writable, the write bit is already clear */
	if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
		return 0;

	/* The backer wishes to know when pages are first written to? */
	if (vma->vm_ops && vma->vm_ops->page_mkwrite)
		return 1;

	/* The open routine did something to the protections already? */
	if (pgprot_val(vma->vm_page_prot) !=
	    pgprot_val(protection_map[vm_flags &
		    (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]))
		return 0;

	/* Specialty mapping? */
	if (vm_flags & (VM_PFNMAP|VM_INSERTPAGE))
		return 0;

	/* Can the mapping track the dirty pages? */
	return vma->vm_file && vma->vm_file->f_mapping &&
		mapping_cap_account_dirty(vma->vm_file->f_mapping);
}

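As a hedged illustration (not part of this header, and the exact call site is assumed), mmap-time code could apply the downgrade that the comment above describes roughly like this:

/* Hypothetical sketch: fall back to the non-shared protection so the first
 * write faults and the filesystem sees page_mkwrite() for dirty tracking. */
static void example_apply_writenotify(struct vm_area_struct *vma)
{
	if (vma_wants_writenotify(vma))
		vma->vm_page_prot =
			protection_map[vma->vm_flags & (VM_READ|VM_WRITE|VM_EXEC)];
}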
extern pte_t *FASTCALL(get_locked_pte(struct mm_struct *mm, unsigned long addr, spinlock_t **ptl));
|
||||
|
||||
int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
|
||||
@@ -879,6 +888,56 @@ extern void free_area_init(unsigned long * zones_size);
|
||||
extern void free_area_init_node(int nid, pg_data_t *pgdat,
|
||||
unsigned long * zones_size, unsigned long zone_start_pfn,
|
||||
unsigned long *zholes_size);
|
||||
#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
|
||||
/*
|
||||
* With CONFIG_ARCH_POPULATES_NODE_MAP set, an architecture may initialise its
|
||||
* zones, allocate the backing mem_map and account for memory holes in a more
|
||||
* architecture independent manner. This is a substitute for creating the
|
||||
* zone_sizes[] and zholes_size[] arrays and passing them to
|
||||
* free_area_init_node()
|
||||
*
|
||||
* An architecture is expected to register range of page frames backed by
|
||||
* physical memory with add_active_range() before calling
|
||||
* free_area_init_nodes() passing in the PFN each zone ends at. At a basic
|
||||
* usage, an architecture is expected to do something like
|
||||
*
|
||||
* unsigned long max_zone_pfns[MAX_NR_ZONES] = {max_dma, max_normal_pfn,
|
||||
* max_highmem_pfn};
|
||||
* for_each_valid_physical_page_range()
|
||||
* add_active_range(node_id, start_pfn, end_pfn)
|
||||
* free_area_init_nodes(max_zone_pfns);
|
||||
*
|
||||
* If the architecture guarantees that there are no holes in the ranges
|
||||
* registered with add_active_range(), free_bootmem_active_regions()
|
||||
* will call free_bootmem_node() for each registered physical page range.
|
||||
* Similarly sparse_memory_present_with_active_regions() calls
|
||||
* memory_present() for each range when SPARSEMEM is enabled.
|
||||
*
|
||||
* See mm/page_alloc.c for more information on each function exposed by
|
||||
* CONFIG_ARCH_POPULATES_NODE_MAP
|
||||
*/
|
||||
extern void free_area_init_nodes(unsigned long *max_zone_pfn);
|
||||
extern void add_active_range(unsigned int nid, unsigned long start_pfn,
|
||||
unsigned long end_pfn);
|
||||
extern void shrink_active_range(unsigned int nid, unsigned long old_end_pfn,
|
||||
unsigned long new_end_pfn);
|
||||
extern void push_node_boundaries(unsigned int nid, unsigned long start_pfn,
|
||||
unsigned long end_pfn);
|
||||
extern void remove_all_active_ranges(void);
|
||||
extern unsigned long absent_pages_in_range(unsigned long start_pfn,
|
||||
unsigned long end_pfn);
|
||||
extern void get_pfn_range_for_nid(unsigned int nid,
|
||||
unsigned long *start_pfn, unsigned long *end_pfn);
|
||||
extern unsigned long find_min_pfn_with_active_regions(void);
|
||||
extern unsigned long find_max_pfn_with_active_regions(void);
|
||||
extern void free_bootmem_with_active_regions(int nid,
|
||||
unsigned long max_low_pfn);
|
||||
extern void sparse_memory_present_with_active_regions(int nid);
|
||||
#ifndef CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID
|
||||
extern int early_pfn_to_nid(unsigned long pfn);
|
||||
#endif /* CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID */
|
||||
#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */
|
||||
extern void set_dma_reserve(unsigned long new_dma_reserve);
|
||||
extern void memmap_init_zone(unsigned long, int, unsigned long, unsigned long);
|
||||
extern void setup_per_zone_pages_min(void);
|
||||
extern void mem_init(void);
|
||||
@@ -1072,7 +1131,7 @@ void drop_slab(void);
|
||||
extern int randomize_va_space;
|
||||
#endif
|
||||
|
||||
const char *arch_vma_name(struct vm_area_struct *vma);
|
||||
__attribute__((weak)) const char *arch_vma_name(struct vm_area_struct *vma);
|
||||
|
||||
#endif /* __KERNEL__ */
|
||||
#endif /* _LINUX_MM_H */
|
||||
|
||||
67
include/linux/mm_types.h
Normal file
67
include/linux/mm_types.h
Normal file
@@ -0,0 +1,67 @@
|
||||
#ifndef _LINUX_MM_TYPES_H
|
||||
#define _LINUX_MM_TYPES_H
|
||||
|
||||
#include <linux/types.h>
|
||||
#include <linux/threads.h>
|
||||
#include <linux/list.h>
|
||||
#include <linux/spinlock.h>
|
||||
|
||||
struct address_space;
|
||||
|
||||
/*
|
||||
* Each physical page in the system has a struct page associated with
|
||||
* it to keep track of whatever it is we are using the page for at the
|
||||
* moment. Note that we have no way to track which tasks are using
|
||||
* a page, though if it is a pagecache page, rmap structures can tell us
|
||||
* who is mapping it.
|
||||
*/
|
||||
struct page {
|
||||
unsigned long flags; /* Atomic flags, some possibly
|
||||
* updated asynchronously */
|
||||
atomic_t _count; /* Usage count, see below. */
|
||||
atomic_t _mapcount; /* Count of ptes mapped in mms,
|
||||
* to show when page is mapped
|
||||
* & limit reverse map searches.
|
||||
*/
|
||||
union {
|
||||
struct {
|
||||
unsigned long private; /* Mapping-private opaque data:
|
||||
* usually used for buffer_heads
|
||||
* if PagePrivate set; used for
|
||||
* swp_entry_t if PageSwapCache;
|
||||
* indicates order in the buddy
|
||||
* system if PG_buddy is set.
|
||||
*/
|
||||
struct address_space *mapping; /* If low bit clear, points to
|
||||
* inode address_space, or NULL.
|
||||
* If page mapped as anonymous
|
||||
* memory, low bit is set, and
|
||||
* it points to anon_vma object:
|
||||
* see PAGE_MAPPING_ANON below.
|
||||
*/
|
||||
};
|
||||
#if NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS
|
||||
spinlock_t ptl;
|
||||
#endif
|
||||
};
|
||||
pgoff_t index; /* Our offset within mapping. */
|
||||
struct list_head lru; /* Pageout list, eg. active_list
|
||||
* protected by zone->lru_lock !
|
||||
*/
|
||||
/*
|
||||
* On machines where all RAM is mapped into kernel address space,
|
||||
* we can simply calculate the virtual address. On machines with
|
||||
* highmem some memory is mapped into kernel virtual memory
|
||||
* dynamically, so we need a place to store that address.
|
||||
* Note that this field could be 16 bits on x86 ... ;)
|
||||
*
|
||||
* Architectures with slow multiplication can define
|
||||
* WANT_PAGE_VIRTUAL in asm/page.h
|
||||
*/
|
||||
#if defined(WANT_PAGE_VIRTUAL)
|
||||
void *virtual; /* Kernel virtual address (NULL if
|
||||
not kmapped, ie. highmem) */
|
||||
#endif /* WANT_PAGE_VIRTUAL */
|
||||
};
|
||||
|
||||
#endif /* _LINUX_MM_TYPES_H */
|
||||
@@ -51,12 +51,14 @@ enum zone_stat_item {
|
||||
NR_FILE_MAPPED, /* pagecache pages mapped into pagetables.
|
||||
only modified from process context */
|
||||
NR_FILE_PAGES,
|
||||
NR_SLAB, /* Pages used by slab allocator */
|
||||
NR_SLAB_RECLAIMABLE,
|
||||
NR_SLAB_UNRECLAIMABLE,
|
||||
NR_PAGETABLE, /* used for pagetables */
|
||||
NR_FILE_DIRTY,
|
||||
NR_WRITEBACK,
|
||||
NR_UNSTABLE_NFS, /* NFS unstable pages */
|
||||
NR_BOUNCE,
|
||||
NR_VMSCAN_WRITE,
|
||||
#ifdef CONFIG_NUMA
|
||||
NUMA_HIT, /* allocated in intended node */
|
||||
NUMA_MISS, /* allocated in non intended node */
|
||||
@@ -88,53 +90,68 @@ struct per_cpu_pageset {
|
||||
#define zone_pcp(__z, __cpu) (&(__z)->pageset[(__cpu)])
|
||||
#endif
|
||||
|
||||
#define ZONE_DMA 0
|
||||
#define ZONE_DMA32 1
|
||||
#define ZONE_NORMAL 2
|
||||
#define ZONE_HIGHMEM 3
|
||||
|
||||
#define MAX_NR_ZONES 4 /* Sync this with ZONES_SHIFT */
|
||||
#define ZONES_SHIFT 2 /* ceil(log2(MAX_NR_ZONES)) */
|
||||
|
||||
enum zone_type {
|
||||
/*
|
||||
* ZONE_DMA is used when there are devices that are not able
|
||||
* to do DMA to all of addressable memory (ZONE_NORMAL). Then we
|
||||
* carve out the portion of memory that is needed for these devices.
|
||||
* The range is arch specific.
|
||||
*
|
||||
* Some examples
|
||||
*
|
||||
* Architecture Limit
|
||||
* ---------------------------
|
||||
* parisc, ia64, sparc <4G
|
||||
* s390 <2G
|
||||
* arm26 <48M
|
||||
* arm Various
|
||||
* alpha Unlimited or 0-16MB.
|
||||
*
|
||||
* i386, x86_64 and multiple other arches
|
||||
* <16M.
|
||||
*/
|
||||
ZONE_DMA,
|
||||
#ifdef CONFIG_ZONE_DMA32
|
||||
/*
|
||||
* x86_64 needs two ZONE_DMAs because it supports devices that are
|
||||
* only able to do DMA to the lower 16M but also 32 bit devices that
|
||||
* can only do DMA areas below 4G.
|
||||
*/
|
||||
ZONE_DMA32,
|
||||
#endif
|
||||
/*
|
||||
* Normal addressable memory is in ZONE_NORMAL. DMA operations can be
|
||||
* performed on pages in ZONE_NORMAL if the DMA devices support
|
||||
* transfers to all addressable memory.
|
||||
*/
|
||||
ZONE_NORMAL,
|
||||
#ifdef CONFIG_HIGHMEM
|
||||
/*
|
||||
* A memory area that is only addressable by the kernel through
|
||||
* mapping portions into its own address space. This is for example
|
||||
* used by i386 to allow the kernel to address the memory beyond
|
||||
* 900MB. The kernel will set up special mappings (page
|
||||
* table entries on i386) for each page that the kernel needs to
|
||||
* access.
|
||||
*/
|
||||
ZONE_HIGHMEM,
|
||||
#endif
|
||||
MAX_NR_ZONES
|
||||
};
|
||||
|
||||
/*
|
||||
* When a memory allocation must conform to specific limitations (such
|
||||
* as being suitable for DMA) the caller will pass in hints to the
|
||||
* allocator in the gfp_mask, in the zone modifier bits. These bits
|
||||
* are used to select a priority ordered list of memory zones which
|
||||
* match the requested limits. GFP_ZONEMASK defines which bits within
|
||||
* the gfp_mask should be considered as zone modifiers. Each valid
|
||||
* combination of the zone modifier bits has a corresponding list
|
||||
* of zones (in node_zonelists). Thus for two zone modifiers there
|
||||
* will be a maximum of 4 (2 ** 2) zonelists, for 3 modifiers there will
|
||||
* be 8 (2 ** 3) zonelists. GFP_ZONETYPES defines the number of possible
|
||||
* combinations of zone modifiers in "zone modifier space".
|
||||
*
|
||||
* As an optimisation any zone modifier bits which are only valid when
|
||||
* no other zone modifier bits are set (loners) should be placed in
|
||||
* the highest order bits of this field. This allows us to reduce the
|
||||
* extent of the zonelists thus saving space. For example in the case
|
||||
* of three zone modifier bits, we could require up to eight zonelists.
|
||||
* If the left most zone modifier is a "loner" then the highest valid
|
||||
* zonelist would be four allowing us to allocate only five zonelists.
|
||||
* Use the first form for GFP_ZONETYPES when the left most bit is not
|
||||
* a "loner", otherwise use the second.
|
||||
*
|
||||
* NOTE! Make sure this matches the zones in <linux/gfp.h>
|
||||
* match the requested limits. See gfp_zone() in include/linux/gfp.h
|
||||
*/
|
||||
#define GFP_ZONEMASK 0x07
|
||||
/* #define GFP_ZONETYPES (GFP_ZONEMASK + 1) */ /* Non-loner */
|
||||
#define GFP_ZONETYPES ((GFP_ZONEMASK + 1) / 2 + 1) /* Loner */
|
||||
|
||||
/*
|
||||
* On machines where it is needed (eg PCs) we divide physical memory
|
||||
* into multiple physical zones. On a 32bit PC we have 4 zones:
|
||||
*
|
||||
* ZONE_DMA < 16 MB ISA DMA capable memory
|
||||
* ZONE_DMA32 0 MB Empty
|
||||
* ZONE_NORMAL 16-896 MB direct mapped by the kernel
|
||||
* ZONE_HIGHMEM > 896 MB only page cache and user processes
|
||||
*/
|
||||
#if !defined(CONFIG_ZONE_DMA32) && !defined(CONFIG_HIGHMEM)
|
||||
#define ZONES_SHIFT 1
|
||||
#else
|
||||
#define ZONES_SHIFT 2
|
||||
#endif
|
||||
|
||||
struct zone {
|
||||
/* Fields commonly accessed by the page allocator */
|
||||
@@ -151,10 +168,12 @@ struct zone {
|
||||
unsigned long lowmem_reserve[MAX_NR_ZONES];
|
||||
|
||||
#ifdef CONFIG_NUMA
|
||||
int node;
|
||||
/*
|
||||
* zone reclaim becomes active if more unmapped pages exist.
|
||||
*/
|
||||
unsigned long min_unmapped_ratio;
|
||||
unsigned long min_unmapped_pages;
|
||||
unsigned long min_slab_pages;
|
||||
struct per_cpu_pageset *pageset[NR_CPUS];
|
||||
#else
|
||||
struct per_cpu_pageset pageset[NR_CPUS];
|
||||
@@ -266,7 +285,6 @@ struct zone {
|
||||
char *name;
|
||||
} ____cacheline_internodealigned_in_smp;
|
||||
|
||||
|
||||
/*
|
||||
* The "priority" of VM scanning is how much of the queues we will scan in one
|
||||
* go. A value of 12 for DEF_PRIORITY implies that we will scan 1/4096th of the
|
||||
@@ -289,6 +307,18 @@ struct zonelist {
|
||||
struct zone *zones[MAX_NUMNODES * MAX_NR_ZONES + 1]; // NULL delimited
|
||||
};
|
||||
|
||||
#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
|
||||
struct node_active_region {
|
||||
unsigned long start_pfn;
|
||||
unsigned long end_pfn;
|
||||
int nid;
|
||||
};
|
||||
#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */
|
||||
|
||||
#ifndef CONFIG_DISCONTIGMEM
|
||||
/* The array of struct pages - for discontigmem use pgdat->lmem_map */
|
||||
extern struct page *mem_map;
|
||||
#endif
|
||||
|
||||
/*
|
||||
* The pg_data_t structure is used in machines with CONFIG_DISCONTIGMEM
|
||||
@@ -304,7 +334,7 @@ struct zonelist {
|
||||
struct bootmem_data;
|
||||
typedef struct pglist_data {
|
||||
struct zone node_zones[MAX_NR_ZONES];
|
||||
struct zonelist node_zonelists[GFP_ZONETYPES];
|
||||
struct zonelist node_zonelists[MAX_NR_ZONES];
|
||||
int nr_zones;
|
||||
#ifdef CONFIG_FLAT_NODE_MEM_MAP
|
||||
struct page *node_mem_map;
|
||||
@@ -373,12 +403,16 @@ static inline int populated_zone(struct zone *zone)
|
||||
return (!!zone->present_pages);
|
||||
}
|
||||
|
||||
static inline int is_highmem_idx(int idx)
|
||||
static inline int is_highmem_idx(enum zone_type idx)
|
||||
{
|
||||
#ifdef CONFIG_HIGHMEM
|
||||
return (idx == ZONE_HIGHMEM);
|
||||
#else
|
||||
return 0;
|
||||
#endif
|
||||
}
|
||||
|
||||
static inline int is_normal_idx(int idx)
|
||||
static inline int is_normal_idx(enum zone_type idx)
|
||||
{
|
||||
return (idx == ZONE_NORMAL);
|
||||
}
|
||||
@@ -391,7 +425,11 @@ static inline int is_normal_idx(int idx)
|
||||
*/
|
||||
static inline int is_highmem(struct zone *zone)
|
||||
{
|
||||
#ifdef CONFIG_HIGHMEM
|
||||
return zone == zone->zone_pgdat->node_zones + ZONE_HIGHMEM;
|
||||
#else
|
||||
return 0;
|
||||
#endif
|
||||
}
|
||||
|
||||
static inline int is_normal(struct zone *zone)
|
||||
@@ -401,7 +439,11 @@ static inline int is_normal(struct zone *zone)
|
||||
|
||||
static inline int is_dma32(struct zone *zone)
|
||||
{
|
||||
#ifdef CONFIG_ZONE_DMA32
|
||||
return zone == zone->zone_pgdat->node_zones + ZONE_DMA32;
|
||||
#else
|
||||
return 0;
|
||||
#endif
|
||||
}
|
||||
|
||||
static inline int is_dma(struct zone *zone)
|
||||
@@ -421,6 +463,8 @@ int percpu_pagelist_fraction_sysctl_handler(struct ctl_table *, int, struct file
|
||||
void __user *, size_t *, loff_t *);
|
||||
int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *, int,
|
||||
struct file *, void __user *, size_t *, loff_t *);
|
||||
int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *, int,
|
||||
struct file *, void __user *, size_t *, loff_t *);
|
||||
|
||||
#include <linux/topology.h>
|
||||
/* Returns the number of the current Node. */
|
||||
@@ -488,7 +532,8 @@ extern struct zone *next_zone(struct zone *zone);
|
||||
|
||||
#endif
|
||||
|
||||
#ifndef CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID
|
||||
#if !defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID) && \
|
||||
!defined(CONFIG_ARCH_POPULATES_NODE_MAP)
|
||||
#define early_pfn_to_nid(nid) (0UL)
|
||||
#endif
|
||||
|
||||
|
||||
@@ -308,4 +308,16 @@ struct input_device_id {
	kernel_ulong_t driver_info;
};

/* EISA */

#define EISA_SIG_LEN   8

/* The EISA signature, in ASCII form, null terminated */
struct eisa_device_id {
	char          sig[EISA_SIG_LEN];
	kernel_ulong_t driver_data;
};

#define EISA_DEVICE_MODALIAS_FMT "eisa:s%s"

#endif /* LINUX_MOD_DEVICETABLE_H */

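For illustration, an EISA driver terminates its ID table with an empty signature; the signature string below is made up:

/* Hypothetical device table using the new eisa_device_id type. */
static struct eisa_device_id example_eisa_ids[] = {
	{ "ABC1234" },		/* sig; driver_data left at 0 */
	{ "" }			/* empty signature terminates the table */
};
MODULE_DEVICE_TABLE(eisa, example_eisa_ids);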
@@ -334,7 +334,6 @@ struct net_device
|
||||
|
||||
|
||||
struct net_device_stats* (*get_stats)(struct net_device *dev);
|
||||
struct iw_statistics* (*get_wireless_stats)(struct net_device *dev);
|
||||
|
||||
/* List of functions to handle Wireless Extensions (instead of ioctl).
|
||||
* See <net/iw_handler.h> for details. Jean II */
|
||||
@@ -1016,7 +1015,8 @@ static inline int netif_needs_gso(struct net_device *dev, struct sk_buff *skb)
|
||||
}
|
||||
|
||||
/* On bonding slaves other than the currently active slave, suppress
|
||||
* duplicates except for 802.3ad ETH_P_SLOW and alb non-mcast/bcast.
|
||||
* duplicates except for 802.3ad ETH_P_SLOW, alb non-mcast/bcast, and
|
||||
* ARP on active-backup slaves with arp_validate enabled.
|
||||
*/
|
||||
static inline int skb_bond_should_drop(struct sk_buff *skb)
|
||||
{
|
||||
@@ -1025,6 +1025,10 @@ static inline int skb_bond_should_drop(struct sk_buff *skb)
|
||||
|
||||
if (master &&
|
||||
(dev->priv_flags & IFF_SLAVE_INACTIVE)) {
|
||||
if ((dev->priv_flags & IFF_SLAVE_NEEDARP) &&
|
||||
skb->protocol == __constant_htons(ETH_P_ARP))
|
||||
return 0;
|
||||
|
||||
if (master->priv_flags & IFF_MASTER_ALB) {
|
||||
if (skb->pkt_type != PACKET_BROADCAST &&
|
||||
skb->pkt_type != PACKET_MULTICAST)
|
||||
|
||||
@@ -10,6 +10,8 @@ header-y += xt_connmark.h
|
||||
header-y += xt_CONNMARK.h
|
||||
header-y += xt_conntrack.h
|
||||
header-y += xt_dccp.h
|
||||
header-y += xt_dscp.h
|
||||
header-y += xt_DSCP.h
|
||||
header-y += xt_esp.h
|
||||
header-y += xt_helper.h
|
||||
header-y += xt_length.h
|
||||
|
||||
@@ -315,10 +315,6 @@ extern void nfs_end_data_update(struct inode *);
extern struct nfs_open_context *get_nfs_open_context(struct nfs_open_context *ctx);
extern void put_nfs_open_context(struct nfs_open_context *ctx);
extern struct nfs_open_context *nfs_find_open_context(struct inode *inode, struct rpc_cred *cred, int mode);
extern struct vfsmount *nfs_do_submount(const struct vfsmount *mnt_parent,
					const struct dentry *dentry,
					struct nfs_fh *fh,
					struct nfs_fattr *fattr);

/* linux/net/ipv4/ipconfig.c: trims ip addr off front of name, too. */
extern u32 root_nfs_parse_addr(char *name); /*__init*/

@@ -269,14 +269,8 @@ fill_post_wcc(struct svc_fh *fhp)
	fhp->fh_post_uid	= inode->i_uid;
	fhp->fh_post_gid	= inode->i_gid;
	fhp->fh_post_size	= inode->i_size;
	if (inode->i_blksize) {
		fhp->fh_post_blksize = inode->i_blksize;
		fhp->fh_post_blocks = inode->i_blocks;
	} else {
		fhp->fh_post_blksize = BLOCK_SIZE;
		/* how much do we care for accuracy with MinixFS? */
		fhp->fh_post_blocks = (inode->i_size+511) >> 9;
	}
	fhp->fh_post_blksize	= BLOCK_SIZE;
	fhp->fh_post_blocks	= inode->i_blocks;
	fhp->fh_post_rdev[0]	= htonl((u32)imajor(inode));
	fhp->fh_post_rdev[1]	= htonl((u32)iminor(inode));
	fhp->fh_post_atime	= inode->i_atime;

@@ -13,24 +13,25 @@
 * PG_reserved is set for special pages, which can never be swapped out. Some
 * of them might not even exist (eg empty_bad_page)...
 *
 * The PG_private bitflag is set if page->private contains a valid value.
 * The PG_private bitflag is set on pagecache pages if they contain filesystem
 * specific data (which is normally at page->private). It can be used by
 * private allocations for its own usage.
 *
 * During disk I/O, PG_locked is used. This bit is set before I/O and
 * reset when I/O completes. page_waitqueue(page) is a wait queue of all tasks
 * waiting for the I/O on this page to complete.
 * During initiation of disk I/O, PG_locked is set. This bit is set before I/O
 * and cleared when writeback _starts_ or when read _completes_. PG_writeback
 * is set before writeback starts and cleared when it finishes.
 *
 * PG_locked also pins a page in pagecache, and blocks truncation of the file
 * while it is held.
 *
 * page_waitqueue(page) is a wait queue of all tasks waiting for the page
 * to become unlocked.
 *
 * PG_uptodate tells whether the page's contents is valid. When a read
 * completes, the page becomes uptodate, unless a disk I/O error happened.
 *
 * For choosing which pages to swap out, inode pages carry a PG_referenced bit,
 * which is set any time the system accesses that page through the (mapping,
 * index) hash table. This referenced bit, together with the referenced bit
 * in the page tables, is used to manipulate page->age and move the page across
 * the active, inactive_dirty and inactive_clean lists.
 *
 * Note that the referenced bit, the page->lru list_head and the active,
 * inactive_dirty and inactive_clean lists are protected by the
 * zone->lru_lock, and *NOT* by the usual PG_locked bit!
 * PG_referenced, PG_reclaim are used for page reclaim for anonymous and
 * file-backed pagecache (see mm/vmscan.c).
 *
 * PG_error is set to indicate that an I/O error occurred on this page.
 *
@@ -42,6 +43,10 @@
 * space, they need to be kmapped separately for doing IO on the pages.  The
 * struct page (these bits with information) are always mapped into kernel
 * address space...
 *
 * PG_buddy is set to indicate that the page is free and in the buddy system
 * (see mm/page_alloc.c).
 *
 */

/*
@@ -74,7 +79,7 @@
#define PG_checked		 8	/* kill me in 2.5.<early>. */
#define PG_arch_1		 9
#define PG_reserved		10
#define PG_private		11	/* Has something at ->private */
#define PG_private		11	/* If pagecache, has fs-private data */

#define PG_writeback		12	/* Page is under writeback */
#define PG_nosave		13	/* Used for system suspend/resume */
@@ -83,7 +88,7 @@

#define PG_mappedtodisk		16	/* Has blocks allocated on-disk */
#define PG_reclaim		17	/* To be reclaimed asap */
#define PG_nosave_free		18	/* Free, should not be written */
#define PG_nosave_free		18	/* Used for system suspend/resume */
#define PG_buddy		19	/* Page is free, on buddy lists */


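A hedged sketch of what the reworded PG_private rule means for a filesystem attaching private state to a pagecache page; the helper name is illustrative, while set_page_private() and SetPagePrivate() are the existing accessors:

/* Hypothetical helper: mark ->private as live and take the reference that
 * the updated mm.h comments say PG_private should hold. */
static void example_attach_private(struct page *page, unsigned long state)
{
	get_page(page);
	set_page_private(page, state);
	SetPagePrivate(page);
}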
@@ -130,14 +130,29 @@ static inline pgoff_t linear_page_index(struct vm_area_struct *vma,
}

extern void FASTCALL(__lock_page(struct page *page));
extern void FASTCALL(__lock_page_nosync(struct page *page));
extern void FASTCALL(unlock_page(struct page *page));

/*
 * lock_page may only be called if we have the page's inode pinned.
 */
static inline void lock_page(struct page *page)
{
	might_sleep();
	if (TestSetPageLocked(page))
		__lock_page(page);
}

/*
 * lock_page_nosync should only be used if we can't pin the page's inode.
 * Doesn't play quite so well with block device plugging.
 */
static inline void lock_page_nosync(struct page *page)
{
	might_sleep();
	if (TestSetPageLocked(page))
		__lock_page_nosync(page);
}

/*
 * This is exported only for wait_on_page_locked/wait_on_page_writeback.

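Typical usage of the locking helpers above, shown as a hypothetical caller:

/* lock_page() may sleep; while the lock is held the page stays in
 * pagecache and the file cannot be truncated underneath us. */
static void example_touch_page(struct page *page)
{
	lock_page(page);
	/* ... operate on the stable, locked page ... */
	unlock_page(page);
}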
@@ -49,6 +49,7 @@
|
||||
#include <linux/types.h>
|
||||
#include <linux/ioport.h>
|
||||
#include <linux/list.h>
|
||||
#include <linux/compiler.h>
|
||||
#include <linux/errno.h>
|
||||
#include <linux/device.h>
|
||||
|
||||
@@ -346,6 +347,8 @@ struct pci_driver {
|
||||
int (*probe) (struct pci_dev *dev, const struct pci_device_id *id); /* New device inserted */
|
||||
void (*remove) (struct pci_dev *dev); /* Device removed (NULL if not a hot-plug capable driver) */
|
||||
int (*suspend) (struct pci_dev *dev, pm_message_t state); /* Device suspended */
|
||||
int (*suspend_late) (struct pci_dev *dev, pm_message_t state);
|
||||
int (*resume_early) (struct pci_dev *dev);
|
||||
int (*resume) (struct pci_dev *dev); /* Device woken up */
|
||||
int (*enable_wake) (struct pci_dev *dev, pci_power_t state, int enable); /* Enable wake event */
|
||||
void (*shutdown) (struct pci_dev *dev);
|
||||
@@ -353,6 +356,8 @@ struct pci_driver {
|
||||
struct pci_error_handlers *err_handler;
|
||||
struct device_driver driver;
|
||||
struct pci_dynids dynids;
|
||||
|
||||
int multithread_probe;
|
||||
};
|
||||
|
||||
#define to_pci_driver(drv) container_of(drv,struct pci_driver, driver)
|
||||
@@ -401,7 +406,7 @@ extern struct list_head pci_root_buses; /* list of all known PCI buses */
|
||||
extern struct list_head pci_devices; /* list of all devices */
|
||||
|
||||
void pcibios_fixup_bus(struct pci_bus *);
|
||||
int pcibios_enable_device(struct pci_dev *, int mask);
|
||||
int __must_check pcibios_enable_device(struct pci_dev *, int mask);
|
||||
char *pcibios_setup (char *str);
|
||||
|
||||
/* Used only when drivers/pci/setup.c is used */
|
||||
@@ -428,7 +433,7 @@ int pci_scan_slot(struct pci_bus *bus, int devfn);
|
||||
struct pci_dev * pci_scan_single_device(struct pci_bus *bus, int devfn);
|
||||
void pci_device_add(struct pci_dev *dev, struct pci_bus *bus);
|
||||
unsigned int pci_scan_child_bus(struct pci_bus *bus);
|
||||
void pci_bus_add_device(struct pci_dev *dev);
|
||||
int __must_check pci_bus_add_device(struct pci_dev *dev);
|
||||
void pci_read_bridge_bases(struct pci_bus *child);
|
||||
struct resource *pci_find_parent_resource(const struct pci_dev *dev, struct resource *res);
|
||||
int pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge);
|
||||
@@ -436,6 +441,7 @@ extern struct pci_dev *pci_dev_get(struct pci_dev *dev);
|
||||
extern void pci_dev_put(struct pci_dev *dev);
|
||||
extern void pci_remove_bus(struct pci_bus *b);
|
||||
extern void pci_remove_bus_device(struct pci_dev *dev);
|
||||
extern void pci_stop_bus_device(struct pci_dev *dev);
|
||||
void pci_setup_cardbus(struct pci_bus *bus);
|
||||
|
||||
/* Generic PCI functions exported to card drivers */
|
||||
@@ -488,19 +494,19 @@ static inline int pci_write_config_dword(struct pci_dev *dev, int where, u32 val
	return pci_bus_write_config_dword (dev->bus, dev->devfn, where, val);
}

int pci_enable_device(struct pci_dev *dev);
int pci_enable_device_bars(struct pci_dev *dev, int mask);
int __must_check pci_enable_device(struct pci_dev *dev);
int __must_check pci_enable_device_bars(struct pci_dev *dev, int mask);
void pci_disable_device(struct pci_dev *dev);
void pci_set_master(struct pci_dev *dev);
#define HAVE_PCI_SET_MWI
int pci_set_mwi(struct pci_dev *dev);
int __must_check pci_set_mwi(struct pci_dev *dev);
void pci_clear_mwi(struct pci_dev *dev);
void pci_intx(struct pci_dev *dev, int enable);
int pci_set_dma_mask(struct pci_dev *dev, u64 mask);
int pci_set_consistent_dma_mask(struct pci_dev *dev, u64 mask);
void pci_update_resource(struct pci_dev *dev, struct resource *res, int resno);
int pci_assign_resource(struct pci_dev *dev, int i);
int pci_assign_resource_fixed(struct pci_dev *dev, int i);
int __must_check pci_assign_resource(struct pci_dev *dev, int i);
int __must_check pci_assign_resource_fixed(struct pci_dev *dev, int i);
void pci_restore_bars(struct pci_dev *dev);

/* ROM control related routines */
@@ -526,23 +532,24 @@ void pdev_sort_resources(struct pci_dev *, struct resource_list *);
void pci_fixup_irqs(u8 (*)(struct pci_dev *, u8 *),
		    int (*)(struct pci_dev *, u8, u8));
#define HAVE_PCI_REQ_REGIONS	2
int pci_request_regions(struct pci_dev *, const char *);
int __must_check pci_request_regions(struct pci_dev *, const char *);
void pci_release_regions(struct pci_dev *);
int pci_request_region(struct pci_dev *, int, const char *);
int __must_check pci_request_region(struct pci_dev *, int, const char *);
void pci_release_region(struct pci_dev *, int);

/* drivers/pci/bus.c */
int pci_bus_alloc_resource(struct pci_bus *bus, struct resource *res,
			   resource_size_t size, resource_size_t align,
			   resource_size_t min, unsigned int type_mask,
			   void (*alignf)(void *, struct resource *,
					  resource_size_t, resource_size_t),
			   void *alignf_data);
int __must_check pci_bus_alloc_resource(struct pci_bus *bus,
			struct resource *res, resource_size_t size,
			resource_size_t align, resource_size_t min,
			unsigned int type_mask,
			void (*alignf)(void *, struct resource *,
				       resource_size_t, resource_size_t),
			void *alignf_data);
void pci_enable_bridges(struct pci_bus *bus);

/* Proper probing supporting hot-pluggable devices */
int __pci_register_driver(struct pci_driver *, struct module *);
static inline int pci_register_driver(struct pci_driver *driver)
int __must_check __pci_register_driver(struct pci_driver *, struct module *);
static inline int __must_check pci_register_driver(struct pci_driver *driver)
{
	return __pci_register_driver(driver, THIS_MODULE);
}

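Because pci_enable_device() and friends are now __must_check, a driver probe routine is expected to check each call and unwind on failure. A hedged sketch (driver name and error handling are illustrative):

static int example_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int err;

	err = pci_enable_device(pdev);
	if (err)
		return err;

	err = pci_request_regions(pdev, "example");
	if (err) {
		pci_disable_device(pdev);
		return err;
	}

	pci_set_master(pdev);
	return 0;
}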
@@ -1411,6 +1411,7 @@
|
||||
#define PCI_DEVICE_ID_SERVERWORKS_LE 0x0009
|
||||
#define PCI_DEVICE_ID_SERVERWORKS_GCNB_LE 0x0017
|
||||
#define PCI_DEVICE_ID_SERVERWORKS_EPB 0x0103
|
||||
#define PCI_DEVICE_ID_SERVERWORKS_HT2000_PCIE 0x0132
|
||||
#define PCI_DEVICE_ID_SERVERWORKS_OSB4 0x0200
|
||||
#define PCI_DEVICE_ID_SERVERWORKS_CSB5 0x0201
|
||||
#define PCI_DEVICE_ID_SERVERWORKS_CSB6 0x0203
|
||||
@@ -1482,9 +1483,6 @@
|
||||
#define PCI_DEVICE_ID_MARVELL_GT64260 0x6430
|
||||
#define PCI_DEVICE_ID_MARVELL_MV64360 0x6460
|
||||
#define PCI_DEVICE_ID_MARVELL_MV64460 0x6480
|
||||
#define PCI_DEVICE_ID_MARVELL_GT96100 0x9652
|
||||
#define PCI_DEVICE_ID_MARVELL_GT96100A 0x9653
|
||||
|
||||
|
||||
#define PCI_VENDOR_ID_V3 0x11b0
|
||||
#define PCI_DEVICE_ID_V3_V960 0x0001
|
||||
|
||||
@@ -196,7 +196,7 @@
|
||||
#define PCI_CAP_ID_MSI 0x05 /* Message Signalled Interrupts */
|
||||
#define PCI_CAP_ID_CHSWP 0x06 /* CompactPCI HotSwap */
|
||||
#define PCI_CAP_ID_PCIX 0x07 /* PCI-X */
|
||||
#define PCI_CAP_ID_HT_IRQCONF 0x08 /* HyperTransport IRQ Configuration */
|
||||
#define PCI_CAP_ID_HT 0x08 /* HyperTransport */
|
||||
#define PCI_CAP_ID_VNDR 0x09 /* Vendor specific capability */
|
||||
#define PCI_CAP_ID_SHPC 0x0C /* PCI Standard Hot-Plug Controller */
|
||||
#define PCI_CAP_ID_EXP 0x10 /* PCI Express */
|
||||
|
||||
@@ -62,6 +62,12 @@ struct pcie_port_service_driver {
|
||||
int (*suspend) (struct pcie_device *dev, pm_message_t state);
|
||||
int (*resume) (struct pcie_device *dev);
|
||||
|
||||
/* Service Error Recovery Handler */
|
||||
struct pci_error_handlers *err_handler;
|
||||
|
||||
/* Link Reset Capability - AER service driver specific */
|
||||
pci_ers_result_t (*reset_link) (struct pci_dev *dev);
|
||||
|
||||
const struct pcie_port_service_id *id_table;
|
||||
struct device_driver driver;
|
||||
};
|
||||
|
||||
@@ -1,9 +1,12 @@
|
||||
#ifndef __LINUX_PERCPU_H
|
||||
#define __LINUX_PERCPU_H
|
||||
|
||||
#include <linux/spinlock.h> /* For preempt_disable() */
|
||||
#include <linux/slab.h> /* For kmalloc() */
|
||||
#include <linux/smp.h>
|
||||
#include <linux/string.h> /* For memset() */
|
||||
#include <linux/cpumask.h>
|
||||
|
||||
#include <asm/percpu.h>
|
||||
|
||||
/* Enough to cover all DEFINE_PER_CPUs in kernel, including modules. */
|
||||
@@ -11,8 +14,14 @@
|
||||
#define PERCPU_ENOUGH_ROOM 32768
|
||||
#endif
|
||||
|
||||
/* Must be an lvalue. */
|
||||
#define get_cpu_var(var) (*({ preempt_disable(); &__get_cpu_var(var); }))
|
||||
/*
|
||||
* Must be an lvalue. Since @var must be a simple identifier,
|
||||
* we force a syntax error here if it isn't.
|
||||
*/
|
||||
#define get_cpu_var(var) (*({ \
|
||||
extern int simple_indentifier_##var(void); \
|
||||
preempt_disable(); \
|
||||
&__get_cpu_var(var); }))
|
||||
#define put_cpu_var(var) preempt_enable()
|
||||
|
||||
#ifdef CONFIG_SMP
|
||||
@@ -21,39 +30,77 @@ struct percpu_data {
|
||||
void *ptrs[NR_CPUS];
|
||||
};
|
||||
|
||||
#define __percpu_disguise(pdata) (struct percpu_data *)~(unsigned long)(pdata)
|
||||
/*
|
||||
* Use this to get to a cpu's version of the per-cpu object allocated using
|
||||
* alloc_percpu. Non-atomic access to the current CPU's version should
|
||||
* Use this to get to a cpu's version of the per-cpu object dynamically
|
||||
* allocated. Non-atomic access to the current CPU's version should
|
||||
* probably be combined with get_cpu()/put_cpu().
|
||||
*/
|
||||
#define per_cpu_ptr(ptr, cpu) \
|
||||
({ \
|
||||
struct percpu_data *__p = (struct percpu_data *)~(unsigned long)(ptr); \
|
||||
(__typeof__(ptr))__p->ptrs[(cpu)]; \
|
||||
#define percpu_ptr(ptr, cpu) \
|
||||
({ \
|
||||
struct percpu_data *__p = __percpu_disguise(ptr); \
|
||||
(__typeof__(ptr))__p->ptrs[(cpu)]; \
|
||||
})
|
||||
|
||||
extern void *__alloc_percpu(size_t size);
|
||||
extern void free_percpu(const void *);
|
||||
extern void *percpu_populate(void *__pdata, size_t size, gfp_t gfp, int cpu);
|
||||
extern void percpu_depopulate(void *__pdata, int cpu);
|
||||
extern int __percpu_populate_mask(void *__pdata, size_t size, gfp_t gfp,
|
||||
cpumask_t *mask);
|
||||
extern void __percpu_depopulate_mask(void *__pdata, cpumask_t *mask);
|
||||
extern void *__percpu_alloc_mask(size_t size, gfp_t gfp, cpumask_t *mask);
|
||||
extern void percpu_free(void *__pdata);
|
||||
|
||||
#else /* CONFIG_SMP */
|
||||
|
||||
#define per_cpu_ptr(ptr, cpu) ({ (void)(cpu); (ptr); })
|
||||
#define percpu_ptr(ptr, cpu) ({ (void)(cpu); (ptr); })
|
||||
|
||||
static inline void *__alloc_percpu(size_t size)
|
||||
static inline void percpu_depopulate(void *__pdata, int cpu)
|
||||
{
|
||||
void *ret = kmalloc(size, GFP_KERNEL);
|
||||
if (ret)
|
||||
memset(ret, 0, size);
|
||||
return ret;
|
||||
}
|
||||
static inline void free_percpu(const void *ptr)
|
||||
{
|
||||
kfree(ptr);
|
||||
|
||||
static inline void __percpu_depopulate_mask(void *__pdata, cpumask_t *mask)
|
||||
{
|
||||
}
|
||||
|
||||
static inline void *percpu_populate(void *__pdata, size_t size, gfp_t gfp,
|
||||
int cpu)
|
||||
{
|
||||
return percpu_ptr(__pdata, cpu);
|
||||
}
|
||||
|
||||
static inline int __percpu_populate_mask(void *__pdata, size_t size, gfp_t gfp,
|
||||
cpumask_t *mask)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline void *__percpu_alloc_mask(size_t size, gfp_t gfp, cpumask_t *mask)
|
||||
{
|
||||
return kzalloc(size, gfp);
|
||||
}
|
||||
|
||||
static inline void percpu_free(void *__pdata)
|
||||
{
|
||||
kfree(__pdata);
|
||||
}
|
||||
|
||||
#endif /* CONFIG_SMP */
|
||||
|
||||
/* Simple wrapper for the common case: zeros memory. */
|
||||
#define alloc_percpu(type) ((type *)(__alloc_percpu(sizeof(type))))
|
||||
#define percpu_populate_mask(__pdata, size, gfp, mask) \
|
||||
__percpu_populate_mask((__pdata), (size), (gfp), &(mask))
|
||||
#define percpu_depopulate_mask(__pdata, mask) \
|
||||
__percpu_depopulate_mask((__pdata), &(mask))
|
||||
#define percpu_alloc_mask(size, gfp, mask) \
|
||||
__percpu_alloc_mask((size), (gfp), &(mask))
|
||||
|
||||
#define percpu_alloc(size, gfp) percpu_alloc_mask((size), (gfp), cpu_online_map)
|
||||
|
||||
/* (legacy) interface for use without CPU hotplug handling */
|
||||
|
||||
#define __alloc_percpu(size) percpu_alloc_mask((size), GFP_KERNEL, \
|
||||
cpu_possible_map)
|
||||
#define alloc_percpu(type) (type *)__alloc_percpu(sizeof(type))
|
||||
#define free_percpu(ptr) percpu_free((ptr))
|
||||
#define per_cpu_ptr(ptr, cpu) percpu_ptr((ptr), (cpu))
|
||||
|
||||
#endif /* __LINUX_PERCPU_H */
|
||||
|
||||
@@ -76,6 +76,8 @@ extern int FASTCALL(attach_pid(struct task_struct *task,
|
||||
enum pid_type type, int nr));
|
||||
|
||||
extern void FASTCALL(detach_pid(struct task_struct *task, enum pid_type));
|
||||
extern void FASTCALL(transfer_pid(struct task_struct *old,
|
||||
struct task_struct *new, enum pid_type));
|
||||
|
||||
/*
|
||||
* look up a PID in the hash table. Must be called with the tasklist_lock
|
||||
|
||||
@@ -49,6 +49,8 @@ struct platform_driver {
|
||||
int (*remove)(struct platform_device *);
|
||||
void (*shutdown)(struct platform_device *);
|
||||
int (*suspend)(struct platform_device *, pm_message_t state);
|
||||
int (*suspend_late)(struct platform_device *, pm_message_t state);
|
||||
int (*resume_early)(struct platform_device *);
|
||||
int (*resume)(struct platform_device *);
|
||||
struct device_driver driver;
|
||||
};
|
||||
|
||||
@@ -142,29 +142,61 @@ typedef struct pm_message {
|
||||
} pm_message_t;
|
||||
|
||||
/*
|
||||
* There are 4 important states driver can be in:
|
||||
* ON -- driver is working
|
||||
* FREEZE -- stop operations and apply whatever policy is applicable to a
|
||||
* suspended driver of that class, freeze queues for block like IDE
|
||||
* does, drop packets for ethernet, etc... stop DMA engine too etc...
|
||||
* so a consistent image can be saved; but do not power any hardware
|
||||
* down.
|
||||
* SUSPEND - like FREEZE, but hardware is doing as much powersaving as
|
||||
* possible. Roughly pci D3.
|
||||
* Several driver power state transitions are externally visible, affecting
|
||||
* the state of pending I/O queues and (for drivers that touch hardware)
|
||||
* interrupts, wakeups, DMA, and other hardware state. There may also be
|
||||
* internal transitions to various low power modes, which are transparent
|
||||
* to the rest of the driver stack (such as a driver that's ON gating off
|
||||
* clocks which are not in active use).
|
||||
*
|
||||
* Unfortunately, current drivers only recognize numeric values 0 (ON) and 3
|
||||
* (SUSPEND). We'll need to fix the drivers. So yes, putting 3 to all different
|
||||
* defines is intentional, and will go away as soon as drivers are fixed. Also
|
||||
* note that typedef is neccessary, we'll probably want to switch to
|
||||
* typedef struct pm_message_t { int event; int flags; } pm_message_t
|
||||
* or something similar soon.
|
||||
* One transition is triggered by resume(), after a suspend() call; the
|
||||
* message is implicit:
|
||||
*
|
||||
* ON Driver starts working again, responding to hardware events
|
||||
* and software requests. The hardware may have gone through
|
||||
* a power-off reset, or it may have maintained state from the
|
||||
* previous suspend() which the driver will rely on while
|
||||
* resuming. On most platforms, there are no restrictions on
|
||||
* availability of resources like clocks during resume().
|
||||
*
|
||||
* Other transitions are triggered by messages sent using suspend(). All
|
||||
* these transitions quiesce the driver, so that I/O queues are inactive.
|
||||
* That commonly entails turning off IRQs and DMA; there may be rules
|
||||
* about how to quiesce that are specific to the bus or the device's type.
|
||||
* (For example, network drivers mark the link state.) Other details may
|
||||
* differ according to the message:
|
||||
*
|
||||
* SUSPEND Quiesce, enter a low power device state appropriate for
|
||||
* the upcoming system state (such as PCI_D3hot), and enable
|
||||
* wakeup events as appropriate.
|
||||
*
|
||||
* FREEZE Quiesce operations so that a consistent image can be saved;
|
||||
* but do NOT otherwise enter a low power device state, and do
|
||||
* NOT emit system wakeup events.
|
||||
*
|
||||
* PRETHAW Quiesce as if for FREEZE; additionally, prepare for restoring
|
||||
* the system from a snapshot taken after an earlier FREEZE.
|
||||
* Some drivers will need to reset their hardware state instead
|
||||
* of preserving it, to ensure that it's never mistaken for the
|
||||
* state which that earlier snapshot had set up.
|
||||
*
|
||||
* A minimally power-aware driver treats all messages as SUSPEND, fully
|
||||
* reinitializes its device during resume() -- whether or not it was reset
|
||||
* during the suspend/resume cycle -- and can't issue wakeup events.
|
||||
*
|
||||
* More power-aware drivers may also use low power states at runtime as
|
||||
* well as during system sleep states like PM_SUSPEND_STANDBY. They may
|
||||
* be able to use wakeup events to exit from runtime low-power states,
|
||||
* or from system low-power states such as standby or suspend-to-RAM.
|
||||
*/
|
||||
|
||||
#define PM_EVENT_ON 0
|
||||
#define PM_EVENT_FREEZE 1
|
||||
#define PM_EVENT_SUSPEND 2
|
||||
#define PM_EVENT_PRETHAW 3
|
||||
|
||||
#define PMSG_FREEZE ((struct pm_message){ .event = PM_EVENT_FREEZE, })
|
||||
#define PMSG_PRETHAW ((struct pm_message){ .event = PM_EVENT_PRETHAW, })
|
||||
#define PMSG_SUSPEND ((struct pm_message){ .event = PM_EVENT_SUSPEND, })
|
||||
#define PMSG_ON ((struct pm_message){ .event = PM_EVENT_ON, })
|
||||
|
||||
@@ -190,6 +222,7 @@ extern void device_resume(void);
|
||||
extern suspend_disk_method_t pm_disk_mode;
|
||||
|
||||
extern int device_suspend(pm_message_t state);
|
||||
extern int device_prepare_suspend(pm_message_t state);
|
||||
|
||||
#define device_set_wakeup_enable(dev,val) \
|
||||
((dev)->power.should_wakeup = !!(val))
|
||||
|
||||
@@ -268,7 +268,9 @@ static inline struct proc_dir_entry *PDE(const struct inode *inode)
|
||||
struct proc_maps_private {
|
||||
struct pid *pid;
|
||||
struct task_struct *task;
|
||||
#ifdef CONFIG_MMU
|
||||
struct vm_area_struct *tail_vma;
|
||||
#endif
|
||||
};
|
||||
|
||||
#endif /* _LINUX_PROC_FS_H */
|
||||
|
||||
@@ -3,21 +3,25 @@
|
||||
|
||||
#ifdef CONFIG_PM_TRACE
|
||||
|
||||
extern int pm_trace_enabled;
|
||||
|
||||
struct device;
|
||||
extern void set_trace_device(struct device *);
|
||||
extern void generate_resume_trace(void *tracedata, unsigned int user);
|
||||
|
||||
#define TRACE_DEVICE(dev) set_trace_device(dev)
|
||||
#define TRACE_RESUME(user) do { \
|
||||
void *tracedata; \
|
||||
asm volatile("movl $1f,%0\n" \
|
||||
".section .tracedata,\"a\"\n" \
|
||||
"1:\t.word %c1\n" \
|
||||
"\t.long %c2\n" \
|
||||
".previous" \
|
||||
:"=r" (tracedata) \
|
||||
: "i" (__LINE__), "i" (__FILE__)); \
|
||||
generate_resume_trace(tracedata, user); \
|
||||
#define TRACE_RESUME(user) do { \
|
||||
if (pm_trace_enabled) { \
|
||||
void *tracedata; \
|
||||
asm volatile("movl $1f,%0\n" \
|
||||
".section .tracedata,\"a\"\n" \
|
||||
"1:\t.word %c1\n" \
|
||||
"\t.long %c2\n" \
|
||||
".previous" \
|
||||
:"=r" (tracedata) \
|
||||
: "i" (__LINE__), "i" (__FILE__)); \
|
||||
generate_resume_trace(tracedata, user); \
|
||||
} \
|
||||
} while (0)
|
||||
|
||||
#else
|
||||
|
||||
@@ -103,6 +103,14 @@ pte_t *page_check_address(struct page *, struct mm_struct *,
|
||||
*/
|
||||
unsigned long page_address_in_vma(struct page *, struct vm_area_struct *);
|
||||
|
||||
/*
|
||||
* Cleans the PTEs of shared mappings.
|
||||
* (and since clean PTEs should also be readonly, write protects them too)
|
||||
*
|
||||
* returns the number of cleaned PTEs.
|
||||
*/
|
||||
int page_mkclean(struct page *);
|
||||
|
||||
#else /* !CONFIG_MMU */
|
||||
|
||||
#define anon_vma_init() do {} while (0)
|
||||
@@ -112,6 +120,12 @@ unsigned long page_address_in_vma(struct page *, struct vm_area_struct *);
|
||||
#define page_referenced(page,l) TestClearPageReferenced(page)
|
||||
#define try_to_unmap(page, refs) SWAP_FAIL
|
||||
|
||||
static inline int page_mkclean(struct page *page)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
#endif /* CONFIG_MMU */
|
||||
|
||||
/*
|
||||
|
||||
@@ -819,6 +819,11 @@ struct task_struct {
|
||||
unsigned did_exec:1;
|
||||
pid_t pid;
|
||||
pid_t tgid;
|
||||
|
||||
#ifdef CONFIG_CC_STACKPROTECTOR
|
||||
/* Canary value for the -fstack-protector gcc feature */
|
||||
unsigned long stack_canary;
|
||||
#endif
|
||||
/*
|
||||
* pointers to (original) parent process, youngest child, younger sibling,
|
||||
* older sibling, respectively. (p->father can be replaced with
|
||||
@@ -865,6 +870,15 @@ struct task_struct {
|
||||
struct key *thread_keyring; /* keyring private to this thread */
|
||||
unsigned char jit_keyring; /* default keyring to attach requested keys to */
|
||||
#endif
|
||||
/*
|
||||
* fpu_counter contains the number of consecutive context switches
|
||||
* that the FPU is used. If this is over a threshold, the lazy fpu
|
||||
* saving becomes unlazy to save the trap. This is an unsigned char
|
||||
* so that after 256 times the counter wraps and the behavior turns
|
||||
* lazy again; this to deal with bursty apps that only use FPU for
|
||||
* a short time
|
||||
*/
|
||||
unsigned char fpu_counter;
|
||||
int oomkilladj; /* OOM kill score adjustment (bit shift). */
|
||||
char comm[TASK_COMM_LEN]; /* executable name excluding path
|
||||
- access with [gs]et_task_comm (which lock
|
||||
|
||||
@@ -46,7 +46,7 @@ void selinux_audit_rule_free(struct selinux_audit_rule *rule);
|
||||
|
||||
/**
|
||||
* selinux_audit_rule_match - determine if a context ID matches a rule.
|
||||
* @ctxid: the context ID to check
|
||||
* @sid: the context ID to check
|
||||
* @field: the field this rule refers to
|
||||
* @op: the operater the rule uses
|
||||
* @rule: pointer to the audit rule to check against
|
||||
@@ -55,7 +55,7 @@ void selinux_audit_rule_free(struct selinux_audit_rule *rule);
|
||||
* Returns 1 if the context id matches the rule, 0 if it does not, and
|
||||
* -errno on failure.
|
||||
*/
|
||||
int selinux_audit_rule_match(u32 ctxid, u32 field, u32 op,
|
||||
int selinux_audit_rule_match(u32 sid, u32 field, u32 op,
|
||||
struct selinux_audit_rule *rule,
|
||||
struct audit_context *actx);
|
||||
|
||||
@@ -70,18 +70,8 @@ int selinux_audit_rule_match(u32 ctxid, u32 field, u32 op,
|
||||
void selinux_audit_set_callback(int (*callback)(void));
|
||||
|
||||
/**
|
||||
* selinux_task_ctxid - determine a context ID for a process.
|
||||
* @tsk: the task object
|
||||
* @ctxid: ID value returned via this
|
||||
*
|
||||
* On return, ctxid will contain an ID for the context. This value
|
||||
* should only be used opaquely.
|
||||
*/
|
||||
void selinux_task_ctxid(struct task_struct *tsk, u32 *ctxid);
|
||||
|
||||
/**
|
||||
* selinux_ctxid_to_string - map a security context ID to a string
|
||||
* @ctxid: security context ID to be converted.
|
||||
* selinux_sid_to_string - map a security context ID to a string
|
||||
* @sid: security context ID to be converted.
|
||||
* @ctx: address of context string to be returned
|
||||
* @ctxlen: length of returned context string.
|
||||
*
|
||||
@@ -89,7 +79,7 @@ void selinux_task_ctxid(struct task_struct *tsk, u32 *ctxid);
|
||||
* string will be allocated internally, and the caller must call
|
||||
* kfree() on it after use.
|
||||
*/
|
||||
int selinux_ctxid_to_string(u32 ctxid, char **ctx, u32 *ctxlen);
|
||||
int selinux_sid_to_string(u32 sid, char **ctx, u32 *ctxlen);
|
||||
|
||||
/**
|
||||
* selinux_get_inode_sid - get the inode's security context ID
|
||||
@@ -154,7 +144,7 @@ static inline void selinux_audit_rule_free(struct selinux_audit_rule *rule)
|
||||
return;
|
||||
}
|
||||
|
||||
static inline int selinux_audit_rule_match(u32 ctxid, u32 field, u32 op,
|
||||
static inline int selinux_audit_rule_match(u32 sid, u32 field, u32 op,
|
||||
struct selinux_audit_rule *rule,
|
||||
struct audit_context *actx)
|
||||
{
|
||||
@@ -166,12 +156,7 @@ static inline void selinux_audit_set_callback(int (*callback)(void))
|
||||
return;
|
||||
}
|
||||
|
||||
static inline void selinux_task_ctxid(struct task_struct *tsk, u32 *ctxid)
|
||||
{
|
||||
*ctxid = 0;
|
||||
}
|
||||
|
||||
static inline int selinux_ctxid_to_string(u32 ctxid, char **ctx, u32 *ctxlen)
|
||||
static inline int selinux_sid_to_string(u32 sid, char **ctx, u32 *ctxlen)
|
||||
{
|
||||
*ctx = NULL;
|
||||
*ctxlen = 0;
|
||||
|
||||
@@ -60,14 +60,13 @@ extern void __init kmem_cache_init(void);
|
||||
extern kmem_cache_t *kmem_cache_create(const char *, size_t, size_t, unsigned long,
|
||||
void (*)(void *, kmem_cache_t *, unsigned long),
|
||||
void (*)(void *, kmem_cache_t *, unsigned long));
|
||||
extern int kmem_cache_destroy(kmem_cache_t *);
|
||||
extern void kmem_cache_destroy(kmem_cache_t *);
|
||||
extern int kmem_cache_shrink(kmem_cache_t *);
|
||||
extern void *kmem_cache_alloc(kmem_cache_t *, gfp_t);
|
||||
extern void *kmem_cache_zalloc(struct kmem_cache *, gfp_t);
|
||||
extern void kmem_cache_free(kmem_cache_t *, void *);
|
||||
extern unsigned int kmem_cache_size(kmem_cache_t *);
|
||||
extern const char *kmem_cache_name(kmem_cache_t *);
|
||||
extern kmem_cache_t *kmem_find_general_cachep(size_t size, gfp_t gfpflags);
|
||||
|
||||
/* Size description struct for general caches. */
|
||||
struct cache_sizes {
|
||||
@@ -203,7 +202,30 @@ extern int slab_is_available(void);
|
||||
|
||||
#ifdef CONFIG_NUMA
|
||||
extern void *kmem_cache_alloc_node(kmem_cache_t *, gfp_t flags, int node);
|
||||
extern void *kmalloc_node(size_t size, gfp_t flags, int node);
|
||||
extern void *__kmalloc_node(size_t size, gfp_t flags, int node);
|
||||
|
||||
static inline void *kmalloc_node(size_t size, gfp_t flags, int node)
|
||||
{
|
||||
if (__builtin_constant_p(size)) {
|
||||
int i = 0;
|
||||
#define CACHE(x) \
|
||||
if (size <= x) \
|
||||
goto found; \
|
||||
else \
|
||||
i++;
|
||||
#include "kmalloc_sizes.h"
|
||||
#undef CACHE
|
||||
{
|
||||
extern void __you_cannot_kmalloc_that_much(void);
|
||||
__you_cannot_kmalloc_that_much();
|
||||
}
|
||||
found:
|
||||
return kmem_cache_alloc_node((flags & GFP_DMA) ?
|
||||
malloc_sizes[i].cs_dmacachep :
|
||||
malloc_sizes[i].cs_cachep, flags, node);
|
||||
}
|
||||
return __kmalloc_node(size, flags, node);
|
||||
}
|
||||
#else
|
||||
static inline void *kmem_cache_alloc_node(kmem_cache_t *cachep, gfp_t flags, int node)
|
||||
{
|
||||
@@ -223,12 +245,11 @@ extern int FASTCALL(kmem_ptr_validate(kmem_cache_t *cachep, void *ptr));
|
||||
/* SLOB allocator routines */
|
||||
|
||||
void kmem_cache_init(void);
|
||||
struct kmem_cache *kmem_find_general_cachep(size_t, gfp_t gfpflags);
|
||||
struct kmem_cache *kmem_cache_create(const char *c, size_t, size_t,
|
||||
unsigned long,
|
||||
void (*)(void *, struct kmem_cache *, unsigned long),
|
||||
void (*)(void *, struct kmem_cache *, unsigned long));
|
||||
int kmem_cache_destroy(struct kmem_cache *c);
|
||||
void kmem_cache_destroy(struct kmem_cache *c);
|
||||
void *kmem_cache_alloc(struct kmem_cache *c, gfp_t flags);
|
||||
void *kmem_cache_zalloc(struct kmem_cache *, gfp_t);
|
||||
void kmem_cache_free(struct kmem_cache *c, void *b);
|
||||
@@ -263,8 +284,6 @@ extern kmem_cache_t *fs_cachep;
|
||||
extern kmem_cache_t *sighand_cachep;
|
||||
extern kmem_cache_t *bio_cachep;
|
||||
|
||||
extern atomic_t slab_reclaim_pages;
|
||||
|
||||
#endif /* __KERNEL__ */
|
||||
|
||||
#endif /* _LINUX_SLAB_H */
|
||||
|
||||
@@ -89,7 +89,6 @@ struct smb_fattr {
|
||||
struct timespec f_atime;
|
||||
struct timespec f_mtime;
|
||||
struct timespec f_ctime;
|
||||
unsigned long f_blksize;
|
||||
unsigned long f_blocks;
|
||||
int f_unix;
|
||||
};
|
||||
|
||||
@@ -53,6 +53,9 @@ extern void smp_cpus_done(unsigned int max_cpus);
|
||||
*/
|
||||
int smp_call_function(void(*func)(void *info), void *info, int retry, int wait);
|
||||
|
||||
int smp_call_function_single(int cpuid, void (*func) (void *info), void *info,
|
||||
int retry, int wait);
|
||||
|
||||
/*
|
||||
* Call a function on all processors
|
||||
*/
|
||||
|
||||
@@ -5,15 +5,16 @@
|
||||
struct stack_trace {
|
||||
unsigned int nr_entries, max_entries;
|
||||
unsigned long *entries;
|
||||
int skip; /* input argument: How many entries to skip */
|
||||
int all_contexts; /* input argument: if true do than one stack */
|
||||
};
|
||||
|
||||
extern void save_stack_trace(struct stack_trace *trace,
|
||||
struct task_struct *task, int all_contexts,
|
||||
unsigned int skip);
|
||||
struct task_struct *task);
|
||||
|
||||
extern void print_stack_trace(struct stack_trace *trace, int spaces);
|
||||
#else
|
||||
# define save_stack_trace(trace, task, all, skip) do { } while (0)
|
||||
# define save_stack_trace(trace, task) do { } while (0)
|
||||
# define print_stack_trace(trace) do { } while (0)
|
||||
#endif
|
||||
|
||||
|
||||
@@ -10,29 +10,11 @@
|
||||
#include <linux/pm.h>
|
||||
|
||||
/* page backup entry */
|
||||
typedef struct pbe {
|
||||
struct pbe {
|
||||
unsigned long address; /* address of the copy */
|
||||
unsigned long orig_address; /* original address of page */
|
||||
struct pbe *next;
|
||||
} suspend_pagedir_t;
|
||||
|
||||
#define for_each_pbe(pbe, pblist) \
|
||||
for (pbe = pblist ; pbe ; pbe = pbe->next)
|
||||
|
||||
#define PBES_PER_PAGE (PAGE_SIZE/sizeof(struct pbe))
|
||||
#define PB_PAGE_SKIP (PBES_PER_PAGE-1)
|
||||
|
||||
#define for_each_pb_page(pbe, pblist) \
|
||||
for (pbe = pblist ; pbe ; pbe = (pbe+PB_PAGE_SKIP)->next)
|
||||
|
||||
|
||||
#define SWAP_FILENAME_MAXLENGTH 32
|
||||
|
||||
|
||||
extern dev_t swsusp_resume_device;
|
||||
|
||||
/* mm/vmscan.c */
|
||||
extern int shrink_mem(void);
|
||||
};
|
||||
|
||||
/* mm/page_alloc.c */
|
||||
extern void drain_local_pages(void);
|
||||
@@ -53,18 +35,10 @@ static inline void pm_restore_console(void) {}
|
||||
static inline int software_suspend(void)
|
||||
{
|
||||
printk("Warning: fake suspend called\n");
|
||||
return -EPERM;
|
||||
return -ENOSYS;
|
||||
}
|
||||
#endif /* CONFIG_PM */
|
||||
|
||||
#ifdef CONFIG_SUSPEND_SMP
|
||||
extern void disable_nonboot_cpus(void);
|
||||
extern void enable_nonboot_cpus(void);
|
||||
#else
|
||||
static inline void disable_nonboot_cpus(void) {}
|
||||
static inline void enable_nonboot_cpus(void) {}
|
||||
#endif
|
||||
|
||||
void save_processor_state(void);
|
||||
void restore_processor_state(void);
|
||||
struct saved_context;
|
||||
|
||||
@@ -10,6 +10,10 @@
|
||||
#include <asm/atomic.h>
|
||||
#include <asm/page.h>
|
||||
|
||||
struct notifier_block;
|
||||
|
||||
struct bio;
|
||||
|
||||
#define SWAP_FLAG_PREFER 0x8000 /* set if swap priority specified */
|
||||
#define SWAP_FLAG_PRIO_MASK 0x7fff
|
||||
#define SWAP_FLAG_PRIO_SHIFT 0
|
||||
@@ -156,13 +160,14 @@ struct swap_list_t {
|
||||
|
||||
/* linux/mm/oom_kill.c */
|
||||
extern void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask, int order);
|
||||
extern int register_oom_notifier(struct notifier_block *nb);
|
||||
extern int unregister_oom_notifier(struct notifier_block *nb);
|
||||
|
||||
/* linux/mm/memory.c */
|
||||
extern void swapin_readahead(swp_entry_t, unsigned long, struct vm_area_struct *);
|
||||
|
||||
/* linux/mm/page_alloc.c */
|
||||
extern unsigned long totalram_pages;
|
||||
extern unsigned long totalhigh_pages;
|
||||
extern unsigned long totalreserve_pages;
|
||||
extern long nr_swap_pages;
|
||||
extern unsigned int nr_free_pages(void);
|
||||
@@ -190,6 +195,7 @@ extern long vm_total_pages;
|
||||
#ifdef CONFIG_NUMA
|
||||
extern int zone_reclaim_mode;
|
||||
extern int sysctl_min_unmapped_ratio;
|
||||
extern int sysctl_min_slab_ratio;
|
||||
extern int zone_reclaim(struct zone *, gfp_t, unsigned int);
|
||||
#else
|
||||
#define zone_reclaim_mode 0
|
||||
@@ -212,7 +218,9 @@ extern void swap_unplug_io_fn(struct backing_dev_info *, struct page *);
|
||||
/* linux/mm/page_io.c */
|
||||
extern int swap_readpage(struct file *, struct page *);
|
||||
extern int swap_writepage(struct page *page, struct writeback_control *wbc);
|
||||
extern int rw_swap_page_sync(int, swp_entry_t, struct page *);
|
||||
extern int rw_swap_page_sync(int rw, swp_entry_t entry, struct page *page,
|
||||
struct bio **bio_chain);
|
||||
extern int end_swap_bio_read(struct bio *bio, unsigned int bytes_done, int err);
|
||||
|
||||
/* linux/mm/swap_state.c */
|
||||
extern struct address_space swapper_space;
|
||||
|
||||
@@ -53,6 +53,7 @@ struct mq_attr;
|
||||
struct compat_stat;
|
||||
struct compat_timeval;
|
||||
struct robust_list_head;
|
||||
struct getcpu_cache;
|
||||
|
||||
#include <linux/types.h>
|
||||
#include <linux/aio_abi.h>
|
||||
@@ -596,5 +597,6 @@ asmlinkage long sys_get_robust_list(int pid,
|
||||
size_t __user *len_ptr);
|
||||
asmlinkage long sys_set_robust_list(struct robust_list_head __user *head,
|
||||
size_t len);
|
||||
asmlinkage long sys_getcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *cache);
|
||||
|
||||
#endif
|
||||
|
||||
@@ -150,6 +150,8 @@ enum
|
||||
KERN_IA64_UNALIGNED=72, /* int: ia64 unaligned userland trap enable */
|
||||
KERN_COMPAT_LOG=73, /* int: print compat layer messages */
|
||||
KERN_MAX_LOCK_DEPTH=74,
|
||||
KERN_NMI_WATCHDOG=75, /* int: enable/disable nmi watchdog */
|
||||
KERN_PANIC_ON_NMI=76, /* int: whether we will panic on an unrecovered */
|
||||
};
|
||||
|
||||
|
||||
@@ -191,6 +193,7 @@ enum
|
||||
VM_MIN_UNMAPPED=32, /* Set min percent of unmapped pages */
|
||||
VM_PANIC_ON_OOM=33, /* panic at out-of-memory */
|
||||
VM_VDSO_ENABLED=34, /* map VDSO into new processes? */
|
||||
VM_MIN_SLAB=35, /* Percent pages ignored by zone reclaim */
|
||||
};
|
||||
|
||||
|
||||
|
||||
@@ -10,6 +10,7 @@
|
||||
#ifndef _SYSFS_H_
|
||||
#define _SYSFS_H_
|
||||
|
||||
#include <linux/compiler.h>
|
||||
#include <asm/atomic.h>
|
||||
|
||||
struct kobject;
|
||||
@@ -86,40 +87,44 @@ struct sysfs_dirent {
|
||||
|
||||
#ifdef CONFIG_SYSFS
|
||||
|
||||
extern int
|
||||
extern int __must_check
|
||||
sysfs_create_dir(struct kobject *);
|
||||
|
||||
extern void
|
||||
sysfs_remove_dir(struct kobject *);
|
||||
|
||||
extern int
|
||||
extern int __must_check
|
||||
sysfs_rename_dir(struct kobject *, const char *new_name);
|
||||
|
||||
extern int
|
||||
extern int __must_check
|
||||
sysfs_create_file(struct kobject *, const struct attribute *);
|
||||
|
||||
extern int
|
||||
extern int __must_check
|
||||
sysfs_update_file(struct kobject *, const struct attribute *);
|
||||
|
||||
extern int
|
||||
extern int __must_check
|
||||
sysfs_chmod_file(struct kobject *kobj, struct attribute *attr, mode_t mode);
|
||||
|
||||
extern void
|
||||
sysfs_remove_file(struct kobject *, const struct attribute *);
|
||||
|
||||
extern int
|
||||
extern int __must_check
|
||||
sysfs_create_link(struct kobject * kobj, struct kobject * target, const char * name);
|
||||
|
||||
extern void
|
||||
sysfs_remove_link(struct kobject *, const char * name);
|
||||
|
||||
int sysfs_create_bin_file(struct kobject * kobj, struct bin_attribute * attr);
|
||||
int sysfs_remove_bin_file(struct kobject * kobj, struct bin_attribute * attr);
|
||||
int __must_check sysfs_create_bin_file(struct kobject *kobj,
|
||||
struct bin_attribute *attr);
|
||||
void sysfs_remove_bin_file(struct kobject *kobj, struct bin_attribute *attr);
|
||||
|
||||
int sysfs_create_group(struct kobject *, const struct attribute_group *);
|
||||
int __must_check sysfs_create_group(struct kobject *,
|
||||
const struct attribute_group *);
|
||||
void sysfs_remove_group(struct kobject *, const struct attribute_group *);
|
||||
void sysfs_notify(struct kobject * k, char *dir, char *attr);
|
||||
|
||||
extern int __must_check sysfs_init(void);
|
||||
|
||||
#else /* CONFIG_SYSFS */
|
||||
|
||||
static inline int sysfs_create_dir(struct kobject * k)
|
||||
@@ -191,6 +196,11 @@ static inline void sysfs_notify(struct kobject * k, char *dir, char *attr)
|
||||
{
|
||||
}
|
||||
|
||||
static inline int __must_check sysfs_init(void)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
#endif /* CONFIG_SYSFS */
|
||||
|
||||
#endif /* _SYSFS_H_ */
|
||||
|
||||
@@ -19,4 +19,26 @@ static inline unsigned long __copy_from_user_nocache(void *to,
|
||||
|
||||
#endif /* ARCH_HAS_NOCACHE_UACCESS */
|
||||
|
||||
/**
|
||||
* probe_kernel_address(): safely attempt to read from a location
|
||||
* @addr: address to read from - its type is type typeof(retval)*
|
||||
* @retval: read into this variable
|
||||
*
|
||||
* Safely read from address @addr into variable @revtal. If a kernel fault
|
||||
* happens, handle that and return -EFAULT.
|
||||
* We ensure that the __get_user() is executed in atomic context so that
|
||||
* do_page_fault() doesn't attempt to take mmap_sem. This makes
|
||||
* probe_kernel_address() suitable for use within regions where the caller
|
||||
* already holds mmap_sem, or other locks which nest inside mmap_sem.
|
||||
*/
|
||||
#define probe_kernel_address(addr, retval) \
|
||||
({ \
|
||||
long ret; \
|
||||
\
|
||||
inc_preempt_count(); \
|
||||
ret = __get_user(retval, addr); \
|
||||
dec_preempt_count(); \
|
||||
ret; \
|
||||
})
|
||||
|
||||
#endif /* __LINUX_UACCESS_H__ */
|
||||
|
||||
@@ -19,6 +19,7 @@
|
||||
#include <linux/fs.h> /* for struct file_operations */
|
||||
#include <linux/completion.h> /* for struct completion */
|
||||
#include <linux/sched.h> /* for current && schedule_timeout */
|
||||
#include <linux/mutex.h> /* for struct mutex */
|
||||
|
||||
struct usb_device;
|
||||
struct usb_driver;
|
||||
@@ -102,8 +103,13 @@ enum usb_interface_condition {
|
||||
* number from the USB core by calling usb_register_dev().
|
||||
* @condition: binding state of the interface: not bound, binding
|
||||
* (in probe()), bound to a driver, or unbinding (in disconnect())
|
||||
* @is_active: flag set when the interface is bound and not suspended.
|
||||
* @needs_remote_wakeup: flag set when the driver requires remote-wakeup
|
||||
* capability during autosuspend.
|
||||
* @dev: driver model's view of this device
|
||||
* @class_dev: driver model's class view of this device.
|
||||
* @pm_usage_cnt: PM usage counter for this interface; autosuspend is not
|
||||
* allowed unless the counter is 0.
|
||||
*
|
||||
* USB device drivers attach to interfaces on a physical device. Each
|
||||
* interface encapsulates a single high level function, such as feeding
|
||||
@@ -142,8 +148,12 @@ struct usb_interface {
|
||||
int minor; /* minor number this interface is
|
||||
* bound to */
|
||||
enum usb_interface_condition condition; /* state of binding */
|
||||
unsigned is_active:1; /* the interface is not suspended */
|
||||
unsigned needs_remote_wakeup:1; /* driver requires remote wakeup */
|
||||
|
||||
struct device dev; /* interface specific device info */
|
||||
struct class_device *class_dev;
|
||||
int pm_usage_cnt; /* usage counter for autosuspend */
|
||||
};
|
||||
#define to_usb_interface(d) container_of(d, struct usb_interface, dev)
|
||||
#define interface_to_usbdev(intf) \
|
||||
@@ -254,8 +264,6 @@ int __usb_get_extra_descriptor(char *buffer, unsigned size,
|
||||
|
||||
/* ----------------------------------------------------------------------- */
|
||||
|
||||
struct usb_operations;
|
||||
|
||||
/* USB device number allocation bitmap */
|
||||
struct usb_devmap {
|
||||
unsigned long devicemap[128 / (8*sizeof(unsigned long))];
|
||||
@@ -268,6 +276,7 @@ struct usb_bus {
|
||||
struct device *controller; /* host/master side hardware */
|
||||
int busnum; /* Bus number (in order of reg) */
|
||||
char *bus_name; /* stable id (PCI slot_name etc) */
|
||||
u8 uses_dma; /* Does the host controller use DMA? */
|
||||
u8 otg_port; /* 0, or number of OTG/HNP port */
|
||||
unsigned is_b_host:1; /* true during some HNP roleswitches */
|
||||
unsigned b_hnp_enable:1; /* OTG: did A-Host enable HNP? */
|
||||
@@ -276,10 +285,8 @@ struct usb_bus {
|
||||
* round-robin allocation */
|
||||
|
||||
struct usb_devmap devmap; /* device address allocation map */
|
||||
struct usb_operations *op; /* Operations (specific to the HC) */
|
||||
struct usb_device *root_hub; /* Root hub */
|
||||
struct list_head bus_list; /* list of busses */
|
||||
void *hcpriv; /* Host Controller private data */
|
||||
|
||||
int bandwidth_allocated; /* on this bus: how much of the time
|
||||
* reserved for periodic (intr/iso)
|
||||
@@ -294,8 +301,6 @@ struct usb_bus {
|
||||
struct dentry *usbfs_dentry; /* usbfs dentry entry for the bus */
|
||||
|
||||
struct class_device *class_dev; /* class device for this bus */
|
||||
struct kref kref; /* reference counting for this bus */
|
||||
void (*release)(struct usb_bus *bus);
|
||||
|
||||
#if defined(CONFIG_USB_MON)
|
||||
struct mon_bus *mon_bus; /* non-null when associated */
|
||||
@@ -350,6 +355,7 @@ struct usb_device {
|
||||
|
||||
unsigned short bus_mA; /* Current available from the bus */
|
||||
u8 portnum; /* Parent port number (origin 1) */
|
||||
u8 level; /* Number of USB hub ancestors */
|
||||
|
||||
int have_langid; /* whether string_langid is valid */
|
||||
int string_langid; /* language ID for strings */
|
||||
@@ -373,6 +379,15 @@ struct usb_device {
|
||||
|
||||
int maxchild; /* Number of ports if hub */
|
||||
struct usb_device *children[USB_MAXCHILDREN];
|
||||
|
||||
#ifdef CONFIG_PM
|
||||
struct work_struct autosuspend; /* for delayed autosuspends */
|
||||
struct mutex pm_mutex; /* protects PM operations */
|
||||
int pm_usage_cnt; /* usage counter for autosuspend */
|
||||
|
||||
unsigned auto_pm:1; /* autosuspend/resume in progress */
|
||||
unsigned do_remote_wakeup:1; /* remote wakeup should be enabled */
|
||||
#endif
|
||||
};
|
||||
#define to_usb_device(d) container_of(d, struct usb_device, dev)
|
||||
|
||||
@@ -384,7 +399,7 @@ extern void usb_put_dev(struct usb_device *dev);
|
||||
#define usb_unlock_device(udev) up(&(udev)->dev.sem)
|
||||
#define usb_trylock_device(udev) down_trylock(&(udev)->dev.sem)
|
||||
extern int usb_lock_device_for_reset(struct usb_device *udev,
|
||||
struct usb_interface *iface);
|
||||
const struct usb_interface *iface);
|
||||
|
||||
/* USB port reset for device reinitialization */
|
||||
extern int usb_reset_device(struct usb_device *dev);
|
||||
@@ -393,6 +408,17 @@ extern int usb_reset_composite_device(struct usb_device *dev,
|
||||
|
||||
extern struct usb_device *usb_find_device(u16 vendor_id, u16 product_id);
|
||||
|
||||
/* USB autosuspend and autoresume */
|
||||
#ifdef CONFIG_USB_SUSPEND
|
||||
extern int usb_autopm_get_interface(struct usb_interface *intf);
|
||||
extern void usb_autopm_put_interface(struct usb_interface *intf);
|
||||
|
||||
#else
|
||||
#define usb_autopm_get_interface(intf) 0
|
||||
#define usb_autopm_put_interface(intf) do {} while (0)
|
||||
#endif
|
||||
|
||||
|
||||
/*-------------------------------------------------------------------------*/
|
||||
|
||||
/* for drivers using iso endpoints */
|
||||
@@ -423,10 +449,10 @@ const struct usb_device_id *usb_match_id(struct usb_interface *interface,
|
||||
|
||||
extern struct usb_interface *usb_find_interface(struct usb_driver *drv,
|
||||
int minor);
|
||||
extern struct usb_interface *usb_ifnum_to_if(struct usb_device *dev,
|
||||
extern struct usb_interface *usb_ifnum_to_if(const struct usb_device *dev,
|
||||
unsigned ifnum);
|
||||
extern struct usb_host_interface *usb_altnum_to_altsetting(
|
||||
struct usb_interface *intf, unsigned int altnum);
|
||||
const struct usb_interface *intf, unsigned int altnum);
|
||||
|
||||
|
||||
/**
|
||||
@@ -464,6 +490,20 @@ static inline int usb_make_path (struct usb_device *dev, char *buf,
|
||||
|
||||
/*-------------------------------------------------------------------------*/
|
||||
|
||||
extern int usb_endpoint_dir_in(const struct usb_endpoint_descriptor *epd);
|
||||
extern int usb_endpoint_dir_out(const struct usb_endpoint_descriptor *epd);
|
||||
extern int usb_endpoint_xfer_bulk(const struct usb_endpoint_descriptor *epd);
|
||||
extern int usb_endpoint_xfer_int(const struct usb_endpoint_descriptor *epd);
|
||||
extern int usb_endpoint_xfer_isoc(const struct usb_endpoint_descriptor *epd);
|
||||
extern int usb_endpoint_is_bulk_in(const struct usb_endpoint_descriptor *epd);
|
||||
extern int usb_endpoint_is_bulk_out(const struct usb_endpoint_descriptor *epd);
|
||||
extern int usb_endpoint_is_int_in(const struct usb_endpoint_descriptor *epd);
|
||||
extern int usb_endpoint_is_int_out(const struct usb_endpoint_descriptor *epd);
|
||||
extern int usb_endpoint_is_isoc_in(const struct usb_endpoint_descriptor *epd);
|
||||
extern int usb_endpoint_is_isoc_out(const struct usb_endpoint_descriptor *epd);
|
||||
|
||||
/*-------------------------------------------------------------------------*/
|
||||
|
||||
#define USB_DEVICE_ID_MATCH_DEVICE \
|
||||
(USB_DEVICE_ID_MATCH_VENDOR | USB_DEVICE_ID_MATCH_PRODUCT)
|
||||
#define USB_DEVICE_ID_MATCH_DEV_RANGE \
|
||||
@@ -540,7 +580,17 @@ struct usb_dynids {
|
||||
};
|
||||
|
||||
/**
|
||||
* struct usb_driver - identifies USB driver to usbcore
|
||||
* struct usbdrv_wrap - wrapper for driver-model structure
|
||||
* @driver: The driver-model core driver structure.
|
||||
* @for_devices: Non-zero for device drivers, 0 for interface drivers.
|
||||
*/
|
||||
struct usbdrv_wrap {
|
||||
struct device_driver driver;
|
||||
int for_devices;
|
||||
};
|
||||
|
||||
/**
|
||||
* struct usb_driver - identifies USB interface driver to usbcore
|
||||
* @name: The driver name should be unique among USB drivers,
|
||||
* and should normally be the same as the module name.
|
||||
* @probe: Called to see if the driver is willing to manage a particular
|
||||
@@ -567,12 +617,14 @@ struct usb_dynids {
|
||||
* or your driver's probe function will never get called.
|
||||
* @dynids: used internally to hold the list of dynamically added device
|
||||
* ids for this driver.
|
||||
* @driver: the driver model core driver structure.
|
||||
* @drvwrap: Driver-model core structure wrapper.
|
||||
* @no_dynamic_id: if set to 1, the USB core will not allow dynamic ids to be
|
||||
* added to this driver by preventing the sysfs file from being created.
|
||||
* @supports_autosuspend: if set to 0, the USB core will not allow autosuspend
|
||||
* for interfaces bound to this driver.
|
||||
*
|
||||
* USB drivers must provide a name, probe() and disconnect() methods,
|
||||
* and an id_table. Other driver fields are optional.
|
||||
* USB interface drivers must provide a name, probe() and disconnect()
|
||||
* methods, and an id_table. Other driver fields are optional.
|
||||
*
|
||||
* The id_table is used in hotplugging. It holds a set of descriptors,
|
||||
* and specialized data may be associated with each entry. That table
|
||||
@@ -606,10 +658,44 @@ struct usb_driver {
|
||||
const struct usb_device_id *id_table;
|
||||
|
||||
struct usb_dynids dynids;
|
||||
struct device_driver driver;
|
||||
struct usbdrv_wrap drvwrap;
|
||||
unsigned int no_dynamic_id:1;
|
||||
unsigned int supports_autosuspend:1;
|
||||
};
|
||||
#define to_usb_driver(d) container_of(d, struct usb_driver, driver)
|
||||
#define to_usb_driver(d) container_of(d, struct usb_driver, drvwrap.driver)
|
||||
|
||||
/**
|
||||
* struct usb_device_driver - identifies USB device driver to usbcore
|
||||
* @name: The driver name should be unique among USB drivers,
|
||||
* and should normally be the same as the module name.
|
||||
* @probe: Called to see if the driver is willing to manage a particular
|
||||
* device. If it is, probe returns zero and uses dev_set_drvdata()
|
||||
* to associate driver-specific data with the device. If unwilling
|
||||
* to manage the device, return a negative errno value.
|
||||
* @disconnect: Called when the device is no longer accessible, usually
|
||||
* because it has been (or is being) disconnected or the driver's
|
||||
* module is being unloaded.
|
||||
* @suspend: Called when the device is going to be suspended by the system.
|
||||
* @resume: Called when the device is being resumed by the system.
|
||||
* @drvwrap: Driver-model core structure wrapper.
|
||||
* @supports_autosuspend: if set to 0, the USB core will not allow autosuspend
|
||||
* for devices bound to this driver.
|
||||
*
|
||||
* USB drivers must provide all the fields listed above except drvwrap.
|
||||
*/
|
||||
struct usb_device_driver {
|
||||
const char *name;
|
||||
|
||||
int (*probe) (struct usb_device *udev);
|
||||
void (*disconnect) (struct usb_device *udev);
|
||||
|
||||
int (*suspend) (struct usb_device *udev, pm_message_t message);
|
||||
int (*resume) (struct usb_device *udev);
|
||||
struct usbdrv_wrap drvwrap;
|
||||
unsigned int supports_autosuspend:1;
|
||||
};
|
||||
#define to_usb_device_driver(d) container_of(d, struct usb_device_driver, \
|
||||
drvwrap.driver)
|
||||
|
||||
extern struct bus_type usb_bus_type;
|
||||
|
||||
@@ -633,13 +719,17 @@ struct usb_class_driver {
|
||||
* use these in module_init()/module_exit()
|
||||
* and don't forget MODULE_DEVICE_TABLE(usb, ...)
|
||||
*/
|
||||
int usb_register_driver(struct usb_driver *, struct module *);
|
||||
extern int usb_register_driver(struct usb_driver *, struct module *);
|
||||
static inline int usb_register(struct usb_driver *driver)
|
||||
{
|
||||
return usb_register_driver(driver, THIS_MODULE);
|
||||
}
|
||||
extern void usb_deregister(struct usb_driver *);
|
||||
|
||||
extern int usb_register_device_driver(struct usb_device_driver *,
|
||||
struct module *);
|
||||
extern void usb_deregister_device_driver(struct usb_device_driver *);
|
||||
|
||||
extern int usb_register_dev(struct usb_interface *intf,
|
||||
struct usb_class_driver *class_driver);
|
||||
extern void usb_deregister_dev(struct usb_interface *intf,
|
||||
@@ -885,7 +975,7 @@ struct urb
|
||||
* @setup_packet: pointer to the setup_packet buffer
|
||||
* @transfer_buffer: pointer to the transfer buffer
|
||||
* @buffer_length: length of the transfer buffer
|
||||
* @complete: pointer to the usb_complete_t function
|
||||
* @complete_fn: pointer to the usb_complete_t function
|
||||
* @context: what to set the urb context to.
|
||||
*
|
||||
* Initializes a control urb with the proper information needed to submit
|
||||
@@ -897,7 +987,7 @@ static inline void usb_fill_control_urb (struct urb *urb,
|
||||
unsigned char *setup_packet,
|
||||
void *transfer_buffer,
|
||||
int buffer_length,
|
||||
usb_complete_t complete,
|
||||
usb_complete_t complete_fn,
|
||||
void *context)
|
||||
{
|
||||
spin_lock_init(&urb->lock);
|
||||
@@ -906,7 +996,7 @@ static inline void usb_fill_control_urb (struct urb *urb,
|
||||
urb->setup_packet = setup_packet;
|
||||
urb->transfer_buffer = transfer_buffer;
|
||||
urb->transfer_buffer_length = buffer_length;
|
||||
urb->complete = complete;
|
||||
urb->complete = complete_fn;
|
||||
urb->context = context;
|
||||
}
|
||||
|
||||
@@ -917,7 +1007,7 @@ static inline void usb_fill_control_urb (struct urb *urb,
|
||||
* @pipe: the endpoint pipe
|
||||
* @transfer_buffer: pointer to the transfer buffer
|
||||
* @buffer_length: length of the transfer buffer
|
||||
* @complete: pointer to the usb_complete_t function
|
||||
* @complete_fn: pointer to the usb_complete_t function
|
||||
* @context: what to set the urb context to.
|
||||
*
|
||||
* Initializes a bulk urb with the proper information needed to submit it
|
||||
@@ -928,7 +1018,7 @@ static inline void usb_fill_bulk_urb (struct urb *urb,
|
||||
unsigned int pipe,
|
||||
void *transfer_buffer,
|
||||
int buffer_length,
|
||||
usb_complete_t complete,
|
||||
usb_complete_t complete_fn,
|
||||
void *context)
|
||||
{
|
||||
spin_lock_init(&urb->lock);
|
||||
@@ -936,7 +1026,7 @@ static inline void usb_fill_bulk_urb (struct urb *urb,
|
||||
urb->pipe = pipe;
|
||||
urb->transfer_buffer = transfer_buffer;
|
||||
urb->transfer_buffer_length = buffer_length;
|
||||
urb->complete = complete;
|
||||
urb->complete = complete_fn;
|
||||
urb->context = context;
|
||||
}
|
||||
|
||||
@@ -947,7 +1037,7 @@ static inline void usb_fill_bulk_urb (struct urb *urb,
|
||||
* @pipe: the endpoint pipe
|
||||
* @transfer_buffer: pointer to the transfer buffer
|
||||
* @buffer_length: length of the transfer buffer
|
||||
* @complete: pointer to the usb_complete_t function
|
||||
* @complete_fn: pointer to the usb_complete_t function
|
||||
* @context: what to set the urb context to.
|
||||
* @interval: what to set the urb interval to, encoded like
|
||||
* the endpoint descriptor's bInterval value.
|
||||
@@ -963,7 +1053,7 @@ static inline void usb_fill_int_urb (struct urb *urb,
|
||||
unsigned int pipe,
|
||||
void *transfer_buffer,
|
||||
int buffer_length,
|
||||
usb_complete_t complete,
|
||||
usb_complete_t complete_fn,
|
||||
void *context,
|
||||
int interval)
|
||||
{
|
||||
@@ -972,7 +1062,7 @@ static inline void usb_fill_int_urb (struct urb *urb,
|
||||
urb->pipe = pipe;
|
||||
urb->transfer_buffer = transfer_buffer;
|
||||
urb->transfer_buffer_length = buffer_length;
|
||||
urb->complete = complete;
|
||||
urb->complete = complete_fn;
|
||||
urb->context = context;
|
||||
if (dev->speed == USB_SPEED_HIGH)
|
||||
urb->interval = 1 << (interval - 1);
|
||||
@@ -990,7 +1080,6 @@ extern int usb_submit_urb(struct urb *urb, gfp_t mem_flags);
|
||||
extern int usb_unlink_urb(struct urb *urb);
|
||||
extern void usb_kill_urb(struct urb *urb);
|
||||
|
||||
#define HAVE_USB_BUFFERS
|
||||
void *usb_buffer_alloc (struct usb_device *dev, size_t size,
|
||||
gfp_t mem_flags, dma_addr_t *dma);
|
||||
void usb_buffer_free (struct usb_device *dev, size_t size,
|
||||
@@ -1003,14 +1092,14 @@ void usb_buffer_unmap (struct urb *urb);
|
||||
#endif
|
||||
|
||||
struct scatterlist;
|
||||
int usb_buffer_map_sg (struct usb_device *dev, unsigned pipe,
|
||||
struct scatterlist *sg, int nents);
|
||||
int usb_buffer_map_sg(const struct usb_device *dev, unsigned pipe,
|
||||
struct scatterlist *sg, int nents);
|
||||
#if 0
|
||||
void usb_buffer_dmasync_sg (struct usb_device *dev, unsigned pipe,
|
||||
struct scatterlist *sg, int n_hw_ents);
|
||||
void usb_buffer_dmasync_sg(const struct usb_device *dev, unsigned pipe,
|
||||
struct scatterlist *sg, int n_hw_ents);
|
||||
#endif
|
||||
void usb_buffer_unmap_sg (struct usb_device *dev, unsigned pipe,
|
||||
struct scatterlist *sg, int n_hw_ents);
|
||||
void usb_buffer_unmap_sg(const struct usb_device *dev, unsigned pipe,
|
||||
struct scatterlist *sg, int n_hw_ents);
|
||||
|
||||
/*-------------------------------------------------------------------*
|
||||
* SYNCHRONOUS CALL SUPPORT *
|
||||
@@ -1038,6 +1127,9 @@ extern int usb_clear_halt(struct usb_device *dev, int pipe);
|
||||
extern int usb_reset_configuration(struct usb_device *dev);
|
||||
extern int usb_set_interface(struct usb_device *dev, int ifnum, int alternate);
|
||||
|
||||
/* this request isn't really synchronous, but it belongs with the others */
|
||||
extern int usb_driver_set_configuration(struct usb_device *udev, int config);
|
||||
|
||||
/*
|
||||
* timeouts, in milliseconds, used for sending/receiving control messages
|
||||
* they typically complete within a few frames (msec) after they're issued
|
||||
|
||||
53
include/linux/usb/audio.h
Normal file
53
include/linux/usb/audio.h
Normal file
@@ -0,0 +1,53 @@
|
||||
/*
|
||||
* <linux/usb/audio.h> -- USB Audio definitions.
|
||||
*
|
||||
* Copyright (C) 2006 Thumtronics Pty Ltd.
|
||||
* Developed for Thumtronics by Grey Innovation
|
||||
* Ben Williamson <ben.williamson@greyinnovation.com>
|
||||
*
|
||||
* This software is distributed under the terms of the GNU General Public
|
||||
* License ("GPL") version 2, as published by the Free Software Foundation.
|
||||
*
|
||||
* This file holds USB constants and structures defined
|
||||
* by the USB Device Class Definition for Audio Devices.
|
||||
* Comments below reference relevant sections of that document:
|
||||
*
|
||||
* http://www.usb.org/developers/devclass_docs/audio10.pdf
|
||||
*/
|
||||
|
||||
#ifndef __LINUX_USB_AUDIO_H
|
||||
#define __LINUX_USB_AUDIO_H
|
||||
|
||||
#include <linux/types.h>
|
||||
|
||||
/* A.2 Audio Interface Subclass Codes */
|
||||
#define USB_SUBCLASS_AUDIOCONTROL 0x01
|
||||
#define USB_SUBCLASS_AUDIOSTREAMING 0x02
|
||||
#define USB_SUBCLASS_MIDISTREAMING 0x03
|
||||
|
||||
/* 4.3.2 Class-Specific AC Interface Descriptor */
|
||||
struct usb_ac_header_descriptor {
|
||||
__u8 bLength; // 8+n
|
||||
__u8 bDescriptorType; // USB_DT_CS_INTERFACE
|
||||
__u8 bDescriptorSubtype; // USB_MS_HEADER
|
||||
__le16 bcdADC; // 0x0100
|
||||
__le16 wTotalLength; // includes Unit and Terminal desc.
|
||||
__u8 bInCollection; // n
|
||||
__u8 baInterfaceNr[]; // [n]
|
||||
} __attribute__ ((packed));
|
||||
|
||||
#define USB_DT_AC_HEADER_SIZE(n) (8+(n))
|
||||
|
||||
/* As above, but more useful for defining your own descriptors: */
|
||||
#define DECLARE_USB_AC_HEADER_DESCRIPTOR(n) \
|
||||
struct usb_ac_header_descriptor_##n { \
|
||||
__u8 bLength; \
|
||||
__u8 bDescriptorType; \
|
||||
__u8 bDescriptorSubtype; \
|
||||
__le16 bcdADC; \
|
||||
__le16 wTotalLength; \
|
||||
__u8 bInCollection; \
|
||||
__u8 baInterfaceNr[n]; \
|
||||
} __attribute__ ((packed))
|
||||
|
||||
#endif
|
||||
112
include/linux/usb/midi.h
Normal file
112
include/linux/usb/midi.h
Normal file
@@ -0,0 +1,112 @@
|
||||
/*
|
||||
* <linux/usb/midi.h> -- USB MIDI definitions.
|
||||
*
|
||||
* Copyright (C) 2006 Thumtronics Pty Ltd.
|
||||
* Developed for Thumtronics by Grey Innovation
|
||||
* Ben Williamson <ben.williamson@greyinnovation.com>
|
||||
*
|
||||
* This software is distributed under the terms of the GNU General Public
|
||||
* License ("GPL") version 2, as published by the Free Software Foundation.
|
||||
*
|
||||
* This file holds USB constants and structures defined
|
||||
* by the USB Device Class Definition for MIDI Devices.
|
||||
* Comments below reference relevant sections of that document:
|
||||
*
|
||||
* http://www.usb.org/developers/devclass_docs/midi10.pdf
|
||||
*/
|
||||
|
||||
#ifndef __LINUX_USB_MIDI_H
|
||||
#define __LINUX_USB_MIDI_H
|
||||
|
||||
#include <linux/types.h>
|
||||
|
||||
/* A.1 MS Class-Specific Interface Descriptor Subtypes */
|
||||
#define USB_MS_HEADER 0x01
|
||||
#define USB_MS_MIDI_IN_JACK 0x02
|
||||
#define USB_MS_MIDI_OUT_JACK 0x03
|
||||
#define USB_MS_ELEMENT 0x04
|
||||
|
||||
/* A.2 MS Class-Specific Endpoint Descriptor Subtypes */
|
||||
#define USB_MS_GENERAL 0x01
|
||||
|
||||
/* A.3 MS MIDI IN and OUT Jack Types */
|
||||
#define USB_MS_EMBEDDED 0x01
|
||||
#define USB_MS_EXTERNAL 0x02
|
||||
|
||||
/* 6.1.2.1 Class-Specific MS Interface Header Descriptor */
|
||||
struct usb_ms_header_descriptor {
|
||||
__u8 bLength;
|
||||
__u8 bDescriptorType;
|
||||
__u8 bDescriptorSubtype;
|
||||
__le16 bcdMSC;
|
||||
__le16 wTotalLength;
|
||||
} __attribute__ ((packed));
|
||||
|
||||
#define USB_DT_MS_HEADER_SIZE 7
|
||||
|
||||
/* 6.1.2.2 MIDI IN Jack Descriptor */
|
||||
struct usb_midi_in_jack_descriptor {
|
||||
__u8 bLength;
|
||||
__u8 bDescriptorType; // USB_DT_CS_INTERFACE
|
||||
__u8 bDescriptorSubtype; // USB_MS_MIDI_IN_JACK
|
||||
__u8 bJackType; // USB_MS_EMBEDDED/EXTERNAL
|
||||
__u8 bJackID;
|
||||
__u8 iJack;
|
||||
} __attribute__ ((packed));
|
||||
|
||||
#define USB_DT_MIDI_IN_SIZE 6
|
||||
|
||||
struct usb_midi_source_pin {
|
||||
__u8 baSourceID;
|
||||
__u8 baSourcePin;
|
||||
} __attribute__ ((packed));
|
||||
|
||||
/* 6.1.2.3 MIDI OUT Jack Descriptor */
|
||||
struct usb_midi_out_jack_descriptor {
|
||||
__u8 bLength;
|
||||
__u8 bDescriptorType; // USB_DT_CS_INTERFACE
|
||||
__u8 bDescriptorSubtype; // USB_MS_MIDI_OUT_JACK
|
||||
__u8 bJackType; // USB_MS_EMBEDDED/EXTERNAL
|
||||
__u8 bJackID;
|
||||
__u8 bNrInputPins; // p
|
||||
struct usb_midi_source_pin pins[]; // [p]
|
||||
/*__u8 iJack; -- ommitted due to variable-sized pins[] */
|
||||
} __attribute__ ((packed));
|
||||
|
||||
#define USB_DT_MIDI_OUT_SIZE(p) (7 + 2 * (p))
|
||||
|
||||
/* As above, but more useful for defining your own descriptors: */
|
||||
#define DECLARE_USB_MIDI_OUT_JACK_DESCRIPTOR(p) \
|
||||
struct usb_midi_out_jack_descriptor_##p { \
|
||||
__u8 bLength; \
|
||||
__u8 bDescriptorType; \
|
||||
__u8 bDescriptorSubtype; \
|
||||
__u8 bJackType; \
|
||||
__u8 bJackID; \
|
||||
__u8 bNrInputPins; \
|
||||
struct usb_midi_source_pin pins[p]; \
|
||||
__u8 iJack; \
|
||||
} __attribute__ ((packed))
|
||||
|
||||
/* 6.2.2 Class-Specific MS Bulk Data Endpoint Descriptor */
|
||||
struct usb_ms_endpoint_descriptor {
|
||||
__u8 bLength; // 4+n
|
||||
__u8 bDescriptorType; // USB_DT_CS_ENDPOINT
|
||||
__u8 bDescriptorSubtype; // USB_MS_GENERAL
|
||||
__u8 bNumEmbMIDIJack; // n
|
||||
__u8 baAssocJackID[]; // [n]
|
||||
} __attribute__ ((packed));
|
||||
|
||||
#define USB_DT_MS_ENDPOINT_SIZE(n) (4 + (n))
|
||||
|
||||
/* As above, but more useful for defining your own descriptors: */
|
||||
#define DECLARE_USB_MS_ENDPOINT_DESCRIPTOR(n) \
|
||||
struct usb_ms_endpoint_descriptor_##n { \
|
||||
__u8 bLength; \
|
||||
__u8 bDescriptorType; \
|
||||
__u8 bDescriptorSubtype; \
|
||||
__u8 bNumEmbMIDIJack; \
|
||||
__u8 baAssocJackID[n]; \
|
||||
} __attribute__ ((packed))
|
||||
|
||||
#endif
|
||||
@@ -1,4 +1,4 @@
|
||||
// include/linux/usb_otg.h
|
||||
// include/linux/usb/otg.h
|
||||
|
||||
/*
|
||||
* These APIs may be used between USB controllers. USB device drivers
|
||||
@@ -52,7 +52,7 @@ struct otg_transceiver {
|
||||
u16 port_change;
|
||||
|
||||
/* bind/unbind the host controller */
|
||||
int (*set_host)(struct otg_transceiver *otg,
|
||||
int (*set_host)(struct otg_transceiver *otg,
|
||||
struct usb_bus *host);
|
||||
|
||||
/* bind/unbind the peripheral controller */
|
||||
@@ -108,6 +108,9 @@ enum { US_DO_ALL_FLAGS };
|
||||
#ifdef CONFIG_USB_STORAGE_ALAUDA
|
||||
#define US_PR_ALAUDA 0xf4 /* Alauda chipsets */
|
||||
#endif
|
||||
#ifdef CONFIG_USB_STORAGE_KARMA
|
||||
#define US_PR_KARMA 0xf5 /* Rio Karma */
|
||||
#endif
|
||||
|
||||
#define US_PR_DEVICE 0xff /* Use device's value */
|
||||
|
||||
|
||||
@@ -24,5 +24,5 @@
|
||||
#define VERMAGIC_STRING \
|
||||
UTS_RELEASE " " \
|
||||
MODULE_VERMAGIC_SMP MODULE_VERMAGIC_PREEMPT \
|
||||
MODULE_VERMAGIC_MODULE_UNLOAD MODULE_ARCH_VERMAGIC \
|
||||
"gcc-" __stringify(__GNUC__) "." __stringify(__GNUC_MINOR__)
|
||||
MODULE_VERMAGIC_MODULE_UNLOAD MODULE_ARCH_VERMAGIC
|
||||
|
||||
|
||||
@@ -44,8 +44,6 @@ extern void *vmalloc_32_user(unsigned long size);
|
||||
extern void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot);
|
||||
extern void *__vmalloc_area(struct vm_struct *area, gfp_t gfp_mask,
|
||||
pgprot_t prot);
|
||||
extern void *__vmalloc_node(unsigned long size, gfp_t gfp_mask,
|
||||
pgprot_t prot, int node);
|
||||
extern void vfree(void *addr);
|
||||
|
||||
extern void *vmap(struct page **pages, unsigned int count,
|
||||
@@ -64,7 +62,6 @@ extern struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags,
|
||||
extern struct vm_struct *get_vm_area_node(unsigned long size,
|
||||
unsigned long flags, int node);
|
||||
extern struct vm_struct *remove_vm_area(void *addr);
|
||||
extern struct vm_struct *__remove_vm_area(void *addr);
|
||||
extern int map_vm_area(struct vm_struct *area, pgprot_t prot,
|
||||
struct page ***pages);
|
||||
extern void unmap_vm_area(struct vm_struct *area);
|
||||
|
||||
@@ -18,7 +18,19 @@
|
||||
* generated will simply be the increment of a global address.
|
||||
*/
|
||||
|
||||
#define FOR_ALL_ZONES(x) x##_DMA, x##_DMA32, x##_NORMAL, x##_HIGH
|
||||
#ifdef CONFIG_ZONE_DMA32
|
||||
#define DMA32_ZONE(xx) xx##_DMA32,
|
||||
#else
|
||||
#define DMA32_ZONE(xx)
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_HIGHMEM
|
||||
#define HIGHMEM_ZONE(xx) , xx##_HIGH
|
||||
#else
|
||||
#define HIGHMEM_ZONE(xx)
|
||||
#endif
|
||||
|
||||
#define FOR_ALL_ZONES(xx) xx##_DMA, DMA32_ZONE(xx) xx##_NORMAL HIGHMEM_ZONE(xx)
|
||||
|
||||
enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
|
||||
FOR_ALL_ZONES(PGALLOC),
|
||||
@@ -124,12 +136,10 @@ static inline unsigned long node_page_state(int node,
|
||||
struct zone *zones = NODE_DATA(node)->node_zones;
|
||||
|
||||
return
|
||||
#ifndef CONFIG_DMA_IS_NORMAL
|
||||
#if !defined(CONFIG_DMA_IS_DMA32) && BITS_PER_LONG >= 64
|
||||
#ifdef CONFIG_ZONE_DMA32
|
||||
zone_page_state(&zones[ZONE_DMA32], item) +
|
||||
#endif
|
||||
zone_page_state(&zones[ZONE_NORMAL], item) +
|
||||
#endif
|
||||
#ifdef CONFIG_HIGHMEM
|
||||
zone_page_state(&zones[ZONE_HIGHMEM], item) +
|
||||
#endif
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
/*
|
||||
* This file define a set of standard wireless extensions
|
||||
*
|
||||
* Version : 20 17.2.06
|
||||
* Version : 21 14.3.06
|
||||
*
|
||||
* Authors : Jean Tourrilhes - HPL - <jt@hpl.hp.com>
|
||||
* Copyright (c) 1997-2006 Jean Tourrilhes, All Rights Reserved.
|
||||
@@ -69,9 +69,14 @@
|
||||
|
||||
/***************************** INCLUDES *****************************/
|
||||
|
||||
/* This header is used in user-space, therefore need to be sanitised
|
||||
* for that purpose. Those includes are usually not compatible with glibc.
|
||||
* To know which includes to use in user-space, check iwlib.h. */
|
||||
#ifdef __KERNEL__
|
||||
#include <linux/types.h> /* for "caddr_t" et al */
|
||||
#include <linux/socket.h> /* for "struct sockaddr" et al */
|
||||
#include <linux/if.h> /* for IFNAMSIZ and co... */
|
||||
#endif /* __KERNEL__ */
|
||||
|
||||
/***************************** VERSION *****************************/
|
||||
/*
|
||||
@@ -80,7 +85,7 @@
|
||||
* (there is some stuff that will be added in the future...)
|
||||
* I just plan to increment with each new version.
|
||||
*/
|
||||
#define WIRELESS_EXT 20
|
||||
#define WIRELESS_EXT 21
|
||||
|
||||
/*
|
||||
* Changes :
|
||||
@@ -208,6 +213,14 @@
|
||||
* V19 to V20
|
||||
* ----------
|
||||
* - RtNetlink requests support (SET/GET)
|
||||
*
|
||||
* V20 to V21
|
||||
* ----------
|
||||
* - Remove (struct net_device *)->get_wireless_stats()
|
||||
* - Change length in ESSID and NICK to strlen() instead of strlen()+1
|
||||
* - Add IW_RETRY_SHORT/IW_RETRY_LONG retry modifiers
|
||||
* - Power/Retry relative values no longer * 100000
|
||||
* - Add explicit flag to tell stats are in 802.11k RCPI : IW_QUAL_RCPI
|
||||
*/
|
||||
|
||||
/**************************** CONSTANTS ****************************/
|
||||
@@ -448,6 +461,7 @@
|
||||
#define IW_QUAL_QUAL_INVALID 0x10 /* Driver doesn't provide value */
|
||||
#define IW_QUAL_LEVEL_INVALID 0x20
|
||||
#define IW_QUAL_NOISE_INVALID 0x40
|
||||
#define IW_QUAL_RCPI 0x80 /* Level + Noise are 802.11k RCPI */
|
||||
#define IW_QUAL_ALL_INVALID 0x70
|
||||
|
||||
/* Frequency flags */
|
||||
@@ -500,10 +514,12 @@
|
||||
#define IW_RETRY_TYPE 0xF000 /* Type of parameter */
|
||||
#define IW_RETRY_LIMIT 0x1000 /* Maximum number of retries*/
|
||||
#define IW_RETRY_LIFETIME 0x2000 /* Maximum duration of retries in us */
|
||||
#define IW_RETRY_MODIFIER 0x000F /* Modify a parameter */
|
||||
#define IW_RETRY_MODIFIER 0x00FF /* Modify a parameter */
|
||||
#define IW_RETRY_MIN 0x0001 /* Value is a minimum */
|
||||
#define IW_RETRY_MAX 0x0002 /* Value is a maximum */
|
||||
#define IW_RETRY_RELATIVE 0x0004 /* Value is not in seconds/ms/us */
|
||||
#define IW_RETRY_SHORT 0x0010 /* Value is for short packets */
|
||||
#define IW_RETRY_LONG 0x0020 /* Value is for long packets */
|
||||
|
||||
/* Scanning request flags */
|
||||
#define IW_SCAN_DEFAULT 0x0000 /* Default scan of the driver */
|
||||
@@ -1017,7 +1033,7 @@ struct iw_range
|
||||
/* Note : this frequency list doesn't need to fit channel numbers,
|
||||
* because each entry contain its channel index */
|
||||
|
||||
__u32 enc_capa; /* IW_ENC_CAPA_* bit field */
|
||||
__u32 enc_capa; /* IW_ENC_CAPA_* bit field */
|
||||
};
|
||||
|
||||
/*
|
||||
|
||||
@@ -116,6 +116,7 @@ int sync_page_range(struct inode *inode, struct address_space *mapping,
|
||||
loff_t pos, loff_t count);
|
||||
int sync_page_range_nolock(struct inode *inode, struct address_space *mapping,
|
||||
loff_t pos, loff_t count);
|
||||
void set_page_dirty_balance(struct page *page);
|
||||
|
||||
/* pdflush.c */
|
||||
extern int nr_pdflush_threads; /* Global so it can be exported to sysctl
|
||||
|
||||
Reference in New Issue
Block a user