Merge branch 'master'
This commit is contained in:
@@ -85,7 +85,7 @@ enum adb_message {
|
||||
ADB_MSG_POST_RESET /* Called after resetting the bus (re-do init & register) */
|
||||
};
|
||||
extern struct adb_driver *adb_controller;
|
||||
extern struct notifier_block *adb_client_list;
|
||||
extern struct blocking_notifier_head adb_client_list;
|
||||
|
||||
int adb_request(struct adb_request *req, void (*done)(struct adb_request *),
|
||||
int flags, int nbytes, ...);
|
||||
|
||||
@@ -158,4 +158,10 @@
|
||||
#define UART01x_RSR_ANY (UART01x_RSR_OE|UART01x_RSR_BE|UART01x_RSR_PE|UART01x_RSR_FE)
|
||||
#define UART01x_FR_MODEM_ANY (UART01x_FR_DCD|UART01x_FR_DSR|UART01x_FR_CTS)
|
||||
|
||||
#ifndef __ASSEMBLY__
|
||||
struct amba_pl010_data {
|
||||
void (*set_mctrl)(struct amba_device *dev, void __iomem *base, unsigned int mctrl);
|
||||
};
|
||||
#endif
|
||||
|
||||
#endif
|
||||
|
||||
@@ -33,27 +33,42 @@
|
||||
* 1200 - 1299 messages internal to the audit daemon
|
||||
* 1300 - 1399 audit event messages
|
||||
* 1400 - 1499 SE Linux use
|
||||
* 1500 - 1999 future use
|
||||
* 2000 is for otherwise unclassified kernel audit messages
|
||||
* 1500 - 1599 kernel LSPP events
|
||||
* 1600 - 1699 kernel crypto events
|
||||
* 1700 - 1799 kernel anomaly records
|
||||
* 1800 - 1999 future kernel use (maybe integrity labels and related events)
|
||||
* 2000 is for otherwise unclassified kernel audit messages (legacy)
|
||||
* 2001 - 2099 unused (kernel)
|
||||
* 2100 - 2199 user space anomaly records
|
||||
* 2200 - 2299 user space actions taken in response to anomalies
|
||||
* 2300 - 2399 user space generated LSPP events
|
||||
* 2400 - 2499 user space crypto events
|
||||
* 2500 - 2999 future user space (maybe integrity labels and related events)
|
||||
*
|
||||
* Messages from 1000-1199 are bi-directional. 1200-1299 are exclusively user
|
||||
* space. Anything over that is kernel --> user space communication.
|
||||
* Messages from 1000-1199 are bi-directional. 1200-1299 & 2100 - 2999 are
|
||||
* exclusively user space. 1300-2099 is kernel --> user space
|
||||
* communication.
|
||||
*/
|
||||
#define AUDIT_GET 1000 /* Get status */
|
||||
#define AUDIT_SET 1001 /* Set status (enable/disable/auditd) */
|
||||
#define AUDIT_LIST 1002 /* List syscall filtering rules */
|
||||
#define AUDIT_ADD 1003 /* Add syscall filtering rule */
|
||||
#define AUDIT_DEL 1004 /* Delete syscall filtering rule */
|
||||
#define AUDIT_LIST 1002 /* List syscall rules -- deprecated */
|
||||
#define AUDIT_ADD 1003 /* Add syscall rule -- deprecated */
|
||||
#define AUDIT_DEL 1004 /* Delete syscall rule -- deprecated */
|
||||
#define AUDIT_USER 1005 /* Message from userspace -- deprecated */
|
||||
#define AUDIT_LOGIN 1006 /* Define the login id and information */
|
||||
#define AUDIT_WATCH_INS 1007 /* Insert file/dir watch entry */
|
||||
#define AUDIT_WATCH_REM 1008 /* Remove file/dir watch entry */
|
||||
#define AUDIT_WATCH_LIST 1009 /* List all file/dir watches */
|
||||
#define AUDIT_SIGNAL_INFO 1010 /* Get info about sender of signal to auditd */
|
||||
#define AUDIT_ADD_RULE 1011 /* Add syscall filtering rule */
|
||||
#define AUDIT_DEL_RULE 1012 /* Delete syscall filtering rule */
|
||||
#define AUDIT_LIST_RULES 1013 /* List syscall filtering rules */
|
||||
|
||||
#define AUDIT_FIRST_USER_MSG 1100 /* Userspace messages mostly uninteresting to kernel */
|
||||
#define AUDIT_USER_AVC 1107 /* We filter this differently */
|
||||
#define AUDIT_LAST_USER_MSG 1199
|
||||
#define AUDIT_FIRST_USER_MSG2 2100 /* More user space messages */
|
||||
#define AUDIT_LAST_USER_MSG2 2999
|
||||
|
||||
#define AUDIT_DAEMON_START 1200 /* Daemon startup record */
|
||||
#define AUDIT_DAEMON_END 1201 /* Daemon normal stop record */
|
||||
@@ -72,6 +87,13 @@
|
||||
#define AUDIT_AVC 1400 /* SE Linux avc denial or grant */
|
||||
#define AUDIT_SELINUX_ERR 1401 /* Internal SE Linux Errors */
|
||||
#define AUDIT_AVC_PATH 1402 /* dentry, vfsmount pair from avc */
|
||||
#define AUDIT_MAC_POLICY_LOAD 1403 /* Policy file load */
|
||||
#define AUDIT_MAC_STATUS 1404 /* Changed enforcing,permissive,off */
|
||||
#define AUDIT_MAC_CONFIG_CHANGE 1405 /* Changes to booleans */
|
||||
|
||||
#define AUDIT_FIRST_KERN_ANOM_MSG 1700
|
||||
#define AUDIT_LAST_KERN_ANOM_MSG 1799
|
||||
#define AUDIT_ANOM_PROMISCUOUS 1700 /* Device changed promiscuous mode */
|
||||
|
||||
#define AUDIT_KERNEL 2000 /* Asynchronous audit record. NOT A REQUEST. */
|
||||
|
||||
@@ -81,8 +103,9 @@
|
||||
#define AUDIT_FILTER_ENTRY 0x02 /* Apply rule at syscall entry */
|
||||
#define AUDIT_FILTER_WATCH 0x03 /* Apply rule to file system watches */
|
||||
#define AUDIT_FILTER_EXIT 0x04 /* Apply rule at syscall exit */
|
||||
#define AUDIT_FILTER_TYPE 0x05 /* Apply rule at audit_log_start */
|
||||
|
||||
#define AUDIT_NR_FILTERS 5
|
||||
#define AUDIT_NR_FILTERS 6
|
||||
|
||||
#define AUDIT_FILTER_PREPEND 0x10 /* Prepend to front of list */
|
||||
|
||||
@@ -98,6 +121,13 @@
|
||||
#define AUDIT_WORD(nr) ((__u32)((nr)/32))
|
||||
#define AUDIT_BIT(nr) (1 << ((nr) - AUDIT_WORD(nr)*32))
|
||||
|
||||
/* This bitmask is used to validate user input. It represents all bits that
|
||||
* are currently used in an audit field constant understood by the kernel.
|
||||
* If you are adding a new #define AUDIT_<whatever>, please ensure that
|
||||
* AUDIT_UNUSED_BITS is updated if need be. */
|
||||
#define AUDIT_UNUSED_BITS 0x0FFFFC00
|
||||
|
||||
|
||||
/* Rule fields */
|
||||
/* These are useful when checking the
|
||||
* task structure at task creation time
|
||||
@@ -114,6 +144,7 @@
|
||||
#define AUDIT_LOGINUID 9
|
||||
#define AUDIT_PERS 10
|
||||
#define AUDIT_ARCH 11
|
||||
#define AUDIT_MSGTYPE 12
|
||||
|
||||
/* These are ONLY useful when checking
|
||||
* at syscall exit time (AUDIT_AT_EXIT). */
|
||||
@@ -128,8 +159,28 @@
|
||||
#define AUDIT_ARG2 (AUDIT_ARG0+2)
|
||||
#define AUDIT_ARG3 (AUDIT_ARG0+3)
|
||||
|
||||
#define AUDIT_NEGATE 0x80000000
|
||||
#define AUDIT_NEGATE 0x80000000
|
||||
|
||||
/* These are the supported operators.
|
||||
* 4 2 1
|
||||
* = > <
|
||||
* -------
|
||||
* 0 0 0 0 nonsense
|
||||
* 0 0 1 1 <
|
||||
* 0 1 0 2 >
|
||||
* 0 1 1 3 !=
|
||||
* 1 0 0 4 =
|
||||
* 1 0 1 5 <=
|
||||
* 1 1 0 6 >=
|
||||
* 1 1 1 7 all operators
|
||||
*/
|
||||
#define AUDIT_LESS_THAN 0x10000000
|
||||
#define AUDIT_GREATER_THAN 0x20000000
|
||||
#define AUDIT_NOT_EQUAL 0x30000000
|
||||
#define AUDIT_EQUAL 0x40000000
|
||||
#define AUDIT_LESS_THAN_OR_EQUAL (AUDIT_LESS_THAN|AUDIT_EQUAL)
|
||||
#define AUDIT_GREATER_THAN_OR_EQUAL (AUDIT_GREATER_THAN|AUDIT_EQUAL)
|
||||
#define AUDIT_OPERATORS (AUDIT_EQUAL|AUDIT_NOT_EQUAL)
|
||||
|
||||
/* Status symbols */
|
||||
/* Mask values */
|
||||
@@ -186,6 +237,26 @@ struct audit_status {
|
||||
__u32 backlog; /* messages waiting in queue */
|
||||
};
|
||||
|
||||
/* audit_rule_data supports filter rules with both integer and string
|
||||
* fields. It corresponds with AUDIT_ADD_RULE, AUDIT_DEL_RULE and
|
||||
* AUDIT_LIST_RULES requests.
|
||||
*/
|
||||
struct audit_rule_data {
|
||||
__u32 flags; /* AUDIT_PER_{TASK,CALL}, AUDIT_PREPEND */
|
||||
__u32 action; /* AUDIT_NEVER, AUDIT_POSSIBLE, AUDIT_ALWAYS */
|
||||
__u32 field_count;
|
||||
__u32 mask[AUDIT_BITMASK_SIZE]; /* syscall(s) affected */
|
||||
__u32 fields[AUDIT_MAX_FIELDS];
|
||||
__u32 values[AUDIT_MAX_FIELDS];
|
||||
__u32 fieldflags[AUDIT_MAX_FIELDS];
|
||||
__u32 buflen; /* total length of string fields */
|
||||
char buf[0]; /* string fields buffer */
|
||||
};
|
||||
|
||||
/* audit_rule is supported to maintain backward compatibility with
|
||||
* userspace. It supports integer fields only and corresponds to
|
||||
* AUDIT_ADD, AUDIT_DEL and AUDIT_LIST requests.
|
||||
*/
|
||||
struct audit_rule { /* for AUDIT_LIST, AUDIT_ADD, and AUDIT_DEL */
|
||||
__u32 flags; /* AUDIT_PER_{TASK,CALL}, AUDIT_PREPEND */
|
||||
__u32 action; /* AUDIT_NEVER, AUDIT_POSSIBLE, AUDIT_ALWAYS */
|
||||
@@ -222,22 +293,33 @@ extern void audit_syscall_entry(struct task_struct *task, int arch,
|
||||
extern void audit_syscall_exit(struct task_struct *task, int failed, long return_code);
|
||||
extern void audit_getname(const char *name);
|
||||
extern void audit_putname(const char *name);
|
||||
extern void audit_inode(const char *name, const struct inode *inode, unsigned flags);
|
||||
extern void __audit_inode(const char *name, const struct inode *inode, unsigned flags);
|
||||
extern void __audit_inode_child(const char *dname, const struct inode *inode,
|
||||
unsigned long pino);
|
||||
static inline void audit_inode(const char *name, const struct inode *inode,
|
||||
unsigned flags) {
|
||||
if (unlikely(current->audit_context))
|
||||
__audit_inode(name, inode, flags);
|
||||
}
|
||||
static inline void audit_inode_child(const char *dname,
|
||||
const struct inode *inode,
|
||||
unsigned long pino) {
|
||||
if (unlikely(current->audit_context))
|
||||
__audit_inode_child(dname, inode, pino);
|
||||
}
|
||||
|
||||
/* Private API (for audit.c only) */
|
||||
extern int audit_receive_filter(int type, int pid, int uid, int seq,
|
||||
void *data, uid_t loginuid);
|
||||
extern unsigned int audit_serial(void);
|
||||
extern void auditsc_get_stamp(struct audit_context *ctx,
|
||||
struct timespec *t, unsigned int *serial);
|
||||
extern int audit_set_loginuid(struct task_struct *task, uid_t loginuid);
|
||||
extern uid_t audit_get_loginuid(struct audit_context *ctx);
|
||||
extern int audit_ipc_perms(unsigned long qbytes, uid_t uid, gid_t gid, mode_t mode);
|
||||
extern int audit_ipc_perms(unsigned long qbytes, uid_t uid, gid_t gid, mode_t mode, struct kern_ipc_perm *ipcp);
|
||||
extern int audit_socketcall(int nargs, unsigned long *args);
|
||||
extern int audit_sockaddr(int len, void *addr);
|
||||
extern int audit_avc_path(struct dentry *dentry, struct vfsmount *mnt);
|
||||
extern void audit_signal_info(int sig, struct task_struct *t);
|
||||
extern int audit_filter_user(struct netlink_skb_parms *cb, int type);
|
||||
extern int audit_set_macxattr(const char *name);
|
||||
#else
|
||||
#define audit_alloc(t) ({ 0; })
|
||||
#define audit_free(t) do { ; } while (0)
|
||||
@@ -245,16 +327,18 @@ extern int audit_filter_user(struct netlink_skb_parms *cb, int type);
|
||||
#define audit_syscall_exit(t,f,r) do { ; } while (0)
|
||||
#define audit_getname(n) do { ; } while (0)
|
||||
#define audit_putname(n) do { ; } while (0)
|
||||
#define __audit_inode(n,i,f) do { ; } while (0)
|
||||
#define __audit_inode_child(d,i,p) do { ; } while (0)
|
||||
#define audit_inode(n,i,f) do { ; } while (0)
|
||||
#define audit_receive_filter(t,p,u,s,d,l) ({ -EOPNOTSUPP; })
|
||||
#define audit_inode_child(d,i,p) do { ; } while (0)
|
||||
#define auditsc_get_stamp(c,t,s) do { BUG(); } while (0)
|
||||
#define audit_get_loginuid(c) ({ -1; })
|
||||
#define audit_ipc_perms(q,u,g,m) ({ 0; })
|
||||
#define audit_ipc_perms(q,u,g,m,i) ({ 0; })
|
||||
#define audit_socketcall(n,a) ({ 0; })
|
||||
#define audit_sockaddr(len, addr) ({ 0; })
|
||||
#define audit_avc_path(dentry, mnt) ({ 0; })
|
||||
#define audit_signal_info(s,t) do { ; } while (0)
|
||||
#define audit_filter_user(cb,t) ({ 1; })
|
||||
#define audit_set_macxattr(n) do { ; } while (0)
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_AUDIT
|
||||
@@ -278,12 +362,11 @@ extern void audit_log_d_path(struct audit_buffer *ab,
|
||||
const char *prefix,
|
||||
struct dentry *dentry,
|
||||
struct vfsmount *vfsmnt);
|
||||
/* Private API (for auditsc.c only) */
|
||||
extern void audit_send_reply(int pid, int seq, int type,
|
||||
int done, int multi,
|
||||
void *payload, int size);
|
||||
extern void audit_log_lost(const char *message);
|
||||
extern struct semaphore audit_netlink_sem;
|
||||
/* Private API (for audit.c only) */
|
||||
extern int audit_filter_user(struct netlink_skb_parms *cb, int type);
|
||||
extern int audit_filter_type(int type);
|
||||
extern int audit_receive_filter(int type, int pid, int uid, int seq,
|
||||
void *data, size_t datasz, uid_t loginuid);
|
||||
#else
|
||||
#define audit_log(c,g,t,f,...) do { ; } while (0)
|
||||
#define audit_log_start(c,g,t) ({ NULL; })
|
||||
@@ -293,6 +376,7 @@ extern struct semaphore audit_netlink_sem;
|
||||
#define audit_log_hex(a,b,l) do { ; } while (0)
|
||||
#define audit_log_untrustedstring(a,s) do { ; } while (0)
|
||||
#define audit_log_d_path(b,p,d,v) do { ; } while (0)
|
||||
#define audit_panic(m) do { ; } while (0)
|
||||
#endif
|
||||
#endif
|
||||
#endif
|
||||
|
||||
@@ -19,18 +19,37 @@
|
||||
#undef AUTOFS_MIN_PROTO_VERSION
|
||||
#undef AUTOFS_MAX_PROTO_VERSION
|
||||
|
||||
#define AUTOFS_PROTO_VERSION 4
|
||||
#define AUTOFS_PROTO_VERSION 5
|
||||
#define AUTOFS_MIN_PROTO_VERSION 3
|
||||
#define AUTOFS_MAX_PROTO_VERSION 4
|
||||
#define AUTOFS_MAX_PROTO_VERSION 5
|
||||
|
||||
#define AUTOFS_PROTO_SUBVERSION 7
|
||||
#define AUTOFS_PROTO_SUBVERSION 0
|
||||
|
||||
/* Mask for expire behaviour */
|
||||
#define AUTOFS_EXP_IMMEDIATE 1
|
||||
#define AUTOFS_EXP_LEAVES 2
|
||||
|
||||
/* New message type */
|
||||
#define autofs_ptype_expire_multi 2 /* Expire entry (umount request) */
|
||||
/* Daemon notification packet types */
|
||||
enum autofs_notify {
|
||||
NFY_NONE,
|
||||
NFY_MOUNT,
|
||||
NFY_EXPIRE
|
||||
};
|
||||
|
||||
/* Kernel protocol version 4 packet types */
|
||||
|
||||
/* Expire entry (umount request) */
|
||||
#define autofs_ptype_expire_multi 2
|
||||
|
||||
/* Kernel protocol version 5 packet types */
|
||||
|
||||
/* Indirect mount missing and expire requests. */
|
||||
#define autofs_ptype_missing_indirect 3
|
||||
#define autofs_ptype_expire_indirect 4
|
||||
|
||||
/* Direct mount missing and expire requests */
|
||||
#define autofs_ptype_missing_direct 5
|
||||
#define autofs_ptype_expire_direct 6
|
||||
|
||||
/* v4 multi expire (via pipe) */
|
||||
struct autofs_packet_expire_multi {
|
||||
@@ -40,14 +59,36 @@ struct autofs_packet_expire_multi {
|
||||
char name[NAME_MAX+1];
|
||||
};
|
||||
|
||||
/* autofs v5 common packet struct */
|
||||
struct autofs_v5_packet {
|
||||
struct autofs_packet_hdr hdr;
|
||||
autofs_wqt_t wait_queue_token;
|
||||
__u32 dev;
|
||||
__u64 ino;
|
||||
__u32 uid;
|
||||
__u32 gid;
|
||||
__u32 pid;
|
||||
__u32 tgid;
|
||||
__u32 len;
|
||||
char name[NAME_MAX+1];
|
||||
};
|
||||
|
||||
typedef struct autofs_v5_packet autofs_packet_missing_indirect_t;
|
||||
typedef struct autofs_v5_packet autofs_packet_expire_indirect_t;
|
||||
typedef struct autofs_v5_packet autofs_packet_missing_direct_t;
|
||||
typedef struct autofs_v5_packet autofs_packet_expire_direct_t;
|
||||
|
||||
union autofs_packet_union {
|
||||
struct autofs_packet_hdr hdr;
|
||||
struct autofs_packet_missing missing;
|
||||
struct autofs_packet_expire expire;
|
||||
struct autofs_packet_expire_multi expire_multi;
|
||||
struct autofs_v5_packet v5_packet;
|
||||
};
|
||||
|
||||
#define AUTOFS_IOC_EXPIRE_MULTI _IOW(0x93,0x66,int)
|
||||
#define AUTOFS_IOC_EXPIRE_INDIRECT AUTOFS_IOC_EXPIRE_MULTI
|
||||
#define AUTOFS_IOC_EXPIRE_DIRECT AUTOFS_IOC_EXPIRE_MULTI
|
||||
#define AUTOFS_IOC_PROTOSUBVER _IOR(0x93,0x67,int)
|
||||
#define AUTOFS_IOC_ASKREGHOST _IOR(0x93,0x68,int)
|
||||
#define AUTOFS_IOC_TOGGLEREGHOST _IOR(0x93,0x69,int)
|
||||
|
||||
@@ -2,89 +2,12 @@
|
||||
#define _LINUX_BITOPS_H
|
||||
#include <asm/types.h>
|
||||
|
||||
/*
|
||||
* ffs: find first bit set. This is defined the same way as
|
||||
* the libc and compiler builtin ffs routines, therefore
|
||||
* differs in spirit from the above ffz (man ffs).
|
||||
*/
|
||||
|
||||
static inline int generic_ffs(int x)
|
||||
{
|
||||
int r = 1;
|
||||
|
||||
if (!x)
|
||||
return 0;
|
||||
if (!(x & 0xffff)) {
|
||||
x >>= 16;
|
||||
r += 16;
|
||||
}
|
||||
if (!(x & 0xff)) {
|
||||
x >>= 8;
|
||||
r += 8;
|
||||
}
|
||||
if (!(x & 0xf)) {
|
||||
x >>= 4;
|
||||
r += 4;
|
||||
}
|
||||
if (!(x & 3)) {
|
||||
x >>= 2;
|
||||
r += 2;
|
||||
}
|
||||
if (!(x & 1)) {
|
||||
x >>= 1;
|
||||
r += 1;
|
||||
}
|
||||
return r;
|
||||
}
|
||||
|
||||
/*
|
||||
* fls: find last bit set.
|
||||
*/
|
||||
|
||||
static __inline__ int generic_fls(int x)
|
||||
{
|
||||
int r = 32;
|
||||
|
||||
if (!x)
|
||||
return 0;
|
||||
if (!(x & 0xffff0000u)) {
|
||||
x <<= 16;
|
||||
r -= 16;
|
||||
}
|
||||
if (!(x & 0xff000000u)) {
|
||||
x <<= 8;
|
||||
r -= 8;
|
||||
}
|
||||
if (!(x & 0xf0000000u)) {
|
||||
x <<= 4;
|
||||
r -= 4;
|
||||
}
|
||||
if (!(x & 0xc0000000u)) {
|
||||
x <<= 2;
|
||||
r -= 2;
|
||||
}
|
||||
if (!(x & 0x80000000u)) {
|
||||
x <<= 1;
|
||||
r -= 1;
|
||||
}
|
||||
return r;
|
||||
}
|
||||
|
||||
/*
|
||||
* Include this here because some architectures need generic_ffs/fls in
|
||||
* scope
|
||||
*/
|
||||
#include <asm/bitops.h>
|
||||
|
||||
|
||||
static inline int generic_fls64(__u64 x)
|
||||
{
|
||||
__u32 h = x >> 32;
|
||||
if (h)
|
||||
return fls(h) + 32;
|
||||
return fls(x);
|
||||
}
|
||||
|
||||
static __inline__ int get_bitmask_order(unsigned int count)
|
||||
{
|
||||
int order;
|
||||
@@ -103,54 +26,9 @@ static __inline__ int get_count_order(unsigned int count)
|
||||
return order;
|
||||
}
|
||||
|
||||
/*
|
||||
* hweightN: returns the hamming weight (i.e. the number
|
||||
* of bits set) of a N-bit word
|
||||
*/
|
||||
|
||||
static inline unsigned int generic_hweight32(unsigned int w)
|
||||
{
|
||||
unsigned int res = (w & 0x55555555) + ((w >> 1) & 0x55555555);
|
||||
res = (res & 0x33333333) + ((res >> 2) & 0x33333333);
|
||||
res = (res & 0x0F0F0F0F) + ((res >> 4) & 0x0F0F0F0F);
|
||||
res = (res & 0x00FF00FF) + ((res >> 8) & 0x00FF00FF);
|
||||
return (res & 0x0000FFFF) + ((res >> 16) & 0x0000FFFF);
|
||||
}
|
||||
|
||||
static inline unsigned int generic_hweight16(unsigned int w)
|
||||
{
|
||||
unsigned int res = (w & 0x5555) + ((w >> 1) & 0x5555);
|
||||
res = (res & 0x3333) + ((res >> 2) & 0x3333);
|
||||
res = (res & 0x0F0F) + ((res >> 4) & 0x0F0F);
|
||||
return (res & 0x00FF) + ((res >> 8) & 0x00FF);
|
||||
}
|
||||
|
||||
static inline unsigned int generic_hweight8(unsigned int w)
|
||||
{
|
||||
unsigned int res = (w & 0x55) + ((w >> 1) & 0x55);
|
||||
res = (res & 0x33) + ((res >> 2) & 0x33);
|
||||
return (res & 0x0F) + ((res >> 4) & 0x0F);
|
||||
}
|
||||
|
||||
static inline unsigned long generic_hweight64(__u64 w)
|
||||
{
|
||||
#if BITS_PER_LONG < 64
|
||||
return generic_hweight32((unsigned int)(w >> 32)) +
|
||||
generic_hweight32((unsigned int)w);
|
||||
#else
|
||||
u64 res;
|
||||
res = (w & 0x5555555555555555ul) + ((w >> 1) & 0x5555555555555555ul);
|
||||
res = (res & 0x3333333333333333ul) + ((res >> 2) & 0x3333333333333333ul);
|
||||
res = (res & 0x0F0F0F0F0F0F0F0Ful) + ((res >> 4) & 0x0F0F0F0F0F0F0F0Ful);
|
||||
res = (res & 0x00FF00FF00FF00FFul) + ((res >> 8) & 0x00FF00FF00FF00FFul);
|
||||
res = (res & 0x0000FFFF0000FFFFul) + ((res >> 16) & 0x0000FFFF0000FFFFul);
|
||||
return (res & 0x00000000FFFFFFFFul) + ((res >> 32) & 0x00000000FFFFFFFFul);
|
||||
#endif
|
||||
}
|
||||
|
||||
static inline unsigned long hweight_long(unsigned long w)
|
||||
{
|
||||
return sizeof(w) == 4 ? generic_hweight32(w) : generic_hweight64(w);
|
||||
return sizeof(w) == 4 ? hweight32(w) : hweight64(w);
|
||||
}
|
||||
|
||||
/*
|
||||
@@ -175,4 +53,11 @@ static inline __u32 ror32(__u32 word, unsigned int shift)
|
||||
return (word >> shift) | (word << (32 - shift));
|
||||
}
|
||||
|
||||
static inline unsigned fls_long(unsigned long l)
|
||||
{
|
||||
if (sizeof(l) == 4)
|
||||
return fls(l);
|
||||
return fls64(l);
|
||||
}
|
||||
|
||||
#endif
|
||||
|
||||
@@ -55,25 +55,29 @@ struct as_io_context {
|
||||
|
||||
struct cfq_queue;
|
||||
struct cfq_io_context {
|
||||
/*
|
||||
* circular list of cfq_io_contexts belonging to a process io context
|
||||
*/
|
||||
struct list_head list;
|
||||
struct cfq_queue *cfqq[2];
|
||||
struct rb_node rb_node;
|
||||
void *key;
|
||||
|
||||
struct cfq_queue *cfqq[2];
|
||||
|
||||
struct io_context *ioc;
|
||||
|
||||
unsigned long last_end_request;
|
||||
unsigned long last_queue;
|
||||
sector_t last_request_pos;
|
||||
unsigned long last_queue;
|
||||
|
||||
unsigned long ttime_total;
|
||||
unsigned long ttime_samples;
|
||||
unsigned long ttime_mean;
|
||||
|
||||
unsigned int seek_samples;
|
||||
u64 seek_total;
|
||||
sector_t seek_mean;
|
||||
|
||||
struct list_head queue_list;
|
||||
|
||||
void (*dtor)(struct cfq_io_context *);
|
||||
void (*exit)(struct cfq_io_context *);
|
||||
void (*dtor)(struct io_context *); /* destructor */
|
||||
void (*exit)(struct io_context *); /* called on task exit */
|
||||
};
|
||||
|
||||
/*
|
||||
@@ -94,7 +98,7 @@ struct io_context {
|
||||
int nr_batch_requests; /* Number of requests left in the batch */
|
||||
|
||||
struct as_io_context *aic;
|
||||
struct cfq_io_context *cic;
|
||||
struct rb_root cic_root;
|
||||
};
|
||||
|
||||
void put_io_context(struct io_context *ioc);
|
||||
|
||||
@@ -38,6 +38,7 @@ typedef struct bootmem_data {
|
||||
unsigned long last_pos;
|
||||
unsigned long last_success; /* Previous allocation point. To speed
|
||||
* up searching */
|
||||
struct list_head list;
|
||||
} bootmem_data_t;
|
||||
|
||||
extern unsigned long __init bootmem_bootmap_pages (unsigned long);
|
||||
@@ -51,6 +52,9 @@ extern void * __init __alloc_bootmem_low_node(pg_data_t *pgdat,
|
||||
unsigned long size,
|
||||
unsigned long align,
|
||||
unsigned long goal);
|
||||
extern void * __init __alloc_bootmem_core(struct bootmem_data *bdata,
|
||||
unsigned long size, unsigned long align, unsigned long goal,
|
||||
unsigned long limit);
|
||||
#ifndef CONFIG_HAVE_ARCH_BOOTMEM_NODE
|
||||
extern void __init reserve_bootmem (unsigned long addr, unsigned long size);
|
||||
#define alloc_bootmem(x) \
|
||||
|
||||
@@ -46,25 +46,28 @@ struct address_space;
|
||||
typedef void (bh_end_io_t)(struct buffer_head *bh, int uptodate);
|
||||
|
||||
/*
|
||||
* Keep related fields in common cachelines. The most commonly accessed
|
||||
* field (b_state) goes at the start so the compiler does not generate
|
||||
* indexed addressing for it.
|
||||
* Historically, a buffer_head was used to map a single block
|
||||
* within a page, and of course as the unit of I/O through the
|
||||
* filesystem and block layers. Nowadays the basic I/O unit
|
||||
* is the bio, and buffer_heads are used for extracting block
|
||||
* mappings (via a get_block_t call), for tracking state within
|
||||
* a page (via a page_mapping) and for wrapping bio submission
|
||||
* for backward compatibility reasons (e.g. submit_bh).
|
||||
*/
|
||||
struct buffer_head {
|
||||
/* First cache line: */
|
||||
unsigned long b_state; /* buffer state bitmap (see above) */
|
||||
struct buffer_head *b_this_page;/* circular list of page's buffers */
|
||||
struct page *b_page; /* the page this bh is mapped to */
|
||||
atomic_t b_count; /* users using this block */
|
||||
u32 b_size; /* block size */
|
||||
|
||||
sector_t b_blocknr; /* block number */
|
||||
char *b_data; /* pointer to data block */
|
||||
sector_t b_blocknr; /* start block number */
|
||||
size_t b_size; /* size of mapping */
|
||||
char *b_data; /* pointer to data within the page */
|
||||
|
||||
struct block_device *b_bdev;
|
||||
bh_end_io_t *b_end_io; /* I/O completion */
|
||||
void *b_private; /* reserved for b_end_io */
|
||||
struct list_head b_assoc_buffers; /* associated with another mapping */
|
||||
atomic_t b_count; /* users using this buffer_head */
|
||||
};
|
||||
|
||||
/*
|
||||
@@ -189,8 +192,8 @@ extern int buffer_heads_over_limit;
|
||||
* address_spaces.
|
||||
*/
|
||||
int try_to_release_page(struct page * page, gfp_t gfp_mask);
|
||||
int block_invalidatepage(struct page *page, unsigned long offset);
|
||||
int do_invalidatepage(struct page *page, unsigned long offset);
|
||||
void block_invalidatepage(struct page *page, unsigned long offset);
|
||||
void do_invalidatepage(struct page *page, unsigned long offset);
|
||||
int block_write_full_page(struct page *page, get_block_t *get_block,
|
||||
struct writeback_control *wbc);
|
||||
int block_read_full_page(struct page*, get_block_t*);
|
||||
@@ -200,7 +203,7 @@ int cont_prepare_write(struct page*, unsigned, unsigned, get_block_t*,
|
||||
int generic_cont_expand(struct inode *inode, loff_t size);
|
||||
int generic_cont_expand_simple(struct inode *inode, loff_t size);
|
||||
int block_commit_write(struct page *page, unsigned from, unsigned to);
|
||||
int block_sync_page(struct page *);
|
||||
void block_sync_page(struct page *);
|
||||
sector_t generic_block_bmap(struct address_space *, sector_t, get_block_t *);
|
||||
int generic_commit_write(struct file *, struct page *, unsigned, unsigned);
|
||||
int block_truncate_page(struct address_space *, loff_t, get_block_t *);
|
||||
@@ -277,6 +280,7 @@ map_bh(struct buffer_head *bh, struct super_block *sb, sector_t block)
|
||||
set_buffer_mapped(bh);
|
||||
bh->b_bdev = sb->s_bdev;
|
||||
bh->b_blocknr = block;
|
||||
bh->b_size = sb->s_blocksize;
|
||||
}
|
||||
|
||||
/*
|
||||
|
||||
@@ -357,7 +357,8 @@ static inline kernel_cap_t cap_invert(kernel_cap_t c)
|
||||
|
||||
#define cap_is_fs_cap(c) (CAP_TO_MASK(c) & CAP_FS_MASK)
|
||||
|
||||
extern int capable(int cap);
|
||||
int capable(int cap);
|
||||
int __capable(struct task_struct *t, int cap);
|
||||
|
||||
#endif /* __KERNEL__ */
|
||||
|
||||
|
||||
@@ -5,13 +5,13 @@
|
||||
struct cdev {
|
||||
struct kobject kobj;
|
||||
struct module *owner;
|
||||
struct file_operations *ops;
|
||||
const struct file_operations *ops;
|
||||
struct list_head list;
|
||||
dev_t dev;
|
||||
unsigned int count;
|
||||
};
|
||||
|
||||
void cdev_init(struct cdev *, struct file_operations *);
|
||||
void cdev_init(struct cdev *, const struct file_operations *);
|
||||
|
||||
struct cdev *cdev_alloc(void);
|
||||
|
||||
|
||||
@@ -8,8 +8,8 @@
|
||||
* it under the terms of the GNU General Public License version 2 as
|
||||
* published by the Free Software Foundation.
|
||||
*/
|
||||
#ifndef ASMARM_CLOCK_H
|
||||
#define ASMARM_CLOCK_H
|
||||
#ifndef __LINUX_CLK_H
|
||||
#define __LINUX_CLK_H
|
||||
|
||||
struct device;
|
||||
|
||||
|
||||
@@ -30,9 +30,9 @@ extern struct inode_operations coda_ioctl_inode_operations;
|
||||
extern struct address_space_operations coda_file_aops;
|
||||
extern struct address_space_operations coda_symlink_aops;
|
||||
|
||||
extern struct file_operations coda_dir_operations;
|
||||
extern struct file_operations coda_file_operations;
|
||||
extern struct file_operations coda_ioctl_operations;
|
||||
extern const struct file_operations coda_dir_operations;
|
||||
extern const struct file_operations coda_file_operations;
|
||||
extern const struct file_operations coda_ioctl_operations;
|
||||
|
||||
/* operations shared over more than one file */
|
||||
int coda_open(struct inode *i, struct file *f);
|
||||
|
||||
@@ -45,6 +45,32 @@ struct compat_tms {
|
||||
compat_clock_t tms_cstime;
|
||||
};
|
||||
|
||||
struct compat_timex {
|
||||
compat_uint_t modes;
|
||||
compat_long_t offset;
|
||||
compat_long_t freq;
|
||||
compat_long_t maxerror;
|
||||
compat_long_t esterror;
|
||||
compat_int_t status;
|
||||
compat_long_t constant;
|
||||
compat_long_t precision;
|
||||
compat_long_t tolerance;
|
||||
struct compat_timeval time;
|
||||
compat_long_t tick;
|
||||
compat_long_t ppsfreq;
|
||||
compat_long_t jitter;
|
||||
compat_int_t shift;
|
||||
compat_long_t stabil;
|
||||
compat_long_t jitcnt;
|
||||
compat_long_t calcnt;
|
||||
compat_long_t errcnt;
|
||||
compat_long_t stbcnt;
|
||||
|
||||
compat_int_t :32; compat_int_t :32; compat_int_t :32; compat_int_t :32;
|
||||
compat_int_t :32; compat_int_t :32; compat_int_t :32; compat_int_t :32;
|
||||
compat_int_t :32; compat_int_t :32; compat_int_t :32; compat_int_t :32;
|
||||
};
|
||||
|
||||
#define _COMPAT_NSIG_WORDS (_COMPAT_NSIG / _COMPAT_NSIG_BPW)
|
||||
|
||||
typedef struct {
|
||||
@@ -121,6 +147,24 @@ typedef struct compat_sigevent {
|
||||
} _sigev_un;
|
||||
} compat_sigevent_t;
|
||||
|
||||
struct compat_robust_list {
|
||||
compat_uptr_t next;
|
||||
};
|
||||
|
||||
struct compat_robust_list_head {
|
||||
struct compat_robust_list list;
|
||||
compat_long_t futex_offset;
|
||||
compat_uptr_t list_op_pending;
|
||||
};
|
||||
|
||||
extern void compat_exit_robust_list(struct task_struct *curr);
|
||||
|
||||
asmlinkage long
|
||||
compat_sys_set_robust_list(struct compat_robust_list_head __user *head,
|
||||
compat_size_t len);
|
||||
asmlinkage long
|
||||
compat_sys_get_robust_list(int pid, compat_uptr_t *head_ptr,
|
||||
compat_size_t __user *len_ptr);
|
||||
|
||||
long compat_sys_semctl(int first, int second, int third, void __user *uptr);
|
||||
long compat_sys_msgsnd(int first, int second, int third, void __user *uptr);
|
||||
@@ -181,5 +225,7 @@ static inline int compat_timespec_compare(struct compat_timespec *lhs,
|
||||
return lhs->tv_nsec - rhs->tv_nsec;
|
||||
}
|
||||
|
||||
asmlinkage long compat_sys_adjtimex(struct compat_timex __user *utp);
|
||||
|
||||
#endif /* CONFIG_COMPAT */
|
||||
#endif /* _LINUX_COMPAT_H */
|
||||
|
||||
@@ -140,6 +140,7 @@ COMPATIBLE_IOCTL(DM_TABLE_DEPS_32)
|
||||
COMPATIBLE_IOCTL(DM_TABLE_STATUS_32)
|
||||
COMPATIBLE_IOCTL(DM_LIST_VERSIONS_32)
|
||||
COMPATIBLE_IOCTL(DM_TARGET_MSG_32)
|
||||
COMPATIBLE_IOCTL(DM_DEV_SET_GEOMETRY_32)
|
||||
COMPATIBLE_IOCTL(DM_VERSION)
|
||||
COMPATIBLE_IOCTL(DM_REMOVE_ALL)
|
||||
COMPATIBLE_IOCTL(DM_LIST_DEVICES)
|
||||
@@ -155,6 +156,7 @@ COMPATIBLE_IOCTL(DM_TABLE_DEPS)
|
||||
COMPATIBLE_IOCTL(DM_TABLE_STATUS)
|
||||
COMPATIBLE_IOCTL(DM_LIST_VERSIONS)
|
||||
COMPATIBLE_IOCTL(DM_TARGET_MSG)
|
||||
COMPATIBLE_IOCTL(DM_DEV_SET_GEOMETRY)
|
||||
/* Big K */
|
||||
COMPATIBLE_IOCTL(PIO_FONT)
|
||||
COMPATIBLE_IOCTL(GIO_FONT)
|
||||
|
||||
@@ -74,7 +74,6 @@ extern int lock_cpu_hotplug_interruptible(void);
|
||||
register_cpu_notifier(&fn##_nb); \
|
||||
}
|
||||
int cpu_down(unsigned int cpu);
|
||||
extern int __attribute__((weak)) smp_prepare_cpu(int cpu);
|
||||
#define cpu_is_offline(cpu) unlikely(!cpu_online(cpu))
|
||||
#else
|
||||
#define lock_cpu_hotplug() do { } while (0)
|
||||
|
||||
@@ -67,7 +67,7 @@
|
||||
*
|
||||
* int any_online_cpu(mask) First online cpu in mask
|
||||
*
|
||||
* for_each_cpu(cpu) for-loop cpu over cpu_possible_map
|
||||
* for_each_possible_cpu(cpu) for-loop cpu over cpu_possible_map
|
||||
* for_each_online_cpu(cpu) for-loop cpu over cpu_online_map
|
||||
* for_each_present_cpu(cpu) for-loop cpu over cpu_present_map
|
||||
*
|
||||
@@ -212,17 +212,15 @@ static inline void __cpus_shift_left(cpumask_t *dstp,
|
||||
bitmap_shift_left(dstp->bits, srcp->bits, n, nbits);
|
||||
}
|
||||
|
||||
#define first_cpu(src) __first_cpu(&(src), NR_CPUS)
|
||||
static inline int __first_cpu(const cpumask_t *srcp, int nbits)
|
||||
{
|
||||
return min_t(int, nbits, find_first_bit(srcp->bits, nbits));
|
||||
}
|
||||
|
||||
#define next_cpu(n, src) __next_cpu((n), &(src), NR_CPUS)
|
||||
static inline int __next_cpu(int n, const cpumask_t *srcp, int nbits)
|
||||
{
|
||||
return min_t(int, nbits, find_next_bit(srcp->bits, nbits, n+1));
|
||||
}
|
||||
#ifdef CONFIG_SMP
|
||||
int __first_cpu(const cpumask_t *srcp);
|
||||
#define first_cpu(src) __first_cpu(&(src))
|
||||
int __next_cpu(int n, const cpumask_t *srcp);
|
||||
#define next_cpu(n, src) __next_cpu((n), &(src))
|
||||
#else
|
||||
#define first_cpu(src) 0
|
||||
#define next_cpu(n, src) 1
|
||||
#endif
|
||||
|
||||
#define cpumask_of_cpu(cpu) \
|
||||
({ \
|
||||
@@ -398,27 +396,18 @@ extern cpumask_t cpu_present_map;
|
||||
#define cpu_present(cpu) ((cpu) == 0)
|
||||
#endif
|
||||
|
||||
#define any_online_cpu(mask) \
|
||||
({ \
|
||||
int cpu; \
|
||||
for_each_cpu_mask(cpu, (mask)) \
|
||||
if (cpu_online(cpu)) \
|
||||
break; \
|
||||
cpu; \
|
||||
})
|
||||
#ifdef CONFIG_SMP
|
||||
int highest_possible_processor_id(void);
|
||||
#define any_online_cpu(mask) __any_online_cpu(&(mask))
|
||||
int __any_online_cpu(const cpumask_t *mask);
|
||||
#else
|
||||
#define highest_possible_processor_id() 0
|
||||
#define any_online_cpu(mask) 0
|
||||
#endif
|
||||
|
||||
#define for_each_cpu(cpu) for_each_cpu_mask((cpu), cpu_possible_map)
|
||||
#define for_each_cpu(cpu) for_each_cpu_mask((cpu), cpu_possible_map)
|
||||
#define for_each_possible_cpu(cpu) for_each_cpu_mask((cpu), cpu_possible_map)
|
||||
#define for_each_online_cpu(cpu) for_each_cpu_mask((cpu), cpu_online_map)
|
||||
#define for_each_present_cpu(cpu) for_each_cpu_mask((cpu), cpu_present_map)
|
||||
|
||||
/* Find the highest possible smp_processor_id() */
|
||||
#define highest_possible_processor_id() \
|
||||
({ \
|
||||
unsigned int cpu, highest = 0; \
|
||||
for_each_cpu_mask(cpu, cpu_possible_map) \
|
||||
highest = cpu; \
|
||||
highest; \
|
||||
})
|
||||
|
||||
|
||||
#endif /* __LINUX_CPUMASK_H */
|
||||
|
||||
@@ -11,7 +11,7 @@
|
||||
extern unsigned long long elfcorehdr_addr;
|
||||
extern ssize_t copy_oldmem_page(unsigned long, char *, size_t,
|
||||
unsigned long, int);
|
||||
extern struct file_operations proc_vmcore_operations;
|
||||
extern const struct file_operations proc_vmcore_operations;
|
||||
extern struct proc_dir_entry *proc_vmcore;
|
||||
|
||||
#endif /* CONFIG_CRASH_DUMP */
|
||||
|
||||
@@ -162,6 +162,8 @@ d_iput: no no no yes
|
||||
#define DCACHE_REFERENCED 0x0008 /* Recently used, don't discard. */
|
||||
#define DCACHE_UNHASHED 0x0010
|
||||
|
||||
#define DCACHE_INOTIFY_PARENT_WATCHED 0x0020 /* Parent inode is watched */
|
||||
|
||||
extern spinlock_t dcache_lock;
|
||||
|
||||
/**
|
||||
|
||||
@@ -29,7 +29,7 @@ struct debugfs_blob_wrapper {
|
||||
#if defined(CONFIG_DEBUG_FS)
|
||||
struct dentry *debugfs_create_file(const char *name, mode_t mode,
|
||||
struct dentry *parent, void *data,
|
||||
struct file_operations *fops);
|
||||
const struct file_operations *fops);
|
||||
|
||||
struct dentry *debugfs_create_dir(const char *name, struct dentry *parent);
|
||||
|
||||
|
||||
@@ -97,6 +97,7 @@ struct io_restrictions {
|
||||
unsigned short hardsect_size;
|
||||
unsigned int max_segment_size;
|
||||
unsigned long seg_boundary_mask;
|
||||
unsigned char no_cluster; /* inverted so that 0 is default */
|
||||
};
|
||||
|
||||
struct dm_target {
|
||||
|
||||
@@ -276,37 +276,5 @@ static inline void dio_set_drvdata (struct dio_dev *d, void *data)
|
||||
dev_set_drvdata(&d->dev, data);
|
||||
}
|
||||
|
||||
/*
|
||||
* A helper function which helps ensure correct dio_driver
|
||||
* setup and cleanup for commonly-encountered hotplug/modular cases
|
||||
*
|
||||
* This MUST stay in a header, as it checks for -DMODULE
|
||||
*/
|
||||
static inline int dio_module_init(struct dio_driver *drv)
|
||||
{
|
||||
int rc = dio_register_driver(drv);
|
||||
|
||||
if (rc > 0)
|
||||
return 0;
|
||||
|
||||
/* iff CONFIG_HOTPLUG and built into kernel, we should
|
||||
* leave the driver around for future hotplug events.
|
||||
* For the module case, a hotplug daemon of some sort
|
||||
* should load a module in response to an insert event. */
|
||||
#if defined(CONFIG_HOTPLUG) && !defined(MODULE)
|
||||
if (rc == 0)
|
||||
return 0;
|
||||
#else
|
||||
if (rc == 0)
|
||||
rc = -ENODEV;
|
||||
#endif
|
||||
|
||||
/* if we get here, we need to clean up DIO driver instance
|
||||
* and return some sort of error */
|
||||
dio_unregister_driver(drv);
|
||||
|
||||
return rc;
|
||||
}
|
||||
|
||||
#endif /* __KERNEL__ */
|
||||
#endif /* ndef _LINUX_DIO_H */
|
||||
|
||||
@@ -80,6 +80,16 @@
|
||||
*
|
||||
* DM_TARGET_MSG:
|
||||
* Pass a message string to the target at a specific offset of a device.
|
||||
*
|
||||
* DM_DEV_SET_GEOMETRY:
|
||||
* Set the geometry of a device by passing in a string in this format:
|
||||
*
|
||||
* "cylinders heads sectors_per_track start_sector"
|
||||
*
|
||||
* Beware that CHS geometry is nearly obsolete and only provided
|
||||
* for compatibility with dm devices that can be booted by a PC
|
||||
* BIOS. See struct hd_geometry for range limits. Also note that
|
||||
* the geometry is erased if the device size changes.
|
||||
*/
|
||||
|
||||
/*
|
||||
@@ -218,6 +228,7 @@ enum {
|
||||
/* Added later */
|
||||
DM_LIST_VERSIONS_CMD,
|
||||
DM_TARGET_MSG_CMD,
|
||||
DM_DEV_SET_GEOMETRY_CMD
|
||||
};
|
||||
|
||||
/*
|
||||
@@ -247,6 +258,7 @@ typedef char ioctl_struct[308];
|
||||
#define DM_TABLE_STATUS_32 _IOWR(DM_IOCTL, DM_TABLE_STATUS_CMD, ioctl_struct)
|
||||
#define DM_LIST_VERSIONS_32 _IOWR(DM_IOCTL, DM_LIST_VERSIONS_CMD, ioctl_struct)
|
||||
#define DM_TARGET_MSG_32 _IOWR(DM_IOCTL, DM_TARGET_MSG_CMD, ioctl_struct)
|
||||
#define DM_DEV_SET_GEOMETRY_32 _IOWR(DM_IOCTL, DM_DEV_SET_GEOMETRY_CMD, ioctl_struct)
|
||||
#endif
|
||||
|
||||
#define DM_IOCTL 0xfd
|
||||
@@ -270,11 +282,12 @@ typedef char ioctl_struct[308];
|
||||
#define DM_LIST_VERSIONS _IOWR(DM_IOCTL, DM_LIST_VERSIONS_CMD, struct dm_ioctl)
|
||||
|
||||
#define DM_TARGET_MSG _IOWR(DM_IOCTL, DM_TARGET_MSG_CMD, struct dm_ioctl)
|
||||
#define DM_DEV_SET_GEOMETRY _IOWR(DM_IOCTL, DM_DEV_SET_GEOMETRY_CMD, struct dm_ioctl)
|
||||
|
||||
#define DM_VERSION_MAJOR 4
|
||||
#define DM_VERSION_MINOR 5
|
||||
#define DM_VERSION_MINOR 6
|
||||
#define DM_VERSION_PATCHLEVEL 0
|
||||
#define DM_VERSION_EXTRA "-ioctl (2005-10-04)"
|
||||
#define DM_VERSION_EXTRA "-ioctl (2006-02-17)"
|
||||
|
||||
/* Status bits */
|
||||
#define DM_READONLY_FLAG (1 << 0) /* In/Out */
|
||||
|
||||
@@ -21,6 +21,7 @@ enum dma_data_direction {
|
||||
#define DMA_30BIT_MASK 0x000000003fffffffULL
|
||||
#define DMA_29BIT_MASK 0x000000001fffffffULL
|
||||
#define DMA_28BIT_MASK 0x000000000fffffffULL
|
||||
#define DMA_24BIT_MASK 0x0000000000ffffffULL
|
||||
|
||||
#include <asm/dma-mapping.h>
|
||||
|
||||
|
||||
@@ -68,6 +68,7 @@ extern char * dmi_get_system_info(int field);
|
||||
extern struct dmi_device * dmi_find_device(int type, const char *name,
|
||||
struct dmi_device *from);
|
||||
extern void dmi_scan_machine(void);
|
||||
extern int dmi_get_year(int field);
|
||||
|
||||
#else
|
||||
|
||||
@@ -75,6 +76,7 @@ static inline int dmi_check_system(struct dmi_system_id *list) { return 0; }
|
||||
static inline char * dmi_get_system_info(int field) { return NULL; }
|
||||
static inline struct dmi_device * dmi_find_device(int type, const char *name,
|
||||
struct dmi_device *from) { return NULL; }
|
||||
static inline int dmi_get_year(int year) { return 0; }
|
||||
|
||||
#endif
|
||||
|
||||
|
||||
@@ -240,19 +240,21 @@ struct efi_memory_map {
|
||||
unsigned long desc_size;
|
||||
};
|
||||
|
||||
#define EFI_INVALID_TABLE_ADDR (~0UL)
|
||||
|
||||
/*
|
||||
* All runtime access to EFI goes through this structure:
|
||||
*/
|
||||
extern struct efi {
|
||||
efi_system_table_t *systab; /* EFI system table */
|
||||
void *mps; /* MPS table */
|
||||
void *acpi; /* ACPI table (IA64 ext 0.71) */
|
||||
void *acpi20; /* ACPI table (ACPI 2.0) */
|
||||
void *smbios; /* SM BIOS table */
|
||||
void *sal_systab; /* SAL system table */
|
||||
void *boot_info; /* boot info table */
|
||||
void *hcdp; /* HCDP table */
|
||||
void *uga; /* UGA table */
|
||||
unsigned long mps; /* MPS table */
|
||||
unsigned long acpi; /* ACPI table (IA64 ext 0.71) */
|
||||
unsigned long acpi20; /* ACPI table (ACPI 2.0) */
|
||||
unsigned long smbios; /* SM BIOS table */
|
||||
unsigned long sal_systab; /* SAL system table */
|
||||
unsigned long boot_info; /* boot info table */
|
||||
unsigned long hcdp; /* HCDP table */
|
||||
unsigned long uga; /* UGA table */
|
||||
efi_get_time_t *get_time;
|
||||
efi_set_time_t *set_time;
|
||||
efi_get_wakeup_time_t *get_wakeup_time;
|
||||
@@ -292,6 +294,8 @@ extern void efi_enter_virtual_mode (void); /* switch EFI to virtual mode, if pos
|
||||
extern u64 efi_get_iobase (void);
|
||||
extern u32 efi_mem_type (unsigned long phys_addr);
|
||||
extern u64 efi_mem_attributes (unsigned long phys_addr);
|
||||
extern int efi_mem_attribute_range (unsigned long phys_addr, unsigned long size,
|
||||
u64 attr);
|
||||
extern int __init efi_uart_console_only (void);
|
||||
extern void efi_initialize_iomem_resources(struct resource *code_resource,
|
||||
struct resource *data_resource);
|
||||
|
||||
@@ -37,7 +37,7 @@ static inline struct efs_sb_info *SUPER_INFO(struct super_block *sb)
|
||||
struct statfs;
|
||||
|
||||
extern struct inode_operations efs_dir_inode_operations;
|
||||
extern struct file_operations efs_dir_operations;
|
||||
extern const struct file_operations efs_dir_operations;
|
||||
extern struct address_space_operations efs_symlink_aops;
|
||||
|
||||
extern void efs_read_inode(struct inode *);
|
||||
|
||||
@@ -36,7 +36,8 @@ struct statfs;
|
||||
* Define EXT3_RESERVATION to reserve data blocks for expanding files
|
||||
*/
|
||||
#define EXT3_DEFAULT_RESERVE_BLOCKS 8
|
||||
#define EXT3_MAX_RESERVE_BLOCKS 1024
|
||||
/*max window size: 1024(direct blocks) + 3([t,d]indirect blocks) */
|
||||
#define EXT3_MAX_RESERVE_BLOCKS 1027
|
||||
#define EXT3_RESERVE_WINDOW_NOT_ALLOCATED 0
|
||||
/*
|
||||
* Always enable hashed directories
|
||||
@@ -732,6 +733,8 @@ struct dir_private_info {
|
||||
extern int ext3_bg_has_super(struct super_block *sb, int group);
|
||||
extern unsigned long ext3_bg_num_gdb(struct super_block *sb, int group);
|
||||
extern int ext3_new_block (handle_t *, struct inode *, unsigned long, int *);
|
||||
extern int ext3_new_blocks (handle_t *, struct inode *, unsigned long,
|
||||
unsigned long *, int *);
|
||||
extern void ext3_free_blocks (handle_t *, struct inode *, unsigned long,
|
||||
unsigned long);
|
||||
extern void ext3_free_blocks_sb (handle_t *, struct super_block *,
|
||||
@@ -775,9 +778,9 @@ extern unsigned long ext3_count_free (struct buffer_head *, unsigned);
|
||||
int ext3_forget(handle_t *, int, struct inode *, struct buffer_head *, int);
|
||||
struct buffer_head * ext3_getblk (handle_t *, struct inode *, long, int, int *);
|
||||
struct buffer_head * ext3_bread (handle_t *, struct inode *, int, int, int *);
|
||||
int ext3_get_block_handle(handle_t *handle, struct inode *inode,
|
||||
sector_t iblock, struct buffer_head *bh_result, int create,
|
||||
int extend_disksize);
|
||||
int ext3_get_blocks_handle(handle_t *handle, struct inode *inode,
|
||||
sector_t iblock, unsigned long maxblocks, struct buffer_head *bh_result,
|
||||
int create, int extend_disksize);
|
||||
|
||||
extern void ext3_read_inode (struct inode *);
|
||||
extern int ext3_write_inode (struct inode *, int);
|
||||
@@ -830,11 +833,11 @@ do { \
|
||||
*/
|
||||
|
||||
/* dir.c */
|
||||
extern struct file_operations ext3_dir_operations;
|
||||
extern const struct file_operations ext3_dir_operations;
|
||||
|
||||
/* file.c */
|
||||
extern struct inode_operations ext3_file_inode_operations;
|
||||
extern struct file_operations ext3_file_operations;
|
||||
extern const struct file_operations ext3_file_operations;
|
||||
|
||||
/* namei.c */
|
||||
extern struct inode_operations ext3_dir_inode_operations;
|
||||
|
||||
@@ -734,7 +734,7 @@ struct fb_tile_ops {
|
||||
|
||||
/* A driver may set this flag to indicate that it does want a set_par to be
|
||||
* called every time when fbcon_switch is executed. The advantage is that with
|
||||
* this flag set you can really be shure that set_par is always called before
|
||||
* this flag set you can really be sure that set_par is always called before
|
||||
* any of the functions dependant on the correct hardware state or altering
|
||||
* that state, even if you are using some broken X releases. The disadvantage
|
||||
* is that it introduces unwanted delays to every console switch if set_par
|
||||
|
||||
@@ -65,6 +65,11 @@ extern int dir_notify_enable;
|
||||
#define FMODE_PREAD 8
|
||||
#define FMODE_PWRITE FMODE_PREAD /* These go hand in hand */
|
||||
|
||||
/* File is being opened for execution. Primary users of this flag are
|
||||
distributed filesystems that can use it to achieve correct ETXTBUSY
|
||||
behavior for cross-node execution/opening_for_writing of files */
|
||||
#define FMODE_EXEC 16
|
||||
|
||||
#define RW_MASK 1
|
||||
#define RWA_MASK 2
|
||||
#define READ 0
|
||||
@@ -247,9 +252,6 @@ extern void __init files_init(unsigned long);
|
||||
struct buffer_head;
|
||||
typedef int (get_block_t)(struct inode *inode, sector_t iblock,
|
||||
struct buffer_head *bh_result, int create);
|
||||
typedef int (get_blocks_t)(struct inode *inode, sector_t iblock,
|
||||
unsigned long max_blocks,
|
||||
struct buffer_head *bh_result, int create);
|
||||
typedef void (dio_iodone_t)(struct kiocb *iocb, loff_t offset,
|
||||
ssize_t bytes, void *private);
|
||||
|
||||
@@ -345,7 +347,7 @@ struct writeback_control;
|
||||
struct address_space_operations {
|
||||
int (*writepage)(struct page *page, struct writeback_control *wbc);
|
||||
int (*readpage)(struct file *, struct page *);
|
||||
int (*sync_page)(struct page *);
|
||||
void (*sync_page)(struct page *);
|
||||
|
||||
/* Write back some dirty pages from this mapping. */
|
||||
int (*writepages)(struct address_space *, struct writeback_control *);
|
||||
@@ -364,7 +366,7 @@ struct address_space_operations {
|
||||
int (*commit_write)(struct file *, struct page *, unsigned, unsigned);
|
||||
/* Unfortunately this kludge is needed for FIBMAP. Don't use it */
|
||||
sector_t (*bmap)(struct address_space *, sector_t);
|
||||
int (*invalidatepage) (struct page *, unsigned long);
|
||||
void (*invalidatepage) (struct page *, unsigned long);
|
||||
int (*releasepage) (struct page *, gfp_t);
|
||||
ssize_t (*direct_IO)(int, struct kiocb *, const struct iovec *iov,
|
||||
loff_t offset, unsigned long nr_segs);
|
||||
@@ -408,6 +410,9 @@ struct block_device {
|
||||
struct list_head bd_inodes;
|
||||
void * bd_holder;
|
||||
int bd_holders;
|
||||
#ifdef CONFIG_SYSFS
|
||||
struct list_head bd_holder_list;
|
||||
#endif
|
||||
struct block_device * bd_contains;
|
||||
unsigned bd_block_size;
|
||||
struct hd_struct * bd_part;
|
||||
@@ -485,13 +490,13 @@ struct inode {
|
||||
unsigned int i_blkbits;
|
||||
unsigned long i_blksize;
|
||||
unsigned long i_version;
|
||||
unsigned long i_blocks;
|
||||
blkcnt_t i_blocks;
|
||||
unsigned short i_bytes;
|
||||
spinlock_t i_lock; /* i_blocks, i_bytes, maybe i_size */
|
||||
struct mutex i_mutex;
|
||||
struct rw_semaphore i_alloc_sem;
|
||||
struct inode_operations *i_op;
|
||||
struct file_operations *i_fop; /* former ->i_op->default_file_ops */
|
||||
const struct file_operations *i_fop; /* former ->i_op->default_file_ops */
|
||||
struct super_block *i_sb;
|
||||
struct file_lock *i_flock;
|
||||
struct address_space *i_mapping;
|
||||
@@ -631,7 +636,7 @@ struct file {
|
||||
} f_u;
|
||||
struct dentry *f_dentry;
|
||||
struct vfsmount *f_vfsmnt;
|
||||
struct file_operations *f_op;
|
||||
const struct file_operations *f_op;
|
||||
atomic_t f_count;
|
||||
unsigned int f_flags;
|
||||
mode_t f_mode;
|
||||
@@ -673,7 +678,6 @@ extern spinlock_t files_lock;
|
||||
#define FL_POSIX 1
|
||||
#define FL_FLOCK 2
|
||||
#define FL_ACCESS 8 /* not trying to lock, just looking */
|
||||
#define FL_LOCKD 16 /* lock held by rpc.lockd */
|
||||
#define FL_LEASE 32 /* lease held on this file */
|
||||
#define FL_SLEEP 128 /* A blocking lock */
|
||||
|
||||
@@ -737,8 +741,6 @@ struct file_lock {
|
||||
#define OFFT_OFFSET_MAX INT_LIMIT(off_t)
|
||||
#endif
|
||||
|
||||
extern struct list_head file_lock_list;
|
||||
|
||||
#include <linux/fcntl.h>
|
||||
|
||||
extern int fcntl_getlk(struct file *, struct flock __user *);
|
||||
@@ -760,10 +762,10 @@ extern void locks_init_lock(struct file_lock *);
|
||||
extern void locks_copy_lock(struct file_lock *, struct file_lock *);
|
||||
extern void locks_remove_posix(struct file *, fl_owner_t);
|
||||
extern void locks_remove_flock(struct file *);
|
||||
extern struct file_lock *posix_test_lock(struct file *, struct file_lock *);
|
||||
extern int posix_test_lock(struct file *, struct file_lock *, struct file_lock *);
|
||||
extern int posix_lock_file_conf(struct file *, struct file_lock *, struct file_lock *);
|
||||
extern int posix_lock_file(struct file *, struct file_lock *);
|
||||
extern int posix_lock_file_wait(struct file *, struct file_lock *);
|
||||
extern void posix_block_lock(struct file_lock *, struct file_lock *);
|
||||
extern int posix_unblock_lock(struct file *, struct file_lock *);
|
||||
extern int posix_locks_deadlock(struct file_lock *, struct file_lock *);
|
||||
extern int flock_lock_file_wait(struct file *filp, struct file_lock *fl);
|
||||
@@ -1092,6 +1094,7 @@ struct super_operations {
|
||||
void (*umount_begin) (struct super_block *);
|
||||
|
||||
int (*show_options)(struct seq_file *, struct vfsmount *);
|
||||
int (*show_stats)(struct seq_file *, struct vfsmount *);
|
||||
|
||||
ssize_t (*quota_read)(struct super_block *, int, char *, size_t, loff_t);
|
||||
ssize_t (*quota_write)(struct super_block *, int, const char *, size_t, loff_t);
|
||||
@@ -1387,11 +1390,11 @@ extern void bd_set_size(struct block_device *, loff_t size);
|
||||
extern void bd_forget(struct inode *inode);
|
||||
extern void bdput(struct block_device *);
|
||||
extern struct block_device *open_by_devnum(dev_t, unsigned);
|
||||
extern struct file_operations def_blk_fops;
|
||||
extern const struct file_operations def_blk_fops;
|
||||
extern struct address_space_operations def_blk_aops;
|
||||
extern struct file_operations def_chr_fops;
|
||||
extern struct file_operations bad_sock_fops;
|
||||
extern struct file_operations def_fifo_fops;
|
||||
extern const struct file_operations def_chr_fops;
|
||||
extern const struct file_operations bad_sock_fops;
|
||||
extern const struct file_operations def_fifo_fops;
|
||||
extern int ioctl_by_bdev(struct block_device *, unsigned, unsigned long);
|
||||
extern int blkdev_ioctl(struct inode *, struct file *, unsigned, unsigned long);
|
||||
extern long compat_blkdev_ioctl(struct file *, unsigned, unsigned long);
|
||||
@@ -1399,12 +1402,19 @@ extern int blkdev_get(struct block_device *, mode_t, unsigned);
|
||||
extern int blkdev_put(struct block_device *);
|
||||
extern int bd_claim(struct block_device *, void *);
|
||||
extern void bd_release(struct block_device *);
|
||||
#ifdef CONFIG_SYSFS
|
||||
extern int bd_claim_by_disk(struct block_device *, void *, struct gendisk *);
|
||||
extern void bd_release_from_disk(struct block_device *, struct gendisk *);
|
||||
#else
|
||||
#define bd_claim_by_disk(bdev, holder, disk) bd_claim(bdev, holder)
|
||||
#define bd_release_from_disk(bdev, disk) bd_release(bdev)
|
||||
#endif
|
||||
|
||||
/* fs/char_dev.c */
|
||||
extern int alloc_chrdev_region(dev_t *, unsigned, unsigned, const char *);
|
||||
extern int register_chrdev_region(dev_t, unsigned, const char *);
|
||||
extern int register_chrdev(unsigned int, const char *,
|
||||
struct file_operations *);
|
||||
const struct file_operations *);
|
||||
extern int unregister_chrdev(unsigned int, const char *);
|
||||
extern void unregister_chrdev_region(dev_t, unsigned);
|
||||
extern int chrdev_open(struct inode *, struct file *);
|
||||
@@ -1434,9 +1444,9 @@ extern void init_special_inode(struct inode *, umode_t, dev_t);
|
||||
extern void make_bad_inode(struct inode *);
|
||||
extern int is_bad_inode(struct inode *);
|
||||
|
||||
extern struct file_operations read_fifo_fops;
|
||||
extern struct file_operations write_fifo_fops;
|
||||
extern struct file_operations rdwr_fifo_fops;
|
||||
extern const struct file_operations read_fifo_fops;
|
||||
extern const struct file_operations write_fifo_fops;
|
||||
extern const struct file_operations rdwr_fifo_fops;
|
||||
|
||||
extern int fs_may_remount_ro(struct super_block *);
|
||||
|
||||
@@ -1558,7 +1568,6 @@ extern void destroy_inode(struct inode *);
|
||||
extern struct inode *new_inode(struct super_block *);
|
||||
extern int remove_suid(struct dentry *);
|
||||
extern void remove_dquot_ref(struct super_block *, int, struct list_head *);
|
||||
extern struct mutex iprune_mutex;
|
||||
|
||||
extern void __insert_inode_hash(struct inode *, unsigned long hashval);
|
||||
extern void remove_inode_hash(struct inode *);
|
||||
@@ -1643,7 +1652,7 @@ static inline void do_generic_file_read(struct file * filp, loff_t *ppos,
|
||||
|
||||
ssize_t __blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
|
||||
struct block_device *bdev, const struct iovec *iov, loff_t offset,
|
||||
unsigned long nr_segs, get_blocks_t get_blocks, dio_iodone_t end_io,
|
||||
unsigned long nr_segs, get_block_t get_block, dio_iodone_t end_io,
|
||||
int lock_type);
|
||||
|
||||
enum {
|
||||
@@ -1654,32 +1663,32 @@ enum {
|
||||
|
||||
static inline ssize_t blockdev_direct_IO(int rw, struct kiocb *iocb,
|
||||
struct inode *inode, struct block_device *bdev, const struct iovec *iov,
|
||||
loff_t offset, unsigned long nr_segs, get_blocks_t get_blocks,
|
||||
loff_t offset, unsigned long nr_segs, get_block_t get_block,
|
||||
dio_iodone_t end_io)
|
||||
{
|
||||
return __blockdev_direct_IO(rw, iocb, inode, bdev, iov, offset,
|
||||
nr_segs, get_blocks, end_io, DIO_LOCKING);
|
||||
nr_segs, get_block, end_io, DIO_LOCKING);
|
||||
}
|
||||
|
||||
static inline ssize_t blockdev_direct_IO_no_locking(int rw, struct kiocb *iocb,
|
||||
struct inode *inode, struct block_device *bdev, const struct iovec *iov,
|
||||
loff_t offset, unsigned long nr_segs, get_blocks_t get_blocks,
|
||||
loff_t offset, unsigned long nr_segs, get_block_t get_block,
|
||||
dio_iodone_t end_io)
|
||||
{
|
||||
return __blockdev_direct_IO(rw, iocb, inode, bdev, iov, offset,
|
||||
nr_segs, get_blocks, end_io, DIO_NO_LOCKING);
|
||||
nr_segs, get_block, end_io, DIO_NO_LOCKING);
|
||||
}
|
||||
|
||||
static inline ssize_t blockdev_direct_IO_own_locking(int rw, struct kiocb *iocb,
|
||||
struct inode *inode, struct block_device *bdev, const struct iovec *iov,
|
||||
loff_t offset, unsigned long nr_segs, get_blocks_t get_blocks,
|
||||
loff_t offset, unsigned long nr_segs, get_block_t get_block,
|
||||
dio_iodone_t end_io)
|
||||
{
|
||||
return __blockdev_direct_IO(rw, iocb, inode, bdev, iov, offset,
|
||||
nr_segs, get_blocks, end_io, DIO_OWN_LOCKING);
|
||||
nr_segs, get_block, end_io, DIO_OWN_LOCKING);
|
||||
}
|
||||
|
||||
extern struct file_operations generic_ro_fops;
|
||||
extern const struct file_operations generic_ro_fops;
|
||||
|
||||
#define special_file(m) (S_ISCHR(m)||S_ISBLK(m)||S_ISFIFO(m)||S_ISSOCK(m))
|
||||
|
||||
@@ -1735,9 +1744,9 @@ extern int simple_commit_write(struct file *file, struct page *page,
|
||||
|
||||
extern struct dentry *simple_lookup(struct inode *, struct dentry *, struct nameidata *);
|
||||
extern ssize_t generic_read_dir(struct file *, char __user *, size_t, loff_t *);
|
||||
extern struct file_operations simple_dir_operations;
|
||||
extern const struct file_operations simple_dir_operations;
|
||||
extern struct inode_operations simple_dir_inode_operations;
|
||||
struct tree_descr { char *name; struct file_operations *ops; int mode; };
|
||||
struct tree_descr { char *name; const struct file_operations *ops; int mode; };
|
||||
struct dentry *d_alloc_name(struct dentry *, const char *);
|
||||
extern int simple_fill_super(struct super_block *, int, struct tree_descr *);
|
||||
extern int simple_pin_fs(char *name, struct vfsmount **mount, int *count);
|
||||
|
||||
@@ -15,6 +15,26 @@
|
||||
|
||||
#include <linux/dnotify.h>
|
||||
#include <linux/inotify.h>
|
||||
#include <linux/audit.h>
|
||||
|
||||
/*
|
||||
* fsnotify_d_instantiate - instantiate a dentry for inode
|
||||
* Called with dcache_lock held.
|
||||
*/
|
||||
static inline void fsnotify_d_instantiate(struct dentry *entry,
|
||||
struct inode *inode)
|
||||
{
|
||||
inotify_d_instantiate(entry, inode);
|
||||
}
|
||||
|
||||
/*
|
||||
* fsnotify_d_move - entry has been moved
|
||||
* Called with dcache_lock and entry->d_lock held.
|
||||
*/
|
||||
static inline void fsnotify_d_move(struct dentry *entry)
|
||||
{
|
||||
inotify_d_move(entry);
|
||||
}
|
||||
|
||||
/*
|
||||
* fsnotify_move - file old_name at old_dir was moved to new_name at new_dir
|
||||
@@ -45,6 +65,8 @@ static inline void fsnotify_move(struct inode *old_dir, struct inode *new_dir,
|
||||
if (source) {
|
||||
inotify_inode_queue_event(source, IN_MOVE_SELF, 0, NULL);
|
||||
}
|
||||
audit_inode_child(old_name, source, old_dir->i_ino);
|
||||
audit_inode_child(new_name, target, new_dir->i_ino);
|
||||
}
|
||||
|
||||
/*
|
||||
@@ -70,19 +92,22 @@ static inline void fsnotify_inoderemove(struct inode *inode)
|
||||
/*
|
||||
* fsnotify_create - 'name' was linked in
|
||||
*/
|
||||
static inline void fsnotify_create(struct inode *inode, const char *name)
|
||||
static inline void fsnotify_create(struct inode *inode, struct dentry *dentry)
|
||||
{
|
||||
inode_dir_notify(inode, DN_CREATE);
|
||||
inotify_inode_queue_event(inode, IN_CREATE, 0, name);
|
||||
inotify_inode_queue_event(inode, IN_CREATE, 0, dentry->d_name.name);
|
||||
audit_inode_child(dentry->d_name.name, dentry->d_inode, inode->i_ino);
|
||||
}
|
||||
|
||||
/*
|
||||
* fsnotify_mkdir - directory 'name' was created
|
||||
*/
|
||||
static inline void fsnotify_mkdir(struct inode *inode, const char *name)
|
||||
static inline void fsnotify_mkdir(struct inode *inode, struct dentry *dentry)
|
||||
{
|
||||
inode_dir_notify(inode, DN_CREATE);
|
||||
inotify_inode_queue_event(inode, IN_CREATE | IN_ISDIR, 0, name);
|
||||
inotify_inode_queue_event(inode, IN_CREATE | IN_ISDIR, 0,
|
||||
dentry->d_name.name);
|
||||
audit_inode_child(dentry->d_name.name, dentry->d_inode, inode->i_ino);
|
||||
}
|
||||
|
||||
/*
|
||||
|
||||
@@ -1,6 +1,8 @@
|
||||
#ifndef _LINUX_FUTEX_H
|
||||
#define _LINUX_FUTEX_H
|
||||
|
||||
#include <linux/sched.h>
|
||||
|
||||
/* Second argument to futex syscall */
|
||||
|
||||
|
||||
@@ -11,10 +13,97 @@
|
||||
#define FUTEX_CMP_REQUEUE 4
|
||||
#define FUTEX_WAKE_OP 5
|
||||
|
||||
/*
|
||||
* Support for robust futexes: the kernel cleans up held futexes at
|
||||
* thread exit time.
|
||||
*/
|
||||
|
||||
/*
|
||||
* Per-lock list entry - embedded in user-space locks, somewhere close
|
||||
* to the futex field. (Note: user-space uses a double-linked list to
|
||||
* achieve O(1) list add and remove, but the kernel only needs to know
|
||||
* about the forward link)
|
||||
*
|
||||
* NOTE: this structure is part of the syscall ABI, and must not be
|
||||
* changed.
|
||||
*/
|
||||
struct robust_list {
|
||||
struct robust_list __user *next;
|
||||
};
|
||||
|
||||
/*
|
||||
* Per-thread list head:
|
||||
*
|
||||
* NOTE: this structure is part of the syscall ABI, and must only be
|
||||
* changed if the change is first communicated with the glibc folks.
|
||||
* (When an incompatible change is done, we'll increase the structure
|
||||
* size, which glibc will detect)
|
||||
*/
|
||||
struct robust_list_head {
|
||||
/*
|
||||
* The head of the list. Points back to itself if empty:
|
||||
*/
|
||||
struct robust_list list;
|
||||
|
||||
/*
|
||||
* This relative offset is set by user-space, it gives the kernel
|
||||
* the relative position of the futex field to examine. This way
|
||||
* we keep userspace flexible, to freely shape its data-structure,
|
||||
* without hardcoding any particular offset into the kernel:
|
||||
*/
|
||||
long futex_offset;
|
||||
|
||||
/*
|
||||
* The death of the thread may race with userspace setting
|
||||
* up a lock's links. So to handle this race, userspace first
|
||||
* sets this field to the address of the to-be-taken lock,
|
||||
* then does the lock acquire, and then adds itself to the
|
||||
* list, and then clears this field. Hence the kernel will
|
||||
* always have full knowledge of all locks that the thread
|
||||
* _might_ have taken. We check the owner TID in any case,
|
||||
* so only truly owned locks will be handled.
|
||||
*/
|
||||
struct robust_list __user *list_op_pending;
|
||||
};
|
||||
|
||||
/*
|
||||
* Are there any waiters for this robust futex:
|
||||
*/
|
||||
#define FUTEX_WAITERS 0x80000000
|
||||
|
||||
/*
|
||||
* The kernel signals via this bit that a thread holding a futex
|
||||
* has exited without unlocking the futex. The kernel also does
|
||||
* a FUTEX_WAKE on such futexes, after setting the bit, to wake
|
||||
* up any possible waiters:
|
||||
*/
|
||||
#define FUTEX_OWNER_DIED 0x40000000
|
||||
|
||||
/*
|
||||
* The rest of the robust-futex field is for the TID:
|
||||
*/
|
||||
#define FUTEX_TID_MASK 0x3fffffff
|
||||
|
||||
/*
|
||||
* This limit protects against a deliberately circular list.
|
||||
* (Not worth introducing an rlimit for it)
|
||||
*/
|
||||
#define ROBUST_LIST_LIMIT 2048
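A minimal user-space sketch of how a thread might use this ABI, illustrative only and not part of this patch: my_lock, robust_init() and robust_lock() are made-up names, __NR_set_robust_list is assumed to be defined for the target architecture, and the robust_list structures are assumed visible to user space with __user expanding to nothing there.

#include <stddef.h>		/* offsetof */
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/futex.h>	/* robust_list, robust_list_head (assumed exported) */

struct my_lock {
	struct robust_list entry;	/* kept next to the futex word */
	unsigned int futex;		/* owner TID plus FUTEX_* status bits */
};

static struct robust_list_head head;

static void robust_init(void)
{
	head.list.next = &head.list;	/* empty list points back at itself */
	head.futex_offset = offsetof(struct my_lock, futex) -
			    offsetof(struct my_lock, entry);
	head.list_op_pending = NULL;
	/* syscall number assumed available on this architecture */
	syscall(__NR_set_robust_list, &head, sizeof(head));
}

static void robust_lock(struct my_lock *lock)
{
	head.list_op_pending = &lock->entry;	/* announce intent first */
	/* ... atomically store our TID into lock->futex here ... */
	lock->entry.next = head.list.next;	/* then link into the list */
	head.list.next = &lock->entry;
	head.list_op_pending = NULL;		/* and clear the pending hint */
}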
|
||||
|
||||
long do_futex(unsigned long uaddr, int op, int val,
|
||||
unsigned long timeout, unsigned long uaddr2, int val2,
|
||||
int val3);
|
||||
|
||||
extern int handle_futex_death(u32 __user *uaddr, struct task_struct *curr);
|
||||
|
||||
#ifdef CONFIG_FUTEX
|
||||
extern void exit_robust_list(struct task_struct *curr);
|
||||
#else
|
||||
static inline void exit_robust_list(struct task_struct *curr)
|
||||
{
|
||||
}
|
||||
#endif
|
||||
|
||||
#define FUTEX_OP_SET 0 /* *(int *)UADDR2 = OPARG; */
|
||||
#define FUTEX_OP_ADD 1 /* *(int *)UADDR2 += OPARG; */
|
||||
#define FUTEX_OP_OR 2 /* *(int *)UADDR2 |= OPARG; */
|
||||
|
||||
@@ -119,7 +119,7 @@ static inline void gameport_set_name(struct gameport *gameport, const char *name
|
||||
}
|
||||
|
||||
/*
|
||||
* Use the following fucntions to manipulate gameport's per-port
|
||||
* Use the following functions to manipulate gameport's per-port
|
||||
* driver-specific data.
|
||||
*/
|
||||
static inline void *gameport_get_drvdata(struct gameport *gameport)
|
||||
@@ -133,7 +133,7 @@ static inline void gameport_set_drvdata(struct gameport *gameport, void *data)
|
||||
}
|
||||
|
||||
/*
|
||||
* Use the following fucntions to pin gameport's driver in process context
|
||||
* Use the following functions to pin gameport's driver in process context
|
||||
*/
|
||||
static inline int gameport_pin_driver(struct gameport *gameport)
|
||||
{
|
||||
|
||||
@@ -78,6 +78,7 @@ struct hd_struct {
|
||||
sector_t start_sect;
|
||||
sector_t nr_sects;
|
||||
struct kobject kobj;
|
||||
struct kobject *holder_dir;
|
||||
unsigned ios[2], sectors[2]; /* READs and WRITEs */
|
||||
int policy, partno;
|
||||
};
|
||||
@@ -89,12 +90,12 @@ struct hd_struct {
|
||||
#define GENHD_FL_SUPPRESS_PARTITION_INFO 32
|
||||
|
||||
struct disk_stats {
|
||||
unsigned sectors[2]; /* READs and WRITEs */
|
||||
unsigned ios[2];
|
||||
unsigned merges[2];
|
||||
unsigned ticks[2];
|
||||
unsigned io_ticks;
|
||||
unsigned time_in_queue;
|
||||
unsigned long sectors[2]; /* READs and WRITEs */
|
||||
unsigned long ios[2];
|
||||
unsigned long merges[2];
|
||||
unsigned long ticks[2];
|
||||
unsigned long io_ticks;
|
||||
unsigned long time_in_queue;
|
||||
};
|
||||
|
||||
struct gendisk {
|
||||
@@ -114,6 +115,8 @@ struct gendisk {
|
||||
int number; /* more of the same */
|
||||
struct device *driverfs_dev;
|
||||
struct kobject kobj;
|
||||
struct kobject *holder_dir;
|
||||
struct kobject *slave_dir;
|
||||
|
||||
struct timer_rand_state *random;
|
||||
int policy;
|
||||
@@ -149,14 +152,14 @@ struct disk_attribute {
|
||||
({ \
|
||||
typeof(gendiskp->dkstats->field) res = 0; \
|
||||
int i; \
|
||||
for_each_cpu(i) \
|
||||
for_each_possible_cpu(i) \
|
||||
res += per_cpu_ptr(gendiskp->dkstats, i)->field; \
|
||||
res; \
|
||||
})
|
||||
|
||||
static inline void disk_stat_set_all(struct gendisk *gendiskp, int value) {
|
||||
int i;
|
||||
for_each_cpu(i)
|
||||
for_each_possible_cpu(i)
|
||||
memset(per_cpu_ptr(gendiskp->dkstats, i), value,
|
||||
sizeof (struct disk_stats));
|
||||
}
|
||||
|
||||
include/linux/gigaset_dev.h (new file, 32 lines)
@@ -0,0 +1,32 @@
|
||||
/*
|
||||
* interface to user space for the gigaset driver
|
||||
*
|
||||
* Copyright (c) 2004 by Hansjoerg Lipp <hjlipp@web.de>
|
||||
*
|
||||
* =====================================================================
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License as
|
||||
* published by the Free Software Foundation; either version 2 of
|
||||
* the License, or (at your option) any later version.
|
||||
* =====================================================================
|
||||
* Version: $Id: gigaset_dev.h,v 1.4.4.4 2005/11/21 22:28:09 hjlipp Exp $
|
||||
* =====================================================================
|
||||
*/
|
||||
|
||||
#ifndef GIGASET_INTERFACE_H
|
||||
#define GIGASET_INTERFACE_H
|
||||
|
||||
#include <linux/ioctl.h>
|
||||
|
||||
#define GIGASET_IOCTL 0x47
|
||||
|
||||
#define GIGVER_DRIVER 0
|
||||
#define GIGVER_COMPAT 1
|
||||
#define GIGVER_FWBASE 2
|
||||
|
||||
#define GIGASET_REDIR _IOWR (GIGASET_IOCTL, 0, int)
|
||||
#define GIGASET_CONFIG _IOWR (GIGASET_IOCTL, 1, int)
|
||||
#define GIGASET_BRKCHARS _IOW (GIGASET_IOCTL, 2, unsigned char[6]) //FIXME [6] okay?
|
||||
#define GIGASET_VERSION _IOWR (GIGASET_IOCTL, 3, unsigned[4])
|
||||
|
||||
#endif
|
||||
@@ -7,6 +7,18 @@
|
||||
|
||||
#include <asm/cacheflush.h>
|
||||
|
||||
#ifndef ARCH_HAS_FLUSH_ANON_PAGE
|
||||
static inline void flush_anon_page(struct page *page, unsigned long vmaddr)
|
||||
{
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifndef ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
|
||||
static inline void flush_kernel_dcache_page(struct page *page)
|
||||
{
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_HIGHMEM
|
||||
|
||||
#include <asm/highmem.h>
|
||||
|
||||
@@ -3,6 +3,8 @@
|
||||
|
||||
#include <linux/compiler.h>
|
||||
|
||||
#ifdef __KERNEL__
|
||||
|
||||
/*
|
||||
* Offsets into HPET Registers
|
||||
*/
|
||||
@@ -85,22 +87,6 @@ struct hpet {
|
||||
#define Tn_FSB_INT_ADDR_SHIFT (32UL)
|
||||
#define Tn_FSB_INT_VAL_MASK (0x00000000ffffffffULL)
|
||||
|
||||
struct hpet_info {
|
||||
unsigned long hi_ireqfreq; /* Hz */
|
||||
unsigned long hi_flags; /* information */
|
||||
unsigned short hi_hpet;
|
||||
unsigned short hi_timer;
|
||||
};
|
||||
|
||||
#define HPET_INFO_PERIODIC 0x0001 /* timer is periodic */
|
||||
|
||||
#define HPET_IE_ON _IO('h', 0x01) /* interrupt on */
|
||||
#define HPET_IE_OFF _IO('h', 0x02) /* interrupt off */
|
||||
#define HPET_INFO _IOR('h', 0x03, struct hpet_info)
|
||||
#define HPET_EPI _IO('h', 0x04) /* enable periodic */
|
||||
#define HPET_DPI _IO('h', 0x05) /* disable periodic */
|
||||
#define HPET_IRQFREQ _IOW('h', 0x6, unsigned long) /* IRQFREQ usec */
|
||||
|
||||
/*
|
||||
* exported interfaces
|
||||
*/
|
||||
@@ -133,4 +119,22 @@ int hpet_register(struct hpet_task *, int);
|
||||
int hpet_unregister(struct hpet_task *);
|
||||
int hpet_control(struct hpet_task *, unsigned int, unsigned long);
|
||||
|
||||
#endif /* __KERNEL__ */
|
||||
|
||||
struct hpet_info {
|
||||
unsigned long hi_ireqfreq; /* Hz */
|
||||
unsigned long hi_flags; /* information */
|
||||
unsigned short hi_hpet;
|
||||
unsigned short hi_timer;
|
||||
};
|
||||
|
||||
#define HPET_INFO_PERIODIC 0x0001 /* timer is periodic */
|
||||
|
||||
#define HPET_IE_ON _IO('h', 0x01) /* interrupt on */
|
||||
#define HPET_IE_OFF _IO('h', 0x02) /* interrupt off */
|
||||
#define HPET_INFO _IOR('h', 0x03, struct hpet_info)
|
||||
#define HPET_EPI _IO('h', 0x04) /* enable periodic */
|
||||
#define HPET_DPI _IO('h', 0x05) /* disable periodic */
|
||||
#define HPET_IRQFREQ _IOW('h', 0x6, unsigned long) /* IRQFREQ usec */
|
||||
|
||||
#endif /* !__HPET__ */
|
||||
|
||||
@@ -34,15 +34,7 @@ enum hrtimer_restart {
|
||||
HRTIMER_RESTART,
|
||||
};
|
||||
|
||||
/*
|
||||
* Timer states:
|
||||
*/
|
||||
enum hrtimer_state {
|
||||
HRTIMER_INACTIVE, /* Timer is inactive */
|
||||
HRTIMER_EXPIRED, /* Timer is expired */
|
||||
HRTIMER_RUNNING, /* Timer is running the callback function */
|
||||
HRTIMER_PENDING, /* Timer is pending */
|
||||
};
|
||||
#define HRTIMER_INACTIVE ((void *)1UL)
|
||||
|
||||
struct hrtimer_base;
|
||||
|
||||
@@ -53,9 +45,7 @@ struct hrtimer_base;
|
||||
* @expires: the absolute expiry time in the hrtimers internal
|
||||
* representation. The time is related to the clock on
|
||||
* which the timer is based.
|
||||
* @state: state of the timer
|
||||
* @function: timer expiry callback function
|
||||
* @data: argument for the callback function
|
||||
* @base: pointer to the timer base (per cpu and per clock)
|
||||
*
|
||||
* The hrtimer structure must be initialized by init_hrtimer_#CLOCKTYPE()
|
||||
@@ -63,23 +53,23 @@ struct hrtimer_base;
|
||||
struct hrtimer {
|
||||
struct rb_node node;
|
||||
ktime_t expires;
|
||||
enum hrtimer_state state;
|
||||
int (*function)(void *);
|
||||
void *data;
|
||||
int (*function)(struct hrtimer *);
|
||||
struct hrtimer_base *base;
|
||||
};
|
||||
|
||||
/**
|
||||
* struct hrtimer_base - the timer base for a specific clock
|
||||
*
|
||||
* @index: clock type index for per_cpu support when moving a timer
|
||||
* to a base on another cpu.
|
||||
* @lock: lock protecting the base and associated timers
|
||||
* @active: red black tree root node for the active timers
|
||||
* @first: pointer to the timer node which expires first
|
||||
* @resolution: the resolution of the clock, in nanoseconds
|
||||
* @get_time: function to retrieve the current time of the clock
|
||||
* @curr_timer: the timer which is executing a callback right now
|
||||
* @index: clock type index for per_cpu support when moving a timer
|
||||
* to a base on another cpu.
|
||||
* @lock: lock protecting the base and associated timers
|
||||
* @active: red black tree root node for the active timers
|
||||
* @first: pointer to the timer node which expires first
|
||||
* @resolution: the resolution of the clock, in nanoseconds
|
||||
* @get_time: function to retrieve the current time of the clock
|
||||
* @get_sofirq_time: function to retrieve the current time from the softirq
|
||||
* @curr_timer: the timer which is executing a callback right now
|
||||
* @softirq_time: the time when running the hrtimer queue in the softirq
|
||||
*/
|
||||
struct hrtimer_base {
|
||||
clockid_t index;
|
||||
@@ -88,7 +78,9 @@ struct hrtimer_base {
|
||||
struct rb_node *first;
|
||||
ktime_t resolution;
|
||||
ktime_t (*get_time)(void);
|
||||
ktime_t (*get_softirq_time)(void);
|
||||
struct hrtimer *curr_timer;
|
||||
ktime_t softirq_time;
|
||||
};
|
||||
|
||||
/*
|
||||
@@ -122,11 +114,12 @@ extern ktime_t hrtimer_get_next_event(void);
|
||||
|
||||
static inline int hrtimer_active(const struct hrtimer *timer)
|
||||
{
|
||||
return timer->state == HRTIMER_PENDING;
|
||||
return timer->node.rb_parent != HRTIMER_INACTIVE;
|
||||
}
|
||||
|
||||
/* Forward a hrtimer so it expires after now: */
|
||||
extern unsigned long hrtimer_forward(struct hrtimer *timer, ktime_t interval);
|
||||
extern unsigned long
|
||||
hrtimer_forward(struct hrtimer *timer, ktime_t now, ktime_t interval);
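A sketch of what a timer user looks like after this change (illustrative; my_dev and my_timer_fn are made-up names): the callback now receives the hrtimer itself, so per-timer context comes from container_of() instead of the removed ->data pointer, and hrtimer_forward() takes the current time explicitly.

struct my_dev {
	struct hrtimer timer;
	/* ... device state, reached via container_of() ... */
};

static int my_timer_fn(struct hrtimer *timer)
{
	struct my_dev *dev = container_of(timer, struct my_dev, timer);
	ktime_t now = timer->base->get_time();	/* assumed time source */

	/* ... service the device ... */
	hrtimer_forward(&dev->timer, now, ktime_set(1, 0));	/* re-arm, 1s period */
	return HRTIMER_RESTART;
}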
|
||||
|
||||
/* Precise sleep: */
|
||||
extern long hrtimer_nanosleep(struct timespec *rqtp,
|
||||
|
||||
@@ -154,7 +154,7 @@ static inline struct hugetlbfs_sb_info *HUGETLBFS_SB(struct super_block *sb)
|
||||
return sb->s_fs_info;
|
||||
}
|
||||
|
||||
extern struct file_operations hugetlbfs_file_operations;
|
||||
extern const struct file_operations hugetlbfs_file_operations;
|
||||
extern struct vm_operations_struct hugetlb_vm_ops;
|
||||
struct file *hugetlb_zero_setup(size_t);
|
||||
int hugetlb_extend_reservation(struct hugetlbfs_inode_info *info,
|
||||
|
||||
@@ -108,6 +108,10 @@
|
||||
#define I2C_DRIVERID_UPD64083 78 /* upd64083 video processor */
|
||||
#define I2C_DRIVERID_UPD64031A 79 /* upd64031a video processor */
|
||||
#define I2C_DRIVERID_SAA717X 80 /* saa717x video encoder */
|
||||
#define I2C_DRIVERID_DS1672 81 /* Dallas/Maxim DS1672 RTC */
|
||||
#define I2C_DRIVERID_X1205 82 /* Xicor/Intersil X1205 RTC */
|
||||
#define I2C_DRIVERID_PCF8563 83 /* Philips PCF8563 RTC */
|
||||
#define I2C_DRIVERID_RS5C372 84 /* Ricoh RS5C372 RTC */
|
||||
|
||||
#define I2C_DRIVERID_I2CDEV 900
|
||||
#define I2C_DRIVERID_ARP 902 /* SMBus ARP Client */
|
||||
@@ -184,6 +188,7 @@
|
||||
#define I2C_HW_B_SAVAGE 0x01001d /* savage framebuffer driver */
|
||||
#define I2C_HW_B_RADEON 0x01001e /* radeon framebuffer driver */
|
||||
#define I2C_HW_B_EM28XX 0x01001f /* em28xx video capture cards */
|
||||
#define I2C_HW_B_CX2341X 0x010020 /* Conexant CX2341X MPEG encoder cards */
|
||||
|
||||
/* --- PCF 8584 based algorithms */
|
||||
#define I2C_HW_P_LP 0x020000 /* Parallel port interface */
|
||||
|
||||
@@ -950,9 +950,7 @@ static inline int i2o_pool_alloc(struct i2o_pool *pool, const char *name,
|
||||
if (!pool->slab)
|
||||
goto free_name;
|
||||
|
||||
pool->mempool =
|
||||
mempool_create(min_nr, mempool_alloc_slab, mempool_free_slab,
|
||||
pool->slab);
|
||||
pool->mempool = mempool_create_slab_pool(min_nr, pool->slab);
|
||||
if (!pool->mempool)
|
||||
goto free_slab;
|
||||
|
||||
|
||||
@@ -69,6 +69,10 @@ extern initcall_t __security_initcall_start[], __security_initcall_end[];
|
||||
|
||||
/* Defined in init/main.c */
|
||||
extern char saved_command_line[];
|
||||
|
||||
/* used by init/main.c */
|
||||
extern void setup_arch(char **);
|
||||
|
||||
#endif
|
||||
|
||||
#ifndef MODULE
|
||||
|
||||
@@ -62,6 +62,8 @@
|
||||
.posix_timers = LIST_HEAD_INIT(sig.posix_timers), \
|
||||
.cpu_timers = INIT_CPU_TIMERS(sig.cpu_timers), \
|
||||
.rlim = INIT_RLIMITS, \
|
||||
.pgrp = 1, \
|
||||
.session = 1, \
|
||||
}
|
||||
|
||||
#define INIT_SIGHAND(sighand) { \
|
||||
|
||||
@@ -71,6 +71,8 @@ struct inotify_event {
|
||||
|
||||
#ifdef CONFIG_INOTIFY
|
||||
|
||||
extern void inotify_d_instantiate(struct dentry *, struct inode *);
|
||||
extern void inotify_d_move(struct dentry *);
|
||||
extern void inotify_inode_queue_event(struct inode *, __u32, __u32,
|
||||
const char *);
|
||||
extern void inotify_dentry_parent_queue_event(struct dentry *, __u32, __u32,
|
||||
@@ -81,6 +83,15 @@ extern u32 inotify_get_cookie(void);
|
||||
|
||||
#else
|
||||
|
||||
static inline void inotify_d_instantiate(struct dentry *dentry,
|
||||
struct inode *inode)
|
||||
{
|
||||
}
|
||||
|
||||
static inline void inotify_d_move(struct dentry *dentry)
|
||||
{
|
||||
}
|
||||
|
||||
static inline void inotify_inode_queue_event(struct inode *inode,
|
||||
__u32 mask, __u32 cookie,
|
||||
const char *filename)
|
||||
|
||||
@@ -957,7 +957,7 @@ struct input_handler {
|
||||
struct input_handle* (*connect)(struct input_handler *handler, struct input_dev *dev, struct input_device_id *id);
|
||||
void (*disconnect)(struct input_handle *handle);
|
||||
|
||||
struct file_operations *fops;
|
||||
const struct file_operations *fops;
|
||||
int minor;
|
||||
char *name;
|
||||
|
||||
|
||||
@@ -36,6 +36,7 @@
|
||||
|
||||
#include <linux/ipmi_msgdefs.h>
|
||||
#include <linux/compiler.h>
|
||||
#include <linux/device.h>
|
||||
|
||||
/*
|
||||
* This file describes an interface to an IPMI driver. You have to
|
||||
@@ -397,7 +398,7 @@ struct ipmi_smi_watcher
|
||||
the watcher list. So you can add and remove users from the
|
||||
IPMI interface, send messages, etc., but you cannot add
|
||||
or remove SMI watchers or SMI interfaces. */
|
||||
void (*new_smi)(int if_num);
|
||||
void (*new_smi)(int if_num, struct device *dev);
|
||||
void (*smi_gone)(int if_num);
|
||||
};
|
||||
|
||||
|
||||
@@ -47,6 +47,7 @@
|
||||
#define IPMI_NETFN_APP_RESPONSE 0x07
|
||||
#define IPMI_GET_DEVICE_ID_CMD 0x01
|
||||
#define IPMI_CLEAR_MSG_FLAGS_CMD 0x30
|
||||
#define IPMI_GET_DEVICE_GUID_CMD 0x08
|
||||
#define IPMI_GET_MSG_FLAGS_CMD 0x31
|
||||
#define IPMI_SEND_MSG_CMD 0x34
|
||||
#define IPMI_GET_MSG_CMD 0x33
|
||||
|
||||
@@ -37,6 +37,9 @@
|
||||
#include <linux/ipmi_msgdefs.h>
|
||||
#include <linux/proc_fs.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/device.h>
|
||||
#include <linux/platform_device.h>
|
||||
#include <linux/ipmi_smi.h>
|
||||
|
||||
/* This files describes the interface for IPMI system management interface
|
||||
drivers to bind into the IPMI message handler. */
|
||||
@@ -113,12 +116,52 @@ struct ipmi_smi_handlers
|
||||
void (*dec_usecount)(void *send_info);
|
||||
};
|
||||
|
||||
struct ipmi_device_id {
|
||||
unsigned char device_id;
|
||||
unsigned char device_revision;
|
||||
unsigned char firmware_revision_1;
|
||||
unsigned char firmware_revision_2;
|
||||
unsigned char ipmi_version;
|
||||
unsigned char additional_device_support;
|
||||
unsigned int manufacturer_id;
|
||||
unsigned int product_id;
|
||||
unsigned char aux_firmware_revision[4];
|
||||
unsigned int aux_firmware_revision_set : 1;
|
||||
};
|
||||
|
||||
#define ipmi_version_major(v) ((v)->ipmi_version & 0xf)
|
||||
#define ipmi_version_minor(v) ((v)->ipmi_version >> 4)
|
||||
|
||||
/* Take a pointer to a raw data buffer and a length and extract device
|
||||
id information from it. The first byte of data must point to the
|
||||
byte from the get device id response after the completion code.
|
||||
The caller is responsible for making sure the length is at least
|
||||
11 and the command completed without error. */
|
||||
static inline void ipmi_demangle_device_id(unsigned char *data,
|
||||
unsigned int data_len,
|
||||
struct ipmi_device_id *id)
|
||||
{
|
||||
id->device_id = data[0];
|
||||
id->device_revision = data[1];
|
||||
id->firmware_revision_1 = data[2];
|
||||
id->firmware_revision_2 = data[3];
|
||||
id->ipmi_version = data[4];
|
||||
id->additional_device_support = data[5];
|
||||
id->manufacturer_id = data[6] | (data[7] << 8) | (data[8] << 16);
|
||||
id->product_id = data[9] | (data[10] << 8);
|
||||
if (data_len >= 15) {
|
||||
memcpy(id->aux_firmware_revision, data+11, 4);
|
||||
id->aux_firmware_revision_set = 1;
|
||||
} else
|
||||
id->aux_firmware_revision_set = 0;
|
||||
}
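A hypothetical caller, sketched only to show the expected buffer layout: rsp is a raw Get Device ID response whose first byte is the completion code, so the demangle helper is handed the bytes after it.

static int example_parse_device_id(unsigned char *rsp, unsigned int rsp_len,
				   struct ipmi_device_id *id)
{
	/* completion code plus at least the 11 mandatory data bytes */
	if (rsp_len < 12 || rsp[0] != 0)
		return -EINVAL;
	ipmi_demangle_device_id(rsp + 1, rsp_len - 1, id);
	return 0;
}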
|
||||
|
||||
/* Add a low-level interface to the IPMI driver. Note that if the
|
||||
interface doesn't know its slave address, it should pass in zero. */
|
||||
int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
|
||||
void *send_info,
|
||||
unsigned char version_major,
|
||||
unsigned char version_minor,
|
||||
struct ipmi_device_id *device_id,
|
||||
struct device *dev,
|
||||
unsigned char slave_addr,
|
||||
ipmi_smi_t *intf);
|
||||
|
||||
|
||||
@@ -114,53 +114,8 @@ static inline void set_native_irq_info(int irq, cpumask_t mask)
|
||||
#if defined (CONFIG_GENERIC_PENDING_IRQ) || defined (CONFIG_IRQBALANCE)
|
||||
extern cpumask_t pending_irq_cpumask[NR_IRQS];
|
||||
|
||||
static inline void set_pending_irq(unsigned int irq, cpumask_t mask)
|
||||
{
|
||||
irq_desc_t *desc = irq_desc + irq;
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&desc->lock, flags);
|
||||
desc->move_irq = 1;
|
||||
pending_irq_cpumask[irq] = mask;
|
||||
spin_unlock_irqrestore(&desc->lock, flags);
|
||||
}
|
||||
|
||||
static inline void
|
||||
move_native_irq(int irq)
|
||||
{
|
||||
cpumask_t tmp;
|
||||
irq_desc_t *desc = irq_descp(irq);
|
||||
|
||||
if (likely (!desc->move_irq))
|
||||
return;
|
||||
|
||||
desc->move_irq = 0;
|
||||
|
||||
if (likely(cpus_empty(pending_irq_cpumask[irq])))
|
||||
return;
|
||||
|
||||
if (!desc->handler->set_affinity)
|
||||
return;
|
||||
|
||||
/* note - we hold the desc->lock */
|
||||
cpus_and(tmp, pending_irq_cpumask[irq], cpu_online_map);
|
||||
|
||||
/*
|
||||
* If there was a valid mask to work with, please
|
||||
* do the disable, re-program, enable sequence.
|
||||
* This is *not* particularly important for level triggered
|
||||
* but in a edge trigger case, we might be setting rte
|
||||
* when an active trigger is comming in. This could
|
||||
* cause some ioapics to mal-function.
|
||||
* Being paranoid i guess!
|
||||
*/
|
||||
if (unlikely(!cpus_empty(tmp))) {
|
||||
desc->handler->disable(irq);
|
||||
desc->handler->set_affinity(irq,tmp);
|
||||
desc->handler->enable(irq);
|
||||
}
|
||||
cpus_clear(pending_irq_cpumask[irq]);
|
||||
}
|
||||
void set_pending_irq(unsigned int irq, cpumask_t mask);
|
||||
void move_native_irq(int irq);
|
||||
|
||||
#ifdef CONFIG_PCI_MSI
|
||||
/*
|
||||
|
||||
@@ -29,6 +29,8 @@
|
||||
#include <linux/stddef.h>
|
||||
#include <linux/bit_spinlock.h>
|
||||
#include <linux/mutex.h>
|
||||
#include <linux/timer.h>
|
||||
|
||||
#include <asm/semaphore.h>
|
||||
#endif
|
||||
|
||||
@@ -787,7 +789,7 @@ struct journal_s
|
||||
unsigned long j_commit_interval;
|
||||
|
||||
/* The timer used to wakeup the commit thread: */
|
||||
struct timer_list *j_commit_timer;
|
||||
struct timer_list j_commit_timer;
|
||||
|
||||
/*
|
||||
* The revoke table: maintains the list of revoked blocks in the
|
||||
@@ -893,7 +895,7 @@ extern int journal_dirty_metadata (handle_t *, struct buffer_head *);
|
||||
extern void journal_release_buffer (handle_t *, struct buffer_head *);
|
||||
extern int journal_forget (handle_t *, struct buffer_head *);
|
||||
extern void journal_sync_buffer (struct buffer_head *);
|
||||
extern int journal_invalidatepage(journal_t *,
|
||||
extern void journal_invalidatepage(journal_t *,
|
||||
struct page *, unsigned long);
|
||||
extern int journal_try_to_free_buffers(journal_t *, struct page *, gfp_t);
|
||||
extern int journal_stop(handle_t *);
|
||||
|
||||
@@ -87,7 +87,7 @@ extern int cond_resched(void);
|
||||
(__x < 0) ? -__x : __x; \
|
||||
})
|
||||
|
||||
extern struct notifier_block *panic_notifier_list;
|
||||
extern struct atomic_notifier_head panic_notifier_list;
|
||||
extern long (*panic_blink)(long time);
|
||||
NORET_TYPE void panic(const char * fmt, ...)
|
||||
__attribute__ ((NORET_AND format (printf, 1, 2)));
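Sketch of a client after the conversion to an atomic notifier head (names hypothetical): registration now goes through atomic_notifier_chain_register() rather than notifier_chain_register().

static int my_panic_event(struct notifier_block *self, unsigned long event,
			  void *ptr)
{
	/* ... dump some hypothetical device state ... */
	return NOTIFY_DONE;
}

static struct notifier_block my_panic_block = {
	.notifier_call = my_panic_event,
};

static int __init my_init(void)
{
	return atomic_notifier_chain_register(&panic_notifier_list,
					      &my_panic_block);
}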
|
||||
@@ -154,9 +154,10 @@ static inline int __attribute_pure__ long_log2(unsigned long x)
|
||||
return r;
|
||||
}
|
||||
|
||||
static inline unsigned long __attribute_const__ roundup_pow_of_two(unsigned long x)
|
||||
static inline unsigned long
|
||||
__attribute_const__ roundup_pow_of_two(unsigned long x)
|
||||
{
|
||||
return (1UL << fls(x - 1));
|
||||
return 1UL << fls_long(x - 1);
|
||||
}
|
||||
|
||||
extern int printk_ratelimit(void);
|
||||
|
||||
@@ -46,7 +46,7 @@ static inline int kstat_irqs(int irq)
|
||||
{
|
||||
int cpu, sum = 0;
|
||||
|
||||
for_each_cpu(cpu)
|
||||
for_each_possible_cpu(cpu)
|
||||
sum += kstat_cpu(cpu).irqs[irq];
|
||||
|
||||
return sum;
|
||||
|
||||
@@ -64,9 +64,6 @@ typedef union {
|
||||
|
||||
#if (BITS_PER_LONG == 64) || defined(CONFIG_KTIME_SCALAR)
|
||||
|
||||
/* Define a ktime_t variable and initialize it to zero: */
|
||||
#define DEFINE_KTIME(kt) ktime_t kt = { .tv64 = 0 }
|
||||
|
||||
/**
|
||||
* ktime_set - Set a ktime_t variable from a seconds/nanoseconds value
|
||||
*
|
||||
@@ -113,9 +110,6 @@ static inline ktime_t timeval_to_ktime(struct timeval tv)
|
||||
/* Map the ktime_t to timeval conversion to ns_to_timeval function */
|
||||
#define ktime_to_timeval(kt) ns_to_timeval((kt).tv64)
|
||||
|
||||
/* Map the ktime_t to clock_t conversion to the inline in jiffies.h: */
|
||||
#define ktime_to_clock_t(kt) nsec_to_clock_t((kt).tv64)
|
||||
|
||||
/* Convert ktime_t to nanoseconds - NOP in the scalar storage format: */
|
||||
#define ktime_to_ns(kt) ((kt).tv64)
|
||||
|
||||
@@ -136,9 +130,6 @@ static inline ktime_t timeval_to_ktime(struct timeval tv)
|
||||
* tv.sec < 0 and 0 >= tv.nsec < NSEC_PER_SEC
|
||||
*/
|
||||
|
||||
/* Define a ktime_t variable and initialize it to zero: */
|
||||
#define DEFINE_KTIME(kt) ktime_t kt = { .tv64 = 0 }
|
||||
|
||||
/* Set a ktime_t variable to a value in sec/nsec representation: */
|
||||
static inline ktime_t ktime_set(const long secs, const unsigned long nsecs)
|
||||
{
|
||||
@@ -254,17 +245,6 @@ static inline struct timeval ktime_to_timeval(const ktime_t kt)
|
||||
.tv_usec = (suseconds_t) (kt.tv.nsec / NSEC_PER_USEC) };
|
||||
}
|
||||
|
||||
/**
|
||||
* ktime_to_clock_t - convert a ktime_t variable to clock_t format
|
||||
* @kt: the ktime_t variable to convert
|
||||
*
|
||||
* Returns a clock_t variable with the converted value
|
||||
*/
|
||||
static inline clock_t ktime_to_clock_t(const ktime_t kt)
|
||||
{
|
||||
return nsec_to_clock_t( (u64) kt.tv.sec * NSEC_PER_SEC + kt.tv.nsec);
|
||||
}
|
||||
|
||||
/**
|
||||
* ktime_to_ns - convert a ktime_t variable to scalar nanoseconds
|
||||
* @kt: the ktime_t variable to convert
|
||||
|
||||
@@ -14,6 +14,7 @@
|
||||
#include <linux/config.h>
|
||||
#include <linux/in.h>
|
||||
#include <linux/fs.h>
|
||||
#include <linux/kref.h>
|
||||
#include <linux/utsname.h>
|
||||
#include <linux/nfsd/nfsfh.h>
|
||||
#include <linux/lockd/bind.h>
|
||||
@@ -58,6 +59,8 @@ struct nlm_host {
|
||||
unsigned long h_expires; /* eligible for GC */
|
||||
struct list_head h_lockowners; /* Lockowners for the client */
|
||||
spinlock_t h_lock;
|
||||
struct list_head h_granted; /* Locks in GRANTED state */
|
||||
struct list_head h_reclaim; /* Locks in RECLAIM state */
|
||||
};
|
||||
|
||||
/*
|
||||
@@ -83,9 +86,9 @@ struct nlm_rqst {
|
||||
struct nlm_host * a_host; /* host handle */
|
||||
struct nlm_args a_args; /* arguments */
|
||||
struct nlm_res a_res; /* result */
|
||||
struct nlm_wait * a_block;
|
||||
struct nlm_block * a_block;
|
||||
unsigned int a_retries; /* Retry count */
|
||||
char a_owner[NLMCLNT_OHSIZE];
|
||||
u8 a_owner[NLMCLNT_OHSIZE];
|
||||
};
|
||||
|
||||
/*
|
||||
@@ -110,16 +113,16 @@ struct nlm_file {
|
||||
*/
|
||||
#define NLM_NEVER (~(unsigned long) 0)
|
||||
struct nlm_block {
|
||||
struct kref b_count; /* Reference count */
|
||||
struct nlm_block * b_next; /* linked list (all blocks) */
|
||||
struct nlm_block * b_fnext; /* linked list (per file) */
|
||||
struct nlm_rqst b_call; /* RPC args & callback info */
|
||||
struct nlm_rqst * b_call; /* RPC args & callback info */
|
||||
struct svc_serv * b_daemon; /* NLM service */
|
||||
struct nlm_host * b_host; /* host handle for RPC clnt */
|
||||
unsigned long b_when; /* next re-xmit */
|
||||
unsigned int b_id; /* block id */
|
||||
unsigned char b_queued; /* re-queued */
|
||||
unsigned char b_granted; /* VFS granted lock */
|
||||
unsigned char b_incall; /* doing callback */
|
||||
unsigned char b_done; /* callback complete */
|
||||
struct nlm_file * b_file; /* file in question */
|
||||
};
|
||||
@@ -145,15 +148,16 @@ extern unsigned long nlmsvc_timeout;
|
||||
/*
|
||||
* Lockd client functions
|
||||
*/
|
||||
struct nlm_rqst * nlmclnt_alloc_call(void);
|
||||
int nlmclnt_prepare_block(struct nlm_rqst *req, struct nlm_host *host, struct file_lock *fl);
|
||||
void nlmclnt_finish_block(struct nlm_rqst *req);
|
||||
long nlmclnt_block(struct nlm_rqst *req, long timeout);
|
||||
struct nlm_rqst * nlm_alloc_call(struct nlm_host *host);
|
||||
void nlm_release_call(struct nlm_rqst *);
|
||||
int nlm_async_call(struct nlm_rqst *, u32, const struct rpc_call_ops *);
|
||||
int nlm_async_reply(struct nlm_rqst *, u32, const struct rpc_call_ops *);
|
||||
struct nlm_wait * nlmclnt_prepare_block(struct nlm_host *host, struct file_lock *fl);
|
||||
void nlmclnt_finish_block(struct nlm_wait *block);
|
||||
int nlmclnt_block(struct nlm_wait *block, struct nlm_rqst *req, long timeout);
|
||||
u32 nlmclnt_grant(const struct sockaddr_in *addr, const struct nlm_lock *);
|
||||
void nlmclnt_recovery(struct nlm_host *, u32);
|
||||
int nlmclnt_reclaim(struct nlm_host *, struct file_lock *);
|
||||
int nlmclnt_setgrantargs(struct nlm_rqst *, struct nlm_lock *);
|
||||
void nlmclnt_freegrantargs(struct nlm_rqst *);
|
||||
|
||||
/*
|
||||
* Host cache
|
||||
@@ -172,7 +176,6 @@ extern struct nlm_host *nlm_find_client(void);
|
||||
/*
|
||||
* Server-side lock handling
|
||||
*/
|
||||
int nlmsvc_async_call(struct nlm_rqst *, u32, const struct rpc_call_ops *);
|
||||
u32 nlmsvc_lock(struct svc_rqst *, struct nlm_file *,
|
||||
struct nlm_lock *, int, struct nlm_cookie *);
|
||||
u32 nlmsvc_unlock(struct nlm_file *, struct nlm_lock *);
|
||||
@@ -180,7 +183,7 @@ u32 nlmsvc_testlock(struct nlm_file *, struct nlm_lock *,
|
||||
struct nlm_lock *);
|
||||
u32 nlmsvc_cancel_blocked(struct nlm_file *, struct nlm_lock *);
|
||||
unsigned long nlmsvc_retry_blocked(void);
|
||||
int nlmsvc_traverse_blocks(struct nlm_host *, struct nlm_file *,
|
||||
void nlmsvc_traverse_blocks(struct nlm_host *, struct nlm_file *,
|
||||
int action);
|
||||
void nlmsvc_grant_reply(struct svc_rqst *, struct nlm_cookie *, u32);
|
||||
|
||||
|
||||
@@ -25,6 +25,6 @@ u32 nlmsvc_share_file(struct nlm_host *, struct nlm_file *,
|
||||
struct nlm_args *);
|
||||
u32 nlmsvc_unshare_file(struct nlm_host *, struct nlm_file *,
|
||||
struct nlm_args *);
|
||||
int nlmsvc_traverse_shares(struct nlm_host *, struct nlm_file *, int);
|
||||
void nlmsvc_traverse_shares(struct nlm_host *, struct nlm_file *, int);
|
||||
|
||||
#endif /* LINUX_LOCKD_SHARE_H */
|
||||
|
||||
@@ -28,6 +28,7 @@ struct nlm_lock {
|
||||
int len; /* length of "caller" */
|
||||
struct nfs_fh fh;
|
||||
struct xdr_netobj oh;
|
||||
u32 svid;
|
||||
struct file_lock fl;
|
||||
};
|
||||
|
||||
|
||||
include/linux/m48t86.h (new file, 16 lines)
@@ -0,0 +1,16 @@
|
||||
/*
|
||||
* ST M48T86 / Dallas DS12887 RTC driver
|
||||
* Copyright (c) 2006 Tower Technologies
|
||||
*
|
||||
* Author: Alessandro Zummo <a.zummo@towertech.it>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License version 2 as
|
||||
* published by the Free Software Foundation.
|
||||
*/
|
||||
|
||||
struct m48t86_ops
|
||||
{
|
||||
void (*writeb)(unsigned char value, unsigned long addr);
|
||||
unsigned char (*readb)(unsigned long addr);
|
||||
};
|
||||
@@ -113,6 +113,7 @@
|
||||
|
||||
#define UBD_MAJOR 98
|
||||
|
||||
#define PP_MAJOR 99
|
||||
#define JSFD_MAJOR 99
|
||||
|
||||
#define PHONE_MAJOR 100
|
||||
|
||||
@@ -77,7 +77,6 @@ extern int remove_memory_block(unsigned long, struct mem_section *, int);
|
||||
|
||||
#define CONFIG_MEM_BLOCK_SIZE (PAGES_PER_SECTION<<PAGE_SHIFT)
|
||||
|
||||
struct notifier_block;
|
||||
|
||||
#endif /* CONFIG_MEMORY_HOTPLUG */
|
||||
|
||||
|
||||
@@ -6,6 +6,8 @@
|
||||
|
||||
#include <linux/wait.h>
|
||||
|
||||
struct kmem_cache;
|
||||
|
||||
typedef void * (mempool_alloc_t)(gfp_t gfp_mask, void *pool_data);
|
||||
typedef void (mempool_free_t)(void *element, void *pool_data);
|
||||
|
||||
@@ -37,5 +39,41 @@ extern void mempool_free(void *element, mempool_t *pool);
|
||||
*/
|
||||
void *mempool_alloc_slab(gfp_t gfp_mask, void *pool_data);
|
||||
void mempool_free_slab(void *element, void *pool_data);
|
||||
static inline mempool_t *
|
||||
mempool_create_slab_pool(int min_nr, struct kmem_cache *kc)
|
||||
{
|
||||
return mempool_create(min_nr, mempool_alloc_slab, mempool_free_slab,
|
||||
(void *) kc);
|
||||
}
|
||||
|
||||
/*
|
||||
* 2 mempool_alloc_t's and a mempool_free_t to kmalloc/kzalloc and kfree
|
||||
* the amount of memory specified by pool_data
|
||||
*/
|
||||
void *mempool_kmalloc(gfp_t gfp_mask, void *pool_data);
|
||||
void *mempool_kzalloc(gfp_t gfp_mask, void *pool_data);
|
||||
void mempool_kfree(void *element, void *pool_data);
|
||||
static inline mempool_t *mempool_create_kmalloc_pool(int min_nr, size_t size)
|
||||
{
|
||||
return mempool_create(min_nr, mempool_kmalloc, mempool_kfree,
|
||||
(void *) size);
|
||||
}
|
||||
static inline mempool_t *mempool_create_kzalloc_pool(int min_nr, size_t size)
|
||||
{
|
||||
return mempool_create(min_nr, mempool_kzalloc, mempool_kfree,
|
||||
(void *) size);
|
||||
}
|
||||
|
||||
/*
|
||||
* A mempool_alloc_t and mempool_free_t for a simple page allocator that
|
||||
* allocates pages of the order specified by pool_data
|
||||
*/
|
||||
void *mempool_alloc_pages(gfp_t gfp_mask, void *pool_data);
|
||||
void mempool_free_pages(void *element, void *pool_data);
|
||||
static inline mempool_t *mempool_create_page_pool(int min_nr, int order)
|
||||
{
|
||||
return mempool_create(min_nr, mempool_alloc_pages, mempool_free_pages,
|
||||
(void *)(long)order);
|
||||
}
|
||||
|
||||
#endif /* _LINUX_MEMPOOL_H */
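Roughly how a caller is expected to use the new wrappers (illustrative; my_cache and my_setup are not real symbols), mirroring the i2o conversion earlier in this patch:

static struct kmem_cache *my_cache;	/* created elsewhere with kmem_cache_create() */
static mempool_t *my_pool;

static int my_setup(void)
{
	/* was: mempool_create(4, mempool_alloc_slab, mempool_free_slab, my_cache) */
	my_pool = mempool_create_slab_pool(4, my_cache);
	return my_pool ? 0 : -ENOMEM;
}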
|
||||
|
||||
@@ -36,7 +36,7 @@ struct class_device;
|
||||
struct miscdevice {
|
||||
int minor;
|
||||
const char *name;
|
||||
struct file_operations *fops;
|
||||
const struct file_operations *fops;
|
||||
struct list_head list;
|
||||
struct device *dev;
|
||||
struct class_device *class;
|
||||
|
||||
@@ -13,6 +13,7 @@
|
||||
#include <linux/numa.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/seqlock.h>
|
||||
#include <linux/nodemask.h>
|
||||
#include <asm/atomic.h>
|
||||
|
||||
/* Free memory management - zoned buddy allocator. */
|
||||
@@ -225,7 +226,6 @@ struct zone {
|
||||
* Discontig memory support fields.
|
||||
*/
|
||||
struct pglist_data *zone_pgdat;
|
||||
struct page *zone_mem_map;
|
||||
/* zone_start_pfn == zone_start_paddr >> PAGE_SHIFT */
|
||||
unsigned long zone_start_pfn;
|
||||
|
||||
@@ -307,7 +307,6 @@ typedef struct pglist_data {
|
||||
unsigned long node_spanned_pages; /* total size of physical page
|
||||
range, including holes */
|
||||
int node_id;
|
||||
struct pglist_data *pgdat_next;
|
||||
wait_queue_head_t kswapd_wait;
|
||||
struct task_struct *kswapd;
|
||||
int kswapd_max_order;
|
||||
@@ -324,8 +323,6 @@ typedef struct pglist_data {
|
||||
|
||||
#include <linux/memory_hotplug.h>
|
||||
|
||||
extern struct pglist_data *pgdat_list;
|
||||
|
||||
void __get_zone_counts(unsigned long *active, unsigned long *inactive,
|
||||
unsigned long *free, struct pglist_data *pgdat);
|
||||
void get_zone_counts(unsigned long *active, unsigned long *inactive,
|
||||
@@ -350,57 +347,6 @@ unsigned long __init node_memmap_size_bytes(int, unsigned long, unsigned long);
|
||||
*/
|
||||
#define zone_idx(zone) ((zone) - (zone)->zone_pgdat->node_zones)
|
||||
|
||||
/**
|
||||
* for_each_pgdat - helper macro to iterate over all nodes
|
||||
* @pgdat - pointer to a pg_data_t variable
|
||||
*
|
||||
* Meant to help with common loops of the form
|
||||
* pgdat = pgdat_list;
|
||||
* while(pgdat) {
|
||||
* ...
|
||||
* pgdat = pgdat->pgdat_next;
|
||||
* }
|
||||
*/
|
||||
#define for_each_pgdat(pgdat) \
|
||||
for (pgdat = pgdat_list; pgdat; pgdat = pgdat->pgdat_next)
|
||||
|
||||
/*
|
||||
* next_zone - helper magic for for_each_zone()
|
||||
* Thanks to William Lee Irwin III for this piece of ingenuity.
|
||||
*/
|
||||
static inline struct zone *next_zone(struct zone *zone)
|
||||
{
|
||||
pg_data_t *pgdat = zone->zone_pgdat;
|
||||
|
||||
if (zone < pgdat->node_zones + MAX_NR_ZONES - 1)
|
||||
zone++;
|
||||
else if (pgdat->pgdat_next) {
|
||||
pgdat = pgdat->pgdat_next;
|
||||
zone = pgdat->node_zones;
|
||||
} else
|
||||
zone = NULL;
|
||||
|
||||
return zone;
|
||||
}
|
||||
|
||||
/**
|
||||
* for_each_zone - helper macro to iterate over all memory zones
|
||||
* @zone - pointer to struct zone variable
|
||||
*
|
||||
* The user only needs to declare the zone variable, for_each_zone
|
||||
* fills it in. This basically means for_each_zone() is an
|
||||
* easier to read version of this piece of code:
|
||||
*
|
||||
* for (pgdat = pgdat_list; pgdat; pgdat = pgdat->node_next)
|
||||
* for (i = 0; i < MAX_NR_ZONES; ++i) {
|
||||
* struct zone * z = pgdat->node_zones + i;
|
||||
* ...
|
||||
* }
|
||||
* }
|
||||
*/
|
||||
#define for_each_zone(zone) \
|
||||
for (zone = pgdat_list->node_zones; zone; zone = next_zone(zone))
|
||||
|
||||
static inline int populated_zone(struct zone *zone)
|
||||
{
|
||||
return (!!zone->present_pages);
|
||||
@@ -472,6 +418,30 @@ extern struct pglist_data contig_page_data;
|
||||
|
||||
#endif /* !CONFIG_NEED_MULTIPLE_NODES */
|
||||
|
||||
extern struct pglist_data *first_online_pgdat(void);
|
||||
extern struct pglist_data *next_online_pgdat(struct pglist_data *pgdat);
|
||||
extern struct zone *next_zone(struct zone *zone);
|
||||
|
||||
/**
|
||||
* for_each_pgdat - helper macro to iterate over all nodes
|
||||
* @pgdat - pointer to a pg_data_t variable
|
||||
*/
|
||||
#define for_each_online_pgdat(pgdat) \
|
||||
for (pgdat = first_online_pgdat(); \
|
||||
pgdat; \
|
||||
pgdat = next_online_pgdat(pgdat))
|
||||
/**
|
||||
* for_each_zone - helper macro to iterate over all memory zones
|
||||
* @zone - pointer to struct zone variable
|
||||
*
|
||||
* The user only needs to declare the zone variable, for_each_zone
|
||||
* fills it in.
|
||||
*/
|
||||
#define for_each_zone(zone) \
|
||||
for (zone = (first_online_pgdat())->node_zones; \
|
||||
zone; \
|
||||
zone = next_zone(zone))
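A short sketch of the new iterators in use (illustrative only): pgdat_list is no longer touched directly, the helpers above do the walking.

static unsigned long count_present_pages(void)
{
	struct zone *zone;
	unsigned long pages = 0;

	for_each_zone(zone)
		if (populated_zone(zone))
			pages += zone->present_pages;

	return pages;
}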
|
||||
|
||||
#ifdef CONFIG_SPARSEMEM
|
||||
#include <asm/sparsemem.h>
|
||||
#endif
|
||||
@@ -602,17 +572,6 @@ static inline struct mem_section *__pfn_to_section(unsigned long pfn)
|
||||
return __nr_to_section(pfn_to_section_nr(pfn));
|
||||
}
|
||||
|
||||
#define pfn_to_page(pfn) \
|
||||
({ \
|
||||
unsigned long __pfn = (pfn); \
|
||||
__section_mem_map_addr(__pfn_to_section(__pfn)) + __pfn; \
|
||||
})
|
||||
#define page_to_pfn(page) \
|
||||
({ \
|
||||
page - __section_mem_map_addr(__nr_to_section( \
|
||||
page_to_section(page))); \
|
||||
})
|
||||
|
||||
static inline int pfn_valid(unsigned long pfn)
|
||||
{
|
||||
if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
|
||||
|
||||
@@ -183,6 +183,7 @@ void *__symbol_get_gpl(const char *symbol);
|
||||
|
||||
/* For every exported symbol, place a struct in the __ksymtab section */
|
||||
#define __EXPORT_SYMBOL(sym, sec) \
|
||||
extern typeof(sym) sym; \
|
||||
__CRC_SYMBOL(sym, sec) \
|
||||
static const char __kstrtab_##sym[] \
|
||||
__attribute__((section("__ksymtab_strings"))) \
|
||||
@@ -554,25 +555,6 @@ static inline void module_remove_driver(struct device_driver *driver)
|
||||
|
||||
/* BELOW HERE ALL THESE ARE OBSOLETE AND WILL VANISH */
|
||||
|
||||
struct obsolete_modparm {
|
||||
char name[64];
|
||||
char type[64-sizeof(void *)];
|
||||
void *addr;
|
||||
};
|
||||
|
||||
static inline void MODULE_PARM_(void) { }
|
||||
#ifdef MODULE
|
||||
/* DEPRECATED: Do not use. */
|
||||
#define MODULE_PARM(var,type) \
|
||||
extern struct obsolete_modparm __parm_##var \
|
||||
__attribute__((section("__obsparm"))); \
|
||||
struct obsolete_modparm __parm_##var = \
|
||||
{ __stringify(var), type, &MODULE_PARM_ }; \
|
||||
__MODULE_PARM_TYPE(var, type);
|
||||
#else
|
||||
#define MODULE_PARM(var,type) static void __attribute__((__unused__)) *__parm_##var = &MODULE_PARM_;
|
||||
#endif
|
||||
|
||||
#define __MODULE_STRING(x) __stringify(x)
|
||||
|
||||
/* Use symbol_get and symbol_put instead. You'll thank me. */
|
||||
|
||||
@@ -162,13 +162,6 @@ extern int param_array_get(char *buffer, struct kernel_param *kp);
|
||||
extern int param_set_copystring(const char *val, struct kernel_param *kp);
|
||||
extern int param_get_string(char *buffer, struct kernel_param *kp);
|
||||
|
||||
int param_array(const char *name,
|
||||
const char *val,
|
||||
unsigned int min, unsigned int max,
|
||||
void *elem, int elemsize,
|
||||
int (*set)(const char *, struct kernel_param *kp),
|
||||
int *num);
|
||||
|
||||
/* for exporting parameters in /sys/parameters */
|
||||
|
||||
struct module;
|
||||
|
||||
@@ -334,7 +334,7 @@ extern int fat_bmap(struct inode *inode, sector_t sector, sector_t *phys,
|
||||
unsigned long *mapped_blocks);
|
||||
|
||||
/* fat/dir.c */
|
||||
extern struct file_operations fat_dir_operations;
|
||||
extern const struct file_operations fat_dir_operations;
|
||||
extern int fat_search_long(struct inode *inode, const unsigned char *name,
|
||||
int name_len, struct fat_slot_info *sinfo);
|
||||
extern int fat_dir_empty(struct inode *dir);
|
||||
@@ -397,7 +397,7 @@ extern int fat_count_free_clusters(struct super_block *sb);
|
||||
/* fat/file.c */
|
||||
extern int fat_generic_ioctl(struct inode *inode, struct file *filp,
|
||||
unsigned int cmd, unsigned long arg);
|
||||
extern struct file_operations fat_file_operations;
|
||||
extern const struct file_operations fat_file_operations;
|
||||
extern struct inode_operations fat_file_inode_operations;
|
||||
extern int fat_notify_change(struct dentry * dentry, struct iattr * attr);
|
||||
extern void fat_truncate(struct inode *inode);
|
||||
@@ -420,6 +420,9 @@ extern int date_dos2unix(unsigned short time, unsigned short date);
|
||||
extern void fat_date_unix2dos(int unix_date, __le16 *time, __le16 *date);
|
||||
extern int fat_sync_bhs(struct buffer_head **bhs, int nr_bhs);
|
||||
|
||||
int fat_cache_init(void);
|
||||
void fat_cache_destroy(void);
|
||||
|
||||
#endif /* __KERNEL__ */
|
||||
|
||||
#endif
|
||||
|
||||
@@ -209,7 +209,7 @@ void ncp_update_inode2(struct inode *, struct ncp_entry_info *);
|
||||
|
||||
/* linux/fs/ncpfs/dir.c */
|
||||
extern struct inode_operations ncp_dir_inode_operations;
|
||||
extern struct file_operations ncp_dir_operations;
|
||||
extern const struct file_operations ncp_dir_operations;
|
||||
int ncp_conn_logged_in(struct super_block *);
|
||||
int ncp_date_dos2unix(__le16 time, __le16 date);
|
||||
void ncp_date_unix2dos(int unix_date, __le16 * time, __le16 * date);
|
||||
@@ -230,7 +230,7 @@ void ncp_unlock_server(struct ncp_server *server);
|
||||
|
||||
/* linux/fs/ncpfs/file.c */
|
||||
extern struct inode_operations ncp_file_inode_operations;
|
||||
extern struct file_operations ncp_file_operations;
|
||||
extern const struct file_operations ncp_file_operations;
|
||||
int ncp_make_open(struct inode *, int);
|
||||
|
||||
/* linux/fs/ncpfs/mmap.c */
|
||||
|
||||
@@ -308,29 +308,30 @@ DECLARE_PER_CPU(struct ip_conntrack_ecache, ip_conntrack_ecache);
|
||||
|
||||
#define CONNTRACK_ECACHE(x) (__get_cpu_var(ip_conntrack_ecache).x)
|
||||
|
||||
extern struct notifier_block *ip_conntrack_chain;
|
||||
extern struct notifier_block *ip_conntrack_expect_chain;
|
||||
extern struct atomic_notifier_head ip_conntrack_chain;
|
||||
extern struct atomic_notifier_head ip_conntrack_expect_chain;
|
||||
|
||||
static inline int ip_conntrack_register_notifier(struct notifier_block *nb)
|
||||
{
|
||||
return notifier_chain_register(&ip_conntrack_chain, nb);
|
||||
return atomic_notifier_chain_register(&ip_conntrack_chain, nb);
|
||||
}
|
||||
|
||||
static inline int ip_conntrack_unregister_notifier(struct notifier_block *nb)
|
||||
{
|
||||
return notifier_chain_unregister(&ip_conntrack_chain, nb);
|
||||
return atomic_notifier_chain_unregister(&ip_conntrack_chain, nb);
|
||||
}
|
||||
|
||||
static inline int
|
||||
ip_conntrack_expect_register_notifier(struct notifier_block *nb)
|
||||
{
|
||||
return notifier_chain_register(&ip_conntrack_expect_chain, nb);
|
||||
return atomic_notifier_chain_register(&ip_conntrack_expect_chain, nb);
|
||||
}
|
||||
|
||||
static inline int
|
||||
ip_conntrack_expect_unregister_notifier(struct notifier_block *nb)
|
||||
{
|
||||
return notifier_chain_unregister(&ip_conntrack_expect_chain, nb);
|
||||
return atomic_notifier_chain_unregister(&ip_conntrack_expect_chain,
|
||||
nb);
|
||||
}
|
||||
|
||||
extern void ip_ct_deliver_cached_events(const struct ip_conntrack *ct);
|
||||
@@ -355,14 +356,14 @@ static inline void ip_conntrack_event(enum ip_conntrack_events event,
|
||||
struct ip_conntrack *ct)
|
||||
{
|
||||
if (is_confirmed(ct) && !is_dying(ct))
|
||||
notifier_call_chain(&ip_conntrack_chain, event, ct);
|
||||
atomic_notifier_call_chain(&ip_conntrack_chain, event, ct);
|
||||
}
|
||||
|
||||
static inline void
|
||||
ip_conntrack_expect_event(enum ip_conntrack_expect_events event,
|
||||
struct ip_conntrack_expect *exp)
|
||||
{
|
||||
notifier_call_chain(&ip_conntrack_expect_chain, event, exp);
|
||||
atomic_notifier_call_chain(&ip_conntrack_expect_chain, event, exp);
|
||||
}
|
||||
#else /* CONFIG_IP_NF_CONNTRACK_EVENTS */
|
||||
static inline void ip_conntrack_event_cache(enum ip_conntrack_events event,
|
||||
|
||||
@@ -56,9 +56,7 @@
|
||||
* When flushing a cluster of dirty pages, there can be different
|
||||
* strategies:
|
||||
*/
|
||||
#define FLUSH_AGING 0 /* only flush old buffers */
|
||||
#define FLUSH_SYNC 1 /* file being synced, or contention */
|
||||
#define FLUSH_WAIT 2 /* wait for completion */
|
||||
#define FLUSH_STABLE 4 /* commit to stable storage */
|
||||
#define FLUSH_LOWPRI 8 /* low priority background flush */
|
||||
#define FLUSH_HIGHPRI 16 /* high priority memory reclaim flush */
|
||||
@@ -78,6 +76,7 @@ struct nfs_access_entry {
|
||||
struct nfs4_state;
|
||||
struct nfs_open_context {
|
||||
atomic_t count;
|
||||
struct vfsmount *vfsmnt;
|
||||
struct dentry *dentry;
|
||||
struct rpc_cred *cred;
|
||||
struct nfs4_state *state;
|
||||
@@ -118,8 +117,7 @@ struct nfs_inode {
|
||||
unsigned long cache_validity; /* bit mask */
|
||||
|
||||
/*
|
||||
* read_cache_jiffies is when we started read-caching this inode,
|
||||
* and read_cache_mtime is the mtime of the inode at that time.
|
||||
* read_cache_jiffies is when we started read-caching this inode.
|
||||
* attrtimeo is for how long the cached information is assumed
|
||||
* to be valid. A successful attribute revalidation doubles
|
||||
* attrtimeo (up to acregmax/acdirmax), a failure resets it to
|
||||
@@ -128,11 +126,6 @@ struct nfs_inode {
|
||||
* We need to revalidate the cached attrs for this inode if
|
||||
*
|
||||
* jiffies - read_cache_jiffies > attrtimeo
|
||||
*
|
||||
* and invalidate any cached data/flush out any dirty pages if
|
||||
* we find that
|
||||
*
|
||||
* mtime != read_cache_mtime
|
||||
*/
|
||||
unsigned long read_cache_jiffies;
|
||||
unsigned long attrtimeo;
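The revalidation condition described in the comment above, written out as a sketch (my_nfs_attrs_stale is a made-up helper, not part of the patch):

static inline int my_nfs_attrs_stale(const struct nfs_inode *nfsi)
{
	/* i.e. jiffies - read_cache_jiffies > attrtimeo */
	return time_after(jiffies, nfsi->read_cache_jiffies + nfsi->attrtimeo);
}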
|
||||
@@ -311,12 +304,9 @@ extern void nfs_begin_attr_update(struct inode *);
|
||||
extern void nfs_end_attr_update(struct inode *);
|
||||
extern void nfs_begin_data_update(struct inode *);
|
||||
extern void nfs_end_data_update(struct inode *);
|
||||
extern struct nfs_open_context *alloc_nfs_open_context(struct dentry *dentry, struct rpc_cred *cred);
|
||||
extern struct nfs_open_context *get_nfs_open_context(struct nfs_open_context *ctx);
|
||||
extern void put_nfs_open_context(struct nfs_open_context *ctx);
|
||||
extern void nfs_file_set_open_context(struct file *filp, struct nfs_open_context *ctx);
|
||||
extern struct nfs_open_context *nfs_find_open_context(struct inode *inode, struct rpc_cred *cred, int mode);
|
||||
extern void nfs_file_clear_open_context(struct file *filp);
|
||||
|
||||
/* linux/net/ipv4/ipconfig.c: trims ip addr off front of name, too. */
|
||||
extern u32 root_nfs_parse_addr(char *name); /*__init*/
|
||||
@@ -334,7 +324,7 @@ extern struct inode_operations nfs_file_inode_operations;
|
||||
#ifdef CONFIG_NFS_V3
|
||||
extern struct inode_operations nfs3_file_inode_operations;
|
||||
#endif /* CONFIG_NFS_V3 */
|
||||
extern struct file_operations nfs_file_operations;
|
||||
extern const struct file_operations nfs_file_operations;
|
||||
extern struct address_space_operations nfs_file_aops;
|
||||
|
||||
static inline struct rpc_cred *nfs_file_cred(struct file *file)
|
||||
@@ -381,7 +371,7 @@ extern struct inode_operations nfs_dir_inode_operations;
|
||||
#ifdef CONFIG_NFS_V3
|
||||
extern struct inode_operations nfs3_dir_inode_operations;
|
||||
#endif /* CONFIG_NFS_V3 */
|
||||
extern struct file_operations nfs_dir_operations;
|
||||
extern const struct file_operations nfs_dir_operations;
|
||||
extern struct dentry_operations nfs_dentry_operations;
|
||||
|
||||
extern int nfs_instantiate(struct dentry *dentry, struct nfs_fh *fh, struct nfs_fattr *fattr);
|
||||
@@ -415,21 +405,22 @@ extern int nfs_writepage(struct page *page, struct writeback_control *wbc);
|
||||
extern int nfs_writepages(struct address_space *, struct writeback_control *);
|
||||
extern int nfs_flush_incompatible(struct file *file, struct page *page);
|
||||
extern int nfs_updatepage(struct file *, struct page *, unsigned int, unsigned int);
|
||||
extern void nfs_writeback_done(struct rpc_task *task, void *data);
|
||||
extern void nfs_writedata_release(void *data);
|
||||
extern int nfs_writeback_done(struct rpc_task *, struct nfs_write_data *);
|
||||
extern void nfs_writedata_release(void *);
|
||||
|
||||
#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
|
||||
extern void nfs_commit_done(struct rpc_task *, void *data);
|
||||
extern void nfs_commit_release(void *data);
|
||||
struct nfs_write_data *nfs_commit_alloc(unsigned int pagecount);
|
||||
void nfs_commit_free(struct nfs_write_data *p);
|
||||
#endif
|
||||
|
||||
/*
|
||||
* Try to write back everything synchronously (but check the
|
||||
* return value!)
|
||||
*/
|
||||
extern int nfs_sync_inode(struct inode *, unsigned long, unsigned int, int);
|
||||
extern int nfs_sync_inode_wait(struct inode *, unsigned long, unsigned int, int);
|
||||
#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
|
||||
extern int nfs_commit_inode(struct inode *, int);
|
||||
extern void nfs_commit_release(void *wdata);
|
||||
#else
|
||||
static inline int
|
||||
nfs_commit_inode(struct inode *inode, int how)
|
||||
@@ -447,7 +438,7 @@ nfs_have_writebacks(struct inode *inode)
|
||||
static inline int
|
||||
nfs_wb_all(struct inode *inode)
|
||||
{
|
||||
int error = nfs_sync_inode(inode, 0, 0, FLUSH_WAIT);
|
||||
int error = nfs_sync_inode_wait(inode, 0, 0, 0);
|
||||
return (error < 0) ? error : 0;
|
||||
}
|
||||
|
||||
@@ -456,8 +447,8 @@ nfs_wb_all(struct inode *inode)
|
||||
*/
|
||||
static inline int nfs_wb_page_priority(struct inode *inode, struct page* page, int how)
|
||||
{
|
||||
int error = nfs_sync_inode(inode, page->index, 1,
|
||||
how | FLUSH_WAIT | FLUSH_STABLE);
|
||||
int error = nfs_sync_inode_wait(inode, page->index, 1,
|
||||
how | FLUSH_STABLE);
|
||||
return (error < 0) ? error : 0;
|
||||
}
|
||||
|
||||
@@ -469,37 +460,8 @@ static inline int nfs_wb_page(struct inode *inode, struct page* page)
|
||||
/*
|
||||
* Allocate and free nfs_write_data structures
|
||||
*/
|
||||
extern mempool_t *nfs_wdata_mempool;
|
||||
|
||||
static inline struct nfs_write_data *nfs_writedata_alloc(unsigned int pagecount)
|
||||
{
|
||||
struct nfs_write_data *p = mempool_alloc(nfs_wdata_mempool, SLAB_NOFS);
|
||||
|
||||
if (p) {
|
||||
memset(p, 0, sizeof(*p));
|
||||
INIT_LIST_HEAD(&p->pages);
|
||||
if (pagecount < NFS_PAGEVEC_SIZE)
|
||||
p->pagevec = &p->page_array[0];
|
||||
else {
|
||||
size_t size = ++pagecount * sizeof(struct page *);
|
||||
p->pagevec = kmalloc(size, GFP_NOFS);
|
||||
if (p->pagevec) {
|
||||
memset(p->pagevec, 0, size);
|
||||
} else {
|
||||
mempool_free(p, nfs_wdata_mempool);
|
||||
p = NULL;
|
||||
}
|
||||
}
|
||||
}
|
||||
return p;
|
||||
}
|
||||
|
||||
static inline void nfs_writedata_free(struct nfs_write_data *p)
|
||||
{
|
||||
if (p && (p->pagevec != &p->page_array[0]))
|
||||
kfree(p->pagevec);
|
||||
mempool_free(p, nfs_wdata_mempool);
|
||||
}
|
||||
extern struct nfs_write_data *nfs_writedata_alloc(unsigned int pagecount);
|
||||
extern void nfs_writedata_free(struct nfs_write_data *p);
|
||||
|
||||
/*
|
||||
* linux/fs/nfs/read.c
|
||||
@@ -507,44 +469,14 @@ static inline void nfs_writedata_free(struct nfs_write_data *p)
|
||||
extern int nfs_readpage(struct file *, struct page *);
|
||||
extern int nfs_readpages(struct file *, struct address_space *,
|
||||
struct list_head *, unsigned);
|
||||
extern void nfs_readpage_result(struct rpc_task *, void *);
|
||||
extern void nfs_readdata_release(void *data);
|
||||
|
||||
extern int nfs_readpage_result(struct rpc_task *, struct nfs_read_data *);
|
||||
extern void nfs_readdata_release(void *data);
|
||||
|
||||
/*
|
||||
* Allocate and free nfs_read_data structures
|
||||
*/
|
||||
extern mempool_t *nfs_rdata_mempool;
|
||||
|
||||
static inline struct nfs_read_data *nfs_readdata_alloc(unsigned int pagecount)
|
||||
{
|
||||
struct nfs_read_data *p = mempool_alloc(nfs_rdata_mempool, SLAB_NOFS);
|
||||
|
||||
if (p) {
|
||||
memset(p, 0, sizeof(*p));
|
||||
INIT_LIST_HEAD(&p->pages);
|
||||
if (pagecount < NFS_PAGEVEC_SIZE)
|
||||
p->pagevec = &p->page_array[0];
|
||||
else {
|
||||
size_t size = ++pagecount * sizeof(struct page *);
|
||||
p->pagevec = kmalloc(size, GFP_NOFS);
|
||||
if (p->pagevec) {
|
||||
memset(p->pagevec, 0, size);
|
||||
} else {
|
||||
mempool_free(p, nfs_rdata_mempool);
|
||||
p = NULL;
|
||||
}
|
||||
}
|
||||
}
|
||||
return p;
|
||||
}
|
||||
|
||||
static inline void nfs_readdata_free(struct nfs_read_data *p)
|
||||
{
|
||||
if (p && (p->pagevec != &p->page_array[0]))
|
||||
kfree(p->pagevec);
|
||||
mempool_free(p, nfs_rdata_mempool);
|
||||
}
|
||||
extern struct nfs_read_data *nfs_readdata_alloc(unsigned int pagecount);
|
||||
extern void nfs_readdata_free(struct nfs_read_data *p);
|
||||
|
||||
/*
|
||||
* linux/fs/nfs3proc.c
|
||||
|
||||
@@ -12,8 +12,8 @@ struct nlm_lockowner;
|
||||
*/
|
||||
struct nfs_lock_info {
|
||||
u32 state;
|
||||
u32 flags;
|
||||
struct nlm_lockowner *owner;
|
||||
struct list_head list;
|
||||
};
|
||||
|
||||
struct nfs4_lock_state;
|
||||
@@ -21,10 +21,4 @@ struct nfs4_lock_info {
|
||||
struct nfs4_lock_state *owner;
|
||||
};
|
||||
|
||||
/*
|
||||
* Lock flag values
|
||||
*/
|
||||
#define NFS_LCK_GRANTED 0x0001 /* lock has been granted */
|
||||
#define NFS_LCK_RECLAIM 0x0002 /* lock marked for reclaiming */
|
||||
|
||||
#endif
|
||||
|
||||
@@ -4,6 +4,8 @@
|
||||
#include <linux/list.h>
|
||||
#include <linux/backing-dev.h>
|
||||
|
||||
struct nfs_iostats;
|
||||
|
||||
/*
|
||||
* NFS client parameters stored in the superblock.
|
||||
*/
|
||||
@@ -12,6 +14,7 @@ struct nfs_server {
|
||||
struct rpc_clnt * client_sys; /* 2nd handle for FSINFO */
|
||||
struct rpc_clnt * client_acl; /* ACL RPC client handle */
|
||||
struct nfs_rpc_ops * rpc_ops; /* NFS protocol vector */
|
||||
struct nfs_iostats * io_stats; /* I/O statistics */
|
||||
struct backing_dev_info backing_dev_info;
|
||||
int flags; /* various flags */
|
||||
unsigned int caps; /* server capabilities */
|
||||
@@ -26,10 +29,13 @@ struct nfs_server {
|
||||
unsigned int acregmax;
|
||||
unsigned int acdirmin;
|
||||
unsigned int acdirmax;
|
||||
unsigned long retrans_timeo; /* retransmit timeout */
|
||||
unsigned int retrans_count; /* number of retransmit tries */
|
||||
unsigned int namelen;
|
||||
char * hostname; /* remote hostname */
|
||||
struct nfs_fh fh;
|
||||
struct sockaddr_in addr;
|
||||
unsigned long mount_time; /* when this fs was mounted */
|
||||
#ifdef CONFIG_NFS_V4
|
||||
/* Our own IP address, as a null-terminated string.
|
||||
* This is used to generate the clientid, and the callback address.
|
||||
|
||||
@@ -695,7 +695,6 @@ struct nfs_read_data {
|
||||
#ifdef CONFIG_NFS_V4
|
||||
unsigned long timestamp; /* For lease renewal */
|
||||
#endif
|
||||
void (*complete) (struct nfs_read_data *, int);
|
||||
struct page *page_array[NFS_PAGEVEC_SIZE + 1];
|
||||
};
|
||||
|
||||
@@ -714,7 +713,6 @@ struct nfs_write_data {
|
||||
#ifdef CONFIG_NFS_V4
|
||||
unsigned long timestamp; /* For lease renewal */
|
||||
#endif
|
||||
void (*complete) (struct nfs_write_data *, int);
|
||||
struct page *page_array[NFS_PAGEVEC_SIZE + 1];
|
||||
};
|
||||
|
||||
@@ -769,8 +767,11 @@ struct nfs_rpc_ops {
|
||||
struct nfs_pathconf *);
|
||||
u32 * (*decode_dirent)(u32 *, struct nfs_entry *, int plus);
|
||||
void (*read_setup) (struct nfs_read_data *);
|
||||
int (*read_done) (struct rpc_task *, struct nfs_read_data *);
|
||||
void (*write_setup) (struct nfs_write_data *, int how);
|
||||
int (*write_done) (struct rpc_task *, struct nfs_write_data *);
|
||||
void (*commit_setup) (struct nfs_write_data *, int how);
|
||||
int (*commit_done) (struct rpc_task *, struct nfs_write_data *);
|
||||
int (*file_open) (struct inode *, struct file *);
|
||||
int (*file_release) (struct inode *, struct file *);
|
||||
int (*lock)(struct file *, int, struct file_lock *);
|
||||
|
||||
@@ -67,7 +67,8 @@ struct svc_expkey {
|
||||
int ek_fsidtype;
|
||||
u32 ek_fsid[3];
|
||||
|
||||
struct svc_export * ek_export;
|
||||
struct vfsmount * ek_mnt;
|
||||
struct dentry * ek_dentry;
|
||||
};
|
||||
|
||||
#define EX_SECURE(exp) (!((exp)->ex_flags & NFSEXP_INSECURE_PORT))
|
||||
@@ -85,9 +86,6 @@ void nfsd_export_shutdown(void);
|
||||
void nfsd_export_flush(void);
|
||||
void exp_readlock(void);
|
||||
void exp_readunlock(void);
|
||||
struct svc_expkey * exp_find_key(struct auth_domain *clp,
|
||||
int fsid_type, u32 *fsidv,
|
||||
struct cache_req *reqp);
|
||||
struct svc_export * exp_get_by_name(struct auth_domain *clp,
|
||||
struct vfsmount *mnt,
|
||||
struct dentry *dentry,
|
||||
@@ -101,35 +99,20 @@ int exp_rootfh(struct auth_domain *,
|
||||
int exp_pseudoroot(struct auth_domain *, struct svc_fh *fhp, struct cache_req *creq);
|
||||
int nfserrno(int errno);
|
||||
|
||||
extern void expkey_put(struct cache_head *item, struct cache_detail *cd);
|
||||
extern void svc_export_put(struct cache_head *item, struct cache_detail *cd);
|
||||
extern struct cache_detail svc_export_cache, svc_expkey_cache;
|
||||
extern struct cache_detail svc_export_cache;
|
||||
|
||||
static inline void exp_put(struct svc_export *exp)
|
||||
{
|
||||
svc_export_put(&exp->h, &svc_export_cache);
|
||||
cache_put(&exp->h, &svc_export_cache);
|
||||
}
|
||||
|
||||
static inline void exp_get(struct svc_export *exp)
|
||||
{
|
||||
cache_get(&exp->h);
|
||||
}
|
||||
static inline struct svc_export *
|
||||
extern struct svc_export *
|
||||
exp_find(struct auth_domain *clp, int fsid_type, u32 *fsidv,
|
||||
struct cache_req *reqp)
|
||||
{
|
||||
struct svc_expkey *ek = exp_find_key(clp, fsid_type, fsidv, reqp);
|
||||
if (ek && !IS_ERR(ek)) {
|
||||
struct svc_export *exp = ek->ek_export;
|
||||
int err;
|
||||
exp_get(exp);
|
||||
expkey_put(&ek->h, &svc_expkey_cache);
|
||||
if ((err = cache_check(&svc_export_cache, &exp->h, reqp)))
|
||||
exp = ERR_PTR(err);
|
||||
return exp;
|
||||
} else
|
||||
return ERR_PTR(PTR_ERR(ek));
|
||||
}
|
||||
struct cache_req *reqp);
|
||||
|
||||
#endif /* __KERNEL__ */
|
||||
|
||||
|
||||
@@ -350,11 +350,15 @@ extern nodemask_t node_possible_map;
|
||||
#define num_possible_nodes() nodes_weight(node_possible_map)
|
||||
#define node_online(node) node_isset((node), node_online_map)
|
||||
#define node_possible(node) node_isset((node), node_possible_map)
|
||||
#define first_online_node first_node(node_online_map)
|
||||
#define next_online_node(nid) next_node((nid), node_online_map)
|
||||
#else
|
||||
#define num_online_nodes() 1
|
||||
#define num_possible_nodes() 1
|
||||
#define node_online(node) ((node) == 0)
|
||||
#define node_possible(node) ((node) == 0)
|
||||
#define first_online_node 0
|
||||
#define next_online_node(nid) (MAX_NUMNODES)
|
||||
#endif
|
||||
|
||||
#define any_online_node(mask) \
|
||||
|
||||
@@ -10,25 +10,107 @@
#ifndef _LINUX_NOTIFIER_H
#define _LINUX_NOTIFIER_H
#include <linux/errno.h>
#include <linux/mutex.h>
#include <linux/rwsem.h>

struct notifier_block
{
    int (*notifier_call)(struct notifier_block *self, unsigned long, void *);
/*
 * Notifier chains are of three types:
 *
 *    Atomic notifier chains: Chain callbacks run in interrupt/atomic
 *        context. Callouts are not allowed to block.
 *    Blocking notifier chains: Chain callbacks run in process context.
 *        Callouts are allowed to block.
 *    Raw notifier chains: There are no restrictions on callbacks,
 *        registration, or unregistration.  All locking and protection
 *        must be provided by the caller.
 *
 * atomic_notifier_chain_register() may be called from an atomic context,
 * but blocking_notifier_chain_register() must be called from a process
 * context.  Ditto for the corresponding _unregister() routines.
 *
 * atomic_notifier_chain_unregister() and blocking_notifier_chain_unregister()
 * _must not_ be called from within the call chain.
 */

struct notifier_block {
    int (*notifier_call)(struct notifier_block *, unsigned long, void *);
    struct notifier_block *next;
    int priority;
};

struct atomic_notifier_head {
    spinlock_t lock;
    struct notifier_block *head;
};

struct blocking_notifier_head {
    struct rw_semaphore rwsem;
    struct notifier_block *head;
};

struct raw_notifier_head {
    struct notifier_block *head;
};

#define ATOMIC_INIT_NOTIFIER_HEAD(name) do {    \
        spin_lock_init(&(name)->lock);    \
        (name)->head = NULL;        \
    } while (0)
#define BLOCKING_INIT_NOTIFIER_HEAD(name) do {    \
        init_rwsem(&(name)->rwsem);    \
        (name)->head = NULL;        \
    } while (0)
#define RAW_INIT_NOTIFIER_HEAD(name) do {    \
        (name)->head = NULL;        \
    } while (0)

#define ATOMIC_NOTIFIER_INIT(name) {    \
        .lock = SPIN_LOCK_UNLOCKED,    \
        .head = NULL }
#define BLOCKING_NOTIFIER_INIT(name) {    \
        .rwsem = __RWSEM_INITIALIZER((name).rwsem),    \
        .head = NULL }
#define RAW_NOTIFIER_INIT(name) {    \
        .head = NULL }

#define ATOMIC_NOTIFIER_HEAD(name)    \
    struct atomic_notifier_head name =    \
        ATOMIC_NOTIFIER_INIT(name)
#define BLOCKING_NOTIFIER_HEAD(name)    \
    struct blocking_notifier_head name =    \
        BLOCKING_NOTIFIER_INIT(name)
#define RAW_NOTIFIER_HEAD(name)    \
    struct raw_notifier_head name =    \
        RAW_NOTIFIER_INIT(name)

#ifdef __KERNEL__

extern int notifier_chain_register(struct notifier_block **list, struct notifier_block *n);
extern int notifier_chain_unregister(struct notifier_block **nl, struct notifier_block *n);
extern int notifier_call_chain(struct notifier_block **n, unsigned long val, void *v);
extern int atomic_notifier_chain_register(struct atomic_notifier_head *,
        struct notifier_block *);
extern int blocking_notifier_chain_register(struct blocking_notifier_head *,
        struct notifier_block *);
extern int raw_notifier_chain_register(struct raw_notifier_head *,
        struct notifier_block *);

extern int atomic_notifier_chain_unregister(struct atomic_notifier_head *,
        struct notifier_block *);
extern int blocking_notifier_chain_unregister(struct blocking_notifier_head *,
        struct notifier_block *);
extern int raw_notifier_chain_unregister(struct raw_notifier_head *,
        struct notifier_block *);

extern int atomic_notifier_call_chain(struct atomic_notifier_head *,
        unsigned long val, void *v);
extern int blocking_notifier_call_chain(struct blocking_notifier_head *,
        unsigned long val, void *v);
extern int raw_notifier_call_chain(struct raw_notifier_head *,
        unsigned long val, void *v);

#define NOTIFY_DONE        0x0000        /* Don't care */
#define NOTIFY_OK        0x0001        /* Suits me */
#define NOTIFY_STOP_MASK    0x8000        /* Don't call further */
#define NOTIFY_BAD    (NOTIFY_STOP_MASK|0x0002)    /* Bad/Veto action */
#define NOTIFY_BAD    (NOTIFY_STOP_MASK|0x0002)
                        /* Bad/Veto action */
/*
 * Clean way to return from the notifier and stop further calls.
*/
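The comment block above spells out the three chain flavours; as a concrete illustration, here is a minimal sketch of how a client could declare, register with, and fire a blocking chain. All identifiers in the sketch (example_chain, example_event, and so on) are invented for illustration and are not part of this patch.

#include <linux/notifier.h>

static BLOCKING_NOTIFIER_HEAD(example_chain);

static int example_event(struct notifier_block *nb, unsigned long action,
                          void *data)
{
    /* Runs in process context; blocking is allowed on this chain type. */
    return NOTIFY_OK;
}

static struct notifier_block example_nb = {
    .notifier_call = example_event,
    .priority = 0,
};

static void example_register(void)
{
    /* Must be called from process context for blocking chains. */
    blocking_notifier_chain_register(&example_chain, &example_nb);
}

static void example_fire(void *event_data)
{
    /* Walks the chain with the head's rw_semaphore held for reading. */
    blocking_notifier_call_chain(&example_chain, 0, event_data);
}

An atomic chain would look the same apart from using the atomic_* calls, with the added restriction that the callback must never sleep.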
@@ -61,6 +61,16 @@ void oprofile_arch_exit(void);
|
||||
*/
|
||||
void oprofile_add_sample(struct pt_regs * const regs, unsigned long event);
|
||||
|
||||
/**
|
||||
* Add an extended sample. Use this when the PC is not from the regs, and
|
||||
* we cannot determine if we're in kernel mode from the regs.
|
||||
*
|
||||
* This function does perform a backtrace.
|
||||
*
|
||||
*/
|
||||
void oprofile_add_ext_sample(unsigned long pc, struct pt_regs * const regs,
|
||||
unsigned long event, int is_kernel);
|
||||
|
||||
/* Use this instead when the PC value is not from the regs. Doesn't
|
||||
* backtrace. */
|
||||
void oprofile_add_pc(unsigned long pc, int is_kernel, unsigned long event);
|
||||
@@ -74,10 +84,10 @@ void oprofile_add_trace(unsigned long eip);
|
||||
* the specified file operations.
|
||||
*/
|
||||
int oprofilefs_create_file(struct super_block * sb, struct dentry * root,
|
||||
char const * name, struct file_operations * fops);
|
||||
char const * name, const struct file_operations * fops);
|
||||
|
||||
int oprofilefs_create_file_perm(struct super_block * sb, struct dentry * root,
|
||||
char const * name, struct file_operations * fops, int perm);
|
||||
char const * name, const struct file_operations * fops, int perm);
|
||||
|
||||
/** Create a file for read/write access to an unsigned long. */
|
||||
int oprofilefs_create_ulong(struct super_block * sb, struct dentry * root,
|
||||
|
||||
@@ -642,6 +642,7 @@
|
||||
#define PCI_DEVICE_ID_SI_965 0x0965
|
||||
#define PCI_DEVICE_ID_SI_5511 0x5511
|
||||
#define PCI_DEVICE_ID_SI_5513 0x5513
|
||||
#define PCI_DEVICE_ID_SI_5517 0x5517
|
||||
#define PCI_DEVICE_ID_SI_5518 0x5518
|
||||
#define PCI_DEVICE_ID_SI_5571 0x5571
|
||||
#define PCI_DEVICE_ID_SI_5581 0x5581
|
||||
@@ -772,6 +773,7 @@
|
||||
#define PCI_DEVICE_ID_MOTOROLA_HAWK 0x4803
|
||||
#define PCI_DEVICE_ID_MOTOROLA_HARRIER 0x480b
|
||||
#define PCI_DEVICE_ID_MOTOROLA_MPC5200 0x5803
|
||||
#define PCI_DEVICE_ID_MOTOROLA_MPC5200B 0x5809
|
||||
|
||||
#define PCI_VENDOR_ID_PROMISE 0x105a
|
||||
#define PCI_DEVICE_ID_PROMISE_20265 0x0d30
|
||||
@@ -1052,6 +1054,7 @@
|
||||
#define PCIE_DEVICE_ID_NVIDIA_GEFORCE_6600_ALT2 0x00f2
|
||||
#define PCIE_DEVICE_ID_NVIDIA_GEFORCE_6200_ALT1 0x00f3
|
||||
#define PCIE_DEVICE_ID_NVIDIA_GEFORCE_6800_GT 0x00f9
|
||||
#define PCIE_DEVICE_ID_NVIDIA_QUADRO_NVS280 0x00fd
|
||||
#define PCI_DEVICE_ID_NVIDIA_GEFORCE_SDR 0x0100
|
||||
#define PCI_DEVICE_ID_NVIDIA_GEFORCE_DDR 0x0101
|
||||
#define PCI_DEVICE_ID_NVIDIA_QUADRO 0x0103
|
||||
|
||||
include/linux/pfn.h (new file, 9 lines)
@@ -0,0 +1,9 @@
#ifndef _LINUX_PFN_H_
#define _LINUX_PFN_H_

#define PFN_ALIGN(x)    (((unsigned long)(x) + (PAGE_SIZE - 1)) & PAGE_MASK)
#define PFN_UP(x)    (((x) + PAGE_SIZE-1) >> PAGE_SHIFT)
#define PFN_DOWN(x)    ((x) >> PAGE_SHIFT)
#define PFN_PHYS(x)    ((x) << PAGE_SHIFT)

#endif
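A quick worked example may help; the numbers below assume 4 KiB pages (PAGE_SHIFT of 12), which is an illustrative assumption, not something fixed by this header:

/*
 *   PFN_DOWN(0x12345)  == 0x12      pfn of the page containing address 0x12345
 *   PFN_UP(0x12345)    == 0x13      first pfn at or above address 0x12345
 *   PFN_PHYS(0x12)     == 0x12000   physical address of pfn 0x12
 *   PFN_ALIGN(0x12345) == 0x13000   address rounded up to a page boundary
 */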
@@ -4,7 +4,6 @@
|
||||
enum pid_type
|
||||
{
|
||||
PIDTYPE_PID,
|
||||
PIDTYPE_TGID,
|
||||
PIDTYPE_PGID,
|
||||
PIDTYPE_SID,
|
||||
PIDTYPE_MAX
|
||||
@@ -38,7 +37,6 @@ extern struct pid *FASTCALL(find_pid(enum pid_type, int));
|
||||
|
||||
extern int alloc_pidmap(void);
|
||||
extern void FASTCALL(free_pidmap(int));
|
||||
extern void switch_exec_pids(struct task_struct *leader, struct task_struct *thread);
|
||||
|
||||
#define do_each_task_pid(who, type, task) \
|
||||
if ((task = find_task_by_pid_type(type, who))) { \
|
||||
|
||||
@@ -11,6 +11,15 @@
#include <linux/mm.h>
#include <asm/uaccess.h>

/* ~832 bytes of stack space used max in sys_select/sys_poll before allocating
   additional memory. */
#define MAX_STACK_ALLOC 832
#define FRONTEND_STACK_ALLOC    256
#define SELECT_STACK_ALLOC    FRONTEND_STACK_ALLOC
#define POLL_STACK_ALLOC    FRONTEND_STACK_ALLOC
#define WQUEUES_STACK_ALLOC    (MAX_STACK_ALLOC - FRONTEND_STACK_ALLOC)
#define N_INLINE_POLL_ENTRIES (WQUEUES_STACK_ALLOC / sizeof(struct poll_table_entry))
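The arithmetic behind these constants, with a purely illustrative entry size (sizeof(struct poll_table_entry) varies by architecture and is not fixed by this header):

/*
 *   WQUEUES_STACK_ALLOC   = 832 - 256 = 576 bytes left for inline wait-queue entries
 *   N_INLINE_POLL_ENTRIES = 576 / sizeof(struct poll_table_entry)
 *                         = 576 / 48 = 12 entries, if the entry happened to be 48 bytes
 *
 * Only when a poll/select call needs more entries than fit in that stack
 * budget does the code fall back to allocating poll_table_page blocks.
 */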
struct poll_table_struct;
|
||||
|
||||
/*
|
||||
@@ -33,6 +42,12 @@ static inline void init_poll_funcptr(poll_table *pt, poll_queue_proc qproc)
|
||||
pt->qproc = qproc;
|
||||
}
|
||||
|
||||
struct poll_table_entry {
|
||||
struct file * filp;
|
||||
wait_queue_t wait;
|
||||
wait_queue_head_t * wait_address;
|
||||
};
|
||||
|
||||
/*
|
||||
* Structures and helpers for sys_poll/sys_poll
|
||||
*/
|
||||
@@ -40,6 +55,8 @@ struct poll_wqueues {
|
||||
poll_table pt;
|
||||
struct poll_table_page * table;
|
||||
int error;
|
||||
int inline_index;
|
||||
struct poll_table_entry inline_entries[N_INLINE_POLL_ENTRIES];
|
||||
};
|
||||
|
||||
extern void poll_initwait(struct poll_wqueues *pwq);
|
||||
|
||||
@@ -14,8 +14,6 @@
|
||||
* Added PPGETMODES/PPGETMODE/PPGETPHASE, Fred Barnes <frmb2@ukc.ac.uk>, 03/01/2001
|
||||
*/
|
||||
|
||||
#define PP_MAJOR 99
|
||||
|
||||
#define PP_IOCTL 'p'
|
||||
|
||||
/* Set mode for read/write (e.g. IEEE1284_MODE_EPP) */
|
||||
|
||||
@@ -4,6 +4,7 @@
|
||||
#include <linux/config.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/fs.h>
|
||||
#include <linux/spinlock.h>
|
||||
#include <asm/atomic.h>
|
||||
|
||||
/*
|
||||
@@ -55,9 +56,9 @@ struct proc_dir_entry {
|
||||
nlink_t nlink;
|
||||
uid_t uid;
|
||||
gid_t gid;
|
||||
unsigned long size;
|
||||
loff_t size;
|
||||
struct inode_operations * proc_iops;
|
||||
struct file_operations * proc_fops;
|
||||
const struct file_operations * proc_fops;
|
||||
get_info_t *get_info;
|
||||
struct module *owner;
|
||||
struct proc_dir_entry *next, *parent, *subdir;
|
||||
@@ -92,6 +93,8 @@ extern struct proc_dir_entry *proc_bus;
|
||||
extern struct proc_dir_entry *proc_root_driver;
|
||||
extern struct proc_dir_entry *proc_root_kcore;
|
||||
|
||||
extern spinlock_t proc_subdir_lock;
|
||||
|
||||
extern void proc_root_init(void);
|
||||
extern void proc_misc_init(void);
|
||||
|
||||
@@ -125,9 +128,9 @@ extern int proc_match(int, const char *,struct proc_dir_entry *);
|
||||
extern int proc_readdir(struct file *, void *, filldir_t);
|
||||
extern struct dentry *proc_lookup(struct inode *, struct dentry *, struct nameidata *);
|
||||
|
||||
extern struct file_operations proc_kcore_operations;
|
||||
extern struct file_operations proc_kmsg_operations;
|
||||
extern struct file_operations ppc_htab_operations;
|
||||
extern const struct file_operations proc_kcore_operations;
|
||||
extern const struct file_operations proc_kmsg_operations;
|
||||
extern const struct file_operations ppc_htab_operations;
|
||||
|
||||
/*
|
||||
* proc_tty.c
|
||||
@@ -186,7 +189,7 @@ static inline struct proc_dir_entry *proc_net_create(const char *name,
|
||||
}
|
||||
|
||||
static inline struct proc_dir_entry *proc_net_fops_create(const char *name,
|
||||
mode_t mode, struct file_operations *fops)
|
||||
mode_t mode, const struct file_operations *fops)
|
||||
{
|
||||
struct proc_dir_entry *res = create_proc_entry(name, mode, proc_net);
|
||||
if (res)
|
||||
|
||||
@@ -118,8 +118,8 @@ extern struct buffer_head *qnx4_bread(struct inode *, int, int);
|
||||
|
||||
extern struct inode_operations qnx4_file_inode_operations;
|
||||
extern struct inode_operations qnx4_dir_inode_operations;
|
||||
extern struct file_operations qnx4_file_operations;
|
||||
extern struct file_operations qnx4_dir_operations;
|
||||
extern const struct file_operations qnx4_file_operations;
|
||||
extern const struct file_operations qnx4_dir_operations;
|
||||
extern int qnx4_is_free(struct super_block *sb, long block);
|
||||
extern int qnx4_set_bitmap(struct super_block *sb, long block, int busy);
|
||||
extern int qnx4_create(struct inode *inode, struct dentry *dentry, int mode, struct nameidata *nd);
|
||||
|
||||
@@ -209,7 +209,6 @@ extern struct dqstats dqstats;
|
||||
#define DQ_FAKE_B 3 /* no limits only usage */
|
||||
#define DQ_READ_B 4 /* dquot was read into memory */
|
||||
#define DQ_ACTIVE_B 5 /* dquot is active (dquot_release not called) */
|
||||
#define DQ_WAITFREE_B 6 /* dquot being waited (by invalidate_dquots) */
|
||||
|
||||
struct dquot {
|
||||
struct hlist_node dq_hash; /* Hash list in memory */
|
||||
|
||||
@@ -45,6 +45,8 @@ do { \
|
||||
(root)->rnode = NULL; \
|
||||
} while (0)
|
||||
|
||||
#define RADIX_TREE_MAX_TAGS 2
|
||||
|
||||
int radix_tree_insert(struct radix_tree_root *, unsigned long, void *);
|
||||
void *radix_tree_lookup(struct radix_tree_root *, unsigned long);
|
||||
void **radix_tree_lookup_slot(struct radix_tree_root *, unsigned long);
|
||||
@@ -55,15 +57,16 @@ radix_tree_gang_lookup(struct radix_tree_root *root, void **results,
|
||||
int radix_tree_preload(gfp_t gfp_mask);
|
||||
void radix_tree_init(void);
|
||||
void *radix_tree_tag_set(struct radix_tree_root *root,
|
||||
unsigned long index, int tag);
|
||||
unsigned long index, unsigned int tag);
|
||||
void *radix_tree_tag_clear(struct radix_tree_root *root,
|
||||
unsigned long index, int tag);
|
||||
unsigned long index, unsigned int tag);
|
||||
int radix_tree_tag_get(struct radix_tree_root *root,
|
||||
unsigned long index, int tag);
|
||||
unsigned long index, unsigned int tag);
|
||||
unsigned int
|
||||
radix_tree_gang_lookup_tag(struct radix_tree_root *root, void **results,
|
||||
unsigned long first_index, unsigned int max_items, int tag);
|
||||
int radix_tree_tagged(struct radix_tree_root *root, int tag);
|
||||
unsigned long first_index, unsigned int max_items,
|
||||
unsigned int tag);
|
||||
int radix_tree_tagged(struct radix_tree_root *root, unsigned int tag);
|
||||
|
||||
static inline void radix_tree_preload_end(void)
|
||||
{
|
||||
|
||||
@@ -92,7 +92,10 @@ extern void md_super_write(mddev_t *mddev, mdk_rdev_t *rdev,
|
||||
extern void md_super_wait(mddev_t *mddev);
|
||||
extern int sync_page_io(struct block_device *bdev, sector_t sector, int size,
|
||||
struct page *page, int rw);
|
||||
extern void md_do_sync(mddev_t *mddev);
|
||||
extern void md_new_event(mddev_t *mddev);
|
||||
|
||||
extern void md_update_sb(mddev_t * mddev);
|
||||
|
||||
#define MD_BUG(x...) { printk("md: bug in file %s, line %d\n", __FILE__, __LINE__); md_print_devices(); }
|
||||
|
||||
|
||||
@@ -132,6 +132,14 @@ struct mddev_s
|
||||
|
||||
char uuid[16];
|
||||
|
||||
/* If the array is being reshaped, we need to record the
|
||||
* new shape and an indication of where we are up to.
|
||||
* This is written to the superblock.
|
||||
* If reshape_position is MaxSector, then no reshape is happening (yet).
|
||||
*/
|
||||
sector_t reshape_position;
|
||||
int delta_disks, new_level, new_layout, new_chunk;
|
||||
|
||||
struct mdk_thread_s *thread; /* management thread */
|
||||
struct mdk_thread_s *sync_thread; /* doing resync or reconstruct */
|
||||
sector_t curr_resync; /* blocks scheduled */
|
||||
@@ -143,6 +151,10 @@ struct mddev_s
|
||||
sector_t resync_mismatches; /* count of sectors where
|
||||
* parity/replica mismatch found
|
||||
*/
|
||||
|
||||
/* allow user-space to request suspension of IO to regions of the array */
|
||||
sector_t suspend_lo;
|
||||
sector_t suspend_hi;
|
||||
/* if zero, use the system-wide default */
|
||||
int sync_speed_min;
|
||||
int sync_speed_max;
|
||||
@@ -157,6 +169,9 @@ struct mddev_s
|
||||
* DONE: thread is done and is waiting to be reaped
|
||||
* REQUEST: user-space has requested a sync (used with SYNC)
|
||||
* CHECK:     user-space request for check-only, no repair
|
||||
* RESHAPE: A reshape is happening
|
||||
*
|
||||
* If neither SYNC or RESHAPE are set, then it is a recovery.
|
||||
*/
|
||||
#define MD_RECOVERY_RUNNING 0
|
||||
#define MD_RECOVERY_SYNC 1
|
||||
@@ -166,10 +181,11 @@ struct mddev_s
|
||||
#define MD_RECOVERY_NEEDED 5
|
||||
#define MD_RECOVERY_REQUESTED 6
|
||||
#define MD_RECOVERY_CHECK 7
|
||||
#define MD_RECOVERY_RESHAPE 8
|
||||
unsigned long recovery;
|
||||
|
||||
int in_sync; /* know to not need resync */
|
||||
struct semaphore reconfig_sem;
|
||||
struct mutex reconfig_mutex;
|
||||
atomic_t active;
|
||||
|
||||
int changed; /* true if we might need to reread partition info */
|
||||
@@ -249,7 +265,8 @@ struct mdk_personality
|
||||
int (*spare_active) (mddev_t *mddev);
|
||||
sector_t (*sync_request)(mddev_t *mddev, sector_t sector_nr, int *skipped, int go_faster);
|
||||
int (*resize) (mddev_t *mddev, sector_t sectors);
|
||||
int (*reshape) (mddev_t *mddev, int raid_disks);
|
||||
int (*check_reshape) (mddev_t *mddev);
|
||||
int (*start_reshape) (mddev_t *mddev);
|
||||
int (*reconfig) (mddev_t *mddev, int layout, int chunk_size);
|
||||
/* quiesce moves between quiescence states
|
||||
* 0 - fully active
|
||||
|
||||
@@ -102,6 +102,18 @@ typedef struct mdp_device_descriptor_s {
#define MD_SB_ERRORS        1

#define MD_SB_BITMAP_PRESENT    8 /* bitmap may be present nearby */

/*
 * Notes:
 * - if an array is being reshaped (restriped) in order to change
 *   the number of active devices in the array, 'raid_disks' will be
 *   the larger of the old and new numbers.  'delta_disks' will
 *   be the "new - old".  So if +ve, raid_disks is the new value, and
 *   "raid_disks-delta_disks" is the old.  If -ve, raid_disks is the
 *   old value and "raid_disks+delta_disks" is the new (smaller) value.
*/
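A short worked example of that sign convention (the disk counts are made up for illustration):

/*
 *   growing 4 -> 6 disks:   raid_disks = 6, delta_disks = +2,
 *                           old count  = raid_disks - delta_disks = 4
 *   shrinking 6 -> 4 disks: raid_disks = 6, delta_disks = -2,
 *                           new count  = raid_disks + delta_disks = 4
 */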
typedef struct mdp_superblock_s {
|
||||
/*
|
||||
* Constant generic information
|
||||
@@ -146,7 +158,13 @@ typedef struct mdp_superblock_s {
|
||||
__u32 cp_events_hi; /* 10 high-order of checkpoint update count */
|
||||
#endif
|
||||
__u32 recovery_cp; /* 11 recovery checkpoint sector count */
|
||||
__u32 gstate_sreserved[MD_SB_GENERIC_STATE_WORDS - 12];
|
||||
/* There are only valid for minor_version > 90 */
|
||||
__u64 reshape_position; /* 12,13 next address in array-space for reshape */
|
||||
__u32 new_level; /* 14 new level we are reshaping to */
|
||||
__u32 delta_disks; /* 15 change in number of raid_disks */
|
||||
__u32 new_layout; /* 16 new layout */
|
||||
__u32 new_chunk; /* 17 new chunk size (bytes) */
|
||||
__u32 gstate_sreserved[MD_SB_GENERIC_STATE_WORDS - 18];
|
||||
|
||||
/*
|
||||
* Personality information
|
||||
@@ -207,7 +225,14 @@ struct mdp_superblock_1 {
|
||||
* NOTE: signed, so bitmap can be before superblock
|
||||
* only meaningful of feature_map[0] is set.
|
||||
*/
|
||||
__u8 pad1[128-100]; /* set to 0 when written */
|
||||
|
||||
/* These are only valid with feature bit '4' */
|
||||
__u64 reshape_position; /* next address in array-space for reshape */
|
||||
__u32 new_level; /* new level we are reshaping to */
|
||||
__u32 delta_disks; /* change in number of raid_disks */
|
||||
__u32 new_layout; /* new layout */
|
||||
__u32 new_chunk; /* new chunk size (bytes) */
|
||||
__u8 pad1[128-124]; /* set to 0 when written */
|
||||
|
||||
/* constant this-device information - 64 bytes */
|
||||
__u64 data_offset; /* sector start of data, often 0 */
|
||||
@@ -240,8 +265,9 @@ struct mdp_superblock_1 {
|
||||
|
||||
/* feature_map bits */
|
||||
#define MD_FEATURE_BITMAP_OFFSET 1
|
||||
#define MD_FEATURE_RESHAPE_ACTIVE 4
|
||||
|
||||
#define MD_FEATURE_ALL 1
|
||||
#define MD_FEATURE_ALL 5
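For clarity, the new value appears to be simply the OR of the feature bits defined above: MD_FEATURE_BITMAP_OFFSET (1) | MD_FEATURE_RESHAPE_ACTIVE (4) == 5.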
|
||||
|
||||
#endif
|
||||
|
||||
|
||||
@@ -135,6 +135,7 @@ struct stripe_head {
|
||||
atomic_t count; /* nr of active thread/requests */
|
||||
spinlock_t lock;
|
||||
int bm_seq; /* sequence number for bitmap flushes */
|
||||
int disks; /* disks in stripe */
|
||||
struct r5dev {
|
||||
struct bio req;
|
||||
struct bio_vec vec;
|
||||
@@ -156,6 +157,7 @@ struct stripe_head {
|
||||
#define R5_ReadError 8 /* seen a read error here recently */
|
||||
#define R5_ReWrite 9 /* have tried to over-write the readerror */
|
||||
|
||||
#define R5_Expanded 10 /* This block now has post-expand data */
|
||||
/*
|
||||
* Write method
|
||||
*/
|
||||
@@ -174,7 +176,9 @@ struct stripe_head {
|
||||
#define STRIPE_DELAYED 6
|
||||
#define STRIPE_DEGRADED 7
|
||||
#define STRIPE_BIT_DELAY 8
|
||||
|
||||
#define STRIPE_EXPANDING 9
|
||||
#define STRIPE_EXPAND_SOURCE 10
|
||||
#define STRIPE_EXPAND_READY 11
|
||||
/*
|
||||
* Plugging:
|
||||
*
|
||||
@@ -211,12 +215,24 @@ struct raid5_private_data {
|
||||
int raid_disks, working_disks, failed_disks;
|
||||
int max_nr_stripes;
|
||||
|
||||
/* used during an expand */
|
||||
sector_t expand_progress; /* MaxSector when no expand happening */
|
||||
sector_t expand_lo; /* from here up to expand_progress it out-of-bounds
|
||||
* as we haven't flushed the metadata yet
|
||||
*/
|
||||
int previous_raid_disks;
|
||||
|
||||
struct list_head handle_list; /* stripes needing handling */
|
||||
struct list_head delayed_list; /* stripes that have plugged requests */
|
||||
struct list_head bitmap_list; /* stripes delaying awaiting bitmap update */
|
||||
atomic_t preread_active_stripes; /* stripes with scheduled io */
|
||||
|
||||
char cache_name[20];
|
||||
atomic_t reshape_stripes; /* stripes with pending writes for reshape */
|
||||
/* unfortunately we need two cache names as we temporarily have
|
||||
* two caches.
|
||||
*/
|
||||
int active_name;
|
||||
char cache_name[2][20];
|
||||
kmem_cache_t *slab_cache; /* for allocating stripes */
|
||||
|
||||
int seq_flush, seq_write;
|
||||
@@ -238,9 +254,10 @@ struct raid5_private_data {
|
||||
wait_queue_head_t wait_for_overlap;
|
||||
int inactive_blocked; /* release of inactive stripes blocked,
|
||||
* waiting for 25% to be free
|
||||
*/
|
||||
*/
|
||||
int pool_size; /* number of disks in stripeheads in pool */
|
||||
spinlock_t device_lock;
|
||||
struct disk_info disks[0];
|
||||
struct disk_info *disks;
|
||||
};
|
||||
|
||||
typedef struct raid5_private_data raid5_conf_t;
|
||||
|
||||
@@ -15,7 +15,7 @@ extern unsigned long ramfs_nommu_get_unmapped_area(struct file *file,
|
||||
extern int ramfs_nommu_mmap(struct file *file, struct vm_area_struct *vma);
|
||||
#endif
|
||||
|
||||
extern struct file_operations ramfs_file_operations;
|
||||
extern const struct file_operations ramfs_file_operations;
|
||||
extern struct vm_operations_struct generic_file_vm_ops;
|
||||
|
||||
#endif
|
||||
|
||||
@@ -1704,6 +1704,11 @@ static inline int reiserfs_transaction_running(struct super_block *s)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline int reiserfs_transaction_free_space(struct reiserfs_transaction_handle *th)
|
||||
{
|
||||
return th->t_blocks_allocated - th->t_blocks_logged;
|
||||
}
|
||||
|
||||
int reiserfs_async_progress_wait(struct super_block *s);
|
||||
|
||||
struct reiserfs_transaction_handle *reiserfs_persistent_transaction(struct
|
||||
@@ -1955,7 +1960,7 @@ int reiserfs_global_version_in_proc(char *buffer, char **start, off_t offset,
|
||||
extern struct inode_operations reiserfs_dir_inode_operations;
|
||||
extern struct inode_operations reiserfs_symlink_inode_operations;
|
||||
extern struct inode_operations reiserfs_special_inode_operations;
|
||||
extern struct file_operations reiserfs_dir_operations;
|
||||
extern const struct file_operations reiserfs_dir_operations;
|
||||
|
||||
/* tail_conversion.c */
|
||||
int direct2indirect(struct reiserfs_transaction_handle *, struct inode *,
|
||||
@@ -1967,7 +1972,7 @@ void reiserfs_unmap_buffer(struct buffer_head *);
|
||||
|
||||
/* file.c */
|
||||
extern struct inode_operations reiserfs_file_inode_operations;
|
||||
extern struct file_operations reiserfs_file_operations;
|
||||
extern const struct file_operations reiserfs_file_operations;
|
||||
extern struct address_space_operations reiserfs_address_space_operations;
|
||||
|
||||
/* fix_nodes.c */
|
||||
|
||||
@@ -101,13 +101,13 @@ static inline void reiserfs_mark_inode_private(struct inode *inode)
|
||||
#else
|
||||
|
||||
#define is_reiserfs_priv_object(inode) 0
|
||||
#define reiserfs_mark_inode_private(inode)
|
||||
#define reiserfs_mark_inode_private(inode) do {;} while(0)
|
||||
#define reiserfs_getxattr NULL
|
||||
#define reiserfs_setxattr NULL
|
||||
#define reiserfs_listxattr NULL
|
||||
#define reiserfs_removexattr NULL
|
||||
#define reiserfs_write_lock_xattrs(sb)
|
||||
#define reiserfs_write_unlock_xattrs(sb)
|
||||
#define reiserfs_write_lock_xattrs(sb) do {;} while(0)
|
||||
#define reiserfs_write_unlock_xattrs(sb) do {;} while(0)
|
||||
#define reiserfs_read_lock_xattrs(sb)
|
||||
#define reiserfs_read_unlock_xattrs(sb)
|
||||
|
||||
|
||||
@@ -1,287 +0,0 @@
|
||||
/*
|
||||
* linux/include/linux/relayfs_fs.h
|
||||
*
|
||||
* Copyright (C) 2002, 2003 - Tom Zanussi (zanussi@us.ibm.com), IBM Corp
|
||||
* Copyright (C) 1999, 2000, 2001, 2002 - Karim Yaghmour (karim@opersys.com)
|
||||
*
|
||||
* RelayFS definitions and declarations
|
||||
*/
|
||||
|
||||
#ifndef _LINUX_RELAYFS_FS_H
|
||||
#define _LINUX_RELAYFS_FS_H
|
||||
|
||||
#include <linux/config.h>
|
||||
#include <linux/types.h>
|
||||
#include <linux/sched.h>
|
||||
#include <linux/wait.h>
|
||||
#include <linux/list.h>
|
||||
#include <linux/fs.h>
|
||||
#include <linux/poll.h>
|
||||
#include <linux/kref.h>
|
||||
|
||||
/*
|
||||
* Tracks changes to rchan/rchan_buf structs
|
||||
*/
|
||||
#define RELAYFS_CHANNEL_VERSION 6
|
||||
|
||||
/*
|
||||
* Per-cpu relay channel buffer
|
||||
*/
|
||||
struct rchan_buf
|
||||
{
|
||||
void *start; /* start of channel buffer */
|
||||
void *data; /* start of current sub-buffer */
|
||||
size_t offset; /* current offset into sub-buffer */
|
||||
size_t subbufs_produced; /* count of sub-buffers produced */
|
||||
size_t subbufs_consumed; /* count of sub-buffers consumed */
|
||||
struct rchan *chan; /* associated channel */
|
||||
wait_queue_head_t read_wait; /* reader wait queue */
|
||||
struct work_struct wake_readers; /* reader wake-up work struct */
|
||||
struct dentry *dentry; /* channel file dentry */
|
||||
struct kref kref; /* channel buffer refcount */
|
||||
struct page **page_array; /* array of current buffer pages */
|
||||
unsigned int page_count; /* number of current buffer pages */
|
||||
unsigned int finalized; /* buffer has been finalized */
|
||||
size_t *padding; /* padding counts per sub-buffer */
|
||||
size_t prev_padding; /* temporary variable */
|
||||
size_t bytes_consumed; /* bytes consumed in cur read subbuf */
|
||||
unsigned int cpu; /* this buf's cpu */
|
||||
} ____cacheline_aligned;
|
||||
|
||||
/*
|
||||
* Relay channel data structure
|
||||
*/
|
||||
struct rchan
|
||||
{
|
||||
u32 version; /* the version of this struct */
|
||||
size_t subbuf_size; /* sub-buffer size */
|
||||
size_t n_subbufs; /* number of sub-buffers per buffer */
|
||||
size_t alloc_size; /* total buffer size allocated */
|
||||
struct rchan_callbacks *cb; /* client callbacks */
|
||||
struct kref kref; /* channel refcount */
|
||||
void *private_data; /* for user-defined data */
|
||||
size_t last_toobig; /* tried to log event > subbuf size */
|
||||
struct rchan_buf *buf[NR_CPUS]; /* per-cpu channel buffers */
|
||||
};
|
||||
|
||||
/*
|
||||
* Relay channel client callbacks
|
||||
*/
|
||||
struct rchan_callbacks
|
||||
{
|
||||
/*
|
||||
* subbuf_start - called on buffer-switch to a new sub-buffer
|
||||
* @buf: the channel buffer containing the new sub-buffer
|
||||
* @subbuf: the start of the new sub-buffer
|
||||
* @prev_subbuf: the start of the previous sub-buffer
|
||||
* @prev_padding: unused space at the end of previous sub-buffer
|
||||
*
|
||||
* The client should return 1 to continue logging, 0 to stop
|
||||
* logging.
|
||||
*
|
||||
* NOTE: subbuf_start will also be invoked when the buffer is
|
||||
* created, so that the first sub-buffer can be initialized
|
||||
* if necessary. In this case, prev_subbuf will be NULL.
|
||||
*
|
||||
* NOTE: the client can reserve bytes at the beginning of the new
|
||||
* sub-buffer by calling subbuf_start_reserve() in this callback.
|
||||
*/
|
||||
int (*subbuf_start) (struct rchan_buf *buf,
|
||||
void *subbuf,
|
||||
void *prev_subbuf,
|
||||
size_t prev_padding);
|
||||
|
||||
/*
|
||||
* buf_mapped - relayfs buffer mmap notification
|
||||
* @buf: the channel buffer
|
||||
* @filp: relayfs file pointer
|
||||
*
|
||||
* Called when a relayfs file is successfully mmapped
|
||||
*/
|
||||
void (*buf_mapped)(struct rchan_buf *buf,
|
||||
struct file *filp);
|
||||
|
||||
/*
|
||||
* buf_unmapped - relayfs buffer unmap notification
|
||||
* @buf: the channel buffer
|
||||
* @filp: relayfs file pointer
|
||||
*
|
||||
* Called when a relayfs file is successfully unmapped
|
||||
*/
|
||||
void (*buf_unmapped)(struct rchan_buf *buf,
|
||||
struct file *filp);
|
||||
/*
|
||||
* create_buf_file - create file to represent a relayfs channel buffer
|
||||
* @filename: the name of the file to create
|
||||
* @parent: the parent of the file to create
|
||||
* @mode: the mode of the file to create
|
||||
* @buf: the channel buffer
|
||||
* @is_global: outparam - set non-zero if the buffer should be global
|
||||
*
|
||||
* Called during relay_open(), once for each per-cpu buffer,
|
||||
* to allow the client to create a file to be used to
|
||||
* represent the corresponding channel buffer. If the file is
|
||||
* created outside of relayfs, the parent must also exist in
|
||||
* that filesystem.
|
||||
*
|
||||
* The callback should return the dentry of the file created
|
||||
* to represent the relay buffer.
|
||||
*
|
||||
* Setting the is_global outparam to a non-zero value will
|
||||
* cause relay_open() to create a single global buffer rather
|
||||
* than the default set of per-cpu buffers.
|
||||
*
|
||||
* See Documentation/filesystems/relayfs.txt for more info.
|
||||
*/
|
||||
struct dentry *(*create_buf_file)(const char *filename,
|
||||
struct dentry *parent,
|
||||
int mode,
|
||||
struct rchan_buf *buf,
|
||||
int *is_global);
|
||||
|
||||
/*
|
||||
* remove_buf_file - remove file representing a relayfs channel buffer
|
||||
* @dentry: the dentry of the file to remove
|
||||
*
|
||||
* Called during relay_close(), once for each per-cpu buffer,
|
||||
* to allow the client to remove a file used to represent a
|
||||
* channel buffer.
|
||||
*
|
||||
* The callback should return 0 if successful, negative if not.
|
||||
*/
|
||||
int (*remove_buf_file)(struct dentry *dentry);
|
||||
};
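For reference, a minimal subbuf_start implementation following the contract documented above. The client names are hypothetical, and since this header is being removed by the patch, the sketch illustrates the old relayfs API only:

static int example_subbuf_start(struct rchan_buf *buf, void *subbuf,
                                void *prev_subbuf, size_t prev_padding)
{
    if (relay_buf_full(buf))
        return 0;        /* stop logging: no space left */

    /* leave room at the start of each sub-buffer for a padding count */
    subbuf_start_reserve(buf, sizeof(size_t));
    return 1;            /* keep logging */
}

static struct rchan_callbacks example_relay_callbacks = {
    .subbuf_start = example_subbuf_start,
};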
|
||||
|
||||
/*
|
||||
* relayfs kernel API, fs/relayfs/relay.c
|
||||
*/
|
||||
|
||||
struct rchan *relay_open(const char *base_filename,
|
||||
struct dentry *parent,
|
||||
size_t subbuf_size,
|
||||
size_t n_subbufs,
|
||||
struct rchan_callbacks *cb);
|
||||
extern void relay_close(struct rchan *chan);
|
||||
extern void relay_flush(struct rchan *chan);
|
||||
extern void relay_subbufs_consumed(struct rchan *chan,
|
||||
unsigned int cpu,
|
||||
size_t consumed);
|
||||
extern void relay_reset(struct rchan *chan);
|
||||
extern int relay_buf_full(struct rchan_buf *buf);
|
||||
|
||||
extern size_t relay_switch_subbuf(struct rchan_buf *buf,
|
||||
size_t length);
|
||||
extern struct dentry *relayfs_create_dir(const char *name,
|
||||
struct dentry *parent);
|
||||
extern int relayfs_remove_dir(struct dentry *dentry);
|
||||
extern struct dentry *relayfs_create_file(const char *name,
|
||||
struct dentry *parent,
|
||||
int mode,
|
||||
struct file_operations *fops,
|
||||
void *data);
|
||||
extern int relayfs_remove_file(struct dentry *dentry);
|
||||
|
||||
/**
|
||||
* relay_write - write data into the channel
|
||||
* @chan: relay channel
|
||||
* @data: data to be written
|
||||
* @length: number of bytes to write
|
||||
*
|
||||
* Writes data into the current cpu's channel buffer.
|
||||
*
|
||||
* Protects the buffer by disabling interrupts. Use this
|
||||
* if you might be logging from interrupt context. Try
|
||||
* __relay_write() if you know you won't be logging from
|
||||
* interrupt context.
|
||||
*/
|
||||
static inline void relay_write(struct rchan *chan,
|
||||
const void *data,
|
||||
size_t length)
|
||||
{
|
||||
unsigned long flags;
|
||||
struct rchan_buf *buf;
|
||||
|
||||
local_irq_save(flags);
|
||||
buf = chan->buf[smp_processor_id()];
|
||||
if (unlikely(buf->offset + length > chan->subbuf_size))
|
||||
length = relay_switch_subbuf(buf, length);
|
||||
memcpy(buf->data + buf->offset, data, length);
|
||||
buf->offset += length;
|
||||
local_irq_restore(flags);
|
||||
}
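As an illustration of the kerneldoc above, a fragment (against this old relayfs API; the channel, record type, and caller are hypothetical) logging a fixed-size record with relay_write():

struct example_record {
    unsigned long when;
    unsigned int  what;
};

static void example_log_event(struct rchan *chan, unsigned int what)
{
    struct example_record rec = {
        .when = jiffies,
        .what = what,
    };

    /* safe from any context: relay_write() disables interrupts itself */
    relay_write(chan, &rec, sizeof(rec));
}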
|
||||
|
||||
/**
|
||||
* __relay_write - write data into the channel
|
||||
* @chan: relay channel
|
||||
* @data: data to be written
|
||||
* @length: number of bytes to write
|
||||
*
|
||||
* Writes data into the current cpu's channel buffer.
|
||||
*
|
||||
* Protects the buffer by disabling preemption. Use
|
||||
* relay_write() if you might be logging from interrupt
|
||||
* context.
|
||||
*/
|
||||
static inline void __relay_write(struct rchan *chan,
|
||||
const void *data,
|
||||
size_t length)
|
||||
{
|
||||
struct rchan_buf *buf;
|
||||
|
||||
buf = chan->buf[get_cpu()];
|
||||
if (unlikely(buf->offset + length > buf->chan->subbuf_size))
|
||||
length = relay_switch_subbuf(buf, length);
|
||||
memcpy(buf->data + buf->offset, data, length);
|
||||
buf->offset += length;
|
||||
put_cpu();
|
||||
}
|
||||
|
||||
/**
|
||||
* relay_reserve - reserve slot in channel buffer
|
||||
* @chan: relay channel
|
||||
* @length: number of bytes to reserve
|
||||
*
|
||||
* Returns pointer to reserved slot, NULL if full.
|
||||
*
|
||||
* Reserves a slot in the current cpu's channel buffer.
|
||||
* Does not protect the buffer at all - caller must provide
|
||||
* appropriate synchronization.
|
||||
*/
|
||||
static inline void *relay_reserve(struct rchan *chan, size_t length)
|
||||
{
|
||||
void *reserved;
|
||||
struct rchan_buf *buf = chan->buf[smp_processor_id()];
|
||||
|
||||
if (unlikely(buf->offset + length > buf->chan->subbuf_size)) {
|
||||
length = relay_switch_subbuf(buf, length);
|
||||
if (!length)
|
||||
return NULL;
|
||||
}
|
||||
reserved = buf->data + buf->offset;
|
||||
buf->offset += length;
|
||||
|
||||
return reserved;
|
||||
}
|
||||
|
||||
/**
|
||||
* subbuf_start_reserve - reserve bytes at the start of a sub-buffer
|
||||
* @buf: relay channel buffer
|
||||
* @length: number of bytes to reserve
|
||||
*
|
||||
* Helper function used to reserve bytes at the beginning of
|
||||
* a sub-buffer in the subbuf_start() callback.
|
||||
*/
|
||||
static inline void subbuf_start_reserve(struct rchan_buf *buf,
|
||||
size_t length)
|
||||
{
|
||||
BUG_ON(length >= buf->chan->subbuf_size - 1);
|
||||
buf->offset = length;
|
||||
}
|
||||
|
||||
/*
|
||||
* exported relay file operations, fs/relayfs/inode.c
|
||||
*/
|
||||
extern struct file_operations relay_file_operations;
|
||||
|
||||
#endif /* _LINUX_RELAYFS_FS_H */
|
||||
|
||||
@@ -91,10 +91,102 @@ struct rtc_pll_info {
|
||||
#define RTC_PLL_GET _IOR('p', 0x11, struct rtc_pll_info) /* Get PLL correction */
|
||||
#define RTC_PLL_SET _IOW('p', 0x12, struct rtc_pll_info) /* Set PLL correction */
|
||||
|
||||
/* interrupt flags */
|
||||
#define RTC_IRQF 0x80 /* any of the following is active */
|
||||
#define RTC_PF 0x40
|
||||
#define RTC_AF 0x20
|
||||
#define RTC_UF 0x10
|
||||
|
||||
#ifdef __KERNEL__
|
||||
|
||||
#include <linux/interrupt.h>
|
||||
|
||||
extern int rtc_month_days(unsigned int month, unsigned int year);
|
||||
extern int rtc_valid_tm(struct rtc_time *tm);
|
||||
extern int rtc_tm_to_time(struct rtc_time *tm, unsigned long *time);
|
||||
extern void rtc_time_to_tm(unsigned long time, struct rtc_time *tm);
|
||||
|
||||
#include <linux/device.h>
|
||||
#include <linux/seq_file.h>
|
||||
#include <linux/cdev.h>
|
||||
#include <linux/poll.h>
|
||||
#include <linux/mutex.h>
|
||||
|
||||
extern struct class *rtc_class;
|
||||
|
||||
struct rtc_class_ops {
|
||||
int (*open)(struct device *);
|
||||
void (*release)(struct device *);
|
||||
int (*ioctl)(struct device *, unsigned int, unsigned long);
|
||||
int (*read_time)(struct device *, struct rtc_time *);
|
||||
int (*set_time)(struct device *, struct rtc_time *);
|
||||
int (*read_alarm)(struct device *, struct rtc_wkalrm *);
|
||||
int (*set_alarm)(struct device *, struct rtc_wkalrm *);
|
||||
int (*proc)(struct device *, struct seq_file *);
|
||||
int (*set_mmss)(struct device *, unsigned long secs);
|
||||
int (*irq_set_state)(struct device *, int enabled);
|
||||
int (*irq_set_freq)(struct device *, int freq);
|
||||
int (*read_callback)(struct device *, int data);
|
||||
};
|
||||
|
||||
#define RTC_DEVICE_NAME_SIZE 20
|
||||
struct rtc_task;
|
||||
|
||||
struct rtc_device
|
||||
{
|
||||
struct class_device class_dev;
|
||||
struct module *owner;
|
||||
|
||||
int id;
|
||||
char name[RTC_DEVICE_NAME_SIZE];
|
||||
|
||||
struct rtc_class_ops *ops;
|
||||
struct mutex ops_lock;
|
||||
|
||||
struct class_device *rtc_dev;
|
||||
struct cdev char_dev;
|
||||
struct mutex char_lock;
|
||||
|
||||
unsigned long irq_data;
|
||||
spinlock_t irq_lock;
|
||||
wait_queue_head_t irq_queue;
|
||||
struct fasync_struct *async_queue;
|
||||
|
||||
struct rtc_task *irq_task;
|
||||
spinlock_t irq_task_lock;
|
||||
int irq_freq;
|
||||
};
|
||||
#define to_rtc_device(d) container_of(d, struct rtc_device, class_dev)
|
||||
|
||||
extern struct rtc_device *rtc_device_register(const char *name,
|
||||
struct device *dev,
|
||||
struct rtc_class_ops *ops,
|
||||
struct module *owner);
|
||||
extern void rtc_device_unregister(struct rtc_device *rdev);
|
||||
extern int rtc_interface_register(struct class_interface *intf);
|
||||
|
||||
extern int rtc_read_time(struct class_device *class_dev, struct rtc_time *tm);
|
||||
extern int rtc_set_time(struct class_device *class_dev, struct rtc_time *tm);
|
||||
extern int rtc_set_mmss(struct class_device *class_dev, unsigned long secs);
|
||||
extern int rtc_read_alarm(struct class_device *class_dev,
|
||||
struct rtc_wkalrm *alrm);
|
||||
extern int rtc_set_alarm(struct class_device *class_dev,
|
||||
struct rtc_wkalrm *alrm);
|
||||
extern void rtc_update_irq(struct class_device *class_dev,
|
||||
unsigned long num, unsigned long events);
|
||||
|
||||
extern struct class_device *rtc_class_open(char *name);
|
||||
extern void rtc_class_close(struct class_device *class_dev);
|
||||
|
||||
extern int rtc_irq_register(struct class_device *class_dev,
|
||||
struct rtc_task *task);
|
||||
extern void rtc_irq_unregister(struct class_device *class_dev,
|
||||
struct rtc_task *task);
|
||||
extern int rtc_irq_set_state(struct class_device *class_dev,
|
||||
struct rtc_task *task, int enabled);
|
||||
extern int rtc_irq_set_freq(struct class_device *class_dev,
|
||||
struct rtc_task *task, int freq);
|
||||
|
||||
typedef struct rtc_task {
|
||||
void (*func)(void *private_data);
|
||||
void *private_data;
|
||||
|
||||
@@ -35,6 +35,7 @@
|
||||
#include <linux/topology.h>
|
||||
#include <linux/seccomp.h>
|
||||
#include <linux/rcupdate.h>
|
||||
#include <linux/futex.h>
|
||||
|
||||
#include <linux/auxvec.h> /* For AT_VECTOR_SIZE */
|
||||
|
||||
@@ -354,16 +355,8 @@ struct sighand_struct {
|
||||
atomic_t count;
|
||||
struct k_sigaction action[_NSIG];
|
||||
spinlock_t siglock;
|
||||
struct rcu_head rcu;
|
||||
};
|
||||
|
||||
extern void sighand_free_cb(struct rcu_head *rhp);
|
||||
|
||||
static inline void sighand_free(struct sighand_struct *sp)
|
||||
{
|
||||
call_rcu(&sp->rcu, sighand_free_cb);
|
||||
}
|
||||
|
||||
/*
|
||||
* NOTE! "signal_struct" does not have it's own
|
||||
* locking, because a shared signal_struct always
|
||||
@@ -402,6 +395,7 @@ struct signal_struct {
|
||||
|
||||
/* ITIMER_REAL timer for the process */
|
||||
struct hrtimer real_timer;
|
||||
struct task_struct *tsk;
|
||||
ktime_t it_real_incr;
|
||||
|
||||
/* ITIMER_PROF and ITIMER_VIRTUAL timers for the process */
|
||||
@@ -758,6 +752,7 @@ struct task_struct {
|
||||
|
||||
/* PID/PID hash table linkage. */
|
||||
struct pid pids[PIDTYPE_MAX];
|
||||
struct list_head thread_group;
|
||||
|
||||
struct completion *vfork_done; /* for vfork() */
|
||||
int __user *set_child_tid; /* CLONE_CHILD_SETTID */
|
||||
@@ -871,6 +866,11 @@ struct task_struct {
|
||||
int cpuset_mems_generation;
|
||||
int cpuset_mem_spread_rotor;
|
||||
#endif
|
||||
struct robust_list_head __user *robust_list;
|
||||
#ifdef CONFIG_COMPAT
|
||||
struct compat_robust_list_head __user *compat_robust_list;
|
||||
#endif
|
||||
|
||||
atomic_t fs_excl; /* holding fs exclusive resources */
|
||||
struct rcu_head rcu;
|
||||
};
|
||||
@@ -1094,7 +1094,6 @@ extern void force_sig_specific(int, struct task_struct *);
|
||||
extern int send_sig(int, struct task_struct *, int);
|
||||
extern void zap_other_threads(struct task_struct *p);
|
||||
extern int kill_pg(pid_t, int, int);
|
||||
extern int kill_sl(pid_t, int, int);
|
||||
extern int kill_proc(pid_t, int, int);
|
||||
extern struct sigqueue *sigqueue_alloc(void);
|
||||
extern void sigqueue_free(struct sigqueue *);
|
||||
@@ -1151,10 +1150,8 @@ extern void flush_thread(void);
|
||||
extern void exit_thread(void);
|
||||
|
||||
extern void exit_files(struct task_struct *);
|
||||
extern void exit_signal(struct task_struct *);
|
||||
extern void __exit_signal(struct task_struct *);
|
||||
extern void exit_sighand(struct task_struct *);
|
||||
extern void __exit_sighand(struct task_struct *);
|
||||
extern void __cleanup_signal(struct signal_struct *);
|
||||
extern void __cleanup_sighand(struct sighand_struct *);
|
||||
extern void exit_itimers(struct signal_struct *);
|
||||
|
||||
extern NORET_TYPE void do_group_exit(int);
|
||||
@@ -1178,19 +1175,7 @@ extern void wait_task_inactive(task_t * p);
|
||||
#endif
|
||||
|
||||
#define remove_parent(p) list_del_init(&(p)->sibling)
|
||||
#define add_parent(p, parent) list_add_tail(&(p)->sibling,&(parent)->children)
|
||||
|
||||
#define REMOVE_LINKS(p) do { \
|
||||
if (thread_group_leader(p)) \
|
||||
list_del_init(&(p)->tasks); \
|
||||
remove_parent(p); \
|
||||
} while (0)
|
||||
|
||||
#define SET_LINKS(p) do { \
|
||||
if (thread_group_leader(p)) \
|
||||
list_add_tail(&(p)->tasks,&init_task.tasks); \
|
||||
add_parent(p, (p)->parent); \
|
||||
} while (0)
|
||||
#define add_parent(p) list_add_tail(&(p)->sibling,&(p)->parent->children)
|
||||
|
||||
#define next_task(p) list_entry((p)->tasks.next, struct task_struct, tasks)
|
||||
#define prev_task(p) list_entry((p)->tasks.prev, struct task_struct, tasks)
|
||||
@@ -1208,20 +1193,22 @@ extern void wait_task_inactive(task_t * p);
|
||||
#define while_each_thread(g, t) \
|
||||
while ((t = next_thread(t)) != g)
|
||||
|
||||
extern task_t * FASTCALL(next_thread(const task_t *p));
|
||||
|
||||
#define thread_group_leader(p) (p->pid == p->tgid)
|
||||
|
||||
static inline task_t *next_thread(task_t *p)
|
||||
{
|
||||
return list_entry(rcu_dereference(p->thread_group.next),
|
||||
task_t, thread_group);
|
||||
}
|
||||
|
||||
static inline int thread_group_empty(task_t *p)
|
||||
{
|
||||
return list_empty(&p->pids[PIDTYPE_TGID].pid_list);
|
||||
return list_empty(&p->thread_group);
|
||||
}
|
||||
|
||||
#define delay_group_leader(p) \
|
||||
(thread_group_leader(p) && !thread_group_empty(p))
|
||||
|
||||
extern void unhash_process(struct task_struct *p);
|
||||
|
||||
/*
|
||||
* Protects ->fs, ->files, ->mm, ->ptrace, ->group_info, ->comm, keyring
|
||||
* subscriptions and synchronises with wait4(). Also used in procfs. Also
|
||||
@@ -1241,6 +1228,15 @@ static inline void task_unlock(struct task_struct *p)
|
||||
spin_unlock(&p->alloc_lock);
|
||||
}
|
||||
|
||||
extern struct sighand_struct *lock_task_sighand(struct task_struct *tsk,
|
||||
unsigned long *flags);
|
||||
|
||||
static inline void unlock_task_sighand(struct task_struct *tsk,
|
||||
unsigned long *flags)
|
||||
{
|
||||
spin_unlock_irqrestore(&tsk->sighand->siglock, *flags);
|
||||
}
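A minimal sketch of the intended pairing of the two helpers above (the caller and whatever it does under the lock are hypothetical):

static void example_with_sighand(struct task_struct *tsk)
{
    struct sighand_struct *sighand;
    unsigned long flags;

    sighand = lock_task_sighand(tsk, &flags);
    if (!sighand)
        return;        /* task is exiting; no sighand left to lock */

    /* ... inspect or modify signal state under tsk->sighand->siglock ... */

    unlock_task_sighand(tsk, &flags);
}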
|
||||
|
||||
#ifndef __HAVE_THREAD_FUNCTIONS
|
||||
|
||||
#define task_thread_info(task) (task)->thread_info
|
||||
|
||||
@@ -869,6 +869,11 @@ struct swap_info_struct;
|
||||
* @ipcp contains the kernel IPC permission structure
|
||||
* @flag contains the desired (requested) permission set
|
||||
* Return 0 if permission is granted.
|
||||
* @ipc_getsecurity:
|
||||
* Copy the security label associated with the ipc object into
|
||||
* @buffer. @buffer may be NULL to request the size of the buffer
|
||||
* required. @size indicates the size of @buffer in bytes. Return
|
||||
* number of bytes used/required on success.
|
||||
*
|
||||
* Security hooks for individual messages held in System V IPC message queues
|
||||
* @msg_msg_alloc_security:
|
||||
@@ -1040,6 +1045,11 @@ struct swap_info_struct;
|
||||
* @effective contains the effective capability set.
|
||||
* @inheritable contains the inheritable capability set.
|
||||
* @permitted contains the permitted capability set.
|
||||
* @capable:
|
||||
* Check whether the @tsk process has the @cap capability.
|
||||
* @tsk contains the task_struct for the process.
|
||||
* @cap contains the capability <include/linux/capability.h>.
|
||||
* Return 0 if the capability is granted for @tsk.
|
||||
* @acct:
|
||||
* Check permission before enabling or disabling process accounting. If
|
||||
* accounting is being enabled, then @file refers to the open file used to
|
||||
@@ -1053,11 +1063,6 @@ struct swap_info_struct;
|
||||
* @table contains the ctl_table structure for the sysctl variable.
|
||||
* @op contains the operation (001 = search, 002 = write, 004 = read).
|
||||
* Return 0 if permission is granted.
|
||||
* @capable:
|
||||
* Check whether the @tsk process has the @cap capability.
|
||||
* @tsk contains the task_struct for the process.
|
||||
* @cap contains the capability <include/linux/capability.h>.
|
||||
* Return 0 if the capability is granted for @tsk.
|
||||
* @syslog:
|
||||
* Check permission before accessing the kernel message ring or changing
|
||||
* logging to the console.
|
||||
@@ -1099,9 +1104,9 @@ struct security_operations {
|
||||
kernel_cap_t * effective,
|
||||
kernel_cap_t * inheritable,
|
||||
kernel_cap_t * permitted);
|
||||
int (*capable) (struct task_struct * tsk, int cap);
|
||||
int (*acct) (struct file * file);
|
||||
int (*sysctl) (struct ctl_table * table, int op);
|
||||
int (*capable) (struct task_struct * tsk, int cap);
|
||||
int (*quotactl) (int cmds, int type, int id, struct super_block * sb);
|
||||
int (*quota_on) (struct dentry * dentry);
|
||||
int (*syslog) (int type);
|
||||
@@ -1168,7 +1173,8 @@ struct security_operations {
|
||||
int (*inode_getxattr) (struct dentry *dentry, char *name);
|
||||
int (*inode_listxattr) (struct dentry *dentry);
|
||||
int (*inode_removexattr) (struct dentry *dentry, char *name);
|
||||
int (*inode_getsecurity)(struct inode *inode, const char *name, void *buffer, size_t size, int err);
|
||||
const char *(*inode_xattr_getsuffix) (void);
|
||||
int (*inode_getsecurity)(const struct inode *inode, const char *name, void *buffer, size_t size, int err);
|
||||
int (*inode_setsecurity)(struct inode *inode, const char *name, const void *value, size_t size, int flags);
|
||||
int (*inode_listsecurity)(struct inode *inode, char *buffer, size_t buffer_size);
|
||||
|
||||
@@ -1217,6 +1223,7 @@ struct security_operations {
|
||||
void (*task_to_inode)(struct task_struct *p, struct inode *inode);
|
||||
|
||||
int (*ipc_permission) (struct kern_ipc_perm * ipcp, short flag);
|
||||
int (*ipc_getsecurity)(struct kern_ipc_perm *ipcp, void *buffer, size_t size);
|
||||
|
||||
int (*msg_msg_alloc_security) (struct msg_msg * msg);
|
||||
void (*msg_msg_free_security) (struct msg_msg * msg);
|
||||
@@ -1347,6 +1354,11 @@ static inline void security_capset_set (struct task_struct *target,
|
||||
security_ops->capset_set (target, effective, inheritable, permitted);
|
||||
}
|
||||
|
||||
static inline int security_capable(struct task_struct *tsk, int cap)
|
||||
{
|
||||
return security_ops->capable(tsk, cap);
|
||||
}
|
||||
|
||||
static inline int security_acct (struct file *file)
|
||||
{
|
||||
return security_ops->acct (file);
|
||||
@@ -1675,7 +1687,12 @@ static inline int security_inode_removexattr (struct dentry *dentry, char *name)
|
||||
return security_ops->inode_removexattr (dentry, name);
|
||||
}
|
||||
|
||||
static inline int security_inode_getsecurity(struct inode *inode, const char *name, void *buffer, size_t size, int err)
|
||||
static inline const char *security_inode_xattr_getsuffix(void)
|
||||
{
|
||||
return security_ops->inode_xattr_getsuffix();
|
||||
}
|
||||
|
||||
static inline int security_inode_getsecurity(const struct inode *inode, const char *name, void *buffer, size_t size, int err)
|
||||
{
|
||||
if (unlikely (IS_PRIVATE (inode)))
|
||||
return 0;
|
||||
@@ -1870,6 +1887,11 @@ static inline int security_ipc_permission (struct kern_ipc_perm *ipcp,
|
||||
return security_ops->ipc_permission (ipcp, flag);
|
||||
}
|
||||
|
||||
static inline int security_ipc_getsecurity(struct kern_ipc_perm *ipcp, void *buffer, size_t size)
|
||||
{
|
||||
return security_ops->ipc_getsecurity(ipcp, buffer, size);
|
||||
}
|
||||
|
||||
static inline int security_msg_msg_alloc (struct msg_msg * msg)
|
||||
{
|
||||
return security_ops->msg_msg_alloc_security (msg);
|
||||
@@ -2050,6 +2072,11 @@ static inline void security_capset_set (struct task_struct *target,
|
||||
cap_capset_set (target, effective, inheritable, permitted);
|
||||
}
|
||||
|
||||
static inline int security_capable(struct task_struct *tsk, int cap)
|
||||
{
|
||||
return cap_capable(tsk, cap);
|
||||
}
|
||||
|
||||
static inline int security_acct (struct file *file)
|
||||
{
|
||||
return 0;
|
||||
@@ -2317,7 +2344,12 @@ static inline int security_inode_removexattr (struct dentry *dentry, char *name)
|
||||
return cap_inode_removexattr(dentry, name);
|
||||
}
|
||||
|
||||
static inline int security_inode_getsecurity(struct inode *inode, const char *name, void *buffer, size_t size, int err)
|
||||
static inline const char *security_inode_xattr_getsuffix (void)
|
||||
{
|
||||
return NULL ;
|
||||
}
|
||||
|
||||
static inline int security_inode_getsecurity(const struct inode *inode, const char *name, void *buffer, size_t size, int err)
|
||||
{
|
||||
return -EOPNOTSUPP;
|
||||
}
|
||||
@@ -2500,6 +2532,11 @@ static inline int security_ipc_permission (struct kern_ipc_perm *ipcp,
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline int security_ipc_getsecurity(struct kern_ipc_perm *ipcp, void *buffer, size_t size)
|
||||
{
|
||||
return -EOPNOTSUPP;
|
||||
}
|
||||
|
||||
static inline int security_msg_msg_alloc (struct msg_msg * msg)
|
||||
{
|
||||
return 0;
|
||||
|
||||
@@ -37,6 +37,7 @@ enum {
|
||||
PLAT8250_DEV_LEGACY = -1,
|
||||
PLAT8250_DEV_PLATFORM,
|
||||
PLAT8250_DEV_PLATFORM1,
|
||||
PLAT8250_DEV_PLATFORM2,
|
||||
PLAT8250_DEV_FOURPORT,
|
||||
PLAT8250_DEV_ACCENT,
|
||||
PLAT8250_DEV_BOCA,
|
||||
|
||||
@@ -119,7 +119,7 @@ static inline void serio_cleanup(struct serio *serio)
}

/*
 * Use the following fucntions to manipulate serio's per-port
 * Use the following functions to manipulate serio's per-port
 * driver-specific data.
 */
static inline void *serio_get_drvdata(struct serio *serio)
@@ -133,7 +133,7 @@ static inline void serio_set_drvdata(struct serio *serio, void *data)
}

/*
 * Use the following fucntions to protect critical sections in
 * Use the following functions to protect critical sections in
 * driver code from port's interrupt handler
 */
static inline void serio_pause_rx(struct serio *serio)
@@ -147,7 +147,7 @@ static inline void serio_continue_rx(struct serio *serio)
}

/*
 * Use the following fucntions to pin serio's driver in process context
 * Use the following functions to pin serio's driver in process context
 */
static inline int serio_pin_driver(struct serio *serio)
{
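A minimal driver-side sketch of the helpers these (typo-fixed) comments describe; the driver, its state struct, and the callbacks are hypothetical, only the serio_* calls come from the header:

struct my_port_state { unsigned int bytes; };   /* made-up per-port data */

static int my_connect(struct serio *serio, struct serio_driver *drv)
{
        struct my_port_state *st = kzalloc(sizeof(*st), GFP_KERNEL);
        int err;

        if (!st)
                return -ENOMEM;
        serio_set_drvdata(serio, st);           /* stash per-port driver data */
        err = serio_open(serio, drv);
        if (err)
                kfree(st);
        return err;
}

static void my_reset_counter(struct serio *serio)
{
        struct my_port_state *st = serio_get_drvdata(serio);

        serio_pause_rx(serio);                  /* keep the port's irq handler out */
        st->bytes = 0;
        serio_continue_rx(serio);
}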
@@ -249,6 +249,8 @@ static inline void init_sigpending(struct sigpending *sig)
        INIT_LIST_HEAD(&sig->list);
}

extern void flush_sigqueue(struct sigpending *queue);

/* Test if 'sig' is valid signal. Use this instead of testing _NSIG directly */
static inline int valid_signal(unsigned long sig)
{
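A short usage sketch of the helper named in that comment (the surrounding function is hypothetical):

static int my_send_sig(unsigned long sig)
{
        if (!valid_signal(sig))         /* replaces open-coded "> _NSIG" checks */
                return -EINVAL;
        /* ... deliver the signal ... */
        return 0;
}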
@@ -64,6 +64,7 @@ extern kmem_cache_t *kmem_cache_create(const char *, size_t, size_t, unsigned lo
extern int kmem_cache_destroy(kmem_cache_t *);
extern int kmem_cache_shrink(kmem_cache_t *);
extern void *kmem_cache_alloc(kmem_cache_t *, gfp_t);
extern void *kmem_cache_zalloc(struct kmem_cache *, gfp_t);
extern void kmem_cache_free(kmem_cache_t *, void *);
extern unsigned int kmem_cache_size(kmem_cache_t *);
extern const char *kmem_cache_name(kmem_cache_t *);
@@ -77,11 +78,12 @@ struct cache_sizes {
};
extern struct cache_sizes malloc_sizes[];

#ifndef CONFIG_DEBUG_SLAB
extern void *__kmalloc(size_t, gfp_t);
#ifndef CONFIG_DEBUG_SLAB
#define ____kmalloc(size, flags) __kmalloc(size, flags)
#else
extern void *__kmalloc_track_caller(size_t, gfp_t, void*);
#define __kmalloc(size, flags) \
#define ____kmalloc(size, flags) \
        __kmalloc_track_caller(size, flags, __builtin_return_address(0))
#endif

@@ -108,7 +110,30 @@ found:
        return __kmalloc(size, flags);
}

extern void *kzalloc(size_t, gfp_t);
extern void *__kzalloc(size_t, gfp_t);

static inline void *kzalloc(size_t size, gfp_t flags)
{
        if (__builtin_constant_p(size)) {
                int i = 0;
#define CACHE(x) \
                if (size <= x) \
                        goto found; \
                else \
                        i++;
#include "kmalloc_sizes.h"
#undef CACHE
                {
                        extern void __you_cannot_kzalloc_that_much(void);
                        __you_cannot_kzalloc_that_much();
                }
found:
                return kmem_cache_zalloc((flags & GFP_DMA) ?
                        malloc_sizes[i].cs_dmacachep :
                        malloc_sizes[i].cs_cachep, flags);
        }
        return __kzalloc(size, flags);
}

/**
 * kcalloc - allocate memory for an array. The memory is set to zero.
@@ -155,17 +180,18 @@ struct kmem_cache *kmem_cache_create(const char *c, size_t, size_t,
        void (*)(void *, struct kmem_cache *, unsigned long));
int kmem_cache_destroy(struct kmem_cache *c);
void *kmem_cache_alloc(struct kmem_cache *c, gfp_t flags);
void *kmem_cache_zalloc(struct kmem_cache *, gfp_t);
void kmem_cache_free(struct kmem_cache *c, void *b);
const char *kmem_cache_name(struct kmem_cache *);
void *kmalloc(size_t size, gfp_t flags);
void *kzalloc(size_t size, gfp_t flags);
void *__kzalloc(size_t size, gfp_t flags);
void kfree(const void *m);
unsigned int ksize(const void *m);
unsigned int kmem_cache_size(struct kmem_cache *c);

static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
{
        return kzalloc(n * size, flags);
        return __kzalloc(n * size, flags);
}

#define kmem_cache_shrink(d) (0)
@@ -173,6 +199,8 @@ static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
#define kmem_ptr_validate(a, b) (0)
#define kmem_cache_alloc_node(c, f, n) kmem_cache_alloc(c, f)
#define kmalloc_node(s, f, n) kmalloc(s, f)
#define kzalloc(s, f) __kzalloc(s, f)
#define ____kmalloc kmalloc

#endif /* CONFIG_SLOB */

@@ -182,7 +210,6 @@ extern kmem_cache_t *names_cachep;
extern kmem_cache_t *files_cachep;
extern kmem_cache_t *filp_cachep;
extern kmem_cache_t *fs_cachep;
extern kmem_cache_t *signal_cachep;
extern kmem_cache_t *sighand_cachep;
extern kmem_cache_t *bio_cachep;
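Usage-wise, a sketch under the prototypes above (the record type and helpers are made up): the zeroing allocators replace the usual kmalloc-then-memset pattern, and for a compile-time-constant size kzalloc resolves straight to kmem_cache_zalloc() on the matching malloc_sizes[] cache:

struct my_record { int id; char name[32]; };    /* hypothetical type */

static struct my_record *make_one(void)
{
        /* single zeroed object */
        return kzalloc(sizeof(struct my_record), GFP_KERNEL);
}

static struct my_record *make_table(size_t n)
{
        /* zeroed array; kcalloc is now backed by __kzalloc */
        struct my_record *tab = kcalloc(n, sizeof(*tab), GFP_KERNEL);

        if (!tab)
                return NULL;
        tab[0].id = 1;          /* the rest of each entry is already zero */
        return tab;
}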
@@ -82,7 +82,11 @@ void smp_prepare_boot_cpu(void);
 */
#define raw_smp_processor_id()                  0
#define hard_smp_processor_id()                 0
#define smp_call_function(func,info,retry,wait) ({ 0; })
static inline int up_smp_call_function(void)
{
        return 0;
}
#define smp_call_function(func,info,retry,wait) (up_smp_call_function())
#define on_each_cpu(func,info,retry,wait)       \
        ({                                      \
                local_irq_disable();            \
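The effect of routing the UP stub through a real function shows up in a caller like this hypothetical sketch: the return value can still be checked, and on a !CONFIG_SMP build the macro just evaluates up_smp_call_function(), i.e. 0:

static void my_remote_poke(void *info)         /* runs on other CPUs when SMP */
{
        /* ... */
}

static int my_poke_everyone(void)
{
        int ret = smp_call_function(my_remote_poke, NULL, 0, 1);

        if (ret)
                printk(KERN_WARNING "my_poke_everyone: smp_call_function failed (%d)\n", ret);
        return ret;
}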
@@ -30,12 +30,12 @@
 */

struct device;
extern int register_sound_special(struct file_operations *fops, int unit);
extern int register_sound_special_device(struct file_operations *fops, int unit, struct device *dev);
extern int register_sound_mixer(struct file_operations *fops, int dev);
extern int register_sound_midi(struct file_operations *fops, int dev);
extern int register_sound_dsp(struct file_operations *fops, int dev);
extern int register_sound_synth(struct file_operations *fops, int dev);
extern int register_sound_special(const struct file_operations *fops, int unit);
extern int register_sound_special_device(const struct file_operations *fops, int unit, struct device *dev);
extern int register_sound_mixer(const struct file_operations *fops, int dev);
extern int register_sound_midi(const struct file_operations *fops, int dev);
extern int register_sound_dsp(const struct file_operations *fops, int dev);
extern int register_sound_synth(const struct file_operations *fops, int dev);

extern void unregister_sound_special(int unit);
extern void unregister_sound_mixer(int unit);
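With the constified prototypes, a driver's fops table can itself live in read-only data; a hypothetical module sketch (callbacks and names are assumptions, only register_sound_dsp() comes from the header):

static int my_dsp_open(struct inode *inode, struct file *file) { return 0; }
static int my_dsp_release(struct inode *inode, struct file *file) { return 0; }

static const struct file_operations my_dsp_fops = {    /* now allowed to be const */
        .owner   = THIS_MODULE,
        .open    = my_dsp_open,
        .release = my_dsp_release,
};

static int my_unit;

static int __init my_sound_init(void)
{
        my_unit = register_sound_dsp(&my_dsp_fops, -1); /* -1: any free minor */
        return my_unit < 0 ? my_unit : 0;
}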
Some files were not shown because too many files have changed in this diff.