fasync: RCU and fine-grained locking
kill_fasync() uses a central rwlock, a prime candidate for RCU conversion,
to avoid cache line ping-pong on SMP.
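A minimal sketch of that reader shape, using the kill_fasync_rcu() and
fa_lock names from this patch (the body here is illustrative, not the
verbatim patch): the list is walked under rcu_read_lock(), and each
entry's lock is held only around signal delivery.

static void kill_fasync_rcu(struct fasync_struct *fa, int sig, int band)
{
	while (fa) {
		struct fown_struct *fown;

		spin_lock(&fa->fa_lock);	/* per-entry lock, held briefly */
		if (fa->fa_file) {
			fown = &fa->fa_file->f_owner;
			/* SIGURG has its own default delivery mechanism */
			if (!(sig == SIGURG && fown->signum == 0))
				send_sigio(fown, fa->fa_fd, band);
		}
		spin_unlock(&fa->fa_lock);
		fa = rcu_dereference(fa->fa_next);	/* RCU-safe traversal */
	}
}

void kill_fasync(struct fasync_struct **fp, int sig, int band)
{
	/* readers no longer bounce a global rwlock cache line */
	if (*fp) {
		rcu_read_lock();
		kill_fasync_rcu(rcu_dereference(*fp), sig, band);
		rcu_read_unlock();
	}
}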
fasync_remove_entry() and fasync_add_entry() can then disable IRQs for a
short section only, instead of across the whole list scan.
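For illustration, the writer side might look like the sketch below (an
assumed shape, not the literal patch body; fasync_free_rcu is an assumed
RCU callback that frees the entry after a grace period). The scan runs
with IRQs enabled; only the per-entry update disables them.

static int fasync_remove_entry(struct file *filp, struct fasync_struct **fapp)
{
	struct fasync_struct *fa, **fp;
	int result = 0;

	spin_lock(&filp->f_lock);		/* IRQs stay on during the scan */
	for (fp = fapp; (fa = *fp) != NULL; fp = &fa->fa_next) {
		if (fa->fa_file != filp)
			continue;

		spin_lock_irq(&fa->fa_lock);	/* short IRQ-off section */
		fa->fa_file = NULL;
		spin_unlock_irq(&fa->fa_lock);

		*fp = fa->fa_next;
		/* defer the free so concurrent RCU readers stay safe */
		call_rcu(&fa->fa_rcu, fasync_free_rcu);
		filp->f_flags &= ~FASYNC;
		result = 1;
		break;
	}
	spin_unlock(&filp->f_lock);
	return result;
}

Unlinking plus call_rcu() is what lets kill_fasync_rcu() keep walking the
list concurrently without the central rwlock.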
Use one spinlock per fasync_struct to synchronize kill_fasync_rcu() and
fasync_{remove|add}_entry(). This spinlock is IRQ-safe, so sock_fasync()
doesn't need its own implementation and can use fasync_helper(),
reducing code size and complexity.
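sock_fasync() can then shrink to roughly the following (a hedged sketch;
sock->fasync_list and the SOCK_FASYNC flag handling are assumed from the
surrounding net code):

static int sock_fasync(int fd, struct file *filp, int on)
{
	struct socket *sock = filp->private_data;
	struct sock *sk = sock->sk;

	if (sk == NULL)
		return -EINVAL;

	lock_sock(sk);

	/* the generic helper now provides all the list locking */
	fasync_helper(fd, filp, on, &sock->fasync_list);

	if (!sock->fasync_list)
		sock_reset_flag(sk, SOCK_FASYNC);
	else
		sock_set_flag(sk, SOCK_FASYNC);

	release_sock(sk);
	return 0;
}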
We can then remove the direct use of __kill_fasync() in net/socket.c and
rename it to kill_fasync_rcu().
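The remaining net callers go through the regular entry point, e.g. in
sock_wake_async() (an illustrative call site, not quoted from the patch):

	/* was: __kill_fasync(sock->fasync_list, SIGIO, band) under a lock */
	kill_fasync(&sock->fasync_list, SIGIO, band);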
Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Lai Jiangshan <laijs@cn.fujitsu.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
commit 989a297920
parent e5700aff14
include/linux/fs.h

@@ -1280,10 +1280,12 @@ static inline int lock_may_write(struct inode *inode, loff_t start,
 
 
 struct fasync_struct {
-	int	magic;
-	int	fa_fd;
-	struct	fasync_struct	*fa_next; /* singly linked list */
-	struct	file		*fa_file;
+	spinlock_t		fa_lock;
+	int			magic;
+	int			fa_fd;
+	struct fasync_struct	*fa_next; /* singly linked list */
+	struct file		*fa_file;
+	struct rcu_head		fa_rcu;
 };
 
 #define FASYNC_MAGIC 0x4601
@@ -1292,8 +1294,6 @@ struct fasync_struct {
 extern int fasync_helper(int, struct file *, int, struct fasync_struct **);
 /* can be called from interrupts */
 extern void kill_fasync(struct fasync_struct **, int, int);
-/* only for net: no internal synchronization */
-extern void __kill_fasync(struct fasync_struct *, int, int);
 
 extern int __f_setown(struct file *filp, struct pid *, enum pid_type, int force);
 extern int f_setown(struct file *filp, unsigned long arg, int force);