Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/torvalds/linux-2.6
@@ -3854,8 +3854,28 @@ static void tcp_ofo_queue(struct sock *sk)
 	}
 }
 
+static int tcp_prune_ofo_queue(struct sock *sk);
 static int tcp_prune_queue(struct sock *sk);
 
+static inline int tcp_try_rmem_schedule(struct sock *sk, unsigned int size)
+{
+	if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
+	    !sk_rmem_schedule(sk, size)) {
+
+		if (tcp_prune_queue(sk) < 0)
+			return -1;
+
+		if (!sk_rmem_schedule(sk, size)) {
+			if (!tcp_prune_ofo_queue(sk))
+				return -1;
+
+			if (!sk_rmem_schedule(sk, size))
+				return -1;
+		}
+	}
+	return 0;
+}
+
 static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)
 {
 	struct tcphdr *th = tcp_hdr(skb);
@@ -3905,12 +3925,9 @@ static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)
 	if (eaten <= 0) {
 queue_and_out:
 		if (eaten < 0 &&
-		    (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
-		     !sk_rmem_schedule(sk, skb->truesize))) {
-			if (tcp_prune_queue(sk) < 0 ||
-			    !sk_rmem_schedule(sk, skb->truesize))
-				goto drop;
-		}
+		    tcp_try_rmem_schedule(sk, skb->truesize))
+			goto drop;
+
 		skb_set_owner_r(skb, sk);
 		__skb_queue_tail(&sk->sk_receive_queue, skb);
 	}
@@ -3979,12 +3996,8 @@ drop:
 
 	TCP_ECN_check_ce(tp, skb);
 
-	if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
-	    !sk_rmem_schedule(sk, skb->truesize)) {
-		if (tcp_prune_queue(sk) < 0 ||
-		    !sk_rmem_schedule(sk, skb->truesize))
-			goto drop;
-	}
+	if (tcp_try_rmem_schedule(sk, skb->truesize))
+		goto drop;
 
 	/* Disable header prediction. */
 	tp->pred_flags = 0;
@@ -4211,6 +4224,32 @@ static void tcp_collapse_ofo_queue(struct sock *sk)
 	}
 }
 
+/*
+ * Purge the out-of-order queue.
+ * Return true if queue was pruned.
+ */
+static int tcp_prune_ofo_queue(struct sock *sk)
+{
+	struct tcp_sock *tp = tcp_sk(sk);
+	int res = 0;
+
+	if (!skb_queue_empty(&tp->out_of_order_queue)) {
+		NET_INC_STATS_BH(LINUX_MIB_OFOPRUNED);
+		__skb_queue_purge(&tp->out_of_order_queue);
+
+		/* Reset SACK state. A conforming SACK implementation will
+		 * do the same at a timeout based retransmit. When a connection
+		 * is in a sad state like this, we care only about integrity
+		 * of the connection not performance.
+		 */
+		if (tp->rx_opt.sack_ok)
+			tcp_sack_reset(&tp->rx_opt);
+		sk_mem_reclaim(sk);
+		res = 1;
+	}
+	return res;
+}
+
 /* Reduce allocated memory if we can, trying to get
 * the socket within its memory limits again.
 *
@@ -4244,20 +4283,7 @@ static int tcp_prune_queue(struct sock *sk)
 	/* Collapsing did not help, destructive actions follow.
 	 * This must not ever occur. */
 
-	/* First, purge the out_of_order queue. */
-	if (!skb_queue_empty(&tp->out_of_order_queue)) {
-		NET_INC_STATS_BH(LINUX_MIB_OFOPRUNED);
-		__skb_queue_purge(&tp->out_of_order_queue);
-
-		/* Reset SACK state. A conforming SACK implementation will
-		 * do the same at a timeout based retransmit. When a connection
-		 * is in a sad state like this, we care only about integrity
-		 * of the connection not performance.
-		 */
-		if (tcp_is_sack(tp))
-			tcp_sack_reset(&tp->rx_opt);
-		sk_mem_reclaim(sk);
-	}
+	tcp_prune_ofo_queue(sk);
 
 	if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf)
 		return 0;