Merge branch 'from-linus' into upstream
@@ -85,7 +85,7 @@ void br_ifinfo_notify(int event, struct net_bridge_port *port)
 		goto err_out;
 
 	err = br_fill_ifinfo(skb, port, current->pid, 0, event, 0);
-	if (err)
+	if (err < 0)
 		goto err_kfree;
 
 	NETLINK_CB(skb).dst_group = RTNLGRP_LINK;
@@ -74,6 +74,9 @@ static void ulog_send(unsigned int nlgroup)
 	if (timer_pending(&ub->timer))
 		del_timer(&ub->timer);
 
+	if (!ub->skb)
+		return;
+
 	/* last nlmsg needs NLMSG_DONE */
 	if (ub->qlen > 1)
 		ub->lastnlh->nlmsg_type = NLMSG_DONE;
@@ -95,12 +95,11 @@ static void dst_run_gc(unsigned long dummy)
 		dst_gc_timer_inc = DST_GC_INC;
 		dst_gc_timer_expires = DST_GC_MIN;
 	}
-	dst_gc_timer.expires = jiffies + dst_gc_timer_expires;
 #if RT_CACHE_DEBUG >= 2
 	printk("dst_total: %d/%d %ld\n",
 	       atomic_read(&dst_total), delayed, dst_gc_timer_expires);
 #endif
-	add_timer(&dst_gc_timer);
+	mod_timer(&dst_gc_timer, jiffies + dst_gc_timer_expires);
 
 out:
 	spin_unlock(&dst_lock);
@@ -2149,6 +2149,8 @@ static struct sk_buff *fill_packet_ipv4(struct net_device *odev,
 	skb->mac.raw = ((u8 *) iph) - 14 - pkt_dev->nr_labels*sizeof(u32);
 	skb->dev = odev;
 	skb->pkt_type = PACKET_HOST;
+	skb->nh.iph = iph;
+	skb->h.uh = udph;
 
 	if (pkt_dev->nfrags <= 0)
 		pgh = (struct pktgen_hdr *)skb_put(skb, datalen);
@@ -2460,6 +2462,8 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev,
 	skb->protocol = protocol;
 	skb->dev = odev;
 	skb->pkt_type = PACKET_HOST;
+	skb->nh.ipv6h = iph;
+	skb->h.uh = udph;
 
 	if (pkt_dev->nfrags <= 0)
 		pgh = (struct pktgen_hdr *)skb_put(skb, datalen);
@@ -394,6 +394,9 @@ static int do_setlink(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
 	}
 
 	if (ida[IFLA_ADDRESS - 1]) {
+		struct sockaddr *sa;
+		int len;
+
 		if (!dev->set_mac_address) {
 			err = -EOPNOTSUPP;
 			goto out;
@@ -405,7 +408,17 @@ static int do_setlink(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
 		if (ida[IFLA_ADDRESS - 1]->rta_len != RTA_LENGTH(dev->addr_len))
 			goto out;
 
-		err = dev->set_mac_address(dev, RTA_DATA(ida[IFLA_ADDRESS - 1]));
+		len = sizeof(sa_family_t) + dev->addr_len;
+		sa = kmalloc(len, GFP_KERNEL);
+		if (!sa) {
+			err = -ENOMEM;
+			goto out;
+		}
+		sa->sa_family = dev->type;
+		memcpy(sa->sa_data, RTA_DATA(ida[IFLA_ADDRESS - 1]),
+		       dev->addr_len);
+		err = dev->set_mac_address(dev, sa);
+		kfree(sa);
 		if (err)
 			goto out;
 		send_addr_notify = 1;
@@ -268,8 +268,10 @@ struct sk_buff *__netdev_alloc_skb(struct net_device *dev,
 	struct sk_buff *skb;
 
 	skb = alloc_skb(length + NET_SKB_PAD, gfp_mask);
-	if (likely(skb))
+	if (likely(skb)) {
 		skb_reserve(skb, NET_SKB_PAD);
+		skb->dev = dev;
+	}
 	return skb;
 }
 
@@ -947,7 +947,7 @@ alloc_new_skb:
 			skb_prev->csum = csum_sub(skb_prev->csum,
 						  skb->csum);
 			data += fraggap;
-			skb_trim(skb_prev, maxfraglen);
+			pskb_trim_unique(skb_prev, maxfraglen);
 		}
 
 		copy = datalen - transhdrlen - fraggap;
@@ -1142,7 +1142,7 @@ ssize_t ip_append_page(struct sock *sk, struct page *page,
 						   data, fraggap, 0);
 			skb_prev->csum = csum_sub(skb_prev->csum,
 						  skb->csum);
-			skb_trim(skb_prev, maxfraglen);
+			pskb_trim_unique(skb_prev, maxfraglen);
 		}
 
 		/*
@@ -1170,21 +1170,34 @@ static int __init arp_tables_init(void)
 {
 	int ret;
 
-	xt_proto_init(NF_ARP);
+	ret = xt_proto_init(NF_ARP);
+	if (ret < 0)
+		goto err1;
 
 	/* Noone else will be downing sem now, so we won't sleep */
-	xt_register_target(&arpt_standard_target);
-	xt_register_target(&arpt_error_target);
+	ret = xt_register_target(&arpt_standard_target);
+	if (ret < 0)
+		goto err2;
+	ret = xt_register_target(&arpt_error_target);
+	if (ret < 0)
+		goto err3;
 
 	/* Register setsockopt */
 	ret = nf_register_sockopt(&arpt_sockopts);
-	if (ret < 0) {
-		duprintf("Unable to register sockopts.\n");
-		return ret;
-	}
+	if (ret < 0)
+		goto err4;
 
 	printk("arp_tables: (C) 2002 David S. Miller\n");
 	return 0;
+
+err4:
+	xt_unregister_target(&arpt_error_target);
+err3:
+	xt_unregister_target(&arpt_standard_target);
+err2:
+	xt_proto_fini(NF_ARP);
+err1:
+	return ret;
 }
 
 static void __exit arp_tables_fini(void)
@@ -2239,22 +2239,39 @@ static int __init ip_tables_init(void)
 {
 	int ret;
 
-	xt_proto_init(AF_INET);
+	ret = xt_proto_init(AF_INET);
+	if (ret < 0)
+		goto err1;
 
 	/* Noone else will be downing sem now, so we won't sleep */
-	xt_register_target(&ipt_standard_target);
-	xt_register_target(&ipt_error_target);
-	xt_register_match(&icmp_matchstruct);
+	ret = xt_register_target(&ipt_standard_target);
+	if (ret < 0)
+		goto err2;
+	ret = xt_register_target(&ipt_error_target);
+	if (ret < 0)
+		goto err3;
+	ret = xt_register_match(&icmp_matchstruct);
+	if (ret < 0)
+		goto err4;
 
 	/* Register setsockopt */
 	ret = nf_register_sockopt(&ipt_sockopts);
-	if (ret < 0) {
-		duprintf("Unable to register sockopts.\n");
-		return ret;
-	}
+	if (ret < 0)
+		goto err5;
 
 	printk("ip_tables: (C) 2000-2006 Netfilter Core Team\n");
 	return 0;
+
+err5:
+	xt_unregister_match(&icmp_matchstruct);
+err4:
+	xt_unregister_target(&ipt_error_target);
+err3:
+	xt_unregister_target(&ipt_standard_target);
+err2:
+	xt_proto_fini(AF_INET);
+err1:
+	return ret;
 }
 
 static void __exit ip_tables_fini(void)
@@ -115,6 +115,11 @@ static void ulog_send(unsigned int nlgroupnum)
 		del_timer(&ub->timer);
 	}
 
+	if (!ub->skb) {
+		DEBUGP("ipt_ULOG: ulog_send: nothing to send\n");
+		return;
+	}
+
 	/* last nlmsg needs NLMSG_DONE */
 	if (ub->qlen > 1)
 		ub->lastnlh->nlmsg_type = NLMSG_DONE;
@@ -454,15 +454,12 @@ hashlimit_match(const struct sk_buff *skb,
 		dh->rateinfo.credit_cap = user2credits(hinfo->cfg.avg *
 						       hinfo->cfg.burst);
 		dh->rateinfo.cost = user2credits(hinfo->cfg.avg);
-
-		spin_unlock_bh(&hinfo->lock);
-		return 1;
+	} else {
+		/* update expiration timeout */
+		dh->expires = now + msecs_to_jiffies(hinfo->cfg.expire);
+		rateinfo_recalc(dh, now);
 	}
 
-	/* update expiration timeout */
-	dh->expires = now + msecs_to_jiffies(hinfo->cfg.expire);
-
-	rateinfo_recalc(dh, now);
 	if (dh->rateinfo.credit >= dh->rateinfo.cost) {
 		/* We're underlimit. */
 		dh->rateinfo.credit -= dh->rateinfo.cost;
@@ -3157,7 +3157,7 @@ int __init ip_rt_init(void)
 					rhash_entries,
 					(num_physpages >= 128 * 1024) ?
 					15 : 17,
-					HASH_HIGHMEM,
+					0,
 					&rt_hash_log,
 					&rt_hash_mask,
 					0);
@@ -3541,7 +3541,8 @@ void tcp_cwnd_application_limited(struct sock *sk)
 	if (inet_csk(sk)->icsk_ca_state == TCP_CA_Open &&
 	    sk->sk_socket && !test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
 		/* Limited by application or receiver window. */
-		u32 win_used = max(tp->snd_cwnd_used, 2U);
+		u32 init_win = tcp_init_cwnd(tp, __sk_dst_get(sk));
+		u32 win_used = max(tp->snd_cwnd_used, init_win);
 		if (win_used < tp->snd_cwnd) {
 			tp->snd_ssthresh = tcp_current_ssthresh(sk);
 			tp->snd_cwnd = (tp->snd_cwnd + win_used) >> 1;
@@ -466,7 +466,8 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
 	if (skb->len != tcp_header_size)
 		tcp_event_data_sent(tp, skb, sk);
 
-	TCP_INC_STATS(TCP_MIB_OUTSEGS);
+	if (after(tcb->end_seq, tp->snd_nxt) || tcb->seq == tcb->end_seq)
+		TCP_INC_STATS(TCP_MIB_OUTSEGS);
 
 	err = icsk->icsk_af_ops->queue_xmit(skb, 0);
 	if (likely(err <= 0))
@@ -2157,10 +2158,9 @@ int tcp_connect(struct sock *sk)
 	skb_shinfo(buff)->gso_size = 0;
 	skb_shinfo(buff)->gso_type = 0;
 	buff->csum = 0;
+	tp->snd_nxt = tp->write_seq;
 	TCP_SKB_CB(buff)->seq = tp->write_seq++;
 	TCP_SKB_CB(buff)->end_seq = tp->write_seq;
-	tp->snd_nxt = tp->write_seq;
-	tp->pushed_seq = tp->write_seq;
 
 	/* Send it off. */
 	TCP_SKB_CB(buff)->when = tcp_time_stamp;
@@ -2170,6 +2170,12 @@ int tcp_connect(struct sock *sk)
 	sk_charge_skb(sk, buff);
 	tp->packets_out += tcp_skb_pcount(buff);
 	tcp_transmit_skb(sk, buff, 1, GFP_KERNEL);
+
+	/* We change tp->snd_nxt after the tcp_transmit_skb() call
+	 * in order to make this packet get counted in tcpOutSegs.
+	 */
+	tp->snd_nxt = tp->write_seq;
+	tp->pushed_seq = tp->write_seq;
 	TCP_INC_STATS(TCP_MIB_ACTIVEOPENS);
 
 	/* Timer for repeating the SYN until an answer. */
@@ -130,11 +130,12 @@ static ssize_t tcpprobe_read(struct file *file, char __user *buf,
 	error = wait_event_interruptible(tcpw.wait,
 					 __kfifo_len(tcpw.fifo) != 0);
 	if (error)
-		return error;
+		goto out_free;
 
 	cnt = kfifo_get(tcpw.fifo, tbuf, len);
 	error = copy_to_user(buf, tbuf, cnt);
 
+out_free:
 	vfree(tbuf);
 
 	return error ? error : cnt;
@@ -1909,11 +1909,11 @@ static int inet6_addr_add(int ifindex, struct in6_addr *pfx, int plen,
 	ifp = ipv6_add_addr(idev, pfx, plen, scope, ifa_flags);
 
 	if (!IS_ERR(ifp)) {
-		spin_lock(&ifp->lock);
+		spin_lock_bh(&ifp->lock);
 		ifp->valid_lft = valid_lft;
 		ifp->prefered_lft = prefered_lft;
 		ifp->tstamp = jiffies;
-		spin_unlock(&ifp->lock);
+		spin_unlock_bh(&ifp->lock);
 
 		addrconf_dad_start(ifp, 0);
 		in6_ifa_put(ifp);
@@ -1095,7 +1095,7 @@ alloc_new_skb:
 			skb_prev->csum = csum_sub(skb_prev->csum,
 						  skb->csum);
 			data += fraggap;
-			skb_trim(skb_prev, maxfraglen);
+			pskb_trim_unique(skb_prev, maxfraglen);
 		}
 		copy = datalen - transhdrlen - fraggap;
 		if (copy < 0) {
@@ -1398,23 +1398,39 @@ static int __init ip6_tables_init(void)
 {
 	int ret;
 
-	xt_proto_init(AF_INET6);
+	ret = xt_proto_init(AF_INET6);
+	if (ret < 0)
+		goto err1;
 
 	/* Noone else will be downing sem now, so we won't sleep */
-	xt_register_target(&ip6t_standard_target);
-	xt_register_target(&ip6t_error_target);
-	xt_register_match(&icmp6_matchstruct);
+	ret = xt_register_target(&ip6t_standard_target);
+	if (ret < 0)
+		goto err2;
+	ret = xt_register_target(&ip6t_error_target);
+	if (ret < 0)
+		goto err3;
+	ret = xt_register_match(&icmp6_matchstruct);
+	if (ret < 0)
+		goto err4;
 
 	/* Register setsockopt */
 	ret = nf_register_sockopt(&ip6t_sockopts);
-	if (ret < 0) {
-		duprintf("Unable to register sockopts.\n");
-		xt_proto_fini(AF_INET6);
-		return ret;
-	}
+	if (ret < 0)
+		goto err5;
 
 	printk("ip6_tables: (C) 2000-2006 Netfilter Core Team\n");
 	return 0;
+
+err5:
+	xt_unregister_match(&icmp6_matchstruct);
+err4:
+	xt_unregister_target(&ip6t_error_target);
+err3:
+	xt_unregister_target(&ip6t_standard_target);
+err2:
+	xt_proto_fini(AF_INET6);
+err1:
+	return ret;
 }
 
 static void __exit ip6_tables_fini(void)
@@ -1642,13 +1642,17 @@ static int ipx_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_ty
 	if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL)
 		goto out;
 
-	ipx = ipx_hdr(skb);
-	ipx_pktsize = ntohs(ipx->ipx_pktsize);
+	if (!pskb_may_pull(skb, sizeof(struct ipxhdr)))
+		goto drop;
+
+	ipx_pktsize = ntohs(ipx_hdr(skb)->ipx_pktsize);
 
 	/* Too small or invalid header? */
-	if (ipx_pktsize < sizeof(struct ipxhdr) || ipx_pktsize > skb->len)
+	if (ipx_pktsize < sizeof(struct ipxhdr) ||
+	    !pskb_may_pull(skb, ipx_pktsize))
 		goto drop;
 
+	ipx = ipx_hdr(skb);
 	if (ipx->ipx_checksum != IPX_NO_CHECKSUM &&
 	    ipx->ipx_checksum != ipx_cksum(ipx, ipx_pktsize))
 		goto drop;
@@ -238,11 +238,13 @@ int lapb_setparms(struct net_device *dev, struct lapb_parms_struct *parms)
 		goto out_put;
 
 	if (lapb->state == LAPB_STATE_0) {
-		if (((parms->mode & LAPB_EXTENDED) &&
-		     (parms->window < 1 || parms->window > 127)) ||
-		    (parms->window < 1 || parms->window > 7))
-			goto out_put;
-
+		if (parms->mode & LAPB_EXTENDED) {
+			if (parms->window < 1 || parms->window > 127)
+				goto out_put;
+		} else {
+			if (parms->window < 1 || parms->window > 7)
+				goto out_put;
+		}
 		lapb->mode = parms->mode;
 		lapb->window = parms->window;
 	}
@@ -784,24 +784,20 @@ static int llc_ui_recvmsg(struct kiocb *iocb, struct socket *sock,
 		copied += used;
 		len -= used;
 
-		if (used + offset < skb->len)
-			continue;
-
-		if (!(flags & MSG_PEEK)) {
-			sk_eat_skb(sk, skb, 0);
-			*seq = 0;
-		}
+		/* For non stream protcols we get one packet per recvmsg call */
+		if (sk->sk_type != SOCK_STREAM)
+			goto copy_uaddr;
+
+		/* Partial read */
+		if (used + offset < skb->len)
+			continue;
 	} while (len > 0);
 
 	/*
 	 * According to UNIX98, msg_name/msg_namelen are ignored
 	 * on connected socket. -ANK
-	 * But... af_llc still doesn't have separate sets of methods for
-	 * SOCK_DGRAM and SOCK_STREAM :-( So we have to do this test, will
-	 * eventually fix this tho :-) -acme
 	 */
-	if (sk->sk_type == SOCK_DGRAM)
-		goto copy_uaddr;
 out:
 	release_sock(sk);
 	return copied;
@@ -51,10 +51,10 @@ void llc_save_primitive(struct sock *sk, struct sk_buff* skb, u8 prim)
 {
 	struct sockaddr_llc *addr;
 
-	if (skb->sk->sk_type == SOCK_STREAM) /* See UNIX98 */
-		return;
 	/* save primitive for use by the user. */
 	addr = llc_ui_skb_cb(skb);
+
+	memset(addr, 0, sizeof(*addr));
 	addr->sllc_family = sk->sk_family;
 	addr->sllc_arphrd = skb->dev->type;
 	addr->sllc_test = prim == LLC_TEST_PRIM;
@@ -330,6 +330,9 @@ static void llc_sap_mcast(struct llc_sap *sap,
 		if (llc->laddr.lsap != laddr->lsap)
 			continue;
 
+		if (llc->dev != skb->dev)
+			continue;
+
 		skb1 = skb_clone(skb, GFP_ATOMIC);
 		if (!skb1)
 			break;
@@ -366,6 +366,9 @@ __nfulnl_send(struct nfulnl_instance *inst)
 	if (timer_pending(&inst->timer))
 		del_timer(&inst->timer);
 
+	if (!inst->skb)
+		return 0;
+
 	if (inst->qlen > 1)
 		inst->lastnlh->nlmsg_type = NLMSG_DONE;
 
@@ -37,7 +37,7 @@ static int match(const struct sk_buff *skb,
 
 	return (skb_find_text((struct sk_buff *)skb, conf->from_offset,
 			      conf->to_offset, conf->config, &state)
-		!= UINT_MAX) && !conf->invert;
+		!= UINT_MAX) ^ conf->invert;
 }
 
 #define STRING_TEXT_PRIV(m) ((struct xt_string_info *) m)
@@ -430,7 +430,7 @@ qdisc_create(struct net_device *dev, u32 handle, struct rtattr **tca, int *errp)
 	}
 #endif
 
-	err = -EINVAL;
+	err = -ENOENT;
 	if (ops == NULL)
 		goto err_out;
 
@@ -71,7 +71,12 @@ struct cache_head *sunrpc_cache_lookup(struct cache_detail *detail,
 	new = detail->alloc();
 	if (!new)
 		return NULL;
+	/* must fully initialise 'new', else
+	 * we might get lose if we need to
+	 * cache_put it soon.
+	 */
+	cache_init(new);
+	detail->init(new, key);
 
 	write_lock(&detail->hash_lock);
 
@@ -85,7 +90,6 @@ struct cache_head *sunrpc_cache_lookup(struct cache_detail *detail,
 			return tmp;
 		}
 	}
-	detail->init(new, key);
 	new->next = *head;
 	*head = new;
 	detail->entries++;
@@ -921,26 +921,43 @@ call_transmit(struct rpc_task *task)
 	task->tk_status = xprt_prepare_transmit(task);
 	if (task->tk_status != 0)
 		return;
+	task->tk_action = call_transmit_status;
 	/* Encode here so that rpcsec_gss can use correct sequence number. */
 	if (rpc_task_need_encode(task)) {
-		task->tk_rqstp->rq_bytes_sent = 0;
+		BUG_ON(task->tk_rqstp->rq_bytes_sent != 0);
 		call_encode(task);
 		/* Did the encode result in an error condition? */
 		if (task->tk_status != 0)
-			goto out_nosend;
+			return;
 	}
-	task->tk_action = call_transmit_status;
 	xprt_transmit(task);
 	if (task->tk_status < 0)
 		return;
-	if (!task->tk_msg.rpc_proc->p_decode) {
-		task->tk_action = rpc_exit_task;
-		rpc_wake_up_task(task);
-	}
-	return;
-out_nosend:
-	/* release socket write lock before attempting to handle error */
-	xprt_abort_transmit(task);
+	/*
+	 * On success, ensure that we call xprt_end_transmit() before sleeping
+	 * in order to allow access to the socket to other RPC requests.
+	 */
+	call_transmit_status(task);
+	if (task->tk_msg.rpc_proc->p_decode != NULL)
+		return;
+	task->tk_action = rpc_exit_task;
+	rpc_wake_up_task(task);
 }
 
+/*
+ * 5a. Handle cleanup after a transmission
+ */
+static void
+call_transmit_status(struct rpc_task *task)
+{
+	task->tk_action = call_status;
+	/*
+	 * Special case: if we've been waiting on the socket's write_space()
+	 * callback, then don't call xprt_end_transmit().
+	 */
+	if (task->tk_status == -EAGAIN)
+		return;
+	xprt_end_transmit(task);
+	rpc_task_force_reencode(task);
+}
@@ -992,18 +1009,7 @@ call_status(struct rpc_task *task)
 }
 
-/*
- * 6a. Handle transmission errors.
- */
-static void
-call_transmit_status(struct rpc_task *task)
-{
-	if (task->tk_status != -EAGAIN)
-		rpc_task_force_reencode(task);
-	call_status(task);
-}
-
 /*
- * 6b. Handle RPC timeout
+ * 6a. Handle RPC timeout
  * We do not release the request slot, so we keep using the
  * same XID for all retransmits.
  */
@@ -667,10 +667,11 @@ rpc_mkdir(char *path, struct rpc_clnt *rpc_client)
 					RPCAUTH_info, RPCAUTH_EOF);
 	if (error)
 		goto err_depopulate;
+	dget(dentry);
 out:
 	mutex_unlock(&dir->i_mutex);
 	rpc_release_path(&nd);
-	return dget(dentry);
+	return dentry;
 err_depopulate:
 	rpc_depopulate(dentry);
 	__rpc_rmdir(dir, dentry);
@@ -731,10 +732,11 @@ rpc_mkpipe(char *path, void *private, struct rpc_pipe_ops *ops, int flags)
 	rpci->flags = flags;
 	rpci->ops = ops;
 	inode_dir_notify(dir, DN_CREATE);
+	dget(dentry);
 out:
 	mutex_unlock(&dir->i_mutex);
 	rpc_release_path(&nd);
-	return dget(dentry);
+	return dentry;
 err_dput:
 	dput(dentry);
 	dentry = ERR_PTR(-ENOMEM);
@@ -707,12 +707,9 @@ out_unlock:
 	return err;
 }
 
-void
-xprt_abort_transmit(struct rpc_task *task)
+void xprt_end_transmit(struct rpc_task *task)
 {
-	struct rpc_xprt *xprt = task->tk_xprt;
-
-	xprt_release_write(xprt, task);
+	xprt_release_write(task->tk_xprt, task);
 }
 
 /**
@@ -761,8 +758,6 @@ void xprt_transmit(struct rpc_task *task)
 			task->tk_status = -ENOTCONN;
 		else if (!req->rq_received)
 			rpc_sleep_on(&xprt->pending, task, NULL, xprt_timer);
-
-		xprt->ops->release_xprt(xprt, task);
 		spin_unlock_bh(&xprt->transport_lock);
 		return;
 	}
@@ -772,18 +767,8 @@ void xprt_transmit(struct rpc_task *task)
 	 * schedq, and being picked up by a parallel run of rpciod().
 	 */
 	task->tk_status = status;
-
-	switch (status) {
-	case -ECONNREFUSED:
+	if (status == -ECONNREFUSED)
 		rpc_sleep_on(&xprt->sending, task, NULL, NULL);
-	case -EAGAIN:
-	case -ENOTCONN:
-		return;
-	default:
-		break;
-	}
-	xprt_release_write(xprt, task);
-	return;
 }
 
 static inline void do_xprt_reserve(struct rpc_task *task)
@@ -413,6 +413,33 @@ static int xs_tcp_send_request(struct rpc_task *task)
 	return status;
 }
 
+/**
+ * xs_tcp_release_xprt - clean up after a tcp transmission
+ * @xprt: transport
+ * @task: rpc task
+ *
+ * This cleans up if an error causes us to abort the transmission of a request.
+ * In this case, the socket may need to be reset in order to avoid confusing
+ * the server.
+ */
+static void xs_tcp_release_xprt(struct rpc_xprt *xprt, struct rpc_task *task)
+{
+	struct rpc_rqst *req;
+
+	if (task != xprt->snd_task)
+		return;
+	if (task == NULL)
+		goto out_release;
+	req = task->tk_rqstp;
+	if (req->rq_bytes_sent == 0)
+		goto out_release;
+	if (req->rq_bytes_sent == req->rq_snd_buf.len)
+		goto out_release;
+	set_bit(XPRT_CLOSE_WAIT, &task->tk_xprt->state);
+out_release:
+	xprt_release_xprt(xprt, task);
+}
+
 /**
  * xs_close - close a socket
  * @xprt: transport
@@ -1250,7 +1277,7 @@ static struct rpc_xprt_ops xs_udp_ops = {
 
 static struct rpc_xprt_ops xs_tcp_ops = {
 	.reserve_xprt = xprt_reserve_xprt,
-	.release_xprt = xprt_release_xprt,
+	.release_xprt = xs_tcp_release_xprt,
 	.set_port = xs_set_port,
 	.connect = xs_connect,
 	.buf_alloc = rpc_malloc,
@@ -1134,12 +1134,33 @@ int __xfrm_route_forward(struct sk_buff *skb, unsigned short family)
 }
 EXPORT_SYMBOL(__xfrm_route_forward);
 
 /* Optimize later using cookies and generation ids. */
 
 static struct dst_entry *xfrm_dst_check(struct dst_entry *dst, u32 cookie)
 {
-	/* If it is marked obsolete, which is how we even get here,
-	 * then we have purged it from the policy bundle list and we
-	 * did that for a good reason.
+	/* Code (such as __xfrm4_bundle_create()) sets dst->obsolete
+	 * to "-1" to force all XFRM destinations to get validated by
+	 * dst_ops->check on every use.  We do this because when a
+	 * normal route referenced by an XFRM dst is obsoleted we do
+	 * not go looking around for all parent referencing XFRM dsts
+	 * so that we can invalidate them.  It is just too much work.
+	 * Instead we make the checks here on every use.  For example:
+	 *
+	 *	XFRM dst A --> IPv4 dst X
+	 *
+	 * X is the "xdst->route" of A (X is also the "dst->path" of A
+	 * in this example).  If X is marked obsolete, "A" will not
+	 * notice.  That's what we are validating here via the
+	 * stale_bundle() check.
+	 *
+	 * When a policy's bundle is pruned, we dst_free() the XFRM
+	 * dst which causes it's ->obsolete field to be set to a
+	 * positive non-zero integer.  If an XFRM dst has been pruned
+	 * like this, we want to force a new route lookup.
 	 */
+	if (dst->obsolete < 0 && !stale_bundle(dst))
+		return dst;
+
 	return NULL;
 }