net: Introduce skb_tunnel_rx() helper

skb rxhash should be cleared when a skb is handled by a tunnel before
being delivered again, so that correct packet steering can take place.

There are other cleanups and accounting that we can factor into a new
helper, skb_tunnel_rx().

Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Eric Dumazet 2010-05-17 22:36:55 -07:00 committed by David S. Miller
parent de213e5eed
commit d19d56ddc8
7 changed files with 34 additions and 34 deletions
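
As context for the per-driver changes below, here is a minimal sketch (not part of the commit) of how a tunnel receive path is expected to use the new helper once decapsulation is done. The function name example_tunnel_rcv and the tunnel_dev argument are hypothetical, and ETH_P_IP is only an example inner protocol; the call sequence mirrors the converted drivers in this patch.

#include <linux/if_ether.h>
#include <linux/if_packet.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <net/dst.h>

/* Hypothetical receive path of a tunnel driver, called after the outer
 * header has been stripped from the skb.
 */
static int example_tunnel_rcv(struct sk_buff *skb, struct net_device *tunnel_dev)
{
	skb->protocol = htons(ETH_P_IP);	/* example: inner packet is IPv4 */
	skb->pkt_type = PACKET_HOST;

	/*
	 * One call replaces the open-coded skb->dev assignment, rx stats
	 * updates, skb_dst_drop() and nf_reset() removed from each driver
	 * below, and additionally clears rxhash so packet steering is
	 * recomputed for the inner packet.
	 */
	skb_tunnel_rx(skb, tunnel_dev);

	netif_rx(skb);
	return 0;
}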

View File

@@ -226,6 +226,26 @@ static inline void skb_dst_force(struct sk_buff *skb)
}
}
/**
 * skb_tunnel_rx - prepare skb for rx reinsert
 * @skb: buffer
 * @dev: tunnel device
 *
 * After decapsulation, packet is going to re-enter (netif_rx()) our stack,
 * so make some cleanups, and perform accounting.
 */
static inline void skb_tunnel_rx(struct sk_buff *skb, struct net_device *dev)
{
	skb->dev = dev;
	/* TODO : stats should be SMP safe */
	dev->stats.rx_packets++;
	dev->stats.rx_bytes += skb->len;
	skb->rxhash = 0;
	skb_dst_drop(skb);
	nf_reset(skb);
}
/* Children define the path of the packet through the
* Linux networking. Thus, destinations are stackable.
*/

View File

@@ -538,7 +538,6 @@ static int ipgre_rcv(struct sk_buff *skb)
struct ip_tunnel *tunnel;
int offset = 4;
__be16 gre_proto;
unsigned int len;
if (!pskb_may_pull(skb, 16))
goto drop_nolock;
@@ -629,8 +628,6 @@ static int ipgre_rcv(struct sk_buff *skb)
tunnel->i_seqno = seqno + 1;
}
len = skb->len;
/* Warning: All skb pointers will be invalidated! */
if (tunnel->dev->type == ARPHRD_ETHER) {
if (!pskb_may_pull(skb, ETH_HLEN)) {
@@ -644,11 +641,7 @@ static int ipgre_rcv(struct sk_buff *skb)
skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
}
stats->rx_packets++;
stats->rx_bytes += len;
skb->dev = tunnel->dev;
skb_dst_drop(skb);
nf_reset(skb);
skb_tunnel_rx(skb, tunnel->dev);
skb_reset_network_header(skb);
ipgre_ecn_decapsulate(iph, skb);

View File

@@ -374,11 +374,8 @@ static int ipip_rcv(struct sk_buff *skb)
skb->protocol = htons(ETH_P_IP);
skb->pkt_type = PACKET_HOST;
tunnel->dev->stats.rx_packets++;
tunnel->dev->stats.rx_bytes += skb->len;
skb->dev = tunnel->dev;
skb_dst_drop(skb);
nf_reset(skb);
skb_tunnel_rx(skb, tunnel->dev);
ipip_ecn_decapsulate(iph, skb);
netif_rx(skb);
rcu_read_unlock();

View File

@@ -1831,14 +1831,12 @@ static int __pim_rcv(struct mr_table *mrt, struct sk_buff *skb,
skb->mac_header = skb->network_header;
skb_pull(skb, (u8*)encap - skb->data);
skb_reset_network_header(skb);
skb->dev = reg_dev;
skb->protocol = htons(ETH_P_IP);
skb->ip_summed = 0;
skb->pkt_type = PACKET_HOST;
skb_dst_drop(skb);
reg_dev->stats.rx_bytes += skb->len;
reg_dev->stats.rx_packets++;
nf_reset(skb);
skb_tunnel_rx(skb, reg_dev);
netif_rx(skb);
dev_put(reg_dev);

View File

@@ -723,14 +723,10 @@ static int ip6_tnl_rcv(struct sk_buff *skb, __u16 protocol,
skb->protocol = htons(protocol);
skb->pkt_type = PACKET_HOST;
memset(skb->cb, 0, sizeof(struct inet6_skb_parm));
skb->dev = t->dev;
skb_dst_drop(skb);
nf_reset(skb);
skb_tunnel_rx(skb, t->dev);
dscp_ecn_decapsulate(t, ipv6h, skb);
t->dev->stats.rx_packets++;
t->dev->stats.rx_bytes += skb->len;
netif_rx(skb);
rcu_read_unlock();
return 0;

View File

@@ -658,14 +658,12 @@ static int pim6_rcv(struct sk_buff *skb)
skb->mac_header = skb->network_header;
skb_pull(skb, (u8 *)encap - skb->data);
skb_reset_network_header(skb);
skb->dev = reg_dev;
skb->protocol = htons(ETH_P_IPV6);
skb->ip_summed = 0;
skb->pkt_type = PACKET_HOST;
skb_dst_drop(skb);
reg_dev->stats.rx_bytes += skb->len;
reg_dev->stats.rx_packets++;
nf_reset(skb);
skb_tunnel_rx(skb, reg_dev);
netif_rx(skb);
dev_put(reg_dev);
return 0;

View File

@@ -566,11 +566,9 @@ static int ipip6_rcv(struct sk_buff *skb)
kfree_skb(skb);
return 0;
}
tunnel->dev->stats.rx_packets++;
tunnel->dev->stats.rx_bytes += skb->len;
skb->dev = tunnel->dev;
skb_dst_drop(skb);
nf_reset(skb);
skb_tunnel_rx(skb, tunnel->dev);
ipip6_ecn_decapsulate(iph, skb);
netif_rx(skb);
rcu_read_unlock();