
net offloading: Pass features into netif_needs_gso().

Now that there is a single function that can compute the device
features relevant to a packet, we don't want to run it for each
offload.  This converts netif_needs_gso() to take the features
of the device, rather than computing them itself.

Signed-off-by: Jesse Gross <jesse@nicira.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Jesse Gross 2011-01-09 06:23:32 +00:00 committed by David S. Miller
parent f01a5236bd
commit fc741216db
3 changed files with 10 additions and 12 deletions
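The core of the change is a calling-convention shift: netif_skb_features() is computed once per packet by the caller, and the result is passed into each offload check such as netif_needs_gso(), instead of being recomputed inside every helper. A minimal, self-contained sketch of that pattern follows; fake_skb, features_of(), needs_gso() and needs_linearize() are illustrative stand-ins for the kernel types and helpers, not real APIs.

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-in for struct sk_buff. */
struct fake_skb {
        bool gso;           /* packet needs segmentation (skb_is_gso()) */
        bool csum_partial;  /* skb->ip_summed == CHECKSUM_PARTIAL */
};

/* Illustrative stand-ins for NETIF_F_* feature bits. */
#define F_SG    0x1
#define F_TSO   0x2

/* Stand-in for netif_skb_features(): potentially costly, so call it once. */
static int features_of(const struct fake_skb *skb)
{
        (void)skb;
        return F_SG | F_TSO;
}

/* Mirrors the new netif_needs_gso(): takes the precomputed feature mask. */
static bool needs_gso(const struct fake_skb *skb, int features)
{
        return skb->gso && (!(features & F_TSO) || !skb->csum_partial);
}

/* Another hypothetical offload check that reuses the same mask. */
static bool needs_linearize(const struct fake_skb *skb, int features)
{
        (void)skb;
        return !(features & F_SG);
}

int main(void)
{
        struct fake_skb skb = { .gso = true, .csum_partial = true };

        /* The caller computes the mask once ... */
        int features = features_of(&skb);

        /* ... and threads it through every offload decision. */
        printf("needs_gso=%d needs_linearize=%d\n",
               needs_gso(&skb, features), needs_linearize(&skb, features));
        return 0;
}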

drivers/net/xen-netfront.c

@@ -488,7 +488,7 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
         if (unlikely(!netif_carrier_ok(dev) ||
                      (frags > 1 && !xennet_can_sg(dev)) ||
-                     netif_needs_gso(dev, skb))) {
+                     netif_needs_gso(skb, netif_skb_features(skb)))) {
                 spin_unlock_irq(&np->tx_lock);
                 goto drop;
         }

include/linux/netdevice.h

@@ -2317,16 +2317,10 @@ static inline int skb_gso_ok(struct sk_buff *skb, int features)
                (!skb_has_frag_list(skb) || (features & NETIF_F_FRAGLIST));
 }
 
-static inline int netif_needs_gso(struct net_device *dev, struct sk_buff *skb)
+static inline int netif_needs_gso(struct sk_buff *skb, int features)
 {
-        if (skb_is_gso(skb)) {
-                int features = netif_skb_features(skb);
-
-                return (!skb_gso_ok(skb, features) ||
-                        unlikely(skb->ip_summed != CHECKSUM_PARTIAL));
-        }
-
-        return 0;
+        return skb_is_gso(skb) && (!skb_gso_ok(skb, features) ||
+                unlikely(skb->ip_summed != CHECKSUM_PARTIAL));
 }
 
 static inline void netif_set_gso_max_size(struct net_device *dev,
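
The netdevice.h hunk also collapses the old if/return body into a single expression; the two forms are behaviorally identical. A standalone sketch that checks this exhaustively, with the three inputs stubbed out as plain booleans (old_form() and new_form() are illustrative names, not kernel code):

#include <assert.h>
#include <stdbool.h>

/* Old shape: branch on skb_is_gso(), then test skb_gso_ok() and the checksum. */
static bool old_form(bool is_gso, bool gso_ok, bool csum_partial)
{
        if (is_gso)
                return !gso_ok || !csum_partial;
        return false;
}

/* New shape: one expression, short-circuiting on is_gso first. */
static bool new_form(bool is_gso, bool gso_ok, bool csum_partial)
{
        return is_gso && (!gso_ok || !csum_partial);
}

int main(void)
{
        /* Compare both forms over every combination of the three inputs. */
        for (int i = 0; i < 8; i++) {
                bool a = i & 1, b = i & 2, c = i & 4;
                assert(old_form(a, b, c) == new_form(a, b, c));
        }
        return 0;
}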

net/core/dev.c

@@ -2086,6 +2086,8 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
         int rc = NETDEV_TX_OK;
 
         if (likely(!skb->next)) {
+                int features;
+
                 /*
                  * If device doesnt need skb->dst, release it right now while
                  * its hot in this cpu cache
@@ -2098,8 +2100,10 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
 
                 skb_orphan_try(skb);
 
+                features = netif_skb_features(skb);
+
                 if (vlan_tx_tag_present(skb) &&
-                    !(dev->features & NETIF_F_HW_VLAN_TX)) {
+                    !(features & NETIF_F_HW_VLAN_TX)) {
                         skb = __vlan_put_tag(skb, vlan_tx_tag_get(skb));
                         if (unlikely(!skb))
                                 goto out;
@@ -2107,7 +2111,7 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
                         skb->vlan_tci = 0;
                 }
 
-                if (netif_needs_gso(dev, skb)) {
+                if (netif_needs_gso(skb, features)) {
                         if (unlikely(dev_gso_segment(skb)))
                                 goto out_kfree_skb;
                         if (skb->next)