mlx4: only pull headers into skb head

Use the new fancy eth_get_headlen() to pull exactly the headers
into skb->head.

This speeds up GRE traffic (or more generally tunneled traffic),
as GRO can aggregate up to 17 MSS per GRO packet instead of 8.

(Pulling too much data was forcing GRO to keep 2 frags per MSS)

Signed-off-by: Eric Dumazet <edumazet@google.com>
Cc: Amir Vadai <amirv@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Eric Dumazet 2014-09-05 18:29:45 -07:00 committed by David S. Miller
parent 126859b992
commit cfecec56ae
1 changed file with 8 additions and 5 deletions

@@ -588,6 +588,8 @@ static struct sk_buff *mlx4_en_rx_skb(struct mlx4_en_priv *priv,
 		skb_copy_to_linear_data(skb, va, length);
 		skb->tail += length;
 	} else {
+		unsigned int pull_len;
+
 		/* Move relevant fragments to skb */
 		used_frags = mlx4_en_complete_rx_desc(priv, rx_desc, frags,
 						      skb, length);
@@ -597,16 +599,17 @@ static struct sk_buff *mlx4_en_rx_skb(struct mlx4_en_priv *priv,
 		}
 		skb_shinfo(skb)->nr_frags = used_frags;
 
+		pull_len = eth_get_headlen(va, SMALL_PACKET_SIZE);
 		/* Copy headers into the skb linear buffer */
-		memcpy(skb->data, va, HEADER_COPY_SIZE);
-		skb->tail += HEADER_COPY_SIZE;
+		memcpy(skb->data, va, pull_len);
+		skb->tail += pull_len;
 
 		/* Skip headers in first fragment */
-		skb_shinfo(skb)->frags[0].page_offset += HEADER_COPY_SIZE;
+		skb_shinfo(skb)->frags[0].page_offset += pull_len;
 
 		/* Adjust size of first fragment */
-		skb_frag_size_sub(&skb_shinfo(skb)->frags[0], HEADER_COPY_SIZE);
-		skb->data_len = length - HEADER_COPY_SIZE;
+		skb_frag_size_sub(&skb_shinfo(skb)->frags[0], pull_len);
+		skb->data_len = length - pull_len;
 	}
 	return skb;
 }
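
For context, a minimal standalone sketch of the pattern the hunks above adopt. The helper name rx_pull_headers() is illustrative, not driver code; eth_get_headlen() is shown in the two-argument form used by kernels of this era, and SMALL_PACKET_SIZE is the mlx4 driver constant referenced in the diff. Because only the parsed header bytes land in skb->head, each received MSS keeps a single page fragment, so a GRO skb can hold up to MAX_SKB_FRAGS (17 with 4 KiB pages) segments instead of the 8 that fit when every MSS costs two fragments.

/* Sketch only (assumes <linux/skbuff.h> and <linux/etherdevice.h>):
 * size the linear pull by parsing the real headers instead of copying
 * a fixed HEADER_COPY_SIZE worth of bytes.
 */
static void rx_pull_headers(struct sk_buff *skb, void *va, unsigned int length)
{
	/* eth_get_headlen() walks the Ethernet/IP/L4 (and encapsulation)
	 * headers in the first fragment and returns their total length,
	 * capped at the second argument.
	 */
	unsigned int pull_len = eth_get_headlen(va, SMALL_PACKET_SIZE);

	/* Headers go to the linear area ... */
	memcpy(skb->data, va, pull_len);
	skb->tail += pull_len;

	/* ... while the payload stays in the first page fragment,
	 * which now starts right after the pulled headers.
	 * (skb->len is assumed to have been set to 'length' already,
	 * as it is in the driver's receive path.)
	 */
	skb_shinfo(skb)->frags[0].page_offset += pull_len;
	skb_frag_size_sub(&skb_shinfo(skb)->frags[0], pull_len);
	skb->data_len = length - pull_len;
}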