Merge branch 'vlan-untag-and-insert-fixes'

Toshiaki Makita says:

====================
Fix vlan untag and insertion for bridge and vlan with reorder_hdr off

As Brandon Carpenter reported[1], sending non-vlan-offloaded packets from
bridge devices results in corrupted packets. He narrowed the problem down
and found that the root cause is in skb_reorder_vlan_header().

While working on a fix, I found that the function also does not work
properly for double-tagged packets with reorder_hdr off.

Patch 1 fixes these two problems in skb_reorder_vlan_header().

While testing that fix, it turned out that fixing skb_reorder_vlan_header()
alone is not sufficient to receive double-tagged packets with reorder_hdr
off: VLAN tags got out of order when vlan devices with reorder_hdr disabled
were stacked. Patch 2 fixes this problem.

[1] https://www.spinics.net/lists/linux-ethernet-bridging/msg07039.html
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
Committed by David S. Miller on 2018-03-16 10:03:48 -04:00
commit e693be293f
4 changed files with 76 additions and 28 deletions

--- a/include/linux/if_vlan.h
+++ b/include/linux/if_vlan.h
@@ -299,6 +299,44 @@ static inline bool vlan_hw_offload_capable(netdev_features_t features,
         return false;
 }
 
+/**
+ * __vlan_insert_inner_tag - inner VLAN tag inserting
+ * @skb: skbuff to tag
+ * @vlan_proto: VLAN encapsulation protocol
+ * @vlan_tci: VLAN TCI to insert
+ * @mac_len: MAC header length including outer vlan headers
+ *
+ * Inserts the VLAN tag into @skb as part of the payload at offset mac_len
+ * Returns error if skb_cow_head fails.
+ *
+ * Does not change skb->protocol so this function can be used during receive.
+ */
+static inline int __vlan_insert_inner_tag(struct sk_buff *skb,
+                                          __be16 vlan_proto, u16 vlan_tci,
+                                          unsigned int mac_len)
+{
+        struct vlan_ethhdr *veth;
+
+        if (skb_cow_head(skb, VLAN_HLEN) < 0)
+                return -ENOMEM;
+
+        skb_push(skb, VLAN_HLEN);
+
+        /* Move the mac header sans proto to the beginning of the new header. */
+        memmove(skb->data, skb->data + VLAN_HLEN, mac_len - ETH_TLEN);
+        skb->mac_header -= VLAN_HLEN;
+
+        veth = (struct vlan_ethhdr *)(skb->data + mac_len - ETH_HLEN);
+
+        /* first, the ethernet type */
+        veth->h_vlan_proto = vlan_proto;
+
+        /* now, the TCI */
+        veth->h_vlan_TCI = htons(vlan_tci);
+
+        return 0;
+}
+
 /**
  * __vlan_insert_tag - regular VLAN tag inserting
  * @skb: skbuff to tag
@@ -313,24 +351,37 @@ static inline bool vlan_hw_offload_capable(netdev_features_t features,
 static inline int __vlan_insert_tag(struct sk_buff *skb,
                                     __be16 vlan_proto, u16 vlan_tci)
 {
-        struct vlan_ethhdr *veth;
+        return __vlan_insert_inner_tag(skb, vlan_proto, vlan_tci, ETH_HLEN);
+}
 
-        if (skb_cow_head(skb, VLAN_HLEN) < 0)
-                return -ENOMEM;
+/**
+ * vlan_insert_inner_tag - inner VLAN tag inserting
+ * @skb: skbuff to tag
+ * @vlan_proto: VLAN encapsulation protocol
+ * @vlan_tci: VLAN TCI to insert
+ * @mac_len: MAC header length including outer vlan headers
+ *
+ * Inserts the VLAN tag into @skb as part of the payload at offset mac_len
+ * Returns a VLAN tagged skb. If a new skb is created, @skb is freed.
+ *
+ * Following the skb_unshare() example, in case of error, the calling function
+ * doesn't have to worry about freeing the original skb.
+ *
+ * Does not change skb->protocol so this function can be used during receive.
+ */
+static inline struct sk_buff *vlan_insert_inner_tag(struct sk_buff *skb,
+                                                    __be16 vlan_proto,
+                                                    u16 vlan_tci,
+                                                    unsigned int mac_len)
+{
+        int err;
 
-        veth = skb_push(skb, VLAN_HLEN);
-
-        /* Move the mac addresses to the beginning of the new header. */
-        memmove(skb->data, skb->data + VLAN_HLEN, 2 * ETH_ALEN);
-        skb->mac_header -= VLAN_HLEN;
-
-        /* first, the ethernet type */
-        veth->h_vlan_proto = vlan_proto;
-
-        /* now, the TCI */
-        veth->h_vlan_TCI = htons(vlan_tci);
-
-        return 0;
+        err = __vlan_insert_inner_tag(skb, vlan_proto, vlan_tci, mac_len);
+        if (err) {
+                dev_kfree_skb_any(skb);
+                return NULL;
+        }
+        return skb;
 }
 
 /**
@@ -350,14 +401,7 @@ static inline int __vlan_insert_tag(struct sk_buff *skb,
 static inline struct sk_buff *vlan_insert_tag(struct sk_buff *skb,
                                               __be16 vlan_proto, u16 vlan_tci)
 {
-        int err;
-
-        err = __vlan_insert_tag(skb, vlan_proto, vlan_tci);
-        if (err) {
-                dev_kfree_skb_any(skb);
-                return NULL;
-        }
-        return skb;
+        return vlan_insert_inner_tag(skb, vlan_proto, vlan_tci, ETH_HLEN);
 }
 
 /**

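To make the effect of the new mac_len parameter concrete, here is a small userspace sketch (plain C on a flat byte buffer; insert_inner_tag() and the sample bytes are made up for illustration, this is not the kernel API) that mimics the memmove and offset arithmetic of __vlan_insert_inner_tag() shown above:

/* vlan_insert_demo.c - mimic the byte shuffling of __vlan_insert_inner_tag()
 * on a plain buffer.  Build with: cc -o vlan_insert_demo vlan_insert_demo.c
 */
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>

#define ETH_ALEN  6
#define ETH_TLEN  2
#define ETH_HLEN  14
#define VLAN_HLEN 4

/* hdr points at the current MAC header and has VLAN_HLEN bytes of free
 * headroom in front of it; mac_len counts dst + src + any outer VLAN tags
 * + the ethertype, exactly like the new @mac_len parameter. */
static uint8_t *insert_inner_tag(uint8_t *hdr, uint16_t tpid, uint16_t tci,
                                 unsigned int mac_len)
{
        uint8_t *new_hdr = hdr - VLAN_HLEN;     /* skb_push(skb, VLAN_HLEN) */
        uint16_t be_tpid = htons(tpid), be_tci = htons(tci);

        /* Move the MAC header sans the final ethertype up by VLAN_HLEN. */
        memmove(new_hdr, hdr, mac_len - ETH_TLEN);

        /* The new tag lands right in front of the untouched inner ethertype,
         * mirroring veth = skb->data + mac_len - ETH_HLEN in the patch. */
        memcpy(new_hdr + mac_len - ETH_HLEN + 2 * ETH_ALEN, &be_tpid, ETH_TLEN);
        memcpy(new_hdr + mac_len - ETH_HLEN + 2 * ETH_ALEN + ETH_TLEN,
               &be_tci, ETH_TLEN);
        return new_hdr;
}

int main(void)
{
        uint8_t buf[VLAN_HLEN + 18] = {
                0, 0, 0, 0,                             /* headroom for the new tag */
                0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,     /* dst                      */
                0xbb, 0xbb, 0xbb, 0xbb, 0xbb, 0xbb,     /* src                      */
                0x81, 0x00, 0x00, 0x0a,                 /* outer 802.1Q tag, VID 10 */
                0x08, 0x00,                             /* ethertype (IPv4)         */
        };
        unsigned int mac_len = ETH_HLEN + VLAN_HLEN;    /* outer tag already present */
        uint8_t *hdr = insert_inner_tag(buf + VLAN_HLEN, 0x8100, 20, mac_len);

        for (unsigned int i = 0; i < mac_len + VLAN_HLEN; i++)
                printf("%02x ", hdr[i]);
        printf("\n");
        return 0;
}

With mac_len = ETH_HLEN this degenerates to the old __vlan_insert_tag() behaviour (only the two MAC addresses move); with mac_len = 18 as above it prints the outer VID-10 tag first and the new VID-20 tag directly in front of the IPv4 ethertype, i.e. the tags stay in order.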
--- a/include/uapi/linux/if_ether.h
+++ b/include/uapi/linux/if_ether.h
@@ -30,6 +30,7 @@
  */
 
 #define ETH_ALEN      6               /* Octets in one ethernet addr     */
+#define ETH_TLEN      2               /* Octets in ethernet type field   */
 #define ETH_HLEN      14              /* Total octets in header.         */
 #define ETH_ZLEN      60              /* Min. octets in frame sans FCS   */
 #define ETH_DATA_LEN  1500            /* Max. octets in payload          */

--- a/net/8021q/vlan_core.c
+++ b/net/8021q/vlan_core.c
@@ -48,8 +48,8 @@ bool vlan_do_receive(struct sk_buff **skbp)
                 * original position later
                 */
                skb_push(skb, offset);
-               skb = *skbp = vlan_insert_tag(skb, skb->vlan_proto,
-                                             skb->vlan_tci);
+               skb = *skbp = vlan_insert_inner_tag(skb, skb->vlan_proto,
+                                                   skb->vlan_tci, skb->mac_len);
                if (!skb)
                        return false;
                skb_pull(skb, offset + VLAN_HLEN);

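Why the pull after the insert is offset + VLAN_HLEN rather than just offset may not be obvious. Below is a toy model of the pointer arithmetic, with plain integers standing in for skb->data and skb_mac_header() (the offsets are made-up values, not anything the kernel computes):

/* vlan_offsets_demo.c - the push/insert/pull arithmetic of the
 * vlan_do_receive() hunk above, on integers instead of a real skb. */
#include <assert.h>
#include <stdio.h>

#define ETH_HLEN  14
#define VLAN_HLEN 4

int main(void)
{
        /* Assume the lower device is itself a vlan with reorder_hdr off, so
         * one outer tag is still in the header: mac_len is 18, not 14. */
        int mac_header = 100;                   /* skb_mac_header(skb)        */
        int mac_len    = ETH_HLEN + VLAN_HLEN;
        int data       = mac_header + mac_len;  /* skb->data, at the payload  */

        int offset = data - mac_header;         /* saved by vlan_do_receive() */

        data -= offset;                         /* skb_push(skb, offset)      */

        /* vlan_insert_inner_tag(): skb_push(VLAN_HLEN) plus
         * mac_header -= VLAN_HLEN.  The header bytes move down into the
         * headroom; the payload does not move. */
        data       -= VLAN_HLEN;
        mac_header -= VLAN_HLEN;

        data += offset + VLAN_HLEN;             /* skb_pull(skb, offset + VLAN_HLEN) */

        /* skb->data ends up back at the same payload byte, while the MAC
         * header region in front of it has grown by VLAN_HLEN. */
        assert(data == 100 + mac_len);
        printf("mac_header=%d data=%d new mac_len=%d\n",
               mac_header, data, data - mac_header);
        return 0;
}

The extra VLAN_HLEN in the pull cancels the skb_push() done inside __vlan_insert_inner_tag(), which is why skb->data does not drift even though the header in front of it grew.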
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -5020,13 +5020,16 @@ EXPORT_SYMBOL_GPL(skb_gso_validate_mac_len);
 static struct sk_buff *skb_reorder_vlan_header(struct sk_buff *skb)
 {
+        int mac_len;
+
         if (skb_cow(skb, skb_headroom(skb)) < 0) {
                 kfree_skb(skb);
                 return NULL;
         }
 
-        memmove(skb->data - ETH_HLEN, skb->data - skb->mac_len - VLAN_HLEN,
-                2 * ETH_ALEN);
+        mac_len = skb->data - skb_mac_header(skb);
+        memmove(skb_mac_header(skb) + VLAN_HLEN, skb_mac_header(skb),
+                mac_len - VLAN_HLEN - ETH_TLEN);
         skb->mac_header += VLAN_HLEN;
 
         return skb;
 }
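For completeness, here is a userspace sketch of what the rewritten skb_reorder_vlan_header() now does to the bytes (a flat buffer stands in for the skb; reorder_vlan_header() and the sample frame are illustrative, not kernel API):

/* reorder_demo.c - mimic the memmove of the new skb_reorder_vlan_header().
 * Build with: cc -o reorder_demo reorder_demo.c
 */
#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define ETH_TLEN  2
#define VLAN_HLEN 4

/* mac points at the MAC header, data just past the vlan header that
 * skb_vlan_untag() has already lifted into the hwaccel tag; returns the
 * new start of the MAC header, VLAN_HLEN further in. */
static uint8_t *reorder_vlan_header(uint8_t *mac, uint8_t *data)
{
        int mac_len = data - mac;       /* skb->data - skb_mac_header(skb) */

        /* Shift dst + src + any outer tags up by VLAN_HLEN; the inner
         * ethertype right before data stays where it is. */
        memmove(mac + VLAN_HLEN, mac, mac_len - VLAN_HLEN - ETH_TLEN);
        return mac + VLAN_HLEN;         /* skb->mac_header += VLAN_HLEN    */
}

int main(void)
{
        /* Frame as seen right before the call: one outer tag is already in
         * the header (re-inserted earlier by a lower vlan device with
         * reorder_hdr off), and the VID-20 tag has just been untagged. */
        uint8_t frame[] = {
                0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,     /* dst                     */
                0xbb, 0xbb, 0xbb, 0xbb, 0xbb, 0xbb,     /* src                     */
                0x81, 0x00, 0x00, 0x0a,                 /* outer tag, VID 10       */
                0x81, 0x00,                             /* TPID of the untagged    */
                0x00, 0x14,                             /*   tag and its TCI (20)  */
                0x08, 0x00,                             /* inner ethertype (IPv4)  */
                0x45, 0x00,                             /* first payload bytes     */
        };
        uint8_t *data = frame + 22;             /* past the consumed vlan header */
        uint8_t *mac  = reorder_vlan_header(frame, data);

        for (uint8_t *p = mac; p < data; p++)   /* the 18-byte header left over  */
                printf("%02x ", *p);
        printf("\n");
        return 0;
}

The outer VID-10 tag keeps its place in the header while the VID-20 tag that was just untagged disappears from it (conceptually it now lives in the hwaccel tag, and with reorder_hdr off vlan_do_receive() will put it back via vlan_insert_inner_tag()). This is the stacked, reorder_hdr-off case described in the cover letter above.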