
Merge branch 'net-qualcomm-rmnet-Enable-csum-offloads'

Subash Abhinov Kasiviswanathan says:

====================
net: qualcomm: rmnet: Enable csum offloads

This series introduces the MAPv4 packet format for checksum
offload plus some other minor changes.

Patches 1-3 are cleanups.

Patch 4 renames the ingress format to data format so that all data
formats can be configured using this going forward.

Patch 5 uses the pacing helper to improve TCP transmit performance.

Patches 6-9 define the MAPv4 packet format for checksum offload on
RX and TX. MAPv4 adds a new checksum header (used on TX) and a
trailer (used on RX). For RX checksum offload, hardware computes
only the 1's complement sum over the IP payload and reports it,
along with some metadata, in the trailer; the driver uses that
metadata to verify the checksum field in the packet. The IP packet
itself, including its checksum field, is never modified by
hardware. For TX, the driver fills in the required metadata so
that hardware can compute and insert the checksum. (A worked
sketch of the RX verification arithmetic follows the commit
metadata below.)

Patch 10 enables GSO on rmnet devices.

v1->v2: Fix sparse errors reported by kbuild test robot

v2->v3: Update the commit message for Patch 5 based on Eric's comments
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
David S. Miller 2018-01-08 13:58:50 -05:00
commit b60f699ee8
8 changed files with 378 additions and 25 deletions
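
As a companion to the cover letter's description of the downlink math,
here is a minimal userspace sketch of the 1's-complement arithmetic.
Everything in it is illustrative: the sums are made up (chosen to be
mutually consistent so the check passes), and the helpers only mirror
csum16_add()/csum16_sub() and the flow of
rmnet_map_ipv4_dl_csum_trailer() in the diffs below.

#include <stdint.h>
#include <stdio.h>

/* Fold a 32-bit accumulator into a 16-bit 1's-complement sum. */
static uint16_t fold(uint32_t s)
{
    while (s >> 16)
        s = (s & 0xffff) + (s >> 16);
    return (uint16_t)s;
}

/* 1's-complement add/subtract, mirroring csum16_add()/csum16_sub(). */
static uint16_t add16(uint16_t a, uint16_t b)
{
    return fold((uint32_t)a + b);
}

static uint16_t sub16(uint16_t a, uint16_t b)
{
    return add16(a, (uint16_t)~b);
}

int main(void)
{
    /* Mutually consistent made-up sums for one valid IPv4/TCP packet: */
    uint16_t trailer_sum = 0xb999; /* HW: sum over the whole IP packet */
    uint16_t iphdr_sum   = 0x46aa; /* driver: sum of the IP header words */
    uint16_t pseudo_sum  = 0x8d10; /* driver: pseudo-header sum */
    uint16_t stored      = 0x60bb; /* checksum field found in the packet */

    /* Drop the IP header's contribution, add the pseudo header, remove
     * the stored field's own contribution, and complement: the result
     * is the checksum the sender should have written. */
    uint16_t payload_sum = sub16(trailer_sum, iphdr_sum);
    uint16_t recomputed = (uint16_t)~sub16(add16(payload_sum, pseudo_sum),
                                           stored);

    puts(recomputed == stored ? "checksum ok" : "checksum bad");
    return 0;
}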

drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c

@@ -143,7 +143,7 @@ static int rmnet_newlink(struct net *src_net, struct net_device *dev,
struct nlattr *tb[], struct nlattr *data[],
struct netlink_ext_ack *extack)
{
int ingress_format = RMNET_INGRESS_FORMAT_DEAGGREGATION;
u32 data_format = RMNET_INGRESS_FORMAT_DEAGGREGATION;
struct net_device *real_dev;
int mode = RMNET_EPMODE_VND;
struct rmnet_endpoint *ep;
@@ -185,11 +185,11 @@ static int rmnet_newlink(struct net *src_net, struct net_device *dev,
struct ifla_vlan_flags *flags;
flags = nla_data(data[IFLA_VLAN_FLAGS]);
ingress_format = flags->flags & flags->mask;
data_format = flags->flags & flags->mask;
}
netdev_dbg(dev, "data format [ingress 0x%08X]\n", ingress_format);
port->ingress_data_format = ingress_format;
netdev_dbg(dev, "data format [0x%08X]\n", data_format);
port->data_format = data_format;
return 0;
@@ -353,7 +353,7 @@ static int rmnet_changelink(struct net_device *dev, struct nlattr *tb[],
struct ifla_vlan_flags *flags;
flags = nla_data(data[IFLA_VLAN_FLAGS]);
port->ingress_data_format = flags->flags & flags->mask;
port->data_format = flags->flags & flags->mask;
}
return 0;

drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h

@@ -32,7 +32,7 @@ struct rmnet_endpoint {
*/
struct rmnet_port {
struct net_device *dev;
u32 ingress_data_format;
u32 data_format;
u8 nr_rmnet_devs;
u8 rmnet_mode;
struct hlist_head muxed_ep[RMNET_MAX_LOGICAL_EP];

drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c

@@ -16,6 +16,7 @@
#include <linux/netdevice.h>
#include <linux/netdev_features.h>
#include <linux/if_arp.h>
#include <net/sock.h>
#include "rmnet_private.h"
#include "rmnet_config.h"
#include "rmnet_vnd.h"
@@ -65,19 +66,19 @@ __rmnet_map_ingress_handler(struct sk_buff *skb,
struct rmnet_port *port)
{
struct rmnet_endpoint *ep;
u16 len, pad;
u8 mux_id;
u16 len;
if (RMNET_MAP_GET_CD_BIT(skb)) {
if (port->ingress_data_format
& RMNET_INGRESS_FORMAT_MAP_COMMANDS)
if (port->data_format & RMNET_INGRESS_FORMAT_MAP_COMMANDS)
return rmnet_map_command(skb, port);
goto free_skb;
}
mux_id = RMNET_MAP_GET_MUX_ID(skb);
len = RMNET_MAP_GET_LENGTH(skb) - RMNET_MAP_GET_PAD(skb);
pad = RMNET_MAP_GET_PAD(skb);
len = RMNET_MAP_GET_LENGTH(skb) - pad;
if (mux_id >= RMNET_MAX_LOGICAL_EP)
goto free_skb;
@@ -90,8 +91,14 @@ __rmnet_map_ingress_handler(struct sk_buff *skb,
/* Subtract MAP header */
skb_pull(skb, sizeof(struct rmnet_map_header));
skb_trim(skb, len);
rmnet_set_skb_proto(skb);
if (port->data_format & RMNET_INGRESS_FORMAT_MAP_CKSUMV4) {
if (!rmnet_map_checksum_downlink_packet(skb, len + pad))
skb->ip_summed = CHECKSUM_UNNECESSARY;
}
skb_trim(skb, len);
rmnet_deliver_skb(skb);
return;
@@ -114,8 +121,8 @@ rmnet_map_ingress_handler(struct sk_buff *skb,
skb_push(skb, ETH_HLEN);
}
if (port->ingress_data_format & RMNET_INGRESS_FORMAT_DEAGGREGATION) {
while ((skbn = rmnet_map_deaggregate(skb)) != NULL)
if (port->data_format & RMNET_INGRESS_FORMAT_DEAGGREGATION) {
while ((skbn = rmnet_map_deaggregate(skb, port)) != NULL)
__rmnet_map_ingress_handler(skbn, port);
consume_skb(skb);
@@ -134,19 +141,24 @@ static int rmnet_map_egress_handler(struct sk_buff *skb,
additional_header_len = 0;
required_headroom = sizeof(struct rmnet_map_header);
if (port->data_format & RMNET_EGRESS_FORMAT_MAP_CKSUMV4) {
additional_header_len = sizeof(struct rmnet_map_ul_csum_header);
required_headroom += additional_header_len;
}
if (skb_headroom(skb) < required_headroom) {
if (pskb_expand_head(skb, required_headroom, 0, GFP_KERNEL))
goto fail;
}
if (port->data_format & RMNET_EGRESS_FORMAT_MAP_CKSUMV4)
rmnet_map_checksum_uplink_packet(skb, orig_dev);
map_header = rmnet_map_add_map_header(skb, additional_header_len, 0);
if (!map_header)
goto fail;
if (mux_id == 0xff)
map_header->mux_id = 0;
else
map_header->mux_id = mux_id;
map_header->mux_id = mux_id;
skb->protocol = htons(ETH_P_MAP);
@@ -208,6 +220,8 @@ void rmnet_egress_handler(struct sk_buff *skb)
struct rmnet_priv *priv;
u8 mux_id;
sk_pacing_shift_update(skb->sk, 8);
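/* (Annotation, not part of the diff: sk_pacing_shift_update() lowers
 * this socket's pacing shift from the default 10 to 8, so TCP small
 * queues will keep up to sk_pacing_rate >> 8 bytes - roughly 4 ms
 * rather than ~1 ms of data - queued toward the device. This is the
 * patch 5 TCP transmit performance change from the cover letter.) */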
orig_dev = skb->dev;
priv = netdev_priv(orig_dev);
skb->dev = priv->real_dev;
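
For orientation, a standalone walk of the buffer offsets the ingress
path above relies on. The sizes are hypothetical stand-ins (a real
frame's lengths come from the MAP header fields); the point is that
the checksum trailer lives past len + pad, so it has to be consumed
before skb_trim() cuts the buffer back to len.

#include <stdio.h>

int main(void)
{
    /* Hypothetical downlink frame, as handed to
     * __rmnet_map_ingress_handler() after deaggregation:
     * [MAP header][IP packet][pad][DL checksum trailer] */
    unsigned map_hdr = 4, ip_len = 60, pad = 2, trailer = 8;
    unsigned map_len = ip_len + pad;   /* RMNET_MAP_GET_LENGTH() */

    printf("frame on the wire: %u bytes\n", map_hdr + map_len + trailer);
    /* after skb_pull(map_hdr), the trailer starts at len + pad */
    printf("trailer offset: %u\n", map_len);
    /* after verification, skb_trim(len) keeps only the IP packet */
    printf("delivered up the stack: %u bytes\n", ip_len);
    return 0;
}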

drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h

@@ -47,6 +47,22 @@ struct rmnet_map_header {
u16 pkt_len;
} __aligned(1);
struct rmnet_map_dl_csum_trailer {
u8 reserved1;
u8 valid:1;
u8 reserved2:7;
u16 csum_start_offset;
u16 csum_length;
__be16 csum_value;
} __aligned(1);
struct rmnet_map_ul_csum_header {
__be16 csum_start_offset;
u16 csum_insert_offset:14;
u16 udp_ip4_ind:1;
u16 csum_enabled:1;
} __aligned(1);
#define RMNET_MAP_GET_MUX_ID(Y) (((struct rmnet_map_header *) \
(Y)->data)->mux_id)
#define RMNET_MAP_GET_CD_BIT(Y) (((struct rmnet_map_header *) \
@@ -67,10 +83,13 @@ struct rmnet_map_header {
#define RMNET_MAP_NO_PAD_BYTES 0
#define RMNET_MAP_ADD_PAD_BYTES 1
u8 rmnet_map_demultiplex(struct sk_buff *skb);
struct sk_buff *rmnet_map_deaggregate(struct sk_buff *skb);
struct sk_buff *rmnet_map_deaggregate(struct sk_buff *skb,
struct rmnet_port *port);
struct rmnet_map_header *rmnet_map_add_map_header(struct sk_buff *skb,
int hdrlen, int pad);
void rmnet_map_command(struct sk_buff *skb, struct rmnet_port *port);
int rmnet_map_checksum_downlink_packet(struct sk_buff *skb, u16 len);
void rmnet_map_checksum_uplink_packet(struct sk_buff *skb,
struct net_device *orig_dev);
#endif /* _RMNET_MAP_H_ */
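
As a concrete, hypothetical instance of the uplink header above: for
a TCP segment behind an option-less IPv4 header, csum_start_offset is
the 20-byte distance from the IP header to the transport header, and
csum_insert_offset is skb->csum_offset, i.e. offsetof(struct tcphdr,
check), matching how the uplink code in rmnet_map_data.c below fills
the fields in.

#include <stdio.h>

int main(void)
{
    unsigned ihl_words = 5;                     /* IPv4 header, no options */
    unsigned csum_start_offset = ihl_words * 4; /* transport hdr at 20 */
    unsigned csum_insert_offset = 16;           /* offsetof(tcphdr, check) */

    /* For this packet the driver would also set csum_enabled = 1 and
     * udp_ip4_ind = 0; hardware writes the finished checksum
     * csum_insert_offset bytes past the point csum_start_offset selects. */
    printf("start=%u insert=%u\n", csum_start_offset, csum_insert_offset);
    return 0;
}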

drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c

@@ -58,11 +58,24 @@ static u8 rmnet_map_do_flow_control(struct sk_buff *skb,
}
static void rmnet_map_send_ack(struct sk_buff *skb,
unsigned char type)
unsigned char type,
struct rmnet_port *port)
{
struct rmnet_map_control_command *cmd;
int xmit_status;
if (port->data_format & RMNET_INGRESS_FORMAT_MAP_CKSUMV4) {
if (skb->len < sizeof(struct rmnet_map_header) +
RMNET_MAP_GET_LENGTH(skb) +
sizeof(struct rmnet_map_dl_csum_trailer)) {
kfree_skb(skb);
return;
}
skb_trim(skb, skb->len -
sizeof(struct rmnet_map_dl_csum_trailer));
}
skb->protocol = htons(ETH_P_MAP);
cmd = RMNET_MAP_GET_CMD_START(skb);
@@ -100,5 +113,5 @@ void rmnet_map_command(struct sk_buff *skb, struct rmnet_port *port)
break;
}
if (rc == RMNET_MAP_COMMAND_ACK)
rmnet_map_send_ack(skb, rc);
rmnet_map_send_ack(skb, rc, port);
}
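
To put numbers on the new length guard in rmnet_map_send_ack(): with
the 4-byte MAP header and the 8-byte DL checksum trailer defined in
rmnet_map.h, a command frame must be at least header + payload +
trailer bytes long, and the trailer is trimmed before the ack is
echoed. A tiny illustration (the payload length is made up):

#include <stdio.h>

int main(void)
{
    unsigned map_hdr = 4;  /* sizeof(struct rmnet_map_header) */
    unsigned trailer = 8;  /* sizeof(struct rmnet_map_dl_csum_trailer) */
    unsigned map_len = 16; /* RMNET_MAP_GET_LENGTH() of the command */

    /* Frames shorter than this are dropped; valid ones are trimmed by
     * 8 bytes so the echoed ack carries no stale trailer. */
    printf("minimum ack-able frame: %u bytes\n",
           map_hdr + map_len + trailer);
    return 0;
}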

drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c

@@ -14,6 +14,9 @@
*/
#include <linux/netdevice.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <net/ip6_checksum.h>
#include "rmnet_config.h"
#include "rmnet_map.h"
#include "rmnet_private.h"
@@ -21,6 +24,233 @@
#define RMNET_MAP_DEAGGR_SPACING 64
#define RMNET_MAP_DEAGGR_HEADROOM (RMNET_MAP_DEAGGR_SPACING / 2)
static __sum16 *rmnet_map_get_csum_field(unsigned char protocol,
const void *txporthdr)
{
__sum16 *check = NULL;
switch (protocol) {
case IPPROTO_TCP:
check = &(((struct tcphdr *)txporthdr)->check);
break;
case IPPROTO_UDP:
check = &(((struct udphdr *)txporthdr)->check);
break;
default:
check = NULL;
break;
}
return check;
}
static int
rmnet_map_ipv4_dl_csum_trailer(struct sk_buff *skb,
struct rmnet_map_dl_csum_trailer *csum_trailer)
{
__sum16 *csum_field, csum_temp, pseudo_csum, hdr_csum, ip_payload_csum;
u16 csum_value, csum_value_final;
struct iphdr *ip4h;
void *txporthdr;
__be16 addend;
ip4h = (struct iphdr *)(skb->data);
if ((ntohs(ip4h->frag_off) & IP_MF) ||
((ntohs(ip4h->frag_off) & IP_OFFSET) > 0))
return -EOPNOTSUPP;
txporthdr = skb->data + ip4h->ihl * 4;
csum_field = rmnet_map_get_csum_field(ip4h->protocol, txporthdr);
if (!csum_field)
return -EPROTONOSUPPORT;
/* RFC 768 - Skip IPv4 UDP packets where sender checksum field is 0 */
if (*csum_field == 0 && ip4h->protocol == IPPROTO_UDP)
return 0;
csum_value = ~ntohs(csum_trailer->csum_value);
hdr_csum = ~ip_fast_csum(ip4h, (int)ip4h->ihl);
ip_payload_csum = csum16_sub((__force __sum16)csum_value,
(__force __be16)hdr_csum);
pseudo_csum = ~csum_tcpudp_magic(ip4h->saddr, ip4h->daddr,
ntohs(ip4h->tot_len) - ip4h->ihl * 4,
ip4h->protocol, 0);
addend = (__force __be16)ntohs((__force __be16)pseudo_csum);
pseudo_csum = csum16_add(ip_payload_csum, addend);
addend = (__force __be16)ntohs((__force __be16)*csum_field);
csum_temp = ~csum16_sub(pseudo_csum, addend);
csum_value_final = (__force u16)csum_temp;
if (unlikely(csum_value_final == 0)) {
switch (ip4h->protocol) {
case IPPROTO_UDP:
/* RFC 768 - DL4 1's complement rule for UDP csum 0 */
csum_value_final = ~csum_value_final;
break;
case IPPROTO_TCP:
/* DL4 Non-RFC compliant TCP checksum found */
if (*csum_field == (__force __sum16)0xFFFF)
csum_value_final = ~csum_value_final;
break;
}
}
if (csum_value_final == ntohs((__force __be16)*csum_field))
return 0;
else
return -EINVAL;
}
#if IS_ENABLED(CONFIG_IPV6)
static int
rmnet_map_ipv6_dl_csum_trailer(struct sk_buff *skb,
struct rmnet_map_dl_csum_trailer *csum_trailer)
{
__sum16 *csum_field, ip6_payload_csum, pseudo_csum, csum_temp;
u16 csum_value, csum_value_final;
__be16 ip6_hdr_csum, addend;
struct ipv6hdr *ip6h;
void *txporthdr;
u32 length;
ip6h = (struct ipv6hdr *)(skb->data);
txporthdr = skb->data + sizeof(struct ipv6hdr);
csum_field = rmnet_map_get_csum_field(ip6h->nexthdr, txporthdr);
if (!csum_field)
return -EPROTONOSUPPORT;
csum_value = ~ntohs(csum_trailer->csum_value);
ip6_hdr_csum = (__force __be16)
~ntohs((__force __be16)ip_compute_csum(ip6h,
(int)(txporthdr - (void *)(skb->data))));
ip6_payload_csum = csum16_sub((__force __sum16)csum_value,
ip6_hdr_csum);
length = (ip6h->nexthdr == IPPROTO_UDP) ?
ntohs(((struct udphdr *)txporthdr)->len) :
ntohs(ip6h->payload_len);
pseudo_csum = ~(csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
length, ip6h->nexthdr, 0));
addend = (__force __be16)ntohs((__force __be16)pseudo_csum);
pseudo_csum = csum16_add(ip6_payload_csum, addend);
addend = (__force __be16)ntohs((__force __be16)*csum_field);
csum_temp = ~csum16_sub(pseudo_csum, addend);
csum_value_final = (__force u16)csum_temp;
if (unlikely(csum_value_final == 0)) {
switch (ip6h->nexthdr) {
case IPPROTO_UDP:
/* RFC 2460 section 8.1
* DL6 One's complement rule for UDP checksum 0
*/
csum_value_final = ~csum_value_final;
break;
case IPPROTO_TCP:
/* DL6 Non-RFC compliant TCP checksum found */
if (*csum_field == (__force __sum16)0xFFFF)
csum_value_final = ~csum_value_final;
break;
}
}
if (csum_value_final == ntohs((__force __be16)*csum_field))
return 0;
else
return -EINVAL;
}
#endif
static void rmnet_map_complement_ipv4_txporthdr_csum_field(void *iphdr)
{
struct iphdr *ip4h = (struct iphdr *)iphdr;
void *txphdr;
u16 *csum;
txphdr = iphdr + ip4h->ihl * 4;
if (ip4h->protocol == IPPROTO_TCP || ip4h->protocol == IPPROTO_UDP) {
csum = (u16 *)rmnet_map_get_csum_field(ip4h->protocol, txphdr);
*csum = ~(*csum);
}
}
static void
rmnet_map_ipv4_ul_csum_header(void *iphdr,
struct rmnet_map_ul_csum_header *ul_header,
struct sk_buff *skb)
{
struct iphdr *ip4h = (struct iphdr *)iphdr;
__be16 *hdr = (__be16 *)ul_header, offset;
offset = htons((__force u16)(skb_transport_header(skb) -
(unsigned char *)iphdr));
ul_header->csum_start_offset = offset;
ul_header->csum_insert_offset = skb->csum_offset;
ul_header->csum_enabled = 1;
if (ip4h->protocol == IPPROTO_UDP)
ul_header->udp_ip4_ind = 1;
else
ul_header->udp_ip4_ind = 0;
/* Changing remaining fields to network order */
hdr++;
*hdr = htons((__force u16)*hdr);
skb->ip_summed = CHECKSUM_NONE;
rmnet_map_complement_ipv4_txporthdr_csum_field(iphdr);
}
#if IS_ENABLED(CONFIG_IPV6)
static void rmnet_map_complement_ipv6_txporthdr_csum_field(void *ip6hdr)
{
struct ipv6hdr *ip6h = (struct ipv6hdr *)ip6hdr;
void *txphdr;
u16 *csum;
txphdr = ip6hdr + sizeof(struct ipv6hdr);
if (ip6h->nexthdr == IPPROTO_TCP || ip6h->nexthdr == IPPROTO_UDP) {
csum = (u16 *)rmnet_map_get_csum_field(ip6h->nexthdr, txphdr);
*csum = ~(*csum);
}
}
static void
rmnet_map_ipv6_ul_csum_header(void *ip6hdr,
struct rmnet_map_ul_csum_header *ul_header,
struct sk_buff *skb)
{
__be16 *hdr = (__be16 *)ul_header, offset;
offset = htons((__force u16)(skb_transport_header(skb) -
(unsigned char *)ip6hdr));
ul_header->csum_start_offset = offset;
ul_header->csum_insert_offset = skb->csum_offset;
ul_header->csum_enabled = 1;
ul_header->udp_ip4_ind = 0;
/* Changing remaining fields to network order */
hdr++;
*hdr = htons((__force u16)*hdr);
skb->ip_summed = CHECKSUM_NONE;
rmnet_map_complement_ipv6_txporthdr_csum_field(ip6hdr);
}
#endif
/* Adds MAP header to front of skb->data
* Padding is calculated and set appropriately in MAP header. Mux ID is
* initialized to 0.
@@ -32,9 +262,6 @@ struct rmnet_map_header *rmnet_map_add_map_header(struct sk_buff *skb,
u32 padding, map_datalen;
u8 *padbytes;
if (skb_headroom(skb) < sizeof(struct rmnet_map_header))
return NULL;
map_datalen = skb->len - hdrlen;
map_header = (struct rmnet_map_header *)
skb_push(skb, sizeof(struct rmnet_map_header));
@@ -69,7 +296,8 @@ done:
* returned, indicating that there are no more packets to deaggregate. Caller
* is responsible for freeing the original skb.
*/
struct sk_buff *rmnet_map_deaggregate(struct sk_buff *skb)
struct sk_buff *rmnet_map_deaggregate(struct sk_buff *skb,
struct rmnet_port *port)
{
struct rmnet_map_header *maph;
struct sk_buff *skbn;
@@ -81,6 +309,9 @@ struct sk_buff *rmnet_map_deaggregate(struct sk_buff *skb)
maph = (struct rmnet_map_header *)skb->data;
packet_len = ntohs(maph->pkt_len) + sizeof(struct rmnet_map_header);
if (port->data_format & RMNET_INGRESS_FORMAT_MAP_CKSUMV4)
packet_len += sizeof(struct rmnet_map_dl_csum_trailer);
if (((int)skb->len - (int)packet_len) < 0)
return NULL;
@@ -100,3 +331,73 @@ struct sk_buff *rmnet_map_deaggregate(struct sk_buff *skb)
return skbn;
}
/* Validates packet checksums. Function takes a pointer to
* the beginning of a buffer which contains the IP payload +
* padding + checksum trailer.
* Only IPv4 and IPv6 are supported along with TCP & UDP.
* Fragmented or tunneled packets are not supported.
*/
int rmnet_map_checksum_downlink_packet(struct sk_buff *skb, u16 len)
{
struct rmnet_map_dl_csum_trailer *csum_trailer;
if (unlikely(!(skb->dev->features & NETIF_F_RXCSUM)))
return -EOPNOTSUPP;
csum_trailer = (struct rmnet_map_dl_csum_trailer *)(skb->data + len);
if (!csum_trailer->valid)
return -EINVAL;
if (skb->protocol == htons(ETH_P_IP))
return rmnet_map_ipv4_dl_csum_trailer(skb, csum_trailer);
else if (skb->protocol == htons(ETH_P_IPV6))
#if IS_ENABLED(CONFIG_IPV6)
return rmnet_map_ipv6_dl_csum_trailer(skb, csum_trailer);
#else
return -EPROTONOSUPPORT;
#endif
return 0;
}
/* Generates UL checksum meta info header for IPv4 and IPv6 over TCP and UDP
* packets that are supported for UL checksum offload.
*/
void rmnet_map_checksum_uplink_packet(struct sk_buff *skb,
struct net_device *orig_dev)
{
struct rmnet_map_ul_csum_header *ul_header;
void *iphdr;
ul_header = (struct rmnet_map_ul_csum_header *)
skb_push(skb, sizeof(struct rmnet_map_ul_csum_header));
if (unlikely(!(orig_dev->features &
(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM))))
goto sw_csum;
if (skb->ip_summed == CHECKSUM_PARTIAL) {
iphdr = (char *)ul_header +
sizeof(struct rmnet_map_ul_csum_header);
if (skb->protocol == htons(ETH_P_IP)) {
rmnet_map_ipv4_ul_csum_header(iphdr, ul_header, skb);
return;
} else if (skb->protocol == htons(ETH_P_IPV6)) {
#if IS_ENABLED(CONFIG_IPV6)
rmnet_map_ipv6_ul_csum_header(iphdr, ul_header, skb);
return;
#else
goto sw_csum;
#endif
}
}
sw_csum:
ul_header->csum_start_offset = 0;
ul_header->csum_insert_offset = 0;
ul_header->csum_enabled = 0;
ul_header->udp_ip4_ind = 0;
}
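
One detail worth unpacking from rmnet_map_ipv4_dl_csum_trailer()
above: hdr_csum = ~ip_fast_csum(...) recovers the folded sum of the
IP header words, which for a valid header (checksum field included)
is 0xffff. A standalone check with a hand-built header, all field
values hypothetical:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* 20-byte IPv4 header: ver/ihl+tos, total len, id, flags/frag,
     * ttl 64 + protocol ICMP, checksum, then 192.168.0.1 -> 192.168.0.2 */
    uint16_t hdr[10] = { 0x4500, 0x0054, 0x0000, 0x4000,
                         0x4001, 0xb955, 0xc0a8, 0x0001,
                         0xc0a8, 0x0002 };
    uint32_t sum = 0;
    int i;

    for (i = 0; i < 10; i++)
        sum += hdr[i];
    while (sum >> 16)
        sum = (sum & 0xffff) + (sum >> 16);

    printf("header sum: 0x%04x\n", (unsigned)sum); /* prints 0xffff */
    return 0;
}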

drivers/net/ethernet/qualcomm/rmnet/rmnet_private.h

@@ -21,6 +21,8 @@
/* Constants */
#define RMNET_INGRESS_FORMAT_DEAGGREGATION BIT(0)
#define RMNET_INGRESS_FORMAT_MAP_COMMANDS BIT(1)
#define RMNET_INGRESS_FORMAT_MAP_CKSUMV4 BIT(2)
#define RMNET_EGRESS_FORMAT_MAP_CKSUMV4 BIT(3)
/* Replace skb->dev to a virtual rmnet device and pass up the stack */
#define RMNET_EPMODE_VND (1)
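
For reference, a userspace-side view of how these bits could be
combined into the data_format value that rmnet_newlink() stores (the
constants are copied from above; the particular combination is only
an example):

#include <stdint.h>
#include <stdio.h>

#define RMNET_INGRESS_FORMAT_DEAGGREGATION  (1u << 0)
#define RMNET_INGRESS_FORMAT_MAP_COMMANDS   (1u << 1)
#define RMNET_INGRESS_FORMAT_MAP_CKSUMV4    (1u << 2)
#define RMNET_EGRESS_FORMAT_MAP_CKSUMV4     (1u << 3)

int main(void)
{
    /* flags & mask from IFLA_VLAN_FLAGS becomes port->data_format */
    uint32_t flags = RMNET_INGRESS_FORMAT_DEAGGREGATION |
                     RMNET_INGRESS_FORMAT_MAP_CKSUMV4 |
                     RMNET_EGRESS_FORMAT_MAP_CKSUMV4;
    uint32_t mask = 0xffffffff;

    printf("data format [0x%08X]\n", flags & mask);
    return 0;
}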

drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c

@@ -188,6 +188,10 @@ int rmnet_vnd_newlink(u8 id, struct net_device *rmnet_dev,
if (rmnet_get_endpoint(port, id))
return -EBUSY;
rmnet_dev->hw_features = NETIF_F_RXCSUM;
rmnet_dev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
rmnet_dev->hw_features |= NETIF_F_SG;
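/* (Annotation, not part of the diff: advertising these in hw_features
 * rather than features keeps RX checksum, TX checksum and SG togglable
 * via ethtool -K. NETIF_F_SG matters because the stack drops GSO when
 * scatter-gather is absent, and patch 10 enables GSO on these devices.) */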
rc = register_netdevice(rmnet_dev);
if (!rc) {
ep->egress_dev = rmnet_dev;