
Merge branch 'qualcomm-rmnet-Add-64-bit-stats-and-GRO'

Subash Abhinov Kasiviswanathan says:

====================
net: qualcomm: rmnet: Add 64 bit stats and GRO

This series adds support for 64-bit per-CPU stats and GRO.

Patches 1-2 clean up return codes and remove a redundant condition.
Patch 3 adds support for 64-bit per-CPU stats.
Patch 4 adds support for GRO using GRO cells.

v1->v2: Since gro_cells_init() can fail, move it from device
setup to ndo_init(), as suggested by Eric.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
David S. Miller 2017-10-28 00:10:24 +09:00
commit c9d0dc4b11
6 changed files with 114 additions and 32 deletions
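
As background for patch 3: each virtual device keeps one counter block
per CPU, guarded by a u64_stats_sync seqcount so 64-bit counters cannot
be torn on 32-bit machines. The following is an illustrative sketch
only, not part of the commit; the foo_* names are invented for the
example.

#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <linux/u64_stats_sync.h>

struct foo_stats {
        u64 rx_pkts;
        struct u64_stats_sync syncp;
};

/* Writer side: runs on the local CPU (e.g. in softirq), so the
 * seqcount alone is enough to make the 64-bit update appear atomic
 * to readers on 32-bit kernels.
 */
static void foo_count_rx(struct foo_stats __percpu *pcpu)
{
        struct foo_stats *s = this_cpu_ptr(pcpu);

        u64_stats_update_begin(&s->syncp);
        s->rx_pkts++;
        u64_stats_update_end(&s->syncp);
}

/* Reader side: walk every possible CPU with per_cpu_ptr() (not
 * this_cpu_ptr(), which would keep reading the local CPU) and retry
 * the snapshot if a writer raced with it.
 */
static u64 foo_total_rx(struct foo_stats __percpu *pcpu)
{
        u64 total = 0, pkts;
        unsigned int cpu, start;

        for_each_possible_cpu(cpu) {
                struct foo_stats *s = per_cpu_ptr(pcpu, cpu);

                do {
                        start = u64_stats_fetch_begin_irq(&s->syncp);
                        pkts = s->rx_pkts;
                } while (u64_stats_fetch_retry_irq(&s->syncp, start));
                total += pkts;
        }
        return total;
}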
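
For patch 4 and the v1->v2 note: gro_cells_init() allocates per-CPU
state and can fail, so it belongs in .ndo_init(), where an error return
lets register_netdevice() unwind, rather than in the rtnl_link_ops
->setup() callback, which cannot fail. Again a hedged sketch with
invented foo_* names, mirroring the rmnet_vnd.c change below:

#include <linux/netdevice.h>
#include <net/gro_cells.h>

struct foo_priv {
        struct gro_cells gro_cells;
};

/* Fallible setup goes in .ndo_init(); its error code propagates out
 * of register_netdevice().
 */
static int foo_ndo_init(struct net_device *dev)
{
        struct foo_priv *priv = netdev_priv(dev);

        return gro_cells_init(&priv->gro_cells, dev);
}

static void foo_ndo_uninit(struct net_device *dev)
{
        struct foo_priv *priv = netdev_priv(dev);

        gro_cells_destroy(&priv->gro_cells);
}

/* Receive path: feed the per-CPU GRO cell instead of calling
 * netif_receive_skb() directly; the cell runs packets through NAPI
 * so GRO can coalesce TCP segments before the stack sees them.
 */
static void foo_rx(struct foo_priv *priv, struct sk_buff *skb)
{
        gro_cells_receive(&priv->gro_cells, skb);
}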

--- a/drivers/net/ethernet/qualcomm/rmnet/Kconfig
+++ b/drivers/net/ethernet/qualcomm/rmnet/Kconfig

@@ -5,6 +5,7 @@
 menuconfig RMNET
 	tristate "RmNet MAP driver"
 	default n
+	select GRO_CELLS
 	---help---
 	  If you select this, you will enable the RMNET module which is used
 	  for handling data in the multiplexing and aggregation protocol (MAP)

--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h

@@ -14,6 +14,7 @@
  */

 #include <linux/skbuff.h>
+#include <net/gro_cells.h>

 #ifndef _RMNET_CONFIG_H_
 #define _RMNET_CONFIG_H_
@@ -41,9 +42,24 @@ struct rmnet_port {

 extern struct rtnl_link_ops rmnet_link_ops;

+struct rmnet_vnd_stats {
+	u64 rx_pkts;
+	u64 rx_bytes;
+	u64 tx_pkts;
+	u64 tx_bytes;
+	u32 tx_drops;
+};
+
+struct rmnet_pcpu_stats {
+	struct rmnet_vnd_stats stats;
+	struct u64_stats_sync syncp;
+};
+
 struct rmnet_priv {
 	u8 mux_id;
 	struct net_device *real_dev;
+	struct rmnet_pcpu_stats __percpu *pcpu_stats;
+	struct gro_cells gro_cells;
 };

 struct rmnet_port *rmnet_get_port(struct net_device *real_dev);

--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c

@@ -43,22 +43,23 @@ static void rmnet_set_skb_proto(struct sk_buff *skb)

 /* Generic handler */

-static rx_handler_result_t
+static void
 rmnet_deliver_skb(struct sk_buff *skb)
 {
+	struct rmnet_priv *priv = netdev_priv(skb->dev);
+
 	skb_reset_transport_header(skb);
 	skb_reset_network_header(skb);
 	rmnet_vnd_rx_fixup(skb, skb->dev);

 	skb->pkt_type = PACKET_HOST;
 	skb_set_mac_header(skb, 0);
-	netif_receive_skb(skb);
-	return RX_HANDLER_CONSUMED;
+	gro_cells_receive(&priv->gro_cells, skb);
 }

 /* MAP handler */

-static rx_handler_result_t
+static void
 __rmnet_map_ingress_handler(struct sk_buff *skb,
 			    struct rmnet_port *port)
 {
@@ -84,38 +85,33 @@ __rmnet_map_ingress_handler(struct sk_buff *skb,
 	if (!ep)
 		goto free_skb;

-	if (port->ingress_data_format & RMNET_INGRESS_FORMAT_DEMUXING)
-		skb->dev = ep->egress_dev;
+	skb->dev = ep->egress_dev;

 	/* Subtract MAP header */
 	skb_pull(skb, sizeof(struct rmnet_map_header));
 	skb_trim(skb, len);
 	rmnet_set_skb_proto(skb);
-	return rmnet_deliver_skb(skb);
+	rmnet_deliver_skb(skb);
+	return;

 free_skb:
 	kfree_skb(skb);
-	return RX_HANDLER_CONSUMED;
 }

-static rx_handler_result_t
+static void
 rmnet_map_ingress_handler(struct sk_buff *skb,
 			  struct rmnet_port *port)
 {
 	struct sk_buff *skbn;
-	int rc;

 	if (port->ingress_data_format & RMNET_INGRESS_FORMAT_DEAGGREGATION) {
 		while ((skbn = rmnet_map_deaggregate(skb)) != NULL)
 			__rmnet_map_ingress_handler(skbn, port);

 		consume_skb(skb);
-		rc = RX_HANDLER_CONSUMED;
 	} else {
-		rc = __rmnet_map_ingress_handler(skb, port);
+		__rmnet_map_ingress_handler(skb, port);
 	}
-
-	return rc;
 }

 static int rmnet_map_egress_handler(struct sk_buff *skb,
@@ -149,15 +145,13 @@ static int rmnet_map_egress_handler(struct sk_buff *skb,
 	return RMNET_MAP_SUCCESS;
 }

-static rx_handler_result_t
+static void
 rmnet_bridge_handler(struct sk_buff *skb, struct net_device *bridge_dev)
 {
 	if (bridge_dev) {
 		skb->dev = bridge_dev;
 		dev_queue_xmit(skb);
 	}
-
-	return RX_HANDLER_CONSUMED;
 }

 /* Ingress / Egress Entry Points */
@@ -168,13 +162,12 @@ rmnet_bridge_handler(struct sk_buff *skb, struct net_device *bridge_dev)
  */
 rx_handler_result_t rmnet_rx_handler(struct sk_buff **pskb)
 {
-	int rc = RX_HANDLER_CONSUMED;
 	struct sk_buff *skb = *pskb;
 	struct rmnet_port *port;
 	struct net_device *dev;

 	if (!skb)
-		return RX_HANDLER_CONSUMED;
+		goto done;

 	dev = skb->dev;
 	port = rmnet_get_port(dev);
@@ -182,14 +175,15 @@ rx_handler_result_t rmnet_rx_handler(struct sk_buff **pskb)
 	switch (port->rmnet_mode) {
 	case RMNET_EPMODE_VND:
 		if (port->ingress_data_format & RMNET_INGRESS_FORMAT_MAP)
-			rc = rmnet_map_ingress_handler(skb, port);
+			rmnet_map_ingress_handler(skb, port);
 		break;
 	case RMNET_EPMODE_BRIDGE:
-		rc = rmnet_bridge_handler(skb, port->bridge_ep);
+		rmnet_bridge_handler(skb, port->bridge_ep);
 		break;
 	}

-	return rc;
+done:
+	return RX_HANDLER_CONSUMED;
 }

 /* Modifies packet as per logical endpoint configuration and egress data format

--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h

@@ -80,7 +80,6 @@ u8 rmnet_map_demultiplex(struct sk_buff *skb);
 struct sk_buff *rmnet_map_deaggregate(struct sk_buff *skb);
 struct rmnet_map_header *rmnet_map_add_map_header(struct sk_buff *skb,
 						  int hdrlen, int pad);
-rx_handler_result_t rmnet_map_command(struct sk_buff *skb,
-				      struct rmnet_port *port);
+void rmnet_map_command(struct sk_buff *skb, struct rmnet_port *port);

 #endif /* _RMNET_MAP_H_ */

--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c

@@ -76,8 +76,7 @@ static void rmnet_map_send_ack(struct sk_buff *skb,
 /* Process MAP command frame and send N/ACK message as appropriate. Message cmd
  * name is decoded here and appropriate handler is called.
  */
-rx_handler_result_t rmnet_map_command(struct sk_buff *skb,
-				      struct rmnet_port *port)
+void rmnet_map_command(struct sk_buff *skb, struct rmnet_port *port)
 {
 	struct rmnet_map_control_command *cmd;
 	unsigned char command_name;
@@ -102,5 +101,4 @@ rx_handler_result_t rmnet_map_command(struct sk_buff *skb,
 	}
 	if (rc == RMNET_MAP_COMMAND_ACK)
 		rmnet_map_send_ack(skb, rc);
-	return RX_HANDLER_CONSUMED;
 }

--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c

@@ -27,14 +27,28 @@
 void rmnet_vnd_rx_fixup(struct sk_buff *skb, struct net_device *dev)
 {
-	dev->stats.rx_packets++;
-	dev->stats.rx_bytes += skb->len;
+	struct rmnet_priv *priv = netdev_priv(dev);
+	struct rmnet_pcpu_stats *pcpu_ptr;
+
+	pcpu_ptr = this_cpu_ptr(priv->pcpu_stats);
+
+	u64_stats_update_begin(&pcpu_ptr->syncp);
+	pcpu_ptr->stats.rx_pkts++;
+	pcpu_ptr->stats.rx_bytes += skb->len;
+	u64_stats_update_end(&pcpu_ptr->syncp);
 }

 void rmnet_vnd_tx_fixup(struct sk_buff *skb, struct net_device *dev)
 {
-	dev->stats.tx_packets++;
-	dev->stats.tx_bytes += skb->len;
+	struct rmnet_priv *priv = netdev_priv(dev);
+	struct rmnet_pcpu_stats *pcpu_ptr;
+
+	pcpu_ptr = this_cpu_ptr(priv->pcpu_stats);
+
+	u64_stats_update_begin(&pcpu_ptr->syncp);
+	pcpu_ptr->stats.tx_pkts++;
+	pcpu_ptr->stats.tx_bytes += skb->len;
+	u64_stats_update_end(&pcpu_ptr->syncp);
 }

 /* Network Device Operations */
@@ -48,7 +62,7 @@ static netdev_tx_t rmnet_vnd_start_xmit(struct sk_buff *skb,
 	if (priv->real_dev) {
 		rmnet_egress_handler(skb);
 	} else {
-		dev->stats.tx_dropped++;
+		this_cpu_inc(priv->pcpu_stats->stats.tx_drops);
 		kfree_skb(skb);
 	}
 	return NETDEV_TX_OK;
@@ -70,12 +84,72 @@ static int rmnet_vnd_get_iflink(const struct net_device *dev)
 	return priv->real_dev->ifindex;
 }

+static int rmnet_vnd_init(struct net_device *dev)
+{
+	struct rmnet_priv *priv = netdev_priv(dev);
+	int err;
+
+	priv->pcpu_stats = alloc_percpu(struct rmnet_pcpu_stats);
+	if (!priv->pcpu_stats)
+		return -ENOMEM;
+
+	err = gro_cells_init(&priv->gro_cells, dev);
+	if (err) {
+		free_percpu(priv->pcpu_stats);
+		return err;
+	}
+
+	return 0;
+}
+
+static void rmnet_vnd_uninit(struct net_device *dev)
+{
+	struct rmnet_priv *priv = netdev_priv(dev);
+
+	gro_cells_destroy(&priv->gro_cells);
+	free_percpu(priv->pcpu_stats);
+}
+
+static void rmnet_get_stats64(struct net_device *dev,
+			      struct rtnl_link_stats64 *s)
+{
+	struct rmnet_priv *priv = netdev_priv(dev);
+	struct rmnet_vnd_stats total_stats;
+	struct rmnet_pcpu_stats *pcpu_ptr;
+	unsigned int cpu, start;
+
+	memset(&total_stats, 0, sizeof(struct rmnet_vnd_stats));
+
+	for_each_possible_cpu(cpu) {
+		pcpu_ptr = per_cpu_ptr(priv->pcpu_stats, cpu);
+
+		do {
+			start = u64_stats_fetch_begin_irq(&pcpu_ptr->syncp);
+			total_stats.rx_pkts += pcpu_ptr->stats.rx_pkts;
+			total_stats.rx_bytes += pcpu_ptr->stats.rx_bytes;
+			total_stats.tx_pkts += pcpu_ptr->stats.tx_pkts;
+			total_stats.tx_bytes += pcpu_ptr->stats.tx_bytes;
+		} while (u64_stats_fetch_retry_irq(&pcpu_ptr->syncp, start));
+
+		total_stats.tx_drops += pcpu_ptr->stats.tx_drops;
+	}
+
+	s->rx_packets = total_stats.rx_pkts;
+	s->rx_bytes = total_stats.rx_bytes;
+	s->tx_packets = total_stats.tx_pkts;
+	s->tx_bytes = total_stats.tx_bytes;
+	s->tx_dropped = total_stats.tx_drops;
+}
+
 static const struct net_device_ops rmnet_vnd_ops = {
 	.ndo_start_xmit = rmnet_vnd_start_xmit,
 	.ndo_change_mtu = rmnet_vnd_change_mtu,
 	.ndo_get_iflink = rmnet_vnd_get_iflink,
 	.ndo_add_slave = rmnet_add_bridge,
 	.ndo_del_slave = rmnet_del_bridge,
+	.ndo_init = rmnet_vnd_init,
+	.ndo_uninit = rmnet_vnd_uninit,
+	.ndo_get_stats64 = rmnet_get_stats64,
 };

 /* Called by kernel whenever a new rmnet<n> device is created. Sets MTU,