Merge branch 'netpoll-next'

Eric W. Biederman says:

====================
netpoll: Cleanups and fixes

This should be a small set of safe cleanups and fixes to netpoll.

The fixes: vlan headers are now always inserted when needed, and napi
polling is always avoided while network devices are closed.

There are a bunch of little cleanups: removing unnecessary code, fixing
function naming, avoiding unnecessary locks, and removing general
silliness.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
Committed by David S. Miller on 2014-03-29 18:00:37 -04:00
commit 587daaf382
10 changed files with 90 additions and 84 deletions
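
For context, the central API change in this series is visible in the first hunks below: __netpoll_setup() loses its gfp_t argument, because netpoll setup always runs in process context and can simply allocate with GFP_KERNEL. A minimal sketch of a post-series caller (the function name and error handling are illustrative, not taken from the patches):

	/* Illustrative caller of the new two-argument __netpoll_setup().
	 * Setup runs in process context, so GFP_KERNEL is always safe.
	 */
	static int example_enable_netpoll(struct net_device *dev)
	{
		struct netpoll *np;
		int err;

		np = kzalloc(sizeof(*np), GFP_KERNEL);	/* was GFP_ATOMIC in some callers */
		if (!np)
			return -ENOMEM;

		err = __netpoll_setup(np, dev);		/* gfp argument is gone */
		if (err)
			kfree(np);
		return err;
	}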

drivers/net/bonding/bond_main.c

@@ -922,12 +922,12 @@ static inline int slave_enable_netpoll(struct slave *slave)
 	struct netpoll *np;
 	int err = 0;
 
-	np = kzalloc(sizeof(*np), GFP_ATOMIC);
+	np = kzalloc(sizeof(*np), GFP_KERNEL);
 	err = -ENOMEM;
 	if (!np)
 		goto out;
 
-	err = __netpoll_setup(np, slave->dev, GFP_ATOMIC);
+	err = __netpoll_setup(np, slave->dev);
 	if (err) {
 		kfree(np);
 		goto out;
@@ -962,7 +962,7 @@ static void bond_netpoll_cleanup(struct net_device *bond_dev)
 		slave_disable_netpoll(slave);
 }
 
-static int bond_netpoll_setup(struct net_device *dev, struct netpoll_info *ni, gfp_t gfp)
+static int bond_netpoll_setup(struct net_device *dev, struct netpoll_info *ni)
 {
 	struct bonding *bond = netdev_priv(dev);
 	struct list_head *iter;

drivers/net/team/team.c

@@ -1031,8 +1031,7 @@ static void team_port_leave(struct team *team, struct team_port *port)
 }
 
 #ifdef CONFIG_NET_POLL_CONTROLLER
-static int team_port_enable_netpoll(struct team *team, struct team_port *port,
-				    gfp_t gfp)
+static int team_port_enable_netpoll(struct team *team, struct team_port *port)
 {
 	struct netpoll *np;
 	int err;
@@ -1040,11 +1039,11 @@ static int team_port_enable_netpoll(struct team *team, struct team_port *port,
 	if (!team->dev->npinfo)
 		return 0;
 
-	np = kzalloc(sizeof(*np), gfp);
+	np = kzalloc(sizeof(*np), GFP_KERNEL);
 	if (!np)
 		return -ENOMEM;
 
-	err = __netpoll_setup(np, port->dev, gfp);
+	err = __netpoll_setup(np, port->dev);
 	if (err) {
 		kfree(np);
 		return err;
@@ -1067,8 +1066,7 @@ static void team_port_disable_netpoll(struct team_port *port)
 	kfree(np);
 }
 #else
-static int team_port_enable_netpoll(struct team *team, struct team_port *port,
-				    gfp_t gfp)
+static int team_port_enable_netpoll(struct team *team, struct team_port *port)
 {
 	return 0;
 }
@@ -1156,7 +1154,7 @@ static int team_port_add(struct team *team, struct net_device *port_dev)
 		goto err_vids_add;
 	}
 
-	err = team_port_enable_netpoll(team, port, GFP_KERNEL);
+	err = team_port_enable_netpoll(team, port);
 	if (err) {
 		netdev_err(dev, "Failed to enable netpoll on device %s\n",
 			   portname);
@@ -1850,7 +1848,7 @@ static void team_netpoll_cleanup(struct net_device *dev)
 }
 
 static int team_netpoll_setup(struct net_device *dev,
-			      struct netpoll_info *npifo, gfp_t gfp)
+			      struct netpoll_info *npifo)
 {
 	struct team *team = netdev_priv(dev);
 	struct team_port *port;
@@ -1858,7 +1856,7 @@ static int team_netpoll_setup(struct net_device *dev,
 
 	mutex_lock(&team->lock);
 	list_for_each_entry(port, &team->port_list, list) {
-		err = team_port_enable_netpoll(team, port, gfp);
+		err = team_port_enable_netpoll(team, port);
 		if (err) {
 			__team_netpoll_cleanup(team);
 			break;

include/linux/netdevice.h

@@ -1037,8 +1037,7 @@ struct net_device_ops {
 #ifdef CONFIG_NET_POLL_CONTROLLER
 	void			(*ndo_poll_controller)(struct net_device *dev);
 	int			(*ndo_netpoll_setup)(struct net_device *dev,
-						     struct netpoll_info *info,
-						     gfp_t gfp);
+						     struct netpoll_info *info);
 	void			(*ndo_netpoll_cleanup)(struct net_device *dev);
 #endif
 #ifdef CONFIG_NET_RX_BUSY_POLL
@@ -2910,6 +2909,11 @@ static inline void netif_tx_unlock_bh(struct net_device *dev)
 	}						\
 }
 
+#define HARD_TX_TRYLOCK(dev, txq)			\
+	(((dev->features & NETIF_F_LLTX) == 0) ?	\
+		__netif_tx_trylock(txq) :		\
+		true )
+
 #define HARD_TX_UNLOCK(dev, txq) {			\
 	if ((dev->features & NETIF_F_LLTX) == 0) {	\
 		__netif_tx_unlock(txq);			\
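
The HARD_TX_TRYLOCK macro added above completes the existing HARD_TX_LOCK/HARD_TX_UNLOCK family: for NETIF_F_LLTX drivers, which do their own tx locking, the "trylock" is a no-op that always succeeds; for everyone else it maps to __netif_tx_trylock(). A sketch of the intended pairing (the helper name and fixed queue index are illustrative assumptions):

	/* Illustrative use of HARD_TX_TRYLOCK/HARD_TX_UNLOCK; queue 0 is
	 * an assumption, real callers pick the skb's mapped tx queue.
	 */
	static bool example_try_xmit(struct net_device *dev, struct sk_buff *skb)
	{
		struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);
		bool sent = false;

		if (HARD_TX_TRYLOCK(dev, txq)) {	/* always true for LLTX devices */
			if (!netif_xmit_stopped(txq))
				sent = dev->netdev_ops->ndo_start_xmit(skb, dev) == NETDEV_TX_OK;
			HARD_TX_UNLOCK(dev, txq);
		}
		return sent;
	}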

include/linux/netpoll.h

@@ -47,17 +47,17 @@ struct netpoll_info {
 };
 
 #ifdef CONFIG_NETPOLL
-extern void netpoll_rx_disable(struct net_device *dev);
-extern void netpoll_rx_enable(struct net_device *dev);
+extern void netpoll_poll_disable(struct net_device *dev);
+extern void netpoll_poll_enable(struct net_device *dev);
 #else
-static inline void netpoll_rx_disable(struct net_device *dev) { return; }
-static inline void netpoll_rx_enable(struct net_device *dev) { return; }
+static inline void netpoll_poll_disable(struct net_device *dev) { return; }
+static inline void netpoll_poll_enable(struct net_device *dev) { return; }
 #endif
 
 void netpoll_send_udp(struct netpoll *np, const char *msg, int len);
 void netpoll_print_options(struct netpoll *np);
 int netpoll_parse_options(struct netpoll *np, char *opt);
-int __netpoll_setup(struct netpoll *np, struct net_device *ndev, gfp_t gfp);
+int __netpoll_setup(struct netpoll *np, struct net_device *ndev);
 int netpoll_setup(struct netpoll *np);
 void __netpoll_cleanup(struct netpoll *np);
 void __netpoll_free_async(struct netpoll *np);

net/8021q/vlan_dev.c

@@ -707,20 +707,19 @@ static void vlan_dev_poll_controller(struct net_device *dev)
 	return;
 }
 
-static int vlan_dev_netpoll_setup(struct net_device *dev, struct netpoll_info *npinfo,
-				  gfp_t gfp)
+static int vlan_dev_netpoll_setup(struct net_device *dev, struct netpoll_info *npinfo)
 {
 	struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
 	struct net_device *real_dev = vlan->real_dev;
 	struct netpoll *netpoll;
 	int err = 0;
 
-	netpoll = kzalloc(sizeof(*netpoll), gfp);
+	netpoll = kzalloc(sizeof(*netpoll), GFP_KERNEL);
 	err = -ENOMEM;
 	if (!netpoll)
 		goto out;
 
-	err = __netpoll_setup(netpoll, real_dev, gfp);
+	err = __netpoll_setup(netpoll, real_dev);
 	if (err) {
 		kfree(netpoll);
 		goto out;

net/bridge/br_device.c

@@ -218,16 +218,16 @@ static void br_netpoll_cleanup(struct net_device *dev)
 		br_netpoll_disable(p);
 }
 
-static int __br_netpoll_enable(struct net_bridge_port *p, gfp_t gfp)
+static int __br_netpoll_enable(struct net_bridge_port *p)
 {
 	struct netpoll *np;
 	int err;
 
-	np = kzalloc(sizeof(*p->np), gfp);
+	np = kzalloc(sizeof(*p->np), GFP_KERNEL);
 	if (!np)
 		return -ENOMEM;
 
-	err = __netpoll_setup(np, p->dev, gfp);
+	err = __netpoll_setup(np, p->dev);
 	if (err) {
 		kfree(np);
 		return err;
@@ -237,16 +237,15 @@ static int __br_netpoll_enable(struct net_bridge_port *p, gfp_t gfp)
 	return err;
 }
 
-int br_netpoll_enable(struct net_bridge_port *p, gfp_t gfp)
+int br_netpoll_enable(struct net_bridge_port *p)
 {
 	if (!p->br->dev->npinfo)
 		return 0;
 
-	return __br_netpoll_enable(p, gfp);
+	return __br_netpoll_enable(p);
 }
 
-static int br_netpoll_setup(struct net_device *dev, struct netpoll_info *ni,
-			    gfp_t gfp)
+static int br_netpoll_setup(struct net_device *dev, struct netpoll_info *ni)
 {
 	struct net_bridge *br = netdev_priv(dev);
 	struct net_bridge_port *p;
@@ -255,7 +254,7 @@ static int br_netpoll_setup(struct net_device *dev, struct netpoll_info *ni,
 	list_for_each_entry(p, &br->port_list, list) {
 		if (!p->dev)
 			continue;
-		err = __br_netpoll_enable(p, gfp);
+		err = __br_netpoll_enable(p);
 		if (err)
 			goto fail;
 	}

net/bridge/br_if.c

@@ -366,7 +366,7 @@ int br_add_if(struct net_bridge *br, struct net_device *dev)
 	if (err)
 		goto err2;
 
-	err = br_netpoll_enable(p, GFP_KERNEL);
+	err = br_netpoll_enable(p);
 	if (err)
 		goto err3;
 

net/bridge/br_private.h

@@ -349,7 +349,7 @@ static inline void br_netpoll_send_skb(const struct net_bridge_port *p,
 	netpoll_send_skb(np, skb);
 }
 
-int br_netpoll_enable(struct net_bridge_port *p, gfp_t gfp);
+int br_netpoll_enable(struct net_bridge_port *p);
 void br_netpoll_disable(struct net_bridge_port *p);
 #else
 static inline void br_netpoll_send_skb(const struct net_bridge_port *p,
@@ -357,7 +357,7 @@ static inline void br_netpoll_send_skb(const struct net_bridge_port *p,
 {
 }
 
-static inline int br_netpoll_enable(struct net_bridge_port *p, gfp_t gfp)
+static inline int br_netpoll_enable(struct net_bridge_port *p)
 {
 	return 0;
 }

net/core/dev.c

@@ -1245,7 +1245,7 @@ static int __dev_open(struct net_device *dev)
 	 * If we don't do this there is a chance ndo_poll_controller
 	 * or ndo_poll may be running while we open the device
 	 */
-	netpoll_rx_disable(dev);
+	netpoll_poll_disable(dev);
 
 	ret = call_netdevice_notifiers(NETDEV_PRE_UP, dev);
 	ret = notifier_to_errno(ret);
@@ -1260,7 +1260,7 @@ static int __dev_open(struct net_device *dev)
 	if (!ret && ops->ndo_open)
 		ret = ops->ndo_open(dev);
 
-	netpoll_rx_enable(dev);
+	netpoll_poll_enable(dev);
 
 	if (ret)
 		clear_bit(__LINK_STATE_START, &dev->state);
@@ -1313,6 +1313,9 @@ static int __dev_close_many(struct list_head *head)
 	might_sleep();
 
 	list_for_each_entry(dev, head, close_list) {
+		/* Temporarily disable netpoll until the interface is down */
+		netpoll_poll_disable(dev);
+
 		call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);
 
 		clear_bit(__LINK_STATE_START, &dev->state);
@@ -1343,6 +1346,7 @@ static int __dev_close_many(struct list_head *head)
 
 		dev->flags &= ~IFF_UP;
 		net_dmaengine_put();
+		netpoll_poll_enable(dev);
 	}
 
 	return 0;
@@ -1353,14 +1357,10 @@ static int __dev_close(struct net_device *dev)
 	int retval;
 	LIST_HEAD(single);
 
-	/* Temporarily disable netpoll until the interface is down */
-	netpoll_rx_disable(dev);
-
 	list_add(&dev->close_list, &single);
 	retval = __dev_close_many(&single);
 	list_del(&single);
 
-	netpoll_rx_enable(dev);
-
 	return retval;
 }
@@ -1398,14 +1398,9 @@ int dev_close(struct net_device *dev)
 	if (dev->flags & IFF_UP) {
 		LIST_HEAD(single);
 
-		/* Block netpoll rx while the interface is going down */
-		netpoll_rx_disable(dev);
-
 		list_add(&dev->close_list, &single);
 		dev_close_many(&single);
 		list_del(&single);
-
-		netpoll_rx_enable(dev);
 	}
 	return 0;
 }
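
Note how the hunks above move the netpoll bracketing from __dev_close()/dev_close() into __dev_close_many(), so every close path disables netpoll polling before NETDEV_GOING_DOWN and re-enables it only once the device is fully down. A condensed sketch of the resulting shape (notifier and flag handling elided; this is not the literal kernel code):

	/* Condensed sketch of the per-device pattern in __dev_close_many()
	 * after this series; netpoll_poll_disable() may sleep waiting for
	 * a poll in progress, which is why polling can never run against
	 * a closed device.
	 */
	static void example_close_one(struct net_device *dev)
	{
		netpoll_poll_disable(dev);	/* blocks until netpoll is quiet */

		/* ... NETDEV_GOING_DOWN, ndo_stop(), clear IFF_UP ... */

		netpoll_poll_enable(dev);	/* polling may resume */
	}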

net/core/netpoll.c

@@ -69,6 +69,37 @@ module_param(carrier_timeout, uint, 0644);
 #define np_notice(np, fmt, ...)				\
 	pr_notice("%s: " fmt, np->name, ##__VA_ARGS__)
 
+static int netpoll_start_xmit(struct sk_buff *skb, struct net_device *dev,
+			      struct netdev_queue *txq)
+{
+	const struct net_device_ops *ops = dev->netdev_ops;
+	int status = NETDEV_TX_OK;
+	netdev_features_t features;
+
+	features = netif_skb_features(skb);
+
+	if (vlan_tx_tag_present(skb) &&
+	    !vlan_hw_offload_capable(features, skb->vlan_proto)) {
+		skb = __vlan_put_tag(skb, skb->vlan_proto,
+				     vlan_tx_tag_get(skb));
+		if (unlikely(!skb)) {
+			/* This is actually a packet drop, but we
+			 * don't want the code that calls this
+			 * function to try and operate on a NULL skb.
+			 */
+			goto out;
+		}
+		skb->vlan_tci = 0;
+	}
+
+	status = ops->ndo_start_xmit(skb, dev);
+	if (status == NETDEV_TX_OK)
+		txq_trans_update(txq);
+out:
+	return status;
+}
+
 static void queue_process(struct work_struct *work)
 {
 	struct netpoll_info *npinfo =
@@ -78,28 +109,27 @@ static void queue_process(struct work_struct *work)
 
 	while ((skb = skb_dequeue(&npinfo->txq))) {
 		struct net_device *dev = skb->dev;
-		const struct net_device_ops *ops = dev->netdev_ops;
 		struct netdev_queue *txq;
 
 		if (!netif_device_present(dev) || !netif_running(dev)) {
-			__kfree_skb(skb);
+			kfree_skb(skb);
 			continue;
 		}
 
 		txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
 
 		local_irq_save(flags);
-		__netif_tx_lock(txq, smp_processor_id());
+		HARD_TX_LOCK(dev, txq, smp_processor_id());
 		if (netif_xmit_frozen_or_stopped(txq) ||
-		    ops->ndo_start_xmit(skb, dev) != NETDEV_TX_OK) {
+		    netpoll_start_xmit(skb, dev, txq) != NETDEV_TX_OK) {
 			skb_queue_head(&npinfo->txq, skb);
-			__netif_tx_unlock(txq);
+			HARD_TX_UNLOCK(dev, txq);
 			local_irq_restore(flags);
 
 			schedule_delayed_work(&npinfo->tx_work, HZ/10);
 			return;
 		}
-		__netif_tx_unlock(txq);
+		HARD_TX_UNLOCK(dev, txq);
 		local_irq_restore(flags);
 	}
 }
@@ -184,7 +214,7 @@ static void netpoll_poll_dev(struct net_device *dev)
 	zap_completion_queue();
 }
 
-void netpoll_rx_disable(struct net_device *dev)
+void netpoll_poll_disable(struct net_device *dev)
 {
 	struct netpoll_info *ni;
 	int idx;
@@ -195,9 +225,9 @@ void netpoll_rx_disable(struct net_device *dev)
 	down(&ni->dev_lock);
 	srcu_read_unlock(&netpoll_srcu, idx);
 }
-EXPORT_SYMBOL(netpoll_rx_disable);
+EXPORT_SYMBOL(netpoll_poll_disable);
 
-void netpoll_rx_enable(struct net_device *dev)
+void netpoll_poll_enable(struct net_device *dev)
 {
 	struct netpoll_info *ni;
 	rcu_read_lock();
@@ -206,7 +236,7 @@ void netpoll_rx_enable(struct net_device *dev)
 		up(&ni->dev_lock);
 	rcu_read_unlock();
 }
-EXPORT_SYMBOL(netpoll_rx_enable);
+EXPORT_SYMBOL(netpoll_poll_enable);
 
 static void refill_skbs(void)
 {
@@ -295,7 +325,6 @@ void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
 {
 	int status = NETDEV_TX_BUSY;
 	unsigned long tries;
-	const struct net_device_ops *ops = dev->netdev_ops;
 	/* It is up to the caller to keep npinfo alive. */
 	struct netpoll_info *npinfo;
 
@@ -303,7 +332,7 @@ void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
 	npinfo = rcu_dereference_bh(np->dev->npinfo);
 
 	if (!npinfo || !netif_running(dev) || !netif_device_present(dev)) {
-		__kfree_skb(skb);
+		dev_kfree_skb_irq(skb);
 		return;
 	}
 
@@ -316,29 +345,11 @@ void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
 		/* try until next clock tick */
 		for (tries = jiffies_to_usecs(1)/USEC_PER_POLL;
 		     tries > 0; --tries) {
-			if (__netif_tx_trylock(txq)) {
-				if (!netif_xmit_stopped(txq)) {
-					if (vlan_tx_tag_present(skb) &&
-					    !vlan_hw_offload_capable(netif_skb_features(skb),
-								     skb->vlan_proto)) {
-						skb = __vlan_put_tag(skb, skb->vlan_proto, vlan_tx_tag_get(skb));
-						if (unlikely(!skb)) {
-							/* This is actually a packet drop, but we
-							 * don't want the code at the end of this
-							 * function to try and re-queue a NULL skb.
-							 */
-							status = NETDEV_TX_OK;
-							goto unlock_txq;
-						}
-						skb->vlan_tci = 0;
-					}
-
-					status = ops->ndo_start_xmit(skb, dev);
-					if (status == NETDEV_TX_OK)
-						txq_trans_update(txq);
-				}
-unlock_txq:
-				__netif_tx_unlock(txq);
+			if (HARD_TX_TRYLOCK(dev, txq)) {
+				if (!netif_xmit_stopped(txq))
+					status = netpoll_start_xmit(skb, dev, txq);
+
+				HARD_TX_UNLOCK(dev, txq);
 
 				if (status == NETDEV_TX_OK)
 					break;
@@ -353,7 +364,7 @@ void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
 		WARN_ONCE(!irqs_disabled(),
 			  "netpoll_send_skb_on_dev(): %s enabled interrupts in poll (%pF)\n",
-			  dev->name, ops->ndo_start_xmit);
+			  dev->name, dev->netdev_ops->ndo_start_xmit);
 	}
@@ -584,7 +595,7 @@ int netpoll_parse_options(struct netpoll *np, char *opt)
 }
 EXPORT_SYMBOL(netpoll_parse_options);
 
-int __netpoll_setup(struct netpoll *np, struct net_device *ndev, gfp_t gfp)
+int __netpoll_setup(struct netpoll *np, struct net_device *ndev)
 {
 	struct netpoll_info *npinfo;
 	const struct net_device_ops *ops;
@@ -603,7 +614,7 @@ int __netpoll_setup(struct netpoll *np, struct net_device *ndev, gfp_t gfp)
 	}
 
 	if (!ndev->npinfo) {
-		npinfo = kmalloc(sizeof(*npinfo), gfp);
+		npinfo = kmalloc(sizeof(*npinfo), GFP_KERNEL);
 		if (!npinfo) {
 			err = -ENOMEM;
 			goto out;
@@ -617,7 +628,7 @@ int __netpoll_setup(struct netpoll *np, struct net_device *ndev, gfp_t gfp)
 
 		ops = np->dev->netdev_ops;
 		if (ops->ndo_netpoll_setup) {
-			err = ops->ndo_netpoll_setup(ndev, npinfo, gfp);
+			err = ops->ndo_netpoll_setup(ndev, npinfo);
 			if (err)
 				goto free_npinfo;
 		}
@@ -749,7 +760,7 @@ int netpoll_setup(struct netpoll *np)
 	/* fill up the skb queue */
 	refill_skbs();
 
-	err = __netpoll_setup(np, ndev, GFP_KERNEL);
+	err = __netpoll_setup(np, ndev);
 	if (err)
 		goto put;