
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Pull networking fixes from David Miller:

 1) Frag and UDP handling fixes in i40e driver, from Amritha Nambiar and
    Alexander Duyck.

 2) Undo unintentional UAPI change in netfilter conntrack, from Florian
    Westphal.

 3) Revert a change to how error codes are returned from
    dev_get_valid_name(); it broke some apps.

 4) Don't cache routes for ipv6 tunnels when the tunnel is ipv4/ipv6
    dual-stack. From Eli Cooper.

 5) Fix missed PMTU updates in geneve, from Xin Long.

 6) Cure double free in macvlan, from Gao Feng.

 7) Fix heap out-of-bounds write in rds_message_alloc_sgs(), from
    Mohamed Ghannam.

 8) FEC bug fixes from Fugang Duan (mis-accounting of dev_id, missed
    deferral of probe when the regulator is not ready yet).

 9) Missing DMA mapping error checks in 3c59x, from Neil Horman (see
    the sketch after this list).

10) Turn off Broadcom tags for some b53 switches, from Florian Fainelli.

11) Fix OOPS when get_target_net() is passed an SKB whose NETLINK_CB()
    isn't initialized. From Andrei Vagin.

12) Fix crashes in fib6_add(), from Wei Wang.

13) PMTU bug fixes in SCTP from Marcelo Ricardo Leitner.

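As a concrete illustration of item 9, the 3c59x hunks below check every
pci_map_single() result before the address is handed to the NIC. A
minimal sketch of the TX-side pattern (the shape of the fix, not a
complete function):

    dma_addr_t dma = pci_map_single(VORTEX_PCI(vp), skb->data, len,
                                    PCI_DMA_TODEVICE);
    if (dma_mapping_error(&VORTEX_PCI(vp)->dev, dma)) {
        /* never hand an unmapped address to the hardware: drop the
         * packet and report it consumed so it is not requeued
         */
        dev_kfree_skb_any(skb);
        dev->stats.tx_dropped++;
        return NETDEV_TX_OK;
    }
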
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (56 commits)
  sh_eth: fix TXALCR1 offsets
  mdio-sun4i: Fix a memory leak
  phylink: mark expected switch fall-throughs in phylink_mii_ioctl
  sctp: fix the handling of ICMP Frag Needed for too small MTUs
  sctp: do not retransmit upon FragNeeded if PMTU discovery is disabled
  xen-netfront: enable device after manual module load
  bnxt_en: Fix the 'Invalid VF' id check in bnxt_vf_ndo_prep routine.
  bnxt_en: Fix population of flow_type in bnxt_hwrm_cfa_flow_alloc()
  sh_eth: fix SH7757 GEther initialization
  net: fec: free/restore resource in related probe error pathes
  uapi/if_ether.h: prevent redefinition of struct ethhdr
  ipv6: fix general protection fault in fib6_add()
  RDS: null pointer dereference in rds_atomic_free_op
  sh_eth: fix TSU resource handling
  net: stmmac: enable EEE in MII, GMII or RGMII only
  rtnetlink: give a user socket to get_target_net()
  MAINTAINERS: Update my email address.
  can: ems_usb: improve error reporting for error warning and error passive
  can: flex_can: Correct the checking for frame length in flexcan_start_xmit()
  can: gs_usb: fix return value of the "set_bittiming" callback
  ...
Linus Torvalds 2018-01-08 20:21:39 -08:00
commit ef7f8cec80
53 changed files with 474 additions and 220 deletions


@ -10134,7 +10134,7 @@ F: drivers/irqchip/irq-ompic.c
F: drivers/irqchip/irq-or1k-*
OPENVSWITCH
M: Pravin Shelar <pshelar@nicira.com>
M: Pravin B Shelar <pshelar@ovn.org>
L: netdev@vger.kernel.org
L: dev@openvswitch.org
W: http://openvswitch.org


@ -526,7 +526,7 @@ static int flexcan_start_xmit(struct sk_buff *skb, struct net_device *dev)
data = be32_to_cpup((__be32 *)&cf->data[0]);
flexcan_write(data, &priv->tx_mb->data[0]);
}
if (cf->can_dlc > 3) {
if (cf->can_dlc > 4) {
data = be32_to_cpup((__be32 *)&cf->data[4]);
flexcan_write(data, &priv->tx_mb->data[1]);
}

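For reference, the mailbox carries the payload as two 32-bit words:
word 0 holds data[0..3] and word 1 holds data[4..7], so the second
write is only needed for frames with more than 4 data bytes; with the
old "> 3" test a 4-byte frame also read data[4..7], past its valid
payload. A sketch of the corrected logic (the word-0 guard is assumed
from the surrounding context):

    if (cf->can_dlc > 0) {          /* word 0 <- data[0..3] */
        data = be32_to_cpup((__be32 *)&cf->data[0]);
        flexcan_write(data, &priv->tx_mb->data[0]);
    }
    if (cf->can_dlc > 4) {          /* word 1 <- data[4..7] only */
        data = be32_to_cpup((__be32 *)&cf->data[4]);
        flexcan_write(data, &priv->tx_mb->data[1]);
    }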

@ -395,6 +395,7 @@ static void ems_usb_rx_err(struct ems_usb *dev, struct ems_cpc_msg *msg)
if (dev->can.state == CAN_STATE_ERROR_WARNING ||
dev->can.state == CAN_STATE_ERROR_PASSIVE) {
cf->can_id |= CAN_ERR_CRTL;
cf->data[1] = (txerr > rxerr) ?
CAN_ERR_CRTL_TX_PASSIVE : CAN_ERR_CRTL_RX_PASSIVE;
}


@ -449,7 +449,7 @@ static int gs_usb_set_bittiming(struct net_device *netdev)
dev_err(netdev->dev.parent, "Couldn't set bittimings (err=%d)",
rc);
return rc;
return (rc > 0) ? 0 : rc;
}
static void gs_usb_xmit_callback(struct urb *urb)

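The underlying issue: usb_control_msg() returns the number of bytes
transferred on success and a negative errno on failure, while a
do_set_bittiming callback is expected to return 0 on success. An
equivalent way to write the fix (sketch of the tail of the function):

    if (rc < 0) {
        dev_err(netdev->dev.parent,
                "Couldn't set bittimings (err=%d)", rc);
        return rc;              /* genuine error */
    }
    return 0;                   /* byte count is not an error code */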

@ -194,7 +194,7 @@ static int vxcan_newlink(struct net *net, struct net_device *dev,
tbp = peer_tb;
}
if (tbp[IFLA_IFNAME]) {
if (ifmp && tbp[IFLA_IFNAME]) {
nla_strlcpy(ifname, tbp[IFLA_IFNAME], IFNAMSIZ);
name_assign_type = NET_NAME_USER;
} else {


@ -1500,10 +1500,13 @@ static enum dsa_tag_protocol b53_get_tag_protocol(struct dsa_switch *ds,
{
struct b53_device *dev = ds->priv;
/* Older models support a different tag format that we do not
* support in net/dsa/tag_brcm.c yet.
/* Older models (5325, 5365) support a different tag format that we do
* not support in net/dsa/tag_brcm.c yet. 539x and 531x5 require managed
* mode to be turned on which means we need to specifically manage ARL
* misses on multicast addresses (TBD).
*/
if (is5325(dev) || is5365(dev) || !b53_can_enable_brcm_tags(ds, port))
if (is5325(dev) || is5365(dev) || is539x(dev) || is531x5(dev) ||
!b53_can_enable_brcm_tags(ds, port))
return DSA_TAG_PROTO_NONE;
/* Broadcom BCM58xx chips have a flow accelerator on Port 8


@ -602,7 +602,7 @@ struct vortex_private {
struct sk_buff* rx_skbuff[RX_RING_SIZE];
struct sk_buff* tx_skbuff[TX_RING_SIZE];
unsigned int cur_rx, cur_tx; /* The next free ring entry */
unsigned int dirty_rx, dirty_tx; /* The ring entries to be free()ed. */
unsigned int dirty_tx; /* The ring entries to be free()ed. */
struct vortex_extra_stats xstats; /* NIC-specific extra stats */
struct sk_buff *tx_skb; /* Packet being eaten by bus master ctrl. */
dma_addr_t tx_skb_dma; /* Allocated DMA address for bus master ctrl DMA. */
@ -618,7 +618,6 @@ struct vortex_private {
/* The remainder are related to chip state, mostly media selection. */
struct timer_list timer; /* Media selection timer. */
struct timer_list rx_oom_timer; /* Rx skb allocation retry timer */
int options; /* User-settable misc. driver options. */
unsigned int media_override:4, /* Passed-in media type. */
default_media:4, /* Read from the EEPROM/Wn3_Config. */
@ -760,7 +759,6 @@ static void mdio_sync(struct vortex_private *vp, int bits);
static int mdio_read(struct net_device *dev, int phy_id, int location);
static void mdio_write(struct net_device *vp, int phy_id, int location, int value);
static void vortex_timer(struct timer_list *t);
static void rx_oom_timer(struct timer_list *t);
static netdev_tx_t vortex_start_xmit(struct sk_buff *skb,
struct net_device *dev);
static netdev_tx_t boomerang_start_xmit(struct sk_buff *skb,
@ -1601,7 +1599,6 @@ vortex_up(struct net_device *dev)
timer_setup(&vp->timer, vortex_timer, 0);
mod_timer(&vp->timer, RUN_AT(media_tbl[dev->if_port].wait));
timer_setup(&vp->rx_oom_timer, rx_oom_timer, 0);
if (vortex_debug > 1)
pr_debug("%s: Initial media type %s.\n",
@ -1676,7 +1673,7 @@ vortex_up(struct net_device *dev)
window_write16(vp, 0x0040, 4, Wn4_NetDiag);
if (vp->full_bus_master_rx) { /* Boomerang bus master. */
vp->cur_rx = vp->dirty_rx = 0;
vp->cur_rx = 0;
/* Initialize the RxEarly register as recommended. */
iowrite16(SetRxThreshold + (1536>>2), ioaddr + EL3_CMD);
iowrite32(0x0020, ioaddr + PktStatus);
@ -1729,6 +1726,7 @@ vortex_open(struct net_device *dev)
struct vortex_private *vp = netdev_priv(dev);
int i;
int retval;
dma_addr_t dma;
/* Use the now-standard shared IRQ implementation. */
if ((retval = request_irq(dev->irq, vp->full_bus_master_rx ?
@ -1753,7 +1751,11 @@ vortex_open(struct net_device *dev)
break; /* Bad news! */
skb_reserve(skb, NET_IP_ALIGN); /* Align IP on 16 byte boundaries */
vp->rx_ring[i].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data, PKT_BUF_SZ, PCI_DMA_FROMDEVICE));
dma = pci_map_single(VORTEX_PCI(vp), skb->data,
PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
if (dma_mapping_error(&VORTEX_PCI(vp)->dev, dma))
break;
vp->rx_ring[i].addr = cpu_to_le32(dma);
}
if (i != RX_RING_SIZE) {
pr_emerg("%s: no memory for rx ring\n", dev->name);
@ -2067,6 +2069,12 @@ vortex_start_xmit(struct sk_buff *skb, struct net_device *dev)
int len = (skb->len + 3) & ~3;
vp->tx_skb_dma = pci_map_single(VORTEX_PCI(vp), skb->data, len,
PCI_DMA_TODEVICE);
if (dma_mapping_error(&VORTEX_PCI(vp)->dev, vp->tx_skb_dma)) {
dev_kfree_skb_any(skb);
dev->stats.tx_dropped++;
return NETDEV_TX_OK;
}
spin_lock_irq(&vp->window_lock);
window_set(vp, 7);
iowrite32(vp->tx_skb_dma, ioaddr + Wn7_MasterAddr);
@ -2593,7 +2601,7 @@ boomerang_rx(struct net_device *dev)
int entry = vp->cur_rx % RX_RING_SIZE;
void __iomem *ioaddr = vp->ioaddr;
int rx_status;
int rx_work_limit = vp->dirty_rx + RX_RING_SIZE - vp->cur_rx;
int rx_work_limit = RX_RING_SIZE;
if (vortex_debug > 5)
pr_debug("boomerang_rx(): status %4.4x\n", ioread16(ioaddr+EL3_STATUS));
@ -2614,7 +2622,8 @@ boomerang_rx(struct net_device *dev)
} else {
/* The packet length: up to 4.5K!. */
int pkt_len = rx_status & 0x1fff;
struct sk_buff *skb;
struct sk_buff *skb, *newskb;
dma_addr_t newdma;
dma_addr_t dma = le32_to_cpu(vp->rx_ring[entry].addr);
if (vortex_debug > 4)
@ -2633,9 +2642,27 @@ boomerang_rx(struct net_device *dev)
pci_dma_sync_single_for_device(VORTEX_PCI(vp), dma, PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
vp->rx_copy++;
} else {
/* Pre-allocate the replacement skb. If it or its
* mapping fails then recycle the buffer thats already
* in place
*/
newskb = netdev_alloc_skb_ip_align(dev, PKT_BUF_SZ);
if (!newskb) {
dev->stats.rx_dropped++;
goto clear_complete;
}
newdma = pci_map_single(VORTEX_PCI(vp), newskb->data,
PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
if (dma_mapping_error(&VORTEX_PCI(vp)->dev, newdma)) {
dev->stats.rx_dropped++;
consume_skb(newskb);
goto clear_complete;
}
/* Pass up the skbuff already on the Rx ring. */
skb = vp->rx_skbuff[entry];
vp->rx_skbuff[entry] = NULL;
vp->rx_skbuff[entry] = newskb;
vp->rx_ring[entry].addr = cpu_to_le32(newdma);
skb_put(skb, pkt_len);
pci_unmap_single(VORTEX_PCI(vp), dma, PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
vp->rx_nocopy++;
@ -2653,55 +2680,15 @@ boomerang_rx(struct net_device *dev)
netif_rx(skb);
dev->stats.rx_packets++;
}
entry = (++vp->cur_rx) % RX_RING_SIZE;
}
/* Refill the Rx ring buffers. */
for (; vp->cur_rx - vp->dirty_rx > 0; vp->dirty_rx++) {
struct sk_buff *skb;
entry = vp->dirty_rx % RX_RING_SIZE;
if (vp->rx_skbuff[entry] == NULL) {
skb = netdev_alloc_skb_ip_align(dev, PKT_BUF_SZ);
if (skb == NULL) {
static unsigned long last_jif;
if (time_after(jiffies, last_jif + 10 * HZ)) {
pr_warn("%s: memory shortage\n",
dev->name);
last_jif = jiffies;
}
if ((vp->cur_rx - vp->dirty_rx) == RX_RING_SIZE)
mod_timer(&vp->rx_oom_timer, RUN_AT(HZ * 1));
break; /* Bad news! */
}
vp->rx_ring[entry].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data, PKT_BUF_SZ, PCI_DMA_FROMDEVICE));
vp->rx_skbuff[entry] = skb;
}
clear_complete:
vp->rx_ring[entry].status = 0; /* Clear complete bit. */
iowrite16(UpUnstall, ioaddr + EL3_CMD);
entry = (++vp->cur_rx) % RX_RING_SIZE;
}
return 0;
}
/*
* If we've hit a total OOM refilling the Rx ring we poll once a second
* for some memory. Otherwise there is no way to restart the rx process.
*/
static void
rx_oom_timer(struct timer_list *t)
{
struct vortex_private *vp = from_timer(vp, t, rx_oom_timer);
struct net_device *dev = vp->mii.dev;
spin_lock_irq(&vp->lock);
if ((vp->cur_rx - vp->dirty_rx) == RX_RING_SIZE) /* This test is redundant, but makes me feel good */
boomerang_rx(dev);
if (vortex_debug > 1) {
pr_debug("%s: rx_oom_timer %s\n", dev->name,
((vp->cur_rx - vp->dirty_rx) != RX_RING_SIZE) ? "succeeded" : "retrying");
}
spin_unlock_irq(&vp->lock);
}
static void
vortex_down(struct net_device *dev, int final_down)
{
@ -2711,7 +2698,6 @@ vortex_down(struct net_device *dev, int final_down)
netdev_reset_queue(dev);
netif_stop_queue(dev);
del_timer_sync(&vp->rx_oom_timer);
del_timer_sync(&vp->timer);
/* Turn off statistics ASAP. We update dev->stats below. */


@ -75,6 +75,9 @@ static struct workqueue_struct *ena_wq;
MODULE_DEVICE_TABLE(pci, ena_pci_tbl);
static int ena_rss_init_default(struct ena_adapter *adapter);
static void check_for_admin_com_state(struct ena_adapter *adapter);
static void ena_destroy_device(struct ena_adapter *adapter);
static int ena_restore_device(struct ena_adapter *adapter);
static void ena_tx_timeout(struct net_device *dev)
{
@ -1565,7 +1568,7 @@ static int ena_rss_configure(struct ena_adapter *adapter)
static int ena_up_complete(struct ena_adapter *adapter)
{
int rc, i;
int rc;
rc = ena_rss_configure(adapter);
if (rc)
@ -1584,17 +1587,6 @@ static int ena_up_complete(struct ena_adapter *adapter)
ena_napi_enable_all(adapter);
/* Enable completion queues interrupt */
for (i = 0; i < adapter->num_queues; i++)
ena_unmask_interrupt(&adapter->tx_ring[i],
&adapter->rx_ring[i]);
/* schedule napi in case we had pending packets
* from the last time we disable napi
*/
for (i = 0; i < adapter->num_queues; i++)
napi_schedule(&adapter->ena_napi[i].napi);
return 0;
}
@ -1731,7 +1723,7 @@ create_err:
static int ena_up(struct ena_adapter *adapter)
{
int rc;
int rc, i;
netdev_dbg(adapter->netdev, "%s\n", __func__);
@ -1774,6 +1766,17 @@ static int ena_up(struct ena_adapter *adapter)
set_bit(ENA_FLAG_DEV_UP, &adapter->flags);
/* Enable completion queues interrupt */
for (i = 0; i < adapter->num_queues; i++)
ena_unmask_interrupt(&adapter->tx_ring[i],
&adapter->rx_ring[i]);
/* schedule napi in case we had pending packets
* from the last time we disable napi
*/
for (i = 0; i < adapter->num_queues; i++)
napi_schedule(&adapter->ena_napi[i].napi);
return rc;
err_up:
@ -1884,6 +1887,17 @@ static int ena_close(struct net_device *netdev)
if (test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
ena_down(adapter);
/* Check for device status and issue reset if needed*/
check_for_admin_com_state(adapter);
if (unlikely(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) {
netif_err(adapter, ifdown, adapter->netdev,
"Destroy failure, restarting device\n");
ena_dump_stats_to_dmesg(adapter);
/* rtnl lock already obtained in dev_ioctl() layer */
ena_destroy_device(adapter);
ena_restore_device(adapter);
}
return 0;
}
@ -2544,11 +2558,12 @@ static void ena_destroy_device(struct ena_adapter *adapter)
ena_com_set_admin_running_state(ena_dev, false);
ena_close(netdev);
if (test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
ena_down(adapter);
/* Before releasing the ENA resources, a device reset is required.
* (to prevent the device from accessing them).
* In case the reset flag is set and the device is up, ena_close
* In case the reset flag is set and the device is up, ena_down()
* already perform the reset, so it can be skipped.
*/
if (!(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags) && dev_up))


@ -70,7 +70,7 @@ static int bnxt_vf_ndo_prep(struct bnxt *bp, int vf_id)
netdev_err(bp->dev, "vf ndo called though sriov is disabled\n");
return -EINVAL;
}
if (vf_id >= bp->pf.max_vfs) {
if (vf_id >= bp->pf.active_vfs) {
netdev_err(bp->dev, "Invalid VF id %d\n", vf_id);
return -EINVAL;
}


@ -421,7 +421,7 @@ static int bnxt_hwrm_cfa_flow_alloc(struct bnxt *bp, struct bnxt_tc_flow *flow,
}
/* If all IP and L4 fields are wildcarded then this is an L2 flow */
if (is_wildcard(&l3_mask, sizeof(l3_mask)) &&
if (is_wildcard(l3_mask, sizeof(*l3_mask)) &&
is_wildcard(&flow->l4_mask, sizeof(flow->l4_mask))) {
flow_flags |= CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_L2;
} else {

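The bug class, isolated into a stand-alone program (is_wildcard here
is a simplified stand-in for the driver's helper): l3_mask is already
a pointer, so taking its address scans the pointer variable itself
rather than the mask it points to.

    #include <stdio.h>

    static int is_wildcard(const void *mask, int len)
    {
        const unsigned char *p = mask;

        while (len--)
            if (*p++)
                return 0;
        return 1;
    }

    int main(void)
    {
        unsigned char mask[4] = { 0, 0, 0, 0 };
        unsigned char *l3_mask = mask;

        /* wrong: tests the bytes of the pointer, almost never zero */
        printf("%d\n", is_wildcard(&l3_mask, sizeof(l3_mask)));
        /* right: tests the 4 mask bytes */
        printf("%d\n", is_wildcard(l3_mask, sizeof(*l3_mask)));
        return 0;
    }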

@ -344,7 +344,6 @@ struct adapter_params {
unsigned int sf_size; /* serial flash size in bytes */
unsigned int sf_nsec; /* # of flash sectors */
unsigned int sf_fw_start; /* start of FW image in flash */
unsigned int fw_vers; /* firmware version */
unsigned int bs_vers; /* bootstrap version */


@ -2844,8 +2844,6 @@ enum {
SF_RD_DATA_FAST = 0xb, /* read flash */
SF_RD_ID = 0x9f, /* read ID */
SF_ERASE_SECTOR = 0xd8, /* erase sector */
FW_MAX_SIZE = 16 * SF_SEC_SIZE,
};
/**
@ -3558,8 +3556,9 @@ int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
const __be32 *p = (const __be32 *)fw_data;
const struct fw_hdr *hdr = (const struct fw_hdr *)fw_data;
unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
unsigned int fw_img_start = adap->params.sf_fw_start;
unsigned int fw_start_sec = fw_img_start / sf_sec_size;
unsigned int fw_start_sec = FLASH_FW_START_SEC;
unsigned int fw_size = FLASH_FW_MAX_SIZE;
unsigned int fw_start = FLASH_FW_START;
if (!size) {
dev_err(adap->pdev_dev, "FW image has no data\n");
@ -3575,9 +3574,9 @@ int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
"FW image size differs from size in FW header\n");
return -EINVAL;
}
if (size > FW_MAX_SIZE) {
if (size > fw_size) {
dev_err(adap->pdev_dev, "FW image too large, max is %u bytes\n",
FW_MAX_SIZE);
fw_size);
return -EFBIG;
}
if (!t4_fw_matches_chip(adap, hdr))
@ -3604,11 +3603,11 @@ int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
*/
memcpy(first_page, fw_data, SF_PAGE_SIZE);
((struct fw_hdr *)first_page)->fw_ver = cpu_to_be32(0xffffffff);
ret = t4_write_flash(adap, fw_img_start, SF_PAGE_SIZE, first_page);
ret = t4_write_flash(adap, fw_start, SF_PAGE_SIZE, first_page);
if (ret)
goto out;
addr = fw_img_start;
addr = fw_start;
for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
addr += SF_PAGE_SIZE;
fw_data += SF_PAGE_SIZE;
@ -3618,7 +3617,7 @@ int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
}
ret = t4_write_flash(adap,
fw_img_start + offsetof(struct fw_hdr, fw_ver),
fw_start + offsetof(struct fw_hdr, fw_ver),
sizeof(hdr->fw_ver), (const u8 *)&hdr->fw_ver);
out:
if (ret)


@ -3469,6 +3469,10 @@ fec_probe(struct platform_device *pdev)
goto failed_regulator;
}
} else {
if (PTR_ERR(fep->reg_phy) == -EPROBE_DEFER) {
ret = -EPROBE_DEFER;
goto failed_regulator;
}
fep->reg_phy = NULL;
}
@ -3552,8 +3556,9 @@ failed_clk_ipg:
failed_clk:
if (of_phy_is_fixed_link(np))
of_phy_deregister_fixed_link(np);
failed_phy:
of_node_put(phy_node);
failed_phy:
dev_id--;
failed_ioremap:
free_netdev(ndev);

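This fix and the mdio-sun4i one further down apply the same
optional-regulator idiom: a missing "phy" supply is tolerated, but
-EPROBE_DEFER has to be propagated (after unwinding) so the probe is
retried once the regulator driver shows up. The shape of it, with an
illustrative fail label:

    reg_phy = devm_regulator_get(&pdev->dev, "phy");
    if (IS_ERR(reg_phy)) {
        if (PTR_ERR(reg_phy) == -EPROBE_DEFER) {
            ret = -EPROBE_DEFER;
            goto fail;          /* unwind; core will re-probe later */
        }
        reg_phy = NULL;         /* no regulator: run without one */
    }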

@ -331,7 +331,8 @@ struct e1000_adapter {
enum e1000_state_t {
__E1000_TESTING,
__E1000_RESETTING,
__E1000_DOWN
__E1000_DOWN,
__E1000_DISABLED
};
#undef pr_fmt


@ -945,7 +945,7 @@ static int e1000_init_hw_struct(struct e1000_adapter *adapter,
static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct net_device *netdev;
struct e1000_adapter *adapter;
struct e1000_adapter *adapter = NULL;
struct e1000_hw *hw;
static int cards_found;
@ -955,6 +955,7 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
u16 tmp = 0;
u16 eeprom_apme_mask = E1000_EEPROM_APME;
int bars, need_ioport;
bool disable_dev = false;
/* do not allocate ioport bars when not needed */
need_ioport = e1000_is_need_ioport(pdev);
@ -1259,11 +1260,13 @@ err_mdio_ioremap:
iounmap(hw->ce4100_gbe_mdio_base_virt);
iounmap(hw->hw_addr);
err_ioremap:
disable_dev = !test_and_set_bit(__E1000_DISABLED, &adapter->flags);
free_netdev(netdev);
err_alloc_etherdev:
pci_release_selected_regions(pdev, bars);
err_pci_reg:
pci_disable_device(pdev);
if (!adapter || disable_dev)
pci_disable_device(pdev);
return err;
}
@ -1281,6 +1284,7 @@ static void e1000_remove(struct pci_dev *pdev)
struct net_device *netdev = pci_get_drvdata(pdev);
struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
bool disable_dev;
e1000_down_and_stop(adapter);
e1000_release_manageability(adapter);
@ -1299,9 +1303,11 @@ static void e1000_remove(struct pci_dev *pdev)
iounmap(hw->flash_address);
pci_release_selected_regions(pdev, adapter->bars);
disable_dev = !test_and_set_bit(__E1000_DISABLED, &adapter->flags);
free_netdev(netdev);
pci_disable_device(pdev);
if (disable_dev)
pci_disable_device(pdev);
}
/**
@ -5156,7 +5162,8 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake)
if (netif_running(netdev))
e1000_free_irq(adapter);
pci_disable_device(pdev);
if (!test_and_set_bit(__E1000_DISABLED, &adapter->flags))
pci_disable_device(pdev);
return 0;
}
@ -5200,6 +5207,10 @@ static int e1000_resume(struct pci_dev *pdev)
pr_err("Cannot enable PCI device from suspend\n");
return err;
}
/* flush memory to make sure state is correct */
smp_mb__before_atomic();
clear_bit(__E1000_DISABLED, &adapter->flags);
pci_set_master(pdev);
pci_enable_wake(pdev, PCI_D3hot, 0);
@ -5274,7 +5285,9 @@ static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
if (netif_running(netdev))
e1000_down(adapter);
pci_disable_device(pdev);
if (!test_and_set_bit(__E1000_DISABLED, &adapter->flags))
pci_disable_device(pdev);
/* Request a slot slot reset. */
return PCI_ERS_RESULT_NEED_RESET;
@ -5302,6 +5315,10 @@ static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
pr_err("Cannot re-enable PCI device after reset.\n");
return PCI_ERS_RESULT_DISCONNECT;
}
/* flush memory to make sure state is correct */
smp_mb__before_atomic();
clear_bit(__E1000_DISABLED, &adapter->flags);
pci_set_master(pdev);
pci_enable_wake(pdev, PCI_D3hot, 0);

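The __E1000_DISABLED bit turns the scattered pci_disable_device()
calls into a once-only operation: whichever teardown path gets there
first flips the bit and performs the disable, and the resume paths
clear it again after re-enabling. The idiom in isolation:

    /* teardown paths (remove, shutdown, error handler): */
    if (!test_and_set_bit(__E1000_DISABLED, &adapter->flags))
        pci_disable_device(pdev);

    /* resume paths, after pci_enable_device() succeeded: */
    smp_mb__before_atomic();    /* flush state before clearing */
    clear_bit(__E1000_DISABLED, &adapter->flags);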

@ -1367,6 +1367,9 @@ out:
* Checks to see of the link status of the hardware has changed. If a
* change in link status has been detected, then we read the PHY registers
* to get the current speed/duplex if link exists.
*
* Returns a negative error code (-E1000_ERR_*) or 0 (link down) or 1 (link
* up).
**/
static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
{
@ -1382,7 +1385,7 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
* Change or Rx Sequence Error interrupt.
*/
if (!mac->get_link_status)
return 0;
return 1;
/* First we want to see if the MII Status Register reports
* link. If so, then we want to get the current speed/duplex
@ -1613,10 +1616,12 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
* different link partner.
*/
ret_val = e1000e_config_fc_after_link_up(hw);
if (ret_val)
if (ret_val) {
e_dbg("Error configuring flow control\n");
return ret_val;
}
return ret_val;
return 1;
}
static s32 e1000_get_variants_ich8lan(struct e1000_adapter *adapter)


@ -1573,11 +1573,18 @@ static int i40e_set_mac(struct net_device *netdev, void *p)
else
netdev_info(netdev, "set new mac address %pM\n", addr->sa_data);
/* Copy the address first, so that we avoid a possible race with
* .set_rx_mode(). If we copy after changing the address in the filter
* list, we might open ourselves to a narrow race window where
* .set_rx_mode could delete our dev_addr filter and prevent traffic
* from passing.
*/
ether_addr_copy(netdev->dev_addr, addr->sa_data);
spin_lock_bh(&vsi->mac_filter_hash_lock);
i40e_del_mac_filter(vsi, netdev->dev_addr);
i40e_add_mac_filter(vsi, addr->sa_data);
spin_unlock_bh(&vsi->mac_filter_hash_lock);
ether_addr_copy(netdev->dev_addr, addr->sa_data);
if (vsi->type == I40E_VSI_MAIN) {
i40e_status ret;
@ -1923,6 +1930,14 @@ static int i40e_addr_unsync(struct net_device *netdev, const u8 *addr)
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_vsi *vsi = np->vsi;
/* Under some circumstances, we might receive a request to delete
* our own device address from our uc list. Because we store the
* device address in the VSI's MAC/VLAN filter list, we need to ignore
* such requests and not delete our device address from this list.
*/
if (ether_addr_equal(addr, netdev->dev_addr))
return 0;
i40e_del_mac_filter(vsi, addr);
return 0;
@ -6038,8 +6053,8 @@ static int i40e_validate_and_set_switch_mode(struct i40e_vsi *vsi)
/* Set Bit 7 to be valid */
mode = I40E_AQ_SET_SWITCH_BIT7_VALID;
/* Set L4type to both TCP and UDP support */
mode |= I40E_AQ_SET_SWITCH_L4_TYPE_BOTH;
/* Set L4type for TCP support */
mode |= I40E_AQ_SET_SWITCH_L4_TYPE_TCP;
/* Set cloud filter mode */
mode |= I40E_AQ_SET_SWITCH_MODE_NON_TUNNEL;
@ -6969,18 +6984,18 @@ static int i40e_add_del_cloud_filter_big_buf(struct i40e_vsi *vsi,
is_valid_ether_addr(filter->src_mac)) ||
(is_multicast_ether_addr(filter->dst_mac) &&
is_multicast_ether_addr(filter->src_mac)))
return -EINVAL;
return -EOPNOTSUPP;
/* Make sure port is specified, otherwise bail out, for channel
* specific cloud filter needs 'L4 port' to be non-zero
/* Big buffer cloud filter needs 'L4 port' to be non-zero. Also, UDP
* ports are not supported via big buffer now.
*/
if (!filter->dst_port)
return -EINVAL;
if (!filter->dst_port || filter->ip_proto == IPPROTO_UDP)
return -EOPNOTSUPP;
/* adding filter using src_port/src_ip is not supported at this stage */
if (filter->src_port || filter->src_ipv4 ||
!ipv6_addr_any(&filter->ip.v6.src_ip6))
return -EINVAL;
return -EOPNOTSUPP;
/* copy element needed to add cloud filter from filter */
i40e_set_cld_element(filter, &cld_filter.element);
@ -6991,7 +7006,7 @@ static int i40e_add_del_cloud_filter_big_buf(struct i40e_vsi *vsi,
is_multicast_ether_addr(filter->src_mac)) {
/* MAC + IP : unsupported mode */
if (filter->dst_ipv4)
return -EINVAL;
return -EOPNOTSUPP;
/* since we validated that L4 port must be valid before
* we get here, start with respective "flags" value
@ -7356,7 +7371,7 @@ static int i40e_configure_clsflower(struct i40e_vsi *vsi,
if (tc < 0) {
dev_err(&vsi->back->pdev->dev, "Invalid traffic class\n");
return -EINVAL;
return -EOPNOTSUPP;
}
if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state) ||


@ -3047,10 +3047,30 @@ bool __i40e_chk_linearize(struct sk_buff *skb)
/* Walk through fragments adding latest fragment, testing it, and
* then removing stale fragments from the sum.
*/
stale = &skb_shinfo(skb)->frags[0];
for (;;) {
for (stale = &skb_shinfo(skb)->frags[0];; stale++) {
int stale_size = skb_frag_size(stale);
sum += skb_frag_size(frag++);
/* The stale fragment may present us with a smaller
* descriptor than the actual fragment size. To account
* for that we need to remove all the data on the front and
* figure out what the remainder would be in the last
* descriptor associated with the fragment.
*/
if (stale_size > I40E_MAX_DATA_PER_TXD) {
int align_pad = -(stale->page_offset) &
(I40E_MAX_READ_REQ_SIZE - 1);
sum -= align_pad;
stale_size -= align_pad;
do {
sum -= I40E_MAX_DATA_PER_TXD_ALIGNED;
stale_size -= I40E_MAX_DATA_PER_TXD_ALIGNED;
} while (stale_size > I40E_MAX_DATA_PER_TXD);
}
/* if sum is negative we failed to make sufficient progress */
if (sum < 0)
return true;
@ -3058,7 +3078,7 @@ bool __i40e_chk_linearize(struct sk_buff *skb)
if (!nr_frags--)
break;
sum -= skb_frag_size(stale++);
sum -= stale_size;
}
return false;

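The padding arithmetic rounds the stale fragment's start offset up to
the next read-request boundary; the hardware charges those leading
bytes to the fragment's first descriptor. A worked example, assuming
I40E_MAX_READ_REQ_SIZE is 4096:

    /* page_offset = 0x234 */
    int align_pad = -(0x234) & (4096 - 1);      /* = 0xdcc */
    /* 0x234 + 0xdcc = 0x1000: everything up to the next 4K boundary
     * is subtracted first, then the remainder is peeled off in
     * I40E_MAX_DATA_PER_TXD_ALIGNED chunks until it fits one
     * descriptor.
     */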

@ -2012,10 +2012,30 @@ bool __i40evf_chk_linearize(struct sk_buff *skb)
/* Walk through fragments adding latest fragment, testing it, and
* then removing stale fragments from the sum.
*/
stale = &skb_shinfo(skb)->frags[0];
for (;;) {
for (stale = &skb_shinfo(skb)->frags[0];; stale++) {
int stale_size = skb_frag_size(stale);
sum += skb_frag_size(frag++);
/* The stale fragment may present us with a smaller
* descriptor than the actual fragment size. To account
* for that we need to remove all the data on the front and
* figure out what the remainder would be in the last
* descriptor associated with the fragment.
*/
if (stale_size > I40E_MAX_DATA_PER_TXD) {
int align_pad = -(stale->page_offset) &
(I40E_MAX_READ_REQ_SIZE - 1);
sum -= align_pad;
stale_size -= align_pad;
do {
sum -= I40E_MAX_DATA_PER_TXD_ALIGNED;
stale_size -= I40E_MAX_DATA_PER_TXD_ALIGNED;
} while (stale_size > I40E_MAX_DATA_PER_TXD);
}
/* if sum is negative we failed to make sufficient progress */
if (sum < 0)
return true;
@ -2023,7 +2043,7 @@ bool __i40evf_chk_linearize(struct sk_buff *skb)
if (!nr_frags--)
break;
sum -= skb_frag_size(stale++);
sum -= stale_size;
}
return false;


@ -4376,7 +4376,10 @@ static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev,
}
if (!info->linking)
break;
if (netdev_has_any_upper_dev(upper_dev)) {
if (netdev_has_any_upper_dev(upper_dev) &&
(!netif_is_bridge_master(upper_dev) ||
!mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp,
upper_dev))) {
NL_SET_ERR_MSG(extack,
"spectrum: Enslaving a port to a device that already has an upper device is not supported");
return -EINVAL;
@ -4504,6 +4507,7 @@ static int mlxsw_sp_netdevice_port_vlan_event(struct net_device *vlan_dev,
u16 vid)
{
struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
struct netdev_notifier_changeupper_info *info = ptr;
struct netlink_ext_ack *extack;
struct net_device *upper_dev;
@ -4520,7 +4524,10 @@ static int mlxsw_sp_netdevice_port_vlan_event(struct net_device *vlan_dev,
}
if (!info->linking)
break;
if (netdev_has_any_upper_dev(upper_dev)) {
if (netdev_has_any_upper_dev(upper_dev) &&
(!netif_is_bridge_master(upper_dev) ||
!mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp,
upper_dev))) {
NL_SET_ERR_MSG(extack, "spectrum: Enslaving a port to a device that already has an upper device is not supported");
return -EINVAL;
}


@ -365,6 +365,8 @@ int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port,
void mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port,
struct net_device *brport_dev,
struct net_device *br_dev);
bool mlxsw_sp_bridge_device_is_offloaded(const struct mlxsw_sp *mlxsw_sp,
const struct net_device *br_dev);
/* spectrum.c */
int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port,


@ -3228,7 +3228,7 @@ static void __mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp_nexthop *nh,
{
if (!removing)
nh->should_offload = 1;
else if (nh->offloaded)
else
nh->should_offload = 0;
nh->update = 1;
}


@ -152,6 +152,12 @@ mlxsw_sp_bridge_device_find(const struct mlxsw_sp_bridge *bridge,
return NULL;
}
bool mlxsw_sp_bridge_device_is_offloaded(const struct mlxsw_sp *mlxsw_sp,
const struct net_device *br_dev)
{
return !!mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
}
static struct mlxsw_sp_bridge_device *
mlxsw_sp_bridge_device_create(struct mlxsw_sp_bridge *bridge,
struct net_device *br_dev)


@ -147,7 +147,7 @@ static const u16 sh_eth_offset_gigabit[SH_ETH_MAX_REGISTER_OFFSET] = {
[FWNLCR0] = 0x0090,
[FWALCR0] = 0x0094,
[TXNLCR1] = 0x00a0,
[TXALCR1] = 0x00a0,
[TXALCR1] = 0x00a4,
[RXNLCR1] = 0x00a8,
[RXALCR1] = 0x00ac,
[FWNLCR1] = 0x00b0,
@ -399,7 +399,7 @@ static const u16 sh_eth_offset_fast_sh3_sh2[SH_ETH_MAX_REGISTER_OFFSET] = {
[FWNLCR0] = 0x0090,
[FWALCR0] = 0x0094,
[TXNLCR1] = 0x00a0,
[TXALCR1] = 0x00a0,
[TXALCR1] = 0x00a4,
[RXNLCR1] = 0x00a8,
[RXALCR1] = 0x00ac,
[FWNLCR1] = 0x00b0,
@ -3225,18 +3225,37 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
/* ioremap the TSU registers */
if (mdp->cd->tsu) {
struct resource *rtsu;
rtsu = platform_get_resource(pdev, IORESOURCE_MEM, 1);
mdp->tsu_addr = devm_ioremap_resource(&pdev->dev, rtsu);
if (IS_ERR(mdp->tsu_addr)) {
ret = PTR_ERR(mdp->tsu_addr);
if (!rtsu) {
dev_err(&pdev->dev, "no TSU resource\n");
ret = -ENODEV;
goto out_release;
}
/* We can only request the TSU region for the first port
* of the two sharing this TSU for the probe to succeed...
*/
if (devno % 2 == 0 &&
!devm_request_mem_region(&pdev->dev, rtsu->start,
resource_size(rtsu),
dev_name(&pdev->dev))) {
dev_err(&pdev->dev, "can't request TSU resource.\n");
ret = -EBUSY;
goto out_release;
}
mdp->tsu_addr = devm_ioremap(&pdev->dev, rtsu->start,
resource_size(rtsu));
if (!mdp->tsu_addr) {
dev_err(&pdev->dev, "TSU region ioremap() failed.\n");
ret = -ENOMEM;
goto out_release;
}
mdp->port = devno % 2;
ndev->features = NETIF_F_HW_VLAN_CTAG_FILTER;
}
/* initialize first or needed device */
if (!devno || pd->needs_init) {
/* Need to init only the first port of the two sharing a TSU */
if (devno % 2 == 0) {
if (mdp->cd->chip_reset)
mdp->cd->chip_reset(ndev);


@ -364,9 +364,15 @@ static void stmmac_eee_ctrl_timer(struct timer_list *t)
bool stmmac_eee_init(struct stmmac_priv *priv)
{
struct net_device *ndev = priv->dev;
int interface = priv->plat->interface;
unsigned long flags;
bool ret = false;
if ((interface != PHY_INTERFACE_MODE_MII) &&
(interface != PHY_INTERFACE_MODE_GMII) &&
!phy_interface_mode_is_rgmii(interface))
goto out;
/* Using PCS we cannot dial with the phy registers at this stage
* so we do not support extra feature like EEE.
*/


@ -825,6 +825,13 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
if (IS_ERR(rt))
return PTR_ERR(rt);
if (skb_dst(skb)) {
int mtu = dst_mtu(&rt->dst) - sizeof(struct iphdr) -
GENEVE_BASE_HLEN - info->options_len - 14;
skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu);
}
sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true);
if (geneve->collect_md) {
tos = ip_tunnel_ecn_encap(key->tos, ip_hdr(skb), skb);
@ -864,6 +871,13 @@ static int geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
if (IS_ERR(dst))
return PTR_ERR(dst);
if (skb_dst(skb)) {
int mtu = dst_mtu(dst) - sizeof(struct ipv6hdr) -
GENEVE_BASE_HLEN - info->options_len - 14;
skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu);
}
sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true);
if (geneve->collect_md) {
prio = ip_tunnel_ecn_encap(key->tos, ip_hdr(skb), skb);

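The subtraction budgets for the encapsulation overhead, with the
trailing 14 being the inner Ethernet header. For the IPv4 path,
assuming a 1500-byte route MTU, no Geneve options, and a
GENEVE_BASE_HLEN of 16 (UDP plus base Geneve header):

    int mtu = 1500                  /* dst_mtu(&rt->dst)     */
            - 20                    /* outer IPv4 header     */
            - 16                    /* GENEVE_BASE_HLEN      */
            - 0                     /* info->options_len     */
            - 14;                   /* inner Ethernet header */
    /* mtu == 1450 is what update_pmtu() records on the inner dst */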

@ -1444,9 +1444,14 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
return 0;
unregister_netdev:
/* macvlan_uninit would free the macvlan port */
unregister_netdevice(dev);
return err;
destroy_macvlan_port:
if (create)
/* the macvlan port may be freed by macvlan_uninit when fail to register.
* so we destroy the macvlan port only when it's valid.
*/
if (create && macvlan_port_get_rtnl(dev))
macvlan_port_destroy(port->dev);
return err;
}


@ -118,8 +118,10 @@ static int sun4i_mdio_probe(struct platform_device *pdev)
data->regulator = devm_regulator_get(&pdev->dev, "phy");
if (IS_ERR(data->regulator)) {
if (PTR_ERR(data->regulator) == -EPROBE_DEFER)
return -EPROBE_DEFER;
if (PTR_ERR(data->regulator) == -EPROBE_DEFER) {
ret = -EPROBE_DEFER;
goto err_out_free_mdiobus;
}
dev_info(&pdev->dev, "no regulator found\n");
data->regulator = NULL;


@ -1296,6 +1296,7 @@ int phylink_mii_ioctl(struct phylink *pl, struct ifreq *ifr, int cmd)
switch (cmd) {
case SIOCGMIIPHY:
mii->phy_id = pl->phydev->mdio.addr;
/* fall through */
case SIOCGMIIREG:
ret = phylink_phy_read(pl, mii->phy_id, mii->reg_num);
@ -1318,6 +1319,7 @@ int phylink_mii_ioctl(struct phylink *pl, struct ifreq *ifr, int cmd)
switch (cmd) {
case SIOCGMIIPHY:
mii->phy_id = 0;
/* fall through */
case SIOCGMIIREG:
ret = phylink_mii_read(pl, mii->phy_id, mii->reg_num);
@ -1429,9 +1431,8 @@ static void phylink_sfp_link_down(void *upstream)
WARN_ON(!lockdep_rtnl_is_held());
set_bit(PHYLINK_DISABLE_LINK, &pl->phylink_disable_state);
queue_work(system_power_efficient_wq, &pl->resolve);
flush_work(&pl->resolve);
netif_carrier_off(pl->netdev);
}
static void phylink_sfp_link_up(void *upstream)


@ -356,7 +356,8 @@ EXPORT_SYMBOL_GPL(sfp_register_upstream);
void sfp_unregister_upstream(struct sfp_bus *bus)
{
rtnl_lock();
sfp_unregister_bus(bus);
if (bus->sfp)
sfp_unregister_bus(bus);
bus->upstream = NULL;
bus->netdev = NULL;
rtnl_unlock();
@ -459,7 +460,8 @@ EXPORT_SYMBOL_GPL(sfp_register_socket);
void sfp_unregister_socket(struct sfp_bus *bus)
{
rtnl_lock();
sfp_unregister_bus(bus);
if (bus->netdev)
sfp_unregister_bus(bus);
bus->sfp_dev = NULL;
bus->sfp = NULL;
bus->socket_ops = NULL;


@ -1100,6 +1100,7 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x05c6, 0x9084, 4)},
{QMI_FIXED_INTF(0x05c6, 0x920d, 0)},
{QMI_FIXED_INTF(0x05c6, 0x920d, 5)},
{QMI_QUIRK_SET_DTR(0x05c6, 0x9625, 4)}, /* YUGA CLM920-NC5 */
{QMI_FIXED_INTF(0x0846, 0x68a2, 8)},
{QMI_FIXED_INTF(0x12d1, 0x140c, 1)}, /* Huawei E173 */
{QMI_FIXED_INTF(0x12d1, 0x14ac, 1)}, /* Huawei E1820 */


@ -1326,6 +1326,7 @@ static struct net_device *xennet_create_dev(struct xenbus_device *dev)
netif_carrier_off(netdev);
xenbus_switch_state(dev, XenbusStateInitialising);
return netdev;
exit:


@ -17,7 +17,6 @@ struct sh_eth_plat_data {
unsigned char mac_addr[ETH_ALEN];
unsigned no_ether_link:1;
unsigned ether_link_active_low:1;
unsigned needs_init:1;
};
#endif


@ -966,7 +966,7 @@ void sctp_transport_burst_limited(struct sctp_transport *);
void sctp_transport_burst_reset(struct sctp_transport *);
unsigned long sctp_transport_timeout(struct sctp_transport *);
void sctp_transport_reset(struct sctp_transport *t);
void sctp_transport_update_pmtu(struct sctp_transport *t, u32 pmtu);
bool sctp_transport_update_pmtu(struct sctp_transport *t, u32 pmtu);
void sctp_transport_immediate_rtx(struct sctp_transport *);
void sctp_transport_dst_release(struct sctp_transport *t);
void sctp_transport_dst_confirm(struct sctp_transport *t);


@ -146,7 +146,7 @@ struct vxlanhdr_gpe {
np_applied:1,
instance_applied:1,
version:2,
reserved_flags2:2;
reserved_flags2:2;
#elif defined(__BIG_ENDIAN_BITFIELD)
u8 reserved_flags2:2,
version:2,


@ -23,6 +23,7 @@
#define _UAPI_LINUX_IF_ETHER_H
#include <linux/types.h>
#include <linux/libc-compat.h>
/*
* IEEE 802.3 Ethernet magic constants. The frame sizes omit the preamble
@ -149,11 +150,13 @@
* This is an Ethernet frame header.
*/
#if __UAPI_DEF_ETHHDR
struct ethhdr {
unsigned char h_dest[ETH_ALEN]; /* destination eth addr */
unsigned char h_source[ETH_ALEN]; /* source ether addr */
__be16 h_proto; /* packet type ID field */
} __attribute__((packed));
#endif
#endif /* _UAPI_LINUX_IF_ETHER_H */


@ -168,47 +168,106 @@
/* If we did not see any headers from any supported C libraries,
* or we are being included in the kernel, then define everything
* that we need. */
* that we need. Check for previous __UAPI_* definitions to give
* unsupported C libraries a way to opt out of any kernel definition. */
#else /* !defined(__GLIBC__) */
/* Definitions for if.h */
#ifndef __UAPI_DEF_IF_IFCONF
#define __UAPI_DEF_IF_IFCONF 1
#endif
#ifndef __UAPI_DEF_IF_IFMAP
#define __UAPI_DEF_IF_IFMAP 1
#endif
#ifndef __UAPI_DEF_IF_IFNAMSIZ
#define __UAPI_DEF_IF_IFNAMSIZ 1
#endif
#ifndef __UAPI_DEF_IF_IFREQ
#define __UAPI_DEF_IF_IFREQ 1
#endif
/* Everything up to IFF_DYNAMIC, matches net/if.h until glibc 2.23 */
#ifndef __UAPI_DEF_IF_NET_DEVICE_FLAGS
#define __UAPI_DEF_IF_NET_DEVICE_FLAGS 1
#endif
/* For the future if glibc adds IFF_LOWER_UP, IFF_DORMANT and IFF_ECHO */
#ifndef __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO
#define __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO 1
#endif
/* Definitions for in.h */
#ifndef __UAPI_DEF_IN_ADDR
#define __UAPI_DEF_IN_ADDR 1
#endif
#ifndef __UAPI_DEF_IN_IPPROTO
#define __UAPI_DEF_IN_IPPROTO 1
#endif
#ifndef __UAPI_DEF_IN_PKTINFO
#define __UAPI_DEF_IN_PKTINFO 1
#endif
#ifndef __UAPI_DEF_IP_MREQ
#define __UAPI_DEF_IP_MREQ 1
#endif
#ifndef __UAPI_DEF_SOCKADDR_IN
#define __UAPI_DEF_SOCKADDR_IN 1
#endif
#ifndef __UAPI_DEF_IN_CLASS
#define __UAPI_DEF_IN_CLASS 1
#endif
/* Definitions for in6.h */
#ifndef __UAPI_DEF_IN6_ADDR
#define __UAPI_DEF_IN6_ADDR 1
#endif
#ifndef __UAPI_DEF_IN6_ADDR_ALT
#define __UAPI_DEF_IN6_ADDR_ALT 1
#endif
#ifndef __UAPI_DEF_SOCKADDR_IN6
#define __UAPI_DEF_SOCKADDR_IN6 1
#endif
#ifndef __UAPI_DEF_IPV6_MREQ
#define __UAPI_DEF_IPV6_MREQ 1
#endif
#ifndef __UAPI_DEF_IPPROTO_V6
#define __UAPI_DEF_IPPROTO_V6 1
#endif
#ifndef __UAPI_DEF_IPV6_OPTIONS
#define __UAPI_DEF_IPV6_OPTIONS 1
#endif
#ifndef __UAPI_DEF_IN6_PKTINFO
#define __UAPI_DEF_IN6_PKTINFO 1
#endif
#ifndef __UAPI_DEF_IP6_MTUINFO
#define __UAPI_DEF_IP6_MTUINFO 1
#endif
/* Definitions for ipx.h */
#ifndef __UAPI_DEF_SOCKADDR_IPX
#define __UAPI_DEF_SOCKADDR_IPX 1
#endif
#ifndef __UAPI_DEF_IPX_ROUTE_DEFINITION
#define __UAPI_DEF_IPX_ROUTE_DEFINITION 1
#endif
#ifndef __UAPI_DEF_IPX_INTERFACE_DEFINITION
#define __UAPI_DEF_IPX_INTERFACE_DEFINITION 1
#endif
#ifndef __UAPI_DEF_IPX_CONFIG_DATA
#define __UAPI_DEF_IPX_CONFIG_DATA 1
#endif
#ifndef __UAPI_DEF_IPX_ROUTE_DEF
#define __UAPI_DEF_IPX_ROUTE_DEF 1
#endif
/* Definitions for xattr.h */
#ifndef __UAPI_DEF_XATTR
#define __UAPI_DEF_XATTR 1
#endif
#endif /* __GLIBC__ */
/* Definitions for if_ether.h */
/* allow libcs like musl to deactivate this, glibc does not implement this. */
#ifndef __UAPI_DEF_ETHHDR
#define __UAPI_DEF_ETHHDR 1
#endif
#endif /* _UAPI_LIBC_COMPAT_H */

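The guard gives a libc that ships its own struct ethhdr (musl is the
motivating case named in the comment above) a way to suppress the
kernel's definition before pulling in the UAPI header. Sketch of the
intended opt-out:

    /* in the libc's own if_ether.h, before any kernel header: */
    #define __UAPI_DEF_ETHHDR 0
    #include <linux/if_ether.h>     /* struct ethhdr now skipped */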

@ -36,7 +36,7 @@ enum ip_conntrack_info {
#define NF_CT_STATE_INVALID_BIT (1 << 0)
#define NF_CT_STATE_BIT(ctinfo) (1 << ((ctinfo) % IP_CT_IS_REPLY + 1))
#define NF_CT_STATE_UNTRACKED_BIT (1 << (IP_CT_UNTRACKED + 1))
#define NF_CT_STATE_UNTRACKED_BIT (1 << 6)
/* Bitset representing status of connection. */
enum ip_conntrack_status {


@ -1146,7 +1146,19 @@ EXPORT_SYMBOL(dev_alloc_name);
int dev_get_valid_name(struct net *net, struct net_device *dev,
const char *name)
{
return dev_alloc_name_ns(net, dev, name);
BUG_ON(!net);
if (!dev_valid_name(name))
return -EINVAL;
if (strchr(name, '%'))
return dev_alloc_name_ns(net, dev, name);
else if (__dev_get_by_name(net, name))
return -EEXIST;
else if (dev->name != name)
strlcpy(dev->name, name, IFNAMSIZ);
return 0;
}
EXPORT_SYMBOL(dev_get_valid_name);

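The revert restores the helper's earlier contract: invalid names fail
fast, template names go through the allocator, and literal names are
either copied into dev->name or rejected as duplicates. Roughly:

    dev_get_valid_name(net, dev, "eth%d");  /* picks next free ethN   */
    dev_get_valid_name(net, dev, "eth0");   /* -EEXIST if eth0 exists */
    dev_get_valid_name(net, dev, "bad/if"); /* -EINVAL: invalid name  */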

@ -770,15 +770,6 @@ static int ethtool_set_link_ksettings(struct net_device *dev,
return dev->ethtool_ops->set_link_ksettings(dev, &link_ksettings);
}
static void
warn_incomplete_ethtool_legacy_settings_conversion(const char *details)
{
char name[sizeof(current->comm)];
pr_info_once("warning: `%s' uses legacy ethtool link settings API, %s\n",
get_task_comm(name, current), details);
}
/* Query device for its ethtool_cmd settings.
*
* Backward compatibility note: for compatibility with legacy ethtool,
@ -805,10 +796,8 @@ static int ethtool_get_settings(struct net_device *dev, void __user *useraddr)
&link_ksettings);
if (err < 0)
return err;
if (!convert_link_ksettings_to_legacy_settings(&cmd,
&link_ksettings))
warn_incomplete_ethtool_legacy_settings_conversion(
"link modes are only partially reported");
convert_link_ksettings_to_legacy_settings(&cmd,
&link_ksettings);
/* send a sensible cmd tag back to user */
cmd.cmd = ETHTOOL_GSET;


@ -1681,18 +1681,18 @@ static bool link_dump_filtered(struct net_device *dev,
return false;
}
static struct net *get_target_net(struct sk_buff *skb, int netnsid)
static struct net *get_target_net(struct sock *sk, int netnsid)
{
struct net *net;
net = get_net_ns_by_id(sock_net(skb->sk), netnsid);
net = get_net_ns_by_id(sock_net(sk), netnsid);
if (!net)
return ERR_PTR(-EINVAL);
/* For now, the caller is required to have CAP_NET_ADMIN in
* the user namespace owning the target net ns.
*/
if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN)) {
if (!sk_ns_capable(sk, net->user_ns, CAP_NET_ADMIN)) {
put_net(net);
return ERR_PTR(-EACCES);
}
@ -1733,7 +1733,7 @@ static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
ifla_policy, NULL) >= 0) {
if (tb[IFLA_IF_NETNSID]) {
netnsid = nla_get_s32(tb[IFLA_IF_NETNSID]);
tgt_net = get_target_net(skb, netnsid);
tgt_net = get_target_net(skb->sk, netnsid);
if (IS_ERR(tgt_net)) {
tgt_net = net;
netnsid = -1;
@ -2883,7 +2883,7 @@ static int rtnl_getlink(struct sk_buff *skb, struct nlmsghdr *nlh,
if (tb[IFLA_IF_NETNSID]) {
netnsid = nla_get_s32(tb[IFLA_IF_NETNSID]);
tgt_net = get_target_net(skb, netnsid);
tgt_net = get_target_net(NETLINK_CB(skb).sk, netnsid);
if (IS_ERR(tgt_net))
return PTR_ERR(tgt_net);
}


@ -1241,23 +1241,28 @@ out:
* If fib6_add_1 has cleared the old leaf pointer in the
* super-tree leaf node we have to find a new one for it.
*/
struct rt6_info *pn_leaf = rcu_dereference_protected(pn->leaf,
lockdep_is_held(&table->tb6_lock));
if (pn != fn && pn_leaf == rt) {
pn_leaf = NULL;
RCU_INIT_POINTER(pn->leaf, NULL);
atomic_dec(&rt->rt6i_ref);
}
if (pn != fn && !pn_leaf && !(pn->fn_flags & RTN_RTINFO)) {
pn_leaf = fib6_find_prefix(info->nl_net, table, pn);
#if RT6_DEBUG >= 2
if (!pn_leaf) {
WARN_ON(!pn_leaf);
pn_leaf = info->nl_net->ipv6.ip6_null_entry;
if (pn != fn) {
struct rt6_info *pn_leaf =
rcu_dereference_protected(pn->leaf,
lockdep_is_held(&table->tb6_lock));
if (pn_leaf == rt) {
pn_leaf = NULL;
RCU_INIT_POINTER(pn->leaf, NULL);
atomic_dec(&rt->rt6i_ref);
}
if (!pn_leaf && !(pn->fn_flags & RTN_RTINFO)) {
pn_leaf = fib6_find_prefix(info->nl_net, table,
pn);
#if RT6_DEBUG >= 2
if (!pn_leaf) {
WARN_ON(!pn_leaf);
pn_leaf =
info->nl_net->ipv6.ip6_null_entry;
}
#endif
atomic_inc(&pn_leaf->rt6i_ref);
rcu_assign_pointer(pn->leaf, pn_leaf);
atomic_inc(&pn_leaf->rt6i_ref);
rcu_assign_pointer(pn->leaf, pn_leaf);
}
}
#endif
goto failure;


@ -1074,10 +1074,11 @@ int ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev, __u8 dsfield,
memcpy(&fl6->daddr, addr6, sizeof(fl6->daddr));
neigh_release(neigh);
}
} else if (!(t->parms.flags &
(IP6_TNL_F_USE_ORIG_TCLASS | IP6_TNL_F_USE_ORIG_FWMARK))) {
/* enable the cache only only if the routing decision does
* not depend on the current inner header value
} else if (t->parms.proto != 0 && !(t->parms.flags &
(IP6_TNL_F_USE_ORIG_TCLASS |
IP6_TNL_F_USE_ORIG_FWMARK))) {
/* enable the cache only if neither the outer protocol nor the
* routing decision depends on the current inner header value
*/
use_cache = true;
}
@ -1676,11 +1677,11 @@ int ip6_tnl_change_mtu(struct net_device *dev, int new_mtu)
{
struct ip6_tnl *tnl = netdev_priv(dev);
if (tnl->parms.proto == IPPROTO_IPIP) {
if (new_mtu < ETH_MIN_MTU)
if (tnl->parms.proto == IPPROTO_IPV6) {
if (new_mtu < IPV6_MIN_MTU)
return -EINVAL;
} else {
if (new_mtu < IPV6_MIN_MTU)
if (new_mtu < ETH_MIN_MTU)
return -EINVAL;
}
if (new_mtu > 0xFFF8 - dev->hard_header_len)

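The swapped condition applies the correct floor per mode: an
ipv6-in-ipv6 tunnel must keep at least the IPv6 minimum link MTU,
whereas ipv4 (and dual-stack, proto 0) payloads only need the
Ethernet minimum. In numbers:

    if (tnl->parms.proto == IPPROTO_IPV6) {
        if (new_mtu < IPV6_MIN_MTU)     /* 1280 */
            return -EINVAL;
    } else {                            /* IPPROTO_IPIP or 0 */
        if (new_mtu < ETH_MIN_MTU)      /* 68 */
            return -EINVAL;
    }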

@ -3632,6 +3632,8 @@ static bool ieee80211_accept_frame(struct ieee80211_rx_data *rx)
}
return true;
case NL80211_IFTYPE_MESH_POINT:
if (ether_addr_equal(sdata->vif.addr, hdr->addr2))
return false;
if (multicast)
return true;
return ether_addr_equal(sdata->vif.addr, hdr->addr1);


@ -2072,7 +2072,7 @@ static int nf_tables_dump_rules(struct sk_buff *skb,
continue;
list_for_each_entry_rcu(chain, &table->chains, list) {
if (ctx && ctx->chain[0] &&
if (ctx && ctx->chain &&
strcmp(ctx->chain, chain->name) != 0)
continue;
@ -4665,8 +4665,10 @@ static int nf_tables_dump_obj_done(struct netlink_callback *cb)
{
struct nft_obj_filter *filter = cb->data;
kfree(filter->table);
kfree(filter);
if (filter) {
kfree(filter->table);
kfree(filter);
}
return 0;
}


@ -525,6 +525,9 @@ int rds_rdma_extra_size(struct rds_rdma_args *args)
local_vec = (struct rds_iovec __user *)(unsigned long) args->local_vec_addr;
if (args->nr_local == 0)
return -EINVAL;
/* figure out the number of pages in the vector */
for (i = 0; i < args->nr_local; i++) {
if (copy_from_user(&vec, &local_vec[i],
@ -874,6 +877,7 @@ int rds_cmsg_atomic(struct rds_sock *rs, struct rds_message *rm,
err:
if (page)
put_page(page);
rm->atomic.op_active = 0;
kfree(rm->atomic.op_notifier);
return ret;


@ -159,7 +159,7 @@ static void tcf_gact_stats_update(struct tc_action *a, u64 bytes, u32 packets,
if (action == TC_ACT_SHOT)
this_cpu_ptr(gact->common.cpu_qstats)->drops += packets;
tm->lastuse = lastuse;
tm->lastuse = max_t(u64, tm->lastuse, lastuse);
}
static int tcf_gact_dump(struct sk_buff *skb, struct tc_action *a,


@ -239,7 +239,7 @@ static void tcf_stats_update(struct tc_action *a, u64 bytes, u32 packets,
struct tcf_t *tm = &m->tcf_tm;
_bstats_cpu_update(this_cpu_ptr(a->cpu_bstats), bytes, packets);
tm->lastuse = lastuse;
tm->lastuse = max_t(u64, tm->lastuse, lastuse);
}
static int tcf_mirred_dump(struct sk_buff *skb, struct tc_action *a, int bind,


@ -399,20 +399,24 @@ void sctp_icmp_frag_needed(struct sock *sk, struct sctp_association *asoc,
return;
}
if (t->param_flags & SPP_PMTUD_ENABLE) {
/* Update transports view of the MTU */
sctp_transport_update_pmtu(t, pmtu);
if (!(t->param_flags & SPP_PMTUD_ENABLE))
/* We can't allow retransmitting in such case, as the
* retransmission would be sized just as before, and thus we
* would get another icmp, and retransmit again.
*/
return;
/* Update association pmtu. */
sctp_assoc_sync_pmtu(asoc);
}
/* Retransmit with the new pmtu setting.
* Normally, if PMTU discovery is disabled, an ICMP Fragmentation
* Needed will never be sent, but if a message was sent before
* PMTU discovery was disabled that was larger than the PMTU, it
* would not be fragmented, so it must be re-transmitted fragmented.
/* Update transports view of the MTU. Return if no update was needed.
* If an update wasn't needed/possible, it also doesn't make sense to
* try to retransmit now.
*/
if (!sctp_transport_update_pmtu(t, pmtu))
return;
/* Update association pmtu. */
sctp_assoc_sync_pmtu(asoc);
/* Retransmit with the new pmtu setting. */
sctp_retransmit(&asoc->outqueue, t, SCTP_RTXR_PMTUD);
}


@ -156,9 +156,9 @@ int sctp_stream_init(struct sctp_stream *stream, __u16 outcnt, __u16 incnt,
sctp_stream_outq_migrate(stream, NULL, outcnt);
sched->sched_all(stream);
i = sctp_stream_alloc_out(stream, outcnt, gfp);
if (i)
return i;
ret = sctp_stream_alloc_out(stream, outcnt, gfp);
if (ret)
goto out;
stream->outcnt = outcnt;
for (i = 0; i < stream->outcnt; i++)
@ -170,19 +170,17 @@ in:
if (!incnt)
goto out;
i = sctp_stream_alloc_in(stream, incnt, gfp);
if (i) {
ret = -ENOMEM;
goto free;
ret = sctp_stream_alloc_in(stream, incnt, gfp);
if (ret) {
sched->free(stream);
kfree(stream->out);
stream->out = NULL;
stream->outcnt = 0;
goto out;
}
stream->incnt = incnt;
goto out;
free:
sched->free(stream);
kfree(stream->out);
stream->out = NULL;
out:
return ret;
}


@ -248,28 +248,37 @@ void sctp_transport_pmtu(struct sctp_transport *transport, struct sock *sk)
transport->pathmtu = SCTP_DEFAULT_MAXSEGMENT;
}
void sctp_transport_update_pmtu(struct sctp_transport *t, u32 pmtu)
bool sctp_transport_update_pmtu(struct sctp_transport *t, u32 pmtu)
{
struct dst_entry *dst = sctp_transport_dst_check(t);
bool change = true;
if (unlikely(pmtu < SCTP_DEFAULT_MINSEGMENT)) {
pr_warn("%s: Reported pmtu %d too low, using default minimum of %d\n",
__func__, pmtu, SCTP_DEFAULT_MINSEGMENT);
/* Use default minimum segment size and disable
* pmtu discovery on this transport.
*/
t->pathmtu = SCTP_DEFAULT_MINSEGMENT;
} else {
t->pathmtu = pmtu;
pr_warn_ratelimited("%s: Reported pmtu %d too low, using default minimum of %d\n",
__func__, pmtu, SCTP_DEFAULT_MINSEGMENT);
/* Use default minimum segment instead */
pmtu = SCTP_DEFAULT_MINSEGMENT;
}
pmtu = SCTP_TRUNC4(pmtu);
if (dst) {
dst->ops->update_pmtu(dst, t->asoc->base.sk, NULL, pmtu);
dst = sctp_transport_dst_check(t);
}
if (!dst)
if (!dst) {
t->af_specific->get_dst(t, &t->saddr, &t->fl, t->asoc->base.sk);
dst = t->dst;
}
if (dst) {
/* Re-fetch, as under layers may have a higher minimum size */
pmtu = SCTP_TRUNC4(dst_mtu(dst));
change = t->pathmtu != pmtu;
}
t->pathmtu = pmtu;
return change;
}
/* Caches the dst entry and source address for a transport's destination


@ -109,7 +109,8 @@ static void tipc_group_proto_xmit(struct tipc_group *grp, struct tipc_member *m,
static void tipc_group_decr_active(struct tipc_group *grp,
struct tipc_member *m)
{
if (m->state == MBR_ACTIVE || m->state == MBR_RECLAIMING)
if (m->state == MBR_ACTIVE || m->state == MBR_RECLAIMING ||
m->state == MBR_REMITTED)
grp->active_cnt--;
}
@ -562,7 +563,7 @@ void tipc_group_update_rcv_win(struct tipc_group *grp, int blks, u32 node,
int max_active = grp->max_active;
int reclaim_limit = max_active * 3 / 4;
int active_cnt = grp->active_cnt;
struct tipc_member *m, *rm;
struct tipc_member *m, *rm, *pm;
m = tipc_group_find_member(grp, node, port);
if (!m)
@ -605,6 +606,17 @@ void tipc_group_update_rcv_win(struct tipc_group *grp, int blks, u32 node,
pr_warn_ratelimited("Rcv unexpected msg after REMIT\n");
tipc_group_proto_xmit(grp, m, GRP_ADV_MSG, xmitq);
}
grp->active_cnt--;
list_del_init(&m->list);
if (list_empty(&grp->pending))
return;
/* Set oldest pending member to active and advertise */
pm = list_first_entry(&grp->pending, struct tipc_member, list);
pm->state = MBR_ACTIVE;
list_move_tail(&pm->list, &grp->active);
grp->active_cnt++;
tipc_group_proto_xmit(grp, pm, GRP_ADV_MSG, xmitq);
break;
case MBR_RECLAIMING:
case MBR_DISCOVERED:
@ -742,14 +754,14 @@ void tipc_group_proto_rcv(struct tipc_group *grp, bool *usr_wakeup,
if (!m || m->state != MBR_RECLAIMING)
return;
list_del_init(&m->list);
grp->active_cnt--;
remitted = msg_grp_remitted(hdr);
/* Messages preceding the REMIT still in receive queue */
if (m->advertised > remitted) {
m->state = MBR_REMITTED;
in_flight = m->advertised - remitted;
m->advertised = ADV_IDLE + in_flight;
return;
}
/* All messages preceding the REMIT have been read */
if (m->advertised <= remitted) {
@ -761,6 +773,8 @@ void tipc_group_proto_rcv(struct tipc_group *grp, bool *usr_wakeup,
tipc_group_proto_xmit(grp, m, GRP_ADV_MSG, xmitq);
m->advertised = ADV_IDLE + in_flight;
grp->active_cnt--;
list_del_init(&m->list);
/* Set oldest pending member to active and advertise */
if (list_empty(&grp->pending))


@ -11361,7 +11361,8 @@ static int nl80211_nan_add_func(struct sk_buff *skb,
break;
case NL80211_NAN_FUNC_FOLLOW_UP:
if (!tb[NL80211_NAN_FUNC_FOLLOW_UP_ID] ||
!tb[NL80211_NAN_FUNC_FOLLOW_UP_REQ_ID]) {
!tb[NL80211_NAN_FUNC_FOLLOW_UP_REQ_ID] ||
!tb[NL80211_NAN_FUNC_FOLLOW_UP_DEST]) {
err = -EINVAL;
goto out;
}