
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Pull networking fixes from David Miller:

 1) Fix double-free in batman-adv, from Sven Eckelmann.

 2) Fix packet stats for fast-RX path, from Johannes Berg.

 3) Netfilter's ip_route_me_harder() doesn't handle request sockets
    properly, fix from Florian Westphal.

 4) Fix sendmsg deadlock in rxrpc, from David Howells.

 5) Add missing RCU locking to transport hashtable scan, from Xin Long.

 6) Fix potential packet loss in mlxsw driver, from Ido Schimmel.

 7) Fix race in NAPI handling between poll handlers and busy polling,
    from Eric Dumazet.

 8) TX paths in vxlan and geneve need proper RCU locking, from Jakub
    Kicinski.

 9) SYN processing in DCCP and TCP needs to disable BH, from Eric
    Dumazet.

10) Properly handle net_enable_timestamp() being invoked from IRQ
    context, also from Eric Dumazet.

11) Fix crash on device-tree systems in xgene driver, from Alban Bedel.

12) Do not call sk_free() on a locked socket, from Arnaldo Carvalho de
    Melo.

13) Fix use-after-free in netvsc driver, from Dexuan Cui.

14) Fix max MTU setting in bonding driver, from WANG Cong.

15) xen-netback hash table can be allocated from softirq context, so use
    GFP_ATOMIC. From Anoob Soman.

16) Fix MAC address change bug in bgmac driver, from Hari Vyas.

17) strparser needs to destroy strp_wq on module exit, from WANG Cong.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (69 commits)
  strparser: destroy workqueue on module exit
  sfc: fix IPID endianness in TSOv2
  sfc: avoid max() in array size
  rds: remove unnecessary returned value check
  rxrpc: Fix potential NULL-pointer exception
  nfp: correct DMA direction in XDP DMA sync
  nfp: don't tell FW about the reserved buffer space
  net: ethernet: bgmac: mac address change bug
  net: ethernet: bgmac: init sequence bug
  xen-netback: don't vfree() queues under spinlock
  xen-netback: keep a local pointer for vif in backend_disconnect()
  netfilter: nf_tables: don't call nfnetlink_set_err() if nfnetlink_send() fails
  netfilter: nft_set_rbtree: incorrect assumption on lower interval lookups
  netfilter: nf_conntrack_sip: fix wrong memory initialisation
  can: flexcan: fix typo in comment
  can: usb_8dev: Fix memory leak of priv->cmd_msg_buffer
  can: gs_usb: fix coding style
  can: gs_usb: Don't use stack memory for USB transfers
  ixgbe: Limit use of 2K buffers on architectures with 256B or larger cache lines
  ixgbe: update the rss key on h/w, when ethtool ask for it
  ...
Linus Torvalds 2017-03-04 17:31:39 -08:00
commit 8d70eeb84a
86 changed files with 896 additions and 369 deletions


@ -6011,9 +6011,8 @@ F: include/linux/hsi/
F: include/uapi/linux/hsi/
HSO 3G MODEM DRIVER
M: Jan Dumon <j.dumon@option.com>
W: http://www.pharscape.org
S: Maintained
L: linux-usb@vger.kernel.org
S: Orphan
F: drivers/net/usb/hso.c
HSR NETWORK PROTOCOL


@ -4179,6 +4179,7 @@ void bond_setup(struct net_device *bond_dev)
/* Initialize the device entry points */
ether_setup(bond_dev);
bond_dev->max_mtu = ETH_MAX_MTU;
bond_dev->netdev_ops = &bond_netdev_ops;
bond_dev->ethtool_ops = &bond_ethtool_ops;


@ -196,7 +196,7 @@
#define FLEXCAN_QUIRK_BROKEN_ERR_STATE BIT(1) /* [TR]WRN_INT not connected */
#define FLEXCAN_QUIRK_DISABLE_RXFG BIT(2) /* Disable RX FIFO Global mask */
#define FLEXCAN_QUIRK_ENABLE_EACEN_RRS BIT(3) /* Enable EACEN and RRS bit in ctrl2 */
#define FLEXCAN_QUIRK_DISABLE_MECR BIT(4) /* Disble Memory error detection */
#define FLEXCAN_QUIRK_DISABLE_MECR BIT(4) /* Disable Memory error detection */
#define FLEXCAN_QUIRK_USE_OFF_TIMESTAMP BIT(5) /* Use timestamp based offloading */
/* Structure of the message buffer */


@ -258,7 +258,7 @@ static int gs_cmd_reset(struct gs_usb *gsusb, struct gs_can *gsdev)
rc = usb_control_msg(interface_to_usbdev(intf),
usb_sndctrlpipe(interface_to_usbdev(intf), 0),
GS_USB_BREQ_MODE,
USB_DIR_OUT|USB_TYPE_VENDOR|USB_RECIP_INTERFACE,
USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
gsdev->channel,
0,
dm,
@ -432,7 +432,7 @@ static int gs_usb_set_bittiming(struct net_device *netdev)
rc = usb_control_msg(interface_to_usbdev(intf),
usb_sndctrlpipe(interface_to_usbdev(intf), 0),
GS_USB_BREQ_BITTIMING,
USB_DIR_OUT|USB_TYPE_VENDOR|USB_RECIP_INTERFACE,
USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
dev->channel,
0,
dbt,
@ -546,7 +546,6 @@ static netdev_tx_t gs_can_start_xmit(struct sk_buff *skb,
hf,
urb->transfer_dma);
if (rc == -ENODEV) {
netif_device_detach(netdev);
} else {
@ -804,7 +803,7 @@ static struct gs_can *gs_make_candev(unsigned int channel,
rc = usb_control_msg(interface_to_usbdev(intf),
usb_rcvctrlpipe(interface_to_usbdev(intf), 0),
GS_USB_BREQ_BT_CONST,
USB_DIR_IN|USB_TYPE_VENDOR|USB_RECIP_INTERFACE,
USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
channel,
0,
bt_const,
@ -908,57 +907,72 @@ static int gs_usb_probe(struct usb_interface *intf,
struct gs_usb *dev;
int rc = -ENOMEM;
unsigned int icount, i;
struct gs_host_config hconf = {
.byte_order = 0x0000beef,
};
struct gs_device_config dconf;
struct gs_host_config *hconf;
struct gs_device_config *dconf;
hconf = kmalloc(sizeof(*hconf), GFP_KERNEL);
if (!hconf)
return -ENOMEM;
hconf->byte_order = 0x0000beef;
/* send host config */
rc = usb_control_msg(interface_to_usbdev(intf),
usb_sndctrlpipe(interface_to_usbdev(intf), 0),
GS_USB_BREQ_HOST_FORMAT,
USB_DIR_OUT|USB_TYPE_VENDOR|USB_RECIP_INTERFACE,
USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
1,
intf->altsetting[0].desc.bInterfaceNumber,
&hconf,
sizeof(hconf),
hconf,
sizeof(*hconf),
1000);
kfree(hconf);
if (rc < 0) {
dev_err(&intf->dev, "Couldn't send data format (err=%d)\n",
rc);
return rc;
}
dconf = kmalloc(sizeof(*dconf), GFP_KERNEL);
if (!dconf)
return -ENOMEM;
/* read device config */
rc = usb_control_msg(interface_to_usbdev(intf),
usb_rcvctrlpipe(interface_to_usbdev(intf), 0),
GS_USB_BREQ_DEVICE_CONFIG,
USB_DIR_IN|USB_TYPE_VENDOR|USB_RECIP_INTERFACE,
USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
1,
intf->altsetting[0].desc.bInterfaceNumber,
&dconf,
sizeof(dconf),
dconf,
sizeof(*dconf),
1000);
if (rc < 0) {
dev_err(&intf->dev, "Couldn't get device config: (err=%d)\n",
rc);
kfree(dconf);
return rc;
}
icount = dconf.icount + 1;
icount = dconf->icount + 1;
dev_info(&intf->dev, "Configuring for %d interfaces\n", icount);
if (icount > GS_MAX_INTF) {
dev_err(&intf->dev,
"Driver cannot handle more that %d CAN interfaces\n",
GS_MAX_INTF);
kfree(dconf);
return -EINVAL;
}
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
if (!dev)
if (!dev) {
kfree(dconf);
return -ENOMEM;
}
init_usb_anchor(&dev->rx_submitted);
atomic_set(&dev->active_channels, 0);
@ -967,7 +981,7 @@ static int gs_usb_probe(struct usb_interface *intf,
dev->udev = interface_to_usbdev(intf);
for (i = 0; i < icount; i++) {
dev->canch[i] = gs_make_candev(i, intf, &dconf);
dev->canch[i] = gs_make_candev(i, intf, dconf);
if (IS_ERR_OR_NULL(dev->canch[i])) {
/* save error code to return later */
rc = PTR_ERR(dev->canch[i]);
@ -978,12 +992,15 @@ static int gs_usb_probe(struct usb_interface *intf,
gs_destroy_candev(dev->canch[i]);
usb_kill_anchored_urbs(&dev->rx_submitted);
kfree(dconf);
kfree(dev);
return rc;
}
dev->canch[i]->parent = dev;
}
kfree(dconf);
return 0;
}


@ -951,8 +951,8 @@ static int usb_8dev_probe(struct usb_interface *intf,
for (i = 0; i < MAX_TX_URBS; i++)
priv->tx_contexts[i].echo_index = MAX_TX_URBS;
priv->cmd_msg_buffer = kzalloc(sizeof(struct usb_8dev_cmd_msg),
GFP_KERNEL);
priv->cmd_msg_buffer = devm_kzalloc(&intf->dev, sizeof(struct usb_8dev_cmd_msg),
GFP_KERNEL);
if (!priv->cmd_msg_buffer)
goto cleanup_candev;
@ -966,7 +966,7 @@ static int usb_8dev_probe(struct usb_interface *intf,
if (err) {
netdev_err(netdev,
"couldn't register CAN device: %d\n", err);
goto cleanup_cmd_msg_buffer;
goto cleanup_candev;
}
err = usb_8dev_cmd_version(priv, &version);
@ -987,9 +987,6 @@ static int usb_8dev_probe(struct usb_interface *intf,
cleanup_unregister_candev:
unregister_netdev(priv->netdev);
cleanup_cmd_msg_buffer:
kfree(priv->cmd_msg_buffer);
cleanup_candev:
free_candev(netdev);


@ -1276,18 +1276,6 @@ err_out:
return ret;
}
static void __exit dec_lance_remove(struct device *bdev)
{
struct net_device *dev = dev_get_drvdata(bdev);
resource_size_t start, len;
unregister_netdev(dev);
start = to_tc_dev(bdev)->resource.start;
len = to_tc_dev(bdev)->resource.end - start + 1;
release_mem_region(start, len);
free_netdev(dev);
}
/* Find all the lance cards on the system and initialize them */
static int __init dec_lance_platform_probe(void)
{
@ -1320,7 +1308,7 @@ static void __exit dec_lance_platform_remove(void)
#ifdef CONFIG_TC
static int dec_lance_tc_probe(struct device *dev);
static int __exit dec_lance_tc_remove(struct device *dev);
static int dec_lance_tc_remove(struct device *dev);
static const struct tc_device_id dec_lance_tc_table[] = {
{ "DEC ", "PMAD-AA " },
@ -1334,7 +1322,7 @@ static struct tc_driver dec_lance_tc_driver = {
.name = "declance",
.bus = &tc_bus_type,
.probe = dec_lance_tc_probe,
.remove = __exit_p(dec_lance_tc_remove),
.remove = dec_lance_tc_remove,
},
};
@ -1346,7 +1334,19 @@ static int dec_lance_tc_probe(struct device *dev)
return status;
}
static int __exit dec_lance_tc_remove(struct device *dev)
static void dec_lance_remove(struct device *bdev)
{
struct net_device *dev = dev_get_drvdata(bdev);
resource_size_t start, len;
unregister_netdev(dev);
start = to_tc_dev(bdev)->resource.start;
len = to_tc_dev(bdev)->resource.end - start + 1;
release_mem_region(start, len);
free_netdev(dev);
}
static int dec_lance_tc_remove(struct device *dev)
{
put_device(dev);
dec_lance_remove(dev);


@ -1323,7 +1323,7 @@ static int xgbe_read_ext_mii_regs(struct xgbe_prv_data *pdata, int addr,
static int xgbe_set_ext_mii_mode(struct xgbe_prv_data *pdata, unsigned int port,
enum xgbe_mdio_mode mode)
{
unsigned int reg_val = 0;
unsigned int reg_val = XGMAC_IOREAD(pdata, MAC_MDIOCL22R);
switch (mode) {
case XGBE_MDIO_MODE_CL22:


@ -1131,12 +1131,12 @@ static void xgbe_stop(struct xgbe_prv_data *pdata)
hw_if->disable_tx(pdata);
hw_if->disable_rx(pdata);
phy_if->phy_stop(pdata);
xgbe_free_irqs(pdata);
xgbe_napi_disable(pdata, 1);
phy_if->phy_stop(pdata);
hw_if->exit(pdata);
channel = pdata->channel;


@ -716,6 +716,8 @@ static void xgbe_phy_sfp_phy_settings(struct xgbe_prv_data *pdata)
pdata->phy.duplex = DUPLEX_UNKNOWN;
pdata->phy.autoneg = AUTONEG_ENABLE;
pdata->phy.advertising = pdata->phy.supported;
return;
}
pdata->phy.advertising &= ~ADVERTISED_Autoneg;
@ -875,6 +877,16 @@ static int xgbe_phy_find_phy_device(struct xgbe_prv_data *pdata)
!phy_data->sfp_phy_avail)
return 0;
/* Set the proper MDIO mode for the PHY */
ret = pdata->hw_if.set_ext_mii_mode(pdata, phy_data->mdio_addr,
phy_data->phydev_mode);
if (ret) {
netdev_err(pdata->netdev,
"mdio port/clause not compatible (%u/%u)\n",
phy_data->mdio_addr, phy_data->phydev_mode);
return ret;
}
/* Create and connect to the PHY device */
phydev = get_phy_device(phy_data->mii, phy_data->mdio_addr,
(phy_data->phydev_mode == XGBE_MDIO_MODE_CL45));
@ -2722,6 +2734,18 @@ static int xgbe_phy_start(struct xgbe_prv_data *pdata)
if (ret)
return ret;
/* Set the proper MDIO mode for the re-driver */
if (phy_data->redrv && !phy_data->redrv_if) {
ret = pdata->hw_if.set_ext_mii_mode(pdata, phy_data->redrv_addr,
XGBE_MDIO_MODE_CL22);
if (ret) {
netdev_err(pdata->netdev,
"redriver mdio port not compatible (%u)\n",
phy_data->redrv_addr);
return ret;
}
}
/* Start in highest supported mode */
xgbe_phy_set_mode(pdata, phy_data->start_mode);


@ -1749,6 +1749,12 @@ static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata)
pdata->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(pdata->clk)) {
/* Abort if the clock is defined but couldn't be retrived.
* Always abort if the clock is missing on DT system as
* the driver can't cope with this case.
*/
if (PTR_ERR(pdata->clk) != -ENOENT || dev->of_node)
return PTR_ERR(pdata->clk);
/* Firmware may have set up the clock already. */
dev_info(dev, "clocks have been setup already\n");
}


@ -51,8 +51,7 @@ static void platform_bgmac_idm_write(struct bgmac *bgmac, u16 offset, u32 value)
static bool platform_bgmac_clk_enabled(struct bgmac *bgmac)
{
if ((bgmac_idm_read(bgmac, BCMA_IOCTL) &
(BCMA_IOCTL_CLK | BCMA_IOCTL_FGC)) != BCMA_IOCTL_CLK)
if ((bgmac_idm_read(bgmac, BCMA_IOCTL) & BGMAC_CLK_EN) != BGMAC_CLK_EN)
return false;
if (bgmac_idm_read(bgmac, BCMA_RESET_CTL) & BCMA_RESET_CTL_RESET)
return false;
@ -61,15 +60,25 @@ static bool platform_bgmac_clk_enabled(struct bgmac *bgmac)
static void platform_bgmac_clk_enable(struct bgmac *bgmac, u32 flags)
{
bgmac_idm_write(bgmac, BCMA_IOCTL,
(BCMA_IOCTL_CLK | BCMA_IOCTL_FGC | flags));
bgmac_idm_read(bgmac, BCMA_IOCTL);
u32 val;
bgmac_idm_write(bgmac, BCMA_RESET_CTL, 0);
bgmac_idm_read(bgmac, BCMA_RESET_CTL);
udelay(1);
/* The Reset Control register only contains a single bit to show if the
* controller is currently in reset. Do a sanity check here, just in
* case the bootloader happened to leave the device in reset.
*/
val = bgmac_idm_read(bgmac, BCMA_RESET_CTL);
if (val) {
bgmac_idm_write(bgmac, BCMA_RESET_CTL, 0);
bgmac_idm_read(bgmac, BCMA_RESET_CTL);
udelay(1);
}
bgmac_idm_write(bgmac, BCMA_IOCTL, (BCMA_IOCTL_CLK | flags));
val = bgmac_idm_read(bgmac, BCMA_IOCTL);
/* Some bits of BCMA_IOCTL set by HW/ATF and should not change */
val |= flags & ~(BGMAC_AWCACHE | BGMAC_ARCACHE | BGMAC_AWUSER |
BGMAC_ARUSER);
val |= BGMAC_CLK_EN;
bgmac_idm_write(bgmac, BCMA_IOCTL, val);
bgmac_idm_read(bgmac, BCMA_IOCTL);
udelay(1);
}


@ -1223,12 +1223,16 @@ static netdev_tx_t bgmac_start_xmit(struct sk_buff *skb,
static int bgmac_set_mac_address(struct net_device *net_dev, void *addr)
{
struct bgmac *bgmac = netdev_priv(net_dev);
struct sockaddr *sa = addr;
int ret;
ret = eth_prepare_mac_addr_change(net_dev, addr);
if (ret < 0)
return ret;
bgmac_write_mac_address(bgmac, (u8 *)addr);
ether_addr_copy(net_dev->dev_addr, sa->sa_data);
bgmac_write_mac_address(bgmac, net_dev->dev_addr);
eth_commit_mac_addr_change(net_dev, addr);
return 0;
}


@ -213,6 +213,22 @@
/* BCMA GMAC core specific IO Control (BCMA_IOCTL) flags */
#define BGMAC_BCMA_IOCTL_SW_CLKEN 0x00000004 /* PHY Clock Enable */
#define BGMAC_BCMA_IOCTL_SW_RESET 0x00000008 /* PHY Reset */
/* The IOCTL values appear to be different in NS, NSP, and NS2, and do not match
* the values directly above
*/
#define BGMAC_CLK_EN BIT(0)
#define BGMAC_RESERVED_0 BIT(1)
#define BGMAC_SOURCE_SYNC_MODE_EN BIT(2)
#define BGMAC_DEST_SYNC_MODE_EN BIT(3)
#define BGMAC_TX_CLK_OUT_INVERT_EN BIT(4)
#define BGMAC_DIRECT_GMII_MODE BIT(5)
#define BGMAC_CLK_250_SEL BIT(6)
#define BGMAC_AWCACHE (0xf << 7)
#define BGMAC_RESERVED_1 (0x1f << 11)
#define BGMAC_ARCACHE (0xf << 16)
#define BGMAC_AWUSER (0x3f << 20)
#define BGMAC_ARUSER (0x3f << 26)
#define BGMAC_RESERVED BIT(31)
/* BCMA GMAC core specific IO status (BCMA_IOST) flags */
#define BGMAC_BCMA_IOST_ATTACHED 0x00000800


@ -2617,7 +2617,7 @@ out_out:
return err;
}
static int __exit sbmac_remove(struct platform_device *pldev)
static int sbmac_remove(struct platform_device *pldev)
{
struct net_device *dev = platform_get_drvdata(pldev);
struct sbmac_softc *sc = netdev_priv(dev);
@ -2634,7 +2634,7 @@ static int __exit sbmac_remove(struct platform_device *pldev)
static struct platform_driver sbmac_driver = {
.probe = sbmac_probe,
.remove = __exit_p(sbmac_remove),
.remove = sbmac_remove,
.driver = {
.name = sbmac_string,
},


@ -37,7 +37,7 @@
#define T4FW_VERSION_MAJOR 0x01
#define T4FW_VERSION_MINOR 0x10
#define T4FW_VERSION_MICRO 0x1A
#define T4FW_VERSION_MICRO 0x21
#define T4FW_VERSION_BUILD 0x00
#define T4FW_MIN_VERSION_MAJOR 0x01
@ -46,7 +46,7 @@
#define T5FW_VERSION_MAJOR 0x01
#define T5FW_VERSION_MINOR 0x10
#define T5FW_VERSION_MICRO 0x1A
#define T5FW_VERSION_MICRO 0x21
#define T5FW_VERSION_BUILD 0x00
#define T5FW_MIN_VERSION_MAJOR 0x00
@ -55,7 +55,7 @@
#define T6FW_VERSION_MAJOR 0x01
#define T6FW_VERSION_MINOR 0x10
#define T6FW_VERSION_MICRO 0x1A
#define T6FW_VERSION_MICRO 0x21
#define T6FW_VERSION_BUILD 0x00
#define T6FW_MIN_VERSION_MAJOR 0x00


@ -1456,7 +1456,7 @@ err_alloc_etherdev:
return err;
}
static int __exit ftgmac100_remove(struct platform_device *pdev)
static int ftgmac100_remove(struct platform_device *pdev)
{
struct net_device *netdev;
struct ftgmac100 *priv;
@ -1483,7 +1483,7 @@ MODULE_DEVICE_TABLE(of, ftgmac100_of_match);
static struct platform_driver ftgmac100_driver = {
.probe = ftgmac100_probe,
.remove = __exit_p(ftgmac100_remove),
.remove = ftgmac100_remove,
.driver = {
.name = DRV_NAME,
.of_match_table = ftgmac100_of_match,


@ -1156,7 +1156,7 @@ err_alloc_etherdev:
return err;
}
static int __exit ftmac100_remove(struct platform_device *pdev)
static int ftmac100_remove(struct platform_device *pdev)
{
struct net_device *netdev;
struct ftmac100 *priv;
@ -1176,7 +1176,7 @@ static int __exit ftmac100_remove(struct platform_device *pdev)
static struct platform_driver ftmac100_driver = {
.probe = ftmac100_probe,
.remove = __exit_p(ftmac100_remove),
.remove = ftmac100_remove,
.driver = {
.name = DRV_NAME,
},


@ -96,7 +96,7 @@
#define IXGBE_MAX_FRAME_BUILD_SKB \
(SKB_WITH_OVERHEAD(IXGBE_RXBUFFER_2K) - IXGBE_SKB_PAD)
#else
#define IGB_MAX_FRAME_BUILD_SKB IXGBE_RXBUFFER_2K
#define IXGBE_MAX_FRAME_BUILD_SKB IXGBE_RXBUFFER_2K
#endif
/*
@ -929,6 +929,7 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
struct ixgbe_adapter *adapter,
struct ixgbe_ring *tx_ring);
u32 ixgbe_rss_indir_tbl_entries(struct ixgbe_adapter *adapter);
void ixgbe_store_key(struct ixgbe_adapter *adapter);
void ixgbe_store_reta(struct ixgbe_adapter *adapter);
s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg,
u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm);


@ -2998,8 +2998,10 @@ static int ixgbe_set_rxfh(struct net_device *netdev, const u32 *indir,
}
/* Fill out the rss hash key */
if (key)
if (key) {
memcpy(adapter->rss_key, key, ixgbe_get_rxfh_key_size(netdev));
ixgbe_store_key(adapter);
}
ixgbe_store_reta(adapter);


@ -3473,6 +3473,21 @@ u32 ixgbe_rss_indir_tbl_entries(struct ixgbe_adapter *adapter)
return 512;
}
/**
* ixgbe_store_key - Write the RSS key to HW
* @adapter: device handle
*
* Write the RSS key stored in adapter.rss_key to HW.
*/
void ixgbe_store_key(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
int i;
for (i = 0; i < 10; i++)
IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), adapter->rss_key[i]);
}
/**
* ixgbe_store_reta - Write the RETA table to HW
* @adapter: device handle
@ -3538,7 +3553,6 @@ static void ixgbe_store_vfreta(struct ixgbe_adapter *adapter)
static void ixgbe_setup_reta(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
u32 i, j;
u32 reta_entries = ixgbe_rss_indir_tbl_entries(adapter);
u16 rss_i = adapter->ring_feature[RING_F_RSS].indices;
@ -3551,8 +3565,7 @@ static void ixgbe_setup_reta(struct ixgbe_adapter *adapter)
rss_i = 4;
/* Fill out hash function seeds */
for (i = 0; i < 10; i++)
IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), adapter->rss_key[i]);
ixgbe_store_key(adapter);
/* Fill out redirection table */
memset(adapter->rss_indir_tbl, 0, sizeof(adapter->rss_indir_tbl));
@ -3959,7 +3972,8 @@ static void ixgbe_set_rx_buffer_len(struct ixgbe_adapter *adapter)
if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
set_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state);
if (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN))
if ((max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN)) ||
(max_frame > IXGBE_MAX_FRAME_BUILD_SKB))
set_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state);
#endif
}


@ -441,30 +441,40 @@ static int
mlxsw_sp_vr_lpm_tree_check(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_vr *vr,
struct mlxsw_sp_prefix_usage *req_prefix_usage)
{
struct mlxsw_sp_lpm_tree *lpm_tree;
struct mlxsw_sp_lpm_tree *lpm_tree = vr->lpm_tree;
struct mlxsw_sp_lpm_tree *new_tree;
int err;
if (mlxsw_sp_prefix_usage_eq(req_prefix_usage,
&vr->lpm_tree->prefix_usage))
if (mlxsw_sp_prefix_usage_eq(req_prefix_usage, &lpm_tree->prefix_usage))
return 0;
lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, req_prefix_usage,
new_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, req_prefix_usage,
vr->proto, false);
if (IS_ERR(lpm_tree)) {
if (IS_ERR(new_tree)) {
/* We failed to get a tree according to the required
* prefix usage. However, the current tree might be still good
* for us if our requirement is subset of the prefixes used
* in the tree.
*/
if (mlxsw_sp_prefix_usage_subset(req_prefix_usage,
&vr->lpm_tree->prefix_usage))
&lpm_tree->prefix_usage))
return 0;
return PTR_ERR(lpm_tree);
return PTR_ERR(new_tree);
}
mlxsw_sp_vr_lpm_tree_unbind(mlxsw_sp, vr);
mlxsw_sp_lpm_tree_put(mlxsw_sp, vr->lpm_tree);
/* Prevent packet loss by overwriting existing binding */
vr->lpm_tree = new_tree;
err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, vr);
if (err)
goto err_tree_bind;
mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
return 0;
err_tree_bind:
vr->lpm_tree = lpm_tree;
return mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, vr);
mlxsw_sp_lpm_tree_put(mlxsw_sp, new_tree);
return err;
}
static struct mlxsw_sp_vr *mlxsw_sp_vr_get(struct mlxsw_sp *mlxsw_sp,


@ -1498,7 +1498,7 @@ nfp_net_tx_xdp_buf(struct nfp_net *nn, struct nfp_net_rx_ring *rx_ring,
txbuf->real_len = pkt_len;
dma_sync_single_for_device(&nn->pdev->dev, rxbuf->dma_addr + pkt_off,
pkt_len, DMA_TO_DEVICE);
pkt_len, DMA_BIDIRECTIONAL);
/* Build TX descriptor */
txd = &tx_ring->txds[wr_idx];
@ -1611,7 +1611,7 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
dma_sync_single_for_cpu(&nn->pdev->dev,
rxbuf->dma_addr + pkt_off,
pkt_len, DMA_FROM_DEVICE);
pkt_len, DMA_BIDIRECTIONAL);
act = nfp_net_run_xdp(xdp_prog, rxbuf->frag + data_off,
pkt_len);
switch (act) {
@ -2198,7 +2198,8 @@ static int __nfp_net_set_config_and_enable(struct nfp_net *nn)
nfp_net_write_mac_addr(nn);
nn_writel(nn, NFP_NET_CFG_MTU, nn->netdev->mtu);
nn_writel(nn, NFP_NET_CFG_FLBUFSZ, nn->fl_bufsz);
nn_writel(nn, NFP_NET_CFG_FLBUFSZ,
nn->fl_bufsz - NFP_NET_RX_BUF_NON_DATA);
/* Enable device */
new_ctrl |= NFP_NET_CFG_CTRL_ENABLE;


@ -807,7 +807,7 @@ err_out:
return err;
}
static int __exit sgiseeq_remove(struct platform_device *pdev)
static int sgiseeq_remove(struct platform_device *pdev)
{
struct net_device *dev = platform_get_drvdata(pdev);
struct sgiseeq_private *sp = netdev_priv(dev);
@ -822,7 +822,7 @@ static int __exit sgiseeq_remove(struct platform_device *pdev)
static struct platform_driver sgiseeq_driver = {
.probe = sgiseeq_probe,
.remove = __exit_p(sgiseeq_remove),
.remove = sgiseeq_remove,
.driver = {
.name = "sgiseeq",
}


@ -828,9 +828,7 @@ static int efx_ef10_alloc_piobufs(struct efx_nic *efx, unsigned int n)
static int efx_ef10_link_piobufs(struct efx_nic *efx)
{
struct efx_ef10_nic_data *nic_data = efx->nic_data;
_MCDI_DECLARE_BUF(inbuf,
max(MC_CMD_LINK_PIOBUF_IN_LEN,
MC_CMD_UNLINK_PIOBUF_IN_LEN));
MCDI_DECLARE_BUF(inbuf, MC_CMD_LINK_PIOBUF_IN_LEN);
struct efx_channel *channel;
struct efx_tx_queue *tx_queue;
unsigned int offset, index;
@ -839,8 +837,6 @@ static int efx_ef10_link_piobufs(struct efx_nic *efx)
BUILD_BUG_ON(MC_CMD_LINK_PIOBUF_OUT_LEN != 0);
BUILD_BUG_ON(MC_CMD_UNLINK_PIOBUF_OUT_LEN != 0);
memset(inbuf, 0, sizeof(inbuf));
/* Link a buffer to each VI in the write-combining mapping */
for (index = 0; index < nic_data->n_piobufs; ++index) {
MCDI_SET_DWORD(inbuf, LINK_PIOBUF_IN_PIOBUF_HANDLE,
@ -920,6 +916,10 @@ static int efx_ef10_link_piobufs(struct efx_nic *efx)
return 0;
fail:
/* inbuf was defined for MC_CMD_LINK_PIOBUF. We can use the same
* buffer for MC_CMD_UNLINK_PIOBUF because it's shorter.
*/
BUILD_BUG_ON(MC_CMD_LINK_PIOBUF_IN_LEN < MC_CMD_UNLINK_PIOBUF_IN_LEN);
while (index--) {
MCDI_SET_DWORD(inbuf, UNLINK_PIOBUF_IN_TXQ_INSTANCE,
nic_data->pio_write_vi_base + index);
@ -2183,7 +2183,7 @@ static int efx_ef10_tx_tso_desc(struct efx_tx_queue *tx_queue,
/* Modify IPv4 header if needed. */
ip->tot_len = 0;
ip->check = 0;
ipv4_id = ip->id;
ipv4_id = ntohs(ip->id);
} else {
/* Modify IPv6 header if needed. */
struct ipv6hdr *ipv6 = ipv6_hdr(skb);


@ -854,7 +854,7 @@ static int meth_probe(struct platform_device *pdev)
return 0;
}
static int __exit meth_remove(struct platform_device *pdev)
static int meth_remove(struct platform_device *pdev)
{
struct net_device *dev = platform_get_drvdata(pdev);
@ -866,7 +866,7 @@ static int __exit meth_remove(struct platform_device *pdev)
static struct platform_driver meth_driver = {
.probe = meth_probe,
.remove = __exit_p(meth_remove),
.remove = meth_remove,
.driver = {
.name = "meth",
}


@ -881,12 +881,14 @@ static netdev_tx_t geneve_xmit(struct sk_buff *skb, struct net_device *dev)
info = &geneve->info;
}
rcu_read_lock();
#if IS_ENABLED(CONFIG_IPV6)
if (info->mode & IP_TUNNEL_INFO_IPV6)
err = geneve6_xmit_skb(skb, dev, geneve, info);
else
#endif
err = geneve_xmit_skb(skb, dev, geneve, info);
rcu_read_unlock();
if (likely(!err))
return NETDEV_TX_OK;


@ -859,15 +859,22 @@ static int netvsc_change_mtu(struct net_device *ndev, int mtu)
if (ret)
goto out;
ndevctx->start_remove = true;
rndis_filter_device_remove(hdev, nvdev);
ndev->mtu = mtu;
memset(&device_info, 0, sizeof(device_info));
device_info.ring_size = ring_size;
device_info.num_chn = nvdev->num_chn;
device_info.max_num_vrss_chns = nvdev->num_chn;
ndevctx->start_remove = true;
rndis_filter_device_remove(hdev, nvdev);
/* 'nvdev' has been freed in rndis_filter_device_remove() ->
* netvsc_device_remove () -> free_netvsc_device().
* We mustn't access it before it's re-created in
* rndis_filter_device_add() -> netvsc_device_add().
*/
ndev->mtu = mtu;
rndis_filter_device_add(hdev, &device_info);
out:


@ -346,7 +346,7 @@ static int ax88772_reset(struct usbnet *dev)
if (ret < 0)
goto out;
asix_write_medium_mode(dev, AX88772_MEDIUM_DEFAULT, 0);
ret = asix_write_medium_mode(dev, AX88772_MEDIUM_DEFAULT, 0);
if (ret < 0)
goto out;


@ -51,7 +51,7 @@ module_param(gso, bool, 0444);
* at once, the weight is chosen so that the EWMA will be insensitive to short-
* term, transient changes in packet size.
*/
DECLARE_EWMA(pkt_len, 1, 64)
DECLARE_EWMA(pkt_len, 0, 64)
/* With mergeable buffers we align buffer address and use the low bits to
* encode its true size. Buffer size is up to 1 page so we need to align to


@ -2105,6 +2105,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
src_port = udp_flow_src_port(dev_net(dev), skb, vxlan->cfg.port_min,
vxlan->cfg.port_max, true);
rcu_read_lock();
if (dst->sa.sa_family == AF_INET) {
struct vxlan_sock *sock4 = rcu_dereference(vxlan->vn4_sock);
struct rtable *rt;
@ -2127,7 +2128,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
dst_port, vni, &rt->dst,
rt->rt_flags);
if (err)
return;
goto out_unlock;
} else if (info->key.tun_flags & TUNNEL_DONT_FRAGMENT) {
df = htons(IP_DF);
}
@ -2166,7 +2167,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
dst_port, vni, ndst,
rt6i_flags);
if (err)
return;
goto out_unlock;
}
tos = ip_tunnel_ecn_encap(tos, old_iph, skb);
@ -2183,6 +2184,8 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
label, src_port, dst_port, !udp_sum);
#endif
}
out_unlock:
rcu_read_unlock();
return;
drop:
@ -2191,6 +2194,7 @@ drop:
return;
tx_error:
rcu_read_unlock();
if (err == -ELOOP)
dev->stats.collisions++;
else if (err == -ENETUNREACH)
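The vxlan hunk above is the shape of both tunnel fixes from item 8: hold rcu_read_lock() across the whole TX path and turn every early return into a goto that drops the lock. A minimal sketch of that pattern, with hypothetical helper names not taken from the patch:

static netdev_tx_t demo_xmit(struct sk_buff *skb, struct net_device *dev)
{
	int err;

	/* The socket pointers used while encapsulating are RCU-protected,
	 * so the read lock must cover the whole TX path.
	 */
	rcu_read_lock();
	err = demo_build_header(skb);		/* hypothetical helper */
	if (err)
		goto out_unlock;		/* was a bare 'return' before the fix */
	demo_send_encapsulated(skb, dev);	/* hypothetical helper */
out_unlock:
	rcu_read_unlock();
	return NETDEV_TX_OK;
}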


@ -18,6 +18,8 @@
#include <linux/module.h>
#include <linux/firmware.h>
#include <linux/of.h>
#include <linux/dmi.h>
#include <linux/ctype.h>
#include <asm/byteorder.h>
#include "core.h"
@ -711,6 +713,72 @@ static int ath10k_core_get_board_id_from_otp(struct ath10k *ar)
return 0;
}
static void ath10k_core_check_bdfext(const struct dmi_header *hdr, void *data)
{
struct ath10k *ar = data;
const char *bdf_ext;
const char *magic = ATH10K_SMBIOS_BDF_EXT_MAGIC;
u8 bdf_enabled;
int i;
if (hdr->type != ATH10K_SMBIOS_BDF_EXT_TYPE)
return;
if (hdr->length != ATH10K_SMBIOS_BDF_EXT_LENGTH) {
ath10k_dbg(ar, ATH10K_DBG_BOOT,
"wrong smbios bdf ext type length (%d).\n",
hdr->length);
return;
}
bdf_enabled = *((u8 *)hdr + ATH10K_SMBIOS_BDF_EXT_OFFSET);
if (!bdf_enabled) {
ath10k_dbg(ar, ATH10K_DBG_BOOT, "bdf variant name not found.\n");
return;
}
/* Only one string exists (per spec) */
bdf_ext = (char *)hdr + hdr->length;
if (memcmp(bdf_ext, magic, strlen(magic)) != 0) {
ath10k_dbg(ar, ATH10K_DBG_BOOT,
"bdf variant magic does not match.\n");
return;
}
for (i = 0; i < strlen(bdf_ext); i++) {
if (!isascii(bdf_ext[i]) || !isprint(bdf_ext[i])) {
ath10k_dbg(ar, ATH10K_DBG_BOOT,
"bdf variant name contains non ascii chars.\n");
return;
}
}
/* Copy extension name without magic suffix */
if (strscpy(ar->id.bdf_ext, bdf_ext + strlen(magic),
sizeof(ar->id.bdf_ext)) < 0) {
ath10k_dbg(ar, ATH10K_DBG_BOOT,
"bdf variant string is longer than the buffer can accommodate (variant: %s)\n",
bdf_ext);
return;
}
ath10k_dbg(ar, ATH10K_DBG_BOOT,
"found and validated bdf variant smbios_type 0x%x bdf %s\n",
ATH10K_SMBIOS_BDF_EXT_TYPE, bdf_ext);
}
static int ath10k_core_check_smbios(struct ath10k *ar)
{
ar->id.bdf_ext[0] = '\0';
dmi_walk(ath10k_core_check_bdfext, ar);
if (ar->id.bdf_ext[0] == '\0')
return -ENODATA;
return 0;
}
static int ath10k_download_and_run_otp(struct ath10k *ar)
{
u32 result, address = ar->hw_params.patch_load_addr;
@ -1020,6 +1088,23 @@ static int ath10k_core_fetch_board_data_api_n(struct ath10k *ar,
case ATH10K_BD_IE_BOARD:
ret = ath10k_core_parse_bd_ie_board(ar, data, ie_len,
boardname);
if (ret == -ENOENT && ar->id.bdf_ext[0] != '\0') {
/* try default bdf if variant was not found */
char *s, *v = ",variant=";
char boardname2[100];
strlcpy(boardname2, boardname,
sizeof(boardname2));
s = strstr(boardname2, v);
if (s)
*s = '\0'; /* strip ",variant=%s" */
ret = ath10k_core_parse_bd_ie_board(ar, data,
ie_len,
boardname2);
}
if (ret == -ENOENT)
/* no match found, continue */
break;
@ -1057,6 +1142,9 @@ err:
static int ath10k_core_create_board_name(struct ath10k *ar, char *name,
size_t name_len)
{
/* strlen(',variant=') + strlen(ar->id.bdf_ext) */
char variant[9 + ATH10K_SMBIOS_BDF_EXT_STR_LENGTH] = { 0 };
if (ar->id.bmi_ids_valid) {
scnprintf(name, name_len,
"bus=%s,bmi-chip-id=%d,bmi-board-id=%d",
@ -1066,12 +1154,15 @@ static int ath10k_core_create_board_name(struct ath10k *ar, char *name,
goto out;
}
if (ar->id.bdf_ext[0] != '\0')
scnprintf(variant, sizeof(variant), ",variant=%s",
ar->id.bdf_ext);
scnprintf(name, name_len,
"bus=%s,vendor=%04x,device=%04x,subsystem-vendor=%04x,subsystem-device=%04x",
"bus=%s,vendor=%04x,device=%04x,subsystem-vendor=%04x,subsystem-device=%04x%s",
ath10k_bus_str(ar->hif.bus),
ar->id.vendor, ar->id.device,
ar->id.subsystem_vendor, ar->id.subsystem_device);
ar->id.subsystem_vendor, ar->id.subsystem_device, variant);
out:
ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot using board name '%s'\n", name);
@ -2128,6 +2219,10 @@ static int ath10k_core_probe_fw(struct ath10k *ar)
goto err_free_firmware_files;
}
ret = ath10k_core_check_smbios(ar);
if (ret)
ath10k_dbg(ar, ATH10K_DBG_BOOT, "bdf variant name not set.\n");
ret = ath10k_core_fetch_board_file(ar);
if (ret) {
ath10k_err(ar, "failed to fetch board file: %d\n", ret);


@ -69,6 +69,23 @@
#define ATH10K_NAPI_BUDGET 64
#define ATH10K_NAPI_QUOTA_LIMIT 60
/* SMBIOS type containing Board Data File Name Extension */
#define ATH10K_SMBIOS_BDF_EXT_TYPE 0xF8
/* SMBIOS type structure length (excluding strings-set) */
#define ATH10K_SMBIOS_BDF_EXT_LENGTH 0x9
/* Offset pointing to Board Data File Name Extension */
#define ATH10K_SMBIOS_BDF_EXT_OFFSET 0x8
/* Board Data File Name Extension string length.
* String format: BDF_<Customer ID>_<Extension>\0
*/
#define ATH10K_SMBIOS_BDF_EXT_STR_LENGTH 0x20
/* The magic used by QCA spec */
#define ATH10K_SMBIOS_BDF_EXT_MAGIC "BDF_"
struct ath10k;
enum ath10k_bus {
@ -798,6 +815,8 @@ struct ath10k {
bool bmi_ids_valid;
u8 bmi_board_id;
u8 bmi_chip_id;
char bdf_ext[ATH10K_SMBIOS_BDF_EXT_STR_LENGTH];
} id;
int fw_api;


@ -1252,7 +1252,7 @@ struct ath5k_statistics {
#define ATH5K_TXQ_LEN_MAX (ATH_TXBUF / 4) /* bufs per queue */
#define ATH5K_TXQ_LEN_LOW (ATH5K_TXQ_LEN_MAX / 2) /* low mark */
DECLARE_EWMA(beacon_rssi, 1024, 8)
DECLARE_EWMA(beacon_rssi, 10, 8)
/* Driver state associated with an instance of a device */
struct ath5k_hw {


@ -3056,6 +3056,7 @@ static int hwsim_register_received_nl(struct sk_buff *skb_2,
static int hwsim_new_radio_nl(struct sk_buff *msg, struct genl_info *info)
{
struct hwsim_new_radio_params param = { 0 };
const char *hwname = NULL;
param.reg_strict = info->attrs[HWSIM_ATTR_REG_STRICT_REG];
param.p2p_device = info->attrs[HWSIM_ATTR_SUPPORT_P2P_DEVICE];
@ -3069,8 +3070,14 @@ static int hwsim_new_radio_nl(struct sk_buff *msg, struct genl_info *info)
if (info->attrs[HWSIM_ATTR_NO_VIF])
param.no_vif = true;
if (info->attrs[HWSIM_ATTR_RADIO_NAME])
param.hwname = nla_data(info->attrs[HWSIM_ATTR_RADIO_NAME]);
if (info->attrs[HWSIM_ATTR_RADIO_NAME]) {
hwname = kasprintf(GFP_KERNEL, "%.*s",
nla_len(info->attrs[HWSIM_ATTR_RADIO_NAME]),
(char *)nla_data(info->attrs[HWSIM_ATTR_RADIO_NAME]));
if (!hwname)
return -ENOMEM;
param.hwname = hwname;
}
if (info->attrs[HWSIM_ATTR_USE_CHANCTX])
param.use_chanctx = true;
@ -3098,11 +3105,15 @@ static int hwsim_del_radio_nl(struct sk_buff *msg, struct genl_info *info)
s64 idx = -1;
const char *hwname = NULL;
if (info->attrs[HWSIM_ATTR_RADIO_ID])
if (info->attrs[HWSIM_ATTR_RADIO_ID]) {
idx = nla_get_u32(info->attrs[HWSIM_ATTR_RADIO_ID]);
else if (info->attrs[HWSIM_ATTR_RADIO_NAME])
hwname = (void *)nla_data(info->attrs[HWSIM_ATTR_RADIO_NAME]);
else
} else if (info->attrs[HWSIM_ATTR_RADIO_NAME]) {
hwname = kasprintf(GFP_KERNEL, "%.*s",
nla_len(info->attrs[HWSIM_ATTR_RADIO_NAME]),
(char *)nla_data(info->attrs[HWSIM_ATTR_RADIO_NAME]));
if (!hwname)
return -ENOMEM;
} else
return -EINVAL;
spin_lock_bh(&hwsim_radio_lock);
@ -3111,7 +3122,8 @@ static int hwsim_del_radio_nl(struct sk_buff *msg, struct genl_info *info)
if (data->idx != idx)
continue;
} else {
if (strcmp(hwname, wiphy_name(data->hw->wiphy)))
if (!hwname ||
strcmp(hwname, wiphy_name(data->hw->wiphy)))
continue;
}
@ -3122,10 +3134,12 @@ static int hwsim_del_radio_nl(struct sk_buff *msg, struct genl_info *info)
spin_unlock_bh(&hwsim_radio_lock);
mac80211_hwsim_del_radio(data, wiphy_name(data->hw->wiphy),
info);
kfree(hwname);
return 0;
}
spin_unlock_bh(&hwsim_radio_lock);
kfree(hwname);
return -ENODEV;
}


@ -257,7 +257,7 @@ struct link_qual {
int tx_failed;
};
DECLARE_EWMA(rssi, 1024, 8)
DECLARE_EWMA(rssi, 10, 8)
/*
* Antenna settings about the currently active link.


@ -39,7 +39,7 @@ static void xenvif_add_hash(struct xenvif *vif, const u8 *tag,
unsigned long flags;
bool found;
new = kmalloc(sizeof(*entry), GFP_KERNEL);
new = kmalloc(sizeof(*entry), GFP_ATOMIC);
if (!new)
return;


@ -492,24 +492,31 @@ static int backend_create_xenvif(struct backend_info *be)
static void backend_disconnect(struct backend_info *be)
{
if (be->vif) {
struct xenvif *vif = be->vif;
if (vif) {
unsigned int queue_index;
struct xenvif_queue *queues;
xen_unregister_watchers(be->vif);
xen_unregister_watchers(vif);
#ifdef CONFIG_DEBUG_FS
xenvif_debugfs_delif(be->vif);
xenvif_debugfs_delif(vif);
#endif /* CONFIG_DEBUG_FS */
xenvif_disconnect_data(be->vif);
for (queue_index = 0; queue_index < be->vif->num_queues; ++queue_index)
xenvif_deinit_queue(&be->vif->queues[queue_index]);
xenvif_disconnect_data(vif);
for (queue_index = 0;
queue_index < vif->num_queues;
++queue_index)
xenvif_deinit_queue(&vif->queues[queue_index]);
spin_lock(&be->vif->lock);
vfree(be->vif->queues);
be->vif->num_queues = 0;
be->vif->queues = NULL;
spin_unlock(&be->vif->lock);
spin_lock(&vif->lock);
queues = vif->queues;
vif->num_queues = 0;
vif->queues = NULL;
spin_unlock(&vif->lock);
xenvif_disconnect_ctrl(be->vif);
vfree(queues);
xenvif_disconnect_ctrl(vif);
}
}


@ -1,45 +1,66 @@
#ifndef _LINUX_AVERAGE_H
#define _LINUX_AVERAGE_H
/* Exponentially weighted moving average (EWMA) */
/*
* Exponentially weighted moving average (EWMA)
*
* This implements a fixed-precision EWMA algorithm, with both the
* precision and fall-off coefficient determined at compile-time
* and built into the generated helper funtions.
*
* The first argument to the macro is the name that will be used
* for the struct and helper functions.
*
* The second argument, the precision, expresses how many bits are
* used for the fractional part of the fixed-precision values.
*
* The third argument, the weight reciprocal, determines how the
* new values will be weighed vs. the old state, new values will
* get weight 1/weight_rcp and old values 1-1/weight_rcp. Note
* that this parameter must be a power of two for efficiency.
*/
#define DECLARE_EWMA(name, _factor, _weight) \
#define DECLARE_EWMA(name, _precision, _weight_rcp) \
struct ewma_##name { \
unsigned long internal; \
}; \
static inline void ewma_##name##_init(struct ewma_##name *e) \
{ \
BUILD_BUG_ON(!__builtin_constant_p(_factor)); \
BUILD_BUG_ON(!__builtin_constant_p(_weight)); \
BUILD_BUG_ON_NOT_POWER_OF_2(_factor); \
BUILD_BUG_ON_NOT_POWER_OF_2(_weight); \
BUILD_BUG_ON(!__builtin_constant_p(_precision)); \
BUILD_BUG_ON(!__builtin_constant_p(_weight_rcp)); \
/* \
* Even if you want to feed it just 0/1 you should have \
* some bits for the non-fractional part... \
*/ \
BUILD_BUG_ON((_precision) > 30); \
BUILD_BUG_ON_NOT_POWER_OF_2(_weight_rcp); \
e->internal = 0; \
} \
static inline unsigned long \
ewma_##name##_read(struct ewma_##name *e) \
{ \
BUILD_BUG_ON(!__builtin_constant_p(_factor)); \
BUILD_BUG_ON(!__builtin_constant_p(_weight)); \
BUILD_BUG_ON_NOT_POWER_OF_2(_factor); \
BUILD_BUG_ON_NOT_POWER_OF_2(_weight); \
return e->internal >> ilog2(_factor); \
BUILD_BUG_ON(!__builtin_constant_p(_precision)); \
BUILD_BUG_ON(!__builtin_constant_p(_weight_rcp)); \
BUILD_BUG_ON((_precision) > 30); \
BUILD_BUG_ON_NOT_POWER_OF_2(_weight_rcp); \
return e->internal >> (_precision); \
} \
static inline void ewma_##name##_add(struct ewma_##name *e, \
unsigned long val) \
{ \
unsigned long internal = ACCESS_ONCE(e->internal); \
unsigned long weight = ilog2(_weight); \
unsigned long factor = ilog2(_factor); \
unsigned long weight_rcp = ilog2(_weight_rcp); \
unsigned long precision = _precision; \
\
BUILD_BUG_ON(!__builtin_constant_p(_factor)); \
BUILD_BUG_ON(!__builtin_constant_p(_weight)); \
BUILD_BUG_ON_NOT_POWER_OF_2(_factor); \
BUILD_BUG_ON_NOT_POWER_OF_2(_weight); \
BUILD_BUG_ON(!__builtin_constant_p(_precision)); \
BUILD_BUG_ON(!__builtin_constant_p(_weight_rcp)); \
BUILD_BUG_ON((_precision) > 30); \
BUILD_BUG_ON_NOT_POWER_OF_2(_weight_rcp); \
\
ACCESS_ONCE(e->internal) = internal ? \
(((internal << weight) - internal) + \
(val << factor)) >> weight : \
(val << factor); \
(((internal << weight_rcp) - internal) + \
(val << precision)) >> weight_rcp : \
(val << precision); \
}
#endif /* _LINUX_AVERAGE_H */
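For reference, a minimal usage sketch of the renamed macro parameters; the 'sig' name and the values here are illustrative, not from the patch. Precision 4 keeps 4 fractional bits; weight_rcp 8 weighs each new sample at 1/8 against 7/8 of the old average.

#include <linux/average.h>

DECLARE_EWMA(sig, 4, 8)			/* 4 fractional bits, new-sample weight 1/8 */

static struct ewma_sig sig_avg;		/* ewma_sig_init(&sig_avg) must run once first */

static void sig_sample(unsigned long rssi)
{
	ewma_sig_add(&sig_avg, rssi);	/* avg = (7*avg + rssi) / 8 in fixed point */
}

static unsigned long sig_current(void)
{
	return ewma_sig_read(&sig_avg);	/* integer part of the average */
}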


@ -109,7 +109,7 @@ static inline void mlx4_u64_to_mac(u8 *addr, u64 mac)
int i;
for (i = ETH_ALEN; i > 0; i--) {
addr[i - 1] = mac && 0xFF;
addr[i - 1] = mac & 0xFF;
mac >>= 8;
}
}
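The one-character mlx4 fix above is easy to misread, so here is a standalone userspace illustration with a made-up address: '&&' is logical AND, collapsing every extracted byte to 0 or 1, while '&' keeps the real octet.

#include <stdio.h>

int main(void)
{
	unsigned long long mac = 0x0002c90300fed0ULL;
	unsigned char wrong = mac && 0xFF;	/* logical AND: 1 for any non-zero mac */
	unsigned char right = mac & 0xFF;	/* bitwise AND: the actual low octet, 0xd0 */

	printf("wrong=%u right=0x%02x\n", wrong, right);
	return 0;
}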


@ -330,6 +330,7 @@ struct napi_struct {
enum {
NAPI_STATE_SCHED, /* Poll is scheduled */
NAPI_STATE_MISSED, /* reschedule a napi */
NAPI_STATE_DISABLE, /* Disable pending */
NAPI_STATE_NPSVC, /* Netpoll - don't dequeue from poll_list */
NAPI_STATE_HASHED, /* In NAPI hash (busy polling possible) */
@ -338,12 +339,13 @@ enum {
};
enum {
NAPIF_STATE_SCHED = (1UL << NAPI_STATE_SCHED),
NAPIF_STATE_DISABLE = (1UL << NAPI_STATE_DISABLE),
NAPIF_STATE_NPSVC = (1UL << NAPI_STATE_NPSVC),
NAPIF_STATE_HASHED = (1UL << NAPI_STATE_HASHED),
NAPIF_STATE_NO_BUSY_POLL = (1UL << NAPI_STATE_NO_BUSY_POLL),
NAPIF_STATE_IN_BUSY_POLL = (1UL << NAPI_STATE_IN_BUSY_POLL),
NAPIF_STATE_SCHED = BIT(NAPI_STATE_SCHED),
NAPIF_STATE_MISSED = BIT(NAPI_STATE_MISSED),
NAPIF_STATE_DISABLE = BIT(NAPI_STATE_DISABLE),
NAPIF_STATE_NPSVC = BIT(NAPI_STATE_NPSVC),
NAPIF_STATE_HASHED = BIT(NAPI_STATE_HASHED),
NAPIF_STATE_NO_BUSY_POLL = BIT(NAPI_STATE_NO_BUSY_POLL),
NAPIF_STATE_IN_BUSY_POLL = BIT(NAPI_STATE_IN_BUSY_POLL),
};
enum gro_result {
@ -414,20 +416,7 @@ static inline bool napi_disable_pending(struct napi_struct *n)
return test_bit(NAPI_STATE_DISABLE, &n->state);
}
/**
* napi_schedule_prep - check if NAPI can be scheduled
* @n: NAPI context
*
* Test if NAPI routine is already running, and if not mark
* it as running. This is used as a condition variable to
* insure only one NAPI poll instance runs. We also make
* sure there is no pending NAPI disable.
*/
static inline bool napi_schedule_prep(struct napi_struct *n)
{
return !napi_disable_pending(n) &&
!test_and_set_bit(NAPI_STATE_SCHED, &n->state);
}
bool napi_schedule_prep(struct napi_struct *n);
/**
* napi_schedule - schedule NAPI poll


@ -988,9 +988,9 @@ struct nft_object *nf_tables_obj_lookup(const struct nft_table *table,
const struct nlattr *nla, u32 objtype,
u8 genmask);
int nft_obj_notify(struct net *net, struct nft_table *table,
struct nft_object *obj, u32 portid, u32 seq,
int event, int family, int report, gfp_t gfp);
void nft_obj_notify(struct net *net, struct nft_table *table,
struct nft_object *obj, u32 portid, u32 seq,
int event, int family, int report, gfp_t gfp);
/**
* struct nft_object_type - stateful object type


@ -1526,6 +1526,7 @@ struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
void sk_free(struct sock *sk);
void sk_destruct(struct sock *sk);
struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority);
void sk_free_unlock_clone(struct sock *sk);
struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force,
gfp_t priority);


@ -119,6 +119,7 @@ enum rxrpc_recvmsg_trace {
rxrpc_recvmsg_full,
rxrpc_recvmsg_hole,
rxrpc_recvmsg_next,
rxrpc_recvmsg_requeue,
rxrpc_recvmsg_return,
rxrpc_recvmsg_terminal,
rxrpc_recvmsg_to_be_accepted,
@ -277,6 +278,7 @@ enum rxrpc_congest_change {
EM(rxrpc_recvmsg_full, "FULL") \
EM(rxrpc_recvmsg_hole, "HOLE") \
EM(rxrpc_recvmsg_next, "NEXT") \
EM(rxrpc_recvmsg_requeue, "REQU") \
EM(rxrpc_recvmsg_return, "RETN") \
EM(rxrpc_recvmsg_terminal, "TERM") \
EM(rxrpc_recvmsg_to_be_accepted, "TBAC") \


@ -33,7 +33,7 @@
* - out of bounds or malformed jumps
* The second pass is all possible path descent from the 1st insn.
* Since it's analyzing all pathes through the program, the length of the
* analysis is limited to 32k insn, which may be hit even if total number of
* analysis is limited to 64k insn, which may be hit even if total number of
* insn is less then 4K, but there are too many branches that change stack/regs.
* Number of 'branches to be analyzed' is limited to 1k
*


@ -239,8 +239,10 @@ err_unlock:
spin_unlock_bh(&chain->lock);
err:
if (!ret)
if (!ret) {
kfree(frag_entry_new);
kfree_skb(skb);
}
return ret;
}
@ -313,7 +315,7 @@ free:
*
* There are three possible outcomes: 1) Packet is merged: Return true and
* set *skb to merged packet; 2) Packet is buffered: Return true and set *skb
* to NULL; 3) Error: Return false and leave skb as is.
* to NULL; 3) Error: Return false and free skb.
*
* Return: true when packet is merged or buffered, false when skb is not not
* used.
@ -338,9 +340,9 @@ bool batadv_frag_skb_buffer(struct sk_buff **skb,
goto out_err;
out:
*skb = skb_out;
ret = true;
out_err:
*skb = skb_out;
return ret;
}
@ -499,6 +501,12 @@ int batadv_frag_send_packet(struct sk_buff *skb,
/* Eat and send fragments from the tail of skb */
while (skb->len > max_fragment_size) {
/* The initial check in this function should cover this case */
if (unlikely(frag_header.no == BATADV_FRAG_MAX_FRAGMENTS - 1)) {
ret = -EINVAL;
goto put_primary_if;
}
skb_fragment = batadv_frag_create(skb, &frag_header, mtu);
if (!skb_fragment) {
ret = -ENOMEM;
@ -515,12 +523,6 @@ int batadv_frag_send_packet(struct sk_buff *skb,
}
frag_header.no++;
/* The initial check in this function should cover this case */
if (frag_header.no == BATADV_FRAG_MAX_FRAGMENTS - 1) {
ret = -EINVAL;
goto put_primary_if;
}
}
/* Make room for the fragment header. */


@ -402,7 +402,7 @@ struct batadv_gw_node {
struct rcu_head rcu;
};
DECLARE_EWMA(throughput, 1024, 8)
DECLARE_EWMA(throughput, 10, 8)
/**
* struct batadv_hardif_neigh_node_bat_v - B.A.T.M.A.N. V private neighbor


@ -186,8 +186,9 @@ void br_flood(struct net_bridge *br, struct sk_buff *skb,
/* Do not flood unicast traffic to ports that turn it off */
if (pkt_type == BR_PKT_UNICAST && !(p->flags & BR_FLOOD))
continue;
/* Do not flood if mc off, except for traffic we originate */
if (pkt_type == BR_PKT_MULTICAST &&
!(p->flags & BR_MCAST_FLOOD))
!(p->flags & BR_MCAST_FLOOD) && skb->dev != br->dev)
continue;
/* Do not flood to ports that enable proxy ARP */


@ -997,10 +997,10 @@ err_vlan_add:
RCU_INIT_POINTER(p->vlgrp, NULL);
synchronize_rcu();
vlan_tunnel_deinit(vg);
err_vlan_enabled:
err_tunnel_init:
rhashtable_destroy(&vg->vlan_hash);
err_rhtbl:
err_vlan_enabled:
kfree(vg);
goto out;


@ -1698,27 +1698,54 @@ EXPORT_SYMBOL_GPL(net_dec_egress_queue);
static struct static_key netstamp_needed __read_mostly;
#ifdef HAVE_JUMP_LABEL
static atomic_t netstamp_needed_deferred;
static atomic_t netstamp_wanted;
static void netstamp_clear(struct work_struct *work)
{
int deferred = atomic_xchg(&netstamp_needed_deferred, 0);
int wanted;
while (deferred--)
static_key_slow_dec(&netstamp_needed);
wanted = atomic_add_return(deferred, &netstamp_wanted);
if (wanted > 0)
static_key_enable(&netstamp_needed);
else
static_key_disable(&netstamp_needed);
}
static DECLARE_WORK(netstamp_work, netstamp_clear);
#endif
void net_enable_timestamp(void)
{
#ifdef HAVE_JUMP_LABEL
int wanted;
while (1) {
wanted = atomic_read(&netstamp_wanted);
if (wanted <= 0)
break;
if (atomic_cmpxchg(&netstamp_wanted, wanted, wanted + 1) == wanted)
return;
}
atomic_inc(&netstamp_needed_deferred);
schedule_work(&netstamp_work);
#else
static_key_slow_inc(&netstamp_needed);
#endif
}
EXPORT_SYMBOL(net_enable_timestamp);
void net_disable_timestamp(void)
{
#ifdef HAVE_JUMP_LABEL
/* net_disable_timestamp() can be called from non process context */
atomic_inc(&netstamp_needed_deferred);
int wanted;
while (1) {
wanted = atomic_read(&netstamp_wanted);
if (wanted <= 1)
break;
if (atomic_cmpxchg(&netstamp_wanted, wanted, wanted - 1) == wanted)
return;
}
atomic_dec(&netstamp_needed_deferred);
schedule_work(&netstamp_work);
#else
static_key_slow_dec(&netstamp_needed);
@ -4883,6 +4910,39 @@ void __napi_schedule(struct napi_struct *n)
}
EXPORT_SYMBOL(__napi_schedule);
/**
* napi_schedule_prep - check if napi can be scheduled
* @n: napi context
*
* Test if NAPI routine is already running, and if not mark
* it as running. This is used as a condition variable
* insure only one NAPI poll instance runs. We also make
* sure there is no pending NAPI disable.
*/
bool napi_schedule_prep(struct napi_struct *n)
{
unsigned long val, new;
do {
val = READ_ONCE(n->state);
if (unlikely(val & NAPIF_STATE_DISABLE))
return false;
new = val | NAPIF_STATE_SCHED;
/* Sets STATE_MISSED bit if STATE_SCHED was already set
* This was suggested by Alexander Duyck, as compiler
* emits better code than :
* if (val & NAPIF_STATE_SCHED)
* new |= NAPIF_STATE_MISSED;
*/
new |= (val & NAPIF_STATE_SCHED) / NAPIF_STATE_SCHED *
NAPIF_STATE_MISSED;
} while (cmpxchg(&n->state, val, new) != val);
return !(val & NAPIF_STATE_SCHED);
}
EXPORT_SYMBOL(napi_schedule_prep);
/**
* __napi_schedule_irqoff - schedule for receive
* @n: entry to schedule
@ -4897,7 +4957,7 @@ EXPORT_SYMBOL(__napi_schedule_irqoff);
bool napi_complete_done(struct napi_struct *n, int work_done)
{
unsigned long flags;
unsigned long flags, val, new;
/*
* 1) Don't let napi dequeue from the cpu poll list
@ -4927,7 +4987,27 @@ bool napi_complete_done(struct napi_struct *n, int work_done)
list_del_init(&n->poll_list);
local_irq_restore(flags);
}
WARN_ON_ONCE(!test_and_clear_bit(NAPI_STATE_SCHED, &n->state));
do {
val = READ_ONCE(n->state);
WARN_ON_ONCE(!(val & NAPIF_STATE_SCHED));
new = val & ~(NAPIF_STATE_MISSED | NAPIF_STATE_SCHED);
/* If STATE_MISSED was set, leave STATE_SCHED set,
* because we will call napi->poll() one more time.
* This C code was suggested by Alexander Duyck to help gcc.
*/
new |= (val & NAPIF_STATE_MISSED) / NAPIF_STATE_MISSED *
NAPIF_STATE_SCHED;
} while (cmpxchg(&n->state, val, new) != val);
if (unlikely(val & NAPIF_STATE_MISSED)) {
__napi_schedule(n);
return false;
}
return true;
}
EXPORT_SYMBOL(napi_complete_done);
@ -4953,6 +5033,16 @@ static void busy_poll_stop(struct napi_struct *napi, void *have_poll_lock)
{
int rc;
/* Busy polling means there is a high chance device driver hard irq
* could not grab NAPI_STATE_SCHED, and that NAPI_STATE_MISSED was
* set in napi_schedule_prep().
* Since we are about to call napi->poll() once more, we can safely
* clear NAPI_STATE_MISSED.
*
* Note: x86 could use a single "lock and ..." instruction
* to perform these two clear_bit()
*/
clear_bit(NAPI_STATE_MISSED, &napi->state);
clear_bit(NAPI_STATE_IN_BUSY_POLL, &napi->state);
local_bh_disable();
@ -5088,8 +5178,13 @@ static enum hrtimer_restart napi_watchdog(struct hrtimer *timer)
struct napi_struct *napi;
napi = container_of(timer, struct napi_struct, timer);
if (napi->gro_list)
napi_schedule_irqoff(napi);
/* Note : we use a relaxed variant of napi_schedule_prep() not setting
* NAPI_STATE_MISSED, since we do not react to a device IRQ.
*/
if (napi->gro_list && !napi_disable_pending(napi) &&
!test_and_set_bit(NAPI_STATE_SCHED, &napi->state))
__napi_schedule_irqoff(napi);
return HRTIMER_NORESTART;
}
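The division-and-multiply idiom in napi_schedule_prep() and napi_complete_done() above is worth a standalone illustration (userspace C with assumed flag values): since NAPIF_STATE_SCHED is a single bit, (val & SCHED) / SCHED is exactly 0 or 1, so the multiply conditionally ORs in the MISSED bit without a branch.

#include <stdio.h>

#define STATE_SCHED	(1UL << 0)
#define STATE_MISSED	(1UL << 1)

int main(void)
{
	unsigned long val = STATE_SCHED;	/* SCHED already set by a racing poller */
	unsigned long new = val | STATE_SCHED;

	/* (val & STATE_SCHED) / STATE_SCHED is 0 or 1; scale it up to MISSED */
	new |= (val & STATE_SCHED) / STATE_SCHED * STATE_MISSED;
	printf("new=%#lx\n", new);		/* prints 0x3: SCHED | MISSED */
	return 0;
}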


@ -1539,11 +1539,7 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
is_charged = sk_filter_charge(newsk, filter);
if (unlikely(!is_charged || xfrm_sk_clone_policy(newsk, sk))) {
/* It is still raw copy of parent, so invalidate
* destructor and make plain sk_free() */
newsk->sk_destruct = NULL;
bh_unlock_sock(newsk);
sk_free(newsk);
sk_free_unlock_clone(newsk);
newsk = NULL;
goto out;
}
@ -1592,6 +1588,16 @@ out:
}
EXPORT_SYMBOL_GPL(sk_clone_lock);
void sk_free_unlock_clone(struct sock *sk)
{
/* It is still raw copy of parent, so invalidate
* destructor and make plain sk_free() */
sk->sk_destruct = NULL;
bh_unlock_sock(sk);
sk_free(sk);
}
EXPORT_SYMBOL_GPL(sk_free_unlock_clone);
void sk_setup_caps(struct sock *sk, struct dst_entry *dst)
{
u32 max_segs = 1;


@ -577,6 +577,7 @@ int dccp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
struct dccp_sock *dp = dccp_sk(sk);
struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb);
const int old_state = sk->sk_state;
bool acceptable;
int queued = 0;
/*
@ -603,8 +604,13 @@ int dccp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
*/
if (sk->sk_state == DCCP_LISTEN) {
if (dh->dccph_type == DCCP_PKT_REQUEST) {
if (inet_csk(sk)->icsk_af_ops->conn_request(sk,
skb) < 0)
/* It is possible that we process SYN packets from backlog,
* so we need to make sure to disable BH right there.
*/
local_bh_disable();
acceptable = inet_csk(sk)->icsk_af_ops->conn_request(sk, skb) >= 0;
local_bh_enable();
if (!acceptable)
return 1;
consume_skb(skb);
return 0;


@ -119,10 +119,7 @@ struct sock *dccp_create_openreq_child(const struct sock *sk,
* Activate features: initialise CCIDs, sequence windows etc.
*/
if (dccp_feat_activate_values(newsk, &dreq->dreq_featneg)) {
/* It is still raw copy of parent, so invalidate
* destructor and make plain sk_free() */
newsk->sk_destruct = NULL;
sk_free(newsk);
sk_free_unlock_clone(newsk);
return NULL;
}
dccp_init_xmit_timers(newsk);


@ -622,6 +622,7 @@ const struct nla_policy rtm_ipv4_policy[RTA_MAX + 1] = {
[RTA_ENCAP_TYPE] = { .type = NLA_U16 },
[RTA_ENCAP] = { .type = NLA_NESTED },
[RTA_UID] = { .type = NLA_U32 },
[RTA_MARK] = { .type = NLA_U32 },
};
static int rtm_to_fib_config(struct net *net, struct sk_buff *skb,


@ -23,7 +23,8 @@ int ip_route_me_harder(struct net *net, struct sk_buff *skb, unsigned int addr_t
struct rtable *rt;
struct flowi4 fl4 = {};
__be32 saddr = iph->saddr;
__u8 flags = skb->sk ? inet_sk_flowi_flags(skb->sk) : 0;
const struct sock *sk = skb_to_full_sk(skb);
__u8 flags = sk ? inet_sk_flowi_flags(sk) : 0;
struct net_device *dev = skb_dst(skb)->dev;
unsigned int hh_len;
@ -40,7 +41,7 @@ int ip_route_me_harder(struct net *net, struct sk_buff *skb, unsigned int addr_t
fl4.daddr = iph->daddr;
fl4.saddr = saddr;
fl4.flowi4_tos = RT_TOS(iph->tos);
fl4.flowi4_oif = skb->sk ? skb->sk->sk_bound_dev_if : 0;
fl4.flowi4_oif = sk ? sk->sk_bound_dev_if : 0;
if (!fl4.flowi4_oif)
fl4.flowi4_oif = l3mdev_master_ifindex(dev);
fl4.flowi4_mark = skb->mark;
@ -61,7 +62,7 @@ int ip_route_me_harder(struct net *net, struct sk_buff *skb, unsigned int addr_t
xfrm_decode_session(skb, flowi4_to_flowi(&fl4), AF_INET) == 0) {
struct dst_entry *dst = skb_dst(skb);
skb_dst_set(skb, NULL);
dst = xfrm_lookup(net, dst, flowi4_to_flowi(&fl4), skb->sk, 0);
dst = xfrm_lookup(net, dst, flowi4_to_flowi(&fl4), sk, 0);
if (IS_ERR(dst))
return PTR_ERR(dst);
skb_dst_set(skb, dst);


@ -1110,9 +1110,14 @@ static int tcp_sendmsg_fastopen(struct sock *sk, struct msghdr *msg,
flags = (msg->msg_flags & MSG_DONTWAIT) ? O_NONBLOCK : 0;
err = __inet_stream_connect(sk->sk_socket, msg->msg_name,
msg->msg_namelen, flags, 1);
inet->defer_connect = 0;
*copied = tp->fastopen_req->copied;
tcp_free_fastopen_req(tp);
/* fastopen_req could already be freed in __inet_stream_connect
* if the connection times out or gets rst
*/
if (tp->fastopen_req) {
*copied = tp->fastopen_req->copied;
tcp_free_fastopen_req(tp);
inet->defer_connect = 0;
}
return err;
}
@ -2318,6 +2323,10 @@ int tcp_disconnect(struct sock *sk, int flags)
memset(&tp->rx_opt, 0, sizeof(tp->rx_opt));
__sk_dst_reset(sk);
/* Clean up fastopen related fields */
tcp_free_fastopen_req(tp);
inet->defer_connect = 0;
WARN_ON(inet->inet_num && !icsk->icsk_bind_hash);
sk->sk_error_report(sk);


@ -5886,9 +5886,15 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb)
if (th->syn) {
if (th->fin)
goto discard;
if (icsk->icsk_af_ops->conn_request(sk, skb) < 0)
return 1;
/* It is possible that we process SYN packets from backlog,
* so we need to make sure to disable BH right there.
*/
local_bh_disable();
acceptable = icsk->icsk_af_ops->conn_request(sk, skb) >= 0;
local_bh_enable();
if (!acceptable)
return 1;
consume_skb(skb);
return 0;
}


@ -5693,13 +5693,18 @@ static int addrconf_sysctl_addr_gen_mode(struct ctl_table *ctl, int write,
struct inet6_dev *idev = (struct inet6_dev *)ctl->extra1;
struct net *net = (struct net *)ctl->extra2;
if (!rtnl_trylock())
return restart_syscall();
ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
if (write) {
new_val = *((int *)ctl->data);
if (check_addr_gen_mode(new_val) < 0)
return -EINVAL;
if (check_addr_gen_mode(new_val) < 0) {
ret = -EINVAL;
goto out;
}
/* request for default */
if (&net->ipv6.devconf_dflt->addr_gen_mode == ctl->data) {
@ -5708,20 +5713,23 @@ static int addrconf_sysctl_addr_gen_mode(struct ctl_table *ctl, int write,
/* request for individual net device */
} else {
if (!idev)
return ret;
goto out;
if (check_stable_privacy(idev, net, new_val) < 0)
return -EINVAL;
if (check_stable_privacy(idev, net, new_val) < 0) {
ret = -EINVAL;
goto out;
}
if (idev->cnf.addr_gen_mode != new_val) {
idev->cnf.addr_gen_mode = new_val;
rtnl_lock();
addrconf_dev_config(idev->dev);
rtnl_unlock();
}
}
}
out:
rtnl_unlock();
return ret;
}


@ -589,6 +589,7 @@ int nf_ct_frag6_gather(struct net *net, struct sk_buff *skb, u32 user)
hdr = ipv6_hdr(skb);
fhdr = (struct frag_hdr *)skb_transport_header(skb);
skb_orphan(skb);
fq = fq_find(net, fhdr->identification, user, &hdr->saddr, &hdr->daddr,
skb->dev ? skb->dev->ifindex : 0, ip6_frag_ecn(hdr));
if (fq == NULL) {


@ -2169,10 +2169,13 @@ int ip6_del_rt(struct rt6_info *rt)
static int __ip6_del_rt_siblings(struct rt6_info *rt, struct fib6_config *cfg)
{
struct nl_info *info = &cfg->fc_nlinfo;
struct net *net = info->nl_net;
struct sk_buff *skb = NULL;
struct fib6_table *table;
int err;
int err = -ENOENT;
if (rt == net->ipv6.ip6_null_entry)
goto out_put;
table = rt->rt6i_table;
write_lock_bh(&table->tb6_lock);
@ -2184,7 +2187,7 @@ static int __ip6_del_rt_siblings(struct rt6_info *rt, struct fib6_config *cfg)
if (skb) {
u32 seq = info->nlh ? info->nlh->nlmsg_seq : 0;
if (rt6_fill_node(info->nl_net, skb, rt,
if (rt6_fill_node(net, skb, rt,
NULL, NULL, 0, RTM_DELROUTE,
info->portid, seq, 0) < 0) {
kfree_skb(skb);
@ -2198,17 +2201,18 @@ static int __ip6_del_rt_siblings(struct rt6_info *rt, struct fib6_config *cfg)
rt6i_siblings) {
err = fib6_del(sibling, info);
if (err)
goto out;
goto out_unlock;
}
}
err = fib6_del(rt, info);
out:
out_unlock:
write_unlock_bh(&table->tb6_lock);
out_put:
ip6_rt_put(rt);
if (skb) {
rtnl_notify(skb, info->nl_net, info->portid, RTNLGRP_IPV6_ROUTE,
rtnl_notify(skb, net, info->portid, RTNLGRP_IPV6_ROUTE,
info->nlh, gfp_any());
}
return err;
@ -2891,6 +2895,7 @@ static const struct nla_policy rtm_ipv6_policy[RTA_MAX+1] = {
[RTA_ENCAP] = { .type = NLA_NESTED },
[RTA_EXPIRES] = { .type = NLA_U32 },
[RTA_UID] = { .type = NLA_U32 },
[RTA_MARK] = { .type = NLA_U32 },
};
static int rtm_to_fib6_config(struct sk_buff *skb, struct nlmsghdr *nlh,
@ -3627,6 +3632,12 @@ static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh)
rt = (struct rt6_info *)ip6_route_output(net, NULL, &fl6);
}
if (rt == net->ipv6.ip6_null_entry) {
err = rt->dst.error;
ip6_rt_put(rt);
goto errout;
}
skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
if (!skb) {
ip6_rt_put(rt);

View File

@ -85,7 +85,7 @@ void ___ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid,
ht_dbg(sta->sdata,
"Rx BA session stop requested for %pM tid %u %s reason: %d\n",
sta->sta.addr, tid,
initiator == WLAN_BACK_RECIPIENT ? "recipient" : "inititator",
initiator == WLAN_BACK_RECIPIENT ? "recipient" : "initiator",
(int)reason);
if (drv_ampdu_action(local, sta->sdata, &params))
@ -398,6 +398,7 @@ void __ieee80211_start_rx_ba_session(struct sta_info *sta,
tid_agg_rx->timeout = timeout;
tid_agg_rx->stored_mpdu_num = 0;
tid_agg_rx->auto_seq = auto_seq;
tid_agg_rx->started = false;
tid_agg_rx->reorder_buf_filtered = 0;
status = WLAN_STATUS_SUCCESS;

View File

@ -428,7 +428,7 @@ struct ieee80211_sta_tx_tspec {
bool downgraded;
};
DECLARE_EWMA(beacon_signal, 16, 4)
DECLARE_EWMA(beacon_signal, 4, 4)
struct ieee80211_if_managed {
struct timer_list timer;

View File

@ -168,6 +168,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
break;
}
flush_delayed_work(&sdata->dec_tailroom_needed_wk);
drv_remove_interface(local, sdata);
}

View File

@ -4,7 +4,7 @@
* Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
* Copyright 2007-2010 Johannes Berg <johannes@sipsolutions.net>
* Copyright 2013-2014 Intel Mobile Communications GmbH
* Copyright(c) 2015 - 2016 Intel Deutschland GmbH
* Copyright(c) 2015 - 2017 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@ -1034,6 +1034,18 @@ static bool ieee80211_sta_manage_reorder_buf(struct ieee80211_sub_if_data *sdata
buf_size = tid_agg_rx->buf_size;
head_seq_num = tid_agg_rx->head_seq_num;
/*
* If the current MPDU's SN is smaller than the SSN, it shouldn't
* be reordered.
*/
if (unlikely(!tid_agg_rx->started)) {
if (ieee80211_sn_less(mpdu_seq_num, head_seq_num)) {
ret = false;
goto out;
}
tid_agg_rx->started = true;
}
/* frame with out of date sequence number */
if (ieee80211_sn_less(mpdu_seq_num, head_seq_num)) {
dev_kfree_skb(skb);
@ -3880,6 +3892,7 @@ static bool ieee80211_invoke_fast_rx(struct ieee80211_rx_data *rx,
stats->last_rate = sta_stats_encode_rate(status);
stats->fragments++;
stats->packets++;
if (!(status->flag & RX_FLAG_NO_SIGNAL_VAL)) {
stats->last_signal = status->signal;
@ -4073,15 +4086,17 @@ static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
ieee80211_is_beacon(hdr->frame_control)))
ieee80211_scan_rx(local, skb);
if (pubsta) {
rx.sta = container_of(pubsta, struct sta_info, sta);
rx.sdata = rx.sta->sdata;
if (ieee80211_prepare_and_rx_handle(&rx, skb, true))
return;
goto out;
} else if (ieee80211_is_data(fc)) {
if (ieee80211_is_data(fc)) {
struct sta_info *sta, *prev_sta;
if (pubsta) {
rx.sta = container_of(pubsta, struct sta_info, sta);
rx.sdata = rx.sta->sdata;
if (ieee80211_prepare_and_rx_handle(&rx, skb, true))
return;
goto out;
}
prev_sta = NULL;
for_each_sta_info(local, hdr->addr2, sta, tmp) {

View File

@ -688,7 +688,7 @@ static void __sta_info_recalc_tim(struct sta_info *sta, bool ignore_pending)
}
/* No need to do anything if the driver does all */
if (ieee80211_hw_check(&local->hw, AP_LINK_PS))
if (ieee80211_hw_check(&local->hw, AP_LINK_PS) && !local->ops->set_tim)
return;
if (sta->dead)
@ -1264,7 +1264,7 @@ void ieee80211_sta_ps_deliver_wakeup(struct sta_info *sta)
sta_info_recalc_tim(sta);
ps_dbg(sdata,
"STA %pM aid %d sending %d filtered/%d PS frames since STA not sleeping anymore\n",
"STA %pM aid %d sending %d filtered/%d PS frames since STA woke up\n",
sta->sta.addr, sta->sta.aid, filtered, buffered);
ieee80211_check_fast_xmit(sta);

View File

@ -189,6 +189,7 @@ struct tid_ampdu_tx {
* @auto_seq: used for offloaded BA sessions to automatically pick head_seq_num
* and ssn.
* @removed: this session is removed (but might have been found due to RCU)
* @started: this session has started (head ssn or higher was received)
*
* This structure's lifetime is managed by RCU, assignments to
* the array holding it must hold the aggregation mutex.
@ -212,8 +213,9 @@ struct tid_ampdu_rx {
u16 ssn;
u16 buf_size;
u16 timeout;
bool auto_seq;
bool removed;
u8 auto_seq:1,
removed:1,
started:1;
};
/**
@ -370,7 +372,7 @@ struct mesh_sta {
unsigned int fail_avg;
};
DECLARE_EWMA(signal, 1024, 8)
DECLARE_EWMA(signal, 10, 8)
struct ieee80211_sta_rx_stats {
unsigned long packets;
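
Turning the two bools into one-bit fields lets the new started flag join auto_seq and removed without growing struct tid_ampdu_rx: three flags now share a single u8. Illustrated in isolation (invented struct names):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct flags_as_bools {
	bool auto_seq;
	bool removed;
	bool started;		/* three separate bytes */
};

struct flags_as_bitfield {
	uint8_t auto_seq:1,
		removed:1,
		started:1;	/* three flags in one byte */
};

int main(void)
{
	printf("bools: %zu byte(s), bitfield: %zu byte(s)\n",
	       sizeof(struct flags_as_bools),
	       sizeof(struct flags_as_bitfield));
	return 0;
}
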

View File

@ -51,7 +51,8 @@ static void ieee80211_handle_filtered_frame(struct ieee80211_local *local,
struct ieee80211_hdr *hdr = (void *)skb->data;
int ac;
if (info->flags & IEEE80211_TX_CTL_NO_PS_BUFFER) {
if (info->flags & (IEEE80211_TX_CTL_NO_PS_BUFFER |
IEEE80211_TX_CTL_AMPDU)) {
ieee80211_free_txskb(&local->hw, skb);
return;
}

View File

@ -1628,8 +1628,6 @@ static int __init nf_conntrack_sip_init(void)
ports[ports_c++] = SIP_PORT;
for (i = 0; i < ports_c; i++) {
memset(&sip[i], 0, sizeof(sip[i]));
nf_ct_helper_init(&sip[4 * i], AF_INET, IPPROTO_UDP, "sip",
SIP_PORT, ports[i], i, sip_exp_policy,
SIP_EXPECT_MAX,

View File

@ -461,16 +461,15 @@ nla_put_failure:
return -1;
}
static int nf_tables_table_notify(const struct nft_ctx *ctx, int event)
static void nf_tables_table_notify(const struct nft_ctx *ctx, int event)
{
struct sk_buff *skb;
int err;
if (!ctx->report &&
!nfnetlink_has_listeners(ctx->net, NFNLGRP_NFTABLES))
return 0;
return;
err = -ENOBUFS;
skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
if (skb == NULL)
goto err;
@ -482,14 +481,11 @@ static int nf_tables_table_notify(const struct nft_ctx *ctx, int event)
goto err;
}
err = nfnetlink_send(skb, ctx->net, ctx->portid, NFNLGRP_NFTABLES,
ctx->report, GFP_KERNEL);
nfnetlink_send(skb, ctx->net, ctx->portid, NFNLGRP_NFTABLES,
ctx->report, GFP_KERNEL);
return;
err:
if (err < 0) {
nfnetlink_set_err(ctx->net, ctx->portid, NFNLGRP_NFTABLES,
err);
}
return err;
nfnetlink_set_err(ctx->net, ctx->portid, NFNLGRP_NFTABLES, -ENOBUFS);
}
static int nf_tables_dump_tables(struct sk_buff *skb,
@ -1050,16 +1046,15 @@ nla_put_failure:
return -1;
}
static int nf_tables_chain_notify(const struct nft_ctx *ctx, int event)
static void nf_tables_chain_notify(const struct nft_ctx *ctx, int event)
{
struct sk_buff *skb;
int err;
if (!ctx->report &&
!nfnetlink_has_listeners(ctx->net, NFNLGRP_NFTABLES))
return 0;
return;
err = -ENOBUFS;
skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
if (skb == NULL)
goto err;
@ -1072,14 +1067,11 @@ static int nf_tables_chain_notify(const struct nft_ctx *ctx, int event)
goto err;
}
err = nfnetlink_send(skb, ctx->net, ctx->portid, NFNLGRP_NFTABLES,
ctx->report, GFP_KERNEL);
nfnetlink_send(skb, ctx->net, ctx->portid, NFNLGRP_NFTABLES,
ctx->report, GFP_KERNEL);
return;
err:
if (err < 0) {
nfnetlink_set_err(ctx->net, ctx->portid, NFNLGRP_NFTABLES,
err);
}
return err;
nfnetlink_set_err(ctx->net, ctx->portid, NFNLGRP_NFTABLES, -ENOBUFS);
}
static int nf_tables_dump_chains(struct sk_buff *skb,
@ -1934,18 +1926,16 @@ nla_put_failure:
return -1;
}
static int nf_tables_rule_notify(const struct nft_ctx *ctx,
const struct nft_rule *rule,
int event)
static void nf_tables_rule_notify(const struct nft_ctx *ctx,
const struct nft_rule *rule, int event)
{
struct sk_buff *skb;
int err;
if (!ctx->report &&
!nfnetlink_has_listeners(ctx->net, NFNLGRP_NFTABLES))
return 0;
return;
err = -ENOBUFS;
skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
if (skb == NULL)
goto err;
@ -1958,14 +1948,11 @@ static int nf_tables_rule_notify(const struct nft_ctx *ctx,
goto err;
}
err = nfnetlink_send(skb, ctx->net, ctx->portid, NFNLGRP_NFTABLES,
ctx->report, GFP_KERNEL);
nfnetlink_send(skb, ctx->net, ctx->portid, NFNLGRP_NFTABLES,
ctx->report, GFP_KERNEL);
return;
err:
if (err < 0) {
nfnetlink_set_err(ctx->net, ctx->portid, NFNLGRP_NFTABLES,
err);
}
return err;
nfnetlink_set_err(ctx->net, ctx->portid, NFNLGRP_NFTABLES, -ENOBUFS);
}
struct nft_rule_dump_ctx {
@ -2696,9 +2683,9 @@ nla_put_failure:
return -1;
}
static int nf_tables_set_notify(const struct nft_ctx *ctx,
const struct nft_set *set,
int event, gfp_t gfp_flags)
static void nf_tables_set_notify(const struct nft_ctx *ctx,
const struct nft_set *set, int event,
gfp_t gfp_flags)
{
struct sk_buff *skb;
u32 portid = ctx->portid;
@ -2706,9 +2693,8 @@ static int nf_tables_set_notify(const struct nft_ctx *ctx,
if (!ctx->report &&
!nfnetlink_has_listeners(ctx->net, NFNLGRP_NFTABLES))
return 0;
return;
err = -ENOBUFS;
skb = nlmsg_new(NLMSG_GOODSIZE, gfp_flags);
if (skb == NULL)
goto err;
@ -2719,12 +2705,11 @@ static int nf_tables_set_notify(const struct nft_ctx *ctx,
goto err;
}
err = nfnetlink_send(skb, ctx->net, portid, NFNLGRP_NFTABLES,
ctx->report, gfp_flags);
nfnetlink_send(skb, ctx->net, portid, NFNLGRP_NFTABLES, ctx->report,
gfp_flags);
return;
err:
if (err < 0)
nfnetlink_set_err(ctx->net, portid, NFNLGRP_NFTABLES, err);
return err;
nfnetlink_set_err(ctx->net, portid, NFNLGRP_NFTABLES, -ENOBUFS);
}
static int nf_tables_dump_sets(struct sk_buff *skb, struct netlink_callback *cb)
@ -3504,10 +3489,10 @@ nla_put_failure:
return -1;
}
static int nf_tables_setelem_notify(const struct nft_ctx *ctx,
const struct nft_set *set,
const struct nft_set_elem *elem,
int event, u16 flags)
static void nf_tables_setelem_notify(const struct nft_ctx *ctx,
const struct nft_set *set,
const struct nft_set_elem *elem,
int event, u16 flags)
{
struct net *net = ctx->net;
u32 portid = ctx->portid;
@ -3515,9 +3500,8 @@ static int nf_tables_setelem_notify(const struct nft_ctx *ctx,
int err;
if (!ctx->report && !nfnetlink_has_listeners(net, NFNLGRP_NFTABLES))
return 0;
return;
err = -ENOBUFS;
skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
if (skb == NULL)
goto err;
@ -3529,12 +3513,11 @@ static int nf_tables_setelem_notify(const struct nft_ctx *ctx,
goto err;
}
err = nfnetlink_send(skb, net, portid, NFNLGRP_NFTABLES, ctx->report,
GFP_KERNEL);
nfnetlink_send(skb, net, portid, NFNLGRP_NFTABLES, ctx->report,
GFP_KERNEL);
return;
err:
if (err < 0)
nfnetlink_set_err(net, portid, NFNLGRP_NFTABLES, err);
return err;
nfnetlink_set_err(net, portid, NFNLGRP_NFTABLES, -ENOBUFS);
}
static struct nft_trans *nft_trans_elem_alloc(struct nft_ctx *ctx,
@ -4476,18 +4459,17 @@ static int nf_tables_delobj(struct net *net, struct sock *nlsk,
return nft_delobj(&ctx, obj);
}
int nft_obj_notify(struct net *net, struct nft_table *table,
struct nft_object *obj, u32 portid, u32 seq, int event,
int family, int report, gfp_t gfp)
void nft_obj_notify(struct net *net, struct nft_table *table,
struct nft_object *obj, u32 portid, u32 seq, int event,
int family, int report, gfp_t gfp)
{
struct sk_buff *skb;
int err;
if (!report &&
!nfnetlink_has_listeners(net, NFNLGRP_NFTABLES))
return 0;
return;
err = -ENOBUFS;
skb = nlmsg_new(NLMSG_GOODSIZE, gfp);
if (skb == NULL)
goto err;
@ -4499,21 +4481,18 @@ int nft_obj_notify(struct net *net, struct nft_table *table,
goto err;
}
err = nfnetlink_send(skb, net, portid, NFNLGRP_NFTABLES, report, gfp);
nfnetlink_send(skb, net, portid, NFNLGRP_NFTABLES, report, gfp);
return;
err:
if (err < 0) {
nfnetlink_set_err(net, portid, NFNLGRP_NFTABLES, err);
}
return err;
nfnetlink_set_err(net, portid, NFNLGRP_NFTABLES, -ENOBUFS);
}
EXPORT_SYMBOL_GPL(nft_obj_notify);
static int nf_tables_obj_notify(const struct nft_ctx *ctx,
struct nft_object *obj, int event)
static void nf_tables_obj_notify(const struct nft_ctx *ctx,
struct nft_object *obj, int event)
{
return nft_obj_notify(ctx->net, ctx->table, obj, ctx->portid,
ctx->seq, event, ctx->afi->family, ctx->report,
GFP_KERNEL);
nft_obj_notify(ctx->net, ctx->table, obj, ctx->portid, ctx->seq, event,
ctx->afi->family, ctx->report, GFP_KERNEL);
}
static int nf_tables_fill_gen_info(struct sk_buff *skb, struct net *net,
@ -4543,7 +4522,8 @@ nla_put_failure:
return -EMSGSIZE;
}
static int nf_tables_gen_notify(struct net *net, struct sk_buff *skb, int event)
static void nf_tables_gen_notify(struct net *net, struct sk_buff *skb,
int event)
{
struct nlmsghdr *nlh = nlmsg_hdr(skb);
struct sk_buff *skb2;
@ -4551,9 +4531,8 @@ static int nf_tables_gen_notify(struct net *net, struct sk_buff *skb, int event)
if (nlmsg_report(nlh) &&
!nfnetlink_has_listeners(net, NFNLGRP_NFTABLES))
return 0;
return;
err = -ENOBUFS;
skb2 = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
if (skb2 == NULL)
goto err;
@ -4565,14 +4544,12 @@ static int nf_tables_gen_notify(struct net *net, struct sk_buff *skb, int event)
goto err;
}
err = nfnetlink_send(skb2, net, NETLINK_CB(skb).portid,
NFNLGRP_NFTABLES, nlmsg_report(nlh), GFP_KERNEL);
nfnetlink_send(skb2, net, NETLINK_CB(skb).portid, NFNLGRP_NFTABLES,
nlmsg_report(nlh), GFP_KERNEL);
return;
err:
if (err < 0) {
nfnetlink_set_err(net, NETLINK_CB(skb).portid, NFNLGRP_NFTABLES,
err);
}
return err;
nfnetlink_set_err(net, NETLINK_CB(skb).portid, NFNLGRP_NFTABLES,
-ENOBUFS);
}
static int nf_tables_getgen(struct net *net, struct sock *nlsk,
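
Every notify helper in this file gets the same conversion: netlink notifications are best-effort, so the functions become void, the return value of nfnetlink_send() is ignored, and the one failure still worth surfacing, the skb allocation, is reported to listeners as -ENOBUFS through nfnetlink_set_err(). Boiled down to a sketch (the nfnetlink calls are the real interfaces; the fill step is abbreviated):

#include <linux/netfilter/nfnetlink.h>
#include <net/netfilter/nf_tables.h>
#include <net/netlink.h>

static void example_notify(const struct nft_ctx *ctx, int event)
{
	struct sk_buff *skb;

	(void)event;	/* the real fill step would encode this */

	if (!ctx->report &&
	    !nfnetlink_has_listeners(ctx->net, NFNLGRP_NFTABLES))
		return;				/* nobody listening */

	skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		goto err;

	/* ... fill skb; a fill failure would kfree_skb() and goto err ... */

	nfnetlink_send(skb, ctx->net, ctx->portid, NFNLGRP_NFTABLES,
		       ctx->report, GFP_KERNEL);	/* best effort */
	return;
err:
	nfnetlink_set_err(ctx->net, ctx->portid, NFNLGRP_NFTABLES, -ENOBUFS);
}
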

View File

@ -60,11 +60,10 @@ static bool nft_rbtree_lookup(const struct net *net, const struct nft_set *set,
d = memcmp(this, key, set->klen);
if (d < 0) {
parent = parent->rb_left;
/* In case of adjacent ranges, we always see the high
* part of the range in first place, before the low one.
* So don't update interval if the keys are equal.
*/
if (interval && nft_rbtree_equal(set, this, interval))
if (interval &&
nft_rbtree_equal(set, this, interval) &&
nft_rbtree_interval_end(this) &&
!nft_rbtree_interval_end(interval))
continue;
interval = rbe;
} else if (d > 0)
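
The deleted comment encoded an ordering assumption (the end of an adjacent range is always seen before its start) that does not hold for every tree layout; the new condition makes the intent explicit: for two nodes with the same key, an interval start must win over the interval end of the neighbouring range, regardless of visit order. A toy demonstration of that tie-break (userspace sketch, names invented):

#include <stdbool.h>
#include <stdio.h>

/* Adjacent ranges yield two nodes with the same key: the end of one
 * interval and the start of the next. Lookups must resolve the tie
 * to the interval start, whichever node the walk meets first. */
struct elem { int key; bool interval_end; };

static const struct elem *update(const struct elem *cand,
				 const struct elem *node)
{
	/* Mirror of the fixed condition: keep the candidate only when
	 * it opens an interval and the new node merely closes one. */
	if (cand && cand->key == node->key &&
	    node->interval_end && !cand->interval_end)
		return cand;
	return node;
}

int main(void)
{
	struct elem end   = { .key = 21, .interval_end = true };
	struct elem start = { .key = 21, .interval_end = false };

	/* Both visit orders now settle on the interval start: */
	printf("%d %d\n", update(&start, &end)->interval_end,
	       update(&end, &start)->interval_end);
	return 0;
}
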

View File

@ -796,9 +796,8 @@ static void ovs_fragment(struct net *net, struct vport *vport,
unsigned long orig_dst;
struct rt6_info ovs_rt;
if (!v6ops) {
if (!v6ops)
goto err;
}
prepare_frag(vport, skb, orig_network_offset,
ovs_key_mac_proto(key));

View File

@ -485,7 +485,6 @@ static int handle_fragments(struct net *net, struct sw_flow_key *key,
} else if (key->eth.type == htons(ETH_P_IPV6)) {
enum ip6_defrag_users user = IP6_DEFRAG_CONNTRACK_IN + zone;
skb_orphan(skb);
memset(IP6CB(skb), 0, sizeof(struct inet6_skb_parm));
err = nf_ct_frag6_gather(net, skb, user);
if (err) {

View File

@ -3103,7 +3103,7 @@ static int packet_bind_spkt(struct socket *sock, struct sockaddr *uaddr,
int addr_len)
{
struct sock *sk = sock->sk;
char name[15];
char name[sizeof(uaddr->sa_data) + 1];
/*
* Check legality
@ -3111,7 +3111,11 @@ static int packet_bind_spkt(struct socket *sock, struct sockaddr *uaddr,
if (addr_len != sizeof(struct sockaddr))
return -EINVAL;
strlcpy(name, uaddr->sa_data, sizeof(name));
/* uaddr->sa_data comes from the userspace, it's not guaranteed to be
* zero-terminated.
*/
memcpy(name, uaddr->sa_data, sizeof(uaddr->sa_data));
name[sizeof(uaddr->sa_data)] = 0;
return packet_do_bind(sk, name, 0, pkt_sk(sk)->num);
}
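
The old code pushed uaddr->sa_data through strlcpy(), which assumes a NUL-terminated source; sa_data is a fixed 14-byte field copied from userspace and carries no such guarantee, so strlcpy() could read past it. The replacement copies exactly sizeof(sa_data) bytes into a buffer one byte larger and terminates it by hand. The same idiom in isolation (userspace sketch):

#include <stdio.h>
#include <string.h>

#define SA_DATA_LEN 14	/* sizeof(((struct sockaddr *)0)->sa_data) */

/* Safely turn a possibly-unterminated fixed-size field into a C string. */
static void copy_name(char *dst, const char src[SA_DATA_LEN])
{
	memcpy(dst, src, SA_DATA_LEN);	/* never read past the field */
	dst[SA_DATA_LEN] = '\0';
}

int main(void)
{
	char raw[SA_DATA_LEN];
	char name[SA_DATA_LEN + 1];

	memset(raw, 'x', sizeof(raw));	/* no terminator anywhere */
	copy_name(name, raw);
	printf("%s\n", name);		/* fourteen 'x's, no overread */
	return 0;
}
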

View File

@ -45,8 +45,8 @@
#include "ib.h"
#include "ib_mr.h"
unsigned int rds_ib_mr_1m_pool_size = RDS_MR_1M_POOL_SIZE;
unsigned int rds_ib_mr_8k_pool_size = RDS_MR_8K_POOL_SIZE;
static unsigned int rds_ib_mr_1m_pool_size = RDS_MR_1M_POOL_SIZE;
static unsigned int rds_ib_mr_8k_pool_size = RDS_MR_8K_POOL_SIZE;
unsigned int rds_ib_retry_count = RDS_IB_DEFAULT_RETRY_COUNT;
module_param(rds_ib_mr_1m_pool_size, int, 0444);
@ -438,16 +438,12 @@ int rds_ib_init(void)
if (ret)
goto out_sysctl;
ret = rds_trans_register(&rds_ib_transport);
if (ret)
goto out_recv;
rds_trans_register(&rds_ib_transport);
rds_info_register_func(RDS_INFO_IB_CONNECTIONS, rds_ib_ic_info);
goto out;
out_recv:
rds_ib_recv_exit();
out_sysctl:
rds_ib_sysctl_exit();
out_ibreg:

View File

@ -107,8 +107,6 @@ struct rds_ib_mr_pool {
};
extern struct workqueue_struct *rds_ib_mr_wq;
extern unsigned int rds_ib_mr_1m_pool_size;
extern unsigned int rds_ib_mr_8k_pool_size;
extern bool prefer_frmr;
struct rds_ib_mr_pool *rds_ib_create_mr_pool(struct rds_ib_device *rds_dev,

View File

@ -903,7 +903,7 @@ void rds_connect_path_complete(struct rds_conn_path *conn, int curr);
void rds_connect_complete(struct rds_connection *conn);
/* transport.c */
int rds_trans_register(struct rds_transport *trans);
void rds_trans_register(struct rds_transport *trans);
void rds_trans_unregister(struct rds_transport *trans);
struct rds_transport *rds_trans_get_preferred(struct net *net, __be32 addr);
void rds_trans_put(struct rds_transport *trans);

View File

@ -652,16 +652,12 @@ static int rds_tcp_init(void)
if (ret)
goto out_pernet;
ret = rds_trans_register(&rds_tcp_transport);
if (ret)
goto out_recv;
rds_trans_register(&rds_tcp_transport);
rds_info_register_func(RDS_INFO_TCP_SOCKETS, rds_tcp_tc_info);
goto out;
out_recv:
rds_tcp_recv_exit();
out_pernet:
unregister_pernet_subsys(&rds_tcp_net_ops);
out_notifier:

View File

@ -40,7 +40,7 @@
static struct rds_transport *transports[RDS_TRANS_COUNT];
static DECLARE_RWSEM(rds_trans_sem);
int rds_trans_register(struct rds_transport *trans)
void rds_trans_register(struct rds_transport *trans)
{
BUG_ON(strlen(trans->t_name) + 1 > TRANSNAMSIZ);
@ -55,8 +55,6 @@ int rds_trans_register(struct rds_transport *trans)
}
up_write(&rds_trans_sem);
return 0;
}
EXPORT_SYMBOL_GPL(rds_trans_register);

View File

@ -290,10 +290,11 @@ struct rxrpc_call *rxrpc_kernel_begin_call(struct socket *sock,
cp.exclusive = false;
cp.service_id = srx->srx_service;
call = rxrpc_new_client_call(rx, &cp, srx, user_call_ID, gfp);
/* The socket has been unlocked. */
if (!IS_ERR(call))
call->notify_rx = notify_rx;
release_sock(&rx->sk);
mutex_unlock(&call->user_mutex);
_leave(" = %p", call);
return call;
}
@ -310,7 +311,10 @@ EXPORT_SYMBOL(rxrpc_kernel_begin_call);
void rxrpc_kernel_end_call(struct socket *sock, struct rxrpc_call *call)
{
_enter("%d{%d}", call->debug_id, atomic_read(&call->usage));
mutex_lock(&call->user_mutex);
rxrpc_release_call(rxrpc_sk(sock->sk), call);
mutex_unlock(&call->user_mutex);
rxrpc_put_call(call, rxrpc_call_put_kernel);
}
EXPORT_SYMBOL(rxrpc_kernel_end_call);
@ -450,14 +454,16 @@ static int rxrpc_sendmsg(struct socket *sock, struct msghdr *m, size_t len)
case RXRPC_SERVER_BOUND:
case RXRPC_SERVER_LISTENING:
ret = rxrpc_do_sendmsg(rx, m, len);
break;
/* The socket has been unlocked */
goto out;
default:
ret = -EINVAL;
break;
goto error_unlock;
}
error_unlock:
release_sock(&rx->sk);
out:
_leave(" = %d", ret);
return ret;
}

View File

@ -467,6 +467,7 @@ struct rxrpc_call {
struct rxrpc_connection *conn; /* connection carrying call */
struct rxrpc_peer *peer; /* Peer record for remote address */
struct rxrpc_sock __rcu *socket; /* socket responsible */
struct mutex user_mutex; /* User access mutex */
ktime_t ack_at; /* When deferred ACK needs to happen */
ktime_t resend_at; /* When next resend needs to happen */
ktime_t ping_at; /* When next to send a ping */

View File

@ -323,6 +323,8 @@ static struct rxrpc_call *rxrpc_alloc_incoming_call(struct rxrpc_sock *rx,
*
* If we want to report an error, we mark the skb with the packet type and
* abort code and return NULL.
*
* The call is returned with the user access mutex held.
*/
struct rxrpc_call *rxrpc_new_incoming_call(struct rxrpc_local *local,
struct rxrpc_connection *conn,
@ -371,6 +373,18 @@ found_service:
trace_rxrpc_receive(call, rxrpc_receive_incoming,
sp->hdr.serial, sp->hdr.seq);
/* Lock the call to prevent rxrpc_kernel_send/recv_data() and
* sendmsg()/recvmsg() inconveniently stealing the mutex once the
* notification is generated.
*
* The BUG should never happen because the kernel should be well
* behaved enough not to access the call before the first notification
* event and userspace is prevented from doing so until the state is
* appropriate.
*/
if (!mutex_trylock(&call->user_mutex))
BUG();
/* Make the call live. */
rxrpc_incoming_call(rx, call, skb);
conn = call->conn;
@ -429,10 +443,12 @@ out:
/*
* handle acceptance of a call by userspace
* - assign the user call ID to the call at the front of the queue
* - called with the socket locked.
*/
struct rxrpc_call *rxrpc_accept_call(struct rxrpc_sock *rx,
unsigned long user_call_ID,
rxrpc_notify_rx_t notify_rx)
__releases(&rx->sk.sk_lock.slock)
{
struct rxrpc_call *call;
struct rb_node *parent, **pp;
@ -446,6 +462,7 @@ struct rxrpc_call *rxrpc_accept_call(struct rxrpc_sock *rx,
if (list_empty(&rx->to_be_accepted)) {
write_unlock(&rx->call_lock);
release_sock(&rx->sk);
kleave(" = -ENODATA [empty]");
return ERR_PTR(-ENODATA);
}
@ -470,10 +487,39 @@ struct rxrpc_call *rxrpc_accept_call(struct rxrpc_sock *rx,
*/
call = list_entry(rx->to_be_accepted.next,
struct rxrpc_call, accept_link);
write_unlock(&rx->call_lock);
/* We need to gain the mutex from the interrupt handler without
* upsetting lockdep, so we have to release it there and take it here.
* We are, however, still holding the socket lock, so other accepts
* must wait for us and no one can add the user ID behind our backs.
*/
if (mutex_lock_interruptible(&call->user_mutex) < 0) {
release_sock(&rx->sk);
kleave(" = -ERESTARTSYS");
return ERR_PTR(-ERESTARTSYS);
}
write_lock(&rx->call_lock);
list_del_init(&call->accept_link);
sk_acceptq_removed(&rx->sk);
rxrpc_see_call(call);
/* Find the user ID insertion point. */
pp = &rx->calls.rb_node;
parent = NULL;
while (*pp) {
parent = *pp;
call = rb_entry(parent, struct rxrpc_call, sock_node);
if (user_call_ID < call->user_call_ID)
pp = &(*pp)->rb_left;
else if (user_call_ID > call->user_call_ID)
pp = &(*pp)->rb_right;
else
BUG();
}
write_lock_bh(&call->state_lock);
switch (call->state) {
case RXRPC_CALL_SERVER_ACCEPTING:
@ -499,6 +545,7 @@ struct rxrpc_call *rxrpc_accept_call(struct rxrpc_sock *rx,
write_unlock(&rx->call_lock);
rxrpc_notify_socket(call);
rxrpc_service_prealloc(rx, GFP_KERNEL);
release_sock(&rx->sk);
_leave(" = %p{%d}", call, call->debug_id);
return call;
@ -515,6 +562,7 @@ id_in_use:
write_unlock(&rx->call_lock);
out:
rxrpc_service_prealloc(rx, GFP_KERNEL);
release_sock(&rx->sk);
_leave(" = %d", ret);
return ERR_PTR(ret);
}
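
The pattern running through these rxrpc changes is a lock handoff: rxrpc_new_incoming_call() takes call->user_mutex with mutex_trylock() on the input path, leaves it held, and a later process-context step releases it, while rxrpc_accept_call() acquires it with mutex_lock_interruptible() before dropping the socket lock so no one can slip a user ID in behind its back. A cut-down sketch of that acquire-then-hand-off shape (types invented for illustration; the mutex calls are real kernel API):

#include <linux/errno.h>
#include <linux/mutex.h>

struct example_call {
	struct mutex user_mutex;	/* serialises user access to the call */
};

/* Input-path side: must not sleep, so only try to take the mutex. */
static bool example_begin_incoming(struct example_call *call)
{
	/* On success the mutex is deliberately left held; a later
	 * process-context notification handler releases it. */
	return mutex_trylock(&call->user_mutex);
}

/* Process-context side: may sleep, but must stay interruptible. */
static int example_accept(struct example_call *call)
{
	if (mutex_lock_interruptible(&call->user_mutex) < 0)
		return -ERESTARTSYS;
	/* ... publish the user call ID ... */
	mutex_unlock(&call->user_mutex);
	return 0;
}
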

View File

@ -115,6 +115,7 @@ struct rxrpc_call *rxrpc_alloc_call(gfp_t gfp)
if (!call->rxtx_annotations)
goto nomem_2;
mutex_init(&call->user_mutex);
setup_timer(&call->timer, rxrpc_call_timer_expired,
(unsigned long)call);
INIT_WORK(&call->processor, &rxrpc_process_call);
@ -194,14 +195,16 @@ static void rxrpc_start_call_timer(struct rxrpc_call *call)
}
/*
* set up a call for the given data
* - called in process context with IRQs enabled
* Set up a call for the given parameters.
* - Called with the socket lock held, which it must release.
* - If it returns a call, the call's lock will need releasing by the caller.
*/
struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx,
struct rxrpc_conn_parameters *cp,
struct sockaddr_rxrpc *srx,
unsigned long user_call_ID,
gfp_t gfp)
__releases(&rx->sk.sk_lock.slock)
{
struct rxrpc_call *call, *xcall;
struct rb_node *parent, **pp;
@ -212,6 +215,7 @@ struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx,
call = rxrpc_alloc_client_call(srx, gfp);
if (IS_ERR(call)) {
release_sock(&rx->sk);
_leave(" = %ld", PTR_ERR(call));
return call;
}
@ -219,6 +223,11 @@ struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx,
trace_rxrpc_call(call, rxrpc_call_new_client, atomic_read(&call->usage),
here, (const void *)user_call_ID);
/* We need to protect a partially set up call against the user as we
* will be acting outside the socket lock.
*/
mutex_lock(&call->user_mutex);
/* Publish the call, even though it is incompletely set up as yet */
write_lock(&rx->call_lock);
@ -250,6 +259,9 @@ struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx,
list_add_tail(&call->link, &rxrpc_calls);
write_unlock(&rxrpc_call_lock);
/* From this point on, the call is protected by its own lock. */
release_sock(&rx->sk);
/* Set up or get a connection record and set the protocol parameters,
* including channel number and call ID.
*/
@ -279,6 +291,7 @@ struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx,
*/
error_dup_user_ID:
write_unlock(&rx->call_lock);
release_sock(&rx->sk);
ret = -EEXIST;
error:
@ -287,6 +300,7 @@ error:
trace_rxrpc_call(call, rxrpc_call_error, atomic_read(&call->usage),
here, ERR_PTR(ret));
rxrpc_release_call(rx, call);
mutex_unlock(&call->user_mutex);
rxrpc_put_call(call, rxrpc_call_put);
_leave(" = %d", ret);
return ERR_PTR(ret);

View File

@ -1194,6 +1194,7 @@ void rxrpc_data_ready(struct sock *udp_sk)
goto reject_packet;
}
rxrpc_send_ping(call, skb, skew);
mutex_unlock(&call->user_mutex);
}
rxrpc_input_call_packet(call, skb, skew);

View File

@ -489,6 +489,20 @@ try_again:
trace_rxrpc_recvmsg(call, rxrpc_recvmsg_dequeue, 0, 0, 0, 0);
/* We're going to drop the socket lock, so we need to lock the call
* against interference by sendmsg.
*/
if (!mutex_trylock(&call->user_mutex)) {
ret = -EWOULDBLOCK;
if (flags & MSG_DONTWAIT)
goto error_requeue_call;
ret = -ERESTARTSYS;
if (mutex_lock_interruptible(&call->user_mutex) < 0)
goto error_requeue_call;
}
release_sock(&rx->sk);
if (test_bit(RXRPC_CALL_RELEASED, &call->flags))
BUG();
@ -504,7 +518,7 @@ try_again:
&call->user_call_ID);
}
if (ret < 0)
goto error;
goto error_unlock_call;
}
if (msg->msg_name) {
@ -535,12 +549,12 @@ try_again:
}
if (ret < 0)
goto error;
goto error_unlock_call;
if (call->state == RXRPC_CALL_COMPLETE) {
ret = rxrpc_recvmsg_term(call, msg);
if (ret < 0)
goto error;
goto error_unlock_call;
if (!(flags & MSG_PEEK))
rxrpc_release_call(rx, call);
msg->msg_flags |= MSG_EOR;
@ -553,8 +567,21 @@ try_again:
msg->msg_flags &= ~MSG_MORE;
ret = copied;
error:
error_unlock_call:
mutex_unlock(&call->user_mutex);
rxrpc_put_call(call, rxrpc_call_put);
trace_rxrpc_recvmsg(call, rxrpc_recvmsg_return, 0, 0, 0, ret);
return ret;
error_requeue_call:
if (!(flags & MSG_PEEK)) {
write_lock_bh(&rx->recvmsg_lock);
list_add(&call->recvmsg_link, &rx->recvmsg_q);
write_unlock_bh(&rx->recvmsg_lock);
trace_rxrpc_recvmsg(call, rxrpc_recvmsg_requeue, 0, 0, 0, 0);
} else {
rxrpc_put_call(call, rxrpc_call_put);
}
error_no_call:
release_sock(&rx->sk);
trace_rxrpc_recvmsg(call, rxrpc_recvmsg_return, 0, 0, 0, ret);
@ -611,7 +638,7 @@ int rxrpc_kernel_recv_data(struct socket *sock, struct rxrpc_call *call,
iov.iov_len = size - *_offset;
iov_iter_kvec(&iter, ITER_KVEC | READ, &iov, 1, size - *_offset);
lock_sock(sock->sk);
mutex_lock(&call->user_mutex);
switch (call->state) {
case RXRPC_CALL_CLIENT_RECV_REPLY:
@ -650,7 +677,7 @@ int rxrpc_kernel_recv_data(struct socket *sock, struct rxrpc_call *call,
read_phase_complete:
ret = 1;
out:
release_sock(sock->sk);
mutex_unlock(&call->user_mutex);
_leave(" = %d [%zu,%d]", ret, *_offset, *_abort);
return ret;
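
recvmsg() now drops the socket lock and serialises on the call mutex instead, honouring the non-blocking case: a failed trylock under MSG_DONTWAIT requeues the call and returns -EWOULDBLOCK rather than sleeping, and a signal while waiting yields -ERESTARTSYS. Reduced to its control flow (a sketch; the helper is invented):

#include <linux/errno.h>
#include <linux/mutex.h>
#include <linux/socket.h>

static int example_lock_call(struct mutex *user_mutex, int flags)
{
	if (mutex_trylock(user_mutex))
		return 0;
	if (flags & MSG_DONTWAIT)
		return -EWOULDBLOCK;	/* caller requeues the call */
	if (mutex_lock_interruptible(user_mutex) < 0)
		return -ERESTARTSYS;	/* signal while waiting */
	return 0;
}
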

View File

@ -61,9 +61,12 @@ static int rxrpc_wait_for_tx_window(struct rxrpc_sock *rx,
}
trace_rxrpc_transmit(call, rxrpc_transmit_wait);
release_sock(&rx->sk);
mutex_unlock(&call->user_mutex);
*timeo = schedule_timeout(*timeo);
lock_sock(&rx->sk);
if (mutex_lock_interruptible(&call->user_mutex) < 0) {
ret = sock_intr_errno(*timeo);
break;
}
}
remove_wait_queue(&call->waitq, &myself);
@ -173,7 +176,7 @@ static void rxrpc_queue_packet(struct rxrpc_call *call, struct sk_buff *skb,
/*
* send data through a socket
* - must be called in process context
* - caller holds the socket locked
* - The caller holds the call user access mutex, but not the socket lock.
*/
static int rxrpc_send_data(struct rxrpc_sock *rx,
struct rxrpc_call *call,
@ -439,10 +442,13 @@ static int rxrpc_sendmsg_cmsg(struct msghdr *msg,
/*
* Create a new client call for sendmsg().
* - Called with the socket lock held, which it must release.
* - If it returns a call, the call's lock will need releasing by the caller.
*/
static struct rxrpc_call *
rxrpc_new_client_call_for_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg,
unsigned long user_call_ID, bool exclusive)
__releases(&rx->sk.sk_lock.slock)
{
struct rxrpc_conn_parameters cp;
struct rxrpc_call *call;
@ -452,8 +458,10 @@ rxrpc_new_client_call_for_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg,
_enter("");
if (!msg->msg_name)
if (!msg->msg_name) {
release_sock(&rx->sk);
return ERR_PTR(-EDESTADDRREQ);
}
key = rx->key;
if (key && !rx->key->payload.data[0])
@ -466,6 +474,7 @@ rxrpc_new_client_call_for_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg,
cp.exclusive = rx->exclusive | exclusive;
cp.service_id = srx->srx_service;
call = rxrpc_new_client_call(rx, &cp, srx, user_call_ID, GFP_KERNEL);
/* The socket is now unlocked */
_leave(" = %p\n", call);
return call;
@ -477,6 +486,7 @@ rxrpc_new_client_call_for_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg,
* - the socket may be either a client socket or a server socket
*/
int rxrpc_do_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len)
__releases(&rx->sk.sk_lock.slock)
{
enum rxrpc_command cmd;
struct rxrpc_call *call;
@ -490,12 +500,14 @@ int rxrpc_do_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len)
ret = rxrpc_sendmsg_cmsg(msg, &user_call_ID, &cmd, &abort_code,
&exclusive);
if (ret < 0)
return ret;
goto error_release_sock;
if (cmd == RXRPC_CMD_ACCEPT) {
ret = -EINVAL;
if (rx->sk.sk_state != RXRPC_SERVER_LISTENING)
return -EINVAL;
goto error_release_sock;
call = rxrpc_accept_call(rx, user_call_ID, NULL);
/* The socket is now unlocked. */
if (IS_ERR(call))
return PTR_ERR(call);
rxrpc_put_call(call, rxrpc_call_put);
@ -504,12 +516,30 @@ int rxrpc_do_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len)
call = rxrpc_find_call_by_user_ID(rx, user_call_ID);
if (!call) {
ret = -EBADSLT;
if (cmd != RXRPC_CMD_SEND_DATA)
return -EBADSLT;
goto error_release_sock;
call = rxrpc_new_client_call_for_sendmsg(rx, msg, user_call_ID,
exclusive);
/* The socket is now unlocked... */
if (IS_ERR(call))
return PTR_ERR(call);
/* ... and we have the call lock. */
} else {
ret = -EBUSY;
if (call->state == RXRPC_CALL_UNINITIALISED ||
call->state == RXRPC_CALL_CLIENT_AWAIT_CONN ||
call->state == RXRPC_CALL_SERVER_PREALLOC ||
call->state == RXRPC_CALL_SERVER_SECURING ||
call->state == RXRPC_CALL_SERVER_ACCEPTING)
goto error_release_sock;
ret = mutex_lock_interruptible(&call->user_mutex);
release_sock(&rx->sk);
if (ret < 0) {
ret = -ERESTARTSYS;
goto error_put;
}
}
_debug("CALL %d USR %lx ST %d on CONN %p",
@ -537,9 +567,15 @@ int rxrpc_do_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len)
ret = rxrpc_send_data(rx, call, msg, len);
}
mutex_unlock(&call->user_mutex);
error_put:
rxrpc_put_call(call, rxrpc_call_put);
_leave(" = %d", ret);
return ret;
error_release_sock:
release_sock(&rx->sk);
return ret;
}
/**
@ -564,7 +600,7 @@ int rxrpc_kernel_send_data(struct socket *sock, struct rxrpc_call *call,
ASSERTCMP(msg->msg_name, ==, NULL);
ASSERTCMP(msg->msg_control, ==, NULL);
lock_sock(sock->sk);
mutex_lock(&call->user_mutex);
_debug("CALL %d USR %lx ST %d on CONN %p",
call->debug_id, call->user_call_ID, call->state, call->conn);
@ -579,7 +615,7 @@ int rxrpc_kernel_send_data(struct socket *sock, struct rxrpc_call *call,
ret = rxrpc_send_data(rxrpc_sk(sock->sk), call, msg, len);
}
release_sock(sock->sk);
mutex_unlock(&call->user_mutex);
_leave(" = %d", ret);
return ret;
}
@ -600,12 +636,12 @@ void rxrpc_kernel_abort_call(struct socket *sock, struct rxrpc_call *call,
{
_enter("{%d},%d,%d,%s", call->debug_id, abort_code, error, why);
lock_sock(sock->sk);
mutex_lock(&call->user_mutex);
if (rxrpc_abort_call(why, call, 0, abort_code, error))
rxrpc_send_abort_packet(call);
release_sock(sock->sk);
mutex_unlock(&call->user_mutex);
_leave("");
}

View File

@ -884,14 +884,17 @@ int sctp_hash_transport(struct sctp_transport *t)
arg.paddr = &t->ipaddr;
arg.lport = htons(t->asoc->base.bind_addr.port);
rcu_read_lock();
list = rhltable_lookup(&sctp_transport_hashtable, &arg,
sctp_hash_params);
rhl_for_each_entry_rcu(transport, tmp, list, node)
if (transport->asoc->ep == t->asoc->ep) {
rcu_read_unlock();
err = -EEXIST;
goto out;
}
rcu_read_unlock();
err = rhltable_insert_key(&sctp_transport_hashtable, &arg,
&t->node, sctp_hash_params);
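
rhltable_lookup() returns an RCU-protected list, so walking it requires a read-side critical section; the fix brackets the duplicate scan with rcu_read_lock()/rcu_read_unlock(), including on the early -EEXIST exit. The general shape of such a check (a sketch; the rhashtable interfaces are real, the table layout and match rule are illustrative):

#include <linux/errno.h>
#include <linux/rcupdate.h>
#include <linux/rhashtable.h>

struct example_entry {
	struct rhlist_head node;
	u32 key;
};

/* Walk one rhltable bucket under RCU looking for a duplicate. */
static int example_check_dup(struct rhltable *ht, const u32 *key,
			     const struct rhashtable_params params)
{
	struct rhlist_head *list, *tmp;
	struct example_entry *e;
	int err = 0;

	rcu_read_lock();
	list = rhltable_lookup(ht, key, params);
	rhl_for_each_entry_rcu(e, tmp, list, node) {
		if (e->key == *key) {
			err = -EEXIST;
			break;		/* the unlock below still runs */
		}
	}
	rcu_read_unlock();
	return err;
}
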

View File

@ -504,6 +504,7 @@ static int __init strp_mod_init(void)
static void __exit strp_mod_exit(void)
{
destroy_workqueue(strp_wq);
}
module_init(strp_mod_init);
module_exit(strp_mod_exit);
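
Without the destroy_workqueue() call, the workqueue created in strp_mod_init() outlived the module: it leaked, and any pending work could still run after the module text was gone. The canonical init/exit pairing, as a minimal module sketch (names illustrative):

#include <linux/module.h>
#include <linux/workqueue.h>

static struct workqueue_struct *example_wq;

static int __init example_init(void)
{
	example_wq = create_singlethread_workqueue("example_wq");
	if (!example_wq)
		return -ENOMEM;
	return 0;
}

static void __exit example_exit(void)
{
	/* Drains pending work and frees the workqueue; without this
	 * the queue would outlive the code that services it. */
	destroy_workqueue(example_wq);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");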