Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Pull networking fixes from David Miller:

 1) Don't use MMIO on certain iwlwifi devices otherwise we get a
    firmware crash.

 2) Don't corrupt the GRO lists of mac80211 contexts by doing sends via
    timer interrupt, from Johannes Berg.

 3) SKB tailroom is miscalculated in AP_VLAN crypto code, from Michal
    Kazior.

 4) Fix fw_status memory leak in iwlwifi, from Haim Dreyfuss.

 5) Fix use after free in iwl_mvm_d0i3_enable_tx(), from Eliad Peller.

 6) JIT'ing of large BPF programs is broken on x86, from Alexei
    Starovoitov.

 7) EMAC driver ethtool register dump size is miscalculated, from Ivan
    Mikhaylov.

 8) Fix PHY initial link mode when autonegotiation is disabled in
    amd-xgbe, from Tom Lendacky.

 9) Fix NULL deref on SOCK_DEAD socket in AF_UNIX and CAIF protocols,
    from Mark Salyzyn.

10) credit_bytes not initialized properly in xen-netback, from Ross
    Lagerwall.

11) Fallback from MSI-X to INTx interrupts not handled properly in mlx4
    driver, fix from Benjamin Poirier.

12) Perform ->attach() after binding dev->qdisc in packet scheduler,
    otherwise we can crash.  From Cong WANG.

13) Don't clobber data in sctp_v4_map_v6().  From Jason Gunthorpe.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (30 commits)
  sctp: Fix mangled IPv4 addresses on a IPv6 listening socket
  net_sched: invoke ->attach() after setting dev->qdisc
  xen-netfront: properly destroy queues when removing device
  mlx4_core: Fix fallback from MSI-X to INTx
  xen/netback: Properly initialize credit_bytes
  net: netxen: correct sysfs bin attribute return code
  tools: bpf_jit_disasm: fix segfault on disabled debugging log output
  unix/caif: sk_socket can disappear when state is unlocked
  amd-xgbe-phy: Fix initial mode when autoneg is disabled
  net: dp83640: fix improper double spin locking.
  net: dp83640: reinforce locking rules.
  net: dp83640: fix broken calibration routine.
  net: stmmac: create one debugfs dir per net-device
  net/ibm/emac: fix size of emac dump memory areas
  x86: bpf_jit: fix compilation of large bpf programs
  net: phy: bcm7xxx: Fix 7425 PHY ID and flags
  iwlwifi: mvm: avoid use-after-free on iwl_mvm_d0i3_enable_tx()
  iwlwifi: mvm: clean net-detect info if device was reset during suspend
  iwlwifi: mvm: take the UCODE_DOWN reference when resuming
  iwlwifi: mvm: BT Coex - duplicate the command if sent ASYNC
  ...
Linus Torvalds 2015-05-27 13:41:13 -07:00
commit 8f98bcdf8f
37 changed files with 351 additions and 170 deletions

@ -966,7 +966,12 @@ void bpf_int_jit_compile(struct bpf_prog *prog)
}
ctx.cleanup_addr = proglen;
for (pass = 0; pass < 10; pass++) {
/* JITed image shrinks with every pass and the loop iterates
* until the image stops shrinking. Very large bpf programs
* may converge on the last pass. In such case do one more
* pass to emit the final image
*/
for (pass = 0; pass < 10 || image; pass++) {
proglen = do_jit(prog, addrs, image, oldproglen, &ctx);
if (proglen <= 0) {
image = NULL;
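
The hunk above makes the JIT run one extra emission pass once the image buffer has been allocated, so a very large program that only converges on the tenth pass still gets written out. A rough userspace sketch of the same converge-then-emit pattern (do_pass(), the sizes and the NOP filler are invented for illustration; this is not the kernel JIT):

/*
 * Keep running passes while the generated length is still shrinking,
 * allocate the output buffer once it has converged, then run one more
 * pass that actually emits into it.
 */
#include <stdio.h>
#include <stdlib.h>

static int do_pass(unsigned char *image, int prev_len)
{
    static int len = 1200;

    if (len > 512)              /* pretend each pass tightens the code */
        len -= 173;
    if (image) {                /* final pass: emit into the real buffer */
        for (int i = 0; i < len; i++)
            image[i] = 0x90;
    }
    (void)prev_len;
    return len;
}

int main(void)
{
    unsigned char *image = NULL;
    int oldlen = 0, len = 0;

    /* "pass < 10 || image": once the buffer exists, one more pass may
     * run even if convergence only happened on the very last pass. */
    for (int pass = 0; pass < 10 || image; pass++) {
        len = do_pass(image, oldlen);
        if (image)
            break;                      /* emitted, done */
        if (len == oldlen) {            /* converged: allocate the image */
            image = malloc(len);
            if (!image)
                return 1;
        }
        oldlen = len;
    }
    printf("final image length: %d bytes\n", len);
    free(image);
    return 0;
}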

@ -2084,12 +2084,8 @@ static void emac_ethtool_get_pauseparam(struct net_device *ndev,
static int emac_get_regs_len(struct emac_instance *dev)
{
if (emac_has_feature(dev, EMAC_FTR_EMAC4))
return sizeof(struct emac_ethtool_regs_subhdr) +
EMAC4_ETHTOOL_REGS_SIZE(dev);
else
return sizeof(struct emac_ethtool_regs_subhdr) +
EMAC_ETHTOOL_REGS_SIZE(dev);
sizeof(struct emac_regs);
}
static int emac_ethtool_get_regs_len(struct net_device *ndev)
@ -2114,15 +2110,15 @@ static void *emac_dump_regs(struct emac_instance *dev, void *buf)
struct emac_ethtool_regs_subhdr *hdr = buf;
hdr->index = dev->cell_index;
if (emac_has_feature(dev, EMAC_FTR_EMAC4)) {
if (emac_has_feature(dev, EMAC_FTR_EMAC4SYNC)) {
hdr->version = EMAC4SYNC_ETHTOOL_REGS_VER;
} else if (emac_has_feature(dev, EMAC_FTR_EMAC4)) {
hdr->version = EMAC4_ETHTOOL_REGS_VER;
memcpy_fromio(hdr + 1, dev->emacp, EMAC4_ETHTOOL_REGS_SIZE(dev));
return (void *)(hdr + 1) + EMAC4_ETHTOOL_REGS_SIZE(dev);
} else {
hdr->version = EMAC_ETHTOOL_REGS_VER;
memcpy_fromio(hdr + 1, dev->emacp, EMAC_ETHTOOL_REGS_SIZE(dev));
return (void *)(hdr + 1) + EMAC_ETHTOOL_REGS_SIZE(dev);
}
memcpy_fromio(hdr + 1, dev->emacp, sizeof(struct emac_regs));
return (void *)(hdr + 1) + sizeof(struct emac_regs);
}
static void emac_ethtool_get_regs(struct net_device *ndev,

@ -461,10 +461,7 @@ struct emac_ethtool_regs_subhdr {
};
#define EMAC_ETHTOOL_REGS_VER 0
#define EMAC_ETHTOOL_REGS_SIZE(dev) ((dev)->rsrc_regs.end - \
(dev)->rsrc_regs.start + 1)
#define EMAC4_ETHTOOL_REGS_VER 1
#define EMAC4_ETHTOOL_REGS_SIZE(dev) ((dev)->rsrc_regs.end - \
(dev)->rsrc_regs.start + 1)
#define EMAC4_ETHTOOL_REGS_VER 1
#define EMAC4SYNC_ETHTOOL_REGS_VER 2
#endif /* __IBM_NEWEMAC_CORE_H */
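
With the EMAC change above, the length reported by ethtool and the amount copied by emac_dump_regs() are both derived from sizeof(struct emac_regs), so they can no longer drift apart. A simplified, standalone sketch of that header-plus-fixed-size-blob layout (the struct definitions below are stand-ins, not the driver's real ones):

#include <stdio.h>
#include <string.h>
#include <stdint.h>

struct regs_subhdr {            /* stand-in for emac_ethtool_regs_subhdr */
    uint32_t version;
    uint32_t index;
};

struct fake_regs {              /* stand-in for struct emac_regs */
    uint32_t mr0, mr1, tmr0, tmr1;
};

static size_t regs_len(void)
{
    return sizeof(struct regs_subhdr) + sizeof(struct fake_regs);
}

/* Write one subheader plus register block at buf, return the next position. */
static void *dump_regs(const struct fake_regs *hw, int index, void *buf)
{
    struct regs_subhdr *hdr = buf;

    hdr->version = 1;
    hdr->index = index;
    memcpy(hdr + 1, hw, sizeof(*hw));
    return (char *)(hdr + 1) + sizeof(*hw);
}

int main(void)
{
    struct fake_regs hw = { 0x1, 0x2, 0x3, 0x4 };
    unsigned char buf[64];
    void *end = dump_regs(&hw, 0, buf);

    printf("reported len %zu, written %td\n",
           regs_len(), (unsigned char *)end - buf);
    return 0;
}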

@ -714,8 +714,13 @@ static int mlx4_cmd_wait(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
msecs_to_jiffies(timeout))) {
mlx4_warn(dev, "command 0x%x timed out (go bit not cleared)\n",
op);
err = -EIO;
goto out_reset;
if (op == MLX4_CMD_NOP) {
err = -EBUSY;
goto out;
} else {
err = -EIO;
goto out_reset;
}
}
err = context->result;
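
Treating a timed-out NOP as -EBUSY, without resetting the device, is what lets the caller fall back from MSI-X to INTx instead of declaring the command path broken. A hedged sketch of that kind of error-code-driven fallback (the function names below are hypothetical, not mlx4's actual probe path):

#include <stdio.h>
#include <errno.h>

/* Pretend probe helper: issue a NOP with MSI-X enabled and report how
 * the hardware responded. Here we simply simulate the timeout case. */
static int test_msix_nop(void)
{
    int timed_out = 1;          /* simulate: no MSI-X interrupt arrived */

    if (timed_out)
        return -EBUSY;          /* benign: interrupts not working, no reset */
    return 0;
}

static int setup_interrupts(void)
{
    int err = test_msix_nop();

    if (err == -EBUSY) {
        printf("MSI-X test failed, falling back to INTx\n");
        return 0;               /* continue probing with legacy interrupts */
    }
    if (err) {
        printf("fatal command error %d, resetting device\n", err);
        return err;
    }
    printf("MSI-X working\n");
    return 0;
}

int main(void)
{
    return setup_interrupts();
}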

@ -3025,9 +3025,9 @@ netxen_sysfs_read_dimm(struct file *filp, struct kobject *kobj,
u8 dw, rows, cols, banks, ranks;
u32 val;
if (size != sizeof(struct netxen_dimm_cfg)) {
if (size < attr->size) {
netdev_err(netdev, "Invalid size\n");
return -1;
return -EINVAL;
}
memset(&dimm, 0, sizeof(struct netxen_dimm_cfg));
@ -3137,7 +3137,7 @@ out:
static struct bin_attribute bin_attr_dimm = {
.attr = { .name = "dimm", .mode = (S_IRUGO | S_IWUSR) },
.size = 0,
.size = sizeof(struct netxen_dimm_cfg),
.read = netxen_sysfs_read_dimm,
};
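
Returning a bare -1 from a sysfs read callback is indistinguishable from -EPERM, so userspace would see "Operation not permitted" instead of a size error; hence the switch to -EINVAL and the check against the attribute's declared size. A tiny standalone illustration of the errno confusion:

#include <stdio.h>
#include <errno.h>
#include <string.h>

int main(void)
{
    /* EPERM is 1 on Linux, so a callback returning -1 is reported to
     * userspace as "Operation not permitted" rather than a size error. */
    printf("-1 == -EPERM? %s\n", -1 == -EPERM ? "yes" : "no");
    printf("-EINVAL maps to: %s\n", strerror(EINVAL));
    return 0;
}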

@ -117,6 +117,12 @@ struct stmmac_priv {
int use_riwt;
int irq_wake;
spinlock_t ptp_lock;
#ifdef CONFIG_DEBUG_FS
struct dentry *dbgfs_dir;
struct dentry *dbgfs_rings_status;
struct dentry *dbgfs_dma_cap;
#endif
};
int stmmac_mdio_unregister(struct net_device *ndev);

@ -118,7 +118,7 @@ static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
#ifdef CONFIG_DEBUG_FS
static int stmmac_init_fs(struct net_device *dev);
static void stmmac_exit_fs(void);
static void stmmac_exit_fs(struct net_device *dev);
#endif
#define STMMAC_COAL_TIMER(x) (jiffies + usecs_to_jiffies(x))
@ -1916,7 +1916,7 @@ static int stmmac_release(struct net_device *dev)
netif_carrier_off(dev);
#ifdef CONFIG_DEBUG_FS
stmmac_exit_fs();
stmmac_exit_fs(dev);
#endif
stmmac_release_ptp(priv);
@ -2508,8 +2508,6 @@ static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
#ifdef CONFIG_DEBUG_FS
static struct dentry *stmmac_fs_dir;
static struct dentry *stmmac_rings_status;
static struct dentry *stmmac_dma_cap;
static void sysfs_display_ring(void *head, int size, int extend_desc,
struct seq_file *seq)
@ -2648,36 +2646,39 @@ static const struct file_operations stmmac_dma_cap_fops = {
static int stmmac_init_fs(struct net_device *dev)
{
/* Create debugfs entries */
stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
struct stmmac_priv *priv = netdev_priv(dev);
if (!stmmac_fs_dir || IS_ERR(stmmac_fs_dir)) {
pr_err("ERROR %s, debugfs create directory failed\n",
STMMAC_RESOURCE_NAME);
/* Create per netdev entries */
priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);
if (!priv->dbgfs_dir || IS_ERR(priv->dbgfs_dir)) {
pr_err("ERROR %s/%s, debugfs create directory failed\n",
STMMAC_RESOURCE_NAME, dev->name);
return -ENOMEM;
}
/* Entry to report DMA RX/TX rings */
stmmac_rings_status = debugfs_create_file("descriptors_status",
S_IRUGO, stmmac_fs_dir, dev,
&stmmac_rings_status_fops);
priv->dbgfs_rings_status =
debugfs_create_file("descriptors_status", S_IRUGO,
priv->dbgfs_dir, dev,
&stmmac_rings_status_fops);
if (!stmmac_rings_status || IS_ERR(stmmac_rings_status)) {
if (!priv->dbgfs_rings_status || IS_ERR(priv->dbgfs_rings_status)) {
pr_info("ERROR creating stmmac ring debugfs file\n");
debugfs_remove(stmmac_fs_dir);
debugfs_remove_recursive(priv->dbgfs_dir);
return -ENOMEM;
}
/* Entry to report the DMA HW features */
stmmac_dma_cap = debugfs_create_file("dma_cap", S_IRUGO, stmmac_fs_dir,
dev, &stmmac_dma_cap_fops);
priv->dbgfs_dma_cap = debugfs_create_file("dma_cap", S_IRUGO,
priv->dbgfs_dir,
dev, &stmmac_dma_cap_fops);
if (!stmmac_dma_cap || IS_ERR(stmmac_dma_cap)) {
if (!priv->dbgfs_dma_cap || IS_ERR(priv->dbgfs_dma_cap)) {
pr_info("ERROR creating stmmac MMC debugfs file\n");
debugfs_remove(stmmac_rings_status);
debugfs_remove(stmmac_fs_dir);
debugfs_remove_recursive(priv->dbgfs_dir);
return -ENOMEM;
}
@ -2685,11 +2686,11 @@ static int stmmac_init_fs(struct net_device *dev)
return 0;
}
static void stmmac_exit_fs(void)
static void stmmac_exit_fs(struct net_device *dev)
{
debugfs_remove(stmmac_rings_status);
debugfs_remove(stmmac_dma_cap);
debugfs_remove(stmmac_fs_dir);
struct stmmac_priv *priv = netdev_priv(dev);
debugfs_remove_recursive(priv->dbgfs_dir);
}
#endif /* CONFIG_DEBUG_FS */
@ -3149,6 +3150,35 @@ err:
__setup("stmmaceth=", stmmac_cmdline_opt);
#endif /* MODULE */
static int __init stmmac_init(void)
{
#ifdef CONFIG_DEBUG_FS
/* Create debugfs main directory if it doesn't exist yet */
if (!stmmac_fs_dir) {
stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
if (!stmmac_fs_dir || IS_ERR(stmmac_fs_dir)) {
pr_err("ERROR %s, debugfs create directory failed\n",
STMMAC_RESOURCE_NAME);
return -ENOMEM;
}
}
#endif
return 0;
}
static void __exit stmmac_exit(void)
{
#ifdef CONFIG_DEBUG_FS
debugfs_remove_recursive(stmmac_fs_dir);
#endif
}
module_init(stmmac_init)
module_exit(stmmac_exit)
MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
MODULE_LICENSE("GPL");

@ -755,6 +755,45 @@ static int amd_xgbe_phy_set_mode(struct phy_device *phydev,
return ret;
}
static bool amd_xgbe_phy_use_xgmii_mode(struct phy_device *phydev)
{
if (phydev->autoneg == AUTONEG_ENABLE) {
if (phydev->advertising & ADVERTISED_10000baseKR_Full)
return true;
} else {
if (phydev->speed == SPEED_10000)
return true;
}
return false;
}
static bool amd_xgbe_phy_use_gmii_2500_mode(struct phy_device *phydev)
{
if (phydev->autoneg == AUTONEG_ENABLE) {
if (phydev->advertising & ADVERTISED_2500baseX_Full)
return true;
} else {
if (phydev->speed == SPEED_2500)
return true;
}
return false;
}
static bool amd_xgbe_phy_use_gmii_mode(struct phy_device *phydev)
{
if (phydev->autoneg == AUTONEG_ENABLE) {
if (phydev->advertising & ADVERTISED_1000baseKX_Full)
return true;
} else {
if (phydev->speed == SPEED_1000)
return true;
}
return false;
}
static int amd_xgbe_phy_set_an(struct phy_device *phydev, bool enable,
bool restart)
{
@ -1235,11 +1274,11 @@ static int amd_xgbe_phy_config_init(struct phy_device *phydev)
/* Set initial mode - call the mode setting routines
* directly to insure we are properly configured
*/
if (phydev->advertising & SUPPORTED_10000baseKR_Full)
if (amd_xgbe_phy_use_xgmii_mode(phydev))
ret = amd_xgbe_phy_xgmii_mode(phydev);
else if (phydev->advertising & SUPPORTED_1000baseKX_Full)
else if (amd_xgbe_phy_use_gmii_mode(phydev))
ret = amd_xgbe_phy_gmii_mode(phydev);
else if (phydev->advertising & SUPPORTED_2500baseX_Full)
else if (amd_xgbe_phy_use_gmii_2500_mode(phydev))
ret = amd_xgbe_phy_gmii_2500_mode(phydev);
else
ret = -EINVAL;

@ -404,7 +404,7 @@ static struct phy_driver bcm7xxx_driver[] = {
.name = "Broadcom BCM7425",
.features = PHY_GBIT_FEATURES |
SUPPORTED_Pause | SUPPORTED_Asym_Pause,
.flags = 0,
.flags = PHY_IS_INTERNAL,
.config_init = bcm7xxx_config_init,
.config_aneg = genphy_config_aneg,
.read_status = genphy_read_status,

@ -47,7 +47,7 @@
#define PSF_TX 0x1000
#define EXT_EVENT 1
#define CAL_EVENT 7
#define CAL_TRIGGER 7
#define CAL_TRIGGER 1
#define DP83640_N_PINS 12
#define MII_DP83640_MICR 0x11
@ -496,7 +496,9 @@ static int ptp_dp83640_enable(struct ptp_clock_info *ptp,
else
evnt |= EVNT_RISE;
}
mutex_lock(&clock->extreg_lock);
ext_write(0, phydev, PAGE5, PTP_EVNT, evnt);
mutex_unlock(&clock->extreg_lock);
return 0;
case PTP_CLK_REQ_PEROUT:
@ -532,6 +534,8 @@ static u8 status_frame_src[6] = { 0x08, 0x00, 0x17, 0x0B, 0x6B, 0x0F };
static void enable_status_frames(struct phy_device *phydev, bool on)
{
struct dp83640_private *dp83640 = phydev->priv;
struct dp83640_clock *clock = dp83640->clock;
u16 cfg0 = 0, ver;
if (on)
@ -539,9 +543,13 @@ static void enable_status_frames(struct phy_device *phydev, bool on)
ver = (PSF_PTPVER & VERSIONPTP_MASK) << VERSIONPTP_SHIFT;
mutex_lock(&clock->extreg_lock);
ext_write(0, phydev, PAGE5, PSF_CFG0, cfg0);
ext_write(0, phydev, PAGE6, PSF_CFG1, ver);
mutex_unlock(&clock->extreg_lock);
if (!phydev->attached_dev) {
pr_warn("expected to find an attached netdevice\n");
return;
@ -838,7 +846,7 @@ static void decode_rxts(struct dp83640_private *dp83640,
list_del_init(&rxts->list);
phy2rxts(phy_rxts, rxts);
spin_lock_irqsave(&dp83640->rx_queue.lock, flags);
spin_lock(&dp83640->rx_queue.lock);
skb_queue_walk(&dp83640->rx_queue, skb) {
struct dp83640_skb_info *skb_info;
@ -853,7 +861,7 @@ static void decode_rxts(struct dp83640_private *dp83640,
break;
}
}
spin_unlock_irqrestore(&dp83640->rx_queue.lock, flags);
spin_unlock(&dp83640->rx_queue.lock);
if (!shhwtstamps)
list_add_tail(&rxts->list, &dp83640->rxts);
@ -1173,11 +1181,18 @@ static int dp83640_config_init(struct phy_device *phydev)
if (clock->chosen && !list_empty(&clock->phylist))
recalibrate(clock);
else
else {
mutex_lock(&clock->extreg_lock);
enable_broadcast(phydev, clock->page, 1);
mutex_unlock(&clock->extreg_lock);
}
enable_status_frames(phydev, true);
mutex_lock(&clock->extreg_lock);
ext_write(0, phydev, PAGE4, PTP_CTL, PTP_ENABLE);
mutex_unlock(&clock->extreg_lock);
return 0;
}

@ -21,6 +21,7 @@ config IWLWIFI
Intel 7260 Wi-Fi Adapter
Intel 3160 Wi-Fi Adapter
Intel 7265 Wi-Fi Adapter
Intel 3165 Wi-Fi Adapter
This driver uses the kernel's mac80211 subsystem.

@ -70,15 +70,14 @@
/* Highest firmware API version supported */
#define IWL7260_UCODE_API_MAX 13
#define IWL3160_UCODE_API_MAX 13
/* Oldest version we won't warn about */
#define IWL7260_UCODE_API_OK 12
#define IWL3160_UCODE_API_OK 12
#define IWL3165_UCODE_API_OK 13
/* Lowest firmware API version supported */
#define IWL7260_UCODE_API_MIN 10
#define IWL3160_UCODE_API_MIN 10
#define IWL3165_UCODE_API_MIN 13
/* NVM versions */
#define IWL7260_NVM_VERSION 0x0a1d
@ -104,9 +103,6 @@
#define IWL3160_FW_PRE "iwlwifi-3160-"
#define IWL3160_MODULE_FIRMWARE(api) IWL3160_FW_PRE __stringify(api) ".ucode"
#define IWL3165_FW_PRE "iwlwifi-3165-"
#define IWL3165_MODULE_FIRMWARE(api) IWL3165_FW_PRE __stringify(api) ".ucode"
#define IWL7265_FW_PRE "iwlwifi-7265-"
#define IWL7265_MODULE_FIRMWARE(api) IWL7265_FW_PRE __stringify(api) ".ucode"
@ -248,8 +244,13 @@ static const struct iwl_ht_params iwl7265_ht_params = {
const struct iwl_cfg iwl3165_2ac_cfg = {
.name = "Intel(R) Dual Band Wireless AC 3165",
.fw_name_pre = IWL3165_FW_PRE,
.fw_name_pre = IWL7265D_FW_PRE,
IWL_DEVICE_7000,
/* sparse doesn't like the re-assignment but it is safe */
#ifndef __CHECKER__
.ucode_api_ok = IWL3165_UCODE_API_OK,
.ucode_api_min = IWL3165_UCODE_API_MIN,
#endif
.ht_params = &iwl7000_ht_params,
.nvm_ver = IWL3165_NVM_VERSION,
.nvm_calib_ver = IWL3165_TX_POWER_VERSION,
@ -325,6 +326,5 @@ const struct iwl_cfg iwl7265d_n_cfg = {
MODULE_FIRMWARE(IWL7260_MODULE_FIRMWARE(IWL7260_UCODE_API_OK));
MODULE_FIRMWARE(IWL3160_MODULE_FIRMWARE(IWL3160_UCODE_API_OK));
MODULE_FIRMWARE(IWL3165_MODULE_FIRMWARE(IWL3160_UCODE_API_OK));
MODULE_FIRMWARE(IWL7265_MODULE_FIRMWARE(IWL7260_UCODE_API_OK));
MODULE_FIRMWARE(IWL7265D_MODULE_FIRMWARE(IWL7260_UCODE_API_OK));

@ -6,6 +6,7 @@
* GPL LICENSE SUMMARY
*
* Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2015 Intel Mobile Communications GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@ -31,6 +32,7 @@
* BSD LICENSE
*
* Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2015 Intel Mobile Communications GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -748,6 +750,9 @@ void iwl_init_ht_hw_capab(const struct iwl_cfg *cfg,
return;
}
if (data->sku_cap_mimo_disabled)
rx_chains = 1;
ht_info->ht_supported = true;
ht_info->cap = IEEE80211_HT_CAP_DSSSCCK40;

@ -6,6 +6,7 @@
* GPL LICENSE SUMMARY
*
* Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2015 Intel Mobile Communications GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@ -31,6 +32,7 @@
* BSD LICENSE
*
* Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2015 Intel Mobile Communications GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -84,6 +86,7 @@ struct iwl_nvm_data {
bool sku_cap_11ac_enable;
bool sku_cap_amt_enable;
bool sku_cap_ipan_enable;
bool sku_cap_mimo_disabled;
u16 radio_cfg_type;
u8 radio_cfg_step;

@ -6,7 +6,7 @@
* GPL LICENSE SUMMARY
*
* Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@ -32,7 +32,7 @@
* BSD LICENSE
*
* Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -116,10 +116,11 @@ enum family_8000_nvm_offsets {
/* SKU Capabilities (actual values from NVM definition) */
enum nvm_sku_bits {
NVM_SKU_CAP_BAND_24GHZ = BIT(0),
NVM_SKU_CAP_BAND_52GHZ = BIT(1),
NVM_SKU_CAP_11N_ENABLE = BIT(2),
NVM_SKU_CAP_11AC_ENABLE = BIT(3),
NVM_SKU_CAP_BAND_24GHZ = BIT(0),
NVM_SKU_CAP_BAND_52GHZ = BIT(1),
NVM_SKU_CAP_11N_ENABLE = BIT(2),
NVM_SKU_CAP_11AC_ENABLE = BIT(3),
NVM_SKU_CAP_MIMO_DISABLE = BIT(5),
};
/*
@ -368,6 +369,11 @@ static void iwl_init_vht_hw_capab(const struct iwl_cfg *cfg,
if (cfg->ht_params->ldpc)
vht_cap->cap |= IEEE80211_VHT_CAP_RXLDPC;
if (data->sku_cap_mimo_disabled) {
num_rx_ants = 1;
num_tx_ants = 1;
}
if (num_tx_ants > 1)
vht_cap->cap |= IEEE80211_VHT_CAP_TXSTBC;
else
@ -527,6 +533,10 @@ static void iwl_set_hw_address_family_8000(struct device *dev,
const u8 *hw_addr;
if (mac_override) {
static const u8 reserved_mac[] = {
0x02, 0xcc, 0xaa, 0xff, 0xee, 0x00
};
hw_addr = (const u8 *)(mac_override +
MAC_ADDRESS_OVERRIDE_FAMILY_8000);
@ -538,7 +548,12 @@ static void iwl_set_hw_address_family_8000(struct device *dev,
data->hw_addr[4] = hw_addr[5];
data->hw_addr[5] = hw_addr[4];
if (is_valid_ether_addr(data->hw_addr))
/*
* Force the use of the OTP MAC address in case of reserved MAC
* address in the NVM, or if address is given but invalid.
*/
if (is_valid_ether_addr(data->hw_addr) &&
memcmp(reserved_mac, hw_addr, ETH_ALEN) != 0)
return;
IWL_ERR_DEV(dev,
@ -610,6 +625,7 @@ iwl_parse_nvm_data(struct device *dev, const struct iwl_cfg *cfg,
data->sku_cap_11n_enable = false;
data->sku_cap_11ac_enable = data->sku_cap_11n_enable &&
(sku & NVM_SKU_CAP_11AC_ENABLE);
data->sku_cap_mimo_disabled = sku & NVM_SKU_CAP_MIMO_DISABLE;
data->n_hw_addrs = iwl_get_n_hw_addrs(cfg, nvm_sw);
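
The NVM parser above now ignores a MAC override that matches the reserved pattern 02:cc:aa:ff:ee:00 (or is otherwise invalid) and keeps the OTP address, in addition to honouring the new MIMO-disable SKU bit. A standalone sketch of that style of validation (valid_ether_addr() below is a simplified stand-in for the kernel helper, and the copy direction is illustrative only):

#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define ETH_ALEN 6

static const uint8_t reserved_mac[ETH_ALEN] = {
    0x02, 0xcc, 0xaa, 0xff, 0xee, 0x00
};

/* Simplified validity check: not multicast, not all-zero. */
static int valid_ether_addr(const uint8_t *a)
{
    static const uint8_t zero[ETH_ALEN];

    return !(a[0] & 0x01) && memcmp(a, zero, ETH_ALEN) != 0;
}

/* Use the override only if it is valid and not the reserved pattern;
 * otherwise keep the address already read from OTP. */
static void pick_hw_addr(uint8_t *hw_addr, const uint8_t *override)
{
    if (override && valid_ether_addr(override) &&
        memcmp(override, reserved_mac, ETH_ALEN) != 0)
        memcpy(hw_addr, override, ETH_ALEN);
}

int main(void)
{
    uint8_t otp[ETH_ALEN] = { 0x00, 0x24, 0xd7, 0x12, 0x34, 0x56 };

    pick_hw_addr(otp, reserved_mac);    /* ignored: reserved pattern */
    printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
           otp[0], otp[1], otp[2], otp[3], otp[4], otp[5]);
    return 0;
}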

@ -776,7 +776,7 @@ static int iwl_mvm_bt_coex_reduced_txp(struct iwl_mvm *mvm, u8 sta_id,
struct iwl_host_cmd cmd = {
.id = BT_CONFIG,
.len = { sizeof(*bt_cmd), },
.dataflags = { IWL_HCMD_DFL_NOCOPY, },
.dataflags = { IWL_HCMD_DFL_DUP, },
.flags = CMD_ASYNC,
};
struct iwl_mvm_sta *mvmsta;

@ -1750,8 +1750,10 @@ static void iwl_mvm_query_netdetect_reasons(struct iwl_mvm *mvm,
int i, j, n_matches, ret;
fw_status = iwl_mvm_get_wakeup_status(mvm, vif);
if (!IS_ERR_OR_NULL(fw_status))
if (!IS_ERR_OR_NULL(fw_status)) {
reasons = le32_to_cpu(fw_status->wakeup_reasons);
kfree(fw_status);
}
if (reasons & IWL_WOWLAN_WAKEUP_BY_RFKILL_DEASSERTED)
wakeup.rfkill_release = true;
@ -1868,15 +1870,15 @@ static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
/* get the BSS vif pointer again */
vif = iwl_mvm_get_bss_vif(mvm);
if (IS_ERR_OR_NULL(vif))
goto out_unlock;
goto err;
ret = iwl_trans_d3_resume(mvm->trans, &d3_status, test);
if (ret)
goto out_unlock;
goto err;
if (d3_status != IWL_D3_STATUS_ALIVE) {
IWL_INFO(mvm, "Device was reset during suspend\n");
goto out_unlock;
goto err;
}
/* query SRAM first in case we want event logging */
@ -1902,7 +1904,8 @@ static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
goto out_iterate;
}
out_unlock:
err:
iwl_mvm_free_nd(mvm);
mutex_unlock(&mvm->mutex);
out_iterate:
@ -1915,6 +1918,14 @@ out:
/* return 1 to reconfigure the device */
set_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status);
set_bit(IWL_MVM_STATUS_D3_RECONFIG, &mvm->status);
/* We always return 1, which causes mac80211 to do a reconfig
* with IEEE80211_RECONFIG_TYPE_RESTART. This type of
* reconfig calls iwl_mvm_restart_complete(), where we unref
* the IWL_MVM_REF_UCODE_DOWN, so we need to take the
* reference here.
*/
iwl_mvm_ref(mvm, IWL_MVM_REF_UCODE_DOWN);
return 1;
}
@ -2021,7 +2032,6 @@ static int iwl_mvm_d3_test_release(struct inode *inode, struct file *file)
__iwl_mvm_resume(mvm, true);
rtnl_unlock();
iwl_abort_notification_waits(&mvm->notif_wait);
iwl_mvm_ref(mvm, IWL_MVM_REF_UCODE_DOWN);
ieee80211_restart_hw(mvm->hw);
/* wait for restart and disconnect all interfaces */

@ -3995,9 +3995,6 @@ static void iwl_mvm_mac_event_callback(struct ieee80211_hw *hw,
if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_MLME))
return;
if (event->u.mlme.status == MLME_SUCCESS)
return;
trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_MLME);
trig_mlme = (void *)trig->data;
if (!iwl_fw_dbg_trigger_check_stop(mvm, vif, trig))

@ -1263,11 +1263,13 @@ static void iwl_mvm_d0i3_exit_work(struct work_struct *wk)
ieee80211_iterate_active_interfaces(
mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
iwl_mvm_d0i3_disconnect_iter, mvm);
iwl_free_resp(&get_status_cmd);
out:
iwl_mvm_d0i3_enable_tx(mvm, qos_seq);
/* qos_seq might point inside resp_pkt, so free it only now */
if (get_status_cmd.resp_pkt)
iwl_free_resp(&get_status_cmd);
/* the FW might have updated the regdomain */
iwl_mvm_update_changed_regdom(mvm);
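
The d0i3 change is purely a lifetime-ordering fix: qos_seq may point inside the command response, so the response can only be freed once iwl_mvm_d0i3_enable_tx() has consumed it. A generic standalone illustration of the consume-then-free pattern (the names here are made up):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct resp {
    int qos_seq[4];             /* pretend this lives inside the response */
};

static void enable_tx(const int *qos_seq)
{
    printf("restoring seq %d\n", qos_seq[0]);
}

int main(void)
{
    struct resp *r = malloc(sizeof(*r));
    const int *qos_seq;

    if (!r)
        return 1;
    memset(r, 0, sizeof(*r));
    r->qos_seq[0] = 42;
    qos_seq = r->qos_seq;       /* borrowed pointer into the response */

    /* Wrong order: free(r); enable_tx(qos_seq);  -- use-after-free.
     * Correct order, as in the patch: consume first, free last. */
    enable_tx(qos_seq);
    free(r);
    return 0;
}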

@ -180,6 +180,9 @@ static bool rs_mimo_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
if (iwl_mvm_vif_low_latency(mvmvif) && mvmsta->vif->p2p)
return false;
if (mvm->nvm_data->sku_cap_mimo_disabled)
return false;
return true;
}

@ -1049,9 +1049,11 @@ static void iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power)
iwl_pcie_rx_stop(trans);
/* Power-down device's busmaster DMA clocks */
iwl_write_prph(trans, APMG_CLK_DIS_REG,
APMG_CLK_VAL_DMA_CLK_RQT);
udelay(5);
if (trans->cfg->device_family != IWL_DEVICE_FAMILY_8000) {
iwl_write_prph(trans, APMG_CLK_DIS_REG,
APMG_CLK_VAL_DMA_CLK_RQT);
udelay(5);
}
}
/* Make sure (redundant) we've released our request to stay awake */

@ -793,6 +793,7 @@ static void connect(struct backend_info *be)
goto err;
}
queue->credit_bytes = credit_bytes;
queue->remaining_credit = credit_bytes;
queue->credit_usec = credit_usec;
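
credit_bytes and credit_usec implement a simple per-queue token bucket, and without remaining_credit being seeded a queue could start out unable to transmit at all. A rough userspace sketch of that bucket logic (the replenish policy and names are illustrative, not xen-netback's exact code):

#include <stdio.h>

struct queue {
    unsigned long credit_bytes;         /* bucket size per period */
    unsigned long credit_usec;          /* period length */
    unsigned long remaining_credit;     /* what is left right now */
};

static void queue_init(struct queue *q, unsigned long bytes, unsigned long usec)
{
    q->credit_bytes = bytes;
    q->credit_usec = usec;
    q->remaining_credit = bytes;        /* the line the patch adds */
}

static int try_send(struct queue *q, unsigned long pkt_len)
{
    if (pkt_len > q->remaining_credit)
        return 0;                       /* throttled until replenish */
    q->remaining_credit -= pkt_len;
    return 1;
}

static void replenish(struct queue *q)  /* called once per credit_usec */
{
    q->remaining_credit = q->credit_bytes;
}

int main(void)
{
    struct queue q;

    queue_init(&q, 3000, 1000);
    printf("send 1500: %d\n", try_send(&q, 1500));
    printf("send 1500: %d\n", try_send(&q, 1500));
    printf("send 1500: %d\n", try_send(&q, 1500));      /* throttled */
    replenish(&q);
    printf("after replenish, send 1500: %d\n", try_send(&q, 1500));
    return 0;
}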

@ -1698,6 +1698,7 @@ static void xennet_destroy_queues(struct netfront_info *info)
if (netif_running(info->netdev))
napi_disable(&queue->napi);
del_timer_sync(&queue->rx_refill_timer);
netif_napi_del(&queue->napi);
}
@ -2102,9 +2103,6 @@ static const struct attribute_group xennet_dev_group = {
static int xennet_remove(struct xenbus_device *dev)
{
struct netfront_info *info = dev_get_drvdata(&dev->dev);
unsigned int num_queues = info->netdev->real_num_tx_queues;
struct netfront_queue *queue = NULL;
unsigned int i = 0;
dev_dbg(&dev->dev, "%s\n", dev->nodename);
@ -2112,16 +2110,7 @@ static int xennet_remove(struct xenbus_device *dev)
unregister_netdev(info->netdev);
for (i = 0; i < num_queues; ++i) {
queue = &info->queues[i];
del_timer_sync(&queue->rx_refill_timer);
}
if (num_queues) {
kfree(info->queues);
info->queues = NULL;
}
xennet_destroy_queues(info);
xennet_free_netdev(info->netdev);
return 0;

@ -359,12 +359,13 @@ static void ssb_pcicore_init_hostmode(struct ssb_pcicore *pc)
/*
* Accessing PCI config without a proper delay after devices reset (not
* GPIO reset) was causing reboots on WRT300N v1.0.
* GPIO reset) was causing reboots on WRT300N v1.0 (BCM4704).
* Tested delay 850 us lowered reboot chance to 50-80%, 1000 us fixed it
* completely. Flushing all writes was also tested but with no luck.
* The same problem was reported for WRT350N v1 (BCM4705), so we just
* sleep here unconditionally.
*/
if (pc->dev->bus->chip_id == 0x4704)
usleep_range(1000, 2000);
usleep_range(1000, 2000);
/* Enable PCI bridge BAR0 prefetch and burst */
val = PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY;

@ -17,7 +17,7 @@
#define PHY_ID_BCM7250 0xae025280
#define PHY_ID_BCM7364 0xae025260
#define PHY_ID_BCM7366 0x600d8490
#define PHY_ID_BCM7425 0x03625e60
#define PHY_ID_BCM7425 0x600d86b0
#define PHY_ID_BCM7429 0x600d8730
#define PHY_ID_BCM7439 0x600d8480
#define PHY_ID_BCM7439_2 0xae025080

@ -574,11 +574,14 @@ static inline void sctp_v6_map_v4(union sctp_addr *addr)
/* Map v4 address to v4-mapped v6 address */
static inline void sctp_v4_map_v6(union sctp_addr *addr)
{
__be16 port;
port = addr->v4.sin_port;
addr->v6.sin6_addr.s6_addr32[3] = addr->v4.sin_addr.s_addr;
addr->v6.sin6_port = port;
addr->v6.sin6_family = AF_INET6;
addr->v6.sin6_flowinfo = 0;
addr->v6.sin6_scope_id = 0;
addr->v6.sin6_port = addr->v4.sin_port;
addr->v6.sin6_addr.s6_addr32[3] = addr->v4.sin_addr.s_addr;
addr->v6.sin6_addr.s6_addr32[0] = 0;
addr->v6.sin6_addr.s6_addr32[1] = 0;
addr->v6.sin6_addr.s6_addr32[2] = htonl(0x0000ffff);
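
The sctp_v4_map_v6() bug is a union-aliasing ordering problem: sin6_flowinfo occupies the same bytes as sin_addr, so zeroing it before the IPv4 address has been read wipes the very address being mapped; the fix copies the port and address out first. A standalone demonstration using the standard sockaddr layouts:

#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include <arpa/inet.h>
#include <netinet/in.h>

union addr {
    struct sockaddr_in v4;
    struct sockaddr_in6 v6;
};

/* Buggy ordering: sin6_flowinfo shares storage with sin_addr, so the
 * IPv4 address is clobbered before it gets copied. */
static void map_v4_to_v6_buggy(union addr *a)
{
    a->v6.sin6_family = AF_INET6;
    a->v6.sin6_flowinfo = 0;                            /* overwrites sin_addr */
    a->v6.sin6_scope_id = 0;
    a->v6.sin6_port = a->v4.sin_port;
    a->v6.sin6_addr.s6_addr32[3] = a->v4.sin_addr.s_addr;  /* too late */
    a->v6.sin6_addr.s6_addr32[2] = htonl(0x0000ffff);
    a->v6.sin6_addr.s6_addr32[1] = 0;
    a->v6.sin6_addr.s6_addr32[0] = 0;
}

/* Fixed ordering, as in the patch: read port and address first. */
static void map_v4_to_v6(union addr *a)
{
    in_port_t port = a->v4.sin_port;
    uint32_t s_addr = a->v4.sin_addr.s_addr;

    a->v6.sin6_family = AF_INET6;
    a->v6.sin6_flowinfo = 0;
    a->v6.sin6_scope_id = 0;
    a->v6.sin6_port = port;
    a->v6.sin6_addr.s6_addr32[3] = s_addr;
    a->v6.sin6_addr.s6_addr32[2] = htonl(0x0000ffff);
    a->v6.sin6_addr.s6_addr32[1] = 0;
    a->v6.sin6_addr.s6_addr32[0] = 0;
}

static void show(const char *tag, void (*map)(union addr *))
{
    union addr a;
    char buf[INET6_ADDRSTRLEN];

    memset(&a, 0, sizeof(a));
    a.v4.sin_family = AF_INET;
    a.v4.sin_port = htons(5000);
    inet_pton(AF_INET, "192.0.2.1", &a.v4.sin_addr);
    map(&a);
    inet_ntop(AF_INET6, &a.v6.sin6_addr, buf, sizeof(buf));
    printf("%s: %s\n", tag, buf);
}

int main(void)
{
    show("buggy", map_v4_to_v6_buggy);  /* address comes out wiped */
    show("fixed", map_v4_to_v6);        /* prints ::ffff:192.0.2.1 */
    return 0;
}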

@ -330,6 +330,10 @@ static long caif_stream_data_wait(struct sock *sk, long timeo)
release_sock(sk);
timeo = schedule_timeout(timeo);
lock_sock(sk);
if (sock_flag(sk, SOCK_DEAD))
break;
clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
}
@ -373,6 +377,10 @@ static int caif_stream_recvmsg(struct socket *sock, struct msghdr *msg,
struct sk_buff *skb;
lock_sock(sk);
if (sock_flag(sk, SOCK_DEAD)) {
err = -ECONNRESET;
goto unlock;
}
skb = skb_dequeue(&sk->sk_receive_queue);
caif_check_flow_release(sk);
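
Both this hunk and the matching af_unix hunk further down apply the same rule: any state observed before the socket lock was dropped must be rechecked after it is reacquired, because the peer can tear the socket down in between. A generic pthreads sketch of that recheck-after-relock discipline (build with -pthread; the flags below stand in for SOCK_DEAD and pending data):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static int dead;                /* analogue of SOCK_DEAD */
static int have_data;

/* Waits for data; the "peer" can die while the lock is dropped inside
 * pthread_cond_wait(), so the flag is rechecked every time around the
 * loop, under the lock -- the same discipline the socket fixes add. */
static void *reader(void *arg)
{
    (void)arg;
    pthread_mutex_lock(&lock);
    while (!have_data) {
        if (dead) {                             /* recheck under the lock */
            printf("connection reset\n");
            pthread_mutex_unlock(&lock);
            return NULL;
        }
        pthread_cond_wait(&cond, &lock);        /* lock dropped while asleep */
    }
    printf("got data\n");
    pthread_mutex_unlock(&lock);
    return NULL;
}

int main(void)
{
    pthread_t t;

    pthread_create(&t, NULL, reader, NULL);
    pthread_mutex_lock(&lock);
    dead = 1;                   /* peer goes away without sending anything */
    pthread_cond_broadcast(&cond);
    pthread_mutex_unlock(&lock);
    pthread_join(t, NULL);
    return 0;
}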

@ -2495,51 +2495,22 @@ static bool ieee80211_coalesce_started_roc(struct ieee80211_local *local,
struct ieee80211_roc_work *new_roc,
struct ieee80211_roc_work *cur_roc)
{
unsigned long j = jiffies;
unsigned long cur_roc_end = cur_roc->hw_start_time +
msecs_to_jiffies(cur_roc->duration);
struct ieee80211_roc_work *next_roc;
int new_dur;
unsigned long now = jiffies;
unsigned long remaining = cur_roc->hw_start_time +
msecs_to_jiffies(cur_roc->duration) -
now;
if (WARN_ON(!cur_roc->started || !cur_roc->hw_begun))
return false;
if (time_after(j + IEEE80211_ROC_MIN_LEFT, cur_roc_end))
/* if it doesn't fit entirely, schedule a new one */
if (new_roc->duration > jiffies_to_msecs(remaining))
return false;
ieee80211_handle_roc_started(new_roc);
new_dur = new_roc->duration - jiffies_to_msecs(cur_roc_end - j);
/* cur_roc is long enough - add new_roc to the dependents list. */
if (new_dur <= 0) {
list_add_tail(&new_roc->list, &cur_roc->dependents);
return true;
}
new_roc->duration = new_dur;
/*
* if cur_roc was already coalesced before, we might
* want to extend the next roc instead of adding
* a new one.
*/
next_roc = list_entry(cur_roc->list.next,
struct ieee80211_roc_work, list);
if (&next_roc->list != &local->roc_list &&
next_roc->chan == new_roc->chan &&
next_roc->sdata == new_roc->sdata &&
!WARN_ON(next_roc->started)) {
list_add_tail(&new_roc->list, &next_roc->dependents);
next_roc->duration = max(next_roc->duration,
new_roc->duration);
next_roc->type = max(next_roc->type, new_roc->type);
return true;
}
/* add right after cur_roc */
list_add(&new_roc->list, &cur_roc->list);
/* add to dependents so we send the expired event properly */
list_add_tail(&new_roc->list, &cur_roc->dependents);
return true;
}
@ -2652,17 +2623,9 @@ static int ieee80211_start_roc_work(struct ieee80211_local *local,
* In the offloaded ROC case, if it hasn't begun, add
* this new one to the dependent list to be handled
* when the master one begins. If it has begun,
* check that there's still a minimum time left and
* if so, start this one, transmitting the frame, but
* add it to the list directly after this one with
* a reduced time so we'll ask the driver to execute
* it right after finishing the previous one, in the
* hope that it'll also be executed right afterwards,
* effectively extending the old one.
* If there's no minimum time left, just add it to the
* normal list.
* TODO: the ROC type is ignored here, assuming that it
* is better to immediately use the current ROC.
* check if it fits entirely within the existing one,
* in which case it will just be dependent as well.
* Otherwise, schedule it by itself.
*/
if (!tmp->hw_begun) {
list_add_tail(&roc->list, &tmp->dependents);

@ -205,6 +205,8 @@ enum ieee80211_packet_rx_flags {
* @IEEE80211_RX_CMNTR: received on cooked monitor already
* @IEEE80211_RX_BEACON_REPORTED: This frame was already reported
* to cfg80211_report_obss_beacon().
* @IEEE80211_RX_REORDER_TIMER: this frame is released by the
* reorder buffer timeout timer, not the normal RX path
*
* These flags are used across handling multiple interfaces
* for a single frame.
@ -212,6 +214,7 @@ enum ieee80211_packet_rx_flags {
enum ieee80211_rx_flags {
IEEE80211_RX_CMNTR = BIT(0),
IEEE80211_RX_BEACON_REPORTED = BIT(1),
IEEE80211_RX_REORDER_TIMER = BIT(2),
};
struct ieee80211_rx_data {
@ -325,12 +328,6 @@ struct mesh_preq_queue {
u8 flags;
};
#if HZ/100 == 0
#define IEEE80211_ROC_MIN_LEFT 1
#else
#define IEEE80211_ROC_MIN_LEFT (HZ/100)
#endif
struct ieee80211_roc_work {
struct list_head list;
struct list_head dependents;

@ -522,6 +522,12 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
memcpy(sdata->vif.hw_queue, master->vif.hw_queue,
sizeof(sdata->vif.hw_queue));
sdata->vif.bss_conf.chandef = master->vif.bss_conf.chandef;
mutex_lock(&local->key_mtx);
sdata->crypto_tx_tailroom_needed_cnt +=
master->crypto_tx_tailroom_needed_cnt;
mutex_unlock(&local->key_mtx);
break;
}
case NL80211_IFTYPE_AP:

@ -58,6 +58,22 @@ static void assert_key_lock(struct ieee80211_local *local)
lockdep_assert_held(&local->key_mtx);
}
static void
update_vlan_tailroom_need_count(struct ieee80211_sub_if_data *sdata, int delta)
{
struct ieee80211_sub_if_data *vlan;
if (sdata->vif.type != NL80211_IFTYPE_AP)
return;
mutex_lock(&sdata->local->mtx);
list_for_each_entry(vlan, &sdata->u.ap.vlans, u.vlan.list)
vlan->crypto_tx_tailroom_needed_cnt += delta;
mutex_unlock(&sdata->local->mtx);
}
static void increment_tailroom_need_count(struct ieee80211_sub_if_data *sdata)
{
/*
@ -79,6 +95,8 @@ static void increment_tailroom_need_count(struct ieee80211_sub_if_data *sdata)
* http://mid.gmane.org/1308590980.4322.19.camel@jlt3.sipsolutions.net
*/
update_vlan_tailroom_need_count(sdata, 1);
if (!sdata->crypto_tx_tailroom_needed_cnt++) {
/*
* Flush all XMIT packets currently using HW encryption or no
@ -88,6 +106,15 @@ static void increment_tailroom_need_count(struct ieee80211_sub_if_data *sdata)
}
}
static void decrease_tailroom_need_count(struct ieee80211_sub_if_data *sdata,
int delta)
{
WARN_ON_ONCE(sdata->crypto_tx_tailroom_needed_cnt < delta);
update_vlan_tailroom_need_count(sdata, -delta);
sdata->crypto_tx_tailroom_needed_cnt -= delta;
}
static int ieee80211_key_enable_hw_accel(struct ieee80211_key *key)
{
struct ieee80211_sub_if_data *sdata;
@ -144,7 +171,7 @@ static int ieee80211_key_enable_hw_accel(struct ieee80211_key *key)
if (!((key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC) ||
(key->conf.flags & IEEE80211_KEY_FLAG_RESERVE_TAILROOM)))
sdata->crypto_tx_tailroom_needed_cnt--;
decrease_tailroom_need_count(sdata, 1);
WARN_ON((key->conf.flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE) &&
(key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV));
@ -541,7 +568,7 @@ static void __ieee80211_key_destroy(struct ieee80211_key *key,
schedule_delayed_work(&sdata->dec_tailroom_needed_wk,
HZ/2);
} else {
sdata->crypto_tx_tailroom_needed_cnt--;
decrease_tailroom_need_count(sdata, 1);
}
}
@ -631,6 +658,7 @@ void ieee80211_key_free(struct ieee80211_key *key, bool delay_tailroom)
void ieee80211_enable_keys(struct ieee80211_sub_if_data *sdata)
{
struct ieee80211_key *key;
struct ieee80211_sub_if_data *vlan;
ASSERT_RTNL();
@ -639,7 +667,14 @@ void ieee80211_enable_keys(struct ieee80211_sub_if_data *sdata)
mutex_lock(&sdata->local->key_mtx);
sdata->crypto_tx_tailroom_needed_cnt = 0;
WARN_ON_ONCE(sdata->crypto_tx_tailroom_needed_cnt ||
sdata->crypto_tx_tailroom_pending_dec);
if (sdata->vif.type == NL80211_IFTYPE_AP) {
list_for_each_entry(vlan, &sdata->u.ap.vlans, u.vlan.list)
WARN_ON_ONCE(vlan->crypto_tx_tailroom_needed_cnt ||
vlan->crypto_tx_tailroom_pending_dec);
}
list_for_each_entry(key, &sdata->key_list, list) {
increment_tailroom_need_count(sdata);
@ -649,6 +684,22 @@ void ieee80211_enable_keys(struct ieee80211_sub_if_data *sdata)
mutex_unlock(&sdata->local->key_mtx);
}
void ieee80211_reset_crypto_tx_tailroom(struct ieee80211_sub_if_data *sdata)
{
struct ieee80211_sub_if_data *vlan;
mutex_lock(&sdata->local->key_mtx);
sdata->crypto_tx_tailroom_needed_cnt = 0;
if (sdata->vif.type == NL80211_IFTYPE_AP) {
list_for_each_entry(vlan, &sdata->u.ap.vlans, u.vlan.list)
vlan->crypto_tx_tailroom_needed_cnt = 0;
}
mutex_unlock(&sdata->local->key_mtx);
}
void ieee80211_iter_keys(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
void (*iter)(struct ieee80211_hw *hw,
@ -688,8 +739,8 @@ static void ieee80211_free_keys_iface(struct ieee80211_sub_if_data *sdata,
{
struct ieee80211_key *key, *tmp;
sdata->crypto_tx_tailroom_needed_cnt -=
sdata->crypto_tx_tailroom_pending_dec;
decrease_tailroom_need_count(sdata,
sdata->crypto_tx_tailroom_pending_dec);
sdata->crypto_tx_tailroom_pending_dec = 0;
ieee80211_debugfs_key_remove_mgmt_default(sdata);
@ -709,6 +760,7 @@ void ieee80211_free_keys(struct ieee80211_sub_if_data *sdata,
{
struct ieee80211_local *local = sdata->local;
struct ieee80211_sub_if_data *vlan;
struct ieee80211_sub_if_data *master;
struct ieee80211_key *key, *tmp;
LIST_HEAD(keys);
@ -728,8 +780,20 @@ void ieee80211_free_keys(struct ieee80211_sub_if_data *sdata,
list_for_each_entry_safe(key, tmp, &keys, list)
__ieee80211_key_destroy(key, false);
WARN_ON_ONCE(sdata->crypto_tx_tailroom_needed_cnt ||
sdata->crypto_tx_tailroom_pending_dec);
if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) {
if (sdata->bss) {
master = container_of(sdata->bss,
struct ieee80211_sub_if_data,
u.ap);
WARN_ON_ONCE(sdata->crypto_tx_tailroom_needed_cnt !=
master->crypto_tx_tailroom_needed_cnt);
}
} else {
WARN_ON_ONCE(sdata->crypto_tx_tailroom_needed_cnt ||
sdata->crypto_tx_tailroom_pending_dec);
}
if (sdata->vif.type == NL80211_IFTYPE_AP) {
list_for_each_entry(vlan, &sdata->u.ap.vlans, u.vlan.list)
WARN_ON_ONCE(vlan->crypto_tx_tailroom_needed_cnt ||
@ -793,8 +857,8 @@ void ieee80211_delayed_tailroom_dec(struct work_struct *wk)
*/
mutex_lock(&sdata->local->key_mtx);
sdata->crypto_tx_tailroom_needed_cnt -=
sdata->crypto_tx_tailroom_pending_dec;
decrease_tailroom_need_count(sdata,
sdata->crypto_tx_tailroom_pending_dec);
sdata->crypto_tx_tailroom_pending_dec = 0;
mutex_unlock(&sdata->local->key_mtx);
}

@ -161,6 +161,7 @@ void ieee80211_free_keys(struct ieee80211_sub_if_data *sdata,
void ieee80211_free_sta_keys(struct ieee80211_local *local,
struct sta_info *sta);
void ieee80211_enable_keys(struct ieee80211_sub_if_data *sdata);
void ieee80211_reset_crypto_tx_tailroom(struct ieee80211_sub_if_data *sdata);
#define key_mtx_dereference(local, ref) \
rcu_dereference_protected(ref, lockdep_is_held(&((local)->key_mtx)))

@ -2121,7 +2121,8 @@ ieee80211_deliver_skb(struct ieee80211_rx_data *rx)
/* deliver to local stack */
skb->protocol = eth_type_trans(skb, dev);
memset(skb->cb, 0, sizeof(skb->cb));
if (rx->local->napi)
if (!(rx->flags & IEEE80211_RX_REORDER_TIMER) &&
rx->local->napi)
napi_gro_receive(rx->local->napi, skb);
else
netif_receive_skb(skb);
@ -3231,7 +3232,7 @@ void ieee80211_release_reorder_timeout(struct sta_info *sta, int tid)
/* This is OK -- must be QoS data frame */
.security_idx = tid,
.seqno_idx = tid,
.flags = 0,
.flags = IEEE80211_RX_REORDER_TIMER,
};
struct tid_ampdu_rx *tid_agg_rx;

@ -2022,6 +2022,9 @@ int ieee80211_reconfig(struct ieee80211_local *local)
mutex_unlock(&local->sta_mtx);
/* add back keys */
list_for_each_entry(sdata, &local->interfaces, list)
ieee80211_reset_crypto_tx_tailroom(sdata);
list_for_each_entry(sdata, &local->interfaces, list)
if (ieee80211_sdata_running(sdata))
ieee80211_enable_keys(sdata);

@ -815,10 +815,8 @@ static int qdisc_graft(struct net_device *dev, struct Qdisc *parent,
if (dev->flags & IFF_UP)
dev_deactivate(dev);
if (new && new->ops->attach) {
new->ops->attach(new);
num_q = 0;
}
if (new && new->ops->attach)
goto skip;
for (i = 0; i < num_q; i++) {
struct netdev_queue *dev_queue = dev_ingress_queue(dev);
@ -834,12 +832,16 @@ static int qdisc_graft(struct net_device *dev, struct Qdisc *parent,
qdisc_destroy(old);
}
skip:
if (!ingress) {
notify_and_destroy(net, skb, n, classid,
dev->qdisc, new);
if (new && !new->ops->attach)
atomic_inc(&new->refcnt);
dev->qdisc = new ? : &noop_qdisc;
if (new && new->ops->attach)
new->ops->attach(new);
} else {
notify_and_destroy(net, skb, n, classid, old, new);
}

@ -1880,6 +1880,10 @@ static long unix_stream_data_wait(struct sock *sk, long timeo,
unix_state_unlock(sk);
timeo = freezable_schedule_timeout(timeo);
unix_state_lock(sk);
if (sock_flag(sk, SOCK_DEAD))
break;
clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
}
@ -1939,6 +1943,10 @@ static int unix_stream_recvmsg(struct socket *sock, struct msghdr *msg,
struct sk_buff *skb, *last;
unix_state_lock(sk);
if (sock_flag(sk, SOCK_DEAD)) {
err = -ECONNRESET;
goto unlock;
}
last = skb = skb_peek(&sk->sk_receive_queue);
again:
if (skb == NULL) {

@ -123,6 +123,8 @@ static int get_last_jit_image(char *haystack, size_t hlen,
assert(ret == 0);
ptr = haystack;
memset(pmatch, 0, sizeof(pmatch));
while (1) {
ret = regexec(&regex, ptr, 1, pmatch, 0);
if (ret == 0) {
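
The bpf_jit_disasm fix initializes the regmatch array before the matching loop, so a kernel log with no JIT output cannot leave the match offsets holding garbage. For reference, a minimal self-contained POSIX regexec() loop that walks matches safely (the sample haystack is made up):

#include <regex.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
    const char *haystack = "flen=3 proglen=70 pass=3 image=deadbeef\n"
                           "flen=5 proglen=90 pass=4 image=cafebabe\n";
    regex_t regex;
    regmatch_t pmatch[1];
    const char *ptr = haystack;

    if (regcomp(&regex, "image=[0-9a-f]+", REG_EXTENDED))
        return 1;

    memset(pmatch, 0, sizeof(pmatch));  /* start from a known state */

    while (regexec(&regex, ptr, 1, pmatch, 0) == 0) {
        int len = (int)(pmatch[0].rm_eo - pmatch[0].rm_so);

        printf("match: %.*s\n", len, ptr + pmatch[0].rm_so);
        ptr += pmatch[0].rm_eo;         /* continue after this match */
    }

    regfree(&regex);
    return 0;
}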