
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Pull networking fixes from David Miller:

 1) Bluetooth pairing fixes from Johan Hedberg.

 2) ieee80211_send_auth() doesn't allocate enough tail room for the SKB,
    from Max Stepanov.

 3) New iwlwifi chip IDs, from Oren Givon.

 4) bnx2x driver reads wrong PCI config space MSI register, from Yijing
    Wang.

 5) IPv6 MLD query validation isn't strong enough, from Hangbin Liu.  (A
    stricter check is sketched after this list.)

 6) Fix double SKB free in openvswitch, from Andy Zhou.

 7) Fix sk_dst_set() being racy with UDP sockets, leading to strange
    crashes, from Eric Dumazet.  (The lockless exchange is sketched
    after this list.)

 8) Interpret the NAPI budget correctly in the new systemport driver,
    from Florian Fainelli.

 9) VLAN code frees percpu stats in the wrong place, leading to crashes
    in the get stats handler.  From Eric Dumazet.

10) TCP sockets doing a repair can crash with a divide by zero, because
    we invoke tcp_push() with an MSS value of zero.  Just skip that part
    of the sendmsg paths in repair mode.  From Christoph Paasch.  (The
    skip is sketched after this list.)

11) IRQ affinity bug fixes in mlx4 driver from Amir Vadai.

12) Don't ignore path MTU ICMP messages with a zero MTU; machines out
    there still spit them out, and all of our per-protocol handlers for
    PMTU can cope with it just fine.  From Edward Allcutt.

13) Some NETDEV_CHANGE notifier invocations were not passing in the
    correct kind of cookie as the argument, from Loic Prylli.

14) Fix crashes in TIPC's long multicast/broadcast reassembly, from Jon
    Paul Maloy.

15) ip_tunnel_lookup() doesn't interpret wildcard keys correctly, fix
    from Dmitry Popov.

16) Fix skb->sk being assigned without taking a reference on 'sk' in
    appletalk, from Andrey Utkin.  (The reference-taking pattern is
    sketched after this list.)

17) Fix some info leaks in ULP event signalling to userspace in SCTP,
    from Daniel Borkmann.

18) Fix deadlocks in HSO driver, from Olivier Sobrie.
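
A few of the fixes above lend themselves to short illustrations.  The
following are hedged sketches of the general patterns involved, written
for this summary; they are simplified and are not the patches themselves.

For 5), an MLD query is only acceptable from a link-local source, so one
representative piece of a stricter validation looks roughly like:

    /* sketch: drop MLD queries whose source is not link-local */
    if (!(ipv6_addr_type(&ipv6_hdr(skb)->saddr) & IPV6_ADDR_LINKLOCAL))
            return -EINVAL;

For 7), the crashes come from several CPUs storing to sk->sk_dst_cache
concurrently; a lockless exchange publishes the new route and releases
exactly the one it displaced:

    /* sketch: atomically swap in the new dst, drop the old reference */
    static inline void sk_dst_set(struct sock *sk, struct dst_entry *dst)
    {
            struct dst_entry *old_dst;

            sk_tx_queue_clear(sk);
            old_dst = xchg((__force struct dst_entry **)&sk->sk_dst_cache,
                           dst);
            dst_release(old_dst);
    }

For 10), a repair-mode socket has no usable MSS, so the push is simply
skipped there:

    /* sketch: never segment against mss_now == 0 while repairing */
    if (copied && likely(!tp->repair))
            tcp_push(sk, flags, mss_now, tp->nonagle, size_goal);

For 16), a socket pointer stored in an skb must be pinned for the skb's
lifetime; one standard pattern is the owner helper, which takes the
reference and installs a destructor to drop it:

    /* sketch: the skb now holds a reference its destructor releases */
    skb_set_owner_w(skb, sk);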

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (93 commits)
  hso: fix deadlock when receiving bursts of data
  hso: remove unused workqueue
  net: ppp: don't call sk_chk_filter twice
  mlx4: mark napi id for gro_skb
  bonding: fix ad_select module param check
  net: pppoe: use correct channel MTU when using Multilink PPP
  neigh: sysctl - simplify address calculation of gc_* variables
  net: sctp: fix information leaks in ulpevent layer
  MAINTAINERS: update r8169 maintainer
  net: bcmgenet: fix RGMII_MODE_EN bit
  tipc: clear 'next'-pointer of message fragments before reassembly
  r8152: fix r8152_csum_workaround function
  be2net: set EQ DB clear-intr bit in be_open()
  GRE: enable offloads for GRE
  farsync: fix invalid memory accesses in fst_add_one() and fst_init_card()
  igb: do a reset on SR-IOV re-init if device is down
  igb: Workaround for i210 Errata 25: Slow System Clock
  usbnet: smsc95xx: add reset_resume function with reset operation
  dp83640: Always decode received status frames
  r8169: disable L23
  ...
Linus Torvalds 2014-07-15 08:42:52 -07:00
commit 5615f9f822
102 changed files with 736 additions and 666 deletions


@ -156,7 +156,6 @@ F: drivers/net/hamradio/6pack.c
8169 10/100/1000 GIGABIT ETHERNET DRIVER
M: Realtek linux nic maintainers <nic_swsd@realtek.com>
M: Francois Romieu <romieu@fr.zoreil.com>
L: netdev@vger.kernel.org
S: Maintained
F: drivers/net/ethernet/realtek/r8169.c
@ -4511,8 +4510,7 @@ S: Supported
F: drivers/idle/i7300_idle.c
IEEE 802.15.4 SUBSYSTEM
M: Alexander Smirnov <alex.bluesman.smirnov@gmail.com>
M: Dmitry Eremin-Solenikov <dbaryshkov@gmail.com>
M: Alexander Aring <alex.aring@gmail.com>
L: linux-zigbee-devel@lists.sourceforge.net (moderated for non-subscribers)
W: http://apps.sourceforge.net/trac/linux-zigbee
T: git git://git.kernel.org/pub/scm/linux/kernel/git/lowpan/lowpan.git


@ -390,12 +390,16 @@ static int bpf_jit_build_body(struct sk_filter *fp, u32 *image,
case BPF_ANC | SKF_AD_VLAN_TAG:
case BPF_ANC | SKF_AD_VLAN_TAG_PRESENT:
BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_tci) != 2);
BUILD_BUG_ON(VLAN_TAG_PRESENT != 0x1000);
PPC_LHZ_OFFS(r_A, r_skb, offsetof(struct sk_buff,
vlan_tci));
if (code == (BPF_ANC | SKF_AD_VLAN_TAG))
PPC_ANDI(r_A, r_A, VLAN_VID_MASK);
else
if (code == (BPF_ANC | SKF_AD_VLAN_TAG)) {
PPC_ANDI(r_A, r_A, ~VLAN_TAG_PRESENT);
} else {
PPC_ANDI(r_A, r_A, VLAN_TAG_PRESENT);
PPC_SRWI(r_A, r_A, 12);
}
break;
case BPF_ANC | SKF_AD_QUEUE:
BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
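
For reference, the semantics this hunk makes the powerpc JIT match (a
hedged C equivalent written for this page, not part of the patch):
VLAN_TAG_PRESENT is the single bit 0x1000, so the "present" load reduces
to a mask and a 12-bit shift.

    /* SKF_AD_VLAN_TAG: the tag with the present bit masked out */
    A = skb->vlan_tci & ~VLAN_TAG_PRESENT;

    /* SKF_AD_VLAN_TAG_PRESENT: bit 12 shifted down, i.e. 0 or 1 */
    A = (skb->vlan_tci & VLAN_TAG_PRESENT) >> 12;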


@ -90,7 +90,6 @@ static const struct usb_device_id ath3k_table[] = {
{ USB_DEVICE(0x0b05, 0x17d0) },
{ USB_DEVICE(0x0CF3, 0x0036) },
{ USB_DEVICE(0x0CF3, 0x3004) },
{ USB_DEVICE(0x0CF3, 0x3005) },
{ USB_DEVICE(0x0CF3, 0x3008) },
{ USB_DEVICE(0x0CF3, 0x311D) },
{ USB_DEVICE(0x0CF3, 0x311E) },
@ -140,7 +139,6 @@ static const struct usb_device_id ath3k_blist_tbl[] = {
{ USB_DEVICE(0x0b05, 0x17d0), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0CF3, 0x0036), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0cf3, 0x3005), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0cf3, 0x3008), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0cf3, 0x311D), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0cf3, 0x311E), .driver_info = BTUSB_ATH3012 },


@ -162,7 +162,6 @@ static const struct usb_device_id blacklist_table[] = {
{ USB_DEVICE(0x0b05, 0x17d0), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0cf3, 0x0036), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0cf3, 0x3005), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0cf3, 0x3008), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0cf3, 0x311d), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0cf3, 0x311e), .driver_info = BTUSB_ATH3012 },


@ -406,6 +406,7 @@ static int h5_rx_3wire_hdr(struct hci_uart *hu, unsigned char c)
H5_HDR_PKT_TYPE(hdr) != HCI_3WIRE_LINK_PKT) {
BT_ERR("Non-link packet received in non-active state");
h5_reset_rx(h5);
return 0;
}
h5->rx_func = h5_rx_payload;


@ -2059,13 +2059,17 @@ static int l3ni1_cmd_global(struct PStack *st, isdn_ctrl *ic)
memcpy(p, ic->parm.ni1_io.data, ic->parm.ni1_io.datalen); /* copy data */
l = (p - temp) + ic->parm.ni1_io.datalen; /* total length */
if (ic->parm.ni1_io.timeout > 0)
if (!(pc = ni1_new_l3_process(st, -1)))
{ free_invoke_id(st, id);
if (ic->parm.ni1_io.timeout > 0) {
pc = ni1_new_l3_process(st, -1);
if (!pc) {
free_invoke_id(st, id);
return (-2);
}
pc->prot.ni1.ll_id = ic->parm.ni1_io.ll_id; /* remember id */
pc->prot.ni1.proc = ic->parm.ni1_io.proc; /* and procedure */
/* remember id */
pc->prot.ni1.ll_id = ic->parm.ni1_io.ll_id;
/* and procedure */
pc->prot.ni1.proc = ic->parm.ni1_io.proc;
}
if (!(skb = l3_alloc_skb(l)))
{ free_invoke_id(st, id);


@ -442,7 +442,7 @@ static int get_filter(void __user *arg, struct sock_filter **p)
{
struct sock_fprog uprog;
struct sock_filter *code = NULL;
int len, err;
int len;
if (copy_from_user(&uprog, arg, sizeof(uprog)))
return -EFAULT;
@ -458,12 +458,6 @@ static int get_filter(void __user *arg, struct sock_filter **p)
if (IS_ERR(code))
return PTR_ERR(code);
err = sk_chk_filter(code, uprog.len);
if (err) {
kfree(code);
return err;
}
*p = code;
return uprog.len;
}
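
Rationale, as a hedged sketch of the caller side: get_filter() now only
copies the program from userspace, and validation happens exactly once,
when the filter is actually created:

    /* sketch: the create call runs the checker on the copied program */
    struct sock_fprog_kern fprog = {
            .len    = len,
            .filter = code,
    };
    struct sk_filter *filter;
    int err = sk_unattached_filter_create(&filter, &fprog);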


@ -4068,7 +4068,7 @@ static int bond_check_params(struct bond_params *params)
}
if (ad_select) {
bond_opt_initstr(&newval, lacp_rate);
bond_opt_initstr(&newval, ad_select);
valptr = bond_opt_parse(bond_opt_get(BOND_OPT_AD_SELECT),
&newval);
if (!valptr) {


@ -654,13 +654,13 @@ static int bcm_sysport_tx_poll(struct napi_struct *napi, int budget)
work_done = bcm_sysport_tx_reclaim(ring->priv, ring);
if (work_done < budget) {
if (work_done == 0) {
napi_complete(napi);
/* re-enable TX interrupt */
intrl2_1_mask_clear(ring->priv, BIT(ring->index));
}
return work_done;
return 0;
}
static void bcm_sysport_tx_reclaim_all(struct bcm_sysport_priv *priv)
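
Both changes in this driver follow from the NAPI contract (a generic,
hedged sketch; the helper names are hypothetical): an RX-style poll
returns how much of its budget it consumed, and may only call
napi_complete() and re-enable its interrupt once it has done less work
than the budget.  TX reclaim, as above, does not count against the
budget at all, which is why the TX poll now reports zero.

    static int my_poll(struct napi_struct *napi, int budget)
    {
            int work_done = my_process_ring(napi, budget);  /* hypothetical */

            if (work_done < budget) {
                    napi_complete(napi);
                    my_enable_irq(napi);    /* hypothetical re-enable */
            }
            return work_done;       /* never more than the budget */
    }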
@ -1254,28 +1254,17 @@ static inline void umac_enable_set(struct bcm_sysport_priv *priv,
usleep_range(1000, 2000);
}
static inline int umac_reset(struct bcm_sysport_priv *priv)
static inline void umac_reset(struct bcm_sysport_priv *priv)
{
unsigned int timeout = 0;
u32 reg;
int ret = 0;
umac_writel(priv, 0, UMAC_CMD);
while (timeout++ < 1000) {
reg = umac_readl(priv, UMAC_CMD);
if (!(reg & CMD_SW_RESET))
break;
udelay(1);
}
if (timeout == 1000) {
dev_err(&priv->pdev->dev,
"timeout waiting for MAC to come out of reset\n");
ret = -ETIMEDOUT;
}
return ret;
reg = umac_readl(priv, UMAC_CMD);
reg |= CMD_SW_RESET;
umac_writel(priv, reg, UMAC_CMD);
udelay(10);
reg = umac_readl(priv, UMAC_CMD);
reg &= ~CMD_SW_RESET;
umac_writel(priv, reg, UMAC_CMD);
}
static void umac_set_hw_addr(struct bcm_sysport_priv *priv,
@ -1303,11 +1292,7 @@ static int bcm_sysport_open(struct net_device *dev)
int ret;
/* Reset UniMAC */
ret = umac_reset(priv);
if (ret) {
netdev_err(dev, "UniMAC reset failed\n");
return ret;
}
umac_reset(priv);
/* Flush TX and RX FIFOs at TOPCTRL level */
topctrl_flush(priv);
@ -1589,12 +1574,6 @@ static int bcm_sysport_probe(struct platform_device *pdev)
BUILD_BUG_ON(sizeof(struct bcm_tsb) != 8);
dev->needed_headroom += sizeof(struct bcm_tsb);
/* We are interfaced to a switch which handles the multicast
* filtering for us, so we do not support programming any
* multicast hash table in this Ethernet MAC.
*/
dev->flags &= ~IFF_MULTICAST;
/* libphy will adjust the link state accordingly */
netif_carrier_off(dev);


@ -797,7 +797,8 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
return;
}
bnx2x_frag_free(fp, new_data);
if (new_data)
bnx2x_frag_free(fp, new_data);
drop:
/* drop the packet and keep the buffer in the bin */
DP(NETIF_MSG_RX_STATUS,


@ -12937,7 +12937,7 @@ static int bnx2x_get_num_non_def_sbs(struct pci_dev *pdev, int cnic_cnt)
* without the default SB.
* For VFs there is no default SB, then we return (index+1).
*/
pci_read_config_word(pdev, pdev->msix_cap + PCI_MSI_FLAGS, &control);
pci_read_config_word(pdev, pdev->msix_cap + PCI_MSIX_FLAGS, &control);
index = control & PCI_MSIX_FLAGS_QSIZE;
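
Context for this one-token fix (hedged, simplified): the Message Control
word being read belongs to the MSI-X capability, so the matching offset
macro is PCI_MSIX_FLAGS; the old spelling only worked because the MSI
and MSI-X control words both happen to sit at offset 2 of their
capabilities.  The QSIZE field encodes the table size minus one:

    u16 control;

    pci_read_config_word(pdev, pdev->msix_cap + PCI_MSIX_FLAGS, &control);
    num_vectors = (control & PCI_MSIX_FLAGS_QSIZE) + 1;  /* N - 1 encoded */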


@ -1408,13 +1408,6 @@ static int bcmgenet_alloc_rx_buffers(struct bcmgenet_priv *priv)
if (cb->skb)
continue;
/* set the DMA descriptor length once and for all
* it will only change if we support dynamically sizing
* priv->rx_buf_len, but we do not
*/
dmadesc_set_length_status(priv, priv->rx_bd_assign_ptr,
priv->rx_buf_len << DMA_BUFLENGTH_SHIFT);
ret = bcmgenet_rx_refill(priv, cb);
if (ret)
break;
@ -2535,14 +2528,17 @@ static int bcmgenet_probe(struct platform_device *pdev)
netif_set_real_num_tx_queues(priv->dev, priv->hw_params->tx_queues + 1);
netif_set_real_num_rx_queues(priv->dev, priv->hw_params->rx_queues + 1);
err = register_netdev(dev);
if (err)
goto err_clk_disable;
/* libphy will determine the link state */
netif_carrier_off(dev);
/* Turn off the main clock, WOL clock is handled separately */
if (!IS_ERR(priv->clk))
clk_disable_unprepare(priv->clk);
err = register_netdev(dev);
if (err)
goto err;
return err;
err_clk_disable:


@ -331,9 +331,9 @@ struct bcmgenet_mib_counters {
#define EXT_ENERGY_DET_MASK (1 << 12)
#define EXT_RGMII_OOB_CTRL 0x0C
#define RGMII_MODE_EN (1 << 0)
#define RGMII_LINK (1 << 4)
#define OOB_DISABLE (1 << 5)
#define RGMII_MODE_EN (1 << 6)
#define ID_MODE_DIS (1 << 16)
#define EXT_GPHY_CTRL 0x1C


@ -2902,7 +2902,7 @@ static int be_open(struct net_device *netdev)
for_all_evt_queues(adapter, eqo, i) {
napi_enable(&eqo->napi);
be_enable_busy_poll(eqo);
be_eq_notify(adapter, eqo->q.id, true, false, 0);
be_eq_notify(adapter, eqo->q.id, true, true, 0);
}
adapter->flags |= BE_FLAGS_NAPI_ENABLED;


@ -2990,11 +2990,11 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
if (ug_info->rxExtendedFiltering) {
size += THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING;
if (ug_info->largestexternallookupkeysize ==
QE_FLTR_TABLE_LOOKUP_KEY_SIZE_8_BYTES)
QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_8_BYTES)
size +=
THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_8;
if (ug_info->largestexternallookupkeysize ==
QE_FLTR_TABLE_LOOKUP_KEY_SIZE_16_BYTES)
QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_16_BYTES)
size +=
THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_16;
}


@ -1481,6 +1481,13 @@ static s32 igb_init_hw_82575(struct e1000_hw *hw)
s32 ret_val;
u16 i, rar_count = mac->rar_entry_count;
if ((hw->mac.type >= e1000_i210) &&
!(igb_get_flash_presence_i210(hw))) {
ret_val = igb_pll_workaround_i210(hw);
if (ret_val)
return ret_val;
}
/* Initialize identification LED */
ret_val = igb_id_led_init(hw);
if (ret_val) {


@ -46,14 +46,15 @@
#define E1000_CTRL_EXT_SDP3_DIR 0x00000800 /* SDP3 Data direction */
/* Physical Func Reset Done Indication */
#define E1000_CTRL_EXT_PFRSTD 0x00004000
#define E1000_CTRL_EXT_LINK_MODE_MASK 0x00C00000
#define E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES 0x00C00000
#define E1000_CTRL_EXT_LINK_MODE_1000BASE_KX 0x00400000
#define E1000_CTRL_EXT_LINK_MODE_SGMII 0x00800000
#define E1000_CTRL_EXT_LINK_MODE_GMII 0x00000000
#define E1000_CTRL_EXT_EIAME 0x01000000
#define E1000_CTRL_EXT_IRCA 0x00000001
#define E1000_CTRL_EXT_PFRSTD 0x00004000
#define E1000_CTRL_EXT_SDLPE 0X00040000 /* SerDes Low Power Enable */
#define E1000_CTRL_EXT_LINK_MODE_MASK 0x00C00000
#define E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES 0x00C00000
#define E1000_CTRL_EXT_LINK_MODE_1000BASE_KX 0x00400000
#define E1000_CTRL_EXT_LINK_MODE_SGMII 0x00800000
#define E1000_CTRL_EXT_LINK_MODE_GMII 0x00000000
#define E1000_CTRL_EXT_EIAME 0x01000000
#define E1000_CTRL_EXT_IRCA 0x00000001
/* Interrupt delay cancellation */
/* Driver loaded bit for FW */
#define E1000_CTRL_EXT_DRV_LOAD 0x10000000
@ -62,6 +63,7 @@
/* packet buffer parity error detection enabled */
/* descriptor FIFO parity error detection enable */
#define E1000_CTRL_EXT_PBA_CLR 0x80000000 /* PBA Clear */
#define E1000_CTRL_EXT_PHYPDEN 0x00100000
#define E1000_I2CCMD_REG_ADDR_SHIFT 16
#define E1000_I2CCMD_PHY_ADDR_SHIFT 24
#define E1000_I2CCMD_OPCODE_READ 0x08000000


@ -567,4 +567,7 @@ struct net_device *igb_get_hw_dev(struct e1000_hw *hw);
/* These functions must be implemented by drivers */
s32 igb_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value);
s32 igb_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value);
void igb_read_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value);
void igb_write_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value);
#endif /* _E1000_HW_H_ */


@ -834,3 +834,69 @@ s32 igb_init_nvm_params_i210(struct e1000_hw *hw)
}
return ret_val;
}
/**
* igb_pll_workaround_i210
* @hw: pointer to the HW structure
*
* Works around an errata in the PLL circuit where it occasionally
* provides the wrong clock frequency after power up.
**/
s32 igb_pll_workaround_i210(struct e1000_hw *hw)
{
s32 ret_val;
u32 wuc, mdicnfg, ctrl, ctrl_ext, reg_val;
u16 nvm_word, phy_word, pci_word, tmp_nvm;
int i;
/* Get and set needed register values */
wuc = rd32(E1000_WUC);
mdicnfg = rd32(E1000_MDICNFG);
reg_val = mdicnfg & ~E1000_MDICNFG_EXT_MDIO;
wr32(E1000_MDICNFG, reg_val);
/* Get data from NVM, or set default */
ret_val = igb_read_invm_word_i210(hw, E1000_INVM_AUTOLOAD,
&nvm_word);
if (ret_val)
nvm_word = E1000_INVM_DEFAULT_AL;
tmp_nvm = nvm_word | E1000_INVM_PLL_WO_VAL;
for (i = 0; i < E1000_MAX_PLL_TRIES; i++) {
/* check current state directly from internal PHY */
igb_read_phy_reg_gs40g(hw, (E1000_PHY_PLL_FREQ_PAGE |
E1000_PHY_PLL_FREQ_REG), &phy_word);
if ((phy_word & E1000_PHY_PLL_UNCONF)
!= E1000_PHY_PLL_UNCONF) {
ret_val = 0;
break;
} else {
ret_val = -E1000_ERR_PHY;
}
/* directly reset the internal PHY */
ctrl = rd32(E1000_CTRL);
wr32(E1000_CTRL, ctrl|E1000_CTRL_PHY_RST);
ctrl_ext = rd32(E1000_CTRL_EXT);
ctrl_ext |= (E1000_CTRL_EXT_PHYPDEN | E1000_CTRL_EXT_SDLPE);
wr32(E1000_CTRL_EXT, ctrl_ext);
wr32(E1000_WUC, 0);
reg_val = (E1000_INVM_AUTOLOAD << 4) | (tmp_nvm << 16);
wr32(E1000_EEARBC_I210, reg_val);
igb_read_pci_cfg(hw, E1000_PCI_PMCSR, &pci_word);
pci_word |= E1000_PCI_PMCSR_D3;
igb_write_pci_cfg(hw, E1000_PCI_PMCSR, &pci_word);
usleep_range(1000, 2000);
pci_word &= ~E1000_PCI_PMCSR_D3;
igb_write_pci_cfg(hw, E1000_PCI_PMCSR, &pci_word);
reg_val = (E1000_INVM_AUTOLOAD << 4) | (nvm_word << 16);
wr32(E1000_EEARBC_I210, reg_val);
/* restore WUC register */
wr32(E1000_WUC, wuc);
}
/* restore MDICNFG setting */
wr32(E1000_MDICNFG, mdicnfg);
return ret_val;
}


@ -33,6 +33,7 @@ s32 igb_read_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 *data);
s32 igb_write_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 data);
s32 igb_init_nvm_params_i210(struct e1000_hw *hw);
bool igb_get_flash_presence_i210(struct e1000_hw *hw);
s32 igb_pll_workaround_i210(struct e1000_hw *hw);
#define E1000_STM_OPCODE 0xDB00
#define E1000_EEPROM_FLASH_SIZE_WORD 0x11
@ -78,4 +79,15 @@ enum E1000_INVM_STRUCTURE_TYPE {
#define NVM_LED_1_CFG_DEFAULT_I211 0x0184
#define NVM_LED_0_2_CFG_DEFAULT_I211 0x200C
/* PLL Defines */
#define E1000_PCI_PMCSR 0x44
#define E1000_PCI_PMCSR_D3 0x03
#define E1000_MAX_PLL_TRIES 5
#define E1000_PHY_PLL_UNCONF 0xFF
#define E1000_PHY_PLL_FREQ_PAGE 0xFC0000
#define E1000_PHY_PLL_FREQ_REG 0x000E
#define E1000_INVM_DEFAULT_AL 0x202F
#define E1000_INVM_AUTOLOAD 0x0A
#define E1000_INVM_PLL_WO_VAL 0x0010
#endif


@ -66,6 +66,7 @@
#define E1000_PBA 0x01000 /* Packet Buffer Allocation - RW */
#define E1000_PBS 0x01008 /* Packet Buffer Size */
#define E1000_EEMNGCTL 0x01010 /* MNG EEprom Control */
#define E1000_EEARBC_I210 0x12024 /* EEPROM Auto Read Bus Control */
#define E1000_EEWR 0x0102C /* EEPROM Write Register - RW */
#define E1000_I2CCMD 0x01028 /* SFPI2C Command Register - RW */
#define E1000_FRTIMER 0x01048 /* Free Running Timer - RW */


@ -7215,6 +7215,20 @@ static int igb_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
}
}
void igb_read_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value)
{
struct igb_adapter *adapter = hw->back;
pci_read_config_word(adapter->pdev, reg, value);
}
void igb_write_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value)
{
struct igb_adapter *adapter = hw->back;
pci_write_config_word(adapter->pdev, reg, *value);
}
s32 igb_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
{
struct igb_adapter *adapter = hw->back;
@ -7578,6 +7592,8 @@ static int igb_sriov_reinit(struct pci_dev *dev)
if (netif_running(netdev))
igb_close(netdev);
else
igb_reset(adapter);
igb_clear_interrupt_scheme(adapter);


@ -1207,7 +1207,7 @@ static u32 mvneta_txq_desc_csum(int l3_offs, int l3_proto,
command = l3_offs << MVNETA_TX_L3_OFF_SHIFT;
command |= ip_hdr_len << MVNETA_TX_IP_HLEN_SHIFT;
if (l3_proto == swab16(ETH_P_IP))
if (l3_proto == htons(ETH_P_IP))
command |= MVNETA_TXD_IP_CSUM;
else
command |= MVNETA_TX_L3_IP6;
@ -2529,7 +2529,7 @@ static void mvneta_adjust_link(struct net_device *ndev)
if (phydev->speed == SPEED_1000)
val |= MVNETA_GMAC_CONFIG_GMII_SPEED;
else
else if (phydev->speed == SPEED_100)
val |= MVNETA_GMAC_CONFIG_MII_SPEED;
mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
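
The htons() change in the first hunk is an endianness fix (hedged
illustration written for this page): swab16() swaps bytes
unconditionally, while htons() swaps only on little-endian hosts, which
is the correct conversion when comparing against a network-byte-order
field.

    /* ETH_P_IP is 0x0800 */
    /* little-endian: htons(ETH_P_IP) == swab16(ETH_P_IP) == 0x0008 */
    /* big-endian:    htons(ETH_P_IP) == 0x0800, swab16(ETH_P_IP) == 0x0008 */
    if (l3_proto == htons(ETH_P_IP))    /* right on both */
            command |= MVNETA_TXD_IP_CSUM;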


@ -294,8 +294,6 @@ int mlx4_cq_alloc(struct mlx4_dev *dev, int nent,
init_completion(&cq->free);
cq->irq = priv->eq_table.eq[cq->vector].irq;
cq->irq_affinity_change = false;
return 0;
err_radix:


@ -128,6 +128,10 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
mlx4_warn(mdev, "Failed assigning an EQ to %s, falling back to legacy EQ's\n",
name);
}
cq->irq_desc =
irq_to_desc(mlx4_eq_get_irq(mdev->dev,
cq->vector));
}
} else {
cq->vector = (cq->ring + 1 + priv->port) %
@ -187,8 +191,6 @@ void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq **pcq)
mlx4_en_unmap_buffer(&cq->wqres.buf);
mlx4_free_hwq_res(mdev->dev, &cq->wqres, cq->buf_size);
if (priv->mdev->dev->caps.comp_pool && cq->vector) {
if (!cq->is_tx)
irq_set_affinity_hint(cq->mcq.irq, NULL);
mlx4_release_eq(priv->mdev->dev, cq->vector);
}
cq->vector = 0;
@ -204,6 +206,7 @@ void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
if (!cq->is_tx) {
napi_hash_del(&cq->napi);
synchronize_rcu();
irq_set_affinity_hint(cq->mcq.irq, NULL);
}
netif_napi_del(&cq->napi);


@ -417,6 +417,8 @@ static int mlx4_en_get_coalesce(struct net_device *dev,
coal->tx_coalesce_usecs = priv->tx_usecs;
coal->tx_max_coalesced_frames = priv->tx_frames;
coal->tx_max_coalesced_frames_irq = priv->tx_work_limit;
coal->rx_coalesce_usecs = priv->rx_usecs;
coal->rx_max_coalesced_frames = priv->rx_frames;
@ -426,6 +428,7 @@ static int mlx4_en_get_coalesce(struct net_device *dev,
coal->rx_coalesce_usecs_high = priv->rx_usecs_high;
coal->rate_sample_interval = priv->sample_interval;
coal->use_adaptive_rx_coalesce = priv->adaptive_rx_coal;
return 0;
}
@ -434,6 +437,9 @@ static int mlx4_en_set_coalesce(struct net_device *dev,
{
struct mlx4_en_priv *priv = netdev_priv(dev);
if (!coal->tx_max_coalesced_frames_irq)
return -EINVAL;
priv->rx_frames = (coal->rx_max_coalesced_frames ==
MLX4_EN_AUTO_CONF) ?
MLX4_EN_RX_COAL_TARGET :
@ -457,6 +463,7 @@ static int mlx4_en_set_coalesce(struct net_device *dev,
priv->rx_usecs_high = coal->rx_coalesce_usecs_high;
priv->sample_interval = coal->rate_sample_interval;
priv->adaptive_rx_coal = coal->use_adaptive_rx_coalesce;
priv->tx_work_limit = coal->tx_max_coalesced_frames_irq;
return mlx4_en_moderation_update(priv);
}


@ -2336,7 +2336,7 @@ static void mlx4_en_add_vxlan_port(struct net_device *dev,
struct mlx4_en_priv *priv = netdev_priv(dev);
__be16 current_port;
if (!(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_VXLAN_OFFLOADS))
if (priv->mdev->dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN)
return;
if (sa_family == AF_INET6)
@ -2473,6 +2473,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
MLX4_WQE_CTRL_SOLICITED);
priv->num_tx_rings_p_up = mdev->profile.num_tx_rings_p_up;
priv->tx_ring_num = prof->tx_ring_num;
priv->tx_work_limit = MLX4_EN_DEFAULT_TX_WORK;
priv->tx_ring = kzalloc(sizeof(struct mlx4_en_tx_ring *) * MAX_TX_RINGS,
GFP_KERNEL);


@ -40,6 +40,7 @@
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/vmalloc.h>
#include <linux/irq.h>
#include "mlx4_en.h"
@ -782,6 +783,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
PKT_HASH_TYPE_L3);
skb_record_rx_queue(gro_skb, cq->ring);
skb_mark_napi_id(gro_skb, &cq->napi);
if (ring->hwtstamp_rx_filter == HWTSTAMP_FILTER_ALL) {
timestamp = mlx4_en_get_cqe_ts(cqe);
@ -896,16 +898,25 @@ int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget)
/* If we used up all the quota - we're probably not done yet... */
if (done == budget) {
int cpu_curr;
const struct cpumask *aff;
INC_PERF_COUNTER(priv->pstats.napi_quota);
if (unlikely(cq->mcq.irq_affinity_change)) {
cq->mcq.irq_affinity_change = false;
cpu_curr = smp_processor_id();
aff = irq_desc_get_irq_data(cq->irq_desc)->affinity;
if (unlikely(!cpumask_test_cpu(cpu_curr, aff))) {
/* Current cpu is not according to smp_irq_affinity -
* probably affinity changed. need to stop this NAPI
* poll, and restart it on the right CPU
*/
napi_complete(napi);
mlx4_en_arm_cq(priv, cq);
return 0;
}
} else {
/* Done for now */
cq->mcq.irq_affinity_change = false;
napi_complete(napi);
mlx4_en_arm_cq(priv, cq);
}


@ -351,9 +351,8 @@ int mlx4_en_free_tx_buf(struct net_device *dev, struct mlx4_en_tx_ring *ring)
return cnt;
}
static int mlx4_en_process_tx_cq(struct net_device *dev,
struct mlx4_en_cq *cq,
int budget)
static bool mlx4_en_process_tx_cq(struct net_device *dev,
struct mlx4_en_cq *cq)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_cq *mcq = &cq->mcq;
@ -372,9 +371,10 @@ static int mlx4_en_process_tx_cq(struct net_device *dev,
int factor = priv->cqe_factor;
u64 timestamp = 0;
int done = 0;
int budget = priv->tx_work_limit;
if (!priv->port_up)
return 0;
return true;
index = cons_index & size_mask;
cqe = &buf[(index << factor) + factor];
@ -447,7 +447,7 @@ static int mlx4_en_process_tx_cq(struct net_device *dev,
netif_tx_wake_queue(ring->tx_queue);
ring->wake_queue++;
}
return done;
return done < budget;
}
void mlx4_en_tx_irq(struct mlx4_cq *mcq)
@ -467,24 +467,16 @@ int mlx4_en_poll_tx_cq(struct napi_struct *napi, int budget)
struct mlx4_en_cq *cq = container_of(napi, struct mlx4_en_cq, napi);
struct net_device *dev = cq->dev;
struct mlx4_en_priv *priv = netdev_priv(dev);
int done;
int clean_complete;
done = mlx4_en_process_tx_cq(dev, cq, budget);
clean_complete = mlx4_en_process_tx_cq(dev, cq);
if (!clean_complete)
return budget;
/* If we used up all the quota - we're probably not done yet... */
if (done < budget) {
/* Done for now */
cq->mcq.irq_affinity_change = false;
napi_complete(napi);
mlx4_en_arm_cq(priv, cq);
return done;
} else if (unlikely(cq->mcq.irq_affinity_change)) {
cq->mcq.irq_affinity_change = false;
napi_complete(napi);
mlx4_en_arm_cq(priv, cq);
return 0;
}
return budget;
napi_complete(napi);
mlx4_en_arm_cq(priv, cq);
return 0;
}
static struct mlx4_en_tx_desc *mlx4_en_bounce_to_desc(struct mlx4_en_priv *priv,


@ -53,11 +53,6 @@ enum {
MLX4_EQ_ENTRY_SIZE = 0x20
};
struct mlx4_irq_notify {
void *arg;
struct irq_affinity_notify notify;
};
#define MLX4_EQ_STATUS_OK ( 0 << 28)
#define MLX4_EQ_STATUS_WRITE_FAIL (10 << 28)
#define MLX4_EQ_OWNER_SW ( 0 << 24)
@ -1088,57 +1083,6 @@ static void mlx4_unmap_clr_int(struct mlx4_dev *dev)
iounmap(priv->clr_base);
}
static void mlx4_irq_notifier_notify(struct irq_affinity_notify *notify,
const cpumask_t *mask)
{
struct mlx4_irq_notify *n = container_of(notify,
struct mlx4_irq_notify,
notify);
struct mlx4_priv *priv = (struct mlx4_priv *)n->arg;
struct radix_tree_iter iter;
void **slot;
radix_tree_for_each_slot(slot, &priv->cq_table.tree, &iter, 0) {
struct mlx4_cq *cq = (struct mlx4_cq *)(*slot);
if (cq->irq == notify->irq)
cq->irq_affinity_change = true;
}
}
static void mlx4_release_irq_notifier(struct kref *ref)
{
struct mlx4_irq_notify *n = container_of(ref, struct mlx4_irq_notify,
notify.kref);
kfree(n);
}
static void mlx4_assign_irq_notifier(struct mlx4_priv *priv,
struct mlx4_dev *dev, int irq)
{
struct mlx4_irq_notify *irq_notifier = NULL;
int err = 0;
irq_notifier = kzalloc(sizeof(*irq_notifier), GFP_KERNEL);
if (!irq_notifier) {
mlx4_warn(dev, "Failed to allocate irq notifier. irq %d\n",
irq);
return;
}
irq_notifier->notify.irq = irq;
irq_notifier->notify.notify = mlx4_irq_notifier_notify;
irq_notifier->notify.release = mlx4_release_irq_notifier;
irq_notifier->arg = priv;
err = irq_set_affinity_notifier(irq, &irq_notifier->notify);
if (err) {
kfree(irq_notifier);
irq_notifier = NULL;
mlx4_warn(dev, "Failed to set irq notifier. irq %d\n", irq);
}
}
int mlx4_alloc_eq_table(struct mlx4_dev *dev)
{
struct mlx4_priv *priv = mlx4_priv(dev);
@ -1409,8 +1353,6 @@ int mlx4_assign_eq(struct mlx4_dev *dev, char *name, struct cpu_rmap *rmap,
continue;
/*we dont want to break here*/
}
mlx4_assign_irq_notifier(priv, dev,
priv->eq_table.eq[vec].irq);
eq_set_ci(&priv->eq_table.eq[vec], 1);
}
@ -1427,6 +1369,14 @@ int mlx4_assign_eq(struct mlx4_dev *dev, char *name, struct cpu_rmap *rmap,
}
EXPORT_SYMBOL(mlx4_assign_eq);
int mlx4_eq_get_irq(struct mlx4_dev *dev, int vec)
{
struct mlx4_priv *priv = mlx4_priv(dev);
return priv->eq_table.eq[vec].irq;
}
EXPORT_SYMBOL(mlx4_eq_get_irq);
void mlx4_release_eq(struct mlx4_dev *dev, int vec)
{
struct mlx4_priv *priv = mlx4_priv(dev);
@ -1438,9 +1388,6 @@ void mlx4_release_eq(struct mlx4_dev *dev, int vec)
Belonging to a legacy EQ*/
mutex_lock(&priv->msix_ctl.pool_lock);
if (priv->msix_ctl.pool_bm & 1ULL << i) {
irq_set_affinity_notifier(
priv->eq_table.eq[vec].irq,
NULL);
free_irq(priv->eq_table.eq[vec].irq,
&priv->eq_table.eq[vec]);
priv->msix_ctl.pool_bm &= ~(1ULL << i);


@ -126,6 +126,8 @@ enum {
#define MAX_TX_RINGS (MLX4_EN_MAX_TX_RING_P_UP * \
MLX4_EN_NUM_UP)
#define MLX4_EN_DEFAULT_TX_WORK 256
/* Target number of packets to coalesce with interrupt moderation */
#define MLX4_EN_RX_COAL_TARGET 44
#define MLX4_EN_RX_COAL_TIME 0x10
@ -343,6 +345,7 @@ struct mlx4_en_cq {
#define CQ_USER_PEND (MLX4_EN_CQ_STATE_POLL | MLX4_EN_CQ_STATE_POLL_YIELD)
spinlock_t poll_lock; /* protects from LLS/napi conflicts */
#endif /* CONFIG_NET_RX_BUSY_POLL */
struct irq_desc *irq_desc;
};
struct mlx4_en_port_profile {
@ -542,6 +545,7 @@ struct mlx4_en_priv {
__be32 ctrl_flags;
u32 flags;
u8 num_tx_rings_p_up;
u32 tx_work_limit;
u32 tx_ring_num;
u32 rx_ring_num;
u32 rx_skb_size;


@ -538,6 +538,7 @@ enum rtl_register_content {
MagicPacket = (1 << 5), /* Wake up when receives a Magic Packet */
LinkUp = (1 << 4), /* Wake up when the cable connection is re-established */
Jumbo_En0 = (1 << 2), /* 8168 only. Reserved in the 8168b */
Rdy_to_L23 = (1 << 1), /* L23 Enable */
Beacon_en = (1 << 0), /* 8168 only. Reserved in the 8168b */
/* Config4 register */
@ -4897,6 +4898,21 @@ static void rtl_enable_clock_request(struct pci_dev *pdev)
PCI_EXP_LNKCTL_CLKREQ_EN);
}
static void rtl_pcie_state_l2l3_enable(struct rtl8169_private *tp, bool enable)
{
void __iomem *ioaddr = tp->mmio_addr;
u8 data;
data = RTL_R8(Config3);
if (enable)
data |= Rdy_to_L23;
else
data &= ~Rdy_to_L23;
RTL_W8(Config3, data);
}
#define R8168_CPCMD_QUIRK_MASK (\
EnableBist | \
Mac_dbgo_oe | \
@ -5246,6 +5262,7 @@ static void rtl_hw_start_8411(struct rtl8169_private *tp)
};
rtl_hw_start_8168f(tp);
rtl_pcie_state_l2l3_enable(tp, false);
rtl_ephy_init(tp, e_info_8168f_1, ARRAY_SIZE(e_info_8168f_1));
@ -5284,6 +5301,8 @@ static void rtl_hw_start_8168g_1(struct rtl8169_private *tp)
rtl_w1w0_eri(tp, 0x2fc, ERIAR_MASK_0001, 0x01, 0x06, ERIAR_EXGMAC);
rtl_w1w0_eri(tp, 0x1b0, ERIAR_MASK_0011, 0x0000, 0x1000, ERIAR_EXGMAC);
rtl_pcie_state_l2l3_enable(tp, false);
}
static void rtl_hw_start_8168g_2(struct rtl8169_private *tp)
@ -5536,6 +5555,8 @@ static void rtl_hw_start_8105e_1(struct rtl8169_private *tp)
RTL_W8(DLLPR, RTL_R8(DLLPR) | PFM_EN);
rtl_ephy_init(tp, e_info_8105e_1, ARRAY_SIZE(e_info_8105e_1));
rtl_pcie_state_l2l3_enable(tp, false);
}
static void rtl_hw_start_8105e_2(struct rtl8169_private *tp)
@ -5571,6 +5592,8 @@ static void rtl_hw_start_8402(struct rtl8169_private *tp)
rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
rtl_w1w0_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0e00, 0xff00, ERIAR_EXGMAC);
rtl_pcie_state_l2l3_enable(tp, false);
}
static void rtl_hw_start_8106(struct rtl8169_private *tp)
@ -5583,6 +5606,8 @@ static void rtl_hw_start_8106(struct rtl8169_private *tp)
RTL_W32(MISC, (RTL_R32(MISC) | DISABLE_LAN_EN) & ~EARLY_TALLY_EN);
RTL_W8(MCU, RTL_R8(MCU) | EN_NDP | EN_OOB_RESET);
RTL_W8(DLLPR, RTL_R8(DLLPR) & ~PFM_EN);
rtl_pcie_state_l2l3_enable(tp, false);
}
static void rtl_hw_start_8101(struct net_device *dev)


@ -320,11 +320,8 @@ static void dwmac1000_set_eee_timer(void __iomem *ioaddr, int ls, int tw)
static void dwmac1000_ctrl_ane(void __iomem *ioaddr, bool restart)
{
u32 value;
value = readl(ioaddr + GMAC_AN_CTRL);
/* auto negotiation enable and External Loopback enable */
value = GMAC_AN_CTRL_ANE | GMAC_AN_CTRL_ELE;
u32 value = GMAC_AN_CTRL_ANE | GMAC_AN_CTRL_ELE;
if (restart)
value |= GMAC_AN_CTRL_RAN;


@ -145,7 +145,7 @@ static void enh_desc_get_ext_status(void *data, struct stmmac_extra_stats *x,
x->rx_msg_type_delay_req++;
else if (p->des4.erx.msg_type == RDES_EXT_DELAY_RESP)
x->rx_msg_type_delay_resp++;
else if (p->des4.erx.msg_type == RDES_EXT_DELAY_REQ)
else if (p->des4.erx.msg_type == RDES_EXT_PDELAY_REQ)
x->rx_msg_type_pdelay_req++;
else if (p->des4.erx.msg_type == RDES_EXT_PDELAY_RESP)
x->rx_msg_type_pdelay_resp++;


@ -291,7 +291,11 @@ static int dfx_hw_dma_uninit(DFX_board_t *bp, PI_UINT32 type);
static int dfx_rcv_init(DFX_board_t *bp, int get_buffers);
static void dfx_rcv_queue_process(DFX_board_t *bp);
#ifdef DYNAMIC_BUFFERS
static void dfx_rcv_flush(DFX_board_t *bp);
#else
static inline void dfx_rcv_flush(DFX_board_t *bp) {}
#endif
static netdev_tx_t dfx_xmt_queue_pkt(struct sk_buff *skb,
struct net_device *dev);
@ -2849,7 +2853,7 @@ static int dfx_hw_dma_uninit(DFX_board_t *bp, PI_UINT32 type)
* Align an sk_buff to a boundary power of 2
*
*/
#ifdef DYNAMIC_BUFFERS
static void my_skb_align(struct sk_buff *skb, int n)
{
unsigned long x = (unsigned long)skb->data;
@ -2859,7 +2863,7 @@ static void my_skb_align(struct sk_buff *skb, int n)
skb_reserve(skb, v - x);
}
#endif
/*
* ================
@ -3074,10 +3078,7 @@ static void dfx_rcv_queue_process(
break;
}
else {
#ifndef DYNAMIC_BUFFERS
if (! rx_in_place)
#endif
{
if (!rx_in_place) {
/* Receive buffer allocated, pass receive packet up */
skb_copy_to_linear_data(skb,
@ -3453,10 +3454,6 @@ static void dfx_rcv_flush( DFX_board_t *bp )
}
}
#else
static inline void dfx_rcv_flush( DFX_board_t *bp )
{
}
#endif /* DYNAMIC_BUFFERS */
/*


@ -1323,15 +1323,15 @@ static bool dp83640_rxtstamp(struct phy_device *phydev,
{
struct dp83640_private *dp83640 = phydev->priv;
if (!dp83640->hwts_rx_en)
return false;
if (is_status_frame(skb, type)) {
decode_status_frame(dp83640, skb);
kfree_skb(skb);
return true;
}
if (!dp83640->hwts_rx_en)
return false;
SKB_PTP_TYPE(skb) = type;
skb_queue_tail(&dp83640->rx_queue, skb);
schedule_work(&dp83640->ts_work);


@ -187,6 +187,50 @@ struct mii_bus *of_mdio_find_bus(struct device_node *mdio_bus_np)
return d ? to_mii_bus(d) : NULL;
}
EXPORT_SYMBOL(of_mdio_find_bus);
/* Walk the list of subnodes of a mdio bus and look for a node that matches the
* phy's address with its 'reg' property. If found, set the of_node pointer for
* the phy. This allows auto-probed phy devices to be supplied with information
* passed in via DT.
*/
static void of_mdiobus_link_phydev(struct mii_bus *mdio,
struct phy_device *phydev)
{
struct device *dev = &phydev->dev;
struct device_node *child;
if (dev->of_node || !mdio->dev.of_node)
return;
for_each_available_child_of_node(mdio->dev.of_node, child) {
int addr;
int ret;
ret = of_property_read_u32(child, "reg", &addr);
if (ret < 0) {
dev_err(dev, "%s has invalid PHY address\n",
child->full_name);
continue;
}
/* A PHY must have a reg property in the range [0-31] */
if (addr >= PHY_MAX_ADDR) {
dev_err(dev, "%s PHY address %i is too large\n",
child->full_name, addr);
continue;
}
if (addr == phydev->addr) {
dev->of_node = child;
return;
}
}
}
#else /* !IS_ENABLED(CONFIG_OF_MDIO) */
static inline void of_mdiobus_link_phydev(struct mii_bus *mdio,
struct phy_device *phydev)
{
}
#endif
/**


@ -539,7 +539,7 @@ static int get_filter(void __user *arg, struct sock_filter **p)
{
struct sock_fprog uprog;
struct sock_filter *code = NULL;
int len, err;
int len;
if (copy_from_user(&uprog, arg, sizeof(uprog)))
return -EFAULT;
@ -554,12 +554,6 @@ static int get_filter(void __user *arg, struct sock_filter **p)
if (IS_ERR(code))
return PTR_ERR(code);
err = sk_chk_filter(code, uprog.len);
if (err) {
kfree(code);
return err;
}
*p = code;
return uprog.len;
}


@ -675,7 +675,7 @@ static int pppoe_connect(struct socket *sock, struct sockaddr *uservaddr,
po->chan.hdrlen = (sizeof(struct pppoe_hdr) +
dev->hard_header_len);
po->chan.mtu = dev->mtu - sizeof(struct pppoe_hdr);
po->chan.mtu = dev->mtu - sizeof(struct pppoe_hdr) - 2;
po->chan.private = sk;
po->chan.ops = &pppoe_chan_ops;
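
Worked numbers for this change, assuming a standard 1500-byte Ethernet
MTU: sizeof(struct pppoe_hdr) is 6, and multilink PPP needs 2 further
bytes for the PPP protocol field, so the usable channel MTU is

    1500 - 6 - 2 = 1492

rather than the 1494 the old expression produced, which let MLPPP build
frames that overran the Ethernet payload.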


@ -258,10 +258,8 @@ struct hso_serial {
* so as not to drop characters on the floor.
*/
int curr_rx_urb_idx;
u16 curr_rx_urb_offset;
u8 rx_urb_filled[MAX_RX_URBS];
struct tasklet_struct unthrottle_tasklet;
struct work_struct retry_unthrottle_workqueue;
};
struct hso_device {
@ -1252,14 +1250,6 @@ static void hso_unthrottle(struct tty_struct *tty)
tasklet_hi_schedule(&serial->unthrottle_tasklet);
}
static void hso_unthrottle_workfunc(struct work_struct *work)
{
struct hso_serial *serial =
container_of(work, struct hso_serial,
retry_unthrottle_workqueue);
hso_unthrottle_tasklet(serial);
}
/* open the requested serial port */
static int hso_serial_open(struct tty_struct *tty, struct file *filp)
{
@ -1295,8 +1285,6 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
tasklet_init(&serial->unthrottle_tasklet,
(void (*)(unsigned long))hso_unthrottle_tasklet,
(unsigned long)serial);
INIT_WORK(&serial->retry_unthrottle_workqueue,
hso_unthrottle_workfunc);
result = hso_start_serial_device(serial->parent, GFP_KERNEL);
if (result) {
hso_stop_serial_device(serial->parent);
@ -1345,7 +1333,6 @@ static void hso_serial_close(struct tty_struct *tty, struct file *filp)
if (!usb_gone)
hso_stop_serial_device(serial->parent);
tasklet_kill(&serial->unthrottle_tasklet);
cancel_work_sync(&serial->retry_unthrottle_workqueue);
}
if (!usb_gone)
@ -2013,8 +2000,7 @@ static void ctrl_callback(struct urb *urb)
static int put_rxbuf_data(struct urb *urb, struct hso_serial *serial)
{
struct tty_struct *tty;
int write_length_remaining = 0;
int curr_write_len;
int count;
/* Sanity check */
if (urb == NULL || serial == NULL) {
@ -2024,29 +2010,28 @@ static int put_rxbuf_data(struct urb *urb, struct hso_serial *serial)
tty = tty_port_tty_get(&serial->port);
/* Push data to tty */
write_length_remaining = urb->actual_length -
serial->curr_rx_urb_offset;
D1("data to push to tty");
while (write_length_remaining) {
if (tty && test_bit(TTY_THROTTLED, &tty->flags)) {
tty_kref_put(tty);
return -1;
}
curr_write_len = tty_insert_flip_string(&serial->port,
urb->transfer_buffer + serial->curr_rx_urb_offset,
write_length_remaining);
serial->curr_rx_urb_offset += curr_write_len;
write_length_remaining -= curr_write_len;
tty_flip_buffer_push(&serial->port);
if (tty && test_bit(TTY_THROTTLED, &tty->flags)) {
tty_kref_put(tty);
return -1;
}
/* Push data to tty */
D1("data to push to tty");
count = tty_buffer_request_room(&serial->port, urb->actual_length);
if (count >= urb->actual_length) {
tty_insert_flip_string(&serial->port, urb->transfer_buffer,
urb->actual_length);
tty_flip_buffer_push(&serial->port);
} else {
dev_warn(&serial->parent->usb->dev,
"dropping data, %d bytes lost\n", urb->actual_length);
}
tty_kref_put(tty);
if (write_length_remaining == 0) {
serial->curr_rx_urb_offset = 0;
serial->rx_urb_filled[hso_urb_to_index(serial, urb)] = 0;
}
return write_length_remaining;
serial->rx_urb_filled[hso_urb_to_index(serial, urb)] = 0;
return 0;
}
@ -2217,7 +2202,6 @@ static int hso_stop_serial_device(struct hso_device *hso_dev)
}
}
serial->curr_rx_urb_idx = 0;
serial->curr_rx_urb_offset = 0;
if (serial->tx_urb)
usb_kill_urb(serial->tx_urb);


@ -741,6 +741,7 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x19d2, 0x1424, 2)},
{QMI_FIXED_INTF(0x19d2, 0x1425, 2)},
{QMI_FIXED_INTF(0x19d2, 0x1426, 2)}, /* ZTE MF91 */
{QMI_FIXED_INTF(0x19d2, 0x1428, 2)}, /* Telewell TW-LTE 4G v2 */
{QMI_FIXED_INTF(0x19d2, 0x2002, 4)}, /* ZTE (Vodafone) K3765-Z */
{QMI_FIXED_INTF(0x0f3d, 0x68a2, 8)}, /* Sierra Wireless MC7700 */
{QMI_FIXED_INTF(0x114f, 0x68a2, 8)}, /* Sierra Wireless MC7750 */


@ -1359,7 +1359,7 @@ static void r8152_csum_workaround(struct r8152 *tp, struct sk_buff *skb,
struct sk_buff_head seg_list;
struct sk_buff *segs, *nskb;
features &= ~(NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_TSO);
features &= ~(NETIF_F_SG | NETIF_F_IPV6_CSUM | NETIF_F_TSO6);
segs = skb_gso_segment(skb, features);
if (IS_ERR(segs) || !segs)
goto drop;
@ -3204,8 +3204,13 @@ static void rtl8152_get_ethtool_stats(struct net_device *dev,
struct r8152 *tp = netdev_priv(dev);
struct tally_counter tally;
if (usb_autopm_get_interface(tp->intf) < 0)
return;
generic_ocp_read(tp, PLA_TALLYCNT, sizeof(tally), &tally, MCU_TYPE_PLA);
usb_autopm_put_interface(tp->intf);
data[0] = le64_to_cpu(tally.tx_packets);
data[1] = le64_to_cpu(tally.rx_packets);
data[2] = le64_to_cpu(tally.tx_errors);


@ -1714,6 +1714,18 @@ static int smsc95xx_resume(struct usb_interface *intf)
return ret;
}
static int smsc95xx_reset_resume(struct usb_interface *intf)
{
struct usbnet *dev = usb_get_intfdata(intf);
int ret;
ret = smsc95xx_reset(dev);
if (ret < 0)
return ret;
return smsc95xx_resume(intf);
}
static void smsc95xx_rx_csum_offload(struct sk_buff *skb)
{
skb->csum = *(u16 *)(skb_tail_pointer(skb) - 2);
@ -2004,7 +2016,7 @@ static struct usb_driver smsc95xx_driver = {
.probe = usbnet_probe,
.suspend = smsc95xx_suspend,
.resume = smsc95xx_resume,
.reset_resume = smsc95xx_resume,
.reset_resume = smsc95xx_reset_resume,
.disconnect = usbnet_disconnect,
.disable_hub_initiated_lpm = 1,
.supports_autosuspend = 1,


@ -2363,7 +2363,7 @@ static char *type_strings[] = {
"FarSync TE1"
};
static void
static int
fst_init_card(struct fst_card_info *card)
{
int i;
@ -2374,24 +2374,21 @@ fst_init_card(struct fst_card_info *card)
* we'll have to revise it in some way then.
*/
for (i = 0; i < card->nports; i++) {
err = register_hdlc_device(card->ports[i].dev);
if (err < 0) {
int j;
err = register_hdlc_device(card->ports[i].dev);
if (err < 0) {
pr_err("Cannot register HDLC device for port %d (errno %d)\n",
i, -err);
for (j = i; j < card->nports; j++) {
free_netdev(card->ports[j].dev);
card->ports[j].dev = NULL;
}
card->nports = i;
break;
}
i, -err);
while (i--)
unregister_hdlc_device(card->ports[i].dev);
return err;
}
}
pr_info("%s-%s: %s IRQ%d, %d ports\n",
port_to_dev(&card->ports[0])->name,
port_to_dev(&card->ports[card->nports - 1])->name,
type_strings[card->type], card->irq, card->nports);
return 0;
}
static const struct net_device_ops fst_ops = {
@ -2447,15 +2444,12 @@ fst_add_one(struct pci_dev *pdev, const struct pci_device_id *ent)
/* Try to enable the device */
if ((err = pci_enable_device(pdev)) != 0) {
pr_err("Failed to enable card. Err %d\n", -err);
kfree(card);
return err;
goto enable_fail;
}
if ((err = pci_request_regions(pdev, "FarSync")) !=0) {
pr_err("Failed to allocate regions. Err %d\n", -err);
pci_disable_device(pdev);
kfree(card);
return err;
goto regions_fail;
}
/* Get virtual addresses of memory regions */
@ -2464,30 +2458,21 @@ fst_add_one(struct pci_dev *pdev, const struct pci_device_id *ent)
card->phys_ctlmem = pci_resource_start(pdev, 3);
if ((card->mem = ioremap(card->phys_mem, FST_MEMSIZE)) == NULL) {
pr_err("Physical memory remap failed\n");
pci_release_regions(pdev);
pci_disable_device(pdev);
kfree(card);
return -ENODEV;
err = -ENODEV;
goto ioremap_physmem_fail;
}
if ((card->ctlmem = ioremap(card->phys_ctlmem, 0x10)) == NULL) {
pr_err("Control memory remap failed\n");
pci_release_regions(pdev);
pci_disable_device(pdev);
iounmap(card->mem);
kfree(card);
return -ENODEV;
err = -ENODEV;
goto ioremap_ctlmem_fail;
}
dbg(DBG_PCI, "kernel mem %p, ctlmem %p\n", card->mem, card->ctlmem);
/* Register the interrupt handler */
if (request_irq(pdev->irq, fst_intr, IRQF_SHARED, FST_DEV_NAME, card)) {
pr_err("Unable to register interrupt %d\n", card->irq);
pci_release_regions(pdev);
pci_disable_device(pdev);
iounmap(card->ctlmem);
iounmap(card->mem);
kfree(card);
return -ENODEV;
err = -ENODEV;
goto irq_fail;
}
/* Record info we need */
@ -2513,13 +2498,8 @@ fst_add_one(struct pci_dev *pdev, const struct pci_device_id *ent)
while (i--)
free_netdev(card->ports[i].dev);
pr_err("FarSync: out of memory\n");
free_irq(card->irq, card);
pci_release_regions(pdev);
pci_disable_device(pdev);
iounmap(card->ctlmem);
iounmap(card->mem);
kfree(card);
return -ENODEV;
err = -ENOMEM;
goto hdlcdev_fail;
}
card->ports[i].dev = dev;
card->ports[i].card = card;
@ -2565,9 +2545,16 @@ fst_add_one(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_set_drvdata(pdev, card);
/* Remainder of card setup */
if (no_of_cards_added >= FST_MAX_CARDS) {
pr_err("FarSync: too many cards\n");
err = -ENOMEM;
goto card_array_fail;
}
fst_card_array[no_of_cards_added] = card;
card->card_no = no_of_cards_added++; /* Record instance and bump it */
fst_init_card(card);
err = fst_init_card(card);
if (err)
goto init_card_fail;
if (card->family == FST_FAMILY_TXU) {
/*
* Allocate a dma buffer for transmit and receives
@ -2577,29 +2564,46 @@ fst_add_one(struct pci_dev *pdev, const struct pci_device_id *ent)
&card->rx_dma_handle_card);
if (card->rx_dma_handle_host == NULL) {
pr_err("Could not allocate rx dma buffer\n");
fst_disable_intr(card);
pci_release_regions(pdev);
pci_disable_device(pdev);
iounmap(card->ctlmem);
iounmap(card->mem);
kfree(card);
return -ENOMEM;
err = -ENOMEM;
goto rx_dma_fail;
}
card->tx_dma_handle_host =
pci_alloc_consistent(card->device, FST_MAX_MTU,
&card->tx_dma_handle_card);
if (card->tx_dma_handle_host == NULL) {
pr_err("Could not allocate tx dma buffer\n");
fst_disable_intr(card);
pci_release_regions(pdev);
pci_disable_device(pdev);
iounmap(card->ctlmem);
iounmap(card->mem);
kfree(card);
return -ENOMEM;
err = -ENOMEM;
goto tx_dma_fail;
}
}
return 0; /* Success */
tx_dma_fail:
pci_free_consistent(card->device, FST_MAX_MTU,
card->rx_dma_handle_host,
card->rx_dma_handle_card);
rx_dma_fail:
fst_disable_intr(card);
for (i = 0 ; i < card->nports ; i++)
unregister_hdlc_device(card->ports[i].dev);
init_card_fail:
fst_card_array[card->card_no] = NULL;
card_array_fail:
for (i = 0 ; i < card->nports ; i++)
free_netdev(card->ports[i].dev);
hdlcdev_fail:
free_irq(card->irq, card);
irq_fail:
iounmap(card->ctlmem);
ioremap_ctlmem_fail:
iounmap(card->mem);
ioremap_physmem_fail:
pci_release_regions(pdev);
regions_fail:
pci_disable_device(pdev);
enable_fail:
kfree(card);
return err;
}
/*


@ -795,7 +795,11 @@ int ath10k_core_start(struct ath10k *ar)
if (status)
goto err_htc_stop;
ar->free_vdev_map = (1 << TARGET_NUM_VDEVS) - 1;
if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features))
ar->free_vdev_map = (1 << TARGET_10X_NUM_VDEVS) - 1;
else
ar->free_vdev_map = (1 << TARGET_NUM_VDEVS) - 1;
INIT_LIST_HEAD(&ar->arvifs);
if (!test_bit(ATH10K_FLAG_FIRST_BOOT_DONE, &ar->dev_flags))


@ -312,7 +312,6 @@ static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
int msdu_len, msdu_chaining = 0;
struct sk_buff *msdu;
struct htt_rx_desc *rx_desc;
bool corrupted = false;
lockdep_assert_held(&htt->rx_ring.lock);
@ -439,9 +438,6 @@ static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
last_msdu = __le32_to_cpu(rx_desc->msdu_end.info0) &
RX_MSDU_END_INFO0_LAST_MSDU;
if (msdu_chaining && !last_msdu)
corrupted = true;
if (last_msdu) {
msdu->next = NULL;
break;
@ -456,20 +452,6 @@ static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
if (*head_msdu == NULL)
msdu_chaining = -1;
/*
* Apparently FW sometimes reports weird chained MSDU sequences with
* more than one rx descriptor. This seems like a bug but needs more
* analyzing. For the time being fix it by dropping such sequences to
* avoid blowing up the host system.
*/
if (corrupted) {
ath10k_warn("failed to pop chained msdus, dropping\n");
ath10k_htt_rx_free_msdu_chain(*head_msdu);
*head_msdu = NULL;
*tail_msdu = NULL;
msdu_chaining = -EINVAL;
}
/*
* Don't refill the ring yet.
*


@ -1184,8 +1184,6 @@ static int brcmf_usb_probe_cb(struct brcmf_usbdev_info *devinfo)
bus->bus_priv.usb = bus_pub;
dev_set_drvdata(dev, bus);
bus->ops = &brcmf_usb_bus_ops;
bus->chip = bus_pub->devid;
bus->chiprev = bus_pub->chiprev;
bus->proto_type = BRCMF_PROTO_BCDC;
bus->always_use_fws_queue = true;
@ -1194,6 +1192,9 @@ static int brcmf_usb_probe_cb(struct brcmf_usbdev_info *devinfo)
if (ret)
goto fail;
}
bus->chip = bus_pub->devid;
bus->chiprev = bus_pub->chiprev;
/* request firmware here */
brcmf_fw_get_firmwares(dev, 0, brcmf_usb_get_fwname(devinfo), NULL,
brcmf_usb_probe_phase2);


@ -1068,13 +1068,6 @@ int iwlagn_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
/* recalculate basic rates */
iwl_calc_basic_rates(priv, ctx);
/*
* force CTS-to-self frames protection if RTS-CTS is not preferred
* one aggregation protection method
*/
if (!priv->hw_params.use_rts_for_aggregation)
ctx->staging.flags |= RXON_FLG_SELF_CTS_EN;
if ((ctx->vif && ctx->vif->bss_conf.use_short_slot) ||
!(ctx->staging.flags & RXON_FLG_BAND_24G_MSK))
ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
@ -1480,11 +1473,6 @@ void iwlagn_bss_info_changed(struct ieee80211_hw *hw,
else
ctx->staging.flags &= ~RXON_FLG_TGG_PROTECT_MSK;
if (bss_conf->use_cts_prot)
ctx->staging.flags |= RXON_FLG_SELF_CTS_EN;
else
ctx->staging.flags &= ~RXON_FLG_SELF_CTS_EN;
memcpy(ctx->staging.bssid_addr, bss_conf->bssid, ETH_ALEN);
if (vif->type == NL80211_IFTYPE_AP ||


@ -88,6 +88,7 @@
* P2P client interfaces simultaneously if they are in different bindings.
* @IWL_UCODE_TLV_FLAGS_P2P_BSS_PS_SCM: support power save on BSS station and
* P2P client interfaces simultaneously if they are in same bindings.
* @IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT: General support for uAPSD
* @IWL_UCODE_TLV_FLAGS_P2P_PS_UAPSD: P2P client supports uAPSD power save
* @IWL_UCODE_TLV_FLAGS_BCAST_FILTERING: uCode supports broadcast filtering.
* @IWL_UCODE_TLV_FLAGS_GO_UAPSD: AP/GO interfaces support uAPSD clients


@ -667,10 +667,9 @@ static void iwl_mvm_mac_ctxt_cmd_common(struct iwl_mvm *mvm,
if (vif->bss_conf.qos)
cmd->qos_flags |= cpu_to_le32(MAC_QOS_FLG_UPDATE_EDCA);
if (vif->bss_conf.use_cts_prot) {
if (vif->bss_conf.use_cts_prot)
cmd->protection_flags |= cpu_to_le32(MAC_PROT_FLG_TGG_PROTECT);
cmd->protection_flags |= cpu_to_le32(MAC_PROT_FLG_SELF_CTS_EN);
}
IWL_DEBUG_RATE(mvm, "use_cts_prot %d, ht_operation_mode %d\n",
vif->bss_conf.use_cts_prot,
vif->bss_conf.ht_operation_mode);


@ -303,6 +303,13 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
hw->uapsd_max_sp_len = IWL_UAPSD_MAX_SP;
}
if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT &&
!iwlwifi_mod_params.uapsd_disable) {
hw->flags |= IEEE80211_HW_SUPPORTS_UAPSD;
hw->uapsd_queues = IWL_UAPSD_AC_INFO;
hw->uapsd_max_sp_len = IWL_UAPSD_MAX_SP;
}
hw->sta_data_size = sizeof(struct iwl_mvm_sta);
hw->vif_data_size = sizeof(struct iwl_mvm_vif);
hw->chanctx_data_size = sizeof(u16);
@ -1159,8 +1166,12 @@ static void iwl_mvm_bcast_filter_iterator(void *_data, u8 *mac,
bcast_mac = &cmd->macs[mvmvif->id];
/* enable filtering only for associated stations */
if (vif->type != NL80211_IFTYPE_STATION || !vif->bss_conf.assoc)
/*
* enable filtering only for associated stations, but not for P2P
* Clients
*/
if (vif->type != NL80211_IFTYPE_STATION || vif->p2p ||
!vif->bss_conf.assoc)
return;
bcast_mac->default_discard = 1;
@ -1237,10 +1248,6 @@ static int iwl_mvm_configure_bcast_filter(struct iwl_mvm *mvm,
if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BCAST_FILTERING))
return 0;
/* bcast filtering isn't supported for P2P client */
if (vif->p2p)
return 0;
if (!iwl_mvm_bcast_filter_build_cmd(mvm, &cmd))
return 0;


@ -588,9 +588,7 @@ static void iwl_build_scan_cmd(struct iwl_mvm *mvm,
struct iwl_scan_offload_cmd *scan,
struct iwl_mvm_scan_params *params)
{
scan->channel_count =
mvm->nvm_data->bands[IEEE80211_BAND_2GHZ].n_channels +
mvm->nvm_data->bands[IEEE80211_BAND_5GHZ].n_channels;
scan->channel_count = req->n_channels;
scan->quiet_time = cpu_to_le16(IWL_ACTIVE_QUIET_TIME);
scan->quiet_plcp_th = cpu_to_le16(IWL_PLCP_QUIET_THRESH);
scan->good_CRC_th = IWL_GOOD_CRC_TH_DEFAULT;
@ -669,61 +667,37 @@ static void iwl_build_channel_cfg(struct iwl_mvm *mvm,
struct cfg80211_sched_scan_request *req,
struct iwl_scan_channel_cfg *channels,
enum ieee80211_band band,
int *head, int *tail,
int *head,
u32 ssid_bitmap,
struct iwl_mvm_scan_params *params)
{
struct ieee80211_supported_band *s_band;
int n_channels = req->n_channels;
int i, j, index = 0;
bool partial;
int i, index = 0;
/*
* We have to configure all supported channels, even if we don't want to
* scan on them, but we have to send channels in the order that we want
* to scan. So add requested channels to head of the list and others to
* the end.
*/
s_band = &mvm->nvm_data->bands[band];
for (i = 0; i < req->n_channels; i++) {
struct ieee80211_channel *chan = req->channels[i];
for (i = 0; i < s_band->n_channels && *head <= *tail; i++) {
partial = false;
for (j = 0; j < n_channels; j++)
if (s_band->channels[i].center_freq ==
req->channels[j]->center_freq) {
index = *head;
(*head)++;
/*
* Channels that came with the request will be
* in partial scan .
*/
partial = true;
break;
}
if (!partial) {
index = *tail;
(*tail)--;
}
channels->channel_number[index] =
cpu_to_le16(ieee80211_frequency_to_channel(
s_band->channels[i].center_freq));
if (chan->band != band)
continue;
index = *head;
(*head)++;
channels->channel_number[index] = cpu_to_le16(chan->hw_value);
channels->dwell_time[index][0] = params->dwell[band].active;
channels->dwell_time[index][1] = params->dwell[band].passive;
channels->iter_count[index] = cpu_to_le16(1);
channels->iter_interval[index] = 0;
if (!(s_band->channels[i].flags & IEEE80211_CHAN_NO_IR))
if (!(chan->flags & IEEE80211_CHAN_NO_IR))
channels->type[index] |=
cpu_to_le32(IWL_SCAN_OFFLOAD_CHANNEL_ACTIVE);
channels->type[index] |=
cpu_to_le32(IWL_SCAN_OFFLOAD_CHANNEL_FULL);
if (partial)
channels->type[index] |=
cpu_to_le32(IWL_SCAN_OFFLOAD_CHANNEL_PARTIAL);
cpu_to_le32(IWL_SCAN_OFFLOAD_CHANNEL_FULL |
IWL_SCAN_OFFLOAD_CHANNEL_PARTIAL);
if (s_band->channels[i].flags & IEEE80211_CHAN_NO_HT40)
if (chan->flags & IEEE80211_CHAN_NO_HT40)
channels->type[index] |=
cpu_to_le32(IWL_SCAN_OFFLOAD_CHANNEL_NARROW);
@ -740,7 +714,6 @@ int iwl_mvm_config_sched_scan(struct iwl_mvm *mvm,
int band_2ghz = mvm->nvm_data->bands[IEEE80211_BAND_2GHZ].n_channels;
int band_5ghz = mvm->nvm_data->bands[IEEE80211_BAND_5GHZ].n_channels;
int head = 0;
int tail = band_2ghz + band_5ghz - 1;
u32 ssid_bitmap;
int cmd_len;
int ret;
@ -772,7 +745,7 @@ int iwl_mvm_config_sched_scan(struct iwl_mvm *mvm,
&scan_cfg->scan_cmd.tx_cmd[0],
scan_cfg->data);
iwl_build_channel_cfg(mvm, req, &scan_cfg->channel_cfg,
IEEE80211_BAND_2GHZ, &head, &tail,
IEEE80211_BAND_2GHZ, &head,
ssid_bitmap, &params);
}
if (band_5ghz) {
@ -782,7 +755,7 @@ int iwl_mvm_config_sched_scan(struct iwl_mvm *mvm,
scan_cfg->data +
SCAN_OFFLOAD_PROBE_REQ_SIZE);
iwl_build_channel_cfg(mvm, req, &scan_cfg->channel_cfg,
IEEE80211_BAND_5GHZ, &head, &tail,
IEEE80211_BAND_5GHZ, &head,
ssid_bitmap, &params);
}


@ -367,6 +367,7 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
{IWL_PCI_DEVICE(0x095A, 0x5012, iwl7265_2ac_cfg)},
{IWL_PCI_DEVICE(0x095A, 0x5412, iwl7265_2ac_cfg)},
{IWL_PCI_DEVICE(0x095A, 0x5410, iwl7265_2ac_cfg)},
{IWL_PCI_DEVICE(0x095A, 0x5510, iwl7265_2ac_cfg)},
{IWL_PCI_DEVICE(0x095A, 0x5400, iwl7265_2ac_cfg)},
{IWL_PCI_DEVICE(0x095A, 0x1010, iwl7265_2ac_cfg)},
{IWL_PCI_DEVICE(0x095A, 0x5000, iwl7265_2n_cfg)},
@ -380,7 +381,7 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
{IWL_PCI_DEVICE(0x095A, 0x9110, iwl7265_2ac_cfg)},
{IWL_PCI_DEVICE(0x095A, 0x9112, iwl7265_2ac_cfg)},
{IWL_PCI_DEVICE(0x095A, 0x9210, iwl7265_2ac_cfg)},
{IWL_PCI_DEVICE(0x095A, 0x9200, iwl7265_2ac_cfg)},
{IWL_PCI_DEVICE(0x095B, 0x9200, iwl7265_2ac_cfg)},
{IWL_PCI_DEVICE(0x095A, 0x9510, iwl7265_2ac_cfg)},
{IWL_PCI_DEVICE(0x095A, 0x9310, iwl7265_2ac_cfg)},
{IWL_PCI_DEVICE(0x095A, 0x9410, iwl7265_2ac_cfg)},


@ -185,6 +185,7 @@ mwifiex_11n_aggregate_pkt(struct mwifiex_private *priv,
skb_reserve(skb_aggr, headroom + sizeof(struct txpd));
tx_info_aggr = MWIFIEX_SKB_TXCB(skb_aggr);
memset(tx_info_aggr, 0, sizeof(*tx_info_aggr));
tx_info_aggr->bss_type = tx_info_src->bss_type;
tx_info_aggr->bss_num = tx_info_src->bss_num;


@ -220,6 +220,7 @@ mwifiex_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
}
tx_info = MWIFIEX_SKB_TXCB(skb);
memset(tx_info, 0, sizeof(*tx_info));
tx_info->bss_num = priv->bss_num;
tx_info->bss_type = priv->bss_type;
tx_info->pkt_len = pkt_len;


@ -453,6 +453,7 @@ int mwifiex_process_event(struct mwifiex_adapter *adapter)
if (skb) {
rx_info = MWIFIEX_SKB_RXCB(skb);
memset(rx_info, 0, sizeof(*rx_info));
rx_info->bss_num = priv->bss_num;
rx_info->bss_type = priv->bss_type;
}


@ -645,6 +645,7 @@ mwifiex_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
}
tx_info = MWIFIEX_SKB_TXCB(skb);
memset(tx_info, 0, sizeof(*tx_info));
tx_info->bss_num = priv->bss_num;
tx_info->bss_type = priv->bss_type;
tx_info->pkt_len = skb->len;


@ -150,6 +150,7 @@ int mwifiex_send_null_packet(struct mwifiex_private *priv, u8 flags)
return -1;
tx_info = MWIFIEX_SKB_TXCB(skb);
memset(tx_info, 0, sizeof(*tx_info));
tx_info->bss_num = priv->bss_num;
tx_info->bss_type = priv->bss_type;
tx_info->pkt_len = data_len - (sizeof(struct txpd) + INTF_HEADER_LEN);


@ -605,6 +605,7 @@ int mwifiex_send_tdls_data_frame(struct mwifiex_private *priv, const u8 *peer,
}
tx_info = MWIFIEX_SKB_TXCB(skb);
memset(tx_info, 0, sizeof(*tx_info));
tx_info->bss_num = priv->bss_num;
tx_info->bss_type = priv->bss_type;
@ -760,6 +761,7 @@ int mwifiex_send_tdls_action_frame(struct mwifiex_private *priv, const u8 *peer,
skb->priority = MWIFIEX_PRIO_VI;
tx_info = MWIFIEX_SKB_TXCB(skb);
memset(tx_info, 0, sizeof(*tx_info));
tx_info->bss_num = priv->bss_num;
tx_info->bss_type = priv->bss_type;
tx_info->flags |= MWIFIEX_BUF_FLAG_TDLS_PKT;


@@ -55,6 +55,7 @@ int mwifiex_handle_rx_packet(struct mwifiex_adapter *adapter,
return -1;
}
memset(rx_info, 0, sizeof(*rx_info));
rx_info->bss_num = priv->bss_num;
rx_info->bss_type = priv->bss_type;


@@ -175,6 +175,7 @@ static void mwifiex_uap_queue_bridged_pkt(struct mwifiex_private *priv,
}
tx_info = MWIFIEX_SKB_TXCB(skb);
memset(tx_info, 0, sizeof(*tx_info));
tx_info->bss_num = priv->bss_num;
tx_info->bss_type = priv->bss_type;
tx_info->flags |= MWIFIEX_BUF_FLAG_BRIDGED_PKT;


@@ -231,9 +231,12 @@ static enum hrtimer_restart rt2800usb_tx_sta_fifo_timeout(struct hrtimer *timer)
*/
static int rt2800usb_autorun_detect(struct rt2x00_dev *rt2x00dev)
{
__le32 reg;
__le32 *reg;
u32 fw_mode;
reg = kmalloc(sizeof(*reg), GFP_KERNEL);
if (reg == NULL)
return -ENOMEM;
/* cannot use rt2x00usb_register_read here as it uses different
* mode (MULTI_READ vs. DEVICE_MODE) and does not pass the
* magic value USB_MODE_AUTORUN (0x11) to the device, thus the
@@ -241,8 +244,9 @@ static int rt2800usb_autorun_detect(struct rt2x00_dev *rt2x00dev)
*/
rt2x00usb_vendor_request(rt2x00dev, USB_DEVICE_MODE,
USB_VENDOR_REQUEST_IN, 0, USB_MODE_AUTORUN,
&reg, sizeof(reg), REGISTER_TIMEOUT_FIRMWARE);
fw_mode = le32_to_cpu(reg);
reg, sizeof(*reg), REGISTER_TIMEOUT_FIRMWARE);
fw_mode = le32_to_cpu(*reg);
kfree(reg);
if ((fw_mode & 0x00000003) == 2)
return 1;
@@ -261,6 +265,7 @@ static int rt2800usb_write_firmware(struct rt2x00_dev *rt2x00dev,
int status;
u32 offset;
u32 length;
int retval;
/*
* Check which section of the firmware we need.
@@ -278,7 +283,10 @@ static int rt2800usb_write_firmware(struct rt2x00_dev *rt2x00dev,
/*
* Write firmware to device.
*/
if (rt2800usb_autorun_detect(rt2x00dev)) {
retval = rt2800usb_autorun_detect(rt2x00dev);
if (retval < 0)
return retval;
if (retval) {
rt2x00_info(rt2x00dev,
"Firmware loading not required - NIC in AutoRun mode\n");
} else {
@@ -763,7 +771,12 @@ static void rt2800usb_fill_rxdone(struct queue_entry *entry,
*/
static int rt2800usb_efuse_detect(struct rt2x00_dev *rt2x00dev)
{
if (rt2800usb_autorun_detect(rt2x00dev))
int retval;
retval = rt2800usb_autorun_detect(rt2x00dev);
if (retval < 0)
return retval;
if (retval)
return 1;
return rt2800_efuse_detect(rt2x00dev);
}
@@ -772,7 +785,10 @@ static int rt2800usb_read_eeprom(struct rt2x00_dev *rt2x00dev)
{
int retval;
if (rt2800usb_efuse_detect(rt2x00dev))
retval = rt2800usb_efuse_detect(rt2x00dev);
if (retval < 0)
return retval;
if (retval)
retval = rt2800_read_eeprom_efuse(rt2x00dev);
else
retval = rt2x00usb_eeprom_read(rt2x00dev, rt2x00dev->eeprom,

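Editorial note: the rt2800usb fix above does two things. It moves the USB transfer buffer off the stack into kmalloc()'d memory (USB control transfers need a DMA-capable buffer, which an on-stack __le32 is not), and it widens the detect helpers' return convention so a negative value reports an allocation failure while 0/1 still carry the boolean answer. A minimal user-space sketch of that tri-state convention follows; detect_autorun() and its register value are stand-ins, not the rt2x00 API.

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* Stand-in for rt2800usb_autorun_detect(): <0 = error, 0/1 = answer. */
static int detect_autorun(void)
{
    unsigned int *reg;
    int mode;

    reg = malloc(sizeof(*reg));   /* heap buffer, as a DMA-safe USB buffer must be */
    if (!reg)
        return -ENOMEM;           /* error, distinct from "not in autorun mode" */

    *reg = 2;                     /* pretend the device reported autorun firmware mode */
    mode = ((*reg & 0x3) == 2);   /* 1 = autorun, 0 = normal */
    free(reg);
    return mode;
}

int main(void)
{
    int ret = detect_autorun();

    if (ret < 0)
        return 1;                 /* propagate the error, as the callers above now do */
    printf(ret ? "autorun\n" : "normal\n");
    return 0;
}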

@@ -1439,16 +1439,11 @@ static void xennet_disconnect_backend(struct netfront_info *info)
unsigned int i = 0;
unsigned int num_queues = info->netdev->real_num_tx_queues;
netif_carrier_off(info->netdev);
for (i = 0; i < num_queues; ++i) {
struct netfront_queue *queue = &info->queues[i];
/* Stop old i/f to prevent errors whilst we rebuild the state. */
spin_lock_bh(&queue->rx_lock);
spin_lock_irq(&queue->tx_lock);
netif_carrier_off(queue->info->netdev);
spin_unlock_irq(&queue->tx_lock);
spin_unlock_bh(&queue->rx_lock);
if (queue->tx_irq && (queue->tx_irq == queue->rx_irq))
unbind_from_irqhandler(queue->tx_irq, queue);
if (queue->tx_irq && (queue->tx_irq != queue->rx_irq)) {
@@ -1458,6 +1453,8 @@ static void xennet_disconnect_backend(struct netfront_info *info)
queue->tx_evtchn = queue->rx_evtchn = 0;
queue->tx_irq = queue->rx_irq = 0;
napi_synchronize(&queue->napi);
/* End access and free the pages */
xennet_end_access(queue->tx_ring_ref, queue->tx.sring);
xennet_end_access(queue->rx_ring_ref, queue->rx.sring);
@@ -2046,13 +2043,15 @@ static int xennet_connect(struct net_device *dev)
/* By now, the queue structures have been set up */
for (j = 0; j < num_queues; ++j) {
queue = &np->queues[j];
spin_lock_bh(&queue->rx_lock);
spin_lock_irq(&queue->tx_lock);
/* Step 1: Discard all pending TX packet fragments. */
spin_lock_irq(&queue->tx_lock);
xennet_release_tx_bufs(queue);
spin_unlock_irq(&queue->tx_lock);
/* Step 2: Rebuild the RX buffer freelist and the RX ring itself. */
spin_lock_bh(&queue->rx_lock);
for (requeue_idx = 0, i = 0; i < NET_RX_RING_SIZE; i++) {
skb_frag_t *frag;
const struct page *page;
@@ -2076,6 +2075,8 @@ static int xennet_connect(struct net_device *dev)
}
queue->rx.req_prod_pvt = requeue_idx;
spin_unlock_bh(&queue->rx_lock);
}
/*
@@ -2087,13 +2088,17 @@ static int xennet_connect(struct net_device *dev)
netif_carrier_on(np->netdev);
for (j = 0; j < num_queues; ++j) {
queue = &np->queues[j];
notify_remote_via_irq(queue->tx_irq);
if (queue->tx_irq != queue->rx_irq)
notify_remote_via_irq(queue->rx_irq);
xennet_tx_buf_gc(queue);
xennet_alloc_rx_buffers(queue);
spin_lock_irq(&queue->tx_lock);
xennet_tx_buf_gc(queue);
spin_unlock_irq(&queue->tx_lock);
spin_lock_bh(&queue->rx_lock);
xennet_alloc_rx_buffers(queue);
spin_unlock_bh(&queue->rx_lock);
}


@@ -182,40 +182,6 @@ int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np)
}
EXPORT_SYMBOL(of_mdiobus_register);
/**
* of_mdiobus_link_phydev - Find a device node for a phy
* @mdio: pointer to mii_bus structure
* @phydev: phydev for which the of_node pointer should be set
*
* Walk the list of subnodes of a mdio bus and look for a node that matches the
* phy's address with its 'reg' property. If found, set the of_node pointer for
* the phy. This allows auto-probed phy devices to be supplied with information
* passed in via DT.
*/
void of_mdiobus_link_phydev(struct mii_bus *mdio,
struct phy_device *phydev)
{
struct device *dev = &phydev->dev;
struct device_node *child;
if (dev->of_node || !mdio->dev.of_node)
return;
for_each_available_child_of_node(mdio->dev.of_node, child) {
int addr;
addr = of_mdio_parse_addr(&mdio->dev, child);
if (addr < 0)
continue;
if (addr == phydev->addr) {
dev->of_node = child;
return;
}
}
}
EXPORT_SYMBOL(of_mdiobus_link_phydev);
/* Helper function for of_phy_find_device */
static int of_phy_match(struct device *dev, void *phy_np)
{


@@ -578,8 +578,6 @@ struct mlx4_cq {
u32 cons_index;
u16 irq;
bool irq_affinity_change;
__be32 *set_ci_db;
__be32 *arm_db;
int arm_sn;
@@ -1167,6 +1165,8 @@ int mlx4_assign_eq(struct mlx4_dev *dev, char *name, struct cpu_rmap *rmap,
int *vector);
void mlx4_release_eq(struct mlx4_dev *dev, int vec);
int mlx4_eq_get_irq(struct mlx4_dev *dev, int vec);
int mlx4_get_phys_port_id(struct mlx4_dev *dev);
int mlx4_wol_read(struct mlx4_dev *dev, u64 *config, int port);
int mlx4_wol_write(struct mlx4_dev *dev, u64 config, int port);


@@ -25,9 +25,6 @@ struct phy_device *of_phy_attach(struct net_device *dev,
extern struct mii_bus *of_mdio_find_bus(struct device_node *mdio_np);
extern void of_mdiobus_link_phydev(struct mii_bus *mdio,
struct phy_device *phydev);
#else /* CONFIG_OF */
static inline int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np)
{
@@ -63,11 +60,6 @@ static inline struct mii_bus *of_mdio_find_bus(struct device_node *mdio_np)
{
return NULL;
}
static inline void of_mdiobus_link_phydev(struct mii_bus *mdio,
struct phy_device *phydev)
{
}
#endif /* CONFIG_OF */
#if defined(CONFIG_OF) && defined(CONFIG_FIXED_PHY)


@@ -203,7 +203,6 @@ struct neigh_table {
void (*proxy_redo)(struct sk_buff *skb);
char *id;
struct neigh_parms parms;
/* HACK. gc_* should follow parms without a gap! */
int gc_interval;
int gc_thresh1;
int gc_thresh2;


@@ -16,7 +16,7 @@ struct netns_sysctl_lowpan {
struct netns_ieee802154_lowpan {
struct netns_sysctl_lowpan sysctl;
struct netns_frags frags;
u16 max_dsize;
int max_dsize;
};
#endif


@@ -1768,9 +1768,11 @@ __sk_dst_set(struct sock *sk, struct dst_entry *dst)
static inline void
sk_dst_set(struct sock *sk, struct dst_entry *dst)
{
spin_lock(&sk->sk_dst_lock);
__sk_dst_set(sk, dst);
spin_unlock(&sk->sk_dst_lock);
struct dst_entry *old_dst;
sk_tx_queue_clear(sk);
old_dst = xchg((__force struct dst_entry **)&sk->sk_dst_cache, dst);
dst_release(old_dst);
}
static inline void
@@ -1782,9 +1784,7 @@ __sk_dst_reset(struct sock *sk)
static inline void
sk_dst_reset(struct sock *sk)
{
spin_lock(&sk->sk_dst_lock);
__sk_dst_reset(sk);
spin_unlock(&sk->sk_dst_lock);
sk_dst_set(sk, NULL);
}
struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie);

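Editorial note: the sk_dst_set() rewrite above drops the sk_dst_lock spinlock in favour of an atomic pointer exchange. xchg() publishes the new dst and hands back the old one in a single step, so two racing writers can never both release the same cached entry, and sk_dst_reset() becomes just a swap with NULL. A minimal sketch of the same pattern in portable C11; the names are illustrative, not the kernel API.

#include <stdatomic.h>
#include <stdlib.h>

struct dst { int id; };

static _Atomic(struct dst *) cache;   /* stand-in for sk->sk_dst_cache */

static void dst_release(struct dst *d)
{
    free(d);                          /* real code would drop a refcount instead */
}

static void cache_set(struct dst *newd)
{
    /* Atomically swap in the new pointer and retrieve the old one;
     * no lock is needed because publish + fetch is one operation. */
    struct dst *old = atomic_exchange(&cache, newd);
    dst_release(old);
}

int main(void)
{
    struct dst *a = malloc(sizeof(*a));
    cache_set(a);      /* releases the previous (NULL) entry safely */
    cache_set(NULL);   /* mirrors sk_dst_reset(): swap in NULL, release old */
    return 0;
}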

@@ -191,7 +191,7 @@ int cpumask_set_cpu_local_first(int i, int numa_node, cpumask_t *dstp)
i %= num_online_cpus();
if (!cpumask_of_node(numa_node)) {
if (numa_node == -1 || !cpumask_of_node(numa_node)) {
/* Use all online cpu's for non numa aware system */
cpumask_copy(mask, cpu_online_mask);
} else {


@@ -627,8 +627,6 @@ static void vlan_dev_uninit(struct net_device *dev)
struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
int i;
free_percpu(vlan->vlan_pcpu_stats);
vlan->vlan_pcpu_stats = NULL;
for (i = 0; i < ARRAY_SIZE(vlan->egress_priority_map); i++) {
while ((pm = vlan->egress_priority_map[i]) != NULL) {
vlan->egress_priority_map[i] = pm->next;
@@ -785,6 +783,15 @@ static const struct net_device_ops vlan_netdev_ops = {
.ndo_get_lock_subclass = vlan_dev_get_lock_subclass,
};
static void vlan_dev_free(struct net_device *dev)
{
struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
free_percpu(vlan->vlan_pcpu_stats);
vlan->vlan_pcpu_stats = NULL;
free_netdev(dev);
}
void vlan_setup(struct net_device *dev)
{
ether_setup(dev);
@@ -794,7 +801,7 @@ void vlan_setup(struct net_device *dev)
dev->tx_queue_len = 0;
dev->netdev_ops = &vlan_netdev_ops;
dev->destructor = free_netdev;
dev->destructor = vlan_dev_free;
dev->ethtool_ops = &vlan_ethtool_ops;
memset(dev->broadcast, 0, ETH_ALEN);

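Editorial note: the vlan change above moves free_percpu() out of ndo_uninit and into a destructor that wraps free_netdev(). Readers can still touch the per-cpu stats between uninit and the final put of the device, so private state must only be freed when the device itself goes away. A sketch of that wrap-the-destructor pattern with hypothetical stand-in types:

#include <stdlib.h>

struct netdev {
    void (*destructor)(struct netdev *);
    long *pcpu_stats;          /* stand-in for vlan->vlan_pcpu_stats */
};

static void free_netdev(struct netdev *dev)
{
    free(dev);
}

/* Wrapper destructor: release private per-cpu state first, then chain
 * to the generic teardown, exactly when no reader can remain. */
static void vlan_dev_free(struct netdev *dev)
{
    free(dev->pcpu_stats);
    dev->pcpu_stats = NULL;
    free_netdev(dev);
}

int main(void)
{
    struct netdev *dev = calloc(1, sizeof(*dev));
    dev->pcpu_stats = calloc(4, sizeof(long));
    dev->destructor = vlan_dev_free;   /* instead of plain free_netdev */
    dev->destructor(dev);              /* last reference dropped */
    return 0;
}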

@@ -1489,8 +1489,6 @@ static int atalk_rcv(struct sk_buff *skb, struct net_device *dev,
goto drop;
/* Queue packet (standard) */
skb->sk = sock;
if (sock_queue_rcv_skb(sock, skb) < 0)
goto drop;
@@ -1644,7 +1642,6 @@ static int atalk_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr
if (!skb)
goto out;
skb->sk = sk;
skb_reserve(skb, ddp_dl->header_length);
skb_reserve(skb, dev->hard_header_len);
skb->dev = dev;


@@ -289,10 +289,20 @@ static void hci_conn_timeout(struct work_struct *work)
{
struct hci_conn *conn = container_of(work, struct hci_conn,
disc_work.work);
int refcnt = atomic_read(&conn->refcnt);
BT_DBG("hcon %p state %s", conn, state_to_string(conn->state));
if (atomic_read(&conn->refcnt))
WARN_ON(refcnt < 0);
/* FIXME: It was observed that in pairing failed scenario, refcnt
* drops below 0. Probably this is because l2cap_conn_del calls
* l2cap_chan_del for each channel, and inside l2cap_chan_del conn is
* dropped. After that loop hci_chan_del is called which also drops
* conn. For now make sure that ACL is alive if refcnt is higher than 0,
* otherwise drop it.
*/
if (refcnt > 0)
return;
switch (conn->state) {


@@ -385,6 +385,16 @@ static const u8 gen_method[5][5] = {
{ CFM_PASSKEY, CFM_PASSKEY, REQ_PASSKEY, JUST_WORKS, OVERLAP },
};
static u8 get_auth_method(struct smp_chan *smp, u8 local_io, u8 remote_io)
{
/* If either side has unknown io_caps, use JUST WORKS */
if (local_io > SMP_IO_KEYBOARD_DISPLAY ||
remote_io > SMP_IO_KEYBOARD_DISPLAY)
return JUST_WORKS;
return gen_method[remote_io][local_io];
}
static int tk_request(struct l2cap_conn *conn, u8 remote_oob, u8 auth,
u8 local_io, u8 remote_io)
{
@@ -401,14 +411,11 @@ static int tk_request(struct l2cap_conn *conn, u8 remote_oob, u8 auth,
BT_DBG("tk_request: auth:%d lcl:%d rem:%d", auth, local_io, remote_io);
/* If neither side wants MITM, use JUST WORKS */
/* If either side has unknown io_caps, use JUST WORKS */
/* Otherwise, look up method from the table */
if (!(auth & SMP_AUTH_MITM) ||
local_io > SMP_IO_KEYBOARD_DISPLAY ||
remote_io > SMP_IO_KEYBOARD_DISPLAY)
if (!(auth & SMP_AUTH_MITM))
method = JUST_WORKS;
else
method = gen_method[remote_io][local_io];
method = get_auth_method(smp, local_io, remote_io);
/* If not bonding, don't ask user to confirm a Zero TK */
if (!(auth & SMP_AUTH_BONDING) && method == JUST_CFM)
@@ -669,7 +676,7 @@ static u8 smp_cmd_pairing_req(struct l2cap_conn *conn, struct sk_buff *skb)
{
struct smp_cmd_pairing rsp, *req = (void *) skb->data;
struct smp_chan *smp;
u8 key_size, auth;
u8 key_size, auth, sec_level;
int ret;
BT_DBG("conn %p", conn);
@@ -695,7 +702,19 @@ static u8 smp_cmd_pairing_req(struct l2cap_conn *conn, struct sk_buff *skb)
/* We didn't start the pairing, so match remote */
auth = req->auth_req;
conn->hcon->pending_sec_level = authreq_to_seclevel(auth);
sec_level = authreq_to_seclevel(auth);
if (sec_level > conn->hcon->pending_sec_level)
conn->hcon->pending_sec_level = sec_level;
/* If we need MITM, check that it can be achieved */
if (conn->hcon->pending_sec_level >= BT_SECURITY_HIGH) {
u8 method;
method = get_auth_method(smp, conn->hcon->io_capability,
req->io_capability);
if (method == JUST_WORKS || method == JUST_CFM)
return SMP_AUTH_REQUIREMENTS;
}
build_pairing_cmd(conn, req, &rsp, auth);
@@ -743,6 +762,16 @@ static u8 smp_cmd_pairing_rsp(struct l2cap_conn *conn, struct sk_buff *skb)
if (check_enc_key_size(conn, key_size))
return SMP_ENC_KEY_SIZE;
/* If we need MITM, check that it can be achieved */
if (conn->hcon->pending_sec_level >= BT_SECURITY_HIGH) {
u8 method;
method = get_auth_method(smp, req->io_capability,
rsp->io_capability);
if (method == JUST_WORKS || method == JUST_CFM)
return SMP_AUTH_REQUIREMENTS;
}
get_random_bytes(smp->prnd, sizeof(smp->prnd));
smp->prsp[0] = SMP_CMD_PAIRING_RSP;
@@ -838,6 +867,7 @@ static u8 smp_cmd_security_req(struct l2cap_conn *conn, struct sk_buff *skb)
struct smp_cmd_pairing cp;
struct hci_conn *hcon = conn->hcon;
struct smp_chan *smp;
u8 sec_level;
BT_DBG("conn %p", conn);
@@ -847,7 +877,9 @@ static u8 smp_cmd_security_req(struct l2cap_conn *conn, struct sk_buff *skb)
if (!(conn->hcon->link_mode & HCI_LM_MASTER))
return SMP_CMD_NOTSUPP;
hcon->pending_sec_level = authreq_to_seclevel(rp->auth_req);
sec_level = authreq_to_seclevel(rp->auth_req);
if (sec_level > hcon->pending_sec_level)
hcon->pending_sec_level = sec_level;
if (smp_ltk_encrypt(conn, hcon->pending_sec_level))
return 0;
@@ -901,9 +933,12 @@ int smp_conn_security(struct hci_conn *hcon, __u8 sec_level)
if (smp_sufficient_security(hcon, sec_level))
return 1;
if (sec_level > hcon->pending_sec_level)
hcon->pending_sec_level = sec_level;
if (hcon->link_mode & HCI_LM_MASTER)
if (smp_ltk_encrypt(conn, sec_level))
goto done;
if (smp_ltk_encrypt(conn, hcon->pending_sec_level))
return 0;
if (test_and_set_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags))
return 0;
@@ -918,7 +953,7 @@ int smp_conn_security(struct hci_conn *hcon, __u8 sec_level)
* requires it.
*/
if (hcon->io_capability != HCI_IO_NO_INPUT_OUTPUT ||
sec_level > BT_SECURITY_MEDIUM)
hcon->pending_sec_level > BT_SECURITY_MEDIUM)
authreq |= SMP_AUTH_MITM;
if (hcon->link_mode & HCI_LM_MASTER) {
@@ -937,9 +972,6 @@ int smp_conn_security(struct hci_conn *hcon, __u8 sec_level)
set_bit(SMP_FLAG_INITIATOR, &smp->flags);
done:
hcon->pending_sec_level = sec_level;
return 0;
}

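Editorial note: one pattern worth pulling out of the smp.c changes above is that gen_method is a 5x5 table indexed by I/O capabilities, and the new get_auth_method() bounds-checks both indices before the lookup, so a peer advertising an out-of-range capability falls back to JUST_WORKS instead of indexing past the array. A compact sketch of the guarded-lookup idiom (table contents abbreviated, names illustrative):

#include <stdio.h>

#define IO_MAX 4   /* stand-in for SMP_IO_KEYBOARD_DISPLAY */

static const char *method[IO_MAX + 1][IO_MAX + 1] = {
    /* full 5x5 table elided; one entry shown */
    [0][0] = "JUST_WORKS",
};

static const char *get_method(unsigned int local, unsigned int remote)
{
    /* Reject out-of-range peer input before it becomes an index. */
    if (local > IO_MAX || remote > IO_MAX)
        return "JUST_WORKS";
    return method[remote][local] ? method[remote][local] : "JUST_WORKS";
}

int main(void)
{
    printf("%s\n", get_method(0, 9));   /* out of range -> JUST_WORKS */
    return 0;
}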

@@ -148,6 +148,9 @@ struct list_head ptype_all __read_mostly; /* Taps */
static struct list_head offload_base __read_mostly;
static int netif_rx_internal(struct sk_buff *skb);
static int call_netdevice_notifiers_info(unsigned long val,
struct net_device *dev,
struct netdev_notifier_info *info);
/*
* The @dev_base_head list is protected by @dev_base_lock and the rtnl
@@ -1207,7 +1210,11 @@ EXPORT_SYMBOL(netdev_features_change);
void netdev_state_change(struct net_device *dev)
{
if (dev->flags & IFF_UP) {
call_netdevice_notifiers(NETDEV_CHANGE, dev);
struct netdev_notifier_change_info change_info;
change_info.flags_changed = 0;
call_netdevice_notifiers_info(NETDEV_CHANGE, dev,
&change_info.info);
rtmsg_ifinfo(RTM_NEWLINK, dev, 0, GFP_KERNEL);
}
}
@@ -4227,9 +4234,8 @@ static int process_backlog(struct napi_struct *napi, int quota)
#endif
napi->weight = weight_p;
local_irq_disable();
while (work < quota) {
while (1) {
struct sk_buff *skb;
unsigned int qlen;
while ((skb = __skb_dequeue(&sd->process_queue))) {
local_irq_enable();
@@ -4243,24 +4249,24 @@ static int process_backlog(struct napi_struct *napi, int quota)
}
rps_lock(sd);
qlen = skb_queue_len(&sd->input_pkt_queue);
if (qlen)
skb_queue_splice_tail_init(&sd->input_pkt_queue,
&sd->process_queue);
if (qlen < quota - work) {
if (skb_queue_empty(&sd->input_pkt_queue)) {
/*
* Inline a custom version of __napi_complete().
* only current cpu owns and manipulates this napi,
* and NAPI_STATE_SCHED is the only possible flag set on backlog.
* we can use a plain write instead of clear_bit(),
* and NAPI_STATE_SCHED is the only possible flag set
* on backlog.
* We can use a plain write instead of clear_bit(),
* and we don't need an smp_mb() memory barrier.
*/
list_del(&napi->poll_list);
napi->state = 0;
rps_unlock(sd);
quota = work + qlen;
break;
}
skb_queue_splice_tail_init(&sd->input_pkt_queue,
&sd->process_queue);
rps_unlock(sd);
}
local_irq_enable();


@@ -3059,11 +3059,12 @@ int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p,
memset(&t->neigh_vars[NEIGH_VAR_GC_INTERVAL], 0,
sizeof(t->neigh_vars[NEIGH_VAR_GC_INTERVAL]));
} else {
struct neigh_table *tbl = p->tbl;
dev_name_source = "default";
t->neigh_vars[NEIGH_VAR_GC_INTERVAL].data = (int *)(p + 1);
t->neigh_vars[NEIGH_VAR_GC_THRESH1].data = (int *)(p + 1) + 1;
t->neigh_vars[NEIGH_VAR_GC_THRESH2].data = (int *)(p + 1) + 2;
t->neigh_vars[NEIGH_VAR_GC_THRESH3].data = (int *)(p + 1) + 3;
t->neigh_vars[NEIGH_VAR_GC_INTERVAL].data = &tbl->gc_interval;
t->neigh_vars[NEIGH_VAR_GC_THRESH1].data = &tbl->gc_thresh1;
t->neigh_vars[NEIGH_VAR_GC_THRESH2].data = &tbl->gc_thresh2;
t->neigh_vars[NEIGH_VAR_GC_THRESH3].data = &tbl->gc_thresh3;
}
if (handler) {


@@ -68,6 +68,7 @@ void gre_build_header(struct sk_buff *skb, const struct tnl_ptk_info *tpi,
skb_push(skb, hdr_len);
skb_reset_transport_header(skb);
greh = (struct gre_base_hdr *)skb->data;
greh->flags = tnl_flags_to_gre_flags(tpi->flags);
greh->protocol = tpi->proto;


@@ -739,8 +739,6 @@ static void icmp_unreach(struct sk_buff *skb)
/* fall through */
case 0:
info = ntohs(icmph->un.frag.mtu);
if (!info)
goto out;
}
break;
case ICMP_SR_FAILED:


@@ -1944,6 +1944,10 @@ int ip_mc_leave_group(struct sock *sk, struct ip_mreqn *imr)
rtnl_lock();
in_dev = ip_mc_find_dev(net, imr);
if (!in_dev) {
ret = -ENODEV;
goto out;
}
ifindex = imr->imr_ifindex;
for (imlp = &inet->mc_list;
(iml = rtnl_dereference(*imlp)) != NULL;
@@ -1961,16 +1965,14 @@ int ip_mc_leave_group(struct sock *sk, struct ip_mreqn *imr)
*imlp = iml->next_rcu;
if (in_dev)
ip_mc_dec_group(in_dev, group);
ip_mc_dec_group(in_dev, group);
rtnl_unlock();
/* decrease mem now to avoid the memleak warning */
atomic_sub(sizeof(*iml), &sk->sk_omem_alloc);
kfree_rcu(iml, rcu);
return 0;
}
if (!in_dev)
ret = -ENODEV;
out:
rtnl_unlock();
return ret;
}


@@ -169,6 +169,7 @@ struct ip_tunnel *ip_tunnel_lookup(struct ip_tunnel_net *itn,
hlist_for_each_entry_rcu(t, head, hash_node) {
if (remote != t->parms.iph.daddr ||
t->parms.iph.saddr != 0 ||
!(t->dev->flags & IFF_UP))
continue;
@@ -185,10 +186,11 @@ struct ip_tunnel *ip_tunnel_lookup(struct ip_tunnel_net *itn,
head = &itn->tunnels[hash];
hlist_for_each_entry_rcu(t, head, hash_node) {
if ((local != t->parms.iph.saddr &&
(local != t->parms.iph.daddr ||
!ipv4_is_multicast(local))) ||
!(t->dev->flags & IFF_UP))
if ((local != t->parms.iph.saddr || t->parms.iph.daddr != 0) &&
(local != t->parms.iph.daddr || !ipv4_is_multicast(local)))
continue;
if (!(t->dev->flags & IFF_UP))
continue;
if (!ip_tunnel_key_match(&t->parms, flags, key))
@@ -205,6 +207,8 @@ struct ip_tunnel *ip_tunnel_lookup(struct ip_tunnel_net *itn,
hlist_for_each_entry_rcu(t, head, hash_node) {
if (t->parms.i_key != key ||
t->parms.iph.saddr != 0 ||
t->parms.iph.daddr != 0 ||
!(t->dev->flags & IFF_UP))
continue;


@@ -1010,7 +1010,7 @@ void ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
const struct iphdr *iph = (const struct iphdr *) skb->data;
struct flowi4 fl4;
struct rtable *rt;
struct dst_entry *dst;
struct dst_entry *odst = NULL;
bool new = false;
bh_lock_sock(sk);
@@ -1018,16 +1018,17 @@ void ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
if (!ip_sk_accept_pmtu(sk))
goto out;
rt = (struct rtable *) __sk_dst_get(sk);
odst = sk_dst_get(sk);
if (sock_owned_by_user(sk) || !rt) {
if (sock_owned_by_user(sk) || !odst) {
__ipv4_sk_update_pmtu(skb, sk, mtu);
goto out;
}
__build_flow_key(&fl4, sk, iph, 0, 0, 0, 0, 0);
if (!__sk_dst_check(sk, 0)) {
rt = (struct rtable *)odst;
if (odst->obsolete && odst->ops->check(odst, 0) == NULL) {
rt = ip_route_output_flow(sock_net(sk), &fl4, sk);
if (IS_ERR(rt))
goto out;
@@ -1037,8 +1038,7 @@ void ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
__ip_rt_update_pmtu((struct rtable *) rt->dst.path, &fl4, mtu);
dst = dst_check(&rt->dst, 0);
if (!dst) {
if (!dst_check(&rt->dst, 0)) {
if (new)
dst_release(&rt->dst);
@@ -1050,10 +1050,11 @@ void ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
}
if (new)
__sk_dst_set(sk, &rt->dst);
sk_dst_set(sk, &rt->dst);
out:
bh_unlock_sock(sk);
dst_release(odst);
}
EXPORT_SYMBOL_GPL(ipv4_sk_update_pmtu);


@@ -1108,7 +1108,7 @@ int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
if (unlikely(tp->repair)) {
if (tp->repair_queue == TCP_RECV_QUEUE) {
copied = tcp_send_rcvq(sk, msg, size);
goto out;
goto out_nopush;
}
err = -EINVAL;
@@ -1282,6 +1282,7 @@ wait_for_memory:
out:
if (copied)
tcp_push(sk, flags, mss_now, tp->nonagle, size_goal);
out_nopush:
release_sock(sk);
return copied + copied_syn;


@@ -1106,7 +1106,7 @@ static bool tcp_check_dsack(struct sock *sk, const struct sk_buff *ack_skb,
}
/* D-SACK for already forgotten data... Do dumb counting. */
if (dup_sack && tp->undo_marker && tp->undo_retrans &&
if (dup_sack && tp->undo_marker && tp->undo_retrans > 0 &&
!after(end_seq_0, prior_snd_una) &&
after(end_seq_0, tp->undo_marker))
tp->undo_retrans--;
@@ -1187,7 +1187,7 @@ static u8 tcp_sacktag_one(struct sock *sk,
/* Account D-SACK for retransmitted packet. */
if (dup_sack && (sacked & TCPCB_RETRANS)) {
if (tp->undo_marker && tp->undo_retrans &&
if (tp->undo_marker && tp->undo_retrans > 0 &&
after(end_seq, tp->undo_marker))
tp->undo_retrans--;
if (sacked & TCPCB_SACKED_ACKED)
@@ -1893,7 +1893,7 @@ static void tcp_clear_retrans_partial(struct tcp_sock *tp)
tp->lost_out = 0;
tp->undo_marker = 0;
tp->undo_retrans = 0;
tp->undo_retrans = -1;
}
void tcp_clear_retrans(struct tcp_sock *tp)
@@ -2665,7 +2665,7 @@ static void tcp_enter_recovery(struct sock *sk, bool ece_ack)
tp->prior_ssthresh = 0;
tp->undo_marker = tp->snd_una;
tp->undo_retrans = tp->retrans_out;
tp->undo_retrans = tp->retrans_out ? : -1;
if (inet_csk(sk)->icsk_ca_state < TCP_CA_CWR) {
if (!ece_ack)


@@ -2525,8 +2525,6 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
if (!tp->retrans_stamp)
tp->retrans_stamp = TCP_SKB_CB(skb)->when;
tp->undo_retrans += tcp_skb_pcount(skb);
/* snd_nxt is stored to detect loss of retransmitted segment,
* see tcp_input.c tcp_sacktag_write_queue().
*/
@@ -2534,6 +2532,10 @@
} else if (err != -EBUSY) {
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPRETRANSFAIL);
}
if (tp->undo_retrans < 0)
tp->undo_retrans = 0;
tp->undo_retrans += tcp_skb_pcount(skb);
return err;
}

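Editorial note: taken together, the tcp_input.c and tcp_output.c hunks above turn undo_retrans from a plain counter into one carrying a sentinel: -1 means no retransmissions have been counted yet, D-SACKs only decrement a value that is genuinely positive, and the first retransmission clears the sentinel before adding. The `x ? : y` form in tcp_enter_recovery() is GNU C shorthand for `x ? x : y`. A small self-contained illustration of the sentinel logic, with assumed values:

#include <stdio.h>

int main(void)
{
    long retrans_out = 0;

    /* GNU "a ? : b" equals "a ? a : b"; plain C spelling below.
     * -1 is a sentinel: no retransmissions tracked yet, which is
     * different from a real count that has been decremented to 0. */
    long undo_retrans = retrans_out ? retrans_out : -1;
    printf("after recovery entry: %ld\n", undo_retrans);    /* -1 */

    /* First actual retransmission: clear the sentinel, then count. */
    if (undo_retrans < 0)
        undo_retrans = 0;
    undo_retrans += 1;                                      /* tcp_skb_pcount(skb) */
    printf("after first retransmit: %ld\n", undo_retrans);  /* 1 */
    return 0;
}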

@@ -1588,8 +1588,11 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
goto csum_error;
if (sk_rcvqueues_full(sk, skb, sk->sk_rcvbuf))
if (sk_rcvqueues_full(sk, skb, sk->sk_rcvbuf)) {
UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
is_udplite);
goto drop;
}
rc = 0;


@@ -1301,8 +1301,17 @@ int igmp6_event_query(struct sk_buff *skb)
len = ntohs(ipv6_hdr(skb)->payload_len) + sizeof(struct ipv6hdr);
len -= skb_network_header_len(skb);
/* Drop queries with not link local source */
if (!(ipv6_addr_type(&ipv6_hdr(skb)->saddr) & IPV6_ADDR_LINKLOCAL))
/* RFC3810 6.2
* Upon reception of an MLD message that contains a Query, the node
* checks if the source address of the message is a valid link-local
* address, if the Hop Limit is set to 1, and if the Router Alert
* option is present in the Hop-By-Hop Options header of the IPv6
* packet. If any of these checks fails, the packet is dropped.
*/
if (!(ipv6_addr_type(&ipv6_hdr(skb)->saddr) & IPV6_ADDR_LINKLOCAL) ||
ipv6_hdr(skb)->hop_limit != 1 ||
!(IP6CB(skb)->flags & IP6SKB_ROUTERALERT) ||
IP6CB(skb)->ra != htons(IPV6_OPT_ROUTERALERT_MLD))
return -EINVAL;
idev = __in6_dev_get(skb->dev);


@@ -674,8 +674,11 @@ int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
goto csum_error;
}
if (sk_rcvqueues_full(sk, skb, sk->sk_rcvbuf))
if (sk_rcvqueues_full(sk, skb, sk->sk_rcvbuf)) {
UDP6_INC_STATS_BH(sock_net(sk),
UDP_MIB_RCVBUFERRORS, is_udplite);
goto drop;
}
skb_dst_drop(skb);
@@ -690,6 +693,7 @@ int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
bh_unlock_sock(sk);
return rc;
csum_error:
UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
drop:


@@ -1096,11 +1096,12 @@ void ieee80211_send_auth(struct ieee80211_sub_if_data *sdata,
int err;
/* 24 + 6 = header + auth_algo + auth_transaction + status_code */
skb = dev_alloc_skb(local->hw.extra_tx_headroom + 24 + 6 + extra_len);
skb = dev_alloc_skb(local->hw.extra_tx_headroom + IEEE80211_WEP_IV_LEN +
24 + 6 + extra_len + IEEE80211_WEP_ICV_LEN);
if (!skb)
return;
skb_reserve(skb, local->hw.extra_tx_headroom);
skb_reserve(skb, local->hw.extra_tx_headroom + IEEE80211_WEP_IV_LEN);
mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24 + 6);
memset(mgmt, 0, 24 + 6);


@@ -636,7 +636,7 @@ static unsigned int netlink_poll(struct file *file, struct socket *sock,
while (nlk->cb_running && netlink_dump_space(nlk)) {
err = netlink_dump(sk);
if (err < 0) {
sk->sk_err = err;
sk->sk_err = -err;
sk->sk_error_report(sk);
break;
}
@@ -2483,7 +2483,7 @@ static int netlink_recvmsg(struct kiocb *kiocb, struct socket *sock,
atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf / 2) {
ret = netlink_dump(sk);
if (ret) {
sk->sk_err = ret;
sk->sk_err = -ret;
sk->sk_error_report(sk);
}
}

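Editorial note: the netlink hunks above fix a sign inversion. Kernel-internal functions return negative errno values, while sk->sk_err holds the positive errno later reported to userspace, so the value must be negated before being stored. In miniature (the values are assumed for illustration):

#include <errno.h>
#include <stdio.h>

int main(void)
{
    int err = -ENOBUFS;   /* what netlink_dump() returns on failure */
    int sk_err = -err;    /* what sk->sk_err must store: positive ENOBUFS */

    printf("%d %d\n", err, sk_err);   /* e.g. -105 105 */
    return 0;
}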

@@ -551,6 +551,8 @@ static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
case OVS_ACTION_ATTR_SAMPLE:
err = sample(dp, skb, a);
if (unlikely(err)) /* skb already freed. */
return err;
break;
}


@@ -1,5 +1,5 @@
/*
* Copyright (c) 2007-2013 Nicira, Inc.
* Copyright (c) 2007-2014 Nicira, Inc.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of version 2 of the GNU General Public
@@ -276,7 +276,7 @@ void ovs_dp_process_received_packet(struct vport *p, struct sk_buff *skb)
OVS_CB(skb)->flow = flow;
OVS_CB(skb)->pkt_key = &key;
ovs_flow_stats_update(OVS_CB(skb)->flow, skb);
ovs_flow_stats_update(OVS_CB(skb)->flow, key.tp.flags, skb);
ovs_execute_actions(dp, skb);
stats_counter = &stats->n_hit;
@@ -889,8 +889,11 @@ static int ovs_flow_cmd_new(struct sk_buff *skb, struct genl_info *info)
}
/* The unmasked key has to be the same for flow updates. */
if (unlikely(!ovs_flow_cmp_unmasked_key(flow, &match))) {
error = -EEXIST;
goto err_unlock_ovs;
flow = ovs_flow_tbl_lookup_exact(&dp->table, &match);
if (!flow) {
error = -ENOENT;
goto err_unlock_ovs;
}
}
/* Update actions. */
old_acts = ovsl_dereference(flow->sf_acts);
@@ -981,16 +984,12 @@ static int ovs_flow_cmd_set(struct sk_buff *skb, struct genl_info *info)
goto err_unlock_ovs;
}
/* Check that the flow exists. */
flow = ovs_flow_tbl_lookup(&dp->table, &key);
flow = ovs_flow_tbl_lookup_exact(&dp->table, &match);
if (unlikely(!flow)) {
error = -ENOENT;
goto err_unlock_ovs;
}
/* The unmasked key has to be the same for flow updates. */
if (unlikely(!ovs_flow_cmp_unmasked_key(flow, &match))) {
error = -EEXIST;
goto err_unlock_ovs;
}
/* Update actions, if present. */
if (likely(acts)) {
old_acts = ovsl_dereference(flow->sf_acts);
@@ -1063,8 +1062,8 @@ static int ovs_flow_cmd_get(struct sk_buff *skb, struct genl_info *info)
goto unlock;
}
flow = ovs_flow_tbl_lookup(&dp->table, &key);
if (!flow || !ovs_flow_cmp_unmasked_key(flow, &match)) {
flow = ovs_flow_tbl_lookup_exact(&dp->table, &match);
if (!flow) {
err = -ENOENT;
goto unlock;
}
@@ -1113,8 +1112,8 @@ static int ovs_flow_cmd_del(struct sk_buff *skb, struct genl_info *info)
goto unlock;
}
flow = ovs_flow_tbl_lookup(&dp->table, &key);
if (unlikely(!flow || !ovs_flow_cmp_unmasked_key(flow, &match))) {
flow = ovs_flow_tbl_lookup_exact(&dp->table, &match);
if (unlikely(!flow)) {
err = -ENOENT;
goto unlock;
}


@@ -61,10 +61,10 @@ u64 ovs_flow_used_time(unsigned long flow_jiffies)
#define TCP_FLAGS_BE16(tp) (*(__be16 *)&tcp_flag_word(tp) & htons(0x0FFF))
void ovs_flow_stats_update(struct sw_flow *flow, struct sk_buff *skb)
void ovs_flow_stats_update(struct sw_flow *flow, __be16 tcp_flags,
struct sk_buff *skb)
{
struct flow_stats *stats;
__be16 tcp_flags = flow->key.tp.flags;
int node = numa_node_id();
stats = rcu_dereference(flow->stats[node]);


@@ -1,5 +1,5 @@
/*
* Copyright (c) 2007-2013 Nicira, Inc.
* Copyright (c) 2007-2014 Nicira, Inc.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of version 2 of the GNU General Public
@@ -180,7 +180,8 @@ struct arp_eth_header {
unsigned char ar_tip[4]; /* target IP address */
} __packed;
void ovs_flow_stats_update(struct sw_flow *, struct sk_buff *);
void ovs_flow_stats_update(struct sw_flow *, __be16 tcp_flags,
struct sk_buff *);
void ovs_flow_stats_get(const struct sw_flow *, struct ovs_flow_stats *,
unsigned long *used, __be16 *tcp_flags);
void ovs_flow_stats_clear(struct sw_flow *);


@@ -456,6 +456,22 @@ struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *tbl,
return ovs_flow_tbl_lookup_stats(tbl, key, &n_mask_hit);
}
struct sw_flow *ovs_flow_tbl_lookup_exact(struct flow_table *tbl,
struct sw_flow_match *match)
{
struct table_instance *ti = rcu_dereference_ovsl(tbl->ti);
struct sw_flow_mask *mask;
struct sw_flow *flow;
/* Always called under ovs-mutex. */
list_for_each_entry(mask, &tbl->mask_list, list) {
flow = masked_flow_lookup(ti, match->key, mask);
if (flow && ovs_flow_cmp_unmasked_key(flow, match)) /* Found */
return flow;
}
return NULL;
}
int ovs_flow_tbl_num_masks(const struct flow_table *table)
{
struct sw_flow_mask *mask;


@@ -76,7 +76,8 @@ struct sw_flow *ovs_flow_tbl_lookup_stats(struct flow_table *,
u32 *n_mask_hit);
struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *,
const struct sw_flow_key *);
struct sw_flow *ovs_flow_tbl_lookup_exact(struct flow_table *tbl,
struct sw_flow_match *match);
bool ovs_flow_cmp_unmasked_key(const struct sw_flow *flow,
struct sw_flow_match *match);


@@ -110,6 +110,22 @@ static int gre_rcv(struct sk_buff *skb,
return PACKET_RCVD;
}
/* Called with rcu_read_lock and BH disabled. */
static int gre_err(struct sk_buff *skb, u32 info,
const struct tnl_ptk_info *tpi)
{
struct ovs_net *ovs_net;
struct vport *vport;
ovs_net = net_generic(dev_net(skb->dev), ovs_net_id);
vport = rcu_dereference(ovs_net->vport_net.gre_vport);
if (unlikely(!vport))
return PACKET_REJECT;
else
return PACKET_RCVD;
}
static int gre_tnl_send(struct vport *vport, struct sk_buff *skb)
{
struct net *net = ovs_dp_get_net(vport->dp);
@@ -186,6 +202,7 @@ error:
static struct gre_cisco_protocol gre_protocol = {
.handler = gre_rcv,
.err_handler = gre_err,
.priority = 1,
};


@@ -366,9 +366,10 @@ fail:
* specification [SCTP] and any extensions for a list of possible
* error formats.
*/
struct sctp_ulpevent *sctp_ulpevent_make_remote_error(
const struct sctp_association *asoc, struct sctp_chunk *chunk,
__u16 flags, gfp_t gfp)
struct sctp_ulpevent *
sctp_ulpevent_make_remote_error(const struct sctp_association *asoc,
struct sctp_chunk *chunk, __u16 flags,
gfp_t gfp)
{
struct sctp_ulpevent *event;
struct sctp_remote_error *sre;
@@ -387,8 +388,7 @@ struct sctp_ulpevent *sctp_ulpevent_make_remote_error(
/* Copy the skb to a new skb with room for us to prepend
* notification with.
*/
skb = skb_copy_expand(chunk->skb, sizeof(struct sctp_remote_error),
0, gfp);
skb = skb_copy_expand(chunk->skb, sizeof(*sre), 0, gfp);
/* Pull off the rest of the cause TLV from the chunk. */
skb_pull(chunk->skb, elen);
@@ -399,62 +399,21 @@
event = sctp_skb2event(skb);
sctp_ulpevent_init(event, MSG_NOTIFICATION, skb->truesize);
sre = (struct sctp_remote_error *)
skb_push(skb, sizeof(struct sctp_remote_error));
sre = (struct sctp_remote_error *) skb_push(skb, sizeof(*sre));
/* Trim the buffer to the right length. */
skb_trim(skb, sizeof(struct sctp_remote_error) + elen);
skb_trim(skb, sizeof(*sre) + elen);
/* Socket Extensions for SCTP
* 5.3.1.3 SCTP_REMOTE_ERROR
*
* sre_type:
* It should be SCTP_REMOTE_ERROR.
*/
/* RFC6458, Section 6.1.3. SCTP_REMOTE_ERROR */
memset(sre, 0, sizeof(*sre));
sre->sre_type = SCTP_REMOTE_ERROR;
/*
* Socket Extensions for SCTP
* 5.3.1.3 SCTP_REMOTE_ERROR
*
* sre_flags: 16 bits (unsigned integer)
* Currently unused.
*/
sre->sre_flags = 0;
/* Socket Extensions for SCTP
* 5.3.1.3 SCTP_REMOTE_ERROR
*
* sre_length: sizeof (__u32)
*
* This field is the total length of the notification data,
* including the notification header.
*/
sre->sre_length = skb->len;
/* Socket Extensions for SCTP
* 5.3.1.3 SCTP_REMOTE_ERROR
*
* sre_error: 16 bits (unsigned integer)
* This value represents one of the Operational Error causes defined in
* the SCTP specification, in network byte order.
*/
sre->sre_error = cause;
/* Socket Extensions for SCTP
* 5.3.1.3 SCTP_REMOTE_ERROR
*
* sre_assoc_id: sizeof (sctp_assoc_t)
*
* The association id field, holds the identifier for the association.
* All notifications for a given association have the same association
* identifier. For TCP style socket, this field is ignored.
*/
sctp_ulpevent_set_owner(event, asoc);
sre->sre_assoc_id = sctp_assoc2id(asoc);
return event;
fail:
return NULL;
}
@@ -899,7 +858,9 @@ __u16 sctp_ulpevent_get_notification_type(const struct sctp_ulpevent *event)
return notification->sn_header.sn_type;
}
/* Copy out the sndrcvinfo into a msghdr. */
/* RFC6458, Section 5.3.2. SCTP Header Information Structure
* (SCTP_SNDRCV, DEPRECATED)
*/
void sctp_ulpevent_read_sndrcvinfo(const struct sctp_ulpevent *event,
struct msghdr *msghdr)
{
@@ -908,74 +869,21 @@ void sctp_ulpevent_read_sndrcvinfo(const struct sctp_ulpevent *event,
if (sctp_ulpevent_is_notification(event))
return;
/* Sockets API Extensions for SCTP
* Section 5.2.2 SCTP Header Information Structure (SCTP_SNDRCV)
*
* sinfo_stream: 16 bits (unsigned integer)
*
* For recvmsg() the SCTP stack places the message's stream number in
* this value.
*/
memset(&sinfo, 0, sizeof(sinfo));
sinfo.sinfo_stream = event->stream;
/* sinfo_ssn: 16 bits (unsigned integer)
*
* For recvmsg() this value contains the stream sequence number that
* the remote endpoint placed in the DATA chunk. For fragmented
* messages this is the same number for all deliveries of the message
* (if more than one recvmsg() is needed to read the message).
*/
sinfo.sinfo_ssn = event->ssn;
/* sinfo_ppid: 32 bits (unsigned integer)
*
* In recvmsg() this value is
* the same information that was passed by the upper layer in the peer
* application. Please note that byte order issues are NOT accounted
* for and this information is passed opaquely by the SCTP stack from
* one end to the other.
*/
sinfo.sinfo_ppid = event->ppid;
/* sinfo_flags: 16 bits (unsigned integer)
*
* This field may contain any of the following flags and is composed of
* a bitwise OR of these values.
*
* recvmsg() flags:
*
* SCTP_UNORDERED - This flag is present when the message was sent
* non-ordered.
*/
sinfo.sinfo_flags = event->flags;
/* sinfo_tsn: 32 bit (unsigned integer)
*
* For the receiving side, this field holds a TSN that was
* assigned to one of the SCTP Data Chunks.
*/
sinfo.sinfo_tsn = event->tsn;
/* sinfo_cumtsn: 32 bit (unsigned integer)
*
* This field will hold the current cumulative TSN as
* known by the underlying SCTP layer. Note this field is
* ignored when sending and only valid for a receive
* operation when sinfo_flags are set to SCTP_UNORDERED.
*/
sinfo.sinfo_cumtsn = event->cumtsn;
/* sinfo_assoc_id: sizeof (sctp_assoc_t)
*
* The association handle field, sinfo_assoc_id, holds the identifier
* for the association announced in the COMMUNICATION_UP notification.
* All notifications for a given association have the same identifier.
* Ignored for one-to-one style sockets.
*/
sinfo.sinfo_assoc_id = sctp_assoc2id(event->asoc);
/* context value that is set via SCTP_CONTEXT socket option. */
/* Context value that is set via SCTP_CONTEXT socket option. */
sinfo.sinfo_context = event->asoc->default_rcv_context;
/* These fields are not used while receiving. */
sinfo.sinfo_timetolive = 0;
put_cmsg(msghdr, IPPROTO_SCTP, SCTP_SNDRCV,
sizeof(struct sctp_sndrcvinfo), (void *)&sinfo);
sizeof(sinfo), &sinfo);
}
/* Do accounting for bytes received and hold a reference to the association


@@ -559,6 +559,7 @@ receive:
buf = node->bclink.deferred_head;
node->bclink.deferred_head = buf->next;
buf->next = NULL;
node->bclink.deferred_size--;
goto receive;
}


@@ -101,9 +101,11 @@ int tipc_msg_build(struct tipc_msg *hdr, struct iovec const *msg_sect,
}
/* tipc_buf_append(): Append a buffer to the fragment list of another buffer
* Let first buffer become head buffer
* Returns 1 and sets *buf to headbuf if chain is complete, otherwise 0
* Leaves headbuf pointer at NULL if failure
* @*headbuf: in: NULL for first frag, otherwise value returned from prev call
* out: set when successful non-complete reassembly, otherwise NULL
* @*buf: in: the buffer to append. Always defined
* out: head buf after successful complete reassembly, otherwise NULL
* Returns 1 when reassembly complete, otherwise 0
*/
int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf)
{
@@ -122,6 +124,7 @@ int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf)
goto out_free;
head = *headbuf = frag;
skb_frag_list_init(head);
*buf = NULL;
return 0;
}
if (!head)
@@ -150,5 +153,7 @@
out_free:
pr_warn_ratelimited("Unable to build fragment list\n");
kfree_skb(*buf);
kfree_skb(*headbuf);
*buf = *headbuf = NULL;
return 0;
}

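Editorial note: the reworked comment above pins down a calling contract worth restating: *headbuf carries reassembly state between calls, *buf is consumed on every call, and only a return of 1 hands back a complete message. Below is a toy model of that contract using flat buffers instead of sk_buffs; it illustrates the interface, not the TIPC implementation.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct buf {
    char data[128];
    int last;   /* stand-in for the "last fragment" bit */
};

/* Toy version of the contract: consumes *frag, keeps state in *head,
 * and returns 1 with *frag pointing at the finished message (and *head
 * reset to NULL) once the last piece has arrived. */
static int toy_append(struct buf **head, struct buf **frag)
{
    struct buf *f = *frag;

    if (!*head) {
        *head = f;              /* first fragment becomes the head buffer */
        *frag = NULL;
    } else {
        strcat((*head)->data, f->data);
        (*head)->last = f->last;
        free(f);
        *frag = NULL;
    }
    if (!(*head)->last)
        return 0;               /* incomplete: state stays in *head */
    *frag = *head;              /* complete: hand the message back in *frag */
    *head = NULL;
    return 1;
}

int main(void)
{
    struct buf *head = NULL;
    const char *pieces[] = { "Hello, ", "TIPC ", "world!" };

    for (int i = 0; i < 3; i++) {
        struct buf *frag = calloc(1, sizeof(*frag));
        strcpy(frag->data, pieces[i]);
        frag->last = (i == 2);
        if (toy_append(&head, &frag)) {
            printf("reassembled: %s\n", frag->data);
            free(frag);
        }
    }
    return 0;
}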

@@ -424,7 +424,7 @@ static inline unsigned int elapsed_jiffies_msecs(unsigned long start)
if (end >= start)
return jiffies_to_msecs(end - start);
return jiffies_to_msecs(end + (MAX_JIFFY_OFFSET - start) + 1);
return jiffies_to_msecs(end + (ULONG_MAX - start) + 1);
}
void

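Editorial note: a closing observation on the elapsed_jiffies_msecs() fix above. A jiffies counter wraps modulo ULONG_MAX + 1, so the elapsed count across a wrap is end + (ULONG_MAX - start) + 1, which is exactly what unsigned subtraction computes anyway; MAX_JIFFY_OFFSET was simply the wrong constant. A two-line demonstration:

#include <limits.h>
#include <stdio.h>

int main(void)
{
    unsigned long start = ULONG_MAX - 5;   /* just before the wrap */
    unsigned long end = 10;                /* just after the wrap */

    unsigned long explicit = end + (ULONG_MAX - start) + 1;
    unsigned long modular = end - start;   /* same value, mod 2^N */
    printf("%lu %lu\n", explicit, modular);   /* both print 16 */
    return 0;
}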
Some files were not shown because too many files have changed in this diff.