
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Pull networking fixes from David Miller:
 "Mostly small sets of driver fixes scattered all over the place.

   1) Mediatek driver fixes from Sean Wang.  Forward port not written
      correctly during TX map, missed handling of EPROBE_DEFER, and
      mistaken use of put_page() instead of skb_free_frag().

   2) Fix socket double-free in KCM code, from WANG Cong.

   3) QED driver fixes from Sudarsana Reddy Kalluru, including a fix for
      using the dcbx buffers before initializing them.

   4) Mellanox Switch driver fixes from Jiri Pirko, including a fix for
      double fib removals and an error handling fix in
      mlxsw_sp_module_init().

   5) Fix kernel panic when enabling LLDP in i40e driver, from Dave
      Ertman.

   6) Fix padding of TSO packets in thunderx driver, from Sunil Goutham.

   7) TCP's rcv_wup not initialized properly when using fastopen, from
      Neal Cardwell.

   8) Don't use uninitialized flow keys in flow dissector, from Gao
      Feng.

   9) Use after free in l2tp module unload, from Sabrina Dubroca.

  10) Fix interrupt registry ordering issues in smsc911x driver, from
      Jeremy Linton.

  11) Fix crashes in bonding having to do with enslaving and rx_handler,
      from Mahesh Bandewar.

  12) AF_UNIX deadlock fixes from Linus.

  13) In mlx5 driver, don't read skb->xmit_more after it might have been
      freed from the TX reclaim path.  From Tariq Toukan.

  14) Fix a bug from 2015 in TCP Yeah where the congestion window does
      not increase, from Artem Germanov.

  15) Don't pad frames on receive in NFP driver, from Jakub Kicinski.

  16) Fix chunk fragmenting in SCTP wrt. GSO, from Marcelo Ricardo
      Leitner.

  17) Fix deletion of VRF routes, from Mark Tomlinson.

  18) Fix device refcount leak when DAD fails in ipv6, from Wei Yongjun"

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (101 commits)
  net/mlx4_en: Fix panic on xmit while port is down
  net/mlx4_en: Fixes for DCBX
  net/mlx4_en: Fix the return value of mlx4_en_dcbnl_set_state()
  net/mlx4_en: Fix the return value of mlx4_en_dcbnl_set_all()
  net: ethernet: renesas: sh_eth: add POST registers for rz
  drivers: net: phy: mdio-xgene: Add hardware dependency
  dwc_eth_qos: do not register semi-initialized device
  sctp: identify chunks that need to be fragmented at IP level
  mlxsw: spectrum: Set port type before setting its address
  mlxsw: spectrum_router: Fix error path in mlxsw_sp_router_init
  nfp: don't pad frames on receive
  nfp: drop support for old firmware ABIs
  nfp: remove linux/version.h includes
  tcp: cwnd does not increase in TCP YeAH
  net/mlx5e: Fix parsing of vlan packets when updating lro header
  net/mlx5e: Fix global PFC counters replication
  net/mlx5e: Prevent casting overflow
  net/mlx5e: Move an_disable_cap bit to a new position
  net/mlx5e: Fix xmit_more counter race issue
  tcp: fastopen: avoid negative sk_forward_alloc
  ...
Linus Torvalds 2016-09-12 07:56:06 -07:00
commit da499f8f53
92 changed files with 889 additions and 624 deletions


@ -2485,7 +2485,7 @@ F: include/net/bluetooth/
BONDING DRIVER
M: Jay Vosburgh <j.vosburgh@gmail.com>
M: Veaceslav Falico <vfalico@gmail.com>
M: Andy Gospodarek <gospo@cumulusnetworks.com>
M: Andy Gospodarek <andy@greyhouse.net>
L: netdev@vger.kernel.org
W: http://sourceforge.net/projects/bonding/
S: Supported
@ -3269,7 +3269,7 @@ S: Maintained
F: drivers/net/wan/cosa*
CPMAC ETHERNET DRIVER
M: Florian Fainelli <florian@openwrt.org>
M: Florian Fainelli <f.fainelli@gmail.com>
L: netdev@vger.kernel.org
S: Maintained
F: drivers/net/ethernet/ti/cpmac.c


@ -1341,9 +1341,10 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
slave_dev->name);
}
/* already enslaved */
if (slave_dev->flags & IFF_SLAVE) {
netdev_dbg(bond_dev, "Error: Device was already enslaved\n");
/* already in-use? */
if (netdev_is_rx_handler_busy(slave_dev)) {
netdev_err(bond_dev,
"Error: Device is in use and cannot be enslaved\n");
return -EBUSY;
}
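For context, the check the fix switches to is a small RTNL-protected helper; a sketch of netdev_is_rx_handler_busy() as it appears in net/core/dev.c (shown for illustration, not part of this diff):

    /* True if any rx_handler user (bonding, team, bridge, macvlan, ...)
     * has already claimed the device; must be called under RTNL.
     */
    bool netdev_is_rx_handler_busy(struct net_device *dev)
    {
    	ASSERT_RTNL();
    	return dev && rtnl_dereference(dev->rx_handler);
    }

Unlike the old IFF_SLAVE test, this also rejects devices already claimed by a non-bonding master, which is what the rx_handler crashes required.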


@ -772,6 +772,11 @@ void bnx2x_fw_dump_lvl(struct bnx2x *bp, const char *lvl)
(bp->common.bc_ver & 0xff00) >> 8,
(bp->common.bc_ver & 0xff));
if (pci_channel_offline(bp->pdev)) {
BNX2X_ERR("Cannot dump MCP info while in PCI error\n");
return;
}
val = REG_RD(bp, MCP_REG_MCPR_CPU_PROGRAM_COUNTER);
if (val == REG_RD(bp, MCP_REG_MCPR_CPU_PROGRAM_COUNTER))
BNX2X_ERR("%s" "MCP PC at 0x%x\n", lvl, val);
@ -9415,10 +9420,16 @@ unload_error:
/* Release IRQs */
bnx2x_free_irq(bp);
/* Reset the chip */
rc = bnx2x_reset_hw(bp, reset_code);
if (rc)
BNX2X_ERR("HW_RESET failed\n");
/* Reset the chip, unless PCI function is offline. If we reach this
* point following a PCI error handling, it means device is really
* in a bad state and we're about to remove it, so reset the chip
* is not a good idea.
*/
if (!pci_channel_offline(bp->pdev)) {
rc = bnx2x_reset_hw(bp, reset_code);
if (rc)
BNX2X_ERR("HW_RESET failed\n");
}
/* Report UNLOAD_DONE to MCP */
bnx2x_send_unload_done(bp, keep_link);


@ -353,8 +353,8 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
push_len = (length + sizeof(*tx_push) + 7) / 8;
if (push_len > 16) {
__iowrite64_copy(txr->tx_doorbell, tx_push_buf, 16);
__iowrite64_copy(txr->tx_doorbell + 4, tx_push_buf + 1,
push_len - 16);
__iowrite32_copy(txr->tx_doorbell + 4, tx_push_buf + 1,
(push_len - 16) << 1);
} else {
__iowrite64_copy(txr->tx_doorbell, tx_push_buf,
push_len);
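The doubled count is unit arithmetic: __iowrite64_copy() takes its length in 64-bit words while __iowrite32_copy() takes 32-bit words, so the tail left after the first sixteen u64 writes must be expressed in twice as many units. A restated sketch of the fixed call:

    /* __iowrite64_copy() counts u64 words; __iowrite32_copy() counts u32
     * words. The tail after the first 16 u64 writes is therefore doubled
     * when re-expressed in u32 units:
     */
    size_t tail_u64s = push_len - 16;
    __iowrite32_copy(txr->tx_doorbell + 4, tx_push_buf + 1,
    		 tail_u64s << 1);   /* same bytes, counted as u32 words */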


@ -14012,6 +14012,7 @@ static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
(!ec->rx_coalesce_usecs) ||
(ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
(!ec->tx_coalesce_usecs) ||
(ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
(ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
(ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
@ -14022,16 +14023,6 @@ static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
(ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
return -EINVAL;
/* No rx interrupts will be generated if both are zero */
if ((ec->rx_coalesce_usecs == 0) &&
(ec->rx_max_coalesced_frames == 0))
return -EINVAL;
/* No tx interrupts will be generated if both are zero */
if ((ec->tx_coalesce_usecs == 0) &&
(ec->tx_max_coalesced_frames == 0))
return -EINVAL;
/* Only copy relevant parameters, ignore all others. */
tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;


@ -1323,6 +1323,24 @@ dma_error:
return 0;
}
static inline int macb_clear_csum(struct sk_buff *skb)
{
/* no change for packets without checksum offloading */
if (skb->ip_summed != CHECKSUM_PARTIAL)
return 0;
/* make sure we can modify the header */
if (unlikely(skb_cow_head(skb, 0)))
return -1;
/* initialize checksum field
* This is required - at least for Zynq, which otherwise calculates
* wrong UDP header checksums for UDP packets with UDP data len <=2
*/
*(__sum16 *)(skb_checksum_start(skb) + skb->csum_offset) = 0;
return 0;
}
static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
u16 queue_index = skb_get_queue_mapping(skb);
@ -1362,6 +1380,11 @@ static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_BUSY;
}
if (macb_clear_csum(skb)) {
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
/* Map socket buffer for DMA transfer */
if (!macb_tx_map(bp, queue, skb)) {
dev_kfree_skb_any(skb);
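The field macb_clear_csum() zeroes is the transport checksum slot of a CHECKSUM_PARTIAL skb; for an IPv4/UDP packet the write is equivalent to this sketch (illustrative only, assuming the transport header is set):

    struct udphdr *uh = udp_hdr(skb); /* skb_checksum_start(skb) points here */
    uh->check = 0;                    /* skb->csum_offset selects the check field */

Zeroing it hands the Zynq offload engine a known starting value instead of whatever the stack left in the slot.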


@ -279,6 +279,7 @@ struct nicvf {
u8 sqs_id;
bool sqs_mode;
bool hw_tso;
bool t88;
/* Receive buffer alloc */
u32 rb_page_offset;


@ -251,9 +251,14 @@ static void nic_set_tx_pkt_pad(struct nicpf *nic, int size)
int lmac;
u64 lmac_cfg;
/* Max value that can be set is 60 */
if (size > 60)
size = 60;
/* There is an issue in HW wherein, while sending GSO-sized
* pkts as part of TSO, if the pkt len falls below this size the
* NIC will zero-pad the packet and also update the IP total length.
* Hence set this value to less than the min pkt size of MAC+IP+TCP
* headers; BGX will then do the padding to transmit a 64 byte pkt.
*/
if (size > 52)
size = 52;
for (lmac = 0; lmac < (MAX_BGX_PER_CN88XX * MAX_LMAC_PER_BGX); lmac++) {
lmac_cfg = nic_reg_read(nic, NIC_PF_LMAC_0_7_CFG | (lmac << 3));
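The 52 comes from simple header arithmetic; a worked sketch (standard header sizes, not taken from this diff):

    /* Smallest MAC+IP+TCP header stack:
     *   ETH_HLEN (14) + sizeof(struct iphdr) (20) + sizeof(struct tcphdr) (20) = 54
     * A pad threshold of 52 therefore sits below any real TSO segment, so the
     * buggy NIC-level zero-padding never fires and BGX instead pads the frame
     * to 64 bytes on the wire.
     */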


@ -513,6 +513,7 @@ static void nicvf_snd_pkt_handler(struct net_device *netdev,
struct nicvf *nic = netdev_priv(netdev);
struct snd_queue *sq;
struct sq_hdr_subdesc *hdr;
struct sq_hdr_subdesc *tso_sqe;
sq = &nic->qs->sq[cqe_tx->sq_idx];
@ -527,17 +528,21 @@ static void nicvf_snd_pkt_handler(struct net_device *netdev,
nicvf_check_cqe_tx_errs(nic, cq, cqe_tx);
skb = (struct sk_buff *)sq->skbuff[cqe_tx->sqe_ptr];
/* For TSO offloaded packets only one SQE will have a valid SKB */
if (skb) {
/* Check for dummy descriptor used for HW TSO offload on 88xx */
if (hdr->dont_send) {
/* Get actual TSO descriptors and free them */
tso_sqe =
(struct sq_hdr_subdesc *)GET_SQ_DESC(sq, hdr->rsvd2);
nicvf_put_sq_desc(sq, tso_sqe->subdesc_cnt + 1);
}
nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1);
prefetch(skb);
dev_consume_skb_any(skb);
sq->skbuff[cqe_tx->sqe_ptr] = (u64)NULL;
} else {
/* In case of HW TSO, HW sends a CQE for each segment of a TSO
* packet instead of a single CQE for the whole TSO packet
* transmitted. Each of these CQEs points to the same SQE, so
* avoid freeing same SQE multiple times.
/* In case of SW TSO on 88xx, only last segment will have
* a SKB attached, so just free SQEs here.
*/
if (!nic->hw_tso)
nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1);
@ -1502,6 +1507,7 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
struct net_device *netdev;
struct nicvf *nic;
int err, qcount;
u16 sdevid;
err = pci_enable_device(pdev);
if (err) {
@ -1575,6 +1581,10 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (!pass1_silicon(nic->pdev))
nic->hw_tso = true;
pci_read_config_word(nic->pdev, PCI_SUBSYSTEM_ID, &sdevid);
if (sdevid == 0xA134)
nic->t88 = true;
/* Check if this VF is in QS only mode */
if (nic->sqs_mode)
return 0;


@ -938,6 +938,8 @@ static int nicvf_tso_count_subdescs(struct sk_buff *skb)
return num_edescs + sh->gso_segs;
}
#define POST_CQE_DESC_COUNT 2
/* Get the number of SQ descriptors needed to xmit this skb */
static int nicvf_sq_subdesc_required(struct nicvf *nic, struct sk_buff *skb)
{
@ -948,6 +950,10 @@ static int nicvf_sq_subdesc_required(struct nicvf *nic, struct sk_buff *skb)
return subdesc_cnt;
}
/* Dummy descriptors to get TSO pkt completion notification */
if (nic->t88 && nic->hw_tso && skb_shinfo(skb)->gso_size)
subdesc_cnt += POST_CQE_DESC_COUNT;
if (skb_shinfo(skb)->nr_frags)
subdesc_cnt += skb_shinfo(skb)->nr_frags;
@ -965,14 +971,21 @@ nicvf_sq_add_hdr_subdesc(struct nicvf *nic, struct snd_queue *sq, int qentry,
struct sq_hdr_subdesc *hdr;
hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, qentry);
sq->skbuff[qentry] = (u64)skb;
memset(hdr, 0, SND_QUEUE_DESC_SIZE);
hdr->subdesc_type = SQ_DESC_TYPE_HEADER;
/* Enable notification via CQE after processing SQE */
hdr->post_cqe = 1;
/* No of subdescriptors following this */
hdr->subdesc_cnt = subdesc_cnt;
if (nic->t88 && nic->hw_tso && skb_shinfo(skb)->gso_size) {
/* post_cqe = 0, to avoid HW posting a CQE for every TSO
* segment transmitted on 88xx.
*/
hdr->subdesc_cnt = subdesc_cnt - POST_CQE_DESC_COUNT;
} else {
sq->skbuff[qentry] = (u64)skb;
/* Enable notification via CQE after processing SQE */
hdr->post_cqe = 1;
/* No of subdescriptors following this */
hdr->subdesc_cnt = subdesc_cnt;
}
hdr->tot_len = len;
/* Offload checksum calculation to HW */
@ -1023,6 +1036,37 @@ static inline void nicvf_sq_add_gather_subdesc(struct snd_queue *sq, int qentry,
gather->addr = data;
}
/* Add HDR + IMMEDIATE subdescriptors right after descriptors of a TSO
* packet so that a CQE is posted as a notification for transmission of
* the TSO packet.
*/
static inline void nicvf_sq_add_cqe_subdesc(struct snd_queue *sq, int qentry,
int tso_sqe, struct sk_buff *skb)
{
struct sq_imm_subdesc *imm;
struct sq_hdr_subdesc *hdr;
sq->skbuff[qentry] = (u64)skb;
hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, qentry);
memset(hdr, 0, SND_QUEUE_DESC_SIZE);
hdr->subdesc_type = SQ_DESC_TYPE_HEADER;
/* Enable notification via CQE after processing SQE */
hdr->post_cqe = 1;
/* There is no packet to transmit here */
hdr->dont_send = 1;
hdr->subdesc_cnt = POST_CQE_DESC_COUNT - 1;
hdr->tot_len = 1;
/* Actual TSO header SQE index, needed for cleanup */
hdr->rsvd2 = tso_sqe;
qentry = nicvf_get_nxt_sqentry(sq, qentry);
imm = (struct sq_imm_subdesc *)GET_SQ_DESC(sq, qentry);
memset(imm, 0, SND_QUEUE_DESC_SIZE);
imm->subdesc_type = SQ_DESC_TYPE_IMMEDIATE;
imm->len = 1;
}
/* Segment a TSO packet into 'gso_size' segments and append
* them to SQ for transfer
*/
@ -1096,7 +1140,7 @@ static int nicvf_sq_append_tso(struct nicvf *nic, struct snd_queue *sq,
int nicvf_sq_append_skb(struct nicvf *nic, struct sk_buff *skb)
{
int i, size;
int subdesc_cnt;
int subdesc_cnt, tso_sqe = 0;
int sq_num, qentry;
struct queue_set *qs;
struct snd_queue *sq;
@ -1131,6 +1175,7 @@ int nicvf_sq_append_skb(struct nicvf *nic, struct sk_buff *skb)
/* Add SQ header subdesc */
nicvf_sq_add_hdr_subdesc(nic, sq, qentry, subdesc_cnt - 1,
skb, skb->len);
tso_sqe = qentry;
/* Add SQ gather subdescs */
qentry = nicvf_get_nxt_sqentry(sq, qentry);
@ -1154,6 +1199,11 @@ int nicvf_sq_append_skb(struct nicvf *nic, struct sk_buff *skb)
}
doorbell:
if (nic->t88 && skb_shinfo(skb)->gso_size) {
qentry = nicvf_get_nxt_sqentry(sq, qentry);
nicvf_sq_add_cqe_subdesc(sq, qentry, tso_sqe, skb);
}
/* make sure all memory stores are done before ringing doorbell */
smp_wmb();
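Taken together, the 88xx changes give a HW-TSO packet this SQ layout; a sketch inferred from the code above, not an authoritative datasheet view:

    /* [HDR     post_cqe=0, subdesc_cnt=N]  real TSO packet, posts no CQE
     * [GATHER  x N]                        payload fragments
     * [HDR     dont_send=1, post_cqe=1,    dummy 2-descriptor pair; rsvd2
     *          rsvd2=tso_sqe, tot_len=1]   records the real TSO HDR index
     * [IMMEDIATE len=1]
     */

Exactly one CQE then arrives for the dummy pair, and nicvf_snd_pkt_handler() follows rsvd2 back to free both descriptor sets at once.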


@ -5113,9 +5113,13 @@ static int i40e_init_pf_dcb(struct i40e_pf *pf)
DCB_CAP_DCBX_VER_IEEE;
pf->flags |= I40E_FLAG_DCB_CAPABLE;
/* Enable DCB tagging only when more than one TC */
/* Enable DCB tagging only when more than one TC
* or explicitly disable if only one TC
*/
if (i40e_dcb_get_num_tc(&hw->local_dcbx_config) > 1)
pf->flags |= I40E_FLAG_DCB_ENABLED;
else
pf->flags &= ~I40E_FLAG_DCB_ENABLED;
dev_dbg(&pf->pdev->dev,
"DCBX offload is supported for this PF.\n");
}
@ -5716,7 +5720,7 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf,
u8 type;
/* Not DCB capable or capability disabled */
if (!(pf->flags & I40E_FLAG_DCB_CAPABLE))
if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
return ret;
/* Ignore if event is not for Nearest Bridge */
@ -7896,6 +7900,7 @@ static int i40e_init_interrupt_scheme(struct i40e_pf *pf)
#endif
I40E_FLAG_RSS_ENABLED |
I40E_FLAG_DCB_CAPABLE |
I40E_FLAG_DCB_ENABLED |
I40E_FLAG_SRIOV_ENABLED |
I40E_FLAG_FD_SB_ENABLED |
I40E_FLAG_FD_ATR_ENABLED |
@ -10502,6 +10507,7 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf)
I40E_FLAG_FD_SB_ENABLED |
I40E_FLAG_FD_ATR_ENABLED |
I40E_FLAG_DCB_CAPABLE |
I40E_FLAG_DCB_ENABLED |
I40E_FLAG_SRIOV_ENABLED |
I40E_FLAG_VMDQ_ENABLED);
} else if (!(pf->flags & (I40E_FLAG_RSS_ENABLED |
@ -10525,7 +10531,8 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf)
/* Not enough queues for all TCs */
if ((pf->flags & I40E_FLAG_DCB_CAPABLE) &&
(queues_left < I40E_MAX_TRAFFIC_CLASS)) {
pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
pf->flags &= ~(I40E_FLAG_DCB_CAPABLE |
I40E_FLAG_DCB_ENABLED);
dev_info(&pf->pdev->dev, "not enough queues for DCB. DCB is disabled.\n");
}
pf->num_lan_qps = max_t(int, pf->rss_size_max,
@ -10922,7 +10929,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
err = i40e_init_pf_dcb(pf);
if (err) {
dev_info(&pdev->dev, "DCB init failed %d, disabled\n", err);
pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
pf->flags &= ~(I40E_FLAG_DCB_CAPABLE & I40E_FLAG_DCB_ENABLED);
/* Continue without DCB enabled */
}
#endif /* CONFIG_I40E_DCB */
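One masking subtlety in the probe-path hunk above: clearing two flag bits takes ~(A | B), whereas ~(A & B) only clears bits the two masks share, a no-op for disjoint flags. A sketch with hypothetical bit values:

    u64 flags = 0x3;       /* FLAG_A = 0x1, FLAG_B = 0x2 (hypothetical) */
    flags &= ~(0x1 | 0x2); /* -> 0x0, both bits cleared                 */
    flags &= ~(0x1 & 0x2); /* -> unchanged, since 0x1 & 0x2 == 0        */

As written, the "~(I40E_FLAG_DCB_CAPABLE & I40E_FLAG_DCB_ENABLED)" line is the second form.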


@ -50,6 +50,10 @@ static const struct mtk_ethtool_stats {
MTK_ETHTOOL_STAT(rx_flow_control_packets),
};
static const char * const mtk_clks_source_name[] = {
"ethif", "esw", "gp1", "gp2"
};
void mtk_w32(struct mtk_eth *eth, u32 val, unsigned reg)
{
__raw_writel(val, eth->base + reg);
@ -291,7 +295,7 @@ err_phy:
static int mtk_mdio_init(struct mtk_eth *eth)
{
struct device_node *mii_np;
int err;
int ret;
mii_np = of_get_child_by_name(eth->dev->of_node, "mdio-bus");
if (!mii_np) {
@ -300,13 +304,13 @@ static int mtk_mdio_init(struct mtk_eth *eth)
}
if (!of_device_is_available(mii_np)) {
err = 0;
ret = -ENODEV;
goto err_put_node;
}
eth->mii_bus = mdiobus_alloc();
eth->mii_bus = devm_mdiobus_alloc(eth->dev);
if (!eth->mii_bus) {
err = -ENOMEM;
ret = -ENOMEM;
goto err_put_node;
}
@ -317,19 +321,11 @@ static int mtk_mdio_init(struct mtk_eth *eth)
eth->mii_bus->parent = eth->dev;
snprintf(eth->mii_bus->id, MII_BUS_ID_SIZE, "%s", mii_np->name);
err = of_mdiobus_register(eth->mii_bus, mii_np);
if (err)
goto err_free_bus;
return 0;
err_free_bus:
mdiobus_free(eth->mii_bus);
ret = of_mdiobus_register(eth->mii_bus, mii_np);
err_put_node:
of_node_put(mii_np);
eth->mii_bus = NULL;
return err;
return ret;
}
static void mtk_mdio_cleanup(struct mtk_eth *eth)
@ -338,8 +334,6 @@ static void mtk_mdio_cleanup(struct mtk_eth *eth)
return;
mdiobus_unregister(eth->mii_bus);
of_node_put(eth->mii_bus->dev.of_node);
mdiobus_free(eth->mii_bus);
}
static inline void mtk_irq_disable(struct mtk_eth *eth, u32 mask)
@ -588,14 +582,15 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
dma_addr_t mapped_addr;
unsigned int nr_frags;
int i, n_desc = 1;
u32 txd4 = 0;
u32 txd4 = 0, fport;
itxd = ring->next_free;
if (itxd == ring->last_free)
return -ENOMEM;
/* set the forward port */
txd4 |= (mac->id + 1) << TX_DMA_FPORT_SHIFT;
fport = (mac->id + 1) << TX_DMA_FPORT_SHIFT;
txd4 |= fport;
tx_buf = mtk_desc_to_tx_buf(ring, itxd);
memset(tx_buf, 0, sizeof(*tx_buf));
@ -653,7 +648,7 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
WRITE_ONCE(txd->txd3, (TX_DMA_SWC |
TX_DMA_PLEN0(frag_map_size) |
last_frag * TX_DMA_LS0));
WRITE_ONCE(txd->txd4, 0);
WRITE_ONCE(txd->txd4, fport);
tx_buf->skb = (struct sk_buff *)MTK_DMA_DUMMY_DESC;
tx_buf = mtk_desc_to_tx_buf(ring, txd);
@ -865,7 +860,7 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
/* receive data */
skb = build_skb(data, ring->frag_size);
if (unlikely(!skb)) {
put_page(virt_to_head_page(new_data));
skb_free_frag(new_data);
netdev->stats.rx_dropped++;
goto release_desc;
}
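The allocator pairing is the point of this hunk: the RX buffers come from the page-fragment allocator, so they must be released with skb_free_frag(). A sketch of the required pairing (the refill path in this driver is assumed to use netdev_alloc_frag()):

    void *buf = netdev_alloc_frag(ring->frag_size);  /* page-fragment alloc */
    if (buf)
    	skb_free_frag(buf);                      /* matching free       */
    /* put_page(virt_to_head_page(buf)) would instead drop a whole-page
     * reference the fragment allocator still accounts for.
     */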
@ -1506,10 +1501,7 @@ static void mtk_uninit(struct net_device *dev)
struct mtk_eth *eth = mac->hw;
phy_disconnect(mac->phy_dev);
mtk_mdio_cleanup(eth);
mtk_irq_disable(eth, ~0);
free_irq(eth->irq[1], dev);
free_irq(eth->irq[2], dev);
}
static int mtk_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
@ -1813,6 +1805,7 @@ static int mtk_probe(struct platform_device *pdev)
if (!eth)
return -ENOMEM;
eth->dev = &pdev->dev;
eth->base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(eth->base))
return PTR_ERR(eth->base);
@ -1847,21 +1840,21 @@ static int mtk_probe(struct platform_device *pdev)
return -ENXIO;
}
}
for (i = 0; i < ARRAY_SIZE(eth->clks); i++) {
eth->clks[i] = devm_clk_get(eth->dev,
mtk_clks_source_name[i]);
if (IS_ERR(eth->clks[i])) {
if (PTR_ERR(eth->clks[i]) == -EPROBE_DEFER)
return -EPROBE_DEFER;
return -ENODEV;
}
}
eth->clk_ethif = devm_clk_get(&pdev->dev, "ethif");
eth->clk_esw = devm_clk_get(&pdev->dev, "esw");
eth->clk_gp1 = devm_clk_get(&pdev->dev, "gp1");
eth->clk_gp2 = devm_clk_get(&pdev->dev, "gp2");
if (IS_ERR(eth->clk_esw) || IS_ERR(eth->clk_gp1) ||
IS_ERR(eth->clk_gp2) || IS_ERR(eth->clk_ethif))
return -ENODEV;
clk_prepare_enable(eth->clks[MTK_CLK_ETHIF]);
clk_prepare_enable(eth->clks[MTK_CLK_ESW]);
clk_prepare_enable(eth->clks[MTK_CLK_GP1]);
clk_prepare_enable(eth->clks[MTK_CLK_GP2]);
clk_prepare_enable(eth->clk_ethif);
clk_prepare_enable(eth->clk_esw);
clk_prepare_enable(eth->clk_gp1);
clk_prepare_enable(eth->clk_gp2);
eth->dev = &pdev->dev;
eth->msg_enable = netif_msg_init(mtk_msg_level, MTK_DEFAULT_MSG_ENABLE);
INIT_WORK(&eth->pending_work, mtk_pending_work);
@ -1903,15 +1896,24 @@ err_free_dev:
static int mtk_remove(struct platform_device *pdev)
{
struct mtk_eth *eth = platform_get_drvdata(pdev);
int i;
clk_disable_unprepare(eth->clk_ethif);
clk_disable_unprepare(eth->clk_esw);
clk_disable_unprepare(eth->clk_gp1);
clk_disable_unprepare(eth->clk_gp2);
/* stop all devices to make sure that dma is properly shut down */
for (i = 0; i < MTK_MAC_COUNT; i++) {
if (!eth->netdev[i])
continue;
mtk_stop(eth->netdev[i]);
}
clk_disable_unprepare(eth->clks[MTK_CLK_ETHIF]);
clk_disable_unprepare(eth->clks[MTK_CLK_ESW]);
clk_disable_unprepare(eth->clks[MTK_CLK_GP1]);
clk_disable_unprepare(eth->clks[MTK_CLK_GP2]);
netif_napi_del(&eth->tx_napi);
netif_napi_del(&eth->rx_napi);
mtk_cleanup(eth);
mtk_mdio_cleanup(eth);
platform_set_drvdata(pdev, NULL);
return 0;


@ -290,6 +290,17 @@ enum mtk_tx_flags {
MTK_TX_FLAGS_PAGE0 = 0x02,
};
/* This enum lists the clocks in the order in which they appear in the
* clks array
*/
enum mtk_clks_map {
MTK_CLK_ETHIF,
MTK_CLK_ESW,
MTK_CLK_GP1,
MTK_CLK_GP2,
MTK_CLK_MAX
};
/* struct mtk_tx_buf - This struct holds the pointers to the memory pointed at
* by the TX descriptors
* @skb: The SKB pointer of the packet being sent
@ -370,10 +381,7 @@ struct mtk_rx_ring {
* @scratch_ring: Newer SoCs need memory for a second HW managed TX ring
* @phy_scratch_ring: physical address of scratch_ring
* @scratch_head: The scratch memory that scratch_ring points to.
* @clk_ethif: The ethif clock
* @clk_esw: The switch clock
* @clk_gp1: The gmac1 clock
* @clk_gp2: The gmac2 clock
* @clks: clock array for all clocks required
* @mii_bus: If there is a bus we need to create an instance for it
* @pending_work: The workqueue used to reset the dma ring
*/
@ -400,10 +408,8 @@ struct mtk_eth {
struct mtk_tx_dma *scratch_ring;
dma_addr_t phy_scratch_ring;
void *scratch_head;
struct clk *clk_ethif;
struct clk *clk_esw;
struct clk *clk_gp1;
struct clk *clk_gp2;
struct clk *clks[MTK_CLK_MAX];
struct mii_bus *mii_bus;
struct work_struct pending_work;
};


@ -94,7 +94,7 @@ static u8 mlx4_en_dcbnl_getcap(struct net_device *dev, int capid, u8 *cap)
*cap = true;
break;
case DCB_CAP_ATTR_DCBX:
*cap = priv->cee_params.dcbx_cap;
*cap = priv->dcbx_cap;
break;
case DCB_CAP_ATTR_PFC_TCS:
*cap = 1 << mlx4_max_tc(priv->mdev->dev);
@ -111,14 +111,14 @@ static u8 mlx4_en_dcbnl_getpfcstate(struct net_device *netdev)
{
struct mlx4_en_priv *priv = netdev_priv(netdev);
return priv->cee_params.dcb_cfg.pfc_state;
return priv->cee_config.pfc_state;
}
static void mlx4_en_dcbnl_setpfcstate(struct net_device *netdev, u8 state)
{
struct mlx4_en_priv *priv = netdev_priv(netdev);
priv->cee_params.dcb_cfg.pfc_state = state;
priv->cee_config.pfc_state = state;
}
static void mlx4_en_dcbnl_get_pfc_cfg(struct net_device *netdev, int priority,
@ -126,7 +126,7 @@ static void mlx4_en_dcbnl_get_pfc_cfg(struct net_device *netdev, int priority,
{
struct mlx4_en_priv *priv = netdev_priv(netdev);
*setting = priv->cee_params.dcb_cfg.tc_config[priority].dcb_pfc;
*setting = priv->cee_config.dcb_pfc[priority];
}
static void mlx4_en_dcbnl_set_pfc_cfg(struct net_device *netdev, int priority,
@ -134,8 +134,8 @@ static void mlx4_en_dcbnl_set_pfc_cfg(struct net_device *netdev, int priority,
{
struct mlx4_en_priv *priv = netdev_priv(netdev);
priv->cee_params.dcb_cfg.tc_config[priority].dcb_pfc = setting;
priv->cee_params.dcb_cfg.pfc_state = true;
priv->cee_config.dcb_pfc[priority] = setting;
priv->cee_config.pfc_state = true;
}
static int mlx4_en_dcbnl_getnumtcs(struct net_device *netdev, int tcid, u8 *num)
@ -157,13 +157,11 @@ static u8 mlx4_en_dcbnl_set_all(struct net_device *netdev)
{
struct mlx4_en_priv *priv = netdev_priv(netdev);
struct mlx4_en_dev *mdev = priv->mdev;
struct mlx4_en_cee_config *dcb_cfg = &priv->cee_params.dcb_cfg;
int err = 0;
if (!(priv->cee_params.dcbx_cap & DCB_CAP_DCBX_VER_CEE))
return -EINVAL;
if (!(priv->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
return 1;
if (dcb_cfg->pfc_state) {
if (priv->cee_config.pfc_state) {
int tc;
priv->prof->rx_pause = 0;
@ -171,7 +169,7 @@ static u8 mlx4_en_dcbnl_set_all(struct net_device *netdev)
for (tc = 0; tc < CEE_DCBX_MAX_PRIO; tc++) {
u8 tc_mask = 1 << tc;
switch (dcb_cfg->tc_config[tc].dcb_pfc) {
switch (priv->cee_config.dcb_pfc[tc]) {
case pfc_disabled:
priv->prof->tx_ppp &= ~tc_mask;
priv->prof->rx_ppp &= ~tc_mask;
@ -199,15 +197,17 @@ static u8 mlx4_en_dcbnl_set_all(struct net_device *netdev)
en_dbg(DRV, priv, "Set pfc off\n");
}
err = mlx4_SET_PORT_general(mdev->dev, priv->port,
priv->rx_skb_size + ETH_FCS_LEN,
priv->prof->tx_pause,
priv->prof->tx_ppp,
priv->prof->rx_pause,
priv->prof->rx_ppp);
if (err)
if (mlx4_SET_PORT_general(mdev->dev, priv->port,
priv->rx_skb_size + ETH_FCS_LEN,
priv->prof->tx_pause,
priv->prof->tx_ppp,
priv->prof->rx_pause,
priv->prof->rx_ppp)) {
en_err(priv, "Failed setting pause params\n");
return err;
return 1;
}
return 0;
}
static u8 mlx4_en_dcbnl_get_state(struct net_device *dev)
@ -225,7 +225,7 @@ static u8 mlx4_en_dcbnl_set_state(struct net_device *dev, u8 state)
struct mlx4_en_priv *priv = netdev_priv(dev);
int num_tcs = 0;
if (!(priv->cee_params.dcbx_cap & DCB_CAP_DCBX_VER_CEE))
if (!(priv->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
return 1;
if (!!(state) == !!(priv->flags & MLX4_EN_FLAG_DCB_ENABLED))
@ -238,7 +238,10 @@ static u8 mlx4_en_dcbnl_set_state(struct net_device *dev, u8 state)
priv->flags &= ~MLX4_EN_FLAG_DCB_ENABLED;
}
return mlx4_en_setup_tc(dev, num_tcs);
if (mlx4_en_setup_tc(dev, num_tcs))
return 1;
return 0;
}
/* On success returns a non-zero 802.1p user priority bitmap
@ -252,7 +255,7 @@ static int mlx4_en_dcbnl_getapp(struct net_device *netdev, u8 idtype, u16 id)
.selector = idtype,
.protocol = id,
};
if (!(priv->cee_params.dcbx_cap & DCB_CAP_DCBX_VER_CEE))
if (!(priv->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
return 0;
return dcb_getapp(netdev, &app);
@ -264,7 +267,7 @@ static int mlx4_en_dcbnl_setapp(struct net_device *netdev, u8 idtype,
struct mlx4_en_priv *priv = netdev_priv(netdev);
struct dcb_app app;
if (!(priv->cee_params.dcbx_cap & DCB_CAP_DCBX_VER_CEE))
if (!(priv->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
return -EINVAL;
memset(&app, 0, sizeof(struct dcb_app));
@ -433,7 +436,7 @@ static u8 mlx4_en_dcbnl_getdcbx(struct net_device *dev)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
return priv->cee_params.dcbx_cap;
return priv->dcbx_cap;
}
static u8 mlx4_en_dcbnl_setdcbx(struct net_device *dev, u8 mode)
@ -442,7 +445,7 @@ static u8 mlx4_en_dcbnl_setdcbx(struct net_device *dev, u8 mode)
struct ieee_ets ets = {0};
struct ieee_pfc pfc = {0};
if (mode == priv->cee_params.dcbx_cap)
if (mode == priv->dcbx_cap)
return 0;
if ((mode & DCB_CAP_DCBX_LLD_MANAGED) ||
@ -451,7 +454,7 @@ static u8 mlx4_en_dcbnl_setdcbx(struct net_device *dev, u8 mode)
!(mode & DCB_CAP_DCBX_HOST))
goto err;
priv->cee_params.dcbx_cap = mode;
priv->dcbx_cap = mode;
ets.ets_cap = IEEE_8021QAZ_MAX_TCS;
pfc.pfc_cap = IEEE_8021QAZ_MAX_TCS;


@ -71,10 +71,11 @@ int mlx4_en_setup_tc(struct net_device *dev, u8 up)
#ifdef CONFIG_MLX4_EN_DCB
if (!mlx4_is_slave(priv->mdev->dev)) {
if (up) {
priv->flags |= MLX4_EN_FLAG_DCB_ENABLED;
if (priv->dcbx_cap)
priv->flags |= MLX4_EN_FLAG_DCB_ENABLED;
} else {
priv->flags &= ~MLX4_EN_FLAG_DCB_ENABLED;
priv->cee_params.dcb_cfg.pfc_state = false;
priv->cee_config.pfc_state = false;
}
}
#endif /* CONFIG_MLX4_EN_DCB */
@ -3048,9 +3049,6 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
struct mlx4_en_priv *priv;
int i;
int err;
#ifdef CONFIG_MLX4_EN_DCB
struct tc_configuration *tc;
#endif
dev = alloc_etherdev_mqs(sizeof(struct mlx4_en_priv),
MAX_TX_RINGS, MAX_RX_RINGS);
@ -3117,16 +3115,13 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
priv->msg_enable = MLX4_EN_MSG_LEVEL;
#ifdef CONFIG_MLX4_EN_DCB
if (!mlx4_is_slave(priv->mdev->dev)) {
priv->cee_params.dcbx_cap = DCB_CAP_DCBX_VER_CEE |
DCB_CAP_DCBX_HOST |
DCB_CAP_DCBX_VER_IEEE;
priv->dcbx_cap = DCB_CAP_DCBX_VER_CEE | DCB_CAP_DCBX_HOST |
DCB_CAP_DCBX_VER_IEEE;
priv->flags |= MLX4_EN_DCB_ENABLED;
priv->cee_params.dcb_cfg.pfc_state = false;
priv->cee_config.pfc_state = false;
for (i = 0; i < MLX4_EN_NUM_UP; i++) {
tc = &priv->cee_params.dcb_cfg.tc_config[i];
tc->dcb_pfc = pfc_disabled;
}
for (i = 0; i < MLX4_EN_NUM_UP; i++)
priv->cee_config.dcb_pfc[i] = pfc_disabled;
if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ETS_CFG) {
dev->dcbnl_ops = &mlx4_en_dcbnl_ops;


@ -818,7 +818,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
real_size = get_real_size(skb, shinfo, dev, &lso_header_size,
&inline_ok, &fragptr);
if (unlikely(!real_size))
goto tx_drop;
goto tx_drop_count;
/* Align descriptor to TXBB size */
desc_size = ALIGN(real_size, TXBB_SIZE);
@ -826,7 +826,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
if (unlikely(nr_txbb > MAX_DESC_TXBBS)) {
if (netif_msg_tx_err(priv))
en_warn(priv, "Oversized header or SG list\n");
goto tx_drop;
goto tx_drop_count;
}
bf_ok = ring->bf_enabled;
@ -1071,9 +1071,10 @@ tx_drop_unmap:
PCI_DMA_TODEVICE);
}
tx_drop_count:
ring->tx_dropped++;
tx_drop:
dev_kfree_skb_any(skb);
ring->tx_dropped++;
return NETDEV_TX_OK;
}
@ -1106,7 +1107,7 @@ netdev_tx_t mlx4_en_xmit_frame(struct mlx4_en_rx_alloc *frame,
goto tx_drop;
if (mlx4_en_is_tx_ring_full(ring))
goto tx_drop;
goto tx_drop_count;
/* fetch ring->cons far ahead before needing it to avoid stall */
ring_cons = READ_ONCE(ring->cons);
@ -1176,7 +1177,8 @@ netdev_tx_t mlx4_en_xmit_frame(struct mlx4_en_rx_alloc *frame,
return NETDEV_TX_OK;
tx_drop:
tx_drop_count:
ring->tx_dropped++;
tx_drop:
return NETDEV_TX_BUSY;
}


@ -482,20 +482,10 @@ enum dcb_pfc_type {
pfc_enabled_rx
};
struct tc_configuration {
enum dcb_pfc_type dcb_pfc;
};
struct mlx4_en_cee_config {
bool pfc_state;
struct tc_configuration tc_config[MLX4_EN_NUM_UP];
enum dcb_pfc_type dcb_pfc[MLX4_EN_NUM_UP];
};
struct mlx4_en_cee_params {
u8 dcbx_cap;
struct mlx4_en_cee_config dcb_cfg;
};
#endif
struct ethtool_flow_id {
@ -624,7 +614,8 @@ struct mlx4_en_priv {
struct ieee_ets ets;
u16 maxrate[IEEE_8021QAZ_MAX_TCS];
enum dcbnl_cndd_states cndd_state[IEEE_8021QAZ_MAX_TCS];
struct mlx4_en_cee_params cee_params;
struct mlx4_en_cee_config cee_config;
u8 dcbx_cap;
#endif
#ifdef CONFIG_RFS_ACCEL
spinlock_t filters_lock;


@ -52,7 +52,7 @@
#define MLX4_FLAG_V_IGNORE_FCS_MASK 0x2
#define MLX4_IGNORE_FCS_MASK 0x1
#define MLNX4_TX_MAX_NUMBER 8
#define MLX4_TC_MAX_NUMBER 8
void mlx4_init_mac_table(struct mlx4_dev *dev, struct mlx4_mac_table *table)
{
@ -2022,7 +2022,7 @@ int mlx4_max_tc(struct mlx4_dev *dev)
u8 num_tc = dev->caps.max_tc_eth;
if (!num_tc)
num_tc = MLNX4_TX_MAX_NUMBER;
num_tc = MLX4_TC_MAX_NUMBER;
return num_tc;
}


@ -331,7 +331,7 @@ static void mlx5e_get_ethtool_stats(struct net_device *dev,
if (mlx5e_query_global_pause_combined(priv)) {
for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[0],
pport_per_prio_pfc_stats_desc, 0);
pport_per_prio_pfc_stats_desc, i);
}
}
@ -659,9 +659,10 @@ out:
static void ptys2ethtool_supported_link(unsigned long *supported_modes,
u32 eth_proto_cap)
{
unsigned long proto_cap = eth_proto_cap;
int proto;
for_each_set_bit(proto, (unsigned long *)&eth_proto_cap, MLX5E_LINK_MODES_NUMBER)
for_each_set_bit(proto, &proto_cap, MLX5E_LINK_MODES_NUMBER)
bitmap_or(supported_modes, supported_modes,
ptys2ethtool_table[proto].supported,
__ETHTOOL_LINK_MODE_MASK_NBITS);
@ -670,9 +671,10 @@ static void ptys2ethtool_supported_link(unsigned long *supported_modes,
static void ptys2ethtool_adver_link(unsigned long *advertising_modes,
u32 eth_proto_cap)
{
unsigned long proto_cap = eth_proto_cap;
int proto;
for_each_set_bit(proto, (unsigned long *)&eth_proto_cap, MLX5E_LINK_MODES_NUMBER)
for_each_set_bit(proto, &proto_cap, MLX5E_LINK_MODES_NUMBER)
bitmap_or(advertising_modes, advertising_modes,
ptys2ethtool_table[proto].advertised,
__ETHTOOL_LINK_MODE_MASK_NBITS);
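The widening copy is what makes the bit scan well-defined; a sketch of the failure mode being fixed (illustrative values):

    u32 eth_proto_cap = 0x1;
    unsigned long proto_cap = eth_proto_cap;  /* widen u32 -> unsigned long */
    int proto;

    /* Safe on every arch: proto_cap really is an unsigned long. */
    for_each_set_bit(proto, &proto_cap, MLX5E_LINK_MODES_NUMBER)
    	/* map proto to an ethtool link mode */;
    /* The old cast of &eth_proto_cap reads 8 bytes where only 4 are valid
     * on 64-bit targets, and lands in the wrong half on big-endian.      */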


@ -637,24 +637,32 @@ bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq)
static void mlx5e_lro_update_hdr(struct sk_buff *skb, struct mlx5_cqe64 *cqe,
u32 cqe_bcnt)
{
struct ethhdr *eth = (struct ethhdr *)(skb->data);
struct iphdr *ipv4 = (struct iphdr *)(skb->data + ETH_HLEN);
struct ipv6hdr *ipv6 = (struct ipv6hdr *)(skb->data + ETH_HLEN);
struct ethhdr *eth = (struct ethhdr *)(skb->data);
struct iphdr *ipv4;
struct ipv6hdr *ipv6;
struct tcphdr *tcp;
int network_depth = 0;
__be16 proto;
u16 tot_len;
u8 l4_hdr_type = get_cqe_l4_hdr_type(cqe);
int tcp_ack = ((CQE_L4_HDR_TYPE_TCP_ACK_NO_DATA == l4_hdr_type) ||
(CQE_L4_HDR_TYPE_TCP_ACK_AND_DATA == l4_hdr_type));
u16 tot_len = cqe_bcnt - ETH_HLEN;
skb->mac_len = ETH_HLEN;
proto = __vlan_get_protocol(skb, eth->h_proto, &network_depth);
if (eth->h_proto == htons(ETH_P_IP)) {
tcp = (struct tcphdr *)(skb->data + ETH_HLEN +
ipv4 = (struct iphdr *)(skb->data + network_depth);
ipv6 = (struct ipv6hdr *)(skb->data + network_depth);
tot_len = cqe_bcnt - network_depth;
if (proto == htons(ETH_P_IP)) {
tcp = (struct tcphdr *)(skb->data + network_depth +
sizeof(struct iphdr));
ipv6 = NULL;
skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
} else {
tcp = (struct tcphdr *)(skb->data + ETH_HLEN +
tcp = (struct tcphdr *)(skb->data + network_depth +
sizeof(struct ipv6hdr));
ipv4 = NULL;
skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;


@ -356,6 +356,7 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
sq->stats.stopped++;
}
sq->stats.xmit_more += skb->xmit_more;
if (!skb->xmit_more || netif_xmit_stopped(sq->txq)) {
int bf_sz = 0;
@ -375,7 +376,6 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
sq->stats.packets++;
sq->stats.bytes += num_bytes;
sq->stats.xmit_more += skb->xmit_more;
return NETDEV_TX_OK;
dma_unmap_wqe_err:
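The one-line move matters because of ownership: once the doorbell is rung, the TX completion path may free this skb on another CPU, so every skb field has to be read first. A sketch of the ordering the fix establishes (the doorbell call name is an assumption based on this driver's TX path):

    sq->stats.xmit_more += skb->xmit_more;   /* read while the skb is still ours */

    if (!skb->xmit_more || netif_xmit_stopped(sq->txq))
    	mlx5e_tx_notify_hw(sq, &wqe->ctrl, bf_sz);
    /* from here on, TX reclaim may already have freed the skb */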


@ -56,6 +56,7 @@
#include <generated/utsrelease.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_mirred.h>
#include <net/netevent.h>
#include "spectrum.h"
#include "core.h"
@ -2105,6 +2106,13 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
dev->netdev_ops = &mlxsw_sp_port_netdev_ops;
dev->ethtool_ops = &mlxsw_sp_port_ethtool_ops;
err = mlxsw_sp_port_swid_set(mlxsw_sp_port, 0);
if (err) {
dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set SWID\n",
mlxsw_sp_port->local_port);
goto err_port_swid_set;
}
err = mlxsw_sp_port_dev_addr_init(mlxsw_sp_port);
if (err) {
dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unable to init port mac address\n",
@ -2130,13 +2138,6 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
goto err_port_system_port_mapping_set;
}
err = mlxsw_sp_port_swid_set(mlxsw_sp_port, 0);
if (err) {
dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set SWID\n",
mlxsw_sp_port->local_port);
goto err_port_swid_set;
}
err = mlxsw_sp_port_speed_by_width_set(mlxsw_sp_port, width);
if (err) {
dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to enable speeds\n",
@ -2218,10 +2219,10 @@ err_port_buffers_init:
err_port_admin_status_set:
err_port_mtu_set:
err_port_speed_by_width_set:
mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
err_port_swid_set:
err_port_system_port_mapping_set:
err_dev_addr_init:
mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
err_port_swid_set:
free_percpu(mlxsw_sp_port->pcpu_stats);
err_alloc_stats:
kfree(mlxsw_sp_port->untagged_vlans);
@ -4541,18 +4542,26 @@ static struct notifier_block mlxsw_sp_inetaddr_nb __read_mostly = {
.priority = 10, /* Must be called before FIB notifier block */
};
static struct notifier_block mlxsw_sp_router_netevent_nb __read_mostly = {
.notifier_call = mlxsw_sp_router_netevent_event,
};
static int __init mlxsw_sp_module_init(void)
{
int err;
register_netdevice_notifier(&mlxsw_sp_netdevice_nb);
register_inetaddr_notifier(&mlxsw_sp_inetaddr_nb);
register_netevent_notifier(&mlxsw_sp_router_netevent_nb);
err = mlxsw_core_driver_register(&mlxsw_sp_driver);
if (err)
goto err_core_driver_register;
return 0;
err_core_driver_register:
unregister_netevent_notifier(&mlxsw_sp_router_netevent_nb);
unregister_inetaddr_notifier(&mlxsw_sp_inetaddr_nb);
unregister_netdevice_notifier(&mlxsw_sp_netdevice_nb);
return err;
}
@ -4560,6 +4569,7 @@ err_core_driver_register:
static void __exit mlxsw_sp_module_exit(void)
{
mlxsw_core_driver_unregister(&mlxsw_sp_driver);
unregister_netevent_notifier(&mlxsw_sp_router_netevent_nb);
unregister_inetaddr_notifier(&mlxsw_sp_inetaddr_nb);
unregister_netdevice_notifier(&mlxsw_sp_netdevice_nb);
}


@ -587,6 +587,8 @@ int mlxsw_sp_router_neigh_construct(struct net_device *dev,
struct neighbour *n);
void mlxsw_sp_router_neigh_destroy(struct net_device *dev,
struct neighbour *n);
int mlxsw_sp_router_netevent_event(struct notifier_block *unused,
unsigned long event, void *ptr);
int mlxsw_sp_kvdl_alloc(struct mlxsw_sp *mlxsw_sp, unsigned int entry_count);
void mlxsw_sp_kvdl_free(struct mlxsw_sp *mlxsw_sp, int entry_index);


@ -107,6 +107,7 @@ mlxsw_sp_prefix_usage_clear(struct mlxsw_sp_prefix_usage *prefix_usage,
}
struct mlxsw_sp_fib_key {
struct net_device *dev;
unsigned char addr[sizeof(struct in6_addr)];
unsigned char prefix_len;
};
@ -123,7 +124,7 @@ struct mlxsw_sp_fib_entry {
struct rhash_head ht_node;
struct mlxsw_sp_fib_key key;
enum mlxsw_sp_fib_entry_type type;
u8 added:1;
unsigned int ref_count;
u16 rif; /* used for action local */
struct mlxsw_sp_vr *vr;
struct list_head nexthop_group_node;
@ -171,13 +172,15 @@ static void mlxsw_sp_fib_entry_remove(struct mlxsw_sp_fib *fib,
static struct mlxsw_sp_fib_entry *
mlxsw_sp_fib_entry_create(struct mlxsw_sp_fib *fib, const void *addr,
size_t addr_len, unsigned char prefix_len)
size_t addr_len, unsigned char prefix_len,
struct net_device *dev)
{
struct mlxsw_sp_fib_entry *fib_entry;
fib_entry = kzalloc(sizeof(*fib_entry), GFP_KERNEL);
if (!fib_entry)
return NULL;
fib_entry->key.dev = dev;
memcpy(fib_entry->key.addr, addr, addr_len);
fib_entry->key.prefix_len = prefix_len;
return fib_entry;
@ -190,10 +193,13 @@ static void mlxsw_sp_fib_entry_destroy(struct mlxsw_sp_fib_entry *fib_entry)
static struct mlxsw_sp_fib_entry *
mlxsw_sp_fib_entry_lookup(struct mlxsw_sp_fib *fib, const void *addr,
size_t addr_len, unsigned char prefix_len)
size_t addr_len, unsigned char prefix_len,
struct net_device *dev)
{
struct mlxsw_sp_fib_key key = {{ 0 } };
struct mlxsw_sp_fib_key key;
memset(&key, 0, sizeof(key));
key.dev = dev;
memcpy(key.addr, addr, addr_len);
key.prefix_len = prefix_len;
return rhashtable_lookup_fast(&fib->ht, &key, mlxsw_sp_fib_ht_params);
@ -938,8 +944,8 @@ static void mlxsw_sp_router_neigh_update_hw(struct work_struct *work)
mlxsw_sp_port_dev_put(mlxsw_sp_port);
}
static int mlxsw_sp_router_netevent_event(struct notifier_block *unused,
unsigned long event, void *ptr)
int mlxsw_sp_router_netevent_event(struct notifier_block *unused,
unsigned long event, void *ptr)
{
struct mlxsw_sp_neigh_entry *neigh_entry;
struct mlxsw_sp_port *mlxsw_sp_port;
@ -1009,10 +1015,6 @@ static int mlxsw_sp_router_netevent_event(struct notifier_block *unused,
return NOTIFY_DONE;
}
static struct notifier_block mlxsw_sp_router_netevent_nb __read_mostly = {
.notifier_call = mlxsw_sp_router_netevent_event,
};
static int mlxsw_sp_neigh_init(struct mlxsw_sp *mlxsw_sp)
{
int err;
@ -1027,10 +1029,6 @@ static int mlxsw_sp_neigh_init(struct mlxsw_sp *mlxsw_sp)
*/
mlxsw_sp_router_neighs_update_interval_init(mlxsw_sp);
err = register_netevent_notifier(&mlxsw_sp_router_netevent_nb);
if (err)
goto err_register_netevent_notifier;
/* Create the delayed works for the activity_update */
INIT_DELAYED_WORK(&mlxsw_sp->router.neighs_update.dw,
mlxsw_sp_router_neighs_update_work);
@ -1039,17 +1037,12 @@ static int mlxsw_sp_neigh_init(struct mlxsw_sp *mlxsw_sp)
mlxsw_core_schedule_dw(&mlxsw_sp->router.neighs_update.dw, 0);
mlxsw_core_schedule_dw(&mlxsw_sp->router.nexthop_probe_dw, 0);
return 0;
err_register_netevent_notifier:
rhashtable_destroy(&mlxsw_sp->router.neigh_ht);
return err;
}
static void mlxsw_sp_neigh_fini(struct mlxsw_sp *mlxsw_sp)
{
cancel_delayed_work_sync(&mlxsw_sp->router.neighs_update.dw);
cancel_delayed_work_sync(&mlxsw_sp->router.nexthop_probe_dw);
unregister_netevent_notifier(&mlxsw_sp_router_netevent_nb);
rhashtable_destroy(&mlxsw_sp->router.neigh_ht);
}
@ -1524,7 +1517,14 @@ int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
return err;
mlxsw_sp_lpm_init(mlxsw_sp);
mlxsw_sp_vrs_init(mlxsw_sp);
return mlxsw_sp_neigh_init(mlxsw_sp);
err = mlxsw_sp_neigh_init(mlxsw_sp);
if (err)
goto err_neigh_init;
return 0;
err_neigh_init:
__mlxsw_sp_router_fini(mlxsw_sp);
return err;
}
void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
@ -1626,11 +1626,8 @@ static int mlxsw_sp_fib_entry_op(struct mlxsw_sp *mlxsw_sp,
static int mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_fib_entry *fib_entry)
{
enum mlxsw_reg_ralue_op op;
op = !fib_entry->added ? MLXSW_REG_RALUE_OP_WRITE_WRITE :
MLXSW_REG_RALUE_OP_WRITE_UPDATE;
return mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry, op);
return mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry,
MLXSW_REG_RALUE_OP_WRITE_WRITE);
}
static int mlxsw_sp_fib_entry_del(struct mlxsw_sp *mlxsw_sp,
@ -1695,6 +1692,79 @@ mlxsw_sp_router_fib4_entry_fini(struct mlxsw_sp *mlxsw_sp,
mlxsw_sp_nexthop_group_put(mlxsw_sp, fib_entry);
}
static struct mlxsw_sp_fib_entry *
mlxsw_sp_fib_entry_get(struct mlxsw_sp *mlxsw_sp,
const struct switchdev_obj_ipv4_fib *fib4)
{
struct mlxsw_sp_fib_entry *fib_entry;
struct fib_info *fi = fib4->fi;
struct mlxsw_sp_vr *vr;
int err;
vr = mlxsw_sp_vr_get(mlxsw_sp, fib4->dst_len, fib4->tb_id,
MLXSW_SP_L3_PROTO_IPV4);
if (IS_ERR(vr))
return ERR_CAST(vr);
fib_entry = mlxsw_sp_fib_entry_lookup(vr->fib, &fib4->dst,
sizeof(fib4->dst),
fib4->dst_len, fi->fib_dev);
if (fib_entry) {
/* Already exists, just take a reference */
fib_entry->ref_count++;
return fib_entry;
}
fib_entry = mlxsw_sp_fib_entry_create(vr->fib, &fib4->dst,
sizeof(fib4->dst),
fib4->dst_len, fi->fib_dev);
if (!fib_entry) {
err = -ENOMEM;
goto err_fib_entry_create;
}
fib_entry->vr = vr;
fib_entry->ref_count = 1;
err = mlxsw_sp_router_fib4_entry_init(mlxsw_sp, fib4, fib_entry);
if (err)
goto err_fib4_entry_init;
return fib_entry;
err_fib4_entry_init:
mlxsw_sp_fib_entry_destroy(fib_entry);
err_fib_entry_create:
mlxsw_sp_vr_put(mlxsw_sp, vr);
return ERR_PTR(err);
}
static struct mlxsw_sp_fib_entry *
mlxsw_sp_fib_entry_find(struct mlxsw_sp *mlxsw_sp,
const struct switchdev_obj_ipv4_fib *fib4)
{
struct mlxsw_sp_vr *vr;
vr = mlxsw_sp_vr_find(mlxsw_sp, fib4->tb_id, MLXSW_SP_L3_PROTO_IPV4);
if (!vr)
return NULL;
return mlxsw_sp_fib_entry_lookup(vr->fib, &fib4->dst,
sizeof(fib4->dst), fib4->dst_len,
fib4->fi->fib_dev);
}
void mlxsw_sp_fib_entry_put(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_fib_entry *fib_entry)
{
struct mlxsw_sp_vr *vr = fib_entry->vr;
if (--fib_entry->ref_count == 0) {
mlxsw_sp_router_fib4_entry_fini(mlxsw_sp, fib_entry);
mlxsw_sp_fib_entry_destroy(fib_entry);
}
mlxsw_sp_vr_put(mlxsw_sp, vr);
}
static int
mlxsw_sp_router_fib4_add_prepare(struct mlxsw_sp_port *mlxsw_sp_port,
const struct switchdev_obj_ipv4_fib *fib4,
@ -1703,25 +1773,11 @@ mlxsw_sp_router_fib4_add_prepare(struct mlxsw_sp_port *mlxsw_sp_port,
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
struct mlxsw_sp_router_fib4_add_info *info;
struct mlxsw_sp_fib_entry *fib_entry;
struct mlxsw_sp_vr *vr;
int err;
vr = mlxsw_sp_vr_get(mlxsw_sp, fib4->dst_len, fib4->tb_id,
MLXSW_SP_L3_PROTO_IPV4);
if (IS_ERR(vr))
return PTR_ERR(vr);
fib_entry = mlxsw_sp_fib_entry_create(vr->fib, &fib4->dst,
sizeof(fib4->dst), fib4->dst_len);
if (!fib_entry) {
err = -ENOMEM;
goto err_fib_entry_create;
}
fib_entry->vr = vr;
err = mlxsw_sp_router_fib4_entry_init(mlxsw_sp, fib4, fib_entry);
if (err)
goto err_fib4_entry_init;
fib_entry = mlxsw_sp_fib_entry_get(mlxsw_sp, fib4);
if (IS_ERR(fib_entry))
return PTR_ERR(fib_entry);
info = kmalloc(sizeof(*info), GFP_KERNEL);
if (!info) {
@ -1736,11 +1792,7 @@ mlxsw_sp_router_fib4_add_prepare(struct mlxsw_sp_port *mlxsw_sp_port,
return 0;
err_alloc_info:
mlxsw_sp_router_fib4_entry_fini(mlxsw_sp, fib_entry);
err_fib4_entry_init:
mlxsw_sp_fib_entry_destroy(fib_entry);
err_fib_entry_create:
mlxsw_sp_vr_put(mlxsw_sp, vr);
mlxsw_sp_fib_entry_put(mlxsw_sp, fib_entry);
return err;
}
@ -1759,11 +1811,14 @@ mlxsw_sp_router_fib4_add_commit(struct mlxsw_sp_port *mlxsw_sp_port,
fib_entry = info->fib_entry;
kfree(info);
if (fib_entry->ref_count != 1)
return 0;
vr = fib_entry->vr;
err = mlxsw_sp_fib_entry_insert(fib_entry->vr->fib, fib_entry);
err = mlxsw_sp_fib_entry_insert(vr->fib, fib_entry);
if (err)
goto err_fib_entry_insert;
err = mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
err = mlxsw_sp_fib_entry_update(mlxsw_sp_port->mlxsw_sp, fib_entry);
if (err)
goto err_fib_entry_add;
return 0;
@ -1771,9 +1826,7 @@ mlxsw_sp_router_fib4_add_commit(struct mlxsw_sp_port *mlxsw_sp_port,
err_fib_entry_add:
mlxsw_sp_fib_entry_remove(vr->fib, fib_entry);
err_fib_entry_insert:
mlxsw_sp_router_fib4_entry_fini(mlxsw_sp, fib_entry);
mlxsw_sp_fib_entry_destroy(fib_entry);
mlxsw_sp_vr_put(mlxsw_sp, vr);
mlxsw_sp_fib_entry_put(mlxsw_sp, fib_entry);
return err;
}
@ -1793,23 +1846,18 @@ int mlxsw_sp_router_fib4_del(struct mlxsw_sp_port *mlxsw_sp_port,
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
struct mlxsw_sp_fib_entry *fib_entry;
struct mlxsw_sp_vr *vr;
vr = mlxsw_sp_vr_find(mlxsw_sp, fib4->tb_id, MLXSW_SP_L3_PROTO_IPV4);
if (!vr) {
dev_warn(mlxsw_sp->bus_info->dev, "Failed to find virtual router for FIB4 entry being removed.\n");
return -ENOENT;
}
fib_entry = mlxsw_sp_fib_entry_lookup(vr->fib, &fib4->dst,
sizeof(fib4->dst), fib4->dst_len);
fib_entry = mlxsw_sp_fib_entry_find(mlxsw_sp, fib4);
if (!fib_entry) {
dev_warn(mlxsw_sp->bus_info->dev, "Failed to find FIB4 entry being removed.\n");
return -ENOENT;
}
mlxsw_sp_fib_entry_del(mlxsw_sp_port->mlxsw_sp, fib_entry);
mlxsw_sp_fib_entry_remove(vr->fib, fib_entry);
mlxsw_sp_router_fib4_entry_fini(mlxsw_sp, fib_entry);
mlxsw_sp_fib_entry_destroy(fib_entry);
mlxsw_sp_vr_put(mlxsw_sp, vr);
if (fib_entry->ref_count == 1) {
mlxsw_sp_fib_entry_del(mlxsw_sp, fib_entry);
mlxsw_sp_fib_entry_remove(fib_entry->vr->fib, fib_entry);
}
mlxsw_sp_fib_entry_put(mlxsw_sp, fib_entry);
return 0;
}
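Net effect of this file's changes: FIB entries gain a get/find/put lifecycle keyed on (addr, prefix_len, dev), so the same route offloaded twice is reference-counted rather than double-programmed. A sketch of the pattern, where insert_and_program_hw() and unprogram_hw_and_remove() are hypothetical stand-ins for the insert/update and del/remove calls above:

    /* add path */
    fib_entry = mlxsw_sp_fib_entry_get(mlxsw_sp, fib4);    /* create or ref++  */
    if (fib_entry->ref_count == 1)
    	insert_and_program_hw(fib_entry);              /* first user only  */

    /* del path */
    fib_entry = mlxsw_sp_fib_entry_find(mlxsw_sp, fib4);
    if (fib_entry->ref_count == 1)
    	unprogram_hw_and_remove(fib_entry);            /* last user only   */
    mlxsw_sp_fib_entry_put(mlxsw_sp, fib_entry);           /* ref--, free at 0 */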


@ -167,8 +167,8 @@ static int mlxsw_sp_port_attr_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port,
}
static int __mlxsw_sp_port_flood_set(struct mlxsw_sp_port *mlxsw_sp_port,
u16 idx_begin, u16 idx_end, bool set,
bool only_uc)
u16 idx_begin, u16 idx_end, bool uc_set,
bool bm_set)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
u16 local_port = mlxsw_sp_port->local_port;
@ -187,28 +187,22 @@ static int __mlxsw_sp_port_flood_set(struct mlxsw_sp_port *mlxsw_sp_port,
return -ENOMEM;
mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_UC, idx_begin,
table_type, range, local_port, set);
table_type, range, local_port, uc_set);
err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl);
if (err)
goto buffer_out;
/* Flooding control allows one to decide whether a given port will
* flood unicast traffic for which there is no FDB entry.
*/
if (only_uc)
goto buffer_out;
mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_BM, idx_begin,
table_type, range, local_port, set);
table_type, range, local_port, bm_set);
err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl);
if (err)
goto err_flood_bm_set;
else
goto buffer_out;
goto buffer_out;
err_flood_bm_set:
mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_UC, idx_begin,
table_type, range, local_port, !set);
table_type, range, local_port, !uc_set);
mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl);
buffer_out:
kfree(sftr_pl);
@ -257,8 +251,7 @@ int mlxsw_sp_vport_flood_set(struct mlxsw_sp_port *mlxsw_sp_vport, u16 fid,
* the start of the vFIDs range.
*/
vfid = mlxsw_sp_fid_to_vfid(fid);
return __mlxsw_sp_port_flood_set(mlxsw_sp_vport, vfid, vfid, set,
false);
return __mlxsw_sp_port_flood_set(mlxsw_sp_vport, vfid, vfid, set, set);
}
static int mlxsw_sp_port_attr_br_flags_set(struct mlxsw_sp_port *mlxsw_sp_port,
@ -460,6 +453,9 @@ static int __mlxsw_sp_port_fid_join(struct mlxsw_sp_port *mlxsw_sp_port,
{
struct mlxsw_sp_fid *f;
if (test_bit(fid, mlxsw_sp_port->active_vlans))
return 0;
f = mlxsw_sp_fid_find(mlxsw_sp_port->mlxsw_sp, fid);
if (!f) {
f = mlxsw_sp_fid_create(mlxsw_sp_port->mlxsw_sp, fid);
@ -517,7 +513,7 @@ static int mlxsw_sp_port_fid_join(struct mlxsw_sp_port *mlxsw_sp_port,
}
err = __mlxsw_sp_port_flood_set(mlxsw_sp_port, fid_begin, fid_end,
true, false);
mlxsw_sp_port->uc_flood, true);
if (err)
goto err_port_flood_set;


@ -41,7 +41,6 @@
* Chris Telfer <chris.telfer@netronome.com>
*/
#include <linux/version.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
@ -1441,10 +1440,6 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
nfp_net_set_hash(nn->netdev, skb, rxd);
/* Pad small frames to minimum */
if (skb_put_padto(skb, 60))
break;
/* Stats update */
u64_stats_update_begin(&r_vec->rx_sync);
r_vec->rx_pkts++;


@ -40,7 +40,6 @@
* Brad Petrus <brad.petrus@netronome.com>
*/
#include <linux/version.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>


@ -38,7 +38,6 @@
* Rolf Neugebauer <rolf.neugebauer@netronome.com>
*/
#include <linux/version.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
@ -134,7 +133,7 @@ static int nfp_netvf_pci_probe(struct pci_dev *pdev,
}
nfp_net_get_fw_version(&fw_ver, ctrl_bar);
if (fw_ver.class != NFP_NET_CFG_VERSION_CLASS_GENERIC) {
if (fw_ver.resv || fw_ver.class != NFP_NET_CFG_VERSION_CLASS_GENERIC) {
dev_err(&pdev->dev, "Unknown Firmware ABI %d.%d.%d.%d\n",
fw_ver.resv, fw_ver.class, fw_ver.major, fw_ver.minor);
err = -EINVAL;
@ -142,9 +141,7 @@ static int nfp_netvf_pci_probe(struct pci_dev *pdev,
}
/* Determine stride */
if (nfp_net_fw_ver_eq(&fw_ver, 0, 0, 0, 0) ||
nfp_net_fw_ver_eq(&fw_ver, 0, 0, 0, 1) ||
nfp_net_fw_ver_eq(&fw_ver, 0, 0, 0x12, 0x48)) {
if (nfp_net_fw_ver_eq(&fw_ver, 0, 0, 0, 1)) {
stride = 2;
tx_bar_no = NFP_NET_Q0_BAR;
rx_bar_no = NFP_NET_Q1_BAR;


@ -19,6 +19,7 @@
#include "qed_dcbx.h"
#include "qed_hsi.h"
#include "qed_sp.h"
#include "qed_sriov.h"
#ifdef CONFIG_DCB
#include <linux/qed/qed_eth_if.h>
#endif
@ -945,6 +946,9 @@ static int qed_dcbx_query_params(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt;
int rc;
if (IS_VF(p_hwfn->cdev))
return -EINVAL;
p_ptt = qed_ptt_acquire(p_hwfn);
if (!p_ptt)
return -EBUSY;
@ -984,6 +988,7 @@ qed_dcbx_set_pfc_data(struct qed_hwfn *p_hwfn,
if (p_params->pfc.prio[i])
pfc_map |= BIT(i);
*pfc &= ~DCBX_PFC_PRI_EN_BITMAP_MASK;
*pfc |= (pfc_map << DCBX_PFC_PRI_EN_BITMAP_SHIFT);
DP_VERBOSE(p_hwfn, QED_MSG_DCB, "pfc = 0x%x\n", *pfc);
@ -1058,24 +1063,33 @@ qed_dcbx_set_app_data(struct qed_hwfn *p_hwfn,
for (i = 0; i < DCBX_MAX_APP_PROTOCOL; i++) {
entry = &p_app->app_pri_tbl[i].entry;
*entry = 0;
if (ieee) {
*entry &= ~DCBX_APP_SF_IEEE_MASK;
*entry &= ~(DCBX_APP_SF_IEEE_MASK | DCBX_APP_SF_MASK);
switch (p_params->app_entry[i].sf_ieee) {
case QED_DCBX_SF_IEEE_ETHTYPE:
*entry |= ((u32)DCBX_APP_SF_IEEE_ETHTYPE <<
DCBX_APP_SF_IEEE_SHIFT);
*entry |= ((u32)DCBX_APP_SF_ETHTYPE <<
DCBX_APP_SF_SHIFT);
break;
case QED_DCBX_SF_IEEE_TCP_PORT:
*entry |= ((u32)DCBX_APP_SF_IEEE_TCP_PORT <<
DCBX_APP_SF_IEEE_SHIFT);
*entry |= ((u32)DCBX_APP_SF_PORT <<
DCBX_APP_SF_SHIFT);
break;
case QED_DCBX_SF_IEEE_UDP_PORT:
*entry |= ((u32)DCBX_APP_SF_IEEE_UDP_PORT <<
DCBX_APP_SF_IEEE_SHIFT);
*entry |= ((u32)DCBX_APP_SF_PORT <<
DCBX_APP_SF_SHIFT);
break;
case QED_DCBX_SF_IEEE_TCP_UDP_PORT:
*entry |= ((u32)DCBX_APP_SF_IEEE_TCP_UDP_PORT <<
DCBX_APP_SF_IEEE_SHIFT);
*entry |= ((u32)DCBX_APP_SF_PORT <<
DCBX_APP_SF_SHIFT);
break;
}
} else {
@ -1175,7 +1189,7 @@ int qed_dcbx_get_config_params(struct qed_hwfn *p_hwfn,
return 0;
}
dcbx_info = kmalloc(sizeof(*dcbx_info), GFP_KERNEL);
dcbx_info = kzalloc(sizeof(*dcbx_info), GFP_KERNEL);
if (!dcbx_info) {
DP_ERR(p_hwfn, "Failed to allocate struct qed_dcbx_info\n");
return -ENOMEM;
@ -1212,7 +1226,7 @@ static struct qed_dcbx_get *qed_dcbnl_get_dcbx(struct qed_hwfn *hwfn,
{
struct qed_dcbx_get *dcbx_info;
dcbx_info = kmalloc(sizeof(*dcbx_info), GFP_KERNEL);
dcbx_info = kzalloc(sizeof(*dcbx_info), GFP_KERNEL);
if (!dcbx_info) {
DP_ERR(hwfn->cdev, "Failed to allocate memory for dcbx_info\n");
return NULL;
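Both kmalloc-to-kzalloc conversions close the same hole: the query path only fills the fields of features that actually negotiated, so with kmalloc() the remaining fields reach the caller as uninitialized heap memory. The zeroed allocation is the whole fix; restated as a sketch:

    struct qed_dcbx_get *dcbx_info;

    dcbx_info = kzalloc(sizeof(*dcbx_info), GFP_KERNEL); /* zeroed: unfilled */
    if (!dcbx_info)                                      /* feature fields   */
    	return NULL;                                 /* now read as 0    */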


@ -2520,7 +2520,8 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,
edev->ops->register_ops(cdev, &qede_ll_ops, edev);
#ifdef CONFIG_DCB
qede_set_dcbnl_ops(edev->ndev);
if (!IS_VF(edev))
qede_set_dcbnl_ops(edev->ndev);
#endif
INIT_DELAYED_WORK(&edev->sp_task, qede_sp_task);


@ -201,9 +201,14 @@ static const u16 sh_eth_offset_fast_rz[SH_ETH_MAX_REGISTER_OFFSET] = {
[ARSTR] = 0x0000,
[TSU_CTRST] = 0x0004,
[TSU_FWSLC] = 0x0038,
[TSU_VTAG0] = 0x0058,
[TSU_ADSBSY] = 0x0060,
[TSU_TEN] = 0x0064,
[TSU_POST1] = 0x0070,
[TSU_POST2] = 0x0074,
[TSU_POST3] = 0x0078,
[TSU_POST4] = 0x007c,
[TSU_ADRH0] = 0x0100,
[TXNLCR0] = 0x0080,
@ -2786,6 +2791,8 @@ static void sh_eth_tsu_init(struct sh_eth_private *mdp)
{
if (sh_eth_is_rz_fast_ether(mdp)) {
sh_eth_tsu_write(mdp, 0, TSU_TEN); /* Disable all CAM entry */
sh_eth_tsu_write(mdp, TSU_FWSLC_POSTENU | TSU_FWSLC_POSTENL,
TSU_FWSLC); /* Enable POST registers */
return;
}


@ -470,7 +470,9 @@ smc_pxa_dma_insw(void __iomem *ioaddr, struct smc_local *lp, int reg, int dma,
#endif
#if ! SMC_CAN_USE_8BIT
#undef SMC_inb
#define SMC_inb(ioaddr, reg) ({ BUG(); 0; })
#undef SMC_outb
#define SMC_outb(x, ioaddr, reg) BUG()
#define SMC_insb(a, r, p, l) BUG()
#define SMC_outsb(a, r, p, l) BUG()
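The ({ BUG(); 0; }) form is a GCC statement expression: the block evaluates to its last statement, so SMC_inb keeps working in value context even though the 8-bit path must never actually run. A sketch of why the value matters:

    u8 val = SMC_inb(ioaddr, reg);  /* expands to ({ BUG(); 0; }); still
    				 * compiles where a value is needed */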


@ -1099,15 +1099,8 @@ static int smsc911x_mii_init(struct platform_device *pdev,
goto err_out_free_bus_2;
}
if (smsc911x_mii_probe(dev) < 0) {
SMSC_WARN(pdata, probe, "Error registering mii bus");
goto err_out_unregister_bus_3;
}
return 0;
err_out_unregister_bus_3:
mdiobus_unregister(pdata->mii_bus);
err_out_free_bus_2:
mdiobus_free(pdata->mii_bus);
err_out_1:
@ -1514,23 +1507,90 @@ static void smsc911x_disable_irq_chip(struct net_device *dev)
smsc911x_reg_write(pdata, INT_STS, 0xFFFFFFFF);
}
static irqreturn_t smsc911x_irqhandler(int irq, void *dev_id)
{
struct net_device *dev = dev_id;
struct smsc911x_data *pdata = netdev_priv(dev);
u32 intsts = smsc911x_reg_read(pdata, INT_STS);
u32 inten = smsc911x_reg_read(pdata, INT_EN);
int serviced = IRQ_NONE;
u32 temp;
if (unlikely(intsts & inten & INT_STS_SW_INT_)) {
temp = smsc911x_reg_read(pdata, INT_EN);
temp &= (~INT_EN_SW_INT_EN_);
smsc911x_reg_write(pdata, INT_EN, temp);
smsc911x_reg_write(pdata, INT_STS, INT_STS_SW_INT_);
pdata->software_irq_signal = 1;
smp_wmb();
serviced = IRQ_HANDLED;
}
if (unlikely(intsts & inten & INT_STS_RXSTOP_INT_)) {
/* Called when there is a multicast update scheduled and
* it is now safe to complete the update */
SMSC_TRACE(pdata, intr, "RX Stop interrupt");
smsc911x_reg_write(pdata, INT_STS, INT_STS_RXSTOP_INT_);
if (pdata->multicast_update_pending)
smsc911x_rx_multicast_update_workaround(pdata);
serviced = IRQ_HANDLED;
}
if (intsts & inten & INT_STS_TDFA_) {
temp = smsc911x_reg_read(pdata, FIFO_INT);
temp |= FIFO_INT_TX_AVAIL_LEVEL_;
smsc911x_reg_write(pdata, FIFO_INT, temp);
smsc911x_reg_write(pdata, INT_STS, INT_STS_TDFA_);
netif_wake_queue(dev);
serviced = IRQ_HANDLED;
}
if (unlikely(intsts & inten & INT_STS_RXE_)) {
SMSC_TRACE(pdata, intr, "RX Error interrupt");
smsc911x_reg_write(pdata, INT_STS, INT_STS_RXE_);
serviced = IRQ_HANDLED;
}
if (likely(intsts & inten & INT_STS_RSFL_)) {
if (likely(napi_schedule_prep(&pdata->napi))) {
/* Disable Rx interrupts */
temp = smsc911x_reg_read(pdata, INT_EN);
temp &= (~INT_EN_RSFL_EN_);
smsc911x_reg_write(pdata, INT_EN, temp);
/* Schedule a NAPI poll */
__napi_schedule(&pdata->napi);
} else {
SMSC_WARN(pdata, rx_err, "napi_schedule_prep failed");
}
serviced = IRQ_HANDLED;
}
return serviced;
}
static int smsc911x_open(struct net_device *dev)
{
struct smsc911x_data *pdata = netdev_priv(dev);
unsigned int timeout;
unsigned int temp;
unsigned int intcfg;
int retval;
int irq_flags;
/* if the phy is not yet registered, retry later*/
/* find and start the given phy */
if (!dev->phydev) {
SMSC_WARN(pdata, hw, "phy_dev is NULL");
return -EAGAIN;
retval = smsc911x_mii_probe(dev);
if (retval < 0) {
SMSC_WARN(pdata, probe, "Error starting phy");
goto out;
}
}
/* Reset the LAN911x */
if (smsc911x_soft_reset(pdata)) {
retval = smsc911x_soft_reset(pdata);
if (retval) {
SMSC_WARN(pdata, hw, "soft reset failed");
return -EIO;
goto mii_free_out;
}
smsc911x_reg_write(pdata, HW_CFG, 0x00050000);
@ -1586,6 +1646,15 @@ static int smsc911x_open(struct net_device *dev)
pdata->software_irq_signal = 0;
smp_wmb();
irq_flags = irq_get_trigger_type(dev->irq);
retval = request_irq(dev->irq, smsc911x_irqhandler,
irq_flags | IRQF_SHARED, dev->name, dev);
if (retval) {
SMSC_WARN(pdata, probe,
"Unable to claim requested irq: %d", dev->irq);
goto mii_free_out;
}
temp = smsc911x_reg_read(pdata, INT_EN);
temp |= INT_EN_SW_INT_EN_;
smsc911x_reg_write(pdata, INT_EN, temp);
@ -1600,7 +1669,8 @@ static int smsc911x_open(struct net_device *dev)
if (!pdata->software_irq_signal) {
netdev_warn(dev, "ISR failed signaling test (IRQ %d)\n",
dev->irq);
return -ENODEV;
retval = -ENODEV;
goto irq_stop_out;
}
SMSC_TRACE(pdata, ifup, "IRQ handler passed test using IRQ %d",
dev->irq);
@ -1646,6 +1716,14 @@ static int smsc911x_open(struct net_device *dev)
netif_start_queue(dev);
return 0;
irq_stop_out:
free_irq(dev->irq, dev);
mii_free_out:
phy_disconnect(dev->phydev);
dev->phydev = NULL;
out:
return retval;
}
/* Entry point for stopping the interface */
@ -1667,9 +1745,15 @@ static int smsc911x_stop(struct net_device *dev)
dev->stats.rx_dropped += smsc911x_reg_read(pdata, RX_DROP);
smsc911x_tx_update_txcounters(dev);
free_irq(dev->irq, dev);
/* Bring the PHY down */
if (dev->phydev)
if (dev->phydev) {
phy_stop(dev->phydev);
phy_disconnect(dev->phydev);
dev->phydev = NULL;
}
netif_carrier_off(dev);
SMSC_TRACE(pdata, ifdown, "Interface stopped");
return 0;
@ -1811,67 +1895,6 @@ static void smsc911x_set_multicast_list(struct net_device *dev)
spin_unlock_irqrestore(&pdata->mac_lock, flags);
}
static irqreturn_t smsc911x_irqhandler(int irq, void *dev_id)
{
struct net_device *dev = dev_id;
struct smsc911x_data *pdata = netdev_priv(dev);
u32 intsts = smsc911x_reg_read(pdata, INT_STS);
u32 inten = smsc911x_reg_read(pdata, INT_EN);
int serviced = IRQ_NONE;
u32 temp;
if (unlikely(intsts & inten & INT_STS_SW_INT_)) {
temp = smsc911x_reg_read(pdata, INT_EN);
temp &= (~INT_EN_SW_INT_EN_);
smsc911x_reg_write(pdata, INT_EN, temp);
smsc911x_reg_write(pdata, INT_STS, INT_STS_SW_INT_);
pdata->software_irq_signal = 1;
smp_wmb();
serviced = IRQ_HANDLED;
}
if (unlikely(intsts & inten & INT_STS_RXSTOP_INT_)) {
/* Called when there is a multicast update scheduled and
* it is now safe to complete the update */
SMSC_TRACE(pdata, intr, "RX Stop interrupt");
smsc911x_reg_write(pdata, INT_STS, INT_STS_RXSTOP_INT_);
if (pdata->multicast_update_pending)
smsc911x_rx_multicast_update_workaround(pdata);
serviced = IRQ_HANDLED;
}
if (intsts & inten & INT_STS_TDFA_) {
temp = smsc911x_reg_read(pdata, FIFO_INT);
temp |= FIFO_INT_TX_AVAIL_LEVEL_;
smsc911x_reg_write(pdata, FIFO_INT, temp);
smsc911x_reg_write(pdata, INT_STS, INT_STS_TDFA_);
netif_wake_queue(dev);
serviced = IRQ_HANDLED;
}
if (unlikely(intsts & inten & INT_STS_RXE_)) {
SMSC_TRACE(pdata, intr, "RX Error interrupt");
smsc911x_reg_write(pdata, INT_STS, INT_STS_RXE_);
serviced = IRQ_HANDLED;
}
if (likely(intsts & inten & INT_STS_RSFL_)) {
if (likely(napi_schedule_prep(&pdata->napi))) {
/* Disable Rx interrupts */
temp = smsc911x_reg_read(pdata, INT_EN);
temp &= (~INT_EN_RSFL_EN_);
smsc911x_reg_write(pdata, INT_EN, temp);
/* Schedule a NAPI poll */
__napi_schedule(&pdata->napi);
} else {
SMSC_WARN(pdata, rx_err, "napi_schedule_prep failed");
}
serviced = IRQ_HANDLED;
}
return serviced;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void smsc911x_poll_controller(struct net_device *dev)
{
@ -2291,16 +2314,14 @@ static int smsc911x_drv_remove(struct platform_device *pdev)
pdata = netdev_priv(dev);
BUG_ON(!pdata);
BUG_ON(!pdata->ioaddr);
BUG_ON(!dev->phydev);
WARN_ON(dev->phydev);
SMSC_TRACE(pdata, ifdown, "Stopping driver");
phy_disconnect(dev->phydev);
mdiobus_unregister(pdata->mii_bus);
mdiobus_free(pdata->mii_bus);
unregister_netdev(dev);
free_irq(dev->irq, dev);
res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
"smsc911x-memory");
if (!res)
@ -2385,8 +2406,7 @@ static int smsc911x_drv_probe(struct platform_device *pdev)
struct smsc911x_data *pdata;
struct smsc911x_platform_config *config = dev_get_platdata(&pdev->dev);
struct resource *res;
unsigned int intcfg = 0;
int res_size, irq, irq_flags;
int res_size, irq;
int retval;
res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
@ -2425,7 +2445,6 @@ static int smsc911x_drv_probe(struct platform_device *pdev)
pdata = netdev_priv(dev);
dev->irq = irq;
irq_flags = irq_get_trigger_type(irq);
pdata->ioaddr = ioremap_nocache(res->start, res_size);
pdata->dev = dev;
@ -2472,41 +2491,21 @@ static int smsc911x_drv_probe(struct platform_device *pdev)
if (retval < 0)
goto out_disable_resources;
/* configure irq polarity and type before connecting isr */
if (pdata->config.irq_polarity == SMSC911X_IRQ_POLARITY_ACTIVE_HIGH)
intcfg |= INT_CFG_IRQ_POL_;
if (pdata->config.irq_type == SMSC911X_IRQ_TYPE_PUSH_PULL)
intcfg |= INT_CFG_IRQ_TYPE_;
smsc911x_reg_write(pdata, INT_CFG, intcfg);
/* Ensure interrupts are globally disabled before connecting ISR */
smsc911x_disable_irq_chip(dev);
retval = request_irq(dev->irq, smsc911x_irqhandler,
irq_flags | IRQF_SHARED, dev->name, dev);
if (retval) {
SMSC_WARN(pdata, probe,
"Unable to claim requested irq: %d", dev->irq);
goto out_disable_resources;
}
netif_carrier_off(dev);
retval = register_netdev(dev);
if (retval) {
SMSC_WARN(pdata, probe, "Error %i registering device", retval);
goto out_free_irq;
} else {
SMSC_TRACE(pdata, probe,
"Network interface: \"%s\"", dev->name);
}
retval = smsc911x_mii_init(pdev, dev);
if (retval) {
SMSC_WARN(pdata, probe, "Error %i initialising mii", retval);
goto out_unregister_netdev_5;
goto out_disable_resources;
}
retval = register_netdev(dev);
if (retval) {
SMSC_WARN(pdata, probe, "Error %i registering device", retval);
goto out_disable_resources;
} else {
SMSC_TRACE(pdata, probe,
"Network interface: \"%s\"", dev->name);
}
spin_lock_irq(&pdata->mac_lock);
@ -2544,10 +2543,6 @@ static int smsc911x_drv_probe(struct platform_device *pdev)
return 0;
out_unregister_netdev_5:
unregister_netdev(dev);
out_free_irq:
free_irq(dev->irq, dev);
out_disable_resources:
pm_runtime_put(&pdev->dev);
pm_runtime_disable(&pdev->dev);
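
The reworked smsc911x open path above follows the standard acquire-in-order,
release-in-reverse discipline: each new goto label unwinds exactly the
resources taken before the failing step. A minimal kernel-style sketch of
that shape — the example_* helpers are hypothetical stand-ins, not the
driver's code:

#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/phy.h>

/* Hypothetical helpers standing in for the driver's real steps. */
static int example_connect_phy(struct net_device *dev);
static irqreturn_t example_isr(int irq, void *dev_id);
static int example_irq_selftest(struct net_device *dev);

static int example_open(struct net_device *dev)
{
	int ret;

	ret = example_connect_phy(dev);			/* step 1 */
	if (ret)
		goto out;

	ret = request_irq(dev->irq, example_isr,	/* step 2 */
			  IRQF_SHARED, dev->name, dev);
	if (ret)
		goto disconnect_phy;

	ret = example_irq_selftest(dev);		/* step 3 */
	if (ret)
		goto free_irq_out;

	netif_start_queue(dev);
	return 0;

free_irq_out:
	free_irq(dev->irq, dev);			/* undo step 2 */
disconnect_phy:
	phy_disconnect(dev->phydev);			/* undo step 1 */
	dev->phydev = NULL;
out:
	return ret;
}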

View File

@ -1246,7 +1246,7 @@ static int dwceqos_mii_init(struct net_local *lp)
lp->mii_bus->read = &dwceqos_mdio_read;
lp->mii_bus->write = &dwceqos_mdio_write;
lp->mii_bus->priv = lp;
lp->mii_bus->parent = &lp->ndev->dev;
lp->mii_bus->parent = &lp->pdev->dev;
of_address_to_resource(lp->pdev->dev.of_node, 0, &res);
snprintf(lp->mii_bus->id, MII_BUS_ID_SIZE, "%.8llx",
@ -2853,25 +2853,17 @@ static int dwceqos_probe(struct platform_device *pdev)
ndev->features = ndev->hw_features;
netif_napi_add(ndev, &lp->napi, dwceqos_rx_poll, NAPI_POLL_WEIGHT);
ret = register_netdev(ndev);
if (ret) {
dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
goto err_out_clk_dis_aper;
}
lp->phy_ref_clk = devm_clk_get(&pdev->dev, "phy_ref_clk");
if (IS_ERR(lp->phy_ref_clk)) {
dev_err(&pdev->dev, "phy_ref_clk clock not found.\n");
ret = PTR_ERR(lp->phy_ref_clk);
goto err_out_unregister_netdev;
goto err_out_clk_dis_aper;
}
ret = clk_prepare_enable(lp->phy_ref_clk);
if (ret) {
dev_err(&pdev->dev, "Unable to enable device clock.\n");
goto err_out_unregister_netdev;
goto err_out_clk_dis_aper;
}
lp->phy_node = of_parse_phandle(lp->pdev->dev.of_node,
@ -2880,7 +2872,7 @@ static int dwceqos_probe(struct platform_device *pdev)
ret = of_phy_register_fixed_link(lp->pdev->dev.of_node);
if (ret < 0) {
dev_err(&pdev->dev, "invalid fixed-link");
goto err_out_unregister_clk_notifier;
goto err_out_clk_dis_phy;
}
lp->phy_node = of_node_get(lp->pdev->dev.of_node);
@ -2889,7 +2881,7 @@ static int dwceqos_probe(struct platform_device *pdev)
ret = of_get_phy_mode(lp->pdev->dev.of_node);
if (ret < 0) {
dev_err(&lp->pdev->dev, "error in getting phy i/f\n");
goto err_out_unregister_clk_notifier;
goto err_out_clk_dis_phy;
}
lp->phy_interface = ret;
@ -2897,14 +2889,14 @@ static int dwceqos_probe(struct platform_device *pdev)
ret = dwceqos_mii_init(lp);
if (ret) {
dev_err(&lp->pdev->dev, "error in dwceqos_mii_init\n");
goto err_out_unregister_clk_notifier;
goto err_out_clk_dis_phy;
}
ret = dwceqos_mii_probe(ndev);
if (ret != 0) {
netdev_err(ndev, "mii_probe fail.\n");
ret = -ENXIO;
goto err_out_unregister_clk_notifier;
goto err_out_clk_dis_phy;
}
dwceqos_set_umac_addr(lp, lp->ndev->dev_addr, 0);
@ -2922,7 +2914,7 @@ static int dwceqos_probe(struct platform_device *pdev)
if (ret) {
dev_err(&lp->pdev->dev, "Unable to retrieve DT, error %d\n",
ret);
goto err_out_unregister_clk_notifier;
goto err_out_clk_dis_phy;
}
dev_info(&lp->pdev->dev, "pdev->id %d, baseaddr 0x%08lx, irq %d\n",
pdev->id, ndev->base_addr, ndev->irq);
@ -2932,18 +2924,24 @@ static int dwceqos_probe(struct platform_device *pdev)
if (ret) {
dev_err(&lp->pdev->dev, "Unable to request IRQ %d, error %d\n",
ndev->irq, ret);
goto err_out_unregister_clk_notifier;
goto err_out_clk_dis_phy;
}
if (netif_msg_probe(lp))
netdev_dbg(ndev, "net_local@%p\n", lp);
netif_napi_add(ndev, &lp->napi, dwceqos_rx_poll, NAPI_POLL_WEIGHT);
ret = register_netdev(ndev);
if (ret) {
dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
goto err_out_clk_dis_phy;
}
return 0;
err_out_unregister_clk_notifier:
err_out_clk_dis_phy:
clk_disable_unprepare(lp->phy_ref_clk);
err_out_unregister_netdev:
unregister_netdev(ndev);
err_out_clk_dis_aper:
clk_disable_unprepare(lp->apb_pclk);
err_out_free_netdev:
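
The dwc_eth_qos change above is the classic "don't register a
semi-initialized device" fix: register_netdev() makes the interface visible
to userspace, which may call ndo_open() immediately, so it has to be the
last step of probe. A sketch of the resulting ordering, with hypothetical
example_* helpers:

#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/platform_device.h>

static int example_init_resources(struct net_device *ndev);
static void example_release_resources(struct net_device *ndev);

static int example_probe(struct platform_device *pdev)
{
	struct net_device *ndev;
	int ret;

	ndev = alloc_etherdev(0);
	if (!ndev)
		return -ENOMEM;

	/* Everything ndo_open() relies on: clocks, MDIO, PHY, IRQ. */
	ret = example_init_resources(ndev);
	if (ret)
		goto err_free;

	/* Only now publish the device to userspace. */
	ret = register_netdev(ndev);
	if (ret)
		goto err_teardown;

	return 0;

err_teardown:
	example_release_resources(ndev);
err_free:
	free_netdev(ndev);
	return ret;
}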

View File

@ -303,6 +303,7 @@ config MDIO_HISI_FEMAC
config MDIO_XGENE
tristate "APM X-Gene SoC MDIO bus controller"
depends on ARCH_XGENE || COMPILE_TEST
help
This module provides a driver for the MDIO busses found in the
APM X-Gene SoC's.

View File

@ -2782,14 +2782,15 @@ static int vxlan_dev_configure(struct net *src_net, struct net_device *dev,
struct net_device *lowerdev = NULL;
if (conf->flags & VXLAN_F_GPE) {
if (conf->flags & ~VXLAN_F_ALLOWED_GPE)
return -EINVAL;
/* For now, allow GPE only together with COLLECT_METADATA.
* This can be relaxed later; in such case, the other side
* of the PtP link will have to be provided.
*/
if (!(conf->flags & VXLAN_F_COLLECT_METADATA))
if ((conf->flags & ~VXLAN_F_ALLOWED_GPE) ||
!(conf->flags & VXLAN_F_COLLECT_METADATA)) {
pr_info("unsupported combination of extensions\n");
return -EINVAL;
}
vxlan_raw_setup(dev);
} else {
@ -2842,6 +2843,9 @@ static int vxlan_dev_configure(struct net *src_net, struct net_device *dev,
dev->mtu = lowerdev->mtu - (use_ipv6 ? VXLAN6_HEADROOM : VXLAN_HEADROOM);
needed_headroom = lowerdev->hard_header_len;
} else if (vxlan_addr_multicast(&dst->remote_ip)) {
pr_info("multicast destination requires interface to be specified\n");
return -EINVAL;
}
if (conf->mtu) {
@ -2874,8 +2878,10 @@ static int vxlan_dev_configure(struct net *src_net, struct net_device *dev,
tmp->cfg.saddr.sa.sa_family == AF_INET6) == use_ipv6 &&
tmp->cfg.dst_port == vxlan->cfg.dst_port &&
(tmp->flags & VXLAN_F_RCV_FLAGS) ==
(vxlan->flags & VXLAN_F_RCV_FLAGS))
return -EEXIST;
(vxlan->flags & VXLAN_F_RCV_FLAGS)) {
pr_info("duplicate VNI %u\n", be32_to_cpu(conf->vni));
return -EEXIST;
}
}
dev->ethtool_ops = &vxlan_ethtool_ops;
@ -2909,7 +2915,6 @@ static int vxlan_newlink(struct net *src_net, struct net_device *dev,
struct nlattr *tb[], struct nlattr *data[])
{
struct vxlan_config conf;
int err;
memset(&conf, 0, sizeof(conf));
@ -3018,26 +3023,7 @@ static int vxlan_newlink(struct net *src_net, struct net_device *dev,
if (tb[IFLA_MTU])
conf.mtu = nla_get_u32(tb[IFLA_MTU]);
err = vxlan_dev_configure(src_net, dev, &conf);
switch (err) {
case -ENODEV:
pr_info("ifindex %d does not exist\n", conf.remote_ifindex);
break;
case -EPERM:
pr_info("IPv6 is disabled via sysctl\n");
break;
case -EEXIST:
pr_info("duplicate VNI %u\n", be32_to_cpu(conf.vni));
break;
case -EINVAL:
pr_info("unsupported combination of extensions\n");
break;
}
return err;
return vxlan_dev_configure(src_net, dev, &conf);
}
static void vxlan_dellink(struct net_device *dev, struct list_head *head)
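
The vxlan change above replaces a switch on the returned errno in
vxlan_newlink() with messages printed where each check fails. Mapping errno
back to text at the caller breaks as soon as two checks share an error
code; the sketch below reduces that to plain C (illustrative only, not the
driver's code):

#include <errno.h>
#include <stdio.h>

static int configure(int bad_flags, int has_metadata)
{
	if (bad_flags) {
		fprintf(stderr, "unsupported combination of extensions\n");
		return -EINVAL;
	}
	if (!has_metadata) {
		/* Same errno as above: a caller switching on -EINVAL
		 * could not tell these two failures apart. */
		fprintf(stderr, "GPE requires COLLECT_METADATA\n");
		return -EINVAL;
	}
	return 0;
}

int main(void)
{
	configure(0, 0);	/* exercises failure B's message */
	return 0;
}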

View File

@ -1525,7 +1525,7 @@ static void ath10k_htt_rx_h_filter(struct ath10k *ar,
static int ath10k_htt_rx_handle_amsdu(struct ath10k_htt *htt)
{
struct ath10k *ar = htt->ar;
static struct ieee80211_rx_status rx_status;
struct ieee80211_rx_status *rx_status = &htt->rx_status;
struct sk_buff_head amsdu;
int ret;
@ -1549,11 +1549,11 @@ static int ath10k_htt_rx_handle_amsdu(struct ath10k_htt *htt)
return ret;
}
ath10k_htt_rx_h_ppdu(ar, &amsdu, &rx_status, 0xffff);
ath10k_htt_rx_h_ppdu(ar, &amsdu, rx_status, 0xffff);
ath10k_htt_rx_h_unchain(ar, &amsdu, ret > 0);
ath10k_htt_rx_h_filter(ar, &amsdu, &rx_status);
ath10k_htt_rx_h_mpdu(ar, &amsdu, &rx_status);
ath10k_htt_rx_h_deliver(ar, &amsdu, &rx_status);
ath10k_htt_rx_h_filter(ar, &amsdu, rx_status);
ath10k_htt_rx_h_mpdu(ar, &amsdu, rx_status);
ath10k_htt_rx_h_deliver(ar, &amsdu, rx_status);
return 0;
}
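
The ath10k fix above removes a function-local `static` rx_status: such a
variable is one global instance shared by every caller and every device,
so concurrent handlers clobbered each other's state. Per-instance data
belongs in the instance struct (here, htt->rx_status). A minimal userspace
illustration:

#include <stdio.h>

struct dev { int rx_status; };

/* Buggy shape: all devices share one status word. */
static int *shared_status(void)
{
	static int rx_status;
	return &rx_status;
}

/* Fixed shape: status lives in the device it describes. */
static int *per_dev_status(struct dev *d)
{
	return &d->rx_status;
}

int main(void)
{
	struct dev a = { 0 }, b = { 0 };

	*shared_status() = 1;		/* clobbers state for everyone */
	*per_dev_status(&a) = 1;	/* touches only device a */
	printf("%d %d\n", a.rx_status, b.rx_status);	/* prints: 1 0 */
	return 0;
}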

View File

@ -3162,7 +3162,6 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
pci_hard_reset = ath10k_pci_qca988x_chip_reset;
break;
case QCA9887_1_0_DEVICE_ID:
dev_warn(&pdev->dev, "QCA9887 support is still experimental, there are likely bugs. You have been warned.\n");
hw_rev = ATH10K_HW_QCA9887;
pci_ps = false;
pci_soft_reset = ath10k_pci_warm_reset;

View File

@ -2482,6 +2482,8 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
return -EINVAL;
}
ath9k_gpio_cap_init(ah);
if (AR_SREV_9485(ah) ||
AR_SREV_9285(ah) ||
AR_SREV_9330(ah) ||
@ -2531,8 +2533,6 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
else
pCap->hw_caps &= ~ATH9K_HW_CAP_HT;
ath9k_gpio_cap_init(ah);
if (AR_SREV_9160_10_OR_LATER(ah) || AR_SREV_9100(ah))
pCap->rts_aggr_limit = ATH_AMPDU_LIMIT_MAX;
else

View File

@ -718,9 +718,12 @@ static int ath9k_start(struct ieee80211_hw *hw)
if (!ath_complete_reset(sc, false))
ah->reset_power_on = false;
if (ah->led_pin >= 0)
if (ah->led_pin >= 0) {
ath9k_hw_set_gpio(ah, ah->led_pin,
(ah->config.led_active_high) ? 1 : 0);
ath9k_hw_gpio_request_out(ah, ah->led_pin, NULL,
AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
}
/*
* Reset key cache to sane defaults (all entries cleared) instead of
@ -864,9 +867,11 @@ static void ath9k_stop(struct ieee80211_hw *hw)
spin_lock_bh(&sc->sc_pcu_lock);
if (ah->led_pin >= 0)
if (ah->led_pin >= 0) {
ath9k_hw_set_gpio(ah, ah->led_pin,
(ah->config.led_active_high) ? 0 : 1);
ath9k_hw_gpio_request_in(ah, ah->led_pin, NULL);
}
ath_prepare_reset(sc);
@ -1154,6 +1159,7 @@ void ath9k_calculate_summary_state(struct ath_softc *sc,
bool changed = (iter_data.primary_sta != ctx->primary_sta);
if (iter_data.primary_sta) {
iter_data.primary_beacon_vif = iter_data.primary_sta;
iter_data.beacons = true;
ath9k_set_assoc_state(sc, iter_data.primary_sta,
changed);
@ -1563,13 +1569,13 @@ static int ath9k_sta_state(struct ieee80211_hw *hw,
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
int ret = 0;
if (old_state == IEEE80211_STA_AUTH &&
new_state == IEEE80211_STA_ASSOC) {
if (old_state == IEEE80211_STA_NOTEXIST &&
new_state == IEEE80211_STA_NONE) {
ret = ath9k_sta_add(hw, vif, sta);
ath_dbg(common, CONFIG,
"Add station: %pM\n", sta->addr);
} else if (old_state == IEEE80211_STA_ASSOC &&
new_state == IEEE80211_STA_AUTH) {
} else if (old_state == IEEE80211_STA_NONE &&
new_state == IEEE80211_STA_NOTEXIST) {
ret = ath9k_sta_remove(hw, vif, sta);
ath_dbg(common, CONFIG,
"Remove station: %pM\n", sta->addr);

View File

@ -4527,7 +4527,7 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev,
(u8 *)&settings->beacon.head[ie_offset],
settings->beacon.head_len - ie_offset,
WLAN_EID_SSID);
if (!ssid_ie)
if (!ssid_ie || ssid_ie->len > IEEE80211_MAX_SSID_LEN)
return -EINVAL;
memcpy(ssid_le.SSID, ssid_ie->data, ssid_ie->len);
@ -5635,7 +5635,7 @@ static s32 brcmf_notify_vif_event(struct brcmf_if *ifp,
ifevent->action, ifevent->flags, ifevent->ifidx,
ifevent->bsscfgidx);
mutex_lock(&event->vif_event_lock);
spin_lock(&event->vif_event_lock);
event->action = ifevent->action;
vif = event->vif;
@ -5643,7 +5643,7 @@ static s32 brcmf_notify_vif_event(struct brcmf_if *ifp,
case BRCMF_E_IF_ADD:
/* waiting process may have timed out */
if (!cfg->vif_event.vif) {
mutex_unlock(&event->vif_event_lock);
spin_unlock(&event->vif_event_lock);
return -EBADF;
}
@ -5654,24 +5654,24 @@ static s32 brcmf_notify_vif_event(struct brcmf_if *ifp,
ifp->ndev->ieee80211_ptr = &vif->wdev;
SET_NETDEV_DEV(ifp->ndev, wiphy_dev(cfg->wiphy));
}
mutex_unlock(&event->vif_event_lock);
spin_unlock(&event->vif_event_lock);
wake_up(&event->vif_wq);
return 0;
case BRCMF_E_IF_DEL:
mutex_unlock(&event->vif_event_lock);
spin_unlock(&event->vif_event_lock);
/* event may not be upon user request */
if (brcmf_cfg80211_vif_event_armed(cfg))
wake_up(&event->vif_wq);
return 0;
case BRCMF_E_IF_CHANGE:
mutex_unlock(&event->vif_event_lock);
spin_unlock(&event->vif_event_lock);
wake_up(&event->vif_wq);
return 0;
default:
mutex_unlock(&event->vif_event_lock);
spin_unlock(&event->vif_event_lock);
break;
}
return -EINVAL;
@ -5792,7 +5792,7 @@ static void wl_deinit_priv(struct brcmf_cfg80211_info *cfg)
static void init_vif_event(struct brcmf_cfg80211_vif_event *event)
{
init_waitqueue_head(&event->vif_wq);
mutex_init(&event->vif_event_lock);
spin_lock_init(&event->vif_event_lock);
}
static s32 brcmf_dongle_roam(struct brcmf_if *ifp)
@ -6691,9 +6691,9 @@ static inline bool vif_event_equals(struct brcmf_cfg80211_vif_event *event,
{
u8 evt_action;
mutex_lock(&event->vif_event_lock);
spin_lock(&event->vif_event_lock);
evt_action = event->action;
mutex_unlock(&event->vif_event_lock);
spin_unlock(&event->vif_event_lock);
return evt_action == action;
}
@ -6702,10 +6702,10 @@ void brcmf_cfg80211_arm_vif_event(struct brcmf_cfg80211_info *cfg,
{
struct brcmf_cfg80211_vif_event *event = &cfg->vif_event;
mutex_lock(&event->vif_event_lock);
spin_lock(&event->vif_event_lock);
event->vif = vif;
event->action = 0;
mutex_unlock(&event->vif_event_lock);
spin_unlock(&event->vif_event_lock);
}
bool brcmf_cfg80211_vif_event_armed(struct brcmf_cfg80211_info *cfg)
@ -6713,9 +6713,9 @@ bool brcmf_cfg80211_vif_event_armed(struct brcmf_cfg80211_info *cfg)
struct brcmf_cfg80211_vif_event *event = &cfg->vif_event;
bool armed;
mutex_lock(&event->vif_event_lock);
spin_lock(&event->vif_event_lock);
armed = event->vif != NULL;
mutex_unlock(&event->vif_event_lock);
spin_unlock(&event->vif_event_lock);
return armed;
}
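
The brcmfmac hunks above convert vif_event_lock from a mutex to a spinlock:
the event handler can run in a context that is not allowed to sleep, and
mutex_lock() may sleep, while spin_lock() never does. A minimal kernel-style
sketch of the converted pattern (illustrative names):

#include <linux/spinlock.h>
#include <linux/types.h>

struct example_event {
	spinlock_t lock;	/* was: struct mutex */
	u8 action;
};

static void example_event_init(struct example_event *ev)
{
	spin_lock_init(&ev->lock);
	ev->action = 0;
}

static u8 example_event_peek(struct example_event *ev)
{
	u8 action;

	spin_lock(&ev->lock);	/* never sleeps: safe in atomic context */
	action = ev->action;
	spin_unlock(&ev->lock);
	return action;
}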

View File

@ -227,7 +227,7 @@ struct escan_info {
*/
struct brcmf_cfg80211_vif_event {
wait_queue_head_t vif_wq;
struct mutex vif_event_lock;
spinlock_t vif_event_lock;
u8 action;
struct brcmf_cfg80211_vif *vif;
};

View File

@ -743,7 +743,7 @@ static void brcmf_del_if(struct brcmf_pub *drvr, s32 bsscfgidx,
* serious troublesome side effects. The p2p module will clean
* up the ifp if needed.
*/
brcmf_p2p_ifp_removed(ifp);
brcmf_p2p_ifp_removed(ifp, rtnl_locked);
kfree(ifp);
}
}

View File

@ -2297,7 +2297,7 @@ int brcmf_p2p_del_vif(struct wiphy *wiphy, struct wireless_dev *wdev)
return err;
}
void brcmf_p2p_ifp_removed(struct brcmf_if *ifp)
void brcmf_p2p_ifp_removed(struct brcmf_if *ifp, bool rtnl_locked)
{
struct brcmf_cfg80211_info *cfg;
struct brcmf_cfg80211_vif *vif;
@ -2306,9 +2306,11 @@ void brcmf_p2p_ifp_removed(struct brcmf_if *ifp)
vif = ifp->vif;
cfg = wdev_to_cfg(&vif->wdev);
cfg->p2p.bss_idx[P2PAPI_BSSCFG_DEVICE].vif = NULL;
rtnl_lock();
if (!rtnl_locked)
rtnl_lock();
cfg80211_unregister_wdev(&vif->wdev);
rtnl_unlock();
if (!rtnl_locked)
rtnl_unlock();
brcmf_free_vif(vif);
}
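
brcmf_p2p_ifp_removed() above gains an rtnl_locked flag because it can now
be reached both with and without the RTNL held; taking rtnl_lock()
unconditionally would self-deadlock on the already-locked path. The shape
of the fix, reduced to a sketch:

#include <linux/rtnetlink.h>
#include <net/cfg80211.h>

static void example_unregister(struct wireless_dev *wdev, bool rtnl_locked)
{
	if (!rtnl_locked)
		rtnl_lock();
	cfg80211_unregister_wdev(wdev);		/* requires RTNL held */
	if (!rtnl_locked)
		rtnl_unlock();
}

Caller-supplied lock flags are easy to get wrong, so this pattern is
usually a stopgap until the call paths can be unified, but it is the
smallest fix suitable for a stable tree.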

View File

@ -155,7 +155,7 @@ struct wireless_dev *brcmf_p2p_add_vif(struct wiphy *wiphy, const char *name,
int brcmf_p2p_del_vif(struct wiphy *wiphy, struct wireless_dev *wdev);
int brcmf_p2p_ifchange(struct brcmf_cfg80211_info *cfg,
enum brcmf_fil_p2p_if_types if_type);
void brcmf_p2p_ifp_removed(struct brcmf_if *ifp);
void brcmf_p2p_ifp_removed(struct brcmf_if *ifp, bool rtnl_locked);
int brcmf_p2p_start_device(struct wiphy *wiphy, struct wireless_dev *wdev);
void brcmf_p2p_stop_device(struct wiphy *wiphy, struct wireless_dev *wdev);
int brcmf_p2p_scan_prep(struct wiphy *wiphy,

View File

@ -960,5 +960,6 @@ int iwl_mvm_start_fw_dbg_conf(struct iwl_mvm *mvm, u8 conf_id)
}
mvm->fw_dbg_conf = conf_id;
return ret;
return 0;
}

View File

@ -105,7 +105,8 @@ iwl_fw_dbg_trigger_vif_match(struct iwl_fw_dbg_trigger_tlv *trig,
{
u32 trig_vif = le32_to_cpu(trig->vif_type);
return trig_vif == IWL_FW_DBG_CONF_VIF_ANY || vif->type == trig_vif;
return trig_vif == IWL_FW_DBG_CONF_VIF_ANY ||
ieee80211_vif_type_p2p(vif) == trig_vif;
}
static inline bool

View File

@ -624,6 +624,7 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
hw->wiphy->features |= NL80211_FEATURE_P2P_GO_CTWIN |
NL80211_FEATURE_LOW_PRIORITY_SCAN |
NL80211_FEATURE_P2P_GO_OPPPS |
NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE |
NL80211_FEATURE_DYNAMIC_SMPS |
NL80211_FEATURE_STATIC_SMPS |
NL80211_FEATURE_SUPPORTS_WMM_ADMISSION;

View File

@ -467,6 +467,8 @@ struct iwl_mvm_vif {
static inline struct iwl_mvm_vif *
iwl_mvm_vif_from_mac80211(struct ieee80211_vif *vif)
{
if (!vif)
return NULL;
return (void *)vif->drv_priv;
}

View File

@ -205,7 +205,8 @@ mwifiex_11n_aggregate_pkt(struct mwifiex_private *priv,
do {
/* Check if AMSDU can accommodate this MSDU */
if (skb_tailroom(skb_aggr) < (skb_src->len + LLC_SNAP_LEN))
if ((skb_aggr->len + skb_src->len + LLC_SNAP_LEN) >
adapter->tx_buf_size)
break;
skb_src = skb_dequeue(&pra_list->skb_head);

View File

@ -6710,9 +6710,10 @@ struct mlx5_ifc_pude_reg_bits {
};
struct mlx5_ifc_ptys_reg_bits {
u8 an_disable_cap[0x1];
u8 reserved_at_0[0x1];
u8 an_disable_admin[0x1];
u8 reserved_at_2[0x6];
u8 an_disable_cap[0x1];
u8 reserved_at_3[0x5];
u8 local_port[0x8];
u8 reserved_at_10[0xd];
u8 proto_mask[0x3];

View File

@ -3267,6 +3267,7 @@ static inline void napi_free_frags(struct napi_struct *napi)
napi->skb = NULL;
}
bool netdev_is_rx_handler_busy(struct net_device *dev);
int netdev_rx_handler_register(struct net_device *dev,
rx_handler_func_t *rx_handler,
void *rx_handler_data);

View File

@ -52,7 +52,7 @@ struct unix_sock {
struct sock sk;
struct unix_address *addr;
struct path path;
struct mutex readlock;
struct mutex iolock, bindlock;
struct sock *peer;
struct list_head link;
atomic_long_t inflight;

View File

@ -1102,6 +1102,7 @@ struct station_info {
struct cfg80211_tid_stats pertid[IEEE80211_NUM_TIDS + 1];
};
#if IS_ENABLED(CONFIG_CFG80211)
/**
* cfg80211_get_station - retrieve information about a given station
* @dev: the device where the station is supposed to be connected to
@ -1114,6 +1115,14 @@ struct station_info {
*/
int cfg80211_get_station(struct net_device *dev, const u8 *mac_addr,
struct station_info *sinfo);
#else
static inline int cfg80211_get_station(struct net_device *dev,
const u8 *mac_addr,
struct station_info *sinfo)
{
return -ENOENT;
}
#endif
/**
* enum monitor_flags - monitor flags
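
The cfg80211_get_station() hunk above adds the usual compiled-out stub:
when CONFIG_CFG80211 is disabled, a static inline with the same signature
returns -ENOENT, so callers build and link without sprinkling #ifdefs at
every call site. The pattern in isolation, as a plain-C reduction
(feature_query is a made-up name):

#include <errno.h>

#define CONFIG_FEATURE 0	/* flip to 1 when the subsystem is built */

#if CONFIG_FEATURE
int feature_query(int id);	/* real implementation lives elsewhere */
#else
static inline int feature_query(int id)
{
	(void)id;
	return -ENOENT;		/* same contract, feature absent */
}
#endif

int main(void)
{
	return feature_query(1) == -ENOENT ? 0 : 1;
}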

View File

@ -111,6 +111,7 @@ struct fib_info {
unsigned char fib_scope;
unsigned char fib_type;
__be32 fib_prefsrc;
u32 fib_tb_id;
u32 fib_priority;
u32 *fib_metrics;
#define fib_mtu fib_metrics[RTAX_MTU-1]
@ -319,7 +320,7 @@ void fib_flush_external(struct net *net);
/* Exported by fib_semantics.c */
int ip_fib_check_default(__be32 gw, struct net_device *dev);
int fib_sync_down_dev(struct net_device *dev, unsigned long event, bool force);
int fib_sync_down_addr(struct net *net, __be32 local);
int fib_sync_down_addr(struct net_device *dev, __be32 local);
int fib_sync_up(struct net_device *dev, unsigned int nh_flags);
extern u32 fib_multipath_secret __read_mostly;

View File

@ -36,4 +36,8 @@ void nft_meta_set_eval(const struct nft_expr *expr,
void nft_meta_set_destroy(const struct nft_ctx *ctx,
const struct nft_expr *expr);
int nft_meta_set_validate(const struct nft_ctx *ctx,
const struct nft_expr *expr,
const struct nft_data **data);
#endif

View File

@ -8,6 +8,10 @@ struct nft_reject {
extern const struct nla_policy nft_reject_policy[];
int nft_reject_validate(const struct nft_ctx *ctx,
const struct nft_expr *expr,
const struct nft_data **data);
int nft_reject_init(const struct nft_ctx *ctx,
const struct nft_expr *expr,
const struct nlattr * const tb[]);

View File

@ -80,13 +80,10 @@ static void br_do_proxy_arp(struct sk_buff *skb, struct net_bridge *br,
BR_INPUT_SKB_CB(skb)->proxyarp_replied = false;
if (dev->flags & IFF_NOARP)
if ((dev->flags & IFF_NOARP) ||
!pskb_may_pull(skb, arp_hdr_len(dev)))
return;
if (!pskb_may_pull(skb, arp_hdr_len(dev))) {
dev->stats.tx_dropped++;
return;
}
parp = arp_hdr(skb);
if (parp->ar_pro != htons(ETH_P_IP) ||

View File

@ -1138,7 +1138,7 @@ static int br_ip6_multicast_mld2_report(struct net_bridge *br,
} else {
err = br_ip6_multicast_add_group(br, port,
&grec->grec_mca, vid);
if (!err)
if (err)
break;
}
}

View File

@ -368,6 +368,8 @@ ebt_check_match(struct ebt_entry_match *m, struct xt_mtchk_param *par,
match = xt_find_match(NFPROTO_BRIDGE, m->u.name, 0);
if (IS_ERR(match) || match->family != NFPROTO_BRIDGE) {
if (!IS_ERR(match))
module_put(match->me);
request_module("ebt_%s", m->u.name);
match = xt_find_match(NFPROTO_BRIDGE, m->u.name, 0);
}

View File

@ -86,6 +86,7 @@ static const struct nft_expr_ops nft_meta_bridge_set_ops = {
.init = nft_meta_set_init,
.destroy = nft_meta_set_destroy,
.dump = nft_meta_set_dump,
.validate = nft_meta_set_validate,
};
static const struct nft_expr_ops *

View File

@ -3974,6 +3974,22 @@ sch_handle_ingress(struct sk_buff *skb, struct packet_type **pt_prev, int *ret,
return skb;
}
/**
* netdev_is_rx_handler_busy - check if receive handler is registered
* @dev: device to check
*
* Check if a receive handler is already registered for a given device.
* Return true if there is one.
*
* The caller must hold the rtnl_mutex.
*/
bool netdev_is_rx_handler_busy(struct net_device *dev)
{
ASSERT_RTNL();
return dev && rtnl_dereference(dev->rx_handler);
}
EXPORT_SYMBOL_GPL(netdev_is_rx_handler_busy);
/**
* netdev_rx_handler_register - register receive handler
* @dev: device to register a handler for

View File

@ -680,11 +680,13 @@ EXPORT_SYMBOL_GPL(__skb_get_hash_symmetric);
void __skb_get_hash(struct sk_buff *skb)
{
struct flow_keys keys;
u32 hash;
__flow_hash_secret_init();
__skb_set_sw_hash(skb, ___skb_get_hash(skb, &keys, hashrnd),
flow_keys_have_l4(&keys));
hash = ___skb_get_hash(skb, &keys, hashrnd);
__skb_set_sw_hash(skb, hash, flow_keys_have_l4(&keys));
}
EXPORT_SYMBOL(__skb_get_hash);
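
The flow-dissector fix above addresses unsequenced argument evaluation: C
does not specify the order in which function arguments are evaluated, so
the old single call could read flow_keys_have_l4(&keys) before
___skb_get_hash() had filled keys. Splitting it into two statements forces
the order. A userspace reduction of the bug class:

#include <stdio.h>

struct keys { int l4; };

static int fill(struct keys *k) { k->l4 = 1; return 42; }
static int have_l4(const struct keys *k) { return k->l4; }
static void set_hash(int hash, int l4) { printf("%d %d\n", hash, l4); }

int main(void)
{
	struct keys k = { 0 };

	/* Buggy: have_l4(&k) may run before fill(&k).
	 * set_hash(fill(&k), have_l4(&k));
	 */

	/* Fixed: a statement boundary sequences the fill before the read. */
	int hash = fill(&k);
	set_hash(hash, have_l4(&k));
	return 0;
}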

View File

@ -2232,7 +2232,7 @@ static struct devinet_sysctl_table {
};
static int __devinet_sysctl_register(struct net *net, char *dev_name,
struct ipv4_devconf *p)
int ifindex, struct ipv4_devconf *p)
{
int i;
struct devinet_sysctl_table *t;
@ -2255,6 +2255,8 @@ static int __devinet_sysctl_register(struct net *net, char *dev_name,
goto free;
p->sysctl = t;
inet_netconf_notify_devconf(net, NETCONFA_ALL, ifindex, p);
return 0;
free:
@ -2286,7 +2288,7 @@ static int devinet_sysctl_register(struct in_device *idev)
if (err)
return err;
err = __devinet_sysctl_register(dev_net(idev->dev), idev->dev->name,
&idev->cnf);
idev->dev->ifindex, &idev->cnf);
if (err)
neigh_sysctl_unregister(idev->arp_parms);
return err;
@ -2347,11 +2349,12 @@ static __net_init int devinet_init_net(struct net *net)
}
#ifdef CONFIG_SYSCTL
err = __devinet_sysctl_register(net, "all", all);
err = __devinet_sysctl_register(net, "all", NETCONFA_IFINDEX_ALL, all);
if (err < 0)
goto err_reg_all;
err = __devinet_sysctl_register(net, "default", dflt);
err = __devinet_sysctl_register(net, "default",
NETCONFA_IFINDEX_DEFAULT, dflt);
if (err < 0)
goto err_reg_dflt;

View File

@ -509,6 +509,7 @@ static int rtentry_to_fib_config(struct net *net, int cmd, struct rtentry *rt,
if (!dev)
return -ENODEV;
cfg->fc_oif = dev->ifindex;
cfg->fc_table = l3mdev_fib_table(dev);
if (colon) {
struct in_ifaddr *ifa;
struct in_device *in_dev = __in_dev_get_rtnl(dev);
@ -1027,7 +1028,7 @@ no_promotions:
* First of all, we scan fib_info list searching
* for stray nexthop entries, then ignite fib_flush.
*/
if (fib_sync_down_addr(dev_net(dev), ifa->ifa_local))
if (fib_sync_down_addr(dev, ifa->ifa_local))
fib_flush(dev_net(dev));
}
}

View File

@ -1057,6 +1057,7 @@ struct fib_info *fib_create_info(struct fib_config *cfg)
fi->fib_priority = cfg->fc_priority;
fi->fib_prefsrc = cfg->fc_prefsrc;
fi->fib_type = cfg->fc_type;
fi->fib_tb_id = cfg->fc_table;
fi->fib_nhs = nhs;
change_nexthops(fi) {
@ -1337,18 +1338,21 @@ nla_put_failure:
* referring to it.
* - device went down -> we must shutdown all nexthops going via it.
*/
int fib_sync_down_addr(struct net *net, __be32 local)
int fib_sync_down_addr(struct net_device *dev, __be32 local)
{
int ret = 0;
unsigned int hash = fib_laddr_hashfn(local);
struct hlist_head *head = &fib_info_laddrhash[hash];
struct net *net = dev_net(dev);
int tb_id = l3mdev_fib_table(dev);
struct fib_info *fi;
if (!fib_info_laddrhash || local == 0)
return 0;
hlist_for_each_entry(fi, head, fib_lhash) {
if (!net_eq(fi->fib_net, net))
if (!net_eq(fi->fib_net, net) ||
fi->fib_tb_id != tb_id)
continue;
if (fi->fib_prefsrc == local) {
fi->fib_flags |= RTNH_F_DEAD;

View File

@ -46,6 +46,7 @@ static const struct nft_expr_ops nft_reject_ipv4_ops = {
.eval = nft_reject_ipv4_eval,
.init = nft_reject_init,
.dump = nft_reject_dump,
.validate = nft_reject_validate,
};
static struct nft_expr_type nft_reject_ipv4_type __read_mostly = {

View File

@ -150,6 +150,7 @@ void tcp_fastopen_add_skb(struct sock *sk, struct sk_buff *skb)
tp->segs_in = 0;
tcp_segs_in(tp, skb);
__skb_pull(skb, tcp_hdrlen(skb));
sk_forced_mem_schedule(sk, skb->truesize);
skb_set_owner_r(skb, sk);
TCP_SKB_CB(skb)->seq++;
@ -226,6 +227,7 @@ static struct sock *tcp_fastopen_create_child(struct sock *sk,
tcp_fastopen_add_skb(child, skb);
tcp_rsk(req)->rcv_nxt = tp->rcv_nxt;
tp->rcv_wup = tp->rcv_nxt;
/* tcp_conn_request() is sending the SYNACK,
* and queues the child into listener accept queue.
*/

View File

@ -76,7 +76,7 @@ static void tcp_yeah_cong_avoid(struct sock *sk, u32 ack, u32 acked)
if (!tcp_is_cwnd_limited(sk))
return;
if (tp->snd_cwnd <= tp->snd_ssthresh)
if (tcp_in_slow_start(tp))
tcp_slow_start(tp, acked);
else if (!yeah->doing_reno_now) {
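
The one-line YeAH fix above matters because tcp_slow_start() caps snd_cwnd
at snd_ssthresh: with the old '<=' test, a flow sitting exactly at
cwnd == ssthresh re-entered slow start on every ACK and the min() kept it
pinned there, so the window never grew. tcp_in_slow_start() uses a strict
'<', letting the congestion-avoidance branch run at the boundary. A
userspace reduction of the two checks:

#include <stdio.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))

int main(void)
{
	unsigned cwnd = 10, ssthresh = 10, acked = 1;

	/* Old check: '<=' enters slow start, which caps at ssthresh,
	 * so cwnd stays 10 on every ACK. */
	if (cwnd <= ssthresh)
		cwnd = MIN(cwnd + acked, ssthresh);
	printf("old: %u\n", cwnd);	/* 10: stuck */

	/* New check mirrors tcp_in_slow_start(): strict '<', so at
	 * cwnd == ssthresh we fall through to congestion avoidance
	 * and the window can grow again. */
	if (cwnd < ssthresh)
		cwnd = MIN(cwnd + acked, ssthresh);
	else
		cwnd += 1;		/* stand-in for CA growth */
	printf("new: %u\n", cwnd);	/* 11 */
	return 0;
}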

View File

@ -29,7 +29,7 @@ static struct dst_entry *__xfrm4_dst_lookup(struct net *net, struct flowi4 *fl4,
memset(fl4, 0, sizeof(*fl4));
fl4->daddr = daddr->a4;
fl4->flowi4_tos = tos;
fl4->flowi4_oif = oif;
fl4->flowi4_oif = l3mdev_master_ifindex_by_index(net, oif);
if (saddr)
fl4->saddr = saddr->a4;

View File

@ -778,7 +778,14 @@ static int addrconf_fixup_forwarding(struct ctl_table *table, int *p, int newf)
}
if (p == &net->ipv6.devconf_all->forwarding) {
int old_dflt = net->ipv6.devconf_dflt->forwarding;
net->ipv6.devconf_dflt->forwarding = newf;
if ((!newf) ^ (!old_dflt))
inet6_netconf_notify_devconf(net, NETCONFA_FORWARDING,
NETCONFA_IFINDEX_DEFAULT,
net->ipv6.devconf_dflt);
addrconf_forward_change(net, newf);
if ((!newf) ^ (!old))
inet6_netconf_notify_devconf(net, NETCONFA_FORWARDING,
@ -1941,6 +1948,7 @@ errdad:
spin_unlock_bh(&ifp->lock);
addrconf_mod_dad_work(ifp, 0);
in6_ifa_put(ifp);
}
/* Join to solicited addr multicast group.
@ -3850,6 +3858,7 @@ static void addrconf_dad_work(struct work_struct *w)
addrconf_dad_begin(ifp);
goto out;
} else if (action == DAD_ABORT) {
in6_ifa_hold(ifp);
addrconf_dad_stop(ifp, 1);
if (disable_ipv6)
addrconf_ifdown(idev->dev, 0);
@ -6025,7 +6034,7 @@ static const struct ctl_table addrconf_sysctl[] = {
static int __addrconf_sysctl_register(struct net *net, char *dev_name,
struct inet6_dev *idev, struct ipv6_devconf *p)
{
int i;
int i, ifindex;
struct ctl_table *table;
char path[sizeof("net/ipv6/conf/") + IFNAMSIZ];
@ -6045,6 +6054,13 @@ static int __addrconf_sysctl_register(struct net *net, char *dev_name,
if (!p->sysctl_header)
goto free;
if (!strcmp(dev_name, "all"))
ifindex = NETCONFA_IFINDEX_ALL;
else if (!strcmp(dev_name, "default"))
ifindex = NETCONFA_IFINDEX_DEFAULT;
else
ifindex = idev->dev->ifindex;
inet6_netconf_notify_devconf(net, NETCONFA_ALL, ifindex, p);
return 0;
free:

View File

@ -1174,6 +1174,7 @@ ip4ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
encap_limit = t->parms.encap_limit;
memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6));
fl6.flowi6_proto = IPPROTO_IPIP;
dsfield = ipv4_get_dsfield(iph);
@ -1233,6 +1234,7 @@ ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
encap_limit = t->parms.encap_limit;
memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6));
fl6.flowi6_proto = IPPROTO_IPV6;
dsfield = ipv6_get_dsfield(ipv6h);
if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)

View File

@ -47,6 +47,7 @@ static const struct nft_expr_ops nft_reject_ipv6_ops = {
.eval = nft_reject_ipv6_eval,
.init = nft_reject_init,
.dump = nft_reject_dump,
.validate = nft_reject_validate,
};
static struct nft_expr_type nft_reject_ipv6_type __read_mostly = {

View File

@ -126,8 +126,10 @@ static int ping_v6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
rt = (struct rt6_info *) dst;
np = inet6_sk(sk);
if (!np)
return -EBADF;
if (!np) {
err = -EBADF;
goto dst_err_out;
}
if (!fl6.flowi6_oif && ipv6_addr_is_multicast(&fl6.daddr))
fl6.flowi6_oif = np->mcast_oif;
@ -163,6 +165,9 @@ static int ping_v6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
}
release_sock(sk);
dst_err_out:
dst_release(dst);
if (err)
return err;

View File

@ -23,6 +23,7 @@ int xfrm6_extract_input(struct xfrm_state *x, struct sk_buff *skb)
int xfrm6_rcv_spi(struct sk_buff *skb, int nexthdr, __be32 spi)
{
XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip6 = NULL;
XFRM_SPI_SKB_CB(skb)->family = AF_INET6;
XFRM_SPI_SKB_CB(skb)->daddroff = offsetof(struct ipv6hdr, daddr);
return xfrm_input(skb, nexthdr, spi, 0);

View File

@ -36,7 +36,7 @@ static struct dst_entry *xfrm6_dst_lookup(struct net *net, int tos, int oif,
int err;
memset(&fl6, 0, sizeof(fl6));
fl6.flowi6_oif = oif;
fl6.flowi6_oif = l3mdev_master_ifindex_by_index(net, oif);
fl6.flowi6_flags = FLOWI_FLAG_SKIP_NH_OIF;
memcpy(&fl6.daddr, daddr, sizeof(fl6.daddr));
if (saddr)

View File

@ -13,6 +13,7 @@
#include <linux/socket.h>
#include <linux/uaccess.h>
#include <linux/workqueue.h>
#include <linux/syscalls.h>
#include <net/kcm.h>
#include <net/netns/generic.h>
#include <net/sock.h>
@ -2029,7 +2030,7 @@ static int kcm_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
if (copy_to_user((void __user *)arg, &info,
sizeof(info))) {
err = -EFAULT;
sock_release(newsock);
sys_close(info.fd);
}
}
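
The KCM change above fixes a double free: by the time copy_to_user() fails,
the new socket has already been installed in the fd table, so the fd owns
it; releasing the struct socket directly leaves the fd pointing at freed
memory. The error path must tear down through the fd instead. A
kernel-style sketch of the ownership rule, with hypothetical names and
era-appropriate APIs:

#include <linux/err.h>
#include <linux/file.h>
#include <linux/net.h>
#include <linux/syscalls.h>
#include <linux/uaccess.h>

struct example_info { int fd; };

static int example_hand_fd_to_user(struct socket *newsock,
				   void __user *uarg,
				   struct example_info *info)
{
	struct file *file;
	int fd, err = 0;

	fd = get_unused_fd_flags(0);
	if (fd < 0)
		return fd;

	file = sock_alloc_file(newsock, 0, NULL);
	if (IS_ERR(file)) {
		put_unused_fd(fd);
		return PTR_ERR(file);
	}

	fd_install(fd, file);	/* from here on, the fd owns newsock */
	info->fd = fd;

	if (copy_to_user(uarg, info, sizeof(*info))) {
		err = -EFAULT;
		/* Unwind via the fd; sock_release(newsock) here would
		 * free a socket the fd table still references. */
		sys_close(fd);
	}
	return err;
}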

View File

@ -1855,6 +1855,9 @@ static __net_exit void l2tp_exit_net(struct net *net)
(void)l2tp_tunnel_delete(tunnel);
}
rcu_read_unlock_bh();
flush_workqueue(l2tp_wq);
rcu_barrier();
}
static struct pernet_operations l2tp_net_ops = {

View File

@ -333,10 +333,11 @@ ieee80211_tdls_chandef_vht_upgrade(struct ieee80211_sub_if_data *sdata,
if (!uc.center_freq1)
return;
/* proceed to downgrade the chandef until usable or the same */
/* proceed to downgrade the chandef until usable or the same as AP BW */
while (uc.width > max_width ||
!cfg80211_reg_can_beacon_relax(sdata->local->hw.wiphy, &uc,
sdata->wdev.iftype))
(uc.width > sta->tdls_chandef.width &&
!cfg80211_reg_can_beacon_relax(sdata->local->hw.wiphy, &uc,
sdata->wdev.iftype)))
ieee80211_chandef_downgrade(&uc);
if (!cfg80211_chandef_identical(&uc, &sta->tdls_chandef)) {

View File

@ -30,7 +30,6 @@ nft_netdev_set_pktinfo_ipv4(struct nft_pktinfo *pkt,
if (!iph)
return;
iph = ip_hdr(skb);
if (iph->ihl < 5 || iph->version != 4)
return;

View File

@ -343,12 +343,12 @@ static int nfnl_acct_del(struct net *net, struct sock *nfnl,
struct sk_buff *skb, const struct nlmsghdr *nlh,
const struct nlattr * const tb[])
{
char *acct_name;
struct nf_acct *cur;
struct nf_acct *cur, *tmp;
int ret = -ENOENT;
char *acct_name;
if (!tb[NFACCT_NAME]) {
list_for_each_entry(cur, &net->nfnl_acct_list, head)
list_for_each_entry_safe(cur, tmp, &net->nfnl_acct_list, head)
nfnl_acct_try_del(cur);
return 0;

View File

@ -98,31 +98,28 @@ static int cttimeout_new_timeout(struct net *net, struct sock *ctnl,
break;
}
l4proto = nf_ct_l4proto_find_get(l3num, l4num);
/* This protocol is not supported, skip. */
if (l4proto->l4proto != l4num) {
ret = -EOPNOTSUPP;
goto err_proto_put;
}
if (matching) {
if (nlh->nlmsg_flags & NLM_F_REPLACE) {
/* You cannot replace one timeout policy by another of
* different kind, sorry.
*/
if (matching->l3num != l3num ||
matching->l4proto->l4proto != l4num) {
ret = -EINVAL;
goto err_proto_put;
}
matching->l4proto->l4proto != l4num)
return -EINVAL;
ret = ctnl_timeout_parse_policy(&matching->data,
l4proto, net,
cda[CTA_TIMEOUT_DATA]);
return ret;
return ctnl_timeout_parse_policy(&matching->data,
matching->l4proto, net,
cda[CTA_TIMEOUT_DATA]);
}
ret = -EBUSY;
return -EBUSY;
}
l4proto = nf_ct_l4proto_find_get(l3num, l4num);
/* This protocol is not supported, skip. */
if (l4proto->l4proto != l4num) {
ret = -EOPNOTSUPP;
goto err_proto_put;
}
@ -305,7 +302,16 @@ static void ctnl_untimeout(struct net *net, struct ctnl_timeout *timeout)
const struct hlist_nulls_node *nn;
unsigned int last_hsize;
spinlock_t *lock;
int i;
int i, cpu;
for_each_possible_cpu(cpu) {
struct ct_pcpu *pcpu = per_cpu_ptr(net->ct.pcpu_lists, cpu);
spin_lock_bh(&pcpu->lock);
hlist_nulls_for_each_entry(h, nn, &pcpu->unconfirmed, hnnode)
untimeout(h, timeout);
spin_unlock_bh(&pcpu->lock);
}
local_bh_disable();
restart:
@ -350,12 +356,13 @@ static int cttimeout_del_timeout(struct net *net, struct sock *ctnl,
const struct nlmsghdr *nlh,
const struct nlattr * const cda[])
{
struct ctnl_timeout *cur;
struct ctnl_timeout *cur, *tmp;
int ret = -ENOENT;
char *name;
if (!cda[CTA_TIMEOUT_NAME]) {
list_for_each_entry(cur, &net->nfct_timeout_list, head)
list_for_each_entry_safe(cur, tmp, &net->nfct_timeout_list,
head)
ctnl_timeout_try_del(net, cur);
return 0;

View File

@ -291,10 +291,16 @@ int nft_meta_get_init(const struct nft_ctx *ctx,
}
EXPORT_SYMBOL_GPL(nft_meta_get_init);
static int nft_meta_set_init_pkttype(const struct nft_ctx *ctx)
int nft_meta_set_validate(const struct nft_ctx *ctx,
const struct nft_expr *expr,
const struct nft_data **data)
{
struct nft_meta *priv = nft_expr_priv(expr);
unsigned int hooks;
if (priv->key != NFT_META_PKTTYPE)
return 0;
switch (ctx->afi->family) {
case NFPROTO_BRIDGE:
hooks = 1 << NF_BR_PRE_ROUTING;
@ -308,6 +314,7 @@ static int nft_meta_set_init_pkttype(const struct nft_ctx *ctx)
return nft_chain_validate_hooks(ctx->chain, hooks);
}
EXPORT_SYMBOL_GPL(nft_meta_set_validate);
int nft_meta_set_init(const struct nft_ctx *ctx,
const struct nft_expr *expr,
@ -327,15 +334,16 @@ int nft_meta_set_init(const struct nft_ctx *ctx,
len = sizeof(u8);
break;
case NFT_META_PKTTYPE:
err = nft_meta_set_init_pkttype(ctx);
if (err)
return err;
len = sizeof(u8);
break;
default:
return -EOPNOTSUPP;
}
err = nft_meta_set_validate(ctx, expr, NULL);
if (err < 0)
return err;
priv->sreg = nft_parse_register(tb[NFTA_META_SREG]);
err = nft_validate_register_load(priv->sreg, len);
if (err < 0)
@ -407,6 +415,7 @@ static const struct nft_expr_ops nft_meta_set_ops = {
.init = nft_meta_set_init,
.destroy = nft_meta_set_destroy,
.dump = nft_meta_set_dump,
.validate = nft_meta_set_validate,
};
static const struct nft_expr_ops *

View File

@ -26,11 +26,27 @@ const struct nla_policy nft_reject_policy[NFTA_REJECT_MAX + 1] = {
};
EXPORT_SYMBOL_GPL(nft_reject_policy);
int nft_reject_validate(const struct nft_ctx *ctx,
const struct nft_expr *expr,
const struct nft_data **data)
{
return nft_chain_validate_hooks(ctx->chain,
(1 << NF_INET_LOCAL_IN) |
(1 << NF_INET_FORWARD) |
(1 << NF_INET_LOCAL_OUT));
}
EXPORT_SYMBOL_GPL(nft_reject_validate);
int nft_reject_init(const struct nft_ctx *ctx,
const struct nft_expr *expr,
const struct nlattr * const tb[])
{
struct nft_reject *priv = nft_expr_priv(expr);
int err;
err = nft_reject_validate(ctx, expr, NULL);
if (err < 0)
return err;
if (tb[NFTA_REJECT_TYPE] == NULL)
return -EINVAL;

View File

@ -66,7 +66,11 @@ static int nft_reject_inet_init(const struct nft_ctx *ctx,
const struct nlattr * const tb[])
{
struct nft_reject *priv = nft_expr_priv(expr);
int icmp_code;
int icmp_code, err;
err = nft_reject_validate(ctx, expr, NULL);
if (err < 0)
return err;
if (tb[NFTA_REJECT_TYPE] == NULL)
return -EINVAL;
@ -124,6 +128,7 @@ static const struct nft_expr_ops nft_reject_inet_ops = {
.eval = nft_reject_inet_eval,
.init = nft_reject_inet_init,
.dump = nft_reject_inet_dump,
.validate = nft_reject_validate,
};
static struct nft_expr_type nft_reject_inet_type __read_mostly = {

View File

@ -878,7 +878,7 @@ static sctp_xmit_t sctp_packet_will_fit(struct sctp_packet *packet,
struct sctp_chunk *chunk,
u16 chunk_len)
{
size_t psize, pmtu;
size_t psize, pmtu, maxsize;
sctp_xmit_t retval = SCTP_XMIT_OK;
psize = packet->size;
@ -906,6 +906,17 @@ static sctp_xmit_t sctp_packet_will_fit(struct sctp_packet *packet,
goto out;
}
/* Similarly, if this chunk was built before a PMTU
* reduction, we have to fragment it at IP level now. So
* if the packet already contains something, we need to
* flush.
*/
maxsize = pmtu - packet->overhead;
if (packet->auth)
maxsize -= WORD_ROUND(packet->auth->skb->len);
if (chunk_len > maxsize)
retval = SCTP_XMIT_PMTU_FULL;
/* It is also okay to fragment if the chunk we are
* adding is a control chunk, but only if current packet
* is not a GSO one otherwise it causes fragmentation of
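
A worked example of the new SCTP check, with illustrative numbers: suppose
a chunk was built while the PMTU was 9000 and is 8000 bytes long, and the
PMTU has since dropped to 1500 with a packet overhead of 48 bytes and no
pending AUTH chunk. Then maxsize = 1500 - 48 = 1452, chunk_len (8000) is
greater than maxsize, and sctp_packet_will_fit() reports
SCTP_XMIT_PMTU_FULL, so the oversized chunk is left to be fragmented at
the IP level instead of being sent as-is, as the comment in the hunk
describes.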

View File

@ -62,6 +62,8 @@ static void publ_to_item(struct distr_item *i, struct publication *p)
/**
* named_prepare_buf - allocate & initialize a publication message
*
* The buffer returned is of size INT_H_SIZE + payload size
*/
static struct sk_buff *named_prepare_buf(struct net *net, u32 type, u32 size,
u32 dest)
@ -141,9 +143,9 @@ static void named_distribute(struct net *net, struct sk_buff_head *list,
struct publication *publ;
struct sk_buff *skb = NULL;
struct distr_item *item = NULL;
uint msg_dsz = (tipc_node_get_mtu(net, dnode, 0) / ITEM_SIZE) *
ITEM_SIZE;
uint msg_rem = msg_dsz;
u32 msg_dsz = ((tipc_node_get_mtu(net, dnode, 0) - INT_H_SIZE) /
ITEM_SIZE) * ITEM_SIZE;
u32 msg_rem = msg_dsz;
list_for_each_entry(publ, pls, local_list) {
/* Prepare next buffer: */

View File

@ -661,11 +661,11 @@ static int unix_set_peek_off(struct sock *sk, int val)
{
struct unix_sock *u = unix_sk(sk);
if (mutex_lock_interruptible(&u->readlock))
if (mutex_lock_interruptible(&u->iolock))
return -EINTR;
sk->sk_peek_off = val;
mutex_unlock(&u->readlock);
mutex_unlock(&u->iolock);
return 0;
}
@ -779,7 +779,8 @@ static struct sock *unix_create1(struct net *net, struct socket *sock, int kern)
spin_lock_init(&u->lock);
atomic_long_set(&u->inflight, 0);
INIT_LIST_HEAD(&u->link);
mutex_init(&u->readlock); /* single task reading lock */
mutex_init(&u->iolock); /* single task reading lock */
mutex_init(&u->bindlock); /* single task binding lock */
init_waitqueue_head(&u->peer_wait);
init_waitqueue_func_entry(&u->peer_wake, unix_dgram_peer_wake_relay);
unix_insert_socket(unix_sockets_unbound(sk), sk);
@ -848,7 +849,7 @@ static int unix_autobind(struct socket *sock)
int err;
unsigned int retries = 0;
err = mutex_lock_interruptible(&u->readlock);
err = mutex_lock_interruptible(&u->bindlock);
if (err)
return err;
@ -895,7 +896,7 @@ retry:
spin_unlock(&unix_table_lock);
err = 0;
out: mutex_unlock(&u->readlock);
out: mutex_unlock(&u->bindlock);
return err;
}
@ -954,20 +955,32 @@ fail:
return NULL;
}
static int unix_mknod(struct dentry *dentry, const struct path *path, umode_t mode,
struct path *res)
static int unix_mknod(const char *sun_path, umode_t mode, struct path *res)
{
int err;
struct dentry *dentry;
struct path path;
int err = 0;
/*
* Get the parent directory, calculate the hash for last
* component.
*/
dentry = kern_path_create(AT_FDCWD, sun_path, &path, 0);
err = PTR_ERR(dentry);
if (IS_ERR(dentry))
return err;
err = security_path_mknod(path, dentry, mode, 0);
/*
* All right, let's create it.
*/
err = security_path_mknod(&path, dentry, mode, 0);
if (!err) {
err = vfs_mknod(d_inode(path->dentry), dentry, mode, 0);
err = vfs_mknod(d_inode(path.dentry), dentry, mode, 0);
if (!err) {
res->mnt = mntget(path->mnt);
res->mnt = mntget(path.mnt);
res->dentry = dget(dentry);
}
}
done_path_create(&path, dentry);
return err;
}
@ -978,12 +991,10 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
struct unix_sock *u = unix_sk(sk);
struct sockaddr_un *sunaddr = (struct sockaddr_un *)uaddr;
char *sun_path = sunaddr->sun_path;
int err, name_err;
int err;
unsigned int hash;
struct unix_address *addr;
struct hlist_head *list;
struct path path;
struct dentry *dentry;
err = -EINVAL;
if (sunaddr->sun_family != AF_UNIX)
@ -999,34 +1010,14 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
goto out;
addr_len = err;
name_err = 0;
dentry = NULL;
if (sun_path[0]) {
/* Get the parent directory, calculate the hash for last
* component.
*/
dentry = kern_path_create(AT_FDCWD, sun_path, &path, 0);
if (IS_ERR(dentry)) {
/* delay report until after 'already bound' check */
name_err = PTR_ERR(dentry);
dentry = NULL;
}
}
err = mutex_lock_interruptible(&u->readlock);
err = mutex_lock_interruptible(&u->bindlock);
if (err)
goto out_path;
goto out;
err = -EINVAL;
if (u->addr)
goto out_up;
if (name_err) {
err = name_err == -EEXIST ? -EADDRINUSE : name_err;
goto out_up;
}
err = -ENOMEM;
addr = kmalloc(sizeof(*addr)+addr_len, GFP_KERNEL);
if (!addr)
@ -1037,11 +1028,11 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
addr->hash = hash ^ sk->sk_type;
atomic_set(&addr->refcnt, 1);
if (dentry) {
struct path u_path;
if (sun_path[0]) {
struct path path;
umode_t mode = S_IFSOCK |
(SOCK_INODE(sock)->i_mode & ~current_umask());
err = unix_mknod(dentry, &path, mode, &u_path);
err = unix_mknod(sun_path, mode, &path);
if (err) {
if (err == -EEXIST)
err = -EADDRINUSE;
@ -1049,9 +1040,9 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
goto out_up;
}
addr->hash = UNIX_HASH_SIZE;
hash = d_real_inode(dentry)->i_ino & (UNIX_HASH_SIZE - 1);
hash = d_real_inode(path.dentry)->i_ino & (UNIX_HASH_SIZE - 1);
spin_lock(&unix_table_lock);
u->path = u_path;
u->path = path;
list = &unix_socket_table[hash];
} else {
spin_lock(&unix_table_lock);
@ -1073,11 +1064,7 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
out_unlock:
spin_unlock(&unix_table_lock);
out_up:
mutex_unlock(&u->readlock);
out_path:
if (dentry)
done_path_create(&path, dentry);
mutex_unlock(&u->bindlock);
out:
return err;
}
@ -1969,17 +1956,17 @@ static ssize_t unix_stream_sendpage(struct socket *socket, struct page *page,
if (false) {
alloc_skb:
unix_state_unlock(other);
mutex_unlock(&unix_sk(other)->readlock);
mutex_unlock(&unix_sk(other)->iolock);
newskb = sock_alloc_send_pskb(sk, 0, 0, flags & MSG_DONTWAIT,
&err, 0);
if (!newskb)
goto err;
}
/* we must acquire readlock as we modify already present
/* we must acquire iolock as we modify already present
* skbs in the sk_receive_queue and mess with skb->len
*/
err = mutex_lock_interruptible(&unix_sk(other)->readlock);
err = mutex_lock_interruptible(&unix_sk(other)->iolock);
if (err) {
err = flags & MSG_DONTWAIT ? -EAGAIN : -ERESTARTSYS;
goto err;
@ -2046,7 +2033,7 @@ alloc_skb:
}
unix_state_unlock(other);
mutex_unlock(&unix_sk(other)->readlock);
mutex_unlock(&unix_sk(other)->iolock);
other->sk_data_ready(other);
scm_destroy(&scm);
@ -2055,7 +2042,7 @@ alloc_skb:
err_state_unlock:
unix_state_unlock(other);
err_unlock:
mutex_unlock(&unix_sk(other)->readlock);
mutex_unlock(&unix_sk(other)->iolock);
err:
kfree_skb(newskb);
if (send_sigpipe && !(flags & MSG_NOSIGNAL))
@ -2123,7 +2110,7 @@ static int unix_dgram_recvmsg(struct socket *sock, struct msghdr *msg,
timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
do {
mutex_lock(&u->readlock);
mutex_lock(&u->iolock);
skip = sk_peek_offset(sk, flags);
skb = __skb_try_recv_datagram(sk, flags, &peeked, &skip, &err,
@ -2131,14 +2118,14 @@ static int unix_dgram_recvmsg(struct socket *sock, struct msghdr *msg,
if (skb)
break;
mutex_unlock(&u->readlock);
mutex_unlock(&u->iolock);
if (err != -EAGAIN)
break;
} while (timeo &&
!__skb_wait_for_more_packets(sk, &err, &timeo, last));
if (!skb) { /* implies readlock unlocked */
if (!skb) { /* implies iolock unlocked */
unix_state_lock(sk);
/* Signal EOF on disconnected non-blocking SEQPACKET socket. */
if (sk->sk_type == SOCK_SEQPACKET && err == -EAGAIN &&
@ -2203,7 +2190,7 @@ static int unix_dgram_recvmsg(struct socket *sock, struct msghdr *msg,
out_free:
skb_free_datagram(sk, skb);
mutex_unlock(&u->readlock);
mutex_unlock(&u->iolock);
out:
return err;
}
@ -2298,7 +2285,7 @@ static int unix_stream_read_generic(struct unix_stream_read_state *state)
/* Lock the socket to prevent queue disordering
* while sleeps in memcpy_tomsg
*/
mutex_lock(&u->readlock);
mutex_lock(&u->iolock);
if (flags & MSG_PEEK)
skip = sk_peek_offset(sk, flags);
@ -2340,7 +2327,7 @@ again:
break;
}
mutex_unlock(&u->readlock);
mutex_unlock(&u->iolock);
timeo = unix_stream_data_wait(sk, timeo, last,
last_len);
@ -2351,7 +2338,7 @@ again:
goto out;
}
mutex_lock(&u->readlock);
mutex_lock(&u->iolock);
goto redo;
unlock:
unix_state_unlock(sk);
@ -2454,7 +2441,7 @@ unlock:
}
} while (size);
mutex_unlock(&u->readlock);
mutex_unlock(&u->iolock);
if (state->msg)
scm_recv(sock, state->msg, &scm, flags);
else
@ -2495,9 +2482,9 @@ static ssize_t skb_unix_socket_splice(struct sock *sk,
int ret;
struct unix_sock *u = unix_sk(sk);
mutex_unlock(&u->readlock);
mutex_unlock(&u->iolock);
ret = splice_to_pipe(pipe, spd);
mutex_lock(&u->readlock);
mutex_lock(&u->iolock);
return ret;
}
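
The af_unix hunks above split the single u->readlock into two mutexes with
disjoint jobs: iolock serializes the data paths (datagram receive, stream
reads, sendpage, splice), while bindlock serializes only bind() and
autobind. With one shared mutex, the splice path (entangled with page-fault
ordering) and the bind path (which performs filesystem operations such as
mknod) sat in the same lock graph and could form a deadlock cycle; giving
each job its own lock breaks that cycle. The shape of the split, as an
illustrative sketch:

#include <linux/mutex.h>

struct example_unix_sock {
	struct mutex iolock;	/* recvmsg/sendpage/splice paths */
	struct mutex bindlock;	/* bind() and autobind only */
};

static void example_init(struct example_unix_sock *u)
{
	mutex_init(&u->iolock);
	mutex_init(&u->bindlock);
}

/* Bind-side user: never touches iolock, so it cannot participate in
 * the I/O paths' lock ordering. */
static int example_autobind(struct example_unix_sock *u)
{
	int err = mutex_lock_interruptible(&u->bindlock);

	if (err)
		return err;
	/* ... pick and install an address ... */
	mutex_unlock(&u->bindlock);
	return 0;
}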

View File

@ -958,29 +958,8 @@ static int wireless_process_ioctl(struct net *net, struct ifreq *ifr,
return private(dev, iwr, cmd, info, handler);
}
/* Old driver API : call driver ioctl handler */
if (dev->netdev_ops->ndo_do_ioctl) {
#ifdef CONFIG_COMPAT
if (info->flags & IW_REQUEST_FLAG_COMPAT) {
int ret = 0;
struct iwreq iwr_lcl;
struct compat_iw_point *iwp_compat = (void *) &iwr->u.data;
memcpy(&iwr_lcl, iwr, sizeof(struct iwreq));
iwr_lcl.u.data.pointer = compat_ptr(iwp_compat->pointer);
iwr_lcl.u.data.length = iwp_compat->length;
iwr_lcl.u.data.flags = iwp_compat->flags;
ret = dev->netdev_ops->ndo_do_ioctl(dev, (void *) &iwr_lcl, cmd);
iwp_compat->pointer = ptr_to_compat(iwr_lcl.u.data.pointer);
iwp_compat->length = iwr_lcl.u.data.length;
iwp_compat->flags = iwr_lcl.u.data.flags;
return ret;
} else
#endif
return dev->netdev_ops->ndo_do_ioctl(dev, ifr, cmd);
}
if (dev->netdev_ops->ndo_do_ioctl)
return dev->netdev_ops->ndo_do_ioctl(dev, ifr, cmd);
return -EOPNOTSUPP;
}

View File

@ -207,15 +207,15 @@ int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type)
family = XFRM_SPI_SKB_CB(skb)->family;
/* if tunnel is present override skb->mark value with tunnel i_key */
if (XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4) {
switch (family) {
case AF_INET:
switch (family) {
case AF_INET:
if (XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4)
mark = be32_to_cpu(XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4->parms.i_key);
break;
case AF_INET6:
break;
case AF_INET6:
if (XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip6)
mark = be32_to_cpu(XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip6->parms.i_key);
break;
}
break;
}
/* Allocate new secpath or COW existing one. */
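
The xfrm_input reordering above applies the usual discipline for
per-family state: select by the discriminant (the address family) first,
then test only the member that matches it, rather than probing one member
and switching afterwards. Reduced to plain C with made-up types:

#include <stdio.h>

struct tun4 { unsigned key; };
struct tun6 { unsigned key; };

struct tunnel_cb {
	int family;			/* discriminant */
	union {
		struct tun4 *ip4;
		struct tun6 *ip6;
	} u;
};

static unsigned tunnel_mark(const struct tunnel_cb *cb)
{
	switch (cb->family) {		/* family first */
	case 4:
		return cb->u.ip4 ? cb->u.ip4->key : 0;
	case 6:
		return cb->u.ip6 ? cb->u.ip6->key : 0;
	}
	return 0;
}

int main(void)
{
	struct tun6 t6 = { .key = 42 };
	struct tunnel_cb cb = { .family = 6, .u.ip6 = &t6 };

	printf("%u\n", tunnel_mark(&cb));	/* prints: 42 */
	return 0;
}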

View File

@ -626,6 +626,10 @@ static void xfrm_hash_rebuild(struct work_struct *work)
/* re-insert all policies by order of creation */
list_for_each_entry_reverse(policy, &net->xfrm.policy_all, walk.all) {
if (xfrm_policy_id2dir(policy->index) >= XFRM_POLICY_MAX) {
/* skip socket policies */
continue;
}
newpos = NULL;
chain = policy_hash_bysel(net, &policy->selector,
policy->family,

View File

@ -896,7 +896,8 @@ static int xfrm_dump_sa_done(struct netlink_callback *cb)
struct sock *sk = cb->skb->sk;
struct net *net = sock_net(sk);
xfrm_state_walk_done(walk, net);
if (cb->args[0])
xfrm_state_walk_done(walk, net);
return 0;
}
@ -921,8 +922,6 @@ static int xfrm_dump_sa(struct sk_buff *skb, struct netlink_callback *cb)
u8 proto = 0;
int err;
cb->args[0] = 1;
err = nlmsg_parse(cb->nlh, 0, attrs, XFRMA_MAX,
xfrma_policy);
if (err < 0)
@ -939,6 +938,7 @@ static int xfrm_dump_sa(struct sk_buff *skb, struct netlink_callback *cb)
proto = nla_get_u8(attrs[XFRMA_PROTO]);
xfrm_state_walk_init(walk, proto, filter);
cb->args[0] = 1;
}
(void) xfrm_state_walk(net, walk, dump_one_state, &info);
@ -2051,9 +2051,6 @@ static int xfrm_add_pol_expire(struct sk_buff *skb, struct nlmsghdr *nlh,
if (up->hard) {
xfrm_policy_delete(xp, p->dir);
xfrm_audit_policy_delete(xp, 1, true);
} else {
// reset the timers here?
WARN(1, "Don't know what to do with soft policy expire\n");
}
km_policy_expired(xp, p->dir, up->hard, nlh->nlmsg_pid);
@ -2117,7 +2114,7 @@ static int xfrm_add_acquire(struct sk_buff *skb, struct nlmsghdr *nlh,
err = verify_newpolicy_info(&ua->policy);
if (err)
goto bad_policy;
goto free_state;
/* build an XP */
xp = xfrm_policy_construct(net, &ua->policy, attrs, &err);
@ -2149,8 +2146,6 @@ static int xfrm_add_acquire(struct sk_buff *skb, struct nlmsghdr *nlh,
return 0;
bad_policy:
WARN(1, "BAD policy passed\n");
free_state:
kfree(x);
nomem: