
Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

Pull networking fixes from David Miller:

 1) Don't corrupt xfrm_interface parms before validation, from Nicolas
    Dichtel.

 2) Revert use of usb-wakeup in btusb, from Mario Limonciello.

 3) Block ipv6 packets in bridge netfilter if ipv6 is disabled, from
    Leonardo Bras.

 4) IPS_OFFLOAD not honored in ctnetlink, from Pablo Neira Ayuso.

 5) Missing ULP check in sock_map, from John Fastabend.

 6) Fix receive statistic handling in forcedeth, from Zhu Yanjun.

 7) Fix length of SKB allocated in 6pack driver, from Christophe
    JAILLET.

 8) ip6_route_info_create() returns an error pointer, not NULL (see the
    sketch after this list). From Maciej Żenczykowski.

 9) Only add RDS sock to the hashes after rs_transport is set, from
    Ka-Cheong Poon.

10) Don't double clean TX descriptors in ixgbe, from Ilya Maximets.

11) Presence of transmit IPSEC offload in an SKB is not tested for
    correctly in ixgbe and ixgbevf. From Steffen Klassert and Jeff
    Kirsher.

12) Need rcu_barrier() when register_netdevice() takes one of the
    notifier based failure paths, from Subash Abhinov Kasiviswanathan.

13) Fix leak in sctp_do_bind(), from Mao Wenan.
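
As a worked illustration of item 8, this is the usual kernel convention for
functions that return error pointers instead of NULL on failure (a minimal
sketch with a hypothetical caller; IS_ERR() and PTR_ERR() are the standard
<linux/err.h> helpers):

	struct fib6_info *f6i;

	f6i = ip6_route_info_create(&cfg, gfp_flags, NULL);
	if (IS_ERR(f6i))		/* failure is ERR_PTR(-errno), never NULL */
		return PTR_ERR(f6i);
	/* a NULL check here would let every error pointer through as "success" */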

* git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net: (72 commits)
  cdc_ether: fix rndis support for Mediatek based smartphones
  sctp: destroy bucket if failed to bind addr
  sctp: remove redundant assignment when call sctp_get_port_local
  sctp: change return type of sctp_get_port_local
  ixgbevf: Fix secpath usage for IPsec Tx offload
  sctp: Fix the link time qualifier of 'sctp_ctrlsock_exit()'
  ixgbe: Fix secpath usage for IPsec TX offload.
  net: qrtr: fix memort leak in qrtr_tun_write_iter
  net: Fix null de-reference of device refcount
  ipv6: Fix the link time qualifier of 'ping_v6_proc_exit_net()'
  tun: fix use-after-free when register netdev failed
  tcp: fix tcp_ecn_withdraw_cwr() to clear TCP_ECN_QUEUE_CWR
  ixgbe: fix double clean of Tx descriptors with xdp
  ixgbe: Prevent u8 wrapping of ITR value to something less than 10us
  mlx4: fix spelling mistake "veify" -> "verify"
  net: hns3: fix spelling mistake "undeflow" -> "underflow"
  net: lmc: fix spelling mistake "runnin" -> "running"
  NFC: st95hf: fix spelling mistake "receieve" -> "receive"
  net/rds: An rds_sock is added too early to the hash table
  mac80211: Do not send Layer 2 Update frame before authorization
  ...
Linus Torvalds 2019-09-14 12:20:38 -07:00
commit 36024fcf8d
67 changed files with 443 additions and 289 deletions


@ -17699,8 +17699,7 @@ F: include/uapi/linux/dqblk_xfs.h
F: include/uapi/linux/fsmap.h
XILINX AXI ETHERNET DRIVER
M: Anirudha Sarangi <anirudh@xilinx.com>
M: John Linn <John.Linn@xilinx.com>
M: Radhey Shyam Pandey <radhey.shyam.pandey@xilinx.com>
S: Maintained
F: drivers/net/ethernet/xilinx/xilinx_axienet*


@ -337,7 +337,7 @@ static int bpa10x_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
usb_free_urb(urb);
return 0;
return err;
}
static int bpa10x_set_diag(struct hci_dev *hdev, bool enable)


@ -384,6 +384,9 @@ static const struct usb_device_id blacklist_table[] = {
{ USB_DEVICE(0x13d3, 0x3526), .driver_info = BTUSB_REALTEK },
{ USB_DEVICE(0x0b05, 0x185c), .driver_info = BTUSB_REALTEK },
/* Additional Realtek 8822CE Bluetooth devices */
{ USB_DEVICE(0x04ca, 0x4005), .driver_info = BTUSB_REALTEK },
/* Silicon Wave based devices */
{ USB_DEVICE(0x0c10, 0x0000), .driver_info = BTUSB_SWAVE },
@ -1170,10 +1173,6 @@ static int btusb_open(struct hci_dev *hdev)
}
data->intf->needs_remote_wakeup = 1;
/* device specific wakeup source enabled and required for USB
* remote wakeup while host is suspended
*/
device_wakeup_enable(&data->udev->dev);
if (test_and_set_bit(BTUSB_INTR_RUNNING, &data->flags))
goto done;
@ -1238,7 +1237,6 @@ static int btusb_close(struct hci_dev *hdev)
goto failed;
data->intf->needs_remote_wakeup = 0;
device_wakeup_disable(&data->udev->dev);
usb_autopm_put_interface(data->intf);
failed:


@ -309,13 +309,14 @@ static void qca_wq_awake_device(struct work_struct *work)
ws_awake_device);
struct hci_uart *hu = qca->hu;
unsigned long retrans_delay;
unsigned long flags;
BT_DBG("hu %p wq awake device", hu);
/* Vote for serial clock */
serial_clock_vote(HCI_IBS_TX_VOTE_CLOCK_ON, hu);
spin_lock(&qca->hci_ibs_lock);
spin_lock_irqsave(&qca->hci_ibs_lock, flags);
/* Send wake indication to device */
if (send_hci_ibs_cmd(HCI_IBS_WAKE_IND, hu) < 0)
@ -327,7 +328,7 @@ static void qca_wq_awake_device(struct work_struct *work)
retrans_delay = msecs_to_jiffies(qca->wake_retrans);
mod_timer(&qca->wake_retrans_timer, jiffies + retrans_delay);
spin_unlock(&qca->hci_ibs_lock);
spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
/* Actually send the packets */
hci_uart_tx_wakeup(hu);
@ -338,12 +339,13 @@ static void qca_wq_awake_rx(struct work_struct *work)
struct qca_data *qca = container_of(work, struct qca_data,
ws_awake_rx);
struct hci_uart *hu = qca->hu;
unsigned long flags;
BT_DBG("hu %p wq awake rx", hu);
serial_clock_vote(HCI_IBS_RX_VOTE_CLOCK_ON, hu);
spin_lock(&qca->hci_ibs_lock);
spin_lock_irqsave(&qca->hci_ibs_lock, flags);
qca->rx_ibs_state = HCI_IBS_RX_AWAKE;
/* Always acknowledge device wake up,
@ -354,7 +356,7 @@ static void qca_wq_awake_rx(struct work_struct *work)
qca->ibs_sent_wacks++;
spin_unlock(&qca->hci_ibs_lock);
spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
/* Actually send the packets */
hci_uart_tx_wakeup(hu);


@ -688,6 +688,9 @@ capi_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos
if (!cdev->ap.applid)
return -ENODEV;
if (count < CAPIMSG_BASELEN)
return -EINVAL;
skb = alloc_skb(count, GFP_USER);
if (!skb)
return -ENOMEM;
@ -698,7 +701,8 @@ capi_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos
}
mlen = CAPIMSG_LEN(skb->data);
if (CAPIMSG_CMD(skb->data) == CAPI_DATA_B3_REQ) {
if ((size_t)(mlen + CAPIMSG_DATALEN(skb->data)) != count) {
if (count < CAPI_DATA_B3_REQ_LEN ||
(size_t)(mlen + CAPIMSG_DATALEN(skb->data)) != count) {
kfree_skb(skb);
return -EINVAL;
}
@ -711,6 +715,10 @@ capi_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos
CAPIMSG_SETAPPID(skb->data, cdev->ap.applid);
if (CAPIMSG_CMD(skb->data) == CAPI_DISCONNECT_B3_RESP) {
if (count < CAPI_DISCONNECT_B3_RESP_LEN) {
kfree_skb(skb);
return -EINVAL;
}
mutex_lock(&cdev->lock);
capincci_free(cdev, CAPIMSG_NCCI(skb->data));
mutex_unlock(&cdev->lock);


@ -98,7 +98,7 @@ static const struct hclge_hw_error hclge_igu_egu_tnl_int[] = {
.reset_level = HNAE3_GLOBAL_RESET },
{ .int_msk = BIT(1), .msg = "rx_stp_fifo_overflow",
.reset_level = HNAE3_GLOBAL_RESET },
{ .int_msk = BIT(2), .msg = "rx_stp_fifo_undeflow",
{ .int_msk = BIT(2), .msg = "rx_stp_fifo_underflow",
.reset_level = HNAE3_GLOBAL_RESET },
{ .int_msk = BIT(3), .msg = "tx_buf_overflow",
.reset_level = HNAE3_GLOBAL_RESET },


@ -1984,8 +1984,11 @@ static void __ibmvnic_reset(struct work_struct *work)
rwi = get_next_rwi(adapter);
while (rwi) {
if (adapter->state == VNIC_REMOVING ||
adapter->state == VNIC_REMOVED)
goto out;
adapter->state == VNIC_REMOVED) {
kfree(rwi);
rc = EBUSY;
break;
}
if (adapter->force_reset_recovery) {
adapter->force_reset_recovery = false;
@ -2011,7 +2014,7 @@ static void __ibmvnic_reset(struct work_struct *work)
netdev_dbg(adapter->netdev, "Reset failed\n");
free_all_rwi(adapter);
}
out:
adapter->resetting = false;
if (we_lock_rtnl)
rtnl_unlock();


@ -36,6 +36,7 @@
#include <net/vxlan.h>
#include <net/mpls.h>
#include <net/xdp_sock.h>
#include <net/xfrm.h>
#include "ixgbe.h"
#include "ixgbe_common.h"
@ -2621,7 +2622,7 @@ adjust_by_size:
/* 16K ints/sec to 9.2K ints/sec */
avg_wire_size *= 15;
avg_wire_size += 11452;
} else if (avg_wire_size <= 1980) {
} else if (avg_wire_size < 1968) {
/* 9.2K ints/sec to 8K ints/sec */
avg_wire_size *= 5;
avg_wire_size += 22420;
@ -2654,6 +2655,8 @@ adjust_by_size:
case IXGBE_LINK_SPEED_2_5GB_FULL:
case IXGBE_LINK_SPEED_1GB_FULL:
case IXGBE_LINK_SPEED_10_FULL:
if (avg_wire_size > 8064)
avg_wire_size = 8064;
itr += DIV_ROUND_UP(avg_wire_size,
IXGBE_ITR_ADAPTIVE_MIN_INC * 64) *
IXGBE_ITR_ADAPTIVE_MIN_INC;
@ -8695,7 +8698,7 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
#endif /* IXGBE_FCOE */
#ifdef CONFIG_IXGBE_IPSEC
if (secpath_exists(skb) &&
if (xfrm_offload(skb) &&
!ixgbe_ipsec_tx(tx_ring, first, &ipsec_tx))
goto out_drop;
#endif
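
The ixgbe/ixgbevf transmit hunks in this pull (fix 11) hinge on picking the
right predicate. A condensed before/after sketch (both helpers are real
<net/xfrm.h> APIs; the surrounding code is from the diff above):

	/* before: secpath_exists() is also true for already-decrypted
	 * inbound IPsec packets that are merely being forwarded out */
	if (secpath_exists(skb) && !ixgbe_ipsec_tx(tx_ring, first, &ipsec_tx))
		goto out_drop;

	/* after: xfrm_offload() is non-NULL only when this skb actually
	 * carries a TX crypto offload state */
	if (xfrm_offload(skb) && !ixgbe_ipsec_tx(tx_ring, first, &ipsec_tx))
		goto out_drop;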


@ -633,19 +633,17 @@ static void ixgbe_clean_xdp_tx_buffer(struct ixgbe_ring *tx_ring,
bool ixgbe_clean_xdp_tx_irq(struct ixgbe_q_vector *q_vector,
struct ixgbe_ring *tx_ring, int napi_budget)
{
u16 ntc = tx_ring->next_to_clean, ntu = tx_ring->next_to_use;
unsigned int total_packets = 0, total_bytes = 0;
u32 i = tx_ring->next_to_clean, xsk_frames = 0;
unsigned int budget = q_vector->tx.work_limit;
struct xdp_umem *umem = tx_ring->xsk_umem;
union ixgbe_adv_tx_desc *tx_desc;
struct ixgbe_tx_buffer *tx_bi;
bool xmit_done;
u32 xsk_frames = 0;
tx_bi = &tx_ring->tx_buffer_info[i];
tx_desc = IXGBE_TX_DESC(tx_ring, i);
i -= tx_ring->count;
tx_bi = &tx_ring->tx_buffer_info[ntc];
tx_desc = IXGBE_TX_DESC(tx_ring, ntc);
do {
while (ntc != ntu) {
if (!(tx_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)))
break;
@ -661,22 +659,18 @@ bool ixgbe_clean_xdp_tx_irq(struct ixgbe_q_vector *q_vector,
tx_bi++;
tx_desc++;
i++;
if (unlikely(!i)) {
i -= tx_ring->count;
ntc++;
if (unlikely(ntc == tx_ring->count)) {
ntc = 0;
tx_bi = tx_ring->tx_buffer_info;
tx_desc = IXGBE_TX_DESC(tx_ring, 0);
}
/* issue prefetch for next Tx descriptor */
prefetch(tx_desc);
}
/* update budget accounting */
budget--;
} while (likely(budget));
i += tx_ring->count;
tx_ring->next_to_clean = i;
tx_ring->next_to_clean = ntc;
u64_stats_update_begin(&tx_ring->syncp);
tx_ring->stats.bytes += total_bytes;
@ -688,8 +682,7 @@ bool ixgbe_clean_xdp_tx_irq(struct ixgbe_q_vector *q_vector,
if (xsk_frames)
xsk_umem_complete_tx(umem, xsk_frames);
xmit_done = ixgbe_xmit_zc(tx_ring, q_vector->tx.work_limit);
return budget > 0 && xmit_done;
return ixgbe_xmit_zc(tx_ring, q_vector->tx.work_limit);
}
int ixgbe_xsk_async_xmit(struct net_device *dev, u32 qid)


@ -30,6 +30,7 @@
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/atomic.h>
#include <net/xfrm.h>
#include "ixgbevf.h"
@ -4161,7 +4162,7 @@ static int ixgbevf_xmit_frame_ring(struct sk_buff *skb,
first->protocol = vlan_get_protocol(skb);
#ifdef CONFIG_IXGBEVF_IPSEC
if (secpath_exists(skb) && !ixgbevf_ipsec_tx(tx_ring, first, &ipsec_tx))
if (xfrm_offload(skb) && !ixgbevf_ipsec_tx(tx_ring, first, &ipsec_tx))
goto out_drop;
#endif
tso = ixgbevf_tso(tx_ring, first, &hdr_len, &ipsec_tx);


@ -2240,7 +2240,7 @@ static int mlx4_validate_optimized_steering(struct mlx4_dev *dev)
for (i = 1; i <= dev->caps.num_ports; i++) {
if (mlx4_dev_port(dev, i, &port_cap)) {
mlx4_err(dev,
"QUERY_DEV_CAP command failed, can't veify DMFS high rate steering.\n");
"QUERY_DEV_CAP command failed, can't verify DMFS high rate steering.\n");
} else if ((dev->caps.dmfs_high_steer_mode !=
MLX4_STEERING_DMFS_A0_DEFAULT) &&
(port_cap.dmfs_optimized_state ==


@ -232,9 +232,9 @@ static int sonic_send_packet(struct sk_buff *skb, struct net_device *dev)
laddr = dma_map_single(lp->device, skb->data, length, DMA_TO_DEVICE);
if (!laddr) {
printk(KERN_ERR "%s: failed to map tx DMA buffer.\n", dev->name);
dev_kfree_skb(skb);
return NETDEV_TX_BUSY;
pr_err_ratelimited("%s: failed to map tx DMA buffer.\n", dev->name);
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
sonic_tda_put(dev, entry, SONIC_TD_STATUS, 0); /* clear status */


@ -260,9 +260,6 @@ nfp_flower_cmsg_process_one_rx(struct nfp_app *app, struct sk_buff *skb)
type = cmsg_hdr->type;
switch (type) {
case NFP_FLOWER_CMSG_TYPE_PORT_REIFY:
nfp_flower_cmsg_portreify_rx(app, skb);
break;
case NFP_FLOWER_CMSG_TYPE_PORT_MOD:
nfp_flower_cmsg_portmod_rx(app, skb);
break;
@ -328,8 +325,7 @@ nfp_flower_queue_ctl_msg(struct nfp_app *app, struct sk_buff *skb, int type)
struct nfp_flower_priv *priv = app->priv;
struct sk_buff_head *skb_head;
if (type == NFP_FLOWER_CMSG_TYPE_PORT_REIFY ||
type == NFP_FLOWER_CMSG_TYPE_PORT_MOD)
if (type == NFP_FLOWER_CMSG_TYPE_PORT_MOD)
skb_head = &priv->cmsg_skbs_high;
else
skb_head = &priv->cmsg_skbs_low;
@ -368,6 +364,10 @@ void nfp_flower_cmsg_rx(struct nfp_app *app, struct sk_buff *skb)
} else if (cmsg_hdr->type == NFP_FLOWER_CMSG_TYPE_TUN_NEIGH) {
/* Acks from the NFP that the route is added - ignore. */
dev_consume_skb_any(skb);
} else if (cmsg_hdr->type == NFP_FLOWER_CMSG_TYPE_PORT_REIFY) {
/* Handle REIFY acks outside wq to prevent RTNL conflict. */
nfp_flower_cmsg_portreify_rx(app, skb);
dev_consume_skb_any(skb);
} else {
nfp_flower_queue_ctl_msg(app, skb, cmsg_hdr->type);
}


@ -713,6 +713,21 @@ struct nv_skb_map {
struct nv_skb_map *next_tx_ctx;
};
struct nv_txrx_stats {
u64 stat_rx_packets;
u64 stat_rx_bytes; /* not always available in HW */
u64 stat_rx_missed_errors;
u64 stat_rx_dropped;
u64 stat_tx_packets; /* not always available in HW */
u64 stat_tx_bytes;
u64 stat_tx_dropped;
};
#define nv_txrx_stats_inc(member) \
__this_cpu_inc(np->txrx_stats->member)
#define nv_txrx_stats_add(member, count) \
__this_cpu_add(np->txrx_stats->member, (count))
/*
* SMP locking:
* All hardware access under netdev_priv(dev)->lock, except the performance
@ -797,10 +812,7 @@ struct fe_priv {
/* RX software stats */
struct u64_stats_sync swstats_rx_syncp;
u64 stat_rx_packets;
u64 stat_rx_bytes; /* not always available in HW */
u64 stat_rx_missed_errors;
u64 stat_rx_dropped;
struct nv_txrx_stats __percpu *txrx_stats;
/* media detection workaround.
* Locking: Within irq hander or disable_irq+spin_lock(&np->lock);
@ -826,9 +838,6 @@ struct fe_priv {
/* TX software stats */
struct u64_stats_sync swstats_tx_syncp;
u64 stat_tx_packets; /* not always available in HW */
u64 stat_tx_bytes;
u64 stat_tx_dropped;
/* msi/msi-x fields */
u32 msi_flags;
@ -1721,6 +1730,39 @@ static void nv_update_stats(struct net_device *dev)
}
}
static void nv_get_stats(int cpu, struct fe_priv *np,
struct rtnl_link_stats64 *storage)
{
struct nv_txrx_stats *src = per_cpu_ptr(np->txrx_stats, cpu);
unsigned int syncp_start;
u64 rx_packets, rx_bytes, rx_dropped, rx_missed_errors;
u64 tx_packets, tx_bytes, tx_dropped;
do {
syncp_start = u64_stats_fetch_begin_irq(&np->swstats_rx_syncp);
rx_packets = src->stat_rx_packets;
rx_bytes = src->stat_rx_bytes;
rx_dropped = src->stat_rx_dropped;
rx_missed_errors = src->stat_rx_missed_errors;
} while (u64_stats_fetch_retry_irq(&np->swstats_rx_syncp, syncp_start));
storage->rx_packets += rx_packets;
storage->rx_bytes += rx_bytes;
storage->rx_dropped += rx_dropped;
storage->rx_missed_errors += rx_missed_errors;
do {
syncp_start = u64_stats_fetch_begin_irq(&np->swstats_tx_syncp);
tx_packets = src->stat_tx_packets;
tx_bytes = src->stat_tx_bytes;
tx_dropped = src->stat_tx_dropped;
} while (u64_stats_fetch_retry_irq(&np->swstats_tx_syncp, syncp_start));
storage->tx_packets += tx_packets;
storage->tx_bytes += tx_bytes;
storage->tx_dropped += tx_dropped;
}
/*
* nv_get_stats64: dev->ndo_get_stats64 function
* Get latest stats value from the nic.
@ -1733,7 +1775,7 @@ nv_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *storage)
__releases(&netdev_priv(dev)->hwstats_lock)
{
struct fe_priv *np = netdev_priv(dev);
unsigned int syncp_start;
int cpu;
/*
* Note: because HW stats are not always available and for
@ -1746,20 +1788,8 @@ nv_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *storage)
*/
/* software stats */
do {
syncp_start = u64_stats_fetch_begin_irq(&np->swstats_rx_syncp);
storage->rx_packets = np->stat_rx_packets;
storage->rx_bytes = np->stat_rx_bytes;
storage->rx_dropped = np->stat_rx_dropped;
storage->rx_missed_errors = np->stat_rx_missed_errors;
} while (u64_stats_fetch_retry_irq(&np->swstats_rx_syncp, syncp_start));
do {
syncp_start = u64_stats_fetch_begin_irq(&np->swstats_tx_syncp);
storage->tx_packets = np->stat_tx_packets;
storage->tx_bytes = np->stat_tx_bytes;
storage->tx_dropped = np->stat_tx_dropped;
} while (u64_stats_fetch_retry_irq(&np->swstats_tx_syncp, syncp_start));
for_each_online_cpu(cpu)
nv_get_stats(cpu, np, storage);
/* If the nic supports hw counters then retrieve latest values */
if (np->driver_data & DEV_HAS_STATISTICS_V123) {
@ -1827,7 +1857,7 @@ static int nv_alloc_rx(struct net_device *dev)
} else {
packet_dropped:
u64_stats_update_begin(&np->swstats_rx_syncp);
np->stat_rx_dropped++;
nv_txrx_stats_inc(stat_rx_dropped);
u64_stats_update_end(&np->swstats_rx_syncp);
return 1;
}
@ -1869,7 +1899,7 @@ static int nv_alloc_rx_optimized(struct net_device *dev)
} else {
packet_dropped:
u64_stats_update_begin(&np->swstats_rx_syncp);
np->stat_rx_dropped++;
nv_txrx_stats_inc(stat_rx_dropped);
u64_stats_update_end(&np->swstats_rx_syncp);
return 1;
}
@ -2013,7 +2043,7 @@ static void nv_drain_tx(struct net_device *dev)
}
if (nv_release_txskb(np, &np->tx_skb[i])) {
u64_stats_update_begin(&np->swstats_tx_syncp);
np->stat_tx_dropped++;
nv_txrx_stats_inc(stat_tx_dropped);
u64_stats_update_end(&np->swstats_tx_syncp);
}
np->tx_skb[i].dma = 0;
@ -2227,7 +2257,7 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
/* on DMA mapping error - drop the packet */
dev_kfree_skb_any(skb);
u64_stats_update_begin(&np->swstats_tx_syncp);
np->stat_tx_dropped++;
nv_txrx_stats_inc(stat_tx_dropped);
u64_stats_update_end(&np->swstats_tx_syncp);
return NETDEV_TX_OK;
}
@ -2273,7 +2303,7 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
dev_kfree_skb_any(skb);
np->put_tx_ctx = start_tx_ctx;
u64_stats_update_begin(&np->swstats_tx_syncp);
np->stat_tx_dropped++;
nv_txrx_stats_inc(stat_tx_dropped);
u64_stats_update_end(&np->swstats_tx_syncp);
return NETDEV_TX_OK;
}
@ -2384,7 +2414,7 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
/* on DMA mapping error - drop the packet */
dev_kfree_skb_any(skb);
u64_stats_update_begin(&np->swstats_tx_syncp);
np->stat_tx_dropped++;
nv_txrx_stats_inc(stat_tx_dropped);
u64_stats_update_end(&np->swstats_tx_syncp);
return NETDEV_TX_OK;
}
@ -2431,7 +2461,7 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
dev_kfree_skb_any(skb);
np->put_tx_ctx = start_tx_ctx;
u64_stats_update_begin(&np->swstats_tx_syncp);
np->stat_tx_dropped++;
nv_txrx_stats_inc(stat_tx_dropped);
u64_stats_update_end(&np->swstats_tx_syncp);
return NETDEV_TX_OK;
}
@ -2560,9 +2590,12 @@ static int nv_tx_done(struct net_device *dev, int limit)
&& !(flags & NV_TX_RETRYCOUNT_MASK))
nv_legacybackoff_reseed(dev);
} else {
unsigned int len;
u64_stats_update_begin(&np->swstats_tx_syncp);
np->stat_tx_packets++;
np->stat_tx_bytes += np->get_tx_ctx->skb->len;
nv_txrx_stats_inc(stat_tx_packets);
len = np->get_tx_ctx->skb->len;
nv_txrx_stats_add(stat_tx_bytes, len);
u64_stats_update_end(&np->swstats_tx_syncp);
}
bytes_compl += np->get_tx_ctx->skb->len;
@ -2577,9 +2610,12 @@ static int nv_tx_done(struct net_device *dev, int limit)
&& !(flags & NV_TX2_RETRYCOUNT_MASK))
nv_legacybackoff_reseed(dev);
} else {
unsigned int len;
u64_stats_update_begin(&np->swstats_tx_syncp);
np->stat_tx_packets++;
np->stat_tx_bytes += np->get_tx_ctx->skb->len;
nv_txrx_stats_inc(stat_tx_packets);
len = np->get_tx_ctx->skb->len;
nv_txrx_stats_add(stat_tx_bytes, len);
u64_stats_update_end(&np->swstats_tx_syncp);
}
bytes_compl += np->get_tx_ctx->skb->len;
@ -2627,9 +2663,12 @@ static int nv_tx_done_optimized(struct net_device *dev, int limit)
nv_legacybackoff_reseed(dev);
}
} else {
unsigned int len;
u64_stats_update_begin(&np->swstats_tx_syncp);
np->stat_tx_packets++;
np->stat_tx_bytes += np->get_tx_ctx->skb->len;
nv_txrx_stats_inc(stat_tx_packets);
len = np->get_tx_ctx->skb->len;
nv_txrx_stats_add(stat_tx_bytes, len);
u64_stats_update_end(&np->swstats_tx_syncp);
}
@ -2806,6 +2845,15 @@ static int nv_getlen(struct net_device *dev, void *packet, int datalen)
}
}
static void rx_missing_handler(u32 flags, struct fe_priv *np)
{
if (flags & NV_RX_MISSEDFRAME) {
u64_stats_update_begin(&np->swstats_rx_syncp);
nv_txrx_stats_inc(stat_rx_missed_errors);
u64_stats_update_end(&np->swstats_rx_syncp);
}
}
static int nv_rx_process(struct net_device *dev, int limit)
{
struct fe_priv *np = netdev_priv(dev);
@ -2848,11 +2896,7 @@ static int nv_rx_process(struct net_device *dev, int limit)
}
/* the rest are hard errors */
else {
if (flags & NV_RX_MISSEDFRAME) {
u64_stats_update_begin(&np->swstats_rx_syncp);
np->stat_rx_missed_errors++;
u64_stats_update_end(&np->swstats_rx_syncp);
}
rx_missing_handler(flags, np);
dev_kfree_skb(skb);
goto next_pkt;
}
@ -2896,8 +2940,8 @@ static int nv_rx_process(struct net_device *dev, int limit)
skb->protocol = eth_type_trans(skb, dev);
napi_gro_receive(&np->napi, skb);
u64_stats_update_begin(&np->swstats_rx_syncp);
np->stat_rx_packets++;
np->stat_rx_bytes += len;
nv_txrx_stats_inc(stat_rx_packets);
nv_txrx_stats_add(stat_rx_bytes, len);
u64_stats_update_end(&np->swstats_rx_syncp);
next_pkt:
if (unlikely(np->get_rx.orig++ == np->last_rx.orig))
@ -2982,8 +3026,8 @@ static int nv_rx_process_optimized(struct net_device *dev, int limit)
}
napi_gro_receive(&np->napi, skb);
u64_stats_update_begin(&np->swstats_rx_syncp);
np->stat_rx_packets++;
np->stat_rx_bytes += len;
nv_txrx_stats_inc(stat_rx_packets);
nv_txrx_stats_add(stat_rx_bytes, len);
u64_stats_update_end(&np->swstats_rx_syncp);
} else {
dev_kfree_skb(skb);
@ -5651,6 +5695,12 @@ static int nv_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
SET_NETDEV_DEV(dev, &pci_dev->dev);
u64_stats_init(&np->swstats_rx_syncp);
u64_stats_init(&np->swstats_tx_syncp);
np->txrx_stats = alloc_percpu(struct nv_txrx_stats);
if (!np->txrx_stats) {
pr_err("np->txrx_stats, alloc memory error.\n");
err = -ENOMEM;
goto out_alloc_percpu;
}
timer_setup(&np->oom_kick, nv_do_rx_refill, 0);
timer_setup(&np->nic_poll, nv_do_nic_poll, 0);
@ -6060,6 +6110,8 @@ out_relreg:
out_disable:
pci_disable_device(pci_dev);
out_free:
free_percpu(np->txrx_stats);
out_alloc_percpu:
free_netdev(dev);
out:
return err;
@ -6105,6 +6157,9 @@ static void nv_restore_mac_addr(struct pci_dev *pci_dev)
static void nv_remove(struct pci_dev *pci_dev)
{
struct net_device *dev = pci_get_drvdata(pci_dev);
struct fe_priv *np = netdev_priv(dev);
free_percpu(np->txrx_stats);
unregister_netdev(dev);
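
The forcedeth rework above moves the software counters into per-CPU memory so
the hot paths stop bouncing a shared cacheline. A self-contained sketch of
that allocation pattern with generic names (not the driver's own types):

	struct my_stats { u64 rx_packets; };
	struct my_stats __percpu *stats;
	u64 total = 0;
	int cpu;

	stats = alloc_percpu(struct my_stats);	/* one instance per CPU */
	if (!stats)
		return -ENOMEM;

	__this_cpu_inc(stats->rx_packets);	/* hot path: local CPU only */

	for_each_online_cpu(cpu)		/* read side: fold all CPUs */
		total += per_cpu_ptr(stats, cpu)->rx_packets;

	free_percpu(stats);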


@ -873,7 +873,12 @@ static int sun8i_dwmac_set_syscon(struct stmmac_priv *priv)
int ret;
u32 reg, val;
regmap_field_read(gmac->regmap_field, &val);
ret = regmap_field_read(gmac->regmap_field, &val);
if (ret) {
dev_err(priv->device, "Fail to read from regmap field.\n");
return ret;
}
reg = gmac->variant->default_syscon_value;
if (reg != val)
dev_warn(priv->device,


@ -344,10 +344,10 @@ static void sp_bump(struct sixpack *sp, char cmd)
sp->dev->stats.rx_bytes += count;
if ((skb = dev_alloc_skb(count)) == NULL)
if ((skb = dev_alloc_skb(count + 1)) == NULL)
goto out_mem;
ptr = skb_put(skb, count);
ptr = skb_put(skb, count + 1);
*ptr++ = cmd; /* KISS command */
memcpy(ptr, sp->cooked_buf + 1, count);
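
The 6pack fix above (item 7 in the list) is a one-byte undercount: sp_bump()
stores one KISS command byte plus count payload bytes, so the skb must hold
count + 1 bytes, not count. Annotated from the diff:

	skb = dev_alloc_skb(count + 1);		/* 1 cmd byte + count payload */
	ptr = skb_put(skb, count + 1);		/* reserve exactly what is written */
	*ptr++ = cmd;				/* byte 0: KISS command */
	memcpy(ptr, sp->cooked_buf + 1, count);	/* bytes 1..count */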


@ -376,8 +376,8 @@ static void phylink_get_fixed_state(struct phylink *pl, struct phylink_link_stat
* Local device Link partner
* Pause AsymDir Pause AsymDir Result
* 1 X 1 X TX+RX
* 0 1 1 1 RX
* 1 1 0 1 TX
* 0 1 1 1 TX
* 1 1 0 1 RX
*/
static void phylink_resolve_flow(struct phylink *pl,
struct phylink_link_state *state)
@ -398,7 +398,7 @@ static void phylink_resolve_flow(struct phylink *pl,
new_pause = MLO_PAUSE_TX | MLO_PAUSE_RX;
else if (pause & MLO_PAUSE_ASYM)
new_pause = state->pause & MLO_PAUSE_SYM ?
MLO_PAUSE_RX : MLO_PAUSE_TX;
MLO_PAUSE_TX : MLO_PAUSE_RX;
} else {
new_pause = pl->link_config.pause & MLO_PAUSE_TXRX_MASK;
}


@ -787,7 +787,8 @@ static void tun_detach_all(struct net_device *dev)
}
static int tun_attach(struct tun_struct *tun, struct file *file,
bool skip_filter, bool napi, bool napi_frags)
bool skip_filter, bool napi, bool napi_frags,
bool publish_tun)
{
struct tun_file *tfile = file->private_data;
struct net_device *dev = tun->dev;
@ -870,7 +871,8 @@ static int tun_attach(struct tun_struct *tun, struct file *file,
* initialized tfile; otherwise we risk using half-initialized
* object.
*/
rcu_assign_pointer(tfile->tun, tun);
if (publish_tun)
rcu_assign_pointer(tfile->tun, tun);
rcu_assign_pointer(tun->tfiles[tun->numqueues], tfile);
tun->numqueues++;
tun_set_real_num_queues(tun);
@ -2730,7 +2732,7 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
err = tun_attach(tun, file, ifr->ifr_flags & IFF_NOFILTER,
ifr->ifr_flags & IFF_NAPI,
ifr->ifr_flags & IFF_NAPI_FRAGS);
ifr->ifr_flags & IFF_NAPI_FRAGS, true);
if (err < 0)
return err;
@ -2829,13 +2831,17 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
INIT_LIST_HEAD(&tun->disabled);
err = tun_attach(tun, file, false, ifr->ifr_flags & IFF_NAPI,
ifr->ifr_flags & IFF_NAPI_FRAGS);
ifr->ifr_flags & IFF_NAPI_FRAGS, false);
if (err < 0)
goto err_free_flow;
err = register_netdevice(tun->dev);
if (err < 0)
goto err_detach;
/* free_netdev() won't check refcnt; to avoid a race
 * with dev_put() we need to publish tun after registration.
*/
rcu_assign_pointer(tfile->tun, tun);
}
netif_carrier_on(tun->dev);
@ -2978,7 +2984,7 @@ static int tun_set_queue(struct file *file, struct ifreq *ifr)
if (ret < 0)
goto unlock;
ret = tun_attach(tun, file, false, tun->flags & IFF_NAPI,
tun->flags & IFF_NAPI_FRAGS);
tun->flags & IFF_NAPI_FRAGS, true);
} else if (ifr->ifr_flags & IFF_DETACH_QUEUE) {
tun = rtnl_dereference(tfile->tun);
if (!tun || !(tun->flags & IFF_MULTI_QUEUE) || tfile->detached)
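
The tun change above is a publication-ordering fix. rcu_assign_pointer() has
release semantics: a reader that observes the new pointer through
rcu_dereference() is also guaranteed to observe everything initialized before
the assignment. Condensed from the diff:

	err = register_netdevice(tun->dev);
	if (err < 0)
		goto err_detach;
	/* publish only after registration succeeds, so no RCU reader can
	 * reach a device that free_netdev() might still tear down */
	rcu_assign_pointer(tfile->tun, tun);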


@ -206,7 +206,15 @@ int usbnet_generic_cdc_bind(struct usbnet *dev, struct usb_interface *intf)
goto bad_desc;
}
skip:
if (rndis && header.usb_cdc_acm_descriptor &&
/* Communication class functions with bmCapabilities are not
* RNDIS. But some Wireless class RNDIS functions use
* bmCapabilities for their own purpose. The failsafe is
* therefore applied only to Communication class RNDIS
* functions. The rndis test is redundant, but a cheap
* optimization.
*/
if (rndis && is_rndis(&intf->cur_altsetting->desc) &&
header.usb_cdc_acm_descriptor &&
header.usb_cdc_acm_descriptor->bmCapabilities) {
dev_dbg(&intf->dev,
"ACM capabilities %02x, not really RNDIS?\n",


@ -1115,7 +1115,7 @@ static void lmc_running_reset (struct net_device *dev) /*fold00*/
sc->lmc_cmdmode |= (TULIP_CMD_TXRUN | TULIP_CMD_RXRUN);
LMC_CSR_WRITE (sc, csr_command, sc->lmc_cmdmode);
lmc_trace(dev, "lmc_runnin_reset_out");
lmc_trace(dev, "lmc_running_reset_out");
}


@ -127,6 +127,7 @@ int i2400m_op_rfkill_sw_toggle(struct wimax_dev *wimax_dev,
"%d\n", result);
result = 0;
error_cmd:
kfree(cmd);
kfree_skb(ack_skb);
error_msg_to_dev:
error_alloc:


@ -1070,18 +1070,18 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* same thing for QuZ... */
if (iwl_trans->hw_rev == CSR_HW_REV_TYPE_QUZ) {
if (cfg == &iwl_ax101_cfg_qu_hr)
cfg = &iwl_ax101_cfg_quz_hr;
else if (cfg == &iwl_ax201_cfg_qu_hr)
cfg = &iwl_ax201_cfg_quz_hr;
else if (cfg == &iwl9461_2ac_cfg_qu_b0_jf_b0)
cfg = &iwl9461_2ac_cfg_quz_a0_jf_b0_soc;
else if (cfg == &iwl9462_2ac_cfg_qu_b0_jf_b0)
cfg = &iwl9462_2ac_cfg_quz_a0_jf_b0_soc;
else if (cfg == &iwl9560_2ac_cfg_qu_b0_jf_b0)
cfg = &iwl9560_2ac_cfg_quz_a0_jf_b0_soc;
else if (cfg == &iwl9560_2ac_160_cfg_qu_b0_jf_b0)
cfg = &iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc;
if (iwl_trans->cfg == &iwl_ax101_cfg_qu_hr)
iwl_trans->cfg = &iwl_ax101_cfg_quz_hr;
else if (iwl_trans->cfg == &iwl_ax201_cfg_qu_hr)
iwl_trans->cfg = &iwl_ax201_cfg_quz_hr;
else if (iwl_trans->cfg == &iwl9461_2ac_cfg_qu_b0_jf_b0)
iwl_trans->cfg = &iwl9461_2ac_cfg_quz_a0_jf_b0_soc;
else if (iwl_trans->cfg == &iwl9462_2ac_cfg_qu_b0_jf_b0)
iwl_trans->cfg = &iwl9462_2ac_cfg_quz_a0_jf_b0_soc;
else if (iwl_trans->cfg == &iwl9560_2ac_cfg_qu_b0_jf_b0)
iwl_trans->cfg = &iwl9560_2ac_cfg_quz_a0_jf_b0_soc;
else if (iwl_trans->cfg == &iwl9560_2ac_160_cfg_qu_b0_jf_b0)
iwl_trans->cfg = &iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc;
}
#endif


@ -241,6 +241,9 @@ static int mwifiex_update_vs_ie(const u8 *ies, int ies_len,
}
vs_ie = (struct ieee_types_header *)vendor_ie;
if (le16_to_cpu(ie->ie_length) + vs_ie->len + 2 >
IEEE_MAX_IE_SIZE)
return -EINVAL;
memcpy(ie->ie_buffer + le16_to_cpu(ie->ie_length),
vs_ie, vs_ie->len + 2);
le16_unaligned_add_cpu(&ie->ie_length, vs_ie->len + 2);


@ -265,6 +265,8 @@ mwifiex_set_uap_rates(struct mwifiex_uap_bss_param *bss_cfg,
rate_ie = (void *)cfg80211_find_ie(WLAN_EID_SUPP_RATES, var_pos, len);
if (rate_ie) {
if (rate_ie->len > MWIFIEX_SUPPORTED_RATES)
return;
memcpy(bss_cfg->rates, rate_ie + 1, rate_ie->len);
rate_len = rate_ie->len;
}
@ -272,8 +274,11 @@ mwifiex_set_uap_rates(struct mwifiex_uap_bss_param *bss_cfg,
rate_ie = (void *)cfg80211_find_ie(WLAN_EID_EXT_SUPP_RATES,
params->beacon.tail,
params->beacon.tail_len);
if (rate_ie)
if (rate_ie) {
if (rate_ie->len > MWIFIEX_SUPPORTED_RATES - rate_len)
return;
memcpy(bss_cfg->rates + rate_len, rate_ie + 1, rate_ie->len);
}
return;
}
@ -391,6 +396,8 @@ mwifiex_set_wmm_params(struct mwifiex_private *priv,
params->beacon.tail_len);
if (vendor_ie) {
wmm_ie = vendor_ie;
if (*(wmm_ie + 1) > sizeof(struct mwifiex_types_wmm_info))
return;
memcpy(&bss_cfg->wmm_info, wmm_ie +
sizeof(struct ieee_types_header), *(wmm_ie + 1));
priv->wmm_enabled = 1;
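
The mwifiex hunks above all enforce the same rule: check an IE length taken
from the air against the space left in the destination before the memcpy(),
never after. In generic form (hypothetical buffer names, not mwifiex's):

	if (src_len > sizeof(dst->buf) - dst->used)	/* would overflow dst */
		return -EINVAL;				/* reject, don't truncate */
	memcpy(dst->buf + dst->used, src, src_len);
	dst->used += src_len;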


@ -59,6 +59,11 @@ static void mt76x0_set_chip_cap(struct mt76x02_dev *dev)
dev_dbg(dev->mt76.dev, "mask out 2GHz support\n");
}
if (is_mt7630(dev)) {
dev->mt76.cap.has_5ghz = false;
dev_dbg(dev->mt76.dev, "mask out 5GHz support\n");
}
if (!mt76x02_field_valid(nic_conf1 & 0xff))
nic_conf1 &= 0xff00;


@ -62,6 +62,19 @@ static void mt76x0e_stop(struct ieee80211_hw *hw)
mt76x0e_stop_hw(dev);
}
static int
mt76x0e_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
struct ieee80211_vif *vif, struct ieee80211_sta *sta,
struct ieee80211_key_conf *key)
{
struct mt76x02_dev *dev = hw->priv;
if (is_mt7630(dev))
return -EOPNOTSUPP;
return mt76x02_set_key(hw, cmd, vif, sta, key);
}
static void
mt76x0e_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
u32 queues, bool drop)
@ -78,7 +91,7 @@ static const struct ieee80211_ops mt76x0e_ops = {
.configure_filter = mt76x02_configure_filter,
.bss_info_changed = mt76x02_bss_info_changed,
.sta_state = mt76_sta_state,
.set_key = mt76x02_set_key,
.set_key = mt76x0e_set_key,
.conf_tx = mt76x02_conf_tx,
.sw_scan_start = mt76x02_sw_scan,
.sw_scan_complete = mt76x02_sw_scan_complete,


@ -1654,13 +1654,18 @@ static void rt2800_config_wcid_attr_cipher(struct rt2x00_dev *rt2x00dev,
offset = MAC_IVEIV_ENTRY(key->hw_key_idx);
rt2800_register_multiread(rt2x00dev, offset,
&iveiv_entry, sizeof(iveiv_entry));
if ((crypto->cipher == CIPHER_TKIP) ||
(crypto->cipher == CIPHER_TKIP_NO_MIC) ||
(crypto->cipher == CIPHER_AES))
iveiv_entry.iv[3] |= 0x20;
iveiv_entry.iv[3] |= key->keyidx << 6;
if (crypto->cmd == SET_KEY) {
rt2800_register_multiread(rt2x00dev, offset,
&iveiv_entry, sizeof(iveiv_entry));
if ((crypto->cipher == CIPHER_TKIP) ||
(crypto->cipher == CIPHER_TKIP_NO_MIC) ||
(crypto->cipher == CIPHER_AES))
iveiv_entry.iv[3] |= 0x20;
iveiv_entry.iv[3] |= key->keyidx << 6;
} else {
memset(&iveiv_entry, 0, sizeof(iveiv_entry));
}
rt2800_register_multiwrite(rt2x00dev, offset,
&iveiv_entry, sizeof(iveiv_entry));
}
@ -4237,24 +4242,18 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
switch (rt2x00dev->default_ant.rx_chain_num) {
case 3:
/* Turn on tertiary LNAs */
rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_A2_EN,
rf->channel > 14);
rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_G2_EN,
rf->channel <= 14);
rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_A2_EN, 1);
rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_G2_EN, 1);
/* fall-through */
case 2:
/* Turn on secondary LNAs */
rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_A1_EN,
rf->channel > 14);
rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_G1_EN,
rf->channel <= 14);
rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_A1_EN, 1);
rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_G1_EN, 1);
/* fall-through */
case 1:
/* Turn on primary LNAs */
rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_A0_EN,
rf->channel > 14);
rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_G0_EN,
rf->channel <= 14);
rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_A0_EN, 1);
rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_G0_EN, 1);
break;
}


@ -645,7 +645,6 @@ fail_rx:
kfree(rsi_dev->tx_buffer);
fail_eps:
kfree(rsi_dev);
return status;
}


@ -316,7 +316,7 @@ static int st95hf_echo_command(struct st95hf_context *st95context)
&echo_response);
if (result) {
dev_err(&st95context->spicontext.spidev->dev,
"err: echo response receieve error = 0x%x\n", result);
"err: echo response receive error = 0x%x\n", result);
return result;
}


@ -11,6 +11,7 @@ struct fixed_phy_status {
};
struct device_node;
struct gpio_desc;
#if IS_ENABLED(CONFIG_FIXED_PHY)
extern int fixed_phy_change_carrier(struct net_device *dev, bool new_carrier);


@ -513,7 +513,7 @@ int ip_valid_fib_dump_req(struct net *net, const struct nlmsghdr *nlh,
struct netlink_callback *cb);
int fib_nexthop_info(struct sk_buff *skb, const struct fib_nh_common *nh,
unsigned char *flags, bool skip_oif);
u8 rt_family, unsigned char *flags, bool skip_oif);
int fib_add_nexthop(struct sk_buff *skb, const struct fib_nh_common *nh,
int nh_weight);
int nh_weight, u8 rt_family);
#endif /* _NET_FIB_H */


@ -161,7 +161,8 @@ struct nexthop *nexthop_mpath_select(const struct nexthop *nh, int nhsel)
}
static inline
int nexthop_mpath_fill_node(struct sk_buff *skb, struct nexthop *nh)
int nexthop_mpath_fill_node(struct sk_buff *skb, struct nexthop *nh,
u8 rt_family)
{
struct nh_group *nhg = rtnl_dereference(nh->nh_grp);
int i;
@ -172,7 +173,7 @@ int nexthop_mpath_fill_node(struct sk_buff *skb, struct nexthop *nh)
struct fib_nh_common *nhc = &nhi->fib_nhc;
int weight = nhg->nh_entries[i].weight;
if (fib_add_nexthop(skb, nhc, weight) < 0)
if (fib_add_nexthop(skb, nhc, weight, rt_family) < 0)
return -EMSGSIZE;
}


@ -983,7 +983,6 @@ static inline void xfrm_dst_destroy(struct xfrm_dst *xdst)
void xfrm_dst_ifdown(struct dst_entry *dst, struct net_device *dev);
struct xfrm_if_parms {
char name[IFNAMSIZ]; /* name of XFRM device */
int link; /* ifindex of underlying L2 interface */
u32 if_id; /* interface identifier */
};
@ -991,7 +990,6 @@ struct xfrm_if_parms {
struct xfrm_if {
struct xfrm_if __rcu *next; /* next interface in list */
struct net_device *dev; /* virtual device associated with interface */
struct net_device *phydev; /* physical device */
struct net *net; /* netns for packet i/o */
struct xfrm_if_parms p; /* interface parms */


@ -16,6 +16,7 @@
#define CAPI_MSG_BASELEN 8
#define CAPI_DATA_B3_REQ_LEN (CAPI_MSG_BASELEN+4+4+2+2+2)
#define CAPI_DATA_B3_RESP_LEN (CAPI_MSG_BASELEN+4+2)
#define CAPI_DISCONNECT_B3_RESP_LEN (CAPI_MSG_BASELEN+4)
/*----- CAPI commands -----*/
#define CAPI_ALERT 0x01


@ -1772,16 +1772,21 @@ static int __mark_chain_precision(struct bpf_verifier_env *env, int regno,
bitmap_from_u64(mask, stack_mask);
for_each_set_bit(i, mask, 64) {
if (i >= func->allocated_stack / BPF_REG_SIZE) {
/* This can happen if backtracking
* is propagating stack precision where
* caller has larger stack frame
* than callee, but backtrack_insn() should
* have returned -ENOTSUPP.
/* the sequence of instructions:
* 2: (bf) r3 = r10
* 3: (7b) *(u64 *)(r3 -8) = r0
* 4: (79) r4 = *(u64 *)(r10 -8)
* doesn't contain jmps. It's backtracked
* as a single block.
* During backtracking insn 3 is not recognized as
* stack access, so at the end of backtracking
* stack slot fp-8 is still marked in stack_mask.
* However the parent state may not have accessed
* fp-8 and it's "unallocated" stack space.
* In such case fallback to conservative.
*/
verbose(env, "BUG spi %d stack_size %d\n",
i, func->allocated_stack);
WARN_ONCE(1, "verifier backtracking bug");
return -EFAULT;
mark_all_scalars_precise(env, st);
return 0;
}
if (func->stack[i].slot_type[0] != STACK_SPILL) {


@ -631,6 +631,9 @@ config SBITMAP
config PARMAN
tristate "parman" if COMPILE_TEST
config OBJAGG
tristate "objagg" if COMPILE_TEST
config STRING_SELFTEST
tristate "Test string functions"
@ -653,6 +656,3 @@ config GENERIC_LIB_CMPDI2
config GENERIC_LIB_UCMPDI2
bool
config OBJAGG
tristate "objagg" if COMPILE_TEST


@ -5660,11 +5660,6 @@ static void hci_le_remote_conn_param_req_evt(struct hci_dev *hdev,
return send_conn_param_neg_reply(hdev, handle,
HCI_ERROR_UNKNOWN_CONN_ID);
if (min < hcon->le_conn_min_interval ||
max > hcon->le_conn_max_interval)
return send_conn_param_neg_reply(hdev, handle,
HCI_ERROR_INVALID_LL_PARAMS);
if (hci_check_conn_params(min, max, latency, timeout))
return send_conn_param_neg_reply(hdev, handle,
HCI_ERROR_INVALID_LL_PARAMS);


@ -5305,14 +5305,7 @@ static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
memset(&rsp, 0, sizeof(rsp));
if (min < hcon->le_conn_min_interval ||
max > hcon->le_conn_max_interval) {
BT_DBG("requested connection interval exceeds current bounds.");
err = -EINVAL;
} else {
err = hci_check_conn_params(min, max, latency, to_multiplier);
}
err = hci_check_conn_params(min, max, latency, to_multiplier);
if (err)
rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
else


@ -437,7 +437,7 @@ static int nlmsg_populate_rtr_fill(struct sk_buff *skb,
struct nlmsghdr *nlh;
struct nlattr *nest;
nlh = nlmsg_put(skb, pid, seq, type, sizeof(*bpm), NLM_F_MULTI);
nlh = nlmsg_put(skb, pid, seq, type, sizeof(*bpm), 0);
if (!nlh)
return -EMSGSIZE;


@ -496,6 +496,10 @@ static unsigned int br_nf_pre_routing(void *priv,
if (!brnet->call_ip6tables &&
!br_opt_get(br, BROPT_NF_CALL_IP6TABLES))
return NF_ACCEPT;
if (!ipv6_mod_enabled()) {
pr_warn_once("Module ipv6 is disabled, so call_ip6tables is not supported.");
return NF_DROP;
}
nf_bridge_pull_encap_header_rcsum(skb);
return br_nf_pre_routing_ipv6(priv, skb, state);


@ -8758,6 +8758,8 @@ int register_netdevice(struct net_device *dev)
ret = notifier_to_errno(ret);
if (ret) {
rollback_registered(dev);
rcu_barrier();
dev->reg_state = NETREG_UNREGISTERED;
}
/*
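
Fix 12 from the list is the rcu_barrier() added above. The distinction it
relies on, as a generic sketch (not the netdev code itself): synchronize_rcu()
only waits for readers to exit their critical sections, while rcu_barrier()
additionally waits for every already-queued call_rcu() callback to finish
executing, which is what makes resetting the registration state safe here:

	call_rcu(&obj->rcu_head, obj_free_rcu);	/* callback now pending */
	...
	rcu_barrier();	/* returns only after obj_free_rcu() has run */
	/* nothing queued via call_rcu() can still reference obj */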


@ -3664,6 +3664,25 @@ struct sk_buff *skb_segment(struct sk_buff *head_skb,
int pos;
int dummy;
if (list_skb && !list_skb->head_frag && skb_headlen(list_skb) &&
(skb_shinfo(head_skb)->gso_type & SKB_GSO_DODGY)) {
/* gso_size is untrusted, and we have a frag_list with a linear
* non head_frag head.
*
* (we assume checking the first list_skb member suffices;
* i.e if either of the list_skb members have non head_frag
* head, then the first one has too).
*
* If head_skb's headlen does not fit requested gso_size, it
* means that the frag_list members do NOT terminate on exact
* gso_size boundaries. Hence we cannot perform skb_frag_t page
* sharing. Therefore we must fallback to copying the frag_list
* skbs; we do so by disabling SG.
*/
if (mss != GSO_BY_FRAGS && mss != skb_headlen(head_skb))
features &= ~NETIF_F_SG;
}
__skb_push(head_skb, doffset);
proto = skb_network_protocol(head_skb, &dummy);
if (unlikely(!proto))


@ -656,6 +656,7 @@ static int sock_hash_update_common(struct bpf_map *map, void *key,
struct sock *sk, u64 flags)
{
struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
struct inet_connection_sock *icsk = inet_csk(sk);
u32 key_size = map->key_size, hash;
struct bpf_htab_elem *elem, *elem_new;
struct bpf_htab_bucket *bucket;
@ -666,6 +667,8 @@ static int sock_hash_update_common(struct bpf_map *map, void *key,
WARN_ON_ONCE(!rcu_read_lock_held());
if (unlikely(flags > BPF_EXIST))
return -EINVAL;
if (unlikely(icsk->icsk_ulp_data))
return -EINVAL;
link = sk_psock_init_link();
if (!link)


@ -1582,7 +1582,7 @@ failure:
}
int fib_nexthop_info(struct sk_buff *skb, const struct fib_nh_common *nhc,
unsigned char *flags, bool skip_oif)
u8 rt_family, unsigned char *flags, bool skip_oif)
{
if (nhc->nhc_flags & RTNH_F_DEAD)
*flags |= RTNH_F_DEAD;
@ -1613,7 +1613,7 @@ int fib_nexthop_info(struct sk_buff *skb, const struct fib_nh_common *nhc,
/* if gateway family does not match nexthop family
* gateway is encoded as RTA_VIA
*/
if (nhc->nhc_gw_family != nhc->nhc_family) {
if (rt_family != nhc->nhc_gw_family) {
int alen = sizeof(struct in6_addr);
struct nlattr *nla;
struct rtvia *via;
@ -1654,7 +1654,7 @@ EXPORT_SYMBOL_GPL(fib_nexthop_info);
#if IS_ENABLED(CONFIG_IP_ROUTE_MULTIPATH) || IS_ENABLED(CONFIG_IPV6)
int fib_add_nexthop(struct sk_buff *skb, const struct fib_nh_common *nhc,
int nh_weight)
int nh_weight, u8 rt_family)
{
const struct net_device *dev = nhc->nhc_dev;
struct rtnexthop *rtnh;
@ -1667,7 +1667,7 @@ int fib_add_nexthop(struct sk_buff *skb, const struct fib_nh_common *nhc,
rtnh->rtnh_hops = nh_weight - 1;
rtnh->rtnh_ifindex = dev ? dev->ifindex : 0;
if (fib_nexthop_info(skb, nhc, &flags, true) < 0)
if (fib_nexthop_info(skb, nhc, rt_family, &flags, true) < 0)
goto nla_put_failure;
rtnh->rtnh_flags = flags;
@ -1693,13 +1693,14 @@ static int fib_add_multipath(struct sk_buff *skb, struct fib_info *fi)
goto nla_put_failure;
if (unlikely(fi->nh)) {
if (nexthop_mpath_fill_node(skb, fi->nh) < 0)
if (nexthop_mpath_fill_node(skb, fi->nh, AF_INET) < 0)
goto nla_put_failure;
goto mp_end;
}
for_nexthops(fi) {
if (fib_add_nexthop(skb, &nh->nh_common, nh->fib_nh_weight) < 0)
if (fib_add_nexthop(skb, &nh->nh_common, nh->fib_nh_weight,
AF_INET) < 0)
goto nla_put_failure;
#ifdef CONFIG_IP_ROUTE_CLASSID
if (nh->nh_tclassid &&
@ -1775,7 +1776,7 @@ int fib_dump_info(struct sk_buff *skb, u32 portid, u32 seq, int event,
const struct fib_nh_common *nhc = fib_info_nhc(fi, 0);
unsigned char flags = 0;
if (fib_nexthop_info(skb, nhc, &flags, false) < 0)
if (fib_nexthop_info(skb, nhc, AF_INET, &flags, false) < 0)
goto nla_put_failure;
rtm->rtm_flags = flags;


@ -266,7 +266,7 @@ static void tcp_ecn_accept_cwr(struct sock *sk, const struct sk_buff *skb)
static void tcp_ecn_withdraw_cwr(struct tcp_sock *tp)
{
tp->ecn_flags &= ~TCP_ECN_DEMAND_CWR;
tp->ecn_flags &= ~TCP_ECN_QUEUE_CWR;
}
static void __tcp_ecn_check_ce(struct sock *sk, const struct sk_buff *skb)


@ -223,7 +223,7 @@ static int __net_init ping_v6_proc_init_net(struct net *net)
return 0;
}
static void __net_init ping_v6_proc_exit_net(struct net *net)
static void __net_exit ping_v6_proc_exit_net(struct net *net)
{
remove_proc_entry("icmp6", net->proc_net);
}


@ -4388,13 +4388,14 @@ struct fib6_info *addrconf_f6i_alloc(struct net *net,
struct fib6_config cfg = {
.fc_table = l3mdev_fib_table(idev->dev) ? : RT6_TABLE_LOCAL,
.fc_ifindex = idev->dev->ifindex,
.fc_flags = RTF_UP | RTF_ADDRCONF | RTF_NONEXTHOP,
.fc_flags = RTF_UP | RTF_NONEXTHOP,
.fc_dst = *addr,
.fc_dst_len = 128,
.fc_protocol = RTPROT_KERNEL,
.fc_nlinfo.nl_net = net,
.fc_ignore_dev_down = true,
};
struct fib6_info *f6i;
if (anycast) {
cfg.fc_type = RTN_ANYCAST;
@ -4404,7 +4405,10 @@ struct fib6_info *addrconf_f6i_alloc(struct net *net,
cfg.fc_flags |= RTF_LOCAL;
}
return ip6_route_info_create(&cfg, gfp_flags, NULL);
f6i = ip6_route_info_create(&cfg, gfp_flags, NULL);
if (!IS_ERR(f6i))
f6i->dst_nocount = true;
return f6i;
}
/* remove deleted ip from prefsrc entries */
@ -5325,11 +5329,11 @@ static int rt6_fill_node_nexthop(struct sk_buff *skb, struct nexthop *nh,
if (nexthop_is_multipath(nh)) {
struct nlattr *mp;
mp = nla_nest_start(skb, RTA_MULTIPATH);
mp = nla_nest_start_noflag(skb, RTA_MULTIPATH);
if (!mp)
goto nla_put_failure;
if (nexthop_mpath_fill_node(skb, nh))
if (nexthop_mpath_fill_node(skb, nh, AF_INET6))
goto nla_put_failure;
nla_nest_end(skb, mp);
@ -5337,7 +5341,7 @@ static int rt6_fill_node_nexthop(struct sk_buff *skb, struct nexthop *nh,
struct fib6_nh *fib6_nh;
fib6_nh = nexthop_fib6_nh(nh);
if (fib_nexthop_info(skb, &fib6_nh->nh_common,
if (fib_nexthop_info(skb, &fib6_nh->nh_common, AF_INET6,
flags, false) < 0)
goto nla_put_failure;
}
@ -5466,13 +5470,14 @@ static int rt6_fill_node(struct net *net, struct sk_buff *skb,
goto nla_put_failure;
if (fib_add_nexthop(skb, &rt->fib6_nh->nh_common,
rt->fib6_nh->fib_nh_weight) < 0)
rt->fib6_nh->fib_nh_weight, AF_INET6) < 0)
goto nla_put_failure;
list_for_each_entry_safe(sibling, next_sibling,
&rt->fib6_siblings, fib6_siblings) {
if (fib_add_nexthop(skb, &sibling->fib6_nh->nh_common,
sibling->fib6_nh->fib_nh_weight) < 0)
sibling->fib6_nh->fib_nh_weight,
AF_INET6) < 0)
goto nla_put_failure;
}
@ -5489,7 +5494,7 @@ static int rt6_fill_node(struct net *net, struct sk_buff *skb,
rtm->rtm_flags |= nh_flags;
} else {
if (fib_nexthop_info(skb, &rt->fib6_nh->nh_common,
if (fib_nexthop_info(skb, &rt->fib6_nh->nh_common, AF_INET6,
&nh_flags, false) < 0)
goto nla_put_failure;


@ -1529,7 +1529,6 @@ static int ieee80211_add_station(struct wiphy *wiphy, struct net_device *dev,
struct sta_info *sta;
struct ieee80211_sub_if_data *sdata;
int err;
int layer2_update;
if (params->vlan) {
sdata = IEEE80211_DEV_TO_SUB_IF(params->vlan);
@ -1573,18 +1572,12 @@ static int ieee80211_add_station(struct wiphy *wiphy, struct net_device *dev,
test_sta_flag(sta, WLAN_STA_ASSOC))
rate_control_rate_init(sta);
layer2_update = sdata->vif.type == NL80211_IFTYPE_AP_VLAN ||
sdata->vif.type == NL80211_IFTYPE_AP;
err = sta_info_insert_rcu(sta);
if (err) {
rcu_read_unlock();
return err;
}
if (layer2_update)
cfg80211_send_layer2_update(sta->sdata->dev, sta->sta.addr);
rcu_read_unlock();
return 0;
@ -1682,10 +1675,11 @@ static int ieee80211_change_station(struct wiphy *wiphy,
sta->sdata = vlansdata;
ieee80211_check_fast_xmit(sta);
if (test_sta_flag(sta, WLAN_STA_AUTHORIZED))
if (test_sta_flag(sta, WLAN_STA_AUTHORIZED)) {
ieee80211_vif_inc_num_mcast(sta->sdata);
cfg80211_send_layer2_update(sta->sdata->dev, sta->sta.addr);
cfg80211_send_layer2_update(sta->sdata->dev,
sta->sta.addr);
}
}
err = sta_apply_parameters(local, sta, params);


@ -1979,6 +1979,10 @@ int sta_info_move_state(struct sta_info *sta,
ieee80211_check_fast_xmit(sta);
ieee80211_check_fast_rx(sta);
}
if (sta->sdata->vif.type == NL80211_IFTYPE_AP_VLAN ||
sta->sdata->vif.type == NL80211_IFTYPE_AP)
cfg80211_send_layer2_update(sta->sdata->dev,
sta->sta.addr);
break;
default:
break;


@ -553,10 +553,8 @@ ctnetlink_fill_info(struct sk_buff *skb, u32 portid, u32 seq, u32 type,
goto nla_put_failure;
if (ctnetlink_dump_status(skb, ct) < 0 ||
ctnetlink_dump_timeout(skb, ct) < 0 ||
ctnetlink_dump_acct(skb, ct, type) < 0 ||
ctnetlink_dump_timestamp(skb, ct) < 0 ||
ctnetlink_dump_protoinfo(skb, ct) < 0 ||
ctnetlink_dump_helpinfo(skb, ct) < 0 ||
ctnetlink_dump_mark(skb, ct) < 0 ||
ctnetlink_dump_secctx(skb, ct) < 0 ||
@ -568,6 +566,11 @@ ctnetlink_fill_info(struct sk_buff *skb, u32 portid, u32 seq, u32 type,
ctnetlink_dump_ct_synproxy(skb, ct) < 0)
goto nla_put_failure;
if (!test_bit(IPS_OFFLOAD_BIT, &ct->status) &&
(ctnetlink_dump_timeout(skb, ct) < 0 ||
ctnetlink_dump_protoinfo(skb, ct) < 0))
goto nla_put_failure;
nlmsg_end(skb, nlh);
return skb->len;


@ -217,7 +217,7 @@ int flow_offload_add(struct nf_flowtable *flow_table, struct flow_offload *flow)
return err;
}
flow->timeout = (u32)jiffies;
flow->timeout = (u32)jiffies + NF_FLOW_TIMEOUT;
return 0;
}
EXPORT_SYMBOL_GPL(flow_offload_add);
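
The one-line flowtable fix above changes the stored value from "time the flow
was added" to the deadline the aging logic later compares against. The usual
wrap-safe jiffies idiom, sketched with the names from the diff:

	flow->timeout = (u32)jiffies + NF_FLOW_TIMEOUT;	/* absolute expiry */
	...
	expired = (s32)(flow->timeout - (u32)jiffies) <= 0;
	/* with bare jiffies stored (the old code), every flow was already
	 * expired the moment it was offloaded */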


@ -14,6 +14,7 @@
#include <linux/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables_core.h>
#include <net/netfilter/nf_tables.h>
#include <net/ipv6.h>
#include <net/netfilter/nft_fib.h>
@ -34,6 +35,8 @@ static void nft_fib_netdev_eval(const struct nft_expr *expr,
}
break;
case ETH_P_IPV6:
if (!ipv6_mod_enabled())
break;
switch (priv->result) {
case NFT_FIB_RESULT_OIF:
case NFT_FIB_RESULT_OIFNAME:


@ -47,9 +47,6 @@ static void nft_socket_eval(const struct nft_expr *expr,
return;
}
/* So that subsequent socket matching not to require other lookups. */
skb->sk = sk;
switch(priv->key) {
case NFT_SOCKET_TRANSPARENT:
nft_reg_store8(dest, inet_sk_transparent(sk));
@ -66,6 +63,9 @@ static void nft_socket_eval(const struct nft_expr *expr,
WARN_ON(1);
regs->verdict.code = NFT_BREAK;
}
if (sk != skb->sk)
sock_gen_put(sk);
}
static const struct nla_policy nft_socket_policy[NFTA_SOCKET_MAX + 1] = {


@ -84,11 +84,14 @@ static ssize_t qrtr_tun_write_iter(struct kiocb *iocb, struct iov_iter *from)
if (!kbuf)
return -ENOMEM;
if (!copy_from_iter_full(kbuf, len, from))
if (!copy_from_iter_full(kbuf, len, from)) {
kfree(kbuf);
return -EFAULT;
}
ret = qrtr_endpoint_post(&tun->ep, kbuf, len);
kfree(kbuf);
return ret < 0 ? ret : len;
}


@ -1,5 +1,5 @@
/*
* Copyright (c) 2006, 2018 Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2006, 2019 Oracle and/or its affiliates. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@ -239,34 +239,30 @@ int rds_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
goto out;
}
sock_set_flag(sk, SOCK_RCU_FREE);
ret = rds_add_bound(rs, binding_addr, &port, scope_id);
if (ret)
goto out;
if (rs->rs_transport) { /* previously bound */
/* The transport can be set using SO_RDS_TRANSPORT option before the
* socket is bound.
*/
if (rs->rs_transport) {
trans = rs->rs_transport;
if (trans->laddr_check(sock_net(sock->sk),
binding_addr, scope_id) != 0) {
ret = -ENOPROTOOPT;
rds_remove_bound(rs);
} else {
ret = 0;
goto out;
}
goto out;
}
trans = rds_trans_get_preferred(sock_net(sock->sk), binding_addr,
scope_id);
if (!trans) {
ret = -EADDRNOTAVAIL;
rds_remove_bound(rs);
pr_info_ratelimited("RDS: %s could not find a transport for %pI6c, load rds_tcp or rds_rdma?\n",
__func__, binding_addr);
goto out;
} else {
trans = rds_trans_get_preferred(sock_net(sock->sk),
binding_addr, scope_id);
if (!trans) {
ret = -EADDRNOTAVAIL;
pr_info_ratelimited("RDS: %s could not find a transport for %pI6c, load rds_tcp or rds_rdma?\n",
__func__, binding_addr);
goto out;
}
rs->rs_transport = trans;
}
rs->rs_transport = trans;
ret = 0;
sock_set_flag(sk, SOCK_RCU_FREE);
ret = rds_add_bound(rs, binding_addr, &port, scope_id);
out:
release_sock(sk);


@ -1262,8 +1262,8 @@ int rxrpc_input_packet(struct sock *udp_sk, struct sk_buff *skb)
if (nskb != skb) {
rxrpc_eaten_skb(skb, rxrpc_skb_received);
rxrpc_new_skb(skb, rxrpc_skb_unshared);
skb = nskb;
rxrpc_new_skb(skb, rxrpc_skb_unshared);
sp = rxrpc_skb(skb);
}
}


@ -1920,6 +1920,8 @@ static void tc_bind_tclass(struct Qdisc *q, u32 portid, u32 clid,
cl = cops->find(q, portid);
if (!cl)
return;
if (!cops->tcf_block)
return;
block = cops->tcf_block(q, cl, NULL);
if (!block)
return;


@ -46,6 +46,8 @@ EXPORT_SYMBOL(default_qdisc_ops);
* - updates to tree and tree walking are only done under the rtnl mutex.
*/
#define SKB_XOFF_MAGIC ((struct sk_buff *)1UL)
static inline struct sk_buff *__skb_dequeue_bad_txq(struct Qdisc *q)
{
const struct netdev_queue *txq = q->dev_queue;
@ -71,7 +73,7 @@ static inline struct sk_buff *__skb_dequeue_bad_txq(struct Qdisc *q)
q->q.qlen--;
}
} else {
skb = NULL;
skb = SKB_XOFF_MAGIC;
}
}
@ -253,8 +255,11 @@ validate:
return skb;
skb = qdisc_dequeue_skb_bad_txq(q);
if (unlikely(skb))
if (unlikely(skb)) {
if (skb == SKB_XOFF_MAGIC)
return NULL;
goto bulk;
}
skb = q->dequeue(q);
if (skb) {
bulk:
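
In the sch_generic hunks above, a NULL return from the bad-txq helper used to
mean both "nothing stranded" and "a stranded skb exists but its TX queue is
frozen"; the caller could not tell them apart and would dequeue fresh packets
ahead of the stranded one, reordering traffic. The sentinel is simply an
address that can never be a real skb, as the diff defines it:

	#define SKB_XOFF_MAGIC ((struct sk_buff *)1UL)	/* never a valid skb */

	skb = qdisc_dequeue_skb_bad_txq(q);
	if (unlikely(skb)) {
		if (skb == SKB_XOFF_MAGIC)
			return NULL;	/* txq frozen: keep ordering, try later */
		goto bulk;		/* real stranded skb: send it first */
	}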


@ -531,7 +531,7 @@ static int hhf_change(struct Qdisc *sch, struct nlattr *opt,
new_hhf_non_hh_weight = nla_get_u32(tb[TCA_HHF_NON_HH_WEIGHT]);
non_hh_quantum = (u64)new_quantum * new_hhf_non_hh_weight;
if (non_hh_quantum > INT_MAX)
if (non_hh_quantum == 0 || non_hh_quantum > INT_MAX)
return -EINVAL;
sch_tree_lock(sch);


@ -1336,7 +1336,7 @@ static int __net_init sctp_ctrlsock_init(struct net *net)
return status;
}
static void __net_init sctp_ctrlsock_exit(struct net *net)
static void __net_exit sctp_ctrlsock_exit(struct net *net)
{
/* Free the control endpoint. */
inet_ctl_sock_destroy(net->sctp.ctl_sock);


@ -547,7 +547,7 @@ static void sctp_do_8_2_transport_strike(struct sctp_cmd_seq *commands,
if (net->sctp.pf_enable &&
(transport->state == SCTP_ACTIVE) &&
(transport->error_count < transport->pathmaxrxt) &&
(transport->error_count > asoc->pf_retrans)) {
(transport->error_count > transport->pf_retrans)) {
sctp_assoc_control_transport(asoc, transport,
SCTP_TRANSPORT_PF,


@ -309,7 +309,7 @@ static int sctp_bind(struct sock *sk, struct sockaddr *addr, int addr_len)
return retval;
}
static long sctp_get_port_local(struct sock *, union sctp_addr *);
static int sctp_get_port_local(struct sock *, union sctp_addr *);
/* Verify this is a valid sockaddr. */
static struct sctp_af *sctp_sockaddr_af(struct sctp_sock *opt,
@ -399,9 +399,8 @@ static int sctp_do_bind(struct sock *sk, union sctp_addr *addr, int len)
* detection.
*/
addr->v4.sin_port = htons(snum);
if ((ret = sctp_get_port_local(sk, addr))) {
if (sctp_get_port_local(sk, addr))
return -EADDRINUSE;
}
/* Refresh ephemeral port. */
if (!bp->port)
@ -413,11 +412,13 @@ static int sctp_do_bind(struct sock *sk, union sctp_addr *addr, int len)
ret = sctp_add_bind_addr(bp, addr, af->sockaddr_len,
SCTP_ADDR_SRC, GFP_ATOMIC);
/* Copy back into socket for getsockname() use. */
if (!ret) {
inet_sk(sk)->inet_sport = htons(inet_sk(sk)->inet_num);
sp->pf->to_sk_saddr(addr, sk);
if (ret) {
sctp_put_port(sk);
return ret;
}
/* Copy back into socket for getsockname() use. */
inet_sk(sk)->inet_sport = htons(inet_sk(sk)->inet_num);
sp->pf->to_sk_saddr(addr, sk);
return ret;
}
@ -7173,7 +7174,7 @@ static int sctp_getsockopt_paddr_thresholds(struct sock *sk,
val.spt_pathmaxrxt = trans->pathmaxrxt;
val.spt_pathpfthld = trans->pf_retrans;
return 0;
goto out;
}
asoc = sctp_id2assoc(sk, val.spt_assoc_id);
@ -7191,6 +7192,7 @@ static int sctp_getsockopt_paddr_thresholds(struct sock *sk,
val.spt_pathmaxrxt = sp->pathmaxrxt;
}
out:
if (put_user(len, optlen) || copy_to_user(optval, &val, len))
return -EFAULT;
@ -7998,7 +8000,7 @@ static void sctp_unhash(struct sock *sk)
static struct sctp_bind_bucket *sctp_bucket_create(
struct sctp_bind_hashbucket *head, struct net *, unsigned short snum);
static long sctp_get_port_local(struct sock *sk, union sctp_addr *addr)
static int sctp_get_port_local(struct sock *sk, union sctp_addr *addr)
{
struct sctp_sock *sp = sctp_sk(sk);
bool reuse = (sk->sk_reuse || sp->reuse);
@ -8108,7 +8110,7 @@ pp_found:
if (sctp_bind_addr_conflict(&ep2->base.bind_addr,
addr, sp2, sp)) {
ret = (long)sk2;
ret = 1;
goto fail_unlock;
}
}
@ -8180,7 +8182,7 @@ static int sctp_get_port(struct sock *sk, unsigned short snum)
addr.v4.sin_port = htons(snum);
/* Note: sk->sk_num gets filled in if ephemeral port request. */
return !!sctp_get_port_local(sk, &addr);
return sctp_get_port_local(sk, &addr);
}
/*


@ -223,7 +223,8 @@ static void tipc_publ_purge(struct net *net, struct publication *publ, u32 addr)
publ->key);
}
kfree_rcu(p, rcu);
if (p)
kfree_rcu(p, rcu);
}
/**


@ -145,8 +145,6 @@ static int xfrmi_create(struct net_device *dev)
if (err < 0)
goto out;
strcpy(xi->p.name, dev->name);
dev_hold(dev);
xfrmi_link(xfrmn, xi);
@ -177,7 +175,6 @@ static void xfrmi_dev_uninit(struct net_device *dev)
struct xfrmi_net *xfrmn = net_generic(xi->net, xfrmi_net_id);
xfrmi_unlink(xfrmn, xi);
dev_put(xi->phydev);
dev_put(dev);
}
@ -294,7 +291,7 @@ xfrmi_xmit2(struct sk_buff *skb, struct net_device *dev, struct flowi *fl)
if (tdev == dev) {
stats->collisions++;
net_warn_ratelimited("%s: Local routing loop detected!\n",
xi->p.name);
dev->name);
goto tx_err_dst_release;
}
@ -364,7 +361,7 @@ static netdev_tx_t xfrmi_xmit(struct sk_buff *skb, struct net_device *dev)
goto tx_err;
}
fl.flowi_oif = xi->phydev->ifindex;
fl.flowi_oif = xi->p.link;
ret = xfrmi_xmit2(skb, dev, &fl);
if (ret < 0)
@ -505,7 +502,7 @@ static int xfrmi_change(struct xfrm_if *xi, const struct xfrm_if_parms *p)
static int xfrmi_update(struct xfrm_if *xi, struct xfrm_if_parms *p)
{
struct net *net = dev_net(xi->dev);
struct net *net = xi->net;
struct xfrmi_net *xfrmn = net_generic(net, xfrmi_net_id);
int err;
@ -550,7 +547,7 @@ static int xfrmi_get_iflink(const struct net_device *dev)
{
struct xfrm_if *xi = netdev_priv(dev);
return xi->phydev->ifindex;
return xi->p.link;
}
@ -576,12 +573,14 @@ static void xfrmi_dev_setup(struct net_device *dev)
dev->needs_free_netdev = true;
dev->priv_destructor = xfrmi_dev_free;
netif_keep_dst(dev);
eth_broadcast_addr(dev->broadcast);
}
static int xfrmi_dev_init(struct net_device *dev)
{
struct xfrm_if *xi = netdev_priv(dev);
struct net_device *phydev = xi->phydev;
struct net_device *phydev = __dev_get_by_index(xi->net, xi->p.link);
int err;
dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
@ -596,13 +595,19 @@ static int xfrmi_dev_init(struct net_device *dev)
dev->features |= NETIF_F_LLTX;
dev->needed_headroom = phydev->needed_headroom;
dev->needed_tailroom = phydev->needed_tailroom;
if (phydev) {
dev->needed_headroom = phydev->needed_headroom;
dev->needed_tailroom = phydev->needed_tailroom;
if (is_zero_ether_addr(dev->dev_addr))
eth_hw_addr_inherit(dev, phydev);
if (is_zero_ether_addr(dev->broadcast))
memcpy(dev->broadcast, phydev->broadcast, dev->addr_len);
if (is_zero_ether_addr(dev->dev_addr))
eth_hw_addr_inherit(dev, phydev);
if (is_zero_ether_addr(dev->broadcast))
memcpy(dev->broadcast, phydev->broadcast,
dev->addr_len);
} else {
eth_hw_addr_random(dev);
eth_broadcast_addr(dev->broadcast);
}
return 0;
}
@ -638,12 +643,6 @@ static int xfrmi_newlink(struct net *src_net, struct net_device *dev,
int err;
xfrmi_netlink_parms(data, &p);
if (!tb[IFLA_IFNAME])
return -EINVAL;
nla_strlcpy(p.name, tb[IFLA_IFNAME], IFNAMSIZ);
xi = xfrmi_locate(net, &p);
if (xi)
return -EEXIST;
@ -652,13 +651,8 @@ static int xfrmi_newlink(struct net *src_net, struct net_device *dev,
xi->p = p;
xi->net = net;
xi->dev = dev;
xi->phydev = dev_get_by_index(net, p.link);
if (!xi->phydev)
return -ENODEV;
err = xfrmi_create(dev);
if (err < 0)
dev_put(xi->phydev);
return err;
}
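The common thread in the xfrm_interface hunks is dropping the pinned xi->phydev: the lower device is now identified only by its ifindex (xi->p.link) in the owning namespace (xi->net) and resolved on demand, which removes the dev_hold()/dev_put() reference management and lets xfrmi_dev_init() cope with a lower device that is absent. A hypothetical helper, not part of the patch, illustrating the on-demand lookup:

	/* Illustrative sketch only -- xfrmi_lower_dev() is not a function from
	 * the patch. Caller must hold RTNL, as __dev_get_by_index() requires;
	 * may return NULL if the lower device has gone away, which the
	 * reworked xfrmi_dev_init() above now tolerates. */
	static struct net_device *xfrmi_lower_dev(const struct xfrm_if *xi)
	{
		return __dev_get_by_index(xi->net, xi->p.link);
	}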
@ -672,11 +666,11 @@ static int xfrmi_changelink(struct net_device *dev, struct nlattr *tb[],
struct netlink_ext_ack *extack)
{
struct xfrm_if *xi = netdev_priv(dev);
struct net *net = dev_net(dev);
struct net *net = xi->net;
struct xfrm_if_parms p;
xfrmi_netlink_parms(data, &xi->p);
xi = xfrmi_locate(net, &xi->p);
xfrmi_netlink_parms(data, &p);
xi = xfrmi_locate(net, &p);
if (!xi) {
xi = netdev_priv(dev);
} else {
@ -684,7 +678,7 @@ static int xfrmi_changelink(struct net_device *dev, struct nlattr *tb[],
return -EEXIST;
}
return xfrmi_update(xi, &xi->p);
return xfrmi_update(xi, &p);
}
static size_t xfrmi_get_size(const struct net_device *dev)
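For changelink the bug was ordering: attributes were parsed straight into xi->p, so a request about to be rejected with -EEXIST had already corrupted the live parameters. Parsing into a stack-local struct xfrm_if_parms and committing only through xfrmi_update() keeps validation free of side effects; condensed:

	struct xfrm_if_parms p;

	xfrmi_netlink_parms(data, &p);	/* scratch copy; xi->p stays intact */
	xi = xfrmi_locate(net, &p);
	if (!xi)
		xi = netdev_priv(dev);	/* no clash: update this device */
	else if (xi->dev != dev)
		return -EEXIST;		/* clash with another interface */

	return xfrmi_update(xi, &p);	/* commit only after validation */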
@ -715,7 +709,7 @@ static struct net *xfrmi_get_link_net(const struct net_device *dev)
{
struct xfrm_if *xi = netdev_priv(dev);
return dev_net(xi->phydev);
return xi->net;
}
static const struct nla_policy xfrmi_policy[IFLA_XFRM_MAX + 1] = {


@ -912,6 +912,7 @@ restart:
} else if (delta > 0) {
p = &parent->rb_right;
} else {
bool same_prefixlen = node->prefixlen == n->prefixlen;
struct xfrm_policy *tmp;
hlist_for_each_entry(tmp, &n->hhead, bydst) {
@ -919,9 +920,11 @@ restart:
hlist_del_rcu(&tmp->bydst);
}
node->prefixlen = prefixlen;
xfrm_policy_inexact_list_reinsert(net, node, family);
if (node->prefixlen == n->prefixlen) {
if (same_prefixlen) {
kfree_rcu(n, rcu);
return;
}
@ -929,7 +932,6 @@ restart:
rb_erase(*p, new);
kfree_rcu(n, rcu);
n = node;
n->prefixlen = prefixlen;
goto restart;
}
}
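The xfrm_policy hunks reorder a node merge: node->prefixlen must already hold the new prefixlen when xfrm_policy_inexact_list_reinsert() files the moved policies, but that store clobbers the value the "same prefix, free n" test needs, so the comparison is snapshotted first (and the later n->prefixlen = prefixlen store, now redundant, is dropped). Condensed:

	bool same_prefixlen = node->prefixlen == n->prefixlen;	/* snapshot first */

	/* ... move n's policies over to node ... */

	node->prefixlen = prefixlen;	/* clobbers the value the test needs */
	xfrm_policy_inexact_list_reinsert(net, node, family);

	if (same_prefixlen) {		/* was: node->prefixlen == n->prefixlen */
		kfree_rcu(n, rcu);
		return;
	}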


@ -212,6 +212,8 @@ check_output()
printf " ${out}\n"
printf " Expected:\n"
printf " ${expected}\n\n"
else
echo " WARNING: Unexpected route entry"
fi
fi
@ -274,7 +276,7 @@ ipv6_fcnal()
run_cmd "$IP nexthop get id 52"
log_test $? 0 "Get nexthop by id"
check_nexthop "id 52" "id 52 via 2001:db8:91::2 dev veth1"
check_nexthop "id 52" "id 52 via 2001:db8:91::2 dev veth1 scope link"
run_cmd "$IP nexthop del id 52"
log_test $? 0 "Delete nexthop by id"
@ -479,12 +481,12 @@ ipv6_fcnal_runtime()
run_cmd "$IP -6 nexthop add id 85 dev veth1"
run_cmd "$IP ro replace 2001:db8:101::1/128 nhid 85"
log_test $? 0 "IPv6 route with device only nexthop"
check_route6 "2001:db8:101::1" "2001:db8:101::1 nhid 85 dev veth1"
check_route6 "2001:db8:101::1" "2001:db8:101::1 nhid 85 dev veth1 metric 1024 pref medium"
run_cmd "$IP nexthop add id 123 group 81/85"
run_cmd "$IP ro replace 2001:db8:101::1/128 nhid 123"
log_test $? 0 "IPv6 multipath route with nexthop mix - dev only + gw"
check_route6 "2001:db8:101::1" "2001:db8:101::1 nhid 85 nexthop via 2001:db8:91::2 dev veth1 nexthop dev veth1"
check_route6 "2001:db8:101::1" "2001:db8:101::1 nhid 123 metric 1024 nexthop via 2001:db8:91::2 dev veth1 weight 1 nexthop dev veth1 weight 1 pref medium"
#
# IPv6 route with v4 nexthop - not allowed
@ -538,7 +540,7 @@ ipv4_fcnal()
run_cmd "$IP nexthop get id 12"
log_test $? 0 "Get nexthop by id"
check_nexthop "id 12" "id 12 via 172.16.1.2 src 172.16.1.1 dev veth1 scope link"
check_nexthop "id 12" "id 12 via 172.16.1.2 dev veth1 scope link"
run_cmd "$IP nexthop del id 12"
log_test $? 0 "Delete nexthop by id"
@ -685,7 +687,7 @@ ipv4_withv6_fcnal()
set +e
run_cmd "$IP ro add 172.16.101.1/32 nhid 11"
log_test $? 0 "IPv6 nexthop with IPv4 route"
check_route "172.16.101.1" "172.16.101.1 nhid 11 via ${lladdr} dev veth1"
check_route "172.16.101.1" "172.16.101.1 nhid 11 via inet6 ${lladdr} dev veth1"
set -e
run_cmd "$IP nexthop add id 12 via 172.16.1.2 dev veth1"
@ -694,11 +696,11 @@ ipv4_withv6_fcnal()
run_cmd "$IP ro replace 172.16.101.1/32 nhid 101"
log_test $? 0 "IPv6 nexthop with IPv4 route"
check_route "172.16.101.1" "172.16.101.1 nhid 101 nexthop via ${lladdr} dev veth1 weight 1 nexthop via 172.16.1.2 dev veth1 weight 1"
check_route "172.16.101.1" "172.16.101.1 nhid 101 nexthop via inet6 ${lladdr} dev veth1 weight 1 nexthop via 172.16.1.2 dev veth1 weight 1"
run_cmd "$IP ro replace 172.16.101.1/32 via inet6 ${lladdr} dev veth1"
log_test $? 0 "IPv4 route with IPv6 gateway"
check_route "172.16.101.1" "172.16.101.1 via ${lladdr} dev veth1"
check_route "172.16.101.1" "172.16.101.1 via inet6 ${lladdr} dev veth1"
run_cmd "$IP ro replace 172.16.101.1/32 via inet6 2001:db8:50::1 dev veth1"
log_test $? 2 "IPv4 route with invalid IPv6 gateway"
@ -785,10 +787,10 @@ ipv4_fcnal_runtime()
log_test $? 0 "IPv4 route with device only nexthop"
check_route "172.16.101.1" "172.16.101.1 nhid 85 dev veth1"
run_cmd "$IP nexthop add id 122 group 21/85"
run_cmd "$IP ro replace 172.16.101.1/32 nhid 122"
run_cmd "$IP nexthop add id 123 group 21/85"
run_cmd "$IP ro replace 172.16.101.1/32 nhid 123"
log_test $? 0 "IPv4 multipath route with nexthop mix - dev only + gw"
check_route "172.16.101.1" "172.16.101.1 nhid 85 nexthop via 172.16.1.2 dev veth1 nexthop dev veth1"
check_route "172.16.101.1" "172.16.101.1 nhid 123 nexthop via 172.16.1.2 dev veth1 weight 1 nexthop dev veth1 weight 1"
#
# IPv4 with IPv6
@ -820,7 +822,7 @@ ipv4_fcnal_runtime()
run_cmd "$IP ro replace 172.16.101.1/32 nhid 101"
log_test $? 0 "IPv4 route with mixed v4-v6 multipath route"
check_route "172.16.101.1" "172.16.101.1 nhid 101 nexthop via ${lladdr} dev veth1 weight 1 nexthop via 172.16.1.2 dev veth1 weight 1"
check_route "172.16.101.1" "172.16.101.1 nhid 101 nexthop via inet6 ${lladdr} dev veth1 weight 1 nexthop via 172.16.1.2 dev veth1 weight 1"
run_cmd "ip netns exec me ping -c1 -w1 172.16.101.1"
log_test $? 0 "IPv6 nexthop with IPv4 route"


@ -106,6 +106,13 @@ do_overlap()
#
# 10.0.0.0/24 and 10.0.1.0/24 nodes have been merged as 10.0.0.0/23.
ip -net $ns xfrm policy add src 10.1.0.0/24 dst 10.0.0.0/23 dir fwd priority 200 action block
# similar to above: add policies (with partially random address), with shrinking prefixes.
for p in 29 28 27;do
for k in $(seq 1 32); do
ip -net $ns xfrm policy add src 10.253.1.$((RANDOM%255))/$p dst 10.254.1.$((RANDOM%255))/$p dir fwd priority $((200+k)) action block 2>/dev/null
done
done
}
do_esp_policy_get_check() {