
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Pull networking fixes from David Miller:

 1) Fix route leak in xfrm_bundle_create().

 2) In mac80211, validate user rate mask before configuring it. From
    Johannes Berg.

 3) Properly enforce memory limits in fair queueing code, from Toke
    Hoiland-Jorgensen.

 4) Fix lockdep splat in inet_csk_route_req(), from Eric Dumazet.

 5) Fix TSO header allocation and management in mvpp2 driver, from Yan
    Markman.

 6) Don't take socket lock in BH handler in strparser code, from Tom
    Herbert.

 7) Don't show sockets from other namespaces in AF_UNIX code, from
    Andrei Vagin.

 8) Fix double free in error path of tap_open(), from Girish Moodalbail.

 9) Fix TX map failure path in igb and ixgbe, from Jean-Philippe Brucker
    and Alexander Duyck.

10) Fix DCB mode programming in stmmac driver, from Jose Abreu.

11) Fix err_count handling in various tunnels (ipip, ip6_gre). From Xin
    Long.

12) Properly align SKB head before building SKB in tuntap, from Jason
    Wang.

13) Avoid matching qdiscs with a zero handle during lookups, from Cong
    Wang.

14) Fix various endianness bugs in sctp, from Xin Long.

15) Fix tc filter callback races and add selftests which trigger the
    problem, from Cong Wang.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (73 commits)
  selftests: Introduce a new test case to tc testsuite
  selftests: Introduce a new script to generate tc batch file
  net_sched: fix call_rcu() race on act_sample module removal
  net_sched: add rtnl assertion to tcf_exts_destroy()
  net_sched: use tcf_queue_work() in tcindex filter
  net_sched: use tcf_queue_work() in rsvp filter
  net_sched: use tcf_queue_work() in route filter
  net_sched: use tcf_queue_work() in u32 filter
  net_sched: use tcf_queue_work() in matchall filter
  net_sched: use tcf_queue_work() in fw filter
  net_sched: use tcf_queue_work() in flower filter
  net_sched: use tcf_queue_work() in flow filter
  net_sched: use tcf_queue_work() in cgroup filter
  net_sched: use tcf_queue_work() in bpf filter
  net_sched: use tcf_queue_work() in basic filter
  net_sched: introduce a workqueue for RCU callbacks of tc filter
  sctp: fix some type cast warnings introduced since very beginning
  sctp: fix a type cast warnings that causes a_rwnd gets the wrong value
  sctp: fix some type cast warnings introduced by transport rhashtable
  sctp: fix some type cast warnings introduced by stream reconf
  ...
Linus Torvalds 2017-10-29 08:11:49 -07:00
commit 19e12196da
78 changed files with 1024 additions and 364 deletions


@ -342,7 +342,7 @@ static int sun4i_can_start(struct net_device *dev)
/* enter the selected mode */
mod_reg_val = readl(priv->base + SUN4I_REG_MSEL_ADDR);
if (priv->can.ctrlmode & CAN_CTRLMODE_PRESUME_ACK)
if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK)
mod_reg_val |= SUN4I_MSEL_LOOPBACK_MODE;
else if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
mod_reg_val |= SUN4I_MSEL_LISTEN_ONLY_MODE;
@ -811,7 +811,6 @@ static int sun4ican_probe(struct platform_device *pdev)
priv->can.ctrlmode_supported = CAN_CTRLMODE_BERR_REPORTING |
CAN_CTRLMODE_LISTENONLY |
CAN_CTRLMODE_LOOPBACK |
CAN_CTRLMODE_PRESUME_ACK |
CAN_CTRLMODE_3_SAMPLES;
priv->base = addr;
priv->clk = clk;


@ -137,6 +137,7 @@ static inline bool kvaser_is_usbcan(const struct usb_device_id *id)
#define CMD_RESET_ERROR_COUNTER 49
#define CMD_TX_ACKNOWLEDGE 50
#define CMD_CAN_ERROR_EVENT 51
#define CMD_FLUSH_QUEUE_REPLY 68
#define CMD_LEAF_USB_THROTTLE 77
#define CMD_LEAF_LOG_MESSAGE 106
@ -1301,6 +1302,11 @@ static void kvaser_usb_handle_message(const struct kvaser_usb *dev,
goto warn;
break;
case CMD_FLUSH_QUEUE_REPLY:
if (dev->family != KVASER_LEAF)
goto warn;
break;
default:
warn: dev_warn(dev->udev->dev.parent,
"Unhandled message (%d)\n", msg->id);
@ -1609,7 +1615,8 @@ static int kvaser_usb_close(struct net_device *netdev)
if (err)
netdev_warn(netdev, "Cannot flush queue, error %d\n", err);
if (kvaser_usb_send_simple_msg(dev, CMD_RESET_CHIP, priv->channel))
err = kvaser_usb_send_simple_msg(dev, CMD_RESET_CHIP, priv->channel);
if (err)
netdev_warn(netdev, "Cannot reset card, error %d\n", err);
err = kvaser_usb_stop_chip(priv);


@ -1824,11 +1824,12 @@ static void e1000_get_ethtool_stats(struct net_device *netdev,
{
struct e1000_adapter *adapter = netdev_priv(netdev);
int i;
char *p = NULL;
const struct e1000_stats *stat = e1000_gstrings_stats;
e1000_update_stats(adapter);
for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++) {
for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++, stat++) {
char *p;
switch (stat->type) {
case NETDEV_STATS:
p = (char *)netdev + stat->stat_offset;
@ -1839,15 +1840,13 @@ static void e1000_get_ethtool_stats(struct net_device *netdev,
default:
WARN_ONCE(1, "Invalid E1000 stat type: %u index %d\n",
stat->type, i);
break;
continue;
}
if (stat->sizeof_stat == sizeof(u64))
data[i] = *(u64 *)p;
else
data[i] = *(u32 *)p;
stat++;
}
/* BUG_ON(i != E1000_STATS_LEN); */
}


@ -520,8 +520,6 @@ void e1000_down(struct e1000_adapter *adapter)
struct net_device *netdev = adapter->netdev;
u32 rctl, tctl;
netif_carrier_off(netdev);
/* disable receives in the hardware */
rctl = er32(RCTL);
ew32(RCTL, rctl & ~E1000_RCTL_EN);
@ -537,6 +535,15 @@ void e1000_down(struct e1000_adapter *adapter)
E1000_WRITE_FLUSH();
msleep(10);
/* Set the carrier off after transmits have been disabled in the
* hardware, to avoid race conditions with e1000_watchdog() (which
* may be running concurrently to us, checking for the carrier
* bit to decide whether it should enable transmits again). Such
* a race condition would result into transmission being disabled
* in the hardware until the next IFF_DOWN+IFF_UP cycle.
*/
netif_carrier_off(netdev);
napi_disable(&adapter->napi);
e1000_irq_disable(adapter);


@ -2102,6 +2102,7 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
if (unlikely(i40e_rx_is_programming_status(qword))) {
i40e_clean_programming_status(rx_ring, rx_desc, qword);
cleaned_count++;
continue;
}
size = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
@ -2269,7 +2270,7 @@ static inline void i40e_update_enable_itr(struct i40e_vsi *vsi,
goto enable_int;
}
if (ITR_IS_DYNAMIC(tx_itr_setting)) {
if (ITR_IS_DYNAMIC(rx_itr_setting)) {
rx = i40e_set_new_dynamic_itr(&q_vector->rx);
rxval = i40e_buildreg_itr(I40E_RX_ITR, q_vector->rx.itr);
}


@ -5326,7 +5326,7 @@ dma_error:
DMA_TO_DEVICE);
dma_unmap_len_set(tx_buffer, len, 0);
if (i--)
if (i-- == 0)
i += tx_ring->count;
tx_buffer = &tx_ring->tx_buffer_info[i];
}


@ -8020,29 +8020,23 @@ static int ixgbe_tx_map(struct ixgbe_ring *tx_ring,
return 0;
dma_error:
dev_err(tx_ring->dev, "TX DMA map failed\n");
tx_buffer = &tx_ring->tx_buffer_info[i];
/* clear dma mappings for failed tx_buffer_info map */
while (tx_buffer != first) {
for (;;) {
tx_buffer = &tx_ring->tx_buffer_info[i];
if (dma_unmap_len(tx_buffer, len))
dma_unmap_page(tx_ring->dev,
dma_unmap_addr(tx_buffer, dma),
dma_unmap_len(tx_buffer, len),
DMA_TO_DEVICE);
dma_unmap_len_set(tx_buffer, len, 0);
if (i--)
if (tx_buffer == first)
break;
if (i == 0)
i += tx_ring->count;
tx_buffer = &tx_ring->tx_buffer_info[i];
i--;
}
if (dma_unmap_len(tx_buffer, len))
dma_unmap_single(tx_ring->dev,
dma_unmap_addr(tx_buffer, dma),
dma_unmap_len(tx_buffer, len),
DMA_TO_DEVICE);
dma_unmap_len_set(tx_buffer, len, 0);
dev_kfree_skb_any(first->skb);
first->skb = NULL;


@ -1167,6 +1167,11 @@ struct mvpp2_bm_pool {
u32 port_map;
};
#define IS_TSO_HEADER(txq_pcpu, addr) \
((addr) >= (txq_pcpu)->tso_headers_dma && \
(addr) < (txq_pcpu)->tso_headers_dma + \
(txq_pcpu)->size * TSO_HEADER_SIZE)
/* Queue modes */
#define MVPP2_QDIST_SINGLE_MODE 0
#define MVPP2_QDIST_MULTI_MODE 1
@ -1534,7 +1539,7 @@ static bool mvpp2_prs_tcam_data_cmp(struct mvpp2_prs_entry *pe, int offs,
int off = MVPP2_PRS_TCAM_DATA_BYTE(offs);
u16 tcam_data;
tcam_data = (8 << pe->tcam.byte[off + 1]) | pe->tcam.byte[off];
tcam_data = (pe->tcam.byte[off + 1] << 8) | pe->tcam.byte[off];
if (tcam_data != data)
return false;
return true;
@ -2609,8 +2614,8 @@ static void mvpp2_prs_mac_init(struct mvpp2 *priv)
/* place holders only - no ports */
mvpp2_prs_mac_drop_all_set(priv, 0, false);
mvpp2_prs_mac_promisc_set(priv, 0, false);
mvpp2_prs_mac_multi_set(priv, MVPP2_PE_MAC_MC_ALL, 0, false);
mvpp2_prs_mac_multi_set(priv, MVPP2_PE_MAC_MC_IP6, 0, false);
mvpp2_prs_mac_multi_set(priv, 0, MVPP2_PE_MAC_MC_ALL, false);
mvpp2_prs_mac_multi_set(priv, 0, MVPP2_PE_MAC_MC_IP6, false);
}
/* Set default entries for various types of dsa packets */
@ -3391,7 +3396,7 @@ mvpp2_prs_mac_da_range_find(struct mvpp2 *priv, int pmap, const u8 *da,
struct mvpp2_prs_entry *pe;
int tid;
pe = kzalloc(sizeof(*pe), GFP_KERNEL);
pe = kzalloc(sizeof(*pe), GFP_ATOMIC);
if (!pe)
return NULL;
mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_MAC);
@ -3453,7 +3458,7 @@ static int mvpp2_prs_mac_da_accept(struct mvpp2 *priv, int port,
if (tid < 0)
return tid;
pe = kzalloc(sizeof(*pe), GFP_KERNEL);
pe = kzalloc(sizeof(*pe), GFP_ATOMIC);
if (!pe)
return -ENOMEM;
mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_MAC);
@ -5321,8 +5326,9 @@ static void mvpp2_txq_bufs_free(struct mvpp2_port *port,
struct mvpp2_txq_pcpu_buf *tx_buf =
txq_pcpu->buffs + txq_pcpu->txq_get_index;
dma_unmap_single(port->dev->dev.parent, tx_buf->dma,
tx_buf->size, DMA_TO_DEVICE);
if (!IS_TSO_HEADER(txq_pcpu, tx_buf->dma))
dma_unmap_single(port->dev->dev.parent, tx_buf->dma,
tx_buf->size, DMA_TO_DEVICE);
if (tx_buf->skb)
dev_kfree_skb_any(tx_buf->skb);
@ -5609,7 +5615,7 @@ static int mvpp2_txq_init(struct mvpp2_port *port,
txq_pcpu->tso_headers =
dma_alloc_coherent(port->dev->dev.parent,
MVPP2_AGGR_TXQ_SIZE * TSO_HEADER_SIZE,
txq_pcpu->size * TSO_HEADER_SIZE,
&txq_pcpu->tso_headers_dma,
GFP_KERNEL);
if (!txq_pcpu->tso_headers)
@ -5623,7 +5629,7 @@ cleanup:
kfree(txq_pcpu->buffs);
dma_free_coherent(port->dev->dev.parent,
MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE,
txq_pcpu->size * TSO_HEADER_SIZE,
txq_pcpu->tso_headers,
txq_pcpu->tso_headers_dma);
}
@ -5647,7 +5653,7 @@ static void mvpp2_txq_deinit(struct mvpp2_port *port,
kfree(txq_pcpu->buffs);
dma_free_coherent(port->dev->dev.parent,
MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE,
txq_pcpu->size * TSO_HEADER_SIZE,
txq_pcpu->tso_headers,
txq_pcpu->tso_headers_dma);
}
@ -6212,12 +6218,15 @@ static inline void
tx_desc_unmap_put(struct mvpp2_port *port, struct mvpp2_tx_queue *txq,
struct mvpp2_tx_desc *desc)
{
struct mvpp2_txq_pcpu *txq_pcpu = this_cpu_ptr(txq->pcpu);
dma_addr_t buf_dma_addr =
mvpp2_txdesc_dma_addr_get(port, desc);
size_t buf_sz =
mvpp2_txdesc_size_get(port, desc);
dma_unmap_single(port->dev->dev.parent, buf_dma_addr,
buf_sz, DMA_TO_DEVICE);
if (!IS_TSO_HEADER(txq_pcpu, buf_dma_addr))
dma_unmap_single(port->dev->dev.parent, buf_dma_addr,
buf_sz, DMA_TO_DEVICE);
mvpp2_txq_desc_put(txq);
}
@ -6490,7 +6499,7 @@ out:
}
/* Finalize TX processing */
if (txq_pcpu->count >= txq->done_pkts_coal)
if (!port->has_tx_irqs && txq_pcpu->count >= txq->done_pkts_coal)
mvpp2_txq_done(port, txq, txq_pcpu);
/* Set the timer in case not all frags were processed */


@ -77,35 +77,41 @@ static void add_delayed_event(struct mlx5_priv *priv,
list_add_tail(&delayed_event->list, &priv->waiting_events_list);
}
static void fire_delayed_event_locked(struct mlx5_device_context *dev_ctx,
struct mlx5_core_dev *dev,
struct mlx5_priv *priv)
static void delayed_event_release(struct mlx5_device_context *dev_ctx,
struct mlx5_priv *priv)
{
struct mlx5_core_dev *dev = container_of(priv, struct mlx5_core_dev, priv);
struct mlx5_delayed_event *de;
struct mlx5_delayed_event *n;
struct list_head temp;
INIT_LIST_HEAD(&temp);
spin_lock_irq(&priv->ctx_lock);
/* stop delaying events */
priv->is_accum_events = false;
/* fire all accumulated events before new event comes */
list_for_each_entry_safe(de, n, &priv->waiting_events_list, list) {
list_splice_init(&priv->waiting_events_list, &temp);
if (!dev_ctx->context)
goto out;
list_for_each_entry_safe(de, n, &priv->waiting_events_list, list)
dev_ctx->intf->event(dev, dev_ctx->context, de->event, de->param);
out:
spin_unlock_irq(&priv->ctx_lock);
list_for_each_entry_safe(de, n, &temp, list) {
list_del(&de->list);
kfree(de);
}
}
static void cleanup_delayed_evets(struct mlx5_priv *priv)
/* accumulating events that can come after mlx5_ib calls to
* ib_register_device, till adding that interface to the events list.
*/
static void delayed_event_start(struct mlx5_priv *priv)
{
struct mlx5_delayed_event *de;
struct mlx5_delayed_event *n;
spin_lock_irq(&priv->ctx_lock);
priv->is_accum_events = false;
list_for_each_entry_safe(de, n, &priv->waiting_events_list, list) {
list_del(&de->list);
kfree(de);
}
priv->is_accum_events = true;
spin_unlock_irq(&priv->ctx_lock);
}
@ -122,11 +128,8 @@ void mlx5_add_device(struct mlx5_interface *intf, struct mlx5_priv *priv)
return;
dev_ctx->intf = intf;
/* accumulating events that can come after mlx5_ib calls to
* ib_register_device, till adding that interface to the events list.
*/
priv->is_accum_events = true;
delayed_event_start(priv);
dev_ctx->context = intf->add(dev);
set_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state);
@ -137,8 +140,6 @@ void mlx5_add_device(struct mlx5_interface *intf, struct mlx5_priv *priv)
spin_lock_irq(&priv->ctx_lock);
list_add_tail(&dev_ctx->list, &priv->ctx_list);
fire_delayed_event_locked(dev_ctx, dev, priv);
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
if (dev_ctx->intf->pfault) {
if (priv->pfault) {
@ -150,11 +151,12 @@ void mlx5_add_device(struct mlx5_interface *intf, struct mlx5_priv *priv)
}
#endif
spin_unlock_irq(&priv->ctx_lock);
} else {
kfree(dev_ctx);
/* delete all accumulated events */
cleanup_delayed_evets(priv);
}
delayed_event_release(dev_ctx, priv);
if (!dev_ctx->context)
kfree(dev_ctx);
}
static struct mlx5_device_context *mlx5_get_device(struct mlx5_interface *intf,
@ -205,17 +207,21 @@ static void mlx5_attach_interface(struct mlx5_interface *intf, struct mlx5_priv
if (!dev_ctx)
return;
delayed_event_start(priv);
if (intf->attach) {
if (test_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state))
return;
goto out;
intf->attach(dev, dev_ctx->context);
set_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state);
} else {
if (test_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state))
return;
goto out;
dev_ctx->context = intf->add(dev);
set_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state);
}
out:
delayed_event_release(dev_ctx, priv);
}
void mlx5_attach_device(struct mlx5_core_dev *dev)
@ -414,8 +420,14 @@ void mlx5_core_event(struct mlx5_core_dev *dev, enum mlx5_dev_event event,
if (priv->is_accum_events)
add_delayed_event(priv, dev, event, param);
/* After mlx5_detach_device, the dev_ctx->intf is still set and dev_ctx is
* still in priv->ctx_list. In this case, only notify the dev_ctx if its
* ADDED or ATTACHED bit are set.
*/
list_for_each_entry(dev_ctx, &priv->ctx_list, list)
if (dev_ctx->intf->event)
if (dev_ctx->intf->event &&
(test_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state) ||
test_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state)))
dev_ctx->intf->event(dev, dev_ctx->context, event, param);
spin_unlock_irqrestore(&priv->ctx_lock, flags);


@ -41,6 +41,11 @@
#define MLX5E_CEE_STATE_UP 1
#define MLX5E_CEE_STATE_DOWN 0
enum {
MLX5E_VENDOR_TC_GROUP_NUM = 7,
MLX5E_LOWEST_PRIO_GROUP = 0,
};
/* If dcbx mode is non-host set the dcbx mode to host.
*/
static int mlx5e_dcbnl_set_dcbx_mode(struct mlx5e_priv *priv,
@ -85,6 +90,9 @@ static int mlx5e_dcbnl_ieee_getets(struct net_device *netdev,
{
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5_core_dev *mdev = priv->mdev;
u8 tc_group[IEEE_8021QAZ_MAX_TCS];
bool is_tc_group_6_exist = false;
bool is_zero_bw_ets_tc = false;
int err = 0;
int i;
@ -96,37 +104,64 @@ static int mlx5e_dcbnl_ieee_getets(struct net_device *netdev,
err = mlx5_query_port_prio_tc(mdev, i, &ets->prio_tc[i]);
if (err)
return err;
}
for (i = 0; i < ets->ets_cap; i++) {
err = mlx5_query_port_tc_group(mdev, i, &tc_group[i]);
if (err)
return err;
err = mlx5_query_port_tc_bw_alloc(mdev, i, &ets->tc_tx_bw[i]);
if (err)
return err;
if (ets->tc_tx_bw[i] < MLX5E_MAX_BW_ALLOC)
priv->dcbx.tc_tsa[i] = IEEE_8021QAZ_TSA_ETS;
if (ets->tc_tx_bw[i] < MLX5E_MAX_BW_ALLOC &&
tc_group[i] == (MLX5E_LOWEST_PRIO_GROUP + 1))
is_zero_bw_ets_tc = true;
if (tc_group[i] == (MLX5E_VENDOR_TC_GROUP_NUM - 1))
is_tc_group_6_exist = true;
}
/* Report 0% ets tc if it exists */
if (is_zero_bw_ets_tc) {
for (i = 0; i < ets->ets_cap; i++)
if (tc_group[i] == MLX5E_LOWEST_PRIO_GROUP)
ets->tc_tx_bw[i] = 0;
}
/* Update tc_tsa based on fw setting*/
for (i = 0; i < ets->ets_cap; i++) {
if (ets->tc_tx_bw[i] < MLX5E_MAX_BW_ALLOC)
priv->dcbx.tc_tsa[i] = IEEE_8021QAZ_TSA_ETS;
else if (tc_group[i] == MLX5E_VENDOR_TC_GROUP_NUM &&
!is_tc_group_6_exist)
priv->dcbx.tc_tsa[i] = IEEE_8021QAZ_TSA_VENDOR;
}
memcpy(ets->tc_tsa, priv->dcbx.tc_tsa, sizeof(ets->tc_tsa));
return err;
}
enum {
MLX5E_VENDOR_TC_GROUP_NUM = 7,
MLX5E_ETS_TC_GROUP_NUM = 0,
};
static void mlx5e_build_tc_group(struct ieee_ets *ets, u8 *tc_group, int max_tc)
{
bool any_tc_mapped_to_ets = false;
bool ets_zero_bw = false;
int strict_group;
int i;
for (i = 0; i <= max_tc; i++)
if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_ETS)
for (i = 0; i <= max_tc; i++) {
if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_ETS) {
any_tc_mapped_to_ets = true;
if (!ets->tc_tx_bw[i])
ets_zero_bw = true;
}
}
strict_group = any_tc_mapped_to_ets ? 1 : 0;
/* strict group has higher priority than ets group */
strict_group = MLX5E_LOWEST_PRIO_GROUP;
if (any_tc_mapped_to_ets)
strict_group++;
if (ets_zero_bw)
strict_group++;
for (i = 0; i <= max_tc; i++) {
switch (ets->tc_tsa[i]) {
@ -137,7 +172,9 @@ static void mlx5e_build_tc_group(struct ieee_ets *ets, u8 *tc_group, int max_tc)
tc_group[i] = strict_group++;
break;
case IEEE_8021QAZ_TSA_ETS:
tc_group[i] = MLX5E_ETS_TC_GROUP_NUM;
tc_group[i] = MLX5E_LOWEST_PRIO_GROUP;
if (ets->tc_tx_bw[i] && ets_zero_bw)
tc_group[i] = MLX5E_LOWEST_PRIO_GROUP + 1;
break;
}
}
@ -146,8 +183,22 @@ static void mlx5e_build_tc_group(struct ieee_ets *ets, u8 *tc_group, int max_tc)
static void mlx5e_build_tc_tx_bw(struct ieee_ets *ets, u8 *tc_tx_bw,
u8 *tc_group, int max_tc)
{
int bw_for_ets_zero_bw_tc = 0;
int last_ets_zero_bw_tc = -1;
int num_ets_zero_bw = 0;
int i;
for (i = 0; i <= max_tc; i++) {
if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_ETS &&
!ets->tc_tx_bw[i]) {
num_ets_zero_bw++;
last_ets_zero_bw_tc = i;
}
}
if (num_ets_zero_bw)
bw_for_ets_zero_bw_tc = MLX5E_MAX_BW_ALLOC / num_ets_zero_bw;
for (i = 0; i <= max_tc; i++) {
switch (ets->tc_tsa[i]) {
case IEEE_8021QAZ_TSA_VENDOR:
@ -157,12 +208,26 @@ static void mlx5e_build_tc_tx_bw(struct ieee_ets *ets, u8 *tc_tx_bw,
tc_tx_bw[i] = MLX5E_MAX_BW_ALLOC;
break;
case IEEE_8021QAZ_TSA_ETS:
tc_tx_bw[i] = ets->tc_tx_bw[i];
tc_tx_bw[i] = ets->tc_tx_bw[i] ?
ets->tc_tx_bw[i] :
bw_for_ets_zero_bw_tc;
break;
}
}
/* Make sure the total bw for ets zero bw group is 100% */
if (last_ets_zero_bw_tc != -1)
tc_tx_bw[last_ets_zero_bw_tc] +=
MLX5E_MAX_BW_ALLOC % num_ets_zero_bw;
}
/* If there are ETS BW 0,
* Set ETS group # to 1 for all ETS non zero BW tcs. Their sum must be 100%.
* Set group #0 to all the ETS BW 0 tcs and
* equally splits the 100% BW between them
* Report both group #0 and #1 as ETS type.
* All the tcs in group #0 will be reported with 0% BW.
*/
int mlx5e_dcbnl_ieee_setets_core(struct mlx5e_priv *priv, struct ieee_ets *ets)
{
struct mlx5_core_dev *mdev = priv->mdev;
@ -188,7 +253,6 @@ int mlx5e_dcbnl_ieee_setets_core(struct mlx5e_priv *priv, struct ieee_ets *ets)
return err;
memcpy(priv->dcbx.tc_tsa, ets->tc_tsa, sizeof(ets->tc_tsa));
return err;
}
@ -209,17 +273,9 @@ static int mlx5e_dbcnl_validate_ets(struct net_device *netdev,
}
/* Validate Bandwidth Sum */
for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_ETS) {
if (!ets->tc_tx_bw[i]) {
netdev_err(netdev,
"Failed to validate ETS: BW 0 is illegal\n");
return -EINVAL;
}
for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_ETS)
bw_sum += ets->tc_tx_bw[i];
}
}
if (bw_sum != 0 && bw_sum != 100) {
netdev_err(netdev,
@ -533,8 +589,7 @@ static void mlx5e_dcbnl_getpgtccfgtx(struct net_device *netdev,
static void mlx5e_dcbnl_getpgbwgcfgtx(struct net_device *netdev,
int pgid, u8 *bw_pct)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5_core_dev *mdev = priv->mdev;
struct ieee_ets ets;
if (pgid >= CEE_DCBX_MAX_PGS) {
netdev_err(netdev,
@ -542,8 +597,8 @@ static void mlx5e_dcbnl_getpgbwgcfgtx(struct net_device *netdev,
return;
}
if (mlx5_query_port_tc_bw_alloc(mdev, pgid, bw_pct))
*bw_pct = 0;
mlx5e_dcbnl_ieee_getets(netdev, &ets);
*bw_pct = ets.tc_tx_bw[pgid];
}
static void mlx5e_dcbnl_setpfccfg(struct net_device *netdev,
@ -739,8 +794,6 @@ static void mlx5e_ets_init(struct mlx5e_priv *priv)
ets.prio_tc[i] = i;
}
memcpy(priv->dcbx.tc_tsa, ets.tc_tsa, sizeof(ets.tc_tsa));
/* tclass[prio=0]=1, tclass[prio=1]=0, tclass[prio=i]=i (for i>1) */
ets.prio_tc[0] = 1;
ets.prio_tc[1] = 0;
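
The zero-bandwidth ETS handling added above (see the comment before mlx5e_dcbnl_ieee_setets_core) splits 100% evenly across the ETS TCs that were configured with 0% BW and hands the rounding remainder to the last of them. A standalone illustration of that split, using made-up example values rather than driver code:

```c
#include <stdio.h>

#define MAX_BW_ALLOC 100	/* mirrors MLX5E_MAX_BW_ALLOC */

int main(void)
{
	/* hypothetical request: three ETS TCs at 0%, one at 40% */
	int tc_tx_bw[4] = { 0, 0, 0, 40 };
	int num_zero = 0, last_zero = -1;

	for (int i = 0; i < 4; i++) {
		if (!tc_tx_bw[i]) {
			num_zero++;
			last_zero = i;
		}
	}

	/* split 100% across the zero-BW TCs, remainder to the last one */
	for (int i = 0; i < 4; i++)
		if (!tc_tx_bw[i])
			tc_tx_bw[i] = MAX_BW_ALLOC / num_zero;
	if (last_zero >= 0)
		tc_tx_bw[last_zero] += MAX_BW_ALLOC % num_zero;

	for (int i = 0; i < 4; i++)
		printf("tc%d -> %d%%\n", i, tc_tx_bw[i]); /* 33 33 34 40 */

	return 0;
}
```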


@ -78,9 +78,11 @@ struct mlx5e_tc_flow {
};
struct mlx5e_tc_flow_parse_attr {
struct ip_tunnel_info tun_info;
struct mlx5_flow_spec spec;
int num_mod_hdr_actions;
void *mod_hdr_actions;
int mirred_ifindex;
};
enum {
@ -322,6 +324,12 @@ static void mlx5e_tc_del_nic_flow(struct mlx5e_priv *priv,
static void mlx5e_detach_encap(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow);
static int mlx5e_attach_encap(struct mlx5e_priv *priv,
struct ip_tunnel_info *tun_info,
struct net_device *mirred_dev,
struct net_device **encap_dev,
struct mlx5e_tc_flow *flow);
static struct mlx5_flow_handle *
mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
struct mlx5e_tc_flow_parse_attr *parse_attr,
@ -329,9 +337,27 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
struct mlx5_esw_flow_attr *attr = flow->esw_attr;
struct mlx5_flow_handle *rule;
struct net_device *out_dev, *encap_dev = NULL;
struct mlx5_flow_handle *rule = NULL;
struct mlx5e_rep_priv *rpriv;
struct mlx5e_priv *out_priv;
int err;
if (attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP) {
out_dev = __dev_get_by_index(dev_net(priv->netdev),
attr->parse_attr->mirred_ifindex);
err = mlx5e_attach_encap(priv, &parse_attr->tun_info,
out_dev, &encap_dev, flow);
if (err) {
rule = ERR_PTR(err);
if (err != -EAGAIN)
goto err_attach_encap;
}
out_priv = netdev_priv(encap_dev);
rpriv = out_priv->ppriv;
attr->out_rep = rpriv->rep;
}
err = mlx5_eswitch_add_vlan_action(esw, attr);
if (err) {
rule = ERR_PTR(err);
@ -347,10 +373,14 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
}
}
rule = mlx5_eswitch_add_offloaded_rule(esw, &parse_attr->spec, attr);
if (IS_ERR(rule))
goto err_add_rule;
/* we get here if (1) there's no error (rule being null) or when
* (2) there's an encap action and we're on -EAGAIN (no valid neigh)
*/
if (rule != ERR_PTR(-EAGAIN)) {
rule = mlx5_eswitch_add_offloaded_rule(esw, &parse_attr->spec, attr);
if (IS_ERR(rule))
goto err_add_rule;
}
return rule;
err_add_rule:
@ -361,6 +391,7 @@ err_mod_hdr:
err_add_vlan:
if (attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP)
mlx5e_detach_encap(priv, flow);
err_attach_encap:
return rule;
}
@ -389,6 +420,8 @@ static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv,
struct mlx5e_encap_entry *e)
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
struct mlx5_esw_flow_attr *esw_attr;
struct mlx5e_tc_flow *flow;
int err;
@ -404,10 +437,9 @@ void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv,
mlx5e_rep_queue_neigh_stats_work(priv);
list_for_each_entry(flow, &e->flows, encap) {
flow->esw_attr->encap_id = e->encap_id;
flow->rule = mlx5e_tc_add_fdb_flow(priv,
flow->esw_attr->parse_attr,
flow);
esw_attr = flow->esw_attr;
esw_attr->encap_id = e->encap_id;
flow->rule = mlx5_eswitch_add_offloaded_rule(esw, &esw_attr->parse_attr->spec, esw_attr);
if (IS_ERR(flow->rule)) {
err = PTR_ERR(flow->rule);
mlx5_core_warn(priv->mdev, "Failed to update cached encapsulation flow, %d\n",
@ -421,15 +453,13 @@ void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv,
void mlx5e_tc_encap_flows_del(struct mlx5e_priv *priv,
struct mlx5e_encap_entry *e)
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
struct mlx5e_tc_flow *flow;
struct mlx5_fc *counter;
list_for_each_entry(flow, &e->flows, encap) {
if (flow->flags & MLX5E_TC_FLOW_OFFLOADED) {
flow->flags &= ~MLX5E_TC_FLOW_OFFLOADED;
counter = mlx5_flow_rule_counter(flow->rule);
mlx5_del_flow_rules(flow->rule);
mlx5_fc_destroy(priv->mdev, counter);
mlx5_eswitch_del_offloaded_rule(esw, flow->rule, flow->esw_attr);
}
}
@ -1942,7 +1972,7 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
if (is_tcf_mirred_egress_redirect(a)) {
int ifindex = tcf_mirred_ifindex(a);
struct net_device *out_dev, *encap_dev = NULL;
struct net_device *out_dev;
struct mlx5e_priv *out_priv;
out_dev = __dev_get_by_index(dev_net(priv->netdev), ifindex);
@ -1955,17 +1985,13 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
rpriv = out_priv->ppriv;
attr->out_rep = rpriv->rep;
} else if (encap) {
err = mlx5e_attach_encap(priv, info,
out_dev, &encap_dev, flow);
if (err && err != -EAGAIN)
return err;
parse_attr->mirred_ifindex = ifindex;
parse_attr->tun_info = *info;
attr->parse_attr = parse_attr;
attr->action |= MLX5_FLOW_CONTEXT_ACTION_ENCAP |
MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
MLX5_FLOW_CONTEXT_ACTION_COUNT;
out_priv = netdev_priv(encap_dev);
rpriv = out_priv->ppriv;
attr->out_rep = rpriv->rep;
attr->parse_attr = parse_attr;
/* attr->out_rep is resolved when we handle encap */
} else {
pr_err("devices %s %s not on same switch HW, can't offload forwarding\n",
priv->netdev->name, out_dev->name);
@ -2047,7 +2073,7 @@ int mlx5e_configure_flower(struct mlx5e_priv *priv,
if (flow->flags & MLX5E_TC_FLOW_ESWITCH) {
err = parse_tc_fdb_actions(priv, f->exts, parse_attr, flow);
if (err < 0)
goto err_handle_encap_flow;
goto err_free;
flow->rule = mlx5e_tc_add_fdb_flow(priv, parse_attr, flow);
} else {
err = parse_tc_nic_actions(priv, f->exts, parse_attr, flow);
@ -2058,10 +2084,13 @@ int mlx5e_configure_flower(struct mlx5e_priv *priv,
if (IS_ERR(flow->rule)) {
err = PTR_ERR(flow->rule);
goto err_free;
if (err != -EAGAIN)
goto err_free;
}
flow->flags |= MLX5E_TC_FLOW_OFFLOADED;
if (err != -EAGAIN)
flow->flags |= MLX5E_TC_FLOW_OFFLOADED;
err = rhashtable_insert_fast(&tc->ht, &flow->node,
tc->ht_params);
if (err)
@ -2075,16 +2104,6 @@ int mlx5e_configure_flower(struct mlx5e_priv *priv,
err_del_rule:
mlx5e_tc_del_flow(priv, flow);
err_handle_encap_flow:
if (err == -EAGAIN) {
err = rhashtable_insert_fast(&tc->ht, &flow->node,
tc->ht_params);
if (err)
mlx5e_tc_del_flow(priv, flow);
else
return 0;
}
err_free:
kvfree(parse_attr);
kfree(flow);


@ -356,10 +356,11 @@ void mlx5_drain_health_wq(struct mlx5_core_dev *dev)
void mlx5_drain_health_recovery(struct mlx5_core_dev *dev)
{
struct mlx5_core_health *health = &dev->priv.health;
unsigned long flags;
spin_lock(&health->wq_lock);
spin_lock_irqsave(&health->wq_lock, flags);
set_bit(MLX5_DROP_NEW_RECOVERY_WORK, &health->flags);
spin_unlock(&health->wq_lock);
spin_unlock_irqrestore(&health->wq_lock, flags);
cancel_delayed_work_sync(&dev->priv.health.recover_work);
}


@ -677,6 +677,27 @@ int mlx5_set_port_tc_group(struct mlx5_core_dev *mdev, u8 *tc_group)
}
EXPORT_SYMBOL_GPL(mlx5_set_port_tc_group);
int mlx5_query_port_tc_group(struct mlx5_core_dev *mdev,
u8 tc, u8 *tc_group)
{
u32 out[MLX5_ST_SZ_DW(qetc_reg)];
void *ets_tcn_conf;
int err;
err = mlx5_query_port_qetcr_reg(mdev, out, sizeof(out));
if (err)
return err;
ets_tcn_conf = MLX5_ADDR_OF(qetc_reg, out,
tc_configuration[tc]);
*tc_group = MLX5_GET(ets_tcn_config_reg, ets_tcn_conf,
group);
return 0;
}
EXPORT_SYMBOL_GPL(mlx5_query_port_tc_group);
int mlx5_set_port_tc_bw_alloc(struct mlx5_core_dev *mdev, u8 *tc_bw)
{
u32 in[MLX5_ST_SZ_DW(qetc_reg)] = {0};


@ -110,6 +110,8 @@ nfp_fl_output(struct nfp_fl_output *output, const struct tc_action *action,
*/
if (!switchdev_port_same_parent_id(in_dev, out_dev))
return -EOPNOTSUPP;
if (!nfp_netdev_is_nfp_repr(out_dev))
return -EOPNOTSUPP;
output->port = cpu_to_be32(nfp_repr_get_port_id(out_dev));
if (!output->port)


@ -74,7 +74,7 @@ static int dwc_eth_dwmac_config_dt(struct platform_device *pdev,
plat_dat->axi->axi_wr_osr_lmt--;
}
if (of_property_read_u32(np, "read,read-requests",
if (of_property_read_u32(np, "snps,read-requests",
&plat_dat->axi->axi_rd_osr_lmt)) {
/**
* Since the register has a reset value of 1, if property


@ -150,6 +150,13 @@ static void stmmac_mtl_setup(struct platform_device *pdev,
plat->rx_queues_to_use = 1;
plat->tx_queues_to_use = 1;
/* First Queue must always be in DCB mode. As MTL_QUEUE_DCB = 1 we need
* to always set this, otherwise Queue will be classified as AVB
* (because MTL_QUEUE_AVB = 0).
*/
plat->rx_queues_cfg[0].mode_to_use = MTL_QUEUE_DCB;
plat->tx_queues_cfg[0].mode_to_use = MTL_QUEUE_DCB;
rx_node = of_parse_phandle(pdev->dev.of_node, "snps,mtl-rx-config", 0);
if (!rx_node)
return;


@ -197,8 +197,8 @@ static int ipvtap_init(void)
{
int err;
err = tap_create_cdev(&ipvtap_cdev, &ipvtap_major, "ipvtap");
err = tap_create_cdev(&ipvtap_cdev, &ipvtap_major, "ipvtap",
THIS_MODULE);
if (err)
goto out1;


@ -204,8 +204,8 @@ static int macvtap_init(void)
{
int err;
err = tap_create_cdev(&macvtap_cdev, &macvtap_major, "macvtap");
err = tap_create_cdev(&macvtap_cdev, &macvtap_major, "macvtap",
THIS_MODULE);
if (err)
goto out1;


@ -517,6 +517,10 @@ static int tap_open(struct inode *inode, struct file *file)
&tap_proto, 0);
if (!q)
goto err;
if (skb_array_init(&q->skb_array, tap->dev->tx_queue_len, GFP_KERNEL)) {
sk_free(&q->sk);
goto err;
}
RCU_INIT_POINTER(q->sock.wq, &q->wq);
init_waitqueue_head(&q->wq.wait);
@ -540,22 +544,18 @@ static int tap_open(struct inode *inode, struct file *file)
if ((tap->dev->features & NETIF_F_HIGHDMA) && (tap->dev->features & NETIF_F_SG))
sock_set_flag(&q->sk, SOCK_ZEROCOPY);
err = -ENOMEM;
if (skb_array_init(&q->skb_array, tap->dev->tx_queue_len, GFP_KERNEL))
goto err_array;
err = tap_set_queue(tap, file, q);
if (err)
goto err_queue;
if (err) {
/* tap_sock_destruct() will take care of freeing skb_array */
goto err_put;
}
dev_put(tap->dev);
rtnl_unlock();
return err;
err_queue:
skb_array_cleanup(&q->skb_array);
err_array:
err_put:
sock_put(&q->sk);
err:
if (tap)
@ -1249,8 +1249,8 @@ static int tap_list_add(dev_t major, const char *device_name)
return 0;
}
int tap_create_cdev(struct cdev *tap_cdev,
dev_t *tap_major, const char *device_name)
int tap_create_cdev(struct cdev *tap_cdev, dev_t *tap_major,
const char *device_name, struct module *module)
{
int err;
@ -1259,6 +1259,7 @@ int tap_create_cdev(struct cdev *tap_cdev,
goto out1;
cdev_init(tap_cdev, &tap_fops);
tap_cdev->owner = module;
err = cdev_add(tap_cdev, *tap_major, TAP_NUM_DEVS);
if (err)
goto out2;


@ -1286,6 +1286,7 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,
buflen += SKB_DATA_ALIGN(len + pad);
rcu_read_unlock();
alloc_frag->offset = ALIGN((u64)alloc_frag->offset, SMP_CACHE_BYTES);
if (unlikely(!skb_page_frag_refill(buflen, alloc_frag, GFP_KERNEL)))
return ERR_PTR(-ENOMEM);
@ -2028,7 +2029,7 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
if (!dev)
return -ENOMEM;
err = dev_get_valid_name(net, dev, name);
if (err)
if (err < 0)
goto err_free_dev;
dev_net_set(dev, net);


@ -561,6 +561,7 @@ static const struct driver_info wwan_info = {
#define HP_VENDOR_ID 0x03f0
#define MICROSOFT_VENDOR_ID 0x045e
#define UBLOX_VENDOR_ID 0x1546
#define TPLINK_VENDOR_ID 0x2357
static const struct usb_device_id products[] = {
/* BLACKLIST !!
@ -813,6 +814,13 @@ static const struct usb_device_id products[] = {
.driver_info = 0,
},
/* TP-LINK UE300 USB 3.0 Ethernet Adapters (based on Realtek RTL8153) */
{
USB_DEVICE_AND_INTERFACE_INFO(TPLINK_VENDOR_ID, 0x0601, USB_CLASS_COMM,
USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
.driver_info = 0,
},
/* WHITELIST!!!
*
* CDC Ether uses two interfaces, not necessarily consecutive.
@ -863,6 +871,12 @@ static const struct usb_device_id products[] = {
USB_DEVICE_AND_INTERFACE_INFO(DELL_VENDOR_ID, 0x81ba, USB_CLASS_COMM,
USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
.driver_info = (kernel_ulong_t)&wwan_info,
}, {
/* Huawei ME906 and ME909 */
USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x15c1, USB_CLASS_COMM,
USB_CDC_SUBCLASS_ETHERNET,
USB_CDC_PROTO_NONE),
.driver_info = (unsigned long)&wwan_info,
}, {
/* ZTE modules */
USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, USB_CLASS_COMM,


@ -615,6 +615,7 @@ enum rtl8152_flags {
#define VENDOR_ID_LENOVO 0x17ef
#define VENDOR_ID_LINKSYS 0x13b1
#define VENDOR_ID_NVIDIA 0x0955
#define VENDOR_ID_TPLINK 0x2357
#define MCU_TYPE_PLA 0x0100
#define MCU_TYPE_USB 0x0000
@ -5319,6 +5320,7 @@ static const struct usb_device_id rtl8152_table[] = {
{REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x7214)},
{REALTEK_USB_DEVICE(VENDOR_ID_LINKSYS, 0x0041)},
{REALTEK_USB_DEVICE(VENDOR_ID_NVIDIA, 0x09ff)},
{REALTEK_USB_DEVICE(VENDOR_ID_TPLINK, 0x0601)},
{}
};


@ -73,8 +73,8 @@ void tap_del_queues(struct tap_dev *tap);
int tap_get_minor(dev_t major, struct tap_dev *tap);
void tap_free_minor(dev_t major, struct tap_dev *tap);
int tap_queue_resize(struct tap_dev *tap);
int tap_create_cdev(struct cdev *tap_cdev,
dev_t *tap_major, const char *device_name);
int tap_create_cdev(struct cdev *tap_cdev, dev_t *tap_major,
const char *device_name, struct module *module);
void tap_destroy_cdev(dev_t major, struct cdev *tap_cdev);
#endif /*_LINUX_IF_TAP_H_*/


@ -157,6 +157,8 @@ int mlx5_set_port_prio_tc(struct mlx5_core_dev *mdev, u8 *prio_tc);
int mlx5_query_port_prio_tc(struct mlx5_core_dev *mdev,
u8 prio, u8 *tc);
int mlx5_set_port_tc_group(struct mlx5_core_dev *mdev, u8 *tc_group);
int mlx5_query_port_tc_group(struct mlx5_core_dev *mdev,
u8 tc, u8 *tc_group);
int mlx5_set_port_tc_bw_alloc(struct mlx5_core_dev *mdev, u8 *tc_bw);
int mlx5_query_port_tc_bw_alloc(struct mlx5_core_dev *mdev,
u8 tc, u8 *bw_pct);


@ -231,7 +231,7 @@ struct sctp_datahdr {
__be32 tsn;
__be16 stream;
__be16 ssn;
__be32 ppid;
__u32 ppid;
__u8 payload[0];
};
@ -716,28 +716,28 @@ struct sctp_reconf_chunk {
struct sctp_strreset_outreq {
struct sctp_paramhdr param_hdr;
__u32 request_seq;
__u32 response_seq;
__u32 send_reset_at_tsn;
__u16 list_of_streams[0];
__be32 request_seq;
__be32 response_seq;
__be32 send_reset_at_tsn;
__be16 list_of_streams[0];
};
struct sctp_strreset_inreq {
struct sctp_paramhdr param_hdr;
__u32 request_seq;
__u16 list_of_streams[0];
__be32 request_seq;
__be16 list_of_streams[0];
};
struct sctp_strreset_tsnreq {
struct sctp_paramhdr param_hdr;
__u32 request_seq;
__be32 request_seq;
};
struct sctp_strreset_addstrm {
struct sctp_paramhdr param_hdr;
__u32 request_seq;
__u16 number_of_streams;
__u16 reserved;
__be32 request_seq;
__be16 number_of_streams;
__be16 reserved;
};
enum {
@ -752,16 +752,16 @@ enum {
struct sctp_strreset_resp {
struct sctp_paramhdr param_hdr;
__u32 response_seq;
__u32 result;
__be32 response_seq;
__be32 result;
};
struct sctp_strreset_resptsn {
struct sctp_paramhdr param_hdr;
__u32 response_seq;
__u32 result;
__u32 senders_next_tsn;
__u32 receivers_next_tsn;
__be32 response_seq;
__be32 result;
__be32 senders_next_tsn;
__be32 receivers_next_tsn;
};
#endif /* __LINUX_SCTP_H__ */


@ -146,6 +146,7 @@ static void fq_tin_enqueue(struct fq *fq,
fq_flow_get_default_t get_default_func)
{
struct fq_flow *flow;
bool oom;
lockdep_assert_held(&fq->lock);
@ -167,8 +168,8 @@ static void fq_tin_enqueue(struct fq *fq,
}
__skb_queue_tail(&flow->queue, skb);
if (fq->backlog > fq->limit || fq->memory_usage > fq->memory_limit) {
oom = (fq->memory_usage > fq->memory_limit);
while (fq->backlog > fq->limit || oom) {
flow = list_first_entry_or_null(&fq->backlogs,
struct fq_flow,
backlogchain);
@ -183,8 +184,10 @@ static void fq_tin_enqueue(struct fq *fq,
flow->tin->overlimit++;
fq->overlimit++;
if (fq->memory_usage > fq->memory_limit)
if (oom) {
fq->overmemory++;
oom = (fq->memory_usage > fq->memory_limit);
}
}
}


@ -132,6 +132,12 @@ static inline int inet_request_bound_dev_if(const struct sock *sk,
return sk->sk_bound_dev_if;
}
static inline struct ip_options_rcu *ireq_opt_deref(const struct inet_request_sock *ireq)
{
return rcu_dereference_check(ireq->ireq_opt,
refcount_read(&ireq->req.rsk_refcnt) > 0);
}
struct inet_cork {
unsigned int flags;
__be32 addr;


@ -2,6 +2,7 @@
#define __NET_PKT_CLS_H
#include <linux/pkt_cls.h>
#include <linux/workqueue.h>
#include <net/sch_generic.h>
#include <net/act_api.h>
@ -17,6 +18,8 @@ struct tcf_walker {
int register_tcf_proto_ops(struct tcf_proto_ops *ops);
int unregister_tcf_proto_ops(struct tcf_proto_ops *ops);
bool tcf_queue_work(struct work_struct *work);
#ifdef CONFIG_NET_CLS
struct tcf_chain *tcf_chain_get(struct tcf_block *block, u32 chain_index,
bool create);


@ -10,6 +10,7 @@
#include <linux/dynamic_queue_limits.h>
#include <linux/list.h>
#include <linux/refcount.h>
#include <linux/workqueue.h>
#include <net/gen_stats.h>
#include <net/rtnetlink.h>
@ -271,6 +272,7 @@ struct tcf_chain {
struct tcf_block {
struct list_head chain_list;
struct work_struct work;
};
static inline void qdisc_cb_private_validate(const struct sk_buff *skb, int sz)


@ -261,7 +261,7 @@ struct sctp_chunk *sctp_make_fwdtsn(const struct sctp_association *asoc,
struct sctp_fwdtsn_skip *skiplist);
struct sctp_chunk *sctp_make_auth(const struct sctp_association *asoc);
struct sctp_chunk *sctp_make_strreset_req(const struct sctp_association *asoc,
__u16 stream_num, __u16 *stream_list,
__u16 stream_num, __be16 *stream_list,
bool out, bool in);
struct sctp_chunk *sctp_make_strreset_tsnreq(
const struct sctp_association *asoc);


@ -130,7 +130,7 @@ struct sctp_ulpevent *sctp_ulpevent_make_sender_dry_event(
struct sctp_ulpevent *sctp_ulpevent_make_stream_reset_event(
const struct sctp_association *asoc, __u16 flags,
__u16 stream_num, __u16 *stream_list, gfp_t gfp);
__u16 stream_num, __be16 *stream_list, gfp_t gfp);
struct sctp_ulpevent *sctp_ulpevent_make_assoc_reset_event(
const struct sctp_association *asoc, __u16 flags,


@ -74,10 +74,9 @@ struct strparser {
u32 unrecov_intr : 1;
struct sk_buff **skb_nextp;
struct timer_list msg_timer;
struct sk_buff *skb_head;
unsigned int need_bytes;
struct delayed_work delayed_work;
struct delayed_work msg_timer_work;
struct work_struct work;
struct strp_stats stats;
struct strp_callbacks cb;


@ -844,6 +844,7 @@ struct tcp_skb_cb {
__u32 key;
__u32 flags;
struct bpf_map *map;
void *data_end;
} bpf;
};
};


@ -575,7 +575,7 @@ union bpf_attr {
* @map: pointer to sockmap
* @key: key to lookup sock in map
* @flags: reserved for future use
* Return: SK_REDIRECT
* Return: SK_PASS
*
* int bpf_sock_map_update(skops, map, key, flags)
* @skops: pointer to bpf_sock_ops
@ -786,8 +786,8 @@ struct xdp_md {
};
enum sk_action {
SK_ABORTED = 0,
SK_DROP,
SK_DROP = 0,
SK_PASS,
SK_REDIRECT,
};


@ -376,7 +376,7 @@ struct sctp_remote_error {
__u16 sre_type;
__u16 sre_flags;
__u32 sre_length;
__u16 sre_error;
__be16 sre_error;
sctp_assoc_t sre_assoc_id;
__u8 sre_data[0];
};


@ -93,6 +93,14 @@ static inline struct smap_psock *smap_psock_sk(const struct sock *sk)
return rcu_dereference_sk_user_data(sk);
}
/* compute the linear packet data range [data, data_end) for skb when
* sk_skb type programs are in use.
*/
static inline void bpf_compute_data_end_sk_skb(struct sk_buff *skb)
{
TCP_SKB_CB(skb)->bpf.data_end = skb->data + skb_headlen(skb);
}
static int smap_verdict_func(struct smap_psock *psock, struct sk_buff *skb)
{
struct bpf_prog *prog = READ_ONCE(psock->bpf_verdict);
@ -108,13 +116,14 @@ static int smap_verdict_func(struct smap_psock *psock, struct sk_buff *skb)
*/
TCP_SKB_CB(skb)->bpf.map = NULL;
skb->sk = psock->sock;
bpf_compute_data_end(skb);
bpf_compute_data_end_sk_skb(skb);
preempt_disable();
rc = (*prog->bpf_func)(skb, prog->insnsi);
preempt_enable();
skb->sk = NULL;
return rc;
return rc == SK_PASS ?
(TCP_SKB_CB(skb)->bpf.map ? SK_REDIRECT : SK_PASS) : SK_DROP;
}
static void smap_do_verdict(struct smap_psock *psock, struct sk_buff *skb)
@ -368,7 +377,7 @@ static int smap_parse_func_strparser(struct strparser *strp,
* any socket yet.
*/
skb->sk = psock->sock;
bpf_compute_data_end(skb);
bpf_compute_data_end_sk_skb(skb);
rc = (*prog->bpf_func)(skb, prog->insnsi);
skb->sk = NULL;
rcu_read_unlock();


@ -1844,14 +1844,15 @@ BPF_CALL_4(bpf_sk_redirect_map, struct sk_buff *, skb,
{
struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);
/* If user passes invalid input drop the packet. */
if (unlikely(flags))
return SK_ABORTED;
return SK_DROP;
tcb->bpf.key = key;
tcb->bpf.flags = flags;
tcb->bpf.map = map;
return SK_REDIRECT;
return SK_PASS;
}
struct sock *do_sk_redirect_map(struct sk_buff *skb)
@ -4243,6 +4244,31 @@ static u32 sock_ops_convert_ctx_access(enum bpf_access_type type,
return insn - insn_buf;
}
static u32 sk_skb_convert_ctx_access(enum bpf_access_type type,
const struct bpf_insn *si,
struct bpf_insn *insn_buf,
struct bpf_prog *prog, u32 *target_size)
{
struct bpf_insn *insn = insn_buf;
int off;
switch (si->off) {
case offsetof(struct __sk_buff, data_end):
off = si->off;
off -= offsetof(struct __sk_buff, data_end);
off += offsetof(struct sk_buff, cb);
off += offsetof(struct tcp_skb_cb, bpf.data_end);
*insn++ = BPF_LDX_MEM(BPF_SIZEOF(void *), si->dst_reg,
si->src_reg, off);
break;
default:
return bpf_convert_ctx_access(type, si, insn_buf, prog,
target_size);
}
return insn - insn_buf;
}
const struct bpf_verifier_ops sk_filter_prog_ops = {
.get_func_proto = sk_filter_func_proto,
.is_valid_access = sk_filter_is_valid_access,
@ -4301,7 +4327,7 @@ const struct bpf_verifier_ops sock_ops_prog_ops = {
const struct bpf_verifier_ops sk_skb_prog_ops = {
.get_func_proto = sk_skb_func_proto,
.is_valid_access = sk_skb_is_valid_access,
.convert_ctx_access = bpf_convert_ctx_access,
.convert_ctx_access = sk_skb_convert_ctx_access,
.gen_prologue = sk_skb_prologue,
};
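
With tcp_skb_cb gaining a bpf.data_end slot and sk_skb_convert_ctx_access mapping __sk_buff->data_end onto it, SK_SKB programs can bounds-check payload accesses the same way other skb programs do. A rough sketch of such a verdict program, assuming a libbpf-style bpf_helpers.h for the SEC() macro (not part of this patch set):

```c
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("sk_skb/verdict")
int example_verdict(struct __sk_buff *skb)
{
	void *data = (void *)(long)skb->data;
	void *data_end = (void *)(long)skb->data_end;

	/* the verifier requires this bounds check before reading payload */
	if (data + 4 > data_end)
		return SK_DROP;

	/* drop anything that does not start with a 'magic' byte */
	if (*(unsigned char *)data != 0x42)
		return SK_DROP;

	return SK_PASS;
}

char _license[] SEC("license") = "GPL";
```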


@ -495,7 +495,7 @@ static int dccp_v4_send_response(const struct sock *sk, struct request_sock *req
ireq->ir_rmt_addr);
err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr,
ireq->ir_rmt_addr,
rcu_dereference(ireq->ireq_opt));
ireq_opt_deref(ireq));
err = net_xmit_eval(err);
}


@ -496,14 +496,15 @@ static int dsa_cpu_parse(struct dsa_port *port, u32 index,
if (!ethernet)
return -EINVAL;
ethernet_dev = of_find_net_device_by_node(ethernet);
if (!ethernet_dev)
return -EPROBE_DEFER;
} else {
ethernet_dev = dsa_dev_to_net_device(ds->cd->netdev[index]);
if (!ethernet_dev)
return -EPROBE_DEFER;
dev_put(ethernet_dev);
}
if (!ethernet_dev)
return -EPROBE_DEFER;
if (!dst->cpu_dp) {
dst->cpu_dp = port;
dst->cpu_dp->netdev = ethernet_dev;


@ -543,7 +543,8 @@ struct dst_entry *inet_csk_route_req(const struct sock *sk,
struct ip_options_rcu *opt;
struct rtable *rt;
opt = rcu_dereference(ireq->ireq_opt);
opt = ireq_opt_deref(ireq);
flowi4_init_output(fl4, ireq->ir_iif, ireq->ir_mark,
RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
sk->sk_protocol, inet_sk_flowi_flags(sk),


@ -128,43 +128,68 @@ static struct rtnl_link_ops ipip_link_ops __read_mostly;
static int ipip_err(struct sk_buff *skb, u32 info)
{
/* All the routers (except for Linux) return only
8 bytes of packet payload. It means, that precise relaying of
ICMP in the real Internet is absolutely infeasible.
*/
/* All the routers (except for Linux) return only
* 8 bytes of packet payload. It means, that precise relaying of
* ICMP in the real Internet is absolutely infeasible.
*/
struct net *net = dev_net(skb->dev);
struct ip_tunnel_net *itn = net_generic(net, ipip_net_id);
const struct iphdr *iph = (const struct iphdr *)skb->data;
struct ip_tunnel *t;
int err;
const int type = icmp_hdr(skb)->type;
const int code = icmp_hdr(skb)->code;
struct ip_tunnel *t;
int err = 0;
switch (type) {
case ICMP_DEST_UNREACH:
switch (code) {
case ICMP_SR_FAILED:
/* Impossible event. */
goto out;
default:
/* All others are translated to HOST_UNREACH.
* rfc2003 contains "deep thoughts" about NET_UNREACH,
* I believe they are just ether pollution. --ANK
*/
break;
}
break;
case ICMP_TIME_EXCEEDED:
if (code != ICMP_EXC_TTL)
goto out;
break;
case ICMP_REDIRECT:
break;
default:
goto out;
}
err = -ENOENT;
t = ip_tunnel_lookup(itn, skb->dev->ifindex, TUNNEL_NO_KEY,
iph->daddr, iph->saddr, 0);
if (!t)
if (!t) {
err = -ENOENT;
goto out;
}
if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) {
ipv4_update_pmtu(skb, dev_net(skb->dev), info,
t->parms.link, 0, iph->protocol, 0);
err = 0;
ipv4_update_pmtu(skb, net, info, t->parms.link, 0,
iph->protocol, 0);
goto out;
}
if (type == ICMP_REDIRECT) {
ipv4_redirect(skb, dev_net(skb->dev), t->parms.link, 0,
iph->protocol, 0);
err = 0;
ipv4_redirect(skb, net, t->parms.link, 0, iph->protocol, 0);
goto out;
}
if (t->parms.iph.daddr == 0)
if (t->parms.iph.daddr == 0) {
err = -ENOENT;
goto out;
}
err = 0;
if (t->parms.iph.ttl == 0 && type == ICMP_TIME_EXCEEDED)
goto out;


@ -877,7 +877,7 @@ static int tcp_v4_send_synack(const struct sock *sk, struct dst_entry *dst,
err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr,
ireq->ir_rmt_addr,
rcu_dereference(ireq->ireq_opt));
ireq_opt_deref(ireq));
err = net_xmit_eval(err);
}


@ -739,8 +739,10 @@ static void tcp_tsq_handler(struct sock *sk)
struct tcp_sock *tp = tcp_sk(sk);
if (tp->lost_out > tp->retrans_out &&
tp->snd_cwnd > tcp_packets_in_flight(tp))
tp->snd_cwnd > tcp_packets_in_flight(tp)) {
tcp_mstamp_refresh(tp);
tcp_xmit_retransmit_queue(sk);
}
tcp_write_xmit(sk, tcp_current_mss(sk), tp->nonagle,
0, GFP_ATOMIC);
@ -2237,6 +2239,7 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
sent_pkts = 0;
tcp_mstamp_refresh(tp);
if (!push_one) {
/* Do MTU probing. */
result = tcp_mtu_probe(sk);
@ -2248,7 +2251,6 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
}
max_segs = tcp_tso_segs(sk, mss_now);
tcp_mstamp_refresh(tp);
while ((skb = tcp_send_head(sk))) {
unsigned int limit;
@ -2841,8 +2843,10 @@ int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs)
nskb = __pskb_copy(skb, MAX_TCP_HEADER, GFP_ATOMIC);
err = nskb ? tcp_transmit_skb(sk, nskb, 0, GFP_ATOMIC) :
-ENOBUFS;
if (!err)
if (!err) {
skb->skb_mstamp = tp->tcp_mstamp;
tcp_rate_skb_sent(sk, skb);
}
} else {
err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
}


@ -408,13 +408,16 @@ static void ip6gre_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
case ICMPV6_DEST_UNREACH:
net_dbg_ratelimited("%s: Path to destination invalid or inactive!\n",
t->parms.name);
break;
if (code != ICMPV6_PORT_UNREACH)
break;
return;
case ICMPV6_TIME_EXCEED:
if (code == ICMPV6_EXC_HOPLIMIT) {
net_dbg_ratelimited("%s: Too small hop limit or routing loop in tunnel!\n",
t->parms.name);
break;
}
break;
return;
case ICMPV6_PARAMPROB:
teli = 0;
if (code == ICMPV6_HDR_FIELD)
@ -430,7 +433,7 @@ static void ip6gre_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
net_dbg_ratelimited("%s: Recipient unable to parse tunneled packet!\n",
t->parms.name);
}
break;
return;
case ICMPV6_PKT_TOOBIG:
mtu = be32_to_cpu(info) - offset - t->tun_hlen;
if (t->dev->type == ARPHRD_ETHER)
@ -438,7 +441,7 @@ static void ip6gre_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
if (mtu < IPV6_MIN_MTU)
mtu = IPV6_MIN_MTU;
t->dev->mtu = mtu;
break;
return;
}
if (time_before(jiffies, t->err_time + IP6TUNNEL_ERR_TIMEO))
@ -500,8 +503,8 @@ static netdev_tx_t __gre6_xmit(struct sk_buff *skb,
__u32 *pmtu, __be16 proto)
{
struct ip6_tnl *tunnel = netdev_priv(dev);
__be16 protocol = (dev->type == ARPHRD_ETHER) ?
htons(ETH_P_TEB) : proto;
struct dst_entry *dst = skb_dst(skb);
__be16 protocol;
if (dev->type == ARPHRD_ETHER)
IPCB(skb)->flags = 0;
@ -515,9 +518,14 @@ static netdev_tx_t __gre6_xmit(struct sk_buff *skb,
tunnel->o_seqno++;
/* Push GRE header. */
protocol = (dev->type == ARPHRD_ETHER) ? htons(ETH_P_TEB) : proto;
gre_build_header(skb, tunnel->tun_hlen, tunnel->parms.o_flags,
protocol, tunnel->parms.o_key, htonl(tunnel->o_seqno));
/* TooBig packet may have updated dst->dev's mtu */
if (dst && dst_mtu(dst) > dst->dev->mtu)
dst->ops->update_pmtu(dst, NULL, skb, dst->dev->mtu);
return ip6_tnl_xmit(skb, dev, dsfield, fl6, encap_limit, pmtu,
NEXTHDR_GRE);
}


@ -2727,12 +2727,6 @@ static int ieee80211_set_bitrate_mask(struct wiphy *wiphy,
if (!ieee80211_sdata_running(sdata))
return -ENETDOWN;
if (ieee80211_hw_check(&local->hw, HAS_RATE_CONTROL)) {
ret = drv_set_bitrate_mask(local, sdata, mask);
if (ret)
return ret;
}
/*
* If active validate the setting and reject it if it doesn't leave
* at least one basic rate usable, since we really have to be able
@ -2748,6 +2742,12 @@ static int ieee80211_set_bitrate_mask(struct wiphy *wiphy,
return -EINVAL;
}
if (ieee80211_hw_check(&local->hw, HAS_RATE_CONTROL)) {
ret = drv_set_bitrate_mask(local, sdata, mask);
if (ret)
return ret;
}
for (i = 0; i < NUM_NL80211_BANDS; i++) {
struct ieee80211_supported_band *sband = wiphy->bands[i];
int j;


@ -19,6 +19,7 @@
#include <linux/slab.h>
#include <linux/export.h>
#include <net/mac80211.h>
#include <crypto/algapi.h>
#include <asm/unaligned.h>
#include "ieee80211_i.h"
#include "driver-ops.h"
@ -609,6 +610,39 @@ void ieee80211_key_free_unused(struct ieee80211_key *key)
ieee80211_key_free_common(key);
}
static bool ieee80211_key_identical(struct ieee80211_sub_if_data *sdata,
struct ieee80211_key *old,
struct ieee80211_key *new)
{
u8 tkip_old[WLAN_KEY_LEN_TKIP], tkip_new[WLAN_KEY_LEN_TKIP];
u8 *tk_old, *tk_new;
if (!old || new->conf.keylen != old->conf.keylen)
return false;
tk_old = old->conf.key;
tk_new = new->conf.key;
/*
* In station mode, don't compare the TX MIC key, as it's never used
* and offloaded rekeying may not care to send it to the host. This
* is the case in iwlwifi, for example.
*/
if (sdata->vif.type == NL80211_IFTYPE_STATION &&
new->conf.cipher == WLAN_CIPHER_SUITE_TKIP &&
new->conf.keylen == WLAN_KEY_LEN_TKIP &&
!(new->conf.flags & IEEE80211_KEY_FLAG_PAIRWISE)) {
memcpy(tkip_old, tk_old, WLAN_KEY_LEN_TKIP);
memcpy(tkip_new, tk_new, WLAN_KEY_LEN_TKIP);
memset(tkip_old + NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY, 0, 8);
memset(tkip_new + NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY, 0, 8);
tk_old = tkip_old;
tk_new = tkip_new;
}
return !crypto_memneq(tk_old, tk_new, new->conf.keylen);
}
int ieee80211_key_link(struct ieee80211_key *key,
struct ieee80211_sub_if_data *sdata,
struct sta_info *sta)
@ -634,8 +668,7 @@ int ieee80211_key_link(struct ieee80211_key *key,
* Silently accept key re-installation without really installing the
* new version of the key to avoid nonce reuse or replay issues.
*/
if (old_key && key->conf.keylen == old_key->conf.keylen &&
!memcmp(key->conf.key, old_key->conf.key, key->conf.keylen)) {
if (ieee80211_key_identical(sdata, old_key, key)) {
ieee80211_key_free_unused(key);
ret = 0;
goto out;


@ -661,13 +661,15 @@ int rds_ib_xmit(struct rds_connection *conn, struct rds_message *rm,
}
}
rds_ib_set_wr_signal_state(ic, send, 0);
rds_ib_set_wr_signal_state(ic, send, false);
/*
* Always signal the last one if we're stopping due to flow control.
*/
if (ic->i_flowctl && flow_controlled && i == (work_alloc-1))
send->s_wr.send_flags |= IB_SEND_SIGNALED | IB_SEND_SOLICITED;
if (ic->i_flowctl && flow_controlled && i == (work_alloc - 1)) {
rds_ib_set_wr_signal_state(ic, send, true);
send->s_wr.send_flags |= IB_SEND_SOLICITED;
}
if (send->s_wr.send_flags & IB_SEND_SIGNALED)
nr_sig++;
@ -705,11 +707,8 @@ int rds_ib_xmit(struct rds_connection *conn, struct rds_message *rm,
if (scat == &rm->data.op_sg[rm->data.op_count]) {
prev->s_op = ic->i_data_op;
prev->s_wr.send_flags |= IB_SEND_SOLICITED;
if (!(prev->s_wr.send_flags & IB_SEND_SIGNALED)) {
ic->i_unsignaled_wrs = rds_ib_sysctl_max_unsig_wrs;
prev->s_wr.send_flags |= IB_SEND_SIGNALED;
nr_sig++;
}
if (!(prev->s_wr.send_flags & IB_SEND_SIGNALED))
nr_sig += rds_ib_set_wr_signal_state(ic, prev, true);
ic->i_data_op = NULL;
}
@ -792,6 +791,7 @@ int rds_ib_xmit_atomic(struct rds_connection *conn, struct rm_atomic_op *op)
send->s_atomic_wr.compare_add_mask = op->op_m_fadd.nocarry_mask;
send->s_atomic_wr.swap_mask = 0;
}
send->s_wr.send_flags = 0;
nr_sig = rds_ib_set_wr_signal_state(ic, send, op->op_notify);
send->s_atomic_wr.wr.num_sge = 1;
send->s_atomic_wr.wr.next = NULL;


@ -264,6 +264,7 @@ static int __init sample_init_module(void)
static void __exit sample_cleanup_module(void)
{
rcu_barrier();
tcf_unregister_action(&act_sample_ops, &sample_net_ops);
}


@ -77,6 +77,8 @@ out:
}
EXPORT_SYMBOL(register_tcf_proto_ops);
static struct workqueue_struct *tc_filter_wq;
int unregister_tcf_proto_ops(struct tcf_proto_ops *ops)
{
struct tcf_proto_ops *t;
@ -86,6 +88,7 @@ int unregister_tcf_proto_ops(struct tcf_proto_ops *ops)
* tcf_proto_ops's destroy() handler.
*/
rcu_barrier();
flush_workqueue(tc_filter_wq);
write_lock(&cls_mod_lock);
list_for_each_entry(t, &tcf_proto_base, head) {
@ -100,6 +103,12 @@ int unregister_tcf_proto_ops(struct tcf_proto_ops *ops)
}
EXPORT_SYMBOL(unregister_tcf_proto_ops);
bool tcf_queue_work(struct work_struct *work)
{
return queue_work(tc_filter_wq, work);
}
EXPORT_SYMBOL(tcf_queue_work);
/* Select new prio value from the range, managed by kernel. */
static inline u32 tcf_auto_prio(struct tcf_proto *tp)
@ -266,23 +275,30 @@ err_chain_create:
}
EXPORT_SYMBOL(tcf_block_get);
void tcf_block_put(struct tcf_block *block)
static void tcf_block_put_final(struct work_struct *work)
{
struct tcf_block *block = container_of(work, struct tcf_block, work);
struct tcf_chain *chain, *tmp;
if (!block)
return;
/* At this point, all the chains should have refcnt == 1. */
rtnl_lock();
list_for_each_entry_safe(chain, tmp, &block->chain_list, list)
tcf_chain_put(chain);
rtnl_unlock();
kfree(block);
}
/* XXX: Standalone actions are not allowed to jump to any chain, and
* bound actions should be all removed after flushing. However,
* filters are destroyed in RCU callbacks, we have to hold the chains
* first, otherwise we would always race with RCU callbacks on this list
* without proper locking.
*/
/* Wait for existing RCU callbacks to cool down. */
rcu_barrier();
/* XXX: Standalone actions are not allowed to jump to any chain, and bound
* actions should be all removed after flushing. However, filters are destroyed
* in RCU callbacks, we have to hold the chains first, otherwise we would
* always race with RCU callbacks on this list without proper locking.
*/
static void tcf_block_put_deferred(struct work_struct *work)
{
struct tcf_block *block = container_of(work, struct tcf_block, work);
struct tcf_chain *chain;
rtnl_lock();
/* Hold a refcnt for all chains, except 0, in case they are gone. */
list_for_each_entry(chain, &block->chain_list, list)
if (chain->index)
@ -292,13 +308,27 @@ void tcf_block_put(struct tcf_block *block)
list_for_each_entry(chain, &block->chain_list, list)
tcf_chain_flush(chain);
/* Wait for RCU callbacks to release the reference count. */
INIT_WORK(&block->work, tcf_block_put_final);
/* Wait for RCU callbacks to release the reference count and make
* sure their works have been queued before this.
*/
rcu_barrier();
tcf_queue_work(&block->work);
rtnl_unlock();
}
/* At this point, all the chains should have refcnt == 1. */
list_for_each_entry_safe(chain, tmp, &block->chain_list, list)
tcf_chain_put(chain);
kfree(block);
void tcf_block_put(struct tcf_block *block)
{
if (!block)
return;
INIT_WORK(&block->work, tcf_block_put_deferred);
/* Wait for existing RCU callbacks to cool down, make sure their works
* have been queued before this. We can not flush pending works here
* because we are holding the RTNL lock.
*/
rcu_barrier();
tcf_queue_work(&block->work);
}
EXPORT_SYMBOL(tcf_block_put);
@ -879,6 +909,7 @@ void tcf_exts_destroy(struct tcf_exts *exts)
#ifdef CONFIG_NET_CLS_ACT
LIST_HEAD(actions);
ASSERT_RTNL();
tcf_exts_to_list(exts, &actions);
tcf_action_destroy(&actions, TCA_ACT_UNBIND);
kfree(exts->actions);
@ -1030,6 +1061,10 @@ EXPORT_SYMBOL(tcf_exts_get_dev);
static int __init tc_filter_init(void)
{
tc_filter_wq = alloc_ordered_workqueue("tc_filter_workqueue", 0);
if (!tc_filter_wq)
return -ENOMEM;
rtnl_register(PF_UNSPEC, RTM_NEWTFILTER, tc_ctl_tfilter, NULL, 0);
rtnl_register(PF_UNSPEC, RTM_DELTFILTER, tc_ctl_tfilter, NULL, 0);
rtnl_register(PF_UNSPEC, RTM_GETTFILTER, tc_ctl_tfilter,


@ -34,7 +34,10 @@ struct basic_filter {
struct tcf_result res;
struct tcf_proto *tp;
struct list_head link;
struct rcu_head rcu;
union {
struct work_struct work;
struct rcu_head rcu;
};
};
static int basic_classify(struct sk_buff *skb, const struct tcf_proto *tp,
@ -82,13 +85,24 @@ static int basic_init(struct tcf_proto *tp)
return 0;
}
static void basic_delete_filter_work(struct work_struct *work)
{
struct basic_filter *f = container_of(work, struct basic_filter, work);
rtnl_lock();
tcf_exts_destroy(&f->exts);
tcf_em_tree_destroy(&f->ematches);
rtnl_unlock();
kfree(f);
}
static void basic_delete_filter(struct rcu_head *head)
{
struct basic_filter *f = container_of(head, struct basic_filter, rcu);
tcf_exts_destroy(&f->exts);
tcf_em_tree_destroy(&f->ematches);
kfree(f);
INIT_WORK(&f->work, basic_delete_filter_work);
tcf_queue_work(&f->work);
}
static void basic_destroy(struct tcf_proto *tp)
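
The same conversion is applied, almost verbatim, to every classifier that follows (the bpf, cgroup, flow, flower, fw, matchall, route4, rsvp, tcindex and u32 filters): the rcu_head moves into a union with a work_struct, the RCU callback shrinks to queueing a work item, and the work handler takes the RTNL lock before tearing down the actions. A condensed sketch of the shape, with a hypothetical example_filter standing in for the per-classifier structs:

#include <linux/rtnetlink.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <net/pkt_cls.h>

struct example_filter {
	struct tcf_exts exts;
	union {
		/* never needed at the same time: the rcu_head has done its
		 * job before the work_struct is initialized
		 */
		struct work_struct work;
		struct rcu_head rcu;
	};
};

/* Process context on tc_filter_wq: taking the RTNL lock is allowed here. */
static void example_delete_filter_work(struct work_struct *work)
{
	struct example_filter *f = container_of(work, struct example_filter, work);

	rtnl_lock();
	tcf_exts_destroy(&f->exts);
	rtnl_unlock();
	kfree(f);
}

/* Softirq context: must not sleep, so only hand the object to the queue. */
static void example_delete_filter_rcu(struct rcu_head *head)
{
	struct example_filter *f = container_of(head, struct example_filter, rcu);

	INIT_WORK(&f->work, example_delete_filter_work);
	tcf_queue_work(&f->work);
}
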

View File

@ -49,7 +49,10 @@ struct cls_bpf_prog {
struct sock_filter *bpf_ops;
const char *bpf_name;
struct tcf_proto *tp;
struct rcu_head rcu;
union {
struct work_struct work;
struct rcu_head rcu;
};
};
static const struct nla_policy bpf_policy[TCA_BPF_MAX + 1] = {
@ -257,9 +260,21 @@ static void __cls_bpf_delete_prog(struct cls_bpf_prog *prog)
kfree(prog);
}
static void cls_bpf_delete_prog_work(struct work_struct *work)
{
struct cls_bpf_prog *prog = container_of(work, struct cls_bpf_prog, work);
rtnl_lock();
__cls_bpf_delete_prog(prog);
rtnl_unlock();
}
static void cls_bpf_delete_prog_rcu(struct rcu_head *rcu)
{
__cls_bpf_delete_prog(container_of(rcu, struct cls_bpf_prog, rcu));
struct cls_bpf_prog *prog = container_of(rcu, struct cls_bpf_prog, rcu);
INIT_WORK(&prog->work, cls_bpf_delete_prog_work);
tcf_queue_work(&prog->work);
}
static void __cls_bpf_delete(struct tcf_proto *tp, struct cls_bpf_prog *prog)

View File

@ -23,7 +23,10 @@ struct cls_cgroup_head {
struct tcf_exts exts;
struct tcf_ematch_tree ematches;
struct tcf_proto *tp;
struct rcu_head rcu;
union {
struct work_struct work;
struct rcu_head rcu;
};
};
static int cls_cgroup_classify(struct sk_buff *skb, const struct tcf_proto *tp,
@ -57,15 +60,26 @@ static const struct nla_policy cgroup_policy[TCA_CGROUP_MAX + 1] = {
[TCA_CGROUP_EMATCHES] = { .type = NLA_NESTED },
};
static void cls_cgroup_destroy_work(struct work_struct *work)
{
struct cls_cgroup_head *head = container_of(work,
struct cls_cgroup_head,
work);
rtnl_lock();
tcf_exts_destroy(&head->exts);
tcf_em_tree_destroy(&head->ematches);
kfree(head);
rtnl_unlock();
}
static void cls_cgroup_destroy_rcu(struct rcu_head *root)
{
struct cls_cgroup_head *head = container_of(root,
struct cls_cgroup_head,
rcu);
tcf_exts_destroy(&head->exts);
tcf_em_tree_destroy(&head->ematches);
kfree(head);
INIT_WORK(&head->work, cls_cgroup_destroy_work);
tcf_queue_work(&head->work);
}
static int cls_cgroup_change(struct net *net, struct sk_buff *in_skb,

View File

@ -57,7 +57,10 @@ struct flow_filter {
u32 divisor;
u32 baseclass;
u32 hashrnd;
struct rcu_head rcu;
union {
struct work_struct work;
struct rcu_head rcu;
};
};
static inline u32 addr_fold(void *addr)
@ -369,14 +372,24 @@ static const struct nla_policy flow_policy[TCA_FLOW_MAX + 1] = {
[TCA_FLOW_PERTURB] = { .type = NLA_U32 },
};
static void flow_destroy_filter(struct rcu_head *head)
static void flow_destroy_filter_work(struct work_struct *work)
{
struct flow_filter *f = container_of(head, struct flow_filter, rcu);
struct flow_filter *f = container_of(work, struct flow_filter, work);
rtnl_lock();
del_timer_sync(&f->perturb_timer);
tcf_exts_destroy(&f->exts);
tcf_em_tree_destroy(&f->ematches);
kfree(f);
rtnl_unlock();
}
static void flow_destroy_filter(struct rcu_head *head)
{
struct flow_filter *f = container_of(head, struct flow_filter, rcu);
INIT_WORK(&f->work, flow_destroy_filter_work);
tcf_queue_work(&f->work);
}
static int flow_change(struct net *net, struct sk_buff *in_skb,

View File

@ -87,7 +87,10 @@ struct cls_fl_filter {
struct list_head list;
u32 handle;
u32 flags;
struct rcu_head rcu;
union {
struct work_struct work;
struct rcu_head rcu;
};
struct net_device *hw_dev;
};
@ -215,12 +218,22 @@ static int fl_init(struct tcf_proto *tp)
return 0;
}
static void fl_destroy_filter_work(struct work_struct *work)
{
struct cls_fl_filter *f = container_of(work, struct cls_fl_filter, work);
rtnl_lock();
tcf_exts_destroy(&f->exts);
kfree(f);
rtnl_unlock();
}
static void fl_destroy_filter(struct rcu_head *head)
{
struct cls_fl_filter *f = container_of(head, struct cls_fl_filter, rcu);
tcf_exts_destroy(&f->exts);
kfree(f);
INIT_WORK(&f->work, fl_destroy_filter_work);
tcf_queue_work(&f->work);
}
static void fl_hw_destroy_filter(struct tcf_proto *tp, struct cls_fl_filter *f)

View File

@ -46,7 +46,10 @@ struct fw_filter {
#endif /* CONFIG_NET_CLS_IND */
struct tcf_exts exts;
struct tcf_proto *tp;
struct rcu_head rcu;
union {
struct work_struct work;
struct rcu_head rcu;
};
};
static u32 fw_hash(u32 handle)
@ -119,12 +122,22 @@ static int fw_init(struct tcf_proto *tp)
return 0;
}
static void fw_delete_filter_work(struct work_struct *work)
{
struct fw_filter *f = container_of(work, struct fw_filter, work);
rtnl_lock();
tcf_exts_destroy(&f->exts);
kfree(f);
rtnl_unlock();
}
static void fw_delete_filter(struct rcu_head *head)
{
struct fw_filter *f = container_of(head, struct fw_filter, rcu);
tcf_exts_destroy(&f->exts);
kfree(f);
INIT_WORK(&f->work, fw_delete_filter_work);
tcf_queue_work(&f->work);
}
static void fw_destroy(struct tcf_proto *tp)

View File

@ -21,7 +21,10 @@ struct cls_mall_head {
struct tcf_result res;
u32 handle;
u32 flags;
struct rcu_head rcu;
union {
struct work_struct work;
struct rcu_head rcu;
};
};
static int mall_classify(struct sk_buff *skb, const struct tcf_proto *tp,
@ -41,13 +44,23 @@ static int mall_init(struct tcf_proto *tp)
return 0;
}
static void mall_destroy_work(struct work_struct *work)
{
struct cls_mall_head *head = container_of(work, struct cls_mall_head,
work);
rtnl_lock();
tcf_exts_destroy(&head->exts);
kfree(head);
rtnl_unlock();
}
static void mall_destroy_rcu(struct rcu_head *rcu)
{
struct cls_mall_head *head = container_of(rcu, struct cls_mall_head,
rcu);
tcf_exts_destroy(&head->exts);
kfree(head);
INIT_WORK(&head->work, mall_destroy_work);
tcf_queue_work(&head->work);
}
static int mall_replace_hw_filter(struct tcf_proto *tp,

View File

@ -57,7 +57,10 @@ struct route4_filter {
u32 handle;
struct route4_bucket *bkt;
struct tcf_proto *tp;
struct rcu_head rcu;
union {
struct work_struct work;
struct rcu_head rcu;
};
};
#define ROUTE4_FAILURE ((struct route4_filter *)(-1L))
@ -254,12 +257,22 @@ static int route4_init(struct tcf_proto *tp)
return 0;
}
static void route4_delete_filter_work(struct work_struct *work)
{
struct route4_filter *f = container_of(work, struct route4_filter, work);
rtnl_lock();
tcf_exts_destroy(&f->exts);
kfree(f);
rtnl_unlock();
}
static void route4_delete_filter(struct rcu_head *head)
{
struct route4_filter *f = container_of(head, struct route4_filter, rcu);
tcf_exts_destroy(&f->exts);
kfree(f);
INIT_WORK(&f->work, route4_delete_filter_work);
tcf_queue_work(&f->work);
}
static void route4_destroy(struct tcf_proto *tp)

View File

@ -97,7 +97,10 @@ struct rsvp_filter {
u32 handle;
struct rsvp_session *sess;
struct rcu_head rcu;
union {
struct work_struct work;
struct rcu_head rcu;
};
};
static inline unsigned int hash_dst(__be32 *dst, u8 protocol, u8 tunnelid)
@ -282,12 +285,22 @@ static int rsvp_init(struct tcf_proto *tp)
return -ENOBUFS;
}
static void rsvp_delete_filter_work(struct work_struct *work)
{
struct rsvp_filter *f = container_of(work, struct rsvp_filter, work);
rtnl_lock();
tcf_exts_destroy(&f->exts);
kfree(f);
rtnl_unlock();
}
static void rsvp_delete_filter_rcu(struct rcu_head *head)
{
struct rsvp_filter *f = container_of(head, struct rsvp_filter, rcu);
tcf_exts_destroy(&f->exts);
kfree(f);
INIT_WORK(&f->work, rsvp_delete_filter_work);
tcf_queue_work(&f->work);
}
static void rsvp_delete_filter(struct tcf_proto *tp, struct rsvp_filter *f)

View File

@ -27,14 +27,20 @@
struct tcindex_filter_result {
struct tcf_exts exts;
struct tcf_result res;
struct rcu_head rcu;
union {
struct work_struct work;
struct rcu_head rcu;
};
};
struct tcindex_filter {
u16 key;
struct tcindex_filter_result result;
struct tcindex_filter __rcu *next;
struct rcu_head rcu;
union {
struct work_struct work;
struct rcu_head rcu;
};
};
@ -133,12 +139,34 @@ static int tcindex_init(struct tcf_proto *tp)
return 0;
}
static void tcindex_destroy_rexts_work(struct work_struct *work)
{
struct tcindex_filter_result *r;
r = container_of(work, struct tcindex_filter_result, work);
rtnl_lock();
tcf_exts_destroy(&r->exts);
rtnl_unlock();
}
static void tcindex_destroy_rexts(struct rcu_head *head)
{
struct tcindex_filter_result *r;
r = container_of(head, struct tcindex_filter_result, rcu);
tcf_exts_destroy(&r->exts);
INIT_WORK(&r->work, tcindex_destroy_rexts_work);
tcf_queue_work(&r->work);
}
static void tcindex_destroy_fexts_work(struct work_struct *work)
{
struct tcindex_filter *f = container_of(work, struct tcindex_filter,
work);
rtnl_lock();
tcf_exts_destroy(&f->result.exts);
kfree(f);
rtnl_unlock();
}
static void tcindex_destroy_fexts(struct rcu_head *head)
@ -146,8 +174,8 @@ static void tcindex_destroy_fexts(struct rcu_head *head)
struct tcindex_filter *f = container_of(head, struct tcindex_filter,
rcu);
tcf_exts_destroy(&f->result.exts);
kfree(f);
INIT_WORK(&f->work, tcindex_destroy_fexts_work);
tcf_queue_work(&f->work);
}
static int tcindex_delete(struct tcf_proto *tp, void *arg, bool *last)

View File

@ -68,7 +68,10 @@ struct tc_u_knode {
u32 __percpu *pcpu_success;
#endif
struct tcf_proto *tp;
struct rcu_head rcu;
union {
struct work_struct work;
struct rcu_head rcu;
};
/* The 'sel' field MUST be the last field in structure to allow for
* tc_u32_keys allocated at end of structure.
*/
@ -418,11 +421,21 @@ static int u32_destroy_key(struct tcf_proto *tp, struct tc_u_knode *n,
* this the u32_delete_key_rcu variant does not free the percpu
* statistics.
*/
static void u32_delete_key_work(struct work_struct *work)
{
struct tc_u_knode *key = container_of(work, struct tc_u_knode, work);
rtnl_lock();
u32_destroy_key(key->tp, key, false);
rtnl_unlock();
}
static void u32_delete_key_rcu(struct rcu_head *rcu)
{
struct tc_u_knode *key = container_of(rcu, struct tc_u_knode, rcu);
u32_destroy_key(key->tp, key, false);
INIT_WORK(&key->work, u32_delete_key_work);
tcf_queue_work(&key->work);
}
/* u32_delete_key_freepf_rcu is the rcu callback variant
@ -432,11 +445,21 @@ static void u32_delete_key_rcu(struct rcu_head *rcu)
* for the variant that should be used with keys return from
* u32_init_knode()
*/
static void u32_delete_key_freepf_work(struct work_struct *work)
{
struct tc_u_knode *key = container_of(work, struct tc_u_knode, work);
rtnl_lock();
u32_destroy_key(key->tp, key, true);
rtnl_unlock();
}
static void u32_delete_key_freepf_rcu(struct rcu_head *rcu)
{
struct tc_u_knode *key = container_of(rcu, struct tc_u_knode, rcu);
u32_destroy_key(key->tp, key, true);
INIT_WORK(&key->work, u32_delete_key_freepf_work);
tcf_queue_work(&key->work);
}
static int u32_delete_key(struct tcf_proto *tp, struct tc_u_knode *key)

View File

@ -301,6 +301,8 @@ struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle)
{
struct Qdisc *q;
if (!handle)
return NULL;
q = qdisc_match_from_root(dev->qdisc, handle);
if (q)
goto out;

View File

@ -794,7 +794,7 @@ hit:
struct sctp_hash_cmp_arg {
const union sctp_addr *paddr;
const struct net *net;
u16 lport;
__be16 lport;
};
static inline int sctp_hash_cmp(struct rhashtable_compare_arg *arg,
@ -820,37 +820,37 @@ out:
return err;
}
static inline u32 sctp_hash_obj(const void *data, u32 len, u32 seed)
static inline __u32 sctp_hash_obj(const void *data, u32 len, u32 seed)
{
const struct sctp_transport *t = data;
const union sctp_addr *paddr = &t->ipaddr;
const struct net *net = sock_net(t->asoc->base.sk);
u16 lport = htons(t->asoc->base.bind_addr.port);
u32 addr;
__be16 lport = htons(t->asoc->base.bind_addr.port);
__u32 addr;
if (paddr->sa.sa_family == AF_INET6)
addr = jhash(&paddr->v6.sin6_addr, 16, seed);
else
addr = paddr->v4.sin_addr.s_addr;
addr = (__force __u32)paddr->v4.sin_addr.s_addr;
return jhash_3words(addr, ((__u32)paddr->v4.sin_port) << 16 |
return jhash_3words(addr, ((__force __u32)paddr->v4.sin_port) << 16 |
(__force __u32)lport, net_hash_mix(net), seed);
}
static inline u32 sctp_hash_key(const void *data, u32 len, u32 seed)
static inline __u32 sctp_hash_key(const void *data, u32 len, u32 seed)
{
const struct sctp_hash_cmp_arg *x = data;
const union sctp_addr *paddr = x->paddr;
const struct net *net = x->net;
u16 lport = x->lport;
u32 addr;
__be16 lport = x->lport;
__u32 addr;
if (paddr->sa.sa_family == AF_INET6)
addr = jhash(&paddr->v6.sin6_addr, 16, seed);
else
addr = paddr->v4.sin_addr.s_addr;
addr = (__force __u32)paddr->v4.sin_addr.s_addr;
return jhash_3words(addr, ((__u32)paddr->v4.sin_port) << 16 |
return jhash_3words(addr, ((__force __u32)paddr->v4.sin_port) << 16 |
(__force __u32)lport, net_hash_mix(net), seed);
}
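
The changes in this file are pure sparse endianness annotations rather than behaviour changes: values that stay in network byte order are typed __be16/__be32, conversions happen explicitly with htons()/htonl(), and where a raw big-endian value is deliberately fed into a byte-order-agnostic hash the annotation is cast away with __force so sparse does not warn. A small illustration of the convention, using made-up example_* names:

#include <linux/jhash.h>
#include <linux/types.h>

struct example_key {
	__be16 lport;	/* kept in network byte order on purpose */
};

static u32 example_hash(const struct example_key *k, u32 seed)
{
	/*
	 * jhash never interprets the value, so mixing in the raw
	 * big-endian bits is correct; the __force cast records that
	 * dropping the endianness annotation here is intentional.
	 */
	return jhash_1word((__force u32)k->lport, seed);
}
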

View File

@ -738,7 +738,7 @@ static int sctp_v6_skb_iif(const struct sk_buff *skb)
/* Was this packet marked by Explicit Congestion Notification? */
static int sctp_v6_is_ce(const struct sk_buff *skb)
{
return *((__u32 *)(ipv6_hdr(skb))) & htonl(1 << 20);
return *((__u32 *)(ipv6_hdr(skb))) & (__force __u32)htonl(1 << 20);
}
/* Dump the v6 addr to the seq file. */
@ -882,8 +882,10 @@ static int sctp_inet6_bind_verify(struct sctp_sock *opt, union sctp_addr *addr)
net = sock_net(&opt->inet.sk);
rcu_read_lock();
dev = dev_get_by_index_rcu(net, addr->v6.sin6_scope_id);
if (!dev ||
!ipv6_chk_addr(net, &addr->v6.sin6_addr, dev, 0)) {
if (!dev || !(opt->inet.freebind ||
net->ipv6.sysctl.ip_nonlocal_bind ||
ipv6_chk_addr(net, &addr->v6.sin6_addr,
dev, 0))) {
rcu_read_unlock();
return 0;
}

View File

@ -2854,7 +2854,7 @@ struct sctp_chunk *sctp_make_asconf_update_ip(struct sctp_association *asoc,
addr_param_len = af->to_addr_param(addr, &addr_param);
param.param_hdr.type = flags;
param.param_hdr.length = htons(paramlen + addr_param_len);
param.crr_id = i;
param.crr_id = htonl(i);
sctp_addto_chunk(retval, paramlen, &param);
sctp_addto_chunk(retval, addr_param_len, &addr_param);
@ -2867,7 +2867,7 @@ struct sctp_chunk *sctp_make_asconf_update_ip(struct sctp_association *asoc,
addr_param_len = af->to_addr_param(addr, &addr_param);
param.param_hdr.type = SCTP_PARAM_DEL_IP;
param.param_hdr.length = htons(paramlen + addr_param_len);
param.crr_id = i;
param.crr_id = htonl(i);
sctp_addto_chunk(retval, paramlen, &param);
sctp_addto_chunk(retval, addr_param_len, &addr_param);
@ -3591,7 +3591,7 @@ static struct sctp_chunk *sctp_make_reconf(const struct sctp_association *asoc,
*/
struct sctp_chunk *sctp_make_strreset_req(
const struct sctp_association *asoc,
__u16 stream_num, __u16 *stream_list,
__u16 stream_num, __be16 *stream_list,
bool out, bool in)
{
struct sctp_strreset_outreq outreq;
@ -3788,7 +3788,8 @@ bool sctp_verify_reconf(const struct sctp_association *asoc,
{
struct sctp_reconf_chunk *hdr;
union sctp_params param;
__u16 last = 0, cnt = 0;
__be16 last = 0;
__u16 cnt = 0;
hdr = (struct sctp_reconf_chunk *)chunk->chunk_hdr;
sctp_walk_params(param, hdr, params) {

View File

@ -1607,12 +1607,12 @@ static int sctp_cmd_interpreter(enum sctp_event event_type,
break;
case SCTP_CMD_INIT_FAILED:
sctp_cmd_init_failed(commands, asoc, cmd->obj.err);
sctp_cmd_init_failed(commands, asoc, cmd->obj.u32);
break;
case SCTP_CMD_ASSOC_FAILED:
sctp_cmd_assoc_failed(commands, asoc, event_type,
subtype, chunk, cmd->obj.err);
subtype, chunk, cmd->obj.u32);
break;
case SCTP_CMD_INIT_COUNTER_INC:
@ -1680,8 +1680,8 @@ static int sctp_cmd_interpreter(enum sctp_event event_type,
case SCTP_CMD_PROCESS_CTSN:
/* Dummy up a SACK for processing. */
sackh.cum_tsn_ack = cmd->obj.be32;
sackh.a_rwnd = asoc->peer.rwnd +
asoc->outqueue.outstanding_bytes;
sackh.a_rwnd = htonl(asoc->peer.rwnd +
asoc->outqueue.outstanding_bytes);
sackh.num_gap_ack_blocks = 0;
sackh.num_dup_tsns = 0;
chunk->subh.sack_hdr = &sackh;

View File

@ -170,6 +170,36 @@ static inline void sctp_set_owner_w(struct sctp_chunk *chunk)
sk_mem_charge(sk, chunk->skb->truesize);
}
static void sctp_clear_owner_w(struct sctp_chunk *chunk)
{
skb_orphan(chunk->skb);
}
static void sctp_for_each_tx_datachunk(struct sctp_association *asoc,
void (*cb)(struct sctp_chunk *))
{
struct sctp_outq *q = &asoc->outqueue;
struct sctp_transport *t;
struct sctp_chunk *chunk;
list_for_each_entry(t, &asoc->peer.transport_addr_list, transports)
list_for_each_entry(chunk, &t->transmitted, transmitted_list)
cb(chunk);
list_for_each_entry(chunk, &q->retransmit, list)
cb(chunk);
list_for_each_entry(chunk, &q->sacked, list)
cb(chunk);
list_for_each_entry(chunk, &q->abandoned, list)
cb(chunk);
list_for_each_entry(chunk, &q->out_chunk_list, list)
cb(chunk);
}
/* Verify that this is a valid address. */
static inline int sctp_verify_addr(struct sock *sk, union sctp_addr *addr,
int len)
@ -8212,7 +8242,9 @@ static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
* paths won't try to lock it and then oldsk.
*/
lock_sock_nested(newsk, SINGLE_DEPTH_NESTING);
sctp_for_each_tx_datachunk(assoc, sctp_clear_owner_w);
sctp_assoc_migrate(assoc, newsk);
sctp_for_each_tx_datachunk(assoc, sctp_set_owner_w);
/* If the association on the newsk is already closed before accept()
* is called, set RCV_SHUTDOWN flag.

View File

@ -118,6 +118,7 @@ int sctp_send_reset_streams(struct sctp_association *asoc,
__u16 i, str_nums, *str_list;
struct sctp_chunk *chunk;
int retval = -EINVAL;
__be16 *nstr_list;
bool out, in;
if (!asoc->peer.reconf_capable ||
@ -148,13 +149,18 @@ int sctp_send_reset_streams(struct sctp_association *asoc,
if (str_list[i] >= stream->incnt)
goto out;
for (i = 0; i < str_nums; i++)
str_list[i] = htons(str_list[i]);
chunk = sctp_make_strreset_req(asoc, str_nums, str_list, out, in);
nstr_list = kcalloc(str_nums, sizeof(__be16), GFP_KERNEL);
if (!nstr_list) {
retval = -ENOMEM;
goto out;
}
for (i = 0; i < str_nums; i++)
str_list[i] = ntohs(str_list[i]);
nstr_list[i] = htons(str_list[i]);
chunk = sctp_make_strreset_req(asoc, str_nums, nstr_list, out, in);
kfree(nstr_list);
if (!chunk) {
retval = -ENOMEM;
@ -305,7 +311,7 @@ out:
}
static struct sctp_paramhdr *sctp_chunk_lookup_strreset_param(
struct sctp_association *asoc, __u32 resp_seq,
struct sctp_association *asoc, __be32 resp_seq,
__be16 type)
{
struct sctp_chunk *chunk = asoc->strreset_chunk;
@ -345,8 +351,9 @@ struct sctp_chunk *sctp_process_strreset_outreq(
{
struct sctp_strreset_outreq *outreq = param.v;
struct sctp_stream *stream = &asoc->stream;
__u16 i, nums, flags = 0, *str_p = NULL;
__u32 result = SCTP_STRRESET_DENIED;
__u16 i, nums, flags = 0;
__be16 *str_p = NULL;
__u32 request_seq;
request_seq = ntohl(outreq->request_seq);
@ -439,8 +446,9 @@ struct sctp_chunk *sctp_process_strreset_inreq(
struct sctp_stream *stream = &asoc->stream;
__u32 result = SCTP_STRRESET_DENIED;
struct sctp_chunk *chunk = NULL;
__u16 i, nums, *str_p;
__u32 request_seq;
__u16 i, nums;
__be16 *str_p;
request_seq = ntohl(inreq->request_seq);
if (TSN_lt(asoc->strreset_inseq, request_seq) ||
@ -769,7 +777,7 @@ struct sctp_chunk *sctp_process_strreset_resp(
if (req->type == SCTP_PARAM_RESET_OUT_REQUEST) {
struct sctp_strreset_outreq *outreq;
__u16 *str_p;
__be16 *str_p;
outreq = (struct sctp_strreset_outreq *)req;
str_p = outreq->list_of_streams;
@ -794,7 +802,7 @@ struct sctp_chunk *sctp_process_strreset_resp(
nums, str_p, GFP_ATOMIC);
} else if (req->type == SCTP_PARAM_RESET_IN_REQUEST) {
struct sctp_strreset_inreq *inreq;
__u16 *str_p;
__be16 *str_p;
/* if the result is performed, it's impossible for inreq */
if (result == SCTP_STRRESET_PERFORMED)

View File

@ -847,7 +847,7 @@ struct sctp_ulpevent *sctp_ulpevent_make_sender_dry_event(
struct sctp_ulpevent *sctp_ulpevent_make_stream_reset_event(
const struct sctp_association *asoc, __u16 flags, __u16 stream_num,
__u16 *stream_list, gfp_t gfp)
__be16 *stream_list, gfp_t gfp)
{
struct sctp_stream_reset_event *sreset;
struct sctp_ulpevent *event;

View File

@ -49,7 +49,7 @@ static void strp_abort_strp(struct strparser *strp, int err)
{
/* Unrecoverable error in receive */
del_timer(&strp->msg_timer);
cancel_delayed_work(&strp->msg_timer_work);
if (strp->stopped)
return;
@ -68,7 +68,7 @@ static void strp_abort_strp(struct strparser *strp, int err)
static void strp_start_timer(struct strparser *strp, long timeo)
{
if (timeo)
mod_timer(&strp->msg_timer, timeo);
mod_delayed_work(strp_wq, &strp->msg_timer_work, timeo);
}
/* Lower lock held */
@ -319,7 +319,7 @@ static int __strp_recv(read_descriptor_t *desc, struct sk_buff *orig_skb,
eaten += (cand_len - extra);
/* Hurray, we have a new message! */
del_timer(&strp->msg_timer);
cancel_delayed_work(&strp->msg_timer_work);
strp->skb_head = NULL;
STRP_STATS_INCR(strp->stats.msgs);
@ -450,9 +450,10 @@ static void strp_work(struct work_struct *w)
do_strp_work(container_of(w, struct strparser, work));
}
static void strp_msg_timeout(unsigned long arg)
static void strp_msg_timeout(struct work_struct *w)
{
struct strparser *strp = (struct strparser *)arg;
struct strparser *strp = container_of(w, struct strparser,
msg_timer_work.work);
/* Message assembly timed out */
STRP_STATS_INCR(strp->stats.msg_timeouts);
@ -505,9 +506,7 @@ int strp_init(struct strparser *strp, struct sock *sk,
strp->cb.read_sock_done = cb->read_sock_done ? : default_read_sock_done;
strp->cb.abort_parser = cb->abort_parser ? : strp_abort_strp;
setup_timer(&strp->msg_timer, strp_msg_timeout,
(unsigned long)strp);
INIT_DELAYED_WORK(&strp->msg_timer_work, strp_msg_timeout);
INIT_WORK(&strp->work, strp_work);
return 0;
@ -532,7 +531,7 @@ void strp_done(struct strparser *strp)
{
WARN_ON(!strp->stopped);
del_timer_sync(&strp->msg_timer);
cancel_delayed_work_sync(&strp->msg_timer_work);
cancel_work_sync(&strp->work);
if (strp->skb_head) {
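
This file converts the strparser message-assembly timer into a delayed work item, so the timeout handler runs in process context on a workqueue, where it may sleep and take the socket lock, instead of in timer (BH) context where it must not. A bare-bones sketch of that timer-to-delayed-work conversion, with a hypothetical example_parser in place of struct strparser:

#include <linux/printk.h>
#include <linux/workqueue.h>

struct example_parser {
	struct delayed_work msg_timer_work;
};

/* Workqueue context: sleeping and lock_sock() are fine here. */
static void example_msg_timeout(struct work_struct *w)
{
	struct example_parser *p =
		container_of(w, struct example_parser, msg_timer_work.work);

	pr_debug("parser %p: message assembly timed out\n", p);
}

static void example_parser_init(struct example_parser *p)
{
	INIT_DELAYED_WORK(&p->msg_timer_work, example_msg_timeout);
}

static void example_parser_arm(struct example_parser *p, unsigned long timeout)
{
	/* re-arms the expiry, replacing any previously scheduled one */
	mod_delayed_work(system_wq, &p->msg_timer_work, timeout);
}

static void example_parser_stop(struct example_parser *p)
{
	cancel_delayed_work_sync(&p->msg_timer_work);
}
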

View File

@ -257,6 +257,8 @@ static int unix_diag_get_exact(struct sk_buff *in_skb,
err = -ENOENT;
if (sk == NULL)
goto out_nosk;
if (!net_eq(sock_net(sk), net))
goto out;
err = sock_diag_check_cookie(sk, req->udiag_cookie);
if (err)

View File

@ -522,11 +522,6 @@ static int cfg80211_sme_connect(struct wireless_dev *wdev,
return -EOPNOTSUPP;
if (wdev->current_bss) {
if (!prev_bssid)
return -EALREADY;
if (prev_bssid &&
!ether_addr_equal(prev_bssid, wdev->current_bss->pub.bssid))
return -ENOTCONN;
cfg80211_unhold_bss(wdev->current_bss);
cfg80211_put_bss(wdev->wiphy, &wdev->current_bss->pub);
wdev->current_bss = NULL;
@ -1063,11 +1058,35 @@ int cfg80211_connect(struct cfg80211_registered_device *rdev,
ASSERT_WDEV_LOCK(wdev);
if (WARN_ON(wdev->connect_keys)) {
kzfree(wdev->connect_keys);
wdev->connect_keys = NULL;
/*
* If we have an ssid_len, we're trying to connect or are
* already connected, so reject a new SSID unless it's the
* same (which is the case for re-association.)
*/
if (wdev->ssid_len &&
(wdev->ssid_len != connect->ssid_len ||
memcmp(wdev->ssid, connect->ssid, wdev->ssid_len)))
return -EALREADY;
/*
* If connected, reject (re-)association unless prev_bssid
* matches the current BSSID.
*/
if (wdev->current_bss) {
if (!prev_bssid)
return -EALREADY;
if (!ether_addr_equal(prev_bssid, wdev->current_bss->pub.bssid))
return -ENOTCONN;
}
/*
* Reject if we're in the process of connecting with WEP,
* this case isn't very interesting and trying to handle
* it would make the code much more complex.
*/
if (wdev->connect_keys)
return -EINPROGRESS;
cfg80211_oper_and_ht_capa(&connect->ht_capa_mask,
rdev->wiphy.ht_capa_mod_mask);
@ -1118,7 +1137,12 @@ int cfg80211_connect(struct cfg80211_registered_device *rdev,
if (err) {
wdev->connect_keys = NULL;
wdev->ssid_len = 0;
/*
* This could be reassoc getting refused, don't clear
* ssid_len in that case.
*/
if (!wdev->current_bss)
wdev->ssid_len = 0;
return err;
}
@ -1145,6 +1169,14 @@ int cfg80211_disconnect(struct cfg80211_registered_device *rdev,
else if (wdev->ssid_len)
err = rdev_disconnect(rdev, dev, reason);
/*
* Clear ssid_len unless we actually were fully connected,
* in which case cfg80211_disconnected() will take care of
* this later.
*/
if (!wdev->current_bss)
wdev->ssid_len = 0;
return err;
}

View File

@ -1573,6 +1573,14 @@ static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy,
goto put_states;
}
if (!dst_prev)
dst0 = dst1;
else
/* Ref count is taken during xfrm_alloc_dst()
* No need to do dst_clone() on dst1
*/
dst_prev->child = dst1;
if (xfrm[i]->sel.family == AF_UNSPEC) {
inner_mode = xfrm_ip2inner_mode(xfrm[i],
xfrm_af2proto(family));
@ -1584,14 +1592,6 @@ static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy,
} else
inner_mode = xfrm[i]->inner_mode;
if (!dst_prev)
dst0 = dst1;
else
/* Ref count is taken during xfrm_alloc_dst()
* No need to do dst_clone() on dst1
*/
dst_prev->child = dst1;
xdst->route = dst;
dst_copy_metrics(dst1, dst);

View File

@ -1693,32 +1693,34 @@ static int dump_one_policy(struct xfrm_policy *xp, int dir, int count, void *ptr
static int xfrm_dump_policy_done(struct netlink_callback *cb)
{
struct xfrm_policy_walk *walk = (struct xfrm_policy_walk *) &cb->args[1];
struct xfrm_policy_walk *walk = (struct xfrm_policy_walk *)cb->args;
struct net *net = sock_net(cb->skb->sk);
xfrm_policy_walk_done(walk, net);
return 0;
}
static int xfrm_dump_policy_start(struct netlink_callback *cb)
{
struct xfrm_policy_walk *walk = (struct xfrm_policy_walk *)cb->args;
BUILD_BUG_ON(sizeof(*walk) > sizeof(cb->args));
xfrm_policy_walk_init(walk, XFRM_POLICY_TYPE_ANY);
return 0;
}
static int xfrm_dump_policy(struct sk_buff *skb, struct netlink_callback *cb)
{
struct net *net = sock_net(skb->sk);
struct xfrm_policy_walk *walk = (struct xfrm_policy_walk *) &cb->args[1];
struct xfrm_policy_walk *walk = (struct xfrm_policy_walk *)cb->args;
struct xfrm_dump_info info;
BUILD_BUG_ON(sizeof(struct xfrm_policy_walk) >
sizeof(cb->args) - sizeof(cb->args[0]));
info.in_skb = cb->skb;
info.out_skb = skb;
info.nlmsg_seq = cb->nlh->nlmsg_seq;
info.nlmsg_flags = NLM_F_MULTI;
if (!cb->args[0]) {
cb->args[0] = 1;
xfrm_policy_walk_init(walk, XFRM_POLICY_TYPE_ANY);
}
(void) xfrm_policy_walk(net, walk, dump_one_policy, &info);
return skb->len;
@ -2474,6 +2476,7 @@ static const struct nla_policy xfrma_spd_policy[XFRMA_SPD_MAX+1] = {
static const struct xfrm_link {
int (*doit)(struct sk_buff *, struct nlmsghdr *, struct nlattr **);
int (*start)(struct netlink_callback *);
int (*dump)(struct sk_buff *, struct netlink_callback *);
int (*done)(struct netlink_callback *);
const struct nla_policy *nla_pol;
@ -2487,6 +2490,7 @@ static const struct xfrm_link {
[XFRM_MSG_NEWPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_add_policy },
[XFRM_MSG_DELPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_get_policy },
[XFRM_MSG_GETPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_get_policy,
.start = xfrm_dump_policy_start,
.dump = xfrm_dump_policy,
.done = xfrm_dump_policy_done },
[XFRM_MSG_ALLOCSPI - XFRM_MSG_BASE] = { .doit = xfrm_alloc_userspi },
@ -2539,6 +2543,7 @@ static int xfrm_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
{
struct netlink_dump_control c = {
.start = link->start,
.dump = link->dump,
.done = link->done,
};
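
The point of the new .start hook is that it runs once while the dump is being set up, before the first .dump call. For the xfrm change above, that means the policy walker is registered in xfrm_dump_policy_start() even when the dump is torn down before xfrm_dump_policy() ever runs, so xfrm_dump_policy_done() can always unregister it safely. A rough sketch of a .start handler that initializes per-dump state in the netlink callback scratch area (example_walk is a hypothetical cursor type, not an xfrm structure):

#include <linux/bug.h>
#include <linux/netlink.h>

struct example_walk {
	unsigned long pos;	/* hypothetical per-dump cursor */
};

static int example_dump_start(struct netlink_callback *cb)
{
	/* cb->args is scratch space netlink preserves across .dump calls */
	struct example_walk *walk = (struct example_walk *)cb->args;

	BUILD_BUG_ON(sizeof(*walk) > sizeof(cb->args));
	walk->pos = 0;
	return 0;
}
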

View File

@ -787,8 +787,8 @@ struct xdp_md {
};
enum sk_action {
SK_ABORTED = 0,
SK_DROP,
SK_DROP = 0,
SK_PASS,
SK_REDIRECT,
};

View File

@ -17,5 +17,26 @@
"teardown": [
"$TC qdisc del dev $DEV1 ingress"
]
},
{
"id": "d052",
"name": "Add 1M filters with the same action",
"category": [
"filter",
"flower"
],
"setup": [
"$TC qdisc add dev $DEV2 ingress",
"./tdc_batch.py $DEV2 $BATCH_FILE --share_action -n 1000000"
],
"cmdUnderTest": "$TC -b $BATCH_FILE",
"expExitCode": "0",
"verifyCmd": "$TC actions list action gact",
"matchPattern": "action order 0: gact action drop.*index 1 ref 1000000 bind 1000000",
"matchCount": "1",
"teardown": [
"$TC qdisc del dev $DEV2 ingress",
"/bin/rm $BATCH_FILE"
]
}
]
]

View File

@ -88,7 +88,7 @@ def prepare_env(cmdlist):
exit(1)
def test_runner(filtered_tests):
def test_runner(filtered_tests, args):
"""
Driver function for the unit tests.
@ -105,6 +105,8 @@ def test_runner(filtered_tests):
for tidx in testlist:
result = True
tresult = ""
if "flower" in tidx["category"] and args.device == None:
continue
print("Test " + tidx["id"] + ": " + tidx["name"])
prepare_env(tidx["setup"])
(p, procout) = exec_cmd(tidx["cmdUnderTest"])
@ -152,6 +154,10 @@ def ns_create():
exec_cmd(cmd, False)
cmd = 'ip -s $NS link set $DEV1 up'
exec_cmd(cmd, False)
cmd = 'ip link set $DEV2 netns $NS'
exec_cmd(cmd, False)
cmd = 'ip -s $NS link set $DEV2 up'
exec_cmd(cmd, False)
def ns_destroy():
@ -211,7 +217,8 @@ def set_args(parser):
help='Execute the single test case with specified ID')
parser.add_argument('-i', '--id', action='store_true', dest='gen_id',
help='Generate ID numbers for new test cases')
return parser
parser.add_argument('-d', '--device',
help='Execute the test case in flower category')
return parser
@ -225,6 +232,8 @@ def check_default_settings(args):
if args.path != None:
NAMES['TC'] = args.path
if args.device != None:
NAMES['DEV2'] = args.device
if not os.path.isfile(NAMES['TC']):
print("The specified tc path " + NAMES['TC'] + " does not exist.")
exit(1)
@ -381,14 +390,17 @@ def set_operation_mode(args):
if (len(alltests) == 0):
print("Cannot find a test case with ID matching " + target_id)
exit(1)
catresults = test_runner(alltests)
catresults = test_runner(alltests, args)
print("All test results: " + "\n\n" + catresults)
elif (len(target_category) > 0):
if (target_category == "flower") and args.device == None:
print("Please specify a NIC device (-d) to run category flower")
exit(1)
if (target_category not in ucat):
print("Specified category is not present in this file.")
exit(1)
else:
catresults = test_runner(testcases[target_category])
catresults = test_runner(testcases[target_category], args)
print("Category " + target_category + "\n\n" + catresults)
ns_destroy()
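
In short, after this change to the test driver the flower category needs a real NIC: test_runner() skips flower cases unless a device was supplied through the new -d/--device option (which also fills in the DEV2 name used by the namespace setup), and selecting the flower category without -d aborts with the message above.
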

View File

@ -0,0 +1,62 @@
#!/usr/bin/python3
"""
tdc_batch.py - a script to generate TC batch file
Copyright (C) 2017 Chris Mi <chrism@mellanox.com>
"""
import argparse
parser = argparse.ArgumentParser(description='TC batch file generator')
parser.add_argument("device", help="device name")
parser.add_argument("file", help="batch file name")
parser.add_argument("-n", "--number", type=int,
help="how many lines in batch file")
parser.add_argument("-o", "--skip_sw",
help="skip_sw (offload), by default skip_hw",
action="store_true")
parser.add_argument("-s", "--share_action",
help="all filters share the same action",
action="store_true")
parser.add_argument("-p", "--prio",
help="all filters have different prio",
action="store_true")
args = parser.parse_args()
device = args.device
file = open(args.file, 'w')
number = 1
if args.number:
number = args.number
skip = "skip_hw"
if args.skip_sw:
skip = "skip_sw"
share_action = ""
if args.share_action:
share_action = "index 1"
prio = "prio 1"
if args.prio:
prio = ""
if number > 0x4000:
number = 0x4000
index = 0
for i in range(0x100):
for j in range(0x100):
for k in range(0x100):
mac = ("%02x:%02x:%02x" % (i, j, k))
src_mac = "e4:11:00:" + mac
dst_mac = "e4:12:00:" + mac
cmd = ("filter add dev %s %s protocol ip parent ffff: flower %s "
"src_mac %s dst_mac %s action drop %s" %
(device, prio, skip, src_mac, dst_mac, share_action))
file.write("%s\n" % cmd)
index += 1
if index >= number:
file.close()
exit(0)
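
This generator is what the new d052 test case drives: its output is a tc batch file that the test then feeds to tc with -b. The invocation used there (copied from the test definition above) is ./tdc_batch.py $DEV2 $BATCH_FILE --share_action -n 1000000; with --share_action every generated flower filter reuses the same drop action (gact index 1), which is exactly what the test's matchPattern verifies.
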

View File

@ -12,6 +12,8 @@ NAMES = {
# Name of veth devices to be created for the namespace
'DEV0': 'v0p0',
'DEV1': 'v0p1',
'DEV2': '',
'BATCH_FILE': './batch.txt',
# Name of the namespace to use
'NS': 'tcut'
}