Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Pull networking fixes from David Miller:

 1) Several netfilter fixes, including an nfnetlink deadlock fix from
    Florian Westphal and a fix for dropping VRF packets from Miaohe Lin.

 2) Flow offload fixes from Pablo Neira Ayuso including a fix to restore
    proper block sharing.

 3) Fix r8169 PHY init from Thomas Voegtle.

 4) Fix memory leak in mac80211, from Lorenzo Bianconi.

 5) Missing NULL check on object allocation in cxgb4, from Navid
    Emamdoost.

 6) Fix scaling of RX power in sfp phy driver, from Andrew Lunn.

 7) Check that there is actually an ip header to access in skb->data in
    VRF, from Peter Kosyh.

 8) Remove spurious rcu unlock in hv_netvsc, from Haiyang Zhang.

 9) One more tweak to the TCP fragmentation memory limit changes, to be
    less harmful to applications setting small SO_SNDBUF values (sketched
    below). From Eric Dumazet.
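
A minimal sketch of what the tcp_fragment() change amounts to (the full
hunk appears in the diff further down; the names below are taken from
that hunk): the overshoot check now grants an allowance of two
maximum-size skbs above SO_SNDBUF and never refuses to split the first
or last skb of the retransmit queue.

	long limit = sk->sk_sndbuf + 2 * SKB_TRUESIZE(GSO_MAX_SIZE);

	if (unlikely((sk->sk_wmem_queued >> 1) > limit &&
		     tcp_queue != TCP_FRAG_IN_WRITE_QUEUE &&
		     skb != tcp_rtx_queue_head(sk) &&
		     skb != tcp_rtx_queue_tail(sk))) {
		/* write queue far above sk_sndbuf: refuse to fragment */
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPWQUEUETOOBIG);
		return -ENOMEM;
	}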

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (40 commits)
  tcp: be more careful in tcp_fragment()
  hv_netvsc: Fix extra rcu_read_unlock in netvsc_recv_callback()
  vrf: make sure skb->data contains ip header to make routing
  connector: remove redundant input callback from cn_dev
  qed: Prefer pcie_capability_read_word()
  igc: Prefer pcie_capability_read_word()
  cxgb4: Prefer pcie_capability_read_word()
  be2net: Synchronize be_update_queues with dev_watchdog
  bnx2x: Prevent load reordering in tx completion processing
  net: phy: sfp: hwmon: Fix scaling of RX power
  net: sched: verify that q!=NULL before setting q->flags
  chelsio: Fix a typo in a function name
  allocate_flower_entry: should check for null deref
  net: hns3: typo in the name of a constant
  kbuild: add net/netfilter/nf_tables_offload.h to header-test blacklist.
  tipc: Fix a typo
  mac80211: don't warn about CW params when not using them
  mac80211: fix possible memory leak in ieee80211_assign_beacon
  nl80211: fix NL80211_HE_MAX_CAPABILITY_LEN
  nl80211: fix VENDOR_CMD_RAW_DATA
  ...
Linus Torvalds 2019-07-22 08:49:22 -07:00
commit 83768245a3
91 changed files with 322 additions and 236 deletions

View File

@ -248,16 +248,12 @@ static int __maybe_unused cn_proc_show(struct seq_file *m, void *v)
return 0;
}
static struct cn_dev cdev = {
.input = cn_rx_skb,
};
static int cn_init(void)
{
struct cn_dev *dev = &cdev;
struct netlink_kernel_cfg cfg = {
.groups = CN_NETLINK_USERS + 0xf,
.input = dev->input,
.input = cn_rx_skb,
};
dev->nls = netlink_kernel_create(&init_net, NETLINK_CONNECTOR, &cfg);

View File

@ -285,6 +285,9 @@ int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata)
hw_cons = le16_to_cpu(*txdata->tx_cons_sb);
sw_cons = txdata->tx_pkt_cons;
/* Ensure subsequent loads occur after hw_cons */
smp_rmb();
while (sw_cons != hw_cons) {
u16 pkt_cons;

View File

@ -94,7 +94,7 @@ static int my3126_interrupt_handler(struct cphy *cphy)
return cphy_cause_link_change;
}
static void my3216_poll(struct work_struct *work)
static void my3126_poll(struct work_struct *work)
{
struct cphy *cphy = container_of(work, struct cphy, phy_update.work);
@ -177,7 +177,7 @@ static struct cphy *my3126_phy_create(struct net_device *dev,
return NULL;
cphy_init(cphy, dev, phy_addr, &my3126_ops, mdio_ops);
INIT_DELAYED_WORK(&cphy->phy_update, my3216_poll);
INIT_DELAYED_WORK(&cphy->phy_update, my3126_poll);
cphy->bmsr = 0;
return cphy;

View File

@ -5561,7 +5561,6 @@ static int cxgb4_iov_configure(struct pci_dev *pdev, int num_vfs)
char name[IFNAMSIZ];
u32 devcap2;
u16 flags;
int pos;
/* If we want to instantiate Virtual Functions, then our
* parent bridge's PCI-E needs to support Alternative Routing
@ -5569,9 +5568,8 @@ static int cxgb4_iov_configure(struct pci_dev *pdev, int num_vfs)
* and above.
*/
pbridge = pdev->bus->self;
pos = pci_find_capability(pbridge, PCI_CAP_ID_EXP);
pci_read_config_word(pbridge, pos + PCI_EXP_FLAGS, &flags);
pci_read_config_dword(pbridge, pos + PCI_EXP_DEVCAP2, &devcap2);
pcie_capability_read_word(pbridge, PCI_EXP_FLAGS, &flags);
pcie_capability_read_dword(pbridge, PCI_EXP_DEVCAP2, &devcap2);
if ((flags & PCI_EXP_FLAGS_VERS) < 2 ||
!(devcap2 & PCI_EXP_DEVCAP2_ARI)) {

View File

@ -67,7 +67,8 @@ static struct ch_tc_pedit_fields pedits[] = {
static struct ch_tc_flower_entry *allocate_flower_entry(void)
{
struct ch_tc_flower_entry *new = kzalloc(sizeof(*new), GFP_KERNEL);
spin_lock_init(&new->lock);
if (new)
spin_lock_init(&new->lock);
return new;
}

View File

@ -7309,7 +7309,6 @@ int t4_fixup_host_params(struct adapter *adap, unsigned int page_size,
} else {
unsigned int pack_align;
unsigned int ingpad, ingpack;
unsigned int pcie_cap;
/* T5 introduced the separation of the Free List Padding and
* Packing Boundaries. Thus, we can select a smaller Padding
@ -7334,8 +7333,7 @@ int t4_fixup_host_params(struct adapter *adap, unsigned int page_size,
* multiple of the Maximum Payload Size.
*/
pack_align = fl_align;
pcie_cap = pci_find_capability(adap->pdev, PCI_CAP_ID_EXP);
if (pcie_cap) {
if (pci_is_pcie(adap->pdev)) {
unsigned int mps, mps_log;
u16 devctl;
@ -7343,9 +7341,8 @@ int t4_fixup_host_params(struct adapter *adap, unsigned int page_size,
* [bits 7:5] encodes sizes as powers of 2 starting at
* 128 bytes.
*/
pci_read_config_word(adap->pdev,
pcie_cap + PCI_EXP_DEVCTL,
&devctl);
pcie_capability_read_word(adap->pdev, PCI_EXP_DEVCTL,
&devctl);
mps_log = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5) + 7;
mps = 1 << mps_log;
if (mps > pack_align)

View File

@ -4698,8 +4698,13 @@ int be_update_queues(struct be_adapter *adapter)
int status;
if (netif_running(netdev)) {
/* be_tx_timeout() must not run concurrently with this
* function, synchronize with an already-running dev_watchdog
*/
netif_tx_lock_bh(netdev);
/* device cannot transmit now, avoid dev_watchdog timeouts */
netif_carrier_off(netdev);
netif_tx_unlock_bh(netdev);
be_close(netdev);
}

View File

@ -43,7 +43,7 @@ enum HCLGE_MBX_OPCODE {
HCLGE_MBX_GET_QID_IN_PF, /* (VF -> PF) get queue id in pf */
HCLGE_MBX_LINK_STAT_MODE, /* (PF -> VF) link mode has changed */
HCLGE_MBX_GET_LINK_MODE, /* (VF -> PF) get the link mode of pf */
HLCGE_MBX_PUSH_VLAN_INFO, /* (PF -> VF) push port base vlan */
HCLGE_MBX_PUSH_VLAN_INFO, /* (PF -> VF) push port base vlan */
HCLGE_MBX_GET_MEDIA_TYPE, /* (VF -> PF) get media type */
HCLGE_MBX_GET_VF_FLR_STATUS = 200, /* (M7 -> PF) get vf reset status */

View File

@ -304,7 +304,7 @@ int hclge_push_vf_port_base_vlan_info(struct hclge_vport *vport, u8 vfid,
memcpy(&msg_data[6], &vlan_tag, sizeof(u16));
return hclge_send_mbx_msg(vport, msg_data, sizeof(msg_data),
HLCGE_MBX_PUSH_VLAN_INFO, vfid);
HCLGE_MBX_PUSH_VLAN_INFO, vfid);
}
static int hclge_set_vf_vlan_cfg(struct hclge_vport *vport,

View File

@ -204,7 +204,7 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev)
case HCLGE_MBX_LINK_STAT_CHANGE:
case HCLGE_MBX_ASSERTING_RESET:
case HCLGE_MBX_LINK_STAT_MODE:
case HLCGE_MBX_PUSH_VLAN_INFO:
case HCLGE_MBX_PUSH_VLAN_INFO:
/* set this mbx event as pending. This is required as we
* might loose interrupt event when mbx task is busy
* handling. This shall be cleared when mbx task just
@ -307,7 +307,7 @@ void hclgevf_mbx_async_handler(struct hclgevf_dev *hdev)
hclgevf_reset_task_schedule(hdev);
break;
case HLCGE_MBX_PUSH_VLAN_INFO:
case HCLGE_MBX_PUSH_VLAN_INFO:
state = le16_to_cpu(msg_q[1]);
vlan_info = &msg_q[1];
hclgevf_update_port_base_vlan_info(hdev, state,

View File

@ -3912,13 +3912,11 @@ void igc_write_pci_cfg(struct igc_hw *hw, u32 reg, u16 *value)
s32 igc_read_pcie_cap_reg(struct igc_hw *hw, u32 reg, u16 *value)
{
struct igc_adapter *adapter = hw->back;
u16 cap_offset;
cap_offset = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP);
if (!cap_offset)
if (!pci_is_pcie(adapter->pdev))
return -IGC_ERR_CONFIG;
pci_read_config_word(adapter->pdev, cap_offset + reg, value);
pcie_capability_read_word(adapter->pdev, reg, value);
return IGC_SUCCESS;
}
@ -3926,13 +3924,11 @@ s32 igc_read_pcie_cap_reg(struct igc_hw *hw, u32 reg, u16 *value)
s32 igc_write_pcie_cap_reg(struct igc_hw *hw, u32 reg, u16 *value)
{
struct igc_adapter *adapter = hw->back;
u16 cap_offset;
cap_offset = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP);
if (!cap_offset)
if (!pci_is_pcie(adapter->pdev))
return -IGC_ERR_CONFIG;
pci_write_config_word(adapter->pdev, cap_offset + reg, *value);
pcie_capability_write_word(adapter->pdev, reg, *value);
return IGC_SUCCESS;
}

View File

@ -735,8 +735,7 @@ mlx5e_rep_indr_setup_tc_block(struct net_device *netdev,
list_add(&indr_priv->list,
&rpriv->uplink_priv.tc_indr_block_priv_list);
block_cb = flow_block_cb_alloc(f->net,
mlx5e_rep_indr_setup_block_cb,
block_cb = flow_block_cb_alloc(mlx5e_rep_indr_setup_block_cb,
indr_priv, indr_priv,
mlx5e_rep_indr_tc_block_unbind);
if (IS_ERR(block_cb)) {
@ -753,7 +752,7 @@ mlx5e_rep_indr_setup_tc_block(struct net_device *netdev,
if (!indr_priv)
return -ENOENT;
block_cb = flow_block_cb_lookup(f,
block_cb = flow_block_cb_lookup(f->block,
mlx5e_rep_indr_setup_block_cb,
indr_priv);
if (!block_cb)

View File

@ -1604,14 +1604,14 @@ mlxsw_sp_setup_tc_block_flower_bind(struct mlxsw_sp_port *mlxsw_sp_port,
bool register_block = false;
int err;
block_cb = flow_block_cb_lookup(f, mlxsw_sp_setup_tc_block_cb_flower,
block_cb = flow_block_cb_lookup(f->block,
mlxsw_sp_setup_tc_block_cb_flower,
mlxsw_sp);
if (!block_cb) {
acl_block = mlxsw_sp_acl_block_create(mlxsw_sp, f->net);
if (!acl_block)
return -ENOMEM;
block_cb = flow_block_cb_alloc(f->net,
mlxsw_sp_setup_tc_block_cb_flower,
block_cb = flow_block_cb_alloc(mlxsw_sp_setup_tc_block_cb_flower,
mlxsw_sp, acl_block,
mlxsw_sp_tc_block_flower_release);
if (IS_ERR(block_cb)) {
@ -1657,7 +1657,8 @@ mlxsw_sp_setup_tc_block_flower_unbind(struct mlxsw_sp_port *mlxsw_sp_port,
struct flow_block_cb *block_cb;
int err;
block_cb = flow_block_cb_lookup(f, mlxsw_sp_setup_tc_block_cb_flower,
block_cb = flow_block_cb_lookup(f->block,
mlxsw_sp_setup_tc_block_cb_flower,
mlxsw_sp);
if (!block_cb)
return;
@ -1680,7 +1681,7 @@ static int mlxsw_sp_setup_tc_block(struct mlxsw_sp_port *mlxsw_sp_port,
struct flow_block_offload *f)
{
struct flow_block_cb *block_cb;
tc_setup_cb_t *cb;
flow_setup_cb_t *cb;
bool ingress;
int err;
@ -1702,7 +1703,7 @@ static int mlxsw_sp_setup_tc_block(struct mlxsw_sp_port *mlxsw_sp_port,
&mlxsw_sp_block_cb_list))
return -EBUSY;
block_cb = flow_block_cb_alloc(f->net, cb, mlxsw_sp_port,
block_cb = flow_block_cb_alloc(cb, mlxsw_sp_port,
mlxsw_sp_port, NULL);
if (IS_ERR(block_cb))
return PTR_ERR(block_cb);
@ -1718,7 +1719,7 @@ static int mlxsw_sp_setup_tc_block(struct mlxsw_sp_port *mlxsw_sp_port,
case FLOW_BLOCK_UNBIND:
mlxsw_sp_setup_tc_block_flower_unbind(mlxsw_sp_port,
f, ingress);
block_cb = flow_block_cb_lookup(f, cb, mlxsw_sp_port);
block_cb = flow_block_cb_lookup(f->block, cb, mlxsw_sp_port);
if (!block_cb)
return -ENOENT;

View File

@ -316,15 +316,14 @@ int ocelot_setup_tc_block_flower_bind(struct ocelot_port *port,
if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS)
return -EOPNOTSUPP;
block_cb = flow_block_cb_lookup(f, ocelot_setup_tc_block_cb_flower,
port);
block_cb = flow_block_cb_lookup(f->block,
ocelot_setup_tc_block_cb_flower, port);
if (!block_cb) {
port_block = ocelot_port_block_create(port);
if (!port_block)
return -ENOMEM;
block_cb = flow_block_cb_alloc(f->net,
ocelot_setup_tc_block_cb_flower,
block_cb = flow_block_cb_alloc(ocelot_setup_tc_block_cb_flower,
port, port_block,
ocelot_tc_block_unbind);
if (IS_ERR(block_cb)) {
@ -351,8 +350,8 @@ void ocelot_setup_tc_block_flower_unbind(struct ocelot_port *port,
{
struct flow_block_cb *block_cb;
block_cb = flow_block_cb_lookup(f, ocelot_setup_tc_block_cb_flower,
port);
block_cb = flow_block_cb_lookup(f->block,
ocelot_setup_tc_block_cb_flower, port);
if (!block_cb)
return;

View File

@ -134,7 +134,7 @@ static int ocelot_setup_tc_block(struct ocelot_port *port,
struct flow_block_offload *f)
{
struct flow_block_cb *block_cb;
tc_setup_cb_t *cb;
flow_setup_cb_t *cb;
int err;
netdev_dbg(port->dev, "tc_block command %d, binder_type %d\n",
@ -156,7 +156,7 @@ static int ocelot_setup_tc_block(struct ocelot_port *port,
if (flow_block_cb_is_busy(cb, port, &ocelot_block_cb_list))
return -EBUSY;
block_cb = flow_block_cb_alloc(f->net, cb, port, port, NULL);
block_cb = flow_block_cb_alloc(cb, port, port, NULL);
if (IS_ERR(block_cb))
return PTR_ERR(block_cb);
@ -169,7 +169,7 @@ static int ocelot_setup_tc_block(struct ocelot_port *port,
list_add_tail(&block_cb->driver_list, f->driver_block_list);
return 0;
case FLOW_BLOCK_UNBIND:
block_cb = flow_block_cb_lookup(f, cb, port);
block_cb = flow_block_cb_lookup(f->block, cb, port);
if (!block_cb)
return -ENOENT;

View File

@ -1318,8 +1318,7 @@ static int nfp_flower_setup_tc_block(struct net_device *netdev,
&nfp_block_cb_list))
return -EBUSY;
block_cb = flow_block_cb_alloc(f->net,
nfp_flower_setup_tc_block_cb,
block_cb = flow_block_cb_alloc(nfp_flower_setup_tc_block_cb,
repr, repr, NULL);
if (IS_ERR(block_cb))
return PTR_ERR(block_cb);
@ -1328,7 +1327,8 @@ static int nfp_flower_setup_tc_block(struct net_device *netdev,
list_add_tail(&block_cb->driver_list, &nfp_block_cb_list);
return 0;
case FLOW_BLOCK_UNBIND:
block_cb = flow_block_cb_lookup(f, nfp_flower_setup_tc_block_cb,
block_cb = flow_block_cb_lookup(f->block,
nfp_flower_setup_tc_block_cb,
repr);
if (!block_cb)
return -ENOENT;
@ -1424,8 +1424,7 @@ nfp_flower_setup_indr_tc_block(struct net_device *netdev, struct nfp_app *app,
cb_priv->app = app;
list_add(&cb_priv->list, &priv->indr_block_cb_priv);
block_cb = flow_block_cb_alloc(f->net,
nfp_flower_setup_indr_block_cb,
block_cb = flow_block_cb_alloc(nfp_flower_setup_indr_block_cb,
cb_priv, cb_priv,
nfp_flower_setup_indr_tc_release);
if (IS_ERR(block_cb)) {
@ -1442,7 +1441,7 @@ nfp_flower_setup_indr_tc_block(struct net_device *netdev, struct nfp_app *app,
if (!cb_priv)
return -ENOENT;
block_cb = flow_block_cb_lookup(f,
block_cb = flow_block_cb_lookup(f->block,
nfp_flower_setup_indr_block_cb,
cb_priv);
if (!block_cb)

View File

@ -530,9 +530,8 @@ static void qed_rdma_init_devinfo(struct qed_hwfn *p_hwfn,
SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_LOCAL_INV_FENCE, 1);
/* Check atomic operations support in PCI configuration space. */
pci_read_config_dword(cdev->pdev,
cdev->pdev->pcie_cap + PCI_EXP_DEVCTL2,
&pci_status_control);
pcie_capability_read_dword(cdev->pdev, PCI_EXP_DEVCTL2,
&pci_status_control);
if (pci_status_control & PCI_EXP_DEVCTL2_LTR_EN)
SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_ATOMIC_OP, 1);

View File

@ -3251,9 +3251,9 @@ static void rtl8168g_1_hw_phy_config(struct rtl8169_private *tp)
ret = phy_read_paged(tp->phydev, 0x0a46, 0x13);
if (ret & BIT(8))
phy_modify_paged(tp->phydev, 0x0c41, 0x12, 0, BIT(1));
phy_modify_paged(tp->phydev, 0x0c41, 0x15, 0, BIT(1));
else
phy_modify_paged(tp->phydev, 0x0c41, 0x12, BIT(1), 0);
phy_modify_paged(tp->phydev, 0x0c41, 0x15, BIT(1), 0);
/* Enable PHY auto speed down */
phy_modify_paged(tp->phydev, 0x0a44, 0x11, 0, BIT(3) | BIT(2));

View File

@ -836,7 +836,6 @@ int netvsc_recv_callback(struct net_device *net,
if (unlikely(!skb)) {
++net_device_ctx->eth_stats.rx_no_memory;
rcu_read_unlock();
return NVSP_STAT_FAIL;
}

View File

@ -517,7 +517,7 @@ static int sfp_hwmon_read_sensor(struct sfp *sfp, int reg, long *value)
static void sfp_hwmon_to_rx_power(long *value)
{
*value = DIV_ROUND_CLOSEST(*value, 100);
*value = DIV_ROUND_CLOSEST(*value, 10);
}
static void sfp_hwmon_calibrate(struct sfp *sfp, unsigned int slope, int offset,
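
For context on the one-line change above: hwmon expects power values in
microwatts, while the module's diagnostic readout is in steps of 0.1 uW,
so the conversion needs a divide by 10 rather than 100 (the unit details
are an inference from the fix, not stated in this diff). A worked
example, assuming a raw readout of 8000:

	long raw = 8000;			/* 0.1 uW units from the module */
	long uw = DIV_ROUND_CLOSEST(raw, 10);	/* = 800 uW, what hwmon reports */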

View File

@ -165,23 +165,29 @@ static int vrf_ip6_local_out(struct net *net, struct sock *sk,
static netdev_tx_t vrf_process_v6_outbound(struct sk_buff *skb,
struct net_device *dev)
{
const struct ipv6hdr *iph = ipv6_hdr(skb);
const struct ipv6hdr *iph;
struct net *net = dev_net(skb->dev);
struct flowi6 fl6 = {
/* needed to match OIF rule */
.flowi6_oif = dev->ifindex,
.flowi6_iif = LOOPBACK_IFINDEX,
.daddr = iph->daddr,
.saddr = iph->saddr,
.flowlabel = ip6_flowinfo(iph),
.flowi6_mark = skb->mark,
.flowi6_proto = iph->nexthdr,
.flowi6_flags = FLOWI_FLAG_SKIP_NH_OIF,
};
struct flowi6 fl6;
int ret = NET_XMIT_DROP;
struct dst_entry *dst;
struct dst_entry *dst_null = &net->ipv6.ip6_null_entry->dst;
if (!pskb_may_pull(skb, ETH_HLEN + sizeof(struct ipv6hdr)))
goto err;
iph = ipv6_hdr(skb);
memset(&fl6, 0, sizeof(fl6));
/* needed to match OIF rule */
fl6.flowi6_oif = dev->ifindex;
fl6.flowi6_iif = LOOPBACK_IFINDEX;
fl6.daddr = iph->daddr;
fl6.saddr = iph->saddr;
fl6.flowlabel = ip6_flowinfo(iph);
fl6.flowi6_mark = skb->mark;
fl6.flowi6_proto = iph->nexthdr;
fl6.flowi6_flags = FLOWI_FLAG_SKIP_NH_OIF;
dst = ip6_route_output(net, NULL, &fl6);
if (dst == dst_null)
goto err;
@ -237,21 +243,27 @@ static int vrf_ip_local_out(struct net *net, struct sock *sk,
static netdev_tx_t vrf_process_v4_outbound(struct sk_buff *skb,
struct net_device *vrf_dev)
{
struct iphdr *ip4h = ip_hdr(skb);
struct iphdr *ip4h;
int ret = NET_XMIT_DROP;
struct flowi4 fl4 = {
/* needed to match OIF rule */
.flowi4_oif = vrf_dev->ifindex,
.flowi4_iif = LOOPBACK_IFINDEX,
.flowi4_tos = RT_TOS(ip4h->tos),
.flowi4_flags = FLOWI_FLAG_ANYSRC | FLOWI_FLAG_SKIP_NH_OIF,
.flowi4_proto = ip4h->protocol,
.daddr = ip4h->daddr,
.saddr = ip4h->saddr,
};
struct flowi4 fl4;
struct net *net = dev_net(vrf_dev);
struct rtable *rt;
if (!pskb_may_pull(skb, ETH_HLEN + sizeof(struct iphdr)))
goto err;
ip4h = ip_hdr(skb);
memset(&fl4, 0, sizeof(fl4));
/* needed to match OIF rule */
fl4.flowi4_oif = vrf_dev->ifindex;
fl4.flowi4_iif = LOOPBACK_IFINDEX;
fl4.flowi4_tos = RT_TOS(ip4h->tos);
fl4.flowi4_flags = FLOWI_FLAG_ANYSRC | FLOWI_FLAG_SKIP_NH_OIF;
fl4.flowi4_proto = ip4h->protocol;
fl4.daddr = ip4h->daddr;
fl4.saddr = ip4h->saddr;
rt = ip_route_output_flow(net, &fl4, NULL);
if (IS_ERR(rt))
goto err;

View File

@ -177,6 +177,7 @@ static const struct wiphy_vendor_command wil_nl80211_vendor_commands[] = {
.info.subcmd = QCA_NL80211_VENDOR_SUBCMD_DMG_RF_GET_SECTOR_CFG,
.flags = WIPHY_VENDOR_CMD_NEED_WDEV |
WIPHY_VENDOR_CMD_NEED_RUNNING,
.policy = wil_rf_sector_policy,
.doit = wil_rf_sector_get_cfg
},
{
@ -184,6 +185,7 @@ static const struct wiphy_vendor_command wil_nl80211_vendor_commands[] = {
.info.subcmd = QCA_NL80211_VENDOR_SUBCMD_DMG_RF_SET_SECTOR_CFG,
.flags = WIPHY_VENDOR_CMD_NEED_WDEV |
WIPHY_VENDOR_CMD_NEED_RUNNING,
.policy = wil_rf_sector_policy,
.doit = wil_rf_sector_set_cfg
},
{
@ -192,6 +194,7 @@ static const struct wiphy_vendor_command wil_nl80211_vendor_commands[] = {
QCA_NL80211_VENDOR_SUBCMD_DMG_RF_GET_SELECTED_SECTOR,
.flags = WIPHY_VENDOR_CMD_NEED_WDEV |
WIPHY_VENDOR_CMD_NEED_RUNNING,
.policy = wil_rf_sector_policy,
.doit = wil_rf_sector_get_selected
},
{
@ -200,6 +203,7 @@ static const struct wiphy_vendor_command wil_nl80211_vendor_commands[] = {
QCA_NL80211_VENDOR_SUBCMD_DMG_RF_SET_SELECTED_SECTOR,
.flags = WIPHY_VENDOR_CMD_NEED_WDEV |
WIPHY_VENDOR_CMD_NEED_RUNNING,
.policy = wil_rf_sector_policy,
.doit = wil_rf_sector_set_selected
},
};

View File

@ -112,6 +112,7 @@ const struct wiphy_vendor_command brcmf_vendor_cmds[] = {
},
.flags = WIPHY_VENDOR_CMD_NEED_WDEV |
WIPHY_VENDOR_CMD_NEED_NETDEV,
.policy = VENDOR_CMD_RAW_DATA,
.doit = brcmf_cfg80211_vndr_cmds_dcmd_handler
},
};

View File

@ -163,6 +163,7 @@ static const struct wiphy_vendor_command wlcore_vendor_commands[] = {
.flags = WIPHY_VENDOR_CMD_NEED_NETDEV |
WIPHY_VENDOR_CMD_NEED_RUNNING,
.doit = wlcore_vendor_cmd_smart_config_start,
.policy = wlcore_vendor_attr_policy,
},
{
.info = {
@ -172,6 +173,7 @@ static const struct wiphy_vendor_command wlcore_vendor_commands[] = {
.flags = WIPHY_VENDOR_CMD_NEED_NETDEV |
WIPHY_VENDOR_CMD_NEED_RUNNING,
.doit = wlcore_vendor_cmd_smart_config_stop,
.policy = wlcore_vendor_attr_policy,
},
{
.info = {
@ -181,6 +183,7 @@ static const struct wiphy_vendor_command wlcore_vendor_commands[] = {
.flags = WIPHY_VENDOR_CMD_NEED_NETDEV |
WIPHY_VENDOR_CMD_NEED_RUNNING,
.doit = wlcore_vendor_cmd_smart_config_set_group_key,
.policy = wlcore_vendor_attr_policy,
},
};

View File

@ -50,7 +50,6 @@ struct cn_dev {
u32 seq, groups;
struct sock *nls;
void (*input) (struct sk_buff *skb);
struct cn_queue_dev *cbdev;
};

View File

@ -1,7 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/****************************************************************************
* ip_conntrack_h323_asn1.h - BER and PER decoding library for H.323
* conntrack/NAT module.
* BER and PER decoding library for H.323 conntrack/NAT module.
*
* Copyright (c) 2006 by Jing Min Zhao <zhaojingmin@users.sourceforge.net>
*

View File

@ -4170,7 +4170,7 @@ struct sta_opmode_info {
u8 rx_nss;
};
#define VENDOR_CMD_RAW_DATA ((const struct nla_policy *)ERR_PTR(-ENODATA))
#define VENDOR_CMD_RAW_DATA ((const struct nla_policy *)(long)(-ENODATA))
/**
* struct wiphy_vendor_command - vendor command definition

View File

@ -2,8 +2,8 @@
#define _NET_FLOW_OFFLOAD_H
#include <linux/kernel.h>
#include <linux/list.h>
#include <net/flow_dissector.h>
#include <net/sch_generic.h>
struct flow_match {
struct flow_dissector *dissector;
@ -249,6 +249,10 @@ enum flow_block_binder_type {
FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS,
};
struct flow_block {
struct list_head cb_list;
};
struct netlink_ext_ack;
struct flow_block_offload {
@ -256,29 +260,33 @@ struct flow_block_offload {
enum flow_block_binder_type binder_type;
bool block_shared;
struct net *net;
struct flow_block *block;
struct list_head cb_list;
struct list_head *driver_block_list;
struct netlink_ext_ack *extack;
};
enum tc_setup_type;
typedef int flow_setup_cb_t(enum tc_setup_type type, void *type_data,
void *cb_priv);
struct flow_block_cb {
struct list_head driver_list;
struct list_head list;
struct net *net;
tc_setup_cb_t *cb;
flow_setup_cb_t *cb;
void *cb_ident;
void *cb_priv;
void (*release)(void *cb_priv);
unsigned int refcnt;
};
struct flow_block_cb *flow_block_cb_alloc(struct net *net, tc_setup_cb_t *cb,
struct flow_block_cb *flow_block_cb_alloc(flow_setup_cb_t *cb,
void *cb_ident, void *cb_priv,
void (*release)(void *cb_priv));
void flow_block_cb_free(struct flow_block_cb *block_cb);
struct flow_block_cb *flow_block_cb_lookup(struct flow_block_offload *offload,
tc_setup_cb_t *cb, void *cb_ident);
struct flow_block_cb *flow_block_cb_lookup(struct flow_block *block,
flow_setup_cb_t *cb, void *cb_ident);
void *flow_block_cb_priv(struct flow_block_cb *block_cb);
void flow_block_cb_incref(struct flow_block_cb *block_cb);
@ -296,11 +304,12 @@ static inline void flow_block_cb_remove(struct flow_block_cb *block_cb,
list_move(&block_cb->list, &offload->cb_list);
}
bool flow_block_cb_is_busy(tc_setup_cb_t *cb, void *cb_ident,
bool flow_block_cb_is_busy(flow_setup_cb_t *cb, void *cb_ident,
struct list_head *driver_block_list);
int flow_block_cb_setup_simple(struct flow_block_offload *f,
struct list_head *driver_list, tc_setup_cb_t *cb,
struct list_head *driver_list,
flow_setup_cb_t *cb,
void *cb_ident, void *cb_priv, bool ingress_only);
enum flow_cls_command {
@ -333,4 +342,9 @@ flow_cls_offload_flow_rule(struct flow_cls_offload *flow_cmd)
return flow_cmd->rule;
}
static inline void flow_block_init(struct flow_block *flow_block)
{
INIT_LIST_HEAD(&flow_block->cb_list);
}
#endif /* _NET_FLOW_OFFLOAD_H */

View File

@ -76,6 +76,11 @@ struct nf_conntrack_expect_policy {
#define NF_CT_EXPECT_CLASS_DEFAULT 0
#define NF_CT_EXPECT_MAX_CNT 255
/* Allow to reuse expectations with the same tuples from different master
* conntracks.
*/
#define NF_CT_EXP_F_SKIP_MASTER 0x1
int nf_conntrack_expect_pernet_init(struct net *net);
void nf_conntrack_expect_pernet_fini(struct net *net);
@ -122,10 +127,11 @@ void nf_ct_expect_init(struct nf_conntrack_expect *, unsigned int, u_int8_t,
u_int8_t, const __be16 *, const __be16 *);
void nf_ct_expect_put(struct nf_conntrack_expect *exp);
int nf_ct_expect_related_report(struct nf_conntrack_expect *expect,
u32 portid, int report);
static inline int nf_ct_expect_related(struct nf_conntrack_expect *expect)
u32 portid, int report, unsigned int flags);
static inline int nf_ct_expect_related(struct nf_conntrack_expect *expect,
unsigned int flags)
{
return nf_ct_expect_related_report(expect, 0, 0);
return nf_ct_expect_related_report(expect, 0, 0, flags);
}
#endif /*_NF_CONNTRACK_EXPECT_H*/

View File

@ -68,6 +68,7 @@ struct synproxy_options {
u8 options;
u8 wscale;
u16 mss;
u16 mss_encode;
u32 tsval;
u32 tsecr;
};

View File

@ -11,6 +11,7 @@
#include <linux/rhashtable.h>
#include <net/netfilter/nf_flow_table.h>
#include <net/netlink.h>
#include <net/flow_offload.h>
struct module;
@ -951,7 +952,7 @@ struct nft_stats {
* @stats: per-cpu chain stats
* @chain: the chain
* @dev_name: device name that this base chain is attached to (if any)
* @cb_list: list of flow block callbacks (for hardware offload)
* @flow_block: flow block (for hardware offload)
*/
struct nft_base_chain {
struct nf_hook_ops ops;
@ -961,7 +962,7 @@ struct nft_base_chain {
struct nft_stats __percpu *stats;
struct nft_chain chain;
char dev_name[IFNAMSIZ];
struct list_head cb_list;
struct flow_block flow_block;
};
static inline struct nft_base_chain *nft_base_chain(const struct nft_chain *chain)

View File

@ -6,7 +6,6 @@
#include <linux/workqueue.h>
#include <net/sch_generic.h>
#include <net/act_api.h>
#include <net/flow_offload.h>
#include <net/net_namespace.h>
/* TC action not accessible from user space */
@ -126,14 +125,14 @@ static inline struct Qdisc *tcf_block_q(struct tcf_block *block)
}
static inline
int tc_setup_cb_block_register(struct tcf_block *block, tc_setup_cb_t *cb,
int tc_setup_cb_block_register(struct tcf_block *block, flow_setup_cb_t *cb,
void *cb_priv)
{
return 0;
}
static inline
void tc_setup_cb_block_unregister(struct tcf_block *block, tc_setup_cb_t *cb,
void tc_setup_cb_block_unregister(struct tcf_block *block, flow_setup_cb_t *cb,
void *cb_priv)
{
}

View File

@ -15,6 +15,7 @@
#include <linux/mutex.h>
#include <net/gen_stats.h>
#include <net/rtnetlink.h>
#include <net/flow_offload.h>
struct Qdisc_ops;
struct qdisc_walker;
@ -22,9 +23,6 @@ struct tcf_walker;
struct module;
struct bpf_flow_keys;
typedef int tc_setup_cb_t(enum tc_setup_type type,
void *type_data, void *cb_priv);
typedef int tc_indr_block_bind_cb_t(struct net_device *dev, void *cb_priv,
enum tc_setup_type type, void *type_data);
@ -313,7 +311,7 @@ struct tcf_proto_ops {
void (*walk)(struct tcf_proto *tp,
struct tcf_walker *arg, bool rtnl_held);
int (*reoffload)(struct tcf_proto *tp, bool add,
tc_setup_cb_t *cb, void *cb_priv,
flow_setup_cb_t *cb, void *cb_priv,
struct netlink_ext_ack *extack);
void (*bind_class)(void *, u32, unsigned long);
void * (*tmplt_create)(struct net *net,
@ -401,7 +399,7 @@ struct tcf_block {
refcount_t refcnt;
struct net *net;
struct Qdisc *q;
struct list_head cb_list;
struct flow_block flow_block;
struct list_head owner_list;
bool keep_dst;
unsigned int offloadcnt; /* Number of oddloaded filters */

View File

@ -1709,6 +1709,11 @@ static inline struct sk_buff *tcp_rtx_queue_head(const struct sock *sk)
return skb_rb_first(&sk->tcp_rtx_queue);
}
static inline struct sk_buff *tcp_rtx_queue_tail(const struct sock *sk)
{
return skb_rb_last(&sk->tcp_rtx_queue);
}
static inline struct sk_buff *tcp_write_queue_head(const struct sock *sk)
{
return skb_peek(&sk->sk_write_queue);

View File

@ -2863,7 +2863,7 @@ enum nl80211_attrs {
#define NL80211_HT_CAPABILITY_LEN 26
#define NL80211_VHT_CAPABILITY_LEN 12
#define NL80211_HE_MIN_CAPABILITY_LEN 16
#define NL80211_HE_MAX_CAPABILITY_LEN 51
#define NL80211_HE_MAX_CAPABILITY_LEN 54
#define NL80211_MAX_NR_CIPHER_SUITES 5
#define NL80211_MAX_NR_AKM_SUITES 2

View File

@ -6,7 +6,7 @@
menuconfig NF_TABLES_BRIDGE
depends on BRIDGE && NETFILTER && NF_TABLES
select NETFILTER_FAMILY_BRIDGE
bool "Ethernet Bridge nf_tables support"
tristate "Ethernet Bridge nf_tables support"
if NF_TABLES_BRIDGE
@ -25,6 +25,8 @@ config NF_LOG_BRIDGE
tristate "Bridge packet logging"
select NF_LOG_COMMON
endif # NF_TABLES_BRIDGE
config NF_CONNTRACK_BRIDGE
tristate "IPv4/IPV6 bridge connection tracking support"
depends on NF_CONNTRACK
@ -39,8 +41,6 @@ config NF_CONNTRACK_BRIDGE
To compile it as a module, choose M here. If unsure, say N.
endif # NF_TABLES_BRIDGE
menuconfig BRIDGE_NF_EBTABLES
tristate "Ethernet Bridge tables (ebtables) support"
depends on BRIDGE && NETFILTER && NETFILTER_XTABLES

View File

@ -165,7 +165,7 @@ void flow_rule_match_enc_opts(const struct flow_rule *rule,
}
EXPORT_SYMBOL(flow_rule_match_enc_opts);
struct flow_block_cb *flow_block_cb_alloc(struct net *net, tc_setup_cb_t *cb,
struct flow_block_cb *flow_block_cb_alloc(flow_setup_cb_t *cb,
void *cb_ident, void *cb_priv,
void (*release)(void *cb_priv))
{
@ -175,7 +175,6 @@ struct flow_block_cb *flow_block_cb_alloc(struct net *net, tc_setup_cb_t *cb,
if (!block_cb)
return ERR_PTR(-ENOMEM);
block_cb->net = net;
block_cb->cb = cb;
block_cb->cb_ident = cb_ident;
block_cb->cb_priv = cb_priv;
@ -194,14 +193,13 @@ void flow_block_cb_free(struct flow_block_cb *block_cb)
}
EXPORT_SYMBOL(flow_block_cb_free);
struct flow_block_cb *flow_block_cb_lookup(struct flow_block_offload *f,
tc_setup_cb_t *cb, void *cb_ident)
struct flow_block_cb *flow_block_cb_lookup(struct flow_block *block,
flow_setup_cb_t *cb, void *cb_ident)
{
struct flow_block_cb *block_cb;
list_for_each_entry(block_cb, f->driver_block_list, driver_list) {
if (block_cb->net == f->net &&
block_cb->cb == cb &&
list_for_each_entry(block_cb, &block->cb_list, list) {
if (block_cb->cb == cb &&
block_cb->cb_ident == cb_ident)
return block_cb;
}
@ -228,7 +226,7 @@ unsigned int flow_block_cb_decref(struct flow_block_cb *block_cb)
}
EXPORT_SYMBOL(flow_block_cb_decref);
bool flow_block_cb_is_busy(tc_setup_cb_t *cb, void *cb_ident,
bool flow_block_cb_is_busy(flow_setup_cb_t *cb, void *cb_ident,
struct list_head *driver_block_list)
{
struct flow_block_cb *block_cb;
@ -245,7 +243,8 @@ EXPORT_SYMBOL(flow_block_cb_is_busy);
int flow_block_cb_setup_simple(struct flow_block_offload *f,
struct list_head *driver_block_list,
tc_setup_cb_t *cb, void *cb_ident, void *cb_priv,
flow_setup_cb_t *cb,
void *cb_ident, void *cb_priv,
bool ingress_only)
{
struct flow_block_cb *block_cb;
@ -261,8 +260,7 @@ int flow_block_cb_setup_simple(struct flow_block_offload *f,
if (flow_block_cb_is_busy(cb, cb_ident, driver_block_list))
return -EBUSY;
block_cb = flow_block_cb_alloc(f->net, cb, cb_ident,
cb_priv, NULL);
block_cb = flow_block_cb_alloc(cb, cb_ident, cb_priv, NULL);
if (IS_ERR(block_cb))
return PTR_ERR(block_cb);
@ -270,7 +268,7 @@ int flow_block_cb_setup_simple(struct flow_block_offload *f,
list_add_tail(&block_cb->driver_list, driver_block_list);
return 0;
case FLOW_BLOCK_UNBIND:
block_cb = flow_block_cb_lookup(f, cb, cb_ident);
block_cb = flow_block_cb_lookup(f->block, cb, cb_ident);
if (!block_cb)
return -ENOENT;

View File

@ -951,7 +951,7 @@ static int dsa_slave_setup_tc_block(struct net_device *dev,
struct flow_block_offload *f)
{
struct flow_block_cb *block_cb;
tc_setup_cb_t *cb;
flow_setup_cb_t *cb;
if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
cb = dsa_slave_setup_tc_block_cb_ig;
@ -967,7 +967,7 @@ static int dsa_slave_setup_tc_block(struct net_device *dev,
if (flow_block_cb_is_busy(cb, dev, &dsa_slave_block_cb_list))
return -EBUSY;
block_cb = flow_block_cb_alloc(f->net, cb, dev, dev, NULL);
block_cb = flow_block_cb_alloc(cb, dev, dev, NULL);
if (IS_ERR(block_cb))
return PTR_ERR(block_cb);
@ -975,7 +975,7 @@ static int dsa_slave_setup_tc_block(struct net_device *dev,
list_add_tail(&block_cb->driver_list, &dsa_slave_block_cb_list);
return 0;
case FLOW_BLOCK_UNBIND:
block_cb = flow_block_cb_lookup(f, cb, dev);
block_cb = flow_block_cb_lookup(f->block, cb, dev);
if (!block_cb)
return -ENOENT;

View File

@ -416,8 +416,8 @@ clusterip_tg(struct sk_buff *skb, const struct xt_action_param *par)
ctinfo == IP_CT_RELATED_REPLY))
return XT_CONTINUE;
/* ip_conntrack_icmp guarantees us that we only have ICMP_ECHO,
* TIMESTAMP, INFO_REQUEST or ADDRESS type icmp packets from here
/* nf_conntrack_proto_icmp guarantees us that we only have ICMP_ECHO,
* TIMESTAMP, INFO_REQUEST or ICMP_ADDRESS type icmp packets from here
* on, which all have an ID field [relevant for hashing]. */
hash = clusterip_hashfn(skb, cipinfo->config);

View File

@ -36,6 +36,8 @@ synproxy_tg4(struct sk_buff *skb, const struct xt_action_param *par)
opts.options |= XT_SYNPROXY_OPT_ECN;
opts.options &= info->options;
opts.mss_encode = opts.mss;
opts.mss = info->mss;
if (opts.options & XT_SYNPROXY_OPT_TIMESTAMP)
synproxy_init_timestamp_cookie(info, &opts);
else

View File

@ -78,6 +78,7 @@ static bool rpfilter_mt(const struct sk_buff *skb, struct xt_action_param *par)
flow.flowi4_mark = info->flags & XT_RPFILTER_VALID_MARK ? skb->mark : 0;
flow.flowi4_tos = RT_TOS(iph->tos);
flow.flowi4_scope = RT_SCOPE_UNIVERSE;
flow.flowi4_oif = l3mdev_master_ifindex_rcu(xt_in(par));
return rpfilter_lookup_reverse(xt_net(par), &flow, xt_in(par), info->flags) ^ invert;
}

View File

@ -221,11 +221,11 @@ static int nat_rtp_rtcp(struct sk_buff *skb, struct nf_conn *ct,
int ret;
rtp_exp->tuple.dst.u.udp.port = htons(nated_port);
ret = nf_ct_expect_related(rtp_exp);
ret = nf_ct_expect_related(rtp_exp, 0);
if (ret == 0) {
rtcp_exp->tuple.dst.u.udp.port =
htons(nated_port + 1);
ret = nf_ct_expect_related(rtcp_exp);
ret = nf_ct_expect_related(rtcp_exp, 0);
if (ret == 0)
break;
else if (ret == -EBUSY) {
@ -296,7 +296,7 @@ static int nat_t120(struct sk_buff *skb, struct nf_conn *ct,
int ret;
exp->tuple.dst.u.tcp.port = htons(nated_port);
ret = nf_ct_expect_related(exp);
ret = nf_ct_expect_related(exp, 0);
if (ret == 0)
break;
else if (ret != -EBUSY) {
@ -352,7 +352,7 @@ static int nat_h245(struct sk_buff *skb, struct nf_conn *ct,
int ret;
exp->tuple.dst.u.tcp.port = htons(nated_port);
ret = nf_ct_expect_related(exp);
ret = nf_ct_expect_related(exp, 0);
if (ret == 0)
break;
else if (ret != -EBUSY) {
@ -444,7 +444,7 @@ static int nat_q931(struct sk_buff *skb, struct nf_conn *ct,
int ret;
exp->tuple.dst.u.tcp.port = htons(nated_port);
ret = nf_ct_expect_related(exp);
ret = nf_ct_expect_related(exp, 0);
if (ret == 0)
break;
else if (ret != -EBUSY) {
@ -537,7 +537,7 @@ static int nat_callforwarding(struct sk_buff *skb, struct nf_conn *ct,
int ret;
exp->tuple.dst.u.tcp.port = htons(nated_port);
ret = nf_ct_expect_related(exp);
ret = nf_ct_expect_related(exp, 0);
if (ret == 0)
break;
else if (ret != -EBUSY) {

View File

@ -1288,6 +1288,7 @@ int tcp_fragment(struct sock *sk, enum tcp_queue tcp_queue,
struct tcp_sock *tp = tcp_sk(sk);
struct sk_buff *buff;
int nsize, old_factor;
long limit;
int nlen;
u8 flags;
@ -1298,8 +1299,16 @@ int tcp_fragment(struct sock *sk, enum tcp_queue tcp_queue,
if (nsize < 0)
nsize = 0;
if (unlikely((sk->sk_wmem_queued >> 1) > sk->sk_sndbuf &&
tcp_queue != TCP_FRAG_IN_WRITE_QUEUE)) {
/* tcp_sendmsg() can overshoot sk_wmem_queued by one full size skb.
* We need some allowance to not penalize applications setting small
* SO_SNDBUF values.
* Also allow first and last skb in retransmit queue to be split.
*/
limit = sk->sk_sndbuf + 2 * SKB_TRUESIZE(GSO_MAX_SIZE);
if (unlikely((sk->sk_wmem_queued >> 1) > limit &&
tcp_queue != TCP_FRAG_IN_WRITE_QUEUE &&
skb != tcp_rtx_queue_head(sk) &&
skb != tcp_rtx_queue_tail(sk))) {
NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPWQUEUETOOBIG);
return -ENOMEM;
}

View File

@ -36,6 +36,8 @@ synproxy_tg6(struct sk_buff *skb, const struct xt_action_param *par)
opts.options |= XT_SYNPROXY_OPT_ECN;
opts.options &= info->options;
opts.mss_encode = opts.mss;
opts.mss = info->mss;
if (opts.options & XT_SYNPROXY_OPT_TIMESTAMP)
synproxy_init_timestamp_cookie(info, &opts);
else

View File

@ -55,7 +55,9 @@ static bool rpfilter_lookup_reverse6(struct net *net, const struct sk_buff *skb,
if (rpfilter_addr_linklocal(&iph->saddr)) {
lookup_flags |= RT6_LOOKUP_F_IFACE;
fl6.flowi6_oif = dev->ifindex;
} else if ((flags & XT_RPFILTER_LOOSE) == 0)
/* Set flowi6_oif for vrf devices to lookup route in l3mdev domain. */
} else if (netif_is_l3_master(dev) || netif_is_l3_slave(dev) ||
(flags & XT_RPFILTER_LOOSE) == 0)
fl6.flowi6_oif = dev->ifindex;
rt = (void *)ip6_route_lookup(net, &fl6, skb, lookup_flags);
@ -70,7 +72,9 @@ static bool rpfilter_lookup_reverse6(struct net *net, const struct sk_buff *skb,
goto out;
}
if (rt->rt6i_idev->dev == dev || (flags & XT_RPFILTER_LOOSE))
if (rt->rt6i_idev->dev == dev ||
l3mdev_master_ifindex_rcu(rt->rt6i_idev->dev) == dev->ifindex ||
(flags & XT_RPFILTER_LOOSE))
ret = true;
out:
ip6_rt_put(rt);

View File

@ -936,8 +936,10 @@ static int ieee80211_assign_beacon(struct ieee80211_sub_if_data *sdata,
err = ieee80211_set_probe_resp(sdata, params->probe_resp,
params->probe_resp_len, csa);
if (err < 0)
if (err < 0) {
kfree(new);
return err;
}
if (err == 0)
changed |= BSS_CHANGED_AP_PROBE_RESP;
@ -949,8 +951,10 @@ static int ieee80211_assign_beacon(struct ieee80211_sub_if_data *sdata,
params->civicloc,
params->civicloc_len);
if (err < 0)
if (err < 0) {
kfree(new);
return err;
}
changed |= BSS_CHANGED_FTM_RESPONDER;
}

View File

@ -187,11 +187,16 @@ int drv_conf_tx(struct ieee80211_local *local,
if (!check_sdata_in_driver(sdata))
return -EIO;
if (WARN_ONCE(params->cw_min == 0 ||
params->cw_min > params->cw_max,
"%s: invalid CW_min/CW_max: %d/%d\n",
sdata->name, params->cw_min, params->cw_max))
if (params->cw_min == 0 || params->cw_min > params->cw_max) {
/*
* If we can't configure hardware anyway, don't warn. We may
* never have initialized the CW parameters.
*/
WARN_ONCE(local->ops->conf_tx,
"%s: invalid CW_min/CW_max: %d/%d\n",
sdata->name, params->cw_min, params->cw_max);
return -EINVAL;
}
trace_drv_conf_tx(local, sdata, ac, params);
if (local->ops->conf_tx)

View File

@ -223,8 +223,6 @@ config NF_CONNTRACK_FTP
of Network Address Translation on them.
This is FTP support on Layer 3 independent connection tracking.
Layer 3 independent connection tracking is experimental scheme
which generalize ip_conntrack to support other layer 3 protocols.
To compile it as a module, choose M here. If unsure, say N.
@ -338,7 +336,7 @@ config NF_CONNTRACK_SIP
help
SIP is an application-layer control protocol that can establish,
modify, and terminate multimedia sessions (conferences) such as
Internet telephony calls. With the ip_conntrack_sip and
Internet telephony calls. With the nf_conntrack_sip and
the nf_nat_sip modules you can support the protocol on a connection
tracking/NATing firewall.
@ -1313,7 +1311,7 @@ config NETFILTER_XT_MATCH_HELPER
depends on NETFILTER_ADVANCED
help
Helper matching allows you to match packets in dynamic connections
tracked by a conntrack-helper, ie. ip_conntrack_ftp
tracked by a conntrack-helper, ie. nf_conntrack_ftp
To compile it as a module, choose M here. If unsure, say Y.

View File

@ -231,7 +231,7 @@ void ip_vs_nfct_expect_related(struct sk_buff *skb, struct nf_conn *ct,
IP_VS_DBG_BUF(7, "%s: ct=%p, expect tuple=" FMT_TUPLE "\n",
__func__, ct, ARG_TUPLE(&exp->tuple));
nf_ct_expect_related(exp);
nf_ct_expect_related(exp, 0);
nf_ct_expect_put(exp);
}
EXPORT_SYMBOL(ip_vs_nfct_expect_related);

View File

@ -159,7 +159,7 @@ static int amanda_help(struct sk_buff *skb,
if (nf_nat_amanda && ct->status & IPS_NAT_MASK)
ret = nf_nat_amanda(skb, ctinfo, protoff,
off - dataoff, len, exp);
else if (nf_ct_expect_related(exp) != 0) {
else if (nf_ct_expect_related(exp, 0) != 0) {
nf_ct_helper_log(skb, ct, "cannot add expectation");
ret = NF_DROP;
}

View File

@ -68,7 +68,7 @@ int nf_conntrack_broadcast_help(struct sk_buff *skb,
exp->class = NF_CT_EXPECT_CLASS_DEFAULT;
exp->helper = NULL;
nf_ct_expect_related(exp);
nf_ct_expect_related(exp, 0);
nf_ct_expect_put(exp);
nf_ct_refresh(ct, skb, timeout * HZ);

View File

@ -1817,9 +1817,7 @@ EXPORT_SYMBOL_GPL(nf_ct_kill_acct);
#include <linux/netfilter/nfnetlink_conntrack.h>
#include <linux/mutex.h>
/* Generic function for tcp/udp/sctp/dccp and alike. This needs to be
* in ip_conntrack_core, since we don't want the protocols to autoload
* or depend on ctnetlink */
/* Generic function for tcp/udp/sctp/dccp and alike. */
int nf_ct_port_tuple_to_nlattr(struct sk_buff *skb,
const struct nf_conntrack_tuple *tuple)
{

View File

@ -249,13 +249,22 @@ static inline int expect_clash(const struct nf_conntrack_expect *a,
static inline int expect_matches(const struct nf_conntrack_expect *a,
const struct nf_conntrack_expect *b)
{
return a->master == b->master &&
nf_ct_tuple_equal(&a->tuple, &b->tuple) &&
return nf_ct_tuple_equal(&a->tuple, &b->tuple) &&
nf_ct_tuple_mask_equal(&a->mask, &b->mask) &&
net_eq(nf_ct_net(a->master), nf_ct_net(b->master)) &&
nf_ct_zone_equal_any(a->master, nf_ct_zone(b->master));
}
static bool master_matches(const struct nf_conntrack_expect *a,
const struct nf_conntrack_expect *b,
unsigned int flags)
{
if (flags & NF_CT_EXP_F_SKIP_MASTER)
return true;
return a->master == b->master;
}
/* Generally a bad idea to call this: could have matched already. */
void nf_ct_unexpect_related(struct nf_conntrack_expect *exp)
{
@ -399,7 +408,8 @@ static void evict_oldest_expect(struct nf_conn *master,
nf_ct_remove_expect(last);
}
static inline int __nf_ct_expect_check(struct nf_conntrack_expect *expect)
static inline int __nf_ct_expect_check(struct nf_conntrack_expect *expect,
unsigned int flags)
{
const struct nf_conntrack_expect_policy *p;
struct nf_conntrack_expect *i;
@ -417,8 +427,10 @@ static inline int __nf_ct_expect_check(struct nf_conntrack_expect *expect)
}
h = nf_ct_expect_dst_hash(net, &expect->tuple);
hlist_for_each_entry_safe(i, next, &nf_ct_expect_hash[h], hnode) {
if (expect_matches(i, expect)) {
if (i->class != expect->class)
if (master_matches(i, expect, flags) &&
expect_matches(i, expect)) {
if (i->class != expect->class ||
i->master != expect->master)
return -EALREADY;
if (nf_ct_remove_expect(i))
@ -453,12 +465,12 @@ out:
}
int nf_ct_expect_related_report(struct nf_conntrack_expect *expect,
u32 portid, int report)
u32 portid, int report, unsigned int flags)
{
int ret;
spin_lock_bh(&nf_conntrack_expect_lock);
ret = __nf_ct_expect_check(expect);
ret = __nf_ct_expect_check(expect, flags);
if (ret < 0)
goto out;

View File

@ -525,7 +525,7 @@ skip_nl_seq:
protoff, matchoff, matchlen, exp);
else {
/* Can't expect this? Best to drop packet now. */
if (nf_ct_expect_related(exp) != 0) {
if (nf_ct_expect_related(exp, 0) != 0) {
nf_ct_helper_log(skb, ct, "cannot add expectation");
ret = NF_DROP;
} else

View File

@ -1,11 +1,10 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* ip_conntrack_helper_h323_asn1.c - BER and PER decoding library for H.323
* conntrack/NAT module.
* BER and PER decoding library for H.323 conntrack/NAT module.
*
* Copyright (c) 2006 by Jing Min Zhao <zhaojingmin@users.sourceforge.net>
*
* See ip_conntrack_helper_h323_asn1.h for details.
* See nf_conntrack_helper_h323_asn1.h for details.
*/
#ifdef __KERNEL__

View File

@ -305,8 +305,8 @@ static int expect_rtp_rtcp(struct sk_buff *skb, struct nf_conn *ct,
ret = nat_rtp_rtcp(skb, ct, ctinfo, protoff, data, dataoff,
taddr, port, rtp_port, rtp_exp, rtcp_exp);
} else { /* Conntrack only */
if (nf_ct_expect_related(rtp_exp) == 0) {
if (nf_ct_expect_related(rtcp_exp) == 0) {
if (nf_ct_expect_related(rtp_exp, 0) == 0) {
if (nf_ct_expect_related(rtcp_exp, 0) == 0) {
pr_debug("nf_ct_h323: expect RTP ");
nf_ct_dump_tuple(&rtp_exp->tuple);
pr_debug("nf_ct_h323: expect RTCP ");
@ -364,7 +364,7 @@ static int expect_t120(struct sk_buff *skb,
ret = nat_t120(skb, ct, ctinfo, protoff, data, dataoff, taddr,
port, exp);
} else { /* Conntrack only */
if (nf_ct_expect_related(exp) == 0) {
if (nf_ct_expect_related(exp, 0) == 0) {
pr_debug("nf_ct_h323: expect T.120 ");
nf_ct_dump_tuple(&exp->tuple);
} else
@ -701,7 +701,7 @@ static int expect_h245(struct sk_buff *skb, struct nf_conn *ct,
ret = nat_h245(skb, ct, ctinfo, protoff, data, dataoff, taddr,
port, exp);
} else { /* Conntrack only */
if (nf_ct_expect_related(exp) == 0) {
if (nf_ct_expect_related(exp, 0) == 0) {
pr_debug("nf_ct_q931: expect H.245 ");
nf_ct_dump_tuple(&exp->tuple);
} else
@ -825,7 +825,7 @@ static int expect_callforwarding(struct sk_buff *skb,
protoff, data, dataoff,
taddr, port, exp);
} else { /* Conntrack only */
if (nf_ct_expect_related(exp) == 0) {
if (nf_ct_expect_related(exp, 0) == 0) {
pr_debug("nf_ct_q931: expect Call Forwarding ");
nf_ct_dump_tuple(&exp->tuple);
} else
@ -1284,7 +1284,7 @@ static int expect_q931(struct sk_buff *skb, struct nf_conn *ct,
ret = nat_q931(skb, ct, ctinfo, protoff, data,
taddr, i, port, exp);
} else { /* Conntrack only */
if (nf_ct_expect_related(exp) == 0) {
if (nf_ct_expect_related(exp, 0) == 0) {
pr_debug("nf_ct_ras: expect Q.931 ");
nf_ct_dump_tuple(&exp->tuple);
@ -1349,7 +1349,7 @@ static int process_gcf(struct sk_buff *skb, struct nf_conn *ct,
IPPROTO_UDP, NULL, &port);
exp->helper = nf_conntrack_helper_ras;
if (nf_ct_expect_related(exp) == 0) {
if (nf_ct_expect_related(exp, 0) == 0) {
pr_debug("nf_ct_ras: expect RAS ");
nf_ct_dump_tuple(&exp->tuple);
} else
@ -1561,7 +1561,7 @@ static int process_acf(struct sk_buff *skb, struct nf_conn *ct,
exp->flags = NF_CT_EXPECT_PERMANENT;
exp->helper = nf_conntrack_helper_q931;
if (nf_ct_expect_related(exp) == 0) {
if (nf_ct_expect_related(exp, 0) == 0) {
pr_debug("nf_ct_ras: expect Q.931 ");
nf_ct_dump_tuple(&exp->tuple);
} else
@ -1615,7 +1615,7 @@ static int process_lcf(struct sk_buff *skb, struct nf_conn *ct,
exp->flags = NF_CT_EXPECT_PERMANENT;
exp->helper = nf_conntrack_helper_q931;
if (nf_ct_expect_related(exp) == 0) {
if (nf_ct_expect_related(exp, 0) == 0) {
pr_debug("nf_ct_ras: expect Q.931 ");
nf_ct_dump_tuple(&exp->tuple);
} else

View File

@ -213,7 +213,7 @@ static int help(struct sk_buff *skb, unsigned int protoff,
addr_beg_p - ib_ptr,
addr_end_p - addr_beg_p,
exp);
else if (nf_ct_expect_related(exp) != 0) {
else if (nf_ct_expect_related(exp, 0) != 0) {
nf_ct_helper_log(skb, ct,
"cannot add expectation");
ret = NF_DROP;

View File

@ -2616,7 +2616,7 @@ ctnetlink_glue_attach_expect(const struct nlattr *attr, struct nf_conn *ct,
if (IS_ERR(exp))
return PTR_ERR(exp);
err = nf_ct_expect_related_report(exp, portid, report);
err = nf_ct_expect_related_report(exp, portid, report, 0);
nf_ct_expect_put(exp);
return err;
}
@ -3367,7 +3367,7 @@ ctnetlink_create_expect(struct net *net,
goto err_rcu;
}
err = nf_ct_expect_related_report(exp, portid, report);
err = nf_ct_expect_related_report(exp, portid, report, 0);
nf_ct_expect_put(exp);
err_rcu:
rcu_read_unlock();

View File

@ -234,9 +234,9 @@ static int exp_gre(struct nf_conn *ct, __be16 callid, __be16 peer_callid)
nf_nat_pptp_exp_gre = rcu_dereference(nf_nat_pptp_hook_exp_gre);
if (nf_nat_pptp_exp_gre && ct->status & IPS_NAT_MASK)
nf_nat_pptp_exp_gre(exp_orig, exp_reply);
if (nf_ct_expect_related(exp_orig) != 0)
if (nf_ct_expect_related(exp_orig, 0) != 0)
goto out_put_both;
if (nf_ct_expect_related(exp_reply) != 0)
if (nf_ct_expect_related(exp_reply, 0) != 0)
goto out_unexpect_orig;
/* Add GRE keymap entries */

View File

@ -1,7 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* ip_conntrack_proto_gre.c - Version 3.0
*
* Connection tracking protocol helper module for GRE.
*
* GRE is a generic encapsulation protocol, which is generally not very

View File

@ -215,7 +215,7 @@ int nf_conntrack_icmpv4_error(struct nf_conn *tmpl,
return -NF_ACCEPT;
}
/* See ip_conntrack_proto_tcp.c */
/* See nf_conntrack_proto_tcp.c */
if (state->net->ct.sysctl_checksum &&
state->hook == NF_INET_PRE_ROUTING &&
nf_ip_checksum(skb, state->hook, dataoff, IPPROTO_ICMP)) {

View File

@ -472,6 +472,7 @@ static bool tcp_in_window(const struct nf_conn *ct,
struct ip_ct_tcp_state *receiver = &state->seen[!dir];
const struct nf_conntrack_tuple *tuple = &ct->tuplehash[dir].tuple;
__u32 seq, ack, sack, end, win, swin;
u16 win_raw;
s32 receiver_offset;
bool res, in_recv_win;
@ -480,7 +481,8 @@ static bool tcp_in_window(const struct nf_conn *ct,
*/
seq = ntohl(tcph->seq);
ack = sack = ntohl(tcph->ack_seq);
win = ntohs(tcph->window);
win_raw = ntohs(tcph->window);
win = win_raw;
end = segment_seq_plus_len(seq, skb->len, dataoff, tcph);
if (receiver->flags & IP_CT_TCP_FLAG_SACK_PERM)
@ -655,14 +657,14 @@ static bool tcp_in_window(const struct nf_conn *ct,
&& state->last_seq == seq
&& state->last_ack == ack
&& state->last_end == end
&& state->last_win == win)
&& state->last_win == win_raw)
state->retrans++;
else {
state->last_dir = dir;
state->last_seq = seq;
state->last_ack = ack;
state->last_end = end;
state->last_win = win;
state->last_win = win_raw;
state->retrans = 0;
}
}

View File

@ -153,7 +153,7 @@ static int help(struct sk_buff *skb,
nf_ct_dump_tuple(&exp->tuple);
/* Can't expect this? Best to drop packet now. */
if (nf_ct_expect_related(exp) != 0) {
if (nf_ct_expect_related(exp, 0) != 0) {
nf_ct_helper_log(skb, ct, "cannot add expectation");
ret = NF_DROP;
}

View File

@ -977,11 +977,15 @@ static int set_expected_rtp_rtcp(struct sk_buff *skb, unsigned int protoff,
/* -EALREADY handling works around end-points that send
* SDP messages with identical port but different media type,
* we pretend expectation was set up.
* It also works in the case that SDP messages are sent with
* identical expect tuples but for different master conntracks.
*/
int errp = nf_ct_expect_related(rtp_exp);
int errp = nf_ct_expect_related(rtp_exp,
NF_CT_EXP_F_SKIP_MASTER);
if (errp == 0 || errp == -EALREADY) {
int errcp = nf_ct_expect_related(rtcp_exp);
int errcp = nf_ct_expect_related(rtcp_exp,
NF_CT_EXP_F_SKIP_MASTER);
if (errcp == 0 || errcp == -EALREADY)
ret = NF_ACCEPT;
@ -1296,7 +1300,7 @@ static int process_register_request(struct sk_buff *skb, unsigned int protoff,
ret = hooks->expect(skb, protoff, dataoff, dptr, datalen,
exp, matchoff, matchlen);
else {
if (nf_ct_expect_related(exp) != 0) {
if (nf_ct_expect_related(exp, 0) != 0) {
nf_ct_helper_log(skb, ct, "cannot add expectation");
ret = NF_DROP;
} else

View File

@ -78,7 +78,7 @@ static int tftp_help(struct sk_buff *skb,
nf_nat_tftp = rcu_dereference(nf_nat_tftp_hook);
if (nf_nat_tftp && ct->status & IPS_NAT_MASK)
ret = nf_nat_tftp(skb, ctinfo, exp);
else if (nf_ct_expect_related(exp) != 0) {
else if (nf_ct_expect_related(exp, 0) != 0) {
nf_ct_helper_log(skb, ct, "cannot add expectation");
ret = NF_DROP;
}

View File

@ -48,7 +48,7 @@ static unsigned int help(struct sk_buff *skb,
int res;
exp->tuple.dst.u.tcp.port = htons(port);
res = nf_ct_expect_related(exp);
res = nf_ct_expect_related(exp, 0);
if (res == 0)
break;
else if (res != -EBUSY) {

View File

@ -519,7 +519,7 @@ another_round:
* and NF_INET_LOCAL_OUT, we change the destination to map into the
* range. It might not be possible to get a unique tuple, but we try.
* At worst (or if we race), we will end up with a final duplicate in
* __ip_conntrack_confirm and drop the packet. */
* __nf_conntrack_confirm and drop the packet. */
static void
get_unique_tuple(struct nf_conntrack_tuple *tuple,
const struct nf_conntrack_tuple *orig_tuple,

View File

@ -91,7 +91,7 @@ static unsigned int nf_nat_ftp(struct sk_buff *skb,
int ret;
exp->tuple.dst.u.tcp.port = htons(port);
ret = nf_ct_expect_related(exp);
ret = nf_ct_expect_related(exp, 0);
if (ret == 0)
break;
else if (ret != -EBUSY) {

View File

@ -53,7 +53,7 @@ static unsigned int help(struct sk_buff *skb,
int ret;
exp->tuple.dst.u.tcp.port = htons(port);
ret = nf_ct_expect_related(exp);
ret = nf_ct_expect_related(exp, 0);
if (ret == 0)
break;
else if (ret != -EBUSY) {

View File

@ -414,7 +414,7 @@ static unsigned int nf_nat_sip_expect(struct sk_buff *skb, unsigned int protoff,
int ret;
exp->tuple.dst.u.udp.port = htons(port);
ret = nf_ct_expect_related(exp);
ret = nf_ct_expect_related(exp, NF_CT_EXP_F_SKIP_MASTER);
if (ret == 0)
break;
else if (ret != -EBUSY) {
@ -607,7 +607,8 @@ static unsigned int nf_nat_sdp_media(struct sk_buff *skb, unsigned int protoff,
int ret;
rtp_exp->tuple.dst.u.udp.port = htons(port);
ret = nf_ct_expect_related(rtp_exp);
ret = nf_ct_expect_related(rtp_exp,
NF_CT_EXP_F_SKIP_MASTER);
if (ret == -EBUSY)
continue;
else if (ret < 0) {
@ -615,7 +616,8 @@ static unsigned int nf_nat_sdp_media(struct sk_buff *skb, unsigned int protoff,
break;
}
rtcp_exp->tuple.dst.u.udp.port = htons(port + 1);
ret = nf_ct_expect_related(rtcp_exp);
ret = nf_ct_expect_related(rtcp_exp,
NF_CT_EXP_F_SKIP_MASTER);
if (ret == 0)
break;
else if (ret == -EBUSY) {

View File

@ -30,7 +30,7 @@ static unsigned int help(struct sk_buff *skb,
= ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u.udp.port;
exp->dir = IP_CT_DIR_REPLY;
exp->expectfn = nf_nat_follow_master;
if (nf_ct_expect_related(exp) != 0) {
if (nf_ct_expect_related(exp, 0) != 0) {
nf_ct_helper_log(skb, exp->master, "cannot add expectation");
return NF_DROP;
}

View File

@ -470,7 +470,7 @@ synproxy_send_client_synack(struct net *net,
struct iphdr *iph, *niph;
struct tcphdr *nth;
unsigned int tcp_hdr_size;
u16 mss = opts->mss;
u16 mss = opts->mss_encode;
iph = ip_hdr(skb);
@ -687,7 +687,7 @@ ipv4_synproxy_hook(void *priv, struct sk_buff *skb,
state = &ct->proto.tcp;
switch (state->state) {
case TCP_CONNTRACK_CLOSE:
if (th->rst && !test_bit(IPS_SEEN_REPLY_BIT, &ct->status)) {
if (th->rst && CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL) {
nf_ct_seqadj_init(ct, ctinfo, synproxy->isn -
ntohl(th->seq) + 1);
break;
@ -884,7 +884,7 @@ synproxy_send_client_synack_ipv6(struct net *net,
struct ipv6hdr *iph, *niph;
struct tcphdr *nth;
unsigned int tcp_hdr_size;
u16 mss = opts->mss;
u16 mss = opts->mss_encode;
iph = ipv6_hdr(skb);
@ -1111,7 +1111,7 @@ ipv6_synproxy_hook(void *priv, struct sk_buff *skb,
state = &ct->proto.tcp;
switch (state->state) {
case TCP_CONNTRACK_CLOSE:
if (th->rst && !test_bit(IPS_SEEN_REPLY_BIT, &ct->status)) {
if (th->rst && CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL) {
nf_ct_seqadj_init(ct, ctinfo, synproxy->isn -
ntohl(th->seq) + 1);
break;

View File

@ -1662,7 +1662,7 @@ static int nf_tables_addchain(struct nft_ctx *ctx, u8 family, u8 genmask,
chain->flags |= NFT_BASE_CHAIN | flags;
basechain->policy = NF_ACCEPT;
INIT_LIST_HEAD(&basechain->cb_list);
flow_block_init(&basechain->flow_block);
} else {
chain = kzalloc(sizeof(*chain), GFP_KERNEL);
if (chain == NULL)
@ -1900,6 +1900,8 @@ static int nf_tables_newchain(struct net *net, struct sock *nlsk,
if (nla[NFTA_CHAIN_FLAGS])
flags = ntohl(nla_get_be32(nla[NFTA_CHAIN_FLAGS]));
else if (chain)
flags = chain->flags;
nft_ctx_init(&ctx, net, skb, nlh, family, table, chain, nla);

View File

@ -116,7 +116,7 @@ static int nft_setup_cb_call(struct nft_base_chain *basechain,
struct flow_block_cb *block_cb;
int err;
list_for_each_entry(block_cb, &basechain->cb_list, list) {
list_for_each_entry(block_cb, &basechain->flow_block.cb_list, list) {
err = block_cb->cb(type, type_data, block_cb->cb_priv);
if (err < 0)
return err;
@ -154,7 +154,7 @@ static int nft_flow_offload_rule(struct nft_trans *trans,
static int nft_flow_offload_bind(struct flow_block_offload *bo,
struct nft_base_chain *basechain)
{
list_splice(&bo->cb_list, &basechain->cb_list);
list_splice(&bo->cb_list, &basechain->flow_block.cb_list);
return 0;
}
@ -198,6 +198,7 @@ static int nft_flow_offload_chain(struct nft_trans *trans,
return -EOPNOTSUPP;
bo.command = cmd;
bo.block = &basechain->flow_block;
bo.binder_type = FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS;
bo.extack = &extack;
INIT_LIST_HEAD(&bo.cb_list);

View File

@ -578,7 +578,7 @@ static int nfnetlink_bind(struct net *net, int group)
ss = nfnetlink_get_subsys(type << 8);
rcu_read_unlock();
if (!ss)
request_module("nfnetlink-subsys-%d", type);
request_module_nowait("nfnetlink-subsys-%d", type);
return 0;
}
#endif

View File

@ -193,7 +193,7 @@ static inline void nft_chain_filter_inet_init(void) {}
static inline void nft_chain_filter_inet_fini(void) {}
#endif /* CONFIG_NF_TABLES_IPV6 */
#ifdef CONFIG_NF_TABLES_BRIDGE
#if IS_ENABLED(CONFIG_NF_TABLES_BRIDGE)
static unsigned int
nft_do_chain_bridge(void *priv,
struct sk_buff *skb,

View File

@ -142,3 +142,6 @@ MODULE_ALIAS_NFT_CHAIN(AF_INET, "nat");
#ifdef CONFIG_NF_TABLES_IPV6
MODULE_ALIAS_NFT_CHAIN(AF_INET6, "nat");
#endif
#ifdef CONFIG_NF_TABLES_INET
MODULE_ALIAS_NFT_CHAIN(1, "nat"); /* NFPROTO_INET */
#endif

View File

@ -1252,7 +1252,7 @@ static void nft_ct_expect_obj_eval(struct nft_object *obj,
priv->l4proto, NULL, &priv->dport);
exp->timeout.expires = jiffies + priv->timeout * HZ;
if (nf_ct_expect_related(exp) != 0)
if (nf_ct_expect_related(exp, 0) != 0)
regs->verdict.code = NF_DROP;
}

View File

@ -129,7 +129,7 @@ static int nft_symhash_init(const struct nft_ctx *ctx,
priv->dreg = nft_parse_register(tb[NFTA_HASH_DREG]);
priv->modulus = ntohl(nla_get_be32(tb[NFTA_HASH_MODULUS]));
if (priv->modulus <= 1)
if (priv->modulus < 1)
return -ERANGE;
if (priv->offset + priv->modulus - 1 < priv->offset)

View File

@ -546,7 +546,7 @@ nft_meta_select_ops(const struct nft_ctx *ctx,
if (tb[NFTA_META_DREG] && tb[NFTA_META_SREG])
return ERR_PTR(-EINVAL);
#ifdef CONFIG_NF_TABLES_BRIDGE
#if IS_ENABLED(CONFIG_NF_TABLES_BRIDGE) && IS_MODULE(CONFIG_NFT_BRIDGE_META)
if (ctx->family == NFPROTO_BRIDGE)
return ERR_PTR(-EAGAIN);
#endif
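
Here, as in the chain-filter hunk further up, the plain #ifdef is replaced because it only covers the built-in case; the bridge support can now be modular, and the meta fallback additionally checks that the bridge-specific meta module is actually built as a module before returning -EAGAIN to trigger its load. A one-line reminder of the semantics (editor's note, not from the diff):

/* IS_ENABLED(CONFIG_FOO) is true for both CONFIG_FOO=y and =m;
 * IS_MODULE(CONFIG_FOO) only for =m; #ifdef CONFIG_FOO only for =y. */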

View File

@ -291,4 +291,4 @@ module_exit(nft_redir_module_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Arturo Borrero Gonzalez <arturo@debian.org>");
MODULE_ALIAS_NFT_EXPR("nat");
MODULE_ALIAS_NFT_EXPR("redir");

View File

@ -31,6 +31,8 @@ static void nft_synproxy_tcp_options(struct synproxy_options *opts,
opts->options |= NF_SYNPROXY_OPT_ECN;
opts->options &= priv->info.options;
opts->mss_encode = opts->mss;
opts->mss = info->mss;
if (opts->options & NF_SYNPROXY_OPT_TIMESTAMP)
synproxy_init_timestamp_cookie(info, opts);
else

View File

@ -59,7 +59,7 @@ u64 ovs_flow_used_time(unsigned long flow_jiffies)
void ovs_flow_stats_update(struct sw_flow *flow, __be16 tcp_flags,
const struct sk_buff *skb)
{
struct flow_stats *stats;
struct sw_flow_stats *stats;
unsigned int cpu = smp_processor_id();
int len = skb->len + (skb_vlan_tag_present(skb) ? VLAN_HLEN : 0);
@ -87,7 +87,7 @@ void ovs_flow_stats_update(struct sw_flow *flow, __be16 tcp_flags,
if (likely(flow->stats_last_writer != -1) &&
likely(!rcu_access_pointer(flow->stats[cpu]))) {
/* Try to allocate CPU-specific stats. */
struct flow_stats *new_stats;
struct sw_flow_stats *new_stats;
new_stats =
kmem_cache_alloc_node(flow_stats_cache,
@ -134,7 +134,7 @@ void ovs_flow_stats_get(const struct sw_flow *flow,
/* We open code this to make sure cpu 0 is always considered */
for (cpu = 0; cpu < nr_cpu_ids; cpu = cpumask_next(cpu, &flow->cpu_used_mask)) {
struct flow_stats *stats = rcu_dereference_ovsl(flow->stats[cpu]);
struct sw_flow_stats *stats = rcu_dereference_ovsl(flow->stats[cpu]);
if (stats) {
/* Local CPU may write on non-local stats, so we must
@ -158,7 +158,7 @@ void ovs_flow_stats_clear(struct sw_flow *flow)
/* We open code this to make sure cpu 0 is always considered */
for (cpu = 0; cpu < nr_cpu_ids; cpu = cpumask_next(cpu, &flow->cpu_used_mask)) {
struct flow_stats *stats = ovsl_dereference(flow->stats[cpu]);
struct sw_flow_stats *stats = ovsl_dereference(flow->stats[cpu]);
if (stats) {
spin_lock_bh(&stats->lock);

View File

@ -194,7 +194,7 @@ struct sw_flow_actions {
struct nlattr actions[];
};
struct flow_stats {
struct sw_flow_stats {
u64 packet_count; /* Number of packets matched. */
u64 byte_count; /* Number of bytes matched. */
unsigned long used; /* Last used time (in jiffies). */
@ -216,7 +216,7 @@ struct sw_flow {
struct cpumask cpu_used_mask;
struct sw_flow_mask *mask;
struct sw_flow_actions __rcu *sf_acts;
struct flow_stats __rcu *stats[]; /* One for each CPU. First one
struct sw_flow_stats __rcu *stats[]; /* One for each CPU. First one
* is allocated at flow creation time,
* the rest are allocated on demand
* while holding the 'stats[0].lock'.

View File

@ -66,7 +66,7 @@ void ovs_flow_mask_key(struct sw_flow_key *dst, const struct sw_flow_key *src,
struct sw_flow *ovs_flow_alloc(void)
{
struct sw_flow *flow;
struct flow_stats *stats;
struct sw_flow_stats *stats;
flow = kmem_cache_zalloc(flow_cache, GFP_KERNEL);
if (!flow)
@ -110,7 +110,7 @@ static void flow_free(struct sw_flow *flow)
for (cpu = 0; cpu < nr_cpu_ids; cpu = cpumask_next(cpu, &flow->cpu_used_mask))
if (flow->stats[cpu])
kmem_cache_free(flow_stats_cache,
(struct flow_stats __force *)flow->stats[cpu]);
(struct sw_flow_stats __force *)flow->stats[cpu]);
kmem_cache_free(flow_cache, flow);
}
@ -712,13 +712,13 @@ int ovs_flow_init(void)
flow_cache = kmem_cache_create("sw_flow", sizeof(struct sw_flow)
+ (nr_cpu_ids
* sizeof(struct flow_stats *)),
* sizeof(struct sw_flow_stats *)),
0, 0, NULL);
if (flow_cache == NULL)
return -ENOMEM;
flow_stats_cache
= kmem_cache_create("sw_flow_stats", sizeof(struct flow_stats),
= kmem_cache_create("sw_flow_stats", sizeof(struct sw_flow_stats),
0, SLAB_HWCACHE_ALIGN, NULL);
if (flow_stats_cache == NULL) {
kmem_cache_destroy(flow_cache);
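
The openvswitch hunks above are a mechanical rename of the per-flow, per-CPU statistics type to sw_flow_stats, avoiding a clash with the flow_stats structure defined by the flow offload core. For orientation, the conflicting core type looks roughly like this (editor's sketch of include/net/flow_offload.h around this release, not taken from the diff):

struct flow_stats {
        u64     pkts;           /* packets accounted to the flow */
        u64     bytes;          /* bytes accounted to the flow */
        u64     lastused;       /* timestamp of last use */
};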

View File

@ -691,6 +691,8 @@ static void tc_indr_block_ing_cmd(struct tc_indr_block_dev *indr_dev,
if (!indr_dev->block)
return;
bo.block = &indr_dev->block->flow_block;
indr_block_cb->cb(indr_dev->dev, indr_block_cb->cb_priv, TC_SETUP_BLOCK,
&bo);
tcf_block_setup(indr_dev->block, &bo);
@ -775,6 +777,7 @@ static void tc_indr_block_call(struct tcf_block *block, struct net_device *dev,
.command = command,
.binder_type = ei->binder_type,
.net = dev_net(dev),
.block = &block->flow_block,
.block_shared = tcf_block_shared(block),
.extack = extack,
};
@ -810,6 +813,7 @@ static int tcf_block_offload_cmd(struct tcf_block *block,
bo.net = dev_net(dev);
bo.command = command;
bo.binder_type = ei->binder_type;
bo.block = &block->flow_block;
bo.block_shared = tcf_block_shared(block);
bo.extack = extack;
INIT_LIST_HEAD(&bo.cb_list);
@ -987,8 +991,8 @@ static struct tcf_block *tcf_block_create(struct net *net, struct Qdisc *q,
return ERR_PTR(-ENOMEM);
}
mutex_init(&block->lock);
flow_block_init(&block->flow_block);
INIT_LIST_HEAD(&block->chain_list);
INIT_LIST_HEAD(&block->cb_list);
INIT_LIST_HEAD(&block->owner_list);
INIT_LIST_HEAD(&block->chain0.filter_chain_list);
@ -1514,7 +1518,7 @@ void tcf_block_put(struct tcf_block *block)
EXPORT_SYMBOL(tcf_block_put);
static int
tcf_block_playback_offloads(struct tcf_block *block, tc_setup_cb_t *cb,
tcf_block_playback_offloads(struct tcf_block *block, flow_setup_cb_t *cb,
void *cb_priv, bool add, bool offload_in_use,
struct netlink_ext_ack *extack)
{
@ -1570,7 +1574,7 @@ static int tcf_block_bind(struct tcf_block *block,
i++;
}
list_splice(&bo->cb_list, &block->cb_list);
list_splice(&bo->cb_list, &block->flow_block.cb_list);
return 0;
@ -2152,7 +2156,9 @@ replay:
tfilter_notify(net, skb, n, tp, block, q, parent, fh,
RTM_NEWTFILTER, false, rtnl_held);
tfilter_put(tp, fh);
q->flags &= ~TCQ_F_CAN_BYPASS;
/* q pointer is NULL for shared blocks */
if (q)
q->flags &= ~TCQ_F_CAN_BYPASS;
}
errout:
@ -3156,7 +3162,7 @@ int tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type,
if (block->nooffloaddevcnt && err_stop)
return -EOPNOTSUPP;
list_for_each_entry(block_cb, &block->cb_list, list) {
list_for_each_entry(block_cb, &block->flow_block.cb_list, list) {
err = block_cb->cb(type, type_data, block_cb->cb_priv);
if (err) {
if (err_stop)
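
Three related adjustments run through the classifier core above: every flow_block_offload now carries a pointer to the block's embedded flow_block (bo.block = &block->flow_block), tcf_block_create() initialises that flow_block instead of a private cb_list, and clearing TCQ_F_CAN_BYPASS is guarded against a NULL qdisc. The guard matters because a filter can be attached to a shared block by block index rather than through a qdisc, in which case there is no q to update; a minimal sketch of the pattern (the helper name is illustrative only):

/* Editor's sketch: only per-qdisc state is touched, and only when the
 * filter request actually came in through a qdisc; q is NULL when the
 * filter sits on a shared block referenced by its block index.
 */
static void demo_disable_bypass(struct Qdisc *q)
{
        if (q)
                q->flags &= ~TCQ_F_CAN_BYPASS;
}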

View File

@ -651,7 +651,7 @@ skip:
}
}
static int cls_bpf_reoffload(struct tcf_proto *tp, bool add, tc_setup_cb_t *cb,
static int cls_bpf_reoffload(struct tcf_proto *tp, bool add, flow_setup_cb_t *cb,
void *cb_priv, struct netlink_ext_ack *extack)
{
struct cls_bpf_head *head = rtnl_dereference(tp->root);

View File

@ -1800,7 +1800,7 @@ fl_get_next_hw_filter(struct tcf_proto *tp, struct cls_fl_filter *f, bool add)
return NULL;
}
static int fl_reoffload(struct tcf_proto *tp, bool add, tc_setup_cb_t *cb,
static int fl_reoffload(struct tcf_proto *tp, bool add, flow_setup_cb_t *cb,
void *cb_priv, struct netlink_ext_ack *extack)
{
struct tcf_block *block = tp->chain->block;

View File

@ -282,7 +282,7 @@ skip:
arg->count++;
}
static int mall_reoffload(struct tcf_proto *tp, bool add, tc_setup_cb_t *cb,
static int mall_reoffload(struct tcf_proto *tp, bool add, flow_setup_cb_t *cb,
void *cb_priv, struct netlink_ext_ack *extack)
{
struct cls_mall_head *head = rtnl_dereference(tp->root);

View File

@ -1152,7 +1152,7 @@ static void u32_walk(struct tcf_proto *tp, struct tcf_walker *arg,
}
static int u32_reoffload_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht,
bool add, tc_setup_cb_t *cb, void *cb_priv,
bool add, flow_setup_cb_t *cb, void *cb_priv,
struct netlink_ext_ack *extack)
{
struct tc_cls_u32_offload cls_u32 = {};
@ -1172,7 +1172,7 @@ static int u32_reoffload_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht,
}
static int u32_reoffload_knode(struct tcf_proto *tp, struct tc_u_knode *n,
bool add, tc_setup_cb_t *cb, void *cb_priv,
bool add, flow_setup_cb_t *cb, void *cb_priv,
struct netlink_ext_ack *extack)
{
struct tc_u_hnode *ht = rtnl_dereference(n->ht_down);
@ -1213,7 +1213,7 @@ static int u32_reoffload_knode(struct tcf_proto *tp, struct tc_u_knode *n,
return 0;
}
static int u32_reoffload(struct tcf_proto *tp, bool add, tc_setup_cb_t *cb,
static int u32_reoffload(struct tcf_proto *tp, bool add, flow_setup_cb_t *cb,
void *cb_priv, struct netlink_ext_ack *extack)
{
struct tc_u_common *tp_c = tp->data;
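
The cls_bpf, cls_flower, cls_matchall and cls_u32 hunks all track the same rename: the ->reoffload() callbacks now take a flow_setup_cb_t instead of the old tc_setup_cb_t. The typedef presumably keeps the familiar three-argument shape (editor's sketch, assuming it mirrors the previous tc_setup_cb_t):

typedef int flow_setup_cb_t(enum tc_setup_type type, void *type_data,
                            void *cb_priv);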

View File

@ -476,7 +476,7 @@ static void tipc_topsrv_accept(struct work_struct *work)
}
}
/* tipc_toprsv_listener_data_ready - interrupt callback with connection request
/* tipc_topsrv_listener_data_ready - interrupt callback with connection request
* The queued job is launched into tipc_topsrv_accept()
*/
static void tipc_topsrv_listener_data_ready(struct sock *sk)