Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Pull networking fixes from David Miller:

 1) Fix PCI IDs of 9000 series iwlwifi devices, from Luca Coelho.

 2) bpf offload bug fixes from Jakub Kicinski.

 3) Fix bpf verifier to NOP out code which is dead at run time because,
    due to branch pruning, the verifier will not explore such
    instructions. From Alexei Starovoitov. (A sketch of the issue
    follows this list.)

 4) Fix crash when deleting secondary chains in packet scheduler
    classifier. From Roman Kapl.

 5) Fix buffer management bugs in smc, from Ursula Braun.

 6) Fix regression in anycast route handling, from David Ahern.

 7) Fix link settings regression in r8169, from Tobias Jakobi.

 8) Add back enough UFO support so that live migration still works, from
    Willem de Bruijn.

 9) Linearize enough packet data to cover the full extent to which the
    ipvlan code will inspect the packet headers, from Gao Feng.
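
A sketch of the issue behind (3), using a hypothetical program that is
not taken from the series: the verifier tracks register values, can
prove that one side of a branch is never taken, and skips verifying the
untaken side entirely. Before the fix, those pruned (and therefore
unchecked) instructions were still handed to the JIT:

        int mark = 1;             /* a constant the verifier can track */

        if (mark != 1)            /* provably false, so the branch is pruned */
                return *(int *)0; /* dead and unverified: must never be JITed */
        return 0;

The fix has the verifier mark every instruction it actually visits (the
"seen" flag in the kernel/bpf/verifier.c hunk near the end of this
page) and rewrite unvisited instructions to NOP-equivalents before the
program can run.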

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (46 commits)
  ipvlan: Fix insufficient skb linear check for ipv6 icmp
  ipvlan: Fix insufficient skb linear check for arp
  geneve: only configure or fill UDP_ZERO_CSUM6_RX/TX info when CONFIG_IPV6
  net: dsa: bcm_sf2: Clear IDDQ_GLOBAL_PWR bit for PHY
  net: accept UFO datagrams from tuntap and packet
  net: realtek: r8169: implement set_link_ksettings()
  net: ipv6: Fixup device for anycast routes during copy
  net/smc: Fix preinitialization of buf_desc in __smc_buf_create()
  net/smc: use sk_rcvbuf as start for rmb creation
  ipv6: Do not consider linkdown nexthops during multipath
  net: sched: fix crash when deleting secondary chains
  net: phy: cortina: add missing MODULE_DESCRIPTION/AUTHOR/LICENSE
  bpf: fix branch pruning logic
  bpf: change bpf_perf_event_output arg5 type to ARG_CONST_SIZE_OR_ZERO
  bpf: change bpf_probe_read_str arg2 type to ARG_CONST_SIZE_OR_ZERO
  bpf: remove explicit handling of 0 for arg2 in bpf_probe_read
  bpf: introduce ARG_PTR_TO_MEM_OR_NULL
  i40evf: Use smp_rmb rather than read_barrier_depends
  fm10k: Use smp_rmb rather than read_barrier_depends
  igb: Use smp_rmb rather than read_barrier_depends
  ...
Linus Torvalds 2017-11-23 21:18:46 -10:00
commit 1d3b78bbc6
62 changed files with 997 additions and 411 deletions

@ -95,7 +95,7 @@ static void bcm_sf2_gphy_enable_set(struct dsa_switch *ds, bool enable)
reg = reg_readl(priv, REG_SPHY_CNTRL);
if (enable) {
reg |= PHY_RESET;
reg &= ~(EXT_PWR_DOWN | IDDQ_BIAS | CK25_DIS);
reg &= ~(EXT_PWR_DOWN | IDDQ_BIAS | IDDQ_GLOBAL_PWR | CK25_DIS);
reg_writel(priv, reg, REG_SPHY_CNTRL);
udelay(21);
reg = reg_readl(priv, REG_SPHY_CNTRL);

@ -1231,7 +1231,7 @@ static bool fm10k_clean_tx_irq(struct fm10k_q_vector *q_vector,
break;
/* prevent any other reads prior to eop_desc */
read_barrier_depends();
smp_rmb();
/* if DD is not set pending work has not been completed */
if (!(eop_desc->flags & FM10K_TXD_FLAG_DONE))
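
The same one-line substitution repeats in the i40e, i40evf, igb, igbvf,
ixgbe and ixgbevf hunks below. A minimal sketch of the ordering it
enforces (field and flag names are hypothetical; the structure follows
the cleanup loops in these drivers): the loop loads a descriptor
pointer published by the producer, then reads its done flag and, later,
other ring state that is not data-dependent on that pointer.
read_barrier_depends() only orders data-dependent loads (and compiles
to nothing on most architectures); smp_rmb() orders all earlier reads
against all later ones:

        eop_desc = tx_buf->next_to_watch;
        if (!eop_desc)
                break;
        /* no later read (done flag, buffer state used for unmapping)
         * may be speculated ahead of the eop_desc load */
        smp_rmb();
        if (!(eop_desc->flags & TXD_FLAG_DONE))   /* DD bit, set by HW */
                break;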

@ -90,7 +90,6 @@
#define I40E_AQ_LEN 256
#define I40E_AQ_WORK_LIMIT 66 /* max number of VFs + a little */
#define I40E_MAX_USER_PRIORITY 8
#define I40E_MAX_QUEUES_PER_CH 64
#define I40E_DEFAULT_TRAFFIC_CLASS BIT(0)
#define I40E_DEFAULT_MSG_ENABLE 4
#define I40E_QUEUE_WAIT_RETRY_LIMIT 10

@ -613,6 +613,12 @@ i40e_status i40e_init_adminq(struct i40e_hw *hw)
hw->flags |= I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE;
}
/* Newer versions of firmware require lock when reading the NVM */
if (hw->aq.api_maj_ver > 1 ||
(hw->aq.api_maj_ver == 1 &&
hw->aq.api_min_ver >= 5))
hw->flags |= I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK;
/* The ability to RX (not drop) 802.1ad frames was added in API 1.7 */
if (hw->aq.api_maj_ver > 1 ||
(hw->aq.api_maj_ver == 1 &&

@ -948,7 +948,8 @@ i40e_status i40e_init_shared_code(struct i40e_hw *hw)
hw->pf_id = (u8)(func_rid & 0x7);
if (hw->mac.type == I40E_MAC_X722)
hw->flags |= I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE;
hw->flags |= I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE |
I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK;
status = i40e_init_nvm(hw);
return status;
@ -1268,6 +1269,7 @@ i40e_status i40e_pf_reset(struct i40e_hw *hw)
* we don't need to do the PF Reset
*/
if (!cnt) {
u32 reg2 = 0;
if (hw->revision_id == 0)
cnt = I40E_PF_RESET_WAIT_COUNT_A0;
else
@ -1279,6 +1281,12 @@ i40e_status i40e_pf_reset(struct i40e_hw *hw)
reg = rd32(hw, I40E_PFGEN_CTRL);
if (!(reg & I40E_PFGEN_CTRL_PFSWR_MASK))
break;
reg2 = rd32(hw, I40E_GLGEN_RSTAT);
if (reg2 & I40E_GLGEN_RSTAT_DEVSTATE_MASK) {
hw_dbg(hw, "Core reset upcoming. Skipping PF reset request.\n");
hw_dbg(hw, "I40E_GLGEN_RSTAT = 0x%x\n", reg2);
return I40E_ERR_NOT_READY;
}
usleep_range(1000, 2000);
}
if (reg & I40E_PFGEN_CTRL_PFSWR_MASK) {

@ -2166,6 +2166,73 @@ i40e_aqc_broadcast_filter(struct i40e_vsi *vsi, const char *vsi_name,
return aq_ret;
}
/**
* i40e_set_promiscuous - set promiscuous mode
* @pf: board private structure
* @promisc: promisc on or off
*
* There are different ways of setting promiscuous mode on a PF depending on
* what state/environment we're in. This identifies and sets it appropriately.
* Returns 0 on success.
**/
static int i40e_set_promiscuous(struct i40e_pf *pf, bool promisc)
{
struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
struct i40e_hw *hw = &pf->hw;
i40e_status aq_ret;
if (vsi->type == I40E_VSI_MAIN &&
pf->lan_veb != I40E_NO_VEB &&
!(pf->flags & I40E_FLAG_MFP_ENABLED)) {
/* set defport ON for Main VSI instead of true promisc
* this way we will get all unicast/multicast and VLAN
* promisc behavior but will not get VF or VMDq traffic
* replicated on the Main VSI.
*/
if (promisc)
aq_ret = i40e_aq_set_default_vsi(hw,
vsi->seid,
NULL);
else
aq_ret = i40e_aq_clear_default_vsi(hw,
vsi->seid,
NULL);
if (aq_ret) {
dev_info(&pf->pdev->dev,
"Set default VSI failed, err %s, aq_err %s\n",
i40e_stat_str(hw, aq_ret),
i40e_aq_str(hw, hw->aq.asq_last_status));
}
} else {
aq_ret = i40e_aq_set_vsi_unicast_promiscuous(
hw,
vsi->seid,
promisc, NULL,
true);
if (aq_ret) {
dev_info(&pf->pdev->dev,
"set unicast promisc failed, err %s, aq_err %s\n",
i40e_stat_str(hw, aq_ret),
i40e_aq_str(hw, hw->aq.asq_last_status));
}
aq_ret = i40e_aq_set_vsi_multicast_promiscuous(
hw,
vsi->seid,
promisc, NULL);
if (aq_ret) {
dev_info(&pf->pdev->dev,
"set multicast promisc failed, err %s, aq_err %s\n",
i40e_stat_str(hw, aq_ret),
i40e_aq_str(hw, hw->aq.asq_last_status));
}
}
if (!aq_ret)
pf->cur_promisc = promisc;
return aq_ret;
}
/**
* i40e_sync_vsi_filters - Update the VSI filter list to the HW
* @vsi: ptr to the VSI
@ -2467,81 +2534,16 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
cur_promisc = (!!(vsi->current_netdev_flags & IFF_PROMISC) ||
test_bit(__I40E_VSI_OVERFLOW_PROMISC,
vsi->state));
if ((vsi->type == I40E_VSI_MAIN) &&
(pf->lan_veb != I40E_NO_VEB) &&
!(pf->flags & I40E_FLAG_MFP_ENABLED)) {
/* set defport ON for Main VSI instead of true promisc
* this way we will get all unicast/multicast and VLAN
* promisc behavior but will not get VF or VMDq traffic
* replicated on the Main VSI.
*/
if (pf->cur_promisc != cur_promisc) {
pf->cur_promisc = cur_promisc;
if (cur_promisc)
aq_ret =
i40e_aq_set_default_vsi(hw,
vsi->seid,
NULL);
else
aq_ret =
i40e_aq_clear_default_vsi(hw,
vsi->seid,
NULL);
if (aq_ret) {
retval = i40e_aq_rc_to_posix(aq_ret,
hw->aq.asq_last_status);
dev_info(&pf->pdev->dev,
"Set default VSI failed on %s, err %s, aq_err %s\n",
vsi_name,
i40e_stat_str(hw, aq_ret),
i40e_aq_str(hw,
hw->aq.asq_last_status));
}
}
} else {
aq_ret = i40e_aq_set_vsi_unicast_promiscuous(
hw,
vsi->seid,
cur_promisc, NULL,
true);
if (aq_ret) {
retval =
i40e_aq_rc_to_posix(aq_ret,
hw->aq.asq_last_status);
dev_info(&pf->pdev->dev,
"set unicast promisc failed on %s, err %s, aq_err %s\n",
vsi_name,
i40e_stat_str(hw, aq_ret),
i40e_aq_str(hw,
hw->aq.asq_last_status));
}
aq_ret = i40e_aq_set_vsi_multicast_promiscuous(
hw,
vsi->seid,
cur_promisc, NULL);
if (aq_ret) {
retval =
i40e_aq_rc_to_posix(aq_ret,
hw->aq.asq_last_status);
dev_info(&pf->pdev->dev,
"set multicast promisc failed on %s, err %s, aq_err %s\n",
vsi_name,
i40e_stat_str(hw, aq_ret),
i40e_aq_str(hw,
hw->aq.asq_last_status));
}
}
aq_ret = i40e_aq_set_vsi_broadcast(&vsi->back->hw,
vsi->seid,
cur_promisc, NULL);
aq_ret = i40e_set_promiscuous(pf, cur_promisc);
if (aq_ret) {
retval = i40e_aq_rc_to_posix(aq_ret,
pf->hw.aq.asq_last_status);
hw->aq.asq_last_status);
dev_info(&pf->pdev->dev,
"set brdcast promisc failed, err %s, aq_err %s\n",
i40e_stat_str(hw, aq_ret),
i40e_aq_str(hw,
hw->aq.asq_last_status));
"Setting promiscuous %s failed on %s, err %s aq_err %s\n",
cur_promisc ? "on" : "off",
vsi_name,
i40e_stat_str(hw, aq_ret),
i40e_aq_str(hw, hw->aq.asq_last_status));
}
}
out:
@ -3964,7 +3966,7 @@ static bool i40e_clean_fdir_tx_irq(struct i40e_ring *tx_ring, int budget)
break;
/* prevent any other reads prior to eop_desc */
read_barrier_depends();
smp_rmb();
/* if the descriptor isn't done, no work yet to do */
if (!(eop_desc->cmd_type_offset_bsz &
@ -5629,14 +5631,6 @@ static int i40e_validate_num_queues(struct i40e_pf *pf, int num_queues,
return -EINVAL;
*reconfig_rss = false;
if (num_queues > I40E_MAX_QUEUES_PER_CH) {
dev_err(&pf->pdev->dev,
"Failed to create VMDq VSI. User requested num_queues (%d) > I40E_MAX_QUEUES_PER_VSI (%u)\n",
num_queues, I40E_MAX_QUEUES_PER_CH);
return -EINVAL;
}
if (vsi->current_rss_size) {
if (num_queues > vsi->current_rss_size) {
dev_dbg(&pf->pdev->dev,
@ -9429,6 +9423,15 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
if (!lock_acquired)
rtnl_unlock();
/* Restore promiscuous settings */
ret = i40e_set_promiscuous(pf, pf->cur_promisc);
if (ret)
dev_warn(&pf->pdev->dev,
"Failed to restore promiscuous setting: %s, err %s aq_err %s\n",
pf->cur_promisc ? "on" : "off",
i40e_stat_str(&pf->hw, ret),
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
i40e_reset_all_vfs(pf, true);
/* tell the firmware that we're starting */
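
The net effect of this file's changes: the promiscuous logic that was
open-coded in i40e_sync_vsi_filters() now lives in one helper, which
also caches the last setting that was applied successfully
(pf->cur_promisc), and that cache is what lets i40e_rebuild() replay
the setting after a reset, as in the last hunk above.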

@ -328,15 +328,17 @@ static i40e_status __i40e_read_nvm_word(struct i40e_hw *hw,
i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
u16 *data)
{
i40e_status ret_code;
i40e_status ret_code = 0;
ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
if (hw->flags & I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK)
ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
if (ret_code)
return ret_code;
ret_code = __i40e_read_nvm_word(hw, offset, data);
i40e_release_nvm(hw);
if (hw->flags & I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK)
i40e_release_nvm(hw);
return ret_code;
}

@ -759,7 +759,7 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
break;
/* prevent any other reads prior to eop_desc */
read_barrier_depends();
smp_rmb();
i40e_trace(clean_tx_irq, tx_ring, tx_desc, tx_buf);
/* we have caught up to head, no work left to do */

@ -629,6 +629,7 @@ struct i40e_hw {
#define I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE BIT_ULL(0)
#define I40E_HW_FLAG_802_1AD_CAPABLE BIT_ULL(1)
#define I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE BIT_ULL(2)
#define I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK BIT_ULL(3)
u64 flags;
/* Used in set switch config AQ command */

@ -2218,18 +2218,19 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
struct i40e_mac_filter *f;
f = i40e_find_mac(vsi, al->list[i].addr);
if (!f)
if (!f) {
f = i40e_add_mac_filter(vsi, al->list[i].addr);
if (!f) {
dev_err(&pf->pdev->dev,
"Unable to add MAC filter %pM for VF %d\n",
al->list[i].addr, vf->vf_id);
ret = I40E_ERR_PARAM;
spin_unlock_bh(&vsi->mac_filter_hash_lock);
goto error_param;
} else {
vf->num_mac++;
if (!f) {
dev_err(&pf->pdev->dev,
"Unable to add MAC filter %pM for VF %d\n",
al->list[i].addr, vf->vf_id);
ret = I40E_ERR_PARAM;
spin_unlock_bh(&vsi->mac_filter_hash_lock);
goto error_param;
} else {
vf->num_mac++;
}
}
}
spin_unlock_bh(&vsi->mac_filter_hash_lock);

@ -179,7 +179,7 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
break;
/* prevent any other reads prior to eop_desc */
read_barrier_depends();
smp_rmb();
i40e_trace(clean_tx_irq, tx_ring, tx_desc, tx_buf);
/* if the descriptor isn't done, no work yet to do */

@ -25,6 +25,26 @@ static struct i40e_ops i40evf_lan_ops = {
.setup_qvlist = i40evf_client_setup_qvlist,
};
/**
* i40evf_client_get_params - retrieve relevant client parameters
* @vsi: VSI with parameters
* @params: client param struct
**/
static
void i40evf_client_get_params(struct i40e_vsi *vsi, struct i40e_params *params)
{
int i;
memset(params, 0, sizeof(struct i40e_params));
params->mtu = vsi->netdev->mtu;
params->link_up = vsi->back->link_up;
for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
params->qos.prio_qos[i].tc = 0;
params->qos.prio_qos[i].qs_handle = vsi->qs_handle;
}
}
/**
* i40evf_notify_client_message - call the client message receive callback
* @vsi: the VSI associated with this client
@ -66,10 +86,6 @@ void i40evf_notify_client_l2_params(struct i40e_vsi *vsi)
return;
cinst = vsi->back->cinst;
memset(&params, 0, sizeof(params));
params.mtu = vsi->netdev->mtu;
params.link_up = vsi->back->link_up;
params.qos.prio_qos[0].qs_handle = vsi->qs_handle;
if (!cinst || !cinst->client || !cinst->client->ops ||
!cinst->client->ops->l2_param_change) {
@ -77,6 +93,8 @@ void i40evf_notify_client_l2_params(struct i40e_vsi *vsi)
"Cannot locate client instance l2_param_change function\n");
return;
}
i40evf_client_get_params(vsi, &params);
cinst->lan_info.params = params;
cinst->client->ops->l2_param_change(&cinst->lan_info, cinst->client,
&params);
}
@ -166,9 +184,9 @@ static struct i40e_client_instance *
i40evf_client_add_instance(struct i40evf_adapter *adapter)
{
struct i40e_client_instance *cinst = NULL;
struct netdev_hw_addr *mac = NULL;
struct i40e_vsi *vsi = &adapter->vsi;
int i;
struct netdev_hw_addr *mac = NULL;
struct i40e_params params;
if (!vf_registered_client)
goto out;
@ -192,18 +210,14 @@ i40evf_client_add_instance(struct i40evf_adapter *adapter)
cinst->lan_info.version.major = I40EVF_CLIENT_VERSION_MAJOR;
cinst->lan_info.version.minor = I40EVF_CLIENT_VERSION_MINOR;
cinst->lan_info.version.build = I40EVF_CLIENT_VERSION_BUILD;
i40evf_client_get_params(vsi, &params);
cinst->lan_info.params = params;
set_bit(__I40E_CLIENT_INSTANCE_NONE, &cinst->state);
cinst->lan_info.msix_count = adapter->num_iwarp_msix;
cinst->lan_info.msix_entries =
&adapter->msix_entries[adapter->iwarp_base_vector];
for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
cinst->lan_info.params.qos.prio_qos[i].tc = 0;
cinst->lan_info.params.qos.prio_qos[i].qs_handle =
vsi->qs_handle;
}
mac = list_first_entry(&cinst->lan_info.netdev->dev_addrs.list,
struct netdev_hw_addr, list);
if (mac)

@ -2110,6 +2110,11 @@ static void i40evf_client_task(struct work_struct *work)
adapter->flags &= ~I40EVF_FLAG_SERVICE_CLIENT_REQUESTED;
goto out;
}
if (adapter->flags & I40EVF_FLAG_CLIENT_NEEDS_L2_PARAMS) {
i40evf_notify_client_l2_params(&adapter->vsi);
adapter->flags &= ~I40EVF_FLAG_CLIENT_NEEDS_L2_PARAMS;
goto out;
}
if (adapter->flags & I40EVF_FLAG_CLIENT_NEEDS_CLOSE) {
i40evf_notify_client_close(&adapter->vsi, false);
adapter->flags &= ~I40EVF_FLAG_CLIENT_NEEDS_CLOSE;
@ -2118,11 +2123,6 @@ static void i40evf_client_task(struct work_struct *work)
if (adapter->flags & I40EVF_FLAG_CLIENT_NEEDS_OPEN) {
i40evf_notify_client_open(&adapter->vsi);
adapter->flags &= ~I40EVF_FLAG_CLIENT_NEEDS_OPEN;
goto out;
}
if (adapter->flags & I40EVF_FLAG_CLIENT_NEEDS_L2_PARAMS) {
i40evf_notify_client_l2_params(&adapter->vsi);
adapter->flags &= ~I40EVF_FLAG_CLIENT_NEEDS_L2_PARAMS;
}
out:
clear_bit(__I40EVF_IN_CLIENT_TASK, &adapter->crit_section);

@ -7317,7 +7317,7 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector, int napi_budget)
break;
/* prevent any other reads prior to eop_desc */
read_barrier_depends();
smp_rmb();
/* if DD is not set pending work has not been completed */
if (!(eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD)))

@ -810,7 +810,7 @@ static bool igbvf_clean_tx_irq(struct igbvf_ring *tx_ring)
break;
/* prevent any other reads prior to eop_desc */
read_barrier_depends();
smp_rmb();
/* if DD is not set pending work has not been completed */
if (!(eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD)))

@ -1192,7 +1192,7 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
break;
/* prevent any other reads prior to eop_desc */
read_barrier_depends();
smp_rmb();
/* if DD is not set pending work has not been completed */
if (!(eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)))

@ -326,7 +326,7 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector,
break;
/* prevent any other reads prior to eop_desc */
read_barrier_depends();
smp_rmb();
/* if DD is not set pending work has not been completed */
if (!(eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)))

@ -214,8 +214,14 @@ int nfp_net_bpf_offload(struct nfp_net *nn, struct bpf_prog *prog,
{
int err;
if (prog && !prog->aux->offload)
return -EINVAL;
if (prog) {
struct bpf_dev_offload *offload = prog->aux->offload;
if (!offload)
return -EINVAL;
if (offload->netdev != nn->dp.netdev)
return -EINVAL;
}
if (prog && old_prog) {
u8 cap;

@ -2030,21 +2030,6 @@ out:
return ret;
}
static int rtl8169_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
struct rtl8169_private *tp = netdev_priv(dev);
int ret;
del_timer_sync(&tp->timer);
rtl_lock_work(tp);
ret = rtl8169_set_speed(dev, cmd->autoneg, ethtool_cmd_speed(cmd),
cmd->duplex, cmd->advertising);
rtl_unlock_work(tp);
return ret;
}
static netdev_features_t rtl8169_fix_features(struct net_device *dev,
netdev_features_t features)
{
@ -2171,6 +2156,27 @@ static int rtl8169_get_link_ksettings(struct net_device *dev,
return rc;
}
static int rtl8169_set_link_ksettings(struct net_device *dev,
const struct ethtool_link_ksettings *cmd)
{
struct rtl8169_private *tp = netdev_priv(dev);
int rc;
u32 advertising;
if (!ethtool_convert_link_mode_to_legacy_u32(&advertising,
cmd->link_modes.advertising))
return -EINVAL;
del_timer_sync(&tp->timer);
rtl_lock_work(tp);
rc = rtl8169_set_speed(dev, cmd->base.autoneg, cmd->base.speed,
cmd->base.duplex, advertising);
rtl_unlock_work(tp);
return rc;
}
static void rtl8169_get_regs(struct net_device *dev, struct ethtool_regs *regs,
void *p)
{
@ -2591,7 +2597,6 @@ static const struct ethtool_ops rtl8169_ethtool_ops = {
.get_link = ethtool_op_get_link,
.get_coalesce = rtl_get_coalesce,
.set_coalesce = rtl_set_coalesce,
.set_settings = rtl8169_set_settings,
.get_msglevel = rtl8169_get_msglevel,
.set_msglevel = rtl8169_set_msglevel,
.get_regs = rtl8169_get_regs,
@ -2603,6 +2608,7 @@ static const struct ethtool_ops rtl8169_ethtool_ops = {
.get_ts_info = ethtool_op_get_ts_info,
.nway_reset = rtl8169_nway_reset,
.get_link_ksettings = rtl8169_get_link_ksettings,
.set_link_ksettings = rtl8169_set_link_ksettings,
};
static void rtl8169_get_mac_version(struct rtl8169_private *tp,

@ -1337,21 +1337,33 @@ static int geneve_nl2info(struct nlattr *tb[], struct nlattr *data[],
}
if (data[IFLA_GENEVE_UDP_ZERO_CSUM6_TX]) {
#if IS_ENABLED(CONFIG_IPV6)
if (changelink) {
attrtype = IFLA_GENEVE_UDP_ZERO_CSUM6_TX;
goto change_notsup;
}
if (nla_get_u8(data[IFLA_GENEVE_UDP_ZERO_CSUM6_TX]))
info->key.tun_flags &= ~TUNNEL_CSUM;
#else
NL_SET_ERR_MSG_ATTR(extack, data[IFLA_GENEVE_UDP_ZERO_CSUM6_TX],
"IPv6 support not enabled in the kernel");
return -EPFNOSUPPORT;
#endif
}
if (data[IFLA_GENEVE_UDP_ZERO_CSUM6_RX]) {
#if IS_ENABLED(CONFIG_IPV6)
if (changelink) {
attrtype = IFLA_GENEVE_UDP_ZERO_CSUM6_RX;
goto change_notsup;
}
if (nla_get_u8(data[IFLA_GENEVE_UDP_ZERO_CSUM6_RX]))
*use_udp6_rx_checksums = false;
#else
NL_SET_ERR_MSG_ATTR(extack, data[IFLA_GENEVE_UDP_ZERO_CSUM6_RX],
"IPv6 support not enabled in the kernel");
return -EPFNOSUPPORT;
#endif
}
return 0;
@ -1527,11 +1539,13 @@ static int geneve_fill_info(struct sk_buff *skb, const struct net_device *dev)
goto nla_put_failure;
if (metadata && nla_put_flag(skb, IFLA_GENEVE_COLLECT_METADATA))
goto nla_put_failure;
goto nla_put_failure;
#if IS_ENABLED(CONFIG_IPV6)
if (nla_put_u8(skb, IFLA_GENEVE_UDP_ZERO_CSUM6_RX,
!geneve->use_udp6_rx_checksums))
goto nla_put_failure;
#endif
return 0;

@ -116,7 +116,7 @@ bool ipvlan_addr_busy(struct ipvl_port *port, void *iaddr, bool is_v6)
return false;
}
static void *ipvlan_get_L3_hdr(struct sk_buff *skb, int *type)
static void *ipvlan_get_L3_hdr(struct ipvl_port *port, struct sk_buff *skb, int *type)
{
void *lyr3h = NULL;
@ -124,7 +124,7 @@ static void *ipvlan_get_L3_hdr(struct sk_buff *skb, int *type)
case htons(ETH_P_ARP): {
struct arphdr *arph;
if (unlikely(!pskb_may_pull(skb, sizeof(*arph))))
if (unlikely(!pskb_may_pull(skb, arp_hdr_len(port->dev))))
return NULL;
arph = arp_hdr(skb);
@ -165,8 +165,26 @@ static void *ipvlan_get_L3_hdr(struct sk_buff *skb, int *type)
/* Only Neighbour Solicitation pkts need different treatment */
if (ipv6_addr_any(&ip6h->saddr) &&
ip6h->nexthdr == NEXTHDR_ICMP) {
struct icmp6hdr *icmph;
if (unlikely(!pskb_may_pull(skb, sizeof(*ip6h) + sizeof(*icmph))))
return NULL;
ip6h = ipv6_hdr(skb);
icmph = (struct icmp6hdr *)(ip6h + 1);
if (icmph->icmp6_type == NDISC_NEIGHBOUR_SOLICITATION) {
/* Need to access the ipv6 address in body */
if (unlikely(!pskb_may_pull(skb, sizeof(*ip6h) + sizeof(*icmph)
+ sizeof(struct in6_addr))))
return NULL;
ip6h = ipv6_hdr(skb);
icmph = (struct icmp6hdr *)(ip6h + 1);
}
*type = IPVL_ICMPV6;
lyr3h = ip6h + 1;
lyr3h = icmph;
}
break;
}
@ -510,7 +528,7 @@ static int ipvlan_xmit_mode_l3(struct sk_buff *skb, struct net_device *dev)
struct ipvl_addr *addr;
int addr_type;
lyr3h = ipvlan_get_L3_hdr(skb, &addr_type);
lyr3h = ipvlan_get_L3_hdr(ipvlan->port, skb, &addr_type);
if (!lyr3h)
goto out;
@ -539,7 +557,7 @@ static int ipvlan_xmit_mode_l2(struct sk_buff *skb, struct net_device *dev)
if (!ipvlan_is_vepa(ipvlan->port) &&
ether_addr_equal(eth->h_dest, eth->h_source)) {
lyr3h = ipvlan_get_L3_hdr(skb, &addr_type);
lyr3h = ipvlan_get_L3_hdr(ipvlan->port, skb, &addr_type);
if (lyr3h) {
addr = ipvlan_addr_lookup(ipvlan->port, lyr3h, addr_type, true);
if (addr) {
@ -606,7 +624,7 @@ static bool ipvlan_external_frame(struct sk_buff *skb, struct ipvl_port *port)
int addr_type;
if (ether_addr_equal(eth->h_source, skb->dev->dev_addr)) {
lyr3h = ipvlan_get_L3_hdr(skb, &addr_type);
lyr3h = ipvlan_get_L3_hdr(port, skb, &addr_type);
if (!lyr3h)
return true;
@ -627,7 +645,7 @@ static rx_handler_result_t ipvlan_handle_mode_l3(struct sk_buff **pskb,
struct sk_buff *skb = *pskb;
rx_handler_result_t ret = RX_HANDLER_PASS;
lyr3h = ipvlan_get_L3_hdr(skb, &addr_type);
lyr3h = ipvlan_get_L3_hdr(port, skb, &addr_type);
if (!lyr3h)
goto out;
@ -666,7 +684,7 @@ static rx_handler_result_t ipvlan_handle_mode_l2(struct sk_buff **pskb,
} else {
struct ipvl_addr *addr;
lyr3h = ipvlan_get_L3_hdr(skb, &addr_type);
lyr3h = ipvlan_get_L3_hdr(port, skb, &addr_type);
if (!lyr3h)
return ret;
@ -717,7 +735,7 @@ static struct ipvl_addr *ipvlan_skb_to_addr(struct sk_buff *skb,
if (!port || port->mode != IPVLAN_MODE_L3S)
goto out;
lyr3h = ipvlan_get_L3_hdr(skb, &addr_type);
lyr3h = ipvlan_get_L3_hdr(port, skb, &addr_type);
if (!lyr3h)
goto out;
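
Every call site now passes the port down so the ARP path can size its
pull with arp_hdr_len(port->dev) rather than sizeof(*arph). A minimal
sketch of the contract being honored, using the names from the hunk:
skb payload may be split between the linear head and paged fragments,
and accessors such as arp_hdr() return pointers into the linear area
only, so the bytes must be pulled into it before they are dereferenced:

        /* linearize at least the full ARP header, or give up */
        if (unlikely(!pskb_may_pull(skb, arp_hdr_len(port->dev))))
                return NULL;
        arph = arp_hdr(skb);    /* safe: the whole header is linear now */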

@ -116,3 +116,7 @@ static struct mdio_device_id __maybe_unused cortina_tbl[] = {
};
MODULE_DEVICE_TABLE(mdio, cortina_tbl);
MODULE_DESCRIPTION("Cortina EDC CDR 10G Ethernet PHY driver");
MODULE_AUTHOR("NXP");
MODULE_LICENSE("GPL");

@ -1077,7 +1077,7 @@ static long tap_ioctl(struct file *file, unsigned int cmd,
case TUNSETOFFLOAD:
/* let the user check for future flags */
if (arg & ~(TUN_F_CSUM | TUN_F_TSO4 | TUN_F_TSO6 |
TUN_F_TSO_ECN))
TUN_F_TSO_ECN | TUN_F_UFO))
return -EINVAL;
rtnl_lock();

@ -2370,6 +2370,8 @@ static int set_offload(struct tun_struct *tun, unsigned long arg)
features |= NETIF_F_TSO6;
arg &= ~(TUN_F_TSO4|TUN_F_TSO6);
}
arg &= ~TUN_F_UFO;
}
/* This gives the user a way to test for new features in future by
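
Together with the tap_ioctl() hunk above, this restores acceptance of
TUN_F_UFO without reviving UFO segmentation in the kernel: userspace
that negotiated the flag (e.g. a guest being live-migrated with
VIRTIO_NET_F_GUEST_UFO) can set it again, but it is stripped before the
netdev features are derived. A condensed sketch of the
accept-then-ignore pattern (not the full function):

        if (arg & ~(TUN_F_CSUM | TUN_F_TSO4 | TUN_F_TSO6 |
                    TUN_F_TSO_ECN | TUN_F_UFO))
                return -EINVAL;         /* unknown flags still rejected */
        arg &= ~TUN_F_UFO;              /* accepted, never acted on */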

@ -566,18 +566,16 @@ static int ath10k_htt_rx_crypto_param_len(struct ath10k *ar,
#define MICHAEL_MIC_LEN 8
static int ath10k_htt_rx_crypto_tail_len(struct ath10k *ar,
enum htt_rx_mpdu_encrypt_type type)
static int ath10k_htt_rx_crypto_mic_len(struct ath10k *ar,
enum htt_rx_mpdu_encrypt_type type)
{
switch (type) {
case HTT_RX_MPDU_ENCRYPT_NONE:
return 0;
case HTT_RX_MPDU_ENCRYPT_WEP40:
case HTT_RX_MPDU_ENCRYPT_WEP104:
return IEEE80211_WEP_ICV_LEN;
case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
return IEEE80211_TKIP_ICV_LEN;
return 0;
case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
return IEEE80211_CCMP_MIC_LEN;
case HTT_RX_MPDU_ENCRYPT_AES_CCM256_WPA2:
@ -594,6 +592,31 @@ static int ath10k_htt_rx_crypto_tail_len(struct ath10k *ar,
return 0;
}
static int ath10k_htt_rx_crypto_icv_len(struct ath10k *ar,
enum htt_rx_mpdu_encrypt_type type)
{
switch (type) {
case HTT_RX_MPDU_ENCRYPT_NONE:
case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
case HTT_RX_MPDU_ENCRYPT_AES_CCM256_WPA2:
case HTT_RX_MPDU_ENCRYPT_AES_GCMP_WPA2:
case HTT_RX_MPDU_ENCRYPT_AES_GCMP256_WPA2:
return 0;
case HTT_RX_MPDU_ENCRYPT_WEP40:
case HTT_RX_MPDU_ENCRYPT_WEP104:
return IEEE80211_WEP_ICV_LEN;
case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
return IEEE80211_TKIP_ICV_LEN;
case HTT_RX_MPDU_ENCRYPT_WEP128:
case HTT_RX_MPDU_ENCRYPT_WAPI:
break;
}
ath10k_warn(ar, "unsupported encryption type %d\n", type);
return 0;
}
struct amsdu_subframe_hdr {
u8 dst[ETH_ALEN];
u8 src[ETH_ALEN];
@ -1063,25 +1086,27 @@ static void ath10k_htt_rx_h_undecap_raw(struct ath10k *ar,
/* Tail */
if (status->flag & RX_FLAG_IV_STRIPPED) {
skb_trim(msdu, msdu->len -
ath10k_htt_rx_crypto_tail_len(ar, enctype));
ath10k_htt_rx_crypto_mic_len(ar, enctype));
skb_trim(msdu, msdu->len -
ath10k_htt_rx_crypto_icv_len(ar, enctype));
} else {
/* MIC */
if ((status->flag & RX_FLAG_MIC_STRIPPED) &&
enctype == HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2)
skb_trim(msdu, msdu->len - 8);
if (status->flag & RX_FLAG_MIC_STRIPPED)
skb_trim(msdu, msdu->len -
ath10k_htt_rx_crypto_mic_len(ar, enctype));
/* ICV */
if (status->flag & RX_FLAG_ICV_STRIPPED &&
enctype != HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2)
if (status->flag & RX_FLAG_ICV_STRIPPED)
skb_trim(msdu, msdu->len -
ath10k_htt_rx_crypto_tail_len(ar, enctype));
ath10k_htt_rx_crypto_icv_len(ar, enctype));
}
/* MMIC */
if ((status->flag & RX_FLAG_MMIC_STRIPPED) &&
!ieee80211_has_morefrags(hdr->frame_control) &&
enctype == HTT_RX_MPDU_ENCRYPT_TKIP_WPA)
skb_trim(msdu, msdu->len - 8);
skb_trim(msdu, msdu->len - MICHAEL_MIC_LEN);
/* Head */
if (status->flag & RX_FLAG_IV_STRIPPED) {

@ -1233,7 +1233,7 @@ static int wcn36xx_platform_get_resources(struct wcn36xx *wcn,
}
/* External RF module */
iris_node = of_find_node_by_name(mmio_node, "iris");
iris_node = of_get_child_by_name(mmio_node, "iris");
if (iris_node) {
if (of_device_is_compatible(iris_node, "qcom,wcn3620"))
wcn->rf_id = RF_IRIS_WCN3620;
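
A note on why this one-liner is a fix rather than a cleanup:
of_find_node_by_name() continues walking the whole flattened device
tree after the starting node (and drops a reference on it), so it could
match an unrelated node that happens to be named "iris" anywhere in the
tree; of_get_child_by_name() only matches direct children of
mmio_node.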

@ -72,18 +72,21 @@
#define IWL9000_SMEM_OFFSET 0x400000
#define IWL9000_SMEM_LEN 0x68000
#define IWL9000_FW_PRE "iwlwifi-9000-pu-a0-jf-a0-"
#define IWL9000A_FW_PRE "iwlwifi-9000-pu-a0-jf-a0-"
#define IWL9000B_FW_PRE "iwlwifi-9000-pu-b0-jf-b0-"
#define IWL9000RFB_FW_PRE "iwlwifi-9000-pu-a0-jf-b0-"
#define IWL9260A_FW_PRE "iwlwifi-9260-th-a0-jf-a0-"
#define IWL9260B_FW_PRE "iwlwifi-9260-th-b0-jf-b0-"
#define IWL9000_MODULE_FIRMWARE(api) \
IWL9000_FW_PRE "-" __stringify(api) ".ucode"
#define IWL9000A_MODULE_FIRMWARE(api) \
IWL9000A_FW_PRE __stringify(api) ".ucode"
#define IWL9000B_MODULE_FIRMWARE(api) \
IWL9000B_FW_PRE __stringify(api) ".ucode"
#define IWL9000RFB_MODULE_FIRMWARE(api) \
IWL9000RFB_FW_PRE "-" __stringify(api) ".ucode"
IWL9000RFB_FW_PRE __stringify(api) ".ucode"
#define IWL9260A_MODULE_FIRMWARE(api) \
IWL9260A_FW_PRE "-" __stringify(api) ".ucode"
IWL9260A_FW_PRE __stringify(api) ".ucode"
#define IWL9260B_MODULE_FIRMWARE(api) \
IWL9260B_FW_PRE "-" __stringify(api) ".ucode"
IWL9260B_FW_PRE __stringify(api) ".ucode"
#define NVM_HW_SECTION_NUM_FAMILY_9000 10
@ -194,7 +197,48 @@ const struct iwl_cfg iwl9460_2ac_cfg = {
.nvm_ver = IWL9000_NVM_VERSION,
.nvm_calib_ver = IWL9000_TX_POWER_VERSION,
.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
};
const struct iwl_cfg iwl9460_2ac_cfg_soc = {
.name = "Intel(R) Dual Band Wireless AC 9460",
.fw_name_pre = IWL9000A_FW_PRE,
.fw_name_pre_b_or_c_step = IWL9000B_FW_PRE,
.fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE,
IWL_DEVICE_9000,
.ht_params = &iwl9000_ht_params,
.nvm_ver = IWL9000_NVM_VERSION,
.nvm_calib_ver = IWL9000_TX_POWER_VERSION,
.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
.integrated = true,
.soc_latency = 5000,
};
const struct iwl_cfg iwl9461_2ac_cfg_soc = {
.name = "Intel(R) Dual Band Wireless AC 9461",
.fw_name_pre = IWL9000A_FW_PRE,
.fw_name_pre_b_or_c_step = IWL9000B_FW_PRE,
.fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE,
IWL_DEVICE_9000,
.ht_params = &iwl9000_ht_params,
.nvm_ver = IWL9000_NVM_VERSION,
.nvm_calib_ver = IWL9000_TX_POWER_VERSION,
.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
.integrated = true,
.soc_latency = 5000,
};
const struct iwl_cfg iwl9462_2ac_cfg_soc = {
.name = "Intel(R) Dual Band Wireless AC 9462",
.fw_name_pre = IWL9000A_FW_PRE,
.fw_name_pre_b_or_c_step = IWL9000B_FW_PRE,
.fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE,
IWL_DEVICE_9000,
.ht_params = &iwl9000_ht_params,
.nvm_ver = IWL9000_NVM_VERSION,
.nvm_calib_ver = IWL9000_TX_POWER_VERSION,
.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
.integrated = true,
.soc_latency = 5000,
};
const struct iwl_cfg iwl9560_2ac_cfg = {
@ -206,10 +250,23 @@ const struct iwl_cfg iwl9560_2ac_cfg = {
.nvm_ver = IWL9000_NVM_VERSION,
.nvm_calib_ver = IWL9000_TX_POWER_VERSION,
.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
.integrated = true,
};
MODULE_FIRMWARE(IWL9000_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX));
const struct iwl_cfg iwl9560_2ac_cfg_soc = {
.name = "Intel(R) Dual Band Wireless AC 9560",
.fw_name_pre = IWL9000A_FW_PRE,
.fw_name_pre_b_or_c_step = IWL9000B_FW_PRE,
.fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE,
IWL_DEVICE_9000,
.ht_params = &iwl9000_ht_params,
.nvm_ver = IWL9000_NVM_VERSION,
.nvm_calib_ver = IWL9000_TX_POWER_VERSION,
.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
.integrated = true,
.soc_latency = 5000,
};
MODULE_FIRMWARE(IWL9000A_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL9000B_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL9000RFB_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL9260A_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL9260B_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX));

View File

@ -80,15 +80,15 @@
#define IWL_A000_HR_A0_FW_PRE "iwlwifi-QuQnj-a0-hr-a0-"
#define IWL_A000_HR_MODULE_FIRMWARE(api) \
IWL_A000_HR_FW_PRE "-" __stringify(api) ".ucode"
IWL_A000_HR_FW_PRE __stringify(api) ".ucode"
#define IWL_A000_JF_MODULE_FIRMWARE(api) \
IWL_A000_JF_FW_PRE "-" __stringify(api) ".ucode"
IWL_A000_JF_FW_PRE __stringify(api) ".ucode"
#define IWL_A000_HR_F0_QNJ_MODULE_FIRMWARE(api) \
IWL_A000_HR_F0_FW_PRE "-" __stringify(api) ".ucode"
IWL_A000_HR_F0_FW_PRE __stringify(api) ".ucode"
#define IWL_A000_JF_B0_QNJ_MODULE_FIRMWARE(api) \
IWL_A000_JF_B0_FW_PRE "-" __stringify(api) ".ucode"
IWL_A000_JF_B0_FW_PRE __stringify(api) ".ucode"
#define IWL_A000_HR_A0_QNJ_MODULE_FIRMWARE(api) \
IWL_A000_HR_A0_FW_PRE "-" __stringify(api) ".ucode"
IWL_A000_HR_A0_FW_PRE __stringify(api) ".ucode"
#define NVM_HW_SECTION_NUM_FAMILY_A000 10
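
Both firmware-name hunks above fix the same string bug: each *_FW_PRE
literal already ends in '-', so concatenating another "-" before the
API number doubled the dash. A sketch of the concatenation (the old
file name is inferred from the macros, not taken from a log):

        #define IWL9260A_FW_PRE "iwlwifi-9260-th-a0-jf-a0-"  /* trailing '-' */
        /* old: IWL9260A_FW_PRE "-" __stringify(34) ".ucode"
         *      -> "iwlwifi-9260-th-a0-jf-a0--34.ucode"   (never matches)
         * new: IWL9260A_FW_PRE __stringify(34) ".ucode"
         *      -> "iwlwifi-9260-th-a0-jf-a0-34.ucode"                   */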

@ -531,6 +531,8 @@ struct iwl_scan_config_v1 {
} __packed; /* SCAN_CONFIG_DB_CMD_API_S */
#define SCAN_TWO_LMACS 2
#define SCAN_LB_LMAC_IDX 0
#define SCAN_HB_LMAC_IDX 1
struct iwl_scan_config {
__le32 flags;
@ -578,6 +580,7 @@ enum iwl_umac_scan_general_flags {
IWL_UMAC_SCAN_GEN_FLAGS_MATCH = BIT(9),
IWL_UMAC_SCAN_GEN_FLAGS_EXTENDED_DWELL = BIT(10),
IWL_UMAC_SCAN_GEN_FLAGS_LMAC2_FRAGMENTED = BIT(11),
IWL_UMAC_SCAN_GEN_FLAGS_ADAPTIVE_DWELL = BIT(13),
};
/**
@ -631,12 +634,17 @@ struct iwl_scan_req_umac_tail {
* @uid: scan id, &enum iwl_umac_scan_uid_offsets
* @ooc_priority: out of channel priority - &enum iwl_scan_priority
* @general_flags: &enum iwl_umac_scan_general_flags
* @reserved2: for future use and alignment
* @scan_start_mac_id: report the scan start TSF time according to this mac TSF
* @extended_dwell: dwell time for channels 1, 6 and 11
* @active_dwell: dwell time for active scan
* @passive_dwell: dwell time for passive scan
* @fragmented_dwell: dwell time for fragmented passive scan
* @adwell_default_n_aps: for adaptive dwell the default number of APs
* per channel
* @adwell_default_n_aps_social: for adaptive dwell the default
* number of APs per social (1,6,11) channel
* @adwell_max_budget: for adaptive dwell the maximal budget of TU to be added
* to total scan time
* @max_out_time: max out of serving channel time, per LMAC - for CDB there
* are 2 LMACs
* @suspend_time: max suspend time, per LMAC - for CDB there are 2 LMACs
@ -644,6 +652,8 @@ struct iwl_scan_req_umac_tail {
* @channel_flags: &enum iwl_scan_channel_flags
* @n_channels: num of channels in scan request
* @reserved: for future use and alignment
* @reserved2: for future use and alignment
* @reserved3: for future use and alignment
* @data: &struct iwl_scan_channel_cfg_umac and
* &struct iwl_scan_req_umac_tail
*/
@ -651,41 +661,64 @@ struct iwl_scan_req_umac {
__le32 flags;
__le32 uid;
__le32 ooc_priority;
/* SCAN_GENERAL_PARAMS_API_S_VER_4 */
__le16 general_flags;
u8 reserved2;
u8 reserved;
u8 scan_start_mac_id;
u8 extended_dwell;
u8 active_dwell;
u8 passive_dwell;
u8 fragmented_dwell;
union {
struct {
u8 extended_dwell;
u8 active_dwell;
u8 passive_dwell;
u8 fragmented_dwell;
__le32 max_out_time;
__le32 suspend_time;
__le32 scan_priority;
/* SCAN_CHANNEL_PARAMS_API_S_VER_4 */
/* SCAN_CHANNEL_PARAMS_API_S_VER_1 */
u8 channel_flags;
u8 n_channels;
__le16 reserved;
__le16 reserved2;
u8 data[];
} v1; /* SCAN_REQUEST_CMD_UMAC_API_S_VER_1 */
struct {
u8 extended_dwell;
u8 active_dwell;
u8 passive_dwell;
u8 fragmented_dwell;
__le32 max_out_time[SCAN_TWO_LMACS];
__le32 suspend_time[SCAN_TWO_LMACS];
__le32 scan_priority;
/* SCAN_CHANNEL_PARAMS_API_S_VER_4 */
/* SCAN_CHANNEL_PARAMS_API_S_VER_1 */
u8 channel_flags;
u8 n_channels;
__le16 reserved;
__le16 reserved2;
u8 data[];
} v6; /* SCAN_REQUEST_CMD_UMAC_API_S_VER_6 */
struct {
u8 active_dwell;
u8 passive_dwell;
u8 fragmented_dwell;
u8 adwell_default_n_aps;
u8 adwell_default_n_aps_social;
u8 reserved3;
__le16 adwell_max_budget;
__le32 max_out_time[SCAN_TWO_LMACS];
__le32 suspend_time[SCAN_TWO_LMACS];
__le32 scan_priority;
/* SCAN_CHANNEL_PARAMS_API_S_VER_1 */
u8 channel_flags;
u8 n_channels;
__le16 reserved2;
u8 data[];
} v7; /* SCAN_REQUEST_CMD_UMAC_API_S_VER_7 */
};
} __packed;
#define IWL_SCAN_REQ_UMAC_SIZE sizeof(struct iwl_scan_req_umac)
#define IWL_SCAN_REQ_UMAC_SIZE_V7 sizeof(struct iwl_scan_req_umac)
#define IWL_SCAN_REQ_UMAC_SIZE_V6 (sizeof(struct iwl_scan_req_umac) - \
2 * sizeof(u8) - sizeof(__le16))
#define IWL_SCAN_REQ_UMAC_SIZE_V1 (sizeof(struct iwl_scan_req_umac) - \
2 * sizeof(__le32))
2 * sizeof(__le32) - 2 * sizeof(u8) - \
sizeof(__le16))
/**
* struct iwl_umac_scan_abort
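
The size macros above encode the union layout: v7 is the largest arm,
so sizeof(struct iwl_scan_req_umac) is the v7 size; v6 replaces v7's
adaptive-dwell fields (adwell_default_n_aps, adwell_default_n_aps_social,
reserved3 and the __le16 adwell_max_budget) with a single u8
extended_dwell, a net difference of two u8s plus a __le16, which is
exactly what V6 subtracts; v1 additionally collapses the two per-LMAC
__le32 timing arrays into single fields, accounting for the extra
"2 * sizeof(__le32)" in V1.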

@ -264,6 +264,7 @@ enum iwl_ucode_tlv_api {
IWL_UCODE_TLV_API_STA_TYPE = (__force iwl_ucode_tlv_api_t)30,
IWL_UCODE_TLV_API_NAN2_VER2 = (__force iwl_ucode_tlv_api_t)31,
/* API Set 1 */
IWL_UCODE_TLV_API_ADAPTIVE_DWELL = (__force iwl_ucode_tlv_api_t)32,
IWL_UCODE_TLV_API_NEW_BEACON_TEMPLATE = (__force iwl_ucode_tlv_api_t)34,
IWL_UCODE_TLV_API_NEW_RX_STATS = (__force iwl_ucode_tlv_api_t)35,
IWL_UCODE_TLV_API_COEX_ATS_EXTERNAL = (__force iwl_ucode_tlv_api_t)37,

@ -366,6 +366,7 @@ struct iwl_cfg {
u32 dccm2_len;
u32 smem_offset;
u32 smem_len;
u32 soc_latency;
u16 nvm_ver;
u16 nvm_calib_ver;
u16 rx_with_siso_diversity:1,
@ -472,6 +473,10 @@ extern const struct iwl_cfg iwl9260_2ac_cfg;
extern const struct iwl_cfg iwl9270_2ac_cfg;
extern const struct iwl_cfg iwl9460_2ac_cfg;
extern const struct iwl_cfg iwl9560_2ac_cfg;
extern const struct iwl_cfg iwl9460_2ac_cfg_soc;
extern const struct iwl_cfg iwl9461_2ac_cfg_soc;
extern const struct iwl_cfg iwl9462_2ac_cfg_soc;
extern const struct iwl_cfg iwl9560_2ac_cfg_soc;
extern const struct iwl_cfg iwla000_2ac_cfg_hr;
extern const struct iwl_cfg iwla000_2ac_cfg_hr_cdb;
extern const struct iwl_cfg iwla000_2ac_cfg_jf;

@ -1142,6 +1142,12 @@ static inline bool iwl_mvm_is_d0i3_supported(struct iwl_mvm *mvm)
IWL_UCODE_TLV_CAPA_D0I3_SUPPORT);
}
static inline bool iwl_mvm_is_adaptive_dwell_supported(struct iwl_mvm *mvm)
{
return fw_has_api(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_API_ADAPTIVE_DWELL);
}
static inline bool iwl_mvm_enter_d0i3_on_suspend(struct iwl_mvm *mvm)
{
/* For now we only use this mode to differentiate between

@ -130,6 +130,19 @@ struct iwl_mvm_scan_params {
u32 measurement_dwell;
};
static inline void *iwl_mvm_get_scan_req_umac_data(struct iwl_mvm *mvm)
{
struct iwl_scan_req_umac *cmd = mvm->scan_cmd;
if (iwl_mvm_is_adaptive_dwell_supported(mvm))
return (void *)&cmd->v7.data;
if (iwl_mvm_has_new_tx_api(mvm))
return (void *)&cmd->v6.data;
return (void *)&cmd->v1.data;
}
static u8 iwl_mvm_scan_rx_ant(struct iwl_mvm *mvm)
{
if (mvm->scan_rx_ant != ANT_NONE)
@ -1075,25 +1088,57 @@ static void iwl_mvm_scan_umac_dwell(struct iwl_mvm *mvm,
{
struct iwl_mvm_scan_timing_params *timing = &scan_timing[params->type];
if (params->measurement_dwell) {
cmd->active_dwell = params->measurement_dwell;
cmd->passive_dwell = params->measurement_dwell;
cmd->extended_dwell = params->measurement_dwell;
} else {
cmd->active_dwell = IWL_SCAN_DWELL_ACTIVE;
cmd->passive_dwell = IWL_SCAN_DWELL_PASSIVE;
cmd->extended_dwell = IWL_SCAN_DWELL_EXTENDED;
if (iwl_mvm_is_regular_scan(params))
cmd->ooc_priority = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6);
else
cmd->ooc_priority = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_2);
if (iwl_mvm_is_adaptive_dwell_supported(mvm)) {
if (params->measurement_dwell) {
cmd->v7.active_dwell = params->measurement_dwell;
cmd->v7.passive_dwell = params->measurement_dwell;
} else {
cmd->v7.active_dwell = IWL_SCAN_DWELL_ACTIVE;
cmd->v7.passive_dwell = IWL_SCAN_DWELL_PASSIVE;
}
cmd->v7.fragmented_dwell = IWL_SCAN_DWELL_FRAGMENTED;
cmd->v7.scan_priority = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6);
cmd->v7.max_out_time[SCAN_LB_LMAC_IDX] =
cpu_to_le32(timing->max_out_time);
cmd->v7.suspend_time[SCAN_LB_LMAC_IDX] =
cpu_to_le32(timing->suspend_time);
if (iwl_mvm_is_cdb_supported(mvm)) {
cmd->v7.max_out_time[SCAN_HB_LMAC_IDX] =
cpu_to_le32(timing->max_out_time);
cmd->v7.suspend_time[SCAN_HB_LMAC_IDX] =
cpu_to_le32(timing->suspend_time);
}
return;
}
cmd->fragmented_dwell = IWL_SCAN_DWELL_FRAGMENTED;
if (params->measurement_dwell) {
cmd->v1.active_dwell = params->measurement_dwell;
cmd->v1.passive_dwell = params->measurement_dwell;
cmd->v1.extended_dwell = params->measurement_dwell;
} else {
cmd->v1.active_dwell = IWL_SCAN_DWELL_ACTIVE;
cmd->v1.passive_dwell = IWL_SCAN_DWELL_PASSIVE;
cmd->v1.extended_dwell = IWL_SCAN_DWELL_EXTENDED;
}
cmd->v1.fragmented_dwell = IWL_SCAN_DWELL_FRAGMENTED;
if (iwl_mvm_has_new_tx_api(mvm)) {
cmd->v6.scan_priority = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6);
cmd->v6.max_out_time[0] = cpu_to_le32(timing->max_out_time);
cmd->v6.suspend_time[0] = cpu_to_le32(timing->suspend_time);
cmd->v6.max_out_time[SCAN_LB_LMAC_IDX] =
cpu_to_le32(timing->max_out_time);
cmd->v6.suspend_time[SCAN_LB_LMAC_IDX] =
cpu_to_le32(timing->suspend_time);
if (iwl_mvm_is_cdb_supported(mvm)) {
cmd->v6.max_out_time[1] =
cmd->v6.max_out_time[SCAN_HB_LMAC_IDX] =
cpu_to_le32(timing->max_out_time);
cmd->v6.suspend_time[1] =
cmd->v6.suspend_time[SCAN_HB_LMAC_IDX] =
cpu_to_le32(timing->suspend_time);
}
} else {
@ -1102,11 +1147,6 @@ static void iwl_mvm_scan_umac_dwell(struct iwl_mvm *mvm,
cmd->v1.scan_priority =
cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6);
}
if (iwl_mvm_is_regular_scan(params))
cmd->ooc_priority = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6);
else
cmd->ooc_priority = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_2);
}
static void
@ -1178,8 +1218,7 @@ static int iwl_mvm_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
int type)
{
struct iwl_scan_req_umac *cmd = mvm->scan_cmd;
void *cmd_data = iwl_mvm_has_new_tx_api(mvm) ?
(void *)&cmd->v6.data : (void *)&cmd->v1.data;
void *cmd_data = iwl_mvm_get_scan_req_umac_data(mvm);
struct iwl_scan_req_umac_tail *sec_part = cmd_data +
sizeof(struct iwl_scan_channel_cfg_umac) *
mvm->fw->ucode_capa.n_scan_channels;
@ -1216,7 +1255,10 @@ static int iwl_mvm_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
IWL_SCAN_CHANNEL_FLAG_CACHE_ADD;
if (iwl_mvm_has_new_tx_api(mvm)) {
if (iwl_mvm_is_adaptive_dwell_supported(mvm)) {
cmd->v7.channel_flags = channel_flags;
cmd->v7.n_channels = params->n_channels;
} else if (iwl_mvm_has_new_tx_api(mvm)) {
cmd->v6.channel_flags = channel_flags;
cmd->v6.n_channels = params->n_channels;
} else {
@ -1661,8 +1703,10 @@ int iwl_mvm_scan_size(struct iwl_mvm *mvm)
{
int base_size = IWL_SCAN_REQ_UMAC_SIZE_V1;
if (iwl_mvm_has_new_tx_api(mvm))
base_size = IWL_SCAN_REQ_UMAC_SIZE;
if (iwl_mvm_is_adaptive_dwell_supported(mvm))
base_size = IWL_SCAN_REQ_UMAC_SIZE_V7;
else if (iwl_mvm_has_new_tx_api(mvm))
base_size = IWL_SCAN_REQ_UMAC_SIZE_V6;
if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN))
return base_size +

@ -535,47 +535,121 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0x2526, 0x0238, iwl9560_2ac_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x023C, iwl9560_2ac_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x0260, iwl9460_2ac_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x0264, iwl9461_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x2526, 0x02A0, iwl9460_2ac_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x02A4, iwl9460_2ac_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x1010, iwl9260_2ac_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x1030, iwl9560_2ac_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x1210, iwl9260_2ac_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x1410, iwl9270_2ac_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x1420, iwl9460_2ac_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x1420, iwl9460_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x2526, 0x1610, iwl9270_2ac_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x4010, iwl9260_2ac_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x4030, iwl9560_2ac_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x40A4, iwl9460_2ac_cfg)},
{IWL_PCI_DEVICE(0x2526, 0xA014, iwl9260_2ac_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x42A4, iwl9462_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x271B, 0x0010, iwl9160_2ac_cfg)},
{IWL_PCI_DEVICE(0x271B, 0x0014, iwl9160_2ac_cfg)},
{IWL_PCI_DEVICE(0x271B, 0x0210, iwl9160_2ac_cfg)},
{IWL_PCI_DEVICE(0x30DC, 0x0060, iwl9460_2ac_cfg)},
{IWL_PCI_DEVICE(0x31DC, 0x0030, iwl9560_2ac_cfg)},
{IWL_PCI_DEVICE(0x31DC, 0x0034, iwl9560_2ac_cfg)},
{IWL_PCI_DEVICE(0x31DC, 0x0038, iwl9560_2ac_cfg)},
{IWL_PCI_DEVICE(0x31DC, 0x003C, iwl9560_2ac_cfg)},
{IWL_PCI_DEVICE(0x31DC, 0x0060, iwl9460_2ac_cfg)},
{IWL_PCI_DEVICE(0x9DF0, 0x0000, iwl9460_2ac_cfg)},
{IWL_PCI_DEVICE(0x9DF0, 0x0010, iwl9460_2ac_cfg)},
{IWL_PCI_DEVICE(0x9DF0, 0x0030, iwl9560_2ac_cfg)},
{IWL_PCI_DEVICE(0x9DF0, 0x0034, iwl9560_2ac_cfg)},
{IWL_PCI_DEVICE(0x9DF0, 0x0038, iwl9560_2ac_cfg)},
{IWL_PCI_DEVICE(0x9DF0, 0x003C, iwl9560_2ac_cfg)},
{IWL_PCI_DEVICE(0x9DF0, 0x0060, iwl9460_2ac_cfg)},
{IWL_PCI_DEVICE(0x9DF0, 0x0210, iwl9460_2ac_cfg)},
{IWL_PCI_DEVICE(0x9DF0, 0x0310, iwl9460_2ac_cfg)},
{IWL_PCI_DEVICE(0x9DF0, 0x0410, iwl9460_2ac_cfg)},
{IWL_PCI_DEVICE(0x9DF0, 0x0510, iwl9460_2ac_cfg)},
{IWL_PCI_DEVICE(0x9DF0, 0x0610, iwl9460_2ac_cfg)},
{IWL_PCI_DEVICE(0x9DF0, 0x0710, iwl9460_2ac_cfg)},
{IWL_PCI_DEVICE(0x9DF0, 0x0A10, iwl9460_2ac_cfg)},
{IWL_PCI_DEVICE(0x9DF0, 0x2010, iwl9460_2ac_cfg)},
{IWL_PCI_DEVICE(0x9DF0, 0x2A10, iwl9460_2ac_cfg)},
{IWL_PCI_DEVICE(0xA370, 0x0030, iwl9560_2ac_cfg)},
{IWL_PCI_DEVICE(0xA370, 0x0034, iwl9560_2ac_cfg)},
{IWL_PCI_DEVICE(0xA370, 0x0038, iwl9560_2ac_cfg)},
{IWL_PCI_DEVICE(0xA370, 0x003C, iwl9560_2ac_cfg)},
{IWL_PCI_DEVICE(0xA370, 0x0060, iwl9460_2ac_cfg)},
{IWL_PCI_DEVICE(0xA370, 0x1030, iwl9560_2ac_cfg)},
{IWL_PCI_DEVICE(0x271B, 0x0214, iwl9260_2ac_cfg)},
{IWL_PCI_DEVICE(0x2720, 0x0034, iwl9560_2ac_cfg)},
{IWL_PCI_DEVICE(0x2720, 0x0038, iwl9560_2ac_cfg)},
{IWL_PCI_DEVICE(0x2720, 0x003C, iwl9560_2ac_cfg)},
{IWL_PCI_DEVICE(0x2720, 0x0060, iwl9461_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x2720, 0x0064, iwl9461_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x2720, 0x00A0, iwl9462_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x2720, 0x00A4, iwl9462_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x2720, 0x0230, iwl9560_2ac_cfg)},
{IWL_PCI_DEVICE(0x2720, 0x0234, iwl9560_2ac_cfg)},
{IWL_PCI_DEVICE(0x2720, 0x0238, iwl9560_2ac_cfg)},
{IWL_PCI_DEVICE(0x2720, 0x023C, iwl9560_2ac_cfg)},
{IWL_PCI_DEVICE(0x2720, 0x0260, iwl9461_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x2720, 0x0264, iwl9461_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x2720, 0x02A0, iwl9462_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x2720, 0x02A4, iwl9462_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x2720, 0x4030, iwl9560_2ac_cfg)},
{IWL_PCI_DEVICE(0x2720, 0x40A4, iwl9462_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x30DC, 0x0060, iwl9460_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x30DC, 0x0064, iwl9461_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x30DC, 0x00A0, iwl9462_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x30DC, 0x00A4, iwl9462_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x30DC, 0x0260, iwl9461_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x30DC, 0x0264, iwl9461_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x30DC, 0x02A0, iwl9462_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x30DC, 0x02A4, iwl9462_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x31DC, 0x0030, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x31DC, 0x0034, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x31DC, 0x0038, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x31DC, 0x003C, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x31DC, 0x0060, iwl9460_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x31DC, 0x0064, iwl9461_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x31DC, 0x00A0, iwl9462_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x31DC, 0x00A4, iwl9462_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x31DC, 0x0230, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x31DC, 0x0234, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x31DC, 0x0238, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x31DC, 0x023C, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x31DC, 0x0260, iwl9461_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x31DC, 0x0264, iwl9461_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x31DC, 0x02A0, iwl9462_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x31DC, 0x02A4, iwl9462_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x31DC, 0x4030, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x31DC, 0x4034, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x31DC, 0x40A4, iwl9462_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x34F0, 0x0030, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x34F0, 0x0034, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x34F0, 0x02A4, iwl9462_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x9DF0, 0x0000, iwl9460_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x9DF0, 0x0010, iwl9460_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x9DF0, 0x0030, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x9DF0, 0x0034, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x9DF0, 0x0038, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x9DF0, 0x003C, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x9DF0, 0x0060, iwl9460_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x9DF0, 0x0064, iwl9461_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x9DF0, 0x00A0, iwl9462_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x9DF0, 0x00A4, iwl9462_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x9DF0, 0x0210, iwl9460_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x9DF0, 0x0230, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x9DF0, 0x0234, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x9DF0, 0x0238, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x9DF0, 0x023C, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x9DF0, 0x0260, iwl9461_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x9DF0, 0x0264, iwl9461_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x9DF0, 0x02A0, iwl9462_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x9DF0, 0x02A4, iwl9462_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x9DF0, 0x0310, iwl9460_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x9DF0, 0x0410, iwl9460_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x9DF0, 0x0510, iwl9460_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x9DF0, 0x0610, iwl9460_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x9DF0, 0x0710, iwl9460_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x9DF0, 0x0A10, iwl9460_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x9DF0, 0x2010, iwl9460_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x9DF0, 0x2A10, iwl9460_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x9DF0, 0x4030, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x9DF0, 0x4034, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x9DF0, 0x40A4, iwl9462_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0xA370, 0x0030, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0xA370, 0x0034, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0xA370, 0x0038, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0xA370, 0x003C, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0xA370, 0x0060, iwl9460_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0xA370, 0x0064, iwl9461_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0xA370, 0x00A0, iwl9462_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0xA370, 0x00A4, iwl9462_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0xA370, 0x0230, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0xA370, 0x0234, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0xA370, 0x0238, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0xA370, 0x023C, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0xA370, 0x0260, iwl9461_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0xA370, 0x0264, iwl9461_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0xA370, 0x02A0, iwl9462_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0xA370, 0x02A4, iwl9462_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0xA370, 0x1030, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0xA370, 0x4030, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0xA370, 0x4034, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0xA370, 0x40A4, iwl9462_2ac_cfg_soc)},
/* a000 Series */
{IWL_PCI_DEVICE(0x2720, 0x0A10, iwla000_2ac_cfg_hr_cdb)},

@ -78,6 +78,7 @@ enum bpf_arg_type {
* functions that access data on eBPF program stack
*/
ARG_PTR_TO_MEM, /* pointer to valid memory (stack, packet, map value) */
ARG_PTR_TO_MEM_OR_NULL, /* pointer to valid memory or NULL */
ARG_PTR_TO_UNINIT_MEM, /* pointer to memory does not need to be initialized,
* helper function must fill all bytes or clear
* them in error case.
@ -334,9 +335,8 @@ extern const struct bpf_verifier_ops tc_cls_act_analyzer_ops;
extern const struct bpf_verifier_ops xdp_analyzer_ops;
struct bpf_prog *bpf_prog_get(u32 ufd);
struct bpf_prog *bpf_prog_get_type(u32 ufd, enum bpf_prog_type type);
struct bpf_prog *bpf_prog_get_type_dev(u32 ufd, enum bpf_prog_type type,
struct net_device *netdev);
bool attach_drv);
struct bpf_prog * __must_check bpf_prog_add(struct bpf_prog *prog, int i);
void bpf_prog_sub(struct bpf_prog *prog, int i);
struct bpf_prog * __must_check bpf_prog_inc(struct bpf_prog *prog);
@ -425,15 +425,9 @@ static inline struct bpf_prog *bpf_prog_get(u32 ufd)
return ERR_PTR(-EOPNOTSUPP);
}
static inline struct bpf_prog *bpf_prog_get_type(u32 ufd,
enum bpf_prog_type type)
{
return ERR_PTR(-EOPNOTSUPP);
}
static inline struct bpf_prog *bpf_prog_get_type_dev(u32 ufd,
enum bpf_prog_type type,
struct net_device *netdev)
bool attach_drv)
{
return ERR_PTR(-EOPNOTSUPP);
}
@ -514,9 +508,14 @@ static inline int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu,
}
#endif /* CONFIG_BPF_SYSCALL */
static inline struct bpf_prog *bpf_prog_get_type(u32 ufd,
enum bpf_prog_type type)
{
return bpf_prog_get_type_dev(ufd, type, false);
}
int bpf_prog_offload_compile(struct bpf_prog *prog);
void bpf_prog_offload_destroy(struct bpf_prog *prog);
u32 bpf_prog_offload_ifindex(struct bpf_prog *prog);
#if defined(CONFIG_NET) && defined(CONFIG_BPF_SYSCALL)
int bpf_prog_offload_init(struct bpf_prog *prog, union bpf_attr *attr);
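
What the new ARG_PTR_TO_MEM_OR_NULL buys BPF programs, per the verifier
hunk later on this page: a NULL register is accepted for such an
argument, with the final check deferred to stack-boundary validation
(which presumably requires the paired size to be zero). A sketch under
the assumption that bpf_perf_event_output()'s data argument was among
those converted, as the shortlog suggests; map and variables are
hypothetical:

        /* with the size argument now ARG_CONST_SIZE_OR_ZERO, the
         * empty case no longer needs a dummy one-byte buffer */
        bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU,
                              size ? data : NULL, size);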

@ -115,7 +115,7 @@ struct bpf_insn_aux_data {
struct bpf_map *map_ptr; /* pointer for call insn into lookup_elem */
};
int ctx_field_size; /* the ctx field size for load insn, maybe 0 */
int converted_op_size; /* the valid value width after perceived conversion */
bool seen; /* this insn was processed by the verifier */
};
#define MAX_USED_MAPS 64 /* max number of maps accessed by one eBPF program */
@ -171,7 +171,7 @@ static inline struct bpf_reg_state *cur_regs(struct bpf_verifier_env *env)
#if defined(CONFIG_NET) && defined(CONFIG_BPF_SYSCALL)
int bpf_prog_offload_verifier_prep(struct bpf_verifier_env *env);
#else
int bpf_prog_offload_verifier_prep(struct bpf_verifier_env *env)
static inline int bpf_prog_offload_verifier_prep(struct bpf_verifier_env *env)
{
return -EOPNOTSUPP;
}

@ -54,8 +54,9 @@ enum {
NETIF_F_GSO_TUNNEL_REMCSUM_BIT, /* ... TUNNEL with TSO & REMCSUM */
NETIF_F_GSO_SCTP_BIT, /* ... SCTP fragmentation */
NETIF_F_GSO_ESP_BIT, /* ... ESP with TSO */
NETIF_F_GSO_UDP_BIT, /* ... UFO, deprecated except tuntap */
/**/NETIF_F_GSO_LAST = /* last bit, see GSO_MASK */
NETIF_F_GSO_ESP_BIT,
NETIF_F_GSO_UDP_BIT,
NETIF_F_FCOE_CRC_BIT, /* FCoE CRC32 */
NETIF_F_SCTP_CRC_BIT, /* SCTP checksum offload */
@ -132,6 +133,7 @@ enum {
#define NETIF_F_GSO_TUNNEL_REMCSUM __NETIF_F(GSO_TUNNEL_REMCSUM)
#define NETIF_F_GSO_SCTP __NETIF_F(GSO_SCTP)
#define NETIF_F_GSO_ESP __NETIF_F(GSO_ESP)
#define NETIF_F_GSO_UDP __NETIF_F(GSO_UDP)
#define NETIF_F_HW_VLAN_STAG_FILTER __NETIF_F(HW_VLAN_STAG_FILTER)
#define NETIF_F_HW_VLAN_STAG_RX __NETIF_F(HW_VLAN_STAG_RX)
#define NETIF_F_HW_VLAN_STAG_TX __NETIF_F(HW_VLAN_STAG_TX)

@ -4140,6 +4140,7 @@ static inline bool net_gso_ok(netdev_features_t features, int gso_type)
BUILD_BUG_ON(SKB_GSO_TUNNEL_REMCSUM != (NETIF_F_GSO_TUNNEL_REMCSUM >> NETIF_F_GSO_SHIFT));
BUILD_BUG_ON(SKB_GSO_SCTP != (NETIF_F_GSO_SCTP >> NETIF_F_GSO_SHIFT));
BUILD_BUG_ON(SKB_GSO_ESP != (NETIF_F_GSO_ESP >> NETIF_F_GSO_SHIFT));
BUILD_BUG_ON(SKB_GSO_UDP != (NETIF_F_GSO_UDP >> NETIF_F_GSO_SHIFT));
return (features & feature) == feature;
}
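
The added BUILD_BUG_ON closes the loop for the new GSO bit: the SKB
flag (skbuff.h), the netdev feature bit (netdev_features.h) and this
compile-time check must move in lockstep, so any future drift between
the two bit spaces becomes a build failure instead of a silently wrong
feature test.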

@ -568,6 +568,8 @@ enum {
SKB_GSO_SCTP = 1 << 14,
SKB_GSO_ESP = 1 << 15,
SKB_GSO_UDP = 1 << 16,
};
#if BITS_PER_LONG > 32

@ -9,7 +9,7 @@ static inline int virtio_net_hdr_to_skb(struct sk_buff *skb,
const struct virtio_net_hdr *hdr,
bool little_endian)
{
unsigned short gso_type = 0;
unsigned int gso_type = 0;
if (hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
@ -19,6 +19,9 @@ static inline int virtio_net_hdr_to_skb(struct sk_buff *skb,
case VIRTIO_NET_HDR_GSO_TCPV6:
gso_type = SKB_GSO_TCPV6;
break;
case VIRTIO_NET_HDR_GSO_UDP:
gso_type = SKB_GSO_UDP;
break;
default:
return -EINVAL;
}
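
The widening of gso_type in the first hunk is load-bearing rather than
cosmetic: SKB_GSO_UDP is 1 << 16 (see the skbuff.h hunk above), one bit
past what a 16-bit unsigned short can hold, so the old declaration
would have truncated the new type to zero. A two-line illustration in
plain C:

        unsigned short narrow = 1 << 16;        /* truncates to 0 */
        unsigned int wide = 1 << 16;            /* keeps bit 16   */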

@ -767,6 +767,7 @@ static inline int ipv6_addr_diff(const struct in6_addr *a1, const struct in6_add
__be32 ipv6_select_ident(struct net *net,
const struct in6_addr *daddr,
const struct in6_addr *saddr);
__be32 ipv6_proxy_select_ident(struct net *net, struct sk_buff *skb);
int ip6_dst_hoplimit(struct dst_entry *dst);

@ -262,7 +262,7 @@ union bpf_attr {
__u32 kern_version; /* checked when prog_type=kprobe */
__u32 prog_flags;
char prog_name[BPF_OBJ_NAME_LEN];
__u32 prog_target_ifindex; /* ifindex of netdev to prep for */
__u32 prog_ifindex; /* ifindex of netdev to prep for */
};
struct { /* anonymous struct used by BPF_OBJ_* commands */
@ -897,10 +897,6 @@ enum sk_action {
#define BPF_TAG_SIZE 8
enum bpf_prog_status {
BPF_PROG_STATUS_DEV_BOUND = (1 << 0),
};
struct bpf_prog_info {
__u32 type;
__u32 id;
@ -914,8 +910,6 @@ struct bpf_prog_info {
__u32 nr_map_ids;
__aligned_u64 map_ids;
char name[BPF_OBJ_NAME_LEN];
__u32 ifindex;
__u32 status;
} __attribute__((aligned(8)));
struct bpf_map_info {

@ -14,8 +14,9 @@ int bpf_prog_offload_init(struct bpf_prog *prog, union bpf_attr *attr)
struct net *net = current->nsproxy->net_ns;
struct bpf_dev_offload *offload;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
if (attr->prog_type != BPF_PROG_TYPE_SCHED_CLS &&
attr->prog_type != BPF_PROG_TYPE_XDP)
return -EINVAL;
if (attr->prog_flags)
return -EINVAL;
@ -28,7 +29,7 @@ int bpf_prog_offload_init(struct bpf_prog *prog, union bpf_attr *attr)
init_waitqueue_head(&offload->verifier_done);
rtnl_lock();
offload->netdev = __dev_get_by_index(net, attr->prog_target_ifindex);
offload->netdev = __dev_get_by_index(net, attr->prog_ifindex);
if (!offload->netdev) {
rtnl_unlock();
kfree(offload);
@ -85,6 +86,10 @@ static void __bpf_prog_offload_destroy(struct bpf_prog *prog)
struct bpf_dev_offload *offload = prog->aux->offload;
struct netdev_bpf data = {};
/* Caution - if netdev is destroyed before the program, this function
* will be called twice.
*/
data.offload.prog = prog;
if (offload->verifier_running)
@ -144,18 +149,6 @@ int bpf_prog_offload_compile(struct bpf_prog *prog)
return bpf_prog_offload_translate(prog);
}
u32 bpf_prog_offload_ifindex(struct bpf_prog *prog)
{
struct bpf_dev_offload *offload = prog->aux->offload;
u32 ifindex;
rtnl_lock();
ifindex = offload->netdev ? offload->netdev->ifindex : 0;
rtnl_unlock();
return ifindex;
}
const struct bpf_prog_ops bpf_offload_prog_ops = {
};
@ -169,6 +162,10 @@ static int bpf_offload_notification(struct notifier_block *notifier,
switch (event) {
case NETDEV_UNREGISTER:
/* ignore namespace changes */
if (netdev->reg_state != NETREG_UNREGISTERING)
break;
list_for_each_entry_safe(offload, tmp, &bpf_prog_offload_devs,
offloads) {
if (offload->netdev == netdev)

@ -1057,22 +1057,23 @@ struct bpf_prog *bpf_prog_inc_not_zero(struct bpf_prog *prog)
}
EXPORT_SYMBOL_GPL(bpf_prog_inc_not_zero);
static bool bpf_prog_can_attach(struct bpf_prog *prog,
enum bpf_prog_type *attach_type,
struct net_device *netdev)
static bool bpf_prog_get_ok(struct bpf_prog *prog,
enum bpf_prog_type *attach_type, bool attach_drv)
{
struct bpf_dev_offload *offload = prog->aux->offload;
/* not an attachment, just a refcount inc, always allow */
if (!attach_type)
return true;
if (prog->type != *attach_type)
return false;
if (offload && offload->netdev != netdev)
if (bpf_prog_is_dev_bound(prog->aux) && !attach_drv)
return false;
return true;
}
static struct bpf_prog *__bpf_prog_get(u32 ufd, enum bpf_prog_type *attach_type,
struct net_device *netdev)
bool attach_drv)
{
struct fd f = fdget(ufd);
struct bpf_prog *prog;
@ -1080,7 +1081,7 @@ static struct bpf_prog *__bpf_prog_get(u32 ufd, enum bpf_prog_type *attach_type,
prog = ____bpf_prog_get(f);
if (IS_ERR(prog))
return prog;
if (attach_type && !bpf_prog_can_attach(prog, attach_type, netdev)) {
if (!bpf_prog_get_ok(prog, attach_type, attach_drv)) {
prog = ERR_PTR(-EINVAL);
goto out;
}
@ -1093,23 +1094,13 @@ out:
struct bpf_prog *bpf_prog_get(u32 ufd)
{
return __bpf_prog_get(ufd, NULL, NULL);
return __bpf_prog_get(ufd, NULL, false);
}
struct bpf_prog *bpf_prog_get_type(u32 ufd, enum bpf_prog_type type)
{
struct bpf_prog *prog = __bpf_prog_get(ufd, &type, NULL);
if (!IS_ERR(prog))
trace_bpf_prog_get_type(prog);
return prog;
}
EXPORT_SYMBOL_GPL(bpf_prog_get_type);
struct bpf_prog *bpf_prog_get_type_dev(u32 ufd, enum bpf_prog_type type,
struct net_device *netdev)
bool attach_drv)
{
struct bpf_prog *prog = __bpf_prog_get(ufd, &type, netdev);
struct bpf_prog *prog = __bpf_prog_get(ufd, &type, attach_drv);
if (!IS_ERR(prog))
trace_bpf_prog_get_type(prog);
@ -1118,7 +1109,7 @@ struct bpf_prog *bpf_prog_get_type_dev(u32 ufd, enum bpf_prog_type type,
EXPORT_SYMBOL_GPL(bpf_prog_get_type_dev);
/* last field in 'union bpf_attr' used by this command */
#define BPF_PROG_LOAD_LAST_FIELD prog_target_ifindex
#define BPF_PROG_LOAD_LAST_FIELD prog_ifindex
static int bpf_prog_load(union bpf_attr *attr)
{
@ -1181,7 +1172,7 @@ static int bpf_prog_load(union bpf_attr *attr)
atomic_set(&prog->aux->refcnt, 1);
prog->gpl_compatible = is_gpl ? 1 : 0;
if (attr->prog_target_ifindex) {
if (attr->prog_ifindex) {
err = bpf_prog_offload_init(prog, attr);
if (err)
goto free_prog;
@ -1625,11 +1616,6 @@ static int bpf_prog_get_info_by_fd(struct bpf_prog *prog,
return -EFAULT;
}
if (bpf_prog_is_dev_bound(prog->aux)) {
info.status |= BPF_PROG_STATUS_DEV_BOUND;
info.ifindex = bpf_prog_offload_ifindex(prog);
}
done:
if (copy_to_user(uinfo, &info, info_len) ||
put_user(info_len, &uattr->info.info_len))

View File

@ -1384,13 +1384,15 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 regno,
if (type != expected_type)
goto err_type;
} else if (arg_type == ARG_PTR_TO_MEM ||
arg_type == ARG_PTR_TO_MEM_OR_NULL ||
arg_type == ARG_PTR_TO_UNINIT_MEM) {
expected_type = PTR_TO_STACK;
/* One exception here. In case function allows for NULL to be
* passed in as argument, it's a SCALAR_VALUE type. Final test
* happens during stack boundary checking.
*/
if (register_is_null(*reg))
if (register_is_null(*reg) &&
arg_type == ARG_PTR_TO_MEM_OR_NULL)
/* final test in check_stack_boundary() */;
else if (!type_is_pkt_pointer(type) &&
type != PTR_TO_MAP_VALUE &&
@ -3825,6 +3827,7 @@ static int do_check(struct bpf_verifier_env *env)
return err;
regs = cur_regs(env);
env->insn_aux_data[insn_idx].seen = true;
if (class == BPF_ALU || class == BPF_ALU64) {
err = check_alu_op(env, insn);
if (err)
@ -4020,6 +4023,7 @@ process_bpf_exit:
return err;
insn_idx++;
env->insn_aux_data[insn_idx].seen = true;
} else {
verbose(env, "invalid BPF_LD mode\n");
return -EINVAL;
@ -4202,6 +4206,7 @@ static int adjust_insn_aux_data(struct bpf_verifier_env *env, u32 prog_len,
u32 off, u32 cnt)
{
struct bpf_insn_aux_data *new_data, *old_data = env->insn_aux_data;
int i;
if (cnt == 1)
return 0;
@ -4211,6 +4216,8 @@ static int adjust_insn_aux_data(struct bpf_verifier_env *env, u32 prog_len,
memcpy(new_data, old_data, sizeof(struct bpf_insn_aux_data) * off);
memcpy(new_data + off + cnt - 1, old_data + off,
sizeof(struct bpf_insn_aux_data) * (prog_len - off - cnt + 1));
for (i = off; i < off + cnt - 1; i++)
new_data[i].seen = true;
env->insn_aux_data = new_data;
vfree(old_data);
return 0;
@ -4229,6 +4236,25 @@ static struct bpf_prog *bpf_patch_insn_data(struct bpf_verifier_env *env, u32 of
return new_prog;
}
/* The verifier does more data flow analysis than llvm and will not explore
* branches that are dead at run time. Malicious programs can have dead code
* too. Therefore replace all dead at-run-time code with nops.
*/
static void sanitize_dead_code(struct bpf_verifier_env *env)
{
struct bpf_insn_aux_data *aux_data = env->insn_aux_data;
struct bpf_insn nop = BPF_MOV64_REG(BPF_REG_0, BPF_REG_0);
struct bpf_insn *insn = env->prog->insnsi;
const int insn_cnt = env->prog->len;
int i;
for (i = 0; i < insn_cnt; i++) {
if (aux_data[i].seen)
continue;
memcpy(insn + i, &nop, sizeof(nop));
}
}
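
The pass keys off the seen bits that do_check() records for every instruction it actually visits; anything left unmarked was pruned as unreachable and is overwritten with the side-effect-free BPF_MOV64_REG(BPF_REG_0, BPF_REG_0), i.e. r0 = r0. A minimal, self-contained userspace sketch of the same idea follows — struct insn, the textual no-op and main() are invented for illustration and are not kernel API; only the loop mirrors sanitize_dead_code():

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

struct insn { char text[32]; };

/* Overwrite every instruction the analysis never reached with a no-op. */
static void nop_out_dead_code(struct insn *insns, const bool *seen, int cnt)
{
	const struct insn nop = { "r0 = r0" };

	for (int i = 0; i < cnt; i++) {
		if (seen[i])
			continue;		/* reachable: keep as-is */
		memcpy(&insns[i], &nop, sizeof(nop));
	}
}

int main(void)
{
	struct insn prog[] = { { "r1 = 0" }, { "dead branch" }, { "exit" } };
	bool seen[] = { true, false, true };	/* insn 1 was never explored */

	nop_out_dead_code(prog, seen, 3);
	for (int i = 0; i < 3; i++)
		printf("%d: %s\n", i, prog[i].text);
	return 0;
}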
/* convert load instructions that access fields of 'struct __sk_buff'
* into sequence of instructions that access fields of 'struct sk_buff'
*/
@ -4555,6 +4581,9 @@ skip_full_check:
while (!pop_stack(env, NULL, NULL));
free_states(env);
if (ret == 0)
sanitize_dead_code(env);
if (ret == 0)
/* program is valid, convert *(u32*)(ctx + off) accesses */
ret = convert_ctx_accesses(env);

View File

@ -78,16 +78,12 @@ EXPORT_SYMBOL_GPL(trace_call_bpf);
BPF_CALL_3(bpf_probe_read, void *, dst, u32, size, const void *, unsafe_ptr)
{
int ret = 0;
if (unlikely(size == 0))
goto out;
int ret;
ret = probe_kernel_read(dst, unsafe_ptr, size);
if (unlikely(ret < 0))
memset(dst, 0, size);
out:
return ret;
}
@ -407,7 +403,7 @@ static const struct bpf_func_proto bpf_perf_event_output_proto = {
.arg2_type = ARG_CONST_MAP_PTR,
.arg3_type = ARG_ANYTHING,
.arg4_type = ARG_PTR_TO_MEM,
.arg5_type = ARG_CONST_SIZE,
.arg5_type = ARG_CONST_SIZE_OR_ZERO,
};
static DEFINE_PER_CPU(struct pt_regs, bpf_pt_regs);
@ -498,7 +494,7 @@ static const struct bpf_func_proto bpf_probe_read_str_proto = {
.gpl_only = true,
.ret_type = RET_INTEGER,
.arg1_type = ARG_PTR_TO_UNINIT_MEM,
.arg2_type = ARG_CONST_SIZE,
.arg2_type = ARG_CONST_SIZE_OR_ZERO,
.arg3_type = ARG_ANYTHING,
};
@ -609,7 +605,7 @@ static const struct bpf_func_proto bpf_perf_event_output_proto_tp = {
.arg2_type = ARG_CONST_MAP_PTR,
.arg3_type = ARG_ANYTHING,
.arg4_type = ARG_PTR_TO_MEM,
.arg5_type = ARG_CONST_SIZE,
.arg5_type = ARG_CONST_SIZE_OR_ZERO,
};
BPF_CALL_3(bpf_get_stackid_tp, void *, tp_buff, struct bpf_map *, map,

View File

@ -2746,7 +2746,8 @@ EXPORT_SYMBOL(skb_mac_gso_segment);
static inline bool skb_needs_check(struct sk_buff *skb, bool tx_path)
{
if (tx_path)
return skb->ip_summed != CHECKSUM_PARTIAL;
return skb->ip_summed != CHECKSUM_PARTIAL &&
skb->ip_summed != CHECKSUM_UNNECESSARY;
return skb->ip_summed == CHECKSUM_NONE;
}
@ -7139,13 +7140,17 @@ int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack,
__dev_xdp_attached(dev, bpf_op, NULL))
return -EBUSY;
if (bpf_op == ops->ndo_bpf)
prog = bpf_prog_get_type_dev(fd, BPF_PROG_TYPE_XDP,
dev);
else
prog = bpf_prog_get_type(fd, BPF_PROG_TYPE_XDP);
prog = bpf_prog_get_type_dev(fd, BPF_PROG_TYPE_XDP,
bpf_op == ops->ndo_bpf);
if (IS_ERR(prog))
return PTR_ERR(prog);
if (!(flags & XDP_FLAGS_HW_MODE) &&
bpf_prog_is_dev_bound(prog->aux)) {
NL_SET_ERR_MSG(extack, "using device-bound program without HW_MODE flag is not supported");
bpf_prog_put(prog);
return -EINVAL;
}
}
err = dev_xdp_install(dev, bpf_op, extack, flags, prog);

View File

@ -1646,9 +1646,9 @@ static const struct bpf_func_proto bpf_csum_diff_proto = {
.gpl_only = false,
.pkt_access = true,
.ret_type = RET_INTEGER,
.arg1_type = ARG_PTR_TO_MEM,
.arg1_type = ARG_PTR_TO_MEM_OR_NULL,
.arg2_type = ARG_CONST_SIZE_OR_ZERO,
.arg3_type = ARG_PTR_TO_MEM,
.arg3_type = ARG_PTR_TO_MEM_OR_NULL,
.arg4_type = ARG_CONST_SIZE_OR_ZERO,
.arg5_type = ARG_ANYTHING,
};

View File

@ -1223,9 +1223,10 @@ EXPORT_SYMBOL(inet_sk_rebuild_header);
struct sk_buff *inet_gso_segment(struct sk_buff *skb,
netdev_features_t features)
{
bool fixedid = false, gso_partial, encap;
bool udpfrag = false, fixedid = false, gso_partial, encap;
struct sk_buff *segs = ERR_PTR(-EINVAL);
const struct net_offload *ops;
unsigned int offset = 0;
struct iphdr *iph;
int proto, tot_len;
int nhoff;
@ -1260,6 +1261,7 @@ struct sk_buff *inet_gso_segment(struct sk_buff *skb,
segs = ERR_PTR(-EPROTONOSUPPORT);
if (!skb->encapsulation || encap) {
udpfrag = !!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP);
fixedid = !!(skb_shinfo(skb)->gso_type & SKB_GSO_TCP_FIXEDID);
/* fixed ID is invalid if DF bit is not set */
@ -1279,7 +1281,13 @@ struct sk_buff *inet_gso_segment(struct sk_buff *skb,
skb = segs;
do {
iph = (struct iphdr *)(skb_mac_header(skb) + nhoff);
if (skb_is_gso(skb)) {
if (udpfrag) {
iph->frag_off = htons(offset >> 3);
if (skb->next)
iph->frag_off |= htons(IP_MF);
offset += skb->len - nhoff - ihl;
tot_len = skb->len - nhoff;
} else if (skb_is_gso(skb)) {
if (!fixedid) {
iph->id = htons(id);
id += skb_shinfo(skb)->gso_segs;

View File

@ -187,16 +187,57 @@ out_unlock:
}
EXPORT_SYMBOL(skb_udp_tunnel_segment);
static struct sk_buff *udp4_tunnel_segment(struct sk_buff *skb,
netdev_features_t features)
static struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb,
netdev_features_t features)
{
struct sk_buff *segs = ERR_PTR(-EINVAL);
unsigned int mss;
__wsum csum;
struct udphdr *uh;
struct iphdr *iph;
if (skb->encapsulation &&
(skb_shinfo(skb)->gso_type &
(SKB_GSO_UDP_TUNNEL|SKB_GSO_UDP_TUNNEL_CSUM)))
(SKB_GSO_UDP_TUNNEL|SKB_GSO_UDP_TUNNEL_CSUM))) {
segs = skb_udp_tunnel_segment(skb, features, false);
goto out;
}
if (!pskb_may_pull(skb, sizeof(struct udphdr)))
goto out;
mss = skb_shinfo(skb)->gso_size;
if (unlikely(skb->len <= mss))
goto out;
/* Do software UFO. Complete and fill in the UDP checksum as
* HW cannot do checksum of UDP packets sent as multiple
* IP fragments.
*/
uh = udp_hdr(skb);
iph = ip_hdr(skb);
uh->check = 0;
csum = skb_checksum(skb, 0, skb->len, 0);
uh->check = udp_v4_check(skb->len, iph->saddr, iph->daddr, csum);
if (uh->check == 0)
uh->check = CSUM_MANGLED_0;
skb->ip_summed = CHECKSUM_UNNECESSARY;
/* If there is no outer header we can fake a checksum offload
* due to the fact that we have already done the checksum in
* software prior to segmenting the frame.
*/
if (!skb->encap_hdr_csum)
features |= NETIF_F_HW_CSUM;
/* Fragment the skb. IP headers of the fragments are updated in
* inet_gso_segment()
*/
segs = skb_segment(skb, features);
out:
return segs;
}
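
The sequence above is the standard software UDP checksum: zero uh->check, take the one's-complement sum of the whole datagram, fold in the IPv4 pseudo-header via udp_v4_check(), and transmit all-ones (CSUM_MANGLED_0) when the result is zero, since a zero checksum means "no checksum" for UDP over IPv4. A hedged, self-contained userspace sketch of the same arithmetic (RFC 768/1071) — udp_v4_checksum() and csum_add() are illustrative names, not the kernel helpers:

#include <stddef.h>
#include <stdint.h>

/* One's-complement sum of a buffer, big-endian 16-bit words. */
static uint32_t csum_add(uint32_t sum, const void *data, size_t len)
{
	const uint8_t *p = data;

	while (len > 1) {
		sum += (p[0] << 8) | p[1];
		p += 2;
		len -= 2;
	}
	if (len)
		sum += p[0] << 8;	/* pad an odd trailing byte with zero */
	return sum;
}

/* UDP/IPv4 checksum over pseudo-header + UDP header + payload.
 * saddr/daddr are in host byte order; the UDP header in "udp" must
 * already have its check field zeroed, as in the function above.
 */
static uint16_t udp_v4_checksum(uint32_t saddr, uint32_t daddr,
				const uint8_t *udp, uint16_t udp_len)
{
	uint32_t sum = 0;

	sum += (saddr >> 16) + (saddr & 0xffff);
	sum += (daddr >> 16) + (daddr & 0xffff);
	sum += 17;			/* IPPROTO_UDP */
	sum += udp_len;

	sum = csum_add(sum, udp, udp_len);
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);	/* fold carries */

	sum = ~sum & 0xffff;
	return sum ? sum : 0xffff;	/* 0 means "no checksum" for UDP */
}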
@ -330,7 +371,7 @@ static int udp4_gro_complete(struct sk_buff *skb, int nhoff)
static const struct net_offload udpv4_offload = {
.callbacks = {
.gso_segment = udp4_tunnel_segment,
.gso_segment = udp4_ufo_fragment,
.gro_receive = udp4_gro_receive,
.gro_complete = udp4_gro_complete,
},

View File

@ -31,6 +31,37 @@ static u32 __ipv6_select_ident(struct net *net, u32 hashrnd,
return id;
}
/* This function exists only for tap drivers that must support broken
* clients requesting UFO without specifying an IPv6 fragment ID.
*
* This is similar to ipv6_select_ident() but we use an independent hash
* seed to limit information leakage.
*
* The network header must be set before calling this.
*/
__be32 ipv6_proxy_select_ident(struct net *net, struct sk_buff *skb)
{
static u32 ip6_proxy_idents_hashrnd __read_mostly;
struct in6_addr buf[2];
struct in6_addr *addrs;
u32 id;
addrs = skb_header_pointer(skb,
skb_network_offset(skb) +
offsetof(struct ipv6hdr, saddr),
sizeof(buf), buf);
if (!addrs)
return 0;
net_get_random_once(&ip6_proxy_idents_hashrnd,
sizeof(ip6_proxy_idents_hashrnd));
id = __ipv6_select_ident(net, ip6_proxy_idents_hashrnd,
&addrs[1], &addrs[0]);
return htonl(id);
}
EXPORT_SYMBOL_GPL(ipv6_proxy_select_ident);
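
net_get_random_once() gives the proxy path its own lazily initialized seed, so identifiers observed here reveal nothing about the hash used by ipv6_select_ident(). A rough userspace analogue of that once-only seeding, sketched with pthread_once() and getrandom() — names and error handling are simplified for illustration:

#include <pthread.h>
#include <stdint.h>
#include <sys/random.h>

static uint32_t proxy_ident_seed;
static pthread_once_t seed_once = PTHREAD_ONCE_INIT;

static void seed_init(void)
{
	/* Illustrative only: a real caller must check the return value. */
	getrandom(&proxy_ident_seed, sizeof(proxy_ident_seed), 0);
}

/* The first caller seeds; every later caller sees the same value. */
static uint32_t proxy_ident_seed_get(void)
{
	pthread_once(&seed_once, seed_init);
	return proxy_ident_seed;
}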
__be32 ipv6_select_ident(struct net *net,
const struct in6_addr *daddr,
const struct in6_addr *saddr)

View File

@ -472,6 +472,11 @@ static struct rt6_info *rt6_multipath_select(struct rt6_info *match,
&match->rt6i_siblings, rt6i_siblings) {
route_choosen--;
if (route_choosen == 0) {
struct inet6_dev *idev = sibling->rt6i_idev;
if (!netif_carrier_ok(sibling->dst.dev) &&
idev->cnf.ignore_routes_with_linkdown)
break;
if (rt6_score_route(sibling, oif, strict) < 0)
break;
match = sibling;
@ -1019,7 +1024,7 @@ static struct net_device *ip6_rt_get_dev_rcu(struct rt6_info *rt)
{
struct net_device *dev = rt->dst.dev;
if (rt->rt6i_flags & RTF_LOCAL) {
if (rt->rt6i_flags & (RTF_LOCAL | RTF_ANYCAST)) {
/* for copies of local routes, dst->dev needs to be the
* device if it is a master device, the master device if
* device is enslaved, and the loopback as the default

View File

@ -17,15 +17,94 @@
#include <net/ip6_checksum.h>
#include "ip6_offload.h"
static struct sk_buff *udp6_tunnel_segment(struct sk_buff *skb,
netdev_features_t features)
static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb,
netdev_features_t features)
{
struct sk_buff *segs = ERR_PTR(-EINVAL);
unsigned int mss;
unsigned int unfrag_ip6hlen, unfrag_len;
struct frag_hdr *fptr;
u8 *packet_start, *prevhdr;
u8 nexthdr;
u8 frag_hdr_sz = sizeof(struct frag_hdr);
__wsum csum;
int tnl_hlen;
int err;
mss = skb_shinfo(skb)->gso_size;
if (unlikely(skb->len <= mss))
goto out;
if (skb->encapsulation && skb_shinfo(skb)->gso_type &
(SKB_GSO_UDP_TUNNEL|SKB_GSO_UDP_TUNNEL_CSUM))
segs = skb_udp_tunnel_segment(skb, features, true);
else {
const struct ipv6hdr *ipv6h;
struct udphdr *uh;
if (!pskb_may_pull(skb, sizeof(struct udphdr)))
goto out;
/* Do software UFO. Complete and fill in the UDP checksum as HW cannot
* do checksum of UDP packets sent as multiple IP fragments.
*/
uh = udp_hdr(skb);
ipv6h = ipv6_hdr(skb);
uh->check = 0;
csum = skb_checksum(skb, 0, skb->len, 0);
uh->check = udp_v6_check(skb->len, &ipv6h->saddr,
&ipv6h->daddr, csum);
if (uh->check == 0)
uh->check = CSUM_MANGLED_0;
skb->ip_summed = CHECKSUM_UNNECESSARY;
/* If there is no outer header we can fake a checksum offload
* due to the fact that we have already done the checksum in
* software prior to segmenting the frame.
*/
if (!skb->encap_hdr_csum)
features |= NETIF_F_HW_CSUM;
/* Check if there is enough headroom to insert fragment header. */
tnl_hlen = skb_tnl_header_len(skb);
if (skb->mac_header < (tnl_hlen + frag_hdr_sz)) {
if (gso_pskb_expand_head(skb, tnl_hlen + frag_hdr_sz))
goto out;
}
/* Find the unfragmentable header and shift it left by frag_hdr_sz
* bytes to insert fragment header.
*/
err = ip6_find_1stfragopt(skb, &prevhdr);
if (err < 0)
return ERR_PTR(err);
unfrag_ip6hlen = err;
nexthdr = *prevhdr;
*prevhdr = NEXTHDR_FRAGMENT;
unfrag_len = (skb_network_header(skb) - skb_mac_header(skb)) +
unfrag_ip6hlen + tnl_hlen;
packet_start = (u8 *) skb->head + SKB_GSO_CB(skb)->mac_offset;
memmove(packet_start-frag_hdr_sz, packet_start, unfrag_len);
SKB_GSO_CB(skb)->mac_offset -= frag_hdr_sz;
skb->mac_header -= frag_hdr_sz;
skb->network_header -= frag_hdr_sz;
fptr = (struct frag_hdr *)(skb_network_header(skb) + unfrag_ip6hlen);
fptr->nexthdr = nexthdr;
fptr->reserved = 0;
fptr->identification = ipv6_proxy_select_ident(dev_net(skb->dev), skb);
/* Fragment the skb. ipv6 header and the remaining fields of the
* fragment header are updated in ipv6_gso_segment()
*/
segs = skb_segment(skb, features);
}
out:
return segs;
}
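
The memmove() above only opens a gap for the Fragment extension header; the per-fragment offsets and M flags are filled in later by ipv6_gso_segment(). For reference, a hedged sketch of the header layout (RFC 8200, section 4.5) and the offset/M-flag encoding — struct frag_hdr mirrors the kernel definition, while frag_hdr_set() is invented for illustration:

#include <stdint.h>
#include <arpa/inet.h>

/* IPv6 Fragment extension header (RFC 8200, section 4.5). */
struct frag_hdr {
	uint8_t  nexthdr;	/* protocol of the fragmentable part */
	uint8_t  reserved;
	uint16_t frag_off;	/* 13-bit offset in 8-byte units + M flag */
	uint32_t identification;
};

#define IP6_MF 0x0001		/* "more fragments" bit in frag_off */

/* Encode a byte offset (must be a multiple of 8) and more-fragments flag. */
static void frag_hdr_set(struct frag_hdr *fh, uint8_t nexthdr,
			 unsigned int offset_bytes, int more_frags,
			 uint32_t ident)
{
	fh->nexthdr = nexthdr;
	fh->reserved = 0;
	fh->frag_off = htons((offset_bytes & ~7u) | (more_frags ? IP6_MF : 0));
	fh->identification = ident; /* e.g. from ipv6_proxy_select_ident() */
}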
@ -75,7 +154,7 @@ static int udp6_gro_complete(struct sk_buff *skb, int nhoff)
static const struct net_offload udpv6_offload = {
.callbacks = {
.gso_segment = udp6_tunnel_segment,
.gso_segment = udp6_ufo_fragment,
.gro_receive = udp6_gro_receive,
.gro_complete = udp6_gro_complete,
},

View File

@ -308,6 +308,8 @@ static int queue_gso_packets(struct datapath *dp, struct sk_buff *skb,
const struct dp_upcall_info *upcall_info,
uint32_t cutlen)
{
unsigned short gso_type = skb_shinfo(skb)->gso_type;
struct sw_flow_key later_key;
struct sk_buff *segs, *nskb;
int err;
@ -318,9 +320,21 @@ static int queue_gso_packets(struct datapath *dp, struct sk_buff *skb,
if (segs == NULL)
return -EINVAL;
if (gso_type & SKB_GSO_UDP) {
/* The initial flow key extracted by ovs_flow_key_extract()
* in this case is for a first fragment, so we need to
* properly mark later fragments.
*/
later_key = *key;
later_key.ip.frag = OVS_FRAG_TYPE_LATER;
}
/* Queue all of the segments. */
skb = segs;
do {
if (gso_type & SKB_GSO_UDP && skb != segs)
key = &later_key;
err = queue_userspace_packet(dp, skb, key, upcall_info, cutlen);
if (err)
break;

View File

@ -631,7 +631,8 @@ static int key_extract(struct sk_buff *skb, struct sw_flow_key *key)
key->ip.frag = OVS_FRAG_TYPE_LATER;
return 0;
}
if (nh->frag_off & htons(IP_MF))
if (nh->frag_off & htons(IP_MF) ||
skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
key->ip.frag = OVS_FRAG_TYPE_FIRST;
else
key->ip.frag = OVS_FRAG_TYPE_NONE;
@ -747,6 +748,9 @@ static int key_extract(struct sk_buff *skb, struct sw_flow_key *key)
if (key->ip.frag == OVS_FRAG_TYPE_LATER)
return 0;
if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
key->ip.frag = OVS_FRAG_TYPE_FIRST;
/* Transport layer. */
if (key->ip.proto == NEXTHDR_TCP) {
if (tcphdr_ok(skb)) {

View File

@ -229,6 +229,9 @@ static int tcf_csum_ipv4_udp(struct sk_buff *skb, unsigned int ihl,
const struct iphdr *iph;
u16 ul;
if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
return 1;
/*
* Support both UDP and UDPLITE checksum algorithms, Don't use
* udph->len to get the real length without any protocol check,
@ -282,6 +285,9 @@ static int tcf_csum_ipv6_udp(struct sk_buff *skb, unsigned int ihl,
const struct ipv6hdr *ip6h;
u16 ul;
if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
return 1;
/*
* Support both UDP and UDPLITE checksum algorithms, Don't use
* udph->len to get the real length without any protocol check,

View File

@ -205,13 +205,14 @@ static void tcf_chain_head_change(struct tcf_chain *chain,
static void tcf_chain_flush(struct tcf_chain *chain)
{
struct tcf_proto *tp;
struct tcf_proto *tp = rtnl_dereference(chain->filter_chain);
tcf_chain_head_change(chain, NULL);
while ((tp = rtnl_dereference(chain->filter_chain)) != NULL) {
while (tp) {
RCU_INIT_POINTER(chain->filter_chain, tp->next);
tcf_chain_put(chain);
tcf_proto_destroy(tp);
tp = rtnl_dereference(chain->filter_chain);
tcf_chain_put(chain);
}
}

View File

@ -382,15 +382,13 @@ static int cls_bpf_prog_from_efd(struct nlattr **tb, struct cls_bpf_prog *prog,
{
struct bpf_prog *fp;
char *name = NULL;
bool skip_sw;
u32 bpf_fd;
bpf_fd = nla_get_u32(tb[TCA_BPF_FD]);
skip_sw = gen_flags & TCA_CLS_FLAGS_SKIP_SW;
if (gen_flags & TCA_CLS_FLAGS_SKIP_SW)
fp = bpf_prog_get_type_dev(bpf_fd, BPF_PROG_TYPE_SCHED_CLS,
qdisc_dev(tp->q));
else
fp = bpf_prog_get_type(bpf_fd, BPF_PROG_TYPE_SCHED_CLS);
fp = bpf_prog_get_type_dev(bpf_fd, BPF_PROG_TYPE_SCHED_CLS, skip_sw);
if (IS_ERR(fp))
return PTR_ERR(fp);

View File

@ -562,7 +562,7 @@ static int __smc_buf_create(struct smc_sock *smc, bool is_rmb)
{
struct smc_connection *conn = &smc->conn;
struct smc_link_group *lgr = conn->lgr;
struct smc_buf_desc *buf_desc = NULL;
struct smc_buf_desc *buf_desc = ERR_PTR(-ENOMEM);
struct list_head *buf_list;
int bufsize, bufsize_short;
int sk_buf_size;
@ -575,7 +575,7 @@ static int __smc_buf_create(struct smc_sock *smc, bool is_rmb)
/* use socket send buffer size (w/o overhead) as start value */
sk_buf_size = smc->sk.sk_sndbuf / 2;
for (bufsize_short = smc_compress_bufsize(smc->sk.sk_sndbuf / 2);
for (bufsize_short = smc_compress_bufsize(sk_buf_size);
bufsize_short >= 0; bufsize_short--) {
if (is_rmb) {

View File

@ -41,7 +41,6 @@
#include <string.h>
#include <time.h>
#include <unistd.h>
#include <net/if.h>
#include <sys/types.h>
#include <sys/stat.h>
@ -230,21 +229,6 @@ static void print_prog_json(struct bpf_prog_info *info, int fd)
info->tag[0], info->tag[1], info->tag[2], info->tag[3],
info->tag[4], info->tag[5], info->tag[6], info->tag[7]);
if (info->status & BPF_PROG_STATUS_DEV_BOUND) {
jsonw_name(json_wtr, "dev");
if (info->ifindex) {
char name[IF_NAMESIZE];
if (!if_indextoname(info->ifindex, name))
jsonw_printf(json_wtr, "\"ifindex:%d\"",
info->ifindex);
else
jsonw_printf(json_wtr, "\"%s\"", name);
} else {
jsonw_printf(json_wtr, "\"unknown\"");
}
}
if (info->load_time) {
char buf[32];
@ -302,21 +286,6 @@ static void print_prog_plain(struct bpf_prog_info *info, int fd)
printf("tag ");
fprint_hex(stdout, info->tag, BPF_TAG_SIZE, "");
printf(" ");
if (info->status & BPF_PROG_STATUS_DEV_BOUND) {
printf("dev ");
if (info->ifindex) {
char name[IF_NAMESIZE];
if (!if_indextoname(info->ifindex, name))
printf("ifindex:%d ", info->ifindex);
else
printf("%s ", name);
} else {
printf("unknown ");
}
}
printf("\n");
if (info->load_time) {

View File

@ -262,7 +262,7 @@ union bpf_attr {
__u32 kern_version; /* checked when prog_type=kprobe */
__u32 prog_flags;
char prog_name[BPF_OBJ_NAME_LEN];
__u32 prog_target_ifindex; /* ifindex of netdev to prep for */
__u32 prog_ifindex; /* ifindex of netdev to prep for */
};
struct { /* anonymous struct used by BPF_OBJ_* commands */
@ -897,10 +897,6 @@ enum sk_action {
#define BPF_TAG_SIZE 8
enum bpf_prog_status {
BPF_PROG_STATUS_DEV_BOUND = (1 << 0),
};
struct bpf_prog_info {
__u32 type;
__u32 id;
@ -914,8 +910,6 @@ struct bpf_prog_info {
__u32 nr_map_ids;
__aligned_u64 map_ids;
char name[BPF_OBJ_NAME_LEN];
__u32 ifindex;
__u32 status;
} __attribute__((aligned(8)));
struct bpf_map_info {

View File

@ -4377,11 +4377,10 @@ static struct bpf_test tests[] = {
BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
BPF_MOV64_IMM(BPF_REG_1, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
BPF_MOV64_IMM(BPF_REG_3, 0),
BPF_EMIT_CALL(BPF_FUNC_probe_write_user),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
BPF_MOV64_IMM(BPF_REG_2, 0),
BPF_EMIT_CALL(BPF_FUNC_trace_printk),
BPF_EXIT_INSN(),
},
.fixup_map2 = { 3 },
@ -4481,14 +4480,12 @@ static struct bpf_test tests[] = {
BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
offsetof(struct test_val, foo)),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_1),
BPF_MOV64_IMM(BPF_REG_1, 0),
BPF_MOV64_IMM(BPF_REG_3, 0),
BPF_EMIT_CALL(BPF_FUNC_probe_write_user),
BPF_MOV64_IMM(BPF_REG_2, 0),
BPF_EMIT_CALL(BPF_FUNC_trace_printk),
BPF_EXIT_INSN(),
},
.fixup_map2 = { 3 },
@ -4618,18 +4615,16 @@ static struct bpf_test tests[] = {
BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
BPF_MOV64_IMM(BPF_REG_3, 0),
BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_1),
BPF_MOV64_IMM(BPF_REG_1, 0),
BPF_MOV64_IMM(BPF_REG_3, 0),
BPF_EMIT_CALL(BPF_FUNC_probe_write_user),
BPF_MOV64_IMM(BPF_REG_2, 0),
BPF_EMIT_CALL(BPF_FUNC_trace_printk),
BPF_EXIT_INSN(),
},
.fixup_map2 = { 3 },
.errstr = "R2 min value is outside of the array range",
.errstr = "R1 min value is outside of the array range",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
@ -4760,20 +4755,18 @@ static struct bpf_test tests[] = {
BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
offsetof(struct test_val, foo), 4),
offsetof(struct test_val, foo), 3),
BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_1),
BPF_MOV64_IMM(BPF_REG_1, 0),
BPF_MOV64_IMM(BPF_REG_3, 0),
BPF_EMIT_CALL(BPF_FUNC_probe_write_user),
BPF_MOV64_IMM(BPF_REG_2, 0),
BPF_EMIT_CALL(BPF_FUNC_trace_printk),
BPF_EXIT_INSN(),
},
.fixup_map2 = { 3 },
.errstr = "R2 min value is outside of the array range",
.errstr = "R1 min value is outside of the array range",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
@ -5638,7 +5631,7 @@ static struct bpf_test tests[] = {
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
"helper access to variable memory: size = 0 allowed on NULL",
"helper access to variable memory: size = 0 allowed on NULL (ARG_PTR_TO_MEM_OR_NULL)",
.insns = {
BPF_MOV64_IMM(BPF_REG_1, 0),
BPF_MOV64_IMM(BPF_REG_2, 0),
@ -5652,7 +5645,7 @@ static struct bpf_test tests[] = {
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
"helper access to variable memory: size > 0 not allowed on NULL",
"helper access to variable memory: size > 0 not allowed on NULL (ARG_PTR_TO_MEM_OR_NULL)",
.insns = {
BPF_MOV64_IMM(BPF_REG_1, 0),
BPF_MOV64_IMM(BPF_REG_2, 0),
@ -5670,7 +5663,7 @@ static struct bpf_test tests[] = {
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
"helper access to variable memory: size = 0 allowed on != NULL stack pointer",
"helper access to variable memory: size = 0 allowed on != NULL stack pointer (ARG_PTR_TO_MEM_OR_NULL)",
.insns = {
BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
@ -5687,7 +5680,7 @@ static struct bpf_test tests[] = {
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
"helper access to variable memory: size = 0 allowed on != NULL map pointer",
"helper access to variable memory: size = 0 allowed on != NULL map pointer (ARG_PTR_TO_MEM_OR_NULL)",
.insns = {
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
@ -5709,7 +5702,7 @@ static struct bpf_test tests[] = {
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
"helper access to variable memory: size possible = 0 allowed on != NULL stack pointer",
"helper access to variable memory: size possible = 0 allowed on != NULL stack pointer (ARG_PTR_TO_MEM_OR_NULL)",
.insns = {
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
@ -5734,7 +5727,7 @@ static struct bpf_test tests[] = {
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
"helper access to variable memory: size possible = 0 allowed on != NULL map pointer",
"helper access to variable memory: size possible = 0 allowed on != NULL map pointer (ARG_PTR_TO_MEM_OR_NULL)",
.insns = {
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
@ -5757,7 +5750,7 @@ static struct bpf_test tests[] = {
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
"helper access to variable memory: size possible = 0 allowed on != NULL packet pointer",
"helper access to variable memory: size possible = 0 allowed on != NULL packet pointer (ARG_PTR_TO_MEM_OR_NULL)",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
offsetof(struct __sk_buff, data)),
@ -5778,6 +5771,105 @@ static struct bpf_test tests[] = {
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
"helper access to variable memory: size = 0 not allowed on NULL (!ARG_PTR_TO_MEM_OR_NULL)",
.insns = {
BPF_MOV64_IMM(BPF_REG_1, 0),
BPF_MOV64_IMM(BPF_REG_2, 0),
BPF_MOV64_IMM(BPF_REG_3, 0),
BPF_EMIT_CALL(BPF_FUNC_probe_read),
BPF_EXIT_INSN(),
},
.errstr = "R1 type=inv expected=fp",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
"helper access to variable memory: size > 0 not allowed on NULL (!ARG_PTR_TO_MEM_OR_NULL)",
.insns = {
BPF_MOV64_IMM(BPF_REG_1, 0),
BPF_MOV64_IMM(BPF_REG_2, 1),
BPF_MOV64_IMM(BPF_REG_3, 0),
BPF_EMIT_CALL(BPF_FUNC_probe_read),
BPF_EXIT_INSN(),
},
.errstr = "R1 type=inv expected=fp",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
"helper access to variable memory: size = 0 allowed on != NULL stack pointer (!ARG_PTR_TO_MEM_OR_NULL)",
.insns = {
BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
BPF_MOV64_IMM(BPF_REG_2, 0),
BPF_MOV64_IMM(BPF_REG_3, 0),
BPF_EMIT_CALL(BPF_FUNC_probe_read),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
"helper access to variable memory: size = 0 allowed on != NULL map pointer (!ARG_PTR_TO_MEM_OR_NULL)",
.insns = {
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
BPF_MOV64_IMM(BPF_REG_2, 0),
BPF_MOV64_IMM(BPF_REG_3, 0),
BPF_EMIT_CALL(BPF_FUNC_probe_read),
BPF_EXIT_INSN(),
},
.fixup_map1 = { 3 },
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
"helper access to variable memory: size possible = 0 allowed on != NULL stack pointer (!ARG_PTR_TO_MEM_OR_NULL)",
.insns = {
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 8, 4),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
BPF_MOV64_IMM(BPF_REG_3, 0),
BPF_EMIT_CALL(BPF_FUNC_probe_read),
BPF_EXIT_INSN(),
},
.fixup_map1 = { 3 },
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
"helper access to variable memory: size possible = 0 allowed on != NULL map pointer (!ARG_PTR_TO_MEM_OR_NULL)",
.insns = {
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 8, 2),
BPF_MOV64_IMM(BPF_REG_3, 0),
BPF_EMIT_CALL(BPF_FUNC_probe_read),
BPF_EXIT_INSN(),
},
.fixup_map1 = { 3 },
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
"helper access to variable memory: 8 bytes leak",
.insns = {