
wireless-drivers-next patches for v5.12

Second set of patches for v5.12. Last time there was a smaller pull
 request so unsurprisingly this time we have a big one. mt76 has new
 hardware support and lots of new features, iwlwifi getting new
 features and rtw88 got NAPI support. And the usual cleanups and fixes
 all over.
 
 Major changes:
 
 ath10k
 
 * support setting SAR limits via nl80211
 
 rtw88
 
 * support 8821 RFE type2 devices
 
 * NAPI support
 
 iwlwifi
 
 * add new FW API support
 
 * support for new So devices
 
 * support for RF interference mitigation (RFI)
 
 * support for PNVM (Platform Non-Volatile Memory, a firmware data
   file) from BIOS
 
 mt76
 
 * add new mt7921e driver
 
 * 802.11 encap offload support
 
 * support for multiple pcie gen1 host interfaces on 7915
 
 * 7915 testmode support
 
 * 7915 txbf support
 
 brcmfmac
 
 * support for CQM RSSI notifications
 
 wil6210
 
 * support for extended DMG MCS 12.1 rate
 -----BEGIN PGP SIGNATURE-----
 Version: GnuPG v1
 
 iQEcBAABAgAGBQJgJl5/AAoJEG4XJFUm622bCrcIAIwarm51aHq8R0Py0ZYBEiIM
 STe/x3gjxVhzYHd438APchEMnePY6pWZ6e00GnZwMyWxZpMHK+yOYzWrewLvYI29
 qyPFkjGdE6PhYjL+lJjYHn4M1cdAkO4t0FSvCy5OvOnuIgu0Yz3TXXQVxZtrzrIc
 2q+bOR1kKaVBe8NOjggnxTWe4mTj6efeTkD0D5M+IbppERtmpcVLra9FSgz5IcLl
 hlKyNNtiNo/CmQu0bGejXR7ip+JA08f5No6TOlEQKR6pBvBvwrvgXHsq9rfQO8qA
 CDhz4DqfPPYMNXnJuFVAzYsw4raZblwTg5GtIjH7e0cbXxSAx50Ne2Hd850nkO8=
 =eQGJ
 -----END PGP SIGNATURE-----

Merge tag 'wireless-drivers-next-2021-02-12' of git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/wireless-drivers-next

Kalle Valo says:
====================
wireless-drivers-next patches for v5.12

Second set of patches for v5.12. Last time there was a smaller pull
request so unsurprisingly this time we have a big one. mt76 has new
hardware support and lots of new features, iwlwifi getting new
features and rtw88 got NAPI support. And the usual cleanups and fixes
all over.

Major changes:

ath10k

* support setting SAR limits via nl80211

rtw88

* support 8821 RFE type2 devices

* NAPI support

iwlwifi

* add new FW API support

* support for new So devices

* support for RF interference mitigation (RFI)

* support for PNVM (Platform Non-Volatile Memory, a firmware data
  file) from BIOS

mt76

* add new mt7921e driver

* 802.11 encap offload support

* support for multiple pcie gen1 host interfaces on 7915

* 7915 testmode support

* 7915 txbf support

brcmfmac

* support for CQM RSSI notifications

wil6210

* support for extended DMG MCS 12.1 rate
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
master
David S. Miller 2021-02-12 16:43:13 -08:00
commit 79201f358d
224 changed files with 33747 additions and 6076 deletions

View File

@ -90,6 +90,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.hw_filter_reset_required = true,
.fw_diag_ce_download = false,
.tx_stats_over_pktlog = true,
.dynamic_sar_support = false,
},
{
.id = QCA988X_HW_2_0_VERSION,
@ -124,6 +125,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.hw_filter_reset_required = true,
.fw_diag_ce_download = false,
.tx_stats_over_pktlog = true,
.dynamic_sar_support = false,
},
{
.id = QCA9887_HW_1_0_VERSION,
@ -159,6 +161,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.hw_filter_reset_required = true,
.fw_diag_ce_download = false,
.tx_stats_over_pktlog = false,
.dynamic_sar_support = false,
},
{
.id = QCA6174_HW_3_2_VERSION,
@ -189,6 +192,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.tx_stats_over_pktlog = false,
.bmi_large_size_download = true,
.supports_peer_stats_info = true,
.dynamic_sar_support = true,
},
{
.id = QCA6174_HW_2_1_VERSION,
@ -223,6 +227,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.hw_filter_reset_required = true,
.fw_diag_ce_download = false,
.tx_stats_over_pktlog = false,
.dynamic_sar_support = false,
},
{
.id = QCA6174_HW_2_1_VERSION,
@ -257,6 +262,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.hw_filter_reset_required = true,
.fw_diag_ce_download = false,
.tx_stats_over_pktlog = false,
.dynamic_sar_support = false,
},
{
.id = QCA6174_HW_3_0_VERSION,
@ -291,6 +297,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.hw_filter_reset_required = true,
.fw_diag_ce_download = false,
.tx_stats_over_pktlog = false,
.dynamic_sar_support = false,
},
{
.id = QCA6174_HW_3_2_VERSION,
@ -329,6 +336,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.fw_diag_ce_download = true,
.tx_stats_over_pktlog = false,
.supports_peer_stats_info = true,
.dynamic_sar_support = true,
},
{
.id = QCA99X0_HW_2_0_DEV_VERSION,
@ -369,6 +377,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.hw_filter_reset_required = true,
.fw_diag_ce_download = false,
.tx_stats_over_pktlog = false,
.dynamic_sar_support = false,
},
{
.id = QCA9984_HW_1_0_DEV_VERSION,
@ -416,6 +425,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.hw_filter_reset_required = true,
.fw_diag_ce_download = false,
.tx_stats_over_pktlog = false,
.dynamic_sar_support = false,
},
{
.id = QCA9888_HW_2_0_DEV_VERSION,
@ -460,6 +470,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.hw_filter_reset_required = true,
.fw_diag_ce_download = false,
.tx_stats_over_pktlog = false,
.dynamic_sar_support = false,
},
{
.id = QCA9377_HW_1_0_DEV_VERSION,
@ -494,6 +505,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.hw_filter_reset_required = true,
.fw_diag_ce_download = false,
.tx_stats_over_pktlog = false,
.dynamic_sar_support = false,
},
{
.id = QCA9377_HW_1_1_DEV_VERSION,
@ -530,6 +542,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.hw_filter_reset_required = true,
.fw_diag_ce_download = true,
.tx_stats_over_pktlog = false,
.dynamic_sar_support = false,
},
{
.id = QCA9377_HW_1_1_DEV_VERSION,
@ -557,6 +570,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.ast_skid_limit = 0x10,
.num_wds_entries = 0x20,
.uart_pin_workaround = true,
.dynamic_sar_support = false,
},
{
.id = QCA4019_HW_1_0_DEV_VERSION,
@ -598,6 +612,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.hw_filter_reset_required = true,
.fw_diag_ce_download = false,
.tx_stats_over_pktlog = false,
.dynamic_sar_support = false,
},
{
.id = WCN3990_HW_1_0_DEV_VERSION,
@ -625,6 +640,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.hw_filter_reset_required = false,
.fw_diag_ce_download = false,
.tx_stats_over_pktlog = false,
.dynamic_sar_support = true,
},
};

View File

@ -1019,7 +1019,6 @@ struct ath10k {
enum ath10k_hw_rev hw_rev;
u16 dev_id;
u32 chip_id;
enum ath10k_dev_type dev_type;
u32 target_version;
u8 fw_version_major;
u32 fw_version_minor;
@ -1296,6 +1295,9 @@ struct ath10k {
bool coex_support;
int coex_gpio_pin;
s32 tx_power_2g_limit;
s32 tx_power_5g_limit;
/* must be last */
u8 drv_priv[] __aligned(sizeof(void *));
};

View File

@ -34,6 +34,7 @@ enum ath10k_debug_mask {
ATH10K_DBG_USB_BULK = 0x00080000,
ATH10K_DBG_SNOC = 0x00100000,
ATH10K_DBG_QMI = 0x00200000,
ATH10K_DBG_STA = 0x00400000,
ATH10K_DBG_ANY = 0xffffffff,
};

View File

@ -449,6 +449,10 @@ void ath10k_htc_rx_completion_handler(struct ath10k *ar, struct sk_buff *skb)
}
ep = &htc->endpoint[eid];
if (ep->service_id == ATH10K_HTC_SVC_ID_UNUSED) {
ath10k_warn(ar, "htc rx endpoint %d is not connected\n", eid);
goto out;
}
payload_len = __le16_to_cpu(hdr->len);

View File

@ -2241,7 +2241,7 @@ struct htt_rx_chan_info {
* Should be: sizeof(struct htt_host_rx_desc) + max rx MSDU size,
* rounded up to a cache line size.
*/
#define HTT_RX_BUF_SIZE 1920
#define HTT_RX_BUF_SIZE 2048
#define HTT_RX_MSDU_SIZE (HTT_RX_BUF_SIZE - (int)sizeof(struct htt_rx_desc))
/* Refill a bunch of RX buffers for each refill round so that FW/HW can handle

View File

@ -2781,13 +2781,13 @@ static void ath10k_htt_rx_addba(struct ath10k *ar, struct htt_resp *resp)
peer_id = MS(info0, HTT_RX_BA_INFO0_PEER_ID);
ath10k_dbg(ar, ATH10K_DBG_HTT,
"htt rx addba tid %hu peer_id %hu size %hhu\n",
"htt rx addba tid %u peer_id %u size %u\n",
tid, peer_id, ev->window_size);
spin_lock_bh(&ar->data_lock);
peer = ath10k_peer_find_by_id(ar, peer_id);
if (!peer) {
ath10k_warn(ar, "received addba event for invalid peer_id: %hu\n",
ath10k_warn(ar, "received addba event for invalid peer_id: %u\n",
peer_id);
spin_unlock_bh(&ar->data_lock);
return;
@ -2802,7 +2802,7 @@ static void ath10k_htt_rx_addba(struct ath10k *ar, struct htt_resp *resp)
}
ath10k_dbg(ar, ATH10K_DBG_HTT,
"htt rx start rx ba session sta %pM tid %hu size %hhu\n",
"htt rx start rx ba session sta %pM tid %u size %u\n",
peer->addr, tid, ev->window_size);
ieee80211_start_rx_ba_session_offl(arvif->vif, peer->addr, tid);
@ -2821,13 +2821,13 @@ static void ath10k_htt_rx_delba(struct ath10k *ar, struct htt_resp *resp)
peer_id = MS(info0, HTT_RX_BA_INFO0_PEER_ID);
ath10k_dbg(ar, ATH10K_DBG_HTT,
"htt rx delba tid %hu peer_id %hu\n",
"htt rx delba tid %u peer_id %u\n",
tid, peer_id);
spin_lock_bh(&ar->data_lock);
peer = ath10k_peer_find_by_id(ar, peer_id);
if (!peer) {
ath10k_warn(ar, "received addba event for invalid peer_id: %hu\n",
ath10k_warn(ar, "received addba event for invalid peer_id: %u\n",
peer_id);
spin_unlock_bh(&ar->data_lock);
return;
@ -2842,7 +2842,7 @@ static void ath10k_htt_rx_delba(struct ath10k *ar, struct htt_resp *resp)
}
ath10k_dbg(ar, ATH10K_DBG_HTT,
"htt rx stop rx ba session sta %pM tid %hu\n",
"htt rx stop rx ba session sta %pM tid %u\n",
peer->addr, tid);
ieee80211_stop_rx_ba_session_offl(arvif->vif, peer->addr, tid);
@ -3102,7 +3102,7 @@ static void ath10k_htt_rx_tx_fetch_ind(struct ath10k *ar, struct sk_buff *skb)
return;
}
ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch ind num records %hu num resps %hu seq %hu\n",
ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch ind num records %u num resps %u seq %u\n",
num_records, num_resp_ids,
le16_to_cpu(resp->tx_fetch_ind.fetch_seq_num));
@ -3127,12 +3127,12 @@ static void ath10k_htt_rx_tx_fetch_ind(struct ath10k *ar, struct sk_buff *skb)
max_num_msdus = le16_to_cpu(record->num_msdus);
max_num_bytes = le32_to_cpu(record->num_bytes);
ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch record %i peer_id %hu tid %hhu msdus %zu bytes %zu\n",
ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch record %i peer_id %u tid %u msdus %zu bytes %zu\n",
i, peer_id, tid, max_num_msdus, max_num_bytes);
if (unlikely(peer_id >= ar->htt.tx_q_state.num_peers) ||
unlikely(tid >= ar->htt.tx_q_state.num_tids)) {
ath10k_warn(ar, "received out of range peer_id %hu tid %hhu\n",
ath10k_warn(ar, "received out of range peer_id %u tid %u\n",
peer_id, tid);
continue;
}
@ -3146,7 +3146,7 @@ static void ath10k_htt_rx_tx_fetch_ind(struct ath10k *ar, struct sk_buff *skb)
*/
if (unlikely(!txq)) {
ath10k_warn(ar, "failed to lookup txq for peer_id %hu tid %hhu\n",
ath10k_warn(ar, "failed to lookup txq for peer_id %u tid %u\n",
peer_id, tid);
continue;
}
@ -3259,7 +3259,7 @@ static void ath10k_htt_rx_tx_mode_switch_ind(struct ath10k *ar,
threshold = MS(info1, HTT_TX_MODE_SWITCH_IND_INFO1_THRESHOLD);
ath10k_dbg(ar, ATH10K_DBG_HTT,
"htt rx tx mode switch ind info0 0x%04hx info1 0x%04hx enable %d num records %zd mode %d threshold %hu\n",
"htt rx tx mode switch ind info0 0x%04hx info1 0x%04x enable %d num records %zd mode %d threshold %u\n",
info0, info1, enable, num_records, mode, threshold);
len += sizeof(resp->tx_mode_switch_ind.records[0]) * num_records;
@ -3296,7 +3296,7 @@ static void ath10k_htt_rx_tx_mode_switch_ind(struct ath10k *ar,
if (unlikely(peer_id >= ar->htt.tx_q_state.num_peers) ||
unlikely(tid >= ar->htt.tx_q_state.num_tids)) {
ath10k_warn(ar, "received out of range peer_id %hu tid %hhu\n",
ath10k_warn(ar, "received out of range peer_id %u tid %u\n",
peer_id, tid);
continue;
}
@ -3310,7 +3310,7 @@ static void ath10k_htt_rx_tx_mode_switch_ind(struct ath10k *ar,
*/
if (unlikely(!txq)) {
ath10k_warn(ar, "failed to lookup txq for peer_id %hu tid %hhu\n",
ath10k_warn(ar, "failed to lookup txq for peer_id %u tid %u\n",
peer_id, tid);
continue;
}
@ -3348,7 +3348,7 @@ static inline s8 ath10k_get_legacy_rate_idx(struct ath10k *ar, u8 rate)
return i;
}
ath10k_warn(ar, "Invalid legacy rate %hhd peer stats", rate);
ath10k_warn(ar, "Invalid legacy rate %d peer stats", rate);
return -EINVAL;
}
@ -3502,13 +3502,13 @@ ath10k_update_per_peer_tx_stats(struct ath10k *ar,
return;
if (txrate.flags == WMI_RATE_PREAMBLE_VHT && txrate.mcs > 9) {
ath10k_warn(ar, "Invalid VHT mcs %hhd peer stats", txrate.mcs);
ath10k_warn(ar, "Invalid VHT mcs %d peer stats", txrate.mcs);
return;
}
if (txrate.flags == WMI_RATE_PREAMBLE_HT &&
(txrate.mcs > 7 || txrate.nss < 1)) {
ath10k_warn(ar, "Invalid HT mcs %hhd nss %hhd peer stats",
ath10k_warn(ar, "Invalid HT mcs %d nss %d peer stats",
txrate.mcs, txrate.nss);
return;
}

View File

@ -72,7 +72,7 @@ static void __ath10k_htt_tx_txq_recalc(struct ieee80211_hw *hw,
if (unlikely(peer_id >= ar->htt.tx_q_state.num_peers) ||
unlikely(tid >= ar->htt.tx_q_state.num_tids)) {
ath10k_warn(ar, "refusing to update txq for peer_id %hu tid %hhu due to out of bounds\n",
ath10k_warn(ar, "refusing to update txq for peer_id %u tid %u due to out of bounds\n",
peer_id, tid);
return;
}
@ -81,7 +81,7 @@ static void __ath10k_htt_tx_txq_recalc(struct ieee80211_hw *hw,
ar->htt.tx_q_state.vaddr->map[tid][idx] &= ~bit;
ar->htt.tx_q_state.vaddr->map[tid][idx] |= count ? bit : 0;
ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx txq state update peer_id %hu tid %hhu count %hhu\n",
ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx txq state update peer_id %u tid %u count %u\n",
peer_id, tid, count);
}
@ -213,7 +213,7 @@ void ath10k_htt_tx_free_msdu_id(struct ath10k_htt *htt, u16 msdu_id)
lockdep_assert_held(&htt->tx_lock);
ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx free msdu_id %hu\n", msdu_id);
ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx free msdu_id %u\n", msdu_id);
idr_remove(&htt->pending_tx, msdu_id);
}
@ -507,7 +507,7 @@ static int ath10k_htt_tx_clean_up_pending(int msdu_id, void *skb, void *ctx)
struct ath10k_htt *htt = &ar->htt;
struct htt_tx_done tx_done = {0};
ath10k_dbg(ar, ATH10K_DBG_HTT, "force cleanup msdu_id %hu\n", msdu_id);
ath10k_dbg(ar, ATH10K_DBG_HTT, "force cleanup msdu_id %u\n", msdu_id);
tx_done.msdu_id = msdu_id;
tx_done.status = HTT_TX_COMPL_STATE_DISCARD;
@ -569,6 +569,8 @@ void ath10k_htt_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb)
desc_hdr = (struct htt_data_tx_desc *)
(skb->data + sizeof(*htt_hdr));
flags1 = __le16_to_cpu(desc_hdr->flags1);
skb_pull(skb, sizeof(struct htt_cmd_hdr));
skb_pull(skb, sizeof(struct htt_data_tx_desc));
}
}
@ -1557,7 +1559,7 @@ static int ath10k_htt_tx_32(struct ath10k_htt *htt,
trace_ath10k_htt_tx(ar, msdu_id, msdu->len, vdev_id, tid);
ath10k_dbg(ar, ATH10K_DBG_HTT,
"htt tx flags0 %hhu flags1 %hu len %d id %hu frags_paddr %pad, msdu_paddr %pad vdev %hhu tid %hhu freq %hu\n",
"htt tx flags0 %u flags1 %u len %d id %u frags_paddr %pad, msdu_paddr %pad vdev %u tid %u freq %u\n",
flags0, flags1, msdu->len, msdu_id, &frags_paddr,
&skb_cb->paddr, vdev_id, tid, freq);
ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt tx msdu: ",
@ -1766,7 +1768,7 @@ static int ath10k_htt_tx_64(struct ath10k_htt *htt,
trace_ath10k_htt_tx(ar, msdu_id, msdu->len, vdev_id, tid);
ath10k_dbg(ar, ATH10K_DBG_HTT,
"htt tx flags0 %hhu flags1 %hu len %d id %hu frags_paddr %pad, msdu_paddr %pad vdev %hhu tid %hhu freq %hu\n",
"htt tx flags0 %u flags1 %u len %d id %u frags_paddr %pad, msdu_paddr %pad vdev %u tid %u freq %u\n",
flags0, flags1, msdu->len, msdu_id, &frags_paddr,
&skb_cb->paddr, vdev_id, tid, freq);
ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt tx msdu: ",

View File

@ -623,6 +623,8 @@ struct ath10k_hw_params {
/* provides bitrates for sta_statistics using WMI_TLV_PEER_STATS_INFO_EVENTID */
bool supports_peer_stats_info;
bool dynamic_sar_support;
};
struct htt_rx_desc;

View File

@ -81,6 +81,17 @@ static struct ieee80211_rate ath10k_rates_rev2[] = {
{ .bitrate = 540, .hw_value = ATH10K_HW_RATE_OFDM_54M },
};
static const struct cfg80211_sar_freq_ranges ath10k_sar_freq_ranges[] = {
{.start_freq = 2402, .end_freq = 2494 },
{.start_freq = 5170, .end_freq = 5875 },
};
static const struct cfg80211_sar_capa ath10k_sar_capa = {
.type = NL80211_SAR_TYPE_POWER,
.num_freq_ranges = (ARRAY_SIZE(ath10k_sar_freq_ranges)),
.freq_ranges = &ath10k_sar_freq_ranges[0],
};
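For readers unfamiliar with the cfg80211 SAR interface used above: the two freq_ranges entries are what userspace refers to by index when it submits limits, index 0 covering the 2.4 GHz range and index 1 the 5 GHz range. A hedged sketch of the kind of request the new ath10k_mac_set_sar_specs() callback (added further down in this file) would consume — the array name and the numeric limits are made-up illustration values, not anything from this patch:

/* Illustrative only: SAR sub-specs as cfg80211 hands them to the driver.
 * Power is expressed in 0.25 dBm steps (see the comment in
 * ath10k_mac_set_sar_specs() below), so 40 means a 10 dBm limit.
 */
static const struct cfg80211_sar_sub_specs example_sub_specs[] = {
	{ .power = 40, .freq_range_index = 0 },	/* 10 dBm on the 2.4 GHz range */
	{ .power = 80, .freq_range_index = 1 },	/* 20 dBm on the 5 GHz range   */
};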
#define ATH10K_MAC_FIRST_OFDM_RATE_IDX 4
#define ath10k_a_rates (ath10k_rates + ATH10K_MAC_FIRST_OFDM_RATE_IDX)
@ -2177,7 +2188,8 @@ static void ath10k_peer_assoc_h_crypto(struct ath10k *ar,
if (WARN_ON(ath10k_mac_vif_chan(vif, &def)))
return;
bss = cfg80211_get_bss(ar->hw->wiphy, def.chan, info->bssid, NULL, 0,
bss = cfg80211_get_bss(ar->hw->wiphy, def.chan, info->bssid,
info->ssid_len ? info->ssid : NULL, info->ssid_len,
IEEE80211_BSS_TYPE_ANY, IEEE80211_PRIVACY_ANY);
if (bss) {
const struct cfg80211_bss_ies *ies;
@ -2880,6 +2892,158 @@ static int ath10k_mac_vif_recalc_txbf(struct ath10k *ar,
return 0;
}
static bool ath10k_mac_is_connected(struct ath10k *ar)
{
struct ath10k_vif *arvif;
list_for_each_entry(arvif, &ar->arvifs, list) {
if (arvif->is_up && arvif->vdev_type == WMI_VDEV_TYPE_STA)
return true;
}
return false;
}
static int ath10k_mac_txpower_setup(struct ath10k *ar, int txpower)
{
int ret;
u32 param;
int tx_power_2g, tx_power_5g;
bool connected;
lockdep_assert_held(&ar->conf_mutex);
/* ath10k internally uses unit of 0.5 dBm so multiply by 2 */
tx_power_2g = txpower * 2;
tx_power_5g = txpower * 2;
connected = ath10k_mac_is_connected(ar);
if (connected && ar->tx_power_2g_limit)
if (tx_power_2g > ar->tx_power_2g_limit)
tx_power_2g = ar->tx_power_2g_limit;
if (connected && ar->tx_power_5g_limit)
if (tx_power_5g > ar->tx_power_5g_limit)
tx_power_5g = ar->tx_power_5g_limit;
ath10k_dbg(ar, ATH10K_DBG_MAC, "mac txpower 2g: %d, 5g: %d\n",
tx_power_2g, tx_power_5g);
param = ar->wmi.pdev_param->txpower_limit2g;
ret = ath10k_wmi_pdev_set_param(ar, param, tx_power_2g);
if (ret) {
ath10k_warn(ar, "failed to set 2g txpower %d: %d\n",
tx_power_2g, ret);
return ret;
}
param = ar->wmi.pdev_param->txpower_limit5g;
ret = ath10k_wmi_pdev_set_param(ar, param, tx_power_5g);
if (ret) {
ath10k_warn(ar, "failed to set 5g txpower %d: %d\n",
tx_power_5g, ret);
return ret;
}
return 0;
}
static int ath10k_mac_txpower_recalc(struct ath10k *ar)
{
struct ath10k_vif *arvif;
int ret, txpower = -1;
lockdep_assert_held(&ar->conf_mutex);
list_for_each_entry(arvif, &ar->arvifs, list) {
/* txpower not initialized yet? */
if (arvif->txpower == INT_MIN)
continue;
if (txpower == -1)
txpower = arvif->txpower;
else
txpower = min(txpower, arvif->txpower);
}
if (txpower == -1)
return 0;
ret = ath10k_mac_txpower_setup(ar, txpower);
if (ret) {
ath10k_warn(ar, "failed to setup tx power %d: %d\n",
txpower, ret);
return ret;
}
return 0;
}
static int ath10k_mac_set_sar_power(struct ath10k *ar)
{
if (!ar->hw_params.dynamic_sar_support)
return -EOPNOTSUPP;
if (!ath10k_mac_is_connected(ar))
return 0;
/* if connected, then arvif->txpower must be valid */
return ath10k_mac_txpower_recalc(ar);
}
static int ath10k_mac_set_sar_specs(struct ieee80211_hw *hw,
const struct cfg80211_sar_specs *sar)
{
const struct cfg80211_sar_sub_specs *sub_specs;
struct ath10k *ar = hw->priv;
u32 i;
int ret;
mutex_lock(&ar->conf_mutex);
if (!ar->hw_params.dynamic_sar_support) {
ret = -EOPNOTSUPP;
goto err;
}
if (!sar || sar->type != NL80211_SAR_TYPE_POWER ||
sar->num_sub_specs == 0) {
ret = -EINVAL;
goto err;
}
sub_specs = sar->sub_specs;
/* 0 dBm is not a practical value for ath10k, so treat 0
* as meaning no SAR limit.
*/
ar->tx_power_2g_limit = 0;
ar->tx_power_5g_limit = 0;
/* note the power is in 0.25 dBm units, while ath10k uses
* 0.5 dBm units.
*/
for (i = 0; i < sar->num_sub_specs; i++) {
if (sub_specs->freq_range_index == 0)
ar->tx_power_2g_limit = sub_specs->power / 2;
else if (sub_specs->freq_range_index == 1)
ar->tx_power_5g_limit = sub_specs->power / 2;
sub_specs++;
}
ret = ath10k_mac_set_sar_power(ar);
if (ret) {
ath10k_warn(ar, "failed to set sar power: %d", ret);
goto err;
}
err:
mutex_unlock(&ar->conf_mutex);
return ret;
}
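To make the unit handling in ath10k_mac_txpower_setup() and ath10k_mac_set_sar_specs() above concrete, here is a small worked example with made-up numbers (a sketch, not driver code): nl80211 supplies SAR power in 0.25 dBm steps, the ath10k firmware parameters use 0.5 dBm steps, and the mac80211 txpower is plain dBm.

/* Hypothetical values, illustrating the conversions used above. */
int sar_power_q2   = 40;                /* from userspace: 40 * 0.25 dBm = 10 dBm  */
int limit_half_dbm = sar_power_q2 / 2;  /* 20 -> 10 dBm in ath10k's 0.5 dBm units  */
int txpower_dbm    = 17;                /* requested via mac80211 (plain dBm)      */
int fw_half_dbm    = txpower_dbm * 2;   /* 34 -> 17 dBm in 0.5 dBm units           */

if (limit_half_dbm && fw_half_dbm > limit_half_dbm)
	fw_half_dbm = limit_half_dbm;   /* clamped to 20, i.e. the 10 dBm SAR cap  */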
/* can be called only in mac80211 callbacks due to `key_count` usage */
static void ath10k_bss_assoc(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
@ -2968,6 +3132,8 @@ static void ath10k_bss_assoc(struct ieee80211_hw *hw,
arvif->is_up = true;
ath10k_mac_set_sar_power(ar);
/* Workaround: Some firmware revisions (tested with qca6174
* WLAN.RM.2.0-00073) have buggy powersave state machine and must be
* poked with peer param command.
@ -3010,6 +3176,8 @@ static void ath10k_bss_disassoc(struct ieee80211_hw *hw,
arvif->is_up = false;
ath10k_mac_txpower_recalc(ar);
cancel_delayed_work_sync(&arvif->connection_loss_work);
}
@ -3763,23 +3931,16 @@ bool ath10k_mac_tx_frm_has_freq(struct ath10k *ar)
static int ath10k_mac_tx_wmi_mgmt(struct ath10k *ar, struct sk_buff *skb)
{
struct sk_buff_head *q = &ar->wmi_mgmt_tx_queue;
int ret = 0;
spin_lock_bh(&ar->data_lock);
if (skb_queue_len(q) == ATH10K_MAX_NUM_MGMT_PENDING) {
if (skb_queue_len_lockless(q) >= ATH10K_MAX_NUM_MGMT_PENDING) {
ath10k_warn(ar, "wmi mgmt tx queue is full\n");
ret = -ENOSPC;
goto unlock;
return -ENOSPC;
}
__skb_queue_tail(q, skb);
skb_queue_tail(q, skb);
ieee80211_queue_work(ar->hw, &ar->wmi_mgmt_tx_work);
unlock:
spin_unlock_bh(&ar->data_lock);
return ret;
return 0;
}
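A short note on the locking change above, as I read it: skb_queue_tail() takes the queue's own spinlock internally, so the driver no longer needs to wrap the enqueue in ar->data_lock, and the length check becomes an opportunistic, lockless bound (hence >= rather than ==). Roughly, the two sk_buff helpers behave like this (paraphrased sketch with renamed functions, not the kernel source verbatim):

/* skb_queue_len_lockless(): racy but safe snapshot of the queue length */
static inline __u32 my_skb_queue_len_lockless(const struct sk_buff_head *list)
{
	return READ_ONCE(list->qlen);
}

/* skb_queue_tail(): locked enqueue, so no external lock is required */
static inline void my_skb_queue_tail(struct sk_buff_head *list, struct sk_buff *skb)
{
	unsigned long flags;

	spin_lock_irqsave(&list->lock, flags);
	__skb_queue_tail(list, skb);
	spin_unlock_irqrestore(&list->lock, flags);
}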
static enum ath10k_mac_tx_path
@ -3954,9 +4115,8 @@ void ath10k_offchan_tx_work(struct work_struct *work)
spin_unlock_bh(&ar->data_lock);
if (peer)
/* FIXME: should this use ath10k_warn()? */
ath10k_dbg(ar, ATH10K_DBG_MAC, "peer %pM on vdev %d already present\n",
peer_addr, vdev_id);
ath10k_warn(ar, "peer %pM on vdev %d already present\n",
peer_addr, vdev_id);
if (!peer) {
ret = ath10k_peer_create(ar, NULL, NULL, vdev_id,
@ -5207,65 +5367,6 @@ static int ath10k_config_ps(struct ath10k *ar)
return ret;
}
static int ath10k_mac_txpower_setup(struct ath10k *ar, int txpower)
{
int ret;
u32 param;
lockdep_assert_held(&ar->conf_mutex);
ath10k_dbg(ar, ATH10K_DBG_MAC, "mac txpower %d\n", txpower);
param = ar->wmi.pdev_param->txpower_limit2g;
ret = ath10k_wmi_pdev_set_param(ar, param, txpower * 2);
if (ret) {
ath10k_warn(ar, "failed to set 2g txpower %d: %d\n",
txpower, ret);
return ret;
}
param = ar->wmi.pdev_param->txpower_limit5g;
ret = ath10k_wmi_pdev_set_param(ar, param, txpower * 2);
if (ret) {
ath10k_warn(ar, "failed to set 5g txpower %d: %d\n",
txpower, ret);
return ret;
}
return 0;
}
static int ath10k_mac_txpower_recalc(struct ath10k *ar)
{
struct ath10k_vif *arvif;
int ret, txpower = -1;
lockdep_assert_held(&ar->conf_mutex);
list_for_each_entry(arvif, &ar->arvifs, list) {
/* txpower not initialized yet? */
if (arvif->txpower == INT_MIN)
continue;
if (txpower == -1)
txpower = arvif->txpower;
else
txpower = min(txpower, arvif->txpower);
}
if (txpower == -1)
return 0;
ret = ath10k_mac_txpower_setup(ar, txpower);
if (ret) {
ath10k_warn(ar, "failed to setup tx power %d: %d\n",
txpower, ret);
return ret;
}
return 0;
}
static int ath10k_config(struct ieee80211_hw *hw, u32 changed)
{
struct ath10k *ar = hw->priv;
@ -6565,7 +6666,7 @@ static void ath10k_sta_rc_update_wk(struct work_struct *wk)
enum wmi_phy_mode mode;
mode = chan_to_phymode(&def);
ath10k_dbg(ar, ATH10K_DBG_MAC, "mac update sta %pM peer bw %d phymode %d\n",
ath10k_dbg(ar, ATH10K_DBG_STA, "mac update sta %pM peer bw %d phymode %d\n",
sta->addr, bw, mode);
err = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr,
@ -6584,7 +6685,7 @@ static void ath10k_sta_rc_update_wk(struct work_struct *wk)
}
if (changed & IEEE80211_RC_NSS_CHANGED) {
ath10k_dbg(ar, ATH10K_DBG_MAC, "mac update sta %pM nss %d\n",
ath10k_dbg(ar, ATH10K_DBG_STA, "mac update sta %pM nss %d\n",
sta->addr, nss);
err = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr,
@ -6595,7 +6696,7 @@ static void ath10k_sta_rc_update_wk(struct work_struct *wk)
}
if (changed & IEEE80211_RC_SMPS_CHANGED) {
ath10k_dbg(ar, ATH10K_DBG_MAC, "mac update sta %pM smps %d\n",
ath10k_dbg(ar, ATH10K_DBG_STA, "mac update sta %pM smps %d\n",
sta->addr, smps);
err = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr,
@ -6606,7 +6707,7 @@ static void ath10k_sta_rc_update_wk(struct work_struct *wk)
}
if (changed & IEEE80211_RC_SUPP_RATES_CHANGED) {
ath10k_dbg(ar, ATH10K_DBG_MAC, "mac update sta %pM supp rates\n",
ath10k_dbg(ar, ATH10K_DBG_STA, "mac update sta %pM supp rates\n",
sta->addr);
err = ath10k_station_assoc(ar, arvif->vif, sta, true);
@ -7302,7 +7403,7 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
enum wmi_peer_type peer_type = WMI_PEER_TYPE_DEFAULT;
u32 num_tdls_stations;
ath10k_dbg(ar, ATH10K_DBG_MAC,
ath10k_dbg(ar, ATH10K_DBG_STA,
"mac vdev %d peer create %pM (new sta) sta %d / %d peer %d / %d\n",
arvif->vdev_id, sta->addr,
ar->num_stations + 1, ar->max_num_stations,
@ -7402,7 +7503,7 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
/*
* Existing station deletion.
*/
ath10k_dbg(ar, ATH10K_DBG_MAC,
ath10k_dbg(ar, ATH10K_DBG_STA,
"mac vdev %d peer delete %pM sta %pK (sta gone)\n",
arvif->vdev_id, sta->addr, sta);
@ -7474,7 +7575,7 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
/*
* New association.
*/
ath10k_dbg(ar, ATH10K_DBG_MAC, "mac sta %pM associated\n",
ath10k_dbg(ar, ATH10K_DBG_STA, "mac sta %pM associated\n",
sta->addr);
ret = ath10k_station_assoc(ar, vif, sta, false);
@ -7487,7 +7588,7 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
/*
* Tdls station authorized.
*/
ath10k_dbg(ar, ATH10K_DBG_MAC, "mac tdls sta %pM authorized\n",
ath10k_dbg(ar, ATH10K_DBG_STA, "mac tdls sta %pM authorized\n",
sta->addr);
ret = ath10k_station_assoc(ar, vif, sta, false);
@ -7510,7 +7611,7 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
/*
* Disassociation.
*/
ath10k_dbg(ar, ATH10K_DBG_MAC, "mac sta %pM disassociated\n",
ath10k_dbg(ar, ATH10K_DBG_STA, "mac sta %pM disassociated\n",
sta->addr);
ret = ath10k_station_disassoc(ar, vif, sta);
@ -8069,7 +8170,7 @@ static int ath10k_mac_set_fixed_rate_params(struct ath10k_vif *arvif,
lockdep_assert_held(&ar->conf_mutex);
ath10k_dbg(ar, ATH10K_DBG_MAC, "mac set fixed rate params vdev %i rate 0x%02hhx nss %hhu sgi %hhu\n",
ath10k_dbg(ar, ATH10K_DBG_MAC, "mac set fixed rate params vdev %i rate 0x%02x nss %u sgi %u\n",
arvif->vdev_id, rate, nss, sgi);
vdev_param = ar->wmi.vdev_param->fixed_rate;
@ -8327,7 +8428,7 @@ static void ath10k_sta_rc_update(struct ieee80211_hw *hw,
return;
}
ath10k_dbg(ar, ATH10K_DBG_MAC,
ath10k_dbg(ar, ATH10K_DBG_STA,
"mac sta rc update for %pM changed %08x bw %d nss %d smps %d\n",
sta->addr, changed, sta->bandwidth, sta->rx_nss,
sta->smps_mode);
@ -8426,7 +8527,7 @@ static int ath10k_ampdu_action(struct ieee80211_hw *hw,
enum ieee80211_ampdu_mlme_action action = params->action;
u16 tid = params->tid;
ath10k_dbg(ar, ATH10K_DBG_MAC, "mac ampdu vdev_id %i sta %pM tid %hu action %d\n",
ath10k_dbg(ar, ATH10K_DBG_MAC, "mac ampdu vdev_id %i sta %pM tid %u action %d\n",
arvif->vdev_id, sta->addr, tid, action);
switch (action) {
@ -8522,7 +8623,7 @@ ath10k_mac_update_vif_chan(struct ath10k *ar,
arvif = (void *)vifs[i].vif->drv_priv;
ath10k_dbg(ar, ATH10K_DBG_MAC,
"mac chanctx switch vdev_id %i freq %hu->%hu width %d->%d\n",
"mac chanctx switch vdev_id %i freq %u->%u width %d->%d\n",
arvif->vdev_id,
vifs[i].old_ctx->def.chan->center_freq,
vifs[i].new_ctx->def.chan->center_freq,
@ -8596,7 +8697,7 @@ ath10k_mac_op_add_chanctx(struct ieee80211_hw *hw,
struct ath10k *ar = hw->priv;
ath10k_dbg(ar, ATH10K_DBG_MAC,
"mac chanctx add freq %hu width %d ptr %pK\n",
"mac chanctx add freq %u width %d ptr %pK\n",
ctx->def.chan->center_freq, ctx->def.width, ctx);
mutex_lock(&ar->conf_mutex);
@ -8620,7 +8721,7 @@ ath10k_mac_op_remove_chanctx(struct ieee80211_hw *hw,
struct ath10k *ar = hw->priv;
ath10k_dbg(ar, ATH10K_DBG_MAC,
"mac chanctx remove freq %hu width %d ptr %pK\n",
"mac chanctx remove freq %u width %d ptr %pK\n",
ctx->def.chan->center_freq, ctx->def.width, ctx);
mutex_lock(&ar->conf_mutex);
@ -8685,7 +8786,7 @@ ath10k_mac_op_change_chanctx(struct ieee80211_hw *hw,
mutex_lock(&ar->conf_mutex);
ath10k_dbg(ar, ATH10K_DBG_MAC,
"mac chanctx change freq %hu width %d ptr %pK changed %x\n",
"mac chanctx change freq %u width %d ptr %pK changed %x\n",
ctx->def.chan->center_freq, ctx->def.width, ctx, changed);
/* This shouldn't really happen because channel switching should use
@ -9117,7 +9218,9 @@ static void ath10k_sta_statistics(struct ieee80211_hw *hw,
if (!ath10k_peer_stats_enabled(ar))
return;
mutex_lock(&ar->conf_mutex);
ath10k_debug_fw_stats_request(ar);
mutex_unlock(&ar->conf_mutex);
sinfo->rx_duration = arsta->rx_duration;
sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_DURATION);
@ -9272,6 +9375,7 @@ static const struct ieee80211_ops ath10k_ops = {
#ifdef CONFIG_MAC80211_DEBUGFS
.sta_add_debugfs = ath10k_sta_add_debugfs,
#endif
.set_sar_specs = ath10k_mac_set_sar_specs,
};
#define CHAN2G(_channel, _freq, _flags) { \
@ -10009,6 +10113,9 @@ int ath10k_mac_register(struct ath10k *ar)
goto err_free;
}
if (ar->hw_params.dynamic_sar_support)
ar->hw->wiphy->sar_capa = &ath10k_sar_capa;
if (!test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags))
ar->hw->netdev_features = NETIF_F_HW_CSUM;

View File

@ -283,7 +283,7 @@ TRACE_EVENT(ath10k_htt_pktlog,
),
TP_printk(
"%s %s %d size %hu",
"%s %s %d size %u",
__get_str(driver),
__get_str(device),
__entry->hw_type,
@ -488,7 +488,7 @@ TRACE_EVENT(ath10k_wmi_diag_container,
),
TP_printk(
"%s %s diag container type %hhu timestamp %u code %u len %d",
"%s %s diag container type %u timestamp %u code %u len %d",
__get_str(driver),
__get_str(device),
__entry->type,

View File

@ -211,7 +211,7 @@ void ath10k_peer_map_event(struct ath10k_htt *htt,
if (ev->peer_id >= ATH10K_MAX_NUM_PEER_IDS) {
ath10k_warn(ar,
"received htt peer map event with idx out of bounds: %hu\n",
"received htt peer map event with idx out of bounds: %u\n",
ev->peer_id);
return;
}
@ -247,7 +247,7 @@ void ath10k_peer_unmap_event(struct ath10k_htt *htt,
if (ev->peer_id >= ATH10K_MAX_NUM_PEER_IDS) {
ath10k_warn(ar,
"received htt peer unmap event with idx out of bounds: %hu\n",
"received htt peer unmap event with idx out of bounds: %u\n",
ev->peer_id);
return;
}

View File

@ -93,7 +93,7 @@ ath10k_wmi_tlv_iter(struct ath10k *ar, const void *ptr, size_t len,
if (tlv_len > len) {
ath10k_dbg(ar, ATH10K_DBG_WMI,
"wmi tlv parse failure of tag %hhu at byte %zd (%zu bytes left, %hhu expected)\n",
"wmi tlv parse failure of tag %u at byte %zd (%zu bytes left, %u expected)\n",
tlv_tag, ptr - begin, len, tlv_len);
return -EINVAL;
}
@ -102,7 +102,7 @@ ath10k_wmi_tlv_iter(struct ath10k *ar, const void *ptr, size_t len,
wmi_tlv_policies[tlv_tag].min_len &&
wmi_tlv_policies[tlv_tag].min_len > tlv_len) {
ath10k_dbg(ar, ATH10K_DBG_WMI,
"wmi tlv parse failure of tag %hhu at byte %zd (%hhu bytes is less than min length %zu)\n",
"wmi tlv parse failure of tag %u at byte %zd (%u bytes is less than min length %zu)\n",
tlv_tag, ptr - begin, tlv_len,
wmi_tlv_policies[tlv_tag].min_len);
return -EINVAL;
@ -240,8 +240,10 @@ static int ath10k_wmi_tlv_parse_peer_stats_info(struct ath10k *ar, u16 tag, u16
__le32_to_cpu(stat->last_tx_rate_code),
__le32_to_cpu(stat->last_tx_bitrate_kbps));
rcu_read_lock();
sta = ieee80211_find_sta_by_ifaddr(ar->hw, stat->peer_macaddr.addr, NULL);
if (!sta) {
rcu_read_unlock();
ath10k_warn(ar, "not found station for peer stats\n");
return -EINVAL;
}
@ -251,6 +253,7 @@ static int ath10k_wmi_tlv_parse_peer_stats_info(struct ath10k *ar, u16 tag, u16
arsta->rx_bitrate_kbps = __le32_to_cpu(stat->last_rx_bitrate_kbps);
arsta->tx_rate_code = __le32_to_cpu(stat->last_tx_rate_code);
arsta->tx_bitrate_kbps = __le32_to_cpu(stat->last_tx_bitrate_kbps);
rcu_read_unlock();
return 0;
}
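For context on the rcu_read_lock()/rcu_read_unlock() additions above: ieee80211_find_sta_by_ifaddr() returns a station pointer that is only guaranteed to stay valid inside an RCU read-side critical section, so both the lookup and every dereference of the result must sit between the lock/unlock pair. A minimal sketch of the required pattern (illustrative, not the driver code; hw and addr stand in for the driver's ieee80211_hw pointer and the peer MAC address):

struct ieee80211_sta *sta;

rcu_read_lock();
sta = ieee80211_find_sta_by_ifaddr(hw, addr, NULL);   /* RCU-protected lookup */
if (sta) {
	/* ... use sta only while the RCU read lock is held ... */
}
rcu_read_unlock();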
@ -421,7 +424,7 @@ static int ath10k_wmi_tlv_event_p2p_noa(struct ath10k *ar,
vdev_id = __le32_to_cpu(ev->vdev_id);
ath10k_dbg(ar, ATH10K_DBG_WMI,
"wmi tlv p2p noa vdev_id %i descriptors %hhu\n",
"wmi tlv p2p noa vdev_id %i descriptors %u\n",
vdev_id, noa->num_descriptors);
ath10k_p2p_noa_update_by_vdev_id(ar, vdev_id, noa);
@ -573,13 +576,13 @@ static void ath10k_wmi_event_tdls_peer(struct ath10k *ar, struct sk_buff *skb)
case WMI_TDLS_TEARDOWN_REASON_TX:
case WMI_TDLS_TEARDOWN_REASON_RSSI:
case WMI_TDLS_TEARDOWN_REASON_PTR_TIMEOUT:
rcu_read_lock();
station = ieee80211_find_sta_by_ifaddr(ar->hw,
ev->peer_macaddr.addr,
NULL);
if (!station) {
ath10k_warn(ar, "did not find station from tdls peer event");
kfree(tb);
return;
goto exit;
}
arvif = ath10k_get_arvif(ar, __le32_to_cpu(ev->vdev_id));
ieee80211_tdls_oper_request(
@ -590,6 +593,9 @@ static void ath10k_wmi_event_tdls_peer(struct ath10k *ar, struct sk_buff *skb)
);
break;
}
exit:
rcu_read_unlock();
kfree(tb);
}

View File

@ -3497,7 +3497,7 @@ void ath10k_wmi_event_peer_sta_kickout(struct ath10k *ar, struct sk_buff *skb)
return;
}
ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi event peer sta kickout %pM\n",
ath10k_dbg(ar, ATH10K_DBG_STA, "wmi event peer sta kickout %pM\n",
arg.mac_addr);
rcu_read_lock();
@ -7506,7 +7506,7 @@ ath10k_wmi_op_gen_set_sta_ps(struct ath10k *ar, u32 vdev_id,
cmd->param_id = __cpu_to_le32(param_id);
cmd->param_value = __cpu_to_le32(value);
ath10k_dbg(ar, ATH10K_DBG_WMI,
ath10k_dbg(ar, ATH10K_DBG_STA,
"wmi sta ps param vdev_id 0x%x param %d value %d\n",
vdev_id, param_id, value);
return skb;
@ -9551,7 +9551,7 @@ static int ath10k_wmi_mgmt_tx_clean_up_pending(int msdu_id, void *ptr,
struct sk_buff *msdu;
ath10k_dbg(ar, ATH10K_DBG_WMI,
"force cleanup mgmt msdu_id %hu\n", msdu_id);
"force cleanup mgmt msdu_id %u\n", msdu_id);
msdu = pkt_addr->vaddr;
dma_unmap_single(ar->dev, pkt_addr->paddr,

View File

@ -200,6 +200,7 @@ struct ath11k_vif {
u32 beacon_interval;
u32 dtim_period;
u16 ast_hash;
u16 ast_idx;
u16 tcl_metadata;
u8 hal_addr_search_flags;
u8 search_type;
@ -875,14 +876,6 @@ extern const struct service_to_pipe ath11k_target_service_to_ce_map_wlan_ipq6018
extern const struct ce_pipe_config ath11k_target_ce_config_wlan_qca6390[];
extern const struct service_to_pipe ath11k_target_service_to_ce_map_wlan_qca6390[];
void ath11k_peer_unmap_event(struct ath11k_base *ab, u16 peer_id);
void ath11k_peer_map_event(struct ath11k_base *ab, u8 vdev_id, u16 peer_id,
u8 *mac_addr, u16 ast_hash);
struct ath11k_peer *ath11k_peer_find(struct ath11k_base *ab, int vdev_id,
const u8 *addr);
struct ath11k_peer *ath11k_peer_find_by_addr(struct ath11k_base *ab,
const u8 *addr);
struct ath11k_peer *ath11k_peer_find_by_id(struct ath11k_base *ab, int peer_id);
int ath11k_core_qmi_firmware_ready(struct ath11k_base *ab);
int ath11k_core_pre_init(struct ath11k_base *ab);
int ath11k_core_init(struct ath11k_base *ath11k);

View File

@ -3845,6 +3845,18 @@ htt_print_pdev_obss_pd_stats_tlv_v(const void *tag_buf,
htt_stats_buf->num_obss_tx_ppdu_success);
len += HTT_DBG_OUT(buf + len, buf_len - len, "OBSS Tx failures PPDU = %u\n",
htt_stats_buf->num_obss_tx_ppdu_failure);
len += HTT_DBG_OUT(buf + len, buf_len - len, "Non-SRG Opportunities = %u\n",
htt_stats_buf->num_non_srg_opportunities);
len += HTT_DBG_OUT(buf + len, buf_len - len, "Non-SRG tried PPDU = %u\n",
htt_stats_buf->num_non_srg_ppdu_tried);
len += HTT_DBG_OUT(buf + len, buf_len - len, "Non-SRG success PPDU = %u\n",
htt_stats_buf->num_non_srg_ppdu_success);
len += HTT_DBG_OUT(buf + len, buf_len - len, "SRG Opportunies = %u\n",
htt_stats_buf->num_srg_opportunities);
len += HTT_DBG_OUT(buf + len, buf_len - len, "SRG tried PPDU = %u\n",
htt_stats_buf->num_srg_ppdu_tried);
len += HTT_DBG_OUT(buf + len, buf_len - len, "SRG success PPDU = %u\n",
htt_stats_buf->num_srg_ppdu_success);
if (len >= buf_len)
buf[buf_len - 1] = 0;

View File

@ -1656,8 +1656,19 @@ struct htt_tx_sounding_stats_tlv {
};
struct htt_pdev_obss_pd_stats_tlv {
u32 num_obss_tx_ppdu_success;
u32 num_obss_tx_ppdu_failure;
u32 num_obss_tx_ppdu_success;
u32 num_obss_tx_ppdu_failure;
u32 num_sr_tx_transmissions;
u32 num_spatial_reuse_opportunities;
u32 num_non_srg_opportunities;
u32 num_non_srg_ppdu_tried;
u32 num_non_srg_ppdu_success;
u32 num_srg_opportunities;
u32 num_srg_ppdu_tried;
u32 num_srg_ppdu_success;
u32 num_psr_opportunities;
u32 num_psr_ppdu_tried;
u32 num_psr_ppdu_success;
};
struct htt_ring_backpressure_stats_tlv {

View File

@ -1292,7 +1292,7 @@ int ath11k_dp_htt_tlv_iter(struct ath11k_base *ab, const void *ptr, size_t len,
len -= sizeof(*tlv);
if (tlv_len > len) {
ath11k_err(ab, "htt tlv parse failure of tag %hhu at byte %zd (%zu bytes left, %hhu expected)\n",
ath11k_err(ab, "htt tlv parse failure of tag %u at byte %zd (%zu bytes left, %u expected)\n",
tlv_tag, ptr - begin, len, tlv_len);
return -EINVAL;
}
@ -1381,22 +1381,22 @@ ath11k_update_per_peer_tx_stats(struct ath11k *ar,
*/
if (flags == WMI_RATE_PREAMBLE_HE && mcs > 11) {
ath11k_warn(ab, "Invalid HE mcs %hhd peer stats", mcs);
ath11k_warn(ab, "Invalid HE mcs %d peer stats", mcs);
return;
}
if (flags == WMI_RATE_PREAMBLE_HE && mcs > ATH11K_HE_MCS_MAX) {
ath11k_warn(ab, "Invalid HE mcs %hhd peer stats", mcs);
ath11k_warn(ab, "Invalid HE mcs %d peer stats", mcs);
return;
}
if (flags == WMI_RATE_PREAMBLE_VHT && mcs > ATH11K_VHT_MCS_MAX) {
ath11k_warn(ab, "Invalid VHT mcs %hhd peer stats", mcs);
ath11k_warn(ab, "Invalid VHT mcs %d peer stats", mcs);
return;
}
if (flags == WMI_RATE_PREAMBLE_HT && (mcs > ATH11K_HT_MCS_MAX || nss < 1)) {
ath11k_warn(ab, "Invalid HT mcs %hhd nss %hhd peer stats",
ath11k_warn(ab, "Invalid HT mcs %d nss %d peer stats",
mcs, nss);
return;
}
@ -1652,6 +1652,7 @@ void ath11k_dp_htt_htc_t2h_msg_handler(struct ath11k_base *ab,
u8 mac_addr[ETH_ALEN];
u16 peer_mac_h16;
u16 ast_hash;
u16 hw_peer_id;
ath11k_dbg(ab, ATH11K_DBG_DP_HTT, "dp_htt rx msg type :0x%0x\n", type);
@ -1672,7 +1673,7 @@ void ath11k_dp_htt_htc_t2h_msg_handler(struct ath11k_base *ab,
resp->peer_map_ev.info1);
ath11k_dp_get_mac_addr(resp->peer_map_ev.mac_addr_l32,
peer_mac_h16, mac_addr);
ath11k_peer_map_event(ab, vdev_id, peer_id, mac_addr, 0);
ath11k_peer_map_event(ab, vdev_id, peer_id, mac_addr, 0, 0);
break;
case HTT_T2H_MSG_TYPE_PEER_MAP2:
vdev_id = FIELD_GET(HTT_T2H_PEER_MAP_INFO_VDEV_ID,
@ -1685,7 +1686,10 @@ void ath11k_dp_htt_htc_t2h_msg_handler(struct ath11k_base *ab,
peer_mac_h16, mac_addr);
ast_hash = FIELD_GET(HTT_T2H_PEER_MAP_INFO2_AST_HASH_VAL,
resp->peer_map_ev.info2);
ath11k_peer_map_event(ab, vdev_id, peer_id, mac_addr, ast_hash);
hw_peer_id = FIELD_GET(HTT_T2H_PEER_MAP_INFO1_HW_PEER_ID,
resp->peer_map_ev.info1);
ath11k_peer_map_event(ab, vdev_id, peer_id, mac_addr, ast_hash,
hw_peer_id);
break;
case HTT_T2H_MSG_TYPE_PEER_UNMAP:
case HTT_T2H_MSG_TYPE_PEER_UNMAP2:

View File

@ -165,6 +165,7 @@ tcl_ring_sel:
ti.pkt_offset = 0;
ti.lmac_id = ar->lmac_id;
ti.bss_ast_hash = arvif->ast_hash;
ti.bss_ast_idx = arvif->ast_idx;
ti.dscp_tid_tbl_idx = 0;
if (skb->ip_summed == CHECKSUM_PARTIAL &&

View File

@ -71,6 +71,8 @@ void ath11k_hal_tx_cmd_desc_setup(struct ath11k_base *ab, void *cmd,
tcl_cmd->info3 = FIELD_PREP(HAL_TCL_DATA_CMD_INFO3_DSCP_TID_TABLE_IDX,
ti->dscp_tid_tbl_idx) |
FIELD_PREP(HAL_TCL_DATA_CMD_INFO3_SEARCH_INDEX,
ti->bss_ast_idx) |
FIELD_PREP(HAL_TCL_DATA_CMD_INFO3_CACHE_SET_NUM,
ti->bss_ast_hash);
tcl_cmd->info4 = 0;
}

View File

@ -29,6 +29,7 @@ struct hal_tx_info {
u32 flags1; /* %HAL_TCL_DATA_CMD_INFO2_ */
u16 addr_search_flags; /* %HAL_TCL_DATA_CMD_INFO0_ADDR(X/Y)_ */
u16 bss_ast_hash;
u16 bss_ast_idx;
u8 tid;
u8 search_type; /* %HAL_TX_ADDR_SEARCH_ */
u8 lmac_id;

View File

@ -1871,6 +1871,158 @@ static int ath11k_mac_fils_discovery(struct ath11k_vif *arvif,
return ret;
}
static int ath11k_mac_config_obss_pd(struct ath11k *ar,
struct ieee80211_he_obss_pd *he_obss_pd)
{
u32 bitmap[2], param_id, param_val, pdev_id;
int ret;
s8 non_srg_th = 0, srg_th = 0;
pdev_id = ar->pdev->pdev_id;
/* Set and enable SRG/non-SRG OBSS PD Threshold */
param_id = WMI_PDEV_PARAM_SET_CMD_OBSS_PD_THRESHOLD;
if (test_bit(ATH11K_FLAG_MONITOR_ENABLED, &ar->monitor_flags)) {
ret = ath11k_wmi_pdev_set_param(ar, param_id, 0, pdev_id);
if (ret)
ath11k_warn(ar->ab,
"failed to set obss_pd_threshold for pdev: %u\n",
pdev_id);
return ret;
}
ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
"mac obss pd sr_ctrl %x non_srg_thres %u srg_max %u\n",
he_obss_pd->sr_ctrl, he_obss_pd->non_srg_max_offset,
he_obss_pd->max_offset);
param_val = 0;
if (he_obss_pd->sr_ctrl &
IEEE80211_HE_SPR_NON_SRG_OBSS_PD_SR_DISALLOWED) {
non_srg_th = ATH11K_OBSS_PD_MAX_THRESHOLD;
} else {
if (he_obss_pd->sr_ctrl & IEEE80211_HE_SPR_NON_SRG_OFFSET_PRESENT)
non_srg_th = (ATH11K_OBSS_PD_MAX_THRESHOLD +
he_obss_pd->non_srg_max_offset);
else
non_srg_th = ATH11K_OBSS_PD_NON_SRG_MAX_THRESHOLD;
param_val |= ATH11K_OBSS_PD_NON_SRG_EN;
}
if (he_obss_pd->sr_ctrl & IEEE80211_HE_SPR_SRG_INFORMATION_PRESENT) {
srg_th = ATH11K_OBSS_PD_MAX_THRESHOLD + he_obss_pd->max_offset;
param_val |= ATH11K_OBSS_PD_SRG_EN;
}
if (test_bit(WMI_TLV_SERVICE_SRG_SRP_SPATIAL_REUSE_SUPPORT,
ar->ab->wmi_ab.svc_map)) {
param_val |= ATH11K_OBSS_PD_THRESHOLD_IN_DBM;
param_val |= FIELD_PREP(GENMASK(15, 8), srg_th);
} else {
non_srg_th -= ATH11K_DEFAULT_NOISE_FLOOR;
/* SRG not supported and threshold in dB */
param_val &= ~(ATH11K_OBSS_PD_SRG_EN |
ATH11K_OBSS_PD_THRESHOLD_IN_DBM);
}
param_val |= (non_srg_th & GENMASK(7, 0));
ret = ath11k_wmi_pdev_set_param(ar, param_id, param_val, pdev_id);
if (ret) {
ath11k_warn(ar->ab,
"failed to set obss_pd_threshold for pdev: %u\n",
pdev_id);
return ret;
}
/* Enable OBSS PD for all access categories */
param_id = WMI_PDEV_PARAM_SET_CMD_OBSS_PD_PER_AC;
param_val = 0xf;
ret = ath11k_wmi_pdev_set_param(ar, param_id, param_val, pdev_id);
if (ret) {
ath11k_warn(ar->ab,
"failed to set obss_pd_per_ac for pdev: %u\n",
pdev_id);
return ret;
}
/* Set SR Prohibit */
param_id = WMI_PDEV_PARAM_ENABLE_SR_PROHIBIT;
param_val = !!(he_obss_pd->sr_ctrl &
IEEE80211_HE_SPR_HESIGA_SR_VAL15_ALLOWED);
ret = ath11k_wmi_pdev_set_param(ar, param_id, param_val, pdev_id);
if (ret) {
ath11k_warn(ar->ab, "failed to set sr_prohibit for pdev: %u\n",
pdev_id);
return ret;
}
if (!test_bit(WMI_TLV_SERVICE_SRG_SRP_SPATIAL_REUSE_SUPPORT,
ar->ab->wmi_ab.svc_map))
return 0;
/* Set SRG BSS Color Bitmap */
memcpy(bitmap, he_obss_pd->bss_color_bitmap, sizeof(bitmap));
ret = ath11k_wmi_pdev_set_srg_bss_color_bitmap(ar, bitmap);
if (ret) {
ath11k_warn(ar->ab,
"failed to set bss_color_bitmap for pdev: %u\n",
pdev_id);
return ret;
}
/* Set SRG Partial BSSID Bitmap */
memcpy(bitmap, he_obss_pd->partial_bssid_bitmap, sizeof(bitmap));
ret = ath11k_wmi_pdev_set_srg_patial_bssid_bitmap(ar, bitmap);
if (ret) {
ath11k_warn(ar->ab,
"failed to set partial_bssid_bitmap for pdev: %u\n",
pdev_id);
return ret;
}
memset(bitmap, 0xff, sizeof(bitmap));
/* Enable all BSS Colors for SRG */
ret = ath11k_wmi_pdev_srg_obss_color_enable_bitmap(ar, bitmap);
if (ret) {
ath11k_warn(ar->ab,
"failed to set srg_color_en_bitmap pdev: %u\n",
pdev_id);
return ret;
}
/* Enable all partial BSSID masks for SRG */
ret = ath11k_wmi_pdev_srg_obss_bssid_enable_bitmap(ar, bitmap);
if (ret) {
ath11k_warn(ar->ab,
"failed to set srg_bssid_en_bitmap pdev: %u\n",
pdev_id);
return ret;
}
/* Enable all BSS Colors for non-SRG */
ret = ath11k_wmi_pdev_non_srg_obss_color_enable_bitmap(ar, bitmap);
if (ret) {
ath11k_warn(ar->ab,
"failed to set non_srg_color_en_bitmap pdev: %u\n",
pdev_id);
return ret;
}
/* Enable all partial BSSID masks for non-SRG */
ret = ath11k_wmi_pdev_non_srg_obss_bssid_enable_bitmap(ar, bitmap);
if (ret) {
ath11k_warn(ar->ab,
"failed to set non_srg_bssid_en_bitmap pdev: %u\n",
pdev_id);
return ret;
}
return 0;
}
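To illustrate the threshold packing done in ath11k_mac_config_obss_pd() above, here is a worked example with made-up offsets (a sketch reusing the driver's constants and the FIELD_PREP/GENMASK macros, not driver code). With non_srg_max_offset = 10 and max_offset = 5, the thresholds become -82 + 10 = -72 dBm (non-SRG) and -82 + 5 = -77 dBm (SRG); on firmware with SRG/SRP support they are packed, in dBm, into the low two bytes of the pdev parameter alongside the enable bits:

/* Hypothetical offsets from the HE Spatial Reuse element. */
s8  non_srg_th = ATH11K_OBSS_PD_MAX_THRESHOLD + 10;   /* -72 dBm */
s8  srg_th     = ATH11K_OBSS_PD_MAX_THRESHOLD + 5;    /* -77 dBm */
u32 param_val  = 0;

param_val |= ATH11K_OBSS_PD_NON_SRG_EN;               /* BIT(31)                   */
param_val |= ATH11K_OBSS_PD_SRG_EN;                   /* BIT(30)                   */
param_val |= ATH11K_OBSS_PD_THRESHOLD_IN_DBM;         /* BIT(29)                   */
param_val |= FIELD_PREP(GENMASK(15, 8), srg_th);      /* bits 15:8 = 0xb3 (-77)    */
param_val |= (non_srg_th & GENMASK(7, 0));            /* bits  7:0 = 0xb8 (-72)    */
/* param_val == 0xe000b3b8 for this example */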
static void ath11k_mac_op_bss_info_changed(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *info,
@ -2114,8 +2266,7 @@ static void ath11k_mac_op_bss_info_changed(struct ieee80211_hw *hw,
}
if (changed & BSS_CHANGED_HE_OBSS_PD)
ath11k_wmi_send_obss_spr_cmd(ar, arvif->vdev_id,
&info->he_obss_pd);
ath11k_mac_config_obss_pd(ar, &info->he_obss_pd);
if (changed & BSS_CHANGED_HE_BSS_COLOR) {
if (vif->type == NL80211_IFTYPE_AP) {
@ -4248,11 +4399,6 @@ static int ath11k_mac_op_start(struct ieee80211_hw *hw)
/* Configure the hash seed for hash based reo dest ring selection */
ath11k_wmi_pdev_lro_cfg(ar, ar->pdev->pdev_id);
mutex_unlock(&ar->conf_mutex);
rcu_assign_pointer(ab->pdevs_active[ar->pdev_idx],
&ab->pdevs[ar->pdev_idx]);
/* allow device to enter IMPS */
if (ab->hw_params.idle_ps) {
ret = ath11k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_IDLE_PS_CONFIG,
@ -4262,6 +4408,12 @@ static int ath11k_mac_op_start(struct ieee80211_hw *hw)
goto err;
}
}
mutex_unlock(&ar->conf_mutex);
rcu_assign_pointer(ab->pdevs_active[ar->pdev_idx],
&ab->pdevs[ar->pdev_idx]);
return 0;
err:
@ -4849,7 +5001,7 @@ static int ath11k_mac_op_add_chanctx(struct ieee80211_hw *hw,
struct ath11k_base *ab = ar->ab;
ath11k_dbg(ab, ATH11K_DBG_MAC,
"mac chanctx add freq %hu width %d ptr %pK\n",
"mac chanctx add freq %u width %d ptr %pK\n",
ctx->def.chan->center_freq, ctx->def.width, ctx);
mutex_lock(&ar->conf_mutex);
@ -4873,7 +5025,7 @@ static void ath11k_mac_op_remove_chanctx(struct ieee80211_hw *hw,
struct ath11k_base *ab = ar->ab;
ath11k_dbg(ab, ATH11K_DBG_MAC,
"mac chanctx remove freq %hu width %d ptr %pK\n",
"mac chanctx remove freq %u width %d ptr %pK\n",
ctx->def.chan->center_freq, ctx->def.width, ctx);
mutex_lock(&ar->conf_mutex);
@ -5117,7 +5269,7 @@ ath11k_mac_update_vif_chan(struct ath11k *ar,
arvif = (void *)vifs[i].vif->drv_priv;
ath11k_dbg(ab, ATH11K_DBG_MAC,
"mac chanctx switch vdev_id %i freq %hu->%hu width %d->%d\n",
"mac chanctx switch vdev_id %i freq %u->%u width %d->%d\n",
arvif->vdev_id,
vifs[i].old_ctx->def.chan->center_freq,
vifs[i].new_ctx->def.chan->center_freq,
@ -5214,7 +5366,7 @@ static void ath11k_mac_op_change_chanctx(struct ieee80211_hw *hw,
mutex_lock(&ar->conf_mutex);
ath11k_dbg(ab, ATH11K_DBG_MAC,
"mac chanctx change freq %hu width %d ptr %pK changed %x\n",
"mac chanctx change freq %u width %d ptr %pK changed %x\n",
ctx->def.chan->center_freq, ctx->def.width, ctx, changed);
/* This shouldn't really happen because channel switching should use
@ -5583,7 +5735,7 @@ static int ath11k_mac_set_fixed_rate_params(struct ath11k_vif *arvif,
lockdep_assert_held(&ar->conf_mutex);
ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "mac set fixed rate params vdev %i rate 0x%02hhx nss %hhu sgi %hhu\n",
ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "mac set fixed rate params vdev %i rate 0x%02x nss %u sgi %u\n",
arvif->vdev_id, rate, nss, sgi);
vdev_param = WMI_VDEV_PARAM_FIXED_RATE;
@ -6360,17 +6512,20 @@ static int __ath11k_mac_register(struct ath11k *ar)
ret = ath11k_regd_update(ar, true);
if (ret) {
ath11k_err(ar->ab, "ath11k regd update failed: %d\n", ret);
goto err_free_if_combs;
goto err_unregister_hw;
}
ret = ath11k_debugfs_register(ar);
if (ret) {
ath11k_err(ar->ab, "debugfs registration failed: %d\n", ret);
goto err_free_if_combs;
goto err_unregister_hw;
}
return 0;
err_unregister_hw:
ieee80211_unregister_hw(ar->hw);
err_free_if_combs:
kfree(ar->hw->wiphy->iface_combinations[0].limits);
kfree(ar->hw->wiphy->iface_combinations);

View File

@ -116,6 +116,12 @@ struct ath11k_generic_iter {
#define ATH11K_CHAN_WIDTH_NUM 8
#define ATH11K_OBSS_PD_MAX_THRESHOLD -82
#define ATH11K_OBSS_PD_NON_SRG_MAX_THRESHOLD -62
#define ATH11K_OBSS_PD_THRESHOLD_IN_DBM BIT(29)
#define ATH11K_OBSS_PD_SRG_EN BIT(30)
#define ATH11K_OBSS_PD_NON_SRG_EN BIT(31)
extern const struct htt_rx_ring_tlv_filter ath11k_mac_mon_status_filter_default;
void ath11k_mac_destroy(struct ath11k_base *ab);

View File

@ -1086,8 +1086,6 @@ static int ath11k_pci_probe(struct pci_dev *pdev,
u32 soc_hw_version, soc_hw_version_major, soc_hw_version_minor;
int ret;
dev_warn(&pdev->dev, "WARNING: ath11k PCI support is experimental!\n");
ab = ath11k_core_alloc(&pdev->dev, sizeof(*ab_pci), ATH11K_BUS_PCI,
&ath11k_pci_bus_params);
if (!ab) {

View File

@ -118,7 +118,7 @@ exit:
}
void ath11k_peer_map_event(struct ath11k_base *ab, u8 vdev_id, u16 peer_id,
u8 *mac_addr, u16 ast_hash)
u8 *mac_addr, u16 ast_hash, u16 hw_peer_id)
{
struct ath11k_peer *peer;
@ -132,6 +132,7 @@ void ath11k_peer_map_event(struct ath11k_base *ab, u8 vdev_id, u16 peer_id,
peer->vdev_id = vdev_id;
peer->peer_id = peer_id;
peer->ast_hash = ast_hash;
peer->hw_peer_id = hw_peer_id;
ether_addr_copy(peer->addr, mac_addr);
list_add(&peer->list, &ab->peers);
wake_up(&ab->peer_mapping_wq);
@ -309,7 +310,11 @@ int ath11k_peer_create(struct ath11k *ar, struct ath11k_vif *arvif,
peer->pdev_idx = ar->pdev_idx;
peer->sta = sta;
arvif->ast_hash = peer->ast_hash;
if (arvif->vif->type == NL80211_IFTYPE_STATION) {
arvif->ast_hash = peer->ast_hash;
arvif->ast_idx = peer->hw_peer_id;
}
peer->sec_type = HAL_ENCRYPT_TYPE_OPEN;
peer->sec_type_grp = HAL_ENCRYPT_TYPE_OPEN;

View File

@ -14,6 +14,7 @@ struct ath11k_peer {
int peer_id;
u16 ast_hash;
u8 pdev_idx;
u16 hw_peer_id;
/* protected by ab->data_lock */
struct ieee80211_key_conf *keys[WMI_MAX_KEY_INDEX + 1];
@ -31,7 +32,7 @@ struct ath11k_peer {
void ath11k_peer_unmap_event(struct ath11k_base *ab, u16 peer_id);
void ath11k_peer_map_event(struct ath11k_base *ab, u8 vdev_id, u16 peer_id,
u8 *mac_addr, u16 ast_hash);
u8 *mac_addr, u16 ast_hash, u16 hw_peer_id);
struct ath11k_peer *ath11k_peer_find(struct ath11k_base *ab, int vdev_id,
const u8 *addr);
struct ath11k_peer *ath11k_peer_find_by_addr(struct ath11k_base *ab,

View File

@ -1686,6 +1686,11 @@ static int ath11k_qmi_respond_fw_mem_request(struct ath11k_base *ab)
req->mem_seg[i].addr = ab->qmi.target_mem[i].paddr;
req->mem_seg[i].size = ab->qmi.target_mem[i].size;
req->mem_seg[i].type = ab->qmi.target_mem[i].type;
ath11k_dbg(ab, ATH11K_DBG_QMI,
"qmi req mem_seg[%d] 0x%llx %u %u\n", i,
ab->qmi.target_mem[i].paddr,
ab->qmi.target_mem[i].size,
ab->qmi.target_mem[i].type);
}
}

View File

@ -43,7 +43,7 @@ TRACE_EVENT(ath11k_htt_pktlog,
),
TP_printk(
"%s %s size %hu pktlog_checksum %d",
"%s %s size %u pktlog_checksum %d",
__get_str(driver),
__get_str(device),
__entry->buf_len,

View File

@ -169,7 +169,7 @@ ath11k_wmi_tlv_iter(struct ath11k_base *ab, const void *ptr, size_t len,
len -= sizeof(*tlv);
if (tlv_len > len) {
ath11k_err(ab, "wmi tlv parse failure of tag %hhu at byte %zd (%zu bytes left, %hhu expected)\n",
ath11k_err(ab, "wmi tlv parse failure of tag %u at byte %zd (%zu bytes left, %u expected)\n",
tlv_tag, ptr - begin, len, tlv_len);
return -EINVAL;
}
@ -177,7 +177,7 @@ ath11k_wmi_tlv_iter(struct ath11k_base *ab, const void *ptr, size_t len,
if (tlv_tag < ARRAY_SIZE(wmi_tlv_policies) &&
wmi_tlv_policies[tlv_tag].min_len &&
wmi_tlv_policies[tlv_tag].min_len > tlv_len) {
ath11k_err(ab, "wmi tlv parse failure of tag %hhu at byte %zd (%hhu bytes is less than min length %zu)\n",
ath11k_err(ab, "wmi tlv parse failure of tag %u at byte %zd (%u bytes is less than min length %zu)\n",
tlv_tag, ptr - begin, tlv_len,
wmi_tlv_policies[tlv_tag].min_len);
return -EINVAL;
@ -2970,6 +2970,233 @@ ath11k_wmi_send_obss_spr_cmd(struct ath11k *ar, u32 vdev_id,
return ret;
}
int
ath11k_wmi_pdev_set_srg_bss_color_bitmap(struct ath11k *ar, u32 *bitmap)
{
struct ath11k_pdev_wmi *wmi = ar->wmi;
struct ath11k_base *ab = wmi->wmi_ab->ab;
struct wmi_pdev_obss_pd_bitmap_cmd *cmd;
struct sk_buff *skb;
int ret, len;
len = sizeof(*cmd);
skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
if (!skb)
return -ENOMEM;
cmd = (struct wmi_pdev_obss_pd_bitmap_cmd *)skb->data;
cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
WMI_TAG_PDEV_SRG_BSS_COLOR_BITMAP_CMD) |
FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
cmd->pdev_id = ar->pdev->pdev_id;
memcpy(cmd->bitmap, bitmap, sizeof(cmd->bitmap));
ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
"obss pd pdev_id %d bss color bitmap %08x %08x\n",
cmd->pdev_id, cmd->bitmap[0], cmd->bitmap[1]);
ret = ath11k_wmi_cmd_send(wmi, skb,
WMI_PDEV_SET_SRG_BSS_COLOR_BITMAP_CMDID);
if (ret) {
ath11k_warn(ab,
"failed to send WMI_PDEV_SET_SRG_BSS_COLOR_BITMAP_CMDID");
dev_kfree_skb(skb);
}
return ret;
}
int
ath11k_wmi_pdev_set_srg_patial_bssid_bitmap(struct ath11k *ar, u32 *bitmap)
{
struct ath11k_pdev_wmi *wmi = ar->wmi;
struct ath11k_base *ab = wmi->wmi_ab->ab;
struct wmi_pdev_obss_pd_bitmap_cmd *cmd;
struct sk_buff *skb;
int ret, len;
len = sizeof(*cmd);
skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
if (!skb)
return -ENOMEM;
cmd = (struct wmi_pdev_obss_pd_bitmap_cmd *)skb->data;
cmd->tlv_header =
FIELD_PREP(WMI_TLV_TAG,
WMI_TAG_PDEV_SRG_PARTIAL_BSSID_BITMAP_CMD) |
FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
cmd->pdev_id = ar->pdev->pdev_id;
memcpy(cmd->bitmap, bitmap, sizeof(cmd->bitmap));
ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
"obss pd pdev_id %d partial bssid bitmap %08x %08x\n",
cmd->pdev_id, cmd->bitmap[0], cmd->bitmap[1]);
ret = ath11k_wmi_cmd_send(wmi, skb,
WMI_PDEV_SET_SRG_PARTIAL_BSSID_BITMAP_CMDID);
if (ret) {
ath11k_warn(ab,
"failed to send WMI_PDEV_SET_SRG_PARTIAL_BSSID_BITMAP_CMDID");
dev_kfree_skb(skb);
}
return ret;
}
int
ath11k_wmi_pdev_srg_obss_color_enable_bitmap(struct ath11k *ar, u32 *bitmap)
{
struct ath11k_pdev_wmi *wmi = ar->wmi;
struct ath11k_base *ab = wmi->wmi_ab->ab;
struct wmi_pdev_obss_pd_bitmap_cmd *cmd;
struct sk_buff *skb;
int ret, len;
len = sizeof(*cmd);
skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
if (!skb)
return -ENOMEM;
cmd = (struct wmi_pdev_obss_pd_bitmap_cmd *)skb->data;
cmd->tlv_header =
FIELD_PREP(WMI_TLV_TAG,
WMI_TAG_PDEV_SRG_OBSS_COLOR_ENABLE_BITMAP_CMD) |
FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
cmd->pdev_id = ar->pdev->pdev_id;
memcpy(cmd->bitmap, bitmap, sizeof(cmd->bitmap));
ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
"obss pd srg pdev_id %d bss color enable bitmap %08x %08x\n",
cmd->pdev_id, cmd->bitmap[0], cmd->bitmap[1]);
ret = ath11k_wmi_cmd_send(wmi, skb,
WMI_PDEV_SET_SRG_OBSS_COLOR_ENABLE_BITMAP_CMDID);
if (ret) {
ath11k_warn(ab,
"failed to send WMI_PDEV_SET_SRG_OBSS_COLOR_ENABLE_BITMAP_CMDID");
dev_kfree_skb(skb);
}
return ret;
}
int
ath11k_wmi_pdev_srg_obss_bssid_enable_bitmap(struct ath11k *ar, u32 *bitmap)
{
struct ath11k_pdev_wmi *wmi = ar->wmi;
struct ath11k_base *ab = wmi->wmi_ab->ab;
struct wmi_pdev_obss_pd_bitmap_cmd *cmd;
struct sk_buff *skb;
int ret, len;
len = sizeof(*cmd);
skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
if (!skb)
return -ENOMEM;
cmd = (struct wmi_pdev_obss_pd_bitmap_cmd *)skb->data;
cmd->tlv_header =
FIELD_PREP(WMI_TLV_TAG,
WMI_TAG_PDEV_SRG_OBSS_BSSID_ENABLE_BITMAP_CMD) |
FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
cmd->pdev_id = ar->pdev->pdev_id;
memcpy(cmd->bitmap, bitmap, sizeof(cmd->bitmap));
ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
"obss pd srg pdev_id %d bssid enable bitmap %08x %08x\n",
cmd->pdev_id, cmd->bitmap[0], cmd->bitmap[1]);
ret = ath11k_wmi_cmd_send(wmi, skb,
WMI_PDEV_SET_SRG_OBSS_BSSID_ENABLE_BITMAP_CMDID);
if (ret) {
ath11k_warn(ab,
"failed to send WMI_PDEV_SET_SRG_OBSS_BSSID_ENABLE_BITMAP_CMDID");
dev_kfree_skb(skb);
}
return ret;
}
int
ath11k_wmi_pdev_non_srg_obss_color_enable_bitmap(struct ath11k *ar, u32 *bitmap)
{
struct ath11k_pdev_wmi *wmi = ar->wmi;
struct ath11k_base *ab = wmi->wmi_ab->ab;
struct wmi_pdev_obss_pd_bitmap_cmd *cmd;
struct sk_buff *skb;
int ret, len;
len = sizeof(*cmd);
skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
if (!skb)
return -ENOMEM;
cmd = (struct wmi_pdev_obss_pd_bitmap_cmd *)skb->data;
cmd->tlv_header =
FIELD_PREP(WMI_TLV_TAG,
WMI_TAG_PDEV_NON_SRG_OBSS_COLOR_ENABLE_BITMAP_CMD) |
FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
cmd->pdev_id = ar->pdev->pdev_id;
memcpy(cmd->bitmap, bitmap, sizeof(cmd->bitmap));
ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
"obss pd non_srg pdev_id %d bss color enable bitmap %08x %08x\n",
cmd->pdev_id, cmd->bitmap[0], cmd->bitmap[1]);
ret = ath11k_wmi_cmd_send(wmi, skb,
WMI_PDEV_SET_NON_SRG_OBSS_COLOR_ENABLE_BITMAP_CMDID);
if (ret) {
ath11k_warn(ab,
"failed to send WMI_PDEV_SET_NON_SRG_OBSS_COLOR_ENABLE_BITMAP_CMDID");
dev_kfree_skb(skb);
}
return ret;
}
int
ath11k_wmi_pdev_non_srg_obss_bssid_enable_bitmap(struct ath11k *ar, u32 *bitmap)
{
struct ath11k_pdev_wmi *wmi = ar->wmi;
struct ath11k_base *ab = wmi->wmi_ab->ab;
struct wmi_pdev_obss_pd_bitmap_cmd *cmd;
struct sk_buff *skb;
int ret, len;
len = sizeof(*cmd);
skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
if (!skb)
return -ENOMEM;
cmd = (struct wmi_pdev_obss_pd_bitmap_cmd *)skb->data;
cmd->tlv_header =
FIELD_PREP(WMI_TLV_TAG,
WMI_TAG_PDEV_NON_SRG_OBSS_BSSID_ENABLE_BITMAP_CMD) |
FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
cmd->pdev_id = ar->pdev->pdev_id;
memcpy(cmd->bitmap, bitmap, sizeof(cmd->bitmap));
ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
"obss pd non_srg pdev_id %d bssid enable bitmap %08x %08x\n",
cmd->pdev_id, cmd->bitmap[0], cmd->bitmap[1]);
ret = ath11k_wmi_cmd_send(wmi, skb,
WMI_PDEV_SET_NON_SRG_OBSS_BSSID_ENABLE_BITMAP_CMDID);
if (ret) {
ath11k_warn(ab,
"failed to send WMI_PDEV_SET_NON_SRG_OBSS_BSSID_ENABLE_BITMAP_CMDID");
dev_kfree_skb(skb);
}
return ret;
}
int
ath11k_wmi_send_obss_color_collision_cfg_cmd(struct ath11k *ar, u32 vdev_id,
u8 bss_color, u32 period,

View File

@ -257,6 +257,16 @@ enum wmi_tlv_cmd_id {
WMI_PDEV_DMA_RING_CFG_REQ_CMDID,
WMI_PDEV_HE_TB_ACTION_FRM_CMDID,
WMI_PDEV_PKTLOG_FILTER_CMDID,
WMI_PDEV_SET_RAP_CONFIG_CMDID,
WMI_PDEV_DSM_FILTER_CMDID,
WMI_PDEV_FRAME_INJECT_CMDID,
WMI_PDEV_TBTT_OFFSET_SYNC_CMDID,
WMI_PDEV_SET_SRG_BSS_COLOR_BITMAP_CMDID,
WMI_PDEV_SET_SRG_PARTIAL_BSSID_BITMAP_CMDID,
WMI_PDEV_SET_SRG_OBSS_COLOR_ENABLE_BITMAP_CMDID,
WMI_PDEV_SET_SRG_OBSS_BSSID_ENABLE_BITMAP_CMDID,
WMI_PDEV_SET_NON_SRG_OBSS_COLOR_ENABLE_BITMAP_CMDID,
WMI_PDEV_SET_NON_SRG_OBSS_BSSID_ENABLE_BITMAP_CMDID,
WMI_VDEV_CREATE_CMDID = WMI_TLV_CMD(WMI_GRP_VDEV),
WMI_VDEV_DELETE_CMDID,
WMI_VDEV_START_REQUEST_CMDID,
@ -919,6 +929,9 @@ enum wmi_tlv_pdev_param {
WMI_PDEV_PARAM_RADIO_CHAN_STATS_ENABLE,
WMI_PDEV_PARAM_RADIO_DIAGNOSIS_ENABLE,
WMI_PDEV_PARAM_MESH_MCAST_ENABLE,
WMI_PDEV_PARAM_SET_CMD_OBSS_PD_THRESHOLD = 0xbc,
WMI_PDEV_PARAM_SET_CMD_OBSS_PD_PER_AC = 0xbe,
WMI_PDEV_PARAM_ENABLE_SR_PROHIBIT = 0xc6,
};
enum wmi_tlv_vdev_param {
@ -1812,10 +1825,15 @@ enum wmi_tlv_tag {
WMI_TAG_NDP_CHANNEL_INFO,
WMI_TAG_NDP_CMD,
WMI_TAG_NDP_EVENT,
/* TODO add all the missing cmds */
WMI_TAG_PDEV_PEER_PKTLOG_FILTER_CMD = 0x301,
WMI_TAG_PDEV_PEER_PKTLOG_FILTER_INFO,
WMI_TAG_FILS_DISCOVERY_TMPL_CMD = 0x344,
WMI_TAG_PDEV_SRG_BSS_COLOR_BITMAP_CMD = 0x37b,
WMI_TAG_PDEV_SRG_PARTIAL_BSSID_BITMAP_CMD,
WMI_TAG_PDEV_SRG_OBSS_COLOR_ENABLE_BITMAP_CMD = 0x381,
WMI_TAG_PDEV_SRG_OBSS_BSSID_ENABLE_BITMAP_CMD,
WMI_TAG_PDEV_NON_SRG_OBSS_COLOR_ENABLE_BITMAP_CMD,
WMI_TAG_PDEV_NON_SRG_OBSS_BSSID_ENABLE_BITMAP_CMD,
WMI_TAG_MAX
};
@ -2039,6 +2057,7 @@ enum wmi_tlv_service {
WMI_TLV_SERVICE_PER_PEER_HTT_STATS_RESET = 213,
WMI_TLV_SERVICE_FREQINFO_IN_METADATA = 219,
WMI_TLV_SERVICE_EXT2_MSG = 220,
WMI_TLV_SERVICE_SRG_SRP_SPATIAL_REUSE_SUPPORT = 249,
WMI_MAX_EXT_SERVICE
};
@ -4781,6 +4800,12 @@ struct wmi_obss_spatial_reuse_params_cmd {
u32 vdev_id;
} __packed;
struct wmi_pdev_obss_pd_bitmap_cmd {
u32 tlv_header;
u32 pdev_id;
u32 bitmap[2];
} __packed;
#define ATH11K_BSS_COLOR_COLLISION_SCAN_PERIOD_MS 200
#define ATH11K_OBSS_COLOR_COLLISION_DETECTION_DISABLE 0
#define ATH11K_OBSS_COLOR_COLLISION_DETECTION 1
@ -5316,6 +5341,16 @@ int ath11k_wmi_send_twt_enable_cmd(struct ath11k *ar, u32 pdev_id);
int ath11k_wmi_send_twt_disable_cmd(struct ath11k *ar, u32 pdev_id);
int ath11k_wmi_send_obss_spr_cmd(struct ath11k *ar, u32 vdev_id,
struct ieee80211_he_obss_pd *he_obss_pd);
int ath11k_wmi_pdev_set_srg_bss_color_bitmap(struct ath11k *ar, u32 *bitmap);
int ath11k_wmi_pdev_set_srg_patial_bssid_bitmap(struct ath11k *ar, u32 *bitmap);
int ath11k_wmi_pdev_srg_obss_color_enable_bitmap(struct ath11k *ar,
u32 *bitmap);
int ath11k_wmi_pdev_srg_obss_bssid_enable_bitmap(struct ath11k *ar,
u32 *bitmap);
int ath11k_wmi_pdev_non_srg_obss_color_enable_bitmap(struct ath11k *ar,
u32 *bitmap);
int ath11k_wmi_pdev_non_srg_obss_bssid_enable_bitmap(struct ath11k *ar,
u32 *bitmap);
int ath11k_wmi_send_obss_color_collision_cfg_cmd(struct ath11k *ar, u32 vdev_id,
u8 bss_color, u32 period,
bool enable);
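A minimal usage sketch of the new SRG bitmap helpers declared above (not part of the patch); the wrapper name and the bitmap values are assumptions chosen only for illustration:

/* Hypothetical caller: enable spatial reuse against the lower 32 BSS colors
 * and leave the upper 32 disabled. The helper copies two u32 words into the
 * WMI command, so the caller must always supply both.
 */
static int example_enable_srg_colors(struct ath11k *ar)
{
	u32 bitmap[2] = { 0xffffffff, 0x0 };

	return ath11k_wmi_pdev_srg_obss_color_enable_bitmap(ar, bitmap);
}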

View File

@ -1223,8 +1223,11 @@ static ssize_t write_file_nf_override(struct file *file,
ah->nf_override = val;
if (ah->curchan)
if (ah->curchan) {
ath9k_ps_wakeup(sc);
ath9k_hw_loadnf(ah, ah->curchan);
ath9k_ps_restore(sc);
}
return count;
}

View File

@ -240,7 +240,7 @@ struct carl9170_cmd {
struct carl9170_bcn_ctrl_cmd bcn_ctrl;
struct carl9170_rx_filter_cmd rx_filter;
u8 data[CARL9170_MAX_CMD_PAYLOAD_LEN];
} __packed;
} __packed __aligned(4);
} __packed __aligned(4);
#define CARL9170_TX_STATUS_QUEUE 3

View File

@ -367,27 +367,27 @@ struct ar9170_rx_macstatus {
struct ar9170_rx_frame_single {
struct ar9170_rx_head phy_head;
struct ieee80211_hdr i3e;
struct ieee80211_hdr i3e __packed __aligned(2);
struct ar9170_rx_phystatus phy_tail;
struct ar9170_rx_macstatus macstatus;
} __packed;
};
struct ar9170_rx_frame_head {
struct ar9170_rx_head phy_head;
struct ieee80211_hdr i3e;
struct ieee80211_hdr i3e __packed __aligned(2);
struct ar9170_rx_macstatus macstatus;
} __packed;
};
struct ar9170_rx_frame_middle {
struct ieee80211_hdr i3e;
struct ieee80211_hdr i3e __packed __aligned(2);
struct ar9170_rx_macstatus macstatus;
} __packed;
};
struct ar9170_rx_frame_tail {
struct ieee80211_hdr i3e;
struct ieee80211_hdr i3e __packed __aligned(2);
struct ar9170_rx_phystatus phy_tail;
struct ar9170_rx_macstatus macstatus;
} __packed;
};
struct ar9170_rx_frame {
union {
@ -395,8 +395,8 @@ struct ar9170_rx_frame {
struct ar9170_rx_frame_head head;
struct ar9170_rx_frame_middle middle;
struct ar9170_rx_frame_tail tail;
} __packed;
} __packed;
};
};
static inline u8 ar9170_get_decrypt_type(struct ar9170_rx_macstatus *t)
{

View File

@ -1140,7 +1140,7 @@ static int wcn36xx_ampdu_action(struct ieee80211_hw *hw,
session);
break;
case IEEE80211_AMPDU_RX_STOP:
wcn36xx_smd_del_ba(wcn, tid, get_sta_index(vif, sta_priv));
wcn36xx_smd_del_ba(wcn, tid, 0, get_sta_index(vif, sta_priv));
break;
case IEEE80211_AMPDU_TX_START:
spin_lock_bh(&sta_priv->ampdu_lock);
@ -1164,6 +1164,7 @@ static int wcn36xx_ampdu_action(struct ieee80211_hw *hw,
sta_priv->ampdu_state[tid] = WCN36XX_AMPDU_NONE;
spin_unlock_bh(&sta_priv->ampdu_lock);
wcn36xx_smd_del_ba(wcn, tid, 1, get_sta_index(vif, sta_priv));
ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
break;
default:

View File

@ -485,7 +485,6 @@ static void init_hal_msg(struct wcn36xx_hal_msg_header *hdr,
#define PREPARE_HAL_PTT_MSG_BUF(send_buf, p_msg_body) \
do { \
memset(send_buf, 0, p_msg_body->header.len); \
memcpy(send_buf, p_msg_body, p_msg_body->header.len); \
} while (0)
@ -2467,7 +2466,7 @@ out:
return ret;
}
int wcn36xx_smd_del_ba(struct wcn36xx *wcn, u16 tid, u8 sta_index)
int wcn36xx_smd_del_ba(struct wcn36xx *wcn, u16 tid, u8 direction, u8 sta_index)
{
struct wcn36xx_hal_del_ba_req_msg msg_body;
int ret;
@ -2477,7 +2476,7 @@ int wcn36xx_smd_del_ba(struct wcn36xx *wcn, u16 tid, u8 sta_index)
msg_body.sta_index = sta_index;
msg_body.tid = tid;
msg_body.direction = 0;
msg_body.direction = direction;
PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);

View File

@ -135,7 +135,7 @@ int wcn36xx_smd_add_ba_session(struct wcn36xx *wcn,
u8 direction,
u8 sta_index);
int wcn36xx_smd_add_ba(struct wcn36xx *wcn, u8 session_id);
int wcn36xx_smd_del_ba(struct wcn36xx *wcn, u16 tid, u8 sta_index);
int wcn36xx_smd_del_ba(struct wcn36xx *wcn, u16 tid, u8 direction, u8 sta_index);
int wcn36xx_smd_trigger_ba(struct wcn36xx *wcn, u8 sta_index, u16 tid, u8 session_id);
int wcn36xx_smd_update_cfg(struct wcn36xx *wcn, u32 cfg_id, u32 value);
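For clarity (not part of the patch): the new third argument is the block-ack direction, and per the hunks above the driver now passes 0 when an RX aggregation session is stopped and 1 when a TX session is torn down. The sta_index below is assumed to come from get_sta_index() as in the mac80211 ampdu_action handler:

	wcn36xx_smd_del_ba(wcn, tid, 0, sta_index);	/* IEEE80211_AMPDU_RX_STOP */
	wcn36xx_smd_del_ba(wcn, tid, 1, sta_index);	/* TX aggregation teardown */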

View File

@ -441,7 +441,9 @@ int wil_cid_fill_sinfo(struct wil6210_vif *vif, int cid,
} __packed reply;
struct wil_net_stats *stats = &wil->sta[cid].stats;
int rc;
u8 txflag = RATE_INFO_FLAGS_DMG;
u8 tx_mcs, rx_mcs;
u8 tx_rate_flag = RATE_INFO_FLAGS_DMG;
u8 rx_rate_flag = RATE_INFO_FLAGS_DMG;
memset(&reply, 0, sizeof(reply));
@ -451,13 +453,15 @@ int wil_cid_fill_sinfo(struct wil6210_vif *vif, int cid,
if (rc)
return rc;
tx_mcs = le16_to_cpu(reply.evt.bf_mcs);
wil_dbg_wmi(wil, "Link status for CID %d MID %d: {\n"
" MCS %d TSF 0x%016llx\n"
" MCS %s TSF 0x%016llx\n"
" BF status 0x%08x RSSI %d SQI %d%%\n"
" Tx Tpt %d goodput %d Rx goodput %d\n"
" Sectors(rx:tx) my %d:%d peer %d:%d\n"
" Tx mode %d}\n",
cid, vif->mid, le16_to_cpu(reply.evt.bf_mcs),
cid, vif->mid, WIL_EXTENDED_MCS_CHECK(tx_mcs),
le64_to_cpu(reply.evt.tsf), reply.evt.status,
reply.evt.rssi,
reply.evt.sqi,
@ -481,12 +485,30 @@ int wil_cid_fill_sinfo(struct wil6210_vif *vif, int cid,
BIT_ULL(NL80211_STA_INFO_RX_DROP_MISC) |
BIT_ULL(NL80211_STA_INFO_TX_FAILED);
if (wil->use_enhanced_dma_hw && reply.evt.tx_mode != WMI_TX_MODE_DMG)
txflag = RATE_INFO_FLAGS_EDMG;
if (wil->use_enhanced_dma_hw && reply.evt.tx_mode != WMI_TX_MODE_DMG) {
tx_rate_flag = RATE_INFO_FLAGS_EDMG;
rx_rate_flag = RATE_INFO_FLAGS_EDMG;
}
rx_mcs = stats->last_mcs_rx;
/* check extended MCS (12.1) and convert it into
* base MCS (7) + EXTENDED_SC_DMG flag
*/
if (tx_mcs == WIL_EXTENDED_MCS_26) {
tx_rate_flag = RATE_INFO_FLAGS_EXTENDED_SC_DMG;
tx_mcs = WIL_BASE_MCS_FOR_EXTENDED_26;
}
if (rx_mcs == WIL_EXTENDED_MCS_26) {
rx_rate_flag = RATE_INFO_FLAGS_EXTENDED_SC_DMG;
rx_mcs = WIL_BASE_MCS_FOR_EXTENDED_26;
}
sinfo->txrate.flags = tx_rate_flag;
sinfo->rxrate.flags = rx_rate_flag;
sinfo->txrate.mcs = tx_mcs;
sinfo->rxrate.mcs = rx_mcs;
sinfo->txrate.flags = txflag;
sinfo->txrate.mcs = le16_to_cpu(reply.evt.bf_mcs);
sinfo->rxrate.mcs = stats->last_mcs_rx;
sinfo->txrate.n_bonded_ch =
wil_tx_cb_mode_to_n_bonded(reply.evt.tx_mode);
sinfo->rxrate.n_bonded_ch =

View File

@ -1294,6 +1294,7 @@ static int bf_show(struct seq_file *s, void *data)
for (i = 0; i < wil->max_assoc_sta; i++) {
u32 status;
u8 bf_mcs;
cmd.cid = i;
rc = wmi_call(wil, WMI_NOTIFY_REQ_CMDID, vif->mid,
@ -1305,9 +1306,10 @@ static int bf_show(struct seq_file *s, void *data)
continue;
status = le32_to_cpu(reply.evt.status);
bf_mcs = le16_to_cpu(reply.evt.bf_mcs);
seq_printf(s, "CID %d {\n"
" TSF = 0x%016llx\n"
" TxMCS = %2d TxTpt = %4d\n"
" TxMCS = %s TxTpt = %4d\n"
" SQI = %4d\n"
" RSSI = %4d\n"
" Status = 0x%08x %s\n"
@ -1316,7 +1318,7 @@ static int bf_show(struct seq_file *s, void *data)
"}\n",
i,
le64_to_cpu(reply.evt.tsf),
le16_to_cpu(reply.evt.bf_mcs),
WIL_EXTENDED_MCS_CHECK(bf_mcs),
le32_to_cpu(reply.evt.tx_tpt),
reply.evt.sqi,
reply.evt.rssi,
@ -1443,8 +1445,10 @@ static int link_show(struct seq_file *s, void *data)
if (rc)
goto out;
seq_printf(s, " Tx_mcs = %d\n", sinfo->txrate.mcs);
seq_printf(s, " Rx_mcs = %d\n", sinfo->rxrate.mcs);
seq_printf(s, " Tx_mcs = %s\n",
WIL_EXTENDED_MCS_CHECK(sinfo->txrate.mcs));
seq_printf(s, " Rx_mcs = %s\n",
WIL_EXTENDED_MCS_CHECK(sinfo->rxrate.mcs));
seq_printf(s, " SQ = %d\n", sinfo->signal);
} else {
seq_puts(s, " INVALID MID\n");
@ -1848,7 +1852,7 @@ static void wil_link_stats_print_basic(struct wil6210_vif *vif,
snprintf(per, sizeof(per), "%d%%", basic->per_average);
seq_printf(s, "CID %d {\n"
"\tTxMCS %d TxTpt %d\n"
"\tTxMCS %s TxTpt %d\n"
"\tGoodput(rx:tx) %d:%d\n"
"\tRxBcastFrames %d\n"
"\tRSSI %d SQI %d SNR %d PER %s\n"
@ -1856,7 +1860,8 @@ static void wil_link_stats_print_basic(struct wil6210_vif *vif,
"\tSectors(rx:tx) my %d:%d peer %d:%d\n"
"}\n",
basic->cid,
basic->bf_mcs, le32_to_cpu(basic->tx_tpt),
WIL_EXTENDED_MCS_CHECK(basic->bf_mcs),
le32_to_cpu(basic->tx_tpt),
le32_to_cpu(basic->rx_goodput),
le32_to_cpu(basic->tx_goodput),
le32_to_cpu(basic->rx_bcast_frames),

View File

@ -1026,6 +1026,8 @@ skipping:
stats->last_mcs_rx = wil_rx_status_get_mcs(msg);
if (stats->last_mcs_rx < ARRAY_SIZE(stats->rx_per_mcs))
stats->rx_per_mcs[stats->last_mcs_rx]++;
else if (stats->last_mcs_rx == WIL_EXTENDED_MCS_26)
stats->rx_per_mcs[WIL_BASE_MCS_FOR_EXTENDED_26]++;
stats->last_cb_mode_rx = wil_rx_status_get_cb_mode(msg);
}

View File

@ -89,6 +89,9 @@ static inline u32 WIL_GET_BITS(u32 x, int b0, int b1)
#define WIL_MAX_AGG_WSIZE_64 (64) /* FW/HW limit */
#define WIL6210_MAX_STATUS_RINGS (8)
#define WIL_WMI_CALL_GENERAL_TO_MS 100
#define WIL_EXTENDED_MCS_26 (26) /* FW reports MCS 12.1 to driver as "26" */
#define WIL_BASE_MCS_FOR_EXTENDED_26 (7) /* MCS 7 is base MCS for MCS 12.1 */
#define WIL_EXTENDED_MCS_CHECK(x) (((x) == WIL_EXTENDED_MCS_26) ? "12.1" : #x)
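A small sketch of how the extended MCS value is translated for cfg80211's struct rate_info, mirroring the cfg80211.c hunk earlier in this series; the helper name is hypothetical and this is illustration only, not part of the patch:

static void example_fill_dmg_rate(struct rate_info *ri, u8 fw_mcs)
{
	ri->flags = RATE_INFO_FLAGS_DMG;
	ri->mcs = fw_mcs;

	/* FW reports MCS 12.1 as 26; expose it as base MCS 7 plus the
	 * extended SC DMG flag so user space can tell the rates apart.
	 */
	if (fw_mcs == WIL_EXTENDED_MCS_26) {
		ri->flags = RATE_INFO_FLAGS_EXTENDED_SC_DMG;
		ri->mcs = WIL_BASE_MCS_FOR_EXTENDED_26;
	}
}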
/* Hardware offload block adds the following:
* 26 bytes - 3-address QoS data header

View File

@ -851,9 +851,9 @@ static void wmi_evt_rx_mgmt(struct wil6210_vif *vif, int id, void *d, int len)
d_status = le16_to_cpu(data->info.status);
fc = rx_mgmt_frame->frame_control;
wil_dbg_wmi(wil, "MGMT Rx: channel %d MCS %d RSSI %d SQI %d%%\n",
data->info.channel, data->info.mcs, data->info.rssi,
data->info.sqi);
wil_dbg_wmi(wil, "MGMT Rx: channel %d MCS %s RSSI %d SQI %d%%\n",
data->info.channel, WIL_EXTENDED_MCS_CHECK(data->info.mcs),
data->info.rssi, data->info.sqi);
wil_dbg_wmi(wil, "status 0x%04x len %d fc 0x%04x\n", d_status, d_len,
le16_to_cpu(fc));
wil_dbg_wmi(wil, "qid %d mid %d cid %d\n",
@ -1422,8 +1422,9 @@ wmi_evt_sched_scan_result(struct wil6210_vif *vif, int id, void *d, int len)
else
signal = data->info.sqi;
wil_dbg_wmi(wil, "sched scan result: channel %d MCS %d RSSI %d\n",
data->info.channel, data->info.mcs, data->info.rssi);
wil_dbg_wmi(wil, "sched scan result: channel %d MCS %s RSSI %d\n",
data->info.channel, WIL_EXTENDED_MCS_CHECK(data->info.mcs),
data->info.rssi);
wil_dbg_wmi(wil, "len %d qid %d mid %d cid %d\n",
d_len, data->info.qid, data->info.mid, data->info.cid);
wil_hex_dump_wmi("PROBE ", DUMP_PREFIX_OFFSET, 16, 1, rx_mgmt_frame,

View File

@ -101,7 +101,7 @@ do { \
static uint at76_debug = DBG_DEFAULTS;
/* Protect against concurrent firmware loading and parsing */
static struct mutex fw_mutex;
static DEFINE_MUTEX(fw_mutex);
static struct fwentry firmwares[] = {
[0] = { "" },
@ -2572,8 +2572,6 @@ static int __init at76_mod_init(void)
printk(KERN_INFO DRIVER_DESC " " DRIVER_VERSION " loading\n");
mutex_init(&fw_mutex);
/* register this driver with the USB subsystem */
result = usb_register(&at76_driver);
if (result < 0)

View File

@ -5196,6 +5196,48 @@ exit:
return err;
}
static int brcmf_cfg80211_set_cqm_rssi_range_config(struct wiphy *wiphy,
struct net_device *ndev,
s32 rssi_low, s32 rssi_high)
{
struct brcmf_cfg80211_vif *vif;
struct brcmf_if *ifp;
int err = 0;
brcmf_dbg(TRACE, "low=%d high=%d", rssi_low, rssi_high);
ifp = netdev_priv(ndev);
vif = ifp->vif;
if (rssi_low != vif->cqm_rssi_low || rssi_high != vif->cqm_rssi_high) {
/* The firmware will send an event when the RSSI is less than or
* equal to a configured level and the previous RSSI event was
* less than or equal to a different level. Set a third level
* so that we also detect the transition from rssi <= rssi_high
* to rssi > rssi_high.
*/
struct brcmf_rssi_event_le config = {
.rate_limit_msec = cpu_to_le32(0),
.rssi_level_num = 3,
.rssi_levels = {
clamp_val(rssi_low, S8_MIN, S8_MAX - 2),
clamp_val(rssi_high, S8_MIN + 1, S8_MAX - 1),
S8_MAX,
},
};
err = brcmf_fil_iovar_data_set(ifp, "rssi_event", &config,
sizeof(config));
if (err) {
err = -EINVAL;
} else {
vif->cqm_rssi_low = rssi_low;
vif->cqm_rssi_high = rssi_high;
}
}
return err;
}
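As a worked example (the dBm values are hypothetical, not from the patch): with rssi_low = -80 and rssi_high = -60 the clamping above leaves both values unchanged, so the iovar is programmed with three ascending levels and the firmware reports both the downward and the upward crossing:

	struct brcmf_rssi_event_le config = {
		.rate_limit_msec = cpu_to_le32(0),
		.rssi_level_num = 3,
		.rssi_levels = { -80, -60, S8_MAX },
	};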
static int
brcmf_cfg80211_cancel_remain_on_channel(struct wiphy *wiphy,
@ -5502,6 +5544,7 @@ static struct cfg80211_ops brcmf_cfg80211_ops = {
.update_mgmt_frame_registrations =
brcmf_cfg80211_update_mgmt_frame_registrations,
.mgmt_tx = brcmf_cfg80211_mgmt_tx,
.set_cqm_rssi_range_config = brcmf_cfg80211_set_cqm_rssi_range_config,
.remain_on_channel = brcmf_p2p_remain_on_channel,
.cancel_remain_on_channel = brcmf_cfg80211_cancel_remain_on_channel,
.get_channel = brcmf_cfg80211_get_channel,
@ -6140,6 +6183,47 @@ brcmf_notify_mic_status(struct brcmf_if *ifp,
return 0;
}
static s32 brcmf_notify_rssi(struct brcmf_if *ifp,
const struct brcmf_event_msg *e, void *data)
{
struct brcmf_cfg80211_vif *vif = ifp->vif;
struct brcmf_rssi_be *info = data;
s32 rssi, snr, noise;
s32 low, high, last;
if (e->datalen < sizeof(*info)) {
brcmf_err("insufficient RSSI event data\n");
return 0;
}
rssi = be32_to_cpu(info->rssi);
snr = be32_to_cpu(info->snr);
noise = be32_to_cpu(info->noise);
low = vif->cqm_rssi_low;
high = vif->cqm_rssi_high;
last = vif->cqm_rssi_last;
brcmf_dbg(TRACE, "rssi=%d snr=%d noise=%d low=%d high=%d last=%d\n",
rssi, snr, noise, low, high, last);
vif->cqm_rssi_last = rssi;
if (rssi <= low || rssi == 0) {
brcmf_dbg(INFO, "LOW rssi=%d\n", rssi);
cfg80211_cqm_rssi_notify(ifp->ndev,
NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW,
rssi, GFP_KERNEL);
} else if (rssi > high) {
brcmf_dbg(INFO, "HIGH rssi=%d\n", rssi);
cfg80211_cqm_rssi_notify(ifp->ndev,
NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH,
rssi, GFP_KERNEL);
}
return 0;
}
static s32 brcmf_notify_vif_event(struct brcmf_if *ifp,
const struct brcmf_event_msg *e, void *data)
{
@ -6238,6 +6322,7 @@ static void brcmf_register_event_handlers(struct brcmf_cfg80211_info *cfg)
brcmf_p2p_notify_action_tx_complete);
brcmf_fweh_register(cfg->pub, BRCMF_E_PSK_SUP,
brcmf_notify_connect_status);
brcmf_fweh_register(cfg->pub, BRCMF_E_RSSI, brcmf_notify_rssi);
}
static void brcmf_deinit_priv_mem(struct brcmf_cfg80211_info *cfg)
@ -7172,6 +7257,8 @@ static int brcmf_setup_wiphy(struct wiphy *wiphy, struct brcmf_if *ifp)
wiphy_ext_feature_set(wiphy,
NL80211_EXT_FEATURE_DFS_OFFLOAD);
wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST);
wiphy_read_of_freq_limits(wiphy);
return 0;

View File

@ -213,6 +213,9 @@ struct vif_saved_ie {
* @list: linked list.
* @mgmt_rx_reg: registered rx mgmt frame types.
* @mbss: Multiple BSS type, set if not first AP (not relevant for P2P).
* @cqm_rssi_low: Lower RSSI limit for CQM monitoring
* @cqm_rssi_high: Upper RSSI limit for CQM monitoring
* @cqm_rssi_last: Last RSSI reading for CQM monitoring
*/
struct brcmf_cfg80211_vif {
struct brcmf_if *ifp;
@ -224,6 +227,9 @@ struct brcmf_cfg80211_vif {
u16 mgmt_rx_reg;
bool mbss;
int is_11d;
s32 cqm_rssi_low;
s32 cqm_rssi_high;
s32 cqm_rssi_last;
};
/* association inform */

View File

@ -40,6 +40,18 @@ static const struct brcmf_dmi_data pov_tab_p1006w_data = {
BRCM_CC_43340_CHIP_ID, 2, "pov-tab-p1006w-data"
};
static const struct brcmf_dmi_data predia_basic_data = {
BRCM_CC_43341_CHIP_ID, 2, "predia-basic"
};
/* Note the Voyo winpad A15 tablet uses the same Ampak AP6330 module, with the
* exact same nvram file as the Prowise-PT301 tablet. Since the nvram for the
* Prowise-PT301 is already in linux-firmware we just point to that here.
*/
static const struct brcmf_dmi_data voyo_winpad_a15_data = {
BRCM_CC_4330_CHIP_ID, 4, "Prowise-PT301"
};
static const struct dmi_system_id dmi_platform_data[] = {
{
/* ACEPC T8 Cherry Trail Z8350 mini PC */
@ -111,6 +123,26 @@ static const struct dmi_system_id dmi_platform_data[] = {
},
.driver_data = (void *)&pov_tab_p1006w_data,
},
{
/* Predia Basic tablet (+ with keyboard dock) */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Insyde"),
DMI_MATCH(DMI_PRODUCT_NAME, "CherryTrail"),
/* Mx.WT107.KUBNGEA02 with the version-nr dropped */
DMI_MATCH(DMI_BIOS_VERSION, "Mx.WT107.KUBNGEA"),
},
.driver_data = (void *)&predia_basic_data,
},
{
/* Voyo winpad A15 tablet */
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "AMI Corporation"),
DMI_MATCH(DMI_BOARD_NAME, "Aptio CRB"),
/* Above strings are too generic, also match on BIOS date */
DMI_MATCH(DMI_BIOS_DATE, "11/20/2014"),
},
.driver_data = (void *)&voyo_winpad_a15_data,
},
{}
};

View File

@ -752,6 +752,34 @@ struct brcmf_assoclist_le {
u8 mac[BRCMF_MAX_ASSOCLIST][ETH_ALEN];
};
/**
* struct brcmf_rssi_be - RSSI threshold event format
*
* @rssi: receive signal strength (in dBm)
* @snr: signal-noise ratio
* @noise: noise (in dBm)
*/
struct brcmf_rssi_be {
__be32 rssi;
__be32 snr;
__be32 noise;
};
#define BRCMF_MAX_RSSI_LEVELS 8
/**
* struct brcmf_rssi_event_le - rssi_event IOVAR format

*
* @rate_limit_msec: RSSI event rate limit
* @rssi_level_num: number of supplied RSSI levels
* @rssi_levels: RSSI levels in ascending order
*/
struct brcmf_rssi_event_le {
__le32 rate_limit_msec;
s8 rssi_level_num;
s8 rssi_levels[BRCMF_MAX_RSSI_LEVELS];
};
/**
* struct brcmf_wowl_wakeind_le - Wakeup indicators
* Note: note both fields contain same information.

View File

@ -783,7 +783,7 @@ struct d11txh {
u8 RTSPhyHeader[D11_PHY_HDR_LEN]; /* 0x2c - 0x2e */
struct ieee80211_rts rts_frame; /* 0x2f - 0x36 */
u16 PAD; /* 0x37 */
} __packed;
} __packed __aligned(2);
#define D11_TXH_LEN 112 /* bytes */
@ -1469,7 +1469,7 @@ struct d11rxhdr {
/* htphy PhyRxStatus_1: */
/* core enables for {3..0}, 0=disabled, 1=enabled */
#define PRXS1_HTPHY_CORE_MASK 0x000F
/* antenna configation */
/* antenna configuration */
#define PRXS1_HTPHY_ANTCFG_MASK 0x00F0
/* Mixmode PLCP Length low byte mask */
#define PRXS1_HTPHY_MMPLCPLenL_MASK 0xFF00

View File

@ -2593,8 +2593,7 @@ out:
*/
if (ret != IL_INVALID_STATION &&
(!(il->stations[ret].used & IL_STA_UCODE_ACTIVE) ||
((il->stations[ret].used & IL_STA_UCODE_ACTIVE) &&
(il->stations[ret].used & IL_STA_UCODE_INPROGRESS)))) {
(il->stations[ret].used & IL_STA_UCODE_INPROGRESS))) {
IL_ERR("Requested station info for sta %d before ready.\n",
ret);
ret = IL_INVALID_STATION;

View File

@ -2,7 +2,7 @@
/******************************************************************************
*
* Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2018 - 2019 Intel Corporation
* Copyright(c) 2018 - 2020 Intel Corporation
*
* Contact Information:
* Intel Linux Wireless <linuxwifi@intel.com>
@ -76,8 +76,7 @@ static const struct iwl_eeprom_params iwl1000_eeprom_params = {
.nvm_calib_ver = EEPROM_1000_TX_POWER_VERSION, \
.trans.base_params = &iwl1000_base_params, \
.eeprom_params = &iwl1000_eeprom_params, \
.led_mode = IWL_LED_BLINK, \
.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
.led_mode = IWL_LED_BLINK
const struct iwl_cfg iwl1000_bgn_cfg = {
.name = "Intel(R) Centrino(R) Wireless-N 1000 BGN",
@ -102,8 +101,7 @@ const struct iwl_cfg iwl1000_bg_cfg = {
.trans.base_params = &iwl1000_base_params, \
.eeprom_params = &iwl1000_eeprom_params, \
.led_mode = IWL_LED_RF_STATE, \
.rx_with_siso_diversity = true, \
.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
.rx_with_siso_diversity = true
const struct iwl_cfg iwl100_bgn_cfg = {
.name = "Intel(R) Centrino(R) Wireless-N 100 BGN",

View File

@ -2,7 +2,7 @@
/******************************************************************************
*
* Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2018 - 2019 Intel Corporation
* Copyright(c) 2018 - 2020 Intel Corporation
*
* Contact Information:
* Intel Linux Wireless <linuxwifi@intel.com>
@ -102,8 +102,7 @@ static const struct iwl_eeprom_params iwl20x0_eeprom_params = {
.nvm_calib_ver = EEPROM_2000_TX_POWER_VERSION, \
.trans.base_params = &iwl2000_base_params, \
.eeprom_params = &iwl20x0_eeprom_params, \
.led_mode = IWL_LED_RF_STATE, \
.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
.led_mode = IWL_LED_RF_STATE
const struct iwl_cfg iwl2000_2bgn_cfg = {
@ -129,8 +128,7 @@ const struct iwl_cfg iwl2000_2bgn_d_cfg = {
.nvm_calib_ver = EEPROM_2000_TX_POWER_VERSION, \
.trans.base_params = &iwl2030_base_params, \
.eeprom_params = &iwl20x0_eeprom_params, \
.led_mode = IWL_LED_RF_STATE, \
.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
.led_mode = IWL_LED_RF_STATE
const struct iwl_cfg iwl2030_2bgn_cfg = {
.name = "Intel(R) Centrino(R) Wireless-N 2230 BGN",
@ -150,8 +148,7 @@ const struct iwl_cfg iwl2030_2bgn_cfg = {
.trans.base_params = &iwl2000_base_params, \
.eeprom_params = &iwl20x0_eeprom_params, \
.led_mode = IWL_LED_RF_STATE, \
.rx_with_siso_diversity = true, \
.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
.rx_with_siso_diversity = true
const struct iwl_cfg iwl105_bgn_cfg = {
.name = "Intel(R) Centrino(R) Wireless-N 105 BGN",
@ -177,8 +174,7 @@ const struct iwl_cfg iwl105_bgn_d_cfg = {
.trans.base_params = &iwl2030_base_params, \
.eeprom_params = &iwl20x0_eeprom_params, \
.led_mode = IWL_LED_RF_STATE, \
.rx_with_siso_diversity = true, \
.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
.rx_with_siso_diversity = true
const struct iwl_cfg iwl135_bgn_cfg = {
.name = "Intel(R) Centrino(R) Wireless-N 135 BGN",

View File

@ -9,7 +9,7 @@
#include "iwl-prph.h"
/* Highest firmware API version supported */
#define IWL_22000_UCODE_API_MAX 59
#define IWL_22000_UCODE_API_MAX 62
/* Lowest firmware API version supported */
#define IWL_22000_UCODE_API_MIN 39
@ -42,7 +42,10 @@
#define IWL_SNJ_A_GF4_A_FW_PRE "iwlwifi-SoSnj-a0-gf4-a0-"
#define IWL_SNJ_A_GF_A_FW_PRE "iwlwifi-SoSnj-a0-gf-a0-"
#define IWL_SNJ_A_HR_B_FW_PRE "iwlwifi-SoSnj-a0-hr-b0-"
#define IWL_SNJ_A_JF_B_FW_PRE "iwlwifi-SoSnj-a0-jf-b0-"
#define IWL_MA_A_HR_B_FW_PRE "iwlwifi-ma-a0-hr-b0-"
#define IWL_MA_A_GF_A_FW_PRE "iwlwifi-ma-a0-gf-a0-"
#define IWL_MA_A_GF4_A_FW_PRE "iwlwifi-ma-a0-gf4-a0-"
#define IWL_MA_A_MR_A_FW_PRE "iwlwifi-ma-a0-mr-a0-"
#define IWL_SNJ_A_MR_A_FW_PRE "iwlwifi-SoSnj-a0-mr-a0-"
@ -76,8 +79,14 @@
IWL_SNJ_A_GF_A_FW_PRE __stringify(api) ".ucode"
#define IWL_SNJ_A_HR_B_MODULE_FIRMWARE(api) \
IWL_SNJ_A_HR_B_FW_PRE __stringify(api) ".ucode"
#define IWL_MA_A_GF_A_FW_MODULE_FIRMWARE(api) \
#define IWL_SNJ_A_JF_B_MODULE_FIRMWARE(api) \
IWL_SNJ_A_JF_B_FW_PRE __stringify(api) ".ucode"
#define IWL_MA_A_HR_B_FW_MODULE_FIRMWARE(api) \
IWL_MA_A_HR_B_FW_PRE __stringify(api) ".ucode"
#define IWL_MA_A_GF_A_FW_MODULE_FIRMWARE(api) \
IWL_MA_A_GF_A_FW_PRE __stringify(api) ".ucode"
#define IWL_MA_A_GF4_A_FW_MODULE_FIRMWARE(api) \
IWL_MA_A_GF4_A_FW_PRE __stringify(api) ".ucode"
#define IWL_MA_A_MR_A_FW_MODULE_FIRMWARE(api) \
IWL_MA_A_MR_A_FW_PRE __stringify(api) ".ucode"
#define IWL_SNJ_A_MR_A_MODULE_FIRMWARE(api) \
@ -133,7 +142,6 @@ static const struct iwl_ht_params iwl_22000_ht_params = {
.mac_addr_from_csr = true, \
.ht_params = &iwl_22000_ht_params, \
.nvm_ver = IWL_22000_NVM_VERSION, \
.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, \
.trans.use_tfh = true, \
.trans.rf_id = true, \
.trans.gen2 = true, \
@ -238,6 +246,44 @@ const struct iwl_cfg_trans_params iwl_qu_long_latency_trans_cfg = {
.ltr_delay = IWL_CFG_TRANS_LTR_DELAY_2500US,
};
const struct iwl_cfg_trans_params iwl_snj_trans_cfg = {
.mq_rx_supported = true,
.use_tfh = true,
.rf_id = true,
.gen2 = true,
.device_family = IWL_DEVICE_FAMILY_AX210,
.base_params = &iwl_ax210_base_params,
.umac_prph_offset = 0x300000,
};
const struct iwl_cfg_trans_params iwl_so_trans_cfg = {
.mq_rx_supported = true,
.use_tfh = true,
.rf_id = true,
.gen2 = true,
.device_family = IWL_DEVICE_FAMILY_AX210,
.base_params = &iwl_ax210_base_params,
.umac_prph_offset = 0x300000,
.integrated = true,
/* TODO: the following values need to be checked */
.xtal_latency = 500,
.ltr_delay = IWL_CFG_TRANS_LTR_DELAY_200US,
};
const struct iwl_cfg_trans_params iwl_so_long_latency_trans_cfg = {
.mq_rx_supported = true,
.use_tfh = true,
.rf_id = true,
.gen2 = true,
.device_family = IWL_DEVICE_FAMILY_AX210,
.base_params = &iwl_ax210_base_params,
.umac_prph_offset = 0x300000,
.integrated = true,
/* TODO: the following values need to be checked */
.xtal_latency = 12000,
.ltr_delay = IWL_CFG_TRANS_LTR_DELAY_2500US,
};
/*
* If the device doesn't support HE, no need to have that many buffers.
* 22000 devices can split multiple frames into a single RB, so fewer are
@ -606,9 +652,22 @@ const struct iwl_cfg iwlax211_cfg_snj_gf_a0 = {
.num_rbds = IWL_NUM_RBDS_AX210_HE,
};
const struct iwl_cfg iwlax201_cfg_snj_hr_b0 = {
.name = iwl_ax201_name,
.fw_name_pre = IWL_QU_B_HR_B_FW_PRE,
const struct iwl_cfg iwl_cfg_snj_hr_b0 = {
.fw_name_pre = IWL_SNJ_A_HR_B_FW_PRE,
.uhb_supported = true,
IWL_DEVICE_AX210,
.num_rbds = IWL_NUM_RBDS_AX210_HE,
};
const struct iwl_cfg iwl_cfg_snj_a0_jf_b0 = {
.fw_name_pre = IWL_SNJ_A_JF_B_FW_PRE,
.uhb_supported = true,
IWL_DEVICE_AX210,
.num_rbds = IWL_NUM_RBDS_AX210_HE,
};
const struct iwl_cfg iwl_cfg_ma_a0_hr_b0 = {
.fw_name_pre = IWL_MA_A_HR_B_FW_PRE,
.uhb_supported = true,
IWL_DEVICE_AX210,
.num_rbds = IWL_NUM_RBDS_AX210_HE,
@ -621,6 +680,13 @@ const struct iwl_cfg iwl_cfg_ma_a0_gf_a0 = {
.num_rbds = IWL_NUM_RBDS_AX210_HE,
};
const struct iwl_cfg iwl_cfg_ma_a0_gf4_a0 = {
.fw_name_pre = IWL_MA_A_GF4_A_FW_PRE,
.uhb_supported = true,
IWL_DEVICE_AX210,
.num_rbds = IWL_NUM_RBDS_AX210_HE,
};
const struct iwl_cfg iwl_cfg_ma_a0_mr_a0 = {
.fw_name_pre = IWL_MA_A_MR_A_FW_PRE,
.uhb_supported = true,
@ -635,6 +701,24 @@ const struct iwl_cfg iwl_cfg_snj_a0_mr_a0 = {
.num_rbds = IWL_NUM_RBDS_AX210_HE,
};
const struct iwl_cfg iwl_cfg_so_a0_hr_a0 = {
.fw_name_pre = IWL_SO_A_HR_B_FW_PRE,
IWL_DEVICE_AX210,
.num_rbds = IWL_NUM_RBDS_AX210_HE,
};
const struct iwl_cfg iwl_cfg_quz_a0_hr_b0 = {
.fw_name_pre = IWL_QUZ_A_HR_B_FW_PRE,
IWL_DEVICE_22500,
/*
* This device doesn't support receiving BlockAck with a large bitmap
* so we need to restrict the size of transmitted aggregation to the
* HT size; mac80211 would otherwise pick the HE max (256) by default.
*/
.max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
.num_rbds = IWL_NUM_RBDS_22000_HE,
};
MODULE_FIRMWARE(IWL_QU_B_HR_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_QNJ_B_HR_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_QU_C_HR_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
@ -650,6 +734,9 @@ MODULE_FIRMWARE(IWL_TY_A_GF_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_SNJ_A_GF4_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_SNJ_A_GF_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_SNJ_A_HR_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_SNJ_A_JF_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_MA_A_HR_B_FW_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_MA_A_GF_A_FW_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_MA_A_GF4_A_FW_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_MA_A_MR_A_FW_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_SNJ_A_MR_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));

View File

@ -2,7 +2,7 @@
/******************************************************************************
*
* Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2018 - 2019 Intel Corporation
* Copyright(c) 2018 - 2020 Intel Corporation
*
* Contact Information:
* Intel Linux Wireless <linuxwifi@intel.com>
@ -74,8 +74,7 @@ static const struct iwl_eeprom_params iwl5000_eeprom_params = {
.nvm_calib_ver = EEPROM_5000_TX_POWER_VERSION, \
.trans.base_params = &iwl5000_base_params, \
.eeprom_params = &iwl5000_eeprom_params, \
.led_mode = IWL_LED_BLINK, \
.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
.led_mode = IWL_LED_BLINK
const struct iwl_cfg iwl5300_agn_cfg = {
.name = "Intel(R) Ultimate N WiFi Link 5300 AGN",
@ -138,8 +137,7 @@ const struct iwl_cfg iwl5350_agn_cfg = {
.trans.base_params = &iwl5000_base_params, \
.eeprom_params = &iwl5000_eeprom_params, \
.led_mode = IWL_LED_BLINK, \
.internal_wimax_coex = true, \
.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
.internal_wimax_coex = true
const struct iwl_cfg iwl5150_agn_cfg = {
.name = "Intel(R) WiMAX/WiFi Link 5150 AGN",

View File

@ -2,7 +2,7 @@
/******************************************************************************
*
* Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2018 - 2019 Intel Corporation
* Copyright(c) 2018 - 2020 Intel Corporation
*
* Contact Information:
* Intel Linux Wireless <linuxwifi@intel.com>
@ -123,8 +123,7 @@ static const struct iwl_eeprom_params iwl6000_eeprom_params = {
.nvm_calib_ver = EEPROM_6005_TX_POWER_VERSION, \
.trans.base_params = &iwl6000_g2_base_params, \
.eeprom_params = &iwl6000_eeprom_params, \
.led_mode = IWL_LED_RF_STATE, \
.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
.led_mode = IWL_LED_RF_STATE
const struct iwl_cfg iwl6005_2agn_cfg = {
.name = "Intel(R) Centrino(R) Advanced-N 6205 AGN",
@ -177,8 +176,7 @@ const struct iwl_cfg iwl6005_2agn_mow2_cfg = {
.nvm_calib_ver = EEPROM_6030_TX_POWER_VERSION, \
.trans.base_params = &iwl6000_g2_base_params, \
.eeprom_params = &iwl6000_eeprom_params, \
.led_mode = IWL_LED_RF_STATE, \
.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
.led_mode = IWL_LED_RF_STATE
const struct iwl_cfg iwl6030_2agn_cfg = {
.name = "Intel(R) Centrino(R) Advanced-N 6230 AGN",
@ -213,8 +211,7 @@ const struct iwl_cfg iwl6030_2bg_cfg = {
.nvm_calib_ver = EEPROM_6030_TX_POWER_VERSION, \
.trans.base_params = &iwl6000_g2_base_params, \
.eeprom_params = &iwl6000_eeprom_params, \
.led_mode = IWL_LED_RF_STATE, \
.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
.led_mode = IWL_LED_RF_STATE
const struct iwl_cfg iwl6035_2agn_cfg = {
.name = "Intel(R) Centrino(R) Advanced-N 6235 AGN",
@ -268,8 +265,7 @@ const struct iwl_cfg iwl130_bg_cfg = {
.nvm_calib_ver = EEPROM_6000_TX_POWER_VERSION, \
.trans.base_params = &iwl6000_base_params, \
.eeprom_params = &iwl6000_eeprom_params, \
.led_mode = IWL_LED_BLINK, \
.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
.led_mode = IWL_LED_BLINK
const struct iwl_cfg iwl6000i_2agn_cfg = {
.name = "Intel(R) Centrino(R) Advanced-N 6200 AGN",
@ -301,8 +297,7 @@ const struct iwl_cfg iwl6000i_2bg_cfg = {
.trans.base_params = &iwl6050_base_params, \
.eeprom_params = &iwl6000_eeprom_params, \
.led_mode = IWL_LED_BLINK, \
.internal_wimax_coex = true, \
.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
.internal_wimax_coex = true
const struct iwl_cfg iwl6050_2agn_cfg = {
.name = "Intel(R) Centrino(R) Advanced-N + WiMAX 6250 AGN",
@ -327,8 +322,7 @@ const struct iwl_cfg iwl6050_2abg_cfg = {
.trans.base_params = &iwl6050_base_params, \
.eeprom_params = &iwl6000_eeprom_params, \
.led_mode = IWL_LED_BLINK, \
.internal_wimax_coex = true, \
.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
.internal_wimax_coex = true
const struct iwl_cfg iwl6150_bgn_cfg = {
.name = "Intel(R) Centrino(R) Wireless-N + WiMAX 6150 BGN",

View File

@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright (C) 2012-2014, 2018-2019 Intel Corporation
* Copyright (C) 2012-2014, 2018-2020 Intel Corporation
* Copyright (C) 2013-2014 Intel Mobile Communications GmbH
* Copyright (C) 2015 Intel Deutschland GmbH
*/
@ -95,7 +95,6 @@ static const struct iwl_ht_params iwl7000_ht_params = {
.led_mode = IWL_LED_RF_STATE, \
.nvm_hw_section_num = 0, \
.non_shared_ant = ANT_A, \
.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, \
.dccm_offset = IWL7000_DCCM_OFFSET
#define IWL_DEVICE_7000 \

View File

@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright (C) 2014, 2018-2019 Intel Corporation
* Copyright (C) 2014, 2018-2020 Intel Corporation
* Copyright (C) 2014-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016 Intel Deutschland GmbH
*/
@ -125,7 +125,6 @@ const struct iwl_cfg iwl8260_2ac_cfg = {
IWL_DEVICE_8260,
.ht_params = &iwl8000_ht_params,
.nvm_ver = IWL8000_NVM_VERSION,
.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
};
const struct iwl_cfg iwl8265_2ac_cfg = {
@ -134,7 +133,6 @@ const struct iwl_cfg iwl8265_2ac_cfg = {
IWL_DEVICE_8265,
.ht_params = &iwl8000_ht_params,
.nvm_ver = IWL8000_NVM_VERSION,
.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
.vht_mu_mimo_supported = true,
};
@ -144,7 +142,6 @@ const struct iwl_cfg iwl8275_2ac_cfg = {
IWL_DEVICE_8265,
.ht_params = &iwl8000_ht_params,
.nvm_ver = IWL8000_NVM_VERSION,
.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
.vht_mu_mimo_supported = true,
};
@ -154,7 +151,6 @@ const struct iwl_cfg iwl4165_2ac_cfg = {
IWL_DEVICE_8000,
.ht_params = &iwl8000_ht_params,
.nvm_ver = IWL8000_NVM_VERSION,
.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
};
MODULE_FIRMWARE(IWL8000_MODULE_FIRMWARE(IWL8000_UCODE_API_MAX));

View File

@ -97,7 +97,6 @@ static const struct iwl_tt_params iwl9000_tt_params = {
.d3_debug_data_length = 92 * 1024, \
.ht_params = &iwl9000_ht_params, \
.nvm_ver = IWL9000_NVM_VERSION, \
.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, \
.mon_smem_regs = { \
.write_ptr = { \
.addr = LDBG_M2S_BUF_WPTR, \

View File

@ -406,7 +406,6 @@ static void iwl_print_cont_event_trace(struct iwl_priv *priv, u32 base,
u32 i;
u32 ptr; /* SRAM byte address of log data */
u32 ev, time, data; /* event log data */
unsigned long reg_flags;
if (mode == 0)
ptr = base + (4 * sizeof(u32)) + (start_idx * 2 * sizeof(u32));
@ -414,7 +413,7 @@ static void iwl_print_cont_event_trace(struct iwl_priv *priv, u32 base,
ptr = base + (4 * sizeof(u32)) + (start_idx * 3 * sizeof(u32));
/* Make sure device is powered up for SRAM reads */
if (!iwl_trans_grab_nic_access(priv->trans, &reg_flags))
if (!iwl_trans_grab_nic_access(priv->trans))
return;
/* Set starting address; reads will auto-increment */
@ -446,7 +445,7 @@ static void iwl_print_cont_event_trace(struct iwl_priv *priv, u32 base,
}
}
/* Allow device to power down */
iwl_trans_release_nic_access(priv->trans, &reg_flags);
iwl_trans_release_nic_access(priv->trans);
}
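The same conversion repeats throughout this series: iwl_trans_grab_nic_access() and iwl_trans_release_nic_access() no longer take a flags cookie. A minimal sketch of the new pattern, not tied to any particular caller:

	if (!iwl_trans_grab_nic_access(trans))
		return;		/* device could not be woken for SRAM/PRPH reads */

	/* ... register or SRAM accesses ... */

	iwl_trans_release_nic_access(trans);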
static void iwl_continuous_event_trace(struct iwl_priv *priv)
@ -1694,7 +1693,6 @@ static int iwl_print_event_log(struct iwl_priv *priv, u32 start_idx,
u32 event_size; /* 2 u32s, or 3 u32s if timestamp recorded */
u32 ptr; /* SRAM byte address of log data */
u32 ev, time, data; /* event log data */
unsigned long reg_flags;
struct iwl_trans *trans = priv->trans;
@ -1718,7 +1716,7 @@ static int iwl_print_event_log(struct iwl_priv *priv, u32 start_idx,
ptr = base + EVENT_START_OFFSET + (start_idx * event_size);
/* Make sure device is powered up for SRAM reads */
if (!iwl_trans_grab_nic_access(trans, &reg_flags))
if (!iwl_trans_grab_nic_access(trans))
return pos;
/* Set starting address; reads will auto-increment */
@ -1757,7 +1755,7 @@ static int iwl_print_event_log(struct iwl_priv *priv, u32 start_idx,
}
/* Allow device to power down */
iwl_trans_release_nic_access(trans, &reg_flags);
iwl_trans_release_nic_access(trans);
return pos;
}

View File

@ -2,7 +2,7 @@
/******************************************************************************
*
* Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
* Copyright (C) 2018 Intel Corporation
* Copyright (C) 2018, 2020 Intel Corporation
*
* Portions of this file are derived from the ipw3945 project, as well
* as portions of the ieee80211 subsystem header files.
@ -155,7 +155,6 @@ static void iwl_tt_check_exit_ct_kill(struct timer_list *t)
struct iwl_priv *priv = from_timer(priv, t,
thermal_throttle.ct_kill_exit_tm);
struct iwl_tt_mgmt *tt = &priv->thermal_throttle;
unsigned long flags;
if (test_bit(STATUS_EXIT_PENDING, &priv->status))
return;
@ -171,8 +170,8 @@ static void iwl_tt_check_exit_ct_kill(struct timer_list *t)
priv->thermal_throttle.ct_kill_toggle = true;
}
iwl_read32(priv->trans, CSR_UCODE_DRV_GP1);
if (iwl_trans_grab_nic_access(priv->trans, &flags))
iwl_trans_release_nic_access(priv->trans, &flags);
if (iwl_trans_grab_nic_access(priv->trans))
iwl_trans_release_nic_access(priv->trans);
/* Reschedule the ct_kill timer to occur in
* CT_KILL_EXIT_DURATION seconds to ensure we get a

View File

@ -9,9 +9,15 @@
#include "acpi.h"
#include "fw/runtime.h"
static const guid_t intel_wifi_guid = GUID_INIT(0xF21202BF, 0x8F78, 0x4DC6,
0xA5, 0xB3, 0x1F, 0x73,
0x8E, 0x28, 0x5A, 0xDE);
const guid_t iwl_guid = GUID_INIT(0xF21202BF, 0x8F78, 0x4DC6,
0xA5, 0xB3, 0x1F, 0x73,
0x8E, 0x28, 0x5A, 0xDE);
IWL_EXPORT_SYMBOL(iwl_guid);
const guid_t iwl_rfi_guid = GUID_INIT(0x7266172C, 0x220B, 0x4B29,
0x81, 0x4F, 0x75, 0xE4,
0xDD, 0x26, 0xB5, 0xFD);
IWL_EXPORT_SYMBOL(iwl_rfi_guid);
static int iwl_acpi_get_handle(struct device *dev, acpi_string method,
acpi_handle *ret_handle)
@ -64,11 +70,12 @@ IWL_EXPORT_SYMBOL(iwl_acpi_get_object);
* function.
*/
static void *iwl_acpi_get_dsm_object(struct device *dev, int rev, int func,
union acpi_object *args)
union acpi_object *args,
const guid_t *guid)
{
union acpi_object *obj;
obj = acpi_evaluate_dsm(ACPI_HANDLE(dev), &intel_wifi_guid, rev, func,
obj = acpi_evaluate_dsm(ACPI_HANDLE(dev), guid, rev, func,
args);
if (!obj) {
IWL_DEBUG_DEV_RADIO(dev,
@ -87,12 +94,13 @@ static void *iwl_acpi_get_dsm_object(struct device *dev, int rev, int func,
* return 0 in success and the appropriate errno otherwise.
*/
static int iwl_acpi_get_dsm_integer(struct device *dev, int rev, int func,
u64 *value, size_t expected_size)
const guid_t *guid, u64 *value,
size_t expected_size)
{
union acpi_object *obj;
int ret = 0;
obj = iwl_acpi_get_dsm_object(dev, rev, func, NULL);
obj = iwl_acpi_get_dsm_object(dev, rev, func, NULL, guid);
if (IS_ERR(obj)) {
IWL_DEBUG_DEV_RADIO(dev,
"Failed to get DSM object. func= %d\n",
@ -137,12 +145,14 @@ out:
/*
* Evaluate a DSM with no arguments and a u8 return value,
*/
int iwl_acpi_get_dsm_u8(struct device *dev, int rev, int func, u8 *value)
int iwl_acpi_get_dsm_u8(struct device *dev, int rev, int func,
const guid_t *guid, u8 *value)
{
int ret;
u64 val;
ret = iwl_acpi_get_dsm_integer(dev, rev, func, &val, sizeof(u8));
ret = iwl_acpi_get_dsm_integer(dev, rev, func,
guid, &val, sizeof(u8));
if (ret < 0)
return ret;
@ -478,11 +488,16 @@ int iwl_sar_get_wrds_table(struct iwl_fw_runtime *fwrt)
wifi_pkg = iwl_acpi_get_wifi_pkg(fwrt->dev, data,
ACPI_WRDS_WIFI_DATA_SIZE, &tbl_rev);
if (IS_ERR(wifi_pkg) || tbl_rev != 0) {
if (IS_ERR(wifi_pkg)) {
ret = PTR_ERR(wifi_pkg);
goto out_free;
}
if (tbl_rev != 0) {
ret = -EINVAL;
goto out_free;
}
if (wifi_pkg->package.elements[1].type != ACPI_TYPE_INTEGER) {
ret = -EINVAL;
goto out_free;
@ -516,11 +531,16 @@ int iwl_sar_get_ewrd_table(struct iwl_fw_runtime *fwrt)
wifi_pkg = iwl_acpi_get_wifi_pkg(fwrt->dev, data,
ACPI_EWRD_WIFI_DATA_SIZE, &tbl_rev);
if (IS_ERR(wifi_pkg) || tbl_rev != 0) {
if (IS_ERR(wifi_pkg)) {
ret = PTR_ERR(wifi_pkg);
goto out_free;
}
if (tbl_rev != 0) {
ret = -EINVAL;
goto out_free;
}
if (wifi_pkg->package.elements[1].type != ACPI_TYPE_INTEGER ||
wifi_pkg->package.elements[2].type != ACPI_TYPE_INTEGER) {
ret = -EINVAL;
@ -576,11 +596,17 @@ int iwl_sar_get_wgds_table(struct iwl_fw_runtime *fwrt)
wifi_pkg = iwl_acpi_get_wifi_pkg(fwrt->dev, data,
ACPI_WGDS_WIFI_DATA_SIZE, &tbl_rev);
if (IS_ERR(wifi_pkg) || tbl_rev > 1) {
if (IS_ERR(wifi_pkg)) {
ret = PTR_ERR(wifi_pkg);
goto out_free;
}
if (tbl_rev > 1) {
ret = -EINVAL;
goto out_free;
}
fwrt->geo_rev = tbl_rev;
for (i = 0; i < ACPI_NUM_GEO_PROFILES; i++) {
for (j = 0; j < ACPI_GEO_TABLE_SIZE; j++) {

View File

@ -54,9 +54,9 @@
#define ACPI_WGDS_TABLE_SIZE 3
#define ACPI_PPAG_WIFI_DATA_SIZE ((IWL_NUM_CHAIN_LIMITS * \
IWL_NUM_SUB_BANDS) + 3)
IWL_NUM_SUB_BANDS) + 2)
#define ACPI_PPAG_WIFI_DATA_SIZE_V2 ((IWL_NUM_CHAIN_LIMITS * \
IWL_NUM_SUB_BANDS_V2) + 3)
IWL_NUM_SUB_BANDS_V2) + 2)
/* PPAG gain value bounds in 1/8 dBm */
#define ACPI_PPAG_MIN_LB -16
@ -93,13 +93,27 @@ enum iwl_dsm_values_indonesia {
DSM_VALUE_INDONESIA_MAX
};
/* DSM RFI uses a different GUID, so need separate definitions */
#define DSM_RFI_FUNC_ENABLE 3
enum iwl_dsm_values_rfi {
DSM_VALUE_RFI_ENABLE,
DSM_VALUE_RFI_DISABLE,
DSM_VALUE_RFI_MAX
};
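A minimal sketch (not part of the patch) of querying the RFI enablement function through the per-GUID DSM helper added above; the revision number and the variable names are assumptions:

	bool rfi_allowed = false;
	u8 value;
	int ret = iwl_acpi_get_dsm_u8(dev, 0, DSM_RFI_FUNC_ENABLE,
				      &iwl_rfi_guid, &value);

	if (!ret && value == DSM_VALUE_RFI_ENABLE)
		rfi_allowed = true;	/* BIOS permits RF interference mitigation */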
#ifdef CONFIG_ACPI
struct iwl_fw_runtime;
extern const guid_t iwl_guid;
extern const guid_t iwl_rfi_guid;
void *iwl_acpi_get_object(struct device *dev, acpi_string method);
int iwl_acpi_get_dsm_u8(struct device *dev, int rev, int func, u8 *value);
int iwl_acpi_get_dsm_u8(struct device *dev, int rev, int func,
const guid_t *guid, u8 *value);
union acpi_object *iwl_acpi_get_wifi_pkg(struct device *dev,
union acpi_object *data,
@ -159,8 +173,8 @@ static inline void *iwl_acpi_get_dsm_object(struct device *dev, int rev,
return ERR_PTR(-ENOENT);
}
static inline
int iwl_acpi_get_dsm_u8(struct device *dev, int rev, int func, u8 *value)
static inline int iwl_acpi_get_dsm_u8(struct device *dev, int rev, int func,
const guid_t *guid, u8 *value)
{
return -ENOENT;
}

View File

@ -284,7 +284,7 @@ enum iwl_legacy_cmds {
/* Phy */
/**
* @PHY_CONFIGURATION_CMD: &struct iwl_phy_cfg_cmd
* @PHY_CONFIGURATION_CMD: &struct iwl_phy_cfg_cmd_v1 or &struct iwl_phy_cfg_cmd_v3
*/
PHY_CONFIGURATION_CMD = 0x6a,
@ -606,6 +606,16 @@ enum iwl_system_subcmd_ids {
* @FW_ERROR_RECOVERY_CMD: &struct iwl_fw_error_recovery_cmd
*/
FW_ERROR_RECOVERY_CMD = 0x7,
/**
* @RFI_CONFIG_CMD: &struct iwl_rfi_config_cmd
*/
RFI_CONFIG_CMD = 0xb,
/**
* @RFI_GET_FREQ_TABLE_CMD: &struct iwl_rfi_config_cmd
*/
RFI_GET_FREQ_TABLE_CMD = 0xc,
};
#endif /* __iwl_fw_api_commands_h__ */

View File

@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Copyright (C) 2012-2014, 2018-2019 Intel Corporation
* Copyright (C) 2012-2014, 2018-2020 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@ -52,6 +52,12 @@ enum iwl_data_path_subcmd_ids {
*/
CHEST_COLLECTOR_FILTER_CONFIG_CMD = 0x14,
/**
* @MONITOR_NOTIF: Datapath monitoring notification, using
* &struct iwl_datapath_monitor_notif
*/
MONITOR_NOTIF = 0xF4,
/**
* @RX_NO_DATA_NOTIF: &struct iwl_rx_no_data
*/
@ -153,4 +159,14 @@ struct iwl_channel_estimation_cfg {
__le64 frame_types;
} __packed; /* CHEST_COLLECTOR_FILTER_CMD_API_S_VER_1 */
enum iwl_datapath_monitor_notif_type {
IWL_DP_MON_NOTIF_TYPE_EXT_CCA,
};
struct iwl_datapath_monitor_notif {
__le32 type;
u8 mac_id;
u8 reserved[3];
} __packed; /* MONITOR_NTF_API_S_VER_1 */
#endif /* __iwl_fw_api_datapath_h__ */

View File

@ -185,6 +185,21 @@ struct iwl_shared_mem_cfg {
__le32 rxfifo2_control_size;
} __packed; /* SHARED_MEM_ALLOC_API_S_VER_4 */
/**
* struct iwl_mfuart_load_notif_v1 - mfuart image version & status
* ( MFUART_LOAD_NOTIFICATION = 0xb1 )
* @installed_ver: installed image version
* @external_ver: external image version
* @status: MFUART loading status
* @duration: MFUART loading time
*/
struct iwl_mfuart_load_notif_v1 {
__le32 installed_ver;
__le32 external_ver;
__le32 status;
__le32 duration;
} __packed; /* MFU_LOADER_NTFY_API_S_VER_1 */
/**
* struct iwl_mfuart_load_notif - mfuart image version & status
* ( MFUART_LOAD_NOTIFICATION = 0xb1 )

View File

@ -12,7 +12,12 @@
enum iwl_location_subcmd_ids {
/**
* @TOF_RANGE_REQ_CMD: TOF ranging request,
* uses &struct iwl_tof_range_req_cmd
* uses one of &struct iwl_tof_range_req_cmd_v5,
* &struct iwl_tof_range_req_cmd_v7,
* &struct iwl_tof_range_req_cmd_v8,
* &struct iwl_tof_range_req_cmd_v9,
* &struct iwl_tof_range_req_cmd_v11,
* &struct iwl_tof_range_req_cmd_v7
*/
TOF_RANGE_REQ_CMD = 0x0,
/**

View File

@ -452,6 +452,10 @@ struct iwl_he_pkt_ext {
* enabled AGG, i.e. both BACK and non-BACK frames in a single AGG
* @STA_CTXT_HE_MU_EDCA_CW: indicates that there is an element of MU EDCA
* parameter set, i.e. the backoff counters for trig-based ACs
* @STA_CTXT_HE_NIC_NOT_ACK_ENABLED: mark that the NIC doesn't support receiving
* ACK-enabled AGG, (i.e. both BACK and non-BACK frames in single AGG).
* If the NIC is not ACK_ENABLED it may use the EOF-bit in first non-0
* len delim to determine if AGG or single.
* @STA_CTXT_HE_RU_2MHZ_BLOCK: indicates that 26-tone RU OFDMA transmission are
* not allowed (as there are OBSS that might classify such transmissions as
* radar pulses).
@ -466,6 +470,7 @@ enum iwl_he_sta_ctxt_flags {
STA_CTXT_HE_CONST_TRIG_RND_ALLOC = BIT(10),
STA_CTXT_HE_ACK_ENABLED = BIT(11),
STA_CTXT_HE_MU_EDCA_CW = BIT(12),
STA_CTXT_HE_NIC_NOT_ACK_ENABLED = BIT(13),
STA_CTXT_HE_RU_2MHZ_BLOCK = BIT(14),
};

View File

@ -415,14 +415,25 @@ enum iwl_lari_config_masks {
};
/**
* struct iwl_lari_config_change_cmd - change LARI configuration
* struct iwl_lari_config_change_cmd_v1 - change LARI configuration
* @config_bitmap: bit map of the config commands. each bit will trigger a
* different predefined FW config operation
*/
struct iwl_lari_config_change_cmd {
struct iwl_lari_config_change_cmd_v1 {
__le32 config_bitmap;
} __packed; /* LARI_CHANGE_CONF_CMD_S_VER_1 */
/**
* struct iwl_lari_config_change_cmd_v2 - change LARI configuration
* @config_bitmap: bit map of the config commands. each bit will trigger a
* different predefined FW config operation
* @oem_uhb_allow_bitmap: bitmap of UHB enabled MCC sets
*/
struct iwl_lari_config_change_cmd_v2 {
__le32 config_bitmap;
__le32 oem_uhb_allow_bitmap;
} __packed; /* LARI_CHANGE_CONF_CMD_S_VER_2 */
/**
* struct iwl_pnvm_init_complete_ntfy - PNVM initialization complete
* @status: PNVM image loading status

View File

@ -0,0 +1,60 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Copyright (C) 2020 Intel Corporation
*/
#ifndef __iwl_fw_api_rfi_h__
#define __iwl_fw_api_rfi_h__
#define IWL_RFI_LUT_ENTRY_CHANNELS_NUM 15
#define IWL_RFI_LUT_SIZE 24
#define IWL_RFI_LUT_INSTALLED_SIZE 4
/**
* struct iwl_rfi_lut_entry - an entry in the RFI frequency LUT.
*
* @freq: frequency
* @channels: channels that can be interfered at frequency freq (at most 15)
* @bands: the corresponding bands
*/
struct iwl_rfi_lut_entry {
__le16 freq;
u8 channels[IWL_RFI_LUT_ENTRY_CHANNELS_NUM];
u8 bands[IWL_RFI_LUT_ENTRY_CHANNELS_NUM];
} __packed;
/**
* struct iwl_rfi_config_cmd - RFI configuration table
*
* @entry: a table can have 24 frequency/channel mappings
* @oem: specifies if this is the default table or set by OEM
*/
struct iwl_rfi_config_cmd {
struct iwl_rfi_lut_entry table[IWL_RFI_LUT_SIZE];
u8 oem;
u8 reserved[3];
} __packed; /* RFI_CONFIG_CMD_API_S_VER_1 */
/**
* enum iwl_rfi_freq_table_status - status of the frequency table query
* @RFI_FREQ_TABLE_OK: can be used
* @RFI_FREQ_TABLE_DVFS_NOT_READY: DVFS is not ready yet, should try later
* @RFI_FREQ_TABLE_DISABLED: the feature is disabled in FW
*/
enum iwl_rfi_freq_table_status {
RFI_FREQ_TABLE_OK,
RFI_FREQ_TABLE_DVFS_NOT_READY,
RFI_FREQ_TABLE_DISABLED,
};
/**
* struct iwl_rfi_freq_table_resp_cmd - get the rfi freq table used by FW
*
* @table: table used by FW
* @status: see &iwl_rfi_freq_table_status
*/
struct iwl_rfi_freq_table_resp_cmd {
struct iwl_rfi_lut_entry table[IWL_RFI_LUT_INSTALLED_SIZE];
__le32 status;
} __packed; /* RFI_CONFIG_CMD_API_S_VER_1 */
#endif /* __iwl_fw_api_rfi_h__ */
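An illustrative consumer of the new API (not part of the patch), assuming pkt is the iwl_rx_packet returned for RFI_GET_FREQ_TABLE_CMD:

	struct iwl_rfi_freq_table_resp_cmd *resp = (void *)pkt->data;

	if (le32_to_cpu(resp->status) != RFI_FREQ_TABLE_OK)
		return -EINVAL;	/* DVFS not ready yet, or feature disabled in FW */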

View File

@ -140,7 +140,8 @@ enum iwl_rx_phy_flags {
* @RX_MPDU_RES_STATUS_SEC_TKIP_ENC: this frame is encrypted using TKIP
* @RX_MPDU_RES_STATUS_SEC_EXT_ENC: this frame is encrypted using extension
* algorithm
* @RX_MPDU_RES_STATUS_SEC_CCM_CMAC_ENC: this frame is encrypted using CCM_CMAC
* @RX_MPDU_RES_STATUS_SEC_CMAC_GMAC_ENC: this frame is protected using
* CMAC or GMAC
* @RX_MPDU_RES_STATUS_SEC_ENC_ERR: this frame couldn't be decrypted
* @RX_MPDU_RES_STATUS_SEC_ENC_MSK: bitmask of the encryption algorithm
* @RX_MPDU_RES_STATUS_DEC_DONE: this frame has been successfully decrypted
@ -167,7 +168,7 @@ enum iwl_mvm_rx_status {
RX_MPDU_RES_STATUS_SEC_CCM_ENC = (2 << 8),
RX_MPDU_RES_STATUS_SEC_TKIP_ENC = (3 << 8),
RX_MPDU_RES_STATUS_SEC_EXT_ENC = (4 << 8),
RX_MPDU_RES_STATUS_SEC_CCM_CMAC_ENC = (6 << 8),
RX_MPDU_RES_STATUS_SEC_CMAC_GMAC_ENC = (6 << 8),
RX_MPDU_RES_STATUS_SEC_ENC_ERR = (7 << 8),
RX_MPDU_RES_STATUS_SEC_ENC_MSK = (7 << 8),
RX_MPDU_RES_STATUS_DEC_DONE = BIT(11),
@ -239,6 +240,8 @@ enum iwl_rx_mpdu_status {
IWL_RX_MPDU_STATUS_ICV_OK = BIT(5),
IWL_RX_MPDU_STATUS_MIC_OK = BIT(6),
IWL_RX_MPDU_RES_STATUS_TTAK_OK = BIT(7),
/* overlayed since IWL_UCODE_TLV_API_DEPRECATE_TTAK */
IWL_RX_MPDU_STATUS_REPLAY_ERROR = BIT(7),
IWL_RX_MPDU_STATUS_SEC_MASK = 0x7 << 8,
IWL_RX_MPDU_STATUS_SEC_UNKNOWN = IWL_RX_MPDU_STATUS_SEC_MASK,
IWL_RX_MPDU_STATUS_SEC_NONE = 0x0 << 8,

View File

@ -542,7 +542,8 @@ struct iwl_scan_config_v2 {
* struct iwl_scan_config
* @enable_cam_mode: whether to enable CAM mode.
* @enable_promiscouos_mode: whether to enable promiscouos mode
* @bcast_sta_id: the index of the station in the fw
* @bcast_sta_id: the index of the station in the fw. Deprecated starting with
* API version 5.
* @reserved: reserved
* @tx_chains: valid_tx antenna - ANT_* definitions
* @rx_chains: valid_rx antenna - ANT_* definitions
@ -554,7 +555,7 @@ struct iwl_scan_config {
u8 reserved;
__le32 tx_chains;
__le32 rx_chains;
} __packed; /* SCAN_CONFIG_DB_CMD_API_S_3 */
} __packed; /* SCAN_CONFIG_DB_CMD_API_S_5 */
/**
* enum iwl_umac_scan_flags - UMAC scan flags

View File

@ -20,6 +20,7 @@
* @TX_CMD_FLG_VHT_NDPA: mark frame is NDPA for VHT beamformer sequence
* @TX_CMD_FLG_HT_NDPA: mark frame is NDPA for HT beamformer sequence
* @TX_CMD_FLG_CSI_FDBK2HOST: mark to send feedback to host (only if good CRC)
* @TX_CMD_FLG_BT_PRIO_MASK: BT priority value
* @TX_CMD_FLG_BT_PRIO_POS: the position of the BT priority (bit 11 is ignored
* on old firmwares).
* @TX_CMD_FLG_BT_DIS: disable BT priority for this frame
@ -51,6 +52,7 @@ enum iwl_tx_flags {
TX_CMD_FLG_HT_NDPA = BIT(9),
TX_CMD_FLG_CSI_FDBK2HOST = BIT(10),
TX_CMD_FLG_BT_PRIO_POS = 11,
TX_CMD_FLG_BT_PRIO_MASK = BIT(11) | BIT(12),
TX_CMD_FLG_BT_DIS = BIT(12),
TX_CMD_FLG_SEQ_CTL = BIT(13),
TX_CMD_FLG_MORE_FRAG = BIT(14),
@ -177,7 +179,7 @@ enum iwl_tx_offload_assist_flags_pos {
* ( TX_CMD = 0x1c )
* @len: in bytes of the payload, see below for details
* @offload_assist: TX offload configuration
* @tx_flags: combination of TX_CMD_FLG_*
* @tx_flags: combination of TX_CMD_FLG_*, see &enum iwl_tx_flags
* @scratch: scratch buffer used by the device
* @rate_n_flags: rate for *all* Tx attempts, if TX_CMD_FLG_STA_RATE_MSK is
* cleared. Combination of RATE_MCS_*
@ -238,7 +240,7 @@ struct iwl_tx_cmd {
__le16 pm_frame_timeout;
__le16 reserved4;
u8 payload[0];
struct ieee80211_hdr hdr[];
struct ieee80211_hdr hdr[0];
} __packed; /* TX_CMD_API_S_VER_6 */
struct iwl_dram_sec_info {
@ -855,6 +857,32 @@ struct iwl_tx_path_flush_cmd {
__le16 reserved;
} __packed; /* TX_PATH_FLUSH_CMD_API_S_VER_2 */
#define IWL_TX_FLUSH_QUEUE_RSP 16
/**
* struct iwl_flush_queue_info - virtual flush queue info
* @queue_num: virtual queue id
* @read_before_flush: read pointer before flush
* @read_after_flush: read pointer after flush
*/
struct iwl_flush_queue_info {
__le16 tid;
__le16 queue_num;
__le16 read_before_flush;
__le16 read_after_flush;
} __packed; /* TFDQ_FLUSH_INFO_API_S_VER_1 */
/**
* struct iwl_tx_path_flush_cmd_rsp -- queue/FIFO flush command response
* @num_flushed_queues: number of queues in queues array
* @queues: all flushed queues
*/
struct iwl_tx_path_flush_cmd_rsp {
__le16 sta_id;
__le16 num_flushed_queues;
struct iwl_flush_queue_info queues[IWL_TX_FLUSH_QUEUE_RSP];
} __packed; /* TX_PATH_FLUSH_CMD_RSP_API_S_VER_1 */
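An illustrative walk of the new flush response (not part of the patch), assuming pkt is the iwl_rx_packet carrying the TX_PATH_FLUSH_CMD response:

	struct iwl_tx_path_flush_cmd_rsp *rsp = (void *)pkt->data;
	int i, num = le16_to_cpu(rsp->num_flushed_queues);

	for (i = 0; i < num && i < IWL_TX_FLUSH_QUEUE_RSP; i++)
		pr_debug("queue %d: read ptr %d -> %d\n",
			 le16_to_cpu(rsp->queues[i].queue_num),
			 le16_to_cpu(rsp->queues[i].read_before_flush),
			 le16_to_cpu(rsp->queues[i].read_after_flush));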
/* Available options for the SCD_QUEUE_CFG HCMD */
enum iwl_scd_cfg_actions {
SCD_CFG_DISABLE_QUEUE = 0x0,

View File

@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright (C) 2005-2014, 2018-2020 Intel Corporation
* Copyright (C) 2005-2014, 2018-2021 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2015-2017 Intel Deutschland GmbH
*/
@ -33,12 +33,11 @@ static void iwl_read_radio_regs(struct iwl_fw_runtime *fwrt,
struct iwl_fw_error_dump_data **dump_data)
{
u8 *pos = (void *)(*dump_data)->data;
unsigned long flags;
int i;
IWL_DEBUG_INFO(fwrt, "WRT radio registers dump\n");
if (!iwl_trans_grab_nic_access(fwrt->trans, &flags))
if (!iwl_trans_grab_nic_access(fwrt->trans))
return;
(*dump_data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_RADIO_REG);
@ -56,7 +55,7 @@ static void iwl_read_radio_regs(struct iwl_fw_runtime *fwrt,
*dump_data = iwl_fw_error_next_data(*dump_data);
iwl_trans_release_nic_access(fwrt->trans, &flags);
iwl_trans_release_nic_access(fwrt->trans);
}
static void iwl_fwrt_dump_rxf(struct iwl_fw_runtime *fwrt,
@ -172,11 +171,10 @@ static void iwl_fw_dump_rxf(struct iwl_fw_runtime *fwrt,
struct iwl_fw_error_dump_data **dump_data)
{
struct iwl_fwrt_shared_mem_cfg *cfg = &fwrt->smem_cfg;
unsigned long flags;
IWL_DEBUG_INFO(fwrt, "WRT RX FIFO dump\n");
if (!iwl_trans_grab_nic_access(fwrt->trans, &flags))
if (!iwl_trans_grab_nic_access(fwrt->trans))
return;
if (iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_RXF)) {
@ -194,7 +192,7 @@ static void iwl_fw_dump_rxf(struct iwl_fw_runtime *fwrt,
LMAC2_PRPH_OFFSET, 2);
}
iwl_trans_release_nic_access(fwrt->trans, &flags);
iwl_trans_release_nic_access(fwrt->trans);
}
static void iwl_fw_dump_txf(struct iwl_fw_runtime *fwrt,
@ -204,12 +202,11 @@ static void iwl_fw_dump_txf(struct iwl_fw_runtime *fwrt,
struct iwl_fwrt_shared_mem_cfg *cfg = &fwrt->smem_cfg;
u32 *fifo_data;
u32 fifo_len;
unsigned long flags;
int i, j;
IWL_DEBUG_INFO(fwrt, "WRT TX FIFO dump\n");
if (!iwl_trans_grab_nic_access(fwrt->trans, &flags))
if (!iwl_trans_grab_nic_access(fwrt->trans))
return;
if (iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_TXF)) {
@ -299,7 +296,7 @@ static void iwl_fw_dump_txf(struct iwl_fw_runtime *fwrt,
}
}
iwl_trans_release_nic_access(fwrt->trans, &flags);
iwl_trans_release_nic_access(fwrt->trans);
}
#define IWL8260_ICCM_OFFSET 0x44000 /* Only for B-step */
@ -527,7 +524,6 @@ static void iwl_dump_prph(struct iwl_fw_runtime *fwrt,
struct iwl_trans *trans = fwrt->trans;
struct iwl_fw_error_dump_data **data =
(struct iwl_fw_error_dump_data **)ptr;
unsigned long flags;
u32 i;
if (!data)
@ -535,7 +531,7 @@ static void iwl_dump_prph(struct iwl_fw_runtime *fwrt,
IWL_DEBUG_INFO(trans, "WRT PRPH dump\n");
if (!iwl_trans_grab_nic_access(trans, &flags))
if (!iwl_trans_grab_nic_access(trans))
return;
for (i = 0; i < range_len; i++) {
@ -558,7 +554,7 @@ static void iwl_dump_prph(struct iwl_fw_runtime *fwrt,
*data = iwl_fw_error_next_data(*data);
}
iwl_trans_release_nic_access(trans, &flags);
iwl_trans_release_nic_access(trans);
}
/*
@ -1048,7 +1044,6 @@ iwl_dump_ini_prph_phy_iter(struct iwl_fw_runtime *fwrt,
u32 addr = le32_to_cpu(reg->addrs[idx]);
u32 dphy_state;
u32 dphy_addr;
unsigned long flags;
int i;
range->internal_base_addr = cpu_to_le32(addr);
@ -1060,7 +1055,7 @@ iwl_dump_ini_prph_phy_iter(struct iwl_fw_runtime *fwrt,
indirect_wr_addr += le32_to_cpu(reg->dev_addr.offset);
indirect_rd_addr += le32_to_cpu(reg->dev_addr.offset);
if (!iwl_trans_grab_nic_access(fwrt->trans, &flags))
if (!iwl_trans_grab_nic_access(fwrt->trans))
return -EBUSY;
dphy_addr = (reg->dev_addr.offset) ? WFPM_LMAC2_PS_CTL_RW :
@ -1082,7 +1077,7 @@ iwl_dump_ini_prph_phy_iter(struct iwl_fw_runtime *fwrt,
*val++ = cpu_to_le32(prph_val);
}
iwl_trans_release_nic_access(fwrt->trans, &flags);
iwl_trans_release_nic_access(fwrt->trans);
return sizeof(*range) + le32_to_cpu(range->range_data_size);
}
@ -1157,10 +1152,7 @@ static int iwl_dump_ini_dev_mem_iter(struct iwl_fw_runtime *fwrt,
static int _iwl_dump_ini_paging_iter(struct iwl_fw_runtime *fwrt,
void *range_ptr, int idx)
{
/* increase idx by 1 since the pages are from 1 to
* fwrt->num_of_paging_blk + 1
*/
struct page *page = fwrt->fw_paging_db[++idx].fw_paging_block;
struct page *page = fwrt->fw_paging_db[idx].fw_paging_block;
struct iwl_fw_ini_error_dump_range *range = range_ptr;
dma_addr_t addr = fwrt->fw_paging_db[idx].fw_paging_phys;
u32 page_size = fwrt->fw_paging_db[idx].fw_paging_size;
@ -1183,6 +1175,9 @@ static int iwl_dump_ini_paging_iter(struct iwl_fw_runtime *fwrt,
struct iwl_fw_ini_error_dump_range *range;
u32 page_size;
/* all paging indices start from 1 to skip the CSS section */
idx++;
if (!fwrt->trans->trans_cfg->gen2)
return _iwl_dump_ini_paging_iter(fwrt, range_ptr, idx);
@ -1297,13 +1292,12 @@ static int iwl_dump_ini_txf_iter(struct iwl_fw_runtime *fwrt,
u32 registers_num = iwl_tlv_array_len(reg_data->reg_tlv, reg, addrs);
u32 registers_size = registers_num * sizeof(*reg_dump);
__le32 *data;
unsigned long flags;
int i;
if (!iwl_ini_txf_iter(fwrt, reg_data, idx))
return -EIO;
if (!iwl_trans_grab_nic_access(fwrt->trans, &flags))
if (!iwl_trans_grab_nic_access(fwrt->trans))
return -EBUSY;
range->fifo_hdr.fifo_num = cpu_to_le32(iter->fifo);
@ -1345,7 +1339,7 @@ static int iwl_dump_ini_txf_iter(struct iwl_fw_runtime *fwrt,
*data++ = cpu_to_le32(iwl_read_prph_no_grab(fwrt->trans, addr));
out:
iwl_trans_release_nic_access(fwrt->trans, &flags);
iwl_trans_release_nic_access(fwrt->trans);
return sizeof(*range) + le32_to_cpu(range->range_data_size);
}
@ -1429,14 +1423,13 @@ static int iwl_dump_ini_rxf_iter(struct iwl_fw_runtime *fwrt,
u32 registers_num = iwl_tlv_array_len(reg_data->reg_tlv, reg, addrs);
u32 registers_size = registers_num * sizeof(*reg_dump);
__le32 *data;
unsigned long flags;
int i;
iwl_ini_get_rxf_data(fwrt, reg_data, &rxf_data);
if (!rxf_data.size)
return -EIO;
if (!iwl_trans_grab_nic_access(fwrt->trans, &flags))
if (!iwl_trans_grab_nic_access(fwrt->trans))
return -EBUSY;
range->fifo_hdr.fifo_num = cpu_to_le32(rxf_data.fifo_num);
@ -1479,7 +1472,7 @@ static int iwl_dump_ini_rxf_iter(struct iwl_fw_runtime *fwrt,
*data++ = cpu_to_le32(iwl_read_prph_no_grab(fwrt->trans, addr));
out:
iwl_trans_release_nic_access(fwrt->trans, &flags);
iwl_trans_release_nic_access(fwrt->trans);
return sizeof(*range) + le32_to_cpu(range->range_data_size);
}
@ -1596,9 +1589,8 @@ iwl_dump_ini_mon_fill_header(struct iwl_fw_runtime *fwrt,
{
struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
u32 alloc_id = le32_to_cpu(reg->dram_alloc_id);
unsigned long flags;
if (!iwl_trans_grab_nic_access(fwrt->trans, &flags)) {
if (!iwl_trans_grab_nic_access(fwrt->trans)) {
IWL_ERR(fwrt, "Failed to get monitor header\n");
return NULL;
}
@ -1615,7 +1607,7 @@ iwl_dump_ini_mon_fill_header(struct iwl_fw_runtime *fwrt,
data->cur_frag = iwl_get_mon_reg(fwrt, alloc_id,
&addrs->cur_frag);
iwl_trans_release_nic_access(fwrt->trans, &flags);
iwl_trans_release_nic_access(fwrt->trans);
data->header.version = cpu_to_le32(IWL_INI_DUMP_VER);
@ -1684,8 +1676,12 @@ static u32 iwl_dump_ini_mem_ranges(struct iwl_fw_runtime *fwrt,
static u32 iwl_dump_ini_paging_ranges(struct iwl_fw_runtime *fwrt,
struct iwl_dump_ini_region_data *reg_data)
{
if (fwrt->trans->trans_cfg->gen2)
return fwrt->trans->init_dram.paging_cnt;
if (fwrt->trans->trans_cfg->gen2) {
if (fwrt->trans->init_dram.paging_cnt)
return fwrt->trans->init_dram.paging_cnt - 1;
else
return 0;
}
return fwrt->num_of_paging_blk;
}
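For clarity, a minimal sketch (not part of the patch; the helper name is made up) of the index mapping the two paging hunks above establish: the dump now reports one range fewer than the number of paging blocks, because block 0 holds the CSS section.

/* Illustration only: a dump that reports N ranges reads paging blocks 1..N,
 * skipping the CSS section stored in block 0.
 */
static inline int example_range_to_paging_block(int range_idx)
{
	return range_idx + 1;
}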
@ -1750,15 +1746,13 @@ iwl_dump_ini_paging_get_size(struct iwl_fw_runtime *fwrt,
u32 range_header_len = sizeof(struct iwl_fw_ini_error_dump_range);
u32 size = sizeof(struct iwl_fw_ini_error_dump);
if (fwrt->trans->trans_cfg->gen2) {
for (i = 0; i < iwl_dump_ini_paging_ranges(fwrt, reg_data); i++)
size += range_header_len +
fwrt->trans->init_dram.paging[i].size;
} else {
for (i = 1; i <= iwl_dump_ini_paging_ranges(fwrt, reg_data);
i++)
size += range_header_len +
fwrt->fw_paging_db[i].fw_paging_size;
/* start from 1 to skip CSS section */
for (i = 1; i <= iwl_dump_ini_paging_ranges(fwrt, reg_data); i++) {
size += range_header_len;
if (fwrt->trans->trans_cfg->gen2)
size += fwrt->trans->init_dram.paging[i].size;
else
size += fwrt->fw_paging_db[i].fw_paging_size;
}
return size;
@ -2071,7 +2065,8 @@ static u32 iwl_dump_ini_info(struct iwl_fw_runtime *fwrt,
dump->umac_minor = cpu_to_le32(fwrt->dump.fw_ver.umac_minor);
dump->fw_mon_mode = cpu_to_le32(fwrt->trans->dbg.ini_dest);
dump->regions_mask = trigger->regions_mask;
dump->regions_mask = trigger->regions_mask &
~cpu_to_le64(fwrt->trans->dbg.unsupported_region_msk);
dump->build_tag_len = cpu_to_le32(sizeof(dump->build_tag));
memcpy(dump->build_tag, fwrt->fw->human_readable,
@ -2200,7 +2195,8 @@ static u32 iwl_dump_ini_trigger(struct iwl_fw_runtime *fwrt,
};
int i;
u32 size = 0;
u64 regions_mask = le64_to_cpu(trigger->regions_mask);
u64 regions_mask = le64_to_cpu(trigger->regions_mask) &
~(fwrt->trans->dbg.unsupported_region_msk);
BUILD_BUG_ON(sizeof(trigger->regions_mask) != sizeof(regions_mask));
BUILD_BUG_ON((sizeof(trigger->regions_mask) * BITS_PER_BYTE) <
@ -2451,7 +2447,8 @@ int iwl_fw_dbg_error_collect(struct iwl_fw_runtime *fwrt,
return -EIO;
if (iwl_trans_dbg_ini_valid(fwrt->trans)) {
if (trig_type != FW_DBG_TRIGGER_ALIVE_TIMEOUT)
if (trig_type != FW_DBG_TRIGGER_ALIVE_TIMEOUT &&
trig_type != FW_DBG_TRIGGER_DRIVER)
return -EIO;
iwl_dbg_tlv_time_point(fwrt,
@ -2758,7 +2755,6 @@ IWL_EXPORT_SYMBOL(iwl_fw_dbg_stop_sync);
void iwl_fw_error_print_fseq_regs(struct iwl_fw_runtime *fwrt)
{
struct iwl_trans *trans = fwrt->trans;
unsigned long flags;
int i;
struct {
u32 addr;
@ -2778,7 +2774,7 @@ void iwl_fw_error_print_fseq_regs(struct iwl_fw_runtime *fwrt)
FSEQ_REG(CNVR_SCU_SD_REGS_SD_REG_ACTIVE_VDIG_MIRROR),
};
if (!iwl_trans_grab_nic_access(trans, &flags))
if (!iwl_trans_grab_nic_access(trans))
return;
IWL_ERR(fwrt, "Fseq Registers:\n");
@ -2788,7 +2784,7 @@ void iwl_fw_error_print_fseq_regs(struct iwl_fw_runtime *fwrt)
iwl_read_prph_no_grab(trans, fseq_regs[i].addr),
fseq_regs[i].str);
iwl_trans_release_nic_access(trans, &flags);
iwl_trans_release_nic_access(trans);
}
IWL_EXPORT_SYMBOL(iwl_fw_error_print_fseq_regs);

View File

@ -93,6 +93,7 @@ enum iwl_ucode_tlv_type {
IWL_UCODE_TLV_FW_RECOVERY_INFO = 57,
IWL_UCODE_TLV_HW_TYPE = 58,
IWL_UCODE_TLV_FW_FSEQ_VERSION = 60,
IWL_UCODE_TLV_PHY_INTEGRATION_VERSION = 61,
IWL_UCODE_TLV_PNVM_VERSION = 62,
IWL_UCODE_TLV_PNVM_SKU = 64,
@ -439,6 +440,9 @@ enum iwl_ucode_tlv_capa {
*/
IWL_UCODE_TLV_CAPA_PSC_CHAN_SUPPORT = (__force iwl_ucode_tlv_capa_t)98,
IWL_UCODE_TLV_CAPA_BIGTK_SUPPORT = (__force iwl_ucode_tlv_capa_t)100,
IWL_UCODE_TLV_CAPA_RFIM_SUPPORT = (__force iwl_ucode_tlv_capa_t)102,
NUM_IWL_UCODE_TLV_CAPA
#ifdef __CHECKER__
/* sparse says it cannot increment the previous enum member */

View File

@ -219,6 +219,9 @@ struct iwl_fw {
u8 human_readable[FW_VER_HUMAN_READABLE_SZ];
struct iwl_fw_dbg dbg;
u8 *phy_integration_ver;
u32 phy_integration_ver_len;
};
static inline const char *get_fw_dbg_mode_string(int mode)

View File

@ -36,11 +36,13 @@ IWL_EXPORT_SYMBOL(iwl_fw_runtime_init);
void iwl_fw_runtime_suspend(struct iwl_fw_runtime *fwrt)
{
iwl_fw_suspend_timestamp(fwrt);
iwl_dbg_tlv_time_point(fwrt, IWL_FW_INI_TIME_POINT_HOST_D3_START, NULL);
}
IWL_EXPORT_SYMBOL(iwl_fw_runtime_suspend);
void iwl_fw_runtime_resume(struct iwl_fw_runtime *fwrt)
{
iwl_dbg_tlv_time_point(fwrt, IWL_FW_INI_TIME_POINT_HOST_D3_END, NULL);
iwl_fw_resume_timestamp(fwrt);
}
IWL_EXPORT_SYMBOL(iwl_fw_runtime_resume);

View File

@ -1,9 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/******************************************************************************
*
* Copyright(c) 2020 Intel Corporation
*
*****************************************************************************/
/*
* Copyright(c) 2020-2021 Intel Corporation
*/
#include "iwl-drv.h"
#include "pnvm.h"
@ -12,6 +10,7 @@
#include "fw/api/commands.h"
#include "fw/api/nvm-reg.h"
#include "fw/api/alive.h"
#include <linux/efi.h>
struct iwl_pnvm_section {
__le32 offset;
@ -198,14 +197,14 @@ static int iwl_pnvm_parse(struct iwl_trans *trans, const u8 *data,
le32_to_cpu(sku_id->data[1]),
le32_to_cpu(sku_id->data[2]));
data += sizeof(*tlv) + ALIGN(tlv_len, 4);
len -= ALIGN(tlv_len, 4);
if (trans->sku_id[0] == le32_to_cpu(sku_id->data[0]) &&
trans->sku_id[1] == le32_to_cpu(sku_id->data[1]) &&
trans->sku_id[2] == le32_to_cpu(sku_id->data[2])) {
int ret;
data += sizeof(*tlv) + ALIGN(tlv_len, 4);
len -= ALIGN(tlv_len, 4);
ret = iwl_pnvm_handle_section(trans, data, len);
if (!ret)
return 0;
@ -221,51 +220,165 @@ static int iwl_pnvm_parse(struct iwl_trans *trans, const u8 *data,
return -ENOENT;
}
#if defined(CONFIG_EFI)
#define IWL_EFI_VAR_GUID EFI_GUID(0x92daaf2f, 0xc02b, 0x455b, \
0xb2, 0xec, 0xf5, 0xa3, \
0x59, 0x4f, 0x4a, 0xea)
#define IWL_UEFI_OEM_PNVM_NAME L"UefiCnvWlanOemSignedPnvm"
#define IWL_HARDCODED_PNVM_SIZE 4096
struct pnvm_sku_package {
u8 rev;
u8 reserved1[3];
u32 total_size;
u8 n_skus;
u8 reserved2[11];
u8 data[];
};
static int iwl_pnvm_get_from_efi(struct iwl_trans *trans,
u8 **data, size_t *len)
{
struct efivar_entry *pnvm_efivar;
struct pnvm_sku_package *package;
unsigned long package_size;
int err;
pnvm_efivar = kzalloc(sizeof(*pnvm_efivar), GFP_KERNEL);
if (!pnvm_efivar)
return -ENOMEM;
memcpy(&pnvm_efivar->var.VariableName, IWL_UEFI_OEM_PNVM_NAME,
sizeof(IWL_UEFI_OEM_PNVM_NAME));
pnvm_efivar->var.VendorGuid = IWL_EFI_VAR_GUID;
/*
* TODO: we hardcode a maximum length here, because reading
* from the UEFI is not working. To implement this properly,
* we have to call efivar_entry_size().
*/
package_size = IWL_HARDCODED_PNVM_SIZE;
package = kmalloc(package_size, GFP_KERNEL);
if (!package) {
err = -ENOMEM;
goto out;
}
err = efivar_entry_get(pnvm_efivar, NULL, &package_size, package);
if (err) {
IWL_DEBUG_FW(trans,
"PNVM UEFI variable not found %d (len %zd)\n",
err, package_size);
goto out;
}
IWL_DEBUG_FW(trans, "Read PNVM fro UEFI with size %zd\n", package_size);
/* compute the payload length before duplicating the data */
*len = package_size - sizeof(*package);
*data = kmemdup(package->data, *len, GFP_KERNEL);
if (!*data)
err = -ENOMEM;
out:
kfree(package);
kfree(pnvm_efivar);
return err;
}
#else /* CONFIG_EFI */
static inline int iwl_pnvm_get_from_efi(struct iwl_trans *trans,
u8 **data, size_t *len)
{
return -EOPNOTSUPP;
}
#endif /* CONFIG_EFI */
static int iwl_pnvm_get_from_fs(struct iwl_trans *trans, u8 **data, size_t *len)
{
const struct firmware *pnvm;
char pnvm_name[64];
int ret;
/*
* The prefix unfortunately includes a hyphen at the end, so
* don't add the dot here...
*/
snprintf(pnvm_name, sizeof(pnvm_name), "%spnvm",
trans->cfg->fw_name_pre);
/* ...but replace the hyphen with the dot here. */
if (strlen(trans->cfg->fw_name_pre) < sizeof(pnvm_name))
pnvm_name[strlen(trans->cfg->fw_name_pre) - 1] = '.';
ret = firmware_request_nowarn(&pnvm, pnvm_name, trans->dev);
if (ret) {
IWL_DEBUG_FW(trans, "PNVM file %s not found %d\n",
pnvm_name, ret);
return ret;
}
*data = kmemdup(pnvm->data, pnvm->size, GFP_KERNEL);
if (!*data)
return -ENOMEM;
*len = pnvm->size;
return 0;
}
int iwl_pnvm_load(struct iwl_trans *trans,
struct iwl_notif_wait_data *notif_wait)
{
u8 *data;
size_t len;
struct iwl_notification_wait pnvm_wait;
static const u16 ntf_cmds[] = { WIDE_ID(REGULATORY_AND_NVM_GROUP,
PNVM_INIT_COMPLETE_NTFY) };
int ret;
/* if the SKU_ID is empty, there's nothing to do */
if (!trans->sku_id[0] && !trans->sku_id[1] && !trans->sku_id[2])
return 0;
/* load from disk only if we haven't done it (or tried) before */
if (!trans->pnvm_loaded) {
const struct firmware *pnvm;
char pnvm_name[64];
int ret;
/*
* The prefix unfortunately includes a hyphen at the end, so
* don't add the dot here...
*/
snprintf(pnvm_name, sizeof(pnvm_name), "%spnvm",
trans->cfg->fw_name_pre);
/* ...but replace the hyphen with the dot here. */
if (strlen(trans->cfg->fw_name_pre) < sizeof(pnvm_name))
pnvm_name[strlen(trans->cfg->fw_name_pre) - 1] = '.';
ret = firmware_request_nowarn(&pnvm, pnvm_name, trans->dev);
if (ret) {
IWL_DEBUG_FW(trans, "PNVM file %s not found %d\n",
pnvm_name, ret);
/*
* Pretend we've loaded it - at least we've tried and
* couldn't load it at all, so there's no point in
* trying again over and over.
*/
trans->pnvm_loaded = true;
} else {
iwl_pnvm_parse(trans, pnvm->data, pnvm->size);
release_firmware(pnvm);
}
/*
* If we already loaded (or tried to load) it before, we just
* need to set it again.
*/
if (trans->pnvm_loaded) {
ret = iwl_trans_set_pnvm(trans, NULL, 0);
if (ret)
return ret;
goto skip_parse;
}
/* First attempt to get the PNVM from BIOS */
ret = iwl_pnvm_get_from_efi(trans, &data, &len);
if (!ret)
goto parse;
/* If it's not available, try from the filesystem */
ret = iwl_pnvm_get_from_fs(trans, &data, &len);
if (ret) {
/*
* Pretend we've loaded it - at least we've tried and
* couldn't load it at all, so there's no point in
* trying again over and over.
*/
trans->pnvm_loaded = true;
goto skip_parse;
}
parse:
iwl_pnvm_parse(trans, data, len);
kfree(data);
skip_parse:
iwl_init_notification_wait(notif_wait, &pnvm_wait,
ntf_cmds, ARRAY_SIZE(ntf_cmds),
iwl_pnvm_complete_fn, trans);
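A worked example of the file-name construction done in iwl_pnvm_get_from_fs() above; the prefix string is hypothetical and the helper exists only for illustration.

static void example_build_pnvm_name(char *out, size_t outlen,
				    const char *fw_name_pre)
{
	/* e.g. fw_name_pre = "iwlwifi-ty-a0-gf-a0-" -> "iwlwifi-ty-a0-gf-a0-pnvm" */
	snprintf(out, outlen, "%spnvm", fw_name_pre);

	/* replace the prefix's trailing hyphen with a dot:
	 * -> "iwlwifi-ty-a0-gf-a0.pnvm"
	 */
	if (strlen(fw_name_pre) < outlen)
		out[strlen(fw_name_pre) - 1] = '.';
}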

View File

@ -325,10 +325,6 @@ struct iwl_fw_mon_regs {
* @features: hw features, any combination of feature_passlist
* @pwr_tx_backoffs: translation table between power limits and backoffs
* @max_tx_agg_size: max TX aggregation size of the ADDBA request/response
* @max_ht_ampdu_factor: the exponent of the max length of A-MPDU that the
* station can receive in HT
* @max_vht_ampdu_exponent: the exponent of the max length of A-MPDU that the
* station can receive in VHT
* @dccm_offset: offset from which DCCM begins
* @dccm_len: length of DCCM (including runtime stack CCM)
* @dccm2_offset: offset from which the second DCCM begins
@ -395,8 +391,6 @@ struct iwl_cfg {
u8 non_shared_ant;
u8 nvm_hw_section_num;
u8 max_tx_agg_size;
u8 max_ht_ampdu_exponent;
u8 max_vht_ampdu_exponent;
u8 ucode_api_max;
u8 ucode_api_min;
u16 num_rbds;
@ -418,6 +412,7 @@ struct iwl_cfg {
#define IWL_CFG_MAC_TYPE_QU 0x33
#define IWL_CFG_MAC_TYPE_QUZ 0x35
#define IWL_CFG_MAC_TYPE_QNJ 0x36
#define IWL_CFG_MAC_TYPE_SO 0x37
#define IWL_CFG_MAC_TYPE_SNJ 0x42
#define IWL_CFG_MAC_TYPE_MA 0x44
@ -444,6 +439,9 @@ struct iwl_cfg {
#define IWL_CFG_CORES_BT 0x0
#define IWL_CFG_CORES_BT_GNSS 0x5
#define IWL_CFG_NO_CDB 0x0
#define IWL_CFG_CDB 0x1
#define IWL_SUBDEVICE_RF_ID(subdevice) ((u16)((subdevice) & 0x00F0) >> 4)
#define IWL_SUBDEVICE_NO_160(subdevice) ((u16)((subdevice) & 0x0200) >> 9)
#define IWL_SUBDEVICE_CORES(subdevice) ((u16)((subdevice) & 0x1C00) >> 10)
@ -457,6 +455,7 @@ struct iwl_dev_info {
u8 rf_id;
u8 no_160;
u8 cores;
u8 cdb;
const struct iwl_cfg *cfg;
const char *name;
};
@ -473,6 +472,9 @@ extern const struct iwl_cfg_trans_params iwl_qu_trans_cfg;
extern const struct iwl_cfg_trans_params iwl_qu_medium_latency_trans_cfg;
extern const struct iwl_cfg_trans_params iwl_qu_long_latency_trans_cfg;
extern const struct iwl_cfg_trans_params iwl_ax200_trans_cfg;
extern const struct iwl_cfg_trans_params iwl_snj_trans_cfg;
extern const struct iwl_cfg_trans_params iwl_so_trans_cfg;
extern const struct iwl_cfg_trans_params iwl_so_long_latency_trans_cfg;
extern const struct iwl_cfg_trans_params iwl_ma_trans_cfg;
extern const char iwl9162_name[];
extern const char iwl9260_name[];
@ -600,10 +602,15 @@ extern const struct iwl_cfg iwlax411_2ax_cfg_so_gf4_a0;
extern const struct iwl_cfg iwlax411_2ax_cfg_so_gf4_a0_long;
extern const struct iwl_cfg iwlax411_2ax_cfg_sosnj_gf4_a0;
extern const struct iwl_cfg iwlax211_cfg_snj_gf_a0;
extern const struct iwl_cfg iwlax201_cfg_snj_hr_b0;
extern const struct iwl_cfg iwl_cfg_snj_hr_b0;
extern const struct iwl_cfg iwl_cfg_snj_a0_jf_b0;
extern const struct iwl_cfg iwl_cfg_ma_a0_hr_b0;
extern const struct iwl_cfg iwl_cfg_ma_a0_gf_a0;
extern const struct iwl_cfg iwl_cfg_ma_a0_gf4_a0;
extern const struct iwl_cfg iwl_cfg_ma_a0_mr_a0;
extern const struct iwl_cfg iwl_cfg_snj_a0_mr_a0;
extern const struct iwl_cfg iwl_cfg_so_a0_hr_a0;
extern const struct iwl_cfg iwl_cfg_quz_a0_hr_b0;
#endif /* CONFIG_IWLMVM */
#endif /* __IWL_CONFIG_H__ */

View File

@ -277,6 +277,8 @@
#define CSR_HW_RFID_DASH(_val) (((_val) & 0x00000F0) >> 4)
#define CSR_HW_RFID_STEP(_val) (((_val) & 0x0000F00) >> 8)
#define CSR_HW_RFID_TYPE(_val) (((_val) & 0x0FFF000) >> 12)
#define CSR_HW_RFID_IS_CDB(_val) (((_val) & 0x10000000) >> 28)
#define CSR_HW_RFID_IS_JACKET(_val) (((_val) & 0x20000000) >> 29)
/**
* hw_rev values

View File

@ -61,7 +61,8 @@ dbg_ver_table[IWL_DBG_TLV_TYPE_NUM] = {
[IWL_DBG_TLV_TYPE_TRIGGER] = {.min_ver = 1, .max_ver = 1,},
};
static int iwl_dbg_tlv_add(struct iwl_ucode_tlv *tlv, struct list_head *list)
static int iwl_dbg_tlv_add(const struct iwl_ucode_tlv *tlv,
struct list_head *list)
{
u32 len = le32_to_cpu(tlv->length);
struct iwl_dbg_tlv_node *node;
@ -76,9 +77,9 @@ static int iwl_dbg_tlv_add(struct iwl_ucode_tlv *tlv, struct list_head *list)
return 0;
}
static bool iwl_dbg_tlv_ver_support(struct iwl_ucode_tlv *tlv)
static bool iwl_dbg_tlv_ver_support(const struct iwl_ucode_tlv *tlv)
{
struct iwl_fw_ini_header *hdr = (void *)&tlv->data[0];
const struct iwl_fw_ini_header *hdr = (const void *)&tlv->data[0];
u32 type = le32_to_cpu(tlv->type);
u32 tlv_idx = type - IWL_UCODE_TLV_DEBUG_BASE;
u32 ver = le32_to_cpu(hdr->version);
@ -91,9 +92,9 @@ static bool iwl_dbg_tlv_ver_support(struct iwl_ucode_tlv *tlv)
}
static int iwl_dbg_tlv_alloc_debug_info(struct iwl_trans *trans,
struct iwl_ucode_tlv *tlv)
const struct iwl_ucode_tlv *tlv)
{
struct iwl_fw_ini_debug_info_tlv *debug_info = (void *)tlv->data;
const struct iwl_fw_ini_debug_info_tlv *debug_info = (const void *)tlv->data;
if (le32_to_cpu(tlv->length) != sizeof(*debug_info))
return -EINVAL;
@ -105,9 +106,9 @@ static int iwl_dbg_tlv_alloc_debug_info(struct iwl_trans *trans,
}
static int iwl_dbg_tlv_alloc_buf_alloc(struct iwl_trans *trans,
struct iwl_ucode_tlv *tlv)
const struct iwl_ucode_tlv *tlv)
{
struct iwl_fw_ini_allocation_tlv *alloc = (void *)tlv->data;
const struct iwl_fw_ini_allocation_tlv *alloc = (const void *)tlv->data;
u32 buf_location;
u32 alloc_id;
@ -145,9 +146,9 @@ err:
}
static int iwl_dbg_tlv_alloc_hcmd(struct iwl_trans *trans,
struct iwl_ucode_tlv *tlv)
const struct iwl_ucode_tlv *tlv)
{
struct iwl_fw_ini_hcmd_tlv *hcmd = (void *)tlv->data;
const struct iwl_fw_ini_hcmd_tlv *hcmd = (const void *)tlv->data;
u32 tp = le32_to_cpu(hcmd->time_point);
if (le32_to_cpu(tlv->length) <= sizeof(*hcmd))
@ -169,9 +170,9 @@ static int iwl_dbg_tlv_alloc_hcmd(struct iwl_trans *trans,
}
static int iwl_dbg_tlv_alloc_region(struct iwl_trans *trans,
struct iwl_ucode_tlv *tlv)
const struct iwl_ucode_tlv *tlv)
{
struct iwl_fw_ini_region_tlv *reg = (void *)tlv->data;
const struct iwl_fw_ini_region_tlv *reg = (const void *)tlv->data;
struct iwl_ucode_tlv **active_reg;
u32 id = le32_to_cpu(reg->id);
u32 type = le32_to_cpu(reg->type);
@ -214,9 +215,10 @@ static int iwl_dbg_tlv_alloc_region(struct iwl_trans *trans,
}
static int iwl_dbg_tlv_alloc_trigger(struct iwl_trans *trans,
struct iwl_ucode_tlv *tlv)
const struct iwl_ucode_tlv *tlv)
{
struct iwl_fw_ini_trigger_tlv *trig = (void *)tlv->data;
const struct iwl_fw_ini_trigger_tlv *trig = (const void *)tlv->data;
struct iwl_fw_ini_trigger_tlv *dup_trig;
u32 tp = le32_to_cpu(trig->time_point);
struct iwl_ucode_tlv *dup = NULL;
int ret;
@ -237,8 +239,8 @@ static int iwl_dbg_tlv_alloc_trigger(struct iwl_trans *trans,
GFP_KERNEL);
if (!dup)
return -ENOMEM;
trig = (void *)dup->data;
trig->occurrences = cpu_to_le32(-1);
dup_trig = (void *)dup->data;
dup_trig->occurrences = cpu_to_le32(-1);
tlv = dup;
}
@ -249,7 +251,7 @@ static int iwl_dbg_tlv_alloc_trigger(struct iwl_trans *trans,
}
static int (*dbg_tlv_alloc[])(struct iwl_trans *trans,
struct iwl_ucode_tlv *tlv) = {
const struct iwl_ucode_tlv *tlv) = {
[IWL_DBG_TLV_TYPE_DEBUG_INFO] = iwl_dbg_tlv_alloc_debug_info,
[IWL_DBG_TLV_TYPE_BUF_ALLOC] = iwl_dbg_tlv_alloc_buf_alloc,
[IWL_DBG_TLV_TYPE_HCMD] = iwl_dbg_tlv_alloc_hcmd,
@ -257,10 +259,10 @@ static int (*dbg_tlv_alloc[])(struct iwl_trans *trans,
[IWL_DBG_TLV_TYPE_TRIGGER] = iwl_dbg_tlv_alloc_trigger,
};
void iwl_dbg_tlv_alloc(struct iwl_trans *trans, struct iwl_ucode_tlv *tlv,
void iwl_dbg_tlv_alloc(struct iwl_trans *trans, const struct iwl_ucode_tlv *tlv,
bool ext)
{
struct iwl_fw_ini_header *hdr = (void *)&tlv->data[0];
const struct iwl_fw_ini_header *hdr = (const void *)&tlv->data[0];
u32 type = le32_to_cpu(tlv->type);
u32 tlv_idx = type - IWL_UCODE_TLV_DEBUG_BASE;
u32 domain = le32_to_cpu(hdr->domain);
@ -396,7 +398,7 @@ void iwl_dbg_tlv_free(struct iwl_trans *trans)
static int iwl_dbg_tlv_parse_bin(struct iwl_trans *trans, const u8 *data,
size_t len)
{
struct iwl_ucode_tlv *tlv;
const struct iwl_ucode_tlv *tlv;
u32 tlv_len;
while (len >= sizeof(*tlv)) {
@ -737,12 +739,12 @@ static void iwl_dbg_tlv_set_periodic_trigs(struct iwl_fw_runtime *fwrt)
}
}
static bool is_trig_data_contained(struct iwl_ucode_tlv *new,
struct iwl_ucode_tlv *old)
static bool is_trig_data_contained(const struct iwl_ucode_tlv *new,
const struct iwl_ucode_tlv *old)
{
struct iwl_fw_ini_trigger_tlv *new_trig = (void *)new->data;
struct iwl_fw_ini_trigger_tlv *old_trig = (void *)old->data;
__le32 *new_data = new_trig->data, *old_data = old_trig->data;
const struct iwl_fw_ini_trigger_tlv *new_trig = (const void *)new->data;
const struct iwl_fw_ini_trigger_tlv *old_trig = (const void *)old->data;
const __le32 *new_data = new_trig->data, *old_data = old_trig->data;
u32 new_dwords_num = iwl_tlv_array_len(new, new_trig, data);
u32 old_dwords_num = iwl_tlv_array_len(old, old_trig, data);
int i, j;
@ -957,6 +959,7 @@ static void iwl_dbg_tlv_init_cfg(struct iwl_fw_runtime *fwrt)
{
enum iwl_fw_ini_buffer_location *ini_dest = &fwrt->trans->dbg.ini_dest;
int ret, i;
u32 failed_alloc = 0;
if (*ini_dest != IWL_FW_INI_LOCATION_INVALID)
return;
@ -988,10 +991,43 @@ static void iwl_dbg_tlv_init_cfg(struct iwl_fw_runtime *fwrt)
continue;
ret = iwl_dbg_tlv_alloc_fragments(fwrt, i);
if (ret)
if (ret) {
IWL_WARN(fwrt,
"WRT: Failed to allocate DRAM buffer for allocation id %d, ret=%d\n",
i, ret);
failed_alloc |= BIT(i);
}
}
if (!failed_alloc)
return;
for (i = 0; i < ARRAY_SIZE(fwrt->trans->dbg.active_regions) && failed_alloc; i++) {
struct iwl_fw_ini_region_tlv *reg;
struct iwl_ucode_tlv **active_reg =
&fwrt->trans->dbg.active_regions[i];
u32 reg_type;
if (!*active_reg)
continue;
reg = (void *)(*active_reg)->data;
reg_type = le32_to_cpu(reg->type);
if (reg_type != IWL_FW_INI_REGION_DRAM_BUFFER ||
!(BIT(le32_to_cpu(reg->dram_alloc_id)) & failed_alloc))
continue;
IWL_DEBUG_FW(fwrt,
"WRT: removing allocation id %d from region id %d\n",
le32_to_cpu(reg->dram_alloc_id), i);
failed_alloc &= ~BIT(le32_to_cpu(reg->dram_alloc_id));
fwrt->trans->dbg.unsupported_region_msk |= BIT(i);
kfree(*active_reg);
*active_reg = NULL;
}
}

View File

@ -1,12 +1,14 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Copyright (C) 2018-2019 Intel Corporation
* Copyright (C) 2018-2020 Intel Corporation
*/
#ifndef __iwl_dbg_tlv_h__
#define __iwl_dbg_tlv_h__
#include <linux/device.h>
#include <linux/types.h>
#include <fw/file.h>
#include <fw/api/dbg-tlv.h>
/**
* struct iwl_dbg_tlv_node - debug TLV node
@ -43,7 +45,7 @@ struct iwl_fw_runtime;
void iwl_dbg_tlv_load_bin(struct device *dev, struct iwl_trans *trans);
void iwl_dbg_tlv_free(struct iwl_trans *trans);
void iwl_dbg_tlv_alloc(struct iwl_trans *trans, struct iwl_ucode_tlv *tlv,
void iwl_dbg_tlv_alloc(struct iwl_trans *trans, const struct iwl_ucode_tlv *tlv,
bool ext);
void iwl_dbg_tlv_init(struct iwl_trans *trans);
void iwl_dbg_tlv_time_point(struct iwl_fw_runtime *fwrt,

View File

@ -127,6 +127,7 @@ static void iwl_dealloc_ucode(struct iwl_drv *drv)
kfree(drv->fw.dbg.mem_tlv);
kfree(drv->fw.iml);
kfree(drv->fw.ucode_capa.cmd_versions);
kfree(drv->fw.phy_integration_ver);
for (i = 0; i < IWL_UCODE_TYPE_MAX; i++)
iwl_free_fw_img(drv, drv->fw.img + i);
@ -558,7 +559,7 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
bool *usniffer_images)
{
struct iwl_tlv_ucode_header *ucode = (void *)ucode_raw->data;
struct iwl_ucode_tlv *tlv;
const struct iwl_ucode_tlv *tlv;
size_t len = ucode_raw->size;
const u8 *data;
u32 tlv_len;
@ -1143,6 +1144,19 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
capa->n_cmd_versions =
tlv_len / sizeof(struct iwl_fw_cmd_version);
break;
case IWL_UCODE_TLV_PHY_INTEGRATION_VERSION:
if (drv->fw.phy_integration_ver) {
IWL_ERR(drv,
"phy integration str ignored, already exists\n");
break;
}
drv->fw.phy_integration_ver =
kmemdup(tlv_data, tlv_len, GFP_KERNEL);
if (!drv->fw.phy_integration_ver)
return -ENOMEM;
drv->fw.phy_integration_ver_len = tlv_len;
break;
default:
IWL_DEBUG_INFO(drv, "unknown TLV: %d\n", tlv_type);
break;

View File

@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright (C) 2005-2014, 2018-2019 Intel Corporation
* Copyright (C) 2005-2014, 2018-2020 Intel Corporation
* Copyright (C) 2015 Intel Mobile Communications GmbH
*/
#include <linux/types.h>
@ -711,12 +711,11 @@ void iwl_init_ht_hw_capab(struct iwl_trans *trans,
if (cfg->ht_params->ldpc)
ht_info->cap |= IEEE80211_HT_CAP_LDPC_CODING;
if ((trans->trans_cfg->mq_rx_supported &&
iwlwifi_mod_params.amsdu_size == IWL_AMSDU_DEF) ||
iwlwifi_mod_params.amsdu_size >= IWL_AMSDU_8K)
if (trans->trans_cfg->mq_rx_supported ||
iwlwifi_mod_params.amsdu_size >= IWL_AMSDU_8K)
ht_info->cap |= IEEE80211_HT_CAP_MAX_AMSDU;
ht_info->ampdu_factor = cfg->max_ht_ampdu_exponent;
ht_info->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
ht_info->ampdu_density = IEEE80211_HT_MPDU_DENSITY_4;
ht_info->mcs.rx_mask[0] = 0xFF;

View File

@ -66,10 +66,10 @@ IWL_EXPORT_SYMBOL(iwl_poll_bit);
u32 iwl_read_direct32(struct iwl_trans *trans, u32 reg)
{
u32 value = 0x5a5a5a5a;
unsigned long flags;
if (iwl_trans_grab_nic_access(trans, &flags)) {
if (iwl_trans_grab_nic_access(trans)) {
value = iwl_read32(trans, reg);
iwl_trans_release_nic_access(trans, &flags);
iwl_trans_release_nic_access(trans);
}
return value;
@ -78,22 +78,18 @@ IWL_EXPORT_SYMBOL(iwl_read_direct32);
void iwl_write_direct32(struct iwl_trans *trans, u32 reg, u32 value)
{
unsigned long flags;
if (iwl_trans_grab_nic_access(trans, &flags)) {
if (iwl_trans_grab_nic_access(trans)) {
iwl_write32(trans, reg, value);
iwl_trans_release_nic_access(trans, &flags);
iwl_trans_release_nic_access(trans);
}
}
IWL_EXPORT_SYMBOL(iwl_write_direct32);
void iwl_write_direct64(struct iwl_trans *trans, u64 reg, u64 value)
{
unsigned long flags;
if (iwl_trans_grab_nic_access(trans, &flags)) {
if (iwl_trans_grab_nic_access(trans)) {
iwl_write64(trans, reg, value);
iwl_trans_release_nic_access(trans, &flags);
iwl_trans_release_nic_access(trans);
}
}
IWL_EXPORT_SYMBOL(iwl_write_direct64);
@ -139,12 +135,11 @@ IWL_EXPORT_SYMBOL(iwl_write_prph64_no_grab);
u32 iwl_read_prph(struct iwl_trans *trans, u32 ofs)
{
unsigned long flags;
u32 val = 0x5a5a5a5a;
if (iwl_trans_grab_nic_access(trans, &flags)) {
if (iwl_trans_grab_nic_access(trans)) {
val = iwl_read_prph_no_grab(trans, ofs);
iwl_trans_release_nic_access(trans, &flags);
iwl_trans_release_nic_access(trans);
}
return val;
}
@ -152,12 +147,10 @@ IWL_EXPORT_SYMBOL(iwl_read_prph);
void iwl_write_prph_delay(struct iwl_trans *trans, u32 ofs, u32 val, u32 delay_ms)
{
unsigned long flags;
if (iwl_trans_grab_nic_access(trans, &flags)) {
if (iwl_trans_grab_nic_access(trans)) {
mdelay(delay_ms);
iwl_write_prph_no_grab(trans, ofs, val);
iwl_trans_release_nic_access(trans, &flags);
iwl_trans_release_nic_access(trans);
}
}
IWL_EXPORT_SYMBOL(iwl_write_prph_delay);
@ -179,13 +172,11 @@ int iwl_poll_prph_bit(struct iwl_trans *trans, u32 addr,
void iwl_set_bits_prph(struct iwl_trans *trans, u32 ofs, u32 mask)
{
unsigned long flags;
if (iwl_trans_grab_nic_access(trans, &flags)) {
if (iwl_trans_grab_nic_access(trans)) {
iwl_write_prph_no_grab(trans, ofs,
iwl_read_prph_no_grab(trans, ofs) |
mask);
iwl_trans_release_nic_access(trans, &flags);
iwl_trans_release_nic_access(trans);
}
}
IWL_EXPORT_SYMBOL(iwl_set_bits_prph);
@ -193,26 +184,23 @@ IWL_EXPORT_SYMBOL(iwl_set_bits_prph);
void iwl_set_bits_mask_prph(struct iwl_trans *trans, u32 ofs,
u32 bits, u32 mask)
{
unsigned long flags;
if (iwl_trans_grab_nic_access(trans, &flags)) {
if (iwl_trans_grab_nic_access(trans)) {
iwl_write_prph_no_grab(trans, ofs,
(iwl_read_prph_no_grab(trans, ofs) &
mask) | bits);
iwl_trans_release_nic_access(trans, &flags);
iwl_trans_release_nic_access(trans);
}
}
IWL_EXPORT_SYMBOL(iwl_set_bits_mask_prph);
void iwl_clear_bits_prph(struct iwl_trans *trans, u32 ofs, u32 mask)
{
unsigned long flags;
u32 val;
if (iwl_trans_grab_nic_access(trans, &flags)) {
if (iwl_trans_grab_nic_access(trans)) {
val = iwl_read_prph_no_grab(trans, ofs);
iwl_write_prph_no_grab(trans, ofs, (val & ~mask));
iwl_trans_release_nic_access(trans, &flags);
iwl_trans_release_nic_access(trans);
}
}
IWL_EXPORT_SYMBOL(iwl_clear_bits_prph);
@ -446,3 +434,39 @@ int iwl_finish_nic_init(struct iwl_trans *trans,
return err < 0 ? err : 0;
}
IWL_EXPORT_SYMBOL(iwl_finish_nic_init);
void iwl_trans_sync_nmi_with_addr(struct iwl_trans *trans, u32 inta_addr,
u32 sw_err_bit)
{
unsigned long timeout = jiffies + IWL_TRANS_NMI_TIMEOUT;
bool interrupts_enabled = test_bit(STATUS_INT_ENABLED, &trans->status);
/* if the interrupts were already disabled, there is no point in
* calling iwl_disable_interrupts
*/
if (interrupts_enabled)
iwl_trans_interrupts(trans, false);
iwl_force_nmi(trans);
while (time_after(timeout, jiffies)) {
u32 inta_hw = iwl_read32(trans, inta_addr);
/* Error detected by uCode */
if (inta_hw & sw_err_bit) {
/* Clear causes register */
iwl_write32(trans, inta_addr, inta_hw & sw_err_bit);
break;
}
mdelay(1);
}
/* enable interrupts only if they were already enabled before this
* function, to avoid a case where the driver enables interrupts before
* proper configuration was made
*/
if (interrupts_enabled)
iwl_trans_interrupts(trans, true);
iwl_trans_fw_error(trans);
}
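A sketch of how a transport is expected to use the helper above; the register name and error bit are assumptions borrowed from the MSI-X interrupt machinery, not taken from this hunk.

static void example_pcie_sync_nmi(struct iwl_trans *trans)
{
	/* interrupt-cause register and its SW error bit (assumed names) */
	iwl_trans_sync_nmi_with_addr(trans, CSR_MSIX_HW_INT_CAUSES_AD,
				     MSIX_HW_INT_CAUSES_REG_SW_ERR);
}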

View File

@ -453,8 +453,6 @@ static void iwl_init_vht_hw_capab(struct iwl_trans *trans,
const struct iwl_cfg *cfg = trans->cfg;
int num_rx_ants = num_of_ant(rx_chains);
int num_tx_ants = num_of_ant(tx_chains);
unsigned int max_ampdu_exponent = (cfg->max_vht_ampdu_exponent ?:
IEEE80211_VHT_MAX_AMPDU_1024K);
vht_cap->vht_supported = true;
@ -462,7 +460,7 @@ static void iwl_init_vht_hw_capab(struct iwl_trans *trans,
IEEE80211_VHT_CAP_RXSTBC_1 |
IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE |
3 << IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT |
max_ampdu_exponent <<
IEEE80211_VHT_MAX_AMPDU_1024K <<
IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT;
if (data->vht160_supported)
@ -585,6 +583,8 @@ static const struct ieee80211_sband_iftype_data iwl_he_capa[] = {
IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_2 |
IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_2,
.phy_cap_info[6] =
IEEE80211_HE_PHY_CAP6_TRIG_SU_BEAMFORMER_FB |
IEEE80211_HE_PHY_CAP6_TRIG_MU_BEAMFORMER_FB |
IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT,
.phy_cap_info[7] =
IEEE80211_HE_PHY_CAP7_POWER_BOOST_FACTOR_AR |

View File

@ -80,4 +80,5 @@ void iwl_nvm_fixups(u32 hw_id, unsigned int section, u8 *data,
*/
struct iwl_nvm_data *iwl_get_nvm(struct iwl_trans *trans,
const struct iwl_fw *fw);
#endif /* __iwl_nvm_parse_h__ */

View File

@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Copyright (C) 2005-2014, 2018-2019 Intel Corporation
* Copyright (C) 2005-2014, 2018-2020 Intel Corporation
* Copyright (C) 2013-2014 Intel Mobile Communications GmbH
* Copyright (C) 2015 Intel Deutschland GmbH
*/
@ -9,6 +9,7 @@
#include <linux/netdevice.h>
#include <linux/debugfs.h>
#include "iwl-dbg-tlv.h"
struct iwl_op_mode;
struct iwl_trans;
@ -83,6 +84,7 @@ struct iwl_cfg;
* @nic_config: configure NIC, called before firmware is started.
* May sleep
* @wimax_active: invoked when WiMax becomes active. May sleep
* @time_point: called when transport layer wants to collect debug data
*/
struct iwl_op_mode_ops {
struct iwl_op_mode *(*start)(struct iwl_trans *trans,
@ -104,6 +106,9 @@ struct iwl_op_mode_ops {
void (*cmd_queue_full)(struct iwl_op_mode *op_mode);
void (*nic_config)(struct iwl_op_mode *op_mode);
void (*wimax_active)(struct iwl_op_mode *op_mode);
void (*time_point)(struct iwl_op_mode *op_mode,
enum iwl_fw_ini_time_point tp_id,
union iwl_dbg_tlv_tp_data *tp_data);
};
int iwl_opmode_register(const char *name, const struct iwl_op_mode_ops *ops);
@ -196,4 +201,11 @@ static inline void iwl_op_mode_wimax_active(struct iwl_op_mode *op_mode)
op_mode->ops->wimax_active(op_mode);
}
static inline void iwl_op_mode_time_point(struct iwl_op_mode *op_mode,
enum iwl_fw_ini_time_point tp_id,
union iwl_dbg_tlv_tp_data *tp_data)
{
op_mode->ops->time_point(op_mode, tp_id, tp_data);
}
#endif /* __iwl_op_mode_h__ */

View File

@ -365,6 +365,7 @@ enum {
/* device family 22000 WPROT register */
#define PREG_PRPH_WPROT_22000 0xA04D00
#define SB_MODIFY_CFG_FLAG 0xA03088
#define SB_CPU_1_STATUS 0xA01E30
#define SB_CPU_2_STATUS 0xA01E34
#define UMAG_SB_CPU_1_STATUS 0xA038C0

View File

@ -13,6 +13,7 @@
#include "iwl-fh.h"
#include "queue/tx.h"
#include <linux/dmapool.h>
#include "fw/api/commands.h"
struct iwl_trans *iwl_trans_alloc(unsigned int priv_size,
struct device *dev,
@ -102,6 +103,9 @@ struct iwl_trans *iwl_trans_alloc(unsigned int priv_size,
return NULL;
}
/* Initialize the wait queue for commands */
init_waitqueue_head(&trans->wait_command_queue);
return trans;
}
@ -130,6 +134,19 @@ int iwl_trans_send_cmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
test_bit(STATUS_RFKILL_OPMODE, &trans->status)))
return -ERFKILL;
/*
* We can't test IWL_MVM_STATUS_IN_D3 in mvm->status because this
* bit is set early in the D3 flow, before we send all the commands
* that configure the firmware for D3 operation (power, patterns, ...)
* and we don't want to flag all those with CMD_SEND_IN_D3.
* So use the system_pm_mode instead. The only command sent after
* we set system_pm_mode is D3_CONFIG_CMD, which we now flag with
* CMD_SEND_IN_D3.
*/
if (unlikely(trans->system_pm_mode == IWL_PLAT_PM_MODE_D3 &&
!(cmd->flags & CMD_SEND_IN_D3)))
return -EHOSTDOWN;
if (unlikely(test_bit(STATUS_FW_ERROR, &trans->status)))
return -EIO;
@ -145,10 +162,12 @@ int iwl_trans_send_cmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
if (!(cmd->flags & CMD_ASYNC))
lock_map_acquire_read(&trans->sync_cmd_lockdep_map);
if (trans->wide_cmd_header && !iwl_cmd_groupid(cmd->id))
cmd->id = DEF_ID(cmd->id);
if (trans->wide_cmd_header && !iwl_cmd_groupid(cmd->id)) {
if (cmd->id != REPLY_ERROR)
cmd->id = DEF_ID(cmd->id);
}
ret = trans->ops->send_cmd(trans, cmd);
ret = iwl_trans_txq_send_hcmd(trans, cmd);
if (!(cmd->flags & CMD_ASYNC))
lock_map_release(&trans->sync_cmd_lockdep_map);

View File

@ -107,12 +107,16 @@ static inline u32 iwl_rx_packet_payload_len(const struct iwl_rx_packet *pkt)
* the response. The caller needs to call iwl_free_resp when done.
* @CMD_WANT_ASYNC_CALLBACK: the op_mode's async callback function must be
* called after this command completes. Valid only with CMD_ASYNC.
* @CMD_SEND_IN_D3: Allow the command to be sent in D3 mode, relevant to
* SUSPEND and RESUME commands. We are in D3 mode when we set
* trans->system_pm_mode to IWL_PLAT_PM_MODE_D3.
*/
enum CMD_MODE {
CMD_ASYNC = BIT(0),
CMD_WANT_SKB = BIT(1),
CMD_SEND_IN_RFKILL = BIT(2),
CMD_WANT_ASYNC_CALLBACK = BIT(3),
CMD_SEND_IN_D3 = BIT(4),
};
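A minimal sketch of how the new flag is meant to be used, mirroring the D3_CONFIG_CMD change in the d3.c hunks later in this series; the variable name is illustrative.

static const struct iwl_host_cmd example_d3_cfg_cmd = {
	.id = D3_CONFIG_CMD,
	/* accepted even after system_pm_mode is set to IWL_PLAT_PM_MODE_D3 */
	.flags = CMD_WANT_SKB | CMD_SEND_IN_D3,
};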
#define DEF_CMD_PAYLOAD_SIZE 320
@ -514,6 +518,7 @@ struct iwl_trans_rxq_dma_data {
* of the trans debugfs
* @set_pnvm: set the pnvm data in the prph scratch buffer, inside the
* context info.
* @interrupts: disable/enable interrupts to transport
*/
struct iwl_trans_ops {
@ -574,19 +579,17 @@ struct iwl_trans_ops {
const struct iwl_trans_config *trans_cfg);
void (*set_pmi)(struct iwl_trans *trans, bool state);
void (*sw_reset)(struct iwl_trans *trans);
bool (*grab_nic_access)(struct iwl_trans *trans, unsigned long *flags);
void (*release_nic_access)(struct iwl_trans *trans,
unsigned long *flags);
bool (*grab_nic_access)(struct iwl_trans *trans);
void (*release_nic_access)(struct iwl_trans *trans);
void (*set_bits_mask)(struct iwl_trans *trans, u32 reg, u32 mask,
u32 value);
int (*suspend)(struct iwl_trans *trans);
void (*resume)(struct iwl_trans *trans);
struct iwl_trans_dump_data *(*dump_data)(struct iwl_trans *trans,
u32 dump_mask);
void (*debugfs_cleanup)(struct iwl_trans *trans);
void (*sync_nmi)(struct iwl_trans *trans);
int (*set_pnvm)(struct iwl_trans *trans, const void *data, u32 len);
void (*interrupts)(struct iwl_trans *trans, bool enable);
};
/**
@ -742,6 +745,7 @@ struct iwl_trans_debug {
bool hw_error;
enum iwl_fw_ini_buffer_location ini_dest;
u64 unsupported_region_msk;
struct iwl_ucode_tlv *active_regions[IWL_FW_INI_MAX_REGION_ID];
struct list_head debug_info_tlv_list;
struct iwl_dbg_tlv_time_point_data
@ -914,6 +918,7 @@ struct iwl_trans_txqs {
* @pm_support: set to true in start_hw if link pm is supported
* @ltr_enabled: set to true if the LTR is enabled
* @wide_cmd_header: true when ucode supports wide command header format
* @wait_command_queue: wait queue for sync commands
* @num_rx_queues: number of RX queues allocated by the transport;
* the transport must set this before calling iwl_drv_start()
* @iml_len: the length of the image loader
@ -957,6 +962,7 @@ struct iwl_trans {
int command_groups_size;
bool wide_cmd_header;
wait_queue_head_t wait_command_queue;
u8 num_rx_queues;
size_t iml_len;
@ -1073,20 +1079,6 @@ static inline int iwl_trans_d3_resume(struct iwl_trans *trans,
return trans->ops->d3_resume(trans, status, test, reset);
}
static inline int iwl_trans_suspend(struct iwl_trans *trans)
{
if (!trans->ops->suspend)
return 0;
return trans->ops->suspend(trans);
}
static inline void iwl_trans_resume(struct iwl_trans *trans)
{
if (trans->ops->resume)
trans->ops->resume(trans);
}
static inline struct iwl_trans_dump_data *
iwl_trans_dump_data(struct iwl_trans *trans, u32 dump_mask)
{
@ -1375,14 +1367,14 @@ iwl_trans_set_bits_mask(struct iwl_trans *trans, u32 reg, u32 mask, u32 value)
trans->ops->set_bits_mask(trans, reg, mask, value);
}
#define iwl_trans_grab_nic_access(trans, flags) \
#define iwl_trans_grab_nic_access(trans) \
__cond_lock(nic_access, \
likely((trans)->ops->grab_nic_access(trans, flags)))
likely((trans)->ops->grab_nic_access(trans)))
static inline void __releases(nic_access)
iwl_trans_release_nic_access(struct iwl_trans *trans, unsigned long *flags)
iwl_trans_release_nic_access(struct iwl_trans *trans)
{
trans->ops->release_nic_access(trans, flags);
trans->ops->release_nic_access(trans);
__release(nic_access);
}
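As a quick illustration of the calling convention defined above, a sketch of a caller after the removal of the "unsigned long flags" cookie; the helper name is made up and SB_MODIFY_CFG_FLAG is used only as an example PRPH offset.

static u32 example_read_sb_modify_cfg(struct iwl_trans *trans)
{
	u32 val = 0x5a5a5a5a;

	/* grab/release are now paired with no flags argument */
	if (iwl_trans_grab_nic_access(trans)) {
		val = iwl_read_prph_no_grab(trans, SB_MODIFY_CFG_FLAG);
		iwl_trans_release_nic_access(trans);
	}
	return val;
}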
@ -1409,6 +1401,9 @@ static inline void iwl_trans_sync_nmi(struct iwl_trans *trans)
trans->ops->sync_nmi(trans);
}
void iwl_trans_sync_nmi_with_addr(struct iwl_trans *trans, u32 inta_addr,
u32 sw_err_bit);
static inline int iwl_trans_set_pnvm(struct iwl_trans *trans,
const void *data, u32 len)
{
@ -1430,6 +1425,12 @@ static inline bool iwl_trans_dbg_ini_valid(struct iwl_trans *trans)
trans->dbg.external_ini_cfg != IWL_INI_CFG_STATE_NOT_LOADED;
}
static inline void iwl_trans_interrupts(struct iwl_trans *trans, bool enable)
{
if (trans->ops->interrupts)
trans->ops->interrupts(trans, enable);
}
/*****************************************************
* transport helper functions
*****************************************************/

View File

@ -6,6 +6,7 @@ iwlmvm-y += scan.o time-event.o rs.o rs-fw.o
iwlmvm-y += power.o coex.o
iwlmvm-y += tt.o offloading.o tdls.o
iwlmvm-y += ftm-responder.o ftm-initiator.o
iwlmvm-y += rfi.o
iwlmvm-$(CONFIG_IWLWIFI_DEBUGFS) += debugfs.o debugfs-vif.o
iwlmvm-$(CONFIG_IWLWIFI_LEDS) += led.o
iwlmvm-$(CONFIG_PM) += d3.o

View File

@ -975,7 +975,7 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
};
struct iwl_host_cmd d3_cfg_cmd = {
.id = D3_CONFIG_CMD,
.flags = CMD_WANT_SKB,
.flags = CMD_WANT_SKB | CMD_SEND_IN_D3,
.data[0] = &d3_cfg_cmd_data,
.len[0] = sizeof(d3_cfg_cmd_data),
};
@ -997,6 +997,8 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
set_bit(IWL_MVM_STATUS_IN_D3, &mvm->status);
synchronize_net();
vif = iwl_mvm_get_bss_vif(mvm);
if (IS_ERR_OR_NULL(vif)) {
ret = 1;
@ -1065,6 +1067,8 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
if (mvm->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_9000)
iwl_fw_dbg_stop_restart_recording(&mvm->fwrt, NULL, true);
mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_D3;
/* must be last -- this switches firmware state */
ret = iwl_mvm_send_cmd(mvm, &d3_cfg_cmd);
if (ret)
@ -1103,19 +1107,11 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
int iwl_mvm_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
struct iwl_trans *trans = mvm->trans;
int ret;
iwl_mvm_pause_tcm(mvm, true);
iwl_fw_runtime_suspend(&mvm->fwrt);
ret = iwl_trans_suspend(trans);
if (ret)
return ret;
trans->system_pm_mode = IWL_PLAT_PM_MODE_D3;
return __iwl_mvm_suspend(hw, wowlan, false);
}
@ -2047,12 +2043,10 @@ static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
iwl_fw_dbg_collect_desc(&mvm->fwrt, &iwl_dump_desc_assert,
false, 0);
ret = 1;
mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_DISABLED;
goto err;
}
iwl_dbg_tlv_time_point(&mvm->fwrt, IWL_FW_INI_TIME_POINT_HOST_D3_END,
NULL);
ret = iwl_trans_d3_resume(mvm->trans, &d3_status, test, !unified_image);
if (ret)
goto err;
@ -2065,7 +2059,7 @@ static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
if (d0i3_first) {
struct iwl_host_cmd cmd = {
.id = D0I3_END_CMD,
.flags = CMD_WANT_SKB,
.flags = CMD_WANT_SKB | CMD_SEND_IN_D3,
};
int len;
@ -2098,6 +2092,8 @@ static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
}
}
mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_DISABLED;
/*
* Query the current location and source from the D3 firmware so we
* can play it back when we re-initialize the D0 firmware
@ -2169,21 +2165,12 @@ out:
return 1;
}
static int iwl_mvm_resume_d3(struct iwl_mvm *mvm)
{
iwl_trans_resume(mvm->trans);
return __iwl_mvm_resume(mvm, false);
}
int iwl_mvm_resume(struct ieee80211_hw *hw)
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
int ret;
ret = iwl_mvm_resume_d3(mvm);
mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_DISABLED;
ret = __iwl_mvm_resume(mvm, false);
iwl_mvm_resume_tcm(mvm);
@ -2210,10 +2197,6 @@ static int iwl_mvm_d3_test_open(struct inode *inode, struct file *file)
file->private_data = inode->i_private;
synchronize_net();
mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_D3;
iwl_mvm_pause_tcm(mvm, true);
iwl_fw_runtime_suspend(&mvm->fwrt);
@ -2283,8 +2266,6 @@ static int iwl_mvm_d3_test_release(struct inode *inode, struct file *file)
iwl_fw_runtime_resume(&mvm->fwrt);
mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_DISABLED;
iwl_abort_notification_waits(&mvm->notif_wait);
if (!unified_image) {
int remaining_time = 10;

View File

@ -91,7 +91,7 @@ static ssize_t iwl_dbgfs_tx_flush_write(struct iwl_mvm *mvm, char *buf,
"FLUSHING all tids queues on sta_id = %d\n",
flush_arg);
mutex_lock(&mvm->mutex);
ret = iwl_mvm_flush_sta_tids(mvm, flush_arg, 0xFFFF, 0)
ret = iwl_mvm_flush_sta_tids(mvm, flush_arg, 0xFFFF)
? : count;
mutex_unlock(&mvm->mutex);
return ret;
@ -101,7 +101,7 @@ static ssize_t iwl_dbgfs_tx_flush_write(struct iwl_mvm *mvm, char *buf,
flush_arg);
mutex_lock(&mvm->mutex);
ret = iwl_mvm_flush_tx_path(mvm, flush_arg, 0) ? : count;
ret = iwl_mvm_flush_tx_path(mvm, flush_arg) ? : count;
mutex_unlock(&mvm->mutex);
return ret;
@ -712,6 +712,30 @@ static ssize_t iwl_dbgfs_fw_ver_read(struct file *file, char __user *user_buf,
return ret;
}
static ssize_t iwl_dbgfs_phy_integration_ver_read(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos)
{
struct iwl_mvm *mvm = file->private_data;
char *buf;
size_t bufsz;
int pos;
ssize_t ret;
bufsz = mvm->fw->phy_integration_ver_len + 2;
buf = kmalloc(bufsz, GFP_KERNEL);
if (!buf)
return -ENOMEM;
pos = scnprintf(buf, bufsz, "%.*s\n", mvm->fw->phy_integration_ver_len,
mvm->fw->phy_integration_ver);
ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
kfree(buf);
return ret;
}
#define PRINT_STATS_LE32(_struct, _memb) \
pos += scnprintf(buf + pos, bufsz - pos, \
fmt_table, #_memb, \
@ -1117,24 +1141,22 @@ static ssize_t iwl_dbgfs_inject_packet_write(struct iwl_mvm *mvm,
char *buf, size_t count,
loff_t *ppos)
{
struct iwl_op_mode *opmode = container_of((void *)mvm,
struct iwl_op_mode,
op_mode_specific);
struct iwl_rx_cmd_buffer rxb = {
._rx_page_order = 0,
.truesize = 0, /* not used */
._offset = 0,
};
struct iwl_rx_packet *pkt;
struct iwl_rx_mpdu_desc *desc;
int bin_len = count / 2;
int ret = -EINVAL;
size_t mpdu_cmd_hdr_size = (mvm->trans->trans_cfg->device_family >=
IWL_DEVICE_FAMILY_AX210) ?
sizeof(struct iwl_rx_mpdu_desc) :
IWL_RX_DESC_SIZE_V1;
if (!iwl_mvm_firmware_running(mvm))
return -EIO;
/* supporting only 9000 descriptor */
/* supporting only MQ RX */
if (!mvm->trans->trans_cfg->mq_rx_supported)
return -ENOTSUPP;
@ -1147,23 +1169,13 @@ static ssize_t iwl_dbgfs_inject_packet_write(struct iwl_mvm *mvm,
if (ret)
goto out;
/* avoid invalid memory access */
if (bin_len < sizeof(*pkt) + mpdu_cmd_hdr_size)
goto out;
/* check this is RX packet */
if (WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd) !=
WIDE_ID(LEGACY_GROUP, REPLY_RX_MPDU_CMD))
goto out;
/* check the length in metadata matches actual received length */
desc = (void *)pkt->data;
if (le16_to_cpu(desc->mpdu_len) !=
(bin_len - mpdu_cmd_hdr_size - sizeof(*pkt)))
/* avoid invalid memory access and malformed packet */
if (bin_len < sizeof(*pkt) ||
bin_len != sizeof(*pkt) + iwl_rx_packet_payload_len(pkt))
goto out;
local_bh_disable();
iwl_mvm_rx_mpdu_mq(mvm, NULL, &rxb, 0);
iwl_mvm_rx_mq(opmode, NULL, &rxb);
local_bh_enable();
ret = 0;
@ -1337,6 +1349,24 @@ static ssize_t iwl_dbgfs_fw_dbg_collect_write(struct iwl_mvm *mvm,
return count;
}
static ssize_t iwl_dbgfs_dbg_time_point_write(struct iwl_mvm *mvm,
char *buf, size_t count,
loff_t *ppos)
{
u32 timepoint;
if (kstrtou32(buf, 0, &timepoint))
return -EINVAL;
if (timepoint == IWL_FW_INI_TIME_POINT_INVALID ||
timepoint >= IWL_FW_INI_TIME_POINT_NUM)
return -EINVAL;
iwl_dbg_tlv_time_point(&mvm->fwrt, timepoint, NULL);
return count;
}
#define ADD_TEXT(...) pos += scnprintf(buf + pos, bufsz - pos, __VA_ARGS__)
#ifdef CONFIG_IWLWIFI_BCAST_FILTERING
static ssize_t iwl_dbgfs_bcast_filters_read(struct file *file,
@ -1746,6 +1776,69 @@ iwl_dbgfs_ltr_config_write(struct iwl_mvm *mvm,
return ret ?: count;
}
static ssize_t iwl_dbgfs_rfi_freq_table_write(struct iwl_mvm *mvm, char *buf,
size_t count, loff_t *ppos)
{
int ret = 0;
u16 op_id;
if (kstrtou16(buf, 10, &op_id))
return -EINVAL;
/* value zero triggers re-sending the default table to the device */
if (!op_id)
ret = iwl_rfi_send_config_cmd(mvm, NULL);
else
ret = -EOPNOTSUPP; /* in the future a new table will be added */
return ret ?: count;
}
/* The size computation is as follows:
* each number needs at most 3 characters and the number of rows equals the
* size of the table; so we need 5 chars for the "freq: " part, and each
* (channel, band) tuple afterwards needs 6 characters for its two numbers
* plus 5 for the punctuation around them.
*/
#define IWL_RFI_BUF_SIZE (IWL_RFI_LUT_INSTALLED_SIZE *\
(5 + IWL_RFI_LUT_ENTRY_CHANNELS_NUM * (6 + 5)))
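For a feel of the resulting buffer size, a worked example under assumed values that are not part of this hunk (IWL_RFI_LUT_INSTALLED_SIZE = 39, IWL_RFI_LUT_ENTRY_CHANNELS_NUM = 15):

/*
 * per row:  5 + 15 * (6 + 5) = 170 bytes
 * total:   39 * 170          = 6630 bytes
 */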
static ssize_t iwl_dbgfs_rfi_freq_table_read(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos)
{
struct iwl_mvm *mvm = file->private_data;
struct iwl_rfi_freq_table_resp_cmd *resp;
u32 status;
char buf[IWL_RFI_BUF_SIZE];
int i, j, pos = 0;
resp = iwl_rfi_get_freq_table(mvm);
if (IS_ERR(resp))
return PTR_ERR(resp);
status = le32_to_cpu(resp->status);
if (status != RFI_FREQ_TABLE_OK) {
scnprintf(buf, IWL_RFI_BUF_SIZE, "status = %d\n", status);
goto out;
}
for (i = 0; i < ARRAY_SIZE(resp->table); i++) {
pos += scnprintf(buf + pos, IWL_RFI_BUF_SIZE - pos, "%d: ",
resp->table[i].freq);
for (j = 0; j < ARRAY_SIZE(resp->table[i].channels); j++)
pos += scnprintf(buf + pos, IWL_RFI_BUF_SIZE - pos,
"(%d, %d) ",
resp->table[i].channels[j],
resp->table[i].bands[j]);
pos += scnprintf(buf + pos, IWL_RFI_BUF_SIZE - pos, "\n");
}
out:
kfree(resp);
return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
}
MVM_DEBUGFS_READ_WRITE_FILE_OPS(prph_reg, 64);
/* Device wide debugfs entries */
@ -1766,6 +1859,7 @@ MVM_DEBUGFS_READ_WRITE_FILE_OPS(disable_power_off, 64);
MVM_DEBUGFS_READ_FILE_OPS(fw_rx_stats);
MVM_DEBUGFS_READ_FILE_OPS(drv_rx_stats);
MVM_DEBUGFS_READ_FILE_OPS(fw_ver);
MVM_DEBUGFS_READ_FILE_OPS(phy_integration_ver);
MVM_DEBUGFS_WRITE_FILE_OPS(fw_restart, 10);
MVM_DEBUGFS_WRITE_FILE_OPS(fw_nmi, 10);
MVM_DEBUGFS_WRITE_FILE_OPS(bt_tx_prio, 10);
@ -1773,6 +1867,7 @@ MVM_DEBUGFS_WRITE_FILE_OPS(bt_force_ant, 10);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(scan_ant_rxchain, 8);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(fw_dbg_conf, 8);
MVM_DEBUGFS_WRITE_FILE_OPS(fw_dbg_collect, 64);
MVM_DEBUGFS_WRITE_FILE_OPS(dbg_time_point, 64);
MVM_DEBUGFS_WRITE_FILE_OPS(indirection_tbl,
(IWL_RSS_INDIRECTION_TABLE_SIZE * 2));
MVM_DEBUGFS_WRITE_FILE_OPS(inject_packet, 512);
@ -1795,6 +1890,7 @@ MVM_DEBUGFS_READ_WRITE_STA_FILE_OPS(amsdu_len, 16);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(he_sniffer_params, 32);
MVM_DEBUGFS_WRITE_FILE_OPS(ltr_config, 512);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(rfi_freq_table, 16);
static ssize_t iwl_dbgfs_mem_read(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
@ -1941,26 +2037,24 @@ void iwl_mvm_sta_add_debugfs(struct ieee80211_hw *hw,
MVM_DEBUGFS_ADD_STA_FILE(amsdu_len, dir, 0600);
}
void iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir)
void iwl_mvm_dbgfs_register(struct iwl_mvm *mvm)
{
struct dentry *bcast_dir __maybe_unused;
char buf[100];
spin_lock_init(&mvm->drv_stats_lock);
mvm->debugfs_dir = dbgfs_dir;
MVM_DEBUGFS_ADD_FILE(tx_flush, mvm->debugfs_dir, 0200);
MVM_DEBUGFS_ADD_FILE(sta_drain, mvm->debugfs_dir, 0200);
MVM_DEBUGFS_ADD_FILE(sram, mvm->debugfs_dir, 0600);
MVM_DEBUGFS_ADD_FILE(set_nic_temperature, mvm->debugfs_dir, 0600);
MVM_DEBUGFS_ADD_FILE(nic_temp, dbgfs_dir, 0400);
MVM_DEBUGFS_ADD_FILE(ctdp_budget, dbgfs_dir, 0400);
MVM_DEBUGFS_ADD_FILE(stop_ctdp, dbgfs_dir, 0200);
MVM_DEBUGFS_ADD_FILE(force_ctkill, dbgfs_dir, 0200);
MVM_DEBUGFS_ADD_FILE(stations, dbgfs_dir, 0400);
MVM_DEBUGFS_ADD_FILE(bt_notif, dbgfs_dir, 0400);
MVM_DEBUGFS_ADD_FILE(bt_cmd, dbgfs_dir, 0400);
MVM_DEBUGFS_ADD_FILE(nic_temp, mvm->debugfs_dir, 0400);
MVM_DEBUGFS_ADD_FILE(ctdp_budget, mvm->debugfs_dir, 0400);
MVM_DEBUGFS_ADD_FILE(stop_ctdp, mvm->debugfs_dir, 0200);
MVM_DEBUGFS_ADD_FILE(force_ctkill, mvm->debugfs_dir, 0200);
MVM_DEBUGFS_ADD_FILE(stations, mvm->debugfs_dir, 0400);
MVM_DEBUGFS_ADD_FILE(bt_notif, mvm->debugfs_dir, 0400);
MVM_DEBUGFS_ADD_FILE(bt_cmd, mvm->debugfs_dir, 0400);
MVM_DEBUGFS_ADD_FILE(disable_power_off, mvm->debugfs_dir, 0600);
MVM_DEBUGFS_ADD_FILE(fw_ver, mvm->debugfs_dir, 0400);
MVM_DEBUGFS_ADD_FILE(fw_rx_stats, mvm->debugfs_dir, 0400);
@ -1978,8 +2072,12 @@ void iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir)
MVM_DEBUGFS_ADD_FILE(inject_packet, mvm->debugfs_dir, 0200);
MVM_DEBUGFS_ADD_FILE(inject_beacon_ie, mvm->debugfs_dir, 0200);
MVM_DEBUGFS_ADD_FILE(inject_beacon_ie_restore, mvm->debugfs_dir, 0200);
MVM_DEBUGFS_ADD_FILE(rfi_freq_table, mvm->debugfs_dir, 0600);
if (mvm->fw->phy_integration_ver)
MVM_DEBUGFS_ADD_FILE(phy_integration_ver, mvm->debugfs_dir, 0400);
#ifdef CONFIG_ACPI
MVM_DEBUGFS_ADD_FILE(sar_geo_profile, dbgfs_dir, 0400);
MVM_DEBUGFS_ADD_FILE(sar_geo_profile, mvm->debugfs_dir, 0400);
#endif
MVM_DEBUGFS_ADD_FILE(he_sniffer_params, mvm->debugfs_dir, 0600);
@ -2031,12 +2129,13 @@ void iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir)
debugfs_create_blob("nvm_reg", S_IRUSR,
mvm->debugfs_dir, &mvm->nvm_reg_blob);
debugfs_create_file("mem", 0600, dbgfs_dir, mvm, &iwl_dbgfs_mem_ops);
debugfs_create_file("mem", 0600, mvm->debugfs_dir, mvm,
&iwl_dbgfs_mem_ops);
/*
* Create a symlink with mac80211. It will be removed when mac80211
* exits (before the opmode exits, which removes the target.)
*/
snprintf(buf, 100, "../../%pd2", dbgfs_dir->d_parent);
snprintf(buf, 100, "../../%pd2", mvm->debugfs_dir->d_parent);
debugfs_create_symlink("iwlwifi", mvm->hw->wiphy->debugfsdir, buf);
}

View File

@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Copyright (C) 2012-2014, 2018 Intel Corporation
* Copyright (C) 2012-2014, 2018, 2020 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@ -36,5 +36,6 @@
#include "fw/api/stats.h"
#include "fw/api/location.h"
#include "fw/api/tx.h"
#include "fw/api/rfi.h"
#endif /* __fw_api_h__ */

View File

@ -6,6 +6,7 @@
*/
#include <net/mac80211.h>
#include <linux/netdevice.h>
#include <linux/dmi.h>
#include "iwl-trans.h"
#include "iwl-op-mode.h"
@ -475,9 +476,13 @@ static int iwl_run_unified_mvm_ucode(struct iwl_mvm *mvm)
/* Load NVM to NIC if needed */
if (mvm->nvm_file_name) {
iwl_read_external_nvm(mvm->trans, mvm->nvm_file_name,
mvm->nvm_sections);
iwl_mvm_load_nvm_to_nic(mvm);
ret = iwl_read_external_nvm(mvm->trans, mvm->nvm_file_name,
mvm->nvm_sections);
if (ret)
goto error;
ret = iwl_mvm_load_nvm_to_nic(mvm);
if (ret)
goto error;
}
if (IWL_MVM_PARSE_NVM && !mvm->nvm_data) {
@ -633,6 +638,8 @@ int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm)
iwl_wait_phy_db_entry,
mvm->phy_db);
iwl_dbg_tlv_time_point(&mvm->fwrt, IWL_FW_INI_TIME_POINT_EARLY, NULL);
/* Will also start the device */
ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_INIT);
if (ret) {
@ -656,8 +663,11 @@ int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm)
}
/* In case we read the NVM from external file, load it to the NIC */
if (mvm->nvm_file_name)
iwl_mvm_load_nvm_to_nic(mvm);
if (mvm->nvm_file_name) {
ret = iwl_mvm_load_nvm_to_nic(mvm);
if (ret)
goto remove_notif;
}
WARN_ONCE(mvm->nvm_data->nvm_version < mvm->trans->cfg->nvm_ver,
"Too old NVM version (0x%0x, required = 0x%0x)",
@ -859,12 +869,10 @@ static int iwl_mvm_sar_geo_init(struct iwl_mvm *mvm)
if (cmd_ver == 3) {
len = sizeof(cmd.v3);
n_bands = ARRAY_SIZE(cmd.v3.table[0]);
cmd.v3.table_revision = cpu_to_le32(mvm->fwrt.geo_rev);
} else if (fw_has_api(&mvm->fwrt.fw->ucode_capa,
IWL_UCODE_TLV_API_SAR_TABLE_VER)) {
len = sizeof(cmd.v2);
n_bands = ARRAY_SIZE(cmd.v2.table[0]);
cmd.v2.table_revision = cpu_to_le32(mvm->fwrt.geo_rev);
} else {
len = sizeof(cmd.v1);
n_bands = ARRAY_SIZE(cmd.v1.table[0]);
@ -884,6 +892,16 @@ static int iwl_mvm_sar_geo_init(struct iwl_mvm *mvm)
if (ret)
return 0;
/*
* Set the revision on versions that contain it.
* This must be done after calling iwl_sar_geo_init().
*/
if (cmd_ver == 3)
cmd.v3.table_revision = cpu_to_le32(mvm->fwrt.geo_rev);
else if (fw_has_api(&mvm->fwrt.fw->ucode_capa,
IWL_UCODE_TLV_API_SAR_TABLE_VER))
cmd.v2.table_revision = cpu_to_le32(mvm->fwrt.geo_rev);
return iwl_mvm_send_cmd_pdu(mvm,
WIDE_ID(PHY_OPS_GROUP, GEO_TX_POWER_LIMIT),
0, len, &cmd);
@ -892,7 +910,6 @@ static int iwl_mvm_sar_geo_init(struct iwl_mvm *mvm)
static int iwl_mvm_get_ppag_table(struct iwl_mvm *mvm)
{
union acpi_object *wifi_pkg, *data, *enabled;
union iwl_ppag_table_cmd ppag_table;
int i, j, ret, tbl_rev, num_sub_bands;
int idx = 2;
s8 *gain;
@ -946,8 +963,8 @@ read_table:
goto out_free;
}
ppag_table.v1.enabled = cpu_to_le32(enabled->integer.value);
if (!ppag_table.v1.enabled) {
mvm->fwrt.ppag_table.v1.enabled = cpu_to_le32(enabled->integer.value);
if (!mvm->fwrt.ppag_table.v1.enabled) {
ret = 0;
goto out_free;
}
@ -962,16 +979,23 @@ read_table:
union acpi_object *ent;
ent = &wifi_pkg->package.elements[idx++];
if (ent->type != ACPI_TYPE_INTEGER ||
(j == 0 && ent->integer.value > ACPI_PPAG_MAX_LB) ||
(j == 0 && ent->integer.value < ACPI_PPAG_MIN_LB) ||
(j != 0 && ent->integer.value > ACPI_PPAG_MAX_HB) ||
(j != 0 && ent->integer.value < ACPI_PPAG_MIN_HB)) {
ppag_table.v1.enabled = cpu_to_le32(0);
if (ent->type != ACPI_TYPE_INTEGER) {
ret = -EINVAL;
goto out_free;
}
gain[i * num_sub_bands + j] = ent->integer.value;
if ((j == 0 &&
(gain[i * num_sub_bands + j] > ACPI_PPAG_MAX_LB ||
gain[i * num_sub_bands + j] < ACPI_PPAG_MIN_LB)) ||
(j != 0 &&
(gain[i * num_sub_bands + j] > ACPI_PPAG_MAX_HB ||
gain[i * num_sub_bands + j] < ACPI_PPAG_MIN_HB))) {
mvm->fwrt.ppag_table.v1.enabled = cpu_to_le32(0);
ret = -EINVAL;
goto out_free;
}
}
}
ret = 0;
@ -984,7 +1008,6 @@ int iwl_mvm_ppag_send_cmd(struct iwl_mvm *mvm)
{
u8 cmd_ver;
int i, j, ret, num_sub_bands, cmd_size;
union iwl_ppag_table_cmd ppag_table;
s8 *gain;
if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_SET_PPAG)) {
@ -1003,7 +1026,7 @@ int iwl_mvm_ppag_send_cmd(struct iwl_mvm *mvm)
if (cmd_ver == 1) {
num_sub_bands = IWL_NUM_SUB_BANDS;
gain = mvm->fwrt.ppag_table.v1.gain[0];
cmd_size = sizeof(ppag_table.v1);
cmd_size = sizeof(mvm->fwrt.ppag_table.v1);
if (mvm->fwrt.ppag_ver == 2) {
IWL_DEBUG_RADIO(mvm,
"PPAG table is v2 but FW supports v1, sending truncated table\n");
@ -1011,7 +1034,7 @@ int iwl_mvm_ppag_send_cmd(struct iwl_mvm *mvm)
} else if (cmd_ver == 2) {
num_sub_bands = IWL_NUM_SUB_BANDS_V2;
gain = mvm->fwrt.ppag_table.v2.gain[0];
cmd_size = sizeof(ppag_table.v2);
cmd_size = sizeof(mvm->fwrt.ppag_table.v2);
if (mvm->fwrt.ppag_ver == 1) {
IWL_DEBUG_RADIO(mvm,
"PPAG table is v1 but FW supports v2, sending padded table\n");
@ -1031,7 +1054,7 @@ int iwl_mvm_ppag_send_cmd(struct iwl_mvm *mvm)
IWL_DEBUG_RADIO(mvm, "Sending PER_PLATFORM_ANT_GAIN_CMD\n");
ret = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(PHY_OPS_GROUP,
PER_PLATFORM_ANT_GAIN_CMD),
0, cmd_size, &ppag_table);
0, cmd_size, &mvm->fwrt.ppag_table);
if (ret < 0)
IWL_ERR(mvm, "failed to send PER_PLATFORM_ANT_GAIN_CMD (%d)\n",
ret);
@ -1039,6 +1062,29 @@ int iwl_mvm_ppag_send_cmd(struct iwl_mvm *mvm)
return ret;
}
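/*
 * PPAG (Per-Platform Antenna Gain) is only sent to the firmware on
 * platforms whose DMI system vendor matches one of the entries below;
 * on any other platform the table is forced to disabled before use.
 */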
static const struct dmi_system_id dmi_ppag_approved_list[] = {
{ .ident = "HP",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "HP"),
},
},
{ .ident = "SAMSUNG",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD"),
},
},
{ .ident = "MSFT",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
},
},
{ .ident = "ASUS",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTek COMPUTER INC."),
},
},
{},
};
static int iwl_mvm_ppag_init(struct iwl_mvm *mvm)
{
int ret;
@ -1050,6 +1096,15 @@ static int iwl_mvm_ppag_init(struct iwl_mvm *mvm)
ret);
return 0;
}
if (!dmi_check_system(dmi_ppag_approved_list)) {
IWL_DEBUG_RADIO(mvm,
"System vendor '%s' is not in the approved list, disabling PPAG.\n",
dmi_get_system_info(DMI_SYS_VENDOR));
mvm->fwrt.ppag_table.v1.enabled = cpu_to_le32(0);
return 0;
}
return iwl_mvm_ppag_send_cmd(mvm);
}
@ -1093,7 +1148,8 @@ static u8 iwl_mvm_eval_dsm_indonesia_5g2(struct iwl_mvm *mvm)
u8 value;
int ret = iwl_acpi_get_dsm_u8((&mvm->fwrt)->dev, 0,
DSM_FUNC_ENABLE_INDONESIA_5G2, &value);
DSM_FUNC_ENABLE_INDONESIA_5G2,
&iwl_guid, &value);
if (ret < 0)
IWL_DEBUG_RADIO(mvm,
@ -1114,11 +1170,36 @@ static u8 iwl_mvm_eval_dsm_indonesia_5g2(struct iwl_mvm *mvm)
return DSM_VALUE_INDONESIA_DISABLE;
}
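/*
 * Evaluate the RFI (RF interference mitigation) ACPI DSM function.
 * RFI is only treated as enabled when the DSM explicitly returns the
 * enable value; any error or unknown value falls back to disabled.
 */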
static u8 iwl_mvm_eval_dsm_rfi(struct iwl_mvm *mvm)
{
u8 value;
int ret = iwl_acpi_get_dsm_u8((&mvm->fwrt)->dev, 0, DSM_RFI_FUNC_ENABLE,
&iwl_rfi_guid, &value);
if (ret < 0) {
IWL_DEBUG_RADIO(mvm, "Failed to get DSM RFI, ret=%d\n", ret);
} else if (value >= DSM_VALUE_RFI_MAX) {
IWL_DEBUG_RADIO(mvm, "DSM RFI got invalid value, ret=%d\n",
value);
} else if (value == DSM_VALUE_RFI_ENABLE) {
IWL_DEBUG_RADIO(mvm, "DSM RFI is evaluated to enable\n");
return DSM_VALUE_RFI_ENABLE;
}
IWL_DEBUG_RADIO(mvm, "DSM RFI is disabled\n");
/* default behaviour is disabled */
return DSM_VALUE_RFI_DISABLE;
}
static u8 iwl_mvm_eval_dsm_disable_srd(struct iwl_mvm *mvm)
{
u8 value;
int ret = iwl_acpi_get_dsm_u8((&mvm->fwrt)->dev, 0,
DSM_FUNC_DISABLE_SRD, &value);
DSM_FUNC_DISABLE_SRD,
&iwl_guid, &value);
if (ret < 0)
IWL_DEBUG_RADIO(mvm,
@ -1148,7 +1229,7 @@ static void iwl_mvm_lari_cfg(struct iwl_mvm *mvm)
{
u8 ret;
int cmd_ret;
struct iwl_lari_config_change_cmd cmd = {};
struct iwl_lari_config_change_cmd_v2 cmd = {};
if (iwl_mvm_eval_dsm_indonesia_5g2(mvm) == DSM_VALUE_INDONESIA_ENABLE)
cmd.config_bitmap |=
@ -1166,11 +1247,18 @@ static void iwl_mvm_lari_cfg(struct iwl_mvm *mvm)
/* apply more config masks here */
if (cmd.config_bitmap) {
IWL_DEBUG_RADIO(mvm, "sending LARI_CONFIG_CHANGE\n");
size_t cmd_size = iwl_fw_lookup_cmd_ver(mvm->fw,
REGULATORY_AND_NVM_GROUP,
LARI_CONFIG_CHANGE, 1) == 2 ?
sizeof(struct iwl_lari_config_change_cmd_v2) :
sizeof(struct iwl_lari_config_change_cmd_v1);
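/*
 * The command is built as a v2 structure; when the firmware only
 * supports v1 the same buffer is sent with the v1 size, which works
 * because (presumably) v1 is a prefix of v2.
 */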
IWL_DEBUG_RADIO(mvm,
"sending LARI_CONFIG_CHANGE, config_bitmap=0x%x\n",
le32_to_cpu(cmd.config_bitmap));
cmd_ret = iwl_mvm_send_cmd_pdu(mvm,
WIDE_ID(REGULATORY_AND_NVM_GROUP,
LARI_CONFIG_CHANGE),
0, sizeof(cmd), &cmd);
0, cmd_size, &cmd);
if (cmd_ret < 0)
IWL_DEBUG_RADIO(mvm,
"Failed to send LARI_CONFIG_CHANGE (%d)\n",
@ -1212,6 +1300,11 @@ static void iwl_mvm_tas_init(struct iwl_mvm *mvm)
static void iwl_mvm_lari_cfg(struct iwl_mvm *mvm)
{
}
static u8 iwl_mvm_eval_dsm_rfi(struct iwl_mvm *mvm)
{
return DSM_VALUE_RFI_DISABLE;
}
#endif /* CONFIG_ACPI */
void iwl_mvm_send_recovery_cmd(struct iwl_mvm *mvm, u32 flags)
@ -1315,8 +1408,6 @@ static int iwl_mvm_load_rt_fw(struct iwl_mvm *mvm)
if (ret)
return ret;
iwl_dbg_tlv_time_point(&mvm->fwrt, IWL_FW_INI_TIME_POINT_EARLY, NULL);
mvm->rfkill_safe_init_done = false;
ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_REGULAR);
if (ret)
@ -1550,6 +1641,12 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
iwl_mvm_ftm_initiator_smooth_config(mvm);
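/*
 * If the firmware advertises RFI support and the platform opts in via
 * the ACPI DSM, send the RFI frequency table (a NULL table selects the
 * driver's default one).
 */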
if (fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_RFIM_SUPPORT)) {
if (iwl_mvm_eval_dsm_rfi(mvm) == DSM_VALUE_RFI_ENABLE)
iwl_rfi_send_config_cmd(mvm, NULL);
}
IWL_DEBUG_INFO(mvm, "RT uCode started.\n");
return 0;
error:

View File

@ -1289,6 +1289,7 @@ void iwl_mvm_rx_beacon_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
unsigned int pkt_len = iwl_rx_packet_payload_len(pkt);
struct iwl_extended_beacon_notif *beacon = (void *)pkt->data;
struct iwl_extended_beacon_notif_v5 *beacon_v5 = (void *)pkt->data;
struct ieee80211_vif *csa_vif;
@ -1304,6 +1305,9 @@ void iwl_mvm_rx_beacon_notif(struct iwl_mvm *mvm,
struct iwl_mvm_tx_resp *beacon_notify_hdr =
&beacon_v5->beacon_notify_hdr;
if (unlikely(pkt_len < sizeof(*beacon_v5)))
return;
mvm->ibss_manager = beacon_v5->ibss_mgr_status != 0;
agg_status = iwl_mvm_get_agg_status(mvm, beacon_notify_hdr);
status = le16_to_cpu(agg_status->status) & TX_STATUS_MSK;
@ -1314,6 +1318,9 @@ void iwl_mvm_rx_beacon_notif(struct iwl_mvm *mvm,
mvm->ap_last_beacon_gp2,
le32_to_cpu(beacon_notify_hdr->initial_rate));
} else {
if (unlikely(pkt_len < sizeof(*beacon)))
return;
mvm->ibss_manager = beacon->ibss_mgr_status != 0;
status = le32_to_cpu(beacon->status) & TX_STATUS_MSK;
IWL_DEBUG_RX(mvm,
@ -1419,12 +1426,13 @@ void iwl_mvm_rx_stored_beacon_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
unsigned int pkt_len = iwl_rx_packet_payload_len(pkt);
struct iwl_stored_beacon_notif *sb = (void *)pkt->data;
struct ieee80211_rx_status rx_status;
struct sk_buff *skb;
u32 size = le32_to_cpu(sb->byte_count);
if (size == 0)
if (size == 0 || pkt_len < struct_size(sb, data, size))
return;
skb = alloc_skb(size, GFP_ATOMIC);
@ -1460,14 +1468,10 @@ void iwl_mvm_probe_resp_data_notif(struct iwl_mvm *mvm,
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_probe_resp_data_notif *notif = (void *)pkt->data;
struct iwl_probe_resp_data *old_data, *new_data;
int len = iwl_rx_packet_payload_len(pkt);
u32 id = le32_to_cpu(notif->mac_id);
struct ieee80211_vif *vif;
struct iwl_mvm_vif *mvmvif;
if (WARN_ON_ONCE(len < sizeof(*notif)))
return;
IWL_DEBUG_INFO(mvm, "Probe response data notif: noa %d, csa %d\n",
notif->noa_active, notif->csa_counter);
@ -1514,12 +1518,8 @@ void iwl_mvm_channel_switch_noa_notif(struct iwl_mvm *mvm,
struct iwl_channel_switch_noa_notif *notif = (void *)pkt->data;
struct ieee80211_vif *csa_vif, *vif;
struct iwl_mvm_vif *mvmvif;
int len = iwl_rx_packet_payload_len(pkt);
u32 id_n_color, csa_id, mac_id;
if (WARN_ON_ONCE(len < sizeof(*notif)))
return;
id_n_color = le32_to_cpu(notif->id_and_color);
mac_id = id_n_color & FW_CTXT_ID_MSK;

View File

@ -472,6 +472,11 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
hw->wiphy->pmsr_capa = &iwl_mvm_pmsr_capa;
}
if (fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_BIGTK_SUPPORT))
wiphy_ext_feature_set(hw->wiphy,
NL80211_EXT_FEATURE_BEACON_PROTECTION_CLIENT);
ieee80211_hw_set(hw, SINGLE_SCAN_ON_ALL_BANDS);
hw->wiphy->features |=
NL80211_FEATURE_SCHED_SCAN_RANDOM_MAC_ADDR |
@ -816,8 +821,7 @@ void iwl_mvm_mac_itxq_xmit(struct ieee80211_hw *hw, struct ieee80211_txq *txq)
rcu_read_lock();
do {
while (likely(!mvmtxq->stopped &&
(mvm->trans->system_pm_mode ==
IWL_PLAT_PM_MODE_DISABLED))) {
!test_bit(IWL_MVM_STATUS_IN_D3, &mvm->status))) {
skb = ieee80211_tx_dequeue(hw, txq);
if (!skb) {
@ -1368,15 +1372,13 @@ static void iwl_mvm_abort_channel_switch(struct ieee80211_hw *hw,
static void iwl_mvm_channel_switch_disconnect_wk(struct work_struct *wk)
{
struct iwl_mvm *mvm;
struct iwl_mvm_vif *mvmvif;
struct ieee80211_vif *vif;
mvmvif = container_of(wk, struct iwl_mvm_vif, csa_work.work);
vif = container_of((void *)mvmvif, struct ieee80211_vif, drv_priv);
mvm = mvmvif->mvm;
iwl_mvm_abort_channel_switch(mvm->hw, vif);
/* Trigger disconnect (should clear the CSA state) */
ieee80211_chswitch_done(vif, false);
}
@ -2005,9 +2007,21 @@ static void iwl_mvm_cfg_he_sta(struct iwl_mvm *mvm,
struct ieee80211_sta *sta;
u32 flags;
int i;
const struct ieee80211_sta_he_cap *own_he_cap = NULL;
struct ieee80211_chanctx_conf *chanctx_conf;
const struct ieee80211_supported_band *sband;
rcu_read_lock();
chanctx_conf = rcu_dereference(vif->chanctx_conf);
if (WARN_ON(!chanctx_conf)) {
rcu_read_unlock();
return;
}
sband = mvm->hw->wiphy->bands[chanctx_conf->def.chan->band];
own_he_cap = ieee80211_get_he_iftype_cap(sband, vif->type);
sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_ctxt_cmd.sta_id]);
if (IS_ERR_OR_NULL(sta)) {
rcu_read_unlock();
@ -2194,6 +2208,10 @@ static void iwl_mvm_cfg_he_sta(struct iwl_mvm *mvm,
(vif->bss_conf.uora_ocw_range >> 3) & 0x7;
}
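/*
 * If our own HE MAC capabilities do not advertise ACK-enabled
 * aggregation, let the firmware know via the station context flags.
 */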
if (own_he_cap && !(own_he_cap->he_cap_elem.mac_cap_info[2] &
IEEE80211_HE_MAC_CAP2_ACK_EN))
flags |= STA_CTXT_HE_NIC_NOT_ACK_ENABLED;
if (vif->bss_conf.nontransmitted) {
flags |= STA_CTXT_HE_REF_BSSID_VALID;
ether_addr_copy(sta_ctxt_cmd.ref_bssid_addr,
@ -2404,12 +2422,6 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
IWL_ERR(mvm, "failed to update power mode\n");
}
if (changes & BSS_CHANGED_TXPOWER) {
IWL_DEBUG_CALIB(mvm, "Changing TX Power to %d\n",
bss_conf->txpower);
iwl_mvm_set_tx_power(mvm, vif, bss_conf->txpower);
}
if (changes & BSS_CHANGED_CQM) {
IWL_DEBUG_MAC80211(mvm, "cqm info_changed\n");
/* reset cqm events tracking */
@ -2641,12 +2653,6 @@ iwl_mvm_bss_info_changed_ap_ibss(struct iwl_mvm *mvm,
iwl_mvm_mac_ctxt_beacon_changed(mvm, vif))
IWL_WARN(mvm, "Failed updating beacon data\n");
if (changes & BSS_CHANGED_TXPOWER) {
IWL_DEBUG_CALIB(mvm, "Changing TX Power to %d\n",
bss_conf->txpower);
iwl_mvm_set_tx_power(mvm, vif, bss_conf->txpower);
}
if (changes & BSS_CHANGED_FTM_RESPONDER) {
int ret = iwl_mvm_ftm_start_responder(mvm, vif);
@ -2686,6 +2692,12 @@ static void iwl_mvm_bss_info_changed(struct ieee80211_hw *hw,
WARN_ON_ONCE(1);
}
if (changes & BSS_CHANGED_TXPOWER) {
IWL_DEBUG_CALIB(mvm, "Changing TX Power to %d dBm\n",
bss_conf->txpower);
iwl_mvm_set_tx_power(mvm, vif, bss_conf->txpower);
}
mutex_unlock(&mvm->mutex);
}
@ -3009,6 +3021,39 @@ static void iwl_mvm_check_he_obss_narrow_bw_ru(struct ieee80211_hw *hw,
mvmvif->he_ru_2mhz_block = !iter_data.tolerated;
}
static void iwl_mvm_reset_cca_40mhz_workaround(struct iwl_mvm *mvm,
struct ieee80211_vif *vif)
{
struct ieee80211_supported_band *sband;
const struct ieee80211_sta_he_cap *he_cap;
if (vif->type != NL80211_IFTYPE_STATION)
return;
if (!mvm->cca_40mhz_workaround)
return;
/* decrement and check that we reached zero */
mvm->cca_40mhz_workaround--;
if (mvm->cca_40mhz_workaround)
return;
sband = mvm->hw->wiphy->bands[NL80211_BAND_2GHZ];
sband->ht_cap.cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40;
he_cap = ieee80211_get_he_iftype_cap(sband,
ieee80211_vif_type_p2p(vif));
if (he_cap) {
/* we know that ours is writable */
struct ieee80211_sta_he_cap *he = (void *)he_cap;
he->he_cap_elem.phy_cap_info[0] |=
IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G;
}
}
static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
@ -3048,6 +3093,12 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
* No need to make sure deferred TX indication is off since the
* worker will already remove it if it was on
*/
/*
* Additionally, reset the 40 MHz capability if we disconnected
* from the AP now.
*/
iwl_mvm_reset_cca_40mhz_workaround(mvm, vif);
}
mutex_lock(&mvm->mutex);
@ -3389,6 +3440,10 @@ static int __iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
switch (cmd) {
case SET_KEY:
if (keyidx == 6 || keyidx == 7)
rcu_assign_pointer(mvmvif->bcn_prot.keys[keyidx - 6],
key);
if ((vif->type == NL80211_IFTYPE_ADHOC ||
vif->type == NL80211_IFTYPE_AP) && !sta) {
/*
@ -3497,6 +3552,10 @@ static int __iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
break;
case DISABLE_KEY:
if (keyidx == 6 || keyidx == 7)
RCU_INIT_POINTER(mvmvif->bcn_prot.keys[keyidx - 6],
NULL);
ret = -ENOENT;
for (i = 0; i < ARRAY_SIZE(mvmvif->ap_early_keys); i++) {
if (mvmvif->ap_early_keys[i] == key) {
@ -4631,7 +4690,8 @@ static void iwl_mvm_channel_switch_rx_beacon(struct ieee80211_hw *hw,
if (mvmvif->csa_failed)
goto out_unlock;
IWL_DEBUG_MAC80211(mvm, "Modify CSA on mac %d\n", mvmvif->id);
IWL_DEBUG_MAC80211(mvm, "Modify CSA on mac %d count = %d mode = %d\n",
mvmvif->id, chsw->count, chsw->block_tx);
WARN_ON(iwl_mvm_send_cmd_pdu(mvm,
WIDE_ID(MAC_CONF_GROUP,
CHANNEL_SWITCH_TIME_EVENT_CMD),
@ -4648,7 +4708,7 @@ static void iwl_mvm_flush_no_vif(struct iwl_mvm *mvm, u32 queues, bool drop)
if (drop) {
mutex_lock(&mvm->mutex);
iwl_mvm_flush_tx_path(mvm,
iwl_mvm_flushable_queues(mvm) & queues, 0);
iwl_mvm_flushable_queues(mvm) & queues);
mutex_unlock(&mvm->mutex);
} else {
iwl_trans_wait_tx_queues_empty(mvm->trans, queues);
@ -4666,7 +4726,7 @@ static void iwl_mvm_flush_no_vif(struct iwl_mvm *mvm, u32 queues, bool drop)
continue;
if (drop)
iwl_mvm_flush_sta_tids(mvm, i, 0xFFFF, 0);
iwl_mvm_flush_sta_tids(mvm, i, 0xFFFF);
else
iwl_mvm_wait_sta_queues_empty(mvm,
iwl_mvm_sta_from_mac80211(sta));
@ -4948,6 +5008,34 @@ static void iwl_mvm_mac_sta_statistics(struct ieee80211_hw *hw,
mutex_unlock(&mvm->mutex);
}
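/*
 * With the new (ini) debug TLVs, MLME failures are reported to the
 * firmware debug infrastructure as time points (association failed,
 * EAPOL/authentication failed, deauth) so the matching debug data can
 * be collected.
 */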
static void iwl_mvm_event_mlme_callback_ini(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
const struct ieee80211_mlme_event *mlme)
{
if (mlme->data == ASSOC_EVENT && (mlme->status == MLME_DENIED ||
mlme->status == MLME_TIMEOUT)) {
iwl_dbg_tlv_time_point(&mvm->fwrt,
IWL_FW_INI_TIME_POINT_ASSOC_FAILED,
NULL);
return;
}
if (mlme->data == AUTH_EVENT && (mlme->status == MLME_DENIED ||
mlme->status == MLME_TIMEOUT)) {
iwl_dbg_tlv_time_point(&mvm->fwrt,
IWL_FW_INI_TIME_POINT_EAPOL_FAILED,
NULL);
return;
}
if (mlme->data == DEAUTH_RX_EVENT || mlme->data == DEAUTH_TX_EVENT) {
iwl_dbg_tlv_time_point(&mvm->fwrt,
IWL_FW_INI_TIME_POINT_DEASSOC,
NULL);
return;
}
}
static void iwl_mvm_event_mlme_callback(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
const struct ieee80211_event *event)
@ -4962,6 +5050,11 @@ static void iwl_mvm_event_mlme_callback(struct iwl_mvm *mvm,
struct iwl_fw_dbg_trigger_tlv *trig;
struct iwl_fw_dbg_trigger_mlme *trig_mlme;
if (iwl_trans_dbg_ini_valid(mvm->trans)) {
iwl_mvm_event_mlme_callback_ini(mvm, vif, &event->u.mlme);
return;
}
trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, ieee80211_vif_to_wdev(vif),
FW_DBG_TRIGGER_MLME);
if (!trig)

View File

@ -419,6 +419,10 @@ struct iwl_mvm_vif {
/* 26-tone RU OFDMA transmissions should be blocked */
bool he_ru_2mhz_block;
struct {
struct ieee80211_key_conf __rcu *keys[2];
} bcn_prot;
};
static inline struct iwl_mvm_vif *
@ -796,6 +800,8 @@ struct iwl_mvm {
bool hw_registered;
bool rfkill_safe_init_done;
u8 cca_40mhz_workaround;
u32 ampdu_ref;
bool ampdu_toggle;
@ -887,8 +893,12 @@ struct iwl_mvm {
/* last smart fifo state that was successfully sent to firmware */
enum iwl_sf_state sf_state;
#ifdef CONFIG_IWLWIFI_DEBUGFS
/*
* Leave this pointer outside the ifdef below so that it can be
* assigned without ifdef in the source code.
*/
struct dentry *debugfs_dir;
#ifdef CONFIG_IWLWIFI_DEBUGFS
u32 dbgfs_sram_offset, dbgfs_sram_len;
u32 dbgfs_prph_reg_addr;
bool disable_power_off;
@ -1471,10 +1481,9 @@ const char *iwl_mvm_get_tx_fail_reason(u32 status);
#else
static inline const char *iwl_mvm_get_tx_fail_reason(u32 status) { return ""; }
#endif
int iwl_mvm_flush_tx_path(struct iwl_mvm *mvm, u32 tfd_msk, u32 flags);
int iwl_mvm_flush_tx_path(struct iwl_mvm *mvm, u32 tfd_msk);
int iwl_mvm_flush_sta(struct iwl_mvm *mvm, void *sta, bool internal);
int iwl_mvm_flush_sta_tids(struct iwl_mvm *mvm, u32 sta_id,
u16 tids, u32 flags);
int iwl_mvm_flush_sta_tids(struct iwl_mvm *mvm, u32 sta_id, u16 tids);
void iwl_mvm_async_handlers_purge(struct iwl_mvm *mvm);
@ -1547,6 +1556,9 @@ bool iwl_mvm_bcast_filter_build_cmd(struct iwl_mvm *mvm,
* FW notifications / CMD responses handlers
* Convention: iwl_mvm_rx_<NAME OF THE CMD>
*/
void iwl_mvm_rx_mq(struct iwl_op_mode *op_mode,
struct napi_struct *napi,
struct iwl_rx_cmd_buffer *rxb);
void iwl_mvm_rx_rx_phy_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb);
void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct napi_struct *napi,
struct iwl_rx_cmd_buffer *rxb);
@ -1692,12 +1704,11 @@ void iwl_mvm_rx_umac_scan_iter_complete_notif(struct iwl_mvm *mvm,
/* MVM debugfs */
#ifdef CONFIG_IWLWIFI_DEBUGFS
void iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir);
void iwl_mvm_dbgfs_register(struct iwl_mvm *mvm);
void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
void iwl_mvm_vif_dbgfs_clean(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
#else
static inline void iwl_mvm_dbgfs_register(struct iwl_mvm *mvm,
struct dentry *dbgfs_dir)
static inline void iwl_mvm_dbgfs_register(struct iwl_mvm *mvm)
{
}
static inline void
@ -1898,7 +1909,6 @@ int iwl_mvm_reconfig_scd(struct iwl_mvm *mvm, int queue, int fifo, int sta_id,
/* Thermal management and CT-kill */
void iwl_mvm_tt_tx_backoff(struct iwl_mvm *mvm, u32 backoff);
void iwl_mvm_tt_temp_changed(struct iwl_mvm *mvm, u32 temp);
void iwl_mvm_temp_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb);
void iwl_mvm_tt_handler(struct iwl_mvm *mvm);
@ -1995,6 +2005,7 @@ void iwl_mvm_sync_rx_queues_internal(struct iwl_mvm *mvm,
u32 size);
void iwl_mvm_reorder_timer_expired(struct timer_list *t);
struct ieee80211_vif *iwl_mvm_get_bss_vif(struct iwl_mvm *mvm);
struct ieee80211_vif *iwl_mvm_get_vif_by_macid(struct iwl_mvm *mvm, u32 macid);
bool iwl_mvm_is_vif_assoc(struct iwl_mvm *mvm);
#define MVM_TCM_PERIOD_MSEC 500
@ -2029,6 +2040,10 @@ void iwl_mvm_sta_add_debugfs(struct ieee80211_hw *hw,
struct dentry *dir);
#endif
int iwl_rfi_send_config_cmd(struct iwl_mvm *mvm,
struct iwl_rfi_lut_entry *rfi_table);
struct iwl_rfi_freq_table_resp_cmd *iwl_rfi_get_freq_table(struct iwl_mvm *mvm);
static inline u8 iwl_mvm_phy_band_from_nl80211(enum nl80211_band band)
{
switch (band) {

View File

@ -146,6 +146,70 @@ static void iwl_mvm_nic_config(struct iwl_op_mode *op_mode)
~APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
}
static void iwl_mvm_rx_monitor_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_datapath_monitor_notif *notif = (void *)pkt->data;
struct ieee80211_supported_band *sband;
const struct ieee80211_sta_he_cap *he_cap;
struct ieee80211_vif *vif;
if (notif->type != cpu_to_le32(IWL_DP_MON_NOTIF_TYPE_EXT_CCA))
return;
vif = iwl_mvm_get_vif_by_macid(mvm, notif->mac_id);
if (!vif || vif->type != NL80211_IFTYPE_STATION)
return;
if (!vif->bss_conf.chandef.chan ||
vif->bss_conf.chandef.chan->band != NL80211_BAND_2GHZ ||
vif->bss_conf.chandef.width < NL80211_CHAN_WIDTH_40)
return;
if (!vif->bss_conf.assoc)
return;
/* this shouldn't happen *again*, ignore it */
if (mvm->cca_40mhz_workaround)
return;
/*
* We'll decrement this on disconnect - so set to 2 since we'll
* still have to disconnect from the current AP first.
*/
mvm->cca_40mhz_workaround = 2;
/*
* This capability manipulation isn't really ideal, but it's the
* easiest choice - otherwise we'd have to do some major changes
* in mac80211 to support this, which isn't worth it. This does
* mean that userspace may have outdated information, but that's
* actually not an issue at all.
*/
sband = mvm->hw->wiphy->bands[NL80211_BAND_2GHZ];
WARN_ON(!sband->ht_cap.ht_supported);
WARN_ON(!(sband->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40));
sband->ht_cap.cap &= ~IEEE80211_HT_CAP_SUP_WIDTH_20_40;
he_cap = ieee80211_get_he_iftype_cap(sband,
ieee80211_vif_type_p2p(vif));
if (he_cap) {
/* we know that ours is writable */
struct ieee80211_sta_he_cap *he = (void *)he_cap;
WARN_ON(!he->has_he);
WARN_ON(!(he->he_cap_elem.phy_cap_info[0] &
IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G));
he->he_cap_elem.phy_cap_info[0] &=
~IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G;
}
ieee80211_disconnect(vif, true);
}
/**
* enum iwl_rx_handler_context context for Rx handler
* @RX_HANDLER_SYNC : this means that it will be called in the Rx path
@ -169,15 +233,21 @@ enum iwl_rx_handler_context {
* @fn: the function is called when notification is received
*/
struct iwl_rx_handlers {
u16 cmd_id;
u16 cmd_id, min_size;
enum iwl_rx_handler_context context;
void (*fn)(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb);
};
#define RX_HANDLER(_cmd_id, _fn, _context) \
{ .cmd_id = _cmd_id, .fn = _fn, .context = _context }
#define RX_HANDLER_GRP(_grp, _cmd, _fn, _context) \
{ .cmd_id = WIDE_ID(_grp, _cmd), .fn = _fn, .context = _context }
#define RX_HANDLER_NO_SIZE(_cmd_id, _fn, _context) \
{ .cmd_id = _cmd_id, .fn = _fn, .context = _context, }
#define RX_HANDLER_GRP_NO_SIZE(_grp, _cmd, _fn, _context) \
{ .cmd_id = WIDE_ID(_grp, _cmd), .fn = _fn, .context = _context, }
#define RX_HANDLER(_cmd_id, _fn, _context, _struct) \
{ .cmd_id = _cmd_id, .fn = _fn, \
.context = _context, .min_size = sizeof(_struct), }
#define RX_HANDLER_GRP(_grp, _cmd, _fn, _context, _struct) \
{ .cmd_id = WIDE_ID(_grp, _cmd), .fn = _fn, \
.context = _context, .min_size = sizeof(_struct), }
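/*
 * min_size is checked against the Rx packet payload length in
 * iwl_mvm_rx_common() before the handler is invoked, so handlers
 * registered with a _struct can assume at least that many bytes.
 * The _NO_SIZE variants leave min_size at 0 and skip the check.
 */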
/*
* Handlers for fw notifications
@ -187,85 +257,107 @@ struct iwl_rx_handlers {
* The handler can be one from three contexts, see &iwl_rx_handler_context
*/
static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = {
RX_HANDLER(TX_CMD, iwl_mvm_rx_tx_cmd, RX_HANDLER_SYNC),
RX_HANDLER(BA_NOTIF, iwl_mvm_rx_ba_notif, RX_HANDLER_SYNC),
RX_HANDLER(TX_CMD, iwl_mvm_rx_tx_cmd, RX_HANDLER_SYNC,
struct iwl_mvm_tx_resp),
RX_HANDLER(BA_NOTIF, iwl_mvm_rx_ba_notif, RX_HANDLER_SYNC,
struct iwl_mvm_ba_notif),
RX_HANDLER_GRP(DATA_PATH_GROUP, TLC_MNG_UPDATE_NOTIF,
iwl_mvm_tlc_update_notif, RX_HANDLER_SYNC),
iwl_mvm_tlc_update_notif, RX_HANDLER_SYNC,
struct iwl_tlc_update_notif),
RX_HANDLER(BT_PROFILE_NOTIFICATION, iwl_mvm_rx_bt_coex_notif,
RX_HANDLER_ASYNC_LOCKED),
RX_HANDLER(BEACON_NOTIFICATION, iwl_mvm_rx_beacon_notif,
RX_HANDLER_ASYNC_LOCKED),
RX_HANDLER(STATISTICS_NOTIFICATION, iwl_mvm_rx_statistics,
RX_HANDLER_ASYNC_LOCKED),
RX_HANDLER_ASYNC_LOCKED, struct iwl_bt_coex_profile_notif),
RX_HANDLER_NO_SIZE(BEACON_NOTIFICATION, iwl_mvm_rx_beacon_notif,
RX_HANDLER_ASYNC_LOCKED),
RX_HANDLER_NO_SIZE(STATISTICS_NOTIFICATION, iwl_mvm_rx_statistics,
RX_HANDLER_ASYNC_LOCKED),
RX_HANDLER(BA_WINDOW_STATUS_NOTIFICATION_ID,
iwl_mvm_window_status_notif, RX_HANDLER_SYNC),
iwl_mvm_window_status_notif, RX_HANDLER_SYNC,
struct iwl_ba_window_status_notif),
RX_HANDLER(TIME_EVENT_NOTIFICATION, iwl_mvm_rx_time_event_notif,
RX_HANDLER_SYNC),
RX_HANDLER_SYNC, struct iwl_time_event_notif),
RX_HANDLER_GRP(MAC_CONF_GROUP, SESSION_PROTECTION_NOTIF,
iwl_mvm_rx_session_protect_notif, RX_HANDLER_SYNC),
iwl_mvm_rx_session_protect_notif, RX_HANDLER_SYNC,
struct iwl_mvm_session_prot_notif),
RX_HANDLER(MCC_CHUB_UPDATE_CMD, iwl_mvm_rx_chub_update_mcc,
RX_HANDLER_ASYNC_LOCKED),
RX_HANDLER_ASYNC_LOCKED, struct iwl_mcc_chub_notif),
RX_HANDLER(EOSP_NOTIFICATION, iwl_mvm_rx_eosp_notif, RX_HANDLER_SYNC),
RX_HANDLER(EOSP_NOTIFICATION, iwl_mvm_rx_eosp_notif, RX_HANDLER_SYNC,
struct iwl_mvm_eosp_notification),
RX_HANDLER(SCAN_ITERATION_COMPLETE,
iwl_mvm_rx_lmac_scan_iter_complete_notif, RX_HANDLER_SYNC),
iwl_mvm_rx_lmac_scan_iter_complete_notif, RX_HANDLER_SYNC,
struct iwl_lmac_scan_complete_notif),
RX_HANDLER(SCAN_OFFLOAD_COMPLETE,
iwl_mvm_rx_lmac_scan_complete_notif,
RX_HANDLER_ASYNC_LOCKED),
RX_HANDLER(MATCH_FOUND_NOTIFICATION, iwl_mvm_rx_scan_match_found,
RX_HANDLER_SYNC),
RX_HANDLER_ASYNC_LOCKED, struct iwl_periodic_scan_complete),
RX_HANDLER_NO_SIZE(MATCH_FOUND_NOTIFICATION,
iwl_mvm_rx_scan_match_found,
RX_HANDLER_SYNC),
RX_HANDLER(SCAN_COMPLETE_UMAC, iwl_mvm_rx_umac_scan_complete_notif,
RX_HANDLER_ASYNC_LOCKED),
RX_HANDLER_ASYNC_LOCKED, struct iwl_umac_scan_complete),
RX_HANDLER(SCAN_ITERATION_COMPLETE_UMAC,
iwl_mvm_rx_umac_scan_iter_complete_notif, RX_HANDLER_SYNC),
iwl_mvm_rx_umac_scan_iter_complete_notif, RX_HANDLER_SYNC,
struct iwl_umac_scan_iter_complete_notif),
RX_HANDLER(CARD_STATE_NOTIFICATION, iwl_mvm_rx_card_state_notif,
RX_HANDLER_SYNC),
RX_HANDLER_SYNC, struct iwl_card_state_notif),
RX_HANDLER(MISSED_BEACONS_NOTIFICATION, iwl_mvm_rx_missed_beacons_notif,
RX_HANDLER_SYNC),
RX_HANDLER_SYNC, struct iwl_missed_beacons_notif),
RX_HANDLER(REPLY_ERROR, iwl_mvm_rx_fw_error, RX_HANDLER_SYNC),
RX_HANDLER(REPLY_ERROR, iwl_mvm_rx_fw_error, RX_HANDLER_SYNC,
struct iwl_error_resp),
RX_HANDLER(PSM_UAPSD_AP_MISBEHAVING_NOTIFICATION,
iwl_mvm_power_uapsd_misbehaving_ap_notif, RX_HANDLER_SYNC),
RX_HANDLER(DTS_MEASUREMENT_NOTIFICATION, iwl_mvm_temp_notif,
RX_HANDLER_ASYNC_LOCKED),
RX_HANDLER_GRP(PHY_OPS_GROUP, DTS_MEASUREMENT_NOTIF_WIDE,
iwl_mvm_temp_notif, RX_HANDLER_ASYNC_UNLOCKED),
iwl_mvm_power_uapsd_misbehaving_ap_notif, RX_HANDLER_SYNC,
struct iwl_uapsd_misbehaving_ap_notif),
RX_HANDLER_NO_SIZE(DTS_MEASUREMENT_NOTIFICATION, iwl_mvm_temp_notif,
RX_HANDLER_ASYNC_LOCKED),
RX_HANDLER_GRP_NO_SIZE(PHY_OPS_GROUP, DTS_MEASUREMENT_NOTIF_WIDE,
iwl_mvm_temp_notif, RX_HANDLER_ASYNC_UNLOCKED),
RX_HANDLER_GRP(PHY_OPS_GROUP, CT_KILL_NOTIFICATION,
iwl_mvm_ct_kill_notif, RX_HANDLER_SYNC),
iwl_mvm_ct_kill_notif, RX_HANDLER_SYNC,
struct ct_kill_notif),
RX_HANDLER(TDLS_CHANNEL_SWITCH_NOTIFICATION, iwl_mvm_rx_tdls_notif,
RX_HANDLER_ASYNC_LOCKED),
RX_HANDLER_ASYNC_LOCKED,
struct iwl_tdls_channel_switch_notif),
RX_HANDLER(MFUART_LOAD_NOTIFICATION, iwl_mvm_rx_mfuart_notif,
RX_HANDLER_SYNC),
RX_HANDLER_SYNC, struct iwl_mfuart_load_notif_v1),
RX_HANDLER_GRP(LOCATION_GROUP, TOF_RESPONDER_STATS,
iwl_mvm_ftm_responder_stats, RX_HANDLER_ASYNC_LOCKED),
iwl_mvm_ftm_responder_stats, RX_HANDLER_ASYNC_LOCKED,
struct iwl_ftm_responder_stats),
RX_HANDLER_GRP(LOCATION_GROUP, TOF_RANGE_RESPONSE_NOTIF,
iwl_mvm_ftm_range_resp, RX_HANDLER_ASYNC_LOCKED),
RX_HANDLER_GRP(LOCATION_GROUP, TOF_LC_NOTIF,
iwl_mvm_ftm_lc_notif, RX_HANDLER_ASYNC_LOCKED),
RX_HANDLER_GRP_NO_SIZE(LOCATION_GROUP, TOF_RANGE_RESPONSE_NOTIF,
iwl_mvm_ftm_range_resp, RX_HANDLER_ASYNC_LOCKED),
RX_HANDLER_GRP_NO_SIZE(LOCATION_GROUP, TOF_LC_NOTIF,
iwl_mvm_ftm_lc_notif, RX_HANDLER_ASYNC_LOCKED),
RX_HANDLER_GRP(DEBUG_GROUP, MFU_ASSERT_DUMP_NTF,
iwl_mvm_mfu_assert_dump_notif, RX_HANDLER_SYNC),
iwl_mvm_mfu_assert_dump_notif, RX_HANDLER_SYNC,
struct iwl_mfu_assert_dump_notif),
RX_HANDLER_GRP(PROT_OFFLOAD_GROUP, STORED_BEACON_NTF,
iwl_mvm_rx_stored_beacon_notif, RX_HANDLER_SYNC),
iwl_mvm_rx_stored_beacon_notif, RX_HANDLER_SYNC,
struct iwl_stored_beacon_notif),
RX_HANDLER_GRP(DATA_PATH_GROUP, MU_GROUP_MGMT_NOTIF,
iwl_mvm_mu_mimo_grp_notif, RX_HANDLER_SYNC),
iwl_mvm_mu_mimo_grp_notif, RX_HANDLER_SYNC,
struct iwl_mu_group_mgmt_notif),
RX_HANDLER_GRP(DATA_PATH_GROUP, STA_PM_NOTIF,
iwl_mvm_sta_pm_notif, RX_HANDLER_SYNC),
iwl_mvm_sta_pm_notif, RX_HANDLER_SYNC,
struct iwl_mvm_pm_state_notification),
RX_HANDLER_GRP(MAC_CONF_GROUP, PROBE_RESPONSE_DATA_NOTIF,
iwl_mvm_probe_resp_data_notif,
RX_HANDLER_ASYNC_LOCKED),
RX_HANDLER_ASYNC_LOCKED,
struct iwl_probe_resp_data_notif),
RX_HANDLER_GRP(MAC_CONF_GROUP, CHANNEL_SWITCH_NOA_NOTIF,
iwl_mvm_channel_switch_noa_notif,
RX_HANDLER_SYNC),
RX_HANDLER_SYNC, struct iwl_channel_switch_noa_notif),
RX_HANDLER_GRP(DATA_PATH_GROUP, MONITOR_NOTIF,
iwl_mvm_rx_monitor_notif, RX_HANDLER_ASYNC_LOCKED,
struct iwl_datapath_monitor_notif),
};
#undef RX_HANDLER
#undef RX_HANDLER_GRP
@ -410,6 +502,7 @@ static const struct iwl_hcmd_names iwl_mvm_data_path_names[] = {
HCMD_NAME(RFH_QUEUE_CONFIG_CMD),
HCMD_NAME(TLC_MNG_CONFIG_CMD),
HCMD_NAME(CHEST_COLLECTOR_FILTER_CONFIG_CMD),
HCMD_NAME(MONITOR_NOTIF),
HCMD_NAME(STA_PM_NOTIF),
HCMD_NAME(MU_GROUP_MGMT_NOTIF),
HCMD_NAME(RX_QUEUES_NOTIFICATION),
@ -552,6 +645,44 @@ static const struct iwl_fw_runtime_ops iwl_mvm_fwrt_ops = {
.d3_debug_enable = iwl_mvm_d3_debug_enable,
};
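/*
 * Run the INIT firmware image once at op-mode start so the NVM can be
 * read, then stop the device again; mac80211 and debugfs registration
 * follow in iwl_mvm_start_post_nvm().
 */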
static int iwl_mvm_start_get_nvm(struct iwl_mvm *mvm)
{
int ret;
mutex_lock(&mvm->mutex);
ret = iwl_run_init_mvm_ucode(mvm);
if (ret && ret != -ERFKILL)
iwl_fw_dbg_error_collect(&mvm->fwrt, FW_DBG_TRIGGER_DRIVER);
if (!iwlmvm_mod_params.init_dbg || !ret)
iwl_mvm_stop_device(mvm);
mutex_unlock(&mvm->mutex);
if (ret < 0)
IWL_ERR(mvm, "Failed to run INIT ucode: %d\n", ret);
return ret;
}
static int iwl_mvm_start_post_nvm(struct iwl_mvm *mvm)
{
int ret;
iwl_mvm_toggle_tx_ant(mvm, &mvm->mgmt_last_antenna_idx);
ret = iwl_mvm_mac_setup_register(mvm);
if (ret)
return ret;
mvm->hw_registered = true;
iwl_mvm_dbgfs_register(mvm);
return 0;
}
static struct iwl_op_mode *
iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
const struct iwl_fw *fw, struct dentry *dbgfs_dir)
@ -773,18 +904,6 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
if (err)
goto out_free;
mutex_lock(&mvm->mutex);
err = iwl_run_init_mvm_ucode(mvm);
if (err && err != -ERFKILL)
iwl_fw_dbg_error_collect(&mvm->fwrt, FW_DBG_TRIGGER_DRIVER);
if (!iwlmvm_mod_params.init_dbg || !err)
iwl_mvm_stop_device(mvm);
mutex_unlock(&mvm->mutex);
if (err < 0) {
IWL_ERR(mvm, "Failed to run INIT ucode: %d\n", err);
goto out_free;
}
scan_size = iwl_mvm_scan_size(mvm);
mvm->scan_cmd = kmalloc(scan_size, GFP_KERNEL);
@ -798,26 +917,27 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
/* Set EBS as successful as long as not stated otherwise by the FW. */
mvm->last_ebs_successful = true;
err = iwl_mvm_mac_setup_register(mvm);
if (err)
goto out_free;
mvm->hw_registered = true;
min_backoff = iwl_mvm_min_backoff(mvm);
iwl_mvm_thermal_initialize(mvm, min_backoff);
iwl_mvm_dbgfs_register(mvm, dbgfs_dir);
if (!iwl_mvm_has_new_rx_stats_api(mvm))
memset(&mvm->rx_stats_v3, 0,
sizeof(struct mvm_statistics_rx_v3));
else
memset(&mvm->rx_stats, 0, sizeof(struct mvm_statistics_rx));
iwl_mvm_toggle_tx_ant(mvm, &mvm->mgmt_last_antenna_idx);
mvm->debugfs_dir = dbgfs_dir;
if (iwl_mvm_start_get_nvm(mvm))
goto out_thermal_exit;
if (iwl_mvm_start_post_nvm(mvm))
goto out_thermal_exit;
return op_mode;
out_thermal_exit:
iwl_mvm_thermal_exit(mvm);
out_free:
iwl_fw_flush_dumps(&mvm->fwrt);
iwl_fw_runtime_free(&mvm->fwrt);
@ -964,6 +1084,7 @@ static void iwl_mvm_rx_common(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb,
struct iwl_rx_packet *pkt)
{
unsigned int pkt_len = iwl_rx_packet_payload_len(pkt);
int i;
union iwl_dbg_tlv_tp_data tp_data = { .fw_pkt = pkt };
@ -985,6 +1106,9 @@ static void iwl_mvm_rx_common(struct iwl_mvm *mvm,
if (rx_h->cmd_id != WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd))
continue;
if (unlikely(pkt_len < rx_h->min_size))
return;
if (rx_h->context == RX_HANDLER_SYNC) {
rx_h->fn(mvm, rxb);
return;
@ -1024,9 +1148,9 @@ static void iwl_mvm_rx(struct iwl_op_mode *op_mode,
iwl_mvm_rx_common(mvm, rxb, pkt);
}
static void iwl_mvm_rx_mq(struct iwl_op_mode *op_mode,
struct napi_struct *napi,
struct iwl_rx_cmd_buffer *rxb)
void iwl_mvm_rx_mq(struct iwl_op_mode *op_mode,
struct napi_struct *napi,
struct iwl_rx_cmd_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
@ -1315,6 +1439,15 @@ static void iwl_mvm_cmd_queue_full(struct iwl_op_mode *op_mode)
iwl_mvm_nic_restart(mvm, true);
}
static void iwl_op_mode_mvm_time_point(struct iwl_op_mode *op_mode,
enum iwl_fw_ini_time_point tp_id,
union iwl_dbg_tlv_tp_data *tp_data)
{
struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
iwl_dbg_tlv_time_point(&mvm->fwrt, tp_id, tp_data);
}
#define IWL_MVM_COMMON_OPS \
/* these could be differentiated */ \
.async_cb = iwl_mvm_async_cb, \
@ -1327,7 +1460,8 @@ static void iwl_mvm_cmd_queue_full(struct iwl_op_mode *op_mode)
.nic_config = iwl_mvm_nic_config, \
/* as we only register one, these MUST be common! */ \
.start = iwl_op_mode_mvm_start, \
.stop = iwl_op_mode_mvm_stop
.stop = iwl_op_mode_mvm_stop, \
.time_point = iwl_op_mode_mvm_time_point
static const struct iwl_op_mode_ops iwl_mvm_ops = {
IWL_MVM_COMMON_OPS,

View File

@ -0,0 +1,118 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright (C) 2020 Intel Corporation
*/
#include "mvm.h"
#include "fw/api/commands.h"
#include "fw/api/phy-ctxt.h"
/*
* DDR needs frequency in units of 16.666MHz, so provide FW with the
* frequency values in the adjusted format.
*/
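/*
 * For example, 3733 MHz maps to 3733 * 3 / 50 = 223 and 4267 MHz to
 * 4267 * 3 / 50 = 256 (integer division, 16.666 MHz being 50/3 MHz),
 * which appears to be how the first field of each entry below is
 * derived.
 */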
static const struct iwl_rfi_lut_entry iwl_rfi_table[IWL_RFI_LUT_SIZE] = {
/* LPDDR4 */
/* frequency 3733MHz */
{cpu_to_le16(223), {114, 116, 118, 120, 122,},
{PHY_BAND_5, PHY_BAND_5, PHY_BAND_5, PHY_BAND_5, PHY_BAND_5,}},
/* frequency 4267MHz */
{cpu_to_le16(256), {79, 83, 85, 87, 89, 91, 93,},
{PHY_BAND_6, PHY_BAND_6, PHY_BAND_6, PHY_BAND_6, PHY_BAND_6,
PHY_BAND_6, PHY_BAND_6,}},
/* DDR5ePOR */
/* frequency 4000MHz */
{cpu_to_le16(240), {3, 5, 7, 9, 11, 13, 15,},
{PHY_BAND_6, PHY_BAND_6, PHY_BAND_6, PHY_BAND_6, PHY_BAND_6,
PHY_BAND_6, PHY_BAND_6,}},
/* frequency 4400MHz */
{cpu_to_le16(264), {111, 119, 123, 125, 129, 131, 133, 135, 143,},
{PHY_BAND_6, PHY_BAND_6, PHY_BAND_6, PHY_BAND_6, PHY_BAND_6,
PHY_BAND_6, PHY_BAND_6, PHY_BAND_6, PHY_BAND_6,}},
/* LPDDR5iPOR */
/* frequency 5200MHz */
{cpu_to_le16(312), {36, 38, 40, 42, 50,},
{PHY_BAND_5, PHY_BAND_5, PHY_BAND_5, PHY_BAND_5, PHY_BAND_5,}},
/* frequency 6000MHz */
{cpu_to_le16(360), {3, 5, 7, 9, 11, 13, 15,},
{PHY_BAND_6, PHY_BAND_6, PHY_BAND_6, PHY_BAND_6, PHY_BAND_6,
PHY_BAND_6, PHY_BAND_6,}},
/* frequency 6400MHz */
{cpu_to_le16(384), {79, 83, 85, 87, 89, 91, 93,},
{PHY_BAND_6, PHY_BAND_6, PHY_BAND_6, PHY_BAND_6, PHY_BAND_6,
PHY_BAND_6, PHY_BAND_6,}},
};
int iwl_rfi_send_config_cmd(struct iwl_mvm *mvm, struct iwl_rfi_lut_entry *rfi_table)
{
int ret;
struct iwl_rfi_config_cmd cmd;
struct iwl_host_cmd hcmd = {
.id = WIDE_ID(SYSTEM_GROUP, RFI_CONFIG_CMD),
.dataflags[0] = IWL_HCMD_DFL_DUP,
.data[0] = &cmd,
.len[0] = sizeof(cmd),
};
if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_RFIM_SUPPORT))
return -EOPNOTSUPP;
/* in case no table is passed, use the default one */
if (!rfi_table) {
memcpy(cmd.table, iwl_rfi_table, sizeof(cmd.table));
} else {
memcpy(cmd.table, rfi_table, sizeof(cmd.table));
/* notify FW the table is not the default one */
cmd.oem = 1;
}
mutex_lock(&mvm->mutex);
ret = iwl_mvm_send_cmd(mvm, &hcmd);
mutex_unlock(&mvm->mutex);
if (ret)
IWL_ERR(mvm, "Failed to send RFI config cmd %d\n", ret);
return ret;
}
struct iwl_rfi_freq_table_resp_cmd *iwl_rfi_get_freq_table(struct iwl_mvm *mvm)
{
struct iwl_rfi_freq_table_resp_cmd *resp;
int resp_size = sizeof(*resp);
int ret;
struct iwl_host_cmd cmd = {
.id = WIDE_ID(SYSTEM_GROUP, RFI_GET_FREQ_TABLE_CMD),
.flags = CMD_WANT_SKB,
};
if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_RFIM_SUPPORT))
return ERR_PTR(-EOPNOTSUPP);
mutex_lock(&mvm->mutex);
ret = iwl_mvm_send_cmd(mvm, &cmd);
mutex_unlock(&mvm->mutex);
if (ret)
return ERR_PTR(ret);
if (WARN_ON_ONCE(iwl_rx_packet_payload_len(cmd.resp_pkt) != resp_size))
return ERR_PTR(-EIO);
resp = kzalloc(resp_size, GFP_KERNEL);
if (!resp)
return ERR_PTR(-ENOMEM);
memcpy(resp, cmd.resp_pkt->data, resp_size);
iwl_free_resp(&cmd);
return resp;
}

View File

@ -248,14 +248,13 @@ static void rs_fw_set_supp_rates(struct ieee80211_sta *sta,
struct iwl_tlc_config_cmd *cmd)
{
int i;
unsigned long tmp;
unsigned long supp; /* must be unsigned long for for_each_set_bit */
u16 supp = 0;
unsigned long tmp; /* must be unsigned long for for_each_set_bit */
const struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
const struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap;
const struct ieee80211_sta_he_cap *he_cap = &sta->he_cap;
/* non HT rates */
supp = 0;
tmp = sta->supp_rates[sband->band];
for_each_set_bit(i, &tmp, BITS_PER_LONG)
supp |= BIT(sband->bitrates[i].hw_value);

View File

@ -20,6 +20,10 @@
void iwl_mvm_rx_rx_phy_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
unsigned int pkt_len = iwl_rx_packet_payload_len(pkt);
if (unlikely(pkt_len < sizeof(mvm->last_phy_info)))
return;
memcpy(&mvm->last_phy_info, pkt->data, sizeof(mvm->last_phy_info));
mvm->ampdu_ref++;
@ -874,12 +878,11 @@ void iwl_mvm_window_status_notif(struct iwl_mvm *mvm,
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_ba_window_status_notif *notif = (void *)pkt->data;
int i;
u32 pkt_len = iwl_rx_packet_payload_len(pkt);
if (WARN_ONCE(pkt_len != sizeof(*notif),
"Received window status notification of wrong size (%u)\n",
pkt_len))
return;
BUILD_BUG_ON(ARRAY_SIZE(notif->ra_tid) != BA_WINDOW_STREAMS_MAX);
BUILD_BUG_ON(ARRAY_SIZE(notif->mpdu_rx_count) != BA_WINDOW_STREAMS_MAX);
BUILD_BUG_ON(ARRAY_SIZE(notif->bitmap) != BA_WINDOW_STREAMS_MAX);
BUILD_BUG_ON(ARRAY_SIZE(notif->start_seq_num) != BA_WINDOW_STREAMS_MAX);
rcu_read_lock();
for (i = 0; i < BA_WINDOW_STREAMS_MAX; i++) {

View File

@ -272,7 +272,72 @@ static void iwl_mvm_get_signal_strength(struct iwl_mvm *mvm,
rx_status->chain_signal[2] = S8_MIN;
}
static int iwl_mvm_rx_crypto(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr,
static int iwl_mvm_rx_mgmt_crypto(struct ieee80211_sta *sta,
struct ieee80211_hdr *hdr,
struct iwl_rx_mpdu_desc *desc,
u32 status)
{
struct iwl_mvm_sta *mvmsta;
struct iwl_mvm_vif *mvmvif;
u8 fwkeyid = u32_get_bits(status, IWL_RX_MPDU_STATUS_KEY);
u8 keyid;
struct ieee80211_key_conf *key;
u32 len = le16_to_cpu(desc->mpdu_len);
const u8 *frame = (void *)hdr;
/*
* For non-beacon, we don't really care. But beacons may
* be filtered out, and we thus need the firmware's replay
* detection, otherwise beacons the firmware previously
* filtered could be replayed, or something like that, and
* it can filter a lot - though usually only if nothing has
* changed.
*/
if (!ieee80211_is_beacon(hdr->frame_control))
return 0;
/* good cases */
if (likely(status & IWL_RX_MPDU_STATUS_MIC_OK &&
!(status & IWL_RX_MPDU_STATUS_REPLAY_ERROR)))
return 0;
if (!sta)
return -1;
mvmsta = iwl_mvm_sta_from_mac80211(sta);
/* what? */
if (fwkeyid != 6 && fwkeyid != 7)
return -1;
mvmvif = iwl_mvm_vif_from_mac80211(mvmsta->vif);
key = rcu_dereference(mvmvif->bcn_prot.keys[fwkeyid - 6]);
if (!key)
return -1;
if (len < key->icv_len + IEEE80211_GMAC_PN_LEN + 2)
return -1;
/*
* See if the key ID matches - if not this may be due to a
* switch and the firmware may erroneously report !MIC_OK.
*/
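/*
 * The management MIC IE at the tail of the frame is laid out as
 * key ID (2 bytes, little endian), followed by the IPN
 * (IEEE80211_GMAC_PN_LEN bytes) and the MIC (icv_len bytes), so the
 * byte indexed below is the low byte of the key ID - enough to
 * distinguish BIGTK key indices 6 and 7.
 */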
keyid = frame[len - key->icv_len - IEEE80211_GMAC_PN_LEN - 2];
if (keyid != fwkeyid)
return -1;
/* Report status to mac80211 */
if (!(status & IWL_RX_MPDU_STATUS_MIC_OK))
ieee80211_key_mic_failure(key);
else if (status & IWL_RX_MPDU_STATUS_REPLAY_ERROR)
ieee80211_key_replay(key);
return -1;
}
static int iwl_mvm_rx_crypto(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
struct ieee80211_hdr *hdr,
struct ieee80211_rx_status *stats, u16 phy_info,
struct iwl_rx_mpdu_desc *desc,
u32 pkt_flags, int queue, u8 *crypt_len)
@ -345,6 +410,8 @@ static int iwl_mvm_rx_crypto(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr,
return -1;
stats->flag |= RX_FLAG_DECRYPTED;
return 0;
case RX_MPDU_RES_STATUS_SEC_CMAC_GMAC_ENC:
return iwl_mvm_rx_mgmt_crypto(sta, hdr, desc, status);
default:
/*
* Sometimes we can get frames that were not decrypted
@ -1567,6 +1634,23 @@ static inline u8 iwl_mvm_nl80211_band_from_rx_msdu(u8 phy_band)
}
}
struct iwl_rx_sta_csa {
bool all_sta_unblocked;
struct ieee80211_vif *vif;
};
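/*
 * Station iterator: clears all_sta_unblocked if any station on the CSA
 * vif still has TX disabled, so broadcast/multicast TX is only
 * re-enabled once every station has been unblocked.
 */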
static void iwl_mvm_rx_get_sta_block_tx(void *data, struct ieee80211_sta *sta)
{
struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
struct iwl_rx_sta_csa *rx_sta_csa = data;
if (mvmsta->vif != rx_sta_csa->vif)
return;
if (mvmsta->disable_tx)
rx_sta_csa->all_sta_unblocked = false;
}
void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
struct iwl_rx_cmd_buffer *rxb, int queue)
{
@ -1682,15 +1766,6 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
iwl_mvm_decode_lsig(skb, &phy_data);
rx_status = IEEE80211_SKB_RXCB(skb);
if (iwl_mvm_rx_crypto(mvm, hdr, rx_status, phy_info, desc,
le32_to_cpu(pkt->len_n_flags), queue,
&crypt_len)) {
kfree_skb(skb);
return;
}
/*
* Keep packets with CRC errors (and with overrun) for monitor mode
* (otherwise the firmware discards them) but mark them as bad.
@ -1774,6 +1849,13 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
sta = ieee80211_find_sta_by_ifaddr(mvm->hw, hdr->addr2, NULL);
}
if (iwl_mvm_rx_crypto(mvm, sta, hdr, rx_status, phy_info, desc,
le32_to_cpu(pkt->len_n_flags), queue,
&crypt_len)) {
kfree_skb(skb);
goto out;
}
if (sta) {
struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
struct ieee80211_vif *tx_blocked_vif =
@ -1798,10 +1880,24 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
if (unlikely(tx_blocked_vif) && tx_blocked_vif == vif) {
struct iwl_mvm_vif *mvmvif =
iwl_mvm_vif_from_mac80211(tx_blocked_vif);
struct iwl_rx_sta_csa rx_sta_csa = {
.all_sta_unblocked = true,
.vif = tx_blocked_vif,
};
if (mvmvif->csa_target_freq == rx_status->freq)
iwl_mvm_sta_modify_disable_tx_ap(mvm, sta,
false);
ieee80211_iterate_stations_atomic(mvm->hw,
iwl_mvm_rx_get_sta_block_tx,
&rx_sta_csa);
if (rx_sta_csa.all_sta_unblocked) {
RCU_INIT_POINTER(mvm->csa_tx_blocked_vif, NULL);
/* Unblock BCAST / MCAST station */
iwl_mvm_modify_all_sta_disable_tx(mvm, mvmvif, false);
cancel_delayed_work_sync(&mvm->cs_tx_unblock_dwork);
}
}
rs_update_last_rssi(mvm, mvmsta, rx_status);
@ -1938,6 +2034,9 @@ void iwl_mvm_rx_monitor_no_data(struct iwl_mvm *mvm, struct napi_struct *napi,
.info_type = IWL_RX_PHY_INFO_TYPE_NONE,
};
if (unlikely(iwl_rx_packet_payload_len(pkt) < sizeof(*desc)))
return;
if (unlikely(test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)))
return;
@ -2067,6 +2166,9 @@ void iwl_mvm_rx_frame_release(struct iwl_mvm *mvm, struct napi_struct *napi,
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_frame_release *release = (void *)pkt->data;
if (unlikely(iwl_rx_packet_payload_len(pkt) < sizeof(*release)))
return;
iwl_mvm_release_frames_from_notif(mvm, napi, release->baid,
le16_to_cpu(release->nssn),
queue, 0);
@ -2087,6 +2189,9 @@ void iwl_mvm_rx_bar_frame_release(struct iwl_mvm *mvm, struct napi_struct *napi,
IWL_BAR_FRAME_RELEASE_TID_MASK);
struct iwl_mvm_baid_data *baid_data;
if (unlikely(iwl_rx_packet_payload_len(pkt) < sizeof(*release)))
return;
if (WARN_ON_ONCE(baid == IWL_RX_REORDER_DATA_INVALID_BAID ||
baid >= ARRAY_SIZE(mvm->baid_map)))
return;

Some files were not shown because too many files have changed in this diff.