Merge tag 'wireless-drivers-next-for-davem-2019-03-01' of git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/wireless-drivers-next

Kalle Valo says:

====================
wireless-drivers-next patches for 5.1

Last set of patches. New hardware support for mt76, otherwise quite
normal.

Major changes:

mt76

* add driver for MT7603E/MT7628

ath10k

* more preparation for SDIO support

wil6210

* support up to 20 stations in AP mode
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
David S. Miller 2019-03-02 00:56:59 -08:00
commit cf29576fee
103 changed files with 7120 additions and 767 deletions


@ -4,6 +4,13 @@ This node provides properties for configuring the MediaTek mt76xx wireless
device. The node is expected to be specified as a child node of the PCI
controller to which the wireless chip is connected.
Alternatively, it can specify the wireless part of the MT7628/MT7688 SoC.
For SoC, use the compatible string "mediatek,mt7628-wmac" and the following
properties:
- reg: Address and length of the register set for the device.
- interrupts: Main device interrupt
Optional properties:
- mac-address: See ethernet.txt in the parent directory
@ -30,3 +37,15 @@ Optional nodes:
};
};
};
MT7628 example:
wmac: wmac@10300000 {
compatible = "mediatek,mt7628-wmac";
reg = <0x10300000 0x100000>;
interrupt-parent = <&cpuintc>;
interrupts = <6>;
mediatek,mtd-eeprom = <&factory 0x0000>;
};


@ -1066,8 +1066,8 @@ EXPORT_SYMBOL(ath10k_ce_revoke_recv_next);
* Guts of ath10k_ce_completed_send_next.
* The caller takes responsibility for any necessary locking.
*/
int ath10k_ce_completed_send_next_nolock(struct ath10k_ce_pipe *ce_state,
void **per_transfer_contextp)
static int _ath10k_ce_completed_send_next_nolock(struct ath10k_ce_pipe *ce_state,
void **per_transfer_contextp)
{
struct ath10k_ce_ring *src_ring = ce_state->src_ring;
u32 ctrl_addr = ce_state->ctrl_addr;
@ -1118,6 +1118,66 @@ int ath10k_ce_completed_send_next_nolock(struct ath10k_ce_pipe *ce_state,
return 0;
}
static int _ath10k_ce_completed_send_next_nolock_64(struct ath10k_ce_pipe *ce_state,
void **per_transfer_contextp)
{
struct ath10k_ce_ring *src_ring = ce_state->src_ring;
u32 ctrl_addr = ce_state->ctrl_addr;
struct ath10k *ar = ce_state->ar;
unsigned int nentries_mask = src_ring->nentries_mask;
unsigned int sw_index = src_ring->sw_index;
unsigned int read_index;
struct ce_desc_64 *desc;
if (src_ring->hw_index == sw_index) {
/*
* The SW completion index has caught up with the cached
* version of the HW completion index.
* Update the cached HW completion index to see whether
* the SW has really caught up to the HW, or if the cached
* value of the HW index has become stale.
*/
read_index = ath10k_ce_src_ring_read_index_get(ar, ctrl_addr);
if (read_index == 0xffffffff)
return -ENODEV;
read_index &= nentries_mask;
src_ring->hw_index = read_index;
}
if (ar->hw_params.rri_on_ddr)
read_index = ath10k_ce_src_ring_read_index_get(ar, ctrl_addr);
else
read_index = src_ring->hw_index;
if (read_index == sw_index)
return -EIO;
if (per_transfer_contextp)
*per_transfer_contextp =
src_ring->per_transfer_context[sw_index];
/* sanity */
src_ring->per_transfer_context[sw_index] = NULL;
desc = CE_SRC_RING_TO_DESC_64(src_ring->base_addr_owner_space,
sw_index);
desc->nbytes = 0;
/* Update sw_index */
sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
src_ring->sw_index = sw_index;
return 0;
}
int ath10k_ce_completed_send_next_nolock(struct ath10k_ce_pipe *ce_state,
void **per_transfer_contextp)
{
return ce_state->ops->ce_completed_send_next_nolock(ce_state,
per_transfer_contextp);
}
EXPORT_SYMBOL(ath10k_ce_completed_send_next_nolock);
static void ath10k_ce_extract_desc_data(struct ath10k *ar,
@ -1839,6 +1899,7 @@ static const struct ath10k_ce_ops ce_ops = {
.ce_send_nolock = _ath10k_ce_send_nolock,
.ce_set_src_ring_base_addr_hi = NULL,
.ce_set_dest_ring_base_addr_hi = NULL,
.ce_completed_send_next_nolock = _ath10k_ce_completed_send_next_nolock,
};
static const struct ath10k_ce_ops ce_64_ops = {
@ -1853,6 +1914,7 @@ static const struct ath10k_ce_ops ce_64_ops = {
.ce_send_nolock = _ath10k_ce_send_nolock_64,
.ce_set_src_ring_base_addr_hi = ath10k_ce_set_src_ring_base_addr_hi,
.ce_set_dest_ring_base_addr_hi = ath10k_ce_set_dest_ring_base_addr_hi,
.ce_completed_send_next_nolock = _ath10k_ce_completed_send_next_nolock_64,
};
static void ath10k_ce_set_ops(struct ath10k *ar,


@ -329,6 +329,8 @@ struct ath10k_ce_ops {
void (*ce_set_dest_ring_base_addr_hi)(struct ath10k *ar,
u32 ce_ctrl_addr,
u64 addr);
int (*ce_completed_send_next_nolock)(struct ath10k_ce_pipe *ce_state,
void **per_transfer_contextp);
};
static inline u32 ath10k_ce_base_address(struct ath10k *ar, unsigned int ce_id)


@ -549,10 +549,10 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.sw_decrypt_mcast_mgmt = true,
.hw_ops = &wcn3990_ops,
.decap_align_bytes = 1,
.num_peers = TARGET_HL_10_TLV_NUM_PEERS,
.num_peers = TARGET_HL_TLV_NUM_PEERS,
.n_cipher_suites = 11,
.ast_skid_limit = TARGET_HL_10_TLV_AST_SKID_LIMIT,
.num_wds_entries = TARGET_HL_10_TLV_NUM_WDS_ENTRIES,
.ast_skid_limit = TARGET_HL_TLV_AST_SKID_LIMIT,
.num_wds_entries = TARGET_HL_TLV_NUM_WDS_ENTRIES,
.target_64bit = true,
.rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL_DUAL_MAC,
.per_ce_irq = true,
@ -637,11 +637,24 @@ static void ath10k_init_sdio(struct ath10k *ar)
ath10k_bmi_write32(ar, hi_mbox_isr_yield_limit, 99);
ath10k_bmi_read32(ar, hi_acs_flags, &param);
param |= (HI_ACS_FLAGS_SDIO_SWAP_MAILBOX_SET |
HI_ACS_FLAGS_SDIO_REDUCE_TX_COMPL_SET |
HI_ACS_FLAGS_ALT_DATA_CREDIT_SIZE);
/* Data transfer is not initiated, when reduced Tx completion
* is used for SDIO. disable it until fixed
*/
param &= ~HI_ACS_FLAGS_SDIO_REDUCE_TX_COMPL_SET;
/* Alternate credit size of 1544 as used by SDIO firmware is
* not big enough for mac80211 / native wifi frames. disable it
*/
param &= ~HI_ACS_FLAGS_ALT_DATA_CREDIT_SIZE;
param |= HI_ACS_FLAGS_SDIO_SWAP_MAILBOX_SET;
ath10k_bmi_write32(ar, hi_acs_flags, param);
/* Explicitly set fwlog prints to zero as target may turn it on
* based on scratch registers.
*/
ath10k_bmi_read32(ar, hi_option_flag, &param);
param |= HI_OPTION_DISABLE_DBGLOG;
ath10k_bmi_write32(ar, hi_option_flag, param);
}
static int ath10k_init_configure_target(struct ath10k *ar)
@ -2304,8 +2317,8 @@ static int ath10k_core_init_firmware_features(struct ath10k *ar)
else
ar->htt.max_num_pending_tx = TARGET_TLV_NUM_MSDU_DESC;
ar->wow.max_num_patterns = TARGET_TLV_NUM_WOW_PATTERNS;
ar->fw_stats_req_mask = WMI_STAT_PDEV | WMI_STAT_VDEV |
WMI_STAT_PEER;
ar->fw_stats_req_mask = WMI_TLV_STAT_PDEV | WMI_TLV_STAT_VDEV |
WMI_TLV_STAT_PEER | WMI_TLV_STAT_PEER_EXTD;
ar->max_spatial_stream = WMI_MAX_SPATIAL_STREAM;
ar->wmi.mgmt_max_num_pending_tx = TARGET_TLV_MGMT_NUM_MSDU_DESC;
break;


@ -189,7 +189,7 @@ struct ath10k_fw_stats_peer {
u32 peer_rssi;
u32 peer_tx_rate;
u32 peer_rx_rate; /* 10x only */
u32 rx_duration;
u64 rx_duration;
};
struct ath10k_fw_extd_stats_peer {


@ -1252,6 +1252,9 @@ static int ath10k_debug_cal_data_fetch(struct ath10k *ar)
if (WARN_ON(ar->hw_params.cal_data_len > ATH10K_DEBUG_CAL_DATA_LEN))
return -EINVAL;
if (ar->hw_params.cal_data_len == 0)
return -EOPNOTSUPP;
hi_addr = host_interest_item_address(HI_ITEM(hi_board_data));
ret = ath10k_hif_diag_read(ar, hi_addr, &addr, sizeof(addr));


@ -685,11 +685,12 @@ static ssize_t ath10k_dbg_sta_dump_tx_stats(struct file *file,
" %llu ", stats->ht[j][i]);
len += scnprintf(buf + len, size - len, "\n");
len += scnprintf(buf + len, size - len,
" BW %s (20,40,80,160 MHz)\n", str[j]);
" BW %s (20,5,10,40,80,160 MHz)\n", str[j]);
len += scnprintf(buf + len, size - len,
" %llu %llu %llu %llu\n",
" %llu %llu %llu %llu %llu %llu\n",
stats->bw[j][0], stats->bw[j][1],
stats->bw[j][2], stats->bw[j][3]);
stats->bw[j][2], stats->bw[j][3],
stats->bw[j][4], stats->bw[j][5]);
len += scnprintf(buf + len, size - len,
" NSS %s (1x1,2x2,3x3,4x4)\n", str[j]);
len += scnprintf(buf + len, size - len,


@ -578,6 +578,10 @@ struct htt_mgmt_tx_completion {
#define HTT_TX_CMPL_FLAG_PA_PRESENT BIT(2)
#define HTT_TX_CMPL_FLAG_PPDU_DURATION_PRESENT BIT(3)
#define HTT_TX_DATA_RSSI_ENABLE_WCN3990 BIT(3)
#define HTT_TX_DATA_APPEND_RETRIES BIT(0)
#define HTT_TX_DATA_APPEND_TIMESTAMP BIT(1)
struct htt_rx_indication_hdr {
u8 info0; /* %HTT_RX_INDICATION_INFO0_ */
__le16 peer_id;
@ -852,6 +856,88 @@ enum htt_data_tx_flags {
#define HTT_TX_COMPL_INV_MSDU_ID 0xFFFF
struct htt_append_retries {
__le16 msdu_id;
u8 tx_retries;
u8 flag;
} __packed;
struct htt_data_tx_completion_ext {
struct htt_append_retries a_retries;
__le32 t_stamp;
__le16 msdus_rssi[0];
} __packed;
/**
* @brief target -> host TX completion indication message definition
*
* @details
* The following diagram shows the format of the TX completion indication sent
* from the target to the host
*
* |31 28|27|26|25|24|23 16| 15 |14 11|10 8|7 0|
* |-------------------------------------------------------------|
* header: |rsvd |A2|TP|A1|A0| num | t_i| tid |status| msg_type |
* |-------------------------------------------------------------|
* payload: | MSDU1 ID | MSDU0 ID |
* |-------------------------------------------------------------|
* : MSDU3 ID : MSDU2 ID :
* |-------------------------------------------------------------|
* | struct htt_tx_compl_ind_append_retries |
* |- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -|
* | struct htt_tx_compl_ind_append_tx_tstamp |
* |- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -|
* | MSDU1 ACK RSSI | MSDU0 ACK RSSI |
* |-------------------------------------------------------------|
* : MSDU3 ACK RSSI : MSDU2 ACK RSSI :
* |- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -|
* -msg_type
* Bits 7:0
* Purpose: identifies this as HTT TX completion indication
* -status
* Bits 10:8
* Purpose: the TX completion status of payload fragmentations descriptors
* Value: could be HTT_TX_COMPL_IND_STAT_OK or HTT_TX_COMPL_IND_STAT_DISCARD
* -tid
* Bits 14:11
* Purpose: the tid associated with those fragmentation descriptors. It is
* valid or not, depending on the tid_invalid bit.
* Value: 0 to 15
* -tid_invalid
* Bits 15:15
* Purpose: this bit indicates whether the tid field is valid or not
* Value: 0 indicates valid, 1 indicates invalid
* -num
* Bits 23:16
* Purpose: the number of payload in this indication
* Value: 1 to 255
* -A0 = append
* Bits 24:24
* Purpose: append the struct htt_tx_compl_ind_append_retries which contains
* the number of tx retries for one MSDU at the end of this message
* Value: 0 indicates no appending, 1 indicates appending
* -A1 = append1
* Bits 25:25
* Purpose: Append the struct htt_tx_compl_ind_append_tx_tstamp which
* contains the timestamp info for each TX msdu id in payload.
* Value: 0 indicates no appending, 1 indicates appending
* -TP = MSDU tx power presence
* Bits 26:26
* Purpose: Indicate whether the TX_COMPL_IND includes a tx power report
* for each MSDU referenced by the TX_COMPL_IND message.
* The order of the per-MSDU tx power reports matches the order
* of the MSDU IDs.
* Value: 0 indicates not appending, 1 indicates appending
* -A2 = append2
* Bits 27:27
* Purpose: Indicate whether data ACK RSSI is appended for each MSDU in
* TX_COMP_IND message. The order of the per-MSDU ACK RSSI report
* matches the order of the MSDU IDs.
* The ACK RSSI values are valid when status is COMPLETE_OK (and
* this append2 bit is set).
* Value: 0 indicates not appending, 1 indicates appending
*/
struct htt_data_tx_completion {
union {
u8 flags;

View File

@ -2119,9 +2119,15 @@ static bool ath10k_htt_rx_proc_rx_ind_hl(struct ath10k_htt *htt,
hdr = (struct ieee80211_hdr *)skb->data;
rx_status = IEEE80211_SKB_RXCB(skb);
rx_status->chains |= BIT(0);
rx_status->signal = ATH10K_DEFAULT_NOISE_FLOOR +
rx->ppdu.combined_rssi;
rx_status->flag &= ~RX_FLAG_NO_SIGNAL_VAL;
if (rx->ppdu.combined_rssi == 0) {
/* SDIO firmware does not provide signal */
rx_status->signal = 0;
rx_status->flag |= RX_FLAG_NO_SIGNAL_VAL;
} else {
rx_status->signal = ATH10K_DEFAULT_NOISE_FLOOR +
rx->ppdu.combined_rssi;
rx_status->flag &= ~RX_FLAG_NO_SIGNAL_VAL;
}
spin_lock_bh(&ar->data_lock);
ch = ar->scan_channel;
@ -2210,7 +2216,7 @@ static void ath10k_htt_rx_tx_compl_ind(struct ath10k *ar,
__le16 msdu_id, *msdus;
bool rssi_enabled = false;
u8 msdu_count = 0, num_airtime_records, tid;
int i;
int i, htt_pad = 0;
struct htt_data_tx_compl_ppdu_dur *ppdu_info;
struct ath10k_peer *peer;
u16 ppdu_info_offset = 0, peer_id;
@ -2239,9 +2245,11 @@ static void ath10k_htt_rx_tx_compl_ind(struct ath10k *ar,
msdu_count = resp->data_tx_completion.num_msdus;
msdus = resp->data_tx_completion.msdus;
rssi_enabled = ath10k_is_rssi_enable(&ar->hw_params, resp);
if (resp->data_tx_completion.flags2 & HTT_TX_CMPL_FLAG_DATA_RSSI)
rssi_enabled = true;
if (rssi_enabled)
htt_pad = ath10k_tx_data_rssi_get_pad_bytes(&ar->hw_params,
resp);
for (i = 0; i < msdu_count; i++) {
msdu_id = msdus[i];
@ -2253,10 +2261,10 @@ static void ath10k_htt_rx_tx_compl_ind(struct ath10k *ar,
* last msdu id with 0xffff
*/
if (msdu_count & 0x01) {
msdu_id = msdus[msdu_count + i + 1];
msdu_id = msdus[msdu_count + i + 1 + htt_pad];
tx_done.ack_rssi = __le16_to_cpu(msdu_id);
} else {
msdu_id = msdus[msdu_count + i];
msdu_id = msdus[msdu_count + i + htt_pad];
tx_done.ack_rssi = __le16_to_cpu(msdu_id);
}
}
@ -2913,17 +2921,19 @@ ath10k_accumulate_per_peer_tx_stats(struct ath10k *ar,
struct rate_info *txrate = &arsta->txrate;
struct ath10k_htt_tx_stats *tx_stats;
int idx, ht_idx, gi, mcs, bw, nss;
unsigned long flags;
if (!arsta->tx_stats)
return;
tx_stats = arsta->tx_stats;
gi = (arsta->txrate.flags & RATE_INFO_FLAGS_SHORT_GI);
ht_idx = txrate->mcs + txrate->nss * 8;
mcs = txrate->mcs;
flags = txrate->flags;
gi = test_bit(ATH10K_RATE_INFO_FLAGS_SGI_BIT, &flags);
mcs = ATH10K_HW_MCS_RATE(pstats->ratecode);
bw = txrate->bw;
nss = txrate->nss;
idx = mcs * 8 + 8 * 10 * nss;
ht_idx = mcs + (nss - 1) * 8;
idx = mcs * 8 + 8 * 10 * (nss - 1);
idx += bw * 2 + gi;
#define STATS_OP_FMT(name) tx_stats->stats[ATH10K_STATS_TYPE_##name]
@ -2969,7 +2979,7 @@ ath10k_accumulate_per_peer_tx_stats(struct ath10k *ar,
}
STATS_OP_FMT(AMPDU).bw[0][bw] +=
pstats->succ_bytes + pstats->retry_bytes;
STATS_OP_FMT(AMPDU).nss[0][nss] +=
STATS_OP_FMT(AMPDU).nss[0][nss - 1] +=
pstats->succ_bytes + pstats->retry_bytes;
STATS_OP_FMT(AMPDU).gi[0][gi] +=
pstats->succ_bytes + pstats->retry_bytes;
@ -2977,7 +2987,7 @@ ath10k_accumulate_per_peer_tx_stats(struct ath10k *ar,
pstats->succ_bytes + pstats->retry_bytes;
STATS_OP_FMT(AMPDU).bw[1][bw] +=
pstats->succ_pkts + pstats->retry_pkts;
STATS_OP_FMT(AMPDU).nss[1][nss] +=
STATS_OP_FMT(AMPDU).nss[1][nss - 1] +=
pstats->succ_pkts + pstats->retry_pkts;
STATS_OP_FMT(AMPDU).gi[1][gi] +=
pstats->succ_pkts + pstats->retry_pkts;
@ -2989,27 +2999,27 @@ ath10k_accumulate_per_peer_tx_stats(struct ath10k *ar,
}
STATS_OP_FMT(SUCC).bw[0][bw] += pstats->succ_bytes;
STATS_OP_FMT(SUCC).nss[0][nss] += pstats->succ_bytes;
STATS_OP_FMT(SUCC).nss[0][nss - 1] += pstats->succ_bytes;
STATS_OP_FMT(SUCC).gi[0][gi] += pstats->succ_bytes;
STATS_OP_FMT(SUCC).bw[1][bw] += pstats->succ_pkts;
STATS_OP_FMT(SUCC).nss[1][nss] += pstats->succ_pkts;
STATS_OP_FMT(SUCC).nss[1][nss - 1] += pstats->succ_pkts;
STATS_OP_FMT(SUCC).gi[1][gi] += pstats->succ_pkts;
STATS_OP_FMT(FAIL).bw[0][bw] += pstats->failed_bytes;
STATS_OP_FMT(FAIL).nss[0][nss] += pstats->failed_bytes;
STATS_OP_FMT(FAIL).nss[0][nss - 1] += pstats->failed_bytes;
STATS_OP_FMT(FAIL).gi[0][gi] += pstats->failed_bytes;
STATS_OP_FMT(FAIL).bw[1][bw] += pstats->failed_pkts;
STATS_OP_FMT(FAIL).nss[1][nss] += pstats->failed_pkts;
STATS_OP_FMT(FAIL).nss[1][nss - 1] += pstats->failed_pkts;
STATS_OP_FMT(FAIL).gi[1][gi] += pstats->failed_pkts;
STATS_OP_FMT(RETRY).bw[0][bw] += pstats->retry_bytes;
STATS_OP_FMT(RETRY).nss[0][nss] += pstats->retry_bytes;
STATS_OP_FMT(RETRY).nss[0][nss - 1] += pstats->retry_bytes;
STATS_OP_FMT(RETRY).gi[0][gi] += pstats->retry_bytes;
STATS_OP_FMT(RETRY).bw[1][bw] += pstats->retry_pkts;
STATS_OP_FMT(RETRY).nss[1][nss] += pstats->retry_pkts;
STATS_OP_FMT(RETRY).nss[1][nss - 1] += pstats->retry_pkts;
STATS_OP_FMT(RETRY).gi[1][gi] += pstats->retry_pkts;
if (txrate->flags >= RATE_INFO_FLAGS_MCS) {


@ -1100,6 +1100,32 @@ int ath10k_hw_diag_fast_download(struct ath10k *ar,
return ret;
}
static int ath10k_htt_tx_rssi_enable(struct htt_resp *resp)
{
return (resp->data_tx_completion.flags2 & HTT_TX_CMPL_FLAG_DATA_RSSI);
}
static int ath10k_htt_tx_rssi_enable_wcn3990(struct htt_resp *resp)
{
return (resp->data_tx_completion.flags2 &
HTT_TX_DATA_RSSI_ENABLE_WCN3990);
}
static int ath10k_get_htt_tx_data_rssi_pad(struct htt_resp *resp)
{
struct htt_data_tx_completion_ext extd;
int pad_bytes = 0;
if (resp->data_tx_completion.flags2 & HTT_TX_DATA_APPEND_RETRIES)
pad_bytes += sizeof(extd.a_retries) /
sizeof(extd.msdus_rssi[0]);
if (resp->data_tx_completion.flags2 & HTT_TX_DATA_APPEND_TIMESTAMP)
pad_bytes += sizeof(extd.t_stamp) / sizeof(extd.msdus_rssi[0]);
return pad_bytes;
}
const struct ath10k_hw_ops qca988x_ops = {
.set_coverage_class = ath10k_hw_qca988x_set_coverage_class,
};
@ -1124,6 +1150,10 @@ const struct ath10k_hw_ops qca99x0_ops = {
const struct ath10k_hw_ops qca6174_ops = {
.set_coverage_class = ath10k_hw_qca988x_set_coverage_class,
.enable_pll_clk = ath10k_hw_qca6174_enable_pll_clock,
.is_rssi_enable = ath10k_htt_tx_rssi_enable,
};
const struct ath10k_hw_ops wcn3990_ops = {};
const struct ath10k_hw_ops wcn3990_ops = {
.tx_data_rssi_pad_bytes = ath10k_get_htt_tx_data_rssi_pad,
.is_rssi_enable = ath10k_htt_tx_rssi_enable_wcn3990,
};


@ -609,6 +609,8 @@ struct ath10k_hw_params {
};
struct htt_rx_desc;
struct htt_resp;
struct htt_data_tx_completion_ext;
/* Defines needed for Rx descriptor abstraction */
struct ath10k_hw_ops {
@ -616,6 +618,8 @@ struct ath10k_hw_ops {
void (*set_coverage_class)(struct ath10k *ar, s16 value);
int (*enable_pll_clk)(struct ath10k *ar);
bool (*rx_desc_get_msdu_limit_error)(struct htt_rx_desc *rxd);
int (*tx_data_rssi_pad_bytes)(struct htt_resp *htt);
int (*is_rssi_enable)(struct htt_resp *resp);
};
extern const struct ath10k_hw_ops qca988x_ops;
@ -643,6 +647,24 @@ ath10k_rx_desc_msdu_limit_error(struct ath10k_hw_params *hw,
return false;
}
static inline int
ath10k_tx_data_rssi_get_pad_bytes(struct ath10k_hw_params *hw,
struct htt_resp *htt)
{
if (hw->hw_ops->tx_data_rssi_pad_bytes)
return hw->hw_ops->tx_data_rssi_pad_bytes(htt);
return 0;
}
static inline int
ath10k_is_rssi_enable(struct ath10k_hw_params *hw,
struct htt_resp *resp)
{
if (hw->hw_ops->is_rssi_enable)
return hw->hw_ops->is_rssi_enable(resp);
return 0;
}
/* Target specific defines for MAIN firmware */
#define TARGET_NUM_VDEVS 8
#define TARGET_NUM_PEER_AST 2
@ -730,9 +752,9 @@ ath10k_rx_desc_msdu_limit_error(struct ath10k_hw_params *hw,
#define TARGET_TLV_MGMT_NUM_MSDU_DESC (50)
/* Target specific defines for WMI-HL-1.0 firmware */
#define TARGET_HL_10_TLV_NUM_PEERS 14
#define TARGET_HL_10_TLV_AST_SKID_LIMIT 6
#define TARGET_HL_10_TLV_NUM_WDS_ENTRIES 2
#define TARGET_HL_TLV_NUM_PEERS 33
#define TARGET_HL_TLV_AST_SKID_LIMIT 16
#define TARGET_HL_TLV_NUM_WDS_ENTRIES 2
/* Diagnostic Window */
#define CE_DIAG_PIPE 7


@ -1382,6 +1382,12 @@ static int ath10k_sdio_hif_power_up(struct ath10k *ar,
ath10k_dbg(ar, ATH10K_DBG_BOOT, "sdio power on\n");
ret = ath10k_sdio_config(ar);
if (ret) {
ath10k_err(ar, "failed to config sdio: %d\n", ret);
return ret;
}
sdio_claim_host(func);
ret = sdio_enable_func(func);
@ -1419,11 +1425,19 @@ static void ath10k_sdio_hif_power_down(struct ath10k *ar)
/* Disable the card */
sdio_claim_host(ar_sdio->func);
ret = sdio_disable_func(ar_sdio->func);
sdio_release_host(ar_sdio->func);
if (ret)
ret = sdio_disable_func(ar_sdio->func);
if (ret) {
ath10k_warn(ar, "unable to disable sdio function: %d\n", ret);
sdio_release_host(ar_sdio->func);
return;
}
ret = mmc_hw_reset(ar_sdio->func->card->host);
if (ret)
ath10k_warn(ar, "unable to reset sdio: %d\n", ret);
sdio_release_host(ar_sdio->func);
ar_sdio->is_disabled = true;
}
@ -2028,12 +2042,6 @@ static int ath10k_sdio_probe(struct sdio_func *func,
ath10k_sdio_set_mbox_info(ar);
ret = ath10k_sdio_config(ar);
if (ret) {
ath10k_err(ar, "failed to config sdio: %d\n", ret);
goto err_free_wq;
}
bus_params.dev_type = ATH10K_DEV_TYPE_HL;
/* TODO: don't know yet how to get chip_id with SDIO */
bus_params.chip_id = 0;


@ -13,6 +13,7 @@
#include "wmi-tlv.h"
#include "p2p.h"
#include "testmode.h"
#include <linux/bitfield.h>
/***************/
/* TLV helpers */
@ -673,6 +674,10 @@ ath10k_wmi_tlv_op_pull_mgmt_tx_compl_ev(struct ath10k *ar, struct sk_buff *skb,
arg->desc_id = ev->desc_id;
arg->status = ev->status;
arg->pdev_id = ev->pdev_id;
arg->ppdu_id = ev->ppdu_id;
if (test_bit(WMI_SERVICE_TX_DATA_ACK_RSSI, ar->wmi.svc_map))
arg->ack_rssi = ev->ack_rssi;
kfree(tb);
return 0;
@ -682,8 +687,12 @@ struct wmi_tlv_tx_bundle_compl_parse {
const __le32 *num_reports;
const __le32 *desc_ids;
const __le32 *status;
const __le32 *ppdu_ids;
const __le32 *ack_rssi;
bool desc_ids_done;
bool status_done;
bool ppdu_ids_done;
bool ack_rssi_done;
};
static int
@ -703,6 +712,12 @@ ath10k_wmi_tlv_mgmt_tx_bundle_compl_parse(struct ath10k *ar, u16 tag, u16 len,
} else if (!bundle_tx_compl->status_done) {
bundle_tx_compl->status_done = true;
bundle_tx_compl->status = ptr;
} else if (!bundle_tx_compl->ppdu_ids_done) {
bundle_tx_compl->ppdu_ids_done = true;
bundle_tx_compl->ppdu_ids = ptr;
} else if (!bundle_tx_compl->ack_rssi_done) {
bundle_tx_compl->ack_rssi_done = true;
bundle_tx_compl->ack_rssi = ptr;
}
break;
default:
@ -733,6 +748,10 @@ static int ath10k_wmi_tlv_op_pull_mgmt_tx_bundle_compl_ev(
arg->num_reports = *bundle_tx_compl.num_reports;
arg->desc_ids = bundle_tx_compl.desc_ids;
arg->status = bundle_tx_compl.status;
arg->ppdu_ids = bundle_tx_compl.ppdu_ids;
if (test_bit(WMI_SERVICE_TX_DATA_ACK_RSSI, ar->wmi.svc_map))
arg->ack_rssi = bundle_tx_compl.ack_rssi;
return 0;
}
@ -1278,6 +1297,7 @@ static int ath10k_wmi_tlv_op_pull_fw_stats(struct ath10k *ar,
{
const void **tb;
const struct wmi_tlv_stats_ev *ev;
u32 num_peer_stats_extd;
const void *data;
u32 num_pdev_stats;
u32 num_vdev_stats;
@ -1285,6 +1305,7 @@ static int ath10k_wmi_tlv_op_pull_fw_stats(struct ath10k *ar,
u32 num_bcnflt_stats;
u32 num_chan_stats;
size_t data_len;
u32 stats_id;
int ret;
int i;
@ -1309,11 +1330,13 @@ static int ath10k_wmi_tlv_op_pull_fw_stats(struct ath10k *ar,
num_peer_stats = __le32_to_cpu(ev->num_peer_stats);
num_bcnflt_stats = __le32_to_cpu(ev->num_bcnflt_stats);
num_chan_stats = __le32_to_cpu(ev->num_chan_stats);
stats_id = __le32_to_cpu(ev->stats_id);
num_peer_stats_extd = __le32_to_cpu(ev->num_peer_stats_extd);
ath10k_dbg(ar, ATH10K_DBG_WMI,
"wmi tlv stats update pdev %i vdev %i peer %i bcnflt %i chan %i\n",
"wmi tlv stats update pdev %i vdev %i peer %i bcnflt %i chan %i peer_extd %i\n",
num_pdev_stats, num_vdev_stats, num_peer_stats,
num_bcnflt_stats, num_chan_stats);
num_bcnflt_stats, num_chan_stats, num_peer_stats_extd);
for (i = 0; i < num_pdev_stats; i++) {
const struct wmi_pdev_stats *src;
@ -1378,6 +1401,28 @@ static int ath10k_wmi_tlv_op_pull_fw_stats(struct ath10k *ar,
ath10k_wmi_pull_peer_stats(&src->old, dst);
dst->peer_rx_rate = __le32_to_cpu(src->peer_rx_rate);
if (stats_id & WMI_TLV_STAT_PEER_EXTD) {
const struct wmi_tlv_peer_stats_extd *extd;
unsigned long rx_duration_high;
extd = data + sizeof(*src) * (num_peer_stats - i - 1)
+ sizeof(*extd) * i;
dst->rx_duration = __le32_to_cpu(extd->rx_duration);
rx_duration_high = __le32_to_cpu
(extd->rx_duration_high);
if (test_bit(WMI_TLV_PEER_RX_DURATION_HIGH_VALID_BIT,
&rx_duration_high)) {
rx_duration_high =
FIELD_GET(WMI_TLV_PEER_RX_DURATION_HIGH_MASK,
rx_duration_high);
dst->rx_duration |= (u64)rx_duration_high <<
WMI_TLV_PEER_RX_DURATION_SHIFT;
}
}
list_add_tail(&dst->list, &stats->peers);
}
@ -1565,21 +1610,55 @@ ath10k_wmi_tlv_op_gen_pdev_set_param(struct ath10k *ar, u32 param_id,
cmd->param_id = __cpu_to_le32(param_id);
cmd->param_value = __cpu_to_le32(param_value);
ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv pdev set param\n");
ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv pdev set param %d value 0x%x\n",
param_id, param_value);
return skb;
}
static void
ath10k_wmi_tlv_put_host_mem_chunks(struct ath10k *ar, void *host_mem_chunks)
{
struct host_memory_chunk *chunk;
struct wmi_tlv *tlv;
int i;
__le16 tlv_len, tlv_tag;
tlv_tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_WLAN_HOST_MEMORY_CHUNK);
tlv_len = __cpu_to_le16(sizeof(*chunk));
for (i = 0; i < ar->wmi.num_mem_chunks; i++) {
tlv = host_mem_chunks;
tlv->tag = tlv_tag;
tlv->len = tlv_len;
chunk = (void *)tlv->value;
chunk->ptr = __cpu_to_le32(ar->wmi.mem_chunks[i].paddr);
chunk->size = __cpu_to_le32(ar->wmi.mem_chunks[i].len);
chunk->req_id = __cpu_to_le32(ar->wmi.mem_chunks[i].req_id);
ath10k_dbg(ar, ATH10K_DBG_WMI,
"wmi-tlv chunk %d len %d, addr 0x%llx, id 0x%x\n",
i,
ar->wmi.mem_chunks[i].len,
(unsigned long long)ar->wmi.mem_chunks[i].paddr,
ar->wmi.mem_chunks[i].req_id);
host_mem_chunks += sizeof(*tlv);
host_mem_chunks += sizeof(*chunk);
}
}
static struct sk_buff *ath10k_wmi_tlv_op_gen_init(struct ath10k *ar)
{
struct sk_buff *skb;
struct wmi_tlv *tlv;
struct wmi_tlv_init_cmd *cmd;
struct wmi_tlv_resource_config *cfg;
struct wmi_host_mem_chunks *chunks;
void *chunks;
size_t len, chunks_len;
void *ptr;
chunks_len = ar->wmi.num_mem_chunks * sizeof(struct host_memory_chunk);
chunks_len = ar->wmi.num_mem_chunks *
(sizeof(struct host_memory_chunk) + sizeof(*tlv));
len = (sizeof(*tlv) + sizeof(*cmd)) +
(sizeof(*tlv) + sizeof(*cfg)) +
(sizeof(*tlv) + chunks_len);
@ -1679,7 +1758,10 @@ static struct sk_buff *ath10k_wmi_tlv_op_gen_init(struct ath10k *ar)
cfg->num_ocb_schedules = __cpu_to_le32(0);
cfg->host_capab = __cpu_to_le32(WMI_TLV_FLAG_MGMT_BUNDLE_TX_COMPL);
ath10k_wmi_put_host_mem_chunks(ar, chunks);
if (test_bit(WMI_SERVICE_TX_DATA_ACK_RSSI, ar->wmi.svc_map))
cfg->host_capab |= __cpu_to_le32(WMI_RSRC_CFG_FLAG_TX_ACK_RSSI);
ath10k_wmi_tlv_put_host_mem_chunks(ar, chunks);
ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv init\n");
return skb;
@ -2035,7 +2117,8 @@ ath10k_wmi_tlv_op_gen_vdev_set_param(struct ath10k *ar, u32 vdev_id,
cmd->param_id = __cpu_to_le32(param_id);
cmd->param_value = __cpu_to_le32(param_value);
ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev set param\n");
ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev %d set param %d value 0x%x\n",
vdev_id, param_id, param_value);
return skb;
}
@ -2351,7 +2434,9 @@ ath10k_wmi_tlv_op_gen_peer_set_param(struct ath10k *ar, u32 vdev_id,
cmd->param_value = __cpu_to_le32(param_value);
ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv peer set param\n");
ath10k_dbg(ar, ATH10K_DBG_WMI,
"wmi tlv vdev %d peer %pM set param %d value 0x%x\n",
vdev_id, peer_addr, param_id, param_value);
return skb;
}
@ -2745,7 +2830,9 @@ ath10k_wmi_tlv_op_gen_mgmt_tx_send(struct ath10k *ar, struct sk_buff *msdu,
arvif = (void *)cb->vif->drv_priv;
vdev_id = arvif->vdev_id;
if (WARN_ON_ONCE(!ieee80211_is_mgmt(hdr->frame_control)))
if (WARN_ON_ONCE(!ieee80211_is_mgmt(hdr->frame_control) &&
(!(ieee80211_is_nullfunc(hdr->frame_control) ||
ieee80211_is_qos_nullfunc(hdr->frame_control)))))
return ERR_PTR(-EINVAL);
len = sizeof(*cmd) + 2 * sizeof(*tlv);
@ -2753,10 +2840,8 @@ ath10k_wmi_tlv_op_gen_mgmt_tx_send(struct ath10k *ar, struct sk_buff *msdu,
if ((ieee80211_is_action(hdr->frame_control) ||
ieee80211_is_deauth(hdr->frame_control) ||
ieee80211_is_disassoc(hdr->frame_control)) &&
ieee80211_has_protected(hdr->frame_control)) {
len += IEEE80211_CCMP_MIC_LEN;
ieee80211_has_protected(hdr->frame_control))
buf_len += IEEE80211_CCMP_MIC_LEN;
}
buf_len = min_t(u32, buf_len, WMI_TLV_MGMT_TX_FRAME_MAX_LEN);
buf_len = round_up(buf_len, 4);


@ -14,6 +14,8 @@
#define WMI_TLV_VDEV_PARAM_UNSUPPORTED 0
#define WMI_TLV_MGMT_TX_FRAME_MAX_LEN 64
#define WMI_RSRC_CFG_FLAG_TX_ACK_RSSI BIT(18)
enum wmi_tlv_grp_id {
WMI_TLV_GRP_START = 0x3,
WMI_TLV_GRP_SCAN = WMI_TLV_GRP_START,
@ -1384,6 +1386,25 @@ enum wmi_tlv_service {
WMI_TLV_SERVICE_AP_TWT = 153,
WMI_TLV_SERVICE_GMAC_OFFLOAD_SUPPORT = 154,
WMI_TLV_SERVICE_SPOOF_MAC_SUPPORT = 155,
WMI_TLV_SERVICE_PEER_TID_CONFIGS_SUPPORT = 156,
WMI_TLV_SERVICE_VDEV_SWRETRY_PER_AC_CONFIG_SUPPORT = 157,
WMI_TLV_SERVICE_DUAL_BEACON_ON_SINGLE_MAC_SCC_SUPPORT = 158,
WMI_TLV_SERVICE_DUAL_BEACON_ON_SINGLE_MAC_MCC_SUPPORT = 159,
WMI_TLV_SERVICE_MOTION_DET = 160,
WMI_TLV_SERVICE_INFRA_MBSSID = 161,
WMI_TLV_SERVICE_OBSS_SPATIAL_REUSE = 162,
WMI_TLV_SERVICE_VDEV_DIFFERENT_BEACON_INTERVAL_SUPPORT = 163,
WMI_TLV_SERVICE_NAN_DBS_SUPPORT = 164,
WMI_TLV_SERVICE_NDI_DBS_SUPPORT = 165,
WMI_TLV_SERVICE_NAN_SAP_SUPPORT = 166,
WMI_TLV_SERVICE_NDI_SAP_SUPPORT = 167,
WMI_TLV_SERVICE_CFR_CAPTURE_SUPPORT = 168,
WMI_TLV_SERVICE_CFR_CAPTURE_IND_MSG_TYPE_1 = 169,
WMI_TLV_SERVICE_ESP_SUPPORT = 170,
WMI_TLV_SERVICE_PEER_CHWIDTH_CHANGE = 171,
WMI_TLV_SERVICE_WLAN_HPCS_PULSE = 172,
WMI_TLV_SERVICE_PER_VDEV_CHAINMASK_CONFIG_SUPPORT = 173,
WMI_TLV_SERVICE_TX_DATA_MGMT_ACK_RSSI = 174,
WMI_TLV_MAX_EXT_SERVICE = 256,
};
@ -1557,6 +1578,8 @@ wmi_tlv_svc_map_ext(const __le32 *in, unsigned long *out, size_t len)
SVCMAP(WMI_TLV_SERVICE_THERM_THROT,
WMI_SERVICE_THERM_THROT,
WMI_TLV_MAX_SERVICE);
SVCMAP(WMI_TLV_SERVICE_TX_DATA_MGMT_ACK_RSSI,
WMI_SERVICE_TX_DATA_ACK_RSSI, WMI_TLV_MAX_SERVICE);
}
#undef SVCMAP
@ -1588,6 +1611,8 @@ struct wmi_tlv_mgmt_tx_compl_ev {
__le32 desc_id;
__le32 status;
__le32 pdev_id;
__le32 ppdu_id;
__le32 ack_rssi;
};
#define WMI_TLV_MGMT_RX_NUM_RSSI 4
@ -1864,6 +1889,22 @@ struct wmi_tlv_req_stats_cmd {
struct wmi_mac_addr peer_macaddr;
} __packed;
#define WMI_TLV_PEER_RX_DURATION_HIGH_VALID_BIT 31
#define WMI_TLV_PEER_RX_DURATION_HIGH_MASK GENMASK(30, 0)
#define WMI_TLV_PEER_RX_DURATION_SHIFT 32
struct wmi_tlv_peer_stats_extd {
struct wmi_mac_addr peer_macaddr;
__le32 rx_duration;
__le32 peer_tx_bytes;
__le32 peer_rx_bytes;
__le32 last_tx_rate_code;
__le32 last_tx_power;
__le32 rx_mc_bc_cnt;
__le32 rx_duration_high;
__le32 reserved[2];
} __packed;
struct wmi_tlv_vdev_stats {
__le32 vdev_id;
__le32 beacon_snr;
@ -1957,6 +1998,10 @@ struct wmi_tlv_stats_ev {
__le32 num_peer_stats;
__le32 num_bcnflt_stats;
__le32 num_chan_stats;
__le32 num_mib_stats;
__le32 pdev_id;
__le32 num_bcn_stats;
__le32 num_peer_stats_extd;
} __packed;
struct wmi_tlv_p2p_noa_ev {


@ -2342,8 +2342,8 @@ static bool ath10k_wmi_rx_is_decrypted(struct ath10k *ar,
return true;
}
static int wmi_process_mgmt_tx_comp(struct ath10k *ar, u32 desc_id,
u32 status)
static int
wmi_process_mgmt_tx_comp(struct ath10k *ar, struct mgmt_tx_compl_params *param)
{
struct ath10k_mgmt_tx_pkt_addr *pkt_addr;
struct ath10k_wmi *wmi = &ar->wmi;
@ -2353,10 +2353,10 @@ static int wmi_process_mgmt_tx_comp(struct ath10k *ar, u32 desc_id,
spin_lock_bh(&ar->data_lock);
pkt_addr = idr_find(&wmi->mgmt_pending_tx, desc_id);
pkt_addr = idr_find(&wmi->mgmt_pending_tx, param->desc_id);
if (!pkt_addr) {
ath10k_warn(ar, "received mgmt tx completion for invalid msdu_id: %d\n",
desc_id);
param->desc_id);
ret = -ENOENT;
goto out;
}
@ -2366,17 +2366,21 @@ static int wmi_process_mgmt_tx_comp(struct ath10k *ar, u32 desc_id,
msdu->len, DMA_TO_DEVICE);
info = IEEE80211_SKB_CB(msdu);
if (status)
if (param->status) {
info->flags &= ~IEEE80211_TX_STAT_ACK;
else
} else {
info->flags |= IEEE80211_TX_STAT_ACK;
info->status.ack_signal = ATH10K_DEFAULT_NOISE_FLOOR +
param->ack_rssi;
info->status.is_valid_ack_signal = true;
}
ieee80211_tx_status_irqsafe(ar->hw, msdu);
ret = 0;
out:
idr_remove(&wmi->mgmt_pending_tx, desc_id);
idr_remove(&wmi->mgmt_pending_tx, param->desc_id);
spin_unlock_bh(&ar->data_lock);
return ret;
}
@ -2384,6 +2388,7 @@ out:
int ath10k_wmi_event_mgmt_tx_compl(struct ath10k *ar, struct sk_buff *skb)
{
struct wmi_tlv_mgmt_tx_compl_ev_arg arg;
struct mgmt_tx_compl_params param;
int ret;
ret = ath10k_wmi_pull_mgmt_tx_compl(ar, skb, &arg);
@ -2392,8 +2397,14 @@ int ath10k_wmi_event_mgmt_tx_compl(struct ath10k *ar, struct sk_buff *skb)
return ret;
}
wmi_process_mgmt_tx_comp(ar, __le32_to_cpu(arg.desc_id),
__le32_to_cpu(arg.status));
memset(&param, 0, sizeof(struct mgmt_tx_compl_params));
param.desc_id = __le32_to_cpu(arg.desc_id);
param.status = __le32_to_cpu(arg.status);
if (test_bit(WMI_SERVICE_TX_DATA_ACK_RSSI, ar->wmi.svc_map))
param.ack_rssi = __le32_to_cpu(arg.ack_rssi);
wmi_process_mgmt_tx_comp(ar, &param);
ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv evnt mgmt tx completion\n");
@ -2403,6 +2414,7 @@ int ath10k_wmi_event_mgmt_tx_compl(struct ath10k *ar, struct sk_buff *skb)
int ath10k_wmi_event_mgmt_tx_bundle_compl(struct ath10k *ar, struct sk_buff *skb)
{
struct wmi_tlv_mgmt_tx_bundle_compl_ev_arg arg;
struct mgmt_tx_compl_params param;
u32 num_reports;
int i, ret;
@ -2414,9 +2426,15 @@ int ath10k_wmi_event_mgmt_tx_bundle_compl(struct ath10k *ar, struct sk_buff *skb
num_reports = __le32_to_cpu(arg.num_reports);
for (i = 0; i < num_reports; i++)
wmi_process_mgmt_tx_comp(ar, __le32_to_cpu(arg.desc_ids[i]),
__le32_to_cpu(arg.status[i]));
for (i = 0; i < num_reports; i++) {
memset(&param, 0, sizeof(struct mgmt_tx_compl_params));
param.desc_id = __le32_to_cpu(arg.desc_ids[i]);
param.status = __le32_to_cpu(arg.desc_ids[i]);
if (test_bit(WMI_SERVICE_TX_DATA_ACK_RSSI, ar->wmi.svc_map))
param.ack_rssi = __le32_to_cpu(arg.ack_rssi[i]);
wmi_process_mgmt_tx_comp(ar, &param);
}
ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv event bundle mgmt tx completion\n");
@ -8304,7 +8322,7 @@ ath10k_wmi_fw_peer_stats_fill(const struct ath10k_fw_stats_peer *peer,
"Peer TX rate", peer->peer_tx_rate);
len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
"Peer RX rate", peer->peer_rx_rate);
len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
len += scnprintf(buf + len, buf_len - len, "%30s %llu\n",
"Peer RX duration", peer->rx_duration);
len += scnprintf(buf + len, buf_len - len, "\n");


@ -4534,6 +4534,13 @@ enum wmi_10_4_stats_id {
WMI_10_4_STAT_VDEV_EXTD = BIT(4),
};
enum wmi_tlv_stats_id {
WMI_TLV_STAT_PDEV = BIT(0),
WMI_TLV_STAT_VDEV = BIT(1),
WMI_TLV_STAT_PEER = BIT(2),
WMI_TLV_STAT_PEER_EXTD = BIT(10),
};
struct wlan_inst_rssi_args {
__le16 cfg_retry_count;
__le16 retry_count;
@ -5045,12 +5052,13 @@ enum wmi_rate_preamble {
#define ATH10K_FW_SKIPPED_RATE_CTRL(flags) (((flags) >> 6) & 0x1)
#define ATH10K_VHT_MCS_NUM 10
#define ATH10K_BW_NUM 4
#define ATH10K_BW_NUM 6
#define ATH10K_NSS_NUM 4
#define ATH10K_LEGACY_NUM 12
#define ATH10K_GI_NUM 2
#define ATH10K_HT_MCS_NUM 32
#define ATH10K_RATE_TABLE_NUM 320
#define ATH10K_RATE_INFO_FLAGS_SGI_BIT 2
/* Value to disable fixed rate setting */
#define WMI_FIXED_RATE_NONE (0xff)
@ -6725,16 +6733,27 @@ struct wmi_scan_ev_arg {
__le32 vdev_id;
};
struct mgmt_tx_compl_params {
u32 desc_id;
u32 status;
u32 ppdu_id;
int ack_rssi;
};
struct wmi_tlv_mgmt_tx_compl_ev_arg {
__le32 desc_id;
__le32 status;
__le32 pdev_id;
__le32 ppdu_id;
__le32 ack_rssi;
};
struct wmi_tlv_mgmt_tx_bundle_compl_ev_arg {
__le32 num_reports;
const __le32 *desc_ids;
const __le32 *status;
const __le32 *ppdu_ids;
const __le32 *ack_rssi;
};
struct wmi_mgmt_rx_ev_arg {


@ -148,7 +148,7 @@ static ssize_t read_file_ani(struct file *file, char __user *user_buf,
{ "OFDM LEVEL", ah->ani.ofdmNoiseImmunityLevel },
{ "CCK LEVEL", ah->ani.cckNoiseImmunityLevel },
{ "SPUR UP", ah->stats.ast_ani_spurup },
{ "SPUR DOWN", ah->stats.ast_ani_spurup },
{ "SPUR DOWN", ah->stats.ast_ani_spurdown },
{ "OFDM WS-DET ON", ah->stats.ast_ani_ofdmon },
{ "OFDM WS-DET OFF", ah->stats.ast_ani_ofdmoff },
{ "MRC-CCK ON", ah->stats.ast_ani_ccklow },


@ -1006,9 +1006,6 @@ static void ath_rx_count_airtime(struct ath_softc *sc,
struct ath_rx_status *rs,
struct sk_buff *skb)
{
struct ath_node *an;
struct ath_acq *acq;
struct ath_vif *avp;
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
struct ath_hw *ah = sc->sc_ah;
struct ath_common *common = ath9k_hw_common(ah);
@ -1019,7 +1016,7 @@ static void ath_rx_count_airtime(struct ath_softc *sc,
int phy;
u16 len = rs->rs_datalen;
u32 airtime = 0;
u8 tidno, acno;
u8 tidno;
if (!ieee80211_is_data(hdr->frame_control))
return;
@ -1029,11 +1026,7 @@ static void ath_rx_count_airtime(struct ath_softc *sc,
sta = ieee80211_find_sta_by_ifaddr(sc->hw, hdr->addr2, NULL);
if (!sta)
goto exit;
an = (struct ath_node *) sta->drv_priv;
avp = (struct ath_vif *) an->vif->drv_priv;
tidno = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
acno = TID_TO_WME_AC(tidno);
acq = &avp->chanctx->acq[acno];
rxs = IEEE80211_SKB_RXCB(skb);


@ -2552,6 +2552,9 @@ static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
}
tx_info->status.rates[tx_rateindex].count = ts->ts_longretry + 1;
/* we report airtime in ath_tx_count_airtime(), don't report twice */
tx_info->status.tx_time = 0;
}
static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)


@ -1,6 +1,6 @@
/*
* Copyright (c) 2012-2017 Qualcomm Atheros, Inc.
* Copyright (c) 2018, The Linux Foundation. All rights reserved.
* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@ -395,7 +395,7 @@ static int wil_find_cid_by_idx(struct wil6210_priv *wil, u8 mid, int idx)
{
int i;
for (i = 0; i < ARRAY_SIZE(wil->sta); i++) {
for (i = 0; i < max_assoc_sta; i++) {
if (wil->sta[i].status == wil_sta_unused)
continue;
if (wil->sta[i].mid != mid)
@ -1580,6 +1580,12 @@ static int _wil_cfg80211_merge_extra_ies(const u8 *ies1, u16 ies1_len,
u8 *buf, *dpos;
const u8 *spos;
if (!ies1)
ies1_len = 0;
if (!ies2)
ies2_len = 0;
if (ies1_len == 0 && ies2_len == 0) {
*merged_ies = NULL;
*merged_len = 0;
@ -1589,17 +1595,19 @@ static int _wil_cfg80211_merge_extra_ies(const u8 *ies1, u16 ies1_len,
buf = kmalloc(ies1_len + ies2_len, GFP_KERNEL);
if (!buf)
return -ENOMEM;
memcpy(buf, ies1, ies1_len);
if (ies1)
memcpy(buf, ies1, ies1_len);
dpos = buf + ies1_len;
spos = ies2;
while (spos + 1 < ies2 + ies2_len) {
while (spos && (spos + 1 < ies2 + ies2_len)) {
/* IE tag at offset 0, length at offset 1 */
u16 ielen = 2 + spos[1];
if (spos + ielen > ies2 + ies2_len)
break;
if (spos[0] == WLAN_EID_VENDOR_SPECIFIC &&
!_wil_cfg80211_find_ie(ies1, ies1_len, spos, ielen)) {
(!ies1 || !_wil_cfg80211_find_ie(ies1, ies1_len,
spos, ielen))) {
memcpy(dpos, spos, ielen);
dpos += ielen;
}
@ -3007,7 +3015,7 @@ static int wil_rf_sector_set_selected(struct wiphy *wiphy,
wil, vif->mid, WMI_INVALID_RF_SECTOR_INDEX,
sector_type, WIL_CID_ALL);
if (rc == -EINVAL) {
for (i = 0; i < WIL6210_MAX_CID; i++) {
for (i = 0; i < max_assoc_sta; i++) {
if (wil->sta[i].mid != vif->mid)
continue;
rc = wil_rf_sector_wmi_set_selected(


@ -1,6 +1,6 @@
/*
* Copyright (c) 2012-2017 Qualcomm Atheros, Inc.
* Copyright (c) 2018, The Linux Foundation. All rights reserved.
* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@ -162,7 +162,7 @@ static int ring_show(struct seq_file *s, void *data)
snprintf(name, sizeof(name), "tx_%2d", i);
if (cid < WIL6210_MAX_CID)
if (cid < max_assoc_sta)
seq_printf(s,
"\n%pM CID %d TID %d 1x%s BACK([%u] %u TU A%s) [%3d|%3d] idle %s\n",
wil->sta[cid].addr, cid, tid,
@ -792,14 +792,14 @@ static ssize_t wil_write_back(struct file *file, const char __user *buf,
"BACK: del_rx require at least 2 params\n");
return -EINVAL;
}
if (p1 < 0 || p1 >= WIL6210_MAX_CID) {
if (p1 < 0 || p1 >= max_assoc_sta) {
wil_err(wil, "BACK: invalid CID %d\n", p1);
return -EINVAL;
}
if (rc < 4)
p3 = WLAN_REASON_QSTA_LEAVE_QBSS;
sta = &wil->sta[p1];
wmi_delba_rx(wil, sta->mid, mk_cidxtid(p1, p2), p3);
wmi_delba_rx(wil, sta->mid, p1, p2, p3);
} else {
wil_err(wil, "BACK: Unrecognized command \"%s\"\n", cmd);
return -EINVAL;
@ -1243,7 +1243,7 @@ static int bf_show(struct seq_file *s, void *data)
memset(&reply, 0, sizeof(reply));
for (i = 0; i < ARRAY_SIZE(wil->sta); i++) {
for (i = 0; i < max_assoc_sta; i++) {
u32 status;
cmd.cid = i;
@ -1340,7 +1340,7 @@ static int link_show(struct seq_file *s, void *data)
if (!sinfo)
return -ENOMEM;
for (i = 0; i < ARRAY_SIZE(wil->sta); i++) {
for (i = 0; i < max_assoc_sta; i++) {
struct wil_sta_info *p = &wil->sta[i];
char *status = "unknown";
struct wil6210_vif *vif;
@ -1542,7 +1542,7 @@ __acquires(&p->tid_rx_lock) __releases(&p->tid_rx_lock)
struct wil6210_priv *wil = s->private;
int i, tid, mcs;
for (i = 0; i < ARRAY_SIZE(wil->sta); i++) {
for (i = 0; i < max_assoc_sta; i++) {
struct wil_sta_info *p = &wil->sta[i];
char *status = "unknown";
u8 aid = 0;
@ -1651,7 +1651,7 @@ __acquires(&p->tid_rx_lock) __releases(&p->tid_rx_lock)
struct wil6210_priv *wil = s->private;
int i, bin;
for (i = 0; i < ARRAY_SIZE(wil->sta); i++) {
for (i = 0; i < max_assoc_sta; i++) {
struct wil_sta_info *p = &wil->sta[i];
char *status = "unknown";
u8 aid = 0;
@ -1740,7 +1740,7 @@ static ssize_t wil_tx_latency_write(struct file *file, const char __user *buf,
size_t sz = sizeof(u64) * WIL_NUM_LATENCY_BINS;
wil->tx_latency_res = val;
for (i = 0; i < ARRAY_SIZE(wil->sta); i++) {
for (i = 0; i < max_assoc_sta; i++) {
struct wil_sta_info *sta = &wil->sta[i];
kfree(sta->tx_latency_bins);
@ -1825,7 +1825,7 @@ static void wil_link_stats_debugfs_show_vif(struct wil6210_vif *vif,
}
seq_printf(s, "TSF %lld\n", vif->fw_stats_tsf);
for (i = 0; i < ARRAY_SIZE(wil->sta); i++) {
for (i = 0; i < max_assoc_sta; i++) {
if (wil->sta[i].status == wil_sta_unused)
continue;
if (wil->sta[i].mid != vif->mid)
@ -2386,6 +2386,7 @@ static const struct dbg_off dbg_statics[] = {
{"led_polarity", 0644, (ulong)&led_polarity, doff_u8},
{"status_index", 0644, (ulong)&dbg_status_msg_index, doff_u32},
{"sring_index", 0644, (ulong)&dbg_sring_index, doff_u32},
{"drop_if_ring_full", 0644, (ulong)&drop_if_ring_full, doff_u8},
{},
};
@ -2439,7 +2440,7 @@ void wil6210_debugfs_remove(struct wil6210_priv *wil)
wil->debug = NULL;
kfree(wil->dbg_data.data_arr);
for (i = 0; i < ARRAY_SIZE(wil->sta); i++)
for (i = 0; i < max_assoc_sta; i++)
kfree(wil->sta[i].tx_latency_bins);
/* free pmc memory without sending command to fw, as it will


@ -1,6 +1,6 @@
/*
* Copyright (c) 2012-2017 Qualcomm Atheros, Inc.
* Copyright (c) 2018, The Linux Foundation. All rights reserved.
* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@ -575,10 +575,14 @@ static irqreturn_t wil6210_irq_misc(int irq, void *cookie)
}
if (isr & BIT_DMA_EP_MISC_ICR_HALP) {
wil_dbg_irq(wil, "irq_misc: HALP IRQ invoked\n");
wil6210_mask_halp(wil);
isr &= ~BIT_DMA_EP_MISC_ICR_HALP;
complete(&wil->halp.comp);
if (wil->halp.handle_icr) {
/* no need to handle HALP ICRs until next vote */
wil->halp.handle_icr = false;
wil_dbg_irq(wil, "irq_misc: HALP IRQ invoked\n");
wil6210_mask_halp(wil);
complete(&wil->halp.comp);
}
}
wil->isr_misc = isr;


@ -1,6 +1,6 @@
/*
* Copyright (c) 2012-2017 Qualcomm Atheros, Inc.
* Copyright (c) 2018, The Linux Foundation. All rights reserved.
* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@ -219,7 +219,7 @@ static bool wil_vif_is_connected(struct wil6210_priv *wil, u8 mid)
{
int i;
for (i = 0; i < WIL6210_MAX_CID; i++) {
for (i = 0; i < max_assoc_sta; i++) {
if (wil->sta[i].mid == mid &&
wil->sta[i].status == wil_sta_connected)
return true;
@ -322,7 +322,7 @@ static void _wil6210_disconnect_complete(struct wil6210_vif *vif,
wil_disconnect_cid_complete(vif, cid, reason_code);
} else { /* all */
wil_dbg_misc(wil, "Disconnect complete all\n");
for (cid = 0; cid < WIL6210_MAX_CID; cid++)
for (cid = 0; cid < max_assoc_sta; cid++)
wil_disconnect_cid_complete(vif, cid, reason_code);
}
@ -434,7 +434,7 @@ static void _wil6210_disconnect(struct wil6210_vif *vif, const u8 *bssid,
wil_disconnect_cid(vif, cid, reason_code);
} else { /* all */
wil_dbg_misc(wil, "Disconnect all\n");
for (cid = 0; cid < WIL6210_MAX_CID; cid++)
for (cid = 0; cid < max_assoc_sta; cid++)
wil_disconnect_cid(vif, cid, reason_code);
}
@ -1895,7 +1895,7 @@ int wil_find_cid(struct wil6210_priv *wil, u8 mid, const u8 *mac)
int i;
int rc = -ENOENT;
for (i = 0; i < ARRAY_SIZE(wil->sta); i++) {
for (i = 0; i < max_assoc_sta; i++) {
if (wil->sta[i].mid == mid &&
wil->sta[i].status != wil_sta_unused &&
ether_addr_equal(wil->sta[i].addr, mac)) {
@ -1919,11 +1919,14 @@ void wil_halp_vote(struct wil6210_priv *wil)
if (++wil->halp.ref_cnt == 1) {
reinit_completion(&wil->halp.comp);
/* mark to IRQ context to handle HALP ICR */
wil->halp.handle_icr = true;
wil6210_set_halp(wil);
rc = wait_for_completion_timeout(&wil->halp.comp, to_jiffies);
if (!rc) {
wil_err(wil, "HALP vote timed out\n");
/* Mask HALP as done in case the interrupt is raised */
wil->halp.handle_icr = false;
wil6210_mask_halp(wil);
} else {
wil_dbg_irq(wil,


@ -1,6 +1,6 @@
/*
* Copyright (c) 2014-2017 Qualcomm Atheros, Inc.
* Copyright (c) 2018, The Linux Foundation. All rights reserved.
* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@ -307,8 +307,8 @@ static u16 wil_agg_size(struct wil6210_priv *wil, u16 req_agg_wsize)
}
/* Block Ack - Rx side (recipient) */
int wil_addba_rx_request(struct wil6210_priv *wil, u8 mid,
u8 cidxtid, u8 dialog_token, __le16 ba_param_set,
int wil_addba_rx_request(struct wil6210_priv *wil, u8 mid, u8 cid, u8 tid,
u8 dialog_token, __le16 ba_param_set,
__le16 ba_timeout, __le16 ba_seq_ctrl)
__acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock)
{
@ -316,7 +316,6 @@ __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock)
u16 agg_timeout = le16_to_cpu(ba_timeout);
u16 seq_ctrl = le16_to_cpu(ba_seq_ctrl);
struct wil_sta_info *sta;
u8 cid, tid;
u16 agg_wsize = 0;
/* bit 0: A-MSDU supported
* bit 1: policy (should be 0 for us)
@ -335,10 +334,9 @@ __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock)
int rc = 0;
might_sleep();
parse_cidxtid(cidxtid, &cid, &tid);
/* sanity checks */
if (cid >= WIL6210_MAX_CID) {
if (cid >= max_assoc_sta) {
wil_err(wil, "BACK: invalid CID %d\n", cid);
rc = -EINVAL;
goto out;


@ -1,5 +1,6 @@
/*
* Copyright (c) 2013-2016 Qualcomm Atheros, Inc.
* Copyright (c) 2019, The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@ -181,7 +182,7 @@ TRACE_EVENT(wil6210_rx,
__entry->seq = wil_rxdesc_seq(d);
__entry->mcs = wil_rxdesc_mcs(d);
),
TP_printk("index %d len %d mid %d cid %d tid %d mcs %d seq 0x%03x"
TP_printk("index %d len %d mid %d cid (%%8) %d tid %d mcs %d seq 0x%03x"
" type 0x%1x subtype 0x%1x", __entry->index, __entry->len,
__entry->mid, __entry->cid, __entry->tid, __entry->mcs,
__entry->seq, __entry->type, __entry->subtype)


@ -1,6 +1,6 @@
/*
* Copyright (c) 2012-2017 Qualcomm Atheros, Inc.
* Copyright (c) 2018, The Linux Foundation. All rights reserved.
* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@ -30,11 +30,6 @@
#include "trace.h"
#include "txrx_edma.h"
static bool rtap_include_phy_info;
module_param(rtap_include_phy_info, bool, 0444);
MODULE_PARM_DESC(rtap_include_phy_info,
" Include PHY info in the radiotap header, default - no");
bool rx_align_2;
module_param(rx_align_2, bool, 0444);
MODULE_PARM_DESC(rx_align_2, " align Rx buffers on 4*n+2, default - no");
@ -43,6 +38,9 @@ bool rx_large_buf;
module_param(rx_large_buf, bool, 0444);
MODULE_PARM_DESC(rx_large_buf, " allocate 8KB RX buffers, default - no");
/* Drop Tx packets in case Tx ring is full */
bool drop_if_ring_full;
static inline uint wil_rx_snaplen(void)
{
return rx_align_2 ? 6 : 0;
@ -332,87 +330,34 @@ static void wil_rx_add_radiotap_header(struct wil6210_priv *wil,
u8 mcs_flags;
u8 mcs_index;
} __packed;
struct wil6210_rtap_vendor {
struct wil6210_rtap rtap;
/* vendor */
u8 vendor_oui[3] __aligned(2);
u8 vendor_ns;
__le16 vendor_skip;
u8 vendor_data[0];
} __packed;
struct vring_rx_desc *d = wil_skb_rxdesc(skb);
struct wil6210_rtap_vendor *rtap_vendor;
struct wil6210_rtap *rtap;
int rtap_len = sizeof(struct wil6210_rtap);
int phy_length = 0; /* phy info header size, bytes */
static char phy_data[128];
struct ieee80211_channel *ch = wil->monitor_chandef.chan;
if (rtap_include_phy_info) {
rtap_len = sizeof(*rtap_vendor) + sizeof(*d);
/* calculate additional length */
if (d->dma.status & RX_DMA_STATUS_PHY_INFO) {
/**
* PHY info starts from 8-byte boundary
* there are 8-byte lines, last line may be partially
* written (HW bug), thus FW configures for last line
* to be excessive. Driver skips this last line.
*/
int len = min_t(int, 8 + sizeof(phy_data),
wil_rxdesc_phy_length(d));
if (len > 8) {
void *p = skb_tail_pointer(skb);
void *pa = PTR_ALIGN(p, 8);
if (skb_tailroom(skb) >= len + (pa - p)) {
phy_length = len - 8;
memcpy(phy_data, pa, phy_length);
}
}
}
rtap_len += phy_length;
}
if (skb_headroom(skb) < rtap_len &&
pskb_expand_head(skb, rtap_len, 0, GFP_ATOMIC)) {
wil_err(wil, "Unable to expand headroom to %d\n", rtap_len);
return;
}
rtap_vendor = skb_push(skb, rtap_len);
memset(rtap_vendor, 0, rtap_len);
rtap = skb_push(skb, rtap_len);
memset(rtap, 0, rtap_len);
rtap_vendor->rtap.rthdr.it_version = PKTHDR_RADIOTAP_VERSION;
rtap_vendor->rtap.rthdr.it_len = cpu_to_le16(rtap_len);
rtap_vendor->rtap.rthdr.it_present = cpu_to_le32(
(1 << IEEE80211_RADIOTAP_FLAGS) |
rtap->rthdr.it_version = PKTHDR_RADIOTAP_VERSION;
rtap->rthdr.it_len = cpu_to_le16(rtap_len);
rtap->rthdr.it_present = cpu_to_le32((1 << IEEE80211_RADIOTAP_FLAGS) |
(1 << IEEE80211_RADIOTAP_CHANNEL) |
(1 << IEEE80211_RADIOTAP_MCS));
if (d->dma.status & RX_DMA_STATUS_ERROR)
rtap_vendor->rtap.flags |= IEEE80211_RADIOTAP_F_BADFCS;
rtap->flags |= IEEE80211_RADIOTAP_F_BADFCS;
rtap_vendor->rtap.chnl_freq = cpu_to_le16(ch ? ch->center_freq : 58320);
rtap_vendor->rtap.chnl_flags = cpu_to_le16(0);
rtap->chnl_freq = cpu_to_le16(ch ? ch->center_freq : 58320);
rtap->chnl_flags = cpu_to_le16(0);
rtap_vendor->rtap.mcs_present = IEEE80211_RADIOTAP_MCS_HAVE_MCS;
rtap_vendor->rtap.mcs_flags = 0;
rtap_vendor->rtap.mcs_index = wil_rxdesc_mcs(d);
if (rtap_include_phy_info) {
rtap_vendor->rtap.rthdr.it_present |= cpu_to_le32(1 <<
IEEE80211_RADIOTAP_VENDOR_NAMESPACE);
/* OUI for Wilocity 04:ce:14 */
rtap_vendor->vendor_oui[0] = 0x04;
rtap_vendor->vendor_oui[1] = 0xce;
rtap_vendor->vendor_oui[2] = 0x14;
rtap_vendor->vendor_ns = 1;
/* Rx descriptor + PHY data */
rtap_vendor->vendor_skip = cpu_to_le16(sizeof(*d) +
phy_length);
memcpy(rtap_vendor->vendor_data, (void *)d, sizeof(*d));
memcpy(rtap_vendor->vendor_data + sizeof(*d), phy_data,
phy_length);
}
rtap->mcs_present = IEEE80211_RADIOTAP_MCS_HAVE_MCS;
rtap->mcs_flags = 0;
rtap->mcs_index = wil_rxdesc_mcs(d);
}
static bool wil_is_rx_idle(struct wil6210_priv *wil)
@ -427,6 +372,76 @@ static bool wil_is_rx_idle(struct wil6210_priv *wil)
return true;
}
static int wil_rx_get_cid_by_skb(struct wil6210_priv *wil, struct sk_buff *skb)
{
struct vring_rx_desc *d = wil_skb_rxdesc(skb);
int mid = wil_rxdesc_mid(d);
struct wil6210_vif *vif = wil->vifs[mid];
/* cid from DMA descriptor is limited to 3 bits.
* In case of cid>=8, the value would be cid modulo 8 and we need to
* find real cid by locating the transmitter (ta) inside sta array
*/
int cid = wil_rxdesc_cid(d);
unsigned int snaplen = wil_rx_snaplen();
struct ieee80211_hdr_3addr *hdr;
int i;
unsigned char *ta;
u8 ftype;
/* in monitor mode there are no connections */
if (vif->wdev.iftype == NL80211_IFTYPE_MONITOR)
return cid;
ftype = wil_rxdesc_ftype(d) << 2;
if (likely(ftype == IEEE80211_FTYPE_DATA)) {
if (unlikely(skb->len < ETH_HLEN + snaplen)) {
wil_err_ratelimited(wil,
"Short data frame, len = %d\n",
skb->len);
return -ENOENT;
}
ta = wil_skb_get_sa(skb);
} else {
if (unlikely(skb->len < sizeof(struct ieee80211_hdr_3addr))) {
wil_err_ratelimited(wil, "Short frame, len = %d\n",
skb->len);
return -ENOENT;
}
hdr = (void *)skb->data;
ta = hdr->addr2;
}
if (max_assoc_sta <= WIL6210_RX_DESC_MAX_CID)
return cid;
/* assuming no concurrency between AP interfaces and STA interfaces.
* multista is used only in P2P_GO or AP mode. In other modes return
* cid from the rx descriptor
*/
if (vif->wdev.iftype != NL80211_IFTYPE_P2P_GO &&
vif->wdev.iftype != NL80211_IFTYPE_AP)
return cid;
/* For Rx packets cid from rx descriptor is limited to 3 bits (0..7),
* to find the real cid, compare transmitter address with the stored
* stations mac address in the driver sta array
*/
for (i = cid; i < max_assoc_sta; i += WIL6210_RX_DESC_MAX_CID) {
if (wil->sta[i].status != wil_sta_unused &&
ether_addr_equal(wil->sta[i].addr, ta)) {
cid = i;
break;
}
}
if (i >= max_assoc_sta) {
wil_err_ratelimited(wil, "Could not find cid for frame with transmit addr = %pM, iftype = %d, frametype = %d, len = %d\n",
ta, vif->wdev.iftype, ftype, skb->len);
cid = -ENOENT;
}
return cid;
}
/**
* reap 1 frame from @swhead
*
@ -452,7 +467,7 @@ static struct sk_buff *wil_vring_reap_rx(struct wil6210_priv *wil,
int i;
struct wil_net_stats *stats;
BUILD_BUG_ON(sizeof(struct vring_rx_desc) > sizeof(skb->cb));
BUILD_BUG_ON(sizeof(struct skb_rx_info) > sizeof(skb->cb));
again:
if (unlikely(wil_ring_is_empty(vring)))
@ -484,7 +499,6 @@ again:
wil_hex_dump_txrx("RxD ", DUMP_PREFIX_NONE, 32, 4,
(const void *)d, sizeof(*d), false);
cid = wil_rxdesc_cid(d);
mid = wil_rxdesc_mid(d);
vif = wil->vifs[mid];
@ -495,11 +509,9 @@ again:
goto again;
}
ndev = vif_to_ndev(vif);
stats = &wil->sta[cid].stats;
if (unlikely(dmalen > sz)) {
wil_err(wil, "Rx size too large: %d bytes!\n", dmalen);
stats->rx_large_frame++;
wil_err_ratelimited(wil, "Rx size too large: %d bytes!\n",
dmalen);
kfree_skb(skb);
goto again;
}
@ -510,6 +522,14 @@ again:
wil_hex_dump_txrx("Rx ", DUMP_PREFIX_OFFSET, 16, 1,
skb->data, skb_headlen(skb), false);
cid = wil_rx_get_cid_by_skb(wil, skb);
if (cid == -ENOENT) {
kfree_skb(skb);
goto again;
}
wil_skb_set_cid(skb, (u8)cid);
stats = &wil->sta[cid].stats;
stats->last_mcs_rx = wil_rxdesc_mcs(d);
if (stats->last_mcs_rx < ARRAY_SIZE(stats->rx_per_mcs))
stats->rx_per_mcs[stats->last_mcs_rx]++;
@ -556,13 +576,6 @@ again:
goto again;
}
if (unlikely(skb->len < ETH_HLEN + snaplen)) {
wil_err(wil, "Short frame, len = %d\n", skb->len);
stats->rx_short_frame++;
kfree_skb(skb);
goto again;
}
/* L4 IDENT is on when HW calculated checksum, check status
* and in case of error drop the packet
* higher stack layers will handle retransmission (if required)
@ -659,7 +672,7 @@ int reverse_memcmp(const void *cs, const void *ct, size_t count)
static int wil_rx_crypto_check(struct wil6210_priv *wil, struct sk_buff *skb)
{
struct vring_rx_desc *d = wil_skb_rxdesc(skb);
int cid = wil_rxdesc_cid(d);
int cid = wil_skb_get_cid(skb);
int tid = wil_rxdesc_tid(d);
int key_id = wil_rxdesc_key_id(d);
int mc = wil_rxdesc_mcast(d);
@ -707,7 +720,7 @@ static void wil_get_netif_rx_params(struct sk_buff *skb, int *cid,
{
struct vring_rx_desc *d = wil_skb_rxdesc(skb);
*cid = wil_rxdesc_cid(d); /* always 0..7, no need to check */
*cid = wil_skb_get_cid(skb);
*security = wil_rxdesc_security(d);
}
@ -724,11 +737,11 @@ void wil_netif_rx_any(struct sk_buff *skb, struct net_device *ndev)
unsigned int len = skb->len;
int cid;
int security;
struct ethhdr *eth = (void *)skb->data;
u8 *sa, *da = wil_skb_get_da(skb);
/* here looking for DA, not A1, thus Rxdesc's 'mcast' indication
* is not suitable, need to look at data
*/
int mcast = is_multicast_ether_addr(eth->h_dest);
int mcast = is_multicast_ether_addr(da);
struct wil_net_stats *stats;
struct sk_buff *xmit_skb = NULL;
static const char * const gro_res_str[] = {
@ -759,7 +772,8 @@ void wil_netif_rx_any(struct sk_buff *skb, struct net_device *ndev)
}
if (wdev->iftype == NL80211_IFTYPE_STATION) {
if (mcast && ether_addr_equal(eth->h_source, ndev->dev_addr)) {
sa = wil_skb_get_sa(skb);
if (mcast && ether_addr_equal(sa, ndev->dev_addr)) {
/* mcast packet looped back to us */
rc = GRO_DROP;
dev_kfree_skb(skb);
@ -772,8 +786,7 @@ void wil_netif_rx_any(struct sk_buff *skb, struct net_device *ndev)
*/
xmit_skb = skb_copy(skb, GFP_ATOMIC);
} else {
int xmit_cid = wil_find_cid(wil, vif->mid,
eth->h_dest);
int xmit_cid = wil_find_cid(wil, vif->mid, da);
if (xmit_cid >= 0) {
/* The destination station is associated to
@ -971,7 +984,6 @@ static int wil_vring_init_tx(struct wil6210_vif *vif, int id, int size,
.ring_size = cpu_to_le16(size),
},
.ringid = id,
.cidxtid = mk_cidxtid(cid, tid),
.encap_trans_type = WMI_VRING_ENC_TYPE_802_3,
.mac_ctrl = 0,
.to_resolution = 0,
@ -991,6 +1003,14 @@ static int wil_vring_init_tx(struct wil6210_vif *vif, int id, int size,
struct wil_ring *vring = &wil->ring_tx[id];
struct wil_ring_tx_data *txdata = &wil->ring_tx_data[id];
if (cid >= WIL6210_RX_DESC_MAX_CID) {
cmd.vring_cfg.cidxtid = CIDXTID_EXTENDED_CID_TID;
cmd.vring_cfg.cid = cid;
cmd.vring_cfg.tid = tid;
} else {
cmd.vring_cfg.cidxtid = mk_cidxtid(cid, tid);
}
wil_dbg_misc(wil, "vring_init_tx: max_mpdu_size %d\n",
cmd.vring_cfg.tx_sw_ring.max_mpdu_size);
lockdep_assert_held(&wil->mutex);
@ -1043,7 +1063,7 @@ static int wil_vring_init_tx(struct wil6210_vif *vif, int id, int size,
txdata->enabled = 0;
spin_unlock_bh(&txdata->lock);
wil_vring_free(wil, vring);
wil->ring2cid_tid[id][0] = WIL6210_MAX_CID;
wil->ring2cid_tid[id][0] = max_assoc_sta;
wil->ring2cid_tid[id][1] = 0;
out:
@ -1128,7 +1148,7 @@ fail:
txdata->dot1x_open = false;
txdata->enabled = 0;
spin_unlock_bh(&txdata->lock);
wil->ring2cid_tid[ring_id][0] = WIL6210_MAX_CID;
wil->ring2cid_tid[ring_id][0] = max_assoc_sta;
wil->ring2cid_tid[ring_id][1] = 0;
return rc;
}
@ -1175,7 +1195,7 @@ int wil_vring_init_bcast(struct wil6210_vif *vif, int id, int size)
if (rc)
goto out;
wil->ring2cid_tid[id][0] = WIL6210_MAX_CID; /* CID */
wil->ring2cid_tid[id][0] = max_assoc_sta; /* CID */
wil->ring2cid_tid[id][1] = 0; /* TID */
cmd.vring_cfg.tx_sw_ring.ring_mem_base = cpu_to_le64(vring->pa);
@ -1217,12 +1237,13 @@ static struct wil_ring *wil_find_tx_ucast(struct wil6210_priv *wil,
struct wil6210_vif *vif,
struct sk_buff *skb)
{
int i;
struct ethhdr *eth = (void *)skb->data;
int cid = wil_find_cid(wil, vif->mid, eth->h_dest);
int i, cid;
const u8 *da = wil_skb_get_da(skb);
int min_ring_id = wil_get_min_tx_ring_id(wil);
if (cid < 0)
cid = wil_find_cid(wil, vif->mid, da);
if (cid < 0 || cid >= max_assoc_sta)
return NULL;
/* TODO: fix for multiple TID */
@ -1235,7 +1256,7 @@ static struct wil_ring *wil_find_tx_ucast(struct wil6210_priv *wil,
struct wil_ring_tx_data *txdata = &wil->ring_tx_data[i];
wil_dbg_txrx(wil, "find_tx_ucast: (%pM) -> [%d]\n",
eth->h_dest, i);
da, i);
if (v->va && txdata->enabled) {
return v;
} else {
@ -1274,7 +1295,7 @@ static struct wil_ring *wil_find_tx_ring_sta(struct wil6210_priv *wil,
continue;
cid = wil->ring2cid_tid[i][0];
if (cid >= WIL6210_MAX_CID) /* skip BCAST */
if (cid >= max_assoc_sta) /* skip BCAST */
continue;
if (!wil->ring_tx_data[i].dot1x_open &&
@ -1326,10 +1347,10 @@ static struct wil_ring *wil_find_tx_bcast_1(struct wil6210_priv *wil,
static void wil_set_da_for_vring(struct wil6210_priv *wil,
struct sk_buff *skb, int vring_index)
{
struct ethhdr *eth = (void *)skb->data;
u8 *da = wil_skb_get_da(skb);
int cid = wil->ring2cid_tid[vring_index][0];
ether_addr_copy(eth->h_dest, wil->sta[cid].addr);
ether_addr_copy(da, wil->sta[cid].addr);
}
static struct wil_ring *wil_find_tx_bcast_2(struct wil6210_priv *wil,
@ -1340,8 +1361,7 @@ static struct wil_ring *wil_find_tx_bcast_2(struct wil6210_priv *wil,
struct sk_buff *skb2;
int i;
u8 cid;
struct ethhdr *eth = (void *)skb->data;
char *src = eth->h_source;
const u8 *src = wil_skb_get_sa(skb);
struct wil_ring_tx_data *txdata, *txdata2;
int min_ring_id = wil_get_min_tx_ring_id(wil);
@ -1353,7 +1373,7 @@ static struct wil_ring *wil_find_tx_bcast_2(struct wil6210_priv *wil,
continue;
cid = wil->ring2cid_tid[i][0];
if (cid >= WIL6210_MAX_CID) /* skip BCAST */
if (cid >= max_assoc_sta) /* skip BCAST */
continue;
if (!wil->ring_tx_data[i].dot1x_open &&
skb->protocol != cpu_to_be16(ETH_P_PAE))
@ -1381,7 +1401,7 @@ found:
if (!v2->va || txdata2->mid != vif->mid)
continue;
cid = wil->ring2cid_tid[i][0];
if (cid >= WIL6210_MAX_CID) /* skip BCAST */
if (cid >= max_assoc_sta) /* skip BCAST */
continue;
if (!wil->ring_tx_data[i].dot1x_open &&
skb->protocol != cpu_to_be16(ETH_P_PAE))
@ -2032,6 +2052,10 @@ static inline void __wil_update_net_queues(struct wil6210_priv *wil,
wil_dbg_txrx(wil, "check_stop=%d, mid=%d, stopped=%d",
check_stop, vif->mid, vif->net_queue_stopped);
if (ring && drop_if_ring_full)
/* no need to stop/wake net queues */
return;
if (check_stop == vif->net_queue_stopped)
/* net queues already in desired state */
return;
@ -2095,8 +2119,8 @@ netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
struct wil6210_vif *vif = ndev_to_vif(ndev);
struct wil6210_priv *wil = vif_to_wil(vif);
struct ethhdr *eth = (void *)skb->data;
bool bcast = is_multicast_ether_addr(eth->h_dest);
const u8 *da = wil_skb_get_da(skb);
bool bcast = is_multicast_ether_addr(da);
struct wil_ring *ring;
static bool pr_once_fw;
int rc;
@ -2143,7 +2167,7 @@ netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev)
ring = wil_find_tx_ucast(wil, vif, skb);
}
if (unlikely(!ring)) {
wil_dbg_txrx(wil, "No Tx RING found for %pM\n", eth->h_dest);
wil_dbg_txrx(wil, "No Tx RING found for %pM\n", da);
goto drop;
}
/* set up vring entry */
@ -2157,6 +2181,8 @@ netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev)
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
case -ENOMEM:
if (drop_if_ring_full)
goto drop;
return NETDEV_TX_BUSY;
default:
break; /* goto drop; */
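The new drop_if_ring_full module parameter only changes the full-ring policy in this path. A minimal standalone sketch of the two behaviours (the names below are illustrative, not the driver's):

#include <stdio.h>

enum tx_result { TX_OK, TX_BUSY, TX_DROPPED };

/* Default: report busy so the stack requeues and the net queue may be
 * stopped.  With drop_if_ring_full set, the frame is dropped instead.
 */
static enum tx_result xmit(int ring_full, int drop_if_ring_full)
{
    if (!ring_full)
        return TX_OK;
    return drop_if_ring_full ? TX_DROPPED : TX_BUSY;
}

int main(void)
{
    printf("%d %d %d\n",
           xmit(0, 0),   /* TX_OK */
           xmit(1, 0),   /* TX_BUSY */
           xmit(1, 1));  /* TX_DROPPED */
    return 0;
}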
@ -2228,7 +2254,7 @@ int wil_tx_complete(struct wil6210_vif *vif, int ringid)
used_before_complete = wil_ring_used_tx(vring);
if (cid < WIL6210_MAX_CID)
if (cid < max_assoc_sta)
stats = &wil->sta[cid].stats;
while (!wil_ring_is_empty(vring)) {
@ -2337,7 +2363,7 @@ static void wil_get_reorder_params(struct wil6210_priv *wil,
struct vring_rx_desc *d = wil_skb_rxdesc(skb);
*tid = wil_rxdesc_tid(d);
*cid = wil_rxdesc_cid(d);
*cid = wil_skb_get_cid(skb);
*mid = wil_rxdesc_mid(d);
*seq = wil_rxdesc_seq(d);
*mcast = wil_rxdesc_mcast(d);

View File

@ -1,6 +1,6 @@
/*
* Copyright (c) 2012-2016 Qualcomm Atheros, Inc.
* Copyright (c) 2018, The Linux Foundation. All rights reserved.
* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@ -458,6 +458,18 @@ union wil_ring_desc {
union wil_rx_desc rx;
} __packed;
struct packet_rx_info {
u8 cid;
};
/* this struct will be stored in the skb cb buffer
* max length of the struct is limited to 48 bytes
*/
struct skb_rx_info {
struct vring_rx_desc rx_desc;
struct packet_rx_info rx_info;
};
static inline int wil_rxdesc_tid(struct vring_rx_desc *d)
{
return WIL_GET_BITS(d->mac.d0, 0, 3);
@ -530,11 +542,6 @@ static inline int wil_rxdesc_mcast(struct vring_rx_desc *d)
return WIL_GET_BITS(d->mac.d1, 13, 14);
}
static inline int wil_rxdesc_phy_length(struct vring_rx_desc *d)
{
return WIL_GET_BITS(d->dma.d0, 16, 29);
}
static inline struct vring_rx_desc *wil_skb_rxdesc(struct sk_buff *skb)
{
return (void *)skb->cb;
@ -560,11 +567,25 @@ static inline int wil_ring_is_full(struct wil_ring *ring)
return wil_ring_next_tail(ring) == ring->swhead;
}
static inline bool wil_need_txstat(struct sk_buff *skb)
static inline u8 *wil_skb_get_da(struct sk_buff *skb)
{
struct ethhdr *eth = (void *)skb->data;
return is_unicast_ether_addr(eth->h_dest) && skb->sk &&
return eth->h_dest;
}
static inline u8 *wil_skb_get_sa(struct sk_buff *skb)
{
struct ethhdr *eth = (void *)skb->data;
return eth->h_source;
}
static inline bool wil_need_txstat(struct sk_buff *skb)
{
const u8 *da = wil_skb_get_da(skb);
return is_unicast_ether_addr(da) && skb->sk &&
(skb_shinfo(skb)->tx_flags & SKBTX_WIFI_STATUS);
}
@ -610,6 +631,20 @@ static inline bool wil_val_in_range(int val, int min, int max)
return val >= min && val < max;
}
static inline u8 wil_skb_get_cid(struct sk_buff *skb)
{
struct skb_rx_info *skb_rx_info = (void *)skb->cb;
return skb_rx_info->rx_info.cid;
}
static inline void wil_skb_set_cid(struct sk_buff *skb, u8 cid)
{
struct skb_rx_info *skb_rx_info = (void *)skb->cb;
skb_rx_info->rx_info.cid = cid;
}
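The skb_rx_info layout introduced here relies on fitting into the 48-byte skb->cb area, which the new BUILD_BUG_ON in txrx.c enforces. A standalone C11 sketch of the same idea; the 32-byte descriptor below is a placeholder, not the real struct vring_rx_desc.

#include <stdio.h>

#define CB_SIZE 48                       /* sizeof(skb->cb) in Linux */

struct fake_rx_desc { unsigned char raw[32]; };   /* placeholder size */
struct pkt_rx_info  { unsigned char cid; };

struct cb_layout {
    struct fake_rx_desc rx_desc;
    struct pkt_rx_info  rx_info;
};

/* compile-time check, same role as BUILD_BUG_ON() in the driver */
_Static_assert(sizeof(struct cb_layout) <= CB_SIZE, "cb overflow");

static void set_cid(unsigned char *cb, unsigned char cid)
{
    ((struct cb_layout *)cb)->rx_info.cid = cid;
}

static unsigned char get_cid(const unsigned char *cb)
{
    return ((const struct cb_layout *)cb)->rx_info.cid;
}

int main(void)
{
    unsigned char cb[CB_SIZE] = {0};

    set_cid(cb, 17);
    printf("cid = %u\n", get_cid(cb));
    return 0;
}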
void wil_netif_rx_any(struct sk_buff *skb, struct net_device *ndev);
void wil_rx_reorder(struct wil6210_priv *wil, struct sk_buff *skb);
void wil_rx_bar(struct wil6210_priv *wil, struct wil6210_vif *vif,

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2012-2018 The Linux Foundation. All rights reserved.
* Copyright (c) 2012-2019 The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@ -727,7 +727,7 @@ static int wil_ring_init_tx_edma(struct wil6210_vif *vif, int ring_id,
txdata->enabled = 0;
spin_unlock_bh(&txdata->lock);
wil_ring_free_edma(wil, ring);
wil->ring2cid_tid[ring_id][0] = WIL6210_MAX_CID;
wil->ring2cid_tid[ring_id][0] = max_assoc_sta;
wil->ring2cid_tid[ring_id][1] = 0;
out:
@ -932,7 +932,7 @@ again:
eop = wil_rx_status_get_eop(msg);
cid = wil_rx_status_get_cid(msg);
if (unlikely(!wil_val_in_range(cid, 0, WIL6210_MAX_CID))) {
if (unlikely(!wil_val_in_range(cid, 0, max_assoc_sta))) {
wil_err(wil, "Corrupt cid=%d, sring->swhead=%d\n",
cid, sring->swhead);
rxdata->skipping = true;
@ -1137,7 +1137,7 @@ int wil_tx_sring_handler(struct wil6210_priv *wil,
/* Total number of completed descriptors in all descriptor rings */
int desc_cnt = 0;
int cid;
struct wil_net_stats *stats = NULL;
struct wil_net_stats *stats;
struct wil_tx_enhanced_desc *_d;
unsigned int ring_id;
unsigned int num_descs;
@ -1187,8 +1187,7 @@ int wil_tx_sring_handler(struct wil6210_priv *wil,
ndev = vif_to_ndev(vif);
cid = wil->ring2cid_tid[ring_id][0];
if (cid < WIL6210_MAX_CID)
stats = &wil->sta[cid].stats;
stats = (cid < max_assoc_sta ? &wil->sta[cid].stats : NULL);
wil_dbg_txrx(wil,
"tx_status: completed desc_ring (%d), num_descs (%d)\n",

View File

@ -1,6 +1,6 @@
/*
* Copyright (c) 2012-2017 Qualcomm Atheros, Inc.
* Copyright (c) 2018, The Linux Foundation. All rights reserved.
* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@ -38,6 +38,8 @@ extern bool rx_large_buf;
extern bool debug_fw;
extern bool disable_ap_sme;
extern bool ftm_mode;
extern bool drop_if_ring_full;
extern uint max_assoc_sta;
struct wil6210_priv;
struct wil6210_vif;
@ -89,7 +91,8 @@ static inline u32 WIL_GET_BITS(u32 x, int b0, int b1)
#define WIL_RING_SIZE_ORDER_MIN (5)
#define WIL_RING_SIZE_ORDER_MAX (15)
#define WIL6210_MAX_TX_RINGS (24) /* HW limit */
#define WIL6210_MAX_CID (8) /* HW limit */
#define WIL6210_MAX_CID (20) /* max number of stations */
#define WIL6210_RX_DESC_MAX_CID (8) /* HW limit */
#define WIL6210_NAPI_BUDGET (16) /* arbitrary */
#define WIL_MAX_AMPDU_SIZE (64 * 1024) /* FW/HW limit */
#define WIL_MAX_AGG_WSIZE (32) /* FW/HW limit */
@ -457,7 +460,7 @@ static inline void parse_cidxtid(u8 cidxtid, u8 *cid, u8 *tid)
*/
static inline bool wil_cid_valid(u8 cid)
{
return cid < WIL6210_MAX_CID;
return (cid >= 0 && cid < max_assoc_sta);
}
struct wil6210_mbox_ring {
@ -791,6 +794,7 @@ struct wil_halp {
struct mutex lock; /* protect halp ref_cnt */
unsigned int ref_cnt;
struct completion comp;
u8 handle_icr;
};
struct wil_blob_wrapper {
@ -1235,7 +1239,7 @@ int wmi_disconnect_sta(struct wil6210_vif *vif, const u8 *mac, u16 reason,
int wmi_addba(struct wil6210_priv *wil, u8 mid,
u8 ringid, u8 size, u16 timeout);
int wmi_delba_tx(struct wil6210_priv *wil, u8 mid, u8 ringid, u16 reason);
int wmi_delba_rx(struct wil6210_priv *wil, u8 mid, u8 cidxtid, u16 reason);
int wmi_delba_rx(struct wil6210_priv *wil, u8 mid, u8 cid, u8 tid, u16 reason);
int wmi_addba_rx_resp(struct wil6210_priv *wil,
u8 mid, u8 cid, u8 tid, u8 token,
u16 status, bool amsdu, u16 agg_wsize, u16 timeout);
@ -1248,8 +1252,8 @@ int wmi_port_allocate(struct wil6210_priv *wil, u8 mid,
const u8 *mac, enum nl80211_iftype iftype);
int wmi_port_delete(struct wil6210_priv *wil, u8 mid);
int wmi_link_stats_cfg(struct wil6210_vif *vif, u32 type, u8 cid, u32 interval);
int wil_addba_rx_request(struct wil6210_priv *wil, u8 mid,
u8 cidxtid, u8 dialog_token, __le16 ba_param_set,
int wil_addba_rx_request(struct wil6210_priv *wil, u8 mid, u8 cid, u8 tid,
u8 dialog_token, __le16 ba_param_set,
__le16 ba_timeout, __le16 ba_seq_ctrl);
int wil_addba_tx_request(struct wil6210_priv *wil, u8 ringid, u16 wsize);

View File

@ -1,6 +1,6 @@
/*
* Copyright (c) 2012-2017 Qualcomm Atheros, Inc.
* Copyright (c) 2018, The Linux Foundation. All rights reserved.
* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@ -24,8 +24,9 @@
#include "wmi.h"
#include "trace.h"
static uint max_assoc_sta = WIL6210_MAX_CID;
module_param(max_assoc_sta, uint, 0644);
/* set the default max assoc sta to max supported by driver */
uint max_assoc_sta = WIL6210_MAX_CID;
module_param(max_assoc_sta, uint, 0444);
MODULE_PARM_DESC(max_assoc_sta, " Max number of stations associated to the AP");
int agg_wsize; /* = 0; */
@ -770,6 +771,7 @@ static void wmi_evt_ready(struct wil6210_vif *vif, int id, void *d, int len)
struct wil6210_priv *wil = vif_to_wil(vif);
struct wiphy *wiphy = wil_to_wiphy(wil);
struct wmi_ready_event *evt = d;
u8 fw_max_assoc_sta;
wil_info(wil, "FW ver. %s(SW %d); MAC %pM; %d MID's\n",
wil->fw_version, le32_to_cpu(evt->sw_version),
@ -787,6 +789,25 @@ static void wmi_evt_ready(struct wil6210_vif *vif, int id, void *d, int len)
evt->rfc_read_calib_result);
wil->fw_calib_result = evt->rfc_read_calib_result;
}
fw_max_assoc_sta = WIL6210_RX_DESC_MAX_CID;
if (len > offsetof(struct wmi_ready_event, max_assoc_sta) &&
evt->max_assoc_sta > 0) {
fw_max_assoc_sta = evt->max_assoc_sta;
wil_dbg_wmi(wil, "fw reported max assoc sta %d\n",
fw_max_assoc_sta);
if (fw_max_assoc_sta > WIL6210_MAX_CID) {
wil_dbg_wmi(wil,
"fw max assoc sta %d exceeds max driver supported %d\n",
fw_max_assoc_sta, WIL6210_MAX_CID);
fw_max_assoc_sta = WIL6210_MAX_CID;
}
}
max_assoc_sta = min_t(uint, max_assoc_sta, fw_max_assoc_sta);
wil_dbg_wmi(wil, "setting max assoc sta to %d\n", max_assoc_sta);
wil_set_recovery_state(wil, fw_recovery_idle);
set_bit(wil_status_fwready, wil->status);
/* let the reset sequence continue */
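The effective station limit is therefore the minimum of the module parameter, the firmware-reported maximum and the driver maximum, falling back to the 8-entry descriptor limit for firmware that does not report a value. A standalone sketch of that derivation:

#include <stdio.h>

#define RX_DESC_MAX_CID 8    /* HW limit without the cid extension */
#define DRIVER_MAX_CID  20   /* WIL6210_MAX_CID in this series */

static unsigned effective_max_assoc_sta(unsigned module_param, int fw_reported)
{
    unsigned fw_max = RX_DESC_MAX_CID;

    if (fw_reported > 0) {               /* field present and non-zero */
        fw_max = (unsigned)fw_reported;
        if (fw_max > DRIVER_MAX_CID)
            fw_max = DRIVER_MAX_CID;     /* clamp to driver support */
    }
    return module_param < fw_max ? module_param : fw_max;
}

int main(void)
{
    printf("%u\n", effective_max_assoc_sta(20, 16)); /* -> 16 */
    printf("%u\n", effective_max_assoc_sta(20, 0));  /* -> 8 (old fw) */
    printf("%u\n", effective_max_assoc_sta(20, 64)); /* -> 20 (clamped) */
    return 0;
}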
@ -952,7 +973,7 @@ static void wmi_evt_connect(struct wil6210_vif *vif, int id, void *d, int len)
evt->assoc_req_len, evt->assoc_resp_len);
return;
}
if (evt->cid >= WIL6210_MAX_CID) {
if (evt->cid >= max_assoc_sta) {
wil_err(wil, "Connect CID invalid : %d\n", evt->cid);
return;
}
@ -1271,9 +1292,16 @@ static void wmi_evt_addba_rx_req(struct wil6210_vif *vif, int id,
void *d, int len)
{
struct wil6210_priv *wil = vif_to_wil(vif);
u8 cid, tid;
struct wmi_rcp_addba_req_event *evt = d;
wil_addba_rx_request(wil, vif->mid, evt->cidxtid, evt->dialog_token,
if (evt->cidxtid != CIDXTID_EXTENDED_CID_TID) {
parse_cidxtid(evt->cidxtid, &cid, &tid);
} else {
cid = evt->cid;
tid = evt->tid;
}
wil_addba_rx_request(wil, vif->mid, cid, tid, evt->dialog_token,
evt->ba_param_set, evt->ba_timeout,
evt->ba_seq_ctrl);
}
@ -1289,7 +1317,13 @@ __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock)
struct wil_tid_ampdu_rx *r;
might_sleep();
parse_cidxtid(evt->cidxtid, &cid, &tid);
if (evt->cidxtid != CIDXTID_EXTENDED_CID_TID) {
parse_cidxtid(evt->cidxtid, &cid, &tid);
} else {
cid = evt->cid;
tid = evt->tid;
}
wil_dbg_wmi(wil, "DELBA MID %d CID %d TID %d from %s reason %d\n",
vif->mid, cid, tid,
evt->from_initiator ? "originator" : "recipient",
@ -1404,7 +1438,7 @@ static void wil_link_stats_store_basic(struct wil6210_vif *vif,
u8 cid = basic->cid;
struct wil_sta_info *sta;
if (cid < 0 || cid >= WIL6210_MAX_CID) {
if (cid < 0 || cid >= max_assoc_sta) {
wil_err(wil, "invalid cid %d\n", cid);
return;
}
@ -1554,7 +1588,7 @@ static int wil_find_cid_ringid_sta(struct wil6210_priv *wil,
continue;
lcid = wil->ring2cid_tid[i][0];
if (lcid >= WIL6210_MAX_CID) /* skip BCAST */
if (lcid >= max_assoc_sta) /* skip BCAST */
continue;
wil_dbg_wmi(wil, "find sta -> ringid %d cid %d\n", i, lcid);
@ -2120,10 +2154,9 @@ int wmi_pcp_start(struct wil6210_vif *vif,
if ((cmd.pcp_max_assoc_sta > WIL6210_MAX_CID) ||
(cmd.pcp_max_assoc_sta <= 0)) {
wil_info(wil,
"Requested connection limit %u, valid values are 1 - %d. Setting to %d\n",
max_assoc_sta, WIL6210_MAX_CID, WIL6210_MAX_CID);
cmd.pcp_max_assoc_sta = WIL6210_MAX_CID;
wil_err(wil, "unexpected max_assoc_sta %d\n",
cmd.pcp_max_assoc_sta);
return -EOPNOTSUPP;
}
if (disable_ap_sme &&
@ -2516,7 +2549,7 @@ int wmi_rx_chain_add(struct wil6210_priv *wil, struct wil_ring *vring)
if (ch)
cmd.sniffer_cfg.channel = ch->hw_value - 1;
cmd.sniffer_cfg.phy_info_mode =
cpu_to_le32(ndev->type == ARPHRD_IEEE80211_RADIOTAP);
cpu_to_le32(WMI_SNIFFER_PHY_INFO_DISABLED);
cmd.sniffer_cfg.phy_support =
cpu_to_le32((wil->monitor_flags & MONITOR_FLAG_CONTROL)
? WMI_SNIFFER_CP : WMI_SNIFFER_BOTH_PHYS);
@ -2651,15 +2684,22 @@ int wmi_delba_tx(struct wil6210_priv *wil, u8 mid, u8 ringid, u16 reason)
return wmi_send(wil, WMI_RING_BA_DIS_CMDID, mid, &cmd, sizeof(cmd));
}
int wmi_delba_rx(struct wil6210_priv *wil, u8 mid, u8 cidxtid, u16 reason)
int wmi_delba_rx(struct wil6210_priv *wil, u8 mid, u8 cid, u8 tid, u16 reason)
{
struct wmi_rcp_delba_cmd cmd = {
.cidxtid = cidxtid,
.reason = cpu_to_le16(reason),
};
wil_dbg_wmi(wil, "delba_rx: (CID %d TID %d reason %d)\n", cidxtid & 0xf,
(cidxtid >> 4) & 0xf, reason);
if (cid >= WIL6210_RX_DESC_MAX_CID) {
cmd.cidxtid = CIDXTID_EXTENDED_CID_TID;
cmd.cid = cid;
cmd.tid = tid;
} else {
cmd.cidxtid = mk_cidxtid(cid, tid);
}
wil_dbg_wmi(wil, "delba_rx: (CID %d TID %d reason %d)\n", cid,
tid, reason);
return wmi_send(wil, WMI_RCP_DELBA_CMDID, mid, &cmd, sizeof(cmd));
}
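The legacy cidxtid byte packs the cid into the low nibble and the tid into the high nibble, so it cannot express cids above 7; the extended form used throughout this series sends cid and tid in separate fields and flags that with CIDXTID_EXTENDED_CID_TID. A standalone sketch of the packing and unpacking; the 0xff marker value is an assumption of this sketch.

#include <stdio.h>

#define RX_DESC_MAX_CID   8
#define EXTENDED_CID_TID  0xff   /* assumed marker value for the sketch */

struct ba_params {
    unsigned char cidxtid;   /* legacy packed field */
    unsigned char cid;       /* used only in the extended form */
    unsigned char tid;
};

static void pack_cid_tid(struct ba_params *p, unsigned cid, unsigned tid)
{
    if (cid >= RX_DESC_MAX_CID) {
        p->cidxtid = EXTENDED_CID_TID;
        p->cid = (unsigned char)cid;
        p->tid = (unsigned char)tid;
    } else {
        p->cidxtid = (unsigned char)((cid & 0xf) | ((tid & 0xf) << 4));
    }
}

static void unpack_cid_tid(const struct ba_params *p,
                           unsigned *cid, unsigned *tid)
{
    if (p->cidxtid != EXTENDED_CID_TID) {
        *cid = p->cidxtid & 0xf;
        *tid = (p->cidxtid >> 4) & 0xf;
    } else {
        *cid = p->cid;
        *tid = p->tid;
    }
}

int main(void)
{
    struct ba_params p;
    unsigned cid, tid;

    pack_cid_tid(&p, 13, 2);   /* cid 13 needs the extended form */
    unpack_cid_tid(&p, &cid, &tid);
    printf("cid=%u tid=%u\n", cid, tid);
    return 0;
}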
@ -2670,7 +2710,6 @@ int wmi_addba_rx_resp(struct wil6210_priv *wil,
{
int rc;
struct wmi_rcp_addba_resp_cmd cmd = {
.cidxtid = mk_cidxtid(cid, tid),
.dialog_token = token,
.status_code = cpu_to_le16(status),
/* bit 0: A-MSDU supported
@ -2689,6 +2728,14 @@ int wmi_addba_rx_resp(struct wil6210_priv *wil,
.evt = {.status = cpu_to_le16(WMI_FW_STATUS_FAILURE)},
};
if (cid >= WIL6210_RX_DESC_MAX_CID) {
cmd.cidxtid = CIDXTID_EXTENDED_CID_TID;
cmd.cid = cid;
cmd.tid = tid;
} else {
cmd.cidxtid = mk_cidxtid(cid, tid);
}
wil_dbg_wmi(wil,
"ADDBA response for MID %d CID %d TID %d size %d timeout %d status %d AMSDU%s\n",
mid, cid, tid, agg_wsize,

View File

@ -134,12 +134,20 @@ struct msgbuf_completion_hdr {
__le16 flow_ring_id;
};
/* Data struct for the MSGBUF_TYPE_GEN_STATUS */
struct msgbuf_gen_status {
struct msgbuf_common_hdr msg;
struct msgbuf_completion_hdr compl_hdr;
__le16 write_idx;
__le32 rsvd0[3];
};
/* Data struct for the MSGBUF_TYPE_RING_STATUS */
struct msgbuf_ring_status {
struct msgbuf_common_hdr msg;
struct msgbuf_completion_hdr compl_hdr;
__le16 write_idx;
__le32 rsvd0[5];
__le16 rsvd0[5];
};
struct msgbuf_rx_event {
@ -1194,6 +1202,18 @@ brcmf_msgbuf_process_rx_complete(struct brcmf_msgbuf *msgbuf, void *buf)
brcmf_netif_rx(ifp, skb);
}
static void brcmf_msgbuf_process_gen_status(struct brcmf_msgbuf *msgbuf,
void *buf)
{
struct msgbuf_gen_status *gen_status = buf;
struct brcmf_pub *drvr = msgbuf->drvr;
int err;
err = le16_to_cpu(gen_status->compl_hdr.status);
if (err)
bphy_err(drvr, "Firmware reported general error: %d\n", err);
}
static void brcmf_msgbuf_process_ring_status(struct brcmf_msgbuf *msgbuf,
void *buf)
{
@ -1273,6 +1293,10 @@ static void brcmf_msgbuf_process_msgtype(struct brcmf_msgbuf *msgbuf, void *buf)
msg = (struct msgbuf_common_hdr *)buf;
switch (msg->msgtype) {
case MSGBUF_TYPE_GEN_STATUS:
brcmf_dbg(MSGBUF, "MSGBUF_TYPE_GEN_STATUS\n");
brcmf_msgbuf_process_gen_status(msgbuf, buf);
break;
case MSGBUF_TYPE_RING_STATUS:
brcmf_dbg(MSGBUF, "MSGBUF_TYPE_RING_STATUS\n");
brcmf_msgbuf_process_ring_status(msgbuf, buf);

View File

@ -21,3 +21,4 @@ config MT76x02_USB
source "drivers/net/wireless/mediatek/mt76/mt76x0/Kconfig"
source "drivers/net/wireless/mediatek/mt76/mt76x2/Kconfig"
source "drivers/net/wireless/mediatek/mt76/mt7603/Kconfig"

View File

@ -7,7 +7,7 @@ mt76-y := \
mmio.o util.o trace.o dma.o mac80211.o debugfs.o eeprom.o \
tx.o agg-rx.o mcu.o
mt76-usb-y := usb.o usb_trace.o usb_mcu.o
mt76-usb-y := usb.o usb_trace.o
CFLAGS_trace.o := -I$(src)
CFLAGS_usb_trace.o := -I$(src)
@ -22,3 +22,4 @@ mt76x02-usb-y := mt76x02_usb_mcu.o mt76x02_usb_core.o
obj-$(CONFIG_MT76x0_COMMON) += mt76x0/
obj-$(CONFIG_MT76x2_COMMON) += mt76x2/
obj-$(CONFIG_MT7603E) += mt7603/

View File

@ -54,22 +54,30 @@ mt76_get_of_eeprom(struct mt76_dev *dev, int len)
part = np->name;
mtd = get_mtd_device_nm(part);
if (IS_ERR(mtd))
return PTR_ERR(mtd);
if (IS_ERR(mtd)) {
ret = PTR_ERR(mtd);
goto out_put_node;
}
if (size <= sizeof(*list))
return -EINVAL;
if (size <= sizeof(*list)) {
ret = -EINVAL;
goto out_put_node;
}
offset = be32_to_cpup(list);
ret = mtd_read(mtd, offset, len, &retlen, dev->eeprom.data);
put_mtd_device(mtd);
if (ret)
return ret;
goto out_put_node;
if (retlen < len)
return -EINVAL;
if (retlen < len) {
ret = -EINVAL;
goto out_put_node;
}
return 0;
out_put_node:
of_node_put(np);
return ret;
#else
return -ENOENT;
#endif
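The restructured mt76_get_of_eeprom() error paths follow the usual single-exit cleanup pattern, here so the OF node reference taken earlier is always dropped. A generic standalone illustration, with a heap buffer standing in for the node reference:

#include <stdlib.h>
#include <errno.h>

/* Every failure path funnels through one label so the acquired
 * resource is released exactly once.
 */
static int do_work(int fail_early, int fail_late)
{
    int ret = 0;
    char *res = malloc(64);          /* acquire */

    if (!res)
        return -ENOMEM;              /* nothing to release yet */

    if (fail_early) {
        ret = -EINVAL;
        goto out_put;
    }

    if (fail_late) {
        ret = -EIO;
        goto out_put;
    }

out_put:
    free(res);                       /* single release point */
    return ret;
}

int main(void)
{
    return do_work(0, 0) ? 1 : 0;
}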

View File

@ -714,6 +714,11 @@ int mt76_sta_state(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
new_state == IEEE80211_STA_NONE)
return mt76_sta_add(dev, vif, sta);
if (old_state == IEEE80211_STA_AUTH &&
new_state == IEEE80211_STA_ASSOC &&
dev->drv->sta_assoc)
dev->drv->sta_assoc(dev, vif, sta);
if (old_state == IEEE80211_STA_NONE &&
new_state == IEEE80211_STA_NOTEXIST)
mt76_sta_remove(dev, vif, sta);
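The AUTH to ASSOC transition now gets its own optional driver hook, mirroring sta_add/sta_remove. A standalone sketch of that dispatch, with simplified state and ops names rather than the mt76 ones:

#include <stdio.h>

enum sta_state { STA_NOTEXIST, STA_NONE, STA_AUTH, STA_ASSOC };

struct driver_ops {
    void (*sta_assoc)(int sta_id);   /* optional hook */
};

static void sta_state_change(const struct driver_ops *ops, int sta_id,
                             enum sta_state old_state,
                             enum sta_state new_state)
{
    if (old_state == STA_AUTH && new_state == STA_ASSOC && ops->sta_assoc)
        ops->sta_assoc(sta_id);
}

static void my_sta_assoc(int sta_id)
{
    printf("station %d associated\n", sta_id);
}

int main(void)
{
    struct driver_ops ops = { .sta_assoc = my_sta_assoc };

    sta_state_change(&ops, 1, STA_AUTH, STA_ASSOC);
    return 0;
}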

View File

@ -304,6 +304,9 @@ struct mt76_driver_ops {
int (*sta_add)(struct mt76_dev *dev, struct ieee80211_vif *vif,
struct ieee80211_sta *sta);
void (*sta_assoc)(struct mt76_dev *dev, struct ieee80211_vif *vif,
struct ieee80211_sta *sta);
void (*sta_remove)(struct mt76_dev *dev, struct ieee80211_vif *vif,
struct ieee80211_sta *sta);
};
@ -384,8 +387,7 @@ struct mt76_usb {
struct mt76u_mcu {
struct mutex mutex;
struct completion cmpl;
struct mt76u_buf res;
u8 *data;
u32 msg_seq;
/* multiple reads */
@ -729,16 +731,20 @@ static inline u8 q2ep(u8 qid)
}
static inline int
mt76u_bulk_msg(struct mt76_dev *dev, void *data, int len, int timeout)
mt76u_bulk_msg(struct mt76_dev *dev, void *data, int len, int *actual_len,
int timeout)
{
struct usb_interface *intf = to_usb_interface(dev->dev);
struct usb_device *udev = interface_to_usbdev(intf);
struct mt76_usb *usb = &dev->usb;
unsigned int pipe;
int sent;
pipe = usb_sndbulkpipe(udev, usb->out_ep[MT_EP_OUT_INBAND_CMD]);
return usb_bulk_msg(udev, pipe, data, len, &sent, timeout);
if (actual_len)
pipe = usb_rcvbulkpipe(udev, usb->in_ep[MT_EP_IN_CMD_RESP]);
else
pipe = usb_sndbulkpipe(udev, usb->out_ep[MT_EP_OUT_INBAND_CMD]);
return usb_bulk_msg(udev, pipe, data, len, actual_len, timeout);
}
int mt76u_vendor_request(struct mt76_dev *dev, u8 req,
@ -747,13 +753,6 @@ int mt76u_vendor_request(struct mt76_dev *dev, u8 req,
void mt76u_single_wr(struct mt76_dev *dev, const u8 req,
const u16 offset, const u32 val);
int mt76u_init(struct mt76_dev *dev, struct usb_interface *intf);
void mt76u_deinit(struct mt76_dev *dev);
int mt76u_buf_alloc(struct mt76_dev *dev, struct mt76u_buf *buf,
int len, int data_len, gfp_t gfp);
void mt76u_buf_free(struct mt76u_buf *buf);
int mt76u_submit_buf(struct mt76_dev *dev, int dir, int index,
struct mt76u_buf *buf, gfp_t gfp,
usb_complete_t complete_fn, void *context);
int mt76u_submit_rx_buffers(struct mt76_dev *dev);
int mt76u_alloc_queues(struct mt76_dev *dev);
void mt76u_stop_queues(struct mt76_dev *dev);
@ -767,8 +766,4 @@ void mt76_mcu_rx_event(struct mt76_dev *dev, struct sk_buff *skb);
struct sk_buff *mt76_mcu_get_response(struct mt76_dev *dev,
unsigned long expires);
void mt76u_mcu_complete_urb(struct urb *urb);
int mt76u_mcu_init_rx(struct mt76_dev *dev);
void mt76u_mcu_deinit(struct mt76_dev *dev);
#endif

View File

@ -0,0 +1,9 @@
config MT7603E
tristate "MediaTek MT7603E (PCIe) and MT76x8 WLAN support"
select MT76_CORE
depends on MAC80211
depends on PCI
help
This adds support for MT7603E wireless PCIe devices and the WLAN core on
MT7628/MT7688 SoC devices

View File

@ -0,0 +1,6 @@
obj-$(CONFIG_MT7603E) += mt7603e.o
mt7603e-y := \
pci.o soc.o main.o init.o mcu.o \
core.o dma.o mac.o eeprom.o \
beacon.o debugfs.o

View File

@ -0,0 +1,186 @@
/* SPDX-License-Identifier: ISC */
#include "mt7603.h"
struct beacon_bc_data {
struct mt7603_dev *dev;
struct sk_buff_head q;
struct sk_buff *tail[MT7603_MAX_INTERFACES];
int count[MT7603_MAX_INTERFACES];
};
static void
mt7603_update_beacon_iter(void *priv, u8 *mac, struct ieee80211_vif *vif)
{
struct mt7603_dev *dev = (struct mt7603_dev *)priv;
struct mt7603_vif *mvif = (struct mt7603_vif *)vif->drv_priv;
struct sk_buff *skb = NULL;
if (!(dev->beacon_mask & BIT(mvif->idx)))
return;
skb = ieee80211_beacon_get(mt76_hw(dev), vif);
if (!skb)
return;
mt76_dma_tx_queue_skb(&dev->mt76, &dev->mt76.q_tx[MT_TXQ_BEACON], skb,
&mvif->sta.wcid, NULL);
spin_lock_bh(&dev->ps_lock);
mt76_wr(dev, MT_DMA_FQCR0, MT_DMA_FQCR0_BUSY |
FIELD_PREP(MT_DMA_FQCR0_TARGET_WCID, mvif->sta.wcid.idx) |
FIELD_PREP(MT_DMA_FQCR0_TARGET_QID,
dev->mt76.q_tx[MT_TXQ_CAB].hw_idx) |
FIELD_PREP(MT_DMA_FQCR0_DEST_PORT_ID, 3) |
FIELD_PREP(MT_DMA_FQCR0_DEST_QUEUE_ID, 8));
if (!mt76_poll(dev, MT_DMA_FQCR0, MT_DMA_FQCR0_BUSY, 0, 5000))
dev->beacon_check = MT7603_WATCHDOG_TIMEOUT;
spin_unlock_bh(&dev->ps_lock);
}
static void
mt7603_add_buffered_bc(void *priv, u8 *mac, struct ieee80211_vif *vif)
{
struct beacon_bc_data *data = priv;
struct mt7603_dev *dev = data->dev;
struct mt7603_vif *mvif = (struct mt7603_vif *)vif->drv_priv;
struct ieee80211_tx_info *info;
struct sk_buff *skb;
if (!(dev->beacon_mask & BIT(mvif->idx)))
return;
skb = ieee80211_get_buffered_bc(mt76_hw(dev), vif);
if (!skb)
return;
info = IEEE80211_SKB_CB(skb);
info->control.vif = vif;
info->flags |= IEEE80211_TX_CTL_ASSIGN_SEQ;
mt76_skb_set_moredata(skb, true);
__skb_queue_tail(&data->q, skb);
data->tail[mvif->idx] = skb;
data->count[mvif->idx]++;
}
void mt7603_pre_tbtt_tasklet(unsigned long arg)
{
struct mt7603_dev *dev = (struct mt7603_dev *)arg;
struct mt76_queue *q;
struct beacon_bc_data data = {};
struct sk_buff *skb;
int i, nframes;
data.dev = dev;
__skb_queue_head_init(&data.q);
q = &dev->mt76.q_tx[MT_TXQ_BEACON];
spin_lock_bh(&q->lock);
ieee80211_iterate_active_interfaces_atomic(mt76_hw(dev),
IEEE80211_IFACE_ITER_RESUME_ALL,
mt7603_update_beacon_iter, dev);
mt76_queue_kick(dev, q);
spin_unlock_bh(&q->lock);
/* Flush all previous CAB queue packets */
mt76_wr(dev, MT_WF_ARB_CAB_FLUSH, GENMASK(30, 16) | BIT(0));
mt76_queue_tx_cleanup(dev, MT_TXQ_CAB, false);
mt76_csa_check(&dev->mt76);
if (dev->mt76.csa_complete)
goto out;
q = &dev->mt76.q_tx[MT_TXQ_CAB];
do {
nframes = skb_queue_len(&data.q);
ieee80211_iterate_active_interfaces_atomic(mt76_hw(dev),
IEEE80211_IFACE_ITER_RESUME_ALL,
mt7603_add_buffered_bc, &data);
} while (nframes != skb_queue_len(&data.q) &&
skb_queue_len(&data.q) < 8);
if (skb_queue_empty(&data.q))
goto out;
for (i = 0; i < ARRAY_SIZE(data.tail); i++) {
if (!data.tail[i])
continue;
mt76_skb_set_moredata(data.tail[i], false);
}
spin_lock_bh(&q->lock);
while ((skb = __skb_dequeue(&data.q)) != NULL) {
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct ieee80211_vif *vif = info->control.vif;
struct mt7603_vif *mvif = (struct mt7603_vif *)vif->drv_priv;
mt76_dma_tx_queue_skb(&dev->mt76, q, skb, &mvif->sta.wcid,
NULL);
}
mt76_queue_kick(dev, q);
spin_unlock_bh(&q->lock);
for (i = 0; i < ARRAY_SIZE(data.count); i++)
mt76_wr(dev, MT_WF_ARB_CAB_COUNT_B0_REG(i),
data.count[i] << MT_WF_ARB_CAB_COUNT_B0_SHIFT(i));
mt76_wr(dev, MT_WF_ARB_CAB_START,
MT_WF_ARB_CAB_START_BSSn(0) |
(MT_WF_ARB_CAB_START_BSS0n(1) *
((1 << (MT7603_MAX_INTERFACES - 1)) - 1)));
out:
mt76_queue_tx_cleanup(dev, MT_TXQ_BEACON, false);
if (dev->mt76.q_tx[MT_TXQ_BEACON].queued >
__sw_hweight8(dev->beacon_mask))
dev->beacon_check++;
}
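The buffered-broadcast pass above marks every queued frame as having more data and then clears the flag only on the per-interface tail, so clients can tell where the CAB burst ends. A standalone model of that bookkeeping:

#include <stdio.h>

#define MAX_IFACES 4

struct frame {
    int vif;
    int more_data;
};

int main(void)
{
    struct frame q[8];
    struct frame *tail[MAX_IFACES] = { NULL };
    int vifs[] = { 0, 0, 1, 0, 1 };
    int n = (int)(sizeof(vifs) / sizeof(vifs[0]));
    int i;

    /* enqueue: set more_data on every frame, remember the per-vif tail */
    for (i = 0; i < n; i++) {
        q[i].vif = vifs[i];
        q[i].more_data = 1;
        tail[vifs[i]] = &q[i];
    }

    /* the last buffered frame of each interface closes the burst */
    for (i = 0; i < MAX_IFACES; i++)
        if (tail[i])
            tail[i]->more_data = 0;

    for (i = 0; i < n; i++)
        printf("frame %d (vif %d): more_data=%d\n",
               i, q[i].vif, q[i].more_data);
    return 0;
}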
void mt7603_beacon_set_timer(struct mt7603_dev *dev, int idx, int intval)
{
u32 pre_tbtt = MT7603_PRE_TBTT_TIME / 64;
if (idx >= 0) {
if (intval)
dev->beacon_mask |= BIT(idx);
else
dev->beacon_mask &= ~BIT(idx);
}
if (!dev->beacon_mask || (!intval && idx < 0)) {
mt7603_irq_disable(dev, MT_INT_MAC_IRQ3);
mt76_clear(dev, MT_ARB_SCR, MT_ARB_SCR_BCNQ_OPMODE_MASK);
mt76_wr(dev, MT_HW_INT_MASK(3), 0);
return;
}
dev->beacon_int = intval;
mt76_wr(dev, MT_TBTT,
FIELD_PREP(MT_TBTT_PERIOD, intval) | MT_TBTT_CAL_ENABLE);
mt76_wr(dev, MT_TBTT_TIMER_CFG, 0x99); /* start timer */
mt76_rmw_field(dev, MT_ARB_SCR, MT_ARB_SCR_BCNQ_OPMODE_MASK,
MT_BCNQ_OPMODE_AP);
mt76_clear(dev, MT_ARB_SCR, MT_ARB_SCR_TBTT_BCN_PRIO);
mt76_set(dev, MT_ARB_SCR, MT_ARB_SCR_TBTT_BCAST_PRIO);
mt76_wr(dev, MT_PRE_TBTT, pre_tbtt);
mt76_set(dev, MT_HW_INT_MASK(3),
MT_HW_INT3_PRE_TBTT0 | MT_HW_INT3_TBTT0);
mt76_set(dev, MT_WF_ARB_BCN_START,
MT_WF_ARB_BCN_START_BSSn(0) |
((dev->beacon_mask >> 1) * MT_WF_ARB_BCN_START_BSS0n(1)));
mt7603_irq_enable(dev, MT_INT_MAC_IRQ3);
if (dev->beacon_mask & ~BIT(0))
mt76_set(dev, MT_LPON_SBTOR(0), MT_LPON_SBTOR_SUB_BSS_EN);
else
mt76_clear(dev, MT_LPON_SBTOR(0), MT_LPON_SBTOR_SUB_BSS_EN);
}

View File

@ -0,0 +1,73 @@
/* SPDX-License-Identifier: ISC */
#include "mt7603.h"
void mt7603_set_irq_mask(struct mt7603_dev *dev, u32 clear, u32 set)
{
unsigned long flags;
spin_lock_irqsave(&dev->mt76.mmio.irq_lock, flags);
dev->mt76.mmio.irqmask &= ~clear;
dev->mt76.mmio.irqmask |= set;
mt76_wr(dev, MT_INT_MASK_CSR, dev->mt76.mmio.irqmask);
spin_unlock_irqrestore(&dev->mt76.mmio.irq_lock, flags);
}
void mt7603_rx_poll_complete(struct mt76_dev *mdev, enum mt76_rxq_id q)
{
struct mt7603_dev *dev = container_of(mdev, struct mt7603_dev, mt76);
mt7603_irq_enable(dev, MT_INT_RX_DONE(q));
}
irqreturn_t mt7603_irq_handler(int irq, void *dev_instance)
{
struct mt7603_dev *dev = dev_instance;
u32 intr;
intr = mt76_rr(dev, MT_INT_SOURCE_CSR);
mt76_wr(dev, MT_INT_SOURCE_CSR, intr);
if (!test_bit(MT76_STATE_INITIALIZED, &dev->mt76.state))
return IRQ_NONE;
intr &= dev->mt76.mmio.irqmask;
if (intr & MT_INT_MAC_IRQ3) {
u32 hwintr = mt76_rr(dev, MT_HW_INT_STATUS(3));
mt76_wr(dev, MT_HW_INT_STATUS(3), hwintr);
if (hwintr & MT_HW_INT3_PRE_TBTT0)
tasklet_schedule(&dev->pre_tbtt_tasklet);
if ((hwintr & MT_HW_INT3_TBTT0) && dev->mt76.csa_complete)
mt76_csa_finish(&dev->mt76);
}
if (intr & MT_INT_TX_DONE_ALL) {
mt7603_irq_disable(dev, MT_INT_TX_DONE_ALL);
tasklet_schedule(&dev->tx_tasklet);
}
if (intr & MT_INT_RX_DONE(0)) {
mt7603_irq_disable(dev, MT_INT_RX_DONE(0));
napi_schedule(&dev->mt76.napi[0]);
}
if (intr & MT_INT_RX_DONE(1)) {
mt7603_irq_disable(dev, MT_INT_RX_DONE(1));
napi_schedule(&dev->mt76.napi[1]);
}
return IRQ_HANDLED;
}
u32 mt7603_reg_map(struct mt7603_dev *dev, u32 addr)
{
u32 base = addr & GENMASK(31, 19);
u32 offset = addr & GENMASK(18, 0);
dev->bus_ops->wr(&dev->mt76, MT_MCU_PCIE_REMAP_2, base);
return MT_PCIE_REMAP_BASE_2 + offset;
}
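mt7603_reg_map() splits an absolute register address into a base that is programmed into the remap register and a 19-bit offset applied to the fixed remap window. A standalone sketch of the arithmetic; the window base below is a placeholder, not the real MT_PCIE_REMAP_BASE_2.

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

#define REMAP_WINDOW_BASE 0x80000u   /* placeholder window address */

static uint32_t remap_reg;           /* models MT_MCU_PCIE_REMAP_2 */

static uint32_t reg_map(uint32_t addr)
{
    uint32_t base   = addr & 0xfff80000u;   /* GENMASK(31, 19) */
    uint32_t offset = addr & 0x0007ffffu;   /* GENMASK(18, 0) */

    remap_reg = base;                       /* program the remap register */
    return REMAP_WINDOW_BASE + offset;      /* access through the window */
}

int main(void)
{
    printf("mapped 0x%08" PRIx32 "\n", reg_map(0x81234567u));
    printf("remap base 0x%08" PRIx32 "\n", remap_reg);
    return 0;
}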

View File

@ -0,0 +1,56 @@
/* SPDX-License-Identifier: ISC */
#include "mt7603.h"
static int
mt7603_reset_read(struct seq_file *s, void *data)
{
struct mt7603_dev *dev = dev_get_drvdata(s->private);
static const char * const reset_cause_str[] = {
[RESET_CAUSE_TX_HANG] = "TX hang",
[RESET_CAUSE_TX_BUSY] = "TX DMA busy stuck",
[RESET_CAUSE_RX_BUSY] = "RX DMA busy stuck",
[RESET_CAUSE_RX_PSE_BUSY] = "RX PSE busy stuck",
[RESET_CAUSE_BEACON_STUCK] = "Beacon stuck",
[RESET_CAUSE_MCU_HANG] = "MCU hang",
[RESET_CAUSE_RESET_FAILED] = "PSE reset failed",
};
int i;
for (i = 0; i < ARRAY_SIZE(reset_cause_str); i++) {
if (!reset_cause_str[i])
continue;
seq_printf(s, "%20s: %u\n", reset_cause_str[i],
dev->reset_cause[i]);
}
return 0;
}
static int
mt7603_radio_read(struct seq_file *s, void *data)
{
struct mt7603_dev *dev = dev_get_drvdata(s->private);
seq_printf(s, "Sensitivity: %d\n", dev->sensitivity);
seq_printf(s, "False CCA: ofdm=%d cck=%d\n",
dev->false_cca_ofdm, dev->false_cca_cck);
return 0;
}
void mt7603_init_debugfs(struct mt7603_dev *dev)
{
struct dentry *dir;
dir = mt76_register_debugfs(&dev->mt76);
if (!dir)
return;
debugfs_create_u32("reset_test", 0600, dir, &dev->reset_test);
debugfs_create_devm_seqfile(dev->mt76.dev, "reset", dir,
mt7603_reset_read);
debugfs_create_devm_seqfile(dev->mt76.dev, "radio", dir,
mt7603_radio_read);
}

View File

@ -0,0 +1,215 @@
/* SPDX-License-Identifier: ISC */
#include "mt7603.h"
#include "mac.h"
#include "../dma.h"
static int
mt7603_init_tx_queue(struct mt7603_dev *dev, struct mt76_queue *q,
int idx, int n_desc)
{
int ret;
q->hw_idx = idx;
q->regs = dev->mt76.mmio.regs + MT_TX_RING_BASE + idx * MT_RING_SIZE;
q->ndesc = n_desc;
ret = mt76_queue_alloc(dev, q);
if (ret)
return ret;
mt7603_irq_enable(dev, MT_INT_TX_DONE(idx));
return 0;
}
static void
mt7603_rx_loopback_skb(struct mt7603_dev *dev, struct sk_buff *skb)
{
__le32 *txd = (__le32 *)skb->data;
struct mt7603_sta *msta;
struct mt76_wcid *wcid;
int idx;
u32 val;
if (skb->len < sizeof(MT_TXD_SIZE) + sizeof(struct ieee80211_hdr))
goto free;
val = le32_to_cpu(txd[1]);
idx = FIELD_GET(MT_TXD1_WLAN_IDX, val);
skb->priority = FIELD_GET(MT_TXD1_TID, val);
if (idx >= MT7603_WTBL_STA - 1)
goto free;
wcid = rcu_dereference(dev->mt76.wcid[idx]);
if (!wcid)
goto free;
msta = container_of(wcid, struct mt7603_sta, wcid);
val = le32_to_cpu(txd[0]);
skb_set_queue_mapping(skb, FIELD_GET(MT_TXD0_Q_IDX, val));
spin_lock_bh(&dev->ps_lock);
__skb_queue_tail(&msta->psq, skb);
if (skb_queue_len(&msta->psq) >= 64) {
skb = __skb_dequeue(&msta->psq);
dev_kfree_skb(skb);
}
spin_unlock_bh(&dev->ps_lock);
return;
free:
dev_kfree_skb(skb);
}
void mt7603_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
struct sk_buff *skb)
{
struct mt7603_dev *dev = container_of(mdev, struct mt7603_dev, mt76);
__le32 *rxd = (__le32 *)skb->data;
__le32 *end = (__le32 *)&skb->data[skb->len];
enum rx_pkt_type type;
type = FIELD_GET(MT_RXD0_PKT_TYPE, le32_to_cpu(rxd[0]));
if (q == MT_RXQ_MCU) {
if (type == PKT_TYPE_RX_EVENT)
mt76_mcu_rx_event(&dev->mt76, skb);
else
mt7603_rx_loopback_skb(dev, skb);
return;
}
switch (type) {
case PKT_TYPE_TXS:
for (rxd++; rxd + 5 <= end; rxd += 5)
mt7603_mac_add_txs(dev, rxd);
dev_kfree_skb(skb);
break;
case PKT_TYPE_RX_EVENT:
mt76_mcu_rx_event(&dev->mt76, skb);
return;
case PKT_TYPE_NORMAL:
if (mt7603_mac_fill_rx(dev, skb) == 0) {
mt76_rx(&dev->mt76, q, skb);
return;
}
/* fall through */
default:
dev_kfree_skb(skb);
break;
}
}
static int
mt7603_init_rx_queue(struct mt7603_dev *dev, struct mt76_queue *q,
int idx, int n_desc, int bufsize)
{
int ret;
q->regs = dev->mt76.mmio.regs + MT_RX_RING_BASE + idx * MT_RING_SIZE;
q->ndesc = n_desc;
q->buf_size = bufsize;
ret = mt76_queue_alloc(dev, q);
if (ret)
return ret;
mt7603_irq_enable(dev, MT_INT_RX_DONE(idx));
return 0;
}
static void
mt7603_tx_tasklet(unsigned long data)
{
struct mt7603_dev *dev = (struct mt7603_dev *)data;
int i;
dev->tx_dma_check = 0;
for (i = MT_TXQ_MCU; i >= 0; i--)
mt76_queue_tx_cleanup(dev, i, false);
mt7603_irq_enable(dev, MT_INT_TX_DONE_ALL);
}
int mt7603_dma_init(struct mt7603_dev *dev)
{
static const u8 wmm_queue_map[] = {
[IEEE80211_AC_BK] = 0,
[IEEE80211_AC_BE] = 1,
[IEEE80211_AC_VI] = 2,
[IEEE80211_AC_VO] = 3,
};
int ret;
int i;
mt76_dma_attach(&dev->mt76);
init_waitqueue_head(&dev->mt76.mmio.mcu.wait);
skb_queue_head_init(&dev->mt76.mmio.mcu.res_q);
tasklet_init(&dev->tx_tasklet, mt7603_tx_tasklet, (unsigned long)dev);
mt76_clear(dev, MT_WPDMA_GLO_CFG,
MT_WPDMA_GLO_CFG_TX_DMA_EN |
MT_WPDMA_GLO_CFG_RX_DMA_EN |
MT_WPDMA_GLO_CFG_DMA_BURST_SIZE |
MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE);
mt76_wr(dev, MT_WPDMA_RST_IDX, ~0);
mt7603_pse_client_reset(dev);
for (i = 0; i < ARRAY_SIZE(wmm_queue_map); i++) {
ret = mt7603_init_tx_queue(dev, &dev->mt76.q_tx[i],
wmm_queue_map[i],
MT_TX_RING_SIZE);
if (ret)
return ret;
}
ret = mt7603_init_tx_queue(dev, &dev->mt76.q_tx[MT_TXQ_PSD],
MT_TX_HW_QUEUE_MGMT, MT_TX_RING_SIZE);
if (ret)
return ret;
ret = mt7603_init_tx_queue(dev, &dev->mt76.q_tx[MT_TXQ_MCU],
MT_TX_HW_QUEUE_MCU, MT_MCU_RING_SIZE);
if (ret)
return ret;
ret = mt7603_init_tx_queue(dev, &dev->mt76.q_tx[MT_TXQ_BEACON],
MT_TX_HW_QUEUE_BCN, MT_MCU_RING_SIZE);
if (ret)
return ret;
ret = mt7603_init_tx_queue(dev, &dev->mt76.q_tx[MT_TXQ_CAB],
MT_TX_HW_QUEUE_BMC, MT_MCU_RING_SIZE);
if (ret)
return ret;
ret = mt7603_init_rx_queue(dev, &dev->mt76.q_rx[MT_RXQ_MCU], 1,
MT_MCU_RING_SIZE, MT_RX_BUF_SIZE);
if (ret)
return ret;
ret = mt7603_init_rx_queue(dev, &dev->mt76.q_rx[MT_RXQ_MAIN], 0,
MT7603_RX_RING_SIZE, MT_RX_BUF_SIZE);
if (ret)
return ret;
mt76_wr(dev, MT_DELAY_INT_CFG, 0);
return mt76_init_queues(dev);
}
void mt7603_dma_cleanup(struct mt7603_dev *dev)
{
mt76_clear(dev, MT_WPDMA_GLO_CFG,
MT_WPDMA_GLO_CFG_TX_DMA_EN |
MT_WPDMA_GLO_CFG_RX_DMA_EN |
MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE);
tasklet_kill(&dev->tx_tasklet);
mt76_dma_cleanup(&dev->mt76);
}

View File

@ -0,0 +1,168 @@
/* SPDX-License-Identifier: ISC */
#include "mt7603.h"
#include "eeprom.h"
static int
mt7603_efuse_read(struct mt7603_dev *dev, u32 base, u16 addr, u8 *data)
{
u32 val;
int i;
val = mt76_rr(dev, base + MT_EFUSE_CTRL);
val &= ~(MT_EFUSE_CTRL_AIN |
MT_EFUSE_CTRL_MODE);
val |= FIELD_PREP(MT_EFUSE_CTRL_AIN, addr & ~0xf);
val |= MT_EFUSE_CTRL_KICK;
mt76_wr(dev, base + MT_EFUSE_CTRL, val);
if (!mt76_poll(dev, base + MT_EFUSE_CTRL, MT_EFUSE_CTRL_KICK, 0, 1000))
return -ETIMEDOUT;
udelay(2);
val = mt76_rr(dev, base + MT_EFUSE_CTRL);
if ((val & MT_EFUSE_CTRL_AOUT) == MT_EFUSE_CTRL_AOUT ||
WARN_ON_ONCE(!(val & MT_EFUSE_CTRL_VALID))) {
memset(data, 0xff, 16);
return 0;
}
for (i = 0; i < 4; i++) {
val = mt76_rr(dev, base + MT_EFUSE_RDATA(i));
put_unaligned_le32(val, data + 4 * i);
}
return 0;
}
static int
mt7603_efuse_init(struct mt7603_dev *dev)
{
u32 base = mt7603_reg_map(dev, MT_EFUSE_BASE);
int len = MT7603_EEPROM_SIZE;
void *buf;
int ret, i;
if (mt76_rr(dev, base + MT_EFUSE_BASE_CTRL) & MT_EFUSE_BASE_CTRL_EMPTY)
return 0;
dev->mt76.otp.data = devm_kzalloc(dev->mt76.dev, len, GFP_KERNEL);
dev->mt76.otp.size = len;
if (!dev->mt76.otp.data)
return -ENOMEM;
buf = dev->mt76.otp.data;
for (i = 0; i + 16 <= len; i += 16) {
ret = mt7603_efuse_read(dev, base, i, buf + i);
if (ret)
return ret;
}
return 0;
}
static bool
mt7603_has_cal_free_data(struct mt7603_dev *dev, u8 *efuse)
{
if (!efuse[MT_EE_TEMP_SENSOR_CAL])
return false;
if (get_unaligned_le16(efuse + MT_EE_TX_POWER_0_START_2G) == 0)
return false;
if (get_unaligned_le16(efuse + MT_EE_TX_POWER_1_START_2G) == 0)
return false;
if (!efuse[MT_EE_CP_FT_VERSION])
return false;
if (!efuse[MT_EE_XTAL_FREQ_OFFSET])
return false;
if (!efuse[MT_EE_XTAL_WF_RFCAL])
return false;
return true;
}
static void
mt7603_apply_cal_free_data(struct mt7603_dev *dev, u8 *efuse)
{
static const u8 cal_free_bytes[] = {
MT_EE_TEMP_SENSOR_CAL,
MT_EE_CP_FT_VERSION,
MT_EE_XTAL_FREQ_OFFSET,
MT_EE_XTAL_WF_RFCAL,
/* Skip for MT7628 */
MT_EE_TX_POWER_0_START_2G,
MT_EE_TX_POWER_0_START_2G + 1,
MT_EE_TX_POWER_1_START_2G,
MT_EE_TX_POWER_1_START_2G + 1,
};
u8 *eeprom = dev->mt76.eeprom.data;
int n = ARRAY_SIZE(cal_free_bytes);
int i;
if (!mt7603_has_cal_free_data(dev, efuse))
return;
if (is_mt7628(dev))
n -= 4;
for (i = 0; i < n; i++) {
int offset = cal_free_bytes[i];
eeprom[offset] = efuse[offset];
}
}
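The cal-free merge copies a fixed list of per-byte calibration offsets from the efuse image into the working EEPROM copy, dropping the trailing 2G tx-power entries on MT7628. A standalone sketch using the offsets from the eeprom map in this series:

#include <stdio.h>
#include <string.h>

#define EE_SIZE 256

static const unsigned char cal_free_bytes[] = {
    0x55,                    /* MT_EE_TEMP_SENSOR_CAL */
    0xf0,                    /* MT_EE_CP_FT_VERSION */
    0xf4,                    /* MT_EE_XTAL_FREQ_OFFSET */
    0xf7,                    /* MT_EE_XTAL_WF_RFCAL */
    /* skipped on MT7628 */
    0x56, 0x57,              /* MT_EE_TX_POWER_0_START_2G, +1 */
    0x5c, 0x5d,              /* MT_EE_TX_POWER_1_START_2G, +1 */
};

static void apply_cal_free(unsigned char *eeprom, const unsigned char *efuse,
                           int is_mt7628)
{
    int n = (int)sizeof(cal_free_bytes);
    int i;

    if (is_mt7628)
        n -= 4;              /* drop the 2G tx-power bytes */

    for (i = 0; i < n; i++)
        eeprom[cal_free_bytes[i]] = efuse[cal_free_bytes[i]];
}

int main(void)
{
    unsigned char eeprom[EE_SIZE], efuse[EE_SIZE];

    memset(eeprom, 0x00, sizeof(eeprom));
    memset(efuse, 0xaa, sizeof(efuse));
    apply_cal_free(eeprom, efuse, 0);
    printf("offset 0x55 after merge: 0x%02x\n", eeprom[0x55]);
    return 0;
}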
static int
mt7603_eeprom_load(struct mt7603_dev *dev)
{
int ret;
ret = mt76_eeprom_init(&dev->mt76, MT7603_EEPROM_SIZE);
if (ret < 0)
return ret;
return mt7603_efuse_init(dev);
}
static int mt7603_check_eeprom(struct mt76_dev *dev)
{
u16 val = get_unaligned_le16(dev->eeprom.data);
switch (val) {
case 0x7628:
case 0x7603:
return 0;
default:
return -EINVAL;
}
}
int mt7603_eeprom_init(struct mt7603_dev *dev)
{
int ret;
ret = mt7603_eeprom_load(dev);
if (ret < 0)
return ret;
if (dev->mt76.otp.data) {
if (mt7603_check_eeprom(&dev->mt76) == 0)
mt7603_apply_cal_free_data(dev, dev->mt76.otp.data);
else
memcpy(dev->mt76.eeprom.data, dev->mt76.otp.data,
MT7603_EEPROM_SIZE);
}
dev->mt76.cap.has_2ghz = true;
memcpy(dev->mt76.macaddr, dev->mt76.eeprom.data + MT_EE_MAC_ADDR,
ETH_ALEN);
mt76_eeprom_override(&dev->mt76);
return 0;
}

View File

@ -0,0 +1,86 @@
/* SPDX-License-Identifier: ISC */
#ifndef __MT7603_EEPROM_H
#define __MT7603_EEPROM_H
#include "mt7603.h"
enum mt7603_eeprom_field {
MT_EE_CHIP_ID = 0x000,
MT_EE_VERSION = 0x002,
MT_EE_MAC_ADDR = 0x004,
MT_EE_NIC_CONF_0 = 0x034,
MT_EE_NIC_CONF_1 = 0x036,
MT_EE_NIC_CONF_2 = 0x042,
MT_EE_XTAL_TRIM_1 = 0x03a,
MT_EE_RSSI_OFFSET_2G = 0x046,
MT_EE_WIFI_RF_SETTING = 0x048,
MT_EE_RSSI_OFFSET_5G = 0x04a,
MT_EE_TX_POWER_DELTA_BW40 = 0x050,
MT_EE_TX_POWER_DELTA_BW80 = 0x052,
MT_EE_TX_POWER_EXT_PA_5G = 0x054,
MT_EE_TEMP_SENSOR_CAL = 0x055,
MT_EE_TX_POWER_0_START_2G = 0x056,
MT_EE_TX_POWER_1_START_2G = 0x05c,
/* used as byte arrays */
#define MT_TX_POWER_GROUP_SIZE_5G 5
#define MT_TX_POWER_GROUPS_5G 6
MT_EE_TX_POWER_0_START_5G = 0x062,
MT_EE_TX_POWER_0_GRP3_TX_POWER_DELTA = 0x074,
MT_EE_TX_POWER_0_GRP4_TSSI_SLOPE = 0x076,
MT_EE_TX_POWER_1_START_5G = 0x080,
MT_EE_TX_POWER_CCK = 0x0a0,
MT_EE_TX_POWER_OFDM_2G_6M = 0x0a2,
MT_EE_TX_POWER_OFDM_2G_24M = 0x0a4,
MT_EE_TX_POWER_OFDM_2G_54M = 0x0a6,
MT_EE_TX_POWER_HT_BPSK_QPSK = 0x0a8,
MT_EE_TX_POWER_HT_16_64_QAM = 0x0aa,
MT_EE_TX_POWER_HT_64_QAM = 0x0ac,
MT_EE_ELAN_RX_MODE_GAIN = 0x0c0,
MT_EE_ELAN_RX_MODE_NF = 0x0c1,
MT_EE_ELAN_RX_MODE_P1DB = 0x0c2,
MT_EE_ELAN_BYPASS_MODE_GAIN = 0x0c3,
MT_EE_ELAN_BYPASS_MODE_NF = 0x0c4,
MT_EE_ELAN_BYPASS_MODE_P1DB = 0x0c5,
MT_EE_STEP_NUM_NEG_6_7 = 0x0c6,
MT_EE_STEP_NUM_NEG_4_5 = 0x0c8,
MT_EE_STEP_NUM_NEG_2_3 = 0x0ca,
MT_EE_STEP_NUM_NEG_0_1 = 0x0cc,
MT_EE_REF_STEP_24G = 0x0ce,
MT_EE_STEP_NUM_PLUS_1_2 = 0x0d0,
MT_EE_STEP_NUM_PLUS_3_4 = 0x0d2,
MT_EE_STEP_NUM_PLUS_5_6 = 0x0d4,
MT_EE_STEP_NUM_PLUS_7 = 0x0d6,
MT_EE_CP_FT_VERSION = 0x0f0,
MT_EE_XTAL_FREQ_OFFSET = 0x0f4,
MT_EE_XTAL_TRIM_2_COMP = 0x0f5,
MT_EE_XTAL_TRIM_3_COMP = 0x0f6,
MT_EE_XTAL_WF_RFCAL = 0x0f7,
__MT_EE_MAX
};
enum mt7603_eeprom_source {
MT_EE_SRC_PROM,
MT_EE_SRC_EFUSE,
MT_EE_SRC_FLASH,
};
#endif

View File

@ -0,0 +1,578 @@
/* SPDX-License-Identifier: ISC */
#include <linux/etherdevice.h>
#include "mt7603.h"
#include "mac.h"
#include "eeprom.h"
const struct mt76_driver_ops mt7603_drv_ops = {
.txwi_size = MT_TXD_SIZE,
.tx_prepare_skb = mt7603_tx_prepare_skb,
.tx_complete_skb = mt7603_tx_complete_skb,
.rx_skb = mt7603_queue_rx_skb,
.rx_poll_complete = mt7603_rx_poll_complete,
.sta_ps = mt7603_sta_ps,
.sta_add = mt7603_sta_add,
.sta_assoc = mt7603_sta_assoc,
.sta_remove = mt7603_sta_remove,
.update_survey = mt7603_update_channel,
};
static void
mt7603_set_tmac_template(struct mt7603_dev *dev)
{
u32 desc[5] = {
[1] = FIELD_PREP(MT_TXD3_REM_TX_COUNT, 0xf),
[3] = MT_TXD5_SW_POWER_MGMT
};
u32 addr;
int i;
addr = mt7603_reg_map(dev, MT_CLIENT_BASE_PHYS_ADDR);
addr += MT_CLIENT_TMAC_INFO_TEMPLATE;
for (i = 0; i < ARRAY_SIZE(desc); i++)
mt76_wr(dev, addr + 4 * i, desc[i]);
}
static void
mt7603_dma_sched_init(struct mt7603_dev *dev)
{
int page_size = 128;
int page_count;
int max_len = 1792;
int max_amsdu_pages = 4096 / page_size;
int max_mcu_len = 4096;
int max_beacon_len = 512 * 4 + max_len;
int max_mcast_pages = 4 * max_len / page_size;
int reserved_count = 0;
int beacon_pages;
int mcu_pages;
int i;
page_count = mt76_get_field(dev, MT_PSE_FC_P0,
MT_PSE_FC_P0_MAX_QUOTA);
beacon_pages = 4 * (max_beacon_len / page_size);
mcu_pages = max_mcu_len / page_size;
mt76_wr(dev, MT_PSE_FRP,
FIELD_PREP(MT_PSE_FRP_P0, 7) |
FIELD_PREP(MT_PSE_FRP_P1, 6) |
FIELD_PREP(MT_PSE_FRP_P2_RQ2, 4));
mt76_wr(dev, MT_HIGH_PRIORITY_1, 0x55555553);
mt76_wr(dev, MT_HIGH_PRIORITY_2, 0x78555555);
mt76_wr(dev, MT_QUEUE_PRIORITY_1, 0x2b1a096e);
mt76_wr(dev, MT_QUEUE_PRIORITY_2, 0x785f4d3c);
mt76_wr(dev, MT_PRIORITY_MASK, 0xffffffff);
mt76_wr(dev, MT_SCH_1, page_count | (2 << 28));
mt76_wr(dev, MT_SCH_2, max_amsdu_pages);
for (i = 0; i <= 4; i++)
mt76_wr(dev, MT_PAGE_COUNT(i), max_amsdu_pages);
reserved_count += 5 * max_amsdu_pages;
mt76_wr(dev, MT_PAGE_COUNT(5), mcu_pages);
reserved_count += mcu_pages;
mt76_wr(dev, MT_PAGE_COUNT(7), beacon_pages);
reserved_count += beacon_pages;
mt76_wr(dev, MT_PAGE_COUNT(8), max_mcast_pages);
reserved_count += max_mcast_pages;
if (is_mt7603(dev))
reserved_count = 0;
mt76_wr(dev, MT_RSV_MAX_THRESH, page_count - reserved_count);
if (is_mt7603(dev) && mt76xx_rev(dev) >= MT7603_REV_E2) {
mt76_wr(dev, MT_GROUP_THRESH(0),
page_count - beacon_pages - mcu_pages);
mt76_wr(dev, MT_GROUP_THRESH(1), beacon_pages);
mt76_wr(dev, MT_BMAP_0, 0x0080ff5f);
mt76_wr(dev, MT_GROUP_THRESH(2), mcu_pages);
mt76_wr(dev, MT_BMAP_1, 0x00000020);
} else {
mt76_wr(dev, MT_GROUP_THRESH(0), page_count);
mt76_wr(dev, MT_BMAP_0, 0xffff);
}
mt76_wr(dev, MT_SCH_4, 0);
for (i = 0; i <= 15; i++)
mt76_wr(dev, MT_TXTIME_THRESH(i), 0xfffff);
mt76_set(dev, MT_SCH_4, BIT(6));
}
static void
mt7603_phy_init(struct mt7603_dev *dev)
{
int rx_chains = dev->mt76.antenna_mask;
int tx_chains = __sw_hweight8(rx_chains) - 1;
mt76_rmw(dev, MT_WF_RMAC_RMCR,
(MT_WF_RMAC_RMCR_SMPS_MODE |
MT_WF_RMAC_RMCR_RX_STREAMS),
(FIELD_PREP(MT_WF_RMAC_RMCR_SMPS_MODE, 3) |
FIELD_PREP(MT_WF_RMAC_RMCR_RX_STREAMS, rx_chains)));
mt76_rmw_field(dev, MT_TMAC_TCR, MT_TMAC_TCR_TX_STREAMS,
tx_chains);
dev->agc0 = mt76_rr(dev, MT_AGC(0));
dev->agc3 = mt76_rr(dev, MT_AGC(3));
}
static void
mt7603_mac_init(struct mt7603_dev *dev)
{
u8 bc_addr[ETH_ALEN];
u32 addr;
int i;
mt76_wr(dev, MT_AGG_BA_SIZE_LIMIT_0,
(MT_AGG_SIZE_LIMIT(0) << 0 * MT_AGG_BA_SIZE_LIMIT_SHIFT) |
(MT_AGG_SIZE_LIMIT(1) << 1 * MT_AGG_BA_SIZE_LIMIT_SHIFT) |
(MT_AGG_SIZE_LIMIT(2) << 2 * MT_AGG_BA_SIZE_LIMIT_SHIFT) |
(MT_AGG_SIZE_LIMIT(3) << 3 * MT_AGG_BA_SIZE_LIMIT_SHIFT));
mt76_wr(dev, MT_AGG_BA_SIZE_LIMIT_1,
(MT_AGG_SIZE_LIMIT(4) << 0 * MT_AGG_BA_SIZE_LIMIT_SHIFT) |
(MT_AGG_SIZE_LIMIT(5) << 1 * MT_AGG_BA_SIZE_LIMIT_SHIFT) |
(MT_AGG_SIZE_LIMIT(6) << 2 * MT_AGG_BA_SIZE_LIMIT_SHIFT) |
(MT_AGG_SIZE_LIMIT(7) << 3 * MT_AGG_BA_SIZE_LIMIT_SHIFT));
mt76_wr(dev, MT_AGG_LIMIT,
FIELD_PREP(MT_AGG_LIMIT_AC(0), 24) |
FIELD_PREP(MT_AGG_LIMIT_AC(1), 24) |
FIELD_PREP(MT_AGG_LIMIT_AC(2), 24) |
FIELD_PREP(MT_AGG_LIMIT_AC(3), 24));
mt76_wr(dev, MT_AGG_LIMIT_1,
FIELD_PREP(MT_AGG_LIMIT_AC(0), 24) |
FIELD_PREP(MT_AGG_LIMIT_AC(1), 24) |
FIELD_PREP(MT_AGG_LIMIT_AC(2), 24) |
FIELD_PREP(MT_AGG_LIMIT_AC(3), 24));
mt76_wr(dev, MT_AGG_CONTROL,
FIELD_PREP(MT_AGG_CONTROL_BAR_RATE, 0x4b) |
FIELD_PREP(MT_AGG_CONTROL_CFEND_RATE, 0x69) |
MT_AGG_CONTROL_NO_BA_AR_RULE);
mt76_wr(dev, MT_AGG_RETRY_CONTROL,
FIELD_PREP(MT_AGG_RETRY_CONTROL_BAR_LIMIT, 1) |
FIELD_PREP(MT_AGG_RETRY_CONTROL_RTS_LIMIT, 15));
mt76_rmw(dev, MT_DMA_DCR0, ~0xfffc, 4096);
mt76_rmw(dev, MT_DMA_VCFR0, BIT(0), BIT(13));
mt76_rmw(dev, MT_DMA_TMCFR0, BIT(0) | BIT(1), BIT(13));
mt76_clear(dev, MT_WF_RMAC_TMR_PA, BIT(31));
mt76_set(dev, MT_WF_RMACDR, MT_WF_RMACDR_MAXLEN_20BIT);
mt76_rmw(dev, MT_WF_RMAC_MAXMINLEN, 0xffffff, 0x19000);
mt76_wr(dev, MT_WF_RFCR1, 0);
mt76_set(dev, MT_TMAC_TCR, MT_TMAC_TCR_RX_RIFS_MODE);
mt7603_set_tmac_template(dev);
/* Enable RX group to HIF */
addr = mt7603_reg_map(dev, MT_CLIENT_BASE_PHYS_ADDR);
mt76_set(dev, addr + MT_CLIENT_RXINF, MT_CLIENT_RXINF_RXSH_GROUPS);
/* Enable RX group to MCU */
mt76_set(dev, MT_DMA_DCR1, GENMASK(13, 11));
mt76_rmw_field(dev, MT_AGG_PCR_RTS, MT_AGG_PCR_RTS_PKT_THR, 3);
mt76_set(dev, MT_TMAC_PCR, MT_TMAC_PCR_SPE_EN);
/* include preamble detection in CCA trigger signal */
mt76_rmw_field(dev, MT_TXREQ, MT_TXREQ_CCA_SRC_SEL, 2);
mt76_wr(dev, MT_RXREQ, 4);
/* Configure all rx packets to HIF */
mt76_wr(dev, MT_DMA_RCFR0, 0xc0000000);
/* Configure MCU txs selection with aggregation */
mt76_wr(dev, MT_DMA_TCFR0,
FIELD_PREP(MT_DMA_TCFR_TXS_AGGR_TIMEOUT, 1) | /* 32 us */
MT_DMA_TCFR_TXS_AGGR_COUNT);
/* Configure HIF txs selection with aggregation */
mt76_wr(dev, MT_DMA_TCFR1,
FIELD_PREP(MT_DMA_TCFR_TXS_AGGR_TIMEOUT, 1) | /* 32 us */
MT_DMA_TCFR_TXS_AGGR_COUNT | /* Maximum count */
MT_DMA_TCFR_TXS_BIT_MAP);
mt76_wr(dev, MT_MCU_PCIE_REMAP_1, MT_PSE_WTBL_2_PHYS_ADDR);
for (i = 0; i < MT7603_WTBL_SIZE; i++)
mt7603_wtbl_clear(dev, i);
eth_broadcast_addr(bc_addr);
mt7603_wtbl_init(dev, MT7603_WTBL_RESERVED, -1, bc_addr);
dev->global_sta.wcid.idx = MT7603_WTBL_RESERVED;
rcu_assign_pointer(dev->mt76.wcid[MT7603_WTBL_RESERVED],
&dev->global_sta.wcid);
mt76_rmw_field(dev, MT_LPON_BTEIR, MT_LPON_BTEIR_MBSS_MODE, 2);
mt76_rmw_field(dev, MT_WF_RMACDR, MT_WF_RMACDR_MBSSID_MASK, 2);
mt76_wr(dev, MT_AGG_ARUCR, FIELD_PREP(MT_AGG_ARxCR_LIMIT(0), 7));
mt76_wr(dev, MT_AGG_ARDCR,
FIELD_PREP(MT_AGG_ARxCR_LIMIT(0), 0) |
FIELD_PREP(MT_AGG_ARxCR_LIMIT(1),
max_t(int, 0, MT7603_RATE_RETRY - 2)) |
FIELD_PREP(MT_AGG_ARxCR_LIMIT(2), MT7603_RATE_RETRY - 1) |
FIELD_PREP(MT_AGG_ARxCR_LIMIT(3), MT7603_RATE_RETRY - 1) |
FIELD_PREP(MT_AGG_ARxCR_LIMIT(4), MT7603_RATE_RETRY - 1) |
FIELD_PREP(MT_AGG_ARxCR_LIMIT(5), MT7603_RATE_RETRY - 1) |
FIELD_PREP(MT_AGG_ARxCR_LIMIT(6), MT7603_RATE_RETRY - 1) |
FIELD_PREP(MT_AGG_ARxCR_LIMIT(7), MT7603_RATE_RETRY - 1));
mt76_wr(dev, MT_AGG_ARCR,
(MT_AGG_ARCR_INIT_RATE1 |
FIELD_PREP(MT_AGG_ARCR_RTS_RATE_THR, 2) |
MT_AGG_ARCR_RATE_DOWN_RATIO_EN |
FIELD_PREP(MT_AGG_ARCR_RATE_DOWN_RATIO, 1) |
FIELD_PREP(MT_AGG_ARCR_RATE_UP_EXTRA_TH, 4)));
mt76_set(dev, MT_WTBL_RMVTCR, MT_WTBL_RMVTCR_RX_MV_MODE);
mt76_clear(dev, MT_SEC_SCR, MT_SEC_SCR_MASK_ORDER);
mt76_clear(dev, MT_SEC_SCR, BIT(18));
/* Set secondary beacon time offsets */
for (i = 0; i <= 4; i++)
mt76_rmw_field(dev, MT_LPON_SBTOR(i), MT_LPON_SBTOR_TIME_OFFSET,
(i + 1) * (20 + 4096));
}
static int
mt7603_init_hardware(struct mt7603_dev *dev)
{
int i, ret;
mt76_wr(dev, MT_INT_SOURCE_CSR, ~0);
ret = mt7603_eeprom_init(dev);
if (ret < 0)
return ret;
ret = mt7603_dma_init(dev);
if (ret)
return ret;
mt76_wr(dev, MT_WPDMA_GLO_CFG, 0x52000850);
mt7603_mac_dma_start(dev);
dev->rxfilter = mt76_rr(dev, MT_WF_RFCR);
set_bit(MT76_STATE_INITIALIZED, &dev->mt76.state);
for (i = 0; i < MT7603_WTBL_SIZE; i++) {
mt76_wr(dev, MT_PSE_RTA, MT_PSE_RTA_BUSY | MT_PSE_RTA_WRITE |
FIELD_PREP(MT_PSE_RTA_TAG_ID, i));
mt76_poll(dev, MT_PSE_RTA, MT_PSE_RTA_BUSY, 0, 5000);
}
ret = mt7603_mcu_init(dev);
if (ret)
return ret;
mt7603_dma_sched_init(dev);
mt7603_mcu_set_eeprom(dev);
mt7603_phy_init(dev);
mt7603_mac_init(dev);
return 0;
}
#define CCK_RATE(_idx, _rate) { \
.bitrate = _rate, \
.flags = IEEE80211_RATE_SHORT_PREAMBLE, \
.hw_value = (MT_PHY_TYPE_CCK << 8) | (_idx), \
.hw_value_short = (MT_PHY_TYPE_CCK << 8) | (4 + _idx), \
}
#define OFDM_RATE(_idx, _rate) { \
.bitrate = _rate, \
.hw_value = (MT_PHY_TYPE_OFDM << 8) | (_idx), \
.hw_value_short = (MT_PHY_TYPE_OFDM << 8) | (_idx), \
}
static struct ieee80211_rate mt7603_rates[] = {
CCK_RATE(0, 10),
CCK_RATE(1, 20),
CCK_RATE(2, 55),
CCK_RATE(3, 110),
OFDM_RATE(11, 60),
OFDM_RATE(15, 90),
OFDM_RATE(10, 120),
OFDM_RATE(14, 180),
OFDM_RATE(9, 240),
OFDM_RATE(13, 360),
OFDM_RATE(8, 480),
OFDM_RATE(12, 540),
};
static const struct ieee80211_iface_limit if_limits[] = {
{
.max = 1,
.types = BIT(NL80211_IFTYPE_ADHOC)
}, {
.max = MT7603_MAX_INTERFACES,
.types = BIT(NL80211_IFTYPE_STATION) |
#ifdef CONFIG_MAC80211_MESH
BIT(NL80211_IFTYPE_MESH_POINT) |
#endif
BIT(NL80211_IFTYPE_AP)
},
};
static const struct ieee80211_iface_combination if_comb[] = {
{
.limits = if_limits,
.n_limits = ARRAY_SIZE(if_limits),
.max_interfaces = 4,
.num_different_channels = 1,
.beacon_int_infra_match = true,
}
};
static void mt7603_led_set_config(struct mt76_dev *mt76, u8 delay_on,
u8 delay_off)
{
struct mt7603_dev *dev = container_of(mt76, struct mt7603_dev,
mt76);
u32 val, addr;
val = MT_LED_STATUS_DURATION(0xffff) |
MT_LED_STATUS_OFF(delay_off) |
MT_LED_STATUS_ON(delay_on);
addr = mt7603_reg_map(dev, MT_LED_STATUS_0(mt76->led_pin));
mt76_wr(dev, addr, val);
addr = mt7603_reg_map(dev, MT_LED_STATUS_1(mt76->led_pin));
mt76_wr(dev, addr, val);
val = MT_LED_CTRL_REPLAY(mt76->led_pin) |
MT_LED_CTRL_KICK(mt76->led_pin);
if (mt76->led_al)
val |= MT_LED_CTRL_POLARITY(mt76->led_pin);
addr = mt7603_reg_map(dev, MT_LED_CTRL);
mt76_wr(dev, addr, val);
}
static int mt7603_led_set_blink(struct led_classdev *led_cdev,
unsigned long *delay_on,
unsigned long *delay_off)
{
struct mt76_dev *mt76 = container_of(led_cdev, struct mt76_dev,
led_cdev);
u8 delta_on, delta_off;
delta_off = max_t(u8, *delay_off / 10, 1);
delta_on = max_t(u8, *delay_on / 10, 1);
mt7603_led_set_config(mt76, delta_on, delta_off);
return 0;
}
static void mt7603_led_set_brightness(struct led_classdev *led_cdev,
enum led_brightness brightness)
{
struct mt76_dev *mt76 = container_of(led_cdev, struct mt76_dev,
led_cdev);
if (!brightness)
mt7603_led_set_config(mt76, 0, 0xff);
else
mt7603_led_set_config(mt76, 0xff, 0);
}
static u32 __mt7603_reg_addr(struct mt7603_dev *dev, u32 addr)
{
if (addr < 0x100000)
return addr;
return mt7603_reg_map(dev, addr);
}
static u32 mt7603_rr(struct mt76_dev *mdev, u32 offset)
{
struct mt7603_dev *dev = container_of(mdev, struct mt7603_dev, mt76);
u32 addr = __mt7603_reg_addr(dev, offset);
return dev->bus_ops->rr(mdev, addr);
}
static void mt7603_wr(struct mt76_dev *mdev, u32 offset, u32 val)
{
struct mt7603_dev *dev = container_of(mdev, struct mt7603_dev, mt76);
u32 addr = __mt7603_reg_addr(dev, offset);
dev->bus_ops->wr(mdev, addr, val);
}
static u32 mt7603_rmw(struct mt76_dev *mdev, u32 offset, u32 mask, u32 val)
{
struct mt7603_dev *dev = container_of(mdev, struct mt7603_dev, mt76);
u32 addr = __mt7603_reg_addr(dev, offset);
return dev->bus_ops->rmw(mdev, addr, mask, val);
}
static void
mt7603_regd_notifier(struct wiphy *wiphy,
struct regulatory_request *request)
{
struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
struct mt7603_dev *dev = hw->priv;
dev->ed_monitor = request->dfs_region == NL80211_DFS_ETSI;
}
static int
mt7603_txpower_signed(int val)
{
bool sign = val & BIT(6);
if (!(val & BIT(7)))
return 0;
val &= GENMASK(5, 0);
if (!sign)
val = -val;
return val;
}
static void
mt7603_init_txpower(struct mt7603_dev *dev,
struct ieee80211_supported_band *sband)
{
struct ieee80211_channel *chan;
u8 *eeprom = (u8 *)dev->mt76.eeprom.data;
int target_power = eeprom[MT_EE_TX_POWER_0_START_2G + 2] & ~BIT(7);
u8 *rate_power = &eeprom[MT_EE_TX_POWER_CCK];
int max_offset, cur_offset;
int i;
if (target_power & BIT(6))
target_power = -(target_power & GENMASK(5, 0));
max_offset = 0;
for (i = 0; i < 14; i++) {
cur_offset = mt7603_txpower_signed(rate_power[i]);
max_offset = max(max_offset, cur_offset);
}
target_power += max_offset;
dev->tx_power_limit = target_power;
dev->mt76.txpower_cur = target_power;
target_power = DIV_ROUND_UP(target_power, 2);
/* add 3 dBm for 2SS devices (combined output) */
if (dev->mt76.antenna_mask & BIT(1))
target_power += 3;
for (i = 0; i < sband->n_channels; i++) {
chan = &sband->channels[i];
chan->max_power = target_power;
}
}
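The per-rate power offsets decoded by mt7603_txpower_signed() are sign-magnitude values: bit 7 marks the offset as valid, bit 6 selects a positive offset when set, and bits 5..0 carry the magnitude. A standalone sketch of the decode:

#include <stdio.h>

/* bit 7: offset valid, bit 6: positive when set, bits 5..0: magnitude */
static int txpower_signed(unsigned val)
{
    int mag = (int)(val & 0x3f);

    if (!(val & 0x80))
        return 0;
    return (val & 0x40) ? mag : -mag;
}

int main(void)
{
    printf("%d\n", txpower_signed(0xc4)); /* valid, positive -> 4 */
    printf("%d\n", txpower_signed(0x84)); /* valid, negative -> -4 */
    printf("%d\n", txpower_signed(0x04)); /* not valid -> 0 */
    return 0;
}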
int mt7603_register_device(struct mt7603_dev *dev)
{
struct mt76_bus_ops *bus_ops;
struct ieee80211_hw *hw = mt76_hw(dev);
struct wiphy *wiphy = hw->wiphy;
int ret;
dev->bus_ops = dev->mt76.bus;
bus_ops = devm_kmemdup(dev->mt76.dev, dev->bus_ops, sizeof(*bus_ops),
GFP_KERNEL);
if (!bus_ops)
return -ENOMEM;
bus_ops->rr = mt7603_rr;
bus_ops->wr = mt7603_wr;
bus_ops->rmw = mt7603_rmw;
dev->mt76.bus = bus_ops;
INIT_DELAYED_WORK(&dev->mac_work, mt7603_mac_work);
tasklet_init(&dev->pre_tbtt_tasklet, mt7603_pre_tbtt_tasklet,
(unsigned long)dev);
/* Check for 7688, which only has 1SS */
dev->mt76.antenna_mask = 3;
if (mt76_rr(dev, MT_EFUSE_BASE + 0x64) & BIT(4))
dev->mt76.antenna_mask = 1;
dev->slottime = 9;
ret = mt7603_init_hardware(dev);
if (ret)
return ret;
hw->queues = 4;
hw->max_rates = 3;
hw->max_report_rates = 7;
hw->max_rate_tries = 11;
hw->sta_data_size = sizeof(struct mt7603_sta);
hw->vif_data_size = sizeof(struct mt7603_vif);
wiphy->iface_combinations = if_comb;
wiphy->n_iface_combinations = ARRAY_SIZE(if_comb);
ieee80211_hw_set(hw, SUPPORTS_REORDERING_BUFFER);
ieee80211_hw_set(hw, TX_STATUS_NO_AMPDU_LEN);
/* init led callbacks */
if (IS_ENABLED(CONFIG_MT76_LEDS)) {
dev->mt76.led_cdev.brightness_set = mt7603_led_set_brightness;
dev->mt76.led_cdev.blink_set = mt7603_led_set_blink;
}
wiphy->interface_modes =
BIT(NL80211_IFTYPE_STATION) |
BIT(NL80211_IFTYPE_AP) |
#ifdef CONFIG_MAC80211_MESH
BIT(NL80211_IFTYPE_MESH_POINT) |
#endif
BIT(NL80211_IFTYPE_ADHOC);
wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH;
wiphy->reg_notifier = mt7603_regd_notifier;
ret = mt76_register_device(&dev->mt76, true, mt7603_rates,
ARRAY_SIZE(mt7603_rates));
if (ret)
return ret;
mt7603_init_debugfs(dev);
mt7603_init_txpower(dev, &dev->mt76.sband_2g.sband);
return 0;
}
void mt7603_unregister_device(struct mt7603_dev *dev)
{
tasklet_disable(&dev->pre_tbtt_tasklet);
mt76_unregister_device(&dev->mt76);
mt7603_mcu_exit(dev);
mt7603_dma_cleanup(dev);
ieee80211_free_hw(mt76_hw(dev));
}

File diff suppressed because it is too large

View File

@ -0,0 +1,242 @@
/* SPDX-License-Identifier: ISC */
#ifndef __MT7603_MAC_H
#define __MT7603_MAC_H
#define MT_RXD0_LENGTH GENMASK(15, 0)
#define MT_RXD0_PKT_TYPE GENMASK(31, 29)
#define MT_RXD0_NORMAL_ETH_TYPE_OFS GENMASK(22, 16)
#define MT_RXD0_NORMAL_IP_SUM BIT(23)
#define MT_RXD0_NORMAL_UDP_TCP_SUM BIT(24)
#define MT_RXD0_NORMAL_GROUP_1 BIT(25)
#define MT_RXD0_NORMAL_GROUP_2 BIT(26)
#define MT_RXD0_NORMAL_GROUP_3 BIT(27)
#define MT_RXD0_NORMAL_GROUP_4 BIT(28)
enum rx_pkt_type {
PKT_TYPE_TXS = 0,
PKT_TYPE_TXRXV = 1,
PKT_TYPE_NORMAL = 2,
PKT_TYPE_RX_DUP_RFB = 3,
PKT_TYPE_RX_TMR = 4,
PKT_TYPE_RETRIEVE = 5,
PKT_TYPE_RX_EVENT = 7,
};
#define MT_RXD1_NORMAL_BSSID GENMASK(31, 26)
#define MT_RXD1_NORMAL_PAYLOAD_FORMAT GENMASK(25, 24)
#define MT_RXD1_NORMAL_HDR_TRANS BIT(23)
#define MT_RXD1_NORMAL_HDR_OFFSET BIT(22)
#define MT_RXD1_NORMAL_MAC_HDR_LEN GENMASK(21, 16)
#define MT_RXD1_NORMAL_CH_FREQ GENMASK(15, 8)
#define MT_RXD1_NORMAL_KEY_ID GENMASK(7, 6)
#define MT_RXD1_NORMAL_BEACON_UC BIT(5)
#define MT_RXD1_NORMAL_BEACON_MC BIT(4)
#define MT_RXD1_NORMAL_BCAST BIT(3)
#define MT_RXD1_NORMAL_MCAST BIT(2)
#define MT_RXD1_NORMAL_U2M BIT(1)
#define MT_RXD1_NORMAL_HTC_VLD BIT(0)
#define MT_RXD2_NORMAL_NON_AMPDU BIT(31)
#define MT_RXD2_NORMAL_NON_AMPDU_SUB BIT(30)
#define MT_RXD2_NORMAL_NDATA BIT(29)
#define MT_RXD2_NORMAL_NULL_FRAME BIT(28)
#define MT_RXD2_NORMAL_FRAG BIT(27)
#define MT_RXD2_NORMAL_UDF_VALID BIT(26)
#define MT_RXD2_NORMAL_LLC_MIS BIT(25)
#define MT_RXD2_NORMAL_MAX_LEN_ERROR BIT(24)
#define MT_RXD2_NORMAL_AMSDU_ERR BIT(23)
#define MT_RXD2_NORMAL_LEN_MISMATCH BIT(22)
#define MT_RXD2_NORMAL_TKIP_MIC_ERR BIT(21)
#define MT_RXD2_NORMAL_ICV_ERR BIT(20)
#define MT_RXD2_NORMAL_CLM BIT(19)
#define MT_RXD2_NORMAL_CM BIT(18)
#define MT_RXD2_NORMAL_FCS_ERR BIT(17)
#define MT_RXD2_NORMAL_SW_BIT BIT(16)
#define MT_RXD2_NORMAL_SEC_MODE GENMASK(15, 12)
#define MT_RXD2_NORMAL_TID GENMASK(11, 8)
#define MT_RXD2_NORMAL_WLAN_IDX GENMASK(7, 0)
#define MT_RXD3_NORMAL_PF_STS GENMASK(31, 30)
#define MT_RXD3_NORMAL_PF_MODE BIT(29)
#define MT_RXD3_NORMAL_CLS_BITMAP GENMASK(28, 19)
#define MT_RXD3_NORMAL_WOL GENMASK(18, 14)
#define MT_RXD3_NORMAL_MAGIC_PKT BIT(13)
#define MT_RXD3_NORMAL_OFLD GENMASK(12, 11)
#define MT_RXD3_NORMAL_CLS BIT(10)
#define MT_RXD3_NORMAL_PATTERN_DROP BIT(9)
#define MT_RXD3_NORMAL_TSF_COMPARE_LOSS BIT(8)
#define MT_RXD3_NORMAL_RXV_SEQ GENMASK(7, 0)
#define MT_RXV1_VHTA1_B5_B4 GENMASK(31, 30)
#define MT_RXV1_VHTA2_B8_B1 GENMASK(29, 22)
#define MT_RXV1_HT_NO_SOUND BIT(21)
#define MT_RXV1_HT_SMOOTH BIT(20)
#define MT_RXV1_HT_SHORT_GI BIT(19)
#define MT_RXV1_HT_AGGR BIT(18)
#define MT_RXV1_VHTA1_B22 BIT(17)
#define MT_RXV1_FRAME_MODE GENMASK(16, 15)
#define MT_RXV1_TX_MODE GENMASK(14, 12)
#define MT_RXV1_HT_EXT_LTF GENMASK(11, 10)
#define MT_RXV1_HT_AD_CODE BIT(9)
#define MT_RXV1_HT_STBC GENMASK(8, 7)
#define MT_RXV1_TX_RATE GENMASK(6, 0)
#define MT_RXV2_VHTA1_B16_B6 GENMASK(31, 21)
#define MT_RXV2_LENGTH GENMASK(20, 0)
#define MT_RXV3_F_AGC1_CAL_GAIN GENMASK(31, 29)
#define MT_RXV3_F_AGC1_EQ_CAL BIT(28)
#define MT_RXV3_RCPI1 GENMASK(27, 20)
#define MT_RXV3_F_AGC0_CAL_GAIN GENMASK(19, 17)
#define MT_RXV3_F_AGC0_EQ_CAL BIT(16)
#define MT_RXV3_RCPI0 GENMASK(15, 8)
#define MT_RXV3_SEL_ANT BIT(7)
#define MT_RXV3_ACI_DET_X BIT(6)
#define MT_RXV3_OFDM_FREQ_TRANS_DETECT BIT(5)
#define MT_RXV3_VHTA1_B21_B17 GENMASK(4, 0)
#define MT_RXV4_F_AGC_CAL_GAIN GENMASK(31, 29)
#define MT_RXV4_F_AGC2_EQ_CAL BIT(28)
#define MT_RXV4_IB_RSSI1 GENMASK(27, 20)
#define MT_RXV4_F_AGC_LPF_GAIN_X GENMASK(19, 16)
#define MT_RXV4_WB_RSSI_X GENMASK(15, 8)
#define MT_RXV4_IB_RSSI0 GENMASK(7, 0)
#define MT_RXV5_LTF_SNR0 GENMASK(31, 26)
#define MT_RXV5_LTF_PROC_TIME GENMASK(25, 19)
#define MT_RXV5_FOE GENMASK(18, 7)
#define MT_RXV5_C_AGC_SATE GENMASK(6, 4)
#define MT_RXV5_F_AGC_LNA_GAIN_0 GENMASK(3, 2)
#define MT_RXV5_F_AGC_LNA_GAIN_1 GENMASK(1, 0)
#define MT_RXV6_C_AGC_STATE GENMASK(30, 28)
#define MT_RXV6_NS_TS_FIELD GENMASK(27, 25)
#define MT_RXV6_RX_VALID BIT(24)
#define MT_RXV6_NF2 GENMASK(23, 16)
#define MT_RXV6_NF1 GENMASK(15, 8)
#define MT_RXV6_NF0 GENMASK(7, 0)
enum mt7603_tx_header_format {
MT_HDR_FORMAT_802_3,
MT_HDR_FORMAT_CMD,
MT_HDR_FORMAT_802_11,
MT_HDR_FORMAT_802_11_EXT,
};
#define MT_TXD_SIZE (8 * 4)
#define MT_TXD0_P_IDX BIT(31)
#define MT_TXD0_Q_IDX GENMASK(30, 27)
#define MT_TXD0_UTXB BIT(26)
#define MT_TXD0_UNXV BIT(25)
#define MT_TXD0_UDP_TCP_SUM BIT(24)
#define MT_TXD0_IP_SUM BIT(23)
#define MT_TXD0_ETH_TYPE_OFFSET GENMASK(22, 16)
#define MT_TXD0_TX_BYTES GENMASK(15, 0)
#define MT_TXD1_OWN_MAC GENMASK(31, 26)
#define MT_TXD1_PROTECTED BIT(23)
#define MT_TXD1_TID GENMASK(22, 20)
#define MT_TXD1_NO_ACK BIT(19)
#define MT_TXD1_HDR_PAD GENMASK(18, 16)
#define MT_TXD1_LONG_FORMAT BIT(15)
#define MT_TXD1_HDR_FORMAT GENMASK(14, 13)
#define MT_TXD1_HDR_INFO GENMASK(12, 8)
#define MT_TXD1_WLAN_IDX GENMASK(7, 0)
#define MT_TXD2_FIX_RATE BIT(31)
#define MT_TXD2_TIMING_MEASURE BIT(30)
#define MT_TXD2_BA_DISABLE BIT(29)
#define MT_TXD2_POWER_OFFSET GENMASK(28, 24)
#define MT_TXD2_MAX_TX_TIME GENMASK(23, 16)
#define MT_TXD2_FRAG GENMASK(15, 14)
#define MT_TXD2_HTC_VLD BIT(13)
#define MT_TXD2_DURATION BIT(12)
#define MT_TXD2_BIP BIT(11)
#define MT_TXD2_MULTICAST BIT(10)
#define MT_TXD2_RTS BIT(9)
#define MT_TXD2_SOUNDING BIT(8)
#define MT_TXD2_NDPA BIT(7)
#define MT_TXD2_NDP BIT(6)
#define MT_TXD2_FRAME_TYPE GENMASK(5, 4)
#define MT_TXD2_SUB_TYPE GENMASK(3, 0)
#define MT_TXD3_SN_VALID BIT(31)
#define MT_TXD3_PN_VALID BIT(30)
#define MT_TXD3_SEQ GENMASK(27, 16)
#define MT_TXD3_REM_TX_COUNT GENMASK(15, 11)
#define MT_TXD3_TX_COUNT GENMASK(10, 6)
#define MT_TXD4_PN_LOW GENMASK(31, 0)
#define MT_TXD5_PN_HIGH GENMASK(31, 16)
#define MT_TXD5_SW_POWER_MGMT BIT(13)
#define MT_TXD5_BA_SEQ_CTRL BIT(12)
#define MT_TXD5_DA_SELECT BIT(11)
#define MT_TXD5_TX_STATUS_HOST BIT(10)
#define MT_TXD5_TX_STATUS_MCU BIT(9)
#define MT_TXD5_TX_STATUS_FMT BIT(8)
#define MT_TXD5_PID GENMASK(7, 0)
#define MT_TXD6_SGI BIT(31)
#define MT_TXD6_LDPC BIT(30)
#define MT_TXD6_TX_RATE GENMASK(29, 18)
#define MT_TXD6_I_TXBF BIT(17)
#define MT_TXD6_E_TXBF BIT(16)
#define MT_TXD6_DYN_BW BIT(15)
#define MT_TXD6_ANT_PRI GENMASK(14, 12)
#define MT_TXD6_SPE_EN BIT(11)
#define MT_TXD6_FIXED_BW BIT(10)
#define MT_TXD6_BW GENMASK(9, 8)
#define MT_TXD6_ANT_ID GENMASK(7, 2)
#define MT_TXD6_FIXED_RATE BIT(0)
#define MT_TX_RATE_STBC BIT(11)
#define MT_TX_RATE_NSS GENMASK(10, 9)
#define MT_TX_RATE_MODE GENMASK(8, 6)
#define MT_TX_RATE_IDX GENMASK(5, 0)
#define MT_TXS0_ANTENNA GENMASK(31, 26)
#define MT_TXS0_TID GENMASK(25, 22)
#define MT_TXS0_BA_ERROR BIT(22)
#define MT_TXS0_PS_FLAG BIT(21)
#define MT_TXS0_TXOP_TIMEOUT BIT(20)
#define MT_TXS0_BIP_ERROR BIT(19)
#define MT_TXS0_QUEUE_TIMEOUT BIT(18)
#define MT_TXS0_RTS_TIMEOUT BIT(17)
#define MT_TXS0_ACK_TIMEOUT BIT(16)
#define MT_TXS0_ACK_ERROR_MASK GENMASK(18, 16)
#define MT_TXS0_TX_STATUS_HOST BIT(15)
#define MT_TXS0_TX_STATUS_MCU BIT(14)
#define MT_TXS0_TXS_FORMAT BIT(13)
#define MT_TXS0_FIXED_RATE BIT(12)
#define MT_TXS0_TX_RATE GENMASK(11, 0)
#define MT_TXS1_F0_TIMESTAMP GENMASK(31, 0)
#define MT_TXS1_F1_NOISE_2 GENMASK(23, 16)
#define MT_TXS1_F1_NOISE_1 GENMASK(15, 8)
#define MT_TXS1_F1_NOISE_0 GENMASK(7, 0)
#define MT_TXS2_F0_FRONT_TIME GENMASK(24, 0)
#define MT_TXS2_F1_RCPI_2 GENMASK(23, 16)
#define MT_TXS2_F1_RCPI_1 GENMASK(15, 8)
#define MT_TXS2_F1_RCPI_0 GENMASK(7, 0)
#define MT_TXS3_WCID GENMASK(31, 24)
#define MT_TXS3_RXV_SEQNO GENMASK(23, 16)
#define MT_TXS3_TX_DELAY GENMASK(15, 0)
#define MT_TXS4_LAST_TX_RATE GENMASK(31, 29)
#define MT_TXS4_TX_COUNT GENMASK(28, 24)
#define MT_TXS4_AMPDU BIT(23)
#define MT_TXS4_ACKED_MPDU BIT(22)
#define MT_TXS4_PID GENMASK(21, 14)
#define MT_TXS4_BW GENMASK(13, 12)
#define MT_TXS4_F0_SEQNO GENMASK(11, 0)
#define MT_TXS4_F1_TSSI GENMASK(11, 0)
#endif

View File

@ -0,0 +1,709 @@
/* SPDX-License-Identifier: ISC */
#include <linux/etherdevice.h>
#include <linux/platform_device.h>
#include <linux/pci.h>
#include <linux/module.h>
#include "mt7603.h"
#include "eeprom.h"
static int
mt7603_start(struct ieee80211_hw *hw)
{
struct mt7603_dev *dev = hw->priv;
mt7603_mac_start(dev);
dev->survey_time = ktime_get_boottime();
set_bit(MT76_STATE_RUNNING, &dev->mt76.state);
mt7603_mac_work(&dev->mac_work.work);
return 0;
}
static void
mt7603_stop(struct ieee80211_hw *hw)
{
struct mt7603_dev *dev = hw->priv;
clear_bit(MT76_STATE_RUNNING, &dev->mt76.state);
cancel_delayed_work_sync(&dev->mac_work);
mt7603_mac_stop(dev);
}
static int
mt7603_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
struct mt7603_vif *mvif = (struct mt7603_vif *)vif->drv_priv;
struct mt7603_dev *dev = hw->priv;
struct mt76_txq *mtxq;
u8 bc_addr[ETH_ALEN];
int idx;
int ret = 0;
mutex_lock(&dev->mt76.mutex);
mvif->idx = ffs(~dev->vif_mask) - 1;
if (mvif->idx >= MT7603_MAX_INTERFACES) {
ret = -ENOSPC;
goto out;
}
mt76_wr(dev, MT_MAC_ADDR0(mvif->idx),
get_unaligned_le32(vif->addr));
mt76_wr(dev, MT_MAC_ADDR1(mvif->idx),
(get_unaligned_le16(vif->addr + 4) |
MT_MAC_ADDR1_VALID));
if (vif->type == NL80211_IFTYPE_AP) {
mt76_wr(dev, MT_BSSID0(mvif->idx),
get_unaligned_le32(vif->addr));
mt76_wr(dev, MT_BSSID1(mvif->idx),
(get_unaligned_le16(vif->addr + 4) |
MT_BSSID1_VALID));
}
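/* per-vif broadcast entries are allocated from the top of the WTBL, just below MT7603_WTBL_RESERVED */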
idx = MT7603_WTBL_RESERVED - 1 - mvif->idx;
dev->vif_mask |= BIT(mvif->idx);
mvif->sta.wcid.idx = idx;
mvif->sta.wcid.hw_key_idx = -1;
eth_broadcast_addr(bc_addr);
mt7603_wtbl_init(dev, idx, mvif->idx, bc_addr);
mtxq = (struct mt76_txq *)vif->txq->drv_priv;
mtxq->wcid = &mvif->sta.wcid;
mt76_txq_init(&dev->mt76, vif->txq);
rcu_assign_pointer(dev->mt76.wcid[idx], &mvif->sta.wcid);
out:
mutex_unlock(&dev->mt76.mutex);
return ret;
}
static void
mt7603_remove_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
struct mt7603_vif *mvif = (struct mt7603_vif *)vif->drv_priv;
struct mt7603_dev *dev = hw->priv;
int idx = mvif->sta.wcid.idx;
mt76_wr(dev, MT_MAC_ADDR0(mvif->idx), 0);
mt76_wr(dev, MT_MAC_ADDR1(mvif->idx), 0);
mt76_wr(dev, MT_BSSID0(mvif->idx), 0);
mt76_wr(dev, MT_BSSID1(mvif->idx), 0);
mt7603_beacon_set_timer(dev, mvif->idx, 0);
rcu_assign_pointer(dev->mt76.wcid[idx], NULL);
mt76_txq_remove(&dev->mt76, vif->txq);
mutex_lock(&dev->mt76.mutex);
dev->vif_mask &= ~BIT(mvif->idx);
mutex_unlock(&dev->mt76.mutex);
}
static void
mt7603_init_edcca(struct mt7603_dev *dev)
{
/* Set lower signal level to -65dBm */
mt76_rmw_field(dev, MT_RXTD(8), MT_RXTD_8_LOWER_SIGNAL, 0x23);
/* clear previous energy detect monitor results */
mt76_rr(dev, MT_MIB_STAT_ED);
if (dev->ed_monitor)
mt76_set(dev, MT_MIB_CTL, MT_MIB_CTL_ED_TIME);
else
mt76_clear(dev, MT_MIB_CTL, MT_MIB_CTL_ED_TIME);
dev->ed_strict_mode = 0xff;
dev->ed_strong_signal = 0;
dev->ed_time = ktime_get_boottime();
mt7603_edcca_set_strict(dev, false);
}
static int
mt7603_set_channel(struct mt7603_dev *dev, struct cfg80211_chan_def *def)
{
u8 *rssi_data = (u8 *)dev->mt76.eeprom.data;
int idx, ret;
u8 bw = MT_BW_20;
bool failed = false;
cancel_delayed_work_sync(&dev->mac_work);
mutex_lock(&dev->mt76.mutex);
set_bit(MT76_RESET, &dev->mt76.state);
mt76_set_channel(&dev->mt76);
mt7603_mac_stop(dev);
if (def->width == NL80211_CHAN_WIDTH_40)
bw = MT_BW_40;
dev->mt76.chandef = *def;
mt76_rmw_field(dev, MT_AGG_BWCR, MT_AGG_BWCR_BW, bw);
ret = mt7603_mcu_set_channel(dev);
if (ret) {
failed = true;
goto out;
}
if (def->chan->band == NL80211_BAND_5GHZ) {
idx = 1;
rssi_data += MT_EE_RSSI_OFFSET_5G;
} else {
idx = 0;
rssi_data += MT_EE_RSSI_OFFSET_2G;
}
memcpy(dev->rssi_offset, rssi_data, sizeof(dev->rssi_offset));
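/* MT_WF_RMAC_CH_FREQ packs the band in bit 0 (1 = 5 GHz) and the in-band channel index in the upper bits */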
idx |= (def->chan -
mt76_hw(dev)->wiphy->bands[def->chan->band]->channels) << 1;
mt76_wr(dev, MT_WF_RMAC_CH_FREQ, idx);
mt7603_mac_set_timing(dev);
mt7603_mac_start(dev);
clear_bit(MT76_RESET, &dev->mt76.state);
mt76_txq_schedule_all(&dev->mt76);
ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mac_work,
MT7603_WATCHDOG_TIME);
/* reset channel stats */
mt76_clear(dev, MT_MIB_CTL, MT_MIB_CTL_READ_CLR_DIS);
mt76_set(dev, MT_MIB_CTL,
MT_MIB_CTL_CCA_NAV_TX | MT_MIB_CTL_PSCCA_TIME);
mt76_rr(dev, MT_MIB_STAT_PSCCA);
mt7603_cca_stats_reset(dev);
dev->survey_time = ktime_get_boottime();
mt7603_init_edcca(dev);
out:
mutex_unlock(&dev->mt76.mutex);
if (failed)
mt7603_mac_work(&dev->mac_work.work);
return ret;
}
static int
mt7603_config(struct ieee80211_hw *hw, u32 changed)
{
struct mt7603_dev *dev = hw->priv;
int ret = 0;
if (changed & (IEEE80211_CONF_CHANGE_CHANNEL |
IEEE80211_CONF_CHANGE_POWER))
ret = mt7603_set_channel(dev, &hw->conf.chandef);
if (changed & IEEE80211_CONF_CHANGE_MONITOR) {
mutex_lock(&dev->mt76.mutex);
if (!(hw->conf.flags & IEEE80211_CONF_MONITOR))
dev->rxfilter |= MT_WF_RFCR_DROP_OTHER_UC;
else
dev->rxfilter &= ~MT_WF_RFCR_DROP_OTHER_UC;
mt76_wr(dev, MT_WF_RFCR, dev->rxfilter);
mutex_unlock(&dev->mt76.mutex);
}
return ret;
}
static void
mt7603_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags,
unsigned int *total_flags, u64 multicast)
{
struct mt7603_dev *dev = hw->priv;
u32 flags = 0;
#define MT76_FILTER(_flag, _hw) do { \
flags |= *total_flags & FIF_##_flag; \
dev->rxfilter &= ~(_hw); \
dev->rxfilter |= !(flags & FIF_##_flag) * (_hw); \
} while (0)
dev->rxfilter &= ~(MT_WF_RFCR_DROP_OTHER_BSS |
MT_WF_RFCR_DROP_OTHER_BEACON |
MT_WF_RFCR_DROP_FRAME_REPORT |
MT_WF_RFCR_DROP_PROBEREQ |
MT_WF_RFCR_DROP_MCAST_FILTERED |
MT_WF_RFCR_DROP_MCAST |
MT_WF_RFCR_DROP_BCAST |
MT_WF_RFCR_DROP_DUPLICATE |
MT_WF_RFCR_DROP_A2_BSSID |
MT_WF_RFCR_DROP_UNWANTED_CTL |
MT_WF_RFCR_DROP_STBC_MULTI);
MT76_FILTER(OTHER_BSS, MT_WF_RFCR_DROP_OTHER_TIM |
MT_WF_RFCR_DROP_A3_MAC |
MT_WF_RFCR_DROP_A3_BSSID);
MT76_FILTER(FCSFAIL, MT_WF_RFCR_DROP_FCSFAIL);
MT76_FILTER(CONTROL, MT_WF_RFCR_DROP_CTS |
MT_WF_RFCR_DROP_RTS |
MT_WF_RFCR_DROP_CTL_RSV |
MT_WF_RFCR_DROP_NDPA);
*total_flags = flags;
mt76_wr(dev, MT_WF_RFCR, dev->rxfilter);
}
static void
mt7603_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_bss_conf *info, u32 changed)
{
struct mt7603_dev *dev = hw->priv;
struct mt7603_vif *mvif = (struct mt7603_vif *)vif->drv_priv;
mutex_lock(&dev->mt76.mutex);
if (changed & (BSS_CHANGED_ASSOC | BSS_CHANGED_BSSID)) {
if (info->assoc || info->ibss_joined) {
mt76_wr(dev, MT_BSSID0(mvif->idx),
get_unaligned_le32(info->bssid));
mt76_wr(dev, MT_BSSID1(mvif->idx),
(get_unaligned_le16(info->bssid + 4) |
MT_BSSID1_VALID));
} else {
mt76_wr(dev, MT_BSSID0(mvif->idx), 0);
mt76_wr(dev, MT_BSSID1(mvif->idx), 0);
}
}
if (changed & BSS_CHANGED_ERP_SLOT) {
int slottime = info->use_short_slot ? 9 : 20;
if (slottime != dev->slottime) {
dev->slottime = slottime;
mt7603_mac_set_timing(dev);
}
}
if (changed & (BSS_CHANGED_BEACON_ENABLED | BSS_CHANGED_BEACON_INT)) {
int beacon_int = !!info->enable_beacon * info->beacon_int;
tasklet_disable(&dev->pre_tbtt_tasklet);
mt7603_beacon_set_timer(dev, mvif->idx, beacon_int);
tasklet_enable(&dev->pre_tbtt_tasklet);
}
mutex_unlock(&dev->mt76.mutex);
}
int
mt7603_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
struct ieee80211_sta *sta)
{
struct mt7603_dev *dev = container_of(mdev, struct mt7603_dev, mt76);
struct mt7603_sta *msta = (struct mt7603_sta *)sta->drv_priv;
struct mt7603_vif *mvif = (struct mt7603_vif *)vif->drv_priv;
int idx;
int ret = 0;
idx = mt76_wcid_alloc(dev->mt76.wcid_mask, MT7603_WTBL_STA - 1);
if (idx < 0)
return -ENOSPC;
__skb_queue_head_init(&msta->psq);
msta->ps = ~0;
msta->smps = ~0;
msta->wcid.sta = 1;
msta->wcid.idx = idx;
mt7603_wtbl_init(dev, idx, mvif->idx, sta->addr);
mt7603_wtbl_set_ps(dev, msta, false);
if (vif->type == NL80211_IFTYPE_AP)
set_bit(MT_WCID_FLAG_CHECK_PS, &msta->wcid.flags);
return ret;
}
void
mt7603_sta_assoc(struct mt76_dev *mdev, struct ieee80211_vif *vif,
struct ieee80211_sta *sta)
{
struct mt7603_dev *dev = container_of(mdev, struct mt7603_dev, mt76);
mt7603_wtbl_update_cap(dev, sta);
}
void
mt7603_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif,
struct ieee80211_sta *sta)
{
struct mt7603_dev *dev = container_of(mdev, struct mt7603_dev, mt76);
struct mt7603_sta *msta = (struct mt7603_sta *)sta->drv_priv;
struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;
spin_lock_bh(&dev->ps_lock);
__skb_queue_purge(&msta->psq);
mt7603_filter_tx(dev, wcid->idx, true);
spin_unlock_bh(&dev->ps_lock);
mt7603_wtbl_clear(dev, wcid->idx);
}
static void
mt7603_ps_tx_list(struct mt7603_dev *dev, struct sk_buff_head *list)
{
struct sk_buff *skb;
while ((skb = __skb_dequeue(list)) != NULL)
mt76_tx_queue_skb_raw(dev, skb_get_queue_mapping(skb),
skb, 0);
}
void
mt7603_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta, bool ps)
{
struct mt7603_dev *dev = container_of(mdev, struct mt7603_dev, mt76);
struct mt7603_sta *msta = (struct mt7603_sta *)sta->drv_priv;
struct sk_buff_head list;
mt76_stop_tx_queues(&dev->mt76, sta, false);
mt7603_wtbl_set_ps(dev, msta, ps);
if (ps)
return;
__skb_queue_head_init(&list);
spin_lock_bh(&dev->ps_lock);
skb_queue_splice_tail_init(&msta->psq, &list);
spin_unlock_bh(&dev->ps_lock);
mt7603_ps_tx_list(dev, &list);
}
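/* Release frames queued in the driver's PS queue first; any remaining frame budget is handed back to mac80211 via mt76_release_buffered_frames() */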
static void
mt7603_release_buffered_frames(struct ieee80211_hw *hw,
struct ieee80211_sta *sta,
u16 tids, int nframes,
enum ieee80211_frame_release_type reason,
bool more_data)
{
struct mt7603_dev *dev = hw->priv;
struct mt7603_sta *msta = (struct mt7603_sta *)sta->drv_priv;
struct sk_buff_head list;
struct sk_buff *skb, *tmp;
__skb_queue_head_init(&list);
spin_lock_bh(&dev->ps_lock);
skb_queue_walk_safe(&msta->psq, skb, tmp) {
if (!nframes)
break;
if (!(tids & BIT(skb->priority)))
continue;
skb_set_queue_mapping(skb, MT_TXQ_PSD);
__skb_unlink(skb, &msta->psq);
__skb_queue_tail(&list, skb);
nframes--;
}
spin_unlock_bh(&dev->ps_lock);
mt7603_ps_tx_list(dev, &list);
if (nframes)
mt76_release_buffered_frames(hw, sta, tids, nframes, reason,
more_data);
}
static int
mt7603_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
struct ieee80211_vif *vif, struct ieee80211_sta *sta,
struct ieee80211_key_conf *key)
{
struct mt7603_dev *dev = hw->priv;
struct mt7603_vif *mvif = (struct mt7603_vif *)vif->drv_priv;
struct mt7603_sta *msta = sta ? (struct mt7603_sta *)sta->drv_priv :
&mvif->sta;
struct mt76_wcid *wcid = &msta->wcid;
int idx = key->keyidx;
/* fall back to sw encryption for unsupported ciphers */
switch (key->cipher) {
case WLAN_CIPHER_SUITE_TKIP:
case WLAN_CIPHER_SUITE_CCMP:
break;
default:
return -EOPNOTSUPP;
}
/*
* The hardware does not support per-STA RX GTK, fall back
* to software mode for these.
*/
if ((vif->type == NL80211_IFTYPE_ADHOC ||
vif->type == NL80211_IFTYPE_MESH_POINT) &&
(key->cipher == WLAN_CIPHER_SUITE_TKIP ||
key->cipher == WLAN_CIPHER_SUITE_CCMP) &&
!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
return -EOPNOTSUPP;
if (cmd == SET_KEY) {
key->hw_key_idx = wcid->idx;
wcid->hw_key_idx = idx;
} else {
if (idx == wcid->hw_key_idx)
wcid->hw_key_idx = -1;
key = NULL;
}
mt76_wcid_key_setup(&dev->mt76, wcid, key);
return mt7603_wtbl_set_key(dev, wcid->idx, key);
}
static int
mt7603_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u16 queue,
const struct ieee80211_tx_queue_params *params)
{
struct mt7603_dev *dev = hw->priv;
u16 cw_min = (1 << 5) - 1;
u16 cw_max = (1 << 10) - 1;
u32 val;
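/* map the mac80211 AC number to the hardware queue index */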
queue = dev->mt76.q_tx[queue].hw_idx;
if (params->cw_min)
cw_min = params->cw_min;
if (params->cw_max)
cw_max = params->cw_max;
mutex_lock(&dev->mt76.mutex);
mt7603_mac_stop(dev);
val = mt76_rr(dev, MT_WMM_TXOP(queue));
val &= ~(MT_WMM_TXOP_MASK << MT_WMM_TXOP_SHIFT(queue));
val |= params->txop << MT_WMM_TXOP_SHIFT(queue);
mt76_wr(dev, MT_WMM_TXOP(queue), val);
val = mt76_rr(dev, MT_WMM_AIFSN);
val &= ~(MT_WMM_AIFSN_MASK << MT_WMM_AIFSN_SHIFT(queue));
val |= params->aifs << MT_WMM_AIFSN_SHIFT(queue);
mt76_wr(dev, MT_WMM_AIFSN, val);
val = mt76_rr(dev, MT_WMM_CWMIN);
val &= ~(MT_WMM_CWMIN_MASK << MT_WMM_CWMIN_SHIFT(queue));
val |= cw_min << MT_WMM_CWMIN_SHIFT(queue);
mt76_wr(dev, MT_WMM_CWMIN, val);
val = mt76_rr(dev, MT_WMM_CWMAX(queue));
val &= ~(MT_WMM_CWMAX_MASK << MT_WMM_CWMAX_SHIFT(queue));
val |= cw_max << MT_WMM_CWMAX_SHIFT(queue);
mt76_wr(dev, MT_WMM_CWMAX(queue), val);
mt7603_mac_start(dev);
mutex_unlock(&dev->mt76.mutex);
return 0;
}
static void
mt7603_sw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
const u8 *mac)
{
struct mt7603_dev *dev = hw->priv;
set_bit(MT76_SCANNING, &dev->mt76.state);
mt7603_beacon_set_timer(dev, -1, 0);
}
static void
mt7603_sw_scan_complete(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
struct mt7603_dev *dev = hw->priv;
clear_bit(MT76_SCANNING, &dev->mt76.state);
mt7603_beacon_set_timer(dev, -1, dev->beacon_int);
}
static void
mt7603_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
u32 queues, bool drop)
{
}
static int
mt7603_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_ampdu_params *params)
{
enum ieee80211_ampdu_mlme_action action = params->action;
struct mt7603_dev *dev = hw->priv;
struct ieee80211_sta *sta = params->sta;
struct ieee80211_txq *txq = sta->txq[params->tid];
struct mt7603_sta *msta = (struct mt7603_sta *)sta->drv_priv;
u16 tid = params->tid;
u16 *ssn = &params->ssn;
u8 ba_size = params->buf_size;
struct mt76_txq *mtxq;
if (!txq)
return -EINVAL;
mtxq = (struct mt76_txq *)txq->drv_priv;
switch (action) {
case IEEE80211_AMPDU_RX_START:
mt76_rx_aggr_start(&dev->mt76, &msta->wcid, tid, *ssn,
params->buf_size);
mt7603_mac_rx_ba_reset(dev, sta->addr, tid);
break;
case IEEE80211_AMPDU_RX_STOP:
mt76_rx_aggr_stop(&dev->mt76, &msta->wcid, tid);
break;
case IEEE80211_AMPDU_TX_OPERATIONAL:
mtxq->aggr = true;
mtxq->send_bar = false;
mt7603_mac_tx_ba_reset(dev, msta->wcid.idx, tid, *ssn, ba_size);
break;
case IEEE80211_AMPDU_TX_STOP_FLUSH:
case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
mtxq->aggr = false;
ieee80211_send_bar(vif, sta->addr, tid, mtxq->agg_ssn);
mt7603_mac_tx_ba_reset(dev, msta->wcid.idx, tid, *ssn, -1);
break;
case IEEE80211_AMPDU_TX_START:
mtxq->agg_ssn = *ssn << 4;
ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
break;
case IEEE80211_AMPDU_TX_STOP_CONT:
mtxq->aggr = false;
mt7603_mac_tx_ba_reset(dev, msta->wcid.idx, tid, *ssn, -1);
ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
break;
}
return 0;
}
static void
mt7603_sta_rate_tbl_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_sta *sta)
{
struct mt7603_dev *dev = hw->priv;
struct mt7603_sta *msta = (struct mt7603_sta *)sta->drv_priv;
struct ieee80211_sta_rates *sta_rates = rcu_dereference(sta->rates);
int i;
spin_lock_bh(&dev->mt76.lock);
for (i = 0; i < ARRAY_SIZE(msta->rates); i++) {
msta->rates[i].idx = sta_rates->rate[i].idx;
msta->rates[i].count = sta_rates->rate[i].count;
msta->rates[i].flags = sta_rates->rate[i].flags;
if (msta->rates[i].idx < 0 || !msta->rates[i].count)
break;
}
msta->n_rates = i;
mt7603_wtbl_set_rates(dev, msta, NULL, msta->rates);
msta->rate_probe = false;
mt7603_wtbl_set_smps(dev, msta,
sta->smps_mode == IEEE80211_SMPS_DYNAMIC);
spin_unlock_bh(&dev->mt76.lock);
}
static void
mt7603_set_coverage_class(struct ieee80211_hw *hw, s16 coverage_class)
{
struct mt7603_dev *dev = hw->priv;
dev->coverage_class = coverage_class;
mt7603_mac_set_timing(dev);
}
static void mt7603_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control,
struct sk_buff *skb)
{
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct ieee80211_vif *vif = info->control.vif;
struct mt7603_dev *dev = hw->priv;
struct mt76_wcid *wcid = &dev->global_sta.wcid;
if (control->sta) {
struct mt7603_sta *msta;
msta = (struct mt7603_sta *)control->sta->drv_priv;
wcid = &msta->wcid;
} else if (vif) {
struct mt7603_vif *mvif;
mvif = (struct mt7603_vif *)vif->drv_priv;
wcid = &mvif->sta.wcid;
}
mt76_tx(&dev->mt76, control->sta, wcid, skb);
}
static int
mt7603_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta, bool set)
{
return 0;
}
const struct ieee80211_ops mt7603_ops = {
.tx = mt7603_tx,
.start = mt7603_start,
.stop = mt7603_stop,
.add_interface = mt7603_add_interface,
.remove_interface = mt7603_remove_interface,
.config = mt7603_config,
.configure_filter = mt7603_configure_filter,
.bss_info_changed = mt7603_bss_info_changed,
.sta_state = mt76_sta_state,
.set_key = mt7603_set_key,
.conf_tx = mt7603_conf_tx,
.sw_scan_start = mt7603_sw_scan,
.sw_scan_complete = mt7603_sw_scan_complete,
.flush = mt7603_flush,
.ampdu_action = mt7603_ampdu_action,
.get_txpower = mt76_get_txpower,
.wake_tx_queue = mt76_wake_tx_queue,
.sta_rate_tbl_update = mt7603_sta_rate_tbl_update,
.release_buffered_frames = mt7603_release_buffered_frames,
.set_coverage_class = mt7603_set_coverage_class,
.set_tim = mt7603_set_tim,
.get_survey = mt76_get_survey,
};
MODULE_LICENSE("Dual BSD/GPL");
static int __init mt7603_init(void)
{
int ret;
ret = platform_driver_register(&mt76_wmac_driver);
if (ret)
return ret;
#ifdef CONFIG_PCI
ret = pci_register_driver(&mt7603_pci_driver);
if (ret)
platform_driver_unregister(&mt76_wmac_driver);
#endif
return ret;
}
static void __exit mt7603_exit(void)
{
#ifdef CONFIG_PCI
pci_unregister_driver(&mt7603_pci_driver);
#endif
platform_driver_unregister(&mt76_wmac_driver);
}
module_init(mt7603_init);
module_exit(mt7603_exit);

View File

@ -0,0 +1,483 @@
/* SPDX-License-Identifier: ISC */
#include <linux/firmware.h>
#include "mt7603.h"
#include "mcu.h"
#include "eeprom.h"
#define MCU_SKB_RESERVE 8
struct mt7603_fw_trailer {
char fw_ver[10];
char build_date[15];
__le32 dl_len;
} __packed;
static int
__mt7603_mcu_msg_send(struct mt7603_dev *dev, struct sk_buff *skb, int cmd,
int query, int *wait_seq)
{
int hdrlen = dev->mcu_running ? sizeof(struct mt7603_mcu_txd) : 12;
struct mt76_dev *mdev = &dev->mt76;
struct mt7603_mcu_txd *txd;
u8 seq;
if (!skb)
return -EINVAL;
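/* allocate a non-zero 4-bit sequence number so the response can be matched in mt7603_mcu_msg_send() */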
seq = ++mdev->mmio.mcu.msg_seq & 0xf;
if (!seq)
seq = ++mdev->mmio.mcu.msg_seq & 0xf;
txd = (struct mt7603_mcu_txd *)skb_push(skb, hdrlen);
memset(txd, 0, hdrlen);
txd->len = cpu_to_le16(skb->len);
if (cmd == -MCU_CMD_FW_SCATTER)
txd->pq_id = cpu_to_le16(MCU_PORT_QUEUE_FW);
else
txd->pq_id = cpu_to_le16(MCU_PORT_QUEUE);
txd->pkt_type = MCU_PKT_ID;
txd->seq = seq;
if (cmd < 0) {
txd->cid = -cmd;
} else {
txd->cid = MCU_CMD_EXT_CID;
txd->ext_cid = cmd;
if (query != MCU_Q_NA)
txd->ext_cid_ack = 1;
}
txd->set_query = query;
if (wait_seq)
*wait_seq = seq;
return mt76_tx_queue_skb_raw(dev, MT_TXQ_MCU, skb, 0);
}
static int
mt7603_mcu_msg_send(struct mt7603_dev *dev, struct sk_buff *skb, int cmd,
int query)
{
struct mt76_dev *mdev = &dev->mt76;
unsigned long expires = jiffies + 3 * HZ;
struct mt7603_mcu_rxd *rxd;
int ret, seq;
mutex_lock(&mdev->mmio.mcu.mutex);
ret = __mt7603_mcu_msg_send(dev, skb, cmd, query, &seq);
if (ret)
goto out;
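/* wait for a response carrying the matching sequence number; unrelated messages are freed and skipped */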
while (1) {
bool check_seq = false;
skb = mt76_mcu_get_response(&dev->mt76, expires);
if (!skb) {
dev_err(mdev->dev,
"MCU message %d (seq %d) timed out\n",
cmd, seq);
dev->mcu_hang = MT7603_WATCHDOG_TIMEOUT;
ret = -ETIMEDOUT;
break;
}
rxd = (struct mt7603_mcu_rxd *)skb->data;
if (seq == rxd->seq)
check_seq = true;
dev_kfree_skb(skb);
if (check_seq)
break;
}
out:
mutex_unlock(&mdev->mmio.mcu.mutex);
return ret;
}
static int
mt7603_mcu_init_download(struct mt7603_dev *dev, u32 addr, u32 len)
{
struct {
__le32 addr;
__le32 len;
__le32 mode;
} req = {
.addr = cpu_to_le32(addr),
.len = cpu_to_le32(len),
.mode = cpu_to_le32(BIT(31)),
};
struct sk_buff *skb = mt7603_mcu_msg_alloc(&req, sizeof(req));
return mt7603_mcu_msg_send(dev, skb, -MCU_CMD_TARGET_ADDRESS_LEN_REQ,
MCU_Q_NA);
}
static int
mt7603_mcu_send_firmware(struct mt7603_dev *dev, const void *data, int len)
{
struct sk_buff *skb;
int ret = 0;
while (len > 0) {
int cur_len = min_t(int, 4096 - sizeof(struct mt7603_mcu_txd),
len);
skb = mt7603_mcu_msg_alloc(data, cur_len);
if (!skb)
return -ENOMEM;
ret = __mt7603_mcu_msg_send(dev, skb, -MCU_CMD_FW_SCATTER,
MCU_Q_NA, NULL);
if (ret)
break;
data += cur_len;
len -= cur_len;
}
return ret;
}
static int
mt7603_mcu_start_firmware(struct mt7603_dev *dev, u32 addr)
{
struct {
__le32 override;
__le32 addr;
} req = {
.override = cpu_to_le32(addr ? 1 : 0),
.addr = cpu_to_le32(addr),
};
struct sk_buff *skb = mt7603_mcu_msg_alloc(&req, sizeof(req));
return mt7603_mcu_msg_send(dev, skb, -MCU_CMD_FW_START_REQ,
MCU_Q_NA);
}
static int
mt7603_mcu_restart(struct mt7603_dev *dev)
{
struct sk_buff *skb = mt7603_mcu_msg_alloc(NULL, 0);
return mt7603_mcu_msg_send(dev, skb, -MCU_CMD_RESTART_DL_REQ,
MCU_Q_NA);
}
static int
mt7603_load_firmware(struct mt7603_dev *dev)
{
const struct firmware *fw;
const struct mt7603_fw_trailer *hdr;
const char *firmware;
int dl_len;
u32 addr, val;
int ret;
if (is_mt7628(dev)) {
if (mt76xx_rev(dev) == MT7628_REV_E1)
firmware = MT7628_FIRMWARE_E1;
else
firmware = MT7628_FIRMWARE_E2;
} else {
if (mt76xx_rev(dev) < MT7603_REV_E2)
firmware = MT7603_FIRMWARE_E1;
else
firmware = MT7603_FIRMWARE_E2;
}
ret = request_firmware(&fw, firmware, dev->mt76.dev);
if (ret)
return ret;
if (!fw || !fw->data || fw->size < sizeof(*hdr)) {
dev_err(dev->mt76.dev, "Invalid firmware\n");
ret = -EINVAL;
goto out;
}
hdr = (const struct mt7603_fw_trailer *)(fw->data + fw->size -
sizeof(*hdr));
dev_info(dev->mt76.dev, "Firmware Version: %.10s\n", hdr->fw_ver);
dev_info(dev->mt76.dev, "Build Time: %.15s\n", hdr->build_date);
addr = mt7603_reg_map(dev, 0x50012498);
mt76_wr(dev, addr, 0x5);
mt76_wr(dev, addr, 0x5);
udelay(1);
/* switch to bypass mode */
mt76_rmw(dev, MT_SCH_4, MT_SCH_4_FORCE_QID,
MT_SCH_4_BYPASS | FIELD_PREP(MT_SCH_4_FORCE_QID, 5));
val = mt76_rr(dev, MT_TOP_MISC2);
if (val & BIT(1)) {
dev_info(dev->mt76.dev, "Firmware already running...\n");
goto running;
}
if (!mt76_poll_msec(dev, MT_TOP_MISC2, BIT(0) | BIT(1), BIT(0), 500)) {
dev_err(dev->mt76.dev, "Timeout waiting for ROM code to become ready\n");
ret = -EIO;
goto out;
}
dl_len = le32_to_cpu(hdr->dl_len) + 4;
ret = mt7603_mcu_init_download(dev, MCU_FIRMWARE_ADDRESS, dl_len);
if (ret) {
dev_err(dev->mt76.dev, "Download request failed\n");
goto out;
}
ret = mt7603_mcu_send_firmware(dev, fw->data, dl_len);
if (ret) {
dev_err(dev->mt76.dev, "Failed to send firmware to device\n");
goto out;
}
ret = mt7603_mcu_start_firmware(dev, MCU_FIRMWARE_ADDRESS);
if (ret) {
dev_err(dev->mt76.dev, "Failed to start firmware\n");
goto out;
}
if (!mt76_poll_msec(dev, MT_TOP_MISC2, BIT(1), BIT(1), 500)) {
dev_err(dev->mt76.dev, "Timeout waiting for firmware to initialize\n");
ret = -EIO;
goto out;
}
running:
mt76_clear(dev, MT_SCH_4, MT_SCH_4_FORCE_QID | MT_SCH_4_BYPASS);
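/* pulse the scheduler reset bit (MT_SCH_4_RESET, BIT(8)) after leaving bypass mode */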
mt76_set(dev, MT_SCH_4, BIT(8));
mt76_clear(dev, MT_SCH_4, BIT(8));
dev->mcu_running = true;
dev_info(dev->mt76.dev, "firmware init done\n");
out:
release_firmware(fw);
return ret;
}
int mt7603_mcu_init(struct mt7603_dev *dev)
{
mutex_init(&dev->mt76.mmio.mcu.mutex);
return mt7603_load_firmware(dev);
}
void mt7603_mcu_exit(struct mt7603_dev *dev)
{
mt7603_mcu_restart(dev);
skb_queue_purge(&dev->mt76.mmio.mcu.res_q);
}
int mt7603_mcu_set_eeprom(struct mt7603_dev *dev)
{
static const u16 req_fields[] = {
#define WORD(_start) \
_start, \
_start + 1
#define GROUP_2G(_start) \
WORD(_start), \
WORD(_start + 2), \
WORD(_start + 4)
MT_EE_NIC_CONF_0 + 1,
WORD(MT_EE_NIC_CONF_1),
MT_EE_WIFI_RF_SETTING,
MT_EE_TX_POWER_DELTA_BW40,
MT_EE_TX_POWER_DELTA_BW80 + 1,
MT_EE_TX_POWER_EXT_PA_5G,
MT_EE_TEMP_SENSOR_CAL,
GROUP_2G(MT_EE_TX_POWER_0_START_2G),
GROUP_2G(MT_EE_TX_POWER_1_START_2G),
WORD(MT_EE_TX_POWER_CCK),
WORD(MT_EE_TX_POWER_OFDM_2G_6M),
WORD(MT_EE_TX_POWER_OFDM_2G_24M),
WORD(MT_EE_TX_POWER_OFDM_2G_54M),
WORD(MT_EE_TX_POWER_HT_BPSK_QPSK),
WORD(MT_EE_TX_POWER_HT_16_64_QAM),
WORD(MT_EE_TX_POWER_HT_64_QAM),
MT_EE_ELAN_RX_MODE_GAIN,
MT_EE_ELAN_RX_MODE_NF,
MT_EE_ELAN_RX_MODE_P1DB,
MT_EE_ELAN_BYPASS_MODE_GAIN,
MT_EE_ELAN_BYPASS_MODE_NF,
MT_EE_ELAN_BYPASS_MODE_P1DB,
WORD(MT_EE_STEP_NUM_NEG_6_7),
WORD(MT_EE_STEP_NUM_NEG_4_5),
WORD(MT_EE_STEP_NUM_NEG_2_3),
WORD(MT_EE_STEP_NUM_NEG_0_1),
WORD(MT_EE_REF_STEP_24G),
WORD(MT_EE_STEP_NUM_PLUS_1_2),
WORD(MT_EE_STEP_NUM_PLUS_3_4),
WORD(MT_EE_STEP_NUM_PLUS_5_6),
MT_EE_STEP_NUM_PLUS_7,
MT_EE_XTAL_FREQ_OFFSET,
MT_EE_XTAL_TRIM_2_COMP,
MT_EE_XTAL_TRIM_3_COMP,
MT_EE_XTAL_WF_RFCAL,
/* unknown fields below */
WORD(0x24),
0x34,
0x39,
0x3b,
WORD(0x42),
WORD(0x9e),
0xf2,
WORD(0xf8),
0xfa,
0x12e,
WORD(0x130), WORD(0x132), WORD(0x134), WORD(0x136),
WORD(0x138), WORD(0x13a), WORD(0x13c), WORD(0x13e),
#undef GROUP_2G
#undef WORD
};
struct req_data {
u16 addr;
u8 val;
u8 pad;
} __packed;
struct {
u8 buffer_mode;
u8 len;
u8 pad[2];
} req_hdr = {
.buffer_mode = 1,
.len = ARRAY_SIZE(req_fields) - 1,
};
struct sk_buff *skb;
struct req_data *data;
const int size = 0xff * sizeof(struct req_data);
u8 *eep = (u8 *)dev->mt76.eeprom.data;
int i;
BUILD_BUG_ON(ARRAY_SIZE(req_fields) * sizeof(*data) > size);
skb = mt7603_mcu_msg_alloc(NULL, size + sizeof(req_hdr));
memcpy(skb_put(skb, sizeof(req_hdr)), &req_hdr, sizeof(req_hdr));
data = (struct req_data *)skb_put(skb, size);
memset(data, 0, size);
for (i = 0; i < ARRAY_SIZE(req_fields); i++) {
data[i].addr = cpu_to_le16(req_fields[i]);
data[i].val = eep[req_fields[i]];
data[i].pad = 0;
}
return mt7603_mcu_msg_send(dev, skb, MCU_EXT_CMD_EFUSE_BUFFER_MODE,
MCU_Q_SET);
}
static int mt7603_mcu_set_tx_power(struct mt7603_dev *dev)
{
struct {
u8 center_channel;
u8 tssi;
u8 temp_comp;
u8 target_power[2];
u8 rate_power_delta[14];
u8 bw_power_delta;
u8 ch_power_delta[6];
u8 temp_comp_power[17];
u8 reserved;
} req = {
.center_channel = dev->mt76.chandef.chan->hw_value,
#define EEP_VAL(n) ((u8 *)dev->mt76.eeprom.data)[n]
.tssi = EEP_VAL(MT_EE_NIC_CONF_1 + 1),
.temp_comp = EEP_VAL(MT_EE_NIC_CONF_1),
.target_power = {
EEP_VAL(MT_EE_TX_POWER_0_START_2G + 2),
EEP_VAL(MT_EE_TX_POWER_1_START_2G + 2)
},
.bw_power_delta = EEP_VAL(MT_EE_TX_POWER_DELTA_BW40),
.ch_power_delta = {
EEP_VAL(MT_EE_TX_POWER_0_START_2G + 3),
EEP_VAL(MT_EE_TX_POWER_0_START_2G + 4),
EEP_VAL(MT_EE_TX_POWER_0_START_2G + 5),
EEP_VAL(MT_EE_TX_POWER_1_START_2G + 3),
EEP_VAL(MT_EE_TX_POWER_1_START_2G + 4),
EEP_VAL(MT_EE_TX_POWER_1_START_2G + 5)
},
#undef EEP_VAL
};
struct sk_buff *skb;
u8 *eep = (u8 *)dev->mt76.eeprom.data;
memcpy(req.rate_power_delta, eep + MT_EE_TX_POWER_CCK,
sizeof(req.rate_power_delta));
memcpy(req.temp_comp_power, eep + MT_EE_STEP_NUM_NEG_6_7,
sizeof(req.temp_comp_power));
skb = mt7603_mcu_msg_alloc(&req, sizeof(req));
return mt7603_mcu_msg_send(dev, skb, MCU_EXT_CMD_SET_TX_POWER_CTRL,
MCU_Q_SET);
}
int mt7603_mcu_set_channel(struct mt7603_dev *dev)
{
struct cfg80211_chan_def *chandef = &dev->mt76.chandef;
struct ieee80211_hw *hw = mt76_hw(dev);
int n_chains = __sw_hweight8(dev->mt76.antenna_mask);
struct {
u8 control_chan;
u8 center_chan;
u8 bw;
u8 tx_streams;
u8 rx_streams;
u8 _res0[7];
u8 txpower[21];
u8 _res1[3];
} req = {
.control_chan = chandef->chan->hw_value,
.center_chan = chandef->chan->hw_value,
.bw = MT_BW_20,
.tx_streams = n_chains,
.rx_streams = n_chains,
};
struct sk_buff *skb;
s8 tx_power;
int ret;
int i;
if (dev->mt76.chandef.width == NL80211_CHAN_WIDTH_40) {
req.bw = MT_BW_40;
if (chandef->center_freq1 > chandef->chan->center_freq)
req.center_chan += 2;
else
req.center_chan -= 2;
}
tx_power = hw->conf.power_level * 2;
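/* hw->conf.power_level is in dBm; convert to 0.5 dB units and, for 2SS devices, back out the 3 dB bonus added in mt7603_init_txpower() */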
if (dev->mt76.antenna_mask == 3)
tx_power -= 6;
tx_power = min(tx_power, dev->tx_power_limit);
dev->mt76.txpower_cur = tx_power;
for (i = 0; i < ARRAY_SIZE(req.txpower); i++)
req.txpower[i] = tx_power;
skb = mt7603_mcu_msg_alloc(&req, sizeof(req));
ret = mt7603_mcu_msg_send(dev, skb, MCU_EXT_CMD_CHANNEL_SWITCH,
MCU_Q_SET);
if (ret)
return ret;
return mt7603_mcu_set_tx_power(dev);
}

View File

@ -0,0 +1,110 @@
/* SPDX-License-Identifier: ISC */
#ifndef __MT7603_MCU_H
#define __MT7603_MCU_H
struct mt7603_mcu_txd {
__le16 len;
__le16 pq_id;
u8 cid;
u8 pkt_type;
u8 set_query;
u8 seq;
u8 uc_d2b0_rev;
u8 ext_cid;
u8 uc_d2b2_rev;
u8 ext_cid_ack;
u32 au4_d3_to_d7_rev[5];
} __packed __aligned(4);
struct mt7603_mcu_rxd {
__le16 len;
__le16 pkt_type_id;
u8 eid;
u8 seq;
__le16 __rsv;
u8 ext_eid;
u8 __rsv1[3];
};
#define MCU_PKT_ID 0xa0
#define MCU_PORT_QUEUE 0x8000
#define MCU_PORT_QUEUE_FW 0xc000
#define MCU_FIRMWARE_ADDRESS 0x100000
enum {
MCU_Q_QUERY,
MCU_Q_SET,
MCU_Q_RESERVED,
MCU_Q_NA
};
enum {
MCU_CMD_TARGET_ADDRESS_LEN_REQ = 0x01,
MCU_CMD_FW_START_REQ = 0x02,
MCU_CMD_INIT_ACCESS_REG = 0x3,
MCU_CMD_PATCH_START_REQ = 0x05,
MCU_CMD_PATCH_FINISH_REQ = 0x07,
MCU_CMD_PATCH_SEM_CONTROL = 0x10,
MCU_CMD_HIF_LOOPBACK = 0x20,
MCU_CMD_CH_PRIVILEGE = 0x20,
MCU_CMD_ACCESS_REG = 0xC2,
MCU_CMD_EXT_CID = 0xED,
MCU_CMD_FW_SCATTER = 0xEE,
MCU_CMD_RESTART_DL_REQ = 0xEF,
};
enum {
MCU_EXT_CMD_RF_REG_ACCESS = 0x02,
MCU_EXT_CMD_RF_TEST = 0x04,
MCU_EXT_CMD_RADIO_ON_OFF_CTRL = 0x05,
MCU_EXT_CMD_WIFI_RX_DISABLE = 0x06,
MCU_EXT_CMD_PM_STATE_CTRL = 0x07,
MCU_EXT_CMD_CHANNEL_SWITCH = 0x08,
MCU_EXT_CMD_NIC_CAPABILITY = 0x09,
MCU_EXT_CMD_PWR_SAVING = 0x0A,
MCU_EXT_CMD_MULTIPLE_REG_ACCESS = 0x0E,
MCU_EXT_CMD_AP_PWR_SAVING_CAPABILITY = 0xF,
MCU_EXT_CMD_SEC_ADDREMOVE_KEY = 0x10,
MCU_EXT_CMD_SET_TX_POWER_CTRL = 0x11,
MCU_EXT_CMD_FW_LOG_2_HOST = 0x13,
MCU_EXT_CMD_PS_RETRIEVE_START = 0x14,
MCU_EXT_CMD_LED_CTRL = 0x17,
MCU_EXT_CMD_PACKET_FILTER = 0x18,
MCU_EXT_CMD_PWR_MGT_BIT_WIFI = 0x1B,
MCU_EXT_CMD_EFUSE_BUFFER_MODE = 0x21,
MCU_EXT_CMD_THERMAL_PROTECT = 0x23,
MCU_EXT_CMD_EDCA_SET = 0x27,
MCU_EXT_CMD_SLOT_TIME_SET = 0x28,
MCU_EXT_CMD_CONFIG_INTERNAL_SETTING = 0x29,
MCU_EXT_CMD_NOA_OFFLOAD_CTRL = 0x2B,
MCU_EXT_CMD_GET_THEMAL_SENSOR = 0x2C,
MCU_EXT_CMD_WAKEUP_OPTION = 0x2E,
MCU_EXT_CMD_AC_QUEUE_CONTROL = 0x31,
MCU_EXT_CMD_BCN_UPDATE = 0x33
};
enum {
MCU_EXT_EVENT_CMD_RESULT = 0x0,
MCU_EXT_EVENT_RF_REG_ACCESS = 0x2,
MCU_EXT_EVENT_MULTI_CR_ACCESS = 0x0E,
MCU_EXT_EVENT_FW_LOG_2_HOST = 0x13,
MCU_EXT_EVENT_BEACON_LOSS = 0x1A,
MCU_EXT_EVENT_THERMAL_PROTECT = 0x22,
MCU_EXT_EVENT_BCN_UPDATE = 0x31,
};
static inline struct sk_buff *
mt7603_mcu_msg_alloc(const void *data, int len)
{
return mt76_mcu_msg_alloc(data, sizeof(struct mt7603_mcu_txd),
len, 0);
}
#endif

View File

@ -0,0 +1,253 @@
/* SPDX-License-Identifier: ISC */
#ifndef __MT7603_H
#define __MT7603_H
#include <linux/interrupt.h>
#include <linux/ktime.h>
#include "../mt76.h"
#include "regs.h"
#define MT7603_MAX_INTERFACES 4
#define MT7603_WTBL_SIZE 128
#define MT7603_WTBL_RESERVED (MT7603_WTBL_SIZE - 1)
#define MT7603_WTBL_STA (MT7603_WTBL_RESERVED - MT7603_MAX_INTERFACES)
#define MT7603_RATE_RETRY 2
#define MT7603_RX_RING_SIZE 128
#define MT7603_FIRMWARE_E1 "mt7603_e1.bin"
#define MT7603_FIRMWARE_E2 "mt7603_e2.bin"
#define MT7628_FIRMWARE_E1 "mt7628_e1.bin"
#define MT7628_FIRMWARE_E2 "mt7628_e2.bin"
#define MT7603_EEPROM_SIZE 1024
#define MT_AGG_SIZE_LIMIT(_n) (((_n) + 1) * 4)
#define MT7603_PRE_TBTT_TIME 5000 /* ms */
#define MT7603_WATCHDOG_TIME 100 /* ms */
#define MT7603_WATCHDOG_TIMEOUT 10 /* number of checks */
#define MT7603_EDCCA_BLOCK_TH 10
#define MT7603_CFEND_RATE_DEFAULT 0x69 /* chip default (24M) */
#define MT7603_CFEND_RATE_11B 0x03 /* 11B LP, 11M */
struct mt7603_vif;
struct mt7603_sta;
enum {
MT7603_REV_E1 = 0x00,
MT7603_REV_E2 = 0x10,
MT7628_REV_E1 = 0x8a00,
};
enum mt7603_bw {
MT_BW_20,
MT_BW_40,
MT_BW_80,
};
struct mt7603_sta {
struct mt76_wcid wcid; /* must be first */
struct mt7603_vif *vif;
struct sk_buff_head psq;
struct ieee80211_tx_rate rates[8];
u8 rate_count;
u8 n_rates;
u8 rate_probe;
u8 smps;
u8 ps;
};
struct mt7603_vif {
struct mt7603_sta sta; /* must be first */
u8 idx;
};
enum mt7603_reset_cause {
RESET_CAUSE_TX_HANG,
RESET_CAUSE_TX_BUSY,
RESET_CAUSE_RX_BUSY,
RESET_CAUSE_BEACON_STUCK,
RESET_CAUSE_RX_PSE_BUSY,
RESET_CAUSE_MCU_HANG,
RESET_CAUSE_RESET_FAILED,
__RESET_CAUSE_MAX
};
struct mt7603_dev {
struct mt76_dev mt76; /* must be first */
const struct mt76_bus_ops *bus_ops;
u32 rxfilter;
u8 vif_mask;
struct mt7603_sta global_sta;
u32 agc0, agc3;
u32 false_cca_ofdm, false_cca_cck;
unsigned long last_cca_adj;
u8 rssi_offset[3];
u8 slottime;
s16 coverage_class;
s8 tx_power_limit;
ktime_t survey_time;
ktime_t ed_time;
int beacon_int;
struct mt76_queue q_rx;
spinlock_t ps_lock;
u8 mac_work_count;
u8 mcu_running;
u8 ed_monitor;
s8 ed_trigger;
u8 ed_strict_mode;
u8 ed_strong_signal;
s8 sensitivity;
u8 beacon_mask;
u8 beacon_check;
u8 tx_hang_check;
u8 tx_dma_check;
u8 rx_dma_check;
u8 rx_pse_check;
u8 mcu_hang;
enum mt7603_reset_cause cur_reset_cause;
u16 tx_dma_idx[4];
u16 rx_dma_idx;
u32 reset_test;
unsigned int reset_cause[__RESET_CAUSE_MAX];
struct delayed_work mac_work;
struct tasklet_struct tx_tasklet;
struct tasklet_struct pre_tbtt_tasklet;
};
extern const struct mt76_driver_ops mt7603_drv_ops;
extern const struct ieee80211_ops mt7603_ops;
extern struct pci_driver mt7603_pci_driver;
extern struct platform_driver mt76_wmac_driver;
static inline bool is_mt7603(struct mt7603_dev *dev)
{
return mt76xx_chip(dev) == 0x7603;
}
static inline bool is_mt7628(struct mt7603_dev *dev)
{
return mt76xx_chip(dev) == 0x7628;
}
/* need offset to prevent conflict with ampdu_ack_len */
#define MT_RATE_DRIVER_DATA_OFFSET 4
u32 mt7603_reg_map(struct mt7603_dev *dev, u32 addr);
irqreturn_t mt7603_irq_handler(int irq, void *dev_instance);
int mt7603_register_device(struct mt7603_dev *dev);
void mt7603_unregister_device(struct mt7603_dev *dev);
int mt7603_eeprom_init(struct mt7603_dev *dev);
int mt7603_dma_init(struct mt7603_dev *dev);
void mt7603_dma_cleanup(struct mt7603_dev *dev);
int mt7603_mcu_init(struct mt7603_dev *dev);
void mt7603_init_debugfs(struct mt7603_dev *dev);
void mt7603_set_irq_mask(struct mt7603_dev *dev, u32 clear, u32 set);
static inline void mt7603_irq_enable(struct mt7603_dev *dev, u32 mask)
{
mt7603_set_irq_mask(dev, 0, mask);
}
static inline void mt7603_irq_disable(struct mt7603_dev *dev, u32 mask)
{
mt7603_set_irq_mask(dev, mask, 0);
}
void mt7603_mac_dma_start(struct mt7603_dev *dev);
void mt7603_mac_start(struct mt7603_dev *dev);
void mt7603_mac_stop(struct mt7603_dev *dev);
void mt7603_mac_work(struct work_struct *work);
void mt7603_mac_set_timing(struct mt7603_dev *dev);
void mt7603_beacon_set_timer(struct mt7603_dev *dev, int idx, int intval);
int mt7603_mac_fill_rx(struct mt7603_dev *dev, struct sk_buff *skb);
void mt7603_mac_add_txs(struct mt7603_dev *dev, void *data);
void mt7603_mac_rx_ba_reset(struct mt7603_dev *dev, void *addr, u8 tid);
void mt7603_mac_tx_ba_reset(struct mt7603_dev *dev, int wcid, int tid, int ssn,
int ba_size);
void mt7603_pse_client_reset(struct mt7603_dev *dev);
int mt7603_mcu_set_channel(struct mt7603_dev *dev);
int mt7603_mcu_set_eeprom(struct mt7603_dev *dev);
void mt7603_mcu_exit(struct mt7603_dev *dev);
void mt7603_wtbl_init(struct mt7603_dev *dev, int idx, int vif,
const u8 *mac_addr);
void mt7603_wtbl_clear(struct mt7603_dev *dev, int idx);
void mt7603_wtbl_update_cap(struct mt7603_dev *dev, struct ieee80211_sta *sta);
void mt7603_wtbl_set_rates(struct mt7603_dev *dev, struct mt7603_sta *sta,
struct ieee80211_tx_rate *probe_rate,
struct ieee80211_tx_rate *rates);
int mt7603_wtbl_set_key(struct mt7603_dev *dev, int wcid,
struct ieee80211_key_conf *key);
void mt7603_wtbl_set_ps(struct mt7603_dev *dev, struct mt7603_sta *sta,
bool enabled);
void mt7603_wtbl_set_smps(struct mt7603_dev *dev, struct mt7603_sta *sta,
bool enabled);
void mt7603_filter_tx(struct mt7603_dev *dev, int idx, bool abort);
int mt7603_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
struct sk_buff *skb, struct mt76_queue *q,
struct mt76_wcid *wcid, struct ieee80211_sta *sta,
u32 *tx_info);
void mt7603_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue *q,
struct mt76_queue_entry *e, bool flush);
void mt7603_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
struct sk_buff *skb);
void mt7603_rx_poll_complete(struct mt76_dev *mdev, enum mt76_rxq_id q);
void mt7603_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta, bool ps);
int mt7603_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
struct ieee80211_sta *sta);
void mt7603_sta_assoc(struct mt76_dev *mdev, struct ieee80211_vif *vif,
struct ieee80211_sta *sta);
void mt7603_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif,
struct ieee80211_sta *sta);
void mt7603_pre_tbtt_tasklet(unsigned long arg);
void mt7603_update_channel(struct mt76_dev *mdev);
void mt7603_edcca_set_strict(struct mt7603_dev *dev, bool val);
void mt7603_cca_stats_reset(struct mt7603_dev *dev);
#endif

View File

@ -0,0 +1,80 @@
/* SPDX-License-Identifier: ISC */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include "mt7603.h"
static const struct pci_device_id mt76pci_device_table[] = {
{ PCI_DEVICE(0x14c3, 0x7603) },
{ },
};
static int
mt76pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct mt7603_dev *dev;
struct mt76_dev *mdev;
int ret;
ret = pcim_enable_device(pdev);
if (ret)
return ret;
ret = pcim_iomap_regions(pdev, BIT(0), pci_name(pdev));
if (ret)
return ret;
pci_set_master(pdev);
ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
if (ret)
return ret;
mdev = mt76_alloc_device(&pdev->dev, sizeof(*dev), &mt7603_ops,
&mt7603_drv_ops);
if (!mdev)
return -ENOMEM;
dev = container_of(mdev, struct mt7603_dev, mt76);
mt76_mmio_init(mdev, pcim_iomap_table(pdev)[0]);
mdev->rev = (mt76_rr(dev, MT_HW_CHIPID) << 16) |
(mt76_rr(dev, MT_HW_REV) & 0xff);
dev_info(mdev->dev, "ASIC revision: %04x\n", mdev->rev);
ret = devm_request_irq(mdev->dev, pdev->irq, mt7603_irq_handler,
IRQF_SHARED, KBUILD_MODNAME, dev);
if (ret)
goto error;
ret = mt7603_register_device(dev);
if (ret)
goto error;
return 0;
error:
ieee80211_free_hw(mt76_hw(dev));
return ret;
}
static void
mt76pci_remove(struct pci_dev *pdev)
{
struct mt76_dev *mdev = pci_get_drvdata(pdev);
struct mt7603_dev *dev = container_of(mdev, struct mt7603_dev, mt76);
mt7603_unregister_device(dev);
}
MODULE_DEVICE_TABLE(pci, mt76pci_device_table);
MODULE_FIRMWARE(MT7603_FIRMWARE_E1);
MODULE_FIRMWARE(MT7603_FIRMWARE_E2);
struct pci_driver mt7603_pci_driver = {
.name = KBUILD_MODNAME,
.id_table = mt76pci_device_table,
.probe = mt76pci_probe,
.remove = mt76pci_remove,
};

View File

@ -0,0 +1,774 @@
/* SPDX-License-Identifier: ISC */
#ifndef __MT7603_REGS_H
#define __MT7603_REGS_H
#define MT_HW_REV 0x1000
#define MT_HW_CHIPID 0x1008
#define MT_TOP_MISC2 0x1134
#define MT_MCU_BASE 0x2000
#define MT_MCU(ofs) (MT_MCU_BASE + (ofs))
#define MT_MCU_PCIE_REMAP_1 MT_MCU(0x500)
#define MT_MCU_PCIE_REMAP_1_OFFSET GENMASK(17, 0)
#define MT_MCU_PCIE_REMAP_1_BASE GENMASK(31, 18)
#define MT_MCU_PCIE_REMAP_2 MT_MCU(0x504)
#define MT_MCU_PCIE_REMAP_2_OFFSET GENMASK(18, 0)
#define MT_MCU_PCIE_REMAP_2_BASE GENMASK(31, 19)
#define MT_HIF_BASE 0x4000
#define MT_HIF(ofs) (MT_HIF_BASE + (ofs))
#define MT_INT_SOURCE_CSR MT_HIF(0x200)
#define MT_INT_MASK_CSR MT_HIF(0x204)
#define MT_DELAY_INT_CFG MT_HIF(0x210)
#define MT_INT_RX_DONE(_n) BIT(_n)
#define MT_INT_RX_DONE_ALL GENMASK(1, 0)
#define MT_INT_TX_DONE_ALL GENMASK(19, 4)
#define MT_INT_TX_DONE(_n) BIT((_n) + 4)
#define MT_INT_RX_COHERENT BIT(20)
#define MT_INT_TX_COHERENT BIT(21)
#define MT_INT_MAC_IRQ3 BIT(27)
#define MT_INT_MCU_CMD BIT(30)
#define MT_WPDMA_GLO_CFG MT_HIF(0x208)
#define MT_WPDMA_GLO_CFG_TX_DMA_EN BIT(0)
#define MT_WPDMA_GLO_CFG_TX_DMA_BUSY BIT(1)
#define MT_WPDMA_GLO_CFG_RX_DMA_EN BIT(2)
#define MT_WPDMA_GLO_CFG_RX_DMA_BUSY BIT(3)
#define MT_WPDMA_GLO_CFG_DMA_BURST_SIZE GENMASK(5, 4)
#define MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE BIT(6)
#define MT_WPDMA_GLO_CFG_BIG_ENDIAN BIT(7)
#define MT_WPDMA_GLO_CFG_HDR_SEG_LEN GENMASK(15, 8)
#define MT_WPDMA_GLO_CFG_SW_RESET BIT(24)
#define MT_WPDMA_GLO_CFG_FORCE_TX_EOF BIT(25)
#define MT_WPDMA_GLO_CFG_CLK_GATE_DIS BIT(30)
#define MT_WPDMA_GLO_CFG_RX_2B_OFFSET BIT(31)
#define MT_WPDMA_RST_IDX MT_HIF(0x20c)
#define MT_WPDMA_DEBUG MT_HIF(0x244)
#define MT_WPDMA_DEBUG_VALUE GENMASK(17, 0)
#define MT_WPDMA_DEBUG_SEL BIT(27)
#define MT_WPDMA_DEBUG_IDX GENMASK(31, 28)
#define MT_TX_RING_BASE MT_HIF(0x300)
#define MT_RX_RING_BASE MT_HIF(0x400)
#define MT_TXTIME_THRESH_BASE MT_HIF(0x500)
#define MT_TXTIME_THRESH(n) (MT_TXTIME_THRESH_BASE + ((n) * 4))
#define MT_PAGE_COUNT_BASE MT_HIF(0x540)
#define MT_PAGE_COUNT(n) (MT_PAGE_COUNT_BASE + ((n) * 4))
#define MT_SCH_1 MT_HIF(0x588)
#define MT_SCH_2 MT_HIF(0x58c)
#define MT_SCH_3 MT_HIF(0x590)
#define MT_SCH_4 MT_HIF(0x594)
#define MT_SCH_4_FORCE_QID GENMASK(4, 0)
#define MT_SCH_4_BYPASS BIT(5)
#define MT_SCH_4_RESET BIT(8)
#define MT_GROUP_THRESH_BASE MT_HIF(0x598)
#define MT_GROUP_THRESH(n) (MT_GROUP_THRESH_BASE + ((n) * 4))
#define MT_QUEUE_PRIORITY_1 MT_HIF(0x580)
#define MT_QUEUE_PRIORITY_2 MT_HIF(0x584)
#define MT_BMAP_0 MT_HIF(0x5b0)
#define MT_BMAP_1 MT_HIF(0x5b4)
#define MT_BMAP_2 MT_HIF(0x5b8)
#define MT_HIGH_PRIORITY_1 MT_HIF(0x5bc)
#define MT_HIGH_PRIORITY_2 MT_HIF(0x5c0)
#define MT_PRIORITY_MASK MT_HIF(0x5c4)
#define MT_RSV_MAX_THRESH MT_HIF(0x5c8)
#define MT_PSE_BASE 0x8000
#define MT_PSE(ofs) (MT_PSE_BASE + (ofs))
#define MT_MCU_DEBUG_RESET MT_PSE(0x16c)
#define MT_MCU_DEBUG_RESET_PSE BIT(0)
#define MT_MCU_DEBUG_RESET_PSE_S BIT(1)
#define MT_MCU_DEBUG_RESET_QUEUES GENMASK(6, 2)
#define MT_PSE_FC_P0 MT_PSE(0x120)
#define MT_PSE_FC_P0_MIN_RESERVE GENMASK(11, 0)
#define MT_PSE_FC_P0_MAX_QUOTA GENMASK(27, 16)
#define MT_PSE_FRP MT_PSE(0x138)
#define MT_PSE_FRP_P0 GENMASK(2, 0)
#define MT_PSE_FRP_P1 GENMASK(5, 3)
#define MT_PSE_FRP_P2_RQ0 GENMASK(8, 6)
#define MT_PSE_FRP_P2_RQ1 GENMASK(11, 9)
#define MT_PSE_FRP_P2_RQ2 GENMASK(14, 12)
#define MT_FC_RSV_COUNT_0 MT_PSE(0x13c)
#define MT_FC_RSV_COUNT_0_P0 GENMASK(11, 0)
#define MT_FC_RSV_COUNT_0_P1 GENMASK(27, 16)
#define MT_FC_SP2_Q0Q1 MT_PSE(0x14c)
#define MT_FC_SP2_Q0Q1_SRC_COUNT_Q0 GENMASK(11, 0)
#define MT_FC_SP2_Q0Q1_SRC_COUNT_Q1 GENMASK(27, 16)
#define MT_PSE_FW_SHARED MT_PSE(0x17c)
#define MT_PSE_RTA MT_PSE(0x194)
#define MT_PSE_RTA_QUEUE_ID GENMASK(4, 0)
#define MT_PSE_RTA_PORT_ID GENMASK(6, 5)
#define MT_PSE_RTA_REDIRECT_EN BIT(7)
#define MT_PSE_RTA_TAG_ID GENMASK(15, 8)
#define MT_PSE_RTA_WRITE BIT(16)
#define MT_PSE_RTA_BUSY BIT(31)
#define MT_WF_PHY_BASE 0x10000
#define MT_WF_PHY_OFFSET 0x1000
#define MT_WF_PHY(ofs) (MT_WF_PHY_BASE + (ofs))
#define MT_AGC_BASE MT_WF_PHY(0x500)
#define MT_AGC(n) (MT_AGC_BASE + ((n) * 4))
#define MT_AGC1_BASE MT_WF_PHY(0x1500)
#define MT_AGC1(n) (MT_AGC1_BASE + ((n) * 4))
#define MT_AGC_41_RSSI_0 GENMASK(23, 16)
#define MT_AGC_41_RSSI_1 GENMASK(7, 0)
#define MT_RXTD_BASE MT_WF_PHY(0x600)
#define MT_RXTD(n) (MT_RXTD_BASE + ((n) * 4))
#define MT_RXTD_6_ACI_TH GENMASK(4, 0)
#define MT_RXTD_6_CCAED_TH GENMASK(14, 8)
#define MT_RXTD_8_LOWER_SIGNAL GENMASK(5, 0)
#define MT_RXTD_13_ACI_TH_EN BIT(0)
#define MT_WF_PHY_CR_TSSI_BASE MT_WF_PHY(0xd00)
#define MT_WF_PHY_CR_TSSI(phy, n) (MT_WF_PHY_CR_TSSI_BASE + \
((phy) * MT_WF_PHY_OFFSET) + \
((n) * 4))
#define MT_PHYCTRL_BASE MT_WF_PHY(0x4100)
#define MT_PHYCTRL(n) (MT_PHYCTRL_BASE + ((n) * 4))
#define MT_PHYCTRL_2_STATUS_RESET BIT(6)
#define MT_PHYCTRL_2_STATUS_EN BIT(7)
#define MT_PHYCTRL_STAT_PD MT_PHYCTRL(3)
#define MT_PHYCTRL_STAT_PD_OFDM GENMASK(31, 16)
#define MT_PHYCTRL_STAT_PD_CCK GENMASK(15, 0)
#define MT_PHYCTRL_STAT_MDRDY MT_PHYCTRL(8)
#define MT_PHYCTRL_STAT_MDRDY_OFDM GENMASK(31, 16)
#define MT_PHYCTRL_STAT_MDRDY_CCK GENMASK(15, 0)
#define MT_WF_AGG_BASE 0x21200
#define MT_WF_AGG(ofs) (MT_WF_AGG_BASE + (ofs))
#define MT_AGG_ARCR MT_WF_AGG(0x010)
#define MT_AGG_ARCR_INIT_RATE1 BIT(0)
#define MT_AGG_ARCR_FB_SGI_DISABLE BIT(1)
#define MT_AGG_ARCR_RATE8_DOWN_WRAP BIT(2)
#define MT_AGG_ARCR_RTS_RATE_THR GENMASK(12, 8)
#define MT_AGG_ARCR_RATE_DOWN_RATIO GENMASK(17, 16)
#define MT_AGG_ARCR_RATE_DOWN_RATIO_EN BIT(19)
#define MT_AGG_ARCR_RATE_UP_EXTRA_TH GENMASK(22, 20)
#define MT_AGG_ARCR_SPE_DIS_TH GENMASK(27, 24)
#define MT_AGG_ARUCR MT_WF_AGG(0x014)
#define MT_AGG_ARDCR MT_WF_AGG(0x018)
#define MT_AGG_ARxCR_LIMIT_SHIFT(_n) (4 * (_n))
#define MT_AGG_ARxCR_LIMIT(_n) GENMASK(2 + \
MT_AGG_ARxCR_LIMIT_SHIFT(_n), \
MT_AGG_ARxCR_LIMIT_SHIFT(_n))
#define MT_AGG_LIMIT MT_WF_AGG(0x040)
#define MT_AGG_LIMIT_1 MT_WF_AGG(0x044)
#define MT_AGG_LIMIT_AC(_n) GENMASK(((_n) + 1) * 8 - 1, (_n) * 8)
#define MT_AGG_BA_SIZE_LIMIT_0 MT_WF_AGG(0x048)
#define MT_AGG_BA_SIZE_LIMIT_1 MT_WF_AGG(0x04c)
#define MT_AGG_BA_SIZE_LIMIT_SHIFT 8
#define MT_AGG_PCR MT_WF_AGG(0x050)
#define MT_AGG_PCR_MM BIT(16)
#define MT_AGG_PCR_GF BIT(17)
#define MT_AGG_PCR_BW40 BIT(18)
#define MT_AGG_PCR_RIFS BIT(19)
#define MT_AGG_PCR_BW80 BIT(20)
#define MT_AGG_PCR_BW160 BIT(21)
#define MT_AGG_PCR_ERP BIT(22)
#define MT_AGG_PCR_RTS MT_WF_AGG(0x054)
#define MT_AGG_PCR_RTS_THR GENMASK(19, 0)
#define MT_AGG_PCR_RTS_PKT_THR GENMASK(31, 25)
#define MT_AGG_CONTROL MT_WF_AGG(0x070)
#define MT_AGG_CONTROL_NO_BA_RULE BIT(0)
#define MT_AGG_CONTROL_NO_BA_AR_RULE BIT(1)
#define MT_AGG_CONTROL_CFEND_SPE_EN BIT(3)
#define MT_AGG_CONTROL_CFEND_RATE GENMASK(15, 4)
#define MT_AGG_CONTROL_BAR_SPE_EN BIT(19)
#define MT_AGG_CONTROL_BAR_RATE GENMASK(31, 20)
#define MT_AGG_TMP MT_WF_AGG(0x0d8)
#define MT_AGG_BWCR MT_WF_AGG(0x0ec)
#define MT_AGG_BWCR_BW GENMASK(3, 2)
#define MT_AGG_RETRY_CONTROL MT_WF_AGG(0x0f4)
#define MT_AGG_RETRY_CONTROL_RTS_LIMIT GENMASK(11, 7)
#define MT_AGG_RETRY_CONTROL_BAR_LIMIT GENMASK(15, 12)
#define MT_WF_DMA_BASE 0x21c00
#define MT_WF_DMA(ofs) (MT_WF_DMA_BASE + (ofs))
#define MT_DMA_DCR0 MT_WF_DMA(0x000)
#define MT_DMA_DCR1 MT_WF_DMA(0x004)
#define MT_DMA_FQCR0 MT_WF_DMA(0x008)
#define MT_DMA_FQCR0_TARGET_WCID GENMASK(7, 0)
#define MT_DMA_FQCR0_TARGET_BSS GENMASK(13, 8)
#define MT_DMA_FQCR0_TARGET_QID GENMASK(20, 16)
#define MT_DMA_FQCR0_DEST_PORT_ID GENMASK(23, 22)
#define MT_DMA_FQCR0_DEST_QUEUE_ID GENMASK(28, 24)
#define MT_DMA_FQCR0_MODE BIT(29)
#define MT_DMA_FQCR0_STATUS BIT(30)
#define MT_DMA_FQCR0_BUSY BIT(31)
#define MT_DMA_RCFR0 MT_WF_DMA(0x070)
#define MT_DMA_VCFR0 MT_WF_DMA(0x07c)
#define MT_DMA_TCFR0 MT_WF_DMA(0x080)
#define MT_DMA_TCFR1 MT_WF_DMA(0x084)
#define MT_DMA_TCFR_TXS_AGGR_TIMEOUT GENMASK(27, 16)
#define MT_DMA_TCFR_TXS_QUEUE BIT(14)
#define MT_DMA_TCFR_TXS_AGGR_COUNT GENMASK(12, 8)
#define MT_DMA_TCFR_TXS_BIT_MAP GENMASK(6, 0)
#define MT_DMA_TMCFR0 MT_WF_DMA(0x088)
#define MT_WF_ARB_BASE 0x21400
#define MT_WF_ARB(ofs) (MT_WF_ARB_BASE + (ofs))
#define MT_WMM_AIFSN MT_WF_ARB(0x020)
#define MT_WMM_AIFSN_MASK GENMASK(3, 0)
#define MT_WMM_AIFSN_SHIFT(_n) ((_n) * 4)
#define MT_WMM_CWMAX_BASE MT_WF_ARB(0x028)
#define MT_WMM_CWMAX(_n) (MT_WMM_CWMAX_BASE + (((_n) / 2) << 2))
#define MT_WMM_CWMAX_SHIFT(_n) (((_n) & 1) * 16)
#define MT_WMM_CWMAX_MASK GENMASK(15, 0)
#define MT_WMM_CWMIN MT_WF_ARB(0x040)
#define MT_WMM_CWMIN_MASK GENMASK(7, 0)
#define MT_WMM_CWMIN_SHIFT(_n) ((_n) * 8)
#define MT_WF_ARB_RQCR MT_WF_ARB(0x070)
#define MT_WF_ARB_RQCR_RX_START BIT(0)
#define MT_WF_ARB_RQCR_RXV_START BIT(4)
#define MT_WF_ARB_RQCR_RXV_R_EN BIT(7)
#define MT_WF_ARB_RQCR_RXV_T_EN BIT(8)
#define MT_ARB_SCR MT_WF_ARB(0x080)
#define MT_ARB_SCR_BCNQ_OPMODE_MASK GENMASK(1, 0)
#define MT_ARB_SCR_BCNQ_OPMODE_SHIFT(n) ((n) * 2)
#define MT_ARB_SCR_TX_DISABLE BIT(8)
#define MT_ARB_SCR_RX_DISABLE BIT(9)
#define MT_ARB_SCR_BCNQ_EMPTY_SKIP BIT(28)
#define MT_ARB_SCR_TTTT_BTIM_PRIO BIT(29)
#define MT_ARB_SCR_TBTT_BCN_PRIO BIT(30)
#define MT_ARB_SCR_TBTT_BCAST_PRIO BIT(31)
enum {
MT_BCNQ_OPMODE_STA = 0,
MT_BCNQ_OPMODE_AP = 1,
MT_BCNQ_OPMODE_ADHOC = 2,
};
#define MT_WF_ARB_TX_START_0 MT_WF_ARB(0x100)
#define MT_WF_ARB_TX_START_1 MT_WF_ARB(0x104)
#define MT_WF_ARB_TX_FLUSH_0 MT_WF_ARB(0x108)
#define MT_WF_ARB_TX_FLUSH_1 MT_WF_ARB(0x10c)
#define MT_WF_ARB_TX_STOP_0 MT_WF_ARB(0x110)
#define MT_WF_ARB_TX_STOP_1 MT_WF_ARB(0x114)
#define MT_WF_ARB_BCN_START MT_WF_ARB(0x118)
#define MT_WF_ARB_BCN_START_BSSn(n) BIT(0 + (n))
#define MT_WF_ARB_BCN_START_T_PRE_TTTT BIT(10)
#define MT_WF_ARB_BCN_START_T_TTTT BIT(11)
#define MT_WF_ARB_BCN_START_T_PRE_TBTT BIT(12)
#define MT_WF_ARB_BCN_START_T_TBTT BIT(13)
#define MT_WF_ARB_BCN_START_T_SLOT_IDLE BIT(14)
#define MT_WF_ARB_BCN_START_T_TX_START BIT(15)
#define MT_WF_ARB_BCN_START_BSS0n(n) BIT((n) ? 16 + ((n) - 1) : 0)
#define MT_WF_ARB_BCN_FLUSH MT_WF_ARB(0x11c)
#define MT_WF_ARB_BCN_FLUSH_BSSn(n) BIT(0 + (n))
#define MT_WF_ARB_BCN_FLUSH_BSS0n(n) BIT((n) ? 16 + ((n) - 1) : 0)
#define MT_WF_ARB_CAB_START MT_WF_ARB(0x120)
#define MT_WF_ARB_CAB_START_BSSn(n) BIT(0 + (n))
#define MT_WF_ARB_CAB_START_BSS0n(n) BIT((n) ? 16 + ((n) - 1) : 0)
#define MT_WF_ARB_CAB_FLUSH MT_WF_ARB(0x124)
#define MT_WF_ARB_CAB_FLUSH_BSSn(n) BIT(0 + (n))
#define MT_WF_ARB_CAB_FLUSH_BSS0n(n) BIT((n) ? 16 + ((n) - 1) : 0)
#define MT_WF_ARB_CAB_COUNT(n) MT_WF_ARB(0x128 + (n) * 4)
#define MT_WF_ARB_CAB_COUNT_SHIFT 4
#define MT_WF_ARB_CAB_COUNT_MASK GENMASK(3, 0)
#define MT_WF_ARB_CAB_COUNT_B0_REG(n) MT_WF_ARB_CAB_COUNT(((n) > 12 ? 2 : \
((n) > 4 ? 1 : 0)))
#define MT_WF_ARB_CAB_COUNT_B0_SHIFT(n) (((n) > 12 ? (n) - 12 : \
((n) > 4 ? (n) - 4 : \
(n) ? (n) + 3 : 0)) * 4)
#define MT_TX_ABORT MT_WF_ARB(0x134)
#define MT_TX_ABORT_EN BIT(0)
#define MT_TX_ABORT_WCID GENMASK(15, 8)
#define MT_WF_TMAC_BASE 0x21600
#define MT_WF_TMAC(ofs) (MT_WF_TMAC_BASE + (ofs))
#define MT_TMAC_TCR MT_WF_TMAC(0x000)
#define MT_TMAC_TCR_BLINK_SEL GENMASK(7, 6)
#define MT_TMAC_TCR_PRE_RTS_GUARD GENMASK(11, 8)
#define MT_TMAC_TCR_PRE_RTS_SEC_IDLE GENMASK(13, 12)
#define MT_TMAC_TCR_RTS_SIGTA BIT(14)
#define MT_TMAC_TCR_LDPC_OFS BIT(15)
#define MT_TMAC_TCR_TX_STREAMS GENMASK(17, 16)
#define MT_TMAC_TCR_SCH_IDLE_SEL GENMASK(19, 18)
#define MT_TMAC_TCR_SCH_DET_PER_IOD BIT(20)
#define MT_TMAC_TCR_DCH_DET_DISABLE BIT(21)
#define MT_TMAC_TCR_TX_RIFS BIT(22)
#define MT_TMAC_TCR_RX_RIFS_MODE BIT(23)
#define MT_TMAC_TCR_TXOP_TBTT_CTL BIT(24)
#define MT_TMAC_TCR_TBTT_TX_STOP_CTL BIT(25)
#define MT_TMAC_TCR_TXOP_BURST_STOP BIT(26)
#define MT_TMAC_TCR_RDG_RA_MODE BIT(27)
#define MT_TMAC_TCR_RDG_RESP BIT(29)
#define MT_TMAC_TCR_RDG_NO_PENDING BIT(30)
#define MT_TMAC_TCR_SMOOTHING BIT(31)
#define MT_WMM_TXOP_BASE MT_WF_TMAC(0x010)
#define MT_WMM_TXOP(_n) (MT_WMM_TXOP_BASE + \
((((_n) / 2) ^ 0x1) << 2))
#define MT_WMM_TXOP_SHIFT(_n) (((_n) & 1) * 16)
#define MT_WMM_TXOP_MASK GENMASK(15, 0)
#define MT_TIMEOUT_CCK MT_WF_TMAC(0x090)
#define MT_TIMEOUT_OFDM MT_WF_TMAC(0x094)
#define MT_TIMEOUT_VAL_PLCP GENMASK(15, 0)
#define MT_TIMEOUT_VAL_CCA GENMASK(31, 16)
#define MT_TXREQ MT_WF_TMAC(0x09c)
#define MT_TXREQ_CCA_SRC_SEL GENMASK(31, 30)
#define MT_RXREQ MT_WF_TMAC(0x0a0)
#define MT_RXREQ_DELAY GENMASK(8, 0)
#define MT_IFS MT_WF_TMAC(0x0a4)
#define MT_IFS_EIFS GENMASK(8, 0)
#define MT_IFS_RIFS GENMASK(14, 10)
#define MT_IFS_SIFS GENMASK(22, 16)
#define MT_IFS_SLOT GENMASK(30, 24)
#define MT_TMAC_PCR MT_WF_TMAC(0x0b4)
#define MT_TMAC_PCR_RATE GENMASK(8, 0)
#define MT_TMAC_PCR_RATE_FIXED BIT(15)
#define MT_TMAC_PCR_ANT_ID GENMASK(21, 16)
#define MT_TMAC_PCR_ANT_ID_SEL BIT(22)
#define MT_TMAC_PCR_SPE_EN BIT(23)
#define MT_TMAC_PCR_ANT_PRI GENMASK(26, 24)
#define MT_TMAC_PCR_ANT_PRI_SEL BIT(27)
#define MT_WF_RMAC_BASE 0x21800
#define MT_WF_RMAC(ofs) (MT_WF_RMAC_BASE + (ofs))
#define MT_WF_RFCR MT_WF_RMAC(0x000)
#define MT_WF_RFCR_DROP_STBC_MULTI BIT(0)
#define MT_WF_RFCR_DROP_FCSFAIL BIT(1)
#define MT_WF_RFCR_DROP_VERSION BIT(3)
#define MT_WF_RFCR_DROP_PROBEREQ BIT(4)
#define MT_WF_RFCR_DROP_MCAST BIT(5)
#define MT_WF_RFCR_DROP_BCAST BIT(6)
#define MT_WF_RFCR_DROP_MCAST_FILTERED BIT(7)
#define MT_WF_RFCR_DROP_A3_MAC BIT(8)
#define MT_WF_RFCR_DROP_A3_BSSID BIT(9)
#define MT_WF_RFCR_DROP_A2_BSSID BIT(10)
#define MT_WF_RFCR_DROP_OTHER_BEACON BIT(11)
#define MT_WF_RFCR_DROP_FRAME_REPORT BIT(12)
#define MT_WF_RFCR_DROP_CTL_RSV BIT(13)
#define MT_WF_RFCR_DROP_CTS BIT(14)
#define MT_WF_RFCR_DROP_RTS BIT(15)
#define MT_WF_RFCR_DROP_DUPLICATE BIT(16)
#define MT_WF_RFCR_DROP_OTHER_BSS BIT(17)
#define MT_WF_RFCR_DROP_OTHER_UC BIT(18)
#define MT_WF_RFCR_DROP_OTHER_TIM BIT(19)
#define MT_WF_RFCR_DROP_NDPA BIT(20)
#define MT_WF_RFCR_DROP_UNWANTED_CTL BIT(21)
#define MT_BSSID0(idx) MT_WF_RMAC(0x004 + (idx) * 8)
#define MT_BSSID1(idx) MT_WF_RMAC(0x008 + (idx) * 8)
#define MT_BSSID1_VALID BIT(16)
#define MT_MAC_ADDR0(idx) MT_WF_RMAC(0x024 + (idx) * 8)
#define MT_MAC_ADDR1(idx) MT_WF_RMAC(0x028 + (idx) * 8)
#define MT_MAC_ADDR1_ADDR GENMASK(15, 0)
#define MT_MAC_ADDR1_VALID BIT(16)
#define MT_BA_CONTROL_0 MT_WF_RMAC(0x068)
#define MT_BA_CONTROL_1 MT_WF_RMAC(0x06c)
#define MT_BA_CONTROL_1_ADDR GENMASK(15, 0)
#define MT_BA_CONTROL_1_TID GENMASK(19, 16)
#define MT_BA_CONTROL_1_IGNORE_TID BIT(20)
#define MT_BA_CONTROL_1_IGNORE_ALL BIT(21)
#define MT_BA_CONTROL_1_RESET BIT(22)
#define MT_WF_RMACDR MT_WF_RMAC(0x078)
#define MT_WF_RMACDR_TSF_PROBERSP_DIS BIT(0)
#define MT_WF_RMACDR_TSF_TIM BIT(4)
#define MT_WF_RMACDR_MBSSID_MASK GENMASK(25, 24)
#define MT_WF_RMACDR_CHECK_HTC_BY_RATE BIT(26)
#define MT_WF_RMACDR_MAXLEN_20BIT BIT(30)
#define MT_WF_RMAC_RMCR MT_WF_RMAC(0x080)
#define MT_WF_RMAC_RMCR_SMPS_MODE GENMASK(21, 20)
#define MT_WF_RMAC_RMCR_RX_STREAMS GENMASK(24, 22)
#define MT_WF_RMAC_RMCR_SMPS_RTS BIT(25)
#define MT_WF_RMAC_CH_FREQ MT_WF_RMAC(0x090)
#define MT_WF_RMAC_MAXMINLEN MT_WF_RMAC(0x098)
#define MT_WF_RFCR1 MT_WF_RMAC(0x0a4)
#define MT_WF_RMAC_TMR_PA MT_WF_RMAC(0x0e0)
#define MT_WF_SEC_BASE 0x21a00
#define MT_WF_SEC(ofs) (MT_WF_SEC_BASE + (ofs))
#define MT_SEC_SCR MT_WF_SEC(0x004)
#define MT_SEC_SCR_MASK_ORDER GENMASK(1, 0)
#define MT_WTBL_OFF_BASE 0x23000
#define MT_WTBL_OFF(n) (MT_WTBL_OFF_BASE + (n))
#define MT_WTBL_UPDATE MT_WTBL_OFF(0x000)
#define MT_WTBL_UPDATE_WLAN_IDX GENMASK(7, 0)
#define MT_WTBL_UPDATE_WTBL2 BIT(11)
#define MT_WTBL_UPDATE_ADM_COUNT_CLEAR BIT(12)
#define MT_WTBL_UPDATE_RATE_UPDATE BIT(13)
#define MT_WTBL_UPDATE_TX_COUNT_CLEAR BIT(14)
#define MT_WTBL_UPDATE_RX_COUNT_CLEAR BIT(15)
#define MT_WTBL_UPDATE_BUSY BIT(16)
#define MT_WTBL_RMVTCR MT_WTBL_OFF(0x008)
#define MT_WTBL_RMVTCR_RX_MV_MODE BIT(23)
#define MT_LPON_BASE 0x24000
#define MT_LPON(n) (MT_LPON_BASE + (n))
#define MT_LPON_BTEIR MT_LPON(0x020)
#define MT_LPON_BTEIR_MBSS_MODE GENMASK(31, 29)
#define MT_PRE_TBTT MT_LPON(0x030)
#define MT_PRE_TBTT_MASK GENMASK(7, 0)
#define MT_PRE_TBTT_SHIFT 8
#define MT_TBTT MT_LPON(0x034)
#define MT_TBTT_PERIOD GENMASK(15, 0)
#define MT_TBTT_DTIM_PERIOD GENMASK(23, 16)
#define MT_TBTT_TBTT_WAKE_PERIOD GENMASK(27, 24)
#define MT_TBTT_DTIM_WAKE_PERIOD GENMASK(30, 28)
#define MT_TBTT_CAL_ENABLE BIT(31)
#define MT_TBTT_TIMER_CFG MT_LPON(0x05c)
#define MT_LPON_SBTOR(n) MT_LPON(0x0a0)
#define MT_LPON_SBTOR_SUB_BSS_EN BIT(29)
#define MT_LPON_SBTOR_TIME_OFFSET GENMASK(19, 0)
#define MT_INT_WAKEUP_BASE 0x24400
#define MT_INT_WAKEUP(n) (MT_INT_WAKEUP_BASE + (n))
#define MT_HW_INT_STATUS(n) MT_INT_WAKEUP(0x3c + (n) * 8)
#define MT_HW_INT_MASK(n) MT_INT_WAKEUP(0x40 + (n) * 8)
#define MT_HW_INT3_TBTT0 BIT(15)
#define MT_HW_INT3_PRE_TBTT0 BIT(31)
#define MT_WTBL1_BASE 0x28000
#define MT_WTBL_ON_BASE (MT_WTBL1_BASE + 0x2000)
#define MT_WTBL_ON(_n) (MT_WTBL_ON_BASE + (_n))
#define MT_WTBL_RIUCR0 MT_WTBL_ON(0x200)
#define MT_WTBL_RIUCR1 MT_WTBL_ON(0x204)
#define MT_WTBL_RIUCR1_RATE0 GENMASK(11, 0)
#define MT_WTBL_RIUCR1_RATE1 GENMASK(23, 12)
#define MT_WTBL_RIUCR1_RATE2_LO GENMASK(31, 24)
#define MT_WTBL_RIUCR2 MT_WTBL_ON(0x208)
#define MT_WTBL_RIUCR2_RATE2_HI GENMASK(3, 0)
#define MT_WTBL_RIUCR2_RATE3 GENMASK(15, 4)
#define MT_WTBL_RIUCR2_RATE4 GENMASK(27, 16)
#define MT_WTBL_RIUCR2_RATE5_LO GENMASK(31, 28)
#define MT_WTBL_RIUCR3 MT_WTBL_ON(0x20c)
#define MT_WTBL_RIUCR3_RATE5_HI GENMASK(7, 0)
#define MT_WTBL_RIUCR3_RATE6 GENMASK(19, 8)
#define MT_WTBL_RIUCR3_RATE7 GENMASK(31, 20)
#define MT_MIB_BASE 0x2c000
#define MT_MIB(_n) (MT_MIB_BASE + (_n))
#define MT_MIB_CTL MT_MIB(0x00)
#define MT_MIB_CTL_PSCCA_TIME GENMASK(13, 11)
#define MT_MIB_CTL_CCA_NAV_TX GENMASK(16, 14)
#define MT_MIB_CTL_ED_TIME GENMASK(30, 28)
#define MT_MIB_CTL_READ_CLR_DIS BIT(31)
#define MT_MIB_STAT(_n) MT_MIB(0x08 + (_n) * 4)
#define MT_MIB_STAT_CCA MT_MIB_STAT(9)
#define MT_MIB_STAT_CCA_MASK GENMASK(23, 0)
#define MT_MIB_STAT_PSCCA MT_MIB_STAT(16)
#define MT_MIB_STAT_PSCCA_MASK GENMASK(23, 0)
#define MT_MIB_STAT_ED MT_MIB_STAT(18)
#define MT_MIB_STAT_ED_MASK GENMASK(23, 0)
#define MT_PCIE_REMAP_BASE_1 0x40000
#define MT_PCIE_REMAP_BASE_2 0x80000
#define MT_TX_HW_QUEUE_MGMT 4
#define MT_TX_HW_QUEUE_MCU 5
#define MT_TX_HW_QUEUE_BCN 7
#define MT_TX_HW_QUEUE_BMC 8
#define MT_LED_BASE_PHYS 0x80024000
#define MT_LED_PHYS(_n) (MT_LED_BASE_PHYS + (_n))
#define MT_LED_CTRL MT_LED_PHYS(0x00)
#define MT_LED_CTRL_REPLAY(_n) BIT(0 + (8 * (_n)))
#define MT_LED_CTRL_POLARITY(_n) BIT(1 + (8 * (_n)))
#define MT_LED_CTRL_TX_BLINK_MODE(_n) BIT(2 + (8 * (_n)))
#define MT_LED_CTRL_TX_MANUAL_BLINK(_n) BIT(3 + (8 * (_n)))
#define MT_LED_CTRL_TX_OVER_BLINK(_n) BIT(5 + (8 * (_n)))
#define MT_LED_CTRL_KICK(_n) BIT(7 + (8 * (_n)))
#define MT_LED_STATUS_0(_n) MT_LED_PHYS(0x10 + ((_n) * 8))
#define MT_LED_STATUS_1(_n) MT_LED_PHYS(0x14 + ((_n) * 8))
#define MT_LED_STATUS_OFF_MASK GENMASK(31, 24)
#define MT_LED_STATUS_OFF(_v) (((_v) << \
__ffs(MT_LED_STATUS_OFF_MASK)) & \
MT_LED_STATUS_OFF_MASK)
#define MT_LED_STATUS_ON_MASK GENMASK(23, 16)
#define MT_LED_STATUS_ON(_v) (((_v) << \
__ffs(MT_LED_STATUS_ON_MASK)) & \
MT_LED_STATUS_ON_MASK)
#define MT_LED_STATUS_DURATION_MASK GENMASK(15, 0)
#define MT_LED_STATUS_DURATION(_v) (((_v) << \
__ffs(MT_LED_STATUS_DURATION_MASK)) &\
MT_LED_STATUS_DURATION_MASK)
#define MT_CLIENT_BASE_PHYS_ADDR 0x800c0000
#define MT_CLIENT_TMAC_INFO_TEMPLATE 0x040
#define MT_CLIENT_STATUS 0x06c
#define MT_CLIENT_RESET_TX 0x070
#define MT_CLIENT_RESET_TX_R_E_1 BIT(16)
#define MT_CLIENT_RESET_TX_R_E_2 BIT(17)
#define MT_CLIENT_RESET_TX_R_E_1_S BIT(20)
#define MT_CLIENT_RESET_TX_R_E_2_S BIT(21)
#define MT_EFUSE_BASE 0x81070000
#define MT_EFUSE_BASE_CTRL 0x000
#define MT_EFUSE_BASE_CTRL_EMPTY BIT(30)
#define MT_EFUSE_CTRL 0x008
#define MT_EFUSE_CTRL_AOUT GENMASK(5, 0)
#define MT_EFUSE_CTRL_MODE GENMASK(7, 6)
#define MT_EFUSE_CTRL_LDO_OFF_TIME GENMASK(13, 8)
#define MT_EFUSE_CTRL_LDO_ON_TIME GENMASK(15, 14)
#define MT_EFUSE_CTRL_AIN GENMASK(25, 16)
#define MT_EFUSE_CTRL_VALID BIT(29)
#define MT_EFUSE_CTRL_KICK BIT(30)
#define MT_EFUSE_CTRL_SEL BIT(31)
#define MT_EFUSE_WDATA(_i) (0x010 + ((_i) * 4))
#define MT_EFUSE_RDATA(_i) (0x030 + ((_i) * 4))
#define MT_CLIENT_RXINF 0x068
#define MT_CLIENT_RXINF_RXSH_GROUPS GENMASK(2, 0)
#define MT_PSE_BASE_PHYS_ADDR 0xa0000000
#define MT_PSE_WTBL_2_PHYS_ADDR 0xa5000000
#define MT_WTBL1_SIZE (8 * 4)
#define MT_WTBL2_SIZE (16 * 4)
#define MT_WTBL3_OFFSET (MT7603_WTBL_SIZE * MT_WTBL2_SIZE)
#define MT_WTBL3_SIZE (16 * 4)
#define MT_WTBL4_OFFSET (MT7603_WTBL_SIZE * MT_WTBL3_SIZE + \
MT_WTBL3_OFFSET)
#define MT_WTBL4_SIZE (8 * 4)
#define MT_WTBL1_W0_ADDR_HI GENMASK(15, 0)
#define MT_WTBL1_W0_MUAR_IDX GENMASK(21, 16)
#define MT_WTBL1_W0_RX_CHECK_A1 BIT(22)
#define MT_WTBL1_W0_KEY_IDX GENMASK(24, 23)
#define MT_WTBL1_W0_RX_CHECK_KEY_IDX BIT(25)
#define MT_WTBL1_W0_RX_KEY_VALID BIT(26)
#define MT_WTBL1_W0_RX_IK_VALID BIT(27)
#define MT_WTBL1_W0_RX_VALID BIT(28)
#define MT_WTBL1_W0_RX_CHECK_A2 BIT(29)
#define MT_WTBL1_W0_RX_DATA_VALID BIT(30)
#define MT_WTBL1_W0_WRITE_BURST BIT(31)
#define MT_WTBL1_W1_ADDR_LO GENMASK(31, 0)
#define MT_WTBL1_W2_MPDU_DENSITY GENMASK(2, 0)
#define MT_WTBL1_W2_KEY_TYPE GENMASK(6, 3)
#define MT_WTBL1_W2_EVEN_PN BIT(7)
#define MT_WTBL1_W2_TO_DS BIT(8)
#define MT_WTBL1_W2_FROM_DS BIT(9)
#define MT_WTBL1_W2_HEADER_TRANS BIT(10)
#define MT_WTBL1_W2_AMPDU_FACTOR GENMASK(13, 11)
#define MT_WTBL1_W2_PWR_MGMT BIT(14)
#define MT_WTBL1_W2_RDG BIT(15)
#define MT_WTBL1_W2_RTS BIT(16)
#define MT_WTBL1_W2_CFACK BIT(17)
#define MT_WTBL1_W2_RDG_BA BIT(18)
#define MT_WTBL1_W2_SMPS BIT(19)
#define MT_WTBL1_W2_TXS_BAF_REPORT BIT(20)
#define MT_WTBL1_W2_DYN_BW BIT(21)
#define MT_WTBL1_W2_LDPC BIT(22)
#define MT_WTBL1_W2_ITXBF BIT(23)
#define MT_WTBL1_W2_ETXBF BIT(24)
#define MT_WTBL1_W2_TXOP_PS BIT(25)
#define MT_WTBL1_W2_MESH BIT(26)
#define MT_WTBL1_W2_QOS BIT(27)
#define MT_WTBL1_W2_HT BIT(28)
#define MT_WTBL1_W2_VHT BIT(29)
#define MT_WTBL1_W2_ADMISSION_CONTROL BIT(30)
#define MT_WTBL1_W2_GROUP_ID BIT(31)
#define MT_WTBL1_W3_WTBL2_FRAME_ID GENMASK(10, 0)
#define MT_WTBL1_W3_WTBL2_ENTRY_ID GENMASK(15, 11)
#define MT_WTBL1_W3_WTBL4_FRAME_ID GENMASK(26, 16)
#define MT_WTBL1_W3_CHECK_PER BIT(27)
#define MT_WTBL1_W3_KEEP_I_PSM BIT(28)
#define MT_WTBL1_W3_I_PSM BIT(29)
#define MT_WTBL1_W3_POWER_SAVE BIT(30)
#define MT_WTBL1_W3_SKIP_TX BIT(31)
#define MT_WTBL1_W4_WTBL3_FRAME_ID GENMASK(10, 0)
#define MT_WTBL1_W4_WTBL3_ENTRY_ID GENMASK(16, 11)
#define MT_WTBL1_W4_WTBL4_ENTRY_ID GENMASK(22, 17)
#define MT_WTBL1_W4_PARTIAL_AID GENMASK(31, 23)
#define MT_WTBL2_W0_PN_LO GENMASK(31, 0)
#define MT_WTBL2_W1_PN_HI GENMASK(15, 0)
#define MT_WTBL2_W1_NON_QOS_SEQNO GENMASK(27, 16)
#define MT_WTBL2_W2_TID0_SN GENMASK(11, 0)
#define MT_WTBL2_W2_TID1_SN GENMASK(23, 12)
#define MT_WTBL2_W2_TID2_SN_LO GENMASK(31, 24)
#define MT_WTBL2_W3_TID2_SN_HI GENMASK(3, 0)
#define MT_WTBL2_W3_TID3_SN GENMASK(15, 4)
#define MT_WTBL2_W3_TID4_SN GENMASK(27, 16)
#define MT_WTBL2_W3_TID5_SN_LO GENMASK(31, 28)
#define MT_WTBL2_W4_TID5_SN_HI GENMASK(7, 0)
#define MT_WTBL2_W4_TID6_SN GENMASK(19, 8)
#define MT_WTBL2_W4_TID7_SN GENMASK(31, 20)
#define MT_WTBL2_W5_TX_COUNT_RATE1 GENMASK(15, 0)
#define MT_WTBL2_W5_FAIL_COUNT_RATE1 GENMASK(31, 16)
#define MT_WTBL2_W6_TX_COUNT_RATE2 GENMASK(7, 0)
#define MT_WTBL2_W6_TX_COUNT_RATE3 GENMASK(15, 8)
#define MT_WTBL2_W6_TX_COUNT_RATE4 GENMASK(23, 16)
#define MT_WTBL2_W6_TX_COUNT_RATE5 GENMASK(31, 24)
#define MT_WTBL2_W7_TX_COUNT_CUR_BW GENMASK(15, 0)
#define MT_WTBL2_W7_FAIL_COUNT_CUR_BW GENMASK(31, 16)
#define MT_WTBL2_W8_TX_COUNT_OTHER_BW GENMASK(15, 0)
#define MT_WTBL2_W8_FAIL_COUNT_OTHER_BW GENMASK(31, 16)
#define MT_WTBL2_W9_POWER_OFFSET GENMASK(4, 0)
#define MT_WTBL2_W9_SPATIAL_EXT BIT(5)
#define MT_WTBL2_W9_ANT_PRIORITY GENMASK(8, 6)
#define MT_WTBL2_W9_CC_BW_SEL GENMASK(10, 9)
#define MT_WTBL2_W9_CHANGE_BW_RATE GENMASK(13, 11)
#define MT_WTBL2_W9_BW_CAP GENMASK(15, 14)
#define MT_WTBL2_W9_SHORT_GI_20 BIT(16)
#define MT_WTBL2_W9_SHORT_GI_40 BIT(17)
#define MT_WTBL2_W9_SHORT_GI_80 BIT(18)
#define MT_WTBL2_W9_SHORT_GI_160 BIT(19)
#define MT_WTBL2_W9_MPDU_FAIL_COUNT GENMASK(25, 23)
#define MT_WTBL2_W9_MPDU_OK_COUNT GENMASK(28, 26)
#define MT_WTBL2_W9_RATE_IDX GENMASK(31, 29)
#define MT_WTBL2_W10_RATE1 GENMASK(11, 0)
#define MT_WTBL2_W10_RATE2 GENMASK(23, 12)
#define MT_WTBL2_W10_RATE3_LO GENMASK(31, 24)
#define MT_WTBL2_W11_RATE3_HI GENMASK(3, 0)
#define MT_WTBL2_W11_RATE4 GENMASK(15, 4)
#define MT_WTBL2_W11_RATE5 GENMASK(27, 16)
#define MT_WTBL2_W11_RATE6_LO GENMASK(31, 28)
#define MT_WTBL2_W12_RATE6_HI GENMASK(7, 0)
#define MT_WTBL2_W12_RATE7 GENMASK(19, 8)
#define MT_WTBL2_W12_RATE8 GENMASK(31, 20)
#define MT_WTBL2_W13_AVG_RCPI0 GENMASK(7, 0)
#define MT_WTBL2_W13_AVG_RCPI1 GENMASK(15, 8)
#define MT_WTBL2_W13_AVG_RCPI2 GENMASK(23, 16)
#define MT_WTBL2_W14_CC_NOISE_1S GENMASK(6, 0)
#define MT_WTBL2_W14_CC_NOISE_2S GENMASK(13, 7)
#define MT_WTBL2_W14_CC_NOISE_3S GENMASK(20, 14)
#define MT_WTBL2_W14_CHAN_EST_RMS GENMASK(24, 21)
#define MT_WTBL2_W14_CC_NOISE_SEL BIT(25)
#define MT_WTBL2_W14_ANT_SEL GENMASK(31, 26)
#define MT_WTBL2_W15_BA_WIN_SIZE GENMASK(2, 0)
#define MT_WTBL2_W15_BA_WIN_SIZE_SHIFT 3
#define MT_WTBL2_W15_BA_EN_TIDS GENMASK(31, 24)
#define MT_WTBL1_OR (MT_WTBL1_BASE + 0x2300)
#define MT_WTBL1_OR_PSM_WRITE BIT(31)
enum mt7603_cipher_type {
MT_CIPHER_NONE,
MT_CIPHER_WEP40,
MT_CIPHER_TKIP,
MT_CIPHER_TKIP_NO_MIC,
MT_CIPHER_AES_CCMP,
MT_CIPHER_WEP104,
MT_CIPHER_BIP_CMAC_128,
MT_CIPHER_WEP128,
MT_CIPHER_WAPI,
};
#endif
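
The WMM arbitration registers in this header pack one access category per field, selected with the *_SHIFT() macros. As a minimal sketch of how one of these shift/mask pairs is meant to be combined (not part of the patch; mt76_rmw() is the existing mt76 read-modify-write helper, while the function name, queue index and CWmin value below are made up for illustration):

/* Illustrative only: program CWmin for one WMM queue
 * (four 8-bit fields share the MT_WMM_CWMIN register).
 */
static void example_set_cwmin(struct mt7603_dev *dev, int queue, u8 cwmin)
{
	u32 shift = MT_WMM_CWMIN_SHIFT(queue);

	mt76_rmw(dev, MT_WMM_CWMIN, MT_WMM_CWMIN_MASK << shift,
		 (u32)cwmin << shift);
}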

View File

@ -0,0 +1,85 @@
/* SPDX-License-Identifier: ISC */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include "mt7603.h"
static int
mt76_wmac_probe(struct platform_device *pdev)
{
struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
struct mt7603_dev *dev;
void __iomem *mem_base;
struct mt76_dev *mdev;
int irq;
int ret;
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
dev_err(&pdev->dev, "Failed to get device IRQ\n");
return irq;
}
mem_base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(mem_base)) {
dev_err(&pdev->dev, "Failed to get memory resource\n");
return PTR_ERR(mem_base);
}
mdev = mt76_alloc_device(&pdev->dev, sizeof(*dev), &mt7603_ops,
&mt7603_drv_ops);
if (!mdev)
return -ENOMEM;
dev = container_of(mdev, struct mt7603_dev, mt76);
mt76_mmio_init(mdev, mem_base);
mdev->rev = (mt76_rr(dev, MT_HW_CHIPID) << 16) |
(mt76_rr(dev, MT_HW_REV) & 0xff);
dev_info(mdev->dev, "ASIC revision: %04x\n", mdev->rev);
ret = devm_request_irq(mdev->dev, irq, mt7603_irq_handler,
IRQF_SHARED, KBUILD_MODNAME, dev);
if (ret)
goto error;
ret = mt7603_register_device(dev);
if (ret)
goto error;
return 0;
error:
ieee80211_free_hw(mt76_hw(dev));
return ret;
}
static int
mt76_wmac_remove(struct platform_device *pdev)
{
struct mt76_dev *mdev = platform_get_drvdata(pdev);
struct mt7603_dev *dev = container_of(mdev, struct mt7603_dev, mt76);
mt7603_unregister_device(dev);
return 0;
}
static const struct of_device_id of_wmac_match[] = {
{ .compatible = "mediatek,mt7628-wmac" },
{},
};
MODULE_DEVICE_TABLE(of, of_wmac_match);
MODULE_FIRMWARE(MT7628_FIRMWARE_E1);
MODULE_FIRMWARE(MT7628_FIRMWARE_E2);
struct platform_driver mt76_wmac_driver = {
.probe = mt76_wmac_probe,
.remove = mt76_wmac_remove,
.driver = {
.name = "mt76_wmac",
.of_match_table = of_wmac_match,
},
};
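
mt76_wmac_driver is deliberately non-static, and this diff does not show where it gets registered. Purely as a sketch, under the assumption of a stand-alone module (the module_platform_driver() usage below is an illustration, not part of the patch):

/* Hypothetical stand-alone registration; the real registration point is
 * elsewhere in the driver and not visible in this hunk.
 */
module_platform_driver(mt76_wmac_driver);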

View File

@ -187,6 +187,8 @@ void mt76x0_mac_stop(struct mt76x02_dev *dev)
{
int i = 200, ok = 0;
mt76_clear(dev, MT_TXOP_CTRL_CFG, MT_TXOP_ED_CCA_EN);
/* Page count on TxQ */
while (i-- && ((mt76_rr(dev, 0x0438) & 0xffffffff) ||
(mt76_rr(dev, 0x0a30) & 0x000000ff) ||

View File

@ -34,6 +34,8 @@ mt76x0_set_channel(struct mt76x02_dev *dev, struct cfg80211_chan_def *chandef)
mt76_rr(dev, MT_CH_IDLE);
mt76_rr(dev, MT_CH_BUSY);
mt76x02_edcca_init(dev, true);
if (mt76_is_mmio(dev)) {
mt76x02_dfs_init_params(dev);
tasklet_enable(&dev->pre_tbtt_tasklet);

View File

@ -1007,17 +1007,13 @@ int mt76x0_phy_set_channel(struct mt76x02_dev *dev,
/* enable vco */
mt76x0_rf_set(dev, MT_RF(0, 4), BIT(7));
if (scan) {
mt76x02_edcca_init(dev, false);
if (scan)
return 0;
}
mt76x02_init_agc_gain(dev);
mt76x0_phy_calibrate(dev, false);
mt76x0_phy_set_txpower(dev);
mt76x02_edcca_init(dev, true);
ieee80211_queue_delayed_work(dev->mt76.hw, &dev->cal_work,
MT_CALIBRATE_INTERVAL);

View File

@ -79,7 +79,6 @@ static void mt76x0u_cleanup(struct mt76x02_dev *dev)
clear_bit(MT76_STATE_INITIALIZED, &dev->mt76.state);
mt76x0_chip_onoff(dev, false, false);
mt76u_queues_deinit(&dev->mt76);
mt76u_mcu_deinit(&dev->mt76);
}
static void mt76x0u_mac_stop(struct mt76x02_dev *dev)
@ -193,10 +192,6 @@ static int mt76x0u_register_device(struct mt76x02_dev *dev)
if (err < 0)
goto out_err;
err = mt76u_mcu_init_rx(&dev->mt76);
if (err < 0)
goto out_err;
err = mt76x0u_init_hardware(dev);
if (err < 0)
goto out_err;
@ -311,13 +306,11 @@ static int __maybe_unused mt76x0_suspend(struct usb_interface *usb_intf,
pm_message_t state)
{
struct mt76x02_dev *dev = usb_get_intfdata(usb_intf);
struct mt76_usb *usb = &dev->mt76.usb;
mt76u_stop_queues(&dev->mt76);
mt76x0u_mac_stop(dev);
clear_bit(MT76_STATE_MCU_RUNNING, &dev->mt76.state);
mt76x0_chip_onoff(dev, false, false);
usb_kill_urb(usb->mcu.res.urb);
return 0;
}
@ -328,15 +321,6 @@ static int __maybe_unused mt76x0_resume(struct usb_interface *usb_intf)
struct mt76_usb *usb = &dev->mt76.usb;
int ret;
reinit_completion(&usb->mcu.cmpl);
ret = mt76u_submit_buf(&dev->mt76, USB_DIR_IN,
MT_EP_IN_CMD_RESP,
&usb->mcu.res, GFP_KERNEL,
mt76u_mcu_complete_urb,
&usb->mcu.cmpl);
if (ret < 0)
goto err;
ret = mt76u_submit_rx_buffers(&dev->mt76);
if (ret < 0)
goto err;

View File

@ -98,6 +98,7 @@ struct mt76x02_dev {
u32 tx_hang_reset;
u8 tx_hang_check;
u8 mcu_timeout;
struct mt76x02_calibration cal;

View File

@ -905,14 +905,14 @@ void mt76x02_edcca_init(struct mt76x02_dev *dev, bool enable)
mt76_set(dev, MT_TXOP_CTRL_CFG, MT_TXOP_ED_CCA_EN);
mt76_rmw(dev, MT_BBP(AGC, 2), GENMASK(15, 0),
ed_th << 8 | ed_th);
if (!is_mt76x2(dev))
mt76_set(dev, MT_TXOP_HLDR_ET,
MT_TXOP_HLDR_TX40M_BLK_EN);
mt76_set(dev, MT_TXOP_HLDR_ET, MT_TXOP_HLDR_TX40M_BLK_EN);
} else {
mt76_set(dev, MT_TX_LINK_CFG, MT_TX_CFACK_EN);
mt76_clear(dev, MT_TXOP_CTRL_CFG, MT_TXOP_ED_CCA_EN);
if (is_mt76x2(dev)) {
mt76_wr(dev, MT_BBP(AGC, 2), 0x00007070);
mt76_set(dev, MT_TXOP_HLDR_ET,
MT_TXOP_HLDR_TX40M_BLK_EN);
} else {
mt76_wr(dev, MT_BBP(AGC, 2), 0x003a6464);
mt76_clear(dev, MT_TXOP_HLDR_ET,
@ -1126,6 +1126,9 @@ void mt76x02_mac_set_beacon_enable(struct mt76x02_dev *dev,
else if (val)
skb = ieee80211_beacon_get(mt76_hw(dev), vif);
if (!dev->beacon_mask)
dev->tbtt_count = 0;
__mt76x02_mac_set_beacon_enable(dev, vif_idx, val, skb);
if (mt76_is_mmio(dev))

View File

@ -61,6 +61,7 @@ int mt76x02_mcu_msg_send(struct mt76_dev *mdev, int cmd, const void *data,
"MCU message %d (seq %d) timed out\n", cmd,
seq);
ret = -ETIMEDOUT;
dev->mcu_timeout = 1;
break;
}

View File

@ -79,24 +79,24 @@ mt76x02_resync_beacon_timer(struct mt76x02_dev *dev)
* Beacon timer drifts by 1us every tick, the timer is configured
* in 1/16 TU (64us) units.
*/
if (dev->tbtt_count < 62)
if (dev->tbtt_count < 63)
return;
if (dev->tbtt_count >= 64) {
dev->tbtt_count = 0;
return;
}
/*
* The updated beacon interval takes effect after two TBTT, because
* at this point the original interval has already been loaded into
* the next TBTT_TIMER value
*/
if (dev->tbtt_count == 62)
if (dev->tbtt_count == 63)
timer_val -= 1;
mt76_rmw_field(dev, MT_BEACON_TIME_CFG,
MT_BEACON_TIME_CFG_INTVAL, timer_val);
if (dev->tbtt_count >= 64) {
dev->tbtt_count = 0;
return;
}
}
static void mt76x02_pre_tbtt_tasklet(unsigned long arg)
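
Spelling out the arithmetic behind the constants in mt76x02_resync_beacon_timer(), using the drift rate stated in its comment: the interval register counts in 1/16 TU = 64 us units and the timer drifts roughly 1 us per beacon, so the accumulated error reaches one full unit after 64 x 1 us = 64 us, i.e. once per 64-beacon cycle. Shortening the programmed interval by one unit once per cycle cancels that drift; moving the write from tbtt_count 62 to 63 presumably accounts for the two-TBTT latency noted in the code, so the correction lands on the last interval of the cycle instead of spilling into the next one.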
@ -494,18 +494,28 @@ static void mt76x02_watchdog_reset(struct mt76x02_dev *dev)
static void mt76x02_check_tx_hang(struct mt76x02_dev *dev)
{
if (mt76x02_tx_hang(dev)) {
if (++dev->tx_hang_check < MT_TX_HANG_TH)
return;
mt76x02_watchdog_reset(dev);
dev->tx_hang_reset++;
dev->tx_hang_check = 0;
memset(dev->mt76.tx_dma_idx, 0xff,
sizeof(dev->mt76.tx_dma_idx));
if (++dev->tx_hang_check >= MT_TX_HANG_TH)
goto restart;
} else {
dev->tx_hang_check = 0;
}
if (dev->mcu_timeout)
goto restart;
return;
restart:
mt76x02_watchdog_reset(dev);
mutex_lock(&dev->mt76.mmio.mcu.mutex);
dev->mcu_timeout = 0;
mutex_unlock(&dev->mt76.mmio.mcu.mutex);
dev->tx_hang_reset++;
dev->tx_hang_check = 0;
memset(dev->mt76.tx_dma_idx, 0xff,
sizeof(dev->mt76.tx_dma_idx));
}
void mt76x02_wdt_work(struct work_struct *work)

View File

@ -61,33 +61,21 @@ mt76x02u_multiple_mcu_reads(struct mt76_dev *dev, u8 *data, int len)
static int mt76x02u_mcu_wait_resp(struct mt76_dev *dev, u8 seq)
{
struct mt76_usb *usb = &dev->usb;
struct mt76u_buf *buf = &usb->mcu.res;
struct urb *urb = buf->urb;
u8 *data = buf->buf;
int i, ret;
u8 *data = usb->mcu.data;
int i, len, ret;
u32 rxfce;
for (i = 0; i < 5; i++) {
if (!wait_for_completion_timeout(&usb->mcu.cmpl,
msecs_to_jiffies(300)))
ret = mt76u_bulk_msg(dev, data, MCU_RESP_URB_SIZE, &len, 300);
if (ret == -ETIMEDOUT)
continue;
if (urb->status)
return -EIO;
if (ret)
goto out;
if (usb->mcu.rp)
mt76x02u_multiple_mcu_reads(dev, data + 4,
urb->actual_length - 8);
mt76x02u_multiple_mcu_reads(dev, data + 4, len - 8);
rxfce = get_unaligned_le32(data);
ret = mt76u_submit_buf(dev, USB_DIR_IN,
MT_EP_IN_CMD_RESP,
buf, GFP_KERNEL,
mt76u_mcu_complete_urb,
&usb->mcu.cmpl);
if (ret)
return ret;
if (seq == FIELD_GET(MT_RX_FCE_INFO_CMD_SEQ, rxfce) &&
FIELD_GET(MT_RX_FCE_INFO_EVT_TYPE, rxfce) == EVT_CMD_DONE)
return 0;
@ -96,9 +84,9 @@ static int mt76x02u_mcu_wait_resp(struct mt76_dev *dev, u8 seq)
FIELD_GET(MT_RX_FCE_INFO_EVT_TYPE, rxfce),
seq, FIELD_GET(MT_RX_FCE_INFO_CMD_SEQ, rxfce));
}
dev_err(dev->dev, "error: %s timed out\n", __func__);
return -ETIMEDOUT;
out:
dev_err(dev->dev, "error: %s failed with %d\n", __func__, ret);
return ret;
}
static int
@ -126,7 +114,7 @@ __mt76x02u_mcu_send_msg(struct mt76_dev *dev, struct sk_buff *skb,
if (ret)
return ret;
ret = mt76u_bulk_msg(dev, skb->data, skb->len, 500);
ret = mt76u_bulk_msg(dev, skb->data, skb->len, NULL, 500);
if (ret)
return ret;
@ -271,7 +259,7 @@ __mt76x02u_mcu_fw_send_data(struct mt76x02_dev *dev, u8 *data,
data_len = MT_CMD_HDR_LEN + len + sizeof(info);
err = mt76u_bulk_msg(&dev->mt76, data, data_len, 1000);
err = mt76u_bulk_msg(&dev->mt76, data, data_len, NULL, 1000);
if (err) {
dev_err(dev->mt76.dev, "firmware upload failed: %d\n", err);
return err;

View File

@ -679,9 +679,9 @@ void mt76x02_init_beacon_config(struct mt76x02_dev *dev)
}
mt76_clear(dev, MT_BEACON_TIME_CFG, (MT_BEACON_TIME_CFG_TIMER_EN |
MT_BEACON_TIME_CFG_SYNC_MODE |
MT_BEACON_TIME_CFG_TBTT_EN |
MT_BEACON_TIME_CFG_BEACON_TX));
mt76_set(dev, MT_BEACON_TIME_CFG, MT_BEACON_TIME_CFG_SYNC_MODE);
mt76_wr(dev, MT_BCN_BYPASS_MASK, 0xffff);
for (i = 0; i < 8; i++)
@ -704,9 +704,6 @@ void mt76x02_bss_info_changed(struct ieee80211_hw *hw,
if (changed & BSS_CHANGED_BSSID)
mt76x02_mac_set_bssid(dev, mvif->idx, info->bssid);
if (changed & BSS_CHANGED_BEACON_ENABLED)
mt76x02_mac_set_beacon_enable(dev, vif, info->enable_beacon);
if (changed & BSS_CHANGED_HT || changed & BSS_CHANGED_ERP_CTS_PROT)
mt76x02_mac_set_tx_protection(dev, info->use_cts_prot,
info->ht_operation_mode);
@ -716,9 +713,11 @@ void mt76x02_bss_info_changed(struct ieee80211_hw *hw,
MT_BEACON_TIME_CFG_INTVAL,
info->beacon_int << 4);
dev->beacon_int = info->beacon_int;
dev->tbtt_count = 0;
}
if (changed & BSS_CHANGED_BEACON_ENABLED)
mt76x02_mac_set_beacon_enable(dev, vif, info->enable_beacon);
if (changed & BSS_CHANGED_ERP_PREAMBLE)
mt76x02_mac_set_short_preamble(dev, info->use_short_preamble);

View File

@ -23,6 +23,9 @@ void mt76x2_mac_stop(struct mt76x02_dev *dev, bool force)
u32 rts_cfg;
int i;
mt76_clear(dev, MT_TXOP_CTRL_CFG, MT_TXOP_ED_CCA_EN);
mt76_clear(dev, MT_TXOP_HLDR_ET, MT_TXOP_HLDR_TX40M_BLK_EN);
mt76_wr(dev, MT_MAC_SYS_CTRL, 0);
rts_cfg = mt76_rr(dev, MT_TX_RTS_CFG);

View File

@ -25,6 +25,12 @@ struct mt76x02_vif;
int mt76x2_mac_start(struct mt76x02_dev *dev);
void mt76x2_mac_stop(struct mt76x02_dev *dev, bool force);
void mt76x2_mac_resume(struct mt76x02_dev *dev);
static inline void mt76x2_mac_resume(struct mt76x02_dev *dev)
{
mt76_wr(dev, MT_MAC_SYS_CTRL,
MT_MAC_SYS_CTRL_ENABLE_TX |
MT_MAC_SYS_CTRL_ENABLE_RX);
}
#endif

View File

@ -35,7 +35,6 @@ void mt76x2u_cleanup(struct mt76x02_dev *dev);
void mt76x2u_stop_hw(struct mt76x02_dev *dev);
int mt76x2u_mac_reset(struct mt76x02_dev *dev);
void mt76x2u_mac_resume(struct mt76x02_dev *dev);
int mt76x2u_mac_start(struct mt76x02_dev *dev);
int mt76x2u_mac_stop(struct mt76x02_dev *dev);

View File

@ -173,13 +173,6 @@ int mt76x2_mac_start(struct mt76x02_dev *dev)
return 0;
}
void mt76x2_mac_resume(struct mt76x02_dev *dev)
{
mt76_wr(dev, MT_MAC_SYS_CTRL,
MT_MAC_SYS_CTRL_ENABLE_TX |
MT_MAC_SYS_CTRL_ENABLE_RX);
}
static void
mt76x2_power_on_rf_patch(struct mt76x02_dev *dev)
{

View File

@ -74,6 +74,7 @@ mt76x2_phy_channel_calibrate(struct mt76x02_dev *dev, bool mac_stopped)
mt76x2_mac_resume(dev);
mt76x2_apply_gain_adj(dev);
mt76x02_edcca_init(dev, true);
dev->cal.channel_cal_done = true;
}
@ -240,10 +241,8 @@ int mt76x2_phy_set_channel(struct mt76x02_dev *dev,
mt76_wr(dev, MT_BBP(AGC, 2), 0x00007070);
mt76_wr(dev, MT_TXOP_CTRL_CFG, 0x04101B3F);
if (scan) {
mt76x02_edcca_init(dev, false);
if (scan)
return 0;
}
mt76x2_phy_channel_calibrate(dev, true);
mt76x02_init_agc_gain(dev);
@ -256,8 +255,6 @@ int mt76x2_phy_set_channel(struct mt76x02_dev *dev,
0x38);
}
mt76x02_edcca_init(dev, true);
ieee80211_queue_delayed_work(mt76_hw(dev), &dev->cal_work,
MT_CALIBRATE_INTERVAL);

View File

@ -100,11 +100,9 @@ static int __maybe_unused mt76x2u_suspend(struct usb_interface *intf,
pm_message_t state)
{
struct mt76x02_dev *dev = usb_get_intfdata(intf);
struct mt76_usb *usb = &dev->mt76.usb;
mt76u_stop_queues(&dev->mt76);
mt76x2u_stop_hw(dev);
usb_kill_urb(usb->mcu.res.urb);
return 0;
}
@ -115,15 +113,6 @@ static int __maybe_unused mt76x2u_resume(struct usb_interface *intf)
struct mt76_usb *usb = &dev->mt76.usb;
int err;
reinit_completion(&usb->mcu.cmpl);
err = mt76u_submit_buf(&dev->mt76, USB_DIR_IN,
MT_EP_IN_CMD_RESP,
&usb->mcu.res, GFP_KERNEL,
mt76u_mcu_complete_urb,
&usb->mcu.cmpl);
if (err < 0)
goto err;
err = mt76u_submit_rx_buffers(&dev->mt76);
if (err < 0)
goto err;

View File

@ -214,10 +214,6 @@ int mt76x2u_register_device(struct mt76x02_dev *dev)
if (err < 0)
goto fail;
err = mt76u_mcu_init_rx(&dev->mt76);
if (err < 0)
goto fail;
err = mt76x2u_init_hardware(dev);
if (err < 0)
goto fail;
@ -259,5 +255,4 @@ void mt76x2u_cleanup(struct mt76x02_dev *dev)
mt76x02_mcu_set_radio_state(dev, false);
mt76x2u_stop_hw(dev);
mt76u_queues_deinit(&dev->mt76);
mt76u_mcu_deinit(&dev->mt76);
}

View File

@ -143,8 +143,8 @@ int mt76x2u_mac_stop(struct mt76x02_dev *dev)
rts_cfg = mt76_rr(dev, MT_TX_RTS_CFG);
mt76_wr(dev, MT_TX_RTS_CFG, rts_cfg & ~MT_TX_RTS_CFG_RETRY_LIMIT);
mt76_clear(dev, MT_TXOP_CTRL_CFG, BIT(20));
mt76_clear(dev, MT_TXOP_HLDR_ET, BIT(1));
mt76_clear(dev, MT_TXOP_CTRL_CFG, MT_TXOP_ED_CCA_EN);
mt76_clear(dev, MT_TXOP_HLDR_ET, MT_TXOP_HLDR_TX40M_BLK_EN);
/* wait tx dma to stop */
for (i = 0; i < 2000; i++) {
@ -211,12 +211,3 @@ int mt76x2u_mac_stop(struct mt76x02_dev *dev)
return 0;
}
void mt76x2u_mac_resume(struct mt76x02_dev *dev)
{
mt76_wr(dev, MT_MAC_SYS_CTRL,
MT_MAC_SYS_CTRL_ENABLE_TX |
MT_MAC_SYS_CTRL_ENABLE_RX);
mt76_set(dev, MT_TXOP_CTRL_CFG, BIT(20));
mt76_set(dev, MT_TXOP_HLDR_ET, BIT(1));
}

View File

@ -57,13 +57,12 @@ mt76x2u_set_channel(struct mt76x02_dev *dev,
mt76_set_channel(&dev->mt76);
mt76_clear(dev, MT_TXOP_CTRL_CFG, BIT(20));
mt76_clear(dev, MT_TXOP_HLDR_ET, BIT(1));
mt76x2_mac_stop(dev, false);
err = mt76x2u_phy_set_channel(dev, chandef);
mt76x2u_mac_resume(dev);
mt76x2_mac_resume(dev);
mt76x02_edcca_init(dev, true);
clear_bit(MT76_RESET, &dev->mt76.state);
mt76_txq_schedule_all(&dev->mt76);

View File

@ -43,8 +43,9 @@ mt76x2u_phy_channel_calibrate(struct mt76x02_dev *dev, bool mac_stopped)
mt76x02_mcu_calibrate(dev, MCU_CAL_TX_SHAPING, 0);
if (!mac_stopped)
mt76x2u_mac_resume(dev);
mt76x2_mac_resume(dev);
mt76x2_apply_gain_adj(dev);
mt76x02_edcca_init(dev, true);
dev->cal.channel_cal_done = true;
}

View File

@ -324,66 +324,61 @@ mt76u_fill_rx_sg(struct mt76_dev *dev, struct mt76u_buf *buf,
}
static int
mt76u_buf_alloc_sg(struct mt76_dev *dev, struct mt76u_buf *buf,
int nsgs, int len, int sglen, gfp_t gfp)
mt76u_refill_rx(struct mt76_dev *dev, struct mt76_queue *q,
struct mt76u_buf *buf, int nsgs, gfp_t gfp)
{
buf->urb = usb_alloc_urb(0, gfp);
if (!buf->urb)
return -ENOMEM;
buf->urb->sg = devm_kcalloc(dev->dev, nsgs, sizeof(*buf->urb->sg),
gfp);
if (!buf->urb->sg)
return -ENOMEM;
sg_init_table(buf->urb->sg, nsgs);
buf->dev = dev;
return mt76u_fill_rx_sg(dev, buf, nsgs, len, sglen);
if (dev->usb.sg_en) {
return mt76u_fill_rx_sg(dev, buf, nsgs, q->buf_size,
SKB_WITH_OVERHEAD(q->buf_size));
} else {
buf->buf = page_frag_alloc(&q->rx_page, q->buf_size, gfp);
return buf->buf ? 0 : -ENOMEM;
}
}
int mt76u_buf_alloc(struct mt76_dev *dev, struct mt76u_buf *buf,
int len, int data_len, gfp_t gfp)
static int
mt76u_buf_alloc(struct mt76_dev *dev, struct mt76u_buf *buf)
{
struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
buf->urb = usb_alloc_urb(0, gfp);
buf->len = SKB_WITH_OVERHEAD(q->buf_size);
buf->dev = dev;
buf->urb = usb_alloc_urb(0, GFP_KERNEL);
if (!buf->urb)
return -ENOMEM;
buf->buf = page_frag_alloc(&q->rx_page, len, gfp);
if (!buf->buf)
return -ENOMEM;
if (dev->usb.sg_en) {
buf->urb->sg = devm_kcalloc(dev->dev, MT_SG_MAX_SIZE,
sizeof(*buf->urb->sg),
GFP_KERNEL);
if (!buf->urb->sg)
return -ENOMEM;
buf->len = data_len;
buf->dev = dev;
sg_init_table(buf->urb->sg, MT_SG_MAX_SIZE);
}
return 0;
return mt76u_refill_rx(dev, q, buf, MT_SG_MAX_SIZE, GFP_KERNEL);
}
void mt76u_buf_free(struct mt76u_buf *buf)
static void mt76u_buf_free(struct mt76u_buf *buf)
{
struct urb *urb = buf->urb;
struct scatterlist *sg;
int i;
for (i = 0; i < urb->num_sgs; i++) {
sg = &urb->sg[i];
if (!sg)
continue;
for (i = 0; i < urb->num_sgs; i++)
skb_free_frag(sg_virt(&urb->sg[i]));
skb_free_frag(sg_virt(sg));
}
if (buf->buf)
skb_free_frag(buf->buf);
usb_free_urb(buf->urb);
}
EXPORT_SYMBOL_GPL(mt76u_buf_free);
int mt76u_submit_buf(struct mt76_dev *dev, int dir, int index,
struct mt76u_buf *buf, gfp_t gfp,
usb_complete_t complete_fn, void *context)
static void
mt76u_fill_bulk_urb(struct mt76_dev *dev, int dir, int index,
struct mt76u_buf *buf, usb_complete_t complete_fn,
void *context)
{
struct usb_interface *intf = to_usb_interface(dev->dev);
struct usb_device *udev = interface_to_usbdev(intf);
@ -397,11 +392,19 @@ int mt76u_submit_buf(struct mt76_dev *dev, int dir, int index,
usb_fill_bulk_urb(buf->urb, udev, pipe, data, buf->len,
complete_fn, context);
}
static int
mt76u_submit_buf(struct mt76_dev *dev, int dir, int index,
struct mt76u_buf *buf, gfp_t gfp,
usb_complete_t complete_fn, void *context)
{
mt76u_fill_bulk_urb(dev, dir, index, buf, complete_fn,
context);
trace_submit_urb(dev, buf->urb);
return usb_submit_urb(buf->urb, gfp);
}
EXPORT_SYMBOL_GPL(mt76u_submit_buf);
static inline struct mt76u_buf
*mt76u_get_next_rx_entry(struct mt76_queue *q)
@ -464,7 +467,7 @@ mt76u_process_rx_entry(struct mt76_dev *dev, struct mt76u_buf *buf)
__skb_put(skb, data_len);
len -= data_len;
while (len > 0 && urb->num_sgs) {
while (len > 0 && nsgs < urb->num_sgs) {
data_len = min_t(int, len, urb->sg[nsgs].length);
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
sg_page(&urb->sg[nsgs]),
@ -510,20 +513,6 @@ out:
spin_unlock_irqrestore(&q->lock, flags);
}
static int
mt76u_refill_rx(struct mt76_dev *dev, struct mt76_queue *q,
struct mt76u_buf *buf, int nsgs)
{
if (dev->usb.sg_en) {
return mt76u_fill_rx_sg(dev, buf, nsgs, q->buf_size,
SKB_WITH_OVERHEAD(q->buf_size));
} else {
buf->buf = page_frag_alloc(&q->rx_page, q->buf_size,
GFP_ATOMIC);
return buf->buf ? 0 : -ENOMEM;
}
}
static void mt76u_rx_tasklet(unsigned long data)
{
struct mt76_dev *dev = (struct mt76_dev *)data;
@ -540,7 +529,8 @@ static void mt76u_rx_tasklet(unsigned long data)
count = mt76u_process_rx_entry(dev, buf);
if (count > 0) {
err = mt76u_refill_rx(dev, q, buf, count);
err = mt76u_refill_rx(dev, q, buf, count,
GFP_ATOMIC);
if (err < 0)
break;
}
@ -577,9 +567,14 @@ EXPORT_SYMBOL_GPL(mt76u_submit_rx_buffers);
static int mt76u_alloc_rx(struct mt76_dev *dev)
{
struct mt76_usb *usb = &dev->usb;
struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
int i, err;
usb->mcu.data = devm_kmalloc(dev->dev, MCU_RESP_URB_SIZE, GFP_KERNEL);
if (!usb->mcu.data)
return -ENOMEM;
spin_lock_init(&q->rx_page_lock);
spin_lock_init(&q->lock);
q->entry = devm_kcalloc(dev->dev,
@ -591,16 +586,7 @@ static int mt76u_alloc_rx(struct mt76_dev *dev)
q->buf_size = dev->usb.sg_en ? MT_RX_BUF_SIZE : PAGE_SIZE;
q->ndesc = MT_NUM_RX_ENTRIES;
for (i = 0; i < q->ndesc; i++) {
if (dev->usb.sg_en)
err = mt76u_buf_alloc_sg(dev, &q->entry[i].ubuf,
MT_SG_MAX_SIZE, q->buf_size,
SKB_WITH_OVERHEAD(q->buf_size),
GFP_KERNEL);
else
err = mt76u_buf_alloc(dev, &q->entry[i].ubuf,
q->buf_size,
SKB_WITH_OVERHEAD(q->buf_size),
GFP_KERNEL);
err = mt76u_buf_alloc(dev, &q->entry[i].ubuf);
if (err < 0)
return err;
}
@ -724,21 +710,15 @@ static void mt76u_complete_tx(struct urb *urb)
}
static int
mt76u_tx_build_sg(struct sk_buff *skb, struct urb *urb)
mt76u_tx_build_sg(struct mt76_dev *dev, struct sk_buff *skb,
struct urb *urb)
{
int nsgs = 1 + skb_shinfo(skb)->nr_frags;
struct sk_buff *iter;
if (!dev->usb.sg_en)
return 0;
skb_walk_frags(skb, iter)
nsgs += 1 + skb_shinfo(iter)->nr_frags;
memset(urb->sg, 0, sizeof(*urb->sg) * MT_SG_MAX_SIZE);
nsgs = min_t(int, MT_SG_MAX_SIZE, nsgs);
sg_init_marker(urb->sg, nsgs);
urb->num_sgs = nsgs;
return skb_to_sgvec_nomark(skb, urb->sg, 0, skb->len);
sg_init_table(urb->sg, MT_SG_MAX_SIZE);
urb->num_sgs = skb_to_sgvec(skb, urb->sg, 0, skb->len);
return urb->num_sgs;
}
static int
@ -746,12 +726,8 @@ mt76u_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
struct sk_buff *skb, struct mt76_wcid *wcid,
struct ieee80211_sta *sta)
{
struct usb_interface *intf = to_usb_interface(dev->dev);
struct usb_device *udev = interface_to_usbdev(intf);
u8 *data = NULL, ep = q2ep(q->hw_idx);
struct mt76u_buf *buf;
u16 idx = q->tail;
unsigned int pipe;
int err;
if (q->queued == q->ndesc)
@ -763,19 +739,16 @@ mt76u_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
return err;
buf = &q->entry[idx].ubuf;
buf->buf = skb->data;
buf->len = skb->len;
buf->done = false;
if (dev->usb.sg_en) {
err = mt76u_tx_build_sg(skb, buf->urb);
if (err < 0)
return err;
} else {
data = skb->data;
}
err = mt76u_tx_build_sg(dev, skb, buf->urb);
if (err < 0)
return err;
pipe = usb_sndbulkpipe(udev, dev->usb.out_ep[ep]);
usb_fill_bulk_urb(buf->urb, udev, pipe, data, skb->len,
mt76u_complete_tx, buf);
mt76u_fill_bulk_urb(dev, USB_DIR_OUT, q2ep(q->hw_idx),
buf, mt76u_complete_tx, buf);
q->tail = (q->tail + 1) % q->ndesc;
q->entry[idx].skb = skb;
@ -933,7 +906,6 @@ int mt76u_init(struct mt76_dev *dev,
INIT_DELAYED_WORK(&usb->stat_work, mt76u_tx_status_data);
skb_queue_head_init(&dev->rx_skb[MT_RXQ_MAIN]);
init_completion(&usb->mcu.cmpl);
mutex_init(&usb->mcu.mutex);
mutex_init(&usb->usb_ctrl_mtx);

View File

@ -1,57 +0,0 @@
/*
* Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include "mt76.h"
void mt76u_mcu_complete_urb(struct urb *urb)
{
struct completion *cmpl = urb->context;
complete(cmpl);
}
EXPORT_SYMBOL_GPL(mt76u_mcu_complete_urb);
int mt76u_mcu_init_rx(struct mt76_dev *dev)
{
struct mt76_usb *usb = &dev->usb;
int err;
err = mt76u_buf_alloc(dev, &usb->mcu.res, MCU_RESP_URB_SIZE,
MCU_RESP_URB_SIZE, GFP_KERNEL);
if (err < 0)
return err;
err = mt76u_submit_buf(dev, USB_DIR_IN, MT_EP_IN_CMD_RESP,
&usb->mcu.res, GFP_KERNEL,
mt76u_mcu_complete_urb,
&usb->mcu.cmpl);
if (err < 0)
mt76u_buf_free(&usb->mcu.res);
return err;
}
EXPORT_SYMBOL_GPL(mt76u_mcu_init_rx);
void mt76u_mcu_deinit(struct mt76_dev *dev)
{
struct mt76u_buf *buf = &dev->usb.mcu.res;
if (buf->urb) {
usb_kill_urb(buf->urb);
mt76u_buf_free(buf);
}
}
EXPORT_SYMBOL_GPL(mt76u_mcu_deinit);

View File

@ -430,6 +430,7 @@ static void _rtl_init_mac80211(struct ieee80211_hw *hw)
SET_IEEE80211_PERM_ADDR(hw, rtlefuse->dev_addr);
} else {
u8 rtlmac1[] = { 0x00, 0xe0, 0x4c, 0x81, 0x92, 0x00 };
get_random_bytes((rtlmac1 + (ETH_ALEN - 1)), 1);
SET_IEEE80211_PERM_ADDR(hw, rtlmac1);
}
@ -459,7 +460,6 @@ static void _rtl_init_deferred_work(struct ieee80211_hw *hw)
(void *)rtl_fwevt_wq_callback);
INIT_DELAYED_WORK(&rtlpriv->works.c2hcmd_wq,
(void *)rtl_c2hcmd_wq_callback);
}
void rtl_deinit_deferred_work(struct ieee80211_hw *hw, bool ips_wq)
@ -618,6 +618,7 @@ static void _rtl_query_shortgi(struct ieee80211_hw *hw,
u8 rate_flag = info->control.rates[0].flags;
u8 sgi_40 = 0, sgi_20 = 0, bw_40 = 0;
u8 sgi_80 = 0, bw_80 = 0;
tcb_desc->use_shortgi = false;
if (sta == NULL)
@ -1850,6 +1851,7 @@ int rtl_rx_agg_stop(struct ieee80211_hw *hw,
return 0;
}
int rtl_tx_agg_oper(struct ieee80211_hw *hw,
struct ieee80211_sta *sta, u16 tid)
{
@ -2073,7 +2075,6 @@ void rtl_watchdog_wq_callback(void *data)
* busytraffic we don't change channel
*/
if (mac->link_state >= MAC80211_LINKED) {
/* (1) get aver_rx_cnt_inperiod & aver_tx_cnt_inperiod */
for (idx = 0; idx <= 2; idx++) {
rtlpriv->link_info.num_rx_in4period[idx] =
@ -2233,6 +2234,7 @@ void rtl_watch_dog_timer_callback(struct timer_list *t)
mod_timer(&rtlpriv->works.watchdog_timer,
jiffies + MSECS(RTL_WATCH_DOG_TIME));
}
void rtl_fwevt_wq_callback(void *data)
{
struct rtl_works *rtlworks =
@ -2384,6 +2386,7 @@ void rtl_easy_concurrent_retrytimer_callback(struct timer_list *t)
rtlpriv->cfg->ops->dualmac_easy_concurrent(hw);
}
/*********************************************************
*
* frame process functions

View File

@ -188,6 +188,7 @@ static void rtl_op_tx(struct ieee80211_hw *hw,
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
struct rtl_tcb_desc tcb_desc;
memset(&tcb_desc, 0, sizeof(struct rtl_tcb_desc));
if (unlikely(is_hal_stop(rtlhal) || ppsc->rfpwr_state != ERFON))
@ -346,12 +347,14 @@ static void rtl_op_remove_interface(struct ieee80211_hw *hw,
mutex_unlock(&rtlpriv->locks.conf_mutex);
}
static int rtl_op_change_interface(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
enum nl80211_iftype new_type, bool p2p)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
int ret;
rtl_op_remove_interface(hw, vif);
vif->type = new_type;
@ -881,6 +884,7 @@ static void rtl_op_configure_filter(struct ieee80211_hw *hw,
rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RCR,
(u8 *)(&mac->rx_conf));
}
static int rtl_op_sta_add(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta)
@ -933,6 +937,7 @@ static int rtl_op_sta_remove(struct ieee80211_hw *hw,
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_sta_info *sta_entry;
if (sta) {
RT_TRACE(rtlpriv, COMP_MAC80211, DBG_DMESG,
"Remove sta addr is %pM\n", sta->addr);
@ -945,6 +950,7 @@ static int rtl_op_sta_remove(struct ieee80211_hw *hw,
}
return 0;
}
static int _rtl_get_hal_qnum(u16 queue)
{
int qnum;
@ -1066,6 +1072,7 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
/*TODO: reference to enum ieee80211_bss_change */
if (changed & BSS_CHANGED_ASSOC) {
u8 mstatus;
if (bss_conf->assoc) {
struct ieee80211_sta *sta = NULL;
u8 keep_alive = 10;
@ -1294,6 +1301,7 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
* set in sta_add, and will be NULL here */
if (vif->type == NL80211_IFTYPE_STATION) {
struct rtl_sta_info *sta_entry;
sta_entry = (struct rtl_sta_info *)sta->drv_priv;
sta_entry->wireless_mode = mac->mode;
}

View File

@ -474,6 +474,7 @@ bool efuse_shadow_update(struct ieee80211_hw *hw)
if (word_en != 0x0F) {
u8 tmpdata[8];
memcpy(tmpdata,
&rtlefuse->efuse_map[EFUSE_MODIFY_MAP][base],
8);
@ -487,7 +488,6 @@ bool efuse_shadow_update(struct ieee80211_hw *hw)
break;
}
}
}
efuse_power_switch(hw, true, false);
@ -662,6 +662,7 @@ static int efuse_one_byte_write(struct ieee80211_hw *hw, u16 addr, u8 data)
static void efuse_read_all_map(struct ieee80211_hw *hw, u8 *efuse)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
efuse_power_switch(hw, false, true);
read_efuse(hw, 0, rtlpriv->cfg->maps[EFUSE_HWSET_MAX_SIZE], efuse);
efuse_power_switch(hw, false, false);
@ -812,6 +813,7 @@ static void efuse_write_data_case1(struct ieee80211_hw *hw, u16 *efuse_addr,
if (0x0F != (badworden & 0x0F)) {
u8 reorg_offset = offset;
u8 reorg_worden = badworden;
efuse_pg_packet_write(hw, reorg_offset,
reorg_worden,
originaldata);
@ -901,6 +903,7 @@ static void efuse_write_data_case2(struct ieee80211_hw *hw, u16 *efuse_addr,
if (0x0F != (badworden & 0x0F)) {
u8 reorg_offset = tmp_pkt.offset;
u8 reorg_worden = badworden;
efuse_pg_packet_write(hw, reorg_offset,
reorg_worden,
originaldata);
@ -957,7 +960,6 @@ static int efuse_pg_packet_write(struct ieee80211_hw *hw,
while (continual && (efuse_addr < (EFUSE_MAX_SIZE -
rtlpriv->cfg->maps[EFUSE_OOB_PROTECT_BYTES_LEN]))) {
if (write_state == PG_STATE_HEADER) {
dataempty = true;
badworden = 0x0F;
@ -1114,7 +1116,6 @@ void efuse_power_switch(struct ieee80211_hw *hw, u8 write, u8 pwrstate)
u16 tmpv16;
if (pwrstate && (rtlhal->hw_type != HARDWARE_TYPE_RTL8192SE)) {
if (rtlhal->hw_type != HARDWARE_TYPE_RTL8192CE &&
rtlhal->hw_type != HARDWARE_TYPE_RTL8192DE) {
rtl_write_byte(rtlpriv,
@ -1219,6 +1220,7 @@ static u16 efuse_get_current_size(struct ieee80211_hw *hw)
static u8 efuse_calculate_word_cnts(u8 word_en)
{
u8 word_cnts = 0;
if (!(word_en & BIT(0)))
word_cnts++;
if (!(word_en & BIT(1)))

View File

@ -718,6 +718,7 @@ static void rtl_p2p_noa_ie(struct ieee80211_hw *hw, void *data,
static u8 p2p_oui_ie_type[4] = {0x50, 0x6f, 0x9a, 0x09};
u8 noa_num, index , i, noa_index = 0;
bool find_p2p_ie = false , find_p2p_ps_ie = false;
pos = (u8 *)mgmt->u.beacon.variable;
end = data + len;
ie = NULL;

View File

@ -236,6 +236,7 @@ static void rtl_tx_status(void *ppriv,
!(skb->protocol == cpu_to_be16(ETH_P_PAE))) {
if (ieee80211_is_data_qos(fc)) {
u8 tid = rtl_get_tid(skb);
if (_rtl_tx_aggr_check(rtlpriv, sta_entry,
tid)) {
sta_entry->tids[tid].agg.agg_state =
@ -293,6 +294,7 @@ static void rtl_rate_free_sta(void *rtlpriv,
struct ieee80211_sta *sta, void *priv_sta)
{
struct rtl_rate_priv *rate_priv = priv_sta;
kfree(rate_priv);
}

View File

@ -41,7 +41,6 @@ static struct country_code_to_enum_rd all_countries[] = {
NL80211_RRF_PASSIVE_SCAN | \
NL80211_RRF_NO_OFDM)
/* 5G chan 36 - chan 64*/
#define RTL819x_5GHZ_5150_5350 \
REG_RULE(5150-10, 5350+10, 80, 0, 30, 0)

View File

@ -425,7 +425,6 @@ static void rtl92c_dm_initial_gain_sta(struct ieee80211_hw *hw)
if (dm_digtable->presta_cstate == dm_digtable->cursta_cstate ||
dm_digtable->cursta_cstate == DIG_STA_BEFORE_CONNECT ||
dm_digtable->cursta_cstate == DIG_STA_CONNECT) {
if (dm_digtable->cursta_cstate != DIG_STA_DISCONNECT) {
dm_digtable->rssi_val_min =
rtl92c_dm_initial_gain_min_pwdb(hw);
@ -504,7 +503,6 @@ static void rtl92c_dm_ctrl_initgain_by_twoport(struct ieee80211_hw *hw)
rtl92c_dm_cck_packet_detection_thresh(hw);
dm_digtable->presta_cstate = dm_digtable->cursta_cstate;
}
static void rtl92c_dm_dig(struct ieee80211_hw *hw)
@ -607,6 +605,7 @@ static void rtl92c_dm_pwdb_monitor(struct ieee80211_hw *hw)
void rtl92c_dm_init_edca_turbo(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
rtlpriv->dm.current_turbo_edca = false;
rtlpriv->dm.is_any_nonbepkts = false;
rtlpriv->dm.is_cur_rdlstate = false;
@ -660,7 +659,6 @@ static void rtl92c_dm_check_edca_turbo(struct ieee80211_hw *hw)
if ((bt_change_edca) || ((!rtlpriv->dm.is_any_nonbepkts) &&
(!rtlpriv->dm.disable_framebursting))) {
cur_txok_cnt = rtlpriv->stats.txbytesunicast - last_txok_cnt;
cur_rxok_cnt = rtlpriv->stats.rxbytesunicast - last_rxok_cnt;
@ -685,6 +683,7 @@ static void rtl92c_dm_check_edca_turbo(struct ieee80211_hw *hw)
} else {
if (rtlpriv->dm.current_turbo_edca) {
u8 tmp = AC0_BE;
rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_AC_PARAM,
&tmp);
rtlpriv->dm.current_turbo_edca = false;
@ -1635,7 +1634,6 @@ static void rtl92c_bt_ant_isolation(struct ieee80211_hw *hw, u8 tmp1byte)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
/* Only enable HW BT coexist when BT in "Busy" state. */
if (rtlpriv->mac80211.vendor == PEER_CISCO &&
rtlpriv->btcoexist.bt_service == BT_OTHER_ACTION) {

View File

@ -18,6 +18,7 @@ static void _rtl92c_enable_fw_download(struct ieee80211_hw *hw, bool enable)
if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192CU) {
u32 value32 = rtl_read_dword(rtlpriv, REG_MCUFWDL);
if (enable)
value32 |= MCUFWDL_EN;
else
@ -25,8 +26,8 @@ static void _rtl92c_enable_fw_download(struct ieee80211_hw *hw, bool enable)
rtl_write_dword(rtlpriv, REG_MCUFWDL, value32);
} else if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192CE) {
u8 tmp;
if (enable) {
tmp = rtl_read_byte(rtlpriv, REG_SYS_FUNC_EN + 1);
rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN + 1,
tmp | 0x04);
@ -37,7 +38,6 @@ static void _rtl92c_enable_fw_download(struct ieee80211_hw *hw, bool enable)
tmp = rtl_read_byte(rtlpriv, REG_MCUFWDL + 2);
rtl_write_byte(rtlpriv, REG_MCUFWDL + 2, tmp & 0xf7);
} else {
tmp = rtl_read_byte(rtlpriv, REG_MCUFWDL);
rtl_write_byte(rtlpriv, REG_MCUFWDL, tmp & 0xfe);
@ -622,7 +622,6 @@ void rtl92c_set_fw_rsvdpagepkt(struct ieee80211_hw *hw,
"rtl92c_set_fw_rsvdpagepkt(): HW_VAR_SET_TX_CMD: ALL\n",
u1rsvdpageloc, 3);
skb = dev_alloc_skb(totalpacketlen);
skb_put_data(skb, &reserved_page_packet, totalpacketlen);

View File

@ -4,7 +4,6 @@
#include "../wifi.h"
#include <linux/module.h>
MODULE_AUTHOR("lizhaoming <chaoming_li@realsil.com.cn>");
MODULE_AUTHOR("Realtek WlanFAE <wlanfae@realtek.com>");
MODULE_AUTHOR("Georgia <georgia@realtek.com>");

View File

@ -747,6 +747,7 @@ static void _rtl92c_phy_sw_rf_seting(struct ieee80211_hw *hw, u8 channel)
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_phy *rtlphy = &(rtlpriv->phy);
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
if (IS_81XXC_VENDOR_UMC_B_CUT(rtlhal->version)) {
if (channel == 6 &&
rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20) {

View File

@ -144,6 +144,7 @@ void rtl92ce_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
case HW_VAR_BASIC_RATE:{
u16 rate_cfg = ((u16 *) val)[0];
u8 rate_index = 0;
rate_cfg &= 0x15f;
rate_cfg |= 0x01;
rtl_write_byte(rtlpriv, REG_RRSR, rate_cfg & 0xff);
@ -197,6 +198,7 @@ void rtl92ce_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
case HW_VAR_ACK_PREAMBLE:{
u8 reg_tmp;
u8 short_preamble = (bool)*val;
reg_tmp = (mac->cur_40_prime_sc) << 5;
if (short_preamble)
reg_tmp |= 0x80;
@ -293,6 +295,7 @@ void rtl92ce_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
}
case HW_VAR_AC_PARAM:{
u8 e_aci = *(val);
rtl92c_dm_init_edca_turbo(hw);
if (rtlpci->acm_method != EACMWAY2_SW)
@ -456,6 +459,7 @@ void rtl92ce_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
break;
case HW_VAR_AID:{
u16 u2btmp;
u2btmp = rtl_read_word(rtlpriv, REG_BCN_PSR_RPT);
u2btmp &= 0xC000;
rtl_write_word(rtlpriv, REG_BCN_PSR_RPT, (u2btmp |
@ -661,6 +665,7 @@ static bool _rtl92ce_init_mac(struct ieee80211_hw *hw)
rtl_write_byte(rtlpriv, REG_RSV_CTRL, 0x00);
if (rtlpriv->btcoexist.bt_coexistence) {
u32 value32;
value32 = rtl_read_dword(rtlpriv, REG_APS_FSMCO);
value32 |= (SOP_ABG | SOP_AMB | XOP_BTCK);
rtl_write_dword(rtlpriv, REG_APS_FSMCO, value32);
@ -1245,6 +1250,7 @@ int rtl92ce_set_network_type(struct ieee80211_hw *hw, enum nl80211_iftype type)
void rtl92ce_set_qos(struct ieee80211_hw *hw, int aci)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
rtl92c_dm_init_edca_turbo(hw);
switch (aci) {
case AC1_BK:
@ -2279,7 +2285,6 @@ void rtl8192ce_bt_reg_init(struct ieee80211_hw *hw)
rtlpriv->btcoexist.reg_bt_sco = 0;
}
void rtl8192ce_bt_hw_init(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);

View File

@ -443,6 +443,7 @@ static bool _rtl92ce_phy_set_rf_power_state(struct ieee80211_hw *hw,
RT_IN_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC)) {
bool rtstatus;
u32 initializecount = 0;
do {
initializecount++;
RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,

View File

@ -3,7 +3,6 @@
#include "table.h"
u32 RTL8192CEPHY_REG_2TARRAY[PHY_REG_2TARRAY_LENGTH] = {
0x024, 0x0011800f,
0x028, 0x00ffdb83,

View File

@ -36,6 +36,7 @@ static u8 _rtl92c_query_rxpwrpercentage(s8 antpower)
static u8 _rtl92c_evm_db_to_percentage(s8 value)
{
s8 ret_val;
ret_val = value;
if (ret_val >= 0)
@ -109,6 +110,7 @@ static void _rtl92ce_query_rxphystatus(struct ieee80211_hw *hw,
if (is_cck_rate) {
u8 report, cck_highpwr;
cck_buf = (struct phy_sts_cck_8192s_t *)p_drvinfo;
if (ppsc->rfpwr_state == ERFON)
@ -120,6 +122,7 @@ static void _rtl92ce_query_rxphystatus(struct ieee80211_hw *hw,
if (!cck_highpwr) {
u8 cck_agc_rpt = cck_buf->cck_agc_rpt;
report = cck_buf->cck_agc_rpt & 0xc0;
report = report >> 6;
switch (report) {
@ -138,6 +141,7 @@ static void _rtl92ce_query_rxphystatus(struct ieee80211_hw *hw,
}
} else {
u8 cck_agc_rpt = cck_buf->cck_agc_rpt;
report = p_drvinfo->cfosho[0] & 0x60;
report = report >> 5;
switch (report) {
@ -182,6 +186,7 @@ static void _rtl92ce_query_rxphystatus(struct ieee80211_hw *hw,
/* (3) Get Signal Quality (EVM) */
if (packet_match_bssid) {
u8 sq;
if (pstats->rx_pwdb_all > 40)
sq = 100;
else {
@ -318,6 +323,7 @@ bool rtl92ce_rx_query_desc(struct ieee80211_hw *hw,
struct rx_desc_92c *pdesc = (struct rx_desc_92c *)p_desc;
struct ieee80211_hdr *hdr;
u32 phystatus = GET_RX_DESC_PHYST(pdesc);
stats->length = (u16) GET_RX_DESC_PKT_LEN(pdesc);
stats->rx_drvinfo_size = (u8) GET_RX_DESC_DRV_INFO_SIZE(pdesc) *
RX_DRV_INFO_SIZE_UNIT;
@ -497,6 +503,7 @@ void rtl92ce_tx_fill_desc(struct ieee80211_hw *hw,
if (sta) {
u8 ampdu_density = sta->ht_cap.ampdu_density;
SET_TX_DESC_AMPDU_DENSITY(pdesc, ampdu_density);
}
@ -733,6 +740,7 @@ bool rtl92ce_is_tx_desc_closed(struct ieee80211_hw *hw,
void rtl92ce_tx_polling(struct ieee80211_hw *hw, u8 hw_queue)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
if (hw_queue == BEACON_QUEUE) {
rtl_write_word(rtlpriv, REG_PCIE_CTRL_REG, BIT(4));
} else {

View File

@ -747,6 +747,7 @@ static void _rtl92cu_init_queue_priority(struct ieee80211_hw *hw,
u8 queue_sel)
{
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
if (IS_NORMAL_CHIP(rtlhal->version))
_rtl92cu_init_chipn_queue_priority(hw, wmm_enable, out_ep_num,
queue_sel);
@ -813,6 +814,7 @@ static int _rtl92cu_init_mac(struct ieee80211_hw *hw)
u8 wmm_enable = false; /* TODO */
u8 out_ep_nums = rtlusb->out_ep_nums;
u8 queue_sel = rtlusb->out_queue_sel;
err = _rtl92cu_init_power_on(hw);
if (err) {
@ -1013,6 +1015,7 @@ d. SYS_FUNC_EN 0x02[7:0] = 0x16 reset BB state machine
e. SYS_FUNC_EN 0x02[7:0] = 0x14 reset BB state machine
***************************************/
u8 erfpath = 0, value8 = 0;
rtl_write_byte(rtlpriv, REG_TXPAUSE, 0xFF);
rtl_set_rfreg(hw, (enum radio_path)erfpath, 0x0, MASKBYTE0, 0x0);
@ -1204,6 +1207,7 @@ static void _rtl92cu_stop_tx_beacon(struct ieee80211_hw *hw)
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
u8 tmp1byte = 0;
if (IS_NORMAL_CHIP(rtlhal->version)) {
tmp1byte = rtl_read_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 2);
rtl_write_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 2,
@ -1353,6 +1357,7 @@ void rtl92cu_set_check_bssid(struct ieee80211_hw *hw, bool check_bssid)
if (check_bssid) {
u8 tmp;
if (IS_NORMAL_CHIP(rtlhal->version)) {
reg_rcr |= (RCR_CBSSID_DATA | RCR_CBSSID_BCN);
tmp = BIT(4);
@ -1365,6 +1370,7 @@ void rtl92cu_set_check_bssid(struct ieee80211_hw *hw, bool check_bssid)
_rtl92cu_set_bcn_ctrl_reg(hw, 0, tmp);
} else {
u8 tmp;
if (IS_NORMAL_CHIP(rtlhal->version)) {
reg_rcr &= ~(RCR_CBSSID_DATA | RCR_CBSSID_BCN);
tmp = BIT(4);
@ -1631,6 +1637,7 @@ void rtl92cu_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
case HW_VAR_ACK_PREAMBLE:{
u8 reg_tmp;
u8 short_preamble = (bool)*val;
reg_tmp = 0;
if (short_preamble)
reg_tmp |= 0x80;
@ -1881,6 +1888,7 @@ void rtl92cu_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
break;
case HW_VAR_KEEP_ALIVE:{
u8 array[2];
array[0] = 0xff;
array[1] = *((u8 *)val);
rtl92c_fill_h2c_cmd(hw, H2C_92C_KEEP_ALIVE_CTRL, 2,
@ -1963,7 +1971,6 @@ static void rtl92cu_update_hal_rate_table(struct ieee80211_hw *hw,
if (nmode && ((curtxbw_40mhz &&
curshortgi_40mhz) || (!curtxbw_40mhz &&
curshortgi_20mhz))) {
ratr_value |= 0x10000000;
tmp_ratr_value = (ratr_value >> 12);

View File

@ -14,14 +14,12 @@
#define TX_TOTAL_PAGE_NUMBER 0xF8
#define TX_PAGE_BOUNDARY (TX_TOTAL_PAGE_NUMBER + 1)
#define CHIP_B_PAGE_NUM_PUBQ 0xE7
/* For Test Chip Setting
* (HPQ + LPQ + PUBQ) shall be TX_TOTAL_PAGE_NUMBER */
#define CHIP_A_PAGE_NUM_PUBQ 0x7E
/* For Chip A Setting */
#define WMM_CHIP_A_TX_TOTAL_PAGE_NUMBER 0xF5
#define WMM_CHIP_A_TX_PAGE_BOUNDARY \
@ -31,8 +29,6 @@
#define WMM_CHIP_A_PAGE_NUM_HPQ 0x29
#define WMM_CHIP_A_PAGE_NUM_LPQ 0x29
/* Note: For Chip B Setting ,modify later */
#define WMM_CHIP_B_TX_TOTAL_PAGE_NUMBER 0xF5
#define WMM_CHIP_B_TX_PAGE_BOUNDARY \

View File

@ -24,7 +24,6 @@
#define RX_EVM rx_evm_percentage
#define RX_SIGQ rx_mimo_sig_qual
void rtl92c_read_chip_version(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
@ -143,6 +142,7 @@ bool rtl92c_llt_write(struct ieee80211_hw *hw, u32 address, u32 data)
} while (++count);
return status;
}
/**
* rtl92c_init_LLT_table - Init LLT table
* @io: io callback
@ -189,6 +189,7 @@ bool rtl92c_init_llt_table(struct ieee80211_hw *hw, u32 boundary)
}
return rst;
}
void rtl92c_set_key(struct ieee80211_hw *hw, u32 key_index,
u8 *p_macaddr, bool is_group, u8 enc_algo,
bool is_wepkey, bool clear_all)
@ -370,6 +371,7 @@ void rtl92c_set_qos(struct ieee80211_hw *hw, int aci)
void rtl92c_init_driver_info_size(struct ieee80211_hw *hw, u8 size)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
rtl_write_byte(rtlpriv, REG_RX_DRVINFO_SZ, size);
}
@ -647,6 +649,7 @@ static void _rtl92c_query_rxphystatus(struct ieee80211_hw *hw,
pstats->RX_SIGQ[1] = -1;
if (is_cck_rate) {
u8 report, cck_highpwr;
cck_buf = (struct phy_sts_cck_8192s_t *)p_drvinfo;
if (!in_powersavemode)
cck_highpwr = rtlphy->cck_high_power;
@ -654,6 +657,7 @@ static void _rtl92c_query_rxphystatus(struct ieee80211_hw *hw,
cck_highpwr = false;
if (!cck_highpwr) {
u8 cck_agc_rpt = cck_buf->cck_agc_rpt;
report = cck_buf->cck_agc_rpt & 0xc0;
report = report >> 6;
switch (report) {
@ -672,6 +676,7 @@ static void _rtl92c_query_rxphystatus(struct ieee80211_hw *hw,
}
} else {
u8 cck_agc_rpt = cck_buf->cck_agc_rpt;
report = p_drvinfo->cfosho[0] & 0x60;
report = report >> 5;
switch (report) {
@ -694,6 +699,7 @@ static void _rtl92c_query_rxphystatus(struct ieee80211_hw *hw,
pstats->recvsignalpower = rx_pwr_all;
if (packet_match_bssid) {
u8 sq;
if (pstats->rx_pwdb_all > 40)
sq = 100;
else {

View File

@ -18,7 +18,6 @@ void rtl92c_enable_interrupt(struct ieee80211_hw *hw);
void rtl92c_disable_interrupt(struct ieee80211_hw *hw);
void rtl92c_set_qos(struct ieee80211_hw *hw, int aci);
/*---------------------------------------------------------------
* Hardware init functions
*---------------------------------------------------------------*/
@ -130,6 +129,4 @@ void rtl92c_translate_rx_signal_stuff(struct ieee80211_hw *hw,
* Card disable functions
*---------------------------------------------------------------*/
#endif

View File

@ -108,6 +108,7 @@ static void threeoutepmapping(struct ieee80211_hw *hw, bool bwificfg,
struct rtl_ep_map *ep_map)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
if (bwificfg) { /* for WMM */
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
"USB 3EP Setting for WMM.....\n");
@ -141,6 +142,7 @@ static void oneoutepmapping(struct ieee80211_hw *hw, struct rtl_ep_map *ep_map)
ep_map->ep_mapping[RTL_TXQ_BCN] = 2;
ep_map->ep_mapping[RTL_TXQ_HI] = 2;
}
static int _out_ep_mapping(struct ieee80211_hw *hw)
{
int err = 0;
@ -174,11 +176,13 @@ err_out:
return err;
}
/* endpoint mapping */
int rtl8192cu_endpoint_mapping(struct ieee80211_hw *hw)
{
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
int error = 0;
if (likely(IS_NORMAL_CHIP(rtlhal->version)))
error = configvernoutep(hw);
else
@ -442,6 +446,7 @@ static void _rtl_fill_usb_tx_desc(u8 *txdesc)
SET_TX_DESC_LAST_SEG(txdesc, 1);
SET_TX_DESC_FIRST_SEG(txdesc, 1);
}
/**
* For HW recovery information
*/
@ -531,11 +536,13 @@ void rtl92cu_tx_fill_desc(struct ieee80211_hw *hw,
sta = ieee80211_find_sta(mac->vif, mac->bssid);
if (sta) {
u8 ampdu_density = sta->ht_cap.ampdu_density;
SET_TX_DESC_AMPDU_DENSITY(txdesc, ampdu_density);
}
rcu_read_unlock();
if (info->control.hw_key) {
struct ieee80211_key_conf *keyconf = info->control.hw_key;
switch (keyconf->cipher) {
case WLAN_CIPHER_SUITE_WEP40:
case WLAN_CIPHER_SUITE_WEP104:

View File

@ -198,7 +198,6 @@ struct rx_drv_info_92c {
#define SET_TX_DESC_OWN(__txdesc, __value) \
SET_BITS_TO_LE_4BYTE(__txdesc, 31, 1, __value)
/* Dword 1 */
#define SET_TX_DESC_MACID(__txdesc, __value) \
SET_BITS_TO_LE_4BYTE(__txdesc + 4, 0, 5, __value)
@ -355,7 +354,6 @@ struct rx_drv_info_92c {
#define SET_TX_DESC_MCSG15_MAX_LEN(__txdesc, __value) \
SET_BITS_TO_LE_4BYTE(__txdesc + 28, 28, 4, __value)
int rtl8192cu_endpoint_mapping(struct ieee80211_hw *hw);
u16 rtl8192cu_mq_to_hwq(__le16 fc, u16 mac80211_queue_index);
bool rtl92cu_rx_query_desc(struct ieee80211_hw *hw,

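Editor's note on the descriptor macros above: they all follow the same pattern of writing a value into a bit field of a little-endian dword inside the descriptor buffer. A standalone sketch of that idea (not the driver's actual SET_BITS_TO_LE_4BYTE macro):

#include <stdint.h>
#include <stdio.h>

/* Read-modify-write 'numbits' starting at 'shift' of the LE dword at 'p'. */
static void set_bits_le32(uint8_t *p, unsigned shift, unsigned numbits, uint32_t val)
{
	uint32_t dword = (uint32_t)p[0] | (uint32_t)p[1] << 8 |
			 (uint32_t)p[2] << 16 | (uint32_t)p[3] << 24;
	uint32_t mask = (numbits >= 32) ? 0xffffffffu : ((1u << numbits) - 1) << shift;

	dword = (dword & ~mask) | ((val << shift) & mask);
	p[0] = (uint8_t)dword;
	p[1] = (uint8_t)(dword >> 8);
	p[2] = (uint8_t)(dword >> 16);
	p[3] = (uint8_t)(dword >> 24);
}

int main(void)
{
	uint8_t txdesc[32] = { 0 };

	/* e.g. a 5-bit MACID-style field at bit 0 of dword 1 (offset +4) */
	set_bits_le32(txdesc + 4, 0, 5, 0x1f);
	printf("dword1 byte0 = 0x%02x\n", (unsigned)txdesc[4]);
	return 0;
}
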
View File

@ -136,7 +136,6 @@ static bool _rtl92s_firmware_downloadcode(struct ieee80211_hw *hw,
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct sk_buff *skb;
struct rtl_tcb_desc *tcb_desc;
unsigned char *seg_ptr;
u16 frag_threshold = MAX_FIRMWARE_CODE_SIZE;
u16 frag_length, frag_offset = 0;
u16 extra_descoffset = 0;
@ -166,9 +165,8 @@ static bool _rtl92s_firmware_downloadcode(struct ieee80211_hw *hw,
if (!skb)
return false;
skb_reserve(skb, extra_descoffset);
seg_ptr = skb_put_data(skb,
code_virtual_address + frag_offset,
(u32)(frag_length - extra_descoffset));
skb_put_data(skb, code_virtual_address + frag_offset,
(u32)(frag_length - extra_descoffset));
tcb_desc = (struct rtl_tcb_desc *)(skb->cb);
tcb_desc->queue_index = TXCMD_QUEUE;
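
Editor's note on the hunk above: it drops the unused return value of skb_put_data() in the firmware download loop; the loop itself walks the image in frag_threshold-sized pieces, reserving extra_descoffset bytes of headroom per fragment. A rough userspace sketch of that chunking logic (not the driver code, which builds skbs and queues them to TXCMD_QUEUE; sizes are assumed):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define FRAG_THRESHOLD   0x4000  /* assumed stand-in for MAX_FIRMWARE_CODE_SIZE */
#define EXTRA_DESCOFFSET 32      /* assumed per-fragment descriptor headroom    */

static void download_code(const uint8_t *image, size_t len)
{
	size_t frag_offset = 0;

	while (frag_offset < len) {
		uint8_t frag[EXTRA_DESCOFFSET + FRAG_THRESHOLD];
		size_t payload = len - frag_offset;

		if (payload > FRAG_THRESHOLD)
			payload = FRAG_THRESHOLD;

		/* Leave room for the TX descriptor, then copy this piece of code. */
		memset(frag, 0, EXTRA_DESCOFFSET);
		memcpy(frag + EXTRA_DESCOFFSET, image + frag_offset, payload);

		printf("fragment at offset %zu, %zu bytes\n", frag_offset, payload);
		frag_offset += payload;
	}
}

int main(void)
{
	static uint8_t fw[100000];
	download_code(fw, sizeof(fw));
	return 0;
}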

View File

@ -1405,7 +1405,6 @@ static void rtl8723e_dm_bt_reset_action_profile_state(struct ieee80211_hw *hw)
static void _rtl8723e_dm_bt_coexist_2_ant(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
u8 bt_retry_cnt;
u8 bt_info_original;
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
"[BTCoex] Get bt info by fw!!\n");
@ -1417,7 +1416,6 @@ static void _rtl8723e_dm_bt_coexist_2_ant(struct ieee80211_hw *hw)
"[BTCoex] c2h for bt_info not rcvd yet!!\n");
}
bt_retry_cnt = hal_coex_8723.bt_retry_cnt;
bt_info_original = hal_coex_8723.c2h_bt_info_original;
/* when bt inquiry or page scan, we have to set h2c 0x25 */

View File

@ -995,12 +995,9 @@ static void rtl8723be_dm_check_edca_turbo(struct ieee80211_hw *hw)
u32 edca_be = 0x5ea42b;
u32 iot_peer = 0;
bool b_is_cur_rdlstate;
bool b_last_is_cur_rdlstate = false;
bool b_bias_on_rx = false;
bool b_edca_turbo_on = false;
b_last_is_cur_rdlstate = rtlpriv->dm.is_cur_rdlstate;
cur_txok_cnt = rtlpriv->stats.txbytesunicast - last_txok_cnt;
cur_rxok_cnt = rtlpriv->stats.rxbytesunicast - last_rxok_cnt;

View File

@ -267,6 +267,7 @@ static int _rtl_usb_init_tx(struct ieee80211_hw *hw)
for (i = 0; i < __RTL_TXQ_NUM; i++) {
u32 ep_num = rtlusb->ep_map.ep_mapping[i];
if (!ep_num) {
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
"Invalid endpoint map setting!\n");
@ -331,6 +332,7 @@ static int _rtl_usb_init(struct ieee80211_hw *hw)
rtlusb->out_ep_nums = rtlusb->in_ep_nums = 0;
for (epidx = 0; epidx < epnums; epidx++) {
struct usb_endpoint_descriptor *pep_desc;
pep_desc = &usb_intf->cur_altsetting->endpoint[epidx].desc;
if (usb_endpoint_dir_in(pep_desc))
@ -753,6 +755,7 @@ static int rtl_usb_start(struct ieee80211_hw *hw)
return err;
}
/**
*
*

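Editor's note on the usb.c hunks above: they iterate over the interface's endpoint descriptors and sort them into IN and OUT endpoints before building the queue-to-endpoint map. The direction test boils down to bit 7 (0x80) of bEndpointAddress; a minimal standalone illustration with a fabricated descriptor list:

#include <stdint.h>
#include <stdio.h>

#define USB_DIR_IN 0x80  /* direction bit in bEndpointAddress (USB spec) */

struct ep_desc {         /* simplified stand-in for usb_endpoint_descriptor */
	uint8_t bEndpointAddress;
};

int main(void)
{
	/* Fabricated example: one IN endpoint and three OUT endpoints. */
	struct ep_desc eps[] = { { 0x81 }, { 0x02 }, { 0x03 }, { 0x05 } };
	unsigned in_ep_nums = 0, out_ep_nums = 0;
	size_t i;

	for (i = 0; i < sizeof(eps) / sizeof(eps[0]); i++) {
		if (eps[i].bEndpointAddress & USB_DIR_IN)
			in_ep_nums++;
		else
			out_ep_nums++;
	}

	printf("in=%u out=%u\n", in_ep_nums, out_ep_nums);
	return 0;
}
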
Some files were not shown because too many files have changed in this diff.