mt76: introduce mt76_tx_info data structure

Add mt76_tx_info as an auxiliary data structure used to pass values
to the tx_prepare_skb function pointer. This is a preliminary patch to
add support for new chipsets (e.g. mt7615).

Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
Signed-off-by: Felix Fietkau <nbd@nbd.name>
Authored by Lorenzo Bianconi on 2019-03-14 14:54:12 +01:00, committed by Felix Fietkau
parent eb071ba77c
commit b5903c4703
8 changed files with 32 additions and 24 deletions
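
To make the shape of the change concrete before the per-file diffs, here is a rough,
self-contained sketch of how the new structure is meant to be used. It is plain C with
stand-in types, not the in-tree kernel code: tx_prepare_skb_sketch, the uint64_t stand-in
for dma_addr_t, and all addresses, lengths and flag values below are made up for
illustration.

/*
 * Rough standalone model of the mt76_tx_info change; not the in-tree
 * kernel code (kernel types are replaced with plain C stand-ins, and
 * the addresses/lengths below are made up for illustration).
 *
 * Build: gcc -Wall -o tx_info_sketch tx_info_sketch.c
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

struct mt76_queue_buf {
	uint64_t addr;		/* stand-in for dma_addr_t */
	int len;
};

/* Mirrors the structure this patch adds to mt76.h. */
struct mt76_tx_info {
	struct mt76_queue_buf buf[32];
	int nbuf;
	uint32_t info;
};

/*
 * Driver callback with the new signature: instead of writing through a
 * bare "u32 *tx_info", it fills the info field of the shared structure
 * (cf. mt76x02_tx_prepare_skb in the diffs below).
 */
static int tx_prepare_skb_sketch(struct mt76_tx_info *tx_info)
{
	tx_info->info = 1u << 19;	/* fake QSEL/WIV-style descriptor flags */
	return 0;
}

int main(void)
{
	struct mt76_tx_info tx_info = { .nbuf = 0 };
	int n = 0;

	/* Queue code (cf. mt76_dma_tx_queue_skb): record txwi + payload buffers. */
	tx_info.buf[n].addr = 0x1000;	/* pretend txwi DMA address */
	tx_info.buf[n++].len = 32;	/* pretend txwi size */
	tx_info.buf[n].addr = 0x2000;	/* pretend skb payload address */
	tx_info.buf[n++].len = 256;	/* pretend payload length */
	tx_info.nbuf = n;

	tx_prepare_skb_sketch(&tx_info);

	printf("nbuf=%d info=0x%08" PRIx32 "\n", tx_info.nbuf, tx_info.info);
	return 0;
}

In the diffs below, the buffer list and nbuf are filled by mt76_dma_tx_queue_skb() while the
driver callback only writes the info word (see the mt76x02 hunks); bundling all three in one
structure keeps the tx_prepare_skb signature stable, which the commit message presents as
groundwork for new chipsets such as mt7615.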

drivers/net/wireless/mediatek/mt76/dma.c

@@ -283,13 +283,12 @@ int mt76_dma_tx_queue_skb(struct mt76_dev *dev, enum mt76_txq_id qid,
 			  struct ieee80211_sta *sta)
 {
 	struct mt76_queue *q = dev->q_tx[qid].q;
+	struct mt76_tx_info tx_info = {};
+	int len, n = 0, ret = -ENOMEM;
 	struct mt76_queue_entry e;
 	struct mt76_txwi_cache *t;
-	struct mt76_queue_buf buf[32];
-	int len, n = 0, ret = -ENOMEM;
 	struct sk_buff *iter;
 	dma_addr_t addr;
-	u32 tx_info = 0;
 
 	t = mt76_get_txwi(dev);
 	if (!t) {
@@ -306,13 +305,13 @@ int mt76_dma_tx_queue_skb(struct mt76_dev *dev, enum mt76_txq_id qid,
 	if (dma_mapping_error(dev->dev, addr))
 		goto free;
 
-	buf[n].addr = t->dma_addr;
-	buf[n++].len = dev->drv->txwi_size;
-	buf[n].addr = addr;
-	buf[n++].len = len;
+	tx_info.buf[n].addr = t->dma_addr;
+	tx_info.buf[n++].len = dev->drv->txwi_size;
+	tx_info.buf[n].addr = addr;
+	tx_info.buf[n++].len = len;
 
 	skb_walk_frags(skb, iter) {
-		if (n == ARRAY_SIZE(buf))
+		if (n == ARRAY_SIZE(tx_info.buf))
 			goto unmap;
 
 		addr = dma_map_single(dev->dev, iter->data, iter->len,
@@ -320,9 +319,10 @@ int mt76_dma_tx_queue_skb(struct mt76_dev *dev, enum mt76_txq_id qid,
 		if (dma_mapping_error(dev->dev, addr))
 			goto unmap;
 
-		buf[n].addr = addr;
-		buf[n++].len = iter->len;
+		tx_info.buf[n].addr = addr;
+		tx_info.buf[n++].len = iter->len;
 	}
+	tx_info.nbuf = n;
 
 	dma_sync_single_for_cpu(dev->dev, t->dma_addr, sizeof(t->txwi),
 				DMA_TO_DEVICE);
@@ -333,17 +333,18 @@ int mt76_dma_tx_queue_skb(struct mt76_dev *dev, enum mt76_txq_id qid,
 	if (ret < 0)
 		goto unmap;
 
-	if (q->queued + (n + 1) / 2 >= q->ndesc - 1) {
+	if (q->queued + (tx_info.nbuf + 1) / 2 >= q->ndesc - 1) {
 		ret = -ENOMEM;
 		goto unmap;
 	}
 
-	return mt76_dma_add_buf(dev, q, buf, n, tx_info, skb, t);
+	return mt76_dma_add_buf(dev, q, tx_info.buf, tx_info.nbuf,
+				tx_info.info, skb, t);
 
 unmap:
 	for (n--; n > 0; n--)
-		dma_unmap_single(dev->dev, buf[n].addr, buf[n].len,
-				 DMA_TO_DEVICE);
+		dma_unmap_single(dev->dev, tx_info.buf[n].addr,
+				 tx_info.buf[n].len, DMA_TO_DEVICE);
 free:
 	e.skb = skb;

drivers/net/wireless/mediatek/mt76/mt76.h

@@ -83,6 +83,12 @@ struct mt76_queue_buf {
 	int len;
 };
 
+struct mt76_tx_info {
+	struct mt76_queue_buf buf[32];
+	int nbuf;
+	u32 info;
+};
+
 struct mt76u_buf {
 	struct mt76_dev *dev;
 	struct urb *urb;
@@ -296,7 +302,8 @@ struct mt76_driver_ops {
 	int (*tx_prepare_skb)(struct mt76_dev *dev, void *txwi_ptr,
 			      struct sk_buff *skb, enum mt76_txq_id qid,
 			      struct mt76_wcid *wcid,
-			      struct ieee80211_sta *sta, u32 *tx_info);
+			      struct ieee80211_sta *sta,
+			      struct mt76_tx_info *tx_info);
 
 	void (*tx_complete_skb)(struct mt76_dev *dev, enum mt76_txq_id qid,
 				struct mt76_queue_entry *e);

drivers/net/wireless/mediatek/mt76/mt7603/mac.c

@@ -914,7 +914,7 @@ mt7603_mac_write_txwi(struct mt7603_dev *dev, __le32 *txwi,
 int mt7603_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
 			  struct sk_buff *skb, enum mt76_txq_id qid,
 			  struct mt76_wcid *wcid, struct ieee80211_sta *sta,
-			  u32 *tx_info)
+			  struct mt76_tx_info *tx_info)
 {
 	struct mt7603_dev *dev = container_of(mdev, struct mt7603_dev, mt76);
 	struct mt7603_sta *msta = container_of(wcid, struct mt7603_sta, wcid);

drivers/net/wireless/mediatek/mt76/mt7603/mt7603.h

@@ -225,7 +225,7 @@ void mt7603_filter_tx(struct mt7603_dev *dev, int idx, bool abort);
 int mt7603_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
 			  struct sk_buff *skb, enum mt76_txq_id qid,
 			  struct mt76_wcid *wcid, struct ieee80211_sta *sta,
-			  u32 *tx_info);
+			  struct mt76_tx_info *tx_info);
 void mt7603_tx_complete_skb(struct mt76_dev *mdev, enum mt76_txq_id qid,
 			    struct mt76_queue_entry *e);

drivers/net/wireless/mediatek/mt76/mt76x02.h

@@ -174,7 +174,7 @@ void mt76x02_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control,
 int mt76x02_tx_prepare_skb(struct mt76_dev *mdev, void *txwi,
 			   struct sk_buff *skb, enum mt76_txq_id qid,
 			   struct mt76_wcid *wcid, struct ieee80211_sta *sta,
-			   u32 *tx_info);
+			   struct mt76_tx_info *tx_info);
 void mt76x02_sw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
 		     const u8 *mac);
 void mt76x02_sw_scan_complete(struct ieee80211_hw *hw,

drivers/net/wireless/mediatek/mt76/mt76x02_txrx.c

@@ -149,7 +149,7 @@ EXPORT_SYMBOL_GPL(mt76x02_tx_status_data);
 int mt76x02_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
 			   struct sk_buff *skb, enum mt76_txq_id qid,
 			   struct mt76_wcid *wcid, struct ieee80211_sta *sta,
-			   u32 *tx_info)
+			   struct mt76_tx_info *tx_info)
 {
 	struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76);
 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
@@ -169,11 +169,11 @@ int mt76x02_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
 	if (pid >= MT_PACKET_ID_FIRST)
 		qsel = MT_QSEL_MGMT;
 
-	*tx_info = FIELD_PREP(MT_TXD_INFO_QSEL, qsel) |
-		   MT_TXD_INFO_80211;
+	tx_info->info = FIELD_PREP(MT_TXD_INFO_QSEL, qsel) |
+			MT_TXD_INFO_80211;
 
 	if (!wcid || wcid->hw_key_idx == 0xff || wcid->sw_iv)
-		*tx_info |= MT_TXD_INFO_WIV;
+		tx_info->info |= MT_TXD_INFO_WIV;
 
 	return 0;
 }

drivers/net/wireless/mediatek/mt76/mt76x02_usb.h

@@ -28,7 +28,7 @@ int mt76x02u_skb_dma_info(struct sk_buff *skb, int port, u32 flags);
 int mt76x02u_tx_prepare_skb(struct mt76_dev *mdev, void *data,
 			    struct sk_buff *skb, enum mt76_txq_id qid,
 			    struct mt76_wcid *wcid, struct ieee80211_sta *sta,
-			    u32 *tx_info);
+			    struct mt76_tx_info *tx_info);
 void mt76x02u_tx_complete_skb(struct mt76_dev *mdev, enum mt76_txq_id qid,
 			      struct mt76_queue_entry *e);
 #endif /* __MT76x02_USB_H */

drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c

@@ -74,7 +74,7 @@ int mt76x02u_skb_dma_info(struct sk_buff *skb, int port, u32 flags)
 int mt76x02u_tx_prepare_skb(struct mt76_dev *mdev, void *data,
 			    struct sk_buff *skb, enum mt76_txq_id qid,
 			    struct mt76_wcid *wcid, struct ieee80211_sta *sta,
-			    u32 *tx_info)
+			    struct mt76_tx_info *tx_info)
 {
 	struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76);
 	int pid, len = skb->len, ep = q2ep(mdev->q_tx[qid].q->hw_idx);