
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Pull networking fixes and more updates from David Miller:

 1) Tunneling fixes from Tom Herbert and Alexander Duyck.

 2) AF_UNIX updates some struct sock bit fields without the socket
    lock, whereas setsockopt() sets overlapping ones with locking.
    Separate out the synchronized vs. the AF_UNIX unsynchronized ones
    to avoid corruption (see the sketch after this list).  From Andrey
    Ryabinin.

 3) Mount BPF filesystem with mount_nodev rather than mount_ns, from
    Eric Biederman.

 4) A couple of kmemdup conversions, from Muhammad Falak R Wani
    (the pattern is sketched after this list).

 5) BPF verifier fixes from Alexei Starovoitov.

 6) Don't let tunneled UDP packets get stuck in socket queues: if
    something goes wrong during the encapsulation, just drop the packet
    rather than signalling an error up the call stack.  From Hannes
    Frederic Sowa.

 7) Fix SKB reference after free in batman-adv, from Florian Westphal.

 8) TCP iSCSI, ocfs2, rds, and tipc have to disable BH in their TCP
    callbacks since the TCP stack runs preemptibly now (the fix pattern
    is sketched after the shortlog below).  From Eric Dumazet.

 9) Fix crash in fixed_phy_add, from Rabin Vincent.

10) Fix length checks in xen-netback, from Paul Durrant.

11) Fix mixup in KEY vs KEYID macsec attributes, from Sabrina Dubroca.

12) RDS connection spamming bug fixes from Sowmini Varadhan.
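
A note on item 2: adjacent C bit fields share a single machine word, so
two writers that synchronize on different locks still race on the same
read-modify-write.  A minimal sketch of the hazard (hypothetical field
names, not the actual struct sock layout):

	struct flags {
		unsigned char a : 1;	/* written under lock A */
		unsigned char b : 1;	/* written under lock B */
	};

	/* CPU 0 (holds A): f->a = 1;  ->  load byte, set bit 0, store
	 * CPU 1 (holds B): f->b = 1;  ->  load byte, set bit 1, store
	 * Either store can overwrite the other CPU's bit, which is why
	 * ->sk_shutdown was given its own storage outside the bit field.
	 */

The kmemdup conversions in item 4 all replace the open-coded
allocate-then-copy idiom with a single call.  An illustrative helper
(the function name is made up, not taken from the converted drivers):

	static void *dup_blob(const void *src, size_t size)
	{
		/* was:	buf = kmalloc(size, GFP_KERNEL);
		 *	if (!buf)
		 *		return NULL;
		 *	memcpy(buf, src, size);
		 */
		return kmemdup(src, size, GFP_KERNEL);
	}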

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (152 commits)
  net: suppress warnings on dev_alloc_skb
  uapi glibc compat: fix compilation when !__USE_MISC in glibc
  udp: prevent skbs lingering in tunnel socket queues
  bpf: teach verifier to recognize imm += ptr pattern
  bpf: support decreasing order in direct packet access
  net: usb: ch9200: use kmemdup
  ps3_gelic: use kmemdup
  net:liquidio: use kmemdup
  bpf: Use mount_nodev not mount_ns to mount the bpf filesystem
  net: cdc_ncm: update datagram size after changing mtu
  tuntap: correctly wake up process during uninit
  intel: Add support for IPv6 IP-in-IP offload
  ip6_gre: Do not allow segmentation offloads GRE_CSUM is enabled with FOU/GUE
  RDS: TCP: Avoid rds connection churn from rogue SYNs
  RDS: TCP: rds_tcp_accept_worker() must exit gracefully when terminating rds-tcp
  net: sock: move ->sk_shutdown out of bitfields.
  ipv6: Don't reset inner headers in ip6_tnl_xmit
  ip4ip6: Support for GSO/GRO
  ip6ip6: Support for GSO/GRO
  ipv6: Set features for IPv6 tunnels
  ...
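
The fixes in item 8 share one pattern: callbacks such as
->sk_data_ready() used to run only in BH context, so their users took
sk_callback_lock with a plain read_lock().  Now that TCP may invoke
them from process context, the _bh variants are needed.  An
illustrative callback (not the exact iSCSI/ocfs2/rds/tipc code):

	static void example_data_ready(struct sock *sk)
	{
		read_lock_bh(&sk->sk_callback_lock);	/* was read_lock() */
		/* ... hand queued skbs to the upper layer ... */
		read_unlock_bh(&sk->sk_callback_lock);
	}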
Linus Torvalds 2016-05-20 20:01:26 -07:00
commit 087afe8aaf
173 changed files with 7723 additions and 5220 deletions


@ -35,8 +35,6 @@ Optional Properties:
- broken-turn-around: If set, indicates the PHY device does not correctly
release the turn around line low at the end of a MDIO transaction.
- reset-gpios: Reference to a GPIO used to reset the phy.
Example:
ethernet-phy@0 {
@ -44,5 +42,4 @@ ethernet-phy@0 {
interrupt-parent = <40000>;
interrupts = <35 1>;
reg = <0>;
reset-gpios = <&gpio1 17 GPIO_ACTIVE_LOW>;
};


@ -5902,6 +5902,7 @@ F: drivers/net/wireless/intel/iwlegacy/
INTEL WIRELESS WIFI LINK (iwlwifi)
M: Johannes Berg <johannes.berg@intel.com>
M: Emmanuel Grumbach <emmanuel.grumbach@intel.com>
M: Luca Coelho <luciano.coelho@intel.com>
M: Intel Linux Wireless <linuxwifi@intel.com>
L: linux-wireless@vger.kernel.org
W: http://intellinuxwireless.org


@ -35,8 +35,8 @@
#include <net/Space.h>
/* A unified ethernet device probe. This is the easiest way to have every
ethernet adaptor have the name "eth[0123...]".
*/
* ethernet adaptor have the name "eth[0123...]".
*/
struct devprobe2 {
struct net_device *(*probe)(int unit);
@ -46,6 +46,7 @@ struct devprobe2 {
static int __init probe_list2(int unit, struct devprobe2 *p, int autoprobe)
{
struct net_device *dev;
for (; p->probe; p++) {
if (autoprobe && p->status)
continue;
@ -58,8 +59,7 @@ static int __init probe_list2(int unit, struct devprobe2 *p, int autoprobe)
return -ENODEV;
}
/*
* ISA probes that touch addresses < 0x400 (including those that also
/* ISA probes that touch addresses < 0x400 (including those that also
* look for EISA/PCI cards in addition to ISA cards).
*/
static struct devprobe2 isa_probes[] __initdata = {
@ -86,11 +86,11 @@ static struct devprobe2 isa_probes[] __initdata = {
#endif
#ifdef CONFIG_CS89x0
#ifndef CONFIG_CS89x0_PLATFORM
{cs89x0_probe, 0},
{cs89x0_probe, 0},
#endif
#endif
#if defined(CONFIG_MVME16x_NET) || defined(CONFIG_BVME6000_NET) /* Intel I82596 */
{i82596_probe, 0},
#if defined(CONFIG_MVME16x_NET) || defined(CONFIG_BVME6000_NET) /* Intel */
{i82596_probe, 0}, /* I82596 */
#endif
#ifdef CONFIG_NI65
{ni65_probe, 0},
@ -118,13 +118,12 @@ static struct devprobe2 m68k_probes[] __initdata = {
{mac8390_probe, 0},
#endif
#ifdef CONFIG_MAC89x0
{mac89x0_probe, 0},
{mac89x0_probe, 0},
#endif
{NULL, 0},
};
/*
* Unified ethernet device probe, segmented per architecture and
/* Unified ethernet device probe, segmented per architecture and
* per bus interface. This drives the legacy devices only for now.
*/
@ -135,7 +134,7 @@ static void __init ethif_probe2(int unit)
if (base_addr == 1)
return;
(void)( probe_list2(unit, m68k_probes, base_addr == 0) &&
(void)(probe_list2(unit, m68k_probes, base_addr == 0) &&
probe_list2(unit, isa_probes, base_addr == 0));
}


@ -1269,7 +1269,7 @@ static int au1000_probe(struct platform_device *pdev)
aup->phy_irq = pd->phy_irq;
}
if (aup->phy_busid && aup->phy_busid > 0) {
if (aup->phy_busid > 0) {
dev_err(&pdev->dev, "MAC0-associated PHY attached 2nd MACs MII bus not supported yet\n");
err = -ENODEV;
goto err_mdiobus_alloc;


@ -13259,12 +13259,11 @@ static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev,
NETIF_F_RXHASH | NETIF_F_HW_VLAN_CTAG_TX;
if (!chip_is_e1x) {
dev->hw_features |= NETIF_F_GSO_GRE | NETIF_F_GSO_UDP_TUNNEL |
NETIF_F_GSO_IPIP | NETIF_F_GSO_SIT;
NETIF_F_GSO_IPXIP4;
dev->hw_enc_features =
NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 |
NETIF_F_GSO_IPIP |
NETIF_F_GSO_SIT |
NETIF_F_GSO_IPXIP4 |
NETIF_F_GSO_GRE | NETIF_F_GSO_UDP_TUNNEL;
}


@ -6311,7 +6311,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
NETIF_F_TSO | NETIF_F_TSO6 |
NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
NETIF_F_GSO_IPIP | NETIF_F_GSO_SIT |
NETIF_F_GSO_IPXIP4 |
NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
NETIF_F_GSO_PARTIAL | NETIF_F_RXHASH |
NETIF_F_RXCSUM | NETIF_F_LRO | NETIF_F_GRO;
@ -6321,8 +6321,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
NETIF_F_TSO | NETIF_F_TSO6 |
NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
NETIF_F_GSO_IPIP | NETIF_F_GSO_SIT |
NETIF_F_GSO_PARTIAL;
NETIF_F_GSO_IPXIP4 | NETIF_F_GSO_PARTIAL;
dev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM |
NETIF_F_GSO_GRE_CSUM;
dev->vlan_features = dev->hw_features | NETIF_F_HIGHDMA;


@ -602,12 +602,10 @@ int octeon_download_firmware(struct octeon_device *oct, const u8 *data,
snprintf(oct->fw_info.liquidio_firmware_version, 32, "LIQUIDIO: %s",
h->version);
buffer = kmalloc(size, GFP_KERNEL);
buffer = kmemdup(data, size, GFP_KERNEL);
if (!buffer)
return -ENOMEM;
memcpy(buffer, data, size);
p = buffer + sizeof(struct octeon_firmware_file_header);
/* load all images */


@ -274,12 +274,14 @@ static void bgx_sgmii_change_link_state(struct lmac *lmac)
static void bgx_lmac_handler(struct net_device *netdev)
{
struct lmac *lmac = container_of(netdev, struct lmac, netdev);
struct phy_device *phydev = lmac->phydev;
struct phy_device *phydev;
int link_changed = 0;
if (!lmac)
return;
phydev = lmac->phydev;
if (!phydev->link && lmac->last_link)
link_changed = -1;


@ -1169,16 +1169,15 @@ static void ehea_parse_eqe(struct ehea_adapter *adapter, u64 eqe)
ec = EHEA_BMASK_GET(NEQE_EVENT_CODE, eqe);
portnum = EHEA_BMASK_GET(NEQE_PORTNUM, eqe);
port = ehea_get_port(adapter, portnum);
if (!port) {
netdev_err(NULL, "unknown portnum %x\n", portnum);
return;
}
dev = port->netdev;
switch (ec) {
case EHEA_EC_PORTSTATE_CHG: /* port state change */
if (!port) {
netdev_err(dev, "unknown portnum %x\n", portnum);
break;
}
if (EHEA_BMASK_GET(NEQE_PORT_UP, eqe)) {
if (!netif_carrier_ok(dev)) {
ret = ehea_sense_port_attr(port);


@ -9083,8 +9083,8 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
NETIF_F_TSO6 |
NETIF_F_GSO_GRE |
NETIF_F_GSO_GRE_CSUM |
NETIF_F_GSO_IPIP |
NETIF_F_GSO_SIT |
NETIF_F_GSO_IPXIP4 |
NETIF_F_GSO_IPXIP6 |
NETIF_F_GSO_UDP_TUNNEL |
NETIF_F_GSO_UDP_TUNNEL_CSUM |
NETIF_F_GSO_PARTIAL |


@ -2284,8 +2284,8 @@ static int i40e_tso(struct sk_buff *skb, u8 *hdr_len, u64 *cd_type_cmd_tso_mss)
if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE |
SKB_GSO_GRE_CSUM |
SKB_GSO_IPIP |
SKB_GSO_SIT |
SKB_GSO_IPXIP4 |
SKB_GSO_IPXIP6 |
SKB_GSO_UDP_TUNNEL |
SKB_GSO_UDP_TUNNEL_CSUM)) {
if (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) &&


@ -1559,8 +1559,8 @@ static int i40e_tso(struct sk_buff *skb, u8 *hdr_len, u64 *cd_type_cmd_tso_mss)
if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE |
SKB_GSO_GRE_CSUM |
SKB_GSO_IPIP |
SKB_GSO_SIT |
SKB_GSO_IPXIP4 |
SKB_GSO_IPXIP6 |
SKB_GSO_UDP_TUNNEL |
SKB_GSO_UDP_TUNNEL_CSUM)) {
if (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) &&


@ -2230,8 +2230,8 @@ int i40evf_process_config(struct i40evf_adapter *adapter)
NETIF_F_TSO6 |
NETIF_F_GSO_GRE |
NETIF_F_GSO_GRE_CSUM |
NETIF_F_GSO_IPIP |
NETIF_F_GSO_SIT |
NETIF_F_GSO_IPXIP4 |
NETIF_F_GSO_IPXIP6 |
NETIF_F_GSO_UDP_TUNNEL |
NETIF_F_GSO_UDP_TUNNEL_CSUM |
NETIF_F_GSO_PARTIAL |


@ -2418,8 +2418,8 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
#define IGB_GSO_PARTIAL_FEATURES (NETIF_F_GSO_GRE | \
NETIF_F_GSO_GRE_CSUM | \
NETIF_F_GSO_IPIP | \
NETIF_F_GSO_SIT | \
NETIF_F_GSO_IPXIP4 | \
NETIF_F_GSO_IPXIP6 | \
NETIF_F_GSO_UDP_TUNNEL | \
NETIF_F_GSO_UDP_TUNNEL_CSUM)


@ -2763,8 +2763,8 @@ static int igbvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
#define IGBVF_GSO_PARTIAL_FEATURES (NETIF_F_GSO_GRE | \
NETIF_F_GSO_GRE_CSUM | \
NETIF_F_GSO_IPIP | \
NETIF_F_GSO_SIT | \
NETIF_F_GSO_IPXIP4 | \
NETIF_F_GSO_IPXIP6 | \
NETIF_F_GSO_UDP_TUNNEL | \
NETIF_F_GSO_UDP_TUNNEL_CSUM)


@ -9482,8 +9482,8 @@ skip_sriov:
#define IXGBE_GSO_PARTIAL_FEATURES (NETIF_F_GSO_GRE | \
NETIF_F_GSO_GRE_CSUM | \
NETIF_F_GSO_IPIP | \
NETIF_F_GSO_SIT | \
NETIF_F_GSO_IPXIP4 | \
NETIF_F_GSO_IPXIP6 | \
NETIF_F_GSO_UDP_TUNNEL | \
NETIF_F_GSO_UDP_TUNNEL_CSUM)


@ -4062,8 +4062,8 @@ static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
#define IXGBEVF_GSO_PARTIAL_FEATURES (NETIF_F_GSO_GRE | \
NETIF_F_GSO_GRE_CSUM | \
NETIF_F_GSO_IPIP | \
NETIF_F_GSO_SIT | \
NETIF_F_GSO_IPXIP4 | \
NETIF_F_GSO_IPXIP6 | \
NETIF_F_GSO_UDP_TUNNEL | \
NETIF_F_GSO_UDP_TUNNEL_CSUM)


@ -1076,8 +1076,7 @@ static void qede_tpa_start(struct qede_dev *edev,
* start until its over and we don't want to risk allocation failing
* here, so re-allocate when aggregation will be over.
*/
dma_unmap_addr_set(sw_rx_data_prod, mapping,
dma_unmap_addr(replace_buf, mapping));
sw_rx_data_prod->mapping = replace_buf->mapping;
sw_rx_data_prod->data = replace_buf->data;
rx_bd_prod->addr.hi = cpu_to_le32(upper_32_bits(mapping));
@ -2655,7 +2654,7 @@ static void qede_free_sge_mem(struct qede_dev *edev,
if (replace_buf->data) {
dma_unmap_page(&edev->pdev->dev,
dma_unmap_addr(replace_buf, mapping),
replace_buf->mapping,
PAGE_SIZE, DMA_FROM_DEVICE);
__free_page(replace_buf->data);
}
@ -2755,7 +2754,7 @@ static int qede_alloc_sge_mem(struct qede_dev *edev,
goto err;
}
dma_unmap_addr_set(replace_buf, mapping, mapping);
replace_buf->mapping = mapping;
tpa_info->replace_buf.page_offset = 0;
tpa_info->replace_buf_mapping = mapping;


@ -1616,13 +1616,13 @@ static void gelic_wl_scan_complete_event(struct gelic_wl_info *wl)
target->valid = 1;
target->eurus_index = i;
kfree(target->hwinfo);
target->hwinfo = kzalloc(be16_to_cpu(scan_info->size),
target->hwinfo = kmemdup(scan_info,
be16_to_cpu(scan_info->size),
GFP_KERNEL);
if (!target->hwinfo)
continue;
/* copy hw scan info */
memcpy(target->hwinfo, scan_info, be16_to_cpu(scan_info->size));
target->essid_len = strnlen(scan_info->essid,
sizeof(scan_info->essid));
target->rate_len = 0;


@ -335,15 +335,15 @@ static int geneve_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
/* Need Geneve and inner Ethernet header to be present */
if (unlikely(!pskb_may_pull(skb, GENEVE_BASE_HLEN)))
goto error;
goto drop;
/* Return packets with reserved bits set */
geneveh = geneve_hdr(skb);
if (unlikely(geneveh->ver != GENEVE_VER))
goto error;
goto drop;
if (unlikely(geneveh->proto_type != htons(ETH_P_TEB)))
goto error;
goto drop;
gs = rcu_dereference_sk_user_data(sk);
if (!gs)
@ -366,10 +366,6 @@ drop:
/* Consume bad packet */
kfree_skb(skb);
return 0;
error:
/* Let the UDP layer deal with the skb */
return 1;
}
static struct socket *geneve_create_sock(struct net *net, bool ipv6,
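
Context for the hunk above: a UDP tunnel's encap_rcv callback returns 0
when it has consumed the skb and nonzero to hand the packet back to the
UDP stack, which would queue it on the tunnel socket.  For malformed
packets the right move is to free the skb and report it consumed.  A
simplified sketch (the header-length constant is hypothetical, and this
is not the geneve code itself):

	static int tunnel_encap_recv(struct sock *sk, struct sk_buff *skb)
	{
		if (!pskb_may_pull(skb, TUNNEL_BASE_HLEN)) {
			kfree_skb(skb);	/* drop; don't let it linger */
			return 0;	/* consumed */
		}
		/* ... decapsulate and deliver ... */
		return 0;
	}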


@ -915,7 +915,6 @@ static void adf7242_debug(u8 irq1)
(stat & 0xf) == RC_STATUS_PHY_RDY ? "RC_STATUS_PHY_RDY" : "",
(stat & 0xf) == RC_STATUS_RX ? "RC_STATUS_RX" : "",
(stat & 0xf) == RC_STATUS_TX ? "RC_STATUS_TX" : "");
}
#endif
}


@ -1253,7 +1253,7 @@ static void nsc_ircc_change_dongle_speed(int iobase, int speed, int dongle_id)
*/
static __u8 nsc_ircc_change_speed(struct nsc_ircc_cb *self, __u32 speed)
{
struct net_device *dev = self->netdev;
struct net_device *dev;
__u8 mcr = MCR_SIR;
int iobase;
__u8 bank;
@ -1263,6 +1263,7 @@ static __u8 nsc_ircc_change_speed(struct nsc_ircc_cb *self, __u32 speed)
IRDA_ASSERT(self != NULL, return 0;);
dev = self->netdev;
iobase = self->io.fir_base;
/* Update accounting for new speed */


@ -1646,7 +1646,7 @@ static int macsec_add_rxsa(struct sk_buff *skb, struct genl_info *info)
if (tb_sa[MACSEC_SA_ATTR_ACTIVE])
rx_sa->active = !!nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]);
nla_memcpy(rx_sa->key.id, tb_sa[MACSEC_SA_ATTR_KEY], MACSEC_KEYID_LEN);
nla_memcpy(rx_sa->key.id, tb_sa[MACSEC_SA_ATTR_KEYID], MACSEC_KEYID_LEN);
rx_sa->sc = rx_sc;
rcu_assign_pointer(rx_sc->sa[assoc_num], rx_sa);
@ -1785,7 +1785,7 @@ static int macsec_add_txsa(struct sk_buff *skb, struct genl_info *info)
return -ENOMEM;
}
nla_memcpy(tx_sa->key.id, tb_sa[MACSEC_SA_ATTR_KEY], MACSEC_KEYID_LEN);
nla_memcpy(tx_sa->key.id, tb_sa[MACSEC_SA_ATTR_KEYID], MACSEC_KEYID_LEN);
spin_lock_bh(&tx_sa->lock);
tx_sa->next_pn = nla_get_u32(tb_sa[MACSEC_SA_ATTR_PN]);


@ -255,7 +255,8 @@ int fixed_phy_add(unsigned int irq, int phy_addr,
memset(fp->regs, 0xFF, sizeof(fp->regs[0]) * MII_REGS_NUM);
fmb->mii_bus->irq[phy_addr] = irq;
if (irq != PHY_POLL)
fmb->mii_bus->irq[phy_addr] = irq;
fp->addr = phy_addr;
fp->status = *status;
@ -314,6 +315,9 @@ struct phy_device *fixed_phy_register(unsigned int irq,
int phy_addr;
int ret;
if (!fmb->mii_bus || fmb->mii_bus->state != MDIOBUS_REGISTERED)
return ERR_PTR(-EPROBE_DEFER);
/* Get the next available PHY address, up to PHY_MAX_ADDR */
spin_lock(&phy_fixed_addr_lock);
if (phy_fixed_addr == PHY_MAX_ADDR) {


@ -34,7 +34,6 @@
#include <linux/io.h>
#include <linux/uaccess.h>
#include <linux/of.h>
#include <linux/gpio/consumer.h>
#include <asm/irq.h>
@ -1571,16 +1570,9 @@ static int phy_probe(struct device *dev)
struct device_driver *drv = phydev->mdio.dev.driver;
struct phy_driver *phydrv = to_phy_driver(drv);
int err = 0;
struct gpio_descs *reset_gpios;
phydev->drv = phydrv;
/* take phy out of reset */
reset_gpios = devm_gpiod_get_array_optional(dev, "reset",
GPIOD_OUT_LOW);
if (IS_ERR(reset_gpios))
return PTR_ERR(reset_gpios);
/* Disable the interrupt if the PHY doesn't support it
* but the interrupt is still a valid one
*/


@ -580,11 +580,13 @@ static void tun_detach_all(struct net_device *dev)
for (i = 0; i < n; i++) {
tfile = rtnl_dereference(tun->tfiles[i]);
BUG_ON(!tfile);
tfile->socket.sk->sk_shutdown = RCV_SHUTDOWN;
tfile->socket.sk->sk_data_ready(tfile->socket.sk);
RCU_INIT_POINTER(tfile->tun, NULL);
--tun->numqueues;
}
list_for_each_entry(tfile, &tun->disabled, next) {
tfile->socket.sk->sk_shutdown = RCV_SHUTDOWN;
tfile->socket.sk->sk_data_ready(tfile->socket.sk);
RCU_INIT_POINTER(tfile->tun, NULL);
}
@ -641,6 +643,7 @@ static int tun_attach(struct tun_struct *tun, struct file *file, bool skip_filte
goto out;
}
tfile->queue_index = tun->numqueues;
tfile->socket.sk->sk_shutdown &= ~RCV_SHUTDOWN;
rcu_assign_pointer(tfile->tun, tun);
rcu_assign_pointer(tun->tfiles[tun->numqueues], tfile);
tun->numqueues++;
@ -1491,9 +1494,6 @@ static ssize_t tun_do_read(struct tun_struct *tun, struct tun_file *tfile,
if (!iov_iter_count(to))
return 0;
if (tun->dev->reg_state != NETREG_REGISTERED)
return -EIO;
/* Read frames from queue */
skb = __skb_recv_datagram(tfile->socket.sk, noblock ? MSG_DONTWAIT : 0,
&peeked, &off, &err);
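
The idiom in the tuntap hunk above: marking the socket shut down and
then kicking ->sk_data_ready() forces any reader sleeping in
__skb_recv_datagram() to wake up and see EOF instead of blocking on a
queue that will never be refilled.  Sketched in isolation (simplified):

	static void shut_down_queue(struct sock *sk)
	{
		sk->sk_shutdown = RCV_SHUTDOWN;	/* reads now return EOF */
		sk->sk_data_ready(sk);		/* wake blocked readers */
	}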


@ -740,12 +740,14 @@ static void cdc_ncm_free(struct cdc_ncm_ctx *ctx)
int cdc_ncm_change_mtu(struct net_device *net, int new_mtu)
{
struct usbnet *dev = netdev_priv(net);
struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
int maxmtu = ctx->max_datagram_size - cdc_ncm_eth_hlen(dev);
int maxmtu = cdc_ncm_max_dgram_size(dev) - cdc_ncm_eth_hlen(dev);
if (new_mtu <= 0 || new_mtu > maxmtu)
return -EINVAL;
net->mtu = new_mtu;
cdc_ncm_set_dgram_size(dev, new_mtu + cdc_ncm_eth_hlen(dev));
return 0;
}
EXPORT_SYMBOL_GPL(cdc_ncm_change_mtu);


@ -155,12 +155,11 @@ static int control_write(struct usbnet *dev, unsigned char request,
index, size);
if (data) {
buf = kmalloc(size, GFP_KERNEL);
buf = kmemdup(data, size, GFP_KERNEL);
if (!buf) {
err = -ENOMEM;
goto err_out;
}
memcpy(buf, data, size);
}
err = usb_control_msg(dev->udev,


@ -1304,7 +1304,7 @@ static int vxlan_rcv(struct sock *sk, struct sk_buff *skb)
/* Need UDP and VXLAN header to be present */
if (!pskb_may_pull(skb, VXLAN_HLEN))
return 1;
goto drop;
unparsed = *vxlan_hdr(skb);
/* VNI flag always required to be set */
@ -1313,7 +1313,7 @@ static int vxlan_rcv(struct sock *sk, struct sk_buff *skb)
ntohl(vxlan_hdr(skb)->vx_flags),
ntohl(vxlan_hdr(skb)->vx_vni));
/* Return non vxlan pkt */
return 1;
goto drop;
}
unparsed.vx_flags &= ~VXLAN_HF_VNI;
unparsed.vx_vni &= ~VXLAN_VNI_MASK;


@ -202,6 +202,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.name = "qca4019 hw1.0",
.patch_load_addr = QCA4019_HW_1_0_PATCH_LOAD_ADDR,
.uart_pin = 7,
.has_shifted_cc_wraparound = true,
.otp_exe_param = 0x0010000,
.continuous_frag_desc = true,
.channel_counters_freq_hz = 125000,
@ -686,6 +687,9 @@ static void ath10k_core_free_firmware_files(struct ath10k *ar)
if (!IS_ERR(ar->cal_file))
release_firmware(ar->cal_file);
if (!IS_ERR(ar->pre_cal_file))
release_firmware(ar->pre_cal_file);
ath10k_swap_code_seg_release(ar);
ar->normal_mode_fw.fw_file.otp_data = NULL;
@ -696,6 +700,7 @@ static void ath10k_core_free_firmware_files(struct ath10k *ar)
ar->normal_mode_fw.fw_file.firmware_len = 0;
ar->cal_file = NULL;
ar->pre_cal_file = NULL;
}
static int ath10k_fetch_cal_file(struct ath10k *ar)
@ -1392,6 +1397,7 @@ static void ath10k_core_restart(struct work_struct *work)
complete_all(&ar->install_key_done);
complete_all(&ar->vdev_setup_done);
complete_all(&ar->thermal.wmi_sync);
complete_all(&ar->bss_survey_done);
wake_up(&ar->htt.empty_tx_wq);
wake_up(&ar->wmi.tx_credits_wq);
wake_up(&ar->peer_mapping_wq);
@ -1724,6 +1730,9 @@ int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode,
if (ath10k_peer_stats_enabled(ar))
val = WMI_10_4_PEER_STATS;
if (test_bit(WMI_SERVICE_BSS_CHANNEL_INFO_64, ar->wmi.svc_map))
val |= WMI_10_4_BSS_CHANNEL_INFO_64;
status = ath10k_mac_ext_resource_config(ar, val);
if (status) {
ath10k_err(ar,
@ -1758,6 +1767,10 @@ int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode,
goto err_hif_stop;
}
ar->free_vdev_map = (1LL << ar->max_num_vdevs) - 1;
INIT_LIST_HEAD(&ar->arvifs);
/* we don't care about HTT in UTF mode */
if (mode == ATH10K_FIRMWARE_MODE_NORMAL) {
status = ath10k_htt_setup(&ar->htt);
@ -1771,10 +1784,6 @@ int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode,
if (status)
goto err_hif_stop;
ar->free_vdev_map = (1LL << ar->max_num_vdevs) - 1;
INIT_LIST_HEAD(&ar->arvifs);
return 0;
err_hif_stop:
@ -2085,6 +2094,7 @@ struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev,
init_completion(&ar->install_key_done);
init_completion(&ar->vdev_setup_done);
init_completion(&ar->thermal.wmi_sync);
init_completion(&ar->bss_survey_done);
INIT_DELAYED_WORK(&ar->scan.timeout, ath10k_scan_timeout_work);


@ -876,6 +876,7 @@ struct ath10k {
* avoid reporting garbage data.
*/
bool ch_info_can_report_survey;
struct completion bss_survey_done;
struct dfs_pattern_detector *dfs_detector;
@ -883,8 +884,6 @@ struct ath10k {
#ifdef CONFIG_ATH10K_DEBUGFS
struct ath10k_debug debug;
#endif
struct {
/* relay(fs) channel for spectral scan */
struct rchan *rfs_chan_spec_scan;
@ -893,6 +892,7 @@ struct ath10k {
enum ath10k_spectral_mode mode;
struct ath10k_spec_scan config;
} spectral;
#endif
struct {
/* protected by conf_mutex */


@ -4278,9 +4278,6 @@ static void ath10k_mac_setup_ht_vht_cap(struct ath10k *ar)
if (ar->phy_capability & WHAL_WLAN_11G_CAPABILITY) {
band = &ar->mac.sbands[NL80211_BAND_2GHZ];
band->ht_cap = ht_cap;
/* Enable the VHT support at 2.4 GHz */
band->vht_cap = vht_cap;
}
if (ar->phy_capability & WHAL_WLAN_11A_CAPABILITY) {
band = &ar->mac.sbands[NL80211_BAND_5GHZ];
@ -4346,7 +4343,7 @@ static int ath10k_start(struct ieee80211_hw *hw)
/*
* This makes sense only when restarting hw. It is harmless to call
* uncoditionally. This is necessary to make sure no HTT/WMI tx
* unconditionally. This is necessary to make sure no HTT/WMI tx
* commands will be submitted while restarting.
*/
ath10k_drain_tx(ar);
@ -6407,6 +6404,39 @@ static void ath10k_reconfig_complete(struct ieee80211_hw *hw,
mutex_unlock(&ar->conf_mutex);
}
static void
ath10k_mac_update_bss_chan_survey(struct ath10k *ar,
struct ieee80211_channel *channel)
{
int ret;
enum wmi_bss_survey_req_type type = WMI_BSS_SURVEY_REQ_TYPE_READ_CLEAR;
lockdep_assert_held(&ar->conf_mutex);
if (!test_bit(WMI_SERVICE_BSS_CHANNEL_INFO_64, ar->wmi.svc_map) ||
(ar->rx_channel != channel))
return;
if (ar->scan.state != ATH10K_SCAN_IDLE) {
ath10k_dbg(ar, ATH10K_DBG_MAC, "ignoring bss chan info request while scanning..\n");
return;
}
reinit_completion(&ar->bss_survey_done);
ret = ath10k_wmi_pdev_bss_chan_info_request(ar, type);
if (ret) {
ath10k_warn(ar, "failed to send pdev bss chan info request\n");
return;
}
ret = wait_for_completion_timeout(&ar->bss_survey_done, 3 * HZ);
if (!ret) {
ath10k_warn(ar, "bss channel survey timed out\n");
return;
}
}
static int ath10k_get_survey(struct ieee80211_hw *hw, int idx,
struct survey_info *survey)
{
@ -6431,6 +6461,8 @@ static int ath10k_get_survey(struct ieee80211_hw *hw, int idx,
goto exit;
}
ath10k_mac_update_bss_chan_survey(ar, survey->channel);
spin_lock_bh(&ar->data_lock);
memcpy(survey, ar_survey, sizeof(*survey));
spin_unlock_bh(&ar->data_lock);


@ -191,6 +191,9 @@ struct wmi_ops {
u32 fw_feature_bitmap);
int (*get_vdev_subtype)(struct ath10k *ar,
enum wmi_vdev_subtype subtype);
struct sk_buff *(*gen_pdev_bss_chan_info_req)
(struct ath10k *ar,
enum wmi_bss_survey_req_type type);
};
int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb, u32 cmd_id);
@ -1361,4 +1364,22 @@ ath10k_wmi_get_vdev_subtype(struct ath10k *ar, enum wmi_vdev_subtype subtype)
return ar->wmi.ops->get_vdev_subtype(ar, subtype);
}
static inline int
ath10k_wmi_pdev_bss_chan_info_request(struct ath10k *ar,
enum wmi_bss_survey_req_type type)
{
struct ath10k_wmi *wmi = &ar->wmi;
struct sk_buff *skb;
if (!wmi->ops->gen_pdev_bss_chan_info_req)
return -EOPNOTSUPP;
skb = wmi->ops->gen_pdev_bss_chan_info_req(ar, type);
if (IS_ERR(skb))
return PTR_ERR(skb);
return ath10k_wmi_cmd_send(ar, skb,
wmi->cmd->pdev_bss_chan_info_request_cmdid);
}
#endif


@ -521,7 +521,8 @@ static struct wmi_cmd_map wmi_10_2_4_cmd_map = {
.vdev_filter_neighbor_rx_packets_cmdid = WMI_CMD_UNSUPPORTED,
.mu_cal_start_cmdid = WMI_CMD_UNSUPPORTED,
.set_cca_params_cmdid = WMI_CMD_UNSUPPORTED,
.pdev_bss_chan_info_request_cmdid = WMI_CMD_UNSUPPORTED,
.pdev_bss_chan_info_request_cmdid =
WMI_10_2_PDEV_BSS_CHAN_INFO_REQUEST_CMDID,
};
/* 10.4 WMI cmd track */
@ -1633,6 +1634,7 @@ void ath10k_wmi_put_wmi_channel(struct wmi_channel *ch,
ch->max_power = arg->max_power;
ch->reg_power = arg->max_reg_power;
ch->antenna_max = arg->max_antenna_gain;
ch->max_tx_power = arg->max_power;
/* mode & flags share storage */
ch->mode = arg->mode;
@ -4792,6 +4794,58 @@ static int ath10k_wmi_event_temperature(struct ath10k *ar, struct sk_buff *skb)
return 0;
}
static int ath10k_wmi_event_pdev_bss_chan_info(struct ath10k *ar,
struct sk_buff *skb)
{
struct wmi_pdev_bss_chan_info_event *ev;
struct survey_info *survey;
u64 busy, total, tx, rx, rx_bss;
u32 freq, noise_floor;
u32 cc_freq_hz = ar->hw_params.channel_counters_freq_hz;
int idx;
ev = (struct wmi_pdev_bss_chan_info_event *)skb->data;
if (WARN_ON(skb->len < sizeof(*ev)))
return -EPROTO;
freq = __le32_to_cpu(ev->freq);
noise_floor = __le32_to_cpu(ev->noise_floor);
busy = __le64_to_cpu(ev->cycle_busy);
total = __le64_to_cpu(ev->cycle_total);
tx = __le64_to_cpu(ev->cycle_tx);
rx = __le64_to_cpu(ev->cycle_rx);
rx_bss = __le64_to_cpu(ev->cycle_rx_bss);
ath10k_dbg(ar, ATH10K_DBG_WMI,
"wmi event pdev bss chan info:\n freq: %d noise: %d cycle: busy %llu total %llu tx %llu rx %llu rx_bss %llu\n",
freq, noise_floor, busy, total, tx, rx, rx_bss);
spin_lock_bh(&ar->data_lock);
idx = freq_to_idx(ar, freq);
if (idx >= ARRAY_SIZE(ar->survey)) {
ath10k_warn(ar, "bss chan info: invalid frequency %d (idx %d out of bounds)\n",
freq, idx);
goto exit;
}
survey = &ar->survey[idx];
survey->noise = noise_floor;
survey->time = div_u64(total, cc_freq_hz);
survey->time_busy = div_u64(busy, cc_freq_hz);
survey->time_rx = div_u64(rx_bss, cc_freq_hz);
survey->time_tx = div_u64(tx, cc_freq_hz);
survey->filled |= (SURVEY_INFO_NOISE_DBM |
SURVEY_INFO_TIME |
SURVEY_INFO_TIME_BUSY |
SURVEY_INFO_TIME_RX |
SURVEY_INFO_TIME_TX);
exit:
spin_unlock_bh(&ar->data_lock);
complete(&ar->bss_survey_done);
return 0;
}
static void ath10k_wmi_op_rx(struct ath10k *ar, struct sk_buff *skb)
{
struct wmi_cmd_hdr *cmd_hdr;
@ -5135,6 +5189,9 @@ static void ath10k_wmi_10_2_op_rx(struct ath10k *ar, struct sk_buff *skb)
case WMI_10_2_PDEV_TEMPERATURE_EVENTID:
ath10k_wmi_event_temperature(ar, skb);
break;
case WMI_10_2_PDEV_BSS_CHAN_INFO_EVENTID:
ath10k_wmi_event_pdev_bss_chan_info(ar, skb);
break;
case WMI_10_2_RTT_KEEPALIVE_EVENTID:
case WMI_10_2_GPIO_INPUT_EVENTID:
case WMI_10_2_PEER_RATECODE_LIST_EVENTID:
@ -5212,6 +5269,7 @@ static void ath10k_wmi_10_4_op_rx(struct ath10k *ar, struct sk_buff *skb)
ath10k_wmi_event_vdev_stopped(ar, skb);
break;
case WMI_10_4_WOW_WAKEUP_HOST_EVENTID:
case WMI_10_4_PEER_RATECODE_LIST_EVENTID:
ath10k_dbg(ar, ATH10K_DBG_WMI,
"received event id %d not implemented\n", id);
break;
@ -5221,6 +5279,9 @@ static void ath10k_wmi_10_4_op_rx(struct ath10k *ar, struct sk_buff *skb)
case WMI_10_4_PDEV_TEMPERATURE_EVENTID:
ath10k_wmi_event_temperature(ar, skb);
break;
case WMI_10_4_PDEV_BSS_CHAN_INFO_EVENTID:
ath10k_wmi_event_pdev_bss_chan_info(ar, skb);
break;
default:
ath10k_warn(ar, "Unknown eventid: %d\n", id);
break;
@ -5606,6 +5667,9 @@ static struct sk_buff *ath10k_wmi_10_2_op_gen_init(struct ath10k *ar)
if (ath10k_peer_stats_enabled(ar))
features |= WMI_10_2_PEER_STATS;
if (test_bit(WMI_SERVICE_BSS_CHANNEL_INFO_64, ar->wmi.svc_map))
features |= WMI_10_2_BSS_CHAN_INFO;
cmd->resource_config.feature_mask = __cpu_to_le32(features);
memcpy(&cmd->resource_config.common, &config, sizeof(config));
@ -6636,6 +6700,26 @@ ath10k_wmi_10_2_op_gen_pdev_get_temperature(struct ath10k *ar)
return skb;
}
static struct sk_buff *
ath10k_wmi_10_2_op_gen_pdev_bss_chan_info(struct ath10k *ar,
enum wmi_bss_survey_req_type type)
{
struct wmi_pdev_chan_info_req_cmd *cmd;
struct sk_buff *skb;
skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
if (!skb)
return ERR_PTR(-ENOMEM);
cmd = (struct wmi_pdev_chan_info_req_cmd *)skb->data;
cmd->type = __cpu_to_le32(type);
ath10k_dbg(ar, ATH10K_DBG_WMI,
"wmi pdev bss info request type %d\n", type);
return skb;
}
/* This function assumes the beacon is already DMA mapped */
static struct sk_buff *
ath10k_wmi_op_gen_beacon_dma(struct ath10k *ar, u32 vdev_id, const void *bcn,
@ -7735,6 +7819,7 @@ static const struct wmi_ops wmi_10_2_4_ops = {
.gen_init = ath10k_wmi_10_2_op_gen_init,
.gen_peer_assoc = ath10k_wmi_10_2_op_gen_peer_assoc,
.gen_pdev_get_temperature = ath10k_wmi_10_2_op_gen_pdev_get_temperature,
.gen_pdev_bss_chan_info_req = ath10k_wmi_10_2_op_gen_pdev_bss_chan_info,
/* shared with 10.1 */
.map_svc = wmi_10x_svc_map,
@ -7861,6 +7946,7 @@ static const struct wmi_ops wmi_10_4_ops = {
.gen_request_stats = ath10k_wmi_op_gen_request_stats,
.gen_pdev_get_temperature = ath10k_wmi_10_2_op_gen_pdev_get_temperature,
.get_vdev_subtype = ath10k_wmi_10_4_op_get_vdev_subtype,
.gen_pdev_bss_chan_info_req = ath10k_wmi_10_2_op_gen_pdev_bss_chan_info,
};
int ath10k_wmi_attach(struct ath10k *ar)


@ -1444,6 +1444,7 @@ enum wmi_10_2_cmd_id {
WMI_10_2_MU_CAL_START_CMDID,
WMI_10_2_SET_LTEU_CONFIG_CMDID,
WMI_10_2_SET_CCA_PARAMS,
WMI_10_2_PDEV_BSS_CHAN_INFO_REQUEST_CMDID,
WMI_10_2_PDEV_UTF_CMDID = WMI_10_2_END_CMDID - 1,
};
@ -1487,6 +1488,8 @@ enum wmi_10_2_event_id {
WMI_10_2_WDS_PEER_EVENTID,
WMI_10_2_PEER_STA_PS_STATECHG_EVENTID,
WMI_10_2_PDEV_TEMPERATURE_EVENTID,
WMI_10_2_MU_REPORT_EVENTID,
WMI_10_2_PDEV_BSS_CHAN_INFO_EVENTID,
WMI_10_2_PDEV_UTF_EVENTID = WMI_10_2_END_EVENTID - 1,
};
@ -1795,6 +1798,7 @@ struct wmi_channel {
__le32 reginfo1;
struct {
u8 antenna_max;
u8 max_tx_power;
} __packed;
} __packed;
} __packed;
@ -2450,6 +2454,7 @@ enum wmi_10_2_feature_mask {
WMI_10_2_RX_BATCH_MODE = BIT(0),
WMI_10_2_ATF_CONFIG = BIT(1),
WMI_10_2_COEX_GPIO = BIT(3),
WMI_10_2_BSS_CHAN_INFO = BIT(6),
WMI_10_2_PEER_STATS = BIT(7),
};
@ -6280,6 +6285,17 @@ struct wmi_pdev_temperature_event {
__le32 temperature;
} __packed;
struct wmi_pdev_bss_chan_info_event {
__le32 freq;
__le32 noise_floor;
__le64 cycle_busy;
__le64 cycle_total;
__le64 cycle_tx;
__le64 cycle_rx;
__le64 cycle_rx_bss;
__le32 reserved;
} __packed;
/* WOW structures */
enum wmi_wow_wakeup_event {
WOW_BMISS_EVENT = 0,
@ -6483,6 +6499,16 @@ enum wmi_host_platform_type {
WMI_HOST_PLATFORM_LOW_PERF,
};
enum wmi_bss_survey_req_type {
WMI_BSS_SURVEY_REQ_TYPE_READ = 1,
WMI_BSS_SURVEY_REQ_TYPE_READ_CLEAR,
};
struct wmi_pdev_chan_info_req_cmd {
__le32 type;
__le32 reserved;
} __packed;
struct ath10k;
struct ath10k_vif;
struct ath10k_fw_stats_pdev;


@ -31,6 +31,7 @@ unsigned int debug_mask;
static unsigned int suspend_mode;
static unsigned int wow_mode;
static unsigned int uart_debug;
static unsigned int uart_rate = 115200;
static unsigned int ath6kl_p2p;
static unsigned int testmode;
static unsigned int recovery_enable;
@ -40,6 +41,7 @@ module_param(debug_mask, uint, 0644);
module_param(suspend_mode, uint, 0644);
module_param(wow_mode, uint, 0644);
module_param(uart_debug, uint, 0644);
module_param(uart_rate, uint, 0644);
module_param(ath6kl_p2p, uint, 0644);
module_param(testmode, uint, 0644);
module_param(recovery_enable, uint, 0644);
@ -180,6 +182,7 @@ int ath6kl_core_init(struct ath6kl *ar, enum ath6kl_htc_type htc_type)
if (uart_debug)
ar->conf_flags |= ATH6KL_CONF_UART_DEBUG;
ar->hw.uarttx_rate = uart_rate;
set_bit(FIRST_BOOT, &ar->flag);


@ -781,6 +781,7 @@ struct ath6kl {
u32 board_addr;
u32 refclk_hz;
u32 uarttx_pin;
u32 uarttx_rate;
u32 testscript_addr;
u8 tx_ant;
u8 rx_ant;


@ -173,6 +173,7 @@ static const struct ath6kl_hw hw_list[] = {
.reserved_ram_size = 7168,
.board_addr = 0x436400,
.testscript_addr = 0,
.uarttx_pin = 11,
.flags = 0,
.fw = {
@ -650,6 +651,14 @@ int ath6kl_configure_target(struct ath6kl *ar)
if (status)
return status;
/* Only set the baud rate if we're actually doing debug */
if (ar->conf_flags & ATH6KL_CONF_UART_DEBUG) {
status = ath6kl_bmi_write_hi32(ar, hi_desired_baud_rate,
ar->hw.uarttx_rate);
if (status)
return status;
}
/* Configure target refclk_hz */
if (ar->hw.refclk_hz != 0) {
status = ath6kl_bmi_write_hi32(ar, hi_refclk_hz,


@ -75,6 +75,26 @@ config ATH9K_STATION_STATISTICS
---help---
This option enables detailed statistics for association stations.
config ATH9K_TX99
bool "Atheros ath9k TX99 testing support"
depends on ATH9K_DEBUGFS && CFG80211_CERTIFICATION_ONUS
default n
---help---
Say N. This should only be enabled on systems undergoing
certification testing and evaluation in a controlled environment.
Enabling this will only enable TX99 support, all other modes of
operation will be disabled.
TX99 support enables Specific Absorption Rate (SAR) testing.
SAR is the unit of measurement for the amount of radio frequency(RF)
absorbed by the body when using a wireless device. The RF exposure
limits used are expressed in the terms of SAR, which is a measure
of the electric and magnetic field strength and power density for
transmitters operating at frequencies from 300 kHz to 100 GHz.
Regulatory bodies around the world require that wireless device
be evaluated to meet the RF exposure limits set forth in the
governmental SAR regulations.
config ATH9K_DFS_CERTIFIED
bool "Atheros DFS support for certified platforms"
depends on ATH9K && CFG80211_CERTIFICATION_ONUS
@ -103,26 +123,6 @@ config ATH9K_DYNACK
based on ACK frame RX timestamp, TX frame timestamp and frame
duration
config ATH9K_TX99
bool "Atheros ath9k TX99 testing support"
depends on ATH9K_DEBUGFS && CFG80211_CERTIFICATION_ONUS
default n
---help---
Say N. This should only be enabled on systems undergoing
certification testing and evaluation in a controlled environment.
Enabling this will only enable TX99 support, all other modes of
operation will be disabled.
TX99 support enables Specific Absorption Rate (SAR) testing.
SAR is the unit of measurement for the amount of radio frequency(RF)
absorbed by the body when using a wireless device. The RF exposure
limits used are expressed in the terms of SAR, which is a measure
of the electric and magnetic field strength and power density for
transmitters operating at frequencies from 300 kHz to 100 GHz.
Regulatory bodies around the world require that wireless device
be evaluated to meet the RF exposure limits set forth in the
governmental SAR regulations.
config ATH9K_WOW
bool "Wake on Wireless LAN support (EXPERIMENTAL)"
depends on ATH9K && PM


@ -4402,7 +4402,7 @@ static void ar9003_hw_selfgen_tpc_txpower(struct ath_hw *ah,
}
/* Set tx power registers to array of values passed in */
static int ar9003_hw_tx_power_regwrite(struct ath_hw *ah, u8 * pPwrArray)
int ar9003_hw_tx_power_regwrite(struct ath_hw *ah, u8 * pPwrArray)
{
#define POW_SM(_r, _s) (((_r) & 0x3f) << (_s))
/* make sure forced gain is not set */


@ -355,5 +355,6 @@ unsigned int ar9003_get_paprd_scale_factor(struct ath_hw *ah,
struct ath9k_channel *chan);
void ar9003_hw_internal_regulator_apply(struct ath_hw *ah);
int ar9003_hw_tx_power_regwrite(struct ath_hw *ah, u8 * pPwrArray);
#endif


@ -17,6 +17,7 @@
#include <linux/export.h>
#include "hw.h"
#include "ar9003_phy.h"
#include "ar9003_eeprom.h"
#define AR9300_OFDM_RATES 8
#define AR9300_HT_SS_RATES 8
@ -1009,7 +1010,7 @@ static void ar9003_hw_set_rfmode(struct ath_hw *ah,
if (IS_CHAN_A_FAST_CLOCK(ah, chan))
rfMode |= (AR_PHY_MODE_DYNAMIC | AR_PHY_MODE_DYN_CCK_DISABLE);
if (rfMode & (AR_PHY_MODE_QUARTER | AR_PHY_MODE_HALF))
if (IS_CHAN_HALF_RATE(chan) || IS_CHAN_QUARTER_RATE(chan))
REG_RMW_FIELD(ah, AR_PHY_FRAME_CTL,
AR_PHY_FRAME_CTL_CF_OVERLAP_WINDOW, 3);
@ -1840,73 +1841,14 @@ static void ar9003_hw_tx99_stop(struct ath_hw *ah)
static void ar9003_hw_tx99_set_txpower(struct ath_hw *ah, u8 txpower)
{
static s16 p_pwr_array[ar9300RateSize] = { 0 };
static u8 p_pwr_array[ar9300RateSize] = { 0 };
unsigned int i;
if (txpower <= MAX_RATE_POWER) {
for (i = 0; i < ar9300RateSize; i++)
p_pwr_array[i] = txpower;
} else {
for (i = 0; i < ar9300RateSize; i++)
p_pwr_array[i] = MAX_RATE_POWER;
}
txpower = txpower <= MAX_RATE_POWER ? txpower : MAX_RATE_POWER;
for (i = 0; i < ar9300RateSize; i++)
p_pwr_array[i] = txpower;
REG_WRITE(ah, 0xa458, 0);
REG_WRITE(ah, 0xa3c0,
ATH9K_POW_SM(p_pwr_array[ALL_TARGET_LEGACY_6_24], 24) |
ATH9K_POW_SM(p_pwr_array[ALL_TARGET_LEGACY_6_24], 16) |
ATH9K_POW_SM(p_pwr_array[ALL_TARGET_LEGACY_6_24], 8) |
ATH9K_POW_SM(p_pwr_array[ALL_TARGET_LEGACY_6_24], 0));
REG_WRITE(ah, 0xa3c4,
ATH9K_POW_SM(p_pwr_array[ALL_TARGET_LEGACY_54], 24) |
ATH9K_POW_SM(p_pwr_array[ALL_TARGET_LEGACY_48], 16) |
ATH9K_POW_SM(p_pwr_array[ALL_TARGET_LEGACY_36], 8) |
ATH9K_POW_SM(p_pwr_array[ALL_TARGET_LEGACY_6_24], 0));
REG_WRITE(ah, 0xa3c8,
ATH9K_POW_SM(p_pwr_array[ALL_TARGET_LEGACY_1L_5L], 24) |
ATH9K_POW_SM(p_pwr_array[ALL_TARGET_LEGACY_1L_5L], 16) |
ATH9K_POW_SM(p_pwr_array[ALL_TARGET_LEGACY_1L_5L], 0));
REG_WRITE(ah, 0xa3cc,
ATH9K_POW_SM(p_pwr_array[ALL_TARGET_LEGACY_11S], 24) |
ATH9K_POW_SM(p_pwr_array[ALL_TARGET_LEGACY_11L], 16) |
ATH9K_POW_SM(p_pwr_array[ALL_TARGET_LEGACY_5S], 8) |
ATH9K_POW_SM(p_pwr_array[ALL_TARGET_LEGACY_1L_5L], 0));
REG_WRITE(ah, 0xa3d0,
ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT20_5], 24) |
ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT20_4], 16) |
ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT20_1_3_9_11_17_19], 8)|
ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT20_0_8_16], 0));
REG_WRITE(ah, 0xa3d4,
ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT20_13], 24) |
ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT20_12], 16) |
ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT20_7], 8) |
ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT20_6], 0));
REG_WRITE(ah, 0xa3e4,
ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT20_21], 24) |
ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT20_20], 16) |
ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT20_15], 8) |
ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT20_14], 0));
REG_WRITE(ah, 0xa3e8,
ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT40_23], 24) |
ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT40_22], 16) |
ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT20_23], 8) |
ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT20_22], 0));
REG_WRITE(ah, 0xa3d8,
ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT40_5], 24) |
ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT40_4], 16) |
ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT40_1_3_9_11_17_19], 8) |
ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT40_0_8_16], 0));
REG_WRITE(ah, 0xa3dc,
ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT40_13], 24) |
ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT40_12], 16) |
ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT40_7], 8) |
ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT40_6], 0));
REG_WRITE(ah, 0xa3ec,
ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT40_21], 24) |
ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT40_20], 16) |
ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT40_15], 8) |
ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT40_14], 0));
ar9003_hw_tx_power_regwrite(ah, p_pwr_array);
}
static void ar9003_hw_init_txpower_cck(struct ath_hw *ah, u8 *rate_array)


@ -24,7 +24,7 @@
#define WCN36XX_HAL_BUF_SIZE 4096
#define HAL_MSG_TIMEOUT 500
#define HAL_MSG_TIMEOUT 10000
#define WCN36XX_SMSM_WLAN_TX_ENABLE 0x00000400
#define WCN36XX_SMSM_WLAN_TX_RINGS_EMPTY 0x00000200
/* The PNO version info be contained in the rsp msg */


@ -375,8 +375,9 @@ static int wil_cfg80211_scan(struct wiphy *wiphy,
return -EBUSY;
}
/* scan on P2P_DEVICE is handled as p2p search */
if (wdev->iftype == NL80211_IFTYPE_P2P_DEVICE) {
/* social scan on P2P_DEVICE is handled as p2p search */
if (wdev->iftype == NL80211_IFTYPE_P2P_DEVICE &&
wil_p2p_is_social_scan(request)) {
wil->scan_request = request;
wil->radio_wdev = wdev;
rc = wil_p2p_search(wil, request);


@ -17,7 +17,7 @@
#include "wil6210.h"
#include "trace.h"
void wil_err(struct wil6210_priv *wil, const char *fmt, ...)
void __wil_err(struct wil6210_priv *wil, const char *fmt, ...)
{
struct net_device *ndev = wil_to_ndev(wil);
struct va_format vaf = {
@ -32,7 +32,7 @@ void wil_err(struct wil6210_priv *wil, const char *fmt, ...)
va_end(args);
}
void wil_err_ratelimited(struct wil6210_priv *wil, const char *fmt, ...)
void __wil_err_ratelimited(struct wil6210_priv *wil, const char *fmt, ...)
{
if (net_ratelimit()) {
struct net_device *ndev = wil_to_ndev(wil);
@ -49,7 +49,23 @@ void wil_err_ratelimited(struct wil6210_priv *wil, const char *fmt, ...)
}
}
void wil_info(struct wil6210_priv *wil, const char *fmt, ...)
void wil_dbg_ratelimited(const struct wil6210_priv *wil, const char *fmt, ...)
{
struct va_format vaf;
va_list args;
if (!net_ratelimit())
return;
va_start(args, fmt);
vaf.fmt = fmt;
vaf.va = &args;
netdev_dbg(wil_to_ndev(wil), "%pV", &vaf);
trace_wil6210_log_dbg(&vaf);
va_end(args);
}
void __wil_info(struct wil6210_priv *wil, const char *fmt, ...)
{
struct net_device *ndev = wil_to_ndev(wil);
struct va_format vaf = {


@ -171,6 +171,8 @@ static void wil_print_ring(struct seq_file *s, const char *prefix,
int rsize;
uint i;
wil_halp_vote(wil);
wil_memcpy_fromio_32(&r, off, sizeof(r));
wil_mbox_ring_le2cpus(&r);
/*
@ -236,6 +238,7 @@ static void wil_print_ring(struct seq_file *s, const char *prefix,
}
out:
seq_puts(s, "}\n");
wil_halp_unvote(wil);
}
static int wil_mbox_debugfs_show(struct seq_file *s, void *data)
@ -500,9 +503,9 @@ static ssize_t wil_read_file_ioblob(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
enum { max_count = 4096 };
struct debugfs_blob_wrapper *blob = file->private_data;
struct wil_blob_wrapper *wil_blob = file->private_data;
loff_t pos = *ppos;
size_t available = blob->size;
size_t available = wil_blob->blob.size;
void *buf;
size_t ret;
@ -521,8 +524,9 @@ static ssize_t wil_read_file_ioblob(struct file *file, char __user *user_buf,
if (!buf)
return -ENOMEM;
wil_memcpy_fromio_32(buf, (const volatile void __iomem *)blob->data +
pos, count);
wil_memcpy_fromio_halp_vote(wil_blob->wil, buf,
(const volatile void __iomem *)
wil_blob->blob.data + pos, count);
ret = copy_to_user(user_buf, buf, count);
kfree(buf);
@ -545,9 +549,9 @@ static
struct dentry *wil_debugfs_create_ioblob(const char *name,
umode_t mode,
struct dentry *parent,
struct debugfs_blob_wrapper *blob)
struct wil_blob_wrapper *wil_blob)
{
return debugfs_create_file(name, mode, parent, blob, &fops_ioblob);
return debugfs_create_file(name, mode, parent, wil_blob, &fops_ioblob);
}
/*---reset---*/
@ -1437,6 +1441,118 @@ static const struct file_operations fops_sta = {
.llseek = seq_lseek,
};
static ssize_t wil_read_file_led_cfg(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
char buf[80];
int n;
n = snprintf(buf, sizeof(buf),
"led_id is set to %d, echo 1 to enable, 0 to disable\n",
led_id);
n = min_t(int, n, sizeof(buf));
return simple_read_from_buffer(user_buf, count, ppos,
buf, n);
}
static ssize_t wil_write_file_led_cfg(struct file *file,
const char __user *buf_,
size_t count, loff_t *ppos)
{
struct wil6210_priv *wil = file->private_data;
int val;
int rc;
rc = kstrtoint_from_user(buf_, count, 0, &val);
if (rc) {
wil_err(wil, "Invalid argument\n");
return rc;
}
wil_info(wil, "%s led %d\n", val ? "Enabling" : "Disabling", led_id);
rc = wmi_led_cfg(wil, val);
if (rc) {
wil_info(wil, "%s led %d failed\n",
val ? "Enabling" : "Disabling", led_id);
return rc;
}
return count;
}
static const struct file_operations fops_led_cfg = {
.read = wil_read_file_led_cfg,
.write = wil_write_file_led_cfg,
.open = simple_open,
};
/* led_blink_time, write:
* "<blink_on_slow> <blink_off_slow> <blink_on_med> <blink_off_med> <blink_on_fast> <blink_off_fast>
*/
static ssize_t wil_write_led_blink_time(struct file *file,
const char __user *buf,
size_t len, loff_t *ppos)
{
int rc;
char *kbuf = kmalloc(len + 1, GFP_KERNEL);
if (!kbuf)
return -ENOMEM;
rc = simple_write_to_buffer(kbuf, len, ppos, buf, len);
if (rc != len) {
kfree(kbuf);
return rc >= 0 ? -EIO : rc;
}
kbuf[len] = '\0';
rc = sscanf(kbuf, "%d %d %d %d %d %d",
&led_blink_time[WIL_LED_TIME_SLOW].on_ms,
&led_blink_time[WIL_LED_TIME_SLOW].off_ms,
&led_blink_time[WIL_LED_TIME_MED].on_ms,
&led_blink_time[WIL_LED_TIME_MED].off_ms,
&led_blink_time[WIL_LED_TIME_FAST].on_ms,
&led_blink_time[WIL_LED_TIME_FAST].off_ms);
kfree(kbuf);
if (rc < 0)
return rc;
if (rc < 6)
return -EINVAL;
return len;
}
static ssize_t wil_read_led_blink_time(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
static char text[400];
snprintf(text, sizeof(text),
"To set led blink on/off time variables write:\n"
"<blink_on_slow> <blink_off_slow> <blink_on_med> "
"<blink_off_med> <blink_on_fast> <blink_off_fast>\n"
"The current values are:\n"
"%d %d %d %d %d %d\n",
led_blink_time[WIL_LED_TIME_SLOW].on_ms,
led_blink_time[WIL_LED_TIME_SLOW].off_ms,
led_blink_time[WIL_LED_TIME_MED].on_ms,
led_blink_time[WIL_LED_TIME_MED].off_ms,
led_blink_time[WIL_LED_TIME_FAST].on_ms,
led_blink_time[WIL_LED_TIME_FAST].off_ms);
return simple_read_from_buffer(user_buf, count, ppos, text,
sizeof(text));
}
static const struct file_operations fops_led_blink_time = {
.read = wil_read_led_blink_time,
.write = wil_write_led_blink_time,
.open = simple_open,
};
/*----------------*/
static void wil6210_debugfs_init_blobs(struct wil6210_priv *wil,
struct dentry *dbg)
@ -1445,16 +1561,18 @@ static void wil6210_debugfs_init_blobs(struct wil6210_priv *wil,
char name[32];
for (i = 0; i < ARRAY_SIZE(fw_mapping); i++) {
struct debugfs_blob_wrapper *blob = &wil->blobs[i];
struct wil_blob_wrapper *wil_blob = &wil->blobs[i];
struct debugfs_blob_wrapper *blob = &wil_blob->blob;
const struct fw_map *map = &fw_mapping[i];
if (!map->name)
continue;
wil_blob->wil = wil;
blob->data = (void * __force)wil->csr + HOSTADDR(map->host);
blob->size = map->to - map->from;
snprintf(name, sizeof(name), "blob_%s", map->name);
wil_debugfs_create_ioblob(name, S_IRUGO, dbg, blob);
wil_debugfs_create_ioblob(name, S_IRUGO, dbg, wil_blob);
}
}
@ -1483,6 +1601,8 @@ static const struct {
{"link", S_IRUGO, &fops_link},
{"info", S_IRUGO, &fops_info},
{"recovery", S_IRUGO | S_IWUSR, &fops_recovery},
{"led_cfg", S_IRUGO | S_IWUSR, &fops_led_cfg},
{"led_blink_time", S_IRUGO | S_IWUSR, &fops_led_blink_time},
};
static void wil6210_debugfs_init_files(struct wil6210_priv *wil,
@ -1545,6 +1665,7 @@ static const struct dbg_off dbg_statics[] = {
{"mem_addr", S_IRUGO | S_IWUSR, (ulong)&mem_addr, doff_u32},
{"vring_idle_trsh", S_IRUGO | S_IWUSR, (ulong)&vring_idle_trsh,
doff_u32},
{"led_polarity", S_IRUGO | S_IWUSR, (ulong)&led_polarity, doff_u8},
{},
};


@ -35,15 +35,19 @@
*
*/
#define WIL6210_IRQ_DISABLE (0xFFFFFFFFUL)
#define WIL6210_IRQ_DISABLE (0xFFFFFFFFUL)
#define WIL6210_IRQ_DISABLE_NO_HALP (0xF7FFFFFFUL)
#define WIL6210_IMC_RX (BIT_DMA_EP_RX_ICR_RX_DONE | \
BIT_DMA_EP_RX_ICR_RX_HTRSH)
#define WIL6210_IMC_RX_NO_RX_HTRSH (WIL6210_IMC_RX & \
(~(BIT_DMA_EP_RX_ICR_RX_HTRSH)))
#define WIL6210_IMC_TX (BIT_DMA_EP_TX_ICR_TX_DONE | \
BIT_DMA_EP_TX_ICR_TX_DONE_N(0))
#define WIL6210_IMC_MISC (ISR_MISC_FW_READY | \
ISR_MISC_MBOX_EVT | \
ISR_MISC_FW_ERROR)
#define WIL6210_IMC_MISC_NO_HALP (ISR_MISC_FW_READY | \
ISR_MISC_MBOX_EVT | \
ISR_MISC_FW_ERROR)
#define WIL6210_IMC_MISC (WIL6210_IMC_MISC_NO_HALP | \
BIT_DMA_EP_MISC_ICR_HALP)
#define WIL6210_IRQ_PSEUDO_MASK (u32)(~(BIT_DMA_PSEUDO_CAUSE_RX | \
BIT_DMA_PSEUDO_CAUSE_TX | \
BIT_DMA_PSEUDO_CAUSE_MISC))
@ -51,6 +55,7 @@
#if defined(CONFIG_WIL6210_ISR_COR)
/* configure to Clear-On-Read mode */
#define WIL_ICR_ICC_VALUE (0xFFFFFFFFUL)
#define WIL_ICR_ICC_MISC_VALUE (0xF7FFFFFFUL)
static inline void wil_icr_clear(u32 x, void __iomem *addr)
{
@ -58,6 +63,7 @@ static inline void wil_icr_clear(u32 x, void __iomem *addr)
#else /* defined(CONFIG_WIL6210_ISR_COR) */
/* configure to Write-1-to-Clear mode */
#define WIL_ICR_ICC_VALUE (0UL)
#define WIL_ICR_ICC_MISC_VALUE (0UL)
static inline void wil_icr_clear(u32 x, void __iomem *addr)
{
@ -86,10 +92,21 @@ static void wil6210_mask_irq_rx(struct wil6210_priv *wil)
WIL6210_IRQ_DISABLE);
}
static void wil6210_mask_irq_misc(struct wil6210_priv *wil)
static void wil6210_mask_irq_misc(struct wil6210_priv *wil, bool mask_halp)
{
wil_dbg_irq(wil, "%s: mask_halp(%s)\n", __func__,
mask_halp ? "true" : "false");
wil_w(wil, RGF_DMA_EP_MISC_ICR + offsetof(struct RGF_ICR, IMS),
WIL6210_IRQ_DISABLE);
mask_halp ? WIL6210_IRQ_DISABLE : WIL6210_IRQ_DISABLE_NO_HALP);
}
static void wil6210_mask_halp(struct wil6210_priv *wil)
{
wil_dbg_irq(wil, "%s()\n", __func__);
wil_w(wil, RGF_DMA_EP_MISC_ICR + offsetof(struct RGF_ICR, IMS),
BIT_DMA_EP_MISC_ICR_HALP);
}
static void wil6210_mask_irq_pseudo(struct wil6210_priv *wil)
@ -109,14 +126,27 @@ void wil6210_unmask_irq_tx(struct wil6210_priv *wil)
void wil6210_unmask_irq_rx(struct wil6210_priv *wil)
{
bool unmask_rx_htrsh = test_bit(wil_status_fwconnected, wil->status);
wil_w(wil, RGF_DMA_EP_RX_ICR + offsetof(struct RGF_ICR, IMC),
WIL6210_IMC_RX);
unmask_rx_htrsh ? WIL6210_IMC_RX : WIL6210_IMC_RX_NO_RX_HTRSH);
}
static void wil6210_unmask_irq_misc(struct wil6210_priv *wil)
static void wil6210_unmask_irq_misc(struct wil6210_priv *wil, bool unmask_halp)
{
wil_dbg_irq(wil, "%s: unmask_halp(%s)\n", __func__,
unmask_halp ? "true" : "false");
wil_w(wil, RGF_DMA_EP_MISC_ICR + offsetof(struct RGF_ICR, IMC),
WIL6210_IMC_MISC);
unmask_halp ? WIL6210_IMC_MISC : WIL6210_IMC_MISC_NO_HALP);
}
static void wil6210_unmask_halp(struct wil6210_priv *wil)
{
wil_dbg_irq(wil, "%s()\n", __func__);
wil_w(wil, RGF_DMA_EP_MISC_ICR + offsetof(struct RGF_ICR, IMC),
BIT_DMA_EP_MISC_ICR_HALP);
}
static void wil6210_unmask_irq_pseudo(struct wil6210_priv *wil)
@ -134,7 +164,7 @@ void wil_mask_irq(struct wil6210_priv *wil)
wil6210_mask_irq_tx(wil);
wil6210_mask_irq_rx(wil);
wil6210_mask_irq_misc(wil);
wil6210_mask_irq_misc(wil, true);
wil6210_mask_irq_pseudo(wil);
}
@ -147,12 +177,12 @@ void wil_unmask_irq(struct wil6210_priv *wil)
wil_w(wil, RGF_DMA_EP_TX_ICR + offsetof(struct RGF_ICR, ICC),
WIL_ICR_ICC_VALUE);
wil_w(wil, RGF_DMA_EP_MISC_ICR + offsetof(struct RGF_ICR, ICC),
WIL_ICR_ICC_VALUE);
WIL_ICR_ICC_MISC_VALUE);
wil6210_unmask_irq_pseudo(wil);
wil6210_unmask_irq_tx(wil);
wil6210_unmask_irq_rx(wil);
wil6210_unmask_irq_misc(wil);
wil6210_unmask_irq_misc(wil, true);
}
void wil_configure_interrupt_moderation(struct wil6210_priv *wil)
@ -228,11 +258,8 @@ static irqreturn_t wil6210_irq_rx(int irq, void *cookie)
*/
if (likely(isr & (BIT_DMA_EP_RX_ICR_RX_DONE |
BIT_DMA_EP_RX_ICR_RX_HTRSH))) {
wil_dbg_irq(wil, "RX done\n");
if (unlikely(isr & BIT_DMA_EP_RX_ICR_RX_HTRSH))
wil_err_ratelimited(wil,
"Received \"Rx buffer is in risk of overflow\" interrupt\n");
wil_dbg_irq(wil, "RX done / RX_HTRSH received, ISR (0x%x)\n",
isr);
isr &= ~(BIT_DMA_EP_RX_ICR_RX_DONE |
BIT_DMA_EP_RX_ICR_RX_HTRSH);
@ -344,7 +371,7 @@ static irqreturn_t wil6210_irq_misc(int irq, void *cookie)
return IRQ_NONE;
}
wil6210_mask_irq_misc(wil);
wil6210_mask_irq_misc(wil, false);
if (isr & ISR_MISC_FW_ERROR) {
u32 fw_assert_code = wil_r(wil, RGF_FW_ASSERT_CODE);
@ -372,12 +399,19 @@ static irqreturn_t wil6210_irq_misc(int irq, void *cookie)
isr &= ~ISR_MISC_FW_READY;
}
if (isr & BIT_DMA_EP_MISC_ICR_HALP) {
wil_dbg_irq(wil, "%s: HALP IRQ invoked\n", __func__);
wil6210_mask_halp(wil);
isr &= ~BIT_DMA_EP_MISC_ICR_HALP;
complete(&wil->halp.comp);
}
wil->isr_misc = isr;
if (isr) {
return IRQ_WAKE_THREAD;
} else {
wil6210_unmask_irq_misc(wil);
wil6210_unmask_irq_misc(wil, false);
return IRQ_HANDLED;
}
}
@ -414,7 +448,7 @@ static irqreturn_t wil6210_irq_misc_thread(int irq, void *cookie)
wil->isr_misc = 0;
wil6210_unmask_irq_misc(wil);
wil6210_unmask_irq_misc(wil, false);
return IRQ_HANDLED;
}
@ -556,6 +590,23 @@ void wil6210_clear_irq(struct wil6210_priv *wil)
wmb(); /* make sure write completed */
}
void wil6210_set_halp(struct wil6210_priv *wil)
{
wil_dbg_misc(wil, "%s()\n", __func__);
wil_w(wil, RGF_DMA_EP_MISC_ICR + offsetof(struct RGF_ICR, ICS),
BIT_DMA_EP_MISC_ICR_HALP);
}
void wil6210_clear_halp(struct wil6210_priv *wil)
{
wil_dbg_misc(wil, "%s()\n", __func__);
wil_w(wil, RGF_DMA_EP_MISC_ICR + offsetof(struct RGF_ICR, ICR),
BIT_DMA_EP_MISC_ICR_HALP);
wil6210_unmask_halp(wil);
}
int wil6210_init_irq(struct wil6210_priv *wil, int irq, bool use_msi)
{
int rc;


@ -23,6 +23,8 @@
#include "wmi.h"
#include "boot_loader.h"
#define WAIT_FOR_HALP_VOTE_MS 100
bool debug_fw; /* = false; */
module_param(debug_fw, bool, S_IRUGO);
MODULE_PARM_DESC(debug_fw, " do not perform card reset. For FW debug");
@ -132,6 +134,14 @@ void wil_memcpy_fromio_32(void *dst, const volatile void __iomem *src,
*d++ = __raw_readl(s++);
}
void wil_memcpy_fromio_halp_vote(struct wil6210_priv *wil, void *dst,
const volatile void __iomem *src, size_t count)
{
wil_halp_vote(wil);
wil_memcpy_fromio_32(dst, src, count);
wil_halp_unvote(wil);
}
void wil_memcpy_toio_32(volatile void __iomem *dst, const void *src,
size_t count)
{
@ -142,6 +152,15 @@ void wil_memcpy_toio_32(volatile void __iomem *dst, const void *src,
__raw_writel(*s++, d++);
}
void wil_memcpy_toio_halp_vote(struct wil6210_priv *wil,
volatile void __iomem *dst,
const void *src, size_t count)
{
wil_halp_vote(wil);
wil_memcpy_toio_32(dst, src, count);
wil_halp_unvote(wil);
}
static void wil_disconnect_cid(struct wil6210_priv *wil, int cid,
u16 reason_code, bool from_event)
__acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock)
@ -194,6 +213,18 @@ __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock)
memset(&sta->stats, 0, sizeof(sta->stats));
}
static bool wil_ap_is_connected(struct wil6210_priv *wil)
{
int i;
for (i = 0; i < ARRAY_SIZE(wil->sta); i++) {
if (wil->sta[i].status == wil_sta_connected)
return true;
}
return false;
}
static void _wil6210_disconnect(struct wil6210_priv *wil, const u8 *bssid,
u16 reason_code, bool from_event)
{
@ -247,6 +278,11 @@ static void _wil6210_disconnect(struct wil6210_priv *wil, const u8 *bssid,
}
clear_bit(wil_status_fwconnecting, wil->status);
break;
case NL80211_IFTYPE_AP:
case NL80211_IFTYPE_P2P_GO:
if (!wil_ap_is_connected(wil))
clear_bit(wil_status_fwconnected, wil->status);
break;
default:
break;
}
@ -457,9 +493,11 @@ int wil_priv_init(struct wil6210_priv *wil)
mutex_init(&wil->wmi_mutex);
mutex_init(&wil->probe_client_mutex);
mutex_init(&wil->p2p_wdev_mutex);
mutex_init(&wil->halp.lock);
init_completion(&wil->wmi_ready);
init_completion(&wil->wmi_call);
init_completion(&wil->halp.comp);
wil->bcast_vring = -1;
setup_timer(&wil->connect_timer, wil_connect_timer_fn, (ulong)wil);
@ -555,11 +593,10 @@ static inline void wil_release_cpu(struct wil6210_priv *wil)
static void wil_set_oob_mode(struct wil6210_priv *wil, bool enable)
{
wil_info(wil, "%s: enable=%d\n", __func__, enable);
if (enable) {
if (enable)
wil_s(wil, RGF_USER_USAGE_6, BIT_USER_OOB_MODE);
} else {
else
wil_c(wil, RGF_USER_USAGE_6, BIT_USER_OOB_MODE);
}
}
static int wil_target_reset(struct wil6210_priv *wil)
@ -804,6 +841,9 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
wil6210_disconnect(wil, NULL, WLAN_REASON_DEAUTH_LEAVING, false);
wil_bcast_fini(wil);
/* Disable device led before reset*/
wmi_led_cfg(wil, false);
/* prevent NAPI from being scheduled and prevent wmi commands */
mutex_lock(&wil->wmi_mutex);
bitmap_zero(wil->status, wil_status_last);
@ -871,6 +911,7 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
wil->ap_isolate = 0;
reinit_completion(&wil->wmi_ready);
reinit_completion(&wil->wmi_call);
reinit_completion(&wil->halp.comp);
if (load_fw) {
wil_configure_interrupt_moderation(wil);
@ -1061,3 +1102,51 @@ int wil_find_cid(struct wil6210_priv *wil, const u8 *mac)
return rc;
}
void wil_halp_vote(struct wil6210_priv *wil)
{
unsigned long rc;
unsigned long to_jiffies = msecs_to_jiffies(WAIT_FOR_HALP_VOTE_MS);
mutex_lock(&wil->halp.lock);
wil_dbg_misc(wil, "%s: start, HALP ref_cnt (%d)\n", __func__,
wil->halp.ref_cnt);
if (++wil->halp.ref_cnt == 1) {
wil6210_set_halp(wil);
rc = wait_for_completion_timeout(&wil->halp.comp, to_jiffies);
if (!rc)
wil_err(wil, "%s: HALP vote timed out\n", __func__);
else
wil_dbg_misc(wil,
"%s: HALP vote completed after %d ms\n",
__func__,
jiffies_to_msecs(to_jiffies - rc));
}
wil_dbg_misc(wil, "%s: end, HALP ref_cnt (%d)\n", __func__,
wil->halp.ref_cnt);
mutex_unlock(&wil->halp.lock);
}
void wil_halp_unvote(struct wil6210_priv *wil)
{
WARN_ON(wil->halp.ref_cnt == 0);
mutex_lock(&wil->halp.lock);
wil_dbg_misc(wil, "%s: start, HALP ref_cnt (%d)\n", __func__,
wil->halp.ref_cnt);
if (--wil->halp.ref_cnt == 0) {
wil6210_clear_halp(wil);
wil_dbg_misc(wil, "%s: HALP unvote\n", __func__);
}
wil_dbg_misc(wil, "%s: end, HALP ref_cnt (%d)\n", __func__,
wil->halp.ref_cnt);
mutex_unlock(&wil->halp.lock);
}


@ -22,6 +22,12 @@
#define P2P_SEARCH_DURATION_MS 500
#define P2P_DEFAULT_BI 100
bool wil_p2p_is_social_scan(struct cfg80211_scan_request *request)
{
return (request->n_channels == 1) &&
(request->channels[0]->hw_value == P2P_DMG_SOCIAL_CHANNEL);
}
void wil_p2p_discovery_timer_fn(ulong x)
{
struct wil6210_priv *wil = (void *)x;

View File

@ -1759,7 +1759,7 @@ netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev)
goto drop;
}
if (unlikely(!test_bit(wil_status_fwconnected, wil->status))) {
wil_err_ratelimited(wil, "FW not connected\n");
wil_dbg_ratelimited(wil, "FW not connected, packet dropped\n");
goto drop;
}
if (unlikely(wil->wdev->iftype == NL80211_IFTYPE_MONITOR)) {

View File

@ -168,6 +168,7 @@ struct RGF_ICR {
#define RGF_DMA_EP_MISC_ICR (0x881bec) /* struct RGF_ICR */
#define BIT_DMA_EP_MISC_ICR_RX_HTRSH BIT(0)
#define BIT_DMA_EP_MISC_ICR_TX_NO_ACT BIT(1)
#define BIT_DMA_EP_MISC_ICR_HALP BIT(27)
#define BIT_DMA_EP_MISC_ICR_FW_INT(n) BIT(28+n) /* n = [0..3] */
/* Legacy interrupt moderation control (before Sparrow v2)*/
@ -534,6 +535,41 @@ struct pmc_ctx {
int descriptor_size;
};
struct wil_halp {
struct mutex lock; /* protect halp ref_cnt */
unsigned int ref_cnt;
struct completion comp;
};
struct wil_blob_wrapper {
struct wil6210_priv *wil;
struct debugfs_blob_wrapper blob;
};
#define WIL_LED_MAX_ID (2)
#define WIL_LED_INVALID_ID (0xF)
#define WIL_LED_BLINK_ON_SLOW_MS (300)
#define WIL_LED_BLINK_OFF_SLOW_MS (300)
#define WIL_LED_BLINK_ON_MED_MS (200)
#define WIL_LED_BLINK_OFF_MED_MS (200)
#define WIL_LED_BLINK_ON_FAST_MS (100)
#define WIL_LED_BLINK_OFF_FAST_MS (100)
enum {
WIL_LED_TIME_SLOW = 0,
WIL_LED_TIME_MED,
WIL_LED_TIME_FAST,
WIL_LED_TIME_LAST,
};
struct blink_on_off_time {
u32 on_ms;
u32 off_ms;
};
extern struct blink_on_off_time led_blink_time[WIL_LED_TIME_LAST];
extern u8 led_id;
extern u8 led_polarity;
struct wil6210_priv {
struct pci_dev *pdev;
struct wireless_dev *wdev;
@ -606,7 +642,7 @@ struct wil6210_priv {
atomic_t isr_count_rx, isr_count_tx;
/* debugfs */
struct dentry *debug;
struct debugfs_blob_wrapper blobs[ARRAY_SIZE(fw_mapping)];
struct wil_blob_wrapper blobs[ARRAY_SIZE(fw_mapping)];
u8 discovery_mode;
void *platform_handle;
@ -622,6 +658,10 @@ struct wil6210_priv {
struct wireless_dev *p2p_wdev;
struct mutex p2p_wdev_mutex; /* protect @p2p_wdev */
struct wireless_dev *radio_wdev;
/* High Access Latency Policy voting */
struct wil_halp halp;
};
#define wil_to_wiphy(i) (i->wdev->wiphy)
@ -635,11 +675,13 @@ struct wil6210_priv {
__printf(2, 3)
void wil_dbg_trace(struct wil6210_priv *wil, const char *fmt, ...);
__printf(2, 3)
void wil_err(struct wil6210_priv *wil, const char *fmt, ...);
void __wil_err(struct wil6210_priv *wil, const char *fmt, ...);
__printf(2, 3)
void wil_err_ratelimited(struct wil6210_priv *wil, const char *fmt, ...);
void __wil_err_ratelimited(struct wil6210_priv *wil, const char *fmt, ...);
__printf(2, 3)
void wil_info(struct wil6210_priv *wil, const char *fmt, ...);
void __wil_info(struct wil6210_priv *wil, const char *fmt, ...);
__printf(2, 3)
void wil_dbg_ratelimited(const struct wil6210_priv *wil, const char *fmt, ...);
#define wil_dbg(wil, fmt, arg...) do { \
netdev_dbg(wil_to_ndev(wil), fmt, ##arg); \
wil_dbg_trace(wil, fmt, ##arg); \
@ -650,6 +692,10 @@ void wil_info(struct wil6210_priv *wil, const char *fmt, ...);
#define wil_dbg_wmi(wil, fmt, arg...) wil_dbg(wil, "DBG[ WMI]" fmt, ##arg)
#define wil_dbg_misc(wil, fmt, arg...) wil_dbg(wil, "DBG[MISC]" fmt, ##arg)
#define wil_dbg_pm(wil, fmt, arg...) wil_dbg(wil, "DBG[ PM ]" fmt, ##arg)
#define wil_err(wil, fmt, arg...) __wil_err(wil, "%s: " fmt, __func__, ##arg)
#define wil_info(wil, fmt, arg...) __wil_info(wil, "%s: " fmt, __func__, ##arg)
#define wil_err_ratelimited(wil, fmt, arg...) \
__wil_err_ratelimited(wil, "%s: " fmt, __func__, ##arg)
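With these wrappers, call sites keep the familiar wil_err()/wil_info() spelling while every message is automatically prefixed with the calling function's name. For example:

	wil_err(wil, "WMI ring full\n");
	/* expands to __wil_err(wil, "%s: " "WMI ring full\n", __func__)
	 * and logs something like "__wmi_send: WMI ring full"
	 */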
/* target operations */
/* register read */
@ -707,6 +753,12 @@ void wil_memcpy_fromio_32(void *dst, const volatile void __iomem *src,
size_t count);
void wil_memcpy_toio_32(volatile void __iomem *dst, const void *src,
size_t count);
void wil_memcpy_fromio_halp_vote(struct wil6210_priv *wil, void *dst,
const volatile void __iomem *src,
size_t count);
void wil_memcpy_toio_halp_vote(struct wil6210_priv *wil,
volatile void __iomem *dst,
const void *src, size_t count);
void *wil_if_alloc(struct device *dev);
void wil_if_free(struct wil6210_priv *wil);
@ -772,6 +824,7 @@ void wil_disable_irq(struct wil6210_priv *wil);
void wil_enable_irq(struct wil6210_priv *wil);
/* P2P */
bool wil_p2p_is_social_scan(struct cfg80211_scan_request *request);
void wil_p2p_discovery_timer_fn(ulong x);
int wil_p2p_search(struct wil6210_priv *wil,
struct cfg80211_scan_request *request);
@ -805,6 +858,7 @@ int wmi_set_mac_address(struct wil6210_priv *wil, void *addr);
int wmi_pcp_start(struct wil6210_priv *wil, int bi, u8 wmi_nettype,
u8 chan, u8 hidden_ssid, u8 is_go);
int wmi_pcp_stop(struct wil6210_priv *wil);
int wmi_led_cfg(struct wil6210_priv *wil, bool enable);
void wil6210_disconnect(struct wil6210_priv *wil, const u8 *bssid,
u16 reason_code, bool from_event);
void wil_probe_client_flush(struct wil6210_priv *wil);
@ -842,4 +896,9 @@ int wil_resume(struct wil6210_priv *wil, bool is_runtime);
int wil_fw_copy_crash_dump(struct wil6210_priv *wil, void *dest, u32 size);
void wil_fw_core_dump(struct wil6210_priv *wil);
void wil_halp_vote(struct wil6210_priv *wil);
void wil_halp_unvote(struct wil6210_priv *wil);
void wil6210_set_halp(struct wil6210_priv *wil);
void wil6210_clear_halp(struct wil6210_priv *wil);
#endif /* __WIL6210_H__ */

View File

@ -32,6 +32,11 @@ module_param(agg_wsize, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(agg_wsize, " Window size for Tx Block Ack after connect;"
" 0 - use default; < 0 - don't auto-establish");
u8 led_id = WIL_LED_INVALID_ID;
module_param(led_id, byte, S_IRUGO);
MODULE_PARM_DESC(led_id,
" 60G device led enablement. Set the led ID (0-2) to enable");
/**
* WMI event receiving - theory of operations
*
@ -94,6 +99,14 @@ const struct fw_map fw_mapping[] = {
*/
};
struct blink_on_off_time led_blink_time[] = {
{WIL_LED_BLINK_ON_SLOW_MS, WIL_LED_BLINK_OFF_SLOW_MS},
{WIL_LED_BLINK_ON_MED_MS, WIL_LED_BLINK_OFF_MED_MS},
{WIL_LED_BLINK_ON_FAST_MS, WIL_LED_BLINK_OFF_FAST_MS},
};
u8 led_polarity = LED_POLARITY_LOW_ACTIVE;
/**
* return AHB address for given firmware/ucode internal (linker) address
* @x - internal address
@ -194,6 +207,7 @@ static int __wmi_send(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len)
void __iomem *dst;
void __iomem *head = wmi_addr(wil, r->head);
uint retry;
int rc = 0;
if (sizeof(cmd) + len > r->entry_size) {
wil_err(wil, "WMI size too large: %d bytes, max is %d\n",
@ -212,6 +226,9 @@ static int __wmi_send(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len)
wil_err(wil, "WMI head is garbage: 0x%08x\n", r->head);
return -EINVAL;
}
wil_halp_vote(wil);
/* read Tx head till it is not busy */
for (retry = 5; retry > 0; retry--) {
wil_memcpy_fromio_32(&d_head, head, sizeof(d_head));
@ -221,7 +238,8 @@ static int __wmi_send(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len)
}
if (d_head.sync != 0) {
wil_err(wil, "WMI head busy\n");
return -EBUSY;
rc = -EBUSY;
goto out;
}
/* next head */
next_head = r->base + ((r->head - r->base + sizeof(d_head)) % r->size);
@ -230,7 +248,8 @@ static int __wmi_send(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len)
for (retry = 5; retry > 0; retry--) {
if (!test_bit(wil_status_fwready, wil->status)) {
wil_err(wil, "WMI: cannot send command while FW not ready\n");
return -EAGAIN;
rc = -EAGAIN;
goto out;
}
r->tail = wil_r(wil, RGF_MBOX +
offsetof(struct wil6210_mbox_ctl, tx.tail));
@ -240,13 +259,15 @@ static int __wmi_send(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len)
}
if (next_head == r->tail) {
wil_err(wil, "WMI ring full\n");
return -EBUSY;
rc = -EBUSY;
goto out;
}
dst = wmi_buffer(wil, d_head.addr);
if (!dst) {
wil_err(wil, "invalid WMI buffer: 0x%08x\n",
le32_to_cpu(d_head.addr));
return -EINVAL;
rc = -EAGAIN;
goto out;
}
cmd.hdr.seq = cpu_to_le16(++wil->wmi_seq);
/* set command */
@ -269,7 +290,9 @@ static int __wmi_send(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len)
wil_w(wil, RGF_USER_USER_ICR + offsetof(struct RGF_ICR, ICS),
SW_INT_MBOX);
return 0;
out:
wil_halp_unvote(wil);
return rc;
}
int wmi_send(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len)
@ -961,6 +984,60 @@ int wmi_set_mac_address(struct wil6210_priv *wil, void *addr)
return wmi_send(wil, WMI_SET_MAC_ADDRESS_CMDID, &cmd, sizeof(cmd));
}
int wmi_led_cfg(struct wil6210_priv *wil, bool enable)
{
int rc = 0;
struct wmi_led_cfg_cmd cmd = {
.led_mode = enable,
.id = led_id,
.slow_blink_cfg.blink_on =
cpu_to_le32(led_blink_time[WIL_LED_TIME_SLOW].on_ms),
.slow_blink_cfg.blink_off =
cpu_to_le32(led_blink_time[WIL_LED_TIME_SLOW].off_ms),
.medium_blink_cfg.blink_on =
cpu_to_le32(led_blink_time[WIL_LED_TIME_MED].on_ms),
.medium_blink_cfg.blink_off =
cpu_to_le32(led_blink_time[WIL_LED_TIME_MED].off_ms),
.fast_blink_cfg.blink_on =
cpu_to_le32(led_blink_time[WIL_LED_TIME_FAST].on_ms),
.fast_blink_cfg.blink_off =
cpu_to_le32(led_blink_time[WIL_LED_TIME_FAST].off_ms),
.led_polarity = led_polarity,
};
struct {
struct wmi_cmd_hdr wmi;
struct wmi_led_cfg_done_event evt;
} __packed reply;
if (led_id == WIL_LED_INVALID_ID)
goto out;
if (led_id > WIL_LED_MAX_ID) {
wil_err(wil, "Invalid led id %d\n", led_id);
rc = -EINVAL;
goto out;
}
wil_dbg_wmi(wil,
"%s led %d\n",
enable ? "enabling" : "disabling", led_id);
rc = wmi_call(wil, WMI_LED_CFG_CMDID, &cmd, sizeof(cmd),
WMI_LED_CFG_DONE_EVENTID, &reply, sizeof(reply),
100);
if (rc)
goto out;
if (reply.evt.status) {
wil_err(wil, "led %d cfg failed with status %d\n",
led_id, le32_to_cpu(reply.evt.status));
rc = -EINVAL;
}
out:
return rc;
}
int wmi_pcp_start(struct wil6210_priv *wil, int bi, u8 wmi_nettype,
u8 chan, u8 hidden_ssid, u8 is_go)
{
@ -1003,11 +1080,21 @@ int wmi_pcp_start(struct wil6210_priv *wil, int bi, u8 wmi_nettype,
if (reply.evt.status != WMI_FW_STATUS_SUCCESS)
rc = -EINVAL;
if (wmi_nettype != WMI_NETTYPE_P2P)
/* Don't fail due to error in the led configuration */
wmi_led_cfg(wil, true);
return rc;
}
int wmi_pcp_stop(struct wil6210_priv *wil)
{
int rc;
rc = wmi_led_cfg(wil, false);
if (rc)
return rc;
return wmi_call(wil, WMI_PCP_STOP_CMDID, NULL, 0,
WMI_PCP_STOPPED_EVENTID, NULL, 0, 20);
}

View File

@ -129,6 +129,7 @@ enum wmi_command_id {
WMI_THERMAL_THROTTLING_GET_STATUS_CMDID = 0x855,
WMI_OTP_READ_CMDID = 0x856,
WMI_OTP_WRITE_CMDID = 0x857,
WMI_LED_CFG_CMDID = 0x858,
/* Performance monitoring commands */
WMI_BF_CTRL_CMDID = 0x862,
WMI_NOTIFY_REQ_CMDID = 0x863,
@ -868,6 +869,7 @@ enum wmi_event_id {
WMI_RX_MGMT_PACKET_EVENTID = 0x1840,
WMI_TX_MGMT_PACKET_EVENTID = 0x1841,
WMI_OTP_READ_RESULT_EVENTID = 0x1856,
WMI_LED_CFG_DONE_EVENTID = 0x1858,
/* Performance monitoring events */
WMI_DATA_PORT_OPEN_EVENTID = 0x1860,
WMI_WBE_LINK_DOWN_EVENTID = 0x1861,
@ -1349,4 +1351,63 @@ enum wmi_hidden_ssid {
WMI_HIDDEN_SSID_CLEAR = 0xFE,
};
/* WMI_LED_CFG_CMDID
*
* Configure LED On/Off/Blinking operation
*
* Returned events:
* - WMI_LED_CFG_DONE_EVENTID
*/
enum led_mode {
LED_DISABLE = 0x00,
LED_ENABLE = 0x01,
};
/* The LED names as described
* in the HW schematics.
*/
enum wmi_led_id {
WMI_LED_WLAN = 0x00,
WMI_LED_WPAN = 0x01,
WMI_LED_WWAN = 0x02,
};
/* Led polarity mode. */
enum wmi_led_polarity {
LED_POLARITY_HIGH_ACTIVE = 0x00,
LED_POLARITY_LOW_ACTIVE = 0x01,
};
/* Combination of on and off
* creates the blinking period
*/
struct wmi_led_blink_mode {
__le32 blink_on;
__le32 blink_off;
} __packed;
/* WMI_LED_CFG_CMDID */
struct wmi_led_cfg_cmd {
/* enum led_mode_e */
u8 led_mode;
/* enum wmi_led_id_e */
u8 id;
/* slow speed blinking combination */
struct wmi_led_blink_mode slow_blink_cfg;
/* medium speed blinking combination */
struct wmi_led_blink_mode medium_blink_cfg;
/* high speed blinking combination */
struct wmi_led_blink_mode fast_blink_cfg;
/* polarity of the led */
u8 led_polarity;
/* reserved */
u8 reserved;
} __packed;
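A quick sanity check on the wire layout (an illustrative addition, not part of the patch): two u8 fields, three 8-byte blink configurations and two trailing u8 fields give a 28-byte packed command.

	BUILD_BUG_ON(sizeof(struct wmi_led_cfg_cmd) != 28);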
/* WMI_LED_CFG_DONE_EVENTID */
struct wmi_led_cfg_done_event {
/* led config status */
__le32 status;
} __packed;
#endif /* __WILOCITY_WMI_H__ */

View File

@ -2275,7 +2275,7 @@ static int atmel_set_freq(struct net_device *dev,
fwrq->m = ieee80211_frequency_to_channel(f);
}
/* Setting by channel number */
if ((fwrq->m > 1000) || (fwrq->e > 0))
if (fwrq->m < 0 || fwrq->m > 1000 || fwrq->e > 0)
rc = -EOPNOTSUPP;
else {
int channel = fwrq->m;

View File

@ -1098,6 +1098,7 @@ static const struct sdio_device_id brcmf_sdmmc_ids[] = {
BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43430),
BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4345),
BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4354),
BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4356),
{ /* end: all zeroes */ }
};
MODULE_DEVICE_TABLE(sdio, brcmf_sdmmc_ids);

View File

@ -1333,6 +1333,7 @@ bool brcmf_chip_sr_capable(struct brcmf_chip *pub)
switch (pub->chip) {
case BRCM_CC_4354_CHIP_ID:
case BRCM_CC_4356_CHIP_ID:
/* explicitly check SR engine enable bit */
pmu_cc3_mask = BIT(2);
/* fall-through */

View File

@ -609,6 +609,7 @@ BRCMF_FW_NVRAM_DEF(4339, "brcmfmac4339-sdio.bin", "brcmfmac4339-sdio.txt");
BRCMF_FW_NVRAM_DEF(43430, "brcmfmac43430-sdio.bin", "brcmfmac43430-sdio.txt");
BRCMF_FW_NVRAM_DEF(43455, "brcmfmac43455-sdio.bin", "brcmfmac43455-sdio.txt");
BRCMF_FW_NVRAM_DEF(4354, "brcmfmac4354-sdio.bin", "brcmfmac4354-sdio.txt");
BRCMF_FW_NVRAM_DEF(4356, "brcmfmac4356-sdio.bin", "brcmfmac4356-sdio.txt");
static struct brcmf_firmware_mapping brcmf_sdio_fwnames[] = {
BRCMF_FW_NVRAM_ENTRY(BRCM_CC_43143_CHIP_ID, 0xFFFFFFFF, 43143),
@ -624,7 +625,8 @@ static struct brcmf_firmware_mapping brcmf_sdio_fwnames[] = {
BRCMF_FW_NVRAM_ENTRY(BRCM_CC_4339_CHIP_ID, 0xFFFFFFFF, 4339),
BRCMF_FW_NVRAM_ENTRY(BRCM_CC_43430_CHIP_ID, 0xFFFFFFFF, 43430),
BRCMF_FW_NVRAM_ENTRY(BRCM_CC_4345_CHIP_ID, 0xFFFFFFC0, 43455),
BRCMF_FW_NVRAM_ENTRY(BRCM_CC_4354_CHIP_ID, 0xFFFFFFFF, 4354)
BRCMF_FW_NVRAM_ENTRY(BRCM_CC_4354_CHIP_ID, 0xFFFFFFFF, 4354),
BRCMF_FW_NVRAM_ENTRY(BRCM_CC_4356_CHIP_ID, 0xFFFFFFFF, 4356)
};
static void pkt_align(struct sk_buff *p, int len, int align)

View File

@ -5794,7 +5794,7 @@ static int airo_set_freq(struct net_device *dev,
fwrq->m = ieee80211_frequency_to_channel(f);
}
/* Setting by channel number */
if((fwrq->m > 1000) || (fwrq->e > 0))
if (fwrq->m < 0 || fwrq->m > 1000 || fwrq->e > 0)
rc = -EOPNOTSUPP;
else {
int channel = fwrq->m;

View File

@ -134,12 +134,6 @@ config IWLWIFI_DEBUGFS
is a low-impact option that allows getting insight into the
driver's state at runtime.
config IWLWIFI_DEBUG_EXPERIMENTAL_UCODE
bool "Experimental uCode support"
depends on IWLWIFI_DEBUG
---help---
Enable use of experimental ucode for testing and debugging.
config IWLWIFI_DEVICE_TRACING
bool "iwlwifi device access tracing"
depends on EVENT_TRACING

View File

@ -52,7 +52,7 @@
static const struct iwl_base_params iwl1000_base_params = {
.num_of_queues = IWLAGN_NUM_QUEUES,
.eeprom_size = OTP_LOW_IMAGE_SIZE,
.pll_cfg_val = CSR50_ANA_PLL_CFG_VAL,
.pll_cfg = true,
.max_ll_items = OTP_MAX_LL_ITEMS_1000,
.shadow_ram_support = false,
.led_compensation = 51,

View File

@ -62,7 +62,6 @@
static const struct iwl_base_params iwl2000_base_params = {
.eeprom_size = OTP_LOW_IMAGE_SIZE,
.num_of_queues = IWLAGN_NUM_QUEUES,
.pll_cfg_val = 0,
.max_ll_items = OTP_MAX_LL_ITEMS_2x00,
.shadow_ram_support = true,
.led_compensation = 51,
@ -76,7 +75,6 @@ static const struct iwl_base_params iwl2000_base_params = {
static const struct iwl_base_params iwl2030_base_params = {
.eeprom_size = OTP_LOW_IMAGE_SIZE,
.num_of_queues = IWLAGN_NUM_QUEUES,
.pll_cfg_val = 0,
.max_ll_items = OTP_MAX_LL_ITEMS_2x00,
.shadow_ram_support = true,
.led_compensation = 57,

View File

@ -53,7 +53,7 @@
static const struct iwl_base_params iwl5000_base_params = {
.eeprom_size = IWLAGN_EEPROM_IMG_SIZE,
.num_of_queues = IWLAGN_NUM_QUEUES,
.pll_cfg_val = CSR50_ANA_PLL_CFG_VAL,
.pll_cfg = true,
.led_compensation = 51,
.wd_timeout = IWL_WATCHDOG_DISABLED,
.max_event_log_size = 512,

View File

@ -71,7 +71,6 @@
static const struct iwl_base_params iwl6000_base_params = {
.eeprom_size = OTP_LOW_IMAGE_SIZE,
.num_of_queues = IWLAGN_NUM_QUEUES,
.pll_cfg_val = 0,
.max_ll_items = OTP_MAX_LL_ITEMS_6x00,
.shadow_ram_support = true,
.led_compensation = 51,
@ -84,7 +83,6 @@ static const struct iwl_base_params iwl6000_base_params = {
static const struct iwl_base_params iwl6050_base_params = {
.eeprom_size = OTP_LOW_IMAGE_SIZE,
.num_of_queues = IWLAGN_NUM_QUEUES,
.pll_cfg_val = 0,
.max_ll_items = OTP_MAX_LL_ITEMS_6x50,
.shadow_ram_support = true,
.led_compensation = 51,
@ -97,7 +95,6 @@ static const struct iwl_base_params iwl6050_base_params = {
static const struct iwl_base_params iwl6000_g2_base_params = {
.eeprom_size = OTP_LOW_IMAGE_SIZE,
.num_of_queues = IWLAGN_NUM_QUEUES,
.pll_cfg_val = 0,
.max_ll_items = OTP_MAX_LL_ITEMS_6x00,
.shadow_ram_support = true,
.led_compensation = 57,

View File

@ -122,7 +122,6 @@
static const struct iwl_base_params iwl7000_base_params = {
.eeprom_size = OTP_LOW_IMAGE_SIZE_FAMILY_7000,
.num_of_queues = 31,
.pll_cfg_val = 0,
.shadow_ram_support = true,
.led_compensation = 57,
.wd_timeout = IWL_LONG_WD_TIMEOUT,

View File

@ -112,7 +112,6 @@
static const struct iwl_base_params iwl8000_base_params = {
.eeprom_size = OTP_LOW_IMAGE_SIZE_FAMILY_8000,
.num_of_queues = 31,
.pll_cfg_val = 0,
.shadow_ram_support = true,
.led_compensation = 57,
.wd_timeout = IWL_LONG_WD_TIMEOUT,
@ -237,6 +236,20 @@ const struct iwl_cfg iwl8260_2ac_sdio_cfg = {
.max_vht_ampdu_exponent = MAX_VHT_AMPDU_EXPONENT_8260_SDIO,
};
const struct iwl_cfg iwl8265_2ac_sdio_cfg = {
.name = "Intel(R) Dual Band Wireless-AC 8265",
.fw_name_pre = IWL8265_FW_PRE,
IWL_DEVICE_8265,
.ht_params = &iwl8000_ht_params,
.nvm_ver = IWL8000_NVM_VERSION,
.nvm_calib_ver = IWL8000_TX_POWER_VERSION,
.max_rx_agg_size = MAX_RX_AGG_SIZE_8260_SDIO,
.max_tx_agg_size = MAX_TX_AGG_SIZE_8260_SDIO,
.disable_dummy_notification = true,
.max_ht_ampdu_exponent = MAX_HT_AMPDU_EXPONENT_8260_SDIO,
.max_vht_ampdu_exponent = MAX_VHT_AMPDU_EXPONENT_8260_SDIO,
};
const struct iwl_cfg iwl4165_2ac_sdio_cfg = {
.name = "Intel(R) Dual Band Wireless-AC 4165",
.fw_name_pre = IWL8000_FW_PRE,

View File

@ -72,16 +72,21 @@
#define IWL9000_SMEM_OFFSET 0x400000
#define IWL9000_SMEM_LEN 0x68000
#define IWL9000_FW_PRE "iwlwifi-9000-"
#define IWL9000_FW_PRE "iwlwifi-9000-pu-a0-lc-a0-"
#define IWL9260_FW_PRE "iwlwifi-9260-th-a0-jf-a0-"
#define IWL9260LC_FW_PRE "iwlwifi-9260-th-a0-lc-a0-"
#define IWL9000_MODULE_FIRMWARE(api) \
IWL9000_FW_PRE "-" __stringify(api) ".ucode"
#define IWL9260_MODULE_FIRMWARE(api) \
IWL9260_FW_PRE "-" __stringify(api) ".ucode"
#define IWL9260LC_MODULE_FIRMWARE(api) \
IWL9260LC_FW_PRE "-" __stringify(api) ".ucode"
#define NVM_HW_SECTION_NUM_FAMILY_9000 10
static const struct iwl_base_params iwl9000_base_params = {
.eeprom_size = OTP_LOW_IMAGE_SIZE_FAMILY_9000,
.num_of_queues = 31,
.pll_cfg_val = 0,
.shadow_ram_support = true,
.led_compensation = 57,
.wd_timeout = IWL_LONG_WD_TIMEOUT,
@ -138,11 +143,26 @@ static const struct iwl_tt_params iwl9000_tt_params = {
.apmg_not_supported = true, \
.mq_rx_supported = true, \
.vht_mu_mimo_supported = true, \
.mac_addr_from_csr = true
.mac_addr_from_csr = true, \
.rf_id = true
const struct iwl_cfg iwl9560_2ac_cfg = {
.name = "Intel(R) Dual Band Wireless AC 9560",
.fw_name_pre = IWL9000_FW_PRE,
const struct iwl_cfg iwl9260_2ac_cfg = {
.name = "Intel(R) Dual Band Wireless AC 9260",
.fw_name_pre = IWL9260_FW_PRE,
IWL_DEVICE_9000,
.ht_params = &iwl9000_ht_params,
.nvm_ver = IWL9000_NVM_VERSION,
.nvm_calib_ver = IWL9000_TX_POWER_VERSION,
.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
};
/*
* TODO: the struct below is for internal testing only; it should be
* removed by the end of 2016
*/
const struct iwl_cfg iwl9260lc_2ac_cfg = {
.name = "Intel(R) Dual Band Wireless AC 9260",
.fw_name_pre = IWL9260LC_FW_PRE,
IWL_DEVICE_9000,
.ht_params = &iwl9000_ht_params,
.nvm_ver = IWL9000_NVM_VERSION,
@ -161,3 +181,5 @@ const struct iwl_cfg iwl5165_2ac_cfg = {
};
MODULE_FIRMWARE(IWL9000_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL9260_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL9260LC_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX));

View File

@ -6,6 +6,7 @@
* GPL LICENSE SUMMARY
*
* Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
* Copyright (C) 2016 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@ -31,6 +32,7 @@
* BSD LICENSE
*
* Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
* Copyright (C) 2016 Intel Deutschland GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -165,20 +167,22 @@ static inline u8 num_of_ant(u8 mask)
* @scd_chain_ext_wa: should the chain extension feature in SCD be disabled.
*/
struct iwl_base_params {
int eeprom_size;
int num_of_queues; /* def: HW dependent */
/* for iwl_pcie_apm_init() */
u32 pll_cfg_val;
const u16 max_ll_items;
const bool shadow_ram_support;
u16 led_compensation;
unsigned int wd_timeout;
u32 max_event_log_size;
const bool shadow_reg_enable;
const bool pcie_l1_allowed;
const bool apmg_wake_up_wa;
const bool scd_chain_ext_wa;
u16 eeprom_size;
u16 max_event_log_size;
u8 pll_cfg:1, /* for iwl_pcie_apm_init() */
shadow_ram_support:1,
shadow_reg_enable:1,
pcie_l1_allowed:1,
apmg_wake_up_wa:1,
scd_chain_ext_wa:1;
u8 num_of_queues; /* def: HW dependent */
u8 max_ll_items;
u8 led_compensation;
};
/*
@ -189,10 +193,10 @@ struct iwl_base_params {
*/
struct iwl_ht_params {
enum ieee80211_smps_mode smps_mode;
const bool ht_greenfield_support; /* if used set to true */
const bool stbc;
const bool ldpc;
bool use_rts_for_aggregation;
u8 ht_greenfield_support:1,
stbc:1,
ldpc:1,
use_rts_for_aggregation:1;
u8 ht40_bands;
};
@ -233,10 +237,10 @@ struct iwl_tt_params {
u32 tx_protection_entry;
u32 tx_protection_exit;
struct iwl_tt_tx_backoff tx_backoff[TT_TX_BACKOFF_SIZE];
bool support_ct_kill;
bool support_dynamic_smps;
bool support_tx_protection;
bool support_tx_backoff;
u8 support_ct_kill:1,
support_dynamic_smps:1,
support_tx_protection:1,
support_tx_backoff:1;
};
/*
@ -314,6 +318,7 @@ struct iwl_pwr_tx_backoff {
* @smem_len: the length of SMEM
* @mq_rx_supported: multi-queue rx support
* @vht_mu_mimo_supported: VHT MU-MIMO support
* @rf_id: need to read rf_id to determine the firmware image
*
* We enable the driver to be backward compatible wrt. hardware features.
* API differences in uCode shouldn't be handled here but through TLVs
@ -323,50 +328,51 @@ struct iwl_cfg {
/* params specific to an individual device within a device family */
const char *name;
const char *fw_name_pre;
const unsigned int ucode_api_max;
const unsigned int ucode_api_min;
const enum iwl_device_family device_family;
const u32 max_data_size;
const u32 max_inst_size;
u8 valid_tx_ant;
u8 valid_rx_ant;
u8 non_shared_ant;
bool bt_shared_single_ant;
u16 nvm_ver;
u16 nvm_calib_ver;
/* params not likely to change within a device family */
const struct iwl_base_params *base_params;
/* params likely to change within a device family */
const struct iwl_ht_params *ht_params;
const struct iwl_eeprom_params *eeprom_params;
enum iwl_led_mode led_mode;
const bool rx_with_siso_diversity;
const bool internal_wimax_coex;
const bool host_interrupt_operation_mode;
bool high_temp;
u8 nvm_hw_section_num;
bool mac_addr_from_csr;
bool lp_xtal_workaround;
const struct iwl_pwr_tx_backoff *pwr_tx_backoffs;
bool no_power_up_nic_in_init;
const char *default_nvm_file_B_step;
const char *default_nvm_file_C_step;
netdev_features_t features;
unsigned int max_rx_agg_size;
bool disable_dummy_notification;
unsigned int max_tx_agg_size;
unsigned int max_ht_ampdu_exponent;
unsigned int max_vht_ampdu_exponent;
const u32 dccm_offset;
const u32 dccm_len;
const u32 dccm2_offset;
const u32 dccm2_len;
const u32 smem_offset;
const u32 smem_len;
const struct iwl_tt_params *thermal_params;
bool apmg_not_supported;
bool mq_rx_supported;
bool vht_mu_mimo_supported;
enum iwl_device_family device_family;
enum iwl_led_mode led_mode;
u32 max_data_size;
u32 max_inst_size;
netdev_features_t features;
u32 dccm_offset;
u32 dccm_len;
u32 dccm2_offset;
u32 dccm2_len;
u32 smem_offset;
u32 smem_len;
u16 nvm_ver;
u16 nvm_calib_ver;
u16 rx_with_siso_diversity:1,
bt_shared_single_ant:1,
internal_wimax_coex:1,
host_interrupt_operation_mode:1,
high_temp:1,
mac_addr_from_csr:1,
lp_xtal_workaround:1,
no_power_up_nic_in_init:1,
disable_dummy_notification:1,
apmg_not_supported:1,
mq_rx_supported:1,
vht_mu_mimo_supported:1,
rf_id:1;
u8 valid_tx_ant;
u8 valid_rx_ant;
u8 non_shared_ant;
u8 nvm_hw_section_num;
u8 max_rx_agg_size;
u8 max_tx_agg_size;
u8 max_ht_ampdu_exponent;
u8 max_vht_ampdu_exponent;
u8 ucode_api_max;
u8 ucode_api_min;
};
/*
@ -437,8 +443,10 @@ extern const struct iwl_cfg iwl8260_2ac_cfg;
extern const struct iwl_cfg iwl8265_2ac_cfg;
extern const struct iwl_cfg iwl4165_2ac_cfg;
extern const struct iwl_cfg iwl8260_2ac_sdio_cfg;
extern const struct iwl_cfg iwl8265_2ac_sdio_cfg;
extern const struct iwl_cfg iwl4165_2ac_sdio_cfg;
extern const struct iwl_cfg iwl9560_2ac_cfg;
extern const struct iwl_cfg iwl9260_2ac_cfg;
extern const struct iwl_cfg iwl9260lc_2ac_cfg;
extern const struct iwl_cfg iwl5165_2ac_cfg;
#endif /* CONFIG_IWLMVM */

View File

@ -107,6 +107,17 @@
*/
#define CSR_HW_REV (CSR_BASE+0x028)
/*
* RF ID revision info
* Bit fields:
* 31:24: Reserved (set to 0x0)
* 23:12: Type
* 11:8: Step (A - 0x0, B - 0x1, etc)
* 7:4: Dash
* 3:0: Flavor
*/
#define CSR_HW_RF_ID (CSR_BASE+0x09c)
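Hypothetical field-extraction helpers matching the bit layout documented above (not part of the patch, shown only to make the encoding concrete; e.g. CSR_HW_RF_ID_TYPE_JF below decodes to type 0x105):

#define CSR_HW_RF_ID_TYPE(id)	(((id) >> 12) & 0xFFF)	/* bits 23:12 */
#define CSR_HW_RF_ID_STEP(id)	(((id) >> 8) & 0xF)	/* bits 11:8 */
#define CSR_HW_RF_ID_DASH(id)	(((id) >> 4) & 0xF)	/* bits 7:4 */
#define CSR_HW_RF_ID_FLAVOR(id)	((id) & 0xF)		/* bits 3:0 */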
/*
* EEPROM and OTP (one-time-programmable) memory reads
*
@ -333,6 +344,10 @@ enum {
#define CSR_HW_REV_TYPE_7265D (0x0000210)
#define CSR_HW_REV_TYPE_NONE (0x00001F0)
/* RF_ID value */
#define CSR_HW_RF_ID_TYPE_JF (0x00105000)
#define CSR_HW_RF_ID_TYPE_LC (0x00101000)
/* EEPROM REG */
#define CSR_EEPROM_REG_READ_VALID_MSK (0x00000001)
#define CSR_EEPROM_REG_BIT_CMD (0x00000002)

View File

@ -117,7 +117,7 @@ struct iwl_drv {
const struct iwl_cfg *cfg;
int fw_index; /* firmware we're trying to load */
char firmware_name[32]; /* name of firmware file to load */
char firmware_name[64]; /* name of firmware file to load */
struct completion request_firmware_complete;
@ -211,20 +211,12 @@ static int iwl_alloc_fw_desc(struct iwl_drv *drv, struct fw_desc *desc,
static void iwl_req_fw_callback(const struct firmware *ucode_raw,
void *context);
#define UCODE_EXPERIMENTAL_INDEX 100
#define UCODE_EXPERIMENTAL_TAG "exp"
static int iwl_request_firmware(struct iwl_drv *drv, bool first)
{
const char *name_pre = drv->cfg->fw_name_pre;
char tag[8];
if (first) {
#ifdef CONFIG_IWLWIFI_DEBUG_EXPERIMENTAL_UCODE
drv->fw_index = UCODE_EXPERIMENTAL_INDEX;
strcpy(tag, UCODE_EXPERIMENTAL_TAG);
} else if (drv->fw_index == UCODE_EXPERIMENTAL_INDEX) {
#endif
drv->fw_index = drv->cfg->ucode_api_max;
sprintf(tag, "%d", drv->fw_index);
} else {
@ -240,9 +232,7 @@ static int iwl_request_firmware(struct iwl_drv *drv, bool first)
snprintf(drv->firmware_name, sizeof(drv->firmware_name), "%s%s.ucode",
name_pre, tag);
IWL_DEBUG_INFO(drv, "attempting to load firmware %s'%s'\n",
(drv->fw_index == UCODE_EXPERIMENTAL_INDEX)
? "EXPERIMENTAL " : "",
IWL_DEBUG_INFO(drv, "attempting to load firmware '%s'\n",
drv->firmware_name);
return request_firmware_nowait(THIS_MODULE, 1, drv->firmware_name,
@ -541,9 +531,7 @@ static int iwl_parse_v1_v2_firmware(struct iwl_drv *drv,
}
if (build)
sprintf(buildstr, " build %u%s", build,
(drv->fw_index == UCODE_EXPERIMENTAL_INDEX)
? " (EXP)" : "");
sprintf(buildstr, " build %u", build);
else
buildstr[0] = '\0';
@ -627,9 +615,7 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
build = le32_to_cpu(ucode->build);
if (build)
sprintf(buildstr, " build %u%s", build,
(drv->fw_index == UCODE_EXPERIMENTAL_INDEX)
? " (EXP)" : "");
sprintf(buildstr, " build %u", build);
else
buildstr[0] = '\0';
@ -1277,15 +1263,12 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
* firmware filename ... but we don't check for that and only rely
* on the API version read from firmware header from here on forward
*/
/* no api version check required for experimental uCode */
if (drv->fw_index != UCODE_EXPERIMENTAL_INDEX) {
if (api_ver < api_min || api_ver > api_max) {
IWL_ERR(drv,
"Driver unable to support your firmware API. "
"Driver supports v%u, firmware is v%u.\n",
api_max, api_ver);
goto try_again;
}
if (api_ver < api_min || api_ver > api_max) {
IWL_ERR(drv,
"Driver unable to support your firmware API. "
"Driver supports v%u, firmware is v%u.\n",
api_max, api_ver);
goto try_again;
}
/*
@ -1744,4 +1727,4 @@ MODULE_PARM_DESC(d0i3_timeout, "Timeout to D0i3 entry when idle (ms)");
module_param_named(disable_11ac, iwlwifi_mod_params.disable_11ac, bool,
S_IRUGO);
MODULE_PARM_DESC(disable_11ac, "Disable VHT capabilities");
MODULE_PARM_DESC(disable_11ac, "Disable VHT capabilities (default: false)");

View File

@ -98,6 +98,7 @@ struct iwl_nvm_data {
s8 max_tx_pwr_half_dbm;
bool lar_enabled;
bool vht160_supported;
struct ieee80211_supported_band bands[NUM_NL80211_BANDS];
struct ieee80211_channel channels[];
};

View File

@ -321,6 +321,9 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(unsigned int chnl)
/* Write index table */
#define RFH_Q0_FRBDCB_WIDX 0xA08080
#define RFH_Q_FRBDCB_WIDX(q) (RFH_Q0_FRBDCB_WIDX + (q) * 4)
/* Write index table - shadow registers */
#define RFH_Q0_FRBDCB_WIDX_TRG 0x1C80
#define RFH_Q_FRBDCB_WIDX_TRG(q) (RFH_Q0_FRBDCB_WIDX_TRG + (q) * 4)
/* Read index table */
#define RFH_Q0_FRBDCB_RIDX 0xA080C0
#define RFH_Q_FRBDCB_RIDX(q) (RFH_Q0_FRBDCB_RIDX + (q) * 4)

View File

@ -288,6 +288,9 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
!data->sku_cap_band_52GHz_enable)
continue;
if (ch_flags & NVM_CHANNEL_160MHZ)
data->vht160_supported = true;
if (!lar_supported && !(ch_flags & NVM_CHANNEL_VALID)) {
/*
* Channels might become valid later if lar is
@ -331,17 +334,20 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
channel->flags = 0;
IWL_DEBUG_EEPROM(dev,
"Ch. %d [%sGHz] %s%s%s%s%s%s%s(0x%02x %ddBm): Ad-Hoc %ssupported\n",
"Ch. %d [%sGHz] flags 0x%x %s%s%s%s%s%s%s%s%s%s(%ddBm): Ad-Hoc %ssupported\n",
channel->hw_value,
is_5ghz ? "5.2" : "2.4",
ch_flags,
CHECK_AND_PRINT_I(VALID),
CHECK_AND_PRINT_I(IBSS),
CHECK_AND_PRINT_I(ACTIVE),
CHECK_AND_PRINT_I(RADAR),
CHECK_AND_PRINT_I(WIDE),
CHECK_AND_PRINT_I(INDOOR_ONLY),
CHECK_AND_PRINT_I(GO_CONCURRENT),
ch_flags,
CHECK_AND_PRINT_I(WIDE),
CHECK_AND_PRINT_I(40MHZ),
CHECK_AND_PRINT_I(80MHZ),
CHECK_AND_PRINT_I(160MHZ),
channel->max_power,
((ch_flags & NVM_CHANNEL_IBSS) &&
!(ch_flags & NVM_CHANNEL_RADAR))
@ -370,6 +376,10 @@ static void iwl_init_vht_hw_capab(const struct iwl_cfg *cfg,
max_ampdu_exponent <<
IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT;
if (data->vht160_supported)
vht_cap->cap |= IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ |
IEEE80211_VHT_CAP_SHORT_GI_160;
if (cfg->vht_mu_mimo_supported)
vht_cap->cap |= IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE;

View File

@ -6,6 +6,7 @@
* GPL LICENSE SUMMARY
*
* Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2016 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@ -72,8 +73,6 @@
#include "iwl-trans.h"
#define CHANNEL_NUM_SIZE 4 /* num of channels in calib_ch size */
#define IWL_NUM_PAPD_CH_GROUPS 9
#define IWL_NUM_TXP_CH_GROUPS 9
struct iwl_phy_db_entry {
u16 size;
@ -86,14 +85,18 @@ struct iwl_phy_db_entry {
* @cfg: phy configuration.
* @calib_nch: non channel specific calibration data.
* @calib_ch: channel specific calibration data.
* @n_group_papd: number of entries in papd channel group.
* @calib_ch_group_papd: calibration data related to papd channel group.
* @n_group_txp: number of entries in tx power channel group.
* @calib_ch_group_txp: calibration data related to tx power channel group.
*/
struct iwl_phy_db {
struct iwl_phy_db_entry cfg;
struct iwl_phy_db_entry calib_nch;
struct iwl_phy_db_entry calib_ch_group_papd[IWL_NUM_PAPD_CH_GROUPS];
struct iwl_phy_db_entry calib_ch_group_txp[IWL_NUM_TXP_CH_GROUPS];
int n_group_papd;
struct iwl_phy_db_entry *calib_ch_group_papd;
int n_group_txp;
struct iwl_phy_db_entry *calib_ch_group_txp;
struct iwl_trans *trans;
};
@ -143,6 +146,9 @@ struct iwl_phy_db *iwl_phy_db_init(struct iwl_trans *trans)
phy_db->trans = trans;
phy_db->n_group_txp = -1;
phy_db->n_group_papd = -1;
/* TODO: add default values of the phy db. */
return phy_db;
}
@ -166,11 +172,11 @@ iwl_phy_db_get_section(struct iwl_phy_db *phy_db,
case IWL_PHY_DB_CALIB_NCH:
return &phy_db->calib_nch;
case IWL_PHY_DB_CALIB_CHG_PAPD:
if (chg_id >= IWL_NUM_PAPD_CH_GROUPS)
if (chg_id >= phy_db->n_group_papd)
return NULL;
return &phy_db->calib_ch_group_papd[chg_id];
case IWL_PHY_DB_CALIB_CHG_TXP:
if (chg_id >= IWL_NUM_TXP_CH_GROUPS)
if (chg_id >= phy_db->n_group_txp)
return NULL;
return &phy_db->calib_ch_group_txp[chg_id];
default:
@ -202,17 +208,21 @@ void iwl_phy_db_free(struct iwl_phy_db *phy_db)
iwl_phy_db_free_section(phy_db, IWL_PHY_DB_CFG, 0);
iwl_phy_db_free_section(phy_db, IWL_PHY_DB_CALIB_NCH, 0);
for (i = 0; i < IWL_NUM_PAPD_CH_GROUPS; i++)
for (i = 0; i < phy_db->n_group_papd; i++)
iwl_phy_db_free_section(phy_db, IWL_PHY_DB_CALIB_CHG_PAPD, i);
for (i = 0; i < IWL_NUM_TXP_CH_GROUPS; i++)
kfree(phy_db->calib_ch_group_papd);
for (i = 0; i < phy_db->n_group_txp; i++)
iwl_phy_db_free_section(phy_db, IWL_PHY_DB_CALIB_CHG_TXP, i);
kfree(phy_db->calib_ch_group_txp);
kfree(phy_db);
}
IWL_EXPORT_SYMBOL(iwl_phy_db_free);
int iwl_phy_db_set_section(struct iwl_phy_db *phy_db, struct iwl_rx_packet *pkt,
gfp_t alloc_ctx)
int iwl_phy_db_set_section(struct iwl_phy_db *phy_db,
struct iwl_rx_packet *pkt)
{
struct iwl_calib_res_notif_phy_db *phy_db_notif =
(struct iwl_calib_res_notif_phy_db *)pkt->data;
@ -224,16 +234,42 @@ int iwl_phy_db_set_section(struct iwl_phy_db *phy_db, struct iwl_rx_packet *pkt,
if (!phy_db)
return -EINVAL;
if (type == IWL_PHY_DB_CALIB_CHG_PAPD ||
type == IWL_PHY_DB_CALIB_CHG_TXP)
if (type == IWL_PHY_DB_CALIB_CHG_PAPD) {
chg_id = le16_to_cpup((__le16 *)phy_db_notif->data);
if (phy_db && !phy_db->calib_ch_group_papd) {
/*
* Firmware sends the largest index first, so we can use
* it to know how much we should allocate.
*/
phy_db->calib_ch_group_papd = kcalloc(chg_id + 1,
sizeof(struct iwl_phy_db_entry),
GFP_ATOMIC);
if (!phy_db->calib_ch_group_papd)
return -ENOMEM;
phy_db->n_group_papd = chg_id + 1;
}
} else if (type == IWL_PHY_DB_CALIB_CHG_TXP) {
chg_id = le16_to_cpup((__le16 *)phy_db_notif->data);
if (phy_db && !phy_db->calib_ch_group_txp) {
/*
* Firmware sends the largest index first, so we can use
* it to know how much we should allocate.
*/
phy_db->calib_ch_group_txp = kcalloc(chg_id + 1,
sizeof(struct iwl_phy_db_entry),
GFP_ATOMIC);
if (!phy_db->calib_ch_group_txp)
return -ENOMEM;
phy_db->n_group_txp = chg_id + 1;
}
}
entry = iwl_phy_db_get_section(phy_db, type, chg_id);
if (!entry)
return -EINVAL;
kfree(entry->data);
entry->data = kmemdup(phy_db_notif->data, size, alloc_ctx);
entry->data = kmemdup(phy_db_notif->data, size, GFP_ATOMIC);
if (!entry->data) {
entry->size = 0;
return -ENOMEM;
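Both branches above follow the same pattern: the first notification of a channel-group type carries the largest group index, which bounds the allocation. A hedged sketch of how that pattern could be factored into a single helper (the helper name is hypothetical):

static int iwl_phy_db_alloc_group(struct iwl_phy_db_entry **group,
				  int *n_group, u16 chg_id)
{
	if (*group)
		return 0;	/* already sized by an earlier notification */

	*group = kcalloc(chg_id + 1, sizeof(**group), GFP_ATOMIC);
	if (!*group)
		return -ENOMEM;

	*n_group = chg_id + 1;
	return 0;
}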
@ -296,7 +332,7 @@ static u16 channel_id_to_txp(struct iwl_phy_db *phy_db, u16 ch_id)
if (ch_index == 0xff)
return 0xff;
for (i = 0; i < IWL_NUM_TXP_CH_GROUPS; i++) {
for (i = 0; i < phy_db->n_group_txp; i++) {
txp_chg = (void *)phy_db->calib_ch_group_txp[i].data;
if (!txp_chg)
return 0xff;
@ -447,7 +483,7 @@ int iwl_send_phy_db_data(struct iwl_phy_db *phy_db)
/* Send all the TXP channel specific data */
err = iwl_phy_db_send_all_channel_groups(phy_db,
IWL_PHY_DB_CALIB_CHG_PAPD,
IWL_NUM_PAPD_CH_GROUPS);
phy_db->n_group_papd);
if (err) {
IWL_ERR(phy_db->trans,
"Cannot send channel specific PAPD groups\n");
@ -457,7 +493,7 @@ int iwl_send_phy_db_data(struct iwl_phy_db *phy_db)
/* Send all the TXP channel specific data */
err = iwl_phy_db_send_all_channel_groups(phy_db,
IWL_PHY_DB_CALIB_CHG_TXP,
IWL_NUM_TXP_CH_GROUPS);
phy_db->n_group_txp);
if (err) {
IWL_ERR(phy_db->trans,
"Cannot send channel specific TX power groups\n");

View File

@ -73,8 +73,8 @@ struct iwl_phy_db *iwl_phy_db_init(struct iwl_trans *trans);
void iwl_phy_db_free(struct iwl_phy_db *phy_db);
int iwl_phy_db_set_section(struct iwl_phy_db *phy_db, struct iwl_rx_packet *pkt,
gfp_t alloc_ctx);
int iwl_phy_db_set_section(struct iwl_phy_db *phy_db,
struct iwl_rx_packet *pkt);
int iwl_send_phy_db_data(struct iwl_phy_db *phy_db);

View File

@ -753,6 +753,7 @@ enum iwl_plat_pm_mode {
* @dev - pointer to struct device * that represents the device
* @max_skb_frags: maximum number of fragments an SKB can have when transmitted.
* 0 indicates that frag SKBs (NETIF_F_SG) aren't supported.
* @hw_rf_id: a u32 with the device RF ID
* @hw_id: a u32 with the ID of the device / sub-device.
* Set during transport allocation.
* @hw_id_str: a string with info about HW ID. Set during transport allocation.
@ -797,6 +798,7 @@ struct iwl_trans {
struct device *dev;
u32 max_skb_frags;
u32 hw_rev;
u32 hw_rf_id;
u32 hw_id;
char hw_id_str[52];

View File

@ -109,6 +109,7 @@
#define IWL_MVM_RS_80_20_FAR_RANGE_TWEAK 1
#define IWL_MVM_TOF_IS_RESPONDER 0
#define IWL_MVM_SW_TX_CSUM_OFFLOAD 0
#define IWL_MVM_HW_CSUM_DISABLE 0
#define IWL_MVM_COLLECT_FW_ERR_DUMP 1
#define IWL_MVM_RS_NUM_TRY_BEFORE_ANT_TOGGLE 1
#define IWL_MVM_RS_HT_VHT_RETRIES_PER_RATE 2

View File

@ -1804,7 +1804,6 @@ static bool iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm,
struct iwl_wowlan_status *fw_status;
int i;
bool keep;
struct ieee80211_sta *ap_sta;
struct iwl_mvm_sta *mvm_ap_sta;
fw_status = iwl_mvm_get_wakeup_status(mvm, vif);
@ -1823,13 +1822,10 @@ static bool iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm,
status.wake_packet = fw_status->wake_packet;
/* still at hard-coded place 0 for D3 image */
ap_sta = rcu_dereference_protected(
mvm->fw_id_to_mac_id[0],
lockdep_is_held(&mvm->mutex));
if (IS_ERR_OR_NULL(ap_sta))
mvm_ap_sta = iwl_mvm_sta_from_staid_protected(mvm, 0);
if (!mvm_ap_sta)
goto out_free;
mvm_ap_sta = iwl_mvm_sta_from_mac80211(ap_sta);
for (i = 0; i < IWL_MAX_TID_COUNT; i++) {
u16 seq = status.qos_seq_ctr[i];
/* firmware stores last-used value, we store next value */

View File

@ -281,13 +281,10 @@ static ssize_t iwl_dbgfs_mac_params_read(struct file *file,
if (vif->type == NL80211_IFTYPE_STATION &&
ap_sta_id != IWL_MVM_STATION_COUNT) {
struct ieee80211_sta *sta;
sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[ap_sta_id],
lockdep_is_held(&mvm->mutex));
if (!IS_ERR_OR_NULL(sta)) {
struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
struct iwl_mvm_sta *mvm_sta;
mvm_sta = iwl_mvm_sta_from_staid_protected(mvm, ap_sta_id);
if (mvm_sta) {
pos += scnprintf(buf+pos, bufsz-pos,
"ap_sta_id %d - reduced Tx power %d\n",
ap_sta_id,

View File

@ -1309,6 +1309,8 @@ static ssize_t iwl_dbgfs_d0i3_refs_read(struct file *file,
PRINT_MVM_REF(IWL_MVM_REF_PROTECT_CSA);
PRINT_MVM_REF(IWL_MVM_REF_FW_DBG_COLLECT);
PRINT_MVM_REF(IWL_MVM_REF_INIT_UCODE);
PRINT_MVM_REF(IWL_MVM_REF_SENDING_CMD);
PRINT_MVM_REF(IWL_MVM_REF_RX);
return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
}

View File

@ -368,7 +368,7 @@ struct iwl_wowlan_gtk_status {
u8 decrypt_key[16];
u8 tkip_mic_key[8];
struct iwl_wowlan_rsc_tsc_params_cmd rsc;
} __packed;
} __packed; /* WOWLAN_GTK_MATERIAL_VER_1 */
struct iwl_wowlan_status {
struct iwl_wowlan_gtk_status gtk;

View File

@ -437,21 +437,28 @@ struct iwl_rxq_sync_notification {
/**
* Internal message identifier
*
* @IWL_MVM_RXQ_EMPTY: empty sync notification
* @IWL_MVM_RXQ_NOTIF_DEL_BA: notify RSS queues of delBA
*/
enum iwl_mvm_rxq_notif_type {
IWL_MVM_RXQ_EMPTY,
IWL_MVM_RXQ_NOTIF_DEL_BA,
};
/**
* struct iwl_mvm_internal_rxq_notif - Internal representation of the data sent
* in &iwl_rxq_sync_cmd. Should be DWORD aligned.
* FW is agnostic to the payload, so there are no endianness requirements.
*
* @type: value from &iwl_mvm_rxq_notif_type
* @sync: ctrl path is waiting for all notifications to be received
* @cookie: internal cookie to identify old notifications
* @data: payload
*/
struct iwl_mvm_internal_rxq_notif {
u32 type;
u16 type;
u16 sync;
u32 cookie;
u8 data[];
} __packed;

View File

@ -173,7 +173,7 @@ enum iwl_sta_key_flag {
/**
* enum iwl_sta_modify_flag - indicate to the fw what flag are being changed
* @STA_MODIFY_KEY: this command modifies %key
* @STA_MODIFY_QUEUE_REMOVAL: this command removes a queue
* @STA_MODIFY_TID_DISABLE_TX: this command modifies %tid_disable_tx
* @STA_MODIFY_TX_RATE: unused
* @STA_MODIFY_ADD_BA_TID: this command modifies %add_immediate_ba_tid
@ -183,7 +183,7 @@ enum iwl_sta_key_flag {
* @STA_MODIFY_QUEUES: modify the queues used by this station
*/
enum iwl_sta_modify_flag {
STA_MODIFY_KEY = BIT(0),
STA_MODIFY_QUEUE_REMOVAL = BIT(0),
STA_MODIFY_TID_DISABLE_TX = BIT(1),
STA_MODIFY_TX_RATE = BIT(2),
STA_MODIFY_ADD_BA_TID = BIT(3),
@ -255,8 +255,10 @@ struct iwl_mvm_keyinfo {
__le64 hw_tkip_mic_tx_key;
} __packed;
#define IWL_ADD_STA_STATUS_MASK 0xFF
#define IWL_ADD_STA_BAID_MASK 0xFF00
#define IWL_ADD_STA_STATUS_MASK 0xFF
#define IWL_ADD_STA_BAID_VALID_MASK 0x8000
#define IWL_ADD_STA_BAID_MASK 0x7F00
#define IWL_ADD_STA_BAID_SHIFT 8
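An illustrative decode of the new layout (a sketch, not code from the patch; status holds the CPU-order status word of an ADD_STA response, baid is a local u8):

	if (status & IWL_ADD_STA_BAID_VALID_MASK)
		baid = (status & IWL_ADD_STA_BAID_MASK) >>
		       IWL_ADD_STA_BAID_SHIFT;
	else
		baid = 0xff;	/* firmware did not assign a BAID */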
/**
* struct iwl_mvm_add_sta_cmd_v7 - Add/modify a station in the fw's sta table.

View File

@ -90,6 +90,7 @@ enum {
* DQA queue numbers
*
* @IWL_MVM_DQA_CMD_QUEUE: a queue reserved for sending HCMDs to the FW
* @IWL_MVM_DQA_P2P_DEVICE_QUEUE: a queue reserved for P2P device frames
* @IWL_MVM_DQA_GCAST_QUEUE: a queue reserved for P2P GO/SoftAP GCAST frames
* @IWL_MVM_DQA_BSS_CLIENT_QUEUE: a queue reserved for BSS activity, to ensure
* that we are never left without the possibility to connect to an AP.
@ -97,6 +98,8 @@ enum {
* Each MGMT queue is mapped to a single STA
* MGMT frames are frames that return true on ieee80211_is_mgmt()
* @IWL_MVM_DQA_MAX_MGMT_QUEUE: last TXQ in pool for MGMT frames
* @IWL_MVM_DQA_AP_PROBE_RESP_QUEUE: a queue reserved for P2P GO/SoftAP probe
* responses
* @IWL_MVM_DQA_MIN_DATA_QUEUE: first TXQ in pool for DATA frames.
* DATA frames are intended for !ieee80211_is_mgmt() frames, but if
* the MGMT TXQ pool is exhausted, mgmt frames can be sent on DATA queues
@ -105,10 +108,12 @@ enum {
*/
enum iwl_mvm_dqa_txq {
IWL_MVM_DQA_CMD_QUEUE = 0,
IWL_MVM_DQA_P2P_DEVICE_QUEUE = 2,
IWL_MVM_DQA_GCAST_QUEUE = 3,
IWL_MVM_DQA_BSS_CLIENT_QUEUE = 4,
IWL_MVM_DQA_MIN_MGMT_QUEUE = 5,
IWL_MVM_DQA_MAX_MGMT_QUEUE = 8,
IWL_MVM_DQA_AP_PROBE_RESP_QUEUE = 9,
IWL_MVM_DQA_MIN_DATA_QUEUE = 10,
IWL_MVM_DQA_MAX_DATA_QUEUE = 31,
};

View File

@ -271,9 +271,6 @@ static void iwl_mvm_dump_fifos(struct iwl_mvm *mvm,
for (i = 0;
i < ARRAY_SIZE(mvm->shared_mem_cfg.internal_txfifo_size);
i++) {
/* Mark the number of TXF we're pulling now */
iwl_trans_write_prph(mvm->trans, TXF_CPU2_NUM, i);
fifo_hdr = (void *)(*dump_data)->data;
fifo_data = (void *)fifo_hdr->data;
fifo_len = mvm->shared_mem_cfg.internal_txfifo_size[i];
@ -289,6 +286,10 @@ static void iwl_mvm_dump_fifos(struct iwl_mvm *mvm,
cpu_to_le32(fifo_len + sizeof(*fifo_hdr));
fifo_hdr->fifo_num = cpu_to_le32(i);
/* Mark the number of TXF we're pulling now */
iwl_trans_write_prph(mvm->trans, TXF_CPU2_NUM, i);
fifo_hdr->available_bytes =
cpu_to_le32(iwl_trans_read_prph(mvm->trans,
TXF_CPU2_FIFO_ITEM_CNT));
@ -339,9 +340,11 @@ void iwl_mvm_free_fw_dump_desc(struct iwl_mvm *mvm)
#define IWL8260_ICCM_OFFSET 0x44000 /* Only for B-step */
#define IWL8260_ICCM_LEN 0xC000 /* Only for B-step */
static const struct {
struct iwl_prph_range {
u32 start, end;
} iwl_prph_dump_addr[] = {
};
static const struct iwl_prph_range iwl_prph_dump_addr_comm[] = {
{ .start = 0x00a00000, .end = 0x00a00000 },
{ .start = 0x00a0000c, .end = 0x00a00024 },
{ .start = 0x00a0002c, .end = 0x00a0003c },
@ -439,8 +442,18 @@ static const struct {
{ .start = 0x00a44000, .end = 0x00a7bf80 },
};
static const struct iwl_prph_range iwl_prph_dump_addr_9000[] = {
{ .start = 0x00a05c00, .end = 0x00a05c18 },
{ .start = 0x00a05400, .end = 0x00a056e8 },
{ .start = 0x00a08000, .end = 0x00a098bc },
{ .start = 0x00adfc00, .end = 0x00adfd1c },
{ .start = 0x00a02400, .end = 0x00a02758 },
};
static u32 iwl_dump_prph(struct iwl_trans *trans,
struct iwl_fw_error_dump_data **data)
struct iwl_fw_error_dump_data **data,
const struct iwl_prph_range *iwl_prph_dump_addr,
u32 range_len)
{
struct iwl_fw_error_dump_prph *prph;
unsigned long flags;
@ -449,7 +462,7 @@ static u32 iwl_dump_prph(struct iwl_trans *trans,
if (!iwl_trans_grab_nic_access(trans, &flags))
return 0;
for (i = 0; i < ARRAY_SIZE(iwl_prph_dump_addr); i++) {
for (i = 0; i < range_len; i++) {
/* The range includes both boundaries */
int num_bytes_in_chunk = iwl_prph_dump_addr[i].end -
iwl_prph_dump_addr[i].start + 4;
@ -572,16 +585,31 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
}
/* Make room for PRPH registers */
for (i = 0; i < ARRAY_SIZE(iwl_prph_dump_addr); i++) {
for (i = 0; i < ARRAY_SIZE(iwl_prph_dump_addr_comm); i++) {
/* The range includes both boundaries */
int num_bytes_in_chunk = iwl_prph_dump_addr[i].end -
iwl_prph_dump_addr[i].start + 4;
int num_bytes_in_chunk =
iwl_prph_dump_addr_comm[i].end -
iwl_prph_dump_addr_comm[i].start + 4;
prph_len += sizeof(*dump_data) +
sizeof(struct iwl_fw_error_dump_prph) +
num_bytes_in_chunk;
}
if (mvm->cfg->mq_rx_supported) {
for (i = 0; i <
ARRAY_SIZE(iwl_prph_dump_addr_9000); i++) {
/* The range includes both boundaries */
int num_bytes_in_chunk =
iwl_prph_dump_addr_9000[i].end -
iwl_prph_dump_addr_9000[i].start + 4;
prph_len += sizeof(*dump_data) +
sizeof(struct iwl_fw_error_dump_prph) +
num_bytes_in_chunk;
}
}
if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_7000)
radio_len = sizeof(*dump_data) + RADIO_REG_MAX_READ;
}
@ -769,8 +797,16 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
}
}
if (prph_len)
iwl_dump_prph(mvm->trans, &dump_data);
if (prph_len) {
iwl_dump_prph(mvm->trans, &dump_data,
iwl_prph_dump_addr_comm,
ARRAY_SIZE(iwl_prph_dump_addr_comm));
if (mvm->cfg->mq_rx_supported)
iwl_dump_prph(mvm->trans, &dump_data,
iwl_prph_dump_addr_9000,
ARRAY_SIZE(iwl_prph_dump_addr_9000));
}
dump_trans_data:
fw_error_dump->trans_ptr = iwl_trans_dump_data(mvm->trans,

View File

@ -535,7 +535,7 @@ static bool iwl_wait_phy_db_entry(struct iwl_notif_wait_data *notif_wait,
return true;
}
WARN_ON(iwl_phy_db_set_section(phy_db, pkt, GFP_ATOMIC));
WARN_ON(iwl_phy_db_set_section(phy_db, pkt));
return false;
}

View File

@ -501,9 +501,11 @@ int iwl_mvm_mac_ctxt_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
switch (vif->type) {
case NL80211_IFTYPE_P2P_DEVICE:
iwl_mvm_enable_ac_txq(mvm, IWL_MVM_OFFCHANNEL_QUEUE,
IWL_MVM_OFFCHANNEL_QUEUE,
IWL_MVM_TX_FIFO_VO, 0, wdg_timeout);
if (!iwl_mvm_is_dqa_supported(mvm))
iwl_mvm_enable_ac_txq(mvm, IWL_MVM_OFFCHANNEL_QUEUE,
IWL_MVM_OFFCHANNEL_QUEUE,
IWL_MVM_TX_FIFO_VO, 0,
wdg_timeout);
break;
case NL80211_IFTYPE_AP:
iwl_mvm_enable_ac_txq(mvm, vif->cab_queue, vif->cab_queue,
@ -533,13 +535,21 @@ void iwl_mvm_mac_ctxt_release(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
switch (vif->type) {
case NL80211_IFTYPE_P2P_DEVICE:
iwl_mvm_disable_txq(mvm, IWL_MVM_OFFCHANNEL_QUEUE,
IWL_MVM_OFFCHANNEL_QUEUE, IWL_MAX_TID_COUNT,
0);
if (!iwl_mvm_is_dqa_supported(mvm))
iwl_mvm_disable_txq(mvm, IWL_MVM_OFFCHANNEL_QUEUE,
IWL_MVM_OFFCHANNEL_QUEUE,
IWL_MAX_TID_COUNT, 0);
break;
case NL80211_IFTYPE_AP:
iwl_mvm_disable_txq(mvm, vif->cab_queue, vif->cab_queue,
IWL_MAX_TID_COUNT, 0);
if (iwl_mvm_is_dqa_supported(mvm))
iwl_mvm_disable_txq(mvm,
IWL_MVM_DQA_AP_PROBE_RESP_QUEUE,
vif->hw_queue[0], IWL_MAX_TID_COUNT,
0);
/* fall through */
default:
/*

View File

@ -229,7 +229,11 @@ void iwl_mvm_unref(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type)
IWL_DEBUG_RPM(mvm, "Leave mvm reference - type %d\n", ref_type);
spin_lock_bh(&mvm->refs_lock);
WARN_ON(!mvm->refs[ref_type]--);
if (WARN_ON(!mvm->refs[ref_type])) {
spin_unlock_bh(&mvm->refs_lock);
return;
}
mvm->refs[ref_type]--;
spin_unlock_bh(&mvm->refs_lock);
iwl_trans_unref(mvm->trans);
}
@ -439,11 +443,19 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
ieee80211_hw_set(hw, SUPPORTS_CLONED_SKBS);
ieee80211_hw_set(hw, SUPPORTS_AMSDU_IN_AMPDU);
ieee80211_hw_set(hw, NEEDS_UNIQUE_STA_ADDR);
if (iwl_mvm_has_new_rx_api(mvm))
ieee80211_hw_set(hw, SUPPORTS_REORDERING_BUFFER);
if (mvm->trans->num_rx_queues > 1)
ieee80211_hw_set(hw, USES_RSS);
if (mvm->trans->max_skb_frags)
hw->netdev_features = NETIF_F_HIGHDMA | NETIF_F_SG;
hw->queues = mvm->first_agg_queue;
if (!iwl_mvm_is_dqa_supported(mvm))
hw->queues = mvm->first_agg_queue;
else
hw->queues = IEEE80211_MAX_QUEUES;
hw->offchannel_tx_hw_queue = IWL_MVM_OFFCHANNEL_QUEUE;
hw->radiotap_mcs_details |= IEEE80211_RADIOTAP_MCS_HAVE_FEC |
IEEE80211_RADIOTAP_MCS_HAVE_STBC;
@ -848,6 +860,7 @@ static int iwl_mvm_mac_ampdu_action(struct ieee80211_hw *hw,
u16 *ssn = &params->ssn;
u8 buf_size = params->buf_size;
bool amsdu = params->amsdu;
u16 timeout = params->timeout;
IWL_DEBUG_HT(mvm, "A-MPDU action on addr %pM tid %d: action %d\n",
sta->addr, tid, action);
@ -888,10 +901,12 @@ static int iwl_mvm_mac_ampdu_action(struct ieee80211_hw *hw,
ret = -EINVAL;
break;
}
ret = iwl_mvm_sta_rx_agg(mvm, sta, tid, *ssn, true, buf_size);
ret = iwl_mvm_sta_rx_agg(mvm, sta, tid, *ssn, true, buf_size,
timeout);
break;
case IEEE80211_AMPDU_RX_STOP:
ret = iwl_mvm_sta_rx_agg(mvm, sta, tid, 0, false, buf_size);
ret = iwl_mvm_sta_rx_agg(mvm, sta, tid, 0, false, buf_size,
timeout);
break;
case IEEE80211_AMPDU_TX_START:
if (!iwl_enable_tx_ampdu(mvm->cfg)) {
@ -4037,6 +4052,55 @@ static void iwl_mvm_mac_event_callback(struct ieee80211_hw *hw,
}
}
void iwl_mvm_sync_rx_queues_internal(struct iwl_mvm *mvm,
struct iwl_mvm_internal_rxq_notif *notif,
u32 size)
{
DECLARE_WAIT_QUEUE_HEAD_ONSTACK(notif_waitq);
u32 qmask = BIT(mvm->trans->num_rx_queues) - 1;
int ret;
lockdep_assert_held(&mvm->mutex);
if (!iwl_mvm_has_new_rx_api(mvm))
return;
notif->cookie = mvm->queue_sync_cookie;
if (notif->sync)
atomic_set(&mvm->queue_sync_counter,
mvm->trans->num_rx_queues);
ret = iwl_mvm_notify_rx_queue(mvm, qmask, (u8 *)notif, size);
if (ret) {
IWL_ERR(mvm, "Failed to trigger RX queues sync (%d)\n", ret);
goto out;
}
if (notif->sync)
ret = wait_event_timeout(notif_waitq,
atomic_read(&mvm->queue_sync_counter) == 0,
HZ);
WARN_ON_ONCE(!ret);
out:
atomic_set(&mvm->queue_sync_counter, 0);
mvm->queue_sync_cookie++;
}
static void iwl_mvm_sync_rx_queues(struct ieee80211_hw *hw)
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
struct iwl_mvm_internal_rxq_notif data = {
.type = IWL_MVM_RXQ_EMPTY,
.sync = 1,
};
mutex_lock(&mvm->mutex);
iwl_mvm_sync_rx_queues_internal(mvm, &data, sizeof(data));
mutex_unlock(&mvm->mutex);
}
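The receive side of this handshake runs per RX queue: a queue that sees a notification carrying the current cookie decrements queue_sync_counter, which the waiter above tests. A condensed, illustrative sketch of that handler (the real one is iwl_mvm_rx_queue_notif() in the RX path):

static void rx_queue_notif_sketch(struct iwl_mvm *mvm,
				  struct iwl_mvm_internal_rxq_notif *notif)
{
	if (notif->cookie != mvm->queue_sync_cookie)
		return;		/* stale notification from an older sync */

	if (notif->type == IWL_MVM_RXQ_EMPTY && notif->sync)
		atomic_dec(&mvm->queue_sync_counter);
}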
const struct ieee80211_ops iwl_mvm_hw_ops = {
.tx = iwl_mvm_mac_tx,
.ampdu_action = iwl_mvm_mac_ampdu_action,
@ -4093,6 +4157,8 @@ const struct ieee80211_ops iwl_mvm_hw_ops = {
.event_callback = iwl_mvm_mac_event_callback,
.sync_rx_queues = iwl_mvm_sync_rx_queues,
CFG80211_TESTMODE_CMD(iwl_mvm_mac_testmode_cmd)
#ifdef CONFIG_PM_SLEEP

View File

@ -301,6 +301,8 @@ enum iwl_mvm_ref_type {
IWL_MVM_REF_PROTECT_CSA,
IWL_MVM_REF_FW_DBG_COLLECT,
IWL_MVM_REF_INIT_UCODE,
IWL_MVM_REF_SENDING_CMD,
IWL_MVM_REF_RX,
/* update debugfs.c when changing this */
@ -613,6 +615,84 @@ struct iwl_mvm_shared_mem_cfg {
u32 internal_txfifo_size[TX_FIFO_INTERNAL_MAX_NUM];
};
/**
* struct iwl_mvm_reorder_buffer - per ra/tid/queue reorder buffer
* @head_sn: reorder window head sn
* @num_stored: number of mpdus stored in the buffer
* @buf_size: the reorder buffer size as set by the last addba request
* @sta_id: sta id of this reorder buffer
* @queue: queue of this reorder buffer
* @last_amsdu: track last A-MSDU SN for duplication detection
* @last_sub_index: track A-MSDU sub frame index for duplication detection
* @entries: list of skbs stored
* @reorder_time: time the packet was stored in the reorder buffer
* @reorder_timer: timer for frames in the reorder buffer. For A-MSDU
* it is the time of last received sub-frame
* @removed: prevent timer re-arming
* @lock: protect reorder buffer internal state
* @mvm: mvm pointer, needed for frame timer context
*/
struct iwl_mvm_reorder_buffer {
u16 head_sn;
u16 num_stored;
u8 buf_size;
u8 sta_id;
int queue;
u16 last_amsdu;
u8 last_sub_index;
struct sk_buff_head entries[IEEE80211_MAX_AMPDU_BUF];
unsigned long reorder_time[IEEE80211_MAX_AMPDU_BUF];
struct timer_list reorder_timer;
bool removed;
spinlock_t lock;
struct iwl_mvm *mvm;
} ____cacheline_aligned_in_smp;
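For context, frames leave this buffer by releasing consecutive sequence numbers from head_sn until the first hole. A minimal sketch of that in-order release, under the simplifying assumption of one frame per slot (the real driver also handles A-MSDU subframes and NSSN-driven release):

static void reorder_release_sketch(struct iwl_mvm_reorder_buffer *buf,
				   struct sk_buff_head *released)
{
	while (buf->num_stored) {
		int idx = buf->head_sn % buf->buf_size;
		struct sk_buff *skb = __skb_dequeue(&buf->entries[idx]);

		if (!skb)
			break;	/* hole: the missing SN was not received yet */

		__skb_queue_tail(released, skb);
		buf->num_stored--;
		buf->head_sn = ieee80211_sn_inc(buf->head_sn);
	}
}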
/**
* struct iwl_mvm_baid_data - BA session data
* @sta_id: station id
* @tid: tid of the session
* @baid: baid of the session
* @timeout: the timeout set in the addba request
* @last_rx: last rx jiffies, updated only if timeout passed from last update
* @session_timer: timer to check if BA session expired, runs at 2 * timeout
* @mvm: mvm pointer, needed for timer context
* @reorder_buf: reorder buffer, allocated per queue
*/
struct iwl_mvm_baid_data {
struct rcu_head rcu_head;
u8 sta_id;
u8 tid;
u8 baid;
u16 timeout;
unsigned long last_rx;
struct timer_list session_timer;
struct iwl_mvm *mvm;
struct iwl_mvm_reorder_buffer reorder_buf[];
};
/*
* enum iwl_mvm_queue_status - queue status
* @IWL_MVM_QUEUE_FREE: the queue is neither allocated nor reserved
* Basically, this means that this queue can be used for any purpose
* @IWL_MVM_QUEUE_RESERVED: queue is reserved but not yet in use
* This is the state of a queue that has been dedicated for some RA/TID
* (agg'd or not), but that hasn't yet gone through the actual enablement
* of iwl_mvm_enable_txq(), and therefore no traffic can go through it yet.
* Note that in this state there is no requirement to already know what TID
* should be used with this queue, it is just marked as a queue that will
* be used, and shouldn't be allocated to anyone else.
* @IWL_MVM_QUEUE_READY: queue is ready to be used
* This is the state of a queue that has been fully configured (including
* SCD pointers, etc), has a specific RA/TID assigned to it, and can be
* used to send traffic.
*/
enum iwl_mvm_queue_status {
IWL_MVM_QUEUE_FREE,
IWL_MVM_QUEUE_RESERVED,
IWL_MVM_QUEUE_READY,
};
struct iwl_mvm {
/* for logger access */
struct device *dev;
@ -633,6 +713,8 @@ struct iwl_mvm {
unsigned long status;
u32 queue_sync_cookie;
atomic_t queue_sync_counter;
/*
* for beacon filtering -
* currently only one interface can be supported
@ -666,13 +748,8 @@ struct iwl_mvm {
u32 hw_queue_to_mac80211;
u8 hw_queue_refcount;
u8 ra_sta_id; /* The RA this queue is mapped to, if exists */
/*
* This is to mark that queue is reserved for a STA but not yet
* allocated. This is needed to make sure we have at least one
* available queue to use when adding a new STA
*/
bool setup_reserved;
u16 tid_bitmap; /* Bitmap of the TIDs mapped to this queue */
enum iwl_mvm_queue_status status;
} queue_info[IWL_MAX_HW_QUEUES];
spinlock_t queue_info_lock; /* For syncing queue mgmt operations */
struct work_struct add_stream_wk; /* To add streams to queues */
@ -920,6 +997,10 @@ struct iwl_mvm {
u32 ciphers[6];
struct iwl_mvm_tof_data tof_data;
struct ieee80211_vif *nan_vif;
#define IWL_MAX_BAID 32
struct iwl_mvm_baid_data __rcu *baid_map[IWL_MAX_BAID];
/*
* Drop beacons from other APs in AP mode when there are no connected
* clients.
@ -1065,7 +1146,8 @@ static inline bool iwl_mvm_bt_is_rrc_supported(struct iwl_mvm *mvm)
static inline bool iwl_mvm_is_csum_supported(struct iwl_mvm *mvm)
{
return fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_CSUM_SUPPORT);
IWL_UCODE_TLV_CAPA_CSUM_SUPPORT) &&
!IWL_MVM_HW_CSUM_DISABLE;
}
static inline bool iwl_mvm_is_mplut_supported(struct iwl_mvm *mvm)
@ -1242,7 +1324,7 @@ void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct napi_struct *napi,
void iwl_mvm_rx_phy_cmd_mq(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb);
void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
struct iwl_rx_cmd_buffer *rxb, int queue);
void iwl_mvm_rx_frame_release(struct iwl_mvm *mvm,
void iwl_mvm_rx_frame_release(struct iwl_mvm *mvm, struct napi_struct *napi,
struct iwl_rx_cmd_buffer *rxb, int queue);
int iwl_mvm_notify_rx_queue(struct iwl_mvm *mvm, u32 rxq_mask,
const u8 *data, u32 count);
@ -1566,6 +1648,10 @@ static inline void iwl_mvm_stop_device(struct iwl_mvm *mvm)
void iwl_mvm_start_mac_queues(struct iwl_mvm *mvm, unsigned long mq);
void iwl_mvm_stop_mac_queues(struct iwl_mvm *mvm, unsigned long mq);
/* Re-configure the SCD for a queue that has already been configured */
int iwl_mvm_reconfig_scd(struct iwl_mvm *mvm, int queue, int fifo, int sta_id,
int tid, int frame_limit, u16 ssn);
/* Thermal management and CT-kill */
void iwl_mvm_tt_tx_backoff(struct iwl_mvm *mvm, u32 backoff);
void iwl_mvm_tt_temp_changed(struct iwl_mvm *mvm, u32 temp);
@ -1628,6 +1714,10 @@ void iwl_mvm_tdls_cancel_channel_switch(struct ieee80211_hw *hw,
void iwl_mvm_rx_tdls_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb);
void iwl_mvm_tdls_ch_switch_work(struct work_struct *work);
void iwl_mvm_sync_rx_queues_internal(struct iwl_mvm *mvm,
struct iwl_mvm_internal_rxq_notif *notif,
u32 size);
void iwl_mvm_reorder_timer_expired(unsigned long data);
struct ieee80211_vif *iwl_mvm_get_bss_vif(struct iwl_mvm *mvm);
void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error);

View File

@ -554,8 +554,13 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
mvm->restart_fw = iwlwifi_mod_params.restart_fw ? -1 : 0;
mvm->aux_queue = 15;
mvm->first_agg_queue = 16;
mvm->last_agg_queue = mvm->cfg->base_params->num_of_queues - 1;
if (!iwl_mvm_is_dqa_supported(mvm)) {
mvm->first_agg_queue = 16;
mvm->last_agg_queue = mvm->cfg->base_params->num_of_queues - 1;
} else {
mvm->first_agg_queue = IWL_MVM_DQA_MIN_DATA_QUEUE;
mvm->last_agg_queue = IWL_MVM_DQA_MAX_DATA_QUEUE;
}
if (mvm->cfg->base_params->num_of_queues == 16) {
mvm->aux_queue = 11;
mvm->first_agg_queue = 12;
@ -586,6 +591,8 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
skb_queue_head_init(&mvm->d0i3_tx);
init_waitqueue_head(&mvm->d0i3_exit_waitq);
atomic_set(&mvm->queue_sync_counter, 0);
SET_IEEE80211_DEV(mvm->hw, mvm->trans->dev);
/*
@ -930,7 +937,7 @@ static void iwl_mvm_rx(struct iwl_op_mode *op_mode,
if (likely(pkt->hdr.cmd == REPLY_RX_MPDU_CMD))
iwl_mvm_rx_rx_mpdu(mvm, napi, rxb);
else if (pkt->hdr.cmd == FRAME_RELEASE)
iwl_mvm_rx_frame_release(mvm, rxb, 0);
iwl_mvm_rx_frame_release(mvm, napi, rxb, 0);
else if (pkt->hdr.cmd == REPLY_RX_PHY_CMD)
iwl_mvm_rx_rx_phy_cmd(mvm, rxb);
else
@ -1208,7 +1215,6 @@ static bool iwl_mvm_disallow_offloading(struct iwl_mvm *mvm,
struct iwl_d0i3_iter_data *iter_data)
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct ieee80211_sta *ap_sta;
struct iwl_mvm_sta *mvmsta;
u32 available_tids = 0;
u8 tid;
@ -1217,11 +1223,10 @@ static bool iwl_mvm_disallow_offloading(struct iwl_mvm *mvm,
mvmvif->ap_sta_id == IWL_MVM_STATION_COUNT))
return false;
ap_sta = rcu_dereference(mvm->fw_id_to_mac_id[mvmvif->ap_sta_id]);
if (IS_ERR_OR_NULL(ap_sta))
mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, mvmvif->ap_sta_id);
if (!mvmsta)
return false;
mvmsta = iwl_mvm_sta_from_mac80211(ap_sta);
spin_lock_bh(&mvmsta->lock);
for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++) {
struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
@ -1632,7 +1637,7 @@ static void iwl_mvm_rx_mq_rss(struct iwl_op_mode *op_mode,
struct iwl_rx_packet *pkt = rxb_addr(rxb);
if (unlikely(pkt->hdr.cmd == FRAME_RELEASE))
iwl_mvm_rx_frame_release(mvm, rxb, queue);
iwl_mvm_rx_frame_release(mvm, napi, rxb, queue);
else if (unlikely(pkt->hdr.cmd == RX_QUEUES_NOTIFICATION &&
pkt->hdr.group_id == DATA_PATH_GROUP))
iwl_mvm_rx_queue_notif(mvm, rxb, queue);

View File

@ -97,6 +97,7 @@ void iwl_mvm_rx_rx_phy_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
* Adds the rxb to a new skb and gives it to mac80211
*/
static void iwl_mvm_pass_packet_to_mac80211(struct iwl_mvm *mvm,
struct ieee80211_sta *sta,
struct napi_struct *napi,
struct sk_buff *skb,
struct ieee80211_hdr *hdr, u16 len,
@ -131,7 +132,7 @@ static void iwl_mvm_pass_packet_to_mac80211(struct iwl_mvm *mvm,
fraglen, rxb->truesize);
}
ieee80211_rx_napi(mvm->hw, NULL, skb, napi);
ieee80211_rx_napi(mvm->hw, sta, skb, napi);
}
/*
@ -271,6 +272,7 @@ void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct napi_struct *napi,
u32 rate_n_flags;
u32 rx_pkt_status;
u8 crypt_len = 0;
bool take_ref;
phy_info = &mvm->last_phy_info;
rx_res = (struct iwl_rx_mpdu_res_start *)pkt->data;
@ -453,8 +455,26 @@ void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct napi_struct *napi,
mvm->sched_scan_pass_all == SCHED_SCAN_PASS_ALL_ENABLED))
mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_FOUND;
iwl_mvm_pass_packet_to_mac80211(mvm, napi, skb, hdr, len, ampdu_status,
crypt_len, rxb);
if (unlikely(ieee80211_is_beacon(hdr->frame_control) ||
ieee80211_is_probe_resp(hdr->frame_control)))
rx_status->boottime_ns = ktime_get_boot_ns();
/* Take a reference briefly to kick off a d0i3 entry delay so
* we can handle bursts of RX packets without toggling the
* state too often. But don't do this for beacons if we are
* going to idle because the beacon filtering changes we make
* cause the firmware to send us collateral beacons. */
take_ref = !(test_bit(STATUS_TRANS_GOING_IDLE, &mvm->trans->status) &&
ieee80211_is_beacon(hdr->frame_control));
if (take_ref)
iwl_mvm_ref(mvm, IWL_MVM_REF_RX);
iwl_mvm_pass_packet_to_mac80211(mvm, sta, napi, skb, hdr, len,
ampdu_status, crypt_len, rxb);
if (take_ref)
iwl_mvm_unref(mvm, IWL_MVM_REF_RX);
}
static void iwl_mvm_update_rx_statistics(struct iwl_mvm *mvm,

View File

@ -210,7 +210,7 @@ static void iwl_mvm_pass_packet_to_mac80211(struct iwl_mvm *mvm,
if (iwl_mvm_check_pn(mvm, skb, queue, sta))
kfree_skb(skb);
else
ieee80211_rx_napi(mvm->hw, NULL, skb, napi);
ieee80211_rx_napi(mvm->hw, sta, skb, napi);
}
static void iwl_mvm_get_signal_strength(struct iwl_mvm *mvm,
@ -395,6 +395,150 @@ int iwl_mvm_notify_rx_queue(struct iwl_mvm *mvm, u32 rxq_mask,
return ret;
}
/*
* Returns true if sn2 - buffer_size < sn1 < sn2.
* To be used only in order to compare reorder buffer head with NSSN.
* We fully trust NSSN unless it is behind us due to reorder timeout.
* Reorder timeout can only bring us up to buffer_size SNs ahead of NSSN.
*/
static bool iwl_mvm_is_sn_less(u16 sn1, u16 sn2, u16 buffer_size)
{
return ieee80211_sn_less(sn1, sn2) &&
!ieee80211_sn_less(sn1, sn2 - buffer_size);
}
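
For context, 802.11 sequence numbers are 12 bits wide, so these comparisons are all modulo 4096. A standalone sketch of the windowed check above; sn_less() mirrors the mac80211 ieee80211_sn_less() semantics, and the demo values are illustrative:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* mac80211 sequence numbers are 12 bits (modulo 4096) */
#define SN_MASK   0xfff
#define SN_MODULO (SN_MASK + 1)

static bool sn_less(uint16_t sn1, uint16_t sn2)
{
	/* same idea as ieee80211_sn_less(): sn1 precedes sn2 iff the
	 * modular distance from sn2 back to sn1 is in the upper half */
	return ((sn1 - sn2) & SN_MASK) > (SN_MODULO >> 1);
}

/* the windowed check above: sn2 - buffer_size < sn1 < sn2 */
static bool is_sn_less(uint16_t sn1, uint16_t sn2, uint16_t buffer_size)
{
	return sn_less(sn1, sn2) && !sn_less(sn1, (sn2 - buffer_size) & SN_MASK);
}

int main(void)
{
	/* head_sn just below the wrap point, nssn just past it */
	printf("%d\n", is_sn_less(4090, 5, 64));  /* 1: within window across wrap */
	printf("%d\n", is_sn_less(5, 4090, 64));  /* 0: 5 is ahead of 4090 here */
	return 0;
}
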
#define RX_REORDER_BUF_TIMEOUT_MQ (HZ / 10)
static void iwl_mvm_release_frames(struct iwl_mvm *mvm,
struct ieee80211_sta *sta,
struct napi_struct *napi,
struct iwl_mvm_reorder_buffer *reorder_buf,
u16 nssn)
{
u16 ssn = reorder_buf->head_sn;
lockdep_assert_held(&reorder_buf->lock);
/* ignore nssn smaller than head sn - this can happen due to timeout */
if (iwl_mvm_is_sn_less(nssn, ssn, reorder_buf->buf_size))
return;
while (iwl_mvm_is_sn_less(ssn, nssn, reorder_buf->buf_size)) {
int index = ssn % reorder_buf->buf_size;
struct sk_buff_head *skb_list = &reorder_buf->entries[index];
struct sk_buff *skb;
ssn = ieee80211_sn_inc(ssn);
/* holes are valid since nssn indicates frames were received. */
if (skb_queue_empty(skb_list) || !skb_peek_tail(skb_list))
continue;
/* Empty the list. Will have more than one frame for A-MSDU */
while ((skb = __skb_dequeue(skb_list))) {
iwl_mvm_pass_packet_to_mac80211(mvm, napi, skb,
reorder_buf->queue,
sta);
reorder_buf->num_stored--;
}
}
reorder_buf->head_sn = nssn;
if (reorder_buf->num_stored && !reorder_buf->removed) {
u16 index = reorder_buf->head_sn % reorder_buf->buf_size;
while (!skb_peek_tail(&reorder_buf->entries[index]))
index = (index + 1) % reorder_buf->buf_size;
/* modify timer to match next frame's expiration time */
mod_timer(&reorder_buf->reorder_timer,
reorder_buf->reorder_time[index] + 1 +
RX_REORDER_BUF_TIMEOUT_MQ);
} else {
del_timer(&reorder_buf->reorder_timer);
}
}
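
The loop above walks the ring from head_sn toward nssn, skipping holes (safe, since the NSSN proves the peer already saw those frames) and draining every skb queued at a slot, because an A-MSDU leaves several frames at one SN. A simplified self-contained sketch of the same flavor, with skb lists reduced to counters and the modular SN helpers elided:

#include <stdio.h>

#define BUF_SIZE 8

/* entries[i] = number of frames stored in slot i (0 = hole) */
static int entries[BUF_SIZE];
static int num_stored;

/* deliver everything from head_sn up to (but not including) nssn;
 * holes are fine because nssn guarantees those frames were received */
static unsigned release_frames(unsigned head_sn, unsigned nssn)
{
	while (head_sn != nssn) {
		int idx = head_sn % BUF_SIZE;

		while (entries[idx]) {	/* >1 frame per slot for an A-MSDU */
			printf("deliver frame from slot %d (sn=%u)\n", idx, head_sn);
			entries[idx]--;
			num_stored--;
		}
		head_sn++;		/* real code: ieee80211_sn_inc() */
	}
	return head_sn;			/* becomes the new head_sn */
}

int main(void)
{
	entries[2] = 2;	/* two subframes of one A-MSDU at sn 2 */
	entries[4] = 1;	/* single frame at sn 4; sn 3 is a hole */
	num_stored = 3;
	unsigned head = release_frames(2, 6);
	printf("new head_sn=%u stored=%d\n", head, num_stored);
	return 0;
}
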
void iwl_mvm_reorder_timer_expired(unsigned long data)
{
struct iwl_mvm_reorder_buffer *buf = (void *)data;
int i;
u16 sn = 0, index = 0;
bool expired = false;
spin_lock_bh(&buf->lock);
if (!buf->num_stored || buf->removed) {
spin_unlock_bh(&buf->lock);
return;
}
for (i = 0; i < buf->buf_size ; i++) {
index = (buf->head_sn + i) % buf->buf_size;
if (!skb_peek_tail(&buf->entries[index]))
continue;
if (!time_after(jiffies, buf->reorder_time[index] +
RX_REORDER_BUF_TIMEOUT_MQ))
break;
expired = true;
sn = ieee80211_sn_add(buf->head_sn, i + 1);
}
if (expired) {
struct ieee80211_sta *sta;
rcu_read_lock();
sta = rcu_dereference(buf->mvm->fw_id_to_mac_id[buf->sta_id]);
/* SN is set to the last expired frame + 1 */
iwl_mvm_release_frames(buf->mvm, sta, NULL, buf, sn);
rcu_read_unlock();
} else if (buf->num_stored) {
/*
* If no frame expired and there are stored frames, index is now
* pointing to the first unexpired frame - modify the timer
* to match that frame's expiration time.
*/
mod_timer(&buf->reorder_timer,
buf->reorder_time[index] +
1 + RX_REORDER_BUF_TIMEOUT_MQ);
}
spin_unlock_bh(&buf->lock);
}
static void iwl_mvm_del_ba(struct iwl_mvm *mvm, int queue,
struct iwl_mvm_delba_data *data)
{
struct iwl_mvm_baid_data *ba_data;
struct ieee80211_sta *sta;
struct iwl_mvm_reorder_buffer *reorder_buf;
u8 baid = data->baid;
if (WARN_ON_ONCE(baid >= IWL_RX_REORDER_DATA_INVALID_BAID))
return;
rcu_read_lock();
ba_data = rcu_dereference(mvm->baid_map[baid]);
if (WARN_ON_ONCE(!ba_data))
goto out;
sta = rcu_dereference(mvm->fw_id_to_mac_id[ba_data->sta_id]);
if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta)))
goto out;
reorder_buf = &ba_data->reorder_buf[queue];
/* release all frames that are in the reorder buffer to the stack */
spin_lock_bh(&reorder_buf->lock);
iwl_mvm_release_frames(mvm, sta, NULL, reorder_buf,
ieee80211_sn_add(reorder_buf->head_sn,
reorder_buf->buf_size));
spin_unlock_bh(&reorder_buf->lock);
del_timer_sync(&reorder_buf->reorder_timer);
out:
rcu_read_unlock();
}
void iwl_mvm_rx_queue_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
int queue)
{
@ -405,15 +549,182 @@ void iwl_mvm_rx_queue_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
notif = (void *)pkt->data;
internal_notif = (void *)notif->payload;
if (internal_notif->sync) {
if (mvm->queue_sync_cookie != internal_notif->cookie) {
WARN_ONCE(1,
"Received expired RX queue sync message\n");
return;
}
atomic_dec(&mvm->queue_sync_counter);
}
switch (internal_notif->type) {
case IWL_MVM_RXQ_EMPTY:
break;
case IWL_MVM_RXQ_NOTIF_DEL_BA:
/* TODO */
iwl_mvm_del_ba(mvm, queue, (void *)internal_notif->data);
break;
default:
WARN_ONCE(1, "Invalid identifier %d", internal_notif->type);
}
}
/*
* Returns true if the MPDU was buffered/dropped, false if it should be passed
* to upper layer.
*/
static bool iwl_mvm_reorder(struct iwl_mvm *mvm,
struct napi_struct *napi,
int queue,
struct ieee80211_sta *sta,
struct sk_buff *skb,
struct iwl_rx_mpdu_desc *desc)
{
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
struct iwl_mvm_baid_data *baid_data;
struct iwl_mvm_reorder_buffer *buffer;
struct sk_buff *tail;
u32 reorder = le32_to_cpu(desc->reorder_data);
bool amsdu = desc->mac_flags2 & IWL_RX_MPDU_MFLG2_AMSDU;
u8 tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK;
u8 sub_frame_idx = desc->amsdu_info &
IWL_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK;
int index;
u16 nssn, sn;
u8 baid;
baid = (reorder & IWL_RX_MPDU_REORDER_BAID_MASK) >>
IWL_RX_MPDU_REORDER_BAID_SHIFT;
if (baid == IWL_RX_REORDER_DATA_INVALID_BAID)
return false;
/* no sta yet */
if (WARN_ON(IS_ERR_OR_NULL(sta)))
return false;
/* not a data packet */
if (!ieee80211_is_data_qos(hdr->frame_control) ||
is_multicast_ether_addr(hdr->addr1))
return false;
if (unlikely(!ieee80211_is_data_present(hdr->frame_control)))
return false;
baid_data = rcu_dereference(mvm->baid_map[baid]);
if (WARN(!baid_data,
"Received baid %d, but no data exists for this BAID\n", baid))
return false;
if (WARN(tid != baid_data->tid || mvm_sta->sta_id != baid_data->sta_id,
"baid 0x%x is mapped to sta:%d tid:%d, but was received for sta:%d tid:%d\n",
baid, baid_data->sta_id, baid_data->tid, mvm_sta->sta_id,
tid))
return false;
nssn = reorder & IWL_RX_MPDU_REORDER_NSSN_MASK;
sn = (reorder & IWL_RX_MPDU_REORDER_SN_MASK) >>
IWL_RX_MPDU_REORDER_SN_SHIFT;
buffer = &baid_data->reorder_buf[queue];
spin_lock_bh(&buffer->lock);
/*
* If there was a significant jump in the nssn - adjust.
* If the SN is smaller than the NSSN it might need to first go into
* the reorder buffer, in which case we just release up to it and the
* rest of the function will take care of storing it and releasing up to the
* nssn
*/
if (!iwl_mvm_is_sn_less(nssn, buffer->head_sn + buffer->buf_size,
buffer->buf_size)) {
u16 min_sn = ieee80211_sn_less(sn, nssn) ? sn : nssn;
iwl_mvm_release_frames(mvm, sta, napi, buffer, min_sn);
}
/* drop any outdated packets */
if (ieee80211_sn_less(sn, buffer->head_sn))
goto drop;
/* release immediately if allowed by nssn and no stored frames */
if (!buffer->num_stored && ieee80211_sn_less(sn, nssn)) {
if (iwl_mvm_is_sn_less(buffer->head_sn, nssn,
buffer->buf_size))
buffer->head_sn = nssn;
/* No need to update AMSDU last SN - we are moving the head */
spin_unlock_bh(&buffer->lock);
return false;
}
index = sn % buffer->buf_size;
/*
* Check if we already stored this frame.
* Since an A-MSDU is either received in full or not at all, the logic
* is simple: if there are frames in that position in the buffer and the
* last frame originated from an A-MSDU with a different SN, then it is
* a retransmission. If it has the same SN, it is the same A-MSDU only
* if the subframe index is incrementing - otherwise it is a
* retransmission.
*/
tail = skb_peek_tail(&buffer->entries[index]);
if (tail && !amsdu)
goto drop;
else if (tail && (sn != buffer->last_amsdu ||
buffer->last_sub_index >= sub_frame_idx))
goto drop;
/* put in reorder buffer */
__skb_queue_tail(&buffer->entries[index], skb);
buffer->num_stored++;
buffer->reorder_time[index] = jiffies;
if (amsdu) {
buffer->last_amsdu = sn;
buffer->last_sub_index = sub_frame_idx;
}
iwl_mvm_release_frames(mvm, sta, napi, buffer, nssn);
spin_unlock_bh(&buffer->lock);
return true;
drop:
kfree_skb(skb);
spin_unlock_bh(&buffer->lock);
return true;
}
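
The store path's duplicate check is the subtle part: an occupied slot always means a duplicate for a non-A-MSDU frame, while an A-MSDU subframe is new only if it continues the same A-MSDU (same SN, strictly increasing subframe index). A standalone sketch of just that predicate; the names are illustrative:

#include <stdbool.h>
#include <stdio.h>

/* mirrors the drop decision for an occupied reorder-buffer slot */
static bool should_drop(bool slot_occupied, bool amsdu,
			unsigned sn, unsigned last_amsdu_sn,
			unsigned sub_idx, unsigned last_sub_idx)
{
	if (!slot_occupied)
		return false;	/* empty slot: store the frame */
	if (!amsdu)
		return true;	/* non-A-MSDU on stored frames: duplicate */
	/* same A-MSDU only if same SN and the subframe index advanced */
	return sn != last_amsdu_sn || last_sub_idx >= sub_idx;
}

int main(void)
{
	printf("%d\n", should_drop(true, true, 100, 100, 2, 1)); /* 0: next subframe */
	printf("%d\n", should_drop(true, true, 100, 100, 1, 1)); /* 1: retransmission */
	printf("%d\n", should_drop(true, false, 100, 0, 0, 0));  /* 1: duplicate */
	return 0;
}
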
static void iwl_mvm_agg_rx_received(struct iwl_mvm *mvm, u8 baid)
{
unsigned long now = jiffies;
unsigned long timeout;
struct iwl_mvm_baid_data *data;
rcu_read_lock();
data = rcu_dereference(mvm->baid_map[baid]);
if (WARN_ON(!data))
goto out;
if (!data->timeout)
goto out;
timeout = data->timeout;
/*
* Do not update last_rx on every frame to avoid cache bouncing
* between the rx queues.
* Update it at most once per timeout period. Worst case the session
* will expire after ~ 2 * timeout, which doesn't matter that much.
*/
if (time_before(data->last_rx + TU_TO_JIFFIES(timeout), now))
/* Update is atomic */
data->last_rx = now;
out:
rcu_read_unlock();
}
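
A small sketch of the throttle above, with jiffies replaced by a plain counter: last_rx moves forward at most once per timeout period, so a live session is refreshed cheaply while the worst-case expiry stays within ~2 * timeout:

#include <stdio.h>

static unsigned long last_rx;

/* refresh last_rx at most once per timeout period, so multiple RX
 * queues don't bounce the cache line on every received frame */
static void agg_rx_received(unsigned long now, unsigned long timeout)
{
	if (now > last_rx + timeout)	/* real code: time_before(..., now) */
		last_rx = now;		/* plain store; update is atomic */
}

int main(void)
{
	unsigned long timeout = 100;

	for (unsigned long now = 0; now <= 350; now += 50) {
		agg_rx_received(now, timeout);
		printf("now=%lu last_rx=%lu\n", now, last_rx);
	}
	return 0;
}
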
void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
struct iwl_rx_cmd_buffer *rxb, int queue)
{
@ -484,6 +795,9 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
if (sta) {
struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
u8 baid = (u8)((le32_to_cpu(desc->reorder_data) &
IWL_RX_MPDU_REORDER_BAID_MASK) >>
IWL_RX_MPDU_REORDER_BAID_SHIFT);
/*
* We have tx blocked stations (with CS bit). If we heard
@ -536,6 +850,8 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
*qc &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;
}
if (baid != IWL_RX_REORDER_DATA_INVALID_BAID)
iwl_mvm_agg_rx_received(mvm, baid);
}
/*
@ -593,12 +909,42 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
/* TODO: PHY info - gscan */
iwl_mvm_create_skb(skb, hdr, len, crypt_len, rxb);
iwl_mvm_pass_packet_to_mac80211(mvm, napi, skb, queue, sta);
if (!iwl_mvm_reorder(mvm, napi, queue, sta, skb, desc))
iwl_mvm_pass_packet_to_mac80211(mvm, napi, skb, queue, sta);
rcu_read_unlock();
}
void iwl_mvm_rx_frame_release(struct iwl_mvm *mvm,
void iwl_mvm_rx_frame_release(struct iwl_mvm *mvm, struct napi_struct *napi,
struct iwl_rx_cmd_buffer *rxb, int queue)
{
/* TODO */
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_frame_release *release = (void *)pkt->data;
struct ieee80211_sta *sta;
struct iwl_mvm_reorder_buffer *reorder_buf;
struct iwl_mvm_baid_data *ba_data;
int baid = release->baid;
if (WARN_ON_ONCE(baid == IWL_RX_REORDER_DATA_INVALID_BAID))
return;
rcu_read_lock();
ba_data = rcu_dereference(mvm->baid_map[baid]);
if (WARN_ON_ONCE(!ba_data))
goto out;
sta = rcu_dereference(mvm->fw_id_to_mac_id[ba_data->sta_id]);
if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta)))
goto out;
reorder_buf = &ba_data->reorder_buf[queue];
spin_lock_bh(&reorder_buf->lock);
iwl_mvm_release_frames(mvm, sta, napi, reorder_buf,
le16_to_cpu(release->nssn));
spin_unlock_bh(&reorder_buf->lock);
out:
rcu_read_unlock();
}

View File

@ -223,6 +223,39 @@ int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
return ret;
}
static void iwl_mvm_rx_agg_session_expired(unsigned long data)
{
struct iwl_mvm_baid_data __rcu **rcu_ptr = (void *)data;
struct iwl_mvm_baid_data *ba_data;
struct ieee80211_sta *sta;
struct iwl_mvm_sta *mvm_sta;
unsigned long timeout;
rcu_read_lock();
ba_data = rcu_dereference(*rcu_ptr);
if (WARN_ON(!ba_data))
goto unlock;
if (!ba_data->timeout)
goto unlock;
timeout = ba_data->last_rx + TU_TO_JIFFIES(ba_data->timeout * 2);
if (time_is_after_jiffies(timeout)) {
mod_timer(&ba_data->session_timer, timeout);
goto unlock;
}
/* Timer expired */
sta = rcu_dereference(ba_data->mvm->fw_id_to_mac_id[ba_data->sta_id]);
mvm_sta = iwl_mvm_sta_from_mac80211(sta);
ieee80211_stop_rx_ba_session_offl(mvm_sta->vif,
sta->addr, ba_data->tid);
unlock:
rcu_read_unlock();
}
static int iwl_mvm_tdls_sta_init(struct iwl_mvm *mvm,
struct ieee80211_sta *sta)
{
@ -293,6 +326,7 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
u8 mac_queue = mvmsta->vif->hw_queue[ac];
int queue = -1;
int ssn;
int ret;
lockdep_assert_held(&mvm->mutex);
@ -321,8 +355,15 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
if (queue < 0)
queue = iwl_mvm_find_free_queue(mvm, IWL_MVM_DQA_MIN_DATA_QUEUE,
IWL_MVM_DQA_MAX_DATA_QUEUE);
/*
* Mark TXQ as ready, even though it hasn't been fully configured yet,
* to make sure no one else takes it.
* This will allow avoiding re-acquiring the lock at the end of the
* configuration. On error we'll mark it back as free.
*/
if (queue >= 0)
mvm->queue_info[queue].setup_reserved = false;
mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;
spin_unlock_bh(&mvm->queue_info_lock);
@ -354,7 +395,16 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
mvmsta->reserved_queue = IEEE80211_INVAL_HW_QUEUE;
spin_unlock_bh(&mvmsta->lock);
return iwl_mvm_sta_send_to_fw(mvm, sta, true, STA_MODIFY_QUEUES);
ret = iwl_mvm_sta_send_to_fw(mvm, sta, true, STA_MODIFY_QUEUES);
if (ret)
goto out_err;
return 0;
out_err:
iwl_mvm_disable_txq(mvm, queue, mac_queue, tid, 0);
return ret;
}
static inline u8 iwl_mvm_tid_to_ac_queue(int tid)
@ -460,7 +510,8 @@ static int iwl_mvm_reserve_sta_stream(struct iwl_mvm *mvm,
/* Make sure we have free resources for this STA */
if (vif_type == NL80211_IFTYPE_STATION && !sta->tdls &&
!mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].hw_queue_refcount &&
!mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].setup_reserved)
(mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].status ==
IWL_MVM_QUEUE_FREE))
queue = IWL_MVM_DQA_BSS_CLIENT_QUEUE;
else
queue = iwl_mvm_find_free_queue(mvm, IWL_MVM_DQA_MIN_DATA_QUEUE,
@ -470,7 +521,7 @@ static int iwl_mvm_reserve_sta_stream(struct iwl_mvm *mvm,
IWL_ERR(mvm, "No available queues for new station\n");
return -ENOSPC;
}
mvm->queue_info[queue].setup_reserved = true;
mvm->queue_info[queue].status = IWL_MVM_QUEUE_RESERVED;
spin_unlock_bh(&mvm->queue_info_lock);
@ -1000,6 +1051,33 @@ int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
lockdep_assert_held(&mvm->mutex);
if (iwl_mvm_is_dqa_supported(mvm)) {
struct iwl_trans_txq_scd_cfg cfg = {
.fifo = IWL_MVM_TX_FIFO_VO,
.sta_id = mvmvif->bcast_sta.sta_id,
.tid = IWL_MAX_TID_COUNT,
.aggregate = false,
.frame_limit = IWL_FRAME_LIMIT,
};
unsigned int wdg_timeout =
iwl_mvm_get_wd_timeout(mvm, vif, false, false);
int queue;
if ((vif->type == NL80211_IFTYPE_AP) &&
(mvmvif->bcast_sta.tfd_queue_msk &
BIT(IWL_MVM_DQA_AP_PROBE_RESP_QUEUE)))
queue = IWL_MVM_DQA_AP_PROBE_RESP_QUEUE;
else if ((vif->type == NL80211_IFTYPE_P2P_DEVICE) &&
(mvmvif->bcast_sta.tfd_queue_msk &
BIT(IWL_MVM_DQA_P2P_DEVICE_QUEUE)))
queue = IWL_MVM_DQA_P2P_DEVICE_QUEUE;
else if (WARN(1, "Missed required TXQ for adding bcast STA\n"))
return -EINVAL;
iwl_mvm_enable_txq(mvm, queue, vif->hw_queue[0], 0, &cfg,
wdg_timeout);
}
if (vif->type == NL80211_IFTYPE_ADHOC)
baddr = vif->bss_conf.bssid;
@ -1028,20 +1106,28 @@ int iwl_mvm_send_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
int iwl_mvm_alloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
u32 qmask;
u32 qmask = 0;
lockdep_assert_held(&mvm->mutex);
qmask = iwl_mvm_mac_get_queues_mask(vif);
if (!iwl_mvm_is_dqa_supported(mvm))
qmask = iwl_mvm_mac_get_queues_mask(vif);
/*
* The firmware defines the TFD queue mask to only be relevant
* for *unicast* queues, so the multicast (CAB) queue shouldn't
* be included.
*/
if (vif->type == NL80211_IFTYPE_AP)
if (vif->type == NL80211_IFTYPE_AP) {
/*
* The firmware defines the TFD queue mask to only be relevant
* for *unicast* queues, so the multicast (CAB) queue shouldn't
* be included.
*/
qmask &= ~BIT(vif->cab_queue);
if (iwl_mvm_is_dqa_supported(mvm))
qmask |= BIT(IWL_MVM_DQA_AP_PROBE_RESP_QUEUE);
} else if (iwl_mvm_is_dqa_supported(mvm) &&
vif->type == NL80211_IFTYPE_P2P_DEVICE) {
qmask |= BIT(IWL_MVM_DQA_P2P_DEVICE_QUEUE);
}
return iwl_mvm_allocate_int_sta(mvm, &mvmvif->bcast_sta, qmask,
ieee80211_vif_type_p2p(vif));
}
@ -1099,11 +1185,92 @@ int iwl_mvm_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
#define IWL_MAX_RX_BA_SESSIONS 16
static void iwl_mvm_sync_rxq_del_ba(struct iwl_mvm *mvm, u8 baid)
{
struct iwl_mvm_delba_notif notif = {
.metadata.type = IWL_MVM_RXQ_NOTIF_DEL_BA,
.metadata.sync = 1,
.delba.baid = baid,
};
iwl_mvm_sync_rx_queues_internal(mvm, (void *)&notif, sizeof(notif));
};
static void iwl_mvm_free_reorder(struct iwl_mvm *mvm,
struct iwl_mvm_baid_data *data)
{
int i;
iwl_mvm_sync_rxq_del_ba(mvm, data->baid);
for (i = 0; i < mvm->trans->num_rx_queues; i++) {
int j;
struct iwl_mvm_reorder_buffer *reorder_buf =
&data->reorder_buf[i];
spin_lock_bh(&reorder_buf->lock);
if (likely(!reorder_buf->num_stored)) {
spin_unlock_bh(&reorder_buf->lock);
continue;
}
/*
* This shouldn't happen in regular DELBA since the internal
* delBA notification should trigger a release of all frames in
* the reorder buffer.
*/
WARN_ON(1);
for (j = 0; j < reorder_buf->buf_size; j++)
__skb_queue_purge(&reorder_buf->entries[j]);
/*
* Prevent timer re-arm. This prevents a very far-fetched case
* where we timed out on the notification. There may be prior
* RX frames pending in the RX queue before the notification
* that might get processed between now and the actual deletion
* and we would re-arm the timer although we are deleting the
* reorder buffer.
*/
reorder_buf->removed = true;
spin_unlock_bh(&reorder_buf->lock);
del_timer_sync(&reorder_buf->reorder_timer);
}
}
static void iwl_mvm_init_reorder_buffer(struct iwl_mvm *mvm,
u32 sta_id,
struct iwl_mvm_baid_data *data,
u16 ssn, u8 buf_size)
{
int i;
for (i = 0; i < mvm->trans->num_rx_queues; i++) {
struct iwl_mvm_reorder_buffer *reorder_buf =
&data->reorder_buf[i];
int j;
reorder_buf->num_stored = 0;
reorder_buf->head_sn = ssn;
reorder_buf->buf_size = buf_size;
/* rx reorder timer */
reorder_buf->reorder_timer.function =
iwl_mvm_reorder_timer_expired;
reorder_buf->reorder_timer.data = (unsigned long)reorder_buf;
init_timer(&reorder_buf->reorder_timer);
spin_lock_init(&reorder_buf->lock);
reorder_buf->mvm = mvm;
reorder_buf->queue = i;
reorder_buf->sta_id = sta_id;
for (j = 0; j < reorder_buf->buf_size; j++)
__skb_queue_head_init(&reorder_buf->entries[j]);
}
}
int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
int tid, u16 ssn, bool start, u8 buf_size)
int tid, u16 ssn, bool start, u8 buf_size, u16 timeout)
{
struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
struct iwl_mvm_add_sta_cmd cmd = {};
struct iwl_mvm_baid_data *baid_data = NULL;
int ret;
u32 status;
@ -1114,6 +1281,19 @@ int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
return -ENOSPC;
}
if (iwl_mvm_has_new_rx_api(mvm) && start) {
/*
* Allocate here so if allocation fails we can bail out early
* before starting the BA session in the firmware
*/
baid_data = kzalloc(sizeof(*baid_data) +
mvm->trans->num_rx_queues *
sizeof(baid_data->reorder_buf[0]),
GFP_KERNEL);
if (!baid_data)
return -ENOMEM;
}
cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color);
cmd.sta_id = mvm_sta->sta_id;
cmd.add_modify = STA_MODE_MODIFY;
@ -1132,7 +1312,7 @@ int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
iwl_mvm_add_sta_cmd_size(mvm),
&cmd, &status);
if (ret)
return ret;
goto out_free;
switch (status & IWL_ADD_STA_STATUS_MASK) {
case ADD_STA_SUCCESS:
@ -1150,14 +1330,75 @@ int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
break;
}
if (!ret) {
if (start)
mvm->rx_ba_sessions++;
else if (mvm->rx_ba_sessions > 0)
/* check that restart flow didn't zero the counter */
mvm->rx_ba_sessions--;
}
if (ret)
goto out_free;
if (start) {
u8 baid;
mvm->rx_ba_sessions++;
if (!iwl_mvm_has_new_rx_api(mvm))
return 0;
if (WARN_ON(!(status & IWL_ADD_STA_BAID_VALID_MASK))) {
ret = -EINVAL;
goto out_free;
}
baid = (u8)((status & IWL_ADD_STA_BAID_MASK) >>
IWL_ADD_STA_BAID_SHIFT);
baid_data->baid = baid;
baid_data->timeout = timeout;
baid_data->last_rx = jiffies;
init_timer(&baid_data->session_timer);
baid_data->session_timer.function =
iwl_mvm_rx_agg_session_expired;
baid_data->session_timer.data =
(unsigned long)&mvm->baid_map[baid];
baid_data->mvm = mvm;
baid_data->tid = tid;
baid_data->sta_id = mvm_sta->sta_id;
mvm_sta->tid_to_baid[tid] = baid;
if (timeout)
mod_timer(&baid_data->session_timer,
TU_TO_EXP_TIME(timeout * 2));
iwl_mvm_init_reorder_buffer(mvm, mvm_sta->sta_id,
baid_data, ssn, buf_size);
/*
* protect the BA data with RCU to cover a case where our
* internal RX sync mechanism times out (not that it's
* supposed to happen) and we would free the session data while
* RX is being processed in parallel
*/
WARN_ON(rcu_access_pointer(mvm->baid_map[baid]));
rcu_assign_pointer(mvm->baid_map[baid], baid_data);
} else if (mvm->rx_ba_sessions > 0) {
u8 baid = mvm_sta->tid_to_baid[tid];
/* check that restart flow didn't zero the counter */
mvm->rx_ba_sessions--;
if (!iwl_mvm_has_new_rx_api(mvm))
return 0;
if (WARN_ON(baid == IWL_RX_REORDER_DATA_INVALID_BAID))
return -EINVAL;
baid_data = rcu_access_pointer(mvm->baid_map[baid]);
if (WARN_ON(!baid_data))
return -EINVAL;
/* synchronize all rx queues so we can safely delete */
iwl_mvm_free_reorder(mvm, baid_data);
del_timer_sync(&baid_data->session_timer);
RCU_INIT_POINTER(mvm->baid_map[baid], NULL);
kfree_rcu(baid_data, rcu_head);
}
return 0;
out_free:
kfree(baid_data);
return ret;
}
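
The kzalloc above sizes a single block for the header plus one reorder buffer per RX queue, i.e. the flexible-array layout. A userspace sketch of the same allocation shape, with the struct fields trimmed down; the field names here are illustrative:

#include <stdio.h>
#include <stdlib.h>

struct reorder_buffer { unsigned head_sn; int num_stored; };

struct baid_data {
	unsigned char baid;
	unsigned short timeout;
	struct reorder_buffer reorder_buf[];	/* flexible array member */
};

int main(void)
{
	int num_rx_queues = 4;	/* mvm->trans->num_rx_queues in the driver */
	/* one allocation covers the header and all per-queue buffers */
	struct baid_data *data = calloc(1, sizeof(*data) +
		num_rx_queues * sizeof(data->reorder_buf[0]));

	if (!data)
		return 1;
	for (int i = 0; i < num_rx_queues; i++)
		data->reorder_buf[i].head_sn = 0;
	printf("allocated %zu bytes for %d queues\n",
	       sizeof(*data) + num_rx_queues * sizeof(data->reorder_buf[0]),
	       num_rx_queues);
	free(data);
	return 0;
}
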
@ -1175,7 +1416,9 @@ static int iwl_mvm_sta_tx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
mvm_sta->tfd_queue_msk |= BIT(queue);
mvm_sta->tid_disable_agg &= ~BIT(tid);
} else {
mvm_sta->tfd_queue_msk &= ~BIT(queue);
/* In DQA-mode the queue isn't removed on agg termination */
if (!iwl_mvm_is_dqa_supported(mvm))
mvm_sta->tfd_queue_msk &= ~BIT(queue);
mvm_sta->tid_disable_agg |= BIT(tid);
}
@ -1258,17 +1501,35 @@ int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
spin_lock_bh(&mvm->queue_info_lock);
txq_id = iwl_mvm_find_free_queue(mvm, mvm->first_agg_queue,
mvm->last_agg_queue);
if (txq_id < 0) {
ret = txq_id;
spin_unlock_bh(&mvm->queue_info_lock);
IWL_ERR(mvm, "Failed to allocate agg queue\n");
goto release_locks;
/*
* Note the possible cases:
* 1. In DQA mode with an enabled TXQ - TXQ needs to become agg'ed
* 2. Non-DQA mode: the TXQ hasn't yet been enabled, so find a free
* one and mark it as reserved
* 3. In DQA mode, but no traffic yet on this TID: same treatment as in
* non-DQA mode, since the TXQ hasn't yet been allocated
*/
txq_id = mvmsta->tid_data[tid].txq_id;
if (!iwl_mvm_is_dqa_supported(mvm) ||
mvm->queue_info[txq_id].status != IWL_MVM_QUEUE_READY) {
txq_id = iwl_mvm_find_free_queue(mvm, mvm->first_agg_queue,
mvm->last_agg_queue);
if (txq_id < 0) {
ret = txq_id;
spin_unlock_bh(&mvm->queue_info_lock);
IWL_ERR(mvm, "Failed to allocate agg queue\n");
goto release_locks;
}
/* TXQ hasn't yet been enabled, so mark it only as reserved */
mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_RESERVED;
}
mvm->queue_info[txq_id].setup_reserved = true;
spin_unlock_bh(&mvm->queue_info_lock);
IWL_DEBUG_TX_QUEUES(mvm,
"AGG for tid %d will be on queue #%d\n",
tid, txq_id);
tid_data = &mvmsta->tid_data[tid];
tid_data->ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
tid_data->txq_id = txq_id;
@ -1303,6 +1564,7 @@ int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
unsigned int wdg_timeout =
iwl_mvm_get_wd_timeout(mvm, vif, sta->tdls, false);
int queue, ret;
bool alloc_queue = true;
u16 ssn;
struct iwl_trans_txq_scd_cfg cfg = {
@ -1328,8 +1590,46 @@ int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
cfg.fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];
iwl_mvm_enable_txq(mvm, queue, vif->hw_queue[tid_to_mac80211_ac[tid]],
ssn, &cfg, wdg_timeout);
/* In DQA mode, the existing queue might need to be reconfigured */
if (iwl_mvm_is_dqa_supported(mvm)) {
spin_lock_bh(&mvm->queue_info_lock);
/* Maybe there is no need to even alloc a queue... */
if (mvm->queue_info[queue].status == IWL_MVM_QUEUE_READY)
alloc_queue = false;
spin_unlock_bh(&mvm->queue_info_lock);
/*
* Only reconfig the SCD for the queue if the window size has
* changed from current (become smaller)
*/
if (!alloc_queue && buf_size < mvmsta->max_agg_bufsize) {
/*
* If reconfiguring an existing queue, it first must be
* drained
*/
ret = iwl_trans_wait_tx_queue_empty(mvm->trans,
BIT(queue));
if (ret) {
IWL_ERR(mvm,
"Error draining queue before reconfig\n");
return ret;
}
ret = iwl_mvm_reconfig_scd(mvm, queue, cfg.fifo,
mvmsta->sta_id, tid,
buf_size, ssn);
if (ret) {
IWL_ERR(mvm,
"Error reconfiguring TXQ #%d\n", queue);
return ret;
}
}
}
if (alloc_queue)
iwl_mvm_enable_txq(mvm, queue,
vif->hw_queue[tid_to_mac80211_ac[tid]], ssn,
&cfg, wdg_timeout);
ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
if (ret)
@ -1337,7 +1637,7 @@ int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
/* No need to mark as reserved */
spin_lock_bh(&mvm->queue_info_lock);
mvm->queue_info[queue].setup_reserved = false;
mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;
spin_unlock_bh(&mvm->queue_info_lock);
/*
@ -1384,9 +1684,16 @@ int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
mvmsta->agg_tids &= ~BIT(tid);
/* No need to mark as reserved anymore */
spin_lock_bh(&mvm->queue_info_lock);
mvm->queue_info[txq_id].setup_reserved = false;
/*
* The TXQ is marked as reserved only if no traffic came through yet.
* This means no traffic has been sent on this TID (agg'd or not), so
* we no longer have use for the queue. Since it hasn't even been
* allocated through iwl_mvm_enable_txq, we can just mark it back as
* free.
*/
if (mvm->queue_info[txq_id].status == IWL_MVM_QUEUE_RESERVED)
mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_FREE;
spin_unlock_bh(&mvm->queue_info_lock);
switch (tid_data->state) {
@ -1412,9 +1719,11 @@ int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false);
iwl_mvm_disable_txq(mvm, txq_id,
vif->hw_queue[tid_to_mac80211_ac[tid]], tid,
0);
if (!iwl_mvm_is_dqa_supported(mvm)) {
int mac_queue = vif->hw_queue[tid_to_mac80211_ac[tid]];
iwl_mvm_disable_txq(mvm, txq_id, mac_queue, tid, 0);
}
return 0;
case IWL_AGG_STARTING:
case IWL_EMPTYING_HW_QUEUE_ADDBA:
@ -1465,9 +1774,16 @@ int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
mvmsta->agg_tids &= ~BIT(tid);
spin_unlock_bh(&mvmsta->lock);
/* No need to mark as reserved */
spin_lock_bh(&mvm->queue_info_lock);
mvm->queue_info[txq_id].setup_reserved = false;
/*
* The TXQ is marked as reserved only if no traffic came through yet.
* This means no traffic has been sent on this TID (agg'd or not), so
* we no longer have use for the queue. Since it hasn't even been
* allocated through iwl_mvm_enable_txq, we can just mark it back as
* free.
*/
if (mvm->queue_info[txq_id].status == IWL_MVM_QUEUE_RESERVED)
mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_FREE;
spin_unlock_bh(&mvm->queue_info_lock);
if (old_state >= IWL_AGG_ON) {
@ -1480,9 +1796,12 @@ int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false);
iwl_mvm_disable_txq(mvm, tid_data->txq_id,
vif->hw_queue[tid_to_mac80211_ac[tid]], tid,
0);
if (!iwl_mvm_is_dqa_supported(mvm)) {
int mac_queue = vif->hw_queue[tid_to_mac80211_ac[tid]];
iwl_mvm_disable_txq(mvm, tid_data->txq_id, mac_queue,
tid, 0);
}
}
return 0;
@ -1533,17 +1852,12 @@ static struct iwl_mvm_sta *iwl_mvm_get_key_sta(struct iwl_mvm *mvm,
mvmvif->ap_sta_id != IWL_MVM_STATION_COUNT) {
u8 sta_id = mvmvif->ap_sta_id;
sta = rcu_dereference_check(mvm->fw_id_to_mac_id[sta_id],
lockdep_is_held(&mvm->mutex));
/*
* It is possible that the 'sta' parameter is NULL,
* for example when a GTK is removed - the sta_id will then
* be the AP ID, and no station was passed by mac80211.
*/
if (IS_ERR_OR_NULL(sta))
return NULL;
return iwl_mvm_sta_from_mac80211(sta);
return iwl_mvm_sta_from_staid_protected(mvm, sta_id);
}
return NULL;

View File

@ -348,6 +348,15 @@ struct iwl_mvm_key_pn {
} ____cacheline_aligned_in_smp q[];
};
struct iwl_mvm_delba_data {
u32 baid;
} __packed;
struct iwl_mvm_delba_notif {
struct iwl_mvm_internal_rxq_notif metadata;
struct iwl_mvm_delba_data delba;
} __packed;
/**
* struct iwl_mvm_rxq_dup_data - per station per rx queue data
* @last_seq: last sequence per tid for duplicate packet detection
@ -373,6 +382,7 @@ struct iwl_mvm_rxq_dup_data {
* @lock: lock to protect the whole struct. Since %tid_data is accessed from Tx
* and from Tx response flow, it needs a spinlock.
* @tid_data: per tid data + mgmt. Look at %iwl_mvm_tid_data.
* @tid_to_baid: a simple map of TID to baid
* @reserved_queue: the queue reserved for this STA for DQA purposes
* Every STA is given one reserved queue to allow it to operate. If no
* such queue can be guaranteed, the STA addition will fail.
@ -406,6 +416,7 @@ struct iwl_mvm_sta {
bool next_status_eosp;
spinlock_t lock;
struct iwl_mvm_tid_data tid_data[IWL_MAX_TID_COUNT + 1];
u8 tid_to_baid[IWL_MAX_TID_COUNT];
struct iwl_lq_sta lq_sta;
struct ieee80211_vif *vif;
struct iwl_mvm_key_pn __rcu *ptk_pn[4];
@ -487,7 +498,7 @@ void iwl_mvm_rx_eosp_notif(struct iwl_mvm *mvm,
/* AMPDU */
int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
int tid, u16 ssn, bool start, u8 buf_size);
int tid, u16 ssn, bool start, u8 buf_size, u16 timeout);
int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, u16 tid, u16 *ssn);
int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,

View File

@ -359,16 +359,14 @@ static void iwl_mvm_tt_smps_iterator(void *_data, u8 *mac,
static void iwl_mvm_tt_tx_protection(struct iwl_mvm *mvm, bool enable)
{
struct ieee80211_sta *sta;
struct iwl_mvm_sta *mvmsta;
int i, err;
for (i = 0; i < IWL_MVM_STATION_COUNT; i++) {
sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
lockdep_is_held(&mvm->mutex));
if (IS_ERR_OR_NULL(sta))
mvmsta = iwl_mvm_sta_from_staid_protected(mvm, i);
if (!mvmsta)
continue;
mvmsta = iwl_mvm_sta_from_mac80211(sta);
if (enable == mvmsta->tt_tx_protection)
continue;
err = iwl_mvm_tx_protection(mvm, mvmsta, enable);

View File

@ -475,6 +475,21 @@ iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb,
return dev_cmd;
}
static int iwl_mvm_get_ctrl_vif_queue(struct iwl_mvm *mvm,
struct ieee80211_tx_info *info, __le16 fc)
{
if (iwl_mvm_is_dqa_supported(mvm)) {
if (info->control.vif->type == NL80211_IFTYPE_AP &&
ieee80211_is_probe_resp(fc))
return IWL_MVM_DQA_AP_PROBE_RESP_QUEUE;
else if (ieee80211_is_mgmt(fc) &&
info->control.vif->type == NL80211_IFTYPE_P2P_DEVICE)
return IWL_MVM_DQA_P2P_DEVICE_QUEUE;
}
return info->hw_queue;
}
int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
{
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
@ -484,6 +499,7 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
struct iwl_tx_cmd *tx_cmd;
u8 sta_id;
int hdrlen = ieee80211_hdrlen(hdr->frame_control);
int queue;
memcpy(&info, skb->cb, sizeof(info));
@ -508,6 +524,8 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
info.control.vif->type == NL80211_IFTYPE_STATION)
IEEE80211_SKB_CB(skb)->hw_queue = mvm->aux_queue;
queue = info.hw_queue;
/*
* If the interface on which the frame is sent is the P2P_DEVICE
* or an AP/GO interface use the broadcast station associated
@ -523,10 +541,12 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
iwl_mvm_vif_from_mac80211(info.control.vif);
if (info.control.vif->type == NL80211_IFTYPE_P2P_DEVICE ||
info.control.vif->type == NL80211_IFTYPE_AP)
info.control.vif->type == NL80211_IFTYPE_AP) {
sta_id = mvmvif->bcast_sta.sta_id;
else if (info.control.vif->type == NL80211_IFTYPE_STATION &&
is_multicast_ether_addr(hdr->addr1)) {
queue = iwl_mvm_get_ctrl_vif_queue(mvm, &info,
hdr->frame_control);
} else if (info.control.vif->type == NL80211_IFTYPE_STATION &&
is_multicast_ether_addr(hdr->addr1)) {
u8 ap_sta_id = ACCESS_ONCE(mvmvif->ap_sta_id);
if (ap_sta_id != IWL_MVM_STATION_COUNT)
@ -534,7 +554,7 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
}
}
IWL_DEBUG_TX(mvm, "station Id %d, queue=%d\n", sta_id, info.hw_queue);
IWL_DEBUG_TX(mvm, "station Id %d, queue=%d\n", sta_id, queue);
dev_cmd = iwl_mvm_set_tx_params(mvm, skb, &info, hdrlen, NULL, sta_id);
if (!dev_cmd)
@ -545,7 +565,7 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
/* Copy MAC header from skb into command buffer */
memcpy(tx_cmd->hdr, hdr, hdrlen);
if (iwl_trans_tx(mvm->trans, skb, dev_cmd, info.hw_queue)) {
if (iwl_trans_tx(mvm->trans, skb, dev_cmd, queue)) {
iwl_trans_free_tx_cmd(mvm->trans, dev_cmd);
return -1;
}
@ -589,9 +609,11 @@ static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb,
if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT))
return -EINVAL;
dbg_max_amsdu_len = ACCESS_ONCE(mvm->max_amsdu_len);
if (!sta->max_amsdu_len ||
!ieee80211_is_data_qos(hdr->frame_control) ||
!mvmsta->tlc_amsdu) {
(!mvmsta->tlc_amsdu && !dbg_max_amsdu_len)) {
num_subframes = 1;
pad = 0;
goto segment;
@ -622,7 +644,6 @@ static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb,
}
max_amsdu_len = sta->max_amsdu_len;
dbg_max_amsdu_len = ACCESS_ONCE(mvm->max_amsdu_len);
/* the Tx FIFO to which this A-MSDU will be routed */
txf = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];
@ -636,7 +657,7 @@ static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb,
max_amsdu_len = min_t(unsigned int, max_amsdu_len,
mvm->shared_mem_cfg.txfifo_size[txf] - 256);
if (dbg_max_amsdu_len)
if (unlikely(dbg_max_amsdu_len))
max_amsdu_len = min_t(unsigned int, max_amsdu_len,
dbg_max_amsdu_len);
@ -912,7 +933,8 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
spin_unlock(&mvmsta->lock);
if (txq_id < mvm->first_agg_queue)
/* Increase pending frames count if this isn't AMPDU */
if (!is_ampdu)
atomic_inc(&mvm->pending_frames[mvmsta->sta_id]);
return 0;
@ -1160,6 +1182,7 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
u8 skb_freed = 0;
u16 next_reclaimed, seq_ctl;
bool is_ndp = false;
bool txq_agg = false; /* Is this TXQ aggregated */
__skb_queue_head_init(&skbs);
@ -1290,6 +1313,8 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
bool send_eosp_ndp = false;
spin_lock_bh(&mvmsta->lock);
txq_agg = (mvmsta->tid_data[tid].state == IWL_AGG_ON);
if (!is_ndp) {
tid_data->next_reclaimed = next_reclaimed;
IWL_DEBUG_TX_REPLY(mvm,
@ -1345,11 +1370,11 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
* If the txq is not an AMPDU queue, there is no chance we freed
* several skbs. Check that out...
*/
if (txq_id >= mvm->first_agg_queue)
if (txq_agg)
goto out;
/* We can't free more than one frame at once on a shared queue */
WARN_ON(skb_freed > 1);
WARN_ON(!iwl_mvm_is_dqa_supported(mvm) && (skb_freed > 1));
/* If we have still frames for this STA nothing to do here */
if (!atomic_sub_and_test(skb_freed, &mvm->pending_frames[sta_id]))
@ -1443,9 +1468,12 @@ static void iwl_mvm_rx_tx_cmd_agg(struct iwl_mvm *mvm,
int sta_id = IWL_MVM_TX_RES_GET_RA(tx_resp->ra_tid);
int tid = IWL_MVM_TX_RES_GET_TID(tx_resp->ra_tid);
u16 sequence = le16_to_cpu(pkt->hdr.sequence);
struct ieee80211_sta *sta;
struct iwl_mvm_sta *mvmsta;
int queue = SEQ_TO_QUEUE(sequence);
if (WARN_ON_ONCE(SEQ_TO_QUEUE(sequence) < mvm->first_agg_queue))
if (WARN_ON_ONCE(queue < mvm->first_agg_queue &&
(!iwl_mvm_is_dqa_supported(mvm) ||
(queue != IWL_MVM_DQA_BSS_CLIENT_QUEUE))))
return;
if (WARN_ON_ONCE(tid == IWL_TID_NON_QOS))
@ -1455,10 +1483,9 @@ static void iwl_mvm_rx_tx_cmd_agg(struct iwl_mvm *mvm,
rcu_read_lock();
sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, sta_id);
if (!WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
if (!WARN_ON_ONCE(!mvmsta)) {
mvmsta->tid_data[tid].rate_n_flags =
le32_to_cpu(tx_resp->initial_rate);
mvmsta->tid_data[tid].tx_time =

View File

@ -90,11 +90,17 @@ int iwl_mvm_send_cmd(struct iwl_mvm *mvm, struct iwl_host_cmd *cmd)
* the mutex, this ensures we don't try to send two
* (or more) synchronous commands at a time.
*/
if (!(cmd->flags & CMD_ASYNC))
if (!(cmd->flags & CMD_ASYNC)) {
lockdep_assert_held(&mvm->mutex);
if (!(cmd->flags & CMD_SEND_IN_IDLE))
iwl_mvm_ref(mvm, IWL_MVM_REF_SENDING_CMD);
}
ret = iwl_trans_send_cmd(mvm->trans, cmd);
if (!(cmd->flags & (CMD_ASYNC | CMD_SEND_IN_IDLE)))
iwl_mvm_unref(mvm, IWL_MVM_REF_SENDING_CMD);
/*
* If the caller wants the SKB, then don't hide any problems, the
* caller might access the response buffer which will be NULL if
@ -581,12 +587,45 @@ int iwl_mvm_find_free_queue(struct iwl_mvm *mvm, u8 minq, u8 maxq)
for (i = minq; i <= maxq; i++)
if (mvm->queue_info[i].hw_queue_refcount == 0 &&
!mvm->queue_info[i].setup_reserved)
mvm->queue_info[i].status == IWL_MVM_QUEUE_FREE)
return i;
return -ENOSPC;
}
int iwl_mvm_reconfig_scd(struct iwl_mvm *mvm, int queue, int fifo, int sta_id,
int tid, int frame_limit, u16 ssn)
{
struct iwl_scd_txq_cfg_cmd cmd = {
.scd_queue = queue,
.enable = 1,
.window = frame_limit,
.sta_id = sta_id,
.ssn = cpu_to_le16(ssn),
.tx_fifo = fifo,
.aggregate = (queue >= IWL_MVM_DQA_MIN_DATA_QUEUE ||
queue == IWL_MVM_DQA_BSS_CLIENT_QUEUE),
.tid = tid,
};
int ret;
spin_lock_bh(&mvm->queue_info_lock);
if (WARN(mvm->queue_info[queue].hw_queue_refcount == 0,
"Trying to reconfig unallocated queue %d\n", queue)) {
spin_unlock_bh(&mvm->queue_info_lock);
return -ENXIO;
}
spin_unlock_bh(&mvm->queue_info_lock);
IWL_DEBUG_TX_QUEUES(mvm, "Reconfig SCD for TXQ #%d\n", queue);
ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd);
WARN_ONCE(ret, "Failed to re-configure queue %d on FIFO %d, ret=%d\n",
queue, fifo, ret);
return ret;
}
void iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
u16 ssn, const struct iwl_trans_txq_scd_cfg *cfg,
unsigned int wdg_timeout)
@ -682,6 +721,8 @@ void iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
mvm->queue_info[queue].hw_queue_refcount--;
cmd.enable = mvm->queue_info[queue].hw_queue_refcount ? 1 : 0;
if (!cmd.enable)
mvm->queue_info[queue].status = IWL_MVM_QUEUE_FREE;
IWL_DEBUG_TX_QUEUES(mvm,
"Disabling TXQ #%d refcount=%d (mac80211 map:0x%x)\n",

View File

@ -493,19 +493,20 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0x24FD, 0x8130, iwl8265_2ac_cfg)},
/* 9000 Series */
{IWL_PCI_DEVICE(0x9DF0, 0x0A10, iwl9560_2ac_cfg)},
{IWL_PCI_DEVICE(0x9DF0, 0x0010, iwl9560_2ac_cfg)},
{IWL_PCI_DEVICE(0x9DF0, 0x2A10, iwl5165_2ac_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x0000, iwl9260_2ac_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x0010, iwl9260_2ac_cfg)},
{IWL_PCI_DEVICE(0x9DF0, 0x0A10, iwl9260_2ac_cfg)},
{IWL_PCI_DEVICE(0x9DF0, 0x0010, iwl9260_2ac_cfg)},
{IWL_PCI_DEVICE(0x9DF0, 0x0210, iwl9260_2ac_cfg)},
{IWL_PCI_DEVICE(0x9DF0, 0x0410, iwl9260_2ac_cfg)},
{IWL_PCI_DEVICE(0x9DF0, 0x0610, iwl9260_2ac_cfg)},
{IWL_PCI_DEVICE(0x9DF0, 0x0310, iwl5165_2ac_cfg)},
{IWL_PCI_DEVICE(0x9DF0, 0x0000, iwl5165_2ac_cfg)},
{IWL_PCI_DEVICE(0x9DF0, 0x0510, iwl5165_2ac_cfg)},
{IWL_PCI_DEVICE(0x9DF0, 0x2010, iwl5165_2ac_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x1420, iwl5165_2ac_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x0010, iwl5165_2ac_cfg)},
{IWL_PCI_DEVICE(0x9DF0, 0x0000, iwl5165_2ac_cfg)},
{IWL_PCI_DEVICE(0x9DF0, 0x0310, iwl5165_2ac_cfg)},
{IWL_PCI_DEVICE(0x9DF0, 0x0510, iwl5165_2ac_cfg)},
{IWL_PCI_DEVICE(0x9DF0, 0x0710, iwl5165_2ac_cfg)},
{IWL_PCI_DEVICE(0x9DF0, 0x0210, iwl9560_2ac_cfg)},
{IWL_PCI_DEVICE(0x9DF0, 0x0410, iwl9560_2ac_cfg)},
{IWL_PCI_DEVICE(0x9DF0, 0x0610, iwl9560_2ac_cfg)},
{IWL_PCI_DEVICE(0x9DF0, 0x2A10, iwl5165_2ac_cfg)},
#endif /* CONFIG_IWLMVM */
{0}
@ -595,6 +596,7 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
const struct iwl_cfg *cfg = (struct iwl_cfg *)(ent->driver_data);
const struct iwl_cfg *cfg_7265d __maybe_unused = NULL;
const struct iwl_cfg *cfg_9260lc __maybe_unused = NULL;
struct iwl_trans *iwl_trans;
struct iwl_trans_pcie *trans_pcie;
int ret;
@ -622,6 +624,15 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
cfg = cfg_7265d;
iwl_trans->cfg = cfg_7265d;
}
if (iwl_trans->cfg->rf_id) {
if (cfg == &iwl9260_2ac_cfg)
cfg_9260lc = &iwl9260lc_2ac_cfg;
if (cfg_9260lc && iwl_trans->hw_rf_id == CSR_HW_RF_ID_TYPE_LC) {
cfg = cfg_9260lc;
iwl_trans->cfg = cfg_9260lc;
}
}
#endif
pci_set_drvdata(pdev, iwl_trans);

View File

@ -481,9 +481,6 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
struct sk_buff_head *skbs);
void iwl_trans_pcie_tx_reset(struct iwl_trans *trans);
void iwl_trans_pcie_ref(struct iwl_trans *trans);
void iwl_trans_pcie_unref(struct iwl_trans *trans);
static inline u16 iwl_pcie_tfd_tb_get_len(struct iwl_tfd *tfd, u8 idx)
{
struct iwl_tfd_tb *tb = &tfd->tbs[idx];

View File

@ -161,10 +161,11 @@ static inline __le32 iwl_pcie_dma_addr2rbd_ptr(dma_addr_t dma_addr)
return cpu_to_le32((u32)(dma_addr >> 8));
}
static void iwl_pcie_write_prph_64(struct iwl_trans *trans, u64 ofs, u64 val)
static void iwl_pcie_write_prph_64_no_grab(struct iwl_trans *trans, u64 ofs,
u64 val)
{
iwl_write_prph(trans, ofs, val & 0xffffffff);
iwl_write_prph(trans, ofs + 4, val >> 32);
iwl_write_prph_no_grab(trans, ofs, val & 0xffffffff);
iwl_write_prph_no_grab(trans, ofs + 4, val >> 32);
}
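
The helper splits a 64-bit periphery write into two 32-bit halves, low word at ofs and high word at ofs + 4, using the _no_grab accessors because the caller already holds NIC access. A standalone sketch of the split; write32() stands in for the register accessor:

#include <stdint.h>
#include <stdio.h>

/* stand-in for the 32-bit register write; the driver uses
 * iwl_write_prph_no_grab() since NIC access is already held */
static void write32(uint64_t ofs, uint32_t val)
{
	printf("write32 ofs=0x%llx val=0x%08x\n",
	       (unsigned long long)ofs, val);
}

/* a 64-bit periphery register is written as two 32-bit halves,
 * low word first at ofs, high word at ofs + 4 */
static void write_prph_64(uint64_t ofs, uint64_t val)
{
	write32(ofs, val & 0xffffffff);
	write32(ofs + 4, val >> 32);
}

int main(void)
{
	write_prph_64(0xA08000, 0x11223344AABBCCDDULL);
	return 0;
}
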
/*
@ -208,8 +209,8 @@ static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans,
rxq->write_actual = round_down(rxq->write, 8);
if (trans->cfg->mq_rx_supported)
iwl_write_prph(trans, RFH_Q_FRBDCB_WIDX(rxq->id),
rxq->write_actual);
iwl_write32(trans, RFH_Q_FRBDCB_WIDX_TRG(rxq->id),
rxq->write_actual);
/*
* write to FH_RSCSR_CHNL0_WPTR register even in MQ as a W/A to
* hardware shadow registers bug - writing to RFH_Q_FRBDCB_WIDX will
@ -698,6 +699,7 @@ static void iwl_pcie_rx_hw_init(struct iwl_trans *trans, struct iwl_rxq *rxq)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
u32 rb_size;
unsigned long flags;
const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */
switch (trans_pcie->rx_buf_size) {
@ -715,23 +717,26 @@ static void iwl_pcie_rx_hw_init(struct iwl_trans *trans, struct iwl_rxq *rxq)
rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;
}
if (!iwl_trans_grab_nic_access(trans, &flags))
return;
/* Stop Rx DMA */
iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
iwl_write32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
/* reset and flush pointers */
iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
iwl_write_direct32(trans, FH_RSCSR_CHNL0_RDPTR, 0);
iwl_write32(trans, FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
iwl_write32(trans, FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
iwl_write32(trans, FH_RSCSR_CHNL0_RDPTR, 0);
/* Reset driver's Rx queue write index */
iwl_write_direct32(trans, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
iwl_write32(trans, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
/* Tell device where to find RBD circular buffer in DRAM */
iwl_write_direct32(trans, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
(u32)(rxq->bd_dma >> 8));
iwl_write32(trans, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
(u32)(rxq->bd_dma >> 8));
/* Tell device where in DRAM to update its Rx status */
iwl_write_direct32(trans, FH_RSCSR_CHNL0_STTS_WPTR_REG,
rxq->rb_stts_dma >> 4);
iwl_write32(trans, FH_RSCSR_CHNL0_STTS_WPTR_REG,
rxq->rb_stts_dma >> 4);
/* Enable Rx DMA
* FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of HW bug in
@ -741,13 +746,15 @@ static void iwl_pcie_rx_hw_init(struct iwl_trans *trans, struct iwl_rxq *rxq)
* RB timeout 0x10
* 256 RBDs
*/
iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG,
FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY |
FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
rb_size|
(RX_RB_TIMEOUT << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS)|
(rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS));
iwl_write32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG,
FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY |
FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
rb_size |
(RX_RB_TIMEOUT << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
(rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS));
iwl_trans_release_nic_access(trans, &flags);
/* Set interrupt coalescing timer to default (2048 usecs) */
iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
@ -761,6 +768,7 @@ static void iwl_pcie_rx_mq_hw_init(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
u32 rb_size, enabled = 0;
unsigned long flags;
int i;
switch (trans_pcie->rx_buf_size) {
@ -778,25 +786,31 @@ static void iwl_pcie_rx_mq_hw_init(struct iwl_trans *trans)
rb_size = RFH_RXF_DMA_RB_SIZE_4K;
}
if (!iwl_trans_grab_nic_access(trans, &flags))
return;
/* Stop Rx DMA */
iwl_write_prph(trans, RFH_RXF_DMA_CFG, 0);
iwl_write_prph_no_grab(trans, RFH_RXF_DMA_CFG, 0);
/* disable free and used rx queue operation */
iwl_write_prph(trans, RFH_RXF_RXQ_ACTIVE, 0);
iwl_write_prph_no_grab(trans, RFH_RXF_RXQ_ACTIVE, 0);
for (i = 0; i < trans->num_rx_queues; i++) {
/* Tell device where to find RBD free table in DRAM */
iwl_pcie_write_prph_64(trans, RFH_Q_FRBDCB_BA_LSB(i),
(u64)(trans_pcie->rxq[i].bd_dma));
iwl_pcie_write_prph_64_no_grab(trans,
RFH_Q_FRBDCB_BA_LSB(i),
trans_pcie->rxq[i].bd_dma);
/* Tell device where to find RBD used table in DRAM */
iwl_pcie_write_prph_64(trans, RFH_Q_URBDCB_BA_LSB(i),
(u64)(trans_pcie->rxq[i].used_bd_dma));
iwl_pcie_write_prph_64_no_grab(trans,
RFH_Q_URBDCB_BA_LSB(i),
trans_pcie->rxq[i].used_bd_dma);
/* Tell device where in DRAM to update its Rx status */
iwl_pcie_write_prph_64(trans, RFH_Q_URBD_STTS_WPTR_LSB(i),
trans_pcie->rxq[i].rb_stts_dma);
iwl_pcie_write_prph_64_no_grab(trans,
RFH_Q_URBD_STTS_WPTR_LSB(i),
trans_pcie->rxq[i].rb_stts_dma);
/* Reset device index tables */
iwl_write_prph(trans, RFH_Q_FRBDCB_WIDX(i), 0);
iwl_write_prph(trans, RFH_Q_FRBDCB_RIDX(i), 0);
iwl_write_prph(trans, RFH_Q_URBDCB_WIDX(i), 0);
iwl_write_prph_no_grab(trans, RFH_Q_FRBDCB_WIDX(i), 0);
iwl_write_prph_no_grab(trans, RFH_Q_FRBDCB_RIDX(i), 0);
iwl_write_prph_no_grab(trans, RFH_Q_URBDCB_WIDX(i), 0);
enabled |= BIT(i) | BIT(i + 16);
}
@ -812,23 +826,26 @@ static void iwl_pcie_rx_mq_hw_init(struct iwl_trans *trans)
* Drop frames that exceed RB size
* 512 RBDs
*/
iwl_write_prph(trans, RFH_RXF_DMA_CFG,
RFH_DMA_EN_ENABLE_VAL |
rb_size | RFH_RXF_DMA_SINGLE_FRAME_MASK |
RFH_RXF_DMA_MIN_RB_4_8 |
RFH_RXF_DMA_DROP_TOO_LARGE_MASK |
RFH_RXF_DMA_RBDCB_SIZE_512);
iwl_write_prph_no_grab(trans, RFH_RXF_DMA_CFG,
RFH_DMA_EN_ENABLE_VAL |
rb_size | RFH_RXF_DMA_SINGLE_FRAME_MASK |
RFH_RXF_DMA_MIN_RB_4_8 |
RFH_RXF_DMA_DROP_TOO_LARGE_MASK |
RFH_RXF_DMA_RBDCB_SIZE_512);
/*
* Activate DMA snooping.
* Set RX DMA chunk size to 64B
* Default queue is 0
*/
iwl_write_prph(trans, RFH_GEN_CFG, RFH_GEN_CFG_RFH_DMA_SNOOP |
(DEFAULT_RXQ_NUM << RFH_GEN_CFG_DEFAULT_RXQ_NUM_POS) |
RFH_GEN_CFG_SERVICE_DMA_SNOOP);
iwl_write_prph_no_grab(trans, RFH_GEN_CFG, RFH_GEN_CFG_RFH_DMA_SNOOP |
(DEFAULT_RXQ_NUM <<
RFH_GEN_CFG_DEFAULT_RXQ_NUM_POS) |
RFH_GEN_CFG_SERVICE_DMA_SNOOP);
/* Enable the relevant rx queues */
iwl_write_prph(trans, RFH_RXF_RXQ_ACTIVE, enabled);
iwl_write_prph_no_grab(trans, RFH_RXF_RXQ_ACTIVE, enabled);
iwl_trans_release_nic_access(trans, &flags);
/* Set interrupt coalescing timer to default (2048 usecs) */
iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
@ -1298,7 +1315,7 @@ static inline void iwl_pcie_clear_irq(struct iwl_trans *trans,
* write 1 clear (W1C) register, meaning that it's being cleared
* by writing 1 to the bit.
*/
iwl_write_direct32(trans, CSR_MSIX_AUTOMASK_ST_AD, BIT(entry->entry));
iwl_write32(trans, CSR_MSIX_AUTOMASK_ST_AD, BIT(entry->entry));
}
/*
@ -1817,13 +1834,13 @@ irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id)
lock_map_acquire(&trans->sync_cmd_lockdep_map);
spin_lock(&trans_pcie->irq_lock);
inta_fh = iwl_read_direct32(trans, CSR_MSIX_FH_INT_CAUSES_AD);
inta_hw = iwl_read_direct32(trans, CSR_MSIX_HW_INT_CAUSES_AD);
inta_fh = iwl_read32(trans, CSR_MSIX_FH_INT_CAUSES_AD);
inta_hw = iwl_read32(trans, CSR_MSIX_HW_INT_CAUSES_AD);
/*
* Clear causes registers to avoid handling the same cause again.
*/
iwl_write_direct32(trans, CSR_MSIX_FH_INT_CAUSES_AD, inta_fh);
iwl_write_direct32(trans, CSR_MSIX_HW_INT_CAUSES_AD, inta_hw);
iwl_write32(trans, CSR_MSIX_FH_INT_CAUSES_AD, inta_fh);
iwl_write32(trans, CSR_MSIX_HW_INT_CAUSES_AD, inta_hw);
spin_unlock(&trans_pcie->irq_lock);
if (unlikely(!(inta_fh | inta_hw))) {

View File

@ -269,9 +269,8 @@ static int iwl_pcie_apm_init(struct iwl_trans *trans)
iwl_pcie_apm_config(trans);
/* Configure analog phase-lock-loop before activating to D0A */
if (trans->cfg->base_params->pll_cfg_val)
iwl_set_bit(trans, CSR_ANA_PLL_CFG,
trans->cfg->base_params->pll_cfg_val);
if (trans->cfg->base_params->pll_cfg)
iwl_set_bit(trans, CSR_ANA_PLL_CFG, CSR50_ANA_PLL_CFG_VAL);
/*
* Set "initialization complete" bit to move adapter from
@ -361,8 +360,7 @@ static void iwl_pcie_apm_lp_xtal_enable(struct iwl_trans *trans)
/* Reset entire device - do controller reset (results in SHRD_HW_RST) */
iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
udelay(10);
usleep_range(1000, 2000);
/*
* Set "initialization complete" bit to move adapter from
@ -408,8 +406,7 @@ static void iwl_pcie_apm_lp_xtal_enable(struct iwl_trans *trans)
* SHRD_HW_RST). Turn MAC off before proceeding.
*/
iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
udelay(10);
usleep_range(1000, 2000);
/* Enable LP XTAL by indirect access through CSR */
apmg_gp1_reg = iwl_trans_pcie_read_shr(trans, SHR_APMG_GP1_REG);
@ -506,8 +503,7 @@ static void iwl_pcie_apm_stop(struct iwl_trans *trans, bool op_mode_leave)
/* Reset the entire device */
iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
udelay(10);
usleep_range(1000, 2000);
/*
* Clear "initialization complete" bit to move adapter from
@ -586,7 +582,7 @@ static int iwl_pcie_prepare_card_hw(struct iwl_trans *trans)
iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
CSR_RESET_LINK_PWR_MGMT_DISABLED);
msleep(1);
usleep_range(1000, 2000);
for (iter = 0; iter < 10; iter++) {
/* If HW is not ready, prepare the conditions to check again */
@ -1074,7 +1070,7 @@ static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power)
/* stop and reset the on-board processor */
iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
udelay(20);
usleep_range(1000, 2000);
/*
* Upon stop, the APM issues an interrupt if HW RF kill is set.
@ -1526,8 +1522,7 @@ static int _iwl_trans_pcie_start_hw(struct iwl_trans *trans, bool low_power)
/* Reset the entire device */
iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
usleep_range(10, 15);
usleep_range(1000, 2000);
iwl_pcie_apm_init(trans);
@ -1950,7 +1945,7 @@ static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans, u32 txq_bm)
"WR pointer moved while flushing %d -> %d\n",
wr_ptr, write_ptr))
return -ETIMEDOUT;
msleep(1);
usleep_range(1000, 2000);
}
if (q->read_ptr != q->write_ptr) {
@ -2013,7 +2008,7 @@ static void iwl_trans_pcie_set_bits_mask(struct iwl_trans *trans, u32 reg,
spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
}
void iwl_trans_pcie_ref(struct iwl_trans *trans)
static void iwl_trans_pcie_ref(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
@ -2028,7 +2023,7 @@ void iwl_trans_pcie_ref(struct iwl_trans *trans)
#endif /* CONFIG_PM */
}
void iwl_trans_pcie_unref(struct iwl_trans *trans)
static void iwl_trans_pcie_unref(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
@ -2907,6 +2902,8 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
}
}
trans->hw_rf_id = iwl_read32(trans, CSR_HW_RF_ID);
iwl_pcie_set_interrupt_capa(pdev, trans);
trans->hw_id = (pdev->device << 16) + pdev->subsystem_device;
snprintf(trans->hw_id_str, sizeof(trans->hw_id_str),

Some files were not shown because too many files have changed in this diff.