
Merge tag 'wireless-drivers-next-for-davem-2015-08-19' of git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/wireless-drivers-next

Kalle Valo says:

====================
Major changes:

ath10k:

* add support for qca99x0 family of devices
* improve performance of tx_lock
* add support for raw mode (802.11 frame format) and software crypto
  engine enabled via a module parameter

ath9k:

* add fast-xmit support

wil6210:

* implement TSO support
* support bootloader v1 and onwards

iwlwifi:

* Deprecate -10.ucode
* Clean ups towards multiple Rx queues
* Add support for longer CMD IDs. This will be required by new
  firmwares since we are getting close to the u8 limit.
* bugfixes for the D0i3 power state
* Add basic support for FTM
* polish the Miracast operation
* fix a few power consumption issues
* scan cleanup
* fixes for D0i3 system state
* add paging for devices that support it
* add again the new RBD allocation model
* add more options to the firmware debug system
* add support for frag SKBs in Tx
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
David S. Miller 2015-08-20 14:13:25 -07:00
commit ef09242f39
193 changed files with 7124 additions and 2796 deletions
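
The ath10k cryptmode module parameter mentioned in the summary above selects between the hardware crypto engine and a software crypto engine running over raw 802.11 frames. Below is a minimal standalone C sketch of that gating logic as it appears in the core.c hunk further down, using simplified stand-ins for the driver's flag and feature bits (not the actual kernel types):

#include <stdio.h>

/* Stand-ins for enum ath10k_crypt_mode and the dev_flags/fw_features bits;
 * simplified, not the kernel's actual types. */
enum crypt_mode { CRYPT_MODE_HW = 0, CRYPT_MODE_SW = 1 };

struct dev_state {
        int fw_supports_raw_mode;   /* ATH10K_FW_FEATURE_RAW_MODE_SUPPORT */
        int raw_mode;               /* ATH10K_FLAG_RAW_MODE */
        int hw_crypto_disabled;     /* ATH10K_FLAG_HW_CRYPTO_DISABLED */
};

/* Mirrors the switch in ath10k_core_init_firmware_features(): sw crypto
 * forces raw mode and requires raw-capable firmware; anything else is
 * rejected the way the driver returns -EINVAL. */
static int apply_cryptmode(struct dev_state *dev, unsigned int cryptmode)
{
        switch (cryptmode) {
        case CRYPT_MODE_HW:
                dev->raw_mode = 0;
                dev->hw_crypto_disabled = 0;
                return 0;
        case CRYPT_MODE_SW:
                if (!dev->fw_supports_raw_mode)
                        return -1;
                dev->raw_mode = 1;
                dev->hw_crypto_disabled = 1;
                return 0;
        default:
                return -1;
        }
}

int main(void)
{
        struct dev_state dev = { .fw_supports_raw_mode = 1 };

        if (apply_cryptmode(&dev, CRYPT_MODE_SW) == 0)
                printf("raw_mode=%d hw_crypto_disabled=%d\n",
                       dev.raw_mode, dev.hw_crypto_disabled);
        return 0;
}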

View File

@ -92,7 +92,7 @@ config BCMA_DRIVER_GMAC_CMN
config BCMA_DRIVER_GPIO
bool "BCMA GPIO driver"
depends on BCMA && GPIOLIB
select IRQ_DOMAIN if BCMA_HOST_SOC
select GPIOLIB_IRQCHIP if BCMA_HOST_SOC
help
Driver to provide access to the GPIO pins of the bcma bus.

View File

@ -34,6 +34,7 @@ int __init bcma_bus_early_register(struct bcma_bus *bus);
int bcma_bus_suspend(struct bcma_bus *bus);
int bcma_bus_resume(struct bcma_bus *bus);
#endif
struct device *bcma_bus_get_host_dev(struct bcma_bus *bus);
/* scan.c */
void bcma_detect_chip(struct bcma_bus *bus);

View File

@ -8,10 +8,8 @@
* Licensed under the GNU/GPL. See COPYING for details.
*/
#include <linux/gpio.h>
#include <linux/irq.h>
#include <linux/gpio/driver.h>
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/export.h>
#include <linux/bcma/bcma.h>
@ -79,19 +77,11 @@ static void bcma_gpio_free(struct gpio_chip *chip, unsigned gpio)
}
#if IS_BUILTIN(CONFIG_BCM47XX) || IS_BUILTIN(CONFIG_ARCH_BCM_5301X)
static int bcma_gpio_to_irq(struct gpio_chip *chip, unsigned gpio)
{
struct bcma_drv_cc *cc = bcma_gpio_get_cc(chip);
if (cc->core->bus->hosttype == BCMA_HOSTTYPE_SOC)
return irq_find_mapping(cc->irq_domain, gpio);
else
return -EINVAL;
}
static void bcma_gpio_irq_unmask(struct irq_data *d)
{
struct bcma_drv_cc *cc = irq_data_get_irq_chip_data(d);
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
struct bcma_drv_cc *cc = bcma_gpio_get_cc(gc);
int gpio = irqd_to_hwirq(d);
u32 val = bcma_chipco_gpio_in(cc, BIT(gpio));
@ -101,7 +91,8 @@ static void bcma_gpio_irq_unmask(struct irq_data *d)
static void bcma_gpio_irq_mask(struct irq_data *d)
{
struct bcma_drv_cc *cc = irq_data_get_irq_chip_data(d);
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
struct bcma_drv_cc *cc = bcma_gpio_get_cc(gc);
int gpio = irqd_to_hwirq(d);
bcma_chipco_gpio_intmask(cc, BIT(gpio), 0);
@ -116,6 +107,7 @@ static struct irq_chip bcma_gpio_irq_chip = {
static irqreturn_t bcma_gpio_irq_handler(int irq, void *dev_id)
{
struct bcma_drv_cc *cc = dev_id;
struct gpio_chip *gc = &cc->gpio;
u32 val = bcma_cc_read32(cc, BCMA_CC_GPIOIN);
u32 mask = bcma_cc_read32(cc, BCMA_CC_GPIOIRQ);
u32 pol = bcma_cc_read32(cc, BCMA_CC_GPIOPOL);
@ -125,81 +117,58 @@ static irqreturn_t bcma_gpio_irq_handler(int irq, void *dev_id)
if (!irqs)
return IRQ_NONE;
for_each_set_bit(gpio, &irqs, cc->gpio.ngpio)
generic_handle_irq(bcma_gpio_to_irq(&cc->gpio, gpio));
for_each_set_bit(gpio, &irqs, gc->ngpio)
generic_handle_irq(irq_find_mapping(gc->irqdomain, gpio));
bcma_chipco_gpio_polarity(cc, irqs, val & irqs);
return IRQ_HANDLED;
}
static int bcma_gpio_irq_domain_init(struct bcma_drv_cc *cc)
static int bcma_gpio_irq_init(struct bcma_drv_cc *cc)
{
struct gpio_chip *chip = &cc->gpio;
int gpio, hwirq, err;
int hwirq, err;
if (cc->core->bus->hosttype != BCMA_HOSTTYPE_SOC)
return 0;
cc->irq_domain = irq_domain_add_linear(NULL, chip->ngpio,
&irq_domain_simple_ops, cc);
if (!cc->irq_domain) {
err = -ENODEV;
goto err_irq_domain;
}
for (gpio = 0; gpio < chip->ngpio; gpio++) {
int irq = irq_create_mapping(cc->irq_domain, gpio);
irq_set_chip_data(irq, cc);
irq_set_chip_and_handler(irq, &bcma_gpio_irq_chip,
handle_simple_irq);
}
hwirq = bcma_core_irq(cc->core, 0);
err = request_irq(hwirq, bcma_gpio_irq_handler, IRQF_SHARED, "gpio",
cc);
if (err)
goto err_req_irq;
return err;
bcma_chipco_gpio_intmask(cc, ~0, 0);
bcma_cc_set32(cc, BCMA_CC_IRQMASK, BCMA_CC_IRQ_GPIO);
return 0;
err_req_irq:
for (gpio = 0; gpio < chip->ngpio; gpio++) {
int irq = irq_find_mapping(cc->irq_domain, gpio);
irq_dispose_mapping(irq);
err = gpiochip_irqchip_add(chip,
&bcma_gpio_irq_chip,
0,
handle_simple_irq,
IRQ_TYPE_NONE);
if (err) {
free_irq(hwirq, cc);
return err;
}
irq_domain_remove(cc->irq_domain);
err_irq_domain:
return err;
return 0;
}
static void bcma_gpio_irq_domain_exit(struct bcma_drv_cc *cc)
static void bcma_gpio_irq_exit(struct bcma_drv_cc *cc)
{
struct gpio_chip *chip = &cc->gpio;
int gpio;
if (cc->core->bus->hosttype != BCMA_HOSTTYPE_SOC)
return;
bcma_cc_mask32(cc, BCMA_CC_IRQMASK, ~BCMA_CC_IRQ_GPIO);
free_irq(bcma_core_irq(cc->core, 0), cc);
for (gpio = 0; gpio < chip->ngpio; gpio++) {
int irq = irq_find_mapping(cc->irq_domain, gpio);
irq_dispose_mapping(irq);
}
irq_domain_remove(cc->irq_domain);
}
#else
static int bcma_gpio_irq_domain_init(struct bcma_drv_cc *cc)
static int bcma_gpio_irq_init(struct bcma_drv_cc *cc)
{
return 0;
}
static void bcma_gpio_irq_domain_exit(struct bcma_drv_cc *cc)
static void bcma_gpio_irq_exit(struct bcma_drv_cc *cc)
{
}
#endif
@ -218,9 +187,8 @@ int bcma_gpio_init(struct bcma_drv_cc *cc)
chip->set = bcma_gpio_set_value;
chip->direction_input = bcma_gpio_direction_input;
chip->direction_output = bcma_gpio_direction_output;
#if IS_BUILTIN(CONFIG_BCM47XX) || IS_BUILTIN(CONFIG_ARCH_BCM_5301X)
chip->to_irq = bcma_gpio_to_irq;
#endif
chip->owner = THIS_MODULE;
chip->dev = bcma_bus_get_host_dev(bus);
#if IS_BUILTIN(CONFIG_OF)
if (cc->core->bus->hosttype == BCMA_HOSTTYPE_SOC)
chip->of_node = cc->core->dev.of_node;
@ -248,13 +216,13 @@ int bcma_gpio_init(struct bcma_drv_cc *cc)
else
chip->base = -1;
err = bcma_gpio_irq_domain_init(cc);
err = gpiochip_add(chip);
if (err)
return err;
err = gpiochip_add(chip);
err = bcma_gpio_irq_init(cc);
if (err) {
bcma_gpio_irq_domain_exit(cc);
gpiochip_remove(chip);
return err;
}
@ -263,7 +231,7 @@ int bcma_gpio_init(struct bcma_drv_cc *cc)
int bcma_gpio_unregister(struct bcma_drv_cc *cc)
{
bcma_gpio_irq_domain_exit(cc);
bcma_gpio_irq_exit(cc);
gpiochip_remove(&cc->gpio);
return 0;
}

View File

@ -7,7 +7,9 @@
#include "bcma_private.h"
#include <linux/module.h>
#include <linux/mmc/sdio_func.h>
#include <linux/platform_device.h>
#include <linux/pci.h>
#include <linux/bcma/bcma.h>
#include <linux/slab.h>
#include <linux/of_address.h>
@ -269,6 +271,28 @@ void bcma_prepare_core(struct bcma_bus *bus, struct bcma_device *core)
}
}
struct device *bcma_bus_get_host_dev(struct bcma_bus *bus)
{
switch (bus->hosttype) {
case BCMA_HOSTTYPE_PCI:
if (bus->host_pci)
return &bus->host_pci->dev;
else
return NULL;
case BCMA_HOSTTYPE_SOC:
if (bus->host_pdev)
return &bus->host_pdev->dev;
else
return NULL;
case BCMA_HOSTTYPE_SDIO:
if (bus->host_sdio)
return &bus->host_sdio->dev;
else
return NULL;
}
return NULL;
}
void bcma_init_bus(struct bcma_bus *bus)
{
mutex_lock(&bcma_buses_mutex);
@ -388,6 +412,7 @@ int bcma_bus_register(struct bcma_bus *bus)
{
int err;
struct bcma_device *core;
struct device *dev;
/* Scan for devices (cores) */
err = bcma_bus_scan(bus);
@ -410,13 +435,12 @@ int bcma_bus_register(struct bcma_bus *bus)
bcma_core_pci_early_init(&bus->drv_pci[0]);
}
dev = bcma_bus_get_host_dev(bus);
/* TODO: remove check for IS_BUILTIN(CONFIG_BCMA) check when
* of_default_bus_match_table is exported or in some other way
* accessible. This is just a temporary workaround.
*/
if (IS_BUILTIN(CONFIG_BCMA) && bus->host_pdev) {
struct device *dev = &bus->host_pdev->dev;
if (IS_BUILTIN(CONFIG_BCMA) && dev) {
of_platform_populate(dev->of_node, of_default_bus_match_table,
NULL, dev);
}

View File

@ -31,16 +31,19 @@
#include "wmi-ops.h"
unsigned int ath10k_debug_mask;
static unsigned int ath10k_cryptmode_param;
static bool uart_print;
static bool skip_otp;
module_param_named(debug_mask, ath10k_debug_mask, uint, 0644);
module_param_named(cryptmode, ath10k_cryptmode_param, uint, 0644);
module_param(uart_print, bool, 0644);
module_param(skip_otp, bool, 0644);
MODULE_PARM_DESC(debug_mask, "Debugging mask");
MODULE_PARM_DESC(uart_print, "Uart target debugging");
MODULE_PARM_DESC(skip_otp, "Skip otp failure for calibration in testmode");
MODULE_PARM_DESC(cryptmode, "Crypto mode: 0-hardware, 1-software");
static const struct ath10k_hw_params ath10k_hw_params_list[] = {
{
@ -1073,6 +1076,46 @@ static int ath10k_core_init_firmware_features(struct ath10k *ar)
return -EINVAL;
}
ar->wmi.rx_decap_mode = ATH10K_HW_TXRX_NATIVE_WIFI;
switch (ath10k_cryptmode_param) {
case ATH10K_CRYPT_MODE_HW:
clear_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags);
clear_bit(ATH10K_FLAG_HW_CRYPTO_DISABLED, &ar->dev_flags);
break;
case ATH10K_CRYPT_MODE_SW:
if (!test_bit(ATH10K_FW_FEATURE_RAW_MODE_SUPPORT,
ar->fw_features)) {
ath10k_err(ar, "cryptmode > 0 requires raw mode support from firmware");
return -EINVAL;
}
set_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags);
set_bit(ATH10K_FLAG_HW_CRYPTO_DISABLED, &ar->dev_flags);
break;
default:
ath10k_info(ar, "invalid cryptmode: %d\n",
ath10k_cryptmode_param);
return -EINVAL;
}
ar->htt.max_num_amsdu = ATH10K_HTT_MAX_NUM_AMSDU_DEFAULT;
ar->htt.max_num_ampdu = ATH10K_HTT_MAX_NUM_AMPDU_DEFAULT;
if (test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) {
ar->wmi.rx_decap_mode = ATH10K_HW_TXRX_RAW;
/* Workaround:
*
* Firmware A-MSDU aggregation breaks with RAW Tx encap mode
* and causes enormous performance issues (malformed frames,
* etc).
*
* Disabling A-MSDU makes RAW mode stable with heavy traffic
* albeit a bit slower compared to regular operation.
*/
ar->htt.max_num_amsdu = 1;
}
/* Backwards compatibility for firmwares without
* ATH10K_FW_IE_WMI_OP_VERSION.
*/
@ -1606,6 +1649,10 @@ struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev,
if (!ar->workqueue)
goto err_free_mac;
ar->workqueue_aux = create_singlethread_workqueue("ath10k_aux_wq");
if (!ar->workqueue_aux)
goto err_free_wq;
mutex_init(&ar->conf_mutex);
spin_lock_init(&ar->data_lock);
@ -1626,10 +1673,12 @@ struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev,
ret = ath10k_debug_create(ar);
if (ret)
goto err_free_wq;
goto err_free_aux_wq;
return ar;
err_free_aux_wq:
destroy_workqueue(ar->workqueue_aux);
err_free_wq:
destroy_workqueue(ar->workqueue);
@ -1645,6 +1694,9 @@ void ath10k_core_destroy(struct ath10k *ar)
flush_workqueue(ar->workqueue);
destroy_workqueue(ar->workqueue);
flush_workqueue(ar->workqueue_aux);
destroy_workqueue(ar->workqueue_aux);
ath10k_debug_destroy(ar);
ath10k_mac_destroy(ar);
}

View File

@ -92,6 +92,7 @@ struct ath10k_skb_cb {
u8 tid;
u16 freq;
bool is_offchan;
bool nohwcrypt;
struct ath10k_htt_txbuf *txbuf;
u32 txbuf_paddr;
} __packed htt;
@ -152,6 +153,7 @@ struct ath10k_wmi {
const struct wmi_ops *ops;
u32 num_mem_chunks;
u32 rx_decap_mode;
struct ath10k_mem_chunk mem_chunks[WMI_MAX_MEM_REQS];
};
@ -341,6 +343,7 @@ struct ath10k_vif {
} u;
bool use_cts_prot;
bool nohwcrypt;
int num_legacy_stations;
int txpower;
struct wmi_wmm_params_all_arg wmm_params;
@ -382,9 +385,6 @@ struct ath10k_debug {
u32 reg_addr;
u32 nf_cal_period;
u8 htt_max_amsdu;
u8 htt_max_ampdu;
struct ath10k_fw_crash_data *fw_crash_data;
};
@ -453,16 +453,21 @@ enum ath10k_fw_features {
ATH10K_FW_FEATURE_WOWLAN_SUPPORT = 6,
/* Don't trust error code from otp.bin */
ATH10K_FW_FEATURE_IGNORE_OTP_RESULT,
ATH10K_FW_FEATURE_IGNORE_OTP_RESULT = 7,
/* Some firmware revisions pad 4th hw address to 4 byte boundary making
* it 8 bytes long in Native Wifi Rx decap.
*/
ATH10K_FW_FEATURE_NO_NWIFI_DECAP_4ADDR_PADDING,
ATH10K_FW_FEATURE_NO_NWIFI_DECAP_4ADDR_PADDING = 8,
/* Firmware supports bypassing PLL setting on init. */
ATH10K_FW_FEATURE_SUPPORTS_SKIP_CLOCK_INIT = 9,
/* Raw mode support. If supported, FW supports receiving and transmitting
* frames in raw mode.
*/
ATH10K_FW_FEATURE_RAW_MODE_SUPPORT = 10,
/* keep last */
ATH10K_FW_FEATURE_COUNT,
};
@ -476,6 +481,15 @@ enum ath10k_dev_flags {
* waiters should immediately cancel instead of waiting for a time out.
*/
ATH10K_FLAG_CRASH_FLUSH,
/* Use Raw mode instead of native WiFi Tx/Rx encap mode.
* Raw mode supports both hardware and software crypto. Native WiFi only
* supports hardware crypto.
*/
ATH10K_FLAG_RAW_MODE,
/* Disable HW crypto engine */
ATH10K_FLAG_HW_CRYPTO_DISABLED,
};
enum ath10k_cal_mode {
@ -484,6 +498,13 @@ enum ath10k_cal_mode {
ATH10K_CAL_MODE_DT,
};
enum ath10k_crypt_mode {
/* Only use hardware crypto engine */
ATH10K_CRYPT_MODE_HW,
/* Only use software crypto engine */
ATH10K_CRYPT_MODE_SW,
};
static inline const char *ath10k_cal_mode_str(enum ath10k_cal_mode mode)
{
switch (mode) {
@ -673,6 +694,8 @@ struct ath10k {
struct completion vdev_setup_done;
struct workqueue_struct *workqueue;
/* Auxiliary workqueue */
struct workqueue_struct *workqueue_aux;
/* prevents concurrent FW reconfiguration */
struct mutex conf_mutex;
@ -695,6 +718,9 @@ struct ath10k {
int num_active_peers;
int num_tids;
struct work_struct svc_rdy_work;
struct sk_buff *svc_rdy_skb;
struct work_struct offchan_tx_work;
struct sk_buff_head offchan_tx_queue;
struct completion offchan_tx_completed;

View File

@ -124,11 +124,11 @@ EXPORT_SYMBOL(ath10k_info);
void ath10k_print_driver_info(struct ath10k *ar)
{
char fw_features[128];
char fw_features[128] = {};
ath10k_core_get_fw_features_str(ar, fw_features, sizeof(fw_features));
ath10k_info(ar, "%s (0x%08x, 0x%08x%s%s%s) fw %s api %d htt-ver %d.%d wmi-op %d htt-op %d cal %s max-sta %d features %s\n",
ath10k_info(ar, "%s (0x%08x, 0x%08x%s%s%s) fw %s api %d htt-ver %d.%d wmi-op %d htt-op %d cal %s max-sta %d raw %d hwcrypto %d features %s\n",
ar->hw_params.name,
ar->target_version,
ar->chip_id,
@ -144,6 +144,8 @@ void ath10k_print_driver_info(struct ath10k *ar)
ar->htt.op_version,
ath10k_cal_mode_str(ar->cal_mode),
ar->max_num_stations,
test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags),
!test_bit(ATH10K_FLAG_HW_CRYPTO_DISABLED, &ar->dev_flags),
fw_features);
ath10k_info(ar, "debug %d debugfs %d tracing %d dfs %d testmode %d\n",
config_enabled(CONFIG_ATH10K_DEBUG),
@ -1363,12 +1365,8 @@ static ssize_t ath10k_read_htt_max_amsdu_ampdu(struct file *file,
mutex_lock(&ar->conf_mutex);
if (ar->debug.htt_max_amsdu)
amsdu = ar->debug.htt_max_amsdu;
if (ar->debug.htt_max_ampdu)
ampdu = ar->debug.htt_max_ampdu;
amsdu = ar->htt.max_num_amsdu;
ampdu = ar->htt.max_num_ampdu;
mutex_unlock(&ar->conf_mutex);
len = scnprintf(buf, sizeof(buf), "%u %u\n", amsdu, ampdu);
@ -1402,8 +1400,8 @@ static ssize_t ath10k_write_htt_max_amsdu_ampdu(struct file *file,
goto out;
res = count;
ar->debug.htt_max_amsdu = amsdu;
ar->debug.htt_max_ampdu = ampdu;
ar->htt.max_num_amsdu = amsdu;
ar->htt.max_num_ampdu = ampdu;
out:
mutex_unlock(&ar->conf_mutex);
@ -1905,9 +1903,6 @@ void ath10k_debug_stop(struct ath10k *ar)
if (ar->debug.htt_stats_mask != 0)
cancel_delayed_work(&ar->debug.htt_stats_dwork);
ar->debug.htt_max_amsdu = 0;
ar->debug.htt_max_ampdu = 0;
ath10k_wmi_pdev_pktlog_disable(ar);
}

View File

@ -246,12 +246,31 @@ int ath10k_htt_setup(struct ath10k_htt *htt)
}
status = ath10k_htt_verify_version(htt);
if (status)
if (status) {
ath10k_warn(ar, "failed to verify htt version: %d\n",
status);
return status;
}
status = ath10k_htt_send_frag_desc_bank_cfg(htt);
if (status)
return status;
return ath10k_htt_send_rx_ring_cfg_ll(htt);
status = ath10k_htt_send_rx_ring_cfg_ll(htt);
if (status) {
ath10k_warn(ar, "failed to setup rx ring: %d\n",
status);
return status;
}
status = ath10k_htt_h2t_aggr_cfg_msg(htt,
htt->max_num_ampdu,
htt->max_num_amsdu);
if (status) {
ath10k_warn(ar, "failed to setup amsdu/ampdu limit: %d\n",
status);
return status;
}
return 0;
}

View File

@ -83,15 +83,39 @@ struct htt_ver_req {
* around the mask + shift defs.
*/
struct htt_data_tx_desc_frag {
__le32 paddr;
__le32 len;
union {
struct double_word_addr {
__le32 paddr;
__le32 len;
} __packed dword_addr;
struct triple_word_addr {
__le32 paddr_lo;
__le16 paddr_hi;
__le16 len_16;
} __packed tword_addr;
} __packed;
} __packed;
struct htt_msdu_ext_desc {
__le32 tso_flag[4];
__le32 tso_flag[3];
__le16 ip_identification;
u8 flags;
u8 reserved;
struct htt_data_tx_desc_frag frags[6];
};
#define HTT_MSDU_EXT_DESC_FLAG_IPV4_CSUM_ENABLE BIT(0)
#define HTT_MSDU_EXT_DESC_FLAG_UDP_IPV4_CSUM_ENABLE BIT(1)
#define HTT_MSDU_EXT_DESC_FLAG_UDP_IPV6_CSUM_ENABLE BIT(2)
#define HTT_MSDU_EXT_DESC_FLAG_TCP_IPV4_CSUM_ENABLE BIT(3)
#define HTT_MSDU_EXT_DESC_FLAG_TCP_IPV6_CSUM_ENABLE BIT(4)
#define HTT_MSDU_CHECKSUM_ENABLE (HTT_MSDU_EXT_DESC_FLAG_IPV4_CSUM_ENABLE \
| HTT_MSDU_EXT_DESC_FLAG_UDP_IPV4_CSUM_ENABLE \
| HTT_MSDU_EXT_DESC_FLAG_UDP_IPV6_CSUM_ENABLE \
| HTT_MSDU_EXT_DESC_FLAG_TCP_IPV4_CSUM_ENABLE \
| HTT_MSDU_EXT_DESC_FLAG_TCP_IPV6_CSUM_ENABLE)
enum htt_data_tx_desc_flags0 {
HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT = 1 << 0,
HTT_DATA_TX_DESC_FLAGS0_NO_AGGR = 1 << 1,
@ -260,6 +284,9 @@ struct htt_aggr_conf {
} __packed;
#define HTT_MGMT_FRM_HDR_DOWNLOAD_LEN 32
struct htt_mgmt_tx_desc_qca99x0 {
__le32 rate;
} __packed;
struct htt_mgmt_tx_desc {
u8 pad[sizeof(u32) - sizeof(struct htt_cmd_hdr)];
@ -268,6 +295,9 @@ struct htt_mgmt_tx_desc {
__le32 len;
__le32 vdev_id;
u8 hdr[HTT_MGMT_FRM_HDR_DOWNLOAD_LEN];
union {
struct htt_mgmt_tx_desc_qca99x0 qca99x0;
} __packed;
} __packed;
enum htt_mgmt_tx_status {
@ -1366,6 +1396,8 @@ struct ath10k_htt {
u8 target_version_minor;
struct completion target_version_received;
enum ath10k_fw_htt_op_version op_version;
u8 max_num_amsdu;
u8 max_num_ampdu;
const enum htt_t2h_msg_type *t2h_msg_types;
u32 t2h_msg_types_max;
@ -1528,6 +1560,12 @@ struct htt_rx_desc {
#define HTT_LOG2_MAX_CACHE_LINE_SIZE 7 /* 2^7 = 128 */
#define HTT_MAX_CACHE_LINE_SIZE_MASK ((1 << HTT_LOG2_MAX_CACHE_LINE_SIZE) - 1)
/* These values are default in most firmware revisions and apparently are a
* sweet spot performance wise.
*/
#define ATH10K_HTT_MAX_NUM_AMSDU_DEFAULT 3
#define ATH10K_HTT_MAX_NUM_AMPDU_DEFAULT 64
int ath10k_htt_connect(struct ath10k_htt *htt);
int ath10k_htt_init(struct ath10k *ar);
int ath10k_htt_setup(struct ath10k_htt *htt);
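
The new htt_data_tx_desc_frag union above carries either a double-word layout (32-bit address plus 32-bit length) or a triple-word one (a 48-bit address split across two words plus a 16-bit length). A small runnable sketch of the triple-word packing, assuming the obvious lo/hi split; endianness conversion (__cpu_to_le*) is omitted:

#include <stdint.h>
#include <stdio.h>

/* Userspace model of struct triple_word_addr: a 48-bit bus address split
 * across a 32-bit low word and a 16-bit high half-word, plus a 16-bit
 * fragment length. */
struct tword_addr {
        uint32_t paddr_lo;
        uint16_t paddr_hi;
        uint16_t len_16;
};

static struct tword_addr pack_frag(uint64_t paddr, uint16_t len)
{
        struct tword_addr t = {
                .paddr_lo = (uint32_t)(paddr & 0xffffffffu),
                .paddr_hi = (uint16_t)((paddr >> 32) & 0xffffu),
                .len_16 = len,
        };
        return t;
}

static uint64_t unpack_paddr(const struct tword_addr *t)
{
        return ((uint64_t)t->paddr_hi << 32) | t->paddr_lo;
}

int main(void)
{
        struct tword_addr t = pack_frag(0x0000123456789abcULL, 1500);

        printf("lo=0x%08x hi=0x%04x len=%u roundtrip=0x%llx\n",
               (unsigned)t.paddr_lo, (unsigned)t.paddr_hi,
               (unsigned)t.len_16, (unsigned long long)unpack_paddr(&t));
        return 0;
}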

View File

@ -368,7 +368,7 @@ static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
msdu_len_invalid = !!(__le32_to_cpu(rx_desc->attention.flags)
& (RX_ATTENTION_FLAGS_MPDU_LENGTH_ERR |
RX_ATTENTION_FLAGS_MSDU_LENGTH_ERR));
msdu_len = MS(__le32_to_cpu(rx_desc->msdu_start.info0),
msdu_len = MS(__le32_to_cpu(rx_desc->msdu_start.common.info0),
RX_MSDU_START_INFO0_MSDU_LENGTH);
msdu_chained = rx_desc->frag_info.ring2_more_count;
@ -394,7 +394,7 @@ static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
msdu_chaining = 1;
}
last_msdu = __le32_to_cpu(rx_desc->msdu_end.info0) &
last_msdu = __le32_to_cpu(rx_desc->msdu_end.common.info0) &
RX_MSDU_END_INFO0_LAST_MSDU;
trace_ath10k_htt_rx_desc(ar, &rx_desc->attention,
@ -740,7 +740,7 @@ ath10k_htt_rx_h_peer_channel(struct ath10k *ar, struct htt_rx_desc *rxd)
__cpu_to_le32(RX_ATTENTION_FLAGS_PEER_IDX_INVALID))
return NULL;
if (!(rxd->msdu_end.info0 &
if (!(rxd->msdu_end.common.info0 &
__cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU)))
return NULL;
@ -991,9 +991,9 @@ static void ath10k_htt_rx_h_undecap_raw(struct ath10k *ar,
bool is_last;
rxd = (void *)msdu->data - sizeof(*rxd);
is_first = !!(rxd->msdu_end.info0 &
is_first = !!(rxd->msdu_end.common.info0 &
__cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU));
is_last = !!(rxd->msdu_end.info0 &
is_last = !!(rxd->msdu_end.common.info0 &
__cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU));
/* Delivered decapped frame:
@ -1017,9 +1017,8 @@ static void ath10k_htt_rx_h_undecap_raw(struct ath10k *ar,
skb_trim(msdu, msdu->len - FCS_LEN);
/* In most cases this will be true for sniffed frames. It makes sense
* to deliver them as-is without stripping the crypto param. This would
* also make sense for software based decryption (which is not
* implemented in ath10k).
* to deliver them as-is without stripping the crypto param. This is
* necessary for software based decryption.
*
* If there's no error then the frame is decrypted. At least that is
* the case for frames that come in via fragmented rx indication.
@ -1104,9 +1103,9 @@ static void *ath10k_htt_rx_h_find_rfc1042(struct ath10k *ar,
rxd = (void *)msdu->data - sizeof(*rxd);
hdr = (void *)rxd->rx_hdr_status;
is_first = !!(rxd->msdu_end.info0 &
is_first = !!(rxd->msdu_end.common.info0 &
__cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU));
is_last = !!(rxd->msdu_end.info0 &
is_last = !!(rxd->msdu_end.common.info0 &
__cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU));
is_amsdu = !(is_first && is_last);
@ -1214,7 +1213,7 @@ static void ath10k_htt_rx_h_undecap(struct ath10k *ar,
*/
rxd = (void *)msdu->data - sizeof(*rxd);
decap = MS(__le32_to_cpu(rxd->msdu_start.info1),
decap = MS(__le32_to_cpu(rxd->msdu_start.common.info1),
RX_MSDU_START_INFO1_DECAP_FORMAT);
switch (decap) {
@ -1244,7 +1243,7 @@ static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb)
rxd = (void *)skb->data - sizeof(*rxd);
flags = __le32_to_cpu(rxd->attention.flags);
info = __le32_to_cpu(rxd->msdu_start.info1);
info = __le32_to_cpu(rxd->msdu_start.common.info1);
is_ip4 = !!(info & RX_MSDU_START_INFO1_IPV4_PROTO);
is_ip6 = !!(info & RX_MSDU_START_INFO1_IPV6_PROTO);
@ -1437,7 +1436,7 @@ static void ath10k_htt_rx_h_unchain(struct ath10k *ar,
first = skb_peek(amsdu);
rxd = (void *)first->data - sizeof(*rxd);
decap = MS(__le32_to_cpu(rxd->msdu_start.info1),
decap = MS(__le32_to_cpu(rxd->msdu_start.common.info1),
RX_MSDU_START_INFO1_DECAP_FORMAT);
if (!chained)
@ -1631,8 +1630,6 @@ static void ath10k_htt_rx_frm_tx_compl(struct ath10k *ar,
__le16 msdu_id;
int i;
lockdep_assert_held(&htt->tx_lock);
switch (status) {
case HTT_DATA_TX_STATUS_NO_ACK:
tx_done.no_ack = true;
@ -1757,14 +1754,14 @@ static int ath10k_htt_rx_extract_amsdu(struct sk_buff_head *list,
__skb_queue_tail(amsdu, msdu);
rxd = (void *)msdu->data - sizeof(*rxd);
if (rxd->msdu_end.info0 &
if (rxd->msdu_end.common.info0 &
__cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU))
break;
}
msdu = skb_peek_tail(amsdu);
rxd = (void *)msdu->data - sizeof(*rxd);
if (!(rxd->msdu_end.info0 &
if (!(rxd->msdu_end.common.info0 &
__cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU))) {
skb_queue_splice_init(amsdu, list);
return -EAGAIN;
@ -1998,15 +1995,11 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
break;
}
spin_lock_bh(&htt->tx_lock);
ath10k_txrx_tx_unref(htt, &tx_done);
spin_unlock_bh(&htt->tx_lock);
break;
}
case HTT_T2H_MSG_TYPE_TX_COMPL_IND:
spin_lock_bh(&htt->tx_lock);
__skb_queue_tail(&htt->tx_compl_q, skb);
spin_unlock_bh(&htt->tx_lock);
skb_queue_tail(&htt->tx_compl_q, skb);
tasklet_schedule(&htt->txrx_compl_task);
return;
case HTT_T2H_MSG_TYPE_SEC_IND: {
@ -2072,6 +2065,8 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
break;
case HTT_T2H_MSG_TYPE_CHAN_CHANGE:
break;
case HTT_T2H_MSG_TYPE_AGGR_CONF:
break;
case HTT_T2H_MSG_TYPE_EN_STATS:
case HTT_T2H_MSG_TYPE_TX_FETCH_IND:
case HTT_T2H_MSG_TYPE_TX_FETCH_CONF:
@ -2095,12 +2090,10 @@ static void ath10k_htt_txrx_compl_task(unsigned long ptr)
struct htt_resp *resp;
struct sk_buff *skb;
spin_lock_bh(&htt->tx_lock);
while ((skb = __skb_dequeue(&htt->tx_compl_q))) {
while ((skb = skb_dequeue(&htt->tx_compl_q))) {
ath10k_htt_rx_frm_tx_compl(htt->ar, skb);
dev_kfree_skb_any(skb);
}
spin_unlock_bh(&htt->tx_lock);
spin_lock_bh(&htt->rx_ring.lock);
while ((skb = __skb_dequeue(&htt->rx_compl_q))) {
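
Several hunks above replace reads of msdu_start/msdu_end words with accesses through the new common sub-struct; the MS()/SM() helpers they use are plain mask-and-shift macros. A runnable model with an illustrative field layout (the mask below is not the hardware's real MSDU_LENGTH field definition):

#include <stdint.h>
#include <stdio.h>

/* Illustrative field: 14 length bits at the bottom of info0. */
#define FIELD_MSDU_LENGTH_MASK 0x00003fffu
#define FIELD_MSDU_LENGTH_LSB  0

/* MS() extracts a field, SM() places a value into one, matching the
 * mask-and-shift convention used throughout the rx path. */
#define MS(v, f) (((v) & f##_MASK) >> f##_LSB)
#define SM(v, f) (((v) << f##_LSB) & f##_MASK)

int main(void)
{
        uint32_t info0 = SM(1234u, FIELD_MSDU_LENGTH); /* encode a length */

        printf("msdu_len=%u\n", MS(info0, FIELD_MSDU_LENGTH));
        return 0;
}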

View File

@ -63,7 +63,8 @@ int ath10k_htt_tx_alloc_msdu_id(struct ath10k_htt *htt, struct sk_buff *skb)
lockdep_assert_held(&htt->tx_lock);
ret = idr_alloc(&htt->pending_tx, skb, 0, 0x10000, GFP_ATOMIC);
ret = idr_alloc(&htt->pending_tx, skb, 0,
htt->max_num_pending_tx, GFP_ATOMIC);
ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx alloc msdu_id %d\n", ret);
@ -133,9 +134,7 @@ static int ath10k_htt_tx_clean_up_pending(int msdu_id, void *skb, void *ctx)
tx_done.discard = 1;
tx_done.msdu_id = msdu_id;
spin_lock_bh(&htt->tx_lock);
ath10k_txrx_tx_unref(htt, &tx_done);
spin_unlock_bh(&htt->tx_lock);
return 0;
}
@ -259,6 +258,7 @@ int ath10k_htt_send_frag_desc_bank_cfg(struct ath10k_htt *htt)
cmd->frag_desc_bank_cfg.desc_size = sizeof(struct htt_msdu_ext_desc);
cmd->frag_desc_bank_cfg.bank_base_addrs[0] =
__cpu_to_le32(htt->frag_desc.paddr);
cmd->frag_desc_bank_cfg.bank_id[0].bank_min_id = 0;
cmd->frag_desc_bank_cfg.bank_id[0].bank_max_id =
__cpu_to_le16(htt->max_num_pending_tx - 1);
@ -427,12 +427,11 @@ int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
spin_lock_bh(&htt->tx_lock);
res = ath10k_htt_tx_alloc_msdu_id(htt, msdu);
spin_unlock_bh(&htt->tx_lock);
if (res < 0) {
spin_unlock_bh(&htt->tx_lock);
goto err_tx_dec;
}
msdu_id = res;
spin_unlock_bh(&htt->tx_lock);
txdesc = ath10k_htc_alloc_skb(ar, len);
if (!txdesc) {
@ -448,6 +447,8 @@ int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
skb_put(txdesc, len);
cmd = (struct htt_cmd *)txdesc->data;
memset(cmd, 0, len);
cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_MGMT_TX;
cmd->mgmt_tx.msdu_paddr = __cpu_to_le32(ATH10K_SKB_CB(msdu)->paddr);
cmd->mgmt_tx.len = __cpu_to_le32(msdu->len);
@ -494,6 +495,7 @@ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
u16 msdu_id, flags1 = 0;
dma_addr_t paddr = 0;
u32 frags_paddr = 0;
struct htt_msdu_ext_desc *ext_desc = NULL;
res = ath10k_htt_tx_inc_pending(htt);
if (res)
@ -501,12 +503,11 @@ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
spin_lock_bh(&htt->tx_lock);
res = ath10k_htt_tx_alloc_msdu_id(htt, msdu);
spin_unlock_bh(&htt->tx_lock);
if (res < 0) {
spin_unlock_bh(&htt->tx_lock);
goto err_tx_dec;
}
msdu_id = res;
spin_unlock_bh(&htt->tx_lock);
prefetch_len = min(htt->prefetch_len, msdu->len);
prefetch_len = roundup(prefetch_len, 4);
@ -522,8 +523,12 @@ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
if ((ieee80211_is_action(hdr->frame_control) ||
ieee80211_is_deauth(hdr->frame_control) ||
ieee80211_is_disassoc(hdr->frame_control)) &&
ieee80211_has_protected(hdr->frame_control))
ieee80211_has_protected(hdr->frame_control)) {
skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
} else if (!skb_cb->htt.nohwcrypt &&
skb_cb->txmode == ATH10K_HW_TXRX_RAW) {
skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
}
skb_cb->paddr = dma_map_single(dev, msdu->data, msdu->len,
DMA_TO_DEVICE);
@ -537,16 +542,30 @@ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;
/* pass through */
case ATH10K_HW_TXRX_ETHERNET:
frags = skb_cb->htt.txbuf->frags;
if (ar->hw_params.continuous_frag_desc) {
memset(&htt->frag_desc.vaddr[msdu_id], 0,
sizeof(struct htt_msdu_ext_desc));
frags = (struct htt_data_tx_desc_frag *)
&htt->frag_desc.vaddr[msdu_id].frags;
ext_desc = &htt->frag_desc.vaddr[msdu_id];
frags[0].tword_addr.paddr_lo =
__cpu_to_le32(skb_cb->paddr);
frags[0].tword_addr.paddr_hi = 0;
frags[0].tword_addr.len_16 = __cpu_to_le16(msdu->len);
frags[0].paddr = __cpu_to_le32(skb_cb->paddr);
frags[0].len = __cpu_to_le32(msdu->len);
frags[1].paddr = 0;
frags[1].len = 0;
frags_paddr = htt->frag_desc.paddr +
(sizeof(struct htt_msdu_ext_desc) * msdu_id);
} else {
frags = skb_cb->htt.txbuf->frags;
frags[0].dword_addr.paddr =
__cpu_to_le32(skb_cb->paddr);
frags[0].dword_addr.len = __cpu_to_le32(msdu->len);
frags[1].dword_addr.paddr = 0;
frags[1].dword_addr.len = 0;
frags_paddr = skb_cb->htt.txbuf_paddr;
}
flags0 |= SM(skb_cb->txmode, HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
frags_paddr = skb_cb->htt.txbuf_paddr;
break;
case ATH10K_HW_TXRX_MGMT:
flags0 |= SM(ATH10K_HW_TXRX_MGMT,
@ -580,14 +599,20 @@ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
prefetch_len);
skb_cb->htt.txbuf->htc_hdr.flags = 0;
if (skb_cb->htt.nohwcrypt)
flags0 |= HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT;
if (!skb_cb->is_protected)
flags0 |= HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT;
flags1 |= SM((u16)vdev_id, HTT_DATA_TX_DESC_FLAGS1_VDEV_ID);
flags1 |= SM((u16)tid, HTT_DATA_TX_DESC_FLAGS1_EXT_TID);
if (msdu->ip_summed == CHECKSUM_PARTIAL) {
if (msdu->ip_summed == CHECKSUM_PARTIAL &&
!test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) {
flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L3_OFFLOAD;
flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L4_OFFLOAD;
if (ar->hw_params.continuous_frag_desc)
ext_desc->flags |= HTT_MSDU_CHECKSUM_ENABLE;
}
/* Prevent firmware from sending up tx inspection requests. There's
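
With continuous_frag_desc hardware (qca99x0), the hunk above points the firmware at a per-MSDU slot inside one pre-allocated descriptor bank instead of the per-skb txbuf. A sketch of that indexing, with an illustrative slot size standing in for sizeof(struct htt_msdu_ext_desc):

#include <stdint.h>
#include <stdio.h>

#define SLOT_SIZE 64u /* illustrative; not the real descriptor size */

/* Each pending msdu_id indexes a fixed-size slot in one DMA-coherent
 * array, so the device-visible address is base + id * slot_size. */
static uint64_t frag_desc_paddr(uint64_t bank_base, unsigned int msdu_id)
{
        return bank_base + (uint64_t)SLOT_SIZE * msdu_id;
}

int main(void)
{
        uint64_t base = 0x80000000u;
        unsigned int id;

        for (id = 0; id < 3; id++)
                printf("msdu_id %u -> desc paddr 0x%llx\n", id,
                       (unsigned long long)frag_desc_paddr(base, id));
        return 0;
}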

View File

@ -217,14 +217,16 @@ void ath10k_hw_fill_survey_time(struct ath10k *ar, struct survey_info *survey,
#define QCA_REV_99X0(ar) ((ar)->hw_rev == ATH10K_HW_QCA99X0)
/* Known peculiarities:
* - current FW doesn't support raw rx mode (last tested v599)
* - current FW dumps upon raw tx mode (last tested v599)
* - raw appears in nwifi decap, raw and nwifi appear in ethernet decap
* - raw have FCS, nwifi doesn't
* - ethernet frames have 802.11 header decapped and parts (base hdr, cipher
* param, llc/snap) are aligned to 4byte boundaries each */
enum ath10k_hw_txrx_mode {
ATH10K_HW_TXRX_RAW = 0,
/* Native Wifi decap mode is used to align IP frames to 4-byte
* boundaries and avoid a very expensive re-alignment in mac80211.
*/
ATH10K_HW_TXRX_NATIVE_WIFI = 1,
ATH10K_HW_TXRX_ETHERNET = 2,
@ -286,10 +288,6 @@ enum ath10k_hw_rate_cck {
#define TARGET_RX_TIMEOUT_LO_PRI 100
#define TARGET_RX_TIMEOUT_HI_PRI 40
/* Native Wifi decap mode is used to align IP frames to 4-byte boundaries and
* avoid a very expensive re-alignment in mac80211. */
#define TARGET_RX_DECAP_MODE ATH10K_HW_TXRX_NATIVE_WIFI
#define TARGET_SCAN_MAX_PENDING_REQS 4
#define TARGET_BMISS_OFFLOAD_MAX_VDEV 3
#define TARGET_ROAM_OFFLOAD_MAX_VDEV 3
@ -324,7 +322,6 @@ enum ath10k_hw_rate_cck {
#define TARGET_10X_RX_CHAIN_MASK (BIT(0) | BIT(1) | BIT(2))
#define TARGET_10X_RX_TIMEOUT_LO_PRI 100
#define TARGET_10X_RX_TIMEOUT_HI_PRI 40
#define TARGET_10X_RX_DECAP_MODE ATH10K_HW_TXRX_NATIVE_WIFI
#define TARGET_10X_SCAN_MAX_PENDING_REQS 4
#define TARGET_10X_BMISS_OFFLOAD_MAX_VDEV 2
#define TARGET_10X_ROAM_OFFLOAD_MAX_VDEV 2
@ -363,10 +360,7 @@ enum ath10k_hw_rate_cck {
(TARGET_10_4_NUM_VDEVS))
#define TARGET_10_4_ACTIVE_PEERS 0
/* TODO: increase qcache max client limit to 512 after
* testing with 512 client.
*/
#define TARGET_10_4_NUM_QCACHE_PEERS_MAX 256
#define TARGET_10_4_NUM_QCACHE_PEERS_MAX 512
#define TARGET_10_4_QCACHE_ACTIVE_PEERS 50
#define TARGET_10_4_NUM_OFFLOAD_PEERS 0
#define TARGET_10_4_NUM_OFFLOAD_REORDER_BUFFS 0

View File

@ -197,6 +197,10 @@ static int ath10k_send_key(struct ath10k_vif *arvif,
return -EOPNOTSUPP;
}
if (test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) {
key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
}
if (cmd == DISABLE_KEY) {
arg.key_cipher = WMI_CIPHER_NONE;
arg.key_data = NULL;
@ -218,6 +222,9 @@ static int ath10k_install_key(struct ath10k_vif *arvif,
reinit_completion(&ar->install_key_done);
if (arvif->nohwcrypt)
return 1;
ret = ath10k_send_key(arvif, key, cmd, macaddr, flags);
if (ret)
return ret;
@ -256,7 +263,7 @@ static int ath10k_install_peer_wep_keys(struct ath10k_vif *arvif,
ret = ath10k_install_key(arvif, arvif->wep_keys[i], SET_KEY,
addr, flags);
if (ret)
if (ret < 0)
return ret;
flags = 0;
@ -264,7 +271,7 @@ static int ath10k_install_peer_wep_keys(struct ath10k_vif *arvif,
ret = ath10k_install_key(arvif, arvif->wep_keys[i], SET_KEY,
addr, flags);
if (ret)
if (ret < 0)
return ret;
spin_lock_bh(&ar->data_lock);
@ -322,10 +329,10 @@ static int ath10k_clear_peer_keys(struct ath10k_vif *arvif,
/* key flags are not required to delete the key */
ret = ath10k_install_key(arvif, peer->keys[i],
DISABLE_KEY, addr, flags);
if (ret && first_errno == 0)
if (ret < 0 && first_errno == 0)
first_errno = ret;
if (ret)
if (ret < 0)
ath10k_warn(ar, "failed to remove peer wep key %d: %d\n",
i, ret);
@ -398,7 +405,7 @@ static int ath10k_clear_vdev_key(struct ath10k_vif *arvif,
break;
/* key flags are not required to delete the key */
ret = ath10k_install_key(arvif, key, DISABLE_KEY, addr, flags);
if (ret && first_errno == 0)
if (ret < 0 && first_errno == 0)
first_errno = ret;
if (ret)
@ -591,11 +598,19 @@ ath10k_mac_get_any_chandef_iter(struct ieee80211_hw *hw,
static int ath10k_peer_create(struct ath10k *ar, u32 vdev_id, const u8 *addr,
enum wmi_peer_type peer_type)
{
struct ath10k_vif *arvif;
int num_peers = 0;
int ret;
lockdep_assert_held(&ar->conf_mutex);
if (ar->num_peers >= ar->max_num_peers)
num_peers = ar->num_peers;
/* Each vdev consumes a peer entry as well */
list_for_each_entry(arvif, &ar->arvifs, list)
num_peers++;
if (num_peers >= ar->max_num_peers)
return -ENOBUFS;
ret = ath10k_wmi_peer_create(ar, vdev_id, addr, peer_type);
@ -671,20 +686,6 @@ static int ath10k_mac_set_rts(struct ath10k_vif *arvif, u32 value)
return ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, value);
}
static int ath10k_mac_set_frag(struct ath10k_vif *arvif, u32 value)
{
struct ath10k *ar = arvif->ar;
u32 vdev_param;
if (value != 0xFFFFFFFF)
value = clamp_t(u32, arvif->ar->hw->wiphy->frag_threshold,
ATH10K_FRAGMT_THRESHOLD_MIN,
ATH10K_FRAGMT_THRESHOLD_MAX);
vdev_param = ar->wmi.vdev_param->fragmentation_threshold;
return ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, value);
}
static int ath10k_peer_delete(struct ath10k *ar, u32 vdev_id, const u8 *addr)
{
int ret;
@ -836,7 +837,7 @@ static inline int ath10k_vdev_setup_sync(struct ath10k *ar)
static int ath10k_monitor_vdev_start(struct ath10k *ar, int vdev_id)
{
struct cfg80211_chan_def *chandef = NULL;
struct ieee80211_channel *channel = chandef->chan;
struct ieee80211_channel *channel = NULL;
struct wmi_vdev_start_request_arg arg = {};
int ret = 0;
@ -2502,6 +2503,9 @@ static int ath10k_mac_vif_recalc_txbf(struct ath10k *ar,
u32 param;
u32 value;
if (ath10k_wmi_get_txbf_conf_scheme(ar) != WMI_TXBF_CONF_AFTER_ASSOC)
return 0;
if (!(ar->vht_cap_info &
(IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE |
IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE |
@ -3149,13 +3153,30 @@ ath10k_tx_h_get_txmode(struct ath10k *ar, struct ieee80211_vif *vif,
* Some wmi-tlv firmwares for qca6174 have broken Tx key selection for
* NativeWifi txmode - it selects AP key instead of peer key. It seems
* to work with Ethernet txmode so use it.
*
* FIXME: Check if raw mode works with TDLS.
*/
if (ieee80211_is_data_present(fc) && sta && sta->tdls)
return ATH10K_HW_TXRX_ETHERNET;
if (test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags))
return ATH10K_HW_TXRX_RAW;
return ATH10K_HW_TXRX_NATIVE_WIFI;
}
static bool ath10k_tx_h_use_hwcrypto(struct ieee80211_vif *vif,
struct sk_buff *skb) {
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
const u32 mask = IEEE80211_TX_INTFL_DONT_ENCRYPT |
IEEE80211_TX_CTL_INJECTED;
if ((info->flags & mask) == mask)
return false;
if (vif)
return !ath10k_vif_to_arvif(vif)->nohwcrypt;
return true;
}
/* HTT Tx uses Native Wifi tx mode which expects 802.11 frames without QoS
* Control in the header.
*/
@ -3322,6 +3343,7 @@ void ath10k_offchan_tx_work(struct work_struct *work)
int vdev_id;
int ret;
unsigned long time_left;
bool tmp_peer_created = false;
/* FW requirement: We must create a peer before FW will send out
* an offchannel frame. Otherwise the frame will be stuck and
@ -3359,6 +3381,7 @@ void ath10k_offchan_tx_work(struct work_struct *work)
if (ret)
ath10k_warn(ar, "failed to create peer %pM on vdev %d: %d\n",
peer_addr, vdev_id, ret);
tmp_peer_created = (ret == 0);
}
spin_lock_bh(&ar->data_lock);
@ -3374,7 +3397,7 @@ void ath10k_offchan_tx_work(struct work_struct *work)
ath10k_warn(ar, "timed out waiting for offchannel skb %p\n",
skb);
if (!peer) {
if (!peer && tmp_peer_created) {
ret = ath10k_peer_delete(ar, vdev_id, peer_addr);
if (ret)
ath10k_warn(ar, "failed to delete peer %pM on vdev %d: %d\n",
@ -3600,6 +3623,7 @@ static void ath10k_tx(struct ieee80211_hw *hw,
ATH10K_SKB_CB(skb)->htt.is_offchan = false;
ATH10K_SKB_CB(skb)->htt.freq = 0;
ATH10K_SKB_CB(skb)->htt.tid = ath10k_tx_h_get_tid(hdr);
ATH10K_SKB_CB(skb)->htt.nohwcrypt = !ath10k_tx_h_use_hwcrypto(vif, skb);
ATH10K_SKB_CB(skb)->vdev_id = ath10k_tx_h_get_vdev_id(ar, vif);
ATH10K_SKB_CB(skb)->txmode = ath10k_tx_h_get_txmode(ar, vif, sta, skb);
ATH10K_SKB_CB(skb)->is_protected = ieee80211_has_protected(fc);
@ -3615,12 +3639,11 @@ static void ath10k_tx(struct ieee80211_hw *hw,
ath10k_tx_h_8023(skb);
break;
case ATH10K_HW_TXRX_RAW:
/* FIXME: Packet injection isn't implemented. It should be
* doable with firmware 10.2 on qca988x.
*/
WARN_ON_ONCE(1);
ieee80211_free_txskb(hw, skb);
return;
if (!test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) {
WARN_ON_ONCE(1);
ieee80211_free_txskb(hw, skb);
return;
}
}
if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) {
@ -4019,6 +4042,43 @@ static u32 get_nss_from_chainmask(u16 chain_mask)
return 1;
}
static int ath10k_mac_set_txbf_conf(struct ath10k_vif *arvif)
{
u32 value = 0;
struct ath10k *ar = arvif->ar;
if (ath10k_wmi_get_txbf_conf_scheme(ar) != WMI_TXBF_CONF_BEFORE_ASSOC)
return 0;
if (ar->vht_cap_info & (IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE |
IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE))
value |= SM((ar->num_rf_chains - 1), WMI_TXBF_STS_CAP_OFFSET);
if (ar->vht_cap_info & (IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE |
IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE))
value |= SM((ar->num_rf_chains - 1), WMI_BF_SOUND_DIM_OFFSET);
if (!value)
return 0;
if (ar->vht_cap_info & IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE)
value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFER;
if (ar->vht_cap_info & IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE)
value |= (WMI_VDEV_PARAM_TXBF_MU_TX_BFER |
WMI_VDEV_PARAM_TXBF_SU_TX_BFER);
if (ar->vht_cap_info & IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE)
value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFEE;
if (ar->vht_cap_info & IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE)
value |= (WMI_VDEV_PARAM_TXBF_MU_TX_BFEE |
WMI_VDEV_PARAM_TXBF_SU_TX_BFEE);
return ath10k_wmi_vdev_set_param(ar, arvif->vdev_id,
ar->wmi.vdev_param->txbf, value);
}
/*
* TODO:
* Figure out how to handle WMI_VDEV_SUBTYPE_P2P_DEVICE,
@ -4060,6 +4120,11 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
sizeof(arvif->bitrate_mask.control[i].vht_mcs));
}
if (ar->num_peers >= ar->max_num_peers) {
ath10k_warn(ar, "refusing vdev creation due to insufficient peer entry resources in firmware\n");
return -ENOBUFS;
}
if (ar->free_vdev_map == 0) {
ath10k_warn(ar, "Free vdev map is empty, no more interfaces allowed.\n");
ret = -EBUSY;
@ -4139,6 +4204,14 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
goto err;
}
}
if (test_bit(ATH10K_FLAG_HW_CRYPTO_DISABLED, &ar->dev_flags))
arvif->nohwcrypt = true;
if (arvif->nohwcrypt &&
!test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) {
ath10k_warn(ar, "cryptmode module param needed for sw crypto\n");
goto err;
}
ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev create %d (add interface) type %d subtype %d bcnmode %s\n",
arvif->vdev_id, arvif->vdev_type, arvif->vdev_subtype,
@ -4237,16 +4310,16 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
}
}
ret = ath10k_mac_set_rts(arvif, ar->hw->wiphy->rts_threshold);
ret = ath10k_mac_set_txbf_conf(arvif);
if (ret) {
ath10k_warn(ar, "failed to set rts threshold for vdev %d: %d\n",
ath10k_warn(ar, "failed to set txbf for vdev %d: %d\n",
arvif->vdev_id, ret);
goto err_peer_delete;
}
ret = ath10k_mac_set_frag(arvif, ar->hw->wiphy->frag_threshold);
ret = ath10k_mac_set_rts(arvif, ar->hw->wiphy->rts_threshold);
if (ret) {
ath10k_warn(ar, "failed to set frag threshold for vdev %d: %d\n",
ath10k_warn(ar, "failed to set rts threshold for vdev %d: %d\n",
arvif->vdev_id, ret);
goto err_peer_delete;
}
@ -4728,6 +4801,9 @@ static int ath10k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
if (key->cipher == WLAN_CIPHER_SUITE_AES_CMAC)
return 1;
if (arvif->nohwcrypt)
return 1;
if (key->keyidx > WMI_MAX_KEY_INDEX)
return -ENOSPC;
@ -4797,6 +4873,7 @@ static int ath10k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
ret = ath10k_install_key(arvif, key, cmd, peer_addr, flags);
if (ret) {
WARN_ON(ret > 0);
ath10k_warn(ar, "failed to install key for vdev %i peer %pM: %d\n",
arvif->vdev_id, peer_addr, ret);
goto exit;
@ -4812,13 +4889,16 @@ static int ath10k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
ret = ath10k_install_key(arvif, key, cmd, peer_addr, flags2);
if (ret) {
WARN_ON(ret > 0);
ath10k_warn(ar, "failed to install (ucast) key for vdev %i peer %pM: %d\n",
arvif->vdev_id, peer_addr, ret);
ret2 = ath10k_install_key(arvif, key, DISABLE_KEY,
peer_addr, flags);
if (ret2)
if (ret2) {
WARN_ON(ret2 > 0);
ath10k_warn(ar, "failed to disable (mcast) key for vdev %i peer %pM: %d\n",
arvif->vdev_id, peer_addr, ret2);
}
goto exit;
}
}
@ -5545,6 +5625,21 @@ static int ath10k_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
return ret;
}
static int ath10k_mac_op_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
{
/* Even though there's a WMI enum for fragmentation threshold no known
* firmware actually implements it. Moreover it is not possible to delegate
* frame fragmentation to mac80211 because firmware clears the "more
* fragments" bit in frame control making it impossible for remote
* devices to reassemble frames.
*
* Hence implement a dummy callback just to say fragmentation isn't
* supported. This effectively prevents mac80211 from doing frame
* fragmentation in software.
*/
return -EOPNOTSUPP;
}
static void ath10k_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
u32 queues, bool drop)
{
@ -6387,6 +6482,7 @@ static const struct ieee80211_ops ath10k_ops = {
.remain_on_channel = ath10k_remain_on_channel,
.cancel_remain_on_channel = ath10k_cancel_remain_on_channel,
.set_rts_threshold = ath10k_set_rts_threshold,
.set_frag_threshold = ath10k_mac_op_set_frag_threshold,
.flush = ath10k_flush,
.tx_last_beacon = ath10k_tx_last_beacon,
.set_antenna = ath10k_set_antenna,
@ -6892,7 +6988,6 @@ int ath10k_mac_register(struct ath10k *ar)
ieee80211_hw_set(ar->hw, HAS_RATE_CONTROL);
ieee80211_hw_set(ar->hw, AP_LINK_PS);
ieee80211_hw_set(ar->hw, SPECTRUM_MGMT);
ieee80211_hw_set(ar->hw, SW_CRYPTO_CONTROL);
ieee80211_hw_set(ar->hw, SUPPORT_FAST_XMIT);
ieee80211_hw_set(ar->hw, CONNECTION_MONITOR);
ieee80211_hw_set(ar->hw, SUPPORTS_PER_STA_GTK);
@ -6900,6 +6995,9 @@ int ath10k_mac_register(struct ath10k *ar)
ieee80211_hw_set(ar->hw, CHANCTX_STA_CSA);
ieee80211_hw_set(ar->hw, QUEUE_CONTROL);
if (!test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags))
ieee80211_hw_set(ar->hw, SW_CRYPTO_CONTROL);
ar->hw->wiphy->features |= NL80211_FEATURE_STATIC_SMPS;
ar->hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
@ -7003,7 +7101,8 @@ int ath10k_mac_register(struct ath10k *ar)
goto err_free;
}
ar->hw->netdev_features = NETIF_F_HW_CSUM;
if (!test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags))
ar->hw->netdev_features = NETIF_F_HW_CSUM;
if (config_enabled(CONFIG_ATH10K_DFS_CERTIFIED)) {
/* Init ath dfs pattern detector */
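
The peer-accounting hunk above reserves one firmware peer entry per vdev before admitting a new peer, which is also why ath10k_add_interface() now refuses vdev creation when the budget is exhausted. A tiny model of that check:

#include <stdio.h>

/* Every vdev silently consumes one firmware peer entry, so the check
 * counts vdevs on top of explicit peers before admitting a new one;
 * returns 1 to admit, 0 for the driver's -ENOBUFS path. */
static int can_create_peer(int num_peers, int num_vdevs, int max_num_peers)
{
        return (num_peers + num_vdevs) < max_num_peers;
}

int main(void)
{
        /* e.g. 30 peers plus 3 vdevs against a limit of 32 -> refuse */
        printf("admit=%d\n", can_create_peer(30, 3, 32));
        return 0;
}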

View File

@ -64,6 +64,7 @@ MODULE_PARM_DESC(reset_mode, "0: auto, 1: warm only (default: 0)");
static const struct pci_device_id ath10k_pci_id_table[] = {
{ PCI_VDEVICE(ATHEROS, QCA988X_2_0_DEVICE_ID) }, /* PCI-E QCA988X V2 */
{ PCI_VDEVICE(ATHEROS, QCA6174_2_1_DEVICE_ID) }, /* PCI-E QCA6174 V2.1 */
{ PCI_VDEVICE(ATHEROS, QCA99X0_2_0_DEVICE_ID) }, /* PCI-E QCA99X0 V2 */
{0}
};
@ -78,6 +79,7 @@ static const struct ath10k_pci_supp_chip ath10k_pci_supp_chips[] = {
{ QCA6174_2_1_DEVICE_ID, QCA6174_HW_3_0_CHIP_ID_REV },
{ QCA6174_2_1_DEVICE_ID, QCA6174_HW_3_1_CHIP_ID_REV },
{ QCA6174_2_1_DEVICE_ID, QCA6174_HW_3_2_CHIP_ID_REV },
{ QCA99X0_2_0_DEVICE_ID, QCA99X0_HW_2_0_CHIP_ID_REV },
};
static void ath10k_pci_buffer_cleanup(struct ath10k *ar);
@ -2761,7 +2763,6 @@ static int ath10k_pci_wait_for_target_init(struct ath10k *ar)
static int ath10k_pci_cold_reset(struct ath10k *ar)
{
int i;
u32 val;
ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot cold reset\n");
@ -2777,23 +2778,18 @@ static int ath10k_pci_cold_reset(struct ath10k *ar)
val |= 1;
ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val);
for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
if (ath10k_pci_reg_read32(ar, RTC_STATE_ADDRESS) &
RTC_STATE_COLD_RESET_MASK)
break;
msleep(1);
}
/* After writing into SOC_GLOBAL_RESET to put device into
* reset and pulling out of reset pcie may not be stable
* for any immediate pcie register access and cause bus error,
* add delay before any pcie access request to fix this issue.
*/
msleep(20);
/* Pull Target, including PCIe, out of RESET. */
val &= ~1;
ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val);
for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
if (!(ath10k_pci_reg_read32(ar, RTC_STATE_ADDRESS) &
RTC_STATE_COLD_RESET_MASK))
break;
msleep(1);
}
msleep(20);
ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot cold reset complete\n");

View File

@ -422,6 +422,12 @@ struct rx_mpdu_end {
#define RX_MSDU_START_INFO1_IP_FRAG (1 << 14)
#define RX_MSDU_START_INFO1_TCP_ONLY_ACK (1 << 15)
#define RX_MSDU_START_INFO2_DA_IDX_MASK 0x000007ff
#define RX_MSDU_START_INFO2_DA_IDX_LSB 0
#define RX_MSDU_START_INFO2_IP_PROTO_FIELD_MASK 0x00ff0000
#define RX_MSDU_START_INFO2_IP_PROTO_FIELD_LSB 16
#define RX_MSDU_START_INFO2_DA_BCAST_MCAST BIT(11)
/* The decapped header (rx_hdr_status) contains the following:
* a) 802.11 header
* [padding to 4 bytes]
@ -449,12 +455,23 @@ enum rx_msdu_decap_format {
RX_MSDU_DECAP_8023_SNAP_LLC = 3
};
struct rx_msdu_start {
struct rx_msdu_start_common {
__le32 info0; /* %RX_MSDU_START_INFO0_ */
__le32 flow_id_crc;
__le32 info1; /* %RX_MSDU_START_INFO1_ */
} __packed;
struct rx_msdu_start_qca99x0 {
__le32 info2; /* %RX_MSDU_START_INFO2_ */
} __packed;
struct rx_msdu_start {
struct rx_msdu_start_common common;
union {
struct rx_msdu_start_qca99x0 qca99x0;
} __packed;
} __packed;
/*
* msdu_length
* MSDU length in bytes after decapsulation. This field is
@ -540,7 +557,7 @@ struct rx_msdu_start {
#define RX_MSDU_END_INFO0_PRE_DELIM_ERR (1 << 30)
#define RX_MSDU_END_INFO0_RESERVED_3B (1 << 31)
struct rx_msdu_end {
struct rx_msdu_end_common {
__le16 ip_hdr_cksum;
__le16 tcp_hdr_cksum;
u8 key_id_octet;
@ -549,6 +566,36 @@ struct rx_msdu_end {
__le32 info0;
} __packed;
#define RX_MSDU_END_INFO1_TCP_FLAG_MASK 0x000001ff
#define RX_MSDU_END_INFO1_TCP_FLAG_LSB 0
#define RX_MSDU_END_INFO1_L3_HDR_PAD_MASK 0x00001c00
#define RX_MSDU_END_INFO1_L3_HDR_PAD_LSB 10
#define RX_MSDU_END_INFO1_WINDOW_SIZE_MASK 0xffff0000
#define RX_MSDU_END_INFO1_WINDOW_SIZE_LSB 16
#define RX_MSDU_END_INFO1_IRO_ELIGIBLE BIT(9)
#define RX_MSDU_END_INFO2_DA_OFFSET_MASK 0x0000003f
#define RX_MSDU_END_INFO2_DA_OFFSET_LSB 0
#define RX_MSDU_END_INFO2_SA_OFFSET_MASK 0x00000fc0
#define RX_MSDU_END_INFO2_SA_OFFSET_LSB 6
#define RX_MSDU_END_INFO2_TYPE_OFFSET_MASK 0x0003f000
#define RX_MSDU_END_INFO2_TYPE_OFFSET_LSB 12
struct rx_msdu_end_qca99x0 {
__le32 ipv6_crc;
__le32 tcp_seq_no;
__le32 tcp_ack_no;
__le32 info1;
__le32 info2;
} __packed;
struct rx_msdu_end {
struct rx_msdu_end_common common;
union {
struct rx_msdu_end_qca99x0 qca99x0;
} __packed;
} __packed;
/*
*ip_hdr_chksum
* This can include the IP header checksum or the pseudo header
@ -870,7 +917,11 @@ struct rx_ppdu_start {
#define RX_PPDU_END_INFO0_FLAGS_TX_HT_VHT_ACK (1 << 24)
#define RX_PPDU_END_INFO0_BB_CAPTURED_CHANNEL (1 << 25)
#define RX_PPDU_END_INFO1_PPDU_DONE (1 << 15)
#define RX_PPDU_END_INFO1_PEER_IDX_MASK 0x1ffc
#define RX_PPDU_END_INFO1_PEER_IDX_LSB 2
#define RX_PPDU_END_INFO1_BB_DATA BIT(0)
#define RX_PPDU_END_INFO1_PEER_IDX_VALID BIT(1)
#define RX_PPDU_END_INFO1_PPDU_DONE BIT(15)
struct rx_ppdu_end_common {
__le32 evm_p0;
@ -891,13 +942,13 @@ struct rx_ppdu_end_common {
__le32 evm_p15;
__le32 tsf_timestamp;
__le32 wb_timestamp;
} __packed;
struct rx_ppdu_end_qca988x {
u8 locationing_timestamp;
u8 phy_err_code;
__le16 flags; /* %RX_PPDU_END_FLAGS_ */
__le32 info0; /* %RX_PPDU_END_INFO0_ */
} __packed;
struct rx_ppdu_end_qca988x {
__le16 bb_length;
__le16 info1; /* %RX_PPDU_END_INFO1_ */
} __packed;
@ -909,16 +960,126 @@ struct rx_ppdu_end_qca988x {
#define RX_PPDU_END_RTT_NORMAL_MODE BIT(31)
struct rx_ppdu_end_qca6174 {
u8 locationing_timestamp;
u8 phy_err_code;
__le16 flags; /* %RX_PPDU_END_FLAGS_ */
__le32 info0; /* %RX_PPDU_END_INFO0_ */
__le32 rtt; /* %RX_PPDU_END_RTT_ */
__le16 bb_length;
__le16 info1; /* %RX_PPDU_END_INFO1_ */
} __packed;
#define RX_PKT_END_INFO0_RX_SUCCESS BIT(0)
#define RX_PKT_END_INFO0_ERR_TX_INTERRUPT_RX BIT(3)
#define RX_PKT_END_INFO0_ERR_OFDM_POWER_DROP BIT(4)
#define RX_PKT_END_INFO0_ERR_OFDM_RESTART BIT(5)
#define RX_PKT_END_INFO0_ERR_CCK_POWER_DROP BIT(6)
#define RX_PKT_END_INFO0_ERR_CCK_RESTART BIT(7)
#define RX_LOCATION_INFO_RTT_CORR_VAL_MASK 0x0001ffff
#define RX_LOCATION_INFO_RTT_CORR_VAL_LSB 0
#define RX_LOCATION_INFO_FAC_STATUS_MASK 0x000c0000
#define RX_LOCATION_INFO_FAC_STATUS_LSB 18
#define RX_LOCATION_INFO_PKT_BW_MASK 0x00700000
#define RX_LOCATION_INFO_PKT_BW_LSB 20
#define RX_LOCATION_INFO_RTT_TX_FRAME_PHASE_MASK 0x01800000
#define RX_LOCATION_INFO_RTT_TX_FRAME_PHASE_LSB 23
#define RX_LOCATION_INFO_CIR_STATUS BIT(17)
#define RX_LOCATION_INFO_RTT_MAC_PHY_PHASE BIT(25)
#define RX_LOCATION_INFO_RTT_TX_DATA_START_X BIT(26)
#define RX_LOCATION_INFO_HW_IFFT_MODE BIT(30)
#define RX_LOCATION_INFO_RX_LOCATION_VALID BIT(31)
struct rx_pkt_end {
__le32 info0; /* %RX_PKT_END_INFO0_ */
__le32 phy_timestamp_1;
__le32 phy_timestamp_2;
__le32 rx_location_info; /* %RX_LOCATION_INFO_ */
} __packed;
enum rx_phy_ppdu_end_info0 {
RX_PHY_PPDU_END_INFO0_ERR_RADAR = BIT(2),
RX_PHY_PPDU_END_INFO0_ERR_RX_ABORT = BIT(3),
RX_PHY_PPDU_END_INFO0_ERR_RX_NAP = BIT(4),
RX_PHY_PPDU_END_INFO0_ERR_OFDM_TIMING = BIT(5),
RX_PHY_PPDU_END_INFO0_ERR_OFDM_PARITY = BIT(6),
RX_PHY_PPDU_END_INFO0_ERR_OFDM_RATE = BIT(7),
RX_PHY_PPDU_END_INFO0_ERR_OFDM_LENGTH = BIT(8),
RX_PHY_PPDU_END_INFO0_ERR_OFDM_RESTART = BIT(9),
RX_PHY_PPDU_END_INFO0_ERR_OFDM_SERVICE = BIT(10),
RX_PHY_PPDU_END_INFO0_ERR_OFDM_POWER_DROP = BIT(11),
RX_PHY_PPDU_END_INFO0_ERR_CCK_BLOCKER = BIT(12),
RX_PHY_PPDU_END_INFO0_ERR_CCK_TIMING = BIT(13),
RX_PHY_PPDU_END_INFO0_ERR_CCK_HEADER_CRC = BIT(14),
RX_PHY_PPDU_END_INFO0_ERR_CCK_RATE = BIT(15),
RX_PHY_PPDU_END_INFO0_ERR_CCK_LENGTH = BIT(16),
RX_PHY_PPDU_END_INFO0_ERR_CCK_RESTART = BIT(17),
RX_PHY_PPDU_END_INFO0_ERR_CCK_SERVICE = BIT(18),
RX_PHY_PPDU_END_INFO0_ERR_CCK_POWER_DROP = BIT(19),
RX_PHY_PPDU_END_INFO0_ERR_HT_CRC = BIT(20),
RX_PHY_PPDU_END_INFO0_ERR_HT_LENGTH = BIT(21),
RX_PHY_PPDU_END_INFO0_ERR_HT_RATE = BIT(22),
RX_PHY_PPDU_END_INFO0_ERR_HT_ZLF = BIT(23),
RX_PHY_PPDU_END_INFO0_ERR_FALSE_RADAR_EXT = BIT(24),
RX_PHY_PPDU_END_INFO0_ERR_GREEN_FIELD = BIT(25),
RX_PHY_PPDU_END_INFO0_ERR_SPECTRAL_SCAN = BIT(26),
RX_PHY_PPDU_END_INFO0_ERR_RX_DYN_BW = BIT(27),
RX_PHY_PPDU_END_INFO0_ERR_LEG_HT_MISMATCH = BIT(28),
RX_PHY_PPDU_END_INFO0_ERR_VHT_CRC = BIT(29),
RX_PHY_PPDU_END_INFO0_ERR_VHT_SIGA = BIT(30),
RX_PHY_PPDU_END_INFO0_ERR_VHT_LSIG = BIT(31),
};
enum rx_phy_ppdu_end_info1 {
RX_PHY_PPDU_END_INFO1_ERR_VHT_NDP = BIT(0),
RX_PHY_PPDU_END_INFO1_ERR_VHT_NSYM = BIT(1),
RX_PHY_PPDU_END_INFO1_ERR_VHT_RX_EXT_SYM = BIT(2),
RX_PHY_PPDU_END_INFO1_ERR_VHT_RX_SKIP_ID0 = BIT(3),
RX_PHY_PPDU_END_INFO1_ERR_VHT_RX_SKIP_ID1_62 = BIT(4),
RX_PHY_PPDU_END_INFO1_ERR_VHT_RX_SKIP_ID63 = BIT(5),
RX_PHY_PPDU_END_INFO1_ERR_OFDM_LDPC_DECODER = BIT(6),
RX_PHY_PPDU_END_INFO1_ERR_DEFER_NAP = BIT(7),
RX_PHY_PPDU_END_INFO1_ERR_FDOMAIN_TIMEOUT = BIT(8),
RX_PHY_PPDU_END_INFO1_ERR_LSIG_REL_CHECK = BIT(9),
RX_PHY_PPDU_END_INFO1_ERR_BT_COLLISION = BIT(10),
RX_PHY_PPDU_END_INFO1_ERR_MU_FEEDBACK = BIT(11),
RX_PHY_PPDU_END_INFO1_ERR_TX_INTERRUPT_RX = BIT(12),
RX_PHY_PPDU_END_INFO1_ERR_RX_CBF = BIT(13),
};
struct rx_phy_ppdu_end {
__le32 info0; /* %RX_PHY_PPDU_END_INFO0_ */
__le32 info1; /* %RX_PHY_PPDU_END_INFO1_ */
} __packed;
#define RX_PPDU_END_RX_TIMING_OFFSET_MASK 0x00000fff
#define RX_PPDU_END_RX_TIMING_OFFSET_LSB 0
#define RX_PPDU_END_RX_INFO_RX_ANTENNA_MASK 0x00ffffff
#define RX_PPDU_END_RX_INFO_RX_ANTENNA_LSB 0
#define RX_PPDU_END_RX_INFO_TX_HT_VHT_ACK BIT(24)
#define RX_PPDU_END_RX_INFO_RX_PKT_END_VALID BIT(25)
#define RX_PPDU_END_RX_INFO_RX_PHY_PPDU_END_VALID BIT(26)
#define RX_PPDU_END_RX_INFO_RX_TIMING_OFFSET_VALID BIT(27)
#define RX_PPDU_END_RX_INFO_BB_CAPTURED_CHANNEL BIT(28)
#define RX_PPDU_END_RX_INFO_UNSUPPORTED_MU_NC BIT(29)
#define RX_PPDU_END_RX_INFO_OTP_TXBF_DISABLE BIT(30)
struct rx_ppdu_end_qca99x0 {
struct rx_pkt_end rx_pkt_end;
struct rx_phy_ppdu_end rx_phy_ppdu_end;
__le32 rx_timing_offset; /* %RX_PPDU_END_RX_TIMING_OFFSET_ */
__le32 rx_info; /* %RX_PPDU_END_RX_INFO_ */
__le16 bb_length;
__le16 info1; /* %RX_PPDU_END_INFO1_ */
} __packed;
struct rx_ppdu_end {
struct rx_ppdu_end_common common;
union {
struct rx_ppdu_end_qca988x qca988x;
struct rx_ppdu_end_qca6174 qca6174;
struct rx_ppdu_end_qca99x0 qca99x0;
} __packed;
} __packed;
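
The rx descriptor rework above follows one pattern throughout: chip-independent words move into a *_common struct and chip-specific tails sit in a trailing union, so a single in-memory layout covers qca988x, qca6174 and qca99x0. A compilable miniature of the idea (field names and widths are illustrative):

#include <stdint.h>
#include <stdio.h>

struct msdu_end_common {
        uint32_t info0;
};

/* Per-chip extension; only qca99x0 shown here. */
struct msdu_end_qca99x0 {
        uint32_t tcp_seq_no;
        uint32_t tcp_ack_no;
        uint32_t info2;
};

struct msdu_end {
        struct msdu_end_common common;
        union {
                struct msdu_end_qca99x0 qca99x0;
        } u;
};

int main(void)
{
        /* The union makes the struct as large as the biggest chip variant,
         * while common fields keep one offset on every chip. */
        printf("common=%zu total=%zu\n",
               sizeof(struct msdu_end_common), sizeof(struct msdu_end));
        return 0;
}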

View File

@ -53,8 +53,6 @@ void ath10k_txrx_tx_unref(struct ath10k_htt *htt,
struct ath10k_skb_cb *skb_cb;
struct sk_buff *msdu;
lockdep_assert_held(&htt->tx_lock);
ath10k_dbg(ar, ATH10K_DBG_HTT,
"htt tx completion msdu_id %u discard %d no_ack %d success %d\n",
tx_done->msdu_id, !!tx_done->discard,
@ -66,12 +64,19 @@ void ath10k_txrx_tx_unref(struct ath10k_htt *htt,
return;
}
spin_lock_bh(&htt->tx_lock);
msdu = idr_find(&htt->pending_tx, tx_done->msdu_id);
if (!msdu) {
ath10k_warn(ar, "received tx completion for invalid msdu_id: %d\n",
tx_done->msdu_id);
spin_unlock_bh(&htt->tx_lock);
return;
}
ath10k_htt_tx_free_msdu_id(htt, tx_done->msdu_id);
__ath10k_htt_tx_dec_pending(htt);
if (htt->num_pending_tx == 0)
wake_up(&htt->empty_tx_wq);
spin_unlock_bh(&htt->tx_lock);
skb_cb = ATH10K_SKB_CB(msdu);
@ -90,7 +95,7 @@ void ath10k_txrx_tx_unref(struct ath10k_htt *htt,
if (tx_done->discard) {
ieee80211_free_txskb(htt->ar->hw, msdu);
goto exit;
return;
}
if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
@ -104,12 +109,6 @@ void ath10k_txrx_tx_unref(struct ath10k_htt *htt,
ieee80211_tx_status(htt->ar->hw, msdu);
/* we do not own the msdu anymore */
exit:
ath10k_htt_tx_free_msdu_id(htt, tx_done->msdu_id);
__ath10k_htt_tx_dec_pending(htt);
if (htt->num_pending_tx == 0)
wake_up(&htt->empty_tx_wq);
}
struct ath10k_peer *ath10k_peer_find(struct ath10k *ar, int vdev_id,
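
The txrx.c hunks above narrow htt->tx_lock from a caller-held requirement to a short critical section around the msdu_id lookup and release, so the mac80211 status callback runs unlocked. A pthread model of that discipline:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static void *pending[16];

/* The lock covers only the id lookup and release, and is dropped before
 * the (potentially slow) completion callback runs. */
static void complete_tx(int id)
{
        void *msdu;

        pthread_mutex_lock(&table_lock);
        msdu = pending[id];       /* models idr_find() */
        pending[id] = NULL;       /* models ath10k_htt_tx_free_msdu_id() */
        pthread_mutex_unlock(&table_lock);

        if (msdu)
                printf("status for id %d delivered outside the lock\n", id);
}

int main(void)
{
        static int dummy;

        pending[3] = &dummy;
        complete_tx(3);
        return 0;
}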

View File

@ -49,6 +49,7 @@ struct wmi_ops {
struct wmi_roam_ev_arg *arg);
int (*pull_wow_event)(struct ath10k *ar, struct sk_buff *skb,
struct wmi_wow_ev_arg *arg);
enum wmi_txbf_conf (*get_txbf_conf_scheme)(struct ath10k *ar);
struct sk_buff *(*gen_pdev_suspend)(struct ath10k *ar, u32 suspend_opt);
struct sk_buff *(*gen_pdev_resume)(struct ath10k *ar);
@ -319,6 +320,15 @@ ath10k_wmi_pull_wow_event(struct ath10k *ar, struct sk_buff *skb,
return ar->wmi.ops->pull_wow_event(ar, skb, arg);
}
static inline enum wmi_txbf_conf
ath10k_wmi_get_txbf_conf_scheme(struct ath10k *ar)
{
if (!ar->wmi.ops->get_txbf_conf_scheme)
return WMI_TXBF_CONF_UNSUPPORTED;
return ar->wmi.ops->get_txbf_conf_scheme(ar);
}
static inline int
ath10k_wmi_mgmt_tx(struct ath10k *ar, struct sk_buff *msdu)
{
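
get_txbf_conf_scheme is wired up as an optional op: firmware families that do not provide it fall back to WMI_TXBF_CONF_UNSUPPORTED rather than crashing on a NULL pointer. A standalone sketch of that dispatch-with-default pattern:

#include <stdio.h>

enum txbf_conf { TXBF_UNSUPPORTED = 0, TXBF_BEFORE_ASSOC, TXBF_AFTER_ASSOC };

/* Simplified stand-in for the driver's per-firmware ops table. */
struct wmi_ops {
        enum txbf_conf (*get_txbf_conf_scheme)(void);
};

/* A missing callback degrades to a safe default instead of a NULL call. */
static enum txbf_conf get_txbf_conf_scheme(const struct wmi_ops *ops)
{
        if (!ops->get_txbf_conf_scheme)
                return TXBF_UNSUPPORTED;
        return ops->get_txbf_conf_scheme();
}

static enum txbf_conf tlv_scheme(void) { return TXBF_AFTER_ASSOC; }

int main(void)
{
        struct wmi_ops legacy = { 0 };
        struct wmi_ops tlv = { .get_txbf_conf_scheme = tlv_scheme };

        printf("legacy=%d tlv=%d\n",
               get_txbf_conf_scheme(&legacy), get_txbf_conf_scheme(&tlv));
        return 0;
}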

View File

@ -519,7 +519,7 @@ static void ath10k_wmi_tlv_op_rx(struct ath10k *ar, struct sk_buff *skb)
break;
case WMI_TLV_SERVICE_READY_EVENTID:
ath10k_wmi_event_service_ready(ar, skb);
break;
return;
case WMI_TLV_READY_EVENTID:
ath10k_wmi_event_ready(ar, skb);
break;
@ -1279,6 +1279,11 @@ ath10k_wmi_tlv_op_gen_pdev_set_rd(struct ath10k *ar,
return skb;
}
static enum wmi_txbf_conf ath10k_wmi_tlv_txbf_conf_scheme(struct ath10k *ar)
{
return WMI_TXBF_CONF_AFTER_ASSOC;
}
static struct sk_buff *
ath10k_wmi_tlv_op_gen_pdev_set_param(struct ath10k *ar, u32 param_id,
u32 param_value)
@ -1373,7 +1378,7 @@ static struct sk_buff *ath10k_wmi_tlv_op_gen_init(struct ath10k *ar)
cfg->rx_timeout_pri[1] = __cpu_to_le32(0x64);
cfg->rx_timeout_pri[2] = __cpu_to_le32(0x64);
cfg->rx_timeout_pri[3] = __cpu_to_le32(0x28);
cfg->rx_decap_mode = __cpu_to_le32(1);
cfg->rx_decap_mode = __cpu_to_le32(ar->wmi.rx_decap_mode);
cfg->scan_max_pending_reqs = __cpu_to_le32(4);
cfg->bmiss_offload_max_vdev = __cpu_to_le32(TARGET_TLV_NUM_VDEVS);
cfg->roam_offload_max_vdev = __cpu_to_le32(TARGET_TLV_NUM_VDEVS);
@ -3408,6 +3413,7 @@ static const struct wmi_ops wmi_tlv_ops = {
.pull_fw_stats = ath10k_wmi_tlv_op_pull_fw_stats,
.pull_roam_ev = ath10k_wmi_tlv_op_pull_roam_ev,
.pull_wow_event = ath10k_wmi_tlv_op_pull_wow_ev,
.get_txbf_conf_scheme = ath10k_wmi_tlv_txbf_conf_scheme,
.gen_pdev_suspend = ath10k_wmi_tlv_op_gen_pdev_suspend,
.gen_pdev_resume = ath10k_wmi_tlv_op_gen_pdev_resume,
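
NOTE: rx_decap_mode is no longer hard-coded; it follows
ar->wmi.rx_decap_mode, which ties into the raw-mode/software-crypto
module parameter mentioned in the merge description. A sketch of the
presumed selection done elsewhere in the series (the parameter name
and enum values are assumptions based on ath10k's hw.h conventions):

if (rawmode)    /* module parameter */
        ar->wmi.rx_decap_mode = ATH10K_HW_TXRX_RAW;
else
        ar->wmi.rx_decap_mode = ATH10K_HW_TXRX_NATIVE_WIFI;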


@ -3122,6 +3122,11 @@ static int ath10k_wmi_10_4_op_pull_swba_ev(struct ath10k *ar,
return 0;
}
static enum wmi_txbf_conf ath10k_wmi_10_4_txbf_conf_scheme(struct ath10k *ar)
{
return WMI_TXBF_CONF_BEFORE_ASSOC;
}
void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb)
{
struct wmi_swba_ev_arg arg = {};
@ -3498,7 +3503,7 @@ void ath10k_wmi_event_spectral_scan(struct ath10k *ar,
fftr, fftr_len,
tsf);
if (res < 0) {
ath10k_warn(ar, "failed to process fft report: %d\n",
ath10k_dbg(ar, ATH10K_DBG_WMI, "failed to process fft report: %d\n",
res);
return;
}
@ -3789,7 +3794,7 @@ static int ath10k_wmi_alloc_host_mem(struct ath10k *ar, u32 req_id,
ar->wmi.mem_chunks[idx].vaddr = dma_alloc_coherent(ar->dev,
pool_size,
&paddr,
GFP_ATOMIC);
GFP_KERNEL);
if (!ar->wmi.mem_chunks[idx].vaddr) {
ath10k_warn(ar, "failed to allocate memory chunk\n");
return -ENOMEM;
@ -3878,12 +3883,19 @@ ath10k_wmi_10x_op_pull_svc_rdy_ev(struct ath10k *ar, struct sk_buff *skb,
return 0;
}
void ath10k_wmi_event_service_ready(struct ath10k *ar, struct sk_buff *skb)
static void ath10k_wmi_event_service_ready_work(struct work_struct *work)
{
struct ath10k *ar = container_of(work, struct ath10k, svc_rdy_work);
struct sk_buff *skb = ar->svc_rdy_skb;
struct wmi_svc_rdy_ev_arg arg = {};
u32 num_units, req_id, unit_size, num_mem_reqs, num_unit_info, i;
int ret;
if (!skb) {
ath10k_warn(ar, "invalid service ready event skb\n");
return;
}
ret = ath10k_wmi_pull_svc_rdy(ar, skb, &arg);
if (ret) {
ath10k_warn(ar, "failed to parse service ready: %d\n", ret);
@ -4003,9 +4015,17 @@ void ath10k_wmi_event_service_ready(struct ath10k *ar, struct sk_buff *skb)
__le32_to_cpu(arg.eeprom_rd),
__le32_to_cpu(arg.num_mem_reqs));
dev_kfree_skb(skb);
ar->svc_rdy_skb = NULL;
complete(&ar->wmi.service_ready);
}
void ath10k_wmi_event_service_ready(struct ath10k *ar, struct sk_buff *skb)
{
ar->svc_rdy_skb = skb;
queue_work(ar->workqueue_aux, &ar->svc_rdy_work);
}
static int ath10k_wmi_op_pull_rdy_ev(struct ath10k *ar, struct sk_buff *skb,
struct wmi_rdy_ev_arg *arg)
{
@ -4177,7 +4197,7 @@ static void ath10k_wmi_op_rx(struct ath10k *ar, struct sk_buff *skb)
break;
case WMI_SERVICE_READY_EVENTID:
ath10k_wmi_event_service_ready(ar, skb);
break;
return;
case WMI_READY_EVENTID:
ath10k_wmi_event_ready(ar, skb);
break;
@ -4298,7 +4318,7 @@ static void ath10k_wmi_10_1_op_rx(struct ath10k *ar, struct sk_buff *skb)
break;
case WMI_10X_SERVICE_READY_EVENTID:
ath10k_wmi_event_service_ready(ar, skb);
break;
return;
case WMI_10X_READY_EVENTID:
ath10k_wmi_event_ready(ar, skb);
break;
@ -4409,7 +4429,7 @@ static void ath10k_wmi_10_2_op_rx(struct ath10k *ar, struct sk_buff *skb)
break;
case WMI_10_2_SERVICE_READY_EVENTID:
ath10k_wmi_event_service_ready(ar, skb);
break;
return;
case WMI_10_2_READY_EVENTID:
ath10k_wmi_event_ready(ar, skb);
break;
@ -4461,7 +4481,7 @@ static void ath10k_wmi_10_4_op_rx(struct ath10k *ar, struct sk_buff *skb)
break;
case WMI_10_4_SERVICE_READY_EVENTID:
ath10k_wmi_event_service_ready(ar, skb);
break;
return;
case WMI_10_4_SCAN_EVENTID:
ath10k_wmi_event_scan(ar, skb);
break;
@ -4688,8 +4708,7 @@ static struct sk_buff *ath10k_wmi_op_gen_init(struct ath10k *ar)
config.rx_timeout_pri_vi = __cpu_to_le32(TARGET_RX_TIMEOUT_LO_PRI);
config.rx_timeout_pri_be = __cpu_to_le32(TARGET_RX_TIMEOUT_LO_PRI);
config.rx_timeout_pri_bk = __cpu_to_le32(TARGET_RX_TIMEOUT_HI_PRI);
config.rx_decap_mode = __cpu_to_le32(TARGET_RX_DECAP_MODE);
config.rx_decap_mode = __cpu_to_le32(ar->wmi.rx_decap_mode);
config.scan_max_pending_reqs =
__cpu_to_le32(TARGET_SCAN_MAX_PENDING_REQS);
@ -4757,8 +4776,7 @@ static struct sk_buff *ath10k_wmi_10_1_op_gen_init(struct ath10k *ar)
config.rx_timeout_pri_vi = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_LO_PRI);
config.rx_timeout_pri_be = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_LO_PRI);
config.rx_timeout_pri_bk = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_HI_PRI);
config.rx_decap_mode = __cpu_to_le32(TARGET_10X_RX_DECAP_MODE);
config.rx_decap_mode = __cpu_to_le32(ar->wmi.rx_decap_mode);
config.scan_max_pending_reqs =
__cpu_to_le32(TARGET_10X_SCAN_MAX_PENDING_REQS);
@ -4823,7 +4841,7 @@ static struct sk_buff *ath10k_wmi_10_2_op_gen_init(struct ath10k *ar)
config.rx_timeout_pri_vi = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_LO_PRI);
config.rx_timeout_pri_be = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_LO_PRI);
config.rx_timeout_pri_bk = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_HI_PRI);
config.rx_decap_mode = __cpu_to_le32(TARGET_10X_RX_DECAP_MODE);
config.rx_decap_mode = __cpu_to_le32(ar->wmi.rx_decap_mode);
config.scan_max_pending_reqs =
__cpu_to_le32(TARGET_10X_SCAN_MAX_PENDING_REQS);
@ -6431,6 +6449,7 @@ static const struct wmi_ops wmi_10_4_ops = {
.pull_swba = ath10k_wmi_10_4_op_pull_swba_ev,
.pull_svc_rdy = ath10k_wmi_main_op_pull_svc_rdy_ev,
.pull_rdy = ath10k_wmi_op_pull_rdy_ev,
.get_txbf_conf_scheme = ath10k_wmi_10_4_txbf_conf_scheme,
.gen_pdev_suspend = ath10k_wmi_op_gen_pdev_suspend,
.gen_pdev_resume = ath10k_wmi_op_gen_pdev_resume,
@ -6514,6 +6533,8 @@ int ath10k_wmi_attach(struct ath10k *ar)
init_completion(&ar->wmi.service_ready);
init_completion(&ar->wmi.unified_ready);
INIT_WORK(&ar->svc_rdy_work, ath10k_wmi_event_service_ready_work);
return 0;
}
@ -6521,6 +6542,11 @@ void ath10k_wmi_detach(struct ath10k *ar)
{
int i;
cancel_work_sync(&ar->svc_rdy_work);
if (ar->svc_rdy_skb)
dev_kfree_skb(ar->svc_rdy_skb);
/* free the host memory chunks requested by firmware */
for (i = 0; i < ar->wmi.num_mem_chunks; i++) {
dma_free_coherent(ar->dev,
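
NOTE: two related changes interact in this file. Host memory chunks
are now allocated with GFP_KERNEL, which may sleep, so service-ready
processing moves off the rx path into svc_rdy_work on workqueue_aux.
And because skb ownership passes to that worker, every rx dispatcher
must 'return' instead of 'break' for this event id, or the common
dev_kfree_skb() at the end of the dispatcher would free the skb a
second time. Sketch of the dispatcher tail after the change:

case WMI_SERVICE_READY_EVENTID:
        ath10k_wmi_event_service_ready(ar, skb);
        return; /* skb now owned and freed by the svc_rdy worker */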


@ -4628,6 +4628,11 @@ enum wmi_10_4_vdev_param {
#define WMI_VDEV_PARAM_TXBF_SU_TX_BFER BIT(2)
#define WMI_VDEV_PARAM_TXBF_MU_TX_BFER BIT(3)
#define WMI_TXBF_STS_CAP_OFFSET_LSB 4
#define WMI_TXBF_STS_CAP_OFFSET_MASK 0xf0
#define WMI_BF_SOUND_DIM_OFFSET_LSB 8
#define WMI_BF_SOUND_DIM_OFFSET_MASK 0xf00
/* slot time long */
#define WMI_VDEV_SLOT_TIME_LONG 0x1
/* slot time short */
@ -6008,6 +6013,12 @@ struct wmi_tdls_peer_capab_arg {
u32 pref_offchan_bw;
};
enum wmi_txbf_conf {
WMI_TXBF_CONF_UNSUPPORTED,
WMI_TXBF_CONF_BEFORE_ASSOC,
WMI_TXBF_CONF_AFTER_ASSOC,
};
struct ath10k;
struct ath10k_vif;
struct ath10k_fw_stats_pdev;
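
NOTE: the new WMI_TXBF_*/WMI_BF_* defines describe fields packed into
a single TXBF vdev-param word. An illustrative composition (the field
values 3 and 2 are arbitrary examples, not recommended settings):

u32 txbf = WMI_VDEV_PARAM_TXBF_SU_TX_BFER |        /* SU beamformer */
           (3 << WMI_TXBF_STS_CAP_OFFSET_LSB) |    /* STS capability */
           (2 << WMI_BF_SOUND_DIM_OFFSET_LSB);     /* sounding dims */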


@ -301,8 +301,26 @@ int ath10k_wow_op_resume(struct ieee80211_hw *hw)
ath10k_warn(ar, "failed to wakeup from wow: %d\n", ret);
exit:
if (ret) {
switch (ar->state) {
case ATH10K_STATE_ON:
ar->state = ATH10K_STATE_RESTARTING;
ret = 1;
break;
case ATH10K_STATE_OFF:
case ATH10K_STATE_RESTARTING:
case ATH10K_STATE_RESTARTED:
case ATH10K_STATE_UTF:
case ATH10K_STATE_WEDGED:
ath10k_warn(ar, "encountered unexpected device state %d on resume, cannot recover\n",
ar->state);
ret = -EIO;
break;
}
}
mutex_unlock(&ar->conf_mutex);
return ret ? 1 : 0;
return ret;
}
int ath10k_wow_init(struct ath10k *ar)
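
NOTE: the hunk relies on the mac80211 resume convention: 0 means
resumed, 1 asks mac80211 to restart the hardware (it will run its
restart machinery and bring the driver back up), and a negative errno
is a hard failure. The state switch above collapses to this sketch:

if (ret) {
        if (ar->state == ATH10K_STATE_ON) {
                ar->state = ATH10K_STATE_RESTARTING;
                ret = 1;        /* mac80211 restarts the hw for us */
        } else {
                ret = -EIO;     /* unexpected state, no recovery */
        }
}
return ret;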


@ -427,7 +427,7 @@ struct htc_endpoint_credit_dist {
};
/*
* credit distibution code that is passed into the distrbution function,
* credit distribution code that is passed into the distribution function,
* there are mandatory and optional codes that must be handled
*/
enum htc_credit_dist_reason {


@ -172,14 +172,6 @@ struct ath_txq {
struct sk_buff_head complete_q;
};
struct ath_atx_ac {
struct ath_txq *txq;
struct list_head list;
struct list_head tid_q;
bool clear_ps_filter;
bool sched;
};
struct ath_frame_info {
struct ath_buf *bf;
u16 framelen;
@ -242,7 +234,7 @@ struct ath_atx_tid {
struct sk_buff_head buf_q;
struct sk_buff_head retry_q;
struct ath_node *an;
struct ath_atx_ac *ac;
struct ath_txq *txq;
unsigned long tx_buf[BITS_TO_LONGS(ATH_TID_MAX_BUFS)];
u16 seq_start;
u16 seq_next;
@ -252,8 +244,8 @@ struct ath_atx_tid {
int baw_tail; /* next unused tx buffer slot */
s8 bar_index;
bool sched;
bool active;
bool clear_ps_filter;
};
struct ath_node {
@ -261,7 +253,6 @@ struct ath_node {
struct ieee80211_sta *sta; /* station struct we're part of */
struct ieee80211_vif *vif; /* interface with which we're associated */
struct ath_atx_tid tid[IEEE80211_NUM_TIDS];
struct ath_atx_ac ac[IEEE80211_NUM_ACS];
u16 maxampdu;
u8 mpdudensity;
@ -410,6 +401,12 @@ enum ath_offchannel_state {
ATH_OFFCHANNEL_ROC_DONE,
};
enum ath_roc_complete_reason {
ATH_ROC_COMPLETE_EXPIRE,
ATH_ROC_COMPLETE_ABORT,
ATH_ROC_COMPLETE_CANCEL,
};
struct ath_offchannel {
struct ath_chanctx chan;
struct timer_list timer;
@ -471,7 +468,8 @@ void ath_chanctx_event(struct ath_softc *sc, struct ieee80211_vif *vif,
void ath_chanctx_set_next(struct ath_softc *sc, bool force);
void ath_offchannel_next(struct ath_softc *sc);
void ath_scan_complete(struct ath_softc *sc, bool abort);
void ath_roc_complete(struct ath_softc *sc, bool abort);
void ath_roc_complete(struct ath_softc *sc,
enum ath_roc_complete_reason reason);
struct ath_chanctx* ath_is_go_chanctx_present(struct ath_softc *sc);
#else
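
NOTE: with struct ath_atx_ac gone, a TID links directly onto the
per-AC queue list and carries its own txq pointer and PS-filter flag;
"scheduled" is no longer a flag but plain list membership. The
surviving relationship, sketched (not the full struct):

struct ath_atx_tid {
        struct list_head list;          /* on ctx->acq[ac] iff queued */
        struct ath_txq *txq;            /* was tid->ac->txq */
        bool clear_ps_filter;           /* was tid->ac->clear_ps_filter */
        /* ... remaining fields unchanged ... */
};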


@ -915,18 +915,27 @@ void ath_offchannel_next(struct ath_softc *sc)
}
}
void ath_roc_complete(struct ath_softc *sc, bool abort)
void ath_roc_complete(struct ath_softc *sc, enum ath_roc_complete_reason reason)
{
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
if (abort)
ath_dbg(common, CHAN_CTX, "RoC aborted\n");
else
ath_dbg(common, CHAN_CTX, "RoC expired\n");
sc->offchannel.roc_vif = NULL;
sc->offchannel.roc_chan = NULL;
ieee80211_remain_on_channel_expired(sc->hw);
switch (reason) {
case ATH_ROC_COMPLETE_ABORT:
ath_dbg(common, CHAN_CTX, "RoC aborted\n");
ieee80211_remain_on_channel_expired(sc->hw);
break;
case ATH_ROC_COMPLETE_EXPIRE:
ath_dbg(common, CHAN_CTX, "RoC expired\n");
ieee80211_remain_on_channel_expired(sc->hw);
break;
case ATH_ROC_COMPLETE_CANCEL:
ath_dbg(common, CHAN_CTX, "RoC canceled\n");
break;
}
ath_offchannel_next(sc);
ath9k_ps_restore(sc);
}
@ -1058,7 +1067,7 @@ static void ath_offchannel_timer(unsigned long data)
case ATH_OFFCHANNEL_ROC_START:
case ATH_OFFCHANNEL_ROC_WAIT:
sc->offchannel.state = ATH_OFFCHANNEL_ROC_DONE;
ath_roc_complete(sc, false);
ath_roc_complete(sc, ATH_ROC_COMPLETE_EXPIRE);
break;
default:
break;
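
NOTE: the three-way reason replaces the old bool so that a cancel,
which originates from mac80211 itself, does not echo
ieee80211_remain_on_channel_expired() back to the stack. Caller
mapping after this change, as a sketch:

ath_roc_complete(sc, ATH_ROC_COMPLETE_EXPIRE);  /* timer: notify mac80211 */
ath_roc_complete(sc, ATH_ROC_COMPLETE_ABORT);   /* abort: notify mac80211 */
ath_roc_complete(sc, ATH_ROC_COMPLETE_CANCEL);  /* cancel: stay silent */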


@ -26,12 +26,11 @@ static ssize_t read_file_node_aggr(struct file *file, char __user *user_buf,
struct ath_node *an = file->private_data;
struct ath_softc *sc = an->sc;
struct ath_atx_tid *tid;
struct ath_atx_ac *ac;
struct ath_txq *txq;
u32 len = 0, size = 4096;
char *buf;
size_t retval;
int tidno, acno;
int tidno;
buf = kzalloc(size, GFP_KERNEL);
if (buf == NULL)
@ -48,19 +47,6 @@ static ssize_t read_file_node_aggr(struct file *file, char __user *user_buf,
len += scnprintf(buf + len, size - len, "MPDU Density: %d\n\n",
an->mpdudensity);
len += scnprintf(buf + len, size - len,
"%2s%7s\n", "AC", "SCHED");
for (acno = 0, ac = &an->ac[acno];
acno < IEEE80211_NUM_ACS; acno++, ac++) {
txq = ac->txq;
ath_txq_lock(sc, txq);
len += scnprintf(buf + len, size - len,
"%2d%7d\n",
acno, ac->sched);
ath_txq_unlock(sc, txq);
}
len += scnprintf(buf + len, size - len,
"\n%3s%11s%10s%10s%10s%10s%9s%6s%8s\n",
"TID", "SEQ_START", "SEQ_NEXT", "BAW_SIZE",
@ -68,7 +54,7 @@ static ssize_t read_file_node_aggr(struct file *file, char __user *user_buf,
for (tidno = 0, tid = &an->tid[tidno];
tidno < IEEE80211_NUM_TIDS; tidno++, tid++) {
txq = tid->ac->txq;
txq = tid->txq;
ath_txq_lock(sc, txq);
if (tid->active) {
len += scnprintf(buf + len, size - len,
@ -80,7 +66,7 @@ static ssize_t read_file_node_aggr(struct file *file, char __user *user_buf,
tid->baw_head,
tid->baw_tail,
tid->bar_index,
tid->sched);
!list_empty(&tid->list));
}
ath_txq_unlock(sc, txq);
}


@ -74,7 +74,7 @@ static struct ath_ps_ops ath9k_htc_ps_ops = {
static int ath9k_htc_wait_for_target(struct ath9k_htc_priv *priv)
{
int time_left;
unsigned long time_left;
if (atomic_read(&priv->htc->tgt_ready) > 0) {
atomic_dec(&priv->htc->tgt_ready);


@ -146,7 +146,8 @@ static int htc_config_pipe_credits(struct htc_target *target)
{
struct sk_buff *skb;
struct htc_config_pipe_msg *cp_msg;
int ret, time_left;
int ret;
unsigned long time_left;
skb = alloc_skb(50 + sizeof(struct htc_frame_hdr), GFP_ATOMIC);
if (!skb) {
@ -184,7 +185,8 @@ static int htc_setup_complete(struct htc_target *target)
{
struct sk_buff *skb;
struct htc_comp_msg *comp_msg;
int ret = 0, time_left;
int ret = 0;
unsigned long time_left;
skb = alloc_skb(50 + sizeof(struct htc_frame_hdr), GFP_ATOMIC);
if (!skb) {
@ -236,7 +238,8 @@ int htc_connect_service(struct htc_target *target,
struct sk_buff *skb;
struct htc_endpoint *endpoint;
struct htc_conn_svc_msg *conn_msg;
int ret, time_left;
int ret;
unsigned long time_left;
/* Find an available endpoint */
endpoint = get_next_avail_ep(target->endpoint);


@ -3186,6 +3186,7 @@ static struct {
{ AR_SREV_VERSION_9550, "9550" },
{ AR_SREV_VERSION_9565, "9565" },
{ AR_SREV_VERSION_9531, "9531" },
{ AR_SREV_VERSION_9561, "9561" },
};
/* For devices with external radios */


@ -736,13 +736,14 @@ static const struct ieee80211_iface_limit if_limits_multi[] = {
BIT(NL80211_IFTYPE_P2P_CLIENT) |
BIT(NL80211_IFTYPE_P2P_GO) },
{ .max = 1, .types = BIT(NL80211_IFTYPE_ADHOC) },
{ .max = 1, .types = BIT(NL80211_IFTYPE_P2P_DEVICE) },
};
static const struct ieee80211_iface_combination if_comb_multi[] = {
{
.limits = if_limits_multi,
.n_limits = ARRAY_SIZE(if_limits_multi),
.max_interfaces = 2,
.max_interfaces = 3,
.num_different_channels = 2,
.beacon_int_infra_match = true,
},
@ -826,6 +827,7 @@ static void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
ieee80211_hw_set(hw, SIGNAL_DBM);
ieee80211_hw_set(hw, RX_INCLUDES_FCS);
ieee80211_hw_set(hw, HOST_BROADCAST_PS_BUFFERING);
ieee80211_hw_set(hw, SUPPORT_FAST_XMIT);
if (ath9k_ps_enable)
ieee80211_hw_set(hw, SUPPORTS_PS);
@ -855,6 +857,10 @@ static void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
BIT(NL80211_IFTYPE_MESH_POINT) |
BIT(NL80211_IFTYPE_WDS);
if (ath9k_is_chanctx_enabled())
hw->wiphy->interface_modes |=
BIT(NL80211_IFTYPE_P2P_DEVICE);
hw->wiphy->iface_combinations = if_comb;
hw->wiphy->n_iface_combinations = ARRAY_SIZE(if_comb);
}
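
NOTE: the hunks in this file are linked: SUPPORT_FAST_XMIT opts ath9k
into mac80211's fast-xmit path (the "add fast-xmit support" item from
the merge description), while the P2P_DEVICE interface is only
advertised when channel contexts are enabled, which is why it gets a
dedicated combination slot and max_interfaces grows from 2 to 3:

if (ath9k_is_chanctx_enabled())
        hw->wiphy->interface_modes |=
                BIT(NL80211_IFTYPE_P2P_DEVICE); /* the third iface slot */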


@ -172,7 +172,7 @@ static bool ath_paprd_send_frame(struct ath_softc *sc, struct sk_buff *skb, int
struct ath_hw *ah = sc->sc_ah;
struct ath_common *common = ath9k_hw_common(ah);
struct ath_tx_control txctl;
int time_left;
unsigned long time_left;
memset(&txctl, 0, sizeof(txctl));
txctl.txq = sc->tx.txq_map[IEEE80211_AC_BE];


@ -1459,13 +1459,18 @@ static void ath9k_configure_filter(struct ieee80211_hw *hw,
u64 multicast)
{
struct ath_softc *sc = hw->priv;
struct ath_chanctx *ctx;
u32 rfilt;
changed_flags &= SUPPORTED_FILTERS;
*total_flags &= SUPPORTED_FILTERS;
spin_lock_bh(&sc->chan_lock);
sc->cur_chan->rxfilter = *total_flags;
ath_for_each_chanctx(sc, ctx)
ctx->rxfilter = *total_flags;
#ifdef CONFIG_ATH9K_CHANNEL_CONTEXT
sc->offchannel.chan.rxfilter = *total_flags;
#endif
spin_unlock_bh(&sc->chan_lock);
ath9k_ps_wakeup(sc);
@ -2246,7 +2251,7 @@ static void ath9k_cancel_pending_offchannel(struct ath_softc *sc)
del_timer_sync(&sc->offchannel.timer);
if (sc->offchannel.state >= ATH_OFFCHANNEL_ROC_START)
ath_roc_complete(sc, true);
ath_roc_complete(sc, ATH_ROC_COMPLETE_ABORT);
}
if (test_bit(ATH_OP_SCANNING, &common->op_flags)) {
@ -2355,7 +2360,7 @@ static int ath9k_cancel_remain_on_channel(struct ieee80211_hw *hw)
if (sc->offchannel.roc_vif) {
if (sc->offchannel.state >= ATH_OFFCHANNEL_ROC_START)
ath_roc_complete(sc, true);
ath_roc_complete(sc, ATH_ROC_COMPLETE_CANCEL);
}
mutex_unlock(&sc->mutex);


@ -299,7 +299,8 @@ int ath9k_wmi_cmd(struct wmi *wmi, enum wmi_cmd_id cmd_id,
sizeof(struct wmi_cmd_hdr);
struct sk_buff *skb;
u8 *data;
int time_left, ret = 0;
unsigned long time_left;
int ret = 0;
if (ah->ah_flags & AH_UNPLUGGED)
return 0;


@ -106,7 +106,6 @@ void ath_txq_unlock_complete(struct ath_softc *sc, struct ath_txq *txq)
static void ath_tx_queue_tid(struct ath_softc *sc, struct ath_txq *txq,
struct ath_atx_tid *tid)
{
struct ath_atx_ac *ac = tid->ac;
struct list_head *list;
struct ath_vif *avp = (struct ath_vif *) tid->an->vif->drv_priv;
struct ath_chanctx *ctx = avp->chanctx;
@ -114,19 +113,9 @@ static void ath_tx_queue_tid(struct ath_softc *sc, struct ath_txq *txq,
if (!ctx)
return;
if (tid->sched)
return;
tid->sched = true;
list_add_tail(&tid->list, &ac->tid_q);
if (ac->sched)
return;
ac->sched = true;
list = &ctx->acq[TID_TO_WME_AC(tid->tidno)];
list_add_tail(&ac->list, list);
if (list_empty(&tid->list))
list_add_tail(&tid->list, list);
}
static struct ath_frame_info *get_frame_info(struct sk_buff *skb)
@ -208,7 +197,7 @@ static struct sk_buff *ath_tid_dequeue(struct ath_atx_tid *tid)
static void
ath_tx_tid_change_state(struct ath_softc *sc, struct ath_atx_tid *tid)
{
struct ath_txq *txq = tid->ac->txq;
struct ath_txq *txq = tid->txq;
struct ieee80211_tx_info *tx_info;
struct sk_buff *skb, *tskb;
struct ath_buf *bf;
@ -237,7 +226,7 @@ ath_tx_tid_change_state(struct ath_softc *sc, struct ath_atx_tid *tid)
static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
struct ath_txq *txq = tid->ac->txq;
struct ath_txq *txq = tid->txq;
struct sk_buff *skb;
struct ath_buf *bf;
struct list_head bf_head;
@ -644,7 +633,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
ath_tx_queue_tid(sc, txq, tid);
if (ts->ts_status & (ATH9K_TXERR_FILT | ATH9K_TXERR_XRETRY))
tid->ac->clear_ps_filter = true;
tid->clear_ps_filter = true;
}
}
@ -734,7 +723,7 @@ static u32 ath_lookup_rate(struct ath_softc *sc, struct ath_buf *bf,
struct ieee80211_tx_rate *rates;
u32 max_4ms_framelen, frmlen;
u16 aggr_limit, bt_aggr_limit, legacy = 0;
int q = tid->ac->txq->mac80211_qnum;
int q = tid->txq->mac80211_qnum;
int i;
skb = bf->bf_mpdu;
@ -1471,8 +1460,8 @@ static bool ath_tx_sched_aggr(struct ath_softc *sc, struct ath_txq *txq,
if (list_empty(&bf_q))
return false;
if (tid->ac->clear_ps_filter || tid->an->no_ps_filter) {
tid->ac->clear_ps_filter = false;
if (tid->clear_ps_filter || tid->an->no_ps_filter) {
tid->clear_ps_filter = false;
tx_info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT;
}
@ -1491,7 +1480,7 @@ int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
an = (struct ath_node *)sta->drv_priv;
txtid = ATH_AN_2_TID(an, tid);
txq = txtid->ac->txq;
txq = txtid->txq;
ath_txq_lock(sc, txq);
@ -1525,7 +1514,7 @@ void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
{
struct ath_node *an = (struct ath_node *)sta->drv_priv;
struct ath_atx_tid *txtid = ATH_AN_2_TID(an, tid);
struct ath_txq *txq = txtid->ac->txq;
struct ath_txq *txq = txtid->txq;
ath_txq_lock(sc, txq);
txtid->active = false;
@ -1538,7 +1527,6 @@ void ath_tx_aggr_sleep(struct ieee80211_sta *sta, struct ath_softc *sc,
struct ath_node *an)
{
struct ath_atx_tid *tid;
struct ath_atx_ac *ac;
struct ath_txq *txq;
bool buffered;
int tidno;
@ -1546,25 +1534,18 @@ void ath_tx_aggr_sleep(struct ieee80211_sta *sta, struct ath_softc *sc,
for (tidno = 0, tid = &an->tid[tidno];
tidno < IEEE80211_NUM_TIDS; tidno++, tid++) {
ac = tid->ac;
txq = ac->txq;
txq = tid->txq;
ath_txq_lock(sc, txq);
if (!tid->sched) {
if (list_empty(&tid->list)) {
ath_txq_unlock(sc, txq);
continue;
}
buffered = ath_tid_has_buffered(tid);
tid->sched = false;
list_del(&tid->list);
if (ac->sched) {
ac->sched = false;
list_del(&ac->list);
}
list_del_init(&tid->list);
ath_txq_unlock(sc, txq);
@ -1575,18 +1556,16 @@ void ath_tx_aggr_sleep(struct ieee80211_sta *sta, struct ath_softc *sc,
void ath_tx_aggr_wakeup(struct ath_softc *sc, struct ath_node *an)
{
struct ath_atx_tid *tid;
struct ath_atx_ac *ac;
struct ath_txq *txq;
int tidno;
for (tidno = 0, tid = &an->tid[tidno];
tidno < IEEE80211_NUM_TIDS; tidno++, tid++) {
ac = tid->ac;
txq = ac->txq;
txq = tid->txq;
ath_txq_lock(sc, txq);
ac->clear_ps_filter = true;
tid->clear_ps_filter = true;
if (ath_tid_has_buffered(tid)) {
ath_tx_queue_tid(sc, txq, tid);
@ -1606,7 +1585,7 @@ void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta,
an = (struct ath_node *)sta->drv_priv;
tid = ATH_AN_2_TID(an, tidno);
txq = tid->ac->txq;
txq = tid->txq;
ath_txq_lock(sc, txq);
@ -1645,7 +1624,7 @@ void ath9k_release_buffered_frames(struct ieee80211_hw *hw,
tid = ATH_AN_2_TID(an, i);
ath_txq_lock(sc, tid->ac->txq);
ath_txq_lock(sc, tid->txq);
while (nframes > 0) {
bf = ath_tx_get_tid_subframe(sc, sc->tx.uapsdq, tid, &tid_q);
if (!bf)
@ -1669,7 +1648,7 @@ void ath9k_release_buffered_frames(struct ieee80211_hw *hw,
if (an->sta && !ath_tid_has_buffered(tid))
ieee80211_sta_set_buffered(an->sta, i, false);
}
ath_txq_unlock_complete(sc, tid->ac->txq);
ath_txq_unlock_complete(sc, tid->txq);
}
if (list_empty(&bf_q))
@ -1918,9 +1897,8 @@ void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq)
void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
{
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
struct ath_atx_ac *ac, *last_ac;
struct ath_atx_tid *tid, *last_tid;
struct list_head *ac_list;
struct list_head *tid_list;
bool sent = false;
if (txq->mac80211_qnum < 0)
@ -1930,63 +1908,45 @@ void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
return;
spin_lock_bh(&sc->chan_lock);
ac_list = &sc->cur_chan->acq[txq->mac80211_qnum];
tid_list = &sc->cur_chan->acq[txq->mac80211_qnum];
if (list_empty(ac_list)) {
if (list_empty(tid_list)) {
spin_unlock_bh(&sc->chan_lock);
return;
}
rcu_read_lock();
last_ac = list_entry(ac_list->prev, struct ath_atx_ac, list);
while (!list_empty(ac_list)) {
last_tid = list_entry(tid_list->prev, struct ath_atx_tid, list);
while (!list_empty(tid_list)) {
bool stop = false;
if (sc->cur_chan->stopped)
break;
ac = list_first_entry(ac_list, struct ath_atx_ac, list);
last_tid = list_entry(ac->tid_q.prev, struct ath_atx_tid, list);
list_del(&ac->list);
ac->sched = false;
tid = list_first_entry(tid_list, struct ath_atx_tid, list);
list_del_init(&tid->list);
while (!list_empty(&ac->tid_q)) {
if (ath_tx_sched_aggr(sc, txq, tid, &stop))
sent = true;
tid = list_first_entry(&ac->tid_q, struct ath_atx_tid,
list);
list_del(&tid->list);
tid->sched = false;
if (ath_tx_sched_aggr(sc, txq, tid, &stop))
sent = true;
/*
* add tid to round-robin queue if more frames
* are pending for the tid
*/
if (ath_tid_has_buffered(tid))
ath_tx_queue_tid(sc, txq, tid);
if (stop || tid == last_tid)
break;
}
if (!list_empty(&ac->tid_q) && !ac->sched) {
ac->sched = true;
list_add_tail(&ac->list, ac_list);
}
/*
* add tid to round-robin queue if more frames
* are pending for the tid
*/
if (ath_tid_has_buffered(tid))
ath_tx_queue_tid(sc, txq, tid);
if (stop)
break;
if (ac == last_ac) {
if (tid == last_tid) {
if (!sent)
break;
sent = false;
last_ac = list_entry(ac_list->prev,
struct ath_atx_ac, list);
last_tid = list_entry(tid_list->prev,
struct ath_atx_tid, list);
}
}
@ -2376,10 +2336,10 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
txq = sc->tx.uapsdq;
ath_txq_lock(sc, txq);
} else if (txctl->an && queue) {
WARN_ON(tid->ac->txq != txctl->txq);
WARN_ON(tid->txq != txctl->txq);
if (info->flags & IEEE80211_TX_CTL_CLEAR_PS_FILT)
tid->ac->clear_ps_filter = true;
tid->clear_ps_filter = true;
/*
* Add this frame to software queue for scheduling later
@ -2873,7 +2833,6 @@ int ath_tx_init(struct ath_softc *sc, int nbufs)
void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
{
struct ath_atx_tid *tid;
struct ath_atx_ac *ac;
int tidno, acno;
for (tidno = 0, tid = &an->tid[tidno];
@ -2884,26 +2843,18 @@ void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
tid->seq_start = tid->seq_next = 0;
tid->baw_size = WME_MAX_BA;
tid->baw_head = tid->baw_tail = 0;
tid->sched = false;
tid->active = false;
tid->clear_ps_filter = true;
__skb_queue_head_init(&tid->buf_q);
__skb_queue_head_init(&tid->retry_q);
INIT_LIST_HEAD(&tid->list);
acno = TID_TO_WME_AC(tidno);
tid->ac = &an->ac[acno];
}
for (acno = 0, ac = &an->ac[acno];
acno < IEEE80211_NUM_ACS; acno++, ac++) {
ac->sched = false;
ac->clear_ps_filter = true;
ac->txq = sc->tx.txq_map[acno];
INIT_LIST_HEAD(&ac->tid_q);
tid->txq = sc->tx.txq_map[acno];
}
}
void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an)
{
struct ath_atx_ac *ac;
struct ath_atx_tid *tid;
struct ath_txq *txq;
int tidno;
@ -2911,20 +2862,12 @@ void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an)
for (tidno = 0, tid = &an->tid[tidno];
tidno < IEEE80211_NUM_TIDS; tidno++, tid++) {
ac = tid->ac;
txq = ac->txq;
txq = tid->txq;
ath_txq_lock(sc, txq);
if (tid->sched) {
list_del(&tid->list);
tid->sched = false;
}
if (ac->sched) {
list_del(&ac->list);
tid->ac->sched = false;
}
if (!list_empty(&tid->list))
list_del_init(&tid->list);
ath_tid_drain(sc, txq, tid);
tid->active = false;
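
NOTE: ath_txq_schedule() collapses from two nested round-robins (ACs,
then TIDs within an AC) to a single round-robin over TIDs queued
directly on sc->cur_chan->acq[]; list_del_init()/list_empty() replace
the old sched flags and make requeueing idempotent. The core of the
new loop, reduced to a sketch:

while (!list_empty(tid_list)) {
        tid = list_first_entry(tid_list, struct ath_atx_tid, list);
        list_del_init(&tid->list);

        if (ath_tx_sched_aggr(sc, txq, tid, &stop))
                sent = true;
        if (ath_tid_has_buffered(tid))          /* more frames pending */
                ath_tx_queue_tid(sc, txq, tid); /* requeue at the tail */
        if (stop)
                break;
}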


@ -40,6 +40,8 @@ const char *ath_opmode_to_string(enum nl80211_iftype opmode)
return "P2P-CLIENT";
case NL80211_IFTYPE_P2P_GO:
return "P2P-GO";
case NL80211_IFTYPE_OCB:
return "OCB";
default:
return "UNKNOWN";
}


@ -12,6 +12,7 @@ wil6210-y += debug.o
wil6210-y += rx_reorder.o
wil6210-y += ioctl.o
wil6210-y += fw.o
wil6210-y += pm.o
wil6210-y += pmc.o
wil6210-$(CONFIG_WIL6210_TRACING) += trace.o
wil6210-y += wil_platform.o


@ -0,0 +1,61 @@
/* Copyright (c) 2015 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
/* This file contains the definitions for the boot loader
* for the Qualcomm "Sparrow" 60 Gigabit wireless solution.
*/
#ifndef BOOT_LOADER_EXPORT_H_
#define BOOT_LOADER_EXPORT_H_
struct bl_dedicated_registers_v1 {
__le32 boot_loader_ready; /* 0x880A3C driver will poll
* this Dword until BL will
* set it to 1 (initial value
* should be 0)
*/
__le32 boot_loader_struct_version; /* 0x880A40 BL struct ver. */
__le16 rf_type; /* 0x880A44 connected RF ID */
__le16 rf_status; /* 0x880A46 RF status,
* 0 is OK else error
*/
__le32 baseband_type; /* 0x880A48 board type ID */
u8 mac_address[6]; /* 0x880A4c BL mac address */
u8 bl_version_major; /* 0x880A52 BL ver. major */
u8 bl_version_minor; /* 0x880A53 BL ver. minor */
__le16 bl_version_subminor; /* 0x880A54 BL ver. subminor */
__le16 bl_version_build; /* 0x880A56 BL ver. build */
/* valid only for version 2 and above */
__le32 bl_assert_code; /* 0x880A58 BL Assert code */
__le32 bl_assert_blink; /* 0x880A5C BL Assert Branch */
__le32 bl_reserved[22]; /* 0x880A60 - 0x880AB4 */
__le32 bl_magic_number; /* 0x880AB8 BL Magic number */
} __packed;
/* the following struct is the version 0 struct */
struct bl_dedicated_registers_v0 {
__le32 boot_loader_ready; /* 0x880A3C driver will poll
* this Dword until BL will
* set it to 1 (initial value
* should be 0)
*/
#define BL_READY (1) /* ready indication */
__le32 boot_loader_struct_version; /* 0x880A40 BL struct ver. */
__le32 rf_type; /* 0x880A44 connected RF ID */
__le32 baseband_type; /* 0x880A48 board type ID */
u8 mac_address[6]; /* 0x880A4c BL mac address */
} __packed;
#endif /* BOOT_LOADER_EXPORT_H_ */
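
NOTE: both layouts keep boot_loader_ready and
boot_loader_struct_version at the same offsets, so the driver can read
the version through either view before deciding how to parse the rest,
which is exactly what the wil_get_bl_info() hunk further down relies
on. A compile-time check capturing that invariant (illustrative only,
not part of the series):

BUILD_BUG_ON(offsetof(struct bl_dedicated_registers_v0,
                      boot_loader_struct_version) !=
             offsetof(struct bl_dedicated_registers_v1,
                      boot_loader_struct_version));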


@ -336,12 +336,9 @@ static int wil_cfg80211_scan(struct wiphy *wiphy,
else
wil_dbg_misc(wil, "Scan has no IE's\n");
rc = wmi_set_ie(wil, WMI_FRAME_PROBE_REQ, request->ie_len,
request->ie);
if (rc) {
wil_err(wil, "Aborting scan, set_ie failed: %d\n", rc);
rc = wmi_set_ie(wil, WMI_FRAME_PROBE_REQ, request->ie_len, request->ie);
if (rc)
goto out;
}
rc = wmi_send(wil, WMI_START_SCAN_CMDID, &cmd, sizeof(cmd.cmd) +
cmd.cmd.num_channels * sizeof(cmd.cmd.channel_list[0]));
@ -462,10 +459,8 @@ static int wil_cfg80211_connect(struct wiphy *wiphy,
* ies in FW.
*/
rc = wmi_set_ie(wil, WMI_FRAME_ASSOC_REQ, sme->ie_len, sme->ie);
if (rc) {
wil_err(wil, "WMI_SET_APPIE_CMD failed\n");
if (rc)
goto out;
}
/* WMI_CONNECT_CMD */
memset(&conn, 0, sizeof(conn));
@ -722,56 +717,52 @@ static int wil_fix_bcon(struct wil6210_priv *wil,
{
struct ieee80211_mgmt *f = (struct ieee80211_mgmt *)bcon->probe_resp;
size_t hlen = offsetof(struct ieee80211_mgmt, u.probe_resp.variable);
int rc = 0;
if (bcon->probe_resp_len <= hlen)
return 0;
/* always use IE's from full probe frame, they has more info
* notable RSN
*/
bcon->proberesp_ies = f->u.probe_resp.variable;
bcon->proberesp_ies_len = bcon->probe_resp_len - hlen;
if (!bcon->assocresp_ies) {
bcon->assocresp_ies = f->u.probe_resp.variable;
bcon->assocresp_ies_len = bcon->probe_resp_len - hlen;
rc = 1;
bcon->assocresp_ies = bcon->proberesp_ies;
bcon->assocresp_ies_len = bcon->proberesp_ies_len;
}
return rc;
return 1;
}
/* internal functions for device reset and starting AP */
static int _wil_cfg80211_set_ies(struct wiphy *wiphy,
size_t probe_ies_len, const u8 *probe_ies,
size_t assoc_ies_len, const u8 *assoc_ies)
struct cfg80211_beacon_data *bcon)
{
int rc;
struct wil6210_priv *wil = wiphy_to_wil(wiphy);
/* FW do not form regular beacon, so bcon IE's are not set
* For the DMG bcon, when it will be supported, bcon IE's will
* be reused; add something like:
* wmi_set_ie(wil, WMI_FRAME_BEACON, bcon->beacon_ies_len,
* bcon->beacon_ies);
*/
rc = wmi_set_ie(wil, WMI_FRAME_PROBE_RESP, probe_ies_len, probe_ies);
if (rc) {
wil_err(wil, "set_ie(PROBE_RESP) failed\n");
rc = wmi_set_ie(wil, WMI_FRAME_PROBE_RESP, bcon->proberesp_ies_len,
bcon->proberesp_ies);
if (rc)
return rc;
}
rc = wmi_set_ie(wil, WMI_FRAME_ASSOC_RESP, assoc_ies_len, assoc_ies);
if (rc) {
wil_err(wil, "set_ie(ASSOC_RESP) failed\n");
rc = wmi_set_ie(wil, WMI_FRAME_ASSOC_RESP, bcon->assocresp_ies_len,
bcon->assocresp_ies);
#if 0 /* to use beacon IE's, remove this #if 0 */
if (rc)
return rc;
}
return 0;
rc = wmi_set_ie(wil, WMI_FRAME_BEACON, bcon->tail_len, bcon->tail);
#endif
return rc;
}
static int _wil_cfg80211_start_ap(struct wiphy *wiphy,
struct net_device *ndev,
const u8 *ssid, size_t ssid_len, u32 privacy,
int bi, u8 chan,
size_t probe_ies_len, const u8 *probe_ies,
size_t assoc_ies_len, const u8 *assoc_ies,
struct cfg80211_beacon_data *bcon,
u8 hidden_ssid)
{
struct wil6210_priv *wil = wiphy_to_wil(wiphy);
@ -792,8 +783,7 @@ static int _wil_cfg80211_start_ap(struct wiphy *wiphy,
if (rc)
goto out;
rc = _wil_cfg80211_set_ies(wiphy, probe_ies_len, probe_ies,
assoc_ies_len, assoc_ies);
rc = _wil_cfg80211_set_ies(wiphy, bcon);
if (rc)
goto out;
@ -827,27 +817,20 @@ static int wil_cfg80211_change_beacon(struct wiphy *wiphy,
struct cfg80211_beacon_data *bcon)
{
struct wil6210_priv *wil = wiphy_to_wil(wiphy);
struct ieee80211_mgmt *f = (struct ieee80211_mgmt *)bcon->probe_resp;
size_t hlen = offsetof(struct ieee80211_mgmt, u.probe_resp.variable);
const u8 *pr_ies = NULL;
size_t pr_ies_len = 0;
int rc;
u32 privacy = 0;
wil_dbg_misc(wil, "%s()\n", __func__);
wil_print_bcon_data(bcon);
if (bcon->probe_resp_len > hlen) {
pr_ies = f->u.probe_resp.variable;
pr_ies_len = bcon->probe_resp_len - hlen;
}
if (wil_fix_bcon(wil, bcon)) {
wil_dbg_misc(wil, "Fixed bcon\n");
wil_print_bcon_data(bcon);
}
if (pr_ies && cfg80211_find_ie(WLAN_EID_RSN, pr_ies, pr_ies_len))
if (bcon->proberesp_ies &&
cfg80211_find_ie(WLAN_EID_RSN, bcon->proberesp_ies,
bcon->proberesp_ies_len))
privacy = 1;
/* in case privacy has changed, need to restart the AP */
@ -860,14 +843,10 @@ static int wil_cfg80211_change_beacon(struct wiphy *wiphy,
rc = _wil_cfg80211_start_ap(wiphy, ndev, wdev->ssid,
wdev->ssid_len, privacy,
wdev->beacon_interval,
wil->channel, pr_ies_len, pr_ies,
bcon->assocresp_ies_len,
bcon->assocresp_ies,
wil->channel, bcon,
wil->hidden_ssid);
} else {
rc = _wil_cfg80211_set_ies(wiphy, pr_ies_len, pr_ies,
bcon->assocresp_ies_len,
bcon->assocresp_ies);
rc = _wil_cfg80211_set_ies(wiphy, bcon);
}
return rc;
@ -882,10 +861,6 @@ static int wil_cfg80211_start_ap(struct wiphy *wiphy,
struct ieee80211_channel *channel = info->chandef.chan;
struct cfg80211_beacon_data *bcon = &info->beacon;
struct cfg80211_crypto_settings *crypto = &info->crypto;
struct ieee80211_mgmt *f = (struct ieee80211_mgmt *)bcon->probe_resp;
size_t hlen = offsetof(struct ieee80211_mgmt, u.probe_resp.variable);
const u8 *pr_ies = NULL;
size_t pr_ies_len = 0;
u8 hidden_ssid;
wil_dbg_misc(wil, "%s()\n", __func__);
@ -925,11 +900,6 @@ static int wil_cfg80211_start_ap(struct wiphy *wiphy,
wil_print_bcon_data(bcon);
wil_print_crypto(wil, crypto);
if (bcon->probe_resp_len > hlen) {
pr_ies = f->u.probe_resp.variable;
pr_ies_len = bcon->probe_resp_len - hlen;
}
if (wil_fix_bcon(wil, bcon)) {
wil_dbg_misc(wil, "Fixed bcon\n");
wil_print_bcon_data(bcon);
@ -938,10 +908,7 @@ static int wil_cfg80211_start_ap(struct wiphy *wiphy,
rc = _wil_cfg80211_start_ap(wiphy, ndev,
info->ssid, info->ssid_len, info->privacy,
info->beacon_interval, channel->hw_value,
pr_ies_len, pr_ies,
bcon->assocresp_ies_len,
bcon->assocresp_ies,
hidden_ssid);
bcon, hidden_ssid);
return rc;
}


@ -62,7 +62,7 @@ static void wil_print_vring(struct seq_file *s, struct wil6210_priv *wil,
seq_printf(s, " swhead = %d\n", vring->swhead);
seq_printf(s, " hwtail = [0x%08x] -> ", vring->hwtail);
if (x) {
v = ioread32(x);
v = readl(x);
seq_printf(s, "0x%08x = %d\n", v, v);
} else {
seq_puts(s, "???\n");
@ -268,7 +268,7 @@ static const struct file_operations fops_mbox = {
static int wil_debugfs_iomem_x32_set(void *data, u64 val)
{
iowrite32(val, (void __iomem *)data);
writel(val, (void __iomem *)data);
wmb(); /* make sure write propagated to HW */
return 0;
@ -276,7 +276,7 @@ static int wil_debugfs_iomem_x32_set(void *data, u64 val)
static int wil_debugfs_iomem_x32_get(void *data, u64 *val)
{
*val = ioread32((void __iomem *)data);
*val = readl((void __iomem *)data);
return 0;
}
@ -306,7 +306,7 @@ static int wil_debugfs_ulong_get(void *data, u64 *val)
}
DEFINE_SIMPLE_ATTRIBUTE(wil_fops_ulong, wil_debugfs_ulong_get,
wil_debugfs_ulong_set, "%llu\n");
wil_debugfs_ulong_set, "0x%llx\n");
static struct dentry *wil_debugfs_create_ulong(const char *name, umode_t mode,
struct dentry *parent,
@ -477,7 +477,7 @@ static int wil_memread_debugfs_show(struct seq_file *s, void *data)
void __iomem *a = wmi_buffer(wil, cpu_to_le32(mem_addr));
if (a)
seq_printf(s, "[0x%08x] = 0x%08x\n", mem_addr, ioread32(a));
seq_printf(s, "[0x%08x] = 0x%08x\n", mem_addr, readl(a));
else
seq_printf(s, "[0x%08x] = INVALID\n", mem_addr);
@ -1344,6 +1344,7 @@ static void wil_print_rxtid(struct seq_file *s, struct wil_tid_ampdu_rx *r)
{
int i;
u16 index = ((r->head_seq_num - r->ssn) & 0xfff) % r->buf_size;
unsigned long long drop_dup = r->drop_dup, drop_old = r->drop_old;
seq_printf(s, "([%2d] %3d TU) 0x%03x [", r->buf_size, r->timeout,
r->head_seq_num);
@ -1353,7 +1354,10 @@ static void wil_print_rxtid(struct seq_file *s, struct wil_tid_ampdu_rx *r)
else
seq_printf(s, "%c", r->reorder_buf[i] ? '*' : '_');
}
seq_printf(s, "] last drop 0x%03x\n", r->ssn_last_drop);
seq_printf(s,
"] total %llu drop %llu (dup %llu + old %llu) last 0x%03x\n",
r->total, drop_dup + drop_old, drop_dup, drop_old,
r->ssn_last_drop);
}
static int wil_sta_debugfs_show(struct seq_file *s, void *data)
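
NOTE: the ioread32()/iowrite32() -> readl()/writel() conversions here
(and in fw_inc.c, interrupt.c and ioctl.c below) are safe because
wil6210 registers are always memory-mapped I/O obtained via ioremap;
readl()/writel() are the MMIO-only accessors, without the port-I/O
dispatch the ioread*() family carries. The equivalence being relied
on, sketched:

void __iomem *a = wil->csr + HOSTADDR(reg);

u32 v = readl(a);       /* same as ioread32(a) for ioremap()ed MMIO */
writel(v, a);           /* same as iowrite32(v, a) likewise */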


@ -50,19 +50,13 @@ static int wil_ethtoolops_get_coalesce(struct net_device *ndev,
wil_dbg_misc(wil, "%s()\n", __func__);
tx_itr_en = ioread32(wil->csr +
HOSTADDR(RGF_DMA_ITR_TX_CNT_CTL));
tx_itr_en = wil_r(wil, RGF_DMA_ITR_TX_CNT_CTL);
if (tx_itr_en & BIT_DMA_ITR_TX_CNT_CTL_EN)
tx_itr_val =
ioread32(wil->csr +
HOSTADDR(RGF_DMA_ITR_TX_CNT_TRSH));
tx_itr_val = wil_r(wil, RGF_DMA_ITR_TX_CNT_TRSH);
rx_itr_en = ioread32(wil->csr +
HOSTADDR(RGF_DMA_ITR_RX_CNT_CTL));
rx_itr_en = wil_r(wil, RGF_DMA_ITR_RX_CNT_CTL);
if (rx_itr_en & BIT_DMA_ITR_RX_CNT_CTL_EN)
rx_itr_val =
ioread32(wil->csr +
HOSTADDR(RGF_DMA_ITR_RX_CNT_TRSH));
rx_itr_val = wil_r(wil, RGF_DMA_ITR_RX_CNT_TRSH);
cp->tx_coalesce_usecs = tx_itr_val;
cp->rx_coalesce_usecs = rx_itr_val;
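
NOTE: wil_r()/wil_w() (and the wil_s()/wil_c() set/clear variants used
below in main.c) replace the R/W/S/C macros each .c file used to
redefine locally. Their presumed shape, assuming they wrap the same
readl/writel-plus-HOSTADDR pattern the macros had:

static inline u32 wil_r(struct wil6210_priv *wil, u32 reg)
{
        return readl(wil->csr + HOSTADDR(reg));
}

static inline void wil_w(struct wil6210_priv *wil, u32 reg, u32 val)
{
        writel(val, wil->csr + HOSTADDR(reg));
}

static inline void wil_s(struct wil6210_priv *wil, u32 reg, u32 val)
{
        wil_w(wil, reg, wil_r(wil, reg) | val);         /* set bits */
}

static inline void wil_c(struct wil6210_priv *wil, u32 reg, u32 val)
{
        wil_w(wil, reg, wil_r(wil, reg) & ~val);        /* clear bits */
}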


@ -22,16 +22,6 @@
MODULE_FIRMWARE(WIL_FW_NAME);
MODULE_FIRMWARE(WIL_FW2_NAME);
/* target operations */
/* register read */
#define R(a) ioread32(wil->csr + HOSTADDR(a))
/* register write. wmb() to make sure it is completed */
#define W(a, v) do { iowrite32(v, wil->csr + HOSTADDR(a)); wmb(); } while (0)
/* register set = read, OR, write */
#define S(a, v) W(a, R(a) | v)
/* register clear = read, AND with inverted, write */
#define C(a, v) W(a, R(a) & ~v)
static
void wil_memset_toio_32(volatile void __iomem *dst, u32 val,
size_t count)


@ -221,12 +221,12 @@ static int fw_handle_direct_write(struct wil6210_priv *wil, const void *data,
FW_ADDR_CHECK(dst, block[i].addr, "address");
x = ioread32(dst);
x = readl(dst);
y = (x & m) | (v & ~m);
wil_dbg_fw(wil, "write [0x%08x] <== 0x%08x "
"(old 0x%08x val 0x%08x mask 0x%08x)\n",
le32_to_cpu(block[i].addr), y, x, v, m);
iowrite32(y, dst);
writel(y, dst);
wmb(); /* finish before processing next record */
}
@ -239,18 +239,18 @@ static int gw_write(struct wil6210_priv *wil, void __iomem *gwa_addr,
{
unsigned delay = 0;
iowrite32(a, gwa_addr);
iowrite32(gw_cmd, gwa_cmd);
writel(a, gwa_addr);
writel(gw_cmd, gwa_cmd);
wmb(); /* finish before activate gw */
iowrite32(WIL_FW_GW_CTL_RUN, gwa_ctl); /* activate gw */
writel(WIL_FW_GW_CTL_RUN, gwa_ctl); /* activate gw */
do {
udelay(1); /* typical time is few usec */
if (delay++ > 100) {
wil_err_fw(wil, "gw timeout\n");
return -EINVAL;
}
} while (ioread32(gwa_ctl) & WIL_FW_GW_CTL_BUSY); /* gw done? */
} while (readl(gwa_ctl) & WIL_FW_GW_CTL_BUSY); /* gw done? */
return 0;
}
@ -305,7 +305,7 @@ static int fw_handle_gateway_data(struct wil6210_priv *wil, const void *data,
wil_dbg_fw(wil, " gw write[%3d] [0x%08x] <== 0x%08x\n",
i, a, v);
iowrite32(v, gwa_val);
writel(v, gwa_val);
rc = gw_write(wil, gwa_addr, gwa_cmd, gwa_ctl, gw_cmd, a);
if (rc)
return rc;
@ -372,7 +372,7 @@ static int fw_handle_gateway_data4(struct wil6210_priv *wil, const void *data,
sizeof(v), false);
for (k = 0; k < ARRAY_SIZE(block->value); k++)
iowrite32(v[k], gwa_val[k]);
writel(v[k], gwa_val[k]);
rc = gw_write(wil, gwa_addr, gwa_cmd, gwa_ctl, gw_cmd, a);
if (rc)
return rc;


@ -61,13 +61,13 @@ static inline void wil_icr_clear(u32 x, void __iomem *addr)
static inline void wil_icr_clear(u32 x, void __iomem *addr)
{
iowrite32(x, addr);
writel(x, addr);
}
#endif /* defined(CONFIG_WIL6210_ISR_COR) */
static inline u32 wil_ioread32_and_clear(void __iomem *addr)
{
u32 x = ioread32(addr);
u32 x = readl(addr);
wil_icr_clear(x, addr);
@ -76,54 +76,47 @@ static inline u32 wil_ioread32_and_clear(void __iomem *addr)
static void wil6210_mask_irq_tx(struct wil6210_priv *wil)
{
iowrite32(WIL6210_IRQ_DISABLE, wil->csr +
HOSTADDR(RGF_DMA_EP_TX_ICR) +
offsetof(struct RGF_ICR, IMS));
wil_w(wil, RGF_DMA_EP_TX_ICR + offsetof(struct RGF_ICR, IMS),
WIL6210_IRQ_DISABLE);
}
static void wil6210_mask_irq_rx(struct wil6210_priv *wil)
{
iowrite32(WIL6210_IRQ_DISABLE, wil->csr +
HOSTADDR(RGF_DMA_EP_RX_ICR) +
offsetof(struct RGF_ICR, IMS));
wil_w(wil, RGF_DMA_EP_RX_ICR + offsetof(struct RGF_ICR, IMS),
WIL6210_IRQ_DISABLE);
}
static void wil6210_mask_irq_misc(struct wil6210_priv *wil)
{
iowrite32(WIL6210_IRQ_DISABLE, wil->csr +
HOSTADDR(RGF_DMA_EP_MISC_ICR) +
offsetof(struct RGF_ICR, IMS));
wil_w(wil, RGF_DMA_EP_MISC_ICR + offsetof(struct RGF_ICR, IMS),
WIL6210_IRQ_DISABLE);
}
static void wil6210_mask_irq_pseudo(struct wil6210_priv *wil)
{
wil_dbg_irq(wil, "%s()\n", __func__);
iowrite32(WIL6210_IRQ_DISABLE, wil->csr +
HOSTADDR(RGF_DMA_PSEUDO_CAUSE_MASK_SW));
wil_w(wil, RGF_DMA_PSEUDO_CAUSE_MASK_SW, WIL6210_IRQ_DISABLE);
clear_bit(wil_status_irqen, wil->status);
}
void wil6210_unmask_irq_tx(struct wil6210_priv *wil)
{
iowrite32(WIL6210_IMC_TX, wil->csr +
HOSTADDR(RGF_DMA_EP_TX_ICR) +
offsetof(struct RGF_ICR, IMC));
wil_w(wil, RGF_DMA_EP_TX_ICR + offsetof(struct RGF_ICR, IMC),
WIL6210_IMC_TX);
}
void wil6210_unmask_irq_rx(struct wil6210_priv *wil)
{
iowrite32(WIL6210_IMC_RX, wil->csr +
HOSTADDR(RGF_DMA_EP_RX_ICR) +
offsetof(struct RGF_ICR, IMC));
wil_w(wil, RGF_DMA_EP_RX_ICR + offsetof(struct RGF_ICR, IMC),
WIL6210_IMC_RX);
}
static void wil6210_unmask_irq_misc(struct wil6210_priv *wil)
{
iowrite32(WIL6210_IMC_MISC, wil->csr +
HOSTADDR(RGF_DMA_EP_MISC_ICR) +
offsetof(struct RGF_ICR, IMC));
wil_w(wil, RGF_DMA_EP_MISC_ICR + offsetof(struct RGF_ICR, IMC),
WIL6210_IMC_MISC);
}
static void wil6210_unmask_irq_pseudo(struct wil6210_priv *wil)
@ -132,8 +125,7 @@ static void wil6210_unmask_irq_pseudo(struct wil6210_priv *wil)
set_bit(wil_status_irqen, wil->status);
iowrite32(WIL6210_IRQ_PSEUDO_MASK, wil->csr +
HOSTADDR(RGF_DMA_PSEUDO_CAUSE_MASK_SW));
wil_w(wil, RGF_DMA_PSEUDO_CAUSE_MASK_SW, WIL6210_IRQ_PSEUDO_MASK);
}
void wil_mask_irq(struct wil6210_priv *wil)
@ -150,12 +142,12 @@ void wil_unmask_irq(struct wil6210_priv *wil)
{
wil_dbg_irq(wil, "%s()\n", __func__);
iowrite32(WIL_ICR_ICC_VALUE, wil->csr + HOSTADDR(RGF_DMA_EP_RX_ICR) +
offsetof(struct RGF_ICR, ICC));
iowrite32(WIL_ICR_ICC_VALUE, wil->csr + HOSTADDR(RGF_DMA_EP_TX_ICR) +
offsetof(struct RGF_ICR, ICC));
iowrite32(WIL_ICR_ICC_VALUE, wil->csr + HOSTADDR(RGF_DMA_EP_MISC_ICR) +
offsetof(struct RGF_ICR, ICC));
wil_w(wil, RGF_DMA_EP_RX_ICR + offsetof(struct RGF_ICR, ICC),
WIL_ICR_ICC_VALUE);
wil_w(wil, RGF_DMA_EP_TX_ICR + offsetof(struct RGF_ICR, ICC),
WIL_ICR_ICC_VALUE);
wil_w(wil, RGF_DMA_EP_MISC_ICR + offsetof(struct RGF_ICR, ICC),
WIL_ICR_ICC_VALUE);
wil6210_unmask_irq_pseudo(wil);
wil6210_unmask_irq_tx(wil);
@ -163,9 +155,6 @@ void wil_unmask_irq(struct wil6210_priv *wil)
wil6210_unmask_irq_misc(wil);
}
/* target write operation */
#define W(a, v) do { iowrite32(v, wil->csr + HOSTADDR(a)); wmb(); } while (0)
void wil_configure_interrupt_moderation(struct wil6210_priv *wil)
{
wil_dbg_irq(wil, "%s()\n", __func__);
@ -177,44 +166,42 @@ void wil_configure_interrupt_moderation(struct wil6210_priv *wil)
return;
/* Disable and clear tx counter before (re)configuration */
W(RGF_DMA_ITR_TX_CNT_CTL, BIT_DMA_ITR_TX_CNT_CTL_CLR);
W(RGF_DMA_ITR_TX_CNT_TRSH, wil->tx_max_burst_duration);
wil_w(wil, RGF_DMA_ITR_TX_CNT_CTL, BIT_DMA_ITR_TX_CNT_CTL_CLR);
wil_w(wil, RGF_DMA_ITR_TX_CNT_TRSH, wil->tx_max_burst_duration);
wil_info(wil, "set ITR_TX_CNT_TRSH = %d usec\n",
wil->tx_max_burst_duration);
/* Configure TX max burst duration timer to use usec units */
W(RGF_DMA_ITR_TX_CNT_CTL,
BIT_DMA_ITR_TX_CNT_CTL_EN | BIT_DMA_ITR_TX_CNT_CTL_EXT_TIC_SEL);
wil_w(wil, RGF_DMA_ITR_TX_CNT_CTL,
BIT_DMA_ITR_TX_CNT_CTL_EN | BIT_DMA_ITR_TX_CNT_CTL_EXT_TIC_SEL);
/* Disable and clear tx idle counter before (re)configuration */
W(RGF_DMA_ITR_TX_IDL_CNT_CTL, BIT_DMA_ITR_TX_IDL_CNT_CTL_CLR);
W(RGF_DMA_ITR_TX_IDL_CNT_TRSH, wil->tx_interframe_timeout);
wil_w(wil, RGF_DMA_ITR_TX_IDL_CNT_CTL, BIT_DMA_ITR_TX_IDL_CNT_CTL_CLR);
wil_w(wil, RGF_DMA_ITR_TX_IDL_CNT_TRSH, wil->tx_interframe_timeout);
wil_info(wil, "set ITR_TX_IDL_CNT_TRSH = %d usec\n",
wil->tx_interframe_timeout);
/* Configure TX max burst duration timer to use usec units */
W(RGF_DMA_ITR_TX_IDL_CNT_CTL, BIT_DMA_ITR_TX_IDL_CNT_CTL_EN |
BIT_DMA_ITR_TX_IDL_CNT_CTL_EXT_TIC_SEL);
wil_w(wil, RGF_DMA_ITR_TX_IDL_CNT_CTL, BIT_DMA_ITR_TX_IDL_CNT_CTL_EN |
BIT_DMA_ITR_TX_IDL_CNT_CTL_EXT_TIC_SEL);
/* Disable and clear rx counter before (re)configuration */
W(RGF_DMA_ITR_RX_CNT_CTL, BIT_DMA_ITR_RX_CNT_CTL_CLR);
W(RGF_DMA_ITR_RX_CNT_TRSH, wil->rx_max_burst_duration);
wil_w(wil, RGF_DMA_ITR_RX_CNT_CTL, BIT_DMA_ITR_RX_CNT_CTL_CLR);
wil_w(wil, RGF_DMA_ITR_RX_CNT_TRSH, wil->rx_max_burst_duration);
wil_info(wil, "set ITR_RX_CNT_TRSH = %d usec\n",
wil->rx_max_burst_duration);
/* Configure TX max burst duration timer to use usec units */
W(RGF_DMA_ITR_RX_CNT_CTL,
BIT_DMA_ITR_RX_CNT_CTL_EN | BIT_DMA_ITR_RX_CNT_CTL_EXT_TIC_SEL);
wil_w(wil, RGF_DMA_ITR_RX_CNT_CTL,
BIT_DMA_ITR_RX_CNT_CTL_EN | BIT_DMA_ITR_RX_CNT_CTL_EXT_TIC_SEL);
/* Disable and clear rx idle counter before (re)configuration */
W(RGF_DMA_ITR_RX_IDL_CNT_CTL, BIT_DMA_ITR_RX_IDL_CNT_CTL_CLR);
W(RGF_DMA_ITR_RX_IDL_CNT_TRSH, wil->rx_interframe_timeout);
wil_w(wil, RGF_DMA_ITR_RX_IDL_CNT_CTL, BIT_DMA_ITR_RX_IDL_CNT_CTL_CLR);
wil_w(wil, RGF_DMA_ITR_RX_IDL_CNT_TRSH, wil->rx_interframe_timeout);
wil_info(wil, "set ITR_RX_IDL_CNT_TRSH = %d usec\n",
wil->rx_interframe_timeout);
/* Configure TX max burst duration timer to use usec units */
W(RGF_DMA_ITR_RX_IDL_CNT_CTL, BIT_DMA_ITR_RX_IDL_CNT_CTL_EN |
BIT_DMA_ITR_RX_IDL_CNT_CTL_EXT_TIC_SEL);
wil_w(wil, RGF_DMA_ITR_RX_IDL_CNT_CTL, BIT_DMA_ITR_RX_IDL_CNT_CTL_EN |
BIT_DMA_ITR_RX_IDL_CNT_CTL_EXT_TIC_SEL);
}
#undef W
static irqreturn_t wil6210_irq_rx(int irq, void *cookie)
{
struct wil6210_priv *wil = cookie;
@ -452,27 +439,24 @@ static int wil6210_debug_irq_mask(struct wil6210_priv *wil, u32 pseudo_cause)
u32 icr_rx = wil_ioread32_and_clear(wil->csr +
HOSTADDR(RGF_DMA_EP_RX_ICR) +
offsetof(struct RGF_ICR, ICR));
u32 imv_rx = ioread32(wil->csr +
HOSTADDR(RGF_DMA_EP_RX_ICR) +
offsetof(struct RGF_ICR, IMV));
u32 imv_rx = wil_r(wil, RGF_DMA_EP_RX_ICR +
offsetof(struct RGF_ICR, IMV));
u32 icm_tx = wil_ioread32_and_clear(wil->csr +
HOSTADDR(RGF_DMA_EP_TX_ICR) +
offsetof(struct RGF_ICR, ICM));
u32 icr_tx = wil_ioread32_and_clear(wil->csr +
HOSTADDR(RGF_DMA_EP_TX_ICR) +
offsetof(struct RGF_ICR, ICR));
u32 imv_tx = ioread32(wil->csr +
HOSTADDR(RGF_DMA_EP_TX_ICR) +
offsetof(struct RGF_ICR, IMV));
u32 imv_tx = wil_r(wil, RGF_DMA_EP_TX_ICR +
offsetof(struct RGF_ICR, IMV));
u32 icm_misc = wil_ioread32_and_clear(wil->csr +
HOSTADDR(RGF_DMA_EP_MISC_ICR) +
offsetof(struct RGF_ICR, ICM));
u32 icr_misc = wil_ioread32_and_clear(wil->csr +
HOSTADDR(RGF_DMA_EP_MISC_ICR) +
offsetof(struct RGF_ICR, ICR));
u32 imv_misc = ioread32(wil->csr +
HOSTADDR(RGF_DMA_EP_MISC_ICR) +
offsetof(struct RGF_ICR, IMV));
u32 imv_misc = wil_r(wil, RGF_DMA_EP_MISC_ICR +
offsetof(struct RGF_ICR, IMV));
wil_err(wil, "IRQ when it should be masked: pseudo 0x%08x\n"
"Rx icm:icr:imv 0x%08x 0x%08x 0x%08x\n"
"Tx icm:icr:imv 0x%08x 0x%08x 0x%08x\n"
@ -492,7 +476,7 @@ static irqreturn_t wil6210_hardirq(int irq, void *cookie)
{
irqreturn_t rc = IRQ_HANDLED;
struct wil6210_priv *wil = cookie;
u32 pseudo_cause = ioread32(wil->csr + HOSTADDR(RGF_DMA_PSEUDO_CAUSE));
u32 pseudo_cause = wil_r(wil, RGF_DMA_PSEUDO_CAUSE);
/**
* pseudo_cause is Clear-On-Read, no need to ACK
@ -541,48 +525,12 @@ static irqreturn_t wil6210_hardirq(int irq, void *cookie)
return rc;
}
static int wil6210_request_3msi(struct wil6210_priv *wil, int irq)
{
int rc;
/*
* IRQ's are in the following order:
* - Tx
* - Rx
* - Misc
*/
rc = request_irq(irq, wil6210_irq_tx, IRQF_SHARED,
WIL_NAME"_tx", wil);
if (rc)
return rc;
rc = request_irq(irq + 1, wil6210_irq_rx, IRQF_SHARED,
WIL_NAME"_rx", wil);
if (rc)
goto free0;
rc = request_threaded_irq(irq + 2, wil6210_irq_misc,
wil6210_irq_misc_thread,
IRQF_SHARED, WIL_NAME"_misc", wil);
if (rc)
goto free1;
return 0;
/* error branch */
free1:
free_irq(irq + 1, wil);
free0:
free_irq(irq, wil);
return rc;
}
/* can't use wil_ioread32_and_clear because ICC value is not set yet */
static inline void wil_clear32(void __iomem *addr)
{
u32 x = ioread32(addr);
u32 x = readl(addr);
iowrite32(x, addr);
writel(x, addr);
}
void wil6210_clear_irq(struct wil6210_priv *wil)
@ -596,19 +544,16 @@ void wil6210_clear_irq(struct wil6210_priv *wil)
wmb(); /* make sure write completed */
}
int wil6210_init_irq(struct wil6210_priv *wil, int irq)
int wil6210_init_irq(struct wil6210_priv *wil, int irq, bool use_msi)
{
int rc;
wil_dbg_misc(wil, "%s() n_msi=%d\n", __func__, wil->n_msi);
wil_dbg_misc(wil, "%s(%s)\n", __func__, use_msi ? "MSI" : "INTx");
if (wil->n_msi == 3)
rc = wil6210_request_3msi(wil, irq);
else
rc = request_threaded_irq(irq, wil6210_hardirq,
wil6210_thread_irq,
wil->n_msi ? 0 : IRQF_SHARED,
WIL_NAME, wil);
rc = request_threaded_irq(irq, wil6210_hardirq,
wil6210_thread_irq,
use_msi ? 0 : IRQF_SHARED,
WIL_NAME, wil);
return rc;
}
@ -618,8 +563,4 @@ void wil6210_fini_irq(struct wil6210_priv *wil, int irq)
wil_mask_irq(wil);
free_irq(irq, wil);
if (wil->n_msi == 3) {
free_irq(irq + 1, wil);
free_irq(irq + 2, wil);
}
}
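
NOTE: dropping the 3-MSI mode leaves two cases, a single MSI vector or
shared INTx, so wil6210_init_irq() now just takes a use_msi flag and
applies IRQF_SHARED only for INTx. The presumed caller side in
pcie_bus.c (shape assumed from the module-parameter hunk at the end of
this series):

if (use_msi && pci_enable_msi(pdev)) {
        wil_err(wil, "pci_enable_msi failed, falling back to INTx\n");
        use_msi = false;
}
rc = wil6210_init_irq(wil, pdev->irq, use_msi);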


@ -76,11 +76,11 @@ static int wil_ioc_memio_dword(struct wil6210_priv *wil, void __user *data)
/* operation */
switch (io.op & wil_mmio_op_mask) {
case wil_mmio_read:
io.val = ioread32(a);
io.val = readl(a);
need_copy = true;
break;
case wil_mmio_write:
iowrite32(io.val, a);
writel(io.val, a);
wmb(); /* make sure write propagated to HW */
break;
default:


@ -21,6 +21,7 @@
#include "wil6210.h"
#include "txrx.h"
#include "wmi.h"
#include "boot_loader.h"
#define WAIT_FOR_DISCONNECT_TIMEOUT_MS 2000
#define WAIT_FOR_DISCONNECT_INTERVAL_MS 10
@ -270,8 +271,7 @@ static void wil_scan_timer_fn(ulong x)
clear_bit(wil_status_fwready, wil->status);
wil_err(wil, "Scan timeout detected, start fw error recovery\n");
wil->recovery_state = fw_recovery_pending;
schedule_work(&wil->fw_error_worker);
wil_fw_error_recovery(wil);
}
static int wil_wait_for_recovery(struct wil6210_priv *wil)
@ -528,26 +528,16 @@ void wil_priv_deinit(struct wil6210_priv *wil)
destroy_workqueue(wil->wmi_wq);
}
/* target operations */
/* register read */
#define R(a) ioread32(wil->csr + HOSTADDR(a))
/* register write. wmb() to make sure it is completed */
#define W(a, v) do { iowrite32(v, wil->csr + HOSTADDR(a)); wmb(); } while (0)
/* register set = read, OR, write */
#define S(a, v) W(a, R(a) | v)
/* register clear = read, AND with inverted, write */
#define C(a, v) W(a, R(a) & ~v)
static inline void wil_halt_cpu(struct wil6210_priv *wil)
{
W(RGF_USER_USER_CPU_0, BIT_USER_USER_CPU_MAN_RST);
W(RGF_USER_MAC_CPU_0, BIT_USER_MAC_CPU_MAN_RST);
wil_w(wil, RGF_USER_USER_CPU_0, BIT_USER_USER_CPU_MAN_RST);
wil_w(wil, RGF_USER_MAC_CPU_0, BIT_USER_MAC_CPU_MAN_RST);
}
static inline void wil_release_cpu(struct wil6210_priv *wil)
{
/* Start CPU */
W(RGF_USER_USER_CPU_0, 1);
wil_w(wil, RGF_USER_USER_CPU_0, 1);
}
static int wil_target_reset(struct wil6210_priv *wil)
@ -558,56 +548,60 @@ static int wil_target_reset(struct wil6210_priv *wil)
wil_dbg_misc(wil, "Resetting \"%s\"...\n", wil->hw_name);
/* Clear MAC link up */
S(RGF_HP_CTRL, BIT(15));
S(RGF_USER_CLKS_CTL_SW_RST_MASK_0, BIT_HPAL_PERST_FROM_PAD);
S(RGF_USER_CLKS_CTL_SW_RST_MASK_0, BIT_CAR_PERST_RST);
wil_s(wil, RGF_HP_CTRL, BIT(15));
wil_s(wil, RGF_USER_CLKS_CTL_SW_RST_MASK_0, BIT_HPAL_PERST_FROM_PAD);
wil_s(wil, RGF_USER_CLKS_CTL_SW_RST_MASK_0, BIT_CAR_PERST_RST);
wil_halt_cpu(wil);
/* clear all boot loader "ready" bits */
W(RGF_USER_BL + offsetof(struct RGF_BL, ready), 0);
wil_w(wil, RGF_USER_BL +
offsetof(struct bl_dedicated_registers_v0, boot_loader_ready), 0);
/* Clear Fw Download notification */
C(RGF_USER_USAGE_6, BIT(0));
wil_c(wil, RGF_USER_USAGE_6, BIT(0));
S(RGF_CAF_OSC_CONTROL, BIT_CAF_OSC_XTAL_EN);
wil_s(wil, RGF_CAF_OSC_CONTROL, BIT_CAF_OSC_XTAL_EN);
/* XTAL stabilization should take about 3ms */
usleep_range(5000, 7000);
x = R(RGF_CAF_PLL_LOCK_STATUS);
x = wil_r(wil, RGF_CAF_PLL_LOCK_STATUS);
if (!(x & BIT_CAF_OSC_DIG_XTAL_STABLE)) {
wil_err(wil, "Xtal stabilization timeout\n"
"RGF_CAF_PLL_LOCK_STATUS = 0x%08x\n", x);
return -ETIME;
}
/* switch 10k to XTAL*/
C(RGF_USER_SPARROW_M_4, BIT_SPARROW_M_4_SEL_SLEEP_OR_REF);
wil_c(wil, RGF_USER_SPARROW_M_4, BIT_SPARROW_M_4_SEL_SLEEP_OR_REF);
/* 40 MHz */
C(RGF_USER_CLKS_CTL_0, BIT_USER_CLKS_CAR_AHB_SW_SEL);
wil_c(wil, RGF_USER_CLKS_CTL_0, BIT_USER_CLKS_CAR_AHB_SW_SEL);
W(RGF_USER_CLKS_CTL_EXT_SW_RST_VEC_0, 0x3ff81f);
W(RGF_USER_CLKS_CTL_EXT_SW_RST_VEC_1, 0xf);
wil_w(wil, RGF_USER_CLKS_CTL_EXT_SW_RST_VEC_0, 0x3ff81f);
wil_w(wil, RGF_USER_CLKS_CTL_EXT_SW_RST_VEC_1, 0xf);
W(RGF_USER_CLKS_CTL_SW_RST_VEC_2, 0xFE000000);
W(RGF_USER_CLKS_CTL_SW_RST_VEC_1, 0x0000003F);
W(RGF_USER_CLKS_CTL_SW_RST_VEC_3, 0x000000f0);
W(RGF_USER_CLKS_CTL_SW_RST_VEC_0, 0xFFE7FE00);
wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_2, 0xFE000000);
wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_1, 0x0000003F);
wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_3, 0x000000f0);
wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_0, 0xFFE7FE00);
W(RGF_USER_CLKS_CTL_EXT_SW_RST_VEC_0, 0x0);
W(RGF_USER_CLKS_CTL_EXT_SW_RST_VEC_1, 0x0);
wil_w(wil, RGF_USER_CLKS_CTL_EXT_SW_RST_VEC_0, 0x0);
wil_w(wil, RGF_USER_CLKS_CTL_EXT_SW_RST_VEC_1, 0x0);
W(RGF_USER_CLKS_CTL_SW_RST_VEC_3, 0);
W(RGF_USER_CLKS_CTL_SW_RST_VEC_2, 0);
W(RGF_USER_CLKS_CTL_SW_RST_VEC_1, 0);
W(RGF_USER_CLKS_CTL_SW_RST_VEC_0, 0);
wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_3, 0);
wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_2, 0);
wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_1, 0);
wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_0, 0);
W(RGF_USER_CLKS_CTL_SW_RST_VEC_3, 0x00000003);
W(RGF_USER_CLKS_CTL_SW_RST_VEC_2, 0x00008000); /* reset A2 PCIE AHB */
wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_3, 0x00000003);
/* reset A2 PCIE AHB */
wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_2, 0x00008000);
W(RGF_USER_CLKS_CTL_SW_RST_VEC_0, 0);
wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_0, 0);
/* wait until device ready. typical time is 20..80 msec */
do {
msleep(RST_DELAY);
x = R(RGF_USER_BL + offsetof(struct RGF_BL, ready));
x = wil_r(wil, RGF_USER_BL +
offsetof(struct bl_dedicated_registers_v0,
boot_loader_ready));
if (x1 != x) {
wil_dbg_misc(wil, "BL.ready 0x%08x => 0x%08x\n", x1, x);
x1 = x;
@ -617,13 +611,13 @@ static int wil_target_reset(struct wil6210_priv *wil)
x);
return -ETIME;
}
} while (x != BIT_BL_READY);
} while (x != BL_READY);
C(RGF_USER_CLKS_CTL_0, BIT_USER_CLKS_RST_PWGD);
wil_c(wil, RGF_USER_CLKS_CTL_0, BIT_USER_CLKS_RST_PWGD);
/* enable fix for HW bug related to the SA/DA swap in AP Rx */
S(RGF_DMA_OFUL_NID_0, BIT_DMA_OFUL_NID_0_RX_EXT_TR_EN |
BIT_DMA_OFUL_NID_0_RX_EXT_A3_SRC);
wil_s(wil, RGF_DMA_OFUL_NID_0, BIT_DMA_OFUL_NID_0_RX_EXT_TR_EN |
BIT_DMA_OFUL_NID_0_RX_EXT_A3_SRC);
wil_dbg_misc(wil, "Reset completed in %d ms\n", delay * RST_DELAY);
return 0;
@ -641,29 +635,93 @@ void wil_mbox_ring_le2cpus(struct wil6210_mbox_ring *r)
static int wil_get_bl_info(struct wil6210_priv *wil)
{
struct net_device *ndev = wil_to_ndev(wil);
struct RGF_BL bl;
union {
struct bl_dedicated_registers_v0 bl0;
struct bl_dedicated_registers_v1 bl1;
} bl;
u32 bl_ver;
u8 *mac;
u16 rf_status;
wil_memcpy_fromio_32(&bl, wil->csr + HOSTADDR(RGF_USER_BL), sizeof(bl));
le32_to_cpus(&bl.ready);
le32_to_cpus(&bl.version);
le32_to_cpus(&bl.rf_type);
le32_to_cpus(&bl.baseband_type);
wil_memcpy_fromio_32(&bl, wil->csr + HOSTADDR(RGF_USER_BL),
sizeof(bl));
bl_ver = le32_to_cpu(bl.bl0.boot_loader_struct_version);
mac = bl.bl0.mac_address;
if (!is_valid_ether_addr(bl.mac_address)) {
wil_err(wil, "BL: Invalid MAC %pM\n", bl.mac_address);
if (bl_ver == 0) {
le32_to_cpus(&bl.bl0.rf_type);
le32_to_cpus(&bl.bl0.baseband_type);
rf_status = 0; /* actually, unknown */
wil_info(wil,
"Boot Loader struct v%d: MAC = %pM RF = 0x%08x bband = 0x%08x\n",
bl_ver, mac,
bl.bl0.rf_type, bl.bl0.baseband_type);
wil_info(wil, "Boot Loader build unknown for struct v0\n");
} else {
le16_to_cpus(&bl.bl1.rf_type);
rf_status = le16_to_cpu(bl.bl1.rf_status);
le32_to_cpus(&bl.bl1.baseband_type);
le16_to_cpus(&bl.bl1.bl_version_subminor);
le16_to_cpus(&bl.bl1.bl_version_build);
wil_info(wil,
"Boot Loader struct v%d: MAC = %pM RF = 0x%04x (status 0x%04x) bband = 0x%08x\n",
bl_ver, mac,
bl.bl1.rf_type, rf_status,
bl.bl1.baseband_type);
wil_info(wil, "Boot Loader build %d.%d.%d.%d\n",
bl.bl1.bl_version_major, bl.bl1.bl_version_minor,
bl.bl1.bl_version_subminor, bl.bl1.bl_version_build);
}
if (!is_valid_ether_addr(mac)) {
wil_err(wil, "BL: Invalid MAC %pM\n", mac);
return -EINVAL;
}
ether_addr_copy(ndev->perm_addr, bl.mac_address);
ether_addr_copy(ndev->perm_addr, mac);
if (!is_valid_ether_addr(ndev->dev_addr))
ether_addr_copy(ndev->dev_addr, bl.mac_address);
wil_info(wil,
"Boot Loader: ver = %d MAC = %pM RF = 0x%08x bband = 0x%08x\n",
bl.version, bl.mac_address, bl.rf_type, bl.baseband_type);
ether_addr_copy(ndev->dev_addr, mac);
if (rf_status) {/* bad RF cable? */
wil_err(wil, "RF communication error 0x%04x",
rf_status);
return -EAGAIN;
}
return 0;
}
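
wil_get_bl_info() relies on both bootloader struct versions keeping the version word at the same offset, so it can copy the registers once through a union and then pick the right decoder. A compilable sketch of that version-tagged layout trick, with illustrative field names rather than the real register map:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct bl_v0 {
        uint32_t ready;
        uint32_t version;       /* same offset in every version */
        uint32_t rf_type;
        uint8_t  mac[6];
};

struct bl_v1 {
        uint32_t ready;
        uint32_t version;
        uint16_t rf_type;
        uint16_t rf_status;     /* new in v1 */
        uint8_t  mac[6];
};

static void parse_bl(const void *mmio)
{
        union {
                struct bl_v0 v0;
                struct bl_v1 v1;
        } bl;

        memcpy(&bl, mmio, sizeof(bl)); /* one bulk copy, like memcpy_fromio */
        if (bl.v0.version == 0)
                printf("v0: rf 0x%08x\n", (unsigned)bl.v0.rf_type);
        else
                printf("v%u: rf 0x%04x status 0x%04x\n",
                       (unsigned)bl.v1.version, bl.v1.rf_type, bl.v1.rf_status);
}

int main(void)
{
        struct bl_v1 hw = { .ready = 1, .version = 1,
                            .rf_type = 0x10, .rf_status = 0 };
        parse_bl(&hw);
        return 0;
}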
static void wil_bl_crash_info(struct wil6210_priv *wil, bool is_err)
{
u32 bl_assert_code, bl_assert_blink, bl_magic_number;
u32 bl_ver = wil_r(wil, RGF_USER_BL +
offsetof(struct bl_dedicated_registers_v0,
boot_loader_struct_version));
if (bl_ver < 2)
return;
bl_assert_code = wil_r(wil, RGF_USER_BL +
offsetof(struct bl_dedicated_registers_v1,
bl_assert_code));
bl_assert_blink = wil_r(wil, RGF_USER_BL +
offsetof(struct bl_dedicated_registers_v1,
bl_assert_blink));
bl_magic_number = wil_r(wil, RGF_USER_BL +
offsetof(struct bl_dedicated_registers_v1,
bl_magic_number));
if (is_err) {
wil_err(wil,
"BL assert code 0x%08x blink 0x%08x magic 0x%08x\n",
bl_assert_code, bl_assert_blink, bl_magic_number);
} else {
wil_dbg_misc(wil,
"BL assert code 0x%08x blink 0x%08x magic 0x%08x\n",
bl_assert_code, bl_assert_blink, bl_magic_number);
}
}
static int wil_wait_for_fw_ready(struct wil6210_priv *wil)
{
ulong to = msecs_to_jiffies(1000);
@ -690,9 +748,6 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
wil_dbg_misc(wil, "%s()\n", __func__);
if (wil->hw_version == HW_VER_UNKNOWN)
return -ENODEV;
WARN_ON(!mutex_is_locked(&wil->mutex));
WARN_ON(test_bit(wil_status_napi_en, wil->status));
@ -707,6 +762,9 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
return 0;
}
if (wil->hw_version == HW_VER_UNKNOWN)
return -ENODEV;
cancel_work_sync(&wil->disconnect_worker);
wil6210_disconnect(wil, NULL, WLAN_REASON_DEAUTH_LEAVING, false);
wil_bcast_fini(wil);
@ -729,12 +787,17 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
flush_workqueue(wil->wq_service);
flush_workqueue(wil->wmi_wq);
wil_bl_crash_info(wil, false);
rc = wil_target_reset(wil);
wil_rx_fini(wil);
if (rc)
if (rc) {
wil_bl_crash_info(wil, true);
return rc;
}
rc = wil_get_bl_info(wil);
if (rc == -EAGAIN && !load_fw) /* ignore RF error if not going up */
rc = 0;
if (rc)
return rc;
@ -752,7 +815,7 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
return rc;
/* Mark FW as loaded from host */
S(RGF_USER_USAGE_6, 1);
wil_s(wil, RGF_USER_USAGE_6, 1);
/* clear any interrupts which on-card-firmware
* may have set
@ -760,8 +823,8 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
wil6210_clear_irq(wil);
/* CAF_ICR - clear and mask */
/* it is W1C, clear by writing back same value */
S(RGF_CAF_ICR + offsetof(struct RGF_ICR, ICR), 0);
W(RGF_CAF_ICR + offsetof(struct RGF_ICR, IMV), ~0);
wil_s(wil, RGF_CAF_ICR + offsetof(struct RGF_ICR, ICR), 0);
wil_w(wil, RGF_CAF_ICR + offsetof(struct RGF_ICR, IMV), ~0);
wil_release_cpu(wil);
}
@ -785,11 +848,6 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
return rc;
}
#undef R
#undef W
#undef S
#undef C
void wil_fw_error_recovery(struct wil6210_priv *wil)
{
wil_dbg_misc(wil, "starting fw error recovery\n");

View File

@ -173,7 +173,10 @@ void *wil_if_alloc(struct device *dev)
wil_set_ethtoolops(ndev);
ndev->ieee80211_ptr = wdev;
ndev->hw_features = NETIF_F_HW_CSUM | NETIF_F_RXCSUM |
NETIF_F_SG | NETIF_F_GRO;
NETIF_F_SG | NETIF_F_GRO |
NETIF_F_TSO | NETIF_F_TSO6 |
NETIF_F_RXHASH;
ndev->features |= ndev->hw_features;
SET_NETDEV_DEV(ndev, wiphy_dev(wdev->wiphy));
wdev->netdev = ndev;

View File

@ -21,16 +21,14 @@
#include "wil6210.h"
static int use_msi = 1;
module_param(use_msi, int, S_IRUGO);
MODULE_PARM_DESC(use_msi,
" Use MSI interrupt: "
"0 - don't, 1 - (default) - single, or 3");
static bool use_msi = true;
module_param(use_msi, bool, S_IRUGO);
MODULE_PARM_DESC(use_msi, " Use MSI interrupt, default - true");
static
void wil_set_capabilities(struct wil6210_priv *wil)
{
u32 rev_id = ioread32(wil->csr + HOSTADDR(RGF_USER_JTAG_DEV_ID));
u32 rev_id = wil_r(wil, RGF_USER_JTAG_DEV_ID);
bitmap_zero(wil->hw_capabilities, hw_capability_last);
@ -50,24 +48,12 @@ void wil_set_capabilities(struct wil6210_priv *wil)
void wil_disable_irq(struct wil6210_priv *wil)
{
int irq = wil->pdev->irq;
disable_irq(irq);
if (wil->n_msi == 3) {
disable_irq(irq + 1);
disable_irq(irq + 2);
}
disable_irq(wil->pdev->irq);
}
void wil_enable_irq(struct wil6210_priv *wil)
{
int irq = wil->pdev->irq;
enable_irq(irq);
if (wil->n_msi == 3) {
enable_irq(irq + 1);
enable_irq(irq + 2);
}
enable_irq(wil->pdev->irq);
}
/* Bus ops */
@ -80,6 +66,7 @@ static int wil_if_pcie_enable(struct wil6210_priv *wil)
* and only MSI should be used
*/
int msi_only = pdev->msi_enabled;
bool _use_msi = use_msi;
wil_dbg_misc(wil, "%s()\n", __func__);
@ -87,41 +74,20 @@ static int wil_if_pcie_enable(struct wil6210_priv *wil)
pci_set_master(pdev);
/*
* how many MSI interrupts to request?
*/
switch (use_msi) {
case 3:
case 1:
wil_dbg_misc(wil, "Setup %d MSI interrupts\n", use_msi);
break;
case 0:
wil_dbg_misc(wil, "MSI interrupts disabled, use INTx\n");
break;
default:
wil_err(wil, "Invalid use_msi=%d, default to 1\n", use_msi);
use_msi = 1;
}
wil_dbg_misc(wil, "Setup %s interrupt\n", use_msi ? "MSI" : "INTx");
if (use_msi == 3 && pci_enable_msi_range(pdev, 3, 3) < 0) {
wil_err(wil, "3 MSI mode failed, try 1 MSI\n");
use_msi = 1;
}
if (use_msi == 1 && pci_enable_msi(pdev)) {
if (use_msi && pci_enable_msi(pdev)) {
wil_err(wil, "pci_enable_msi failed, use INTx\n");
use_msi = 0;
_use_msi = false;
}
wil->n_msi = use_msi;
if ((wil->n_msi == 0) && msi_only) {
if (!_use_msi && msi_only) {
wil_err(wil, "Interrupt pin not routed, unable to use INTx\n");
rc = -ENODEV;
goto stop_master;
}
rc = wil6210_init_irq(wil, pdev->irq);
rc = wil6210_init_irq(wil, pdev->irq, _use_msi);
if (rc)
goto stop_master;
@ -293,11 +259,80 @@ static const struct pci_device_id wil6210_pcie_ids[] = {
};
MODULE_DEVICE_TABLE(pci, wil6210_pcie_ids);
#ifdef CONFIG_PM
static int wil6210_suspend(struct device *dev, bool is_runtime)
{
int rc = 0;
struct pci_dev *pdev = to_pci_dev(dev);
struct wil6210_priv *wil = pci_get_drvdata(pdev);
wil_dbg_pm(wil, "%s(%s)\n", __func__,
is_runtime ? "runtime" : "system");
rc = wil_can_suspend(wil, is_runtime);
if (rc)
goto out;
rc = wil_suspend(wil, is_runtime);
if (rc)
goto out;
/* TODO: how do I bring the card into a low power state? */
/* disable bus mastering */
pci_clear_master(pdev);
/* PCI will call pci_save_state(pdev) and pci_prepare_to_sleep(pdev) */
out:
return rc;
}
static int wil6210_resume(struct device *dev, bool is_runtime)
{
int rc = 0;
struct pci_dev *pdev = to_pci_dev(dev);
struct wil6210_priv *wil = pci_get_drvdata(pdev);
wil_dbg_pm(wil, "%s(%s)\n", __func__,
is_runtime ? "runtime" : "system");
/* allow master */
pci_set_master(pdev);
rc = wil_resume(wil, is_runtime);
if (rc)
pci_clear_master(pdev);
return rc;
}
#ifdef CONFIG_PM_SLEEP
static int wil6210_pm_suspend(struct device *dev)
{
return wil6210_suspend(dev, false);
}
static int wil6210_pm_resume(struct device *dev)
{
return wil6210_resume(dev, false);
}
#endif /* CONFIG_PM_SLEEP */
#endif /* CONFIG_PM */
static const struct dev_pm_ops wil6210_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(wil6210_pm_suspend, wil6210_pm_resume)
};
static struct pci_driver wil6210_driver = {
.probe = wil_pcie_probe,
.remove = wil_pcie_remove,
.id_table = wil6210_pcie_ids,
.name = WIL_NAME,
.driver = {
.pm = &wil6210_pm_ops,
},
};
static int __init wil6210_driver_init(void)

View File

@ -0,0 +1,98 @@
/*
* Copyright (c) 2014 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include "wil6210.h"
int wil_can_suspend(struct wil6210_priv *wil, bool is_runtime)
{
int rc = 0;
struct wireless_dev *wdev = wil->wdev;
wil_dbg_pm(wil, "%s(%s)\n", __func__,
is_runtime ? "runtime" : "system");
switch (wdev->iftype) {
case NL80211_IFTYPE_MONITOR:
case NL80211_IFTYPE_STATION:
case NL80211_IFTYPE_P2P_CLIENT:
break;
/* AP-like interface - can't suspend */
default:
wil_dbg_pm(wil, "AP-like interface\n");
rc = -EBUSY;
break;
}
wil_dbg_pm(wil, "%s(%s) => %s (%d)\n", __func__,
is_runtime ? "runtime" : "system", rc ? "No" : "Yes", rc);
return rc;
}
int wil_suspend(struct wil6210_priv *wil, bool is_runtime)
{
int rc = 0;
struct net_device *ndev = wil_to_ndev(wil);
wil_dbg_pm(wil, "%s(%s)\n", __func__,
is_runtime ? "runtime" : "system");
/* if netif up, hardware is alive, shut it down */
if (ndev->flags & IFF_UP) {
rc = wil_down(wil);
if (rc) {
wil_err(wil, "wil_down : %d\n", rc);
goto out;
}
}
if (wil->platform_ops.suspend)
rc = wil->platform_ops.suspend(wil->platform_handle);
out:
wil_dbg_pm(wil, "%s(%s) => %d\n", __func__,
is_runtime ? "runtime" : "system", rc);
return rc;
}
int wil_resume(struct wil6210_priv *wil, bool is_runtime)
{
int rc = 0;
struct net_device *ndev = wil_to_ndev(wil);
wil_dbg_pm(wil, "%s(%s)\n", __func__,
is_runtime ? "runtime" : "system");
if (wil->platform_ops.resume) {
rc = wil->platform_ops.resume(wil->platform_handle);
if (rc) {
wil_err(wil, "platform_ops.resume : %d\n", rc);
goto out;
}
}
/* if netif up, bring hardware up
* During open(), IFF_UP is set after the actual device method
* invocation. This prevents a recursive call to wil_up()
*/
if (ndev->flags & IFF_UP)
rc = wil_up(wil);
out:
wil_dbg_pm(wil, "%s(%s) => %d\n", __func__,
is_runtime ? "runtime" : "system", rc);
return rc;
}
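
Note the ordering discipline here: suspend quiesces the interface before the platform hook runs, and resume mirrors it, platform first and wil_up() only if the interface had been up. A small sketch of that symmetry, with all stubs hypothetical:

#include <stdbool.h>
#include <stdio.h>

static bool if_up = true;

static int iface_down(void) { puts("iface down"); if_up = false; return 0; }
static int iface_up(void)   { puts("iface up");   if_up = true;  return 0; }
static int platform_suspend(void) { puts("platform suspend"); return 0; }
static int platform_resume(void)  { puts("platform resume");  return 0; }

static int my_suspend(void)
{
        int rc = 0;

        if (if_up)
                rc = iface_down();      /* quiesce hardware first */
        if (!rc)
                rc = platform_suspend();
        return rc;
}

static int my_resume(bool was_up)
{
        int rc = platform_resume();     /* mirror order: platform first */

        if (!rc && was_up)
                rc = iface_up();
        return rc;
}

int main(void)
{
        bool was_up = if_up;

        my_suspend();
        my_resume(was_up);
        return 0;
}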

View File

@ -121,6 +121,7 @@ __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock)
goto out;
}
r->total++;
hseq = r->head_seq_num;
/** Due to the race between WMI events, where BACK establishment
@ -153,6 +154,9 @@ __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock)
/* frame with out of date sequence number */
if (seq_less(seq, r->head_seq_num)) {
r->ssn_last_drop = seq;
r->drop_old++;
wil_dbg_txrx(wil, "Rx drop: old seq 0x%03x head 0x%03x\n",
seq, r->head_seq_num);
dev_kfree_skb(skb);
goto out;
}
@ -173,6 +177,8 @@ __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock)
/* check if we already stored this frame */
if (r->reorder_buf[index]) {
r->drop_dup++;
wil_dbg_txrx(wil, "Rx drop: dup seq 0x%03x\n", seq);
dev_kfree_skb(skb);
goto out;
}
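
Both drop decisions rest on modular 12-bit sequence arithmetic: a frame is "older" than head_seq_num when the wrapped distance falls in the upper half of the window. A sketch of that comparison (seq_less() mirrors the usual mac80211-style definition; the values are made up):

#include <stdbool.h>
#include <stdio.h>

#define SN_MASK 0xfff                   /* sequence numbers are 12 bit */

static unsigned seq_sub(unsigned a, unsigned b)
{
        return (a - b) & SN_MASK;
}

static bool seq_less(unsigned a, unsigned b)
{
        /* a is "before" b if the modular distance is in the upper half */
        return seq_sub(a, b) > (SN_MASK >> 1);
}

int main(void)
{
        unsigned head = 0x005;

        printf("0xffe < head? %d (wrapped, old -> drop_old)\n",
               seq_less(0xffe, head));
        printf("0x008 < head? %d (in window, keep)\n",
               seq_less(0x008, head));
        return 0;
}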

View File

@ -509,7 +509,7 @@ static int wil_rx_refill(struct wil6210_priv *wil, int count)
break;
}
}
iowrite32(v->swtail, wil->csr + HOSTADDR(v->hwtail));
wil_w(wil, v->hwtail, v->swtail);
return rc;
}
@ -541,6 +541,14 @@ void wil_netif_rx_any(struct sk_buff *skb, struct net_device *ndev)
[GRO_DROP] = "GRO_DROP",
};
if (ndev->features & NETIF_F_RXHASH)
/* fake L4 hash to ensure it won't be re-calculated later;
* set the hash to any non-zero value to activate the rps
* mechanism. The core will be chosen according
* to the user-level rps configuration.
*/
skb_set_hash(skb, 1, PKT_HASH_TYPE_L4);
skb_orphan(skb);
if (wdev->iftype == NL80211_IFTYPE_AP && !wil->ap_isolate) {
@ -1058,14 +1066,52 @@ static int wil_tx_desc_map(struct vring_tx_desc *d, dma_addr_t pa, u32 len,
static inline
void wil_tx_desc_set_nr_frags(struct vring_tx_desc *d, int nr_frags)
{
d->mac.d[2] |= ((nr_frags + 1) <<
MAC_CFG_DESC_TX_2_NUM_OF_DESCRIPTORS_POS);
d->mac.d[2] |= (nr_frags << MAC_CFG_DESC_TX_2_NUM_OF_DESCRIPTORS_POS);
}
static int wil_tx_desc_offload_cksum_set(struct wil6210_priv *wil,
struct vring_tx_desc *d,
struct sk_buff *skb)
/**
* Sets the descriptor @d up for csum and/or TSO offloading. The corresponding
* @skb is used to obtain the protocol and headers length.
* @tso_desc_type is a descriptor type for TSO: 0 - a header, 1 - first data,
* 2 - middle, 3 - last descriptor.
*/
static void wil_tx_desc_offload_setup_tso(struct vring_tx_desc *d,
struct sk_buff *skb,
int tso_desc_type, bool is_ipv4,
int tcp_hdr_len, int skb_net_hdr_len)
{
d->dma.b11 = ETH_HLEN; /* MAC header length */
d->dma.b11 |= is_ipv4 << DMA_CFG_DESC_TX_OFFLOAD_CFG_L3T_IPV4_POS;
d->dma.d0 |= (2 << DMA_CFG_DESC_TX_0_L4_TYPE_POS);
/* L4 header len: TCP header length */
d->dma.d0 |= (tcp_hdr_len & DMA_CFG_DESC_TX_0_L4_LENGTH_MSK);
/* Setup TSO: bit and desc type */
d->dma.d0 |= (BIT(DMA_CFG_DESC_TX_0_TCP_SEG_EN_POS)) |
(tso_desc_type << DMA_CFG_DESC_TX_0_SEGMENT_BUF_DETAILS_POS);
d->dma.d0 |= (is_ipv4 << DMA_CFG_DESC_TX_0_IPV4_CHECKSUM_EN_POS);
d->dma.ip_length = skb_net_hdr_len;
/* Enable TCP/UDP checksum */
d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_TCP_UDP_CHECKSUM_EN_POS);
/* Calculate pseudo-header */
d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_PSEUDO_HEADER_CALC_EN_POS);
}
/**
* Sets the descriptor @d up for csum. The corresponding
* @skb is used to obtain the protocol and headers length.
* Returns the protocol: 0 - not TCP, 1 - TCPv4, 2 - TCPv6.
* Note, if d==NULL, the function only returns the protocol result.
*
* It is very similar to previous wil_tx_desc_offload_setup_tso. This
* is "if unrolling" to optimize the critical path.
*/
static int wil_tx_desc_offload_setup(struct vring_tx_desc *d,
struct sk_buff *skb)
{
int protocol;
if (skb->ip_summed != CHECKSUM_PARTIAL)
@ -1110,6 +1156,305 @@ static int wil_tx_desc_offload_cksum_set(struct wil6210_priv *wil,
return 0;
}
static inline void wil_tx_last_desc(struct vring_tx_desc *d)
{
d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_CMD_EOP_POS) |
BIT(DMA_CFG_DESC_TX_0_CMD_MARK_WB_POS) |
BIT(DMA_CFG_DESC_TX_0_CMD_DMA_IT_POS);
}
static inline void wil_set_tx_desc_last_tso(volatile struct vring_tx_desc *d)
{
d->dma.d0 |= wil_tso_type_lst <<
DMA_CFG_DESC_TX_0_SEGMENT_BUF_DETAILS_POS;
}
static int __wil_tx_vring_tso(struct wil6210_priv *wil, struct vring *vring,
struct sk_buff *skb)
{
struct device *dev = wil_to_dev(wil);
/* point to descriptors in shared memory */
volatile struct vring_tx_desc *_desc = NULL, *_hdr_desc,
*_first_desc = NULL;
/* pointers to shadow descriptors */
struct vring_tx_desc desc_mem, hdr_desc_mem, first_desc_mem,
*d = &hdr_desc_mem, *hdr_desc = &hdr_desc_mem,
*first_desc = &first_desc_mem;
/* pointer to shadow descriptors' context */
struct wil_ctx *hdr_ctx, *first_ctx = NULL;
int descs_used = 0; /* total number of used descriptors */
int sg_desc_cnt = 0; /* number of descriptors for current mss */
u32 swhead = vring->swhead;
int used, avail = wil_vring_avail_tx(vring);
int nr_frags = skb_shinfo(skb)->nr_frags;
int min_desc_required = nr_frags + 1;
int mss = skb_shinfo(skb)->gso_size; /* payload size w/o headers */
int f, len, hdrlen, headlen;
int vring_index = vring - wil->vring_tx;
struct vring_tx_data *txdata = &wil->vring_tx_data[vring_index];
uint i = swhead;
dma_addr_t pa;
const skb_frag_t *frag = NULL;
int rem_data = mss;
int lenmss;
int hdr_compensation_need = true;
int desc_tso_type = wil_tso_type_first;
bool is_ipv4;
int tcp_hdr_len;
int skb_net_hdr_len;
int gso_type;
wil_dbg_txrx(wil, "%s() %d bytes to vring %d\n",
__func__, skb->len, vring_index);
if (unlikely(!txdata->enabled))
return -EINVAL;
/* A typical 4K page holds 3-4 payloads; we assume each fragment
* is a full payload, which is how min_desc_required has been
* calculated. In reality we might need more or fewer descriptors;
* this is only the initial check.
*/
if (unlikely(avail < min_desc_required)) {
wil_err_ratelimited(wil,
"TSO: Tx ring[%2d] full. No space for %d fragments\n",
vring_index, min_desc_required);
return -ENOMEM;
}
/* Header Length = MAC header len + IP header len + TCP header len*/
hdrlen = ETH_HLEN +
(int)skb_network_header_len(skb) +
tcp_hdrlen(skb);
gso_type = skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV6 | SKB_GSO_TCPV4);
switch (gso_type) {
case SKB_GSO_TCPV4:
/* TCP v4, zero out the IP length and IPv4 checksum fields
* as required by the offloading doc
*/
ip_hdr(skb)->tot_len = 0;
ip_hdr(skb)->check = 0;
is_ipv4 = true;
break;
case SKB_GSO_TCPV6:
/* TCP v6, zero out the payload length */
ipv6_hdr(skb)->payload_len = 0;
is_ipv4 = false;
break;
default:
/* other than TCPv4 or TCPv6 types are not supported for TSO.
* It is also illegal for both to be set simultaneously
*/
return -EINVAL;
}
if (skb->ip_summed != CHECKSUM_PARTIAL)
return -EINVAL;
/* tcp header length and skb network header length are fixed for all
* the packet's descriptors - read them once here
*/
tcp_hdr_len = tcp_hdrlen(skb);
skb_net_hdr_len = skb_network_header_len(skb);
_hdr_desc = &vring->va[i].tx;
pa = dma_map_single(dev, skb->data, hdrlen, DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(dev, pa))) {
wil_err(wil, "TSO: Skb head DMA map error\n");
goto err_exit;
}
wil_tx_desc_map(hdr_desc, pa, hdrlen, vring_index);
wil_tx_desc_offload_setup_tso(hdr_desc, skb, wil_tso_type_hdr, is_ipv4,
tcp_hdr_len, skb_net_hdr_len);
wil_tx_last_desc(hdr_desc);
vring->ctx[i].mapped_as = wil_mapped_as_single;
hdr_ctx = &vring->ctx[i];
descs_used++;
headlen = skb_headlen(skb) - hdrlen;
for (f = headlen ? -1 : 0; f < nr_frags; f++) {
if (headlen) {
len = headlen;
wil_dbg_txrx(wil, "TSO: process skb head, len %u\n",
len);
} else {
frag = &skb_shinfo(skb)->frags[f];
len = frag->size;
wil_dbg_txrx(wil, "TSO: frag[%d]: len %u\n", f, len);
}
while (len) {
wil_dbg_txrx(wil,
"TSO: len %d, rem_data %d, descs_used %d\n",
len, rem_data, descs_used);
if (descs_used == avail) {
wil_err(wil, "TSO: ring overflow\n");
goto dma_error;
}
lenmss = min_t(int, rem_data, len);
i = (swhead + descs_used) % vring->size;
wil_dbg_txrx(wil, "TSO: lenmss %d, i %d\n", lenmss, i);
if (!headlen) {
pa = skb_frag_dma_map(dev, frag,
frag->size - len, lenmss,
DMA_TO_DEVICE);
vring->ctx[i].mapped_as = wil_mapped_as_page;
} else {
pa = dma_map_single(dev,
skb->data +
skb_headlen(skb) - headlen,
lenmss,
DMA_TO_DEVICE);
vring->ctx[i].mapped_as = wil_mapped_as_single;
headlen -= lenmss;
}
if (unlikely(dma_mapping_error(dev, pa)))
goto dma_error;
_desc = &vring->va[i].tx;
if (!_first_desc) {
_first_desc = _desc;
first_ctx = &vring->ctx[i];
d = first_desc;
} else {
d = &desc_mem;
}
wil_tx_desc_map(d, pa, lenmss, vring_index);
wil_tx_desc_offload_setup_tso(d, skb, desc_tso_type,
is_ipv4, tcp_hdr_len,
skb_net_hdr_len);
/* use tso_type_first only once */
desc_tso_type = wil_tso_type_mid;
descs_used++; /* desc used so far */
sg_desc_cnt++; /* desc used for this segment */
len -= lenmss;
rem_data -= lenmss;
wil_dbg_txrx(wil,
"TSO: len %d, rem_data %d, descs_used %d, sg_desc_cnt %d,\n",
len, rem_data, descs_used, sg_desc_cnt);
/* Close the segment if reached mss size or last frag*/
if (rem_data == 0 || (f == nr_frags - 1 && len == 0)) {
if (hdr_compensation_need) {
/* first segment includes the hdr desc for
* release
*/
hdr_ctx->nr_frags = sg_desc_cnt;
wil_tx_desc_set_nr_frags(first_desc,
sg_desc_cnt +
1);
hdr_compensation_need = false;
} else {
wil_tx_desc_set_nr_frags(first_desc,
sg_desc_cnt);
}
first_ctx->nr_frags = sg_desc_cnt - 1;
wil_tx_last_desc(d);
/* first descriptor may also be the last
* for this mss - make sure not to copy
* it twice
*/
if (first_desc != d)
*_first_desc = *first_desc;
/* last descriptor will be copied at the end
* of this TSO processing
*/
if (f < nr_frags - 1 || len > 0)
*_desc = *d;
rem_data = mss;
_first_desc = NULL;
sg_desc_cnt = 0;
} else if (first_desc != d) /* update mid descriptor */
*_desc = *d;
}
}
/* first descriptor may also be the last.
* in this case d pointer is invalid
*/
if (_first_desc == _desc)
d = first_desc;
/* Last data descriptor */
wil_set_tx_desc_last_tso(d);
*_desc = *d;
/* Fill the total number of descriptors in first desc (hdr)*/
wil_tx_desc_set_nr_frags(hdr_desc, descs_used);
*_hdr_desc = *hdr_desc;
/* hold reference to skb
* to prevent skb release before accounting
* in case of immediate "tx done"
*/
vring->ctx[i].skb = skb_get(skb);
/* performance monitoring */
used = wil_vring_used_tx(vring);
if (wil_val_in_range(vring_idle_trsh,
used, used + descs_used)) {
txdata->idle += get_cycles() - txdata->last_idle;
wil_dbg_txrx(wil, "Ring[%2d] not idle %d -> %d\n",
vring_index, used, used + descs_used);
}
/* advance swhead */
wil_vring_advance_head(vring, descs_used);
wil_dbg_txrx(wil, "TSO: Tx swhead %d -> %d\n", swhead, vring->swhead);
/* make sure all writes to descriptors (shared memory) are done before
* committing them to HW
*/
wmb();
wil_w(wil, vring->hwtail, vring->swhead);
return 0;
dma_error:
wil_err(wil, "TSO: DMA map page error\n");
while (descs_used > 0) {
struct wil_ctx *ctx;
i = (swhead + descs_used) % vring->size;
d = (struct vring_tx_desc *)&vring->va[i].tx;
_desc = &vring->va[i].tx;
*d = *_desc;
_desc->dma.status = TX_DMA_STATUS_DU;
ctx = &vring->ctx[i];
wil_txdesc_unmap(dev, d, ctx);
if (ctx->skb)
dev_kfree_skb_any(ctx->skb);
memset(ctx, 0, sizeof(*ctx));
descs_used--;
}
err_exit:
return -EINVAL;
}
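
The dma_error unwind above is the usual map-N-or-release-in-reverse pattern: remember how many descriptors were consumed and walk back over exactly those on failure. Reduced to its skeleton (map_buf()/unmap_buf() stand in for the dma_map_*()/dma_unmap_*() calls):

#include <stdio.h>

#define NBUF 4

static int map_buf(int i)
{
        if (i == 2)             /* simulate a mapping failure on buffer 2 */
                return -1;
        printf("mapped %d\n", i);
        return 0;
}

static void unmap_buf(int i)
{
        printf("unmapped %d\n", i);
}

int main(void)
{
        int used;

        for (used = 0; used < NBUF; used++)
                if (map_buf(used))
                        goto unwind;
        return 0;

unwind:
        while (used > 0) {
                used--;
                unmap_buf(used);
        }
        return -1;
}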
static int __wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
struct sk_buff *skb)
{
@ -1128,7 +1473,8 @@ static int __wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
bool mcast = (vring_index == wil->bcast_vring);
uint len = skb_headlen(skb);
wil_dbg_txrx(wil, "%s()\n", __func__);
wil_dbg_txrx(wil, "%s() %d bytes to vring %d\n",
__func__, skb->len, vring_index);
if (unlikely(!txdata->enabled))
return -EINVAL;
@ -1159,14 +1505,14 @@ static int __wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
d->mac.d[0] |= (1 << MAC_CFG_DESC_TX_0_MCS_INDEX_POS);
}
/* Process TCP/UDP checksum offloading */
if (unlikely(wil_tx_desc_offload_cksum_set(wil, d, skb))) {
if (unlikely(wil_tx_desc_offload_setup(d, skb))) {
wil_err(wil, "Tx[%2d] Failed to set cksum, drop packet\n",
vring_index);
goto dma_error;
}
vring->ctx[i].nr_frags = nr_frags;
wil_tx_desc_set_nr_frags(d, nr_frags);
wil_tx_desc_set_nr_frags(d, nr_frags + 1);
/* middle segments */
for (; f < nr_frags; f++) {
@ -1190,7 +1536,7 @@ static int __wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
* if it succeeded for 1-st descriptor,
* it will succeed here too
*/
wil_tx_desc_offload_cksum_set(wil, d, skb);
wil_tx_desc_offload_setup(d, skb);
}
/* for the last seg only */
d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_CMD_EOP_POS);
@ -1221,7 +1567,13 @@ static int __wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
wil_dbg_txrx(wil, "Tx[%2d] swhead %d -> %d\n", vring_index, swhead,
vring->swhead);
trace_wil6210_tx(vring_index, swhead, skb->len, nr_frags);
iowrite32(vring->swhead, wil->csr + HOSTADDR(vring->hwtail));
/* make sure all writes to descriptors (shared memory) are done before
* committing them to HW
*/
wmb();
wil_w(wil, vring->hwtail, vring->swhead);
return 0;
dma_error:
@ -1254,8 +1606,12 @@ static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
int rc;
spin_lock(&txdata->lock);
rc = __wil_tx_vring(wil, vring, skb);
rc = (skb_is_gso(skb) ? __wil_tx_vring_tso : __wil_tx_vring)
(wil, vring, skb);
spin_unlock(&txdata->lock);
return rc;
}
@ -1382,7 +1738,8 @@ int wil_tx_complete(struct wil6210_priv *wil, int ringid)
struct wil_ctx *ctx = &vring->ctx[vring->swtail];
/**
* For the fragmented skb, HW will set DU bit only for the
* last fragment. look for it
* last fragment. look for it.
* In TSO the first DU will include hdr desc
*/
int lf = (vring->swtail + ctx->nr_frags) % vring->size;
/* TODO: check we are not past head */

View File

@ -291,6 +291,14 @@ struct vring_tx_dma {
__le16 length;
} __packed;
/* TSO type used in dma descriptor d0 bits 11-12 */
enum {
wil_tso_type_hdr = 0,
wil_tso_type_first = 1,
wil_tso_type_mid = 2,
wil_tso_type_lst = 3,
};
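
These four types track how the driver slices a GSO skb: one header descriptor, then data descriptors each carrying at most min(remaining-in-segment, remaining-in-fragment) bytes, closing a segment whenever mss bytes have accumulated; this is the rem_data/lenmss bookkeeping in __wil_tx_vring_tso(). A runnable sketch of just that accounting, with made-up sizes:

#include <stdio.h>

int main(void)
{
        int frags[] = { 1000, 3000, 500 };  /* payload fragment sizes */
        int mss = 1460;
        int rem = mss, descs = 1;           /* 1 for the header descriptor */
        unsigned f;

        for (f = 0; f < sizeof(frags) / sizeof(frags[0]); f++) {
                int len = frags[f];

                while (len) {
                        int lenmss = len < rem ? len : rem;

                        descs++;
                        len -= lenmss;
                        rem -= lenmss;
                        if (rem == 0)       /* segment complete */
                                rem = mss;
                }
        }
        printf("total descriptors: %d\n", descs);
        return 0;
}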
/* Rx descriptor - MAC part
* [dword 0]
* bit 0.. 3 : tid:4 The QoS (b3-0) TID Field

View File

@ -127,16 +127,6 @@ struct RGF_ICR {
u32 IMC; /* Mask Clear, write 1 to clear */
} __packed;
struct RGF_BL {
u32 ready; /* 0x880A3C bit [0] */
#define BIT_BL_READY BIT(0)
u32 version; /* 0x880A40 version of the BL struct */
u32 rf_type; /* 0x880A44 ID of the connected RF */
u32 baseband_type; /* 0x880A48 ID of the baseband */
u8 mac_address[ETH_ALEN]; /* 0x880A4C permanent MAC */
u8 pad[2];
} __packed;
/* registers - FW addresses */
#define RGF_USER_USAGE_1 (0x880004)
#define RGF_USER_USAGE_6 (0x880018)
@ -262,9 +252,8 @@ enum {
};
/* popular locations */
#define HOST_MBOX HOSTADDR(RGF_USER_USER_SCRATCH_PAD)
#define HOST_SW_INT (HOSTADDR(RGF_USER_USER_ICR) + \
offsetof(struct RGF_ICR, ICS))
#define RGF_MBOX RGF_USER_USER_SCRATCH_PAD
#define HOST_MBOX HOSTADDR(RGF_MBOX)
#define SW_INT_MBOX BIT_USER_USER_ICR_SW_INT_2
/* ISR register bits */
@ -434,12 +423,12 @@ struct pci_dev;
* @ssn: Starting Sequence Number expected to be aggregated.
* @buf_size: buffer size for incoming A-MPDUs
* @timeout: reset timer value (in TUs).
* @ssn_last_drop: SSN of the last dropped frame
* @total: total number of processed incoming frames
* @drop_dup: duplicate frames dropped for this reorder buffer
* @drop_old: old frames dropped for this reorder buffer
* @dialog_token: dialog token for aggregation session
* @rcu_head: RCU head used for freeing this struct
*
* This structure's lifetime is managed by RCU, assignments to
* the array holding it must hold the aggregation mutex.
*
* @first_time: true when this buffer used 1-st time
*/
struct wil_tid_ampdu_rx {
struct sk_buff **reorder_buf;
@ -453,6 +442,9 @@ struct wil_tid_ampdu_rx {
u16 buf_size;
u16 timeout;
u16 ssn_last_drop;
unsigned long long total; /* frames processed */
unsigned long long drop_dup;
unsigned long long drop_old;
u8 dialog_token;
bool first_time; /* is it 1-st time this buffer used? */
};
@ -543,7 +535,6 @@ struct pmc_ctx {
struct wil6210_priv {
struct pci_dev *pdev;
int n_msi;
struct wireless_dev *wdev;
void __iomem *csr;
DECLARE_BITMAP(status, wil_status_last);
@ -656,6 +647,33 @@ void wil_info(struct wil6210_priv *wil, const char *fmt, ...);
#define wil_dbg_txrx(wil, fmt, arg...) wil_dbg(wil, "DBG[TXRX]" fmt, ##arg)
#define wil_dbg_wmi(wil, fmt, arg...) wil_dbg(wil, "DBG[ WMI]" fmt, ##arg)
#define wil_dbg_misc(wil, fmt, arg...) wil_dbg(wil, "DBG[MISC]" fmt, ##arg)
#define wil_dbg_pm(wil, fmt, arg...) wil_dbg(wil, "DBG[ PM ]" fmt, ##arg)
/* target operations */
/* register read */
static inline u32 wil_r(struct wil6210_priv *wil, u32 reg)
{
return readl(wil->csr + HOSTADDR(reg));
}
/* register write. wmb() to make sure it is completed */
static inline void wil_w(struct wil6210_priv *wil, u32 reg, u32 val)
{
writel(val, wil->csr + HOSTADDR(reg));
wmb(); /* wait for write to propagate to the HW */
}
/* register set = read, OR, write */
static inline void wil_s(struct wil6210_priv *wil, u32 reg, u32 val)
{
wil_w(wil, reg, wil_r(wil, reg) | val);
}
/* register clear = read, AND with inverted, write */
static inline void wil_c(struct wil6210_priv *wil, u32 reg, u32 val)
{
wil_w(wil, reg, wil_r(wil, reg) & ~val);
}
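
wil_s() and wil_c() are plain read-modify-write conveniences over wil_r()/wil_w(); note they are not atomic against a concurrent writer of the same register. The same idiom on an ordinary word:

#include <stdint.h>
#include <stdio.h>

static uint32_t reg;

static void reg_s(uint32_t val) { reg |= val; }   /* set = read, OR, write */
static void reg_c(uint32_t val) { reg &= ~val; }  /* clear = read, AND ~, write */

int main(void)
{
        reg_s(1u << 3 | 1u << 0);       /* set bits 0 and 3 */
        reg_c(1u << 0);                 /* clear bit 0 */
        printf("reg = 0x%08x\n", reg);  /* 0x00000008 */
        return 0;
}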
#if defined(CONFIG_DYNAMIC_DEBUG)
#define wil_hex_dump_txrx(prefix_str, prefix_type, rowsize, \
@ -746,7 +764,7 @@ void wil_back_tx_worker(struct work_struct *work);
void wil_back_tx_flush(struct wil6210_priv *wil);
void wil6210_clear_irq(struct wil6210_priv *wil);
int wil6210_init_irq(struct wil6210_priv *wil, int irq);
int wil6210_init_irq(struct wil6210_priv *wil, int irq, bool use_msi);
void wil6210_fini_irq(struct wil6210_priv *wil, int irq);
void wil_mask_irq(struct wil6210_priv *wil);
void wil_unmask_irq(struct wil6210_priv *wil);
@ -798,4 +816,8 @@ int wil_iftype_nl2wmi(enum nl80211_iftype type);
int wil_ioctl(struct wil6210_priv *wil, void __user *data, int cmd);
int wil_request_firmware(struct wil6210_priv *wil, const char *name);
int wil_can_suspend(struct wil6210_priv *wil, bool is_runtime);
int wil_suspend(struct wil6210_priv *wil, bool is_runtime);
int wil_resume(struct wil6210_priv *wil, bool is_runtime);
#endif /* __WIL6210_H__ */

View File

@ -14,7 +14,7 @@
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include "linux/device.h"
#include <linux/device.h>
#include "wil_platform.h"
int __init wil_platform_modinit(void)

View File

@ -228,8 +228,8 @@ static int __wmi_send(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len)
wil_dbg_wmi(wil, "Head 0x%08x -> 0x%08x\n", r->head, next_head);
/* wait till FW finish with previous command */
for (retry = 5; retry > 0; retry--) {
r->tail = ioread32(wil->csr + HOST_MBOX +
offsetof(struct wil6210_mbox_ctl, tx.tail));
r->tail = wil_r(wil, RGF_MBOX +
offsetof(struct wil6210_mbox_ctl, tx.tail));
if (next_head != r->tail)
break;
msleep(20);
@ -254,16 +254,16 @@ static int __wmi_send(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len)
wil_memcpy_toio_32(dst, &cmd, sizeof(cmd));
wil_memcpy_toio_32(dst + sizeof(cmd), buf, len);
/* mark entry as full */
iowrite32(1, wil->csr + HOSTADDR(r->head) +
offsetof(struct wil6210_mbox_ring_desc, sync));
wil_w(wil, r->head + offsetof(struct wil6210_mbox_ring_desc, sync), 1);
/* advance next ptr */
iowrite32(r->head = next_head, wil->csr + HOST_MBOX +
offsetof(struct wil6210_mbox_ctl, tx.head));
wil_w(wil, RGF_MBOX + offsetof(struct wil6210_mbox_ctl, tx.head),
r->head = next_head);
trace_wil6210_wmi_cmd(&cmd.wmi, buf, len);
/* interrupt to FW */
iowrite32(SW_INT_MBOX, wil->csr + HOST_SW_INT);
wil_w(wil, RGF_USER_USER_ICR + offsetof(struct RGF_ICR, ICS),
SW_INT_MBOX);
return 0;
}
@ -312,22 +312,44 @@ static void wmi_evt_rx_mgmt(struct wil6210_priv *wil, int id, void *d, int len)
struct wiphy *wiphy = wil_to_wiphy(wil);
struct ieee80211_mgmt *rx_mgmt_frame =
(struct ieee80211_mgmt *)data->payload;
int ch_no = data->info.channel+1;
u32 freq = ieee80211_channel_to_frequency(ch_no,
IEEE80211_BAND_60GHZ);
struct ieee80211_channel *channel = ieee80211_get_channel(wiphy, freq);
s32 signal = data->info.sqi;
__le16 fc = rx_mgmt_frame->frame_control;
u32 d_len = le32_to_cpu(data->info.len);
u16 d_status = le16_to_cpu(data->info.status);
int flen = len - offsetof(struct wmi_rx_mgmt_packet_event, payload);
int ch_no;
u32 freq;
struct ieee80211_channel *channel;
s32 signal;
__le16 fc;
u32 d_len;
u16 d_status;
wil_dbg_wmi(wil, "MGMT: channel %d MCS %d SNR %d SQI %d%%\n",
if (flen < 0) {
wil_err(wil, "MGMT Rx: short event, len %d\n", len);
return;
}
d_len = le32_to_cpu(data->info.len);
if (d_len != flen) {
wil_err(wil,
"MGMT Rx: length mismatch, d_len %d should be %d\n",
d_len, flen);
return;
}
ch_no = data->info.channel + 1;
freq = ieee80211_channel_to_frequency(ch_no, IEEE80211_BAND_60GHZ);
channel = ieee80211_get_channel(wiphy, freq);
signal = data->info.sqi;
d_status = le16_to_cpu(data->info.status);
fc = rx_mgmt_frame->frame_control;
wil_dbg_wmi(wil, "MGMT Rx: channel %d MCS %d SNR %d SQI %d%%\n",
data->info.channel, data->info.mcs, data->info.snr,
data->info.sqi);
wil_dbg_wmi(wil, "status 0x%04x len %d fc 0x%04x\n", d_status, d_len,
le16_to_cpu(fc));
wil_dbg_wmi(wil, "qid %d mid %d cid %d\n",
data->info.qid, data->info.mid, data->info.cid);
wil_hex_dump_wmi("MGMT Rx ", DUMP_PREFIX_OFFSET, 16, 1, rx_mgmt_frame,
d_len, true);
if (!channel) {
wil_err(wil, "Frame on unsupported channel\n");
@ -363,6 +385,17 @@ static void wmi_evt_rx_mgmt(struct wil6210_priv *wil, int id, void *d, int len)
}
}
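
The new checks reject an event shorter than its fixed header and then require the device-reported payload length to match what actually arrived, before any byte of the payload is trusted. A standalone sketch of that two-way validation (struct and names illustrative):

#include <stdint.h>
#include <stdio.h>

struct mgmt_event_hdr {
        uint32_t payload_len;   /* length the device claims follows */
};

static int handle_event(const void *data, int len)
{
        const struct mgmt_event_hdr *hdr = data;
        int flen = len - (int)sizeof(*hdr);

        if (flen < 0) {
                fprintf(stderr, "short event, len %d\n", len);
                return -1;
        }
        if ((int)hdr->payload_len != flen) {
                fprintf(stderr, "length mismatch: claimed %u, got %d\n",
                        (unsigned)hdr->payload_len, flen);
                return -1;
        }
        return 0;               /* payload[0..flen) is safe to parse */
}

int main(void)
{
        struct {
                struct mgmt_event_hdr h;
                uint8_t payload[8];
        } ev = { { 8 }, { 0 } };

        printf("ok=%d\n", handle_event(&ev, (int)sizeof(ev)));
        return 0;
}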
static void wmi_evt_tx_mgmt(struct wil6210_priv *wil, int id, void *d, int len)
{
struct wmi_tx_mgmt_packet_event *data = d;
struct ieee80211_mgmt *mgmt_frame =
(struct ieee80211_mgmt *)data->payload;
int flen = len - offsetof(struct wmi_tx_mgmt_packet_event, payload);
wil_hex_dump_wmi("MGMT Tx ", DUMP_PREFIX_OFFSET, 16, 1, mgmt_frame,
flen, true);
}
static void wmi_evt_scan_complete(struct wil6210_priv *wil, int id,
void *d, int len)
{
@ -659,6 +692,7 @@ static const struct {
{WMI_READY_EVENTID, wmi_evt_ready},
{WMI_FW_READY_EVENTID, wmi_evt_fw_ready},
{WMI_RX_MGMT_PACKET_EVENTID, wmi_evt_rx_mgmt},
{WMI_TX_MGMT_PACKET_EVENTID, wmi_evt_tx_mgmt},
{WMI_SCAN_COMPLETE_EVENTID, wmi_evt_scan_complete},
{WMI_CONNECT_EVENTID, wmi_evt_connect},
{WMI_DISCONNECT_EVENTID, wmi_evt_disconnect},
@ -695,8 +729,8 @@ void wmi_recv_cmd(struct wil6210_priv *wil)
u16 len;
bool q;
r->head = ioread32(wil->csr + HOST_MBOX +
offsetof(struct wil6210_mbox_ctl, rx.head));
r->head = wil_r(wil, RGF_MBOX +
offsetof(struct wil6210_mbox_ctl, rx.head));
if (r->tail == r->head)
break;
@ -734,8 +768,8 @@ void wmi_recv_cmd(struct wil6210_priv *wil)
cmd = (void *)&evt->event.wmi;
wil_memcpy_fromio_32(cmd, src, len);
/* mark entry as empty */
iowrite32(0, wil->csr + HOSTADDR(r->tail) +
offsetof(struct wil6210_mbox_ring_desc, sync));
wil_w(wil, r->tail +
offsetof(struct wil6210_mbox_ring_desc, sync), 0);
/* indicate */
if ((hdr.type == WIL_MBOX_HDR_TYPE_WMI) &&
(len >= sizeof(struct wil6210_mbox_hdr_wmi))) {
@ -754,8 +788,8 @@ void wmi_recv_cmd(struct wil6210_priv *wil)
/* advance tail */
r->tail = r->base + ((r->tail - r->base +
sizeof(struct wil6210_mbox_ring_desc)) % r->size);
iowrite32(r->tail, wil->csr + HOST_MBOX +
offsetof(struct wil6210_mbox_ctl, rx.tail));
wil_w(wil, RGF_MBOX +
offsetof(struct wil6210_mbox_ctl, rx.tail), r->tail);
/* add to the pending list */
spin_lock_irqsave(&wil->wmi_ev_lock, flags);
@ -988,12 +1022,21 @@ int wmi_add_cipher_key(struct wil6210_priv *wil, u8 key_index,
int wmi_set_ie(struct wil6210_priv *wil, u8 type, u16 ie_len, const void *ie)
{
static const char *const names[] = {
[WMI_FRAME_BEACON] = "BEACON",
[WMI_FRAME_PROBE_REQ] = "PROBE_REQ",
[WMI_FRAME_PROBE_RESP] = "WMI_FRAME_PROBE_RESP",
[WMI_FRAME_ASSOC_REQ] = "WMI_FRAME_ASSOC_REQ",
[WMI_FRAME_ASSOC_RESP] = "WMI_FRAME_ASSOC_RESP",
};
int rc;
u16 len = sizeof(struct wmi_set_appie_cmd) + ie_len;
struct wmi_set_appie_cmd *cmd = kzalloc(len, GFP_KERNEL);
if (!cmd)
return -ENOMEM;
if (!cmd) {
rc = -ENOMEM;
goto out;
}
if (!ie)
ie_len = 0;
@ -1003,6 +1046,12 @@ int wmi_set_ie(struct wil6210_priv *wil, u8 type, u16 ie_len, const void *ie)
memcpy(cmd->ie_info, ie, ie_len);
rc = wmi_send(wil, WMI_SET_APPIE_CMDID, cmd, len);
kfree(cmd);
out:
if (rc) {
const char *name = type < ARRAY_SIZE(names) ?
names[type] : "??";
wil_err(wil, "set_ie(%d %s) failed : %d\n", type, name, rc);
}
return rc;
}
@ -1129,15 +1178,42 @@ int wmi_get_temperature(struct wil6210_priv *wil, u32 *t_bb, u32 *t_rf)
int wmi_disconnect_sta(struct wil6210_priv *wil, const u8 *mac, u16 reason)
{
int rc;
u16 reason_code;
struct wmi_disconnect_sta_cmd cmd = {
.disconnect_reason = cpu_to_le16(reason),
};
struct {
struct wil6210_mbox_hdr_wmi wmi;
struct wmi_disconnect_event evt;
} __packed reply;
ether_addr_copy(cmd.dst_mac, mac);
wil_dbg_wmi(wil, "%s(%pM, reason %d)\n", __func__, mac, reason);
return wmi_send(wil, WMI_DISCONNECT_STA_CMDID, &cmd, sizeof(cmd));
rc = wmi_call(wil, WMI_DISCONNECT_STA_CMDID, &cmd, sizeof(cmd),
WMI_DISCONNECT_EVENTID, &reply, sizeof(reply), 1000);
/* failure to disconnect in reasonable time treated as FW error */
if (rc) {
wil_fw_error_recovery(wil);
return rc;
}
/* call event handler manually after processing wmi_call,
* to avoid deadlock - disconnect event handler acquires wil->mutex
* while it is already held here
*/
reason_code = le16_to_cpu(reply.evt.protocol_reason_status);
wil_dbg_wmi(wil, "Disconnect %pM reason [proto %d wmi %d]\n",
reply.evt.bssid, reason_code,
reply.evt.disconnect_reason);
wil->sinfo_gen++;
wil6210_disconnect(wil, reply.evt.bssid, reason_code, true);
return 0;
}
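
The comment above marks a classic pattern: the normal event path would take wil->mutex, which this caller already holds, so the reply is processed inline through the unlocked body instead. A toy illustration with a pthread mutex (purely illustrative, not the driver's locking):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t big_lock = PTHREAD_MUTEX_INITIALIZER;

static void handle_disconnect_locked(int reason)
{
        /* body of the event handler, minus the locking */
        printf("disconnect, reason %d\n", reason);
}

static void event_path(int reason)
{
        pthread_mutex_lock(&big_lock); /* would deadlock if caller held it */
        handle_disconnect_locked(reason);
        pthread_mutex_unlock(&big_lock);
}

static void command_path(void)
{
        pthread_mutex_lock(&big_lock);
        /* ... send command, wait for reply ... */
        handle_disconnect_locked(3);   /* call the locked body directly */
        pthread_mutex_unlock(&big_lock);
}

int main(void)
{
        command_path();
        event_path(3);
        return 0;
}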
int wmi_addba(struct wil6210_priv *wil, u8 ringid, u8 size, u16 timeout)
@ -1279,7 +1355,7 @@ static void wmi_event_handle(struct wil6210_priv *wil,
/* search for handler */
if (!wmi_evt_call_handler(wil, id, evt_data,
len - sizeof(*wmi))) {
wil_err(wil, "Unhandled event 0x%04x\n", id);
wil_info(wil, "Unhandled event 0x%04x\n", id);
}
} else {
wil_err(wil, "Unknown event type\n");

View File

@ -2564,15 +2564,6 @@ static inline void brcmf_sdio_clrintr(struct brcmf_sdio *bus)
}
}
static void atomic_orr(int val, atomic_t *v)
{
int old_val;
old_val = atomic_read(v);
while (atomic_cmpxchg(v, old_val, val | old_val) != old_val)
old_val = atomic_read(v);
}
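
The deleted atomic_orr() open-coded a compare-exchange retry loop; the kernel's generic atomic_or() now provides the same operation. The equivalent in C11 atomics, showing both the hand-rolled loop and the one-line library form:

#include <stdatomic.h>
#include <stdio.h>

static void my_atomic_or(int val, atomic_int *v)
{
        int old = atomic_load(v);

        /* retry until no other writer raced in between load and store */
        while (!atomic_compare_exchange_weak(v, &old, old | val))
                ;       /* old was reloaded by the failed exchange */
}

int main(void)
{
        atomic_int status = 0x1;

        my_atomic_or(0x4, &status);
        atomic_fetch_or(&status, 0x8);  /* the one-liner replacement */
        printf("status = 0x%x\n", atomic_load(&status)); /* 0xd */
        return 0;
}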
static int brcmf_sdio_intr_rstatus(struct brcmf_sdio *bus)
{
struct brcmf_core *buscore;
@ -2595,7 +2586,7 @@ static int brcmf_sdio_intr_rstatus(struct brcmf_sdio *bus)
if (val) {
brcmf_sdiod_regwl(bus->sdiodev, addr, val, &ret);
bus->sdcnt.f1regdata++;
atomic_orr(val, &bus->intstatus);
atomic_or(val, &bus->intstatus);
}
return ret;
@ -2712,7 +2703,7 @@ static void brcmf_sdio_dpc(struct brcmf_sdio *bus)
/* Keep still-pending events for next scheduling */
if (intstatus)
atomic_orr(intstatus, &bus->intstatus);
atomic_or(intstatus, &bus->intstatus);
brcmf_sdio_clrintr(bus);

View File

@ -122,9 +122,8 @@ static inline void iwl_set_calib_hdr(struct iwl_calib_hdr *hdr, u8 cmd)
void iwl_down(struct iwl_priv *priv);
void iwl_cancel_deferred_work(struct iwl_priv *priv);
void iwlagn_prepare_restart(struct iwl_priv *priv);
int __must_check iwl_rx_dispatch(struct iwl_op_mode *op_mode,
struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd);
void iwl_rx_dispatch(struct iwl_op_mode *op_mode, struct napi_struct *napi,
struct iwl_rx_cmd_buffer *rxb);
bool iwl_check_for_ct_kill(struct iwl_priv *priv);
@ -216,11 +215,9 @@ int iwlagn_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, u16 tid);
int iwlagn_tx_agg_flush(struct iwl_priv *priv, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, u16 tid);
int iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd);
int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd);
void iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
struct iwl_rx_cmd_buffer *rxb);
void iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb);
static inline u32 iwl_tx_status_to_mac80211(u32 status)
{
@ -277,9 +274,6 @@ int __must_check iwl_scan_initiate(struct iwl_priv *priv,
/* bt coex */
void iwlagn_send_advance_bt_config(struct iwl_priv *priv);
int iwlagn_bt_coex_profile_notif(struct iwl_priv *priv,
struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd);
void iwlagn_bt_rx_handler_setup(struct iwl_priv *priv);
void iwlagn_bt_setup_deferred_work(struct iwl_priv *priv);
void iwlagn_bt_cancel_deferred_work(struct iwl_priv *priv);
@ -332,8 +326,7 @@ u8 iwl_prep_station(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
int iwl_send_lq_cmd(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
struct iwl_link_quality_cmd *lq, u8 flags, bool init);
int iwl_add_sta_callback(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd);
void iwl_add_sta_callback(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb);
int iwl_sta_update_ht(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
struct ieee80211_sta *sta);

View File

@ -310,12 +310,8 @@ static ssize_t iwl_dbgfs_nvm_read(struct file *file,
pos += scnprintf(buf + pos, buf_size - pos,
"NVM version: 0x%x\n", nvm_ver);
for (ofs = 0 ; ofs < eeprom_len ; ofs += 16) {
pos += scnprintf(buf + pos, buf_size - pos, "0x%.4x ", ofs);
hex_dump_to_buffer(ptr + ofs, 16 , 16, 2, buf + pos,
buf_size - pos, 0);
pos += strlen(buf + pos);
if (buf_size - pos > 0)
buf[pos++] = '\n';
pos += scnprintf(buf + pos, buf_size - pos, "0x%.4x %16ph\n",
ofs, ptr + ofs);
}
ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);

View File

@ -680,9 +680,8 @@ struct iwl_priv {
enum ieee80211_band band;
u8 valid_contexts;
int (*rx_handlers[REPLY_MAX])(struct iwl_priv *priv,
struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd);
void (*rx_handlers[REPLY_MAX])(struct iwl_priv *priv,
struct iwl_rx_cmd_buffer *rxb);
struct iwl_notif_wait_data notif_wait;

View File

@ -659,9 +659,8 @@ static bool iwlagn_fill_txpower_mode(struct iwl_priv *priv,
return need_update;
}
int iwlagn_bt_coex_profile_notif(struct iwl_priv *priv,
struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd)
static void iwlagn_bt_coex_profile_notif(struct iwl_priv *priv,
struct iwl_rx_cmd_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_bt_coex_profile_notif *coex = (void *)pkt->data;
@ -669,7 +668,7 @@ int iwlagn_bt_coex_profile_notif(struct iwl_priv *priv,
if (priv->bt_enable_flag == IWLAGN_BT_FLAG_COEX_MODE_DISABLED) {
/* bt coex disabled */
return 0;
return;
}
IWL_DEBUG_COEX(priv, "BT Coex notification:\n");
@ -714,7 +713,6 @@ int iwlagn_bt_coex_profile_notif(struct iwl_priv *priv,
/* FIXME: based on notification, adjust the prio_boost */
priv->bt_ci_compliance = coex->bt_ci_compliance;
return 0;
}
void iwlagn_bt_rx_handler_setup(struct iwl_priv *priv)

View File

@ -250,12 +250,24 @@ static int __iwl_up(struct iwl_priv *priv)
}
}
ret = iwl_trans_start_hw(priv->trans);
if (ret) {
IWL_ERR(priv, "Failed to start HW: %d\n", ret);
goto error;
}
ret = iwl_run_init_ucode(priv);
if (ret) {
IWL_ERR(priv, "Failed to run INIT ucode: %d\n", ret);
goto error;
}
ret = iwl_trans_start_hw(priv->trans);
if (ret) {
IWL_ERR(priv, "Failed to start HW: %d\n", ret);
goto error;
}
ret = iwl_load_ucode_wait_alive(priv, IWL_UCODE_REGULAR);
if (ret) {
IWL_ERR(priv, "Failed to start RT ucode: %d\n", ret);
@ -432,7 +444,7 @@ static int iwlagn_mac_resume(struct ieee80211_hw *hw)
u32 error_id;
} err_info;
struct iwl_notification_wait status_wait;
static const u8 status_cmd[] = {
static const u16 status_cmd[] = {
REPLY_WOWLAN_GET_STATUS,
};
struct iwlagn_wowlan_status status_data = {};
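
The element type switch from u8 to u16 in these notification-wait command lists matters once command IDs no longer fit in one byte: a u8 entry would silently truncate them. A tiny demonstration (the ID value here is hypothetical, and the narrow initializer is exactly the truncation compilers warn about):

#include <stdint.h>
#include <stdio.h>

#define REPLY_WOWLAN_GET_STATUS 0x1ab   /* hypothetical wide command ID */

int main(void)
{
        /* compilers warn on the u8 line; that truncation is the point */
        static const uint8_t  narrow[] = { REPLY_WOWLAN_GET_STATUS };
        static const uint16_t wide[]   = { REPLY_WOWLAN_GET_STATUS };

        printf("u8: 0x%x  u16: 0x%x\n", narrow[0], wide[0]); /* 0xab vs 0x1ab */
        return 0;
}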

View File

@ -2029,18 +2029,6 @@ static bool iwl_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state)
return false;
}
static void iwl_napi_add(struct iwl_op_mode *op_mode,
struct napi_struct *napi,
struct net_device *napi_dev,
int (*poll)(struct napi_struct *, int),
int weight)
{
struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
netif_napi_add(napi_dev, napi, poll, weight);
priv->napi = napi;
}
static const struct iwl_op_mode_ops iwl_dvm_ops = {
.start = iwl_op_mode_dvm_start,
.stop = iwl_op_mode_dvm_stop,
@ -2053,7 +2041,6 @@ static const struct iwl_op_mode_ops iwl_dvm_ops = {
.cmd_queue_full = iwl_cmd_queue_full,
.nic_config = iwl_nic_config,
.wimax_active = iwl_wimax_active,
.napi_add = iwl_napi_add,
};
/*****************************************************************************

View File

@ -1416,11 +1416,11 @@ static int rs_switch_to_siso(struct iwl_priv *priv,
/*
* Try to switch to new modulation mode from legacy
*/
static int rs_move_legacy_other(struct iwl_priv *priv,
struct iwl_lq_sta *lq_sta,
struct ieee80211_conf *conf,
struct ieee80211_sta *sta,
int index)
static void rs_move_legacy_other(struct iwl_priv *priv,
struct iwl_lq_sta *lq_sta,
struct ieee80211_conf *conf,
struct ieee80211_sta *sta,
int index)
{
struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
struct iwl_scale_tbl_info *search_tbl =
@ -1575,7 +1575,7 @@ static int rs_move_legacy_other(struct iwl_priv *priv,
}
search_tbl->lq_type = LQ_NONE;
return 0;
return;
out:
lq_sta->search_better_tbl = 1;
@ -1584,17 +1584,15 @@ out:
tbl->action = IWL_LEGACY_SWITCH_ANTENNA1;
if (update_search_tbl_counter)
search_tbl->action = tbl->action;
return 0;
}
/*
* Try to switch to new modulation mode from SISO
*/
static int rs_move_siso_to_other(struct iwl_priv *priv,
struct iwl_lq_sta *lq_sta,
struct ieee80211_conf *conf,
struct ieee80211_sta *sta, int index)
static void rs_move_siso_to_other(struct iwl_priv *priv,
struct iwl_lq_sta *lq_sta,
struct ieee80211_conf *conf,
struct ieee80211_sta *sta, int index)
{
u8 is_green = lq_sta->is_green;
struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
@ -1747,7 +1745,7 @@ static int rs_move_siso_to_other(struct iwl_priv *priv,
break;
}
search_tbl->lq_type = LQ_NONE;
return 0;
return;
out:
lq_sta->search_better_tbl = 1;
@ -1756,17 +1754,15 @@ static int rs_move_siso_to_other(struct iwl_priv *priv,
tbl->action = IWL_SISO_SWITCH_ANTENNA1;
if (update_search_tbl_counter)
search_tbl->action = tbl->action;
return 0;
}
/*
* Try to switch to new modulation mode from MIMO2
*/
static int rs_move_mimo2_to_other(struct iwl_priv *priv,
struct iwl_lq_sta *lq_sta,
struct ieee80211_conf *conf,
struct ieee80211_sta *sta, int index)
static void rs_move_mimo2_to_other(struct iwl_priv *priv,
struct iwl_lq_sta *lq_sta,
struct ieee80211_conf *conf,
struct ieee80211_sta *sta, int index)
{
s8 is_green = lq_sta->is_green;
struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
@ -1917,7 +1913,7 @@ static int rs_move_mimo2_to_other(struct iwl_priv *priv,
break;
}
search_tbl->lq_type = LQ_NONE;
return 0;
return;
out:
lq_sta->search_better_tbl = 1;
tbl->action++;
@ -1926,17 +1922,15 @@ static int rs_move_mimo2_to_other(struct iwl_priv *priv,
if (update_search_tbl_counter)
search_tbl->action = tbl->action;
return 0;
}
/*
* Try to switch to new modulation mode from MIMO3
*/
static int rs_move_mimo3_to_other(struct iwl_priv *priv,
struct iwl_lq_sta *lq_sta,
struct ieee80211_conf *conf,
struct ieee80211_sta *sta, int index)
static void rs_move_mimo3_to_other(struct iwl_priv *priv,
struct iwl_lq_sta *lq_sta,
struct ieee80211_conf *conf,
struct ieee80211_sta *sta, int index)
{
s8 is_green = lq_sta->is_green;
struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
@ -2093,7 +2087,7 @@ static int rs_move_mimo3_to_other(struct iwl_priv *priv,
break;
}
search_tbl->lq_type = LQ_NONE;
return 0;
return;
out:
lq_sta->search_better_tbl = 1;
tbl->action++;
@ -2101,9 +2095,6 @@ static int rs_move_mimo3_to_other(struct iwl_priv *priv,
tbl->action = IWL_MIMO3_SWITCH_ANTENNA1;
if (update_search_tbl_counter)
search_tbl->action = tbl->action;
return 0;
}
/*

View File

@ -123,9 +123,8 @@ const char *const iwl_dvm_cmd_strings[REPLY_MAX] = {
*
******************************************************************************/
static int iwlagn_rx_reply_error(struct iwl_priv *priv,
struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd)
static void iwlagn_rx_reply_error(struct iwl_priv *priv,
struct iwl_rx_cmd_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_error_resp *err_resp = (void *)pkt->data;
@ -136,11 +135,9 @@ static int iwlagn_rx_reply_error(struct iwl_priv *priv,
err_resp->cmd_id,
le16_to_cpu(err_resp->bad_cmd_seq_num),
le32_to_cpu(err_resp->error_info));
return 0;
}
static int iwlagn_rx_csa(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd)
static void iwlagn_rx_csa(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_csa_notification *csa = (void *)pkt->data;
@ -152,7 +149,7 @@ static int iwlagn_rx_csa(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
struct iwl_rxon_cmd *rxon = (void *)&ctx->active;
if (!test_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status))
return 0;
return;
if (!le32_to_cpu(csa->status) && csa->channel == priv->switch_channel) {
rxon->channel = csa->channel;
@ -165,13 +162,11 @@ static int iwlagn_rx_csa(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
le16_to_cpu(csa->channel));
iwl_chswitch_done(priv, false);
}
return 0;
}
static int iwlagn_rx_spectrum_measure_notif(struct iwl_priv *priv,
struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd)
static void iwlagn_rx_spectrum_measure_notif(struct iwl_priv *priv,
struct iwl_rx_cmd_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_spectrum_notification *report = (void *)pkt->data;
@ -179,17 +174,15 @@ static int iwlagn_rx_spectrum_measure_notif(struct iwl_priv *priv,
if (!report->state) {
IWL_DEBUG_11H(priv,
"Spectrum Measure Notification: Start\n");
return 0;
return;
}
memcpy(&priv->measure_report, report, sizeof(*report));
priv->measurement_status |= MEASUREMENT_READY;
return 0;
}
static int iwlagn_rx_pm_sleep_notif(struct iwl_priv *priv,
struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd)
static void iwlagn_rx_pm_sleep_notif(struct iwl_priv *priv,
struct iwl_rx_cmd_buffer *rxb)
{
#ifdef CONFIG_IWLWIFI_DEBUG
struct iwl_rx_packet *pkt = rxb_addr(rxb);
@ -197,24 +190,20 @@ static int iwlagn_rx_pm_sleep_notif(struct iwl_priv *priv,
IWL_DEBUG_RX(priv, "sleep mode: %d, src: %d\n",
sleep->pm_sleep_mode, sleep->pm_wakeup_src);
#endif
return 0;
}
static int iwlagn_rx_pm_debug_statistics_notif(struct iwl_priv *priv,
struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd)
static void iwlagn_rx_pm_debug_statistics_notif(struct iwl_priv *priv,
struct iwl_rx_cmd_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
u32 __maybe_unused len = iwl_rx_packet_len(pkt);
IWL_DEBUG_RADIO(priv, "Dumping %d bytes of unhandled "
"notification for PM_DEBUG_STATISTIC_NOTIFIC:\n", len);
iwl_print_hex_dump(priv, IWL_DL_RADIO, pkt->data, len);
return 0;
}
static int iwlagn_rx_beacon_notif(struct iwl_priv *priv,
struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd)
static void iwlagn_rx_beacon_notif(struct iwl_priv *priv,
struct iwl_rx_cmd_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwlagn_beacon_notif *beacon = (void *)pkt->data;
@ -232,8 +221,6 @@ static int iwlagn_rx_beacon_notif(struct iwl_priv *priv,
#endif
priv->ibss_manager = le32_to_cpu(beacon->ibss_mgr_status);
return 0;
}
/**
@ -448,9 +435,8 @@ iwlagn_accumulative_statistics(struct iwl_priv *priv,
}
#endif
static int iwlagn_rx_statistics(struct iwl_priv *priv,
struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd)
static void iwlagn_rx_statistics(struct iwl_priv *priv,
struct iwl_rx_cmd_buffer *rxb)
{
unsigned long stamp = jiffies;
const int reg_recalib_period = 60;
@ -505,7 +491,7 @@ static int iwlagn_rx_statistics(struct iwl_priv *priv,
len, sizeof(struct iwl_bt_notif_statistics),
sizeof(struct iwl_notif_statistics));
spin_unlock(&priv->statistics.lock);
return 0;
return;
}
change = common->temperature != priv->statistics.common.temperature ||
@ -550,13 +536,10 @@ static int iwlagn_rx_statistics(struct iwl_priv *priv,
priv->lib->temperature(priv);
spin_unlock(&priv->statistics.lock);
return 0;
}
static int iwlagn_rx_reply_statistics(struct iwl_priv *priv,
struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd)
static void iwlagn_rx_reply_statistics(struct iwl_priv *priv,
struct iwl_rx_cmd_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_notif_statistics *stats = (void *)pkt->data;
@ -572,15 +555,14 @@ static int iwlagn_rx_reply_statistics(struct iwl_priv *priv,
#endif
IWL_DEBUG_RX(priv, "Statistics have been cleared\n");
}
iwlagn_rx_statistics(priv, rxb, cmd);
return 0;
iwlagn_rx_statistics(priv, rxb);
}
/* Handle notification from uCode that card's power state is changing
* due to software, hardware, or critical temperature RFKILL */
static int iwlagn_rx_card_state_notif(struct iwl_priv *priv,
struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd)
static void iwlagn_rx_card_state_notif(struct iwl_priv *priv,
struct iwl_rx_cmd_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_card_state_notif *card_state_notif = (void *)pkt->data;
@ -627,12 +609,10 @@ static int iwlagn_rx_card_state_notif(struct iwl_priv *priv,
test_bit(STATUS_RF_KILL_HW, &priv->status)))
wiphy_rfkill_set_hw_state(priv->hw->wiphy,
test_bit(STATUS_RF_KILL_HW, &priv->status));
return 0;
}
static int iwlagn_rx_missed_beacon_notif(struct iwl_priv *priv,
struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd)
static void iwlagn_rx_missed_beacon_notif(struct iwl_priv *priv,
struct iwl_rx_cmd_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
@ -649,14 +629,12 @@ static int iwlagn_rx_missed_beacon_notif(struct iwl_priv *priv,
if (!test_bit(STATUS_SCANNING, &priv->status))
iwl_init_sensitivity(priv);
}
return 0;
}
/* Cache phy data (Rx signal strength, etc) for HT frame (REPLY_RX_PHY_CMD).
* This will be used later in iwl_rx_reply_rx() for REPLY_RX_MPDU_CMD. */
static int iwlagn_rx_reply_rx_phy(struct iwl_priv *priv,
struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd)
static void iwlagn_rx_reply_rx_phy(struct iwl_priv *priv,
struct iwl_rx_cmd_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
@ -664,7 +642,6 @@ static int iwlagn_rx_reply_rx_phy(struct iwl_priv *priv,
priv->ampdu_ref++;
memcpy(&priv->last_phy_res, pkt->data,
sizeof(struct iwl_rx_phy_res));
return 0;
}
/*
@ -890,9 +867,8 @@ static int iwlagn_calc_rssi(struct iwl_priv *priv,
}
/* Called for REPLY_RX_MPDU_CMD */
static int iwlagn_rx_reply_rx(struct iwl_priv *priv,
struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd)
static void iwlagn_rx_reply_rx(struct iwl_priv *priv,
struct iwl_rx_cmd_buffer *rxb)
{
struct ieee80211_hdr *header;
struct ieee80211_rx_status rx_status = {};
@ -906,7 +882,7 @@ static int iwlagn_rx_reply_rx(struct iwl_priv *priv,
if (!priv->last_phy_res_valid) {
IWL_ERR(priv, "MPDU frame without cached PHY data\n");
return 0;
return;
}
phy_res = &priv->last_phy_res;
amsdu = (struct iwl_rx_mpdu_res_start *)pkt->data;
@ -919,14 +895,14 @@ static int iwlagn_rx_reply_rx(struct iwl_priv *priv,
if ((unlikely(phy_res->cfg_phy_cnt > 20))) {
IWL_DEBUG_DROP(priv, "dsp size out of range [0,20]: %d\n",
phy_res->cfg_phy_cnt);
return 0;
return;
}
if (!(rx_pkt_status & RX_RES_STATUS_NO_CRC32_ERROR) ||
!(rx_pkt_status & RX_RES_STATUS_NO_RXE_OVERFLOW)) {
IWL_DEBUG_RX(priv, "Bad CRC or FIFO: 0x%08X.\n",
le32_to_cpu(rx_pkt_status));
return 0;
return;
}
/* This will be used in several places later */
@ -998,12 +974,10 @@ static int iwlagn_rx_reply_rx(struct iwl_priv *priv,
iwlagn_pass_packet_to_mac80211(priv, header, len, ampdu_status,
rxb, &rx_status);
return 0;
}
static int iwlagn_rx_noa_notification(struct iwl_priv *priv,
struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd)
static void iwlagn_rx_noa_notification(struct iwl_priv *priv,
struct iwl_rx_cmd_buffer *rxb)
{
struct iwl_wipan_noa_data *new_data, *old_data;
struct iwl_rx_packet *pkt = rxb_addr(rxb);
@ -1041,8 +1015,6 @@ static int iwlagn_rx_noa_notification(struct iwl_priv *priv,
if (old_data)
kfree_rcu(old_data, rcu_head);
return 0;
}
/**
@ -1053,8 +1025,7 @@ static int iwlagn_rx_noa_notification(struct iwl_priv *priv,
*/
void iwl_setup_rx_handlers(struct iwl_priv *priv)
{
int (**handlers)(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd);
void (**handlers)(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb);
handlers = priv->rx_handlers;
@ -1102,12 +1073,11 @@ void iwl_setup_rx_handlers(struct iwl_priv *priv)
iwlagn_bt_rx_handler_setup(priv);
}
int iwl_rx_dispatch(struct iwl_op_mode *op_mode, struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd)
void iwl_rx_dispatch(struct iwl_op_mode *op_mode, struct napi_struct *napi,
struct iwl_rx_cmd_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
int err = 0;
/*
* Do the notification wait before RX handlers so
@ -1121,12 +1091,11 @@ int iwl_rx_dispatch(struct iwl_op_mode *op_mode, struct iwl_rx_cmd_buffer *rxb,
* rx_handlers table. See iwl_setup_rx_handlers() */
if (priv->rx_handlers[pkt->hdr.cmd]) {
priv->rx_handlers_stats[pkt->hdr.cmd]++;
err = priv->rx_handlers[pkt->hdr.cmd] (priv, rxb, cmd);
priv->rx_handlers[pkt->hdr.cmd](priv, rxb);
} else {
/* No handling needed */
IWL_DEBUG_RX(priv, "No handler needed for %s, 0x%02x\n",
iwl_dvm_get_cmd_string(pkt->hdr.cmd),
pkt->hdr.cmd);
}
return err;
}
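
After the conversion the handlers return void, since nothing ever consumed their status, and dispatch stays a plain table indexed by the command byte, with NULL meaning "no handling needed". A minimal sketch of that shape (all names are stand-ins):

#include <stdio.h>

#define REPLY_MAX 256

struct pkt { unsigned char cmd; };

static void handle_error(const struct pkt *p)
{
        printf("error pkt 0x%02x\n", p->cmd);
}

static void handle_stats(const struct pkt *p)
{
        printf("stats pkt 0x%02x\n", p->cmd);
}

static void (*rx_handlers[REPLY_MAX])(const struct pkt *);

int main(void)
{
        struct pkt p = { .cmd = 0x9c };

        rx_handlers[0x02] = handle_error;
        rx_handlers[0x9c] = handle_stats;

        if (rx_handlers[p.cmd])
                rx_handlers[p.cmd](&p);
        else
                printf("no handler needed for 0x%02x\n", p.cmd);
        return 0;
}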

View File

@ -1,6 +1,7 @@
/******************************************************************************
*
* Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2015 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
@ -123,7 +124,7 @@ static int iwlagn_disable_pan(struct iwl_priv *priv,
__le32 old_filter = send->filter_flags;
u8 old_dev_type = send->dev_type;
int ret;
static const u8 deactivate_cmd[] = {
static const u16 deactivate_cmd[] = {
REPLY_WIPAN_DEACTIVATION_COMPLETE
};

View File

@ -247,9 +247,8 @@ void iwl_scan_cancel_timeout(struct iwl_priv *priv, unsigned long ms)
}
/* Service response to REPLY_SCAN_CMD (0x80) */
static int iwl_rx_reply_scan(struct iwl_priv *priv,
struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd)
static void iwl_rx_reply_scan(struct iwl_priv *priv,
struct iwl_rx_cmd_buffer *rxb)
{
#ifdef CONFIG_IWLWIFI_DEBUG
struct iwl_rx_packet *pkt = rxb_addr(rxb);
@ -257,13 +256,11 @@ static int iwl_rx_reply_scan(struct iwl_priv *priv,
IWL_DEBUG_SCAN(priv, "Scan request status = 0x%x\n", notif->status);
#endif
return 0;
}
/* Service SCAN_START_NOTIFICATION (0x82) */
static int iwl_rx_scan_start_notif(struct iwl_priv *priv,
struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd)
static void iwl_rx_scan_start_notif(struct iwl_priv *priv,
struct iwl_rx_cmd_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_scanstart_notification *notif = (void *)pkt->data;
@ -277,14 +274,11 @@ static int iwl_rx_scan_start_notif(struct iwl_priv *priv,
le32_to_cpu(notif->tsf_high),
le32_to_cpu(notif->tsf_low),
notif->status, notif->beacon_timer);
return 0;
}
/* Service SCAN_RESULTS_NOTIFICATION (0x83) */
static int iwl_rx_scan_results_notif(struct iwl_priv *priv,
struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd)
static void iwl_rx_scan_results_notif(struct iwl_priv *priv,
struct iwl_rx_cmd_buffer *rxb)
{
#ifdef CONFIG_IWLWIFI_DEBUG
struct iwl_rx_packet *pkt = rxb_addr(rxb);
@ -303,13 +297,11 @@ static int iwl_rx_scan_results_notif(struct iwl_priv *priv,
le32_to_cpu(notif->statistics[0]),
le32_to_cpu(notif->tsf_low) - priv->scan_start_tsf);
#endif
return 0;
}
/* Service SCAN_COMPLETE_NOTIFICATION (0x84) */
static int iwl_rx_scan_complete_notif(struct iwl_priv *priv,
struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd)
static void iwl_rx_scan_complete_notif(struct iwl_priv *priv,
struct iwl_rx_cmd_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_scancomplete_notification *scan_notif = (void *)pkt->data;
@ -356,7 +348,6 @@ static int iwl_rx_scan_complete_notif(struct iwl_priv *priv,
queue_work(priv->workqueue,
&priv->bt_traffic_change_work);
}
return 0;
}
void iwl_setup_rx_scan_handlers(struct iwl_priv *priv)

View File

@ -60,41 +60,28 @@ static int iwl_sta_ucode_activate(struct iwl_priv *priv, u8 sta_id)
return 0;
}
static int iwl_process_add_sta_resp(struct iwl_priv *priv,
struct iwl_addsta_cmd *addsta,
struct iwl_rx_packet *pkt)
static void iwl_process_add_sta_resp(struct iwl_priv *priv,
struct iwl_rx_packet *pkt)
{
struct iwl_add_sta_resp *add_sta_resp = (void *)pkt->data;
u8 sta_id = addsta->sta.sta_id;
int ret = -EIO;
if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
IWL_ERR(priv, "Bad return from REPLY_ADD_STA (0x%08X)\n",
pkt->hdr.flags);
return ret;
}
IWL_DEBUG_INFO(priv, "Processing response for adding station %u\n",
sta_id);
IWL_DEBUG_INFO(priv, "Processing response for adding station\n");
spin_lock_bh(&priv->sta_lock);
switch (add_sta_resp->status) {
case ADD_STA_SUCCESS_MSK:
IWL_DEBUG_INFO(priv, "REPLY_ADD_STA PASSED\n");
ret = iwl_sta_ucode_activate(priv, sta_id);
break;
case ADD_STA_NO_ROOM_IN_TABLE:
IWL_ERR(priv, "Adding station %d failed, no room in table.\n",
sta_id);
IWL_ERR(priv, "Adding station failed, no room in table.\n");
break;
case ADD_STA_NO_BLOCK_ACK_RESOURCE:
IWL_ERR(priv, "Adding station %d failed, no block ack "
"resource.\n", sta_id);
IWL_ERR(priv,
"Adding station failed, no block ack resource.\n");
break;
case ADD_STA_MODIFY_NON_EXIST_STA:
IWL_ERR(priv, "Attempting to modify non-existing station %d\n",
sta_id);
IWL_ERR(priv, "Attempting to modify non-existing station\n");
break;
default:
IWL_DEBUG_ASSOC(priv, "Received REPLY_ADD_STA:(0x%08X)\n",
@ -102,37 +89,14 @@ static int iwl_process_add_sta_resp(struct iwl_priv *priv,
break;
}
IWL_DEBUG_INFO(priv, "%s station id %u addr %pM\n",
priv->stations[sta_id].sta.mode ==
STA_CONTROL_MODIFY_MSK ? "Modified" : "Added",
sta_id, priv->stations[sta_id].sta.sta.addr);
/*
* XXX: The MAC address in the command buffer is often changed from
* the original sent to the device. That is, the MAC address
* written to the command buffer often is not the same MAC address
* read from the command buffer when the command returns. This
* issue has not yet been resolved and this debugging is left to
* observe the problem.
*/
IWL_DEBUG_INFO(priv, "%s station according to cmd buffer %pM\n",
priv->stations[sta_id].sta.mode ==
STA_CONTROL_MODIFY_MSK ? "Modified" : "Added",
addsta->sta.addr);
spin_unlock_bh(&priv->sta_lock);
return ret;
}
int iwl_add_sta_callback(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd)
void iwl_add_sta_callback(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
if (!cmd)
return 0;
return iwl_process_add_sta_resp(priv, (void *)cmd->payload, pkt);
iwl_process_add_sta_resp(priv, pkt);
}
int iwl_send_add_sta(struct iwl_priv *priv,
@ -146,6 +110,8 @@ int iwl_send_add_sta(struct iwl_priv *priv,
.len = { sizeof(*sta), },
};
u8 sta_id __maybe_unused = sta->sta.sta_id;
struct iwl_rx_packet *pkt;
struct iwl_add_sta_resp *add_sta_resp;
IWL_DEBUG_INFO(priv, "Adding sta %u (%pM) %ssynchronously\n",
sta_id, sta->sta.addr, flags & CMD_ASYNC ? "a" : "");
@ -159,16 +125,22 @@ int iwl_send_add_sta(struct iwl_priv *priv,
if (ret || (flags & CMD_ASYNC))
return ret;
/*else the command was successfully sent in SYNC mode, need to free
* the reply page */
pkt = cmd.resp_pkt;
add_sta_resp = (void *)pkt->data;
/* debug messages are printed in the handler */
if (add_sta_resp->status == ADD_STA_SUCCESS_MSK) {
spin_lock_bh(&priv->sta_lock);
ret = iwl_sta_ucode_activate(priv, sta_id);
spin_unlock_bh(&priv->sta_lock);
} else {
ret = -EIO;
}
iwl_free_resp(&cmd);
if (cmd.handler_status)
IWL_ERR(priv, "%s - error in the CMD response %d\n", __func__,
cmd.handler_status);
return cmd.handler_status;
return ret;
}
bool iwl_is_ht40_tx_allowed(struct iwl_priv *priv,
@ -452,6 +424,7 @@ static int iwl_send_remove_station(struct iwl_priv *priv,
struct iwl_rx_packet *pkt;
int ret;
struct iwl_rem_sta_cmd rm_sta_cmd;
struct iwl_rem_sta_resp *rem_sta_resp;
struct iwl_host_cmd cmd = {
.id = REPLY_REMOVE_STA,
@ -471,29 +444,23 @@ static int iwl_send_remove_station(struct iwl_priv *priv,
return ret;
pkt = cmd.resp_pkt;
if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
IWL_ERR(priv, "Bad return from REPLY_REMOVE_STA (0x%08X)\n",
pkt->hdr.flags);
rem_sta_resp = (void *)pkt->data;
switch (rem_sta_resp->status) {
case REM_STA_SUCCESS_MSK:
if (!temporary) {
spin_lock_bh(&priv->sta_lock);
iwl_sta_ucode_deactivate(priv, sta_id);
spin_unlock_bh(&priv->sta_lock);
}
IWL_DEBUG_ASSOC(priv, "REPLY_REMOVE_STA PASSED\n");
break;
default:
ret = -EIO;
IWL_ERR(priv, "REPLY_REMOVE_STA failed\n");
break;
}
if (!ret) {
struct iwl_rem_sta_resp *rem_sta_resp = (void *)pkt->data;
switch (rem_sta_resp->status) {
case REM_STA_SUCCESS_MSK:
if (!temporary) {
spin_lock_bh(&priv->sta_lock);
iwl_sta_ucode_deactivate(priv, sta_id);
spin_unlock_bh(&priv->sta_lock);
}
IWL_DEBUG_ASSOC(priv, "REPLY_REMOVE_STA PASSED\n");
break;
default:
ret = -EIO;
IWL_ERR(priv, "REPLY_REMOVE_STA failed\n");
break;
}
}
iwl_free_resp(&cmd);
return ret;

View File

@ -1128,8 +1128,7 @@ static void iwl_check_abort_status(struct iwl_priv *priv,
}
}
int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd)
void iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
u16 sequence = le16_to_cpu(pkt->hdr.sequence);
@ -1273,8 +1272,6 @@ int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
skb = __skb_dequeue(&skbs);
ieee80211_tx_status(priv->hw, skb);
}
return 0;
}
/**
@ -1283,9 +1280,8 @@ int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
* Handles block-acknowledge notification from device, which reports success
* of frames sent via aggregation.
*/
int iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd)
void iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
struct iwl_rx_cmd_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_compressed_ba_resp *ba_resp = (void *)pkt->data;
@ -1306,7 +1302,7 @@ int iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
if (scd_flow >= priv->cfg->base_params->num_of_queues) {
IWL_ERR(priv,
"BUG_ON scd_flow is bigger than number of queues\n");
return 0;
return;
}
sta_id = ba_resp->sta_id;
@ -1319,7 +1315,7 @@ int iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
if (unlikely(ba_resp->bitmap))
IWL_ERR(priv, "Received BA when not expected\n");
spin_unlock_bh(&priv->sta_lock);
return 0;
return;
}
if (unlikely(scd_flow != agg->txq_id)) {
@ -1333,7 +1329,7 @@ int iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
"Bad queue mapping txq_id=%d, agg_txq[sta:%d,tid:%d]=%d\n",
scd_flow, sta_id, tid, agg->txq_id);
spin_unlock_bh(&priv->sta_lock);
return 0;
return;
}
__skb_queue_head_init(&reclaimed_skbs);
@ -1413,6 +1409,4 @@ int iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
skb = __skb_dequeue(&reclaimed_skbs);
ieee80211_tx_status(priv->hw, skb);
}
return 0;
}

View File

@ -3,6 +3,7 @@
* GPL LICENSE SUMMARY
*
* Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2015 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@ -327,7 +328,7 @@ int iwl_load_ucode_wait_alive(struct iwl_priv *priv,
const struct fw_img *fw;
int ret;
enum iwl_ucode_type old_type;
static const u8 alive_cmd[] = { REPLY_ALIVE };
static const u16 alive_cmd[] = { REPLY_ALIVE };
fw = iwl_get_ucode_image(priv, ucode_type);
if (WARN_ON(!fw))
@ -406,7 +407,7 @@ static bool iwlagn_wait_calib(struct iwl_notif_wait_data *notif_wait,
int iwl_run_init_ucode(struct iwl_priv *priv)
{
struct iwl_notification_wait calib_wait;
static const u8 calib_complete[] = {
static const u16 calib_complete[] = {
CALIBRATION_RES_NOTIFICATION,
CALIBRATION_COMPLETE_NOTIFICATION
};

View File

@ -69,14 +69,14 @@
#include "iwl-agn-hw.h"
/* Highest firmware API version supported */
#define IWL7260_UCODE_API_MAX 15
#define IWL7260_UCODE_API_MAX 16
/* Oldest version we won't warn about */
#define IWL7260_UCODE_API_OK 12
#define IWL3165_UCODE_API_OK 13
/* Lowest firmware API version supported */
#define IWL7260_UCODE_API_MIN 10
#define IWL7260_UCODE_API_MIN 12
#define IWL3165_UCODE_API_MIN 13
/* NVM versions */

View File

@ -69,13 +69,13 @@
#include "iwl-agn-hw.h"
/* Highest firmware API version supported */
#define IWL8000_UCODE_API_MAX 15
#define IWL8000_UCODE_API_MAX 16
/* Oldest version we won't warn about */
#define IWL8000_UCODE_API_OK 12
/* Lowest firmware API version supported */
#define IWL8000_UCODE_API_MIN 10
#define IWL8000_UCODE_API_MIN 12
/* NVM versions */
#define IWL8000_NVM_VERSION 0x0a1d
@ -97,8 +97,9 @@
#define DEFAULT_NVM_FILE_FAMILY_8000B "nvmData-8000B"
#define DEFAULT_NVM_FILE_FAMILY_8000C "nvmData-8000C"
/* Max SDIO RX aggregation size of the ADDBA request/response */
#define MAX_RX_AGG_SIZE_8260_SDIO 28
/* Max SDIO RX/TX aggregation sizes of the ADDBA request/response */
#define MAX_RX_AGG_SIZE_8260_SDIO 21
#define MAX_TX_AGG_SIZE_8260_SDIO 40
/* Max A-MPDU exponent for HT and VHT */
#define MAX_HT_AMPDU_EXPONENT_8260_SDIO IEEE80211_HT_MAX_AMPDU_32K
@ -154,6 +155,7 @@ static const struct iwl_tt_params iwl8000_tt_params = {
.led_mode = IWL_LED_RF_STATE, \
.nvm_hw_section_num = NVM_HW_SECTION_NUM_FAMILY_8000, \
.d0i3 = true, \
.features = NETIF_F_RXCSUM, \
.non_shared_ant = ANT_A, \
.dccm_offset = IWL8260_DCCM_OFFSET, \
.dccm_len = IWL8260_DCCM_LEN, \
@ -203,6 +205,7 @@ const struct iwl_cfg iwl8260_2ac_sdio_cfg = {
.nvm_ver = IWL8000_NVM_VERSION,
.nvm_calib_ver = IWL8000_TX_POWER_VERSION,
.max_rx_agg_size = MAX_RX_AGG_SIZE_8260_SDIO,
.max_tx_agg_size = MAX_TX_AGG_SIZE_8260_SDIO,
.disable_dummy_notification = true,
.max_ht_ampdu_exponent = MAX_HT_AMPDU_EXPONENT_8260_SDIO,
.max_vht_ampdu_exponent = MAX_VHT_AMPDU_EXPONENT_8260_SDIO,
@ -216,6 +219,7 @@ const struct iwl_cfg iwl4165_2ac_sdio_cfg = {
.nvm_ver = IWL8000_NVM_VERSION,
.nvm_calib_ver = IWL8000_TX_POWER_VERSION,
.max_rx_agg_size = MAX_RX_AGG_SIZE_8260_SDIO,
.max_tx_agg_size = MAX_TX_AGG_SIZE_8260_SDIO,
.bt_shared_single_ant = true,
.disable_dummy_notification = true,
.max_ht_ampdu_exponent = MAX_HT_AMPDU_EXPONENT_8260_SDIO,

View File

@ -297,6 +297,7 @@ struct iwl_pwr_tx_backoff {
* mode set
* @d0i3: device uses d0i3 instead of d3
* @nvm_hw_section_num: the ID of the HW NVM section
* @features: hw features, any combination of feature_whitelist
* @pwr_tx_backoffs: translation table between power limits and backoffs
* @max_rx_agg_size: max RX aggregation size of the ADDBA request/response
* @max_tx_agg_size: max TX aggregation size of the ADDBA request/response
@ -348,6 +349,7 @@ struct iwl_cfg {
bool no_power_up_nic_in_init;
const char *default_nvm_file_B_step;
const char *default_nvm_file_C_step;
netdev_features_t features;
unsigned int max_rx_agg_size;
bool disable_dummy_notification;
unsigned int max_tx_agg_size;

View File

@ -200,6 +200,7 @@
#define CSR_INT_BIT_FH_TX (1 << 27) /* Tx DMA FH_INT[1:0] */
#define CSR_INT_BIT_SCD (1 << 26) /* TXQ pointer advanced */
#define CSR_INT_BIT_SW_ERR (1 << 25) /* uCode error */
#define CSR_INT_BIT_PAGING (1 << 24) /* SDIO PAGING */
#define CSR_INT_BIT_RF_KILL (1 << 7) /* HW RFKILL switch GP_CNTRL[27] toggled */
#define CSR_INT_BIT_CT_KILL (1 << 6) /* Critical temp (chip too hot) rfkill */
#define CSR_INT_BIT_SW_RX (1 << 3) /* Rx, command responses */
@ -210,6 +211,7 @@
CSR_INT_BIT_HW_ERR | \
CSR_INT_BIT_FH_TX | \
CSR_INT_BIT_SW_ERR | \
CSR_INT_BIT_PAGING | \
CSR_INT_BIT_RF_KILL | \
CSR_INT_BIT_SW_RX | \
CSR_INT_BIT_WAKEUP | \
@ -422,6 +424,7 @@ enum {
/* DRAM INT TABLE */
#define CSR_DRAM_INT_TBL_ENABLE (1 << 31)
#define CSR_DRAM_INIT_TBL_WRITE_POINTER (1 << 28)
#define CSR_DRAM_INIT_TBL_WRAP_CHECK (1 << 27)
/*

View File

@ -35,8 +35,8 @@
TRACE_EVENT(iwlwifi_dev_tx_data,
TP_PROTO(const struct device *dev,
struct sk_buff *skb,
void *data, size_t data_len),
TP_ARGS(dev, skb, data, data_len),
u8 hdr_len, size_t data_len),
TP_ARGS(dev, skb, hdr_len, data_len),
TP_STRUCT__entry(
DEV_ENTRY
@ -45,7 +45,8 @@ TRACE_EVENT(iwlwifi_dev_tx_data,
TP_fast_assign(
DEV_ASSIGN;
if (iwl_trace_data(skb))
memcpy(__get_dynamic_array(data), data, data_len);
skb_copy_bits(skb, hdr_len,
__get_dynamic_array(data), data_len);
),
TP_printk("[%s] TX frame data", __get_str(dev))
);

View File

@ -36,7 +36,7 @@
TRACE_EVENT(iwlwifi_dev_hcmd,
TP_PROTO(const struct device *dev,
struct iwl_host_cmd *cmd, u16 total_size,
struct iwl_cmd_header *hdr),
struct iwl_cmd_header_wide *hdr),
TP_ARGS(dev, cmd, total_size, hdr),
TP_STRUCT__entry(
DEV_ENTRY
@ -44,11 +44,14 @@ TRACE_EVENT(iwlwifi_dev_hcmd,
__field(u32, flags)
),
TP_fast_assign(
int i, offset = sizeof(*hdr);
int i, offset = sizeof(struct iwl_cmd_header);
if (hdr->group_id)
offset = sizeof(struct iwl_cmd_header_wide);
DEV_ASSIGN;
__entry->flags = cmd->flags;
memcpy(__get_dynamic_array(hcmd), hdr, sizeof(*hdr));
memcpy(__get_dynamic_array(hcmd), hdr, offset);
for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
if (!cmd->len[i])
@ -58,8 +61,9 @@ TRACE_EVENT(iwlwifi_dev_hcmd,
offset += cmd->len[i];
}
),
TP_printk("[%s] hcmd %#.2x (%ssync)",
__get_str(dev), ((u8 *)__get_dynamic_array(hcmd))[0],
TP_printk("[%s] hcmd %#.2x.%#.2x (%ssync)",
__get_str(dev), ((u8 *)__get_dynamic_array(hcmd))[1],
((u8 *)__get_dynamic_array(hcmd))[0],
__entry->flags & CMD_ASYNC ? "a" : "")
);

View File

@ -372,6 +372,30 @@ static int iwl_store_cscheme(struct iwl_fw *fw, const u8 *data, const u32 len)
return 0;
}
static int iwl_store_gscan_capa(struct iwl_fw *fw, const u8 *data,
const u32 len)
{
struct iwl_fw_gscan_capabilities *fw_capa = (void *)data;
struct iwl_gscan_capabilities *capa = &fw->gscan_capa;
if (len < sizeof(*fw_capa))
return -EINVAL;
capa->max_scan_cache_size = le32_to_cpu(fw_capa->max_scan_cache_size);
capa->max_scan_buckets = le32_to_cpu(fw_capa->max_scan_buckets);
capa->max_ap_cache_per_scan =
le32_to_cpu(fw_capa->max_ap_cache_per_scan);
capa->max_rssi_sample_size = le32_to_cpu(fw_capa->max_rssi_sample_size);
capa->max_scan_reporting_threshold =
le32_to_cpu(fw_capa->max_scan_reporting_threshold);
capa->max_hotlist_aps = le32_to_cpu(fw_capa->max_hotlist_aps);
capa->max_significant_change_aps =
le32_to_cpu(fw_capa->max_significant_change_aps);
capa->max_bssid_history_entries =
le32_to_cpu(fw_capa->max_bssid_history_entries);
return 0;
}
/*
* Gets uCode section from tlv.
*/
@ -573,13 +597,15 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
size_t len = ucode_raw->size;
const u8 *data;
u32 tlv_len;
u32 usniffer_img;
enum iwl_ucode_tlv_type tlv_type;
const u8 *tlv_data;
char buildstr[25];
u32 build;
u32 build, paging_mem_size;
int num_of_cpus;
bool usniffer_images = false;
bool usniffer_req = false;
bool gscan_capa = false;
if (len < sizeof(*ucode)) {
IWL_ERR(drv, "uCode has invalid length: %zd\n", len);
@ -955,12 +981,46 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
IWL_UCODE_REGULAR_USNIFFER,
tlv_len);
break;
case IWL_UCODE_TLV_PAGING:
if (tlv_len != sizeof(u32))
goto invalid_tlv_len;
paging_mem_size = le32_to_cpup((__le32 *)tlv_data);
IWL_DEBUG_FW(drv,
"Paging: paging enabled (size = %u bytes)\n",
paging_mem_size);
if (paging_mem_size > MAX_PAGING_IMAGE_SIZE) {
IWL_ERR(drv,
"Paging: driver supports up to %lu bytes for paging image\n",
MAX_PAGING_IMAGE_SIZE);
return -EINVAL;
}
if (paging_mem_size & (FW_PAGING_SIZE - 1)) {
IWL_ERR(drv,
"Paging: image isn't multiple %lu\n",
FW_PAGING_SIZE);
return -EINVAL;
}
drv->fw.img[IWL_UCODE_REGULAR].paging_mem_size =
paging_mem_size;
usniffer_img = IWL_UCODE_REGULAR_USNIFFER;
drv->fw.img[usniffer_img].paging_mem_size =
paging_mem_size;
break;
case IWL_UCODE_TLV_SDIO_ADMA_ADDR:
if (tlv_len != sizeof(u32))
goto invalid_tlv_len;
drv->fw.sdio_adma_addr =
le32_to_cpup((__le32 *)tlv_data);
break;
case IWL_UCODE_TLV_FW_GSCAN_CAPA:
if (iwl_store_gscan_capa(&drv->fw, tlv_data, tlv_len))
goto invalid_tlv_len;
gscan_capa = true;
break;
default:
IWL_DEBUG_INFO(drv, "unknown TLV: %d\n", tlv_type);
break;
@ -979,6 +1039,16 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
return -EINVAL;
}
/*
* If ucode advertises that it supports GSCAN but GSCAN
* capabilities TLV is not present, warn and continue without GSCAN.
*/
if (fw_has_capa(capa, IWL_UCODE_TLV_CAPA_GSCAN_SUPPORT) &&
WARN(!gscan_capa,
"GSCAN is supported but capabilities TLV is unavailable\n"))
__clear_bit((__force long)IWL_UCODE_TLV_CAPA_GSCAN_SUPPORT,
capa->_capa);
return 0;
invalid_tlv_len:

View File

@ -713,12 +713,12 @@ int iwl_init_sband_channels(struct iwl_nvm_data *data,
struct ieee80211_channel *chan = &data->channels[0];
int n = 0, idx = 0;
while (chan->band != band && idx < n_channels)
while (idx < n_channels && chan->band != band)
chan = &data->channels[++idx];
sband->channels = &data->channels[idx];
while (chan->band == band && idx < n_channels) {
while (idx < n_channels && chan->band == band) {
chan = &data->channels[++idx];
n++;
}

View File

@ -438,12 +438,6 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(unsigned int chnl)
#define RX_QUEUE_MASK 255
#define RX_QUEUE_SIZE_LOG 8
/*
* RX related structures and functions
*/
#define RX_FREE_BUFFERS 64
#define RX_LOW_WATERMARK 8
/**
* struct iwl_rb_status - reserve buffer status
* host memory mapped FH registers

View File

@ -84,6 +84,8 @@
* @IWL_FW_ERROR_DUMP_MEM: chunk of memory
* @IWL_FW_ERROR_DUMP_ERROR_INFO: description of what triggered this dump.
* Structured as &struct iwl_fw_error_dump_trigger_desc.
* @IWL_FW_ERROR_DUMP_RB: the content of an RB structured as
* &struct iwl_fw_error_dump_rb
*/
enum iwl_fw_error_dump_type {
/* 0 is deprecated */
@ -97,6 +99,7 @@ enum iwl_fw_error_dump_type {
IWL_FW_ERROR_DUMP_FH_REGS = 8,
IWL_FW_ERROR_DUMP_MEM = 9,
IWL_FW_ERROR_DUMP_ERROR_INFO = 10,
IWL_FW_ERROR_DUMP_RB = 11,
IWL_FW_ERROR_DUMP_MAX,
};
@ -222,6 +225,20 @@ struct iwl_fw_error_dump_mem {
u8 data[];
};
/**
* struct iwl_fw_error_dump_rb - content of a Receive Buffer
* @index: the index of the Receive Buffer in the Rx queue
* @rxq: the RB's Rx queue
* @reserved:
* @data: the content of the Receive Buffer
*/
struct iwl_fw_error_dump_rb {
__le32 index;
__le32 rxq;
__le32 reserved;
u8 data[];
};
/**
* iwl_fw_error_next_data - advance fw error dump data pointer
* @data: previous data block

View File

@ -132,12 +132,14 @@ enum iwl_ucode_tlv_type {
IWL_UCODE_TLV_API_CHANGES_SET = 29,
IWL_UCODE_TLV_ENABLED_CAPABILITIES = 30,
IWL_UCODE_TLV_N_SCAN_CHANNELS = 31,
IWL_UCODE_TLV_PAGING = 32,
IWL_UCODE_TLV_SEC_RT_USNIFFER = 34,
IWL_UCODE_TLV_SDIO_ADMA_ADDR = 35,
IWL_UCODE_TLV_FW_VERSION = 36,
IWL_UCODE_TLV_FW_DBG_DEST = 38,
IWL_UCODE_TLV_FW_DBG_CONF = 39,
IWL_UCODE_TLV_FW_DBG_TRIGGER = 40,
IWL_UCODE_TLV_FW_GSCAN_CAPA = 50,
};
struct iwl_ucode_tlv {
@ -247,9 +249,7 @@ typedef unsigned int __bitwise__ iwl_ucode_tlv_api_t;
* @IWL_UCODE_TLV_API_WIFI_MCC_UPDATE: ucode supports MCC updates with source.
* @IWL_UCODE_TLV_API_HDC_PHASE_0: ucode supports finer configuration of LTR
* @IWL_UCODE_TLV_API_TX_POWER_DEV: new API for tx power.
* @IWL_UCODE_TLV_API_BASIC_DWELL: use only basic dwell time in scan command,
* regardless of the band or the number of the probes. FW will calculate
* the actual dwell time.
* @IWL_UCODE_TLV_API_WIDE_CMD_HDR: ucode supports wide command header
* @IWL_UCODE_TLV_API_SCD_CFG: This firmware can configure the scheduler
* through the dedicated host command.
* @IWL_UCODE_TLV_API_SINGLE_SCAN_EBS: EBS is supported for single scans too.
@ -266,7 +266,7 @@ enum iwl_ucode_tlv_api {
IWL_UCODE_TLV_API_WIFI_MCC_UPDATE = (__force iwl_ucode_tlv_api_t)9,
IWL_UCODE_TLV_API_HDC_PHASE_0 = (__force iwl_ucode_tlv_api_t)10,
IWL_UCODE_TLV_API_TX_POWER_DEV = (__force iwl_ucode_tlv_api_t)11,
IWL_UCODE_TLV_API_BASIC_DWELL = (__force iwl_ucode_tlv_api_t)13,
IWL_UCODE_TLV_API_WIDE_CMD_HDR = (__force iwl_ucode_tlv_api_t)14,
IWL_UCODE_TLV_API_SCD_CFG = (__force iwl_ucode_tlv_api_t)15,
IWL_UCODE_TLV_API_SINGLE_SCAN_EBS = (__force iwl_ucode_tlv_api_t)16,
IWL_UCODE_TLV_API_ASYNC_DTM = (__force iwl_ucode_tlv_api_t)17,
@ -284,6 +284,7 @@ typedef unsigned int __bitwise__ iwl_ucode_tlv_capa_t;
* @IWL_UCODE_TLV_CAPA_LAR_SUPPORT: supports Location Aware Regulatory
* @IWL_UCODE_TLV_CAPA_UMAC_SCAN: supports UMAC scan.
* @IWL_UCODE_TLV_CAPA_BEAMFORMER: supports Beamformer
* @IWL_UCODE_TLV_CAPA_TOF_SUPPORT: supports Time of Flight (802.11mc FTM)
* @IWL_UCODE_TLV_CAPA_TDLS_SUPPORT: support basic TDLS functionality
* @IWL_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT: supports insertion of current
* tx power value into TPC Report action frame and Link Measurement Report
@ -298,6 +299,7 @@ typedef unsigned int __bitwise__ iwl_ucode_tlv_capa_t;
* @IWL_UCODE_TLV_CAPA_TDLS_CHANNEL_SWITCH: supports TDLS channel switching
* @IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT: supports Hot Spot Command
* @IWL_UCODE_TLV_CAPA_DC2DC_SUPPORT: supports DC2DC Command
* @IWL_UCODE_TLV_CAPA_CSUM_SUPPORT: supports TCP Checksum Offload
* @IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS: support radio and beacon statistics
* @IWL_UCODE_TLV_CAPA_BT_COEX_PLCR: enabled BT Coex packet level co-running
* @IWL_UCODE_TLV_CAPA_LAR_MULTI_MCC: ucode supports LAR updates with different
@ -305,12 +307,14 @@ typedef unsigned int __bitwise__ iwl_ucode_tlv_capa_t;
* IWL_UCODE_TLV_API_WIFI_MCC_UPDATE. When either is set, multi-source LAR
* is supported.
* @IWL_UCODE_TLV_CAPA_BT_COEX_RRC: supports BT Coex RRC
* @IWL_UCODE_TLV_CAPA_GSCAN_SUPPORT: supports gscan
*/
enum iwl_ucode_tlv_capa {
IWL_UCODE_TLV_CAPA_D0I3_SUPPORT = (__force iwl_ucode_tlv_capa_t)0,
IWL_UCODE_TLV_CAPA_LAR_SUPPORT = (__force iwl_ucode_tlv_capa_t)1,
IWL_UCODE_TLV_CAPA_UMAC_SCAN = (__force iwl_ucode_tlv_capa_t)2,
IWL_UCODE_TLV_CAPA_BEAMFORMER = (__force iwl_ucode_tlv_capa_t)3,
IWL_UCODE_TLV_CAPA_TOF_SUPPORT = (__force iwl_ucode_tlv_capa_t)5,
IWL_UCODE_TLV_CAPA_TDLS_SUPPORT = (__force iwl_ucode_tlv_capa_t)6,
IWL_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT = (__force iwl_ucode_tlv_capa_t)8,
IWL_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT = (__force iwl_ucode_tlv_capa_t)9,
@ -320,10 +324,12 @@ enum iwl_ucode_tlv_capa {
IWL_UCODE_TLV_CAPA_TDLS_CHANNEL_SWITCH = (__force iwl_ucode_tlv_capa_t)13,
IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT = (__force iwl_ucode_tlv_capa_t)18,
IWL_UCODE_TLV_CAPA_DC2DC_CONFIG_SUPPORT = (__force iwl_ucode_tlv_capa_t)19,
IWL_UCODE_TLV_CAPA_CSUM_SUPPORT = (__force iwl_ucode_tlv_capa_t)21,
IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS = (__force iwl_ucode_tlv_capa_t)22,
IWL_UCODE_TLV_CAPA_BT_COEX_PLCR = (__force iwl_ucode_tlv_capa_t)28,
IWL_UCODE_TLV_CAPA_LAR_MULTI_MCC = (__force iwl_ucode_tlv_capa_t)29,
IWL_UCODE_TLV_CAPA_BT_COEX_RRC = (__force iwl_ucode_tlv_capa_t)30,
IWL_UCODE_TLV_CAPA_GSCAN_SUPPORT = (__force iwl_ucode_tlv_capa_t)31,
};
/* The default calibrate table size if not specified by firmware file */
@ -341,8 +347,9 @@ enum iwl_ucode_tlv_capa {
* For 16.0 uCode and above, there is no differentiation between sections,
* just an offset to the HW address.
*/
#define IWL_UCODE_SECTION_MAX 12
#define IWL_UCODE_SECTION_MAX 16
#define CPU1_CPU2_SEPARATOR_SECTION 0xFFFFCCCC
#define PAGING_SEPARATOR_SECTION 0xAAAABBBB
/* uCode version contains 4 values: Major/Minor/API/Serial */
#define IWL_UCODE_MAJOR(ver) (((ver) & 0xFF000000) >> 24)
@ -412,6 +419,12 @@ enum iwl_fw_dbg_reg_operator {
PRPH_ASSIGN,
PRPH_SETBIT,
PRPH_CLEARBIT,
INDIRECT_ASSIGN,
INDIRECT_SETBIT,
INDIRECT_CLEARBIT,
PRPH_BLOCKBIT,
};
/**
@ -485,10 +498,13 @@ struct iwl_fw_dbg_conf_hcmd {
*
* @IWL_FW_DBG_TRIGGER_START: when trigger occurs re-conf the dbg mechanism
* @IWL_FW_DBG_TRIGGER_STOP: when trigger occurs pull the dbg data
* @IWL_FW_DBG_TRIGGER_MONITOR_ONLY: when trigger occurs trigger is set to
* collect only monitor data
*/
enum iwl_fw_dbg_trigger_mode {
IWL_FW_DBG_TRIGGER_START = BIT(0),
IWL_FW_DBG_TRIGGER_STOP = BIT(1),
IWL_FW_DBG_TRIGGER_MONITOR_ONLY = BIT(2),
};
/**
@ -718,4 +734,28 @@ struct iwl_fw_dbg_conf_tlv {
struct iwl_fw_dbg_conf_hcmd hcmd;
} __packed;
/**
* struct iwl_fw_gscan_capabilities - gscan capabilities supported by FW
* @max_scan_cache_size: total space allocated for scan results (in bytes).
* @max_scan_buckets: maximum number of channel buckets.
* @max_ap_cache_per_scan: maximum number of APs that can be stored per scan.
* @max_rssi_sample_size: number of RSSI samples used for averaging RSSI.
* @max_scan_reporting_threshold: max possible report threshold, in percentage.
* @max_hotlist_aps: maximum number of entries for hotlist APs.
* @max_significant_change_aps: maximum number of entries for significant
* change APs.
* @max_bssid_history_entries: number of BSSID/RSSI entries that the device can
* hold.
*/
struct iwl_fw_gscan_capabilities {
__le32 max_scan_cache_size;
__le32 max_scan_buckets;
__le32 max_ap_cache_per_scan;
__le32 max_rssi_sample_size;
__le32 max_scan_reporting_threshold;
__le32 max_hotlist_aps;
__le32 max_significant_change_aps;
__le32 max_bssid_history_entries;
} __packed;
#endif /* __iwl_fw_file_h__ */

View File

@ -133,6 +133,7 @@ struct fw_desc {
struct fw_img {
struct fw_desc sec[IWL_UCODE_SECTION_MAX];
bool is_dual_cpus;
u32 paging_mem_size;
};
struct iwl_sf_region {
@ -140,6 +141,48 @@ struct iwl_sf_region {
u32 size;
};
/*
* Block paging calculations
*/
#define PAGE_2_EXP_SIZE 12 /* 4K == 2^12 */
#define FW_PAGING_SIZE BIT(PAGE_2_EXP_SIZE) /* page size is 4KB */
#define PAGE_PER_GROUP_2_EXP_SIZE 3
/* 8 pages per group */
#define NUM_OF_PAGE_PER_GROUP BIT(PAGE_PER_GROUP_2_EXP_SIZE)
/* don't change, support only 32KB size */
#define PAGING_BLOCK_SIZE (NUM_OF_PAGE_PER_GROUP * FW_PAGING_SIZE)
/* 32K == 2^15 */
#define BLOCK_2_EXP_SIZE (PAGE_2_EXP_SIZE + PAGE_PER_GROUP_2_EXP_SIZE)
/*
* Image paging calculations
*/
#define BLOCK_PER_IMAGE_2_EXP_SIZE 5
/* 2^5 == 32 blocks per image */
#define NUM_OF_BLOCK_PER_IMAGE BIT(BLOCK_PER_IMAGE_2_EXP_SIZE)
/* maximum image size 1024KB */
#define MAX_PAGING_IMAGE_SIZE (NUM_OF_BLOCK_PER_IMAGE * PAGING_BLOCK_SIZE)
/* Virtual address signature */
#define PAGING_ADDR_SIG 0xAA000000
#define PAGING_CMD_IS_SECURED BIT(9)
#define PAGING_CMD_IS_ENABLED BIT(8)
#define PAGING_CMD_NUM_OF_PAGES_IN_LAST_GRP_POS 0
#define PAGING_TLV_SECURE_MASK 1
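Worked through, the constants above give 4 KiB pages (2^12), 8 pages per group for a 32 KiB paging block (2^15), and 32 blocks per image, i.e. a 1 MiB ceiling; the TLV parser earlier in this series enforces exactly that ceiling plus 4 KiB alignment with size & (FW_PAGING_SIZE - 1). A standalone sketch of the arithmetic (paging_size_ok is a hypothetical name for the combined check):

#include <stdio.h>
#include <stdint.h>

#define FW_PAGING_SIZE		(1u << 12)		/* 4 KiB page */
#define PAGING_BLOCK_SIZE	(8u * FW_PAGING_SIZE)	/* 32 KiB block */
#define MAX_PAGING_IMAGE_SIZE	(32u * PAGING_BLOCK_SIZE)	/* 1 MiB */

/* mirrors the checks applied to IWL_UCODE_TLV_PAGING */
static int paging_size_ok(uint32_t size)
{
	if (size > MAX_PAGING_IMAGE_SIZE)
		return 0;
	/* alignment test for a power-of-two page: low 12 bits must be 0 */
	if (size & (FW_PAGING_SIZE - 1))
		return 0;
	return 1;
}

int main(void)
{
	printf("max image: %u bytes\n", MAX_PAGING_IMAGE_SIZE);	/* 1048576 */
	printf("%d %d %d\n",
	       paging_size_ok(512u * 1024u),	/* 1: aligned, in range */
	       paging_size_ok(6000u),		/* 0: not 4 KiB aligned */
	       paging_size_ok(2u << 20));	/* 0: above 1 MiB */
	return 0;
}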
/**
* struct iwl_fw_paging
* @fw_paging_phys: page phy pointer
* @fw_paging_block: pointer to the allocated block
* @fw_paging_size: page size
*/
struct iwl_fw_paging {
dma_addr_t fw_paging_phys;
struct page *fw_paging_block;
u32 fw_paging_size;
};
/**
* struct iwl_fw_cscheme_list - a cipher scheme list
* @size: a number of entries
@ -150,6 +193,30 @@ struct iwl_fw_cscheme_list {
struct iwl_fw_cipher_scheme cs[];
} __packed;
/**
* struct iwl_gscan_capabilities - gscan capabilities supported by FW
* @max_scan_cache_size: total space allocated for scan results (in bytes).
* @max_scan_buckets: maximum number of channel buckets.
* @max_ap_cache_per_scan: maximum number of APs that can be stored per scan.
* @max_rssi_sample_size: number of RSSI samples used for averaging RSSI.
* @max_scan_reporting_threshold: max possible report threshold, in percentage.
* @max_hotlist_aps: maximum number of entries for hotlist APs.
* @max_significant_change_aps: maximum number of entries for significant
* change APs.
* @max_bssid_history_entries: number of BSSID/RSSI entries that the device can
* hold.
*/
struct iwl_gscan_capabilities {
u32 max_scan_cache_size;
u32 max_scan_buckets;
u32 max_ap_cache_per_scan;
u32 max_rssi_sample_size;
u32 max_scan_reporting_threshold;
u32 max_hotlist_aps;
u32 max_significant_change_aps;
u32 max_bssid_history_entries;
};
/**
* struct iwl_fw - variables associated with the firmware
*
@ -208,6 +275,7 @@ struct iwl_fw {
struct iwl_fw_dbg_trigger_tlv *dbg_trigger_tlv[FW_DBG_TRIGGER_MAX];
size_t dbg_trigger_tlv_len[FW_DBG_TRIGGER_MAX];
u8 dbg_dest_reg_num;
struct iwl_gscan_capabilities gscan_capa;
};
static inline const char *get_fw_dbg_mode_string(int mode)

View File

@ -6,6 +6,7 @@
* GPL LICENSE SUMMARY
*
* Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2015 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@ -98,7 +99,8 @@ void iwl_notification_wait_notify(struct iwl_notif_wait_data *notif_wait,
continue;
for (i = 0; i < w->n_cmds; i++) {
if (w->cmds[i] == pkt->hdr.cmd) {
if (w->cmds[i] ==
WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd)) {
found = true;
break;
}
@ -136,7 +138,7 @@ IWL_EXPORT_SYMBOL(iwl_abort_notification_waits);
void
iwl_init_notification_wait(struct iwl_notif_wait_data *notif_wait,
struct iwl_notification_wait *wait_entry,
const u8 *cmds, int n_cmds,
const u16 *cmds, int n_cmds,
bool (*fn)(struct iwl_notif_wait_data *notif_wait,
struct iwl_rx_packet *pkt, void *data),
void *fn_data)
@ -147,7 +149,7 @@ iwl_init_notification_wait(struct iwl_notif_wait_data *notif_wait,
wait_entry->fn = fn;
wait_entry->fn_data = fn_data;
wait_entry->n_cmds = n_cmds;
memcpy(wait_entry->cmds, cmds, n_cmds);
memcpy(wait_entry->cmds, cmds, n_cmds * sizeof(u16));
wait_entry->triggered = false;
wait_entry->aborted = false;

View File

@ -6,6 +6,7 @@
* GPL LICENSE SUMMARY
*
* Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2015 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@ -105,7 +106,7 @@ struct iwl_notification_wait {
struct iwl_rx_packet *pkt, void *data);
void *fn_data;
u8 cmds[MAX_NOTIF_CMDS];
u16 cmds[MAX_NOTIF_CMDS];
u8 n_cmds;
bool triggered, aborted;
};
@ -121,7 +122,7 @@ void iwl_abort_notification_waits(struct iwl_notif_wait_data *notif_data);
void __acquires(wait_entry)
iwl_init_notification_wait(struct iwl_notif_wait_data *notif_data,
struct iwl_notification_wait *wait_entry,
const u8 *cmds, int n_cmds,
const u16 *cmds, int n_cmds,
bool (*fn)(struct iwl_notif_wait_data *notif_data,
struct iwl_rx_packet *pkt, void *data),
void *fn_data);

View File

@ -116,10 +116,6 @@ struct iwl_cfg;
* May sleep
* @rx: Rx notification to the op_mode. rxb is the Rx buffer itself. Cmd is the
* HCMD this Rx responds to. Can't sleep.
* @napi_add: NAPI initialization. The transport is fully responsible for NAPI,
* but the higher layers need to know about it (in particular mac80211 to
* be able to call the right NAPI RX functions); this function is needed
* to eventually call netif_napi_add() with higher layer involvement.
* @queue_full: notifies that a HW queue is full.
* Must be atomic and called with BH disabled.
* @queue_not_full: notifies that a HW queue is not full any more.
@ -148,13 +144,8 @@ struct iwl_op_mode_ops {
const struct iwl_fw *fw,
struct dentry *dbgfs_dir);
void (*stop)(struct iwl_op_mode *op_mode);
int (*rx)(struct iwl_op_mode *op_mode, struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd);
void (*napi_add)(struct iwl_op_mode *op_mode,
struct napi_struct *napi,
struct net_device *napi_dev,
int (*poll)(struct napi_struct *, int),
int weight);
void (*rx)(struct iwl_op_mode *op_mode, struct napi_struct *napi,
struct iwl_rx_cmd_buffer *rxb);
void (*queue_full)(struct iwl_op_mode *op_mode, int queue);
void (*queue_not_full)(struct iwl_op_mode *op_mode, int queue);
bool (*hw_rf_kill)(struct iwl_op_mode *op_mode, bool state);
@ -188,11 +179,11 @@ static inline void iwl_op_mode_stop(struct iwl_op_mode *op_mode)
op_mode->ops->stop(op_mode);
}
static inline int iwl_op_mode_rx(struct iwl_op_mode *op_mode,
struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd)
static inline void iwl_op_mode_rx(struct iwl_op_mode *op_mode,
struct napi_struct *napi,
struct iwl_rx_cmd_buffer *rxb)
{
return op_mode->ops->rx(op_mode, rxb, cmd);
return op_mode->ops->rx(op_mode, napi, rxb);
}
static inline void iwl_op_mode_queue_full(struct iwl_op_mode *op_mode,
@ -260,15 +251,4 @@ static inline int iwl_op_mode_exit_d0i3(struct iwl_op_mode *op_mode)
return op_mode->ops->exit_d0i3(op_mode);
}
static inline void iwl_op_mode_napi_add(struct iwl_op_mode *op_mode,
struct napi_struct *napi,
struct net_device *napi_dev,
int (*poll)(struct napi_struct *, int),
int weight)
{
if (!op_mode->ops->napi_add)
return;
op_mode->ops->napi_add(op_mode, napi, napi_dev, poll, weight);
}
#endif /* __iwl_op_mode_h__ */

View File

@ -253,6 +253,7 @@
#define SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS (16)
#define SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK (0x007F0000)
#define SCD_GP_CTRL_ENABLE_31_QUEUES BIT(0)
#define SCD_GP_CTRL_AUTO_ACTIVE_MODE BIT(18)
/* Context Data */
#define SCD_CONTEXT_MEM_LOWER_BOUND (SCD_MEM_LOWER_BOUND + 0x600)
@ -291,6 +292,9 @@
/*********************** END TX SCHEDULER *************************************/
/* tcp checksum offload */
#define RX_EN_CSUM (0x00a00d88)
/* Oscillator clock */
#define OSC_CLK (0xa04068)
#define OSC_CLK_FORCE_CONTROL (0x8)
@ -379,6 +383,8 @@ enum aux_misc_master1_en {
#define AUX_MISC_MASTER1_SMPHR_STATUS 0xA20800
#define RSA_ENABLE 0xA24B08
#define PREG_AUX_BUS_WPROT_0 0xA04CC0
#define SB_CPU_1_STATUS 0xA01E30
#define SB_CPU_2_STATUS 0xA01E34
/* FW chicken bits */
#define LMPM_CHICK 0xA01FF8
@ -386,4 +392,10 @@ enum {
LMPM_CHICK_EXTENDED_ADDR_SPACE = BIT(0),
};
/* FW chicken bits */
#define LMPM_PAGE_PASS_NOTIF 0xA03824
enum {
LMPM_PAGE_PASS_NOTIF_POS = BIT(20),
};
#endif /* __iwl_prph_h__ */

View File

@ -122,6 +122,40 @@
#define INDEX_TO_SEQ(i) ((i) & 0xff)
#define SEQ_RX_FRAME cpu_to_le16(0x8000)
/*
* these functions retrieve specific information from
* the id field in the iwl_host_cmd struct, which contains
* the command id, the group id and the version of the command,
* and compose such an id back from those parts
*/
static inline u8 iwl_cmd_opcode(u32 cmdid)
{
return cmdid & 0xFF;
}
static inline u8 iwl_cmd_groupid(u32 cmdid)
{
return ((cmdid & 0xFF00) >> 8);
}
static inline u8 iwl_cmd_version(u32 cmdid)
{
return ((cmdid & 0xFF0000) >> 16);
}
static inline u32 iwl_cmd_id(u8 opcode, u8 groupid, u8 version)
{
return opcode + (groupid << 8) + (version << 16);
}
/* make u16 wide id out of u8 group and opcode */
#define WIDE_ID(grp, opcode) ((grp << 8) | opcode)
/* due to the conversion, this group is special; new groups
* should be defined in the appropriate fw-api header files
*/
#define IWL_ALWAYS_LONG_GROUP 1
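The helpers above fix the id layout as opcode in bits 0-7, group id in bits 8-15 and version in bits 16-23. A standalone round-trip check of that packing (the helper names are local copies and the values arbitrary examples):

#include <assert.h>
#include <stdio.h>
#include <stdint.h>

static uint8_t cmd_opcode(uint32_t id)  { return id & 0xFF; }
static uint8_t cmd_groupid(uint32_t id) { return (id & 0xFF00) >> 8; }
static uint8_t cmd_version(uint32_t id) { return (id & 0xFF0000) >> 16; }

static uint32_t cmd_id(uint8_t opcode, uint8_t groupid, uint8_t version)
{
	return opcode + (groupid << 8) + (version << 16);
}

int main(void)
{
	uint32_t id = cmd_id(0x5a, 0x01, 0x02);

	assert(cmd_opcode(id) == 0x5a);
	assert(cmd_groupid(id) == 0x01);
	assert(cmd_version(id) == 0x02);
	printf("id = 0x%06x\n", (unsigned)id);	/* prints id = 0x02015a */
	return 0;
}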
/**
* struct iwl_cmd_header
*
@ -130,7 +164,7 @@
*/
struct iwl_cmd_header {
u8 cmd; /* Command ID: REPLY_RXON, etc. */
u8 flags; /* 0:5 reserved, 6 abort, 7 internal */
u8 group_id;
/*
* The driver sets up the sequence number to values of its choosing.
* uCode does not use this value, but passes it back to the driver
@ -154,9 +188,22 @@ struct iwl_cmd_header {
__le16 sequence;
} __packed;
/* iwl_cmd_header flags value */
#define IWL_CMD_FAILED_MSK 0x40
/**
* struct iwl_cmd_header_wide
*
* This header format appears in the beginning of each command sent from the
* driver, and each response/notification received from uCode.
* this is the wide version that contains more information about the command
* like length, version and command type
*/
struct iwl_cmd_header_wide {
u8 cmd;
u8 group_id;
__le16 sequence;
__le16 length;
u8 reserved;
u8 version;
} __packed;
#define FH_RSCSR_FRAME_SIZE_MSK 0x00003FFF /* bits 0-13 */
#define FH_RSCSR_FRAME_INVALID 0x55550000
@ -201,6 +248,8 @@ static inline u32 iwl_rx_packet_payload_len(const struct iwl_rx_packet *pkt)
* @CMD_MAKE_TRANS_IDLE: The command response should mark the trans as idle.
* @CMD_WAKE_UP_TRANS: The command response should wake up the trans
* (i.e. mark it as non-idle).
* @CMD_TB_BITMAP_POS: Position of the first bit for the TB bitmap. We need to
* check that we leave enough room for the TBs bitmap which needs 20 bits.
*/
enum CMD_MODE {
CMD_ASYNC = BIT(0),
@ -210,6 +259,8 @@ enum CMD_MODE {
CMD_SEND_IN_IDLE = BIT(4),
CMD_MAKE_TRANS_IDLE = BIT(5),
CMD_WAKE_UP_TRANS = BIT(6),
CMD_TB_BITMAP_POS = 11,
};
#define DEF_CMD_PAYLOAD_SIZE 320
@ -222,8 +273,18 @@ enum CMD_MODE {
* aren't fully copied and use other TFD space.
*/
struct iwl_device_cmd {
struct iwl_cmd_header hdr; /* uCode API */
u8 payload[DEF_CMD_PAYLOAD_SIZE];
union {
struct {
struct iwl_cmd_header hdr; /* uCode API */
u8 payload[DEF_CMD_PAYLOAD_SIZE];
};
struct {
struct iwl_cmd_header_wide hdr_wide;
u8 payload_wide[DEF_CMD_PAYLOAD_SIZE -
sizeof(struct iwl_cmd_header_wide) +
sizeof(struct iwl_cmd_header)];
};
};
} __packed;
#define TFD_MAX_PAYLOAD_SIZE (sizeof(struct iwl_device_cmd))
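The union above keeps both layouts at the same TFD_MAX_PAYLOAD_SIZE: payload_wide gives up exactly the four bytes by which the wide header outgrows the narrow one. A standalone sketch with a compile-time size check (field types simplified to plain integers, __packed spelled out as the GCC attribute):

#include <stdio.h>
#include <stdint.h>

#define DEF_CMD_PAYLOAD_SIZE 320

struct hdr {			/* 4 bytes, like iwl_cmd_header */
	uint8_t cmd, group_id;
	uint16_t sequence;
} __attribute__((packed));

struct hdr_wide {		/* 8 bytes, like iwl_cmd_header_wide */
	uint8_t cmd, group_id;
	uint16_t sequence, length;
	uint8_t reserved, version;
} __attribute__((packed));

struct device_cmd {
	union {
		struct {
			struct hdr hdr;
			uint8_t payload[DEF_CMD_PAYLOAD_SIZE];
		};
		struct {
			struct hdr_wide hdr_wide;
			uint8_t payload_wide[DEF_CMD_PAYLOAD_SIZE -
					     sizeof(struct hdr_wide) +
					     sizeof(struct hdr)];
		};
	};
} __attribute__((packed));

/* both union arms are 324 bytes, so neither layout pads the other */
_Static_assert(sizeof(struct device_cmd) ==
	       sizeof(struct hdr) + DEF_CMD_PAYLOAD_SIZE, "size mismatch");

int main(void)
{
	printf("%zu\n", sizeof(struct device_cmd));	/* prints 324 */
	return 0;
}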
@ -261,24 +322,22 @@ enum iwl_hcmd_dataflag {
* @resp_pkt: response packet, if %CMD_WANT_SKB was set
* @_rx_page_order: (internally used to free response packet)
* @_rx_page_addr: (internally used to free response packet)
* @handler_status: return value of the handler of the command
* (put in setup_rx_handlers) - valid for SYNC mode only
* @flags: can be CMD_*
* @len: array of the lengths of the chunks in data
* @dataflags: IWL_HCMD_DFL_*
* @id: id of the host command
* @id: command id of the host command, for wide commands encoding the
* version and group as well
*/
struct iwl_host_cmd {
const void *data[IWL_MAX_CMD_TBS_PER_TFD];
struct iwl_rx_packet *resp_pkt;
unsigned long _rx_page_addr;
u32 _rx_page_order;
int handler_status;
u32 flags;
u32 id;
u16 len[IWL_MAX_CMD_TBS_PER_TFD];
u8 dataflags[IWL_MAX_CMD_TBS_PER_TFD];
u8 id;
};
static inline void iwl_free_resp(struct iwl_host_cmd *cmd)
@ -379,6 +438,7 @@ enum iwl_trans_status {
* @bc_table_dword: set to true if the BC table expects the byte count to be
* in DWORD (as opposed to bytes)
* @scd_set_active: should the transport configure the SCD for HCMD queue
* @wide_cmd_header: firmware supports wide host command header
* @command_names: array of command names, must be 256 entries
* (one for each command); for debugging only
* @sdio_adma_addr: the default address to set for the ADMA in SDIO mode until
@ -396,6 +456,7 @@ struct iwl_trans_config {
bool rx_buf_size_8k;
bool bc_table_dword;
bool scd_set_active;
bool wide_cmd_header;
const char *const *command_names;
u32 sdio_adma_addr;
@ -544,10 +605,12 @@ struct iwl_trans_ops {
u32 value);
void (*ref)(struct iwl_trans *trans);
void (*unref)(struct iwl_trans *trans);
void (*suspend)(struct iwl_trans *trans);
int (*suspend)(struct iwl_trans *trans);
void (*resume)(struct iwl_trans *trans);
struct iwl_trans_dump_data *(*dump_data)(struct iwl_trans *trans);
struct iwl_trans_dump_data *(*dump_data)(struct iwl_trans *trans,
struct iwl_fw_dbg_trigger_tlv
*trigger);
};
/**
@ -584,6 +647,8 @@ enum iwl_d0i3_mode {
* @cfg - pointer to the configuration
* @status: a bit-mask of transport status flags
* @dev - pointer to struct device * that represents the device
* @max_skb_frags: maximum number of fragments an SKB can have when transmitted.
* 0 indicates that frag SKBs (NETIF_F_SG) aren't supported.
* @hw_id: a u32 with the ID of the device / sub-device.
* Set during transport allocation.
* @hw_id_str: a string with info about HW ID. Set during transport allocation.
@ -603,6 +668,12 @@ enum iwl_d0i3_mode {
* @dbg_conf_tlv: array of pointers to configuration TLVs for debug
* @dbg_trigger_tlv: array of pointers to triggers TLVs for debug
* @dbg_dest_reg_num: num of reg_ops in %dbg_dest_tlv
* @paging_req_addr: The location where the FW will upload / download the pages
* from. The address is set by the opmode
* @paging_db: Pointer to the opmode paging data base, the pointer is set by
* the opmode.
* @paging_download_buf: Buffer used for copying all of the pages before
* downloading them to the FW. The buffer is allocated in the opmode
*/
struct iwl_trans {
const struct iwl_trans_ops *ops;
@ -612,6 +683,7 @@ struct iwl_trans {
unsigned long status;
struct device *dev;
u32 max_skb_frags;
u32 hw_rev;
u32 hw_id;
char hw_id_str[52];
@ -639,6 +711,14 @@ struct iwl_trans {
struct iwl_fw_dbg_trigger_tlv * const *dbg_trigger_tlv;
u8 dbg_dest_reg_num;
/*
* Paging parameters - All of the parameters should be set by the
* opmode when paging is enabled
*/
u32 paging_req_addr;
struct iwl_fw_paging *paging_db;
void *paging_download_buf;
enum iwl_d0i3_mode d0i3_mode;
bool wowlan_d0i3;
@ -730,7 +810,8 @@ static inline void iwl_trans_stop_device(struct iwl_trans *trans)
static inline void iwl_trans_d3_suspend(struct iwl_trans *trans, bool test)
{
might_sleep();
trans->ops->d3_suspend(trans, test);
if (trans->ops->d3_suspend)
trans->ops->d3_suspend(trans, test);
}
static inline int iwl_trans_d3_resume(struct iwl_trans *trans,
@ -738,6 +819,9 @@ static inline int iwl_trans_d3_resume(struct iwl_trans *trans,
bool test)
{
might_sleep();
if (!trans->ops->d3_resume)
return 0;
return trans->ops->d3_resume(trans, status, test);
}
@ -753,10 +837,12 @@ static inline void iwl_trans_unref(struct iwl_trans *trans)
trans->ops->unref(trans);
}
static inline void iwl_trans_suspend(struct iwl_trans *trans)
static inline int iwl_trans_suspend(struct iwl_trans *trans)
{
if (trans->ops->suspend)
trans->ops->suspend(trans);
if (!trans->ops->suspend)
return 0;
return trans->ops->suspend(trans);
}
static inline void iwl_trans_resume(struct iwl_trans *trans)
@ -766,11 +852,12 @@ static inline void iwl_trans_resume(struct iwl_trans *trans)
}
static inline struct iwl_trans_dump_data *
iwl_trans_dump_data(struct iwl_trans *trans)
iwl_trans_dump_data(struct iwl_trans *trans,
struct iwl_fw_dbg_trigger_tlv *trigger)
{
if (!trans->ops->dump_data)
return NULL;
return trans->ops->dump_data(trans);
return trans->ops->dump_data(trans, trigger);
}
static inline int iwl_trans_send_cmd(struct iwl_trans *trans,

View File

@ -6,6 +6,7 @@ iwlmvm-y += power.o coex.o coex_legacy.o
iwlmvm-y += tt.o offloading.o tdls.o
iwlmvm-$(CONFIG_IWLWIFI_DEBUGFS) += debugfs.o debugfs-vif.o
iwlmvm-$(CONFIG_IWLWIFI_LEDS) += led.o
iwlmvm-y += tof.o
iwlmvm-$(CONFIG_PM_SLEEP) += d3.o
ccflags-y += -D__CHECK_ENDIAN__ -I$(src)/../

View File

@ -725,15 +725,17 @@ static void iwl_mvm_bt_coex_notif_handle(struct iwl_mvm *mvm)
}
}
int iwl_mvm_rx_bt_coex_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *dev_cmd)
void iwl_mvm_rx_bt_coex_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_bt_coex_profile_notif *notif = (void *)pkt->data;
if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_BT_COEX_SPLIT))
return iwl_mvm_rx_bt_coex_notif_old(mvm, rxb, dev_cmd);
if (!fw_has_api(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_API_BT_COEX_SPLIT)) {
iwl_mvm_rx_bt_coex_notif_old(mvm, rxb);
return;
}
IWL_DEBUG_COEX(mvm, "BT Coex Notification received\n");
IWL_DEBUG_COEX(mvm, "\tBT ci compliance %d\n", notif->bt_ci_compliance);
@ -748,12 +750,6 @@ int iwl_mvm_rx_bt_coex_notif(struct iwl_mvm *mvm,
memcpy(&mvm->last_bt_notif, notif, sizeof(mvm->last_bt_notif));
iwl_mvm_bt_coex_notif_handle(mvm);
/*
* This is an async handler for a notification, returning anything other
* than 0 doesn't make sense even if HCMD failed.
*/
return 0;
}
void iwl_mvm_bt_rssi_event(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
@ -947,9 +943,8 @@ void iwl_mvm_bt_coex_vif_change(struct iwl_mvm *mvm)
iwl_mvm_bt_coex_notif_handle(mvm);
}
int iwl_mvm_rx_ant_coupling_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *dev_cmd)
void iwl_mvm_rx_ant_coupling_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
u32 ant_isolation = le32_to_cpup((void *)pkt->data);
@ -957,20 +952,23 @@ int iwl_mvm_rx_ant_coupling_notif(struct iwl_mvm *mvm,
u8 __maybe_unused lower_bound, upper_bound;
u8 lut;
if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_BT_COEX_SPLIT))
return iwl_mvm_rx_ant_coupling_notif_old(mvm, rxb, dev_cmd);
if (!fw_has_api(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_API_BT_COEX_SPLIT)) {
iwl_mvm_rx_ant_coupling_notif_old(mvm, rxb);
return;
}
if (!iwl_mvm_bt_is_plcr_supported(mvm))
return 0;
return;
lockdep_assert_held(&mvm->mutex);
/* Ignore updates if we are in force mode */
if (unlikely(mvm->bt_force_ant_mode != BT_FORCE_ANT_DIS))
return 0;
return;
if (ant_isolation == mvm->last_ant_isol)
return 0;
return;
for (lut = 0; lut < ARRAY_SIZE(antenna_coupling_ranges) - 1; lut++)
if (ant_isolation < antenna_coupling_ranges[lut + 1].range)
@ -989,7 +987,7 @@ int iwl_mvm_rx_ant_coupling_notif(struct iwl_mvm *mvm,
mvm->last_ant_isol = ant_isolation;
if (mvm->last_corun_lut == lut)
return 0;
return;
mvm->last_corun_lut = lut;
@ -1000,6 +998,8 @@ int iwl_mvm_rx_ant_coupling_notif(struct iwl_mvm *mvm,
memcpy(&cmd.corun_lut40, antenna_coupling_ranges[lut].lut20,
sizeof(cmd.corun_lut40));
return iwl_mvm_send_cmd_pdu(mvm, BT_COEX_UPDATE_CORUN_LUT, 0,
sizeof(cmd), &cmd);
if (iwl_mvm_send_cmd_pdu(mvm, BT_COEX_UPDATE_CORUN_LUT, 0,
sizeof(cmd), &cmd))
IWL_ERR(mvm,
"failed to send BT_COEX_UPDATE_CORUN_LUT command\n");
}

View File

@ -1058,9 +1058,8 @@ static void iwl_mvm_bt_coex_notif_handle(struct iwl_mvm *mvm)
IWL_ERR(mvm, "Failed to update the ctrl_kill_msk\n");
}
int iwl_mvm_rx_bt_coex_notif_old(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *dev_cmd)
void iwl_mvm_rx_bt_coex_notif_old(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_bt_coex_profile_notif_old *notif = (void *)pkt->data;
@ -1083,12 +1082,6 @@ int iwl_mvm_rx_bt_coex_notif_old(struct iwl_mvm *mvm,
memcpy(&mvm->last_bt_notif_old, notif, sizeof(mvm->last_bt_notif_old));
iwl_mvm_bt_coex_notif_handle(mvm);
/*
* This is an async handler for a notification, returning anything other
* than 0 doesn't make sense even if HCMD failed.
*/
return 0;
}
static void iwl_mvm_bt_rssi_iterator(void *_data, u8 *mac,
@ -1250,14 +1243,12 @@ void iwl_mvm_bt_coex_vif_change_old(struct iwl_mvm *mvm)
iwl_mvm_bt_coex_notif_handle(mvm);
}
int iwl_mvm_rx_ant_coupling_notif_old(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *dev_cmd)
void iwl_mvm_rx_ant_coupling_notif_old(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
u32 ant_isolation = le32_to_cpup((void *)pkt->data);
u8 __maybe_unused lower_bound, upper_bound;
int ret;
u8 lut;
struct iwl_bt_coex_cmd_old *bt_cmd;
@ -1268,16 +1259,16 @@ int iwl_mvm_rx_ant_coupling_notif_old(struct iwl_mvm *mvm,
};
if (!iwl_mvm_bt_is_plcr_supported(mvm))
return 0;
return;
lockdep_assert_held(&mvm->mutex);
/* Ignore updates if we are in force mode */
if (unlikely(mvm->bt_force_ant_mode != BT_FORCE_ANT_DIS))
return 0;
return;
if (ant_isolation == mvm->last_ant_isol)
return 0;
return;
for (lut = 0; lut < ARRAY_SIZE(antenna_coupling_ranges) - 1; lut++)
if (ant_isolation < antenna_coupling_ranges[lut + 1].range)
@ -1296,13 +1287,13 @@ int iwl_mvm_rx_ant_coupling_notif_old(struct iwl_mvm *mvm,
mvm->last_ant_isol = ant_isolation;
if (mvm->last_corun_lut == lut)
return 0;
return;
mvm->last_corun_lut = lut;
bt_cmd = kzalloc(sizeof(*bt_cmd), GFP_KERNEL);
if (!bt_cmd)
return 0;
return;
cmd.data[0] = bt_cmd;
bt_cmd->flags = cpu_to_le32(BT_COEX_NW_OLD);
@ -1317,8 +1308,8 @@ int iwl_mvm_rx_ant_coupling_notif_old(struct iwl_mvm *mvm,
memcpy(bt_cmd->bt4_corun_lut40, antenna_coupling_ranges[lut].lut20,
sizeof(bt_cmd->bt4_corun_lut40));
ret = iwl_mvm_send_cmd(mvm, &cmd);
if (iwl_mvm_send_cmd(mvm, &cmd))
IWL_ERR(mvm, "failed to send BT_CONFIG command\n");
kfree(bt_cmd);
return ret;
}

View File

@ -102,6 +102,7 @@
#define IWL_MVM_QUOTA_THRESHOLD 4
#define IWL_MVM_RS_RSSI_BASED_INIT_RATE 0
#define IWL_MVM_RS_DISABLE_P2P_MIMO 0
#define IWL_MVM_TOF_IS_RESPONDER 0
#define IWL_MVM_RS_NUM_TRY_BEFORE_ANT_TOGGLE 1
#define IWL_MVM_RS_HT_VHT_RETRIES_PER_RATE 2
#define IWL_MVM_RS_HT_VHT_RETRIES_PER_RATE_TW 1

View File

@ -1145,7 +1145,7 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
static int iwl_mvm_enter_d0i3_sync(struct iwl_mvm *mvm)
{
struct iwl_notification_wait wait_d3;
static const u8 d3_notif[] = { D3_CONFIG_CMD };
static const u16 d3_notif[] = { D3_CONFIG_CMD };
int ret;
iwl_init_notification_wait(&mvm->notif_wait, &wait_d3,
@ -1168,13 +1168,17 @@ remove_notif:
int iwl_mvm_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
int ret;
ret = iwl_trans_suspend(mvm->trans);
if (ret)
return ret;
iwl_trans_suspend(mvm->trans);
mvm->trans->wowlan_d0i3 = wowlan->any;
if (mvm->trans->wowlan_d0i3) {
/* 'any' trigger means d0i3 usage */
if (mvm->trans->d0i3_mode == IWL_D0I3_MODE_ON_SUSPEND) {
int ret = iwl_mvm_enter_d0i3_sync(mvm);
ret = iwl_mvm_enter_d0i3_sync(mvm);
if (ret)
return ret;
@ -1183,6 +1187,9 @@ int iwl_mvm_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
mutex_lock(&mvm->d0i3_suspend_mutex);
__set_bit(D0I3_DEFER_WAKEUP, &mvm->d0i3_suspend_flags);
mutex_unlock(&mvm->d0i3_suspend_mutex);
iwl_trans_d3_suspend(mvm->trans, false);
return 0;
}
@ -1935,28 +1942,59 @@ out:
return 1;
}
static int iwl_mvm_resume_d3(struct iwl_mvm *mvm)
{
iwl_trans_resume(mvm->trans);
return __iwl_mvm_resume(mvm, false);
}
static int iwl_mvm_resume_d0i3(struct iwl_mvm *mvm)
{
bool exit_now;
enum iwl_d3_status d3_status;
iwl_trans_d3_resume(mvm->trans, &d3_status, false);
/*
* make sure to clear D0I3_DEFER_WAKEUP before
* calling iwl_trans_resume(), which might wait
* for d0i3 exit completion.
*/
mutex_lock(&mvm->d0i3_suspend_mutex);
__clear_bit(D0I3_DEFER_WAKEUP, &mvm->d0i3_suspend_flags);
exit_now = __test_and_clear_bit(D0I3_PENDING_WAKEUP,
&mvm->d0i3_suspend_flags);
mutex_unlock(&mvm->d0i3_suspend_mutex);
if (exit_now) {
IWL_DEBUG_RPM(mvm, "Run deferred d0i3 exit\n");
_iwl_mvm_exit_d0i3(mvm);
}
iwl_trans_resume(mvm->trans);
if (mvm->trans->d0i3_mode == IWL_D0I3_MODE_ON_SUSPEND) {
int ret = iwl_mvm_exit_d0i3(mvm->hw->priv);
if (ret)
return ret;
/*
* d0i3 exit will be deferred until reconfig_complete.
* make sure we are out of d0i3 there.
*/
}
return 0;
}
int iwl_mvm_resume(struct ieee80211_hw *hw)
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
iwl_trans_resume(mvm->trans);
if (mvm->hw->wiphy->wowlan_config->any) {
/* 'any' trigger means d0i3 usage */
if (mvm->trans->d0i3_mode == IWL_D0I3_MODE_ON_SUSPEND) {
int ret = iwl_mvm_exit_d0i3(hw->priv);
if (ret)
return ret;
/*
* d0i3 exit will be deferred until reconfig_complete.
* make sure we are out of d0i3 there.
*/
}
return 0;
}
return __iwl_mvm_resume(mvm, false);
/* 'any' trigger means d0i3 was used */
if (hw->wiphy->wowlan_config->any)
return iwl_mvm_resume_d0i3(mvm);
else
return iwl_mvm_resume_d3(mvm);
}
void iwl_mvm_set_wakeup(struct ieee80211_hw *hw, bool enabled)

View File

@ -63,6 +63,7 @@
*
*****************************************************************************/
#include "mvm.h"
#include "fw-api-tof.h"
#include "debugfs.h"
static void iwl_dbgfs_update_pm(struct iwl_mvm *mvm,
@ -497,6 +498,731 @@ static ssize_t iwl_dbgfs_bf_params_read(struct file *file,
return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
}
static inline char *iwl_dbgfs_is_match(char *name, char *buf)
{
int len = strlen(name);
return !strncmp(name, buf, len) ? buf + len : NULL;
}
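iwl_dbgfs_is_match() returns a pointer just past the matched prefix, or NULL, so every branch below can hand the remainder of the written line straight to kstrtou32. A standalone sketch of the same prefix-parse pattern (the key name is a made-up example):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static char *is_match(const char *name, char *buf)
{
	size_t len = strlen(name);

	return !strncmp(name, buf, len) ? buf + len : NULL;
}

int main(void)
{
	char line[] = "burst_period=64";
	char *data = is_match("burst_period=", line);

	if (data)	/* data now points at "64" */
		printf("burst_period = %ld\n", strtol(data, NULL, 10));
	return 0;
}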
static ssize_t iwl_dbgfs_tof_enable_write(struct ieee80211_vif *vif,
char *buf,
size_t count, loff_t *ppos)
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct iwl_mvm *mvm = mvmvif->mvm;
int value, ret = -EINVAL;
char *data;
mutex_lock(&mvm->mutex);
data = iwl_dbgfs_is_match("tof_disabled=", buf);
if (data) {
ret = kstrtou32(data, 10, &value);
if (ret == 0)
mvm->tof_data.tof_cfg.tof_disabled = value;
goto out;
}
data = iwl_dbgfs_is_match("one_sided_disabled=", buf);
if (data) {
ret = kstrtou32(data, 10, &value);
if (ret == 0)
mvm->tof_data.tof_cfg.one_sided_disabled = value;
goto out;
}
data = iwl_dbgfs_is_match("is_debug_mode=", buf);
if (data) {
ret = kstrtou32(data, 10, &value);
if (ret == 0)
mvm->tof_data.tof_cfg.is_debug_mode = value;
goto out;
}
data = iwl_dbgfs_is_match("is_buf=", buf);
if (data) {
ret = kstrtou32(data, 10, &value);
if (ret == 0)
mvm->tof_data.tof_cfg.is_buf_required = value;
goto out;
}
data = iwl_dbgfs_is_match("send_tof_cfg=", buf);
if (data) {
ret = kstrtou32(data, 10, &value);
if (ret == 0 && value) {
ret = iwl_mvm_tof_config_cmd(mvm);
goto out;
}
}
out:
mutex_unlock(&mvm->mutex);
return ret ?: count;
}
static ssize_t iwl_dbgfs_tof_enable_read(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos)
{
struct ieee80211_vif *vif = file->private_data;
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct iwl_mvm *mvm = mvmvif->mvm;
char buf[256];
int pos = 0;
const size_t bufsz = sizeof(buf);
struct iwl_tof_config_cmd *cmd;
cmd = &mvm->tof_data.tof_cfg;
mutex_lock(&mvm->mutex);
pos += scnprintf(buf + pos, bufsz - pos, "tof_disabled = %d\n",
cmd->tof_disabled);
pos += scnprintf(buf + pos, bufsz - pos, "one_sided_disabled = %d\n",
cmd->one_sided_disabled);
pos += scnprintf(buf + pos, bufsz - pos, "is_debug_mode = %d\n",
cmd->is_debug_mode);
pos += scnprintf(buf + pos, bufsz - pos, "is_buf_required = %d\n",
cmd->is_buf_required);
mutex_unlock(&mvm->mutex);
return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
}
static ssize_t iwl_dbgfs_tof_responder_params_write(struct ieee80211_vif *vif,
char *buf,
size_t count, loff_t *ppos)
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct iwl_mvm *mvm = mvmvif->mvm;
int value, ret = 0;
char *data;
mutex_lock(&mvm->mutex);
data = iwl_dbgfs_is_match("burst_period=", buf);
if (data) {
ret = kstrtou32(data, 10, &value);
if (!ret)
mvm->tof_data.responder_cfg.burst_period =
cpu_to_le16(value);
goto out;
}
data = iwl_dbgfs_is_match("min_delta_ftm=", buf);
if (data) {
ret = kstrtou32(data, 10, &value);
if (ret == 0)
mvm->tof_data.responder_cfg.min_delta_ftm = value;
goto out;
}
data = iwl_dbgfs_is_match("burst_duration=", buf);
if (data) {
ret = kstrtou32(data, 10, &value);
if (ret == 0)
mvm->tof_data.responder_cfg.burst_duration = value;
goto out;
}
data = iwl_dbgfs_is_match("num_of_burst_exp=", buf);
if (data) {
ret = kstrtou32(data, 10, &value);
if (ret == 0)
mvm->tof_data.responder_cfg.num_of_burst_exp = value;
goto out;
}
data = iwl_dbgfs_is_match("abort_responder=", buf);
if (data) {
ret = kstrtou32(data, 10, &value);
if (ret == 0)
mvm->tof_data.responder_cfg.abort_responder = value;
goto out;
}
data = iwl_dbgfs_is_match("get_ch_est=", buf);
if (data) {
ret = kstrtou32(data, 10, &value);
if (ret == 0)
mvm->tof_data.responder_cfg.get_ch_est = value;
goto out;
}
data = iwl_dbgfs_is_match("recv_sta_req_params=", buf);
if (data) {
ret = kstrtou32(data, 10, &value);
if (ret == 0)
mvm->tof_data.responder_cfg.recv_sta_req_params = value;
goto out;
}
data = iwl_dbgfs_is_match("channel_num=", buf);
if (data) {
ret = kstrtou32(data, 10, &value);
if (ret == 0)
mvm->tof_data.responder_cfg.channel_num = value;
goto out;
}
data = iwl_dbgfs_is_match("bandwidth=", buf);
if (data) {
ret = kstrtou32(data, 10, &value);
if (ret == 0)
mvm->tof_data.responder_cfg.bandwidth = value;
goto out;
}
data = iwl_dbgfs_is_match("rate=", buf);
if (data) {
ret = kstrtou32(data, 10, &value);
if (ret == 0)
mvm->tof_data.responder_cfg.rate = value;
goto out;
}
data = iwl_dbgfs_is_match("bssid=", buf);
if (data) {
u8 *mac = mvm->tof_data.responder_cfg.bssid;
if (!mac_pton(data, mac)) {
ret = -EINVAL;
goto out;
}
}
data = iwl_dbgfs_is_match("tsf_timer_offset_msecs=", buf);
if (data) {
ret = kstrtou32(data, 10, &value);
if (ret == 0)
mvm->tof_data.responder_cfg.tsf_timer_offset_msecs =
cpu_to_le16(value);
goto out;
}
data = iwl_dbgfs_is_match("toa_offset=", buf);
if (data) {
ret = kstrtou32(data, 10, &value);
if (ret == 0)
mvm->tof_data.responder_cfg.toa_offset =
cpu_to_le16(value);
goto out;
}
data = iwl_dbgfs_is_match("ctrl_ch_position=", buf);
if (data) {
ret = kstrtou32(data, 10, &value);
if (ret == 0)
mvm->tof_data.responder_cfg.ctrl_ch_position = value;
goto out;
}
data = iwl_dbgfs_is_match("ftm_per_burst=", buf);
if (data) {
ret = kstrtou32(data, 10, &value);
if (ret == 0)
mvm->tof_data.responder_cfg.ftm_per_burst = value;
goto out;
}
data = iwl_dbgfs_is_match("ftm_resp_ts_avail=", buf);
if (data) {
ret = kstrtou32(data, 10, &value);
if (ret == 0)
mvm->tof_data.responder_cfg.ftm_resp_ts_avail = value;
goto out;
}
data = iwl_dbgfs_is_match("asap_mode=", buf);
if (data) {
ret = kstrtou32(data, 10, &value);
if (ret == 0)
mvm->tof_data.responder_cfg.asap_mode = value;
goto out;
}
data = iwl_dbgfs_is_match("send_responder_cfg=", buf);
if (data) {
ret = kstrtou32(data, 10, &value);
if (ret == 0 && value) {
ret = iwl_mvm_tof_responder_cmd(mvm, vif);
goto out;
}
}
out:
mutex_unlock(&mvm->mutex);
return ret ?: count;
}
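/*
 * Not part of this hunk: the write handlers above depend on
 * iwl_dbgfs_is_match() for "key=value" prefix matching. A minimal sketch
 * of such a helper (assumed shape; it returns a pointer just past the
 * matched prefix, or NULL on mismatch):
 */
static inline char *iwl_dbgfs_is_match(char *name, char *buf)
{
	int len = strlen(name);

	return !strncmp(name, buf, len) ? buf + len : NULL;
}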
static ssize_t iwl_dbgfs_tof_responder_params_read(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos)
{
struct ieee80211_vif *vif = file->private_data;
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct iwl_mvm *mvm = mvmvif->mvm;
char buf[256];
int pos = 0;
const size_t bufsz = sizeof(buf);
struct iwl_tof_responder_config_cmd *cmd;
cmd = &mvm->tof_data.responder_cfg;
mutex_lock(&mvm->mutex);
pos += scnprintf(buf + pos, bufsz - pos, "burst_period = %d\n",
le16_to_cpu(cmd->burst_period));
pos += scnprintf(buf + pos, bufsz - pos, "burst_duration = %d\n",
cmd->burst_duration);
pos += scnprintf(buf + pos, bufsz - pos, "bandwidth = %d\n",
cmd->bandwidth);
pos += scnprintf(buf + pos, bufsz - pos, "channel_num = %d\n",
cmd->channel_num);
pos += scnprintf(buf + pos, bufsz - pos, "ctrl_ch_position = 0x%x\n",
cmd->ctrl_ch_position);
pos += scnprintf(buf + pos, bufsz - pos, "bssid = %pM\n",
cmd->bssid);
pos += scnprintf(buf + pos, bufsz - pos, "min_delta_ftm = %d\n",
cmd->min_delta_ftm);
pos += scnprintf(buf + pos, bufsz - pos, "num_of_burst_exp = %d\n",
cmd->num_of_burst_exp);
pos += scnprintf(buf + pos, bufsz - pos, "rate = %d\n", cmd->rate);
pos += scnprintf(buf + pos, bufsz - pos, "abort_responder = %d\n",
cmd->abort_responder);
pos += scnprintf(buf + pos, bufsz - pos, "get_ch_est = %d\n",
cmd->get_ch_est);
pos += scnprintf(buf + pos, bufsz - pos, "recv_sta_req_params = %d\n",
cmd->recv_sta_req_params);
pos += scnprintf(buf + pos, bufsz - pos, "ftm_per_burst = %d\n",
cmd->ftm_per_burst);
pos += scnprintf(buf + pos, bufsz - pos, "ftm_resp_ts_avail = %d\n",
cmd->ftm_resp_ts_avail);
pos += scnprintf(buf + pos, bufsz - pos, "asap_mode = %d\n",
cmd->asap_mode);
pos += scnprintf(buf + pos, bufsz - pos,
"tsf_timer_offset_msecs = %d\n",
le16_to_cpu(cmd->tsf_timer_offset_msecs));
pos += scnprintf(buf + pos, bufsz - pos, "toa_offset = %d\n",
le16_to_cpu(cmd->toa_offset));
mutex_unlock(&mvm->mutex);
return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
}
static ssize_t iwl_dbgfs_tof_range_request_write(struct ieee80211_vif *vif,
char *buf, size_t count,
loff_t *ppos)
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct iwl_mvm *mvm = mvmvif->mvm;
u32 value;
int ret = 0;
char *data;
mutex_lock(&mvm->mutex);
data = iwl_dbgfs_is_match("request_id=", buf);
if (data) {
ret = kstrtou32(data, 10, &value);
if (ret == 0)
mvm->tof_data.range_req.request_id = value;
goto out;
}
data = iwl_dbgfs_is_match("initiator=", buf);
if (data) {
ret = kstrtou32(data, 10, &value);
if (ret == 0)
mvm->tof_data.range_req.initiator = value;
goto out;
}
data = iwl_dbgfs_is_match("one_sided_los_disable=", buf);
if (data) {
ret = kstrtou32(data, 10, &value);
if (ret == 0)
mvm->tof_data.range_req.one_sided_los_disable = value;
goto out;
}
data = iwl_dbgfs_is_match("req_timeout=", buf);
if (data) {
ret = kstrtou32(data, 10, &value);
if (ret == 0)
mvm->tof_data.range_req.req_timeout = value;
goto out;
}
data = iwl_dbgfs_is_match("report_policy=", buf);
if (data) {
ret = kstrtou32(data, 10, &value);
if (ret == 0)
mvm->tof_data.range_req.report_policy = value;
goto out;
}
data = iwl_dbgfs_is_match("macaddr_random=", buf);
if (data) {
ret = kstrtou32(data, 10, &value);
if (ret == 0)
mvm->tof_data.range_req.macaddr_random = value;
goto out;
}
data = iwl_dbgfs_is_match("num_of_ap=", buf);
if (data) {
ret = kstrtou32(data, 10, &value);
if (ret == 0)
mvm->tof_data.range_req.num_of_ap = value;
goto out;
}
data = iwl_dbgfs_is_match("macaddr_template=", buf);
if (data) {
u8 mac[ETH_ALEN];
if (!mac_pton(data, mac)) {
ret = -EINVAL;
goto out;
}
memcpy(mvm->tof_data.range_req.macaddr_template, mac, ETH_ALEN);
}
data = iwl_dbgfs_is_match("macaddr_mask=", buf);
if (data) {
u8 mac[ETH_ALEN];
if (!mac_pton(data, mac)) {
ret = -EINVAL;
goto out;
}
memcpy(mvm->tof_data.range_req.macaddr_mask, mac, ETH_ALEN);
}
data = iwl_dbgfs_is_match("ap=", buf);
if (data) {
struct iwl_tof_range_req_ap_entry ap;
int size = sizeof(struct iwl_tof_range_req_ap_entry);
u16 burst_period;
u8 *mac = ap.bssid;
unsigned int i;
if (sscanf(data, "%u %hhd %hhx %hhx"
"%hhx:%hhx:%hhx:%hhx:%hhx:%hhx"
"%hhx %hhx %hx"
"%hhx %hhx %x"
"%hhx %hhx %hhx %hhx",
&i, &ap.channel_num, &ap.bandwidth,
&ap.ctrl_ch_position,
mac, mac + 1, mac + 2, mac + 3, mac + 4, mac + 5,
&ap.measure_type, &ap.num_of_bursts,
&burst_period,
&ap.samples_per_burst, &ap.retries_per_sample,
&ap.tsf_delta, &ap.location_req, &ap.asap_mode,
&ap.enable_dyn_ack, &ap.rssi) != 20) {
ret = -EINVAL;
goto out;
}
if (i >= IWL_MVM_TOF_MAX_APS) {
IWL_ERR(mvm, "Invalid AP index %d\n", i);
ret = -EINVAL;
goto out;
}
ap.burst_period = cpu_to_le16(burst_period);
memcpy(&mvm->tof_data.range_req.ap[i], &ap, size);
goto out;
}
data = iwl_dbgfs_is_match("send_range_request=", buf);
if (data) {
ret = kstrtou32(data, 10, &value);
if (ret == 0 && value) {
ret = iwl_mvm_tof_range_request_cmd(mvm, vif);
goto out;
}
}
out:
mutex_unlock(&mvm->mutex);
return ret ?: count;
}
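/*
 * Illustrative note (not in the original patch): the "ap=" token above is
 * parsed into 20 whitespace-separated fields, in order: AP index,
 * channel_num, bandwidth, ctrl_ch_position, colon-separated BSSID (6
 * bytes), measure_type, num_of_bursts, burst_period, samples_per_burst,
 * retries_per_sample, tsf_delta, location_req, asap_mode, enable_dyn_ack
 * and rssi; most numeric fields are read as hex (%hhx).
 */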
static ssize_t iwl_dbgfs_tof_range_request_read(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos)
{
struct ieee80211_vif *vif = file->private_data;
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct iwl_mvm *mvm = mvmvif->mvm;
char buf[512];
int pos = 0;
const size_t bufsz = sizeof(buf);
struct iwl_tof_range_req_cmd *cmd;
int i;
cmd = &mvm->tof_data.range_req;
mutex_lock(&mvm->mutex);
pos += scnprintf(buf + pos, bufsz - pos, "request_id= %d\n",
cmd->request_id);
pos += scnprintf(buf + pos, bufsz - pos, "initiator= %d\n",
cmd->initiator);
pos += scnprintf(buf + pos, bufsz - pos, "one_sided_los_disable = %d\n",
cmd->one_sided_los_disable);
pos += scnprintf(buf + pos, bufsz - pos, "req_timeout= %d\n",
cmd->req_timeout);
pos += scnprintf(buf + pos, bufsz - pos, "report_policy= %d\n",
cmd->report_policy);
pos += scnprintf(buf + pos, bufsz - pos, "macaddr_random= %d\n",
cmd->macaddr_random);
pos += scnprintf(buf + pos, bufsz - pos, "macaddr_template= %pM\n",
cmd->macaddr_template);
pos += scnprintf(buf + pos, bufsz - pos, "macaddr_mask= %pM\n",
cmd->macaddr_mask);
pos += scnprintf(buf + pos, bufsz - pos, "num_of_ap= %d\n",
cmd->num_of_ap);
for (i = 0; i < cmd->num_of_ap; i++) {
struct iwl_tof_range_req_ap_entry *ap = &cmd->ap[i];
pos += scnprintf(buf + pos, bufsz - pos,
"ap %.2d: channel_num=%hhx bw=%hhx"
" control=%hhx bssid=%pM type=%hhx"
" num_of_bursts=%hhx burst_period=%hx ftm=%hhx"
" retries=%hhx tsf_delta=%x location_req=%hhx "
" asap=%hhx enable=%hhx rssi=%hhx\n",
i, ap->channel_num, ap->bandwidth,
ap->ctrl_ch_position, ap->bssid,
ap->measure_type, ap->num_of_bursts,
ap->burst_period, ap->samples_per_burst,
ap->retries_per_sample, ap->tsf_delta,
ap->location_req, ap->asap_mode,
ap->enable_dyn_ack, ap->rssi);
}
mutex_unlock(&mvm->mutex);
return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
}
static ssize_t iwl_dbgfs_tof_range_req_ext_write(struct ieee80211_vif *vif,
char *buf,
size_t count, loff_t *ppos)
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct iwl_mvm *mvm = mvmvif->mvm;
u32 value;
int ret = 0;
char *data;
mutex_lock(&mvm->mutex);
data = iwl_dbgfs_is_match("tsf_timer_offset_msec=", buf);
if (data) {
ret = kstrtou32(data, 10, &value);
if (ret == 0)
mvm->tof_data.range_req_ext.tsf_timer_offset_msec =
cpu_to_le16(value);
goto out;
}
data = iwl_dbgfs_is_match("min_delta_ftm=", buf);
if (data) {
ret = kstrtou32(data, 10, &value);
if (ret == 0)
mvm->tof_data.range_req_ext.min_delta_ftm = value;
goto out;
}
data = iwl_dbgfs_is_match("ftm_format_and_bw20M=", buf);
if (data) {
ret = kstrtou32(data, 10, &value);
if (ret == 0)
mvm->tof_data.range_req_ext.ftm_format_and_bw20M =
value;
goto out;
}
data = iwl_dbgfs_is_match("ftm_format_and_bw40M=", buf);
if (data) {
ret = kstrtou32(data, 10, &value);
if (ret == 0)
mvm->tof_data.range_req_ext.ftm_format_and_bw40M =
value;
goto out;
}
data = iwl_dbgfs_is_match("ftm_format_and_bw80M=", buf);
if (data) {
ret = kstrtou32(data, 10, &value);
if (ret == 0)
mvm->tof_data.range_req_ext.ftm_format_and_bw80M =
value;
goto out;
}
data = iwl_dbgfs_is_match("send_range_req_ext=", buf);
if (data) {
ret = kstrtou32(data, 10, &value);
if (ret == 0 && value) {
ret = iwl_mvm_tof_range_request_ext_cmd(mvm, vif);
goto out;
}
}
out:
mutex_unlock(&mvm->mutex);
return ret ?: count;
}
static ssize_t iwl_dbgfs_tof_range_req_ext_read(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos)
{
struct ieee80211_vif *vif = file->private_data;
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct iwl_mvm *mvm = mvmvif->mvm;
char buf[256];
int pos = 0;
const size_t bufsz = sizeof(buf);
struct iwl_tof_range_req_ext_cmd *cmd;
cmd = &mvm->tof_data.range_req_ext;
mutex_lock(&mvm->mutex);
pos += scnprintf(buf + pos, bufsz - pos,
"tsf_timer_offset_msec = %hx\n",
cmd->tsf_timer_offset_msec);
pos += scnprintf(buf + pos, bufsz - pos, "min_delta_ftm = %hhx\n",
cmd->min_delta_ftm);
pos += scnprintf(buf + pos, bufsz - pos,
"ftm_format_and_bw20M = %hhx\n",
cmd->ftm_format_and_bw20M);
pos += scnprintf(buf + pos, bufsz - pos,
"ftm_format_and_bw40M = %hhx\n",
cmd->ftm_format_and_bw40M);
pos += scnprintf(buf + pos, bufsz - pos,
"ftm_format_and_bw80M = %hhx\n",
cmd->ftm_format_and_bw80M);
mutex_unlock(&mvm->mutex);
return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
}
static ssize_t iwl_dbgfs_tof_range_abort_write(struct ieee80211_vif *vif,
char *buf,
size_t count, loff_t *ppos)
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct iwl_mvm *mvm = mvmvif->mvm;
u32 value;
int ret = 0;
int abort_id;
char *data;
mutex_lock(&mvm->mutex);
data = iwl_dbgfs_is_match("abort_id=", buf);
if (data) {
ret = kstrtou32(data, 10, &value);
if (ret == 0)
mvm->tof_data.last_abort_id = value;
goto out;
}
data = iwl_dbgfs_is_match("send_range_abort=", buf);
if (data) {
ret = kstrtou32(data, 10, &value);
if (ret == 0 && value) {
abort_id = mvm->tof_data.last_abort_id;
ret = iwl_mvm_tof_range_abort_cmd(mvm, abort_id);
goto out;
}
}
out:
mutex_unlock(&mvm->mutex);
return ret ?: count;
}
static ssize_t iwl_dbgfs_tof_range_abort_read(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos)
{
struct ieee80211_vif *vif = file->private_data;
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct iwl_mvm *mvm = mvmvif->mvm;
char buf[32];
int pos = 0;
const size_t bufsz = sizeof(buf);
int last_abort_id;
mutex_lock(&mvm->mutex);
last_abort_id = mvm->tof_data.last_abort_id;
mutex_unlock(&mvm->mutex);
pos += scnprintf(buf + pos, bufsz - pos, "last_abort_id = %d\n",
last_abort_id);
return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
}
static ssize_t iwl_dbgfs_tof_range_response_read(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos)
{
struct ieee80211_vif *vif = file->private_data;
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct iwl_mvm *mvm = mvmvif->mvm;
char *buf;
int pos = 0;
const size_t bufsz = sizeof(struct iwl_tof_range_rsp_ntfy) + 256;
struct iwl_tof_range_rsp_ntfy *cmd;
int i, ret;
buf = kzalloc(bufsz, GFP_KERNEL);
if (!buf)
return -ENOMEM;
mutex_lock(&mvm->mutex);
cmd = &mvm->tof_data.range_resp;
pos += scnprintf(buf + pos, bufsz - pos, "request_id = %d\n",
cmd->request_id);
pos += scnprintf(buf + pos, bufsz - pos, "status = %d\n",
cmd->request_status);
pos += scnprintf(buf + pos, bufsz - pos, "last_in_batch = %d\n",
cmd->last_in_batch);
pos += scnprintf(buf + pos, bufsz - pos, "num_of_aps = %d\n",
cmd->num_of_aps);
for (i = 0; i < cmd->num_of_aps; i++) {
struct iwl_tof_range_rsp_ap_entry_ntfy *ap = &cmd->ap[i];
pos += scnprintf(buf + pos, bufsz - pos,
"ap %.2d: bssid=%pM status=%hhx bw=%hhx"
" rtt=%x rtt_var=%x rtt_spread=%x"
" rssi=%hhx rssi_spread=%hhx"
" range=%x range_var=%x"
" time_stamp=%x\n",
i, ap->bssid, ap->measure_status,
ap->measure_bw,
ap->rtt, ap->rtt_variance, ap->rtt_spread,
ap->rssi, ap->rssi_spread, ap->range,
ap->range_variance, ap->timestamp);
}
mutex_unlock(&mvm->mutex);
ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
kfree(buf);
return ret;
}
static ssize_t iwl_dbgfs_low_latency_write(struct ieee80211_vif *vif, char *buf,
size_t count, loff_t *ppos)
{
@ -628,6 +1354,12 @@ MVM_DEBUGFS_READ_WRITE_FILE_OPS(bf_params, 256);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(low_latency, 10);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(uapsd_misbehaving, 20);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(rx_phyinfo, 10);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(tof_enable, 32);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(tof_range_request, 512);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(tof_range_req_ext, 32);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(tof_range_abort, 32);
MVM_DEBUGFS_READ_FILE_OPS(tof_range_response);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(tof_responder_params, 32);
void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
@ -671,6 +1403,25 @@ void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
MVM_DEBUGFS_ADD_FILE_VIF(bf_params, mvmvif->dbgfs_dir,
S_IRUSR | S_IWUSR);
if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TOF_SUPPORT) &&
!vif->p2p && (vif->type != NL80211_IFTYPE_P2P_DEVICE)) {
if (IWL_MVM_TOF_IS_RESPONDER && vif->type == NL80211_IFTYPE_AP)
MVM_DEBUGFS_ADD_FILE_VIF(tof_responder_params,
mvmvif->dbgfs_dir,
S_IRUSR | S_IWUSR);
MVM_DEBUGFS_ADD_FILE_VIF(tof_range_request, mvmvif->dbgfs_dir,
S_IRUSR | S_IWUSR);
MVM_DEBUGFS_ADD_FILE_VIF(tof_range_req_ext, mvmvif->dbgfs_dir,
S_IRUSR | S_IWUSR);
MVM_DEBUGFS_ADD_FILE_VIF(tof_enable, mvmvif->dbgfs_dir,
S_IRUSR | S_IWUSR);
MVM_DEBUGFS_ADD_FILE_VIF(tof_range_abort, mvmvif->dbgfs_dir,
S_IRUSR | S_IWUSR);
MVM_DEBUGFS_ADD_FILE_VIF(tof_range_response, mvmvif->dbgfs_dir,
S_IRUSR);
}
/*
* Create symlink for convenience pointing to interface specific
* debugfs entries for the driver. For example, under

View File

@ -974,7 +974,7 @@ static ssize_t iwl_dbgfs_fw_dbg_collect_write(struct iwl_mvm *mvm,
if (ret)
return ret;
iwl_mvm_fw_dbg_collect(mvm, FW_DBG_TRIGGER_USER, NULL, 0, 0);
iwl_mvm_fw_dbg_collect(mvm, FW_DBG_TRIGGER_USER, NULL, 0, NULL);
iwl_mvm_unref(mvm, IWL_MVM_REF_PRPH_WRITE);
@ -1200,12 +1200,7 @@ static ssize_t iwl_dbgfs_d3_sram_read(struct file *file, char __user *user_buf,
if (ptr) {
for (ofs = 0; ofs < len; ofs += 16) {
pos += scnprintf(buf + pos, bufsz - pos,
"0x%.4x ", ofs);
hex_dump_to_buffer(ptr + ofs, 16, 16, 1, buf + pos,
bufsz - pos, false);
pos += strlen(buf + pos);
if (bufsz - pos > 0)
buf[pos++] = '\n';
"0x%.4x %16ph\n", ofs, ptr + ofs);
}
} else {
pos += scnprintf(buf + pos, bufsz - pos,

View File

@ -413,7 +413,7 @@ struct iwl_beacon_filter_cmd {
#define IWL_BF_TEMP_FAST_FILTER_MIN 0
#define IWL_BF_TEMP_SLOW_FILTER_DEFAULT 5
#define IWL_BF_TEMP_SLOW_FILTER_D0I3 5
#define IWL_BF_TEMP_SLOW_FILTER_D0I3 20
#define IWL_BF_TEMP_SLOW_FILTER_MAX 255
#define IWL_BF_TEMP_SLOW_FILTER_MIN 0

View File

@ -87,41 +87,6 @@ struct iwl_ssid_ie {
u8 ssid[IEEE80211_MAX_SSID_LEN];
} __packed; /* SCAN_DIRECT_SSID_IE_API_S_VER_1 */
/* How many statistics are gathered for each channel */
#define SCAN_RESULTS_STATISTICS 1
/**
* enum iwl_scan_complete_status - status codes for scan complete notifications
* @SCAN_COMP_STATUS_OK: scan completed successfully
* @SCAN_COMP_STATUS_ABORT: scan was aborted by user
* @SCAN_COMP_STATUS_ERR_SLEEP: sending null sleep packet failed
* @SCAN_COMP_STATUS_ERR_CHAN_TIMEOUT: timeout before channel is ready
* @SCAN_COMP_STATUS_ERR_PROBE: sending probe request failed
* @SCAN_COMP_STATUS_ERR_WAKEUP: sending null wakeup packet failed
* @SCAN_COMP_STATUS_ERR_ANTENNAS: invalid antennas chosen at scan command
* @SCAN_COMP_STATUS_ERR_INTERNAL: internal error caused scan abort
* @SCAN_COMP_STATUS_ERR_COEX: medium was lost to WiMax
* @SCAN_COMP_STATUS_P2P_ACTION_OK: P2P public action frame TX was successful
* (not an error!)
* @SCAN_COMP_STATUS_ITERATION_END: indicates end of one repetition the driver
* asked for
* @SCAN_COMP_STATUS_ERR_ALLOC_TE: scan could not allocate time events
*/
enum iwl_scan_complete_status {
SCAN_COMP_STATUS_OK = 0x1,
SCAN_COMP_STATUS_ABORT = 0x2,
SCAN_COMP_STATUS_ERR_SLEEP = 0x3,
SCAN_COMP_STATUS_ERR_CHAN_TIMEOUT = 0x4,
SCAN_COMP_STATUS_ERR_PROBE = 0x5,
SCAN_COMP_STATUS_ERR_WAKEUP = 0x6,
SCAN_COMP_STATUS_ERR_ANTENNAS = 0x7,
SCAN_COMP_STATUS_ERR_INTERNAL = 0x8,
SCAN_COMP_STATUS_ERR_COEX = 0x9,
SCAN_COMP_STATUS_P2P_ACTION_OK = 0xA,
SCAN_COMP_STATUS_ITERATION_END = 0x0B,
SCAN_COMP_STATUS_ERR_ALLOC_TE = 0x0C,
};
/* scan offload */
#define IWL_SCAN_MAX_BLACKLIST_LEN 64
#define IWL_SCAN_SHORT_BLACKLIST_LEN 16
@ -143,71 +108,6 @@ enum scan_framework_client {
SCAN_CLIENT_ASSET_TRACKING = BIT(2),
};
/**
* struct iwl_scan_offload_cmd - SCAN_REQUEST_FIXED_PART_API_S_VER_6
* @scan_flags: see enum iwl_scan_flags
* @channel_count: channels in channel list
* @quiet_time: dwell time, in milliseconds, on quiet channel
* @quiet_plcp_th: quiet channel num of packets threshold
* @good_CRC_th: passive to active promotion threshold
* @rx_chain: RXON rx chain.
* @max_out_time: max TUs to be out of associated channel
* @suspend_time: pause scan for this many TUs when returning to service channel
* @flags: RXON flags
* @filter_flags: RXON filter flags
* @tx_cmd: tx command for active scan; for 2GHz and for 5GHz.
* @direct_scan: list of SSIDs for directed active scan
* @scan_type: see enum iwl_scan_type.
* @rep_count: repetition count for each scheduled scan iteration.
*/
struct iwl_scan_offload_cmd {
__le16 len;
u8 scan_flags;
u8 channel_count;
__le16 quiet_time;
__le16 quiet_plcp_th;
__le16 good_CRC_th;
__le16 rx_chain;
__le32 max_out_time;
__le32 suspend_time;
/* RX_ON_FLAGS_API_S_VER_1 */
__le32 flags;
__le32 filter_flags;
struct iwl_tx_cmd tx_cmd[2];
/* SCAN_DIRECT_SSID_IE_API_S_VER_1 */
struct iwl_ssid_ie direct_scan[PROBE_OPTION_MAX];
__le32 scan_type;
__le32 rep_count;
} __packed;
enum iwl_scan_offload_channel_flags {
IWL_SCAN_OFFLOAD_CHANNEL_ACTIVE = BIT(0),
IWL_SCAN_OFFLOAD_CHANNEL_NARROW = BIT(22),
IWL_SCAN_OFFLOAD_CHANNEL_FULL = BIT(24),
IWL_SCAN_OFFLOAD_CHANNEL_PARTIAL = BIT(25),
};
/* channel configuration for struct iwl_scan_offload_cfg. Each channel needs:
* __le32 type: bitmap; bits 1-20 are for directed scan to the i'th SSID;
* also see enum iwl_scan_offload_channel_flags.
* __le16 channel_number: channel number 1-13 etc.
* __le16 iter_count: repetition count for the channel.
* __le32 iter_interval: interval between two iterations on one channel.
* u8 active_dwell.
* u8 passive_dwell.
*/
#define IWL_SCAN_CHAN_SIZE 14
/**
* iwl_scan_offload_cfg - SCAN_OFFLOAD_CONFIG_API_S
* @scan_cmd: scan command fixed part
* @data: scan channel configuration and probe request frames
*/
struct iwl_scan_offload_cfg {
struct iwl_scan_offload_cmd scan_cmd;
u8 data[0];
} __packed;
/**
* iwl_scan_offload_blacklist - SCAN_OFFLOAD_BLACKLIST_S
* @ssid: MAC address to filter out
@ -297,35 +197,6 @@ enum iwl_scan_ebs_status {
IWL_SCAN_EBS_INACTIVE,
};
/**
* iwl_scan_offload_complete - SCAN_OFFLOAD_COMPLETE_NTF_API_S_VER_1
* @last_schedule_line: last schedule line executed (fast or regular)
* @last_schedule_iteration: last scan iteration executed before scan abort
* @status: enum iwl_scan_offload_complete_status
* @ebs_status: last EBS status, see IWL_SCAN_EBS_*
*/
struct iwl_scan_offload_complete {
u8 last_schedule_line;
u8 last_schedule_iteration;
u8 status;
u8 ebs_status;
} __packed;
/**
* iwl_sched_scan_results - SCAN_OFFLOAD_MATCH_FOUND_NTF_API_S_VER_1
* @ssid_bitmap: SSID indexes found in this iteration
* @client_bitmap: clients that are active and wait for this notification
*/
struct iwl_sched_scan_results {
__le16 ssid_bitmap;
u8 client_bitmap;
u8 reserved;
};
/* Unified LMAC scan API */
#define IWL_MVM_BASIC_PASSIVE_DWELL 110
/**
* iwl_scan_req_tx_cmd - SCAN_REQ_TX_CMD_API_S
* @tx_flags: combination of TX_CMD_FLG_*
@ -550,18 +421,6 @@ struct iwl_periodic_scan_complete {
/* UMAC Scan API */
/**
* struct iwl_mvm_umac_cmd_hdr - Command header for UMAC commands
* @size: size of the command (not including header)
* @reserved0: for future use and alignment
* @ver: API version number
*/
struct iwl_mvm_umac_cmd_hdr {
__le16 size;
u8 reserved0;
u8 ver;
} __packed;
/* The maximum of either of these cannot exceed 8, because we use an
* 8-bit mask (see IWL_MVM_SCAN_MASK in mvm.h).
*/
@ -621,7 +480,6 @@ enum iwl_channel_flags {
/**
* struct iwl_scan_config
* @hdr: umac command header
* @flags: enum scan_config_flags
* @tx_chains: valid_tx antenna - ANT_* definitions
* @rx_chains: valid_rx antenna - ANT_* definitions
@ -639,7 +497,6 @@ enum iwl_channel_flags {
* @channel_array: default supported channels
*/
struct iwl_scan_config {
struct iwl_mvm_umac_cmd_hdr hdr;
__le32 flags;
__le32 tx_chains;
__le32 rx_chains;
@ -735,7 +592,6 @@ struct iwl_scan_req_umac_tail {
/**
* struct iwl_scan_req_umac
* @hdr: umac command header
* @flags: &enum iwl_umac_scan_flags
* @uid: scan id, &enum iwl_umac_scan_uid_offsets
* @ooc_priority: out of channel priority - &enum iwl_scan_priority
@ -754,7 +610,6 @@ struct iwl_scan_req_umac_tail {
* &struct iwl_scan_req_umac_tail
*/
struct iwl_scan_req_umac {
struct iwl_mvm_umac_cmd_hdr hdr;
__le32 flags;
__le32 uid;
__le32 ooc_priority;
@ -776,12 +631,10 @@ struct iwl_scan_req_umac {
/**
* struct iwl_umac_scan_abort
* @hdr: umac command header
* @uid: scan id, &enum iwl_umac_scan_uid_offsets
* @flags: reserved
*/
struct iwl_umac_scan_abort {
struct iwl_mvm_umac_cmd_hdr hdr;
__le32 uid;
__le32 flags;
} __packed; /* SCAN_ABORT_CMD_UMAC_API_S_VER_1 */

View File

@ -0,0 +1,386 @@
/******************************************************************************
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
* Copyright(c) 2015 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
* USA
*
* The full GNU General Public License is included in this distribution
* in the file called COPYING.
*
* Contact Information:
* Intel Linux Wireless <ilw@linux.intel.com>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*
* BSD LICENSE
*
* Copyright(c) 2015 Intel Deutschland GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*****************************************************************************/
#ifndef __fw_api_tof_h__
#define __fw_api_tof_h__
#include "fw-api.h"
/* ToF sub-group command IDs */
enum iwl_mvm_tof_sub_grp_ids {
TOF_RANGE_REQ_CMD = 0x1,
TOF_CONFIG_CMD = 0x2,
TOF_RANGE_ABORT_CMD = 0x3,
TOF_RANGE_REQ_EXT_CMD = 0x4,
TOF_RESPONDER_CONFIG_CMD = 0x5,
TOF_NW_INITIATED_RES_SEND_CMD = 0x6,
TOF_NEIGHBOR_REPORT_REQ_CMD = 0x7,
TOF_NEIGHBOR_REPORT_RSP_NOTIF = 0xFC,
TOF_NW_INITIATED_REQ_RCVD_NOTIF = 0xFD,
TOF_RANGE_RESPONSE_NOTIF = 0xFE,
TOF_MCSI_DEBUG_NOTIF = 0xFB,
};
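/*
 * Note (inferred, illustrative): these are not top-level host command IDs.
 * Each ToF command or notification travels as TOF_CMD (0x10) or
 * TOF_NOTIFICATION (0x11), with the sub-group ID carried in the leading
 * sub_grp_cmd_id field of the structures below.
 */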
/**
* struct iwl_tof_config_cmd - ToF configuration
* @tof_disabled: 0 - enabled, 1 - disabled
* @one_sided_disabled: 0 - enabled, 1 - disabled
* @is_debug_mode: 1 - debug mode, 0 - otherwise
* @is_buf_required: 1 - channel estimation buffer required, 0 - otherwise
*/
struct iwl_tof_config_cmd {
__le32 sub_grp_cmd_id;
u8 tof_disabled;
u8 one_sided_disabled;
u8 is_debug_mode;
u8 is_buf_required;
} __packed;
/**
* struct iwl_tof_responder_config_cmd - ToF AP mode (for debug)
* @burst_period: future use: (currently hard coded in the LMAC)
* The interval between two sequential bursts.
* @min_delta_ftm: future use: (currently hard coded in the LMAC)
* The minimum delay between two sequential FTM Responses
* in the same burst.
* @burst_duration: future use: (currently hard coded in the LMAC)
* The total time for all FTMs handshake in the same burst.
* Affect the time events duration in the LMAC.
* @num_of_burst_exp: future use: (currently hard coded in the LMAC)
* The number of bursts for the current ToF request. Affect
* the number of events allocations in the current iteration.
* @get_ch_est: for xVT only, NA for driver
* @abort_responder: when set to '1' - Responder will terminate its activity
* (all other fields in the command are ignored)
* @recv_sta_req_params: 1 - Responder will ignore the other Responder's
* params and use the recommended Initiator params.
* 0 - otherwise
* @channel_num: current AP Channel
* @bandwidth: current AP Bandwidth: 0 20MHz, 1 40MHz, 2 80MHz
* @rate: current AP rate
* @ctrl_ch_position: coding of the control channel position relative to
* the center frequency.
* 40MHz 0 below center, 1 above center
* 80MHz bits [0..1]: 0 the near 20MHz to the center,
* 1 the far 20MHz to the center
* bit[2] as above 40MHz
* @ftm_per_burst: FTMs per Burst
* @ftm_resp_ts_avail: '0' - we don't measure over the Initial FTM Response,
* '1' - we measure over the Initial FTM Response
* @asap_mode: ASAP / Non ASAP mode for the current WLS station
* @sta_id: index of the AP STA when in AP mode
* @tsf_timer_offset_msecs: The dictated time offset (mSec) from the AP's TSF
* @toa_offset: Artificial addition [0.1nsec] for the ToA - to be used for debug
* purposes, simulating station movement by adding various values
* to this field
* @bssid: Current AP BSSID
*/
struct iwl_tof_responder_config_cmd {
__le32 sub_grp_cmd_id;
__le16 burst_period;
u8 min_delta_ftm;
u8 burst_duration;
u8 num_of_burst_exp;
u8 get_ch_est;
u8 abort_responder;
u8 recv_sta_req_params;
u8 channel_num;
u8 bandwidth;
u8 rate;
u8 ctrl_ch_position;
u8 ftm_per_burst;
u8 ftm_resp_ts_avail;
u8 asap_mode;
u8 sta_id;
__le16 tsf_timer_offset_msecs;
__le16 toa_offset;
u8 bssid[ETH_ALEN];
} __packed;
/**
* struct iwl_tof_range_request_ext_cmd - extended range req for WLS
* @tsf_timer_offset_msec: the recommended time offset (mSec) from the AP's TSF
* @min_delta_ftm: Minimal time between two consecutive measurements,
* in units of 100us. 0 means no preference by station
* @ftm_format_and_bw20M: FTM Channel Spacing/Format for 20MHz: recommended
* value to be sent to the AP
* @ftm_format_and_bw40M: FTM Channel Spacing/Format for 40MHz: recommended
* value to be sent to the AP
* @ftm_format_and_bw80M: FTM Channel Spacing/Format for 80MHz: recommended
* value to be sent to the AP
*/
struct iwl_tof_range_req_ext_cmd {
__le32 sub_grp_cmd_id;
__le16 tsf_timer_offset_msec;
__le16 reserved;
u8 min_delta_ftm;
u8 ftm_format_and_bw20M;
u8 ftm_format_and_bw40M;
u8 ftm_format_and_bw80M;
} __packed;
#define IWL_MVM_TOF_MAX_APS 21
/**
* struct iwl_tof_range_req_ap_entry - AP configuration parameters
* @channel_num: Current AP Channel
* @bandwidth: Current AP Bandwidth: 0 20MHz, 1 40MHz, 2 80MHz
* @tsf_delta_direction: TSF relatively to the subject AP
* @ctrl_ch_position: Coding of the control channel position relative to the
* center frequency.
* 40MHz 0 below center, 1 above center
* 80MHz bits [0..1]: 0 the near 20MHz to the center,
* 1 the far 20MHz to the center
* bit[2] as above 40MHz
* @bssid: AP's bss id
* @measure_type: Measurement type: 0 - two sided, 1 - One sided
* @num_of_bursts: Recommended value to be sent to the AP. 2s Exponent of the
* number of measurement iterations (min 2^0 = 1, max 2^14)
* @burst_period: Recommended value to be sent to the AP. Measurement
* periodicity in units of 100ms; ignored if num_of_bursts = 0
* @samples_per_burst: 2-sided: the number of FTMs pairs in single Burst (1-31)
* 1-sided: how many rts/cts pairs should be used per burst.
* @retries_per_sample: Max number of retries that the LMAC should send
* in case of no replies by the AP.
* @tsf_delta: TSF Delta in units of microseconds.
* The difference between the AP TSF and the device local clock.
* @location_req: Location Request Bit[0] LCI should be sent in the FTMR
* Bit[1] Civic should be sent in the FTMR
* @asap_mode: 0 - non asap mode, 1 - asap mode (not relevant for one sided)
* @enable_dyn_ack: Enable Dynamic ACK BW.
* 0 Initiator interact with regular AP
* 1 Initiator interact with Responder machine: need to send the
* Initiator Acks with HT 40MHz / 80MHz, since the Responder should
* use it for its ch est measurement (this flag will be set when we
* configure the opposite machine to be Responder).
* @rssi: Last received value
* legal values: -128..0 (0x7f); values above 0x0 indicate an invalid value.
*/
struct iwl_tof_range_req_ap_entry {
u8 channel_num;
u8 bandwidth;
u8 tsf_delta_direction;
u8 ctrl_ch_position;
u8 bssid[ETH_ALEN];
u8 measure_type;
u8 num_of_bursts;
__le16 burst_period;
u8 samples_per_burst;
u8 retries_per_sample;
__le32 tsf_delta;
u8 location_req;
u8 asap_mode;
u8 enable_dyn_ack;
s8 rssi;
} __packed;
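/*
 * Illustration only (values hypothetical, not part of the original patch):
 * filling one AP entry for a two-sided measurement might look like this.
 */
static inline void iwl_tof_fill_ap_entry_example(
				struct iwl_tof_range_req_ap_entry *ap)
{
	ap->channel_num = 36;
	ap->bandwidth = 2;			/* 80MHz */
	ap->measure_type = 0;			/* two sided */
	ap->num_of_bursts = 2;			/* 2^2 = 4 bursts */
	ap->burst_period = cpu_to_le16(10);	/* 10 * 100ms = 1s */
	ap->samples_per_burst = 5;		/* 5 FTM pairs per burst */
	ap->retries_per_sample = 3;
}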
/**
* enum iwl_tof_response_mode
* @IWL_MVM_TOF_RESPOSE_ASAP: report each AP measurement separately as soon as
* possible (not supported for this release)
* @IWL_MVM_TOF_RESPOSE_TIMEOUT: report all AP measurements as a batch upon
* timeout expiration
* @IWL_MVM_TOF_RESPOSE_COMPLETE: report all AP measurements as a batch at the
* earlier of: measurements completion / timeout
* expiration.
*/
enum iwl_tof_response_mode {
IWL_MVM_TOF_RESPOSE_ASAP = 1,
IWL_MVM_TOF_RESPOSE_TIMEOUT,
IWL_MVM_TOF_RESPOSE_COMPLETE,
};
/**
* struct iwl_tof_range_req_cmd - start measurement cmd
* @request_id: A Token incremented per request. The same Token will be
* sent back in the range response
* @initiator: 0- NW initiated, 1 - Client Initiated
* @one_sided_los_disable: '0'- run ML-Algo for both ToF/OneSided,
* '1' - run ML-Algo for ToF only
* @req_timeout: Requested timeout of the response in units of 100ms.
* This is equivalent to the session time configured to the
* LMAC in Initiator Request
* @report_policy: Supported partially for this release: For current release -
* the range report will be uploaded as a batch when ready or
* when the session is done (successfully / partially).
* One of enum iwl_tof_response_mode.
* @num_of_ap: Number of APs to measure (error if > IWL_MVM_TOF_MAX_APS)
* @macaddr_random: '0' Use default source MAC address (i.e. p2_p),
* '1' Use MAC Address randomization according to the below
* @macaddr_mask: Bits set to 0 shall be copied from the MAC address template.
* Bits set to 1 shall be randomized by the UMAC
*/
struct iwl_tof_range_req_cmd {
__le32 sub_grp_cmd_id;
u8 request_id;
u8 initiator;
u8 one_sided_los_disable;
u8 req_timeout;
u8 report_policy;
u8 los_det_disable;
u8 num_of_ap;
u8 macaddr_random;
u8 macaddr_template[ETH_ALEN];
u8 macaddr_mask[ETH_ALEN];
struct iwl_tof_range_req_ap_entry ap[IWL_MVM_TOF_MAX_APS];
} __packed;
/**
* struct iwl_tof_gen_resp_cmd - generic ToF response
*/
struct iwl_tof_gen_resp_cmd {
__le32 sub_grp_cmd_id;
u8 data[];
} __packed;
/**
* struct iwl_tof_range_rsp_ap_entry_ntfy - AP parameters (response)
* @measure_status: current APs measurement status
* @measure_bw: Current AP Bandwidth: 0 20MHz, 1 40MHz, 2 80MHz
* @rtt: The Round Trip Time that took for the last measurement for
* current AP [nSec]
* @rtt_variance: The Variance of the RTT values measured for current AP
* @rtt_spread: The Difference between the maximum and the minimum RTT
* values measured for current AP in the current session [nsec]
* @rssi: RSSI as uploaded in the Channel Estimation notification
* @rssi_spread: The Difference between the maximum and the minimum RSSI values
* measured for current AP in the current session
* @range: Measured range [cm]
* @range_variance: Measured range variance [cm]
* @timestamp: The GP2 Clock [usec] where Channel Estimation notification was
* uploaded by the LMAC
*/
struct iwl_tof_range_rsp_ap_entry_ntfy {
u8 bssid[ETH_ALEN];
u8 measure_status;
u8 measure_bw;
__le32 rtt;
__le32 rtt_variance;
__le32 rtt_spread;
s8 rssi;
u8 rssi_spread;
__le16 reserved;
__le32 range;
__le32 range_variance;
__le32 timestamp;
} __packed;
/**
* struct iwl_tof_range_rsp_ntfy - ToF range response notification
* @request_id: A Token ID of the corresponding Range request
* @request_status: status of current measurement session
* @last_in_batch: report policy (when not all responses are uploaded at once)
* @num_of_aps: Number of APs to measure (error if > IWL_MVM_TOF_MAX_APS)
*/
struct iwl_tof_range_rsp_ntfy {
u8 request_id;
u8 request_status;
u8 last_in_batch;
u8 num_of_aps;
struct iwl_tof_range_rsp_ap_entry_ntfy ap[IWL_MVM_TOF_MAX_APS];
} __packed;
#define IWL_MVM_TOF_MCSI_BUF_SIZE (245)
/**
* struct iwl_tof_mcsi_notif - used for debug
* @token: token ID for the current session
* @role: '0' - initiator, '1' - responder
* @initiator_bssid: initiator machine
* @responder_bssid: responder machine
* @mcsi_buffer: debug data
*/
struct iwl_tof_mcsi_notif {
u8 token;
u8 role;
__le16 reserved;
u8 initiator_bssid[ETH_ALEN];
u8 responder_bssid[ETH_ALEN];
u8 mcsi_buffer[IWL_MVM_TOF_MCSI_BUF_SIZE * 4];
} __packed;
/**
* struct iwl_tof_neighbor_report
* @bssid: BSSID of the AP which sent the report
* @request_token: same token as the corresponding request
* @status:
* @report_ie_len: the length of the response frame starting from the Element ID
* @data: the IEs
*/
struct iwl_tof_neighbor_report {
u8 bssid[ETH_ALEN];
u8 request_token;
u8 status;
__le16 report_ie_len;
u8 data[];
} __packed;
/**
* struct iwl_tof_range_abort_cmd
* @request_id: corresponds to a range request
*/
struct iwl_tof_range_abort_cmd {
__le32 sub_grp_cmd_id;
u8 request_id;
u8 reserved[3];
} __packed;
#endif

View File

@ -124,6 +124,18 @@ enum iwl_tx_flags {
TX_CMD_FLG_HCCA_CHUNK = BIT(31)
}; /* TX_FLAGS_BITS_API_S_VER_1 */
/**
* enum iwl_tx_pm_timeouts - pm timeout values in TX command
* @PM_FRAME_NONE: no need to suspend sleep mode
* @PM_FRAME_MGMT: fw suspend sleep mode for 100TU
* @PM_FRAME_ASSOC: fw suspend sleep mode for 10sec
*/
enum iwl_tx_pm_timeouts {
PM_FRAME_NONE = 0,
PM_FRAME_MGMT = 2,
PM_FRAME_ASSOC = 3,
};
/*
* TX command security control
*/

View File

@ -75,6 +75,7 @@
#include "fw-api-coex.h"
#include "fw-api-scan.h"
#include "fw-api-stats.h"
#include "fw-api-tof.h"
/* Tx queue numbers */
enum {
@ -119,6 +120,9 @@ enum {
ADD_STA = 0x18,
REMOVE_STA = 0x19,
/* paging get item */
FW_GET_ITEM_CMD = 0x1a,
/* TX */
TX_CMD = 0x1c,
TXPATH_FLUSH = 0x1e,
@ -148,6 +152,9 @@ enum {
LQ_CMD = 0x4e,
/* paging block to FW cpu2 */
FW_PAGING_BLOCK_CMD = 0x4f,
/* Scan offload */
SCAN_OFFLOAD_REQUEST_CMD = 0x51,
SCAN_OFFLOAD_ABORT_CMD = 0x52,
@ -163,6 +170,10 @@ enum {
CALIB_RES_NOTIF_PHY_DB = 0x6b,
/* PHY_DB_CMD = 0x6c, */
/* ToF - 802.11mc FTM */
TOF_CMD = 0x10,
TOF_NOTIFICATION = 0x11,
/* Power - legacy power table command */
POWER_TABLE_CMD = 0x77,
PSM_UAPSD_AP_MISBEHAVING_NOTIFICATION = 0x78,
@ -365,6 +376,50 @@ struct iwl_nvm_access_cmd {
u8 data[];
} __packed; /* NVM_ACCESS_CMD_API_S_VER_2 */
#define NUM_OF_FW_PAGING_BLOCKS 33 /* 32 for data and 1 block for CSS */
/*
* struct iwl_fw_paging_cmd - paging layout
*
* (FW_PAGING_BLOCK_CMD = 0x4f)
*
* Send to FW the paging layout in the driver.
*
* @flags: various flags for the command
* @block_size: the block size in powers of 2
* @block_num: number of blocks specified in the command.
* @device_phy_addr: virtual addresses from device side
*/
struct iwl_fw_paging_cmd {
__le32 flags;
__le32 block_size;
__le32 block_num;
__le32 device_phy_addr[NUM_OF_FW_PAGING_BLOCKS];
} __packed; /* FW_PAGING_BLOCK_CMD_API_S_VER_1 */
/*
* Fw items ID's
*
* @IWL_FW_ITEM_ID_PAGING: Address of the pages that the FW will upload /
* download
*/
enum iwl_fw_item_id {
IWL_FW_ITEM_ID_PAGING = 3,
};
/*
* struct iwl_fw_get_item_cmd - get an item from the fw
*/
struct iwl_fw_get_item_cmd {
__le32 item_id;
} __packed; /* FW_GET_ITEM_CMD_API_S_VER_1 */
struct iwl_fw_get_item_resp {
__le32 item_id;
__le32 item_byte_cnt;
__le32 item_val;
} __packed; /* FW_GET_ITEM_RSP_S_VER_1 */
/**
* struct iwl_nvm_access_resp_ver2 - response to NVM_ACCESS_CMD
* @offset: offset in bytes into the section
@ -1080,10 +1135,33 @@ struct iwl_rx_phy_info {
__le16 frame_time;
} __packed;
/*
* TCP offload Rx assist info
*
* bits 0:3 - reserved
* bits 4:7 - MIC CRC length
* bits 8:12 - MAC header length
* bit 13 - Padding indication
* bit 14 - A-AMSDU indication
* bit 15 - Offload enabled
*/
enum iwl_csum_rx_assist_info {
CSUM_RXA_RESERVED_MASK = 0x000f,
CSUM_RXA_MICSIZE_MASK = 0x00f0,
CSUM_RXA_HEADERLEN_MASK = 0x1f00,
CSUM_RXA_PADD = BIT(13),
CSUM_RXA_AMSDU = BIT(14),
CSUM_RXA_ENA = BIT(15)
};
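/*
 * Illustration only (not part of the original patch): a helper like this
 * could pull the MAC header length out of the assist word, gated on the
 * offload-enabled bit.
 */
static inline u8 iwl_mvm_csum_mac_hdr_len_example(__le16 assist_le)
{
	u16 assist = le16_to_cpu(assist_le);

	if (!(assist & CSUM_RXA_ENA))		/* bit 15: offload enabled */
		return 0;
	return (assist & CSUM_RXA_HEADERLEN_MASK) >> 8;	/* bits 8:12 */
}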
/**
* struct iwl_rx_mpdu_res_start - phy info
* @assist: see enum iwl_csum_rx_assist_info above
*/
struct iwl_rx_mpdu_res_start {
__le16 byte_count;
__le16 reserved;
} __packed;
__le16 assist;
} __packed; /* _RX_MPDU_RES_START_API_S_VER_2 */
/**
* enum iwl_rx_phy_flags - to parse %iwl_rx_phy_info phy_flags
@ -1136,6 +1214,8 @@ enum iwl_rx_phy_flags {
* @RX_MPDU_RES_STATUS_EXT_IV_BIT_CMP:
* @RX_MPDU_RES_STATUS_KEY_ID_CMP_BIT:
* @RX_MPDU_RES_STATUS_ROBUST_MNG_FRAME: this frame is an 11w management frame
* @RX_MPDU_RES_STATUS_CSUM_DONE: checksum was done by the hw
* @RX_MPDU_RES_STATUS_CSUM_OK: checksum found no errors
* @RX_MPDU_RES_STATUS_HASH_INDEX_MSK:
* @RX_MPDU_RES_STATUS_STA_ID_MSK:
* @RX_MPDU_RES_STATUS_RRF_KILL:
@ -1165,6 +1245,8 @@ enum iwl_mvm_rx_status {
RX_MPDU_RES_STATUS_EXT_IV_BIT_CMP = BIT(13),
RX_MPDU_RES_STATUS_KEY_ID_CMP_BIT = BIT(14),
RX_MPDU_RES_STATUS_ROBUST_MNG_FRAME = BIT(15),
RX_MPDU_RES_STATUS_CSUM_DONE = BIT(16),
RX_MPDU_RES_STATUS_CSUM_OK = BIT(17),
RX_MPDU_RES_STATUS_HASH_INDEX_MSK = (0x3F0000),
RX_MPDU_RES_STATUS_STA_ID_MSK = (0x1f000000),
RX_MPDU_RES_STATUS_RRF_KILL = BIT(29),

View File

@ -106,6 +106,306 @@ static int iwl_send_tx_ant_cfg(struct iwl_mvm *mvm, u8 valid_tx_ant)
sizeof(tx_ant_cmd), &tx_ant_cmd);
}
static void iwl_free_fw_paging(struct iwl_mvm *mvm)
{
int i;
if (!mvm->fw_paging_db[0].fw_paging_block)
return;
for (i = 0; i < NUM_OF_FW_PAGING_BLOCKS; i++) {
if (!mvm->fw_paging_db[i].fw_paging_block) {
IWL_DEBUG_FW(mvm,
"Paging: block %d already freed, continue to next page\n",
i);
continue;
}
__free_pages(mvm->fw_paging_db[i].fw_paging_block,
get_order(mvm->fw_paging_db[i].fw_paging_size));
}
kfree(mvm->trans->paging_download_buf);
memset(mvm->fw_paging_db, 0, sizeof(mvm->fw_paging_db));
}
static int iwl_fill_paging_mem(struct iwl_mvm *mvm, const struct fw_img *image)
{
int sec_idx, idx;
u32 offset = 0;
/*
* find where the paging image starts:
* if CPU2 exists and is in paging format, the image looks like:
* CPU1 sections (2 or more)
* CPU1_CPU2_SEPARATOR_SECTION delimiter - separates CPU1 from CPU2
* CPU2 sections (not paged)
* PAGING_SEPARATOR_SECTION delimiter - separates the non-paged CPU2
* sections from the CPU2 paging section
* CPU2 paging CSS
* CPU2 paging image (including instruction and data)
*/
for (sec_idx = 0; sec_idx < IWL_UCODE_SECTION_MAX; sec_idx++) {
if (image->sec[sec_idx].offset == PAGING_SEPARATOR_SECTION) {
sec_idx++;
break;
}
}
if (sec_idx >= IWL_UCODE_SECTION_MAX) {
IWL_ERR(mvm, "driver didn't find paging image\n");
iwl_free_fw_paging(mvm);
return -EINVAL;
}
/* copy the CSS block to the dram */
IWL_DEBUG_FW(mvm, "Paging: load paging CSS to FW, sec = %d\n",
sec_idx);
memcpy(page_address(mvm->fw_paging_db[0].fw_paging_block),
image->sec[sec_idx].data,
mvm->fw_paging_db[0].fw_paging_size);
IWL_DEBUG_FW(mvm,
"Paging: copied %d CSS bytes to first block\n",
mvm->fw_paging_db[0].fw_paging_size);
sec_idx++;
/*
* copy the paging blocks to the dram
* the loop index starts from 1 since the CSS block was already copied to
* dram and its index is 0.
* the loop stops at num_of_paging_blk since the last block, which may not
* be full, is copied separately below.
*/
for (idx = 1; idx < mvm->num_of_paging_blk; idx++) {
memcpy(page_address(mvm->fw_paging_db[idx].fw_paging_block),
image->sec[sec_idx].data + offset,
mvm->fw_paging_db[idx].fw_paging_size);
IWL_DEBUG_FW(mvm,
"Paging: copied %d paging bytes to block %d\n",
mvm->fw_paging_db[idx].fw_paging_size,
idx);
offset += mvm->fw_paging_db[idx].fw_paging_size;
}
/* copy the last paging block */
if (mvm->num_of_pages_in_last_blk > 0) {
memcpy(page_address(mvm->fw_paging_db[idx].fw_paging_block),
image->sec[sec_idx].data + offset,
FW_PAGING_SIZE * mvm->num_of_pages_in_last_blk);
IWL_DEBUG_FW(mvm,
"Paging: copied %d pages in the last block %d\n",
mvm->num_of_pages_in_last_blk, idx);
}
return 0;
}
static int iwl_alloc_fw_paging_mem(struct iwl_mvm *mvm,
const struct fw_img *image)
{
struct page *block;
dma_addr_t phys = 0;
int blk_idx = 0;
int order, num_of_pages;
int dma_enabled;
if (mvm->fw_paging_db[0].fw_paging_block)
return 0;
dma_enabled = is_device_dma_capable(mvm->trans->dev);
/* ensure BLOCK_2_EXP_SIZE is the log2 of PAGING_BLOCK_SIZE */
BUILD_BUG_ON(BIT(BLOCK_2_EXP_SIZE) != PAGING_BLOCK_SIZE);
num_of_pages = image->paging_mem_size / FW_PAGING_SIZE;
mvm->num_of_paging_blk = ((num_of_pages - 1) /
NUM_OF_PAGE_PER_GROUP) + 1;
mvm->num_of_pages_in_last_blk =
num_of_pages -
NUM_OF_PAGE_PER_GROUP * (mvm->num_of_paging_blk - 1);
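/*
 * Worked example (values assumed, not from the original patch): with a
 * 1MB paging image and 4KB pages, num_of_pages = 256; with 8 pages per
 * group this gives num_of_paging_blk = ((256 - 1) / 8) + 1 = 32, and the
 * last block holds 256 - 8 * 31 = 8 pages, i.e. it happens to be full.
 */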
IWL_DEBUG_FW(mvm,
"Paging: allocating mem for %d paging blocks, each block holds 8 pages, last block holds %d pages\n",
mvm->num_of_paging_blk,
mvm->num_of_pages_in_last_blk);
/* allocate block of 4Kbytes for paging CSS */
order = get_order(FW_PAGING_SIZE);
block = alloc_pages(GFP_KERNEL, order);
if (!block) {
/* free all the previous pages since we failed */
iwl_free_fw_paging(mvm);
return -ENOMEM;
}
mvm->fw_paging_db[blk_idx].fw_paging_block = block;
mvm->fw_paging_db[blk_idx].fw_paging_size = FW_PAGING_SIZE;
if (dma_enabled) {
phys = dma_map_page(mvm->trans->dev, block, 0,
PAGE_SIZE << order, DMA_BIDIRECTIONAL);
if (dma_mapping_error(mvm->trans->dev, phys)) {
/*
* free the previous pages and the current one since
* we failed to map_page.
*/
iwl_free_fw_paging(mvm);
return -ENOMEM;
}
mvm->fw_paging_db[blk_idx].fw_paging_phys = phys;
} else {
mvm->fw_paging_db[blk_idx].fw_paging_phys = PAGING_ADDR_SIG |
blk_idx << BLOCK_2_EXP_SIZE;
}
IWL_DEBUG_FW(mvm,
"Paging: allocated 4K(CSS) bytes (order %d) for firmware paging.\n",
order);
/*
* allocate blocks in dram.
* since the CSS was allocated in fw_paging_db[0], the loop starts from index 1
*/
for (blk_idx = 1; blk_idx < mvm->num_of_paging_blk + 1; blk_idx++) {
/* allocate block of PAGING_BLOCK_SIZE (32K) */
order = get_order(PAGING_BLOCK_SIZE);
block = alloc_pages(GFP_KERNEL, order);
if (!block) {
/* free all the previous pages since we failed */
iwl_free_fw_paging(mvm);
return -ENOMEM;
}
mvm->fw_paging_db[blk_idx].fw_paging_block = block;
mvm->fw_paging_db[blk_idx].fw_paging_size = PAGING_BLOCK_SIZE;
if (dma_enabled) {
phys = dma_map_page(mvm->trans->dev, block, 0,
PAGE_SIZE << order,
DMA_BIDIRECTIONAL);
if (dma_mapping_error(mvm->trans->dev, phys)) {
/*
* free the previous pages and the current one
* since we failed to map_page.
*/
iwl_free_fw_paging(mvm);
return -ENOMEM;
}
mvm->fw_paging_db[blk_idx].fw_paging_phys = phys;
} else {
mvm->fw_paging_db[blk_idx].fw_paging_phys =
PAGING_ADDR_SIG |
blk_idx << BLOCK_2_EXP_SIZE;
}
IWL_DEBUG_FW(mvm,
"Paging: allocated 32K bytes (order %d) for firmware paging.\n",
order);
}
return 0;
}
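/*
 * Illustrative note (not in the original patch): in the non-DMA case the
 * "physical" address stored above is a synthetic token, PAGING_ADDR_SIG |
 * (blk_idx << BLOCK_2_EXP_SIZE), presumably so the transport can recognize
 * it and copy pages via SMEM instead (see iwl_trans_get_paging_item()
 * below).
 */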
static int iwl_save_fw_paging(struct iwl_mvm *mvm,
const struct fw_img *fw)
{
int ret;
ret = iwl_alloc_fw_paging_mem(mvm, fw);
if (ret)
return ret;
return iwl_fill_paging_mem(mvm, fw);
}
/* send paging cmd to FW in case CPU2 has paging image */
static int iwl_send_paging_cmd(struct iwl_mvm *mvm, const struct fw_img *fw)
{
int blk_idx;
__le32 dev_phy_addr;
struct iwl_fw_paging_cmd fw_paging_cmd = {
.flags =
cpu_to_le32(PAGING_CMD_IS_SECURED |
PAGING_CMD_IS_ENABLED |
(mvm->num_of_pages_in_last_blk <<
PAGING_CMD_NUM_OF_PAGES_IN_LAST_GRP_POS)),
.block_size = cpu_to_le32(BLOCK_2_EXP_SIZE),
.block_num = cpu_to_le32(mvm->num_of_paging_blk),
};
/* loop over all paging blocks + the CSS block */
for (blk_idx = 0; blk_idx < mvm->num_of_paging_blk + 1; blk_idx++) {
dev_phy_addr =
cpu_to_le32(mvm->fw_paging_db[blk_idx].fw_paging_phys >>
PAGE_2_EXP_SIZE);
fw_paging_cmd.device_phy_addr[blk_idx] = dev_phy_addr;
}
return iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(FW_PAGING_BLOCK_CMD,
IWL_ALWAYS_LONG_GROUP, 0),
0, sizeof(fw_paging_cmd), &fw_paging_cmd);
}
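/*
 * Illustrative note (not in the original patch): each device_phy_addr is
 * sent right-shifted by PAGE_2_EXP_SIZE, i.e. expressed in units of pages
 * rather than bytes (4KB pages, assuming PAGE_2_EXP_SIZE is 12).
 */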
/*
* Send paging item cmd to FW in case CPU2 has paging image
*/
static int iwl_trans_get_paging_item(struct iwl_mvm *mvm)
{
int ret;
struct iwl_fw_get_item_cmd fw_get_item_cmd = {
.item_id = cpu_to_le32(IWL_FW_ITEM_ID_PAGING),
};
struct iwl_fw_get_item_resp *item_resp;
struct iwl_host_cmd cmd = {
.id = iwl_cmd_id(FW_GET_ITEM_CMD, IWL_ALWAYS_LONG_GROUP, 0),
.flags = CMD_WANT_SKB | CMD_SEND_IN_RFKILL,
.data = { &fw_get_item_cmd, },
};
cmd.len[0] = sizeof(struct iwl_fw_get_item_cmd);
ret = iwl_mvm_send_cmd(mvm, &cmd);
if (ret) {
IWL_ERR(mvm,
"Paging: Failed to send FW_GET_ITEM_CMD cmd (err = %d)\n",
ret);
return ret;
}
item_resp = (void *)((struct iwl_rx_packet *)cmd.resp_pkt)->data;
if (item_resp->item_id != cpu_to_le32(IWL_FW_ITEM_ID_PAGING)) {
IWL_ERR(mvm,
"Paging: got wrong item in FW_GET_ITEM_CMD resp (item_id = %u)\n",
le32_to_cpu(item_resp->item_id));
ret = -EIO;
goto exit;
}
mvm->trans->paging_download_buf = kzalloc(MAX_PAGING_IMAGE_SIZE,
GFP_KERNEL);
if (!mvm->trans->paging_download_buf) {
ret = -ENOMEM;
goto exit;
}
mvm->trans->paging_req_addr = le32_to_cpu(item_resp->item_val);
mvm->trans->paging_db = mvm->fw_paging_db;
IWL_DEBUG_FW(mvm,
"Paging: got paging request address (paging_req_addr 0x%08x)\n",
mvm->trans->paging_req_addr);
exit:
iwl_free_resp(&cmd);
return ret;
}
static bool iwl_alive_fn(struct iwl_notif_wait_data *notif_wait,
struct iwl_rx_packet *pkt, void *data)
{
@ -213,7 +513,7 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
const struct fw_img *fw;
int ret, i;
enum iwl_ucode_type old_type = mvm->cur_ucode;
static const u8 alive_cmd[] = { MVM_ALIVE };
static const u16 alive_cmd[] = { MVM_ALIVE };
struct iwl_sf_region st_fwrd_space;
if (ucode_type == IWL_UCODE_REGULAR &&
@ -244,6 +544,11 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
ret = iwl_wait_notification(&mvm->notif_wait, &alive_wait,
MVM_UCODE_ALIVE_TIMEOUT);
if (ret) {
if (mvm->trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
IWL_ERR(mvm,
"SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n",
iwl_read_prph(mvm->trans, SB_CPU_1_STATUS),
iwl_read_prph(mvm->trans, SB_CPU_2_STATUS));
mvm->cur_ucode = old_type;
return ret;
}
@ -268,6 +573,40 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
iwl_trans_fw_alive(mvm->trans, alive_data.scd_base_addr);
/*
* configure and operate fw paging mechanism.
* the driver configures the paging flow only once; the CPU2 paging image
* is included in the IWL_UCODE_INIT image.
*/
if (fw->paging_mem_size) {
/*
* When dma is not enabled, the driver needs to copy / write
* the downloaded / uploaded page to / from the smem.
* This gets the location of the place where the pages are
* stored.
*/
if (!is_device_dma_capable(mvm->trans->dev)) {
ret = iwl_trans_get_paging_item(mvm);
if (ret) {
IWL_ERR(mvm, "failed to get FW paging item\n");
return ret;
}
}
ret = iwl_save_fw_paging(mvm, fw);
if (ret) {
IWL_ERR(mvm, "failed to save the FW paging image\n");
return ret;
}
ret = iwl_send_paging_cmd(mvm, fw);
if (ret) {
IWL_ERR(mvm, "failed to send the paging cmd\n");
iwl_free_fw_paging(mvm);
return ret;
}
}
/*
* Note: all the queues are enabled as part of the interface
* initialization, but in firmware restart scenarios they
@ -314,7 +653,7 @@ static int iwl_send_phy_cfg_cmd(struct iwl_mvm *mvm)
int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
{
struct iwl_notification_wait calib_wait;
static const u8 init_complete[] = {
static const u16 init_complete[] = {
INIT_COMPLETE_NOTIF,
CALIB_RES_NOTIF_PHY_DB
};
@ -444,12 +783,6 @@ static void iwl_mvm_get_shared_mem_conf(struct iwl_mvm *mvm)
return;
pkt = cmd.resp_pkt;
if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
IWL_ERR(mvm, "Bad return from SHARED_MEM_CFG (0x%08X)\n",
pkt->hdr.flags);
goto exit;
}
mem_cfg = (void *)pkt->data;
mvm->shared_mem_cfg.shared_mem_addr =
@ -473,14 +806,18 @@ static void iwl_mvm_get_shared_mem_conf(struct iwl_mvm *mvm)
le32_to_cpu(mem_cfg->page_buff_size);
IWL_DEBUG_INFO(mvm, "SHARED MEM CFG: got memory offsets/sizes\n");
exit:
iwl_free_resp(&cmd);
}
int iwl_mvm_fw_dbg_collect_desc(struct iwl_mvm *mvm,
struct iwl_mvm_dump_desc *desc,
unsigned int delay)
struct iwl_fw_dbg_trigger_tlv *trigger)
{
unsigned int delay = 0;
if (trigger)
delay = msecs_to_jiffies(le32_to_cpu(trigger->stop_delay));
if (test_and_set_bit(IWL_MVM_STATUS_DUMPING_FW_LOG, &mvm->status))
return -EBUSY;
@ -491,6 +828,7 @@ int iwl_mvm_fw_dbg_collect_desc(struct iwl_mvm *mvm,
le32_to_cpu(desc->trig_desc.type));
mvm->fw_dump_desc = desc;
mvm->fw_dump_trig = trigger;
queue_delayed_work(system_wq, &mvm->fw_dump_wk, delay);
@ -498,7 +836,8 @@ int iwl_mvm_fw_dbg_collect_desc(struct iwl_mvm *mvm,
}
int iwl_mvm_fw_dbg_collect(struct iwl_mvm *mvm, enum iwl_fw_dbg_trigger trig,
const char *str, size_t len, unsigned int delay)
const char *str, size_t len,
struct iwl_fw_dbg_trigger_tlv *trigger)
{
struct iwl_mvm_dump_desc *desc;
@ -510,14 +849,13 @@ int iwl_mvm_fw_dbg_collect(struct iwl_mvm *mvm, enum iwl_fw_dbg_trigger trig,
desc->trig_desc.type = cpu_to_le32(trig);
memcpy(desc->trig_desc.data, str, len);
return iwl_mvm_fw_dbg_collect_desc(mvm, desc, delay);
return iwl_mvm_fw_dbg_collect_desc(mvm, desc, trigger);
}
int iwl_mvm_fw_dbg_collect_trig(struct iwl_mvm *mvm,
struct iwl_fw_dbg_trigger_tlv *trigger,
const char *fmt, ...)
{
unsigned int delay = msecs_to_jiffies(le32_to_cpu(trigger->stop_delay));
u16 occurrences = le16_to_cpu(trigger->occurrences);
int ret, len = 0;
char buf[64];
@ -541,8 +879,9 @@ int iwl_mvm_fw_dbg_collect_trig(struct iwl_mvm *mvm,
len = strlen(buf) + 1;
}
ret = iwl_mvm_fw_dbg_collect(mvm, le32_to_cpu(trigger->id), buf,
len, delay);
ret = iwl_mvm_fw_dbg_collect(mvm, le32_to_cpu(trigger->id), buf, len,
trigger);
if (ret)
return ret;
@ -676,8 +1015,7 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
goto error;
}
if (IWL_UCODE_API(mvm->fw->ucode_ver) >= 10)
iwl_mvm_get_shared_mem_conf(mvm);
iwl_mvm_get_shared_mem_conf(mvm);
ret = iwl_mvm_sf_update(mvm, NULL, false);
if (ret)
@ -760,6 +1098,10 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
goto error;
}
if (iwl_mvm_is_csum_supported(mvm) &&
mvm->cfg->features & NETIF_F_RXCSUM)
iwl_trans_write_prph(mvm->trans, RX_EN_CSUM, 0x3);
/* allow FW/transport low power modes if not during restart */
if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
iwl_mvm_unref(mvm, IWL_MVM_REF_UCODE_DOWN);
@ -815,9 +1157,8 @@ int iwl_mvm_load_d3_fw(struct iwl_mvm *mvm)
return ret;
}
int iwl_mvm_rx_card_state_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd)
void iwl_mvm_rx_card_state_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_card_state_notif *card_state_notif = (void *)pkt->data;
@ -828,13 +1169,10 @@ int iwl_mvm_rx_card_state_notif(struct iwl_mvm *mvm,
(flags & SW_CARD_DISABLED) ? "Kill" : "On",
(flags & CT_KILL_CARD_DISABLED) ?
"Reached" : "Not reached");
return 0;
}
int iwl_mvm_rx_mfuart_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd)
void iwl_mvm_rx_mfuart_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_mfuart_load_notif *mfuart_notif = (void *)pkt->data;
@ -845,5 +1183,4 @@ int iwl_mvm_rx_mfuart_notif(struct iwl_mvm *mvm,
le32_to_cpu(mfuart_notif->external_ver),
le32_to_cpu(mfuart_notif->status),
le32_to_cpu(mfuart_notif->duration));
return 0;
}

View File

@ -1312,9 +1312,8 @@ static void iwl_mvm_csa_count_down(struct iwl_mvm *mvm,
}
}
int iwl_mvm_rx_beacon_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd)
void iwl_mvm_rx_beacon_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_extended_beacon_notif *beacon = (void *)pkt->data;
@ -1365,8 +1364,6 @@ int iwl_mvm_rx_beacon_notif(struct iwl_mvm *mvm,
RCU_INIT_POINTER(mvm->csa_tx_blocked_vif, NULL);
}
}
return 0;
}
static void iwl_mvm_beacon_loss_iterator(void *_data, u8 *mac,
@ -1415,9 +1412,8 @@ static void iwl_mvm_beacon_loss_iterator(void *_data, u8 *mac,
iwl_mvm_fw_dbg_collect_trig(mvm, trigger, NULL);
}
int iwl_mvm_rx_missed_beacons_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd)
void iwl_mvm_rx_missed_beacons_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_missed_beacons_notif *mb = (void *)pkt->data;
@ -1434,5 +1430,4 @@ int iwl_mvm_rx_missed_beacons_notif(struct iwl_mvm *mvm,
IEEE80211_IFACE_ITER_NORMAL,
iwl_mvm_beacon_loss_iterator,
mb);
return 0;
}

View File

@ -641,6 +641,7 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
IWL_UCODE_TLV_CAPA_TDLS_SUPPORT)) {
IWL_DEBUG_TDLS(mvm, "TDLS supported\n");
hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS;
ieee80211_hw_set(hw, TDLS_WIDER_BW);
}
if (fw_has_capa(&mvm->fw->ucode_capa,
@ -649,6 +650,10 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
hw->wiphy->features |= NL80211_FEATURE_TDLS_CHANNEL_SWITCH;
}
hw->netdev_features |= mvm->cfg->features;
if (!iwl_mvm_is_csum_supported(mvm))
hw->netdev_features &= ~NETIF_F_RXCSUM;
ret = ieee80211_register_hw(mvm->hw);
if (ret)
iwl_mvm_leds_exit(mvm);
@ -1120,9 +1125,14 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
u32 file_len, fifo_data_len = 0;
u32 smem_len = mvm->cfg->smem_len;
u32 sram2_len = mvm->cfg->dccm2_len;
bool monitor_dump_only = false;
lockdep_assert_held(&mvm->mutex);
if (mvm->fw_dump_trig &&
mvm->fw_dump_trig->mode & IWL_FW_DBG_TRIGGER_MONITOR_ONLY)
monitor_dump_only = true;
fw_error_dump = kzalloc(sizeof(*fw_error_dump), GFP_KERNEL);
if (!fw_error_dump)
return;
@ -1174,6 +1184,20 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
fifo_data_len +
sizeof(*dump_info);
/* Make room for the SMEM, if it exists */
if (smem_len)
file_len += sizeof(*dump_data) + sizeof(*dump_mem) + smem_len;
/* Make room for the secondary SRAM, if it exists */
if (sram2_len)
file_len += sizeof(*dump_data) + sizeof(*dump_mem) + sram2_len;
/* If we only want a monitor dump, reset the file length */
if (monitor_dump_only) {
file_len = sizeof(*dump_file) + sizeof(*dump_data) +
sizeof(*dump_info);
}
/*
* In 8000 HW family B-step include the ICCM (which resides separately)
*/
@ -1186,14 +1210,6 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
file_len += sizeof(*dump_data) + sizeof(*dump_trig) +
mvm->fw_dump_desc->len;
/* Make room for the SMEM, if it exists */
if (smem_len)
file_len += sizeof(*dump_data) + sizeof(*dump_mem) + smem_len;
/* Make room for the secondary SRAM, if it exists */
if (sram2_len)
file_len += sizeof(*dump_data) + sizeof(*dump_mem) + sram2_len;
dump_file = vzalloc(file_len);
if (!dump_file) {
kfree(fw_error_dump);
@ -1239,6 +1255,10 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
dump_data = iwl_fw_error_next_data(dump_data);
}
/* In case we only want a monitor dump, skip to dumping transport data */
if (monitor_dump_only)
goto dump_trans_data;
dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM);
dump_data->len = cpu_to_le32(sram_len + sizeof(*dump_mem));
dump_mem = (void *)dump_data->data;
@ -1282,7 +1302,9 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
dump_mem->data, IWL8260_ICCM_LEN);
}
fw_error_dump->trans_ptr = iwl_trans_dump_data(mvm->trans);
dump_trans_data:
fw_error_dump->trans_ptr = iwl_trans_dump_data(mvm->trans,
mvm->fw_dump_trig);
fw_error_dump->op_mode_len = file_len;
if (fw_error_dump->trans_ptr)
file_len += fw_error_dump->trans_ptr->len;
@ -1291,6 +1313,7 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
dev_coredumpm(mvm->trans->dev, THIS_MODULE, fw_error_dump, 0,
GFP_KERNEL, iwl_mvm_read_coredump, iwl_mvm_free_coredump);
mvm->fw_dump_trig = NULL;
clear_bit(IWL_MVM_STATUS_DUMPING_FW_LOG, &mvm->status);
}
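
Read together, the fw_error_dump hunks above make one change: a monitor-only dump mode keyed off the trigger's IWL_FW_DBG_TRIGGER_MONITOR_ONLY flag. The optional SMEM/secondary-SRAM accounting moves ahead of the new check so the check can collapse the file length to just the header and info block, and at write-out time the memory sections are skipped via the dump_trans_data label, where iwl_trans_dump_data() now also receives the active trigger. A stand-in sketch of the sizing half in plain C (types and section layout are illustrative, not iwlwifi's):

#include <stdbool.h>
#include <stddef.h>

/* Account for the optional sections first, then collapse everything
 * but the header + info block when only monitor data is wanted --
 * the same ordering the reworked hunks establish above. */
static size_t ex_dump_file_len(bool monitor_only, size_t hdr_info_len,
			       size_t section_hdr_len, size_t smem_len,
			       size_t sram2_len)
{
	size_t len = hdr_info_len;

	if (smem_len)
		len += section_hdr_len + smem_len;
	if (sram2_len)
		len += section_hdr_len + sram2_len;

	if (monitor_only)
		len = hdr_info_len;

	return len;
}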
@ -1433,22 +1456,9 @@ static void iwl_mvm_restart_complete(struct iwl_mvm *mvm)
static void iwl_mvm_resume_complete(struct iwl_mvm *mvm)
{
bool exit_now;
if (!iwl_mvm_is_d0i3_supported(mvm))
return;
mutex_lock(&mvm->d0i3_suspend_mutex);
__clear_bit(D0I3_DEFER_WAKEUP, &mvm->d0i3_suspend_flags);
exit_now = __test_and_clear_bit(D0I3_PENDING_WAKEUP,
&mvm->d0i3_suspend_flags);
mutex_unlock(&mvm->d0i3_suspend_mutex);
if (exit_now) {
IWL_DEBUG_RPM(mvm, "Run deferred d0i3 exit\n");
_iwl_mvm_exit_d0i3(mvm);
}
if (mvm->trans->d0i3_mode == IWL_D0I3_MODE_ON_SUSPEND)
if (!wait_event_timeout(mvm->d0i3_exit_waitq,
!test_bit(IWL_MVM_STATUS_IN_D0I3,
@ -1664,6 +1674,8 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
goto out_unlock;
}
mvmvif->features |= hw->netdev_features;
ret = iwl_mvm_mac_ctxt_add(mvm, vif);
if (ret)
goto out_release;
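
This hunk pairs with the RXCSUM gating earlier in the file: the hw-wide feature mask is trimmed to what the firmware can back before ieee80211_register_hw(), and each vif then snapshots the negotiated set when it is added. A stand-in sketch of that gate-then-snapshot pattern in plain C (all names here are illustrative, not iwlwifi's):

#include <stdbool.h>
#include <stdint.h>

#define EX_F_RXCSUM (1u << 0)	/* stand-in for NETIF_F_RXCSUM */

struct ex_vif {
	uint32_t features;	/* per-vif snapshot */
};

/* Trim the advertised features to firmware capability... */
static uint32_t ex_negotiate(uint32_t cfg_features, bool fw_has_csum)
{
	uint32_t features = cfg_features;

	if (!fw_has_csum)
		features &= ~EX_F_RXCSUM;
	return features;
}

/* ...and let a newly added interface inherit the negotiated set. */
static void ex_add_interface(struct ex_vif *vif, uint32_t hw_features)
{
	vif->features |= hw_features;
}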
@ -2880,10 +2892,11 @@ static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
switch (key->cipher) {
case WLAN_CIPHER_SUITE_TKIP:
key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
/* fall-through */
case WLAN_CIPHER_SUITE_CCMP:
key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
break;
case WLAN_CIPHER_SUITE_CCMP:
key->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE;
break;
case WLAN_CIPHER_SUITE_AES_CMAC:
WARN_ON_ONCE(!ieee80211_hw_check(hw, MFP_CAPABLE));
break;
@ -3025,7 +3038,7 @@ static int iwl_mvm_send_aux_roc_cmd(struct iwl_mvm *mvm,
int res, time_reg = DEVICE_SYSTEM_TIME_REG;
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct iwl_mvm_time_event_data *te_data = &mvmvif->hs_time_event_data;
static const u8 time_event_response[] = { HOT_SPOT_CMD };
static const u16 time_event_response[] = { HOT_SPOT_CMD };
struct iwl_notification_wait wait_time_event;
struct iwl_hs20_roc_req aux_roc_req = {
.action = cpu_to_le32(FW_CTXT_ACTION_ADD),

View File

@ -80,6 +80,7 @@
#include "sta.h"
#include "fw-api.h"
#include "constants.h"
#include "tof.h"
#define IWL_INVALID_MAC80211_QUEUE 0xff
#define IWL_MVM_MAX_ADDRESSES 5
@ -122,8 +123,7 @@ extern const struct ieee80211_ops iwl_mvm_hw_ops;
* be up'ed after the INIT fw asserted. This is useful to be able to use
* proprietary tools over testmode to debug the INIT fw.
* @tfd_q_hang_detect: enables the detection of hung transmit queues
* @power_scheme: CAM(Continuous Active Mode)-1, BPS(Balanced Power
* Save)-2(default), LP(Low Power)-3
* @power_scheme: one of enum iwl_power_scheme
*/
struct iwl_mvm_mod_params {
bool init_dbg;
@ -357,6 +357,7 @@ struct iwl_mvm_vif_bf_data {
* # of received beacons accumulated over FW restart, and the current
* average signal of beacons retrieved from the firmware
* @csa_failed: CSA failed to schedule time event, report an error later
* @features: hw features active for this vif
*/
struct iwl_mvm_vif {
struct iwl_mvm *mvm;
@ -437,6 +438,9 @@ struct iwl_mvm_vif {
/* Indicates that CSA countdown may be started */
bool csa_countdown;
bool csa_failed;
/* TCP Checksum Offload */
netdev_features_t features;
};
static inline struct iwl_mvm_vif *
@ -559,7 +563,6 @@ struct iwl_mvm {
const struct iwl_cfg *cfg;
struct iwl_phy_db *phy_db;
struct ieee80211_hw *hw;
struct napi_struct *napi;
/* for protecting access to iwl_mvm */
struct mutex mutex;
@ -607,6 +610,11 @@ struct iwl_mvm {
/* NVM sections */
struct iwl_nvm_section nvm_sections[NVM_MAX_NUM_SECTIONS];
/* Paging section */
struct iwl_fw_paging fw_paging_db[NUM_OF_FW_PAGING_BLOCKS];
u16 num_of_paging_blk;
u16 num_of_pages_in_last_blk;
/* EEPROM MAC addresses */
struct mac_address addresses[IWL_MVM_MAX_ADDRESSES];
@ -687,6 +695,7 @@ struct iwl_mvm {
* can hold 16 keys at most. Reflect this fact.
*/
unsigned long fw_key_table[BITS_TO_LONGS(STA_KEY_MAX_NUM)];
u8 fw_key_deleted[STA_KEY_MAX_NUM];
/* references taken by the driver and spinlock protecting them */
spinlock_t refs_lock;
@ -699,6 +708,7 @@ struct iwl_mvm {
u8 fw_dbg_conf;
struct delayed_work fw_dump_wk;
struct iwl_mvm_dump_desc *fw_dump_desc;
struct iwl_fw_dbg_trigger_tlv *fw_dump_trig;
#ifdef CONFIG_IWLWIFI_LEDS
struct led_classdev led;
@ -823,6 +833,7 @@ struct iwl_mvm {
struct iwl_mvm_shared_mem_cfg shared_mem_cfg;
u32 ciphers[6];
struct iwl_mvm_tof_data tof_data;
};
/* Extract MVM priv from op_mode and _hw */
@ -942,6 +953,12 @@ static inline bool iwl_mvm_bt_is_rrc_supported(struct iwl_mvm *mvm)
IWL_MVM_BT_COEX_RRC;
}
static inline bool iwl_mvm_is_csum_supported(struct iwl_mvm *mvm)
{
return fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_CSUM_SUPPORT);
}
extern const u8 iwl_mvm_ac_to_tx_fifo[];
struct iwl_rate_info {
@ -975,12 +992,12 @@ u8 iwl_mvm_next_antenna(struct iwl_mvm *mvm, u8 valid, u8 last_idx);
/* Tx / Host Commands */
int __must_check iwl_mvm_send_cmd(struct iwl_mvm *mvm,
struct iwl_host_cmd *cmd);
int __must_check iwl_mvm_send_cmd_pdu(struct iwl_mvm *mvm, u8 id,
int __must_check iwl_mvm_send_cmd_pdu(struct iwl_mvm *mvm, u32 id,
u32 flags, u16 len, const void *data);
int __must_check iwl_mvm_send_cmd_status(struct iwl_mvm *mvm,
struct iwl_host_cmd *cmd,
u32 *status);
int __must_check iwl_mvm_send_cmd_pdu_status(struct iwl_mvm *mvm, u8 id,
int __must_check iwl_mvm_send_cmd_pdu_status(struct iwl_mvm *mvm, u32 id,
u16 len, const void *data,
u32 *status);
int iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb,
@ -989,10 +1006,6 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb);
void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,
struct iwl_tx_cmd *tx_cmd,
struct ieee80211_tx_info *info, u8 sta_id);
void iwl_mvm_set_tx_cmd_crypto(struct iwl_mvm *mvm,
struct ieee80211_tx_info *info,
struct iwl_tx_cmd *tx_cmd,
struct sk_buff *skb_frag);
void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm, struct iwl_tx_cmd *tx_cmd,
struct ieee80211_tx_info *info,
struct ieee80211_sta *sta, __le16 fc);
@ -1004,6 +1017,17 @@ static inline const char *iwl_mvm_get_tx_fail_reason(u32 status) { return ""; }
int iwl_mvm_flush_tx_path(struct iwl_mvm *mvm, u32 tfd_msk, bool sync);
void iwl_mvm_async_handlers_purge(struct iwl_mvm *mvm);
static inline void iwl_mvm_set_tx_cmd_ccmp(struct ieee80211_tx_info *info,
struct iwl_tx_cmd *tx_cmd)
{
struct ieee80211_key_conf *keyconf = info->control.hw_key;
tx_cmd->sec_ctl = TX_CMD_SEC_CCM;
memcpy(tx_cmd->key, keyconf->key, keyconf->keylen);
if (info->flags & IEEE80211_TX_CTL_AMPDU)
tx_cmd->tx_flags |= cpu_to_le32(TX_CMD_FLG_CCMP_AGG);
}
static inline void iwl_mvm_wait_for_async_handlers(struct iwl_mvm *mvm)
{
flush_work(&mvm->async_handlers_wk);
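
The iwl_mvm_set_tx_cmd_ccmp() inline added above takes over the CCMP case of the removed iwl_mvm_set_tx_cmd_crypto(). A hedged sketch of a call site (the wrapper function and its checks are assumptions; only the inline itself is defined in this diff):

/* Hypothetical TX-path call site: apply CCMP TX-command setup only
 * when mac80211 handed us a CCMP hardware key. */
static void ex_set_tx_crypto(struct ieee80211_tx_info *info,
			     struct iwl_tx_cmd *tx_cmd)
{
	struct ieee80211_key_conf *keyconf = info->control.hw_key;

	if (keyconf && keyconf->cipher == WLAN_CIPHER_SUITE_CCMP)
		iwl_mvm_set_tx_cmd_ccmp(info, tx_cmd);
}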
@ -1012,9 +1036,8 @@ static inline void iwl_mvm_wait_for_async_handlers(struct iwl_mvm *mvm)
/* Statistics */
void iwl_mvm_handle_rx_statistics(struct iwl_mvm *mvm,
struct iwl_rx_packet *pkt);
int iwl_mvm_rx_statistics(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd);
void iwl_mvm_rx_statistics(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb);
int iwl_mvm_request_statistics(struct iwl_mvm *mvm, bool clear);
void iwl_mvm_accu_radio_stats(struct iwl_mvm *mvm);
@ -1060,27 +1083,20 @@ bool iwl_mvm_bcast_filter_build_cmd(struct iwl_mvm *mvm,
* FW notifications / CMD responses handlers
* Convention: iwl_mvm_rx_<NAME OF THE CMD>
*/
int iwl_mvm_rx_rx_phy_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd);
int iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd);
int iwl_mvm_rx_tx_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd);
int iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd);
int iwl_mvm_rx_ant_coupling_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd);
int iwl_mvm_rx_fw_error(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd);
int iwl_mvm_rx_card_state_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd);
int iwl_mvm_rx_mfuart_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd);
int iwl_mvm_rx_shared_mem_cfg_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd);
void iwl_mvm_rx_rx_phy_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb);
void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct napi_struct *napi,
struct iwl_rx_cmd_buffer *rxb);
void iwl_mvm_rx_tx_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb);
void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb);
void iwl_mvm_rx_ant_coupling_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb);
void iwl_mvm_rx_fw_error(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb);
void iwl_mvm_rx_card_state_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb);
void iwl_mvm_rx_mfuart_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb);
void iwl_mvm_rx_shared_mem_cfg_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb);
/* MVM PHY */
int iwl_mvm_phy_ctxt_add(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt,
@ -1107,12 +1123,10 @@ int iwl_mvm_mac_ctxt_remove(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
u32 iwl_mvm_mac_get_queues_mask(struct ieee80211_vif *vif);
int iwl_mvm_mac_ctxt_beacon_changed(struct iwl_mvm *mvm,
struct ieee80211_vif *vif);
int iwl_mvm_rx_beacon_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd);
int iwl_mvm_rx_missed_beacons_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd);
void iwl_mvm_rx_beacon_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb);
void iwl_mvm_rx_missed_beacons_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb);
void iwl_mvm_mac_ctxt_recalc_tsf_id(struct iwl_mvm *mvm,
struct ieee80211_vif *vif);
unsigned long iwl_mvm_get_used_hw_queues(struct iwl_mvm *mvm,
@ -1136,29 +1150,24 @@ int iwl_mvm_max_scan_ie_len(struct iwl_mvm *mvm);
void iwl_mvm_report_scan_aborted(struct iwl_mvm *mvm);
/* Scheduled scan */
int iwl_mvm_rx_lmac_scan_complete_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd);
int iwl_mvm_rx_lmac_scan_iter_complete_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd);
void iwl_mvm_rx_lmac_scan_complete_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb);
void iwl_mvm_rx_lmac_scan_iter_complete_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb);
int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
struct cfg80211_sched_scan_request *req,
struct ieee80211_scan_ies *ies,
int type);
int iwl_mvm_rx_scan_match_found(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd);
void iwl_mvm_rx_scan_match_found(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb);
/* UMAC scan */
int iwl_mvm_config_scan(struct iwl_mvm *mvm);
int iwl_mvm_rx_umac_scan_complete_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd);
int iwl_mvm_rx_umac_scan_iter_complete_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd);
void iwl_mvm_rx_umac_scan_complete_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb);
void iwl_mvm_rx_umac_scan_iter_complete_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb);
/* MVM debugfs */
#ifdef CONFIG_IWLWIFI_DEBUGFS
@ -1197,9 +1206,8 @@ int iwl_mvm_power_mac_dbgfs_read(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
char *buf, int bufsz);
void iwl_mvm_power_vif_assoc(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
int iwl_mvm_power_uapsd_misbehaving_ap_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd);
void iwl_mvm_power_uapsd_misbehaving_ap_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb);
#ifdef CONFIG_IWLWIFI_LEDS
int iwl_mvm_leds_init(struct iwl_mvm *mvm);
@ -1255,9 +1263,8 @@ int _iwl_mvm_exit_d0i3(struct iwl_mvm *mvm);
/* BT Coex */
int iwl_send_bt_init_conf(struct iwl_mvm *mvm);
int iwl_mvm_rx_bt_coex_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd);
void iwl_mvm_rx_bt_coex_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb);
void iwl_mvm_bt_rssi_event(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
enum ieee80211_rssi_event_data);
void iwl_mvm_bt_coex_vif_change(struct iwl_mvm *mvm);
@ -1275,9 +1282,8 @@ u8 iwl_mvm_bt_coex_tx_prio(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr,
bool iwl_mvm_bt_coex_is_shared_ant_avail_old(struct iwl_mvm *mvm);
void iwl_mvm_bt_coex_vif_change_old(struct iwl_mvm *mvm);
int iwl_send_bt_init_conf_old(struct iwl_mvm *mvm);
int iwl_mvm_rx_bt_coex_notif_old(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd);
void iwl_mvm_rx_bt_coex_notif_old(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb);
void iwl_mvm_bt_rssi_event_old(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
enum ieee80211_rssi_event_data);
u16 iwl_mvm_coex_agg_time_limit_old(struct iwl_mvm *mvm,
@ -1286,9 +1292,8 @@ bool iwl_mvm_bt_coex_is_mimo_allowed_old(struct iwl_mvm *mvm,
struct ieee80211_sta *sta);
bool iwl_mvm_bt_coex_is_tpc_allowed_old(struct iwl_mvm *mvm,
enum ieee80211_band band);
int iwl_mvm_rx_ant_coupling_notif_old(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd);
void iwl_mvm_rx_ant_coupling_notif_old(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb);
/* beacon filtering */
#ifdef CONFIG_IWLWIFI_DEBUGFS
@ -1377,9 +1382,8 @@ static inline void iwl_mvm_enable_agg_txq(struct iwl_mvm *mvm, int queue,
/* Thermal management and CT-kill */
void iwl_mvm_tt_tx_backoff(struct iwl_mvm *mvm, u32 backoff);
void iwl_mvm_tt_temp_changed(struct iwl_mvm *mvm, u32 temp);
int iwl_mvm_temp_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd);
void iwl_mvm_temp_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb);
void iwl_mvm_tt_handler(struct iwl_mvm *mvm);
void iwl_mvm_tt_initialize(struct iwl_mvm *mvm, u32 min_backoff);
void iwl_mvm_tt_exit(struct iwl_mvm *mvm);
@ -1391,9 +1395,8 @@ struct iwl_mcc_update_resp *
iwl_mvm_update_mcc(struct iwl_mvm *mvm, const char *alpha2,
enum iwl_mcc_source src_id);
int iwl_mvm_init_mcc(struct iwl_mvm *mvm);
int iwl_mvm_rx_chub_update_mcc(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd);
void iwl_mvm_rx_chub_update_mcc(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb);
struct ieee80211_regdomain *iwl_mvm_get_regdomain(struct wiphy *wiphy,
const char *alpha2,
enum iwl_mcc_source src_id,
@ -1432,8 +1435,7 @@ void iwl_mvm_tdls_recv_channel_switch(struct ieee80211_hw *hw,
void iwl_mvm_tdls_cancel_channel_switch(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta);
int iwl_mvm_rx_tdls_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd);
void iwl_mvm_rx_tdls_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb);
void iwl_mvm_tdls_ch_switch_work(struct work_struct *work);
struct ieee80211_vif *iwl_mvm_get_bss_vif(struct iwl_mvm *mvm);
@ -1443,10 +1445,11 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm);
int iwl_mvm_start_fw_dbg_conf(struct iwl_mvm *mvm, u8 id);
int iwl_mvm_fw_dbg_collect(struct iwl_mvm *mvm, enum iwl_fw_dbg_trigger trig,
const char *str, size_t len, unsigned int delay);
const char *str, size_t len,
struct iwl_fw_dbg_trigger_tlv *trigger);
int iwl_mvm_fw_dbg_collect_desc(struct iwl_mvm *mvm,
struct iwl_mvm_dump_desc *desc,
unsigned int delay);
struct iwl_fw_dbg_trigger_tlv *trigger);
void iwl_mvm_free_fw_dump_desc(struct iwl_mvm *mvm);
int iwl_mvm_fw_dbg_collect_trig(struct iwl_mvm *mvm,
struct iwl_fw_dbg_trigger_tlv *trigger,
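
The u8 to u32 widening of the command-ID parameters above is the mvm side of the longer-CMD-ID work called out in the merge description: the wider type leaves room for a command group alongside the opcode. A hedged sketch of a caller (the packing macro and the group/opcode values are assumptions, not taken from this diff; the u32 id parameter of iwl_mvm_send_cmd_pdu() is):

/* Assumed layout: group in bits 8..15, opcode in bits 0..7; the real
 * iwlwifi helper may differ. */
#define EX_WIDE_ID(grp, opcode)	(((grp) << 8) | (opcode))

static int ex_send_simple(struct iwl_mvm *mvm)
{
	u32 id = EX_WIDE_ID(0x5, 0xc0);	/* hypothetical group/opcode */

	return iwl_mvm_send_cmd_pdu(mvm, id, 0, 0, NULL);
}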

Some files were not shown because too many files have changed in this diff.