
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/linville/wireless-next-2.6 into for-davem

Conflicts:
	Documentation/feature-removal-schedule.txt
	drivers/net/wireless/ath/ath5k/phy.c
	drivers/net/wireless/wl12xx/wl1271_main.c
wifi-calibration
John W. Linville 2010-04-15 16:21:34 -04:00
commit 5c01d56693
174 changed files with 8602 additions and 5618 deletions


@ -520,7 +520,6 @@ Who: Hans de Goede <hdegoede@redhat.com>
----------------------------
What: corgikbd, spitzkbd, tosakbd driver
When: 2.6.35
Files: drivers/input/keyboard/{corgi,spitz,tosa}kbd.c
@ -608,3 +607,24 @@ Why: Useful in 2003, implementation is a hack.
Generally invoked by accident today.
Seen as doing more harm than good.
Who: Len Brown <len.brown@intel.com>
----------------------------
What: iwlwifi 50XX module parameters
When: 2.6.40
Why: The "..50" modules parameters were used to configure 5000 series and
up devices; different set of module parameters also available for 4965
with same functionalities. Consolidate both set into single place
in drivers/net/wireless/iwlwifi/iwl-agn.c
Who: Wey-Yi Guy <wey-yi.w.guy@intel.com>
----------------------------
What: iwl4965 alias support
When: 2.6.40
Why: Internal alias support has been present in module-init-tools for some
time, so the MODULE_ALIAS("iwl4965") boilerplate aliases can be removed
with no impact.
Who: Wey-Yi Guy <wey-yi.w.guy@intel.com>


@ -79,7 +79,7 @@ __regwrite_out : \
if (__nreg) { \
if (IS_ACCEPTING_CMD(__ar)) \
__err = ar->exec_cmd(__ar, AR9170_CMD_WREG, \
8 * __nreg, \
8 * __nreg, \
(u8 *) &__ar->cmdbuf[1], \
0, NULL); \
__nreg = 0; \


@ -127,8 +127,8 @@ struct ar9170_eeprom {
__le16 checksum;
__le16 version;
u8 operating_flags;
#define AR9170_OPFLAG_5GHZ 1
#define AR9170_OPFLAG_2GHZ 2
#define AR9170_OPFLAG_5GHZ 1
#define AR9170_OPFLAG_2GHZ 2
u8 misc;
__le16 reg_domain[2];
u8 mac_address[6];


@ -425,5 +425,6 @@ enum ar9170_txq {
#define AR9170_TXQ_DEPTH 32
#define AR9170_TX_MAX_PENDING 128
#define AR9170_RX_STREAM_MAX_SIZE 65535
#endif /* __AR9170_HW_H */


@ -236,7 +236,7 @@ static void __ar9170_dump_txqueue(struct ar9170 *ar,
wiphy_name(ar->hw->wiphy), skb_queue_len(queue));
skb_queue_walk(queue, skb) {
printk(KERN_DEBUG "index:%d => \n", i++);
printk(KERN_DEBUG "index:%d =>\n", i++);
ar9170_print_txheader(ar, skb);
}
if (i != skb_queue_len(queue))
@ -281,7 +281,7 @@ static void ar9170_dump_tx_status_ampdu(struct ar9170 *ar)
unsigned long flags;
spin_lock_irqsave(&ar->tx_status_ampdu.lock, flags);
printk(KERN_DEBUG "%s: A-MPDU tx_status queue => \n",
printk(KERN_DEBUG "%s: A-MPDU tx_status queue =>\n",
wiphy_name(ar->hw->wiphy));
__ar9170_dump_txqueue(ar, &ar->tx_status_ampdu);
spin_unlock_irqrestore(&ar->tx_status_ampdu.lock, flags);
@ -308,7 +308,7 @@ static void ar9170_recycle_expired(struct ar9170 *ar,
if (time_is_before_jiffies(arinfo->timeout)) {
#ifdef AR9170_QUEUE_DEBUG
printk(KERN_DEBUG "%s: [%ld > %ld] frame expired => "
"recycle \n", wiphy_name(ar->hw->wiphy),
"recycle\n", wiphy_name(ar->hw->wiphy),
jiffies, arinfo->timeout);
ar9170_print_txheader(ar, skb);
#endif /* AR9170_QUEUE_DEBUG */
@ -689,7 +689,8 @@ void ar9170_handle_command_response(struct ar9170 *ar, void *buf, u32 len)
/* firmware debug */
case 0xca:
printk(KERN_DEBUG "ar9170 FW: %.*s\n", len - 4, (char *)buf + 4);
printk(KERN_DEBUG "ar9170 FW: %.*s\n", len - 4,
(char *)buf + 4);
break;
case 0xcb:
len -= 4;
@ -1728,7 +1729,7 @@ static void ar9170_tx(struct ar9170 *ar)
printk(KERN_DEBUG "%s: queue %d full\n",
wiphy_name(ar->hw->wiphy), i);
printk(KERN_DEBUG "%s: stuck frames: ===> \n",
printk(KERN_DEBUG "%s: stuck frames: ===>\n",
wiphy_name(ar->hw->wiphy));
ar9170_dump_txqueue(ar, &ar->tx_pending[i]);
ar9170_dump_txqueue(ar, &ar->tx_status[i]);
@ -2512,7 +2513,7 @@ void *ar9170_alloc(size_t priv_size)
* tends to split the streams into separate rx descriptors.
*/
skb = __dev_alloc_skb(AR9170_MAX_RX_BUFFER_SIZE, GFP_KERNEL);
skb = __dev_alloc_skb(AR9170_RX_STREAM_MAX_SIZE, GFP_KERNEL);
if (!skb)
goto err_nomem;


@ -67,18 +67,28 @@ static struct usb_device_id ar9170_usb_ids[] = {
{ USB_DEVICE(0x0cf3, 0x1001) },
/* TP-Link TL-WN821N v2 */
{ USB_DEVICE(0x0cf3, 0x1002) },
/* 3Com Dual Band 802.11n USB Adapter */
{ USB_DEVICE(0x0cf3, 0x1010) },
/* H3C Dual Band 802.11n USB Adapter */
{ USB_DEVICE(0x0cf3, 0x1011) },
/* Cace Airpcap NX */
{ USB_DEVICE(0xcace, 0x0300) },
/* D-Link DWA 160 A1 */
{ USB_DEVICE(0x07d1, 0x3c10) },
/* D-Link DWA 160 A2 */
{ USB_DEVICE(0x07d1, 0x3a09) },
/* Netgear WNA1000 */
{ USB_DEVICE(0x0846, 0x9040) },
/* Netgear WNDA3100 */
{ USB_DEVICE(0x0846, 0x9010) },
/* Netgear WN111 v2 */
{ USB_DEVICE(0x0846, 0x9001) },
/* Zydas ZD1221 */
{ USB_DEVICE(0x0ace, 0x1221) },
/* Proxim ORiNOCO 802.11n USB */
{ USB_DEVICE(0x1435, 0x0804) },
/* WNC Generic 11n USB Dongle */
{ USB_DEVICE(0x1435, 0x0326) },
/* ZyXEL NWD271N */
{ USB_DEVICE(0x0586, 0x3417) },
/* Z-Com UB81 BG */


@ -48,6 +48,12 @@ enum ath_device_state {
ATH_HW_INITIALIZED,
};
enum ath_bus_type {
ATH_PCI,
ATH_AHB,
ATH_USB,
};
struct reg_dmn_pair_mapping {
u16 regDmnEnum;
u16 reg_5ghz_ctl;
@ -73,9 +79,10 @@ struct ath_ops {
struct ath_common;
struct ath_bus_ops {
void (*read_cachesize)(struct ath_common *common, int *csz);
bool (*eeprom_read)(struct ath_common *common, u32 off, u16 *data);
void (*bt_coex_prep)(struct ath_common *common);
enum ath_bus_type ath_bus_type;
void (*read_cachesize)(struct ath_common *common, int *csz);
bool (*eeprom_read)(struct ath_common *common, u32 off, u16 *data);
void (*bt_coex_prep)(struct ath_common *common);
};
struct ath_common {


@ -12,5 +12,6 @@ ath5k-y += attach.o
ath5k-y += base.o
ath5k-y += led.o
ath5k-y += rfkill.o
ath5k-y += ani.o
ath5k-$(CONFIG_ATH5K_DEBUG) += debug.o
obj-$(CONFIG_ATH5K) += ath5k.o


@ -0,0 +1,744 @@
/*
* Copyright (C) 2010 Bruno Randolf <br1@einfach.org>
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include "ath5k.h"
#include "base.h"
#include "reg.h"
#include "debug.h"
#include "ani.h"
/**
* DOC: Basic ANI Operation
*
* Adaptive Noise Immunity (ANI) controls five noise immunity parameters
* depending on the amount of interference in the environment, increasing
* or reducing sensitivity as necessary.
*
* The parameters are:
* - "noise immunity"
* - "spur immunity"
* - "firstep level"
* - "OFDM weak signal detection"
* - "CCK weak signal detection"
*
* Basically we look at the number of OFDM and CCK timing errors we get and then
* raise or lower immunity accordingly by setting one or more of these
* parameters.
* Newer chipsets have PHY error counters in hardware which will generate a MIB
* interrupt when they overflow. Older hardware has to enable PHY error frames
* by setting an RX flag and then count every single PHY error. When a specified
* threshold of errors has been reached we will raise immunity.
* We also regularly check the number of errors and lower or raise immunity as
* necessary.
*/
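(Editorial aside, not part of the patch.) As a rough, self-contained C sketch of the decision described above: thresholds are scaled to the amount of listen time, immunity is raised as soon as a high threshold is exceeded, and lowered only after several quiet listen periods. The raise/lower helpers below are hypothetical placeholders for the driver's parameter setters; the trigger constants mirror the ATH5K_ANI_*_TRIG_* values added in ani.h.

#include <stdio.h>
#include <stdbool.h>

#define OFDM_TRIG_HIGH 500	/* errors per 1000 ms of listen time */
#define OFDM_TRIG_LOW  200
#define CCK_TRIG_HIGH  200
#define CCK_TRIG_LOW   100
#define LISTEN_PERIOD  100	/* ms */

/* hypothetical stand-ins for the driver's immunity setters */
static void raise_immunity(bool ofdm) { printf("raise immunity (%s)\n", ofdm ? "OFDM" : "CCK"); }
static void lower_immunity(void) { printf("lower immunity\n"); }

/* one ANI check: decide based on errors counted during listen_ms of listening */
static void ani_decide(unsigned listen_ms, unsigned ofdm_err, unsigned cck_err)
{
	unsigned ofdm_high = listen_ms * OFDM_TRIG_HIGH / 1000;
	unsigned cck_high = listen_ms * CCK_TRIG_HIGH / 1000;
	unsigned ofdm_low = listen_ms * OFDM_TRIG_LOW / 1000;
	unsigned cck_low = listen_ms * CCK_TRIG_LOW / 1000;

	if (ofdm_err > ofdm_high || cck_err > cck_high)
		raise_immunity(ofdm_err > ofdm_high);
	else if (listen_ms > 5 * LISTEN_PERIOD &&
		 ofdm_err <= ofdm_low && cck_err <= cck_low)
		lower_immunity();
	/* otherwise keep the current settings and keep counting */
}

int main(void)
{
	ani_decide(1000, 600, 50);	/* noisy second -> raise immunity (OFDM) */
	ani_decide(1000, 50, 20);	/* quiet for several periods -> lower immunity */
	return 0;
}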
/*** ANI parameter control ***/
/**
* ath5k_ani_set_noise_immunity_level() - Set noise immunity level
*
* @level: level between 0 and @ATH5K_ANI_MAX_NOISE_IMM_LVL
*/
void
ath5k_ani_set_noise_immunity_level(struct ath5k_hw *ah, int level)
{
/* TODO:
* ANI documents suggest the following five levels to use, but the HAL
* and ath9k only use the last two levels, making this
* essentially an on/off option. There *may* be a reason for this (???),
* so I stick with the HAL version for now...
*/
#if 0
const s8 hi[] = { -18, -18, -16, -14, -12 };
const s8 lo[] = { -52, -56, -60, -64, -70 };
const s8 sz[] = { -34, -41, -48, -55, -62 };
const s8 fr[] = { -70, -72, -75, -78, -80 };
#else
const s8 sz[] = { -55, -62 };
const s8 lo[] = { -64, -70 };
const s8 hi[] = { -14, -12 };
const s8 fr[] = { -78, -80 };
#endif
if (level < 0 || level > ARRAY_SIZE(sz)) {
ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_ANI,
"level out of range %d", level);
return;
}
AR5K_REG_WRITE_BITS(ah, AR5K_PHY_DESIRED_SIZE,
AR5K_PHY_DESIRED_SIZE_TOT, sz[level]);
AR5K_REG_WRITE_BITS(ah, AR5K_PHY_AGCCOARSE,
AR5K_PHY_AGCCOARSE_LO, lo[level]);
AR5K_REG_WRITE_BITS(ah, AR5K_PHY_AGCCOARSE,
AR5K_PHY_AGCCOARSE_HI, hi[level]);
AR5K_REG_WRITE_BITS(ah, AR5K_PHY_SIG,
AR5K_PHY_SIG_FIRPWR, fr[level]);
ah->ah_sc->ani_state.noise_imm_level = level;
ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_ANI, "new level %d", level);
}
/**
* ath5k_ani_set_spur_immunity_level() - Set spur immunity level
*
* @level: level between 0 and @max_spur_level (the maximum level is dependent
* on the chip revision).
*/
void
ath5k_ani_set_spur_immunity_level(struct ath5k_hw *ah, int level)
{
const int val[] = { 2, 4, 6, 8, 10, 12, 14, 16 };
if (level < 0 || level > ARRAY_SIZE(val) ||
level > ah->ah_sc->ani_state.max_spur_level) {
ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_ANI,
"level out of range %d", level);
return;
}
AR5K_REG_WRITE_BITS(ah, AR5K_PHY_OFDM_SELFCORR,
AR5K_PHY_OFDM_SELFCORR_CYPWR_THR1, val[level]);
ah->ah_sc->ani_state.spur_level = level;
ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_ANI, "new level %d", level);
}
/**
* ath5k_ani_set_firstep_level() - Set "firstep" level
*
* @level: level between 0 and @ATH5K_ANI_MAX_FIRSTEP_LVL
*/
void
ath5k_ani_set_firstep_level(struct ath5k_hw *ah, int level)
{
const int val[] = { 0, 4, 8 };
if (level < 0 || level > ARRAY_SIZE(val)) {
ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_ANI,
"level out of range %d", level);
return;
}
AR5K_REG_WRITE_BITS(ah, AR5K_PHY_SIG,
AR5K_PHY_SIG_FIRSTEP, val[level]);
ah->ah_sc->ani_state.firstep_level = level;
ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_ANI, "new level %d", level);
}
/**
* ath5k_ani_set_ofdm_weak_signal_detection() - Control OFDM weak signal
* detection
*
* @on: turn on or off
*/
void
ath5k_ani_set_ofdm_weak_signal_detection(struct ath5k_hw *ah, bool on)
{
const int m1l[] = { 127, 50 };
const int m2l[] = { 127, 40 };
const int m1[] = { 127, 0x4d };
const int m2[] = { 127, 0x40 };
const int m2cnt[] = { 31, 16 };
const int m2lcnt[] = { 63, 48 };
AR5K_REG_WRITE_BITS(ah, AR5K_PHY_WEAK_OFDM_LOW_THR,
AR5K_PHY_WEAK_OFDM_LOW_THR_M1, m1l[on]);
AR5K_REG_WRITE_BITS(ah, AR5K_PHY_WEAK_OFDM_LOW_THR,
AR5K_PHY_WEAK_OFDM_LOW_THR_M2, m2l[on]);
AR5K_REG_WRITE_BITS(ah, AR5K_PHY_WEAK_OFDM_HIGH_THR,
AR5K_PHY_WEAK_OFDM_HIGH_THR_M1, m1[on]);
AR5K_REG_WRITE_BITS(ah, AR5K_PHY_WEAK_OFDM_HIGH_THR,
AR5K_PHY_WEAK_OFDM_HIGH_THR_M2, m2[on]);
AR5K_REG_WRITE_BITS(ah, AR5K_PHY_WEAK_OFDM_HIGH_THR,
AR5K_PHY_WEAK_OFDM_HIGH_THR_M2_COUNT, m2cnt[on]);
AR5K_REG_WRITE_BITS(ah, AR5K_PHY_WEAK_OFDM_LOW_THR,
AR5K_PHY_WEAK_OFDM_LOW_THR_M2_COUNT, m2lcnt[on]);
if (on)
AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_WEAK_OFDM_LOW_THR,
AR5K_PHY_WEAK_OFDM_LOW_THR_SELFCOR_EN);
else
AR5K_REG_DISABLE_BITS(ah, AR5K_PHY_WEAK_OFDM_LOW_THR,
AR5K_PHY_WEAK_OFDM_LOW_THR_SELFCOR_EN);
ah->ah_sc->ani_state.ofdm_weak_sig = on;
ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_ANI, "turned %s",
on ? "on" : "off");
}
/**
* ath5k_ani_set_cck_weak_signal_detection() - control CCK weak signal detection
*
* @on: turn on or off
*/
void
ath5k_ani_set_cck_weak_signal_detection(struct ath5k_hw *ah, bool on)
{
const int val[] = { 8, 6 };
AR5K_REG_WRITE_BITS(ah, AR5K_PHY_CCK_CROSSCORR,
AR5K_PHY_CCK_CROSSCORR_WEAK_SIG_THR, val[on]);
ah->ah_sc->ani_state.cck_weak_sig = on;
ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_ANI, "turned %s",
on ? "on" : "off");
}
/*** ANI algorithm ***/
/**
* ath5k_ani_raise_immunity() - Increase noise immunity
*
* @ofdm_trigger: If this is true we are called because of too many OFDM errors,
* the algorithm will then tune more parameters.
*
* Try to raise noise immunity (=decrease sensitivity) in several steps
* depending on the average RSSI of the beacons we received.
*/
static void
ath5k_ani_raise_immunity(struct ath5k_hw *ah, struct ath5k_ani_state *as,
bool ofdm_trigger)
{
int rssi = ah->ah_beacon_rssi_avg.avg;
ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_ANI, "raise immunity (%s)",
ofdm_trigger ? "ODFM" : "CCK");
/* first: raise noise immunity */
if (as->noise_imm_level < ATH5K_ANI_MAX_NOISE_IMM_LVL) {
ath5k_ani_set_noise_immunity_level(ah, as->noise_imm_level + 1);
return;
}
/* only OFDM: raise spur immunity level */
if (ofdm_trigger &&
as->spur_level < ah->ah_sc->ani_state.max_spur_level) {
ath5k_ani_set_spur_immunity_level(ah, as->spur_level + 1);
return;
}
/* AP mode */
if (ah->ah_sc->opmode == NL80211_IFTYPE_AP) {
if (as->firstep_level < ATH5K_ANI_MAX_FIRSTEP_LVL)
ath5k_ani_set_firstep_level(ah, as->firstep_level + 1);
return;
}
/* STA and IBSS mode */
/* TODO: for IBSS mode it would be better to keep a beacon RSSI average
* for each neighbour node and use the minimum of these, to make sure we
* don't shut out a remote node by raising immunity too high. */
if (rssi > ATH5K_ANI_RSSI_THR_HIGH) {
ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_ANI,
"beacon RSSI high");
/* only OFDM: beacon RSSI is high, we can disable OFDM weak
* signal detection */
if (ofdm_trigger && as->ofdm_weak_sig == true) {
ath5k_ani_set_ofdm_weak_signal_detection(ah, false);
ath5k_ani_set_spur_immunity_level(ah, 0);
return;
}
/* as a last resort or CCK: raise firstep level */
if (as->firstep_level < ATH5K_ANI_MAX_FIRSTEP_LVL) {
ath5k_ani_set_firstep_level(ah, as->firstep_level + 1);
return;
}
} else if (rssi > ATH5K_ANI_RSSI_THR_LOW) {
/* beacon RSSI in mid range, we need OFDM weak signal detect,
* but can raise firstep level */
ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_ANI,
"beacon RSSI mid");
if (ofdm_trigger && as->ofdm_weak_sig == false)
ath5k_ani_set_ofdm_weak_signal_detection(ah, true);
if (as->firstep_level < ATH5K_ANI_MAX_FIRSTEP_LVL)
ath5k_ani_set_firstep_level(ah, as->firstep_level + 1);
return;
} else if (ah->ah_current_channel->band == IEEE80211_BAND_2GHZ) {
/* beacon RSSI is low. In B/G mode turn off OFDM weak signal
* detect and zero firstep level to maximize CCK sensitivity */
ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_ANI,
"beacon RSSI low, 2GHz");
if (ofdm_trigger && as->ofdm_weak_sig == true)
ath5k_ani_set_ofdm_weak_signal_detection(ah, false);
if (as->firstep_level > 0)
ath5k_ani_set_firstep_level(ah, 0);
return;
}
/* TODO: why not?:
if (as->cck_weak_sig == true) {
ath5k_ani_set_cck_weak_signal_detection(ah, false);
}
*/
}
/**
* ath5k_ani_lower_immunity() - Decrease noise immunity
*
* Try to lower noise immunity (=increase sensitivity) in several steps
* depending on the average RSSI of the beacons we received.
*/
static void
ath5k_ani_lower_immunity(struct ath5k_hw *ah, struct ath5k_ani_state *as)
{
int rssi = ah->ah_beacon_rssi_avg.avg;
ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_ANI, "lower immunity");
if (ah->ah_sc->opmode == NL80211_IFTYPE_AP) {
/* AP mode */
if (as->firstep_level > 0) {
ath5k_ani_set_firstep_level(ah, as->firstep_level - 1);
return;
}
} else {
/* STA and IBSS mode (see TODO above) */
if (rssi > ATH5K_ANI_RSSI_THR_HIGH) {
/* beacon signal is high, leave OFDM weak signal
* detection off or it may oscillate
* TODO: who said it's off??? */
} else if (rssi > ATH5K_ANI_RSSI_THR_LOW) {
/* beacon RSSI is mid-range: turn on OFDM weak signal
* detection and next, lower firstep level */
if (as->ofdm_weak_sig == false) {
ath5k_ani_set_ofdm_weak_signal_detection(ah,
true);
return;
}
if (as->firstep_level > 0) {
ath5k_ani_set_firstep_level(ah,
as->firstep_level - 1);
return;
}
} else {
/* beacon signal is low: only reduce firstep level */
if (as->firstep_level > 0) {
ath5k_ani_set_firstep_level(ah,
as->firstep_level - 1);
return;
}
}
}
/* all modes */
if (as->spur_level > 0) {
ath5k_ani_set_spur_immunity_level(ah, as->spur_level - 1);
return;
}
/* finally, reduce noise immunity */
if (as->noise_imm_level > 0) {
ath5k_ani_set_noise_immunity_level(ah, as->noise_imm_level - 1);
return;
}
}
/**
* ath5k_hw_ani_get_listen_time() - Calculate time spent listening
*
* Return an approximation of the time spent "listening" in milliseconds (ms)
* since the last call of this function by deducting the cycles spent
* transmitting and receiving from the total cycle count.
* Save profile count values for debugging/statistics and because we might want
* to use them later.
*
* We assume no one else clears these registers!
*/
static int
ath5k_hw_ani_get_listen_time(struct ath5k_hw *ah, struct ath5k_ani_state *as)
{
int listen;
/* freeze */
ath5k_hw_reg_write(ah, AR5K_MIBC_FMC, AR5K_MIBC);
/* read */
as->pfc_cycles = ath5k_hw_reg_read(ah, AR5K_PROFCNT_CYCLE);
as->pfc_busy = ath5k_hw_reg_read(ah, AR5K_PROFCNT_RXCLR);
as->pfc_tx = ath5k_hw_reg_read(ah, AR5K_PROFCNT_TX);
as->pfc_rx = ath5k_hw_reg_read(ah, AR5K_PROFCNT_RX);
/* clear */
ath5k_hw_reg_write(ah, 0, AR5K_PROFCNT_TX);
ath5k_hw_reg_write(ah, 0, AR5K_PROFCNT_RX);
ath5k_hw_reg_write(ah, 0, AR5K_PROFCNT_RXCLR);
ath5k_hw_reg_write(ah, 0, AR5K_PROFCNT_CYCLE);
/* un-freeze */
ath5k_hw_reg_write(ah, 0, AR5K_MIBC);
/* TODO: where does 44000 come from? (11g clock rate?) */
listen = (as->pfc_cycles - as->pfc_rx - as->pfc_tx) / 44000;
if (as->pfc_cycles == 0 || listen < 0)
return 0;
return listen;
}
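(Editorial aside, not part of the patch.) A quick numeric check of the conversion above, assuming the 44 000 divisor corresponds to a 44 MHz clock, i.e. 44 000 cycles per millisecond: subtracting the RX and TX cycle counts from the total and dividing by 44 000 gives milliseconds of idle "listen" time.

#include <stdio.h>

/* assumes 44,000 cycles per millisecond (44 MHz clock), as in the code above */
static int listen_time_ms(unsigned cycles, unsigned rx, unsigned tx)
{
	int listen = (int)(cycles - rx - tx) / 44000;

	return (cycles == 0 || listen < 0) ? 0 : listen;
}

int main(void)
{
	/* 44,000,000 cycles (~1 s) total, 8.8M spent receiving, 2.2M transmitting */
	printf("listen time: %d ms\n", listen_time_ms(44000000, 8800000, 2200000)); /* 750 */
	return 0;
}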
/**
* ath5k_ani_save_and_clear_phy_errors() - Clear and save PHY error counters
*
* Clear the PHY error counters as soon as possible, since this might be called
* from a MIB interrupt and we want to make sure we don't get interrupted again.
* Add the count of CCK and OFDM errors to our internal state, so it can be used
* by the algorithm later.
*
* Will be called from interrupt and tasklet context.
* Returns 0 if both counters are zero.
*/
static int
ath5k_ani_save_and_clear_phy_errors(struct ath5k_hw *ah,
struct ath5k_ani_state *as)
{
unsigned int ofdm_err, cck_err;
if (!ah->ah_capabilities.cap_has_phyerr_counters)
return 0;
ofdm_err = ath5k_hw_reg_read(ah, AR5K_PHYERR_CNT1);
cck_err = ath5k_hw_reg_read(ah, AR5K_PHYERR_CNT2);
/* reset counters first, we might be in a hurry (interrupt) */
ath5k_hw_reg_write(ah, ATH5K_PHYERR_CNT_MAX - ATH5K_ANI_OFDM_TRIG_HIGH,
AR5K_PHYERR_CNT1);
ath5k_hw_reg_write(ah, ATH5K_PHYERR_CNT_MAX - ATH5K_ANI_CCK_TRIG_HIGH,
AR5K_PHYERR_CNT2);
ofdm_err = ATH5K_ANI_OFDM_TRIG_HIGH - (ATH5K_PHYERR_CNT_MAX - ofdm_err);
cck_err = ATH5K_ANI_CCK_TRIG_HIGH - (ATH5K_PHYERR_CNT_MAX - cck_err);
/* sometimes both can be zero, especially when there is a superfluous
* second interrupt. detect that here and return an error. */
if (ofdm_err <= 0 && cck_err <= 0)
return 0;
/* avoid negative values should one of the registers overflow */
if (ofdm_err > 0) {
as->ofdm_errors += ofdm_err;
as->sum_ofdm_errors += ofdm_err;
}
if (cck_err > 0) {
as->cck_errors += cck_err;
as->sum_cck_errors += cck_err;
}
return 1;
}
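(Editorial aside, not part of the patch.) The arithmetic above relies on the counters being preloaded with ATH5K_PHYERR_CNT_MAX minus the trigger value, so they reach their maximum (and raise a MIB interrupt) after exactly that many errors; the raw register value then maps back to an error count. A small sketch, with the wrap value chosen arbitrarily for illustration:

#include <stdio.h>

#define CNT_MAX   0x7fffffffU	/* assumed wrap value, for illustration only */
#define TRIG_HIGH 500U		/* mirrors ATH5K_ANI_OFDM_TRIG_HIGH */

int main(void)
{
	/* the counter is preloaded so it reaches CNT_MAX after TRIG_HIGH errors */
	unsigned preload = CNT_MAX - TRIG_HIGH;

	/* suppose 123 errors occurred since the preload */
	unsigned raw = preload + 123;

	/* recover the error count the same way the driver does */
	unsigned errors = TRIG_HIGH - (CNT_MAX - raw);

	printf("errors since preload: %u\n", errors);	/* 123 */
	return 0;
}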
/**
* ath5k_ani_period_restart() - Restart ANI period
*
* Just reset counters, so they are clear for the next "ani period".
*/
static void
ath5k_ani_period_restart(struct ath5k_hw *ah, struct ath5k_ani_state *as)
{
/* keep last values for debugging */
as->last_ofdm_errors = as->ofdm_errors;
as->last_cck_errors = as->cck_errors;
as->last_listen = as->listen_time;
as->ofdm_errors = 0;
as->cck_errors = 0;
as->listen_time = 0;
}
/**
* ath5k_ani_calibration() - The main ANI calibration function
*
* We count OFDM and CCK errors relative to the time where we did not send or
* receive ("listen" time) and raise or lower immunity accordingly.
* This is called regularly (every second) from the calibration timer, but also
* when an error threshold has been reached.
*
* In order to synchronize access from different contexts, this should be
* called only indirectly by scheduling the ANI tasklet!
*/
void
ath5k_ani_calibration(struct ath5k_hw *ah)
{
struct ath5k_ani_state *as = &ah->ah_sc->ani_state;
int listen, ofdm_high, ofdm_low, cck_high, cck_low;
if (as->ani_mode != ATH5K_ANI_MODE_AUTO)
return;
/* get listen time since last call and add it to the counter because we
* might not have restarted the "ani period" last time */
listen = ath5k_hw_ani_get_listen_time(ah, as);
as->listen_time += listen;
ath5k_ani_save_and_clear_phy_errors(ah, as);
ofdm_high = as->listen_time * ATH5K_ANI_OFDM_TRIG_HIGH / 1000;
cck_high = as->listen_time * ATH5K_ANI_CCK_TRIG_HIGH / 1000;
ofdm_low = as->listen_time * ATH5K_ANI_OFDM_TRIG_LOW / 1000;
cck_low = as->listen_time * ATH5K_ANI_CCK_TRIG_LOW / 1000;
ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_ANI,
"listen %d (now %d)", as->listen_time, listen);
ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_ANI,
"check high ofdm %d/%d cck %d/%d",
as->ofdm_errors, ofdm_high, as->cck_errors, cck_high);
if (as->ofdm_errors > ofdm_high || as->cck_errors > cck_high) {
/* too many PHY errors - we have to raise immunity */
bool ofdm_flag = as->ofdm_errors > ofdm_high ? true : false;
ath5k_ani_raise_immunity(ah, as, ofdm_flag);
ath5k_ani_period_restart(ah, as);
} else if (as->listen_time > 5 * ATH5K_ANI_LISTEN_PERIOD) {
/* If more than 5 (TODO: why 5?) periods have passed and we got
* relatively little errors we can try to lower immunity */
ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_ANI,
"check low ofdm %d/%d cck %d/%d",
as->ofdm_errors, ofdm_low, as->cck_errors, cck_low);
if (as->ofdm_errors <= ofdm_low && as->cck_errors <= cck_low)
ath5k_ani_lower_immunity(ah, as);
ath5k_ani_period_restart(ah, as);
}
}
/*** INTERRUPT HANDLER ***/
/**
* ath5k_ani_mib_intr() - Interrupt handler for ANI MIB counters
*
* Just read & reset the registers quickly, so they don't generate more
* interrupts, save the counters and schedule the tasklet to decide whether
* to raise immunity or not.
*
* We just need to handle PHY error counters, ath5k_hw_update_mib_counters()
* should take care of all "normal" MIB interrupts.
*/
void
ath5k_ani_mib_intr(struct ath5k_hw *ah)
{
struct ath5k_ani_state *as = &ah->ah_sc->ani_state;
/* nothing to do here if HW does not have PHY error counters - they
* can't be the reason for the MIB interrupt then */
if (!ah->ah_capabilities.cap_has_phyerr_counters)
return;
/* not in use but clear anyways */
ath5k_hw_reg_write(ah, 0, AR5K_OFDM_FIL_CNT);
ath5k_hw_reg_write(ah, 0, AR5K_CCK_FIL_CNT);
if (ah->ah_sc->ani_state.ani_mode != ATH5K_ANI_MODE_AUTO)
return;
/* if one of the errors triggered, we can get a superfluous second
* interrupt, even though we have already reset the register. the
* function detects that so we can return early */
if (ath5k_ani_save_and_clear_phy_errors(ah, as) == 0)
return;
if (as->ofdm_errors > ATH5K_ANI_OFDM_TRIG_HIGH ||
as->cck_errors > ATH5K_ANI_CCK_TRIG_HIGH)
tasklet_schedule(&ah->ah_sc->ani_tasklet);
}
/**
* ath5k_ani_phy_error_report() - Used by older HW to report PHY errors
*
* This is used by hardware without PHY error counters to report PHY errors
* on a frame-by-frame basis, instead of the interrupt.
*/
void
ath5k_ani_phy_error_report(struct ath5k_hw *ah,
enum ath5k_phy_error_code phyerr)
{
struct ath5k_ani_state *as = &ah->ah_sc->ani_state;
if (phyerr == AR5K_RX_PHY_ERROR_OFDM_TIMING) {
as->ofdm_errors++;
if (as->ofdm_errors > ATH5K_ANI_OFDM_TRIG_HIGH)
tasklet_schedule(&ah->ah_sc->ani_tasklet);
} else if (phyerr == AR5K_RX_PHY_ERROR_CCK_TIMING) {
as->cck_errors++;
if (as->cck_errors > ATH5K_ANI_CCK_TRIG_HIGH)
tasklet_schedule(&ah->ah_sc->ani_tasklet);
}
}
/*** INIT ***/
/**
* ath5k_enable_phy_err_counters() - Enable PHY error counters
*
* Enable PHY error counters for OFDM and CCK timing errors.
*/
static void
ath5k_enable_phy_err_counters(struct ath5k_hw *ah)
{
ath5k_hw_reg_write(ah, ATH5K_PHYERR_CNT_MAX - ATH5K_ANI_OFDM_TRIG_HIGH,
AR5K_PHYERR_CNT1);
ath5k_hw_reg_write(ah, ATH5K_PHYERR_CNT_MAX - ATH5K_ANI_CCK_TRIG_HIGH,
AR5K_PHYERR_CNT2);
ath5k_hw_reg_write(ah, AR5K_PHY_ERR_FIL_OFDM, AR5K_PHYERR_CNT1_MASK);
ath5k_hw_reg_write(ah, AR5K_PHY_ERR_FIL_CCK, AR5K_PHYERR_CNT2_MASK);
/* not in use */
ath5k_hw_reg_write(ah, 0, AR5K_OFDM_FIL_CNT);
ath5k_hw_reg_write(ah, 0, AR5K_CCK_FIL_CNT);
}
/**
* ath5k_disable_phy_err_counters() - Disable PHY error counters
*
* Disable PHY error counters for OFDM and CCK timing errors.
*/
static void
ath5k_disable_phy_err_counters(struct ath5k_hw *ah)
{
ath5k_hw_reg_write(ah, 0, AR5K_PHYERR_CNT1);
ath5k_hw_reg_write(ah, 0, AR5K_PHYERR_CNT2);
ath5k_hw_reg_write(ah, 0, AR5K_PHYERR_CNT1_MASK);
ath5k_hw_reg_write(ah, 0, AR5K_PHYERR_CNT2_MASK);
/* not in use */
ath5k_hw_reg_write(ah, 0, AR5K_OFDM_FIL_CNT);
ath5k_hw_reg_write(ah, 0, AR5K_CCK_FIL_CNT);
}
/**
* ath5k_ani_init() - Initialize ANI
* @mode: Which mode to use (auto, manual high, manual low, off)
*
* Initialize ANI according to mode.
*/
void
ath5k_ani_init(struct ath5k_hw *ah, enum ath5k_ani_mode mode)
{
/* ANI is only possible on 5212 and newer */
if (ah->ah_version < AR5K_AR5212)
return;
/* clear old state information */
memset(&ah->ah_sc->ani_state, 0, sizeof(ah->ah_sc->ani_state));
/* older hardware has more spur levels than newer */
if (ah->ah_mac_srev < AR5K_SREV_AR2414)
ah->ah_sc->ani_state.max_spur_level = 7;
else
ah->ah_sc->ani_state.max_spur_level = 2;
/* initial values for our ani parameters */
if (mode == ATH5K_ANI_MODE_OFF) {
ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_ANI, "ANI off\n");
} else if (mode == ATH5K_ANI_MODE_MANUAL_LOW) {
ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_ANI,
"ANI manual low -> high sensitivity\n");
ath5k_ani_set_noise_immunity_level(ah, 0);
ath5k_ani_set_spur_immunity_level(ah, 0);
ath5k_ani_set_firstep_level(ah, 0);
ath5k_ani_set_ofdm_weak_signal_detection(ah, true);
ath5k_ani_set_cck_weak_signal_detection(ah, true);
} else if (mode == ATH5K_ANI_MODE_MANUAL_HIGH) {
ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_ANI,
"ANI manual high -> low sensitivity\n");
ath5k_ani_set_noise_immunity_level(ah,
ATH5K_ANI_MAX_NOISE_IMM_LVL);
ath5k_ani_set_spur_immunity_level(ah,
ah->ah_sc->ani_state.max_spur_level);
ath5k_ani_set_firstep_level(ah, ATH5K_ANI_MAX_FIRSTEP_LVL);
ath5k_ani_set_ofdm_weak_signal_detection(ah, false);
ath5k_ani_set_cck_weak_signal_detection(ah, false);
} else if (mode == ATH5K_ANI_MODE_AUTO) {
ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_ANI, "ANI auto\n");
ath5k_ani_set_noise_immunity_level(ah, 0);
ath5k_ani_set_spur_immunity_level(ah, 0);
ath5k_ani_set_firstep_level(ah, 0);
ath5k_ani_set_ofdm_weak_signal_detection(ah, true);
ath5k_ani_set_cck_weak_signal_detection(ah, false);
}
/* newer hardware has PHY error counter registers which we can use to
* get OFDM and CCK error counts. older hardware has to set rxfilter and
* report every single PHY error by calling ath5k_ani_phy_error_report()
*/
if (mode == ATH5K_ANI_MODE_AUTO) {
if (ah->ah_capabilities.cap_has_phyerr_counters)
ath5k_enable_phy_err_counters(ah);
else
ath5k_hw_set_rx_filter(ah, ath5k_hw_get_rx_filter(ah) |
AR5K_RX_FILTER_PHYERR);
} else {
if (ah->ah_capabilities.cap_has_phyerr_counters)
ath5k_disable_phy_err_counters(ah);
else
ath5k_hw_set_rx_filter(ah, ath5k_hw_get_rx_filter(ah) &
~AR5K_RX_FILTER_PHYERR);
}
ah->ah_sc->ani_state.ani_mode = mode;
}
/*** DEBUG ***/
#ifdef CONFIG_ATH5K_DEBUG
void
ath5k_ani_print_counters(struct ath5k_hw *ah)
{
/* clears too */
printk(KERN_NOTICE "ACK fail\t%d\n",
ath5k_hw_reg_read(ah, AR5K_ACK_FAIL));
printk(KERN_NOTICE "RTS fail\t%d\n",
ath5k_hw_reg_read(ah, AR5K_RTS_FAIL));
printk(KERN_NOTICE "RTS success\t%d\n",
ath5k_hw_reg_read(ah, AR5K_RTS_OK));
printk(KERN_NOTICE "FCS error\t%d\n",
ath5k_hw_reg_read(ah, AR5K_FCS_FAIL));
/* no clear */
printk(KERN_NOTICE "tx\t%d\n",
ath5k_hw_reg_read(ah, AR5K_PROFCNT_TX));
printk(KERN_NOTICE "rx\t%d\n",
ath5k_hw_reg_read(ah, AR5K_PROFCNT_RX));
printk(KERN_NOTICE "busy\t%d\n",
ath5k_hw_reg_read(ah, AR5K_PROFCNT_RXCLR));
printk(KERN_NOTICE "cycles\t%d\n",
ath5k_hw_reg_read(ah, AR5K_PROFCNT_CYCLE));
printk(KERN_NOTICE "AR5K_PHYERR_CNT1\t%d\n",
ath5k_hw_reg_read(ah, AR5K_PHYERR_CNT1));
printk(KERN_NOTICE "AR5K_PHYERR_CNT2\t%d\n",
ath5k_hw_reg_read(ah, AR5K_PHYERR_CNT2));
printk(KERN_NOTICE "AR5K_OFDM_FIL_CNT\t%d\n",
ath5k_hw_reg_read(ah, AR5K_OFDM_FIL_CNT));
printk(KERN_NOTICE "AR5K_CCK_FIL_CNT\t%d\n",
ath5k_hw_reg_read(ah, AR5K_CCK_FIL_CNT));
}
#endif


@ -0,0 +1,104 @@
/*
* Copyright (C) 2010 Bruno Randolf <br1@einfach.org>
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef ANI_H
#define ANI_H
/* these thresholds are relative to the ATH5K_ANI_LISTEN_PERIOD */
#define ATH5K_ANI_LISTEN_PERIOD 100
#define ATH5K_ANI_OFDM_TRIG_HIGH 500
#define ATH5K_ANI_OFDM_TRIG_LOW 200
#define ATH5K_ANI_CCK_TRIG_HIGH 200
#define ATH5K_ANI_CCK_TRIG_LOW 100
/* average beacon RSSI thresholds */
#define ATH5K_ANI_RSSI_THR_HIGH 40
#define ATH5K_ANI_RSSI_THR_LOW 7
/* maximum available levels */
#define ATH5K_ANI_MAX_FIRSTEP_LVL 2
#define ATH5K_ANI_MAX_NOISE_IMM_LVL 1
/**
* enum ath5k_ani_mode - mode for ANI / noise sensitivity
*
* @ATH5K_ANI_MODE_OFF: Turn ANI off. This can be useful to just stop the ANI
* algorithm after it has been on auto mode.
* @ATH5K_ANI_MODE_MANUAL_LOW: Manually set all immunity parameters to low,
* maximizing sensitivity. ANI will not run.
* @ATH5K_ANI_MODE_MANUAL_HIGH: Manually set all immunity parameters to high,
* minimizing sensitivity. ANI will not run.
* @ATH5K_ANI_MODE_AUTO: Automatically control immunity parameters based on the
* amount of OFDM and CCK frame errors (default).
*/
enum ath5k_ani_mode {
ATH5K_ANI_MODE_OFF = 0,
ATH5K_ANI_MODE_MANUAL_LOW = 1,
ATH5K_ANI_MODE_MANUAL_HIGH = 2,
ATH5K_ANI_MODE_AUTO = 3
};
/**
* struct ath5k_ani_state - ANI state and associated counters
*
* @max_spur_level: the maximum spur level is chip dependent
*/
struct ath5k_ani_state {
enum ath5k_ani_mode ani_mode;
/* state */
int noise_imm_level;
int spur_level;
int firstep_level;
bool ofdm_weak_sig;
bool cck_weak_sig;
int max_spur_level;
/* used by the algorithm */
unsigned int listen_time;
unsigned int ofdm_errors;
unsigned int cck_errors;
/* debug/statistics only: numbers from last ANI calibration */
unsigned int pfc_tx;
unsigned int pfc_rx;
unsigned int pfc_busy;
unsigned int pfc_cycles;
unsigned int last_listen;
unsigned int last_ofdm_errors;
unsigned int last_cck_errors;
unsigned int sum_ofdm_errors;
unsigned int sum_cck_errors;
};
void ath5k_ani_init(struct ath5k_hw *ah, enum ath5k_ani_mode mode);
void ath5k_ani_mib_intr(struct ath5k_hw *ah);
void ath5k_ani_calibration(struct ath5k_hw *ah);
void ath5k_ani_phy_error_report(struct ath5k_hw *ah,
enum ath5k_phy_error_code phyerr);
/* for manual control */
void ath5k_ani_set_noise_immunity_level(struct ath5k_hw *ah, int level);
void ath5k_ani_set_spur_immunity_level(struct ath5k_hw *ah, int level);
void ath5k_ani_set_firstep_level(struct ath5k_hw *ah, int level);
void ath5k_ani_set_ofdm_weak_signal_detection(struct ath5k_hw *ah, bool on);
void ath5k_ani_set_cck_weak_signal_detection(struct ath5k_hw *ah, bool on);
void ath5k_ani_print_counters(struct ath5k_hw *ah);
#endif /* ANI_H */


@ -202,6 +202,8 @@
#define AR5K_TUNE_MAX_TXPOWER 63
#define AR5K_TUNE_DEFAULT_TXPOWER 25
#define AR5K_TUNE_TPC_TXPOWER false
#define ATH5K_TUNE_CALIBRATION_INTERVAL_FULL 10000 /* 10 sec */
#define ATH5K_TUNE_CALIBRATION_INTERVAL_ANI 1000 /* 1 sec */
#define AR5K_INIT_CARR_SENSE_EN 1
@ -799,9 +801,9 @@ struct ath5k_athchan_2ghz {
* @AR5K_INT_TXURN: received when we should increase the TX trigger threshold
* We currently do increments on interrupt by
* (AR5K_TUNE_MAX_TX_FIFO_THRES - current_trigger_level) / 2
* @AR5K_INT_MIB: Indicates the Management Information Base counters should be
* checked. We should do this with ath5k_hw_update_mib_counters() but
* it seems we should also then do some noise immunity work.
* @AR5K_INT_MIB: Indicates that either the Management Information Base counters or
* one of the PHY error counters reached the maximum value and should be
* read and cleared.
* @AR5K_INT_RXPHY: RX PHY Error
* @AR5K_INT_RXKCM: RX Key cache miss
* @AR5K_INT_SWBA: SoftWare Beacon Alert - indicates its time to send a
@ -889,10 +891,11 @@ enum ath5k_int {
AR5K_INT_NOCARD = 0xffffffff
};
/* Software interrupts used for calibration */
enum ath5k_software_interrupt {
AR5K_SWI_FULL_CALIBRATION = 0x01,
AR5K_SWI_SHORT_CALIBRATION = 0x02,
/* mask which calibration is active at the moment */
enum ath5k_calibration_mask {
AR5K_CALIBRATION_FULL = 0x01,
AR5K_CALIBRATION_SHORT = 0x02,
AR5K_CALIBRATION_ANI = 0x04,
};
/*
@ -981,6 +984,8 @@ struct ath5k_capabilities {
struct {
u8 q_tx_num;
} cap_queues;
bool cap_has_phyerr_counters;
};
/* size of noise floor history (keep it a power of two) */
@ -991,6 +996,15 @@ struct ath5k_nfcal_hist
s16 nfval[ATH5K_NF_CAL_HIST_MAX]; /* last few noise floors */
};
/**
* struct avg_val - Helper structure for average calculation
* @avg: contains the actual average value
* @avg_weight: is used internally during calculation to prevent rounding errors
*/
struct ath5k_avg_val {
int avg;
int avg_weight;
};
/***************************************\
HARDWARE ABSTRACTION LAYER STRUCTURE
@ -1095,17 +1109,18 @@ struct ath5k_hw {
struct ath5k_nfcal_hist ah_nfcal_hist;
/* average beacon RSSI in our BSS (used by ANI) */
struct ath5k_avg_val ah_beacon_rssi_avg;
/* noise floor from last periodic calibration */
s32 ah_noise_floor;
/* Calibration timestamp */
unsigned long ah_cal_tstamp;
unsigned long ah_cal_next_full;
unsigned long ah_cal_next_ani;
/* Calibration interval (secs) */
u8 ah_cal_intval;
/* Software interrupt mask */
u8 ah_swi_mask;
/* Calibration mask */
u8 ah_cal_mask;
/*
* Function pointers
@ -1163,8 +1178,7 @@ int ath5k_hw_update_tx_triglevel(struct ath5k_hw *ah, bool increase);
bool ath5k_hw_is_intr_pending(struct ath5k_hw *ah);
int ath5k_hw_get_isr(struct ath5k_hw *ah, enum ath5k_int *interrupt_mask);
enum ath5k_int ath5k_hw_set_imr(struct ath5k_hw *ah, enum ath5k_int new_mask);
void ath5k_hw_update_mib_counters(struct ath5k_hw *ah,
struct ieee80211_low_level_stats *stats);
void ath5k_hw_update_mib_counters(struct ath5k_hw *ah);
/* EEPROM access functions */
int ath5k_eeprom_init(struct ath5k_hw *ah);
@ -1256,7 +1270,6 @@ int ath5k_hw_channel(struct ath5k_hw *ah, struct ieee80211_channel *channel);
void ath5k_hw_init_nfcal_hist(struct ath5k_hw *ah);
int ath5k_hw_phy_calibrate(struct ath5k_hw *ah,
struct ieee80211_channel *channel);
void ath5k_hw_calibration_poll(struct ath5k_hw *ah);
/* Spur mitigation */
bool ath5k_hw_chan_has_spur_noise(struct ath5k_hw *ah,
struct ieee80211_channel *channel);
@ -1308,4 +1321,27 @@ static inline u32 ath5k_hw_bitswap(u32 val, unsigned int bits)
return retval;
}
#define AVG_SAMPLES 8
#define AVG_FACTOR 1000
/**
* ath5k_moving_average - Exponentially weighted moving average
* @avg: average structure
* @val: current value
*
* This implementation makes use of a struct ath5k_avg_val to prevent rounding
* errors.
*/
static inline struct ath5k_avg_val
ath5k_moving_average(const struct ath5k_avg_val avg, const int val)
{
struct ath5k_avg_val new;
new.avg_weight = avg.avg_weight ?
(((avg.avg_weight * ((AVG_SAMPLES) - 1)) +
(val * (AVG_FACTOR))) / (AVG_SAMPLES)) :
(val * (AVG_FACTOR));
new.avg = new.avg_weight / (AVG_FACTOR);
return new;
}
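(Editorial aside, not part of the patch.) A standalone copy of the averaging helper with a worked run: keeping avg_weight scaled by AVG_FACTOR preserves the fractional part between updates, which a plain integer average would round away on every step.

#include <stdio.h>

#define AVG_SAMPLES 8
#define AVG_FACTOR  1000

struct avg_val { int avg; int avg_weight; };

/* same exponentially weighted moving average as the inline above */
static struct avg_val moving_average(struct avg_val avg, int val)
{
	struct avg_val new;

	new.avg_weight = avg.avg_weight ?
		(avg.avg_weight * (AVG_SAMPLES - 1) + val * AVG_FACTOR) / AVG_SAMPLES :
		val * AVG_FACTOR;
	new.avg = new.avg_weight / AVG_FACTOR;
	return new;
}

int main(void)
{
	struct avg_val a = { 0, 0 };
	int rssi[] = { 20, 22, 25, 24, 23 };
	unsigned i;

	for (i = 0; i < sizeof(rssi) / sizeof(rssi[0]); i++) {
		a = moving_average(a, rssi[i]);
		printf("sample %d -> avg %d (weight %d)\n", rssi[i], a.avg, a.avg_weight);
	}
	return 0;
}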
#endif


@ -124,6 +124,8 @@ int ath5k_hw_attach(struct ath5k_softc *sc)
ah->ah_limit_tx_retries = AR5K_INIT_TX_RETRY;
ah->ah_software_retry = false;
ah->ah_ant_mode = AR5K_ANTMODE_DEFAULT;
ah->ah_noise_floor = -95; /* until first NF calibration is run */
sc->ani_state.ani_mode = ATH5K_ANI_MODE_AUTO;
/*
* Find the mac version


@ -59,8 +59,8 @@
#include "base.h"
#include "reg.h"
#include "debug.h"
#include "ani.h"
static u8 ath5k_calinterval = 10; /* Calibrate PHY every 10 secs (TODO: Fixme) */
static int modparam_nohwcrypt;
module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO);
MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption.");
@ -365,6 +365,7 @@ static void ath5k_beacon_send(struct ath5k_softc *sc);
static void ath5k_beacon_config(struct ath5k_softc *sc);
static void ath5k_beacon_update_timers(struct ath5k_softc *sc, u64 bc_tsf);
static void ath5k_tasklet_beacon(unsigned long data);
static void ath5k_tasklet_ani(unsigned long data);
static inline u64 ath5k_extend_tsf(struct ath5k_hw *ah, u32 rstamp)
{
@ -830,6 +831,7 @@ ath5k_attach(struct pci_dev *pdev, struct ieee80211_hw *hw)
tasklet_init(&sc->restq, ath5k_tasklet_reset, (unsigned long)sc);
tasklet_init(&sc->calib, ath5k_tasklet_calibrate, (unsigned long)sc);
tasklet_init(&sc->beacontq, ath5k_tasklet_beacon, (unsigned long)sc);
tasklet_init(&sc->ani_tasklet, ath5k_tasklet_ani, (unsigned long)sc);
ret = ath5k_eeprom_read_mac(ah, mac);
if (ret) {
@ -1635,7 +1637,6 @@ ath5k_txq_cleanup(struct ath5k_softc *sc)
sc->txqs[i].link);
}
}
ieee80211_wake_queues(sc->hw); /* XXX move to callers */
for (i = 0; i < ARRAY_SIZE(sc->txqs); i++)
if (sc->txqs[i].setup)
@ -1805,6 +1806,25 @@ ath5k_check_ibss_tsf(struct ath5k_softc *sc, struct sk_buff *skb,
}
}
static void
ath5k_update_beacon_rssi(struct ath5k_softc *sc, struct sk_buff *skb, int rssi)
{
struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data;
struct ath5k_hw *ah = sc->ah;
struct ath_common *common = ath5k_hw_common(ah);
/* only beacons from our BSSID */
if (!ieee80211_is_beacon(mgmt->frame_control) ||
memcmp(mgmt->bssid, common->curbssid, ETH_ALEN) != 0)
return;
ah->ah_beacon_rssi_avg = ath5k_moving_average(ah->ah_beacon_rssi_avg,
rssi);
/* in IBSS mode we should keep RSSI statistics per neighbour */
/* le16_to_cpu(mgmt->u.beacon.capab_info) & WLAN_CAPABILITY_IBSS */
}
/*
* Compute padding position. skb must contains an IEEE 802.11 frame
*/
@ -1923,6 +1943,8 @@ ath5k_tasklet_rx(unsigned long data)
sc->stats.rxerr_fifo++;
if (rs.rs_status & AR5K_RXERR_PHY) {
sc->stats.rxerr_phy++;
if (rs.rs_phyerr > 0 && rs.rs_phyerr < 32)
sc->stats.rxerr_phy_code[rs.rs_phyerr]++;
goto next;
}
if (rs.rs_status & AR5K_RXERR_DECRYPT) {
@ -2024,6 +2046,8 @@ accept:
ath5k_debug_dump_skb(sc, skb, "RX ", 0);
ath5k_update_beacon_rssi(sc, skb, rs.rs_rssi);
/* check beacons in IBSS mode */
if (sc->opmode == NL80211_IFTYPE_ADHOC)
ath5k_check_ibss_tsf(sc, skb, rxs);
@ -2060,6 +2084,17 @@ ath5k_tx_processq(struct ath5k_softc *sc, struct ath5k_txq *txq)
list_for_each_entry_safe(bf, bf0, &txq->q, list) {
ds = bf->desc;
/*
* It's possible that the hardware can say the buffer is
* completed when it hasn't yet loaded the ds_link from
* host memory and moved on. If there are more TX
* descriptors in the queue, wait for TXDP to change
* before processing this one.
*/
if (ath5k_hw_get_txdp(sc->ah, txq->qnum) == bf->daddr &&
!list_is_last(&bf->list, &txq->q))
break;
ret = sc->ah->ah_proc_tx_desc(sc->ah, ds, &ts);
if (unlikely(ret == -EINPROGRESS))
break;
@ -2095,7 +2130,7 @@ ath5k_tx_processq(struct ath5k_softc *sc, struct ath5k_txq *txq)
info->status.rates[ts.ts_final_idx].count++;
if (unlikely(ts.ts_status)) {
sc->ll_stats.dot11ACKFailureCount++;
sc->stats.ack_fail++;
if (ts.ts_status & AR5K_TXERR_FILT) {
info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
sc->stats.txerr_filt++;
@ -2498,9 +2533,6 @@ ath5k_init(struct ath5k_softc *sc)
*/
ath5k_stop_locked(sc);
/* Set PHY calibration interval */
ah->ah_cal_intval = ath5k_calinterval;
/*
* The basic interface to setting the hardware in a good
* state is ``reset''. On return the hardware is known to
@ -2512,7 +2544,8 @@ ath5k_init(struct ath5k_softc *sc)
sc->curband = &sc->sbands[sc->curchan->band];
sc->imask = AR5K_INT_RXOK | AR5K_INT_RXERR | AR5K_INT_RXEOL |
AR5K_INT_RXORN | AR5K_INT_TXDESC | AR5K_INT_TXEOL |
AR5K_INT_FATAL | AR5K_INT_GLOBAL | AR5K_INT_SWI;
AR5K_INT_FATAL | AR5K_INT_GLOBAL | AR5K_INT_MIB;
ret = ath5k_reset(sc, NULL);
if (ret)
goto done;
@ -2526,8 +2559,7 @@ ath5k_init(struct ath5k_softc *sc)
for (i = 0; i < AR5K_KEYTABLE_SIZE; i++)
ath5k_hw_reset_key(ah, i);
/* Set ack to be sent at low bit-rates */
ath5k_hw_set_ack_bitrate_high(ah, false);
ath5k_hw_set_ack_bitrate_high(ah, true);
ret = 0;
done:
mmiowb();
@ -2624,12 +2656,33 @@ ath5k_stop_hw(struct ath5k_softc *sc)
tasklet_kill(&sc->restq);
tasklet_kill(&sc->calib);
tasklet_kill(&sc->beacontq);
tasklet_kill(&sc->ani_tasklet);
ath5k_rfkill_hw_stop(sc->ah);
return ret;
}
static void
ath5k_intr_calibration_poll(struct ath5k_hw *ah)
{
if (time_is_before_eq_jiffies(ah->ah_cal_next_ani) &&
!(ah->ah_cal_mask & AR5K_CALIBRATION_FULL)) {
/* run ANI only when full calibration is not active */
ah->ah_cal_next_ani = jiffies +
msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_ANI);
tasklet_schedule(&ah->ah_sc->ani_tasklet);
} else if (time_is_before_eq_jiffies(ah->ah_cal_next_full)) {
ah->ah_cal_next_full = jiffies +
msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_FULL);
tasklet_schedule(&ah->ah_sc->calib);
}
/* we could use SWI to generate enough interrupts to meet our
* calibration interval requirements, if necessary:
* AR5K_REG_ENABLE_BITS(ah, AR5K_CR, AR5K_CR_SWI); */
}
static irqreturn_t
ath5k_intr(int irq, void *dev_id)
{
@ -2653,7 +2706,20 @@ ath5k_intr(int irq, void *dev_id)
*/
tasklet_schedule(&sc->restq);
} else if (unlikely(status & AR5K_INT_RXORN)) {
tasklet_schedule(&sc->restq);
/*
* Receive buffers are full. Either the bus is busy or
* the CPU is not fast enough to process all received
* frames.
* Older chipsets need a reset to come out of this
* condition, but we treat it as RX for newer chips.
* We don't know exactly which versions need a reset -
* this guess is copied from the HAL.
*/
sc->stats.rxorn_intr++;
if (ah->ah_mac_srev < AR5K_SREV_AR5212)
tasklet_schedule(&sc->restq);
else
tasklet_schedule(&sc->rxtq);
} else {
if (status & AR5K_INT_SWBA) {
tasklet_hi_schedule(&sc->beacontq);
@ -2678,15 +2744,10 @@ ath5k_intr(int irq, void *dev_id)
if (status & AR5K_INT_BMISS) {
/* TODO */
}
if (status & AR5K_INT_SWI) {
tasklet_schedule(&sc->calib);
}
if (status & AR5K_INT_MIB) {
/*
* These stats are also used for ANI i think
* so how about updating them more often ?
*/
ath5k_hw_update_mib_counters(ah, &sc->ll_stats);
sc->stats.mib_intr++;
ath5k_hw_update_mib_counters(ah);
ath5k_ani_mib_intr(ah);
}
if (status & AR5K_INT_GPIO)
tasklet_schedule(&sc->rf_kill.toggleq);
@ -2697,7 +2758,7 @@ ath5k_intr(int irq, void *dev_id)
if (unlikely(!counter))
ATH5K_WARN(sc, "too many interrupts, giving up for now\n");
ath5k_hw_calibration_poll(ah);
ath5k_intr_calibration_poll(ah);
return IRQ_HANDLED;
}
@ -2721,8 +2782,7 @@ ath5k_tasklet_calibrate(unsigned long data)
struct ath5k_hw *ah = sc->ah;
/* Only full calibration for now */
if (ah->ah_swi_mask != AR5K_SWI_FULL_CALIBRATION)
return;
ah->ah_cal_mask |= AR5K_CALIBRATION_FULL;
/* Stop queues so that calibration
* doesn't interfere with tx */
@ -2738,18 +2798,29 @@ ath5k_tasklet_calibrate(unsigned long data)
* to load new gain values.
*/
ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "calibration, resetting\n");
ath5k_reset_wake(sc);
ath5k_reset(sc, sc->curchan);
}
if (ath5k_hw_phy_calibrate(ah, sc->curchan))
ATH5K_ERR(sc, "calibration of channel %u failed\n",
ieee80211_frequency_to_channel(
sc->curchan->center_freq));
ah->ah_swi_mask = 0;
/* Wake queues */
ieee80211_wake_queues(sc->hw);
ah->ah_cal_mask &= ~AR5K_CALIBRATION_FULL;
}
static void
ath5k_tasklet_ani(unsigned long data)
{
struct ath5k_softc *sc = (void *)data;
struct ath5k_hw *ah = sc->ah;
ah->ah_cal_mask |= AR5K_CALIBRATION_ANI;
ath5k_ani_calibration(ah);
ah->ah_cal_mask &= ~AR5K_CALIBRATION_ANI;
}
@ -2852,6 +2923,8 @@ ath5k_reset(struct ath5k_softc *sc, struct ieee80211_channel *chan)
goto err;
}
ath5k_ani_init(ah, ah->ah_sc->ani_state.ani_mode);
/*
* Change channels and update the h/w rate map if we're switching;
* e.g. 11a to 11b/g.
@ -3207,12 +3280,14 @@ ath5k_get_stats(struct ieee80211_hw *hw,
struct ieee80211_low_level_stats *stats)
{
struct ath5k_softc *sc = hw->priv;
struct ath5k_hw *ah = sc->ah;
/* Force update */
ath5k_hw_update_mib_counters(ah, &sc->ll_stats);
ath5k_hw_update_mib_counters(sc->ah);
memcpy(stats, &sc->ll_stats, sizeof(sc->ll_stats));
stats->dot11ACKFailureCount = sc->stats.ack_fail;
stats->dot11RTSFailureCount = sc->stats.rts_fail;
stats->dot11RTSSuccessCount = sc->stats.rts_ok;
stats->dot11FCSErrorCount = sc->stats.fcs_error;
return 0;
}


@ -50,6 +50,7 @@
#include "ath5k.h"
#include "debug.h"
#include "ani.h"
#include "../regd.h"
#include "../ath.h"
@ -105,14 +106,18 @@ struct ath5k_rfkill {
struct tasklet_struct toggleq;
};
/* statistics (only used for debugging now) */
/* statistics */
struct ath5k_statistics {
/* antenna use */
unsigned int antenna_rx[5]; /* frames count per antenna RX */
unsigned int antenna_tx[5]; /* frames count per antenna TX */
/* frame errors */
unsigned int rx_all_count; /* all RX frames, including errors */
unsigned int tx_all_count; /* all TX frames, including errors */
unsigned int rxerr_crc;
unsigned int rxerr_phy;
unsigned int rxerr_phy_code[32];
unsigned int rxerr_fifo;
unsigned int rxerr_decrypt;
unsigned int rxerr_mic;
@ -121,6 +126,16 @@ struct ath5k_statistics {
unsigned int txerr_retry;
unsigned int txerr_fifo;
unsigned int txerr_filt;
/* MIB counters */
unsigned int ack_fail;
unsigned int rts_fail;
unsigned int rts_ok;
unsigned int fcs_error;
unsigned int beacons;
unsigned int mib_intr;
unsigned int rxorn_intr;
};
#if CHAN_DEBUG
@ -135,7 +150,6 @@ struct ath5k_softc {
struct pci_dev *pdev; /* for dma mapping */
void __iomem *iobase; /* address of the device */
struct mutex lock; /* dev-level lock */
struct ieee80211_low_level_stats ll_stats;
struct ieee80211_hw *hw; /* IEEE 802.11 common */
struct ieee80211_supported_band sbands[IEEE80211_NUM_BANDS];
struct ieee80211_channel channels[ATH_CHAN_MAX];
@ -211,6 +225,9 @@ struct ath5k_softc {
bool enable_beacon; /* true if beacons are on */
struct ath5k_statistics stats;
struct ath5k_ani_state ani_state;
struct tasklet_struct ani_tasklet; /* ANI calibration */
};
#define ath5k_hw_hasbssidmask(_ah) \


@ -109,6 +109,12 @@ int ath5k_hw_set_capabilities(struct ath5k_hw *ah)
else
ah->ah_capabilities.cap_queues.q_tx_num = AR5K_NUM_TX_QUEUES;
/* newer hardware has PHY error counters */
if (ah->ah_mac_srev >= AR5K_SREV_AR5213A)
ah->ah_capabilities.cap_has_phyerr_counters = true;
else
ah->ah_capabilities.cap_has_phyerr_counters = false;
return 0;
}


@ -69,6 +69,7 @@ module_param_named(debug, ath5k_debug, uint, 0);
#include <linux/seq_file.h>
#include "reg.h"
#include "ani.h"
static struct dentry *ath5k_global_debugfs;
@ -307,6 +308,7 @@ static const struct {
{ ATH5K_DEBUG_DUMP_TX, "dumptx", "print transmit skb content" },
{ ATH5K_DEBUG_DUMPBANDS, "dumpbands", "dump bands" },
{ ATH5K_DEBUG_TRACE, "trace", "trace function calls" },
{ ATH5K_DEBUG_ANI, "ani", "adaptive noise immunity" },
{ ATH5K_DEBUG_ANY, "all", "show all debug levels" },
};
@ -474,6 +476,7 @@ static ssize_t read_file_frameerrors(struct file *file, char __user *user_buf,
struct ath5k_statistics *st = &sc->stats;
char buf[700];
unsigned int len = 0;
int i;
len += snprintf(buf+len, sizeof(buf)-len,
"RX\n---------------------\n");
@ -485,6 +488,13 @@ static ssize_t read_file_frameerrors(struct file *file, char __user *user_buf,
st->rxerr_phy,
st->rx_all_count > 0 ?
st->rxerr_phy*100/st->rx_all_count : 0);
for (i = 0; i < 32; i++) {
if (st->rxerr_phy_code[i])
len += snprintf(buf+len, sizeof(buf)-len,
" phy_err[%d]\t%d\n",
i, st->rxerr_phy_code[i]);
}
len += snprintf(buf+len, sizeof(buf)-len, "FIFO\t%d\t(%d%%)\n",
st->rxerr_fifo,
st->rx_all_count > 0 ?
@ -565,6 +575,160 @@ static const struct file_operations fops_frameerrors = {
};
/* debugfs: ani */
static ssize_t read_file_ani(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
struct ath5k_softc *sc = file->private_data;
struct ath5k_statistics *st = &sc->stats;
struct ath5k_ani_state *as = &sc->ani_state;
char buf[700];
unsigned int len = 0;
len += snprintf(buf+len, sizeof(buf)-len,
"HW has PHY error counters:\t%s\n",
sc->ah->ah_capabilities.cap_has_phyerr_counters ?
"yes" : "no");
len += snprintf(buf+len, sizeof(buf)-len,
"HW max spur immunity level:\t%d\n",
as->max_spur_level);
len += snprintf(buf+len, sizeof(buf)-len,
"\nANI state\n--------------------------------------------\n");
len += snprintf(buf+len, sizeof(buf)-len, "operating mode:\t\t\t");
switch (as->ani_mode) {
case ATH5K_ANI_MODE_OFF:
len += snprintf(buf+len, sizeof(buf)-len, "OFF\n");
break;
case ATH5K_ANI_MODE_MANUAL_LOW:
len += snprintf(buf+len, sizeof(buf)-len,
"MANUAL LOW\n");
break;
case ATH5K_ANI_MODE_MANUAL_HIGH:
len += snprintf(buf+len, sizeof(buf)-len,
"MANUAL HIGH\n");
break;
case ATH5K_ANI_MODE_AUTO:
len += snprintf(buf+len, sizeof(buf)-len, "AUTO\n");
break;
default:
len += snprintf(buf+len, sizeof(buf)-len,
"??? (not good)\n");
break;
}
len += snprintf(buf+len, sizeof(buf)-len,
"noise immunity level:\t\t%d\n",
as->noise_imm_level);
len += snprintf(buf+len, sizeof(buf)-len,
"spur immunity level:\t\t%d\n",
as->spur_level);
len += snprintf(buf+len, sizeof(buf)-len, "firstep level:\t\t\t%d\n",
as->firstep_level);
len += snprintf(buf+len, sizeof(buf)-len,
"OFDM weak signal detection:\t%s\n",
as->ofdm_weak_sig ? "on" : "off");
len += snprintf(buf+len, sizeof(buf)-len,
"CCK weak signal detection:\t%s\n",
as->cck_weak_sig ? "on" : "off");
len += snprintf(buf+len, sizeof(buf)-len,
"\nMIB INTERRUPTS:\t\t%u\n",
st->mib_intr);
len += snprintf(buf+len, sizeof(buf)-len,
"beacon RSSI average:\t%d\n",
sc->ah->ah_beacon_rssi_avg.avg);
len += snprintf(buf+len, sizeof(buf)-len, "profcnt tx\t\t%u\t(%d%%)\n",
as->pfc_tx,
as->pfc_cycles > 0 ?
as->pfc_tx*100/as->pfc_cycles : 0);
len += snprintf(buf+len, sizeof(buf)-len, "profcnt rx\t\t%u\t(%d%%)\n",
as->pfc_rx,
as->pfc_cycles > 0 ?
as->pfc_rx*100/as->pfc_cycles : 0);
len += snprintf(buf+len, sizeof(buf)-len, "profcnt busy\t\t%u\t(%d%%)\n",
as->pfc_busy,
as->pfc_cycles > 0 ?
as->pfc_busy*100/as->pfc_cycles : 0);
len += snprintf(buf+len, sizeof(buf)-len, "profcnt cycles\t\t%u\n",
as->pfc_cycles);
len += snprintf(buf+len, sizeof(buf)-len,
"listen time\t\t%d\tlast: %d\n",
as->listen_time, as->last_listen);
len += snprintf(buf+len, sizeof(buf)-len,
"OFDM errors\t\t%u\tlast: %u\tsum: %u\n",
as->ofdm_errors, as->last_ofdm_errors,
as->sum_ofdm_errors);
len += snprintf(buf+len, sizeof(buf)-len,
"CCK errors\t\t%u\tlast: %u\tsum: %u\n",
as->cck_errors, as->last_cck_errors,
as->sum_cck_errors);
len += snprintf(buf+len, sizeof(buf)-len,
"AR5K_PHYERR_CNT1\t%x\t(=%d)\n",
ath5k_hw_reg_read(sc->ah, AR5K_PHYERR_CNT1),
ATH5K_ANI_OFDM_TRIG_HIGH - (ATH5K_PHYERR_CNT_MAX -
ath5k_hw_reg_read(sc->ah, AR5K_PHYERR_CNT1)));
len += snprintf(buf+len, sizeof(buf)-len,
"AR5K_PHYERR_CNT2\t%x\t(=%d)\n",
ath5k_hw_reg_read(sc->ah, AR5K_PHYERR_CNT2),
ATH5K_ANI_CCK_TRIG_HIGH - (ATH5K_PHYERR_CNT_MAX -
ath5k_hw_reg_read(sc->ah, AR5K_PHYERR_CNT2)));
return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}
static ssize_t write_file_ani(struct file *file,
const char __user *userbuf,
size_t count, loff_t *ppos)
{
struct ath5k_softc *sc = file->private_data;
char buf[20];
if (copy_from_user(buf, userbuf, min(count, sizeof(buf))))
return -EFAULT;
if (strncmp(buf, "sens-low", 8) == 0) {
ath5k_ani_init(sc->ah, ATH5K_ANI_MODE_MANUAL_HIGH);
} else if (strncmp(buf, "sens-high", 9) == 0) {
ath5k_ani_init(sc->ah, ATH5K_ANI_MODE_MANUAL_LOW);
} else if (strncmp(buf, "ani-off", 7) == 0) {
ath5k_ani_init(sc->ah, ATH5K_ANI_MODE_OFF);
} else if (strncmp(buf, "ani-on", 6) == 0) {
ath5k_ani_init(sc->ah, ATH5K_ANI_MODE_AUTO);
} else if (strncmp(buf, "noise-low", 9) == 0) {
ath5k_ani_set_noise_immunity_level(sc->ah, 0);
} else if (strncmp(buf, "noise-high", 10) == 0) {
ath5k_ani_set_noise_immunity_level(sc->ah,
ATH5K_ANI_MAX_NOISE_IMM_LVL);
} else if (strncmp(buf, "spur-low", 8) == 0) {
ath5k_ani_set_spur_immunity_level(sc->ah, 0);
} else if (strncmp(buf, "spur-high", 9) == 0) {
ath5k_ani_set_spur_immunity_level(sc->ah,
sc->ani_state.max_spur_level);
} else if (strncmp(buf, "fir-low", 7) == 0) {
ath5k_ani_set_firstep_level(sc->ah, 0);
} else if (strncmp(buf, "fir-high", 8) == 0) {
ath5k_ani_set_firstep_level(sc->ah, ATH5K_ANI_MAX_FIRSTEP_LVL);
} else if (strncmp(buf, "ofdm-off", 8) == 0) {
ath5k_ani_set_ofdm_weak_signal_detection(sc->ah, false);
} else if (strncmp(buf, "ofdm-on", 7) == 0) {
ath5k_ani_set_ofdm_weak_signal_detection(sc->ah, true);
} else if (strncmp(buf, "cck-off", 7) == 0) {
ath5k_ani_set_cck_weak_signal_detection(sc->ah, false);
} else if (strncmp(buf, "cck-on", 6) == 0) {
ath5k_ani_set_cck_weak_signal_detection(sc->ah, true);
}
return count;
}
static const struct file_operations fops_ani = {
.read = read_file_ani,
.write = write_file_ani,
.open = ath5k_debugfs_open,
.owner = THIS_MODULE,
};
/* init */
void
@ -603,6 +767,11 @@ ath5k_debug_init_device(struct ath5k_softc *sc)
S_IWUSR | S_IRUSR,
sc->debug.debugfs_phydir, sc,
&fops_frameerrors);
sc->debug.debugfs_ani = debugfs_create_file("ani",
S_IWUSR | S_IRUSR,
sc->debug.debugfs_phydir, sc,
&fops_ani);
}
void
@ -620,6 +789,7 @@ ath5k_debug_finish_device(struct ath5k_softc *sc)
debugfs_remove(sc->debug.debugfs_reset);
debugfs_remove(sc->debug.debugfs_antenna);
debugfs_remove(sc->debug.debugfs_frameerrors);
debugfs_remove(sc->debug.debugfs_ani);
debugfs_remove(sc->debug.debugfs_phydir);
}

View File

@ -76,6 +76,7 @@ struct ath5k_dbg_info {
struct dentry *debugfs_reset;
struct dentry *debugfs_antenna;
struct dentry *debugfs_frameerrors;
struct dentry *debugfs_ani;
};
/**
@ -115,6 +116,7 @@ enum ath5k_debug_level {
ATH5K_DEBUG_DUMP_TX = 0x00000200,
ATH5K_DEBUG_DUMPBANDS = 0x00000400,
ATH5K_DEBUG_TRACE = 0x00001000,
ATH5K_DEBUG_ANI = 0x00002000,
ATH5K_DEBUG_ANY = 0xffffffff
};


@ -645,6 +645,7 @@ static int ath5k_hw_proc_5212_rx_status(struct ath5k_hw *ah,
rs->rs_status |= AR5K_RXERR_PHY;
rs->rs_phyerr |= AR5K_REG_MS(rx_err->rx_error_1,
AR5K_RX_DESC_ERROR1_PHY_ERROR_CODE);
ath5k_ani_phy_error_report(ah, rs->rs_phyerr);
}
if (rx_status->rx_status_1 &


@ -112,15 +112,32 @@ struct ath5k_hw_rx_error {
#define AR5K_RX_DESC_ERROR1_PHY_ERROR_CODE 0x0000ff00
#define AR5K_RX_DESC_ERROR1_PHY_ERROR_CODE_S 8
/* PHY Error codes */
#define AR5K_DESC_RX_PHY_ERROR_NONE 0x00
#define AR5K_DESC_RX_PHY_ERROR_TIMING 0x20
#define AR5K_DESC_RX_PHY_ERROR_PARITY 0x40
#define AR5K_DESC_RX_PHY_ERROR_RATE 0x60
#define AR5K_DESC_RX_PHY_ERROR_LENGTH 0x80
#define AR5K_DESC_RX_PHY_ERROR_64QAM 0xa0
#define AR5K_DESC_RX_PHY_ERROR_SERVICE 0xc0
#define AR5K_DESC_RX_PHY_ERROR_TRANSMITOVR 0xe0
/**
* enum ath5k_phy_error_code - PHY Error codes
*/
enum ath5k_phy_error_code {
AR5K_RX_PHY_ERROR_UNDERRUN = 0, /* Transmit underrun */
AR5K_RX_PHY_ERROR_TIMING = 1, /* Timing error */
AR5K_RX_PHY_ERROR_PARITY = 2, /* Illegal parity */
AR5K_RX_PHY_ERROR_RATE = 3, /* Illegal rate */
AR5K_RX_PHY_ERROR_LENGTH = 4, /* Illegal length */
AR5K_RX_PHY_ERROR_RADAR = 5, /* Radar detect */
AR5K_RX_PHY_ERROR_SERVICE = 6, /* Illegal service */
AR5K_RX_PHY_ERROR_TOR = 7, /* Transmit override receive */
/* these are specific to the 5212 */
AR5K_RX_PHY_ERROR_OFDM_TIMING = 17,
AR5K_RX_PHY_ERROR_OFDM_SIGNAL_PARITY = 18,
AR5K_RX_PHY_ERROR_OFDM_RATE_ILLEGAL = 19,
AR5K_RX_PHY_ERROR_OFDM_LENGTH_ILLEGAL = 20,
AR5K_RX_PHY_ERROR_OFDM_POWER_DROP = 21,
AR5K_RX_PHY_ERROR_OFDM_SERVICE = 22,
AR5K_RX_PHY_ERROR_OFDM_RESTART = 23,
AR5K_RX_PHY_ERROR_CCK_TIMING = 25,
AR5K_RX_PHY_ERROR_CCK_HEADER_CRC = 26,
AR5K_RX_PHY_ERROR_CCK_RATE_ILLEGAL = 27,
AR5K_RX_PHY_ERROR_CCK_SERVICE = 30,
AR5K_RX_PHY_ERROR_CCK_RESTART = 31,
};
/*
* 5210/5211 hardware 2-word TX control descriptor


@ -113,39 +113,26 @@ int ath5k_hw_set_opmode(struct ath5k_hw *ah, enum nl80211_iftype op_mode)
}
/**
* ath5k_hw_update - Update mib counters (mac layer statistics)
* ath5k_hw_update - Update MIB counters (mac layer statistics)
*
* @ah: The &struct ath5k_hw
* @stats: The &struct ieee80211_low_level_stats we use to track
* statistics on the driver
*
* Reads MIB counters from PCU and updates sw statistics. Must be
* called after a MIB interrupt.
* Reads MIB counters from PCU and updates sw statistics. It is called after a
* MIB interrupt, because one of these counters might have reached its maximum
* and triggered the MIB interrupt, to let us read and clear the counter.
*
* Is called in interrupt context!
*/
void ath5k_hw_update_mib_counters(struct ath5k_hw *ah,
struct ieee80211_low_level_stats *stats)
void ath5k_hw_update_mib_counters(struct ath5k_hw *ah)
{
ATH5K_TRACE(ah->ah_sc);
struct ath5k_statistics *stats = &ah->ah_sc->stats;
/* Read-And-Clear */
stats->dot11ACKFailureCount += ath5k_hw_reg_read(ah, AR5K_ACK_FAIL);
stats->dot11RTSFailureCount += ath5k_hw_reg_read(ah, AR5K_RTS_FAIL);
stats->dot11RTSSuccessCount += ath5k_hw_reg_read(ah, AR5K_RTS_OK);
stats->dot11FCSErrorCount += ath5k_hw_reg_read(ah, AR5K_FCS_FAIL);
/* XXX: Should we use this to track beacon count ?
* -we read it anyway to clear the register */
ath5k_hw_reg_read(ah, AR5K_BEACON_CNT);
/* Reset profile count registers on 5212*/
if (ah->ah_version == AR5K_AR5212) {
ath5k_hw_reg_write(ah, 0, AR5K_PROFCNT_TX);
ath5k_hw_reg_write(ah, 0, AR5K_PROFCNT_RX);
ath5k_hw_reg_write(ah, 0, AR5K_PROFCNT_RXCLR);
ath5k_hw_reg_write(ah, 0, AR5K_PROFCNT_CYCLE);
}
/* TODO: Handle ANI stats */
stats->ack_fail += ath5k_hw_reg_read(ah, AR5K_ACK_FAIL);
stats->rts_fail += ath5k_hw_reg_read(ah, AR5K_RTS_FAIL);
stats->rts_ok += ath5k_hw_reg_read(ah, AR5K_RTS_OK);
stats->fcs_error += ath5k_hw_reg_read(ah, AR5K_FCS_FAIL);
stats->beacons += ath5k_hw_reg_read(ah, AR5K_BEACON_CNT);
}
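A minimal sketch (not part of this patch) of how the interrupt path might consume this helper, assuming the interrupt status has already been read into a local status word and that the AR5K_INT_MIB bit is set on a MIB interrupt:
/* Illustrative only: read-and-clear the MIB counters on a MIB interrupt. */
if (status & AR5K_INT_MIB) {
	ath5k_hw_update_mib_counters(ah);
	/* ANI processing of the freshly read counters would go here. */
}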
/**
@ -167,9 +154,9 @@ void ath5k_hw_set_ack_bitrate_high(struct ath5k_hw *ah, bool high)
else {
u32 val = AR5K_STA_ID1_BASE_RATE_11B | AR5K_STA_ID1_ACKCTS_6MB;
if (high)
AR5K_REG_ENABLE_BITS(ah, AR5K_STA_ID1, val);
else
AR5K_REG_DISABLE_BITS(ah, AR5K_STA_ID1, val);
else
AR5K_REG_ENABLE_BITS(ah, AR5K_STA_ID1, val);
}
}
@ -392,7 +379,6 @@ void ath5k_hw_set_bssid_mask(struct ath5k_hw *ah, const u8 *mask)
* (ACK etc).
*
* NOTE: RX DMA should be already enabled using ath5k_hw_start_rx_dma
* TODO: Init ANI here
*/
void ath5k_hw_start_rx_pcu(struct ath5k_hw *ah)
{

View File

@ -980,7 +980,7 @@ static int ath5k_hw_rf5112_channel(struct ath5k_hw *ah,
return -EINVAL;
data0 = ath5k_hw_bitswap((data0 << 2) & 0xff, 8);
} else if ((c - (c % 5)) != 2 || c > 5435) {
} else if ((c % 5) != 2 || c > 5435) {
if (!(c % 20) && c >= 5120) {
data0 = ath5k_hw_bitswap(((c - 4800) / 20 << 2), 8);
data2 = ath5k_hw_bitswap(3, 2);
@ -993,7 +993,7 @@ static int ath5k_hw_rf5112_channel(struct ath5k_hw *ah,
} else
return -EINVAL;
} else {
data0 = ath5k_hw_bitswap((10 * (c - 2) - 4800) / 25 + 1, 8);
data0 = ath5k_hw_bitswap((10 * (c - 2 - 4800)) / 25 + 1, 8);
data2 = ath5k_hw_bitswap(0, 2);
}
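To see why the reordered parentheses matter, take for instance c = 5062 MHz (c % 5 == 2 and c <= 5435, so this branch is taken); the same fix is applied again below for the RF2425 path:
old: (10 * (5062 - 2) - 4800) / 25 + 1 = 45800 / 25 + 1 = 1833  (does not fit in 8 bits)
new: (10 * (5062 - 2 - 4800)) / 25 + 1 =  2600 / 25 + 1 =  105  (valid 8-bit input to the bitswap)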
@ -1021,7 +1021,7 @@ static int ath5k_hw_rf2425_channel(struct ath5k_hw *ah,
data0 = ath5k_hw_bitswap((c - 2272), 8);
data2 = 0;
/* ? 5GHz ? */
} else if ((c - (c % 5)) != 2 || c > 5435) {
} else if ((c % 5) != 2 || c > 5435) {
if (!(c % 20) && c < 5120)
data0 = ath5k_hw_bitswap(((c - 4800) / 20 << 2), 8);
else if (!(c % 10))
@ -1032,7 +1032,7 @@ static int ath5k_hw_rf2425_channel(struct ath5k_hw *ah,
return -EINVAL;
data2 = ath5k_hw_bitswap(1, 2);
} else {
data0 = ath5k_hw_bitswap((10 * (c - 2) - 4800) / 25 + 1, 8);
data0 = ath5k_hw_bitswap((10 * (c - 2 - 4800)) / 25 + 1, 8);
data2 = ath5k_hw_bitswap(0, 2);
}
@ -1103,28 +1103,6 @@ int ath5k_hw_channel(struct ath5k_hw *ah, struct ieee80211_channel *channel)
PHY calibration
\*****************/
void
ath5k_hw_calibration_poll(struct ath5k_hw *ah)
{
/* Calibration interval in jiffies */
unsigned long cal_intval;
cal_intval = msecs_to_jiffies(ah->ah_cal_intval * 1000);
/* Initialize timestamp if needed */
if (!ah->ah_cal_tstamp)
ah->ah_cal_tstamp = jiffies;
/* For now we always do full calibration
* Mark software interrupt mask and fire software
* interrupt (bit gets auto-cleared) */
if (time_is_before_eq_jiffies(ah->ah_cal_tstamp + cal_intval)) {
ah->ah_cal_tstamp = jiffies;
ah->ah_swi_mask = AR5K_SWI_FULL_CALIBRATION;
AR5K_REG_ENABLE_BITS(ah, AR5K_CR, AR5K_CR_SWI);
}
}
static int sign_extend(int val, const int nbits)
{
int order = BIT(nbits-1);
@ -1411,7 +1389,10 @@ static int ath5k_hw_rf511x_calibrate(struct ath5k_hw *ah,
i_coff = (-iq_corr) / i_coffd;
i_coff = clamp(i_coff, -32, 31); /* signed 6 bit */
q_coff = (i_pwr / q_coffd) - 128;
if (ah->ah_version == AR5K_AR5211)
q_coff = (i_pwr / q_coffd) - 64;
else
q_coff = (i_pwr / q_coffd) - 128;
q_coff = clamp(q_coff, -16, 15); /* signed 5 bit */
ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_CALIBRATE,
@ -2580,7 +2561,7 @@ ath5k_combine_pwr_to_pdadc_curves(struct ath5k_hw *ah,
max_idx = (pdadc_n < table_size) ? pdadc_n : table_size;
/* Fill pdadc_out table */
while (pdadc_0 < max_idx)
while (pdadc_0 < max_idx && pdadc_i < 128)
pdadc_out[pdadc_i++] = pdadc_tmp[pdadc_0++];
/* Need to extrapolate above this pdgain? */

View File

@ -212,10 +212,10 @@
* MIB control register
*/
#define AR5K_MIBC 0x0040 /* Register Address */
#define AR5K_MIBC_COW 0x00000001 /* Warn test indicator */
#define AR5K_MIBC_COW 0x00000001 /* Counter Overflow Warning */
#define AR5K_MIBC_FMC 0x00000002 /* Freeze MIB Counters */
#define AR5K_MIBC_CMC 0x00000004 /* Clean MIB Counters */
#define AR5K_MIBC_MCS 0x00000008 /* MIB counter strobe */
#define AR5K_MIBC_CMC 0x00000004 /* Clear MIB Counters */
#define AR5K_MIBC_MCS 0x00000008 /* MIB counter strobe, increment all */
/*
* Timeout prescale register
@ -1139,8 +1139,8 @@
#define AR5K_STA_ID1_DEFAULT_ANTENNA 0x00200000 /* Use default antenna */
#define AR5K_STA_ID1_DESC_ANTENNA 0x00400000 /* Update antenna from descriptor */
#define AR5K_STA_ID1_RTS_DEF_ANTENNA 0x00800000 /* Use default antenna for RTS */
#define AR5K_STA_ID1_ACKCTS_6MB 0x01000000 /* Use 6Mbit/s for ACK/CTS */
#define AR5K_STA_ID1_BASE_RATE_11B 0x02000000 /* Use 11b base rate for ACK/CTS [5211+] */
#define AR5K_STA_ID1_ACKCTS_6MB 0x01000000 /* Rate to use for ACK/CTS. 0: highest mandatory rate <= RX rate; 1: 1Mbps in B mode */
#define AR5K_STA_ID1_BASE_RATE_11B 0x02000000 /* 802.11b base rate. 0: 1, 2, 5.5 and 11Mbps; 1: 1 and 2Mbps. [5211+] */
#define AR5K_STA_ID1_SELFGEN_DEF_ANT 0x04000000 /* Use def. antenna for self generated frames */
#define AR5K_STA_ID1_CRYPT_MIC_EN 0x08000000 /* Enable MIC */
#define AR5K_STA_ID1_KEYSRCH_MODE 0x10000000 /* Look up key when key id != 0 */
@ -1516,7 +1516,14 @@
AR5K_NAV_5210 : AR5K_NAV_5211)
/*
* RTS success register
* MIB counters:
*
* Max value is 0xc000; if this is reached we get a MIB interrupt.
* They can be controlled via AR5K_MIBC and are cleared on read.
*/
/*
* RTS success (MIB counter)
*/
#define AR5K_RTS_OK_5210 0x8090
#define AR5K_RTS_OK_5211 0x8088
@ -1524,7 +1531,7 @@
AR5K_RTS_OK_5210 : AR5K_RTS_OK_5211)
/*
* RTS failure register
* RTS failure (MIB counter)
*/
#define AR5K_RTS_FAIL_5210 0x8094
#define AR5K_RTS_FAIL_5211 0x808c
@ -1532,7 +1539,7 @@
AR5K_RTS_FAIL_5210 : AR5K_RTS_FAIL_5211)
/*
* ACK failure register
* ACK failure (MIB counter)
*/
#define AR5K_ACK_FAIL_5210 0x8098
#define AR5K_ACK_FAIL_5211 0x8090
@ -1540,7 +1547,7 @@
AR5K_ACK_FAIL_5210 : AR5K_ACK_FAIL_5211)
/*
* FCS failure register
* FCS failure (MIB counter)
*/
#define AR5K_FCS_FAIL_5210 0x809c
#define AR5K_FCS_FAIL_5211 0x8094
@ -1667,11 +1674,17 @@
/*
* Profile count registers
*
* These registers can be cleared and frozen with AR5K_MIBC, but they do not
* generate a MIB interrupt.
* Instead of overflowing, they shift one bit to the right. All registers shift
* together, i.e. when one reaches the max they all shift at the same time, so
* we should always get consistent values.
*/
#define AR5K_PROFCNT_TX 0x80ec /* Tx count */
#define AR5K_PROFCNT_RX 0x80f0 /* Rx count */
#define AR5K_PROFCNT_RXCLR 0x80f4 /* Clear Rx count */
#define AR5K_PROFCNT_CYCLE 0x80f8 /* Cycle count (?) */
#define AR5K_PROFCNT_RXCLR 0x80f4 /* Busy count */
#define AR5K_PROFCNT_CYCLE 0x80f8 /* Cycle counter */
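A minimal sketch (not part of this patch) of how these profile counters could be turned into a channel-busy estimate, reusing ath5k_hw_reg_read() as above; the percentage calculation is illustrative and omits overflow handling:
/* Illustrative only: rough busy percentage from the profile counters. */
u32 cycles = ath5k_hw_reg_read(ah, AR5K_PROFCNT_CYCLE);
u32 busy = ath5k_hw_reg_read(ah, AR5K_PROFCNT_RXCLR);
u32 busy_percent = cycles ? busy * 100 / cycles : 0;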
/*
* Quiet period control registers
@ -1758,7 +1771,7 @@
#define AR5K_CCK_FIL_CNT 0x8128
/*
* PHY Error Counters (?)
* PHY Error Counters (same masks as AR5K_PHY_ERR_FIL)
*/
#define AR5K_PHYERR_CNT1 0x812c
#define AR5K_PHYERR_CNT1_MASK 0x8130
@ -1766,6 +1779,9 @@
#define AR5K_PHYERR_CNT2 0x8134
#define AR5K_PHYERR_CNT2_MASK 0x8138
/* if the PHY Error Counters reach this maximum, we get MIB interrupts */
#define ATH5K_PHYERR_CNT_MAX 0x00c00000
/*
* TSF Threshold register (?)
*/

View File

@ -47,6 +47,7 @@ static bool ath_ahb_eeprom_read(struct ath_common *common, u32 off, u16 *data)
}
static struct ath_bus_ops ath_ahb_bus_ops = {
.ath_bus_type = ATH_AHB,
.read_cachesize = ath_ahb_read_cachesize,
.eeprom_read = ath_ahb_eeprom_read,
};

View File

@ -178,9 +178,6 @@ void ath_descdma_cleanup(struct ath_softc *sc, struct ath_descdma *dd,
#define BAW_WITHIN(_start, _bawsz, _seqno) \
((((_seqno) - (_start)) & 4095) < (_bawsz))
#define ATH_DS_BA_SEQ(_ds) ((_ds)->ds_us.tx.ts_seqnum)
#define ATH_DS_BA_BITMAP(_ds) (&(_ds)->ds_us.tx.ba_low)
#define ATH_DS_TX_BA(_ds) ((_ds)->ds_us.tx.ts_flags & ATH9K_TX_BA)
#define ATH_AN_2_TID(_an, _tidno) (&(_an)->tid[(_tidno)])
#define ATH_TX_COMPLETE_POLL_INT 1000
@ -483,7 +480,6 @@ struct ath_softc {
bool ps_enabled;
bool ps_idle;
unsigned long ps_usecount;
enum ath9k_int imask;
struct ath_config config;
struct ath_rx rx;

View File

@ -524,6 +524,7 @@ static void ath9k_beacon_init(struct ath_softc *sc,
static void ath_beacon_config_ap(struct ath_softc *sc,
struct ath_beacon_config *conf)
{
struct ath_hw *ah = sc->sc_ah;
u32 nexttbtt, intval;
/* NB: the beacon interval is kept internally in TU's */
@ -539,15 +540,15 @@ static void ath_beacon_config_ap(struct ath_softc *sc,
* prepare beacon frames.
*/
intval |= ATH9K_BEACON_ENA;
sc->imask |= ATH9K_INT_SWBA;
ah->imask |= ATH9K_INT_SWBA;
ath_beaconq_config(sc);
/* Set the computed AP beacon timers */
ath9k_hw_set_interrupts(sc->sc_ah, 0);
ath9k_hw_set_interrupts(ah, 0);
ath9k_beacon_init(sc, nexttbtt, intval);
sc->beacon.bmisscnt = 0;
ath9k_hw_set_interrupts(sc->sc_ah, sc->imask);
ath9k_hw_set_interrupts(ah, ah->imask);
/* Clear the reset TSF flag, so that subsequent beacon updates
will not reset the HW TSF. */
@ -566,7 +567,8 @@ static void ath_beacon_config_ap(struct ath_softc *sc,
static void ath_beacon_config_sta(struct ath_softc *sc,
struct ath_beacon_config *conf)
{
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
struct ath_hw *ah = sc->sc_ah;
struct ath_common *common = ath9k_hw_common(ah);
struct ath9k_beacon_state bs;
int dtimperiod, dtimcount, sleepduration;
int cfpperiod, cfpcount;
@ -605,7 +607,7 @@ static void ath_beacon_config_sta(struct ath_softc *sc,
* Pull nexttbtt forward to reflect the current
* TSF and calculate dtim+cfp state for the result.
*/
tsf = ath9k_hw_gettsf64(sc->sc_ah);
tsf = ath9k_hw_gettsf64(ah);
tsftu = TSF_TO_TU(tsf>>32, tsf) + FUDGE;
num_beacons = tsftu / intval + 1;
@ -678,17 +680,18 @@ static void ath_beacon_config_sta(struct ath_softc *sc,
/* Set the computed STA beacon timers */
ath9k_hw_set_interrupts(sc->sc_ah, 0);
ath9k_hw_set_sta_beacon_timers(sc->sc_ah, &bs);
sc->imask |= ATH9K_INT_BMISS;
ath9k_hw_set_interrupts(sc->sc_ah, sc->imask);
ath9k_hw_set_interrupts(ah, 0);
ath9k_hw_set_sta_beacon_timers(ah, &bs);
ah->imask |= ATH9K_INT_BMISS;
ath9k_hw_set_interrupts(ah, ah->imask);
}
static void ath_beacon_config_adhoc(struct ath_softc *sc,
struct ath_beacon_config *conf,
struct ieee80211_vif *vif)
{
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
struct ath_hw *ah = sc->sc_ah;
struct ath_common *common = ath9k_hw_common(ah);
u64 tsf;
u32 tsftu, intval, nexttbtt;
@ -703,7 +706,7 @@ static void ath_beacon_config_adhoc(struct ath_softc *sc,
else if (intval)
nexttbtt = roundup(nexttbtt, intval);
tsf = ath9k_hw_gettsf64(sc->sc_ah);
tsf = ath9k_hw_gettsf64(ah);
tsftu = TSF_TO_TU((u32)(tsf>>32), (u32)tsf) + FUDGE;
do {
nexttbtt += intval;
@ -719,20 +722,20 @@ static void ath_beacon_config_adhoc(struct ath_softc *sc,
* self-linked tx descriptor and let the hardware deal with things.
*/
intval |= ATH9K_BEACON_ENA;
if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_VEOL))
sc->imask |= ATH9K_INT_SWBA;
if (!(ah->caps.hw_caps & ATH9K_HW_CAP_VEOL))
ah->imask |= ATH9K_INT_SWBA;
ath_beaconq_config(sc);
/* Set the computed ADHOC beacon timers */
ath9k_hw_set_interrupts(sc->sc_ah, 0);
ath9k_hw_set_interrupts(ah, 0);
ath9k_beacon_init(sc, nexttbtt, intval);
sc->beacon.bmisscnt = 0;
ath9k_hw_set_interrupts(sc->sc_ah, sc->imask);
ath9k_hw_set_interrupts(ah, ah->imask);
/* FIXME: Handle properly when vif is NULL */
if (vif && sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_VEOL)
if (vif && ah->caps.hw_caps & ATH9K_HW_CAP_VEOL)
ath_beacon_start_adhoc(sc, vif);
}

View File

@ -18,6 +18,7 @@
/* We can tune this as we go by monitoring really low values */
#define ATH9K_NF_TOO_LOW -60
#define AR9285_CLCAL_REDO_THRESH 1
/* AR5416 may return very high value (like -31 dBm), in those cases the nf
* is incorrect and we should use the static NF value. Later we can try to
@ -1091,7 +1092,7 @@ bool ath9k_hw_calibrate(struct ath_hw *ah, struct ath9k_channel *chan,
EXPORT_SYMBOL(ath9k_hw_calibrate);
/* Carrier leakage Calibration fix */
static bool ar9285_clc(struct ath_hw *ah, struct ath9k_channel *chan)
static bool ar9285_cl_cal(struct ath_hw *ah, struct ath9k_channel *chan)
{
struct ath_common *common = ath9k_hw_common(ah);
@ -1132,6 +1133,62 @@ static bool ar9285_clc(struct ath_hw *ah, struct ath9k_channel *chan)
return true;
}
static bool ar9285_clc(struct ath_hw *ah, struct ath9k_channel *chan)
{
int i;
u_int32_t txgain_max;
u_int32_t clc_gain, gain_mask = 0, clc_num = 0;
u_int32_t reg_clc_I0, reg_clc_Q0;
u_int32_t i0_num = 0;
u_int32_t q0_num = 0;
u_int32_t total_num = 0;
u_int32_t reg_rf2g5_org;
bool retv = true;
if (!(ar9285_cl_cal(ah, chan)))
return false;
txgain_max = MS(REG_READ(ah, AR_PHY_TX_PWRCTRL7),
AR_PHY_TX_PWRCTRL_TX_GAIN_TAB_MAX);
for (i = 0; i < (txgain_max+1); i++) {
clc_gain = (REG_READ(ah, (AR_PHY_TX_GAIN_TBL1+(i<<2))) &
AR_PHY_TX_GAIN_CLC) >> AR_PHY_TX_GAIN_CLC_S;
if (!(gain_mask & (1 << clc_gain))) {
gain_mask |= (1 << clc_gain);
clc_num++;
}
}
for (i = 0; i < clc_num; i++) {
reg_clc_I0 = (REG_READ(ah, (AR_PHY_CLC_TBL1 + (i << 2)))
& AR_PHY_CLC_I0) >> AR_PHY_CLC_I0_S;
reg_clc_Q0 = (REG_READ(ah, (AR_PHY_CLC_TBL1 + (i << 2)))
& AR_PHY_CLC_Q0) >> AR_PHY_CLC_Q0_S;
if (reg_clc_I0 == 0)
i0_num++;
if (reg_clc_Q0 == 0)
q0_num++;
}
total_num = i0_num + q0_num;
if (total_num > AR9285_CLCAL_REDO_THRESH) {
reg_rf2g5_org = REG_READ(ah, AR9285_RF2G5);
if (AR_SREV_9285E_20(ah)) {
REG_WRITE(ah, AR9285_RF2G5,
(reg_rf2g5_org & AR9285_RF2G5_IC50TX) |
AR9285_RF2G5_IC50TX_XE_SET);
} else {
REG_WRITE(ah, AR9285_RF2G5,
(reg_rf2g5_org & AR9285_RF2G5_IC50TX) |
AR9285_RF2G5_IC50TX_SET);
}
retv = ar9285_cl_cal(ah, chan);
REG_WRITE(ah, AR9285_RF2G5, reg_rf2g5_org);
}
return retv;
}
bool ath9k_hw_init_cal(struct ath_hw *ah, struct ath9k_channel *chan)
{
struct ath_common *common = ath9k_hw_common(ah);

View File

@ -255,7 +255,8 @@ void ath9k_cmn_rx_skb_postprocess(struct ath_common *common,
keyix = rx_stats->rs_keyix;
if (!(keyix == ATH9K_RXKEYIX_INVALID) && !decrypt_error) {
if (!(keyix == ATH9K_RXKEYIX_INVALID) && !decrypt_error &&
ieee80211_has_protected(fc)) {
rxs->flag |= RX_FLAG_DECRYPTED;
} else if (ieee80211_has_protected(fc)
&& !decrypt_error && skb->len >= hdrlen + 4) {
@ -303,88 +304,6 @@ int ath9k_cmn_get_hw_crypto_keytype(struct sk_buff *skb)
}
EXPORT_SYMBOL(ath9k_cmn_get_hw_crypto_keytype);
/*
* Calculate the RX filter to be set in the HW.
*/
u32 ath9k_cmn_calcrxfilter(struct ieee80211_hw *hw, struct ath_hw *ah,
unsigned int rxfilter)
{
#define RX_FILTER_PRESERVE (ATH9K_RX_FILTER_PHYERR | ATH9K_RX_FILTER_PHYRADAR)
u32 rfilt;
rfilt = (ath9k_hw_getrxfilter(ah) & RX_FILTER_PRESERVE)
| ATH9K_RX_FILTER_UCAST | ATH9K_RX_FILTER_BCAST
| ATH9K_RX_FILTER_MCAST;
/* If not a STA, enable processing of Probe Requests */
if (ah->opmode != NL80211_IFTYPE_STATION)
rfilt |= ATH9K_RX_FILTER_PROBEREQ;
/*
* Set promiscuous mode when FIF_PROMISC_IN_BSS is enabled for station
* mode interface or when in monitor mode. AP mode does not need this
* since it receives all in-BSS frames anyway.
*/
if (((ah->opmode != NL80211_IFTYPE_AP) &&
(rxfilter & FIF_PROMISC_IN_BSS)) ||
(ah->opmode == NL80211_IFTYPE_MONITOR))
rfilt |= ATH9K_RX_FILTER_PROM;
if (rxfilter & FIF_CONTROL)
rfilt |= ATH9K_RX_FILTER_CONTROL;
if ((ah->opmode == NL80211_IFTYPE_STATION) &&
!(rxfilter & FIF_BCN_PRBRESP_PROMISC))
rfilt |= ATH9K_RX_FILTER_MYBEACON;
else
rfilt |= ATH9K_RX_FILTER_BEACON;
if ((AR_SREV_9280_10_OR_LATER(ah) ||
AR_SREV_9285_10_OR_LATER(ah)) &&
(ah->opmode == NL80211_IFTYPE_AP) &&
(rxfilter & FIF_PSPOLL))
rfilt |= ATH9K_RX_FILTER_PSPOLL;
if (conf_is_ht(&hw->conf))
rfilt |= ATH9K_RX_FILTER_COMP_BAR;
return rfilt;
#undef RX_FILTER_PRESERVE
}
EXPORT_SYMBOL(ath9k_cmn_calcrxfilter);
/*
* Recv initialization for opmode change.
*/
void ath9k_cmn_opmode_init(struct ieee80211_hw *hw, struct ath_hw *ah,
unsigned int rxfilter)
{
struct ath_common *common = ath9k_hw_common(ah);
u32 rfilt, mfilt[2];
/* configure rx filter */
rfilt = ath9k_cmn_calcrxfilter(hw, ah, rxfilter);
ath9k_hw_setrxfilter(ah, rfilt);
/* configure bssid mask */
if (ah->caps.hw_caps & ATH9K_HW_CAP_BSSIDMASK)
ath_hw_setbssidmask(common);
/* configure operational mode */
ath9k_hw_setopmode(ah);
/* Handle any link-level address change. */
ath9k_hw_setmac(ah, common->macaddr);
/* calculate and install multicast filter */
mfilt[0] = mfilt[1] = ~0;
ath9k_hw_setmcastfilter(ah, mfilt[0], mfilt[1]);
}
EXPORT_SYMBOL(ath9k_cmn_opmode_init);
static u32 ath9k_get_extchanmode(struct ieee80211_channel *chan,
enum nl80211_channel_type channel_type)
{

View File

@ -128,10 +128,6 @@ void ath9k_cmn_rx_skb_postprocess(struct ath_common *common,
int ath9k_cmn_padpos(__le16 frame_control);
int ath9k_cmn_get_hw_crypto_keytype(struct sk_buff *skb);
u32 ath9k_cmn_calcrxfilter(struct ieee80211_hw *hw, struct ath_hw *ah,
unsigned int rxfilter);
void ath9k_cmn_opmode_init(struct ieee80211_hw *hw, struct ath_hw *ah,
unsigned int rxfilter);
void ath9k_cmn_update_ichannel(struct ieee80211_hw *hw,
struct ath9k_channel *ichan);
struct ath9k_channel *ath9k_cmn_get_curchannel(struct ieee80211_hw *hw,

View File

@ -157,10 +157,10 @@ static ssize_t read_file_dma(struct file *file, char __user *user_buf,
"txfifo_dcu_num_0: %2d txfifo_dcu_num_1: %2d\n",
(val[6] & 0x0001e000) >> 13, (val[6] & 0x001e0000) >> 17);
len += snprintf(buf + len, DMA_BUF_LEN - len, "pcu observe: 0x%x \n",
len += snprintf(buf + len, DMA_BUF_LEN - len, "pcu observe: 0x%x\n",
REG_READ_D(ah, AR_OBS_BUS_1));
len += snprintf(buf + len, DMA_BUF_LEN - len,
"AR_CR: 0x%x \n", REG_READ_D(ah, AR_CR));
"AR_CR: 0x%x\n", REG_READ_D(ah, AR_CR));
ath9k_ps_restore(sc);
@ -557,10 +557,8 @@ static ssize_t read_file_xmit(struct file *file, char __user *user_buf,
}
void ath_debug_stat_tx(struct ath_softc *sc, struct ath_txq *txq,
struct ath_buf *bf)
struct ath_buf *bf, struct ath_tx_status *ts)
{
struct ath_desc *ds = bf->bf_desc;
if (bf_isampdu(bf)) {
if (bf_isxretried(bf))
TX_STAT_INC(txq->axq_qnum, a_xretries);
@ -570,17 +568,17 @@ void ath_debug_stat_tx(struct ath_softc *sc, struct ath_txq *txq,
TX_STAT_INC(txq->axq_qnum, completed);
}
if (ds->ds_txstat.ts_status & ATH9K_TXERR_FIFO)
if (ts->ts_status & ATH9K_TXERR_FIFO)
TX_STAT_INC(txq->axq_qnum, fifo_underrun);
if (ds->ds_txstat.ts_status & ATH9K_TXERR_XTXOP)
if (ts->ts_status & ATH9K_TXERR_XTXOP)
TX_STAT_INC(txq->axq_qnum, xtxop);
if (ds->ds_txstat.ts_status & ATH9K_TXERR_TIMER_EXPIRED)
if (ts->ts_status & ATH9K_TXERR_TIMER_EXPIRED)
TX_STAT_INC(txq->axq_qnum, timer_exp);
if (ds->ds_txstat.ts_flags & ATH9K_TX_DESC_CFG_ERR)
if (ts->ts_flags & ATH9K_TX_DESC_CFG_ERR)
TX_STAT_INC(txq->axq_qnum, desc_cfg_err);
if (ds->ds_txstat.ts_flags & ATH9K_TX_DATA_UNDERRUN)
if (ts->ts_flags & ATH9K_TX_DATA_UNDERRUN)
TX_STAT_INC(txq->axq_qnum, data_underrun);
if (ds->ds_txstat.ts_flags & ATH9K_TX_DELIM_UNDERRUN)
if (ts->ts_flags & ATH9K_TX_DELIM_UNDERRUN)
TX_STAT_INC(txq->axq_qnum, delim_underrun);
}
@ -663,30 +661,29 @@ static ssize_t read_file_recv(struct file *file, char __user *user_buf,
#undef PHY_ERR
}
void ath_debug_stat_rx(struct ath_softc *sc, struct ath_buf *bf)
void ath_debug_stat_rx(struct ath_softc *sc, struct ath_rx_status *rs)
{
#define RX_STAT_INC(c) sc->debug.stats.rxstats.c++
#define RX_PHY_ERR_INC(c) sc->debug.stats.rxstats.phy_err_stats[c]++
struct ath_desc *ds = bf->bf_desc;
u32 phyerr;
if (ds->ds_rxstat.rs_status & ATH9K_RXERR_CRC)
if (rs->rs_status & ATH9K_RXERR_CRC)
RX_STAT_INC(crc_err);
if (ds->ds_rxstat.rs_status & ATH9K_RXERR_DECRYPT)
if (rs->rs_status & ATH9K_RXERR_DECRYPT)
RX_STAT_INC(decrypt_crc_err);
if (ds->ds_rxstat.rs_status & ATH9K_RXERR_MIC)
if (rs->rs_status & ATH9K_RXERR_MIC)
RX_STAT_INC(mic_err);
if (ds->ds_rxstat.rs_status & ATH9K_RX_DELIM_CRC_PRE)
if (rs->rs_status & ATH9K_RX_DELIM_CRC_PRE)
RX_STAT_INC(pre_delim_crc_err);
if (ds->ds_rxstat.rs_status & ATH9K_RX_DELIM_CRC_POST)
if (rs->rs_status & ATH9K_RX_DELIM_CRC_POST)
RX_STAT_INC(post_delim_crc_err);
if (ds->ds_rxstat.rs_status & ATH9K_RX_DECRYPT_BUSY)
if (rs->rs_status & ATH9K_RX_DECRYPT_BUSY)
RX_STAT_INC(decrypt_busy_err);
if (ds->ds_rxstat.rs_status & ATH9K_RXERR_PHY) {
if (rs->rs_status & ATH9K_RXERR_PHY) {
RX_STAT_INC(phy_err);
phyerr = ds->ds_rxstat.rs_phyerr & 0x24;
phyerr = rs->rs_phyerr & 0x24;
RX_PHY_ERR_INC(phyerr);
}

View File

@ -167,8 +167,8 @@ void ath9k_debug_remove_root(void);
void ath_debug_stat_interrupt(struct ath_softc *sc, enum ath9k_int status);
void ath_debug_stat_rc(struct ath_softc *sc, int final_rate);
void ath_debug_stat_tx(struct ath_softc *sc, struct ath_txq *txq,
struct ath_buf *bf);
void ath_debug_stat_rx(struct ath_softc *sc, struct ath_buf *bf);
struct ath_buf *bf, struct ath_tx_status *ts);
void ath_debug_stat_rx(struct ath_softc *sc, struct ath_rx_status *rs);
void ath_debug_stat_retries(struct ath_softc *sc, int rix,
int xretries, int retries, u8 per);
@ -204,12 +204,13 @@ static inline void ath_debug_stat_rc(struct ath_softc *sc,
static inline void ath_debug_stat_tx(struct ath_softc *sc,
struct ath_txq *txq,
struct ath_buf *bf)
struct ath_buf *bf,
struct ath_tx_status *ts)
{
}
static inline void ath_debug_stat_rx(struct ath_softc *sc,
struct ath_buf *bf)
struct ath_rx_status *rs)
{
}

View File

@ -43,7 +43,7 @@ static bool ath9k_hw_4k_fill_eeprom(struct ath_hw *ah)
for (addr = 0; addr < SIZE_EEPROM_4K; addr++) {
if (!ath9k_hw_nvram_read(common, addr + eep_start_loc, eep_data)) {
ath_print(common, ATH_DBG_EEPROM,
"Unable to read eeprom region \n");
"Unable to read eeprom region\n");
return false;
}
eep_data++;

View File

@ -44,7 +44,7 @@ static bool ath9k_hw_AR9287_fill_eeprom(struct ath_hw *ah)
if (!ath9k_hw_nvram_read(common,
addr + eep_start_loc, eep_data)) {
ath_print(common, ATH_DBG_EEPROM,
"Unable to read eeprom region \n");
"Unable to read eeprom region\n");
return false;
}
eep_data++;

View File

@ -283,22 +283,17 @@ static void ath9k_gen_timer_start(struct ath_hw *ah,
u32 timer_next,
u32 timer_period)
{
struct ath_common *common = ath9k_hw_common(ah);
struct ath_softc *sc = (struct ath_softc *) common->priv;
ath9k_hw_gen_timer_start(ah, timer, timer_next, timer_period);
if ((sc->imask & ATH9K_INT_GENTIMER) == 0) {
if ((ah->imask & ATH9K_INT_GENTIMER) == 0) {
ath9k_hw_set_interrupts(ah, 0);
sc->imask |= ATH9K_INT_GENTIMER;
ath9k_hw_set_interrupts(ah, sc->imask);
ah->imask |= ATH9K_INT_GENTIMER;
ath9k_hw_set_interrupts(ah, ah->imask);
}
}
static void ath9k_gen_timer_stop(struct ath_hw *ah, struct ath_gen_timer *timer)
{
struct ath_common *common = ath9k_hw_common(ah);
struct ath_softc *sc = (struct ath_softc *) common->priv;
struct ath_gen_timer_table *timer_table = &ah->hw_gen_timers;
ath9k_hw_gen_timer_stop(ah, timer);
@ -306,8 +301,8 @@ static void ath9k_gen_timer_stop(struct ath_hw *ah, struct ath_gen_timer *timer)
/* if no timer is enabled, turn off interrupt mask */
if (timer_table->timer_mask.val == 0) {
ath9k_hw_set_interrupts(ah, 0);
sc->imask &= ~ATH9K_INT_GENTIMER;
ath9k_hw_set_interrupts(ah, sc->imask);
ah->imask &= ~ATH9K_INT_GENTIMER;
ath9k_hw_set_interrupts(ah, ah->imask);
}
}
@ -364,7 +359,7 @@ static void ath_btcoex_no_stomp_timer(void *arg)
bool is_btscan = sc->sc_flags & SC_OP_BT_SCAN;
ath_print(ath9k_hw_common(ah), ATH_DBG_BTCOEX,
"no stomp timer running \n");
"no stomp timer running\n");
spin_lock_bh(&btcoex->btcoex_lock);

View File

@ -21,6 +21,7 @@
static struct usb_device_id ath9k_hif_usb_ids[] = {
ATH9K_FW_USB_DEV(0x9271, "ar9271.fw"),
ATH9K_FW_USB_DEV(0x1006, "ar9271.fw"),
{ },
};
@ -31,27 +32,15 @@ static int __hif_usb_tx(struct hif_device_usb *hif_dev);
static void hif_usb_regout_cb(struct urb *urb)
{
struct cmd_buf *cmd = (struct cmd_buf *)urb->context;
struct hif_device_usb *hif_dev = cmd->hif_dev;
if (!hif_dev) {
usb_free_urb(urb);
if (cmd) {
if (cmd->skb)
dev_kfree_skb_any(cmd->skb);
kfree(cmd);
}
return;
}
switch (urb->status) {
case 0:
break;
case -ENOENT:
case -ECONNRESET:
break;
case -ENODEV:
case -ESHUTDOWN:
return;
goto free;
default:
break;
}
@ -60,8 +49,12 @@ static void hif_usb_regout_cb(struct urb *urb)
ath9k_htc_txcompletion_cb(cmd->hif_dev->htc_handle,
cmd->skb, 1);
kfree(cmd);
usb_free_urb(urb);
}
return;
free:
kfree_skb(cmd->skb);
kfree(cmd);
}
static int hif_usb_send_regout(struct hif_device_usb *hif_dev,
@ -89,11 +82,13 @@ static int hif_usb_send_regout(struct hif_device_usb *hif_dev,
skb->data, skb->len,
hif_usb_regout_cb, cmd, 1);
usb_anchor_urb(urb, &hif_dev->regout_submitted);
ret = usb_submit_urb(urb, GFP_KERNEL);
if (ret) {
usb_free_urb(urb);
usb_unanchor_urb(urb);
kfree(cmd);
}
usb_free_urb(urb);
return ret;
}
@ -154,6 +149,13 @@ static void hif_usb_tx_cb(struct urb *urb)
}
}
static inline void ath9k_skb_queue_purge(struct sk_buff_head *list)
{
struct sk_buff *skb;
while ((skb = __skb_dequeue(list)) != NULL)
dev_kfree_skb_any(skb);
}
/* TX lock has to be taken */
static int __hif_usb_tx(struct hif_device_usb *hif_dev)
{
@ -212,7 +214,7 @@ static int __hif_usb_tx(struct hif_device_usb *hif_dev)
ret = usb_submit_urb(tx_buf->urb, GFP_ATOMIC);
if (ret) {
tx_buf->len = tx_buf->offset = 0;
__skb_queue_purge(&tx_buf->skb_queue);
ath9k_skb_queue_purge(&tx_buf->skb_queue);
__skb_queue_head_init(&tx_buf->skb_queue);
list_move_tail(&tx_buf->list, &hif_dev->tx.tx_buf);
hif_dev->tx.tx_buf_cnt++;
@ -279,7 +281,7 @@ static void hif_usb_stop(void *hif_handle, u8 pipe_id)
unsigned long flags;
spin_lock_irqsave(&hif_dev->tx.tx_lock, flags);
__skb_queue_purge(&hif_dev->tx.tx_skb_queue);
ath9k_skb_queue_purge(&hif_dev->tx.tx_skb_queue);
hif_dev->tx.tx_skb_cnt = 0;
hif_dev->tx.flags |= HIF_USB_TX_STOP;
spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags);
@ -299,6 +301,8 @@ static int hif_usb_send(void *hif_handle, u8 pipe_id, struct sk_buff *skb,
ret = hif_usb_send_regout(hif_dev, skb);
break;
default:
dev_err(&hif_dev->udev->dev,
"ath9k_htc: Invalid TX pipe: %d\n", pipe_id);
ret = -EINVAL;
break;
}
@ -321,12 +325,14 @@ static struct ath9k_htc_hif hif_usb = {
static void ath9k_hif_usb_rx_stream(struct hif_device_usb *hif_dev,
struct sk_buff *skb)
{
struct sk_buff *nskb, *skb_pool[8];
struct sk_buff *nskb, *skb_pool[MAX_PKT_NUM_IN_TRANSFER];
int index = 0, i = 0, chk_idx, len = skb->len;
int rx_remain_len = 0, rx_pkt_len = 0;
u16 pkt_len, pkt_tag, pool_index = 0;
u8 *ptr;
spin_lock(&hif_dev->rx_lock);
rx_remain_len = hif_dev->rx_remain_len;
rx_pkt_len = hif_dev->rx_transfer_len;
@ -353,6 +359,8 @@ static void ath9k_hif_usb_rx_stream(struct hif_device_usb *hif_dev,
}
}
spin_unlock(&hif_dev->rx_lock);
while (index < len) {
ptr = (u8 *) skb->data;
@ -370,6 +378,7 @@ static void ath9k_hif_usb_rx_stream(struct hif_device_usb *hif_dev,
index = index + 4 + pkt_len + pad_len;
if (index > MAX_RX_BUF_SIZE) {
spin_lock(&hif_dev->rx_lock);
hif_dev->rx_remain_len = index - MAX_RX_BUF_SIZE;
hif_dev->rx_transfer_len =
MAX_RX_BUF_SIZE - chk_idx - 4;
@ -381,6 +390,7 @@ static void ath9k_hif_usb_rx_stream(struct hif_device_usb *hif_dev,
dev_err(&hif_dev->udev->dev,
"ath9k_htc: RX memory allocation"
" error\n");
spin_unlock(&hif_dev->rx_lock);
goto err;
}
skb_reserve(nskb, 32);
@ -391,6 +401,7 @@ static void ath9k_hif_usb_rx_stream(struct hif_device_usb *hif_dev,
/* Record the buffer pointer */
hif_dev->remain_skb = nskb;
spin_unlock(&hif_dev->rx_lock);
} else {
nskb = __dev_alloc_skb(pkt_len + 32, GFP_ATOMIC);
if (!nskb) {
@ -408,14 +419,11 @@ static void ath9k_hif_usb_rx_stream(struct hif_device_usb *hif_dev,
}
} else {
RX_STAT_INC(skb_dropped);
dev_kfree_skb_any(skb);
return;
}
}
err:
dev_kfree_skb_any(skb);
for (i = 0; i < pool_index; i++) {
ath9k_htc_rx_msg(hif_dev->htc_handle, skb_pool[i],
skb_pool[i]->len, USB_WLAN_RX_PIPE);
@ -426,11 +434,13 @@ err:
static void ath9k_hif_usb_rx_cb(struct urb *urb)
{
struct sk_buff *skb = (struct sk_buff *) urb->context;
struct sk_buff *nskb;
struct hif_device_usb *hif_dev = (struct hif_device_usb *)
usb_get_intfdata(usb_ifnum_to_if(urb->dev, 0));
int ret;
if (!skb)
return;
if (!hif_dev)
goto free;
@ -448,38 +458,23 @@ static void ath9k_hif_usb_rx_cb(struct urb *urb)
if (likely(urb->actual_length != 0)) {
skb_put(skb, urb->actual_length);
nskb = __dev_alloc_skb(MAX_RX_BUF_SIZE, GFP_ATOMIC);
if (!nskb)
goto resubmit;
usb_fill_bulk_urb(urb, hif_dev->udev,
usb_rcvbulkpipe(hif_dev->udev,
USB_WLAN_RX_PIPE),
nskb->data, MAX_RX_BUF_SIZE,
ath9k_hif_usb_rx_cb, nskb);
ret = usb_submit_urb(urb, GFP_ATOMIC);
if (ret) {
dev_kfree_skb_any(nskb);
goto free;
}
ath9k_hif_usb_rx_stream(hif_dev, skb);
return;
}
resubmit:
skb_reset_tail_pointer(skb);
skb_trim(skb, 0);
usb_anchor_urb(urb, &hif_dev->rx_submitted);
ret = usb_submit_urb(urb, GFP_ATOMIC);
if (ret)
if (ret) {
usb_unanchor_urb(urb);
goto free;
}
return;
free:
dev_kfree_skb_any(skb);
kfree_skb(skb);
}
static void ath9k_hif_usb_reg_in_cb(struct urb *urb)
@ -490,6 +485,9 @@ static void ath9k_hif_usb_reg_in_cb(struct urb *urb)
usb_get_intfdata(usb_ifnum_to_if(urb->dev, 0));
int ret;
if (!skb)
return;
if (!hif_dev)
goto free;
@ -508,7 +506,7 @@ static void ath9k_hif_usb_reg_in_cb(struct urb *urb)
if (likely(urb->actual_length != 0)) {
skb_put(skb, urb->actual_length);
nskb = __dev_alloc_skb(MAX_REG_IN_BUF_SIZE, GFP_ATOMIC);
nskb = alloc_skb(MAX_REG_IN_BUF_SIZE, GFP_ATOMIC);
if (!nskb)
goto resubmit;
@ -519,7 +517,7 @@ static void ath9k_hif_usb_reg_in_cb(struct urb *urb)
ret = usb_submit_urb(urb, GFP_ATOMIC);
if (ret) {
dev_kfree_skb_any(nskb);
kfree_skb(nskb);
goto free;
}
@ -539,7 +537,8 @@ resubmit:
return;
free:
dev_kfree_skb_any(skb);
kfree_skb(skb);
urb->context = NULL;
}
static void ath9k_hif_usb_dealloc_tx_urbs(struct hif_device_usb *hif_dev)
@ -609,78 +608,66 @@ err:
return -ENOMEM;
}
static void ath9k_hif_usb_dealloc_rx_skbs(struct hif_device_usb *hif_dev)
{
int i;
for (i = 0; i < MAX_RX_URB_NUM; i++) {
if (hif_dev->wlan_rx_data_urb[i]) {
if (hif_dev->wlan_rx_data_urb[i]->transfer_buffer)
dev_kfree_skb_any((void *)
hif_dev->wlan_rx_data_urb[i]->context);
}
}
}
static void ath9k_hif_usb_dealloc_rx_urbs(struct hif_device_usb *hif_dev)
{
int i;
for (i = 0; i < MAX_RX_URB_NUM; i++) {
if (hif_dev->wlan_rx_data_urb[i]) {
usb_kill_urb(hif_dev->wlan_rx_data_urb[i]);
usb_free_urb(hif_dev->wlan_rx_data_urb[i]);
hif_dev->wlan_rx_data_urb[i] = NULL;
}
}
}
static int ath9k_hif_usb_prep_rx_urb(struct hif_device_usb *hif_dev,
struct urb *urb)
{
struct sk_buff *skb;
skb = __dev_alloc_skb(MAX_RX_BUF_SIZE, GFP_KERNEL);
if (!skb)
return -ENOMEM;
usb_fill_bulk_urb(urb, hif_dev->udev,
usb_rcvbulkpipe(hif_dev->udev, USB_WLAN_RX_PIPE),
skb->data, MAX_RX_BUF_SIZE,
ath9k_hif_usb_rx_cb, skb);
return 0;
usb_kill_anchored_urbs(&hif_dev->rx_submitted);
}
static int ath9k_hif_usb_alloc_rx_urbs(struct hif_device_usb *hif_dev)
{
struct urb *urb = NULL;
struct sk_buff *skb = NULL;
int i, ret;
init_usb_anchor(&hif_dev->rx_submitted);
spin_lock_init(&hif_dev->rx_lock);
for (i = 0; i < MAX_RX_URB_NUM; i++) {
/* Allocate URB */
hif_dev->wlan_rx_data_urb[i] = usb_alloc_urb(0, GFP_KERNEL);
if (hif_dev->wlan_rx_data_urb[i] == NULL) {
urb = usb_alloc_urb(0, GFP_KERNEL);
if (urb == NULL) {
ret = -ENOMEM;
goto err_rx_urb;
goto err_urb;
}
/* Allocate buffer */
ret = ath9k_hif_usb_prep_rx_urb(hif_dev,
hif_dev->wlan_rx_data_urb[i]);
if (ret)
goto err_rx_urb;
skb = alloc_skb(MAX_RX_BUF_SIZE, GFP_KERNEL);
if (!skb) {
ret = -ENOMEM;
goto err_skb;
}
usb_fill_bulk_urb(urb, hif_dev->udev,
usb_rcvbulkpipe(hif_dev->udev,
USB_WLAN_RX_PIPE),
skb->data, MAX_RX_BUF_SIZE,
ath9k_hif_usb_rx_cb, skb);
/* Anchor URB */
usb_anchor_urb(urb, &hif_dev->rx_submitted);
/* Submit URB */
ret = usb_submit_urb(hif_dev->wlan_rx_data_urb[i], GFP_KERNEL);
if (ret)
goto err_rx_urb;
ret = usb_submit_urb(urb, GFP_KERNEL);
if (ret) {
usb_unanchor_urb(urb);
goto err_submit;
}
/*
* Drop reference count.
* This ensures that the URB is freed when the anchored URBs are killed.
*/
usb_free_urb(urb);
}
return 0;
err_rx_urb:
ath9k_hif_usb_dealloc_rx_skbs(hif_dev);
err_submit:
kfree_skb(skb);
err_skb:
usb_free_urb(urb);
err_urb:
ath9k_hif_usb_dealloc_rx_urbs(hif_dev);
return ret;
}
@ -689,6 +676,8 @@ static void ath9k_hif_usb_dealloc_reg_in_urb(struct hif_device_usb *hif_dev)
{
if (hif_dev->reg_in_urb) {
usb_kill_urb(hif_dev->reg_in_urb);
if (hif_dev->reg_in_urb->context)
kfree_skb((void *)hif_dev->reg_in_urb->context);
usb_free_urb(hif_dev->reg_in_urb);
hif_dev->reg_in_urb = NULL;
}
@ -702,7 +691,7 @@ static int ath9k_hif_usb_alloc_reg_in_urb(struct hif_device_usb *hif_dev)
if (hif_dev->reg_in_urb == NULL)
return -ENOMEM;
skb = __dev_alloc_skb(MAX_REG_IN_BUF_SIZE, GFP_KERNEL);
skb = alloc_skb(MAX_REG_IN_BUF_SIZE, GFP_KERNEL);
if (!skb)
goto err;
@ -712,12 +701,10 @@ static int ath9k_hif_usb_alloc_reg_in_urb(struct hif_device_usb *hif_dev)
ath9k_hif_usb_reg_in_cb, skb, 1);
if (usb_submit_urb(hif_dev->reg_in_urb, GFP_KERNEL) != 0)
goto err_skb;
goto err;
return 0;
err_skb:
dev_kfree_skb_any(skb);
err:
ath9k_hif_usb_dealloc_reg_in_urb(hif_dev);
return -ENOMEM;
@ -725,6 +712,9 @@ err:
static int ath9k_hif_usb_alloc_urbs(struct hif_device_usb *hif_dev)
{
/* Register Write */
init_usb_anchor(&hif_dev->regout_submitted);
/* TX */
if (ath9k_hif_usb_alloc_tx_urbs(hif_dev) < 0)
goto err;
@ -733,7 +723,7 @@ static int ath9k_hif_usb_alloc_urbs(struct hif_device_usb *hif_dev)
if (ath9k_hif_usb_alloc_rx_urbs(hif_dev) < 0)
goto err;
/* Register Read/Write */
/* Register Read */
if (ath9k_hif_usb_alloc_reg_in_urb(hif_dev) < 0)
goto err;
@ -830,6 +820,7 @@ err_fw_req:
static void ath9k_hif_usb_dealloc_urbs(struct hif_device_usb *hif_dev)
{
usb_kill_anchored_urbs(&hif_dev->regout_submitted);
ath9k_hif_usb_dealloc_reg_in_urb(hif_dev);
ath9k_hif_usb_dealloc_tx_urbs(hif_dev);
ath9k_hif_usb_dealloc_rx_urbs(hif_dev);

View File

@ -34,6 +34,7 @@
#define MAX_RX_URB_NUM 8
#define MAX_RX_BUF_SIZE 16384
#define MAX_PKT_NUM_IN_TRANSFER 10
#define MAX_REG_OUT_URB_NUM 1
#define MAX_REG_OUT_BUF_NUM 8
@ -85,18 +86,17 @@ struct hif_device_usb {
struct usb_interface *interface;
const struct firmware *firmware;
struct htc_target *htc_handle;
u8 flags;
struct hif_usb_tx tx;
struct urb *wlan_rx_data_urb[MAX_RX_URB_NUM];
struct urb *reg_in_urb;
struct usb_anchor regout_submitted;
struct usb_anchor rx_submitted;
struct sk_buff *remain_skb;
int rx_remain_len;
int rx_pkt_len;
int rx_transfer_len;
int rx_pad_len;
spinlock_t rx_lock;
u8 flags; /* HIF_USB_* */
};
int ath9k_hif_usb_init(void);

View File

@ -309,6 +309,14 @@ struct ath_led {
int brightness;
};
struct htc_beacon_config {
u16 beacon_interval;
u16 listen_interval;
u16 dtim_period;
u16 bmiss_timeout;
u8 dtim_count;
};
#define OP_INVALID BIT(0)
#define OP_SCANNING BIT(1)
#define OP_FULL_RESET BIT(2)
@ -349,7 +357,11 @@ struct ath9k_htc_priv {
struct sk_buff *beacon;
spinlock_t beacon_lock;
bool tx_queues_stop;
spinlock_t tx_lock;
struct ieee80211_vif *vif;
struct htc_beacon_config cur_beacon_conf;
unsigned int rxfilter;
struct tasklet_struct wmi_tasklet;
struct tasklet_struct rx_tasklet;
@ -360,6 +372,11 @@ struct ath9k_htc_priv {
struct ath9k_htc_aggr_work aggr_work;
struct delayed_work ath9k_aggr_work;
struct delayed_work ath9k_ani_work;
struct work_struct ps_work;
struct mutex htc_pm_lock;
unsigned long ps_usecount;
bool ps_enabled;
struct ath_led radio_led;
struct ath_led assoc_led;
@ -386,8 +403,7 @@ static inline void ath_read_cachesize(struct ath_common *common, int *csz)
}
void ath9k_htc_beacon_config(struct ath9k_htc_priv *priv,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *bss_conf);
struct ieee80211_vif *vif);
void ath9k_htc_swba(struct ath9k_htc_priv *priv, u8 beacon_pending);
void ath9k_htc_beacon_update(struct ath9k_htc_priv *priv,
struct ieee80211_vif *vif);
@ -415,6 +431,11 @@ int ath9k_rx_init(struct ath9k_htc_priv *priv);
void ath9k_rx_cleanup(struct ath9k_htc_priv *priv);
void ath9k_host_rx_init(struct ath9k_htc_priv *priv);
void ath9k_rx_tasklet(unsigned long data);
u32 ath9k_htc_calcrxfilter(struct ath9k_htc_priv *priv);
void ath9k_htc_ps_wakeup(struct ath9k_htc_priv *priv);
void ath9k_htc_ps_restore(struct ath9k_htc_priv *priv);
void ath9k_ps_work(struct work_struct *work);
void ath9k_start_rfkill_poll(struct ath9k_htc_priv *priv);
void ath9k_init_leds(struct ath9k_htc_priv *priv);

View File

@ -19,7 +19,7 @@
#define FUDGE 2
static void ath9k_htc_beacon_config_sta(struct ath9k_htc_priv *priv,
struct ieee80211_bss_conf *bss_conf)
struct htc_beacon_config *bss_conf)
{
struct ath_common *common = ath9k_hw_common(priv->ah);
struct ath9k_beacon_state bs;
@ -34,8 +34,8 @@ static void ath9k_htc_beacon_config_sta(struct ath9k_htc_priv *priv,
memset(&bs, 0, sizeof(bs));
intval = bss_conf->beacon_int & ATH9K_BEACON_PERIOD;
bmiss_timeout = (ATH_DEFAULT_BMISS_LIMIT * bss_conf->beacon_int);
intval = bss_conf->beacon_interval & ATH9K_BEACON_PERIOD;
bmiss_timeout = (ATH_DEFAULT_BMISS_LIMIT * bss_conf->beacon_interval);
/*
* Setup dtim and cfp parameters according to
@ -138,7 +138,7 @@ static void ath9k_htc_beacon_config_sta(struct ath9k_htc_priv *priv,
}
static void ath9k_htc_beacon_config_adhoc(struct ath9k_htc_priv *priv,
struct ieee80211_bss_conf *bss_conf)
struct htc_beacon_config *bss_conf)
{
struct ath_common *common = ath9k_hw_common(priv->ah);
enum ath9k_int imask = 0;
@ -146,7 +146,7 @@ static void ath9k_htc_beacon_config_adhoc(struct ath9k_htc_priv *priv,
int ret;
u8 cmd_rsp;
intval = bss_conf->beacon_int & ATH9K_BEACON_PERIOD;
intval = bss_conf->beacon_interval & ATH9K_BEACON_PERIOD;
nexttbtt = intval;
intval |= ATH9K_BEACON_ENA;
if (priv->op_flags & OP_ENABLE_BEACON)
@ -154,7 +154,7 @@ static void ath9k_htc_beacon_config_adhoc(struct ath9k_htc_priv *priv,
ath_print(common, ATH_DBG_BEACON,
"IBSS Beacon config, intval: %d, imask: 0x%x\n",
bss_conf->beacon_int, imask);
bss_conf->beacon_interval, imask);
WMI_CMD(WMI_DISABLE_INTR_CMDID);
ath9k_hw_beaconinit(priv->ah, nexttbtt, intval);
@ -239,18 +239,35 @@ void ath9k_htc_swba(struct ath9k_htc_priv *priv, u8 beacon_pending)
spin_unlock_bh(&priv->beacon_lock);
}
void ath9k_htc_beacon_config(struct ath9k_htc_priv *priv,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *bss_conf)
struct ieee80211_vif *vif)
{
struct ath_common *common = ath9k_hw_common(priv->ah);
enum nl80211_iftype iftype;
struct htc_beacon_config *cur_conf = &priv->cur_beacon_conf;
switch (vif->type) {
if (vif) {
struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
iftype = vif->type;
cur_conf->beacon_interval = bss_conf->beacon_int;
cur_conf->dtim_period = bss_conf->dtim_period;
cur_conf->listen_interval = 1;
cur_conf->dtim_count = 1;
cur_conf->bmiss_timeout =
ATH_DEFAULT_BMISS_LIMIT * cur_conf->beacon_interval;
} else
iftype = priv->ah->opmode;
if (cur_conf->beacon_interval == 0)
cur_conf->beacon_interval = 100;
switch (iftype) {
case NL80211_IFTYPE_STATION:
ath9k_htc_beacon_config_sta(priv, bss_conf);
ath9k_htc_beacon_config_sta(priv, cur_conf);
break;
case NL80211_IFTYPE_ADHOC:
ath9k_htc_beacon_config_adhoc(priv, bss_conf);
ath9k_htc_beacon_config_adhoc(priv, cur_conf);
break;
default:
ath_print(common, ATH_DBG_CONFIG,

View File

@ -287,6 +287,7 @@ static bool ath_usb_eeprom_read(struct ath_common *common, u32 off, u16 *data)
}
static const struct ath_bus_ops ath9k_usb_bus_ops = {
.ath_bus_type = ATH_USB,
.read_cachesize = ath_usb_read_cachesize,
.eeprom_read = ath_usb_eeprom_read,
};
@ -421,6 +422,7 @@ static void ath9k_init_misc(struct ath9k_htc_priv *priv)
memcpy(common->bssidmask, ath_bcast_mac, ETH_ALEN);
priv->op_flags |= OP_TXAGGR;
priv->ah->opmode = NL80211_IFTYPE_STATION;
}
static int ath9k_init_priv(struct ath9k_htc_priv *priv, u16 devid)
@ -449,8 +451,10 @@ static int ath9k_init_priv(struct ath9k_htc_priv *priv, u16 devid)
spin_lock_init(&priv->wmi->wmi_lock);
spin_lock_init(&priv->beacon_lock);
spin_lock_init(&priv->tx_lock);
mutex_init(&priv->mutex);
mutex_init(&priv->aggr_work.mutex);
mutex_init(&priv->htc_pm_lock);
tasklet_init(&priv->wmi_tasklet, ath9k_wmi_tasklet,
(unsigned long)priv);
tasklet_init(&priv->rx_tasklet, ath9k_rx_tasklet,
@ -458,6 +462,7 @@ static int ath9k_init_priv(struct ath9k_htc_priv *priv, u16 devid)
tasklet_init(&priv->tx_tasklet, ath9k_tx_tasklet, (unsigned long)priv);
INIT_DELAYED_WORK(&priv->ath9k_aggr_work, ath9k_htc_aggr_work);
INIT_DELAYED_WORK(&priv->ath9k_ani_work, ath9k_ani_work);
INIT_WORK(&priv->ps_work, ath9k_ps_work);
/*
* Cache line size is used to size and align various
@ -511,12 +516,17 @@ static void ath9k_set_hw_capab(struct ath9k_htc_priv *priv,
hw->flags = IEEE80211_HW_SIGNAL_DBM |
IEEE80211_HW_AMPDU_AGGREGATION |
IEEE80211_HW_SPECTRUM_MGMT |
IEEE80211_HW_HAS_RATE_CONTROL;
IEEE80211_HW_HAS_RATE_CONTROL |
IEEE80211_HW_RX_INCLUDES_FCS |
IEEE80211_HW_SUPPORTS_PS |
IEEE80211_HW_PS_NULLFUNC_STACK;
hw->wiphy->interface_modes =
BIT(NL80211_IFTYPE_STATION) |
BIT(NL80211_IFTYPE_ADHOC);
hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
hw->queues = 4;
hw->channel_change_time = 5000;
hw->max_listen_interval = 10;

View File

@ -65,6 +65,56 @@ static enum htc_phymode ath9k_htc_get_curmode(struct ath9k_htc_priv *priv,
return mode;
}
static bool ath9k_htc_setpower(struct ath9k_htc_priv *priv,
enum ath9k_power_mode mode)
{
bool ret;
mutex_lock(&priv->htc_pm_lock);
ret = ath9k_hw_setpower(priv->ah, mode);
mutex_unlock(&priv->htc_pm_lock);
return ret;
}
void ath9k_htc_ps_wakeup(struct ath9k_htc_priv *priv)
{
mutex_lock(&priv->htc_pm_lock);
if (++priv->ps_usecount != 1)
goto unlock;
ath9k_hw_setpower(priv->ah, ATH9K_PM_AWAKE);
unlock:
mutex_unlock(&priv->htc_pm_lock);
}
void ath9k_htc_ps_restore(struct ath9k_htc_priv *priv)
{
mutex_lock(&priv->htc_pm_lock);
if (--priv->ps_usecount != 0)
goto unlock;
if (priv->ps_enabled)
ath9k_hw_setpower(priv->ah, ATH9K_PM_NETWORK_SLEEP);
unlock:
mutex_unlock(&priv->htc_pm_lock);
}
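Usage follows the pattern visible throughout the rest of this patch: bracket every hardware access, and the usecount keeps the chip awake across nested callers. A trivial sketch:
ath9k_htc_ps_wakeup(priv);
/* ... touch hardware registers here ... */
ath9k_htc_ps_restore(priv);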
void ath9k_ps_work(struct work_struct *work)
{
struct ath9k_htc_priv *priv =
container_of(work, struct ath9k_htc_priv,
ps_work);
ath9k_htc_setpower(priv, ATH9K_PM_AWAKE);
/* The chip wakes up after receiving the first beacon
while network sleep is enabled. For the driver to
be in sync with the hw, set the chip to awake and
only then set it to sleep.
*/
ath9k_htc_setpower(priv, ATH9K_PM_NETWORK_SLEEP);
}
static int ath9k_htc_set_channel(struct ath9k_htc_priv *priv,
struct ieee80211_hw *hw,
struct ath9k_channel *hchan)
@ -87,7 +137,7 @@ static int ath9k_htc_set_channel(struct ath9k_htc_priv *priv,
/* Fiddle around with fastcc later on, for now just use full reset */
fastcc = false;
ath9k_htc_ps_wakeup(priv);
htc_stop(priv->htc);
WMI_CMD(WMI_DISABLE_INTR_CMDID);
WMI_CMD(WMI_DRAIN_TXQ_ALL_CMDID);
@ -103,6 +153,7 @@ static int ath9k_htc_set_channel(struct ath9k_htc_priv *priv,
ath_print(common, ATH_DBG_FATAL,
"Unable to reset channel (%u Mhz) "
"reset status %d\n", channel->center_freq, ret);
ath9k_htc_ps_restore(priv);
goto err;
}
@ -128,6 +179,7 @@ static int ath9k_htc_set_channel(struct ath9k_htc_priv *priv,
priv->op_flags &= ~OP_FULL_RESET;
err:
ath9k_htc_ps_restore(priv);
return ret;
}
@ -412,32 +464,31 @@ static int ath9k_htc_aggr_oper(struct ath9k_htc_priv *priv,
if (tid > ATH9K_HTC_MAX_TID)
return -EINVAL;
rcu_read_lock();
sta = ieee80211_find_sta(vif, sta_addr);
if (sta) {
ista = (struct ath9k_htc_sta *) sta->drv_priv;
} else {
rcu_read_unlock();
return -EINVAL;
}
if (!ista) {
rcu_read_unlock();
return -EINVAL;
}
memset(&aggr, 0, sizeof(struct ath9k_htc_target_aggr));
aggr.sta_index = ista->index;
rcu_read_unlock();
aggr.tidno = tid;
aggr.aggr_enable = oper;
rcu_read_lock();
/* Check if we are able to retrieve the station */
sta = ieee80211_find_sta(vif, sta_addr);
if (!sta) {
rcu_read_unlock();
return -EINVAL;
}
ista = (struct ath9k_htc_sta *) sta->drv_priv;
if (oper)
ista->tid_state[tid] = AGGR_START;
else
ista->tid_state[tid] = AGGR_STOP;
aggr.sta_index = ista->index;
rcu_read_unlock();
aggr.tidno = tid;
aggr.aggr_enable = oper;
WMI_CMD_BUF(WMI_TX_AGGR_ENABLE_CMDID, &aggr);
if (ret)
ath_print(common, ATH_DBG_CONFIG,
@ -694,6 +745,10 @@ void ath9k_ani_work(struct work_struct *work)
short_cal_interval = ATH_STA_SHORT_CALINTERVAL;
/* Only calibrate if awake */
if (ah->power_mode != ATH9K_PM_AWAKE)
goto set_timer;
/* Long calibration runs independently of short calibration. */
if ((timestamp - common->ani.longcal_timer) >= ATH_LONG_CALINTERVAL) {
longcal = true;
@ -728,6 +783,9 @@ void ath9k_ani_work(struct work_struct *work)
/* Skip all processing if there's nothing to do. */
if (longcal || shortcal || aniflag) {
ath9k_htc_ps_wakeup(priv);
/* Call ANI routine if necessary */
if (aniflag)
ath9k_hw_ani_monitor(ah, ah->curchan);
@ -749,8 +807,11 @@ void ath9k_ani_work(struct work_struct *work)
ah->curchan->channelFlags,
common->ani.noise_floor);
}
ath9k_htc_ps_restore(priv);
}
set_timer:
/*
* Set timer interval based on previous results.
* The interval must be the shortest necessary to satisfy ANI,
@ -995,7 +1056,7 @@ static int ath9k_htc_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
{
struct ieee80211_hdr *hdr;
struct ath9k_htc_priv *priv = hw->priv;
int padpos, padsize;
int padpos, padsize, ret;
hdr = (struct ieee80211_hdr *) skb->data;
@ -1009,8 +1070,19 @@ static int ath9k_htc_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
memmove(skb->data, skb->data + padsize, padpos);
}
if (ath9k_htc_tx_start(priv, skb) != 0) {
ath_print(ath9k_hw_common(priv->ah), ATH_DBG_XMIT, "Tx failed");
ret = ath9k_htc_tx_start(priv, skb);
if (ret != 0) {
if (ret == -ENOMEM) {
ath_print(ath9k_hw_common(priv->ah), ATH_DBG_XMIT,
"Stopping TX queues\n");
ieee80211_stop_queues(hw);
spin_lock_bh(&priv->tx_lock);
priv->tx_queues_stop = true;
spin_unlock_bh(&priv->tx_lock);
} else {
ath_print(ath9k_hw_common(priv->ah), ATH_DBG_XMIT,
"Tx failed");
}
goto fail_tx;
}
@ -1075,6 +1147,12 @@ static int ath9k_htc_start(struct ieee80211_hw *hw)
priv->op_flags &= ~OP_INVALID;
htc_start(priv->htc);
spin_lock_bh(&priv->tx_lock);
priv->tx_queues_stop = false;
spin_unlock_bh(&priv->tx_lock);
ieee80211_wake_queues(hw);
mutex_unlock:
mutex_unlock(&priv->mutex);
return ret;
@ -1096,6 +1174,7 @@ static void ath9k_htc_stop(struct ieee80211_hw *hw)
return;
}
ath9k_htc_ps_wakeup(priv);
htc_stop(priv->htc);
WMI_CMD(WMI_DISABLE_INTR_CMDID);
WMI_CMD(WMI_DRAIN_TXQ_ALL_CMDID);
@ -1103,8 +1182,10 @@ static void ath9k_htc_stop(struct ieee80211_hw *hw)
ath9k_hw_phy_disable(ah);
ath9k_hw_disable(ah);
ath9k_hw_configpcipowersave(ah, 1, 1);
ath9k_hw_setpower(ah, ATH9K_PM_FULL_SLEEP);
ath9k_htc_ps_restore(priv);
ath9k_htc_setpower(priv, ATH9K_PM_FULL_SLEEP);
cancel_work_sync(&priv->ps_work);
cancel_delayed_work_sync(&priv->ath9k_ani_work);
cancel_delayed_work_sync(&priv->ath9k_aggr_work);
cancel_delayed_work_sync(&priv->ath9k_led_blink_work);
@ -1145,6 +1226,7 @@ static int ath9k_htc_add_interface(struct ieee80211_hw *hw,
goto out;
}
ath9k_htc_ps_wakeup(priv);
memset(&hvif, 0, sizeof(struct ath9k_htc_target_vif));
memcpy(&hvif.myaddr, vif->addr, ETH_ALEN);
@ -1191,6 +1273,7 @@ static int ath9k_htc_add_interface(struct ieee80211_hw *hw,
priv->vif = vif;
out:
ath9k_htc_ps_restore(priv);
mutex_unlock(&priv->mutex);
return ret;
}
@ -1259,6 +1342,16 @@ static int ath9k_htc_config(struct ieee80211_hw *hw, u32 changed)
}
}
if (changed & IEEE80211_CONF_CHANGE_PS) {
if (conf->flags & IEEE80211_CONF_PS) {
ath9k_htc_setpower(priv, ATH9K_PM_NETWORK_SLEEP);
priv->ps_enabled = true;
} else {
priv->ps_enabled = false;
cancel_work_sync(&priv->ps_work);
ath9k_htc_setpower(priv, ATH9K_PM_AWAKE);
}
}
if (changed & IEEE80211_CONF_CHANGE_MONITOR) {
if (conf->flags & IEEE80211_CONF_MONITOR) {
@ -1295,16 +1388,18 @@ static void ath9k_htc_configure_filter(struct ieee80211_hw *hw,
mutex_lock(&priv->mutex);
ath9k_htc_ps_wakeup(priv);
changed_flags &= SUPPORTED_FILTERS;
*total_flags &= SUPPORTED_FILTERS;
priv->rxfilter = *total_flags;
rfilt = ath9k_cmn_calcrxfilter(hw, priv->ah, priv->rxfilter);
rfilt = ath9k_htc_calcrxfilter(priv);
ath9k_hw_setrxfilter(priv->ah, rfilt);
ath_print(ath9k_hw_common(priv->ah), ATH_DBG_CONFIG,
"Set HW RX filter: 0x%x\n", rfilt);
ath9k_htc_ps_restore(priv);
mutex_unlock(&priv->mutex);
}
@ -1382,6 +1477,7 @@ static int ath9k_htc_set_key(struct ieee80211_hw *hw,
mutex_lock(&priv->mutex);
ath_print(common, ATH_DBG_CONFIG, "Set HW Key\n");
ath9k_htc_ps_wakeup(priv);
switch (cmd) {
case SET_KEY:
@ -1404,6 +1500,7 @@ static int ath9k_htc_set_key(struct ieee80211_hw *hw,
ret = -EINVAL;
}
ath9k_htc_ps_restore(priv);
mutex_unlock(&priv->mutex);
return ret;
@ -1419,6 +1516,7 @@ static void ath9k_htc_bss_info_changed(struct ieee80211_hw *hw,
struct ath_common *common = ath9k_hw_common(ah);
mutex_lock(&priv->mutex);
ath9k_htc_ps_wakeup(priv);
if (changed & BSS_CHANGED_ASSOC) {
common->curaid = bss_conf->assoc ?
@ -1431,6 +1529,7 @@ static void ath9k_htc_bss_info_changed(struct ieee80211_hw *hw,
ath_start_ani(priv);
} else {
priv->op_flags &= ~OP_ASSOCIATED;
cancel_work_sync(&priv->ps_work);
cancel_delayed_work_sync(&priv->ath9k_ani_work);
}
}
@ -1450,7 +1549,7 @@ static void ath9k_htc_bss_info_changed(struct ieee80211_hw *hw,
((changed & BSS_CHANGED_BEACON_ENABLED) &&
bss_conf->enable_beacon)) {
priv->op_flags |= OP_ENABLE_BEACON;
ath9k_htc_beacon_config(priv, vif, bss_conf);
ath9k_htc_beacon_config(priv, vif);
}
if (changed & BSS_CHANGED_BEACON)
@ -1459,7 +1558,7 @@ static void ath9k_htc_bss_info_changed(struct ieee80211_hw *hw,
if ((changed & BSS_CHANGED_BEACON_ENABLED) &&
!bss_conf->enable_beacon) {
priv->op_flags &= ~OP_ENABLE_BEACON;
ath9k_htc_beacon_config(priv, vif, bss_conf);
ath9k_htc_beacon_config(priv, vif);
}
if (changed & BSS_CHANGED_ERP_PREAMBLE) {
@ -1490,6 +1589,7 @@ static void ath9k_htc_bss_info_changed(struct ieee80211_hw *hw,
ath9k_hw_init_global_settings(ah);
}
ath9k_htc_ps_restore(priv);
mutex_unlock(&priv->mutex);
}
@ -1518,9 +1618,11 @@ static void ath9k_htc_reset_tsf(struct ieee80211_hw *hw)
{
struct ath9k_htc_priv *priv = hw->priv;
ath9k_htc_ps_wakeup(priv);
mutex_lock(&priv->mutex);
ath9k_hw_reset_tsf(priv->ah);
mutex_unlock(&priv->mutex);
ath9k_htc_ps_restore(priv);
}
static int ath9k_htc_ampdu_action(struct ieee80211_hw *hw,
@ -1569,6 +1671,7 @@ static void ath9k_htc_sw_scan_start(struct ieee80211_hw *hw)
spin_lock_bh(&priv->beacon_lock);
priv->op_flags |= OP_SCANNING;
spin_unlock_bh(&priv->beacon_lock);
cancel_work_sync(&priv->ps_work);
cancel_delayed_work_sync(&priv->ath9k_ani_work);
mutex_unlock(&priv->mutex);
}
@ -1577,13 +1680,17 @@ static void ath9k_htc_sw_scan_complete(struct ieee80211_hw *hw)
{
struct ath9k_htc_priv *priv = hw->priv;
ath9k_htc_ps_wakeup(priv);
mutex_lock(&priv->mutex);
spin_lock_bh(&priv->beacon_lock);
priv->op_flags &= ~OP_SCANNING;
spin_unlock_bh(&priv->beacon_lock);
priv->op_flags |= OP_FULL_RESET;
if (priv->op_flags & OP_ASSOCIATED)
ath9k_htc_beacon_config(priv, NULL);
ath_start_ani(priv);
mutex_unlock(&priv->mutex);
ath9k_htc_ps_restore(priv);
}
static int ath9k_htc_set_rts_threshold(struct ieee80211_hw *hw, u32 value)

View File

@ -188,10 +188,20 @@ void ath9k_tx_tasklet(unsigned long data)
hdr = (struct ieee80211_hdr *) skb->data;
fc = hdr->frame_control;
tx_info = IEEE80211_SKB_CB(skb);
sta = tx_info->control.sta;
memset(&tx_info->status, 0, sizeof(tx_info->status));
rcu_read_lock();
sta = ieee80211_find_sta(priv->vif, hdr->addr1);
if (!sta) {
rcu_read_unlock();
ieee80211_tx_status(priv->hw, skb);
continue;
}
/* Check if we need to start aggregation */
if (sta && conf_is_ht(&priv->hw->conf) &&
(priv->op_flags & OP_TXAGGR)
&& !(skb->protocol == cpu_to_be16(ETH_P_PAE))) {
@ -213,9 +223,21 @@ void ath9k_tx_tasklet(unsigned long data)
rcu_read_unlock();
memset(&tx_info->status, 0, sizeof(tx_info->status));
/* Send status to mac80211 */
ieee80211_tx_status(priv->hw, skb);
}
/* Wake TX queues if needed */
spin_lock_bh(&priv->tx_lock);
if (priv->tx_queues_stop) {
priv->tx_queues_stop = false;
spin_unlock_bh(&priv->tx_lock);
ath_print(ath9k_hw_common(priv->ah), ATH_DBG_XMIT,
"Waking up TX queues\n");
ieee80211_wake_queues(priv->hw);
return;
}
spin_unlock_bh(&priv->tx_lock);
}
void ath9k_htc_txep(void *drv_priv, struct sk_buff *skb,
@ -290,10 +312,84 @@ bool ath9k_htc_txq_setup(struct ath9k_htc_priv *priv,
/* RX */
/******/
/*
* Calculate the RX filter to be set in the HW.
*/
u32 ath9k_htc_calcrxfilter(struct ath9k_htc_priv *priv)
{
#define RX_FILTER_PRESERVE (ATH9K_RX_FILTER_PHYERR | ATH9K_RX_FILTER_PHYRADAR)
struct ath_hw *ah = priv->ah;
u32 rfilt;
rfilt = (ath9k_hw_getrxfilter(ah) & RX_FILTER_PRESERVE)
| ATH9K_RX_FILTER_UCAST | ATH9K_RX_FILTER_BCAST
| ATH9K_RX_FILTER_MCAST;
/* If not a STA, enable processing of Probe Requests */
if (ah->opmode != NL80211_IFTYPE_STATION)
rfilt |= ATH9K_RX_FILTER_PROBEREQ;
/*
* Set promiscuous mode when FIF_PROMISC_IN_BSS is enabled for station
* mode interface or when in monitor mode. AP mode does not need this
* since it receives all in-BSS frames anyway.
*/
if (((ah->opmode != NL80211_IFTYPE_AP) &&
(priv->rxfilter & FIF_PROMISC_IN_BSS)) ||
(ah->opmode == NL80211_IFTYPE_MONITOR))
rfilt |= ATH9K_RX_FILTER_PROM;
if (priv->rxfilter & FIF_CONTROL)
rfilt |= ATH9K_RX_FILTER_CONTROL;
if ((ah->opmode == NL80211_IFTYPE_STATION) &&
!(priv->rxfilter & FIF_BCN_PRBRESP_PROMISC))
rfilt |= ATH9K_RX_FILTER_MYBEACON;
else
rfilt |= ATH9K_RX_FILTER_BEACON;
if (conf_is_ht(&priv->hw->conf))
rfilt |= ATH9K_RX_FILTER_COMP_BAR;
return rfilt;
#undef RX_FILTER_PRESERVE
}
/*
* Recv initialization for opmode change.
*/
static void ath9k_htc_opmode_init(struct ath9k_htc_priv *priv)
{
struct ath_hw *ah = priv->ah;
struct ath_common *common = ath9k_hw_common(ah);
u32 rfilt, mfilt[2];
/* configure rx filter */
rfilt = ath9k_htc_calcrxfilter(priv);
ath9k_hw_setrxfilter(ah, rfilt);
/* configure bssid mask */
if (ah->caps.hw_caps & ATH9K_HW_CAP_BSSIDMASK)
ath_hw_setbssidmask(common);
/* configure operational mode */
ath9k_hw_setopmode(ah);
/* Handle any link-level address change. */
ath9k_hw_setmac(ah, common->macaddr);
/* calculate and install multicast filter */
mfilt[0] = mfilt[1] = ~0;
ath9k_hw_setmcastfilter(ah, mfilt[0], mfilt[1]);
}
void ath9k_host_rx_init(struct ath9k_htc_priv *priv)
{
ath9k_hw_rxena(priv->ah);
ath9k_cmn_opmode_init(priv->hw, priv->ah, priv->rxfilter);
ath9k_htc_opmode_init(priv);
ath9k_hw_startpcureceive(priv->ah);
priv->rx.last_rssi = ATH_RSSI_DUMMY_MARKER;
}
@ -354,7 +450,7 @@ static bool ath9k_rx_prepare(struct ath9k_htc_priv *priv,
padpos = ath9k_cmn_padpos(fc);
padsize = padpos & 3;
if (padsize && skb->len >= padpos+padsize) {
if (padsize && skb->len >= padpos+padsize+FCS_LEN) {
memmove(skb->data + padsize, skb->data, padpos);
skb_pull(skb, padsize);
}
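As a concrete example of the tightened check: for a QoS data header padpos is 26, so padsize = 26 & 3 = 2, and the two pad bytes are only stripped when the frame still carries the 4-byte FCS as well, i.e. skb->len >= 26 + 2 + 4.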
@ -457,7 +553,7 @@ void ath9k_rx_tasklet(unsigned long data)
struct ieee80211_rx_status rx_status;
struct sk_buff *skb;
unsigned long flags;
struct ieee80211_hdr *hdr;
do {
spin_lock_irqsave(&priv->rx.rxbuflock, flags);
@ -484,6 +580,11 @@ void ath9k_rx_tasklet(unsigned long data)
memcpy(IEEE80211_SKB_RXCB(rxbuf->skb), &rx_status,
sizeof(struct ieee80211_rx_status));
skb = rxbuf->skb;
hdr = (struct ieee80211_hdr *) skb->data;
if (ieee80211_is_beacon(hdr->frame_control) && priv->ps_enabled)
ieee80211_queue_work(priv->hw, &priv->ps_work);
spin_unlock_irqrestore(&priv->rx.rxbuflock, flags);
ieee80211_rx(priv->hw, skb);
@ -550,7 +651,6 @@ void ath9k_htc_rxep(void *drv_priv, struct sk_buff *skb,
spin_lock(&priv->rx.rxbuflock);
memcpy(&rxbuf->rxstatus, rxstatus, HTC_RX_FRAME_HEADER_SIZE);
skb_pull(skb, HTC_RX_FRAME_HEADER_SIZE);
skb->len = rxstatus->rs_datalen;
rxbuf->skb = skb;
rxbuf->in_process = true;
spin_unlock(&priv->rx.rxbuflock);

View File

@ -146,7 +146,7 @@ static int htc_config_pipe_credits(struct htc_target *target)
struct htc_config_pipe_msg *cp_msg;
int ret, time_left;
skb = dev_alloc_skb(50 + sizeof(struct htc_frame_hdr));
skb = alloc_skb(50 + sizeof(struct htc_frame_hdr), GFP_ATOMIC);
if (!skb) {
dev_err(target->dev, "failed to allocate send buffer\n");
return -ENOMEM;
@ -174,7 +174,7 @@ static int htc_config_pipe_credits(struct htc_target *target)
return 0;
err:
dev_kfree_skb(skb);
kfree_skb(skb);
return -EINVAL;
}
@ -184,7 +184,7 @@ static int htc_setup_complete(struct htc_target *target)
struct htc_comp_msg *comp_msg;
int ret = 0, time_left;
skb = dev_alloc_skb(50 + sizeof(struct htc_frame_hdr));
skb = alloc_skb(50 + sizeof(struct htc_frame_hdr), GFP_ATOMIC);
if (!skb) {
dev_err(target->dev, "failed to allocate send buffer\n");
return -ENOMEM;
@ -210,7 +210,7 @@ static int htc_setup_complete(struct htc_target *target)
return 0;
err:
dev_kfree_skb(skb);
kfree_skb(skb);
return -EINVAL;
}
@ -250,8 +250,8 @@ int htc_connect_service(struct htc_target *target,
endpoint->dl_pipeid = service_to_dlpipe(service_connreq->service_id);
endpoint->ep_callbacks = service_connreq->ep_callbacks;
skb = dev_alloc_skb(sizeof(struct htc_conn_svc_msg) +
sizeof(struct htc_frame_hdr));
skb = alloc_skb(sizeof(struct htc_conn_svc_msg) +
sizeof(struct htc_frame_hdr), GFP_ATOMIC);
if (!skb) {
dev_err(target->dev, "Failed to allocate buf to send"
"service connect req\n");
@ -282,7 +282,7 @@ int htc_connect_service(struct htc_target *target,
*conn_rsp_epid = target->conn_rsp_epid;
return 0;
err:
dev_kfree_skb(skb);
kfree_skb(skb);
return ret;
}
@ -321,16 +321,18 @@ void ath9k_htc_txcompletion_cb(struct htc_target *htc_handle,
struct sk_buff *skb, bool txok)
{
struct htc_endpoint *endpoint;
struct htc_frame_hdr *htc_hdr;
struct htc_frame_hdr *htc_hdr = NULL;
if (htc_handle->htc_flags & HTC_OP_CONFIG_PIPE_CREDITS) {
complete(&htc_handle->cmd_wait);
htc_handle->htc_flags &= ~HTC_OP_CONFIG_PIPE_CREDITS;
goto ret;
}
if (htc_handle->htc_flags & HTC_OP_START_WAIT) {
complete(&htc_handle->cmd_wait);
htc_handle->htc_flags &= ~HTC_OP_START_WAIT;
goto ret;
}
if (skb) {
@ -343,6 +345,14 @@ void ath9k_htc_txcompletion_cb(struct htc_target *htc_handle,
htc_hdr->endpoint_id, txok);
}
}
return;
ret:
/* HTC-generated packets are freed here. */
if (htc_hdr && htc_hdr->endpoint_id != ENDPOINT0)
dev_kfree_skb_any(skb);
else
kfree_skb(skb);
}
/*
@ -367,7 +377,10 @@ void ath9k_htc_rx_msg(struct htc_target *htc_handle,
epid = htc_hdr->endpoint_id;
if (epid >= ENDPOINT_MAX) {
dev_kfree_skb_any(skb);
if (pipe_id != USB_REG_IN_PIPE)
dev_kfree_skb_any(skb);
else
kfree_skb(skb);
return;
}
@ -377,7 +390,7 @@ void ath9k_htc_rx_msg(struct htc_target *htc_handle,
if (htc_hdr->flags & HTC_FLAGS_RECV_TRAILER) {
if (be32_to_cpu(*(u32 *) skb->data) == 0x00C60000)
/* Move past the Watchdog pattern */
htc_hdr = (struct htc_frame_hdr *) skb->data + 4;
htc_hdr = (struct htc_frame_hdr *)(skb->data + 4);
}
/* Get the message ID */
@ -396,7 +409,7 @@ void ath9k_htc_rx_msg(struct htc_target *htc_handle,
break;
}
dev_kfree_skb_any(skb);
kfree_skb(skb);
} else {
if (htc_hdr->flags & HTC_FLAGS_RECV_TRAILER)


@ -28,9 +28,6 @@
static bool ath9k_hw_set_reset_reg(struct ath_hw *ah, u32 type);
static void ath9k_hw_set_regs(struct ath_hw *ah, struct ath9k_channel *chan);
static u32 ath9k_hw_ini_fixup(struct ath_hw *ah,
struct ar5416_eeprom_def *pEepData,
u32 reg, u32 value);
MODULE_AUTHOR("Atheros Communications");
MODULE_DESCRIPTION("Support for Atheros 802.11n wireless LAN cards.");
@ -548,7 +545,6 @@ static bool ath9k_hw_devid_supported(u16 devid)
case AR9285_DEVID_PCIE:
case AR5416_DEVID_AR9287_PCI:
case AR5416_DEVID_AR9287_PCIE:
case AR9271_USB:
case AR2427_DEVID_PCIE:
return true;
default:
@ -817,38 +813,46 @@ static void ath9k_hw_init_mode_gain_regs(struct ath_hw *ah)
/* txgain table */
if (txgain_type == AR5416_EEP_TXGAIN_HIGH_POWER) {
INIT_INI_ARRAY(&ah->iniModesTxGain,
ar9285Modes_high_power_tx_gain_9285_1_2,
ARRAY_SIZE(ar9285Modes_high_power_tx_gain_9285_1_2), 6);
if (AR_SREV_9285E_20(ah)) {
INIT_INI_ARRAY(&ah->iniModesTxGain,
ar9285Modes_XE2_0_high_power,
ARRAY_SIZE(
ar9285Modes_XE2_0_high_power), 6);
} else {
INIT_INI_ARRAY(&ah->iniModesTxGain,
ar9285Modes_high_power_tx_gain_9285_1_2,
ARRAY_SIZE(
ar9285Modes_high_power_tx_gain_9285_1_2), 6);
}
} else {
INIT_INI_ARRAY(&ah->iniModesTxGain,
ar9285Modes_original_tx_gain_9285_1_2,
ARRAY_SIZE(ar9285Modes_original_tx_gain_9285_1_2), 6);
if (AR_SREV_9285E_20(ah)) {
INIT_INI_ARRAY(&ah->iniModesTxGain,
ar9285Modes_XE2_0_normal_power,
ARRAY_SIZE(
ar9285Modes_XE2_0_normal_power), 6);
} else {
INIT_INI_ARRAY(&ah->iniModesTxGain,
ar9285Modes_original_tx_gain_9285_1_2,
ARRAY_SIZE(
ar9285Modes_original_tx_gain_9285_1_2), 6);
}
}
}
}
static void ath9k_hw_init_eeprom_fix(struct ath_hw *ah)
{
u32 i, j;
struct base_eep_header *pBase = &(ah->eeprom.def.baseEepHeader);
struct ath_common *common = ath9k_hw_common(ah);
if (ah->hw_version.devid == AR9280_DEVID_PCI) {
ah->need_an_top2_fixup = (ah->hw_version.devid == AR9280_DEVID_PCI) &&
(ah->eep_map != EEP_MAP_4KBITS) &&
((pBase->version & 0xff) > 0x0a) &&
(pBase->pwdclkind == 0);
/* EEPROM Fixup */
for (i = 0; i < ah->iniModes.ia_rows; i++) {
u32 reg = INI_RA(&ah->iniModes, i, 0);
for (j = 1; j < ah->iniModes.ia_columns; j++) {
u32 val = INI_RA(&ah->iniModes, i, j);
INI_RA(&ah->iniModes, i, j) =
ath9k_hw_ini_fixup(ah,
&ah->eeprom.def,
reg, val);
}
}
}
if (ah->need_an_top2_fixup)
ath_print(common, ATH_DBG_EEPROM,
"needs fixup for AR_AN_TOP2 register\n");
}
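Editorial note (not part of the patch): the change above replaces rewriting the whole INI table at init time with a single precomputed flag (need_an_top2_fixup) that is consulted when the affected register is actually written. A standalone sketch of that precompute-once, apply-at-write pattern, with invented register names:

#include <stdio.h>
#include <stdint.h>

#define DEMO_AN_TOP2   0x7894           /* invented register address */
#define DEMO_PWDCLKIND 0x00400000u      /* invented field to force clear */

struct demo_hw {
        int need_an_top2_fixup;         /* decided once at init time */
};

static void demo_reg_write(uint32_t reg, uint32_t val)
{
        printf("write 0x%04x = 0x%08x\n", (unsigned)reg, (unsigned)val);
}

static void demo_write_ini_reg(const struct demo_hw *hw, uint32_t reg,
                               uint32_t val)
{
        /* the per-write check replaces the old table-patching loop */
        if (reg == DEMO_AN_TOP2 && hw->need_an_top2_fixup)
                val &= ~DEMO_PWDCLKIND;
        demo_reg_write(reg, val);
}

int main(void)
{
        struct demo_hw hw = { .need_an_top2_fixup = 1 };

        demo_write_ini_reg(&hw, DEMO_AN_TOP2, 0x00ffffff);  /* patched */
        demo_write_ini_reg(&hw, 0x7898, 0x00ffffff);        /* untouched */
        return 0;
}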
int ath9k_hw_init(struct ath_hw *ah)
@ -856,11 +860,13 @@ int ath9k_hw_init(struct ath_hw *ah)
struct ath_common *common = ath9k_hw_common(ah);
int r = 0;
if (!ath9k_hw_devid_supported(ah->hw_version.devid)) {
ath_print(common, ATH_DBG_FATAL,
"Unsupported device ID: 0x%0x\n",
ah->hw_version.devid);
return -EOPNOTSUPP;
if (common->bus_ops->ath_bus_type != ATH_USB) {
if (!ath9k_hw_devid_supported(ah->hw_version.devid)) {
ath_print(common, ATH_DBG_FATAL,
"Unsupported device ID: 0x%0x\n",
ah->hw_version.devid);
return -EOPNOTSUPP;
}
}
ath9k_hw_init_defaults(ah);
@ -1121,23 +1127,23 @@ static void ath9k_hw_init_chain_masks(struct ath_hw *ah)
static void ath9k_hw_init_interrupt_masks(struct ath_hw *ah,
enum nl80211_iftype opmode)
{
ah->mask_reg = AR_IMR_TXERR |
u32 imr_reg = AR_IMR_TXERR |
AR_IMR_TXURN |
AR_IMR_RXERR |
AR_IMR_RXORN |
AR_IMR_BCNMISC;
if (ah->config.rx_intr_mitigation)
ah->mask_reg |= AR_IMR_RXINTM | AR_IMR_RXMINTR;
imr_reg |= AR_IMR_RXINTM | AR_IMR_RXMINTR;
else
ah->mask_reg |= AR_IMR_RXOK;
imr_reg |= AR_IMR_RXOK;
ah->mask_reg |= AR_IMR_TXOK;
imr_reg |= AR_IMR_TXOK;
if (opmode == NL80211_IFTYPE_AP)
ah->mask_reg |= AR_IMR_MIB;
imr_reg |= AR_IMR_MIB;
REG_WRITE(ah, AR_IMR, ah->mask_reg);
REG_WRITE(ah, AR_IMR, imr_reg);
ah->imrs2_reg |= AR_IMR_S2_GTT;
REG_WRITE(ah, AR_IMR_S2, ah->imrs2_reg);
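Editorial note (not part of the patch): the interrupt rework above stops caching the raw IMR register image in driver state; the driver keeps only the logical set of interrupts it asked for and derives the register value locally. A standalone sketch of keeping the request and the register image separate, with invented bit values:

#include <stdio.h>
#include <stdint.h>

enum demo_int {                 /* logical interrupt requests */
        DEMO_INT_RX  = 0x001,
        DEMO_INT_TX  = 0x002,
        DEMO_INT_MIB = 0x100,
};

#define DEMO_IMR_RXOK 0x0001    /* hardware register bits */
#define DEMO_IMR_TXOK 0x0040
#define DEMO_IMR_MIB  0x0800

struct demo_hw {
        enum demo_int imask;    /* what the driver asked for */
};

/* Translate the logical request into the register image. */
static uint32_t demo_build_imr(enum demo_int ints)
{
        uint32_t imr = 0;

        if (ints & DEMO_INT_RX)
                imr |= DEMO_IMR_RXOK;
        if (ints & DEMO_INT_TX)
                imr |= DEMO_IMR_TXOK;
        if (ints & DEMO_INT_MIB)
                imr |= DEMO_IMR_MIB;
        return imr;
}

int main(void)
{
        struct demo_hw hw = { .imask = DEMO_INT_RX | DEMO_INT_TX };

        printf("IMR = 0x%04x\n", (unsigned)demo_build_imr(hw.imask));
        return 0;
}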
@ -1290,51 +1296,6 @@ static void ath9k_hw_override_ini(struct ath_hw *ah,
}
}
static u32 ath9k_hw_def_ini_fixup(struct ath_hw *ah,
struct ar5416_eeprom_def *pEepData,
u32 reg, u32 value)
{
struct base_eep_header *pBase = &(pEepData->baseEepHeader);
struct ath_common *common = ath9k_hw_common(ah);
switch (ah->hw_version.devid) {
case AR9280_DEVID_PCI:
if (reg == 0x7894) {
ath_print(common, ATH_DBG_EEPROM,
"ini VAL: %x EEPROM: %x\n", value,
(pBase->version & 0xff));
if ((pBase->version & 0xff) > 0x0a) {
ath_print(common, ATH_DBG_EEPROM,
"PWDCLKIND: %d\n",
pBase->pwdclkind);
value &= ~AR_AN_TOP2_PWDCLKIND;
value |= AR_AN_TOP2_PWDCLKIND &
(pBase->pwdclkind << AR_AN_TOP2_PWDCLKIND_S);
} else {
ath_print(common, ATH_DBG_EEPROM,
"PWDCLKIND Earlier Rev\n");
}
ath_print(common, ATH_DBG_EEPROM,
"final ini VAL: %x\n", value);
}
break;
}
return value;
}
static u32 ath9k_hw_ini_fixup(struct ath_hw *ah,
struct ar5416_eeprom_def *pEepData,
u32 reg, u32 value)
{
if (ah->eep_map == EEP_MAP_4KBITS)
return value;
else
return ath9k_hw_def_ini_fixup(ah, pEepData, reg, value);
}
static void ath9k_olc_init(struct ath_hw *ah)
{
u32 i;
@ -1440,6 +1401,9 @@ static int ath9k_hw_process_ini(struct ath_hw *ah,
u32 reg = INI_RA(&ah->iniModes, i, 0);
u32 val = INI_RA(&ah->iniModes, i, modesIndex);
if (reg == AR_AN_TOP2 && ah->need_an_top2_fixup)
val &= ~AR_AN_TOP2_PWDCLKIND;
REG_WRITE(ah, reg, val);
if (reg >= 0x7800 && reg < 0x78a0
@ -2840,7 +2804,7 @@ EXPORT_SYMBOL(ath9k_hw_getisr);
enum ath9k_int ath9k_hw_set_interrupts(struct ath_hw *ah, enum ath9k_int ints)
{
u32 omask = ah->mask_reg;
enum ath9k_int omask = ah->imask;
u32 mask, mask2;
struct ath9k_hw_capabilities *pCap = &ah->caps;
struct ath_common *common = ath9k_hw_common(ah);
@ -2912,7 +2876,6 @@ enum ath9k_int ath9k_hw_set_interrupts(struct ath_hw *ah, enum ath9k_int ints)
AR_IMR_S2_TSFOOR | AR_IMR_S2_GTT | AR_IMR_S2_CST);
ah->imrs2_reg |= mask2;
REG_WRITE(ah, AR_IMR_S2, ah->imrs2_reg);
ah->mask_reg = ints;
if (!(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) {
if (ints & ATH9K_INT_TIM_TIMER)
@ -3231,8 +3194,10 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
pCap->hw_caps |= ATH9K_HW_CAP_RFSILENT;
}
#endif
pCap->hw_caps &= ~ATH9K_HW_CAP_AUTOSLEEP;
if (AR_SREV_9271(ah))
pCap->hw_caps |= ATH9K_HW_CAP_AUTOSLEEP;
else
pCap->hw_caps &= ~ATH9K_HW_CAP_AUTOSLEEP;
if (AR_SREV_9280(ah) || AR_SREV_9285(ah))
pCap->hw_caps &= ~ATH9K_HW_CAP_4KB_SPLITTRANS;


@ -44,8 +44,6 @@
#define AR5416_AR9100_DEVID 0x000b
#define AR9271_USB 0x9271
#define AR_SUBVENDOR_ID_NOG 0x0e11
#define AR_SUBVENDOR_ID_NEW_A 0x7065
#define AR5416_MAGIC 0x19641014
@ -461,6 +459,7 @@ struct ath_hw {
bool sw_mgmt_crypto;
bool is_pciexpress;
bool need_an_top2_fixup;
u16 tx_trig_level;
u16 rfsilent;
u32 rfkill_gpio;
@ -478,7 +477,7 @@ struct ath_hw {
struct ath9k_tx_queue_info txq[ATH9K_NUM_TX_QUEUES];
int16_t curchan_rad_index;
u32 mask_reg;
enum ath9k_int imask;
u32 imrs2_reg;
u32 txok_interrupt_mask;
u32 txerr_interrupt_mask;

View File

@ -4184,7 +4184,7 @@ static const u_int32_t ar9285Modes_9285_1_2[][6] = {
{ 0x00009a44, 0x00000000, 0x00000000, 0x000581a8, 0x000581a8, 0x00000000 },
{ 0x00009a48, 0x00000000, 0x00000000, 0x00058284, 0x00058284, 0x00000000 },
{ 0x00009a4c, 0x00000000, 0x00000000, 0x00058288, 0x00058288, 0x00000000 },
{ 0x00009a50, 0x00000000, 0x00000000, 0x00058220, 0x00058220, 0x00000000 },
{ 0x00009a50, 0x00000000, 0x00000000, 0x00058224, 0x00058224, 0x00000000 },
{ 0x00009a54, 0x00000000, 0x00000000, 0x00058290, 0x00058290, 0x00000000 },
{ 0x00009a58, 0x00000000, 0x00000000, 0x00058300, 0x00058300, 0x00000000 },
{ 0x00009a5c, 0x00000000, 0x00000000, 0x00058304, 0x00058304, 0x00000000 },
@ -4198,8 +4198,8 @@ static const u_int32_t ar9285Modes_9285_1_2[][6] = {
{ 0x00009a7c, 0x00000000, 0x00000000, 0x0006870c, 0x0006870c, 0x00000000 },
{ 0x00009a80, 0x00000000, 0x00000000, 0x00068780, 0x00068780, 0x00000000 },
{ 0x00009a84, 0x00000000, 0x00000000, 0x00068784, 0x00068784, 0x00000000 },
{ 0x00009a88, 0x00000000, 0x00000000, 0x00078b04, 0x00078b04, 0x00000000 },
{ 0x00009a8c, 0x00000000, 0x00000000, 0x00078b08, 0x00078b08, 0x00000000 },
{ 0x00009a88, 0x00000000, 0x00000000, 0x00078b00, 0x00078b00, 0x00000000 },
{ 0x00009a8c, 0x00000000, 0x00000000, 0x00078b04, 0x00078b04, 0x00000000 },
{ 0x00009a90, 0x00000000, 0x00000000, 0x00078b08, 0x00078b08, 0x00000000 },
{ 0x00009a94, 0x00000000, 0x00000000, 0x00078b0c, 0x00078b0c, 0x00000000 },
{ 0x00009a98, 0x00000000, 0x00000000, 0x00078b80, 0x00078b80, 0x00000000 },
@ -4312,7 +4312,7 @@ static const u_int32_t ar9285Modes_9285_1_2[][6] = {
{ 0x0000aa44, 0x00000000, 0x00000000, 0x000581a8, 0x000581a8, 0x00000000 },
{ 0x0000aa48, 0x00000000, 0x00000000, 0x00058284, 0x00058284, 0x00000000 },
{ 0x0000aa4c, 0x00000000, 0x00000000, 0x00058288, 0x00058288, 0x00000000 },
{ 0x0000aa50, 0x00000000, 0x00000000, 0x00058220, 0x00058220, 0x00000000 },
{ 0x0000aa50, 0x00000000, 0x00000000, 0x00058224, 0x00058224, 0x00000000 },
{ 0x0000aa54, 0x00000000, 0x00000000, 0x00058290, 0x00058290, 0x00000000 },
{ 0x0000aa58, 0x00000000, 0x00000000, 0x00058300, 0x00058300, 0x00000000 },
{ 0x0000aa5c, 0x00000000, 0x00000000, 0x00058304, 0x00058304, 0x00000000 },
@ -4326,8 +4326,8 @@ static const u_int32_t ar9285Modes_9285_1_2[][6] = {
{ 0x0000aa7c, 0x00000000, 0x00000000, 0x0006870c, 0x0006870c, 0x00000000 },
{ 0x0000aa80, 0x00000000, 0x00000000, 0x00068780, 0x00068780, 0x00000000 },
{ 0x0000aa84, 0x00000000, 0x00000000, 0x00068784, 0x00068784, 0x00000000 },
{ 0x0000aa88, 0x00000000, 0x00000000, 0x00078b04, 0x00078b04, 0x00000000 },
{ 0x0000aa8c, 0x00000000, 0x00000000, 0x00078b08, 0x00078b08, 0x00000000 },
{ 0x0000aa88, 0x00000000, 0x00000000, 0x00078b00, 0x00078b00, 0x00000000 },
{ 0x0000aa8c, 0x00000000, 0x00000000, 0x00078b04, 0x00078b04, 0x00000000 },
{ 0x0000aa90, 0x00000000, 0x00000000, 0x00078b08, 0x00078b08, 0x00000000 },
{ 0x0000aa94, 0x00000000, 0x00000000, 0x00078b0c, 0x00078b0c, 0x00000000 },
{ 0x0000aa98, 0x00000000, 0x00000000, 0x00078b80, 0x00078b80, 0x00000000 },
@ -4731,17 +4731,12 @@ static const u_int32_t ar9285Common_9285_1_2[][2] = {
{ 0x00007808, 0x54214514 },
{ 0x0000780c, 0x02025830 },
{ 0x00007810, 0x71c0d388 },
{ 0x00007814, 0x924934a8 },
{ 0x0000781c, 0x00000000 },
{ 0x00007824, 0x00d86fff },
{ 0x00007828, 0x26d2491b },
{ 0x0000782c, 0x6e36d97b },
{ 0x00007830, 0xedb6d96e },
{ 0x00007834, 0x71400087 },
{ 0x0000783c, 0x0001fffe },
{ 0x00007840, 0xffeb1a20 },
{ 0x00007844, 0x000c0db6 },
{ 0x00007848, 0x6db61b6f },
{ 0x00007848, 0x6db6246f },
{ 0x0000784c, 0x6d9b66db },
{ 0x00007850, 0x6d8c6dba },
{ 0x00007854, 0x00040000 },
@ -4777,7 +4772,12 @@ static const u_int32_t ar9285Modes_high_power_tx_gain_9285_1_2[][6] = {
{ 0x0000a34c, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 },
{ 0x0000a350, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 },
{ 0x0000a354, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 },
{ 0x00007814, 0x924934a8, 0x924934a8, 0x924934a8, 0x924934a8, 0x924934a8 },
{ 0x00007828, 0x26d2491b, 0x26d2491b, 0x26d2491b, 0x26d2491b, 0x26d2491b },
{ 0x00007830, 0xedb6d96e, 0xedb6d96e, 0xedb6d96e, 0xedb6d96e, 0xedb6d96e },
{ 0x00007838, 0xfac68803, 0xfac68803, 0xfac68803, 0xfac68803, 0xfac68803 },
{ 0x0000783c, 0x0001fffe, 0x0001fffe, 0x0001fffe, 0x0001fffe, 0x0001fffe },
{ 0x00007840, 0xffeb1a20, 0xffeb1a20, 0xffeb1a20, 0xffeb1a20, 0xffeb1a20 },
{ 0x0000786c, 0x08609ebe, 0x08609ebe, 0x08609ebe, 0x08609ebe, 0x08609ebe },
{ 0x00007820, 0x00000c00, 0x00000c00, 0x00000c00, 0x00000c00, 0x00000c00 },
{ 0x0000a274, 0x0a22a652, 0x0a22a652, 0x0a216652, 0x0a216652, 0x0a22a652 },
@ -4813,7 +4813,12 @@ static const u_int32_t ar9285Modes_original_tx_gain_9285_1_2[][6] = {
{ 0x0000a34c, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 },
{ 0x0000a350, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 },
{ 0x0000a354, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 },
{ 0x00007814, 0x924934a8, 0x924934a8, 0x924934a8, 0x924934a8, 0x924934a8 },
{ 0x00007828, 0x26d2491b, 0x26d2491b, 0x26d2491b, 0x26d2491b, 0x26d2491b },
{ 0x00007830, 0xedb6d96e, 0xedb6d96e, 0xedb6d96e, 0xedb6d96e, 0xedb6d96e },
{ 0x00007838, 0xfac68801, 0xfac68801, 0xfac68801, 0xfac68801, 0xfac68801 },
{ 0x0000783c, 0x0001fffe, 0x0001fffe, 0x0001fffe, 0x0001fffe, 0x0001fffe },
{ 0x00007840, 0xffeb1a20, 0xffeb1a20, 0xffeb1a20, 0xffeb1a20, 0xffeb1a20 },
{ 0x0000786c, 0x48609eb4, 0x48609eb4, 0x48609eb4, 0x48609eb4, 0x48609eb4 },
{ 0x00007820, 0x00000c04, 0x00000c04, 0x00000c04, 0x00000c04, 0x00000c04 },
{ 0x0000a274, 0x0a21c652, 0x0a21c652, 0x0a21a652, 0x0a21a652, 0x0a22a652 },
@ -4825,6 +4830,86 @@ static const u_int32_t ar9285Modes_original_tx_gain_9285_1_2[][6] = {
{ 0x0000a3e0, 0x0000039c, 0x0000039c, 0x0000039c, 0x0000039c, 0x0000039c },
};
static const u_int32_t ar9285Modes_XE2_0_normal_power[][6] = {
{ 0x0000a300, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
{ 0x0000a304, 0x00000000, 0x00000000, 0x00009200, 0x00009200, 0x00000000 },
{ 0x0000a308, 0x00000000, 0x00000000, 0x00010208, 0x00010208, 0x00000000 },
{ 0x0000a30c, 0x00000000, 0x00000000, 0x00019608, 0x00019608, 0x00000000 },
{ 0x0000a310, 0x00000000, 0x00000000, 0x00022618, 0x00022618, 0x00000000 },
{ 0x0000a314, 0x00000000, 0x00000000, 0x0002a6c9, 0x0002a6c9, 0x00000000 },
{ 0x0000a318, 0x00000000, 0x00000000, 0x00031710, 0x00031710, 0x00000000 },
{ 0x0000a31c, 0x00000000, 0x00000000, 0x00035718, 0x00035718, 0x00000000 },
{ 0x0000a320, 0x00000000, 0x00000000, 0x00038758, 0x00038758, 0x00000000 },
{ 0x0000a324, 0x00000000, 0x00000000, 0x0003c75a, 0x0003c75a, 0x00000000 },
{ 0x0000a328, 0x00000000, 0x00000000, 0x0004075c, 0x0004075c, 0x00000000 },
{ 0x0000a32c, 0x00000000, 0x00000000, 0x0004475e, 0x0004475e, 0x00000000 },
{ 0x0000a330, 0x00000000, 0x00000000, 0x0004679f, 0x0004679f, 0x00000000 },
{ 0x0000a334, 0x00000000, 0x00000000, 0x000487df, 0x000487df, 0x00000000 },
{ 0x0000a338, 0x0003891e, 0x0003891e, 0x0003891e, 0x0003891e, 0x00000000 },
{ 0x0000a33c, 0x0003a95e, 0x0003a95e, 0x0003a95e, 0x0003a95e, 0x00000000 },
{ 0x0000a340, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 },
{ 0x0000a344, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 },
{ 0x0000a348, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 },
{ 0x0000a34c, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 },
{ 0x0000a350, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 },
{ 0x0000a354, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 },
{ 0x00007814, 0x92497ca8, 0x92497ca8, 0x92497ca8, 0x92497ca8, 0x92497ca8 },
{ 0x00007828, 0x4ad2491b, 0x4ad2491b, 0x2ad2491b, 0x4ad2491b, 0x4ad2491b },
{ 0x00007830, 0xedb6da6e, 0xedb6da6e, 0xedb6da6e, 0xedb6da6e, 0xedb6dbae },
{ 0x00007838, 0xdac71441, 0xdac71441, 0xdac71441, 0xdac71441, 0xdac71441 },
{ 0x0000783c, 0x2481f6fe, 0x2481f6fe, 0x2481f6fe, 0x2481f6fe, 0x2481f6fe },
{ 0x00007840, 0xba5f638c, 0xba5f638c, 0xba5f638c, 0xba5f638c, 0xba5f638c },
{ 0x0000786c, 0x48609eb4, 0x48609eb4, 0x48609eb4, 0x48609eb4, 0x48609eb4 },
{ 0x00007820, 0x00000c04, 0x00000c04, 0x00000c04, 0x00000c04, 0x00000c04 },
{ 0x0000a274, 0x0a21c652, 0x0a21c652, 0x0a21a652, 0x0a21a652, 0x0a22a652 },
{ 0x0000a278, 0x39ce739c, 0x39ce739c, 0x39ce739c, 0x39ce739c, 0x39ce739c },
{ 0x0000a27c, 0x050e039c, 0x050e039c, 0x050e039c, 0x050e039c, 0x050e039c },
{ 0x0000a394, 0x39ce739c, 0x39ce739c, 0x39ce739c, 0x39ce739c, 0x39ce739c },
{ 0x0000a398, 0x0000039c, 0x0000039c, 0x0000039c, 0x0000039c, 0x0000039c },
{ 0x0000a3dc, 0x39ce739c, 0x39ce739c, 0x39ce739c, 0x39ce739c, 0x39ce739c },
{ 0x0000a3e0, 0x0000039c, 0x0000039c, 0x0000039c, 0x0000039c, 0x0000039c },
};
static const u_int32_t ar9285Modes_XE2_0_high_power[][6] = {
{ 0x0000a300, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
{ 0x0000a304, 0x00000000, 0x00000000, 0x00006200, 0x00006200, 0x00000000 },
{ 0x0000a308, 0x00000000, 0x00000000, 0x00008201, 0x00008201, 0x00000000 },
{ 0x0000a30c, 0x00000000, 0x00000000, 0x0000b240, 0x0000b240, 0x00000000 },
{ 0x0000a310, 0x00000000, 0x00000000, 0x0000d241, 0x0000d241, 0x00000000 },
{ 0x0000a314, 0x00000000, 0x00000000, 0x0000f600, 0x0000f600, 0x00000000 },
{ 0x0000a318, 0x00000000, 0x00000000, 0x00012800, 0x00012800, 0x00000000 },
{ 0x0000a31c, 0x00000000, 0x00000000, 0x00016802, 0x00016802, 0x00000000 },
{ 0x0000a320, 0x00000000, 0x00000000, 0x0001b805, 0x0001b805, 0x00000000 },
{ 0x0000a324, 0x00000000, 0x00000000, 0x00021a80, 0x00021a80, 0x00000000 },
{ 0x0000a328, 0x00000000, 0x00000000, 0x00028b00, 0x00028b00, 0x00000000 },
{ 0x0000a32c, 0x00000000, 0x00000000, 0x0002ab40, 0x0002ab40, 0x00000000 },
{ 0x0000a330, 0x00000000, 0x00000000, 0x0002cd80, 0x0002cd80, 0x00000000 },
{ 0x0000a334, 0x00000000, 0x00000000, 0x00033d82, 0x00033d82, 0x00000000 },
{ 0x0000a338, 0x0003891e, 0x0003891e, 0x0003891e, 0x0003891e, 0x00000000 },
{ 0x0000a33c, 0x0003a95e, 0x0003a95e, 0x0003a95e, 0x0003a95e, 0x00000000 },
{ 0x0000a340, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 },
{ 0x0000a344, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 },
{ 0x0000a348, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 },
{ 0x0000a34c, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 },
{ 0x0000a350, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 },
{ 0x0000a354, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 },
{ 0x00007814, 0x92497ca8, 0x92497ca8, 0x92497ca8, 0x92497ca8, 0x92497ca8 },
{ 0x00007828, 0x4ad2491b, 0x4ad2491b, 0x2ad2491b, 0x4ad2491b, 0x4ad2491b },
{ 0x00007830, 0xedb6da6e, 0xedb6da6e, 0xedb6da6e, 0xedb6da6e, 0xedb6da6e },
{ 0x00007838, 0xdac71443, 0xdac71443, 0xdac71443, 0xdac71443, 0xdac71443 },
{ 0x0000783c, 0x2481f6fe, 0x2481f6fe, 0x2481f6fe, 0x2481f6fe, 0x2481f6fe },
{ 0x00007840, 0xba5f638c, 0xba5f638c, 0xba5f638c, 0xba5f638c, 0xba5f638c },
{ 0x0000786c, 0x08609ebe, 0x08609ebe, 0x08609ebe, 0x08609ebe, 0x08609ebe },
{ 0x00007820, 0x00000c00, 0x00000c00, 0x00000c00, 0x00000c00, 0x00000c00 },
{ 0x0000a274, 0x0a22a652, 0x0a22a652, 0x0a216652, 0x0a216652, 0x0a22a652 },
{ 0x0000a278, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7 },
{ 0x0000a27c, 0x050380e7, 0x050380e7, 0x050380e7, 0x050380e7, 0x050380e7 },
{ 0x0000a394, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7 },
{ 0x0000a398, 0x000000e7, 0x000000e7, 0x000000e7, 0x000000e7, 0x000000e7 },
{ 0x0000a3dc, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7 },
{ 0x0000a3e0, 0x000000e7, 0x000000e7, 0x000000e7, 0x000000e7, 0x000000e7 },
};
static const u_int32_t ar9285PciePhy_clkreq_always_on_L1_9285_1_2[][2] = {
{0x00004040, 0x9248fd00 },
{0x00004040, 0x24924924 },


@ -105,7 +105,7 @@ bool ath9k_hw_updatetxtriglevel(struct ath_hw *ah, bool bIncTrigLevel)
if (ah->tx_trig_level >= ah->config.max_txtrig_level)
return false;
omask = ath9k_hw_set_interrupts(ah, ah->mask_reg & ~ATH9K_INT_GLOBAL);
omask = ath9k_hw_set_interrupts(ah, ah->imask & ~ATH9K_INT_GLOBAL);
txcfg = REG_READ(ah, AR_TXCFG);
curLevel = MS(txcfg, AR_FTRIG);
@ -246,79 +246,80 @@ void ath9k_hw_cleartxdesc(struct ath_hw *ah, struct ath_desc *ds)
}
EXPORT_SYMBOL(ath9k_hw_cleartxdesc);
int ath9k_hw_txprocdesc(struct ath_hw *ah, struct ath_desc *ds)
int ath9k_hw_txprocdesc(struct ath_hw *ah, struct ath_desc *ds,
struct ath_tx_status *ts)
{
struct ar5416_desc *ads = AR5416DESC(ds);
if ((ads->ds_txstatus9 & AR_TxDone) == 0)
return -EINPROGRESS;
ds->ds_txstat.ts_seqnum = MS(ads->ds_txstatus9, AR_SeqNum);
ds->ds_txstat.ts_tstamp = ads->AR_SendTimestamp;
ds->ds_txstat.ts_status = 0;
ds->ds_txstat.ts_flags = 0;
ts->ts_seqnum = MS(ads->ds_txstatus9, AR_SeqNum);
ts->ts_tstamp = ads->AR_SendTimestamp;
ts->ts_status = 0;
ts->ts_flags = 0;
if (ads->ds_txstatus1 & AR_FrmXmitOK)
ds->ds_txstat.ts_status |= ATH9K_TX_ACKED;
ts->ts_status |= ATH9K_TX_ACKED;
if (ads->ds_txstatus1 & AR_ExcessiveRetries)
ds->ds_txstat.ts_status |= ATH9K_TXERR_XRETRY;
ts->ts_status |= ATH9K_TXERR_XRETRY;
if (ads->ds_txstatus1 & AR_Filtered)
ds->ds_txstat.ts_status |= ATH9K_TXERR_FILT;
ts->ts_status |= ATH9K_TXERR_FILT;
if (ads->ds_txstatus1 & AR_FIFOUnderrun) {
ds->ds_txstat.ts_status |= ATH9K_TXERR_FIFO;
ts->ts_status |= ATH9K_TXERR_FIFO;
ath9k_hw_updatetxtriglevel(ah, true);
}
if (ads->ds_txstatus9 & AR_TxOpExceeded)
ds->ds_txstat.ts_status |= ATH9K_TXERR_XTXOP;
ts->ts_status |= ATH9K_TXERR_XTXOP;
if (ads->ds_txstatus1 & AR_TxTimerExpired)
ds->ds_txstat.ts_status |= ATH9K_TXERR_TIMER_EXPIRED;
ts->ts_status |= ATH9K_TXERR_TIMER_EXPIRED;
if (ads->ds_txstatus1 & AR_DescCfgErr)
ds->ds_txstat.ts_flags |= ATH9K_TX_DESC_CFG_ERR;
ts->ts_flags |= ATH9K_TX_DESC_CFG_ERR;
if (ads->ds_txstatus1 & AR_TxDataUnderrun) {
ds->ds_txstat.ts_flags |= ATH9K_TX_DATA_UNDERRUN;
ts->ts_flags |= ATH9K_TX_DATA_UNDERRUN;
ath9k_hw_updatetxtriglevel(ah, true);
}
if (ads->ds_txstatus1 & AR_TxDelimUnderrun) {
ds->ds_txstat.ts_flags |= ATH9K_TX_DELIM_UNDERRUN;
ts->ts_flags |= ATH9K_TX_DELIM_UNDERRUN;
ath9k_hw_updatetxtriglevel(ah, true);
}
if (ads->ds_txstatus0 & AR_TxBaStatus) {
ds->ds_txstat.ts_flags |= ATH9K_TX_BA;
ds->ds_txstat.ba_low = ads->AR_BaBitmapLow;
ds->ds_txstat.ba_high = ads->AR_BaBitmapHigh;
ts->ts_flags |= ATH9K_TX_BA;
ts->ba_low = ads->AR_BaBitmapLow;
ts->ba_high = ads->AR_BaBitmapHigh;
}
ds->ds_txstat.ts_rateindex = MS(ads->ds_txstatus9, AR_FinalTxIdx);
switch (ds->ds_txstat.ts_rateindex) {
ts->ts_rateindex = MS(ads->ds_txstatus9, AR_FinalTxIdx);
switch (ts->ts_rateindex) {
case 0:
ds->ds_txstat.ts_ratecode = MS(ads->ds_ctl3, AR_XmitRate0);
ts->ts_ratecode = MS(ads->ds_ctl3, AR_XmitRate0);
break;
case 1:
ds->ds_txstat.ts_ratecode = MS(ads->ds_ctl3, AR_XmitRate1);
ts->ts_ratecode = MS(ads->ds_ctl3, AR_XmitRate1);
break;
case 2:
ds->ds_txstat.ts_ratecode = MS(ads->ds_ctl3, AR_XmitRate2);
ts->ts_ratecode = MS(ads->ds_ctl3, AR_XmitRate2);
break;
case 3:
ds->ds_txstat.ts_ratecode = MS(ads->ds_ctl3, AR_XmitRate3);
ts->ts_ratecode = MS(ads->ds_ctl3, AR_XmitRate3);
break;
}
ds->ds_txstat.ts_rssi = MS(ads->ds_txstatus5, AR_TxRSSICombined);
ds->ds_txstat.ts_rssi_ctl0 = MS(ads->ds_txstatus0, AR_TxRSSIAnt00);
ds->ds_txstat.ts_rssi_ctl1 = MS(ads->ds_txstatus0, AR_TxRSSIAnt01);
ds->ds_txstat.ts_rssi_ctl2 = MS(ads->ds_txstatus0, AR_TxRSSIAnt02);
ds->ds_txstat.ts_rssi_ext0 = MS(ads->ds_txstatus5, AR_TxRSSIAnt10);
ds->ds_txstat.ts_rssi_ext1 = MS(ads->ds_txstatus5, AR_TxRSSIAnt11);
ds->ds_txstat.ts_rssi_ext2 = MS(ads->ds_txstatus5, AR_TxRSSIAnt12);
ds->ds_txstat.evm0 = ads->AR_TxEVM0;
ds->ds_txstat.evm1 = ads->AR_TxEVM1;
ds->ds_txstat.evm2 = ads->AR_TxEVM2;
ds->ds_txstat.ts_shortretry = MS(ads->ds_txstatus1, AR_RTSFailCnt);
ds->ds_txstat.ts_longretry = MS(ads->ds_txstatus1, AR_DataFailCnt);
ds->ds_txstat.ts_virtcol = MS(ads->ds_txstatus1, AR_VirtRetryCnt);
ds->ds_txstat.ts_antenna = 0;
ts->ts_rssi = MS(ads->ds_txstatus5, AR_TxRSSICombined);
ts->ts_rssi_ctl0 = MS(ads->ds_txstatus0, AR_TxRSSIAnt00);
ts->ts_rssi_ctl1 = MS(ads->ds_txstatus0, AR_TxRSSIAnt01);
ts->ts_rssi_ctl2 = MS(ads->ds_txstatus0, AR_TxRSSIAnt02);
ts->ts_rssi_ext0 = MS(ads->ds_txstatus5, AR_TxRSSIAnt10);
ts->ts_rssi_ext1 = MS(ads->ds_txstatus5, AR_TxRSSIAnt11);
ts->ts_rssi_ext2 = MS(ads->ds_txstatus5, AR_TxRSSIAnt12);
ts->evm0 = ads->AR_TxEVM0;
ts->evm1 = ads->AR_TxEVM1;
ts->evm2 = ads->AR_TxEVM2;
ts->ts_shortretry = MS(ads->ds_txstatus1, AR_RTSFailCnt);
ts->ts_longretry = MS(ads->ds_txstatus1, AR_DataFailCnt);
ts->ts_virtcol = MS(ads->ds_txstatus1, AR_VirtRetryCnt);
ts->ts_antenna = 0;
return 0;
}
@ -858,7 +859,7 @@ bool ath9k_hw_resettxqueue(struct ath_hw *ah, u32 q)
EXPORT_SYMBOL(ath9k_hw_resettxqueue);
int ath9k_hw_rxprocdesc(struct ath_hw *ah, struct ath_desc *ds,
u32 pa, struct ath_desc *nds, u64 tsf)
struct ath_rx_status *rs, u64 tsf)
{
struct ar5416_desc ads;
struct ar5416_desc *adsp = AR5416DESC(ds);
@ -869,70 +870,70 @@ int ath9k_hw_rxprocdesc(struct ath_hw *ah, struct ath_desc *ds,
ads.u.rx = adsp->u.rx;
ds->ds_rxstat.rs_status = 0;
ds->ds_rxstat.rs_flags = 0;
rs->rs_status = 0;
rs->rs_flags = 0;
ds->ds_rxstat.rs_datalen = ads.ds_rxstatus1 & AR_DataLen;
ds->ds_rxstat.rs_tstamp = ads.AR_RcvTimestamp;
rs->rs_datalen = ads.ds_rxstatus1 & AR_DataLen;
rs->rs_tstamp = ads.AR_RcvTimestamp;
if (ads.ds_rxstatus8 & AR_PostDelimCRCErr) {
ds->ds_rxstat.rs_rssi = ATH9K_RSSI_BAD;
ds->ds_rxstat.rs_rssi_ctl0 = ATH9K_RSSI_BAD;
ds->ds_rxstat.rs_rssi_ctl1 = ATH9K_RSSI_BAD;
ds->ds_rxstat.rs_rssi_ctl2 = ATH9K_RSSI_BAD;
ds->ds_rxstat.rs_rssi_ext0 = ATH9K_RSSI_BAD;
ds->ds_rxstat.rs_rssi_ext1 = ATH9K_RSSI_BAD;
ds->ds_rxstat.rs_rssi_ext2 = ATH9K_RSSI_BAD;
rs->rs_rssi = ATH9K_RSSI_BAD;
rs->rs_rssi_ctl0 = ATH9K_RSSI_BAD;
rs->rs_rssi_ctl1 = ATH9K_RSSI_BAD;
rs->rs_rssi_ctl2 = ATH9K_RSSI_BAD;
rs->rs_rssi_ext0 = ATH9K_RSSI_BAD;
rs->rs_rssi_ext1 = ATH9K_RSSI_BAD;
rs->rs_rssi_ext2 = ATH9K_RSSI_BAD;
} else {
ds->ds_rxstat.rs_rssi = MS(ads.ds_rxstatus4, AR_RxRSSICombined);
ds->ds_rxstat.rs_rssi_ctl0 = MS(ads.ds_rxstatus0,
rs->rs_rssi = MS(ads.ds_rxstatus4, AR_RxRSSICombined);
rs->rs_rssi_ctl0 = MS(ads.ds_rxstatus0,
AR_RxRSSIAnt00);
ds->ds_rxstat.rs_rssi_ctl1 = MS(ads.ds_rxstatus0,
rs->rs_rssi_ctl1 = MS(ads.ds_rxstatus0,
AR_RxRSSIAnt01);
ds->ds_rxstat.rs_rssi_ctl2 = MS(ads.ds_rxstatus0,
rs->rs_rssi_ctl2 = MS(ads.ds_rxstatus0,
AR_RxRSSIAnt02);
ds->ds_rxstat.rs_rssi_ext0 = MS(ads.ds_rxstatus4,
rs->rs_rssi_ext0 = MS(ads.ds_rxstatus4,
AR_RxRSSIAnt10);
ds->ds_rxstat.rs_rssi_ext1 = MS(ads.ds_rxstatus4,
rs->rs_rssi_ext1 = MS(ads.ds_rxstatus4,
AR_RxRSSIAnt11);
ds->ds_rxstat.rs_rssi_ext2 = MS(ads.ds_rxstatus4,
rs->rs_rssi_ext2 = MS(ads.ds_rxstatus4,
AR_RxRSSIAnt12);
}
if (ads.ds_rxstatus8 & AR_RxKeyIdxValid)
ds->ds_rxstat.rs_keyix = MS(ads.ds_rxstatus8, AR_KeyIdx);
rs->rs_keyix = MS(ads.ds_rxstatus8, AR_KeyIdx);
else
ds->ds_rxstat.rs_keyix = ATH9K_RXKEYIX_INVALID;
rs->rs_keyix = ATH9K_RXKEYIX_INVALID;
ds->ds_rxstat.rs_rate = RXSTATUS_RATE(ah, (&ads));
ds->ds_rxstat.rs_more = (ads.ds_rxstatus1 & AR_RxMore) ? 1 : 0;
rs->rs_rate = RXSTATUS_RATE(ah, (&ads));
rs->rs_more = (ads.ds_rxstatus1 & AR_RxMore) ? 1 : 0;
ds->ds_rxstat.rs_isaggr = (ads.ds_rxstatus8 & AR_RxAggr) ? 1 : 0;
ds->ds_rxstat.rs_moreaggr =
rs->rs_isaggr = (ads.ds_rxstatus8 & AR_RxAggr) ? 1 : 0;
rs->rs_moreaggr =
(ads.ds_rxstatus8 & AR_RxMoreAggr) ? 1 : 0;
ds->ds_rxstat.rs_antenna = MS(ads.ds_rxstatus3, AR_RxAntenna);
ds->ds_rxstat.rs_flags =
rs->rs_antenna = MS(ads.ds_rxstatus3, AR_RxAntenna);
rs->rs_flags =
(ads.ds_rxstatus3 & AR_GI) ? ATH9K_RX_GI : 0;
ds->ds_rxstat.rs_flags |=
rs->rs_flags |=
(ads.ds_rxstatus3 & AR_2040) ? ATH9K_RX_2040 : 0;
if (ads.ds_rxstatus8 & AR_PreDelimCRCErr)
ds->ds_rxstat.rs_flags |= ATH9K_RX_DELIM_CRC_PRE;
rs->rs_flags |= ATH9K_RX_DELIM_CRC_PRE;
if (ads.ds_rxstatus8 & AR_PostDelimCRCErr)
ds->ds_rxstat.rs_flags |= ATH9K_RX_DELIM_CRC_POST;
rs->rs_flags |= ATH9K_RX_DELIM_CRC_POST;
if (ads.ds_rxstatus8 & AR_DecryptBusyErr)
ds->ds_rxstat.rs_flags |= ATH9K_RX_DECRYPT_BUSY;
rs->rs_flags |= ATH9K_RX_DECRYPT_BUSY;
if ((ads.ds_rxstatus8 & AR_RxFrameOK) == 0) {
if (ads.ds_rxstatus8 & AR_CRCErr)
ds->ds_rxstat.rs_status |= ATH9K_RXERR_CRC;
rs->rs_status |= ATH9K_RXERR_CRC;
else if (ads.ds_rxstatus8 & AR_PHYErr) {
ds->ds_rxstat.rs_status |= ATH9K_RXERR_PHY;
rs->rs_status |= ATH9K_RXERR_PHY;
phyerr = MS(ads.ds_rxstatus8, AR_PHYErrCode);
ds->ds_rxstat.rs_phyerr = phyerr;
rs->rs_phyerr = phyerr;
} else if (ads.ds_rxstatus8 & AR_DecryptCRCErr)
ds->ds_rxstat.rs_status |= ATH9K_RXERR_DECRYPT;
rs->rs_status |= ATH9K_RXERR_DECRYPT;
else if (ads.ds_rxstatus8 & AR_MichaelErr)
ds->ds_rxstat.rs_status |= ATH9K_RXERR_MIC;
rs->rs_status |= ATH9K_RXERR_MIC;
}
return 0;
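Editorial note (not part of the patch): the hunks above stop writing completion status into a union embedded in the DMA descriptor and instead decode it into a status structure owned by the caller, so the descriptor stays a pure hardware object. A standalone sketch of that decode-into-caller-struct shape, with an invented descriptor layout:

#include <stdio.h>
#include <string.h>
#include <stdint.h>

struct demo_desc {              /* what the hardware writes back */
        uint32_t status0;
        uint32_t status1;
};

struct demo_tx_status {         /* what the driver wants to know */
        int acked;
        int retries;
};

#define DEMO_TX_DONE     0x80000000u
#define DEMO_TX_ACKED    0x00000001u
#define DEMO_RETRY_MASK  0x000000f0u
#define DEMO_RETRY_SHIFT 4

/* Returns 0 on success, -1 while the hardware is still working. */
static int demo_txprocdesc(const struct demo_desc *ds,
                           struct demo_tx_status *ts)
{
        if (!(ds->status0 & DEMO_TX_DONE))
                return -1;

        memset(ts, 0, sizeof(*ts));
        ts->acked = !!(ds->status1 & DEMO_TX_ACKED);
        ts->retries = (ds->status1 & DEMO_RETRY_MASK) >> DEMO_RETRY_SHIFT;
        return 0;
}

int main(void)
{
        struct demo_desc ds = { .status0 = DEMO_TX_DONE, .status1 = 0x21 };
        struct demo_tx_status ts;

        if (demo_txprocdesc(&ds, &ts) == 0)
                printf("acked=%d retries=%d\n", ts.acked, ts.retries);
        return 0;
}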


@ -233,18 +233,9 @@ struct ath_desc {
u32 ds_ctl0;
u32 ds_ctl1;
u32 ds_hw[20];
union {
struct ath_tx_status tx;
struct ath_rx_status rx;
void *stats;
} ds_us;
void *ds_vdata;
} __packed;
#define ds_txstat ds_us.tx
#define ds_rxstat ds_us.rx
#define ds_stat ds_us.stats
#define ATH9K_TXDESC_CLRDMASK 0x0001
#define ATH9K_TXDESC_NOACK 0x0002
#define ATH9K_TXDESC_RTSENA 0x0004
@ -702,7 +693,8 @@ void ath9k_hw_filltxdesc(struct ath_hw *ah, struct ath_desc *ds,
u32 segLen, bool firstSeg,
bool lastSeg, const struct ath_desc *ds0);
void ath9k_hw_cleartxdesc(struct ath_hw *ah, struct ath_desc *ds);
int ath9k_hw_txprocdesc(struct ath_hw *ah, struct ath_desc *ds);
int ath9k_hw_txprocdesc(struct ath_hw *ah, struct ath_desc *ds,
struct ath_tx_status *ts);
void ath9k_hw_set11n_txdesc(struct ath_hw *ah, struct ath_desc *ds,
u32 pktLen, enum ath9k_pkt_type type, u32 txPower,
u32 keyIx, enum ath9k_key_type keyType, u32 flags);
@ -732,7 +724,7 @@ int ath9k_hw_setuptxqueue(struct ath_hw *ah, enum ath9k_tx_queue type,
bool ath9k_hw_releasetxqueue(struct ath_hw *ah, u32 q);
bool ath9k_hw_resettxqueue(struct ath_hw *ah, u32 q);
int ath9k_hw_rxprocdesc(struct ath_hw *ah, struct ath_desc *ds,
u32 pa, struct ath_desc *nds, u64 tsf);
struct ath_rx_status *rs, u64 tsf);
void ath9k_hw_setuprxdesc(struct ath_hw *ah, struct ath_desc *ds,
u32 size, u32 flags);
bool ath9k_hw_setrxabort(struct ath_hw *ah, bool set);


@ -225,7 +225,7 @@ int ath_set_channel(struct ath_softc *sc, struct ieee80211_hw *hw,
ath_cache_conf_rate(sc, &hw->conf);
ath_update_txpow(sc);
ath9k_hw_set_interrupts(ah, sc->imask);
ath9k_hw_set_interrupts(ah, ah->imask);
ps_restore:
ath9k_ps_restore(sc);
@ -434,7 +434,7 @@ void ath9k_tasklet(unsigned long data)
ath_gen_timer_isr(sc->sc_ah);
/* re-enable hardware interrupt */
ath9k_hw_set_interrupts(ah, sc->imask);
ath9k_hw_set_interrupts(ah, ah->imask);
ath9k_ps_restore(sc);
}
@ -477,7 +477,7 @@ irqreturn_t ath_isr(int irq, void *dev)
* value to insure we only process bits we requested.
*/
ath9k_hw_getisr(ah, &status); /* NB: clears ISR too */
status &= sc->imask; /* discard unasked-for bits */
status &= ah->imask; /* discard unasked-for bits */
/*
* If there are no status bits set, then this interrupt was not
@ -518,7 +518,7 @@ irqreturn_t ath_isr(int irq, void *dev)
* the interrupt.
*/
ath9k_hw_procmibevent(ah);
ath9k_hw_set_interrupts(ah, sc->imask);
ath9k_hw_set_interrupts(ah, ah->imask);
}
if (!(ah->caps.hw_caps & ATH9K_HW_CAP_AUTOSLEEP))
@ -536,7 +536,7 @@ chip_reset:
if (sched) {
/* turn off every interrupt except SWBA */
ath9k_hw_set_interrupts(ah, (sc->imask & ATH9K_INT_SWBA));
ath9k_hw_set_interrupts(ah, (ah->imask & ATH9K_INT_SWBA));
tasklet_schedule(&sc->intr_tq);
}
@ -887,7 +887,7 @@ void ath_radio_enable(struct ath_softc *sc, struct ieee80211_hw *hw)
ath_beacon_config(sc, NULL); /* restart beacons */
/* Re-Enable interrupts */
ath9k_hw_set_interrupts(ah, sc->imask);
ath9k_hw_set_interrupts(ah, ah->imask);
/* Enable LED */
ath9k_hw_cfg_output(ah, ah->led_pin,
@ -977,7 +977,7 @@ int ath_reset(struct ath_softc *sc, bool retry_tx)
if (sc->sc_flags & SC_OP_BEACONS)
ath_beacon_config(sc, NULL); /* restart beacons */
ath9k_hw_set_interrupts(ah, sc->imask);
ath9k_hw_set_interrupts(ah, ah->imask);
if (retry_tx) {
int i;
@ -1162,23 +1162,23 @@ static int ath9k_start(struct ieee80211_hw *hw)
}
/* Setup our intr mask. */
sc->imask = ATH9K_INT_RX | ATH9K_INT_TX
ah->imask = ATH9K_INT_RX | ATH9K_INT_TX
| ATH9K_INT_RXEOL | ATH9K_INT_RXORN
| ATH9K_INT_FATAL | ATH9K_INT_GLOBAL;
if (ah->caps.hw_caps & ATH9K_HW_CAP_GTT)
sc->imask |= ATH9K_INT_GTT;
ah->imask |= ATH9K_INT_GTT;
if (ah->caps.hw_caps & ATH9K_HW_CAP_HT)
sc->imask |= ATH9K_INT_CST;
ah->imask |= ATH9K_INT_CST;
ath_cache_conf_rate(sc, &hw->conf);
sc->sc_flags &= ~SC_OP_INVALID;
/* Disable BMISS interrupt when we're not associated */
sc->imask &= ~(ATH9K_INT_SWBA | ATH9K_INT_BMISS);
ath9k_hw_set_interrupts(ah, sc->imask);
ah->imask &= ~(ATH9K_INT_SWBA | ATH9K_INT_BMISS);
ath9k_hw_set_interrupts(ah, ah->imask);
ieee80211_wake_queues(hw);
@ -1372,14 +1372,15 @@ static int ath9k_add_interface(struct ieee80211_hw *hw,
{
struct ath_wiphy *aphy = hw->priv;
struct ath_softc *sc = aphy->sc;
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
struct ath_hw *ah = sc->sc_ah;
struct ath_common *common = ath9k_hw_common(ah);
struct ath_vif *avp = (void *)vif->drv_priv;
enum nl80211_iftype ic_opmode = NL80211_IFTYPE_UNSPECIFIED;
int ret = 0;
mutex_lock(&sc->mutex);
if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_BSSIDMASK) &&
if (!(ah->caps.hw_caps & ATH9K_HW_CAP_BSSIDMASK) &&
sc->nvifs > 0) {
ret = -ENOBUFS;
goto out;
@ -1414,19 +1415,19 @@ static int ath9k_add_interface(struct ieee80211_hw *hw,
sc->nvifs++;
if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_BSSIDMASK)
if (ah->caps.hw_caps & ATH9K_HW_CAP_BSSIDMASK)
ath9k_set_bssid_mask(hw);
if (sc->nvifs > 1)
goto out; /* skip global settings for secondary vif */
if (ic_opmode == NL80211_IFTYPE_AP) {
ath9k_hw_set_tsfadjust(sc->sc_ah, 1);
ath9k_hw_set_tsfadjust(ah, 1);
sc->sc_flags |= SC_OP_TSF_RESET;
}
/* Set the device opmode */
sc->sc_ah->opmode = ic_opmode;
ah->opmode = ic_opmode;
/*
* Enable MIB interrupts when there are hardware phy counters.
@ -1435,11 +1436,11 @@ static int ath9k_add_interface(struct ieee80211_hw *hw,
if ((vif->type == NL80211_IFTYPE_STATION) ||
(vif->type == NL80211_IFTYPE_ADHOC) ||
(vif->type == NL80211_IFTYPE_MESH_POINT)) {
sc->imask |= ATH9K_INT_MIB;
sc->imask |= ATH9K_INT_TSFOOR;
ah->imask |= ATH9K_INT_MIB;
ah->imask |= ATH9K_INT_TSFOOR;
}
ath9k_hw_set_interrupts(sc->sc_ah, sc->imask);
ath9k_hw_set_interrupts(ah, ah->imask);
if (vif->type == NL80211_IFTYPE_AP ||
vif->type == NL80211_IFTYPE_ADHOC ||
@ -1495,15 +1496,16 @@ static void ath9k_remove_interface(struct ieee80211_hw *hw,
void ath9k_enable_ps(struct ath_softc *sc)
{
struct ath_hw *ah = sc->sc_ah;
sc->ps_enabled = true;
if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) {
if ((sc->imask & ATH9K_INT_TIM_TIMER) == 0) {
sc->imask |= ATH9K_INT_TIM_TIMER;
ath9k_hw_set_interrupts(sc->sc_ah,
sc->imask);
if (!(ah->caps.hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) {
if ((ah->imask & ATH9K_INT_TIM_TIMER) == 0) {
ah->imask |= ATH9K_INT_TIM_TIMER;
ath9k_hw_set_interrupts(ah, ah->imask);
}
}
ath9k_hw_setrxabort(sc->sc_ah, 1);
ath9k_hw_setrxabort(ah, 1);
}
static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
@ -1579,10 +1581,10 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
PS_WAIT_FOR_CAB |
PS_WAIT_FOR_PSPOLL_DATA |
PS_WAIT_FOR_TX_ACK);
if (sc->imask & ATH9K_INT_TIM_TIMER) {
sc->imask &= ~ATH9K_INT_TIM_TIMER;
if (ah->imask & ATH9K_INT_TIM_TIMER) {
ah->imask &= ~ATH9K_INT_TIM_TIMER;
ath9k_hw_set_interrupts(sc->sc_ah,
sc->imask);
ah->imask);
}
}
}


@ -88,6 +88,7 @@ static void ath_pci_bt_coex_prep(struct ath_common *common)
}
static const struct ath_bus_ops ath_pci_bus_ops = {
.ath_bus_type = ATH_PCI,
.read_cachesize = ath_pci_read_cachesize,
.eeprom_read = ath_pci_eeprom_read,
.bt_coex_prep = ath_pci_bt_coex_prep,


@ -503,6 +503,8 @@ bool ath9k_hw_set_rf_regs(struct ath_hw *ah,
#define AR_PHY_TX_PWRCTRL_ERR_EST_MODE_S 24
#define AR_PHY_TX_PWRCTRL7 0xa274
#define AR_PHY_TX_PWRCTRL_TX_GAIN_TAB_MAX 0x0007E000
#define AR_PHY_TX_PWRCTRL_TX_GAIN_TAB_MAX_S 13
#define AR_PHY_TX_PWRCTRL_INIT_TX_GAIN 0x01F80000
#define AR_PHY_TX_PWRCTRL_INIT_TX_GAIN_S 19
@ -513,8 +515,16 @@ bool ath9k_hw_set_rf_regs(struct ath_hw *ah,
#define AR_PHY_TX_PWRCTRL9_RES_DC_REMOVAL_S 31
#define AR_PHY_TX_GAIN_TBL1 0xa300
#define AR_PHY_TX_GAIN 0x0007F000
#define AR_PHY_TX_GAIN_S 12
#define AR_PHY_TX_GAIN_CLC 0x0000001E
#define AR_PHY_TX_GAIN_CLC_S 1
#define AR_PHY_TX_GAIN 0x0007F000
#define AR_PHY_TX_GAIN_S 12
#define AR_PHY_CLC_TBL1 0xa35c
#define AR_PHY_CLC_I0 0x07ff0000
#define AR_PHY_CLC_I0_S 16
#define AR_PHY_CLC_Q0 0x0000ffd0
#define AR_PHY_CLC_Q0_S 5
#define AR_PHY_CH0_TX_PWRCTRL11 0xa398
#define AR_PHY_CH1_TX_PWRCTRL11 0xb398

View File

@ -176,9 +176,9 @@ struct ath_rate_priv {
#define ATH_TX_INFO_UNDERRUN (1 << 4)
enum ath9k_internal_frame_type {
ATH9K_NOT_INTERNAL,
ATH9K_INT_PAUSE,
ATH9K_INT_UNPAUSE
ATH9K_IFT_NOT_INTERNAL,
ATH9K_IFT_PAUSE,
ATH9K_IFT_UNPAUSE
};
int ath_rate_control_register(void);


@ -477,7 +477,6 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush)
struct ath_buf *bf;
struct ath_desc *ds;
struct ath_rx_status *rx_stats;
struct sk_buff *skb = NULL, *requeue_skb;
struct ieee80211_rx_status *rxs;
struct ath_hw *ah = sc->sc_ah;
@ -491,6 +490,7 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush)
struct ieee80211_hdr *hdr;
int retval;
bool decrypt_error = false;
struct ath_rx_status rs;
spin_lock_bh(&sc->rx.rxbuflock);
@ -518,14 +518,14 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush)
* on. All this is necessary because of our use of
* a self-linked list to avoid rx overruns.
*/
retval = ath9k_hw_rxprocdesc(ah, ds,
bf->bf_daddr,
PA2DESC(sc, ds->ds_link),
0);
memset(&rs, 0, sizeof(rs));
retval = ath9k_hw_rxprocdesc(ah, ds, &rs, 0);
if (retval == -EINPROGRESS) {
struct ath_rx_status trs;
struct ath_buf *tbf;
struct ath_desc *tds;
memset(&trs, 0, sizeof(trs));
if (list_is_last(&bf->list, &sc->rx.rxbuf)) {
sc->rx.rxlink = NULL;
break;
@ -545,8 +545,7 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush)
*/
tds = tbf->bf_desc;
retval = ath9k_hw_rxprocdesc(ah, tds, tbf->bf_daddr,
PA2DESC(sc, tds->ds_link), 0);
retval = ath9k_hw_rxprocdesc(ah, tds, &trs, 0);
if (retval == -EINPROGRESS) {
break;
}
@ -569,9 +568,8 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush)
rxs = IEEE80211_SKB_RXCB(skb);
hw = ath_get_virt_hw(sc, hdr);
rx_stats = &ds->ds_rxstat;
ath_debug_stat_rx(sc, bf);
ath_debug_stat_rx(sc, &rs);
/*
* If we're asked to flush receive queue, directly
@ -580,7 +578,7 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush)
if (flush)
goto requeue;
retval = ath9k_cmn_rx_skb_preprocess(common, hw, skb, rx_stats,
retval = ath9k_cmn_rx_skb_preprocess(common, hw, skb, &rs,
rxs, &decrypt_error);
if (retval)
goto requeue;
@ -601,9 +599,9 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush)
common->rx_bufsize,
DMA_FROM_DEVICE);
skb_put(skb, rx_stats->rs_datalen);
skb_put(skb, rs.rs_datalen);
ath9k_cmn_rx_skb_postprocess(common, skb, rx_stats,
ath9k_cmn_rx_skb_postprocess(common, skb, &rs,
rxs, decrypt_error);
/* We will now give hardware our shiny new allocated skb */
@ -626,9 +624,9 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush)
* change the default rx antenna if rx diversity chooses the
* other antenna 3 times in a row.
*/
if (sc->rx.defant != ds->ds_rxstat.rs_antenna) {
if (sc->rx.defant != rs.rs_antenna) {
if (++sc->rx.rxotherant >= 3)
ath_setdefantenna(sc, rx_stats->rs_antenna);
ath_setdefantenna(sc, rs.rs_antenna);
} else {
sc->rx.rxotherant = 0;
}


@ -679,7 +679,7 @@
#define AR_WA 0x4004
#define AR_WA_D3_L1_DISABLE (1 << 14)
#define AR9285_WA_DEFAULT 0x004a05cb
#define AR9285_WA_DEFAULT 0x004a050b
#define AR9280_WA_DEFAULT 0x0040073b
#define AR_WA_DEFAULT 0x0000073f
@ -845,6 +845,10 @@
(AR_SREV_9271(_ah) && \
((_ah)->hw_version.macRev == AR_SREV_REVISION_9271_11))
#define AR_SREV_9285E_20(_ah) \
(AR_SREV_9285_12_OR_LATER(_ah) && \
((REG_READ(_ah, AR_AN_SYNTH9) & 0x7) == 0x1))
#define AR_RADIO_SREV_MAJOR 0xf0
#define AR_RAD5133_SREV_MAJOR 0xc0
#define AR_RAD2133_SREV_MAJOR 0xd0
@ -1181,6 +1185,13 @@ enum {
#define AR9285_AN_RF2G4_DB2_4 0x00003800
#define AR9285_AN_RF2G4_DB2_4_S 11
#define AR9285_RF2G5 0x7830
#define AR9285_RF2G5_IC50TX 0xfffff8ff
#define AR9285_RF2G5_IC50TX_SET 0x00000400
#define AR9285_RF2G5_IC50TX_XE_SET 0x00000500
#define AR9285_RF2G5_IC50TX_CLEAR 0x00000700
#define AR9285_RF2G5_IC50TX_CLEAR_S 8
/* AR9271 : 0x7828, 0x782c different setting from AR9285 */
#define AR9271_AN_RF2G3_OB_cck 0x001C0000
#define AR9271_AN_RF2G3_OB_cck_S 18


@ -220,7 +220,7 @@ static int ath9k_send_nullfunc(struct ath_wiphy *aphy,
memset(&txctl, 0, sizeof(struct ath_tx_control));
txctl.txq = &sc->tx.txq[sc->tx.hwq_map[ATH9K_WME_AC_VO]];
txctl.frame_type = ps ? ATH9K_INT_PAUSE : ATH9K_INT_UNPAUSE;
txctl.frame_type = ps ? ATH9K_IFT_PAUSE : ATH9K_IFT_UNPAUSE;
if (ath_tx_start(aphy->hw, skb, &txctl) != 0)
goto exit;


@ -169,7 +169,7 @@ void ath9k_wmi_tasklet(unsigned long data)
break;
}
dev_kfree_skb_any(skb);
kfree_skb(skb);
}
static void ath9k_wmi_rsp_callback(struct wmi *wmi, struct sk_buff *skb)
@ -207,13 +207,13 @@ static void ath9k_wmi_ctrl_rx(void *priv, struct sk_buff *skb,
ath9k_wmi_rsp_callback(wmi, skb);
free_skb:
dev_kfree_skb_any(skb);
kfree_skb(skb);
}
static void ath9k_wmi_ctrl_tx(void *priv, struct sk_buff *skb,
enum htc_endpoint_id epid, bool txok)
{
dev_kfree_skb_any(skb);
kfree_skb(skb);
}
int ath9k_wmi_connect(struct htc_target *htc, struct wmi *wmi,
@ -269,7 +269,7 @@ int ath9k_wmi_cmd(struct wmi *wmi, enum wmi_cmd_id cmd_id,
if (!wmi)
return -EINVAL;
skb = dev_alloc_skb(headroom + cmd_len);
skb = alloc_skb(headroom + cmd_len, GFP_ATOMIC);
if (!skb)
return -ENOMEM;
@ -313,7 +313,7 @@ out:
ath_print(common, ATH_DBG_WMI,
"WMI failure for: %s\n", wmi_cmd_to_name(cmd_id));
mutex_unlock(&wmi->op_mutex);
dev_kfree_skb_any(skb);
kfree_skb(skb);
return ret;
}

View File

@ -59,15 +59,14 @@ static void ath_tx_send_ht_normal(struct ath_softc *sc, struct ath_txq *txq,
struct ath_atx_tid *tid,
struct list_head *bf_head);
static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
struct ath_txq *txq,
struct list_head *bf_q,
int txok, int sendbar);
struct ath_txq *txq, struct list_head *bf_q,
struct ath_tx_status *ts, int txok, int sendbar);
static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
struct list_head *head);
static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf);
static int ath_tx_num_badfrms(struct ath_softc *sc, struct ath_buf *bf,
int txok);
static void ath_tx_rc_status(struct ath_buf *bf, struct ath_desc *ds,
struct ath_tx_status *ts, int txok);
static void ath_tx_rc_status(struct ath_buf *bf, struct ath_tx_status *ts,
int nbad, int txok, bool update_rc);
enum {
@ -223,6 +222,9 @@ static void ath_tid_drain(struct ath_softc *sc, struct ath_txq *txq,
{
struct ath_buf *bf;
struct list_head bf_head;
struct ath_tx_status ts;
memset(&ts, 0, sizeof(ts));
INIT_LIST_HEAD(&bf_head);
for (;;) {
@ -236,7 +238,7 @@ static void ath_tid_drain(struct ath_softc *sc, struct ath_txq *txq,
ath_tx_update_baw(sc, tid, bf->bf_seqno);
spin_unlock(&txq->axq_lock);
ath_tx_complete_buf(sc, bf, txq, &bf_head, 0, 0);
ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
spin_lock(&txq->axq_lock);
}
@ -286,7 +288,7 @@ static struct ath_buf* ath_clone_txbuf(struct ath_softc *sc, struct ath_buf *bf)
static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
struct ath_buf *bf, struct list_head *bf_q,
int txok)
struct ath_tx_status *ts, int txok)
{
struct ath_node *an = NULL;
struct sk_buff *skb;
@ -296,7 +298,6 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
struct ieee80211_tx_info *tx_info;
struct ath_atx_tid *tid = NULL;
struct ath_buf *bf_next, *bf_last = bf->bf_lastbf;
struct ath_desc *ds = bf_last->bf_desc;
struct list_head bf_head, bf_pending;
u16 seq_st = 0, acked_cnt = 0, txfail_cnt = 0;
u32 ba[WME_BA_BMP_SIZE >> 5];
@ -325,10 +326,9 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
memset(ba, 0, WME_BA_BMP_SIZE >> 3);
if (isaggr && txok) {
if (ATH_DS_TX_BA(ds)) {
seq_st = ATH_DS_BA_SEQ(ds);
memcpy(ba, ATH_DS_BA_BITMAP(ds),
WME_BA_BMP_SIZE >> 3);
if (ts->ts_flags & ATH9K_TX_BA) {
seq_st = ts->ts_seqnum;
memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
} else {
/*
* AR5416 can become deaf/mute when BA
@ -345,7 +345,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
INIT_LIST_HEAD(&bf_pending);
INIT_LIST_HEAD(&bf_head);
nbad = ath_tx_num_badfrms(sc, bf, txok);
nbad = ath_tx_num_badfrms(sc, bf, ts, txok);
while (bf) {
txfail = txpending = 0;
bf_next = bf->bf_next;
@ -359,7 +359,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
acked_cnt++;
} else {
if (!(tid->state & AGGR_CLEANUP) &&
ds->ds_txstat.ts_flags != ATH9K_TX_SW_ABORTED) {
ts->ts_flags != ATH9K_TX_SW_ABORTED) {
if (bf->bf_retries < ATH_MAX_SW_RETRIES) {
ath_tx_set_retry(sc, txq, bf);
txpending = 1;
@ -402,13 +402,14 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
spin_unlock_bh(&txq->axq_lock);
if (rc_update && (acked_cnt == 1 || txfail_cnt == 1)) {
ath_tx_rc_status(bf, ds, nbad, txok, true);
ath_tx_rc_status(bf, ts, nbad, txok, true);
rc_update = false;
} else {
ath_tx_rc_status(bf, ds, nbad, txok, false);
ath_tx_rc_status(bf, ts, nbad, txok, false);
}
ath_tx_complete_buf(sc, bf, txq, &bf_head, !txfail, sendbar);
ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
!txfail, sendbar);
} else {
/* retry the un-acked ones */
if (bf->bf_next == NULL && bf_last->bf_stale) {
@ -426,10 +427,10 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
spin_unlock_bh(&txq->axq_lock);
bf->bf_state.bf_type |= BUF_XRETRY;
ath_tx_rc_status(bf, ds, nbad,
ath_tx_rc_status(bf, ts, nbad,
0, false);
ath_tx_complete_buf(sc, bf, txq,
&bf_head, 0, 0);
&bf_head, ts, 0, 0);
break;
}
@ -752,8 +753,11 @@ void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
struct ath_node *an = (struct ath_node *)sta->drv_priv;
struct ath_atx_tid *txtid = ATH_AN_2_TID(an, tid);
struct ath_txq *txq = &sc->tx.txq[txtid->ac->qnum];
struct ath_tx_status ts;
struct ath_buf *bf;
struct list_head bf_head;
memset(&ts, 0, sizeof(ts));
INIT_LIST_HEAD(&bf_head);
if (txtid->state & AGGR_CLEANUP)
@ -780,7 +784,7 @@ void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
}
list_move_tail(&bf->list, &bf_head);
ath_tx_update_baw(sc, txtid, bf->bf_seqno);
ath_tx_complete_buf(sc, bf, txq, &bf_head, 0, 0);
ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
}
spin_unlock_bh(&txq->axq_lock);
@ -1028,6 +1032,11 @@ void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq, bool retry_tx)
{
struct ath_buf *bf, *lastbf;
struct list_head bf_head;
struct ath_tx_status ts;
memset(&ts, 0, sizeof(ts));
if (!retry_tx)
ts.ts_flags = ATH9K_TX_SW_ABORTED;
INIT_LIST_HEAD(&bf_head);
@ -1053,9 +1062,6 @@ void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq, bool retry_tx)
}
lastbf = bf->bf_lastbf;
if (!retry_tx)
lastbf->bf_desc->ds_txstat.ts_flags =
ATH9K_TX_SW_ABORTED;
/* remove ath_buf's of the same mpdu from txq */
list_cut_position(&bf_head, &txq->axq_q, &lastbf->list);
@ -1064,9 +1070,9 @@ void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq, bool retry_tx)
spin_unlock_bh(&txq->axq_lock);
if (bf_isampdu(bf))
ath_tx_complete_aggr(sc, txq, bf, &bf_head, 0);
ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, 0);
else
ath_tx_complete_buf(sc, bf, txq, &bf_head, 0, 0);
ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
}
spin_lock_bh(&txq->axq_lock);
@ -1568,12 +1574,12 @@ static int ath_tx_setup_buffer(struct ieee80211_hw *hw, struct ath_buf *bf,
tx_info->pad[0] = 0;
switch (txctl->frame_type) {
case ATH9K_NOT_INTERNAL:
case ATH9K_IFT_NOT_INTERNAL:
break;
case ATH9K_INT_PAUSE:
case ATH9K_IFT_PAUSE:
tx_info->pad[0] |= ATH_TX_INFO_FRAME_TYPE_PAUSE;
/* fall through */
case ATH9K_INT_UNPAUSE:
case ATH9K_IFT_UNPAUSE:
tx_info->pad[0] |= ATH_TX_INFO_FRAME_TYPE_INTERNAL;
break;
}
@ -1852,9 +1858,8 @@ static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
}
static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
struct ath_txq *txq,
struct list_head *bf_q,
int txok, int sendbar)
struct ath_txq *txq, struct list_head *bf_q,
struct ath_tx_status *ts, int txok, int sendbar)
{
struct sk_buff *skb = bf->bf_mpdu;
unsigned long flags;
@ -1872,7 +1877,7 @@ static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
dma_unmap_single(sc->dev, bf->bf_dmacontext, skb->len, DMA_TO_DEVICE);
ath_tx_complete(sc, skb, bf->aphy, tx_flags);
ath_debug_stat_tx(sc, txq, bf);
ath_debug_stat_tx(sc, txq, bf, ts);
/*
* Return the list of ath_buf of this mpdu to free queue
@ -1883,23 +1888,21 @@ static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
}
static int ath_tx_num_badfrms(struct ath_softc *sc, struct ath_buf *bf,
int txok)
struct ath_tx_status *ts, int txok)
{
struct ath_buf *bf_last = bf->bf_lastbf;
struct ath_desc *ds = bf_last->bf_desc;
u16 seq_st = 0;
u32 ba[WME_BA_BMP_SIZE >> 5];
int ba_index;
int nbad = 0;
int isaggr = 0;
if (ds->ds_txstat.ts_flags == ATH9K_TX_SW_ABORTED)
if (ts->ts_flags == ATH9K_TX_SW_ABORTED)
return 0;
isaggr = bf_isaggr(bf);
if (isaggr) {
seq_st = ATH_DS_BA_SEQ(ds);
memcpy(ba, ATH_DS_BA_BITMAP(ds), WME_BA_BMP_SIZE >> 3);
seq_st = ts->ts_seqnum;
memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
}
while (bf) {
@ -1913,7 +1916,7 @@ static int ath_tx_num_badfrms(struct ath_softc *sc, struct ath_buf *bf,
return nbad;
}
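Editorial note (not part of the patch): ath_tx_num_badfrms above counts the subframes of an aggregate whose bit is missing from the block-ack bitmap reported along with the starting sequence number. A standalone sketch of that bitmap test, assuming a 64-entry window and invented values:

#include <stdio.h>
#include <stdint.h>

#define DEMO_SEQ_MAX   4096     /* 802.11 sequence numbers wrap at 4096 */
#define DEMO_BA_WINDOW 64       /* bits tracked in the bitmap */

static unsigned int demo_ba_index(unsigned int seq_st, unsigned int seqno)
{
        return (seqno - seq_st) & (DEMO_SEQ_MAX - 1);
}

static int demo_ba_isset(const uint32_t *ba, unsigned int idx)
{
        return idx < DEMO_BA_WINDOW && (ba[idx >> 5] & (1u << (idx & 31)));
}

int main(void)
{
        /* pretend frames 100 and 102 were acked, 101 was not */
        uint32_t ba[DEMO_BA_WINDOW / 32] = { 0 };
        unsigned int seq_st = 100, seq, nbad = 0;

        ba[0] |= 1u << demo_ba_index(seq_st, 100);
        ba[0] |= 1u << demo_ba_index(seq_st, 102);

        for (seq = 100; seq <= 102; seq++)
                if (!demo_ba_isset(ba, demo_ba_index(seq_st, seq)))
                        nbad++;

        printf("nbad = %u\n", nbad);    /* prints 1 */
        return 0;
}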
static void ath_tx_rc_status(struct ath_buf *bf, struct ath_desc *ds,
static void ath_tx_rc_status(struct ath_buf *bf, struct ath_tx_status *ts,
int nbad, int txok, bool update_rc)
{
struct sk_buff *skb = bf->bf_mpdu;
@ -1923,24 +1926,24 @@ static void ath_tx_rc_status(struct ath_buf *bf, struct ath_desc *ds,
u8 i, tx_rateindex;
if (txok)
tx_info->status.ack_signal = ds->ds_txstat.ts_rssi;
tx_info->status.ack_signal = ts->ts_rssi;
tx_rateindex = ds->ds_txstat.ts_rateindex;
tx_rateindex = ts->ts_rateindex;
WARN_ON(tx_rateindex >= hw->max_rates);
if (ds->ds_txstat.ts_status & ATH9K_TXERR_FILT)
if (ts->ts_status & ATH9K_TXERR_FILT)
tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) && update_rc)
tx_info->flags |= IEEE80211_TX_STAT_AMPDU;
if ((ds->ds_txstat.ts_status & ATH9K_TXERR_FILT) == 0 &&
if ((ts->ts_status & ATH9K_TXERR_FILT) == 0 &&
(bf->bf_flags & ATH9K_TXDESC_NOACK) == 0 && update_rc) {
if (ieee80211_is_data(hdr->frame_control)) {
if (ds->ds_txstat.ts_flags &
if (ts->ts_flags &
(ATH9K_TX_DATA_UNDERRUN | ATH9K_TX_DELIM_UNDERRUN))
tx_info->pad[0] |= ATH_TX_INFO_UNDERRUN;
if ((ds->ds_txstat.ts_status & ATH9K_TXERR_XRETRY) ||
(ds->ds_txstat.ts_status & ATH9K_TXERR_FIFO))
if ((ts->ts_status & ATH9K_TXERR_XRETRY) ||
(ts->ts_status & ATH9K_TXERR_FIFO))
tx_info->pad[0] |= ATH_TX_INFO_XRETRY;
tx_info->status.ampdu_len = bf->bf_nframes;
tx_info->status.ampdu_ack_len = bf->bf_nframes - nbad;
@ -1978,6 +1981,7 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
struct ath_buf *bf, *lastbf, *bf_held = NULL;
struct list_head bf_head;
struct ath_desc *ds;
struct ath_tx_status ts;
int txok;
int status;
@ -2017,7 +2021,8 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
lastbf = bf->bf_lastbf;
ds = lastbf->bf_desc;
status = ath9k_hw_txprocdesc(ah, ds);
memset(&ts, 0, sizeof(ts));
status = ath9k_hw_txprocdesc(ah, ds, &ts);
if (status == -EINPROGRESS) {
spin_unlock_bh(&txq->axq_lock);
break;
@ -2028,7 +2033,7 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
* can disable RX.
*/
if (bf->bf_isnullfunc &&
(ds->ds_txstat.ts_status & ATH9K_TX_ACKED)) {
(ts.ts_status & ATH9K_TX_ACKED)) {
if ((sc->ps_flags & PS_ENABLED))
ath9k_enable_ps(sc);
else
@ -2047,7 +2052,7 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
&txq->axq_q, lastbf->list.prev);
txq->axq_depth--;
txok = !(ds->ds_txstat.ts_status & ATH9K_TXERR_MASK);
txok = !(ts.ts_status & ATH9K_TXERR_MASK);
txq->axq_tx_inprogress = false;
spin_unlock_bh(&txq->axq_lock);
@ -2062,16 +2067,16 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
* This frame is sent out as a single frame.
* Use hardware retry status for this frame.
*/
bf->bf_retries = ds->ds_txstat.ts_longretry;
if (ds->ds_txstat.ts_status & ATH9K_TXERR_XRETRY)
bf->bf_retries = ts.ts_longretry;
if (ts.ts_status & ATH9K_TXERR_XRETRY)
bf->bf_state.bf_type |= BUF_XRETRY;
ath_tx_rc_status(bf, ds, 0, txok, true);
ath_tx_rc_status(bf, &ts, 0, txok, true);
}
if (bf_isampdu(bf))
ath_tx_complete_aggr(sc, txq, bf, &bf_head, txok);
ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, txok);
else
ath_tx_complete_buf(sc, bf, txq, &bf_head, txok, 0);
ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, txok, 0);
ath_wake_mac80211_queue(sc, txq);

View File

@ -19,8 +19,8 @@
#include "ath.h"
#include "reg.h"
#define REG_READ common->ops->read
#define REG_WRITE common->ops->write
#define REG_READ (common->ops->read)
#define REG_WRITE (common->ops->write)
/**
* ath_hw_set_bssid_mask - filter out bssids we do not listen to


@ -50,6 +50,7 @@
#define ATH9K_5GHZ_ALL ATH9K_5GHZ_5150_5350, \
ATH9K_5GHZ_5470_5850
/* This one skips what we call "mid band" */
#define ATH9K_5GHZ_NO_MIDBAND ATH9K_5GHZ_5150_5350, \
ATH9K_5GHZ_5725_5850
@ -360,7 +361,7 @@ EXPORT_SYMBOL(ath_reg_notifier_apply);
static bool ath_regd_is_eeprom_valid(struct ath_regulatory *reg)
{
u16 rd = ath_regd_get_eepromRD(reg);
int i;
if (rd & COUNTRY_ERD_FLAG) {


@ -105,7 +105,7 @@ static enum b43_txpwr_result b43_nphy_op_recalc_txpower(struct b43_wldev *dev,
}
static void b43_chantab_radio_upload(struct b43_wldev *dev,
const struct b43_nphy_channeltab_entry *e)
const struct b43_nphy_channeltab_entry_rev2 *e)
{
b43_radio_write(dev, B2055_PLL_REF, e->radio_pll_ref);
b43_radio_write(dev, B2055_RF_PLLMOD0, e->radio_rf_pllmod0);
@ -142,7 +142,7 @@ static void b43_chantab_radio_upload(struct b43_wldev *dev,
}
static void b43_chantab_phy_upload(struct b43_wldev *dev,
const struct b43_nphy_channeltab_entry *e)
const struct b43_phy_n_sfo_cfg *e)
{
b43_phy_write(dev, B43_NPHY_BW1A, e->phy_bw1a);
b43_phy_write(dev, B43_NPHY_BW2, e->phy_bw2);
@ -160,16 +160,16 @@ static void b43_nphy_tx_power_fix(struct b43_wldev *dev)
/* http://bcm-v4.sipsolutions.net/802.11/PHY/Radio/2055Setup */
static void b43_radio_2055_setup(struct b43_wldev *dev,
const struct b43_nphy_channeltab_entry *e)
const struct b43_nphy_channeltab_entry_rev2 *e)
{
B43_WARN_ON(dev->phy.rev >= 3);
b43_chantab_radio_upload(dev, e);
udelay(50);
b43_radio_write(dev, B2055_VCO_CAL10, 5);
b43_radio_write(dev, B2055_VCO_CAL10, 45);
b43_radio_write(dev, B2055_VCO_CAL10, 0x05);
b43_radio_write(dev, B2055_VCO_CAL10, 0x45);
b43_read32(dev, B43_MMIO_MACCTL); /* flush writes */
b43_radio_write(dev, B2055_VCO_CAL10, 65);
b43_radio_write(dev, B2055_VCO_CAL10, 0x65);
udelay(300);
}
@ -254,6 +254,16 @@ static void b43_radio_init2055(struct b43_wldev *dev)
b43_radio_init2055_post(dev);
}
/*
* Initialize a Broadcom 2056 N-radio
* http://bcm-v4.sipsolutions.net/802.11/Radio/2056/Init
*/
static void b43_radio_init2056(struct b43_wldev *dev)
{
/* TODO */
}
/*
* Upload the N-PHY tables.
* http://bcm-v4.sipsolutions.net/802.11/PHY/N/InitTables
@ -2791,7 +2801,7 @@ static int b43_nphy_cal_tx_iq_lo(struct b43_wldev *dev,
}
b43_ntab_write_bulk(dev, B43_NTAB16(15, 88), 4,
buffer);
b43_ntab_write_bulk(dev, B43_NTAB16(15, 101), 2,
b43_ntab_read_bulk(dev, B43_NTAB16(15, 101), 2,
buffer);
b43_ntab_write_bulk(dev, B43_NTAB16(15, 85), 2,
buffer);
@ -3261,7 +3271,7 @@ int b43_phy_initn(struct b43_wldev *dev)
/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/ChanspecSetup */
static void b43_nphy_chanspec_setup(struct b43_wldev *dev,
const struct b43_nphy_channeltab_entry *e,
const struct b43_phy_n_sfo_cfg *e,
struct b43_chanspec chanspec)
{
struct b43_phy *phy = &dev->phy;
@ -3327,13 +3337,21 @@ static int b43_nphy_set_chanspec(struct b43_wldev *dev,
{
struct b43_phy_n *nphy = dev->phy.n;
const struct b43_nphy_channeltab_entry *tabent;
const struct b43_nphy_channeltab_entry_rev2 *tabent_r2;
const struct b43_nphy_channeltab_entry_rev3 *tabent_r3;
u8 tmp;
u8 channel = chanspec.channel;
if (dev->phy.rev >= 3) {
/* TODO */
tabent_r3 = NULL;
if (!tabent_r3)
return -ESRCH;
} else {
tabent_r2 = b43_nphy_get_chantabent_rev2(dev, channel);
if (!tabent_r2)
return -ESRCH;
}
nphy->radio_chanspec = chanspec;
@ -3354,17 +3372,13 @@ static int b43_nphy_set_chanspec(struct b43_wldev *dev,
if (dev->phy.rev >= 3) {
tmp = (chanspec.b_freq == 1) ? 4 : 0;
b43_radio_maskset(dev, 0x08, 0xFFFB, tmp);
/* TODO: PHY Radio2056 Setup (chan_info_ptr[i]) */
/* TODO: N PHY Chanspec Setup (chan_info_ptr[i]) */
/* TODO: PHY Radio2056 Setup (dev, tabent_r3); */
b43_nphy_chanspec_setup(dev, &(tabent_r3->phy_regs), chanspec);
} else {
tabent = b43_nphy_get_chantabent(dev, channel);
if (!tabent)
return -ESRCH;
tmp = (chanspec.b_freq == 1) ? 0x0020 : 0x0050;
b43_radio_maskset(dev, B2055_MASTER1, 0xFF8F, tmp);
b43_radio_2055_setup(dev, tabent);
b43_nphy_chanspec_setup(dev, tabent, chanspec);
b43_radio_2055_setup(dev, tabent_r2);
b43_nphy_chanspec_setup(dev, &(tabent_r2->phy_regs), chanspec);
}
return 0;
@ -3474,6 +3488,8 @@ static void b43_nphy_op_radio_write(struct b43_wldev *dev, u16 reg, u16 value)
static void b43_nphy_op_software_rfkill(struct b43_wldev *dev,
bool blocked)
{
struct b43_phy_n *nphy = dev->phy.n;
if (b43_read32(dev, B43_MMIO_MACCTL) & B43_MACCTL_ENABLED)
b43err(dev->wl, "MAC not suspended\n");
@ -3499,8 +3515,8 @@ static void b43_nphy_op_software_rfkill(struct b43_wldev *dev,
}
} else {
if (dev->phy.rev >= 3) {
/* TODO: b43_radio_init2056(dev); */
/* TODO: PHY Set Channel Spec (dev, radio_chanspec) */
b43_radio_init2056(dev);
b43_nphy_set_chanspec(dev, nphy->radio_chanspec);
} else {
b43_radio_init2055(dev);
}

View File

@ -318,14 +318,14 @@ void b2055_upload_inittab(struct b43_wldev *dev,
.radio_c2_tx_mxbgtrim = r21
#define PHYREGS(r0, r1, r2, r3, r4, r5) \
.phy_bw1a = r0, \
.phy_bw2 = r1, \
.phy_bw3 = r2, \
.phy_bw4 = r3, \
.phy_bw5 = r4, \
.phy_bw6 = r5
.phy_regs.phy_bw1a = r0, \
.phy_regs.phy_bw2 = r1, \
.phy_regs.phy_bw3 = r2, \
.phy_regs.phy_bw4 = r3, \
.phy_regs.phy_bw5 = r4, \
.phy_regs.phy_bw6 = r5
static const struct b43_nphy_channeltab_entry b43_nphy_channeltab[] = {
static const struct b43_nphy_channeltab_entry_rev2 b43_nphy_channeltab[] = {
{ .channel = 184,
.freq = 4920, /* MHz */
.unk2 = 3280,
@ -1320,10 +1320,10 @@ static const struct b43_nphy_channeltab_entry b43_nphy_channeltab[] = {
},
};
const struct b43_nphy_channeltab_entry *
b43_nphy_get_chantabent(struct b43_wldev *dev, u8 channel)
const struct b43_nphy_channeltab_entry_rev2 *
b43_nphy_get_chantabent_rev2(struct b43_wldev *dev, u8 channel)
{
const struct b43_nphy_channeltab_entry *e;
const struct b43_nphy_channeltab_entry_rev2 *e;
unsigned int i;
for (i = 0; i < ARRAY_SIZE(b43_nphy_channeltab); i++) {

View File

@ -13,9 +13,13 @@ struct b43_phy_n_sfo_cfg {
u16 phy_bw6;
};
struct b43_nphy_channeltab_entry {
struct b43_nphy_channeltab_entry_rev2 {
/* The channel number */
u8 channel;
/* The channel frequency in MHz */
u16 freq;
/* An unknown value */
u16 unk2;
/* Radio register values on channelswitch */
u8 radio_pll_ref;
u8 radio_rf_pllmod0;
@ -40,16 +44,18 @@ struct b43_nphy_channeltab_entry {
u8 radio_c2_tx_pgapadtn;
u8 radio_c2_tx_mxbgtrim;
/* PHY register values on channelswitch */
u16 phy_bw1a;
u16 phy_bw2;
u16 phy_bw3;
u16 phy_bw4;
u16 phy_bw5;
u16 phy_bw6;
struct b43_phy_n_sfo_cfg phy_regs;
};
struct b43_nphy_channeltab_entry_rev3 {
/* The channel number */
u8 channel;
/* The channel frequency in MHz */
u16 freq;
/* An unknown value */
u16 unk2;
/* Radio register values on channelswitch */
/* TODO */
/* PHY register values on channelswitch */
struct b43_phy_n_sfo_cfg phy_regs;
};
@ -86,8 +92,8 @@ void b2055_upload_inittab(struct b43_wldev *dev,
/* Get the NPHY Channel Switch Table entry for a channel number.
* Returns NULL on failure to find an entry. */
const struct b43_nphy_channeltab_entry *
b43_nphy_get_chantabent(struct b43_wldev *dev, u8 channel);
const struct b43_nphy_channeltab_entry_rev2 *
b43_nphy_get_chantabent_rev2(struct b43_wldev *dev, u8 channel);
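A minimal caller sketch for the renamed lookup; the helper name below is hypothetical, and the -ESRCH handling mirrors the pattern used in b43_nphy_set_chanspec() earlier in this patch.
static int b43_nphy_channel_to_freq(struct b43_wldev *dev, u8 channel)
{
	const struct b43_nphy_channeltab_entry_rev2 *e;

	e = b43_nphy_get_chantabent_rev2(dev, channel);
	if (!e)
		return -ESRCH;	/* channel not present in the rev2 table */

	return e->freq;		/* channel frequency in MHz */
}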
/* The N-PHY tables. */

View File

@ -2140,7 +2140,7 @@ static void isr_indicate_association_lost(struct ipw2100_priv *priv, u32 status)
DECLARE_SSID_BUF(ssid);
IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE | IPW_DL_ASSOC,
"disassociated: '%s' %pM \n",
"disassociated: '%s' %pM\n",
print_ssid(ssid, priv->essid, priv->essid_len),
priv->bssid);
@ -3285,7 +3285,7 @@ static void ipw2100_irq_tasklet(struct ipw2100_priv *priv)
if (inta & IPW2100_INTA_PARITY_ERROR) {
printk(KERN_ERR DRV_NAME
": ***** PARITY ERROR INTERRUPT !!!! \n");
": ***** PARITY ERROR INTERRUPT !!!!\n");
priv->inta_other++;
write_register(dev, IPW_REG_INTA, IPW2100_INTA_PARITY_ERROR);
}
@ -6753,7 +6753,7 @@ static int ipw2100_wx_set_freq(struct net_device *dev,
err = -EOPNOTSUPP;
goto done;
} else { /* Set the channel */
IPW_DEBUG_WX("SET Freq/Channel -> %d \n", fwrq->m);
IPW_DEBUG_WX("SET Freq/Channel -> %d\n", fwrq->m);
err = ipw2100_set_channel(priv, fwrq->m, 0);
}
@ -6782,7 +6782,7 @@ static int ipw2100_wx_get_freq(struct net_device *dev,
else
wrqu->freq.m = 0;
IPW_DEBUG_WX("GET Freq/Channel -> %d \n", priv->channel);
IPW_DEBUG_WX("GET Freq/Channel -> %d\n", priv->channel);
return 0;
}
@ -6794,7 +6794,7 @@ static int ipw2100_wx_set_mode(struct net_device *dev,
struct ipw2100_priv *priv = libipw_priv(dev);
int err = 0;
IPW_DEBUG_WX("SET Mode -> %d \n", wrqu->mode);
IPW_DEBUG_WX("SET Mode -> %d\n", wrqu->mode);
if (wrqu->mode == priv->ieee->iw_mode)
return 0;
@ -7149,7 +7149,7 @@ static int ipw2100_wx_set_nick(struct net_device *dev,
memset(priv->nick, 0, sizeof(priv->nick));
memcpy(priv->nick, extra, wrqu->data.length);
IPW_DEBUG_WX("SET Nickname -> %s \n", priv->nick);
IPW_DEBUG_WX("SET Nickname -> %s\n", priv->nick);
return 0;
}
@ -7168,7 +7168,7 @@ static int ipw2100_wx_get_nick(struct net_device *dev,
memcpy(extra, priv->nick, wrqu->data.length);
wrqu->data.flags = 1; /* active */
IPW_DEBUG_WX("GET Nickname -> %s \n", extra);
IPW_DEBUG_WX("GET Nickname -> %s\n", extra);
return 0;
}
@ -7207,7 +7207,7 @@ static int ipw2100_wx_set_rate(struct net_device *dev,
err = ipw2100_set_tx_rates(priv, rate, 0);
IPW_DEBUG_WX("SET Rate -> %04X \n", rate);
IPW_DEBUG_WX("SET Rate -> %04X\n", rate);
done:
mutex_unlock(&priv->action_mutex);
return err;
@ -7258,7 +7258,7 @@ static int ipw2100_wx_get_rate(struct net_device *dev,
wrqu->bitrate.value = 0;
}
IPW_DEBUG_WX("GET Rate -> %d \n", wrqu->bitrate.value);
IPW_DEBUG_WX("GET Rate -> %d\n", wrqu->bitrate.value);
done:
mutex_unlock(&priv->action_mutex);
@ -7294,7 +7294,7 @@ static int ipw2100_wx_set_rts(struct net_device *dev,
err = ipw2100_set_rts_threshold(priv, value);
IPW_DEBUG_WX("SET RTS Threshold -> 0x%08X \n", value);
IPW_DEBUG_WX("SET RTS Threshold -> 0x%08X\n", value);
done:
mutex_unlock(&priv->action_mutex);
return err;
@ -7316,7 +7316,7 @@ static int ipw2100_wx_get_rts(struct net_device *dev,
/* If RTS is set to the default value, then it is disabled */
wrqu->rts.disabled = (priv->rts_threshold & RTS_DISABLED) ? 1 : 0;
IPW_DEBUG_WX("GET RTS Threshold -> 0x%08X \n", wrqu->rts.value);
IPW_DEBUG_WX("GET RTS Threshold -> 0x%08X\n", wrqu->rts.value);
return 0;
}
@ -7355,7 +7355,7 @@ static int ipw2100_wx_set_txpow(struct net_device *dev,
err = ipw2100_set_tx_power(priv, value);
IPW_DEBUG_WX("SET TX Power -> %d \n", value);
IPW_DEBUG_WX("SET TX Power -> %d\n", value);
done:
mutex_unlock(&priv->action_mutex);
@ -7384,7 +7384,7 @@ static int ipw2100_wx_get_txpow(struct net_device *dev,
wrqu->txpower.flags = IW_TXPOW_DBM;
IPW_DEBUG_WX("GET TX Power -> %d \n", wrqu->txpower.value);
IPW_DEBUG_WX("GET TX Power -> %d\n", wrqu->txpower.value);
return 0;
}
@ -7414,7 +7414,7 @@ static int ipw2100_wx_set_frag(struct net_device *dev,
priv->frag_threshold = priv->ieee->fts;
}
IPW_DEBUG_WX("SET Frag Threshold -> %d \n", priv->ieee->fts);
IPW_DEBUG_WX("SET Frag Threshold -> %d\n", priv->ieee->fts);
return 0;
}
@ -7432,7 +7432,7 @@ static int ipw2100_wx_get_frag(struct net_device *dev,
wrqu->frag.fixed = 0; /* no auto select */
wrqu->frag.disabled = (priv->frag_threshold & FRAG_DISABLED) ? 1 : 0;
IPW_DEBUG_WX("GET Frag Threshold -> %d \n", wrqu->frag.value);
IPW_DEBUG_WX("GET Frag Threshold -> %d\n", wrqu->frag.value);
return 0;
}
@ -7458,14 +7458,14 @@ static int ipw2100_wx_set_retry(struct net_device *dev,
if (wrqu->retry.flags & IW_RETRY_SHORT) {
err = ipw2100_set_short_retry(priv, wrqu->retry.value);
IPW_DEBUG_WX("SET Short Retry Limit -> %d \n",
IPW_DEBUG_WX("SET Short Retry Limit -> %d\n",
wrqu->retry.value);
goto done;
}
if (wrqu->retry.flags & IW_RETRY_LONG) {
err = ipw2100_set_long_retry(priv, wrqu->retry.value);
IPW_DEBUG_WX("SET Long Retry Limit -> %d \n",
IPW_DEBUG_WX("SET Long Retry Limit -> %d\n",
wrqu->retry.value);
goto done;
}
@ -7474,7 +7474,7 @@ static int ipw2100_wx_set_retry(struct net_device *dev,
if (!err)
err = ipw2100_set_long_retry(priv, wrqu->retry.value);
IPW_DEBUG_WX("SET Both Retry Limits -> %d \n", wrqu->retry.value);
IPW_DEBUG_WX("SET Both Retry Limits -> %d\n", wrqu->retry.value);
done:
mutex_unlock(&priv->action_mutex);
@ -7508,7 +7508,7 @@ static int ipw2100_wx_get_retry(struct net_device *dev,
wrqu->retry.value = priv->short_retry_limit;
}
IPW_DEBUG_WX("GET Retry -> %d \n", wrqu->retry.value);
IPW_DEBUG_WX("GET Retry -> %d\n", wrqu->retry.value);
return 0;
}

View File

@ -459,7 +459,7 @@ static u8 _ipw_read_reg8(struct ipw_priv *priv, u32 reg)
{
u32 word;
_ipw_write32(priv, IPW_INDIRECT_ADDR, reg & IPW_INDIRECT_ADDR_MASK);
IPW_DEBUG_IO(" reg = 0x%8X : \n", reg);
IPW_DEBUG_IO(" reg = 0x%8X :\n", reg);
word = _ipw_read32(priv, IPW_INDIRECT_DATA);
return (word >> ((reg & 0x3) * 8)) & 0xff;
}
@ -473,7 +473,7 @@ static u32 _ipw_read_reg32(struct ipw_priv *priv, u32 reg)
_ipw_write32(priv, IPW_INDIRECT_ADDR, reg);
value = _ipw_read32(priv, IPW_INDIRECT_DATA);
IPW_DEBUG_IO(" reg = 0x%4X : value = 0x%4x \n", reg, value);
IPW_DEBUG_IO(" reg = 0x%4X : value = 0x%4x\n", reg, value);
return value;
}
@ -2349,16 +2349,25 @@ static void ipw_bg_adapter_restart(struct work_struct *work)
mutex_unlock(&priv->mutex);
}
#define IPW_SCAN_CHECK_WATCHDOG (5 * HZ)
static void ipw_abort_scan(struct ipw_priv *priv);
#define IPW_SCAN_CHECK_WATCHDOG (5 * HZ)
static void ipw_scan_check(void *data)
{
struct ipw_priv *priv = data;
if (priv->status & (STATUS_SCANNING | STATUS_SCAN_ABORTING)) {
if (priv->status & STATUS_SCAN_ABORTING) {
IPW_DEBUG_SCAN("Scan completion watchdog resetting "
"adapter after (%dms).\n",
jiffies_to_msecs(IPW_SCAN_CHECK_WATCHDOG));
queue_work(priv->workqueue, &priv->adapter_restart);
} else if (priv->status & STATUS_SCANNING) {
IPW_DEBUG_SCAN("Scan completion watchdog aborting scan "
"after (%dms).\n",
jiffies_to_msecs(IPW_SCAN_CHECK_WATCHDOG));
ipw_abort_scan(priv);
queue_delayed_work(priv->workqueue, &priv->scan_check, HZ);
}
}
@ -2739,7 +2748,7 @@ static inline void ipw_fw_dma_reset_command_blocks(struct ipw_priv *priv)
static int ipw_fw_dma_enable(struct ipw_priv *priv)
{ /* start dma engine but no transfers yet */
IPW_DEBUG_FW(">> : \n");
IPW_DEBUG_FW(">> :\n");
/* Start the dma */
ipw_fw_dma_reset_command_blocks(priv);
@ -2747,7 +2756,7 @@ static int ipw_fw_dma_enable(struct ipw_priv *priv)
/* Write CB base address */
ipw_write_reg32(priv, IPW_DMA_I_CB_BASE, IPW_SHARED_SRAM_DMA_CONTROL);
IPW_DEBUG_FW("<< : \n");
IPW_DEBUG_FW("<< :\n");
return 0;
}
@ -2762,7 +2771,7 @@ static void ipw_fw_dma_abort(struct ipw_priv *priv)
ipw_write_reg32(priv, IPW_DMA_I_DMA_CONTROL, control);
priv->sram_desc.last_cb_index = 0;
IPW_DEBUG_FW("<< \n");
IPW_DEBUG_FW("<<\n");
}
static int ipw_fw_dma_write_command_block(struct ipw_priv *priv, int index,
@ -2813,29 +2822,29 @@ static void ipw_fw_dma_dump_command_block(struct ipw_priv *priv)
IPW_DEBUG_FW(">> :\n");
address = ipw_read_reg32(priv, IPW_DMA_I_CURRENT_CB);
IPW_DEBUG_FW_INFO("Current CB is 0x%x \n", address);
IPW_DEBUG_FW_INFO("Current CB is 0x%x\n", address);
/* Read the DMA Controlor register */
register_value = ipw_read_reg32(priv, IPW_DMA_I_DMA_CONTROL);
IPW_DEBUG_FW_INFO("IPW_DMA_I_DMA_CONTROL is 0x%x \n", register_value);
IPW_DEBUG_FW_INFO("IPW_DMA_I_DMA_CONTROL is 0x%x\n", register_value);
/* Print the CB values */
cb_fields_address = address;
register_value = ipw_read_reg32(priv, cb_fields_address);
IPW_DEBUG_FW_INFO("Current CB ControlField is 0x%x \n", register_value);
IPW_DEBUG_FW_INFO("Current CB Control Field is 0x%x\n", register_value);
cb_fields_address += sizeof(u32);
register_value = ipw_read_reg32(priv, cb_fields_address);
IPW_DEBUG_FW_INFO("Current CB Source Field is 0x%x \n", register_value);
IPW_DEBUG_FW_INFO("Current CB Source Field is 0x%x\n", register_value);
cb_fields_address += sizeof(u32);
register_value = ipw_read_reg32(priv, cb_fields_address);
IPW_DEBUG_FW_INFO("Current CB Destination Field is 0x%x \n",
IPW_DEBUG_FW_INFO("Current CB Destination Field is 0x%x\n",
register_value);
cb_fields_address += sizeof(u32);
register_value = ipw_read_reg32(priv, cb_fields_address);
IPW_DEBUG_FW_INFO("Current CB Status Field is 0x%x \n", register_value);
IPW_DEBUG_FW_INFO("Current CB Status Field is 0x%x\n", register_value);
IPW_DEBUG_FW(">> :\n");
}
@ -2851,7 +2860,7 @@ static int ipw_fw_dma_command_block_index(struct ipw_priv *priv)
current_cb_index = (current_cb_address - IPW_SHARED_SRAM_DMA_CONTROL) /
sizeof(struct command_block);
IPW_DEBUG_FW_INFO("Current CB index 0x%x address = 0x%X \n",
IPW_DEBUG_FW_INFO("Current CB index 0x%x address = 0x%X\n",
current_cb_index, current_cb_address);
IPW_DEBUG_FW(">> :\n");
@ -2910,7 +2919,7 @@ static int ipw_fw_dma_add_buffer(struct ipw_priv *priv, dma_addr_t *src_address,
int ret, i;
u32 size;
IPW_DEBUG_FW(">> \n");
IPW_DEBUG_FW(">>\n");
IPW_DEBUG_FW_INFO("nr=%d dest_address=0x%x len=0x%x\n",
nr, dest_address, len);
@ -2927,7 +2936,7 @@ static int ipw_fw_dma_add_buffer(struct ipw_priv *priv, dma_addr_t *src_address,
IPW_DEBUG_FW_INFO(": Added new cb\n");
}
IPW_DEBUG_FW("<< \n");
IPW_DEBUG_FW("<<\n");
return 0;
}
@ -2936,7 +2945,7 @@ static int ipw_fw_dma_wait(struct ipw_priv *priv)
u32 current_index = 0, previous_index;
u32 watchdog = 0;
IPW_DEBUG_FW(">> : \n");
IPW_DEBUG_FW(">> :\n");
current_index = ipw_fw_dma_command_block_index(priv);
IPW_DEBUG_FW_INFO("sram_desc.last_cb_index:0x%08X\n",
@ -2965,7 +2974,7 @@ static int ipw_fw_dma_wait(struct ipw_priv *priv)
ipw_set_bit(priv, IPW_RESET_REG,
IPW_RESET_REG_MASTER_DISABLED | IPW_RESET_REG_STOP_MASTER);
IPW_DEBUG_FW("<< dmaWaitSync \n");
IPW_DEBUG_FW("<< dmaWaitSync\n");
return 0;
}
@ -3026,7 +3035,7 @@ static int ipw_stop_master(struct ipw_priv *priv)
{
int rc;
IPW_DEBUG_TRACE(">> \n");
IPW_DEBUG_TRACE(">>\n");
/* stop master. typical delay - 0 */
ipw_set_bit(priv, IPW_RESET_REG, IPW_RESET_REG_STOP_MASTER);
@ -3045,7 +3054,7 @@ static int ipw_stop_master(struct ipw_priv *priv)
static void ipw_arc_release(struct ipw_priv *priv)
{
IPW_DEBUG_TRACE(">> \n");
IPW_DEBUG_TRACE(">>\n");
mdelay(5);
ipw_clear_bit(priv, IPW_RESET_REG, CBD_RESET_REG_PRINCETON_RESET);
@ -3067,7 +3076,7 @@ static int ipw_load_ucode(struct ipw_priv *priv, u8 * data, size_t len)
image = (__le16 *) data;
IPW_DEBUG_TRACE(">> \n");
IPW_DEBUG_TRACE(">>\n");
rc = ipw_stop_master(priv);
@ -3181,7 +3190,7 @@ static int ipw_load_firmware(struct ipw_priv *priv, u8 * data, size_t len)
void **virts;
dma_addr_t *phys;
IPW_DEBUG_TRACE("<< : \n");
IPW_DEBUG_TRACE("<< :\n");
virts = kmalloc(sizeof(void *) * CB_NUMBER_OF_ELEMENTS_SMALL,
GFP_KERNEL);
@ -4482,7 +4491,7 @@ static void ipw_rx_notification(struct ipw_priv *priv,
case CMAS_ASSOCIATED:{
IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
IPW_DL_ASSOC,
"associated: '%s' %pM \n",
"associated: '%s' %pM\n",
print_ssid(ssid, priv->essid,
priv->essid_len),
priv->bssid);
@ -4563,7 +4572,7 @@ static void ipw_rx_notification(struct ipw_priv *priv,
IPW_DL_ASSOC,
"deauthenticated: '%s' "
"%pM"
": (0x%04X) - %s \n",
": (0x%04X) - %s\n",
print_ssid(ssid,
priv->
essid,
@ -4614,7 +4623,7 @@ static void ipw_rx_notification(struct ipw_priv *priv,
IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
IPW_DL_ASSOC,
"disassociated: '%s' %pM \n",
"disassociated: '%s' %pM\n",
print_ssid(ssid, priv->essid,
priv->essid_len),
priv->bssid);
@ -4652,7 +4661,7 @@ static void ipw_rx_notification(struct ipw_priv *priv,
switch (auth->state) {
case CMAS_AUTHENTICATED:
IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
"authenticated: '%s' %pM \n",
"authenticated: '%s' %pM\n",
print_ssid(ssid, priv->essid,
priv->essid_len),
priv->bssid);
@ -6925,7 +6934,7 @@ static u8 ipw_qos_current_mode(struct ipw_priv * priv)
} else {
mode = priv->ieee->mode;
}
IPW_DEBUG_QOS("QoS network/card mode %d \n", mode);
IPW_DEBUG_QOS("QoS network/card mode %d\n", mode);
return mode;
}
@ -6965,7 +6974,7 @@ static int ipw_qos_handle_probe_response(struct ipw_priv *priv,
&def_parameters_OFDM, size);
if ((network->qos_data.active == 1) && (active_network == 1)) {
IPW_DEBUG_QOS("QoS was disabled call qos_activate \n");
IPW_DEBUG_QOS("QoS was disabled call qos_activate\n");
schedule_work(&priv->qos_activate);
}
@ -7542,7 +7551,7 @@ static int ipw_associate_network(struct ipw_priv *priv,
return err;
}
IPW_DEBUG(IPW_DL_STATE, "associating: '%s' %pM \n",
IPW_DEBUG(IPW_DL_STATE, "associating: '%s' %pM\n",
print_ssid(ssid, priv->essid, priv->essid_len),
priv->bssid);
@ -8793,7 +8802,7 @@ static int ipw_wx_set_freq(struct net_device *dev,
}
}
IPW_DEBUG_WX("SET Freq/Channel -> %d \n", fwrq->m);
IPW_DEBUG_WX("SET Freq/Channel -> %d\n", fwrq->m);
mutex_lock(&priv->mutex);
ret = ipw_set_channel(priv, channel);
mutex_unlock(&priv->mutex);
@ -8835,7 +8844,7 @@ static int ipw_wx_get_freq(struct net_device *dev,
wrqu->freq.m = 0;
mutex_unlock(&priv->mutex);
IPW_DEBUG_WX("GET Freq/Channel -> %d \n", priv->channel);
IPW_DEBUG_WX("GET Freq/Channel -> %d\n", priv->channel);
return 0;
}
@ -9230,7 +9239,7 @@ static int ipw_wx_get_sens(struct net_device *dev,
wrqu->sens.value = priv->roaming_threshold;
mutex_unlock(&priv->mutex);
IPW_DEBUG_WX("GET roaming threshold -> %s %d \n",
IPW_DEBUG_WX("GET roaming threshold -> %s %d\n",
wrqu->power.disabled ? "OFF" : "ON", wrqu->power.value);
return 0;
@ -9358,7 +9367,7 @@ static int ipw_wx_get_rate(struct net_device *dev,
wrqu->bitrate.value = priv->last_rate;
wrqu->bitrate.fixed = (priv->config & CFG_FIXED_RATE) ? 1 : 0;
mutex_unlock(&priv->mutex);
IPW_DEBUG_WX("GET Rate -> %d \n", wrqu->bitrate.value);
IPW_DEBUG_WX("GET Rate -> %d\n", wrqu->bitrate.value);
return 0;
}
@ -9381,7 +9390,7 @@ static int ipw_wx_set_rts(struct net_device *dev,
ipw_send_rts_threshold(priv, priv->rts_threshold);
mutex_unlock(&priv->mutex);
IPW_DEBUG_WX("SET RTS Threshold -> %d \n", priv->rts_threshold);
IPW_DEBUG_WX("SET RTS Threshold -> %d\n", priv->rts_threshold);
return 0;
}
@ -9395,7 +9404,7 @@ static int ipw_wx_get_rts(struct net_device *dev,
wrqu->rts.fixed = 0; /* no auto select */
wrqu->rts.disabled = (wrqu->rts.value == DEFAULT_RTS_THRESHOLD);
mutex_unlock(&priv->mutex);
IPW_DEBUG_WX("GET RTS Threshold -> %d \n", wrqu->rts.value);
IPW_DEBUG_WX("GET RTS Threshold -> %d\n", wrqu->rts.value);
return 0;
}
@ -9445,7 +9454,7 @@ static int ipw_wx_get_txpow(struct net_device *dev,
wrqu->power.disabled = (priv->status & STATUS_RF_KILL_MASK) ? 1 : 0;
mutex_unlock(&priv->mutex);
IPW_DEBUG_WX("GET TX Power -> %s %d \n",
IPW_DEBUG_WX("GET TX Power -> %s %d\n",
wrqu->power.disabled ? "OFF" : "ON", wrqu->power.value);
return 0;
@ -9471,7 +9480,7 @@ static int ipw_wx_set_frag(struct net_device *dev,
ipw_send_frag_threshold(priv, wrqu->frag.value);
mutex_unlock(&priv->mutex);
IPW_DEBUG_WX("SET Frag Threshold -> %d \n", wrqu->frag.value);
IPW_DEBUG_WX("SET Frag Threshold -> %d\n", wrqu->frag.value);
return 0;
}
@ -9485,7 +9494,7 @@ static int ipw_wx_get_frag(struct net_device *dev,
wrqu->frag.fixed = 0; /* no auto select */
wrqu->frag.disabled = (wrqu->frag.value == DEFAULT_FTS);
mutex_unlock(&priv->mutex);
IPW_DEBUG_WX("GET Frag Threshold -> %d \n", wrqu->frag.value);
IPW_DEBUG_WX("GET Frag Threshold -> %d\n", wrqu->frag.value);
return 0;
}
@ -9549,7 +9558,7 @@ static int ipw_wx_get_retry(struct net_device *dev,
}
mutex_unlock(&priv->mutex);
IPW_DEBUG_WX("GET retry -> %d \n", wrqu->retry.value);
IPW_DEBUG_WX("GET retry -> %d\n", wrqu->retry.value);
return 0;
}

View File

@ -10,6 +10,8 @@ CFLAGS_iwl-devtrace.o := -I$(src)
# AGN
obj-$(CONFIG_IWLAGN) += iwlagn.o
iwlagn-objs := iwl-agn.o iwl-agn-rs.o iwl-agn-led.o iwl-agn-ict.o
iwlagn-objs += iwl-agn-ucode.o iwl-agn-hcmd.o iwl-agn-tx.o
iwlagn-objs += iwl-agn-lib.o
iwlagn-$(CONFIG_IWL4965) += iwl-4965.o
iwlagn-$(CONFIG_IWL5000) += iwl-5000.o

View File

@ -44,7 +44,7 @@
#include "iwl-sta.h"
#include "iwl-agn.h"
#include "iwl-helpers.h"
#include "iwl-5000-hw.h"
#include "iwl-agn-hw.h"
#include "iwl-agn-led.h"
/* Highest firmware API version supported */
@ -118,7 +118,7 @@ static struct iwl_sensitivity_ranges iwl1000_sensitivity = {
static int iwl1000_hw_set_hw_params(struct iwl_priv *priv)
{
if (priv->cfg->mod_params->num_of_queues >= IWL_MIN_NUM_QUEUES &&
priv->cfg->mod_params->num_of_queues <= IWL50_NUM_QUEUES)
priv->cfg->mod_params->num_of_queues <= IWLAGN_NUM_QUEUES)
priv->cfg->num_of_queues =
priv->cfg->mod_params->num_of_queues;
@ -126,13 +126,13 @@ static int iwl1000_hw_set_hw_params(struct iwl_priv *priv)
priv->hw_params.dma_chnl_num = FH50_TCSR_CHNL_NUM;
priv->hw_params.scd_bc_tbls_size =
priv->cfg->num_of_queues *
sizeof(struct iwl5000_scd_bc_tbl);
sizeof(struct iwlagn_scd_bc_tbl);
priv->hw_params.tfd_size = sizeof(struct iwl_tfd);
priv->hw_params.max_stations = IWL5000_STATION_COUNT;
priv->hw_params.bcast_sta_id = IWL5000_BROADCAST_ID;
priv->hw_params.max_data_size = IWL50_RTC_DATA_SIZE;
priv->hw_params.max_inst_size = IWL50_RTC_INST_SIZE;
priv->hw_params.max_data_size = IWLAGN_RTC_DATA_SIZE;
priv->hw_params.max_inst_size = IWLAGN_RTC_INST_SIZE;
priv->hw_params.max_bsm_size = 0;
priv->hw_params.ht40_channel = BIT(IEEE80211_BAND_2GHZ) |
@ -162,25 +162,25 @@ static int iwl1000_hw_set_hw_params(struct iwl_priv *priv)
static struct iwl_lib_ops iwl1000_lib = {
.set_hw_params = iwl1000_hw_set_hw_params,
.txq_update_byte_cnt_tbl = iwl5000_txq_update_byte_cnt_tbl,
.txq_inval_byte_cnt_tbl = iwl5000_txq_inval_byte_cnt_tbl,
.txq_set_sched = iwl5000_txq_set_sched,
.txq_agg_enable = iwl5000_txq_agg_enable,
.txq_agg_disable = iwl5000_txq_agg_disable,
.txq_update_byte_cnt_tbl = iwlagn_txq_update_byte_cnt_tbl,
.txq_inval_byte_cnt_tbl = iwlagn_txq_inval_byte_cnt_tbl,
.txq_set_sched = iwlagn_txq_set_sched,
.txq_agg_enable = iwlagn_txq_agg_enable,
.txq_agg_disable = iwlagn_txq_agg_disable,
.txq_attach_buf_to_tfd = iwl_hw_txq_attach_buf_to_tfd,
.txq_free_tfd = iwl_hw_txq_free_tfd,
.txq_init = iwl_hw_tx_queue_init,
.rx_handler_setup = iwl5000_rx_handler_setup,
.setup_deferred_work = iwl5000_setup_deferred_work,
.is_valid_rtc_data_addr = iwl5000_hw_valid_rtc_data_addr,
.load_ucode = iwl5000_load_ucode,
.rx_handler_setup = iwlagn_rx_handler_setup,
.setup_deferred_work = iwlagn_setup_deferred_work,
.is_valid_rtc_data_addr = iwlagn_hw_valid_rtc_data_addr,
.load_ucode = iwlagn_load_ucode,
.dump_nic_event_log = iwl_dump_nic_event_log,
.dump_nic_error_log = iwl_dump_nic_error_log,
.dump_csr = iwl_dump_csr,
.dump_fh = iwl_dump_fh,
.init_alive_start = iwl5000_init_alive_start,
.alive_notify = iwl5000_alive_notify,
.send_tx_power = iwl5000_send_tx_power,
.init_alive_start = iwlagn_init_alive_start,
.alive_notify = iwlagn_alive_notify,
.send_tx_power = iwlagn_send_tx_power,
.update_chain_flags = iwl_update_chain_flags,
.apm_ops = {
.init = iwl_apm_init,
@ -190,25 +190,25 @@ static struct iwl_lib_ops iwl1000_lib = {
},
.eeprom_ops = {
.regulatory_bands = {
EEPROM_5000_REG_BAND_1_CHANNELS,
EEPROM_5000_REG_BAND_2_CHANNELS,
EEPROM_5000_REG_BAND_3_CHANNELS,
EEPROM_5000_REG_BAND_4_CHANNELS,
EEPROM_5000_REG_BAND_5_CHANNELS,
EEPROM_5000_REG_BAND_24_HT40_CHANNELS,
EEPROM_5000_REG_BAND_52_HT40_CHANNELS
EEPROM_REG_BAND_1_CHANNELS,
EEPROM_REG_BAND_2_CHANNELS,
EEPROM_REG_BAND_3_CHANNELS,
EEPROM_REG_BAND_4_CHANNELS,
EEPROM_REG_BAND_5_CHANNELS,
EEPROM_REG_BAND_24_HT40_CHANNELS,
EEPROM_REG_BAND_52_HT40_CHANNELS
},
.verify_signature = iwlcore_eeprom_verify_signature,
.acquire_semaphore = iwlcore_eeprom_acquire_semaphore,
.release_semaphore = iwlcore_eeprom_release_semaphore,
.calib_version = iwl5000_eeprom_calib_version,
.query_addr = iwl5000_eeprom_query_addr,
.calib_version = iwlagn_eeprom_calib_version,
.query_addr = iwlagn_eeprom_query_addr,
},
.post_associate = iwl_post_associate,
.isr = iwl_isr_ict,
.config_ap = iwl_config_ap,
.temp_ops = {
.temperature = iwl5000_temperature,
.temperature = iwlagn_temperature,
.set_ct_kill = iwl1000_set_ct_threshold,
},
.add_bcast_station = iwl_add_bcast_station,
@ -218,10 +218,10 @@ static struct iwl_lib_ops iwl1000_lib = {
};
static const struct iwl_ops iwl1000_ops = {
.ucode = &iwl5000_ucode,
.ucode = &iwlagn_ucode,
.lib = &iwl1000_lib,
.hcmd = &iwl5000_hcmd,
.utils = &iwl5000_hcmd_utils,
.hcmd = &iwlagn_hcmd,
.utils = &iwlagn_hcmd_utils,
.led = &iwlagn_led_ops,
};
@ -234,10 +234,10 @@ struct iwl_cfg iwl1000_bgn_cfg = {
.ops = &iwl1000_ops,
.eeprom_size = OTP_LOW_IMAGE_SIZE,
.eeprom_ver = EEPROM_1000_EEPROM_VERSION,
.eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION,
.num_of_queues = IWL50_NUM_QUEUES,
.num_of_ampdu_queues = IWL50_NUM_AMPDU_QUEUES,
.mod_params = &iwl50_mod_params,
.eeprom_calib_ver = EEPROM_1000_TX_POWER_VERSION,
.num_of_queues = IWLAGN_NUM_QUEUES,
.num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
.mod_params = &iwlagn_mod_params,
.valid_tx_ant = ANT_A,
.valid_rx_ant = ANT_AB,
.pll_cfg_val = CSR50_ANA_PLL_CFG_VAL,
@ -253,6 +253,7 @@ struct iwl_cfg iwl1000_bgn_cfg = {
.plcp_delta_threshold = IWL_MAX_PLCP_ERR_EXT_LONG_THRESHOLD_DEF,
.chain_noise_scale = 1000,
.monitor_recover_period = IWL_MONITORING_PERIOD,
.max_event_log_size = 128,
};
struct iwl_cfg iwl1000_bg_cfg = {
@ -264,10 +265,10 @@ struct iwl_cfg iwl1000_bg_cfg = {
.ops = &iwl1000_ops,
.eeprom_size = OTP_LOW_IMAGE_SIZE,
.eeprom_ver = EEPROM_1000_EEPROM_VERSION,
.eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION,
.num_of_queues = IWL50_NUM_QUEUES,
.num_of_ampdu_queues = IWL50_NUM_AMPDU_QUEUES,
.mod_params = &iwl50_mod_params,
.eeprom_calib_ver = EEPROM_1000_TX_POWER_VERSION,
.num_of_queues = IWLAGN_NUM_QUEUES,
.num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
.mod_params = &iwlagn_mod_params,
.valid_tx_ant = ANT_A,
.valid_rx_ant = ANT_AB,
.pll_cfg_val = CSR50_ANA_PLL_CFG_VAL,
@ -282,6 +283,7 @@ struct iwl_cfg iwl1000_bg_cfg = {
.plcp_delta_threshold = IWL_MAX_PLCP_ERR_EXT_LONG_THRESHOLD_DEF,
.chain_noise_scale = 1000,
.monitor_recover_period = IWL_MONITORING_PERIOD,
.max_event_log_size = 128,
};
MODULE_FIRMWARE(IWL1000_MODULE_FIRMWARE(IWL1000_UCODE_API_MAX));

View File

@ -71,10 +71,6 @@
#include "iwl-eeprom.h"
/* Time constants */
#define SHORT_SLOT_TIME 9
#define LONG_SLOT_TIME 20
/* RSSI to dBm */
#define IWL39_RSSI_OFFSET 95
@ -230,7 +226,6 @@ struct iwl3945_eeprom {
/* 4 DATA + 1 CMD. There are 2 HCCA queues that are not used. */
#define IWL39_NUM_QUEUES 5
#define IWL_NUM_SCAN_RATES (2)
#define IWL_DEFAULT_TX_RETRY 15

View File

@ -342,7 +342,7 @@ void iwl3945_rs_rate_init(struct iwl_priv *priv, struct ieee80211_sta *sta, u8 s
struct ieee80211_supported_band *sband;
int i;
IWL_DEBUG_INFO(priv, "enter \n");
IWL_DEBUG_INFO(priv, "enter\n");
if (sta_id == priv->hw_params.bcast_sta_id)
goto out;
@ -648,7 +648,7 @@ static void rs_get_rate(void *priv_r, struct ieee80211_sta *sta,
unsigned long flags;
u16 rate_mask = sta ? sta->supp_rates[sband->band] : 0;
s8 max_rate_idx = -1;
struct iwl_priv *priv = (struct iwl_priv *)priv_r;
struct iwl_priv *priv __maybe_unused = (struct iwl_priv *)priv_r;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
IWL_DEBUG_RATE(priv, "enter\n");

View File

@ -192,12 +192,12 @@ static int iwl3945_hwrate_to_plcp_idx(u8 plcp)
}
#ifdef CONFIG_IWLWIFI_DEBUG
#define TX_STATUS_ENTRY(x) case TX_STATUS_FAIL_ ## x: return #x
#define TX_STATUS_ENTRY(x) case TX_3945_STATUS_FAIL_ ## x: return #x
static const char *iwl3945_get_tx_fail_reason(u32 status)
{
switch (status & TX_STATUS_MSK) {
case TX_STATUS_SUCCESS:
case TX_3945_STATUS_SUCCESS:
return "SUCCESS";
TX_STATUS_ENTRY(SHORT_LIMIT);
TX_STATUS_ENTRY(LONG_LIMIT);
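For reference, the updated macro pastes the 3945-specific prefix and stringizes its argument, so (including the call-site semicolon) TX_STATUS_ENTRY(SHORT_LIMIT) expands to:
case TX_3945_STATUS_FAIL_SHORT_LIMIT: return "SHORT_LIMIT";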
@ -487,7 +487,7 @@ static void _iwl3945_dbg_report_frame(struct iwl_priv *priv,
* but you can hack it to show more, if you'd like to. */
if (dataframe)
IWL_DEBUG_RX(priv, "%s: mhd=0x%04x, dst=0x%02x, "
"len=%u, rssi=%d, chnl=%d, rate=%d, \n",
"len=%u, rssi=%d, chnl=%d, rate=%d,\n",
title, le16_to_cpu(fc), header->addr1[5],
length, rssi, channel, rate);
else {
@ -549,7 +549,6 @@ static void iwl3945_pass_packet_to_mac80211(struct iwl_priv *priv,
struct iwl3945_rx_frame_end *rx_end = IWL_RX_END(pkt);
u16 len = le16_to_cpu(rx_hdr->len);
struct sk_buff *skb;
int ret;
__le16 fc = hdr->frame_control;
/* We received data from the HW, so stop the watchdog */
@ -566,9 +565,9 @@ static void iwl3945_pass_packet_to_mac80211(struct iwl_priv *priv,
return;
}
skb = alloc_skb(IWL_LINK_HDR_MAX * 2, GFP_ATOMIC);
skb = dev_alloc_skb(128);
if (!skb) {
IWL_ERR(priv, "alloc_skb failed\n");
IWL_ERR(priv, "dev_alloc_skb failed\n");
return;
}
@ -577,37 +576,13 @@ static void iwl3945_pass_packet_to_mac80211(struct iwl_priv *priv,
(struct ieee80211_hdr *)rxb_addr(rxb),
le32_to_cpu(rx_end->status), stats);
skb_reserve(skb, IWL_LINK_HDR_MAX);
skb_add_rx_frag(skb, 0, rxb->page,
(void *)rx_hdr->payload - (void *)pkt, len);
/* mac80211 currently doesn't support paged SKBs. Convert to a
* linear SKB for management frames, and for data frames that require
* software decryption or software defragmentation. */
if (ieee80211_is_mgmt(fc) ||
ieee80211_has_protected(fc) ||
ieee80211_has_morefrags(fc) ||
le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG)
ret = skb_linearize(skb);
else
ret = __pskb_pull_tail(skb, min_t(u16, IWL_LINK_HDR_MAX, len)) ?
0 : -ENOMEM;
if (ret) {
kfree_skb(skb);
goto out;
}
/*
* XXX: We cannot touch the page and its virtual memory (pkt) after
* here. It might have already been freed by the above skb change.
*/
iwl_update_stats(priv, false, fc, len);
memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats));
ieee80211_rx(priv->hw, skb);
out:
priv->alloc_rxb_page--;
rxb->page = NULL;
}
@ -623,9 +598,8 @@ static void iwl3945_rx_reply_rx(struct iwl_priv *priv,
struct iwl3945_rx_frame_stats *rx_stats = IWL_RX_STATS(pkt);
struct iwl3945_rx_frame_hdr *rx_hdr = IWL_RX_HDR(pkt);
struct iwl3945_rx_frame_end *rx_end = IWL_RX_END(pkt);
int snr;
u16 rx_stats_sig_avg = le16_to_cpu(rx_stats->sig_avg);
u16 rx_stats_noise_diff = le16_to_cpu(rx_stats->noise_diff);
u16 rx_stats_sig_avg __maybe_unused = le16_to_cpu(rx_stats->sig_avg);
u16 rx_stats_noise_diff __maybe_unused = le16_to_cpu(rx_stats->noise_diff);
u8 network_packet;
rx_status.flag = 0;
@ -663,43 +637,19 @@ static void iwl3945_rx_reply_rx(struct iwl_priv *priv,
/* Convert 3945's rssi indicator to dBm */
rx_status.signal = rx_stats->rssi - IWL39_RSSI_OFFSET;
/* Set default noise value to -127 */
if (priv->last_rx_noise == 0)
priv->last_rx_noise = IWL_NOISE_MEAS_NOT_AVAILABLE;
/* 3945 provides noise info for OFDM frames only.
* sig_avg and noise_diff are measured by the 3945's digital signal
* processor (DSP), and indicate linear levels of signal level and
* distortion/noise within the packet preamble after
* automatic gain control (AGC). sig_avg should stay fairly
* constant if the radio's AGC is working well.
* Since these values are linear (not dB or dBm), linear
* signal-to-noise ratio (SNR) is (sig_avg / noise_diff).
* Convert linear SNR to dB SNR, then subtract that from rssi dBm
* to obtain noise level in dBm.
* Calculate rx_status.signal (quality indicator in %) based on SNR. */
if (rx_stats_noise_diff) {
snr = rx_stats_sig_avg / rx_stats_noise_diff;
rx_status.noise = rx_status.signal -
iwl3945_calc_db_from_ratio(snr);
} else {
rx_status.noise = priv->last_rx_noise;
}
IWL_DEBUG_STATS(priv, "Rssi %d noise %d sig_avg %d noise_diff %d\n",
rx_status.signal, rx_status.noise,
rx_stats_sig_avg, rx_stats_noise_diff);
IWL_DEBUG_STATS(priv, "Rssi %d sig_avg %d noise_diff %d\n",
rx_status.signal, rx_stats_sig_avg,
rx_stats_noise_diff);
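Restated as a single expression, the noise estimate dropped above was (integer SNR, converted to dB by the helper):
/* rx_status.noise = rx_status.signal - iwl3945_calc_db_from_ratio(sig_avg / noise_diff) */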
header = (struct ieee80211_hdr *)IWL_RX_DATA(pkt);
network_packet = iwl3945_is_network_packet(priv, header);
IWL_DEBUG_STATS_LIMIT(priv, "[%c] %d RSSI:%d Signal:%u, Noise:%u, Rate:%u\n",
IWL_DEBUG_STATS_LIMIT(priv, "[%c] %d RSSI:%d Signal:%u, Rate:%u\n",
network_packet ? '*' : ' ',
le16_to_cpu(rx_hdr->channel),
rx_status.signal, rx_status.signal,
rx_status.noise, rx_status.rate_idx);
rx_status.rate_idx);
/* Set "1" to report good data frames in groups of 100 */
iwl3945_dbg_report_frame(priv, pkt, header, 1);
@ -710,7 +660,6 @@ static void iwl3945_rx_reply_rx(struct iwl_priv *priv,
le32_to_cpu(rx_end->beacon_timestamp);
priv->_3945.last_tsf = le64_to_cpu(rx_end->timestamp);
priv->_3945.last_rx_rssi = rx_status.signal;
priv->last_rx_noise = rx_status.noise;
}
iwl3945_pass_packet_to_mac80211(priv, rxb, &rx_status);
@ -1050,7 +999,7 @@ static void iwl3945_nic_config(struct iwl_priv *priv)
IWL_DEBUG_INFO(priv, "HW Revision ID = 0x%X\n", rev_id);
if (rev_id & PCI_CFG_REV_ID_BIT_RTP)
IWL_DEBUG_INFO(priv, "RTP type \n");
IWL_DEBUG_INFO(priv, "RTP type\n");
else if (rev_id & PCI_CFG_REV_ID_BIT_BASIC_SKU) {
IWL_DEBUG_INFO(priv, "3945 RADIO-MB type\n");
iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
@ -2822,6 +2771,7 @@ static struct iwl_cfg iwl3945_bg_cfg = {
.broken_powersave = true,
.plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
.monitor_recover_period = IWL_MONITORING_PERIOD,
.max_event_log_size = 512,
};
static struct iwl_cfg iwl3945_abg_cfg = {
@ -2841,6 +2791,7 @@ static struct iwl_cfg iwl3945_abg_cfg = {
.broken_powersave = true,
.plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
.monitor_recover_period = IWL_MONITORING_PERIOD,
.max_event_log_size = 512,
};
DEFINE_PCI_DEVICE_TABLE(iwl3945_hw_card_ids) = {

View File

@ -81,26 +81,6 @@
*/
#define IWL49_FIRST_AMPDU_QUEUE 7
/* Time constants */
#define SHORT_SLOT_TIME 9
#define LONG_SLOT_TIME 20
/* RSSI to dBm */
#define IWL49_RSSI_OFFSET 44
/* PCI registers */
#define PCI_CFG_RETRY_TIMEOUT 0x041
/* PCI register values */
#define PCI_CFG_LINK_CTRL_VAL_L0S_EN 0x01
#define PCI_CFG_LINK_CTRL_VAL_L1_EN 0x02
#define IWL_NUM_SCAN_RATES (2)
#define IWL_DEFAULT_TX_RETRY 15
/* Sizes and addresses for instruction and data memory (SRAM) in
* 4965's embedded processor. Driver access is via HBUS_TARG_MEM_* regs. */
#define IWL49_RTC_INST_LOWER_BOUND (0x000000)
@ -393,10 +373,6 @@ static inline int iwl4965_hw_valid_rtc_data_addr(u32 addr)
* location(s) in command (struct iwl4965_txpowertable_cmd).
*/
/* Limit range of txpower output target to be between these values */
#define IWL_TX_POWER_TARGET_POWER_MIN (0) /* 0 dBm = 1 milliwatt */
#define IWL_TX_POWER_TARGET_POWER_MAX (16) /* 16 dBm */
/**
* When MIMO is used (2 transmitters operating simultaneously), driver should
* limit each transmitter to deliver a max of 3 dB below the regulatory limit

View File

@ -46,6 +46,7 @@
#include "iwl-calib.h"
#include "iwl-sta.h"
#include "iwl-agn-led.h"
#include "iwl-agn.h"
static int iwl4965_send_tx_power(struct iwl_priv *priv);
static int iwl4965_hw_get_temperature(struct iwl_priv *priv);
@ -60,14 +61,6 @@ static int iwl4965_hw_get_temperature(struct iwl_priv *priv);
#define _IWL4965_MODULE_FIRMWARE(api) IWL4965_FW_PRE #api ".ucode"
#define IWL4965_MODULE_FIRMWARE(api) _IWL4965_MODULE_FIRMWARE(api)
/* module parameters */
static struct iwl_mod_params iwl4965_mod_params = {
.amsdu_size_8K = 1,
.restart_fw = 1,
/* the rest are 0 by default */
};
/* check contents of special bootstrap uCode SRAM */
static int iwl4965_verify_bsm(struct iwl_priv *priv)
{
@ -417,7 +410,7 @@ static void iwl4965_gain_computation(struct iwl_priv *priv,
sizeof(cmd), &cmd);
if (ret)
IWL_DEBUG_CALIB(priv, "fail sending cmd "
"REPLY_PHY_CALIBRATION_CMD \n");
"REPLY_PHY_CALIBRATION_CMD\n");
/* TODO we might want recalculate
* rx_chain in rxon cmd */
@ -1619,19 +1612,19 @@ static int iwl4965_is_temp_calib_needed(struct iwl_priv *priv)
/* get absolute value */
if (temp_diff < 0) {
IWL_DEBUG_POWER(priv, "Getting cooler, delta %d, \n", temp_diff);
IWL_DEBUG_POWER(priv, "Getting cooler, delta %d\n", temp_diff);
temp_diff = -temp_diff;
} else if (temp_diff == 0)
IWL_DEBUG_POWER(priv, "Same temp, \n");
IWL_DEBUG_POWER(priv, "Temperature unchanged\n");
else
IWL_DEBUG_POWER(priv, "Getting warmer, delta %d, \n", temp_diff);
IWL_DEBUG_POWER(priv, "Getting warmer, delta %d\n", temp_diff);
if (temp_diff < IWL_TEMPERATURE_THRESHOLD) {
IWL_DEBUG_POWER(priv, "Thermal txpower calib not needed\n");
IWL_DEBUG_POWER(priv, " => thermal txpower calib not needed\n");
return 0;
}
IWL_DEBUG_POWER(priv, "Thermal txpower calib needed\n");
IWL_DEBUG_POWER(priv, " => thermal txpower calib needed\n");
return 1;
}
@ -1880,7 +1873,7 @@ static int iwl4965_tx_status_reply_tx(struct iwl_priv *priv,
info->status.rates[0].count = tx_resp->failure_frame + 1;
info->flags &= ~IEEE80211_TX_CTL_AMPDU;
info->flags |= iwl_tx_status_to_mac80211(status);
iwl_hwrate_to_tx_control(priv, rate_n_flags, info);
iwlagn_hwrate_to_tx_control(priv, rate_n_flags, info);
/* FIXME: code repetition end */
IWL_DEBUG_TX_REPLY(priv, "1 Frame 0x%x failure :%d\n",
@ -2020,7 +2013,7 @@ static void iwl4965_rx_reply_tx(struct iwl_priv *priv,
index = iwl_queue_dec_wrap(scd_ssn & 0xff, txq->q.n_bd);
IWL_DEBUG_TX_REPLY(priv, "Retry scheduler reclaim scd_ssn "
"%d index %d\n", scd_ssn , index);
freed = iwl_tx_queue_reclaim(priv, txq_id, index);
freed = iwlagn_tx_queue_reclaim(priv, txq_id, index);
if (qc)
iwl_free_tfds_in_queue(priv, sta_id,
tid, freed);
@ -2037,7 +2030,7 @@ static void iwl4965_rx_reply_tx(struct iwl_priv *priv,
} else {
info->status.rates[0].count = tx_resp->failure_frame + 1;
info->flags |= iwl_tx_status_to_mac80211(status);
iwl_hwrate_to_tx_control(priv,
iwlagn_hwrate_to_tx_control(priv,
le32_to_cpu(tx_resp->rate_n_flags),
info);
@ -2048,7 +2041,7 @@ static void iwl4965_rx_reply_tx(struct iwl_priv *priv,
le32_to_cpu(tx_resp->rate_n_flags),
tx_resp->failure_frame);
freed = iwl_tx_queue_reclaim(priv, txq_id, index);
freed = iwlagn_tx_queue_reclaim(priv, txq_id, index);
if (qc && likely(sta_id != IWL_INVALID_STATION))
iwl_free_tfds_in_queue(priv, sta_id, tid, freed);
else if (sta_id == IWL_INVALID_STATION)
@ -2059,10 +2052,9 @@ static void iwl4965_rx_reply_tx(struct iwl_priv *priv,
iwl_wake_queue(priv, txq_id);
}
if (qc && likely(sta_id != IWL_INVALID_STATION))
iwl_txq_check_empty(priv, sta_id, tid, txq_id);
iwlagn_txq_check_empty(priv, sta_id, tid, txq_id);
if (iwl_check_bits(status, TX_ABORT_REQUIRED_MSK))
IWL_ERR(priv, "TODO: Implement Tx ABORT REQUIRED!!!\n");
iwl_check_abort_status(priv, tx_resp->frame_count, status);
}
static int iwl4965_calc_rssi(struct iwl_priv *priv,
@ -2096,7 +2088,7 @@ static int iwl4965_calc_rssi(struct iwl_priv *priv,
/* dBm = max_rssi dB - agc dB - constant.
* Higher AGC (higher radio gain) means lower signal. */
return max_rssi - agc - IWL49_RSSI_OFFSET;
return max_rssi - agc - IWLAGN_RSSI_OFFSET;
}
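Worked with hypothetical numbers, and assuming IWLAGN_RSSI_OFFSET keeps the 44 dB value that IWL49_RSSI_OFFSET carries in the removed 4965 header above: max_rssi = 50 and agc = 60 give 50 - 60 - 44 = -54 dBm, so a larger AGC gain indeed reports a weaker signal, as the comment states.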
@ -2104,7 +2096,7 @@ static int iwl4965_calc_rssi(struct iwl_priv *priv,
static void iwl4965_rx_handler_setup(struct iwl_priv *priv)
{
/* Legacy Rx frames */
priv->rx_handlers[REPLY_RX] = iwl_rx_reply_rx;
priv->rx_handlers[REPLY_RX] = iwlagn_rx_reply_rx;
/* Tx response */
priv->rx_handlers[REPLY_TX] = iwl4965_rx_reply_tx;
}
@ -2247,7 +2239,7 @@ struct iwl_cfg iwl4965_agn_cfg = {
.ops = &iwl4965_ops,
.num_of_queues = IWL49_NUM_QUEUES,
.num_of_ampdu_queues = IWL49_NUM_AMPDU_QUEUES,
.mod_params = &iwl4965_mod_params,
.mod_params = &iwlagn_mod_params,
.valid_tx_ant = ANT_AB,
.valid_rx_ant = ANT_ABC,
.pll_cfg_val = 0,
@ -2260,27 +2252,11 @@ struct iwl_cfg iwl4965_agn_cfg = {
.chain_noise_num_beacons = IWL4965_CAL_NUM_BEACONS,
.plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
.monitor_recover_period = IWL_MONITORING_PERIOD,
.temperature_kelvin = true,
.off_channel_workaround = true,
.max_event_log_size = 512,
};
/* Module firmware */
MODULE_FIRMWARE(IWL4965_MODULE_FIRMWARE(IWL4965_UCODE_API_MAX));
module_param_named(antenna, iwl4965_mod_params.antenna, int, S_IRUGO);
MODULE_PARM_DESC(antenna, "select antenna (1=Main, 2=Aux, default 0 [both])");
module_param_named(swcrypto, iwl4965_mod_params.sw_crypto, int, S_IRUGO);
MODULE_PARM_DESC(swcrypto, "using crypto in software (default 0 [hardware])");
module_param_named(
disable_hw_scan, iwl4965_mod_params.disable_hw_scan, int, S_IRUGO);
MODULE_PARM_DESC(disable_hw_scan, "disable hardware scanning (default 0)");
module_param_named(queues_num, iwl4965_mod_params.num_of_queues, int, S_IRUGO);
MODULE_PARM_DESC(queues_num, "number of hw queues.");
/* 11n */
module_param_named(11n_disable, iwl4965_mod_params.disable_11n, int, S_IRUGO);
MODULE_PARM_DESC(11n_disable, "disable 11n functionality");
module_param_named(amsdu_size_8K, iwl4965_mod_params.amsdu_size_8K,
int, S_IRUGO);
MODULE_PARM_DESC(amsdu_size_8K, "enable 8K amsdu size");
module_param_named(fw_restart4965, iwl4965_mod_params.restart_fw, int, S_IRUGO);
MODULE_PARM_DESC(fw_restart4965, "restart firmware in case of error");

View File

@ -68,25 +68,6 @@
#ifndef __iwl_5000_hw_h__
#define __iwl_5000_hw_h__
#define IWL50_RTC_INST_LOWER_BOUND (0x000000)
#define IWL50_RTC_INST_UPPER_BOUND (0x020000)
#define IWL50_RTC_DATA_LOWER_BOUND (0x800000)
#define IWL50_RTC_DATA_UPPER_BOUND (0x80C000)
#define IWL50_RTC_INST_SIZE (IWL50_RTC_INST_UPPER_BOUND - \
IWL50_RTC_INST_LOWER_BOUND)
#define IWL50_RTC_DATA_SIZE (IWL50_RTC_DATA_UPPER_BOUND - \
IWL50_RTC_DATA_LOWER_BOUND)
/* EEPROM */
#define IWL_5000_EEPROM_IMG_SIZE 2048
#define IWL50_CMD_FIFO_NUM 7
#define IWL50_NUM_QUEUES 20
#define IWL50_NUM_AMPDU_QUEUES 10
#define IWL50_FIRST_AMPDU_QUEUE 10
/* 5150 only */
#define IWL_5150_VOLTAGE_TO_TEMPERATURE_COEFF (-5)
@ -103,19 +84,5 @@ static inline s32 iwl_temp_calib_to_offset(struct iwl_priv *priv)
return (s32)(temperature - voltage / IWL_5150_VOLTAGE_TO_TEMPERATURE_COEFF);
}
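With hypothetical EEPROM readings and the -5 coefficient defined above (IWL_5150_VOLTAGE_TO_TEMPERATURE_COEFF): temperature = 300 and voltage = 50 give 300 - 50 / (-5) = 300 + 10 = 310 as the returned calibration offset (integer division).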
/* Fixed (non-configurable) rx data from phy */
/**
* struct iwl5000_schedq_bc_tbl scheduler byte count table
* base physical address of iwl5000_shared
* is provided to SCD_DRAM_BASE_ADDR
* @tfd_offset 0-12 - tx command byte count
* 12-16 - station index
*/
struct iwl5000_scd_bc_tbl {
__le16 tfd_offset[TFD_QUEUE_BC_SIZE];
} __attribute__ ((packed));
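A hedged sketch of the packing the removed comment describes (byte count in the low 12 bits, station index in the next 4); the helper name and exact field widths are inferred from that comment, not taken from this patch.
static inline __le16 scd_bc_tbl_entry(u16 byte_cnt, u8 sta_id)
{
	/* bits 0..11: tx command byte count, bits 12..15: station index */
	return cpu_to_le16((byte_cnt & 0x0fff) | ((u16)sta_id << 12));
}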
#endif /* __iwl_5000_hw_h__ */

File diff suppressed because it is too large

View File

@ -44,7 +44,7 @@
#include "iwl-sta.h"
#include "iwl-agn.h"
#include "iwl-helpers.h"
#include "iwl-5000-hw.h"
#include "iwl-agn-hw.h"
#include "iwl-6000-hw.h"
#include "iwl-agn-led.h"
@ -57,6 +57,7 @@
#define IWL6050_UCODE_API_MIN 4
#define IWL6000_FW_PRE "iwlwifi-6000-"
#define IWL6000_G2_FW_PRE "iwlwifi-6005-"
#define _IWL6000_MODULE_FIRMWARE(api) IWL6000_FW_PRE #api ".ucode"
#define IWL6000_MODULE_FIRMWARE(api) _IWL6000_MODULE_FIRMWARE(api)
@ -137,7 +138,7 @@ static struct iwl_sensitivity_ranges iwl6000_sensitivity = {
static int iwl6000_hw_set_hw_params(struct iwl_priv *priv)
{
if (priv->cfg->mod_params->num_of_queues >= IWL_MIN_NUM_QUEUES &&
priv->cfg->mod_params->num_of_queues <= IWL50_NUM_QUEUES)
priv->cfg->mod_params->num_of_queues <= IWLAGN_NUM_QUEUES)
priv->cfg->num_of_queues =
priv->cfg->mod_params->num_of_queues;
@ -145,7 +146,7 @@ static int iwl6000_hw_set_hw_params(struct iwl_priv *priv)
priv->hw_params.dma_chnl_num = FH50_TCSR_CHNL_NUM;
priv->hw_params.scd_bc_tbls_size =
priv->cfg->num_of_queues *
sizeof(struct iwl5000_scd_bc_tbl);
sizeof(struct iwlagn_scd_bc_tbl);
priv->hw_params.tfd_size = sizeof(struct iwl_tfd);
priv->hw_params.max_stations = IWL5000_STATION_COUNT;
priv->hw_params.bcast_sta_id = IWL5000_BROADCAST_ID;
@ -226,25 +227,25 @@ static int iwl6000_hw_channel_switch(struct iwl_priv *priv, u16 channel)
static struct iwl_lib_ops iwl6000_lib = {
.set_hw_params = iwl6000_hw_set_hw_params,
.txq_update_byte_cnt_tbl = iwl5000_txq_update_byte_cnt_tbl,
.txq_inval_byte_cnt_tbl = iwl5000_txq_inval_byte_cnt_tbl,
.txq_set_sched = iwl5000_txq_set_sched,
.txq_agg_enable = iwl5000_txq_agg_enable,
.txq_agg_disable = iwl5000_txq_agg_disable,
.txq_update_byte_cnt_tbl = iwlagn_txq_update_byte_cnt_tbl,
.txq_inval_byte_cnt_tbl = iwlagn_txq_inval_byte_cnt_tbl,
.txq_set_sched = iwlagn_txq_set_sched,
.txq_agg_enable = iwlagn_txq_agg_enable,
.txq_agg_disable = iwlagn_txq_agg_disable,
.txq_attach_buf_to_tfd = iwl_hw_txq_attach_buf_to_tfd,
.txq_free_tfd = iwl_hw_txq_free_tfd,
.txq_init = iwl_hw_tx_queue_init,
.rx_handler_setup = iwl5000_rx_handler_setup,
.setup_deferred_work = iwl5000_setup_deferred_work,
.is_valid_rtc_data_addr = iwl5000_hw_valid_rtc_data_addr,
.load_ucode = iwl5000_load_ucode,
.rx_handler_setup = iwlagn_rx_handler_setup,
.setup_deferred_work = iwlagn_setup_deferred_work,
.is_valid_rtc_data_addr = iwlagn_hw_valid_rtc_data_addr,
.load_ucode = iwlagn_load_ucode,
.dump_nic_event_log = iwl_dump_nic_event_log,
.dump_nic_error_log = iwl_dump_nic_error_log,
.dump_csr = iwl_dump_csr,
.dump_fh = iwl_dump_fh,
.init_alive_start = iwl5000_init_alive_start,
.alive_notify = iwl5000_alive_notify,
.send_tx_power = iwl5000_send_tx_power,
.init_alive_start = iwlagn_init_alive_start,
.alive_notify = iwlagn_alive_notify,
.send_tx_power = iwlagn_send_tx_power,
.update_chain_flags = iwl_update_chain_flags,
.set_channel_switch = iwl6000_hw_channel_switch,
.apm_ops = {
@ -255,26 +256,26 @@ static struct iwl_lib_ops iwl6000_lib = {
},
.eeprom_ops = {
.regulatory_bands = {
EEPROM_5000_REG_BAND_1_CHANNELS,
EEPROM_5000_REG_BAND_2_CHANNELS,
EEPROM_5000_REG_BAND_3_CHANNELS,
EEPROM_5000_REG_BAND_4_CHANNELS,
EEPROM_5000_REG_BAND_5_CHANNELS,
EEPROM_5000_REG_BAND_24_HT40_CHANNELS,
EEPROM_5000_REG_BAND_52_HT40_CHANNELS
EEPROM_REG_BAND_1_CHANNELS,
EEPROM_REG_BAND_2_CHANNELS,
EEPROM_REG_BAND_3_CHANNELS,
EEPROM_REG_BAND_4_CHANNELS,
EEPROM_REG_BAND_5_CHANNELS,
EEPROM_REG_BAND_24_HT40_CHANNELS,
EEPROM_REG_BAND_52_HT40_CHANNELS
},
.verify_signature = iwlcore_eeprom_verify_signature,
.acquire_semaphore = iwlcore_eeprom_acquire_semaphore,
.release_semaphore = iwlcore_eeprom_release_semaphore,
.calib_version = iwl5000_eeprom_calib_version,
.query_addr = iwl5000_eeprom_query_addr,
.calib_version = iwlagn_eeprom_calib_version,
.query_addr = iwlagn_eeprom_query_addr,
.update_enhanced_txpower = iwlcore_eeprom_enhanced_txpower,
},
.post_associate = iwl_post_associate,
.isr = iwl_isr_ict,
.config_ap = iwl_config_ap,
.temp_ops = {
.temperature = iwl5000_temperature,
.temperature = iwlagn_temperature,
.set_ct_kill = iwl6000_set_ct_threshold,
},
.add_bcast_station = iwl_add_bcast_station,
@ -284,34 +285,34 @@ static struct iwl_lib_ops iwl6000_lib = {
};
static const struct iwl_ops iwl6000_ops = {
.ucode = &iwl5000_ucode,
.ucode = &iwlagn_ucode,
.lib = &iwl6000_lib,
.hcmd = &iwl5000_hcmd,
.utils = &iwl5000_hcmd_utils,
.hcmd = &iwlagn_hcmd,
.utils = &iwlagn_hcmd_utils,
.led = &iwlagn_led_ops,
};
static struct iwl_lib_ops iwl6050_lib = {
.set_hw_params = iwl6000_hw_set_hw_params,
.txq_update_byte_cnt_tbl = iwl5000_txq_update_byte_cnt_tbl,
.txq_inval_byte_cnt_tbl = iwl5000_txq_inval_byte_cnt_tbl,
.txq_set_sched = iwl5000_txq_set_sched,
.txq_agg_enable = iwl5000_txq_agg_enable,
.txq_agg_disable = iwl5000_txq_agg_disable,
.txq_update_byte_cnt_tbl = iwlagn_txq_update_byte_cnt_tbl,
.txq_inval_byte_cnt_tbl = iwlagn_txq_inval_byte_cnt_tbl,
.txq_set_sched = iwlagn_txq_set_sched,
.txq_agg_enable = iwlagn_txq_agg_enable,
.txq_agg_disable = iwlagn_txq_agg_disable,
.txq_attach_buf_to_tfd = iwl_hw_txq_attach_buf_to_tfd,
.txq_free_tfd = iwl_hw_txq_free_tfd,
.txq_init = iwl_hw_tx_queue_init,
.rx_handler_setup = iwl5000_rx_handler_setup,
.setup_deferred_work = iwl5000_setup_deferred_work,
.is_valid_rtc_data_addr = iwl5000_hw_valid_rtc_data_addr,
.load_ucode = iwl5000_load_ucode,
.rx_handler_setup = iwlagn_rx_handler_setup,
.setup_deferred_work = iwlagn_setup_deferred_work,
.is_valid_rtc_data_addr = iwlagn_hw_valid_rtc_data_addr,
.load_ucode = iwlagn_load_ucode,
.dump_nic_event_log = iwl_dump_nic_event_log,
.dump_nic_error_log = iwl_dump_nic_error_log,
.dump_csr = iwl_dump_csr,
.dump_fh = iwl_dump_fh,
.init_alive_start = iwl5000_init_alive_start,
.alive_notify = iwl5000_alive_notify,
.send_tx_power = iwl5000_send_tx_power,
.init_alive_start = iwlagn_init_alive_start,
.alive_notify = iwlagn_alive_notify,
.send_tx_power = iwlagn_send_tx_power,
.update_chain_flags = iwl_update_chain_flags,
.set_channel_switch = iwl6000_hw_channel_switch,
.apm_ops = {
@ -322,26 +323,26 @@ static struct iwl_lib_ops iwl6050_lib = {
},
.eeprom_ops = {
.regulatory_bands = {
EEPROM_5000_REG_BAND_1_CHANNELS,
EEPROM_5000_REG_BAND_2_CHANNELS,
EEPROM_5000_REG_BAND_3_CHANNELS,
EEPROM_5000_REG_BAND_4_CHANNELS,
EEPROM_5000_REG_BAND_5_CHANNELS,
EEPROM_5000_REG_BAND_24_HT40_CHANNELS,
EEPROM_5000_REG_BAND_52_HT40_CHANNELS
EEPROM_REG_BAND_1_CHANNELS,
EEPROM_REG_BAND_2_CHANNELS,
EEPROM_REG_BAND_3_CHANNELS,
EEPROM_REG_BAND_4_CHANNELS,
EEPROM_REG_BAND_5_CHANNELS,
EEPROM_REG_BAND_24_HT40_CHANNELS,
EEPROM_REG_BAND_52_HT40_CHANNELS
},
.verify_signature = iwlcore_eeprom_verify_signature,
.acquire_semaphore = iwlcore_eeprom_acquire_semaphore,
.release_semaphore = iwlcore_eeprom_release_semaphore,
.calib_version = iwl5000_eeprom_calib_version,
.query_addr = iwl5000_eeprom_query_addr,
.calib_version = iwlagn_eeprom_calib_version,
.query_addr = iwlagn_eeprom_query_addr,
.update_enhanced_txpower = iwlcore_eeprom_enhanced_txpower,
},
.post_associate = iwl_post_associate,
.isr = iwl_isr_ict,
.config_ap = iwl_config_ap,
.temp_ops = {
.temperature = iwl5000_temperature,
.temperature = iwlagn_temperature,
.set_ct_kill = iwl6000_set_ct_threshold,
.set_calib_version = iwl6050_set_calib_version,
},
@ -352,16 +353,50 @@ static struct iwl_lib_ops iwl6050_lib = {
};
static const struct iwl_ops iwl6050_ops = {
.ucode = &iwl5000_ucode,
.ucode = &iwlagn_ucode,
.lib = &iwl6050_lib,
.hcmd = &iwl5000_hcmd,
.utils = &iwl5000_hcmd_utils,
.hcmd = &iwlagn_hcmd,
.utils = &iwlagn_hcmd_utils,
.led = &iwlagn_led_ops,
};
/*
* "i": Internal configuration, use internal Power Amplifier
*/
struct iwl_cfg iwl6000i_g2_2agn_cfg = {
.name = "6000 Series 2x2 AGN Gen2",
.fw_name_pre = IWL6000_G2_FW_PRE,
.ucode_api_max = IWL6000_UCODE_API_MAX,
.ucode_api_min = IWL6000_UCODE_API_MIN,
.sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N,
.ops = &iwl6000_ops,
.eeprom_size = OTP_LOW_IMAGE_SIZE,
.eeprom_ver = EEPROM_6000_EEPROM_VERSION,
.eeprom_calib_ver = EEPROM_6000_TX_POWER_VERSION,
.num_of_queues = IWLAGN_NUM_QUEUES,
.num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
.mod_params = &iwlagn_mod_params,
.valid_tx_ant = ANT_AB,
.valid_rx_ant = ANT_AB,
.pll_cfg_val = 0,
.set_l0s = true,
.use_bsm = false,
.pa_type = IWL_PA_INTERNAL,
.max_ll_items = OTP_MAX_LL_ITEMS_6x00,
.shadow_ram_support = true,
.ht_greenfield_support = true,
.led_compensation = 51,
.use_rts_for_ht = true, /* use rts/cts protection */
.chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
.supports_idle = true,
.adv_thermal_throttle = true,
.support_ct_kill_exit = true,
.plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
.chain_noise_scale = 1000,
.monitor_recover_period = IWL_MONITORING_PERIOD,
.max_event_log_size = 1024,
};
struct iwl_cfg iwl6000i_2agn_cfg = {
.name = "Intel(R) Centrino(R) Advanced-N 6200 AGN",
.fw_name_pre = IWL6000_FW_PRE,
@ -371,10 +406,10 @@ struct iwl_cfg iwl6000i_2agn_cfg = {
.ops = &iwl6000_ops,
.eeprom_size = OTP_LOW_IMAGE_SIZE,
.eeprom_ver = EEPROM_6000_EEPROM_VERSION,
.eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION,
.num_of_queues = IWL50_NUM_QUEUES,
.num_of_ampdu_queues = IWL50_NUM_AMPDU_QUEUES,
.mod_params = &iwl50_mod_params,
.eeprom_calib_ver = EEPROM_6000_TX_POWER_VERSION,
.num_of_queues = IWLAGN_NUM_QUEUES,
.num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
.mod_params = &iwlagn_mod_params,
.valid_tx_ant = ANT_BC,
.valid_rx_ant = ANT_BC,
.pll_cfg_val = 0,
@ -393,6 +428,7 @@ struct iwl_cfg iwl6000i_2agn_cfg = {
.plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
.chain_noise_scale = 1000,
.monitor_recover_period = IWL_MONITORING_PERIOD,
.max_event_log_size = 1024,
};
struct iwl_cfg iwl6000i_2abg_cfg = {
@ -404,10 +440,10 @@ struct iwl_cfg iwl6000i_2abg_cfg = {
.ops = &iwl6000_ops,
.eeprom_size = OTP_LOW_IMAGE_SIZE,
.eeprom_ver = EEPROM_6000_EEPROM_VERSION,
.eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION,
.num_of_queues = IWL50_NUM_QUEUES,
.num_of_ampdu_queues = IWL50_NUM_AMPDU_QUEUES,
.mod_params = &iwl50_mod_params,
.eeprom_calib_ver = EEPROM_6000_TX_POWER_VERSION,
.num_of_queues = IWLAGN_NUM_QUEUES,
.num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
.mod_params = &iwlagn_mod_params,
.valid_tx_ant = ANT_BC,
.valid_rx_ant = ANT_BC,
.pll_cfg_val = 0,
@ -425,6 +461,7 @@ struct iwl_cfg iwl6000i_2abg_cfg = {
.plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
.chain_noise_scale = 1000,
.monitor_recover_period = IWL_MONITORING_PERIOD,
.max_event_log_size = 1024,
};
struct iwl_cfg iwl6000i_2bg_cfg = {
@ -436,10 +473,10 @@ struct iwl_cfg iwl6000i_2bg_cfg = {
.ops = &iwl6000_ops,
.eeprom_size = OTP_LOW_IMAGE_SIZE,
.eeprom_ver = EEPROM_6000_EEPROM_VERSION,
.eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION,
.num_of_queues = IWL50_NUM_QUEUES,
.num_of_ampdu_queues = IWL50_NUM_AMPDU_QUEUES,
.mod_params = &iwl50_mod_params,
.eeprom_calib_ver = EEPROM_6000_TX_POWER_VERSION,
.num_of_queues = IWLAGN_NUM_QUEUES,
.num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
.mod_params = &iwlagn_mod_params,
.valid_tx_ant = ANT_BC,
.valid_rx_ant = ANT_BC,
.pll_cfg_val = 0,
@ -457,6 +494,7 @@ struct iwl_cfg iwl6000i_2bg_cfg = {
.plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
.chain_noise_scale = 1000,
.monitor_recover_period = IWL_MONITORING_PERIOD,
.max_event_log_size = 1024,
};
struct iwl_cfg iwl6050_2agn_cfg = {
@ -468,10 +506,10 @@ struct iwl_cfg iwl6050_2agn_cfg = {
.ops = &iwl6050_ops,
.eeprom_size = OTP_LOW_IMAGE_SIZE,
.eeprom_ver = EEPROM_6050_EEPROM_VERSION,
.eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION,
.num_of_queues = IWL50_NUM_QUEUES,
.num_of_ampdu_queues = IWL50_NUM_AMPDU_QUEUES,
.mod_params = &iwl50_mod_params,
.eeprom_calib_ver = EEPROM_6050_TX_POWER_VERSION,
.num_of_queues = IWLAGN_NUM_QUEUES,
.num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
.mod_params = &iwlagn_mod_params,
.valid_tx_ant = ANT_AB,
.valid_rx_ant = ANT_AB,
.pll_cfg_val = 0,
@ -490,6 +528,7 @@ struct iwl_cfg iwl6050_2agn_cfg = {
.plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
.chain_noise_scale = 1500,
.monitor_recover_period = IWL_MONITORING_PERIOD,
.max_event_log_size = 1024,
};
struct iwl_cfg iwl6050_2abg_cfg = {
@ -501,10 +540,10 @@ struct iwl_cfg iwl6050_2abg_cfg = {
.ops = &iwl6050_ops,
.eeprom_size = OTP_LOW_IMAGE_SIZE,
.eeprom_ver = EEPROM_6050_EEPROM_VERSION,
.eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION,
.num_of_queues = IWL50_NUM_QUEUES,
.num_of_ampdu_queues = IWL50_NUM_AMPDU_QUEUES,
.mod_params = &iwl50_mod_params,
.eeprom_calib_ver = EEPROM_6050_TX_POWER_VERSION,
.num_of_queues = IWLAGN_NUM_QUEUES,
.num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
.mod_params = &iwlagn_mod_params,
.valid_tx_ant = ANT_AB,
.valid_rx_ant = ANT_AB,
.pll_cfg_val = 0,
@ -522,6 +561,7 @@ struct iwl_cfg iwl6050_2abg_cfg = {
.plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
.chain_noise_scale = 1500,
.monitor_recover_period = IWL_MONITORING_PERIOD,
.max_event_log_size = 1024,
};
struct iwl_cfg iwl6000_3agn_cfg = {
@ -533,10 +573,10 @@ struct iwl_cfg iwl6000_3agn_cfg = {
.ops = &iwl6000_ops,
.eeprom_size = OTP_LOW_IMAGE_SIZE,
.eeprom_ver = EEPROM_6000_EEPROM_VERSION,
.eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION,
.num_of_queues = IWL50_NUM_QUEUES,
.num_of_ampdu_queues = IWL50_NUM_AMPDU_QUEUES,
.mod_params = &iwl50_mod_params,
.eeprom_calib_ver = EEPROM_6000_TX_POWER_VERSION,
.num_of_queues = IWLAGN_NUM_QUEUES,
.num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
.mod_params = &iwlagn_mod_params,
.valid_tx_ant = ANT_ABC,
.valid_rx_ant = ANT_ABC,
.pll_cfg_val = 0,
@ -555,6 +595,7 @@ struct iwl_cfg iwl6000_3agn_cfg = {
.plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
.chain_noise_scale = 1000,
.monitor_recover_period = IWL_MONITORING_PERIOD,
.max_event_log_size = 1024,
};
MODULE_FIRMWARE(IWL6000_MODULE_FIRMWARE(IWL6000_UCODE_API_MAX));

View File

@ -0,0 +1,274 @@
/******************************************************************************
*
* GPL LICENSE SUMMARY
*
* Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
* USA
*
* The full GNU General Public License is included in this distribution
* in the file called LICENSE.GPL.
*
* Contact Information:
* Intel Linux Wireless <ilw@linux.intel.com>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*
*****************************************************************************/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
#include "iwl-dev.h"
#include "iwl-core.h"
#include "iwl-io.h"
#include "iwl-agn.h"
static int iwlagn_send_rxon_assoc(struct iwl_priv *priv)
{
int ret = 0;
struct iwl5000_rxon_assoc_cmd rxon_assoc;
const struct iwl_rxon_cmd *rxon1 = &priv->staging_rxon;
const struct iwl_rxon_cmd *rxon2 = &priv->active_rxon;
if ((rxon1->flags == rxon2->flags) &&
(rxon1->filter_flags == rxon2->filter_flags) &&
(rxon1->cck_basic_rates == rxon2->cck_basic_rates) &&
(rxon1->ofdm_ht_single_stream_basic_rates ==
rxon2->ofdm_ht_single_stream_basic_rates) &&
(rxon1->ofdm_ht_dual_stream_basic_rates ==
rxon2->ofdm_ht_dual_stream_basic_rates) &&
(rxon1->ofdm_ht_triple_stream_basic_rates ==
rxon2->ofdm_ht_triple_stream_basic_rates) &&
(rxon1->acquisition_data == rxon2->acquisition_data) &&
(rxon1->rx_chain == rxon2->rx_chain) &&
(rxon1->ofdm_basic_rates == rxon2->ofdm_basic_rates)) {
IWL_DEBUG_INFO(priv, "Using current RXON_ASSOC. Not resending.\n");
return 0;
}
rxon_assoc.flags = priv->staging_rxon.flags;
rxon_assoc.filter_flags = priv->staging_rxon.filter_flags;
rxon_assoc.ofdm_basic_rates = priv->staging_rxon.ofdm_basic_rates;
rxon_assoc.cck_basic_rates = priv->staging_rxon.cck_basic_rates;
rxon_assoc.reserved1 = 0;
rxon_assoc.reserved2 = 0;
rxon_assoc.reserved3 = 0;
rxon_assoc.ofdm_ht_single_stream_basic_rates =
priv->staging_rxon.ofdm_ht_single_stream_basic_rates;
rxon_assoc.ofdm_ht_dual_stream_basic_rates =
priv->staging_rxon.ofdm_ht_dual_stream_basic_rates;
rxon_assoc.rx_chain_select_flags = priv->staging_rxon.rx_chain;
rxon_assoc.ofdm_ht_triple_stream_basic_rates =
priv->staging_rxon.ofdm_ht_triple_stream_basic_rates;
rxon_assoc.acquisition_data = priv->staging_rxon.acquisition_data;
ret = iwl_send_cmd_pdu_async(priv, REPLY_RXON_ASSOC,
sizeof(rxon_assoc), &rxon_assoc, NULL);
if (ret)
return ret;
return ret;
}
static int iwlagn_send_tx_ant_config(struct iwl_priv *priv, u8 valid_tx_ant)
{
struct iwl_tx_ant_config_cmd tx_ant_cmd = {
.valid = cpu_to_le32(valid_tx_ant),
};
if (IWL_UCODE_API(priv->ucode_ver) > 1) {
IWL_DEBUG_HC(priv, "select valid tx ant: %u\n", valid_tx_ant);
return iwl_send_cmd_pdu(priv, TX_ANT_CONFIGURATION_CMD,
sizeof(struct iwl_tx_ant_config_cmd),
&tx_ant_cmd);
} else {
IWL_DEBUG_HC(priv, "TX_ANT_CONFIGURATION_CMD not supported\n");
return -EOPNOTSUPP;
}
}
/* Currently this is the superset of everything */
static u16 iwlagn_get_hcmd_size(u8 cmd_id, u16 len)
{
return len;
}
static u16 iwlagn_build_addsta_hcmd(const struct iwl_addsta_cmd *cmd, u8 *data)
{
u16 size = (u16)sizeof(struct iwl_addsta_cmd);
struct iwl_addsta_cmd *addsta = (struct iwl_addsta_cmd *)data;
memcpy(addsta, cmd, size);
/* reserved in 5000 */
addsta->rate_n_flags = cpu_to_le16(0);
return size;
}
static void iwlagn_gain_computation(struct iwl_priv *priv,
u32 average_noise[NUM_RX_CHAINS],
u16 min_average_noise_antenna_i,
u32 min_average_noise,
u8 default_chain)
{
int i;
s32 delta_g;
struct iwl_chain_noise_data *data = &priv->chain_noise_data;
/*
* Find Gain Code for the chains based on "default chain"
*/
for (i = default_chain + 1; i < NUM_RX_CHAINS; i++) {
if ((data->disconn_array[i])) {
data->delta_gain_code[i] = 0;
continue;
}
delta_g = (priv->cfg->chain_noise_scale *
((s32)average_noise[default_chain] -
(s32)average_noise[i])) / 1500;
/* bound gain by 2 bits value max, 3rd bit is sign */
data->delta_gain_code[i] =
min(abs(delta_g), (long) CHAIN_NOISE_MAX_DELTA_GAIN_CODE);
if (delta_g < 0)
/*
* set negative sign ...
* note to Intel developers: This is uCode API format,
* not the format of any internal device registers.
* Do not change this format for e.g. 6050 or similar
* devices. Change format only if more resolution
* (i.e. more than 2 bits magnitude) is needed.
*/
data->delta_gain_code[i] |= (1 << 2);
}
IWL_DEBUG_CALIB(priv, "Delta gains: ANT_B = %d ANT_C = %d\n",
data->delta_gain_code[1], data->delta_gain_code[2]);
if (!data->radio_write) {
struct iwl_calib_chain_noise_gain_cmd cmd;
memset(&cmd, 0, sizeof(cmd));
cmd.hdr.op_code = IWL_PHY_CALIBRATE_CHAIN_NOISE_GAIN_CMD;
cmd.hdr.first_group = 0;
cmd.hdr.groups_num = 1;
cmd.hdr.data_valid = 1;
cmd.delta_gain_1 = data->delta_gain_code[1];
cmd.delta_gain_2 = data->delta_gain_code[2];
iwl_send_cmd_pdu_async(priv, REPLY_PHY_CALIBRATION_CMD,
sizeof(cmd), &cmd, NULL);
data->radio_write = 1;
data->state = IWL_CHAIN_NOISE_CALIBRATED;
}
data->chain_noise_a = 0;
data->chain_noise_b = 0;
data->chain_noise_c = 0;
data->chain_signal_a = 0;
data->chain_signal_b = 0;
data->chain_signal_c = 0;
data->beacon_count = 0;
}
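The sign-magnitude encoding above is easy to misread, so here is a minimal standalone sketch of the same arithmetic: two bits of magnitude clamped at 3 (the "2 bits value max" from the comment), sign in bit 2, and the fixed /1500 divisor. The scale and noise figures passed in below are invented example values, not anything taken from this patch.
#include <stdio.h>
#include <stdlib.h>
/* sketch of the delta-gain code computed in iwlagn_gain_computation() */
static unsigned char encode_delta_gain(int chain_noise_scale,
				       int noise_default, int noise_chain)
{
	int delta_g = (chain_noise_scale *
		       (noise_default - noise_chain)) / 1500;
	/* 2-bit magnitude, clamped; bit 2 carries the sign */
	unsigned char code = abs(delta_g) > 3 ? 3 : (unsigned char)abs(delta_g);
	if (delta_g < 0)
		code |= 1 << 2;
	return code;
}
int main(void)
{
	/* e.g. scale 1000, default chain noise 80, this chain noise 95 */
	printf("delta_gain_code = 0x%x\n", encode_delta_gain(1000, 80, 95));
	return 0;
}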
static void iwlagn_chain_noise_reset(struct iwl_priv *priv)
{
struct iwl_chain_noise_data *data = &priv->chain_noise_data;
int ret;
if ((data->state == IWL_CHAIN_NOISE_ALIVE) && iwl_is_associated(priv)) {
struct iwl_calib_chain_noise_reset_cmd cmd;
memset(&cmd, 0, sizeof(cmd));
cmd.hdr.op_code = IWL_PHY_CALIBRATE_CHAIN_NOISE_RESET_CMD;
cmd.hdr.first_group = 0;
cmd.hdr.groups_num = 1;
cmd.hdr.data_valid = 1;
ret = iwl_send_cmd_pdu(priv, REPLY_PHY_CALIBRATION_CMD,
sizeof(cmd), &cmd);
if (ret)
IWL_ERR(priv,
"Could not send REPLY_PHY_CALIBRATION_CMD\n");
data->state = IWL_CHAIN_NOISE_ACCUMULATE;
IWL_DEBUG_CALIB(priv, "Run chain_noise_calibrate\n");
}
}
static void iwlagn_rts_tx_cmd_flag(struct ieee80211_tx_info *info,
__le32 *tx_flags)
{
if ((info->control.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS) ||
(info->control.rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT))
*tx_flags |= TX_CMD_FLG_RTS_CTS_MSK;
else
*tx_flags &= ~TX_CMD_FLG_RTS_CTS_MSK;
}
/* Calc max signal level (dBm) among 3 possible receivers */
static int iwlagn_calc_rssi(struct iwl_priv *priv,
struct iwl_rx_phy_res *rx_resp)
{
/* data from PHY/DSP regarding signal strength, etc.,
* contents are always there, not configurable by host
*/
struct iwl5000_non_cfg_phy *ncphy =
(struct iwl5000_non_cfg_phy *)rx_resp->non_cfg_phy_buf;
u32 val, rssi_a, rssi_b, rssi_c, max_rssi;
u8 agc;
val = le32_to_cpu(ncphy->non_cfg_phy[IWL50_RX_RES_AGC_IDX]);
agc = (val & IWL50_OFDM_AGC_MSK) >> IWL50_OFDM_AGC_BIT_POS;
/* Find max rssi among 3 possible receivers.
* These values are measured by the digital signal processor (DSP).
* They should stay fairly constant even as the signal strength varies,
* if the radio's automatic gain control (AGC) is working right.
* AGC value (see below) will provide the "interesting" info.
*/
val = le32_to_cpu(ncphy->non_cfg_phy[IWL50_RX_RES_RSSI_AB_IDX]);
rssi_a = (val & IWL50_OFDM_RSSI_A_MSK) >> IWL50_OFDM_RSSI_A_BIT_POS;
rssi_b = (val & IWL50_OFDM_RSSI_B_MSK) >> IWL50_OFDM_RSSI_B_BIT_POS;
val = le32_to_cpu(ncphy->non_cfg_phy[IWL50_RX_RES_RSSI_C_IDX]);
rssi_c = (val & IWL50_OFDM_RSSI_C_MSK) >> IWL50_OFDM_RSSI_C_BIT_POS;
max_rssi = max_t(u32, rssi_a, rssi_b);
max_rssi = max_t(u32, max_rssi, rssi_c);
IWL_DEBUG_STATS(priv, "Rssi In A %d B %d C %d Max %d AGC dB %d\n",
rssi_a, rssi_b, rssi_c, max_rssi, agc);
/* dBm = max_rssi dB - agc dB - constant.
* Higher AGC (higher radio gain) means lower signal. */
return max_rssi - agc - IWLAGN_RSSI_OFFSET;
}
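As a worked example of the conversion at the end of iwlagn_calc_rssi(): with IWLAGN_RSSI_OFFSET = 44 (defined in iwl-agn-hw.h below), made-up DSP readings of 45/43/40 for the three chains and an AGC value of 35 give 45 - 35 - 44 = -34 dBm. A trivial userspace check of that arithmetic:
#include <stdio.h>
#define IWLAGN_RSSI_OFFSET 44	/* RSSI to dBm, from iwl-agn-hw.h */
int main(void)
{
	/* invented per-chain DSP RSSI values and AGC reading */
	unsigned int rssi_a = 45, rssi_b = 43, rssi_c = 40, agc = 35;
	unsigned int max_rssi = rssi_a > rssi_b ? rssi_a : rssi_b;
	if (rssi_c > max_rssi)
		max_rssi = rssi_c;
	/* dBm = max_rssi dB - agc dB - constant */
	printf("signal = %d dBm\n", (int)max_rssi - (int)agc - IWLAGN_RSSI_OFFSET);
	return 0;
}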
struct iwl_hcmd_ops iwlagn_hcmd = {
.rxon_assoc = iwlagn_send_rxon_assoc,
.commit_rxon = iwl_commit_rxon,
.set_rxon_chain = iwl_set_rxon_chain,
.set_tx_ant = iwlagn_send_tx_ant_config,
};
struct iwl_hcmd_utils_ops iwlagn_hcmd_utils = {
.get_hcmd_size = iwlagn_get_hcmd_size,
.build_addsta_hcmd = iwlagn_build_addsta_hcmd,
.gain_computation = iwlagn_gain_computation,
.chain_noise_reset = iwlagn_chain_noise_reset,
.rts_tx_cmd_flag = iwlagn_rts_tx_cmd_flag,
.calc_rssi = iwlagn_calc_rssi,
};

View File

@ -0,0 +1,118 @@
/******************************************************************************
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
* Copyright(c) 2007 - 2010 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
* USA
*
* The full GNU General Public License is included in this distribution
* in the file called LICENSE.GPL.
*
* Contact Information:
* Intel Linux Wireless <ilw@linux.intel.com>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*
* BSD LICENSE
*
* Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*****************************************************************************/
/*
* Please use this file (iwl-agn-hw.h) only for hardware-related definitions.
*/
#ifndef __iwl_agn_hw_h__
#define __iwl_agn_hw_h__
#define IWLAGN_RTC_INST_LOWER_BOUND (0x000000)
#define IWLAGN_RTC_INST_UPPER_BOUND (0x020000)
#define IWLAGN_RTC_DATA_LOWER_BOUND (0x800000)
#define IWLAGN_RTC_DATA_UPPER_BOUND (0x80C000)
#define IWLAGN_RTC_INST_SIZE (IWLAGN_RTC_INST_UPPER_BOUND - \
IWLAGN_RTC_INST_LOWER_BOUND)
#define IWLAGN_RTC_DATA_SIZE (IWLAGN_RTC_DATA_UPPER_BOUND - \
IWLAGN_RTC_DATA_LOWER_BOUND)
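For orientation, the bounds above give 0x20000 bytes (128 KiB) of instruction SRAM and 0xC000 bytes (48 KiB) of data SRAM; a trivial check of that arithmetic, with the hex constants copied from the #defines:
#include <stdio.h>
int main(void)
{
	unsigned int inst = 0x020000 - 0x000000;	/* IWLAGN_RTC_INST_SIZE */
	unsigned int data = 0x80C000 - 0x800000;	/* IWLAGN_RTC_DATA_SIZE */
	printf("inst SRAM: %u bytes (%u KiB)\n", inst, inst / 1024);
	printf("data SRAM: %u bytes (%u KiB)\n", data, data / 1024);
	return 0;
}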
/* RSSI to dBm */
#define IWLAGN_RSSI_OFFSET 44
/* PCI registers */
#define PCI_CFG_RETRY_TIMEOUT 0x041
/* PCI register values */
#define PCI_CFG_LINK_CTRL_VAL_L0S_EN 0x01
#define PCI_CFG_LINK_CTRL_VAL_L1_EN 0x02
#define IWLAGN_DEFAULT_TX_RETRY 15
/* Limit range of txpower output target to be between these values */
#define IWLAGN_TX_POWER_TARGET_POWER_MIN (0) /* 0 dBm: 1 milliwatt */
#define IWLAGN_TX_POWER_TARGET_POWER_MAX (16) /* 16 dBm */
/* EEPROM */
#define IWLAGN_EEPROM_IMG_SIZE 2048
#define IWLAGN_CMD_FIFO_NUM 7
#define IWLAGN_NUM_QUEUES 20
#define IWLAGN_NUM_AMPDU_QUEUES 10
#define IWLAGN_FIRST_AMPDU_QUEUE 10
/* Fixed (non-configurable) rx data from phy */
/**
* struct iwlagn_schedq_bc_tbl scheduler byte count table
* base physical address provided by SCD_DRAM_BASE_ADDR
* @tfd_offset 0-12 - tx command byte count
* 12-16 - station index
*/
struct iwlagn_scd_bc_tbl {
__le16 tfd_offset[TFD_QUEUE_BC_SIZE];
} __attribute__ ((packed));
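One way to read the tfd_offset kernel-doc above is byte count in the low 12 bits and station index in the next 4; that split is an interpretation of the "0-12"/"12-16" ranges, not something this patch spells out, so treat the sketch below as illustrative only.
#include <stdio.h>
#include <stdint.h>
/* assumed layout: bits 0-11 tx command byte count, bits 12-15 station index */
static uint16_t scd_bc_entry(uint16_t byte_cnt, uint8_t sta_id)
{
	return (uint16_t)(((sta_id & 0xFu) << 12) | (byte_cnt & 0xFFFu));
}
int main(void)
{
	/* e.g. a 260-byte tx command for station 3 */
	printf("entry = 0x%04x\n", scd_bc_entry(260, 3));
	return 0;
}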
#endif /* __iwl_agn_hw_h__ */

View File

@ -141,13 +141,14 @@ static irqreturn_t iwl_isr(int irq, void *data)
{
struct iwl_priv *priv = data;
u32 inta, inta_mask;
unsigned long flags;
#ifdef CONFIG_IWLWIFI_DEBUG
u32 inta_fh;
#endif
if (!priv)
return IRQ_NONE;
spin_lock(&priv->lock);
spin_lock_irqsave(&priv->lock, flags);
/* Disable (but don't clear!) interrupts here to avoid
* back-to-back ISRs and sporadic interrupts from our NIC.
@ -190,7 +191,7 @@ static irqreturn_t iwl_isr(int irq, void *data)
iwl_enable_interrupts(priv);
unplugged:
spin_unlock(&priv->lock);
spin_unlock_irqrestore(&priv->lock, flags);
return IRQ_HANDLED;
none:
@ -199,7 +200,7 @@ static irqreturn_t iwl_isr(int irq, void *data)
if (test_bit(STATUS_INT_ENABLED, &priv->status) && !priv->_agn.inta)
iwl_enable_interrupts(priv);
spin_unlock(&priv->lock);
spin_unlock_irqrestore(&priv->lock, flags);
return IRQ_NONE;
}
@ -216,6 +217,7 @@ irqreturn_t iwl_isr_ict(int irq, void *data)
struct iwl_priv *priv = data;
u32 inta, inta_mask;
u32 val = 0;
unsigned long flags;
if (!priv)
return IRQ_NONE;
@ -226,7 +228,7 @@ irqreturn_t iwl_isr_ict(int irq, void *data)
if (!priv->_agn.use_ict)
return iwl_isr(irq, data);
spin_lock(&priv->lock);
spin_lock_irqsave(&priv->lock, flags);
/* Disable (but don't clear!) interrupts here to avoid
* back-to-back ISRs and sporadic interrupts from our NIC.
@ -290,7 +292,7 @@ irqreturn_t iwl_isr_ict(int irq, void *data)
iwl_enable_interrupts(priv);
}
spin_unlock(&priv->lock);
spin_unlock_irqrestore(&priv->lock, flags);
return IRQ_HANDLED;
none:
@ -300,6 +302,6 @@ irqreturn_t iwl_isr_ict(int irq, void *data)
if (test_bit(STATUS_INT_ENABLED, &priv->status) && !priv->_agn.inta)
iwl_enable_interrupts(priv);
spin_unlock(&priv->lock);
spin_unlock_irqrestore(&priv->lock, flags);
return IRQ_NONE;
}

File diff suppressed because it is too large

View File

@ -2003,7 +2003,7 @@ static void rs_rate_scale_perform(struct iwl_priv *priv,
/* rates available for this association, and for modulation mode */
rate_mask = rs_get_supported_rates(lq_sta, hdr, tbl->lq_type);
IWL_DEBUG_RATE(priv, "mask 0x%04X \n", rate_mask);
IWL_DEBUG_RATE(priv, "mask 0x%04X\n", rate_mask);
/* mask with station rate restriction */
if (is_legacy(tbl->lq_type)) {
@ -2410,7 +2410,7 @@ static void rs_get_rate(void *priv_r, struct ieee80211_sta *sta, void *priv_sta,
struct sk_buff *skb = txrc->skb;
struct ieee80211_supported_band *sband = txrc->sband;
struct iwl_priv *priv = (struct iwl_priv *)priv_r;
struct iwl_priv *priv __maybe_unused = (struct iwl_priv *)priv_r;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct iwl_lq_sta *lq_sta = priv_sta;
int rate_idx;
@ -2934,8 +2934,6 @@ static ssize_t rs_sta_dbgfs_rate_scale_data_read(struct file *file,
desc += sprintf(buff+desc,
"Bit Rate= %d Mb/s\n",
iwl_rates[lq_sta->last_txrate_idx].ieee >> 1);
desc += sprintf(buff+desc, "Noise Level= %d dBm\n",
priv->last_rx_noise);
ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc);
return ret;

File diff suppressed because it is too large

View File

@ -0,0 +1,416 @@
/******************************************************************************
*
* GPL LICENSE SUMMARY
*
* Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
* USA
*
* The full GNU General Public License is included in this distribution
* in the file called LICENSE.GPL.
*
* Contact Information:
* Intel Linux Wireless <ilw@linux.intel.com>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*
*****************************************************************************/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
#include "iwl-dev.h"
#include "iwl-core.h"
#include "iwl-io.h"
#include "iwl-helpers.h"
#include "iwl-agn-hw.h"
#include "iwl-agn.h"
static const s8 iwlagn_default_queue_to_tx_fifo[] = {
IWL_TX_FIFO_VO,
IWL_TX_FIFO_VI,
IWL_TX_FIFO_BE,
IWL_TX_FIFO_BK,
IWLAGN_CMD_FIFO_NUM,
IWL_TX_FIFO_UNUSED,
IWL_TX_FIFO_UNUSED,
IWL_TX_FIFO_UNUSED,
IWL_TX_FIFO_UNUSED,
IWL_TX_FIFO_UNUSED,
};
/*
* ucode
*/
static int iwlagn_load_section(struct iwl_priv *priv, const char *name,
struct fw_desc *image, u32 dst_addr)
{
dma_addr_t phy_addr = image->p_addr;
u32 byte_cnt = image->len;
int ret;
priv->ucode_write_complete = 0;
iwl_write_direct32(priv,
FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);
iwl_write_direct32(priv,
FH_SRVC_CHNL_SRAM_ADDR_REG(FH_SRVC_CHNL), dst_addr);
iwl_write_direct32(priv,
FH_TFDIB_CTRL0_REG(FH_SRVC_CHNL),
phy_addr & FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);
iwl_write_direct32(priv,
FH_TFDIB_CTRL1_REG(FH_SRVC_CHNL),
(iwl_get_dma_hi_addr(phy_addr)
<< FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);
iwl_write_direct32(priv,
FH_TCSR_CHNL_TX_BUF_STS_REG(FH_SRVC_CHNL),
1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);
iwl_write_direct32(priv,
FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);
IWL_DEBUG_INFO(priv, "%s uCode section being loaded...\n", name);
ret = wait_event_interruptible_timeout(priv->wait_command_queue,
priv->ucode_write_complete, 5 * HZ);
if (ret == -ERESTARTSYS) {
IWL_ERR(priv, "Could not load the %s uCode section due "
"to interrupt\n", name);
return ret;
}
if (!ret) {
IWL_ERR(priv, "Could not load the %s uCode section\n",
name);
return -ETIMEDOUT;
}
return 0;
}
static int iwlagn_load_given_ucode(struct iwl_priv *priv,
struct fw_desc *inst_image,
struct fw_desc *data_image)
{
int ret = 0;
ret = iwlagn_load_section(priv, "INST", inst_image,
IWLAGN_RTC_INST_LOWER_BOUND);
if (ret)
return ret;
return iwlagn_load_section(priv, "DATA", data_image,
IWLAGN_RTC_DATA_LOWER_BOUND);
}
int iwlagn_load_ucode(struct iwl_priv *priv)
{
int ret = 0;
/* check whether init ucode should be loaded, or rather runtime ucode */
if (priv->ucode_init.len && (priv->ucode_type == UCODE_NONE)) {
IWL_DEBUG_INFO(priv, "Init ucode found. Loading init ucode...\n");
ret = iwlagn_load_given_ucode(priv,
&priv->ucode_init, &priv->ucode_init_data);
if (!ret) {
IWL_DEBUG_INFO(priv, "Init ucode load complete.\n");
priv->ucode_type = UCODE_INIT;
}
} else {
IWL_DEBUG_INFO(priv, "Init ucode not found, or already loaded. "
"Loading runtime ucode...\n");
ret = iwlagn_load_given_ucode(priv,
&priv->ucode_code, &priv->ucode_data);
if (!ret) {
IWL_DEBUG_INFO(priv, "Runtime ucode load complete.\n");
priv->ucode_type = UCODE_RT;
}
}
return ret;
}
#define IWL_UCODE_GET(item) \
static u32 iwlagn_ucode_get_##item(const struct iwl_ucode_header *ucode,\
u32 api_ver) \
{ \
if (api_ver <= 2) \
return le32_to_cpu(ucode->u.v1.item); \
return le32_to_cpu(ucode->u.v2.item); \
}
static u32 iwlagn_ucode_get_header_size(u32 api_ver)
{
if (api_ver <= 2)
return UCODE_HEADER_SIZE(1);
return UCODE_HEADER_SIZE(2);
}
static u32 iwlagn_ucode_get_build(const struct iwl_ucode_header *ucode,
u32 api_ver)
{
if (api_ver <= 2)
return 0;
return le32_to_cpu(ucode->u.v2.build);
}
static u8 *iwlagn_ucode_get_data(const struct iwl_ucode_header *ucode,
u32 api_ver)
{
if (api_ver <= 2)
return (u8 *) ucode->u.v1.data;
return (u8 *) ucode->u.v2.data;
}
IWL_UCODE_GET(inst_size);
IWL_UCODE_GET(data_size);
IWL_UCODE_GET(init_size);
IWL_UCODE_GET(init_data_size);
IWL_UCODE_GET(boot_size);
struct iwl_ucode_ops iwlagn_ucode = {
.get_header_size = iwlagn_ucode_get_header_size,
.get_build = iwlagn_ucode_get_build,
.get_inst_size = iwlagn_ucode_get_inst_size,
.get_data_size = iwlagn_ucode_get_data_size,
.get_init_size = iwlagn_ucode_get_init_size,
.get_init_data_size = iwlagn_ucode_get_init_data_size,
.get_boot_size = iwlagn_ucode_get_boot_size,
.get_data = iwlagn_ucode_get_data,
};
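The IWL_UCODE_GET() accessors exist so callers never touch the v1/v2 header layout directly. A userspace mock of the same pattern (the struct below is a stand-in, not the real iwl_ucode_header):
#include <stdio.h>
#include <stdint.h>
/* stand-in for the versioned firmware header handled above */
struct mock_ucode_header {
	union {
		struct { uint32_t inst_size; } v1;
		struct { uint32_t build; uint32_t inst_size; } v2;
	} u;
};
static uint32_t get_inst_size(const struct mock_ucode_header *h, uint32_t api_ver)
{
	/* same rule as IWL_UCODE_GET(): api_ver <= 2 means the v1 layout */
	return api_ver <= 2 ? h->u.v1.inst_size : h->u.v2.inst_size;
}
int main(void)
{
	struct mock_ucode_header hdr = { .u.v2 = { .build = 7, .inst_size = 4096 } };
	printf("inst_size = %u\n", get_inst_size(&hdr, 3));
	return 0;
}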
/*
* Calibration
*/
static int iwlagn_set_Xtal_calib(struct iwl_priv *priv)
{
struct iwl_calib_xtal_freq_cmd cmd;
__le16 *xtal_calib =
(__le16 *)iwl_eeprom_query_addr(priv, EEPROM_5000_XTAL);
cmd.hdr.op_code = IWL_PHY_CALIBRATE_CRYSTAL_FRQ_CMD;
cmd.hdr.first_group = 0;
cmd.hdr.groups_num = 1;
cmd.hdr.data_valid = 1;
cmd.cap_pin1 = le16_to_cpu(xtal_calib[0]);
cmd.cap_pin2 = le16_to_cpu(xtal_calib[1]);
return iwl_calib_set(&priv->calib_results[IWL_CALIB_XTAL],
(u8 *)&cmd, sizeof(cmd));
}
static int iwlagn_send_calib_cfg(struct iwl_priv *priv)
{
struct iwl_calib_cfg_cmd calib_cfg_cmd;
struct iwl_host_cmd cmd = {
.id = CALIBRATION_CFG_CMD,
.len = sizeof(struct iwl_calib_cfg_cmd),
.data = &calib_cfg_cmd,
};
memset(&calib_cfg_cmd, 0, sizeof(calib_cfg_cmd));
calib_cfg_cmd.ucd_calib_cfg.once.is_enable = IWL_CALIB_INIT_CFG_ALL;
calib_cfg_cmd.ucd_calib_cfg.once.start = IWL_CALIB_INIT_CFG_ALL;
calib_cfg_cmd.ucd_calib_cfg.once.send_res = IWL_CALIB_INIT_CFG_ALL;
calib_cfg_cmd.ucd_calib_cfg.flags = IWL_CALIB_INIT_CFG_ALL;
return iwl_send_cmd(priv, &cmd);
}
void iwlagn_rx_calib_result(struct iwl_priv *priv,
struct iwl_rx_mem_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_calib_hdr *hdr = (struct iwl_calib_hdr *)pkt->u.raw;
int len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
int index;
/* reduce the size of the length field itself */
len -= 4;
/* Define the order in which the results will be sent to the runtime
* uCode. iwl_send_calib_results sends them in a row according to
* their index. We sort them here
*/
switch (hdr->op_code) {
case IWL_PHY_CALIBRATE_DC_CMD:
index = IWL_CALIB_DC;
break;
case IWL_PHY_CALIBRATE_LO_CMD:
index = IWL_CALIB_LO;
break;
case IWL_PHY_CALIBRATE_TX_IQ_CMD:
index = IWL_CALIB_TX_IQ;
break;
case IWL_PHY_CALIBRATE_TX_IQ_PERD_CMD:
index = IWL_CALIB_TX_IQ_PERD;
break;
case IWL_PHY_CALIBRATE_BASE_BAND_CMD:
index = IWL_CALIB_BASE_BAND;
break;
default:
IWL_ERR(priv, "Unknown calibration notification %d\n",
hdr->op_code);
return;
}
iwl_calib_set(&priv->calib_results[index], pkt->u.raw, len);
}
void iwlagn_rx_calib_complete(struct iwl_priv *priv,
struct iwl_rx_mem_buffer *rxb)
{
IWL_DEBUG_INFO(priv, "Init. calibration is completed, restarting fw.\n");
queue_work(priv->workqueue, &priv->restart);
}
void iwlagn_init_alive_start(struct iwl_priv *priv)
{
int ret = 0;
/* Check alive response for "valid" sign from uCode */
if (priv->card_alive_init.is_valid != UCODE_VALID_OK) {
/* We had an error bringing up the hardware, so take it
* all the way back down so we can try again */
IWL_DEBUG_INFO(priv, "Initialize Alive failed.\n");
goto restart;
}
/* initialize uCode was loaded... verify inst image.
* This is a paranoid check, because we would not have gotten the
* "initialize" alive if code weren't properly loaded. */
if (iwl_verify_ucode(priv)) {
/* Runtime instruction load was bad;
* take it all the way back down so we can try again */
IWL_DEBUG_INFO(priv, "Bad \"initialize\" uCode load.\n");
goto restart;
}
ret = priv->cfg->ops->lib->alive_notify(priv);
if (ret) {
IWL_WARN(priv,
"Could not complete ALIVE transition: %d\n", ret);
goto restart;
}
iwlagn_send_calib_cfg(priv);
return;
restart:
/* real restart (first load init_ucode) */
queue_work(priv->workqueue, &priv->restart);
}
int iwlagn_alive_notify(struct iwl_priv *priv)
{
u32 a;
unsigned long flags;
int i, chan;
u32 reg_val;
spin_lock_irqsave(&priv->lock, flags);
priv->scd_base_addr = iwl_read_prph(priv, IWL50_SCD_SRAM_BASE_ADDR);
a = priv->scd_base_addr + IWL50_SCD_CONTEXT_DATA_OFFSET;
for (; a < priv->scd_base_addr + IWL50_SCD_TX_STTS_BITMAP_OFFSET;
a += 4)
iwl_write_targ_mem(priv, a, 0);
for (; a < priv->scd_base_addr + IWL50_SCD_TRANSLATE_TBL_OFFSET;
a += 4)
iwl_write_targ_mem(priv, a, 0);
for (; a < priv->scd_base_addr +
IWL50_SCD_TRANSLATE_TBL_OFFSET_QUEUE(priv->hw_params.max_txq_num); a += 4)
iwl_write_targ_mem(priv, a, 0);
iwl_write_prph(priv, IWL50_SCD_DRAM_BASE_ADDR,
priv->scd_bc_tbls.dma >> 10);
/* Enable DMA channel */
for (chan = 0; chan < FH50_TCSR_CHNL_NUM ; chan++)
iwl_write_direct32(priv, FH_TCSR_CHNL_TX_CONFIG_REG(chan),
FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
/* Update FH chicken bits */
reg_val = iwl_read_direct32(priv, FH_TX_CHICKEN_BITS_REG);
iwl_write_direct32(priv, FH_TX_CHICKEN_BITS_REG,
reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);
iwl_write_prph(priv, IWL50_SCD_QUEUECHAIN_SEL,
IWL50_SCD_QUEUECHAIN_SEL_ALL(priv->hw_params.max_txq_num));
iwl_write_prph(priv, IWL50_SCD_AGGR_SEL, 0);
/* initiate the queues */
for (i = 0; i < priv->hw_params.max_txq_num; i++) {
iwl_write_prph(priv, IWL50_SCD_QUEUE_RDPTR(i), 0);
iwl_write_direct32(priv, HBUS_TARG_WRPTR, 0 | (i << 8));
iwl_write_targ_mem(priv, priv->scd_base_addr +
IWL50_SCD_CONTEXT_QUEUE_OFFSET(i), 0);
iwl_write_targ_mem(priv, priv->scd_base_addr +
IWL50_SCD_CONTEXT_QUEUE_OFFSET(i) +
sizeof(u32),
((SCD_WIN_SIZE <<
IWL50_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
IWL50_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
((SCD_FRAME_LIMIT <<
IWL50_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
IWL50_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
}
iwl_write_prph(priv, IWL50_SCD_INTERRUPT_MASK,
IWL_MASK(0, priv->hw_params.max_txq_num));
/* Activate all Tx DMA/FIFO channels */
priv->cfg->ops->lib->txq_set_sched(priv, IWL_MASK(0, 7));
iwlagn_set_wr_ptrs(priv, IWL_CMD_QUEUE_NUM, 0);
/* make sure all queues are not stopped */
memset(&priv->queue_stopped[0], 0, sizeof(priv->queue_stopped));
for (i = 0; i < 4; i++)
atomic_set(&priv->queue_stop_count[i], 0);
/* reset to 0 to enable all the queues first */
priv->txq_ctx_active_msk = 0;
/* map qos queues to fifos one-to-one */
BUILD_BUG_ON(ARRAY_SIZE(iwlagn_default_queue_to_tx_fifo) != 10);
for (i = 0; i < ARRAY_SIZE(iwlagn_default_queue_to_tx_fifo); i++) {
int ac = iwlagn_default_queue_to_tx_fifo[i];
iwl_txq_ctx_activate(priv, i);
if (ac == IWL_TX_FIFO_UNUSED)
continue;
iwlagn_tx_queue_set_status(priv, &priv->txq[i], ac, 0);
}
spin_unlock_irqrestore(&priv->lock, flags);
iwl_send_wimax_coex(priv);
iwlagn_set_Xtal_calib(priv);
iwl_send_calib_results(priv);
return 0;
}

View File

@ -84,13 +84,6 @@ MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
MODULE_LICENSE("GPL");
MODULE_ALIAS("iwl4965");
/*************** STATION TABLE MANAGEMENT ****
* mac80211 should be examined to determine if sta_info is duplicating
* the functionality provided here
*/
/**************************************************************/
/**
* iwl_commit_rxon - commit staging_rxon to hardware
*
@ -166,6 +159,11 @@ int iwl_commit_rxon(struct iwl_priv *priv)
}
iwl_clear_ucode_stations(priv, false);
iwl_restore_stations(priv);
ret = iwl_restore_default_wep_keys(priv);
if (ret) {
IWL_ERR(priv, "Failed to restore WEP keys (%d)\n", ret);
return ret;
}
}
IWL_DEBUG_INFO(priv, "Sending RXON\n"
@ -189,10 +187,15 @@ int iwl_commit_rxon(struct iwl_priv *priv)
IWL_ERR(priv, "Error setting new RXON (%d)\n", ret);
return ret;
}
IWL_DEBUG_INFO(priv, "Return from !new_assoc RXON. \n");
IWL_DEBUG_INFO(priv, "Return from !new_assoc RXON.\n");
memcpy(active_rxon, &priv->staging_rxon, sizeof(*active_rxon));
iwl_clear_ucode_stations(priv, false);
iwl_restore_stations(priv);
ret = iwl_restore_default_wep_keys(priv);
if (ret) {
IWL_ERR(priv, "Failed to restore WEP keys (%d)\n", ret);
return ret;
}
}
priv->start_calib = 0;
@ -885,10 +888,10 @@ static void iwl_setup_rx_handlers(struct iwl_priv *priv)
priv->rx_handlers[MISSED_BEACONS_NOTIFICATION] =
iwl_rx_missed_beacon_notif;
/* Rx handlers */
priv->rx_handlers[REPLY_RX_PHY_CMD] = iwl_rx_reply_rx_phy;
priv->rx_handlers[REPLY_RX_MPDU_CMD] = iwl_rx_reply_rx;
priv->rx_handlers[REPLY_RX_PHY_CMD] = iwlagn_rx_reply_rx_phy;
priv->rx_handlers[REPLY_RX_MPDU_CMD] = iwlagn_rx_reply_rx;
/* block ack */
priv->rx_handlers[REPLY_COMPRESSED_BA] = iwl_rx_reply_compressed_ba;
priv->rx_handlers[REPLY_COMPRESSED_BA] = iwlagn_rx_reply_compressed_ba;
/* Set up hardware specific Rx handlers */
priv->cfg->ops->lib->rx_handler_setup(priv);
}
@ -1016,7 +1019,7 @@ void iwl_rx_handle(struct iwl_priv *priv)
count++;
if (count >= 8) {
rxq->read = i;
iwl_rx_replenish_now(priv);
iwlagn_rx_replenish_now(priv);
count = 0;
}
}
@ -1025,9 +1028,9 @@ void iwl_rx_handle(struct iwl_priv *priv)
/* Backtrack one entry */
rxq->read = i;
if (fill_rx)
iwl_rx_replenish_now(priv);
iwlagn_rx_replenish_now(priv);
else
iwl_rx_queue_restock(priv);
iwlagn_rx_queue_restock(priv);
}
/* call this function to flush any scheduled tasklet */
@ -1426,6 +1429,60 @@ static void iwl_irq_tasklet(struct iwl_priv *priv)
iwl_enable_interrupts(priv);
}
/* the threshold ratio of actual_ack_cnt to expected_ack_cnt in percent */
#define ACK_CNT_RATIO (50)
#define BA_TIMEOUT_CNT (5)
#define BA_TIMEOUT_MAX (16)
/**
* iwl_good_ack_health - checks for ACK count ratios, BA timeout retries.
*
* When the ACK count ratio is 0 and the aggregated BA timeout retries exceed
* BA_TIMEOUT_MAX, reload the firmware and bring the system back to normal
* operation state.
*/
bool iwl_good_ack_health(struct iwl_priv *priv,
struct iwl_rx_packet *pkt)
{
bool rc = true;
int actual_ack_cnt_delta, expected_ack_cnt_delta;
int ba_timeout_delta;
actual_ack_cnt_delta =
le32_to_cpu(pkt->u.stats.tx.actual_ack_cnt) -
le32_to_cpu(priv->statistics.tx.actual_ack_cnt);
expected_ack_cnt_delta =
le32_to_cpu(pkt->u.stats.tx.expected_ack_cnt) -
le32_to_cpu(priv->statistics.tx.expected_ack_cnt);
ba_timeout_delta =
le32_to_cpu(pkt->u.stats.tx.agg.ba_timeout) -
le32_to_cpu(priv->statistics.tx.agg.ba_timeout);
if ((priv->_agn.agg_tids_count > 0) &&
(expected_ack_cnt_delta > 0) &&
(((actual_ack_cnt_delta * 100) / expected_ack_cnt_delta)
< ACK_CNT_RATIO) &&
(ba_timeout_delta > BA_TIMEOUT_CNT)) {
IWL_DEBUG_RADIO(priv, "actual_ack_cnt delta = %d,"
" expected_ack_cnt = %d\n",
actual_ack_cnt_delta, expected_ack_cnt_delta);
#ifdef CONFIG_IWLWIFI_DEBUG
IWL_DEBUG_RADIO(priv, "rx_detected_cnt delta = %d\n",
priv->delta_statistics.tx.rx_detected_cnt);
IWL_DEBUG_RADIO(priv,
"ack_or_ba_timeout_collision delta = %d\n",
priv->delta_statistics.tx.
ack_or_ba_timeout_collision);
#endif
IWL_DEBUG_RADIO(priv, "agg ba_timeout delta = %d\n",
ba_timeout_delta);
if (!actual_ack_cnt_delta &&
(ba_timeout_delta >= BA_TIMEOUT_MAX))
rc = false;
}
return rc;
}
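To make the thresholds above concrete, here is the same condition pulled out into a standalone check with invented delta values: 40 ACKs out of 100 expected with 6 BA timeouts is degraded but still "good", while 0 out of 100 with 16 BA timeouts trips the firmware-reload path.
#include <stdio.h>
#include <stdbool.h>
#define ACK_CNT_RATIO	(50)
#define BA_TIMEOUT_CNT	(5)
#define BA_TIMEOUT_MAX	(16)
/* mirrors iwl_good_ack_health() for a session with aggregation active */
static bool good_ack_health(int actual_ack, int expected_ack, int ba_timeout)
{
	if (expected_ack > 0 &&
	    (actual_ack * 100) / expected_ack < ACK_CNT_RATIO &&
	    ba_timeout > BA_TIMEOUT_CNT) {
		if (!actual_ack && ba_timeout >= BA_TIMEOUT_MAX)
			return false;	/* reload firmware */
	}
	return true;
}
int main(void)
{
	printf("40/100, 6 timeouts  -> %d\n", good_ack_health(40, 100, 6));
	printf("0/100, 16 timeouts -> %d\n", good_ack_health(0, 100, 16));
	return 0;
}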
/******************************************************************************
*
@ -1787,6 +1844,7 @@ void iwl_dump_nic_error_log(struct iwl_priv *priv)
u32 data2, line;
u32 desc, time, count, base, data1;
u32 blink1, blink2, ilink1, ilink2;
u32 pc, hcmd;
if (priv->ucode_type == UCODE_INIT)
base = le32_to_cpu(priv->card_alive_init.error_event_table_ptr);
@ -1809,6 +1867,7 @@ void iwl_dump_nic_error_log(struct iwl_priv *priv)
}
desc = iwl_read_targ_mem(priv, base + 1 * sizeof(u32));
pc = iwl_read_targ_mem(priv, base + 2 * sizeof(u32));
blink1 = iwl_read_targ_mem(priv, base + 3 * sizeof(u32));
blink2 = iwl_read_targ_mem(priv, base + 4 * sizeof(u32));
ilink1 = iwl_read_targ_mem(priv, base + 5 * sizeof(u32));
@ -1817,6 +1876,7 @@ void iwl_dump_nic_error_log(struct iwl_priv *priv)
data2 = iwl_read_targ_mem(priv, base + 8 * sizeof(u32));
line = iwl_read_targ_mem(priv, base + 9 * sizeof(u32));
time = iwl_read_targ_mem(priv, base + 11 * sizeof(u32));
hcmd = iwl_read_targ_mem(priv, base + 22 * sizeof(u32));
trace_iwlwifi_dev_ucode_error(priv, desc, time, data1, data2, line,
blink1, blink2, ilink1, ilink2);
@ -1825,10 +1885,9 @@ void iwl_dump_nic_error_log(struct iwl_priv *priv)
"data1 data2 line\n");
IWL_ERR(priv, "%-28s (#%02d) %010u 0x%08X 0x%08X %u\n",
desc_lookup(desc), desc, time, data1, data2, line);
IWL_ERR(priv, "blink1 blink2 ilink1 ilink2\n");
IWL_ERR(priv, "0x%05X 0x%05X 0x%05X 0x%05X\n", blink1, blink2,
ilink1, ilink2);
IWL_ERR(priv, "pc blink1 blink2 ilink1 ilink2 hcmd\n");
IWL_ERR(priv, "0x%05X 0x%05X 0x%05X 0x%05X 0x%05X 0x%05X\n",
pc, blink1, blink2, ilink1, ilink2, hcmd);
}
#define EVENT_START_OFFSET (4 * sizeof(u32))
@ -1944,9 +2003,6 @@ static int iwl_print_last_event_logs(struct iwl_priv *priv, u32 capacity,
return pos;
}
/* For sanity check only. Actual size is determined by uCode, typ. 512 */
#define MAX_EVENT_LOG_SIZE (512)
#define DEFAULT_DUMP_EVENT_LOG_ENTRIES (20)
int iwl_dump_nic_event_log(struct iwl_priv *priv, bool full_log,
@ -1979,16 +2035,16 @@ int iwl_dump_nic_event_log(struct iwl_priv *priv, bool full_log,
num_wraps = iwl_read_targ_mem(priv, base + (2 * sizeof(u32)));
next_entry = iwl_read_targ_mem(priv, base + (3 * sizeof(u32)));
if (capacity > MAX_EVENT_LOG_SIZE) {
if (capacity > priv->cfg->max_event_log_size) {
IWL_ERR(priv, "Log capacity %d is bogus, limit to %d entries\n",
capacity, MAX_EVENT_LOG_SIZE);
capacity = MAX_EVENT_LOG_SIZE;
capacity, priv->cfg->max_event_log_size);
capacity = priv->cfg->max_event_log_size;
}
if (next_entry > MAX_EVENT_LOG_SIZE) {
if (next_entry > priv->cfg->max_event_log_size) {
IWL_ERR(priv, "Log write index %d is bogus, limit to %d\n",
next_entry, MAX_EVENT_LOG_SIZE);
next_entry = MAX_EVENT_LOG_SIZE;
next_entry, priv->cfg->max_event_log_size);
next_entry = priv->cfg->max_event_log_size;
}
size = num_wraps ? capacity : next_entry;
@ -2204,8 +2260,8 @@ static void __iwl_down(struct iwl_priv *priv)
/* device going down, Stop using ICT table */
iwl_disable_ict(priv);
iwl_txq_ctx_stop(priv);
iwl_rxq_stop(priv);
iwlagn_txq_ctx_stop(priv);
iwlagn_rxq_stop(priv);
/* Power-down device's busmaster DMA clocks */
iwl_write_prph(priv, APMG_CLK_DIS_REG, APMG_CLK_VAL_DMA_CLK_RQT);
@ -2265,7 +2321,7 @@ static int iwl_prepare_card_hw(struct iwl_priv *priv)
{
int ret = 0;
IWL_DEBUG_INFO(priv, "iwl_prepare_card_hw enter \n");
IWL_DEBUG_INFO(priv, "iwl_prepare_card_hw enter\n");
ret = iwl_set_hw_ready(priv);
if (priv->hw_ready)
@ -2326,7 +2382,7 @@ static int __iwl_up(struct iwl_priv *priv)
iwl_write32(priv, CSR_INT, 0xFFFFFFFF);
ret = iwl_hw_nic_init(priv);
ret = iwlagn_hw_nic_init(priv);
if (ret) {
IWL_ERR(priv, "Unable to init nic\n");
return ret;
@ -2476,7 +2532,7 @@ static void iwl_bg_rx_replenish(struct work_struct *data)
return;
mutex_lock(&priv->mutex);
iwl_rx_replenish(priv);
iwlagn_rx_replenish(priv);
mutex_unlock(&priv->mutex);
}
@ -2486,7 +2542,6 @@ void iwl_post_associate(struct iwl_priv *priv)
{
struct ieee80211_conf *conf = NULL;
int ret = 0;
unsigned long flags;
if (priv->iw_mode == NL80211_IFTYPE_AP) {
IWL_ERR(priv, "%s Should not be called in AP mode\n", __func__);
@ -2567,10 +2622,6 @@ void iwl_post_associate(struct iwl_priv *priv)
break;
}
spin_lock_irqsave(&priv->lock, flags);
iwl_activate_qos(priv, 0);
spin_unlock_irqrestore(&priv->lock, flags);
/* the chain noise calibration will enabled PM upon completion
* If chain noise has already been run, then we need to enable
* power management here */
@ -2737,7 +2788,7 @@ static int iwl_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
IWL_DEBUG_TX(priv, "dev->xmit(%d bytes) at rate 0x%02x\n", skb->len,
ieee80211_get_tx_rate(hw, IEEE80211_SKB_CB(skb))->bitrate);
if (iwl_tx_skb(priv, skb))
if (iwlagn_tx_skb(priv, skb))
dev_kfree_skb_any(skb);
IWL_DEBUG_MACDUMP(priv, "leave\n");
@ -2747,7 +2798,6 @@ static int iwl_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
void iwl_config_ap(struct iwl_priv *priv)
{
int ret = 0;
unsigned long flags;
if (test_bit(STATUS_EXIT_PENDING, &priv->status))
return;
@ -2799,10 +2849,6 @@ void iwl_config_ap(struct iwl_priv *priv)
/* restore RXON assoc */
priv->staging_rxon.filter_flags |= RXON_FILTER_ASSOC_MSK;
iwlcore_commit_rxon(priv);
iwl_reset_qos(priv);
spin_lock_irqsave(&priv->lock, flags);
iwl_activate_qos(priv, 1);
spin_unlock_irqrestore(&priv->lock, flags);
iwl_add_bcast_station(priv);
}
iwl_send_beacon_cmd(priv);
@ -2858,12 +2904,13 @@ static int iwl_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
mutex_lock(&priv->mutex);
iwl_scan_cancel_timeout(priv, 100);
/* If we are getting WEP group key and we didn't receive any key mapping
/*
* If we are getting WEP group key and we didn't receive any key mapping
* so far, we are in legacy wep mode (group key only), otherwise we are
* in 1X mode.
* In legacy wep mode, we use another host command to the uCode */
if (key->alg == ALG_WEP && sta_id == priv->hw_params.bcast_sta_id &&
priv->iw_mode != NL80211_IFTYPE_AP) {
* In legacy wep mode, we use another host command to the uCode.
*/
if (key->alg == ALG_WEP && !sta && vif->type != NL80211_IFTYPE_AP) {
if (cmd == SET_KEY)
is_default_wep_key = !priv->key_mapping_key;
else
@ -2925,7 +2972,7 @@ static int iwl_mac_ampdu_action(struct ieee80211_hw *hw,
return ret;
case IEEE80211_AMPDU_TX_START:
IWL_DEBUG_HT(priv, "start Tx\n");
ret = iwl_tx_agg_start(priv, sta->addr, tid, ssn);
ret = iwlagn_tx_agg_start(priv, sta->addr, tid, ssn);
if (ret == 0) {
priv->_agn.agg_tids_count++;
IWL_DEBUG_HT(priv, "priv->_agn.agg_tids_count = %u\n",
@ -2934,7 +2981,7 @@ static int iwl_mac_ampdu_action(struct ieee80211_hw *hw,
return ret;
case IEEE80211_AMPDU_TX_STOP:
IWL_DEBUG_HT(priv, "stop Tx\n");
ret = iwl_tx_agg_stop(priv, sta->addr, tid);
ret = iwlagn_tx_agg_stop(priv, sta->addr, tid);
if ((ret == 0) && (priv->_agn.agg_tids_count > 0)) {
priv->_agn.agg_tids_count--;
IWL_DEBUG_HT(priv, "priv->_agn.agg_tids_count = %u\n",
@ -2997,19 +3044,6 @@ static void iwl_mac_sta_notify(struct ieee80211_hw *hw,
}
}
/**
* iwl_restore_wepkeys - Restore WEP keys to device
*/
static void iwl_restore_wepkeys(struct iwl_priv *priv)
{
mutex_lock(&priv->mutex);
if (priv->iw_mode == NL80211_IFTYPE_STATION &&
priv->default_wep_key &&
iwl_send_static_wepkey_cmd(priv, 0))
IWL_ERR(priv, "Could not send WEP static key\n");
mutex_unlock(&priv->mutex);
}
static int iwlagn_mac_sta_add(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta)
@ -3036,10 +3070,8 @@ static int iwlagn_mac_sta_add(struct ieee80211_hw *hw,
return ret;
}
iwl_restore_wepkeys(priv);
/* Initialize rate scaling */
IWL_DEBUG_INFO(priv, "Initializing rate scaling for station %pM \n",
IWL_DEBUG_INFO(priv, "Initializing rate scaling for station %pM\n",
sta->addr);
iwl_rs_rate_init(priv, sta, sta_id);
@ -3337,15 +3369,10 @@ static int iwl_init_drv(struct iwl_priv *priv)
iwl_init_scan_params(priv);
iwl_reset_qos(priv);
priv->qos_data.qos_active = 0;
priv->qos_data.qos_cap.val = 0;
/* Set the tx_power_user_lmt to the lowest power level
* this value will get overwritten by channel max power avg
* from eeprom */
priv->tx_power_user_lmt = IWL_TX_POWER_TARGET_POWER_MIN;
priv->tx_power_user_lmt = IWLAGN_TX_POWER_TARGET_POWER_MIN;
ret = iwl_init_channel_map(priv);
if (ret) {
@ -3692,8 +3719,8 @@ static void __devexit iwl_pci_remove(struct pci_dev *pdev)
iwl_dealloc_ucode_pci(priv);
if (priv->rxq.bd)
iwl_rx_queue_free(priv, &priv->rxq);
iwl_hw_txq_ctx_free(priv);
iwlagn_rx_queue_free(priv, &priv->rxq);
iwlagn_hw_txq_ctx_free(priv);
iwl_eeprom_free(priv);
@ -3808,6 +3835,7 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
{IWL_PCI_DEVICE(0x4238, 0x1111, iwl6000_3agn_cfg)},
{IWL_PCI_DEVICE(0x4239, 0x1311, iwl6000i_2agn_cfg)},
{IWL_PCI_DEVICE(0x4239, 0x1316, iwl6000i_2abg_cfg)},
{IWL_PCI_DEVICE(0x0082, 0x1201, iwl6000i_g2_2agn_cfg)},
/* 6x50 WiFi/WiMax Series */
{IWL_PCI_DEVICE(0x0087, 0x1301, iwl6050_2agn_cfg)},
@ -3890,3 +3918,33 @@ module_param_named(debug, iwl_debug_level, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "debug output mask");
#endif
module_param_named(swcrypto50, iwlagn_mod_params.sw_crypto, bool, S_IRUGO);
MODULE_PARM_DESC(swcrypto50,
"using crypto in software (default 0 [hardware]) (deprecated)");
module_param_named(swcrypto, iwlagn_mod_params.sw_crypto, int, S_IRUGO);
MODULE_PARM_DESC(swcrypto, "using crypto in software (default 0 [hardware])");
module_param_named(queues_num50,
iwlagn_mod_params.num_of_queues, int, S_IRUGO);
MODULE_PARM_DESC(queues_num50,
"number of hw queues in 50xx series (deprecated)");
module_param_named(queues_num, iwlagn_mod_params.num_of_queues, int, S_IRUGO);
MODULE_PARM_DESC(queues_num, "number of hw queues.");
module_param_named(11n_disable50, iwlagn_mod_params.disable_11n, int, S_IRUGO);
MODULE_PARM_DESC(11n_disable50, "disable 50XX 11n functionality (deprecated)");
module_param_named(11n_disable, iwlagn_mod_params.disable_11n, int, S_IRUGO);
MODULE_PARM_DESC(11n_disable, "disable 11n functionality");
module_param_named(amsdu_size_8K50, iwlagn_mod_params.amsdu_size_8K,
int, S_IRUGO);
MODULE_PARM_DESC(amsdu_size_8K50,
"enable 8K amsdu size in 50XX series (deprecated)");
module_param_named(amsdu_size_8K, iwlagn_mod_params.amsdu_size_8K,
int, S_IRUGO);
MODULE_PARM_DESC(amsdu_size_8K, "enable 8K amsdu size");
module_param_named(fw_restart50, iwlagn_mod_params.restart_fw, int, S_IRUGO);
MODULE_PARM_DESC(fw_restart50,
"restart firmware in case of error (deprecated)");
module_param_named(fw_restart, iwlagn_mod_params.restart_fw, int, S_IRUGO);
MODULE_PARM_DESC(fw_restart, "restart firmware in case of error");
module_param_named(
disable_hw_scan, iwlagn_mod_params.disable_hw_scan, int, S_IRUGO);
MODULE_PARM_DESC(disable_hw_scan, "disable hardware scanning (default 0)");

View File

@ -65,10 +65,110 @@
#include "iwl-dev.h"
extern struct iwl_mod_params iwlagn_mod_params;
extern struct iwl_ucode_ops iwlagn_ucode;
extern struct iwl_hcmd_ops iwlagn_hcmd;
extern struct iwl_hcmd_utils_ops iwlagn_hcmd_utils;
int iwl_reset_ict(struct iwl_priv *priv);
void iwl_disable_ict(struct iwl_priv *priv);
int iwl_alloc_isr_ict(struct iwl_priv *priv);
void iwl_free_isr_ict(struct iwl_priv *priv);
irqreturn_t iwl_isr_ict(int irq, void *data);
bool iwl_good_ack_health(struct iwl_priv *priv,
struct iwl_rx_packet *pkt);
/* tx queue */
void iwlagn_set_wr_ptrs(struct iwl_priv *priv,
int txq_id, u32 index);
void iwlagn_tx_queue_set_status(struct iwl_priv *priv,
struct iwl_tx_queue *txq,
int tx_fifo_id, int scd_retry);
void iwlagn_txq_update_byte_cnt_tbl(struct iwl_priv *priv,
struct iwl_tx_queue *txq,
u16 byte_cnt);
void iwlagn_txq_inval_byte_cnt_tbl(struct iwl_priv *priv,
struct iwl_tx_queue *txq);
int iwlagn_txq_agg_enable(struct iwl_priv *priv, int txq_id,
int tx_fifo, int sta_id, int tid, u16 ssn_idx);
int iwlagn_txq_agg_disable(struct iwl_priv *priv, u16 txq_id,
u16 ssn_idx, u8 tx_fifo);
void iwlagn_txq_set_sched(struct iwl_priv *priv, u32 mask);
/* uCode */
int iwlagn_load_ucode(struct iwl_priv *priv);
void iwlagn_rx_calib_result(struct iwl_priv *priv,
struct iwl_rx_mem_buffer *rxb);
void iwlagn_rx_calib_complete(struct iwl_priv *priv,
struct iwl_rx_mem_buffer *rxb);
void iwlagn_init_alive_start(struct iwl_priv *priv);
int iwlagn_alive_notify(struct iwl_priv *priv);
/* lib */
void iwl_check_abort_status(struct iwl_priv *priv,
u8 frame_count, u32 status);
void iwlagn_rx_handler_setup(struct iwl_priv *priv);
void iwlagn_setup_deferred_work(struct iwl_priv *priv);
int iwlagn_hw_valid_rtc_data_addr(u32 addr);
int iwlagn_send_tx_power(struct iwl_priv *priv);
void iwlagn_temperature(struct iwl_priv *priv);
u16 iwlagn_eeprom_calib_version(struct iwl_priv *priv);
const u8 *iwlagn_eeprom_query_addr(const struct iwl_priv *priv,
size_t offset);
void iwlagn_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq);
int iwlagn_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq);
int iwlagn_hw_nic_init(struct iwl_priv *priv);
/* rx */
void iwlagn_rx_queue_restock(struct iwl_priv *priv);
void iwlagn_rx_allocate(struct iwl_priv *priv, gfp_t priority);
void iwlagn_rx_replenish(struct iwl_priv *priv);
void iwlagn_rx_replenish_now(struct iwl_priv *priv);
void iwlagn_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rxq);
int iwlagn_rxq_stop(struct iwl_priv *priv);
int iwlagn_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band);
void iwlagn_rx_reply_rx(struct iwl_priv *priv,
struct iwl_rx_mem_buffer *rxb);
void iwlagn_rx_reply_rx_phy(struct iwl_priv *priv,
struct iwl_rx_mem_buffer *rxb);
/* tx */
void iwlagn_hwrate_to_tx_control(struct iwl_priv *priv, u32 rate_n_flags,
struct ieee80211_tx_info *info);
int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb);
int iwlagn_tx_agg_start(struct iwl_priv *priv,
const u8 *ra, u16 tid, u16 *ssn);
int iwlagn_tx_agg_stop(struct iwl_priv *priv, const u8 *ra, u16 tid);
int iwlagn_txq_check_empty(struct iwl_priv *priv,
int sta_id, u8 tid, int txq_id);
void iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
struct iwl_rx_mem_buffer *rxb);
int iwlagn_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index);
void iwlagn_hw_txq_ctx_free(struct iwl_priv *priv);
int iwlagn_txq_ctx_alloc(struct iwl_priv *priv);
void iwlagn_txq_ctx_reset(struct iwl_priv *priv);
void iwlagn_txq_ctx_stop(struct iwl_priv *priv);
static inline u32 iwl_tx_status_to_mac80211(u32 status)
{
status &= TX_STATUS_MSK;
switch (status) {
case TX_STATUS_SUCCESS:
case TX_STATUS_DIRECT_DONE:
return IEEE80211_TX_STAT_ACK;
case TX_STATUS_FAIL_DEST_PS:
return IEEE80211_TX_STAT_TX_FILTERED;
default:
return 0;
}
}
static inline bool iwl_is_tx_success(u32 status)
{
status &= TX_STATUS_MSK;
return (status == TX_STATUS_SUCCESS) ||
(status == TX_STATUS_DIRECT_DONE);
}
#endif /* __iwl_agn_h__ */

View File

@ -593,7 +593,7 @@ void iwl_sensitivity_calibration(struct iwl_priv *priv,
IWL_DEBUG_CALIB(priv, "rx_enable_time = %u usecs\n", rx_enable_time);
if (!rx_enable_time) {
IWL_DEBUG_CALIB(priv, "<< RX Enable Time == 0! \n");
IWL_DEBUG_CALIB(priv, "<< RX Enable Time == 0!\n");
return;
}

View File

@ -106,7 +106,7 @@ enum {
REPLY_TX = 0x1c,
REPLY_RATE_SCALE = 0x47, /* 3945 only */
REPLY_LEDS_CMD = 0x48,
REPLY_TX_LINK_QUALITY_CMD = 0x4e, /* 4965 only */
REPLY_TX_LINK_QUALITY_CMD = 0x4e, /* for 4965 and up */
/* WiMAX coexistence */
COEX_PRIORITY_TABLE_CMD = 0x5a, /* for 5000 series and up */
@ -512,8 +512,9 @@ struct iwl_init_alive_resp {
*
* Entries without timestamps contain only event_id and data.
*
*
* 2) error_event_table_ptr indicates base of the error log. This contains
* information about any uCode error that occurs. For 4965, the format
* information about any uCode error that occurs. For agn, the format
* of the error log is:
*
* __le32 valid; (nonzero) valid, (0) log is empty
@ -529,6 +530,30 @@ struct iwl_init_alive_resp {
* __le32 bcon_time; beacon timer
* __le32 tsf_low; network timestamp function timer
* __le32 tsf_hi; network timestamp function timer
* __le32 gp1; GP1 timer register
* __le32 gp2; GP2 timer register
* __le32 gp3; GP3 timer register
* __le32 ucode_ver; uCode version
* __le32 hw_ver; HW Silicon version
* __le32 brd_ver; HW board version
* __le32 log_pc; log program counter
* __le32 frame_ptr; frame pointer
* __le32 stack_ptr; stack pointer
* __le32 hcmd; last host command
* __le32 isr0; isr status register LMPM_NIC_ISR0: rxtx_flag
* __le32 isr1; isr status register LMPM_NIC_ISR1: host_flag
* __le32 isr2; isr status register LMPM_NIC_ISR2: enc_flag
* __le32 isr3; isr status register LMPM_NIC_ISR3: time_flag
* __le32 isr4; isr status register LMPM_NIC_ISR4: wico interrupt
* __le32 isr_pref; isr status register LMPM_NIC_PREF_STAT
* __le32 wait_event; wait event() caller address
* __le32 l2p_control; L2pControlField
* __le32 l2p_duration; L2pDurationField
* __le32 l2p_mhvalid; L2pMhValidBits
* __le32 l2p_addr_match; L2pAddrMatchStat
* __le32 lmpm_pmg_sel; indicates which clocks are turned on (LMPM_PMG_SEL)
* __le32 u_timestamp; indicates the date and time of the compilation
* __le32 reserved;
*
* The Linux driver can print both logs to the system log when a uCode error
* occurs.
@ -1637,7 +1662,7 @@ struct iwl_tx_cmd {
struct ieee80211_hdr hdr[0];
} __attribute__ ((packed));
/* TX command response is sent after *all* transmission attempts.
/* TX command response is sent after *3945* transmission attempts.
*
* NOTES:
*
@ -1664,25 +1689,66 @@ struct iwl_tx_cmd {
* command FIFO has been cleared. The host must then deactivate the TX Abort
* control line. Receiving is still allowed in this case.
*/
enum {
TX_3945_STATUS_SUCCESS = 0x01,
TX_3945_STATUS_DIRECT_DONE = 0x02,
TX_3945_STATUS_FAIL_SHORT_LIMIT = 0x82,
TX_3945_STATUS_FAIL_LONG_LIMIT = 0x83,
TX_3945_STATUS_FAIL_FIFO_UNDERRUN = 0x84,
TX_3945_STATUS_FAIL_MGMNT_ABORT = 0x85,
TX_3945_STATUS_FAIL_NEXT_FRAG = 0x86,
TX_3945_STATUS_FAIL_LIFE_EXPIRE = 0x87,
TX_3945_STATUS_FAIL_DEST_PS = 0x88,
TX_3945_STATUS_FAIL_ABORTED = 0x89,
TX_3945_STATUS_FAIL_BT_RETRY = 0x8a,
TX_3945_STATUS_FAIL_STA_INVALID = 0x8b,
TX_3945_STATUS_FAIL_FRAG_DROPPED = 0x8c,
TX_3945_STATUS_FAIL_TID_DISABLE = 0x8d,
TX_3945_STATUS_FAIL_FRAME_FLUSHED = 0x8e,
TX_3945_STATUS_FAIL_INSUFFICIENT_CF_POLL = 0x8f,
TX_3945_STATUS_FAIL_TX_LOCKED = 0x90,
TX_3945_STATUS_FAIL_NO_BEACON_ON_RADAR = 0x91,
};
/*
* TX command response is sent after *agn* transmission attempts.
*
* Both postpone and abort status are expected behavior from uCode. There is
* no special operation required from the driver, except for RFKILL_FLUSH,
* which requires a tx flush host command to flush all the tx frames in queues.
*/
enum {
TX_STATUS_SUCCESS = 0x01,
TX_STATUS_DIRECT_DONE = 0x02,
/* postpone TX */
TX_STATUS_POSTPONE_DELAY = 0x40,
TX_STATUS_POSTPONE_FEW_BYTES = 0x41,
TX_STATUS_POSTPONE_BT_PRIO = 0x42,
TX_STATUS_POSTPONE_QUIET_PERIOD = 0x43,
TX_STATUS_POSTPONE_CALC_TTAK = 0x44,
/* abort TX */
TX_STATUS_FAIL_INTERNAL_CROSSED_RETRY = 0x81,
TX_STATUS_FAIL_SHORT_LIMIT = 0x82,
TX_STATUS_FAIL_LONG_LIMIT = 0x83,
TX_STATUS_FAIL_FIFO_UNDERRUN = 0x84,
TX_STATUS_FAIL_MGMNT_ABORT = 0x85,
TX_STATUS_FAIL_NEXT_FRAG = 0x86,
TX_STATUS_FAIL_DRAIN_FLOW = 0x85,
TX_STATUS_FAIL_RFKILL_FLUSH = 0x86,
TX_STATUS_FAIL_LIFE_EXPIRE = 0x87,
TX_STATUS_FAIL_DEST_PS = 0x88,
TX_STATUS_FAIL_ABORTED = 0x89,
TX_STATUS_FAIL_HOST_ABORTED = 0x89,
TX_STATUS_FAIL_BT_RETRY = 0x8a,
TX_STATUS_FAIL_STA_INVALID = 0x8b,
TX_STATUS_FAIL_FRAG_DROPPED = 0x8c,
TX_STATUS_FAIL_TID_DISABLE = 0x8d,
TX_STATUS_FAIL_FRAME_FLUSHED = 0x8e,
TX_STATUS_FAIL_FIFO_FLUSHED = 0x8e,
TX_STATUS_FAIL_INSUFFICIENT_CF_POLL = 0x8f,
TX_STATUS_FAIL_TX_LOCKED = 0x90,
TX_STATUS_FAIL_NO_BEACON_ON_RADAR = 0x91,
/* uCode drop due to FW drop request */
TX_STATUS_FAIL_FW_DROP = 0x90,
/*
* uCode drop due to station color mismatch
* between tx command and station table
*/
TX_STATUS_FAIL_STA_COLOR_MISMATCH_DROP = 0x91,
};
#define TX_PACKET_MODE_REGULAR 0x0000
@ -1704,30 +1770,6 @@ enum {
TX_ABORT_REQUIRED_MSK = 0x80000000, /* bits 31:31 */
};
static inline u32 iwl_tx_status_to_mac80211(u32 status)
{
status &= TX_STATUS_MSK;
switch (status) {
case TX_STATUS_SUCCESS:
case TX_STATUS_DIRECT_DONE:
return IEEE80211_TX_STAT_ACK;
case TX_STATUS_FAIL_DEST_PS:
return IEEE80211_TX_STAT_TX_FILTERED;
default:
return 0;
}
}
static inline bool iwl_is_tx_success(u32 status)
{
status &= TX_STATUS_MSK;
return (status == TX_STATUS_SUCCESS) ||
(status == TX_STATUS_DIRECT_DONE);
}
/* *******************************
* TX aggregation status
******************************* */

View File

@ -66,7 +66,7 @@ MODULE_LICENSE("GPL");
*/
static bool bt_coex_active = true;
module_param(bt_coex_active, bool, S_IRUGO);
MODULE_PARM_DESC(bt_coex_active, "enable wifi/bluetooth co-exist\n");
MODULE_PARM_DESC(bt_coex_active, "enable wifi/bluetooth co-exist");
static struct iwl_wimax_coex_event_entry cu_priorities[COEX_NUM_OF_EVENTS] = {
{COEX_CU_UNASSOC_IDLE_RP, COEX_CU_UNASSOC_IDLE_WP,
@ -141,30 +141,6 @@ const struct iwl_rate_info iwl_rates[IWL_RATE_COUNT] = {
};
EXPORT_SYMBOL(iwl_rates);
/**
* translate ucode response to mac80211 tx status control values
*/
void iwl_hwrate_to_tx_control(struct iwl_priv *priv, u32 rate_n_flags,
struct ieee80211_tx_info *info)
{
struct ieee80211_tx_rate *r = &info->control.rates[0];
info->antenna_sel_tx =
((rate_n_flags & RATE_MCS_ANT_ABC_MSK) >> RATE_MCS_ANT_POS);
if (rate_n_flags & RATE_MCS_HT_MSK)
r->flags |= IEEE80211_TX_RC_MCS;
if (rate_n_flags & RATE_MCS_GF_MSK)
r->flags |= IEEE80211_TX_RC_GREEN_FIELD;
if (rate_n_flags & RATE_MCS_HT40_MSK)
r->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
if (rate_n_flags & RATE_MCS_DUP_MSK)
r->flags |= IEEE80211_TX_RC_DUP_DATA;
if (rate_n_flags & RATE_MCS_SGI_MSK)
r->flags |= IEEE80211_TX_RC_SHORT_GI;
r->idx = iwl_hwrate_to_mac80211_idx(rate_n_flags, info->band);
}
EXPORT_SYMBOL(iwl_hwrate_to_tx_control);
int iwl_hwrate_to_plcp_idx(u32 rate_n_flags)
{
int idx = 0;
@ -196,27 +172,6 @@ int iwl_hwrate_to_plcp_idx(u32 rate_n_flags)
}
EXPORT_SYMBOL(iwl_hwrate_to_plcp_idx);
int iwl_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band)
{
int idx = 0;
int band_offset = 0;
/* HT rate format: mac80211 wants an MCS number, which is just LSB */
if (rate_n_flags & RATE_MCS_HT_MSK) {
idx = (rate_n_flags & 0xff);
return idx;
/* Legacy rate format, search for match in table */
} else {
if (band == IEEE80211_BAND_5GHZ)
band_offset = IWL_FIRST_OFDM_RATE;
for (idx = band_offset; idx < IWL_RATE_COUNT_LEGACY; idx++)
if (iwl_rates[idx].plcp == (rate_n_flags & 0xFF))
return idx - band_offset;
}
return -1;
}
u8 iwl_toggle_tx_ant(struct iwl_priv *priv, u8 ant)
{
int i;
@ -266,74 +221,16 @@ void iwl_hw_detect(struct iwl_priv *priv)
}
EXPORT_SYMBOL(iwl_hw_detect);
int iwl_hw_nic_init(struct iwl_priv *priv)
{
unsigned long flags;
struct iwl_rx_queue *rxq = &priv->rxq;
int ret;
/* nic_init */
spin_lock_irqsave(&priv->lock, flags);
priv->cfg->ops->lib->apm_ops.init(priv);
/* Set interrupt coalescing calibration timer to default (512 usecs) */
iwl_write8(priv, CSR_INT_COALESCING, IWL_HOST_INT_CALIB_TIMEOUT_DEF);
spin_unlock_irqrestore(&priv->lock, flags);
ret = priv->cfg->ops->lib->apm_ops.set_pwr_src(priv, IWL_PWR_SRC_VMAIN);
priv->cfg->ops->lib->apm_ops.config(priv);
/* Allocate the RX queue, or reset if it is already allocated */
if (!rxq->bd) {
ret = iwl_rx_queue_alloc(priv);
if (ret) {
IWL_ERR(priv, "Unable to initialize Rx queue\n");
return -ENOMEM;
}
} else
iwl_rx_queue_reset(priv, rxq);
iwl_rx_replenish(priv);
iwl_rx_init(priv, rxq);
spin_lock_irqsave(&priv->lock, flags);
rxq->need_update = 1;
iwl_rx_queue_update_write_ptr(priv, rxq);
spin_unlock_irqrestore(&priv->lock, flags);
/* Allocate or reset and init all Tx and Command queues */
if (!priv->txq) {
ret = iwl_txq_ctx_alloc(priv);
if (ret)
return ret;
} else
iwl_txq_ctx_reset(priv);
set_bit(STATUS_INIT, &priv->status);
return 0;
}
EXPORT_SYMBOL(iwl_hw_nic_init);
/*
* QoS support
*/
void iwl_activate_qos(struct iwl_priv *priv, u8 force)
static void iwl_update_qos(struct iwl_priv *priv)
{
if (test_bit(STATUS_EXIT_PENDING, &priv->status))
return;
priv->qos_data.def_qos_parm.qos_flags = 0;
if (priv->qos_data.qos_cap.q_AP.queue_request &&
!priv->qos_data.qos_cap.q_AP.txop_request)
priv->qos_data.def_qos_parm.qos_flags |=
QOS_PARAM_FLG_TXOP_TYPE_MSK;
if (priv->qos_data.qos_active)
priv->qos_data.def_qos_parm.qos_flags |=
QOS_PARAM_FLG_UPDATE_EDCA_MSK;
@ -341,118 +238,14 @@ void iwl_activate_qos(struct iwl_priv *priv, u8 force)
if (priv->current_ht_config.is_ht)
priv->qos_data.def_qos_parm.qos_flags |= QOS_PARAM_FLG_TGN_MSK;
if (force || iwl_is_associated(priv)) {
IWL_DEBUG_QOS(priv, "send QoS cmd with Qos active=%d FLAGS=0x%X\n",
priv->qos_data.qos_active,
priv->qos_data.def_qos_parm.qos_flags);
IWL_DEBUG_QOS(priv, "send QoS cmd with Qos active=%d FLAGS=0x%X\n",
priv->qos_data.qos_active,
priv->qos_data.def_qos_parm.qos_flags);
iwl_send_cmd_pdu_async(priv, REPLY_QOS_PARAM,
sizeof(struct iwl_qosparam_cmd),
&priv->qos_data.def_qos_parm, NULL);
}
iwl_send_cmd_pdu_async(priv, REPLY_QOS_PARAM,
sizeof(struct iwl_qosparam_cmd),
&priv->qos_data.def_qos_parm, NULL);
}
EXPORT_SYMBOL(iwl_activate_qos);
/*
* AC      CWmin   CW max   AIFSN   TXOP Limit   TXOP Limit
*                                  (802.11b)    (802.11a/g)
* AC_BK      15     1023       7       0            0
* AC_BE      15     1023       3       0            0
* AC_VI       7       15       2    6.016ms      3.008ms
* AC_VO       3        7       2    3.264ms      1.504ms
*/
void iwl_reset_qos(struct iwl_priv *priv)
{
u16 cw_min = 15;
u16 cw_max = 1023;
u8 aifs = 2;
bool is_legacy = false;
unsigned long flags;
int i;
spin_lock_irqsave(&priv->lock, flags);
/* QoS always active in AP and ADHOC mode
* In STA mode wait for association
*/
if (priv->iw_mode == NL80211_IFTYPE_ADHOC ||
priv->iw_mode == NL80211_IFTYPE_AP)
priv->qos_data.qos_active = 1;
else
priv->qos_data.qos_active = 0;
/* check for legacy mode */
if ((priv->iw_mode == NL80211_IFTYPE_ADHOC &&
(priv->active_rate & IWL_OFDM_RATES_MASK) == 0) ||
(priv->iw_mode == NL80211_IFTYPE_STATION &&
(priv->staging_rxon.flags & RXON_FLG_SHORT_SLOT_MSK) == 0)) {
cw_min = 31;
is_legacy = 1;
}
if (priv->qos_data.qos_active)
aifs = 3;
/* AC_BE */
priv->qos_data.def_qos_parm.ac[0].cw_min = cpu_to_le16(cw_min);
priv->qos_data.def_qos_parm.ac[0].cw_max = cpu_to_le16(cw_max);
priv->qos_data.def_qos_parm.ac[0].aifsn = aifs;
priv->qos_data.def_qos_parm.ac[0].edca_txop = 0;
priv->qos_data.def_qos_parm.ac[0].reserved1 = 0;
if (priv->qos_data.qos_active) {
/* AC_BK */
i = 1;
priv->qos_data.def_qos_parm.ac[i].cw_min = cpu_to_le16(cw_min);
priv->qos_data.def_qos_parm.ac[i].cw_max = cpu_to_le16(cw_max);
priv->qos_data.def_qos_parm.ac[i].aifsn = 7;
priv->qos_data.def_qos_parm.ac[i].edca_txop = 0;
priv->qos_data.def_qos_parm.ac[i].reserved1 = 0;
/* AC_VI */
i = 2;
priv->qos_data.def_qos_parm.ac[i].cw_min =
cpu_to_le16((cw_min + 1) / 2 - 1);
priv->qos_data.def_qos_parm.ac[i].cw_max =
cpu_to_le16(cw_min);
priv->qos_data.def_qos_parm.ac[i].aifsn = 2;
if (is_legacy)
priv->qos_data.def_qos_parm.ac[i].edca_txop =
cpu_to_le16(6016);
else
priv->qos_data.def_qos_parm.ac[i].edca_txop =
cpu_to_le16(3008);
priv->qos_data.def_qos_parm.ac[i].reserved1 = 0;
/* AC_VO */
i = 3;
priv->qos_data.def_qos_parm.ac[i].cw_min =
cpu_to_le16((cw_min + 1) / 4 - 1);
priv->qos_data.def_qos_parm.ac[i].cw_max =
cpu_to_le16((cw_min + 1) / 2 - 1);
priv->qos_data.def_qos_parm.ac[i].aifsn = 2;
priv->qos_data.def_qos_parm.ac[i].reserved1 = 0;
if (is_legacy)
priv->qos_data.def_qos_parm.ac[i].edca_txop =
cpu_to_le16(3264);
else
priv->qos_data.def_qos_parm.ac[i].edca_txop =
cpu_to_le16(1504);
} else {
for (i = 1; i < 4; i++) {
priv->qos_data.def_qos_parm.ac[i].cw_min =
cpu_to_le16(cw_min);
priv->qos_data.def_qos_parm.ac[i].cw_max =
cpu_to_le16(cw_max);
priv->qos_data.def_qos_parm.ac[i].aifsn = aifs;
priv->qos_data.def_qos_parm.ac[i].edca_txop = 0;
priv->qos_data.def_qos_parm.ac[i].reserved1 = 0;
}
}
IWL_DEBUG_QOS(priv, "set QoS to default \n");
spin_unlock_irqrestore(&priv->lock, flags);
}
EXPORT_SYMBOL(iwl_reset_qos);
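For orientation, the default EDCA table in the comment above falls straight out of the arithmetic in iwl_reset_qos(): starting from cw_min = 15 and cw_max = 1023, AC_VI halves the contention window and AC_VO quarters it. A standalone sketch of that arithmetic (plain C; nothing below is driver API):

#include <stdio.h>

int main(void)
{
	/* base contention window, as in iwl_reset_qos() for non-legacy mode */
	unsigned int cw_min = 15, cw_max = 1023;

	printf("AC_BK: cw_min=%u cw_max=%u aifsn=7\n", cw_min, cw_max);
	printf("AC_BE: cw_min=%u cw_max=%u aifsn=3\n", cw_min, cw_max);
	printf("AC_VI: cw_min=%u cw_max=%u aifsn=2\n",
	       (cw_min + 1) / 2 - 1, cw_min);                /* 7 / 15 */
	printf("AC_VO: cw_min=%u cw_max=%u aifsn=2\n",
	       (cw_min + 1) / 4 - 1, (cw_min + 1) / 2 - 1);  /* 3 / 7  */
	return 0;
}

The printed values reproduce the table above; in the legacy (802.11b) case the function starts from cw_min = 31 instead, which shifts every derived window accordingly.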
#define MAX_BIT_RATE_40_MHZ 150 /* Mbps */
#define MAX_BIT_RATE_20_MHZ 72 /* Mbps */
@ -1092,12 +885,12 @@ void iwl_set_rxon_chain(struct iwl_priv *priv)
rx_chain |= idle_rx_cnt << RXON_RX_CHAIN_CNT_POS;
/* copied from 'iwl_bg_request_scan()' */
/* Force use of chains B and C (0x6) for Rx for 4965
* Avoid A (0x1) because of its off-channel reception on A-band.
/* Force use of chains B and C (0x6) for Rx
* Avoid A (0x1) because the device has off-channel reception on A-band.
* MIMO is not used here, but value is required */
if (iwl_is_monitor_mode(priv) &&
!(priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK) &&
((priv->hw_rev & CSR_HW_REV_TYPE_MSK) == CSR_HW_REV_TYPE_4965)) {
priv->cfg->off_channel_workaround) {
rx_chain = ANT_ABC << RXON_RX_CHAIN_VALID_POS;
rx_chain |= ANT_BC << RXON_RX_CHAIN_FORCE_SEL_POS;
rx_chain |= ANT_ABC << RXON_RX_CHAIN_FORCE_MIMO_SEL_POS;
@ -1584,10 +1377,11 @@ int iwl_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force)
int ret = 0;
s8 prev_tx_power = priv->tx_power_user_lmt;
if (tx_power < IWL_TX_POWER_TARGET_POWER_MIN) {
IWL_WARN(priv, "Requested user TXPOWER %d below lower limit %d.\n",
if (tx_power < IWLAGN_TX_POWER_TARGET_POWER_MIN) {
IWL_WARN(priv,
"Requested user TXPOWER %d below lower limit %d.\n",
tx_power,
IWL_TX_POWER_TARGET_POWER_MIN);
IWLAGN_TX_POWER_TARGET_POWER_MIN);
return -EINVAL;
}
@ -1631,10 +1425,11 @@ irqreturn_t iwl_isr_legacy(int irq, void *data)
struct iwl_priv *priv = data;
u32 inta, inta_mask;
u32 inta_fh;
unsigned long flags;
if (!priv)
return IRQ_NONE;
spin_lock(&priv->lock);
spin_lock_irqsave(&priv->lock, flags);
/* Disable (but don't clear!) interrupts here to avoid
* back-to-back ISRs and sporadic interrupts from our NIC.
@ -1672,7 +1467,7 @@ irqreturn_t iwl_isr_legacy(int irq, void *data)
tasklet_schedule(&priv->irq_tasklet);
unplugged:
spin_unlock(&priv->lock);
spin_unlock_irqrestore(&priv->lock, flags);
return IRQ_HANDLED;
none:
@ -1680,7 +1475,7 @@ irqreturn_t iwl_isr_legacy(int irq, void *data)
/* only re-enable if disabled by irq */
if (test_bit(STATUS_INT_ENABLED, &priv->status))
iwl_enable_interrupts(priv);
spin_unlock(&priv->lock);
spin_unlock_irqrestore(&priv->lock, flags);
return IRQ_NONE;
}
EXPORT_SYMBOL(iwl_isr_legacy);
@ -1993,12 +1788,6 @@ int iwl_mac_conf_tx(struct ieee80211_hw *hw, u16 queue,
cpu_to_le16((params->txop * 32));
priv->qos_data.def_qos_parm.ac[q].reserved1 = 0;
priv->qos_data.qos_active = 1;
if (priv->iw_mode == NL80211_IFTYPE_AP)
iwl_activate_qos(priv, 1);
else if (priv->assoc_id && iwl_is_associated(priv))
iwl_activate_qos(priv, 0);
spin_unlock_irqrestore(&priv->lock, flags);
@ -2013,7 +1802,7 @@ static void iwl_ht_conf(struct iwl_priv *priv,
struct iwl_ht_config *ht_conf = &priv->current_ht_config;
struct ieee80211_sta *sta;
IWL_DEBUG_MAC80211(priv, "enter: \n");
IWL_DEBUG_MAC80211(priv, "enter:\n");
if (!ht_conf->is_ht)
return;
@ -2269,11 +2058,8 @@ int iwl_mac_beacon_update(struct ieee80211_hw *hw, struct sk_buff *skb)
IWL_DEBUG_MAC80211(priv, "leave\n");
spin_unlock_irqrestore(&priv->lock, flags);
iwl_reset_qos(priv);
priv->cfg->ops->lib->post_associate(priv);
return 0;
}
EXPORT_SYMBOL(iwl_mac_beacon_update);
@ -2495,6 +2281,15 @@ int iwl_mac_config(struct ieee80211_hw *hw, u32 changed)
iwl_set_tx_power(priv, conf->power_level, false);
}
if (changed & IEEE80211_CONF_CHANGE_QOS) {
bool qos_active = !!(conf->flags & IEEE80211_CONF_QOS);
spin_lock_irqsave(&priv->lock, flags);
priv->qos_data.qos_active = qos_active;
iwl_update_qos(priv);
spin_unlock_irqrestore(&priv->lock, flags);
}
if (!iwl_is_ready(priv)) {
IWL_DEBUG_MAC80211(priv, "leave - not ready\n");
goto out;
@ -2529,8 +2324,6 @@ void iwl_mac_reset_tsf(struct ieee80211_hw *hw)
memset(&priv->current_ht_config, 0, sizeof(struct iwl_ht_config));
spin_unlock_irqrestore(&priv->lock, flags);
iwl_reset_qos(priv);
spin_lock_irqsave(&priv->lock, flags);
priv->assoc_id = 0;
priv->assoc_capability = 0;
@ -2574,7 +2367,7 @@ int iwl_alloc_txq_mem(struct iwl_priv *priv)
sizeof(struct iwl_tx_queue) * priv->cfg->num_of_queues,
GFP_KERNEL);
if (!priv->txq) {
IWL_ERR(priv, "Not enough memory for txq \n");
IWL_ERR(priv, "Not enough memory for txq\n");
return -ENOMEM;
}
return 0;

View File

@ -305,6 +305,9 @@ struct iwl_cfg {
s32 chain_noise_scale;
/* timer period for monitor the driver queues */
u32 monitor_recover_period;
bool temperature_kelvin;
bool off_channel_workaround;
u32 max_event_log_size;
};
/***************************
@ -314,8 +317,7 @@ struct iwl_cfg {
struct ieee80211_hw *iwl_alloc_all(struct iwl_cfg *cfg,
struct ieee80211_ops *hw_ops);
void iwl_hw_detect(struct iwl_priv *priv);
void iwl_reset_qos(struct iwl_priv *priv);
void iwl_activate_qos(struct iwl_priv *priv, u8 force);
void iwl_activate_qos(struct iwl_priv *priv);
int iwl_mac_conf_tx(struct ieee80211_hw *hw, u16 queue,
const struct ieee80211_tx_queue_params *params);
void iwl_set_rxon_hwcrypto(struct iwl_priv *priv, int hw_decrypt);
@ -336,7 +338,6 @@ void iwl_irq_handle_error(struct iwl_priv *priv);
void iwl_configure_filter(struct ieee80211_hw *hw,
unsigned int changed_flags,
unsigned int *total_flags, u64 multicast);
int iwl_hw_nic_init(struct iwl_priv *priv);
int iwl_set_hw_params(struct iwl_priv *priv);
bool iwl_is_monitor_mode(struct iwl_priv *priv);
void iwl_post_associate(struct iwl_priv *priv);
@ -420,21 +421,13 @@ void iwl_rx_reply_error(struct iwl_priv *priv,
/*****************************************************
* RX
******************************************************/
void iwl_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rxq);
void iwl_cmd_queue_free(struct iwl_priv *priv);
int iwl_rx_queue_alloc(struct iwl_priv *priv);
void iwl_rx_handle(struct iwl_priv *priv);
void iwl_rx_queue_update_write_ptr(struct iwl_priv *priv,
struct iwl_rx_queue *q);
void iwl_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq);
void iwl_rx_replenish(struct iwl_priv *priv);
void iwl_rx_replenish_now(struct iwl_priv *priv);
int iwl_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq);
void iwl_rx_queue_restock(struct iwl_priv *priv);
int iwl_rx_queue_space(const struct iwl_rx_queue *q);
void iwl_rx_allocate(struct iwl_priv *priv, gfp_t priority);
void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb);
int iwl_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index);
/* Handlers */
void iwl_rx_missed_beacon_notif(struct iwl_priv *priv,
struct iwl_rx_mem_buffer *rxb);
@ -455,14 +448,10 @@ void iwl_rx_csa(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb);
/*****************************************************
* TX
******************************************************/
int iwl_txq_ctx_alloc(struct iwl_priv *priv);
void iwl_txq_ctx_reset(struct iwl_priv *priv);
void iwl_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq);
int iwl_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv,
struct iwl_tx_queue *txq,
dma_addr_t addr, u16 len, u8 reset, u8 pad);
int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb);
void iwl_hw_txq_ctx_free(struct iwl_priv *priv);
int iwl_hw_tx_queue_init(struct iwl_priv *priv,
struct iwl_tx_queue *txq);
void iwl_free_tfds_in_queue(struct iwl_priv *priv,
@ -473,9 +462,6 @@ int iwl_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq,
void iwl_tx_queue_reset(struct iwl_priv *priv, struct iwl_tx_queue *txq,
int slots_num, u32 txq_id);
void iwl_tx_queue_free(struct iwl_priv *priv, int txq_id);
int iwl_tx_agg_start(struct iwl_priv *priv, const u8 *ra, u16 tid, u16 *ssn);
int iwl_tx_agg_stop(struct iwl_priv *priv , const u8 *ra, u16 tid);
int iwl_txq_check_empty(struct iwl_priv *priv, int sta_id, u8 tid, int txq_id);
/*****************************************************
* TX power
****************************************************/
@ -485,10 +471,7 @@ int iwl_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force);
* Rate
******************************************************************************/
void iwl_hwrate_to_tx_control(struct iwl_priv *priv, u32 rate_n_flags,
struct ieee80211_tx_info *info);
int iwl_hwrate_to_plcp_idx(u32 rate_n_flags);
int iwl_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band);
u8 iwl_rate_get_lowest_plcp(struct iwl_priv *priv);
@ -688,12 +671,6 @@ extern int iwl_send_statistics_request(struct iwl_priv *priv,
extern int iwl_verify_ucode(struct iwl_priv *priv);
extern int iwl_send_lq_cmd(struct iwl_priv *priv,
struct iwl_link_quality_cmd *lq, u8 flags, bool init);
extern void iwl_rx_reply_rx(struct iwl_priv *priv,
struct iwl_rx_mem_buffer *rxb);
extern void iwl_rx_reply_rx_phy(struct iwl_priv *priv,
struct iwl_rx_mem_buffer *rxb);
void iwl_rx_reply_compressed_ba(struct iwl_priv *priv,
struct iwl_rx_mem_buffer *rxb);
void iwl_apm_stop(struct iwl_priv *priv);
int iwl_apm_init(struct iwl_priv *priv);

View File

@ -43,6 +43,7 @@
#include "iwl-debug.h"
#include "iwl-4965-hw.h"
#include "iwl-3945-hw.h"
#include "iwl-agn-hw.h"
#include "iwl-led.h"
#include "iwl-power.h"
#include "iwl-agn-rs.h"
@ -57,6 +58,7 @@ extern struct iwl_cfg iwl5100_abg_cfg;
extern struct iwl_cfg iwl5150_agn_cfg;
extern struct iwl_cfg iwl5150_abg_cfg;
extern struct iwl_cfg iwl6000i_2agn_cfg;
extern struct iwl_cfg iwl6000i_g2_2agn_cfg;
extern struct iwl_cfg iwl6000i_2abg_cfg;
extern struct iwl_cfg iwl6000i_2bg_cfg;
extern struct iwl_cfg iwl6000_3agn_cfg;
@ -67,45 +69,6 @@ extern struct iwl_cfg iwl1000_bg_cfg;
struct iwl_tx_queue;
/* shared structures from iwl-5000.c */
extern struct iwl_mod_params iwl50_mod_params;
extern struct iwl_ucode_ops iwl5000_ucode;
extern struct iwl_lib_ops iwl5000_lib;
extern struct iwl_hcmd_ops iwl5000_hcmd;
extern struct iwl_hcmd_utils_ops iwl5000_hcmd_utils;
/* shared functions from iwl-5000.c */
extern u16 iwl5000_get_hcmd_size(u8 cmd_id, u16 len);
extern u16 iwl5000_build_addsta_hcmd(const struct iwl_addsta_cmd *cmd,
u8 *data);
extern void iwl5000_rts_tx_cmd_flag(struct ieee80211_tx_info *info,
__le32 *tx_flags);
extern int iwl5000_calc_rssi(struct iwl_priv *priv,
struct iwl_rx_phy_res *rx_resp);
extern void iwl5000_nic_config(struct iwl_priv *priv);
extern u16 iwl5000_eeprom_calib_version(struct iwl_priv *priv);
extern const u8 *iwl5000_eeprom_query_addr(const struct iwl_priv *priv,
size_t offset);
extern void iwl5000_txq_update_byte_cnt_tbl(struct iwl_priv *priv,
struct iwl_tx_queue *txq,
u16 byte_cnt);
extern void iwl5000_txq_inval_byte_cnt_tbl(struct iwl_priv *priv,
struct iwl_tx_queue *txq);
extern int iwl5000_load_ucode(struct iwl_priv *priv);
extern void iwl5000_init_alive_start(struct iwl_priv *priv);
extern int iwl5000_alive_notify(struct iwl_priv *priv);
extern int iwl5000_hw_set_hw_params(struct iwl_priv *priv);
extern int iwl5000_txq_agg_enable(struct iwl_priv *priv, int txq_id,
int tx_fifo, int sta_id, int tid, u16 ssn_idx);
extern int iwl5000_txq_agg_disable(struct iwl_priv *priv, u16 txq_id,
u16 ssn_idx, u8 tx_fifo);
extern void iwl5000_txq_set_sched(struct iwl_priv *priv, u32 mask);
extern void iwl5000_setup_deferred_work(struct iwl_priv *priv);
extern void iwl5000_rx_handler_setup(struct iwl_priv *priv);
extern int iwl5000_hw_valid_rtc_data_addr(u32 addr);
extern int iwl5000_send_tx_power(struct iwl_priv *priv);
extern void iwl5000_temperature(struct iwl_priv *priv);
/* CT-KILL constants */
#define CT_KILL_THRESHOLD_LEGACY 110 /* in Celsius */
#define CT_KILL_THRESHOLD 114 /* in Celsius */
@ -363,13 +326,6 @@ enum {
#define DEF_CMD_PAYLOAD_SIZE 320
/*
* IWL_LINK_HDR_MAX should include ieee80211_hdr, radiotap header,
* SNAP header and alignment. It should also be big enough for 802.11
* control frames.
*/
#define IWL_LINK_HDR_MAX 64
/**
* struct iwl_device_cmd
*
@ -521,30 +477,9 @@ struct iwl_ht_config {
u8 non_GF_STA_present;
};
union iwl_qos_capabity {
struct {
u8 edca_count:4; /* bit 0-3 */
u8 q_ack:1; /* bit 4 */
u8 queue_request:1; /* bit 5 */
u8 txop_request:1; /* bit 6 */
u8 reserved:1; /* bit 7 */
} q_AP;
struct {
u8 acvo_APSD:1; /* bit 0 */
u8 acvi_APSD:1; /* bit 1 */
u8 ac_bk_APSD:1; /* bit 2 */
u8 ac_be_APSD:1; /* bit 3 */
u8 q_ack:1; /* bit 4 */
u8 max_len:2; /* bit 5-6 */
u8 more_data_ack:1; /* bit 7 */
} q_STA;
u8 val;
};
/* QoS structures */
struct iwl_qos_info {
int qos_active;
union iwl_qos_capabity qos_cap;
struct iwl_qosparam_cmd def_qos_parm;
};
@ -1185,7 +1120,6 @@ struct iwl_priv {
__le16 sensitivity_tbl[HD_TABLE_SIZE];
struct iwl_ht_config current_ht_config;
u8 last_phy_res[100];
/* Rate scaling data */
u8 retry_rate;
@ -1205,8 +1139,6 @@ struct iwl_priv {
unsigned long status;
int last_rx_noise; /* From beacon statistics */
/* counts mgmt, ctl, and data packets */
struct traffic_stats tx_stats;
struct traffic_stats rx_stats;
@ -1234,7 +1166,6 @@ struct iwl_priv {
int num_stations;
struct iwl_station_entry stations[IWL_STATION_COUNT];
struct iwl_wep_key wep_keys[WEP_KEYS_MAX]; /* protected by mutex */
u8 default_wep_key;
u8 key_mapping_key;
unsigned long ucode_key_table;
@ -1305,6 +1236,9 @@ struct iwl_priv {
* no AGGREGATION
*/
u8 agg_tids_count;
struct iwl_rx_phy_res last_phy_res;
bool last_phy_res_valid;
} _agn;
#endif
};

View File

@ -37,6 +37,7 @@ EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_iowrite8);
EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_ioread32);
EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_iowrite32);
EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_rx);
EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_tx);
EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_ucode_event);
EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_ucode_error);
EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_ucode_cont_event);

View File

@ -188,19 +188,19 @@ struct iwl_eeprom_enhanced_txpwr {
/* 5000 regulatory - indirect access */
#define EEPROM_5000_REG_SKU_ID ((0x02)\
| INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 4 bytes */
#define EEPROM_5000_REG_BAND_1_CHANNELS ((0x08)\
#define EEPROM_REG_BAND_1_CHANNELS ((0x08)\
| INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 28 bytes */
#define EEPROM_5000_REG_BAND_2_CHANNELS ((0x26)\
#define EEPROM_REG_BAND_2_CHANNELS ((0x26)\
| INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 26 bytes */
#define EEPROM_5000_REG_BAND_3_CHANNELS ((0x42)\
#define EEPROM_REG_BAND_3_CHANNELS ((0x42)\
| INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 24 bytes */
#define EEPROM_5000_REG_BAND_4_CHANNELS ((0x5C)\
#define EEPROM_REG_BAND_4_CHANNELS ((0x5C)\
| INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 22 bytes */
#define EEPROM_5000_REG_BAND_5_CHANNELS ((0x74)\
#define EEPROM_REG_BAND_5_CHANNELS ((0x74)\
| INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 12 bytes */
#define EEPROM_5000_REG_BAND_24_HT40_CHANNELS ((0x82)\
#define EEPROM_REG_BAND_24_HT40_CHANNELS ((0x82)\
| INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 14 bytes */
#define EEPROM_5000_REG_BAND_52_HT40_CHANNELS ((0x92)\
#define EEPROM_REG_BAND_52_HT40_CHANNELS ((0x92)\
| INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 22 bytes */
/* 6000 and up regulatory tx power - indirect access */
@ -261,12 +261,15 @@ struct iwl_eeprom_enhanced_txpwr {
#define EEPROM_5050_EEPROM_VERSION (0x21E)
/* 1000 Specific */
#define EEPROM_1000_TX_POWER_VERSION (4)
#define EEPROM_1000_EEPROM_VERSION (0x15C)
/* 6x00 Specific */
#define EEPROM_6000_TX_POWER_VERSION (4)
#define EEPROM_6000_EEPROM_VERSION (0x434)
/* 6x50 Specific */
#define EEPROM_6050_TX_POWER_VERSION (4)
#define EEPROM_6050_EEPROM_VERSION (0x532)
/* OTP */

View File

@ -169,7 +169,7 @@ int iwl_send_cmd_sync(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
mutex_lock(&priv->sync_cmd_mutex);
set_bit(STATUS_HCMD_ACTIVE, &priv->status);
IWL_DEBUG_INFO(priv, "Setting HCMD_ACTIVE for command %s \n",
IWL_DEBUG_INFO(priv, "Setting HCMD_ACTIVE for command %s\n",
get_cmd_string(cmd->id));
cmd_idx = iwl_enqueue_hcmd(priv, cmd);
@ -191,7 +191,7 @@ int iwl_send_cmd_sync(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));
clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
IWL_DEBUG_INFO(priv, "Clearing HCMD_ACTIVE for command %s \n",
IWL_DEBUG_INFO(priv, "Clearing HCMD_ACTIVE for command %s\n",
get_cmd_string(cmd->id));
ret = -ETIMEDOUT;
goto cancel;

View File

@ -298,7 +298,7 @@ static inline u32 __iwl_read_direct32(const char *f, u32 l,
struct iwl_priv *priv, u32 reg)
{
u32 value = _iwl_read_direct32(priv, reg);
IWL_DEBUG_IO(priv, "read_direct32(0x%4X) = 0x%08x - %s %d \n", reg, value,
IWL_DEBUG_IO(priv, "read_direct32(0x%4X) = 0x%08x - %s %d\n", reg, value,
f, l);
return value;
}

View File

@ -46,7 +46,7 @@
static int led_mode;
module_param(led_mode, int, S_IRUGO);
MODULE_PARM_DESC(led_mode, "led mode: 0=blinking, 1=On(RF On)/Off(RF Off), "
"(default 0)\n");
"(default 0)");
static const struct {

View File

@ -384,10 +384,10 @@ EXPORT_SYMBOL(iwl_ht_enabled);
bool iwl_within_ct_kill_margin(struct iwl_priv *priv)
{
s32 temp = priv->temperature; /* degrees CELSIUS except 4965 */
s32 temp = priv->temperature; /* degrees CELSIUS except where specified */
bool within_margin = false;
if ((priv->hw_rev & CSR_HW_REV_TYPE_MSK) == CSR_HW_REV_TYPE_4965)
if (priv->cfg->temperature_kelvin)
temp = KELVIN_TO_CELSIUS(priv->temperature);
if (!priv->thermal_throttle.advanced_tt)
@ -840,12 +840,12 @@ EXPORT_SYMBOL(iwl_tt_exit_ct_kill);
static void iwl_bg_tt_work(struct work_struct *work)
{
struct iwl_priv *priv = container_of(work, struct iwl_priv, tt_work);
s32 temp = priv->temperature; /* degrees CELSIUS except 4965 */
s32 temp = priv->temperature; /* degrees CELSIUS except where specified */
if (test_bit(STATUS_EXIT_PENDING, &priv->status))
return;
if ((priv->hw_rev & CSR_HW_REV_TYPE_MSK) == CSR_HW_REV_TYPE_4965)
if (priv->cfg->temperature_kelvin)
temp = KELVIN_TO_CELSIUS(priv->temperature);
if (!priv->thermal_throttle.advanced_tt)
@ -875,7 +875,7 @@ void iwl_tt_initialize(struct iwl_priv *priv)
int size = sizeof(struct iwl_tt_trans) * (IWL_TI_STATE_MAX - 1);
struct iwl_tt_trans *transaction;
IWL_DEBUG_POWER(priv, "Initialize Thermal Throttling \n");
IWL_DEBUG_POWER(priv, "Initialize Thermal Throttling\n");
memset(tt, 0, sizeof(struct iwl_tt_mgmt));

View File

@ -163,197 +163,6 @@ void iwl_rx_queue_update_write_ptr(struct iwl_priv *priv, struct iwl_rx_queue *q
spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL(iwl_rx_queue_update_write_ptr);
/**
* iwl_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
*/
static inline __le32 iwl_dma_addr2rbd_ptr(struct iwl_priv *priv,
dma_addr_t dma_addr)
{
return cpu_to_le32((u32)(dma_addr >> 8));
}
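A standalone way to see why the >> 8 in iwl_dma_addr2rbd_ptr() is lossless: receive-buffer DMA addresses are constrained to at most 36 bits and 256-byte alignment (the BUG_ON checks in iwl_rx_allocate() further down), so the shifted value needs only 28 bits and comfortably fits the 32-bit RBD slot. A minimal sketch outside the kernel, with stand-in helpers:

#include <assert.h>
#include <stdint.h>

/* Pack a 36-bit, 256-byte-aligned DMA address into the 32-bit RBD format. */
static uint32_t dma_addr_to_rbd(uint64_t dma_addr)
{
	assert((dma_addr & ~((1ULL << 36) - 1)) == 0);	/* at most 36 bits  */
	assert((dma_addr & 0xff) == 0);			/* 256-byte aligned */
	return (uint32_t)(dma_addr >> 8);		/* 28 significant bits */
}

static uint64_t rbd_to_dma_addr(uint32_t rbd)
{
	return (uint64_t)rbd << 8;
}

int main(void)
{
	uint64_t addr = 0x8ABCDEF00ULL;	/* example: 36-bit, aligned */

	assert(rbd_to_dma_addr(dma_addr_to_rbd(addr)) == addr);
	return 0;
}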
/**
* iwl_rx_queue_restock - refill RX queue from pre-allocated pool
*
* If there are slots in the RX queue that need to be restocked,
* and we have free pre-allocated buffers, fill the ranks as much
* as we can, pulling from rx_free.
*
* This moves the 'write' index forward to catch up with 'processed', and
* also updates the memory address in the firmware to reference the new
* target buffer.
*/
void iwl_rx_queue_restock(struct iwl_priv *priv)
{
struct iwl_rx_queue *rxq = &priv->rxq;
struct list_head *element;
struct iwl_rx_mem_buffer *rxb;
unsigned long flags;
int write;
spin_lock_irqsave(&rxq->lock, flags);
write = rxq->write & ~0x7;
while ((iwl_rx_queue_space(rxq) > 0) && (rxq->free_count)) {
/* Get next free Rx buffer, remove from free list */
element = rxq->rx_free.next;
rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
list_del(element);
/* Point to Rx buffer via next RBD in circular buffer */
rxq->bd[rxq->write] = iwl_dma_addr2rbd_ptr(priv, rxb->page_dma);
rxq->queue[rxq->write] = rxb;
rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
rxq->free_count--;
}
spin_unlock_irqrestore(&rxq->lock, flags);
/* If the pre-allocated buffer pool is dropping low, schedule to
* refill it */
if (rxq->free_count <= RX_LOW_WATERMARK)
queue_work(priv->workqueue, &priv->rx_replenish);
/* If we've added more space for the firmware to place data, tell it.
* Increment device's write pointer in multiples of 8. */
if (rxq->write_actual != (rxq->write & ~0x7)) {
spin_lock_irqsave(&rxq->lock, flags);
rxq->need_update = 1;
spin_unlock_irqrestore(&rxq->lock, flags);
iwl_rx_queue_update_write_ptr(priv, rxq);
}
}
EXPORT_SYMBOL(iwl_rx_queue_restock);
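One detail in iwl_rx_queue_restock() above that is easy to miss: the device write pointer is only advanced in multiples of 8 RBDs (the write & ~0x7 masking), so restocked buffers become visible to the firmware in batches of eight. A tiny standalone illustration of that masking, not driver code:

#include <stdio.h>

int main(void)
{
	unsigned int write, write_actual = 0;

	for (write = 0; write <= 20; write++) {
		/* mirrors the "rxq->write_actual != (rxq->write & ~0x7)" test */
		if (write_actual != (write & ~0x7u)) {
			write_actual = write & ~0x7u;
			printf("write=%u -> device told write_actual=%u\n",
			       write, write_actual);
		}
	}
	return 0;	/* prints only at write = 8 and write = 16 */
}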
/**
* iwl_rx_replenish - Move all used packets from rx_used to rx_free
*
* When moving to rx_free an SKB is allocated for the slot.
*
* Also restock the Rx queue via iwl_rx_queue_restock.
* This is called as a scheduled work item (except during initialization)
*/
void iwl_rx_allocate(struct iwl_priv *priv, gfp_t priority)
{
struct iwl_rx_queue *rxq = &priv->rxq;
struct list_head *element;
struct iwl_rx_mem_buffer *rxb;
struct page *page;
unsigned long flags;
gfp_t gfp_mask = priority;
while (1) {
spin_lock_irqsave(&rxq->lock, flags);
if (list_empty(&rxq->rx_used)) {
spin_unlock_irqrestore(&rxq->lock, flags);
return;
}
spin_unlock_irqrestore(&rxq->lock, flags);
if (rxq->free_count > RX_LOW_WATERMARK)
gfp_mask |= __GFP_NOWARN;
if (priv->hw_params.rx_page_order > 0)
gfp_mask |= __GFP_COMP;
/* Alloc a new receive buffer */
page = alloc_pages(gfp_mask, priv->hw_params.rx_page_order);
if (!page) {
if (net_ratelimit())
IWL_DEBUG_INFO(priv, "alloc_pages failed, "
"order: %d\n",
priv->hw_params.rx_page_order);
if ((rxq->free_count <= RX_LOW_WATERMARK) &&
net_ratelimit())
IWL_CRIT(priv, "Failed to alloc_pages with %s. Only %u free buffers remaining.\n",
priority == GFP_ATOMIC ? "GFP_ATOMIC" : "GFP_KERNEL",
rxq->free_count);
/* We don't reschedule replenish work here -- we will
* call the restock method and if it still needs
* more buffers it will schedule replenish */
return;
}
spin_lock_irqsave(&rxq->lock, flags);
if (list_empty(&rxq->rx_used)) {
spin_unlock_irqrestore(&rxq->lock, flags);
__free_pages(page, priv->hw_params.rx_page_order);
return;
}
element = rxq->rx_used.next;
rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
list_del(element);
spin_unlock_irqrestore(&rxq->lock, flags);
rxb->page = page;
/* Get physical address of the RB */
rxb->page_dma = pci_map_page(priv->pci_dev, page, 0,
PAGE_SIZE << priv->hw_params.rx_page_order,
PCI_DMA_FROMDEVICE);
/* dma address must be no more than 36 bits */
BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36));
/* and also 256 byte aligned! */
BUG_ON(rxb->page_dma & DMA_BIT_MASK(8));
spin_lock_irqsave(&rxq->lock, flags);
list_add_tail(&rxb->list, &rxq->rx_free);
rxq->free_count++;
priv->alloc_rxb_page++;
spin_unlock_irqrestore(&rxq->lock, flags);
}
}
void iwl_rx_replenish(struct iwl_priv *priv)
{
unsigned long flags;
iwl_rx_allocate(priv, GFP_KERNEL);
spin_lock_irqsave(&priv->lock, flags);
iwl_rx_queue_restock(priv);
spin_unlock_irqrestore(&priv->lock, flags);
}
EXPORT_SYMBOL(iwl_rx_replenish);
void iwl_rx_replenish_now(struct iwl_priv *priv)
{
iwl_rx_allocate(priv, GFP_ATOMIC);
iwl_rx_queue_restock(priv);
}
EXPORT_SYMBOL(iwl_rx_replenish_now);
/* Assumes that the skb field of the buffers in 'pool' is kept accurate.
* If an SKB has been detached, the POOL needs to have its SKB set to NULL.
* This free routine walks the list of POOL entries and if SKB is set to
* non-NULL it is unmapped and freed.
*/
void iwl_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
{
int i;
for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
if (rxq->pool[i].page != NULL) {
pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma,
PAGE_SIZE << priv->hw_params.rx_page_order,
PCI_DMA_FROMDEVICE);
__iwl_free_pages(priv, rxq->pool[i].page);
rxq->pool[i].page = NULL;
}
}
dma_free_coherent(&priv->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd,
rxq->dma_addr);
dma_free_coherent(&priv->pci_dev->dev, sizeof(struct iwl_rb_status),
rxq->rb_stts, rxq->rb_stts_dma);
rxq->bd = NULL;
rxq->rb_stts = NULL;
}
EXPORT_SYMBOL(iwl_rx_queue_free);
int iwl_rx_queue_alloc(struct iwl_priv *priv)
{
@ -396,98 +205,6 @@ err_bd:
}
EXPORT_SYMBOL(iwl_rx_queue_alloc);
void iwl_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
{
unsigned long flags;
int i;
spin_lock_irqsave(&rxq->lock, flags);
INIT_LIST_HEAD(&rxq->rx_free);
INIT_LIST_HEAD(&rxq->rx_used);
/* Fill the rx_used queue with _all_ of the Rx buffers */
for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
/* In the reset function, these buffers may have been allocated
* to an SKB, so we need to unmap and free potential storage */
if (rxq->pool[i].page != NULL) {
pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma,
PAGE_SIZE << priv->hw_params.rx_page_order,
PCI_DMA_FROMDEVICE);
__iwl_free_pages(priv, rxq->pool[i].page);
rxq->pool[i].page = NULL;
}
list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
}
/* Set us so that we have processed and used all buffers, but have
* not restocked the Rx queue with fresh buffers */
rxq->read = rxq->write = 0;
rxq->write_actual = 0;
rxq->free_count = 0;
spin_unlock_irqrestore(&rxq->lock, flags);
}
int iwl_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
{
u32 rb_size;
const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */
u32 rb_timeout = 0; /* FIXME: RX_RB_TIMEOUT for all devices? */
if (!priv->cfg->use_isr_legacy)
rb_timeout = RX_RB_TIMEOUT;
if (priv->cfg->mod_params->amsdu_size_8K)
rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;
else
rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;
/* Stop Rx DMA */
iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
/* Reset driver's Rx queue write index */
iwl_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
/* Tell device where to find RBD circular buffer in DRAM */
iwl_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
(u32)(rxq->dma_addr >> 8));
/* Tell device where in DRAM to update its Rx status */
iwl_write_direct32(priv, FH_RSCSR_CHNL0_STTS_WPTR_REG,
rxq->rb_stts_dma >> 4);
/* Enable Rx DMA
* FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of HW bug in
* the credit mechanism in 5000 HW RX FIFO
* Direct rx interrupts to hosts
* Rx buffer size 4 or 8k
* RB timeout 0x10
* 256 RBDs
*/
iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG,
FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY |
FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
FH_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK |
rb_size|
(rb_timeout << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS)|
(rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS));
/* Set interrupt coalescing timer to default (2048 usecs) */
iwl_write8(priv, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
return 0;
}
int iwl_rxq_stop(struct iwl_priv *priv)
{
/* stop Rx DMA */
iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
iwl_poll_direct_bit(priv, FH_MEM_RSSR_RX_STATUS_REG,
FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000);
return 0;
}
EXPORT_SYMBOL(iwl_rxq_stop);
void iwl_rx_missed_beacon_notif(struct iwl_priv *priv,
struct iwl_rx_mem_buffer *rxb)
@ -543,6 +260,7 @@ static void iwl_rx_calc_noise(struct iwl_priv *priv)
le32_to_cpu(rx_info->beacon_silence_rssi_b) & IN_BAND_FILTER;
int bcn_silence_c =
le32_to_cpu(rx_info->beacon_silence_rssi_c) & IN_BAND_FILTER;
int last_rx_noise;
if (bcn_silence_a) {
total_silence += bcn_silence_a;
@ -559,13 +277,13 @@ static void iwl_rx_calc_noise(struct iwl_priv *priv)
/* Average among active antennas */
if (num_active_rx)
priv->last_rx_noise = (total_silence / num_active_rx) - 107;
last_rx_noise = (total_silence / num_active_rx) - 107;
else
priv->last_rx_noise = IWL_NOISE_MEAS_NOT_AVAILABLE;
last_rx_noise = IWL_NOISE_MEAS_NOT_AVAILABLE;
IWL_DEBUG_CALIB(priv, "inband silence a %u, b %u, c %u, dBm %d\n",
bcn_silence_a, bcn_silence_b, bcn_silence_c,
priv->last_rx_noise);
last_rx_noise);
}
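To make the averaging above concrete: only chains reporting a non-zero in-band silence value count as active, and the average is referenced to -107 to yield dBm. A standalone sketch with made-up readings:

#include <stdio.h>

#define NOISE_MEAS_NOT_AVAILABLE (-127)	/* stand-in for the driver constant */

int main(void)
{
	int silence[3] = { 30, 24, 0 };	/* hypothetical chain A/B/C readings */
	int total = 0, active = 0, i;

	for (i = 0; i < 3; i++) {
		if (silence[i]) {
			total += silence[i];
			active++;
		}
	}
	if (active)
		printf("noise = %d dBm\n", total / active - 107);	/* -80 dBm */
	else
		printf("noise = %d (not available)\n", NOISE_MEAS_NOT_AVAILABLE);
	return 0;
}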
#ifdef CONFIG_IWLWIFI_DEBUG
@ -617,63 +335,6 @@ static void iwl_accumulative_statistics(struct iwl_priv *priv,
#define REG_RECALIB_PERIOD (60)
/* the threshold ratio of actual_ack_cnt to expected_ack_cnt in percent */
#define ACK_CNT_RATIO (50)
#define BA_TIMEOUT_CNT (5)
#define BA_TIMEOUT_MAX (16)
#if defined(CONFIG_IWLAGN) || defined(CONFIG_IWLAGN_MODULE)
/**
* iwl_good_ack_health - checks for ACK count ratios, BA timeout retries.
*
* When the ACK count ratio is 0 and aggregated BA timeout retries exceed
* BA_TIMEOUT_MAX, reload the firmware and bring the system back to normal
* operation state.
*/
bool iwl_good_ack_health(struct iwl_priv *priv,
struct iwl_rx_packet *pkt)
{
bool rc = true;
int actual_ack_cnt_delta, expected_ack_cnt_delta;
int ba_timeout_delta;
actual_ack_cnt_delta =
le32_to_cpu(pkt->u.stats.tx.actual_ack_cnt) -
le32_to_cpu(priv->statistics.tx.actual_ack_cnt);
expected_ack_cnt_delta =
le32_to_cpu(pkt->u.stats.tx.expected_ack_cnt) -
le32_to_cpu(priv->statistics.tx.expected_ack_cnt);
ba_timeout_delta =
le32_to_cpu(pkt->u.stats.tx.agg.ba_timeout) -
le32_to_cpu(priv->statistics.tx.agg.ba_timeout);
if ((priv->_agn.agg_tids_count > 0) &&
(expected_ack_cnt_delta > 0) &&
(((actual_ack_cnt_delta * 100) / expected_ack_cnt_delta)
< ACK_CNT_RATIO) &&
(ba_timeout_delta > BA_TIMEOUT_CNT)) {
IWL_DEBUG_RADIO(priv, "actual_ack_cnt delta = %d,"
" expected_ack_cnt = %d\n",
actual_ack_cnt_delta, expected_ack_cnt_delta);
#ifdef CONFIG_IWLWIFI_DEBUG
IWL_DEBUG_RADIO(priv, "rx_detected_cnt delta = %d\n",
priv->delta_statistics.tx.rx_detected_cnt);
IWL_DEBUG_RADIO(priv,
"ack_or_ba_timeout_collision delta = %d\n",
priv->delta_statistics.tx.
ack_or_ba_timeout_collision);
#endif
IWL_DEBUG_RADIO(priv, "agg ba_timeout delta = %d\n",
ba_timeout_delta);
if (!actual_ack_cnt_delta &&
(ba_timeout_delta >= BA_TIMEOUT_MAX))
rc = false;
}
return rc;
}
EXPORT_SYMBOL(iwl_good_ack_health);
#endif
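A standalone model of the thresholds used by iwl_good_ack_health() above, fed with made-up deltas; per the comment, a false result is what ultimately triggers the firmware reload in the recovery path:

#include <stdbool.h>
#include <stdio.h>

#define ACK_CNT_RATIO	50	/* mirrors the defines above */
#define BA_TIMEOUT_CNT	5
#define BA_TIMEOUT_MAX	16

static bool ack_health_ok(int actual_ack_delta, int expected_ack_delta,
			  int ba_timeout_delta, int agg_tids)
{
	if (agg_tids > 0 && expected_ack_delta > 0 &&
	    (actual_ack_delta * 100) / expected_ack_delta < ACK_CNT_RATIO &&
	    ba_timeout_delta > BA_TIMEOUT_CNT) {
		/* no ACKs at all plus many BA timeouts -> declare unhealthy */
		if (!actual_ack_delta && ba_timeout_delta >= BA_TIMEOUT_MAX)
			return false;
	}
	return true;
}

int main(void)
{
	/* 40% ACK ratio but only 3 BA timeouts: still considered healthy */
	printf("healthy: %d\n", ack_health_ok(80, 200, 3, 1));
	/* zero ACKs out of 200 expected and 20 BA timeouts: unhealthy */
	printf("healthy: %d\n", ack_health_ok(0, 200, 20, 1));
	return 0;
}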
/**
* iwl_good_plcp_health - checks for plcp error.
*
@ -830,139 +491,6 @@ void iwl_reply_statistics(struct iwl_priv *priv,
}
EXPORT_SYMBOL(iwl_reply_statistics);
/* Calc max signal level (dBm) among 3 possible receivers */
static inline int iwl_calc_rssi(struct iwl_priv *priv,
struct iwl_rx_phy_res *rx_resp)
{
return priv->cfg->ops->utils->calc_rssi(priv, rx_resp);
}
#ifdef CONFIG_IWLWIFI_DEBUG
/**
* iwl_dbg_report_frame - dump frame to syslog during debug sessions
*
* You may hack this function to show different aspects of received frames,
* including selective frame dumps.
* The group100 parameter selects whether to show only 1 out of every 100 good data frames.
* All beacon and probe response frames are printed.
*/
static void iwl_dbg_report_frame(struct iwl_priv *priv,
struct iwl_rx_phy_res *phy_res, u16 length,
struct ieee80211_hdr *header, int group100)
{
u32 to_us;
u32 print_summary = 0;
u32 print_dump = 0; /* set to 1 to dump all frames' contents */
u32 hundred = 0;
u32 dataframe = 0;
__le16 fc;
u16 seq_ctl;
u16 channel;
u16 phy_flags;
u32 rate_n_flags;
u32 tsf_low;
int rssi;
if (likely(!(iwl_get_debug_level(priv) & IWL_DL_RX)))
return;
/* MAC header */
fc = header->frame_control;
seq_ctl = le16_to_cpu(header->seq_ctrl);
/* metadata */
channel = le16_to_cpu(phy_res->channel);
phy_flags = le16_to_cpu(phy_res->phy_flags);
rate_n_flags = le32_to_cpu(phy_res->rate_n_flags);
/* signal statistics */
rssi = iwl_calc_rssi(priv, phy_res);
tsf_low = le64_to_cpu(phy_res->timestamp) & 0x0ffffffff;
to_us = !compare_ether_addr(header->addr1, priv->mac_addr);
/* if data frame is to us and all is good,
* (optionally) print summary for only 1 out of every 100 */
if (to_us && (fc & ~cpu_to_le16(IEEE80211_FCTL_PROTECTED)) ==
cpu_to_le16(IEEE80211_FCTL_FROMDS | IEEE80211_FTYPE_DATA)) {
dataframe = 1;
if (!group100)
print_summary = 1; /* print each frame */
else if (priv->framecnt_to_us < 100) {
priv->framecnt_to_us++;
print_summary = 0;
} else {
priv->framecnt_to_us = 0;
print_summary = 1;
hundred = 1;
}
} else {
/* print summary for all other frames */
print_summary = 1;
}
if (print_summary) {
char *title;
int rate_idx;
u32 bitrate;
if (hundred)
title = "100Frames";
else if (ieee80211_has_retry(fc))
title = "Retry";
else if (ieee80211_is_assoc_resp(fc))
title = "AscRsp";
else if (ieee80211_is_reassoc_resp(fc))
title = "RasRsp";
else if (ieee80211_is_probe_resp(fc)) {
title = "PrbRsp";
print_dump = 1; /* dump frame contents */
} else if (ieee80211_is_beacon(fc)) {
title = "Beacon";
print_dump = 1; /* dump frame contents */
} else if (ieee80211_is_atim(fc))
title = "ATIM";
else if (ieee80211_is_auth(fc))
title = "Auth";
else if (ieee80211_is_deauth(fc))
title = "DeAuth";
else if (ieee80211_is_disassoc(fc))
title = "DisAssoc";
else
title = "Frame";
rate_idx = iwl_hwrate_to_plcp_idx(rate_n_flags);
if (unlikely((rate_idx < 0) || (rate_idx >= IWL_RATE_COUNT))) {
bitrate = 0;
WARN_ON_ONCE(1);
} else {
bitrate = iwl_rates[rate_idx].ieee / 2;
}
/* print frame summary.
* MAC addresses show just the last byte (for brevity),
* but you can hack it to show more, if you'd like to. */
if (dataframe)
IWL_DEBUG_RX(priv, "%s: mhd=0x%04x, dst=0x%02x, "
"len=%u, rssi=%d, chnl=%d, rate=%u, \n",
title, le16_to_cpu(fc), header->addr1[5],
length, rssi, channel, bitrate);
else {
/* src/dst addresses assume managed mode */
IWL_DEBUG_RX(priv, "%s: 0x%04x, dst=0x%02x, src=0x%02x, "
"len=%u, rssi=%d, tim=%lu usec, "
"phy=0x%02x, chnl=%d\n",
title, le16_to_cpu(fc), header->addr1[5],
header->addr3[5], length, rssi,
tsf_low - priv->scan_start_tsf,
phy_flags, channel);
}
}
if (print_dump)
iwl_print_hex_dump(priv, IWL_DL_RX, header, length);
}
#endif
/*
* returns non-zero if packet should be dropped
*/
@ -1010,281 +538,3 @@ int iwl_set_decrypted_flag(struct iwl_priv *priv,
return 0;
}
EXPORT_SYMBOL(iwl_set_decrypted_flag);
static u32 iwl_translate_rx_status(struct iwl_priv *priv, u32 decrypt_in)
{
u32 decrypt_out = 0;
if ((decrypt_in & RX_RES_STATUS_STATION_FOUND) ==
RX_RES_STATUS_STATION_FOUND)
decrypt_out |= (RX_RES_STATUS_STATION_FOUND |
RX_RES_STATUS_NO_STATION_INFO_MISMATCH);
decrypt_out |= (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK);
/* packet was not encrypted */
if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) ==
RX_RES_STATUS_SEC_TYPE_NONE)
return decrypt_out;
/* packet was encrypted with unknown alg */
if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) ==
RX_RES_STATUS_SEC_TYPE_ERR)
return decrypt_out;
/* decryption was not done in HW */
if ((decrypt_in & RX_MPDU_RES_STATUS_DEC_DONE_MSK) !=
RX_MPDU_RES_STATUS_DEC_DONE_MSK)
return decrypt_out;
switch (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) {
case RX_RES_STATUS_SEC_TYPE_CCMP:
/* alg is CCM: check MIC only */
if (!(decrypt_in & RX_MPDU_RES_STATUS_MIC_OK))
/* Bad MIC */
decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC;
else
decrypt_out |= RX_RES_STATUS_DECRYPT_OK;
break;
case RX_RES_STATUS_SEC_TYPE_TKIP:
if (!(decrypt_in & RX_MPDU_RES_STATUS_TTAK_OK)) {
/* Bad TTAK */
decrypt_out |= RX_RES_STATUS_BAD_KEY_TTAK;
break;
}
/* fall through if TTAK OK */
default:
if (!(decrypt_in & RX_MPDU_RES_STATUS_ICV_OK))
decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC;
else
decrypt_out |= RX_RES_STATUS_DECRYPT_OK;
break;
};
IWL_DEBUG_RX(priv, "decrypt_in:0x%x decrypt_out = 0x%x\n",
decrypt_in, decrypt_out);
return decrypt_out;
}
static void iwl_pass_packet_to_mac80211(struct iwl_priv *priv,
struct ieee80211_hdr *hdr,
u16 len,
u32 ampdu_status,
struct iwl_rx_mem_buffer *rxb,
struct ieee80211_rx_status *stats)
{
struct sk_buff *skb;
int ret = 0;
__le16 fc = hdr->frame_control;
/* We only process data packets if the interface is open */
if (unlikely(!priv->is_open)) {
IWL_DEBUG_DROP_LIMIT(priv,
"Dropping packet while interface is not open.\n");
return;
}
/* In case of HW accelerated crypto and bad decryption, drop */
if (!priv->cfg->mod_params->sw_crypto &&
iwl_set_decrypted_flag(priv, hdr, ampdu_status, stats))
return;
skb = alloc_skb(IWL_LINK_HDR_MAX * 2, GFP_ATOMIC);
if (!skb) {
IWL_ERR(priv, "alloc_skb failed\n");
return;
}
skb_reserve(skb, IWL_LINK_HDR_MAX);
skb_add_rx_frag(skb, 0, rxb->page, (void *)hdr - rxb_addr(rxb), len);
/* mac80211 currently doesn't support paged SKBs. Convert to a
* linear SKB for management frames and for data frames that require
* software decryption or software defragmentation. */
if (ieee80211_is_mgmt(fc) ||
ieee80211_has_protected(fc) ||
ieee80211_has_morefrags(fc) ||
le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG ||
(ieee80211_is_data_qos(fc) &&
*ieee80211_get_qos_ctl(hdr) &
IEEE80211_QOS_CONTROL_A_MSDU_PRESENT))
ret = skb_linearize(skb);
else
ret = __pskb_pull_tail(skb, min_t(u16, IWL_LINK_HDR_MAX, len)) ?
0 : -ENOMEM;
if (ret) {
kfree_skb(skb);
goto out;
}
/*
* XXX: We cannot touch the page and its virtual memory (hdr) after
* here. It might have already been freed by the above skb change.
*/
iwl_update_stats(priv, false, fc, len);
memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats));
ieee80211_rx(priv->hw, skb);
out:
priv->alloc_rxb_page--;
rxb->page = NULL;
}
/* Called for REPLY_RX (legacy ABG frames), or
* REPLY_RX_MPDU_CMD (HT high-throughput N frames). */
void iwl_rx_reply_rx(struct iwl_priv *priv,
struct iwl_rx_mem_buffer *rxb)
{
struct ieee80211_hdr *header;
struct ieee80211_rx_status rx_status;
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_rx_phy_res *phy_res;
__le32 rx_pkt_status;
struct iwl4965_rx_mpdu_res_start *amsdu;
u32 len;
u32 ampdu_status;
u32 rate_n_flags;
/**
* REPLY_RX and REPLY_RX_MPDU_CMD are handled differently.
* REPLY_RX: physical layer info is in this buffer
* REPLY_RX_MPDU_CMD: physical layer info was sent in separate
* command and cached in priv->last_phy_res
*
* Here we set up local variables depending on which command is
* received.
*/
if (pkt->hdr.cmd == REPLY_RX) {
phy_res = (struct iwl_rx_phy_res *)pkt->u.raw;
header = (struct ieee80211_hdr *)(pkt->u.raw + sizeof(*phy_res)
+ phy_res->cfg_phy_cnt);
len = le16_to_cpu(phy_res->byte_count);
rx_pkt_status = *(__le32 *)(pkt->u.raw + sizeof(*phy_res) +
phy_res->cfg_phy_cnt + len);
ampdu_status = le32_to_cpu(rx_pkt_status);
} else {
if (!priv->last_phy_res[0]) {
IWL_ERR(priv, "MPDU frame without cached PHY data\n");
return;
}
phy_res = (struct iwl_rx_phy_res *)&priv->last_phy_res[1];
amsdu = (struct iwl4965_rx_mpdu_res_start *)pkt->u.raw;
header = (struct ieee80211_hdr *)(pkt->u.raw + sizeof(*amsdu));
len = le16_to_cpu(amsdu->byte_count);
rx_pkt_status = *(__le32 *)(pkt->u.raw + sizeof(*amsdu) + len);
ampdu_status = iwl_translate_rx_status(priv,
le32_to_cpu(rx_pkt_status));
}
if ((unlikely(phy_res->cfg_phy_cnt > 20))) {
IWL_DEBUG_DROP(priv, "dsp size out of range [0,20]: %d\n",
phy_res->cfg_phy_cnt);
return;
}
if (!(rx_pkt_status & RX_RES_STATUS_NO_CRC32_ERROR) ||
!(rx_pkt_status & RX_RES_STATUS_NO_RXE_OVERFLOW)) {
IWL_DEBUG_RX(priv, "Bad CRC or FIFO: 0x%08X.\n",
le32_to_cpu(rx_pkt_status));
return;
}
/* This will be used in several places later */
rate_n_flags = le32_to_cpu(phy_res->rate_n_flags);
/* rx_status carries information about the packet to mac80211 */
rx_status.mactime = le64_to_cpu(phy_res->timestamp);
rx_status.freq =
ieee80211_channel_to_frequency(le16_to_cpu(phy_res->channel));
rx_status.band = (phy_res->phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ?
IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
rx_status.rate_idx =
iwl_hwrate_to_mac80211_idx(rate_n_flags, rx_status.band);
rx_status.flag = 0;
/* TSF isn't reliable. In order to allow a smooth user experience,
* this workaround doesn't propagate it to mac80211 */
/*rx_status.flag |= RX_FLAG_TSFT;*/
priv->ucode_beacon_time = le32_to_cpu(phy_res->beacon_time_stamp);
/* Find max signal strength (dBm) among 3 antenna/receiver chains */
rx_status.signal = iwl_calc_rssi(priv, phy_res);
/* Meaningful noise values are available only from beacon statistics,
* which are gathered only when associated, and indicate noise
* only for the associated network channel ...
* Ignore these noise values while scanning (other channels) */
if (iwl_is_associated(priv) &&
!test_bit(STATUS_SCANNING, &priv->status)) {
rx_status.noise = priv->last_rx_noise;
} else {
rx_status.noise = IWL_NOISE_MEAS_NOT_AVAILABLE;
}
/* Reset beacon noise level if not associated. */
if (!iwl_is_associated(priv))
priv->last_rx_noise = IWL_NOISE_MEAS_NOT_AVAILABLE;
#ifdef CONFIG_IWLWIFI_DEBUG
/* Set "1" to report good data frames in groups of 100 */
if (unlikely(iwl_get_debug_level(priv) & IWL_DL_RX))
iwl_dbg_report_frame(priv, phy_res, len, header, 1);
#endif
iwl_dbg_log_rx_data_frame(priv, len, header);
IWL_DEBUG_STATS_LIMIT(priv, "Rssi %d, noise %d, TSF %llu\n",
rx_status.signal, rx_status.noise,
(unsigned long long)rx_status.mactime);
/*
* "antenna number"
*
* It seems that the antenna field in the phy flags value
* is actually a bit field. This is undefined by radiotap; it
* wants an actual antenna number, but I always get "7"
* for most legacy frames I receive, indicating that the
* same frame was received on all three RX chains.
*
* I think this field should be removed in favor of a
* new 802.11n radiotap field "RX chains" that is defined
* as a bitmask.
*/
rx_status.antenna =
(le16_to_cpu(phy_res->phy_flags) & RX_RES_PHY_FLAGS_ANTENNA_MSK)
>> RX_RES_PHY_FLAGS_ANTENNA_POS;
/* set the preamble flag if appropriate */
if (phy_res->phy_flags & RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK)
rx_status.flag |= RX_FLAG_SHORTPRE;
/* Set up the HT phy flags */
if (rate_n_flags & RATE_MCS_HT_MSK)
rx_status.flag |= RX_FLAG_HT;
if (rate_n_flags & RATE_MCS_HT40_MSK)
rx_status.flag |= RX_FLAG_40MHZ;
if (rate_n_flags & RATE_MCS_SGI_MSK)
rx_status.flag |= RX_FLAG_SHORT_GI;
iwl_pass_packet_to_mac80211(priv, header, len, ampdu_status,
rxb, &rx_status);
}
EXPORT_SYMBOL(iwl_rx_reply_rx);
/* Cache phy data (Rx signal strength, etc) for HT frame (REPLY_RX_PHY_CMD).
* This will be used later in iwl_rx_reply_rx() for REPLY_RX_MPDU_CMD. */
void iwl_rx_reply_rx_phy(struct iwl_priv *priv,
struct iwl_rx_mem_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
priv->last_phy_res[0] = 1;
memcpy(&priv->last_phy_res[1], &(pkt->u.raw[0]),
sizeof(struct iwl_rx_phy_res));
}
EXPORT_SYMBOL(iwl_rx_reply_rx_phy);
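The REPLY_RX / REPLY_RX_MPDU_CMD split above is a cache-then-consume pattern: the PHY command stores the signal data, the following MPDU command reads it back (this diff also replaces the raw last_phy_res[100] byte buffer with a typed _agn.last_phy_res plus a last_phy_res_valid flag). A standalone sketch of the pattern, with stand-in types and names:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

struct phy_res { int rssi; int channel; };	/* stand-in for iwl_rx_phy_res */

static struct phy_res last_phy_res;
static bool last_phy_res_valid;

/* REPLY_RX_PHY_CMD: cache the PHY info for the frame that follows. */
static void handle_reply_rx_phy(const struct phy_res *res)
{
	memcpy(&last_phy_res, res, sizeof(last_phy_res));
	last_phy_res_valid = true;
}

/* REPLY_RX_MPDU_CMD: consume the cached PHY info. */
static void handle_reply_rx_mpdu(void)
{
	if (!last_phy_res_valid) {
		printf("MPDU frame without cached PHY data\n");
		return;
	}
	printf("frame on channel %d, rssi %d\n",
	       last_phy_res.channel, last_phy_res.rssi);
}

int main(void)
{
	struct phy_res res = { .rssi = -55, .channel = 36 };

	handle_reply_rx_phy(&res);
	handle_reply_rx_mpdu();
	return 0;
}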

View File

@ -454,7 +454,7 @@ static int iwl_get_channels_for_scan(struct iwl_priv *priv,
added++;
}
IWL_DEBUG_SCAN(priv, "total channels to scan %d \n", added);
IWL_DEBUG_SCAN(priv, "total channels to scan %d\n", added);
return added;
}
@ -814,10 +814,11 @@ static void iwl_bg_request_scan(struct work_struct *data)
*/
scan->good_CRC_th = is_active ? IWL_GOOD_CRC_TH : 0;
/* Force use of chains B and C (0x6) for scan Rx for 4965
* Avoid A (0x1) because of its off-channel reception on A-band.
/* Force use of chains B and C (0x6) for scan Rx
* Avoid A (0x1) because the device has off-channel reception
* on A-band.
*/
if ((priv->hw_rev & CSR_HW_REV_TYPE_MSK) == CSR_HW_REV_TYPE_4965)
if (priv->cfg->off_channel_workaround)
rx_ant = ANT_BC;
} else {
IWL_WARN(priv, "Invalid scan band count\n");

View File

@ -71,7 +71,7 @@ u8 iwl_find_station(struct iwl_priv *priv, const u8 *addr)
(!(priv->stations[ret].used & IWL_STA_UCODE_ACTIVE) ||
((priv->stations[ret].used & IWL_STA_UCODE_ACTIVE) &&
(priv->stations[ret].used & IWL_STA_UCODE_INPROGRESS)))) {
IWL_ERR(priv, "Requested station info for sta %d before ready. \n",
IWL_ERR(priv, "Requested station info for sta %d before ready.\n",
ret);
ret = IWL_INVALID_STATION;
}
@ -143,7 +143,7 @@ static void iwl_process_add_sta_resp(struct iwl_priv *priv,
sta_id);
break;
case ADD_STA_MODIFY_NON_EXIST_STA:
IWL_ERR(priv, "Attempting to modify non-existing station %d \n",
IWL_ERR(priv, "Attempting to modify non-existing station %d\n",
sta_id);
break;
default:
@ -194,7 +194,7 @@ int iwl_send_add_sta(struct iwl_priv *priv,
.flags = flags,
.data = data,
};
u8 sta_id = sta->sta.sta_id;
u8 sta_id __maybe_unused = sta->sta.sta_id;
IWL_DEBUG_INFO(priv, "Adding sta %u (%pM) %ssynchronously\n",
sta_id, sta->sta.addr, flags & CMD_ASYNC ? "a" : "");
@ -425,6 +425,7 @@ static void iwl_sta_init_lq(struct iwl_priv *priv, const u8 *addr, bool is_ap)
.reserved1 = 0,
};
u32 rate_flags;
int ret = 0;
/* Set up the rate scaling to start at selected rate, fall back
* all the way down to 1M in IEEE order, and then spin on 1M */
@ -458,8 +459,10 @@ static void iwl_sta_init_lq(struct iwl_priv *priv, const u8 *addr, bool is_ap)
/* Update the rate scaling for control frame Tx to AP */
link_cmd.sta_id = is_ap ? IWL_AP_ID : priv->hw_params.bcast_sta_id;
iwl_send_cmd_pdu(priv, REPLY_TX_LINK_QUALITY_CMD,
ret = iwl_send_cmd_pdu(priv, REPLY_TX_LINK_QUALITY_CMD,
sizeof(link_cmd), &link_cmd);
if (ret)
IWL_ERR(priv, "REPLY_TX_LINK_QUALITY_CMD failed (%d)\n", ret);
}
/*
@ -571,7 +574,7 @@ static int iwl_remove_station(struct iwl_priv *priv, struct ieee80211_sta *sta)
if (!iwl_is_ready(priv)) {
IWL_DEBUG_INFO(priv,
"Unable to remove station %pM, device not ready. \n",
"Unable to remove station %pM, device not ready.\n",
sta->addr);
/*
* It is typical for stations to be removed when we are
@ -668,7 +671,7 @@ void iwl_clear_ucode_stations(struct iwl_priv *priv, bool force)
} else {
for (i = 0; i < priv->hw_params.max_stations; i++) {
if (priv->stations[i].used & IWL_STA_UCODE_ACTIVE) {
IWL_DEBUG_INFO(priv, "Clearing ucode active for station %d \n", i);
IWL_DEBUG_INFO(priv, "Clearing ucode active for station %d\n", i);
priv->stations[i].used &= ~IWL_STA_UCODE_ACTIVE;
cleared = true;
}
@ -759,7 +762,7 @@ int iwl_get_free_ucode_key_index(struct iwl_priv *priv)
}
EXPORT_SYMBOL(iwl_get_free_ucode_key_index);
int iwl_send_static_wepkey_cmd(struct iwl_priv *priv, u8 send_if_empty)
static int iwl_send_static_wepkey_cmd(struct iwl_priv *priv, u8 send_if_empty)
{
int i, not_empty = 0;
u8 buff[sizeof(struct iwl_wep_cmd) +
@ -803,7 +806,14 @@ int iwl_send_static_wepkey_cmd(struct iwl_priv *priv, u8 send_if_empty)
else
return 0;
}
EXPORT_SYMBOL(iwl_send_static_wepkey_cmd);
int iwl_restore_default_wep_keys(struct iwl_priv *priv)
{
WARN_ON(!mutex_is_locked(&priv->mutex));
return iwl_send_static_wepkey_cmd(priv, 0);
}
EXPORT_SYMBOL(iwl_restore_default_wep_keys);
int iwl_remove_default_wep_key(struct iwl_priv *priv,
struct ieee80211_key_conf *keyconf)
@ -815,11 +825,6 @@ int iwl_remove_default_wep_key(struct iwl_priv *priv,
IWL_DEBUG_WEP(priv, "Removing default WEP key: idx=%d\n",
keyconf->keyidx);
if (!test_and_clear_bit(keyconf->keyidx, &priv->ucode_key_table))
IWL_ERR(priv, "index %d not used in uCode key table.\n",
keyconf->keyidx);
priv->default_wep_key--;
memset(&priv->wep_keys[keyconf->keyidx], 0, sizeof(priv->wep_keys[0]));
if (iwl_is_rfkill(priv)) {
IWL_DEBUG_WEP(priv, "Not sending REPLY_WEPKEY command due to RFKILL.\n");
@ -851,12 +856,6 @@ int iwl_set_default_wep_key(struct iwl_priv *priv,
keyconf->hw_key_idx = HW_KEY_DEFAULT;
priv->stations[IWL_AP_ID].keyinfo.alg = ALG_WEP;
priv->default_wep_key++;
if (test_and_set_bit(keyconf->keyidx, &priv->ucode_key_table))
IWL_ERR(priv, "index %d already used in uCode key table.\n",
keyconf->keyidx);
priv->wep_keys[keyconf->keyidx].key_size = keyconf->keylen;
memcpy(&priv->wep_keys[keyconf->keyidx].key, &keyconf->key,
keyconf->keylen);
@ -1105,7 +1104,7 @@ int iwl_remove_dynamic_key(struct iwl_priv *priv,
priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
if (iwl_is_rfkill(priv)) {
IWL_DEBUG_WEP(priv, "Not sending REPLY_ADD_STA command because RFKILL enabled. \n");
IWL_DEBUG_WEP(priv, "Not sending REPLY_ADD_STA command because RFKILL enabled.\n");
spin_unlock_irqrestore(&priv->sta_lock, flags);
return 0;
}
@ -1191,13 +1190,9 @@ int iwl_send_lq_cmd(struct iwl_priv *priv,
.data = lq,
};
if ((lq->sta_id == 0xFF) &&
(priv->iw_mode == NL80211_IFTYPE_ADHOC))
if (WARN_ON(lq->sta_id == IWL_INVALID_STATION))
return -EINVAL;
if (lq->sta_id == 0xFF)
lq->sta_id = IWL_AP_ID;
iwl_dump_lq_cmd(priv, lq);
BUG_ON(init && (cmd.flags & CMD_ASYNC));
@ -1207,7 +1202,7 @@ int iwl_send_lq_cmd(struct iwl_priv *priv,
return ret;
if (init) {
IWL_DEBUG_INFO(priv, "init LQ command complete, clearing sta addition status for sta %d \n",
IWL_DEBUG_INFO(priv, "init LQ command complete, clearing sta addition status for sta %d\n",
lq->sta_id);
spin_lock_irqsave(&priv->sta_lock, flags_spin);
priv->stations[lq->sta_id].used &= ~IWL_STA_UCODE_INPROGRESS;
@ -1395,6 +1390,7 @@ void iwl_sta_modify_sleep_tx_count(struct iwl_priv *priv, int sta_id, int cnt)
iwl_send_add_sta(priv, &priv->stations[sta_id].sta, CMD_ASYNC);
}
EXPORT_SYMBOL(iwl_sta_modify_sleep_tx_count);
int iwl_mac_sta_remove(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,

View File

@ -44,11 +44,11 @@
*/
u8 iwl_find_station(struct iwl_priv *priv, const u8 *bssid);
int iwl_send_static_wepkey_cmd(struct iwl_priv *priv, u8 send_if_empty);
int iwl_remove_default_wep_key(struct iwl_priv *priv,
struct ieee80211_key_conf *key);
int iwl_set_default_wep_key(struct iwl_priv *priv,
struct ieee80211_key_conf *key);
int iwl_restore_default_wep_keys(struct iwl_priv *priv);
int iwl_set_dynamic_key(struct iwl_priv *priv,
struct ieee80211_key_conf *key, u8 sta_id);
int iwl_remove_dynamic_key(struct iwl_priv *priv,

File diff suppressed because it is too large

View File

@ -598,9 +598,9 @@ static int iwl3945_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
txq->need_update = 0;
}
IWL_DEBUG_TX(priv, "sequence nr = 0X%x \n",
IWL_DEBUG_TX(priv, "sequence nr = 0X%x\n",
le16_to_cpu(out_cmd->hdr.sequence));
IWL_DEBUG_TX(priv, "tx_flags = 0X%x \n", le32_to_cpu(tx_cmd->tx_flags));
IWL_DEBUG_TX(priv, "tx_flags = 0X%x\n", le32_to_cpu(tx_cmd->tx_flags));
iwl_print_hex_dump(priv, IWL_DL_TX, tx_cmd, sizeof(*tx_cmd));
iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd->hdr,
ieee80211_hdrlen(fc));
@ -1604,9 +1604,6 @@ static int iwl3945_print_last_event_logs(struct iwl_priv *priv, u32 capacity,
return pos;
}
/* For sanity check only. Actual size is determined by uCode, typ. 512 */
#define IWL3945_MAX_EVENT_LOG_SIZE (512)
#define DEFAULT_IWL3945_DUMP_EVENT_LOG_ENTRIES (20)
int iwl3945_dump_nic_event_log(struct iwl_priv *priv, bool full_log,
@ -1633,16 +1630,16 @@ int iwl3945_dump_nic_event_log(struct iwl_priv *priv, bool full_log,
num_wraps = iwl_read_targ_mem(priv, base + (2 * sizeof(u32)));
next_entry = iwl_read_targ_mem(priv, base + (3 * sizeof(u32)));
if (capacity > IWL3945_MAX_EVENT_LOG_SIZE) {
if (capacity > priv->cfg->max_event_log_size) {
IWL_ERR(priv, "Log capacity %d is bogus, limit to %d entries\n",
capacity, IWL3945_MAX_EVENT_LOG_SIZE);
capacity = IWL3945_MAX_EVENT_LOG_SIZE;
capacity, priv->cfg->max_event_log_size);
capacity = priv->cfg->max_event_log_size;
}
if (next_entry > IWL3945_MAX_EVENT_LOG_SIZE) {
if (next_entry > priv->cfg->max_event_log_size) {
IWL_ERR(priv, "Log write index %d is bogus, limit to %d\n",
next_entry, IWL3945_MAX_EVENT_LOG_SIZE);
next_entry = IWL3945_MAX_EVENT_LOG_SIZE;
next_entry, priv->cfg->max_event_log_size);
next_entry = priv->cfg->max_event_log_size;
}
size = num_wraps ? capacity : next_entry;
@ -1938,7 +1935,7 @@ static int iwl3945_get_channels_for_scan(struct iwl_priv *priv,
added++;
}
IWL_DEBUG_SCAN(priv, "total channels to scan %d \n", added);
IWL_DEBUG_SCAN(priv, "total channels to scan %d\n", added);
return added;
}
@ -3141,8 +3138,6 @@ void iwl3945_post_associate(struct iwl_priv *priv)
break;
}
iwl_activate_qos(priv, 0);
/* we have just associated, don't start scan too early */
priv->next_scan_jiffies = jiffies + IWL_DELAY_NEXT_SCAN;
}
@ -3404,7 +3399,7 @@ static int iwl3945_mac_sta_add(struct ieee80211_hw *hw,
}
/* Initialize rate scaling */
IWL_DEBUG_INFO(priv, "Initializing rate scaling for station %pM \n",
IWL_DEBUG_INFO(priv, "Initializing rate scaling for station %pM\n",
sta->addr);
iwl3945_rs_rate_init(priv, sta, sta_id);
@ -3890,11 +3885,6 @@ static int iwl3945_init_drv(struct iwl_priv *priv)
priv->iw_mode = NL80211_IFTYPE_STATION;
priv->missed_beacon_threshold = IWL_MISSED_BEACON_THRESHOLD_DEF;
iwl_reset_qos(priv);
priv->qos_data.qos_active = 0;
priv->qos_data.qos_cap.val = 0;
priv->tx_power_user_lmt = IWL_DEFAULT_TX_POWER;
if (eeprom->version < EEPROM_3945_EEPROM_VERSION) {

View File

@ -75,7 +75,7 @@ static ssize_t lbs_getscantable(struct file *file, char __user *userbuf,
return -ENOMEM;
pos += snprintf(buf+pos, len-pos,
"# | ch | rssi | bssid | cap | Qual | SSID \n");
"# | ch | rssi | bssid | cap | Qual | SSID\n");
mutex_lock(&priv->lock);
list_for_each_entry (iter_bss, &priv->network_list, list) {

View File

@ -35,6 +35,8 @@
#include <linux/mmc/card.h>
#include <linux/mmc/sdio_func.h>
#include <linux/mmc/sdio_ids.h>
#include <linux/mmc/sdio.h>
#include <linux/mmc/host.h>
#include "host.h"
#include "decl.h"
@ -943,6 +945,7 @@ static int if_sdio_probe(struct sdio_func *func,
int ret, i;
unsigned int model;
struct if_sdio_packet *packet;
struct mmc_host *host = func->card->host;
lbs_deb_enter(LBS_DEB_SDIO);
@ -1023,6 +1026,25 @@ static int if_sdio_probe(struct sdio_func *func,
if (ret)
goto disable;
/* For 1-bit transfers to the 8686 model, we need to enable the
* interrupt flag in the CCCR register. Set the MMC_QUIRK_LENIENT_FN0
* bit to allow access to non-vendor registers. */
if ((card->model == IF_SDIO_MODEL_8686) &&
(host->caps & MMC_CAP_SDIO_IRQ) &&
(host->ios.bus_width == MMC_BUS_WIDTH_1)) {
u8 reg;
func->card->quirks |= MMC_QUIRK_LENIENT_FN0;
reg = sdio_f0_readb(func, SDIO_CCCR_IF, &ret);
if (ret)
goto release_int;
reg |= SDIO_BUS_ECSI;
sdio_f0_writeb(func, reg, SDIO_CCCR_IF, &ret);
if (ret)
goto release_int;
}
card->ioport = sdio_readb(func, IF_SDIO_IOPORT, &ret);
if (ret)
goto release_int;

Some files were not shown because too many files have changed in this diff