Merge branch 'wireless-next-2.6' of git://git.kernel.org/pub/scm/linux/kernel/git/iwlwifi/iwlwifi-2.6

John W. Linville 2010-02-15 16:04:43 -05:00
commit f318d658de
16 changed files with 273 additions and 124 deletions

View file

@ -246,7 +246,7 @@ struct iwl_cfg iwl1000_bgn_cfg = {
.use_rts_for_ht = true, /* use rts/cts protection */
.chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
.support_ct_kill_exit = true,
.plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
.plcp_delta_threshold = IWL_MAX_PLCP_ERR_EXT_LONG_THRESHOLD_DEF,
.chain_noise_scale = 1000,
};
@ -274,7 +274,7 @@ struct iwl_cfg iwl1000_bg_cfg = {
.led_compensation = 51,
.chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
.support_ct_kill_exit = true,
.plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
.plcp_delta_threshold = IWL_MAX_PLCP_ERR_EXT_LONG_THRESHOLD_DEF,
.chain_noise_scale = 1000,
};

View file

@ -2470,11 +2470,9 @@ int iwl3945_hw_set_hw_params(struct iwl_priv *priv)
memset((void *)&priv->hw_params, 0,
sizeof(struct iwl_hw_params));
priv->shared_virt =
pci_alloc_consistent(priv->pci_dev,
sizeof(struct iwl3945_shared),
&priv->shared_phys);
priv->shared_virt = dma_alloc_coherent(&priv->pci_dev->dev,
sizeof(struct iwl3945_shared),
&priv->shared_phys, GFP_KERNEL);
if (!priv->shared_virt) {
IWL_ERR(priv, "failed to allocate pci memory\n");
mutex_unlock(&priv->mutex);
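
Nearly every file touched by this merge repeats the mechanical conversion seen above: the deprecated pci_alloc_consistent()/pci_free_consistent() wrappers are replaced with the generic DMA API, which takes the underlying struct device (&pci_dev->dev) and an explicit GFP mask instead of the implied GFP_ATOMIC. A minimal before/after sketch of the pattern, using illustrative helper names (alloc_shared/free_shared are not driver functions):

#include <linux/pci.h>
#include <linux/dma-mapping.h>

/* Illustrative helper: allocate a coherent buffer shared with the device. */
static void *alloc_shared(struct pci_dev *pdev, size_t len, dma_addr_t *dma)
{
        /* Old wrapper: pci_alloc_consistent(pdev, len, dma);
         * it hides the struct device and always allocates with GFP_ATOMIC. */

        /* Generic DMA API used throughout this merge; GFP_KERNEL is used
         * because these setup paths can sleep. */
        return dma_alloc_coherent(&pdev->dev, len, dma, GFP_KERNEL);
}

static void free_shared(struct pci_dev *pdev, size_t len, void *buf, dma_addr_t dma)
{
        /* Replaces pci_free_consistent(pdev, len, buf, dma). */
        dma_free_coherent(&pdev->dev, len, buf, dma);
}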

View file

@ -581,6 +581,13 @@ static int iwl4965_alive_notify(struct iwl_priv *priv)
iwl4965_set_wr_ptrs(priv, IWL_CMD_QUEUE_NUM, 0);
/* make sure all queues are not stopped */
memset(&priv->queue_stopped[0], 0, sizeof(priv->queue_stopped));
for (i = 0; i < 4; i++)
atomic_set(&priv->queue_stop_count[i], 0);
/* reset to 0 to enable all the queues first */
priv->txq_ctx_active_msk = 0;
/* Map each Tx/cmd queue to its corresponding fifo */
for (i = 0; i < ARRAY_SIZE(default_queue_to_tx_fifo); i++) {
int ac = default_queue_to_tx_fifo[i];

View file

@ -648,6 +648,13 @@ int iwl5000_alive_notify(struct iwl_priv *priv)
iwl5000_set_wr_ptrs(priv, IWL_CMD_QUEUE_NUM, 0);
/* make sure all queues are not stopped */
memset(&priv->queue_stopped[0], 0, sizeof(priv->queue_stopped));
for (i = 0; i < 4; i++)
atomic_set(&priv->queue_stop_count[i], 0);
/* reset to 0 to enable all the queues first */
priv->txq_ctx_active_msk = 0;
/* map qos queues to fifos one-to-one */
for (i = 0; i < ARRAY_SIZE(iwl5000_default_queue_to_tx_fifo); i++) {
int ac = iwl5000_default_queue_to_tx_fifo[i];

View file

@ -298,10 +298,23 @@ static void rs_tl_turn_on_agg_for_tid(struct iwl_priv *priv,
struct iwl_lq_sta *lq_data, u8 tid,
struct ieee80211_sta *sta)
{
int ret;
if (rs_tl_get_load(lq_data, tid) > IWL_AGG_LOAD_THRESHOLD) {
IWL_DEBUG_HT(priv, "Starting Tx agg: STA: %pM tid: %d\n",
sta->addr, tid);
ieee80211_start_tx_ba_session(sta, tid);
ret = ieee80211_start_tx_ba_session(sta, tid);
if (ret == -EAGAIN) {
/*
* driver and mac80211 are out of sync
* this might be caused by reloading firmware
* stop the Tx BA session here
*/
IWL_DEBUG_HT(priv, "Fail start Tx agg on tid: %d\n",
tid);
ret = ieee80211_stop_tx_ba_session(sta, tid,
WLAN_BACK_INITIATOR);
}
}
}

View file

@ -2941,10 +2941,21 @@ static int iwl_mac_ampdu_action(struct ieee80211_hw *hw,
return ret;
case IEEE80211_AMPDU_TX_START:
IWL_DEBUG_HT(priv, "start Tx\n");
return iwl_tx_agg_start(priv, sta->addr, tid, ssn);
ret = iwl_tx_agg_start(priv, sta->addr, tid, ssn);
if (ret == 0) {
priv->agg_tids_count++;
IWL_DEBUG_HT(priv, "priv->agg_tids_count = %u\n",
priv->agg_tids_count);
}
return ret;
case IEEE80211_AMPDU_TX_STOP:
IWL_DEBUG_HT(priv, "stop Tx\n");
ret = iwl_tx_agg_stop(priv, sta->addr, tid);
if ((ret == 0) && (priv->agg_tids_count > 0)) {
priv->agg_tids_count--;
IWL_DEBUG_HT(priv, "priv->agg_tids_count = %u\n",
priv->agg_tids_count);
}
if (test_bit(STATUS_EXIT_PENDING, &priv->status))
return 0;
else
@ -3364,6 +3375,7 @@ static int iwl_init_drv(struct iwl_priv *priv)
priv->iw_mode = NL80211_IFTYPE_STATION;
priv->current_ht_config.smps = IEEE80211_SMPS_STATIC;
priv->missed_beacon_threshold = IWL_MISSED_BEACON_THRESHOLD_DEF;
priv->agg_tids_count = 0;
/* Choose which receivers/antennas to use */
if (priv->cfg->ops->hcmd->set_rxon_chain)
@ -3540,6 +3552,14 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
*/
spin_lock_init(&priv->reg_lock);
spin_lock_init(&priv->lock);
/*
* stop and reset the on-board processor just in case it is in a
* strange state ... like being left stranded by a primary kernel
* and this is now the kdump kernel trying to start up
*/
iwl_write32(priv, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
iwl_hw_detect(priv);
IWL_INFO(priv, "Detected Intel Wireless WiFi Link %s REV=0x%X\n",
priv->cfg->name, priv->hw_rev);

View file

@ -3470,11 +3470,7 @@ enum {
IWL_PHY_CALIBRATE_DIFF_GAIN_CMD = 7,
IWL_PHY_CALIBRATE_DC_CMD = 8,
IWL_PHY_CALIBRATE_LO_CMD = 9,
IWL_PHY_CALIBRATE_RX_BB_CMD = 10,
IWL_PHY_CALIBRATE_TX_IQ_CMD = 11,
IWL_PHY_CALIBRATE_RX_IQ_CMD = 12,
IWL_PHY_CALIBRATION_NOISE_CMD = 13,
IWL_PHY_CALIBRATE_AGC_TABLE_CMD = 14,
IWL_PHY_CALIBRATE_CRYSTAL_FRQ_CMD = 15,
IWL_PHY_CALIBRATE_BASE_BAND_CMD = 16,
IWL_PHY_CALIBRATE_TX_IQ_PERD_CMD = 17,

View file

@ -1670,9 +1670,9 @@ EXPORT_SYMBOL(iwl_set_tx_power);
void iwl_free_isr_ict(struct iwl_priv *priv)
{
if (priv->ict_tbl_vir) {
pci_free_consistent(priv->pci_dev, (sizeof(u32) * ICT_COUNT) +
PAGE_SIZE, priv->ict_tbl_vir,
priv->ict_tbl_dma);
dma_free_coherent(&priv->pci_dev->dev,
(sizeof(u32) * ICT_COUNT) + PAGE_SIZE,
priv->ict_tbl_vir, priv->ict_tbl_dma);
priv->ict_tbl_vir = NULL;
}
}
@ -1688,9 +1688,9 @@ int iwl_alloc_isr_ict(struct iwl_priv *priv)
if (priv->cfg->use_isr_legacy)
return 0;
/* allocate shared data table */
priv->ict_tbl_vir = pci_alloc_consistent(priv->pci_dev, (sizeof(u32) *
ICT_COUNT) + PAGE_SIZE,
&priv->ict_tbl_dma);
priv->ict_tbl_vir = dma_alloc_coherent(&priv->pci_dev->dev,
(sizeof(u32) * ICT_COUNT) + PAGE_SIZE,
&priv->ict_tbl_dma, GFP_KERNEL);
if (!priv->ict_tbl_vir)
return -ENOMEM;
@ -3334,7 +3334,7 @@ int iwl_dump_fh(struct iwl_priv *priv, char **buf, bool display)
}
EXPORT_SYMBOL(iwl_dump_fh);
void iwl_force_rf_reset(struct iwl_priv *priv)
static void iwl_force_rf_reset(struct iwl_priv *priv)
{
if (test_bit(STATUS_EXIT_PENDING, &priv->status))
return;
@ -3356,7 +3356,47 @@ void iwl_force_rf_reset(struct iwl_priv *priv)
iwl_internal_short_hw_scan(priv);
return;
}
EXPORT_SYMBOL(iwl_force_rf_reset);
#define IWL_DELAY_NEXT_FORCE_RESET (HZ*3)
int iwl_force_reset(struct iwl_priv *priv, int mode)
{
if (test_bit(STATUS_EXIT_PENDING, &priv->status))
return -EINVAL;
if (priv->last_force_reset_jiffies &&
time_after(priv->last_force_reset_jiffies +
IWL_DELAY_NEXT_FORCE_RESET, jiffies)) {
IWL_DEBUG_INFO(priv, "force reset rejected\n");
return -EAGAIN;
}
IWL_DEBUG_INFO(priv, "perform force reset (%d)\n", mode);
switch (mode) {
case IWL_RF_RESET:
iwl_force_rf_reset(priv);
break;
case IWL_FW_RESET:
IWL_ERR(priv, "On demand firmware reload\n");
/* Set the FW error flag -- cleared on iwl_down */
set_bit(STATUS_FW_ERROR, &priv->status);
wake_up_interruptible(&priv->wait_command_queue);
/*
* Keep the restart process from trying to send host
* commands by clearing the INIT status bit
*/
clear_bit(STATUS_READY, &priv->status);
queue_work(priv->workqueue, &priv->restart);
break;
default:
IWL_DEBUG_INFO(priv, "invalid reset request.\n");
return -EINVAL;
}
priv->last_force_reset_jiffies = jiffies;
return 0;
}
#ifdef CONFIG_PM
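
The back-to-back protection in iwl_force_reset() is the standard jiffies rate-limit idiom; the per-internal-scan limit removed from iwl-scan.c further down is effectively superseded by it. A standalone sketch of just that idiom, with illustrative names (last_event, MIN_INTERVAL and rate_limit_ok() are not driver symbols):

#include <linux/jiffies.h>
#include <linux/types.h>

#define MIN_INTERVAL (3 * HZ)   /* same spacing as IWL_DELAY_NEXT_FORCE_RESET */

static unsigned long last_event;

/* Returns true when enough time has passed since the previous event. */
static bool rate_limit_ok(void)
{
        if (last_event && time_after(last_event + MIN_INTERVAL, jiffies))
                return false;   /* still inside the quiet period */
        last_event = jiffies;
        return true;
}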

View file

@ -414,13 +414,13 @@ void iwl_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rxq);
void iwl_cmd_queue_free(struct iwl_priv *priv);
int iwl_rx_queue_alloc(struct iwl_priv *priv);
void iwl_rx_handle(struct iwl_priv *priv);
int iwl_rx_queue_update_write_ptr(struct iwl_priv *priv,
void iwl_rx_queue_update_write_ptr(struct iwl_priv *priv,
struct iwl_rx_queue *q);
void iwl_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq);
void iwl_rx_replenish(struct iwl_priv *priv);
void iwl_rx_replenish_now(struct iwl_priv *priv);
int iwl_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq);
int iwl_rx_queue_restock(struct iwl_priv *priv);
void iwl_rx_queue_restock(struct iwl_priv *priv);
int iwl_rx_queue_space(const struct iwl_rx_queue *q);
void iwl_rx_allocate(struct iwl_priv *priv, gfp_t priority);
void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb);
@ -450,7 +450,7 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb);
void iwl_hw_txq_ctx_free(struct iwl_priv *priv);
int iwl_hw_tx_queue_init(struct iwl_priv *priv,
struct iwl_tx_queue *txq);
int iwl_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq);
void iwl_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq);
int iwl_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq,
int slots_num, u32 txq_id);
void iwl_tx_queue_free(struct iwl_priv *priv, int txq_id);
@ -501,7 +501,7 @@ int iwl_scan_cancel(struct iwl_priv *priv);
int iwl_scan_cancel_timeout(struct iwl_priv *priv, unsigned long ms);
int iwl_mac_hw_scan(struct ieee80211_hw *hw, struct cfg80211_scan_request *req);
int iwl_internal_short_hw_scan(struct iwl_priv *priv);
void iwl_force_rf_reset(struct iwl_priv *priv);
int iwl_force_reset(struct iwl_priv *priv, int mode);
u16 iwl_fill_probe_req(struct iwl_priv *priv, struct ieee80211_mgmt *frame,
const u8 *ie, int ie_len, int left);
void iwl_setup_rx_scan_handlers(struct iwl_priv *priv);

View file

@ -2223,6 +2223,32 @@ static ssize_t iwl_dbgfs_plcp_delta_write(struct file *file,
return count;
}
static ssize_t iwl_dbgfs_force_reset_write(struct file *file,
const char __user *user_buf,
size_t count, loff_t *ppos) {
struct iwl_priv *priv = file->private_data;
char buf[8];
int buf_size;
int reset, ret;
memset(buf, 0, sizeof(buf));
buf_size = min(count, sizeof(buf) - 1);
if (copy_from_user(buf, user_buf, buf_size))
return -EFAULT;
if (sscanf(buf, "%d", &reset) != 1)
return -EINVAL;
switch (reset) {
case IWL_RF_RESET:
case IWL_FW_RESET:
ret = iwl_force_reset(priv, reset);
break;
default:
return -EINVAL;
}
return ret ? ret : count;
}
DEBUGFS_READ_FILE_OPS(rx_statistics);
DEBUGFS_READ_FILE_OPS(tx_statistics);
DEBUGFS_READ_WRITE_FILE_OPS(traffic_log);
@ -2243,6 +2269,7 @@ DEBUGFS_READ_FILE_OPS(fh_reg);
DEBUGFS_READ_WRITE_FILE_OPS(missed_beacon);
DEBUGFS_WRITE_FILE_OPS(internal_scan);
DEBUGFS_READ_WRITE_FILE_OPS(plcp_delta);
DEBUGFS_WRITE_FILE_OPS(force_reset);
/*
* Create the debugfs files and directories
@ -2296,6 +2323,7 @@ int iwl_dbgfs_register(struct iwl_priv *priv, const char *name)
DEBUGFS_ADD_FILE(missed_beacon, dir_debug, S_IWUSR);
DEBUGFS_ADD_FILE(internal_scan, dir_debug, S_IWUSR);
DEBUGFS_ADD_FILE(plcp_delta, dir_debug, S_IWUSR | S_IRUSR);
DEBUGFS_ADD_FILE(force_reset, dir_debug, S_IWUSR);
if ((priv->hw_rev & CSR_HW_REV_TYPE_MSK) != CSR_HW_REV_TYPE_3945) {
DEBUGFS_ADD_FILE(ucode_rx_stats, dir_debug, S_IRUSR);
DEBUGFS_ADD_FILE(ucode_tx_stats, dir_debug, S_IRUSR);
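
Taken together with the iwl_reset enum added to iwl-dev.h by this same commit (IWL_RF_RESET = 0, IWL_FW_RESET = 1), the new write-only force_reset entry lets userspace request a reset by hand: writing 0 triggers a radio (RF) reset, writing 1 forces an on-demand firmware reload, any other value is rejected with -EINVAL, and repeat writes inside the three-second IWL_DELAY_NEXT_FORCE_RESET window get -EAGAIN back from iwl_force_reset().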

View file

@ -1033,8 +1033,14 @@ struct iwl_event_log {
#define IWL_MAX_PLCP_ERR_THRESHOLD_MIN (0)
#define IWL_MAX_PLCP_ERR_THRESHOLD_DEF (50)
#define IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF (100)
#define IWL_MAX_PLCP_ERR_EXT_LONG_THRESHOLD_DEF (200)
#define IWL_MAX_PLCP_ERR_THRESHOLD_MAX (255)
enum iwl_reset {
IWL_RF_RESET = 0,
IWL_FW_RESET,
};
struct iwl_priv {
/* ieee device used by generic ieee processing code */
@ -1066,6 +1072,12 @@ struct iwl_priv {
/* storing the jiffies when the plcp error rate is received */
unsigned long plcp_jiffies;
/* number of TIDs that have aggregation (AGG) on; 0 means no aggregation */
u8 agg_tids_count;
/* force reset */
unsigned long last_force_reset_jiffies;
/* we allocate array of iwl4965_channel_info for NIC's valid channels.
* Access via channel # using indirect index array */
struct iwl_channel_info *channel_info; /* channel info array */
@ -1087,7 +1099,6 @@ struct iwl_priv {
unsigned long scan_start;
unsigned long scan_pass_start;
unsigned long scan_start_tsf;
unsigned long last_internal_scan_jiffies;
void *scan;
int scan_bands;
struct cfg80211_scan_request *scan_request;

View file

@ -80,8 +80,8 @@ static inline void iwl_free_fw_desc(struct pci_dev *pci_dev,
struct fw_desc *desc)
{
if (desc->v_addr)
pci_free_consistent(pci_dev, desc->len,
desc->v_addr, desc->p_addr);
dma_free_coherent(&pci_dev->dev, desc->len,
desc->v_addr, desc->p_addr);
desc->v_addr = NULL;
desc->len = 0;
}
@ -89,7 +89,8 @@ static inline void iwl_free_fw_desc(struct pci_dev *pci_dev,
static inline int iwl_alloc_fw_desc(struct pci_dev *pci_dev,
struct fw_desc *desc)
{
desc->v_addr = pci_alloc_consistent(pci_dev, desc->len, &desc->p_addr);
desc->v_addr = dma_alloc_coherent(&pci_dev->dev, desc->len,
&desc->p_addr, GFP_KERNEL);
return (desc->v_addr != NULL) ? 0 : -ENOMEM;
}

View file

@ -123,12 +123,11 @@ EXPORT_SYMBOL(iwl_rx_queue_space);
/**
* iwl_rx_queue_update_write_ptr - Update the write pointer for the RX queue
*/
int iwl_rx_queue_update_write_ptr(struct iwl_priv *priv, struct iwl_rx_queue *q)
void iwl_rx_queue_update_write_ptr(struct iwl_priv *priv, struct iwl_rx_queue *q)
{
unsigned long flags;
u32 rx_wrt_ptr_reg = priv->hw_params.rx_wrt_ptr_reg;
u32 reg;
int ret = 0;
spin_lock_irqsave(&q->lock, flags);
@ -161,7 +160,6 @@ int iwl_rx_queue_update_write_ptr(struct iwl_priv *priv, struct iwl_rx_queue *q)
exit_unlock:
spin_unlock_irqrestore(&q->lock, flags);
return ret;
}
EXPORT_SYMBOL(iwl_rx_queue_update_write_ptr);
/**
@ -184,14 +182,13 @@ static inline __le32 iwl_dma_addr2rbd_ptr(struct iwl_priv *priv,
* also updates the memory address in the firmware to reference the new
* target buffer.
*/
int iwl_rx_queue_restock(struct iwl_priv *priv)
void iwl_rx_queue_restock(struct iwl_priv *priv)
{
struct iwl_rx_queue *rxq = &priv->rxq;
struct list_head *element;
struct iwl_rx_mem_buffer *rxb;
unsigned long flags;
int write;
int ret = 0;
spin_lock_irqsave(&rxq->lock, flags);
write = rxq->write & ~0x7;
@ -220,10 +217,8 @@ int iwl_rx_queue_restock(struct iwl_priv *priv)
spin_lock_irqsave(&rxq->lock, flags);
rxq->need_update = 1;
spin_unlock_irqrestore(&rxq->lock, flags);
ret = iwl_rx_queue_update_write_ptr(priv, rxq);
iwl_rx_queue_update_write_ptr(priv, rxq);
}
return ret;
}
EXPORT_SYMBOL(iwl_rx_queue_restock);
@ -350,10 +345,10 @@ void iwl_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
}
}
pci_free_consistent(priv->pci_dev, 4 * RX_QUEUE_SIZE, rxq->bd,
rxq->dma_addr);
pci_free_consistent(priv->pci_dev, sizeof(struct iwl_rb_status),
rxq->rb_stts, rxq->rb_stts_dma);
dma_free_coherent(&priv->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd,
rxq->dma_addr);
dma_free_coherent(&priv->pci_dev->dev, sizeof(struct iwl_rb_status),
rxq->rb_stts, rxq->rb_stts_dma);
rxq->bd = NULL;
rxq->rb_stts = NULL;
}
@ -362,7 +357,7 @@ EXPORT_SYMBOL(iwl_rx_queue_free);
int iwl_rx_queue_alloc(struct iwl_priv *priv)
{
struct iwl_rx_queue *rxq = &priv->rxq;
struct pci_dev *dev = priv->pci_dev;
struct device *dev = &priv->pci_dev->dev;
int i;
spin_lock_init(&rxq->lock);
@ -370,12 +365,13 @@ int iwl_rx_queue_alloc(struct iwl_priv *priv)
INIT_LIST_HEAD(&rxq->rx_used);
/* Alloc the circular buffer of Read Buffer Descriptors (RBDs) */
rxq->bd = pci_alloc_consistent(dev, 4 * RX_QUEUE_SIZE, &rxq->dma_addr);
rxq->bd = dma_alloc_coherent(dev, 4 * RX_QUEUE_SIZE, &rxq->dma_addr,
GFP_KERNEL);
if (!rxq->bd)
goto err_bd;
rxq->rb_stts = pci_alloc_consistent(dev, sizeof(struct iwl_rb_status),
&rxq->rb_stts_dma);
rxq->rb_stts = dma_alloc_coherent(dev, sizeof(struct iwl_rb_status),
&rxq->rb_stts_dma, GFP_KERNEL);
if (!rxq->rb_stts)
goto err_rb;
@ -392,8 +388,8 @@ int iwl_rx_queue_alloc(struct iwl_priv *priv)
return 0;
err_rb:
pci_free_consistent(priv->pci_dev, 4 * RX_QUEUE_SIZE, rxq->bd,
rxq->dma_addr);
dma_free_coherent(&priv->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd,
rxq->dma_addr);
err_bd:
return -ENOMEM;
}
@ -620,6 +616,11 @@ static void iwl_accumulative_statistics(struct iwl_priv *priv,
#define REG_RECALIB_PERIOD (60)
/* the threshold ratio of actual_ack_cnt to expected_ack_cnt in percent */
#define ACK_CNT_RATIO (50)
#define BA_TIMEOUT_CNT (5)
#define BA_TIMEOUT_MAX (16)
#define PLCP_MSG "plcp_err exceeded %u, %u, %u, %u, %u, %d, %u mSecs\n"
void iwl_rx_statistics(struct iwl_priv *priv,
struct iwl_rx_mem_buffer *rxb)
@ -629,6 +630,9 @@ void iwl_rx_statistics(struct iwl_priv *priv,
int combined_plcp_delta;
unsigned int plcp_msec;
unsigned long plcp_received_jiffies;
int actual_ack_cnt_delta;
int expected_ack_cnt_delta;
int ba_timeout_delta;
IWL_DEBUG_RX(priv, "Statistics notification received (%d vs %d).\n",
(int)sizeof(priv->statistics),
@ -643,6 +647,44 @@ void iwl_rx_statistics(struct iwl_priv *priv,
#ifdef CONFIG_IWLWIFI_DEBUG
iwl_accumulative_statistics(priv, (__le32 *)&pkt->u.stats);
#endif
actual_ack_cnt_delta = le32_to_cpu(pkt->u.stats.tx.actual_ack_cnt) -
le32_to_cpu(priv->statistics.tx.actual_ack_cnt);
expected_ack_cnt_delta = le32_to_cpu(
pkt->u.stats.tx.expected_ack_cnt) -
le32_to_cpu(priv->statistics.tx.expected_ack_cnt);
ba_timeout_delta = le32_to_cpu(
pkt->u.stats.tx.agg.ba_timeout) -
le32_to_cpu(priv->statistics.tx.agg.ba_timeout);
if ((priv->agg_tids_count > 0) &&
(expected_ack_cnt_delta > 0) &&
(((actual_ack_cnt_delta * 100) / expected_ack_cnt_delta) <
ACK_CNT_RATIO) &&
(ba_timeout_delta > BA_TIMEOUT_CNT)) {
IWL_DEBUG_RADIO(priv,
"actual_ack_cnt delta = %d, expected_ack_cnt = %d\n",
actual_ack_cnt_delta, expected_ack_cnt_delta);
#ifdef CONFIG_IWLWIFI_DEBUG
IWL_DEBUG_RADIO(priv, "rx_detected_cnt delta = %d\n",
priv->delta_statistics.tx.rx_detected_cnt);
IWL_DEBUG_RADIO(priv,
"ack_or_ba_timeout_collision delta = %d\n",
priv->delta_statistics.tx.ack_or_ba_timeout_collision);
#endif
IWL_DEBUG_RADIO(priv, "agg ba_timeout delta = %d\n",
ba_timeout_delta);
if ((actual_ack_cnt_delta == 0) &&
(ba_timeout_delta >=
BA_TIMEOUT_MAX)) {
IWL_DEBUG_RADIO(priv,
"call iwl_force_reset(IWL_FW_RESET)\n");
iwl_force_reset(priv, IWL_FW_RESET);
} else {
IWL_DEBUG_RADIO(priv,
"call iwl_force_reset(IWL_RF_RESET)\n");
iwl_force_reset(priv, IWL_RF_RESET);
}
}
/*
* check for plcp_err and trigger radio reset if it exceeds
* the plcp error threshold plcp_delta.
@ -689,7 +731,7 @@ void iwl_rx_statistics(struct iwl_priv *priv,
* Reset the RF radio due to the high plcp
* error rate
*/
iwl_force_rf_reset(priv);
iwl_force_reset(priv, IWL_RF_RESET);
}
}
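
The Tx-health check added above compares deltas between consecutive statistics notifications: if any TID has aggregation active, fewer than ACK_CNT_RATIO (50) percent of expected frames were ACKed, and more than BA_TIMEOUT_CNT (5) block-ack timeouts occurred, the driver forces a reset, escalating to a firmware reload only when nothing was ACKed at all and BA timeouts reach BA_TIMEOUT_MAX (16). For example, 80 ACKs against 200 expected (40%) with 6 BA timeouts triggers an RF reset. A minimal sketch of the same decision in isolation (the helper name and enum are illustrative, not driver code):

enum tx_health_action { TX_HEALTH_OK, TX_HEALTH_RF_RESET, TX_HEALTH_FW_RESET };

static enum tx_health_action check_tx_health(int agg_tids, int actual_ack_delta,
                                             int expected_ack_delta, int ba_timeout_delta)
{
        if (agg_tids <= 0 || expected_ack_delta <= 0)
                return TX_HEALTH_OK;
        if ((actual_ack_delta * 100) / expected_ack_delta >= 50)   /* ACK_CNT_RATIO */
                return TX_HEALTH_OK;
        if (ba_timeout_delta <= 5)                                 /* BA_TIMEOUT_CNT */
                return TX_HEALTH_OK;
        /* No ACKs at all and many block-ack timeouts: assume stuck firmware. */
        if (actual_ack_delta == 0 && ba_timeout_delta >= 16)       /* BA_TIMEOUT_MAX */
                return TX_HEALTH_FW_RESET;
        /* Otherwise try the cheaper RF (radio) reset first. */
        return TX_HEALTH_RF_RESET;
}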

View file

@ -250,8 +250,6 @@ static void iwl_rx_scan_complete_notif(struct iwl_priv *priv,
if (!priv->is_internal_short_scan)
priv->next_scan_jiffies = 0;
else
priv->last_internal_scan_jiffies = jiffies;
IWL_DEBUG_INFO(priv, "Setting scan to off\n");
@ -471,21 +469,6 @@ EXPORT_SYMBOL(iwl_init_scan_params);
static int iwl_scan_initiate(struct iwl_priv *priv)
{
if (!iwl_is_ready_rf(priv)) {
IWL_DEBUG_SCAN(priv, "Aborting scan due to not ready.\n");
return -EIO;
}
if (test_bit(STATUS_SCANNING, &priv->status)) {
IWL_DEBUG_SCAN(priv, "Scan already in progress.\n");
return -EAGAIN;
}
if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) {
IWL_DEBUG_SCAN(priv, "Scan request while abort pending\n");
return -EAGAIN;
}
IWL_DEBUG_INFO(priv, "Starting scan...\n");
set_bit(STATUS_SCANNING, &priv->status);
priv->is_internal_short_scan = false;
@ -517,6 +500,18 @@ int iwl_mac_hw_scan(struct ieee80211_hw *hw,
goto out_unlock;
}
if (test_bit(STATUS_SCANNING, &priv->status)) {
IWL_DEBUG_SCAN(priv, "Scan already in progress.\n");
ret = -EAGAIN;
goto out_unlock;
}
if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) {
IWL_DEBUG_SCAN(priv, "Scan request while abort pending\n");
ret = -EAGAIN;
goto out_unlock;
}
/* We don't schedule scan within next_scan_jiffies period.
* Avoid scanning during possible EAPOL exchange, return
* success immediately.
@ -551,8 +546,6 @@ EXPORT_SYMBOL(iwl_mac_hw_scan);
* internal short scan; this function should only be called while associated.
* It will reset and tune the radio to prevent possible RF-related problems.
*/
#define IWL_DELAY_NEXT_INTERNAL_SCAN (HZ*1)
int iwl_internal_short_hw_scan(struct iwl_priv *priv)
{
int ret = 0;
@ -572,12 +565,6 @@ int iwl_internal_short_hw_scan(struct iwl_priv *priv)
ret = -EAGAIN;
goto out;
}
if (priv->last_internal_scan_jiffies &&
time_after(priv->last_internal_scan_jiffies +
IWL_DELAY_NEXT_INTERNAL_SCAN, jiffies)) {
IWL_DEBUG_SCAN(priv, "internal scan rejected\n");
goto out;
}
priv->scan_bands = 0;
if (priv->band == IEEE80211_BAND_5GHZ)

View file

@ -60,7 +60,8 @@ static const u16 default_tid_to_tx_fifo[] = {
static inline int iwl_alloc_dma_ptr(struct iwl_priv *priv,
struct iwl_dma_ptr *ptr, size_t size)
{
ptr->addr = pci_alloc_consistent(priv->pci_dev, size, &ptr->dma);
ptr->addr = dma_alloc_coherent(&priv->pci_dev->dev, size, &ptr->dma,
GFP_KERNEL);
if (!ptr->addr)
return -ENOMEM;
ptr->size = size;
@ -73,21 +74,20 @@ static inline void iwl_free_dma_ptr(struct iwl_priv *priv,
if (unlikely(!ptr->addr))
return;
pci_free_consistent(priv->pci_dev, ptr->size, ptr->addr, ptr->dma);
dma_free_coherent(&priv->pci_dev->dev, ptr->size, ptr->addr, ptr->dma);
memset(ptr, 0, sizeof(*ptr));
}
/**
* iwl_txq_update_write_ptr - Send new write index to hardware
*/
int iwl_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq)
void iwl_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq)
{
u32 reg = 0;
int ret = 0;
int txq_id = txq->q.id;
if (txq->need_update == 0)
return ret;
return;
/* if we're trying to save power */
if (test_bit(STATUS_POWER_PMI, &priv->status)) {
@ -101,7 +101,7 @@ int iwl_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq)
txq_id, reg);
iwl_set_bit(priv, CSR_GP_CNTRL,
CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
return ret;
return;
}
iwl_write_direct32(priv, HBUS_TARG_WRPTR,
@ -114,8 +114,6 @@ int iwl_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq)
txq->q.write_ptr | (txq_id << 8));
txq->need_update = 0;
return ret;
}
EXPORT_SYMBOL(iwl_txq_update_write_ptr);
@ -132,7 +130,7 @@ void iwl_tx_queue_free(struct iwl_priv *priv, int txq_id)
{
struct iwl_tx_queue *txq = &priv->txq[txq_id];
struct iwl_queue *q = &txq->q;
struct pci_dev *dev = priv->pci_dev;
struct device *dev = &priv->pci_dev->dev;
int i;
if (q->n_bd == 0)
@ -149,8 +147,8 @@ void iwl_tx_queue_free(struct iwl_priv *priv, int txq_id)
/* De-alloc circular buffer of TFDs */
if (txq->q.n_bd)
pci_free_consistent(dev, priv->hw_params.tfd_size *
txq->q.n_bd, txq->tfds, txq->q.dma_addr);
dma_free_coherent(dev, priv->hw_params.tfd_size *
txq->q.n_bd, txq->tfds, txq->q.dma_addr);
/* De-alloc array of per-TFD driver data */
kfree(txq->txb);
@ -179,7 +177,7 @@ void iwl_cmd_queue_free(struct iwl_priv *priv)
{
struct iwl_tx_queue *txq = &priv->txq[IWL_CMD_QUEUE_NUM];
struct iwl_queue *q = &txq->q;
struct pci_dev *dev = priv->pci_dev;
struct device *dev = &priv->pci_dev->dev;
int i;
if (q->n_bd == 0)
@ -191,8 +189,8 @@ void iwl_cmd_queue_free(struct iwl_priv *priv)
/* De-alloc circular buffer of TFDs */
if (txq->q.n_bd)
pci_free_consistent(dev, priv->hw_params.tfd_size *
txq->q.n_bd, txq->tfds, txq->q.dma_addr);
dma_free_coherent(dev, priv->hw_params.tfd_size * txq->q.n_bd,
txq->tfds, txq->q.dma_addr);
/* deallocate arrays */
kfree(txq->cmd);
@ -283,7 +281,7 @@ static int iwl_queue_init(struct iwl_priv *priv, struct iwl_queue *q,
static int iwl_tx_queue_alloc(struct iwl_priv *priv,
struct iwl_tx_queue *txq, u32 id)
{
struct pci_dev *dev = priv->pci_dev;
struct device *dev = &priv->pci_dev->dev;
size_t tfd_sz = priv->hw_params.tfd_size * TFD_QUEUE_SIZE_MAX;
/* Driver private data, only for Tx (not command) queues,
@ -302,8 +300,8 @@ static int iwl_tx_queue_alloc(struct iwl_priv *priv,
/* Circular buffer of transmit frame descriptors (TFDs),
* shared with device */
txq->tfds = pci_alloc_consistent(dev, tfd_sz, &txq->q.dma_addr);
txq->tfds = dma_alloc_coherent(dev, tfd_sz, &txq->q.dma_addr,
GFP_KERNEL);
if (!txq->tfds) {
IWL_ERR(priv, "pci_alloc_consistent(%zd) failed\n", tfd_sz);
goto error;
@ -731,7 +729,6 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
u8 tid = 0;
u8 *qc = NULL;
unsigned long flags;
int ret;
spin_lock_irqsave(&priv->lock, flags);
if (iwl_is_rfkill(priv)) {
@ -806,8 +803,10 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
hdr->seq_ctrl |= cpu_to_le16(seq_number);
seq_number += 0x10;
/* aggregation is on for this <sta,tid> */
if (info->flags & IEEE80211_TX_CTL_AMPDU)
if (info->flags & IEEE80211_TX_CTL_AMPDU &&
priv->stations[sta_id].tid[tid].agg.state == IWL_AGG_ON) {
txq_id = priv->stations[sta_id].tid[tid].agg.txq_id;
}
}
txq = &priv->txq[txq_id];
@ -949,7 +948,7 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
/* Tell device the write index *just past* this latest filled TFD */
q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
ret = iwl_txq_update_write_ptr(priv, txq);
iwl_txq_update_write_ptr(priv, txq);
spin_unlock_irqrestore(&priv->lock, flags);
/*
@ -963,9 +962,6 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
if (sta_priv && sta_priv->client)
atomic_inc(&sta_priv->pending_frames);
if (ret)
return ret;
if ((iwl_queue_space(q) < q->high_mark) && priv->mac80211_registered) {
if (wait_write_ptr) {
spin_lock_irqsave(&priv->lock, flags);
@ -1004,7 +1000,7 @@ int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
struct iwl_cmd_meta *out_meta;
dma_addr_t phys_addr;
unsigned long flags;
int len, ret;
int len;
u32 idx;
u16 fix_size;
@ -1101,10 +1097,10 @@ int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
/* Increment and update queue's write index */
q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
ret = iwl_txq_update_write_ptr(priv, txq);
iwl_txq_update_write_ptr(priv, txq);
spin_unlock_irqrestore(&priv->hcmd_lock, flags);
return ret ? ret : idx;
return idx;
}
static void iwl_tx_status(struct iwl_priv *priv, struct sk_buff *skb)
@ -1328,7 +1324,7 @@ int iwl_tx_agg_stop(struct iwl_priv *priv , const u8 *ra, u16 tid)
{
int tx_fifo_id, txq_id, sta_id, ssn = -1;
struct iwl_tid_data *tid_data;
int ret, write_ptr, read_ptr;
int write_ptr, read_ptr;
unsigned long flags;
if (!ra) {
@ -1380,13 +1376,17 @@ int iwl_tx_agg_stop(struct iwl_priv *priv , const u8 *ra, u16 tid)
priv->stations[sta_id].tid[tid].agg.state = IWL_AGG_OFF;
spin_lock_irqsave(&priv->lock, flags);
ret = priv->cfg->ops->lib->txq_agg_disable(priv, txq_id, ssn,
/*
* the only reason this call can fail is queue number out of range,
* which can happen if uCode is reloaded and all the station
* information is lost. If it is outside the range, there is no need
* to deactivate the uCode queue; just return "success" to allow
* mac80211 to clean up its own data.
*/
priv->cfg->ops->lib->txq_agg_disable(priv, txq_id, ssn,
tx_fifo_id);
spin_unlock_irqrestore(&priv->lock, flags);
if (ret)
return ret;
ieee80211_stop_tx_ba_cb_irqsafe(priv->vif, ra, tid);
return 0;

View file

@ -352,10 +352,10 @@ static int iwl3945_send_beacon_cmd(struct iwl_priv *priv)
static void iwl3945_unset_hw_params(struct iwl_priv *priv)
{
if (priv->shared_virt)
pci_free_consistent(priv->pci_dev,
sizeof(struct iwl3945_shared),
priv->shared_virt,
priv->shared_phys);
dma_free_coherent(&priv->pci_dev->dev,
sizeof(struct iwl3945_shared),
priv->shared_virt,
priv->shared_phys);
}
static void iwl3945_build_tx_cmd_hwcrypto(struct iwl_priv *priv,
@ -478,7 +478,6 @@ static int iwl3945_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
u8 wait_write_ptr = 0;
u8 *qc = NULL;
unsigned long flags;
int rc;
spin_lock_irqsave(&priv->lock, flags);
if (iwl_is_rfkill(priv)) {
@ -663,12 +662,9 @@ static int iwl3945_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
/* Tell device the write index *just past* this latest filled TFD */
q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
rc = iwl_txq_update_write_ptr(priv, txq);
iwl_txq_update_write_ptr(priv, txq);
spin_unlock_irqrestore(&priv->lock, flags);
if (rc)
return rc;
if ((iwl_queue_space(q) < q->high_mark)
&& priv->mac80211_registered) {
if (wait_write_ptr) {
@ -1063,13 +1059,13 @@ static inline __le32 iwl3945_dma_addr2rbd_ptr(struct iwl_priv *priv,
* also updates the memory address in the firmware to reference the new
* target buffer.
*/
static int iwl3945_rx_queue_restock(struct iwl_priv *priv)
static void iwl3945_rx_queue_restock(struct iwl_priv *priv)
{
struct iwl_rx_queue *rxq = &priv->rxq;
struct list_head *element;
struct iwl_rx_mem_buffer *rxb;
unsigned long flags;
int write, rc;
int write;
spin_lock_irqsave(&rxq->lock, flags);
write = rxq->write & ~0x7;
@ -1099,12 +1095,8 @@ static int iwl3945_rx_queue_restock(struct iwl_priv *priv)
spin_lock_irqsave(&rxq->lock, flags);
rxq->need_update = 1;
spin_unlock_irqrestore(&rxq->lock, flags);
rc = iwl_rx_queue_update_write_ptr(priv, rxq);
if (rc)
return rc;
iwl_rx_queue_update_write_ptr(priv, rxq);
}
return 0;
}
/**
@ -1249,10 +1241,10 @@ static void iwl3945_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rx
}
}
pci_free_consistent(priv->pci_dev, 4 * RX_QUEUE_SIZE, rxq->bd,
rxq->dma_addr);
pci_free_consistent(priv->pci_dev, sizeof(struct iwl_rb_status),
rxq->rb_stts, rxq->rb_stts_dma);
dma_free_coherent(&priv->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd,
rxq->dma_addr);
dma_free_coherent(&priv->pci_dev->dev, sizeof(struct iwl_rb_status),
rxq->rb_stts, rxq->rb_stts_dma);
rxq->bd = NULL;
rxq->rb_stts = NULL;
}
@ -4047,6 +4039,13 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
spin_lock_init(&priv->reg_lock);
spin_lock_init(&priv->lock);
/*
* stop and reset the on-board processor just in case it is in a
* strange state ... like being left stranded by a primary kernel
* and this is now the kdump kernel trying to start up
*/
iwl_write32(priv, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
/***********************
* 4. Read EEPROM
* ********************/