iwlwifi: pcie: don't disable interrupts for reg_lock

The only thing we do touching the device in hard interrupt context
is, at most, writing an interrupt ACK register, and that doesn't
race with anything protected by the reg_lock.

Thus, avoid disabling interrupts here for potentially long periods
of time; particularly long periods have been observed while dumping
firmware memory (leading to lockup warnings on some devices).

Signed-off-by: Johannes Berg <johannes.berg@intel.com>
Signed-off-by: Luca Coelho <luciano.coelho@intel.com>
Link: https://lore.kernel.org/r/iwlwifi.20210210135352.da916ab91298.I064c3e7823b616647293ed97da98edefb9ce9435@changeid
Signed-off-by: Luca Coelho <luciano.coelho@intel.com>
commit 874020f8ad
parent 806832c965
Author:    Johannes Berg, 2021-02-10 13:56:27 +02:00
Committer: Luca Coelho
3 changed files with 16 additions and 22 deletions
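
In short, the patch converts every reg_lock acquisition from the
irqsave/irqrestore spinlock API to the _bh (bottom-half) variant, or to a
plain spin_lock() where bottom halves are already disabled by an outer lock.
A minimal sketch of the before/after pattern (illustrative only, with
invented names demo_reg_lock and demo_poke_device_old/new, not the driver's
code):

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_reg_lock);

/* Old pattern: hard IRQs are disabled for the whole critical section. */
static void demo_poke_device_old(void)
{
        unsigned long flags;

        spin_lock_irqsave(&demo_reg_lock, flags);
        /* ... potentially long device access, e.g. dumping firmware memory ... */
        spin_unlock_irqrestore(&demo_reg_lock, flags);
}

/* New pattern: only bottom halves are disabled; hard IRQs keep running. */
static void demo_poke_device_new(void)
{
        spin_lock_bh(&demo_reg_lock);
        /* ... same device access, but interrupts stay enabled ... */
        spin_unlock_bh(&demo_reg_lock);
}

The _bh variant still excludes the softirq/tasklet users that contend for
reg_lock, but it no longer blocks hard interrupts, which is sufficient here
because the hard-IRQ path at most acks interrupts and never takes reg_lock.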

@@ -1972,7 +1972,7 @@ static bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans,
         int ret;
         struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

-        spin_lock_irqsave(&trans_pcie->reg_lock, *flags);
+        spin_lock_bh(&trans_pcie->reg_lock);

         if (trans_pcie->cmd_hold_nic_awake)
                 goto out;
@@ -2057,7 +2057,7 @@ static bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans,
                 }

 err:
-                spin_unlock_irqrestore(&trans_pcie->reg_lock, *flags);
+                spin_unlock_bh(&trans_pcie->reg_lock);
                 return false;
         }

@@ -2095,7 +2095,7 @@ static void iwl_trans_pcie_release_nic_access(struct iwl_trans *trans,
          * scheduled on different CPUs (after we drop reg_lock).
          */
 out:
-        spin_unlock_irqrestore(&trans_pcie->reg_lock, *flags);
+        spin_unlock_bh(&trans_pcie->reg_lock);
 }

 static int iwl_trans_pcie_read_mem(struct iwl_trans *trans, u32 addr,
@@ -2296,11 +2296,10 @@ static void iwl_trans_pcie_set_bits_mask(struct iwl_trans *trans, u32 reg,
                                          u32 mask, u32 value)
 {
         struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-        unsigned long flags;

-        spin_lock_irqsave(&trans_pcie->reg_lock, flags);
+        spin_lock_bh(&trans_pcie->reg_lock);
         __iwl_trans_pcie_set_bits_mask(trans, reg, mask, value);
-        spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
+        spin_unlock_bh(&trans_pcie->reg_lock);
 }

 static const char *get_csr_string(int cmd)

@@ -31,7 +31,6 @@ int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans,
         struct iwl_txq *txq = trans->txqs.txq[trans->txqs.cmd.q_id];
         struct iwl_device_cmd *out_cmd;
         struct iwl_cmd_meta *out_meta;
-        unsigned long flags;
         void *dup_buf = NULL;
         dma_addr_t phys_addr;
         int i, cmd_pos, idx;
@@ -244,11 +243,11 @@ int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans,
         if (txq->read_ptr == txq->write_ptr && txq->wd_timeout)
                 mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);

-        spin_lock_irqsave(&trans_pcie->reg_lock, flags);
+        spin_lock(&trans_pcie->reg_lock);
         /* Increment and update queue's write index */
         txq->write_ptr = iwl_txq_inc_wrap(trans, txq->write_ptr);
         iwl_txq_inc_wr_ptr(trans, txq);
-        spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
+        spin_unlock(&trans_pcie->reg_lock);

 out:
         spin_unlock_bh(&txq->lock);

@@ -223,12 +223,10 @@ static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id)
                 txq->read_ptr = iwl_txq_inc_wrap(trans, txq->read_ptr);

                 if (txq->read_ptr == txq->write_ptr) {
-                        unsigned long flags;
-
-                        spin_lock_irqsave(&trans_pcie->reg_lock, flags);
+                        spin_lock(&trans_pcie->reg_lock);
                         if (txq_id == trans->txqs.cmd.q_id)
                                 iwl_pcie_clear_cmd_in_flight(trans);
-                        spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
+                        spin_unlock(&trans_pcie->reg_lock);
                 }
         }

@@ -679,7 +677,6 @@ static void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx)
 {
         struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
         struct iwl_txq *txq = trans->txqs.txq[txq_id];
-        unsigned long flags;
         int nfreed = 0;
         u16 r;

@@ -710,9 +707,10 @@ static void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx)
         }

         if (txq->read_ptr == txq->write_ptr) {
-                spin_lock_irqsave(&trans_pcie->reg_lock, flags);
+                /* BHs are also disabled due to txq->lock */
+                spin_lock(&trans_pcie->reg_lock);
                 iwl_pcie_clear_cmd_in_flight(trans);
-                spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
+                spin_unlock(&trans_pcie->reg_lock);
         }

         iwl_txq_progress(txq);
@@ -921,7 +919,6 @@ int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
         struct iwl_txq *txq = trans->txqs.txq[trans->txqs.cmd.q_id];
         struct iwl_device_cmd *out_cmd;
         struct iwl_cmd_meta *out_meta;
-        unsigned long flags;
         void *dup_buf = NULL;
         dma_addr_t phys_addr;
         int idx;
@@ -1164,20 +1161,19 @@ int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
         if (txq->read_ptr == txq->write_ptr && txq->wd_timeout)
                 mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);

-        spin_lock_irqsave(&trans_pcie->reg_lock, flags);
+        spin_lock(&trans_pcie->reg_lock);
         ret = iwl_pcie_set_cmd_in_flight(trans, cmd);
         if (ret < 0) {
                 idx = ret;
-                spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
-                goto out;
+                goto unlock_reg;
         }

         /* Increment and update queue's write index */
         txq->write_ptr = iwl_txq_inc_wrap(trans, txq->write_ptr);
         iwl_pcie_txq_inc_wr_ptr(trans, txq);
-        spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
-
+unlock_reg:
+        spin_unlock(&trans_pcie->reg_lock);
 out:
         spin_unlock_bh(&txq->lock);

 free_dup_buf:
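
A note on the plain spin_lock()/spin_unlock() calls in the hunks above:
they appear only where the caller already holds txq->lock, taken with
spin_lock_bh(), so bottom halves are already disabled (hence the new
"BHs are also disabled due to txq->lock" comment). A self-contained sketch
of that nesting, again with invented names (demo_dev, demo_submit) rather
than the driver's code:

#include <linux/spinlock.h>

struct demo_dev {
        spinlock_t queue_lock;  /* plays the role of txq->lock */
        spinlock_t reg_lock;    /* plays the role of trans_pcie->reg_lock */
};

static void demo_submit(struct demo_dev *dev)
{
        spin_lock_bh(&dev->queue_lock);         /* disables bottom halves */

        /* BHs are already off here, so a plain spin_lock() is enough. */
        spin_lock(&dev->reg_lock);
        /* ... update the queue's write pointer, touch registers ... */
        spin_unlock(&dev->reg_lock);

        spin_unlock_bh(&dev->queue_lock);
}

This only works because, after this patch, every path that takes reg_lock
either uses spin_lock_bh() or already runs with BHs disabled, so a holder
of the lock cannot be interrupted on its own CPU by a softirq that also
wants reg_lock, which is the guarantee spin_lock_bh() would otherwise
provide.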