qla2xxx: Add irq affinity notification

Register to receive notification when the IRQ affinity setting changes.

Signed-off-by: Quinn Tran <quinn.tran@qlogic.com>
Signed-off-by: Himanshu Madhani <himanshu.madhani@qlogic.com>
Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
Author: Quinn Tran, 2015-12-17 14:57:05 -05:00; committed by Nicholas Bellinger
parent 7560151b6b
commit cdb898c52d
3 changed files with 93 additions and 1 deletion

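For reference, the mechanism this patch hooks into is the stock genirq
affinity-notification API from <linux/interrupt.h>. Below is a minimal,
driver-agnostic sketch of that pattern; the demo_* names are hypothetical,
while struct irq_affinity_notify, irq_set_affinity_notifier() and the two
callback signatures are the real kernel interfaces.

/*
 * Illustrative sketch only: embed a struct irq_affinity_notify in the
 * per-vector bookkeeping, register it, and cache the servicing CPU
 * whenever the affinity mask changes.
 */
#include <linux/interrupt.h>
#include <linux/kref.h>

struct demo_vector {
	unsigned int vector;			/* IRQ number of the MSI-X vector */
	int cpuid;				/* last CPU reported by the notifier */
	struct irq_affinity_notify irq_notify;
};

static void demo_affinity_notify(struct irq_affinity_notify *notify,
				 const cpumask_t *mask)
{
	struct demo_vector *v =
		container_of(notify, struct demo_vector, irq_notify);

	/* Remember which CPU now services this vector. */
	v->cpuid = cpumask_first(mask);
}

static void demo_affinity_release(struct kref *ref)
{
	/* Last reference dropped; nothing dynamic to free in this sketch. */
}

static int demo_register(struct demo_vector *v)
{
	v->irq_notify.notify = demo_affinity_notify;
	v->irq_notify.release = demo_affinity_release;

	/* The genirq core invokes ->notify() from a workqueue whenever the
	 * affinity mask of v->vector changes. */
	return irq_set_affinity_notifier(v->vector, &v->irq_notify);
}
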
drivers/scsi/qla2xxx/qla_def.h

@@ -2714,11 +2714,16 @@ struct isp_operations {
struct scsi_qla_host;
#define QLA83XX_RSPQ_MSIX_ENTRY_NUMBER 1 /* refer to qla83xx_msix_entries */
struct qla_msix_entry {
int have_irq;
uint32_t vector;
uint16_t entry;
struct rsp_que *rsp;
struct irq_affinity_notify irq_notify;
int cpuid;
};
#define WATCH_INTERVAL 1 /* number of seconds */
@@ -2930,6 +2935,7 @@ struct qlt_hw_data {
spinlock_t q_full_lock;
uint32_t leak_exchg_thresh_hold;
spinlock_t sess_lock;
int rspq_vector_cpuid;
};
#define MAX_QFULL_CMDS_ALLOC 8192

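The notifier block is embedded directly in struct qla_msix_entry so the
callbacks can recover their owning entry with container_of(), and the CPU it
reports is cached twice: per vector in cpuid, and again in
qlt_hw_data.rspq_vector_cpuid for the response-queue vector so target-mode
code can read it without walking the MSI-X table. A rough sketch of the
intended consumer-side check, with a trimmed-down struct shape and a
hypothetical helper name:

#include <linux/smp.h>
#include <linux/types.h>

struct demo_tgt_hw {
	int rspq_vector_cpuid;		/* cached CPU of the response-queue vector */
};

static inline bool demo_on_rspq_cpu(const struct demo_tgt_hw *tgt)
{
	/* True when we are already running on the CPU that services the
	 * response-queue MSI-X interrupt, so completion work can stay
	 * in-line instead of being handed off to another CPU. */
	return tgt->rspq_vector_cpuid == smp_processor_id();
}

The per-vector cpuid starts out as -1 ("not known yet") and is filled in
either by the notifier or by the fallback added to
qla24xx_process_response_queue() below.
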
drivers/scsi/qla2xxx/qla_isr.c

@@ -18,6 +18,10 @@ static void qla2x00_status_entry(scsi_qla_host_t *, struct rsp_que *, void *);
static void qla2x00_status_cont_entry(struct rsp_que *, sts_cont_entry_t *);
static void qla2x00_error_entry(scsi_qla_host_t *, struct rsp_que *,
sts_entry_t *);
static void qla_irq_affinity_notify(struct irq_affinity_notify *,
const cpumask_t *);
static void qla_irq_affinity_release(struct kref *);
/**
* qla2100_intr_handler() - Process interrupts for the ISP2100 and ISP2200.
@@ -2548,6 +2552,14 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha,
if (!vha->flags.online)
return;
if (rsp->msix->cpuid != smp_processor_id()) {
/* If the kernel has not notified qla of the IRQ's CPU change,
 * record it here.
 */
rsp->msix->cpuid = smp_processor_id();
ha->tgt.rspq_vector_cpuid = rsp->msix->cpuid;
}
while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
pkt = (struct sts_entry_24xx *)rsp->ring_ptr;
@@ -2979,8 +2991,11 @@ qla24xx_disable_msix(struct qla_hw_data *ha)
for (i = 0; i < ha->msix_count; i++) {
qentry = &ha->msix_entries[i];
if (qentry->have_irq)
if (qentry->have_irq) {
/* un-register irq cpu affinity notification */
irq_set_affinity_notifier(qentry->vector, NULL);
free_irq(qentry->vector, qentry->rsp);
}
}
pci_disable_msix(ha->pdev);
kfree(ha->msix_entries);
@@ -3043,6 +3058,9 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
qentry->entry = entries[i].entry;
qentry->have_irq = 0;
qentry->rsp = NULL;
qentry->irq_notify.notify = qla_irq_affinity_notify;
qentry->irq_notify.release = qla_irq_affinity_release;
qentry->cpuid = -1;
}
/* Enable MSI-X vectors for the base queue */
@@ -3061,6 +3079,18 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
qentry->have_irq = 1;
qentry->rsp = rsp;
rsp->msix = qentry;
/* Register for CPU affinity notification. */
irq_set_affinity_notifier(qentry->vector, &qentry->irq_notify);
/* Schedule work (i.e. trigger a notification) to read the CPU
 * mask for this specific IRQ.  kref_get() is required because
 * irq_affinity_notify() will do a kref_put().
 */
kref_get(&qentry->irq_notify.kref);
schedule_work(&qentry->irq_notify.work);
}
/*
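
The kref_get()/schedule_work() pair added in the hunk above deserves a note:
the genirq core's work handler (irq_affinity_notify() in kernel/irq/manage.c)
ends with a kref_put() on notify->kref, so manually scheduling the work once
at setup time, to prime the cached CPU id with the vector's current mask,
needs a reference of its own or the release callback would fire early. A
minimal sketch of that priming step, with a hypothetical helper name:

#include <linux/interrupt.h>
#include <linux/kref.h>
#include <linux/workqueue.h>

static void demo_prime_affinity(struct irq_affinity_notify *notify)
{
	/* Balance the kref_put() done at the end of the core's
	 * notification work handler. */
	kref_get(&notify->kref);

	/* Run ->notify() once with the vector's current affinity mask. */
	schedule_work(&notify->work);
}
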
@@ -3240,3 +3270,47 @@ int qla25xx_request_irq(struct rsp_que *rsp)
msix->rsp = rsp;
return ret;
}
/* irq_set_affinity/irqbalance will trigger notification of cpu mask update */
static void qla_irq_affinity_notify(struct irq_affinity_notify *notify,
const cpumask_t *mask)
{
struct qla_msix_entry *e =
container_of(notify, struct qla_msix_entry, irq_notify);
struct qla_hw_data *ha;
struct scsi_qla_host *base_vha;
/* Users are advised to restrict the affinity mask to a single CPU. */
e->cpuid = cpumask_first(mask);
ha = e->rsp->hw;
base_vha = pci_get_drvdata(ha->pdev);
ql_dbg(ql_dbg_init, base_vha, 0xffff,
"%s: host %ld : vector %d cpu %d \n", __func__,
base_vha->host_no, e->vector, e->cpuid);
if (e->have_irq) {
if ((IS_QLA83XX(ha) || IS_QLA27XX(ha)) &&
(e->entry == QLA83XX_RSPQ_MSIX_ENTRY_NUMBER)) {
ha->tgt.rspq_vector_cpuid = e->cpuid;
ql_dbg(ql_dbg_init, base_vha, 0xffff,
"%s: host%ld: rspq vector %d cpu %d runtime change\n",
__func__, base_vha->host_no, e->vector, e->cpuid);
}
}
}
static void qla_irq_affinity_release(struct kref *ref)
{
struct irq_affinity_notify *notify =
container_of(ref, struct irq_affinity_notify, kref);
struct qla_msix_entry *e =
container_of(notify, struct qla_msix_entry, irq_notify);
struct scsi_qla_host *base_vha = pci_get_drvdata(e->rsp->hw->pdev);
ql_dbg(ql_dbg_init, base_vha, 0xffff,
"%s: host%ld: vector %d cpu %d \n", __func__,
base_vha->host_no, e->vector, e->cpuid);
}

drivers/scsi/qla2xxx/qla_target.c

@@ -6232,6 +6232,7 @@ qlt_enable_vha(struct scsi_qla_host *vha)
struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
unsigned long flags;
scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
int rspq_ent = QLA83XX_RSPQ_MSIX_ENTRY_NUMBER;
if (!tgt) {
ql_dbg(ql_dbg_tgt, vha, 0xe069,
@@ -6250,6 +6251,17 @@ qlt_enable_vha(struct scsi_qla_host *vha)
qla24xx_disable_vp(vha);
qla24xx_enable_vp(vha);
} else {
if (ha->msix_entries) {
ql_dbg(ql_dbg_tgt, vha, 0xffff,
"%s: host%ld : vector %d cpu %d\n",
__func__, vha->host_no,
ha->msix_entries[rspq_ent].vector,
ha->msix_entries[rspq_ent].cpuid);
ha->tgt.rspq_vector_cpuid =
ha->msix_entries[rspq_ent].cpuid;
}
set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags);
qla2xxx_wake_dpc(base_vha);
qla2x00_wait_for_hba_online(base_vha);
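
Taken together: in the base-host path above, qlt_enable_vha() seeds
ha->tgt.rspq_vector_cpuid from whichever CPU was last reported for the
response-queue vector (entry QLA83XX_RSPQ_MSIX_ENTRY_NUMBER),
qla_irq_affinity_notify() keeps that value current across runtime affinity
changes, and the check added to qla24xx_process_response_queue() covers the
case where no notification has arrived yet. The seed is guarded by the
ha->msix_entries test because the cached CPU id is only meaningful when the
driver is actually running with MSI-X vectors.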