[SCSI] lpfc 8.3.28: Critical Miscellaneous fixes

- Make lpfc_sli4_pci_mem_unset interface type aware (CR 124390)
- Convert byte count to word count when calling __iowrite32_copy (CR 122550);
  see the note on word counts after this list
- Check the ERR1 and ERR2 registers for error attention due to the SLI
  Port state being affected by a forced debug dump. (CR 122986, 122426, 124859)
- Use the lpfc_readl routine instead of the readl for the port status
  register read in lpfc_handle_eratt_s4 (CR 125403)
- Call lpfc_sli4_queue_destroy inside of lpfc_sli4_brdreset before doing
  a pci function reset (CR 125124, 125168, 125572, 125622)
- Zero out the HBQ when it is allocated (CR 125663)
- Alter port reset log messages to indicate error type (CR 125989)
- Add proper NULL pointer checking to all the places that access
  the queue memory (CR 125832)
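
Note on the __iowrite32_copy() item above: the kernel helper copies 32-bit
words to I/O space and takes a count of words, not bytes, so a byte length
must be divided by sizeof(uint32_t). A minimal sketch of the corrected SLIM
copy wrapper (it simply mirrors the first hunk below and assumes only the
standard __iowrite32_copy() prototype):

    /* sketch: scale the byte count down to a 32-bit word count */
    static inline void
    lpfc_memcpy_to_slim(void __iomem *dest, void *src, unsigned int bytes)
    {
            __iowrite32_copy(dest, src, bytes / sizeof(uint32_t));
    }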

Signed-off-by: Alex Iannicelli <alex.iannicelli@emulex.com>
Signed-off-by: James Smart <james.smart@emulex.com>
Signed-off-by: James Bottomley <JBottomley@Parallels.com>
James Smart 2011-12-13 13:22:37 -05:00 committed by James Bottomley
parent df9e1b59f9
commit 2e90f4b5a2
6 changed files with 350 additions and 139 deletions


@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2004-2005 Emulex.  All rights reserved.           *
+ * Copyright (C) 2004-2011 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
  * www.emulex.com                                                  *
  *                                                                 *
@@ -82,7 +82,8 @@ lpfc_memcpy_from_slim( void *dest, void __iomem *src, unsigned int bytes)
 static inline void
 lpfc_memcpy_to_slim( void __iomem *dest, void *src, unsigned int bytes)
 {
-    __iowrite32_copy(dest, src, bytes);
+    /* convert bytes in argument list to word count for copy function */
+    __iowrite32_copy(dest, src, bytes / sizeof(uint32_t));
 }
 
 static inline void


@@ -1997,7 +1997,8 @@ lpfc_idiag_queinfo_read(struct file *file, char __user *buf, size_t nbytes,
     /* Get slow-path event queue information */
     len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
             "Slow-path EQ information:\n");
-    len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+    if (phba->sli4_hba.sp_eq) {
+        len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
             "\tEQID[%02d], "
             "QE-COUNT[%04d], QE-SIZE[%04d], "
             "HOST-INDEX[%04d], PORT-INDEX[%04d]\n\n",
@@ -2006,12 +2007,17 @@ lpfc_idiag_queinfo_read(struct file *file, char __user *buf, size_t nbytes,
             phba->sli4_hba.sp_eq->entry_size,
             phba->sli4_hba.sp_eq->host_index,
             phba->sli4_hba.sp_eq->hba_index);
+    }
 
     /* Get fast-path event queue information */
     len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
             "Fast-path EQ information:\n");
-    for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++) {
-        len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+    if (phba->sli4_hba.fp_eq) {
+        for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count;
+             fcp_qidx++) {
+            if (phba->sli4_hba.fp_eq[fcp_qidx]) {
+                len += snprintf(pbuffer+len,
+                    LPFC_QUE_INFO_GET_BUF_SIZE-len,
                 "\tEQID[%02d], "
                 "QE-COUNT[%04d], QE-SIZE[%04d], "
                 "HOST-INDEX[%04d], PORT-INDEX[%04d]\n",
@@ -2020,16 +2026,19 @@ lpfc_idiag_queinfo_read(struct file *file, char __user *buf, size_t nbytes,
                 phba->sli4_hba.fp_eq[fcp_qidx]->entry_size,
                 phba->sli4_hba.fp_eq[fcp_qidx]->host_index,
                 phba->sli4_hba.fp_eq[fcp_qidx]->hba_index);
+            }
+        }
     }
     len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n");
 
     /* Get mailbox complete queue information */
     len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
             "Slow-path MBX CQ information:\n");
-    len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+    if (phba->sli4_hba.mbx_cq) {
+        len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
             "Associated EQID[%02d]:\n",
             phba->sli4_hba.mbx_cq->assoc_qid);
         len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
             "\tCQID[%02d], "
             "QE-COUNT[%04d], QE-SIZE[%04d], "
             "HOST-INDEX[%04d], PORT-INDEX[%04d]\n\n",
@@ -2038,14 +2047,16 @@ lpfc_idiag_queinfo_read(struct file *file, char __user *buf, size_t nbytes,
             phba->sli4_hba.mbx_cq->entry_size,
             phba->sli4_hba.mbx_cq->host_index,
             phba->sli4_hba.mbx_cq->hba_index);
+    }
 
     /* Get slow-path complete queue information */
     len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
             "Slow-path ELS CQ information:\n");
-    len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+    if (phba->sli4_hba.els_cq) {
+        len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
             "Associated EQID[%02d]:\n",
             phba->sli4_hba.els_cq->assoc_qid);
         len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
             "\tCQID [%02d], "
             "QE-COUNT[%04d], QE-SIZE[%04d], "
             "HOST-INDEX[%04d], PORT-INDEX[%04d]\n\n",
@@ -2054,16 +2065,21 @@ lpfc_idiag_queinfo_read(struct file *file, char __user *buf, size_t nbytes,
             phba->sli4_hba.els_cq->entry_size,
             phba->sli4_hba.els_cq->host_index,
             phba->sli4_hba.els_cq->hba_index);
+    }
 
     /* Get fast-path complete queue information */
     len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
             "Fast-path FCP CQ information:\n");
     fcp_qidx = 0;
-    do {
-        len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+    if (phba->sli4_hba.fcp_cq) {
+        do {
+            if (phba->sli4_hba.fcp_cq[fcp_qidx]) {
+                len += snprintf(pbuffer+len,
+                    LPFC_QUE_INFO_GET_BUF_SIZE-len,
                 "Associated EQID[%02d]:\n",
                 phba->sli4_hba.fcp_cq[fcp_qidx]->assoc_qid);
-        len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+                len += snprintf(pbuffer+len,
+                    LPFC_QUE_INFO_GET_BUF_SIZE-len,
                 "\tCQID[%02d], "
                 "QE-COUNT[%04d], QE-SIZE[%04d], "
                 "HOST-INDEX[%04d], PORT-INDEX[%04d]\n",
@@ -2072,16 +2088,20 @@ lpfc_idiag_queinfo_read(struct file *file, char __user *buf, size_t nbytes,
                 phba->sli4_hba.fcp_cq[fcp_qidx]->entry_size,
                 phba->sli4_hba.fcp_cq[fcp_qidx]->host_index,
                 phba->sli4_hba.fcp_cq[fcp_qidx]->hba_index);
-    } while (++fcp_qidx < phba->cfg_fcp_eq_count);
-    len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n");
+            }
+        } while (++fcp_qidx < phba->cfg_fcp_eq_count);
+        len += snprintf(pbuffer+len,
+            LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n");
+    }
 
     /* Get mailbox queue information */
     len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
             "Slow-path MBX MQ information:\n");
-    len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+    if (phba->sli4_hba.mbx_wq) {
+        len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
             "Associated CQID[%02d]:\n",
             phba->sli4_hba.mbx_wq->assoc_qid);
         len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
             "\tWQID[%02d], "
             "QE-COUNT[%04d], QE-SIZE[%04d], "
             "HOST-INDEX[%04d], PORT-INDEX[%04d]\n\n",
@@ -2090,14 +2110,16 @@ lpfc_idiag_queinfo_read(struct file *file, char __user *buf, size_t nbytes,
             phba->sli4_hba.mbx_wq->entry_size,
             phba->sli4_hba.mbx_wq->host_index,
             phba->sli4_hba.mbx_wq->hba_index);
+    }
 
     /* Get slow-path work queue information */
     len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
             "Slow-path ELS WQ information:\n");
-    len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+    if (phba->sli4_hba.els_wq) {
+        len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
             "Associated CQID[%02d]:\n",
             phba->sli4_hba.els_wq->assoc_qid);
         len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
             "\tWQID[%02d], "
             "QE-COUNT[%04d], QE-SIZE[%04d], "
             "HOST-INDEX[%04d], PORT-INDEX[%04d]\n\n",
@@ -2106,15 +2128,22 @@ lpfc_idiag_queinfo_read(struct file *file, char __user *buf, size_t nbytes,
             phba->sli4_hba.els_wq->entry_size,
             phba->sli4_hba.els_wq->host_index,
             phba->sli4_hba.els_wq->hba_index);
+    }
 
     /* Get fast-path work queue information */
     len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
             "Fast-path FCP WQ information:\n");
-    for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_wq_count; fcp_qidx++) {
-        len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+    if (phba->sli4_hba.fcp_wq) {
+        for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_wq_count;
+             fcp_qidx++) {
+            if (!phba->sli4_hba.fcp_wq[fcp_qidx])
+                continue;
+            len += snprintf(pbuffer+len,
+                LPFC_QUE_INFO_GET_BUF_SIZE-len,
                 "Associated CQID[%02d]:\n",
                 phba->sli4_hba.fcp_wq[fcp_qidx]->assoc_qid);
-        len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+            len += snprintf(pbuffer+len,
+                LPFC_QUE_INFO_GET_BUF_SIZE-len,
                 "\tWQID[%02d], "
                 "QE-COUNT[%04d], WQE-SIZE[%04d], "
                 "HOST-INDEX[%04d], PORT-INDEX[%04d]\n",
@@ -2123,16 +2152,19 @@ lpfc_idiag_queinfo_read(struct file *file, char __user *buf, size_t nbytes,
                 phba->sli4_hba.fcp_wq[fcp_qidx]->entry_size,
                 phba->sli4_hba.fcp_wq[fcp_qidx]->host_index,
                 phba->sli4_hba.fcp_wq[fcp_qidx]->hba_index);
+        }
+        len += snprintf(pbuffer+len,
+            LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n");
     }
-    len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n");
 
     /* Get receive queue information */
     len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
             "Slow-path RQ information:\n");
-    len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+    if (phba->sli4_hba.hdr_rq && phba->sli4_hba.dat_rq) {
+        len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
             "Associated CQID[%02d]:\n",
             phba->sli4_hba.hdr_rq->assoc_qid);
         len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
             "\tHQID[%02d], "
             "QE-COUNT[%04d], QE-SIZE[%04d], "
             "HOST-INDEX[%04d], PORT-INDEX[%04d]\n",
@@ -2141,7 +2173,7 @@ lpfc_idiag_queinfo_read(struct file *file, char __user *buf, size_t nbytes,
             phba->sli4_hba.hdr_rq->entry_size,
             phba->sli4_hba.hdr_rq->host_index,
             phba->sli4_hba.hdr_rq->hba_index);
-    len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+        len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
             "\tDQID[%02d], "
             "QE-COUNT[%04d], QE-SIZE[%04d], "
             "HOST-INDEX[%04d], PORT-INDEX[%04d]\n",
@@ -2150,7 +2182,7 @@ lpfc_idiag_queinfo_read(struct file *file, char __user *buf, size_t nbytes,
             phba->sli4_hba.dat_rq->entry_size,
             phba->sli4_hba.dat_rq->host_index,
             phba->sli4_hba.dat_rq->hba_index);
-
+    }
     return simple_read_from_buffer(buf, nbytes, ppos, pbuffer, len);
 }
 
@@ -2360,7 +2392,8 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf,
     switch (quetp) {
     case LPFC_IDIAG_EQ:
         /* Slow-path event queue */
-        if (phba->sli4_hba.sp_eq->queue_id == queid) {
+        if (phba->sli4_hba.sp_eq &&
+            phba->sli4_hba.sp_eq->queue_id == queid) {
             /* Sanity check */
             rc = lpfc_idiag_que_param_check(
                 phba->sli4_hba.sp_eq, index, count);
@@ -2370,23 +2403,29 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf,
             goto pass_check;
         }
         /* Fast-path event queue */
-        for (qidx = 0; qidx < phba->cfg_fcp_eq_count; qidx++) {
-            if (phba->sli4_hba.fp_eq[qidx]->queue_id == queid) {
-                /* Sanity check */
-                rc = lpfc_idiag_que_param_check(
+        if (phba->sli4_hba.fp_eq) {
+            for (qidx = 0; qidx < phba->cfg_fcp_eq_count; qidx++) {
+                if (phba->sli4_hba.fp_eq[qidx] &&
+                    phba->sli4_hba.fp_eq[qidx]->queue_id ==
+                    queid) {
+                    /* Sanity check */
+                    rc = lpfc_idiag_que_param_check(
                         phba->sli4_hba.fp_eq[qidx],
                         index, count);
                 if (rc)
                     goto error_out;
-                idiag.ptr_private = phba->sli4_hba.fp_eq[qidx];
-                goto pass_check;
+                idiag.ptr_private =
+                    phba->sli4_hba.fp_eq[qidx];
+                goto pass_check;
+                }
             }
         }
         goto error_out;
         break;
     case LPFC_IDIAG_CQ:
         /* MBX complete queue */
-        if (phba->sli4_hba.mbx_cq->queue_id == queid) {
+        if (phba->sli4_hba.mbx_cq &&
+            phba->sli4_hba.mbx_cq->queue_id == queid) {
             /* Sanity check */
             rc = lpfc_idiag_que_param_check(
                 phba->sli4_hba.mbx_cq, index, count);
@@ -2396,7 +2435,8 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf,
             goto pass_check;
         }
         /* ELS complete queue */
-        if (phba->sli4_hba.els_cq->queue_id == queid) {
+        if (phba->sli4_hba.els_cq &&
+            phba->sli4_hba.els_cq->queue_id == queid) {
             /* Sanity check */
             rc = lpfc_idiag_que_param_check(
                 phba->sli4_hba.els_cq, index, count);
@@ -2406,25 +2446,30 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf,
             goto pass_check;
         }
         /* FCP complete queue */
-        qidx = 0;
-        do {
-            if (phba->sli4_hba.fcp_cq[qidx]->queue_id == queid) {
-                /* Sanity check */
-                rc = lpfc_idiag_que_param_check(
+        if (phba->sli4_hba.fcp_cq) {
+            qidx = 0;
+            do {
+                if (phba->sli4_hba.fcp_cq[qidx] &&
+                    phba->sli4_hba.fcp_cq[qidx]->queue_id ==
+                    queid) {
+                    /* Sanity check */
+                    rc = lpfc_idiag_que_param_check(
                         phba->sli4_hba.fcp_cq[qidx],
                         index, count);
                 if (rc)
                     goto error_out;
                 idiag.ptr_private =
                     phba->sli4_hba.fcp_cq[qidx];
                 goto pass_check;
             }
         } while (++qidx < phba->cfg_fcp_eq_count);
+        }
         goto error_out;
         break;
     case LPFC_IDIAG_MQ:
         /* MBX work queue */
-        if (phba->sli4_hba.mbx_wq->queue_id == queid) {
+        if (phba->sli4_hba.mbx_wq &&
+            phba->sli4_hba.mbx_wq->queue_id == queid) {
             /* Sanity check */
             rc = lpfc_idiag_que_param_check(
                 phba->sli4_hba.mbx_wq, index, count);
@@ -2433,10 +2478,12 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf,
             idiag.ptr_private = phba->sli4_hba.mbx_wq;
             goto pass_check;
         }
+        goto error_out;
         break;
     case LPFC_IDIAG_WQ:
         /* ELS work queue */
-        if (phba->sli4_hba.els_wq->queue_id == queid) {
+        if (phba->sli4_hba.els_wq &&
+            phba->sli4_hba.els_wq->queue_id == queid) {
             /* Sanity check */
             rc = lpfc_idiag_que_param_check(
                 phba->sli4_hba.els_wq, index, count);
@@ -2446,24 +2493,30 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf,
             goto pass_check;
         }
         /* FCP work queue */
-        for (qidx = 0; qidx < phba->cfg_fcp_wq_count; qidx++) {
-            if (phba->sli4_hba.fcp_wq[qidx]->queue_id == queid) {
-                /* Sanity check */
-                rc = lpfc_idiag_que_param_check(
+        if (phba->sli4_hba.fcp_wq) {
+            for (qidx = 0; qidx < phba->cfg_fcp_wq_count; qidx++) {
+                if (!phba->sli4_hba.fcp_wq[qidx])
+                    continue;
+                if (phba->sli4_hba.fcp_wq[qidx]->queue_id ==
+                    queid) {
+                    /* Sanity check */
+                    rc = lpfc_idiag_que_param_check(
                         phba->sli4_hba.fcp_wq[qidx],
                         index, count);
                 if (rc)
                     goto error_out;
                 idiag.ptr_private =
                     phba->sli4_hba.fcp_wq[qidx];
                 goto pass_check;
+                }
             }
         }
         goto error_out;
         break;
     case LPFC_IDIAG_RQ:
         /* HDR queue */
-        if (phba->sli4_hba.hdr_rq->queue_id == queid) {
+        if (phba->sli4_hba.hdr_rq &&
+            phba->sli4_hba.hdr_rq->queue_id == queid) {
             /* Sanity check */
             rc = lpfc_idiag_que_param_check(
                 phba->sli4_hba.hdr_rq, index, count);
@@ -2473,7 +2526,8 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf,
             goto pass_check;
         }
         /* DAT queue */
-        if (phba->sli4_hba.dat_rq->queue_id == queid) {
+        if (phba->sli4_hba.dat_rq &&
+            phba->sli4_hba.dat_rq->queue_id == queid) {
             /* Sanity check */
             rc = lpfc_idiag_que_param_check(
                 phba->sli4_hba.dat_rq, index, count);


@@ -1417,7 +1417,10 @@ lpfc_handle_eratt_s4(struct lpfc_hba *phba)
     uint32_t event_data;
     struct Scsi_Host *shost;
     uint32_t if_type;
-    struct lpfc_register portstat_reg;
+    struct lpfc_register portstat_reg = {0};
+    uint32_t reg_err1, reg_err2;
+    uint32_t uerrlo_reg, uemasklo_reg;
+    uint32_t pci_rd_rc1, pci_rd_rc2;
     int rc;
 
     /* If the pci channel is offline, ignore possible errors, since
@@ -1429,27 +1432,29 @@ lpfc_handle_eratt_s4(struct lpfc_hba *phba)
     if (!phba->cfg_enable_hba_reset)
         return;
 
-    /* Send an internal error event to mgmt application */
-    lpfc_board_errevt_to_mgmt(phba);
-
-    /* For now, the actual action for SLI4 device handling is not
-     * specified yet, just treated it as adaptor hardware failure
-     */
-    event_data = FC_REG_DUMP_EVENT;
-    shost = lpfc_shost_from_vport(vport);
-    fc_host_post_vendor_event(shost, fc_get_event_number(),
-                  sizeof(event_data), (char *) &event_data,
-                  SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
-
     if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
     switch (if_type) {
     case LPFC_SLI_INTF_IF_TYPE_0:
+        pci_rd_rc1 = lpfc_readl(
+                phba->sli4_hba.u.if_type0.UERRLOregaddr,
+                &uerrlo_reg);
+        pci_rd_rc2 = lpfc_readl(
+                phba->sli4_hba.u.if_type0.UEMASKLOregaddr,
+                &uemasklo_reg);
+        /* consider PCI bus read error as pci_channel_offline */
+        if (pci_rd_rc1 == -EIO && pci_rd_rc2 == -EIO)
+            return;
         lpfc_sli4_offline_eratt(phba);
         break;
     case LPFC_SLI_INTF_IF_TYPE_2:
-        portstat_reg.word0 =
-            readl(phba->sli4_hba.u.if_type2.STATUSregaddr);
+        pci_rd_rc1 = lpfc_readl(
+                phba->sli4_hba.u.if_type2.STATUSregaddr,
+                &portstat_reg.word0);
+        /* consider PCI bus read error as pci_channel_offline */
+        if (pci_rd_rc1 == -EIO)
+            return;
+        reg_err1 = readl(phba->sli4_hba.u.if_type2.ERR1regaddr);
+        reg_err2 = readl(phba->sli4_hba.u.if_type2.ERR2regaddr);
         if (bf_get(lpfc_sliport_status_oti, &portstat_reg)) {
             /* TODO: Register for Overtemp async events. */
             lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -1459,8 +1464,20 @@ lpfc_handle_eratt_s4(struct lpfc_hba *phba)
             phba->over_temp_state = HBA_OVER_TEMP;
             spin_unlock_irq(&phba->hbalock);
             lpfc_sli4_offline_eratt(phba);
-            return;
+            break;
         }
+        if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
+            reg_err2 == SLIPORT_ERR2_REG_FW_RESTART)
+            lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                    "3143 Port Down: Firmware Restarted\n");
+        else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
+             reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP)
+            lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                    "3144 Port Down: Debug Dump\n");
+        else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
+             reg_err2 == SLIPORT_ERR2_REG_FUNC_PROVISON)
+            lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                    "3145 Port Down: Provisioning\n");
         /*
          * On error status condition, driver need to wait for port
          * ready before performing reset.
@@ -1469,14 +1486,19 @@ lpfc_handle_eratt_s4(struct lpfc_hba *phba)
         if (!rc) {
             /* need reset: attempt for port recovery */
             lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-                    "2887 Port Error: Attempting "
-                    "Port Recovery\n");
+                    "2887 Reset Needed: Attempting Port "
+                    "Recovery...\n");
             lpfc_offline_prep(phba);
             lpfc_offline(phba);
             lpfc_sli_brdrestart(phba);
             if (lpfc_online(phba) == 0) {
                 lpfc_unblock_mgmt_io(phba);
-                return;
+                /* don't report event on forced debug dump */
+                if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
+                    reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP)
+                    return;
+                else
+                    break;
             }
             /* fall through for not able to recover */
         }
@@ -1486,6 +1508,16 @@ lpfc_handle_eratt_s4(struct lpfc_hba *phba)
     default:
         break;
     }
+    lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+            "3123 Report dump event to upper layer\n");
+    /* Send an internal error event to mgmt application */
+    lpfc_board_errevt_to_mgmt(phba);
+
+    event_data = FC_REG_DUMP_EVENT;
+    shost = lpfc_shost_from_vport(vport);
+    fc_host_post_vendor_event(shost, fc_get_event_number(),
+                  sizeof(event_data), (char *) &event_data,
+                  SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
 }
 
 /**
@@ -6475,6 +6507,7 @@ out_free_fcp_wq:
         phba->sli4_hba.fcp_wq[fcp_wqidx] = NULL;
     }
     kfree(phba->sli4_hba.fcp_wq);
+    phba->sli4_hba.fcp_wq = NULL;
 out_free_els_wq:
     lpfc_sli4_queue_free(phba->sli4_hba.els_wq);
     phba->sli4_hba.els_wq = NULL;
@@ -6487,6 +6520,7 @@ out_free_fcp_cq:
         phba->sli4_hba.fcp_cq[fcp_cqidx] = NULL;
     }
     kfree(phba->sli4_hba.fcp_cq);
+    phba->sli4_hba.fcp_cq = NULL;
 out_free_els_cq:
     lpfc_sli4_queue_free(phba->sli4_hba.els_cq);
     phba->sli4_hba.els_cq = NULL;
@@ -6499,6 +6533,7 @@ out_free_fp_eq:
         phba->sli4_hba.fp_eq[fcp_eqidx] = NULL;
     }
     kfree(phba->sli4_hba.fp_eq);
+    phba->sli4_hba.fp_eq = NULL;
 out_free_sp_eq:
     lpfc_sli4_queue_free(phba->sli4_hba.sp_eq);
     phba->sli4_hba.sp_eq = NULL;
@@ -6532,8 +6567,10 @@ lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
     phba->sli4_hba.els_wq = NULL;
 
     /* Release FCP work queue */
-    for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_wq_count; fcp_qidx++)
-        lpfc_sli4_queue_free(phba->sli4_hba.fcp_wq[fcp_qidx]);
+    if (phba->sli4_hba.fcp_wq != NULL)
+        for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_wq_count;
+             fcp_qidx++)
+            lpfc_sli4_queue_free(phba->sli4_hba.fcp_wq[fcp_qidx]);
     kfree(phba->sli4_hba.fcp_wq);
     phba->sli4_hba.fcp_wq = NULL;
@@ -6553,15 +6590,18 @@ lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
     /* Release FCP response complete queue */
     fcp_qidx = 0;
-    do
-        lpfc_sli4_queue_free(phba->sli4_hba.fcp_cq[fcp_qidx]);
-    while (++fcp_qidx < phba->cfg_fcp_eq_count);
+    if (phba->sli4_hba.fcp_cq != NULL)
+        do
+            lpfc_sli4_queue_free(phba->sli4_hba.fcp_cq[fcp_qidx]);
+        while (++fcp_qidx < phba->cfg_fcp_eq_count);
     kfree(phba->sli4_hba.fcp_cq);
     phba->sli4_hba.fcp_cq = NULL;
 
     /* Release fast-path event queue */
-    for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++)
-        lpfc_sli4_queue_free(phba->sli4_hba.fp_eq[fcp_qidx]);
+    if (phba->sli4_hba.fp_eq != NULL)
+        for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count;
+             fcp_qidx++)
+            lpfc_sli4_queue_free(phba->sli4_hba.fp_eq[fcp_qidx]);
     kfree(phba->sli4_hba.fp_eq);
     phba->sli4_hba.fp_eq = NULL;
@@ -6614,6 +6654,11 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
             phba->sli4_hba.sp_eq->queue_id);
 
     /* Set up fast-path event queue */
+    if (!phba->sli4_hba.fp_eq) {
+        lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                "3147 Fast-path EQs not allocated\n");
+        goto out_destroy_sp_eq;
+    }
     for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) {
         if (!phba->sli4_hba.fp_eq[fcp_eqidx]) {
             lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -6678,6 +6723,12 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
             phba->sli4_hba.sp_eq->queue_id);
 
     /* Set up fast-path FCP Response Complete Queue */
+    if (!phba->sli4_hba.fcp_cq) {
+        lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                "3148 Fast-path FCP CQ array not "
+                "allocated\n");
+        goto out_destroy_els_cq;
+    }
     fcp_cqidx = 0;
     do {
         if (!phba->sli4_hba.fcp_cq[fcp_cqidx]) {
@@ -6757,6 +6808,12 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
             phba->sli4_hba.els_cq->queue_id);
 
     /* Set up fast-path FCP Work Queue */
+    if (!phba->sli4_hba.fcp_wq) {
+        lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                "3149 Fast-path FCP WQ array not "
+                "allocated\n");
+        goto out_destroy_els_wq;
+    }
     for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_wq_count; fcp_wqidx++) {
         if (!phba->sli4_hba.fcp_wq[fcp_wqidx]) {
             lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -6818,18 +6875,21 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
 out_destroy_fcp_wq:
     for (--fcp_wqidx; fcp_wqidx >= 0; fcp_wqidx--)
         lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_wqidx]);
+out_destroy_els_wq:
     lpfc_wq_destroy(phba, phba->sli4_hba.els_wq);
 out_destroy_mbx_wq:
     lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq);
 out_destroy_fcp_cq:
     for (--fcp_cqidx; fcp_cqidx >= 0; fcp_cqidx--)
         lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_cqidx]);
+out_destroy_els_cq:
     lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
 out_destroy_mbx_cq:
     lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);
 out_destroy_fp_eq:
     for (--fcp_eqidx; fcp_eqidx >= 0; fcp_eqidx--)
         lpfc_eq_destroy(phba, phba->sli4_hba.fp_eq[fcp_eqidx]);
+out_destroy_sp_eq:
     lpfc_eq_destroy(phba, phba->sli4_hba.sp_eq);
 out_error:
     return rc;
@@ -6866,13 +6926,18 @@ lpfc_sli4_queue_unset(struct lpfc_hba *phba)
     /* Unset ELS complete queue */
     lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
     /* Unset FCP response complete queue */
-    fcp_qidx = 0;
-    do {
-        lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_qidx]);
-    } while (++fcp_qidx < phba->cfg_fcp_eq_count);
+    if (phba->sli4_hba.fcp_cq) {
+        fcp_qidx = 0;
+        do {
+            lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_qidx]);
+        } while (++fcp_qidx < phba->cfg_fcp_eq_count);
+    }
     /* Unset fast-path event queue */
-    for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++)
-        lpfc_eq_destroy(phba, phba->sli4_hba.fp_eq[fcp_qidx]);
+    if (phba->sli4_hba.fp_eq) {
+        for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count;
+             fcp_qidx++)
+            lpfc_eq_destroy(phba, phba->sli4_hba.fp_eq[fcp_qidx]);
+    }
     /* Unset slow-path event queue */
     lpfc_eq_destroy(phba, phba->sli4_hba.sp_eq);
 }
@@ -7411,22 +7476,25 @@ out:
 static void
 lpfc_sli4_pci_mem_unset(struct lpfc_hba *phba)
 {
-    struct pci_dev *pdev;
-
-    /* Obtain PCI device reference */
-    if (!phba->pcidev)
-        return;
-    else
-        pdev = phba->pcidev;
-
-    /* Free coherent DMA memory allocated */
-
-    /* Unmap I/O memory space */
-    iounmap(phba->sli4_hba.drbl_regs_memmap_p);
-    iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
-    iounmap(phba->sli4_hba.conf_regs_memmap_p);
-
-    return;
+    uint32_t if_type;
+
+    if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
+    switch (if_type) {
+    case LPFC_SLI_INTF_IF_TYPE_0:
+        iounmap(phba->sli4_hba.drbl_regs_memmap_p);
+        iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
+        iounmap(phba->sli4_hba.conf_regs_memmap_p);
+        break;
+    case LPFC_SLI_INTF_IF_TYPE_2:
+        iounmap(phba->sli4_hba.conf_regs_memmap_p);
+        break;
+    case LPFC_SLI_INTF_IF_TYPE_1:
+    default:
+        dev_printk(KERN_ERR, &phba->pcidev->dev,
+               "FATAL - unsupported SLI4 interface type - %d\n",
+               if_type);
+        break;
+    }
 }
 
 /**


@@ -389,7 +389,7 @@ lpfc_els_hbq_alloc(struct lpfc_hba *phba)
 {
     struct hbq_dmabuf *hbqbp;
 
-    hbqbp = kmalloc(sizeof(struct hbq_dmabuf), GFP_KERNEL);
+    hbqbp = kzalloc(sizeof(struct hbq_dmabuf), GFP_KERNEL);
     if (!hbqbp)
         return NULL;
 
@@ -441,7 +441,7 @@ lpfc_sli4_rb_alloc(struct lpfc_hba *phba)
 {
     struct hbq_dmabuf *dma_buf;
 
-    dma_buf = kmalloc(sizeof(struct hbq_dmabuf), GFP_KERNEL);
+    dma_buf = kzalloc(sizeof(struct hbq_dmabuf), GFP_KERNEL);
     if (!dma_buf)
         return NULL;
 


@@ -89,10 +89,15 @@ lpfc_get_iocb_from_iocbq(struct lpfc_iocbq *iocbq)
 static uint32_t
 lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe *wqe)
 {
-    union lpfc_wqe *temp_wqe = q->qe[q->host_index].wqe;
+    union lpfc_wqe *temp_wqe;
     struct lpfc_register doorbell;
     uint32_t host_index;
 
+    /* sanity check on queue memory */
+    if (unlikely(!q))
+        return -ENOMEM;
+    temp_wqe = q->qe[q->host_index].wqe;
+
     /* If the host has not yet processed the next entry then we are done */
     if (((q->host_index + 1) % q->entry_count) == q->hba_index)
         return -ENOMEM;
@@ -134,6 +139,10 @@ lpfc_sli4_wq_release(struct lpfc_queue *q, uint32_t index)
 {
     uint32_t released = 0;
 
+    /* sanity check on queue memory */
+    if (unlikely(!q))
+        return 0;
+
     if (q->hba_index == index)
         return 0;
     do {
@@ -158,10 +167,15 @@ lpfc_sli4_wq_release(struct lpfc_queue *q, uint32_t index)
 static uint32_t
 lpfc_sli4_mq_put(struct lpfc_queue *q, struct lpfc_mqe *mqe)
 {
-    struct lpfc_mqe *temp_mqe = q->qe[q->host_index].mqe;
+    struct lpfc_mqe *temp_mqe;
     struct lpfc_register doorbell;
     uint32_t host_index;
 
+    /* sanity check on queue memory */
+    if (unlikely(!q))
+        return -ENOMEM;
+    temp_mqe = q->qe[q->host_index].mqe;
+
     /* If the host has not yet processed the next entry then we are done */
     if (((q->host_index + 1) % q->entry_count) == q->hba_index)
         return -ENOMEM;
@@ -195,6 +209,10 @@ lpfc_sli4_mq_put(struct lpfc_queue *q, struct lpfc_mqe *mqe)
 static uint32_t
 lpfc_sli4_mq_release(struct lpfc_queue *q)
 {
+    /* sanity check on queue memory */
+    if (unlikely(!q))
+        return 0;
+
     /* Clear the mailbox pointer for completion */
     q->phba->mbox = NULL;
     q->hba_index = ((q->hba_index + 1) % q->entry_count);
@@ -213,7 +231,12 @@ lpfc_sli4_mq_release(struct lpfc_queue *q)
 static struct lpfc_eqe *
 lpfc_sli4_eq_get(struct lpfc_queue *q)
 {
-    struct lpfc_eqe *eqe = q->qe[q->hba_index].eqe;
+    struct lpfc_eqe *eqe;
+
+    /* sanity check on queue memory */
+    if (unlikely(!q))
+        return NULL;
+    eqe = q->qe[q->hba_index].eqe;
 
     /* If the next EQE is not valid then we are done */
     if (!bf_get_le32(lpfc_eqe_valid, eqe))
@@ -248,6 +271,10 @@ lpfc_sli4_eq_release(struct lpfc_queue *q, bool arm)
     struct lpfc_eqe *temp_eqe;
     struct lpfc_register doorbell;
 
+    /* sanity check on queue memory */
+    if (unlikely(!q))
+        return 0;
+
     /* while there are valid entries */
     while (q->hba_index != q->host_index) {
         temp_eqe = q->qe[q->host_index].eqe;
@@ -288,6 +315,10 @@ lpfc_sli4_cq_get(struct lpfc_queue *q)
 {
     struct lpfc_cqe *cqe;
 
+    /* sanity check on queue memory */
+    if (unlikely(!q))
+        return NULL;
+
     /* If the next CQE is not valid then we are done */
     if (!bf_get_le32(lpfc_cqe_valid, q->qe[q->hba_index].cqe))
         return NULL;
@@ -322,6 +353,9 @@ lpfc_sli4_cq_release(struct lpfc_queue *q, bool arm)
     struct lpfc_cqe *temp_qe;
     struct lpfc_register doorbell;
 
+    /* sanity check on queue memory */
+    if (unlikely(!q))
+        return 0;
     /* while there are valid entries */
     while (q->hba_index != q->host_index) {
         temp_qe = q->qe[q->host_index].cqe;
@@ -359,11 +393,17 @@ static int
 lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq,
          struct lpfc_rqe *hrqe, struct lpfc_rqe *drqe)
 {
-    struct lpfc_rqe *temp_hrqe = hq->qe[hq->host_index].rqe;
-    struct lpfc_rqe *temp_drqe = dq->qe[dq->host_index].rqe;
+    struct lpfc_rqe *temp_hrqe;
+    struct lpfc_rqe *temp_drqe;
     struct lpfc_register doorbell;
     int put_index = hq->host_index;
 
+    /* sanity check on queue memory */
+    if (unlikely(!hq) || unlikely(!dq))
+        return -ENOMEM;
+    temp_hrqe = hq->qe[hq->host_index].rqe;
+    temp_drqe = dq->qe[dq->host_index].rqe;
+
     if (hq->type != LPFC_HRQ || dq->type != LPFC_DRQ)
         return -EINVAL;
     if (hq->host_index != dq->host_index)
@@ -402,6 +442,10 @@ lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq,
 static uint32_t
 lpfc_sli4_rq_release(struct lpfc_queue *hq, struct lpfc_queue *dq)
 {
+    /* sanity check on queue memory */
+    if (unlikely(!hq) || unlikely(!dq))
+        return 0;
+
     if ((hq->type != LPFC_HRQ) || (dq->type != LPFC_DRQ))
         return 0;
     hq->hba_index = ((hq->hba_index + 1) % hq->entry_count);
@@ -3851,7 +3895,6 @@ lpfc_sli4_brdreset(struct lpfc_hba *phba)
 {
     struct lpfc_sli *psli = &phba->sli;
     uint16_t cfg_value;
-    uint8_t qindx;
 
     /* Reset HBA */
     lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
@@ -3867,19 +3910,6 @@ lpfc_sli4_brdreset(struct lpfc_hba *phba)
     spin_lock_irq(&phba->hbalock);
     psli->sli_flag &= ~(LPFC_PROCESS_LA);
     phba->fcf.fcf_flag = 0;
-    /* Clean up the child queue list for the CQs */
-    list_del_init(&phba->sli4_hba.mbx_wq->list);
-    list_del_init(&phba->sli4_hba.els_wq->list);
-    list_del_init(&phba->sli4_hba.hdr_rq->list);
-    list_del_init(&phba->sli4_hba.dat_rq->list);
-    list_del_init(&phba->sli4_hba.mbx_cq->list);
-    list_del_init(&phba->sli4_hba.els_cq->list);
-    for (qindx = 0; qindx < phba->cfg_fcp_wq_count; qindx++)
-        list_del_init(&phba->sli4_hba.fcp_wq[qindx]->list);
-    qindx = 0;
-    do
-        list_del_init(&phba->sli4_hba.fcp_cq[qindx]->list);
-    while (++qindx < phba->cfg_fcp_eq_count);
     spin_unlock_irq(&phba->hbalock);
 
     /* Now physically reset the device */
@@ -3892,6 +3922,7 @@ lpfc_sli4_brdreset(struct lpfc_hba *phba)
                   ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
 
     /* Perform FCoE PCI function reset */
+    lpfc_sli4_queue_destroy(phba);
     lpfc_pci_function_reset(phba);
 
     /* Restore PCI cmd register */
@@ -4869,14 +4900,19 @@ lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba)
     lpfc_sli4_cq_release(phba->sli4_hba.mbx_cq, LPFC_QUEUE_REARM);
     lpfc_sli4_cq_release(phba->sli4_hba.els_cq, LPFC_QUEUE_REARM);
     fcp_eqidx = 0;
-    do
-        lpfc_sli4_cq_release(phba->sli4_hba.fcp_cq[fcp_eqidx],
-                     LPFC_QUEUE_REARM);
-    while (++fcp_eqidx < phba->cfg_fcp_eq_count);
+    if (phba->sli4_hba.fcp_cq) {
+        do
+            lpfc_sli4_cq_release(phba->sli4_hba.fcp_cq[fcp_eqidx],
+                         LPFC_QUEUE_REARM);
+        while (++fcp_eqidx < phba->cfg_fcp_eq_count);
+    }
     lpfc_sli4_eq_release(phba->sli4_hba.sp_eq, LPFC_QUEUE_REARM);
-    for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++)
-        lpfc_sli4_eq_release(phba->sli4_hba.fp_eq[fcp_eqidx],
-                     LPFC_QUEUE_REARM);
+    if (phba->sli4_hba.fp_eq) {
+        for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count;
+             fcp_eqidx++)
+            lpfc_sli4_eq_release(phba->sli4_hba.fp_eq[fcp_eqidx],
+                         LPFC_QUEUE_REARM);
+    }
 }
 
 /**
@@ -8083,6 +8119,8 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
          */
        if (piocb->iocb_flag & LPFC_IO_FCP)
            piocb->fcp_wqidx = lpfc_sli4_scmd_to_wqidx_distr(phba);
+       if (unlikely(!phba->sli4_hba.fcp_wq))
+           return IOCB_ERROR;
        if (lpfc_sli4_wq_put(phba->sli4_hba.fcp_wq[piocb->fcp_wqidx],
                     &wqe))
            return IOCB_ERROR;
@@ -9900,7 +9938,7 @@ lpfc_sli4_eratt_read(struct lpfc_hba *phba)
         phba->work_status[1] =
             readl(phba->sli4_hba.u.if_type2.ERR2regaddr);
         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-                "2885 Port Error Detected: "
+                "2885 Port Status Event: "
                 "port status reg 0x%x, "
                 "port smphr reg 0x%x, "
                 "error 1=0x%x, error 2=0x%x\n",
@@ -10906,6 +10944,9 @@ static void
 lpfc_sli4_sp_handle_rel_wcqe(struct lpfc_hba *phba,
                  struct lpfc_wcqe_release *wcqe)
 {
+    /* sanity check on queue memory */
+    if (unlikely(!phba->sli4_hba.els_wq))
+        return;
     /* Check for the slow-path ELS work queue */
     if (bf_get(lpfc_wcqe_r_wq_id, wcqe) == phba->sli4_hba.els_wq->queue_id)
         lpfc_sli4_wq_release(phba->sli4_hba.els_wq,
@@ -10995,6 +11036,10 @@ lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe)
     uint32_t status, rq_id;
     unsigned long iflags;
 
+    /* sanity check on queue memory */
+    if (unlikely(!hrq) || unlikely(!drq))
+        return workposted;
+
     if (bf_get(lpfc_cqe_code, rcqe) == CQE_CODE_RECEIVE_V1)
         rq_id = bf_get(lpfc_rcqe_rq_id_v1, rcqe);
     else
@@ -11129,6 +11174,9 @@ lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe)
     /* Search for completion queue pointer matching this cqid */
     speq = phba->sli4_hba.sp_eq;
+    /* sanity check on queue memory */
+    if (unlikely(!speq))
+        return;
     list_for_each_entry(childq, &speq->child_list, list) {
         if (childq->queue_id == cqid) {
             cq = childq;
@@ -11370,12 +11418,18 @@ lpfc_sli4_fp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
         return;
     }
 
+    if (unlikely(!phba->sli4_hba.fcp_cq)) {
+        lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+                "3146 Fast-path completion queues "
+                "does not exist\n");
+        return;
+    }
     cq = phba->sli4_hba.fcp_cq[fcp_cqidx];
     if (unlikely(!cq)) {
         if (phba->sli.sli_flag & LPFC_SLI_ACTIVE)
             lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
                 "0367 Fast-path completion queue "
-                "does not exist\n");
+                "(%d) does not exist\n", fcp_cqidx);
         return;
     }
@@ -11546,6 +11600,8 @@ lpfc_sli4_fp_intr_handler(int irq, void *dev_id)
     /* Get to the EQ struct associated with this vector */
     fpeq = phba->sli4_hba.fp_eq[fcp_eqidx];
+    if (unlikely(!fpeq))
+        return IRQ_NONE;
 
     /* Check device state for handling interrupt */
     if (unlikely(lpfc_intr_state_check(phba))) {
@@ -11764,6 +11820,9 @@ lpfc_eq_create(struct lpfc_hba *phba, struct lpfc_queue *eq, uint16_t imax)
     uint16_t dmult;
     uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
 
+    /* sanity check on queue memory */
+    if (!eq)
+        return -ENODEV;
     if (!phba->sli4_hba.pc_sli4_params.supported)
         hw_page_size = SLI4_PAGE_SIZE;
 
@@ -11880,6 +11939,9 @@ lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
     union lpfc_sli4_cfg_shdr *shdr;
     uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
 
+    /* sanity check on queue memory */
+    if (!cq || !eq)
+        return -ENODEV;
     if (!phba->sli4_hba.pc_sli4_params.supported)
         hw_page_size = SLI4_PAGE_SIZE;
 
@@ -12062,6 +12124,9 @@ lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq,
     union lpfc_sli4_cfg_shdr *shdr;
     uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
 
+    /* sanity check on queue memory */
+    if (!mq || !cq)
+        return -ENODEV;
     if (!phba->sli4_hba.pc_sli4_params.supported)
         hw_page_size = SLI4_PAGE_SIZE;
 
@@ -12212,6 +12277,9 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
     uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
     struct dma_address *page;
 
+    /* sanity check on queue memory */
+    if (!wq || !cq)
+        return -ENODEV;
     if (!phba->sli4_hba.pc_sli4_params.supported)
         hw_page_size = SLI4_PAGE_SIZE;
 
@@ -12304,6 +12372,9 @@ lpfc_rq_adjust_repost(struct lpfc_hba *phba, struct lpfc_queue *rq, int qno)
 {
     uint32_t cnt;
 
+    /* sanity check on queue memory */
+    if (!rq)
+        return;
     cnt = lpfc_hbq_defs[qno]->entry_count;
 
     /* Recalc repost for RQs based on buffers initially posted */
@@ -12349,6 +12420,9 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
     union lpfc_sli4_cfg_shdr *shdr;
     uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
 
+    /* sanity check on queue memory */
+    if (!hrq || !drq || !cq)
+        return -ENODEV;
     if (!phba->sli4_hba.pc_sli4_params.supported)
         hw_page_size = SLI4_PAGE_SIZE;
 
@@ -12550,6 +12624,7 @@ lpfc_eq_destroy(struct lpfc_hba *phba, struct lpfc_queue *eq)
     uint32_t shdr_status, shdr_add_status;
     union lpfc_sli4_cfg_shdr *shdr;
 
+    /* sanity check on queue memory */
     if (!eq)
         return -ENODEV;
     mbox = mempool_alloc(eq->phba->mbox_mem_pool, GFP_KERNEL);
@@ -12605,6 +12680,7 @@ lpfc_cq_destroy(struct lpfc_hba *phba, struct lpfc_queue *cq)
     uint32_t shdr_status, shdr_add_status;
     union lpfc_sli4_cfg_shdr *shdr;
 
+    /* sanity check on queue memory */
     if (!cq)
         return -ENODEV;
     mbox = mempool_alloc(cq->phba->mbox_mem_pool, GFP_KERNEL);
@@ -12658,6 +12734,7 @@ lpfc_mq_destroy(struct lpfc_hba *phba, struct lpfc_queue *mq)
     uint32_t shdr_status, shdr_add_status;
     union lpfc_sli4_cfg_shdr *shdr;
 
+    /* sanity check on queue memory */
     if (!mq)
         return -ENODEV;
     mbox = mempool_alloc(mq->phba->mbox_mem_pool, GFP_KERNEL);
@@ -12711,6 +12788,7 @@ lpfc_wq_destroy(struct lpfc_hba *phba, struct lpfc_queue *wq)
     uint32_t shdr_status, shdr_add_status;
     union lpfc_sli4_cfg_shdr *shdr;
 
+    /* sanity check on queue memory */
     if (!wq)
         return -ENODEV;
     mbox = mempool_alloc(wq->phba->mbox_mem_pool, GFP_KERNEL);
@@ -12764,6 +12842,7 @@ lpfc_rq_destroy(struct lpfc_hba *phba, struct lpfc_queue *hrq,
     uint32_t shdr_status, shdr_add_status;
     union lpfc_sli4_cfg_shdr *shdr;
 
+    /* sanity check on queue memory */
     if (!hrq || !drq)
         return -ENODEV;
     mbox = mempool_alloc(hrq->phba->mbox_mem_pool, GFP_KERNEL);


@@ -420,7 +420,16 @@ struct lpfc_sli4_hba {
             void __iomem *STATUSregaddr;
             void __iomem *CTRLregaddr;
             void __iomem *ERR1regaddr;
+#define SLIPORT_ERR1_REG_ERR_CODE_1		0x1
+#define SLIPORT_ERR1_REG_ERR_CODE_2		0x2
             void __iomem *ERR2regaddr;
+#define SLIPORT_ERR2_REG_FW_RESTART		0x0
+#define SLIPORT_ERR2_REG_FUNC_PROVISON		0x1
+#define SLIPORT_ERR2_REG_FORCED_DUMP		0x2
+#define SLIPORT_ERR2_REG_FAILURE_EQ		0x3
+#define SLIPORT_ERR2_REG_FAILURE_CQ		0x4
+#define SLIPORT_ERR2_REG_FAILURE_BUS		0x5
+#define SLIPORT_ERR2_REG_FAILURE_RQ		0x6
         } if_type2;
     } u;