[SCSI] bfa: Revert the current LUN masking implementation.

This patch reverts the current LUN masking implementation.  The feature has
been re-implemented using the SCSI slave callouts, per the review comments.
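
For context only, below is a minimal, hedged sketch of how LUN masking can be
enforced from the SCSI mid-layer's slave_alloc callout instead of filtering in
the I/O path.  example_slave_alloc() and lun_is_masked() are hypothetical names
used for illustration and are not the driver's actual re-implementation.

/*
 * Illustrative sketch only -- not the bfa driver's code.  A LUN can be
 * hidden from the SCSI mid-layer by failing slave_alloc for it.
 */
#include <linux/types.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>

/* hypothetical helper: would consult the driver's LUN-mask table */
static bool lun_is_masked(struct scsi_device *sdev)
{
	return false;
}

static int example_slave_alloc(struct scsi_device *sdev)
{
	/*
	 * A negative return from slave_alloc makes the mid-layer drop the
	 * device during scan, so masked LUNs never reach the fast path.
	 */
	if (lun_is_masked(sdev))
		return -ENXIO;
	return 0;
}

/* wired into the driver's scsi_host_template, e.g.:
 *	.slave_alloc = example_slave_alloc,
 */

Rejecting the device at scan time keeps the per-command path free of masking
checks, which is the point of moving the feature into the slave callouts.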

Signed-off-by: Krishna Gudipati <kgudipat@brocade.com>
Signed-off-by: James Bottomley <JBottomley@Parallels.com>
Krishna Gudipati, 2011-12-20 18:55:07 -08:00 (committed by James Bottomley)
parent 6bc6204e3b
commit 8ca2dd87e7
5 changed files with 2 additions and 582 deletions

@@ -673,12 +673,7 @@ struct bfa_itnim_iostats_s {
u32 tm_iocdowns; /* TM cleaned-up due to IOC down */
u32 tm_cleanups; /* TM cleanup requests */
u32 tm_cleanup_comps; /* TM cleanup completions */
u32 lm_lun_across_sg; /* LM lun is across sg data buf */
u32 lm_lun_not_sup; /* LM lun not supported */
u32 lm_rpl_data_changed; /* LM report-lun data changed */
u32 lm_wire_residue_changed; /* LM report-lun rsp residue changed */
u32 lm_small_buf_addresidue; /* LM buf smaller than reported cnt */
u32 lm_lun_not_rdy; /* LM lun not ready */
u32 rsvd[6];
};
/* Modify char* port_stt[] in bfal_port.c if a new state was added */

@@ -56,161 +56,6 @@ struct scsi_cdb_s {
#define SCSI_MAX_ALLOC_LEN 0xFF /* maximum allocarion length */
#define SCSI_SENSE_CUR_ERR 0x70
#define SCSI_SENSE_DEF_ERR 0x71
/*
* SCSI additional sense codes
*/
#define SCSI_ASC_LUN_NOT_READY 0x04
#define SCSI_ASC_LUN_NOT_SUPPORTED 0x25
#define SCSI_ASC_TOCC 0x3F
/*
* SCSI additional sense code qualifiers
*/
#define SCSI_ASCQ_MAN_INTR_REQ 0x03 /* manual intervention req */
#define SCSI_ASCQ_RL_DATA_CHANGED 0x0E /* report luns data changed */
/*
* Methods of reporting informational exceptions
*/
#define SCSI_MP_IEC_UNIT_ATTN 0x2 /* generate unit attention */
struct scsi_report_luns_data_s {
u32 lun_list_length; /* length of LUN list length */
u32 reserved;
struct scsi_lun lun[1]; /* first LUN in lun list */
};
struct scsi_inquiry_vendor_s {
u8 vendor_id[8];
};
struct scsi_inquiry_prodid_s {
u8 product_id[16];
};
struct scsi_inquiry_prodrev_s {
u8 product_rev[4];
};
struct scsi_inquiry_data_s {
#ifdef __BIG_ENDIAN
u8 peripheral_qual:3; /* peripheral qualifier */
u8 device_type:5; /* peripheral device type */
u8 rmb:1; /* removable medium bit */
u8 device_type_mod:7; /* device type modifier */
u8 version;
u8 aenc:1; /* async evt notification capability */
u8 trm_iop:1; /* terminate I/O process */
u8 norm_aca:1; /* normal ACA supported */
u8 hi_support:1; /* SCSI-3: supports REPORT LUNS */
u8 rsp_data_format:4;
u8 additional_len;
u8 sccs:1;
u8 reserved1:7;
u8 reserved2:1;
u8 enc_serv:1; /* enclosure service component */
u8 reserved3:1;
u8 multi_port:1; /* multi-port device */
u8 m_chngr:1; /* device in medium transport element */
u8 ack_req_q:1; /* SIP specific bit */
u8 addr32:1; /* SIP specific bit */
u8 addr16:1; /* SIP specific bit */
u8 rel_adr:1; /* relative address */
u8 w_bus32:1;
u8 w_bus16:1;
u8 synchronous:1;
u8 linked_commands:1;
u8 trans_dis:1;
u8 cmd_queue:1; /* command queueing supported */
u8 soft_reset:1; /* soft reset alternative (VS) */
#else
u8 device_type:5; /* peripheral device type */
u8 peripheral_qual:3; /* peripheral qualifier */
u8 device_type_mod:7; /* device type modifier */
u8 rmb:1; /* removable medium bit */
u8 version;
u8 rsp_data_format:4;
u8 hi_support:1; /* SCSI-3: supports REPORT LUNS */
u8 norm_aca:1; /* normal ACA supported */
u8 terminate_iop:1;/* terminate I/O process */
u8 aenc:1; /* async evt notification capability */
u8 additional_len;
u8 reserved1:7;
u8 sccs:1;
u8 addr16:1; /* SIP specific bit */
u8 addr32:1; /* SIP specific bit */
u8 ack_req_q:1; /* SIP specific bit */
u8 m_chngr:1; /* device in medium transport element */
u8 multi_port:1; /* multi-port device */
u8 reserved3:1; /* TBD - Vendor Specific */
u8 enc_serv:1; /* enclosure service component */
u8 reserved2:1;
u8 soft_seset:1; /* soft reset alternative (VS) */
u8 cmd_queue:1; /* command queueing supported */
u8 trans_dis:1;
u8 linked_commands:1;
u8 synchronous:1;
u8 w_bus16:1;
u8 w_bus32:1;
u8 rel_adr:1; /* relative address */
#endif
struct scsi_inquiry_vendor_s vendor_id;
struct scsi_inquiry_prodid_s product_id;
struct scsi_inquiry_prodrev_s product_rev;
u8 vendor_specific[20];
u8 reserved4[40];
};
/*
* SCSI sense data format
*/
struct scsi_sense_s {
#ifdef __BIG_ENDIAN
u8 valid:1;
u8 rsp_code:7;
#else
u8 rsp_code:7;
u8 valid:1;
#endif
u8 seg_num;
#ifdef __BIG_ENDIAN
u8 file_mark:1;
u8 eom:1; /* end of media */
u8 ili:1; /* incorrect length indicator */
u8 reserved:1;
u8 sense_key:4;
#else
u8 sense_key:4;
u8 reserved:1;
u8 ili:1; /* incorrect length indicator */
u8 eom:1; /* end of media */
u8 file_mark:1;
#endif
u8 information[4]; /* device-type or cmd specific info */
u8 add_sense_length; /* additional sense length */
u8 command_info[4];/* command specific information */
u8 asc; /* additional sense code */
u8 ascq; /* additional sense code qualifier */
u8 fru_code; /* field replaceable unit code */
#ifdef __BIG_ENDIAN
u8 sksv:1; /* sense key specific valid */
u8 c_d:1; /* command/data bit */
u8 res1:2;
u8 bpv:1; /* bit pointer valid */
u8 bpointer:3; /* bit pointer */
#else
u8 bpointer:3; /* bit pointer */
u8 bpv:1; /* bit pointer valid */
u8 res1:2;
u8 c_d:1; /* command/data bit */
u8 sksv:1; /* sense key specific valid */
#endif
u8 fpointer[2]; /* field pointer */
};
/*
* Fibre Channel Header Structure (FCHS) definition
*/

@@ -24,8 +24,6 @@ BFA_TRC_FILE(HAL, FCPIM);
* BFA ITNIM Related definitions
*/
static void bfa_itnim_update_del_itn_stats(struct bfa_itnim_s *itnim);
static bfa_boolean_t bfa_ioim_lm_proc_rpl_data(struct bfa_ioim_s *ioim);
static bfa_boolean_t bfa_ioim_lm_proc_inq_data(struct bfa_ioim_s *ioim);
static void bfa_ioim_lm_init(struct bfa_s *bfa);
#define BFA_ITNIM_FROM_TAG(_fcpim, _tag) \
@@ -60,14 +58,6 @@ static void bfa_ioim_lm_init(struct bfa_s *bfa);
} \
} while (0)
#define bfa_ioim_rp_wwn(__ioim) \
(((struct bfa_fcs_rport_s *) \
(__ioim)->itnim->rport->rport_drv)->pwwn)
#define bfa_ioim_lp_wwn(__ioim) \
((BFA_LPS_FROM_TAG(BFA_LPS_MOD((__ioim)->bfa), \
(__ioim)->itnim->rport->rport_info.lp_tag))->pwwn) \
#define bfa_itnim_sler_cb(__itnim) do { \
if ((__itnim)->bfa->fcs) \
bfa_cb_itnim_sler((__itnim)->ditn); \
@@ -77,13 +67,6 @@ static void bfa_ioim_lm_init(struct bfa_s *bfa);
} \
} while (0)
enum bfa_ioim_lm_status {
BFA_IOIM_LM_PRESENT = 1,
BFA_IOIM_LM_LUN_NOT_SUP = 2,
BFA_IOIM_LM_RPL_DATA_CHANGED = 3,
BFA_IOIM_LM_LUN_NOT_RDY = 4,
};
enum bfa_ioim_lm_ua_status {
BFA_IOIM_LM_UA_RESET = 0,
BFA_IOIM_LM_UA_SET = 1,
@@ -145,9 +128,6 @@ enum bfa_ioim_event {
BFA_IOIM_SM_TMDONE = 16, /* IO cleanup from tskim */
BFA_IOIM_SM_HWFAIL = 17, /* IOC h/w failure event */
BFA_IOIM_SM_IOTOV = 18, /* ITN offline TOV */
BFA_IOIM_SM_LM_LUN_NOT_SUP = 19,/* lunmask lun not supported */
BFA_IOIM_SM_LM_RPL_DC = 20, /* lunmask report-lun data changed */
BFA_IOIM_SM_LM_LUN_NOT_RDY = 21,/* lunmask lun not ready */
};
@@ -245,9 +225,6 @@ static void __bfa_cb_ioim_abort(void *cbarg, bfa_boolean_t complete);
static void __bfa_cb_ioim_failed(void *cbarg, bfa_boolean_t complete);
static void __bfa_cb_ioim_pathtov(void *cbarg, bfa_boolean_t complete);
static bfa_boolean_t bfa_ioim_is_abortable(struct bfa_ioim_s *ioim);
static void __bfa_cb_ioim_lm_lun_not_sup(void *cbarg, bfa_boolean_t complete);
static void __bfa_cb_ioim_lm_rpl_dc(void *cbarg, bfa_boolean_t complete);
static void __bfa_cb_ioim_lm_lun_not_rdy(void *cbarg, bfa_boolean_t complete);
/*
* forward declaration of BFA IO state machine
@@ -445,12 +422,6 @@ bfa_fcpim_add_stats(struct bfa_itnim_iostats_s *lstats,
bfa_fcpim_add_iostats(lstats, rstats, output_reqs);
bfa_fcpim_add_iostats(lstats, rstats, rd_throughput);
bfa_fcpim_add_iostats(lstats, rstats, wr_throughput);
bfa_fcpim_add_iostats(lstats, rstats, lm_lun_across_sg);
bfa_fcpim_add_iostats(lstats, rstats, lm_lun_not_sup);
bfa_fcpim_add_iostats(lstats, rstats, lm_rpl_data_changed);
bfa_fcpim_add_iostats(lstats, rstats, lm_wire_residue_changed);
bfa_fcpim_add_iostats(lstats, rstats, lm_small_buf_addresidue);
bfa_fcpim_add_iostats(lstats, rstats, lm_lun_not_rdy);
}
bfa_status_t
@@ -1580,27 +1551,6 @@ bfa_ioim_sm_uninit(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
__bfa_cb_ioim_abort, ioim);
break;
case BFA_IOIM_SM_LM_LUN_NOT_SUP:
bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
bfa_ioim_move_to_comp_q(ioim);
bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
__bfa_cb_ioim_lm_lun_not_sup, ioim);
break;
case BFA_IOIM_SM_LM_RPL_DC:
bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
bfa_ioim_move_to_comp_q(ioim);
bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
__bfa_cb_ioim_lm_rpl_dc, ioim);
break;
case BFA_IOIM_SM_LM_LUN_NOT_RDY:
bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
bfa_ioim_move_to_comp_q(ioim);
bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
__bfa_cb_ioim_lm_lun_not_rdy, ioim);
break;
default:
bfa_sm_fault(ioim->bfa, event);
}
@@ -2160,243 +2110,6 @@ bfa_ioim_lm_init(struct bfa_s *bfa)
}
}
/*
* Validate LUN for LUN masking
*/
static enum bfa_ioim_lm_status
bfa_ioim_lm_check(struct bfa_ioim_s *ioim, struct bfa_lps_s *lps,
struct bfa_rport_s *rp, struct scsi_lun lun)
{
u8 i;
struct bfa_lun_mask_s *lun_list = bfa_get_lun_mask_list(ioim->bfa);
struct scsi_cmnd *cmnd = (struct scsi_cmnd *)ioim->dio;
struct scsi_cdb_s *cdb = (struct scsi_cdb_s *)cmnd->cmnd;
if ((cdb->scsi_cdb[0] == REPORT_LUNS) &&
(scsilun_to_int((struct scsi_lun *)&lun) == 0)) {
ioim->proc_rsp_data = bfa_ioim_lm_proc_rpl_data;
return BFA_IOIM_LM_PRESENT;
}
for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
if (lun_list[i].state != BFA_IOIM_LUN_MASK_ACTIVE)
continue;
if ((scsilun_to_int((struct scsi_lun *)&lun_list[i].lun) ==
scsilun_to_int((struct scsi_lun *)&lun))
&& (rp->rport_tag == lun_list[i].rp_tag)
&& ((u8)ioim->itnim->rport->rport_info.lp_tag ==
lun_list[i].lp_tag)) {
bfa_trc(ioim->bfa, lun_list[i].rp_tag);
bfa_trc(ioim->bfa, lun_list[i].lp_tag);
bfa_trc(ioim->bfa, scsilun_to_int(
(struct scsi_lun *)&lun_list[i].lun));
if ((lun_list[i].ua == BFA_IOIM_LM_UA_SET) &&
((cdb->scsi_cdb[0] != INQUIRY) ||
(cdb->scsi_cdb[0] != REPORT_LUNS))) {
lun_list[i].ua = BFA_IOIM_LM_UA_RESET;
return BFA_IOIM_LM_RPL_DATA_CHANGED;
}
if (cdb->scsi_cdb[0] == REPORT_LUNS)
ioim->proc_rsp_data = bfa_ioim_lm_proc_rpl_data;
return BFA_IOIM_LM_PRESENT;
}
}
if ((cdb->scsi_cdb[0] == INQUIRY) &&
(scsilun_to_int((struct scsi_lun *)&lun) == 0)) {
ioim->proc_rsp_data = bfa_ioim_lm_proc_inq_data;
return BFA_IOIM_LM_PRESENT;
}
if (cdb->scsi_cdb[0] == TEST_UNIT_READY)
return BFA_IOIM_LM_LUN_NOT_RDY;
return BFA_IOIM_LM_LUN_NOT_SUP;
}
static bfa_boolean_t
bfa_ioim_lm_proc_rsp_data_dummy(struct bfa_ioim_s *ioim)
{
return BFA_TRUE;
}
static void
bfa_ioim_lm_fetch_lun(struct bfa_ioim_s *ioim, u8 *rl_data, int offset,
int buf_lun_cnt)
{
struct bfa_lun_mask_s *lun_list = bfa_get_lun_mask_list(ioim->bfa);
struct scsi_lun *lun_data = (struct scsi_lun *)(rl_data + offset);
struct scsi_lun lun;
int i, j;
bfa_trc(ioim->bfa, buf_lun_cnt);
for (j = 0; j < buf_lun_cnt; j++) {
lun = *((struct scsi_lun *)(lun_data + j));
for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
if (lun_list[i].state != BFA_IOIM_LUN_MASK_ACTIVE)
continue;
if ((lun_list[i].rp_wwn == bfa_ioim_rp_wwn(ioim)) &&
(lun_list[i].lp_wwn == bfa_ioim_lp_wwn(ioim)) &&
(scsilun_to_int((struct scsi_lun *)&lun_list[i].lun)
== scsilun_to_int((struct scsi_lun *)&lun))) {
lun_list[i].state = BFA_IOIM_LUN_MASK_FETCHED;
break;
}
} /* next lun in mask DB */
} /* next lun in buf */
}
static int
bfa_ioim_lm_update_lun_sg(struct bfa_ioim_s *ioim, u32 *pgdlen,
struct scsi_report_luns_data_s *rl)
{
struct scsi_cmnd *cmnd = (struct scsi_cmnd *)ioim->dio;
struct scatterlist *sg = scsi_sglist(cmnd);
struct bfa_lun_mask_s *lun_list = bfa_get_lun_mask_list(ioim->bfa);
struct scsi_lun *prev_rl_data = NULL, *base_rl_data;
int i, j, sgeid, lun_fetched_cnt = 0, prev_sg_len = 0, base_count;
int lun_across_sg_bytes, bytes_from_next_buf;
u64 last_lun, temp_last_lun;
/* fetch luns from the first sg element */
bfa_ioim_lm_fetch_lun(ioim, (u8 *)(rl->lun), 0,
(sg_dma_len(sg) / sizeof(struct scsi_lun)) - 1);
/* fetch luns from multiple sg elements */
scsi_for_each_sg(cmnd, sg, scsi_sg_count(cmnd), sgeid) {
if (sgeid == 0) {
prev_sg_len = sg_dma_len(sg);
prev_rl_data = (struct scsi_lun *)
phys_to_virt(sg_dma_address(sg));
continue;
}
/* if the buf is having more data */
lun_across_sg_bytes = prev_sg_len % sizeof(struct scsi_lun);
if (lun_across_sg_bytes) {
bfa_trc(ioim->bfa, lun_across_sg_bytes);
bfa_stats(ioim->itnim, lm_lun_across_sg);
bytes_from_next_buf = sizeof(struct scsi_lun) -
lun_across_sg_bytes;
/* from next buf take higher bytes */
temp_last_lun = *((u64 *)
phys_to_virt(sg_dma_address(sg)));
last_lun |= temp_last_lun >>
(lun_across_sg_bytes * BITS_PER_BYTE);
/* from prev buf take higher bytes */
temp_last_lun = *((u64 *)(prev_rl_data +
(prev_sg_len - lun_across_sg_bytes)));
temp_last_lun >>= bytes_from_next_buf * BITS_PER_BYTE;
last_lun = last_lun | (temp_last_lun <<
(bytes_from_next_buf * BITS_PER_BYTE));
bfa_ioim_lm_fetch_lun(ioim, (u8 *)&last_lun, 0, 1);
} else
bytes_from_next_buf = 0;
*pgdlen += sg_dma_len(sg);
prev_sg_len = sg_dma_len(sg);
prev_rl_data = (struct scsi_lun *)
phys_to_virt(sg_dma_address(sg));
bfa_ioim_lm_fetch_lun(ioim, (u8 *)prev_rl_data,
bytes_from_next_buf,
sg_dma_len(sg) / sizeof(struct scsi_lun));
}
/* update the report luns data - based on fetched luns */
sg = scsi_sglist(cmnd);
base_rl_data = (struct scsi_lun *)rl->lun;
base_count = (sg_dma_len(sg) / sizeof(struct scsi_lun)) - 1;
for (i = 0, j = 0; i < MAX_LUN_MASK_CFG; i++) {
if (lun_list[i].state == BFA_IOIM_LUN_MASK_FETCHED) {
base_rl_data[j] = lun_list[i].lun;
lun_list[i].state = BFA_IOIM_LUN_MASK_ACTIVE;
j++;
lun_fetched_cnt++;
}
if (j > base_count) {
j = 0;
sg = sg_next(sg);
base_rl_data = (struct scsi_lun *)
phys_to_virt(sg_dma_address(sg));
base_count = sg_dma_len(sg) / sizeof(struct scsi_lun);
}
}
bfa_trc(ioim->bfa, lun_fetched_cnt);
return lun_fetched_cnt;
}
static bfa_boolean_t
bfa_ioim_lm_proc_inq_data(struct bfa_ioim_s *ioim)
{
struct scsi_inquiry_data_s *inq;
struct scatterlist *sg = scsi_sglist((struct scsi_cmnd *)ioim->dio);
ioim->proc_rsp_data = bfa_ioim_lm_proc_rsp_data_dummy;
inq = (struct scsi_inquiry_data_s *)phys_to_virt(sg_dma_address(sg));
bfa_trc(ioim->bfa, inq->device_type);
inq->peripheral_qual = SCSI_INQ_PQ_NOT_CON;
return 0;
}
static bfa_boolean_t
bfa_ioim_lm_proc_rpl_data(struct bfa_ioim_s *ioim)
{
struct scsi_cmnd *cmnd = (struct scsi_cmnd *)ioim->dio;
struct scatterlist *sg = scsi_sglist(cmnd);
struct bfi_ioim_rsp_s *m;
struct scsi_report_luns_data_s *rl = NULL;
int lun_count = 0, lun_fetched_cnt = 0;
u32 residue, pgdlen = 0;
ioim->proc_rsp_data = bfa_ioim_lm_proc_rsp_data_dummy;
if (bfa_get_lun_mask_status(ioim->bfa) != BFA_LUNMASK_ENABLED)
return BFA_TRUE;
m = (struct bfi_ioim_rsp_s *) &ioim->iosp->comp_rspmsg;
if (m->scsi_status == SCSI_STATUS_CHECK_CONDITION)
return BFA_TRUE;
pgdlen = sg_dma_len(sg);
bfa_trc(ioim->bfa, pgdlen);
rl = (struct scsi_report_luns_data_s *)phys_to_virt(sg_dma_address(sg));
lun_count = cpu_to_be32(rl->lun_list_length) / sizeof(struct scsi_lun);
lun_fetched_cnt = bfa_ioim_lm_update_lun_sg(ioim, &pgdlen, rl);
if (lun_count == lun_fetched_cnt)
return BFA_TRUE;
bfa_trc(ioim->bfa, lun_count);
bfa_trc(ioim->bfa, lun_fetched_cnt);
bfa_trc(ioim->bfa, be32_to_cpu(rl->lun_list_length));
if (be32_to_cpu(rl->lun_list_length) <= pgdlen)
rl->lun_list_length = be32_to_cpu(lun_fetched_cnt) *
sizeof(struct scsi_lun);
else
bfa_stats(ioim->itnim, lm_small_buf_addresidue);
bfa_trc(ioim->bfa, be32_to_cpu(rl->lun_list_length));
bfa_trc(ioim->bfa, be32_to_cpu(m->residue));
residue = be32_to_cpu(m->residue);
residue += (lun_count - lun_fetched_cnt) * sizeof(struct scsi_lun);
bfa_stats(ioim->itnim, lm_wire_residue_changed);
m->residue = be32_to_cpu(residue);
bfa_trc(ioim->bfa, ioim->nsges);
return BFA_FALSE;
}
static void
__bfa_cb_ioim_good_comp(void *cbarg, bfa_boolean_t complete)
{
@@ -2454,83 +2167,6 @@ __bfa_cb_ioim_comp(void *cbarg, bfa_boolean_t complete)
m->scsi_status, sns_len, snsinfo, residue);
}
static void
__bfa_cb_ioim_lm_lun_not_sup(void *cbarg, bfa_boolean_t complete)
{
struct bfa_ioim_s *ioim = cbarg;
int sns_len = 0xD;
u32 residue = scsi_bufflen((struct scsi_cmnd *)ioim->dio);
struct scsi_sense_s *snsinfo;
if (!complete) {
bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
return;
}
snsinfo = (struct scsi_sense_s *)BFA_SNSINFO_FROM_TAG(
ioim->fcpim->fcp, ioim->iotag);
snsinfo->rsp_code = SCSI_SENSE_CUR_ERR;
snsinfo->add_sense_length = 0xa;
snsinfo->asc = SCSI_ASC_LUN_NOT_SUPPORTED;
snsinfo->sense_key = ILLEGAL_REQUEST;
bfa_trc(ioim->bfa, residue);
bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_OK,
SCSI_STATUS_CHECK_CONDITION, sns_len,
(u8 *)snsinfo, residue);
}
static void
__bfa_cb_ioim_lm_rpl_dc(void *cbarg, bfa_boolean_t complete)
{
struct bfa_ioim_s *ioim = cbarg;
int sns_len = 0xD;
u32 residue = scsi_bufflen((struct scsi_cmnd *)ioim->dio);
struct scsi_sense_s *snsinfo;
if (!complete) {
bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
return;
}
snsinfo = (struct scsi_sense_s *)BFA_SNSINFO_FROM_TAG(ioim->fcpim->fcp,
ioim->iotag);
snsinfo->rsp_code = SCSI_SENSE_CUR_ERR;
snsinfo->sense_key = SCSI_MP_IEC_UNIT_ATTN;
snsinfo->asc = SCSI_ASC_TOCC;
snsinfo->add_sense_length = 0x6;
snsinfo->ascq = SCSI_ASCQ_RL_DATA_CHANGED;
bfa_trc(ioim->bfa, residue);
bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_OK,
SCSI_STATUS_CHECK_CONDITION, sns_len,
(u8 *)snsinfo, residue);
}
static void
__bfa_cb_ioim_lm_lun_not_rdy(void *cbarg, bfa_boolean_t complete)
{
struct bfa_ioim_s *ioim = cbarg;
int sns_len = 0xD;
u32 residue = scsi_bufflen((struct scsi_cmnd *)ioim->dio);
struct scsi_sense_s *snsinfo;
if (!complete) {
bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
return;
}
snsinfo = (struct scsi_sense_s *)BFA_SNSINFO_FROM_TAG(
ioim->fcpim->fcp, ioim->iotag);
snsinfo->rsp_code = SCSI_SENSE_CUR_ERR;
snsinfo->add_sense_length = 0xa;
snsinfo->sense_key = NOT_READY;
snsinfo->asc = SCSI_ASC_LUN_NOT_READY;
snsinfo->ascq = SCSI_ASCQ_MAN_INTR_REQ;
bfa_trc(ioim->bfa, residue);
bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_OK,
SCSI_STATUS_CHECK_CONDITION, sns_len,
(u8 *)snsinfo, residue);
}
void
bfa_fcpim_lunmask_rp_update(struct bfa_s *bfa, wwn_t lp_wwn, wwn_t rp_wwn,
u16 rp_tag, u8 lp_tag)
@@ -2759,7 +2395,6 @@ __bfa_cb_ioim_failed(void *cbarg, bfa_boolean_t complete)
return;
}
ioim->proc_rsp_data = bfa_ioim_lm_proc_rsp_data_dummy;
bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_ABORTED,
0, 0, NULL, 0);
}
@@ -2775,7 +2410,6 @@ __bfa_cb_ioim_pathtov(void *cbarg, bfa_boolean_t complete)
return;
}
ioim->proc_rsp_data = bfa_ioim_lm_proc_rsp_data_dummy;
bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_PATHTOV,
0, 0, NULL, 0);
}
@@ -2790,7 +2424,6 @@ __bfa_cb_ioim_abort(void *cbarg, bfa_boolean_t complete)
return;
}
ioim->proc_rsp_data = bfa_ioim_lm_proc_rsp_data_dummy;
bfa_cb_ioim_abort(ioim->bfa->bfad, ioim->dio);
}
@@ -3134,7 +2767,6 @@ bfa_ioim_attach(struct bfa_fcpim_s *fcpim)
ioim->bfa = fcpim->bfa;
ioim->fcpim = fcpim;
ioim->iosp = iosp;
ioim->proc_rsp_data = bfa_ioim_lm_proc_rsp_data_dummy;
INIT_LIST_HEAD(&ioim->sgpg_q);
bfa_reqq_winit(&ioim->iosp->reqq_wait,
bfa_ioim_qresume, ioim);
@@ -3172,7 +2804,6 @@ bfa_ioim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
evt = BFA_IOIM_SM_DONE;
else
evt = BFA_IOIM_SM_COMP;
ioim->proc_rsp_data(ioim);
break;
case BFI_IOIM_STS_TIMEDOUT:
@@ -3208,7 +2839,6 @@ bfa_ioim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
if (rsp->abort_tag != ioim->abort_tag) {
bfa_trc(ioim->bfa, rsp->abort_tag);
bfa_trc(ioim->bfa, ioim->abort_tag);
ioim->proc_rsp_data = bfa_ioim_lm_proc_rsp_data_dummy;
return;
}
@@ -3227,7 +2857,6 @@ bfa_ioim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
WARN_ON(1);
}
ioim->proc_rsp_data = bfa_ioim_lm_proc_rsp_data_dummy;
bfa_sm_send_event(ioim, evt);
}
@@ -3246,15 +2875,7 @@ bfa_ioim_good_comp_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
bfa_ioim_cb_profile_comp(fcpim, ioim);
if (bfa_get_lun_mask_status(bfa) != BFA_LUNMASK_ENABLED) {
bfa_sm_send_event(ioim, BFA_IOIM_SM_COMP_GOOD);
return;
}
if (ioim->proc_rsp_data(ioim) == BFA_TRUE)
bfa_sm_send_event(ioim, BFA_IOIM_SM_COMP_GOOD);
else
bfa_sm_send_event(ioim, BFA_IOIM_SM_COMP);
bfa_sm_send_event(ioim, BFA_IOIM_SM_COMP_GOOD);
}
/*
@@ -3366,35 +2987,6 @@ bfa_ioim_free(struct bfa_ioim_s *ioim)
void
bfa_ioim_start(struct bfa_ioim_s *ioim)
{
struct scsi_cmnd *cmnd = (struct scsi_cmnd *)ioim->dio;
struct bfa_lps_s *lps;
enum bfa_ioim_lm_status status;
struct scsi_lun scsilun;
if (bfa_get_lun_mask_status(ioim->bfa) == BFA_LUNMASK_ENABLED) {
lps = BFA_IOIM_TO_LPS(ioim);
int_to_scsilun(cmnd->device->lun, &scsilun);
status = bfa_ioim_lm_check(ioim, lps,
ioim->itnim->rport, scsilun);
if (status == BFA_IOIM_LM_LUN_NOT_RDY) {
bfa_sm_send_event(ioim, BFA_IOIM_SM_LM_LUN_NOT_RDY);
bfa_stats(ioim->itnim, lm_lun_not_rdy);
return;
}
if (status == BFA_IOIM_LM_LUN_NOT_SUP) {
bfa_sm_send_event(ioim, BFA_IOIM_SM_LM_LUN_NOT_SUP);
bfa_stats(ioim->itnim, lm_lun_not_sup);
return;
}
if (status == BFA_IOIM_LM_RPL_DATA_CHANGED) {
bfa_sm_send_event(ioim, BFA_IOIM_SM_LM_RPL_DC);
bfa_stats(ioim->itnim, lm_rpl_data_changed);
return;
}
}
bfa_ioim_cb_profile_start(ioim->fcpim, ioim);
/*

@@ -110,7 +110,6 @@ struct bfad_ioim_s;
struct bfad_tskim_s;
typedef void (*bfa_fcpim_profile_t) (struct bfa_ioim_s *ioim);
typedef bfa_boolean_t (*bfa_ioim_lm_proc_rsp_data_t) (struct bfa_ioim_s *ioim);
struct bfa_fcpim_s {
struct bfa_s *bfa;
@@ -124,7 +123,6 @@ struct bfa_fcpim_s {
u32 path_tov;
u16 q_depth;
u8 reqq; /* Request queue to be used */
u8 lun_masking_pending;
struct list_head itnim_q; /* queue of active itnim */
struct list_head ioim_resfree_q; /* IOs waiting for f/w */
struct list_head ioim_comp_q; /* IO global comp Q */
@@ -181,7 +179,6 @@ struct bfa_ioim_s {
u8 reqq; /* Request queue for I/O */
u8 mode; /* IO is passthrough or not */
u64 start_time; /* IO's Profile start val */
bfa_ioim_lm_proc_rsp_data_t proc_rsp_data; /* RSP data adjust */
};
struct bfa_ioim_sp_s {
@@ -261,10 +258,6 @@ struct bfa_itnim_s {
(__ioim)->iotag |= k << BFA_IOIM_RETRY_TAG_OFFSET; \
} while (0)
#define BFA_IOIM_TO_LPS(__ioim) \
BFA_LPS_FROM_TAG(BFA_LPS_MOD(__ioim->bfa), \
__ioim->itnim->rport->rport_info.lp_tag)
static inline bfa_boolean_t
bfa_ioim_maxretry_reached(struct bfa_ioim_s *ioim)
{

@@ -582,11 +582,6 @@ void bfa_cb_rport_qos_scn_prio(void *rport,
#define BFA_LP_TAG_INVALID 0xff
void bfa_rport_set_lunmask(struct bfa_s *bfa, struct bfa_rport_s *rp);
void bfa_rport_unset_lunmask(struct bfa_s *bfa, struct bfa_rport_s *rp);
bfa_boolean_t bfa_rport_lunmask_active(struct bfa_rport_s *rp);
wwn_t bfa_rport_get_pwwn(struct bfa_s *bfa, struct bfa_rport_s *rp);
struct bfa_rport_s *bfa_rport_get_by_wwn(struct bfa_s *bfa, u16 vf_id,
wwn_t *lpwwn, wwn_t rpwwn);
void *bfa_cb_get_rp_by_wwn(void *arg, u16 vf_id, wwn_t *lpwwn, wwn_t rpwwn);
/*
* bfa fcxp API functions