
Merge git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6

* git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6:
  [SCSI] aic79xx: make driver respect nvram for IU and QAS settings
  [SCSI] don't attach ULD to Dell Universal Xport
  [SCSI] lpfc 8.3.3 : Update driver version to 8.3.3
  [SCSI] lpfc 8.3.3 : Add support for Target Reset handler entrypoint
  [SCSI] lpfc 8.3.3 : Fix a couple of spin_lock and memory issues and a crash
  [SCSI] lpfc 8.3.3 : FC/FCOE discovery fixes
  [SCSI] lpfc 8.3.3 : Fix various SLI-3 vs SLI-4 differences
  [SCSI] qla2xxx: Resolve a performance issue in interrupt
  [SCSI] cnic, bnx2i: Fix build failure when CONFIG_PCI is not set.
  [SCSI] nsp_cs: time_out reaches -1
  [SCSI] qla2xxx: fix printk format warnings
  [SCSI] ncr53c8xx: div reaches -1
  [SCSI] compat: don't perform unneeded copy in sg_io code
  [SCSI] zfcp: Update FC pass-through support
  [SCSI] zfcp: Add FC pass-through support
  [SCSI] FC Pass Thru support
Linus Torvalds 2009-06-17 09:50:44 -07:00
commit aa2638a210
42 changed files with 1898 additions and 515 deletions

View File

@ -1,10 +1,11 @@
SCSI FC Transport
=============================================
Date: 4/12/2007
Date: 11/18/2008
Kernel Revisions for features:
rports : <<TBS>>
vports : 2.6.22 (? TBD)
vports : 2.6.22
bsg support : 2.6.30 (?TBD?)
Introduction
@ -15,6 +16,7 @@ The FC transport can be found at:
drivers/scsi/scsi_transport_fc.c
include/scsi/scsi_transport_fc.h
include/scsi/scsi_netlink_fc.h
include/scsi/scsi_bsg_fc.h
This file is found at Documentation/scsi/scsi_fc_transport.txt
@ -472,6 +474,14 @@ int
fc_vport_terminate(struct fc_vport *vport)
FC BSG support (CT & ELS passthru, and more)
========================================================================
<< To Be Supplied >>
Credits
=======
The following people have contributed to this document:

View File

@ -1271,6 +1271,11 @@ of interest:
hostdata[0] - area reserved for LLD at end of struct Scsi_Host. Size
is set by the second argument (named 'xtr_bytes') to
scsi_host_alloc() or scsi_register().
vendor_id - a unique value that identifies the vendor supplying
the LLD for the Scsi_Host. Used most often in validating
vendor-specific message requests. Value consists of an
identifier type and a vendor-specific value.
See scsi_netlink.h for a description of valid formats.
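The lpfc hunks later in this commit use this scheme for their LPFC_NL_VENDOR_ID. As a rough sketch (assuming the SCSI_NL_VID_* macros from include/scsi/scsi_netlink.h, and that the value is supplied through the LLD's scsi_host_template), a hypothetical LLD could compose its vendor_id like this:

#include <linux/pci_ids.h>
#include <scsi/scsi_netlink.h>
#include <scsi/scsi_host.h>

/* Identifier type in the high-order byte, vendor-specific value below it;
 * here the type says "PCI vendor ID" and the value is Emulex's PCI ID,
 * mirroring what lpfc does with LPFC_NL_VENDOR_ID. */
#define EXAMPLE_NL_VENDOR_ID	(SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX)

static struct scsi_host_template example_sht = {
	.name      = "example",
	.vendor_id = EXAMPLE_NL_VENDOR_ID,
	/* remaining entry points omitted */
};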
The scsi_host structure is defined in include/scsi/scsi_host.h

View File

@ -2272,8 +2272,9 @@ config BNX2
config CNIC
tristate "Broadcom CNIC support"
depends on BNX2
depends on UIO
depends on PCI
select BNX2
select UIO
help
This driver supports offload features of Broadcom NetXtremeII
gigabit Ethernet cards.

View File

@ -470,6 +470,12 @@ int zfcp_adapter_enqueue(struct ccw_device *ccw_device)
if (!adapter)
return -ENOMEM;
adapter->gs = kzalloc(sizeof(struct zfcp_wka_ports), GFP_KERNEL);
if (!adapter->gs) {
kfree(adapter);
return -ENOMEM;
}
ccw_device->handler = NULL;
adapter->ccw_device = ccw_device;
atomic_set(&adapter->refcount, 0);
@ -523,8 +529,7 @@ int zfcp_adapter_enqueue(struct ccw_device *ccw_device)
goto sysfs_failed;
atomic_clear_mask(ZFCP_STATUS_COMMON_REMOVE, &adapter->status);
zfcp_fc_nameserver_init(adapter);
zfcp_fc_wka_ports_init(adapter);
if (!zfcp_adapter_scsi_register(adapter))
return 0;
@ -571,6 +576,7 @@ void zfcp_adapter_dequeue(struct zfcp_adapter *adapter)
kfree(adapter->req_list);
kfree(adapter->fc_stats);
kfree(adapter->stats_reset_data);
kfree(adapter->gs);
kfree(adapter);
}

View File

@ -22,6 +22,8 @@
#include <linux/syscalls.h>
#include <linux/scatterlist.h>
#include <linux/ioctl.h>
#include <scsi/fc/fc_fs.h>
#include <scsi/fc/fc_gs.h>
#include <scsi/scsi.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_cmnd.h>
@ -29,6 +31,7 @@
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/scsi_bsg_fc.h>
#include <asm/ccwdev.h>
#include <asm/qdio.h>
#include <asm/debug.h>
@ -228,11 +231,6 @@ struct zfcp_ls_adisc {
/* FC-PH/FC-GS well-known address identifiers for generic services */
#define ZFCP_DID_WKA 0xFFFFF0
#define ZFCP_DID_MANAGEMENT_SERVICE 0xFFFFFA
#define ZFCP_DID_TIME_SERVICE 0xFFFFFB
#define ZFCP_DID_DIRECTORY_SERVICE 0xFFFFFC
#define ZFCP_DID_ALIAS_SERVICE 0xFFFFF8
#define ZFCP_DID_KEY_DISTRIBUTION_SERVICE 0xFFFFF7
/* remote port status */
#define ZFCP_STATUS_PORT_PHYS_OPEN 0x00000001
@ -376,6 +374,14 @@ struct zfcp_wka_port {
struct delayed_work work;
};
struct zfcp_wka_ports {
struct zfcp_wka_port ms; /* management service */
struct zfcp_wka_port ts; /* time service */
struct zfcp_wka_port ds; /* directory service */
struct zfcp_wka_port as; /* alias service */
struct zfcp_wka_port ks; /* key distribution service */
};
struct zfcp_qdio_queue {
struct qdio_buffer *sbal[QDIO_MAX_BUFFERS_PER_Q];
u8 first; /* index of next free bfr in queue */
@ -461,7 +467,7 @@ struct zfcp_adapter {
actions */
u32 erp_low_mem_count; /* nr of erp actions waiting
for memory */
struct zfcp_wka_port nsp; /* adapter's nameserver */
struct zfcp_wka_ports *gs; /* generic services */
debug_info_t *rec_dbf;
debug_info_t *hba_dbf;
debug_info_t *san_dbf; /* debug feature areas */

View File

@ -719,7 +719,7 @@ static void zfcp_erp_adapter_strategy_close(struct zfcp_erp_action *act)
zfcp_qdio_close(adapter);
zfcp_fsf_req_dismiss_all(adapter);
adapter->fsf_req_seq_no = 0;
zfcp_fc_wka_port_force_offline(&adapter->nsp);
zfcp_fc_wka_port_force_offline(&adapter->gs->ds);
/* all ports and units are closed */
zfcp_erp_modify_adapter_status(adapter, "erascl1", NULL,
ZFCP_STATUS_COMMON_OPEN, ZFCP_CLEAR);

View File

@ -106,8 +106,12 @@ extern int zfcp_fc_ns_gid_pn(struct zfcp_erp_action *);
extern void zfcp_fc_plogi_evaluate(struct zfcp_port *, struct fsf_plogi *);
extern void zfcp_test_link(struct zfcp_port *);
extern void zfcp_fc_link_test_work(struct work_struct *);
extern void zfcp_fc_nameserver_init(struct zfcp_adapter *);
extern void zfcp_fc_wka_port_force_offline(struct zfcp_wka_port *);
extern void zfcp_fc_wka_ports_init(struct zfcp_adapter *);
extern int zfcp_fc_execute_els_fc_job(struct fc_bsg_job *);
extern int zfcp_fc_execute_ct_fc_job(struct fc_bsg_job *);
extern void zfcp_fc_wka_port_force_offline(struct zfcp_wka_port *);
/* zfcp_fsf.c */
extern int zfcp_fsf_open_port(struct zfcp_erp_action *);

View File

@ -120,14 +120,13 @@ static void zfcp_wka_port_put(struct zfcp_wka_port *wka_port)
schedule_delayed_work(&wka_port->work, HZ / 100);
}
void zfcp_fc_nameserver_init(struct zfcp_adapter *adapter)
static void zfcp_fc_wka_port_init(struct zfcp_wka_port *wka_port, u32 d_id,
struct zfcp_adapter *adapter)
{
struct zfcp_wka_port *wka_port = &adapter->nsp;
init_waitqueue_head(&wka_port->completion_wq);
wka_port->adapter = adapter;
wka_port->d_id = ZFCP_DID_DIRECTORY_SERVICE;
wka_port->d_id = d_id;
wka_port->status = ZFCP_WKA_PORT_OFFLINE;
atomic_set(&wka_port->refcount, 0);
@ -143,6 +142,17 @@ void zfcp_fc_wka_port_force_offline(struct zfcp_wka_port *wka)
mutex_unlock(&wka->mutex);
}
void zfcp_fc_wka_ports_init(struct zfcp_adapter *adapter)
{
struct zfcp_wka_ports *gs = adapter->gs;
zfcp_fc_wka_port_init(&gs->ms, FC_FID_MGMT_SERV, adapter);
zfcp_fc_wka_port_init(&gs->ts, FC_FID_TIME_SERV, adapter);
zfcp_fc_wka_port_init(&gs->ds, FC_FID_DIR_SERV, adapter);
zfcp_fc_wka_port_init(&gs->as, FC_FID_ALIASES, adapter);
zfcp_fc_wka_port_init(&gs->ks, FC_FID_SEC_KEY, adapter);
}
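For cross-reference, the FC_FID_* identifiers used here come from the newly included include/scsi/fc/fc_fs.h and carry the same well-known address values as the driver-private ZFCP_DID_* defines removed earlier in this file:

/* Well-known FC service addresses (values per the removed ZFCP_DID_* defines):
 *   FC_FID_SEC_KEY    0xFFFFF7   key distribution service
 *   FC_FID_ALIASES    0xFFFFF8   alias service
 *   FC_FID_MGMT_SERV  0xFFFFFA   management service
 *   FC_FID_TIME_SERV  0xFFFFFB   time service
 *   FC_FID_DIR_SERV   0xFFFFFC   directory (name) service
 */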
static void _zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req, u32 range,
struct fcp_rscn_element *elem)
{
@ -282,7 +292,7 @@ int static zfcp_fc_ns_gid_pn_request(struct zfcp_erp_action *erp_action,
/* setup parameters for send generic command */
gid_pn->port = erp_action->port;
gid_pn->ct.wka_port = &adapter->nsp;
gid_pn->ct.wka_port = &adapter->gs->ds;
gid_pn->ct.handler = zfcp_fc_ns_handler;
gid_pn->ct.handler_data = (unsigned long) &compl_rec;
gid_pn->ct.timeout = ZFCP_NS_GID_PN_TIMEOUT;
@ -329,13 +339,13 @@ int zfcp_fc_ns_gid_pn(struct zfcp_erp_action *erp_action)
memset(gid_pn, 0, sizeof(*gid_pn));
ret = zfcp_wka_port_get(&adapter->nsp);
ret = zfcp_wka_port_get(&adapter->gs->ds);
if (ret)
goto out;
ret = zfcp_fc_ns_gid_pn_request(erp_action, gid_pn);
zfcp_wka_port_put(&adapter->nsp);
zfcp_wka_port_put(&adapter->gs->ds);
out:
mempool_free(gid_pn, adapter->pool.data_gid_pn);
return ret;
@ -525,7 +535,7 @@ static int zfcp_scan_issue_gpn_ft(struct zfcp_gpn_ft *gpn_ft,
req->fc4_type = ZFCP_CT_SCSI_FCP;
/* prepare zfcp_send_ct */
ct->wka_port = &adapter->nsp;
ct->wka_port = &adapter->gs->ds;
ct->handler = zfcp_fc_ns_handler;
ct->handler_data = (unsigned long)&compl_rec;
ct->timeout = 10;
@ -644,7 +654,7 @@ int zfcp_scan_ports(struct zfcp_adapter *adapter)
fc_host_port_type(adapter->scsi_host) != FC_PORTTYPE_NPIV)
return 0;
ret = zfcp_wka_port_get(&adapter->nsp);
ret = zfcp_wka_port_get(&adapter->gs->ds);
if (ret)
return ret;
@ -666,7 +676,7 @@ int zfcp_scan_ports(struct zfcp_adapter *adapter)
}
zfcp_free_sg_env(gpn_ft, buf_num);
out:
zfcp_wka_port_put(&adapter->nsp);
zfcp_wka_port_put(&adapter->gs->ds);
return ret;
}
@ -675,3 +685,158 @@ void _zfcp_scan_ports_later(struct work_struct *work)
{
zfcp_scan_ports(container_of(work, struct zfcp_adapter, scan_work));
}
struct zfcp_els_fc_job {
struct zfcp_send_els els;
struct fc_bsg_job *job;
};
static void zfcp_fc_generic_els_handler(unsigned long data)
{
struct zfcp_els_fc_job *els_fc_job = (struct zfcp_els_fc_job *) data;
struct fc_bsg_job *job = els_fc_job->job;
struct fc_bsg_reply *reply = job->reply;
if (els_fc_job->els.status) {
/* request rejected or timed out */
reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_REJECT;
goto out;
}
reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
reply->reply_payload_rcv_len = job->reply_payload.payload_len;
out:
job->state_flags = FC_RQST_STATE_DONE;
job->job_done(job);
kfree(els_fc_job);
}
int zfcp_fc_execute_els_fc_job(struct fc_bsg_job *job)
{
struct zfcp_els_fc_job *els_fc_job;
struct fc_rport *rport = job->rport;
struct Scsi_Host *shost;
struct zfcp_adapter *adapter;
struct zfcp_port *port;
u8 *port_did;
shost = rport ? rport_to_shost(rport) : job->shost;
adapter = (struct zfcp_adapter *)shost->hostdata[0];
if (!(atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_OPEN))
return -EINVAL;
els_fc_job = kzalloc(sizeof(struct zfcp_els_fc_job), GFP_KERNEL);
if (!els_fc_job)
return -ENOMEM;
els_fc_job->els.adapter = adapter;
if (rport) {
read_lock_irq(&zfcp_data.config_lock);
port = rport->dd_data;
if (port)
els_fc_job->els.d_id = port->d_id;
read_unlock_irq(&zfcp_data.config_lock);
if (!port) {
kfree(els_fc_job);
return -EINVAL;
}
} else {
port_did = job->request->rqst_data.h_els.port_id;
els_fc_job->els.d_id = (port_did[0] << 16) +
(port_did[1] << 8) + port_did[2];
}
els_fc_job->els.req = job->request_payload.sg_list;
els_fc_job->els.resp = job->reply_payload.sg_list;
els_fc_job->els.handler = zfcp_fc_generic_els_handler;
els_fc_job->els.handler_data = (unsigned long) els_fc_job;
els_fc_job->job = job;
return zfcp_fsf_send_els(&els_fc_job->els);
}
struct zfcp_ct_fc_job {
struct zfcp_send_ct ct;
struct fc_bsg_job *job;
};
static void zfcp_fc_generic_ct_handler(unsigned long data)
{
struct zfcp_ct_fc_job *ct_fc_job = (struct zfcp_ct_fc_job *) data;
struct fc_bsg_job *job = ct_fc_job->job;
job->reply->reply_data.ctels_reply.status = ct_fc_job->ct.status ?
FC_CTELS_STATUS_REJECT : FC_CTELS_STATUS_OK;
job->reply->reply_payload_rcv_len = job->reply_payload.payload_len;
job->state_flags = FC_RQST_STATE_DONE;
job->job_done(job);
zfcp_wka_port_put(ct_fc_job->ct.wka_port);
kfree(ct_fc_job);
}
int zfcp_fc_execute_ct_fc_job(struct fc_bsg_job *job)
{
int ret;
u8 gs_type;
struct fc_rport *rport = job->rport;
struct Scsi_Host *shost;
struct zfcp_adapter *adapter;
struct zfcp_ct_fc_job *ct_fc_job;
u32 preamble_word1;
shost = rport ? rport_to_shost(rport) : job->shost;
adapter = (struct zfcp_adapter *)shost->hostdata[0];
if (!(atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_OPEN))
return -EINVAL;
ct_fc_job = kzalloc(sizeof(struct zfcp_ct_fc_job), GFP_KERNEL);
if (!ct_fc_job)
return -ENOMEM;
preamble_word1 = job->request->rqst_data.r_ct.preamble_word1;
gs_type = (preamble_word1 & 0xff000000) >> 24;
switch (gs_type) {
case FC_FST_ALIAS:
ct_fc_job->ct.wka_port = &adapter->gs->as;
break;
case FC_FST_MGMT:
ct_fc_job->ct.wka_port = &adapter->gs->ms;
break;
case FC_FST_TIME:
ct_fc_job->ct.wka_port = &adapter->gs->ts;
break;
case FC_FST_DIR:
ct_fc_job->ct.wka_port = &adapter->gs->ds;
break;
default:
kfree(ct_fc_job);
return -EINVAL; /* no such service */
}
ret = zfcp_wka_port_get(ct_fc_job->ct.wka_port);
if (ret) {
kfree(ct_fc_job);
return ret;
}
ct_fc_job->ct.req = job->request_payload.sg_list;
ct_fc_job->ct.resp = job->reply_payload.sg_list;
ct_fc_job->ct.timeout = ZFCP_FSF_REQUEST_TIMEOUT;
ct_fc_job->ct.handler = zfcp_fc_generic_ct_handler;
ct_fc_job->ct.handler_data = (unsigned long) ct_fc_job;
ct_fc_job->ct.completion = NULL;
ct_fc_job->job = job;
ret = zfcp_fsf_send_ct(&ct_fc_job->ct, NULL, NULL);
if (ret) {
kfree(ct_fc_job);
zfcp_wka_port_put(ct_fc_job->ct.wka_port);
}
return ret;
}

View File

@ -1146,7 +1146,8 @@ static void zfcp_fsf_send_els_handler(struct zfcp_fsf_req *req)
case FSF_RESPONSE_SIZE_TOO_LARGE:
break;
case FSF_ACCESS_DENIED:
zfcp_fsf_access_denied_port(req, port);
if (port)
zfcp_fsf_access_denied_port(req, port);
break;
case FSF_SBAL_MISMATCH:
/* should never occur, avoided in zfcp_fsf_send_els */

View File

@ -623,6 +623,20 @@ void zfcp_scsi_scan(struct work_struct *work)
zfcp_unit_put(unit);
}
static int zfcp_execute_fc_job(struct fc_bsg_job *job)
{
switch (job->request->msgcode) {
case FC_BSG_RPT_ELS:
case FC_BSG_HST_ELS_NOLOGIN:
return zfcp_fc_execute_els_fc_job(job);
case FC_BSG_RPT_CT:
case FC_BSG_HST_CT:
return zfcp_fc_execute_ct_fc_job(job);
default:
return -EINVAL;
}
}
struct fc_function_template zfcp_transport_functions = {
.show_starget_port_id = 1,
.show_starget_port_name = 1,
@ -644,6 +658,7 @@ struct fc_function_template zfcp_transport_functions = {
.dev_loss_tmo_callbk = zfcp_scsi_dev_loss_tmo_callbk,
.terminate_rport_io = zfcp_scsi_terminate_rport_io,
.show_host_port_state = 1,
.bsg_request = zfcp_execute_fc_job,
/* no functions registered for following dynamic attributes but
directly set by LLDD */
.show_host_port_type = 1,
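With .bsg_request wired up, the dispatch above becomes reachable from user space through the bsg character device the FC transport creates for each host. A rough user-space sketch of an ELS pass-through, assuming a /dev/bsg/fc_host0 node name, that scsi_bsg_fc.h is visible to user space, and caller-supplied payload buffers (error paths trimmed):

#include <fcntl.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/bsg.h>		/* struct sg_io_v4, BSG_PROTOCOL_SCSI */
#include <scsi/sg.h>		/* SG_IO */
#include <scsi/scsi_bsg_fc.h>	/* struct fc_bsg_request, FC_BSG_HST_ELS_NOLOGIN */

int send_els_nologin(uint8_t did[3], void *req_buf, int req_len,
		     void *rsp_buf, int rsp_len)
{
	struct fc_bsg_request req;
	struct fc_bsg_reply reply;
	struct sg_io_v4 hdr;
	int fd, ret;

	fd = open("/dev/bsg/fc_host0", O_RDWR);	/* assumed node name */
	if (fd < 0)
		return -1;

	memset(&req, 0, sizeof(req));
	req.msgcode = FC_BSG_HST_ELS_NOLOGIN;
	memcpy(req.rqst_data.h_els.port_id, did, 3);	/* destination D_ID */

	memset(&hdr, 0, sizeof(hdr));
	hdr.guard = 'Q';			/* sg_io_v4 magic */
	hdr.protocol = BSG_PROTOCOL_SCSI;
	hdr.subprotocol = BSG_SUB_PROTOCOL_SCSI_TRANSPORT;
	hdr.request = (uintptr_t)&req;
	hdr.request_len = sizeof(req);
	hdr.dout_xferp = (uintptr_t)req_buf;	/* ELS payload out */
	hdr.dout_xfer_len = req_len;
	hdr.din_xferp = (uintptr_t)rsp_buf;	/* ELS response in */
	hdr.din_xfer_len = rsp_len;
	hdr.response = (uintptr_t)&reply;
	hdr.max_response_len = sizeof(reply);
	hdr.timeout = 30000;			/* ms */

	ret = ioctl(fd, SG_IO, &hdr);		/* synchronous round trip */
	close(fd);
	return ret;
}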

View File

@ -627,19 +627,15 @@ ahd_linux_target_alloc(struct scsi_target *starget)
starget->id, &tstate);
if ((flags & CFPACKETIZED) == 0) {
/* Do not negotiate packetized transfers */
spi_rd_strm(starget) = 0;
spi_pcomp_en(starget) = 0;
spi_rti(starget) = 0;
spi_wr_flow(starget) = 0;
spi_hold_mcs(starget) = 0;
/* don't negotiate packetized (IU) transfers */
spi_max_iu(starget) = 0;
} else {
if ((ahd->features & AHD_RTI) == 0)
spi_rti(starget) = 0;
}
if ((flags & CFQAS) == 0)
spi_qas(starget) = 0;
spi_max_qas(starget) = 0;
/* Transinfo values have been set to BIOS settings */
spi_max_width(starget) = (flags & CFWIDEB) ? 1 : 0;

View File

@ -2,6 +2,7 @@ config SCSI_BNX2_ISCSI
tristate "Broadcom NetXtreme II iSCSI support"
select SCSI_ISCSI_ATTRS
select CNIC
depends on PCI
---help---
This driver supports iSCSI offload for the Broadcom NetXtreme II
devices.

View File

@ -457,10 +457,6 @@ struct lpfc_hba {
void (*lpfc_scsi_prep_cmnd)
(struct lpfc_vport *, struct lpfc_scsi_buf *,
struct lpfc_nodelist *);
int (*lpfc_scsi_prep_task_mgmt_cmd)
(struct lpfc_vport *, struct lpfc_scsi_buf *,
unsigned int, uint8_t);
/* IOCB interface function jump table entries */
int (*__lpfc_sli_issue_iocb)
(struct lpfc_hba *, uint32_t,

View File

@ -3113,6 +3113,9 @@ sysfs_ctlreg_write(struct kobject *kobj, struct bin_attribute *bin_attr,
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
struct lpfc_hba *phba = vport->phba;
if (phba->sli_rev >= LPFC_SLI_REV4)
return -EPERM;
if ((off + count) > FF_REG_AREA_SIZE)
return -ERANGE;
@ -3163,6 +3166,9 @@ sysfs_ctlreg_read(struct kobject *kobj, struct bin_attribute *bin_attr,
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
struct lpfc_hba *phba = vport->phba;
if (phba->sli_rev >= LPFC_SLI_REV4)
return -EPERM;
if (off > FF_REG_AREA_SIZE)
return -ERANGE;

View File

@ -1732,7 +1732,9 @@ lpfc_decode_firmware_rev(struct lpfc_hba *phba, char *fwrevision, int flag)
uint32_t *ptr, str[4];
uint8_t *fwname;
if (vp->rev.rBit) {
if (phba->sli_rev == LPFC_SLI_REV4)
sprintf(fwrevision, "%s", vp->rev.opFwName);
else if (vp->rev.rBit) {
if (psli->sli_flag & LPFC_SLI_ACTIVE)
rev = vp->rev.sli2FwRev;
else

View File

@ -168,6 +168,19 @@ lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp,
if (elsiocb == NULL)
return NULL;
/*
* If this command is for the fabric controller and the HBA is
* running in FIP mode, send FLOGI, FDISC and LOGO as FIP frames.
*/
if ((did == Fabric_DID) &&
bf_get(lpfc_fip_flag, &phba->sli4_hba.sli4_flags) &&
((elscmd == ELS_CMD_FLOGI) ||
(elscmd == ELS_CMD_FDISC) ||
(elscmd == ELS_CMD_LOGO)))
elsiocb->iocb_flag |= LPFC_FIP_ELS;
else
elsiocb->iocb_flag &= ~LPFC_FIP_ELS;
icmd = &elsiocb->iocb;
/* fill in BDEs for command */
@ -6108,9 +6121,17 @@ lpfc_issue_els_fdisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
icmd->un.elsreq64.myID = 0;
icmd->un.elsreq64.fl = 1;
/* For FDISC, Let FDISC rsp set the NPortID for this VPI */
icmd->ulpCt_h = 1;
icmd->ulpCt_l = 0;
if (phba->sli_rev == LPFC_SLI_REV4) {
/* FDISC needs to be 1 for WQE VPI */
elsiocb->iocb.ulpCt_h = (SLI4_CT_VPI >> 1) & 1;
elsiocb->iocb.ulpCt_l = SLI4_CT_VPI & 1;
/* Set the ulpContext to the vpi */
elsiocb->iocb.ulpContext = vport->vpi + phba->vpi_base;
} else {
/* For FDISC, Let FDISC rsp set the NPortID for this VPI */
icmd->ulpCt_h = 1;
icmd->ulpCt_l = 0;
}
pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
*((uint32_t *) (pcmd)) = ELS_CMD_FDISC;

View File

@ -1197,6 +1197,11 @@ lpfc_match_fcf_conn_list(struct lpfc_hba *phba,
{
struct lpfc_fcf_conn_entry *conn_entry;
/* If FCF not available return 0 */
if (!bf_get(lpfc_fcf_record_fcf_avail, new_fcf_record) ||
!bf_get(lpfc_fcf_record_fcf_valid, new_fcf_record))
return 0;
if (!phba->cfg_enable_fip) {
*boot_flag = 0;
*addr_mode = bf_get(lpfc_fcf_record_mac_addr_prov,
@ -1216,6 +1221,14 @@ lpfc_match_fcf_conn_list(struct lpfc_hba *phba,
*boot_flag = 0;
*addr_mode = bf_get(lpfc_fcf_record_mac_addr_prov,
new_fcf_record);
/*
* When there are no FCF connect entries, use driver's default
* addressing mode - FPMA.
*/
if (*addr_mode & LPFC_FCF_FPMA)
*addr_mode = LPFC_FCF_FPMA;
*vlan_id = 0xFFFF;
return 1;
}
@ -1240,6 +1253,14 @@ lpfc_match_fcf_conn_list(struct lpfc_hba *phba,
continue;
}
/*
* If connection record does not support any addressing mode,
* skip the FCF record.
*/
if (!(bf_get(lpfc_fcf_record_mac_addr_prov, new_fcf_record)
& (LPFC_FCF_FPMA | LPFC_FCF_SPMA)))
continue;
/*
* Check if the connection record specifies a required
* addressing mode.
@ -1272,6 +1293,11 @@ lpfc_match_fcf_conn_list(struct lpfc_hba *phba,
else
*boot_flag = 0;
/*
* If the user did not specify any addressing mode, or if the
* preferred addressing mode specified by the user is not supported
* by the FCF, allow the fabric to pick the addressing mode.
*/
*addr_mode = bf_get(lpfc_fcf_record_mac_addr_prov,
new_fcf_record);
/*
@ -1297,12 +1323,6 @@ lpfc_match_fcf_conn_list(struct lpfc_hba *phba,
!(conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) &&
(*addr_mode & LPFC_FCF_FPMA))
*addr_mode = LPFC_FCF_FPMA;
/*
* If user did not specify any addressing mode, use FPMA if
* possible else use SPMA.
*/
else if (*addr_mode & LPFC_FCF_FPMA)
*addr_mode = LPFC_FCF_FPMA;
if (conn_entry->conn_rec.flags & FCFCNCT_VLAN_VALID)
*vlan_id = conn_entry->conn_rec.vlan_tag;
@ -1864,7 +1884,7 @@ lpfc_mbx_cmpl_read_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
vport->fc_flag &= ~FC_BYPASSED_MODE;
spin_unlock_irq(shost->host_lock);
if (((phba->fc_eventTag + 1) < la->eventTag) ||
if ((phba->fc_eventTag < la->eventTag) ||
(phba->fc_eventTag == la->eventTag)) {
phba->fc_stat.LinkMultiEvent++;
if (la->attType == AT_LINK_UP)
@ -2925,6 +2945,7 @@ lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
lpfc_no_rpi(phba, ndlp);
ndlp->nlp_rpi = 0;
ndlp->nlp_flag &= ~NLP_RPI_VALID;
ndlp->nlp_flag &= ~NLP_NPR_ADISC;
return 1;
}
return 0;

View File

@ -1183,7 +1183,6 @@ typedef struct {
#define PCI_DEVICE_ID_ZEPHYR_DCSP 0xfe12
#define PCI_VENDOR_ID_SERVERENGINE 0x19a2
#define PCI_DEVICE_ID_TIGERSHARK 0x0704
#define PCI_DEVICE_ID_TIGERSHARK_S 0x0705
#define JEDEC_ID_ADDRESS 0x0080001c
#define FIREFLY_JEDEC_ID 0x1ACC

View File

@ -422,9 +422,9 @@ struct lpfc_wqe_generic{
#define lpfc_wqe_gen_pri_WORD word10
uint32_t word11;
#define lpfc_wqe_gen_cq_id_SHIFT 16
#define lpfc_wqe_gen_cq_id_MASK 0x000003FF
#define lpfc_wqe_gen_cq_id_MASK 0x0000FFFF
#define lpfc_wqe_gen_cq_id_WORD word11
#define LPFC_WQE_CQ_ID_DEFAULT 0x3ff
#define LPFC_WQE_CQ_ID_DEFAULT 0xffff
#define lpfc_wqe_gen_wqec_SHIFT 7
#define lpfc_wqe_gen_wqec_MASK 0x00000001
#define lpfc_wqe_gen_wqec_WORD word11
@ -1128,7 +1128,7 @@ struct fcf_record {
#define lpfc_fcf_record_mac_5_WORD word4
#define lpfc_fcf_record_fcf_avail_SHIFT 16
#define lpfc_fcf_record_fcf_avail_MASK 0x000000FF
#define lpfc_fcf_record_fc_avail_WORD word4
#define lpfc_fcf_record_fcf_avail_WORD word4
#define lpfc_fcf_record_mac_addr_prov_SHIFT 24
#define lpfc_fcf_record_mac_addr_prov_MASK 0x000000FF
#define lpfc_fcf_record_mac_addr_prov_WORD word4

View File

@ -428,7 +428,8 @@ lpfc_config_port_post(struct lpfc_hba *phba)
/* Reset the DFT_HBA_Q_DEPTH to the max xri */
if (phba->cfg_hba_queue_depth > (mb->un.varRdConfig.max_xri+1))
phba->cfg_hba_queue_depth =
mb->un.varRdConfig.max_xri + 1;
(mb->un.varRdConfig.max_xri + 1) -
lpfc_sli4_get_els_iocb_cnt(phba);
phba->lmt = mb->un.varRdConfig.lmt;
@ -1646,10 +1647,6 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
oneConnect = 1;
m = (typeof(m)) {"OCe10100-F", max_speed, "PCIe"};
break;
case PCI_DEVICE_ID_TIGERSHARK_S:
oneConnect = 1;
m = (typeof(m)) {"OCe10100-F-S", max_speed, "PCIe"};
break;
default:
m = (typeof(m)){ NULL };
break;
@ -3543,6 +3540,7 @@ lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba)
/* Free the allocated rpi headers. */
lpfc_sli4_remove_rpi_hdrs(phba);
lpfc_sli4_remove_rpis(phba);
/* Free the ELS sgl list */
lpfc_free_active_sgl(phba);
@ -7184,16 +7182,19 @@ lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *phba)
{
int max_xri = phba->sli4_hba.max_cfg_param.max_xri;
if (max_xri <= 100)
return 4;
else if (max_xri <= 256)
return 8;
else if (max_xri <= 512)
return 16;
else if (max_xri <= 1024)
return 32;
else
return 48;
if (phba->sli_rev == LPFC_SLI_REV4) {
if (max_xri <= 100)
return 4;
else if (max_xri <= 256)
return 8;
else if (max_xri <= 512)
return 16;
else if (max_xri <= 1024)
return 32;
else
return 48;
} else
return 0;
}
/**
@ -7642,7 +7643,6 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
switch (dev_id) {
case PCI_DEVICE_ID_TIGERSHARK:
case PCI_DEVICE_ID_TIGERSHARK_S:
rc = lpfc_pci_probe_one_s4(pdev, pid);
break;
default:
@ -7941,8 +7941,6 @@ static struct pci_device_id lpfc_id_table[] = {
PCI_ANY_ID, PCI_ANY_ID, },
{PCI_VENDOR_ID_SERVERENGINE, PCI_DEVICE_ID_TIGERSHARK,
PCI_ANY_ID, PCI_ANY_ID, },
{PCI_VENDOR_ID_SERVERENGINE, PCI_DEVICE_ID_TIGERSHARK_S,
PCI_ANY_ID, PCI_ANY_ID, },
{ 0 }
};

View File

@ -1631,6 +1631,7 @@ lpfc_sli4_config(struct lpfc_hba *phba, struct lpfcMboxq *mbox,
/* In case of malloc fails, proceed with whatever we have */
if (!viraddr)
break;
memset(viraddr, 0, PAGE_SIZE);
mbox->sge_array->addr[pagen] = viraddr;
/* Keep the first page for later sub-header construction */
if (pagen == 0)
@ -1715,8 +1716,10 @@ lpfc_request_features(struct lpfc_hba *phba, struct lpfcMboxq *mboxq)
/* Set up host requested features. */
bf_set(lpfc_mbx_rq_ftr_rq_fcpi, &mboxq->u.mqe.un.req_ftrs, 1);
/* Virtual fabrics and FIPs are not supported yet. */
bf_set(lpfc_mbx_rq_ftr_rq_ifip, &mboxq->u.mqe.un.req_ftrs, 0);
if (phba->cfg_enable_fip)
bf_set(lpfc_mbx_rq_ftr_rq_ifip, &mboxq->u.mqe.un.req_ftrs, 0);
else
bf_set(lpfc_mbx_rq_ftr_rq_ifip, &mboxq->u.mqe.un.req_ftrs, 1);
/* Enable DIF (block guard) only if configured to do so. */
if (phba->cfg_enable_bg)

View File

@ -497,7 +497,7 @@ lpfc_rcv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
lpfc_els_rsp_acc(vport, ELS_CMD_PRLO, cmdiocb, ndlp, NULL);
else
lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
if ((ndlp->nlp_type & NLP_FABRIC) &&
if ((ndlp->nlp_DID == Fabric_DID) &&
vport->port_type == LPFC_NPIV_PORT) {
lpfc_linkdown_port(vport);
mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1);

View File

@ -115,6 +115,27 @@ lpfc_debug_save_dif(struct scsi_cmnd *cmnd)
}
}
/**
* lpfc_sli4_set_rsp_sgl_last - Set the last bit in the response sge.
* @phba: Pointer to HBA object.
* @lpfc_cmd: lpfc scsi command object pointer.
*
* This function is called from the lpfc_prep_task_mgmt_cmd function to
* set the last bit in the response sge entry.
**/
static void
lpfc_sli4_set_rsp_sgl_last(struct lpfc_hba *phba,
struct lpfc_scsi_buf *lpfc_cmd)
{
struct sli4_sge *sgl = (struct sli4_sge *)lpfc_cmd->fcp_bpl;
if (sgl) {
sgl += 1;
sgl->word2 = le32_to_cpu(sgl->word2);
bf_set(lpfc_sli4_sge_last, sgl, 1);
sgl->word2 = cpu_to_le32(sgl->word2);
}
}
/**
* lpfc_update_stats - Update statistical data for the command completion
* @phba: Pointer to HBA object.
@ -1978,7 +1999,7 @@ lpfc_send_scsi_error_event(struct lpfc_hba *phba, struct lpfc_vport *vport,
}
/**
* lpfc_scsi_unprep_dma_buf_s3 - Un-map DMA mapping of SG-list for SLI3 dev
* lpfc_scsi_unprep_dma_buf - Un-map DMA mapping of SG-list for dev
* @phba: The HBA for which this call is being executed.
* @psb: The scsi buffer which is going to be un-mapped.
*
@ -1986,7 +2007,7 @@ lpfc_send_scsi_error_event(struct lpfc_hba *phba, struct lpfc_vport *vport,
* field of @lpfc_cmd for device with SLI-3 interface spec.
**/
static void
lpfc_scsi_unprep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
lpfc_scsi_unprep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
{
/*
* There are only two special cases to consider. (1) the scsi command
@ -2002,36 +2023,6 @@ lpfc_scsi_unprep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
psb->pCmd->sc_data_direction);
}
/**
* lpfc_scsi_unprep_dma_buf_s4 - Un-map DMA mapping of SG-list for SLI4 dev
* @phba: The Hba for which this call is being executed.
* @psb: The scsi buffer which is going to be un-mapped.
*
* This routine does DMA un-mapping of scatter gather list of scsi command
* field of @lpfc_cmd for device with SLI-4 interface spec. If we have to
* remove the sgl for this scsi buffer then we will do it here. For now
* we should be able to just call the sli3 unprep routine.
**/
static void
lpfc_scsi_unprep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
{
lpfc_scsi_unprep_dma_buf_s3(phba, psb);
}
/**
* lpfc_scsi_unprep_dma_buf - Wrapper function for unmap DMA mapping of SG-list
* @phba: The Hba for which this call is being executed.
* @psb: The scsi buffer which is going to be un-mapped.
*
* This routine does DMA un-mapping of scatter gather list of scsi command
* field of @lpfc_cmd for device with SLI-4 interface spec.
**/
static void
lpfc_scsi_unprep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
{
phba->lpfc_scsi_unprep_dma_buf(phba, psb);
}
/**
* lpfc_handle_fcp_err - FCP response handler
* @vport: The virtual port for which this call is being executed.
@ -2461,7 +2452,7 @@ lpfc_fcpcmd_to_iocb(uint8_t *data, struct fcp_cmnd *fcp_cmnd)
}
/**
* lpfc_scsi_prep_cmnd_s3 - Convert scsi cmnd to FCP infor unit for SLI3 dev
* lpfc_scsi_prep_cmnd - Wrapper func for convert scsi cmnd to FCP info unit
* @vport: The virtual port for which this call is being executed.
* @lpfc_cmd: The scsi command which needs to send.
* @pnode: Pointer to lpfc_nodelist.
@ -2470,7 +2461,7 @@ lpfc_fcpcmd_to_iocb(uint8_t *data, struct fcp_cmnd *fcp_cmnd)
* to transfer for device with SLI3 interface spec.
**/
static void
lpfc_scsi_prep_cmnd_s3(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
struct lpfc_nodelist *pnode)
{
struct lpfc_hba *phba = vport->phba;
@ -2558,46 +2549,7 @@ lpfc_scsi_prep_cmnd_s3(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
}
/**
* lpfc_scsi_prep_cmnd_s4 - Convert scsi cmnd to FCP infor unit for SLI4 dev
* @vport: The virtual port for which this call is being executed.
* @lpfc_cmd: The scsi command which needs to send.
* @pnode: Pointer to lpfc_nodelist.
*
* This routine initializes fcp_cmnd and iocb data structure from scsi command
* to transfer for device with SLI4 interface spec.
**/
static void
lpfc_scsi_prep_cmnd_s4(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
struct lpfc_nodelist *pnode)
{
/*
* The prep cmnd routines do not touch the sgl or its
* entries. We may not have to do anything different.
* I will leave this function in place until we can
* run some IO through the driver and determine if changes
* are needed.
*/
return lpfc_scsi_prep_cmnd_s3(vport, lpfc_cmd, pnode);
}
/**
* lpfc_scsi_prep_cmnd - Wrapper func for convert scsi cmnd to FCP info unit
* @vport: The virtual port for which this call is being executed.
* @lpfc_cmd: The scsi command which needs to send.
* @pnode: Pointer to lpfc_nodelist.
*
* This routine wraps the actual convert SCSI cmnd function pointer from
* the lpfc_hba struct.
**/
static inline void
lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
struct lpfc_nodelist *pnode)
{
vport->phba->lpfc_scsi_prep_cmnd(vport, lpfc_cmd, pnode);
}
/**
* lpfc_scsi_prep_task_mgmt_cmnd_s3 - Convert SLI3 scsi TM cmd to FCP info unit
* lpfc_scsi_prep_task_mgmt_cmnd - Convert SLI3 scsi TM cmd to FCP info unit
* @vport: The virtual port for which this call is being executed.
* @lpfc_cmd: Pointer to lpfc_scsi_buf data structure.
* @lun: Logical unit number.
@ -2611,7 +2563,7 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
* 1 - Success
**/
static int
lpfc_scsi_prep_task_mgmt_cmd_s3(struct lpfc_vport *vport,
lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport,
struct lpfc_scsi_buf *lpfc_cmd,
unsigned int lun,
uint8_t task_mgmt_cmd)
@ -2653,70 +2605,15 @@ lpfc_scsi_prep_task_mgmt_cmd_s3(struct lpfc_vport *vport,
* The driver will provide the timeout mechanism.
*/
piocb->ulpTimeout = 0;
} else {
} else
piocb->ulpTimeout = lpfc_cmd->timeout;
}
if (vport->phba->sli_rev == LPFC_SLI_REV4)
lpfc_sli4_set_rsp_sgl_last(vport->phba, lpfc_cmd);
return 1;
}
/**
* lpfc_scsi_prep_task_mgmt_cmnd_s4 - Convert SLI4 scsi TM cmd to FCP info unit
* @vport: The virtual port for which this call is being executed.
* @lpfc_cmd: Pointer to lpfc_scsi_buf data structure.
* @lun: Logical unit number.
* @task_mgmt_cmd: SCSI task management command.
*
* This routine creates FCP information unit corresponding to @task_mgmt_cmd
* for device with SLI-4 interface spec.
*
* Return codes:
* 0 - Error
* 1 - Success
**/
static int
lpfc_scsi_prep_task_mgmt_cmd_s4(struct lpfc_vport *vport,
struct lpfc_scsi_buf *lpfc_cmd,
unsigned int lun,
uint8_t task_mgmt_cmd)
{
/*
* The prep cmnd routines do not touch the sgl or its
* entries. We may not have to do anything different.
* I will leave this function in place until we can
* run some IO through the driver and determine if changes
* are needed.
*/
return lpfc_scsi_prep_task_mgmt_cmd_s3(vport, lpfc_cmd, lun,
task_mgmt_cmd);
}
/**
* lpfc_scsi_prep_task_mgmt_cmnd - Wrapper func convert scsi TM cmd to FCP info
* @vport: The virtual port for which this call is being executed.
* @lpfc_cmd: Pointer to lpfc_scsi_buf data structure.
* @lun: Logical unit number.
* @task_mgmt_cmd: SCSI task management command.
*
* This routine wraps the actual convert SCSI TM to FCP information unit
* function pointer from the lpfc_hba struct.
*
* Return codes:
* 0 - Error
* 1 - Success
**/
static inline int
lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport,
struct lpfc_scsi_buf *lpfc_cmd,
unsigned int lun,
uint8_t task_mgmt_cmd)
{
struct lpfc_hba *phba = vport->phba;
return phba->lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, lun,
task_mgmt_cmd);
}
/**
* lpfc_scsi_api_table_setup - Set up scsi api function jump table
* @phba: The hba struct for which this call is being executed.
@ -2730,23 +2627,19 @@ int
lpfc_scsi_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
{
phba->lpfc_scsi_unprep_dma_buf = lpfc_scsi_unprep_dma_buf;
phba->lpfc_scsi_prep_cmnd = lpfc_scsi_prep_cmnd;
phba->lpfc_get_scsi_buf = lpfc_get_scsi_buf;
switch (dev_grp) {
case LPFC_PCI_DEV_LP:
phba->lpfc_new_scsi_buf = lpfc_new_scsi_buf_s3;
phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s3;
phba->lpfc_scsi_prep_cmnd = lpfc_scsi_prep_cmnd_s3;
phba->lpfc_scsi_unprep_dma_buf = lpfc_scsi_unprep_dma_buf_s3;
phba->lpfc_scsi_prep_task_mgmt_cmd =
lpfc_scsi_prep_task_mgmt_cmd_s3;
phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s3;
break;
case LPFC_PCI_DEV_OC:
phba->lpfc_new_scsi_buf = lpfc_new_scsi_buf_s4;
phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s4;
phba->lpfc_scsi_prep_cmnd = lpfc_scsi_prep_cmnd_s4;
phba->lpfc_scsi_unprep_dma_buf = lpfc_scsi_unprep_dma_buf_s4;
phba->lpfc_scsi_prep_task_mgmt_cmd =
lpfc_scsi_prep_task_mgmt_cmd_s4;
phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s4;
break;
default:
@ -2782,72 +2675,6 @@ lpfc_tskmgmt_def_cmpl(struct lpfc_hba *phba,
return;
}
/**
* lpfc_scsi_tgt_reset - Target reset handler
* @lpfc_cmd: Pointer to lpfc_scsi_buf data structure
* @vport: The virtual port for which this call is being executed.
* @tgt_id: Target ID.
* @lun: Lun number.
* @rdata: Pointer to lpfc_rport_data.
*
* This routine issues a TARGET RESET iocb to reset a target with @tgt_id ID.
*
* Return Code:
* 0x2003 - Error
* 0x2002 - Success.
**/
static int
lpfc_scsi_tgt_reset(struct lpfc_scsi_buf *lpfc_cmd, struct lpfc_vport *vport,
unsigned tgt_id, unsigned int lun,
struct lpfc_rport_data *rdata)
{
struct lpfc_hba *phba = vport->phba;
struct lpfc_iocbq *iocbq;
struct lpfc_iocbq *iocbqrsp;
int ret;
int status;
if (!rdata->pnode || !NLP_CHK_NODE_ACT(rdata->pnode))
return FAILED;
lpfc_cmd->rdata = rdata;
status = lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, lun,
FCP_TARGET_RESET);
if (!status)
return FAILED;
iocbq = &lpfc_cmd->cur_iocbq;
iocbqrsp = lpfc_sli_get_iocbq(phba);
if (!iocbqrsp)
return FAILED;
/* Issue Target Reset to TGT <num> */
lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
"0702 Issue Target Reset to TGT %d Data: x%x x%x\n",
tgt_id, rdata->pnode->nlp_rpi, rdata->pnode->nlp_flag);
status = lpfc_sli_issue_iocb_wait(phba, LPFC_FCP_RING,
iocbq, iocbqrsp, lpfc_cmd->timeout);
if (status != IOCB_SUCCESS) {
if (status == IOCB_TIMEDOUT) {
iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl;
ret = TIMEOUT_ERROR;
} else
ret = FAILED;
lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
} else {
ret = SUCCESS;
lpfc_cmd->result = iocbqrsp->iocb.un.ulpWord[4];
lpfc_cmd->status = iocbqrsp->iocb.ulpStatus;
if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT &&
(lpfc_cmd->result & IOERR_DRVR_MASK))
lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
}
lpfc_sli_release_iocbq(phba, iocbqrsp);
return ret;
}
/**
* lpfc_info - Info entry point of scsi_host_template data structure
* @host: The scsi host for which this call is being executed.
@ -3228,11 +3055,201 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
return ret;
}
static char *
lpfc_taskmgmt_name(uint8_t task_mgmt_cmd)
{
switch (task_mgmt_cmd) {
case FCP_ABORT_TASK_SET:
return "ABORT_TASK_SET";
case FCP_CLEAR_TASK_SET:
return "FCP_CLEAR_TASK_SET";
case FCP_BUS_RESET:
return "FCP_BUS_RESET";
case FCP_LUN_RESET:
return "FCP_LUN_RESET";
case FCP_TARGET_RESET:
return "FCP_TARGET_RESET";
case FCP_CLEAR_ACA:
return "FCP_CLEAR_ACA";
case FCP_TERMINATE_TASK:
return "FCP_TERMINATE_TASK";
default:
return "unknown";
}
}
/**
* lpfc_send_taskmgmt - Generic SCSI Task Mgmt Handler
* @vport: The virtual port for which this call is being executed.
* @rdata: Pointer to remote port local data
* @tgt_id: Target ID of remote device.
* @lun_id: Lun number for the TMF
* @task_mgmt_cmd: type of TMF to send
*
* This routine builds and sends a TMF (SCSI Task Mgmt Function) to
* a remote port.
*
* Return Code:
* 0x2003 - Error
* 0x2002 - Success.
**/
static int
lpfc_send_taskmgmt(struct lpfc_vport *vport, struct lpfc_rport_data *rdata,
unsigned tgt_id, unsigned int lun_id,
uint8_t task_mgmt_cmd)
{
struct lpfc_hba *phba = vport->phba;
struct lpfc_scsi_buf *lpfc_cmd;
struct lpfc_iocbq *iocbq;
struct lpfc_iocbq *iocbqrsp;
int ret;
int status;
if (!rdata->pnode || !NLP_CHK_NODE_ACT(rdata->pnode))
return FAILED;
lpfc_cmd = lpfc_get_scsi_buf(phba);
if (lpfc_cmd == NULL)
return FAILED;
lpfc_cmd->timeout = 60;
lpfc_cmd->rdata = rdata;
status = lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, lun_id,
task_mgmt_cmd);
if (!status) {
lpfc_release_scsi_buf(phba, lpfc_cmd);
return FAILED;
}
iocbq = &lpfc_cmd->cur_iocbq;
iocbqrsp = lpfc_sli_get_iocbq(phba);
if (iocbqrsp == NULL) {
lpfc_release_scsi_buf(phba, lpfc_cmd);
return FAILED;
}
lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
"0702 Issue %s to TGT %d LUN %d "
"rpi x%x nlp_flag x%x\n",
lpfc_taskmgmt_name(task_mgmt_cmd), tgt_id, lun_id,
rdata->pnode->nlp_rpi, rdata->pnode->nlp_flag);
status = lpfc_sli_issue_iocb_wait(phba, LPFC_FCP_RING,
iocbq, iocbqrsp, lpfc_cmd->timeout);
if (status != IOCB_SUCCESS) {
if (status == IOCB_TIMEDOUT) {
iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl;
ret = TIMEOUT_ERROR;
} else
ret = FAILED;
lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
"0727 TMF %s to TGT %d LUN %d failed (%d, %d)\n",
lpfc_taskmgmt_name(task_mgmt_cmd),
tgt_id, lun_id, iocbqrsp->iocb.ulpStatus,
iocbqrsp->iocb.un.ulpWord[4]);
} else
ret = SUCCESS;
lpfc_sli_release_iocbq(phba, iocbqrsp);
if (ret != TIMEOUT_ERROR)
lpfc_release_scsi_buf(phba, lpfc_cmd);
return ret;
}
/**
* lpfc_chk_tgt_mapped - Check if the scsi target (rport) is present and mapped
* @vport: The virtual port to check on
* @cmnd: Pointer to scsi_cmnd data structure.
*
* This routine delays until the scsi target (aka rport) for the
* command exists (is present and logged in) or we declare it non-existent.
*
* Return code :
* 0x2003 - Error
* 0x2002 - Success
**/
static int
lpfc_chk_tgt_mapped(struct lpfc_vport *vport, struct scsi_cmnd *cmnd)
{
struct lpfc_rport_data *rdata = cmnd->device->hostdata;
struct lpfc_nodelist *pnode = rdata->pnode;
unsigned long later;
/*
* If target is not in a MAPPED state, delay until
* target is rediscovered or devloss timeout expires.
*/
later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
while (time_after(later, jiffies)) {
if (!pnode || !NLP_CHK_NODE_ACT(pnode))
return FAILED;
if (pnode->nlp_state == NLP_STE_MAPPED_NODE)
return SUCCESS;
schedule_timeout_uninterruptible(msecs_to_jiffies(500));
rdata = cmnd->device->hostdata;
if (!rdata)
return FAILED;
pnode = rdata->pnode;
}
if (!pnode || !NLP_CHK_NODE_ACT(pnode) ||
(pnode->nlp_state != NLP_STE_MAPPED_NODE))
return FAILED;
return SUCCESS;
}
/**
* lpfc_reset_flush_io_context - Flush i/o contexts left over after a reset TMF
* @vport: The virtual port (scsi_host) for the flush context
* @tgt_id: If aborting by Target context - specifies the target id
* @lun_id: If aborting by Lun context - specifies the lun id
* @context: specifies the context level to flush at.
*
* After a reset condition via TMF, we need to flush orphaned i/o
* contexts from the adapter. This routine aborts any contexts
* outstanding, then waits for their completions. The wait is
* bounded by devloss_tmo though.
*
* Return code :
* 0x2003 - Error
* 0x2002 - Success
**/
static int
lpfc_reset_flush_io_context(struct lpfc_vport *vport, uint16_t tgt_id,
uint64_t lun_id, lpfc_ctx_cmd context)
{
struct lpfc_hba *phba = vport->phba;
unsigned long later;
int cnt;
cnt = lpfc_sli_sum_iocb(vport, tgt_id, lun_id, context);
if (cnt)
lpfc_sli_abort_iocb(vport, &phba->sli.ring[phba->sli.fcp_ring],
tgt_id, lun_id, context);
later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
while (time_after(later, jiffies) && cnt) {
schedule_timeout_uninterruptible(msecs_to_jiffies(20));
cnt = lpfc_sli_sum_iocb(vport, tgt_id, lun_id, context);
}
if (cnt) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
"0724 I/O flush failure for context %s : cnt x%x\n",
((context == LPFC_CTX_LUN) ? "LUN" :
((context == LPFC_CTX_TGT) ? "TGT" :
((context == LPFC_CTX_HOST) ? "HOST" : "Unknown"))),
cnt);
return FAILED;
}
return SUCCESS;
}
/**
* lpfc_device_reset_handler - scsi_host_template eh_device_reset entry point
* @cmnd: Pointer to scsi_cmnd data structure.
*
* This routine does a device reset by sending a TARGET_RESET task management
* This routine does a device reset by sending a LUN_RESET task management
* command.
*
* Return code :
@ -3244,33 +3261,79 @@ lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
{
struct Scsi_Host *shost = cmnd->device->host;
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
struct lpfc_hba *phba = vport->phba;
struct lpfc_scsi_buf *lpfc_cmd;
struct lpfc_iocbq *iocbq, *iocbqrsp;
struct lpfc_rport_data *rdata = cmnd->device->hostdata;
struct lpfc_nodelist *pnode = rdata->pnode;
unsigned long later;
int ret = SUCCESS;
int status;
int cnt;
unsigned tgt_id = cmnd->device->id;
unsigned int lun_id = cmnd->device->lun;
struct lpfc_scsi_event_header scsi_event;
int status;
lpfc_block_error_handler(cmnd);
status = lpfc_chk_tgt_mapped(vport, cmnd);
if (status == FAILED) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
"0721 Device Reset rport failure: rdata x%p\n", rdata);
return FAILED;
}
scsi_event.event_type = FC_REG_SCSI_EVENT;
scsi_event.subcategory = LPFC_EVENT_LUNRESET;
scsi_event.lun = lun_id;
memcpy(scsi_event.wwpn, &pnode->nlp_portname, sizeof(struct lpfc_name));
memcpy(scsi_event.wwnn, &pnode->nlp_nodename, sizeof(struct lpfc_name));
fc_host_post_vendor_event(shost, fc_get_event_number(),
sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID);
status = lpfc_send_taskmgmt(vport, rdata, tgt_id, lun_id,
FCP_LUN_RESET);
lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
"0713 SCSI layer issued Device Reset (%d, %d) "
"return x%x\n", tgt_id, lun_id, status);
/*
* If target is not in a MAPPED state, delay the reset until
* target is rediscovered or devloss timeout expires.
* We have to clean up the i/o, as it may have been orphaned by the TMF;
* or, if the TMF failed, it may be in an indeterminate state.
* So, continue on.
* We will report success if all the i/o aborts successfully.
*/
later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
while (time_after(later, jiffies)) {
if (!pnode || !NLP_CHK_NODE_ACT(pnode))
return FAILED;
if (pnode->nlp_state == NLP_STE_MAPPED_NODE)
break;
schedule_timeout_uninterruptible(msecs_to_jiffies(500));
rdata = cmnd->device->hostdata;
if (!rdata)
break;
pnode = rdata->pnode;
status = lpfc_reset_flush_io_context(vport, tgt_id, lun_id,
LPFC_CTX_LUN);
return status;
}
/**
* lpfc_target_reset_handler - scsi_host_template eh_target_reset entry point
* @cmnd: Pointer to scsi_cmnd data structure.
*
* This routine does a target reset by sending a TARGET_RESET task management
* command.
*
* Return code :
* 0x2003 - Error
* 0x2002 - Success
**/
static int
lpfc_target_reset_handler(struct scsi_cmnd *cmnd)
{
struct Scsi_Host *shost = cmnd->device->host;
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
struct lpfc_rport_data *rdata = cmnd->device->hostdata;
struct lpfc_nodelist *pnode = rdata->pnode;
unsigned tgt_id = cmnd->device->id;
unsigned int lun_id = cmnd->device->lun;
struct lpfc_scsi_event_header scsi_event;
int status;
lpfc_block_error_handler(cmnd);
status = lpfc_chk_tgt_mapped(vport, cmnd);
if (status == FAILED) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
"0722 Target Reset rport failure: rdata x%p\n", rdata);
return FAILED;
}
scsi_event.event_type = FC_REG_SCSI_EVENT;
@ -3279,105 +3342,47 @@ lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
memcpy(scsi_event.wwpn, &pnode->nlp_portname, sizeof(struct lpfc_name));
memcpy(scsi_event.wwnn, &pnode->nlp_nodename, sizeof(struct lpfc_name));
fc_host_post_vendor_event(shost,
fc_get_event_number(),
sizeof(scsi_event),
(char *)&scsi_event,
LPFC_NL_VENDOR_ID);
fc_host_post_vendor_event(shost, fc_get_event_number(),
sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID);
if (!rdata || pnode->nlp_state != NLP_STE_MAPPED_NODE) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
"0721 LUN Reset rport "
"failure: msec x%x rdata x%p\n",
jiffies_to_msecs(jiffies - later), rdata);
return FAILED;
}
lpfc_cmd = lpfc_get_scsi_buf(phba);
if (lpfc_cmd == NULL)
return FAILED;
lpfc_cmd->timeout = 60;
lpfc_cmd->rdata = rdata;
status = lpfc_send_taskmgmt(vport, rdata, tgt_id, lun_id,
FCP_TARGET_RESET);
status = lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd,
cmnd->device->lun,
FCP_TARGET_RESET);
if (!status) {
lpfc_release_scsi_buf(phba, lpfc_cmd);
return FAILED;
}
iocbq = &lpfc_cmd->cur_iocbq;
/* get a buffer for this IOCB command response */
iocbqrsp = lpfc_sli_get_iocbq(phba);
if (iocbqrsp == NULL) {
lpfc_release_scsi_buf(phba, lpfc_cmd);
return FAILED;
}
lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
"0703 Issue target reset to TGT %d LUN %d "
"rpi x%x nlp_flag x%x\n", cmnd->device->id,
cmnd->device->lun, pnode->nlp_rpi, pnode->nlp_flag);
status = lpfc_sli_issue_iocb_wait(phba, LPFC_FCP_RING,
iocbq, iocbqrsp, lpfc_cmd->timeout);
if (status == IOCB_TIMEDOUT) {
iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl;
ret = TIMEOUT_ERROR;
} else {
if (status != IOCB_SUCCESS)
ret = FAILED;
lpfc_release_scsi_buf(phba, lpfc_cmd);
}
lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
"0713 SCSI layer issued device reset (%d, %d) "
"return x%x status x%x result x%x\n",
cmnd->device->id, cmnd->device->lun, ret,
iocbqrsp->iocb.ulpStatus,
iocbqrsp->iocb.un.ulpWord[4]);
lpfc_sli_release_iocbq(phba, iocbqrsp);
cnt = lpfc_sli_sum_iocb(vport, cmnd->device->id, cmnd->device->lun,
LPFC_CTX_TGT);
if (cnt)
lpfc_sli_abort_iocb(vport, &phba->sli.ring[phba->sli.fcp_ring],
cmnd->device->id, cmnd->device->lun,
LPFC_CTX_TGT);
later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
while (time_after(later, jiffies) && cnt) {
schedule_timeout_uninterruptible(msecs_to_jiffies(20));
cnt = lpfc_sli_sum_iocb(vport, cmnd->device->id,
cmnd->device->lun, LPFC_CTX_TGT);
}
if (cnt) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
"0719 device reset I/O flush failure: "
"cnt x%x\n", cnt);
ret = FAILED;
}
return ret;
"0723 SCSI layer issued Target Reset (%d, %d) "
"return x%x\n", tgt_id, lun_id, status);
/*
* We have to clean up the i/o, as it may have been orphaned by the TMF;
* or, if the TMF failed, it may be in an indeterminate state.
* So, continue on.
* We will report success if all the i/o aborts successfully.
*/
status = lpfc_reset_flush_io_context(vport, tgt_id, lun_id,
LPFC_CTX_TGT);
return status;
}
/**
* lpfc_bus_reset_handler - scsi_host_template eh_bus_reset_handler entry point
* @cmnd: Pointer to scsi_cmnd data structure.
*
* This routine does target reset to all target on @cmnd->device->host.
* This routine does target reset to all targets on @cmnd->device->host.
* This emulates Parallel SCSI Bus Reset Semantics.
*
* Return Code:
* 0x2003 - Error
* 0x2002 - Success
* Return code :
* 0x2003 - Error
* 0x2002 - Success
**/
static int
lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
{
struct Scsi_Host *shost = cmnd->device->host;
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
struct lpfc_hba *phba = vport->phba;
struct lpfc_nodelist *ndlp = NULL;
int match;
int ret = SUCCESS, status = SUCCESS, i;
int cnt;
struct lpfc_scsi_buf * lpfc_cmd;
unsigned long later;
struct lpfc_scsi_event_header scsi_event;
int match;
int ret = SUCCESS, status, i;
scsi_event.event_type = FC_REG_SCSI_EVENT;
scsi_event.subcategory = LPFC_EVENT_BUSRESET;
@ -3385,13 +3390,11 @@ lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
memcpy(scsi_event.wwpn, &vport->fc_portname, sizeof(struct lpfc_name));
memcpy(scsi_event.wwnn, &vport->fc_nodename, sizeof(struct lpfc_name));
fc_host_post_vendor_event(shost,
fc_get_event_number(),
sizeof(scsi_event),
(char *)&scsi_event,
LPFC_NL_VENDOR_ID);
fc_host_post_vendor_event(shost, fc_get_event_number(),
sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID);
lpfc_block_error_handler(cmnd);
/*
* Since the driver manages a single bus device, reset all
* targets known to the driver. Should any target reset
@ -3414,16 +3417,11 @@ lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
spin_unlock_irq(shost->host_lock);
if (!match)
continue;
lpfc_cmd = lpfc_get_scsi_buf(phba);
if (lpfc_cmd) {
lpfc_cmd->timeout = 60;
status = lpfc_scsi_tgt_reset(lpfc_cmd, vport, i,
cmnd->device->lun,
ndlp->rport->dd_data);
if (status != TIMEOUT_ERROR)
lpfc_release_scsi_buf(phba, lpfc_cmd);
}
if (!lpfc_cmd || status != SUCCESS) {
status = lpfc_send_taskmgmt(vport, ndlp->rport->dd_data,
i, 0, FCP_TARGET_RESET);
if (status != SUCCESS) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
"0700 Bus Reset on target %d failed\n",
i);
@ -3431,25 +3429,16 @@ lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
}
}
/*
* All outstanding txcmplq I/Os should have been aborted by
* the targets. Unfortunately, some targets do not abide by
* this forcing the driver to double check.
* We have to clean up the i/o, as it may have been orphaned by the TMFs
* above; or, if any of the TMFs failed, it may be in an
* indeterminate state.
* We will report success if all the i/o aborts successfully.
*/
cnt = lpfc_sli_sum_iocb(vport, 0, 0, LPFC_CTX_HOST);
if (cnt)
lpfc_sli_abort_iocb(vport, &phba->sli.ring[phba->sli.fcp_ring],
0, 0, LPFC_CTX_HOST);
later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
while (time_after(later, jiffies) && cnt) {
schedule_timeout_uninterruptible(msecs_to_jiffies(20));
cnt = lpfc_sli_sum_iocb(vport, 0, 0, LPFC_CTX_HOST);
}
if (cnt) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
"0715 Bus Reset I/O flush failure: "
"cnt x%x left x%x\n", cnt, i);
status = lpfc_reset_flush_io_context(vport, 0, 0, LPFC_CTX_HOST);
if (status != SUCCESS)
ret = FAILED;
}
lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
"0714 SCSI layer issued Bus Reset Data: x%x\n", ret);
return ret;
@ -3582,7 +3571,8 @@ struct scsi_host_template lpfc_template = {
.info = lpfc_info,
.queuecommand = lpfc_queuecommand,
.eh_abort_handler = lpfc_abort_handler,
.eh_device_reset_handler= lpfc_device_reset_handler,
.eh_device_reset_handler = lpfc_device_reset_handler,
.eh_target_reset_handler = lpfc_target_reset_handler,
.eh_bus_reset_handler = lpfc_bus_reset_handler,
.slave_alloc = lpfc_slave_alloc,
.slave_configure = lpfc_slave_configure,
@ -3602,7 +3592,8 @@ struct scsi_host_template lpfc_vport_template = {
.info = lpfc_info,
.queuecommand = lpfc_queuecommand,
.eh_abort_handler = lpfc_abort_handler,
.eh_device_reset_handler= lpfc_device_reset_handler,
.eh_device_reset_handler = lpfc_device_reset_handler,
.eh_target_reset_handler = lpfc_target_reset_handler,
.eh_bus_reset_handler = lpfc_bus_reset_handler,
.slave_alloc = lpfc_slave_alloc,
.slave_configure = lpfc_slave_configure,

View File

@ -4139,8 +4139,11 @@ lpfc_sli4_read_fcoe_params(struct lpfc_hba *phba,
return -EIO;
}
data_length = mqe->un.mb_words[5];
if (data_length > DMP_FCOEPARAM_RGN_SIZE)
if (data_length > DMP_FCOEPARAM_RGN_SIZE) {
lpfc_mbuf_free(phba, mp->virt, mp->phys);
kfree(mp);
return -EIO;
}
lpfc_parse_fcoe_conf(phba, mp->virt, data_length);
lpfc_mbuf_free(phba, mp->virt, mp->phys);
@ -4211,27 +4214,6 @@ lpfc_sli4_read_rev(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
return -EIO;
}
lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
"(%d):0380 Mailbox cmd x%x Status x%x "
"Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x "
"x%x x%x x%x x%x x%x x%x x%x x%x x%x "
"CQ: x%x x%x x%x x%x\n",
mboxq->vport ? mboxq->vport->vpi : 0,
bf_get(lpfc_mqe_command, mqe),
bf_get(lpfc_mqe_status, mqe),
mqe->un.mb_words[0], mqe->un.mb_words[1],
mqe->un.mb_words[2], mqe->un.mb_words[3],
mqe->un.mb_words[4], mqe->un.mb_words[5],
mqe->un.mb_words[6], mqe->un.mb_words[7],
mqe->un.mb_words[8], mqe->un.mb_words[9],
mqe->un.mb_words[10], mqe->un.mb_words[11],
mqe->un.mb_words[12], mqe->un.mb_words[13],
mqe->un.mb_words[14], mqe->un.mb_words[15],
mqe->un.mb_words[16], mqe->un.mb_words[50],
mboxq->mcqe.word0,
mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1,
mboxq->mcqe.trailer);
/*
* The available vpd length cannot be bigger than the
* DMA buffer passed to the port. Catch the less than
@ -4337,21 +4319,18 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
goto out_free_vpd;
mqe = &mboxq->u.mqe;
if ((bf_get(lpfc_mbx_rd_rev_sli_lvl,
&mqe->un.read_rev) != LPFC_SLI_REV4) ||
(bf_get(lpfc_mbx_rd_rev_fcoe, &mqe->un.read_rev) == 0)) {
phba->sli_rev = bf_get(lpfc_mbx_rd_rev_sli_lvl, &mqe->un.read_rev);
if (bf_get(lpfc_mbx_rd_rev_fcoe, &mqe->un.read_rev))
phba->hba_flag |= HBA_FCOE_SUPPORT;
if (phba->sli_rev != LPFC_SLI_REV4 ||
!(phba->hba_flag & HBA_FCOE_SUPPORT)) {
lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
"0376 READ_REV Error. SLI Level %d "
"FCoE enabled %d\n",
bf_get(lpfc_mbx_rd_rev_sli_lvl, &mqe->un.read_rev),
bf_get(lpfc_mbx_rd_rev_fcoe, &mqe->un.read_rev));
phba->sli_rev, phba->hba_flag & HBA_FCOE_SUPPORT);
rc = -EIO;
goto out_free_vpd;
}
/* Single threaded at this point, no need for lock */
spin_lock_irq(&phba->hbalock);
phba->hba_flag |= HBA_FCOE_SUPPORT;
spin_unlock_irq(&phba->hbalock);
/*
* Evaluate the read rev and vpd data. Populate the driver
* state with the results. If this routine fails, the failure
@ -4365,8 +4344,32 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
rc = 0;
}
/* By now, we should determine the SLI revision, hard code for now */
phba->sli_rev = LPFC_SLI_REV4;
/* Save information as VPD data */
phba->vpd.rev.biuRev = mqe->un.read_rev.first_hw_rev;
phba->vpd.rev.smRev = mqe->un.read_rev.second_hw_rev;
phba->vpd.rev.endecRev = mqe->un.read_rev.third_hw_rev;
phba->vpd.rev.fcphHigh = bf_get(lpfc_mbx_rd_rev_fcph_high,
&mqe->un.read_rev);
phba->vpd.rev.fcphLow = bf_get(lpfc_mbx_rd_rev_fcph_low,
&mqe->un.read_rev);
phba->vpd.rev.feaLevelHigh = bf_get(lpfc_mbx_rd_rev_ftr_lvl_high,
&mqe->un.read_rev);
phba->vpd.rev.feaLevelLow = bf_get(lpfc_mbx_rd_rev_ftr_lvl_low,
&mqe->un.read_rev);
phba->vpd.rev.sli1FwRev = mqe->un.read_rev.fw_id_rev;
memcpy(phba->vpd.rev.sli1FwName, mqe->un.read_rev.fw_name, 16);
phba->vpd.rev.sli2FwRev = mqe->un.read_rev.ulp_fw_id_rev;
memcpy(phba->vpd.rev.sli2FwName, mqe->un.read_rev.ulp_fw_name, 16);
phba->vpd.rev.opFwRev = mqe->un.read_rev.fw_id_rev;
memcpy(phba->vpd.rev.opFwName, mqe->un.read_rev.fw_name, 16);
lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
"(%d):0380 READ_REV Status x%x "
"fw_rev:%s fcphHi:%x fcphLo:%x flHi:%x flLo:%x\n",
mboxq->vport ? mboxq->vport->vpi : 0,
bf_get(lpfc_mqe_status, mqe),
phba->vpd.rev.opFwName,
phba->vpd.rev.fcphHigh, phba->vpd.rev.fcphLow,
phba->vpd.rev.feaLevelHigh, phba->vpd.rev.feaLevelLow);
/*
* Discover the port's supported feature set and match it against the
@ -4491,8 +4494,10 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
rc = -ENODEV;
goto out_free_vpd;
}
/* Temporary initialization of lpfc_fip_flag to non-fip */
bf_set(lpfc_fip_flag, &phba->sli4_hba.sli4_flags, 0);
if (phba->cfg_enable_fip)
bf_set(lpfc_fip_flag, &phba->sli4_hba.sli4_flags, 1);
else
bf_set(lpfc_fip_flag, &phba->sli4_hba.sli4_flags, 0);
/* Set up all the queues to the device */
rc = lpfc_sli4_queue_setup(phba);
@ -5029,6 +5034,92 @@ out_not_finished:
return MBX_NOT_FINISHED;
}
/**
* lpfc_sli4_async_mbox_block - Block posting SLI4 asynchronous mailbox command
* @phba: Pointer to HBA context object.
*
* The function blocks the posting of SLI4 asynchronous mailbox commands from
* the driver internal pending mailbox queue. It will then try to wait out the
* possible outstanding mailbox command before returning.
*
* Returns:
* 0 - the outstanding mailbox command completed; otherwise, the wait for
* the outstanding mailbox command timed out.
**/
static int
lpfc_sli4_async_mbox_block(struct lpfc_hba *phba)
{
struct lpfc_sli *psli = &phba->sli;
uint8_t actcmd = MBX_HEARTBEAT;
int rc = 0;
unsigned long timeout;
/* Mark the asynchronous mailbox command posting as blocked */
spin_lock_irq(&phba->hbalock);
psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
if (phba->sli.mbox_active)
actcmd = phba->sli.mbox_active->u.mb.mbxCommand;
spin_unlock_irq(&phba->hbalock);
/* Determine how long we might wait for the active mailbox
* command to be gracefully completed by firmware.
*/
timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, actcmd) * 1000) +
jiffies;
/* Wait for the outstanding mailbox command to complete */
while (phba->sli.mbox_active) {
/* Check active mailbox complete status every 2ms */
msleep(2);
if (time_after(jiffies, timeout)) {
/* Timed out; mark the outstanding cmd as not complete */
rc = 1;
break;
}
}
/* Cannot cleanly block async mailbox commands, so fail the block */
if (rc) {
spin_lock_irq(&phba->hbalock);
psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
spin_unlock_irq(&phba->hbalock);
}
return rc;
}
/**
* lpfc_sli4_async_mbox_unblock - Unblock posting SLI4 async mailbox command
* @phba: Pointer to HBA context object.
*
* The function unblocks and resumes the posting of SLI4 asynchronous mailbox
* commands from the driver internal pending mailbox queue. It makes sure
* that there is no outstanding mailbox command before resuming posting
* asynchronous mailbox commands. If, for any reason, there is an outstanding
* mailbox command, it will try to wait it out before resuming asynchronous
* mailbox command posting.
**/
static void
lpfc_sli4_async_mbox_unblock(struct lpfc_hba *phba)
{
struct lpfc_sli *psli = &phba->sli;
spin_lock_irq(&phba->hbalock);
if (!(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
/* Asynchronous mailbox posting is not blocked, do nothing */
spin_unlock_irq(&phba->hbalock);
return;
}
/* The outstanding synchronous mailbox command is guaranteed to be done
* by now: whether it completed or timed out, the command has been
* removed from the port, so just unblock the posting of async mailbox
* commands and resume.
*/
psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
spin_unlock_irq(&phba->hbalock);
/* wake up worker thread to post asynchronous mailbox command */
lpfc_worker_wake_up(phba);
}
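/*
 * Illustrative sketch (not part of the driver): how the two helpers above
 * are meant to be paired by a caller that must issue a mailbox command
 * synchronously while interrupt-driven posting is active. The caller
 * shown here is hypothetical; the real pairing is in the MBX_POLL path of
 * lpfc_sli_issue_mbox_s4() below.
 */
static int lpfc_example_sync_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	int rc;

	/* drain/park the async mailbox path first */
	if (lpfc_sli4_async_mbox_block(phba))
		return MBX_TIMEOUT;	/* active command never completed */

	/* safe to use the bootstrap mailbox synchronously now */
	rc = lpfc_sli4_post_sync_mbox(phba, mboxq);

	/* let the worker thread resume posting queued async commands */
	lpfc_sli4_async_mbox_unblock(phba);
	return rc;
}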
/**
* lpfc_sli4_post_sync_mbox - Post an SLI4 mailbox to the bootstrap mailbox
* @phba: Pointer to HBA context object.
@ -5204,14 +5295,35 @@ lpfc_sli_issue_mbox_s4(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
psli->sli_flag, flag);
return rc;
} else if (flag == MBX_POLL) {
lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
"(%d):2542 Mailbox command x%x (x%x) "
"cannot issue Data: x%x x%x\n",
lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
"(%d):2542 Trying to issue mailbox command "
"x%x (x%x) synchronously ahead of async "
"mailbox command queue: x%x x%x\n",
mboxq->vport ? mboxq->vport->vpi : 0,
mboxq->u.mb.mbxCommand,
lpfc_sli4_mbox_opcode_get(phba, mboxq),
psli->sli_flag, flag);
return -EIO;
/* Try to block the asynchronous mailbox posting */
rc = lpfc_sli4_async_mbox_block(phba);
if (!rc) {
/* Successfully blocked, now issue sync mbox cmd */
rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
if (rc != MBX_SUCCESS)
lpfc_printf_log(phba, KERN_ERR,
LOG_MBOX | LOG_SLI,
"(%d):2597 Mailbox command "
"x%x (x%x) cannot issue "
"Data: x%x x%x\n",
mboxq->vport ?
mboxq->vport->vpi : 0,
mboxq->u.mb.mbxCommand,
lpfc_sli4_mbox_opcode_get(phba,
mboxq),
psli->sli_flag, flag);
/* Unblock the async mailbox posting afterward */
lpfc_sli4_async_mbox_unblock(phba);
}
return rc;
}
/* Now, interrupt mode asynchronous mailbox command */
@ -5749,18 +5861,13 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
fip = bf_get(lpfc_fip_flag, &phba->sli4_hba.sli4_flags);
/* The fcp commands will set command type */
if ((!(iocbq->iocb_flag & LPFC_IO_FCP)) && (!fip))
command_type = ELS_COMMAND_NON_FIP;
else if (!(iocbq->iocb_flag & LPFC_IO_FCP))
command_type = ELS_COMMAND_FIP;
else if (iocbq->iocb_flag & LPFC_IO_FCP)
if (iocbq->iocb_flag & LPFC_IO_FCP)
command_type = FCP_COMMAND;
else {
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"2019 Invalid cmd 0x%x\n",
iocbq->iocb.ulpCommand);
return IOCB_ERROR;
}
else if (fip && (iocbq->iocb_flag & LPFC_FIP_ELS))
command_type = ELS_COMMAND_FIP;
else
command_type = ELS_COMMAND_NON_FIP;
/* Some of the fields are in the right position already */
memcpy(wqe, &iocbq->iocb, sizeof(union lpfc_wqe));
abort_tag = (uint32_t) iocbq->iotag;
@ -5814,11 +5921,6 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
bf_set(lpfc_wqe_gen_context, &wqe->generic,
iocbq->iocb.ulpContext);
if (iocbq->vport->fc_myDID != 0) {
bf_set(els_req64_sid, &wqe->els_req,
iocbq->vport->fc_myDID);
bf_set(els_req64_sp, &wqe->els_req, 1);
}
bf_set(lpfc_wqe_gen_ct, &wqe->generic, ct);
bf_set(lpfc_wqe_gen_pu, &wqe->generic, 0);
/* CCP CCPE PV PRI in word10 were set in the memcpy */
@ -5877,14 +5979,19 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
* is set and we are sending our 2nd or greater command on
* this exchange.
*/
/* ALLOW read & write to fall through to ICMD64 */
case CMD_FCP_ICMND64_CR:
/* Always open the exchange */
bf_set(wqe_xc, &wqe->fcp_iread.wqe_com, 0);
wqe->words[10] &= 0xffff0000; /* zero out ebde count */
bf_set(lpfc_wqe_gen_pu, &wqe->generic, iocbq->iocb.ulpPU);
break;
case CMD_FCP_ICMND64_CR:
/* Always open the exchange */
bf_set(wqe_xc, &wqe->fcp_iread.wqe_com, 0);
wqe->words[4] = 0;
wqe->words[10] &= 0xffff0000; /* zero out ebde count */
bf_set(lpfc_wqe_gen_pu, &wqe->generic, 0);
break;
case CMD_GEN_REQUEST64_CR:
/* word3 command length is described as byte offset to the
@ -7246,6 +7353,32 @@ lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba,
return;
}
/**
* lpfc_chk_iocb_flg - Test IOCB flag with lock held.
* @phba: Pointer to HBA context object.
* @piocbq: Pointer to command iocb.
* @flag: Flag to test.
*
* This routine grabs the hbalock and then tests the iocb_flag to
* see if the passed in flag is set.
* Returns:
* 1 if flag is set.
* 0 if flag is not set.
**/
static int
lpfc_chk_iocb_flg(struct lpfc_hba *phba,
struct lpfc_iocbq *piocbq, uint32_t flag)
{
unsigned long iflags;
int ret;
spin_lock_irqsave(&phba->hbalock, iflags);
ret = piocbq->iocb_flag & flag;
spin_unlock_irqrestore(&phba->hbalock, iflags);
return ret;
}
/**
* lpfc_sli_issue_iocb_wait - Synchronous function to issue iocb commands
* @phba: Pointer to HBA context object.
@ -7313,7 +7446,7 @@ lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
if (retval == IOCB_SUCCESS) {
timeout_req = timeout * HZ;
timeleft = wait_event_timeout(done_q,
piocb->iocb_flag & LPFC_IO_WAKE,
lpfc_chk_iocb_flg(phba, piocb, LPFC_IO_WAKE),
timeout_req);
if (piocb->iocb_flag & LPFC_IO_WAKE) {
@ -7498,20 +7631,16 @@ lpfc_sli_eratt_read(struct lpfc_hba *phba)
if ((HS_FFER1 & phba->work_hs) &&
((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 |
HS_FFER6 | HS_FFER7) & phba->work_hs)) {
spin_lock_irq(&phba->hbalock);
phba->hba_flag |= DEFER_ERATT;
spin_unlock_irq(&phba->hbalock);
/* Clear all interrupt enable conditions */
writel(0, phba->HCregaddr);
readl(phba->HCregaddr);
}
/* Set the driver HA work bitmap */
spin_lock_irq(&phba->hbalock);
phba->work_ha |= HA_ERATT;
/* Indicate polling handles this ERATT */
phba->hba_flag |= HBA_ERATT_HANDLED;
spin_unlock_irq(&phba->hbalock);
return 1;
}
return 0;
@ -7557,12 +7686,10 @@ lpfc_sli4_eratt_read(struct lpfc_hba *phba)
return 0;
phba->work_status[0] = uerr_sta_lo;
phba->work_status[1] = uerr_sta_hi;
spin_lock_irq(&phba->hbalock);
/* Set the driver HA work bitmap */
phba->work_ha |= HA_ERATT;
/* Indicate polling handles this ERATT */
phba->hba_flag |= HBA_ERATT_HANDLED;
spin_unlock_irq(&phba->hbalock);
return 1;
}
}
@ -9245,6 +9372,7 @@ lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t entry_size,
kfree(dmabuf);
goto out_fail;
}
memset(dmabuf->virt, 0, PAGE_SIZE);
dmabuf->buffer_tag = x;
list_add_tail(&dmabuf->list, &queue->page_list);
/* initialize queue's entry array */
@ -9667,7 +9795,7 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
/* link the wq onto the parent cq child list */
list_add_tail(&wq->list, &cq->child_list);
out:
if (rc == MBX_TIMEOUT)
if (rc != MBX_TIMEOUT)
mempool_free(mbox, phba->mbox_mem_pool);
return status;
}
@ -11020,10 +11148,7 @@ lpfc_sli4_post_rpi_hdr(struct lpfc_hba *phba, struct lpfc_rpi_hdr *rpi_page)
rpi_page->start_rpi);
hdr_tmpl->rpi_paddr_lo = putPaddrLow(rpi_page->dmabuf->phys);
hdr_tmpl->rpi_paddr_hi = putPaddrHigh(rpi_page->dmabuf->phys);
if (!phba->sli4_hba.intr_enable)
rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
else
rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
shdr = (union lpfc_sli4_cfg_shdr *) &hdr_tmpl->header.cfg_shdr;
shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
@ -11363,6 +11488,7 @@ lpfc_sli4_build_dflt_fcf_record(struct lpfc_hba *phba,
bf_set(lpfc_fcf_record_fc_map_1, fcf_record, phba->fc_map[1]);
bf_set(lpfc_fcf_record_fc_map_2, fcf_record, phba->fc_map[2]);
bf_set(lpfc_fcf_record_fcf_valid, fcf_record, 1);
bf_set(lpfc_fcf_record_fcf_avail, fcf_record, 1);
bf_set(lpfc_fcf_record_fcf_index, fcf_record, fcf_index);
bf_set(lpfc_fcf_record_mac_addr_prov, fcf_record,
LPFC_FCF_FPMA | LPFC_FCF_SPMA);

View File

@ -56,6 +56,7 @@ struct lpfc_iocbq {
#define LPFC_DRIVER_ABORTED 8 /* driver aborted this request */
#define LPFC_IO_FABRIC 0x10 /* Iocb send using fabric scheduler */
#define LPFC_DELAY_MEM_FREE 0x20 /* Defer free'ing of FC data */
#define LPFC_FIP_ELS 0x40
uint8_t abort_count;
uint8_t rsvd2;

View File

@ -229,7 +229,7 @@ struct lpfc_bmbx {
#define LPFC_EQE_DEF_COUNT 1024
#define LPFC_CQE_DEF_COUNT 256
#define LPFC_WQE_DEF_COUNT 64
#define LPFC_WQE_DEF_COUNT 256
#define LPFC_MQE_DEF_COUNT 16
#define LPFC_RQE_DEF_COUNT 512

View File

@ -18,7 +18,7 @@
* included with this package. *
*******************************************************************/
#define LPFC_DRIVER_VERSION "8.3.2"
#define LPFC_DRIVER_VERSION "8.3.3"
#define LPFC_DRIVER_NAME "lpfc"
#define LPFC_SP_DRIVER_HANDLER_NAME "lpfc:sp"

View File

@ -695,8 +695,6 @@ lpfc_vport_delete(struct fc_vport *fc_vport)
}
vport->unreg_vpi_cmpl = VPORT_INVAL;
timeout = msecs_to_jiffies(phba->fc_ratov * 2000);
if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
goto skip_logo;
if (!lpfc_issue_els_npiv_logo(vport, ndlp))
while (vport->unreg_vpi_cmpl == VPORT_INVAL && timeout)
timeout = schedule_timeout(timeout);

View File

@ -5444,7 +5444,7 @@ static void ncr_getsync(struct ncb *np, u_char sfac, u_char *fakp, u_char *scntl
** input speed faster than the period.
*/
kpc = per * clk;
while (--div >= 0)
while (--div > 0)
if (kpc >= (div_10M[div] << 2)) break;
/*

View File

@ -530,7 +530,7 @@ static int nsp_negate_signal(struct scsi_cmnd *SCpnt, unsigned char mask,
if (reg == 0xff) {
break;
}
} while ((time_out-- != 0) && (reg & mask) != 0);
} while ((--time_out != 0) && (reg & mask) != 0);
if (time_out == 0) {
nsp_msg(KERN_DEBUG, " %s signal off timeout", str);
@ -801,7 +801,7 @@ static void nsp_pio_read(struct scsi_cmnd *SCpnt)
data->FifoCount = ocount;
if (time_out == 0) {
if (time_out < 0) {
nsp_msg(KERN_DEBUG, "pio read timeout resid=%d this_residual=%d buffers_residual=%d",
scsi_get_resid(SCpnt), SCpnt->SCp.this_residual,
SCpnt->SCp.buffers_residual);
@ -897,7 +897,7 @@ static void nsp_pio_write(struct scsi_cmnd *SCpnt)
data->FifoCount = ocount;
if (time_out == 0) {
if (time_out < 0) {
nsp_msg(KERN_DEBUG, "pio write timeout resid=0x%x",
scsi_get_resid(SCpnt));
}

View File

@ -37,6 +37,7 @@ qla2100_intr_handler(int irq, void *dev_id)
uint16_t hccr;
uint16_t mb[4];
struct rsp_que *rsp;
unsigned long flags;
rsp = (struct rsp_que *) dev_id;
if (!rsp) {
@ -49,7 +50,7 @@ qla2100_intr_handler(int irq, void *dev_id)
reg = &ha->iobase->isp;
status = 0;
spin_lock(&ha->hardware_lock);
spin_lock_irqsave(&ha->hardware_lock, flags);
vha = pci_get_drvdata(ha->pdev);
for (iter = 50; iter--; ) {
hccr = RD_REG_WORD(&reg->hccr);
@ -101,7 +102,7 @@ qla2100_intr_handler(int irq, void *dev_id)
RD_REG_WORD(&reg->hccr);
}
}
spin_unlock(&ha->hardware_lock);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
(status & MBX_INTERRUPT) && ha->flags.mbox_int) {
@ -133,6 +134,7 @@ qla2300_intr_handler(int irq, void *dev_id)
uint16_t mb[4];
struct rsp_que *rsp;
struct qla_hw_data *ha;
unsigned long flags;
rsp = (struct rsp_que *) dev_id;
if (!rsp) {
@ -145,7 +147,7 @@ qla2300_intr_handler(int irq, void *dev_id)
reg = &ha->iobase->isp;
status = 0;
spin_lock(&ha->hardware_lock);
spin_lock_irqsave(&ha->hardware_lock, flags);
vha = pci_get_drvdata(ha->pdev);
for (iter = 50; iter--; ) {
stat = RD_REG_DWORD(&reg->u.isp2300.host_status);
@ -216,7 +218,7 @@ qla2300_intr_handler(int irq, void *dev_id)
WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
RD_REG_WORD_RELAXED(&reg->hccr);
}
spin_unlock(&ha->hardware_lock);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
(status & MBX_INTERRUPT) && ha->flags.mbox_int) {
@ -1626,6 +1628,7 @@ qla24xx_intr_handler(int irq, void *dev_id)
uint32_t hccr;
uint16_t mb[4];
struct rsp_que *rsp;
unsigned long flags;
rsp = (struct rsp_que *) dev_id;
if (!rsp) {
@ -1638,7 +1641,7 @@ qla24xx_intr_handler(int irq, void *dev_id)
reg = &ha->iobase->isp24;
status = 0;
spin_lock(&ha->hardware_lock);
spin_lock_irqsave(&ha->hardware_lock, flags);
vha = pci_get_drvdata(ha->pdev);
for (iter = 50; iter--; ) {
stat = RD_REG_DWORD(&reg->host_status);
@ -1688,7 +1691,7 @@ qla24xx_intr_handler(int irq, void *dev_id)
WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
RD_REG_DWORD_RELAXED(&reg->hccr);
}
spin_unlock(&ha->hardware_lock);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
(status & MBX_INTERRUPT) && ha->flags.mbox_int) {

View File

@ -945,7 +945,9 @@ qla2xxx_flash_npiv_conf(scsi_qla_host_t *vha)
DEBUG2(qla_printk(KERN_INFO, ha, "NPIV[%02x]: wwpn=%llx "
"wwnn=%llx vf_id=0x%x Q_qos=0x%x F_qos=0x%x.\n", cnt,
vid.port_name, vid.node_name, le16_to_cpu(entry->vf_id),
(unsigned long long)vid.port_name,
(unsigned long long)vid.node_name,
le16_to_cpu(entry->vf_id),
entry->q_qos, entry->f_qos));
if (i < QLA_PRECONFIG_VPORTS) {
@ -954,7 +956,8 @@ qla2xxx_flash_npiv_conf(scsi_qla_host_t *vha)
qla_printk(KERN_INFO, ha,
"NPIV-Config: Failed to create vport [%02x]: "
"wwpn=%llx wwnn=%llx.\n", cnt,
vid.port_name, vid.node_name);
(unsigned long long)vid.port_name,
(unsigned long long)vid.node_name);
}
}
done:

View File

@ -225,6 +225,7 @@ static struct {
{"SGI", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
{"IBM", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
{"SUN", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
{"DELL", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
{"SMSC", "USB 2 HS-CF", NULL, BLIST_SPARSELUN | BLIST_INQUIRY_36},
{"SONY", "CD-ROM CDU-8001", NULL, BLIST_BORKEN},
{"SONY", "TSL", NULL, BLIST_FORCELUN}, /* DDS3 & DDS4 autoloaders */

View File

@ -35,6 +35,7 @@
#include <linux/netlink.h>
#include <net/netlink.h>
#include <scsi/scsi_netlink_fc.h>
#include <scsi/scsi_bsg_fc.h>
#include "scsi_priv.h"
#include "scsi_transport_fc_internal.h"
@ -43,6 +44,10 @@ static void fc_vport_sched_delete(struct work_struct *work);
static int fc_vport_setup(struct Scsi_Host *shost, int channel,
struct device *pdev, struct fc_vport_identifiers *ids,
struct fc_vport **vport);
static int fc_bsg_hostadd(struct Scsi_Host *, struct fc_host_attrs *);
static int fc_bsg_rportadd(struct Scsi_Host *, struct fc_rport *);
static void fc_bsg_remove(struct request_queue *);
static void fc_bsg_goose_queue(struct fc_rport *);
/*
* Redefine so that we can have same named attributes in the
@ -411,13 +416,26 @@ static int fc_host_setup(struct transport_container *tc, struct device *dev,
return -ENOMEM;
}
fc_bsg_hostadd(shost, fc_host);
/* ignore any bsg add error - we just can't do sgio */
return 0;
}
static int fc_host_remove(struct transport_container *tc, struct device *dev,
struct device *cdev)
{
struct Scsi_Host *shost = dev_to_shost(dev);
struct fc_host_attrs *fc_host = shost_to_fc_host(shost);
fc_bsg_remove(fc_host->rqst_q);
return 0;
}
static DECLARE_TRANSPORT_CLASS(fc_host_class,
"fc_host",
fc_host_setup,
NULL,
fc_host_remove,
NULL);
/*
@ -2375,6 +2393,7 @@ fc_rport_final_delete(struct work_struct *work)
scsi_flush_work(shost);
fc_terminate_rport_io(rport);
/*
* Cancel any outstanding timers. These should really exist
* only when rmmod'ing the LLDD and we're asking for
@ -2407,6 +2426,8 @@ fc_rport_final_delete(struct work_struct *work)
(i->f->dev_loss_tmo_callbk))
i->f->dev_loss_tmo_callbk(rport);
fc_bsg_remove(rport->rqst_q);
transport_remove_device(dev);
device_del(dev);
transport_destroy_device(dev);
@ -2494,6 +2515,9 @@ fc_rport_create(struct Scsi_Host *shost, int channel,
transport_add_device(dev);
transport_configure_device(dev);
fc_bsg_rportadd(shost, rport);
/* ignore any bsg add error - we just can't do sgio */
if (rport->roles & FC_PORT_ROLE_FCP_TARGET) {
/* initiate a scan of the target */
rport->flags |= FC_RPORT_SCAN_PENDING;
@ -2658,6 +2682,8 @@ fc_remote_port_add(struct Scsi_Host *shost, int channel,
spin_unlock_irqrestore(shost->host_lock,
flags);
fc_bsg_goose_queue(rport);
return rport;
}
}
@ -3343,6 +3369,592 @@ fc_vport_sched_delete(struct work_struct *work)
}
/*
* BSG support
*/
/**
* fc_destroy_bsgjob - routine to tear down/delete a fc bsg job
* @job: fc_bsg_job that is to be torn down
*/
static void
fc_destroy_bsgjob(struct fc_bsg_job *job)
{
unsigned long flags;
spin_lock_irqsave(&job->job_lock, flags);
if (job->ref_cnt) {
spin_unlock_irqrestore(&job->job_lock, flags);
return;
}
spin_unlock_irqrestore(&job->job_lock, flags);
put_device(job->dev); /* release reference for the request */
kfree(job->request_payload.sg_list);
kfree(job->reply_payload.sg_list);
kfree(job);
}
/**
* fc_bsg_jobdone - completion routine for bsg requests that the LLD has
* completed
* @job: fc_bsg_job that is complete
*/
static void
fc_bsg_jobdone(struct fc_bsg_job *job)
{
struct request *req = job->req;
struct request *rsp = req->next_rq;
unsigned long flags;
int err;
spin_lock_irqsave(&job->job_lock, flags);
job->state_flags |= FC_RQST_STATE_DONE;
job->ref_cnt--;
spin_unlock_irqrestore(&job->job_lock, flags);
err = job->req->errors = job->reply->result;
if (err < 0)
/* we're only returning the result field in the reply */
job->req->sense_len = sizeof(uint32_t);
else
job->req->sense_len = job->reply_len;
/* we assume all request payload was transferred, residual == 0 */
req->resid_len = 0;
if (rsp) {
WARN_ON(job->reply->reply_payload_rcv_len > rsp->resid_len);
/* set reply (bidi) residual */
rsp->resid_len -= min(job->reply->reply_payload_rcv_len,
rsp->resid_len);
}
blk_end_request_all(req, err);
fc_destroy_bsgjob(job);
}
/**
* fc_bsg_job_timeout - handler for when a bsg request times out
* @req: request that timed out
*/
static enum blk_eh_timer_return
fc_bsg_job_timeout(struct request *req)
{
struct fc_bsg_job *job = (void *) req->special;
struct Scsi_Host *shost = job->shost;
struct fc_internal *i = to_fc_internal(shost->transportt);
unsigned long flags;
int err = 0, done = 0;
if (job->rport && job->rport->port_state == FC_PORTSTATE_BLOCKED)
return BLK_EH_RESET_TIMER;
spin_lock_irqsave(&job->job_lock, flags);
if (job->state_flags & FC_RQST_STATE_DONE)
done = 1;
else
job->ref_cnt++;
spin_unlock_irqrestore(&job->job_lock, flags);
if (!done && i->f->bsg_timeout) {
/* call LLDD to abort the i/o as it has timed out */
err = i->f->bsg_timeout(job);
if (err)
printk(KERN_ERR "ERROR: FC BSG request timeout - LLD "
"abort failed with status %d\n", err);
}
if (!done) {
spin_lock_irqsave(&job->job_lock, flags);
job->ref_cnt--;
spin_unlock_irqrestore(&job->job_lock, flags);
fc_destroy_bsgjob(job);
}
/* the blk_end_sync_io() doesn't check the error */
return BLK_EH_HANDLED;
}
static int
fc_bsg_map_buffer(struct fc_bsg_buffer *buf, struct request *req)
{
size_t sz = (sizeof(struct scatterlist) * req->nr_phys_segments);
BUG_ON(!req->nr_phys_segments);
buf->sg_list = kzalloc(sz, GFP_KERNEL);
if (!buf->sg_list)
return -ENOMEM;
sg_init_table(buf->sg_list, req->nr_phys_segments);
buf->sg_cnt = blk_rq_map_sg(req->q, req, buf->sg_list);
buf->payload_len = blk_rq_bytes(req);
return 0;
}
/**
* fc_req_to_bsgjob - Allocate/create the fc_bsg_job structure for the
* bsg request
* @shost: SCSI Host corresponding to the bsg object
* @rport: (optional) FC Remote Port corresponding to the bsg object
* @req: BSG request that needs a job structure
*/
static int
fc_req_to_bsgjob(struct Scsi_Host *shost, struct fc_rport *rport,
struct request *req)
{
struct fc_internal *i = to_fc_internal(shost->transportt);
struct request *rsp = req->next_rq;
struct fc_bsg_job *job;
int ret;
BUG_ON(req->special);
job = kzalloc(sizeof(struct fc_bsg_job) + i->f->dd_bsg_size,
GFP_KERNEL);
if (!job)
return -ENOMEM;
/*
* Note: this is a bit silly.
* The request gets formatted as a SGIO v4 ioctl request, which
* then gets reformatted as a blk request, which then gets
* reformatted as a fc bsg request. And on completion, we have
* to wrap return results such that SGIO v4 thinks it was a scsi
* status. I hope this was all worth it.
*/
req->special = job;
job->shost = shost;
job->rport = rport;
job->req = req;
if (i->f->dd_bsg_size)
job->dd_data = (void *)&job[1];
spin_lock_init(&job->job_lock);
job->request = (struct fc_bsg_request *)req->cmd;
job->request_len = req->cmd_len;
job->reply = req->sense;
job->reply_len = SCSI_SENSE_BUFFERSIZE; /* Size of sense buffer
* allocated */
if (req->bio) {
ret = fc_bsg_map_buffer(&job->request_payload, req);
if (ret)
goto failjob_rls_job;
}
if (rsp && rsp->bio) {
ret = fc_bsg_map_buffer(&job->reply_payload, rsp);
if (ret)
goto failjob_rls_rqst_payload;
}
job->job_done = fc_bsg_jobdone;
if (rport)
job->dev = &rport->dev;
else
job->dev = &shost->shost_gendev;
get_device(job->dev); /* take a reference for the request */
job->ref_cnt = 1;
return 0;
failjob_rls_rqst_payload:
kfree(job->request_payload.sg_list);
failjob_rls_job:
kfree(job);
return -ENOMEM;
}
enum fc_dispatch_result {
FC_DISPATCH_BREAK, /* on return, q is locked, break from q loop */
FC_DISPATCH_LOCKED, /* on return, q is locked, continue on */
FC_DISPATCH_UNLOCKED, /* on return, q is unlocked, continue on */
};
/**
* fc_bsg_host_dispatch - process fc host bsg requests and dispatch to LLDD
* @q: fc host request queue
* @shost: scsi host the bsg request was made against
* @job: bsg job to be processed
*/
static enum fc_dispatch_result
fc_bsg_host_dispatch(struct request_queue *q, struct Scsi_Host *shost,
struct fc_bsg_job *job)
{
struct fc_internal *i = to_fc_internal(shost->transportt);
int cmdlen = sizeof(uint32_t); /* start with length of msgcode */
int ret;
/* Validate the host command */
switch (job->request->msgcode) {
case FC_BSG_HST_ADD_RPORT:
cmdlen += sizeof(struct fc_bsg_host_add_rport);
break;
case FC_BSG_HST_DEL_RPORT:
cmdlen += sizeof(struct fc_bsg_host_del_rport);
break;
case FC_BSG_HST_ELS_NOLOGIN:
cmdlen += sizeof(struct fc_bsg_host_els);
/* there had better be xmt and rcv payloads */
if ((!job->request_payload.payload_len) ||
(!job->reply_payload.payload_len)) {
ret = -EINVAL;
goto fail_host_msg;
}
break;
case FC_BSG_HST_CT:
cmdlen += sizeof(struct fc_bsg_host_ct);
/* there better be xmt and rcv payloads */
if ((!job->request_payload.payload_len) ||
(!job->reply_payload.payload_len)) {
ret = -EINVAL;
goto fail_host_msg;
}
break;
case FC_BSG_HST_VENDOR:
cmdlen += sizeof(struct fc_bsg_host_vendor);
if ((shost->hostt->vendor_id == 0L) ||
(job->request->rqst_data.h_vendor.vendor_id !=
shost->hostt->vendor_id)) {
ret = -ESRCH;
goto fail_host_msg;
}
break;
default:
ret = -EBADR;
goto fail_host_msg;
}
/* check if we really have all the request data needed */
if (job->request_len < cmdlen) {
ret = -ENOMSG;
goto fail_host_msg;
}
ret = i->f->bsg_request(job);
if (!ret)
return FC_DISPATCH_UNLOCKED;
fail_host_msg:
/* return the errno failure code as the only status */
BUG_ON(job->reply_len < sizeof(uint32_t));
job->reply->result = ret;
job->reply_len = sizeof(uint32_t);
fc_bsg_jobdone(job);
return FC_DISPATCH_UNLOCKED;
}
/*
* fc_bsg_goose_queue - restart rport queue in case it was stopped
* @rport: rport to be restarted
*/
static void
fc_bsg_goose_queue(struct fc_rport *rport)
{
int flagset;
if (!rport->rqst_q)
return;
get_device(&rport->dev);
spin_lock(rport->rqst_q->queue_lock);
/* set REENTER only while we run the queue, if not already set */
flagset = !test_bit(QUEUE_FLAG_REENTER, &rport->rqst_q->queue_flags);
if (flagset)
queue_flag_set(QUEUE_FLAG_REENTER, rport->rqst_q);
__blk_run_queue(rport->rqst_q);
if (flagset)
queue_flag_clear(QUEUE_FLAG_REENTER, rport->rqst_q);
spin_unlock(rport->rqst_q->queue_lock);
put_device(&rport->dev);
}
/**
* fc_bsg_rport_dispatch - process rport bsg requests and dispatch to LLDD
* @q: rport request queue
* @shost: scsi host the rport is attached to
* @rport: rport the request is destined to
* @job: bsg job to be processed
*/
static enum fc_dispatch_result
fc_bsg_rport_dispatch(struct request_queue *q, struct Scsi_Host *shost,
struct fc_rport *rport, struct fc_bsg_job *job)
{
struct fc_internal *i = to_fc_internal(shost->transportt);
int cmdlen = sizeof(uint32_t); /* start with length of msgcode */
int ret;
/* Validate the rport command */
switch (job->request->msgcode) {
case FC_BSG_RPT_ELS:
cmdlen += sizeof(struct fc_bsg_rport_els);
goto check_bidi;
case FC_BSG_RPT_CT:
cmdlen += sizeof(struct fc_bsg_rport_ct);
check_bidi:
/* there better be xmt and rcv payloads */
if ((!job->request_payload.payload_len) ||
(!job->reply_payload.payload_len)) {
ret = -EINVAL;
goto fail_rport_msg;
}
break;
default:
ret = -EBADR;
goto fail_rport_msg;
}
/* check if we really have all the request data needed */
if (job->request_len < cmdlen) {
ret = -ENOMSG;
goto fail_rport_msg;
}
ret = i->f->bsg_request(job);
if (!ret)
return FC_DISPATCH_UNLOCKED;
fail_rport_msg:
/* return the errno failure code as the only status */
BUG_ON(job->reply_len < sizeof(uint32_t));
job->reply->result = ret;
job->reply_len = sizeof(uint32_t);
fc_bsg_jobdone(job);
return FC_DISPATCH_UNLOCKED;
}
/**
* fc_bsg_request_handler - generic handler for bsg requests
* @q: request queue to manage
* @shost: Scsi_Host related to the bsg object
* @rport: FC remote port related to the bsg object (optional)
* @dev: device structure for bsg object
*/
static void
fc_bsg_request_handler(struct request_queue *q, struct Scsi_Host *shost,
struct fc_rport *rport, struct device *dev)
{
struct request *req;
struct fc_bsg_job *job;
enum fc_dispatch_result ret;
if (!get_device(dev))
return;
while (!blk_queue_plugged(q)) {
if (rport && (rport->port_state == FC_PORTSTATE_BLOCKED))
break;
req = blk_fetch_request(q);
if (!req)
break;
if (rport && (rport->port_state != FC_PORTSTATE_ONLINE)) {
req->errors = -ENXIO;
spin_unlock_irq(q->queue_lock);
blk_end_request(req, -ENXIO, blk_rq_bytes(req));
spin_lock_irq(q->queue_lock);
continue;
}
spin_unlock_irq(q->queue_lock);
ret = fc_req_to_bsgjob(shost, rport, req);
if (ret) {
req->errors = ret;
blk_end_request(req, ret, blk_rq_bytes(req));
spin_lock_irq(q->queue_lock);
continue;
}
job = req->special;
/* check if we have the msgcode value at least */
if (job->request_len < sizeof(uint32_t)) {
BUG_ON(job->reply_len < sizeof(uint32_t));
job->reply->result = -ENOMSG;
job->reply_len = sizeof(uint32_t);
fc_bsg_jobdone(job);
spin_lock_irq(q->queue_lock);
continue;
}
/* the dispatch routines will unlock the queue_lock */
if (rport)
ret = fc_bsg_rport_dispatch(q, shost, rport, job);
else
ret = fc_bsg_host_dispatch(q, shost, job);
/* did the dispatcher hit a state where it can't process any more? */
if (ret == FC_DISPATCH_BREAK)
break;
/* did the dispatcher release the lock? */
if (ret == FC_DISPATCH_UNLOCKED)
spin_lock_irq(q->queue_lock);
}
spin_unlock_irq(q->queue_lock);
put_device(dev);
spin_lock_irq(q->queue_lock);
}
/**
* fc_bsg_host_handler - handler for bsg requests for a fc host
* @q: fc host request queue
*/
static void
fc_bsg_host_handler(struct request_queue *q)
{
struct Scsi_Host *shost = q->queuedata;
fc_bsg_request_handler(q, shost, NULL, &shost->shost_gendev);
}
/**
* fc_bsg_rport_handler - handler for bsg requests for a fc rport
* @q: rport request queue
*/
static void
fc_bsg_rport_handler(struct request_queue *q)
{
struct fc_rport *rport = q->queuedata;
struct Scsi_Host *shost = rport_to_shost(rport);
fc_bsg_request_handler(q, shost, rport, &rport->dev);
}
/**
* fc_bsg_hostadd - Create and add the bsg hooks so we can receive requests
* @shost: shost for fc_host
* @fc_host: fc_host the bsg structures are being added to
*/
static int
fc_bsg_hostadd(struct Scsi_Host *shost, struct fc_host_attrs *fc_host)
{
struct device *dev = &shost->shost_gendev;
struct fc_internal *i = to_fc_internal(shost->transportt);
struct request_queue *q;
int err;
char bsg_name[BUS_ID_SIZE]; /*20*/
fc_host->rqst_q = NULL;
if (!i->f->bsg_request)
return -ENOTSUPP;
snprintf(bsg_name, sizeof(bsg_name),
"fc_host%d", shost->host_no);
q = __scsi_alloc_queue(shost, fc_bsg_host_handler);
if (!q) {
printk(KERN_ERR "fc_host%d: bsg interface failed to "
"initialize - no request queue\n",
shost->host_no);
return -ENOMEM;
}
q->queuedata = shost;
queue_flag_set_unlocked(QUEUE_FLAG_BIDI, q);
blk_queue_rq_timed_out(q, fc_bsg_job_timeout);
blk_queue_rq_timeout(q, FC_DEFAULT_BSG_TIMEOUT);
err = bsg_register_queue(q, dev, bsg_name, NULL);
if (err) {
printk(KERN_ERR "fc_host%d: bsg interface failed to "
"initialize - register queue\n",
shost->host_no);
blk_cleanup_queue(q);
return err;
}
fc_host->rqst_q = q;
return 0;
}
/**
* fc_bsg_rportadd - Create and add the bsg hooks so we can receive requests
* @shost: shost that rport is attached to
* @rport: rport that the bsg hooks are being attached to
*/
static int
fc_bsg_rportadd(struct Scsi_Host *shost, struct fc_rport *rport)
{
struct device *dev = &rport->dev;
struct fc_internal *i = to_fc_internal(shost->transportt);
struct request_queue *q;
int err;
rport->rqst_q = NULL;
if (!i->f->bsg_request)
return -ENOTSUPP;
q = __scsi_alloc_queue(shost, fc_bsg_rport_handler);
if (!q) {
printk(KERN_ERR "%s: bsg interface failed to "
"initialize - no request queue\n",
dev->kobj.name);
return -ENOMEM;
}
q->queuedata = rport;
queue_flag_set_unlocked(QUEUE_FLAG_BIDI, q);
blk_queue_rq_timed_out(q, fc_bsg_job_timeout);
blk_queue_rq_timeout(q, BLK_DEFAULT_SG_TIMEOUT);
err = bsg_register_queue(q, dev, NULL, NULL);
if (err) {
printk(KERN_ERR "%s: bsg interface failed to "
"initialize - register queue\n",
dev->kobj.name);
blk_cleanup_queue(q);
return err;
}
rport->rqst_q = q;
return 0;
}
/**
* fc_bsg_remove - Deletes the bsg hooks on fchosts/rports
* @q: the request_queue that is to be torn down.
*/
static void
fc_bsg_remove(struct request_queue *q)
{
if (q) {
bsg_unregister_queue(q);
blk_cleanup_queue(q);
}
}
/* Original Author: Martin Hicks */
MODULE_AUTHOR("James Smart");
MODULE_DESCRIPTION("FC Transport Attributes");

View File

@ -234,8 +234,10 @@ static int spi_setup_transport_attrs(struct transport_container *tc,
spi_width(starget) = 0; /* narrow */
spi_max_width(starget) = 1;
spi_iu(starget) = 0; /* no IU */
spi_max_iu(starget) = 1;
spi_dt(starget) = 0; /* ST */
spi_qas(starget) = 0;
spi_max_qas(starget) = 1;
spi_wr_flow(starget) = 0;
spi_rd_strm(starget) = 0;
spi_rti(starget) = 0;
@ -360,9 +362,9 @@ static DEVICE_ATTR(field, S_IRUGO, \
/* The Parallel SCSI Transport Attributes: */
spi_transport_max_attr(offset, "%d\n");
spi_transport_max_attr(width, "%d\n");
spi_transport_rd_attr(iu, "%d\n");
spi_transport_max_attr(iu, "%d\n");
spi_transport_rd_attr(dt, "%d\n");
spi_transport_rd_attr(qas, "%d\n");
spi_transport_max_attr(qas, "%d\n");
spi_transport_rd_attr(wr_flow, "%d\n");
spi_transport_rd_attr(rd_strm, "%d\n");
spi_transport_rd_attr(rti, "%d\n");
@ -874,13 +876,13 @@ spi_dv_device_internal(struct scsi_device *sdev, u8 *buffer)
/* try QAS requests; this should be harmless to set if the
* target supports it */
if (scsi_device_qas(sdev)) {
if (scsi_device_qas(sdev) && spi_max_qas(starget)) {
DV_SET(qas, 1);
} else {
DV_SET(qas, 0);
}
if (scsi_device_ius(sdev) && min_period < 9) {
if (scsi_device_ius(sdev) && spi_max_iu(starget) && min_period < 9) {
/* This is u320 (or u640). Set IU transfers */
DV_SET(iu, 1);
/* Then set the optional parameters */
@ -1412,12 +1414,18 @@ static mode_t target_attribute_is_visible(struct kobject *kobj,
else if (attr == &dev_attr_iu.attr &&
spi_support_ius(starget))
return TARGET_ATTRIBUTE_HELPER(iu);
else if (attr == &dev_attr_max_iu.attr &&
spi_support_ius(starget))
return TARGET_ATTRIBUTE_HELPER(iu);
else if (attr == &dev_attr_dt.attr &&
spi_support_dt(starget))
return TARGET_ATTRIBUTE_HELPER(dt);
else if (attr == &dev_attr_qas.attr &&
spi_support_qas(starget))
return TARGET_ATTRIBUTE_HELPER(qas);
else if (attr == &dev_attr_max_qas.attr &&
spi_support_qas(starget))
return TARGET_ATTRIBUTE_HELPER(qas);
else if (attr == &dev_attr_wr_flow.attr &&
spi_support_ius(starget))
return TARGET_ATTRIBUTE_HELPER(wr_flow);
@ -1447,8 +1455,10 @@ static struct attribute *target_attributes[] = {
&dev_attr_width.attr,
&dev_attr_max_width.attr,
&dev_attr_iu.attr,
&dev_attr_max_iu.attr,
&dev_attr_dt.attr,
&dev_attr_qas.attr,
&dev_attr_max_qas.attr,
&dev_attr_wr_flow.attr,
&dev_attr_rd_strm.attr,
&dev_attr_rti.attr,

View File

@ -788,12 +788,6 @@ static int sg_ioctl_trans(unsigned int fd, unsigned int cmd, unsigned long arg)
if (put_user(compat_ptr(data), &sgio->usr_ptr))
return -EFAULT;
if (copy_in_user(&sgio->status, &sgio32->status,
(4 * sizeof(unsigned char)) +
(2 * sizeof(unsigned short)) +
(3 * sizeof(int))))
return -EFAULT;
err = sys_ioctl(fd, cmd, (unsigned long) sgio);
if (err >= 0) {

View File

@ -9,3 +9,4 @@ header-y += rdma/
header-y += video/
header-y += drm/
header-y += xen/
header-y += scsi/

View File

@ -0,0 +1,4 @@
header-y += scsi.h
header-y += scsi_netlink.h
header-y += scsi_netlink_fc.h
header-y += scsi_bsg_fc.h

View File

@ -0,0 +1,322 @@
/*
* FC Transport BSG Interface
*
* Copyright (C) 2008 James Smart, Emulex Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
#ifndef SCSI_BSG_FC_H
#define SCSI_BSG_FC_H
/*
* This file is intended to be included by both kernel and user space
*/
#include <scsi/scsi.h>
/*
* FC Transport SGIO v4 BSG Message Support
*/
/* Default BSG request timeout (in jiffies: 10 seconds) */
#define FC_DEFAULT_BSG_TIMEOUT (10 * HZ)
/*
* Request Message Codes supported by the FC Transport
*/
/* define the class masks for the message codes */
#define FC_BSG_CLS_MASK 0xF0000000 /* find object class */
#define FC_BSG_HST_MASK 0x80000000 /* fc host class */
#define FC_BSG_RPT_MASK 0x40000000 /* fc rport class */
/* fc_host Message Codes */
#define FC_BSG_HST_ADD_RPORT (FC_BSG_HST_MASK | 0x00000001)
#define FC_BSG_HST_DEL_RPORT (FC_BSG_HST_MASK | 0x00000002)
#define FC_BSG_HST_ELS_NOLOGIN (FC_BSG_HST_MASK | 0x00000003)
#define FC_BSG_HST_CT (FC_BSG_HST_MASK | 0x00000004)
#define FC_BSG_HST_VENDOR (FC_BSG_HST_MASK | 0x000000FF)
/* fc_rport Message Codes */
#define FC_BSG_RPT_ELS (FC_BSG_RPT_MASK | 0x00000001)
#define FC_BSG_RPT_CT (FC_BSG_RPT_MASK | 0x00000002)
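/*
 * Illustrative sketch (not part of the header): the class masks above let
 * a consumer tell host-directed and rport-directed message codes apart.
 * The helper names are hypothetical.
 */
static inline int fc_bsg_is_host_msg(uint32_t msgcode)
{
	return (msgcode & FC_BSG_CLS_MASK) == FC_BSG_HST_MASK;
}

static inline int fc_bsg_is_rport_msg(uint32_t msgcode)
{
	return (msgcode & FC_BSG_CLS_MASK) == FC_BSG_RPT_MASK;
}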
/*
* FC Address Identifiers in Message Structures :
*
* Whenever a command payload contains an FC Address Identifier
* (aka port_id), the value is effectively in big-endian
* order, thus the array elements are decoded as follows:
* element [0] is bits 23:16 of the FC Address Identifier
* element [1] is bits 15:8 of the FC Address Identifier
* element [2] is bits 7:0 of the FC Address Identifier
*/
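/*
 * Illustrative sketch (not part of the header): packing and unpacking the
 * 3-byte FC Address Identifier described above. The helper names are
 * hypothetical.
 */
static inline void fc_bsg_pack_port_id(uint8_t port_id[3], uint32_t did)
{
	port_id[0] = (did >> 16) & 0xff;	/* bits 23:16 */
	port_id[1] = (did >> 8) & 0xff;		/* bits 15:8 */
	port_id[2] = did & 0xff;		/* bits 7:0 */
}

static inline uint32_t fc_bsg_unpack_port_id(const uint8_t port_id[3])
{
	return (port_id[0] << 16) | (port_id[1] << 8) | port_id[2];
}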
/*
* FC Host Messages
*/
/* FC_BSG_HST_ADD_RPORT : */
/* Request:
* This message requests the FC host to login to the remote port
* at the specified N_Port_Id. The remote port is to be enumerated
* with the transport upon completion of the login.
*/
struct fc_bsg_host_add_rport {
uint8_t reserved;
/* FC Address Identifier of the remote port to login to */
uint8_t port_id[3];
};
/* Response:
* There is no additional response data - fc_bsg_reply->result is sufficient
*/
/* FC_BSG_HST_DEL_RPORT : */
/* Request:
* This message requests the FC host to remove an enumerated
* remote port and to terminate the login to it.
*
* Note: The driver is free to reject this request if it desires to
* remain logged in with the remote port.
*/
struct fc_bsg_host_del_rport {
uint8_t reserved;
/* FC Address Identifier of the remote port to logout of */
uint8_t port_id[3];
};
/* Response:
* There is no additional response data - fc_bsg_reply->result is sufficient
*/
/* FC_BSG_HST_ELS_NOLOGIN : */
/* Request:
* This message requests the FC_Host to send an ELS to a specific
* N_Port_ID. The host does not need to log into the remote port,
* nor does it need to enumerate the rport for further traffic
* (although, the FC host is free to do so if it desires).
*/
struct fc_bsg_host_els {
/*
* ELS Command Code being sent (must be the same as byte 0
* of the payload)
*/
uint8_t command_code;
/* FC Address Identifier of the remote port to send the ELS to */
uint8_t port_id[3];
};
/* Response:
*/
/* fc_bsg_ctels_reply->status values */
#define FC_CTELS_STATUS_OK 0x00000000
#define FC_CTELS_STATUS_REJECT 0x00000001
#define FC_CTELS_STATUS_P_RJT 0x00000002
#define FC_CTELS_STATUS_F_RJT 0x00000003
#define FC_CTELS_STATUS_P_BSY 0x00000004
#define FC_CTELS_STATUS_F_BSY 0x00000006
struct fc_bsg_ctels_reply {
/*
* Note: An ELS LS_RJT may be reported in 2 ways:
* a) A status of FC_CTELS_STATUS_OK is returned. The caller
* is to look into the ELS receive payload to determine
* LS_ACC or LS_RJT (by contents of word 0). The reject
* data will be in word 1.
* b) A status of FC_CTELS_STATUS_REJECT is returned. The
* rjt_data field will contain valid data.
*
* Note: ELS LS_ACC is determined by an FC_CTELS_STATUS_OK, and
* the receive payload word 0 indicates LS_ACC
* (e.g. value is 0x02xxxxxx).
*
* Note: Similarly, a CT Reject may be reported in 2 ways:
* a) A status of FC_CTELS_STATUS_OK is returned. The caller
* is to look into the CT receive payload to determine
* Accept or Reject (by contents of word 2). The reject
* data will be in word 3.
* b) A status of FC_CTELS_STATUS_REJECT is returned. The
* rjt_data field will contain valid data.
*
* Note: x_RJT/BSY status will indicate that the rjt_data field
* is valid and contains the reason/explanation values.
*/
uint32_t status; /* See FC_CTELS_STATUS_xxx */
/* valid if status is not FC_CTELS_STATUS_OK */
struct {
uint8_t action; /* fragment_id for CT REJECT */
uint8_t reason_code;
uint8_t reason_explanation;
uint8_t vendor_unique;
} rjt_data;
};
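/*
 * Illustrative kernel-side sketch (not part of the header): decoding an
 * ELS completion per the notes above. 'reply' and 'rcv_payload' are
 * assumed to come from a completed ELS job; userspace would use ntohl()
 * instead of be32_to_cpu().
 */
static inline int fc_bsg_els_accepted(const struct fc_bsg_ctels_reply *reply,
				      const __be32 *rcv_payload)
{
	if (reply->status != FC_CTELS_STATUS_OK)
		return 0;	/* rejected or busy; reason is in rjt_data */
	/* LS_ACC is indicated by 0x02 in byte 0 of receive payload word 0 */
	return (be32_to_cpu(rcv_payload[0]) >> 24) == 0x02;
}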
/* FC_BSG_HST_CT : */
/* Request:
* This message requests that a CT Request be performed with the
* indicated N_Port_ID. The driver is responsible for logging in with
* the fabric and/or N_Port_ID, etc as per FC rules. This request does
* not mandate that the driver must enumerate the destination in the
* transport. The driver is allowed to decide whether to enumerate it,
* and whether to tear it down after the request.
*/
struct fc_bsg_host_ct {
uint8_t reserved;
/* FC Address Identifier of the remote port to send the CT request to */
uint8_t port_id[3];
/*
* We need words 0-2 of the generic preamble for the LLD's use
*/
uint32_t preamble_word0; /* revision & IN_ID */
uint32_t preamble_word1; /* GS_Type, GS_SubType, Options, Rsvd */
uint32_t preamble_word2; /* Cmd Code, Max Size */
};
/* Response:
*
* The reply structure is an fc_bsg_ctels_reply structure
*/
/* FC_BSG_HST_VENDOR : */
/* Request:
* Note: When specifying vendor_id, be sure to read the Vendor Type and ID
* formatting requirements specified in scsi_netlink.h
*/
struct fc_bsg_host_vendor {
/*
* Identifies the vendor that the message is formatted for. This
* should be the recipient of the message.
*/
uint64_t vendor_id;
/* start of vendor command area */
uint32_t vendor_cmd[0];
};
/* Response:
*/
struct fc_bsg_host_vendor_reply {
/* start of vendor response area */
uint32_t vendor_rsp[0];
};
/*
* FC Remote Port Messages
*/
/* FC_BSG_RPT_ELS : */
/* Request:
* This message requests that an ELS be performed with the rport.
*/
struct fc_bsg_rport_els {
/*
* ELS Command Code being sent (must be the same as
* byte 0 of the payload)
*/
uint8_t els_code;
};
/* Response:
*
* The reply structure is an fc_bsg_ctels_reply structure
*/
/* FC_BSG_RPT_CT : */
/* Request:
* This message requests that a CT Request be performed with the rport.
*/
struct fc_bsg_rport_ct {
/*
* We need words 0-2 of the generic preamble for the LLD's use
*/
uint32_t preamble_word0; /* revision & IN_ID */
uint32_t preamble_word1; /* GS_Type, GS_SubType, Options, Rsvd */
uint32_t preamble_word2; /* Cmd Code, Max Size */
};
/* Response:
*
* The reply structure is an fc_bsg_ctels_reply structure
*/
/* request (CDB) structure of the sg_io_v4 */
struct fc_bsg_request {
uint32_t msgcode;
union {
struct fc_bsg_host_add_rport h_addrport;
struct fc_bsg_host_del_rport h_delrport;
struct fc_bsg_host_els h_els;
struct fc_bsg_host_ct h_ct;
struct fc_bsg_host_vendor h_vendor;
struct fc_bsg_rport_els r_els;
struct fc_bsg_rport_ct r_ct;
} rqst_data;
};
/* response (request sense data) structure of the sg_io_v4 */
struct fc_bsg_reply {
/*
* The completion result. Result exists in two forms:
* if negative, it is an -Exxx system errno value. There will
* be no further reply information supplied.
* else, it's the 4-byte scsi error result, with driver, host,
* msg and status fields. The per-msgcode reply structure
* will contain valid data.
*/
uint32_t result;
/* If there was a reply_payload, how much was received? */
uint32_t reply_payload_rcv_len;
union {
struct fc_bsg_host_vendor_reply vendor_reply;
struct fc_bsg_ctels_reply ctels_reply;
} reply_data;
};
#endif /* SCSI_BSG_FC_H */
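/*
 * Illustrative user-space sketch (not part of the header): issuing an
 * rport ELS through the SGIO v4 interface defined above. The bsg device
 * path, the helper name, and the 30-second timeout are assumptions, and
 * error handling is trimmed for brevity.
 */
#include <stdint.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <scsi/sg.h>			/* SG_IO */
#include <linux/bsg.h>			/* struct sg_io_v4 */
#include <scsi/scsi_bsg_fc.h>

int fc_bsg_send_els(const char *bsg_dev, uint8_t els_code,
		    void *els_cmd, uint32_t cmd_len,
		    void *els_rsp, uint32_t rsp_len)
{
	struct fc_bsg_request rqst;
	struct fc_bsg_reply reply;
	struct sg_io_v4 sgio;
	int fd, rc;

	memset(&rqst, 0, sizeof(rqst));
	rqst.msgcode = FC_BSG_RPT_ELS;
	rqst.rqst_data.r_els.els_code = els_code;

	memset(&sgio, 0, sizeof(sgio));
	sgio.guard = 'Q';				/* SGIO v4 marker */
	sgio.protocol = BSG_PROTOCOL_SCSI;
	sgio.subprotocol = BSG_SUB_PROTOCOL_SCSI_TRANSPORT;
	sgio.request = (uintptr_t)&rqst;		/* fc_bsg_request */
	sgio.request_len = sizeof(rqst);
	sgio.response = (uintptr_t)&reply;		/* fc_bsg_reply */
	sgio.max_response_len = sizeof(reply);
	sgio.dout_xferp = (uintptr_t)els_cmd;		/* ELS payload out */
	sgio.dout_xfer_len = cmd_len;
	sgio.din_xferp = (uintptr_t)els_rsp;		/* ELS response in */
	sgio.din_xfer_len = rsp_len;
	sgio.timeout = 30000;				/* milliseconds */

	fd = open(bsg_dev, O_RDWR);			/* e.g. /dev/bsg/<rport> */
	if (fd < 0)
		return -1;
	rc = ioctl(fd, SG_IO, &sgio);
	close(fd);
	return rc ? -1 : (int)reply.result;
}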

View File

@ -478,6 +478,15 @@ struct scsi_host_template {
* module_init/module_exit.
*/
struct list_head legacy_hosts;
/*
* Vendor Identifier associated with the host
*
* Note: When specifying vendor_id, be sure to read the
* Vendor Type and ID formatting requirements specified in
* scsi_netlink.h
*/
u64 vendor_id;
};
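/*
 * Illustrative sketch (not part of the header): formatting vendor_id per
 * the note above, using the identifier-type macros from scsi_netlink.h.
 * The template and the Emulex PCI vendor value are examples only.
 */
#include <linux/pci_ids.h>	/* PCI_VENDOR_ID_EMULEX */
#include <scsi/scsi_netlink.h>	/* SCSI_NL_VID_TYPE_PCI */

static struct scsi_host_template example_sht = {
	.name		= "example",
	/* identifier type in the high byte, vendor value in the low bytes */
	.vendor_id	= SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX,
};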
/*

View File

@ -33,7 +33,6 @@
struct scsi_transport_template;
/*
* FC Port definitions - Following FC HBAAPI guidelines
*
@ -352,6 +351,7 @@ struct fc_rport { /* aka fc_starget_attrs */
struct delayed_work fail_io_work;
struct work_struct stgt_delete_work;
struct work_struct rport_delete_work;
struct request_queue *rqst_q; /* bsg support */
} __attribute__((aligned(sizeof(unsigned long))));
/* bit field values for struct fc_rport "flags" field: */
@ -514,6 +514,9 @@ struct fc_host_attrs {
struct workqueue_struct *work_q;
char devloss_work_q_name[20];
struct workqueue_struct *devloss_work_q;
/* bsg support */
struct request_queue *rqst_q;
};
#define shost_to_fc_host(x) \
@ -579,6 +582,47 @@ struct fc_host_attrs {
(((struct fc_host_attrs *)(x)->shost_data)->devloss_work_q)
struct fc_bsg_buffer {
unsigned int payload_len;
int sg_cnt;
struct scatterlist *sg_list;
};
/* Values for fc_bsg_job->state_flags (bitflags) */
#define FC_RQST_STATE_INPROGRESS 0
#define FC_RQST_STATE_DONE 1
struct fc_bsg_job {
struct Scsi_Host *shost;
struct fc_rport *rport;
struct device *dev;
struct request *req;
spinlock_t job_lock;
unsigned int state_flags;
unsigned int ref_cnt;
void (*job_done)(struct fc_bsg_job *);
struct fc_bsg_request *request;
struct fc_bsg_reply *reply;
unsigned int request_len;
unsigned int reply_len;
/*
* On entry : reply_len indicates the buffer size allocated for
* the reply.
*
* Upon completion : the message handler must set reply_len
* to indicate the size of the reply to be returned to the
* caller.
*/
/* DMA payloads for the request/response */
struct fc_bsg_buffer request_payload;
struct fc_bsg_buffer reply_payload;
void *dd_data; /* Used for driver-specific storage */
};
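/*
 * Illustrative sketch (not part of the header): the general shape of an
 * LLD's bsg_request handler for the fc_bsg_job above. A real driver
 * queues the I/O and calls job->job_done() from its completion path; the
 * synchronous completion here is only to show the contract.
 */
static int example_bsg_request(struct fc_bsg_job *job)
{
	struct fc_bsg_reply *reply = job->reply;

	switch (job->request->msgcode) {
	case FC_BSG_RPT_ELS:
		/* send job->request_payload, receive into job->reply_payload */
		reply->reply_payload_rcv_len = 0;	/* bytes actually received */
		reply->result = 0;			/* success */
		job->job_done(job);
		return 0;
	default:
		return -ENOSYS;	/* unsupported msgcode; transport fails the job */
	}
}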
/* The functions by which the transport class and the driver communicate */
struct fc_function_template {
void (*get_rport_dev_loss_tmo)(struct fc_rport *);
@ -614,9 +658,14 @@ struct fc_function_template {
int (* tsk_mgmt_response)(struct Scsi_Host *, u64, u64, int);
int (* it_nexus_response)(struct Scsi_Host *, u64, int);
/* bsg support */
int (*bsg_request)(struct fc_bsg_job *);
int (*bsg_timeout)(struct fc_bsg_job *);
/* allocation lengths for host-specific data */
u32 dd_fcrport_size;
u32 dd_fcvport_size;
u32 dd_bsg_size;
/*
* The driver sets these to tell the transport class it
@ -737,7 +786,6 @@ fc_vport_set_state(struct fc_vport *vport, enum fc_vport_state new_state)
vport->vport_state = new_state;
}
struct scsi_transport_template *fc_attach_transport(
struct fc_function_template *);
void fc_release_transport(struct scsi_transport_template *);

View File

@ -36,8 +36,10 @@ struct spi_transport_attrs {
unsigned int width:1; /* 0 - narrow, 1 - wide */
unsigned int max_width:1;
unsigned int iu:1; /* Information Units enabled */
unsigned int max_iu:1;
unsigned int dt:1; /* DT clocking enabled */
unsigned int qas:1; /* Quick Arbitration and Selection enabled */
unsigned int max_qas:1;
unsigned int wr_flow:1; /* Write Flow control enabled */
unsigned int rd_strm:1; /* Read streaming enabled */
unsigned int rti:1; /* Retain Training Information */
@ -77,8 +79,10 @@ struct spi_host_attrs {
#define spi_width(x) (((struct spi_transport_attrs *)&(x)->starget_data)->width)
#define spi_max_width(x) (((struct spi_transport_attrs *)&(x)->starget_data)->max_width)
#define spi_iu(x) (((struct spi_transport_attrs *)&(x)->starget_data)->iu)
#define spi_max_iu(x) (((struct spi_transport_attrs *)&(x)->starget_data)->max_iu)
#define spi_dt(x) (((struct spi_transport_attrs *)&(x)->starget_data)->dt)
#define spi_qas(x) (((struct spi_transport_attrs *)&(x)->starget_data)->qas)
#define spi_max_qas(x) (((struct spi_transport_attrs *)&(x)->starget_data)->max_qas)
#define spi_wr_flow(x) (((struct spi_transport_attrs *)&(x)->starget_data)->wr_flow)
#define spi_rd_strm(x) (((struct spi_transport_attrs *)&(x)->starget_data)->rd_strm)
#define spi_rti(x) (((struct spi_transport_attrs *)&(x)->starget_data)->rti)