
Merge git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6

* git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6: (39 commits)
  [SCSI] sd: fix compile failure with CONFIG_BLK_DEV_INTEGRITY=n
  libiscsi: fix locking in iscsi_eh_device_reset
  libiscsi: check reason why we are stopping iscsi session to determine error value
  [SCSI] iscsi_tcp: return a descriptive error value during connection errors
  [SCSI] libiscsi: rename host reset to target reset
  [SCSI] iscsi class: fix endpoint id handling
  [SCSI] libiscsi: Support drivers initiating session removal
  [SCSI] libiscsi: fix data corruption when target has to resend data-in packets
  [SCSI] sd: Switch kernel printing level for DIF messages
  [SCSI] sd: Correctly handle all combinations of DIF and DIX
  [SCSI] sd: Always print actual protection_type
  [SCSI] sd: Issue correct protection operation
  [SCSI] scsi_error: fix target reset handling
  [SCSI] lpfc 8.2.8 v2 : Add statistical reporting control and additional fc vendor events
  [SCSI] lpfc 8.2.8 v2 : Add sysfs control of target queue depth handling
  [SCSI] lpfc 8.2.8 v2 : Revert target busy in favor of transport disrupted
  [SCSI] scsi_dh_alua: remove REQ_NOMERGE
  [SCSI] lpfc 8.2.8 : update driver version to 8.2.8
  [SCSI] lpfc 8.2.8 : Add MSI-X support
  [SCSI] lpfc 8.2.8 : Update driver to use new Host byte error code DID_TRANSPORT_DISRUPTED
  ...
Linus Torvalds 2008-10-17 09:00:23 -07:00
commit ed09441dac
60 changed files with 8004 additions and 1112 deletions

View File

@ -1075,8 +1075,15 @@ void init_request_from_bio(struct request *req, struct bio *bio)
/*
* inherit FAILFAST from bio (for read-ahead, and explicit FAILFAST)
*/
if (bio_rw_ahead(bio) || bio_failfast(bio))
req->cmd_flags |= REQ_FAILFAST;
if (bio_rw_ahead(bio))
req->cmd_flags |= (REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
REQ_FAILFAST_DRIVER);
if (bio_failfast_dev(bio))
req->cmd_flags |= REQ_FAILFAST_DEV;
if (bio_failfast_transport(bio))
req->cmd_flags |= REQ_FAILFAST_TRANSPORT;
if (bio_failfast_driver(bio))
req->cmd_flags |= REQ_FAILFAST_DRIVER;
/*
* REQ_BARRIER implies no merging, but lets make it explicit

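The hunk above splits the single REQ_FAILFAST request flag into three separate hints and reads each one back out of the bio. For reference, a minimal sketch of the bio-level accessors it relies on (the real definitions live in include/linux/bio.h and are not shown in this diff):

/* Hedged sketch of the bio failfast accessors used by the hunk above;
 * each tests one hint bit in bio->bi_rw. */
#define bio_failfast_dev(bio)	((bio)->bi_rw & (1 << BIO_RW_FAILFAST_DEV))
#define bio_failfast_transport(bio)	\
	((bio)->bi_rw & (1 << BIO_RW_FAILFAST_TRANSPORT))
#define bio_failfast_driver(bio)	\
	((bio)->bi_rw & (1 << BIO_RW_FAILFAST_DRIVER))
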
View File

@ -378,6 +378,7 @@ static void iscsi_iser_session_destroy(struct iscsi_cls_session *cls_session)
{
struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
iscsi_session_teardown(cls_session);
iscsi_host_remove(shost);
iscsi_host_free(shost);
}
@ -597,7 +598,7 @@ static struct scsi_host_template iscsi_iser_sht = {
.cmd_per_lun = ISCSI_MAX_CMD_PER_LUN,
.eh_abort_handler = iscsi_eh_abort,
.eh_device_reset_handler= iscsi_eh_device_reset,
.eh_host_reset_handler = iscsi_eh_host_reset,
.eh_target_reset_handler= iscsi_eh_target_reset,
.use_clustering = DISABLE_CLUSTERING,
.proc_name = "iscsi_iser",
.this_id = -1,

View File

@ -849,7 +849,7 @@ static int multipath_map(struct dm_target *ti, struct bio *bio,
dm_bio_record(&mpio->details, bio);
map_context->ptr = mpio;
bio->bi_rw |= (1 << BIO_RW_FAILFAST);
bio->bi_rw |= (1 << BIO_RW_FAILFAST_TRANSPORT);
r = map_io(m, bio, mpio, 0);
if (r < 0 || r == DM_MAPIO_REQUEUE)
mempool_free(mpio, m->mpio_pool);

View File

@ -167,7 +167,7 @@ static int multipath_make_request (struct request_queue *q, struct bio * bio)
mp_bh->bio = *bio;
mp_bh->bio.bi_sector += multipath->rdev->data_offset;
mp_bh->bio.bi_bdev = multipath->rdev->bdev;
mp_bh->bio.bi_rw |= (1 << BIO_RW_FAILFAST);
mp_bh->bio.bi_rw |= (1 << BIO_RW_FAILFAST_TRANSPORT);
mp_bh->bio.bi_end_io = multipath_end_request;
mp_bh->bio.bi_private = mp_bh;
generic_make_request(&mp_bh->bio);
@ -393,7 +393,7 @@ static void multipathd (mddev_t *mddev)
*bio = *(mp_bh->master_bio);
bio->bi_sector += conf->multipaths[mp_bh->path].rdev->data_offset;
bio->bi_bdev = conf->multipaths[mp_bh->path].rdev->bdev;
bio->bi_rw |= (1 << BIO_RW_FAILFAST);
bio->bi_rw |= (1 << BIO_RW_FAILFAST_TRANSPORT);
bio->bi_end_io = multipath_end_request;
bio->bi_private = mp_bh;
generic_make_request(bio);

View File

@ -544,7 +544,7 @@ static struct dasd_ccw_req *dasd_diag_build_cp(struct dasd_device *memdev,
}
cqr->retries = DIAG_MAX_RETRIES;
cqr->buildclk = get_clock();
if (req->cmd_flags & REQ_FAILFAST)
if (blk_noretry_request(req))
set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
cqr->startdev = memdev;
cqr->memdev = memdev;

View File

@ -1700,7 +1700,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp(struct dasd_device *startdev,
recid++;
}
}
if (req->cmd_flags & REQ_FAILFAST)
if (blk_noretry_request(req))
set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
cqr->startdev = startdev;
cqr->memdev = startdev;

View File

@ -355,7 +355,7 @@ static struct dasd_ccw_req *dasd_fba_build_cp(struct dasd_device * memdev,
recid++;
}
}
if (req->cmd_flags & REQ_FAILFAST)
if (blk_noretry_request(req))
set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
cqr->startdev = memdev;
cqr->memdev = memdev;

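All three DASD hunks above replace the open-coded REQ_FAILFAST test with blk_noretry_request(), which stays correct now that the flag is split in three. A plausible sketch of that helper after this series (assumption: the real macro lives in include/linux/blkdev.h):

/* Hedged sketch: a request is "no retry" when any failfast hint is set. */
#define blk_noretry_request(rq)	\
	((rq)->cmd_flags & (REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | \
			    REQ_FAILFAST_DRIVER))
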
View File

@ -1364,7 +1364,8 @@ EXPORT_SYMBOL(scsi_print_sense);
static const char * const hostbyte_table[]={
"DID_OK", "DID_NO_CONNECT", "DID_BUS_BUSY", "DID_TIME_OUT", "DID_BAD_TARGET",
"DID_ABORT", "DID_PARITY", "DID_ERROR", "DID_RESET", "DID_BAD_INTR",
"DID_PASSTHROUGH", "DID_SOFT_ERROR", "DID_IMM_RETRY", "DID_REQUEUE"};
"DID_PASSTHROUGH", "DID_SOFT_ERROR", "DID_IMM_RETRY", "DID_REQUEUE",
"DID_TRANSPORT_DISRUPTED", "DID_TRANSPORT_FAILFAST" };
#define NUM_HOSTBYTE_STRS ARRAY_SIZE(hostbyte_table)
static const char * const driverbyte_table[]={

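The two new strings extend the table to cover the DID_TRANSPORT_* host bytes introduced elsewhere in this series. For context, the table is indexed by the host byte of a SCSI result word; a minimal decode sketch (scsi_hostbyte_name() is a hypothetical helper, but the shift and mask match the kernel's host_byte() accessor):

static const char *scsi_hostbyte_name(int result)
{
	unsigned int hb = (result >> 16) & 0xff;	/* host_byte(result) */

	return (hb < NUM_HOSTBYTE_STRS) ? hostbyte_table[hb] : "(unknown)";
}
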
View File

@ -109,7 +109,8 @@ static struct request *get_alua_req(struct scsi_device *sdev,
}
rq->cmd_type = REQ_TYPE_BLOCK_PC;
rq->cmd_flags |= REQ_FAILFAST | REQ_NOMERGE;
rq->cmd_flags |= REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
REQ_FAILFAST_DRIVER;
rq->retries = ALUA_FAILOVER_RETRIES;
rq->timeout = ALUA_FAILOVER_TIMEOUT;

View File

@ -303,7 +303,8 @@ static struct request *get_req(struct scsi_device *sdev, int cmd,
rq->cmd[4] = len;
rq->cmd_type = REQ_TYPE_BLOCK_PC;
rq->cmd_flags |= REQ_FAILFAST;
rq->cmd_flags |= REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
REQ_FAILFAST_DRIVER;
rq->timeout = CLARIION_TIMEOUT;
rq->retries = CLARIION_RETRIES;

View File

@ -112,7 +112,8 @@ static int hp_sw_tur(struct scsi_device *sdev, struct hp_sw_dh_data *h)
return SCSI_DH_RES_TEMP_UNAVAIL;
req->cmd_type = REQ_TYPE_BLOCK_PC;
req->cmd_flags |= REQ_FAILFAST;
req->cmd_flags |= REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
REQ_FAILFAST_DRIVER;
req->cmd_len = COMMAND_SIZE(TEST_UNIT_READY);
req->cmd[0] = TEST_UNIT_READY;
req->timeout = HP_SW_TIMEOUT;
@ -204,7 +205,8 @@ static int hp_sw_start_stop(struct scsi_device *sdev, struct hp_sw_dh_data *h)
return SCSI_DH_RES_TEMP_UNAVAIL;
req->cmd_type = REQ_TYPE_BLOCK_PC;
req->cmd_flags |= REQ_FAILFAST;
req->cmd_flags |= REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
REQ_FAILFAST_DRIVER;
req->cmd_len = COMMAND_SIZE(START_STOP);
req->cmd[0] = START_STOP;
req->cmd[4] = 1; /* Start spin cycle */

View File

@ -226,7 +226,8 @@ static struct request *get_rdac_req(struct scsi_device *sdev,
}
rq->cmd_type = REQ_TYPE_BLOCK_PC;
rq->cmd_flags |= REQ_FAILFAST | REQ_NOMERGE;
rq->cmd_flags |= REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
REQ_FAILFAST_DRIVER;
rq->retries = RDAC_RETRIES;
rq->timeout = RDAC_TIMEOUT;

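The four device-handler hunks above (alua, emc, hp_sw, rdac) make the same conversion: internal path-management commands must never be retried by the block layer, so the old catch-all REQ_FAILFAST becomes all three split hints. A hedged sketch of the shared pattern (allocation line shown for context only; error handling omitted):

rq = blk_get_request(sdev->request_queue, rw, GFP_NOIO);
rq->cmd_type = REQ_TYPE_BLOCK_PC;
rq->cmd_flags |= REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
		 REQ_FAILFAST_DRIVER;
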
View File

@ -2031,8 +2031,6 @@ static void ibmvfc_terminate_rport_io(struct fc_rport *rport)
spin_unlock_irqrestore(shost->host_lock, flags);
} else
ibmvfc_issue_fc_host_lip(shost);
scsi_target_unblock(&rport->dev);
LEAVE;
}

View File

@ -523,22 +523,20 @@ iscsi_tcp_cleanup_task(struct iscsi_conn *conn, struct iscsi_task *task)
}
/**
* iscsi_data_rsp - SCSI Data-In Response processing
* iscsi_data_in - SCSI Data-In Response processing
* @conn: iscsi connection
* @task: scsi command task
**/
static int
iscsi_data_rsp(struct iscsi_conn *conn, struct iscsi_task *task)
iscsi_data_in(struct iscsi_conn *conn, struct iscsi_task *task)
{
struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
struct iscsi_tcp_task *tcp_task = task->dd_data;
struct iscsi_data_rsp *rhdr = (struct iscsi_data_rsp *)tcp_conn->in.hdr;
struct iscsi_session *session = conn->session;
struct scsi_cmnd *sc = task->sc;
int datasn = be32_to_cpu(rhdr->datasn);
unsigned total_in_length = scsi_in(sc)->length;
unsigned total_in_length = scsi_in(task->sc)->length;
iscsi_update_cmdsn(session, (struct iscsi_nopin*)rhdr);
iscsi_update_cmdsn(conn->session, (struct iscsi_nopin*)rhdr);
if (tcp_conn->in.datalen == 0)
return 0;
@ -558,23 +556,6 @@ iscsi_data_rsp(struct iscsi_conn *conn, struct iscsi_task *task)
return ISCSI_ERR_DATA_OFFSET;
}
if (rhdr->flags & ISCSI_FLAG_DATA_STATUS) {
sc->result = (DID_OK << 16) | rhdr->cmd_status;
conn->exp_statsn = be32_to_cpu(rhdr->statsn) + 1;
if (rhdr->flags & (ISCSI_FLAG_DATA_UNDERFLOW |
ISCSI_FLAG_DATA_OVERFLOW)) {
int res_count = be32_to_cpu(rhdr->residual_count);
if (res_count > 0 &&
(rhdr->flags & ISCSI_FLAG_CMD_OVERFLOW ||
res_count <= total_in_length))
scsi_in(sc)->resid = res_count;
else
sc->result = (DID_BAD_TARGET << 16) |
rhdr->cmd_status;
}
}
conn->datain_pdus_cnt++;
return 0;
}
@ -774,7 +755,7 @@ iscsi_tcp_hdr_dissect(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
if (!task)
rc = ISCSI_ERR_BAD_ITT;
else
rc = iscsi_data_rsp(conn, task);
rc = iscsi_data_in(conn, task);
if (rc) {
spin_unlock(&conn->session->lock);
break;
@ -998,7 +979,7 @@ iscsi_tcp_recv(read_descriptor_t *rd_desc, struct sk_buff *skb,
error:
debug_tcp("Error receiving PDU, errno=%d\n", rc);
iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
iscsi_conn_failure(conn, rc);
return 0;
}
@ -1117,8 +1098,10 @@ iscsi_xmit(struct iscsi_conn *conn)
while (1) {
rc = iscsi_tcp_xmit_segment(tcp_conn, segment);
if (rc < 0)
if (rc < 0) {
rc = ISCSI_ERR_XMIT_FAILED;
goto error;
}
if (rc == 0)
break;
@ -1127,7 +1110,7 @@ iscsi_xmit(struct iscsi_conn *conn)
if (segment->total_copied >= segment->total_size) {
if (segment->done != NULL) {
rc = segment->done(tcp_conn, segment);
if (rc < 0)
if (rc != 0)
goto error;
}
}
@ -1142,8 +1125,8 @@ error:
/* Transmit error. We could initiate error recovery
* here. */
debug_tcp("Error sending PDU, errno=%d\n", rc);
iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
return rc;
iscsi_conn_failure(conn, rc);
return -EIO;
}
/**
@ -1904,6 +1887,7 @@ static void iscsi_tcp_session_destroy(struct iscsi_cls_session *cls_session)
struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
iscsi_r2tpool_free(cls_session->dd_data);
iscsi_session_teardown(cls_session);
iscsi_host_remove(shost);
iscsi_host_free(shost);
@ -1927,7 +1911,7 @@ static struct scsi_host_template iscsi_sht = {
.cmd_per_lun = ISCSI_DEF_CMD_PER_LUN,
.eh_abort_handler = iscsi_eh_abort,
.eh_device_reset_handler= iscsi_eh_device_reset,
.eh_host_reset_handler = iscsi_eh_host_reset,
.eh_target_reset_handler= iscsi_eh_target_reset,
.use_clustering = DISABLE_CLUSTERING,
.slave_configure = iscsi_tcp_slave_configure,
.proc_name = "iscsi_tcp",

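Taken together, the iscsi_tcp hunks above change the error-reporting convention: instead of collapsing every failure into ISCSI_ERR_CONN_FAILED, the code now passes the specific enum iscsi_err reason to iscsi_conn_failure() and returns a plain errno to its caller. A condensed sketch of the resulting transmit error path (example_xmit() is illustrative, not a function added by this patch):

static int example_xmit(struct iscsi_conn *conn, int rc)
{
	if (rc < 0) {
		rc = ISCSI_ERR_XMIT_FAILED;	/* descriptive reason code */
		iscsi_conn_failure(conn, rc);	/* recorded for userspace */
		return -EIO;			/* generic errno for callers */
	}
	return 0;
}
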
View File

@ -404,11 +404,6 @@ static void fail_command(struct iscsi_conn *conn, struct iscsi_task *task,
conn->session->queued_cmdsn--;
else
conn->session->tt->cleanup_task(conn, task);
/*
* Check if cleanup_task dropped the lock and the command completed,
*/
if (!task->sc)
return;
sc->result = err;
if (!scsi_bidi_cmnd(sc))
@ -633,6 +628,40 @@ out:
__iscsi_put_task(task);
}
/**
* iscsi_data_in_rsp - SCSI Data-In Response processing
* @conn: iscsi connection
* @hdr: iscsi pdu
* @task: scsi command task
**/
static void
iscsi_data_in_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
struct iscsi_task *task)
{
struct iscsi_data_rsp *rhdr = (struct iscsi_data_rsp *)hdr;
struct scsi_cmnd *sc = task->sc;
if (!(rhdr->flags & ISCSI_FLAG_DATA_STATUS))
return;
sc->result = (DID_OK << 16) | rhdr->cmd_status;
conn->exp_statsn = be32_to_cpu(rhdr->statsn) + 1;
if (rhdr->flags & (ISCSI_FLAG_DATA_UNDERFLOW |
ISCSI_FLAG_DATA_OVERFLOW)) {
int res_count = be32_to_cpu(rhdr->residual_count);
if (res_count > 0 &&
(rhdr->flags & ISCSI_FLAG_CMD_OVERFLOW ||
res_count <= scsi_in(sc)->length))
scsi_in(sc)->resid = res_count;
else
sc->result = (DID_BAD_TARGET << 16) | rhdr->cmd_status;
}
conn->scsirsp_pdus_cnt++;
__iscsi_put_task(task);
}
static void iscsi_tmf_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
{
struct iscsi_tm_rsp *tmf = (struct iscsi_tm_rsp *)hdr;
@ -818,12 +847,7 @@ int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
iscsi_scsi_cmd_rsp(conn, hdr, task, data, datalen);
break;
case ISCSI_OP_SCSI_DATA_IN:
if (hdr->flags & ISCSI_FLAG_DATA_STATUS) {
conn->scsirsp_pdus_cnt++;
iscsi_update_cmdsn(session,
(struct iscsi_nopin*) hdr);
__iscsi_put_task(task);
}
iscsi_data_in_rsp(conn, hdr, task);
break;
case ISCSI_OP_LOGOUT_RSP:
iscsi_update_cmdsn(session, (struct iscsi_nopin*)hdr);
@ -954,6 +978,38 @@ struct iscsi_task *iscsi_itt_to_ctask(struct iscsi_conn *conn, itt_t itt)
}
EXPORT_SYMBOL_GPL(iscsi_itt_to_ctask);
void iscsi_session_failure(struct iscsi_cls_session *cls_session,
enum iscsi_err err)
{
struct iscsi_session *session = cls_session->dd_data;
struct iscsi_conn *conn;
struct device *dev;
unsigned long flags;
spin_lock_irqsave(&session->lock, flags);
conn = session->leadconn;
if (session->state == ISCSI_STATE_TERMINATE || !conn) {
spin_unlock_irqrestore(&session->lock, flags);
return;
}
dev = get_device(&conn->cls_conn->dev);
spin_unlock_irqrestore(&session->lock, flags);
if (!dev)
return;
/*
* if the host is being removed bypass the connection
* recovery initialization because we are going to kill
* the session.
*/
if (err == ISCSI_ERR_INVALID_HOST)
iscsi_conn_error_event(conn->cls_conn, err);
else
iscsi_conn_failure(conn, err);
put_device(dev);
}
EXPORT_SYMBOL_GPL(iscsi_session_failure);
void iscsi_conn_failure(struct iscsi_conn *conn, enum iscsi_err err)
{
struct iscsi_session *session = conn->session;
@ -968,9 +1024,10 @@ void iscsi_conn_failure(struct iscsi_conn *conn, enum iscsi_err err)
if (conn->stop_stage == 0)
session->state = ISCSI_STATE_FAILED;
spin_unlock_irqrestore(&session->lock, flags);
set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx);
iscsi_conn_error(conn->cls_conn, err);
iscsi_conn_error_event(conn->cls_conn, err);
}
EXPORT_SYMBOL_GPL(iscsi_conn_failure);
@ -1194,15 +1251,13 @@ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
switch (session->state) {
case ISCSI_STATE_IN_RECOVERY:
reason = FAILURE_SESSION_IN_RECOVERY;
sc->result = DID_IMM_RETRY << 16;
break;
goto reject;
case ISCSI_STATE_LOGGING_OUT:
reason = FAILURE_SESSION_LOGGING_OUT;
sc->result = DID_IMM_RETRY << 16;
break;
goto reject;
case ISCSI_STATE_RECOVERY_FAILED:
reason = FAILURE_SESSION_RECOVERY_TIMEOUT;
sc->result = DID_NO_CONNECT << 16;
sc->result = DID_TRANSPORT_FAILFAST << 16;
break;
case ISCSI_STATE_TERMINATE:
reason = FAILURE_SESSION_TERMINATE;
@ -1267,7 +1322,7 @@ reject:
spin_unlock(&session->lock);
debug_scsi("cmd 0x%x rejected (%d)\n", sc->cmnd[0], reason);
spin_lock(host->host_lock);
return SCSI_MLQUEUE_HOST_BUSY;
return SCSI_MLQUEUE_TARGET_BUSY;
fault:
spin_unlock(&session->lock);
@ -1307,7 +1362,7 @@ void iscsi_session_recovery_timedout(struct iscsi_cls_session *cls_session)
}
EXPORT_SYMBOL_GPL(iscsi_session_recovery_timedout);
int iscsi_eh_host_reset(struct scsi_cmnd *sc)
int iscsi_eh_target_reset(struct scsi_cmnd *sc)
{
struct iscsi_cls_session *cls_session;
struct iscsi_session *session;
@ -1321,7 +1376,7 @@ int iscsi_eh_host_reset(struct scsi_cmnd *sc)
spin_lock_bh(&session->lock);
if (session->state == ISCSI_STATE_TERMINATE) {
failed:
debug_scsi("failing host reset: session terminated "
debug_scsi("failing target reset: session terminated "
"[CID %d age %d]\n", conn->id, session->age);
spin_unlock_bh(&session->lock);
mutex_unlock(&session->eh_mutex);
@ -1336,7 +1391,7 @@ failed:
*/
iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
debug_scsi("iscsi_eh_host_reset wait for relogin\n");
debug_scsi("iscsi_eh_target_reset wait for relogin\n");
wait_event_interruptible(conn->ehwait,
session->state == ISCSI_STATE_TERMINATE ||
session->state == ISCSI_STATE_LOGGED_IN ||
@ -1348,14 +1403,14 @@ failed:
spin_lock_bh(&session->lock);
if (session->state == ISCSI_STATE_LOGGED_IN)
iscsi_session_printk(KERN_INFO, session,
"host reset succeeded\n");
"target reset succeeded\n");
else
goto failed;
spin_unlock_bh(&session->lock);
mutex_unlock(&session->eh_mutex);
return SUCCESS;
}
EXPORT_SYMBOL_GPL(iscsi_eh_host_reset);
EXPORT_SYMBOL_GPL(iscsi_eh_target_reset);
static void iscsi_tmf_timedout(unsigned long data)
{
@ -1769,10 +1824,10 @@ int iscsi_eh_device_reset(struct scsi_cmnd *sc)
iscsi_suspend_tx(conn);
spin_lock(&session->lock);
spin_lock_bh(&session->lock);
fail_all_commands(conn, sc->device->lun, DID_ERROR);
conn->tmf_state = TMF_INITIAL;
spin_unlock(&session->lock);
spin_unlock_bh(&session->lock);
iscsi_start_tx(conn);
goto done;
@ -1878,6 +1933,7 @@ struct Scsi_Host *iscsi_host_alloc(struct scsi_host_template *sht,
int dd_data_size, uint16_t qdepth)
{
struct Scsi_Host *shost;
struct iscsi_host *ihost;
shost = scsi_host_alloc(sht, sizeof(struct iscsi_host) + dd_data_size);
if (!shost)
@ -1892,22 +1948,43 @@ struct Scsi_Host *iscsi_host_alloc(struct scsi_host_template *sht,
qdepth = ISCSI_DEF_CMD_PER_LUN;
}
shost->cmd_per_lun = qdepth;
ihost = shost_priv(shost);
spin_lock_init(&ihost->lock);
ihost->state = ISCSI_HOST_SETUP;
ihost->num_sessions = 0;
init_waitqueue_head(&ihost->session_removal_wq);
return shost;
}
EXPORT_SYMBOL_GPL(iscsi_host_alloc);
static void iscsi_notify_host_removed(struct iscsi_cls_session *cls_session)
{
iscsi_session_failure(cls_session, ISCSI_ERR_INVALID_HOST);
}
/**
* iscsi_host_remove - remove host and sessions
* @shost: scsi host
*
* This will also remove any sessions attached to the host, but if userspace
* is managing the session at the same time this will break. TODO: add
* refcounting to the netlink iscsi interface so a rmmod or host hot unplug
* does not remove the memory from under us.
* If there are any sessions left, this will initiate the removal and wait
* for the completion.
*/
void iscsi_host_remove(struct Scsi_Host *shost)
{
iscsi_host_for_each_session(shost, iscsi_session_teardown);
struct iscsi_host *ihost = shost_priv(shost);
unsigned long flags;
spin_lock_irqsave(&ihost->lock, flags);
ihost->state = ISCSI_HOST_REMOVED;
spin_unlock_irqrestore(&ihost->lock, flags);
iscsi_host_for_each_session(shost, iscsi_notify_host_removed);
wait_event_interruptible(ihost->session_removal_wq,
ihost->num_sessions == 0);
if (signal_pending(current))
flush_signals(current);
scsi_remove_host(shost);
}
EXPORT_SYMBOL_GPL(iscsi_host_remove);
@ -1923,6 +2000,27 @@ void iscsi_host_free(struct Scsi_Host *shost)
}
EXPORT_SYMBOL_GPL(iscsi_host_free);
static void iscsi_host_dec_session_cnt(struct Scsi_Host *shost)
{
struct iscsi_host *ihost = shost_priv(shost);
unsigned long flags;
shost = scsi_host_get(shost);
if (!shost) {
printk(KERN_ERR "Invalid state. Cannot notify host removal "
"of session teardown event because host already "
"removed.\n");
return;
}
spin_lock_irqsave(&ihost->lock, flags);
ihost->num_sessions--;
if (ihost->num_sessions == 0)
wake_up(&ihost->session_removal_wq);
spin_unlock_irqrestore(&ihost->lock, flags);
scsi_host_put(shost);
}
/**
* iscsi_session_setup - create iscsi cls session and host and session
* @iscsit: iscsi transport template
@ -1943,9 +2041,19 @@ iscsi_session_setup(struct iscsi_transport *iscsit, struct Scsi_Host *shost,
uint16_t cmds_max, int cmd_task_size,
uint32_t initial_cmdsn, unsigned int id)
{
struct iscsi_host *ihost = shost_priv(shost);
struct iscsi_session *session;
struct iscsi_cls_session *cls_session;
int cmd_i, scsi_cmds, total_cmds = cmds_max;
unsigned long flags;
spin_lock_irqsave(&ihost->lock, flags);
if (ihost->state == ISCSI_HOST_REMOVED) {
spin_unlock_irqrestore(&ihost->lock, flags);
return NULL;
}
ihost->num_sessions++;
spin_unlock_irqrestore(&ihost->lock, flags);
if (!total_cmds)
total_cmds = ISCSI_DEF_XMIT_CMDS_MAX;
@ -1958,7 +2066,7 @@ iscsi_session_setup(struct iscsi_transport *iscsit, struct Scsi_Host *shost,
printk(KERN_ERR "iscsi: invalid can_queue of %d. can_queue "
"must be a power of two that is at least %d.\n",
total_cmds, ISCSI_TOTAL_CMDS_MIN);
return NULL;
goto dec_session_count;
}
if (total_cmds > ISCSI_TOTAL_CMDS_MAX) {
@ -1982,7 +2090,7 @@ iscsi_session_setup(struct iscsi_transport *iscsit, struct Scsi_Host *shost,
cls_session = iscsi_alloc_session(shost, iscsit,
sizeof(struct iscsi_session));
if (!cls_session)
return NULL;
goto dec_session_count;
session = cls_session->dd_data;
session->cls_session = cls_session;
session->host = shost;
@ -2021,6 +2129,7 @@ iscsi_session_setup(struct iscsi_transport *iscsit, struct Scsi_Host *shost,
if (iscsi_add_session(cls_session, id))
goto cls_session_fail;
return cls_session;
cls_session_fail:
@ -2029,6 +2138,8 @@ module_get_fail:
iscsi_pool_free(&session->cmdpool);
cmdpool_alloc_fail:
iscsi_free_session(cls_session);
dec_session_count:
iscsi_host_dec_session_cnt(shost);
return NULL;
}
EXPORT_SYMBOL_GPL(iscsi_session_setup);
@ -2044,6 +2155,7 @@ void iscsi_session_teardown(struct iscsi_cls_session *cls_session)
{
struct iscsi_session *session = cls_session->dd_data;
struct module *owner = cls_session->transport->owner;
struct Scsi_Host *shost = session->host;
iscsi_pool_free(&session->cmdpool);
@ -2056,6 +2168,7 @@ void iscsi_session_teardown(struct iscsi_cls_session *cls_session)
kfree(session->ifacename);
iscsi_destroy_session(cls_session);
iscsi_host_dec_session_cnt(shost);
module_put(owner);
}
EXPORT_SYMBOL_GPL(iscsi_session_teardown);
@ -2335,8 +2448,10 @@ static void iscsi_start_session_recovery(struct iscsi_session *session,
* flush queues.
*/
spin_lock_bh(&session->lock);
fail_all_commands(conn, -1,
STOP_CONN_RECOVER ? DID_BUS_BUSY : DID_ERROR);
if (flag == STOP_CONN_RECOVER)
fail_all_commands(conn, -1, DID_TRANSPORT_DISRUPTED);
else
fail_all_commands(conn, -1, DID_ERROR);
flush_control_queues(session, conn);
spin_unlock_bh(&session->lock);
mutex_unlock(&session->eh_mutex);

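The libiscsi changes above add per-host session counting so iscsi_host_remove() can wait for every remaining session to tear down before calling scsi_remove_host(). The resulting teardown order for a driver, mirroring the iscsi_iser and iscsi_tcp hunks earlier in this commit (sketch only):

static void example_session_destroy(struct iscsi_cls_session *cls_session)
{
	struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);

	iscsi_session_teardown(cls_session);	/* drops ihost->num_sessions */
	iscsi_host_remove(shost);		/* waits for num_sessions == 0 */
	iscsi_host_free(shost);
}
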
View File

@ -34,7 +34,14 @@ struct lpfc_sli2_slim;
#define LPFC_IOCB_LIST_CNT 2250 /* list of IOCBs for fast-path usage. */
#define LPFC_Q_RAMP_UP_INTERVAL 120 /* lun q_depth ramp up interval */
#define LPFC_VNAME_LEN 100 /* vport symbolic name length */
#define LPFC_TGTQ_INTERVAL 40000 /* Min amount of time between tgt
queue depth change in millisecs */
#define LPFC_TGTQ_RAMPUP_PCENT 5 /* Target queue rampup in percentage */
#define LPFC_MIN_TGT_QDEPTH 100
#define LPFC_MAX_TGT_QDEPTH 0xFFFF
#define LPFC_MAX_BUCKET_COUNT 20 /* Maximum no. of buckets for stat data
collection. */
/*
* Following time intervals are used of adjusting SCSI device
* queue depths when there are driver resource error or Firmware
@ -49,6 +56,9 @@ struct lpfc_sli2_slim;
#define LPFC_HB_MBOX_INTERVAL 5 /* Heart beat interval in seconds. */
#define LPFC_HB_MBOX_TIMEOUT 30 /* Heart beat timeout in seconds. */
/* Error Attention event polling interval */
#define LPFC_ERATT_POLL_INTERVAL 5 /* EATT poll interval in seconds */
/* Define macros for 64 bit support */
#define putPaddrLow(addr) ((uint32_t) (0xffffffff & (u64)(addr)))
#define putPaddrHigh(addr) ((uint32_t) (0xffffffff & (((u64)(addr))>>32)))
@ -60,6 +70,9 @@ struct lpfc_sli2_slim;
#define MAX_HBAEVT 32
/* Number of MSI-X vectors the driver uses */
#define LPFC_MSIX_VECTORS 2
/* lpfc wait event data ready flag */
#define LPFC_DATA_READY (1<<0)
@ -357,6 +370,7 @@ struct lpfc_vport {
uint32_t cfg_log_verbose;
uint32_t cfg_max_luns;
uint32_t cfg_enable_da_id;
uint32_t cfg_max_scsicmpl_time;
uint32_t dev_loss_tmo_changed;
@ -369,6 +383,8 @@ struct lpfc_vport {
struct lpfc_debugfs_trc *disc_trc;
atomic_t disc_trc_cnt;
#endif
uint8_t stat_data_enabled;
uint8_t stat_data_blocked;
};
struct hbq_s {
@ -407,10 +423,11 @@ struct lpfc_hba {
struct lpfc_sli sli;
uint32_t sli_rev; /* SLI2 or SLI3 */
uint32_t sli3_options; /* Mask of enabled SLI3 options */
#define LPFC_SLI3_ENABLED 0x01
#define LPFC_SLI3_HBQ_ENABLED 0x02
#define LPFC_SLI3_NPIV_ENABLED 0x04
#define LPFC_SLI3_VPORT_TEARDOWN 0x08
#define LPFC_SLI3_HBQ_ENABLED 0x01
#define LPFC_SLI3_NPIV_ENABLED 0x02
#define LPFC_SLI3_VPORT_TEARDOWN 0x04
#define LPFC_SLI3_CRP_ENABLED 0x08
#define LPFC_SLI3_INB_ENABLED 0x10
uint32_t iocb_cmd_size;
uint32_t iocb_rsp_size;
@ -422,10 +439,20 @@ struct lpfc_hba {
#define LS_NPIV_FAB_SUPPORTED 0x2 /* Fabric supports NPIV */
#define LS_IGNORE_ERATT 0x4 /* intr handler should ignore ERATT */
struct lpfc_sli2_slim *slim2p;
struct lpfc_dmabuf hbqslimp;
uint32_t hba_flag; /* hba generic flags */
#define HBA_ERATT_HANDLED 0x1 /* This flag is set when eratt handled */
dma_addr_t slim2p_mapping;
struct lpfc_dmabuf slim2p;
MAILBOX_t *mbox;
uint32_t *inb_ha_copy;
uint32_t *inb_counter;
uint32_t inb_last_counter;
uint32_t ha_copy;
struct _PCB *pcb;
struct _IOCB *IOCBs;
struct lpfc_dmabuf hbqslimp;
uint16_t pci_cfg_value;
@ -492,7 +519,7 @@ struct lpfc_hba {
wait_queue_head_t work_waitq;
struct task_struct *worker_thread;
long data_flags;
unsigned long data_flags;
uint32_t hbq_in_use; /* HBQs in use flag */
struct list_head hbqbuf_in_list; /* in-fly hbq buffer list */
@ -514,6 +541,7 @@ struct lpfc_hba {
void __iomem *HCregaddr; /* virtual address for host ctl reg */
struct lpfc_hgp __iomem *host_gp; /* Host side get/put pointers */
struct lpfc_pgp *port_gp;
uint32_t __iomem *hbq_put; /* Address in SLIM to HBQ put ptrs */
uint32_t *hbq_get; /* Host mem address of HBQ get ptrs */
@ -536,6 +564,7 @@ struct lpfc_hba {
uint8_t soft_wwn_enable;
struct timer_list fcp_poll_timer;
struct timer_list eratt_poll;
/*
* stat counters
@ -565,7 +594,7 @@ struct lpfc_hba {
struct fc_host_statistics link_stats;
enum intr_type_t intr_type;
struct msix_entry msix_entries[1];
struct msix_entry msix_entries[LPFC_MSIX_VECTORS];
struct list_head port_list;
struct lpfc_vport *pport; /* physical lpfc_vport pointer */
@ -605,6 +634,7 @@ struct lpfc_hba {
unsigned long last_completion_time;
struct timer_list hb_tmofunc;
uint8_t hb_outstanding;
enum hba_temp_state over_temp_state;
/* ndlp reference management */
spinlock_t ndlp_lock;
/*
@ -613,7 +643,19 @@ struct lpfc_hba {
*/
#define QUE_BUFTAG_BIT (1<<31)
uint32_t buffer_tag_count;
enum hba_temp_state over_temp_state;
int wait_4_mlo_maint_flg;
wait_queue_head_t wait_4_mlo_m_q;
/* data structure used for latency data collection */
#define LPFC_NO_BUCKET 0
#define LPFC_LINEAR_BUCKET 1
#define LPFC_POWER2_BUCKET 2
uint8_t bucket_type;
uint32_t bucket_base;
uint32_t bucket_step;
/* Maximum number of events that can be outstanding at any time*/
#define LPFC_MAX_EVT_COUNT 512
atomic_t fast_event_count;
};
static inline struct Scsi_Host *
@ -650,15 +692,25 @@ lpfc_worker_wake_up(struct lpfc_hba *phba)
return;
}
#define FC_REG_DUMP_EVENT 0x10 /* Register for Dump events */
#define FC_REG_TEMPERATURE_EVENT 0x20 /* Register for temperature
event */
static inline void
lpfc_sli_read_hs(struct lpfc_hba *phba)
{
/*
* There was a link/board error. Read the status register to retrieve
* the error event and process it.
*/
phba->sli.slistat.err_attn_event++;
/* Save status info */
phba->work_hs = readl(phba->HSregaddr);
phba->work_status[0] = readl(phba->MBslimaddr + 0xa8);
phba->work_status[1] = readl(phba->MBslimaddr + 0xac);
/* Clear chip Host Attention error bit */
writel(HA_ERATT, phba->HAregaddr);
readl(phba->HAregaddr); /* flush */
phba->pport->stopped = 1;
return;
}
struct temp_event {
uint32_t event_type;
uint32_t event_code;
uint32_t data;
};
#define LPFC_CRIT_TEMP 0x1
#define LPFC_THRESHOLD_TEMP 0x2
#define LPFC_NORMAL_TEMP 0x3

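The new bucket_type, bucket_base, and bucket_step fields above configure the statistical latency collection this series adds; the code that consumes them sits in a file diff suppressed just below as too large. A hedged sketch of how such a classification could work (lpfc_latency_bucket() is illustrative only, not code from this commit):

static inline int lpfc_latency_bucket(struct lpfc_hba *phba, uint32_t lat)
{
	uint32_t idx, off = lat - phba->bucket_base;

	if (phba->bucket_type == LPFC_LINEAR_BUCKET)
		idx = off / phba->bucket_step;		/* equal-width buckets */
	else						/* LPFC_POWER2_BUCKET */
		idx = fls(off >> phba->bucket_step);	/* doubling widths */

	return min_t(uint32_t, idx, LPFC_MAX_BUCKET_COUNT - 1);
}
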
File diff suppressed because it is too large.

View File

@ -18,7 +18,7 @@
* included with this package. *
*******************************************************************/
typedef int (*node_filter)(struct lpfc_nodelist *ndlp, void *param);
typedef int (*node_filter)(struct lpfc_nodelist *, void *);
struct fc_rport;
void lpfc_dump_mem(struct lpfc_hba *, LPFC_MBOXQ_t *, uint16_t);
@ -26,11 +26,11 @@ void lpfc_read_nv(struct lpfc_hba *, LPFC_MBOXQ_t *);
void lpfc_config_async(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t);
void lpfc_heart_beat(struct lpfc_hba *, LPFC_MBOXQ_t *);
int lpfc_read_la(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb,
struct lpfc_dmabuf *mp);
int lpfc_read_la(struct lpfc_hba *, LPFC_MBOXQ_t *, struct lpfc_dmabuf *);
void lpfc_clear_la(struct lpfc_hba *, LPFC_MBOXQ_t *);
void lpfc_issue_clear_la(struct lpfc_hba *phba, struct lpfc_vport *vport);
void lpfc_issue_clear_la(struct lpfc_hba *, struct lpfc_vport *);
void lpfc_config_link(struct lpfc_hba *, LPFC_MBOXQ_t *);
int lpfc_config_msi(struct lpfc_hba *, LPFC_MBOXQ_t *);
int lpfc_read_sparam(struct lpfc_hba *, LPFC_MBOXQ_t *, int);
void lpfc_read_config(struct lpfc_hba *, LPFC_MBOXQ_t *);
void lpfc_read_lnk_stat(struct lpfc_hba *, LPFC_MBOXQ_t *);
@ -43,7 +43,7 @@ void lpfc_unreg_vpi(struct lpfc_hba *, uint16_t, LPFC_MBOXQ_t *);
void lpfc_init_link(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t, uint32_t);
struct lpfc_vport *lpfc_find_vport_by_did(struct lpfc_hba *, uint32_t);
void lpfc_cleanup_rpis(struct lpfc_vport *vport, int remove);
void lpfc_cleanup_rpis(struct lpfc_vport *, int);
int lpfc_linkdown(struct lpfc_hba *);
void lpfc_port_link_failure(struct lpfc_vport *);
void lpfc_mbx_cmpl_read_la(struct lpfc_hba *, LPFC_MBOXQ_t *);
@ -135,7 +135,7 @@ void lpfc_ct_unsol_event(struct lpfc_hba *, struct lpfc_sli_ring *,
int lpfc_ns_cmd(struct lpfc_vport *, int, uint8_t, uint32_t);
int lpfc_fdmi_cmd(struct lpfc_vport *, struct lpfc_nodelist *, int);
void lpfc_fdmi_tmo(unsigned long);
void lpfc_fdmi_timeout_handler(struct lpfc_vport *vport);
void lpfc_fdmi_timeout_handler(struct lpfc_vport *);
int lpfc_config_port_prep(struct lpfc_hba *);
int lpfc_config_port_post(struct lpfc_hba *);
@ -155,6 +155,8 @@ int lpfc_sli_queue_setup(struct lpfc_hba *);
void lpfc_handle_eratt(struct lpfc_hba *);
void lpfc_handle_latt(struct lpfc_hba *);
irqreturn_t lpfc_intr_handler(int, void *);
irqreturn_t lpfc_sp_intr_handler(int, void *);
irqreturn_t lpfc_fp_intr_handler(int, void *);
void lpfc_read_rev(struct lpfc_hba *, LPFC_MBOXQ_t *);
void lpfc_config_ring(struct lpfc_hba *, int, LPFC_MBOXQ_t *);
@ -175,11 +177,12 @@ void lpfc_mem_free(struct lpfc_hba *);
void lpfc_stop_vport_timers(struct lpfc_vport *);
void lpfc_poll_timeout(unsigned long ptr);
void lpfc_poll_start_timer(struct lpfc_hba * phba);
void lpfc_sli_poll_fcp_ring(struct lpfc_hba * hba);
void lpfc_poll_start_timer(struct lpfc_hba *);
void lpfc_poll_eratt(unsigned long);
void lpfc_sli_poll_fcp_ring(struct lpfc_hba *);
struct lpfc_iocbq * lpfc_sli_get_iocbq(struct lpfc_hba *);
void lpfc_sli_release_iocbq(struct lpfc_hba * phba, struct lpfc_iocbq * iocb);
uint16_t lpfc_sli_next_iotag(struct lpfc_hba * phba, struct lpfc_iocbq * iocb);
void lpfc_sli_release_iocbq(struct lpfc_hba *, struct lpfc_iocbq *);
uint16_t lpfc_sli_next_iotag(struct lpfc_hba *, struct lpfc_iocbq *);
void lpfc_reset_barrier(struct lpfc_hba * phba);
int lpfc_sli_brdready(struct lpfc_hba *, uint32_t);
@ -187,11 +190,13 @@ int lpfc_sli_brdkill(struct lpfc_hba *);
int lpfc_sli_brdreset(struct lpfc_hba *);
int lpfc_sli_brdrestart(struct lpfc_hba *);
int lpfc_sli_hba_setup(struct lpfc_hba *);
int lpfc_sli_config_port(struct lpfc_hba *, int);
int lpfc_sli_host_down(struct lpfc_vport *);
int lpfc_sli_hba_down(struct lpfc_hba *);
int lpfc_sli_issue_mbox(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t);
int lpfc_sli_handle_mb_event(struct lpfc_hba *);
int lpfc_sli_flush_mbox_queue(struct lpfc_hba *);
int lpfc_sli_check_eratt(struct lpfc_hba *);
int lpfc_sli_handle_slow_ring_event(struct lpfc_hba *,
struct lpfc_sli_ring *, uint32_t);
void lpfc_sli_def_mbox_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *);
@ -199,6 +204,7 @@ int lpfc_sli_issue_iocb(struct lpfc_hba *, struct lpfc_sli_ring *,
struct lpfc_iocbq *, uint32_t);
void lpfc_sli_pcimem_bcopy(void *, void *, uint32_t);
void lpfc_sli_abort_iocb_ring(struct lpfc_hba *, struct lpfc_sli_ring *);
void lpfc_sli_flush_fcp_rings(struct lpfc_hba *);
int lpfc_sli_ringpostbuf_put(struct lpfc_hba *, struct lpfc_sli_ring *,
struct lpfc_dmabuf *);
struct lpfc_dmabuf *lpfc_sli_ringpostbuf_get(struct lpfc_hba *,
@ -226,17 +232,13 @@ struct lpfc_nodelist *lpfc_findnode_did(struct lpfc_vport *, uint32_t);
struct lpfc_nodelist *lpfc_findnode_wwpn(struct lpfc_vport *,
struct lpfc_name *);
int lpfc_sli_issue_mbox_wait(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq,
uint32_t timeout);
int lpfc_sli_issue_mbox_wait(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t);
int lpfc_sli_issue_iocb_wait(struct lpfc_hba * phba,
struct lpfc_sli_ring * pring,
struct lpfc_iocbq * piocb,
struct lpfc_iocbq * prspiocbq,
uint32_t timeout);
void lpfc_sli_abort_fcp_cmpl(struct lpfc_hba * phba,
struct lpfc_iocbq * cmdiocb,
struct lpfc_iocbq * rspiocb);
int lpfc_sli_issue_iocb_wait(struct lpfc_hba *, struct lpfc_sli_ring *,
struct lpfc_iocbq *, struct lpfc_iocbq *,
uint32_t);
void lpfc_sli_abort_fcp_cmpl(struct lpfc_hba *, struct lpfc_iocbq *,
struct lpfc_iocbq *);
void lpfc_sli_free_hbq(struct lpfc_hba *, struct hbq_dmabuf *);
@ -269,7 +271,7 @@ void lpfc_dev_loss_tmo_callbk(struct fc_rport *rport);
struct lpfc_vport *lpfc_create_port(struct lpfc_hba *, int, struct device *);
int lpfc_vport_disable(struct fc_vport *fc_vport, bool disable);
void lpfc_mbx_unreg_vpi(struct lpfc_vport *);
int lpfc_mbx_unreg_vpi(struct lpfc_vport *);
void destroy_port(struct lpfc_vport *);
int lpfc_get_instance(void);
void lpfc_host_attrib_init(struct Scsi_Host *);
@ -290,6 +292,13 @@ void lpfc_unblock_fabric_iocbs(struct lpfc_hba *);
void lpfc_adjust_queue_depth(struct lpfc_hba *);
void lpfc_ramp_down_queue_handler(struct lpfc_hba *);
void lpfc_ramp_up_queue_handler(struct lpfc_hba *);
void lpfc_scsi_dev_block(struct lpfc_hba *);
void
lpfc_send_els_failure_event(struct lpfc_hba *, struct lpfc_iocbq *,
struct lpfc_iocbq *);
struct lpfc_fast_path_event *lpfc_alloc_fast_evt(struct lpfc_hba *);
void lpfc_free_fast_evt(struct lpfc_hba *, struct lpfc_fast_path_event *);
#define ScsiResult(host_code, scsi_code) (((host_code) << 16) | scsi_code)
#define HBA_EVENT_RSCN 5

View File

@ -34,6 +34,7 @@
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
@ -134,25 +135,24 @@ lpfc_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
}
list_del(&head);
} else {
struct lpfc_iocbq *next;
list_for_each_entry_safe(iocbq, next, &piocbq->list, list) {
INIT_LIST_HEAD(&head);
list_add_tail(&head, &piocbq->list);
list_for_each_entry(iocbq, &head, list) {
icmd = &iocbq->iocb;
if (icmd->ulpBdeCount == 0)
lpfc_ct_unsol_buffer(phba, piocbq, NULL, 0);
lpfc_ct_unsol_buffer(phba, iocbq, NULL, 0);
for (i = 0; i < icmd->ulpBdeCount; i++) {
paddr = getPaddr(icmd->un.cont64[i].addrHigh,
icmd->un.cont64[i].addrLow);
mp = lpfc_sli_ringpostbuf_get(phba, pring,
paddr);
size = icmd->un.cont64[i].tus.f.bdeSize;
lpfc_ct_unsol_buffer(phba, piocbq, mp, size);
lpfc_ct_unsol_buffer(phba, iocbq, mp, size);
lpfc_in_buf_free(phba, mp);
}
list_del(&iocbq->list);
lpfc_sli_release_iocbq(phba, iocbq);
lpfc_post_buffer(phba, pring, i);
}
list_del(&head);
}
}
@ -212,7 +212,7 @@ lpfc_alloc_ct_rsp(struct lpfc_hba *phba, int cmdcode, struct ulp_bde64 *bpl,
else
list_add_tail(&mp->list, &mlist->list);
bpl->tus.f.bdeFlags = BUFF_USE_RCV;
bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
/* build buffer ptr list for IOCB */
bpl->addrLow = le32_to_cpu(putPaddrLow(mp->phys) );
bpl->addrHigh = le32_to_cpu(putPaddrHigh(mp->phys) );
@ -283,7 +283,7 @@ lpfc_gen_req(struct lpfc_vport *vport, struct lpfc_dmabuf *bmp,
icmd->un.genreq64.bdl.ulpIoTag32 = 0;
icmd->un.genreq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
icmd->un.genreq64.bdl.addrLow = putPaddrLow(bmp->phys);
icmd->un.genreq64.bdl.bdeFlags = BUFF_TYPE_BDL;
icmd->un.genreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
icmd->un.genreq64.bdl.bdeSize = (num_entry * sizeof (struct ulp_bde64));
if (usr_flg)
@ -861,7 +861,7 @@ lpfc_cmpl_ct(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
retry++;
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
"0216 Retrying NS cmd %x\n", cmdcode);
"0250 Retrying NS cmd %x\n", cmdcode);
rc = lpfc_ns_cmd(vport, cmdcode, retry, 0);
if (rc == 0)
goto out;

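The lpfc_ct_unsol_event() hunk above reworks the multi-IOCB walk so each queued iocbq (including piocbq itself) is processed through a stable local list head rather than by deleting entries mid-walk. The idiom in isolation (hedged sketch):

struct list_head head;

INIT_LIST_HEAD(&head);
list_add_tail(&head, &piocbq->list);	/* thread the local head into the chain */
list_for_each_entry(iocbq, &head, list) {
	/* process each queued iocbq, piocbq included */
}
list_del(&head);			/* unthread the local head when done */
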
View File

@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
* Copyright (C) 2007 Emulex. All rights reserved. *
* Copyright (C) 2007-2008 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* *
@ -35,6 +35,7 @@
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
@ -46,13 +47,14 @@
#include "lpfc_debugfs.h"
#ifdef CONFIG_LPFC_DEBUG_FS
/* debugfs interface
/**
* debugfs interface
*
* To access this interface the user should:
* # mkdir /debug
* # mount -t debugfs none /debug
*
* The lpfc debugfs directory hierachy is:
* The lpfc debugfs directory hierarchy is:
* lpfc/lpfcX/vportY
* where X is the lpfc hba unique_id
* where Y is the vport VPI on that hba
@ -61,14 +63,21 @@
* discovery_trace
* This is an ASCII readable file that contains a trace of the last
* lpfc_debugfs_max_disc_trc events that happened on a specific vport.
* See lpfc_debugfs.h for different categories of
* discovery events. To enable the discovery trace, the following
* module parameters must be set:
* See lpfc_debugfs.h for different categories of discovery events.
* To enable the discovery trace, the following module parameters must be set:
* lpfc_debugfs_enable=1 Turns on lpfc debugfs filesystem support
* lpfc_debugfs_max_disc_trc=X Where X is the event trace depth for
* EACH vport. X MUST also be a power of 2.
* lpfc_debugfs_mask_disc_trc=Y Where Y is an event mask as defined in
* lpfc_debugfs.h .
*
* slow_ring_trace
* This is an ASCII readable file that contains a trace of the last
* lpfc_debugfs_max_slow_ring_trc events that happened on a specific HBA.
* To enable the slow ring trace, the following module parameters must be set:
* lpfc_debugfs_enable=1 Turns on lpfc debugfs filesystem support
* lpfc_debugfs_max_slow_ring_trc=X Where X is the event trace depth for
* the HBA. X MUST also be a power of 2.
*/
static int lpfc_debugfs_enable = 1;
module_param(lpfc_debugfs_enable, int, 0);
@ -117,6 +126,25 @@ struct lpfc_debug {
static atomic_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
static unsigned long lpfc_debugfs_start_time = 0L;
/**
* lpfc_debugfs_disc_trc_data - Dump discovery logging to a buffer.
* @vport: The vport to gather the log info from.
* @buf: The buffer to dump log into.
* @size: The maximum amount of data to process.
*
* Description:
* This routine gathers the lpfc discovery debugfs data from the @vport and
* dumps it to @buf up to @size number of bytes. It will start at the next entry
* in the log and process the log until the end of the buffer. Then it will
* gather from the beginning of the log and process until the current entry.
*
* Notes:
* Discovery logging will be disabled while this routine dumps the log.
*
* Return Value:
* This routine returns the amount of bytes that were dumped into @buf and will
* not exceed @size.
**/
static int
lpfc_debugfs_disc_trc_data(struct lpfc_vport *vport, char *buf, int size)
{
@ -125,7 +153,6 @@ lpfc_debugfs_disc_trc_data(struct lpfc_vport *vport, char *buf, int size)
struct lpfc_debugfs_trc *dtp;
char buffer[LPFC_DEBUG_TRC_ENTRY_SIZE];
enable = lpfc_debugfs_enable;
lpfc_debugfs_enable = 0;
@ -159,6 +186,25 @@ lpfc_debugfs_disc_trc_data(struct lpfc_vport *vport, char *buf, int size)
return len;
}
/**
* lpfc_debugfs_slow_ring_trc_data - Dump slow ring logging to a buffer.
* @phba: The HBA to gather the log info from.
* @buf: The buffer to dump log into.
* @size: The maximum amount of data to process.
*
* Description:
* This routine gathers the lpfc slow ring debugfs data from the @phba and
* dumps it to @buf up to @size number of bytes. It will start at the next entry
* in the log and process the log until the end of the buffer. Then it will
* gather from the beginning of the log and process until the current entry.
*
* Notes:
* Slow ring logging will be disabled while this routine dumps the log.
*
* Return Value:
* This routine returns the amount of bytes that were dumped into @buf and will
* not exceed @size.
**/
static int
lpfc_debugfs_slow_ring_trc_data(struct lpfc_hba *phba, char *buf, int size)
{
@ -203,6 +249,25 @@ lpfc_debugfs_slow_ring_trc_data(struct lpfc_hba *phba, char *buf, int size)
static int lpfc_debugfs_last_hbq = -1;
/**
* lpfc_debugfs_hbqinfo_data - Dump host buffer queue info to a buffer.
* @phba: The HBA to gather host buffer info from.
* @buf: The buffer to dump log into.
* @size: The maximum amount of data to process.
*
* Description:
* This routine dumps the host buffer queue info from the @phba to @buf up to
* @size number of bytes. A header that describes the current hbq state will be
* dumped to @buf first and then info on each hbq entry will be dumped to @buf
* until @size bytes have been dumped or all the hbq info has been dumped.
*
* Notes:
* This routine will rotate through each configured HBQ each time called.
*
* Return Value:
* This routine returns the amount of bytes that were dumped into @buf and will
* not exceed @size.
**/
static int
lpfc_debugfs_hbqinfo_data(struct lpfc_hba *phba, char *buf, int size)
{
@ -303,6 +368,24 @@ skipit:
static int lpfc_debugfs_last_hba_slim_off;
/**
* lpfc_debugfs_dumpHBASlim_data - Dump HBA SLIM info to a buffer.
* @phba: The HBA to gather SLIM info from.
* @buf: The buffer to dump log into.
* @size: The maximum amount of data to process.
*
* Description:
* This routine dumps the current contents of HBA SLIM for the HBA associated
* with @phba to @buf up to @size bytes of data. This is the raw HBA SLIM data.
*
* Notes:
* This routine will only dump up to 1024 bytes of data each time called and
* should be called multiple times to dump the entire HBA SLIM.
*
* Return Value:
* This routine returns the amount of bytes that were dumped into @buf and will
* not exceed @size.
**/
static int
lpfc_debugfs_dumpHBASlim_data(struct lpfc_hba *phba, char *buf, int size)
{
@ -342,6 +425,21 @@ lpfc_debugfs_dumpHBASlim_data(struct lpfc_hba *phba, char *buf, int size)
return len;
}
/**
* lpfc_debugfs_dumpHostSlim_data - Dump host SLIM info to a buffer.
* @phba: The HBA to gather Host SLIM info from.
* @buf: The buffer to dump log into.
* @size: The maximum amount of data to process.
*
* Description:
* This routine dumps the current contents of host SLIM for the host associated
* with @phba to @buf up to @size bytes of data. The dump will contain the
* Mailbox, PCB, Rings, and Registers that are located in host memory.
*
* Return Value:
* This routine returns the amount of bytes that were dumped into @buf and will
* not exceed @size.
**/
static int
lpfc_debugfs_dumpHostSlim_data(struct lpfc_hba *phba, char *buf, int size)
{
@ -357,7 +455,7 @@ lpfc_debugfs_dumpHostSlim_data(struct lpfc_hba *phba, char *buf, int size)
spin_lock_irq(&phba->hbalock);
len += snprintf(buf+len, size-len, "SLIM Mailbox\n");
ptr = (uint32_t *)phba->slim2p;
ptr = (uint32_t *)phba->slim2p.virt;
i = sizeof(MAILBOX_t);
while (i > 0) {
len += snprintf(buf+len, size-len,
@ -370,7 +468,7 @@ lpfc_debugfs_dumpHostSlim_data(struct lpfc_hba *phba, char *buf, int size)
}
len += snprintf(buf+len, size-len, "SLIM PCB\n");
ptr = (uint32_t *)&phba->slim2p->pcb;
ptr = (uint32_t *)phba->pcb;
i = sizeof(PCB_t);
while (i > 0) {
len += snprintf(buf+len, size-len,
@ -382,44 +480,16 @@ lpfc_debugfs_dumpHostSlim_data(struct lpfc_hba *phba, char *buf, int size)
off += (8 * sizeof(uint32_t));
}
pgpp = (struct lpfc_pgp *)&phba->slim2p->mbx.us.s3_pgp.port;
pring = &psli->ring[0];
len += snprintf(buf+len, size-len,
"Ring 0: CMD GetInx:%d (Max:%d Next:%d Local:%d flg:x%x) "
"RSP PutInx:%d Max:%d\n",
pgpp->cmdGetInx, pring->numCiocb,
pring->next_cmdidx, pring->local_getidx, pring->flag,
pgpp->rspPutInx, pring->numRiocb);
pgpp++;
pring = &psli->ring[1];
len += snprintf(buf+len, size-len,
"Ring 1: CMD GetInx:%d (Max:%d Next:%d Local:%d flg:x%x) "
"RSP PutInx:%d Max:%d\n",
pgpp->cmdGetInx, pring->numCiocb,
pring->next_cmdidx, pring->local_getidx, pring->flag,
pgpp->rspPutInx, pring->numRiocb);
pgpp++;
pring = &psli->ring[2];
len += snprintf(buf+len, size-len,
"Ring 2: CMD GetInx:%d (Max:%d Next:%d Local:%d flg:x%x) "
"RSP PutInx:%d Max:%d\n",
pgpp->cmdGetInx, pring->numCiocb,
pring->next_cmdidx, pring->local_getidx, pring->flag,
pgpp->rspPutInx, pring->numRiocb);
pgpp++;
pring = &psli->ring[3];
len += snprintf(buf+len, size-len,
"Ring 3: CMD GetInx:%d (Max:%d Next:%d Local:%d flg:x%x) "
"RSP PutInx:%d Max:%d\n",
pgpp->cmdGetInx, pring->numCiocb,
pring->next_cmdidx, pring->local_getidx, pring->flag,
pgpp->rspPutInx, pring->numRiocb);
ptr = (uint32_t *)&phba->slim2p->mbx.us.s3_pgp.hbq_get;
for (i = 0; i < 4; i++) {
pgpp = &phba->port_gp[i];
pring = &psli->ring[i];
len += snprintf(buf+len, size-len,
"Ring %d: CMD GetInx:%d (Max:%d Next:%d "
"Local:%d flg:x%x) RSP PutInx:%d Max:%d\n",
i, pgpp->cmdGetInx, pring->numCiocb,
pring->next_cmdidx, pring->local_getidx,
pring->flag, pgpp->rspPutInx, pring->numRiocb);
}
word0 = readl(phba->HAregaddr);
word1 = readl(phba->CAregaddr);
word2 = readl(phba->HSregaddr);
@ -430,6 +500,21 @@ lpfc_debugfs_dumpHostSlim_data(struct lpfc_hba *phba, char *buf, int size)
return len;
}
/**
* lpfc_debugfs_nodelist_data - Dump target node list to a buffer.
* @vport: The vport to gather target node info from.
* @buf: The buffer to dump log into.
* @size: The maximum amount of data to process.
*
* Description:
* This routine dumps the current target node list associated with @vport to
* @buf up to @size bytes of data. Each node entry in the dump will contain a
* node state, DID, WWPN, WWNN, RPI, flags, type, and other useful fields.
*
* Return Value:
* This routine returns the amount of bytes that were dumped into @buf and will
* not exceed @size.
**/
static int
lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size)
{
@ -513,7 +598,22 @@ lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size)
}
#endif
/**
* lpfc_debugfs_disc_trc - Store discovery trace log.
* @vport: The vport to associate this trace string with for retrieval.
* @mask: Log entry classification.
* @fmt: Format string to be displayed when dumping the log.
* @data1: 1st data parameter to be applied to @fmt.
* @data2: 2nd data parameter to be applied to @fmt.
* @data3: 3rd data parameter to be applied to @fmt.
*
* Description:
* This routine is used by the driver code to add a debugfs log entry to the
* discovery trace buffer associated with @vport. Only entries with a @mask that
* match the current debugfs discovery mask will be saved. Entries that do not
* match will be thrown away. @fmt, @data1, @data2, and @data3 are used like
* printf when displaying the log.
**/
inline void
lpfc_debugfs_disc_trc(struct lpfc_vport *vport, int mask, char *fmt,
uint32_t data1, uint32_t data2, uint32_t data3)
@ -542,6 +642,19 @@ lpfc_debugfs_disc_trc(struct lpfc_vport *vport, int mask, char *fmt,
return;
}
/**
* lpfc_debugfs_slow_ring_trc - Store slow ring trace log.
* @phba: The phba to associate this trace string with for retrieval.
* @fmt: Format string to be displayed when dumping the log.
* @data1: 1st data parameter to be applied to @fmt.
* @data2: 2nd data parameter to be applied to @fmt.
* @data3: 3rd data parameter to be applied to @fmt.
*
* Description:
* This routine is used by the driver code to add a debugfs log entry to the
* slow ring trace buffer associated with @phba. @fmt, @data1, @data2, and
* @data3 are used like printf when displaying the log.
**/
inline void
lpfc_debugfs_slow_ring_trc(struct lpfc_hba *phba, char *fmt,
uint32_t data1, uint32_t data2, uint32_t data3)
@ -568,6 +681,21 @@ lpfc_debugfs_slow_ring_trc(struct lpfc_hba *phba, char *fmt,
}
#ifdef CONFIG_LPFC_DEBUG_FS
/**
* lpfc_debugfs_disc_trc_open - Open the discovery trace log.
* @inode: The inode pointer that contains a vport pointer.
* @file: The file pointer to attach the log output.
*
* Description:
* This routine is the entry point for the debugfs open file operation. It gets
* the vport from the i_private field in @inode, allocates the necessary buffer
* for the log, fills the buffer from the in-memory log for this vport, and then
* returns a pointer to that log in the private_data field in @file.
*
* Returns:
* This function returns zero if successful. On error it will return a negative
* error value.
**/
static int
lpfc_debugfs_disc_trc_open(struct inode *inode, struct file *file)
{
@ -585,7 +713,7 @@ lpfc_debugfs_disc_trc_open(struct inode *inode, struct file *file)
if (!debug)
goto out;
/* Round to page boundry */
/* Round to page boundary */
size = (lpfc_debugfs_max_disc_trc * LPFC_DEBUG_TRC_ENTRY_SIZE);
size = PAGE_ALIGN(size);
@ -603,6 +731,21 @@ out:
return rc;
}
/**
* lpfc_debugfs_slow_ring_trc_open - Open the Slow Ring trace log.
* @inode: The inode pointer that contains a vport pointer.
* @file: The file pointer to attach the log output.
*
* Description:
* This routine is the entry point for the debugfs open file operation. It gets
* the vport from the i_private field in @inode, allocates the necessary buffer
* for the log, fills the buffer from the in-memory log for this vport, and then
* returns a pointer to that log in the private_data field in @file.
*
* Returns:
* This function returns zero if successful. On error it will return a negative
* error value.
**/
static int
lpfc_debugfs_slow_ring_trc_open(struct inode *inode, struct file *file)
{
@ -620,7 +763,7 @@ lpfc_debugfs_slow_ring_trc_open(struct inode *inode, struct file *file)
if (!debug)
goto out;
/* Round to page boundry */
/* Round to page boundary */
size = (lpfc_debugfs_max_slow_ring_trc * LPFC_DEBUG_TRC_ENTRY_SIZE);
size = PAGE_ALIGN(size);
@ -638,6 +781,21 @@ out:
return rc;
}
/**
* lpfc_debugfs_hbqinfo_open - Open the hbqinfo debugfs buffer.
* @inode: The inode pointer that contains a vport pointer.
* @file: The file pointer to attach the log output.
*
* Description:
* This routine is the entry point for the debugfs open file operation. It gets
* the vport from the i_private field in @inode, allocates the necessary buffer
* for the log, fills the buffer from the in-memory log for this vport, and then
* returns a pointer to that log in the private_data field in @file.
*
* Returns:
* This function returns zero if successful. On error it will return a negative
* error value.
**/
static int
lpfc_debugfs_hbqinfo_open(struct inode *inode, struct file *file)
{
@ -649,7 +807,7 @@ lpfc_debugfs_hbqinfo_open(struct inode *inode, struct file *file)
if (!debug)
goto out;
/* Round to page boundry */
/* Round to page boundary */
debug->buffer = kmalloc(LPFC_HBQINFO_SIZE, GFP_KERNEL);
if (!debug->buffer) {
kfree(debug);
@ -665,6 +823,21 @@ out:
return rc;
}
/**
* lpfc_debugfs_dumpHBASlim_open - Open the Dump HBA SLIM debugfs buffer.
* @inode: The inode pointer that contains a vport pointer.
* @file: The file pointer to attach the log output.
*
* Description:
* This routine is the entry point for the debugfs open file operation. It gets
* the vport from the i_private field in @inode, allocates the necessary buffer
* for the log, fills the buffer from the in-memory log for this vport, and then
* returns a pointer to that log in the private_data field in @file.
*
* Returns:
* This function returns zero if successful. On error it will return a negative
* error value.
**/
static int
lpfc_debugfs_dumpHBASlim_open(struct inode *inode, struct file *file)
{
@ -676,7 +849,7 @@ lpfc_debugfs_dumpHBASlim_open(struct inode *inode, struct file *file)
if (!debug)
goto out;
/* Round to page boundry */
/* Round to page boundary */
debug->buffer = kmalloc(LPFC_DUMPHBASLIM_SIZE, GFP_KERNEL);
if (!debug->buffer) {
kfree(debug);
@ -692,6 +865,21 @@ out:
return rc;
}
/**
* lpfc_debugfs_dumpHostSlim_open - Open the Dump Host SLIM debugfs buffer.
* @inode: The inode pointer that contains a vport pointer.
* @file: The file pointer to attach the log output.
*
* Description:
* This routine is the entry point for the debugfs open file operation. It gets
* the vport from the i_private field in @inode, allocates the necessary buffer
* for the log, fills the buffer from the in-memory log for this vport, and then
* returns a pointer to that log in the private_data field in @file.
*
* Returns:
* This function returns zero if successful. On error it will return a negative
* error value.
**/
static int
lpfc_debugfs_dumpHostSlim_open(struct inode *inode, struct file *file)
{
@ -703,7 +891,7 @@ lpfc_debugfs_dumpHostSlim_open(struct inode *inode, struct file *file)
if (!debug)
goto out;
/* Round to page boundry */
/* Round to page boundary */
debug->buffer = kmalloc(LPFC_DUMPHOSTSLIM_SIZE, GFP_KERNEL);
if (!debug->buffer) {
kfree(debug);
@ -719,6 +907,21 @@ out:
return rc;
}
/**
* lpfc_debugfs_nodelist_open - Open the nodelist debugfs file.
* @inode: The inode pointer that contains a vport pointer.
* @file: The file pointer to attach the log output.
*
* Description:
* This routine is the entry point for the debugfs open file operation. It gets
* the vport from the i_private field in @inode, allocates the necessary buffer
* for the log, fills the buffer from the in-memory log for this vport, and then
* returns a pointer to that log in the private_data field in @file.
*
* Returns:
* This function returns zero if successful. On error it will return a negative
* error value.
**/
static int
lpfc_debugfs_nodelist_open(struct inode *inode, struct file *file)
{
@ -730,7 +933,7 @@ lpfc_debugfs_nodelist_open(struct inode *inode, struct file *file)
if (!debug)
goto out;
/* Round to page boundry */
/* Round to page boundary */
debug->buffer = kmalloc(LPFC_NODELIST_SIZE, GFP_KERNEL);
if (!debug->buffer) {
kfree(debug);
@ -746,6 +949,23 @@ out:
return rc;
}
/**
* lpfc_debugfs_lseek - Seek through a debugfs file.
* @file: The file pointer to seek through.
* @off: The offset to seek to or the amount to seek by.
* @whence: Indicates how to seek.
*
* Description:
* This routine is the entry point for the debugfs lseek file operation. The
* @whence parameter indicates whether @off is the offset to directly seek to,
* or if it is a value to seek forward or reverse by. This function figures out
* what the new offset of the debugfs file will be and assigns that value to the
* f_pos field of @file.
*
* Returns:
* This function returns the new offset if successful and returns a negative
* error if unable to process the seek.
**/
static loff_t
lpfc_debugfs_lseek(struct file *file, loff_t off, int whence)
{
@ -767,6 +987,22 @@ lpfc_debugfs_lseek(struct file *file, loff_t off, int whence)
return (pos < 0 || pos > debug->len) ? -EINVAL : (file->f_pos = pos);
}
/**
* lpfc_debugfs_read - Read a debugfs file.
* @file: The file pointer to read from.
* @buf: The buffer to copy the data to.
* @nbytes: The number of bytes to read.
* @ppos: The position in the file to start reading from.
*
* Description:
* This routine reads data from the buffer indicated in the private_data
* field of @file. It will start reading at @ppos and copy up to @nbytes of
* data to @buf.
*
* Returns:
* This function returns the amount of data that was read (this could be less
* than @nbytes if the end of the file was reached) or a negative error value.
**/
static ssize_t
lpfc_debugfs_read(struct file *file, char __user *buf,
size_t nbytes, loff_t *ppos)
@ -776,6 +1012,18 @@ lpfc_debugfs_read(struct file *file, char __user *buf,
debug->len);
}
/**
* lpfc_debugfs_release - Release the buffer used to store debugfs file data.
* @inode: The inode pointer that contains a vport pointer. (unused)
* @file: The file pointer that contains the buffer to release.
*
* Description:
* This routine frees the buffer that was allocated when the debugfs file was
* opened.
*
* Returns:
* This function returns zero.
**/
static int
lpfc_debugfs_release(struct inode *inode, struct file *file)
{
@ -845,6 +1093,16 @@ static struct dentry *lpfc_debugfs_root = NULL;
static atomic_t lpfc_debugfs_hba_count;
#endif
/**
* lpfc_debugfs_initialize - Initialize debugfs for a vport.
* @vport: The vport pointer to initialize.
*
* Description:
* When Debugfs is configured this routine sets up the lpfc debugfs file system.
* If not already created, this routine will create the lpfc directory, and
* lpfcX directory (for this HBA), and vportX directory for this vport. It will
* also create each file used to access lpfc specific debugfs information.
**/
inline void
lpfc_debugfs_initialize(struct lpfc_vport *vport)
{
@ -862,7 +1120,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
atomic_set(&lpfc_debugfs_hba_count, 0);
if (!lpfc_debugfs_root) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
"0409 Cannot create debugfs root\n");
"0408 Cannot create debugfs root\n");
goto debug_failed;
}
}
@ -876,7 +1134,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
debugfs_create_dir(name, lpfc_debugfs_root);
if (!phba->hba_debugfs_root) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
"0409 Cannot create debugfs hba\n");
"0412 Cannot create debugfs hba\n");
goto debug_failed;
}
atomic_inc(&lpfc_debugfs_hba_count);
@ -890,7 +1148,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
phba, &lpfc_debugfs_op_hbqinfo);
if (!phba->debug_hbqinfo) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
"0409 Cannot create debugfs hbqinfo\n");
"0411 Cannot create debugfs hbqinfo\n");
goto debug_failed;
}
@ -902,7 +1160,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
phba, &lpfc_debugfs_op_dumpHBASlim);
if (!phba->debug_dumpHBASlim) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
"0409 Cannot create debugfs dumpHBASlim\n");
"0413 Cannot create debugfs dumpHBASlim\n");
goto debug_failed;
}
@ -914,7 +1172,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
phba, &lpfc_debugfs_op_dumpHostSlim);
if (!phba->debug_dumpHostSlim) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
"0409 Cannot create debugfs dumpHostSlim\n");
"0414 Cannot create debugfs dumpHostSlim\n");
goto debug_failed;
}
@ -944,7 +1202,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
phba, &lpfc_debugfs_op_slow_ring_trc);
if (!phba->debug_slow_ring_trc) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
"0409 Cannot create debugfs "
"0415 Cannot create debugfs "
"slow_ring_trace\n");
goto debug_failed;
}
@ -955,7 +1213,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
GFP_KERNEL);
if (!phba->slow_ring_trc) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
"0409 Cannot create debugfs "
"0416 Cannot create debugfs "
"slow_ring buffer\n");
goto debug_failed;
}
@ -972,7 +1230,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
debugfs_create_dir(name, phba->hba_debugfs_root);
if (!vport->vport_debugfs_root) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
"0409 Cant create debugfs");
"0417 Cant create debugfs");
goto debug_failed;
}
atomic_inc(&phba->debugfs_vport_count);
@ -1001,7 +1259,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
if (!vport->disc_trc) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
"0409 Cannot create debugfs disc trace "
"0418 Cannot create debugfs disc trace "
"buffer\n");
goto debug_failed;
}
@ -1014,7 +1272,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
vport, &lpfc_debugfs_op_disc_trc);
if (!vport->debug_disc_trc) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
"0409 Cannot create debugfs "
"0419 Cannot create debugfs "
"discovery_trace\n");
goto debug_failed;
}
@ -1033,7 +1291,17 @@ debug_failed:
#endif
}
/**
* lpfc_debugfs_terminate - Tear down debugfs infrastructure for this vport.
* @vport: The vport pointer to remove from debugfs.
*
* Description:
 * When Debugfs is configured, this routine removes debugfs file system elements
 * that are specific to this vport. It also checks whether there are any
 * users left for the debugfs directories associated with the HBA and driver. If
 * this is the last user of the HBA directory or driver directory, then it will
 * remove those from the debugfs infrastructure as well.
**/
inline void
lpfc_debugfs_terminate(struct lpfc_vport *vport)
{
@ -1096,5 +1364,3 @@ lpfc_debugfs_terminate(struct lpfc_vport *vport)
#endif
return;
}

View File

@ -37,6 +37,7 @@ enum lpfc_work_type {
LPFC_EVT_KILL,
LPFC_EVT_ELS_RETRY,
LPFC_EVT_DEV_LOSS,
LPFC_EVT_FASTPATH_MGMT_EVT,
};
/* structure used to queue event to the discovery tasklet */
@ -47,6 +48,24 @@ struct lpfc_work_evt {
enum lpfc_work_type evt;
};
struct lpfc_scsi_check_condition_event;
struct lpfc_scsi_varqueuedepth_event;
struct lpfc_scsi_event_header;
struct lpfc_fabric_event_header;
struct lpfc_fcprdchkerr_event;
/* structure used for sending events from fast path */
struct lpfc_fast_path_event {
struct lpfc_work_evt work_evt;
struct lpfc_vport *vport;
union {
struct lpfc_scsi_check_condition_event check_cond_evt;
struct lpfc_scsi_varqueuedepth_event queue_depth_evt;
struct lpfc_scsi_event_header scsi_evt;
struct lpfc_fabric_event_header fabric_evt;
struct lpfc_fcprdchkerr_event read_check_error;
} un;
};
struct lpfc_nodelist {
struct list_head nlp_listp;
@ -88,6 +107,10 @@ struct lpfc_nodelist {
unsigned long last_ramp_up_time; /* jiffy of last ramp up */
unsigned long last_q_full_time; /* jiffy of last queue full */
struct kref kref;
atomic_t cmd_pending;
uint32_t cmd_qdepth;
unsigned long last_change_time;
struct lpfc_scsicmd_bkt *lat_data; /* Latency data */
};
/* Defines for nlp_flag (uint32) */

File diff suppressed because it is too large

View File

@ -30,6 +30,7 @@
#include <scsi/scsi_transport_fc.h>
#include "lpfc_hw.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc_sli.h"
#include "lpfc_scsi.h"
@ -88,14 +89,6 @@ lpfc_terminate_rport_io(struct fc_rport *rport)
&phba->sli.ring[phba->sli.fcp_ring],
ndlp->nlp_sid, 0, LPFC_CTX_TGT);
}
/*
* A device is normally blocked for rediscovery and unblocked when
* devloss timeout happens. In case a vport is removed or driver
* unloaded before devloss timeout happens, we need to unblock here.
*/
scsi_target_unblock(&rport->dev);
return;
}
/*
@ -215,8 +208,16 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
return;
}
if (ndlp->nlp_state == NLP_STE_MAPPED_NODE)
if (ndlp->nlp_state == NLP_STE_MAPPED_NODE) {
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
"0284 Devloss timeout Ignored on "
"WWPN %x:%x:%x:%x:%x:%x:%x:%x "
"NPort x%x\n",
*name, *(name+1), *(name+2), *(name+3),
*(name+4), *(name+5), *(name+6), *(name+7),
ndlp->nlp_DID);
return;
}
if (ndlp->nlp_type & NLP_FABRIC) {
/* We will clean up these Nodes in linkup */
@ -237,8 +238,6 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
lpfc_sli_abort_iocb(vport, &phba->sli.ring[phba->sli.fcp_ring],
ndlp->nlp_sid, 0, LPFC_CTX_TGT);
}
if (vport->load_flag & FC_UNLOADING)
warn_on = 0;
if (warn_on) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
@ -276,6 +275,124 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM);
}
/**
* lpfc_alloc_fast_evt: Allocates data structure for posting event.
* @phba: Pointer to hba context object.
*
 * This function is called by code that needs to post events from interrupt
 * context. It allocates the data structure required for posting an event
 * and keeps track of the number of pending events to prevent an event
 * storm when there are too many events.
**/
struct lpfc_fast_path_event *
lpfc_alloc_fast_evt(struct lpfc_hba *phba) {
struct lpfc_fast_path_event *ret;
/* If there are lots of fast events, do not exhaust memory due to this */
if (atomic_read(&phba->fast_event_count) > LPFC_MAX_EVT_COUNT)
return NULL;
ret = kzalloc(sizeof(struct lpfc_fast_path_event),
GFP_ATOMIC);
if (!ret)
return NULL;
atomic_inc(&phba->fast_event_count);
INIT_LIST_HEAD(&ret->work_evt.evt_listp);
ret->work_evt.evt = LPFC_EVT_FASTPATH_MGMT_EVT;
return ret;
}
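A hedged sketch of how an interrupt-context caller might use this allocator to post, for example, a queue-full event; the work-list handoff mirrors the pattern used elsewhere in the driver, and the WWPN/WWNN payload fields are elided:

/* Sketch only: post a QFULL fast-path event from interrupt context. */
static void example_post_qfull(struct lpfc_hba *phba, struct lpfc_vport *vport)
{
	struct lpfc_fast_path_event *evt;
	unsigned long flags;

	evt = lpfc_alloc_fast_evt(phba);	/* NULL under event storm */
	if (!evt)
		return;				/* event intentionally dropped */
	evt->un.scsi_evt.event_type = FC_REG_SCSI_EVENT;
	evt->un.scsi_evt.subcategory = LPFC_EVENT_QFULL;
	evt->vport = vport;

	spin_lock_irqsave(&phba->hbalock, flags);
	list_add_tail(&evt->work_evt.evt_listp, &phba->work_list);
	spin_unlock_irqrestore(&phba->hbalock, flags);
	lpfc_worker_wake_up(phba);	/* worker thread sends it via netlink */
}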
/**
* lpfc_free_fast_evt: Frees event data structure.
* @phba: Pointer to hba context object.
 * @evt: Event object which needs to be freed.
*
* This function frees the data structure required for posting
* events.
**/
void
lpfc_free_fast_evt(struct lpfc_hba *phba,
struct lpfc_fast_path_event *evt) {
atomic_dec(&phba->fast_event_count);
kfree(evt);
}
/**
* lpfc_send_fastpath_evt: Posts events generated from fast path.
* @phba: Pointer to hba context object.
* @evtp: Event data structure.
*
 * This function is called from the worker thread when interrupt context
 * needs to post an event. It posts the event to the FC transport netlink
 * interface.
**/
static void
lpfc_send_fastpath_evt(struct lpfc_hba *phba,
struct lpfc_work_evt *evtp)
{
unsigned long evt_category, evt_sub_category;
struct lpfc_fast_path_event *fast_evt_data;
char *evt_data;
uint32_t evt_data_size;
struct Scsi_Host *shost;
fast_evt_data = container_of(evtp, struct lpfc_fast_path_event,
work_evt);
evt_category = (unsigned long) fast_evt_data->un.fabric_evt.event_type;
evt_sub_category = (unsigned long) fast_evt_data->un.
fabric_evt.subcategory;
shost = lpfc_shost_from_vport(fast_evt_data->vport);
if (evt_category == FC_REG_FABRIC_EVENT) {
if (evt_sub_category == LPFC_EVENT_FCPRDCHKERR) {
evt_data = (char *) &fast_evt_data->un.read_check_error;
evt_data_size = sizeof(fast_evt_data->un.
read_check_error);
} else if ((evt_sub_category == LPFC_EVENT_FABRIC_BUSY) ||
(evt_sub_category == IOSTAT_NPORT_BSY)) {
evt_data = (char *) &fast_evt_data->un.fabric_evt;
evt_data_size = sizeof(fast_evt_data->un.fabric_evt);
} else {
lpfc_free_fast_evt(phba, fast_evt_data);
return;
}
} else if (evt_category == FC_REG_SCSI_EVENT) {
switch (evt_sub_category) {
case LPFC_EVENT_QFULL:
case LPFC_EVENT_DEVBSY:
evt_data = (char *) &fast_evt_data->un.scsi_evt;
evt_data_size = sizeof(fast_evt_data->un.scsi_evt);
break;
case LPFC_EVENT_CHECK_COND:
evt_data = (char *) &fast_evt_data->un.check_cond_evt;
evt_data_size = sizeof(fast_evt_data->un.
check_cond_evt);
break;
case LPFC_EVENT_VARQUEDEPTH:
evt_data = (char *) &fast_evt_data->un.queue_depth_evt;
evt_data_size = sizeof(fast_evt_data->un.
queue_depth_evt);
break;
default:
lpfc_free_fast_evt(phba, fast_evt_data);
return;
}
} else {
lpfc_free_fast_evt(phba, fast_evt_data);
return;
}
fc_host_post_vendor_event(shost,
fc_get_event_number(),
evt_data_size,
evt_data,
SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
lpfc_free_fast_evt(phba, fast_evt_data);
return;
}
static void
lpfc_work_list_done(struct lpfc_hba *phba)
{
@ -347,6 +464,10 @@ lpfc_work_list_done(struct lpfc_hba *phba)
lpfc_unblock_mgmt_io(phba);
complete((struct completion *)(evtp->evt_arg2));
break;
case LPFC_EVT_FASTPATH_MGMT_EVT:
lpfc_send_fastpath_evt(phba, evtp);
free_evt = 0;
break;
}
if (free_evt)
kfree(evtp);
@ -371,6 +492,7 @@ lpfc_work_done(struct lpfc_hba *phba)
spin_unlock_irq(&phba->hbalock);
if (ha_copy & HA_ERATT)
/* Handle the error attention event */
lpfc_handle_eratt(phba);
if (ha_copy & HA_MBATT)
@ -378,6 +500,7 @@ lpfc_work_done(struct lpfc_hba *phba)
if (ha_copy & HA_LATT)
lpfc_handle_latt(phba);
vports = lpfc_create_vport_work_array(phba);
if (vports != NULL)
for(i = 0; i <= phba->max_vpi; i++) {
@ -1013,14 +1136,10 @@ out:
}
static void
lpfc_mbx_issue_link_down(struct lpfc_hba *phba)
lpfc_enable_la(struct lpfc_hba *phba)
{
uint32_t control;
struct lpfc_sli *psli = &phba->sli;
lpfc_linkdown(phba);
/* turn on Link Attention interrupts - no CLEAR_LA needed */
spin_lock_irq(&phba->hbalock);
psli->sli_flag |= LPFC_PROCESS_LA;
control = readl(phba->HCregaddr);
@ -1030,6 +1149,15 @@ lpfc_mbx_issue_link_down(struct lpfc_hba *phba)
spin_unlock_irq(&phba->hbalock);
}
static void
lpfc_mbx_issue_link_down(struct lpfc_hba *phba)
{
lpfc_linkdown(phba);
lpfc_enable_la(phba);
/* turn on Link Attention interrupts - no CLEAR_LA needed */
}
/*
* This routine handles processing a READ_LA mailbox
* command upon completion. It is setup in the LPFC_MBOXQ
@ -1077,8 +1205,12 @@ lpfc_mbx_cmpl_read_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
}
phba->fc_eventTag = la->eventTag;
if (la->mm)
phba->sli.sli_flag |= LPFC_MENLO_MAINT;
else
phba->sli.sli_flag &= ~LPFC_MENLO_MAINT;
if (la->attType == AT_LINK_UP) {
if (la->attType == AT_LINK_UP && (!la->mm)) {
phba->fc_stat.LinkUp++;
if (phba->link_flag & LS_LOOPBACK_MODE) {
lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
@ -1090,13 +1222,15 @@ lpfc_mbx_cmpl_read_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
} else {
lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
"1303 Link Up Event x%x received "
"Data: x%x x%x x%x x%x\n",
"Data: x%x x%x x%x x%x x%x x%x %d\n",
la->eventTag, phba->fc_eventTag,
la->granted_AL_PA, la->UlnkSpeed,
phba->alpa_map[0]);
phba->alpa_map[0],
la->mm, la->fa,
phba->wait_4_mlo_maint_flg);
}
lpfc_mbx_process_link_up(phba, la);
} else {
} else if (la->attType == AT_LINK_DOWN) {
phba->fc_stat.LinkDown++;
if (phba->link_flag & LS_LOOPBACK_MODE) {
lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
@ -1109,11 +1243,46 @@ lpfc_mbx_cmpl_read_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
else {
lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
"1305 Link Down Event x%x received "
"Data: x%x x%x x%x x%x x%x\n",
la->eventTag, phba->fc_eventTag,
phba->pport->port_state, vport->fc_flag,
la->mm, la->fa);
}
lpfc_mbx_issue_link_down(phba);
}
if (la->mm && la->attType == AT_LINK_UP) {
if (phba->link_state != LPFC_LINK_DOWN) {
phba->fc_stat.LinkDown++;
lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
"1312 Link Down Event x%x received "
"Data: x%x x%x x%x\n",
la->eventTag, phba->fc_eventTag,
phba->pport->port_state, vport->fc_flag);
lpfc_mbx_issue_link_down(phba);
} else
lpfc_enable_la(phba);
lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
"1310 Menlo Maint Mode Link up Event x%x rcvd "
"Data: x%x x%x x%x\n",
la->eventTag, phba->fc_eventTag,
phba->pport->port_state, vport->fc_flag);
/*
* The cmnd that triggered this will be waiting for this
* signal.
*/
/* WAKEUP for MENLO_SET_MODE or MENLO_RESET command. */
if (phba->wait_4_mlo_maint_flg) {
phba->wait_4_mlo_maint_flg = 0;
wake_up_interruptible(&phba->wait_4_mlo_m_q);
}
lpfc_mbx_issue_link_down(phba);
}
if (la->fa) {
if (la->mm)
lpfc_issue_clear_la(phba, vport);
lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
"1311 fa %d\n", la->fa);
}
lpfc_mbx_cmpl_read_la_free_mbuf:
@ -1177,7 +1346,7 @@ lpfc_mbx_cmpl_unreg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
scsi_host_put(shost);
}
void
int
lpfc_mbx_unreg_vpi(struct lpfc_vport *vport)
{
struct lpfc_hba *phba = vport->phba;
@ -1186,7 +1355,7 @@ lpfc_mbx_unreg_vpi(struct lpfc_vport *vport)
mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!mbox)
return;
return 1;
lpfc_unreg_vpi(phba, vport->vpi, mbox);
mbox->vport = vport;
@ -1197,7 +1366,9 @@ lpfc_mbx_unreg_vpi(struct lpfc_vport *vport)
"1800 Could not issue unreg_vpi\n");
mempool_free(mbox, phba->mbox_mem_pool);
vport->unreg_vpi_cmpl = VPORT_ERROR;
return rc;
}
return 0;
}
static void
@ -1553,6 +1724,22 @@ lpfc_nlp_state_cleanup(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
*/
lpfc_register_remote_port(vport, ndlp);
}
if ((new_state == NLP_STE_MAPPED_NODE) &&
(vport->stat_data_enabled)) {
/*
* A new target is discovered, if there is no buffer for
* statistical data collection allocate buffer.
*/
ndlp->lat_data = kcalloc(LPFC_MAX_BUCKET_COUNT,
sizeof(struct lpfc_scsicmd_bkt),
GFP_KERNEL);
if (!ndlp->lat_data)
lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE,
"0286 lpfc_nlp_state_cleanup failed to "
"allocate statistical data buffer DID "
"0x%x\n", ndlp->nlp_DID);
}
/*
* if we added to Mapped list, but the remote port
* registration failed or assigned a target id outside
@ -2786,7 +2973,7 @@ restart_disc:
default:
lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
"0229 Unexpected discovery timeout, "
"0273 Unexpected discovery timeout, "
"vport State x%x\n", vport->port_state);
break;
}
@ -2940,6 +3127,8 @@ lpfc_nlp_init(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
INIT_LIST_HEAD(&ndlp->nlp_listp);
kref_init(&ndlp->kref);
NLP_INT_NODE_ACT(ndlp);
atomic_set(&ndlp->cmd_pending, 0);
ndlp->cmd_qdepth = LPFC_MAX_TGT_QDEPTH;
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE,
"node init: did:x%x",
@ -2979,8 +3168,10 @@ lpfc_nlp_release(struct kref *kref)
spin_unlock_irqrestore(&phba->ndlp_lock, flags);
/* free ndlp memory for final ndlp release */
if (NLP_CHK_FREE_REQ(ndlp))
if (NLP_CHK_FREE_REQ(ndlp)) {
kfree(ndlp->lat_data);
mempool_free(ndlp, ndlp->vport->phba->nlp_mem_pool);
}
}
/* This routine bumps the reference count for a ndlp structure to ensure

View File

@ -1107,6 +1107,8 @@ typedef struct {
/* Start FireFly Register definitions */
#define PCI_VENDOR_ID_EMULEX 0x10df
#define PCI_DEVICE_ID_FIREFLY 0x1ae5
#define PCI_DEVICE_ID_PROTEUS_VF 0xe100
#define PCI_DEVICE_ID_PROTEUS_PF 0xe180
#define PCI_DEVICE_ID_SAT_SMB 0xf011
#define PCI_DEVICE_ID_SAT_MID 0xf015
#define PCI_DEVICE_ID_RFLY 0xf095
@ -1133,10 +1135,12 @@ typedef struct {
#define PCI_DEVICE_ID_LP11000S 0xfc10
#define PCI_DEVICE_ID_LPE11000S 0xfc20
#define PCI_DEVICE_ID_SAT_S 0xfc40
#define PCI_DEVICE_ID_PROTEUS_S 0xfc50
#define PCI_DEVICE_ID_HELIOS 0xfd00
#define PCI_DEVICE_ID_HELIOS_SCSP 0xfd11
#define PCI_DEVICE_ID_HELIOS_DCSP 0xfd12
#define PCI_DEVICE_ID_ZEPHYR 0xfe00
#define PCI_DEVICE_ID_HORNET 0xfe05
#define PCI_DEVICE_ID_ZEPHYR_SCSP 0xfe11
#define PCI_DEVICE_ID_ZEPHYR_DCSP 0xfe12
@ -1154,6 +1158,7 @@ typedef struct {
#define ZEPHYR_JEDEC_ID 0x0577
#define VIPER_JEDEC_ID 0x4838
#define SATURN_JEDEC_ID 0x1004
#define HORNET_JDEC_ID 0x2057706D
#define JEDEC_ID_MASK 0x0FFFF000
#define JEDEC_ID_SHIFT 12
@ -1198,6 +1203,18 @@ typedef struct { /* FireFly BIU registers */
#define HA_RXATT 0x00000008 /* Bit 3 */
#define HA_RXMASK 0x0000000f
#define HA_R0_CLR_MSK (HA_R0RE_REQ | HA_R0CE_RSP | HA_R0ATT)
#define HA_R1_CLR_MSK (HA_R1RE_REQ | HA_R1CE_RSP | HA_R1ATT)
#define HA_R2_CLR_MSK (HA_R2RE_REQ | HA_R2CE_RSP | HA_R2ATT)
#define HA_R3_CLR_MSK (HA_R3RE_REQ | HA_R3CE_RSP | HA_R3ATT)
#define HA_R0_POS 3
#define HA_R1_POS 7
#define HA_R2_POS 11
#define HA_R3_POS 15
#define HA_LE_POS 29
#define HA_MB_POS 30
#define HA_ER_POS 31
/* Chip Attention Register */
#define CA_REG_OFFSET 4 /* Byte offset from register base address */
@ -1235,7 +1252,7 @@ typedef struct { /* FireFly BIU registers */
/* Host Control Register */
#define HC_REG_OFFSET 12 /* Word offset from register base address */
#define HC_REG_OFFSET 12 /* Byte offset from register base address */
#define HC_MBINT_ENA 0x00000001 /* Bit 0 */
#define HC_R0INT_ENA 0x00000002 /* Bit 1 */
@ -1248,6 +1265,19 @@ typedef struct { /* FireFly BIU registers */
#define HC_LAINT_ENA 0x20000000 /* Bit 29 */
#define HC_ERINT_ENA 0x80000000 /* Bit 31 */
/* Message Signaled Interrupt eXtension (MSI-X) message identifiers */
#define MSIX_DFLT_ID 0
#define MSIX_RNG0_ID 0
#define MSIX_RNG1_ID 1
#define MSIX_RNG2_ID 2
#define MSIX_RNG3_ID 3
#define MSIX_LINK_ID 4
#define MSIX_MBOX_ID 5
#define MSIX_SPARE0_ID 6
#define MSIX_SPARE1_ID 7
/* Mailbox Commands */
#define MBX_SHUTDOWN 0x00 /* terminate testing */
#define MBX_LOAD_SM 0x01
@ -1285,10 +1315,14 @@ typedef struct { /* FireFly BIU registers */
#define MBX_KILL_BOARD 0x24
#define MBX_CONFIG_FARP 0x25
#define MBX_BEACON 0x2A
#define MBX_CONFIG_MSI 0x30
#define MBX_HEARTBEAT 0x31
#define MBX_WRITE_VPARMS 0x32
#define MBX_ASYNCEVT_ENABLE 0x33
#define MBX_PORT_CAPABILITIES 0x3B
#define MBX_PORT_IOV_CONTROL 0x3C
#define MBX_CONFIG_HBQ 0x7C
#define MBX_LOAD_AREA 0x81
#define MBX_RUN_BIU_DIAG64 0x84
@ -1474,24 +1508,18 @@ struct ulp_bde64 { /* SLI-2 */
uint32_t bdeFlags:8; /* BDE Flags 0 IS A SUPPORTED
VALUE !! */
#endif
#define BUFF_USE_RSVD 0x01 /* bdeFlags */
#define BUFF_USE_INTRPT 0x02 /* Not Implemented with LP6000 */
#define BUFF_USE_CMND 0x04 /* Optional, 1=cmd/rsp 0=data buffer */
#define BUFF_USE_RCV 0x08 /* "" "", 1=rcv buffer, 0=xmit
buffer */
#define BUFF_TYPE_32BIT 0x10 /* "" "", 1=32 bit addr 0=64 bit
addr */
#define BUFF_TYPE_SPECIAL 0x20 /* Not Implemented with LP6000 */
#define BUFF_TYPE_BDL 0x40 /* Optional, may be set in BDL */
#define BUFF_TYPE_INVALID 0x80 /* "" "" */
#define BUFF_TYPE_BDE_64 0x00 /* BDE (Host_resident) */
#define BUFF_TYPE_BDE_IMMED 0x01 /* Immediate Data BDE */
#define BUFF_TYPE_BDE_64P 0x02 /* BDE (Port-resident) */
#define BUFF_TYPE_BDE_64I 0x08 /* Input BDE (Host-resident) */
#define BUFF_TYPE_BDE_64IP 0x0A /* Input BDE (Port-resident) */
#define BUFF_TYPE_BLP_64 0x40 /* BLP (Host-resident) */
#define BUFF_TYPE_BLP_64P 0x42 /* BLP (Port-resident) */
} f;
} tus;
uint32_t addrLow;
uint32_t addrHigh;
};
#define BDE64_SIZE_WORD 0
#define BPL64_SIZE_WORD 0x40
typedef struct ULP_BDL { /* SLI-2 */
#ifdef __BIG_ENDIAN_BITFIELD
@ -2201,7 +2229,10 @@ typedef struct {
typedef struct {
uint32_t eventTag; /* Event tag */
#ifdef __BIG_ENDIAN_BITFIELD
uint32_t rsvd1:22;
uint32_t rsvd1:19;
uint32_t fa:1;
uint32_t mm:1; /* Menlo Maintenance mode enabled */
uint32_t rx:1;
uint32_t pb:1;
uint32_t il:1;
uint32_t attType:8;
@ -2209,7 +2240,10 @@ typedef struct {
uint32_t attType:8;
uint32_t il:1;
uint32_t pb:1;
uint32_t rsvd1:22;
uint32_t rx:1;
uint32_t mm:1;
uint32_t fa:1;
uint32_t rsvd1:19;
#endif
#define AT_RESERVED 0x00 /* Reserved - attType */
@ -2230,6 +2264,7 @@ typedef struct {
#define TOPOLOGY_PT_PT 0x01 /* Topology is pt-pt / pt-fabric */
#define TOPOLOGY_LOOP 0x02 /* Topology is FC-AL */
#define TOPOLOGY_LNK_MENLO_MAINTENANCE 0x05 /* maint mode zephtr to menlo */
union {
struct ulp_bde lilpBde; /* This BDE points to a 128 byte buffer
@ -2324,6 +2359,36 @@ typedef struct {
#define DMP_RSP_OFFSET 0x14 /* word 5 contains first word of rsp */
#define DMP_RSP_SIZE 0x6C /* maximum of 27 words of rsp data */
/* Structure for MB Command UPDATE_CFG (0x1B) */
struct update_cfg_var {
#ifdef __BIG_ENDIAN_BITFIELD
uint32_t rsvd2:16;
uint32_t type:8;
uint32_t rsvd:1;
uint32_t ra:1;
uint32_t co:1;
uint32_t cv:1;
uint32_t req:4;
uint32_t entry_length:16;
uint32_t region_id:16;
#else /* __LITTLE_ENDIAN_BITFIELD */
uint32_t req:4;
uint32_t cv:1;
uint32_t co:1;
uint32_t ra:1;
uint32_t rsvd:1;
uint32_t type:8;
uint32_t rsvd2:16;
uint32_t region_id:16;
uint32_t entry_length:16;
#endif
uint32_t resp_info;
uint32_t byte_cnt;
uint32_t data_offset;
};
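The mirrored #ifdef blocks in this structure, and in the others throughout this header, exist because C leaves bit-field ordering to the ABI: declaring the fields of each 32-bit word in the opposite order on big- and little-endian hosts keeps every field on the same wire bits. A minimal illustration:

/* Illustrative only: both declarations put "hi" in bits 31:16 and "lo"
 * in bits 15:0 of the same 32-bit word on their respective hosts. */
#ifdef __BIG_ENDIAN_BITFIELD
struct example_word {
	uint32_t hi:16;		/* declared first, lands in bits 31:16 */
	uint32_t lo:16;		/* declared second, lands in bits 15:0 */
};
#else /* __LITTLE_ENDIAN_BITFIELD */
struct example_word {
	uint32_t lo:16;		/* declared first, lands in bits 15:0 */
	uint32_t hi:16;		/* declared second, lands in bits 31:16 */
};
#endif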
struct hbq_mask {
#ifdef __BIG_ENDIAN_BITFIELD
uint8_t tmatch;
@ -2560,6 +2625,40 @@ typedef struct {
} CONFIG_PORT_VAR;
/* Structure for MB Command CONFIG_MSI (0x30) */
struct config_msi_var {
#ifdef __BIG_ENDIAN_BITFIELD
uint32_t dfltMsgNum:8; /* Default message number */
uint32_t rsvd1:11; /* Reserved */
uint32_t NID:5; /* Number of secondary attention IDs */
uint32_t rsvd2:5; /* Reserved */
uint32_t dfltPresent:1; /* Default message number present */
uint32_t addFlag:1; /* Add association flag */
uint32_t reportFlag:1; /* Report association flag */
#else /* __LITTLE_ENDIAN_BITFIELD */
uint32_t reportFlag:1; /* Report association flag */
uint32_t addFlag:1; /* Add association flag */
uint32_t dfltPresent:1; /* Default message number present */
uint32_t rsvd2:5; /* Reserved */
uint32_t NID:5; /* Number of secondary attention IDs */
uint32_t rsvd1:11; /* Reserved */
uint32_t dfltMsgNum:8; /* Default message number */
#endif
uint32_t attentionConditions[2];
uint8_t attentionId[16];
uint8_t messageNumberByHA[64];
uint8_t messageNumberByID[16];
uint32_t autoClearHA[2];
#ifdef __BIG_ENDIAN_BITFIELD
uint32_t rsvd3:16;
uint32_t autoClearID:16;
#else /* __LITTLE_ENDIAN_BITFIELD */
uint32_t autoClearID:16;
uint32_t rsvd3:16;
#endif
uint32_t rsvd4;
};
/* SLI-2 Port Control Block */
/* SLIM POINTER */
@ -2678,10 +2777,12 @@ typedef union {
* NEW_FEATURE
*/
struct config_hbq_var varCfgHbq;/* cmd = 0x7c (CONFIG_HBQ) */
struct update_cfg_var varUpdateCfg; /* cmd = 0x1B (UPDATE_CFG)*/
CONFIG_PORT_VAR varCfgPort; /* cmd = 0x88 (CONFIG_PORT) */
REG_VPI_VAR varRegVpi; /* cmd = 0x96 (REG_VPI) */
UNREG_VPI_VAR varUnregVpi; /* cmd = 0x97 (UNREG_VPI) */
ASYNCEVT_ENABLE_VAR varCfgAsyncEvent; /*cmd = x33 (CONFIG_ASYNC) */
struct config_msi_var varCfgMSI;/* cmd = x30 (CONFIG_MSI) */
} MAILVARIANTS;
/*
@ -2715,11 +2816,19 @@ struct sli3_pgp {
uint32_t hbq_get[16];
};
typedef union {
struct sli2_desc s2;
struct sli3_desc s3;
struct sli3_pgp s3_pgp;
} SLI_VAR;
struct sli3_inb_pgp {
uint32_t ha_copy;
uint32_t counter;
struct lpfc_pgp port[MAX_RINGS];
uint32_t hbq_get[16];
};
union sli_var {
struct sli2_desc s2;
struct sli3_desc s3;
struct sli3_pgp s3_pgp;
struct sli3_inb_pgp s3_inb_pgp;
};
typedef struct {
#ifdef __BIG_ENDIAN_BITFIELD
@ -2737,7 +2846,7 @@ typedef struct {
#endif
MAILVARIANTS un;
SLI_VAR us;
union sli_var us;
} MAILBOX_t;
/*
@ -3105,6 +3214,27 @@ struct que_xri64cx_ext_fields {
struct lpfc_hbq_entry buff[5];
};
#define LPFC_EXT_DATA_BDE_COUNT 3
struct fcp_irw_ext {
uint32_t io_tag64_low;
uint32_t io_tag64_high;
#ifdef __BIG_ENDIAN_BITFIELD
uint8_t reserved1;
uint8_t reserved2;
uint8_t reserved3;
uint8_t ebde_count;
#else /* __LITTLE_ENDIAN */
uint8_t ebde_count;
uint8_t reserved3;
uint8_t reserved2;
uint8_t reserved1;
#endif
uint32_t reserved4;
struct ulp_bde64 rbde; /* response bde */
struct ulp_bde64 dbde[LPFC_EXT_DATA_BDE_COUNT]; /* data BDE or BPL */
uint8_t icd[32]; /* immediate command data (32 bytes) */
};
typedef struct _IOCB { /* IOCB structure */
union {
GENERIC_RSP grsp; /* Generic response */
@ -3190,7 +3320,7 @@ typedef struct _IOCB { /* IOCB structure */
/* words 8-31 used for que_xri_cx iocb */
struct que_xri64cx_ext_fields que_xri64cx_ext_words;
struct fcp_irw_ext fcp_ext;
uint32_t sli3Words[24]; /* 96 extra bytes for SLI-3 */
} unsli3;
@ -3292,3 +3422,10 @@ lpfc_error_lost_link(IOCB_t *iocbp)
iocbp->un.ulpWord[4] == IOERR_LINK_DOWN ||
iocbp->un.ulpWord[4] == IOERR_SLI_DOWN));
}
#define MENLO_TRANSPORT_TYPE 0xfe
#define MENLO_CONTEXT 0
#define MENLO_PU 3
#define MENLO_TIMEOUT 30
#define SETVAR_MLOMNT 0x103107
#define SETVAR_MLORST 0x103007

File diff suppressed because it is too large

View File

@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
* Copyright (C) 2004-2007 Emulex. All rights reserved. *
* Copyright (C) 2004-2008 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* Portions Copyright (C) 2004-2005 Christoph Hellwig *
@ -30,6 +30,7 @@
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
@ -37,10 +38,20 @@
#include "lpfc_crtn.h"
#include "lpfc_compat.h"
/**********************************************/
/* mailbox command */
/**********************************************/
/**
* lpfc_dump_mem: Prepare a mailbox command for retrieving HBA's VPD memory.
* @phba: pointer to lpfc hba data structure.
* @pmb: pointer to the driver internal queue element for mailbox command.
* @offset: offset for dumping VPD memory mailbox command.
*
* The dump mailbox command provides a method for the device driver to obtain
* various types of information from the HBA device.
*
* This routine prepares the mailbox command for dumping HBA Vital Product
* Data (VPD) memory. This mailbox command is to be used for retrieving a
* portion (DMP_RSP_SIZE bytes) of a HBA's VPD from the HBA at an address
* offset specified by the offset parameter.
**/
void
lpfc_dump_mem(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb, uint16_t offset)
{
@ -65,10 +76,17 @@ lpfc_dump_mem(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb, uint16_t offset)
return;
}
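As the kernel-doc notes, each invocation fetches at most DMP_RSP_SIZE bytes, so callers loop over @offset. A hedged sketch of that loop using the driver's synchronous mailbox helper, with LPFC_MBOX_TMO standing in for whatever timeout the caller would use (response copy-out and error handling elided; the real code also advances by the returned word count rather than a fixed stride):

/* Sketch only: walk the HBA's VPD region in DMP_RSP_SIZE chunks. */
static void example_fetch_vpd(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	uint16_t offset = 0;

	if (!pmb)
		return;
	do {
		lpfc_dump_mem(phba, pmb, offset);
		if (lpfc_sli_issue_mbox_wait(phba, pmb, LPFC_MBOX_TMO) !=
		    MBX_SUCCESS)
			break;
		/* real code copies the varDmp response out of SLIM here */
		offset += DMP_RSP_SIZE;
	} while (offset < DMP_VPD_SIZE);
	mempool_free(pmb, phba->mbox_mem_pool);
}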
/**********************************************/
/* lpfc_read_nv Issue a READ NVPARAM */
/* mailbox command */
/**********************************************/
/**
* lpfc_read_nv: Prepare a mailbox command for reading HBA's NVRAM param.
* @phba: pointer to lpfc hba data structure.
* @pmb: pointer to the driver internal queue element for mailbox command.
*
* The read NVRAM mailbox command returns the HBA's non-volatile parameters
* that are used as defaults when the Fibre Channel link is brought on-line.
*
* This routine prepares the mailbox command for reading information stored
* in the HBA's NVRAM. Specifically, the HBA's WWNN and WWPN.
**/
void
lpfc_read_nv(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
{
@ -81,10 +99,19 @@ lpfc_read_nv(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
return;
}
/**********************************************/
/* lpfc_config_async Issue a */
/* MBX_ASYNC_EVT_ENABLE mailbox command */
/**********************************************/
/**
* lpfc_config_async: Prepare a mailbox command for enabling HBA async event.
* @phba: pointer to lpfc hba data structure.
* @pmb: pointer to the driver internal queue element for mailbox command.
* @ring: ring number for the asynchronous event to be configured.
*
* The asynchronous event enable mailbox command is used to enable the
* asynchronous event posting via the ASYNC_STATUS_CN IOCB response and
* specifies the default ring to which events are posted.
*
* This routine prepares the mailbox command for enabling HBA asynchronous
 * event support on an IOCB ring.
**/
void
lpfc_config_async(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb,
uint32_t ring)
@ -99,10 +126,19 @@ lpfc_config_async(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb,
return;
}
/**********************************************/
/* lpfc_heart_beat Issue a HEART_BEAT */
/* mailbox command */
/**********************************************/
/**
* lpfc_heart_beat: Prepare a mailbox command for heart beat.
* @phba: pointer to lpfc hba data structure.
* @pmb: pointer to the driver internal queue element for mailbox command.
*
 * The heart beat mailbox command is used to detect an unresponsive HBA, which
 * is defined as any device where no error attention is sent and neither the
 * mailbox nor the rings are processed.
*
* This routine prepares the mailbox command for issuing a heart beat in the
* form of mailbox command to the HBA. The timely completion of the heart
* beat mailbox command indicates the health of the HBA.
**/
void
lpfc_heart_beat(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
{
@ -115,10 +151,26 @@ lpfc_heart_beat(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
return;
}
/**********************************************/
/* lpfc_read_la Issue a READ LA */
/* mailbox command */
/**********************************************/
/**
* lpfc_read_la: Prepare a mailbox command for reading HBA link attention.
* @phba: pointer to lpfc hba data structure.
* @pmb: pointer to the driver internal queue element for mailbox command.
* @mp: DMA buffer memory for reading the link attention information into.
*
* The read link attention mailbox command is issued to read the Link Event
* Attention information indicated by the HBA port when the Link Event bit
* of the Host Attention (HSTATT) register is set to 1. A Link Event
* Attention occurs based on an exception detected at the Fibre Channel link
* interface.
*
* This routine prepares the mailbox command for reading HBA link attention
* information. A DMA memory has been set aside and address passed to the
* HBA through @mp for the HBA to DMA link attention information into the
* memory as part of the execution of the mailbox command.
*
* Return codes
 * 0 - Success (currently always returns 0)
**/
int
lpfc_read_la(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb, struct lpfc_dmabuf *mp)
{
@ -143,10 +195,21 @@ lpfc_read_la(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb, struct lpfc_dmabuf *mp)
return (0);
}
/**********************************************/
/* lpfc_clear_la Issue a CLEAR LA */
/* mailbox command */
/**********************************************/
/**
* lpfc_clear_la: Prepare a mailbox command for clearing HBA link attention.
* @phba: pointer to lpfc hba data structure.
* @pmb: pointer to the driver internal queue element for mailbox command.
*
* The clear link attention mailbox command is issued to clear the link event
* attention condition indicated by the Link Event bit of the Host Attention
* (HSTATT) register. The link event attention condition is cleared only if
* the event tag specified matches that of the current link event counter.
* The current event tag is read using the read link attention event mailbox
* command.
*
* This routine prepares the mailbox command for clearing HBA link attention
* information.
**/
void
lpfc_clear_la(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
{
@ -161,10 +224,20 @@ lpfc_clear_la(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
return;
}
/**************************************************/
/* lpfc_config_link Issue a CONFIG LINK */
/* mailbox command */
/**************************************************/
/**
* lpfc_config_link: Prepare a mailbox command for configuring link on a HBA.
* @phba: pointer to lpfc hba data structure.
* @pmb: pointer to the driver internal queue element for mailbox command.
*
* The configure link mailbox command is used before the initialize link
* mailbox command to override default value and to configure link-oriented
* parameters such as DID address and various timers. Typically, this
* command would be used after an F_Port login to set the returned DID address
* and the fabric timeout values. This command is not valid before a configure
* port command has configured the HBA port.
*
* This routine prepares the mailbox command for configuring link on a HBA.
**/
void
lpfc_config_link(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
{
@ -199,10 +272,98 @@ lpfc_config_link(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
return;
}
/**********************************************/
/* lpfc_init_link Issue an INIT LINK */
/* mailbox command */
/**********************************************/
/**
* lpfc_config_msi: Prepare a mailbox command for configuring msi-x.
* @phba: pointer to lpfc hba data structure.
* @pmb: pointer to the driver internal queue element for mailbox command.
*
* The configure MSI-X mailbox command is used to configure the HBA's SLI-3
* MSI-X multi-message interrupt vector association to interrupt attention
* conditions.
*
* Return codes
* 0 - Success
* -EINVAL - Failure
**/
int
lpfc_config_msi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
MAILBOX_t *mb = &pmb->mb;
uint32_t attentionConditions[2];
/* Sanity check */
if (phba->cfg_use_msi != 2) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"0475 Not configured for supporting MSI-X "
"cfg_use_msi: 0x%x\n", phba->cfg_use_msi);
return -EINVAL;
}
if (phba->sli_rev < 3) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"0476 HBA not supporting SLI-3 or later "
"SLI Revision: 0x%x\n", phba->sli_rev);
return -EINVAL;
}
/* Clear mailbox command fields */
memset(pmb, 0, sizeof(LPFC_MBOXQ_t));
/*
 * SLI-3, Message Signaled Interrupt Feature.
*/
/* Multi-message attention configuration */
attentionConditions[0] = (HA_R0ATT | HA_R1ATT | HA_R2ATT | HA_ERATT |
HA_LATT | HA_MBATT);
attentionConditions[1] = 0;
mb->un.varCfgMSI.attentionConditions[0] = attentionConditions[0];
mb->un.varCfgMSI.attentionConditions[1] = attentionConditions[1];
/*
* Set up message number to HA bit association
*/
#ifdef __BIG_ENDIAN_BITFIELD
/* RA0 (FCP Ring) */
mb->un.varCfgMSI.messageNumberByHA[HA_R0_POS] = 1;
/* RA1 (Other Protocol Extra Ring) */
mb->un.varCfgMSI.messageNumberByHA[HA_R1_POS] = 1;
#else /* __LITTLE_ENDIAN_BITFIELD */
/* RA0 (FCP Ring) */
mb->un.varCfgMSI.messageNumberByHA[HA_R0_POS^3] = 1;
/* RA1 (Other Protocol Extra Ring) */
mb->un.varCfgMSI.messageNumberByHA[HA_R1_POS^3] = 1;
#endif
/* Multi-message interrupt autoclear configuration*/
mb->un.varCfgMSI.autoClearHA[0] = attentionConditions[0];
mb->un.varCfgMSI.autoClearHA[1] = attentionConditions[1];
/* For now, HBA autoclear does not work reliably, disable it */
mb->un.varCfgMSI.autoClearHA[0] = 0;
mb->un.varCfgMSI.autoClearHA[1] = 0;
/* Set command and owner bit */
mb->mbxCommand = MBX_CONFIG_MSI;
mb->mbxOwner = OWN_HOST;
return 0;
}
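The ^ 3 in the little-endian arm above deserves a note: messageNumberByHA[] is laid out as big-endian 32-bit words, and XOR-ing a byte index with 3 swaps bytes 0 and 3 (and 1 and 2) within each word, so both arms address the same byte of the register image. A standalone illustration, not driver code:

/* Byte i of a big-endian word sits at index (i ^ 3) when the word is
 * held in little-endian byte order, hence HA_Rn_POS ^ 3 above. */
static inline void msg_byte_demo(uint8_t *msgByHA)
{
	unsigned int pos = HA_R0_POS;	/* position 3 within word 0 */

#ifdef __BIG_ENDIAN_BITFIELD
	msgByHA[pos] = 1;		/* natural indexing */
#else /* __LITTLE_ENDIAN_BITFIELD */
	msgByHA[pos ^ 3] = 1;		/* same byte after the swizzle */
#endif
}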
/**
* lpfc_init_link: Prepare a mailbox command for initialize link on a HBA.
* @phba: pointer to lpfc hba data structure.
* @pmb: pointer to the driver internal queue element for mailbox command.
* @topology: the link topology for the link to be initialized to.
* @linkspeed: the link speed for the link to be initialized to.
*
* The initialize link mailbox command is used to initialize the Fibre
* Channel link. This command must follow a configure port command that
* establishes the mode of operation.
*
* This routine prepares the mailbox command for initializing link on a HBA
* with the specified link topology and speed.
**/
void
lpfc_init_link(struct lpfc_hba * phba,
LPFC_MBOXQ_t * pmb, uint32_t topology, uint32_t linkspeed)
@ -269,10 +430,27 @@ lpfc_init_link(struct lpfc_hba * phba,
return;
}
/**********************************************/
/* lpfc_read_sparam Issue a READ SPARAM */
/* mailbox command */
/**********************************************/
/**
* lpfc_read_sparam: Prepare a mailbox command for reading HBA parameters.
* @phba: pointer to lpfc hba data structure.
* @pmb: pointer to the driver internal queue element for mailbox command.
* @vpi: virtual N_Port identifier.
*
* The read service parameter mailbox command is used to read the HBA port
* service parameters. The service parameters are read into the buffer
* specified directly by a BDE in the mailbox command. These service
 * parameters may then be used to build the payload of an N_Port/F_Port
* login request and reply (LOGI/ACC).
*
* This routine prepares the mailbox command for reading HBA port service
* parameters. The DMA memory is allocated in this function and the addresses
* are populated into the mailbox command for the HBA to DMA the service
* parameters into.
*
* Return codes
* 0 - Success
* 1 - DMA memory allocation failed
**/
int
lpfc_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb, int vpi)
{
@ -312,10 +490,21 @@ lpfc_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb, int vpi)
return (0);
}
/********************************************/
/* lpfc_unreg_did Issue a UNREG_DID */
/* mailbox command */
/********************************************/
/**
* lpfc_unreg_did: Prepare a mailbox command for unregistering DID.
* @phba: pointer to lpfc hba data structure.
* @vpi: virtual N_Port identifier.
* @did: remote port identifier.
* @pmb: pointer to the driver internal queue element for mailbox command.
*
* The unregister DID mailbox command is used to unregister an N_Port/F_Port
* login for an unknown RPI by specifying the DID of a remote port. This
* command frees an RPI context in the HBA port. This has the effect of
* performing an implicit N_Port/F_Port logout.
*
* This routine prepares the mailbox command for unregistering a remote
* N_Port/F_Port (DID) login.
**/
void
lpfc_unreg_did(struct lpfc_hba * phba, uint16_t vpi, uint32_t did,
LPFC_MBOXQ_t * pmb)
@ -333,10 +522,19 @@ lpfc_unreg_did(struct lpfc_hba * phba, uint16_t vpi, uint32_t did,
return;
}
/**********************************************/
/* lpfc_read_nv Issue a READ CONFIG */
/* mailbox command */
/**********************************************/
/**
* lpfc_read_config: Prepare a mailbox command for reading HBA configuration.
* @phba: pointer to lpfc hba data structure.
* @pmb: pointer to the driver internal queue element for mailbox command.
*
* The read configuration mailbox command is used to read the HBA port
* configuration parameters. This mailbox command provides a method for
* seeing any parameters that may have changed via various configuration
* mailbox commands.
*
* This routine prepares the mailbox command for reading out HBA configuration
* parameters.
**/
void
lpfc_read_config(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
{
@ -350,10 +548,18 @@ lpfc_read_config(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
return;
}
/*************************************************/
/* lpfc_read_lnk_stat Issue a READ LINK STATUS */
/* mailbox command */
/*************************************************/
/**
* lpfc_read_lnk_stat: Prepare a mailbox command for reading HBA link stats.
* @phba: pointer to lpfc hba data structure.
* @pmb: pointer to the driver internal queue element for mailbox command.
*
* The read link status mailbox command is used to read the link status from
* the HBA. Link status includes all link-related error counters. These
* counters are maintained by the HBA and originated in the link hardware
* unit. Note that all of these counters wrap.
*
* This routine prepares the mailbox command for reading out HBA link status.
**/
void
lpfc_read_lnk_stat(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
{
@ -367,10 +573,30 @@ lpfc_read_lnk_stat(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
return;
}
/********************************************/
/* lpfc_reg_login Issue a REG_LOGIN */
/* mailbox command */
/********************************************/
/**
* lpfc_reg_login: Prepare a mailbox command for registering remote login.
* @phba: pointer to lpfc hba data structure.
* @vpi: virtual N_Port identifier.
* @did: remote port identifier.
* @param: pointer to memory holding the server parameters.
* @pmb: pointer to the driver internal queue element for mailbox command.
* @flag: action flag to be passed back for the complete function.
*
* The registration login mailbox command is used to register an N_Port or
* F_Port login. This registration allows the HBA to cache the remote N_Port
* service parameters internally and thereby make the appropriate FC-2
* decisions. The remote port service parameters are handed off by the driver
* to the HBA using a descriptor entry that directly identifies a buffer in
* host memory. In exchange, the HBA returns an RPI identifier.
*
* This routine prepares the mailbox command for registering remote port login.
* The function allocates DMA buffer for passing the service parameters to the
* HBA with the mailbox command.
*
* Return codes
* 0 - Success
* 1 - DMA memory allocation failed
**/
int
lpfc_reg_login(struct lpfc_hba *phba, uint16_t vpi, uint32_t did,
uint8_t *param, LPFC_MBOXQ_t *pmb, uint32_t flag)
@ -418,10 +644,20 @@ lpfc_reg_login(struct lpfc_hba *phba, uint16_t vpi, uint32_t did,
return (0);
}
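To make the flow above concrete, here is a hedged sketch of registering a remote login once the service parameters are in hand. It follows the mailbox-issue pattern used throughout the driver; error handling is simplified, and real code must also free the DMA buffer saved in mbox->context1 on an issue failure:

/* Sketch only: register an RPI for a remote port after PLOGI. */
static int example_reg_login(struct lpfc_vport *vport, uint32_t did,
			     uint8_t *sparam)
{
	struct lpfc_hba *phba = vport->phba;
	LPFC_MBOXQ_t *mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);

	if (!mbox)
		return -ENOMEM;
	if (lpfc_reg_login(phba, vport->vpi, did, sparam, mbox, 0)) {
		mempool_free(mbox, phba->mbox_mem_pool);
		return -ENOMEM;		/* DMA buffer allocation failed */
	}
	mbox->vport = vport;
	mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login;
	if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) == MBX_NOT_FINISHED) {
		mempool_free(mbox, phba->mbox_mem_pool);
		return -EIO;
	}
	return 0;		/* RPI arrives in the completion handler */
}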
/**********************************************/
/* lpfc_unreg_login Issue a UNREG_LOGIN */
/* mailbox command */
/**********************************************/
/**
* lpfc_unreg_login: Prepare a mailbox command for unregistering remote login.
* @phba: pointer to lpfc hba data structure.
* @vpi: virtual N_Port identifier.
* @rpi: remote port identifier
* @pmb: pointer to the driver internal queue element for mailbox command.
*
* The unregistration login mailbox command is used to unregister an N_Port
* or F_Port login. This command frees an RPI context in the HBA. It has the
* effect of performing an implicit N_Port/F_Port logout.
*
* This routine prepares the mailbox command for unregistering remote port
* login.
**/
void
lpfc_unreg_login(struct lpfc_hba *phba, uint16_t vpi, uint32_t rpi,
LPFC_MBOXQ_t * pmb)
@ -440,10 +676,21 @@ lpfc_unreg_login(struct lpfc_hba *phba, uint16_t vpi, uint32_t rpi,
return;
}
/**************************************************/
/* lpfc_reg_vpi Issue a REG_VPI */
/* mailbox command */
/**************************************************/
/**
* lpfc_reg_vpi: Prepare a mailbox command for registering vport identifier.
* @phba: pointer to lpfc hba data structure.
* @vpi: virtual N_Port identifier.
* @sid: Fibre Channel S_ID (N_Port_ID assigned to a virtual N_Port).
* @pmb: pointer to the driver internal queue element for mailbox command.
*
* The registration vport identifier mailbox command is used to activate a
* virtual N_Port after it has acquired an N_Port_ID. The HBA validates the
* N_Port_ID against the information in the selected virtual N_Port context
* block and marks it active to allow normal processing of IOCB commands and
* received unsolicited exchanges.
*
* This routine prepares the mailbox command for registering a virtual N_Port.
**/
void
lpfc_reg_vpi(struct lpfc_hba *phba, uint16_t vpi, uint32_t sid,
LPFC_MBOXQ_t *pmb)
@ -461,10 +708,22 @@ lpfc_reg_vpi(struct lpfc_hba *phba, uint16_t vpi, uint32_t sid,
}
/**************************************************/
/* lpfc_unreg_vpi Issue a UNREG_VNPI */
/* mailbox command */
/**************************************************/
/**
* lpfc_unreg_vpi: Prepare a mailbox command for unregistering vport id.
* @phba: pointer to lpfc hba data structure.
* @vpi: virtual N_Port identifier.
* @pmb: pointer to the driver internal queue element for mailbox command.
*
* The unregistration vport identifier mailbox command is used to inactivate
* a virtual N_Port. The driver must have logged out and unregistered all
 * remote N_Ports to abort any activity on the virtual N_Port. The HBA
 * unregisters any default RPIs associated with the specified vpi, aborting
* any active exchanges. The HBA will post the mailbox response after making
* the virtual N_Port inactive.
*
* This routine prepares the mailbox command for unregistering a virtual
* N_Port.
**/
void
lpfc_unreg_vpi(struct lpfc_hba *phba, uint16_t vpi, LPFC_MBOXQ_t *pmb)
{
@ -479,12 +738,19 @@ lpfc_unreg_vpi(struct lpfc_hba *phba, uint16_t vpi, LPFC_MBOXQ_t *pmb)
}
/**
* lpfc_config_pcb_setup: Set up IOCB rings in the Port Control Block (PCB)
* @phba: pointer to lpfc hba data structure.
*
* This routine sets up and initializes the IOCB rings in the Port Control
* Block (PCB).
**/
static void
lpfc_config_pcb_setup(struct lpfc_hba * phba)
{
struct lpfc_sli *psli = &phba->sli;
struct lpfc_sli_ring *pring;
PCB_t *pcbp = &phba->slim2p->pcb;
PCB_t *pcbp = phba->pcb;
dma_addr_t pdma_addr;
uint32_t offset;
uint32_t iocbCnt = 0;
@ -513,29 +779,43 @@ lpfc_config_pcb_setup(struct lpfc_hba * phba)
continue;
}
/* Command ring setup for ring */
pring->cmdringaddr = (void *) &phba->slim2p->IOCBs[iocbCnt];
pring->cmdringaddr = (void *)&phba->IOCBs[iocbCnt];
pcbp->rdsc[i].cmdEntries = pring->numCiocb;
offset = (uint8_t *) &phba->slim2p->IOCBs[iocbCnt] -
(uint8_t *) phba->slim2p;
pdma_addr = phba->slim2p_mapping + offset;
offset = (uint8_t *) &phba->IOCBs[iocbCnt] -
(uint8_t *) phba->slim2p.virt;
pdma_addr = phba->slim2p.phys + offset;
pcbp->rdsc[i].cmdAddrHigh = putPaddrHigh(pdma_addr);
pcbp->rdsc[i].cmdAddrLow = putPaddrLow(pdma_addr);
iocbCnt += pring->numCiocb;
/* Response ring setup for ring */
pring->rspringaddr = (void *) &phba->slim2p->IOCBs[iocbCnt];
pring->rspringaddr = (void *) &phba->IOCBs[iocbCnt];
pcbp->rdsc[i].rspEntries = pring->numRiocb;
offset = (uint8_t *)&phba->slim2p->IOCBs[iocbCnt] -
(uint8_t *)phba->slim2p;
pdma_addr = phba->slim2p_mapping + offset;
offset = (uint8_t *)&phba->IOCBs[iocbCnt] -
(uint8_t *)phba->slim2p.virt;
pdma_addr = phba->slim2p.phys + offset;
pcbp->rdsc[i].rspAddrHigh = putPaddrHigh(pdma_addr);
pcbp->rdsc[i].rspAddrLow = putPaddrLow(pdma_addr);
iocbCnt += pring->numRiocb;
}
}
/**
* lpfc_read_rev: Prepare a mailbox command for reading HBA revision.
* @phba: pointer to lpfc hba data structure.
* @pmb: pointer to the driver internal queue element for mailbox command.
*
* The read revision mailbox command is used to read the revision levels of
* the HBA components. These components include hardware units, resident
 * firmware, and available firmware. HBAs that support SLI-3 mode of
* operation provide different response information depending on the version
* requested by the driver.
*
* This routine prepares the mailbox command for reading HBA revision
* information.
**/
void
lpfc_read_rev(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
{
@ -548,6 +828,16 @@ lpfc_read_rev(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
return;
}
/**
* lpfc_build_hbq_profile2: Set up the HBQ Selection Profile 2.
* @hbqmb: pointer to the HBQ configuration data structure in mailbox command.
* @hbq_desc: pointer to the HBQ selection profile descriptor.
*
* The Host Buffer Queue (HBQ) Selection Profile 2 specifies that the HBA
 * tests the incoming frames' R_CTL/TYPE fields with words 10:15 and performs
* the Sequence Length Test using the fields in the Selection Profile 2
* extension in words 20:31.
**/
static void
lpfc_build_hbq_profile2(struct config_hbq_var *hbqmb,
struct lpfc_hbq_init *hbq_desc)
@ -557,6 +847,16 @@ lpfc_build_hbq_profile2(struct config_hbq_var *hbqmb,
hbqmb->profiles.profile2.seqlenoff = hbq_desc->seqlenoff;
}
/**
* lpfc_build_hbq_profile3: Set up the HBQ Selection Profile 3.
* @hbqmb: pointer to the HBQ configuration data structure in mailbox command.
* @hbq_desc: pointer to the HBQ selection profile descriptor.
*
* The Host Buffer Queue (HBQ) Selection Profile 3 specifies that the HBA
* tests the incoming frame's R_CTL/TYPE fields with words 10:15 and performs
* the Sequence Length Test and Byte Field Test using the fields in the
* Selection Profile 3 extension in words 20:31.
**/
static void
lpfc_build_hbq_profile3(struct config_hbq_var *hbqmb,
struct lpfc_hbq_init *hbq_desc)
@ -569,6 +869,17 @@ lpfc_build_hbq_profile3(struct config_hbq_var *hbqmb,
sizeof(hbqmb->profiles.profile3.cmdmatch));
}
/**
* lpfc_build_hbq_profile5: Set up the HBQ Selection Profile 5.
* @hbqmb: pointer to the HBQ configuration data structure in mailbox command.
* @hbq_desc: pointer to the HBQ selection profile descriptor.
*
* The Host Buffer Queue (HBQ) Selection Profile 5 specifies a header HBQ. The
* HBA tests the initial frame of an incoming sequence using the frame's
* R_CTL/TYPE fields with words 10:15 and performs the Sequence Length Test
* and Byte Field Test using the fields in the Selection Profile 5 extension
* words 20:31.
**/
static void
lpfc_build_hbq_profile5(struct config_hbq_var *hbqmb,
struct lpfc_hbq_init *hbq_desc)
@ -581,6 +892,20 @@ lpfc_build_hbq_profile5(struct config_hbq_var *hbqmb,
sizeof(hbqmb->profiles.profile5.cmdmatch));
}
/**
* lpfc_config_hbq: Prepare a mailbox command for configuring an HBQ.
* @phba: pointer to lpfc hba data structure.
* @id: HBQ identifier.
* @hbq_desc: pointer to the HBA descriptor data structure.
* @hbq_entry_index: index of the HBQ entry data structures.
* @pmb: pointer to the driver internal queue element for mailbox command.
*
* The configure HBQ (Host Buffer Queue) mailbox command is used to configure
* an HBQ. The configuration binds events that require buffers to a particular
* ring and HBQ based on a selection profile.
*
* This routine prepares the mailbox command for configuring an HBQ.
**/
void
lpfc_config_hbq(struct lpfc_hba *phba, uint32_t id,
struct lpfc_hbq_init *hbq_desc,
@ -641,8 +966,23 @@ lpfc_config_hbq(struct lpfc_hba *phba, uint32_t id,
return;
}
/**
* lpfc_config_ring: Prepare a mailbox command for configuring an IOCB ring.
* @phba: pointer to lpfc hba data structure.
 * @ring: the IOCB ring number to be configured.
* @pmb: pointer to the driver internal queue element for mailbox command.
*
* The configure ring mailbox command is used to configure an IOCB ring. This
 * configuration binds from one to six of the HBA's R_CTL/TYPE mask entries to
 * the ring. This is used to map incoming sequences to a particular ring whose
 * R_CTL/TYPE mask entry matches that of the sequence. The driver should not
* attempt to configure a ring whose number is greater than the number
* specified in the Port Control Block (PCB). It is an error to issue the
* configure ring command more than once with the same ring number. The HBA
* returns an error if the driver attempts this.
*
* This routine prepares the mailbox command for configuring IOCB ring.
**/
void
lpfc_config_ring(struct lpfc_hba * phba, int ring, LPFC_MBOXQ_t * pmb)
{
@ -684,6 +1024,20 @@ lpfc_config_ring(struct lpfc_hba * phba, int ring, LPFC_MBOXQ_t * pmb)
return;
}
/**
* lpfc_config_port: Prepare a mailbox command for configuring port.
* @phba: pointer to lpfc hba data structure.
* @pmb: pointer to the driver internal queue element for mailbox command.
*
* The configure port mailbox command is used to identify the Port Control
* Block (PCB) in the driver memory. After this command is issued, the
* driver must not access the mailbox in the HBA without first resetting
* the HBA. The HBA may copy the PCB information to internal storage for
 * subsequent use; the driver cannot change the PCB information unless it
* resets the HBA.
*
* This routine prepares the mailbox command for configuring port.
**/
void
lpfc_config_port(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
@ -702,8 +1056,8 @@ lpfc_config_port(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
mb->un.varCfgPort.pcbLen = sizeof(PCB_t);
offset = (uint8_t *)&phba->slim2p->pcb - (uint8_t *)phba->slim2p;
pdma_addr = phba->slim2p_mapping + offset;
offset = (uint8_t *)phba->pcb - (uint8_t *)phba->slim2p.virt;
pdma_addr = phba->slim2p.phys + offset;
mb->un.varCfgPort.pcbLow = putPaddrLow(pdma_addr);
mb->un.varCfgPort.pcbHigh = putPaddrHigh(pdma_addr);
@ -711,12 +1065,13 @@ lpfc_config_port(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
if (phba->sli_rev == 3 && phba->vpd.sli3Feat.cerbm) {
mb->un.varCfgPort.cerbm = 1; /* Request HBQs */
mb->un.varCfgPort.ccrp = 1; /* Command Ring Polling */
mb->un.varCfgPort.cinb = 1; /* Interrupt Notification Block */
mb->un.varCfgPort.max_hbq = lpfc_sli_hbq_count();
if (phba->max_vpi && phba->cfg_enable_npiv &&
phba->vpd.sli3Feat.cmv) {
mb->un.varCfgPort.max_vpi = phba->max_vpi;
mb->un.varCfgPort.cmv = 1;
phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED;
} else
mb->un.varCfgPort.max_vpi = phba->max_vpi = 0;
} else
@ -724,16 +1079,15 @@ lpfc_config_port(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
mb->un.varCfgPort.sli_mode = phba->sli_rev;
/* Now setup pcb */
phba->slim2p->pcb.type = TYPE_NATIVE_SLI2;
phba->slim2p->pcb.feature = FEATURE_INITIAL_SLI2;
phba->pcb->type = TYPE_NATIVE_SLI2;
phba->pcb->feature = FEATURE_INITIAL_SLI2;
/* Setup Mailbox pointers */
phba->slim2p->pcb.mailBoxSize = offsetof(MAILBOX_t, us) +
sizeof(struct sli2_desc);
offset = (uint8_t *)&phba->slim2p->mbx - (uint8_t *)phba->slim2p;
pdma_addr = phba->slim2p_mapping + offset;
phba->slim2p->pcb.mbAddrHigh = putPaddrHigh(pdma_addr);
phba->slim2p->pcb.mbAddrLow = putPaddrLow(pdma_addr);
phba->pcb->mailBoxSize = sizeof(MAILBOX_t);
offset = (uint8_t *)phba->mbox - (uint8_t *)phba->slim2p.virt;
pdma_addr = phba->slim2p.phys + offset;
phba->pcb->mbAddrHigh = putPaddrHigh(pdma_addr);
phba->pcb->mbAddrLow = putPaddrLow(pdma_addr);
/*
* Setup Host Group ring pointer.
@ -794,13 +1148,13 @@ lpfc_config_port(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
}
/* mask off BAR0's flag bits 0 - 3 */
phba->slim2p->pcb.hgpAddrLow = (bar_low & PCI_BASE_ADDRESS_MEM_MASK) +
(void __iomem *) phba->host_gp -
phba->pcb->hgpAddrLow = (bar_low & PCI_BASE_ADDRESS_MEM_MASK) +
(void __iomem *)phba->host_gp -
(void __iomem *)phba->MBslimaddr;
if (bar_low & PCI_BASE_ADDRESS_MEM_TYPE_64)
phba->slim2p->pcb.hgpAddrHigh = bar_high;
phba->pcb->hgpAddrHigh = bar_high;
else
phba->slim2p->pcb.hgpAddrHigh = 0;
phba->pcb->hgpAddrHigh = 0;
/* write HGP data to SLIM at the required longword offset */
memset(&hgp, 0, sizeof(struct lpfc_hgp));
@ -810,17 +1164,19 @@ lpfc_config_port(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
}
/* Setup Port Group ring pointer */
if (phba->sli_rev == 3)
pgp_offset = (uint8_t *)&phba->slim2p->mbx.us.s3_pgp.port -
(uint8_t *)phba->slim2p;
else
pgp_offset = (uint8_t *)&phba->slim2p->mbx.us.s2.port -
(uint8_t *)phba->slim2p;
pdma_addr = phba->slim2p_mapping + pgp_offset;
phba->slim2p->pcb.pgpAddrHigh = putPaddrHigh(pdma_addr);
phba->slim2p->pcb.pgpAddrLow = putPaddrLow(pdma_addr);
phba->hbq_get = &phba->slim2p->mbx.us.s3_pgp.hbq_get[0];
if (phba->sli3_options & LPFC_SLI3_INB_ENABLED) {
pgp_offset = offsetof(struct lpfc_sli2_slim,
mbx.us.s3_inb_pgp.port);
phba->hbq_get = phba->mbox->us.s3_inb_pgp.hbq_get;
} else if (phba->sli_rev == 3) {
pgp_offset = offsetof(struct lpfc_sli2_slim,
mbx.us.s3_pgp.port);
phba->hbq_get = phba->mbox->us.s3_pgp.hbq_get;
} else
pgp_offset = offsetof(struct lpfc_sli2_slim, mbx.us.s2.port);
pdma_addr = phba->slim2p.phys + pgp_offset;
phba->pcb->pgpAddrHigh = putPaddrHigh(pdma_addr);
phba->pcb->pgpAddrLow = putPaddrLow(pdma_addr);
/* Use callback routine to set up rings in the pcb */
lpfc_config_pcb_setup(phba);
@ -835,10 +1191,24 @@ lpfc_config_port(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
}
/* Swap PCB if needed */
lpfc_sli_pcimem_bcopy(&phba->slim2p->pcb, &phba->slim2p->pcb,
sizeof(PCB_t));
lpfc_sli_pcimem_bcopy(phba->pcb, phba->pcb, sizeof(PCB_t));
}
/**
* lpfc_kill_board: Prepare a mailbox command for killing board.
* @phba: pointer to lpfc hba data structure.
* @pmb: pointer to the driver internal queue element for mailbox command.
*
* The kill board mailbox command is used to tell firmware to perform a
* graceful shutdown of a channel on a specified board to prepare for reset.
* When the kill board mailbox command is received, the ER3 bit is set to 1
* in the Host Status register and the ER Attention bit is set to 1 in the
* Host Attention register of the HBA function that received the kill board
* command.
*
* This routine prepares the mailbox command for killing the board in
* preparation for a graceful shutdown.
**/
void
lpfc_kill_board(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
{
@ -850,6 +1220,16 @@ lpfc_kill_board(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
return;
}
/**
* lpfc_mbox_put: Put a mailbox cmd into the tail of driver's mailbox queue.
* @phba: pointer to lpfc hba data structure.
* @mbq: pointer to the driver internal queue element for mailbox command.
*
 * The driver maintains an internal mailbox command queue implemented as a
 * linked list. When a mailbox command is issued, it shall be put at the tail
 * of the mailbox command queue so that commands are processed in order, as
 * the HBA can process only one mailbox command at a time.
**/
void
lpfc_mbox_put(struct lpfc_hba * phba, LPFC_MBOXQ_t * mbq)
{
@ -864,6 +1244,20 @@ lpfc_mbox_put(struct lpfc_hba * phba, LPFC_MBOXQ_t * mbq)
return;
}
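Together with lpfc_mbox_get() below, this forms a plain FIFO over psli->mboxq. A hedged sketch of the underlying list operations (locking elided; in the driver the callers serialize with phba->hbalock):

/* Conceptual FIFO behind lpfc_mbox_put()/lpfc_mbox_get(). */
static void mbox_fifo_put(struct lpfc_sli *psli, LPFC_MBOXQ_t *mbq)
{
	list_add_tail(&mbq->list, &psli->mboxq);	/* enqueue at tail */
	psli->mboxq_cnt++;
}

static LPFC_MBOXQ_t *mbox_fifo_get(struct lpfc_sli *psli)
{
	LPFC_MBOXQ_t *mbq = NULL;

	/* dequeue from the head; mbq stays NULL when the queue is empty */
	list_remove_head(&psli->mboxq, mbq, LPFC_MBOXQ_t, list);
	if (mbq)
		psli->mboxq_cnt--;
	return mbq;
}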
/**
* lpfc_mbox_get: Remove a mailbox cmd from the head of driver's mailbox queue.
* @phba: pointer to lpfc hba data structure.
*
 * The driver maintains an internal mailbox command queue implemented as a
 * linked list. When a mailbox command is issued, it is put into the mailbox
 * command queue so that commands are processed in order, as the HBA can
 * process only one mailbox command at a time. After the HBA finishes
 * processing a mailbox command, the driver removes the next pending mailbox
 * command from the head of the queue and sends it to the HBA for processing.
*
* Return codes
* pointer to the driver internal queue element for mailbox command.
**/
LPFC_MBOXQ_t *
lpfc_mbox_get(struct lpfc_hba * phba)
{
@ -877,6 +1271,17 @@ lpfc_mbox_get(struct lpfc_hba * phba)
return mbq;
}
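The matching dequeue, again a sketch of the elided body, assuming the driver's list_remove_head() helper behaves as the description above suggests (the visible tail of the hunk already returns mbq):
	LPFC_MBOXQ_t *mbq = NULL;

	/* detach the oldest pending command, if any, and keep the count in sync */
	list_remove_head(&phba->sli.mboxq, mbq, LPFC_MBOXQ_t, list);
	if (mbq)
		phba->sli.mboxq_cnt--;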
/**
* lpfc_mbox_cmpl_put: Put mailbox command into mailbox command complete list.
* @phba: pointer to lpfc hba data structure.
* @mbq: pointer to the driver internal queue element for mailbox command.
*
* This routine puts the completed mailbox command into the mailbox command
* complete list. It is called from driver interrupt handler context. The
* mailbox complete list is used by the driver worker thread to process
* mailbox complete callback functions outside the driver interrupt handler.
**/
void
lpfc_mbox_cmpl_put(struct lpfc_hba * phba, LPFC_MBOXQ_t * mbq)
{
@ -887,6 +1292,17 @@ lpfc_mbox_cmpl_put(struct lpfc_hba * phba, LPFC_MBOXQ_t * mbq)
return;
}
/**
* lpfc_mbox_tmo_val: Retrieve mailbox command timeout value.
* @phba: pointer to lpfc hba data structure.
* @cmd: mailbox command code.
*
* This routine retrieves the proper timeout value according to the mailbox
* command code.
*
* Return codes
* Timeout value to be used for the given mailbox command
**/
int
lpfc_mbox_tmo_val(struct lpfc_hba *phba, int cmd)
{
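The hunk ends before the body; a plausible shape, assuming the flash-related commands are the ones needing a longer timeout (the specific command cases here are illustrative):
	switch (cmd) {
	case MBX_WRITE_NV:
	case MBX_DOWN_LOAD:
	case MBX_LOAD_AREA:
	case MBX_LOAD_EXP_ROM:
		/* flash operations take much longer than register-only commands */
		return LPFC_MBOX_TMO_FLASH_CMD;
	}
	return LPFC_MBOX_TMO;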

View File

@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
* Copyright (C) 2004-2006 Emulex. All rights reserved. *
* Copyright (C) 2004-2008 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* Portions Copyright (C) 2004-2005 Christoph Hellwig *
@ -30,6 +30,7 @@
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
@ -39,7 +40,21 @@
#define LPFC_MEM_POOL_SIZE 64 /* max elem in non-DMA safety pool */
/**
* lpfc_mem_alloc: create and allocate all PCI and memory pools
* @phba: HBA to allocate pools for
*
* Description: Creates and allocates PCI pools lpfc_scsi_dma_buf_pool,
* lpfc_mbuf_pool, lpfc_hbq_pool. Creates and allocates kmalloc-backed mempools
* for LPFC_MBOXQ_t and lpfc_nodelist. Also allocates the VPI bitmask.
*
* Notes: Not interrupt-safe. Must be called with no locks held. If any
* allocation fails, frees all successfully allocated memory before returning.
*
* Returns:
* 0 on success
* -ENOMEM on failure (if any memory allocations fail)
**/
int
lpfc_mem_alloc(struct lpfc_hba * phba)
{
@ -120,6 +135,16 @@ lpfc_mem_alloc(struct lpfc_hba * phba)
return -ENOMEM;
}
/**
* lpfc_mem_free: Frees all PCI and memory allocated by lpfc_mem_alloc
* @phba: HBA to free memory for
*
* Description: Frees PCI pools lpfc_scsi_dma_buf_pool, lpfc_mbuf_pool,
* lpfc_hbq_pool. Frees kmalloc-backed mempools for LPFC_MBOXQ_t and
* lpfc_nodelist. Also frees the VPI bitmask.
*
* Returns: None
**/
void
lpfc_mem_free(struct lpfc_hba * phba)
{
@ -181,12 +206,29 @@ lpfc_mem_free(struct lpfc_hba * phba)
phba->lpfc_scsi_dma_buf_pool = NULL;
phba->lpfc_mbuf_pool = NULL;
/* Free the iocb lookup array */
kfree(psli->iocbq_lookup);
psli->iocbq_lookup = NULL;
}
/**
* lpfc_mbuf_alloc: Allocate an mbuf from the lpfc_mbuf_pool PCI pool
* @phba: HBA which owns the pool to allocate from
* @mem_flags: indicates if this is a priority (MEM_PRI) allocation
* @handle: used to return the DMA-mapped address of the mbuf
*
* Description: Allocates a DMA-mapped buffer from the lpfc_mbuf_pool PCI pool.
* It tries the generic pci_pool_alloc() first; if that fails and mem_flags
* has MEM_PRI set (the only defined flag), it returns an mbuf from the
* HBA's safety pool.
*
* Notes: Not interrupt-safe. Must be called with no locks held. Takes
* phba->hbalock.
*
* Returns:
* pointer to the allocated mbuf on success
* NULL on failure
**/
void *
lpfc_mbuf_alloc(struct lpfc_hba *phba, int mem_flags, dma_addr_t *handle)
{
@ -206,6 +248,20 @@ lpfc_mbuf_alloc(struct lpfc_hba *phba, int mem_flags, dma_addr_t *handle)
return ret;
}
/**
* __lpfc_mbuf_free: Free an mbuf from the lpfc_mbuf_pool PCI pool (locked)
* @phba: HBA which owns the pool to return to
* @virt: mbuf to free
* @dma: the DMA-mapped address of the lpfc_mbuf_pool to be freed
*
* Description: Returns the mbuf to the lpfc_mbuf_safety_pool if the safety
* pool is below its max_count; otherwise frees the mbuf back to the PCI pool.
*
* Notes: Must be called with phba->hbalock held to synchronize access to
* lpfc_mbuf_safety_pool.
*
* Returns: None
**/
void
__lpfc_mbuf_free(struct lpfc_hba * phba, void *virt, dma_addr_t dma)
{
@ -221,7 +277,21 @@ __lpfc_mbuf_free(struct lpfc_hba * phba, void *virt, dma_addr_t dma)
return;
}
/**
* lpfc_mbuf_free: Free an mbuf from the lpfc_mbuf_pool PCI pool (unlocked)
* @phba: HBA which owns the pool to return to
* @virt: mbuf to free
* @dma: the DMA-mapped address of the lpfc_mbuf_pool to be freed
*
* Description: Returns the mbuf to the lpfc_mbuf_safety_pool if the safety
* pool is below its max_count; otherwise frees the mbuf back to the PCI pool.
*
* Notes: Takes phba->hbalock. Can be called with or without other locks held.
*
* Returns: None
**/
void
lpfc_mbuf_free(struct lpfc_hba * phba, void *virt, dma_addr_t dma)
{
unsigned long iflags;
@ -232,6 +302,19 @@ lpfc_mbuf_free(struct lpfc_hba * phba, void *virt, dma_addr_t dma)
return;
}
/**
* lpfc_els_hbq_alloc: Allocate an HBQ buffer
* @phba: HBA to allocate HBQ buffer for
*
* Description: Allocates a DMA-mapped HBQ buffer from the lpfc_hbq_pool PCI
* pool along with a non-DMA-mapped container for it.
*
* Notes: Not interrupt-safe. Must be called with no locks held.
*
* Returns:
* pointer to HBQ on success
* NULL on failure
**/
struct hbq_dmabuf *
lpfc_els_hbq_alloc(struct lpfc_hba *phba)
{
@ -251,6 +334,18 @@ lpfc_els_hbq_alloc(struct lpfc_hba *phba)
return hbqbp;
}
/**
* lpfc_els_hbq_free: Frees an HBQ buffer allocated with lpfc_els_hbq_alloc
* @phba: HBA buffer was allocated for
* @hbqbp: HBQ container returned by lpfc_els_hbq_alloc
*
* Description: Frees both the container and the DMA-mapped buffer returned by
* lpfc_els_hbq_alloc.
*
* Notes: Can be called with or without locks held.
*
* Returns: None
**/
void
lpfc_els_hbq_free(struct lpfc_hba *phba, struct hbq_dmabuf *hbqbp)
{
@ -259,7 +354,18 @@ lpfc_els_hbq_free(struct lpfc_hba *phba, struct hbq_dmabuf *hbqbp)
return;
}
/* This is ONLY called for the LPFC_ELS_HBQ */
/**
* lpfc_in_buf_free: Free a DMA buffer
* @phba: HBA buffer is associated with
* @mp: Buffer to free
*
* Description: Frees the given DMA buffer in the appropriate way depending on
* whether the HBA is running in SLI3 mode with HBQs enabled.
*
* Notes: Takes phba->hbalock. Can be called with or without other locks held.
*
* Returns: None
**/
void
lpfc_in_buf_free(struct lpfc_hba *phba, struct lpfc_dmabuf *mp)
{

View File

@ -0,0 +1,163 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
* Copyright (C) 2008 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* *
* This program is free software; you can redistribute it and/or *
* modify it under the terms of version 2 of the GNU General *
* Public License as published by the Free Software Foundation. *
* This program is distributed in the hope that it will be useful. *
* ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
* WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
* FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
* DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
* TO BE LEGALLY INVALID. See the GNU General Public License for *
* more details, a copy of which can be found in the file COPYING *
* included with this package. *
*******************************************************************/
/* Event definitions for RegisterForEvent */
#define FC_REG_LINK_EVENT 0x0001 /* link up / down events */
#define FC_REG_RSCN_EVENT 0x0002 /* RSCN events */
#define FC_REG_CT_EVENT 0x0004 /* CT request events */
#define FC_REG_DUMP_EVENT 0x0008 /* Dump events */
#define FC_REG_TEMPERATURE_EVENT 0x0010 /* temperature events */
#define FC_REG_ELS_EVENT 0x0020 /* lpfc els events */
#define FC_REG_FABRIC_EVENT 0x0040 /* lpfc fabric events */
#define FC_REG_SCSI_EVENT 0x0080 /* lpfc scsi events */
#define FC_REG_BOARD_EVENT 0x0100 /* lpfc board events */
#define FC_REG_ADAPTER_EVENT 0x0200 /* lpfc adapter events */
#define FC_REG_EVENT_MASK (FC_REG_LINK_EVENT | \
FC_REG_RSCN_EVENT | \
FC_REG_CT_EVENT | \
FC_REG_DUMP_EVENT | \
FC_REG_TEMPERATURE_EVENT | \
FC_REG_ELS_EVENT | \
FC_REG_FABRIC_EVENT | \
FC_REG_SCSI_EVENT | \
FC_REG_BOARD_EVENT | \
FC_REG_ADAPTER_EVENT)
/* Temperature events */
#define LPFC_CRIT_TEMP 0x1
#define LPFC_THRESHOLD_TEMP 0x2
#define LPFC_NORMAL_TEMP 0x3
/*
* All net link event payloads will begin with an event type
* and subcategory. The event type must come first.
* The subcategory further defines the data that follows in the rest
* of the payload. Each category will have its own unique header plus
* any additional data unique to the subcategory.
* The payload sent via the fc transport is one-way driver->application.
*/
/* els event header */
struct lpfc_els_event_header {
uint32_t event_type;
uint32_t subcategory;
uint8_t wwpn[8];
uint8_t wwnn[8];
};
/* subcategory codes for FC_REG_ELS_EVENT */
#define LPFC_EVENT_PLOGI_RCV 0x01
#define LPFC_EVENT_PRLO_RCV 0x02
#define LPFC_EVENT_ADISC_RCV 0x04
#define LPFC_EVENT_LSRJT_RCV 0x08
/* special els lsrjt event */
struct lpfc_lsrjt_event {
struct lpfc_els_event_header header;
uint32_t command;
uint32_t reason_code;
uint32_t explanation;
};
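As a sketch of the producer side, a driver path that observed an incoming PLOGI might fill the ELS header and hand it to the FC transport with the same vendor-event call used later in this series (ndlp and shost are assumed locals):
	struct lpfc_els_event_header els_data;

	els_data.event_type = FC_REG_ELS_EVENT;
	els_data.subcategory = LPFC_EVENT_PLOGI_RCV;
	memcpy(els_data.wwpn, &ndlp->nlp_portname, sizeof(els_data.wwpn));
	memcpy(els_data.wwnn, &ndlp->nlp_nodename, sizeof(els_data.wwnn));
	fc_host_post_vendor_event(shost, fc_get_event_number(),
			sizeof(els_data), (char *)&els_data,
			SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);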
/* fabric event header */
struct lpfc_fabric_event_header {
uint32_t event_type;
uint32_t subcategory;
uint8_t wwpn[8];
uint8_t wwnn[8];
};
/* subcategory codes for FC_REG_FABRIC_EVENT */
#define LPFC_EVENT_FABRIC_BUSY 0x01
#define LPFC_EVENT_PORT_BUSY 0x02
#define LPFC_EVENT_FCPRDCHKERR 0x04
/* special case fabric fcprdchkerr event */
struct lpfc_fcprdchkerr_event {
struct lpfc_fabric_event_header header;
uint32_t lun;
uint32_t opcode;
uint32_t fcpiparam;
};
/* scsi event header */
struct lpfc_scsi_event_header {
uint32_t event_type;
uint32_t subcategory;
uint32_t lun;
uint8_t wwpn[8];
uint8_t wwnn[8];
};
/* subcategory codes for FC_REG_SCSI_EVENT */
#define LPFC_EVENT_QFULL 0x0001
#define LPFC_EVENT_DEVBSY 0x0002
#define LPFC_EVENT_CHECK_COND 0x0004
#define LPFC_EVENT_LUNRESET 0x0008
#define LPFC_EVENT_TGTRESET 0x0010
#define LPFC_EVENT_BUSRESET 0x0020
#define LPFC_EVENT_VARQUEDEPTH 0x0040
/* special case scsi varqueuedepth event */
struct lpfc_scsi_varqueuedepth_event {
struct lpfc_scsi_event_header scsi_event;
uint32_t oldval;
uint32_t newval;
};
/* special case scsi check condition event */
struct lpfc_scsi_check_condition_event {
struct lpfc_scsi_event_header scsi_event;
uint8_t sense_key;
uint8_t asc;
uint8_t ascq;
};
/* event codes for FC_REG_BOARD_EVENT */
#define LPFC_EVENT_PORTINTERR 0x01
/* board event header */
struct lpfc_board_event_header {
uint32_t event_type;
uint32_t subcategory;
};
/* event codes for FC_REG_ADAPTER_EVENT */
#define LPFC_EVENT_ARRIVAL 0x01
/* adapter event header */
struct lpfc_adapter_event_header {
uint32_t event_type;
uint32_t subcategory;
};
/* event codes for temp_event */
#define LPFC_CRIT_TEMP 0x1
#define LPFC_THRESHOLD_TEMP 0x2
#define LPFC_NORMAL_TEMP 0x3
struct temp_event {
uint32_t event_type;
uint32_t event_code;
uint32_t data;
};

View File

@ -30,6 +30,7 @@
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
@ -1003,20 +1004,8 @@ lpfc_rcv_plogi_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
spin_lock_irq(shost->host_lock);
ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
spin_unlock_irq(shost->host_lock);
if (vport->num_disc_nodes) {
if (vport->num_disc_nodes)
lpfc_more_adisc(vport);
if ((vport->num_disc_nodes == 0) &&
(vport->fc_npr_cnt))
lpfc_els_disc_plogi(vport);
if (vport->num_disc_nodes == 0) {
spin_lock_irq(shost->host_lock);
vport->fc_flag &= ~FC_NDISC_ACTIVE;
spin_unlock_irq(shost->host_lock);
lpfc_can_disctmo(vport);
lpfc_end_rscn(vport);
}
}
}
return ndlp->nlp_state;
}
@ -1865,8 +1854,13 @@ static uint32_t
lpfc_cmpl_logo_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
void *arg, uint32_t evt)
{
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
if (ndlp->nlp_DID == Fabric_DID) {
spin_lock_irq(shost->host_lock);
vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
spin_unlock_irq(shost->host_lock);
}
lpfc_unreg_rpi(vport, ndlp);
/* This routine does nothing, just return the current state */
return ndlp->nlp_state;
}
@ -2155,7 +2149,7 @@ lpfc_disc_state_machine(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
lpfc_nlp_put(ndlp);
} else {
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
"0212 DSM out state %d on NPort free\n", rc);
"0213 DSM out state %d on NPort free\n", rc);
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_DSM,
"DSM out: ste:%d did:x%x flg:x%x",

View File

@ -32,6 +32,7 @@
#include "lpfc_version.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
@ -42,6 +43,111 @@
#define LPFC_RESET_WAIT 2
#define LPFC_ABORT_WAIT 2
/**
* lpfc_update_stats: Update statistical data for the command completion.
* @phba: Pointer to HBA object.
* @lpfc_cmd: lpfc scsi command object pointer.
*
* This function is called on command completion and updates the
* statistical data for the completed command.
**/
static void
lpfc_update_stats(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
{
struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
struct lpfc_nodelist *pnode = rdata->pnode;
struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
unsigned long flags;
struct Scsi_Host *shost = cmd->device->host;
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
unsigned long latency;
int i;
if (cmd->result)
return;
spin_lock_irqsave(shost->host_lock, flags);
if (!vport->stat_data_enabled ||
vport->stat_data_blocked ||
!pnode->lat_data ||
(phba->bucket_type == LPFC_NO_BUCKET)) {
spin_unlock_irqrestore(shost->host_lock, flags);
return;
}
latency = jiffies_to_msecs(jiffies - lpfc_cmd->start_time);
if (phba->bucket_type == LPFC_LINEAR_BUCKET) {
i = (latency + phba->bucket_step - 1 - phba->bucket_base)/
phba->bucket_step;
/* clamp into the last bucket; lat_data has LPFC_MAX_BUCKET_COUNT entries */
if (i >= LPFC_MAX_BUCKET_COUNT)
i = LPFC_MAX_BUCKET_COUNT - 1;
} else {
for (i = 0; i < LPFC_MAX_BUCKET_COUNT-1; i++)
if (latency <= (phba->bucket_base +
((1<<i)*phba->bucket_step)))
break;
}
pnode->lat_data[i].cmd_count++;
spin_unlock_irqrestore(shost->host_lock, flags);
}
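For the linear scheme the index is a ceiling division: with an assumed bucket_base of 0 and bucket_step of 50 ms, a 120 ms completion lands in bucket (120 + 50 - 1 - 0) / 50 = 3. The power-of-2 scheme instead walks buckets whose upper bounds are bucket_base + 2^i * bucket_step, so bucket widths double as latency grows.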
/**
* lpfc_send_sdev_queuedepth_change_event: Posts a queuedepth change
* event.
* @phba: Pointer to HBA context object.
* @vport: Pointer to vport object.
* @ndlp: Pointer to FC node associated with the target.
* @lun: Lun number of the scsi device.
* @old_val: Old value of the queue depth.
* @new_val: New value of the queue depth.
*
* This function sends an event to the mgmt application indicating
* there is a change in the scsi device queue depth.
**/
static void
lpfc_send_sdev_queuedepth_change_event(struct lpfc_hba *phba,
struct lpfc_vport *vport,
struct lpfc_nodelist *ndlp,
uint32_t lun,
uint32_t old_val,
uint32_t new_val)
{
struct lpfc_fast_path_event *fast_path_evt;
unsigned long flags;
fast_path_evt = lpfc_alloc_fast_evt(phba);
if (!fast_path_evt)
return;
fast_path_evt->un.queue_depth_evt.scsi_event.event_type =
FC_REG_SCSI_EVENT;
fast_path_evt->un.queue_depth_evt.scsi_event.subcategory =
LPFC_EVENT_VARQUEDEPTH;
/* Report all luns with change in queue depth */
fast_path_evt->un.queue_depth_evt.scsi_event.lun = lun;
if (ndlp && NLP_CHK_NODE_ACT(ndlp)) {
memcpy(&fast_path_evt->un.queue_depth_evt.scsi_event.wwpn,
&ndlp->nlp_portname, sizeof(struct lpfc_name));
memcpy(&fast_path_evt->un.queue_depth_evt.scsi_event.wwnn,
&ndlp->nlp_nodename, sizeof(struct lpfc_name));
}
fast_path_evt->un.queue_depth_evt.oldval = old_val;
fast_path_evt->un.queue_depth_evt.newval = new_val;
fast_path_evt->vport = vport;
fast_path_evt->work_evt.evt = LPFC_EVT_FASTPATH_MGMT_EVT;
spin_lock_irqsave(&phba->hbalock, flags);
list_add_tail(&fast_path_evt->work_evt.evt_listp, &phba->work_list);
spin_unlock_irqrestore(&phba->hbalock, flags);
lpfc_worker_wake_up(phba);
return;
}
/*
* This function is called with no lock held when there is a resource
* error in the driver or in the firmware.
@ -117,9 +223,10 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
struct lpfc_vport **vports;
struct Scsi_Host *shost;
struct scsi_device *sdev;
unsigned long new_queue_depth;
unsigned long new_queue_depth, old_queue_depth;
unsigned long num_rsrc_err, num_cmd_success;
int i;
struct lpfc_rport_data *rdata;
num_rsrc_err = atomic_read(&phba->num_rsrc_err);
num_cmd_success = atomic_read(&phba->num_cmd_success);
@ -137,6 +244,7 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
else
new_queue_depth = sdev->queue_depth -
new_queue_depth;
old_queue_depth = sdev->queue_depth;
if (sdev->ordered_tags)
scsi_adjust_queue_depth(sdev,
MSG_ORDERED_TAG,
@ -145,6 +253,13 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
scsi_adjust_queue_depth(sdev,
MSG_SIMPLE_TAG,
new_queue_depth);
rdata = sdev->hostdata;
if (rdata)
lpfc_send_sdev_queuedepth_change_event(
phba, vports[i],
rdata->pnode,
sdev->lun, old_queue_depth,
new_queue_depth);
}
}
lpfc_destroy_vport_work_array(phba, vports);
@ -159,6 +274,7 @@ lpfc_ramp_up_queue_handler(struct lpfc_hba *phba)
struct Scsi_Host *shost;
struct scsi_device *sdev;
int i;
struct lpfc_rport_data *rdata;
vports = lpfc_create_vport_work_array(phba);
if (vports != NULL)
@ -176,6 +292,14 @@ lpfc_ramp_up_queue_handler(struct lpfc_hba *phba)
scsi_adjust_queue_depth(sdev,
MSG_SIMPLE_TAG,
sdev->queue_depth+1);
rdata = sdev->hostdata;
if (rdata)
lpfc_send_sdev_queuedepth_change_event(
phba, vports[i],
rdata->pnode,
sdev->lun,
sdev->queue_depth - 1,
sdev->queue_depth);
}
}
lpfc_destroy_vport_work_array(phba, vports);
@ -183,6 +307,35 @@ lpfc_ramp_up_queue_handler(struct lpfc_hba *phba)
atomic_set(&phba->num_cmd_success, 0);
}
/**
* lpfc_scsi_dev_block: set all SCSI hosts to the blocked state.
* @phba: Pointer to HBA context object.
*
* This function walks the vport list and sets each SCSI host to the blocked
* state by invoking the fc_remote_port_delete() routine. It is invoked by
* EEH when the device's PCI slot has been permanently disabled.
**/
void
lpfc_scsi_dev_block(struct lpfc_hba *phba)
{
struct lpfc_vport **vports;
struct Scsi_Host *shost;
struct scsi_device *sdev;
struct fc_rport *rport;
int i;
vports = lpfc_create_vport_work_array(phba);
if (vports != NULL)
for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
shost = lpfc_shost_from_vport(vports[i]);
shost_for_each_device(sdev, shost) {
rport = starget_to_rport(scsi_target(sdev));
fc_remote_port_delete(rport);
}
}
lpfc_destroy_vport_work_array(phba, vports);
}
/*
* This routine allocates a scsi buffer, which contains all the necessary
* information needed to initiate a SCSI I/O. The non-DMAable buffer region
@ -198,7 +351,9 @@ lpfc_new_scsi_buf(struct lpfc_vport *vport)
struct lpfc_scsi_buf *psb;
struct ulp_bde64 *bpl;
IOCB_t *iocb;
dma_addr_t pdma_phys;
dma_addr_t pdma_phys_fcp_cmd;
dma_addr_t pdma_phys_fcp_rsp;
dma_addr_t pdma_phys_bpl;
uint16_t iotag;
psb = kzalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL);
@ -238,40 +393,60 @@ lpfc_new_scsi_buf(struct lpfc_vport *vport)
/* Initialize local short-hand pointers. */
bpl = psb->fcp_bpl;
pdma_phys = psb->dma_handle;
pdma_phys_fcp_cmd = psb->dma_handle;
pdma_phys_fcp_rsp = psb->dma_handle + sizeof(struct fcp_cmnd);
pdma_phys_bpl = psb->dma_handle + sizeof(struct fcp_cmnd) +
sizeof(struct fcp_rsp);
/*
* The first two bdes are the FCP_CMD and FCP_RSP. The balance are sg
* list bdes. Initialize the first two and leave the rest for
* queuecommand.
*/
bpl->addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys));
bpl->addrLow = le32_to_cpu(putPaddrLow(pdma_phys));
bpl->tus.f.bdeSize = sizeof (struct fcp_cmnd);
bpl->tus.f.bdeFlags = BUFF_USE_CMND;
bpl->tus.w = le32_to_cpu(bpl->tus.w);
bpl++;
bpl[0].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_cmd));
bpl[0].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_cmd));
bpl[0].tus.f.bdeSize = sizeof(struct fcp_cmnd);
bpl[0].tus.f.bdeFlags = BUFF_TYPE_BDE_64;
bpl[0].tus.w = le32_to_cpu(bpl[0].tus.w);
/* Setup the physical region for the FCP RSP */
pdma_phys += sizeof (struct fcp_cmnd);
bpl->addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys));
bpl->addrLow = le32_to_cpu(putPaddrLow(pdma_phys));
bpl->tus.f.bdeSize = sizeof (struct fcp_rsp);
bpl->tus.f.bdeFlags = (BUFF_USE_CMND | BUFF_USE_RCV);
bpl->tus.w = le32_to_cpu(bpl->tus.w);
bpl[1].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_rsp));
bpl[1].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_rsp));
bpl[1].tus.f.bdeSize = sizeof(struct fcp_rsp);
bpl[1].tus.f.bdeFlags = BUFF_TYPE_BDE_64;
bpl[1].tus.w = le32_to_cpu(bpl[1].tus.w);
/*
* Since the IOCB for the FCP I/O is built into this lpfc_scsi_buf,
* initialize it with all known data now.
*/
pdma_phys += (sizeof (struct fcp_rsp));
iocb = &psb->cur_iocbq.iocb;
iocb->un.fcpi64.bdl.ulpIoTag32 = 0;
iocb->un.fcpi64.bdl.addrHigh = putPaddrHigh(pdma_phys);
iocb->un.fcpi64.bdl.addrLow = putPaddrLow(pdma_phys);
iocb->un.fcpi64.bdl.bdeSize = (2 * sizeof (struct ulp_bde64));
iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDL;
iocb->ulpBdeCount = 1;
if (phba->sli_rev == 3) {
/* fill in immediate fcp command BDE */
iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDE_IMMED;
iocb->un.fcpi64.bdl.bdeSize = sizeof(struct fcp_cmnd);
iocb->un.fcpi64.bdl.addrLow = offsetof(IOCB_t,
unsli3.fcp_ext.icd);
iocb->un.fcpi64.bdl.addrHigh = 0;
iocb->ulpBdeCount = 0;
iocb->ulpLe = 0;
/* fill in response BDE */
iocb->unsli3.fcp_ext.rbde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
iocb->unsli3.fcp_ext.rbde.tus.f.bdeSize =
sizeof(struct fcp_rsp);
iocb->unsli3.fcp_ext.rbde.addrLow =
putPaddrLow(pdma_phys_fcp_rsp);
iocb->unsli3.fcp_ext.rbde.addrHigh =
putPaddrHigh(pdma_phys_fcp_rsp);
} else {
iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
iocb->un.fcpi64.bdl.bdeSize = (2 * sizeof(struct ulp_bde64));
iocb->un.fcpi64.bdl.addrLow = putPaddrLow(pdma_phys_bpl);
iocb->un.fcpi64.bdl.addrHigh = putPaddrHigh(pdma_phys_bpl);
iocb->ulpBdeCount = 1;
iocb->ulpLe = 1;
}
iocb->ulpClass = CLASS3;
return psb;
@ -313,8 +488,9 @@ lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
struct ulp_bde64 *bpl = lpfc_cmd->fcp_bpl;
IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
struct ulp_bde64 *data_bde = iocb_cmd->unsli3.fcp_ext.dbde;
dma_addr_t physaddr;
uint32_t i, num_bde = 0;
uint32_t num_bde = 0;
int nseg, datadir = scsi_cmnd->sc_data_direction;
/*
@ -352,37 +528,159 @@ lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
* during probe that limits the number of sg elements in any
* single scsi command. Just run through the seg_cnt and format
* the bde's.
* When using SLI-3 the driver will try to fit all the BDEs into
* the IOCB. If it can't then the BDEs get added to a BPL as it
* does for SLI-2 mode.
*/
scsi_for_each_sg(scsi_cmnd, sgel, nseg, i) {
scsi_for_each_sg(scsi_cmnd, sgel, nseg, num_bde) {
physaddr = sg_dma_address(sgel);
bpl->addrLow = le32_to_cpu(putPaddrLow(physaddr));
bpl->addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
bpl->tus.f.bdeSize = sg_dma_len(sgel);
if (datadir == DMA_TO_DEVICE)
bpl->tus.f.bdeFlags = 0;
else
bpl->tus.f.bdeFlags = BUFF_USE_RCV;
bpl->tus.w = le32_to_cpu(bpl->tus.w);
bpl++;
num_bde++;
if (phba->sli_rev == 3 &&
nseg <= LPFC_EXT_DATA_BDE_COUNT) {
data_bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
data_bde->tus.f.bdeSize = sg_dma_len(sgel);
data_bde->addrLow = putPaddrLow(physaddr);
data_bde->addrHigh = putPaddrHigh(physaddr);
data_bde++;
} else {
bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
bpl->tus.f.bdeSize = sg_dma_len(sgel);
bpl->tus.w = le32_to_cpu(bpl->tus.w);
bpl->addrLow =
le32_to_cpu(putPaddrLow(physaddr));
bpl->addrHigh =
le32_to_cpu(putPaddrHigh(physaddr));
bpl++;
}
}
}
/*
* Finish initializing those IOCB fields that are dependent on the
* scsi_cmnd request_buffer. Note that the bdeSize is explicitly
* reinitialized since all iocb memory resources are used many times
* for transmit, receive, and continuation bpl's.
* scsi_cmnd request_buffer. Note that for SLI-2 the bdeSize is
* explicitly reinitialized and for SLI-3 the extended bde count is
* explicitly reinitialized since all iocb memory resources are reused.
*/
iocb_cmd->un.fcpi64.bdl.bdeSize = (2 * sizeof (struct ulp_bde64));
iocb_cmd->un.fcpi64.bdl.bdeSize +=
(num_bde * sizeof (struct ulp_bde64));
iocb_cmd->ulpBdeCount = 1;
iocb_cmd->ulpLe = 1;
if (phba->sli_rev == 3) {
if (num_bde > LPFC_EXT_DATA_BDE_COUNT) {
/*
* The extended IOCB format can only fit 3 BDEs or a BPL.
* This I/O has more than 3 BDEs, so the first data BDE will
* be a BPL that is filled in here.
*/
physaddr = lpfc_cmd->dma_handle;
data_bde->tus.f.bdeFlags = BUFF_TYPE_BLP_64;
data_bde->tus.f.bdeSize = (num_bde *
sizeof(struct ulp_bde64));
physaddr += (sizeof(struct fcp_cmnd) +
sizeof(struct fcp_rsp) +
(2 * sizeof(struct ulp_bde64)));
data_bde->addrHigh = putPaddrHigh(physaddr);
data_bde->addrLow = putPaddrLow(physaddr);
/* ebde count includes the response bde and data bpl */
iocb_cmd->unsli3.fcp_ext.ebde_count = 2;
} else {
/* ebde count includes the response bde and data bdes */
iocb_cmd->unsli3.fcp_ext.ebde_count = (num_bde + 1);
}
} else {
iocb_cmd->un.fcpi64.bdl.bdeSize =
((num_bde + 2) * sizeof(struct ulp_bde64));
}
fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd));
return 0;
}
/**
* lpfc_send_scsi_error_event: Posts an event when there is SCSI error.
* @phba: Pointer to hba context object.
* @vport: Pointer to vport object.
* @lpfc_cmd: Pointer to lpfc scsi command which reported the error.
* @rsp_iocb: Pointer to response iocb object which reported error.
*
* This function posts an event when a SCSI command reports an
* error from the scsi device.
**/
static void
lpfc_send_scsi_error_event(struct lpfc_hba *phba, struct lpfc_vport *vport,
struct lpfc_scsi_buf *lpfc_cmd, struct lpfc_iocbq *rsp_iocb) {
struct scsi_cmnd *cmnd = lpfc_cmd->pCmd;
struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
uint32_t resp_info = fcprsp->rspStatus2;
uint32_t scsi_status = fcprsp->rspStatus3;
uint32_t fcpi_parm = rsp_iocb->iocb.un.fcpi.fcpi_parm;
struct lpfc_fast_path_event *fast_path_evt = NULL;
struct lpfc_nodelist *pnode = lpfc_cmd->rdata->pnode;
unsigned long flags;
/* If there is queuefull or busy condition send a scsi event */
if ((cmnd->result == SAM_STAT_TASK_SET_FULL) ||
(cmnd->result == SAM_STAT_BUSY)) {
fast_path_evt = lpfc_alloc_fast_evt(phba);
if (!fast_path_evt)
return;
fast_path_evt->un.scsi_evt.event_type =
FC_REG_SCSI_EVENT;
fast_path_evt->un.scsi_evt.subcategory =
(cmnd->result == SAM_STAT_TASK_SET_FULL) ?
LPFC_EVENT_QFULL : LPFC_EVENT_DEVBSY;
fast_path_evt->un.scsi_evt.lun = cmnd->device->lun;
memcpy(&fast_path_evt->un.scsi_evt.wwpn,
&pnode->nlp_portname, sizeof(struct lpfc_name));
memcpy(&fast_path_evt->un.scsi_evt.wwnn,
&pnode->nlp_nodename, sizeof(struct lpfc_name));
} else if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen &&
((cmnd->cmnd[0] == READ_10) || (cmnd->cmnd[0] == WRITE_10))) {
fast_path_evt = lpfc_alloc_fast_evt(phba);
if (!fast_path_evt)
return;
fast_path_evt->un.check_cond_evt.scsi_event.event_type =
FC_REG_SCSI_EVENT;
fast_path_evt->un.check_cond_evt.scsi_event.subcategory =
LPFC_EVENT_CHECK_COND;
fast_path_evt->un.check_cond_evt.scsi_event.lun =
cmnd->device->lun;
memcpy(&fast_path_evt->un.check_cond_evt.scsi_event.wwpn,
&pnode->nlp_portname, sizeof(struct lpfc_name));
memcpy(&fast_path_evt->un.check_cond_evt.scsi_event.wwnn,
&pnode->nlp_nodename, sizeof(struct lpfc_name));
fast_path_evt->un.check_cond_evt.sense_key =
cmnd->sense_buffer[2] & 0xf;
fast_path_evt->un.check_cond_evt.asc = cmnd->sense_buffer[12];
fast_path_evt->un.check_cond_evt.ascq = cmnd->sense_buffer[13];
} else if ((cmnd->sc_data_direction == DMA_FROM_DEVICE) &&
fcpi_parm &&
((be32_to_cpu(fcprsp->rspResId) != fcpi_parm) ||
((scsi_status == SAM_STAT_GOOD) &&
!(resp_info & (RESID_UNDER | RESID_OVER))))) {
/*
* If fcpi_parm is valid and either the reported residual does not match
* it, or the status is good with no residual flags set, then this is a
* read check error.
*/
fast_path_evt = lpfc_alloc_fast_evt(phba);
if (!fast_path_evt)
return;
fast_path_evt->un.read_check_error.header.event_type =
FC_REG_FABRIC_EVENT;
fast_path_evt->un.read_check_error.header.subcategory =
LPFC_EVENT_FCPRDCHKERR;
memcpy(&fast_path_evt->un.read_check_error.header.wwpn,
&pnode->nlp_portname, sizeof(struct lpfc_name));
memcpy(&fast_path_evt->un.read_check_error.header.wwnn,
&pnode->nlp_nodename, sizeof(struct lpfc_name));
fast_path_evt->un.read_check_error.lun = cmnd->device->lun;
fast_path_evt->un.read_check_error.opcode = cmnd->cmnd[0];
fast_path_evt->un.read_check_error.fcpiparam =
fcpi_parm;
} else
return;
fast_path_evt->vport = vport;
spin_lock_irqsave(&phba->hbalock, flags);
list_add_tail(&fast_path_evt->work_evt.evt_listp, &phba->work_list);
spin_unlock_irqrestore(&phba->hbalock, flags);
lpfc_worker_wake_up(phba);
return;
}
static void
lpfc_scsi_unprep_dma_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * psb)
{
@ -411,6 +709,7 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
uint32_t rsplen = 0;
uint32_t logit = LOG_FCP | LOG_FCP_ERROR;
/*
* If this is a task management command, there is no
* scsi packet associated with this lpfc_cmd. The driver
@ -526,6 +825,7 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
out:
cmnd->result = ScsiResult(host_status, scsi_status);
lpfc_send_scsi_error_event(vport->phba, vport, lpfc_cmd, rsp_iocb);
}
static void
@ -542,9 +842,11 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
struct scsi_device *sdev, *tmp_sdev;
int depth = 0;
unsigned long flags;
struct lpfc_fast_path_event *fast_path_evt;
lpfc_cmd->result = pIocbOut->iocb.un.ulpWord[4];
lpfc_cmd->status = pIocbOut->iocb.ulpStatus;
atomic_dec(&pnode->cmd_pending);
if (lpfc_cmd->status) {
if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT &&
@ -570,12 +872,36 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
break;
case IOSTAT_NPORT_BSY:
case IOSTAT_FABRIC_BSY:
cmd->result = ScsiResult(DID_BUS_BUSY, 0);
cmd->result = ScsiResult(DID_TRANSPORT_DISRUPTED, 0);
fast_path_evt = lpfc_alloc_fast_evt(phba);
if (!fast_path_evt)
break;
fast_path_evt->un.fabric_evt.event_type =
FC_REG_FABRIC_EVENT;
fast_path_evt->un.fabric_evt.subcategory =
(lpfc_cmd->status == IOSTAT_NPORT_BSY) ?
LPFC_EVENT_PORT_BUSY : LPFC_EVENT_FABRIC_BUSY;
if (pnode && NLP_CHK_NODE_ACT(pnode)) {
memcpy(&fast_path_evt->un.fabric_evt.wwpn,
&pnode->nlp_portname,
sizeof(struct lpfc_name));
memcpy(&fast_path_evt->un.fabric_evt.wwnn,
&pnode->nlp_nodename,
sizeof(struct lpfc_name));
}
fast_path_evt->vport = vport;
fast_path_evt->work_evt.evt =
LPFC_EVT_FASTPATH_MGMT_EVT;
spin_lock_irqsave(&phba->hbalock, flags);
list_add_tail(&fast_path_evt->work_evt.evt_listp,
&phba->work_list);
spin_unlock_irqrestore(&phba->hbalock, flags);
lpfc_worker_wake_up(phba);
break;
case IOSTAT_LOCAL_REJECT:
if (lpfc_cmd->result == RJT_UNAVAIL_PERM ||
if (lpfc_cmd->result == IOERR_INVALID_RPI ||
lpfc_cmd->result == IOERR_NO_RESOURCES ||
lpfc_cmd->result == RJT_LOGIN_REQUIRED) {
lpfc_cmd->result == IOERR_ABORT_REQUESTED) {
cmd->result = ScsiResult(DID_REQUEUE, 0);
break;
} /* else: fall through */
@ -586,7 +912,8 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
if (!pnode || !NLP_CHK_NODE_ACT(pnode)
|| (pnode->nlp_state != NLP_STE_MAPPED_NODE))
cmd->result = ScsiResult(DID_BUS_BUSY, SAM_STAT_BUSY);
cmd->result = ScsiResult(DID_TRANSPORT_DISRUPTED,
SAM_STAT_BUSY);
} else {
cmd->result = ScsiResult(DID_OK, 0);
}
@ -602,8 +929,32 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
scsi_get_resid(cmd));
}
lpfc_update_stats(phba, lpfc_cmd);
result = cmd->result;
sdev = cmd->device;
if (vport->cfg_max_scsicmpl_time &&
time_after(jiffies, lpfc_cmd->start_time +
msecs_to_jiffies(vport->cfg_max_scsicmpl_time))) {
spin_lock_irqsave(sdev->host->host_lock, flags);
if ((pnode->cmd_qdepth > atomic_read(&pnode->cmd_pending) &&
(atomic_read(&pnode->cmd_pending) > LPFC_MIN_TGT_QDEPTH) &&
((cmd->cmnd[0] == READ_10) || (cmd->cmnd[0] == WRITE_10))))
pnode->cmd_qdepth = atomic_read(&pnode->cmd_pending);
pnode->last_change_time = jiffies;
spin_unlock_irqrestore(sdev->host->host_lock, flags);
} else if ((pnode->cmd_qdepth < LPFC_MAX_TGT_QDEPTH) &&
time_after(jiffies, pnode->last_change_time +
msecs_to_jiffies(LPFC_TGTQ_INTERVAL))) {
spin_lock_irqsave(sdev->host->host_lock, flags);
pnode->cmd_qdepth += pnode->cmd_qdepth *
LPFC_TGTQ_RAMPUP_PCENT / 100;
if (pnode->cmd_qdepth > LPFC_MAX_TGT_QDEPTH)
pnode->cmd_qdepth = LPFC_MAX_TGT_QDEPTH;
pnode->last_change_time = jiffies;
spin_unlock_irqrestore(sdev->host->host_lock, flags);
}
lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
cmd->scsi_done(cmd);
@ -647,6 +998,9 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
pnode->last_ramp_up_time = jiffies;
}
}
lpfc_send_sdev_queuedepth_change_event(phba, vport, pnode,
0xFFFFFFFF,
sdev->queue_depth - 1, sdev->queue_depth);
}
/*
@ -676,6 +1030,9 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
"0711 detected queue full - lun queue "
"depth adjusted to %d.\n", depth);
lpfc_send_sdev_queuedepth_change_event(phba, vport,
pnode, 0xFFFFFFFF,
depth+1, depth);
}
}
@ -692,6 +1049,24 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
lpfc_release_scsi_buf(phba, lpfc_cmd);
}
/**
* lpfc_fcpcmd_to_iocb - copy the fcp_cmd data into the IOCB.
* @data: A pointer to the immediate command data portion of the IOCB.
* @fcp_cmnd: The FCP Command that is provided by the SCSI layer.
*
* The routine copies the entire FCP command from @fcp_cmnd to @data while
* byte swapping the data to big endian format for transmission on the wire.
**/
static void
lpfc_fcpcmd_to_iocb(uint8_t *data, struct fcp_cmnd *fcp_cmnd)
{
int i, j;
for (i = 0, j = 0; i < sizeof(struct fcp_cmnd);
i += sizeof(uint32_t), j++) {
((uint32_t *)data)[j] = cpu_to_be32(((uint32_t *)fcp_cmnd)[j]);
}
}
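The word-wise loop assumes sizeof(struct fcp_cmnd) is a multiple of 4; a compile-time guard (not in this diff) would make that explicit:
	BUILD_BUG_ON(sizeof(struct fcp_cmnd) % sizeof(uint32_t));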
static void
lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
struct lpfc_nodelist *pnode)
@ -758,7 +1133,8 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
fcp_cmnd->fcpCntl3 = 0;
phba->fc4ControlRequests++;
}
if (phba->sli_rev == 3)
lpfc_fcpcmd_to_iocb(iocb_cmd->unsli3.fcp_ext.icd, fcp_cmnd);
/*
* Finish initializing those IOCB fields that are independent
* of the scsi_cmnd request_buffer
@ -798,11 +1174,13 @@ lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport,
piocb = &piocbq->iocb;
fcp_cmnd = lpfc_cmd->fcp_cmnd;
int_to_scsilun(lun, &lpfc_cmd->fcp_cmnd->fcp_lun);
/* Clear out any old data in the FCP command area */
memset(fcp_cmnd, 0, sizeof(struct fcp_cmnd));
int_to_scsilun(lun, &fcp_cmnd->fcp_lun);
fcp_cmnd->fcpCntl2 = task_mgmt_cmd;
if (vport->phba->sli_rev == 3)
lpfc_fcpcmd_to_iocb(piocb->unsli3.fcp_ext.icd, fcp_cmnd);
piocb->ulpCommand = CMD_FCP_ICMND64_CR;
piocb->ulpContext = ndlp->nlp_rpi;
if (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) {
piocb->ulpFCP2Rcvy = 1;
@ -967,9 +1345,12 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
* transport is still transitioning.
*/
if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
cmnd->result = ScsiResult(DID_BUS_BUSY, 0);
cmnd->result = ScsiResult(DID_TRANSPORT_DISRUPTED, 0);
goto out_fail_command;
}
if (atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth)
goto out_host_busy;
lpfc_cmd = lpfc_get_scsi_buf(phba);
if (lpfc_cmd == NULL) {
lpfc_adjust_queue_depth(phba);
@ -980,6 +1361,7 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
goto out_host_busy;
}
lpfc_cmd->start_time = jiffies;
/*
* Store the midlayer's command structure for the completion phase
* and complete the command initialization.
@ -987,6 +1369,7 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
lpfc_cmd->pCmd = cmnd;
lpfc_cmd->rdata = rdata;
lpfc_cmd->timeout = 0;
lpfc_cmd->start_time = jiffies;
cmnd->host_scribble = (unsigned char *)lpfc_cmd;
cmnd->scsi_done = done;
@ -996,6 +1379,7 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
lpfc_scsi_prep_cmnd(vport, lpfc_cmd, ndlp);
atomic_inc(&ndlp->cmd_pending);
err = lpfc_sli_issue_iocb(phba, &phba->sli.ring[psli->fcp_ring],
&lpfc_cmd->cur_iocbq, SLI_IOCB_RET_IOCB);
if (err)
@ -1010,6 +1394,7 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
return 0;
out_host_busy_free_buf:
atomic_dec(&ndlp->cmd_pending);
lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
lpfc_release_scsi_buf(phba, lpfc_cmd);
out_host_busy:
@ -1145,6 +1530,7 @@ lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
int ret = SUCCESS;
int status;
int cnt;
struct lpfc_scsi_event_header scsi_event;
lpfc_block_error_handler(cmnd);
/*
@ -1163,6 +1549,19 @@ lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
break;
pnode = rdata->pnode;
}
scsi_event.event_type = FC_REG_SCSI_EVENT;
scsi_event.subcategory = LPFC_EVENT_TGTRESET;
scsi_event.lun = 0;
memcpy(scsi_event.wwpn, &pnode->nlp_portname, sizeof(struct lpfc_name));
memcpy(scsi_event.wwnn, &pnode->nlp_nodename, sizeof(struct lpfc_name));
fc_host_post_vendor_event(shost,
fc_get_event_number(),
sizeof(scsi_event),
(char *)&scsi_event,
SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
if (!rdata || pnode->nlp_state != NLP_STE_MAPPED_NODE) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
"0721 LUN Reset rport "
@ -1242,10 +1641,23 @@ lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
struct lpfc_hba *phba = vport->phba;
struct lpfc_nodelist *ndlp = NULL;
int match;
int ret = SUCCESS, status, i;
int ret = SUCCESS, status = SUCCESS, i;
int cnt;
struct lpfc_scsi_buf * lpfc_cmd;
unsigned long later;
struct lpfc_scsi_event_header scsi_event;
scsi_event.event_type = FC_REG_SCSI_EVENT;
scsi_event.subcategory = LPFC_EVENT_BUSRESET;
scsi_event.lun = 0;
memcpy(scsi_event.wwpn, &vport->fc_portname, sizeof(struct lpfc_name));
memcpy(scsi_event.wwnn, &vport->fc_nodename, sizeof(struct lpfc_name));
fc_host_post_vendor_event(shost,
fc_get_event_number(),
sizeof(scsi_event),
(char *)&scsi_event,
SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
lpfc_block_error_handler(cmnd);
/*

View File

@ -107,6 +107,10 @@ struct fcp_cmnd {
};
struct lpfc_scsicmd_bkt {
uint32_t cmd_count;
};
struct lpfc_scsi_buf {
struct list_head list;
struct scsi_cmnd *pCmd;
@ -139,6 +143,7 @@ struct lpfc_scsi_buf {
*/
struct lpfc_iocbq cur_iocbq;
wait_queue_head_t *waitq;
unsigned long start_time;
};
#define LPFC_SCSI_DMA_EXT_SIZE 264

File diff suppressed because it is too large

View File

@ -233,6 +233,7 @@ struct lpfc_sli {
#define LPFC_SLI2_ACTIVE 0x200 /* SLI2 overlay in firmware is active */
#define LPFC_PROCESS_LA 0x400 /* Able to process link attention */
#define LPFC_BLOCK_MGMT_IO 0x800 /* Don't allow mgmt mbx or iocb cmds */
#define LPFC_MENLO_MAINT 0x1000 /* need for menl fw download */
struct lpfc_sli_ring ring[LPFC_MAX_RING];
int fcp_ring; /* ring used for FCP initiator commands */

View File

@ -18,9 +18,11 @@
* included with this package. *
*******************************************************************/
#define LPFC_DRIVER_VERSION "8.2.7"
#define LPFC_DRIVER_VERSION "8.2.8"
#define LPFC_DRIVER_NAME "lpfc"
#define LPFC_SP_DRIVER_HANDLER_NAME "lpfc:sp"
#define LPFC_FP_DRIVER_HANDLER_NAME "lpfc:fp"
#define LPFC_MODULE_DESC "Emulex LightPulse Fibre Channel SCSI driver " \
LPFC_DRIVER_VERSION

View File

@ -34,6 +34,7 @@
#include <scsi/scsi_transport_fc.h>
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
@ -204,6 +205,77 @@ lpfc_unique_wwpn(struct lpfc_hba *phba, struct lpfc_vport *new_vport)
return 1;
}
/**
* lpfc_discovery_wait: Wait for driver discovery to quiesce.
* @vport: The virtual port for which this call is being executed.
*
* The driver calls this routine from lpfc_vport_delete to enforce
* synchronous execution of vport delete relative to discovery activities:
* lpfc_vport_delete should not return until it can reasonably guarantee
* that discovery has quiesced. After the FDISC LOGO, the driver must wait
* until SAN teardown is complete and all resources are recovered before
* allowing cleanup.
*
* This routine does not require any locks held.
**/
static void lpfc_discovery_wait(struct lpfc_vport *vport)
{
struct lpfc_hba *phba = vport->phba;
uint32_t wait_flags = 0;
unsigned long wait_time_max;
unsigned long start_time;
wait_flags = FC_RSCN_MODE | FC_RSCN_DISCOVERY | FC_NLP_MORE |
FC_RSCN_DEFERRED | FC_NDISC_ACTIVE | FC_DISC_TMO;
/*
* The time constraint on this loop is a balance between the
* fabric RA_TOV value and dev_loss tmo. The driver's default
* devloss_tmo is 10 seconds, giving this loop at least a 3x multiplier.
*/
wait_time_max = msecs_to_jiffies(((phba->fc_ratov * 3) + 3) * 1000);
wait_time_max += jiffies;
start_time = jiffies;
while (time_before(jiffies, wait_time_max)) {
if ((vport->num_disc_nodes > 0) ||
(vport->fc_flag & wait_flags) ||
((vport->port_state > LPFC_VPORT_FAILED) &&
(vport->port_state < LPFC_VPORT_READY))) {
lpfc_printf_log(phba, KERN_INFO, LOG_VPORT,
"1833 Vport discovery quiesce Wait:"
" vpi x%x state x%x fc_flags x%x"
" num_nodes x%x, waiting 1000 msecs"
" total wait msecs x%x\n",
vport->vpi, vport->port_state,
vport->fc_flag, vport->num_disc_nodes,
jiffies_to_msecs(jiffies - start_time));
msleep(1000);
} else {
/* Base case. Wait variants satisfied. Break out */
lpfc_printf_log(phba, KERN_INFO, LOG_VPORT,
"1834 Vport discovery quiesced:"
" vpi x%x state x%x fc_flags x%x"
" wait msecs x%x\n",
vport->vpi, vport->port_state,
vport->fc_flag,
jiffies_to_msecs(jiffies
- start_time));
break;
}
}
if (time_after(jiffies, wait_time_max))
lpfc_printf_log(phba, KERN_ERR, LOG_VPORT,
"1835 Vport discovery quiesce failed:"
" vpi x%x state x%x fc_flags x%x"
" wait msecs x%x\n",
vport->vpi, vport->port_state,
vport->fc_flag,
jiffies_to_msecs(jiffies - start_time));
}
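As a worked example of the timeout above: with a typical fc_ratov of 10 seconds, wait_time_max is ((10 * 3) + 3) * 1000 = 33,000 ms, i.e. up to 33 one-second polls of the discovery flags.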
int
lpfc_vport_create(struct fc_vport *fc_vport, bool disable)
{
@ -506,8 +578,12 @@ lpfc_vport_delete(struct fc_vport *fc_vport)
* initiated after we've disposed of all other resources associated
* with the port.
*/
if (!scsi_host_get(shost) || !scsi_host_get(shost))
if (!scsi_host_get(shost))
return VPORT_INVAL;
if (!scsi_host_get(shost)) {
scsi_host_put(shost);
return VPORT_INVAL;
}
spin_lock_irq(&phba->hbalock);
vport->load_flag |= FC_UNLOADING;
spin_unlock_irq(&phba->hbalock);
@ -597,11 +673,16 @@ lpfc_vport_delete(struct fc_vport *fc_vport)
}
vport->unreg_vpi_cmpl = VPORT_INVAL;
timeout = msecs_to_jiffies(phba->fc_ratov * 2000);
if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
goto skip_logo;
if (!lpfc_issue_els_npiv_logo(vport, ndlp))
while (vport->unreg_vpi_cmpl == VPORT_INVAL && timeout)
timeout = schedule_timeout(timeout);
}
if (!(phba->pport->load_flag & FC_UNLOADING))
lpfc_discovery_wait(vport);
skip_logo:
lpfc_cleanup(vport);
lpfc_sli_host_down(vport);
@ -615,8 +696,10 @@ skip_logo:
* Completion of unreg_vpi (lpfc_mbx_cmpl_unreg_vpi)
* does the scsi_host_put() to release the vport.
*/
lpfc_mbx_unreg_vpi(vport);
}
if (lpfc_mbx_unreg_vpi(vport))
scsi_host_put(shost);
} else
scsi_host_put(shost);
lpfc_free_vpi(phba, vport->vpi);
vport->work_port_events = 0;
@ -663,3 +746,82 @@ lpfc_destroy_vport_work_array(struct lpfc_hba *phba, struct lpfc_vport **vports)
scsi_host_put(lpfc_shost_from_vport(vports[i]));
kfree(vports);
}
/**
* lpfc_vport_reset_stat_data: Reset the statistical data for the vport.
* @vport: Pointer to vport object.
*
* This function resets the statistical data for the vport. This function
* is called with the host_lock held.
**/
void
lpfc_vport_reset_stat_data(struct lpfc_vport *vport)
{
struct lpfc_nodelist *ndlp = NULL, *next_ndlp = NULL;
list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
if (!NLP_CHK_NODE_ACT(ndlp))
continue;
if (ndlp->lat_data)
memset(ndlp->lat_data, 0, LPFC_MAX_BUCKET_COUNT *
sizeof(struct lpfc_scsicmd_bkt));
}
}
/**
* lpfc_alloc_bucket: Allocate data buffer required for collecting
* statistical data.
* @vport: Pointer to vport object.
*
* This function allocates the data buffers required for all the FC
* nodes of the vport to collect statistical data.
**/
void
lpfc_alloc_bucket(struct lpfc_vport *vport)
{
struct lpfc_nodelist *ndlp = NULL, *next_ndlp = NULL;
list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
if (!NLP_CHK_NODE_ACT(ndlp))
continue;
kfree(ndlp->lat_data);
ndlp->lat_data = NULL;
if (ndlp->nlp_state == NLP_STE_MAPPED_NODE) {
ndlp->lat_data = kcalloc(LPFC_MAX_BUCKET_COUNT,
sizeof(struct lpfc_scsicmd_bkt),
GFP_ATOMIC);
if (!ndlp->lat_data)
lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE,
"0287 lpfc_alloc_bucket failed to "
"allocate statistical data buffer DID "
"0x%x\n", ndlp->nlp_DID);
}
}
}
/**
* lpfc_free_bucket: Free data buffer required for collecting
* statistical data.
* @vport: Pointer to vport object.
*
* This function frees the statistical data buffers of all the FC
* nodes of the vport.
**/
void
lpfc_free_bucket(struct lpfc_vport *vport)
{
struct lpfc_nodelist *ndlp = NULL, *next_ndlp = NULL;
list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
if (!NLP_CHK_NODE_ACT(ndlp))
continue;
kfree(ndlp->lat_data);
ndlp->lat_data = NULL;
}
}

View File

@ -112,4 +112,8 @@ struct vport_cmd_tag {
void lpfc_vport_set_state(struct lpfc_vport *vport,
enum fc_vport_state new_state);
void lpfc_vport_reset_stat_data(struct lpfc_vport *);
void lpfc_alloc_bucket(struct lpfc_vport *);
void lpfc_free_bucket(struct lpfc_vport *);
#endif /* H_LPFC_VPORT */

View File

@ -1006,7 +1006,6 @@ qla2x00_terminate_rport_io(struct fc_rport *rport)
}
qla2x00_abort_fcport_cmds(fcport);
scsi_target_unblock(&rport->dev);
}
static int

View File

@ -1187,7 +1187,12 @@ qla2x00_status_entry(scsi_qla_host_t *ha, void *pkt)
cp->serial_number, comp_status,
atomic_read(&fcport->state)));
cp->result = DID_BUS_BUSY << 16;
/*
* We are going to have the fc class block the rport
* while we try to recover so instruct the mid layer
* to requeue until the class decides how to handle this.
*/
cp->result = DID_TRANSPORT_DISRUPTED << 16;
if (atomic_read(&fcport->state) == FCS_ONLINE)
qla2x00_mark_device_lost(fcport->ha, fcport, 1, 1);
break;
@ -1214,7 +1219,12 @@ qla2x00_status_entry(scsi_qla_host_t *ha, void *pkt)
break;
case CS_TIMEOUT:
cp->result = DID_BUS_BUSY << 16;
/*
* We are going to have the fc class block the rport
* while we try to recover so instruct the mid layer
* to requeue until the class decides how to handle this.
*/
cp->result = DID_TRANSPORT_DISRUPTED << 16;
if (IS_FWI2_CAPABLE(ha)) {
DEBUG2(printk(KERN_INFO

View File

@ -394,10 +394,8 @@ qla2x00_queuecommand(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
}
/* Close window on fcport/rport state-transitioning. */
if (fcport->drport) {
cmd->result = DID_IMM_RETRY << 16;
goto qc_fail_command;
}
if (fcport->drport)
goto qc_target_busy;
if (atomic_read(&fcport->state) != FCS_ONLINE) {
if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD ||
@ -405,7 +403,7 @@ qla2x00_queuecommand(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
cmd->result = DID_NO_CONNECT << 16;
goto qc_fail_command;
}
goto qc_host_busy;
goto qc_target_busy;
}
spin_unlock_irq(ha->host->host_lock);
@ -428,10 +426,11 @@ qc_host_busy_free_sp:
qc_host_busy_lock:
spin_lock_irq(ha->host->host_lock);
qc_host_busy:
return SCSI_MLQUEUE_HOST_BUSY;
qc_target_busy:
return SCSI_MLQUEUE_TARGET_BUSY;
qc_fail_command:
done(cmd);
@ -461,10 +460,8 @@ qla24xx_queuecommand(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
}
/* Close window on fcport/rport state-transitioning. */
if (fcport->drport) {
cmd->result = DID_IMM_RETRY << 16;
goto qc24_fail_command;
}
if (fcport->drport)
goto qc24_target_busy;
if (atomic_read(&fcport->state) != FCS_ONLINE) {
if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD ||
@ -472,7 +469,7 @@ qla24xx_queuecommand(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
cmd->result = DID_NO_CONNECT << 16;
goto qc24_fail_command;
}
goto qc24_host_busy;
goto qc24_target_busy;
}
spin_unlock_irq(ha->host->host_lock);
@ -495,10 +492,11 @@ qc24_host_busy_free_sp:
qc24_host_busy_lock:
spin_lock_irq(ha->host->host_lock);
qc24_host_busy:
return SCSI_MLQUEUE_HOST_BUSY;
qc24_target_busy:
return SCSI_MLQUEUE_TARGET_BUSY;
qc24_fail_command:
done(cmd);

View File

@ -139,7 +139,7 @@ static void qla4xxx_status_entry(struct scsi_qla_host *ha,
ha->host_no, cmd->device->channel,
cmd->device->id, cmd->device->lun));
cmd->result = DID_BUS_BUSY << 16;
cmd->result = DID_TRANSPORT_DISRUPTED << 16;
/*
* Mark device missing so that we won't continue to send
@ -243,7 +243,7 @@ static void qla4xxx_status_entry(struct scsi_qla_host *ha,
if (atomic_read(&ddb_entry->state) == DDB_STATE_ONLINE)
qla4xxx_mark_device_missing(ha, ddb_entry);
cmd->result = DID_BUS_BUSY << 16;
cmd->result = DID_TRANSPORT_DISRUPTED << 16;
break;
case SCS_QUEUE_FULL:

View File

@ -353,7 +353,7 @@ void qla4xxx_mark_device_missing(struct scsi_qla_host *ha,
ha->host_no, ddb_entry->bus, ddb_entry->target,
ddb_entry->fw_ddb_index));
iscsi_block_session(ddb_entry->sess);
iscsi_conn_error(ddb_entry->conn, ISCSI_ERR_CONN_FAILED);
iscsi_conn_error_event(ddb_entry->conn, ISCSI_ERR_CONN_FAILED);
}
static struct srb* qla4xxx_get_new_srb(struct scsi_qla_host *ha,
@ -439,7 +439,7 @@ static int qla4xxx_queuecommand(struct scsi_cmnd *cmd,
cmd->result = DID_NO_CONNECT << 16;
goto qc_fail_command;
}
goto qc_host_busy;
return SCSI_MLQUEUE_TARGET_BUSY;
}
if (test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags))

View File

@ -754,8 +754,12 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
}
spin_unlock_irqrestore(host->host_lock, flags);
if (rtn) {
scsi_queue_insert(cmd, (rtn == SCSI_MLQUEUE_DEVICE_BUSY) ?
rtn : SCSI_MLQUEUE_HOST_BUSY);
if (rtn != SCSI_MLQUEUE_DEVICE_BUSY &&
rtn != SCSI_MLQUEUE_TARGET_BUSY)
rtn = SCSI_MLQUEUE_HOST_BUSY;
scsi_queue_insert(cmd, rtn);
SCSI_LOG_MLQUEUE(3,
printk("queuecommand : request rejected\n"));
}
@ -800,6 +804,7 @@ static struct scsi_driver *scsi_cmd_to_driver(struct scsi_cmnd *cmd)
void scsi_finish_command(struct scsi_cmnd *cmd)
{
struct scsi_device *sdev = cmd->device;
struct scsi_target *starget = scsi_target(sdev);
struct Scsi_Host *shost = sdev->host;
struct scsi_driver *drv;
unsigned int good_bytes;
@ -815,6 +820,7 @@ void scsi_finish_command(struct scsi_cmnd *cmd)
* XXX(hch): What about locking?
*/
shost->host_blocked = 0;
starget->target_blocked = 0;
sdev->device_blocked = 0;
/*

View File

@ -1065,10 +1065,10 @@ static int scsi_eh_target_reset(struct Scsi_Host *shost,
struct list_head *done_q)
{
struct scsi_cmnd *scmd, *tgtr_scmd, *next;
unsigned int id;
unsigned int id = 0;
int rtn;
for (id = 0; id <= shost->max_id; id++) {
do {
tgtr_scmd = NULL;
list_for_each_entry(scmd, work_q, eh_entry) {
if (id == scmd_id(scmd)) {
@ -1076,8 +1076,18 @@ static int scsi_eh_target_reset(struct Scsi_Host *shost,
break;
}
}
if (!tgtr_scmd) {
/* not one exactly equal; find the next highest */
list_for_each_entry(scmd, work_q, eh_entry) {
if (scmd_id(scmd) > id &&
(!tgtr_scmd ||
scmd_id(tgtr_scmd) > scmd_id(scmd)))
tgtr_scmd = scmd;
}
}
if (!tgtr_scmd)
continue;
/* no more commands, that's it */
break;
SCSI_LOG_ERROR_RECOVERY(3, printk("%s: Sending target reset "
"to target %d\n",
@ -1096,7 +1106,8 @@ static int scsi_eh_target_reset(struct Scsi_Host *shost,
" failed target: "
"%d\n",
current->comm, id));
}
id++;
} while(id != 0);
return list_empty(work_q);
}
@ -1218,6 +1229,40 @@ static void scsi_eh_offline_sdevs(struct list_head *work_q,
return;
}
/**
* scsi_noretry_cmd - determine if a command should be failed fast
* @scmd: SCSI cmd to examine.
*/
int scsi_noretry_cmd(struct scsi_cmnd *scmd)
{
switch (host_byte(scmd->result)) {
case DID_OK:
break;
case DID_BUS_BUSY:
return blk_failfast_transport(scmd->request);
case DID_PARITY:
return blk_failfast_dev(scmd->request);
case DID_ERROR:
if (msg_byte(scmd->result) == COMMAND_COMPLETE &&
status_byte(scmd->result) == RESERVATION_CONFLICT)
return 0;
/* fall through */
case DID_SOFT_ERROR:
return blk_failfast_driver(scmd->request);
}
switch (status_byte(scmd->result)) {
case CHECK_CONDITION:
/*
* assume the caller has checked the sense data and determined
* that the check condition was retryable.
*/
return blk_failfast_dev(scmd->request);
}
return 0;
}
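On the producer side, a minimal sketch of how a submitter opts in, using the block-layer request flag that blk_failfast_transport() tests (req is an assumed struct request pointer):
	/* surface transport errors such as DID_BUS_BUSY instead of retrying */
	req->cmd_flags |= REQ_FAILFAST_TRANSPORT;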
/**
* scsi_decide_disposition - Disposition a cmd on return from LLD.
* @scmd: SCSI cmd to examine.
@ -1290,7 +1335,20 @@ int scsi_decide_disposition(struct scsi_cmnd *scmd)
case DID_REQUEUE:
return ADD_TO_MLQUEUE;
case DID_TRANSPORT_DISRUPTED:
/*
* LLD/transport was disrupted during processing of the IO.
* The transport class is now blocked/blocking,
* and the transport will decide what to do with the IO
* based on its timers and recovery capabilities.
*/
return ADD_TO_MLQUEUE;
case DID_TRANSPORT_FAILFAST:
/*
* The transport decided to failfast the IO (most likely
* the fast io fail tmo fired), so send IO directly upwards.
*/
return SUCCESS;
case DID_ERROR:
if (msg_byte(scmd->result) == COMMAND_COMPLETE &&
status_byte(scmd->result) == RESERVATION_CONFLICT)
@ -1383,7 +1441,7 @@ int scsi_decide_disposition(struct scsi_cmnd *scmd)
* even if the request is marked fast fail, we still requeue
* for queue congestion conditions (QUEUE_FULL or BUSY) */
if ((++scmd->retries) <= scmd->allowed
&& !blk_noretry_request(scmd->request)) {
&& !scsi_noretry_cmd(scmd)) {
return NEEDS_RETRY;
} else {
/*
@ -1508,7 +1566,7 @@ void scsi_eh_flush_done_q(struct list_head *done_q)
list_for_each_entry_safe(scmd, next, done_q, eh_entry) {
list_del_init(&scmd->eh_entry);
if (scsi_device_online(scmd->device) &&
!blk_noretry_request(scmd->request) &&
!scsi_noretry_cmd(scmd) &&
(++scmd->retries <= scmd->allowed)) {
SCSI_LOG_ERROR_RECOVERY(3, printk("%s: flush"
" retry cmd: %p\n",

View File

@ -114,6 +114,7 @@ int scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
{
struct Scsi_Host *host = cmd->device->host;
struct scsi_device *device = cmd->device;
struct scsi_target *starget = scsi_target(device);
struct request_queue *q = device->request_queue;
unsigned long flags;
@ -133,10 +134,17 @@ int scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
* if a command is requeued with no other commands outstanding
* either for the device or for the host.
*/
if (reason == SCSI_MLQUEUE_HOST_BUSY)
switch (reason) {
case SCSI_MLQUEUE_HOST_BUSY:
host->host_blocked = host->max_host_blocked;
else if (reason == SCSI_MLQUEUE_DEVICE_BUSY)
break;
case SCSI_MLQUEUE_DEVICE_BUSY:
device->device_blocked = device->max_device_blocked;
break;
case SCSI_MLQUEUE_TARGET_BUSY:
starget->target_blocked = starget->max_target_blocked;
break;
}
/*
* Decrement the counters, since these commands are no longer
@ -460,10 +468,12 @@ static void scsi_init_cmd_errh(struct scsi_cmnd *cmd)
void scsi_device_unbusy(struct scsi_device *sdev)
{
struct Scsi_Host *shost = sdev->host;
struct scsi_target *starget = scsi_target(sdev);
unsigned long flags;
spin_lock_irqsave(shost->host_lock, flags);
shost->host_busy--;
starget->target_busy--;
if (unlikely(scsi_host_in_recovery(shost) &&
(shost->host_failed || shost->host_eh_scheduled)))
scsi_eh_wakeup(shost);
@ -519,6 +529,13 @@ static void scsi_single_lun_run(struct scsi_device *current_sdev)
spin_unlock_irqrestore(shost->host_lock, flags);
}
static inline int scsi_target_is_busy(struct scsi_target *starget)
{
return ((starget->can_queue > 0 &&
starget->target_busy >= starget->can_queue) ||
starget->target_blocked);
}
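A can_queue of 0 leaves the per-target limit disabled, so only target_blocked can report busy. A hypothetical LLD that wants a cap could set one from its target_alloc hook (driver name and depth are illustrative):
static int example_target_alloc(struct scsi_target *starget)
{
	/* allow at most 32 outstanding commands per target */
	starget->can_queue = 32;
	return 0;
}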
/*
* Function: scsi_run_queue()
*
@ -533,7 +550,7 @@ static void scsi_single_lun_run(struct scsi_device *current_sdev)
*/
static void scsi_run_queue(struct request_queue *q)
{
struct scsi_device *sdev = q->queuedata;
struct scsi_device *starved_head = NULL, *sdev = q->queuedata;
struct Scsi_Host *shost = sdev->host;
unsigned long flags;
@ -560,6 +577,21 @@ static void scsi_run_queue(struct request_queue *q)
*/
sdev = list_entry(shost->starved_list.next,
struct scsi_device, starved_entry);
/*
* The *queue_ready functions can add a device back onto the
* starved list's tail, so we must check for an infinite loop.
*/
if (sdev == starved_head)
break;
if (!starved_head)
starved_head = sdev;
if (scsi_target_is_busy(scsi_target(sdev))) {
list_move_tail(&sdev->starved_entry,
&shost->starved_list);
continue;
}
list_del_init(&sdev->starved_entry);
spin_unlock(shost->host_lock);
@ -575,13 +607,6 @@ static void scsi_run_queue(struct request_queue *q)
spin_unlock(sdev->request_queue->queue_lock);
spin_lock(shost->host_lock);
if (unlikely(!list_empty(&sdev->starved_entry)))
/*
* sdev lost a race, and was put back on the
* starved list. This is unlikely but without this
* in theory we could loop forever.
*/
break;
}
spin_unlock_irqrestore(shost->host_lock, flags);
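
A minimal userspace sketch of the loop guard above (stand-in types, not kernel code: struct dev and run_starved are made up, and the busy field stands in for scsi_target_is_busy()). Busy entries are rotated back to the tail, so remembering the first entry seen is exactly what terminates the walk:

#include <stdio.h>

struct dev { int id; int busy; struct dev *next; };

static struct dev *run_starved(struct dev *head)
{
	struct dev *starved_head = NULL;

	while (head) {
		struct dev *d = head;

		if (d == starved_head)
			break;			/* walked full circle: stop for now */
		if (!starved_head)
			starved_head = d;

		head = d->next;			/* unlink from the front */
		d->next = NULL;
		if (d->busy) {			/* stand-in for scsi_target_is_busy() */
			struct dev **tail = &head;

			while (*tail)		/* list_move_tail() equivalent */
				tail = &(*tail)->next;
			*tail = d;
			continue;
		}
		printf("running queue for dev %d\n", d->id);
	}
	return head;				/* still-busy devices remain queued */
}

int main(void)
{
	struct dev c = { 3, 1, NULL }, b = { 2, 0, &c }, a = { 1, 1, &b };

	run_starved(&a);			/* dispatches dev 2 only */
	return 0;
}
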
@ -681,7 +706,7 @@ static struct scsi_cmnd *scsi_end_request(struct scsi_cmnd *cmd, int error,
leftover = req->data_len;
/* kill remainder if no retries */
if (error && blk_noretry_request(req))
if (error && scsi_noretry_cmd(cmd))
blk_end_request(req, error, leftover);
else {
if (requeue) {
@ -1344,6 +1369,52 @@ static inline int scsi_dev_queue_ready(struct request_queue *q,
return 1;
}
/*
* scsi_target_queue_ready: checks if we can send commands to the target
* @sdev: scsi device on starget to check.
*
* Called with the host lock held.
*/
static inline int scsi_target_queue_ready(struct Scsi_Host *shost,
struct scsi_device *sdev)
{
struct scsi_target *starget = scsi_target(sdev);
if (starget->single_lun) {
if (starget->starget_sdev_user &&
starget->starget_sdev_user != sdev)
return 0;
starget->starget_sdev_user = sdev;
}
if (starget->target_busy == 0 && starget->target_blocked) {
/*
* unblock after target_blocked iterates to zero
*/
if (--starget->target_blocked == 0) {
SCSI_LOG_MLQUEUE(3, starget_printk(KERN_INFO, starget,
"unblocking target at zero depth\n"));
} else {
blk_plug_device(sdev->request_queue);
return 0;
}
}
if (scsi_target_is_busy(starget)) {
if (list_empty(&sdev->starved_entry)) {
list_add_tail(&sdev->starved_entry,
&shost->starved_list);
return 0;
}
}
/* We're OK to process the command, so we can't be starved */
if (!list_empty(&sdev->starved_entry))
list_del_init(&sdev->starved_entry);
return 1;
}
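
The unblock-at-zero-depth behaviour above is easy to model in plain C. A compact userspace sketch (hypothetical names, not kernel code); as in scsi_target_is_busy(), can_queue == 0 means the target is unthrottled:

#include <stdio.h>

struct tgt { unsigned int busy, blocked, can_queue; };

static int target_is_busy(const struct tgt *t)
{
	return (t->can_queue > 0 && t->busy >= t->can_queue) || t->blocked;
}

static int target_queue_ready(struct tgt *t)
{
	if (t->busy == 0 && t->blocked) {
		/* unblock only after the countdown reaches zero */
		if (--t->blocked == 0)
			printf("unblocking target at zero depth\n");
		else
			return 0;
	}
	return !target_is_busy(t);
}

int main(void)
{
	struct tgt t = { .busy = 0, .blocked = 3, .can_queue = 2 };
	int i;

	/* first two polls stay blocked, the third unblocks */
	for (i = 0; i < 4; i++)
		printf("poll %d -> ready=%d\n", i, target_queue_ready(&t));
	return 0;
}
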
/*
* scsi_host_queue_ready: if we can send requests to shost, return 1 else
* return 0. We must end up running the queue again whenever 0 is
@ -1390,6 +1461,7 @@ static void scsi_kill_request(struct request *req, struct request_queue *q)
{
struct scsi_cmnd *cmd = req->special;
struct scsi_device *sdev = cmd->device;
struct scsi_target *starget = scsi_target(sdev);
struct Scsi_Host *shost = sdev->host;
blkdev_dequeue_request(req);
@ -1413,6 +1485,7 @@ static void scsi_kill_request(struct request *req, struct request_queue *q)
spin_unlock(sdev->request_queue->queue_lock);
spin_lock(shost->host_lock);
shost->host_busy++;
starget->target_busy++;
spin_unlock(shost->host_lock);
spin_lock(sdev->request_queue->queue_lock);
@ -1550,14 +1623,13 @@ static void scsi_request_fn(struct request_queue *q)
goto not_ready;
}
if (!scsi_target_queue_ready(shost, sdev))
goto not_ready;
if (!scsi_host_queue_ready(q, shost, sdev))
goto not_ready;
if (scsi_target(sdev)->single_lun) {
if (scsi_target(sdev)->starget_sdev_user &&
scsi_target(sdev)->starget_sdev_user != sdev)
goto not_ready;
scsi_target(sdev)->starget_sdev_user = sdev;
}
scsi_target(sdev)->target_busy++;
shost->host_busy++;
/*

View File

@ -59,6 +59,7 @@ void scsi_eh_ready_devs(struct Scsi_Host *shost,
struct list_head *done_q);
int scsi_eh_get_sense(struct list_head *work_q,
struct list_head *done_q);
int scsi_noretry_cmd(struct scsi_cmnd *scmd);
/* scsi_lib.c */
extern int scsi_maybe_unblock_host(struct scsi_device *sdev);

View File

@ -419,6 +419,7 @@ static struct scsi_target *scsi_alloc_target(struct device *parent,
dev->type = &scsi_target_type;
starget->id = id;
starget->channel = channel;
starget->can_queue = 0;
INIT_LIST_HEAD(&starget->siblings);
INIT_LIST_HEAD(&starget->devices);
starget->state = STARGET_CREATED;

View File

@ -2133,8 +2133,7 @@ fc_attach_transport(struct fc_function_template *ft)
SETUP_PRIVATE_RPORT_ATTRIBUTE_RD(roles);
SETUP_PRIVATE_RPORT_ATTRIBUTE_RD(port_state);
SETUP_PRIVATE_RPORT_ATTRIBUTE_RD(scsi_target_id);
if (ft->terminate_rport_io)
SETUP_PRIVATE_RPORT_ATTRIBUTE_RW(fast_io_fail_tmo);
SETUP_PRIVATE_RPORT_ATTRIBUTE_RW(fast_io_fail_tmo);
BUG_ON(count > FC_RPORT_NUM_ATTRS);
@ -2328,6 +2327,22 @@ fc_remove_host(struct Scsi_Host *shost)
}
EXPORT_SYMBOL(fc_remove_host);
static void fc_terminate_rport_io(struct fc_rport *rport)
{
struct Scsi_Host *shost = rport_to_shost(rport);
struct fc_internal *i = to_fc_internal(shost->transportt);
/* Involve the LLDD if possible to terminate all io on the rport. */
if (i->f->terminate_rport_io)
i->f->terminate_rport_io(rport);
/*
* must unblock to flush queued IO. The caller will have set
* the port_state or flags, so that fc_remote_port_chkready will
* fail IO.
*/
scsi_target_unblock(&rport->dev);
}
/**
* fc_starget_delete - called to delete the scsi descendants of an rport
@ -2340,13 +2355,8 @@ fc_starget_delete(struct work_struct *work)
{
struct fc_rport *rport =
container_of(work, struct fc_rport, stgt_delete_work);
struct Scsi_Host *shost = rport_to_shost(rport);
struct fc_internal *i = to_fc_internal(shost->transportt);
/* Involve the LLDD if possible to terminate all io on the rport. */
if (i->f->terminate_rport_io)
i->f->terminate_rport_io(rport);
fc_terminate_rport_io(rport);
scsi_remove_target(&rport->dev);
}
@ -2372,10 +2382,7 @@ fc_rport_final_delete(struct work_struct *work)
if (rport->flags & FC_RPORT_SCAN_PENDING)
scsi_flush_work(shost);
/* involve the LLDD to terminate all pending i/o */
if (i->f->terminate_rport_io)
i->f->terminate_rport_io(rport);
fc_terminate_rport_io(rport);
/*
* Cancel any outstanding timers. These should really exist
* only when rmmod'ing the LLDD and we're asking for
@ -2639,7 +2646,8 @@ fc_remote_port_add(struct Scsi_Host *shost, int channel,
spin_lock_irqsave(shost->host_lock, flags);
rport->flags &= ~FC_RPORT_DEVLOSS_PENDING;
rport->flags &= ~(FC_RPORT_FAST_FAIL_TIMEDOUT |
FC_RPORT_DEVLOSS_PENDING);
/* if target, initiate a scan */
if (rport->scsi_target_id != -1) {
@ -2702,6 +2710,7 @@ fc_remote_port_add(struct Scsi_Host *shost, int channel,
rport->port_id = ids->port_id;
rport->roles = ids->roles;
rport->port_state = FC_PORTSTATE_ONLINE;
rport->flags &= ~FC_RPORT_FAST_FAIL_TIMEDOUT;
if (fci->f->dd_fcrport_size)
memset(rport->dd_data, 0,
@ -2784,7 +2793,6 @@ void
fc_remote_port_delete(struct fc_rport *rport)
{
struct Scsi_Host *shost = rport_to_shost(rport);
struct fc_internal *i = to_fc_internal(shost->transportt);
int timeout = rport->dev_loss_tmo;
unsigned long flags;
@ -2830,7 +2838,7 @@ fc_remote_port_delete(struct fc_rport *rport)
/* see if we need to kill io faster than waiting for device loss */
if ((rport->fast_io_fail_tmo != -1) &&
(rport->fast_io_fail_tmo < timeout) && (i->f->terminate_rport_io))
(rport->fast_io_fail_tmo < timeout))
fc_queue_devloss_work(shost, &rport->fail_io_work,
rport->fast_io_fail_tmo * HZ);
@ -2906,7 +2914,8 @@ fc_remote_port_rolechg(struct fc_rport *rport, u32 roles)
fc_flush_devloss(shost);
spin_lock_irqsave(shost->host_lock, flags);
rport->flags &= ~FC_RPORT_DEVLOSS_PENDING;
rport->flags &= ~(FC_RPORT_FAST_FAIL_TIMEDOUT |
FC_RPORT_DEVLOSS_PENDING);
spin_unlock_irqrestore(shost->host_lock, flags);
/* ensure any stgt delete functions are done */
@ -3001,6 +3010,7 @@ fc_timeout_deleted_rport(struct work_struct *work)
rport->supported_classes = FC_COS_UNSPECIFIED;
rport->roles = FC_PORT_ROLE_UNKNOWN;
rport->port_state = FC_PORTSTATE_NOTPRESENT;
rport->flags &= ~FC_RPORT_FAST_FAIL_TIMEDOUT;
/* remove the identifiers that aren't used in the consistent binding */
switch (fc_host->tgtid_bind_type) {
@ -3043,13 +3053,12 @@ fc_timeout_fail_rport_io(struct work_struct *work)
{
struct fc_rport *rport =
container_of(work, struct fc_rport, fail_io_work.work);
struct Scsi_Host *shost = rport_to_shost(rport);
struct fc_internal *i = to_fc_internal(shost->transportt);
if (rport->port_state != FC_PORTSTATE_BLOCKED)
return;
i->f->terminate_rport_io(rport);
rport->flags |= FC_RPORT_FAST_FAIL_TIMEDOUT;
fc_terminate_rport_io(rport);
}
/**

View File

@ -138,7 +138,7 @@ static ssize_t
show_ep_handle(struct device *dev, struct device_attribute *attr, char *buf)
{
struct iscsi_endpoint *ep = iscsi_dev_to_endpoint(dev);
return sprintf(buf, "%u\n", ep->id);
return sprintf(buf, "%llu\n", (unsigned long long) ep->id);
}
static ISCSI_ATTR(ep, handle, S_IRUGO, show_ep_handle, NULL);
@ -156,7 +156,7 @@ static struct attribute_group iscsi_endpoint_group = {
static int iscsi_match_epid(struct device *dev, void *data)
{
struct iscsi_endpoint *ep = iscsi_dev_to_endpoint(dev);
unsigned int *epid = (unsigned int *) data;
uint64_t *epid = (uint64_t *) data;
return *epid == ep->id;
}
@ -166,7 +166,7 @@ iscsi_create_endpoint(int dd_size)
{
struct device *dev;
struct iscsi_endpoint *ep;
unsigned int id;
uint64_t id;
int err;
for (id = 1; id < ISCSI_MAX_EPID; id++) {
@ -187,7 +187,8 @@ iscsi_create_endpoint(int dd_size)
ep->id = id;
ep->dev.class = &iscsi_endpoint_class;
snprintf(ep->dev.bus_id, BUS_ID_SIZE, "ep-%u", id);
snprintf(ep->dev.bus_id, BUS_ID_SIZE, "ep-%llu",
(unsigned long long) id);
err = device_register(&ep->dev);
if (err)
goto free_ep;
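
The (unsigned long long) casts paired with "%llu" above are deliberate: uint64_t is unsigned long on most 64-bit ABIs but unsigned long long on 32-bit ones, so no single length modifier matches it everywhere. A tiny userspace sketch of the two portable spellings:

#include <inttypes.h>
#include <stdio.h>

int main(void)
{
	uint64_t id = 1;

	printf("ep-%llu\n", (unsigned long long)id);	/* cast, kernel style */
	printf("ep-%" PRIu64 "\n", id);			/* C99 format macro */
	return 0;
}
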
@ -374,10 +375,10 @@ int iscsi_session_chkready(struct iscsi_cls_session *session)
err = 0;
break;
case ISCSI_SESSION_FAILED:
err = DID_IMM_RETRY << 16;
err = DID_TRANSPORT_DISRUPTED << 16;
break;
case ISCSI_SESSION_FREE:
err = DID_NO_CONNECT << 16;
err = DID_TRANSPORT_FAILFAST << 16;
break;
default:
err = DID_NO_CONNECT << 16;
@ -1010,7 +1011,7 @@ int iscsi_recv_pdu(struct iscsi_cls_conn *conn, struct iscsi_hdr *hdr,
skb = alloc_skb(len, GFP_ATOMIC);
if (!skb) {
iscsi_conn_error(conn, ISCSI_ERR_CONN_FAILED);
iscsi_conn_error_event(conn, ISCSI_ERR_CONN_FAILED);
iscsi_cls_conn_printk(KERN_ERR, conn, "can not deliver "
"control PDU: OOM\n");
return -ENOMEM;
@ -1031,7 +1032,7 @@ int iscsi_recv_pdu(struct iscsi_cls_conn *conn, struct iscsi_hdr *hdr,
}
EXPORT_SYMBOL_GPL(iscsi_recv_pdu);
void iscsi_conn_error(struct iscsi_cls_conn *conn, enum iscsi_err error)
void iscsi_conn_error_event(struct iscsi_cls_conn *conn, enum iscsi_err error)
{
struct nlmsghdr *nlh;
struct sk_buff *skb;
@ -1063,7 +1064,7 @@ void iscsi_conn_error(struct iscsi_cls_conn *conn, enum iscsi_err error)
iscsi_cls_conn_printk(KERN_INFO, conn, "detected conn error (%d)\n",
error);
}
EXPORT_SYMBOL_GPL(iscsi_conn_error);
EXPORT_SYMBOL_GPL(iscsi_conn_error_event);
static int
iscsi_if_send_reply(int pid, int seq, int type, int done, int multi,

View File

@ -109,7 +109,9 @@ static int spi_execute(struct scsi_device *sdev, const void *cmd,
for(i = 0; i < DV_RETRIES; i++) {
result = scsi_execute(sdev, cmd, dir, buffer, bufflen,
sense, DV_TIMEOUT, /* retries */ 1,
REQ_FAILFAST);
REQ_FAILFAST_DEV |
REQ_FAILFAST_TRANSPORT |
REQ_FAILFAST_DRIVER);
if (result & DRIVER_SENSE) {
struct scsi_sense_hdr sshdr_tmp;
if (!sshdr)

View File

@ -384,7 +384,7 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq)
sector_t block = rq->sector;
sector_t threshold;
unsigned int this_count = rq->nr_sectors;
int ret;
int ret, host_dif;
if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
ret = scsi_setup_blk_pc_cmnd(sdp, rq);
@ -515,7 +515,8 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq)
rq->nr_sectors));
/* Set RDPROTECT/WRPROTECT if disk is formatted with DIF */
if (scsi_host_dif_capable(sdp->host, sdkp->protection_type))
host_dif = scsi_host_dif_capable(sdp->host, sdkp->protection_type);
if (host_dif)
SCpnt->cmnd[1] = 1 << 5;
else
SCpnt->cmnd[1] = 0;
@ -573,8 +574,9 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq)
SCpnt->sdb.length = this_count * sdp->sector_size;
/* If DIF or DIX is enabled, tell HBA how to handle request */
if (sdkp->protection_type || scsi_prot_sg_count(SCpnt))
sd_dif_op(SCpnt, sdkp->protection_type, scsi_prot_sg_count(SCpnt));
if (host_dif || scsi_prot_sg_count(SCpnt))
sd_dif_op(SCpnt, host_dif, scsi_prot_sg_count(SCpnt),
sdkp->protection_type);
/*
* We shouldn't disconnect in the middle of a sector, so with a dumb
@ -1252,14 +1254,12 @@ void sd_read_protection_type(struct scsi_disk *sdkp, unsigned char *buffer)
else
type = ((buffer[12] >> 1) & 7) + 1; /* P_TYPE 0 = Type 1 */
sdkp->protection_type = type;
switch (type) {
case SD_DIF_TYPE0_PROTECTION:
sdkp->protection_type = 0;
break;
case SD_DIF_TYPE1_PROTECTION:
case SD_DIF_TYPE3_PROTECTION:
sdkp->protection_type = type;
break;
case SD_DIF_TYPE2_PROTECTION:
@ -1277,7 +1277,6 @@ void sd_read_protection_type(struct scsi_disk *sdkp, unsigned char *buffer)
return;
disable:
sdkp->protection_type = 0;
sdkp->capacity = 0;
}

View File

@ -97,19 +97,28 @@ struct sd_dif_tuple {
__be32 ref_tag; /* Target LBA or indirect LBA */
};
#if defined(CONFIG_BLK_DEV_INTEGRITY)
#ifdef CONFIG_BLK_DEV_INTEGRITY
extern void sd_dif_op(struct scsi_cmnd *, unsigned int, unsigned int);
extern void sd_dif_op(struct scsi_cmnd *, unsigned int, unsigned int, unsigned int);
extern void sd_dif_config_host(struct scsi_disk *);
extern int sd_dif_prepare(struct request *rq, sector_t, unsigned int);
extern void sd_dif_complete(struct scsi_cmnd *, unsigned int);
#else /* CONFIG_BLK_DEV_INTEGRITY */
#define sd_dif_op(a, b, c) do { } while (0)
#define sd_dif_config_host(a) do { } while (0)
#define sd_dif_prepare(a, b, c) (0)
#define sd_dif_complete(a, b) (0)
static inline void sd_dif_op(struct scsi_cmnd *cmd, unsigned int a, unsigned int b, unsigned int c)
{
}
static inline void sd_dif_config_host(struct scsi_disk *disk)
{
}
static inline int sd_dif_prepare(struct request *rq, sector_t s, unsigned int a)
{
return 0;
}
static inline void sd_dif_complete(struct scsi_cmnd *cmd, unsigned int a)
{
}
#endif /* CONFIG_BLK_DEV_INTEGRITY */
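
Turning the stubs into static inlines is more than style: a function-like macro discards its arguments before the compiler sees them, so their types are never checked, and an arity change (the old three-parameter sd_dif_op stub against the new four-argument prototype above) surfaces only as a preprocessor error. A userspace illustration with hypothetical names:

#include <stdio.h>

struct cmd { int tag; };

/* macro stub: arguments vanish unexamined */
#define dif_op_macro(c, dif, dix, type)	do { } while (0)

/* inline stub: arguments keep their types and are checked */
static inline void dif_op_inline(struct cmd *c, unsigned int dif,
				 unsigned int dix, unsigned int type)
{
	(void)c; (void)dif; (void)dix; (void)type;
}

int main(void)
{
	struct cmd c = { 1 };

	dif_op_macro(&c, 0, 0, 0);
	dif_op_inline(&c, 0, 0, 0);
	/*
	 * dif_op_macro("oops", 0, 0, 0) compiles silently;
	 * dif_op_inline("oops", 0, 0, 0) draws an incompatible
	 * pointer type warning.
	 */
	return 0;
}
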

View File

@ -311,26 +311,27 @@ void sd_dif_config_host(struct scsi_disk *sdkp)
struct scsi_device *sdp = sdkp->device;
struct gendisk *disk = sdkp->disk;
u8 type = sdkp->protection_type;
int dif, dix;
/* If this HBA doesn't support DIX, resort to normal I/O or DIF */
if (scsi_host_dix_capable(sdp->host, type) == 0) {
dif = scsi_host_dif_capable(sdp->host, type);
dix = scsi_host_dix_capable(sdp->host, type);
if (type == SD_DIF_TYPE0_PROTECTION)
return;
if (scsi_host_dif_capable(sdp->host, type) == 0) {
sd_printk(KERN_INFO, sdkp, "Type %d protection " \
"unsupported by HBA. Disabling DIF.\n", type);
sdkp->protection_type = 0;
return;
}
sd_printk(KERN_INFO, sdkp, "Enabling DIF Type %d protection\n",
type);
return;
if (!dix && scsi_host_dix_capable(sdp->host, 0)) {
dif = 0; dix = 1;
}
if (type) {
if (dif)
sd_printk(KERN_NOTICE, sdkp,
"Enabling DIF Type %d protection\n", type);
else
sd_printk(KERN_NOTICE, sdkp,
"Disabling DIF Type %d protection\n", type);
}
if (!dix)
return;
/* Enable DMA of protection information */
if (scsi_host_get_guard(sdkp->device->host) & SHOST_DIX_GUARD_IP)
if (type == SD_DIF_TYPE3_PROTECTION)
@ -343,17 +344,17 @@ void sd_dif_config_host(struct scsi_disk *sdkp)
else
blk_integrity_register(disk, &dif_type1_integrity_crc);
sd_printk(KERN_INFO, sdkp,
"Enabling %s integrity protection\n", disk->integrity->name);
sd_printk(KERN_NOTICE, sdkp,
"Enabling DIX %s protection\n", disk->integrity->name);
/* Signal to block layer that we support sector tagging */
if (type && sdkp->ATO) {
if (dif && type && sdkp->ATO) {
if (type == SD_DIF_TYPE3_PROTECTION)
disk->integrity->tag_size = sizeof(u16) + sizeof(u32);
else
disk->integrity->tag_size = sizeof(u16);
sd_printk(KERN_INFO, sdkp, "DIF application tag size %u\n",
sd_printk(KERN_NOTICE, sdkp, "DIF application tag size %u\n",
disk->integrity->tag_size);
}
}
@ -361,7 +362,7 @@ void sd_dif_config_host(struct scsi_disk *sdkp)
/*
* DIF DMA operation magic decoder ring.
*/
void sd_dif_op(struct scsi_cmnd *scmd, unsigned int dif, unsigned int dix)
void sd_dif_op(struct scsi_cmnd *scmd, unsigned int dif, unsigned int dix, unsigned int type)
{
int csum_convert, prot_op;
@ -406,7 +407,8 @@ void sd_dif_op(struct scsi_cmnd *scmd, unsigned int dif, unsigned int dix)
}
scsi_set_prot_op(scmd, prot_op);
scsi_set_prot_type(scmd, dif);
if (dif)
scsi_set_prot_type(scmd, type);
}
/*

View File

@ -129,25 +129,30 @@ struct bio {
* bit 2 -- barrier
* Insert a serialization point in the IO queue, forcing previously
* submitted IO to be completed before this one is issued.
* bit 3 -- fail fast, don't want low level driver retries
* bit 4 -- synchronous I/O hint: the block layer will unplug immediately
* bit 3 -- synchronous I/O hint: the block layer will unplug immediately
* Note that this does NOT indicate that the IO itself is sync, just
* that the block layer will not postpone issue of this IO by plugging.
* bit 5 -- metadata request
* bit 4 -- metadata request
* Used for tracing to differentiate metadata and data IO. May also
* get some preferential treatment in the IO scheduler
* bit 6 -- discard sectors
* bit 5 -- discard sectors
* Informs the lower level device that this range of sectors is no longer
* used by the file system and may thus be freed by the device. Used
* for flash based storage.
* bit 6 -- fail fast device errors
* bit 7 -- fail fast transport errors
* bit 8 -- fail fast driver errors
* Don't want driver retries for any fast-fail class, whatever the reason.
*/
#define BIO_RW 0 /* Must match RW in req flags (blkdev.h) */
#define BIO_RW_AHEAD 1 /* Must match FAILFAST in req flags */
#define BIO_RW_BARRIER 2
#define BIO_RW_FAILFAST 3
#define BIO_RW_SYNC 4
#define BIO_RW_META 5
#define BIO_RW_DISCARD 6
#define BIO_RW_SYNC 3
#define BIO_RW_META 4
#define BIO_RW_DISCARD 5
#define BIO_RW_FAILFAST_DEV 6
#define BIO_RW_FAILFAST_TRANSPORT 7
#define BIO_RW_FAILFAST_DRIVER 8
/*
* upper 16 bits of bi_rw define the io priority of this bio
@ -174,7 +179,10 @@ struct bio {
#define bio_sectors(bio) ((bio)->bi_size >> 9)
#define bio_barrier(bio) ((bio)->bi_rw & (1 << BIO_RW_BARRIER))
#define bio_sync(bio) ((bio)->bi_rw & (1 << BIO_RW_SYNC))
#define bio_failfast(bio) ((bio)->bi_rw & (1 << BIO_RW_FAILFAST))
#define bio_failfast_dev(bio) ((bio)->bi_rw & (1 << BIO_RW_FAILFAST_DEV))
#define bio_failfast_transport(bio) \
((bio)->bi_rw & (1 << BIO_RW_FAILFAST_TRANSPORT))
#define bio_failfast_driver(bio) ((bio)->bi_rw & (1 << BIO_RW_FAILFAST_DRIVER))
#define bio_rw_ahead(bio) ((bio)->bi_rw & (1 << BIO_RW_AHEAD))
#define bio_rw_meta(bio) ((bio)->bi_rw & (1 << BIO_RW_META))
#define bio_discard(bio) ((bio)->bi_rw & (1 << BIO_RW_DISCARD))

View File

@ -87,7 +87,9 @@ enum {
*/
enum rq_flag_bits {
__REQ_RW, /* not set, read. set, write */
__REQ_FAILFAST, /* no low level driver retries */
__REQ_FAILFAST_DEV, /* no driver retries of device errors */
__REQ_FAILFAST_TRANSPORT, /* no driver retries of transport errors */
__REQ_FAILFAST_DRIVER, /* no driver retries of driver errors */
__REQ_DISCARD, /* request to discard sectors */
__REQ_SORTED, /* elevator knows about this request */
__REQ_SOFTBARRIER, /* may not be passed by ioscheduler */
@ -111,8 +113,10 @@ enum rq_flag_bits {
};
#define REQ_RW (1 << __REQ_RW)
#define REQ_FAILFAST_DEV (1 << __REQ_FAILFAST_DEV)
#define REQ_FAILFAST_TRANSPORT (1 << __REQ_FAILFAST_TRANSPORT)
#define REQ_FAILFAST_DRIVER (1 << __REQ_FAILFAST_DRIVER)
#define REQ_DISCARD (1 << __REQ_DISCARD)
#define REQ_FAILFAST (1 << __REQ_FAILFAST)
#define REQ_SORTED (1 << __REQ_SORTED)
#define REQ_SOFTBARRIER (1 << __REQ_SOFTBARRIER)
#define REQ_HARDBARRIER (1 << __REQ_HARDBARRIER)
@ -560,7 +564,12 @@ enum {
#define blk_special_request(rq) ((rq)->cmd_type == REQ_TYPE_SPECIAL)
#define blk_sense_request(rq) ((rq)->cmd_type == REQ_TYPE_SENSE)
#define blk_noretry_request(rq) ((rq)->cmd_flags & REQ_FAILFAST)
#define blk_failfast_dev(rq) ((rq)->cmd_flags & REQ_FAILFAST_DEV)
#define blk_failfast_transport(rq) ((rq)->cmd_flags & REQ_FAILFAST_TRANSPORT)
#define blk_failfast_driver(rq) ((rq)->cmd_flags & REQ_FAILFAST_DRIVER)
#define blk_noretry_request(rq) (blk_failfast_dev(rq) || \
blk_failfast_transport(rq) || \
blk_failfast_driver(rq))
#define blk_rq_started(rq) ((rq)->cmd_flags & REQ_STARTED)
#define blk_account_rq(rq) (blk_rq_started(rq) && (blk_fs_request(rq) || blk_discard_rq(rq)))
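
The single REQ_FAILFAST is now three independent bits, and blk_noretry_request() is simply their OR. A minimal userspace sketch of the flag arithmetic (the values below are illustrative, not the kernel's actual enum positions):

#include <stdio.h>

enum {
	REQ_FF_DEV	 = 1 << 0,	/* device errors */
	REQ_FF_TRANSPORT = 1 << 1,	/* transport errors */
	REQ_FF_DRIVER	 = 1 << 2,	/* driver errors */
};

static int noretry(unsigned int cmd_flags)
{
	return !!(cmd_flags & (REQ_FF_DEV | REQ_FF_TRANSPORT | REQ_FF_DRIVER));
}

int main(void)
{
	/* a multipath-style request fails fast only on transport errors */
	unsigned int flags = REQ_FF_TRANSPORT;

	printf("noretry=%d\n", noretry(flags));		/* prints noretry=1 */
	return 0;
}
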

View File

@ -213,6 +213,8 @@ enum iscsi_err {
ISCSI_ERR_DATA_DGST = ISCSI_ERR_BASE + 15,
ISCSI_ERR_PARAM_NOT_FOUND = ISCSI_ERR_BASE + 16,
ISCSI_ERR_NO_SCSI_CMD = ISCSI_ERR_BASE + 17,
ISCSI_ERR_INVALID_HOST = ISCSI_ERR_BASE + 18,
ISCSI_ERR_XMIT_FAILED = ISCSI_ERR_BASE + 19,
};
/*

View File

@ -287,6 +287,11 @@ struct iscsi_session {
struct iscsi_pool cmdpool; /* PDU's pool */
};
enum {
ISCSI_HOST_SETUP,
ISCSI_HOST_REMOVED,
};
struct iscsi_host {
char *initiatorname;
/* hw address or netdev iscsi connection is bound to */
@ -295,6 +300,12 @@ struct iscsi_host {
/* local address */
int local_port;
char local_address[ISCSI_ADDRESS_BUF_LEN];
wait_queue_head_t session_removal_wq;
/* protects sessions and state */
spinlock_t lock;
int num_sessions;
int state;
};
/*
@ -302,7 +313,7 @@ struct iscsi_host {
*/
extern int iscsi_change_queue_depth(struct scsi_device *sdev, int depth);
extern int iscsi_eh_abort(struct scsi_cmnd *sc);
extern int iscsi_eh_host_reset(struct scsi_cmnd *sc);
extern int iscsi_eh_target_reset(struct scsi_cmnd *sc);
extern int iscsi_eh_device_reset(struct scsi_cmnd *sc);
extern int iscsi_queuecommand(struct scsi_cmnd *sc,
void (*done)(struct scsi_cmnd *));
@ -351,6 +362,8 @@ extern void iscsi_conn_stop(struct iscsi_cls_conn *, int);
extern int iscsi_conn_bind(struct iscsi_cls_session *, struct iscsi_cls_conn *,
int);
extern void iscsi_conn_failure(struct iscsi_conn *conn, enum iscsi_err err);
extern void iscsi_session_failure(struct iscsi_cls_session *cls_session,
enum iscsi_err err);
extern int iscsi_conn_get_param(struct iscsi_cls_conn *cls_conn,
enum iscsi_param param, char *buf);
extern void iscsi_suspend_tx(struct iscsi_conn *conn);

View File

@ -381,6 +381,11 @@ static inline int scsi_is_wlun(unsigned int lun)
#define DID_IMM_RETRY 0x0c /* Retry without decrementing retry count */
#define DID_REQUEUE 0x0d /* Requeue command (no immediate retry) also
* without decrementing the retry count */
#define DID_TRANSPORT_DISRUPTED 0x0e /* Transport error disrupted execution
* and the driver blocked the port to
* recover the link. Transport class will
* retry or fail IO */
#define DID_TRANSPORT_FAILFAST 0x0f /* Transport class fast-failed the IO */
#define DRIVER_OK 0x00 /* Driver status */
/*
@ -426,6 +431,7 @@ static inline int scsi_is_wlun(unsigned int lun)
#define SCSI_MLQUEUE_HOST_BUSY 0x1055
#define SCSI_MLQUEUE_DEVICE_BUSY 0x1056
#define SCSI_MLQUEUE_EH_RETRY 0x1057
#define SCSI_MLQUEUE_TARGET_BUSY 0x1058
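
The DID_TRANSPORT_* codes a few lines up are host-byte values: they occupy bits 16-23 of a SCSI result word, which is why the transport classes return them shifted left by 16. A short userspace sketch mirroring the host_byte() helper from this header:

#include <stdio.h>

#define DID_TRANSPORT_DISRUPTED	0x0e
#define DID_TRANSPORT_FAILFAST	0x0f
#define host_byte(result)	(((result) >> 16) & 0xff)

int main(void)
{
	int result = DID_TRANSPORT_DISRUPTED << 16;

	if (host_byte(result) == DID_TRANSPORT_DISRUPTED)
		printf("transport disrupted: blocked for recovery, "
		       "transport class will retry or fail\n");
	return 0;
}
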
/*
* Use these to separate status msg and our bytes

View File

@ -238,6 +238,16 @@ struct scsi_target {
* for the device at a time. */
unsigned int pdt_1f_for_no_lun; /* PDT = 0x1f */
/* means no lun present */
/* commands actually active on LLD. protected by host lock. */
unsigned int target_busy;
/*
* LLDs should set this in the slave_alloc host template callout.
* If set to zero then there is no limit.
*/
unsigned int can_queue;
unsigned int target_blocked;
unsigned int max_target_blocked;
#define SCSI_DEFAULT_TARGET_BLOCKED 3
char scsi_level;
struct execute_work ew;
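
Per the comment above, the limit is published from the LLD's slave_alloc host template callout. A hypothetical sketch, not a real driver: my_target_depth() is made up, and only the starget->can_queue assignment is the point:

#include <scsi/scsi_device.h>

static int my_slave_alloc(struct scsi_device *sdev)
{
	struct scsi_target *starget = scsi_target(sdev);

	/* 0 leaves the target unthrottled; anything else caps
	 * target_busy as checked by scsi_target_is_busy() */
	starget->can_queue = my_target_depth(starget);
	return 0;
}
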

View File

@ -357,6 +357,7 @@ struct fc_rport { /* aka fc_starget_attrs */
/* bit field values for struct fc_rport "flags" field: */
#define FC_RPORT_DEVLOSS_PENDING 0x01
#define FC_RPORT_SCAN_PENDING 0x02
#define FC_RPORT_FAST_FAIL_TIMEDOUT 0x04
#define dev_to_rport(d) \
container_of(d, struct fc_rport, dev)
@ -678,12 +679,15 @@ fc_remote_port_chkready(struct fc_rport *rport)
if (rport->roles & FC_PORT_ROLE_FCP_TARGET)
result = 0;
else if (rport->flags & FC_RPORT_DEVLOSS_PENDING)
result = DID_IMM_RETRY << 16;
result = DID_TRANSPORT_DISRUPTED << 16;
else
result = DID_NO_CONNECT << 16;
break;
case FC_PORTSTATE_BLOCKED:
result = DID_IMM_RETRY << 16;
if (rport->flags & FC_RPORT_FAST_FAIL_TIMEDOUT)
result = DID_TRANSPORT_FAILFAST << 16;
else
result = DID_TRANSPORT_DISRUPTED << 16;
break;
default:
result = DID_NO_CONNECT << 16;

View File

@ -135,7 +135,8 @@ extern int iscsi_unregister_transport(struct iscsi_transport *tt);
/*
* control plane upcalls
*/
extern void iscsi_conn_error(struct iscsi_cls_conn *conn, enum iscsi_err error);
extern void iscsi_conn_error_event(struct iscsi_cls_conn *conn,
enum iscsi_err error);
extern int iscsi_recv_pdu(struct iscsi_cls_conn *conn, struct iscsi_hdr *hdr,
char *data, uint32_t data_size);
@ -207,7 +208,7 @@ extern void iscsi_host_for_each_session(struct Scsi_Host *shost,
struct iscsi_endpoint {
void *dd_data; /* LLD private data */
struct device dev;
unsigned int id;
uint64_t id;
};
/*