
Merge git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6

* git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6: (209 commits)
  [SCSI] fix oops during scsi scanning
  [SCSI] libsrp: fix memory leak in srp_ring_free()
  [SCSI] libiscsi, bnx2i: make bound ep check common
  [SCSI] libiscsi: add completion function for drivers that do not need pdu processing
  [SCSI] scsi_dh_rdac: changes for rdac debug logging
  [SCSI] scsi_dh_rdac: changes to collect the rdac debug information during the initialization
  [SCSI] scsi_dh_rdac: move the init code from rdac_activate to rdac_bus_attach
  [SCSI] sg: fix oops in the error path in sg_build_indirect()
  [SCSI] mptsas : Bump version to 3.04.12
  [SCSI] mptsas : FW event thread and scsi mid layer deadlock in SYNCHRONIZE CACHE command
  [SCSI] mptsas : Send DID_NO_CONNECT for pending IOs of removed device
  [SCSI] mptsas : PAE Kernel more than 4 GB kernel panic
  [SCSI] mptsas : NULL pointer on big endian systems causing Expander not to tear off
  [SCSI] mptsas : Sanity check for phyinfo is added
  [SCSI] scsi_dh_rdac: Add support for Sun StorageTek ST2500, ST2510 and ST2530
  [SCSI] pmcraid: PMC-Sierra MaxRAID driver to support 6Gb/s SAS RAID controller
  [SCSI] qla2xxx: Update version number to 8.03.01-k6.
  [SCSI] qla2xxx: Properly delete rports attached to a vport.
  [SCSI] qla2xxx: Correct various NPIV issues.
  [SCSI] qla2xxx: Correct qla2x00_eh_wait_on_command() to wait correctly.
  ...
Linus Torvalds 2009-09-14 17:53:36 -07:00
commit 39695224bd
112 changed files with 14396 additions and 4886 deletions


@ -4009,6 +4009,14 @@ S: Maintained
F: drivers/block/pktcdvd.c
F: include/linux/pktcdvd.h
PMC SIERRA MaxRAID DRIVER
P: Anil Ravindranath
M: anil_ravindranath@pmc-sierra.com
L: linux-scsi@vger.kernel.org
W: http://www.pmc-sierra.com/
S: Supported
F: drivers/scsi/pmcraid.*
POSIX CLOCKS and TIMERS
M: Thomas Gleixner <tglx@linutronix.de>
S: Supported
@ -4447,7 +4455,7 @@ F: drivers/scsi/sg.c
F: include/scsi/sg.h
SCSI SUBSYSTEM
M: "James E.J. Bottomley" <James.Bottomley@HansenPartnership.com>
M: "James E.J. Bottomley" <James.Bottomley@suse.de>
L: linux-scsi@vger.kernel.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6.git
T: git git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-rc-fixes-2.6.git


@ -88,6 +88,14 @@ int get_sync_clock(unsigned long long *clock);
void init_cpu_timer(void);
unsigned long long monotonic_clock(void);
void tod_to_timeval(__u64, struct timespec *);
static inline
void stck_to_timespec(unsigned long long stck, struct timespec *ts)
{
tod_to_timeval(stck - TOD_UNIX_EPOCH, ts);
}
extern u64 sched_clock_base_cc;
/**


@ -63,8 +63,6 @@ typedef struct
} debug_sprintf_entry_t;
extern void tod_to_timeval(uint64_t todval, struct timespec *xtime);
/* internal function prototyes */
static int debug_init(void);
@ -1450,17 +1448,13 @@ debug_dflt_header_fn(debug_info_t * id, struct debug_view *view,
int area, debug_entry_t * entry, char *out_buf)
{
struct timespec time_spec;
unsigned long long time;
char *except_str;
unsigned long caller;
int rc = 0;
unsigned int level;
level = entry->id.fields.level;
time = entry->id.stck;
/* adjust todclock to 1970 */
time -= 0x8126d60e46000000LL - (0x3c26700LL * 1000000 * 4096);
tod_to_timeval(time, &time_spec);
stck_to_timespec(entry->id.stck, &time_spec);
if (entry->id.fields.exception)
except_str = "*";


@ -91,6 +91,7 @@ void tod_to_timeval(__u64 todval, struct timespec *xtime)
todval -= (sec * 1000000) << 12;
xtime->tv_nsec = ((todval * 1000) >> 12);
}
EXPORT_SYMBOL(tod_to_timeval);
void clock_comparator_work(void)
{

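The tod_to_timeval() hunk above exports the helper so the s390 debug code can use it. The arithmetic is simple once you know the S/390 TOD clock format: bit 51 ticks once per microsecond, i.e. the counter runs at 4096 ticks per microsecond, so a right shift by 12 converts ticks to microseconds. A minimal user-space sketch of the same conversion (illustrative names, not part of the patch):

#include <stdint.h>
#include <stdio.h>

static void tod_to_timespec_sketch(uint64_t todval, int64_t *sec, long *nsec)
{
    uint64_t s = (todval >> 12) / 1000000;  /* TOD ticks -> whole seconds */
    todval -= (s * 1000000) << 12;          /* remainder, still in ticks */
    *sec = (int64_t)s;
    *nsec = (long)((todval * 1000) >> 12);  /* ticks -> nanoseconds */
}

int main(void)
{
    int64_t sec;
    long nsec;

    /* 3.5 seconds worth of TOD ticks */
    tod_to_timespec_sketch((4096ULL * 1000000 * 3) + (4096ULL * 500000),
                           &sec, &nsec);
    printf("%lld.%09ld\n", (long long)sec, nsec);   /* prints 3.500000000 */
    return 0;
}

The stck_to_timespec() inline added in the timex.h hunk earlier simply subtracts TOD_UNIX_EPOCH first, so the result is relative to 1970 rather than the TOD epoch.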

@ -64,6 +64,7 @@ struct multipath {
spinlock_t lock;
const char *hw_handler_name;
char *hw_handler_params;
unsigned nr_priority_groups;
struct list_head priority_groups;
unsigned pg_init_required; /* pg_init needs calling? */
@ -219,6 +220,7 @@ static void free_multipath(struct multipath *m)
}
kfree(m->hw_handler_name);
kfree(m->hw_handler_params);
mempool_destroy(m->mpio_pool);
kfree(m);
}
@ -615,6 +617,17 @@ static struct pgpath *parse_path(struct arg_set *as, struct path_selector *ps,
dm_put_device(ti, p->path.dev);
goto bad;
}
if (m->hw_handler_params) {
r = scsi_dh_set_params(q, m->hw_handler_params);
if (r < 0) {
ti->error = "unable to set hardware "
"handler parameters";
scsi_dh_detach(q);
dm_put_device(ti, p->path.dev);
goto bad;
}
}
}
r = ps->type->add_path(ps, &p->path, as->argc, as->argv, &ti->error);
@ -705,6 +718,7 @@ static struct priority_group *parse_priority_group(struct arg_set *as,
static int parse_hw_handler(struct arg_set *as, struct multipath *m)
{
unsigned hw_argc;
int ret;
struct dm_target *ti = m->ti;
static struct param _params[] = {
@ -726,17 +740,33 @@ static int parse_hw_handler(struct arg_set *as, struct multipath *m)
request_module("scsi_dh_%s", m->hw_handler_name);
if (scsi_dh_handler_exist(m->hw_handler_name) == 0) {
ti->error = "unknown hardware handler type";
kfree(m->hw_handler_name);
m->hw_handler_name = NULL;
return -EINVAL;
ret = -EINVAL;
goto fail;
}
if (hw_argc > 1)
DMWARN("Ignoring user-specified arguments for "
"hardware handler \"%s\"", m->hw_handler_name);
if (hw_argc > 1) {
char *p;
int i, j, len = 4;
for (i = 0; i <= hw_argc - 2; i++)
len += strlen(as->argv[i]) + 1;
p = m->hw_handler_params = kzalloc(len, GFP_KERNEL);
if (!p) {
ti->error = "memory allocation failed";
ret = -ENOMEM;
goto fail;
}
j = sprintf(p, "%d", hw_argc - 1);
for (i = 0, p+=j+1; i <= hw_argc - 2; i++, p+=j+1)
j = sprintf(p, "%s", as->argv[i]);
}
consume(as, hw_argc - 1);
return 0;
fail:
kfree(m->hw_handler_name);
m->hw_handler_name = NULL;
return ret;
}
static int parse_features(struct arg_set *as, struct multipath *m)

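parse_hw_handler() above serializes any extra table arguments into hw_handler_params as a NUL-separated block — the argument count first, then each argument — which parse_path() later hands to scsi_dh_set_params(). A user-space sketch of that packing and of how a consumer would walk it (names are illustrative, not from the patch):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Builds e.g. "2\0rdac\0debug\0", mirroring the sprintf loop above. */
static char *pack_params_sketch(int argc, char **argv, size_t *out_len)
{
    size_t len = 4;     /* room for the count, as in the kernel code */
    int i, j;
    char *buf, *p;

    for (i = 0; i < argc; i++)
        len += strlen(argv[i]) + 1;
    buf = calloc(1, len);
    if (!buf)
        return NULL;
    p = buf;
    j = sprintf(p, "%d", argc);
    for (i = 0, p += j + 1; i < argc; i++, p += j + 1)
        j = sprintf(p, "%s", argv[i]);
    *out_len = len;
    return buf;
}

int main(void)
{
    char *args[] = { "rdac", "debug" };
    size_t len;
    char *buf = pack_params_sketch(2, args, &len);

    /* walk the NUL-separated fields the way a consumer would */
    for (char *p = buf; p < buf + len && *p; p += strlen(p) + 1)
        printf("[%s]\n", p);
    free(buf);
    return 0;
}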

@ -1015,9 +1015,9 @@ mpt_add_sge_64bit(void *pAddr, u32 flagslength, dma_addr_t dma_addr)
{
SGESimple64_t *pSge = (SGESimple64_t *) pAddr;
pSge->Address.Low = cpu_to_le32
(lower_32_bits((unsigned long)(dma_addr)));
(lower_32_bits(dma_addr));
pSge->Address.High = cpu_to_le32
(upper_32_bits((unsigned long)dma_addr));
(upper_32_bits(dma_addr));
pSge->FlagsLength = cpu_to_le32
((flagslength | MPT_SGE_FLAGS_64_BIT_ADDRESSING));
}
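Per the "PAE Kernel more than 4 GB kernel panic" entry in the commit list, the old code cast dma_addr_t through unsigned long before splitting it; on a 32-bit kernel with 64-bit DMA addresses that truncates the upper half to zero. A small user-space sketch of the split the fixed code performs (illustrative value):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint64_t dma_addr = 0x00000001234ab000ULL;  /* example bus address above 4 GB */
    uint32_t low  = (uint32_t)dma_addr;         /* what lower_32_bits() returns */
    uint32_t high = (uint32_t)(dma_addr >> 32); /* what upper_32_bits() returns */

    /* the removed (unsigned long) cast forced high to 0 on 32-bit
     * kernels, so any buffer above 4 GB was addressed incorrectly */
    printf("low=0x%08x high=0x%08x\n", low, high);
    return 0;
}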
@ -1038,8 +1038,8 @@ mpt_add_sge_64bit_1078(void *pAddr, u32 flagslength, dma_addr_t dma_addr)
u32 tmp;
pSge->Address.Low = cpu_to_le32
(lower_32_bits((unsigned long)(dma_addr)));
tmp = (u32)(upper_32_bits((unsigned long)dma_addr));
(lower_32_bits(dma_addr));
tmp = (u32)(upper_32_bits(dma_addr));
/*
* 1078 errata workaround for the 36GB limitation
@ -1101,7 +1101,7 @@ mpt_add_chain_64bit(void *pAddr, u8 next, u16 length, dma_addr_t dma_addr)
pChain->NextChainOffset = next;
pChain->Address.Low = cpu_to_le32(tmp);
tmp = (u32)(upper_32_bits((unsigned long)dma_addr));
tmp = (u32)(upper_32_bits(dma_addr));
pChain->Address.High = cpu_to_le32(tmp);
}
@ -1297,12 +1297,8 @@ mpt_host_page_alloc(MPT_ADAPTER *ioc, pIOCInit_t ioc_init)
psge = (char *)&ioc_init->HostPageBufferSGE;
flags_length = MPI_SGE_FLAGS_SIMPLE_ELEMENT |
MPI_SGE_FLAGS_SYSTEM_ADDRESS |
MPI_SGE_FLAGS_32_BIT_ADDRESSING |
MPI_SGE_FLAGS_HOST_TO_IOC |
MPI_SGE_FLAGS_END_OF_BUFFER;
if (sizeof(dma_addr_t) == sizeof(u64)) {
flags_length |= MPI_SGE_FLAGS_64_BIT_ADDRESSING;
}
flags_length = flags_length << MPI_SGE_FLAGS_SHIFT;
flags_length |= ioc->HostPageBuffer_sz;
ioc->add_sge(psge, flags_length, ioc->HostPageBuffer_dma);
@ -2224,8 +2220,6 @@ mpt_do_ioc_recovery(MPT_ADAPTER *ioc, u32 reason, int sleepFlag)
int hard;
int rc=0;
int ii;
u8 cb_idx;
int handlers;
int ret = 0;
int reset_alt_ioc_active = 0;
int irq_allocated = 0;
@ -2548,34 +2542,6 @@ mpt_do_ioc_recovery(MPT_ADAPTER *ioc, u32 reason, int sleepFlag)
mpt_get_manufacturing_pg_0(ioc);
}
/*
* Call each currently registered protocol IOC reset handler
* with post-reset indication.
* NOTE: If we're doing _IOC_BRINGUP, there can be no
* MptResetHandlers[] registered yet.
*/
if (hard_reset_done) {
rc = handlers = 0;
for (cb_idx = MPT_MAX_PROTOCOL_DRIVERS-1; cb_idx; cb_idx--) {
if ((ret == 0) && MptResetHandlers[cb_idx]) {
dprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"Calling IOC post_reset handler #%d\n",
ioc->name, cb_idx));
rc += mpt_signal_reset(cb_idx, ioc, MPT_IOC_POST_RESET);
handlers++;
}
if (alt_ioc_ready && MptResetHandlers[cb_idx]) {
drsprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"Calling IOC post_reset handler #%d\n",
ioc->alt_ioc->name, cb_idx));
rc += mpt_signal_reset(cb_idx, ioc->alt_ioc, MPT_IOC_POST_RESET);
handlers++;
}
}
/* FIXME? Examine results here? */
}
out:
if ((ret != 0) && irq_allocated) {
free_irq(ioc->pci_irq, ioc);
@ -3938,6 +3904,7 @@ mpt_diag_reset(MPT_ADAPTER *ioc, int ignore, int sleepFlag)
int count = 0;
u32 diag1val = 0;
MpiFwHeader_t *cached_fw; /* Pointer to FW */
u8 cb_idx;
/* Clear any existing interrupts */
CHIPREG_WRITE32(&ioc->chip->IntStatus, 0);
@ -3956,6 +3923,18 @@ mpt_diag_reset(MPT_ADAPTER *ioc, int ignore, int sleepFlag)
else
mdelay(1);
/*
* Call each currently registered protocol IOC reset handler
* with pre-reset indication.
* NOTE: If we're doing _IOC_BRINGUP, there can be no
* MptResetHandlers[] registered yet.
*/
for (cb_idx = MPT_MAX_PROTOCOL_DRIVERS-1; cb_idx; cb_idx--) {
if (MptResetHandlers[cb_idx])
(*(MptResetHandlers[cb_idx]))(ioc,
MPT_IOC_PRE_RESET);
}
for (count = 0; count < 60; count ++) {
doorbell = CHIPREG_READ32(&ioc->chip->Doorbell);
doorbell &= MPI_IOC_STATE_MASK;
@ -4052,25 +4031,15 @@ mpt_diag_reset(MPT_ADAPTER *ioc, int ignore, int sleepFlag)
* NOTE: If we're doing _IOC_BRINGUP, there can be no
* MptResetHandlers[] registered yet.
*/
{
u8 cb_idx;
int r = 0;
for (cb_idx = MPT_MAX_PROTOCOL_DRIVERS-1; cb_idx; cb_idx--) {
if (MptResetHandlers[cb_idx]) {
dprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"Calling IOC pre_reset handler #%d\n",
ioc->name, cb_idx));
r += mpt_signal_reset(cb_idx, ioc, MPT_IOC_PRE_RESET);
if (ioc->alt_ioc) {
dprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"Calling alt-%s pre_reset handler #%d\n",
ioc->name, ioc->alt_ioc->name, cb_idx));
r += mpt_signal_reset(cb_idx, ioc->alt_ioc, MPT_IOC_PRE_RESET);
}
for (cb_idx = MPT_MAX_PROTOCOL_DRIVERS-1; cb_idx; cb_idx--) {
if (MptResetHandlers[cb_idx]) {
mpt_signal_reset(cb_idx,
ioc, MPT_IOC_PRE_RESET);
if (ioc->alt_ioc) {
mpt_signal_reset(cb_idx,
ioc->alt_ioc, MPT_IOC_PRE_RESET);
}
}
/* FIXME? Examine results here? */
}
if (ioc->cached_fw)
@ -6956,7 +6925,7 @@ EXPORT_SYMBOL(mpt_halt_firmware);
int
mpt_HardResetHandler(MPT_ADAPTER *ioc, int sleepFlag)
{
int rc;
int rc;
u8 cb_idx;
unsigned long flags;
unsigned long time_count;
@ -6982,8 +6951,6 @@ mpt_HardResetHandler(MPT_ADAPTER *ioc, int sleepFlag)
ioc->alt_ioc->ioc_reset_in_progress = 1;
spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
/* FIXME: If do_ioc_recovery fails, repeat....
*/
/* The SCSI driver needs to adjust timeouts on all current
* commands prior to the diagnostic reset being issued.
@ -7020,6 +6987,15 @@ mpt_HardResetHandler(MPT_ADAPTER *ioc, int sleepFlag)
}
spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
for (cb_idx = MPT_MAX_PROTOCOL_DRIVERS-1; cb_idx; cb_idx--) {
if (MptResetHandlers[cb_idx]) {
mpt_signal_reset(cb_idx, ioc, MPT_IOC_POST_RESET);
if (ioc->alt_ioc)
mpt_signal_reset(cb_idx,
ioc->alt_ioc, MPT_IOC_POST_RESET);
}
}
dtmprintk(ioc,
printk(MYIOC_s_DEBUG_FMT
"HardResetHandler: completed (%d seconds): %s\n", ioc->name,

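The reordering above moves the pre-reset notification into mpt_diag_reset() and adds a post-reset pass at the end of mpt_HardResetHandler(), presumably as part of the FW-event/SYNCHRONIZE CACHE deadlock fix named in the commit list. The notification itself is a reverse walk of a sparse callback table; a user-space model of that shape (all names illustrative):

#include <stdio.h>

#define MAX_HANDLERS 16
enum { PRE_RESET, POST_RESET };

typedef void (*reset_handler_t)(int phase);
static reset_handler_t handlers[MAX_HANDLERS];  /* slot 0 stays unused, as in
                                                 * MptResetHandlers[] */

static void notify_all(int phase)
{
    /* walk the table top-down, skipping unregistered slots —
     * the same shape as the loops added above */
    for (int i = MAX_HANDLERS - 1; i; i--)
        if (handlers[i])
            handlers[i](phase);
}

static void scsih_handler(int phase)
{
    printf("scsih: %s\n", phase == PRE_RESET ? "pre-reset" : "post-reset");
}

int main(void)
{
    handlers[3] = scsih_handler;    /* register one protocol driver */
    notify_all(PRE_RESET);          /* before the diag reset */
    notify_all(POST_RESET);         /* after recovery completes */
    return 0;
}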

@ -76,8 +76,8 @@
#define COPYRIGHT "Copyright (c) 1999-2008 " MODULEAUTHOR
#endif
#define MPT_LINUX_VERSION_COMMON "3.04.10"
#define MPT_LINUX_PACKAGE_NAME "@(#)mptlinux-3.04.09"
#define MPT_LINUX_VERSION_COMMON "3.04.12"
#define MPT_LINUX_PACKAGE_NAME "@(#)mptlinux-3.04.12"
#define WHAT_MAGIC_STRING "@" "(" "#" ")"
#define show_mptmod_ver(s,ver) \
@ -157,8 +157,9 @@
/*
* Try to keep these at 2^N-1
*/
#define MPT_FC_CAN_QUEUE 127
#define MPT_FC_CAN_QUEUE 1024
#define MPT_SCSI_CAN_QUEUE 127
#define MPT_SAS_CAN_QUEUE 127
/*
* Set the MAX_SGE value based on user input.
@ -879,23 +880,9 @@ typedef enum {
typedef struct _MPT_SCSI_HOST {
MPT_ADAPTER *ioc;
int port;
u32 pad0;
MPT_LOCAL_REPLY *pLocal; /* used for internal commands */
struct timer_list timer;
/* Pool of memory for holding SCpnts before doing
* OS callbacks. freeQ is the free pool.
*/
u8 negoNvram; /* DV disabled, nego NVRAM */
u8 pad1;
u8 rsvd[2];
MPT_FRAME_HDR *cmdPtr; /* Ptr to nonOS request */
struct scsi_cmnd *abortSCpnt;
MPT_LOCAL_REPLY localReply; /* internal cmd reply struct */
ushort sel_timeout[MPT_MAX_FC_DEVICES];
char *info_kbuf;
long last_queue_full;
u16 tm_iocstatus;
u16 spi_pending;
struct list_head target_reset_list;
} MPT_SCSI_HOST;


@ -1288,25 +1288,6 @@ mptfc_probe(struct pci_dev *pdev, const struct pci_device_id *id)
dprintk(ioc, printk(MYIOC_s_DEBUG_FMT "ScsiLookup @ %p\n",
ioc->name, ioc->ScsiLookup));
/* Clear the TM flags
*/
hd->abortSCpnt = NULL;
/* Clear the pointer used to store
* single-threaded commands, i.e., those
* issued during a bus scan, dv and
* configuration pages.
*/
hd->cmdPtr = NULL;
/* Initialize this SCSI Hosts' timers
* To use, set the timer expires field
* and add_timer
*/
init_timer(&hd->timer);
hd->timer.data = (unsigned long) hd;
hd->timer.function = mptscsih_timer_expired;
hd->last_queue_full = 0;
sh->transportt = mptfc_transport_template;


@ -72,6 +72,7 @@
*/
#define MPTSAS_RAID_CHANNEL 1
#define SAS_CONFIG_PAGE_TIMEOUT 30
MODULE_AUTHOR(MODULEAUTHOR);
MODULE_DESCRIPTION(my_NAME);
MODULE_LICENSE("GPL");
@ -324,7 +325,6 @@ mptsas_cleanup_fw_event_q(MPT_ADAPTER *ioc)
{
struct fw_event_work *fw_event, *next;
struct mptsas_target_reset_event *target_reset_list, *n;
u8 flush_q;
MPT_SCSI_HOST *hd = shost_priv(ioc->sh);
/* flush the target_reset_list */
@ -344,15 +344,10 @@ mptsas_cleanup_fw_event_q(MPT_ADAPTER *ioc)
!ioc->fw_event_q || in_interrupt())
return;
flush_q = 0;
list_for_each_entry_safe(fw_event, next, &ioc->fw_event_list, list) {
if (cancel_delayed_work(&fw_event->work))
mptsas_free_fw_event(ioc, fw_event);
else
flush_q = 1;
}
if (flush_q)
flush_workqueue(ioc->fw_event_q);
}
@ -661,7 +656,7 @@ mptsas_add_device_component_starget_ir(MPT_ADAPTER *ioc,
cfg.pageAddr = starget->id;
cfg.cfghdr.hdr = &hdr;
cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
cfg.timeout = 10;
cfg.timeout = SAS_CONFIG_PAGE_TIMEOUT;
if (mpt_config(ioc, &cfg) != 0)
goto out;
@ -851,7 +846,13 @@ mptsas_setup_wide_ports(MPT_ADAPTER *ioc, struct mptsas_portinfo *port_info)
port_details->num_phys--;
port_details->phy_bitmask &= ~ (1 << phy_info->phy_id);
memset(&phy_info->attached, 0, sizeof(struct mptsas_devinfo));
sas_port_delete_phy(port_details->port, phy_info->phy);
if (phy_info->phy) {
devtprintk(ioc, dev_printk(KERN_DEBUG,
&phy_info->phy->dev, MYIOC_s_FMT
"delete phy %d, phy-obj (0x%p)\n", ioc->name,
phy_info->phy_id, phy_info->phy));
sas_port_delete_phy(port_details->port, phy_info->phy);
}
phy_info->port_details = NULL;
}
@ -1272,7 +1273,6 @@ mptsas_ioc_reset(MPT_ADAPTER *ioc, int reset_phase)
}
mptsas_cleanup_fw_event_q(ioc);
mptsas_queue_rescan(ioc);
mptsas_fw_event_on(ioc);
break;
default:
break;
@ -1318,7 +1318,7 @@ mptsas_sas_enclosure_pg0(MPT_ADAPTER *ioc, struct mptsas_enclosure *enclosure,
cfg.pageAddr = form + form_specific;
cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
cfg.dir = 0; /* read */
cfg.timeout = 10;
cfg.timeout = SAS_CONFIG_PAGE_TIMEOUT;
error = mpt_config(ioc, &cfg);
if (error)
@ -1592,6 +1592,7 @@ mptsas_firmware_event_work(struct work_struct *work)
mptsas_scan_sas_topology(ioc);
ioc->in_rescan = 0;
mptsas_free_fw_event(ioc, fw_event);
mptsas_fw_event_on(ioc);
return;
}
@ -1891,7 +1892,7 @@ static struct scsi_host_template mptsas_driver_template = {
.eh_bus_reset_handler = mptscsih_bus_reset,
.eh_host_reset_handler = mptscsih_host_reset,
.bios_param = mptscsih_bios_param,
.can_queue = MPT_FC_CAN_QUEUE,
.can_queue = MPT_SAS_CAN_QUEUE,
.this_id = -1,
.sg_tablesize = MPT_SCSI_SG_DEPTH,
.max_sectors = 8192,
@ -1926,7 +1927,7 @@ static int mptsas_get_linkerrors(struct sas_phy *phy)
cfg.pageAddr = phy->identify.phy_identifier;
cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
cfg.dir = 0; /* read */
cfg.timeout = 10;
cfg.timeout = SAS_CONFIG_PAGE_TIMEOUT;
error = mpt_config(ioc, &cfg);
if (error)
@ -2278,7 +2279,7 @@ mptsas_sas_io_unit_pg0(MPT_ADAPTER *ioc, struct mptsas_portinfo *port_info)
cfg.pageAddr = 0;
cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
cfg.dir = 0; /* read */
cfg.timeout = 10;
cfg.timeout = SAS_CONFIG_PAGE_TIMEOUT;
error = mpt_config(ioc, &cfg);
if (error)
@ -2349,7 +2350,7 @@ mptsas_sas_io_unit_pg1(MPT_ADAPTER *ioc)
cfg.cfghdr.ehdr = &hdr;
cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
cfg.timeout = 10;
cfg.timeout = SAS_CONFIG_PAGE_TIMEOUT;
cfg.cfghdr.ehdr->PageType = MPI_CONFIG_PAGETYPE_EXTENDED;
cfg.cfghdr.ehdr->ExtPageType = MPI_CONFIG_EXTPAGETYPE_SAS_IO_UNIT;
cfg.cfghdr.ehdr->PageVersion = MPI_SASIOUNITPAGE1_PAGEVERSION;
@ -2411,7 +2412,7 @@ mptsas_sas_phy_pg0(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info,
cfg.cfghdr.ehdr = &hdr;
cfg.dir = 0; /* read */
cfg.timeout = 10;
cfg.timeout = SAS_CONFIG_PAGE_TIMEOUT;
/* Get Phy Pg 0 for each Phy. */
cfg.physAddr = -1;
@ -2479,7 +2480,7 @@ mptsas_sas_device_pg0(MPT_ADAPTER *ioc, struct mptsas_devinfo *device_info,
cfg.physAddr = -1;
cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
cfg.dir = 0; /* read */
cfg.timeout = 10;
cfg.timeout = SAS_CONFIG_PAGE_TIMEOUT;
memset(device_info, 0, sizeof(struct mptsas_devinfo));
error = mpt_config(ioc, &cfg);
@ -2554,7 +2555,7 @@ mptsas_sas_expander_pg0(MPT_ADAPTER *ioc, struct mptsas_portinfo *port_info,
cfg.pageAddr = form + form_specific;
cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
cfg.dir = 0; /* read */
cfg.timeout = 10;
cfg.timeout = SAS_CONFIG_PAGE_TIMEOUT;
memset(port_info, 0, sizeof(struct mptsas_portinfo));
error = mpt_config(ioc, &cfg);
@ -2635,7 +2636,7 @@ mptsas_sas_expander_pg1(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info,
cfg.pageAddr = form + form_specific;
cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
cfg.dir = 0; /* read */
cfg.timeout = 10;
cfg.timeout = SAS_CONFIG_PAGE_TIMEOUT;
error = mpt_config(ioc, &cfg);
if (error)
@ -3307,6 +3308,7 @@ mptsas_send_expander_event(struct fw_event_work *fw_event)
expander_data = (MpiEventDataSasExpanderStatusChange_t *)
fw_event->event_data;
memcpy(&sas_address, &expander_data->SASAddress, sizeof(__le64));
sas_address = le64_to_cpu(sas_address);
port_info = mptsas_find_portinfo_by_sas_address(ioc, sas_address);
if (expander_data->ReasonCode == MPI_EVENT_SAS_EXP_RC_ADDED) {
@ -4760,10 +4762,9 @@ mptsas_probe(struct pci_dev *pdev, const struct pci_device_id *id)
/* set 16 byte cdb's */
sh->max_cmd_len = 16;
sh->max_id = ioc->pfacts[0].PortSCSIID;
sh->can_queue = min_t(int, ioc->req_depth - 10, sh->can_queue);
sh->max_id = -1;
sh->max_lun = max_lun;
sh->transportt = mptsas_transport_template;
/* Required entry.
@ -4821,25 +4822,6 @@ mptsas_probe(struct pci_dev *pdev, const struct pci_device_id *id)
dprintk(ioc, printk(MYIOC_s_DEBUG_FMT "ScsiLookup @ %p\n",
ioc->name, ioc->ScsiLookup));
/* Clear the TM flags
*/
hd->abortSCpnt = NULL;
/* Clear the pointer used to store
* single-threaded commands, i.e., those
* issued during a bus scan, dv and
* configuration pages.
*/
hd->cmdPtr = NULL;
/* Initialize this SCSI Hosts' timers
* To use, set the timer expires field
* and add_timer
*/
init_timer(&hd->timer);
hd->timer.data = (unsigned long) hd;
hd->timer.function = mptscsih_timer_expired;
ioc->sas_data.ptClear = mpt_pt_clear;
hd->last_queue_full = 0;

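The expander status-change hunk earlier in this file adds a le64_to_cpu() after copying the SAS address out of the event payload — this is the "NULL pointer on big endian systems" fix from the commit list: the firmware reports the address little-endian, so without the swap a big-endian host searches for a byte-reversed address and mptsas_find_portinfo_by_sas_address() finds nothing. A user-space sketch of the conversion (illustrative value; prints the same address on either host endianness):

#include <stdint.h>
#include <string.h>
#include <stdio.h>

static uint64_t le64_to_cpu_sketch(uint64_t v)
{
#if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
    return __builtin_bswap64(v);    /* big-endian host: swap the wire value */
#else
    return v;                       /* little-endian host: already in CPU order */
#endif
}

int main(void)
{
    /* a SAS address as it appears in the little-endian event payload */
    uint8_t wire[8] = { 0xef, 0xcd, 0xab, 0x89, 0x67, 0x45, 0x23, 0x01 };
    uint64_t sas_address;

    memcpy(&sas_address, wire, sizeof(sas_address));
    sas_address = le64_to_cpu_sketch(sas_address);
    printf("sas_address = 0x%016llx\n", (unsigned long long)sas_address);
    return 0;
}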

@ -628,6 +628,16 @@ mptscsih_io_done(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr)
return 1;
}
if (ioc->bus_type == SAS) {
VirtDevice *vdevice = sc->device->hostdata;
if (!vdevice || !vdevice->vtarget ||
vdevice->vtarget->deleted) {
sc->result = DID_NO_CONNECT << 16;
goto out;
}
}
sc->host_scribble = NULL;
sc->result = DID_OK << 16; /* Set default reply as OK */
pScsiReq = (SCSIIORequest_t *) mf;
@ -689,6 +699,7 @@ mptscsih_io_done(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr)
switch(status) {
case MPI_IOCSTATUS_BUSY: /* 0x0002 */
case MPI_IOCSTATUS_INSUFFICIENT_RESOURCES: /* 0x0006 */
/* CHECKME!
* Maybe: DRIVER_BUSY | SUGGEST_RETRY | DID_SOFT_ERROR (retry)
* But not: DID_BUS_BUSY lest one risk
@ -872,7 +883,6 @@ mptscsih_io_done(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr)
case MPI_IOCSTATUS_INVALID_SGL: /* 0x0003 */
case MPI_IOCSTATUS_INTERNAL_ERROR: /* 0x0004 */
case MPI_IOCSTATUS_RESERVED: /* 0x0005 */
case MPI_IOCSTATUS_INSUFFICIENT_RESOURCES: /* 0x0006 */
case MPI_IOCSTATUS_INVALID_FIELD: /* 0x0007 */
case MPI_IOCSTATUS_INVALID_STATE: /* 0x0008 */
case MPI_IOCSTATUS_SCSI_IO_DATA_ERROR: /* 0x0046 */
@ -892,7 +902,7 @@ mptscsih_io_done(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr)
#endif
} /* end of address reply case */
out:
/* Unmap the DMA buffers, if any. */
scsi_dma_unmap(sc);
@ -1729,9 +1739,6 @@ mptscsih_abort(struct scsi_cmnd * SCpnt)
*/
mf = MPT_INDEX_2_MFPTR(ioc, scpnt_idx);
ctx2abort = mf->u.frame.hwhdr.msgctxu.MsgContext;
hd->abortSCpnt = SCpnt;
retval = mptscsih_IssueTaskMgmt(hd,
MPI_SCSITASKMGMT_TASKTYPE_ABORT_TASK,
vdevice->vtarget->channel,
@ -2293,7 +2300,10 @@ mptscsih_change_queue_depth(struct scsi_device *sdev, int qdepth)
else
max_depth = MPT_SCSI_CMD_PER_DEV_LOW;
} else
max_depth = MPT_SCSI_CMD_PER_DEV_HIGH;
max_depth = ioc->sh->can_queue;
if (!sdev->tagged_supported)
max_depth = 1;
if (qdepth > max_depth)
qdepth = max_depth;
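With MPT_SAS_CAN_QUEUE and the min_t() clamp in mptsas_probe(), the per-device queue depth is now bounded by what the host can actually queue rather than a fixed per-device constant, and untagged devices drop to a depth of one. A simplified user-space sketch of the clamp rule (illustrative; the FC NVRAM cases above are ignored):

#include <stdio.h>

static int clamp_queue_depth(int requested, int can_queue, int tagged_supported)
{
    int max_depth = tagged_supported ? can_queue : 1;
    return requested > max_depth ? max_depth : requested;
}

int main(void)
{
    printf("%d\n", clamp_queue_depth(254, 127, 1)); /* -> 127 */
    printf("%d\n", clamp_queue_depth(254, 127, 0)); /* -> 1 */
    return 0;
}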
@ -2627,50 +2637,6 @@ mptscsih_scandv_complete(MPT_ADAPTER *ioc, MPT_FRAME_HDR *req,
return 1;
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/* mptscsih_timer_expired - Call back for timer process.
* Used only for dv functionality.
* @data: Pointer to MPT_SCSI_HOST recast as an unsigned long
*
*/
void
mptscsih_timer_expired(unsigned long data)
{
MPT_SCSI_HOST *hd = (MPT_SCSI_HOST *) data;
MPT_ADAPTER *ioc = hd->ioc;
ddvprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Timer Expired! Cmd %p\n", ioc->name, hd->cmdPtr));
if (hd->cmdPtr) {
MPIHeader_t *cmd = (MPIHeader_t *)hd->cmdPtr;
if (cmd->Function == MPI_FUNCTION_SCSI_IO_REQUEST) {
/* Desire to issue a task management request here.
* TM requests MUST be single threaded.
* If old eh code and no TM current, issue request.
* If new eh code, do nothing. Wait for OS cmd timeout
* for bus reset.
*/
} else {
/* Perform a FW reload */
if (mpt_HardResetHandler(ioc, NO_SLEEP) < 0) {
printk(MYIOC_s_WARN_FMT "Firmware Reload FAILED!\n", ioc->name);
}
}
} else {
/* This should NEVER happen */
printk(MYIOC_s_WARN_FMT "Null cmdPtr!!!!\n", ioc->name);
}
/* No more processing.
* TM call will generate an interrupt for SCSI TM Management.
* The FW will reply to all outstanding commands, callback will finish cleanup.
* Hard reset clean-up will free all resources.
*/
ddvprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Timer Expired Complete!\n", ioc->name));
return;
}
/**
* mptscsih_get_completion_code -
@ -3265,6 +3231,5 @@ EXPORT_SYMBOL(mptscsih_scandv_complete);
EXPORT_SYMBOL(mptscsih_event_process);
EXPORT_SYMBOL(mptscsih_ioc_reset);
EXPORT_SYMBOL(mptscsih_change_queue_depth);
EXPORT_SYMBOL(mptscsih_timer_expired);
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/


@ -129,7 +129,6 @@ extern int mptscsih_scandv_complete(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRA
extern int mptscsih_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply);
extern int mptscsih_ioc_reset(MPT_ADAPTER *ioc, int post_reset);
extern int mptscsih_change_queue_depth(struct scsi_device *sdev, int qdepth);
extern void mptscsih_timer_expired(unsigned long data);
extern u8 mptscsih_raid_id_to_num(MPT_ADAPTER *ioc, u8 channel, u8 id);
extern int mptscsih_is_phys_disk(MPT_ADAPTER *ioc, u8 channel, u8 id);
extern struct device_attribute *mptscsih_host_attrs[];


@ -1472,28 +1472,7 @@ mptspi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
dprintk(ioc, printk(MYIOC_s_DEBUG_FMT "ScsiLookup @ %p\n",
ioc->name, ioc->ScsiLookup));
/* Clear the TM flags
*/
hd->abortSCpnt = NULL;
/* Clear the pointer used to store
* single-threaded commands, i.e., those
* issued during a bus scan, dv and
* configuration pages.
*/
hd->cmdPtr = NULL;
/* Initialize this SCSI Hosts' timers
* To use, set the timer expires field
* and add_timer
*/
init_timer(&hd->timer);
hd->timer.data = (unsigned long) hd;
hd->timer.function = mptscsih_timer_expired;
ioc->spi_data.Saf_Te = mpt_saf_te;
hd->negoNvram = MPT_SCSICFG_USE_NVRAM;
ddvprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"saf_te %x\n",
ioc->name,


@ -33,24 +33,44 @@ static DEFINE_MUTEX(container_list_lock);
static struct class enclosure_class;
/**
* enclosure_find - find an enclosure given a device
* @dev: the device to find for
* enclosure_find - find an enclosure given a parent device
* @dev: the parent to match against
* @start: Optional enclosure device to start from (NULL if none)
*
* Looks through the list of registered enclosures to see
* if it can find a match for a device. Returns NULL if no
* enclosure is found. Obtains a reference to the enclosure class
* device which must be released with device_put().
* Looks through the list of registered enclosures to find all those
* with @dev as a parent. Returns NULL if no enclosure is
* found. @start can be used as a starting point to obtain multiple
* enclosures per parent (should begin with NULL and then be set to
* each returned enclosure device). Obtains a reference to the
* enclosure class device which must be released with device_put().
* If @start is not NULL, a reference must be taken on it which is
* released before returning (this allows a loop through all
* enclosures to exit with only the reference on the enclosure of
* interest held). Note that the @dev may correspond to the actual
* device housing the enclosure, in which case no iteration via @start
* is required.
*/
struct enclosure_device *enclosure_find(struct device *dev)
struct enclosure_device *enclosure_find(struct device *dev,
struct enclosure_device *start)
{
struct enclosure_device *edev;
mutex_lock(&container_list_lock);
list_for_each_entry(edev, &container_list, node) {
if (edev->edev.parent == dev) {
get_device(&edev->edev);
mutex_unlock(&container_list_lock);
return edev;
edev = list_prepare_entry(start, &container_list, node);
if (start)
put_device(&start->edev);
list_for_each_entry_continue(edev, &container_list, node) {
struct device *parent = edev->edev.parent;
/* parent might not be immediate, so iterate up to
* the root of the tree if necessary */
while (parent) {
if (parent == dev) {
get_device(&edev->edev);
mutex_unlock(&container_list_lock);
return edev;
}
parent = parent->parent;
}
}
mutex_unlock(&container_list_lock);
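The new @start argument turns enclosure_find() into a cursor-style iterator, and the reference rules spelled out in the comment let a caller leave the loop holding only the reference it wants. A kernel-style usage sketch of the documented convention (not taken from the patch; the predicate is hypothetical):

/* given some struct device *dev, visit every enclosure above it */
struct enclosure_device *edev = NULL;

while ((edev = enclosure_find(dev, edev)) != NULL) {
    /* enclosure_find() dropped our reference on the previous edev
     * and handed us one on this edev */
    if (enclosure_is_interesting(edev))     /* hypothetical predicate */
        break;                              /* keep only this reference */
}
if (edev) {
    /* ... use the enclosure ... */
    put_device(&edev->edev);
}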
@ -295,6 +315,9 @@ int enclosure_add_device(struct enclosure_device *edev, int component,
cdev = &edev->component[component];
if (cdev->dev == dev)
return -EEXIST;
if (cdev->dev)
enclosure_remove_links(cdev);
@ -312,19 +335,25 @@ EXPORT_SYMBOL_GPL(enclosure_add_device);
* Returns zero on success or an error.
*
*/
int enclosure_remove_device(struct enclosure_device *edev, int component)
int enclosure_remove_device(struct enclosure_device *edev, struct device *dev)
{
struct enclosure_component *cdev;
int i;
if (!edev || component >= edev->components)
if (!edev || !dev)
return -EINVAL;
cdev = &edev->component[component];
device_del(&cdev->cdev);
put_device(cdev->dev);
cdev->dev = NULL;
return device_add(&cdev->cdev);
for (i = 0; i < edev->components; i++) {
cdev = &edev->component[i];
if (cdev->dev == dev) {
enclosure_remove_links(cdev);
device_del(&cdev->cdev);
put_device(dev);
cdev->dev = NULL;
return device_add(&cdev->cdev);
}
}
return -ENODEV;
}
EXPORT_SYMBOL_GPL(enclosure_remove_device);


@ -42,6 +42,12 @@ static char *init_device;
module_param_named(device, init_device, charp, 0400);
MODULE_PARM_DESC(device, "specify initial device");
static struct kmem_cache *zfcp_cache_hw_align(const char *name,
unsigned long size)
{
return kmem_cache_create(name, size, roundup_pow_of_two(size), 0, NULL);
}
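zfcp_cache_hw_align() replaces the driver's old open-coded alignment loop (removed further down) with roundup_pow_of_two(); both yield the smallest power of two greater than or equal to the object size, so slab objects never straddle a boundary of their own rounded size — which is what these hardware-adjacent buffers require. A user-space sketch of the computation:

#include <stdio.h>

/* Illustrative equivalent of roundup_pow_of_two() and of the removed
 * "while ((size - align) > 0) align <<= 1;" loop. */
static unsigned long roundup_pow_of_two_sketch(unsigned long size)
{
    unsigned long align = 1;

    while (align < size)
        align <<= 1;
    return align;
}

int main(void)
{
    printf("%lu\n", roundup_pow_of_two_sketch(12)); /* -> 16 */
    printf("%lu\n", roundup_pow_of_two_sketch(64)); /* -> 64 */
    return 0;
}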
static int zfcp_reqlist_alloc(struct zfcp_adapter *adapter)
{
int idx;
@ -78,7 +84,7 @@ static void __init zfcp_init_device_configure(char *busid, u64 wwpn, u64 lun)
struct zfcp_port *port;
struct zfcp_unit *unit;
down(&zfcp_data.config_sema);
mutex_lock(&zfcp_data.config_mutex);
read_lock_irq(&zfcp_data.config_lock);
adapter = zfcp_get_adapter_by_busid(busid);
if (adapter)
@ -93,31 +99,23 @@ static void __init zfcp_init_device_configure(char *busid, u64 wwpn, u64 lun)
unit = zfcp_unit_enqueue(port, lun);
if (IS_ERR(unit))
goto out_unit;
up(&zfcp_data.config_sema);
mutex_unlock(&zfcp_data.config_mutex);
ccw_device_set_online(adapter->ccw_device);
zfcp_erp_wait(adapter);
flush_work(&unit->scsi_work);
down(&zfcp_data.config_sema);
mutex_lock(&zfcp_data.config_mutex);
zfcp_unit_put(unit);
out_unit:
zfcp_port_put(port);
out_port:
zfcp_adapter_put(adapter);
out_adapter:
up(&zfcp_data.config_sema);
mutex_unlock(&zfcp_data.config_mutex);
return;
}
static struct kmem_cache *zfcp_cache_create(int size, char *name)
{
int align = 1;
while ((size - align) > 0)
align <<= 1;
return kmem_cache_create(name , size, align, 0, NULL);
}
static void __init zfcp_init_device_setup(char *devstr)
{
char *token;
@ -158,24 +156,27 @@ static int __init zfcp_module_init(void)
{
int retval = -ENOMEM;
zfcp_data.fsf_req_qtcb_cache = zfcp_cache_create(
sizeof(struct zfcp_fsf_req_qtcb), "zfcp_fsf");
if (!zfcp_data.fsf_req_qtcb_cache)
zfcp_data.gpn_ft_cache = zfcp_cache_hw_align("zfcp_gpn",
sizeof(struct ct_iu_gpn_ft_req));
if (!zfcp_data.gpn_ft_cache)
goto out;
zfcp_data.sr_buffer_cache = zfcp_cache_create(
sizeof(struct fsf_status_read_buffer), "zfcp_sr");
zfcp_data.qtcb_cache = zfcp_cache_hw_align("zfcp_qtcb",
sizeof(struct fsf_qtcb));
if (!zfcp_data.qtcb_cache)
goto out_qtcb_cache;
zfcp_data.sr_buffer_cache = zfcp_cache_hw_align("zfcp_sr",
sizeof(struct fsf_status_read_buffer));
if (!zfcp_data.sr_buffer_cache)
goto out_sr_cache;
zfcp_data.gid_pn_cache = zfcp_cache_create(
sizeof(struct zfcp_gid_pn_data), "zfcp_gid");
zfcp_data.gid_pn_cache = zfcp_cache_hw_align("zfcp_gid",
sizeof(struct zfcp_gid_pn_data));
if (!zfcp_data.gid_pn_cache)
goto out_gid_cache;
zfcp_data.work_queue = create_singlethread_workqueue("zfcp_wq");
sema_init(&zfcp_data.config_sema, 1);
mutex_init(&zfcp_data.config_mutex);
rwlock_init(&zfcp_data.config_lock);
zfcp_data.scsi_transport_template =
@ -209,7 +210,9 @@ out_transport:
out_gid_cache:
kmem_cache_destroy(zfcp_data.sr_buffer_cache);
out_sr_cache:
kmem_cache_destroy(zfcp_data.fsf_req_qtcb_cache);
kmem_cache_destroy(zfcp_data.qtcb_cache);
out_qtcb_cache:
kmem_cache_destroy(zfcp_data.gpn_ft_cache);
out:
return retval;
}
@ -263,7 +266,7 @@ static void zfcp_sysfs_unit_release(struct device *dev)
* @port: pointer to port where unit is added
* @fcp_lun: FCP LUN of unit to be enqueued
* Returns: pointer to enqueued unit on success, ERR_PTR on error
* Locks: config_sema must be held to serialize changes to the unit list
* Locks: config_mutex must be held to serialize changes to the unit list
*
* Sets up some unit internal structures and creates sysfs entry.
*/
@ -271,6 +274,13 @@ struct zfcp_unit *zfcp_unit_enqueue(struct zfcp_port *port, u64 fcp_lun)
{
struct zfcp_unit *unit;
read_lock_irq(&zfcp_data.config_lock);
if (zfcp_get_unit_by_lun(port, fcp_lun)) {
read_unlock_irq(&zfcp_data.config_lock);
return ERR_PTR(-EINVAL);
}
read_unlock_irq(&zfcp_data.config_lock);
unit = kzalloc(sizeof(struct zfcp_unit), GFP_KERNEL);
if (!unit)
return ERR_PTR(-ENOMEM);
@ -282,8 +292,11 @@ struct zfcp_unit *zfcp_unit_enqueue(struct zfcp_port *port, u64 fcp_lun)
unit->port = port;
unit->fcp_lun = fcp_lun;
dev_set_name(&unit->sysfs_device, "0x%016llx",
(unsigned long long) fcp_lun);
if (dev_set_name(&unit->sysfs_device, "0x%016llx",
(unsigned long long) fcp_lun)) {
kfree(unit);
return ERR_PTR(-ENOMEM);
}
unit->sysfs_device.parent = &port->sysfs_device;
unit->sysfs_device.release = zfcp_sysfs_unit_release;
dev_set_drvdata(&unit->sysfs_device, unit);
@ -299,20 +312,15 @@ struct zfcp_unit *zfcp_unit_enqueue(struct zfcp_port *port, u64 fcp_lun)
unit->latencies.cmd.channel.min = 0xFFFFFFFF;
unit->latencies.cmd.fabric.min = 0xFFFFFFFF;
read_lock_irq(&zfcp_data.config_lock);
if (zfcp_get_unit_by_lun(port, fcp_lun)) {
read_unlock_irq(&zfcp_data.config_lock);
goto err_out_free;
if (device_register(&unit->sysfs_device)) {
put_device(&unit->sysfs_device);
return ERR_PTR(-EINVAL);
}
read_unlock_irq(&zfcp_data.config_lock);
if (device_register(&unit->sysfs_device))
goto err_out_free;
if (sysfs_create_group(&unit->sysfs_device.kobj,
&zfcp_sysfs_unit_attrs)) {
device_unregister(&unit->sysfs_device);
return ERR_PTR(-EIO);
return ERR_PTR(-EINVAL);
}
zfcp_unit_get(unit);
@ -327,10 +335,6 @@ struct zfcp_unit *zfcp_unit_enqueue(struct zfcp_port *port, u64 fcp_lun)
zfcp_port_get(port);
return unit;
err_out_free:
kfree(unit);
return ERR_PTR(-EINVAL);
}
/**
@ -353,37 +357,47 @@ void zfcp_unit_dequeue(struct zfcp_unit *unit)
static int zfcp_allocate_low_mem_buffers(struct zfcp_adapter *adapter)
{
/* must only be called with zfcp_data.config_sema taken */
adapter->pool.fsf_req_erp =
mempool_create_slab_pool(1, zfcp_data.fsf_req_qtcb_cache);
if (!adapter->pool.fsf_req_erp)
/* must only be called with zfcp_data.config_mutex taken */
adapter->pool.erp_req =
mempool_create_kmalloc_pool(1, sizeof(struct zfcp_fsf_req));
if (!adapter->pool.erp_req)
return -ENOMEM;
adapter->pool.fsf_req_scsi =
mempool_create_slab_pool(1, zfcp_data.fsf_req_qtcb_cache);
if (!adapter->pool.fsf_req_scsi)
adapter->pool.gid_pn_req =
mempool_create_kmalloc_pool(1, sizeof(struct zfcp_fsf_req));
if (!adapter->pool.gid_pn_req)
return -ENOMEM;
adapter->pool.fsf_req_abort =
mempool_create_slab_pool(1, zfcp_data.fsf_req_qtcb_cache);
if (!adapter->pool.fsf_req_abort)
adapter->pool.scsi_req =
mempool_create_kmalloc_pool(1, sizeof(struct zfcp_fsf_req));
if (!adapter->pool.scsi_req)
return -ENOMEM;
adapter->pool.fsf_req_status_read =
adapter->pool.scsi_abort =
mempool_create_kmalloc_pool(1, sizeof(struct zfcp_fsf_req));
if (!adapter->pool.scsi_abort)
return -ENOMEM;
adapter->pool.status_read_req =
mempool_create_kmalloc_pool(FSF_STATUS_READS_RECOM,
sizeof(struct zfcp_fsf_req));
if (!adapter->pool.fsf_req_status_read)
if (!adapter->pool.status_read_req)
return -ENOMEM;
adapter->pool.data_status_read =
adapter->pool.qtcb_pool =
mempool_create_slab_pool(4, zfcp_data.qtcb_cache);
if (!adapter->pool.qtcb_pool)
return -ENOMEM;
adapter->pool.status_read_data =
mempool_create_slab_pool(FSF_STATUS_READS_RECOM,
zfcp_data.sr_buffer_cache);
if (!adapter->pool.data_status_read)
if (!adapter->pool.status_read_data)
return -ENOMEM;
adapter->pool.data_gid_pn =
adapter->pool.gid_pn_data =
mempool_create_slab_pool(1, zfcp_data.gid_pn_cache);
if (!adapter->pool.data_gid_pn)
if (!adapter->pool.gid_pn_data)
return -ENOMEM;
return 0;
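zfcp_allocate_low_mem_buffers() pre-arms one emergency element per request type so that error recovery can still build FSF requests while the system is out of memory. A kernel-style sketch of the mempool pattern used above (not from the patch; error handling trimmed):

/* a pool guaranteeing one pre-allocated element of the given size */
mempool_t *pool = mempool_create_kmalloc_pool(1, sizeof(struct zfcp_fsf_req));
if (!pool)
    return -ENOMEM;

/* falls back to the reserved element when kmalloc() fails; with a
 * sleeping gfp mask it waits for an element instead of returning NULL */
struct zfcp_fsf_req *req = mempool_alloc(pool, GFP_KERNEL);
/* ... build and send the request ... */
mempool_free(req, pool);    /* refills the reserve */
mempool_destroy(pool);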
@ -391,19 +405,21 @@ static int zfcp_allocate_low_mem_buffers(struct zfcp_adapter *adapter)
static void zfcp_free_low_mem_buffers(struct zfcp_adapter *adapter)
{
/* zfcp_data.config_sema must be held */
if (adapter->pool.fsf_req_erp)
mempool_destroy(adapter->pool.fsf_req_erp);
if (adapter->pool.fsf_req_scsi)
mempool_destroy(adapter->pool.fsf_req_scsi);
if (adapter->pool.fsf_req_abort)
mempool_destroy(adapter->pool.fsf_req_abort);
if (adapter->pool.fsf_req_status_read)
mempool_destroy(adapter->pool.fsf_req_status_read);
if (adapter->pool.data_status_read)
mempool_destroy(adapter->pool.data_status_read);
if (adapter->pool.data_gid_pn)
mempool_destroy(adapter->pool.data_gid_pn);
/* zfcp_data.config_mutex must be held */
if (adapter->pool.erp_req)
mempool_destroy(adapter->pool.erp_req);
if (adapter->pool.scsi_req)
mempool_destroy(adapter->pool.scsi_req);
if (adapter->pool.scsi_abort)
mempool_destroy(adapter->pool.scsi_abort);
if (adapter->pool.qtcb_pool)
mempool_destroy(adapter->pool.qtcb_pool);
if (adapter->pool.status_read_req)
mempool_destroy(adapter->pool.status_read_req);
if (adapter->pool.status_read_data)
mempool_destroy(adapter->pool.status_read_data);
if (adapter->pool.gid_pn_data)
mempool_destroy(adapter->pool.gid_pn_data);
}
/**
@ -418,7 +434,7 @@ static void zfcp_free_low_mem_buffers(struct zfcp_adapter *adapter)
int zfcp_status_read_refill(struct zfcp_adapter *adapter)
{
while (atomic_read(&adapter->stat_miss) > 0)
if (zfcp_fsf_status_read(adapter)) {
if (zfcp_fsf_status_read(adapter->qdio)) {
if (atomic_read(&adapter->stat_miss) >= 16) {
zfcp_erp_adapter_reopen(adapter, 0, "axsref1",
NULL);
@ -446,6 +462,27 @@ static void zfcp_print_sl(struct seq_file *m, struct service_level *sl)
adapter->fsf_lic_version);
}
static int zfcp_setup_adapter_work_queue(struct zfcp_adapter *adapter)
{
char name[TASK_COMM_LEN];
snprintf(name, sizeof(name), "zfcp_q_%s",
dev_name(&adapter->ccw_device->dev));
adapter->work_queue = create_singlethread_workqueue(name);
if (adapter->work_queue)
return 0;
return -ENOMEM;
}
static void zfcp_destroy_adapter_work_queue(struct zfcp_adapter *adapter)
{
if (adapter->work_queue)
destroy_workqueue(adapter->work_queue);
adapter->work_queue = NULL;
}
/**
* zfcp_adapter_enqueue - enqueue a new adapter to the list
* @ccw_device: pointer to the struct cc_device
@ -455,7 +492,7 @@ static void zfcp_print_sl(struct seq_file *m, struct service_level *sl)
* Enqueues an adapter at the end of the adapter list in the driver data.
* All adapter internal structures are set up.
* Proc-fs entries are also created.
* locks: config_sema must be held to serialise changes to the adapter list
* locks: config_mutex must be held to serialize changes to the adapter list
*/
int zfcp_adapter_enqueue(struct ccw_device *ccw_device)
{
@ -463,37 +500,37 @@ int zfcp_adapter_enqueue(struct ccw_device *ccw_device)
/*
* Note: It is safe to release the list_lock, as any list changes
* are protected by the config_sema, which must be held to get here
* are protected by the config_mutex, which must be held to get here
*/
adapter = kzalloc(sizeof(struct zfcp_adapter), GFP_KERNEL);
if (!adapter)
return -ENOMEM;
adapter->gs = kzalloc(sizeof(struct zfcp_wka_ports), GFP_KERNEL);
if (!adapter->gs) {
kfree(adapter);
return -ENOMEM;
}
ccw_device->handler = NULL;
adapter->ccw_device = ccw_device;
atomic_set(&adapter->refcount, 0);
if (zfcp_qdio_allocate(adapter))
goto qdio_allocate_failed;
if (zfcp_qdio_setup(adapter))
goto qdio_failed;
if (zfcp_allocate_low_mem_buffers(adapter))
goto failed_low_mem_buffers;
goto low_mem_buffers_failed;
if (zfcp_reqlist_alloc(adapter))
goto failed_low_mem_buffers;
goto low_mem_buffers_failed;
if (zfcp_adapter_debug_register(adapter))
if (zfcp_dbf_adapter_register(adapter))
goto debug_register_failed;
if (zfcp_setup_adapter_work_queue(adapter))
goto work_queue_failed;
if (zfcp_fc_gs_setup(adapter))
goto generic_services_failed;
init_waitqueue_head(&adapter->remove_wq);
init_waitqueue_head(&adapter->erp_thread_wqh);
init_waitqueue_head(&adapter->erp_ready_wq);
init_waitqueue_head(&adapter->erp_done_wqh);
INIT_LIST_HEAD(&adapter->port_list_head);
@ -502,20 +539,14 @@ int zfcp_adapter_enqueue(struct ccw_device *ccw_device)
spin_lock_init(&adapter->req_list_lock);
spin_lock_init(&adapter->hba_dbf_lock);
spin_lock_init(&adapter->san_dbf_lock);
spin_lock_init(&adapter->scsi_dbf_lock);
spin_lock_init(&adapter->rec_dbf_lock);
spin_lock_init(&adapter->req_q_lock);
spin_lock_init(&adapter->qdio_stat_lock);
rwlock_init(&adapter->erp_lock);
rwlock_init(&adapter->abort_lock);
sema_init(&adapter->erp_ready_sem, 0);
if (zfcp_erp_thread_setup(adapter))
goto erp_thread_failed;
INIT_WORK(&adapter->stat_work, _zfcp_status_read_scheduler);
INIT_WORK(&adapter->scan_work, _zfcp_scan_ports_later);
INIT_WORK(&adapter->scan_work, _zfcp_fc_scan_ports_later);
adapter->service_level.seq_print = zfcp_print_sl;
@ -529,20 +560,25 @@ int zfcp_adapter_enqueue(struct ccw_device *ccw_device)
goto sysfs_failed;
atomic_clear_mask(ZFCP_STATUS_COMMON_REMOVE, &adapter->status);
zfcp_fc_wka_ports_init(adapter);
if (!zfcp_adapter_scsi_register(adapter))
return 0;
sysfs_failed:
zfcp_adapter_debug_unregister(adapter);
zfcp_erp_thread_kill(adapter);
erp_thread_failed:
zfcp_fc_gs_destroy(adapter);
generic_services_failed:
zfcp_destroy_adapter_work_queue(adapter);
work_queue_failed:
zfcp_dbf_adapter_unregister(adapter->dbf);
debug_register_failed:
dev_set_drvdata(&ccw_device->dev, NULL);
kfree(adapter->req_list);
failed_low_mem_buffers:
low_mem_buffers_failed:
zfcp_free_low_mem_buffers(adapter);
qdio_allocate_failed:
zfcp_qdio_free(adapter);
qdio_failed:
zfcp_qdio_destroy(adapter->qdio);
kfree(adapter);
return -ENOMEM;
}
@ -559,6 +595,7 @@ void zfcp_adapter_dequeue(struct zfcp_adapter *adapter)
cancel_work_sync(&adapter->scan_work);
cancel_work_sync(&adapter->stat_work);
zfcp_fc_wka_ports_force_offline(adapter->gs);
zfcp_adapter_scsi_unregister(adapter);
sysfs_remove_group(&adapter->ccw_device->dev.kobj,
&zfcp_sysfs_adapter_attrs);
@ -570,13 +607,15 @@ void zfcp_adapter_dequeue(struct zfcp_adapter *adapter)
if (!retval)
return;
zfcp_adapter_debug_unregister(adapter);
zfcp_qdio_free(adapter);
zfcp_fc_gs_destroy(adapter);
zfcp_erp_thread_kill(adapter);
zfcp_destroy_adapter_work_queue(adapter);
zfcp_dbf_adapter_unregister(adapter->dbf);
zfcp_free_low_mem_buffers(adapter);
zfcp_qdio_destroy(adapter->qdio);
kfree(adapter->req_list);
kfree(adapter->fc_stats);
kfree(adapter->stats_reset_data);
kfree(adapter->gs);
kfree(adapter);
}
@ -592,7 +631,7 @@ static void zfcp_sysfs_port_release(struct device *dev)
* @status: initial status for the port
* @d_id: destination id of the remote port to be enqueued
* Returns: pointer to enqueued port on success, ERR_PTR on error
* Locks: config_sema must be held to serialize changes to the port list
* Locks: config_mutex must be held to serialize changes to the port list
*
* All port internal structures are set up and the sysfs entry is generated.
* d_id is used to enqueue ports with a well known address like the Directory
@ -602,7 +641,13 @@ struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *adapter, u64 wwpn,
u32 status, u32 d_id)
{
struct zfcp_port *port;
int retval;
read_lock_irq(&zfcp_data.config_lock);
if (zfcp_get_port_by_wwpn(adapter, wwpn)) {
read_unlock_irq(&zfcp_data.config_lock);
return ERR_PTR(-EINVAL);
}
read_unlock_irq(&zfcp_data.config_lock);
port = kzalloc(sizeof(struct zfcp_port), GFP_KERNEL);
if (!port)
@ -610,7 +655,7 @@ struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *adapter, u64 wwpn,
init_waitqueue_head(&port->remove_wq);
INIT_LIST_HEAD(&port->unit_list_head);
INIT_WORK(&port->gid_pn_work, zfcp_erp_port_strategy_open_lookup);
INIT_WORK(&port->gid_pn_work, zfcp_fc_port_did_lookup);
INIT_WORK(&port->test_link_work, zfcp_fc_link_test_work);
INIT_WORK(&port->rport_work, zfcp_scsi_rport_work);
@ -623,29 +668,24 @@ struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *adapter, u64 wwpn,
atomic_set_mask(status | ZFCP_STATUS_COMMON_REMOVE, &port->status);
atomic_set(&port->refcount, 0);
dev_set_name(&port->sysfs_device, "0x%016llx",
(unsigned long long)wwpn);
if (dev_set_name(&port->sysfs_device, "0x%016llx",
(unsigned long long)wwpn)) {
kfree(port);
return ERR_PTR(-ENOMEM);
}
port->sysfs_device.parent = &adapter->ccw_device->dev;
port->sysfs_device.release = zfcp_sysfs_port_release;
dev_set_drvdata(&port->sysfs_device, port);
read_lock_irq(&zfcp_data.config_lock);
if (zfcp_get_port_by_wwpn(adapter, wwpn)) {
read_unlock_irq(&zfcp_data.config_lock);
goto err_out_free;
if (device_register(&port->sysfs_device)) {
put_device(&port->sysfs_device);
return ERR_PTR(-EINVAL);
}
read_unlock_irq(&zfcp_data.config_lock);
if (device_register(&port->sysfs_device))
goto err_out_free;
retval = sysfs_create_group(&port->sysfs_device.kobj,
&zfcp_sysfs_port_attrs);
if (retval) {
if (sysfs_create_group(&port->sysfs_device.kobj,
&zfcp_sysfs_port_attrs)) {
device_unregister(&port->sysfs_device);
goto err_out;
return ERR_PTR(-EINVAL);
}
zfcp_port_get(port);
@ -659,11 +699,6 @@ struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *adapter, u64 wwpn,
zfcp_adapter_get(adapter);
return port;
err_out_free:
kfree(port);
err_out:
return ERR_PTR(-EINVAL);
}
/**
@ -672,12 +707,11 @@ err_out:
*/
void zfcp_port_dequeue(struct zfcp_port *port)
{
wait_event(port->remove_wq, atomic_read(&port->refcount) == 0);
write_lock_irq(&zfcp_data.config_lock);
list_del(&port->list);
write_unlock_irq(&zfcp_data.config_lock);
if (port->rport)
port->rport->dd_data = NULL;
wait_event(port->remove_wq, atomic_read(&port->refcount) == 0);
cancel_work_sync(&port->rport_work); /* usually not necessary */
zfcp_adapter_put(port->adapter);
sysfs_remove_group(&port->sysfs_device.kobj, &zfcp_sysfs_port_attrs);
device_unregister(&port->sysfs_device);


@ -18,12 +18,15 @@ static int zfcp_ccw_suspend(struct ccw_device *cdev)
{
struct zfcp_adapter *adapter = dev_get_drvdata(&cdev->dev);
down(&zfcp_data.config_sema);
if (!adapter)
return 0;
mutex_lock(&zfcp_data.config_mutex);
zfcp_erp_adapter_shutdown(adapter, 0, "ccsusp1", NULL);
zfcp_erp_wait(adapter);
up(&zfcp_data.config_sema);
mutex_unlock(&zfcp_data.config_mutex);
return 0;
}
@ -33,6 +36,9 @@ static int zfcp_ccw_activate(struct ccw_device *cdev)
{
struct zfcp_adapter *adapter = dev_get_drvdata(&cdev->dev);
if (!adapter)
return 0;
zfcp_erp_modify_adapter_status(adapter, "ccresu1", NULL,
ZFCP_STATUS_COMMON_RUNNING, ZFCP_SET);
zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED,
@ -63,25 +69,14 @@ int zfcp_ccw_priv_sch(struct zfcp_adapter *adapter)
* zfcp_ccw_probe - probe function of zfcp driver
* @ccw_device: pointer to belonging ccw device
*
* This function gets called by the common i/o layer and sets up the initial
* data structures for each fcp adapter, which was detected by the system.
* Also the sysfs files for this adapter will be created by this function.
* In addition the nameserver port will be added to the ports of the adapter
* and its sysfs representation will be created too.
* This function gets called by the common i/o layer for each FCP
* device found on the current system. This is only a stub to make cio
* work: To only allocate adapter resources for devices actually used,
* the allocation is deferred to the first call to ccw_set_online.
*/
static int zfcp_ccw_probe(struct ccw_device *ccw_device)
{
int retval = 0;
down(&zfcp_data.config_sema);
if (zfcp_adapter_enqueue(ccw_device)) {
dev_err(&ccw_device->dev,
"Setting up data structures for the "
"FCP adapter failed\n");
retval = -EINVAL;
}
up(&zfcp_data.config_sema);
return retval;
return 0;
}
/**
@ -102,8 +97,11 @@ static void zfcp_ccw_remove(struct ccw_device *ccw_device)
LIST_HEAD(port_remove_lh);
ccw_device_set_offline(ccw_device);
down(&zfcp_data.config_sema);
mutex_lock(&zfcp_data.config_mutex);
adapter = dev_get_drvdata(&ccw_device->dev);
if (!adapter)
goto out;
write_lock_irq(&zfcp_data.config_lock);
list_for_each_entry_safe(port, p, &adapter->port_list_head, list) {
@ -129,29 +127,41 @@ static void zfcp_ccw_remove(struct ccw_device *ccw_device)
wait_event(adapter->remove_wq, atomic_read(&adapter->refcount) == 0);
zfcp_adapter_dequeue(adapter);
up(&zfcp_data.config_sema);
out:
mutex_unlock(&zfcp_data.config_mutex);
}
/**
* zfcp_ccw_set_online - set_online function of zfcp driver
* @ccw_device: pointer to belonging ccw device
*
* This function gets called by the common i/o layer and sets an adapter
* into state online. Setting an fcp device online means that it will be
* registered with the SCSI stack, that the QDIO queues will be set up
* and that the adapter will be opened (asynchronously).
* This function gets called by the common i/o layer and sets an
* adapter into state online. The first call will allocate all
* adapter resources that will be retained until the device is removed
* via zfcp_ccw_remove.
*
* Setting an fcp device online means that it will be registered with
* the SCSI stack, that the QDIO queues will be set up and that the
* adapter will be opened.
*/
static int zfcp_ccw_set_online(struct ccw_device *ccw_device)
{
struct zfcp_adapter *adapter;
int retval;
int ret = 0;
down(&zfcp_data.config_sema);
mutex_lock(&zfcp_data.config_mutex);
adapter = dev_get_drvdata(&ccw_device->dev);
retval = zfcp_erp_thread_setup(adapter);
if (retval)
goto out;
if (!adapter) {
ret = zfcp_adapter_enqueue(ccw_device);
if (ret) {
dev_err(&ccw_device->dev,
"Setting up data structures for the "
"FCP adapter failed\n");
goto out;
}
adapter = dev_get_drvdata(&ccw_device->dev);
}
/* initialize request counter */
BUG_ON(!zfcp_reqlist_isempty(adapter));
@ -162,13 +172,11 @@ static int zfcp_ccw_set_online(struct ccw_device *ccw_device)
zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED,
"ccsonl2", NULL);
zfcp_erp_wait(adapter);
up(&zfcp_data.config_sema);
flush_work(&adapter->scan_work);
return 0;
out:
up(&zfcp_data.config_sema);
return retval;
out:
mutex_unlock(&zfcp_data.config_mutex);
if (!ret)
flush_work(&adapter->scan_work);
return ret;
}
/**
@ -182,12 +190,15 @@ static int zfcp_ccw_set_offline(struct ccw_device *ccw_device)
{
struct zfcp_adapter *adapter;
down(&zfcp_data.config_sema);
mutex_lock(&zfcp_data.config_mutex);
adapter = dev_get_drvdata(&ccw_device->dev);
if (!adapter)
goto out;
zfcp_erp_adapter_shutdown(adapter, 0, "ccsoff1", NULL);
zfcp_erp_wait(adapter);
zfcp_erp_thread_kill(adapter);
up(&zfcp_data.config_sema);
mutex_unlock(&zfcp_data.config_mutex);
out:
return 0;
}
@ -240,11 +251,12 @@ static void zfcp_ccw_shutdown(struct ccw_device *cdev)
{
struct zfcp_adapter *adapter;
down(&zfcp_data.config_sema);
mutex_lock(&zfcp_data.config_mutex);
adapter = dev_get_drvdata(&cdev->dev);
zfcp_erp_adapter_shutdown(adapter, 0, "ccshut1", NULL);
zfcp_erp_wait(adapter);
up(&zfcp_data.config_sema);
zfcp_erp_thread_kill(adapter);
mutex_unlock(&zfcp_data.config_mutex);
}
static struct ccw_driver zfcp_ccw_driver = {

File diff suppressed because it is too large


@ -2,7 +2,7 @@
* This file is part of the zfcp device driver for
* FCP adapters for IBM System z9 and zSeries.
*
* Copyright IBM Corp. 2008, 2008
* Copyright IBM Corp. 2008, 2009
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@ -22,7 +22,9 @@
#ifndef ZFCP_DBF_H
#define ZFCP_DBF_H
#include "zfcp_ext.h"
#include "zfcp_fsf.h"
#include "zfcp_def.h"
#define ZFCP_DBF_TAG_SIZE 4
#define ZFCP_DBF_ID_SIZE 7
@ -35,13 +37,13 @@ struct zfcp_dbf_dump {
u8 data[]; /* dump data */
} __attribute__ ((packed));
struct zfcp_rec_dbf_record_thread {
struct zfcp_dbf_rec_record_thread {
u32 total;
u32 ready;
u32 running;
};
struct zfcp_rec_dbf_record_target {
struct zfcp_dbf_rec_record_target {
u64 ref;
u32 status;
u32 d_id;
@ -50,7 +52,7 @@ struct zfcp_rec_dbf_record_target {
u32 erp_count;
};
struct zfcp_rec_dbf_record_trigger {
struct zfcp_dbf_rec_record_trigger {
u8 want;
u8 need;
u32 as;
@ -62,21 +64,21 @@ struct zfcp_rec_dbf_record_trigger {
u64 fcp_lun;
};
struct zfcp_rec_dbf_record_action {
struct zfcp_dbf_rec_record_action {
u32 status;
u32 step;
u64 action;
u64 fsf_req;
};
struct zfcp_rec_dbf_record {
struct zfcp_dbf_rec_record {
u8 id;
char id2[7];
union {
struct zfcp_rec_dbf_record_action action;
struct zfcp_rec_dbf_record_thread thread;
struct zfcp_rec_dbf_record_target target;
struct zfcp_rec_dbf_record_trigger trigger;
struct zfcp_dbf_rec_record_action action;
struct zfcp_dbf_rec_record_thread thread;
struct zfcp_dbf_rec_record_target target;
struct zfcp_dbf_rec_record_trigger trigger;
} u;
};
@ -87,7 +89,7 @@ enum {
ZFCP_REC_DBF_ID_TRIGGER,
};
struct zfcp_hba_dbf_record_response {
struct zfcp_dbf_hba_record_response {
u32 fsf_command;
u64 fsf_reqid;
u32 fsf_seqno;
@ -125,7 +127,7 @@ struct zfcp_hba_dbf_record_response {
} u;
} __attribute__ ((packed));
struct zfcp_hba_dbf_record_status {
struct zfcp_dbf_hba_record_status {
u8 failed;
u32 status_type;
u32 status_subtype;
@ -139,24 +141,24 @@ struct zfcp_hba_dbf_record_status {
u8 payload[ZFCP_DBF_UNSOL_PAYLOAD];
} __attribute__ ((packed));
struct zfcp_hba_dbf_record_qdio {
struct zfcp_dbf_hba_record_qdio {
u32 qdio_error;
u8 sbal_index;
u8 sbal_count;
} __attribute__ ((packed));
struct zfcp_hba_dbf_record {
struct zfcp_dbf_hba_record {
u8 tag[ZFCP_DBF_TAG_SIZE];
u8 tag2[ZFCP_DBF_TAG_SIZE];
union {
struct zfcp_hba_dbf_record_response response;
struct zfcp_hba_dbf_record_status status;
struct zfcp_hba_dbf_record_qdio qdio;
struct zfcp_dbf_hba_record_response response;
struct zfcp_dbf_hba_record_status status;
struct zfcp_dbf_hba_record_qdio qdio;
struct fsf_bit_error_payload berr;
} u;
} __attribute__ ((packed));
struct zfcp_san_dbf_record_ct_request {
struct zfcp_dbf_san_record_ct_request {
u16 cmd_req_code;
u8 revision;
u8 gs_type;
@ -166,7 +168,7 @@ struct zfcp_san_dbf_record_ct_request {
u32 len;
} __attribute__ ((packed));
struct zfcp_san_dbf_record_ct_response {
struct zfcp_dbf_san_record_ct_response {
u16 cmd_rsp_code;
u8 revision;
u8 reason_code;
@ -176,27 +178,27 @@ struct zfcp_san_dbf_record_ct_response {
u32 len;
} __attribute__ ((packed));
struct zfcp_san_dbf_record_els {
struct zfcp_dbf_san_record_els {
u8 ls_code;
u32 len;
} __attribute__ ((packed));
struct zfcp_san_dbf_record {
struct zfcp_dbf_san_record {
u8 tag[ZFCP_DBF_TAG_SIZE];
u64 fsf_reqid;
u32 fsf_seqno;
u32 s_id;
u32 d_id;
union {
struct zfcp_san_dbf_record_ct_request ct_req;
struct zfcp_san_dbf_record_ct_response ct_resp;
struct zfcp_san_dbf_record_els els;
struct zfcp_dbf_san_record_ct_request ct_req;
struct zfcp_dbf_san_record_ct_response ct_resp;
struct zfcp_dbf_san_record_els els;
} u;
#define ZFCP_DBF_SAN_MAX_PAYLOAD 1024
u8 payload[32];
} __attribute__ ((packed));
struct zfcp_scsi_dbf_record {
struct zfcp_dbf_scsi_record {
u8 tag[ZFCP_DBF_TAG_SIZE];
u8 tag2[ZFCP_DBF_TAG_SIZE];
u32 scsi_id;
@ -222,4 +224,127 @@ struct zfcp_scsi_dbf_record {
u8 sns_info[ZFCP_DBF_SCSI_FCP_SNS_INFO];
} __attribute__ ((packed));
struct zfcp_dbf {
debug_info_t *rec;
debug_info_t *hba;
debug_info_t *san;
debug_info_t *scsi;
spinlock_t rec_lock;
spinlock_t hba_lock;
spinlock_t san_lock;
spinlock_t scsi_lock;
struct zfcp_dbf_rec_record rec_buf;
struct zfcp_dbf_hba_record hba_buf;
struct zfcp_dbf_san_record san_buf;
struct zfcp_dbf_scsi_record scsi_buf;
struct zfcp_adapter *adapter;
};
static inline
void zfcp_dbf_hba_fsf_resp(const char *tag2, int level,
struct zfcp_fsf_req *req, struct zfcp_dbf *dbf)
{
if (level <= dbf->hba->level)
_zfcp_dbf_hba_fsf_response(tag2, level, req, dbf);
}
/**
* zfcp_dbf_hba_fsf_response - trace event for request completion
* @fsf_req: request that has been completed
*/
static inline void zfcp_dbf_hba_fsf_response(struct zfcp_fsf_req *req)
{
struct zfcp_dbf *dbf = req->adapter->dbf;
struct fsf_qtcb *qtcb = req->qtcb;
if ((qtcb->prefix.prot_status != FSF_PROT_GOOD) &&
(qtcb->prefix.prot_status != FSF_PROT_FSF_STATUS_PRESENTED)) {
zfcp_dbf_hba_fsf_resp("perr", 1, req, dbf);
} else if (qtcb->header.fsf_status != FSF_GOOD) {
zfcp_dbf_hba_fsf_resp("ferr", 1, req, dbf);
} else if ((req->fsf_command == FSF_QTCB_OPEN_PORT_WITH_DID) ||
(req->fsf_command == FSF_QTCB_OPEN_LUN)) {
zfcp_dbf_hba_fsf_resp("open", 4, req, dbf);
} else if (qtcb->header.log_length) {
zfcp_dbf_hba_fsf_resp("qtcb", 5, req, dbf);
} else {
zfcp_dbf_hba_fsf_resp("norm", 6, req, dbf);
}
}
/**
* zfcp_dbf_hba_fsf_unsol - trace event for an unsolicited status buffer
* @tag: tag indicating which kind of unsolicited status has been received
* @dbf: reference to dbf structure
* @status_buffer: buffer containing payload of unsolicited status
*/
static inline
void zfcp_dbf_hba_fsf_unsol(const char *tag, struct zfcp_dbf *dbf,
struct fsf_status_read_buffer *buf)
{
int level = 2;
if (level <= dbf->hba->level)
_zfcp_dbf_hba_fsf_unsol(tag, level, dbf, buf);
}
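Every wrapper in this header follows the same shape: a cheap inline level check guards the out-of-line trace writer, so a quiet debug level costs one comparison per event. A small user-space model of the pattern (all names illustrative):

#include <stdio.h>

struct dbf_view { int level; };

static void _trace_event(struct dbf_view *v, int level, const char *tag)
{
    /* stands in for the out-of-line record formatting */
    printf("level %d (view at %d): %s\n", level, v->level, tag);
}

/* The cheap comparison is inlined at every call site; the costly
 * formatting runs only when the view is verbose enough. */
static inline void trace_event(struct dbf_view *v, int level, const char *tag)
{
    if (level <= v->level)  /* lower number == higher priority */
        _trace_event(v, level, tag);
}

int main(void)
{
    struct dbf_view hba = { .level = 3 };

    trace_event(&hba, 1, "perr");   /* emitted */
    trace_event(&hba, 5, "qtcb");   /* suppressed */
    return 0;
}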
static inline
void zfcp_dbf_scsi(const char *tag, const char *tag2, int level,
struct zfcp_dbf *dbf, struct scsi_cmnd *scmd,
struct zfcp_fsf_req *req, unsigned long old_id)
{
if (level <= dbf->scsi->level)
_zfcp_dbf_scsi(tag, tag2, level, dbf, scmd, req, old_id);
}
/**
* zfcp_dbf_scsi_result - trace event for SCSI command completion
* @tag: tag indicating success or failure of SCSI command
* @level: trace level applicable for this event
* @adapter: adapter that has been used to issue the SCSI command
* @scmd: SCSI command pointer
* @fsf_req: request used to issue SCSI command (might be NULL)
*/
static inline
void zfcp_dbf_scsi_result(const char *tag, int level, struct zfcp_dbf *dbf,
struct scsi_cmnd *scmd, struct zfcp_fsf_req *fsf_req)
{
zfcp_dbf_scsi("rslt", tag, level, dbf, scmd, fsf_req, 0);
}
/**
* zfcp_dbf_scsi_abort - trace event for SCSI command abort
* @tag: tag indicating success or failure of abort operation
* @adapter: adapter thas has been used to issue SCSI command to be aborted
* @scmd: SCSI command to be aborted
* @new_req: request containing abort (might be NULL)
* @old_id: identifier of request containg SCSI command to be aborted
*/
static inline
void zfcp_dbf_scsi_abort(const char *tag, struct zfcp_dbf *dbf,
struct scsi_cmnd *scmd, struct zfcp_fsf_req *new_req,
unsigned long old_id)
{
zfcp_dbf_scsi("abrt", tag, 1, dbf, scmd, new_req, old_id);
}
/**
* zfcp_dbf_scsi_devreset - trace event for Logical Unit or Target Reset
* @tag: tag indicating success or failure of reset operation
* @flag: indicates type of reset (Target Reset, Logical Unit Reset)
* @unit: unit that needs reset
* @scsi_cmnd: SCSI command which caused this error recovery
*/
static inline
void zfcp_dbf_scsi_devreset(const char *tag, u8 flag, struct zfcp_unit *unit,
struct scsi_cmnd *scsi_cmnd)
{
zfcp_dbf_scsi(flag == FCP_TARGET_RESET ? "trst" : "lrst", tag, 1,
unit->port->adapter->dbf, scsi_cmnd, NULL, 0);
}
#endif /* ZFCP_DBF_H */

View File

@ -37,10 +37,8 @@
#include <asm/debug.h>
#include <asm/ebcdic.h>
#include <asm/sysinfo.h>
#include "zfcp_dbf.h"
#include "zfcp_fsf.h"
/********************* GENERAL DEFINES *********************************/
#define REQUEST_LIST_SIZE 128
@ -75,9 +73,6 @@
/*************** FIBRE CHANNEL PROTOCOL SPECIFIC DEFINES ********************/
/* timeout for name-server lookup (in seconds) */
#define ZFCP_NS_GID_PN_TIMEOUT 10
/* task attribute values in FCP-2 FCP_CMND IU */
#define SIMPLE_Q 0
#define HEAD_OF_Q 1
@ -224,8 +219,6 @@ struct zfcp_ls_adisc {
#define ZFCP_STATUS_ADAPTER_QDIOUP 0x00000002
#define ZFCP_STATUS_ADAPTER_XCONFIG_OK 0x00000008
#define ZFCP_STATUS_ADAPTER_HOST_CON_INIT 0x00000010
#define ZFCP_STATUS_ADAPTER_ERP_THREAD_UP 0x00000020
#define ZFCP_STATUS_ADAPTER_ERP_THREAD_KILL 0x00000080
#define ZFCP_STATUS_ADAPTER_ERP_PENDING 0x00000100
#define ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED 0x00000200
@ -234,6 +227,7 @@ struct zfcp_ls_adisc {
/* remote port status */
#define ZFCP_STATUS_PORT_PHYS_OPEN 0x00000001
#define ZFCP_STATUS_PORT_LINK_TEST 0x00000002
/* well known address (WKA) port status*/
enum zfcp_wka_status {
@ -249,7 +243,6 @@ enum zfcp_wka_status {
/* FSF request status (this does not have a common part) */
#define ZFCP_STATUS_FSFREQ_TASK_MANAGEMENT 0x00000002
#define ZFCP_STATUS_FSFREQ_COMPLETED 0x00000004
#define ZFCP_STATUS_FSFREQ_ERROR 0x00000008
#define ZFCP_STATUS_FSFREQ_CLEANUP 0x00000010
#define ZFCP_STATUS_FSFREQ_ABORTSUCCEEDED 0x00000040
@ -266,12 +259,14 @@ struct zfcp_fsf_req;
/* holds various memory pools of an adapter */
struct zfcp_adapter_mempool {
mempool_t *fsf_req_erp;
mempool_t *fsf_req_scsi;
mempool_t *fsf_req_abort;
mempool_t *fsf_req_status_read;
mempool_t *data_status_read;
mempool_t *data_gid_pn;
mempool_t *erp_req;
mempool_t *gid_pn_req;
mempool_t *scsi_req;
mempool_t *scsi_abort;
mempool_t *status_read_req;
mempool_t *status_read_data;
mempool_t *gid_pn_data;
mempool_t *qtcb_pool;
};
/*
@ -305,6 +300,15 @@ struct ct_iu_gid_pn_resp {
u32 d_id;
} __attribute__ ((packed));
struct ct_iu_gpn_ft_req {
struct ct_hdr header;
u8 flags;
u8 domain_id_scope;
u8 area_id_scope;
u8 fc4_type;
} __attribute__ ((packed));
/**
* struct zfcp_send_ct - used to pass parameters to function zfcp_fsf_send_ct
* @wka_port: port where the request is sent to
@ -312,7 +316,6 @@ struct ct_iu_gid_pn_resp {
* @resp: scatter-gather list for response
* @handler: handler function (called for response to the request)
* @handler_data: data passed to handler function
* @timeout: FSF timeout for this request
* @completion: completion for synchronization purposes
* @status: used to pass error status to calling function
*/
@ -322,7 +325,6 @@ struct zfcp_send_ct {
struct scatterlist *resp;
void (*handler)(unsigned long);
unsigned long handler_data;
int timeout;
struct completion *completion;
int status;
};
@ -420,6 +422,29 @@ struct zfcp_latencies {
spinlock_t lock;
};
/** struct zfcp_qdio - basic QDIO data structure
* @resp_q: response queue
* @req_q: request queue
* @stat_lock: lock to protect req_q_util and req_q_time
* @req_q_lock: lock to serialize access to request queue
* @req_q_time: time of last fill level change
* @req_q_util: used for accounting
* @req_q_full: queue full incidents
* @req_q_wq: used to wait for SBAL availability
* @adapter: adapter used in conjunction with this QDIO structure
*/
struct zfcp_qdio {
struct zfcp_qdio_queue resp_q;
struct zfcp_qdio_queue req_q;
spinlock_t stat_lock;
spinlock_t req_q_lock;
unsigned long long req_q_time;
u64 req_q_util;
atomic_t req_q_full;
wait_queue_head_t req_q_wq;
struct zfcp_adapter *adapter;
};
struct zfcp_adapter {
atomic_t refcount; /* reference count */
wait_queue_head_t remove_wq; /* can be used to wait for
@ -428,6 +453,7 @@ struct zfcp_adapter {
u64 peer_wwpn; /* P2P peer WWPN */
u32 peer_d_id; /* P2P peer D_ID */
struct ccw_device *ccw_device; /* S/390 ccw device */
struct zfcp_qdio *qdio;
u32 hydra_version; /* Hydra version */
u32 fsf_lic_version;
u32 adapter_features; /* FCP channel features */
@ -439,15 +465,7 @@ struct zfcp_adapter {
unsigned long req_no; /* unique FSF req number */
struct list_head *req_list; /* list of pending reqs */
spinlock_t req_list_lock; /* request list lock */
struct zfcp_qdio_queue req_q; /* request queue */
spinlock_t req_q_lock; /* for operations on queue */
ktime_t req_q_time; /* time of last fill level change */
u64 req_q_util; /* for accounting */
spinlock_t qdio_stat_lock;
u32 fsf_req_seq_no; /* FSF cmnd seq number */
wait_queue_head_t request_wq; /* can be used to wait for
more available SBALs */
struct zfcp_qdio_queue resp_q; /* response queue */
rwlock_t abort_lock; /* Protects against SCSI
stack abort/command
completion races */
@ -456,10 +474,9 @@ struct zfcp_adapter {
atomic_t status; /* status of this adapter */
struct list_head erp_ready_head; /* error recovery for this
adapter/devices */
wait_queue_head_t erp_ready_wq;
struct list_head erp_running_head;
rwlock_t erp_lock;
struct semaphore erp_ready_sem;
wait_queue_head_t erp_thread_wqh;
wait_queue_head_t erp_done_wqh;
struct zfcp_erp_action erp_action; /* pending error recovery */
atomic_t erp_counter;
@ -467,27 +484,16 @@ struct zfcp_adapter {
actions */
u32 erp_low_mem_count; /* nr of erp actions waiting
for memory */
struct task_struct *erp_thread;
struct zfcp_wka_ports *gs; /* generic services */
debug_info_t *rec_dbf;
debug_info_t *hba_dbf;
debug_info_t *san_dbf; /* debug feature areas */
debug_info_t *scsi_dbf;
spinlock_t rec_dbf_lock;
spinlock_t hba_dbf_lock;
spinlock_t san_dbf_lock;
spinlock_t scsi_dbf_lock;
struct zfcp_rec_dbf_record rec_dbf_buf;
struct zfcp_hba_dbf_record hba_dbf_buf;
struct zfcp_san_dbf_record san_dbf_buf;
struct zfcp_scsi_dbf_record scsi_dbf_buf;
struct zfcp_dbf *dbf; /* debug traces */
struct zfcp_adapter_mempool pool; /* Adapter memory pools */
struct qdio_initialize qdio_init_data; /* for qdio_establish */
struct fc_host_statistics *fc_stats;
struct fsf_qtcb_bottom_port *stats_reset_data;
unsigned long stats_reset;
struct work_struct scan_work;
struct service_level service_level;
atomic_t qdio_outb_full; /* queue full incidents */
struct workqueue_struct *work_queue;
};
struct zfcp_port {
@ -531,36 +537,64 @@ struct zfcp_unit {
struct work_struct scsi_work;
};
/* FSF request */
/**
* struct zfcp_queue_req - queue related values for a request
* @sbal_number: number of free SBALs
* @sbal_first: first SBAL for this request
* @sbal_last: last SBAL for this request
* @sbal_limit: last possible SBAL for this request
* @sbale_curr: current SBALE at creation of this request
* @sbal_response: SBAL used in interrupt
* @qdio_outb_usage: usage of outbound queue
* @qdio_inb_usage: usage of inbound queue
*/
struct zfcp_queue_req {
u8 sbal_number;
u8 sbal_first;
u8 sbal_last;
u8 sbal_limit;
u8 sbale_curr;
u8 sbal_response;
u16 qdio_outb_usage;
u16 qdio_inb_usage;
};
/**
* struct zfcp_fsf_req - basic FSF request structure
* @list: list of FSF requests
* @req_id: unique request ID
* @adapter: adapter this request belongs to
* @queue_req: queue related values
* @completion: used to signal the completion of the request
* @status: status of the request
* @fsf_command: FSF command issued
* @qtcb: associated QTCB
* @seq_no: sequence number of this request
* @data: private data
* @timer: timer data of this request
* @erp_action: reference to erp action if request issued on behalf of ERP
* @pool: reference to memory pool if used for this request
* @issued: time when request was sent (STCK)
* @unit: reference to unit if this request is a SCSI request
* @handler: handler which should be called to process response
*/
struct zfcp_fsf_req {
struct list_head list; /* list of FSF requests */
unsigned long req_id; /* unique request ID */
struct zfcp_adapter *adapter; /* adapter request belongs to */
u8 sbal_number; /* nr of SBALs free for use */
u8 sbal_first; /* first SBAL for this request */
u8 sbal_last; /* last SBAL for this request */
u8 sbal_limit; /* last possible SBAL for
this request */
u8 sbale_curr; /* current SBALE during creation
of request */
u8 sbal_response; /* SBAL used in interrupt */
wait_queue_head_t completion_wq; /* can be used by a routine
to wait for completion */
u32 status; /* status of this request */
u32 fsf_command; /* FSF Command copy */
struct fsf_qtcb *qtcb; /* address of associated QTCB */
u32 seq_no; /* Sequence number of request */
void *data; /* private data of request */
struct timer_list timer; /* used for erp or scsi er */
struct zfcp_erp_action *erp_action; /* used if this request is
issued on behalf of erp */
mempool_t *pool; /* used if request was allocated
from emergency pool */
unsigned long long issued; /* request sent time (STCK) */
struct zfcp_unit *unit;
struct list_head list;
unsigned long req_id;
struct zfcp_adapter *adapter;
struct zfcp_queue_req queue_req;
struct completion completion;
u32 status;
u32 fsf_command;
struct fsf_qtcb *qtcb;
u32 seq_no;
void *data;
struct timer_list timer;
struct zfcp_erp_action *erp_action;
mempool_t *pool;
unsigned long long issued;
struct zfcp_unit *unit;
void (*handler)(struct zfcp_fsf_req *);
u16 qdio_outb_usage;/* usage of outbound queue */
u16 qdio_inb_usage; /* usage of inbound queue */
};
/* driver data */
@ -570,18 +604,11 @@ struct zfcp_data {
rwlock_t config_lock; /* serialises changes
to adapter/port/unit
lists */
struct semaphore config_sema; /* serialises configuration
changes */
struct kmem_cache *fsf_req_qtcb_cache;
struct mutex config_mutex;
struct kmem_cache *gpn_ft_cache;
struct kmem_cache *qtcb_cache;
struct kmem_cache *sr_buffer_cache;
struct kmem_cache *gid_pn_cache;
struct workqueue_struct *work_queue;
};
/* struct used by memory pools for fsf_requests */
struct zfcp_fsf_req_qtcb {
struct zfcp_fsf_req fsf_req;
struct fsf_qtcb qtcb;
};
/********************** ZFCP SPECIFIC DEFINES ********************************/

View File

@ -9,6 +9,7 @@
#define KMSG_COMPONENT "zfcp"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/kthread.h>
#include "zfcp_ext.h"
#define ZFCP_MAX_ERPS 3
@ -26,7 +27,6 @@ enum zfcp_erp_steps {
ZFCP_ERP_STEP_FSF_XCONFIG = 0x0001,
ZFCP_ERP_STEP_PHYS_PORT_CLOSING = 0x0010,
ZFCP_ERP_STEP_PORT_CLOSING = 0x0100,
ZFCP_ERP_STEP_NAMESERVER_LOOKUP = 0x0400,
ZFCP_ERP_STEP_PORT_OPENING = 0x0800,
ZFCP_ERP_STEP_UNIT_CLOSING = 0x1000,
ZFCP_ERP_STEP_UNIT_OPENING = 0x2000,
@ -75,9 +75,9 @@ static void zfcp_erp_action_ready(struct zfcp_erp_action *act)
struct zfcp_adapter *adapter = act->adapter;
list_move(&act->list, &act->adapter->erp_ready_head);
zfcp_rec_dbf_event_action("erardy1", act);
up(&adapter->erp_ready_sem);
zfcp_rec_dbf_event_thread("erardy2", adapter);
zfcp_dbf_rec_action("erardy1", act);
wake_up(&adapter->erp_ready_wq);
zfcp_dbf_rec_thread("erardy2", adapter->dbf);
}
static void zfcp_erp_action_dismiss(struct zfcp_erp_action *act)
@ -150,6 +150,9 @@ static int zfcp_erp_required_act(int want, struct zfcp_adapter *adapter,
a_status = atomic_read(&adapter->status);
if (a_status & ZFCP_STATUS_COMMON_ERP_INUSE)
return 0;
if (!(a_status & ZFCP_STATUS_COMMON_RUNNING) &&
!(a_status & ZFCP_STATUS_COMMON_OPEN))
return 0; /* shutdown requested for closed adapter */
}
return need;
@ -213,8 +216,7 @@ static int zfcp_erp_action_enqueue(int want, struct zfcp_adapter *adapter,
int retval = 1, need;
struct zfcp_erp_action *act = NULL;
if (!(atomic_read(&adapter->status) &
ZFCP_STATUS_ADAPTER_ERP_THREAD_UP))
if (!adapter->erp_thread)
return -EIO;
need = zfcp_erp_required_act(want, adapter, port, unit);
@ -227,12 +229,11 @@ static int zfcp_erp_action_enqueue(int want, struct zfcp_adapter *adapter,
goto out;
++adapter->erp_total_count;
list_add_tail(&act->list, &adapter->erp_ready_head);
up(&adapter->erp_ready_sem);
zfcp_rec_dbf_event_thread("eracte1", adapter);
wake_up(&adapter->erp_ready_wq);
zfcp_dbf_rec_thread("eracte1", adapter->dbf);
retval = 0;
out:
zfcp_rec_dbf_event_trigger(id, ref, want, need, act,
adapter, port, unit);
zfcp_dbf_rec_trigger(id, ref, want, need, act, adapter, port, unit);
return retval;
}
@ -443,28 +444,28 @@ static int status_change_clear(unsigned long mask, atomic_t *status)
static void zfcp_erp_adapter_unblock(struct zfcp_adapter *adapter)
{
if (status_change_set(ZFCP_STATUS_COMMON_UNBLOCKED, &adapter->status))
zfcp_rec_dbf_event_adapter("eraubl1", NULL, adapter);
zfcp_dbf_rec_adapter("eraubl1", NULL, adapter->dbf);
atomic_set_mask(ZFCP_STATUS_COMMON_UNBLOCKED, &adapter->status);
}
static void zfcp_erp_port_unblock(struct zfcp_port *port)
{
if (status_change_set(ZFCP_STATUS_COMMON_UNBLOCKED, &port->status))
zfcp_rec_dbf_event_port("erpubl1", NULL, port);
zfcp_dbf_rec_port("erpubl1", NULL, port);
atomic_set_mask(ZFCP_STATUS_COMMON_UNBLOCKED, &port->status);
}
static void zfcp_erp_unit_unblock(struct zfcp_unit *unit)
{
if (status_change_set(ZFCP_STATUS_COMMON_UNBLOCKED, &unit->status))
zfcp_rec_dbf_event_unit("eruubl1", NULL, unit);
zfcp_dbf_rec_unit("eruubl1", NULL, unit);
atomic_set_mask(ZFCP_STATUS_COMMON_UNBLOCKED, &unit->status);
}
static void zfcp_erp_action_to_running(struct zfcp_erp_action *erp_action)
{
list_move(&erp_action->list, &erp_action->adapter->erp_running_head);
zfcp_rec_dbf_event_action("erator1", erp_action);
zfcp_dbf_rec_action("erator1", erp_action);
}
static void zfcp_erp_strategy_check_fsfreq(struct zfcp_erp_action *act)
@ -480,13 +481,12 @@ static void zfcp_erp_strategy_check_fsfreq(struct zfcp_erp_action *act)
if (act->status & (ZFCP_STATUS_ERP_DISMISSED |
ZFCP_STATUS_ERP_TIMEDOUT)) {
act->fsf_req->status |= ZFCP_STATUS_FSFREQ_DISMISSED;
zfcp_rec_dbf_event_action("erscf_1", act);
zfcp_dbf_rec_action("erscf_1", act);
act->fsf_req->erp_action = NULL;
}
if (act->status & ZFCP_STATUS_ERP_TIMEDOUT)
zfcp_rec_dbf_event_action("erscf_2", act);
if (act->fsf_req->status & (ZFCP_STATUS_FSFREQ_COMPLETED |
ZFCP_STATUS_FSFREQ_DISMISSED))
zfcp_dbf_rec_action("erscf_2", act);
if (act->fsf_req->status & ZFCP_STATUS_FSFREQ_DISMISSED)
act->fsf_req = NULL;
} else
act->fsf_req = NULL;
@ -604,9 +604,11 @@ static void zfcp_erp_wakeup(struct zfcp_adapter *adapter)
static int zfcp_erp_adapter_strategy_open_qdio(struct zfcp_erp_action *act)
{
if (zfcp_qdio_open(act->adapter))
struct zfcp_qdio *qdio = act->adapter->qdio;
if (zfcp_qdio_open(qdio))
return ZFCP_ERP_FAILED;
init_waitqueue_head(&act->adapter->request_wq);
init_waitqueue_head(&qdio->req_q_wq);
atomic_set_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &act->adapter->status);
return ZFCP_ERP_SUCCEEDED;
}
@ -641,9 +643,10 @@ static int zfcp_erp_adapter_strat_fsf_xconf(struct zfcp_erp_action *erp_action)
return ZFCP_ERP_FAILED;
}
zfcp_rec_dbf_event_thread_lock("erasfx1", adapter);
down(&adapter->erp_ready_sem);
zfcp_rec_dbf_event_thread_lock("erasfx2", adapter);
zfcp_dbf_rec_thread_lock("erasfx1", adapter->dbf);
wait_event(adapter->erp_ready_wq,
!list_empty(&adapter->erp_ready_head));
zfcp_dbf_rec_thread_lock("erasfx2", adapter->dbf);
if (erp_action->status & ZFCP_STATUS_ERP_TIMEDOUT)
break;
@ -682,9 +685,10 @@ static int zfcp_erp_adapter_strategy_open_fsf_xport(struct zfcp_erp_action *act)
if (ret)
return ZFCP_ERP_FAILED;
zfcp_rec_dbf_event_thread_lock("erasox1", adapter);
down(&adapter->erp_ready_sem);
zfcp_rec_dbf_event_thread_lock("erasox2", adapter);
zfcp_dbf_rec_thread_lock("erasox1", adapter->dbf);
wait_event(adapter->erp_ready_wq,
!list_empty(&adapter->erp_ready_head));
zfcp_dbf_rec_thread_lock("erasox2", adapter->dbf);
if (act->status & ZFCP_STATUS_ERP_TIMEDOUT)
return ZFCP_ERP_FAILED;
@ -711,10 +715,10 @@ static void zfcp_erp_adapter_strategy_close(struct zfcp_erp_action *act)
struct zfcp_adapter *adapter = act->adapter;
/* close queues to ensure that buffers are not accessed by adapter */
zfcp_qdio_close(adapter);
zfcp_qdio_close(adapter->qdio);
zfcp_fsf_req_dismiss_all(adapter);
adapter->fsf_req_seq_no = 0;
zfcp_fc_wka_port_force_offline(&adapter->gs->ds);
zfcp_fc_wka_ports_force_offline(adapter->gs);
/* all ports and units are closed */
zfcp_erp_modify_adapter_status(adapter, "erascl1", NULL,
ZFCP_STATUS_COMMON_OPEN, ZFCP_CLEAR);
@ -841,27 +845,6 @@ static int zfcp_erp_open_ptp_port(struct zfcp_erp_action *act)
return zfcp_erp_port_strategy_open_port(act);
}
void zfcp_erp_port_strategy_open_lookup(struct work_struct *work)
{
int retval;
struct zfcp_port *port = container_of(work, struct zfcp_port,
gid_pn_work);
retval = zfcp_fc_ns_gid_pn(&port->erp_action);
if (!retval) {
port->erp_action.step = ZFCP_ERP_STEP_NAMESERVER_LOOKUP;
goto out;
}
if (retval == -ENOMEM) {
zfcp_erp_notify(&port->erp_action, ZFCP_STATUS_ERP_LOWMEM);
goto out;
}
/* all other error conditions */
zfcp_erp_notify(&port->erp_action, 0);
out:
zfcp_port_put(port);
}
static int zfcp_erp_port_strategy_open_common(struct zfcp_erp_action *act)
{
struct zfcp_adapter *adapter = act->adapter;
@ -876,15 +859,11 @@ static int zfcp_erp_port_strategy_open_common(struct zfcp_erp_action *act)
return zfcp_erp_open_ptp_port(act);
if (!port->d_id) {
zfcp_port_get(port);
if (!queue_work(zfcp_data.work_queue,
if (!queue_work(adapter->work_queue,
&port->gid_pn_work))
zfcp_port_put(port);
return ZFCP_ERP_CONTINUES;
return ZFCP_ERP_EXIT;
}
/* fall through */
case ZFCP_ERP_STEP_NAMESERVER_LOOKUP:
if (!port->d_id)
return ZFCP_ERP_FAILED;
return zfcp_erp_port_strategy_open_port(act);
case ZFCP_ERP_STEP_PORT_OPENING:
@ -1163,7 +1142,7 @@ static void zfcp_erp_action_dequeue(struct zfcp_erp_action *erp_action)
}
list_del(&erp_action->list);
zfcp_rec_dbf_event_action("eractd1", erp_action);
zfcp_dbf_rec_action("eractd1", erp_action);
switch (erp_action->action) {
case ZFCP_ERP_ACTION_REOPEN_UNIT:
@ -1311,20 +1290,16 @@ static int zfcp_erp_thread(void *data)
struct list_head *next;
struct zfcp_erp_action *act;
unsigned long flags;
int ignore;
daemonize("zfcperp%s", dev_name(&adapter->ccw_device->dev));
/* Block all signals */
siginitsetinv(&current->blocked, 0);
atomic_set_mask(ZFCP_STATUS_ADAPTER_ERP_THREAD_UP, &adapter->status);
wake_up(&adapter->erp_thread_wqh);
for (;;) {
zfcp_dbf_rec_thread_lock("erthrd1", adapter->dbf);
wait_event_interruptible(adapter->erp_ready_wq,
!list_empty(&adapter->erp_ready_head) ||
kthread_should_stop());
zfcp_dbf_rec_thread_lock("erthrd2", adapter->dbf);
while (!(atomic_read(&adapter->status) &
ZFCP_STATUS_ADAPTER_ERP_THREAD_KILL)) {
zfcp_rec_dbf_event_thread_lock("erthrd1", adapter);
ignore = down_interruptible(&adapter->erp_ready_sem);
zfcp_rec_dbf_event_thread_lock("erthrd2", adapter);
if (kthread_should_stop())
break;
write_lock_irqsave(&adapter->erp_lock, flags);
next = adapter->erp_ready_head.next;
@ -1339,9 +1314,6 @@ static int zfcp_erp_thread(void *data)
}
}
atomic_clear_mask(ZFCP_STATUS_ADAPTER_ERP_THREAD_UP, &adapter->status);
wake_up(&adapter->erp_thread_wqh);
return 0;
}
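The rewritten thread sleeps on a wait queue whose wake-up condition is "ready list non-empty or stop requested", replacing the old semaphore handshake; a wake_up() with no waiter is harmless because the condition is re-checked before sleeping. A minimal sketch of that producer/consumer handshake (hypothetical names, not part of this patch):

#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/wait.h>

static LIST_HEAD(ready_head);
static DEFINE_SPINLOCK(ready_lock);
static DECLARE_WAIT_QUEUE_HEAD(ready_wq);

/* producer side, like zfcp_erp_action_ready() */
static void enqueue(struct list_head *item)
{
	spin_lock(&ready_lock);
	list_add_tail(item, &ready_head);
	spin_unlock(&ready_lock);
	wake_up(&ready_wq);
}

/* consumer side, like zfcp_erp_thread() */
static int consumer(void *data)
{
	for (;;) {
		wait_event_interruptible(ready_wq,
					 !list_empty(&ready_head) ||
					 kthread_should_stop());
		if (kthread_should_stop())
			break;
		/* dequeue and process one entry under ready_lock here */
	}
	return 0;
}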
@ -1353,18 +1325,17 @@ static int zfcp_erp_thread(void *data)
*/
int zfcp_erp_thread_setup(struct zfcp_adapter *adapter)
{
int retval;
struct task_struct *thread;
atomic_clear_mask(ZFCP_STATUS_ADAPTER_ERP_THREAD_UP, &adapter->status);
retval = kernel_thread(zfcp_erp_thread, adapter, SIGCHLD);
if (retval < 0) {
thread = kthread_run(zfcp_erp_thread, adapter, "zfcperp%s",
dev_name(&adapter->ccw_device->dev));
if (IS_ERR(thread)) {
dev_err(&adapter->ccw_device->dev,
"Creating an ERP thread for the FCP device failed.\n");
return retval;
return PTR_ERR(thread);
}
wait_event(adapter->erp_thread_wqh,
atomic_read(&adapter->status) &
ZFCP_STATUS_ADAPTER_ERP_THREAD_UP);
adapter->erp_thread = thread;
return 0;
}
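The setup above stores the task pointer returned by kthread_run() so the teardown path can end the thread with kthread_stop(), which both wakes the thread and waits for it to exit. A module-style sketch of that lifecycle, assuming a trivial worker loop (hypothetical names, not part of this patch):

#include <linux/err.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/sched.h>

static struct task_struct *worker;

static int worker_fn(void *data)
{
	while (!kthread_should_stop())
		schedule_timeout_interruptible(HZ);	/* real code waits on a wait queue */
	return 0;	/* value returned by kthread_stop() */
}

static int __init demo_init(void)
{
	worker = kthread_run(worker_fn, NULL, "demo-worker");
	if (IS_ERR(worker))	/* kthread_run() returns ERR_PTR() on failure */
		return PTR_ERR(worker);
	return 0;
}

static void __exit demo_exit(void)
{
	kthread_stop(worker);	/* wakes the thread and waits for its exit */
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");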
@ -1379,16 +1350,10 @@ int zfcp_erp_thread_setup(struct zfcp_adapter *adapter)
*/
void zfcp_erp_thread_kill(struct zfcp_adapter *adapter)
{
atomic_set_mask(ZFCP_STATUS_ADAPTER_ERP_THREAD_KILL, &adapter->status);
up(&adapter->erp_ready_sem);
zfcp_rec_dbf_event_thread_lock("erthrk1", adapter);
wait_event(adapter->erp_thread_wqh,
!(atomic_read(&adapter->status) &
ZFCP_STATUS_ADAPTER_ERP_THREAD_UP));
atomic_clear_mask(ZFCP_STATUS_ADAPTER_ERP_THREAD_KILL,
&adapter->status);
kthread_stop(adapter->erp_thread);
adapter->erp_thread = NULL;
WARN_ON(!list_empty(&adapter->erp_ready_head));
WARN_ON(!list_empty(&adapter->erp_running_head));
}
/**
@ -1456,11 +1421,11 @@ void zfcp_erp_modify_adapter_status(struct zfcp_adapter *adapter, char *id,
if (set_or_clear == ZFCP_SET) {
if (status_change_set(mask, &adapter->status))
zfcp_rec_dbf_event_adapter(id, ref, adapter);
zfcp_dbf_rec_adapter(id, ref, adapter->dbf);
atomic_set_mask(mask, &adapter->status);
} else {
if (status_change_clear(mask, &adapter->status))
zfcp_rec_dbf_event_adapter(id, ref, adapter);
zfcp_dbf_rec_adapter(id, ref, adapter->dbf);
atomic_clear_mask(mask, &adapter->status);
if (mask & ZFCP_STATUS_COMMON_ERP_FAILED)
atomic_set(&adapter->erp_counter, 0);
@ -1490,11 +1455,11 @@ void zfcp_erp_modify_port_status(struct zfcp_port *port, char *id, void *ref,
if (set_or_clear == ZFCP_SET) {
if (status_change_set(mask, &port->status))
zfcp_rec_dbf_event_port(id, ref, port);
zfcp_dbf_rec_port(id, ref, port);
atomic_set_mask(mask, &port->status);
} else {
if (status_change_clear(mask, &port->status))
zfcp_rec_dbf_event_port(id, ref, port);
zfcp_dbf_rec_port(id, ref, port);
atomic_clear_mask(mask, &port->status);
if (mask & ZFCP_STATUS_COMMON_ERP_FAILED)
atomic_set(&port->erp_counter, 0);
@ -1519,11 +1484,11 @@ void zfcp_erp_modify_unit_status(struct zfcp_unit *unit, char *id, void *ref,
{
if (set_or_clear == ZFCP_SET) {
if (status_change_set(mask, &unit->status))
zfcp_rec_dbf_event_unit(id, ref, unit);
zfcp_dbf_rec_unit(id, ref, unit);
atomic_set_mask(mask, &unit->status);
} else {
if (status_change_clear(mask, &unit->status))
zfcp_rec_dbf_event_unit(id, ref, unit);
zfcp_dbf_rec_unit(id, ref, unit);
atomic_clear_mask(mask, &unit->status);
if (mask & ZFCP_STATUS_COMMON_ERP_FAILED) {
atomic_set(&unit->erp_counter, 0);

View File

@ -34,37 +34,31 @@ extern struct zfcp_adapter *zfcp_get_adapter_by_busid(char *);
extern struct miscdevice zfcp_cfdc_misc;
/* zfcp_dbf.c */
extern int zfcp_adapter_debug_register(struct zfcp_adapter *);
extern void zfcp_adapter_debug_unregister(struct zfcp_adapter *);
extern void zfcp_rec_dbf_event_thread(char *, struct zfcp_adapter *);
extern void zfcp_rec_dbf_event_thread_lock(char *, struct zfcp_adapter *);
extern void zfcp_rec_dbf_event_adapter(char *, void *, struct zfcp_adapter *);
extern void zfcp_rec_dbf_event_port(char *, void *, struct zfcp_port *);
extern void zfcp_rec_dbf_event_unit(char *, void *, struct zfcp_unit *);
extern void zfcp_rec_dbf_event_trigger(char *, void *, u8, u8, void *,
struct zfcp_adapter *,
struct zfcp_port *, struct zfcp_unit *);
extern void zfcp_rec_dbf_event_action(char *, struct zfcp_erp_action *);
extern void zfcp_hba_dbf_event_fsf_response(struct zfcp_fsf_req *);
extern void zfcp_hba_dbf_event_fsf_unsol(const char *, struct zfcp_adapter *,
struct fsf_status_read_buffer *);
extern void zfcp_hba_dbf_event_qdio(struct zfcp_adapter *, unsigned int, int,
int);
extern void zfcp_hba_dbf_event_berr(struct zfcp_adapter *,
struct zfcp_fsf_req *);
extern void zfcp_san_dbf_event_ct_request(struct zfcp_fsf_req *);
extern void zfcp_san_dbf_event_ct_response(struct zfcp_fsf_req *);
extern void zfcp_san_dbf_event_els_request(struct zfcp_fsf_req *);
extern void zfcp_san_dbf_event_els_response(struct zfcp_fsf_req *);
extern void zfcp_san_dbf_event_incoming_els(struct zfcp_fsf_req *);
extern void zfcp_scsi_dbf_event_result(const char *, int, struct zfcp_adapter *,
struct scsi_cmnd *,
struct zfcp_fsf_req *);
extern void zfcp_scsi_dbf_event_abort(const char *, struct zfcp_adapter *,
struct scsi_cmnd *, struct zfcp_fsf_req *,
unsigned long);
extern void zfcp_scsi_dbf_event_devreset(const char *, u8, struct zfcp_unit *,
struct scsi_cmnd *);
extern int zfcp_dbf_adapter_register(struct zfcp_adapter *);
extern void zfcp_dbf_adapter_unregister(struct zfcp_dbf *);
extern void zfcp_dbf_rec_thread(char *, struct zfcp_dbf *);
extern void zfcp_dbf_rec_thread_lock(char *, struct zfcp_dbf *);
extern void zfcp_dbf_rec_adapter(char *, void *, struct zfcp_dbf *);
extern void zfcp_dbf_rec_port(char *, void *, struct zfcp_port *);
extern void zfcp_dbf_rec_unit(char *, void *, struct zfcp_unit *);
extern void zfcp_dbf_rec_trigger(char *, void *, u8, u8, void *,
struct zfcp_adapter *, struct zfcp_port *,
struct zfcp_unit *);
extern void zfcp_dbf_rec_action(char *, struct zfcp_erp_action *);
extern void _zfcp_dbf_hba_fsf_response(const char *, int, struct zfcp_fsf_req *,
struct zfcp_dbf *);
extern void _zfcp_dbf_hba_fsf_unsol(const char *, int level, struct zfcp_dbf *,
struct fsf_status_read_buffer *);
extern void zfcp_dbf_hba_qdio(struct zfcp_dbf *, unsigned int, int, int);
extern void zfcp_dbf_hba_berr(struct zfcp_dbf *, struct zfcp_fsf_req *);
extern void zfcp_dbf_san_ct_request(struct zfcp_fsf_req *);
extern void zfcp_dbf_san_ct_response(struct zfcp_fsf_req *);
extern void zfcp_dbf_san_els_request(struct zfcp_fsf_req *);
extern void zfcp_dbf_san_els_response(struct zfcp_fsf_req *);
extern void zfcp_dbf_san_incoming_els(struct zfcp_fsf_req *);
extern void _zfcp_dbf_scsi(const char *, const char *, int, struct zfcp_dbf *,
struct scsi_cmnd *, struct zfcp_fsf_req *,
unsigned long);
/* zfcp_erp.c */
extern void zfcp_erp_modify_adapter_status(struct zfcp_adapter *, char *,
@ -96,22 +90,20 @@ extern void zfcp_erp_unit_access_denied(struct zfcp_unit *, char *, void *);
extern void zfcp_erp_adapter_access_changed(struct zfcp_adapter *, char *,
void *);
extern void zfcp_erp_timeout_handler(unsigned long);
extern void zfcp_erp_port_strategy_open_lookup(struct work_struct *);
/* zfcp_fc.c */
extern int zfcp_scan_ports(struct zfcp_adapter *);
extern void _zfcp_scan_ports_later(struct work_struct *);
extern int zfcp_fc_scan_ports(struct zfcp_adapter *);
extern void _zfcp_fc_scan_ports_later(struct work_struct *);
extern void zfcp_fc_incoming_els(struct zfcp_fsf_req *);
extern int zfcp_fc_ns_gid_pn(struct zfcp_erp_action *);
extern void zfcp_fc_port_did_lookup(struct work_struct *);
extern void zfcp_fc_plogi_evaluate(struct zfcp_port *, struct fsf_plogi *);
extern void zfcp_test_link(struct zfcp_port *);
extern void zfcp_fc_test_link(struct zfcp_port *);
extern void zfcp_fc_link_test_work(struct work_struct *);
extern void zfcp_fc_wka_port_force_offline(struct zfcp_wka_port *);
extern void zfcp_fc_wka_ports_init(struct zfcp_adapter *);
extern void zfcp_fc_wka_ports_force_offline(struct zfcp_wka_ports *);
extern int zfcp_fc_gs_setup(struct zfcp_adapter *);
extern void zfcp_fc_gs_destroy(struct zfcp_adapter *);
extern int zfcp_fc_execute_els_fc_job(struct fc_bsg_job *);
extern int zfcp_fc_execute_ct_fc_job(struct fc_bsg_job *);
extern void zfcp_fc_wka_port_force_offline(struct zfcp_wka_port *);
/* zfcp_fsf.c */
extern int zfcp_fsf_open_port(struct zfcp_erp_action *);
@ -122,37 +114,39 @@ extern int zfcp_fsf_close_physical_port(struct zfcp_erp_action *);
extern int zfcp_fsf_open_unit(struct zfcp_erp_action *);
extern int zfcp_fsf_close_unit(struct zfcp_erp_action *);
extern int zfcp_fsf_exchange_config_data(struct zfcp_erp_action *);
extern int zfcp_fsf_exchange_config_data_sync(struct zfcp_adapter *,
extern int zfcp_fsf_exchange_config_data_sync(struct zfcp_qdio *,
struct fsf_qtcb_bottom_config *);
extern int zfcp_fsf_exchange_port_data(struct zfcp_erp_action *);
extern int zfcp_fsf_exchange_port_data_sync(struct zfcp_adapter *,
extern int zfcp_fsf_exchange_port_data_sync(struct zfcp_qdio *,
struct fsf_qtcb_bottom_port *);
extern struct zfcp_fsf_req *zfcp_fsf_control_file(struct zfcp_adapter *,
struct zfcp_fsf_cfdc *);
extern void zfcp_fsf_req_dismiss_all(struct zfcp_adapter *);
extern int zfcp_fsf_status_read(struct zfcp_adapter *);
extern int zfcp_fsf_status_read(struct zfcp_qdio *);
extern int zfcp_status_read_refill(struct zfcp_adapter *adapter);
extern int zfcp_fsf_send_ct(struct zfcp_send_ct *, mempool_t *,
struct zfcp_erp_action *);
extern int zfcp_fsf_send_ct(struct zfcp_send_ct *, mempool_t *);
extern int zfcp_fsf_send_els(struct zfcp_send_els *);
extern int zfcp_fsf_send_fcp_command_task(struct zfcp_unit *,
struct scsi_cmnd *);
extern void zfcp_fsf_req_complete(struct zfcp_fsf_req *);
extern void zfcp_fsf_req_free(struct zfcp_fsf_req *);
extern struct zfcp_fsf_req *zfcp_fsf_send_fcp_ctm(struct zfcp_unit *, u8);
extern struct zfcp_fsf_req *zfcp_fsf_abort_fcp_command(unsigned long,
struct zfcp_unit *);
extern void zfcp_fsf_reqid_check(struct zfcp_qdio *, int);
/* zfcp_qdio.c */
extern int zfcp_qdio_allocate(struct zfcp_adapter *);
extern void zfcp_qdio_free(struct zfcp_adapter *);
extern int zfcp_qdio_send(struct zfcp_fsf_req *);
extern struct qdio_buffer_element *zfcp_qdio_sbale_req(struct zfcp_fsf_req *);
extern struct qdio_buffer_element *zfcp_qdio_sbale_curr(struct zfcp_fsf_req *);
extern int zfcp_qdio_sbals_from_sg(struct zfcp_fsf_req *, unsigned long,
extern int zfcp_qdio_setup(struct zfcp_adapter *);
extern void zfcp_qdio_destroy(struct zfcp_qdio *);
extern int zfcp_qdio_send(struct zfcp_qdio *, struct zfcp_queue_req *);
extern struct qdio_buffer_element
*zfcp_qdio_sbale_req(struct zfcp_qdio *, struct zfcp_queue_req *);
extern struct qdio_buffer_element
*zfcp_qdio_sbale_curr(struct zfcp_qdio *, struct zfcp_queue_req *);
extern int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *,
struct zfcp_queue_req *, unsigned long,
struct scatterlist *, int);
extern int zfcp_qdio_open(struct zfcp_adapter *);
extern void zfcp_qdio_close(struct zfcp_adapter *);
extern int zfcp_qdio_open(struct zfcp_qdio *);
extern void zfcp_qdio_close(struct zfcp_qdio *);
/* zfcp_scsi.c */
extern struct zfcp_data zfcp_data;

View File

@ -25,14 +25,6 @@ static u32 rscn_range_mask[] = {
[RSCN_FABRIC_ADDRESS] = 0x000000,
};
struct ct_iu_gpn_ft_req {
struct ct_hdr header;
u8 flags;
u8 domain_id_scope;
u8 area_id_scope;
u8 fc4_type;
} __attribute__ ((packed));
struct gpn_ft_resp_acc {
u8 control;
u8 port_id[3];
@ -65,7 +57,7 @@ struct zfcp_fc_ns_handler_data {
unsigned long handler_data;
};
static int zfcp_wka_port_get(struct zfcp_wka_port *wka_port)
static int zfcp_fc_wka_port_get(struct zfcp_wka_port *wka_port)
{
if (mutex_lock_interruptible(&wka_port->mutex))
return -ERESTARTSYS;
@ -90,7 +82,7 @@ static int zfcp_wka_port_get(struct zfcp_wka_port *wka_port)
return -EIO;
}
static void zfcp_wka_port_offline(struct work_struct *work)
static void zfcp_fc_wka_port_offline(struct work_struct *work)
{
struct delayed_work *dw = to_delayed_work(work);
struct zfcp_wka_port *wka_port =
@ -110,7 +102,7 @@ out:
mutex_unlock(&wka_port->mutex);
}
static void zfcp_wka_port_put(struct zfcp_wka_port *wka_port)
static void zfcp_fc_wka_port_put(struct zfcp_wka_port *wka_port)
{
if (atomic_dec_return(&wka_port->refcount) != 0)
return;
@ -129,10 +121,10 @@ static void zfcp_fc_wka_port_init(struct zfcp_wka_port *wka_port, u32 d_id,
wka_port->status = ZFCP_WKA_PORT_OFFLINE;
atomic_set(&wka_port->refcount, 0);
mutex_init(&wka_port->mutex);
INIT_DELAYED_WORK(&wka_port->work, zfcp_wka_port_offline);
INIT_DELAYED_WORK(&wka_port->work, zfcp_fc_wka_port_offline);
}
void zfcp_fc_wka_port_force_offline(struct zfcp_wka_port *wka)
static void zfcp_fc_wka_port_force_offline(struct zfcp_wka_port *wka)
{
cancel_delayed_work_sync(&wka->work);
mutex_lock(&wka->mutex);
@ -140,15 +132,13 @@ void zfcp_fc_wka_port_force_offline(struct zfcp_wka_port *wka)
mutex_unlock(&wka->mutex);
}
void zfcp_fc_wka_ports_init(struct zfcp_adapter *adapter)
void zfcp_fc_wka_ports_force_offline(struct zfcp_wka_ports *gs)
{
struct zfcp_wka_ports *gs = adapter->gs;
zfcp_fc_wka_port_init(&gs->ms, FC_FID_MGMT_SERV, adapter);
zfcp_fc_wka_port_init(&gs->ts, FC_FID_TIME_SERV, adapter);
zfcp_fc_wka_port_init(&gs->ds, FC_FID_DIR_SERV, adapter);
zfcp_fc_wka_port_init(&gs->as, FC_FID_ALIASES, adapter);
zfcp_fc_wka_port_init(&gs->ks, FC_FID_SEC_KEY, adapter);
zfcp_fc_wka_port_force_offline(&gs->ms);
zfcp_fc_wka_port_force_offline(&gs->ts);
zfcp_fc_wka_port_force_offline(&gs->ds);
zfcp_fc_wka_port_force_offline(&gs->as);
zfcp_fc_wka_port_force_offline(&gs->ks);
}
static void _zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req, u32 range,
@ -160,7 +150,7 @@ static void _zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req, u32 range,
read_lock_irqsave(&zfcp_data.config_lock, flags);
list_for_each_entry(port, &fsf_req->adapter->port_list_head, list) {
if ((port->d_id & range) == (elem->nport_did & range))
zfcp_test_link(port);
zfcp_fc_test_link(port);
if (!port->d_id)
zfcp_erp_port_reopen(port,
ZFCP_STATUS_COMMON_ERP_FAILED,
@ -241,7 +231,7 @@ void zfcp_fc_incoming_els(struct zfcp_fsf_req *fsf_req)
(struct fsf_status_read_buffer *) fsf_req->data;
unsigned int els_type = status_buffer->payload.data[0];
zfcp_san_dbf_event_incoming_els(fsf_req);
zfcp_dbf_san_incoming_els(fsf_req);
if (els_type == LS_PLOGI)
zfcp_fc_incoming_plogi(fsf_req);
else if (els_type == LS_LOGO)
@ -281,19 +271,18 @@ static void zfcp_fc_ns_gid_pn_eval(unsigned long data)
port->d_id = ct_iu_resp->d_id & ZFCP_DID_MASK;
}
int static zfcp_fc_ns_gid_pn_request(struct zfcp_erp_action *erp_action,
static int zfcp_fc_ns_gid_pn_request(struct zfcp_port *port,
struct zfcp_gid_pn_data *gid_pn)
{
struct zfcp_adapter *adapter = erp_action->adapter;
struct zfcp_adapter *adapter = port->adapter;
struct zfcp_fc_ns_handler_data compl_rec;
int ret;
/* setup parameters for send generic command */
gid_pn->port = erp_action->port;
gid_pn->port = port;
gid_pn->ct.wka_port = &adapter->gs->ds;
gid_pn->ct.handler = zfcp_fc_ns_handler;
gid_pn->ct.handler_data = (unsigned long) &compl_rec;
gid_pn->ct.timeout = ZFCP_NS_GID_PN_TIMEOUT;
gid_pn->ct.req = &gid_pn->req;
gid_pn->ct.resp = &gid_pn->resp;
sg_init_one(&gid_pn->req, &gid_pn->ct_iu_req,
@ -308,13 +297,12 @@ int static zfcp_fc_ns_gid_pn_request(struct zfcp_erp_action *erp_action,
gid_pn->ct_iu_req.header.options = ZFCP_CT_SYNCHRONOUS;
gid_pn->ct_iu_req.header.cmd_rsp_code = ZFCP_CT_GID_PN;
gid_pn->ct_iu_req.header.max_res_size = ZFCP_CT_SIZE_ONE_PAGE / 4;
gid_pn->ct_iu_req.wwpn = erp_action->port->wwpn;
gid_pn->ct_iu_req.wwpn = port->wwpn;
init_completion(&compl_rec.done);
compl_rec.handler = zfcp_fc_ns_gid_pn_eval;
compl_rec.handler_data = (unsigned long) gid_pn;
ret = zfcp_fsf_send_ct(&gid_pn->ct, adapter->pool.fsf_req_erp,
erp_action);
ret = zfcp_fsf_send_ct(&gid_pn->ct, adapter->pool.gid_pn_req);
if (!ret)
wait_for_completion(&compl_rec.done);
return ret;
@ -322,33 +310,56 @@ int static zfcp_fc_ns_gid_pn_request(struct zfcp_erp_action *erp_action,
/**
* zfcp_fc_ns_gid_pn - initiate GID_PN nameserver request
* @erp_action: pointer to zfcp_erp_action where GID_PN request is needed
* @port: port where GID_PN request is needed
* return: -ENOMEM on error, 0 otherwise
*/
int zfcp_fc_ns_gid_pn(struct zfcp_erp_action *erp_action)
static int zfcp_fc_ns_gid_pn(struct zfcp_port *port)
{
int ret;
struct zfcp_gid_pn_data *gid_pn;
struct zfcp_adapter *adapter = erp_action->adapter;
struct zfcp_adapter *adapter = port->adapter;
gid_pn = mempool_alloc(adapter->pool.data_gid_pn, GFP_ATOMIC);
gid_pn = mempool_alloc(adapter->pool.gid_pn_data, GFP_ATOMIC);
if (!gid_pn)
return -ENOMEM;
memset(gid_pn, 0, sizeof(*gid_pn));
ret = zfcp_wka_port_get(&adapter->gs->ds);
ret = zfcp_fc_wka_port_get(&adapter->gs->ds);
if (ret)
goto out;
ret = zfcp_fc_ns_gid_pn_request(erp_action, gid_pn);
ret = zfcp_fc_ns_gid_pn_request(port, gid_pn);
zfcp_wka_port_put(&adapter->gs->ds);
zfcp_fc_wka_port_put(&adapter->gs->ds);
out:
mempool_free(gid_pn, adapter->pool.data_gid_pn);
mempool_free(gid_pn, adapter->pool.gid_pn_data);
return ret;
}
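The lookup above draws its request data from a mempool, so GID_PN can still make progress under memory pressure where a plain kmalloc() might fail. A short sketch of the mempool_alloc()/mempool_free() pairing it relies on, with a hypothetical element type (not part of this patch):

#include <linux/errno.h>
#include <linux/mempool.h>
#include <linux/string.h>

struct req_data { char buf[128]; };	/* hypothetical stand-in for gid_pn data */

/* a pool guaranteeing at least four preallocated elements */
static mempool_t *make_pool(void)
{
	return mempool_create_kmalloc_pool(4, sizeof(struct req_data));
}

static int use_pool(mempool_t *pool)
{
	struct req_data *p;

	p = mempool_alloc(pool, GFP_ATOMIC);	/* may fall back to the reserve */
	if (!p)
		return -ENOMEM;
	memset(p, 0, sizeof(*p));	/* mempool does not zero returned memory */
	/* ... issue the request, wait for completion ... */
	mempool_free(p, pool);	/* give the element back to the pool */
	return 0;
}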
void zfcp_fc_port_did_lookup(struct work_struct *work)
{
int ret;
struct zfcp_port *port = container_of(work, struct zfcp_port,
gid_pn_work);
ret = zfcp_fc_ns_gid_pn(port);
if (ret) {
/* could not issue gid_pn for some reason */
zfcp_erp_adapter_reopen(port->adapter, 0, "fcgpn_1", NULL);
goto out;
}
if (!port->d_id) {
zfcp_erp_port_failed(port, "fcgpn_2", NULL);
goto out;
}
zfcp_erp_port_reopen(port, 0, "fcgpn_3", NULL);
out:
zfcp_port_put(port);
}
/**
* zfcp_fc_plogi_evaluate - evaluate PLOGI payload
* @port: zfcp_port structure
@ -404,6 +415,7 @@ static void zfcp_fc_adisc_handler(unsigned long data)
/* port is good, unblock rport without going through erp */
zfcp_scsi_schedule_rport_register(port);
out:
atomic_clear_mask(ZFCP_STATUS_PORT_LINK_TEST, &port->status);
zfcp_port_put(port);
kfree(adisc);
}
@ -450,28 +462,36 @@ void zfcp_fc_link_test_work(struct work_struct *work)
port->rport_task = RPORT_DEL;
zfcp_scsi_rport_work(&port->rport_work);
/* only issue one test command at a time per port */
if (atomic_read(&port->status) & ZFCP_STATUS_PORT_LINK_TEST)
goto out;
atomic_set_mask(ZFCP_STATUS_PORT_LINK_TEST, &port->status);
retval = zfcp_fc_adisc(port);
if (retval == 0)
return;
/* sending ADISC was not possible */
atomic_clear_mask(ZFCP_STATUS_PORT_LINK_TEST, &port->status);
zfcp_erp_port_forced_reopen(port, 0, "fcltwk1", NULL);
out:
zfcp_port_put(port);
}
/**
* zfcp_test_link - lightweight link test procedure
* zfcp_fc_test_link - lightweight link test procedure
* @port: port to be tested
*
* Test status of a link to a remote port using the ELS command ADISC.
* If there is a problem with the remote port, error recovery steps
* will be triggered.
*/
void zfcp_test_link(struct zfcp_port *port)
void zfcp_fc_test_link(struct zfcp_port *port)
{
zfcp_port_get(port);
if (!queue_work(zfcp_data.work_queue, &port->test_link_work))
if (!queue_work(port->adapter->work_queue, &port->test_link_work))
zfcp_port_put(port);
}
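The helper above pins the port with a reference before queueing the work item, and drops that reference again when queue_work() reports the item was already pending, so exactly one reference is left for the worker to release. A sketch of the same pattern with a generic kref (hypothetical names, not part of this patch):

#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/workqueue.h>

struct port_like {
	struct kref ref;
	struct work_struct test_link_work;
	struct workqueue_struct *wq;
};

static void port_release(struct kref *ref)
{
	/* final teardown once the last reference is gone */
}

static void test_link_work(struct work_struct *work)
{
	struct port_like *port = container_of(work, struct port_like,
					      test_link_work);
	/* ... run the link test ... */
	kref_put(&port->ref, port_release);	/* drop the queueing reference */
}

static void test_link(struct port_like *port)
{
	kref_get(&port->ref);	/* pin the port for the worker */
	if (!queue_work(port->wq, &port->test_link_work))
		kref_put(&port->ref, port_release);	/* already queued */
}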
@ -479,7 +499,7 @@ static void zfcp_free_sg_env(struct zfcp_gpn_ft *gpn_ft, int buf_num)
{
struct scatterlist *sg = &gpn_ft->sg_req;
kfree(sg_virt(sg)); /* free request buffer */
kmem_cache_free(zfcp_data.gpn_ft_cache, sg_virt(sg));
zfcp_sg_free_table(gpn_ft->sg_resp, buf_num);
kfree(gpn_ft);
@ -494,7 +514,7 @@ static struct zfcp_gpn_ft *zfcp_alloc_sg_env(int buf_num)
if (!gpn_ft)
return NULL;
req = kzalloc(sizeof(struct ct_iu_gpn_ft_req), GFP_KERNEL);
req = kmem_cache_alloc(zfcp_data.gpn_ft_cache, GFP_KERNEL);
if (!req) {
kfree(gpn_ft);
gpn_ft = NULL;
@ -511,9 +531,8 @@ out:
}
static int zfcp_scan_issue_gpn_ft(struct zfcp_gpn_ft *gpn_ft,
struct zfcp_adapter *adapter,
int max_bytes)
static int zfcp_fc_send_gpn_ft(struct zfcp_gpn_ft *gpn_ft,
struct zfcp_adapter *adapter, int max_bytes)
{
struct zfcp_send_ct *ct = &gpn_ft->ct;
struct ct_iu_gpn_ft_req *req = sg_virt(&gpn_ft->sg_req);
@ -536,19 +555,18 @@ static int zfcp_scan_issue_gpn_ft(struct zfcp_gpn_ft *gpn_ft,
ct->wka_port = &adapter->gs->ds;
ct->handler = zfcp_fc_ns_handler;
ct->handler_data = (unsigned long)&compl_rec;
ct->timeout = 10;
ct->req = &gpn_ft->sg_req;
ct->resp = gpn_ft->sg_resp;
init_completion(&compl_rec.done);
compl_rec.handler = NULL;
ret = zfcp_fsf_send_ct(ct, NULL, NULL);
ret = zfcp_fsf_send_ct(ct, NULL);
if (!ret)
wait_for_completion(&compl_rec.done);
return ret;
}
static void zfcp_validate_port(struct zfcp_port *port)
static void zfcp_fc_validate_port(struct zfcp_port *port)
{
struct zfcp_adapter *adapter = port->adapter;
@ -568,7 +586,7 @@ static void zfcp_validate_port(struct zfcp_port *port)
zfcp_port_dequeue(port);
}
static int zfcp_scan_eval_gpn_ft(struct zfcp_gpn_ft *gpn_ft, int max_entries)
static int zfcp_fc_eval_gpn_ft(struct zfcp_gpn_ft *gpn_ft, int max_entries)
{
struct zfcp_send_ct *ct = &gpn_ft->ct;
struct scatterlist *sg = gpn_ft->sg_resp;
@ -595,7 +613,7 @@ static int zfcp_scan_eval_gpn_ft(struct zfcp_gpn_ft *gpn_ft, int max_entries)
return -E2BIG;
}
down(&zfcp_data.config_sema);
mutex_lock(&zfcp_data.config_mutex);
/* first entry is the header */
for (x = 1; x < max_entries && !last; x++) {
@ -628,16 +646,16 @@ static int zfcp_scan_eval_gpn_ft(struct zfcp_gpn_ft *gpn_ft, int max_entries)
zfcp_erp_wait(adapter);
list_for_each_entry_safe(port, tmp, &adapter->port_list_head, list)
zfcp_validate_port(port);
up(&zfcp_data.config_sema);
zfcp_fc_validate_port(port);
mutex_unlock(&zfcp_data.config_mutex);
return ret;
}
/**
* zfcp_scan_ports - scan remote ports and attach new ports
* zfcp_fc_scan_ports - scan remote ports and attach new ports
* @adapter: pointer to struct zfcp_adapter
*/
int zfcp_scan_ports(struct zfcp_adapter *adapter)
int zfcp_fc_scan_ports(struct zfcp_adapter *adapter)
{
int ret, i;
struct zfcp_gpn_ft *gpn_ft;
@ -652,7 +670,7 @@ int zfcp_scan_ports(struct zfcp_adapter *adapter)
fc_host_port_type(adapter->scsi_host) != FC_PORTTYPE_NPIV)
return 0;
ret = zfcp_wka_port_get(&adapter->gs->ds);
ret = zfcp_fc_wka_port_get(&adapter->gs->ds);
if (ret)
return ret;
@ -663,9 +681,9 @@ int zfcp_scan_ports(struct zfcp_adapter *adapter)
}
for (i = 0; i < 3; i++) {
ret = zfcp_scan_issue_gpn_ft(gpn_ft, adapter, max_bytes);
ret = zfcp_fc_send_gpn_ft(gpn_ft, adapter, max_bytes);
if (!ret) {
ret = zfcp_scan_eval_gpn_ft(gpn_ft, max_entries);
ret = zfcp_fc_eval_gpn_ft(gpn_ft, max_entries);
if (ret == -EAGAIN)
ssleep(1);
else
@ -674,14 +692,14 @@ int zfcp_scan_ports(struct zfcp_adapter *adapter)
}
zfcp_free_sg_env(gpn_ft, buf_num);
out:
zfcp_wka_port_put(&adapter->gs->ds);
zfcp_fc_wka_port_put(&adapter->gs->ds);
return ret;
}
void _zfcp_scan_ports_later(struct work_struct *work)
void _zfcp_fc_scan_ports_later(struct work_struct *work)
{
zfcp_scan_ports(container_of(work, struct zfcp_adapter, scan_work));
zfcp_fc_scan_ports(container_of(work, struct zfcp_adapter, scan_work));
}
struct zfcp_els_fc_job {
@ -732,7 +750,7 @@ int zfcp_fc_execute_els_fc_job(struct fc_bsg_job *job)
els_fc_job->els.adapter = adapter;
if (rport) {
read_lock_irq(&zfcp_data.config_lock);
port = rport->dd_data;
port = zfcp_get_port_by_wwpn(adapter, rport->port_name);
if (port)
els_fc_job->els.d_id = port->d_id;
read_unlock_irq(&zfcp_data.config_lock);
@ -771,7 +789,7 @@ static void zfcp_fc_generic_ct_handler(unsigned long data)
job->state_flags = FC_RQST_STATE_DONE;
job->job_done(job);
zfcp_wka_port_put(ct_fc_job->ct.wka_port);
zfcp_fc_wka_port_put(ct_fc_job->ct.wka_port);
kfree(ct_fc_job);
}
@ -817,7 +835,7 @@ int zfcp_fc_execute_ct_fc_job(struct fc_bsg_job *job)
return -EINVAL; /* no such service */
}
ret = zfcp_wka_port_get(ct_fc_job->ct.wka_port);
ret = zfcp_fc_wka_port_get(ct_fc_job->ct.wka_port);
if (ret) {
kfree(ct_fc_job);
return ret;
@ -825,16 +843,40 @@ int zfcp_fc_execute_ct_fc_job(struct fc_bsg_job *job)
ct_fc_job->ct.req = job->request_payload.sg_list;
ct_fc_job->ct.resp = job->reply_payload.sg_list;
ct_fc_job->ct.timeout = ZFCP_FSF_REQUEST_TIMEOUT;
ct_fc_job->ct.handler = zfcp_fc_generic_ct_handler;
ct_fc_job->ct.handler_data = (unsigned long) ct_fc_job;
ct_fc_job->ct.completion = NULL;
ct_fc_job->job = job;
ret = zfcp_fsf_send_ct(&ct_fc_job->ct, NULL, NULL);
ret = zfcp_fsf_send_ct(&ct_fc_job->ct, NULL);
if (ret) {
kfree(ct_fc_job);
zfcp_wka_port_put(ct_fc_job->ct.wka_port);
zfcp_fc_wka_port_put(ct_fc_job->ct.wka_port);
}
return ret;
}
int zfcp_fc_gs_setup(struct zfcp_adapter *adapter)
{
struct zfcp_wka_ports *wka_ports;
wka_ports = kzalloc(sizeof(struct zfcp_wka_ports), GFP_KERNEL);
if (!wka_ports)
return -ENOMEM;
adapter->gs = wka_ports;
zfcp_fc_wka_port_init(&wka_ports->ms, FC_FID_MGMT_SERV, adapter);
zfcp_fc_wka_port_init(&wka_ports->ts, FC_FID_TIME_SERV, adapter);
zfcp_fc_wka_port_init(&wka_ports->ds, FC_FID_DIR_SERV, adapter);
zfcp_fc_wka_port_init(&wka_ports->as, FC_FID_ALIASES, adapter);
zfcp_fc_wka_port_init(&wka_ports->ks, FC_FID_SEC_KEY, adapter);
return 0;
}
void zfcp_fc_gs_destroy(struct zfcp_adapter *adapter)
{
kfree(adapter->gs);
adapter->gs = NULL;
}

File diff suppressed because it is too large

View File

@ -3,13 +3,14 @@
*
* Interface to the FSF support functions.
*
* Copyright IBM Corporation 2002, 2008
* Copyright IBM Corporation 2002, 2009
*/
#ifndef FSF_H
#define FSF_H
#include <linux/pfn.h>
#include <linux/scatterlist.h>
#define FSF_QTCB_CURRENT_VERSION 0x00000001

View File

@ -3,7 +3,7 @@
*
* Setup and helper functions to access QDIO.
*
* Copyright IBM Corporation 2002, 2008
* Copyright IBM Corporation 2002, 2009
*/
#define KMSG_COMPONENT "zfcp"
@ -34,29 +34,10 @@ zfcp_qdio_sbale(struct zfcp_qdio_queue *q, int sbal_idx, int sbale_idx)
return &q->sbal[sbal_idx]->element[sbale_idx];
}
/**
* zfcp_qdio_free - free memory used by request and response queues
* @adapter: pointer to the zfcp_adapter structure
*/
void zfcp_qdio_free(struct zfcp_adapter *adapter)
static void zfcp_qdio_handler_error(struct zfcp_qdio *qdio, char *id)
{
struct qdio_buffer **sbal_req, **sbal_resp;
int p;
struct zfcp_adapter *adapter = qdio->adapter;
if (adapter->ccw_device)
qdio_free(adapter->ccw_device);
sbal_req = adapter->req_q.sbal;
sbal_resp = adapter->resp_q.sbal;
for (p = 0; p < QDIO_MAX_BUFFERS_PER_Q; p += QBUFF_PER_PAGE) {
free_page((unsigned long) sbal_req[p]);
free_page((unsigned long) sbal_resp[p]);
}
}
static void zfcp_qdio_handler_error(struct zfcp_adapter *adapter, char *id)
{
dev_warn(&adapter->ccw_device->dev, "A QDIO problem occurred\n");
zfcp_erp_adapter_reopen(adapter,
@ -75,72 +56,47 @@ static void zfcp_qdio_zero_sbals(struct qdio_buffer *sbal[], int first, int cnt)
}
/* this needs to be called prior to updating the queue fill level */
static void zfcp_qdio_account(struct zfcp_adapter *adapter)
static inline void zfcp_qdio_account(struct zfcp_qdio *qdio)
{
ktime_t now;
s64 span;
unsigned long long now, span;
int free, used;
spin_lock(&adapter->qdio_stat_lock);
now = ktime_get();
span = ktime_us_delta(now, adapter->req_q_time);
free = max(0, atomic_read(&adapter->req_q.count));
spin_lock(&qdio->stat_lock);
now = get_clock_monotonic();
span = (now - qdio->req_q_time) >> 12;
free = atomic_read(&qdio->req_q.count);
used = QDIO_MAX_BUFFERS_PER_Q - free;
adapter->req_q_util += used * span;
adapter->req_q_time = now;
spin_unlock(&adapter->qdio_stat_lock);
qdio->req_q_util += used * span;
qdio->req_q_time = now;
spin_unlock(&qdio->stat_lock);
}
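The accounting above keeps a running integral of queue utilization: on every fill-level change it adds "slots in use times microseconds elapsed", where the shift by 12 converts s390 TOD clock units (4096 per microsecond) into microseconds. A small user-space sketch of the same running sum, with a POSIX monotonic clock standing in for get_clock_monotonic() (illustrative only):

#include <stdio.h>
#include <time.h>

#define QUEUE_SLOTS 128	/* like QDIO_MAX_BUFFERS_PER_Q */

struct acct {
	unsigned long long last_us;	/* time of last fill-level change */
	unsigned long long util;	/* sum of used-slots * microseconds */
};

static unsigned long long now_us(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * 1000000ULL + ts.tv_nsec / 1000;
}

/* call before each fill-level change, like zfcp_qdio_account() */
static void account(struct acct *a, int free_slots)
{
	unsigned long long now = now_us();
	int used = QUEUE_SLOTS - free_slots;

	a->util += used * (now - a->last_us);
	a->last_us = now;
}

int main(void)
{
	struct acct a = { .last_us = now_us(), .util = 0 };

	account(&a, 100);	/* 28 slots were busy since the last sample */
	printf("util = %llu slot-microseconds\n", a.util);
	return 0;
}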
static void zfcp_qdio_int_req(struct ccw_device *cdev, unsigned int qdio_err,
int queue_no, int first, int count,
unsigned long parm)
{
struct zfcp_adapter *adapter = (struct zfcp_adapter *) parm;
struct zfcp_qdio_queue *queue = &adapter->req_q;
struct zfcp_qdio *qdio = (struct zfcp_qdio *) parm;
struct zfcp_qdio_queue *queue = &qdio->req_q;
if (unlikely(qdio_err)) {
zfcp_hba_dbf_event_qdio(adapter, qdio_err, first, count);
zfcp_qdio_handler_error(adapter, "qdireq1");
zfcp_dbf_hba_qdio(qdio->adapter->dbf, qdio_err, first,
count);
zfcp_qdio_handler_error(qdio, "qdireq1");
return;
}
/* cleanup all SBALs being program-owned now */
zfcp_qdio_zero_sbals(queue->sbal, first, count);
zfcp_qdio_account(adapter);
zfcp_qdio_account(qdio);
atomic_add(count, &queue->count);
wake_up(&adapter->request_wq);
wake_up(&qdio->req_q_wq);
}
static void zfcp_qdio_reqid_check(struct zfcp_adapter *adapter,
unsigned long req_id, int sbal_idx)
static void zfcp_qdio_resp_put_back(struct zfcp_qdio *qdio, int processed)
{
struct zfcp_fsf_req *fsf_req;
unsigned long flags;
spin_lock_irqsave(&adapter->req_list_lock, flags);
fsf_req = zfcp_reqlist_find(adapter, req_id);
if (!fsf_req)
/*
* Unknown request means that we potentially have memory
* corruption and must stop the machine immediately.
*/
panic("error: unknown request id (%lx) on adapter %s.\n",
req_id, dev_name(&adapter->ccw_device->dev));
zfcp_reqlist_remove(adapter, fsf_req);
spin_unlock_irqrestore(&adapter->req_list_lock, flags);
fsf_req->sbal_response = sbal_idx;
fsf_req->qdio_inb_usage = atomic_read(&adapter->resp_q.count);
zfcp_fsf_req_complete(fsf_req);
}
static void zfcp_qdio_resp_put_back(struct zfcp_adapter *adapter, int processed)
{
struct zfcp_qdio_queue *queue = &adapter->resp_q;
struct ccw_device *cdev = adapter->ccw_device;
struct zfcp_qdio_queue *queue = &qdio->resp_q;
struct ccw_device *cdev = qdio->adapter->ccw_device;
u8 count, start = queue->first;
unsigned int retval;
@ -162,14 +118,13 @@ static void zfcp_qdio_int_resp(struct ccw_device *cdev, unsigned int qdio_err,
int queue_no, int first, int count,
unsigned long parm)
{
struct zfcp_adapter *adapter = (struct zfcp_adapter *) parm;
struct zfcp_qdio_queue *queue = &adapter->resp_q;
struct qdio_buffer_element *sbale;
int sbal_idx, sbale_idx, sbal_no;
struct zfcp_qdio *qdio = (struct zfcp_qdio *) parm;
int sbal_idx, sbal_no;
if (unlikely(qdio_err)) {
zfcp_hba_dbf_event_qdio(adapter, qdio_err, first, count);
zfcp_qdio_handler_error(adapter, "qdires1");
zfcp_dbf_hba_qdio(qdio->adapter->dbf, qdio_err, first,
count);
zfcp_qdio_handler_error(qdio, "qdires1");
return;
}
@ -179,39 +134,27 @@ static void zfcp_qdio_int_resp(struct ccw_device *cdev, unsigned int qdio_err,
*/
for (sbal_no = 0; sbal_no < count; sbal_no++) {
sbal_idx = (first + sbal_no) % QDIO_MAX_BUFFERS_PER_Q;
/* go through all SBALEs of SBAL */
for (sbale_idx = 0; sbale_idx < QDIO_MAX_ELEMENTS_PER_BUFFER;
sbale_idx++) {
sbale = zfcp_qdio_sbale(queue, sbal_idx, sbale_idx);
zfcp_qdio_reqid_check(adapter,
(unsigned long) sbale->addr,
sbal_idx);
if (likely(sbale->flags & SBAL_FLAGS_LAST_ENTRY))
break;
};
if (unlikely(!(sbale->flags & SBAL_FLAGS_LAST_ENTRY)))
dev_warn(&adapter->ccw_device->dev,
"A QDIO protocol error occurred, "
"operations continue\n");
zfcp_fsf_reqid_check(qdio, sbal_idx);
}
/*
* put range of SBALs back to response queue
* (including SBALs which have already been free before)
*/
zfcp_qdio_resp_put_back(adapter, count);
zfcp_qdio_resp_put_back(qdio, count);
}
/**
* zfcp_qdio_sbale_req - return ptr to SBALE of req_q for a struct zfcp_fsf_req
* @fsf_req: pointer to struct fsf_req
* @qdio: pointer to struct zfcp_qdio
* @q_req: pointer to struct zfcp_queue_req
* Returns: pointer to qdio_buffer_element (SBALE) structure
*/
struct qdio_buffer_element *zfcp_qdio_sbale_req(struct zfcp_fsf_req *req)
struct qdio_buffer_element *zfcp_qdio_sbale_req(struct zfcp_qdio *qdio,
struct zfcp_queue_req *q_req)
{
return zfcp_qdio_sbale(&req->adapter->req_q, req->sbal_last, 0);
return zfcp_qdio_sbale(&qdio->req_q, q_req->sbal_last, 0);
}
/**
@ -219,74 +162,80 @@ struct qdio_buffer_element *zfcp_qdio_sbale_req(struct zfcp_fsf_req *req)
* @qdio: pointer to struct zfcp_qdio
* @q_req: pointer to struct zfcp_queue_req
* Returns: pointer to qdio_buffer_element (SBALE) structure
*/
struct qdio_buffer_element *zfcp_qdio_sbale_curr(struct zfcp_fsf_req *req)
struct qdio_buffer_element *zfcp_qdio_sbale_curr(struct zfcp_qdio *qdio,
struct zfcp_queue_req *q_req)
{
return zfcp_qdio_sbale(&req->adapter->req_q, req->sbal_last,
req->sbale_curr);
return zfcp_qdio_sbale(&qdio->req_q, q_req->sbal_last,
q_req->sbale_curr);
}
static void zfcp_qdio_sbal_limit(struct zfcp_fsf_req *fsf_req, int max_sbals)
static void zfcp_qdio_sbal_limit(struct zfcp_qdio *qdio,
struct zfcp_queue_req *q_req, int max_sbals)
{
int count = atomic_read(&fsf_req->adapter->req_q.count);
int count = atomic_read(&qdio->req_q.count);
count = min(count, max_sbals);
fsf_req->sbal_limit = (fsf_req->sbal_first + count - 1)
q_req->sbal_limit = (q_req->sbal_first + count - 1)
% QDIO_MAX_BUFFERS_PER_Q;
}
static struct qdio_buffer_element *
zfcp_qdio_sbal_chain(struct zfcp_fsf_req *fsf_req, unsigned long sbtype)
zfcp_qdio_sbal_chain(struct zfcp_qdio *qdio, struct zfcp_queue_req *q_req,
unsigned long sbtype)
{
struct qdio_buffer_element *sbale;
/* set last entry flag in current SBALE of current SBAL */
sbale = zfcp_qdio_sbale_curr(fsf_req);
sbale = zfcp_qdio_sbale_curr(qdio, q_req);
sbale->flags |= SBAL_FLAGS_LAST_ENTRY;
/* don't exceed last allowed SBAL */
if (fsf_req->sbal_last == fsf_req->sbal_limit)
if (q_req->sbal_last == q_req->sbal_limit)
return NULL;
/* set chaining flag in first SBALE of current SBAL */
sbale = zfcp_qdio_sbale_req(fsf_req);
sbale = zfcp_qdio_sbale_req(qdio, q_req);
sbale->flags |= SBAL_FLAGS0_MORE_SBALS;
/* calculate index of next SBAL */
fsf_req->sbal_last++;
fsf_req->sbal_last %= QDIO_MAX_BUFFERS_PER_Q;
q_req->sbal_last++;
q_req->sbal_last %= QDIO_MAX_BUFFERS_PER_Q;
/* keep this requests number of SBALs up-to-date */
fsf_req->sbal_number++;
q_req->sbal_number++;
/* start at first SBALE of new SBAL */
fsf_req->sbale_curr = 0;
q_req->sbale_curr = 0;
/* set storage-block type for new SBAL */
sbale = zfcp_qdio_sbale_curr(fsf_req);
sbale = zfcp_qdio_sbale_curr(qdio, q_req);
sbale->flags |= sbtype;
return sbale;
}
static struct qdio_buffer_element *
zfcp_qdio_sbale_next(struct zfcp_fsf_req *fsf_req, unsigned long sbtype)
zfcp_qdio_sbale_next(struct zfcp_qdio *qdio, struct zfcp_queue_req *q_req,
unsigned int sbtype)
{
if (fsf_req->sbale_curr == ZFCP_LAST_SBALE_PER_SBAL)
return zfcp_qdio_sbal_chain(fsf_req, sbtype);
fsf_req->sbale_curr++;
return zfcp_qdio_sbale_curr(fsf_req);
if (q_req->sbale_curr == ZFCP_LAST_SBALE_PER_SBAL)
return zfcp_qdio_sbal_chain(qdio, q_req, sbtype);
q_req->sbale_curr++;
return zfcp_qdio_sbale_curr(qdio, q_req);
}
static void zfcp_qdio_undo_sbals(struct zfcp_fsf_req *fsf_req)
static void zfcp_qdio_undo_sbals(struct zfcp_qdio *qdio,
struct zfcp_queue_req *q_req)
{
struct qdio_buffer **sbal = fsf_req->adapter->req_q.sbal;
int first = fsf_req->sbal_first;
int last = fsf_req->sbal_last;
struct qdio_buffer **sbal = qdio->req_q.sbal;
int first = q_req->sbal_first;
int last = q_req->sbal_last;
int count = (last - first + QDIO_MAX_BUFFERS_PER_Q) %
QDIO_MAX_BUFFERS_PER_Q + 1;
zfcp_qdio_zero_sbals(sbal, first, count);
}
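The count computed above is the inclusive length of a possibly wrapped range of ring slots: adding QDIO_MAX_BUFFERS_PER_Q before the modulo keeps the intermediate value non-negative when last has wrapped past the end of the ring. A standalone sketch with worked values (illustrative, not from this patch):

#include <stdio.h>

#define QDIO_MAX_BUFFERS_PER_Q 128

/* inclusive count of ring slots from first to last, wrap-safe */
static int ring_count(int first, int last)
{
	return (last - first + QDIO_MAX_BUFFERS_PER_Q)
		% QDIO_MAX_BUFFERS_PER_Q + 1;
}

int main(void)
{
	printf("%d\n", ring_count(5, 9));	/* 5: slots 5..9 */
	printf("%d\n", ring_count(126, 1));	/* 4: slots 126, 127, 0, 1 */
	return 0;
}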
static int zfcp_qdio_fill_sbals(struct zfcp_fsf_req *fsf_req,
static int zfcp_qdio_fill_sbals(struct zfcp_qdio *qdio,
struct zfcp_queue_req *q_req,
unsigned int sbtype, void *start_addr,
unsigned int total_length)
{
@ -297,10 +246,10 @@ static int zfcp_qdio_fill_sbals(struct zfcp_fsf_req *fsf_req,
/* split segment up */
for (addr = start_addr, remaining = total_length; remaining > 0;
addr += length, remaining -= length) {
sbale = zfcp_qdio_sbale_next(fsf_req, sbtype);
sbale = zfcp_qdio_sbale_next(qdio, q_req, sbtype);
if (!sbale) {
atomic_inc(&fsf_req->adapter->qdio_outb_full);
zfcp_qdio_undo_sbals(fsf_req);
atomic_inc(&qdio->req_q_full);
zfcp_qdio_undo_sbals(qdio, q_req);
return -EINVAL;
}
@ -322,29 +271,31 @@ static int zfcp_qdio_fill_sbals(struct zfcp_fsf_req *fsf_req,
* @max_sbals: upper bound for number of SBALs to be used
* Returns: number of bytes, or error (negative)
*/
int zfcp_qdio_sbals_from_sg(struct zfcp_fsf_req *fsf_req, unsigned long sbtype,
struct scatterlist *sg, int max_sbals)
int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *qdio,
struct zfcp_queue_req *q_req,
unsigned long sbtype, struct scatterlist *sg,
int max_sbals)
{
struct qdio_buffer_element *sbale;
int retval, bytes = 0;
/* figure out last allowed SBAL */
zfcp_qdio_sbal_limit(fsf_req, max_sbals);
zfcp_qdio_sbal_limit(qdio, q_req, max_sbals);
/* set storage-block type for this request */
sbale = zfcp_qdio_sbale_req(fsf_req);
sbale = zfcp_qdio_sbale_req(qdio, q_req);
sbale->flags |= sbtype;
for (; sg; sg = sg_next(sg)) {
retval = zfcp_qdio_fill_sbals(fsf_req, sbtype, sg_virt(sg),
sg->length);
retval = zfcp_qdio_fill_sbals(qdio, q_req, sbtype,
sg_virt(sg), sg->length);
if (retval < 0)
return retval;
bytes += sg->length;
}
/* assume that no other SBALEs are to follow in the same SBAL */
sbale = zfcp_qdio_sbale_curr(fsf_req);
sbale = zfcp_qdio_sbale_curr(qdio, q_req);
sbale->flags |= SBAL_FLAGS_LAST_ENTRY;
return bytes;
@ -352,21 +303,22 @@ int zfcp_qdio_sbals_from_sg(struct zfcp_fsf_req *fsf_req, unsigned long sbtype,
/**
* zfcp_qdio_send - set PCI flag in first SBALE and send req to QDIO
* @fsf_req: pointer to struct zfcp_fsf_req
* @qdio: pointer to struct zfcp_qdio
* @q_req: pointer to struct zfcp_queue_req
* Returns: 0 on success, error otherwise
*/
int zfcp_qdio_send(struct zfcp_fsf_req *fsf_req)
int zfcp_qdio_send(struct zfcp_qdio *qdio, struct zfcp_queue_req *q_req)
{
struct zfcp_adapter *adapter = fsf_req->adapter;
struct zfcp_qdio_queue *req_q = &adapter->req_q;
int first = fsf_req->sbal_first;
int count = fsf_req->sbal_number;
struct zfcp_qdio_queue *req_q = &qdio->req_q;
int first = q_req->sbal_first;
int count = q_req->sbal_number;
int retval;
unsigned int qdio_flags = QDIO_FLAG_SYNC_OUTPUT;
zfcp_qdio_account(adapter);
zfcp_qdio_account(qdio);
retval = do_QDIO(adapter->ccw_device, qdio_flags, 0, first, count);
retval = do_QDIO(qdio->adapter->ccw_device, qdio_flags, 0, first,
count);
if (unlikely(retval)) {
zfcp_qdio_zero_sbals(req_q->sbal, first, count);
return retval;
@ -379,63 +331,69 @@ int zfcp_qdio_send(struct zfcp_fsf_req *fsf_req)
return 0;
}
static void zfcp_qdio_setup_init_data(struct qdio_initialize *id,
struct zfcp_qdio *qdio)
{
id->cdev = qdio->adapter->ccw_device;
id->q_format = QDIO_ZFCP_QFMT;
memcpy(id->adapter_name, dev_name(&id->cdev->dev), 8);
ASCEBC(id->adapter_name, 8);
id->qib_param_field_format = 0;
id->qib_param_field = NULL;
id->input_slib_elements = NULL;
id->output_slib_elements = NULL;
id->no_input_qs = 1;
id->no_output_qs = 1;
id->input_handler = zfcp_qdio_int_resp;
id->output_handler = zfcp_qdio_int_req;
id->int_parm = (unsigned long) qdio;
id->flags = QDIO_INBOUND_0COPY_SBALS |
QDIO_OUTBOUND_0COPY_SBALS | QDIO_USE_OUTBOUND_PCIS;
id->input_sbal_addr_array = (void **) (qdio->resp_q.sbal);
id->output_sbal_addr_array = (void **) (qdio->req_q.sbal);
}
/**
* zfcp_qdio_allocate - allocate queue memory and initialize QDIO data
* @qdio: pointer to struct zfcp_qdio
* Returns: -ENOMEM on memory allocation error or return value from
* qdio_allocate
*/
int zfcp_qdio_allocate(struct zfcp_adapter *adapter)
static int zfcp_qdio_allocate(struct zfcp_qdio *qdio)
{
struct qdio_initialize *init_data;
struct qdio_initialize init_data;
if (zfcp_qdio_buffers_enqueue(adapter->req_q.sbal) ||
zfcp_qdio_buffers_enqueue(adapter->resp_q.sbal))
if (zfcp_qdio_buffers_enqueue(qdio->req_q.sbal) ||
zfcp_qdio_buffers_enqueue(qdio->resp_q.sbal))
return -ENOMEM;
init_data = &adapter->qdio_init_data;
zfcp_qdio_setup_init_data(&init_data, qdio);
init_data->cdev = adapter->ccw_device;
init_data->q_format = QDIO_ZFCP_QFMT;
memcpy(init_data->adapter_name, dev_name(&adapter->ccw_device->dev), 8);
ASCEBC(init_data->adapter_name, 8);
init_data->qib_param_field_format = 0;
init_data->qib_param_field = NULL;
init_data->input_slib_elements = NULL;
init_data->output_slib_elements = NULL;
init_data->no_input_qs = 1;
init_data->no_output_qs = 1;
init_data->input_handler = zfcp_qdio_int_resp;
init_data->output_handler = zfcp_qdio_int_req;
init_data->int_parm = (unsigned long) adapter;
init_data->flags = QDIO_INBOUND_0COPY_SBALS |
QDIO_OUTBOUND_0COPY_SBALS | QDIO_USE_OUTBOUND_PCIS;
init_data->input_sbal_addr_array =
(void **) (adapter->resp_q.sbal);
init_data->output_sbal_addr_array =
(void **) (adapter->req_q.sbal);
return qdio_allocate(init_data);
return qdio_allocate(&init_data);
}
/**
* zfcp_qdio_close - close qdio queues for an adapter
* @qdio: pointer to struct zfcp_qdio
*/
void zfcp_qdio_close(struct zfcp_adapter *adapter)
void zfcp_qdio_close(struct zfcp_qdio *qdio)
{
struct zfcp_qdio_queue *req_q;
int first, count;
if (!(atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP))
if (!(atomic_read(&qdio->adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP))
return;
/* clear QDIOUP flag, thus do_QDIO is not called during qdio_shutdown */
req_q = &adapter->req_q;
spin_lock_bh(&adapter->req_q_lock);
atomic_clear_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status);
spin_unlock_bh(&adapter->req_q_lock);
req_q = &qdio->req_q;
spin_lock_bh(&qdio->req_q_lock);
atomic_clear_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &qdio->adapter->status);
spin_unlock_bh(&qdio->req_q_lock);
qdio_shutdown(adapter->ccw_device, QDIO_FLAG_CLEANUP_USING_CLEAR);
qdio_shutdown(qdio->adapter->ccw_device,
QDIO_FLAG_CLEANUP_USING_CLEAR);
/* cleanup used outbound sbals */
count = atomic_read(&req_q->count);
@ -446,50 +404,99 @@ void zfcp_qdio_close(struct zfcp_adapter *adapter)
}
req_q->first = 0;
atomic_set(&req_q->count, 0);
adapter->resp_q.first = 0;
atomic_set(&adapter->resp_q.count, 0);
qdio->resp_q.first = 0;
atomic_set(&qdio->resp_q.count, 0);
}
/**
* zfcp_qdio_open - prepare and initialize response queue
* @adapter: pointer to struct zfcp_adapter
* @qdio: pointer to struct zfcp_qdio
* Returns: 0 on success, otherwise -EIO
*/
int zfcp_qdio_open(struct zfcp_adapter *adapter)
int zfcp_qdio_open(struct zfcp_qdio *qdio)
{
struct qdio_buffer_element *sbale;
struct qdio_initialize init_data;
struct ccw_device *cdev = qdio->adapter->ccw_device;
int cc;
if (atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP)
if (atomic_read(&qdio->adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP)
return -EIO;
if (qdio_establish(&adapter->qdio_init_data))
zfcp_qdio_setup_init_data(&init_data, qdio);
if (qdio_establish(&init_data))
goto failed_establish;
if (qdio_activate(adapter->ccw_device))
if (qdio_activate(cdev))
goto failed_qdio;
for (cc = 0; cc < QDIO_MAX_BUFFERS_PER_Q; cc++) {
sbale = &(adapter->resp_q.sbal[cc]->element[0]);
sbale = &(qdio->resp_q.sbal[cc]->element[0]);
sbale->length = 0;
sbale->flags = SBAL_FLAGS_LAST_ENTRY;
sbale->addr = NULL;
}
if (do_QDIO(adapter->ccw_device, QDIO_FLAG_SYNC_INPUT, 0, 0,
if (do_QDIO(cdev, QDIO_FLAG_SYNC_INPUT, 0, 0,
QDIO_MAX_BUFFERS_PER_Q))
goto failed_qdio;
/* set index of first available SBALS / number of available SBALS */
adapter->req_q.first = 0;
atomic_set(&adapter->req_q.count, QDIO_MAX_BUFFERS_PER_Q);
qdio->req_q.first = 0;
atomic_set(&qdio->req_q.count, QDIO_MAX_BUFFERS_PER_Q);
return 0;
failed_qdio:
qdio_shutdown(adapter->ccw_device, QDIO_FLAG_CLEANUP_USING_CLEAR);
qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
failed_establish:
dev_err(&adapter->ccw_device->dev,
dev_err(&cdev->dev,
"Setting up the QDIO connection to the FCP adapter failed\n");
return -EIO;
}
void zfcp_qdio_destroy(struct zfcp_qdio *qdio)
{
struct qdio_buffer **sbal_req, **sbal_resp;
int p;
if (!qdio)
return;
if (qdio->adapter->ccw_device)
qdio_free(qdio->adapter->ccw_device);
sbal_req = qdio->req_q.sbal;
sbal_resp = qdio->resp_q.sbal;
for (p = 0; p < QDIO_MAX_BUFFERS_PER_Q; p += QBUFF_PER_PAGE) {
free_page((unsigned long) sbal_req[p]);
free_page((unsigned long) sbal_resp[p]);
}
kfree(qdio);
}
int zfcp_qdio_setup(struct zfcp_adapter *adapter)
{
struct zfcp_qdio *qdio;
qdio = kzalloc(sizeof(struct zfcp_qdio), GFP_KERNEL);
if (!qdio)
return -ENOMEM;
qdio->adapter = adapter;
if (zfcp_qdio_allocate(qdio)) {
zfcp_qdio_destroy(qdio);
return -ENOMEM;
}
spin_lock_init(&qdio->req_q_lock);
spin_lock_init(&qdio->stat_lock);
adapter->qdio = qdio;
return 0;
}

View File

@ -9,8 +9,9 @@
#define KMSG_COMPONENT "zfcp"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include "zfcp_ext.h"
#include <asm/atomic.h>
#include "zfcp_ext.h"
#include "zfcp_dbf.h"
static unsigned int default_depth = 32;
module_param_named(queue_depth, default_depth, uint, 0600);
@ -52,11 +53,11 @@ static int zfcp_scsi_slave_configure(struct scsi_device *sdp)
static void zfcp_scsi_command_fail(struct scsi_cmnd *scpnt, int result)
{
struct zfcp_adapter *adapter =
(struct zfcp_adapter *) scpnt->device->host->hostdata[0];
set_host_byte(scpnt, result);
if ((scpnt->device != NULL) && (scpnt->device->host != NULL))
zfcp_scsi_dbf_event_result("fail", 4,
(struct zfcp_adapter*) scpnt->device->host->hostdata[0],
scpnt, NULL);
zfcp_dbf_scsi_result("fail", 4, adapter->dbf, scpnt, NULL);
/* return directly */
scpnt->scsi_done(scpnt);
}
@ -92,7 +93,7 @@ static int zfcp_scsi_queuecommand(struct scsi_cmnd *scpnt,
scsi_result = fc_remote_port_chkready(rport);
if (unlikely(scsi_result)) {
scpnt->result = scsi_result;
zfcp_scsi_dbf_event_result("fail", 4, adapter, scpnt, NULL);
zfcp_dbf_scsi_result("fail", 4, adapter->dbf, scpnt, NULL);
scpnt->scsi_done(scpnt);
return 0;
}
@ -180,8 +181,8 @@ static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt)
spin_unlock(&adapter->req_list_lock);
if (!old_req) {
write_unlock_irqrestore(&adapter->abort_lock, flags);
zfcp_scsi_dbf_event_abort("lte1", adapter, scpnt, NULL,
old_reqid);
zfcp_dbf_scsi_abort("lte1", adapter->dbf, scpnt, NULL,
old_reqid);
return FAILED; /* completion could be in progress */
}
old_req->data = NULL;
@ -197,16 +198,15 @@ static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt)
zfcp_erp_wait(adapter);
if (!(atomic_read(&adapter->status) &
ZFCP_STATUS_COMMON_RUNNING)) {
zfcp_scsi_dbf_event_abort("nres", adapter, scpnt, NULL,
old_reqid);
zfcp_dbf_scsi_abort("nres", adapter->dbf, scpnt, NULL,
old_reqid);
return SUCCESS;
}
}
if (!abrt_req)
return FAILED;
wait_event(abrt_req->completion_wq,
abrt_req->status & ZFCP_STATUS_FSFREQ_COMPLETED);
wait_for_completion(&abrt_req->completion);
if (abrt_req->status & ZFCP_STATUS_FSFREQ_ABORTSUCCEEDED)
dbf_tag = "okay";
@ -216,7 +216,7 @@ static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt)
dbf_tag = "fail";
retval = FAILED;
}
zfcp_scsi_dbf_event_abort(dbf_tag, adapter, scpnt, abrt_req, old_reqid);
zfcp_dbf_scsi_abort(dbf_tag, adapter->dbf, scpnt, abrt_req, old_reqid);
zfcp_fsf_req_free(abrt_req);
return retval;
}
@ -225,7 +225,7 @@ static int zfcp_task_mgmt_function(struct scsi_cmnd *scpnt, u8 tm_flags)
{
struct zfcp_unit *unit = scpnt->device->hostdata;
struct zfcp_adapter *adapter = unit->port->adapter;
struct zfcp_fsf_req *fsf_req;
struct zfcp_fsf_req *fsf_req = NULL;
int retval = SUCCESS;
int retry = 3;
@ -237,25 +237,23 @@ static int zfcp_task_mgmt_function(struct scsi_cmnd *scpnt, u8 tm_flags)
zfcp_erp_wait(adapter);
if (!(atomic_read(&adapter->status) &
ZFCP_STATUS_COMMON_RUNNING)) {
zfcp_scsi_dbf_event_devreset("nres", tm_flags, unit,
scpnt);
zfcp_dbf_scsi_devreset("nres", tm_flags, unit, scpnt);
return SUCCESS;
}
}
if (!fsf_req)
return FAILED;
wait_event(fsf_req->completion_wq,
fsf_req->status & ZFCP_STATUS_FSFREQ_COMPLETED);
wait_for_completion(&fsf_req->completion);
if (fsf_req->status & ZFCP_STATUS_FSFREQ_TMFUNCFAILED) {
zfcp_scsi_dbf_event_devreset("fail", tm_flags, unit, scpnt);
zfcp_dbf_scsi_devreset("fail", tm_flags, unit, scpnt);
retval = FAILED;
} else if (fsf_req->status & ZFCP_STATUS_FSFREQ_TMFUNCNOTSUPP) {
zfcp_scsi_dbf_event_devreset("nsup", tm_flags, unit, scpnt);
zfcp_dbf_scsi_devreset("nsup", tm_flags, unit, scpnt);
retval = FAILED;
} else
zfcp_scsi_dbf_event_devreset("okay", tm_flags, unit, scpnt);
zfcp_dbf_scsi_devreset("okay", tm_flags, unit, scpnt);
zfcp_fsf_req_free(fsf_req);
return retval;
@ -430,7 +428,7 @@ static struct fc_host_statistics *zfcp_get_fc_host_stats(struct Scsi_Host *host)
if (!data)
return NULL;
ret = zfcp_fsf_exchange_port_data_sync(adapter, data);
ret = zfcp_fsf_exchange_port_data_sync(adapter->qdio, data);
if (ret) {
kfree(data);
return NULL;
@ -459,7 +457,7 @@ static void zfcp_reset_fc_host_stats(struct Scsi_Host *shost)
if (!data)
return;
ret = zfcp_fsf_exchange_port_data_sync(adapter, data);
ret = zfcp_fsf_exchange_port_data_sync(adapter->qdio, data);
if (ret)
kfree(data);
else {
@ -492,21 +490,6 @@ static void zfcp_set_rport_dev_loss_tmo(struct fc_rport *rport, u32 timeout)
rport->dev_loss_tmo = timeout;
}
/**
* zfcp_scsi_dev_loss_tmo_callbk - Free any reference to rport
* @rport: The rport that is about to be deleted.
*/
static void zfcp_scsi_dev_loss_tmo_callbk(struct fc_rport *rport)
{
struct zfcp_port *port;
write_lock_irq(&zfcp_data.config_lock);
port = rport->dd_data;
if (port)
port->rport = NULL;
write_unlock_irq(&zfcp_data.config_lock);
}
/**
* zfcp_scsi_terminate_rport_io - Terminate all I/O on a rport
* @rport: The FC rport on which to terminate I/O
@ -518,9 +501,12 @@ static void zfcp_scsi_dev_loss_tmo_callbk(struct fc_rport *rport)
static void zfcp_scsi_terminate_rport_io(struct fc_rport *rport)
{
struct zfcp_port *port;
struct Scsi_Host *shost = rport_to_shost(rport);
struct zfcp_adapter *adapter =
(struct zfcp_adapter *)shost->hostdata[0];
write_lock_irq(&zfcp_data.config_lock);
port = rport->dd_data;
port = zfcp_get_port_by_wwpn(adapter, rport->port_name);
if (port)
zfcp_port_get(port);
write_unlock_irq(&zfcp_data.config_lock);
@ -552,7 +538,6 @@ static void zfcp_scsi_rport_register(struct zfcp_port *port)
return;
}
rport->dd_data = port;
rport->maxframe_size = port->maxframe_size;
rport->supported_classes = port->supported_classes;
port->rport = rport;
@ -573,7 +558,7 @@ void zfcp_scsi_schedule_rport_register(struct zfcp_port *port)
zfcp_port_get(port);
port->rport_task = RPORT_ADD;
if (!queue_work(zfcp_data.work_queue, &port->rport_work))
if (!queue_work(port->adapter->work_queue, &port->rport_work))
zfcp_port_put(port);
}
@ -582,8 +567,11 @@ void zfcp_scsi_schedule_rport_block(struct zfcp_port *port)
zfcp_port_get(port);
port->rport_task = RPORT_DEL;
if (!queue_work(zfcp_data.work_queue, &port->rport_work))
zfcp_port_put(port);
if (port->rport && queue_work(port->adapter->work_queue,
&port->rport_work))
return;
zfcp_port_put(port);
}
void zfcp_scsi_schedule_rports_block(struct zfcp_adapter *adapter)
@ -662,7 +650,6 @@ struct fc_function_template zfcp_transport_functions = {
.reset_fc_host_stats = zfcp_reset_fc_host_stats,
.set_rport_dev_loss_tmo = zfcp_set_rport_dev_loss_tmo,
.get_host_port_state = zfcp_get_host_port_state,
.dev_loss_tmo_callbk = zfcp_scsi_dev_loss_tmo_callbk,
.terminate_rport_io = zfcp_scsi_terminate_rport_io,
.show_host_port_state = 1,
.bsg_request = zfcp_execute_fc_job,

View File

@ -88,7 +88,7 @@ static ssize_t zfcp_sysfs_##_feat##_failed_store(struct device *dev, \
unsigned long val; \
int retval = 0; \
\
down(&zfcp_data.config_sema); \
mutex_lock(&zfcp_data.config_mutex); \
if (atomic_read(&_feat->status) & ZFCP_STATUS_COMMON_REMOVE) { \
retval = -EBUSY; \
goto out; \
@ -105,7 +105,7 @@ static ssize_t zfcp_sysfs_##_feat##_failed_store(struct device *dev, \
_reopen_id, NULL); \
zfcp_erp_wait(_adapter); \
out: \
up(&zfcp_data.config_sema); \
mutex_unlock(&zfcp_data.config_mutex); \
return retval ? retval : (ssize_t) count; \
} \
static ZFCP_DEV_ATTR(_feat, failed, S_IWUSR | S_IRUGO, \
@ -126,7 +126,7 @@ static ssize_t zfcp_sysfs_port_rescan_store(struct device *dev,
if (atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_REMOVE)
return -EBUSY;
ret = zfcp_scan_ports(adapter);
ret = zfcp_fc_scan_ports(adapter);
return ret ? ret : (ssize_t) count;
}
static ZFCP_DEV_ATTR(adapter, port_rescan, S_IWUSR, NULL,
@ -142,7 +142,7 @@ static ssize_t zfcp_sysfs_port_remove_store(struct device *dev,
int retval = 0;
LIST_HEAD(port_remove_lh);
down(&zfcp_data.config_sema);
mutex_lock(&zfcp_data.config_mutex);
if (atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_REMOVE) {
retval = -EBUSY;
goto out;
@ -173,7 +173,7 @@ static ssize_t zfcp_sysfs_port_remove_store(struct device *dev,
zfcp_port_put(port);
zfcp_port_dequeue(port);
out:
up(&zfcp_data.config_sema);
mutex_unlock(&zfcp_data.config_mutex);
return retval ? retval : (ssize_t) count;
}
static ZFCP_DEV_ATTR(adapter, port_remove, S_IWUSR, NULL,
@ -207,7 +207,7 @@ static ssize_t zfcp_sysfs_unit_add_store(struct device *dev,
u64 fcp_lun;
int retval = -EINVAL;
down(&zfcp_data.config_sema);
mutex_lock(&zfcp_data.config_mutex);
if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_REMOVE) {
retval = -EBUSY;
goto out;
@ -226,7 +226,7 @@ static ssize_t zfcp_sysfs_unit_add_store(struct device *dev,
zfcp_erp_wait(unit->port->adapter);
zfcp_unit_put(unit);
out:
up(&zfcp_data.config_sema);
mutex_unlock(&zfcp_data.config_mutex);
return retval ? retval : (ssize_t) count;
}
static DEVICE_ATTR(unit_add, S_IWUSR, NULL, zfcp_sysfs_unit_add_store);
@ -241,7 +241,7 @@ static ssize_t zfcp_sysfs_unit_remove_store(struct device *dev,
int retval = 0;
LIST_HEAD(unit_remove_lh);
down(&zfcp_data.config_sema);
mutex_lock(&zfcp_data.config_mutex);
if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_REMOVE) {
retval = -EBUSY;
goto out;
@ -282,7 +282,7 @@ static ssize_t zfcp_sysfs_unit_remove_store(struct device *dev,
zfcp_unit_put(unit);
zfcp_unit_dequeue(unit);
out:
up(&zfcp_data.config_sema);
mutex_unlock(&zfcp_data.config_mutex);
return retval ? retval : (ssize_t) count;
}
static DEVICE_ATTR(unit_remove, S_IWUSR, NULL, zfcp_sysfs_unit_remove_store);
@ -425,7 +425,7 @@ static ssize_t zfcp_sysfs_adapter_util_show(struct device *dev,
if (!qtcb_port)
return -ENOMEM;
retval = zfcp_fsf_exchange_port_data_sync(adapter, qtcb_port);
retval = zfcp_fsf_exchange_port_data_sync(adapter->qdio, qtcb_port);
if (!retval)
retval = sprintf(buf, "%u %u %u\n", qtcb_port->cp_util,
qtcb_port->cb_util, qtcb_port->a_util);
@ -451,7 +451,7 @@ static int zfcp_sysfs_adapter_ex_config(struct device *dev,
if (!qtcb_config)
return -ENOMEM;
retval = zfcp_fsf_exchange_config_data_sync(adapter, qtcb_config);
retval = zfcp_fsf_exchange_config_data_sync(adapter->qdio, qtcb_config);
if (!retval)
*stat_inf = qtcb_config->stat_info;
@ -492,15 +492,15 @@ static ssize_t zfcp_sysfs_adapter_q_full_show(struct device *dev,
char *buf)
{
struct Scsi_Host *scsi_host = class_to_shost(dev);
struct zfcp_adapter *adapter =
(struct zfcp_adapter *) scsi_host->hostdata[0];
struct zfcp_qdio *qdio =
((struct zfcp_adapter *) scsi_host->hostdata[0])->qdio;
u64 util;
spin_lock_bh(&adapter->qdio_stat_lock);
util = adapter->req_q_util;
spin_unlock_bh(&adapter->qdio_stat_lock);
spin_lock_bh(&qdio->stat_lock);
util = qdio->req_q_util;
spin_unlock_bh(&qdio->stat_lock);
return sprintf(buf, "%d %llu\n", atomic_read(&adapter->qdio_outb_full),
return sprintf(buf, "%d %llu\n", atomic_read(&qdio->req_q_full),
(unsigned long long)util);
}
static DEVICE_ATTR(queue_full, S_IRUGO, zfcp_sysfs_adapter_q_full_show, NULL);

View File

@ -1811,6 +1811,12 @@ config ZFCP
called zfcp. If you want to compile it as a module, say M here
and read <file:Documentation/kbuild/modules.txt>.
config SCSI_PMCRAID
tristate "PMC SIERRA Linux MaxRAID adapter support"
depends on PCI && SCSI
---help---
This driver supports the PMC SIERRA MaxRAID adapters.
config SCSI_SRP
tristate "SCSI RDMA Protocol helper library"
depends on SCSI && PCI

View File

@ -130,6 +130,7 @@ obj-$(CONFIG_SCSI_MVSAS) += mvsas/
obj-$(CONFIG_PS3_ROM) += ps3rom.o
obj-$(CONFIG_SCSI_CXGB3_ISCSI) += libiscsi.o libiscsi_tcp.o cxgb3i/
obj-$(CONFIG_SCSI_BNX2_ISCSI) += libiscsi.o bnx2i/
obj-$(CONFIG_SCSI_PMCRAID) += pmcraid.o
obj-$(CONFIG_ARM) += arm/

View File

@ -15,11 +15,10 @@
static struct list_head adapter_list = LIST_HEAD_INIT(adapter_list);
static u32 adapter_count;
static int bnx2i_reg_device;
#define DRV_MODULE_NAME "bnx2i"
#define DRV_MODULE_VERSION "2.0.1d"
#define DRV_MODULE_RELDATE "Mar 25, 2009"
#define DRV_MODULE_VERSION "2.0.1e"
#define DRV_MODULE_RELDATE "June 22, 2009"
static char version[] __devinitdata =
"Broadcom NetXtreme II iSCSI Driver " DRV_MODULE_NAME \
@ -31,7 +30,7 @@ MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708/5709 iSCSI Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
static DEFINE_RWLOCK(bnx2i_dev_lock);
static DEFINE_MUTEX(bnx2i_dev_lock);
unsigned int event_coal_div = 1;
module_param(event_coal_div, int, 0664);
@ -100,14 +99,14 @@ struct bnx2i_hba *get_adapter_list_head(void)
if (!adapter_count)
goto hba_not_found;
read_lock(&bnx2i_dev_lock);
mutex_lock(&bnx2i_dev_lock);
list_for_each_entry(tmp_hba, &adapter_list, link) {
if (tmp_hba->cnic && tmp_hba->cnic->cm_select_dev) {
hba = tmp_hba;
break;
}
}
read_unlock(&bnx2i_dev_lock);
mutex_unlock(&bnx2i_dev_lock);
hba_not_found:
return hba;
}
@ -122,14 +121,14 @@ struct bnx2i_hba *bnx2i_find_hba_for_cnic(struct cnic_dev *cnic)
{
struct bnx2i_hba *hba, *temp;
read_lock(&bnx2i_dev_lock);
mutex_lock(&bnx2i_dev_lock);
list_for_each_entry_safe(hba, temp, &adapter_list, link) {
if (hba->cnic == cnic) {
read_unlock(&bnx2i_dev_lock);
mutex_unlock(&bnx2i_dev_lock);
return hba;
}
}
read_unlock(&bnx2i_dev_lock);
mutex_unlock(&bnx2i_dev_lock);
return NULL;
}
@ -186,18 +185,17 @@ void bnx2i_stop(void *handle)
*/
void bnx2i_register_device(struct bnx2i_hba *hba)
{
int rc;
if (test_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state) ||
test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic)) {
return;
}
hba->cnic->register_device(hba->cnic, CNIC_ULP_ISCSI, hba);
rc = hba->cnic->register_device(hba->cnic, CNIC_ULP_ISCSI, hba);
spin_lock(&hba->lock);
bnx2i_reg_device++;
spin_unlock(&hba->lock);
set_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic);
if (!rc)
set_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic);
}
@ -211,10 +209,10 @@ void bnx2i_reg_dev_all(void)
{
struct bnx2i_hba *hba, *temp;
read_lock(&bnx2i_dev_lock);
mutex_lock(&bnx2i_dev_lock);
list_for_each_entry_safe(hba, temp, &adapter_list, link)
bnx2i_register_device(hba);
read_unlock(&bnx2i_dev_lock);
mutex_unlock(&bnx2i_dev_lock);
}
@ -234,10 +232,6 @@ static void bnx2i_unreg_one_device(struct bnx2i_hba *hba)
hba->cnic->unregister_device(hba->cnic, CNIC_ULP_ISCSI);
spin_lock(&hba->lock);
bnx2i_reg_device--;
spin_unlock(&hba->lock);
/* ep_disconnect could come before NETDEV_DOWN, driver won't
* see NETDEV_DOWN as it already unregistered itself.
*/
@ -255,10 +249,10 @@ void bnx2i_unreg_dev_all(void)
{
struct bnx2i_hba *hba, *temp;
read_lock(&bnx2i_dev_lock);
mutex_lock(&bnx2i_dev_lock);
list_for_each_entry_safe(hba, temp, &adapter_list, link)
bnx2i_unreg_one_device(hba);
read_unlock(&bnx2i_dev_lock);
mutex_unlock(&bnx2i_dev_lock);
}
@ -267,35 +261,34 @@ void bnx2i_unreg_dev_all(void)
* @hba: bnx2i adapter instance
* @cnic: cnic device handle
*
* Global resource lock and host adapter lock is held during critical sections
* below. This routine is called from cnic_register_driver() context and
* work horse thread which does majority of device specific initialization
* Global resource lock is held during critical sections below. This routine is
* called from either cnic_register_driver() or device hot plug context
* and does the majority of device-specific initialization
*/
static int bnx2i_init_one(struct bnx2i_hba *hba, struct cnic_dev *cnic)
{
int rc;
read_lock(&bnx2i_dev_lock);
if (bnx2i_reg_device &&
!test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic)) {
rc = cnic->register_device(cnic, CNIC_ULP_ISCSI, hba);
if (rc) /* duplicate registration */
printk(KERN_ERR "bnx2i- dev reg failed\n");
spin_lock(&hba->lock);
bnx2i_reg_device++;
mutex_lock(&bnx2i_dev_lock);
rc = cnic->register_device(cnic, CNIC_ULP_ISCSI, hba);
if (!rc) {
hba->age++;
spin_unlock(&hba->lock);
set_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic);
}
read_unlock(&bnx2i_dev_lock);
list_add_tail(&hba->link, &adapter_list);
adapter_count++;
} else if (rc == -EBUSY) /* duplicate registration */
printk(KERN_ALERT "bnx2i, duplicate registration"
"hba=%p, cnic=%p\n", hba, cnic);
else if (rc == -EAGAIN)
printk(KERN_ERR "bnx2i, driver not registered\n");
else if (rc == -EINVAL)
printk(KERN_ERR "bnx2i, invalid type %d\n", CNIC_ULP_ISCSI);
else
printk(KERN_ERR "bnx2i dev reg, unknown error, %d\n", rc);
write_lock(&bnx2i_dev_lock);
list_add_tail(&hba->link, &adapter_list);
adapter_count++;
write_unlock(&bnx2i_dev_lock);
return 0;
mutex_unlock(&bnx2i_dev_lock);
return rc;
}
@ -343,19 +336,15 @@ void bnx2i_ulp_exit(struct cnic_dev *dev)
"found, dev 0x%p\n", dev);
return;
}
write_lock(&bnx2i_dev_lock);
mutex_lock(&bnx2i_dev_lock);
list_del_init(&hba->link);
adapter_count--;
if (test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic)) {
hba->cnic->unregister_device(hba->cnic, CNIC_ULP_ISCSI);
clear_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic);
spin_lock(&hba->lock);
bnx2i_reg_device--;
spin_unlock(&hba->lock);
}
write_unlock(&bnx2i_dev_lock);
mutex_unlock(&bnx2i_dev_lock);
bnx2i_free_hba(hba);
}
@ -377,6 +366,8 @@ static int __init bnx2i_mod_init(void)
if (!is_power_of_2(sq_size))
sq_size = roundup_pow_of_two(sq_size);
mutex_init(&bnx2i_dev_lock);
bnx2i_scsi_xport_template =
iscsi_register_transport(&bnx2i_iscsi_transport);
if (!bnx2i_scsi_xport_template) {
@ -412,7 +403,7 @@ static void __exit bnx2i_mod_exit(void)
{
struct bnx2i_hba *hba;
write_lock(&bnx2i_dev_lock);
mutex_lock(&bnx2i_dev_lock);
while (!list_empty(&adapter_list)) {
hba = list_entry(adapter_list.next, struct bnx2i_hba, link);
list_del(&hba->link);
@ -421,14 +412,11 @@ static void __exit bnx2i_mod_exit(void)
if (test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic)) {
hba->cnic->unregister_device(hba->cnic, CNIC_ULP_ISCSI);
clear_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic);
bnx2i_reg_device--;
}
write_unlock(&bnx2i_dev_lock);
bnx2i_free_hba(hba);
write_lock(&bnx2i_dev_lock);
}
write_unlock(&bnx2i_dev_lock);
mutex_unlock(&bnx2i_dev_lock);
iscsi_unregister_transport(&bnx2i_iscsi_transport);
cnic_unregister_driver(CNIC_ULP_ISCSI);

View File

@ -387,6 +387,7 @@ static struct iscsi_endpoint *bnx2i_alloc_ep(struct bnx2i_hba *hba)
bnx2i_ep = ep->dd_data;
INIT_LIST_HEAD(&bnx2i_ep->link);
bnx2i_ep->state = EP_STATE_IDLE;
bnx2i_ep->ep_iscsi_cid = (u16) -1;
bnx2i_ep->hba = hba;
bnx2i_ep->hba_age = hba->age;
hba->ofld_conns_active++;
@ -1160,9 +1161,6 @@ static int bnx2i_task_xmit(struct iscsi_task *task)
struct bnx2i_cmd *cmd = task->dd_data;
struct iscsi_cmd *hdr = (struct iscsi_cmd *) task->hdr;
if (test_bit(ADAPTER_STATE_LINK_DOWN, &hba->adapter_state))
return -ENOTCONN;
if (!bnx2i_conn->is_bound)
return -ENOTCONN;
@ -1653,15 +1651,18 @@ static struct iscsi_endpoint *bnx2i_ep_connect(struct Scsi_Host *shost,
struct iscsi_endpoint *ep;
int rc = 0;
if (shost)
if (shost) {
/* driver is given scsi host to work with */
hba = iscsi_host_priv(shost);
else
/* Register the device with cnic if not already done */
bnx2i_register_device(hba);
} else
/*
* check if the given destination can be reached through
* an iSCSI-capable NetXtreme2 device
*/
hba = bnx2i_check_route(dst_addr);
if (!hba) {
rc = -ENOMEM;
goto check_busy;
@ -1681,8 +1682,6 @@ static struct iscsi_endpoint *bnx2i_ep_connect(struct Scsi_Host *shost,
goto net_if_down;
}
bnx2i_ep->state = EP_STATE_IDLE;
bnx2i_ep->ep_iscsi_cid = (u16) -1;
bnx2i_ep->num_active_cmds = 0;
iscsi_cid = bnx2i_alloc_iscsi_cid(hba);
if (iscsi_cid == -1) {

View File

@ -353,6 +353,12 @@ ch_readconfig(scsi_changer *ch)
/* look up the devices of the data transfer elements */
ch->dt = kmalloc(ch->counts[CHET_DT]*sizeof(struct scsi_device),
GFP_KERNEL);
if (!ch->dt) {
kfree(buffer);
return -ENOMEM;
}
for (elem = 0; elem < ch->counts[CHET_DT]; elem++) {
id = -1;
lun = 0;

View File

@ -4,8 +4,7 @@
* Additions for SCSI 2 and Linux 2.2.x by D. Gilbert (990422)
* Additions for SCSI 3+ (SPC-3 T10/1416-D Rev 07 3 May 2002)
* by D. Gilbert and aeb (20020609)
* Additions for SPC-3 T10/1416-D Rev 21 22 Sept 2004, D. Gilbert 20041025
* Update to SPC-4 T10/1713-D Rev 5a, 14 June 2006, D. Gilbert 20060702
* Update to SPC-4 T10/1713-D Rev 20, 22 May 2009, D. Gilbert 20090624
*/
#include <linux/blkdev.h>
@ -56,9 +55,9 @@ static const char * cdb_byte0_names[] = {
"Read Buffer",
/* 3d-3f */ "Update Block", "Read Long(10)", "Write Long(10)",
/* 40-41 */ "Change Definition", "Write Same(10)",
/* 42-48 */ "Read sub-channel", "Read TOC/PMA/ATIP", "Read density support",
"Play audio(10)", "Get configuration", "Play audio msf",
"Play audio track/index",
/* 42-48 */ "Unmap/Read sub-channel", "Read TOC/PMA/ATIP",
"Read density support", "Play audio(10)", "Get configuration",
"Play audio msf", "Play audio track/index",
/* 49-4f */ "Play track relative(10)", "Get event status notification",
"Pause/resume", "Log Select", "Log Sense", "Stop play/scan",
NULL,
@ -71,12 +70,13 @@ static const char * cdb_byte0_names[] = {
/* 60-67 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
/* 68-6f */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
/* 70-77 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
/* 78-7f */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, "Variable length",
/* 78-7f */ NULL, NULL, NULL, NULL, NULL, NULL, "Extended CDB",
"Variable length",
/* 80-84 */ "Xdwrite(16)", "Rebuild(16)", "Regenerate(16)", "Extended copy",
"Receive copy results",
/* 85-89 */ "ATA command pass through(16)", "Access control in",
"Access control out", "Read(16)", "Memory Export Out(16)",
/* 8a-8f */ "Write(16)", NULL, "Read attributes", "Write attributes",
/* 8a-8f */ "Write(16)", "ORWrite", "Read attributes", "Write attributes",
"Write and verify(16)", "Verify(16)",
/* 90-94 */ "Pre-fetch(16)", "Synchronize cache(16)",
"Lock/unlock cache(16)", "Write same(16)", NULL,
@ -107,22 +107,24 @@ struct value_name_pair {
};
static const struct value_name_pair maint_in_arr[] = {
{0x5, "Report device identifier"},
{0x5, "Report identifying information"},
{0xa, "Report target port groups"},
{0xb, "Report aliases"},
{0xc, "Report supported operation codes"},
{0xd, "Report supported task management functions"},
{0xe, "Report priority"},
{0xf, "Report timestamp"},
{0x10, "Management protocol in"},
};
#define MAINT_IN_SZ ARRAY_SIZE(maint_in_arr)
static const struct value_name_pair maint_out_arr[] = {
{0x6, "Set device identifier"},
{0x6, "Set identifying information"},
{0xa, "Set target port groups"},
{0xb, "Change aliases"},
{0xe, "Set priority"},
{0xe, "Set timestamp"},
{0xf, "Set timestamp"},
{0x10, "Management protocol out"},
};
#define MAINT_OUT_SZ ARRAY_SIZE(maint_out_arr)
@ -412,6 +414,7 @@ static const struct error_info additional[] =
{0x0004, "Beginning-of-partition/medium detected"},
{0x0005, "End-of-data detected"},
{0x0006, "I/O process terminated"},
{0x0007, "Programmable early warning detected"},
{0x0011, "Audio play operation in progress"},
{0x0012, "Audio play operation paused"},
{0x0013, "Audio play operation successfully completed"},
@ -425,6 +428,7 @@ static const struct error_info additional[] =
{0x001B, "Set capacity operation in progress"},
{0x001C, "Verify operation in progress"},
{0x001D, "ATA pass through information available"},
{0x001E, "Conflicting SA creation request"},
{0x0100, "No index/sector signal"},
@ -449,9 +453,12 @@ static const struct error_info additional[] =
{0x040B, "Logical unit not accessible, target port in standby state"},
{0x040C, "Logical unit not accessible, target port in unavailable "
"state"},
{0x040D, "Logical unit not ready, structure check required"},
{0x0410, "Logical unit not ready, auxiliary memory not accessible"},
{0x0411, "Logical unit not ready, notify (enable spinup) required"},
{0x0412, "Logical unit not ready, offline"},
{0x0413, "Logical unit not ready, SA creation in progress"},
{0x0414, "Logical unit not ready, space allocation in progress"},
{0x0500, "Logical unit does not respond to selection"},
@ -479,6 +486,9 @@ static const struct error_info additional[] =
{0x0B03, "Warning - background self-test failed"},
{0x0B04, "Warning - background pre-scan detected medium error"},
{0x0B05, "Warning - background medium scan detected medium error"},
{0x0B06, "Warning - non-volatile cache now volatile"},
{0x0B07, "Warning - degraded power to non-volatile cache"},
{0x0B08, "Warning - power loss expected"},
{0x0C00, "Write error"},
{0x0C01, "Write error - recovered with auto reallocation"},
@ -593,6 +603,7 @@ static const struct error_info additional[] =
{0x1C02, "Grown defect list not found"},
{0x1D00, "Miscompare during verify operation"},
{0x1D01, "Miscompare verify of unmapped LBA"},
{0x1E00, "Recovered id with ECC correction"},
@ -626,6 +637,7 @@ static const struct error_info additional[] =
{0x2405, "Security working key frozen"},
{0x2406, "Nonce not unique"},
{0x2407, "Nonce timestamp out of range"},
{0x2408, "Invalid XCDB"},
{0x2500, "Logical unit not supported"},
@ -656,10 +668,12 @@ static const struct error_info additional[] =
{0x2704, "Persistent write protect"},
{0x2705, "Permanent write protect"},
{0x2706, "Conditional write protect"},
{0x2707, "Space allocation failed write protect"},
{0x2800, "Not ready to ready change, medium may have changed"},
{0x2801, "Import or export element accessed"},
{0x2802, "Format-layer may have changed"},
{0x2803, "Import/export element accessed, medium changed"},
{0x2900, "Power on, reset, or bus device reset occurred"},
{0x2901, "Power on occurred"},
@ -680,11 +694,16 @@ static const struct error_info additional[] =
{0x2A07, "Implicit asymmetric access state transition failed"},
{0x2A08, "Priority changed"},
{0x2A09, "Capacity data has changed"},
{0x2A0A, "Error history I_T nexus cleared"},
{0x2A0B, "Error history snapshot released"},
{0x2A0C, "Error recovery attributes have changed"},
{0x2A0D, "Data encryption capabilities changed"},
{0x2A10, "Timestamp changed"},
{0x2A11, "Data encryption parameters changed by another i_t nexus"},
{0x2A12, "Data encryption parameters changed by vendor specific "
"event"},
{0x2A13, "Data encryption key instance counter has changed"},
{0x2A14, "SA creation capabilities data has changed"},
{0x2B00, "Copy cannot execute since host cannot disconnect"},
@ -723,6 +742,8 @@ static const struct error_info additional[] =
{0x300C, "WORM medium - overwrite attempted"},
{0x300D, "WORM medium - integrity check"},
{0x3010, "Medium not formatted"},
{0x3011, "Incompatible volume type"},
{0x3012, "Incompatible volume qualifier"},
{0x3100, "Medium format corrupted"},
{0x3101, "Format command failed"},
@ -782,6 +803,10 @@ static const struct error_info additional[] =
{0x3B15, "Medium magazine unlocked"},
{0x3B16, "Mechanical positioning or changer error"},
{0x3B17, "Read past end of user object"},
{0x3B18, "Element disabled"},
{0x3B19, "Element enabled"},
{0x3B1A, "Data transfer device removed"},
{0x3B1B, "Data transfer device inserted"},
{0x3D00, "Invalid bits in identify message"},
@ -882,6 +907,8 @@ static const struct error_info additional[] =
{0x5506, "Auxiliary memory out of space"},
{0x5507, "Quota error"},
{0x5508, "Maximum number of supplemental decryption keys exceeded"},
{0x5509, "Medium auxiliary memory not accessible"},
{0x550A, "Data currently unavailable"},
{0x5700, "Unable to recover table-of-contents"},
@ -993,6 +1020,12 @@ static const struct error_info additional[] =
{0x5E02, "Standby condition activated by timer"},
{0x5E03, "Idle condition activated by command"},
{0x5E04, "Standby condition activated by command"},
{0x5E05, "Idle_b condition activated by timer"},
{0x5E06, "Idle_b condition activated by command"},
{0x5E07, "Idle_c condition activated by timer"},
{0x5E08, "Idle_c condition activated by command"},
{0x5E09, "Standby_y condition activated by timer"},
{0x5E0A, "Standby_y condition activated by command"},
{0x5E41, "Power state change to active"},
{0x5E42, "Power state change to idle"},
{0x5E43, "Power state change to standby"},
@ -1091,7 +1124,28 @@ static const struct error_info additional[] =
{0x7403, "Incorrect data encryption key"},
{0x7404, "Cryptographic integrity validation failed"},
{0x7405, "Error decrypting data"},
{0x7406, "Unknown signature verification key"},
{0x7407, "Encryption parameters not useable"},
{0x7408, "Digital signature validation failure"},
{0x7409, "Encryption mode mismatch on read"},
{0x740A, "Encrypted block not raw read enabled"},
{0x740B, "Incorrect Encryption parameters"},
{0x740C, "Unable to decrypt parameter list"},
{0x740D, "Encryption algorithm disabled"},
{0x7410, "SA creation parameter value invalid"},
{0x7411, "SA creation parameter value rejected"},
{0x7412, "Invalid SA usage"},
{0x7421, "Data Encryption configuration prevented"},
{0x7430, "SA creation parameter not supported"},
{0x7440, "Authentication failed"},
{0x7461, "External data encryption key manager access error"},
{0x7462, "External data encryption key manager error"},
{0x7463, "External data encryption key not found"},
{0x7464, "External data encryption request not authorized"},
{0x746E, "External data encryption control timeout"},
{0x746F, "External data encryption control error"},
{0x7471, "Logical unit access not authorized"},
{0x7479, "Security conflict in translated device"},
{0, NULL}
};
@ -1103,12 +1157,12 @@ struct error_info2 {
static const struct error_info2 additional2[] =
{
{0x40,0x00,0x7f,"Ram failure (%x)"},
{0x40,0x80,0xff,"Diagnostic failure on component (%x)"},
{0x41,0x00,0xff,"Data path failure (%x)"},
{0x42,0x00,0xff,"Power-on or self-test failure (%x)"},
{0x4D,0x00,0xff,"Tagged overlapped commands (queue tag %x)"},
{0x70,0x00,0xff,"Decompression exception short algorithm id of %x"},
{0x40, 0x00, 0x7f, "Ram failure (%x)"},
{0x40, 0x80, 0xff, "Diagnostic failure on component (%x)"},
{0x41, 0x00, 0xff, "Data path failure (%x)"},
{0x42, 0x00, 0xff, "Power-on or self-test failure (%x)"},
{0x4D, 0x00, 0xff, "Tagged overlapped commands (task tag %x)"},
{0x70, 0x00, 0xff, "Decompression exception short algorithm id of %x"},
{0, 0, 0, NULL}
};
@ -1157,14 +1211,15 @@ scsi_extd_sense_format(unsigned char asc, unsigned char ascq) {
int i;
unsigned short code = ((asc << 8) | ascq);
for (i=0; additional[i].text; i++)
for (i = 0; additional[i].text; i++)
if (additional[i].code12 == code)
return additional[i].text;
for (i=0; additional2[i].fmt; i++)
for (i = 0; additional2[i].fmt; i++) {
if (additional2[i].code1 == asc &&
additional2[i].code2_min >= ascq &&
additional2[i].code2_max <= ascq)
ascq >= additional2[i].code2_min &&
ascq <= additional2[i].code2_max)
return additional2[i].fmt;
}
#endif
return NULL;
}

View File

@ -153,12 +153,24 @@ static int scsi_dh_handler_attach(struct scsi_device *sdev,
if (sdev->scsi_dh_data) {
if (sdev->scsi_dh_data->scsi_dh != scsi_dh)
err = -EBUSY;
} else if (scsi_dh->attach)
else
kref_get(&sdev->scsi_dh_data->kref);
} else if (scsi_dh->attach) {
err = scsi_dh->attach(sdev);
if (!err) {
kref_init(&sdev->scsi_dh_data->kref);
sdev->scsi_dh_data->sdev = sdev;
}
}
return err;
}
static void __detach_handler (struct kref *kref)
{
struct scsi_dh_data *scsi_dh_data = container_of(kref, struct scsi_dh_data, kref);
scsi_dh_data->scsi_dh->detach(scsi_dh_data->sdev);
}
/*
* scsi_dh_handler_detach - Detach a device handler from a device
* @sdev - SCSI device the device handler should be detached from
@ -180,7 +192,7 @@ static void scsi_dh_handler_detach(struct scsi_device *sdev,
scsi_dh = sdev->scsi_dh_data->scsi_dh;
if (scsi_dh && scsi_dh->detach)
scsi_dh->detach(sdev);
kref_put(&sdev->scsi_dh_data->kref, __detach_handler);
}
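With the kref in place, attach and detach pair up: the first successful attach runs the handler's ->attach() and kref_init(), a repeat attach of the same handler only takes another reference, and each detach drops one, so ->detach() runs exactly once via __detach_handler() when the last user goes away. A userspace sketch of that lifecycle (illustration only; a plain counter and the attach/detach helpers below stand in for struct kref and the real functions):

#include <stdio.h>

struct dh_data { int ref; };	/* counter standing in for struct kref */

static void attach(struct dh_data *d)
{
	if (d->ref++ == 0)
		printf("first attach: ->attach() runs, kref_init\n");
	else
		printf("repeat attach: kref_get, ref now %d\n", d->ref);
}

static void detach(struct dh_data *d)
{
	if (--d->ref == 0)
		printf("last put: ->detach() runs (__detach_handler)\n");
	else
		printf("put: ref now %d, handler stays attached\n", d->ref);
}

int main(void)
{
	struct dh_data d = { 0 };
	attach(&d);	/* scsi_dh_handler_attach: kref_init */
	attach(&d);	/* same handler again: kref_get */
	detach(&d);	/* scsi_dh_handler_detach: no ->detach yet */
	detach(&d);	/* final put: ->detach fires once */
	return 0;
}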
/*
@ -439,6 +451,39 @@ int scsi_dh_activate(struct request_queue *q)
}
EXPORT_SYMBOL_GPL(scsi_dh_activate);
/*
* scsi_dh_set_params - set the parameters for the device as per the
* string specified in params.
* @q - Request queue that is associated with the scsi_device for
* which the parameters are to be set.
* @params - parameters in the following format
* "no_of_params\0param1\0param2\0param3\0...\0"
* for example, the string for 2 parameters with values 10 and 21
* is specified as "2\010\021\0".
*/
int scsi_dh_set_params(struct request_queue *q, const char *params)
{
int err = -SCSI_DH_NOSYS;
unsigned long flags;
struct scsi_device *sdev;
struct scsi_device_handler *scsi_dh = NULL;
spin_lock_irqsave(q->queue_lock, flags);
sdev = q->queuedata;
if (sdev && sdev->scsi_dh_data)
scsi_dh = sdev->scsi_dh_data->scsi_dh;
if (scsi_dh && scsi_dh->set_params && get_device(&sdev->sdev_gendev))
err = 0;
spin_unlock_irqrestore(q->queue_lock, flags);
if (err)
return err;
err = scsi_dh->set_params(sdev, params);
put_device(&sdev->sdev_gendev);
return err;
}
EXPORT_SYMBOL_GPL(scsi_dh_set_params);
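A hedged sketch of a caller (not part of this patch; example_params and example_set_params are invented for illustration, and the declaration is assumed to be exported through <scsi/scsi_dh.h>). Note that in the comment's notation "2\010\021\0" means the bytes '2' NUL '1' '0' NUL '2' '1' NUL; typed literally in C source, \010 would instead be an octal escape, so adjacent string literals are a safer way to build the string. For scsi_dh_emc the two values would be the short-trespass and honor-reservations flags, each 0 or 1.

#include <linux/blkdev.h>
#include <scsi/scsi_dh.h>

/* two parameters, 10 and 21, as "<count>\0<param1>\0<param2>\0"; the
 * final NUL comes from the literal's own terminator */
static const char example_params[] = "2\0" "10\0" "21";

static int example_set_params(struct request_queue *q)
{
	return scsi_dh_set_params(q, example_params);
}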
/*
* scsi_dh_handler_exist - Return TRUE(1) if a device handler exists for
* the given name. FALSE(0) otherwise.
@ -474,7 +519,6 @@ int scsi_dh_attach(struct request_queue *q, const char *name)
if (!err) {
err = scsi_dh_handler_attach(sdev, scsi_dh);
put_device(&sdev->sdev_gendev);
}
return err;
@ -505,10 +549,8 @@ void scsi_dh_detach(struct request_queue *q)
return;
if (sdev->scsi_dh_data) {
/* if sdev is not on internal list, detach */
scsi_dh = sdev->scsi_dh_data->scsi_dh;
if (!device_handler_match(scsi_dh, sdev))
scsi_dh_handler_detach(sdev, scsi_dh);
scsi_dh_handler_detach(sdev, scsi_dh);
}
put_device(&sdev->sdev_gendev);
}

View File

@ -663,7 +663,7 @@ static int alua_activate(struct scsi_device *sdev)
goto out;
}
if (h->tpgs == TPGS_MODE_EXPLICIT && h->state != TPGS_STATE_OPTIMIZED)
if (h->tpgs & TPGS_MODE_EXPLICIT && h->state != TPGS_STATE_OPTIMIZED)
err = alua_stpg(sdev, TPGS_STATE_OPTIMIZED, h);
out:

View File

@ -561,6 +561,61 @@ done:
return result;
}
/*
* params - parameters in the following format
* "no_of_params\0param1\0param2\0param3\0...\0"
* for example, the string for 2 parameters with values 10 and 21
* is specified as "2\010\021\0".
*/
static int clariion_set_params(struct scsi_device *sdev, const char *params)
{
struct clariion_dh_data *csdev = get_clariion_data(sdev);
unsigned int hr = 0, st = 0, argc;
const char *p = params;
int result = SCSI_DH_OK;
if ((sscanf(params, "%u", &argc) != 1) || (argc != 2))
return -EINVAL;
while (*p++)
;
if ((sscanf(p, "%u", &st) != 1) || (st > 1))
return -EINVAL;
while (*p++)
;
if ((sscanf(p, "%u", &hr) != 1) || (hr > 1))
return -EINVAL;
if (st)
csdev->flags |= CLARIION_SHORT_TRESPASS;
else
csdev->flags &= ~CLARIION_SHORT_TRESPASS;
if (hr)
csdev->flags |= CLARIION_HONOR_RESERVATIONS;
else
csdev->flags &= ~CLARIION_HONOR_RESERVATIONS;
/*
* If this path is owned, we have to send a trespass command
* with the new parameters. If not, simply return. Next trespass
* command would use the parameters.
*/
if (csdev->lun_state != CLARIION_LUN_OWNED)
goto done;
csdev->lun_state = CLARIION_LUN_UNINITIALIZED;
result = send_trespass_cmd(sdev, csdev);
if (result != SCSI_DH_OK)
goto done;
/* Update status */
result = clariion_send_inquiry(sdev, csdev);
done:
return result;
}
static const struct scsi_dh_devlist clariion_dev_list[] = {
{"DGC", "RAID"},
@ -581,11 +636,9 @@ static struct scsi_device_handler clariion_dh = {
.check_sense = clariion_check_sense,
.activate = clariion_activate,
.prep_fn = clariion_prep_fn,
.set_params = clariion_set_params,
};
/*
* TODO: need some interface so we can set trespass values
*/
static int clariion_bus_attach(struct scsi_device *sdev)
{
struct scsi_dh_data *scsi_dh_data;

View File

@ -112,6 +112,7 @@ struct c9_inquiry {
#define SUBSYS_ID_LEN 16
#define SLOT_ID_LEN 2
#define ARRAY_LABEL_LEN 31
struct c4_inquiry {
u8 peripheral_info;
@ -135,6 +136,8 @@ struct rdac_controller {
struct rdac_pg_legacy legacy;
struct rdac_pg_expanded expanded;
} mode_select;
u8 index;
u8 array_name[ARRAY_LABEL_LEN];
};
struct c8_inquiry {
u8 peripheral_info;
@ -198,6 +201,31 @@ static const char *lun_state[] =
static LIST_HEAD(ctlr_list);
static DEFINE_SPINLOCK(list_lock);
/*
* module parameter to enable rdac debug logging.
* 2 bits for each type of logging; only two types are defined for now.
* Can be enhanced if required at a later point.
*/
static int rdac_logging = 1;
module_param(rdac_logging, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(rdac_logging, "A bit mask of rdac logging levels, "
"Default is 1 - failover logging enabled, "
"set it to 0xF to enable all the logs");
#define RDAC_LOG_FAILOVER 0
#define RDAC_LOG_SENSE 2
#define RDAC_LOG_BITS 2
#define RDAC_LOG_LEVEL(SHIFT) \
((rdac_logging >> (SHIFT)) & ((1 << (RDAC_LOG_BITS)) - 1))
#define RDAC_LOG(SHIFT, sdev, f, arg...) \
do { \
if (unlikely(RDAC_LOG_LEVEL(SHIFT))) \
sdev_printk(KERN_INFO, sdev, RDAC_NAME ": " f "\n", ## arg); \
} while (0)
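To make the layout concrete: bits 1:0 gate failover logging and bits 3:2 gate sense logging, so the default of 1 enables only failover messages while 0xF enables both at the maximum level. A userspace sketch (illustration only, mirroring RDAC_LOG_LEVEL() above):

#include <assert.h>

#define LOG_BITS		2
#define LOG_LEVEL(mask, shift)	(((mask) >> (shift)) & ((1 << LOG_BITS) - 1))

int main(void)
{
	assert(LOG_LEVEL(0x1, 0) == 1);	/* default: failover logging on */
	assert(LOG_LEVEL(0x1, 2) == 0);	/* default: sense logging off */
	assert(LOG_LEVEL(0xF, 0) == 3);	/* 0xF: all logs at full level */
	assert(LOG_LEVEL(0xF, 2) == 3);
	return 0;
}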
static inline struct rdac_dh_data *get_rdac_data(struct scsi_device *sdev)
{
struct scsi_dh_data *scsi_dh_data = sdev->scsi_dh_data;
@ -303,7 +331,8 @@ static void release_controller(struct kref *kref)
kfree(ctlr);
}
static struct rdac_controller *get_controller(u8 *subsys_id, u8 *slot_id)
static struct rdac_controller *get_controller(u8 *subsys_id, u8 *slot_id,
char *array_name)
{
struct rdac_controller *ctlr, *tmp;
@ -324,6 +353,14 @@ static struct rdac_controller *get_controller(u8 *subsys_id, u8 *slot_id)
/* initialize fields of controller */
memcpy(ctlr->subsys_id, subsys_id, SUBSYS_ID_LEN);
memcpy(ctlr->slot_id, slot_id, SLOT_ID_LEN);
memcpy(ctlr->array_name, array_name, ARRAY_LABEL_LEN);
/* update the controller index (slot_id[1] is the ASCII slot number; 0x31 is '1') */
if (slot_id[1] == 0x31)
ctlr->index = 0;
else
ctlr->index = 1;
kref_init(&ctlr->kref);
ctlr->use_ms10 = -1;
list_add(&ctlr->node, &ctlr_list);
@ -363,9 +400,10 @@ done:
return err;
}
static int get_lun(struct scsi_device *sdev, struct rdac_dh_data *h)
static int get_lun_info(struct scsi_device *sdev, struct rdac_dh_data *h,
char *array_name)
{
int err;
int err, i;
struct c8_inquiry *inqp;
err = submit_inquiry(sdev, 0xC8, sizeof(struct c8_inquiry), h);
@ -377,6 +415,11 @@ static int get_lun(struct scsi_device *sdev, struct rdac_dh_data *h)
inqp->page_id[2] != 'i' || inqp->page_id[3] != 'd')
return SCSI_DH_NOSYS;
h->lun = inqp->lun[7]; /* Uses only the last byte */
for (i = 0; i < ARRAY_LABEL_LEN - 1; ++i)
*(array_name+i) = inqp->array_user_label[(2*i)+1];
*(array_name+ARRAY_LABEL_LEN-1) = '\0';
}
return err;
}
@ -410,7 +453,7 @@ static int check_ownership(struct scsi_device *sdev, struct rdac_dh_data *h)
}
static int initialize_controller(struct scsi_device *sdev,
struct rdac_dh_data *h)
struct rdac_dh_data *h, char *array_name)
{
int err;
struct c4_inquiry *inqp;
@ -418,7 +461,8 @@ static int initialize_controller(struct scsi_device *sdev,
err = submit_inquiry(sdev, 0xC4, sizeof(struct c4_inquiry), h);
if (err == SCSI_DH_OK) {
inqp = &h->inq.c4;
h->ctlr = get_controller(inqp->subsys_id, inqp->slot_id);
h->ctlr = get_controller(inqp->subsys_id, inqp->slot_id,
array_name);
if (!h->ctlr)
err = SCSI_DH_RES_TEMP_UNAVAIL;
}
@ -450,6 +494,7 @@ static int mode_select_handle_sense(struct scsi_device *sdev,
{
struct scsi_sense_hdr sense_hdr;
int err = SCSI_DH_IO, ret;
struct rdac_dh_data *h = get_rdac_data(sdev);
ret = scsi_normalize_sense(sensebuf, SCSI_SENSE_BUFFERSIZE, &sense_hdr);
if (!ret)
@ -478,11 +523,14 @@ static int mode_select_handle_sense(struct scsi_device *sdev,
err = SCSI_DH_RETRY;
break;
default:
sdev_printk(KERN_INFO, sdev,
"MODE_SELECT failed with sense %02x/%02x/%02x.\n",
sense_hdr.sense_key, sense_hdr.asc, sense_hdr.ascq);
break;
}
RDAC_LOG(RDAC_LOG_FAILOVER, sdev, "array %s, ctlr %d, "
"MODE_SELECT returned with sense %02x/%02x/%02x",
(char *) h->ctlr->array_name, h->ctlr->index,
sense_hdr.sense_key, sense_hdr.asc, sense_hdr.ascq);
done:
return err;
}
@ -499,7 +547,9 @@ retry:
if (!rq)
goto done;
sdev_printk(KERN_INFO, sdev, "%s MODE_SELECT command.\n",
RDAC_LOG(RDAC_LOG_FAILOVER, sdev, "array %s, ctlr %d, "
"%s MODE_SELECT command",
(char *) h->ctlr->array_name, h->ctlr->index,
(retry_cnt == RDAC_RETRY_COUNT) ? "queueing" : "retrying");
err = blk_execute_rq(q, NULL, rq, 1);
@ -509,8 +559,12 @@ retry:
if (err == SCSI_DH_RETRY && retry_cnt--)
goto retry;
}
if (err == SCSI_DH_OK)
if (err == SCSI_DH_OK) {
h->state = RDAC_STATE_ACTIVE;
RDAC_LOG(RDAC_LOG_FAILOVER, sdev, "array %s, ctlr %d, "
"MODE_SELECT completed",
(char *) h->ctlr->array_name, h->ctlr->index);
}
done:
return err;
@ -525,17 +579,6 @@ static int rdac_activate(struct scsi_device *sdev)
if (err != SCSI_DH_OK)
goto done;
if (!h->ctlr) {
err = initialize_controller(sdev, h);
if (err != SCSI_DH_OK)
goto done;
}
if (h->ctlr->use_ms10 == -1) {
err = set_mode_select(sdev, h);
if (err != SCSI_DH_OK)
goto done;
}
if (h->lun_state == RDAC_LUN_UNOWNED)
err = send_mode_select(sdev, h);
done:
@ -559,6 +602,12 @@ static int rdac_check_sense(struct scsi_device *sdev,
struct scsi_sense_hdr *sense_hdr)
{
struct rdac_dh_data *h = get_rdac_data(sdev);
RDAC_LOG(RDAC_LOG_SENSE, sdev, "array %s, ctlr %d, "
"I/O returned with sense %02x/%02x/%02x",
(char *) h->ctlr->array_name, h->ctlr->index,
sense_hdr->sense_key, sense_hdr->asc, sense_hdr->ascq);
switch (sense_hdr->sense_key) {
case NOT_READY:
if (sense_hdr->asc == 0x04 && sense_hdr->ascq == 0x01)
@ -628,11 +677,18 @@ static const struct scsi_dh_devlist rdac_dev_list[] = {
{"SGI", "IS"},
{"STK", "OPENstorage D280"},
{"SUN", "CSM200_R"},
{"SUN", "LCSM100_I"},
{"SUN", "LCSM100_S"},
{"SUN", "LCSM100_E"},
{"SUN", "LCSM100_F"},
{"DELL", "MD3000"},
{"DELL", "MD3000i"},
{"DELL", "MD32xx"},
{"DELL", "MD32xxi"},
{"LSI", "INF-01-00"},
{"ENGENIO", "INF-01-00"},
{"STK", "FLEXLINE 380"},
{"SUN", "CSM100_R_FC"},
{NULL, NULL},
};
@ -656,6 +712,7 @@ static int rdac_bus_attach(struct scsi_device *sdev)
struct rdac_dh_data *h;
unsigned long flags;
int err;
char array_name[ARRAY_LABEL_LEN];
scsi_dh_data = kzalloc(sizeof(struct scsi_device_handler *)
+ sizeof(*h) , GFP_KERNEL);
@ -670,16 +727,24 @@ static int rdac_bus_attach(struct scsi_device *sdev)
h->lun = UNINITIALIZED_LUN;
h->state = RDAC_STATE_ACTIVE;
err = get_lun(sdev, h);
err = get_lun_info(sdev, h, array_name);
if (err != SCSI_DH_OK)
goto failed;
err = initialize_controller(sdev, h, array_name);
if (err != SCSI_DH_OK)
goto failed;
err = check_ownership(sdev, h);
if (err != SCSI_DH_OK)
goto failed;
goto clean_ctlr;
err = set_mode_select(sdev, h);
if (err != SCSI_DH_OK)
goto clean_ctlr;
if (!try_module_get(THIS_MODULE))
goto failed;
goto clean_ctlr;
spin_lock_irqsave(sdev->request_queue->queue_lock, flags);
sdev->scsi_dh_data = scsi_dh_data;
@ -691,6 +756,9 @@ static int rdac_bus_attach(struct scsi_device *sdev)
return 0;
clean_ctlr:
kref_put(&h->ctlr->kref, release_controller);
failed:
kfree(scsi_dh_data);
sdev_printk(KERN_ERR, sdev, "%s: not attached\n",

File diff suppressed because it is too large

View File

@ -37,8 +37,8 @@
#define FCOE_MAX_OUTSTANDING_COMMANDS 1024
#define FCOE_MIN_XID 0x0001 /* the min xid supported by fcoe_sw */
#define FCOE_MAX_XID 0x07ef /* the max xid supported by fcoe_sw */
#define FCOE_MIN_XID 0x0000 /* the min xid supported by fcoe_sw */
#define FCOE_MAX_XID 0x0FFF /* the max xid supported by fcoe_sw */
unsigned int fcoe_debug_logging;
module_param_named(debug_logging, fcoe_debug_logging, int, S_IRUGO|S_IWUSR);
@ -53,7 +53,7 @@ do { \
do { \
CMD; \
} while (0); \
} while (0);
} while (0)
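The dropped semicolon is not cosmetic: with "} while (0);" the macro expands to two statements, so a brace-less if/else around it no longer compiles. A minimal sketch of the failure mode (illustration only):

/* why "do { ... } while (0)" must not carry a trailing semicolon */
#define BAD(x)	do { (void)(x); } while (0);	/* two statements */
#define GOOD(x)	do { (void)(x); } while (0)	/* one statement  */

void demo(int flag)
{
	if (flag)
		GOOD(1);
	else		/* with BAD(1) above, this "else" would be orphaned:
			 * "error: 'else' without a previous 'if'" */
		GOOD(0);
}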
#define FCOE_DBG(fmt, args...) \
FCOE_CHECK_LOGGING(FCOE_LOGGING, \
@ -61,7 +61,7 @@ do { \
#define FCOE_NETDEV_DBG(netdev, fmt, args...) \
FCOE_CHECK_LOGGING(FCOE_NETDEV_LOGGING, \
printk(KERN_INFO "fcoe: %s" fmt, \
printk(KERN_INFO "fcoe: %s: " fmt, \
netdev->name, ##args);)
/*
@ -75,26 +75,36 @@ struct fcoe_percpu_s {
};
/*
* the fcoe sw transport private data
* an FCoE interface, 1:1 with netdev
*/
struct fcoe_softc {
struct fcoe_interface {
struct list_head list;
struct net_device *real_dev;
struct net_device *phys_dev; /* device with ethtool_ops */
struct net_device *netdev;
struct packet_type fcoe_packet_type;
struct packet_type fip_packet_type;
struct fcoe_ctlr ctlr;
struct fc_exch_mgr *oem; /* offload exchange manager */
struct kref kref;
};
/*
* the FCoE private structure that's allocated along with the
* Scsi_Host and libfc fc_lport structures
*/
struct fcoe_port {
struct fcoe_interface *fcoe;
struct fc_lport *lport;
struct sk_buff_head fcoe_pending_queue;
u8 fcoe_pending_queue_active;
struct timer_list timer; /* queue timer */
struct fcoe_ctlr ctlr;
struct work_struct destroy_work; /* to prevent rtnl deadlocks */
};
#define fcoe_from_ctlr(fc) container_of(fc, struct fcoe_softc, ctlr)
#define fcoe_from_ctlr(fip) container_of(fip, struct fcoe_interface, ctlr)
static inline struct net_device *fcoe_netdev(
const struct fc_lport *lp)
static inline struct net_device *fcoe_netdev(const struct fc_lport *lp)
{
return ((struct fcoe_softc *)lport_priv(lp))->real_dev;
return ((struct fcoe_port *)lport_priv(lp))->fcoe->netdev;
}
#endif /* _FCOE_H_ */

View File

@ -69,7 +69,7 @@ do { \
do { \
CMD; \
} while (0); \
} while (0);
} while (0)
#define LIBFCOE_DBG(fmt, args...) \
LIBFCOE_CHECK_LOGGING(LIBFCOE_LOGGING, \
@ -148,13 +148,17 @@ static void fcoe_ctlr_reset_fcfs(struct fcoe_ctlr *fip)
*/
void fcoe_ctlr_destroy(struct fcoe_ctlr *fip)
{
flush_work(&fip->recv_work);
cancel_work_sync(&fip->recv_work);
spin_lock_bh(&fip->fip_recv_list.lock);
__skb_queue_purge(&fip->fip_recv_list);
spin_unlock_bh(&fip->fip_recv_list.lock);
spin_lock_bh(&fip->lock);
fip->state = FIP_ST_DISABLED;
fcoe_ctlr_reset_fcfs(fip);
spin_unlock_bh(&fip->lock);
del_timer_sync(&fip->timer);
flush_work(&fip->link_work);
cancel_work_sync(&fip->link_work);
}
EXPORT_SYMBOL(fcoe_ctlr_destroy);
@ -413,10 +417,18 @@ static int fcoe_ctlr_encaps(struct fcoe_ctlr *fip,
struct fip_mac_desc *mac;
struct fcoe_fcf *fcf;
size_t dlen;
u16 fip_flags;
fcf = fip->sel_fcf;
if (!fcf)
return -ENODEV;
/* set flags according to both FCF and lport's capability on SPMA */
fip_flags = fcf->flags;
fip_flags &= fip->spma ? FIP_FL_SPMA | FIP_FL_FPMA : FIP_FL_FPMA;
if (!fip_flags)
return -ENODEV;
dlen = sizeof(struct fip_encaps) + skb->len; /* len before push */
cap = (struct fip_encaps_head *)skb_push(skb, sizeof(*cap));
@ -429,9 +441,7 @@ static int fcoe_ctlr_encaps(struct fcoe_ctlr *fip,
cap->fip.fip_op = htons(FIP_OP_LS);
cap->fip.fip_subcode = FIP_SC_REQ;
cap->fip.fip_dl_len = htons((dlen + sizeof(*mac)) / FIP_BPW);
cap->fip.fip_flags = htons(FIP_FL_FPMA);
if (fip->spma)
cap->fip.fip_flags |= htons(FIP_FL_SPMA);
cap->fip.fip_flags = htons(fip_flags);
cap->encaps.fd_desc.fip_dtype = dtype;
cap->encaps.fd_desc.fip_dlen = dlen / FIP_BPW;
@ -879,7 +889,7 @@ static void fcoe_ctlr_recv_els(struct fcoe_ctlr *fip, struct sk_buff *skb)
stats->RxFrames++;
stats->RxWords += skb->len / FIP_BPW;
fc_exch_recv(lp, lp->emp, fp);
fc_exch_recv(lp, fp);
return;
len_err:
@ -1104,7 +1114,6 @@ static void fcoe_ctlr_timeout(unsigned long arg)
struct fcoe_fcf *sel;
struct fcoe_fcf *fcf;
unsigned long next_timer = jiffies + msecs_to_jiffies(FIP_VN_KA_PERIOD);
DECLARE_MAC_BUF(buf);
u8 send_ctlr_ka;
u8 send_port_ka;
@ -1128,9 +1137,8 @@ static void fcoe_ctlr_timeout(unsigned long arg)
fcf = sel; /* the old FCF may have been freed */
if (sel) {
printk(KERN_INFO "libfcoe: host%d: FIP selected "
"Fibre-Channel Forwarder MAC %s\n",
fip->lp->host->host_no,
print_mac(buf, sel->fcf_mac));
"Fibre-Channel Forwarder MAC %pM\n",
fip->lp->host->host_no, sel->fcf_mac);
memcpy(fip->dest_addr, sel->fcf_mac, ETH_ALEN);
fip->port_ka_time = jiffies +
msecs_to_jiffies(FIP_VN_KA_PERIOD);

View File

@ -115,7 +115,7 @@ void fnic_handle_frame(struct work_struct *work)
}
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
fc_exch_recv(lp, lp->emp, fp);
fc_exch_recv(lp, fp);
}
}

View File

@ -671,14 +671,6 @@ static int __devinit fnic_probe(struct pci_dev *pdev,
lp->link_up = 0;
lp->tt = fnic_transport_template;
lp->emp = fc_exch_mgr_alloc(lp, FC_CLASS_3,
FCPIO_HOST_EXCH_RANGE_START,
FCPIO_HOST_EXCH_RANGE_END);
if (!lp->emp) {
err = -ENOMEM;
goto err_out_remove_scsi_host;
}
lp->max_retry_count = fnic->config.flogi_retries;
lp->max_rport_retry_count = fnic->config.plogi_retries;
lp->service_params = (FCP_SPPF_INIT_FCN | FCP_SPPF_RD_XRDY_DIS |
@ -693,12 +685,18 @@ static int __devinit fnic_probe(struct pci_dev *pdev,
fc_set_wwnn(lp, fnic->config.node_wwn);
fc_set_wwpn(lp, fnic->config.port_wwn);
fc_exch_init(lp);
fc_lport_init(lp);
fc_exch_init(lp);
fc_elsct_init(lp);
fc_rport_init(lp);
fc_disc_init(lp);
if (!fc_exch_mgr_alloc(lp, FC_CLASS_3, FCPIO_HOST_EXCH_RANGE_START,
FCPIO_HOST_EXCH_RANGE_END, NULL)) {
err = -ENOMEM;
goto err_out_remove_scsi_host;
}
fc_lport_config(lp);
if (fc_set_mfs(lp, fnic->config.maxdatafieldsize +
@ -738,7 +736,7 @@ static int __devinit fnic_probe(struct pci_dev *pdev,
return 0;
err_out_free_exch_mgr:
fc_exch_mgr_free(lp->emp);
fc_exch_mgr_free(lp);
err_out_remove_scsi_host:
fc_remove_host(fnic->lport->host);
scsi_remove_host(fnic->lport->host);
@ -827,7 +825,7 @@ static void __devexit fnic_remove(struct pci_dev *pdev)
fc_remove_host(fnic->lport->host);
scsi_remove_host(fnic->lport->host);
fc_exch_mgr_free(fnic->lport->emp);
fc_exch_mgr_free(fnic->lport);
vnic_dev_notify_unset(fnic->vdev);
fnic_free_vnic_resources(fnic);
fnic_free_intr(fnic);

View File

@ -4217,7 +4217,7 @@ static int ibmvfc_alloc_mem(struct ibmvfc_host *vhost)
if (!vhost->trace)
goto free_disc_buffer;
vhost->tgt_pool = mempool_create_kzalloc_pool(IBMVFC_TGT_MEMPOOL_SZ,
vhost->tgt_pool = mempool_create_kmalloc_pool(IBMVFC_TGT_MEMPOOL_SZ,
sizeof(struct ibmvfc_target));
if (!vhost->tgt_pool) {

View File

@ -1199,7 +1199,7 @@ struct ipr_ioa_cfg {
struct ata_host ata_host;
char ipr_cmd_label[8];
#define IPR_CMD_LABEL "ipr_cmnd"
#define IPR_CMD_LABEL "ipr_cmd"
struct ipr_cmnd *ipr_cmnd_list[IPR_NUM_CMD_BLKS];
u32 ipr_cmnd_list_dma[IPR_NUM_CMD_BLKS];
};

View File

@ -99,6 +99,27 @@ static int iscsi_sw_tcp_recv(read_descriptor_t *rd_desc, struct sk_buff *skb,
return total_consumed;
}
/**
* iscsi_sw_sk_state_check - check socket state
* @sk: socket
*
* If the socket is in CLOSE or CLOSE_WAIT we should
* not close the connection if there is still some
* data pending.
*/
static inline int iscsi_sw_sk_state_check(struct sock *sk)
{
struct iscsi_conn *conn = (struct iscsi_conn*)sk->sk_user_data;
if ((sk->sk_state == TCP_CLOSE_WAIT || sk->sk_state == TCP_CLOSE) &&
!atomic_read(&sk->sk_rmem_alloc)) {
ISCSI_SW_TCP_DBG(conn, "TCP_CLOSE|TCP_CLOSE_WAIT\n");
iscsi_conn_failure(conn, ISCSI_ERR_TCP_CONN_CLOSE);
return -ECONNRESET;
}
return 0;
}
static void iscsi_sw_tcp_data_ready(struct sock *sk, int flag)
{
struct iscsi_conn *conn = sk->sk_user_data;
@ -117,6 +138,8 @@ static void iscsi_sw_tcp_data_ready(struct sock *sk, int flag)
rd_desc.count = 1;
tcp_read_sock(sk, &rd_desc, iscsi_sw_tcp_recv);
iscsi_sw_sk_state_check(sk);
read_unlock(&sk->sk_callback_lock);
/* If we had to (atomically) map a highmem page,
@ -137,13 +160,7 @@ static void iscsi_sw_tcp_state_change(struct sock *sk)
conn = (struct iscsi_conn*)sk->sk_user_data;
session = conn->session;
if ((sk->sk_state == TCP_CLOSE_WAIT ||
sk->sk_state == TCP_CLOSE) &&
!atomic_read(&sk->sk_rmem_alloc)) {
ISCSI_SW_TCP_DBG(conn, "iscsi_tcp_state_change: "
"TCP_CLOSE|TCP_CLOSE_WAIT\n");
iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
}
iscsi_sw_sk_state_check(sk);
tcp_conn = conn->dd_data;
tcp_sw_conn = tcp_conn->dd_data;

View File

@ -43,46 +43,13 @@
#define FC_DISC_RETRY_LIMIT 3 /* max retries */
#define FC_DISC_RETRY_DELAY 500UL /* (msecs) delay */
#define FC_DISC_DELAY 3
static void fc_disc_gpn_ft_req(struct fc_disc *);
static void fc_disc_gpn_ft_resp(struct fc_seq *, struct fc_frame *, void *);
static int fc_disc_new_target(struct fc_disc *, struct fc_rport *,
struct fc_rport_identifiers *);
static void fc_disc_del_target(struct fc_disc *, struct fc_rport *);
static void fc_disc_done(struct fc_disc *);
static void fc_disc_done(struct fc_disc *, enum fc_disc_event);
static void fc_disc_timeout(struct work_struct *);
static void fc_disc_single(struct fc_disc *, struct fc_disc_port *);
static int fc_disc_single(struct fc_lport *, struct fc_disc_port *);
static void fc_disc_restart(struct fc_disc *);
/**
* fc_disc_lookup_rport() - lookup a remote port by port_id
* @lport: Fibre Channel host port instance
* @port_id: remote port port_id to match
*/
struct fc_rport *fc_disc_lookup_rport(const struct fc_lport *lport,
u32 port_id)
{
const struct fc_disc *disc = &lport->disc;
struct fc_rport *rport, *found = NULL;
struct fc_rport_libfc_priv *rdata;
int disc_found = 0;
list_for_each_entry(rdata, &disc->rports, peers) {
rport = PRIV_TO_RPORT(rdata);
if (rport->port_id == port_id) {
disc_found = 1;
found = rport;
break;
}
}
if (!disc_found)
found = NULL;
return found;
}
/**
* fc_disc_stop_rports() - delete all the remote ports associated with the lport
* @disc: The discovery job to stop rports on
@ -93,69 +60,16 @@ struct fc_rport *fc_disc_lookup_rport(const struct fc_lport *lport,
void fc_disc_stop_rports(struct fc_disc *disc)
{
struct fc_lport *lport;
struct fc_rport *rport;
struct fc_rport_libfc_priv *rdata, *next;
struct fc_rport_priv *rdata, *next;
lport = disc->lport;
mutex_lock(&disc->disc_mutex);
list_for_each_entry_safe(rdata, next, &disc->rports, peers) {
rport = PRIV_TO_RPORT(rdata);
list_del(&rdata->peers);
lport->tt.rport_logoff(rport);
}
list_for_each_entry_safe(rdata, next, &disc->rogue_rports, peers) {
rport = PRIV_TO_RPORT(rdata);
lport->tt.rport_logoff(rport);
}
list_for_each_entry_safe(rdata, next, &disc->rports, peers)
lport->tt.rport_logoff(rdata);
mutex_unlock(&disc->disc_mutex);
}
/**
* fc_disc_rport_callback() - Event handler for rport events
* @lport: The lport which is receiving the event
* @rport: The rport which the event has occured on
* @event: The event that occured
*
* Locking Note: The rport lock should not be held when calling
* this function.
*/
static void fc_disc_rport_callback(struct fc_lport *lport,
struct fc_rport *rport,
enum fc_rport_event event)
{
struct fc_rport_libfc_priv *rdata = rport->dd_data;
struct fc_disc *disc = &lport->disc;
FC_DISC_DBG(disc, "Received a %d event for port (%6x)\n", event,
rport->port_id);
switch (event) {
case RPORT_EV_CREATED:
if (disc) {
mutex_lock(&disc->disc_mutex);
list_add_tail(&rdata->peers, &disc->rports);
mutex_unlock(&disc->disc_mutex);
}
break;
case RPORT_EV_LOGO:
case RPORT_EV_FAILED:
case RPORT_EV_STOP:
mutex_lock(&disc->disc_mutex);
mutex_lock(&rdata->rp_mutex);
if (rdata->trans_state == FC_PORTSTATE_ROGUE)
list_del(&rdata->peers);
mutex_unlock(&rdata->rp_mutex);
mutex_unlock(&disc->disc_mutex);
break;
default:
break;
}
}
/**
* fc_disc_recv_rscn_req() - Handle Registered State Change Notification (RSCN)
* @sp: Current sequence of the RSCN exchange
@ -169,8 +83,6 @@ static void fc_disc_recv_rscn_req(struct fc_seq *sp, struct fc_frame *fp,
struct fc_disc *disc)
{
struct fc_lport *lport;
struct fc_rport *rport;
struct fc_rport_libfc_priv *rdata;
struct fc_els_rscn *rp;
struct fc_els_rscn_page *pp;
struct fc_seq_els_data rjt_data;
@ -224,10 +136,7 @@ static void fc_disc_recv_rscn_req(struct fc_seq *sp, struct fc_frame *fp,
break;
}
dp->lp = lport;
dp->ids.port_id = ntoh24(pp->rscn_fid);
dp->ids.port_name = -1;
dp->ids.node_name = -1;
dp->ids.roles = FC_RPORT_ROLE_UNKNOWN;
dp->port_id = ntoh24(pp->rscn_fid);
list_add_tail(&dp->peers, &disc_ports);
break;
case ELS_ADDR_FMT_AREA:
@ -240,6 +149,19 @@ static void fc_disc_recv_rscn_req(struct fc_seq *sp, struct fc_frame *fp,
}
}
lport->tt.seq_els_rsp_send(sp, ELS_LS_ACC, NULL);
/*
* If not doing a complete rediscovery, do GPN_ID on
* the individual ports mentioned in the list.
* If any of these get an error, do a full rediscovery.
* In any case, go through the list and free the entries.
*/
list_for_each_entry_safe(dp, next, &disc_ports, peers) {
list_del(&dp->peers);
if (!redisc)
redisc = fc_disc_single(lport, dp);
kfree(dp);
}
if (redisc) {
FC_DISC_DBG(disc, "RSCN received: rediscovering\n");
fc_disc_restart(disc);
@ -247,16 +169,6 @@ static void fc_disc_recv_rscn_req(struct fc_seq *sp, struct fc_frame *fp,
FC_DISC_DBG(disc, "RSCN received: not rediscovering. "
"redisc %d state %d in_prog %d\n",
redisc, lport->state, disc->pending);
list_for_each_entry_safe(dp, next, &disc_ports, peers) {
list_del(&dp->peers);
rport = lport->tt.rport_lookup(lport, dp->ids.port_id);
if (rport) {
rdata = rport->dd_data;
list_del(&rdata->peers);
lport->tt.rport_logoff(rport);
}
fc_disc_single(disc, dp);
}
}
fc_frame_free(fp);
return;
@ -308,35 +220,34 @@ static void fc_disc_recv_req(struct fc_seq *sp, struct fc_frame *fp,
*/
static void fc_disc_restart(struct fc_disc *disc)
{
struct fc_rport *rport;
struct fc_rport_libfc_priv *rdata, *next;
struct fc_lport *lport = disc->lport;
if (!disc->disc_callback)
return;
FC_DISC_DBG(disc, "Restarting discovery\n");
list_for_each_entry_safe(rdata, next, &disc->rports, peers) {
rport = PRIV_TO_RPORT(rdata);
list_del(&rdata->peers);
lport->tt.rport_logoff(rport);
}
disc->requested = 1;
if (!disc->pending)
fc_disc_gpn_ft_req(disc);
if (disc->pending)
return;
/*
* Advance disc_id. This is an arbitrary non-zero number that will
* match the value in the fc_rport_priv after discovery for all
* freshly-discovered remote ports. Avoid wrapping to zero.
*/
disc->disc_id = (disc->disc_id + 2) | 1;
disc->retry_count = 0;
fc_disc_gpn_ft_req(disc);
}
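As an aside on the update above, a minimal standalone check (not from the patch) confirms that (disc_id + 2) | 1 always yields an odd, and therefore non-zero, generation stamp, even across wraparound:

#include <assert.h>
#include <stdint.h>

int main(void)
{
        uint16_t disc_id = 0;
        unsigned int i;

        for (i = 0; i < 100000; i++) {
                disc_id = (disc_id + 2) | 1;
                assert(disc_id & 1);    /* ORing with 1 forces the low bit */
                assert(disc_id != 0);   /* so the stamp can never wrap to 0 */
        }
        return 0;
}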
/**
* fc_disc_start() - Fibre Channel Target discovery
* @lport: FC local port
*
* Returns non-zero if discovery cannot be started.
* @disc_callback: function to be called when discovery is complete
*/
static void fc_disc_start(void (*disc_callback)(struct fc_lport *,
enum fc_disc_event),
struct fc_lport *lport)
{
struct fc_rport *rport;
struct fc_rport_identifiers ids;
struct fc_disc *disc = &lport->disc;
/*
@ -345,145 +256,47 @@ static void fc_disc_start(void (*disc_callback)(struct fc_lport *,
* and send the GPN_FT request.
*/
mutex_lock(&disc->disc_mutex);
disc->disc_callback = disc_callback;
/*
* If not ready, or already running discovery, just set request flag.
*/
disc->requested = 1;
if (disc->pending) {
mutex_unlock(&disc->disc_mutex);
return;
}
/*
* Handle point-to-point mode as a simple discovery
* of the remote port. Yucky, yucky, yuck, yuck!
*/
rport = disc->lport->ptp_rp;
if (rport) {
ids.port_id = rport->port_id;
ids.port_name = rport->port_name;
ids.node_name = rport->node_name;
ids.roles = FC_RPORT_ROLE_UNKNOWN;
get_device(&rport->dev);
if (!fc_disc_new_target(disc, rport, &ids)) {
disc->event = DISC_EV_SUCCESS;
fc_disc_done(disc);
}
put_device(&rport->dev);
} else {
fc_disc_gpn_ft_req(disc); /* get ports by FC-4 type */
}
fc_disc_restart(disc);
mutex_unlock(&disc->disc_mutex);
}
static struct fc_rport_operations fc_disc_rport_ops = {
.event_callback = fc_disc_rport_callback,
};
/**
* fc_disc_new_target() - Handle new target found by discovery
* @lport: FC local port
* @rport: The previous FC remote port (NULL if new remote port)
* @ids: Identifiers for the new FC remote port
*
* Locking Note: This function expects that the disc_mutex is locked
* before it is called.
*/
static int fc_disc_new_target(struct fc_disc *disc,
struct fc_rport *rport,
struct fc_rport_identifiers *ids)
{
struct fc_lport *lport = disc->lport;
struct fc_rport_libfc_priv *rdata;
int error = 0;
if (rport && ids->port_name) {
if (rport->port_name == -1) {
/*
* Set WWN and fall through to notify of create.
*/
fc_rport_set_name(rport, ids->port_name,
rport->node_name);
} else if (rport->port_name != ids->port_name) {
/*
* This is a new port with the same FCID as
* a previously-discovered port. Presumably the old
* port logged out and a new port logged in and was
* assigned the same FCID. This should be rare.
* Delete the old one and fall thru to re-create.
*/
fc_disc_del_target(disc, rport);
rport = NULL;
}
}
if (((ids->port_name != -1) || (ids->port_id != -1)) &&
ids->port_id != fc_host_port_id(lport->host) &&
ids->port_name != lport->wwpn) {
if (!rport) {
rport = lport->tt.rport_lookup(lport, ids->port_id);
if (!rport) {
struct fc_disc_port dp;
dp.lp = lport;
dp.ids.port_id = ids->port_id;
dp.ids.port_name = ids->port_name;
dp.ids.node_name = ids->node_name;
dp.ids.roles = ids->roles;
rport = lport->tt.rport_create(&dp);
}
if (!rport)
error = -ENOMEM;
}
if (rport) {
rdata = rport->dd_data;
rdata->ops = &fc_disc_rport_ops;
rdata->rp_state = RPORT_ST_INIT;
list_add_tail(&rdata->peers, &disc->rogue_rports);
lport->tt.rport_login(rport);
}
}
return error;
}
/**
* fc_disc_del_target() - Delete a target
* @disc: FC discovery context
* @rport: The remote port to be removed
*/
static void fc_disc_del_target(struct fc_disc *disc, struct fc_rport *rport)
{
struct fc_lport *lport = disc->lport;
struct fc_rport_libfc_priv *rdata = rport->dd_data;
list_del(&rdata->peers);
lport->tt.rport_logoff(rport);
}
/**
* fc_disc_done() - Discovery has been completed
* @disc: FC discovery context
* @event: discovery completion status
*
* Locking Note: This function expects that the disc mutex is locked before
* it is called. The discovery callback is then made with the lock released,
* and the lock is re-taken before returning from this function
*/
static void fc_disc_done(struct fc_disc *disc)
static void fc_disc_done(struct fc_disc *disc, enum fc_disc_event event)
{
struct fc_lport *lport = disc->lport;
enum fc_disc_event event;
struct fc_rport_priv *rdata;
FC_DISC_DBG(disc, "Discovery complete\n");
event = disc->event;
disc->event = DISC_EV_NONE;
disc->pending = 0;
if (disc->requested) {
fc_disc_restart(disc);
return;
}
if (disc->requested)
fc_disc_gpn_ft_req(disc);
else
disc->pending = 0;
/*
* Go through all remote ports. If they were found in the latest
* discovery, reverify or log them in. Otherwise, log them out.
* Skip ports which were never discovered. These are the dNS port
* and ports which were created by PLOGI.
*/
list_for_each_entry(rdata, &disc->rports, peers) {
if (!rdata->disc_id)
continue;
if (rdata->disc_id == disc->disc_id)
lport->tt.rport_login(rdata);
else
lport->tt.rport_logoff(rdata);
}
mutex_unlock(&disc->disc_mutex);
disc->disc_callback(lport, event);
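The loop above amounts to a generation-count sweep: rports stamped with the current disc_id by the latest GPN_FT pass get (re-)logged in, older stamps get logged off, and a zero stamp marks ports (the dNS port, PLOGI-created ports) that discovery never owned. A distilled sketch of the pattern, with hypothetical node_login()/node_logoff() helpers:

struct node {
        struct node *next;
        unsigned int gen;       /* 0 = never discovered */
};

extern void node_login(struct node *);          /* hypothetical */
extern void node_logoff(struct node *);         /* hypothetical */

static void sweep(struct node *head, unsigned int cur_gen)
{
        struct node *n;

        for (n = head; n; n = n->next) {
                if (!n->gen)
                        continue;               /* not owned by discovery */
                if (n->gen == cur_gen)
                        node_login(n);          /* seen in the latest pass */
                else
                        node_logoff(n);         /* stale, disappeared */
        }
}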
@ -522,11 +335,8 @@ static void fc_disc_error(struct fc_disc *disc, struct fc_frame *fp)
}
disc->retry_count++;
schedule_delayed_work(&disc->disc_work, delay);
} else {
/* exceeded retries */
disc->event = DISC_EV_FAILED;
fc_disc_done(disc);
}
} else
fc_disc_done(disc, DISC_EV_FAILED);
}
}
@ -555,7 +365,7 @@ static void fc_disc_gpn_ft_req(struct fc_disc *disc)
if (!fp)
goto err;
if (lport->tt.elsct_send(lport, NULL, fp,
if (lport->tt.elsct_send(lport, 0, fp,
FC_NS_GPN_FT,
fc_disc_gpn_ft_resp,
disc, lport->e_d_tov))
@ -565,10 +375,12 @@ err:
}
/**
* fc_disc_gpn_ft_parse() - Parse the list of IDs and names resulting from a request
* fc_disc_gpn_ft_parse() - Parse the body of the dNS GPN_FT response.
* @lport: Fibre Channel host port instance
* @buf: GPN_FT response buffer
* @len: size of response buffer
*
* Goes through the list of IDs and names resulting from a request.
*/
static int fc_disc_gpn_ft_parse(struct fc_disc *disc, void *buf, size_t len)
{
@ -578,11 +390,11 @@ static int fc_disc_gpn_ft_parse(struct fc_disc *disc, void *buf, size_t len)
size_t plen;
size_t tlen;
int error = 0;
struct fc_disc_port dp;
struct fc_rport *rport;
struct fc_rport_libfc_priv *rdata;
struct fc_rport_identifiers ids;
struct fc_rport_priv *rdata;
lport = disc->lport;
disc->seq_count++;
/*
* Handle partial name record left over from previous call.
@ -591,6 +403,7 @@ static int fc_disc_gpn_ft_parse(struct fc_disc *disc, void *buf, size_t len)
plen = len;
np = (struct fc_gpn_ft_resp *)bp;
tlen = disc->buf_len;
disc->buf_len = 0;
if (tlen) {
WARN_ON(tlen >= sizeof(*np));
plen = sizeof(*np) - tlen;
@ -621,31 +434,25 @@ static int fc_disc_gpn_ft_parse(struct fc_disc *disc, void *buf, size_t len)
* After the first time through the loop, things return to "normal".
*/
while (plen >= sizeof(*np)) {
dp.lp = lport;
dp.ids.port_id = ntoh24(np->fp_fid);
dp.ids.port_name = ntohll(np->fp_wwpn);
dp.ids.node_name = -1;
dp.ids.roles = FC_RPORT_ROLE_UNKNOWN;
ids.port_id = ntoh24(np->fp_fid);
ids.port_name = ntohll(np->fp_wwpn);
if ((dp.ids.port_id != fc_host_port_id(lport->host)) &&
(dp.ids.port_name != lport->wwpn)) {
rport = lport->tt.rport_create(&dp);
if (rport) {
rdata = rport->dd_data;
rdata->ops = &fc_disc_rport_ops;
rdata->local_port = lport;
list_add_tail(&rdata->peers,
&disc->rogue_rports);
lport->tt.rport_login(rport);
} else
if (ids.port_id != fc_host_port_id(lport->host) &&
ids.port_name != lport->wwpn) {
rdata = lport->tt.rport_create(lport, ids.port_id);
if (rdata) {
rdata->ids.port_name = ids.port_name;
rdata->disc_id = disc->disc_id;
} else {
printk(KERN_WARNING "libfc: Failed to allocate "
"memory for the newly discovered port "
"(%6x)\n", dp.ids.port_id);
"(%6x)\n", ids.port_id);
error = -ENOMEM;
}
}
if (np->fp_flags & FC_NS_FID_LAST) {
disc->event = DISC_EV_SUCCESS;
fc_disc_done(disc);
fc_disc_done(disc, DISC_EV_SUCCESS);
len = 0;
break;
}
@ -665,8 +472,6 @@ static int fc_disc_gpn_ft_parse(struct fc_disc *disc, void *buf, size_t len)
memcpy(&disc->partial_buf, np, len);
}
disc->buf_len = (unsigned char) len;
} else {
disc->buf_len = 0;
}
return error;
}
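The partial_buf/buf_len handling above is a general technique for parsing fixed-size records that may be split across frame boundaries: stash the trailing fragment, complete it from the next buffer, then consume whole records in place. A self-contained sketch of the same idea (record size and names are illustrative):

#include <string.h>

#define REC_SZ 32       /* fixed record size, like struct fc_gpn_ft_resp */

struct parser {
        unsigned char partial[REC_SZ];  /* fragment from the last buffer */
        size_t have;                    /* bytes saved in partial */
};

static void feed(struct parser *p, const unsigned char *buf, size_t len,
                 void (*emit)(const unsigned char *rec))
{
        /* First complete any record left over from the previous call. */
        if (p->have) {
                size_t need = REC_SZ - p->have;

                if (need > len)
                        need = len;
                memcpy(p->partial + p->have, buf, need);
                p->have += need;
                buf += need;
                len -= need;
                if (p->have < REC_SZ)
                        return;         /* still incomplete */
                emit(p->partial);
                p->have = 0;
        }
        /* Then consume whole records in place. */
        while (len >= REC_SZ) {
                emit(buf);
                buf += REC_SZ;
                len -= REC_SZ;
        }
        /* Save any trailing fragment for the next buffer. */
        memcpy(p->partial, buf, len);
        p->have = len;
}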
@ -683,8 +488,7 @@ static void fc_disc_timeout(struct work_struct *work)
struct fc_disc,
disc_work.work);
mutex_lock(&disc->disc_mutex);
if (disc->requested && !disc->pending)
fc_disc_gpn_ft_req(disc);
fc_disc_gpn_ft_req(disc);
mutex_unlock(&disc->disc_mutex);
}
@ -703,10 +507,10 @@ static void fc_disc_gpn_ft_resp(struct fc_seq *sp, struct fc_frame *fp,
struct fc_disc *disc = disc_arg;
struct fc_ct_hdr *cp;
struct fc_frame_header *fh;
enum fc_disc_event event = DISC_EV_NONE;
unsigned int seq_cnt;
void *buf = NULL;
unsigned int len;
int error;
int error = 0;
mutex_lock(&disc->disc_mutex);
FC_DISC_DBG(disc, "Received a GPN_FT response\n");
@ -721,77 +525,158 @@ static void fc_disc_gpn_ft_resp(struct fc_seq *sp, struct fc_frame *fp,
fh = fc_frame_header_get(fp);
len = fr_len(fp) - sizeof(*fh);
seq_cnt = ntohs(fh->fh_seq_cnt);
if (fr_sof(fp) == FC_SOF_I3 && seq_cnt == 0 &&
disc->seq_count == 0) {
if (fr_sof(fp) == FC_SOF_I3 && seq_cnt == 0 && disc->seq_count == 0) {
cp = fc_frame_payload_get(fp, sizeof(*cp));
if (!cp) {
FC_DISC_DBG(disc, "GPN_FT response too short, len %d\n",
fr_len(fp));
event = DISC_EV_FAILED;
} else if (ntohs(cp->ct_cmd) == FC_FS_ACC) {
/* Accepted, parse the response. */
buf = cp + 1;
len -= sizeof(*cp);
error = fc_disc_gpn_ft_parse(disc, cp + 1, len);
} else if (ntohs(cp->ct_cmd) == FC_FS_RJT) {
FC_DISC_DBG(disc, "GPN_FT rejected reason %x exp %x "
"(check zoning)\n", cp->ct_reason,
cp->ct_explan);
disc->event = DISC_EV_FAILED;
fc_disc_done(disc);
event = DISC_EV_FAILED;
if (cp->ct_reason == FC_FS_RJT_UNABL &&
cp->ct_explan == FC_FS_EXP_FTNR)
event = DISC_EV_SUCCESS;
} else {
FC_DISC_DBG(disc, "GPN_FT unexpected response code "
"%x\n", ntohs(cp->ct_cmd));
event = DISC_EV_FAILED;
}
} else if (fr_sof(fp) == FC_SOF_N3 &&
seq_cnt == disc->seq_count) {
buf = fh + 1;
} else if (fr_sof(fp) == FC_SOF_N3 && seq_cnt == disc->seq_count) {
error = fc_disc_gpn_ft_parse(disc, fh + 1, len);
} else {
FC_DISC_DBG(disc, "GPN_FT unexpected frame - out of sequence? "
"seq_cnt %x expected %x sof %x eof %x\n",
seq_cnt, disc->seq_count, fr_sof(fp), fr_eof(fp));
event = DISC_EV_FAILED;
}
if (buf) {
error = fc_disc_gpn_ft_parse(disc, buf, len);
if (error)
fc_disc_error(disc, fp);
else
disc->seq_count++;
}
if (error)
fc_disc_error(disc, fp);
else if (event != DISC_EV_NONE)
fc_disc_done(disc, event);
fc_frame_free(fp);
mutex_unlock(&disc->disc_mutex);
}
/**
* fc_disc_gpn_id_resp() - Handle a response frame from Get Port Names (GPN_ID)
* @sp: exchange sequence
* @fp: response frame
* @rdata_arg: remote port private data
*
* Locking Note: This function is called without disc mutex held.
*/
static void fc_disc_gpn_id_resp(struct fc_seq *sp, struct fc_frame *fp,
void *rdata_arg)
{
struct fc_rport_priv *rdata = rdata_arg;
struct fc_rport_priv *new_rdata;
struct fc_lport *lport;
struct fc_disc *disc;
struct fc_ct_hdr *cp;
struct fc_ns_gid_pn *pn;
u64 port_name;
lport = rdata->local_port;
disc = &lport->disc;
mutex_lock(&disc->disc_mutex);
if (PTR_ERR(fp) == -FC_EX_CLOSED)
goto out;
if (IS_ERR(fp))
goto redisc;
cp = fc_frame_payload_get(fp, sizeof(*cp));
if (!cp)
goto redisc;
if (ntohs(cp->ct_cmd) == FC_FS_ACC) {
if (fr_len(fp) < sizeof(struct fc_frame_header) +
sizeof(*cp) + sizeof(*pn))
goto redisc;
pn = (struct fc_ns_gid_pn *)(cp + 1);
port_name = get_unaligned_be64(&pn->fn_wwpn);
if (rdata->ids.port_name == -1)
rdata->ids.port_name = port_name;
else if (rdata->ids.port_name != port_name) {
FC_DISC_DBG(disc, "GPN_ID accepted. WWPN changed. "
"Port-id %x wwpn %llx\n",
rdata->ids.port_id, port_name);
lport->tt.rport_logoff(rdata);
new_rdata = lport->tt.rport_create(lport,
rdata->ids.port_id);
if (new_rdata) {
new_rdata->disc_id = disc->disc_id;
lport->tt.rport_login(new_rdata);
}
goto out;
}
rdata->disc_id = disc->disc_id;
lport->tt.rport_login(rdata);
} else if (ntohs(cp->ct_cmd) == FC_FS_RJT) {
FC_DISC_DBG(disc, "GPN_ID rejected reason %x exp %x\n",
cp->ct_reason, cp->ct_explan);
lport->tt.rport_logoff(rdata);
} else {
FC_DISC_DBG(disc, "GPN_ID unexpected response code %x\n",
ntohs(cp->ct_cmd));
redisc:
fc_disc_restart(disc);
}
out:
mutex_unlock(&disc->disc_mutex);
kref_put(&rdata->kref, lport->tt.rport_destroy);
}
/**
* fc_disc_gpn_id_req() - Send Get Port Names by ID (GPN_ID) request
* @lport: local port
* @rdata: remote port private data
*
* Locking Note: This function expects that the disc_mutex is locked
* before it is called.
* On failure, an error code is returned.
*/
static int fc_disc_gpn_id_req(struct fc_lport *lport,
struct fc_rport_priv *rdata)
{
struct fc_frame *fp;
fp = fc_frame_alloc(lport, sizeof(struct fc_ct_hdr) +
sizeof(struct fc_ns_fid));
if (!fp)
return -ENOMEM;
if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, FC_NS_GPN_ID,
fc_disc_gpn_id_resp, rdata, lport->e_d_tov))
return -ENOMEM;
kref_get(&rdata->kref);
return 0;
}
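The kref_get() above pairs with the kref_put() at the end of fc_disc_gpn_id_resp(): the extra reference keeps rdata alive until the asynchronous response (or error) callback has run. The shape of that hold-across-callback pattern, sketched with hypothetical names:

#include <linux/errno.h>
#include <linux/kref.h>

struct req_ctx {
        struct kref kref;
        /* ... request state ... */
};

extern void req_ctx_release(struct kref *kref);         /* hypothetical */
extern int submit(struct req_ctx *ctx, void (*cb)(struct req_ctx *));

static void on_response(struct req_ctx *ctx)
{
        /* ... consume the response ... */
        kref_put(&ctx->kref, req_ctx_release);  /* drop the callback's hold */
}

static int send_with_hold(struct req_ctx *ctx)
{
        if (submit(ctx, on_response))
                return -ENOMEM;         /* callback will never run */
        kref_get(&ctx->kref);           /* the pending callback owns this ref */
        return 0;
}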
/**
* fc_disc_single() - Discover the directory information for a single target
* @lport: FC local port
* @lport: local port
* @dp: The port to rediscover
*
* Locking Note: This function expects that the disc_mutex is locked
* before it is called.
*/
static void fc_disc_single(struct fc_disc *disc, struct fc_disc_port *dp)
static int fc_disc_single(struct fc_lport *lport, struct fc_disc_port *dp)
{
struct fc_lport *lport;
struct fc_rport *new_rport;
struct fc_rport_libfc_priv *rdata;
struct fc_rport_priv *rdata;
lport = disc->lport;
if (dp->ids.port_id == fc_host_port_id(lport->host))
goto out;
new_rport = lport->tt.rport_create(dp);
if (new_rport) {
rdata = new_rport->dd_data;
rdata->ops = &fc_disc_rport_ops;
kfree(dp);
list_add_tail(&rdata->peers, &disc->rogue_rports);
lport->tt.rport_login(new_rport);
}
return;
out:
kfree(dp);
rdata = lport->tt.rport_create(lport, dp->port_id);
if (!rdata)
return -ENOMEM;
rdata->disc_id = 0;
return fc_disc_gpn_id_req(lport, rdata);
}
/**
@ -841,18 +726,12 @@ int fc_disc_init(struct fc_lport *lport)
if (!lport->tt.disc_recv_req)
lport->tt.disc_recv_req = fc_disc_recv_req;
if (!lport->tt.rport_lookup)
lport->tt.rport_lookup = fc_disc_lookup_rport;
disc = &lport->disc;
INIT_DELAYED_WORK(&disc->disc_work, fc_disc_timeout);
mutex_init(&disc->disc_mutex);
INIT_LIST_HEAD(&disc->rports);
INIT_LIST_HEAD(&disc->rogue_rports);
disc->lport = lport;
disc->delay = FC_DISC_DELAY;
disc->event = DISC_EV_NONE;
return 0;
}

View File

@ -32,7 +32,7 @@
* fc_elsct_send - sends ELS/CT frame
*/
static struct fc_seq *fc_elsct_send(struct fc_lport *lport,
struct fc_rport *rport,
u32 did,
struct fc_frame *fp,
unsigned int op,
void (*resp)(struct fc_seq *,
@ -41,16 +41,17 @@ static struct fc_seq *fc_elsct_send(struct fc_lport *lport,
void *arg, u32 timer_msec)
{
enum fc_rctl r_ctl;
u32 did = FC_FID_NONE;
enum fc_fh_type fh_type;
int rc;
/* ELS requests */
if ((op >= ELS_LS_RJT) && (op <= ELS_AUTH_ELS))
rc = fc_els_fill(lport, rport, fp, op, &r_ctl, &did, &fh_type);
else
rc = fc_els_fill(lport, did, fp, op, &r_ctl, &fh_type);
else {
/* CT requests */
rc = fc_ct_fill(lport, fp, op, &r_ctl, &did, &fh_type);
rc = fc_ct_fill(lport, did, fp, op, &r_ctl, &fh_type);
did = FC_FID_DIR_SERV;
}
if (rc)
return NULL;
@ -69,3 +70,41 @@ int fc_elsct_init(struct fc_lport *lport)
return 0;
}
EXPORT_SYMBOL(fc_elsct_init);
/**
* fc_els_resp_type() - return string describing ELS response for debug.
* @fp: frame pointer with possible error code.
*/
const char *fc_els_resp_type(struct fc_frame *fp)
{
const char *msg;
if (IS_ERR(fp)) {
switch (-PTR_ERR(fp)) {
case FC_NO_ERR:
msg = "response no error";
break;
case FC_EX_TIMEOUT:
msg = "response timeout";
break;
case FC_EX_CLOSED:
msg = "response closed";
break;
default:
msg = "response unknown error";
break;
}
} else {
switch (fc_frame_payload_op(fp)) {
case ELS_LS_ACC:
msg = "accept";
break;
case ELS_LS_RJT:
msg = "reject";
break;
default:
msg = "response unknown ELS";
break;
}
}
return msg;
}

View File

@ -32,6 +32,9 @@
#include <scsi/libfc.h>
#include <scsi/fc_encode.h>
u16 fc_cpu_mask; /* cpu mask for possible cpus */
EXPORT_SYMBOL(fc_cpu_mask);
static u16 fc_cpu_order; /* 2's power to represent total possible cpus */
static struct kmem_cache *fc_em_cachep; /* cache for exchanges */
/*
@ -47,6 +50,20 @@ static struct kmem_cache *fc_em_cachep; /* cache for exchanges */
* fc_seq holds the state for an individual sequence.
*/
/*
* Per cpu exchange pool
*
 * This structure manages per cpu exchanges through an array of
 * exchange pointers. The pointer array is allocated directly after
 * the struct fc_exch_pool memory, sized for the range of exchanges
 * assigned to each per cpu pool.
*/
struct fc_exch_pool {
u16 next_index; /* next possible free exchange index */
u16 total_exches; /* total allocated exchanges */
spinlock_t lock; /* exch pool lock */
struct list_head ex_list; /* allocated exchanges list */
};
/*
* Exchange manager.
*
@ -55,17 +72,13 @@ static struct kmem_cache *fc_em_cachep; /* cache for exchanges */
*/
struct fc_exch_mgr {
enum fc_class class; /* default class for sequences */
spinlock_t em_lock; /* exchange manager lock,
must be taken before ex_lock */
u16 last_xid; /* last allocated exchange ID */
struct kref kref; /* exchange mgr reference count */
u16 min_xid; /* min exchange ID */
u16 max_xid; /* max exchange ID */
u16 max_read; /* max exchange ID for read */
u16 last_read; /* last xid allocated for read */
u32 total_exches; /* total allocated exchanges */
struct list_head ex_list; /* allocated exchanges list */
struct fc_lport *lp; /* fc device instance */
mempool_t *ep_pool; /* reserve ep's */
u16 pool_max_index; /* max exch array index in exch pool */
struct fc_exch_pool *pool; /* per cpu exch pool */
/*
* currently exchange mgr stats are updated but not used.
@ -80,10 +93,15 @@ struct fc_exch_mgr {
atomic_t seq_not_found;
atomic_t non_bls_resp;
} stats;
struct fc_exch **exches; /* for exch pointers indexed by xid */
};
#define fc_seq_exch(sp) container_of(sp, struct fc_exch, seq)
struct fc_exch_mgr_anchor {
struct list_head ema_list;
struct fc_exch_mgr *mp;
bool (*match)(struct fc_frame *);
};
static void fc_exch_rrq(struct fc_exch *);
static void fc_seq_ls_acc(struct fc_seq *);
static void fc_seq_ls_rjt(struct fc_seq *, enum fc_els_rjt_reason,
@ -167,8 +185,8 @@ static struct fc_seq *fc_seq_start_next_locked(struct fc_seq *sp);
* sequence allocation and deallocation must be locked.
* - exchange refcnt can be done atomicly without locks.
* - sequence allocation must be locked by exch lock.
* - If the em_lock and ex_lock must be taken at the same time, then the
* em_lock must be taken before the ex_lock.
* - If the EM pool lock and ex_lock must be taken at the same time, then the
* EM pool lock must be taken before the ex_lock.
*/
/*
@ -268,8 +286,6 @@ static void fc_exch_release(struct fc_exch *ep)
mp = ep->em;
if (ep->destructor)
ep->destructor(&ep->seq, ep->arg);
if (ep->lp->tt.exch_put)
ep->lp->tt.exch_put(ep->lp, mp, ep->xid);
WARN_ON(!(ep->esb_stat & ESB_ST_COMPLETE));
mempool_free(ep, mp->ep_pool);
}
@ -299,17 +315,31 @@ static int fc_exch_done_locked(struct fc_exch *ep)
return rc;
}
static void fc_exch_mgr_delete_ep(struct fc_exch *ep)
static inline struct fc_exch *fc_exch_ptr_get(struct fc_exch_pool *pool,
u16 index)
{
struct fc_exch_mgr *mp;
struct fc_exch **exches = (struct fc_exch **)(pool + 1);
return exches[index];
}
mp = ep->em;
spin_lock_bh(&mp->em_lock);
WARN_ON(mp->total_exches <= 0);
mp->total_exches--;
mp->exches[ep->xid - mp->min_xid] = NULL;
static inline void fc_exch_ptr_set(struct fc_exch_pool *pool, u16 index,
struct fc_exch *ep)
{
((struct fc_exch **)(pool + 1))[index] = ep;
}
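The (pool + 1) casts work because each per-cpu region is one allocation: a struct fc_exch_pool header with the exchange pointer array laid out directly behind it. The same header-plus-trailing-array trick in isolation, with plain calloc() standing in for __alloc_percpu():

#include <stdlib.h>

struct hdr {
        int count;
        /* 'count' pointer slots follow this struct in memory */
};

static struct hdr *hdr_alloc(int count)
{
        struct hdr *h = calloc(1, sizeof(*h) + count * sizeof(void *));

        if (h)
                h->count = count;
        return h;
}

static void **hdr_slots(struct hdr *h)
{
        return (void **)(h + 1);        /* array starts right after header */
}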
static void fc_exch_delete(struct fc_exch *ep)
{
struct fc_exch_pool *pool;
pool = ep->pool;
spin_lock_bh(&pool->lock);
WARN_ON(pool->total_exches <= 0);
pool->total_exches--;
fc_exch_ptr_set(pool, (ep->xid - ep->em->min_xid) >> fc_cpu_order,
NULL);
list_del(&ep->ex_list);
spin_unlock_bh(&mp->em_lock);
spin_unlock_bh(&pool->lock);
fc_exch_release(ep); /* drop hold for exch in mp */
}
@ -322,7 +352,7 @@ static inline void fc_exch_timer_set_locked(struct fc_exch *ep,
if (ep->state & (FC_EX_RST_CLEANUP | FC_EX_DONE))
return;
FC_EXCH_DBG(ep, "Exchange timed out, notifying the upper layer\n");
FC_EXCH_DBG(ep, "Exchange timer armed\n");
if (schedule_delayed_work(&ep->timeout_work,
msecs_to_jiffies(timer_msec)))
@ -408,6 +438,8 @@ static void fc_exch_timeout(struct work_struct *work)
u32 e_stat;
int rc = 1;
FC_EXCH_DBG(ep, "Exchange timed out\n");
spin_lock_bh(&ep->ex_lock);
if (ep->state & (FC_EX_RST_CLEANUP | FC_EX_DONE))
goto unlock;
@ -427,7 +459,7 @@ static void fc_exch_timeout(struct work_struct *work)
rc = fc_exch_done_locked(ep);
spin_unlock_bh(&ep->ex_lock);
if (!rc)
fc_exch_mgr_delete_ep(ep);
fc_exch_delete(ep);
if (resp)
resp(sp, ERR_PTR(-FC_EX_TIMEOUT), arg);
fc_seq_exch_abort(sp, 2 * ep->r_a_tov);
@ -460,65 +492,20 @@ static struct fc_seq *fc_seq_alloc(struct fc_exch *ep, u8 seq_id)
return sp;
}
/*
* fc_em_alloc_xid - returns an xid based on request type
* @lp : ptr to associated lport
 * @fp : ptr to the associated frame
/**
* fc_exch_em_alloc() - allocate an exchange from a specified EM.
* @lport: ptr to the local port
* @mp: ptr to the exchange manager
*
* check the associated fc_fsp_pkt to get scsi command type and
* command direction to decide from which range this exch id
* will be allocated from.
*
 * Returns : 0 or a valid xid
* Returns pointer to allocated fc_exch with exch lock held.
*/
static u16 fc_em_alloc_xid(struct fc_exch_mgr *mp, const struct fc_frame *fp)
{
u16 xid, min, max;
u16 *plast;
struct fc_exch *ep = NULL;
if (mp->max_read) {
if (fc_fcp_is_read(fr_fsp(fp))) {
min = mp->min_xid;
max = mp->max_read;
plast = &mp->last_read;
} else {
min = mp->max_read + 1;
max = mp->max_xid;
plast = &mp->last_xid;
}
} else {
min = mp->min_xid;
max = mp->max_xid;
plast = &mp->last_xid;
}
xid = *plast;
do {
xid = (xid == max) ? min : xid + 1;
ep = mp->exches[xid - mp->min_xid];
} while ((ep != NULL) && (xid != *plast));
if (unlikely(ep))
xid = 0;
else
*plast = xid;
return xid;
}
/*
* fc_exch_alloc - allocate an exchange.
* @mp : ptr to the exchange manager
* @xid: input xid
*
* if xid is supplied zero then assign next free exchange ID
* from exchange manager, otherwise use supplied xid.
* Returns with exch lock held.
*/
struct fc_exch *fc_exch_alloc(struct fc_exch_mgr *mp,
struct fc_frame *fp, u16 xid)
static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport,
struct fc_exch_mgr *mp)
{
struct fc_exch *ep;
unsigned int cpu;
u16 index;
struct fc_exch_pool *pool;
/* allocate memory for exchange */
ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
@ -528,16 +515,17 @@ struct fc_exch *fc_exch_alloc(struct fc_exch_mgr *mp,
}
memset(ep, 0, sizeof(*ep));
spin_lock_bh(&mp->em_lock);
/* alloc xid if input xid 0 */
if (!xid) {
/* alloc a new xid */
xid = fc_em_alloc_xid(mp, fp);
if (!xid) {
printk(KERN_WARNING "libfc: Failed to allocate an exhange\n");
cpu = smp_processor_id();
pool = per_cpu_ptr(mp->pool, cpu);
spin_lock_bh(&pool->lock);
index = pool->next_index;
/* allocate new exch from pool */
while (fc_exch_ptr_get(pool, index)) {
index = index == mp->pool_max_index ? 0 : index + 1;
if (index == pool->next_index)
goto err;
}
}
pool->next_index = index == mp->pool_max_index ? 0 : index + 1;
fc_exch_hold(ep); /* hold for exch in mp */
spin_lock_init(&ep->ex_lock);
@ -548,18 +536,19 @@ struct fc_exch *fc_exch_alloc(struct fc_exch_mgr *mp,
*/
spin_lock_bh(&ep->ex_lock);
mp->exches[xid - mp->min_xid] = ep;
list_add_tail(&ep->ex_list, &mp->ex_list);
fc_exch_ptr_set(pool, index, ep);
list_add_tail(&ep->ex_list, &pool->ex_list);
fc_seq_alloc(ep, ep->seq_id++);
mp->total_exches++;
spin_unlock_bh(&mp->em_lock);
pool->total_exches++;
spin_unlock_bh(&pool->lock);
/*
* update exchange
*/
ep->oxid = ep->xid = xid;
ep->oxid = ep->xid = (index << fc_cpu_order | cpu) + mp->min_xid;
ep->em = mp;
ep->lp = mp->lp;
ep->pool = pool;
ep->lp = lport;
ep->f_ctl = FC_FC_FIRST_SEQ; /* next seq is first seq */
ep->rxid = FC_XID_UNKNOWN;
ep->class = mp->class;
@ -567,11 +556,36 @@ struct fc_exch *fc_exch_alloc(struct fc_exch_mgr *mp,
out:
return ep;
err:
spin_unlock_bh(&mp->em_lock);
spin_unlock_bh(&pool->lock);
atomic_inc(&mp->stats.no_free_exch_xid);
mempool_free(ep, mp->ep_pool);
return NULL;
}
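The index search above is a next-fit scan: start where the previous allocation left off, wrap at pool_max_index, and fail once the scan returns to its starting point. Reduced to its core (an illustrative rewrite, not the kernel code):

/* Returns a free slot index, or -1 when the table is full. */
static int find_free_slot(void **slots, int nslots, int *next)
{
        int i = *next;

        while (slots[i]) {
                i = (i == nslots - 1) ? 0 : i + 1;
                if (i == *next)
                        return -1;      /* wrapped around: all busy */
        }
        *next = (i == nslots - 1) ? 0 : i + 1;  /* resume after the hit */
        return i;
}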
/**
* fc_exch_alloc() - allocate an exchange.
* @lport: ptr to the local port
* @fp: ptr to the FC frame
*
 * This function walks the list of exchange manager (EM)
 * anchors to select an EM for the new exchange allocation.
 * An EM is selected if it either has a NULL match function
 * pointer or its match function returns true.
*/
struct fc_exch *fc_exch_alloc(struct fc_lport *lport, struct fc_frame *fp)
{
struct fc_exch_mgr_anchor *ema;
struct fc_exch *ep;
list_for_each_entry(ema, &lport->ema_list, ema_list) {
if (!ema->match || ema->match(fp)) {
ep = fc_exch_em_alloc(lport, ema->mp);
if (ep)
return ep;
}
}
return NULL;
}
EXPORT_SYMBOL(fc_exch_alloc);
/*
@ -579,16 +593,18 @@ EXPORT_SYMBOL(fc_exch_alloc);
*/
static struct fc_exch *fc_exch_find(struct fc_exch_mgr *mp, u16 xid)
{
struct fc_exch_pool *pool;
struct fc_exch *ep = NULL;
if ((xid >= mp->min_xid) && (xid <= mp->max_xid)) {
spin_lock_bh(&mp->em_lock);
ep = mp->exches[xid - mp->min_xid];
pool = per_cpu_ptr(mp->pool, xid & fc_cpu_mask);
spin_lock_bh(&pool->lock);
ep = fc_exch_ptr_get(pool, (xid - mp->min_xid) >> fc_cpu_order);
if (ep) {
fc_exch_hold(ep);
WARN_ON(ep->xid != xid);
}
spin_unlock_bh(&mp->em_lock);
spin_unlock_bh(&pool->lock);
}
return ep;
}
@ -602,7 +618,7 @@ void fc_exch_done(struct fc_seq *sp)
rc = fc_exch_done_locked(ep);
spin_unlock_bh(&ep->ex_lock);
if (!rc)
fc_exch_mgr_delete_ep(ep);
fc_exch_delete(ep);
}
EXPORT_SYMBOL(fc_exch_done);
@ -610,12 +626,14 @@ EXPORT_SYMBOL(fc_exch_done);
* Allocate a new exchange as responder.
* Sets the responder ID in the frame header.
*/
static struct fc_exch *fc_exch_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
static struct fc_exch *fc_exch_resp(struct fc_lport *lport,
struct fc_exch_mgr *mp,
struct fc_frame *fp)
{
struct fc_exch *ep;
struct fc_frame_header *fh;
ep = mp->lp->tt.exch_get(mp->lp, fp);
ep = fc_exch_alloc(lport, fp);
if (ep) {
ep->class = fc_frame_class(fp);
@ -641,7 +659,7 @@ static struct fc_exch *fc_exch_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
ep->esb_stat &= ~ESB_ST_SEQ_INIT;
fc_exch_hold(ep); /* hold for caller */
spin_unlock_bh(&ep->ex_lock); /* lock from exch_get */
spin_unlock_bh(&ep->ex_lock); /* lock from fc_exch_alloc */
}
return ep;
}
@ -651,7 +669,8 @@ static struct fc_exch *fc_exch_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
* If fc_pf_rjt_reason is FC_RJT_NONE then this function will have a hold
* on the ep that should be released by the caller.
*/
static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_exch_mgr *mp,
static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
struct fc_exch_mgr *mp,
struct fc_frame *fp)
{
struct fc_frame_header *fh = fc_frame_header_get(fp);
@ -705,7 +724,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_exch_mgr *mp,
reject = FC_RJT_RX_ID;
goto rel;
}
ep = fc_exch_resp(mp, fp);
ep = fc_exch_resp(lport, mp, fp);
if (!ep) {
reject = FC_RJT_EXCH_EST; /* XXX */
goto out;
@ -822,7 +841,6 @@ struct fc_seq *fc_seq_start_next(struct fc_seq *sp)
struct fc_exch *ep = fc_seq_exch(sp);
spin_lock_bh(&ep->ex_lock);
WARN_ON((ep->esb_stat & ESB_ST_COMPLETE) != 0);
sp = fc_seq_start_next_locked(sp);
spin_unlock_bh(&ep->ex_lock);
@ -999,8 +1017,8 @@ static void fc_exch_send_ba_rjt(struct fc_frame *rx_fp,
*/
memcpy(fh->fh_s_id, rx_fh->fh_d_id, 3);
memcpy(fh->fh_d_id, rx_fh->fh_s_id, 3);
fh->fh_ox_id = rx_fh->fh_rx_id;
fh->fh_rx_id = rx_fh->fh_ox_id;
fh->fh_ox_id = rx_fh->fh_ox_id;
fh->fh_rx_id = rx_fh->fh_rx_id;
fh->fh_seq_cnt = rx_fh->fh_seq_cnt;
fh->fh_r_ctl = FC_RCTL_BA_RJT;
fh->fh_type = FC_TYPE_BLS;
@ -1097,7 +1115,7 @@ static void fc_exch_recv_req(struct fc_lport *lp, struct fc_exch_mgr *mp,
enum fc_pf_rjt_reason reject;
fr_seq(fp) = NULL;
reject = fc_seq_lookup_recip(mp, fp);
reject = fc_seq_lookup_recip(lp, mp, fp);
if (reject == FC_RJT_NONE) {
sp = fr_seq(fp); /* sequence will be held */
ep = fc_seq_exch(sp);
@ -1123,7 +1141,7 @@ static void fc_exch_recv_req(struct fc_lport *lp, struct fc_exch_mgr *mp,
lp->tt.lport_recv(lp, sp, fp);
fc_exch_release(ep); /* release from lookup */
} else {
FC_EM_DBG(mp, "exch/seq lookup failed: reject %x\n", reject);
FC_LPORT_DBG(lp, "exch/seq lookup failed: reject %x\n", reject);
fc_frame_free(fp);
}
}
@ -1193,7 +1211,7 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
WARN_ON(fc_seq_exch(sp) != ep);
spin_unlock_bh(&ep->ex_lock);
if (!rc)
fc_exch_mgr_delete_ep(ep);
fc_exch_delete(ep);
}
/*
@ -1229,13 +1247,12 @@ static void fc_exch_recv_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
struct fc_seq *sp;
sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */
if (!sp) {
if (!sp)
atomic_inc(&mp->stats.xid_not_found);
FC_EM_DBG(mp, "seq lookup failed\n");
} else {
else
atomic_inc(&mp->stats.non_bls_resp);
FC_EM_DBG(mp, "non-BLS response to sequence");
}
fc_frame_free(fp);
}
@ -1304,7 +1321,7 @@ static void fc_exch_abts_resp(struct fc_exch *ep, struct fc_frame *fp)
rc = fc_exch_done_locked(ep);
spin_unlock_bh(&ep->ex_lock);
if (!rc)
fc_exch_mgr_delete_ep(ep);
fc_exch_delete(ep);
if (resp)
resp(sp, fp, ex_resp_arg);
@ -1447,44 +1464,77 @@ static void fc_exch_reset(struct fc_exch *ep)
rc = fc_exch_done_locked(ep);
spin_unlock_bh(&ep->ex_lock);
if (!rc)
fc_exch_mgr_delete_ep(ep);
fc_exch_delete(ep);
if (resp)
resp(sp, ERR_PTR(-FC_EX_CLOSED), arg);
}
/*
* Reset an exchange manager, releasing all sequences and exchanges.
* If sid is non-zero, reset only exchanges we source from that FID.
* If did is non-zero, reset only exchanges destined to that FID.
/**
 * fc_exch_pool_reset() - Resets a per cpu exchange pool.
* @lport: ptr to the local port
* @pool: ptr to the per cpu exches pool
* @sid: source FC ID
* @did: destination FC ID
*
 * Resets a per cpu exchange pool, releasing all of its sequences
 * and exchanges. If sid is non-zero, reset only exchanges
 * we sourced from that FID. If did is non-zero, reset only
 * exchanges destined to that FID.
*/
void fc_exch_mgr_reset(struct fc_lport *lp, u32 sid, u32 did)
static void fc_exch_pool_reset(struct fc_lport *lport,
struct fc_exch_pool *pool,
u32 sid, u32 did)
{
struct fc_exch *ep;
struct fc_exch *next;
struct fc_exch_mgr *mp = lp->emp;
spin_lock_bh(&mp->em_lock);
spin_lock_bh(&pool->lock);
restart:
list_for_each_entry_safe(ep, next, &mp->ex_list, ex_list) {
if ((sid == 0 || sid == ep->sid) &&
list_for_each_entry_safe(ep, next, &pool->ex_list, ex_list) {
if ((lport == ep->lp) &&
(sid == 0 || sid == ep->sid) &&
(did == 0 || did == ep->did)) {
fc_exch_hold(ep);
spin_unlock_bh(&mp->em_lock);
spin_unlock_bh(&pool->lock);
fc_exch_reset(ep);
fc_exch_release(ep);
spin_lock_bh(&mp->em_lock);
spin_lock_bh(&pool->lock);
/*
* must restart loop incase while lock was down
* multiple eps were released.
 * must restart the loop in case multiple
 * eps were released while the lock was down.
*/
goto restart;
}
}
spin_unlock_bh(&mp->em_lock);
spin_unlock_bh(&pool->lock);
}
/**
* fc_exch_mgr_reset() - Resets all EMs of a lport
* @lport: ptr to the local port
* @sid: source FC ID
* @did: destination FC ID
*
 * Reset all EMs of an lport, releasing all of their sequences and
 * exchanges. If sid is non-zero, reset only exchanges
 * we sourced from that FID. If did is non-zero, reset only
 * exchanges destined to that FID.
*/
void fc_exch_mgr_reset(struct fc_lport *lport, u32 sid, u32 did)
{
struct fc_exch_mgr_anchor *ema;
unsigned int cpu;
list_for_each_entry(ema, &lport->ema_list, ema_list) {
for_each_possible_cpu(cpu)
fc_exch_pool_reset(lport,
per_cpu_ptr(ema->mp->pool, cpu),
sid, did);
}
}
EXPORT_SYMBOL(fc_exch_mgr_reset);
@ -1730,85 +1780,129 @@ reject:
fc_frame_free(fp);
}
struct fc_exch_mgr_anchor *fc_exch_mgr_add(struct fc_lport *lport,
struct fc_exch_mgr *mp,
bool (*match)(struct fc_frame *))
{
struct fc_exch_mgr_anchor *ema;
ema = kmalloc(sizeof(*ema), GFP_ATOMIC);
if (!ema)
return ema;
ema->mp = mp;
ema->match = match;
/* add EM anchor to EM anchors list */
list_add_tail(&ema->ema_list, &lport->ema_list);
kref_get(&mp->kref);
return ema;
}
EXPORT_SYMBOL(fc_exch_mgr_add);
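Given the signature above, a driver wanting a second, filtered EM on an lport (say, steering only FCP frames to an offload EM) could hook it up roughly as follows; my_fcp_match() and my_attach_shared_em() are hypothetical:

#include <scsi/libfc.h>

/* Hypothetical filter: claim only FCP frames for this EM. */
static bool my_fcp_match(struct fc_frame *fp)
{
        return fc_frame_header_get(fp)->fh_type == FC_TYPE_FCP;
}

/* Hypothetical setup: share an already-allocated EM with this lport. */
static int my_attach_shared_em(struct fc_lport *lport,
                               struct fc_exch_mgr *shared_mp)
{
        if (!fc_exch_mgr_add(lport, shared_mp, my_fcp_match))
                return -ENOMEM;         /* anchor allocation failed */
        /* fc_exch_mgr_add() took a kref on shared_mp for this anchor */
        return 0;
}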
static void fc_exch_mgr_destroy(struct kref *kref)
{
struct fc_exch_mgr *mp = container_of(kref, struct fc_exch_mgr, kref);
mempool_destroy(mp->ep_pool);
free_percpu(mp->pool);
kfree(mp);
}
void fc_exch_mgr_del(struct fc_exch_mgr_anchor *ema)
{
/* remove EM anchor from EM anchors list */
list_del(&ema->ema_list);
kref_put(&ema->mp->kref, fc_exch_mgr_destroy);
kfree(ema);
}
EXPORT_SYMBOL(fc_exch_mgr_del);
struct fc_exch_mgr *fc_exch_mgr_alloc(struct fc_lport *lp,
enum fc_class class,
u16 min_xid, u16 max_xid)
u16 min_xid, u16 max_xid,
bool (*match)(struct fc_frame *))
{
struct fc_exch_mgr *mp;
size_t len;
u16 pool_exch_range;
size_t pool_size;
unsigned int cpu;
struct fc_exch_pool *pool;
if (max_xid <= min_xid || min_xid == 0 || max_xid == FC_XID_UNKNOWN) {
if (max_xid <= min_xid || max_xid == FC_XID_UNKNOWN ||
(min_xid & fc_cpu_mask) != 0) {
FC_LPORT_DBG(lp, "Invalid min_xid 0x:%x and max_xid 0x:%x\n",
min_xid, max_xid);
return NULL;
}
/*
* Memory need for EM
* allocate memory for EM
*/
#define xid_ok(i, m1, m2) (((i) >= (m1)) && ((i) <= (m2)))
len = (max_xid - min_xid + 1) * (sizeof(struct fc_exch *));
len += sizeof(struct fc_exch_mgr);
mp = kzalloc(len, GFP_ATOMIC);
mp = kzalloc(sizeof(struct fc_exch_mgr), GFP_ATOMIC);
if (!mp)
return NULL;
mp->class = class;
mp->total_exches = 0;
mp->exches = (struct fc_exch **)(mp + 1);
mp->lp = lp;
/* adjust em exch xid range for offload */
mp->min_xid = min_xid;
mp->max_xid = max_xid;
mp->last_xid = min_xid - 1;
mp->max_read = 0;
mp->last_read = 0;
if (lp->lro_enabled && xid_ok(lp->lro_xid, min_xid, max_xid)) {
mp->max_read = lp->lro_xid;
mp->last_read = min_xid - 1;
mp->last_xid = mp->max_read;
} else {
/* disable lro if no xid control over read */
lp->lro_enabled = 0;
}
INIT_LIST_HEAD(&mp->ex_list);
spin_lock_init(&mp->em_lock);
mp->ep_pool = mempool_create_slab_pool(2, fc_em_cachep);
if (!mp->ep_pool)
goto free_mp;
/*
 * Set up the per cpu exch pools, dividing the entire exchange
 * id range equally across all cpus. The exch pointer array
 * memory is allocated for each pool's share of the range.
*/
pool_exch_range = (mp->max_xid - mp->min_xid + 1) / (fc_cpu_mask + 1);
mp->pool_max_index = pool_exch_range - 1;
/*
* Allocate and initialize per cpu exch pool
*/
pool_size = sizeof(*pool) + pool_exch_range * sizeof(struct fc_exch *);
mp->pool = __alloc_percpu(pool_size, __alignof__(struct fc_exch_pool));
if (!mp->pool)
goto free_mempool;
for_each_possible_cpu(cpu) {
pool = per_cpu_ptr(mp->pool, cpu);
spin_lock_init(&pool->lock);
INIT_LIST_HEAD(&pool->ex_list);
}
kref_init(&mp->kref);
if (!fc_exch_mgr_add(lp, mp, match)) {
free_percpu(mp->pool);
goto free_mempool;
}
/*
 * The kref_init() above set mp->kref to 1, and the call to
 * fc_exch_mgr_add() incremented it again, so drop that
 * extra reference here.
*/
kref_put(&mp->kref, fc_exch_mgr_destroy);
return mp;
free_mempool:
mempool_destroy(mp->ep_pool);
free_mp:
kfree(mp);
return NULL;
}
EXPORT_SYMBOL(fc_exch_mgr_alloc);
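To make the pool sizing concrete with worked numbers (an example, not from the patch): on a machine reporting nr_cpu_ids = 6, fc_exch_init() below yields fc_cpu_mask = 7 and fc_cpu_order = 3, so an EM created with min_xid = 0x0010 and max_xid = 0x100f (4096 IDs; min_xid's low three bits are clear, as the validation at the top requires) gets pool_exch_range = 4096 / 8 = 512, pool_max_index = 511, and each per-cpu pool is one allocation of sizeof(struct fc_exch_pool) plus 512 exchange pointers.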
void fc_exch_mgr_free(struct fc_exch_mgr *mp)
void fc_exch_mgr_free(struct fc_lport *lport)
{
WARN_ON(!mp);
/*
* The total exch count must be zero
* before freeing exchange manager.
*/
WARN_ON(mp->total_exches != 0);
mempool_destroy(mp->ep_pool);
kfree(mp);
struct fc_exch_mgr_anchor *ema, *next;
list_for_each_entry_safe(ema, next, &lport->ema_list, ema_list)
fc_exch_mgr_del(ema);
}
EXPORT_SYMBOL(fc_exch_mgr_free);
struct fc_exch *fc_exch_get(struct fc_lport *lp, struct fc_frame *fp)
{
if (!lp || !lp->emp)
return NULL;
return fc_exch_alloc(lp->emp, fp, 0);
}
EXPORT_SYMBOL(fc_exch_get);
struct fc_seq *fc_exch_seq_send(struct fc_lport *lp,
struct fc_frame *fp,
@ -1823,7 +1917,7 @@ struct fc_seq *fc_exch_seq_send(struct fc_lport *lp,
struct fc_frame_header *fh;
int rc = 1;
ep = lp->tt.exch_get(lp, fp);
ep = fc_exch_alloc(lp, fp);
if (!ep) {
fc_frame_free(fp);
return NULL;
@ -1843,7 +1937,8 @@ struct fc_seq *fc_exch_seq_send(struct fc_lport *lp,
fc_exch_setup_hdr(ep, fp, ep->f_ctl);
sp->cnt++;
fc_fcp_ddp_setup(fr_fsp(fp), ep->xid);
if (ep->xid <= lp->lro_xid)
fc_fcp_ddp_setup(fr_fsp(fp), ep->xid);
if (unlikely(lp->tt.frame_send(lp, fp)))
goto err;
@ -1860,7 +1955,7 @@ err:
rc = fc_exch_done_locked(ep);
spin_unlock_bh(&ep->ex_lock);
if (!rc)
fc_exch_mgr_delete_ep(ep);
fc_exch_delete(ep);
return NULL;
}
EXPORT_SYMBOL(fc_exch_seq_send);
@ -1868,24 +1963,44 @@ EXPORT_SYMBOL(fc_exch_seq_send);
/*
* Receive a frame
*/
void fc_exch_recv(struct fc_lport *lp, struct fc_exch_mgr *mp,
struct fc_frame *fp)
void fc_exch_recv(struct fc_lport *lp, struct fc_frame *fp)
{
struct fc_frame_header *fh = fc_frame_header_get(fp);
u32 f_ctl;
struct fc_exch_mgr_anchor *ema;
u32 f_ctl, found = 0;
u16 oxid;
/* lport lock ? */
if (!lp || !mp || (lp->state == LPORT_ST_NONE)) {
if (!lp || lp->state == LPORT_ST_DISABLED) {
FC_LPORT_DBG(lp, "Receiving frames for an lport that "
"has not been initialized correctly\n");
fc_frame_free(fp);
return;
}
f_ctl = ntoh24(fh->fh_f_ctl);
oxid = ntohs(fh->fh_ox_id);
if (f_ctl & FC_FC_EX_CTX) {
list_for_each_entry(ema, &lp->ema_list, ema_list) {
if ((oxid >= ema->mp->min_xid) &&
(oxid <= ema->mp->max_xid)) {
found = 1;
break;
}
}
if (!found) {
FC_LPORT_DBG(lp, "Received response for out "
"of range oxid:%hx\n", oxid);
fc_frame_free(fp);
return;
}
} else
ema = list_entry(lp->ema_list.prev, typeof(*ema), ema_list);
/*
* If frame is marked invalid, just drop it.
*/
f_ctl = ntoh24(fh->fh_f_ctl);
switch (fr_eof(fp)) {
case FC_EOF_T:
if (f_ctl & FC_FC_END_SEQ)
@ -1893,34 +2008,24 @@ void fc_exch_recv(struct fc_lport *lp, struct fc_exch_mgr *mp,
/* fall through */
case FC_EOF_N:
if (fh->fh_type == FC_TYPE_BLS)
fc_exch_recv_bls(mp, fp);
fc_exch_recv_bls(ema->mp, fp);
else if ((f_ctl & (FC_FC_EX_CTX | FC_FC_SEQ_CTX)) ==
FC_FC_EX_CTX)
fc_exch_recv_seq_resp(mp, fp);
fc_exch_recv_seq_resp(ema->mp, fp);
else if (f_ctl & FC_FC_SEQ_CTX)
fc_exch_recv_resp(mp, fp);
fc_exch_recv_resp(ema->mp, fp);
else
fc_exch_recv_req(lp, mp, fp);
fc_exch_recv_req(lp, ema->mp, fp);
break;
default:
FC_EM_DBG(mp, "dropping invalid frame (eof %x)", fr_eof(fp));
FC_LPORT_DBG(lp, "dropping invalid frame (eof %x)", fr_eof(fp));
fc_frame_free(fp);
break;
}
}
EXPORT_SYMBOL(fc_exch_recv);
int fc_exch_init(struct fc_lport *lp)
{
if (!lp->tt.exch_get) {
/*
* exch_put() should be NULL if
* exch_get() is NULL
*/
WARN_ON(lp->tt.exch_put);
lp->tt.exch_get = fc_exch_get;
}
if (!lp->tt.seq_start_next)
lp->tt.seq_start_next = fc_seq_start_next;
@ -1942,6 +2047,28 @@ int fc_exch_init(struct fc_lport *lp)
if (!lp->tt.seq_exch_abort)
lp->tt.seq_exch_abort = fc_seq_exch_abort;
/*
 * Initialize fc_cpu_mask and fc_cpu_order. fc_cpu_mask
 * is set from nr_cpu_ids rounded up to the next power
 * of 2, and that power is stored in fc_cpu_order; both
 * are needed later to map between an exch id and the
 * exch array index in a per cpu exch pool.
 *
 * This round-up aligns fc_cpu_mask with the exchange
 * id's lower bits, so that all incoming frames of an
 * exchange are delivered to the cpu on which the
 * exchange originated, by a simple bitwise AND between
 * fc_cpu_mask and the exchange id.
*/
fc_cpu_mask = 1;
fc_cpu_order = 0;
while (fc_cpu_mask < nr_cpu_ids) {
fc_cpu_mask <<= 1;
fc_cpu_order++;
}
fc_cpu_mask--;
return 0;
}
EXPORT_SYMBOL(fc_exch_init);
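Tying the mask back to the allocator: fc_exch_em_alloc() composes an exchange ID as (index << fc_cpu_order | cpu) + min_xid, and fc_exch_find() inverts it with xid & fc_cpu_mask and (xid - min_xid) >> fc_cpu_order; the inversion only works because min_xid must have its low fc_cpu_order bits clear. A standalone check of the round trip (example mask/order values, not kernel code):

#include <assert.h>
#include <stdint.h>

int main(void)
{
        const uint16_t fc_cpu_mask = 3, fc_cpu_order = 2;  /* 4 cpus */
        const uint16_t min_xid = 0x0100;  /* low bits clear, as required */
        uint16_t cpu, index;

        for (cpu = 0; cpu <= fc_cpu_mask; cpu++) {
                for (index = 0; index < 64; index++) {
                        /* encode, as in fc_exch_em_alloc() */
                        uint16_t xid = (index << fc_cpu_order | cpu) + min_xid;

                        /* decode, as in fc_exch_find() */
                        assert((xid & fc_cpu_mask) == cpu);
                        assert((uint16_t)(xid - min_xid) >> fc_cpu_order == index);
                }
        }
        return 0;
}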

View File

@ -507,33 +507,6 @@ static int fc_fcp_send_data(struct fc_fcp_pkt *fsp, struct fc_seq *seq,
f_ctl = FC_FC_REL_OFF;
WARN_ON(!seq);
/*
* If a get_page()/put_page() will fail, don't use sg lists
* in the fc_frame structure.
*
* The put_page() may be long after the I/O has completed
* in the case of FCoE, since the network driver does it
* via free_skb(). See the test in free_pages_check().
*
* Test this case with 'dd </dev/zero >/dev/st0 bs=64k'.
*/
if (using_sg) {
for (sg = scsi_sglist(sc); sg; sg = sg_next(sg)) {
if (page_count(sg_page(sg)) == 0 ||
(sg_page(sg)->flags & (1 << PG_lru |
1 << PG_private |
1 << PG_locked |
1 << PG_active |
1 << PG_slab |
1 << PG_swapcache |
1 << PG_writeback |
1 << PG_reserved |
1 << PG_buddy))) {
using_sg = 0;
break;
}
}
}
sg = scsi_sglist(sc);
while (remaining > 0 && sg) {
@ -569,8 +542,6 @@ static int fc_fcp_send_data(struct fc_fcp_pkt *fsp, struct fc_seq *seq,
}
sg_bytes = min(tlen, sg->length - offset);
if (using_sg) {
WARN_ON(skb_shinfo(fp_skb(fp))->nr_frags >
FC_FRAME_SG_LEN);
get_page(sg_page(sg));
skb_fill_page_desc(fp_skb(fp),
skb_shinfo(fp_skb(fp))->nr_frags,
@ -1337,7 +1308,7 @@ static void fc_fcp_rec(struct fc_fcp_pkt *fsp)
fc_fill_fc_hdr(fp, FC_RCTL_ELS_REQ, rport->port_id,
fc_host_port_id(rp->local_port->host), FC_TYPE_ELS,
FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);
if (lp->tt.elsct_send(lp, rport, fp, ELS_REC, fc_fcp_rec_resp,
if (lp->tt.elsct_send(lp, rport->port_id, fp, ELS_REC, fc_fcp_rec_resp,
fsp, jiffies_to_msecs(FC_SCSI_REC_TOV))) {
fc_fcp_pkt_hold(fsp); /* hold while REC outstanding */
return;

View File

@ -113,7 +113,7 @@ static void fc_lport_enter_ready(struct fc_lport *);
static void fc_lport_enter_logo(struct fc_lport *);
static const char *fc_lport_state_names[] = {
[LPORT_ST_NONE] = "none",
[LPORT_ST_DISABLED] = "disabled",
[LPORT_ST_FLOGI] = "FLOGI",
[LPORT_ST_DNS] = "dNS",
[LPORT_ST_RPN_ID] = "RPN_ID",
@ -133,57 +133,44 @@ static int fc_frame_drop(struct fc_lport *lport, struct fc_frame *fp)
/**
* fc_lport_rport_callback() - Event handler for rport events
* @lport: The lport which is receiving the event
* @rport: The rport which the event has occured on
* @rdata: private remote port data
* @event: The event that occured
*
* Locking Note: The rport lock should not be held when calling
* this function.
*/
static void fc_lport_rport_callback(struct fc_lport *lport,
struct fc_rport *rport,
struct fc_rport_priv *rdata,
enum fc_rport_event event)
{
FC_LPORT_DBG(lport, "Received a %d event for port (%6x)\n", event,
rport->port_id);
rdata->ids.port_id);
mutex_lock(&lport->lp_mutex);
switch (event) {
case RPORT_EV_CREATED:
if (rport->port_id == FC_FID_DIR_SERV) {
mutex_lock(&lport->lp_mutex);
if (lport->state == LPORT_ST_DNS) {
lport->dns_rp = rport;
fc_lport_enter_rpn_id(lport);
} else {
FC_LPORT_DBG(lport, "Received an CREATED event "
"on port (%6x) for the directory "
"server, but the lport is not "
"in the DNS state, it's in the "
"%d state", rport->port_id,
lport->state);
lport->tt.rport_logoff(rport);
}
mutex_unlock(&lport->lp_mutex);
} else
FC_LPORT_DBG(lport, "Received an event for port (%6x) "
"which is not the directory server\n",
rport->port_id);
case RPORT_EV_READY:
if (lport->state == LPORT_ST_DNS) {
lport->dns_rp = rdata;
fc_lport_enter_rpn_id(lport);
} else {
FC_LPORT_DBG(lport, "Received an READY event "
"on port (%6x) for the directory "
"server, but the lport is not "
"in the DNS state, it's in the "
"%d state", rdata->ids.port_id,
lport->state);
lport->tt.rport_logoff(rdata);
}
break;
case RPORT_EV_LOGO:
case RPORT_EV_FAILED:
case RPORT_EV_STOP:
if (rport->port_id == FC_FID_DIR_SERV) {
mutex_lock(&lport->lp_mutex);
lport->dns_rp = NULL;
mutex_unlock(&lport->lp_mutex);
} else
FC_LPORT_DBG(lport, "Received an event for port (%6x) "
"which is not the directory server\n",
rport->port_id);
lport->dns_rp = NULL;
break;
case RPORT_EV_NONE:
break;
}
mutex_unlock(&lport->lp_mutex);
}
/**
@ -211,20 +198,13 @@ static void fc_lport_ptp_setup(struct fc_lport *lport,
u32 remote_fid, u64 remote_wwpn,
u64 remote_wwnn)
{
struct fc_disc_port dp;
dp.lp = lport;
dp.ids.port_id = remote_fid;
dp.ids.port_name = remote_wwpn;
dp.ids.node_name = remote_wwnn;
dp.ids.roles = FC_RPORT_ROLE_UNKNOWN;
if (lport->ptp_rp) {
mutex_lock(&lport->disc.disc_mutex);
if (lport->ptp_rp)
lport->tt.rport_logoff(lport->ptp_rp);
lport->ptp_rp = NULL;
}
lport->ptp_rp = lport->tt.rport_create(&dp);
lport->ptp_rp = lport->tt.rport_create(lport, remote_fid);
lport->ptp_rp->ids.port_name = remote_wwpn;
lport->ptp_rp->ids.node_name = remote_wwnn;
mutex_unlock(&lport->disc.disc_mutex);
lport->tt.rport_login(lport->ptp_rp);
@ -471,56 +451,6 @@ static void fc_lport_recv_rnid_req(struct fc_seq *sp, struct fc_frame *in_fp,
fc_frame_free(in_fp);
}
/**
* fc_lport_recv_adisc_req() - Handle received Address Discovery Request
 * @lport: Fibre Channel local port receiving the ADISC
* @sp: current sequence in the ADISC exchange
* @fp: ADISC request frame
*
* Locking Note: The lport lock is expected to be held before calling
* this function.
*/
static void fc_lport_recv_adisc_req(struct fc_seq *sp, struct fc_frame *in_fp,
struct fc_lport *lport)
{
struct fc_frame *fp;
struct fc_exch *ep = fc_seq_exch(sp);
struct fc_els_adisc *req, *rp;
struct fc_seq_els_data rjt_data;
size_t len;
u32 f_ctl;
FC_LPORT_DBG(lport, "Received ADISC request while in state %s\n",
fc_lport_state(lport));
req = fc_frame_payload_get(in_fp, sizeof(*req));
if (!req) {
rjt_data.fp = NULL;
rjt_data.reason = ELS_RJT_LOGIC;
rjt_data.explan = ELS_EXPL_NONE;
lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &rjt_data);
} else {
len = sizeof(*rp);
fp = fc_frame_alloc(lport, len);
if (fp) {
rp = fc_frame_payload_get(fp, len);
memset(rp, 0, len);
rp->adisc_cmd = ELS_LS_ACC;
rp->adisc_wwpn = htonll(lport->wwpn);
rp->adisc_wwnn = htonll(lport->wwnn);
hton24(rp->adisc_port_id,
fc_host_port_id(lport->host));
sp = lport->tt.seq_start_next(sp);
f_ctl = FC_FC_EX_CTX | FC_FC_LAST_SEQ;
f_ctl |= FC_FC_END_SEQ | FC_FC_SEQ_INIT;
fc_fill_fc_hdr(fp, FC_RCTL_ELS_REP, ep->did, ep->sid,
FC_TYPE_ELS, f_ctl, 0);
lport->tt.seq_send(lport, sp, fp);
}
}
fc_frame_free(in_fp);
}
/**
* fc_lport_recv_logo_req() - Handle received fabric LOGO request
 * @lport: Fibre Channel local port receiving the LOGO
@ -550,7 +480,7 @@ int fc_fabric_login(struct fc_lport *lport)
int rc = -1;
mutex_lock(&lport->lp_mutex);
if (lport->state == LPORT_ST_NONE) {
if (lport->state == LPORT_ST_DISABLED) {
fc_lport_enter_reset(lport);
rc = 0;
}
@ -637,12 +567,13 @@ EXPORT_SYMBOL(fc_fabric_logoff);
int fc_lport_destroy(struct fc_lport *lport)
{
mutex_lock(&lport->lp_mutex);
lport->state = LPORT_ST_NONE;
lport->state = LPORT_ST_DISABLED;
lport->link_up = 0;
lport->tt.frame_send = fc_frame_drop;
mutex_unlock(&lport->lp_mutex);
lport->tt.fcp_abort_io(lport);
lport->tt.disc_stop_final(lport);
lport->tt.exch_mgr_reset(lport, 0, 0);
return 0;
}
@ -722,7 +653,8 @@ static void fc_lport_enter_ready(struct fc_lport *lport)
fc_lport_state_enter(lport, LPORT_ST_READY);
lport->tt.disc_start(fc_lport_disc_callback, lport);
if (!lport->ptp_rp)
lport->tt.disc_start(fc_lport_disc_callback, lport);
}
/**
@ -808,8 +740,6 @@ static void fc_lport_recv_flogi_req(struct fc_seq *sp_in,
fc_lport_ptp_setup(lport, remote_fid, remote_wwpn,
get_unaligned_be64(&flp->fl_wwnn));
lport->tt.disc_start(fc_lport_disc_callback, lport);
out:
sp = fr_seq(rx_fp);
fc_frame_free(rx_fp);
@ -832,10 +762,6 @@ static void fc_lport_recv_req(struct fc_lport *lport, struct fc_seq *sp,
{
struct fc_frame_header *fh = fc_frame_header_get(fp);
void (*recv) (struct fc_seq *, struct fc_frame *, struct fc_lport *);
struct fc_rport *rport;
u32 s_id;
u32 d_id;
struct fc_seq_els_data rjt_data;
mutex_lock(&lport->lp_mutex);
@ -844,11 +770,14 @@ static void fc_lport_recv_req(struct fc_lport *lport, struct fc_seq *sp,
* RSCN here. These don't require a session.
* Even if we had a session, it might not be ready.
*/
if (fh->fh_type == FC_TYPE_ELS && fh->fh_r_ctl == FC_RCTL_ELS_REQ) {
if (!lport->link_up)
fc_frame_free(fp);
else if (fh->fh_type == FC_TYPE_ELS &&
fh->fh_r_ctl == FC_RCTL_ELS_REQ) {
/*
* Check opcode.
*/
recv = NULL;
recv = lport->tt.rport_recv_req;
switch (fc_frame_payload_op(fp)) {
case ELS_FLOGI:
recv = fc_lport_recv_flogi_req;
@ -870,34 +799,9 @@ static void fc_lport_recv_req(struct fc_lport *lport, struct fc_seq *sp,
case ELS_RNID:
recv = fc_lport_recv_rnid_req;
break;
case ELS_ADISC:
recv = fc_lport_recv_adisc_req;
break;
}
if (recv)
recv(sp, fp, lport);
else {
/*
* Find session.
* If this is a new incoming PLOGI, we won't find it.
*/
s_id = ntoh24(fh->fh_s_id);
d_id = ntoh24(fh->fh_d_id);
rport = lport->tt.rport_lookup(lport, s_id);
if (rport)
lport->tt.rport_recv_req(sp, fp, rport);
else {
rjt_data.fp = NULL;
rjt_data.reason = ELS_RJT_UNAB;
rjt_data.explan = ELS_EXPL_NONE;
lport->tt.seq_els_rsp_send(sp,
ELS_LS_RJT,
&rjt_data);
fc_frame_free(fp);
}
}
recv(sp, fp, lport);
} else {
FC_LPORT_DBG(lport, "dropping invalid frame (eof %x)\n",
fr_eof(fp));
@ -930,7 +834,28 @@ int fc_lport_reset(struct fc_lport *lport)
EXPORT_SYMBOL(fc_lport_reset);
/**
* fc_rport_enter_reset() - Reset the local port
* fc_lport_reset_locked() - Reset the local port
* @lport: Fibre Channel local port to be reset
*
* Locking Note: The lport lock is expected to be held before calling
* this routine.
*/
static void fc_lport_reset_locked(struct fc_lport *lport)
{
if (lport->dns_rp)
lport->tt.rport_logoff(lport->dns_rp);
lport->ptp_rp = NULL;
lport->tt.disc_stop(lport);
lport->tt.exch_mgr_reset(lport, 0, 0);
fc_host_fabric_name(lport->host) = 0;
fc_host_port_id(lport->host) = 0;
}
/**
* fc_lport_enter_reset() - Reset the local port
* @lport: Fibre Channel local port to be reset
*
* Locking Note: The lport lock is expected to be held before calling
@ -942,25 +867,27 @@ static void fc_lport_enter_reset(struct fc_lport *lport)
fc_lport_state(lport));
fc_lport_state_enter(lport, LPORT_ST_RESET);
if (lport->dns_rp)
lport->tt.rport_logoff(lport->dns_rp);
if (lport->ptp_rp) {
lport->tt.rport_logoff(lport->ptp_rp);
lport->ptp_rp = NULL;
}
lport->tt.disc_stop(lport);
lport->tt.exch_mgr_reset(lport, 0, 0);
fc_host_fabric_name(lport->host) = 0;
fc_host_port_id(lport->host) = 0;
fc_lport_reset_locked(lport);
if (lport->link_up)
fc_lport_enter_flogi(lport);
}
/**
* fc_lport_enter_disabled() - disable the local port
* @lport: Fibre Channel local port to be reset
*
* Locking Note: The lport lock is expected to be held before calling
* this routine.
*/
static void fc_lport_enter_disabled(struct fc_lport *lport)
{
FC_LPORT_DBG(lport, "Entered disabled state from %s state\n",
fc_lport_state(lport));
fc_lport_state_enter(lport, LPORT_ST_DISABLED);
fc_lport_reset_locked(lport);
}
/**
* fc_lport_error() - Handler for any errors
* @lport: The fc_lport object
@ -992,7 +919,7 @@ static void fc_lport_error(struct fc_lport *lport, struct fc_frame *fp)
schedule_delayed_work(&lport->retry_work, delay);
} else {
switch (lport->state) {
case LPORT_ST_NONE:
case LPORT_ST_DISABLED:
case LPORT_ST_READY:
case LPORT_ST_RESET:
case LPORT_ST_RPN_ID:
@ -1026,13 +953,13 @@ static void fc_lport_rft_id_resp(struct fc_seq *sp, struct fc_frame *fp,
struct fc_frame_header *fh;
struct fc_ct_hdr *ct;
FC_LPORT_DBG(lport, "Received a RFT_ID %s\n", fc_els_resp_type(fp));
if (fp == ERR_PTR(-FC_EX_CLOSED))
return;
mutex_lock(&lport->lp_mutex);
FC_LPORT_DBG(lport, "Received a RFT_ID response\n");
if (lport->state != LPORT_ST_RFT_ID) {
FC_LPORT_DBG(lport, "Received a RFT_ID response, but in state "
"%s\n", fc_lport_state(lport));
@ -1080,13 +1007,13 @@ static void fc_lport_rpn_id_resp(struct fc_seq *sp, struct fc_frame *fp,
struct fc_frame_header *fh;
struct fc_ct_hdr *ct;
FC_LPORT_DBG(lport, "Received a RPN_ID %s\n", fc_els_resp_type(fp));
if (fp == ERR_PTR(-FC_EX_CLOSED))
return;
mutex_lock(&lport->lp_mutex);
FC_LPORT_DBG(lport, "Received a RPN_ID response\n");
if (lport->state != LPORT_ST_RPN_ID) {
FC_LPORT_DBG(lport, "Received a RPN_ID response, but in state "
"%s\n", fc_lport_state(lport));
@ -1132,13 +1059,13 @@ static void fc_lport_scr_resp(struct fc_seq *sp, struct fc_frame *fp,
struct fc_lport *lport = lp_arg;
u8 op;
FC_LPORT_DBG(lport, "Received a SCR %s\n", fc_els_resp_type(fp));
if (fp == ERR_PTR(-FC_EX_CLOSED))
return;
mutex_lock(&lport->lp_mutex);
FC_LPORT_DBG(lport, "Received a SCR response\n");
if (lport->state != LPORT_ST_SCR) {
FC_LPORT_DBG(lport, "Received a SCR response, but in state "
"%s\n", fc_lport_state(lport));
@ -1186,7 +1113,7 @@ static void fc_lport_enter_scr(struct fc_lport *lport)
return;
}
if (!lport->tt.elsct_send(lport, NULL, fp, ELS_SCR,
if (!lport->tt.elsct_send(lport, FC_FID_FCTRL, fp, ELS_SCR,
fc_lport_scr_resp, lport, lport->e_d_tov))
fc_lport_error(lport, fp);
}
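For reference, this hunk and the ones below replace the NULL destination with the explicit well-known fabric addresses from include/scsi/fc/fc_fs.h:

	FC_FID_DIR_SERV = 0xfffffc,	/* directory server: RFT_ID, RPN_ID */
	FC_FID_FCTRL    = 0xfffffd,	/* fabric controller: SCR */
	FC_FID_FLOGI    = 0xfffffe,	/* F_Port: FLOGI, LOGO */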
@ -1227,7 +1154,7 @@ static void fc_lport_enter_rft_id(struct fc_lport *lport)
return;
}
if (!lport->tt.elsct_send(lport, NULL, fp, FC_NS_RFT_ID,
if (!lport->tt.elsct_send(lport, FC_FID_DIR_SERV, fp, FC_NS_RFT_ID,
fc_lport_rft_id_resp,
lport, lport->e_d_tov))
fc_lport_error(lport, fp);
@ -1256,7 +1183,7 @@ static void fc_lport_enter_rpn_id(struct fc_lport *lport)
return;
}
if (!lport->tt.elsct_send(lport, NULL, fp, FC_NS_RPN_ID,
if (!lport->tt.elsct_send(lport, FC_FID_DIR_SERV, fp, FC_NS_RPN_ID,
fc_lport_rpn_id_resp,
lport, lport->e_d_tov))
fc_lport_error(lport, fp);
@ -1275,28 +1202,21 @@ static struct fc_rport_operations fc_lport_rport_ops = {
*/
static void fc_lport_enter_dns(struct fc_lport *lport)
{
struct fc_rport *rport;
struct fc_rport_libfc_priv *rdata;
struct fc_disc_port dp;
dp.ids.port_id = FC_FID_DIR_SERV;
dp.ids.port_name = -1;
dp.ids.node_name = -1;
dp.ids.roles = FC_RPORT_ROLE_UNKNOWN;
dp.lp = lport;
struct fc_rport_priv *rdata;
FC_LPORT_DBG(lport, "Entered DNS state from %s state\n",
fc_lport_state(lport));
fc_lport_state_enter(lport, LPORT_ST_DNS);
rport = lport->tt.rport_create(&dp);
if (!rport)
mutex_lock(&lport->disc.disc_mutex);
rdata = lport->tt.rport_create(lport, FC_FID_DIR_SERV);
mutex_unlock(&lport->disc.disc_mutex);
if (!rdata)
goto err;
rdata = rport->dd_data;
rdata->ops = &fc_lport_rport_ops;
lport->tt.rport_login(rport);
lport->tt.rport_login(rdata);
return;
err:
@ -1316,7 +1236,7 @@ static void fc_lport_timeout(struct work_struct *work)
mutex_lock(&lport->lp_mutex);
switch (lport->state) {
case LPORT_ST_NONE:
case LPORT_ST_DISABLED:
case LPORT_ST_READY:
case LPORT_ST_RESET:
WARN_ON(1);
@ -1360,13 +1280,13 @@ static void fc_lport_logo_resp(struct fc_seq *sp, struct fc_frame *fp,
struct fc_lport *lport = lp_arg;
u8 op;
FC_LPORT_DBG(lport, "Received a LOGO %s\n", fc_els_resp_type(fp));
if (fp == ERR_PTR(-FC_EX_CLOSED))
return;
mutex_lock(&lport->lp_mutex);
FC_LPORT_DBG(lport, "Received a LOGO response\n");
if (lport->state != LPORT_ST_LOGO) {
FC_LPORT_DBG(lport, "Received a LOGO response, but in state "
"%s\n", fc_lport_state(lport));
@ -1382,7 +1302,7 @@ static void fc_lport_logo_resp(struct fc_seq *sp, struct fc_frame *fp,
op = fc_frame_payload_op(fp);
if (op == ELS_LS_ACC)
fc_lport_enter_reset(lport);
fc_lport_enter_disabled(lport);
else
fc_lport_error(lport, fp);
@ -1415,8 +1335,8 @@ static void fc_lport_enter_logo(struct fc_lport *lport)
return;
}
if (!lport->tt.elsct_send(lport, NULL, fp, ELS_LOGO, fc_lport_logo_resp,
lport, lport->e_d_tov))
if (!lport->tt.elsct_send(lport, FC_FID_FLOGI, fp, ELS_LOGO,
fc_lport_logo_resp, lport, lport->e_d_tov))
fc_lport_error(lport, fp);
}
@ -1442,13 +1362,13 @@ static void fc_lport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp,
unsigned int e_d_tov;
u16 mfs;
FC_LPORT_DBG(lport, "Received a FLOGI %s\n", fc_els_resp_type(fp));
if (fp == ERR_PTR(-FC_EX_CLOSED))
return;
mutex_lock(&lport->lp_mutex);
FC_LPORT_DBG(lport, "Received a FLOGI response\n");
if (lport->state != LPORT_ST_FLOGI) {
FC_LPORT_DBG(lport, "Received a FLOGI response, but in state "
"%s\n", fc_lport_state(lport));
@ -1501,14 +1421,6 @@ static void fc_lport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp,
fc_lport_enter_dns(lport);
}
}
if (flp) {
csp_flags = ntohs(flp->fl_csp.sp_features);
if ((csp_flags & FC_SP_FT_FPORT) == 0) {
lport->tt.disc_start(fc_lport_disc_callback,
lport);
}
}
} else {
FC_LPORT_DBG(lport, "Bad FLOGI response\n");
}
@ -1539,7 +1451,7 @@ void fc_lport_enter_flogi(struct fc_lport *lport)
if (!fp)
return fc_lport_error(lport, fp);
if (!lport->tt.elsct_send(lport, NULL, fp, ELS_FLOGI,
if (!lport->tt.elsct_send(lport, FC_FID_FLOGI, fp, ELS_FLOGI,
fc_lport_flogi_resp, lport, lport->e_d_tov))
fc_lport_error(lport, fp);
}
@ -1550,7 +1462,7 @@ int fc_lport_config(struct fc_lport *lport)
INIT_DELAYED_WORK(&lport->retry_work, fc_lport_timeout);
mutex_init(&lport->lp_mutex);
fc_lport_state_enter(lport, LPORT_ST_NONE);
fc_lport_state_enter(lport, LPORT_ST_DISABLED);
fc_lport_add_fc4_type(lport, FC_TYPE_FCP);
fc_lport_add_fc4_type(lport, FC_TYPE_CT);
@ -1588,6 +1500,7 @@ int fc_lport_init(struct fc_lport *lport)
if (lport->link_supported_speeds & FC_PORTSPEED_10GBIT)
fc_host_supported_speeds(lport->host) |= FC_PORTSPEED_10GBIT;
INIT_LIST_HEAD(&lport->ema_list);
return 0;
}
EXPORT_SYMBOL(fc_lport_init);

File diff suppressed because it is too large

View File

@ -109,12 +109,9 @@ inline void iscsi_conn_queue_work(struct iscsi_conn *conn)
}
EXPORT_SYMBOL_GPL(iscsi_conn_queue_work);
void
iscsi_update_cmdsn(struct iscsi_session *session, struct iscsi_nopin *hdr)
static void __iscsi_update_cmdsn(struct iscsi_session *session,
uint32_t exp_cmdsn, uint32_t max_cmdsn)
{
uint32_t max_cmdsn = be32_to_cpu(hdr->max_cmdsn);
uint32_t exp_cmdsn = be32_to_cpu(hdr->exp_cmdsn);
/*
* standard specifies this check for when to update expected and
* max sequence numbers
@ -138,6 +135,12 @@ iscsi_update_cmdsn(struct iscsi_session *session, struct iscsi_nopin *hdr)
iscsi_conn_queue_work(session->leadconn);
}
}
void iscsi_update_cmdsn(struct iscsi_session *session, struct iscsi_nopin *hdr)
{
__iscsi_update_cmdsn(session, be32_to_cpu(hdr->exp_cmdsn),
be32_to_cpu(hdr->max_cmdsn));
}
EXPORT_SYMBOL_GPL(iscsi_update_cmdsn);
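The split leaves __iscsi_update_cmdsn() taking CPU-endian window values while the exported wrapper keeps the old wire-format contract. A short sketch of the two call styles (hdr here stands for any received big-endian nopin header):

	/* wire-format pdu header: fields are big-endian, use the wrapper */
	iscsi_update_cmdsn(session, (struct iscsi_nopin *)hdr);

	/* CPU-endian values (e.g. pre-swapped by offload firmware) go
	 * through iscsi_complete_scsi_task(), added later in this patch,
	 * since the static helper is not visible to drivers. */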
/**
@ -301,8 +304,6 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task)
hdr->flags = ISCSI_ATTR_SIMPLE;
int_to_scsilun(sc->device->lun, (struct scsi_lun *)hdr->lun);
memcpy(task->lun, hdr->lun, sizeof(task->lun));
hdr->cmdsn = task->cmdsn = cpu_to_be32(session->cmdsn);
session->cmdsn++;
hdr->exp_statsn = cpu_to_be32(conn->exp_statsn);
cmd_len = sc->cmd_len;
if (cmd_len < ISCSI_CDB_SIZE)
@ -388,6 +389,8 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task)
return -EIO;
task->state = ISCSI_TASK_RUNNING;
hdr->cmdsn = task->cmdsn = cpu_to_be32(session->cmdsn);
session->cmdsn++;
conn->scsicmd_pdus_cnt++;
ISCSI_DBG_SESSION(session, "iscsi prep [%s cid %d sc %p cdb 0x%x "
@ -499,6 +502,31 @@ static void iscsi_complete_task(struct iscsi_task *task, int state)
__iscsi_put_task(task);
}
/**
* iscsi_complete_scsi_task - finish scsi task normally
* @task: iscsi task for scsi cmd
* @exp_cmdsn: expected cmd sn in cpu format
* @max_cmdsn: max cmd sn in cpu format
*
* This is used when drivers do not need to, or cannot, perform
* lower level pdu processing.
*
* Called with session lock
*/
void iscsi_complete_scsi_task(struct iscsi_task *task,
uint32_t exp_cmdsn, uint32_t max_cmdsn)
{
struct iscsi_conn *conn = task->conn;
ISCSI_DBG_SESSION(conn->session, "[itt 0x%x]\n", task->itt);
conn->last_recv = jiffies;
__iscsi_update_cmdsn(conn->session, exp_cmdsn, max_cmdsn);
iscsi_complete_task(task, ISCSI_TASK_COMPLETED);
}
EXPORT_SYMBOL_GPL(iscsi_complete_scsi_task);
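A hedged sketch of an offload driver's completion handler using this hook; the function name and the already-converted window values are illustrative, not taken from a real driver:

	static void ex_offload_cmd_complete(struct iscsi_task *task,
					    uint32_t exp_cmdsn, uint32_t max_cmdsn)
	{
		struct iscsi_session *session = task->conn->session;

		/* hardware already handled the pdu; finish the task directly */
		spin_lock_bh(&session->lock);
		iscsi_complete_scsi_task(task, exp_cmdsn, max_cmdsn);
		spin_unlock_bh(&session->lock);
	}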
/*
* session lock must be held and if not called for a task that is
* still pending or from the xmit thread, then xmit thread must
@ -857,27 +885,102 @@ static void iscsi_send_nopout(struct iscsi_conn *conn, struct iscsi_nopin *rhdr)
}
}
static int iscsi_nop_out_rsp(struct iscsi_task *task,
struct iscsi_nopin *nop, char *data, int datalen)
{
struct iscsi_conn *conn = task->conn;
int rc = 0;
if (conn->ping_task != task) {
/*
* If this is not in response to one of our
* nops then it must be from userspace.
*/
if (iscsi_recv_pdu(conn->cls_conn, (struct iscsi_hdr *)nop,
data, datalen))
rc = ISCSI_ERR_CONN_FAILED;
} else
mod_timer(&conn->transport_timer, jiffies + conn->recv_timeout);
iscsi_complete_task(task, ISCSI_TASK_COMPLETED);
return rc;
}
static int iscsi_handle_reject(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
char *data, int datalen)
{
struct iscsi_reject *reject = (struct iscsi_reject *)hdr;
struct iscsi_hdr rejected_pdu;
int opcode, rc = 0;
conn->exp_statsn = be32_to_cpu(reject->statsn) + 1;
if (reject->reason == ISCSI_REASON_DATA_DIGEST_ERROR) {
if (ntoh24(reject->dlength) > datalen)
return ISCSI_ERR_PROTO;
if (ntoh24(reject->dlength) >= sizeof(struct iscsi_hdr)) {
memcpy(&rejected_pdu, data, sizeof(struct iscsi_hdr));
iscsi_conn_printk(KERN_ERR, conn,
"pdu (op 0x%x) rejected "
"due to DataDigest error.\n",
rejected_pdu.opcode);
}
if (ntoh24(reject->dlength) > datalen ||
ntoh24(reject->dlength) < sizeof(struct iscsi_hdr)) {
iscsi_conn_printk(KERN_ERR, conn, "Cannot handle rejected "
"pdu. Invalid data length (pdu dlength "
"%u, datalen %d\n", ntoh24(reject->dlength),
datalen);
return ISCSI_ERR_PROTO;
}
return 0;
memcpy(&rejected_pdu, data, sizeof(struct iscsi_hdr));
opcode = rejected_pdu.opcode & ISCSI_OPCODE_MASK;
switch (reject->reason) {
case ISCSI_REASON_DATA_DIGEST_ERROR:
iscsi_conn_printk(KERN_ERR, conn,
"pdu (op 0x%x itt 0x%x) rejected "
"due to DataDigest error.\n",
opcode, rejected_pdu.itt);
break;
case ISCSI_REASON_IMM_CMD_REJECT:
iscsi_conn_printk(KERN_ERR, conn,
"pdu (op 0x%x itt 0x%x) rejected. Too many "
"immediate commands.\n",
opcode, rejected_pdu.itt);
/*
* We only send one TMF at a time so if the target could not
* handle it, then it should get fixed (RFC mandates that
* a target can handle one immediate TMF per conn).
*
* For nop-outs, we could have sent more than one if
* the target is sending us lots of nop-ins
*/
if (opcode != ISCSI_OP_NOOP_OUT)
return 0;
if (rejected_pdu.itt == cpu_to_be32(ISCSI_RESERVED_TAG))
/*
* A nop-out sent in response to the target's nop-in was rejected.
* Just resend.
*/
iscsi_send_nopout(conn,
(struct iscsi_nopin*)&rejected_pdu);
else {
struct iscsi_task *task;
/*
* Our nop-out ping got dropped. We know the target
* and transport are ok, so just clean up.
*/
task = iscsi_itt_to_task(conn, rejected_pdu.itt);
if (!task) {
iscsi_conn_printk(KERN_ERR, conn,
"Invalid pdu reject. Could "
"not lookup rejected task.\n");
rc = ISCSI_ERR_BAD_ITT;
} else
rc = iscsi_nop_out_rsp(task,
(struct iscsi_nopin*)&rejected_pdu,
NULL, 0);
}
break;
default:
iscsi_conn_printk(KERN_ERR, conn,
"pdu (op 0x%x itt 0x%x) rejected. Reason "
"code 0x%x\n", rejected_pdu.itt,
rejected_pdu.opcode, reject->reason);
break;
}
return rc;
}
/**
@ -1038,15 +1141,8 @@ int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
}
conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1;
if (conn->ping_task != task)
/*
* If this is not in response to one of our
* nops then it must be from userspace.
*/
goto recv_pdu;
mod_timer(&conn->transport_timer, jiffies + conn->recv_timeout);
iscsi_complete_task(task, ISCSI_TASK_COMPLETED);
rc = iscsi_nop_out_rsp(task, (struct iscsi_nopin*)hdr,
data, datalen);
break;
default:
rc = ISCSI_ERR_BAD_OPCODE;
@ -1212,6 +1308,9 @@ static int iscsi_xmit_task(struct iscsi_conn *conn)
struct iscsi_task *task = conn->task;
int rc;
if (test_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx))
return -ENODATA;
__iscsi_get_task(task);
spin_unlock_bh(&conn->session->lock);
rc = conn->session->tt->xmit_task(task);
@ -1261,7 +1360,7 @@ static int iscsi_data_xmit(struct iscsi_conn *conn)
int rc = 0;
spin_lock_bh(&conn->session->lock);
if (unlikely(conn->suspend_tx)) {
if (test_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx)) {
ISCSI_DBG_SESSION(conn->session, "Tx suspended!\n");
spin_unlock_bh(&conn->session->lock);
return -ENODATA;
@ -1270,7 +1369,7 @@ static int iscsi_data_xmit(struct iscsi_conn *conn)
if (conn->task) {
rc = iscsi_xmit_task(conn);
if (rc)
goto again;
goto done;
}
/*
@ -1290,7 +1389,7 @@ check_mgmt:
}
rc = iscsi_xmit_task(conn);
if (rc)
goto again;
goto done;
}
/* process pending command queue */
@ -1311,14 +1410,14 @@ check_mgmt:
list_add_tail(&conn->task->running,
&conn->cmdqueue);
conn->task = NULL;
goto again;
goto done;
} else
fail_scsi_task(conn->task, DID_ABORT);
continue;
}
rc = iscsi_xmit_task(conn);
if (rc)
goto again;
goto done;
/*
* we could continuously get new task requests so
* we need to check the mgmt queue for nops that need to
@ -1344,16 +1443,14 @@ check_mgmt:
conn->task->state = ISCSI_TASK_RUNNING;
rc = iscsi_xmit_task(conn);
if (rc)
goto again;
goto done;
if (!list_empty(&conn->mgmtqueue))
goto check_mgmt;
}
spin_unlock_bh(&conn->session->lock);
return -ENODATA;
again:
if (unlikely(conn->suspend_tx))
rc = -ENODATA;
done:
spin_unlock_bh(&conn->session->lock);
return rc;
}
@ -1474,6 +1571,12 @@ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
goto fault;
}
if (test_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx)) {
reason = FAILURE_SESSION_IN_RECOVERY;
sc->result = DID_REQUEUE;
goto fault;
}
if (iscsi_check_cmdsn_window_closed(conn)) {
reason = FAILURE_WINDOW_CLOSED;
goto reject;
@ -1497,6 +1600,7 @@ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
}
}
if (session->tt->xmit_task(task)) {
session->cmdsn--;
reason = FAILURE_SESSION_NOT_READY;
goto prepd_reject;
}
@ -1712,6 +1816,33 @@ static void fail_scsi_tasks(struct iscsi_conn *conn, unsigned lun,
}
}
/**
* iscsi_suspend_queue - suspend iscsi_queuecommand
* @conn: iscsi conn to stop queueing IO on
*
* This grabs the session lock to make sure no one is in
* xmit_task/queuecommand, and then sets suspend to prevent
* new commands from being queued. This only needs to be called
* by offload drivers that need to sync a path like ep disconnect
* with iscsi_queuecommand/xmit_task. To start IO again, libiscsi
* will call iscsi_start_tx and iscsi_unblock_session when in FFP.
*/
void iscsi_suspend_queue(struct iscsi_conn *conn)
{
spin_lock_bh(&conn->session->lock);
set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
spin_unlock_bh(&conn->session->lock);
}
EXPORT_SYMBOL_GPL(iscsi_suspend_queue);
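A hypothetical ep-disconnect path in an offload driver (sketch only; struct ex_ep and its conn pointer are illustrative):

	static void ex_ep_disconnect(struct iscsi_endpoint *ep)
	{
		struct ex_ep *ex_ep = ep->dd_data;	/* driver-private data */

		/* quiesce queuecommand/xmit_task before tearing down the ep */
		if (ex_ep->conn)
			iscsi_suspend_queue(ex_ep->conn);

		/* ... hardware-specific connection teardown ... */
	}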
/**
* iscsi_suspend_tx - suspend iscsi_data_xmit
* @conn: iscsi conn to stop processing IO on.
*
* This function sets the suspend bit to prevent iscsi_data_xmit
* from sending new IO, and if work is queued on the xmit thread
* it will wait for it to be completed.
*/
void iscsi_suspend_tx(struct iscsi_conn *conn)
{
struct Scsi_Host *shost = conn->session->host;

View File

@ -124,6 +124,7 @@ static void srp_ring_free(struct device *dev, struct srp_buf **ring, size_t max,
dma_free_coherent(dev, size, ring[i]->buf, ring[i]->dma);
kfree(ring[i]);
}
kfree(ring);
}
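The leak being fixed: srp_ring_alloc() allocates the outer pointer array as well as each entry and its coherent buffer, so the free path must release all three levels. Paraphrased sketch of the matching allocation shape (not the verbatim source):

	ring = kcalloc(max, sizeof(struct srp_buf *), GFP_KERNEL);
	for (i = 0; i < max; i++) {
		ring[i] = kzalloc(sizeof(*ring[i]), GFP_KERNEL);
		ring[i]->buf = dma_alloc_coherent(dev, size, &ring[i]->dma,
						  GFP_KERNEL);
	}
	/* the kfree(ring) added above releases this outer array */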
int srp_target_alloc(struct srp_target *target, struct device *dev,

View File

@ -28,4 +28,4 @@ obj-$(CONFIG_SCSI_LPFC) := lpfc.o
lpfc-objs := lpfc_mem.o lpfc_sli.o lpfc_ct.o lpfc_els.o lpfc_hbadisc.o \
lpfc_init.o lpfc_mbox.o lpfc_nportdisc.o lpfc_scsi.o lpfc_attr.o \
lpfc_vport.o lpfc_debugfs.o
lpfc_vport.o lpfc_debugfs.o lpfc_bsg.o

View File

@ -312,6 +312,7 @@ struct lpfc_vport {
#define FC_BYPASSED_MODE 0x20000 /* NPort is in bypassed mode */
#define FC_VPORT_NEEDS_REG_VPI 0x80000 /* Needs to have its vpi registered */
#define FC_RSCN_DEFERRED 0x100000 /* A deferred RSCN being processed */
#define FC_VPORT_NEEDS_INIT_VPI 0x200000 /* Need to INIT_VPI before FDISC */
uint32_t ct_flags;
#define FC_CT_RFF_ID 0x1 /* RFF_ID accepted by switch */
@ -440,6 +441,12 @@ enum intr_type_t {
MSIX,
};
struct unsol_rcv_ct_ctx {
uint32_t ctxt_id;
uint32_t SID;
uint32_t oxid;
};
struct lpfc_hba {
/* SCSI interface function jump table entries */
int (*lpfc_new_scsi_buf)
@ -525,6 +532,8 @@ struct lpfc_hba {
#define FCP_XRI_ABORT_EVENT 0x20
#define ELS_XRI_ABORT_EVENT 0x40
#define ASYNC_EVENT 0x80
#define LINK_DISABLED 0x100 /* Link disabled by user */
#define FCF_DISC_INPROGRESS 0x200 /* FCF discovery in progress */
struct lpfc_dmabuf slim2p;
MAILBOX_t *mbox;
@ -616,6 +625,8 @@ struct lpfc_hba {
uint32_t hbq_count; /* Count of configured HBQs */
struct hbq_s hbqs[LPFC_MAX_HBQS]; /* local copy of hbq indicies */
uint32_t fcp_qidx; /* next work queue to post work to */
unsigned long pci_bar0_map; /* Physical address for PCI BAR0 */
unsigned long pci_bar1_map; /* Physical address for PCI BAR1 */
unsigned long pci_bar2_map; /* Physical address for PCI BAR2 */
@ -682,6 +693,7 @@ struct lpfc_hba {
struct pci_pool *lpfc_mbuf_pool;
struct pci_pool *lpfc_hrb_pool; /* header receive buffer pool */
struct pci_pool *lpfc_drb_pool; /* data receive buffer pool */
struct pci_pool *lpfc_hbq_pool; /* SLI3 hbq buffer pool */
struct lpfc_dma_pool lpfc_mbuf_safety_pool;
mempool_t *mbox_mem_pool;
@ -763,11 +775,18 @@ struct lpfc_hba {
/* Maximum number of events that can be outstanding at any time*/
#define LPFC_MAX_EVT_COUNT 512
atomic_t fast_event_count;
uint32_t fcoe_eventtag;
uint32_t fcoe_eventtag_at_fcf_scan;
struct lpfc_fcf fcf;
uint8_t fc_map[3];
uint8_t valid_vlan;
uint16_t vlan_id;
struct list_head fcf_conn_rec_list;
struct mutex ct_event_mutex; /* synchronize access to ct_ev_waiters */
struct list_head ct_ev_waiters;
struct unsol_rcv_ct_ctx ct_ctx[64];
uint32_t ctx_idx;
};
static inline struct Scsi_Host *

View File

@ -394,7 +394,12 @@ lpfc_link_state_show(struct device *dev, struct device_attribute *attr,
case LPFC_INIT_MBX_CMDS:
case LPFC_LINK_DOWN:
case LPFC_HBA_ERROR:
len += snprintf(buf + len, PAGE_SIZE-len, "Link Down\n");
if (phba->hba_flag & LINK_DISABLED)
len += snprintf(buf + len, PAGE_SIZE-len,
"Link Down - User disabled\n");
else
len += snprintf(buf + len, PAGE_SIZE-len,
"Link Down\n");
break;
case LPFC_LINK_UP:
case LPFC_CLEAR_LA:
@ -4127,6 +4132,9 @@ struct fc_function_template lpfc_transport_functions = {
.vport_disable = lpfc_vport_disable,
.set_vport_symbolic_name = lpfc_set_vport_symbolic_name,
.bsg_request = lpfc_bsg_request,
.bsg_timeout = lpfc_bsg_timeout,
};
struct fc_function_template lpfc_vport_transport_functions = {

View File

@ -0,0 +1,904 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
* Copyright (C) 2009 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* *
* This program is free software; you can redistribute it and/or *
* modify it under the terms of version 2 of the GNU General *
* Public License as published by the Free Software Foundation. *
* This program is distributed in the hope that it will be useful. *
* ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
* WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
* FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
* DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
* TO BE LEGALLY INVALID. See the GNU General Public License for *
* more details, a copy of which can be found in the file COPYING *
* included with this package. *
*******************************************************************/
#include <linux/interrupt.h>
#include <linux/mempool.h>
#include <linux/pci.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/scsi_bsg_fc.h>
#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
#include "lpfc_version.h"
/**
* lpfc_bsg_rport_ct - send a CT command from a bsg request
* @job: fc_bsg_job to handle
*/
static int
lpfc_bsg_rport_ct(struct fc_bsg_job *job)
{
struct Scsi_Host *shost = job->shost;
struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
struct lpfc_hba *phba = vport->phba;
struct lpfc_rport_data *rdata = job->rport->dd_data;
struct lpfc_nodelist *ndlp = rdata->pnode;
struct ulp_bde64 *bpl = NULL;
uint32_t timeout;
struct lpfc_iocbq *cmdiocbq = NULL;
struct lpfc_iocbq *rspiocbq = NULL;
IOCB_t *cmd;
IOCB_t *rsp;
struct lpfc_dmabuf *bmp = NULL;
int request_nseg;
int reply_nseg;
struct scatterlist *sgel = NULL;
int numbde;
dma_addr_t busaddr;
int rc = 0;
/* in case no data is transferred */
job->reply->reply_payload_rcv_len = 0;
if (!lpfc_nlp_get(ndlp)) {
job->reply->result = -ENODEV;
return 0;
}
if (ndlp->nlp_flag & NLP_ELS_SND_MASK) {
rc = -ENODEV;
goto free_ndlp_exit;
}
spin_lock_irq(shost->host_lock);
cmdiocbq = lpfc_sli_get_iocbq(phba);
if (!cmdiocbq) {
rc = -ENOMEM;
spin_unlock_irq(shost->host_lock);
goto free_ndlp_exit;
}
cmd = &cmdiocbq->iocb;
rspiocbq = lpfc_sli_get_iocbq(phba);
if (!rspiocbq) {
rc = -ENOMEM;
goto free_cmdiocbq;
}
spin_unlock_irq(shost->host_lock);
rsp = &rspiocbq->iocb;
bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
if (!bmp) {
rc = -ENOMEM;
spin_lock_irq(shost->host_lock);
goto free_rspiocbq;
}
spin_lock_irq(shost->host_lock);
bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys);
if (!bmp->virt) {
rc = -ENOMEM;
goto free_bmp;
}
spin_unlock_irq(shost->host_lock);
INIT_LIST_HEAD(&bmp->list);
bpl = (struct ulp_bde64 *) bmp->virt;
request_nseg = pci_map_sg(phba->pcidev, job->request_payload.sg_list,
job->request_payload.sg_cnt, DMA_TO_DEVICE);
for_each_sg(job->request_payload.sg_list, sgel, request_nseg, numbde) {
busaddr = sg_dma_address(sgel);
bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
bpl->tus.f.bdeSize = sg_dma_len(sgel);
bpl->tus.w = cpu_to_le32(bpl->tus.w);
bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
bpl++;
}
reply_nseg = pci_map_sg(phba->pcidev, job->reply_payload.sg_list,
job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
for_each_sg(job->reply_payload.sg_list, sgel, reply_nseg, numbde) {
busaddr = sg_dma_address(sgel);
bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
bpl->tus.f.bdeSize = sg_dma_len(sgel);
bpl->tus.w = cpu_to_le32(bpl->tus.w);
bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
bpl++;
}
cmd->un.genreq64.bdl.ulpIoTag32 = 0;
cmd->un.genreq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
cmd->un.genreq64.bdl.addrLow = putPaddrLow(bmp->phys);
cmd->un.genreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
cmd->un.genreq64.bdl.bdeSize =
(request_nseg + reply_nseg) * sizeof(struct ulp_bde64);
cmd->ulpCommand = CMD_GEN_REQUEST64_CR;
cmd->un.genreq64.w5.hcsw.Fctl = (SI | LA);
cmd->un.genreq64.w5.hcsw.Dfctl = 0;
cmd->un.genreq64.w5.hcsw.Rctl = FC_UNSOL_CTL;
cmd->un.genreq64.w5.hcsw.Type = FC_COMMON_TRANSPORT_ULP;
cmd->ulpBdeCount = 1;
cmd->ulpLe = 1;
cmd->ulpClass = CLASS3;
cmd->ulpContext = ndlp->nlp_rpi;
cmd->ulpOwner = OWN_CHIP;
cmdiocbq->vport = phba->pport;
cmdiocbq->context1 = NULL;
cmdiocbq->context2 = NULL;
cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
timeout = phba->fc_ratov * 2;
job->dd_data = cmdiocbq;
rc = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq, rspiocbq,
timeout + LPFC_DRVR_TIMEOUT);
if (rc != IOCB_TIMEDOUT) {
pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
job->request_payload.sg_cnt, DMA_TO_DEVICE);
pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list,
job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
}
if (rc == IOCB_TIMEDOUT) {
lpfc_sli_release_iocbq(phba, rspiocbq);
rc = -EACCES;
goto free_ndlp_exit;
}
if (rc != IOCB_SUCCESS) {
rc = -EACCES;
goto free_outdmp;
}
if (rsp->ulpStatus) {
if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
switch (rsp->un.ulpWord[4] & 0xff) {
case IOERR_SEQUENCE_TIMEOUT:
rc = -ETIMEDOUT;
break;
case IOERR_INVALID_RPI:
rc = -EFAULT;
break;
default:
rc = -EACCES;
break;
}
goto free_outdmp;
}
} else
job->reply->reply_payload_rcv_len =
rsp->un.genreq64.bdl.bdeSize;
free_outdmp:
spin_lock_irq(shost->host_lock);
lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
free_bmp:
kfree(bmp);
free_rspiocbq:
lpfc_sli_release_iocbq(phba, rspiocbq);
free_cmdiocbq:
lpfc_sli_release_iocbq(phba, cmdiocbq);
spin_unlock_irq(shost->host_lock);
free_ndlp_exit:
lpfc_nlp_put(ndlp);
/* make error code available to userspace */
job->reply->result = rc;
/* complete the job back to userspace */
job->job_done(job);
return 0;
}
/**
* lpfc_bsg_rport_els - send an ELS command from a bsg request
* @job: fc_bsg_job to handle
*/
static int
lpfc_bsg_rport_els(struct fc_bsg_job *job)
{
struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
struct lpfc_hba *phba = vport->phba;
struct lpfc_rport_data *rdata = job->rport->dd_data;
struct lpfc_nodelist *ndlp = rdata->pnode;
uint32_t elscmd;
uint32_t cmdsize;
uint32_t rspsize;
struct lpfc_iocbq *rspiocbq;
struct lpfc_iocbq *cmdiocbq;
IOCB_t *rsp;
uint16_t rpi = 0;
struct lpfc_dmabuf *pcmd;
struct lpfc_dmabuf *prsp;
struct lpfc_dmabuf *pbuflist = NULL;
struct ulp_bde64 *bpl;
int iocb_status;
int request_nseg;
int reply_nseg;
struct scatterlist *sgel = NULL;
int numbde;
dma_addr_t busaddr;
int rc = 0;
/* in case no data is transferred */
job->reply->reply_payload_rcv_len = 0;
if (!lpfc_nlp_get(ndlp)) {
rc = -ENODEV;
goto out;
}
elscmd = job->request->rqst_data.r_els.els_code;
cmdsize = job->request_payload.payload_len;
rspsize = job->reply_payload.payload_len;
rspiocbq = lpfc_sli_get_iocbq(phba);
if (!rspiocbq) {
lpfc_nlp_put(ndlp);
rc = -ENOMEM;
goto out;
}
rsp = &rspiocbq->iocb;
rpi = ndlp->nlp_rpi;
cmdiocbq = lpfc_prep_els_iocb(phba->pport, 1, cmdsize, 0, ndlp,
ndlp->nlp_DID, elscmd);
if (!cmdiocbq) {
lpfc_sli_release_iocbq(phba, rspiocbq);
return -EIO;
}
job->dd_data = cmdiocbq;
pcmd = (struct lpfc_dmabuf *) cmdiocbq->context2;
prsp = (struct lpfc_dmabuf *) pcmd->list.next;
lpfc_mbuf_free(phba, pcmd->virt, pcmd->phys);
kfree(pcmd);
lpfc_mbuf_free(phba, prsp->virt, prsp->phys);
kfree(prsp);
cmdiocbq->context2 = NULL;
pbuflist = (struct lpfc_dmabuf *) cmdiocbq->context3;
bpl = (struct ulp_bde64 *) pbuflist->virt;
request_nseg = pci_map_sg(phba->pcidev, job->request_payload.sg_list,
job->request_payload.sg_cnt, DMA_TO_DEVICE);
for_each_sg(job->request_payload.sg_list, sgel, request_nseg, numbde) {
busaddr = sg_dma_address(sgel);
bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
bpl->tus.f.bdeSize = sg_dma_len(sgel);
bpl->tus.w = cpu_to_le32(bpl->tus.w);
bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
bpl++;
}
reply_nseg = pci_map_sg(phba->pcidev, job->reply_payload.sg_list,
job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
for_each_sg(job->reply_payload.sg_list, sgel, reply_nseg, numbde) {
busaddr = sg_dma_address(sgel);
bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
bpl->tus.f.bdeSize = sg_dma_len(sgel);
bpl->tus.w = cpu_to_le32(bpl->tus.w);
bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
bpl++;
}
cmdiocbq->iocb.un.elsreq64.bdl.bdeSize =
(request_nseg + reply_nseg) * sizeof(struct ulp_bde64);
cmdiocbq->iocb.ulpContext = rpi;
cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
cmdiocbq->context1 = NULL;
cmdiocbq->context2 = NULL;
iocb_status = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq,
rspiocbq, (phba->fc_ratov * 2)
+ LPFC_DRVR_TIMEOUT);
/* release the new ndlp once the iocb completes */
lpfc_nlp_put(ndlp);
if (iocb_status != IOCB_TIMEDOUT) {
pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
job->request_payload.sg_cnt, DMA_TO_DEVICE);
pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list,
job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
}
if (iocb_status == IOCB_SUCCESS) {
if (rsp->ulpStatus == IOSTAT_SUCCESS) {
job->reply->reply_payload_rcv_len =
rsp->un.elsreq64.bdl.bdeSize;
rc = 0;
} else if (rsp->ulpStatus == IOSTAT_LS_RJT) {
struct fc_bsg_ctels_reply *els_reply;
/* LS_RJT data returned in word 4 */
uint8_t *rjt_data = (uint8_t *)&rsp->un.ulpWord[4];
els_reply = &job->reply->reply_data.ctels_reply;
job->reply->result = 0;
els_reply->status = FC_CTELS_STATUS_REJECT;
els_reply->rjt_data.action = rjt_data[0];
els_reply->rjt_data.reason_code = rjt_data[1];
els_reply->rjt_data.reason_explanation = rjt_data[2];
els_reply->rjt_data.vendor_unique = rjt_data[3];
} else
rc = -EIO;
} else
rc = -EIO;
if (iocb_status != IOCB_TIMEDOUT)
lpfc_els_free_iocb(phba, cmdiocbq);
lpfc_sli_release_iocbq(phba, rspiocbq);
out:
/* make error code available to userspace */
job->reply->result = rc;
/* complete the job back to userspace */
job->job_done(job);
return 0;
}
struct lpfc_ct_event {
struct list_head node;
int ref;
wait_queue_head_t wq;
/* Event type and waiter identifiers */
uint32_t type_mask;
uint32_t req_id;
uint32_t reg_id;
/* next two flags are here for the auto-delete logic */
unsigned long wait_time_stamp;
int waiting;
/* seen and not seen events */
struct list_head events_to_get;
struct list_head events_to_see;
};
struct event_data {
struct list_head node;
uint32_t type;
uint32_t immed_dat;
void *data;
uint32_t len;
};
static struct lpfc_ct_event *
lpfc_ct_event_new(int ev_reg_id, uint32_t ev_req_id)
{
struct lpfc_ct_event *evt = kzalloc(sizeof(*evt), GFP_KERNEL);
if (!evt)
return NULL;
INIT_LIST_HEAD(&evt->events_to_get);
INIT_LIST_HEAD(&evt->events_to_see);
evt->req_id = ev_req_id;
evt->reg_id = ev_reg_id;
evt->wait_time_stamp = jiffies;
init_waitqueue_head(&evt->wq);
return evt;
}
static void
lpfc_ct_event_free(struct lpfc_ct_event *evt)
{
struct event_data *ed;
list_del(&evt->node);
while (!list_empty(&evt->events_to_get)) {
ed = list_entry(evt->events_to_get.next, typeof(*ed), node);
list_del(&ed->node);
kfree(ed->data);
kfree(ed);
}
while (!list_empty(&evt->events_to_see)) {
ed = list_entry(evt->events_to_see.next, typeof(*ed), node);
list_del(&ed->node);
kfree(ed->data);
kfree(ed);
}
kfree(evt);
}
static inline void
lpfc_ct_event_ref(struct lpfc_ct_event *evt)
{
evt->ref++;
}
static inline void
lpfc_ct_event_unref(struct lpfc_ct_event *evt)
{
if (--evt->ref < 0)
lpfc_ct_event_free(evt);
}
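Note the slightly unusual convention: a freshly allocated event starts with ref == 0 and is destroyed by the unref that drives the count negative. Lifetime sketch:

	evt = lpfc_ct_event_new(reg_id, req_id);	/* ref == 0 */
	lpfc_ct_event_ref(evt);				/* ref == 1 */
	lpfc_ct_event_unref(evt);			/* ref == 0, evt kept */
	lpfc_ct_event_unref(evt);			/* ref == -1, evt freed */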
#define SLI_CT_ELX_LOOPBACK 0x10
enum ELX_LOOPBACK_CMD {
ELX_LOOPBACK_XRI_SETUP,
ELX_LOOPBACK_DATA,
};
/**
* lpfc_bsg_ct_unsol_event - process an unsolicited CT command
* @phba: pointer to lpfc hba data structure.
* @pring: pointer to the SLI ring the command was received on.
* @piocbq: pointer to the unsolicited receive iocb.
*
* This function is called when an unsolicited CT command is received. It
* forwards the event to any processes registered to receive CT events.
*/
void
lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
struct lpfc_iocbq *piocbq)
{
uint32_t evt_req_id = 0;
uint32_t cmd;
uint32_t len;
struct lpfc_dmabuf *dmabuf = NULL;
struct lpfc_ct_event *evt;
struct event_data *evt_dat = NULL;
struct lpfc_iocbq *iocbq;
size_t offset = 0;
struct list_head head;
struct ulp_bde64 *bde;
dma_addr_t dma_addr;
int i;
struct lpfc_dmabuf *bdeBuf1 = piocbq->context2;
struct lpfc_dmabuf *bdeBuf2 = piocbq->context3;
struct lpfc_hbq_entry *hbqe;
struct lpfc_sli_ct_request *ct_req;
INIT_LIST_HEAD(&head);
list_add_tail(&head, &piocbq->list);
if (piocbq->iocb.ulpBdeCount == 0 ||
piocbq->iocb.un.cont64[0].tus.f.bdeSize == 0)
goto error_ct_unsol_exit;
if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)
dmabuf = bdeBuf1;
else {
dma_addr = getPaddr(piocbq->iocb.un.cont64[0].addrHigh,
piocbq->iocb.un.cont64[0].addrLow);
dmabuf = lpfc_sli_ringpostbuf_get(phba, pring, dma_addr);
}
ct_req = (struct lpfc_sli_ct_request *)dmabuf->virt;
evt_req_id = ct_req->FsType;
cmd = ct_req->CommandResponse.bits.CmdRsp;
len = ct_req->CommandResponse.bits.Size;
if (!(phba->sli3_options & LPFC_SLI3_HBQ_ENABLED))
lpfc_sli_ringpostbuf_put(phba, pring, dmabuf);
mutex_lock(&phba->ct_event_mutex);
list_for_each_entry(evt, &phba->ct_ev_waiters, node) {
if (evt->req_id != evt_req_id)
continue;
lpfc_ct_event_ref(evt);
evt_dat = kzalloc(sizeof(*evt_dat), GFP_KERNEL);
if (!evt_dat) {
lpfc_ct_event_unref(evt);
lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
"2614 Memory allocation failed for "
"CT event\n");
break;
}
mutex_unlock(&phba->ct_event_mutex);
if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
/* take accumulated byte count from the last iocbq */
iocbq = list_entry(head.prev, typeof(*iocbq), list);
evt_dat->len = iocbq->iocb.unsli3.rcvsli3.acc_len;
} else {
list_for_each_entry(iocbq, &head, list) {
for (i = 0; i < iocbq->iocb.ulpBdeCount; i++)
evt_dat->len +=
iocbq->iocb.un.cont64[i].tus.f.bdeSize;
}
}
evt_dat->data = kzalloc(evt_dat->len, GFP_KERNEL);
if (!evt_dat->data) {
lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
"2615 Memory allocation failed for "
"CT event data, size %d\n",
evt_dat->len);
kfree(evt_dat);
mutex_lock(&phba->ct_event_mutex);
lpfc_ct_event_unref(evt);
mutex_unlock(&phba->ct_event_mutex);
goto error_ct_unsol_exit;
}
list_for_each_entry(iocbq, &head, list) {
if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
bdeBuf1 = iocbq->context2;
bdeBuf2 = iocbq->context3;
}
for (i = 0; i < iocbq->iocb.ulpBdeCount; i++) {
int size = 0;
if (phba->sli3_options &
LPFC_SLI3_HBQ_ENABLED) {
if (i == 0) {
hbqe = (struct lpfc_hbq_entry *)
&iocbq->iocb.un.ulpWord[0];
size = hbqe->bde.tus.f.bdeSize;
dmabuf = bdeBuf1;
} else if (i == 1) {
hbqe = (struct lpfc_hbq_entry *)
&iocbq->iocb.unsli3.
sli3Words[4];
size = hbqe->bde.tus.f.bdeSize;
dmabuf = bdeBuf2;
}
if ((offset + size) > evt_dat->len)
size = evt_dat->len - offset;
} else {
size = iocbq->iocb.un.cont64[i].
tus.f.bdeSize;
bde = &iocbq->iocb.un.cont64[i];
dma_addr = getPaddr(bde->addrHigh,
bde->addrLow);
dmabuf = lpfc_sli_ringpostbuf_get(phba,
pring, dma_addr);
}
if (!dmabuf) {
lpfc_printf_log(phba, KERN_ERR,
LOG_LIBDFC, "2616 No dmabuf "
"found for iocbq 0x%p\n",
iocbq);
kfree(evt_dat->data);
kfree(evt_dat);
mutex_lock(&phba->ct_event_mutex);
lpfc_ct_event_unref(evt);
mutex_unlock(&phba->ct_event_mutex);
goto error_ct_unsol_exit;
}
memcpy((char *)(evt_dat->data) + offset,
dmabuf->virt, size);
offset += size;
if (evt_req_id != SLI_CT_ELX_LOOPBACK &&
!(phba->sli3_options &
LPFC_SLI3_HBQ_ENABLED)) {
lpfc_sli_ringpostbuf_put(phba, pring,
dmabuf);
} else {
switch (cmd) {
case ELX_LOOPBACK_XRI_SETUP:
if (!(phba->sli3_options &
LPFC_SLI3_HBQ_ENABLED))
lpfc_post_buffer(phba,
pring,
1);
else
lpfc_in_buf_free(phba,
dmabuf);
break;
default:
if (!(phba->sli3_options &
LPFC_SLI3_HBQ_ENABLED))
lpfc_post_buffer(phba,
pring,
1);
break;
}
}
}
}
mutex_lock(&phba->ct_event_mutex);
if (phba->sli_rev == LPFC_SLI_REV4) {
evt_dat->immed_dat = phba->ctx_idx;
phba->ctx_idx = (phba->ctx_idx + 1) % 64;
phba->ct_ctx[evt_dat->immed_dat].oxid =
piocbq->iocb.ulpContext;
phba->ct_ctx[evt_dat->immed_dat].SID =
piocbq->iocb.un.rcvels.remoteID;
} else
evt_dat->immed_dat = piocbq->iocb.ulpContext;
evt_dat->type = FC_REG_CT_EVENT;
list_add(&evt_dat->node, &evt->events_to_see);
wake_up_interruptible(&evt->wq);
lpfc_ct_event_unref(evt);
if (evt_req_id == SLI_CT_ELX_LOOPBACK)
break;
}
mutex_unlock(&phba->ct_event_mutex);
error_ct_unsol_exit:
if (!list_empty(&head))
list_del(&head);
return;
}
/**
* lpfc_bsg_set_event - process a SET_EVENT bsg vendor command
* @job: SET_EVENT fc_bsg_job
*/
static int
lpfc_bsg_set_event(struct fc_bsg_job *job)
{
struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
struct lpfc_hba *phba = vport->phba;
struct set_ct_event *event_req;
struct lpfc_ct_event *evt;
int rc = 0;
if (job->request_len <
sizeof(struct fc_bsg_request) + sizeof(struct set_ct_event)) {
lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
"2612 Received SET_CT_EVENT below minimum "
"size\n");
return -EINVAL;
}
event_req = (struct set_ct_event *)
job->request->rqst_data.h_vendor.vendor_cmd;
mutex_lock(&phba->ct_event_mutex);
list_for_each_entry(evt, &phba->ct_ev_waiters, node) {
if (evt->reg_id == event_req->ev_reg_id) {
lpfc_ct_event_ref(evt);
evt->wait_time_stamp = jiffies;
break;
}
}
mutex_unlock(&phba->ct_event_mutex);
if (&evt->node == &phba->ct_ev_waiters) {
/* no event waiting struct yet - first call */
evt = lpfc_ct_event_new(event_req->ev_reg_id,
event_req->ev_req_id);
if (!evt) {
lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
"2617 Failed allocation of event "
"waiter\n");
return -ENOMEM;
}
mutex_lock(&phba->ct_event_mutex);
list_add(&evt->node, &phba->ct_ev_waiters);
lpfc_ct_event_ref(evt);
mutex_unlock(&phba->ct_event_mutex);
}
evt->waiting = 1;
if (wait_event_interruptible(evt->wq,
!list_empty(&evt->events_to_see))) {
mutex_lock(&phba->ct_event_mutex);
lpfc_ct_event_unref(evt); /* release ref */
lpfc_ct_event_unref(evt); /* delete */
mutex_unlock(&phba->ct_event_mutex);
rc = -EINTR;
goto set_event_out;
}
evt->wait_time_stamp = jiffies;
evt->waiting = 0;
mutex_lock(&phba->ct_event_mutex);
list_move(evt->events_to_see.prev, &evt->events_to_get);
lpfc_ct_event_unref(evt); /* release ref */
mutex_unlock(&phba->ct_event_mutex);
set_event_out:
/* set_event carries no reply payload */
job->reply->reply_payload_rcv_len = 0;
/* make error code available to userspace */
job->reply->result = rc;
/* complete the job back to userspace */
job->job_done(job);
return 0;
}
/**
* lpfc_bsg_get_event - process a GET_EVENT bsg vendor command
* @job: GET_EVENT fc_bsg_job
*/
static int
lpfc_bsg_get_event(struct fc_bsg_job *job)
{
struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
struct lpfc_hba *phba = vport->phba;
struct get_ct_event *event_req;
struct get_ct_event_reply *event_reply;
struct lpfc_ct_event *evt;
struct event_data *evt_dat = NULL;
int rc = 0;
if (job->request_len <
sizeof(struct fc_bsg_request) + sizeof(struct get_ct_event)) {
lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
"2613 Received GET_CT_EVENT request below "
"minimum size\n");
return -EINVAL;
}
event_req = (struct get_ct_event *)
job->request->rqst_data.h_vendor.vendor_cmd;
event_reply = (struct get_ct_event_reply *)
job->reply->reply_data.vendor_reply.vendor_rsp;
mutex_lock(&phba->ct_event_mutex);
list_for_each_entry(evt, &phba->ct_ev_waiters, node) {
if (evt->reg_id == event_req->ev_reg_id) {
if (list_empty(&evt->events_to_get))
break;
lpfc_ct_event_ref(evt);
evt->wait_time_stamp = jiffies;
evt_dat = list_entry(evt->events_to_get.prev,
struct event_data, node);
list_del(&evt_dat->node);
break;
}
}
mutex_unlock(&phba->ct_event_mutex);
if (!evt_dat) {
job->reply->reply_payload_rcv_len = 0;
rc = -ENOENT;
goto error_get_event_exit;
}
if (evt_dat->len > job->reply_payload.payload_len) {
evt_dat->len = job->reply_payload.payload_len;
lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
"2618 Truncated event data at %d "
"bytes\n",
job->reply_payload.payload_len);
}
event_reply->immed_data = evt_dat->immed_dat;
if (evt_dat->len > 0)
job->reply->reply_payload_rcv_len =
sg_copy_from_buffer(job->reply_payload.sg_list,
job->reply_payload.sg_cnt,
evt_dat->data, evt_dat->len);
else
job->reply->reply_payload_rcv_len = 0;
rc = 0;
if (evt_dat)
kfree(evt_dat->data);
kfree(evt_dat);
mutex_lock(&phba->ct_event_mutex);
lpfc_ct_event_unref(evt);
mutex_unlock(&phba->ct_event_mutex);
error_get_event_exit:
/* make error code available to userspace */
job->reply->result = rc;
/* complete the job back to userspace */
job->job_done(job);
return rc;
}
/**
* lpfc_bsg_hst_vendor - process a vendor-specific fc_bsg_job
* @job: fc_bsg_job to handle
*/
static int
lpfc_bsg_hst_vendor(struct fc_bsg_job *job)
{
int command = job->request->rqst_data.h_vendor.vendor_cmd[0];
switch (command) {
case LPFC_BSG_VENDOR_SET_CT_EVENT:
return lpfc_bsg_set_event(job);
case LPFC_BSG_VENDOR_GET_CT_EVENT:
return lpfc_bsg_get_event(job);
default:
return -EINVAL;
}
}
/**
* lpfc_bsg_request - handle a bsg request from the FC transport
* @job: fc_bsg_job to handle
*/
int
lpfc_bsg_request(struct fc_bsg_job *job)
{
uint32_t msgcode;
int rc = -EINVAL;
msgcode = job->request->msgcode;
switch (msgcode) {
case FC_BSG_HST_VENDOR:
rc = lpfc_bsg_hst_vendor(job);
break;
case FC_BSG_RPT_ELS:
rc = lpfc_bsg_rport_els(job);
break;
case FC_BSG_RPT_CT:
rc = lpfc_bsg_rport_ct(job);
break;
default:
break;
}
return rc;
}
/**
* lpfc_bsg_timeout - handle timeout of a bsg request from the FC transport
* @job: fc_bsg_job that has timed out
*
* This function just aborts the job's IOCB. The aborted IOCB will return to
* the waiting function, which will handle passing the error back to userspace.
*/
int
lpfc_bsg_timeout(struct fc_bsg_job *job)
{
struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
struct lpfc_hba *phba = vport->phba;
struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *)job->dd_data;
struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
if (cmdiocb)
lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb);
return 0;
}
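For reference, the cmdiocb recovered from job->dd_data is the one stashed by lpfc_bsg_rport_ct() and lpfc_bsg_rport_els() before issuing their iocbs, so the abort targets exactly the in-flight request backing this job.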

View File

@ -21,9 +21,11 @@
typedef int (*node_filter)(struct lpfc_nodelist *, void *);
struct fc_rport;
void lpfc_dump_mem(struct lpfc_hba *, LPFC_MBOXQ_t *, uint16_t);
void lpfc_down_link(struct lpfc_hba *, LPFC_MBOXQ_t *);
void lpfc_sli_read_link_ste(struct lpfc_hba *);
void lpfc_dump_mem(struct lpfc_hba *, LPFC_MBOXQ_t *, uint16_t, uint16_t);
void lpfc_dump_wakeup_param(struct lpfc_hba *, LPFC_MBOXQ_t *);
void lpfc_dump_static_vport(struct lpfc_hba *, LPFC_MBOXQ_t *, uint16_t);
int lpfc_dump_static_vport(struct lpfc_hba *, LPFC_MBOXQ_t *, uint16_t);
int lpfc_dump_fcoe_param(struct lpfc_hba *, struct lpfcMboxq *);
void lpfc_read_nv(struct lpfc_hba *, LPFC_MBOXQ_t *);
void lpfc_config_async(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t);
@ -135,6 +137,9 @@ int lpfc_els_disc_adisc(struct lpfc_vport *);
int lpfc_els_disc_plogi(struct lpfc_vport *);
void lpfc_els_timeout(unsigned long);
void lpfc_els_timeout_handler(struct lpfc_vport *);
struct lpfc_iocbq *lpfc_prep_els_iocb(struct lpfc_vport *, uint8_t, uint16_t,
uint8_t, struct lpfc_nodelist *,
uint32_t, uint32_t);
void lpfc_hb_timeout_handler(struct lpfc_hba *);
void lpfc_ct_unsol_event(struct lpfc_hba *, struct lpfc_sli_ring *,
@ -182,11 +187,12 @@ int lpfc_mbox_dev_check(struct lpfc_hba *);
int lpfc_mbox_tmo_val(struct lpfc_hba *, int);
void lpfc_init_vfi(struct lpfcMboxq *, struct lpfc_vport *);
void lpfc_reg_vfi(struct lpfcMboxq *, struct lpfc_vport *, dma_addr_t);
void lpfc_init_vpi(struct lpfcMboxq *, uint16_t);
void lpfc_init_vpi(struct lpfc_hba *, struct lpfcMboxq *, uint16_t);
void lpfc_unreg_vfi(struct lpfcMboxq *, uint16_t);
void lpfc_reg_fcfi(struct lpfc_hba *, struct lpfcMboxq *);
void lpfc_unreg_fcfi(struct lpfcMboxq *, uint16_t);
void lpfc_resume_rpi(struct lpfcMboxq *, struct lpfc_nodelist *);
int lpfc_check_pending_fcoe_event(struct lpfc_hba *, uint8_t);
void lpfc_config_hbq(struct lpfc_hba *, uint32_t, struct lpfc_hbq_init *,
uint32_t , LPFC_MBOXQ_t *);
@ -234,6 +240,7 @@ void lpfc_sli_def_mbox_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *);
int lpfc_sli_issue_iocb(struct lpfc_hba *, uint32_t,
struct lpfc_iocbq *, uint32_t);
void lpfc_sli_pcimem_bcopy(void *, void *, uint32_t);
void lpfc_sli_bemem_bcopy(void *, void *, uint32_t);
void lpfc_sli_abort_iocb_ring(struct lpfc_hba *, struct lpfc_sli_ring *);
void lpfc_sli_flush_fcp_rings(struct lpfc_hba *);
int lpfc_sli_ringpostbuf_put(struct lpfc_hba *, struct lpfc_sli_ring *,
@ -360,3 +367,8 @@ void lpfc_start_fdiscs(struct lpfc_hba *phba);
#define HBA_EVENT_LINK_UP 2
#define HBA_EVENT_LINK_DOWN 3
/* functions to support SGIOv4/bsg interface */
int lpfc_bsg_request(struct fc_bsg_job *);
int lpfc_bsg_timeout(struct fc_bsg_job *);
void lpfc_bsg_ct_unsol_event(struct lpfc_hba *, struct lpfc_sli_ring *,
struct lpfc_iocbq *);

View File

@ -97,6 +97,8 @@ lpfc_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
struct list_head head;
struct lpfc_dmabuf *bdeBuf;
lpfc_bsg_ct_unsol_event(phba, pring, piocbq);
if (unlikely(icmd->ulpStatus == IOSTAT_NEED_BUFFER)) {
lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ);
} else if ((icmd->ulpStatus == IOSTAT_LOCAL_REJECT) &&

View File

@ -146,7 +146,7 @@ lpfc_els_chk_latt(struct lpfc_vport *vport)
* Pointer to the newly allocated/prepared els iocb data structure
* NULL - when els iocb data structure allocation/preparation failed
**/
static struct lpfc_iocbq *
struct lpfc_iocbq *
lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp,
uint16_t cmdSize, uint8_t retry,
struct lpfc_nodelist *ndlp, uint32_t did,

View File

@ -61,6 +61,7 @@ static uint8_t lpfcAlpaArray[] = {
static void lpfc_disc_timeout_handler(struct lpfc_vport *);
static void lpfc_disc_flush_list(struct lpfc_vport *vport);
static void lpfc_unregister_fcfi_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *);
void
lpfc_terminate_rport_io(struct fc_rport *rport)
@ -1009,9 +1010,15 @@ lpfc_mbx_cmpl_reg_fcfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
spin_lock_irqsave(&phba->hbalock, flags);
phba->fcf.fcf_flag |= FCF_REGISTERED;
spin_unlock_irqrestore(&phba->hbalock, flags);
/* If there is a pending FCoE event, restart FCF table scan. */
if (lpfc_check_pending_fcoe_event(phba, 1)) {
mempool_free(mboxq, phba->mbox_mem_pool);
return;
}
if (vport->port_state != LPFC_FLOGI) {
spin_lock_irqsave(&phba->hbalock, flags);
phba->fcf.fcf_flag |= (FCF_DISCOVERED | FCF_IN_USE);
phba->hba_flag &= ~FCF_DISC_INPROGRESS;
spin_unlock_irqrestore(&phba->hbalock, flags);
lpfc_initial_flogi(vport);
}
@ -1053,6 +1060,39 @@ lpfc_fab_name_match(uint8_t *fab_name, struct fcf_record *new_fcf_record)
return 0;
}
/**
* lpfc_sw_name_match - Check if the fcf switch names match.
* @sw_name: pointer to switch name.
* @new_fcf_record: pointer to fcf record.
*
* This routine compares the fcf record's switch name with the provided
* switch name. If the switch names are identical this function
* returns 1, else it returns 0.
**/
static uint32_t
lpfc_sw_name_match(uint8_t *sw_name, struct fcf_record *new_fcf_record)
{
if ((sw_name[0] ==
bf_get(lpfc_fcf_record_switch_name_0, new_fcf_record)) &&
(sw_name[1] ==
bf_get(lpfc_fcf_record_switch_name_1, new_fcf_record)) &&
(sw_name[2] ==
bf_get(lpfc_fcf_record_switch_name_2, new_fcf_record)) &&
(sw_name[3] ==
bf_get(lpfc_fcf_record_switch_name_3, new_fcf_record)) &&
(sw_name[4] ==
bf_get(lpfc_fcf_record_switch_name_4, new_fcf_record)) &&
(sw_name[5] ==
bf_get(lpfc_fcf_record_switch_name_5, new_fcf_record)) &&
(sw_name[6] ==
bf_get(lpfc_fcf_record_switch_name_6, new_fcf_record)) &&
(sw_name[7] ==
bf_get(lpfc_fcf_record_switch_name_7, new_fcf_record)))
return 1;
else
return 0;
}
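An equivalent, more compact form would loop over the eight bytes; the driver presumably keeps the unrolled compares because each byte has its own bf_get() accessor name. Sketch with a hypothetical indexed helper:

	/* ex_fcf_switch_name_byte() is hypothetical, not in the driver */
	for (i = 0; i < 8; i++)
		if (sw_name[i] != ex_fcf_switch_name_byte(new_fcf_record, i))
			return 0;
	return 1;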
/**
* lpfc_mac_addr_match - Check if the fcf mac address match.
* @phba: pointer to lpfc hba data structure.
@ -1123,6 +1163,22 @@ lpfc_copy_fcf_record(struct lpfc_hba *phba, struct fcf_record *new_fcf_record)
bf_get(lpfc_fcf_record_mac_5, new_fcf_record);
phba->fcf.fcf_indx = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record);
phba->fcf.priority = new_fcf_record->fip_priority;
phba->fcf.switch_name[0] =
bf_get(lpfc_fcf_record_switch_name_0, new_fcf_record);
phba->fcf.switch_name[1] =
bf_get(lpfc_fcf_record_switch_name_1, new_fcf_record);
phba->fcf.switch_name[2] =
bf_get(lpfc_fcf_record_switch_name_2, new_fcf_record);
phba->fcf.switch_name[3] =
bf_get(lpfc_fcf_record_switch_name_3, new_fcf_record);
phba->fcf.switch_name[4] =
bf_get(lpfc_fcf_record_switch_name_4, new_fcf_record);
phba->fcf.switch_name[5] =
bf_get(lpfc_fcf_record_switch_name_5, new_fcf_record);
phba->fcf.switch_name[6] =
bf_get(lpfc_fcf_record_switch_name_6, new_fcf_record);
phba->fcf.switch_name[7] =
bf_get(lpfc_fcf_record_switch_name_7, new_fcf_record);
}
/**
@ -1150,6 +1206,7 @@ lpfc_register_fcf(struct lpfc_hba *phba)
/* The FCF is already registered, start discovery */
if (phba->fcf.fcf_flag & FCF_REGISTERED) {
phba->fcf.fcf_flag |= (FCF_DISCOVERED | FCF_IN_USE);
phba->hba_flag &= ~FCF_DISC_INPROGRESS;
spin_unlock_irqrestore(&phba->hbalock, flags);
if (phba->pport->port_state != LPFC_FLOGI)
lpfc_initial_flogi(phba->pport);
@ -1239,9 +1296,12 @@ lpfc_match_fcf_conn_list(struct lpfc_hba *phba,
if ((conn_entry->conn_rec.flags & FCFCNCT_FBNM_VALID) &&
!lpfc_fab_name_match(conn_entry->conn_rec.fabric_name,
new_fcf_record))
new_fcf_record))
continue;
if ((conn_entry->conn_rec.flags & FCFCNCT_SWNM_VALID) &&
!lpfc_sw_name_match(conn_entry->conn_rec.switch_name,
new_fcf_record))
continue;
if (conn_entry->conn_rec.flags & FCFCNCT_VLAN_VALID) {
/*
* If the vlan bit map does not have the bit set for the
@ -1335,6 +1395,60 @@ lpfc_match_fcf_conn_list(struct lpfc_hba *phba,
return 0;
}
/**
* lpfc_check_pending_fcoe_event - Check if there is a pending fcoe event.
* @phba: pointer to lpfc hba data structure.
* @unreg_fcf: Unregister FCF if the FCF table needs to be re-scanned.
*
* This function checks if any fcoe event is pending while the driver
* scans FCF entries. If there is a pending event, it restarts the
* FCF scan and returns 1, else it returns 0.
*/
int
lpfc_check_pending_fcoe_event(struct lpfc_hba *phba, uint8_t unreg_fcf)
{
LPFC_MBOXQ_t *mbox;
int rc;
/*
* If the link is up and no FCoE events arrived while in
* FCF discovery, there is no need to restart FCF discovery.
*/
if ((phba->link_state >= LPFC_LINK_UP) &&
(phba->fcoe_eventtag == phba->fcoe_eventtag_at_fcf_scan))
return 0;
spin_lock_irq(&phba->hbalock);
phba->fcf.fcf_flag &= ~FCF_AVAILABLE;
spin_unlock_irq(&phba->hbalock);
if (phba->link_state >= LPFC_LINK_UP)
lpfc_sli4_read_fcf_record(phba, LPFC_FCOE_FCF_GET_FIRST);
if (unreg_fcf) {
spin_lock_irq(&phba->hbalock);
phba->fcf.fcf_flag &= ~FCF_REGISTERED;
spin_unlock_irq(&phba->hbalock);
mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!mbox) {
lpfc_printf_log(phba, KERN_ERR,
LOG_DISCOVERY|LOG_MBOX,
"2610 UNREG_FCFI mbox allocation failed\n");
return 1;
}
lpfc_unreg_fcfi(mbox, phba->fcf.fcfi);
mbox->vport = phba->pport;
mbox->mbox_cmpl = lpfc_unregister_fcfi_cmpl;
rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
if (rc == MBX_NOT_FINISHED) {
lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
"2611 UNREG_FCFI issue mbox failed\n");
mempool_free(mbox, phba->mbox_mem_pool);
}
}
return 1;
}
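Both call sites added by this patch follow the same contract: a non-zero return means a rescan was kicked off and the caller must drop its mailbox and stop processing the stale FCF record, e.g.:

	if (lpfc_check_pending_fcoe_event(phba, 0)) {
		lpfc_sli4_mbox_cmd_free(phba, mboxq);
		return;
	}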
/**
* lpfc_mbx_cmpl_read_fcf_record - Completion handler for read_fcf mbox.
* @phba: pointer to lpfc hba data structure.
@ -1367,6 +1481,12 @@ lpfc_mbx_cmpl_read_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
unsigned long flags;
uint16_t vlan_id;
/* If there is pending FCoE event restart FCF table scan */
if (lpfc_check_pending_fcoe_event(phba, 0)) {
lpfc_sli4_mbox_cmd_free(phba, mboxq);
return;
}
/* Get the first SGE entry from the non-embedded DMA memory. This
* routine only uses a single SGE.
*/
@ -1424,7 +1544,9 @@ lpfc_mbx_cmpl_read_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
spin_lock_irqsave(&phba->hbalock, flags);
if (phba->fcf.fcf_flag & FCF_IN_USE) {
if (lpfc_fab_name_match(phba->fcf.fabric_name,
new_fcf_record) &&
new_fcf_record) &&
lpfc_sw_name_match(phba->fcf.switch_name,
new_fcf_record) &&
lpfc_mac_addr_match(phba, new_fcf_record)) {
phba->fcf.fcf_flag |= FCF_AVAILABLE;
spin_unlock_irqrestore(&phba->hbalock, flags);
@ -1464,9 +1586,9 @@ lpfc_mbx_cmpl_read_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
* If there is a record with lower priority value for
* the current FCF, use that record.
*/
if (lpfc_fab_name_match(phba->fcf.fabric_name, new_fcf_record)
&& (new_fcf_record->fip_priority <
phba->fcf.priority)) {
if (lpfc_fab_name_match(phba->fcf.fabric_name,
new_fcf_record) &&
(new_fcf_record->fip_priority < phba->fcf.priority)) {
/* Use this FCF record */
lpfc_copy_fcf_record(phba, new_fcf_record);
phba->fcf.addr_mode = addr_mode;
@ -1511,6 +1633,39 @@ out:
return;
}
/**
* lpfc_init_vpi_cmpl - Completion handler for init_vpi mbox command.
* @phba: pointer to lpfc hba data structure.
* @mboxq: pointer to mailbox data structure.
*
* This function handles completion of init vpi mailbox command.
*/
static void
lpfc_init_vpi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
struct lpfc_vport *vport = mboxq->vport;
if (mboxq->u.mb.mbxStatus) {
lpfc_printf_vlog(vport, KERN_ERR,
LOG_MBOX,
"2609 Init VPI mailbox failed 0x%x\n",
mboxq->u.mb.mbxStatus);
mempool_free(mboxq, phba->mbox_mem_pool);
lpfc_vport_set_state(vport, FC_VPORT_FAILED);
return;
}
vport->fc_flag &= ~FC_VPORT_NEEDS_INIT_VPI;
if (phba->link_flag & LS_NPIV_FAB_SUPPORTED)
lpfc_initial_fdisc(vport);
else {
lpfc_vport_set_state(vport, FC_VPORT_NO_FABRIC_SUPP);
lpfc_printf_vlog(vport, KERN_ERR,
LOG_ELS,
"2606 No NPIV Fabric support\n");
}
return;
}
/**
* lpfc_start_fdiscs - send fdiscs for each vports on this port.
* @phba: pointer to lpfc hba data structure.
@ -1523,6 +1678,8 @@ lpfc_start_fdiscs(struct lpfc_hba *phba)
{
struct lpfc_vport **vports;
int i;
LPFC_MBOXQ_t *mboxq;
int rc;
vports = lpfc_create_vport_work_array(phba);
if (vports != NULL) {
@ -1540,6 +1697,29 @@ lpfc_start_fdiscs(struct lpfc_hba *phba)
FC_VPORT_LINKDOWN);
continue;
}
if (vports[i]->fc_flag & FC_VPORT_NEEDS_INIT_VPI) {
mboxq = mempool_alloc(phba->mbox_mem_pool,
GFP_KERNEL);
if (!mboxq) {
lpfc_printf_vlog(vports[i], KERN_ERR,
LOG_MBOX, "2607 Failed to allocate "
"init_vpi mailbox\n");
continue;
}
lpfc_init_vpi(phba, mboxq, vports[i]->vpi);
mboxq->vport = vports[i];
mboxq->mbox_cmpl = lpfc_init_vpi_cmpl;
rc = lpfc_sli_issue_mbox(phba, mboxq,
MBX_NOWAIT);
if (rc == MBX_NOT_FINISHED) {
lpfc_printf_vlog(vports[i], KERN_ERR,
LOG_MBOX, "2608 Failed to issue "
"init_vpi mailbox\n");
mempool_free(mboxq,
phba->mbox_mem_pool);
}
continue;
}
if (phba->link_flag & LS_NPIV_FAB_SUPPORTED)
lpfc_initial_fdisc(vports[i]);
else {
@ -1769,6 +1949,7 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la)
goto out;
}
} else {
vport->port_state = LPFC_VPORT_UNKNOWN;
/*
* Add the driver's default FCF record at FCF index 0 now. This
* is a phase 1 implementation that supports FCF index 0 and driver
@ -1804,6 +1985,12 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la)
* The driver is expected to do FIP/FCF. Call the port
* and get the FCF Table.
*/
spin_lock_irq(&phba->hbalock);
if (phba->hba_flag & FCF_DISC_INPROGRESS) {
spin_unlock_irq(&phba->hbalock);
return;
}
spin_unlock_irq(&phba->hbalock);
rc = lpfc_sli4_read_fcf_record(phba,
LPFC_FCOE_FCF_GET_FIRST);
if (rc)
@ -2113,13 +2300,15 @@ lpfc_create_static_vport(struct lpfc_hba *phba)
LPFC_MBOXQ_t *pmb = NULL;
MAILBOX_t *mb;
struct static_vport_info *vport_info;
int rc, i;
int rc = 0, i;
struct fc_vport_identifiers vport_id;
struct fc_vport *new_fc_vport;
struct Scsi_Host *shost;
struct lpfc_vport *vport;
uint16_t offset = 0;
uint8_t *vport_buff;
struct lpfc_dmabuf *mp;
uint32_t byte_count = 0;
pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!pmb) {
@ -2142,7 +2331,9 @@ lpfc_create_static_vport(struct lpfc_hba *phba)
vport_buff = (uint8_t *) vport_info;
do {
lpfc_dump_static_vport(phba, pmb, offset);
if (lpfc_dump_static_vport(phba, pmb, offset))
goto out;
pmb->vport = phba->pport;
rc = lpfc_sli_issue_mbox_wait(phba, pmb, LPFC_MBOX_TMO);
@ -2155,17 +2346,30 @@ lpfc_create_static_vport(struct lpfc_hba *phba)
goto out;
}
if (mb->un.varDmp.word_cnt >
sizeof(struct static_vport_info) - offset)
mb->un.varDmp.word_cnt =
sizeof(struct static_vport_info) - offset;
if (phba->sli_rev == LPFC_SLI_REV4) {
byte_count = pmb->u.mqe.un.mb_words[5];
mp = (struct lpfc_dmabuf *) pmb->context2;
if (byte_count > sizeof(struct static_vport_info) -
offset)
byte_count = sizeof(struct static_vport_info)
- offset;
memcpy(vport_buff + offset, mp->virt, byte_count);
offset += byte_count;
} else {
if (mb->un.varDmp.word_cnt >
sizeof(struct static_vport_info) - offset)
mb->un.varDmp.word_cnt =
sizeof(struct static_vport_info)
- offset;
byte_count = mb->un.varDmp.word_cnt;
lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
vport_buff + offset,
byte_count);
lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
vport_buff + offset,
mb->un.varDmp.word_cnt);
offset += mb->un.varDmp.word_cnt;
offset += byte_count;
}
} while (mb->un.varDmp.word_cnt &&
} while (byte_count &&
offset < sizeof(struct static_vport_info));
@ -2198,7 +2402,7 @@ lpfc_create_static_vport(struct lpfc_hba *phba)
if (!new_fc_vport) {
lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
"0546 lpfc_create_static_vport failed to"
" create vport \n");
" create vport\n");
continue;
}
@ -2207,16 +2411,15 @@ lpfc_create_static_vport(struct lpfc_hba *phba)
}
out:
/*
* If this is timed out command, setting NULL to context2 tell SLI
* layer not to use this buffer.
*/
spin_lock_irq(&phba->hbalock);
pmb->context2 = NULL;
spin_unlock_irq(&phba->hbalock);
kfree(vport_info);
if (rc != MBX_TIMEOUT)
if (rc != MBX_TIMEOUT) {
if (pmb->context2) {
mp = (struct lpfc_dmabuf *) pmb->context2;
lpfc_mbuf_free(phba, mp->virt, mp->phys);
kfree(mp);
}
mempool_free(pmb, phba->mbox_mem_pool);
}
return;
}
@ -4360,7 +4563,7 @@ lpfc_read_fcoe_param(struct lpfc_hba *phba,
fcoe_param_hdr = (struct lpfc_fip_param_hdr *)
buff;
fcoe_param = (struct lpfc_fcoe_params *)
buff + sizeof(struct lpfc_fip_param_hdr);
(buff + sizeof(struct lpfc_fip_param_hdr));
if ((fcoe_param_hdr->parm_version != FIPP_VERSION) ||
(fcoe_param_hdr->length != FCOE_PARAM_LENGTH))

View File

@ -2496,8 +2496,8 @@ typedef struct {
#define DMP_VPORT_REGION_SIZE 0x200
#define DMP_MBOX_OFFSET_WORD 0x5
#define DMP_REGION_FCOEPARAM 0x17 /* fcoe param region */
#define DMP_FCOEPARAM_RGN_SIZE 0x400
#define DMP_REGION_23 0x17 /* fcoe param and port state region */
#define DMP_RGN23_SIZE 0x400
#define WAKE_UP_PARMS_REGION_ID 4
#define WAKE_UP_PARMS_WORD_SIZE 15

View File

@ -52,6 +52,31 @@ struct dma_address {
uint32_t addr_hi;
};
#define LPFC_SLIREV_CONF_WORD 0x58
struct lpfc_sli_intf {
uint32_t word0;
#define lpfc_sli_intf_iftype_MASK 0x00000007
#define lpfc_sli_intf_iftype_SHIFT 0
#define lpfc_sli_intf_iftype_WORD word0
#define lpfc_sli_intf_rev_MASK 0x0000000f
#define lpfc_sli_intf_rev_SHIFT 4
#define lpfc_sli_intf_rev_WORD word0
#define LPFC_SLIREV_CONF_SLI4 4
#define lpfc_sli_intf_family_MASK 0x000000ff
#define lpfc_sli_intf_family_SHIFT 8
#define lpfc_sli_intf_family_WORD word0
#define lpfc_sli_intf_feat1_MASK 0x000000ff
#define lpfc_sli_intf_feat1_SHIFT 16
#define lpfc_sli_intf_feat1_WORD word0
#define lpfc_sli_intf_feat2_MASK 0x0000001f
#define lpfc_sli_intf_feat2_SHIFT 24
#define lpfc_sli_intf_feat2_WORD word0
#define lpfc_sli_intf_valid_MASK 0x00000007
#define lpfc_sli_intf_valid_SHIFT 29
#define lpfc_sli_intf_valid_WORD word0
#define LPFC_SLI_INTF_VALID 6
};
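The SHIFT/MASK/WORD triples above are the driver's bit-field accessor convention: each named field records which word it lives in, how far to shift, and which mask to apply, and generic bf_get()/bf_set() macros stitch the tokens together. A minimal user-space sketch of the decode side (the bf_get() definition mirrors the pattern the driver uses; the sample word0 value is made up):

#include <stdint.h>
#include <stdio.h>

/* Generic accessor in the driver's style: each field supplies
 * <name>_SHIFT, <name>_MASK and <name>_WORD tokens. */
#define bf_get(name, ptr) \
	((((ptr)->name##_WORD) >> name##_SHIFT) & name##_MASK)

struct lpfc_sli_intf { uint32_t word0; };
#define lpfc_sli_intf_rev_SHIFT 4
#define lpfc_sli_intf_rev_MASK  0x0000000f
#define lpfc_sli_intf_rev_WORD  word0

int main(void)
{
	struct lpfc_sli_intf intf = { .word0 = 0x40 }; /* rev field = 4 */

	printf("sli rev %u\n", (unsigned)bf_get(lpfc_sli_intf_rev, &intf));
	return 0;
}

This is what lets lpfc_pci_probe_one() later in this diff test bf_get(lpfc_sli_intf_rev, &intf) against LPFC_SLIREV_CONF_SLI4 to route the probe to the SLI4 path.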
#define LPFC_SLI4_BAR0 1
#define LPFC_SLI4_BAR1 2
#define LPFC_SLI4_BAR2 4
@ -1181,6 +1206,32 @@ struct fcf_record {
#define lpfc_fcf_record_fcf_state_MASK 0x0000FFFF
#define lpfc_fcf_record_fcf_state_WORD word8
uint8_t vlan_bitmap[512];
uint32_t word137;
#define lpfc_fcf_record_switch_name_0_SHIFT 0
#define lpfc_fcf_record_switch_name_0_MASK 0x000000FF
#define lpfc_fcf_record_switch_name_0_WORD word137
#define lpfc_fcf_record_switch_name_1_SHIFT 8
#define lpfc_fcf_record_switch_name_1_MASK 0x000000FF
#define lpfc_fcf_record_switch_name_1_WORD word137
#define lpfc_fcf_record_switch_name_2_SHIFT 16
#define lpfc_fcf_record_switch_name_2_MASK 0x000000FF
#define lpfc_fcf_record_switch_name_2_WORD word137
#define lpfc_fcf_record_switch_name_3_SHIFT 24
#define lpfc_fcf_record_switch_name_3_MASK 0x000000FF
#define lpfc_fcf_record_switch_name_3_WORD word137
uint32_t word138;
#define lpfc_fcf_record_switch_name_4_SHIFT 0
#define lpfc_fcf_record_switch_name_4_MASK 0x000000FF
#define lpfc_fcf_record_switch_name_4_WORD word138
#define lpfc_fcf_record_switch_name_5_SHIFT 8
#define lpfc_fcf_record_switch_name_5_MASK 0x000000FF
#define lpfc_fcf_record_switch_name_5_WORD word138
#define lpfc_fcf_record_switch_name_6_SHIFT 16
#define lpfc_fcf_record_switch_name_6_MASK 0x000000FF
#define lpfc_fcf_record_switch_name_6_WORD word138
#define lpfc_fcf_record_switch_name_7_SHIFT 24
#define lpfc_fcf_record_switch_name_7_MASK 0x000000FF
#define lpfc_fcf_record_switch_name_7_WORD word138
};
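The word137/word138 fields above carry the fabric switch WWN one byte per 8-bit field; discovery code can reassemble them into the eight-byte switch_name array added to struct lpfc_fcf later in this diff. A sketch of that reassembly under the same low-byte-first layout (the helper name and sample values are illustrative):

#include <stdint.h>
#include <stdio.h>

/* Reassemble an 8-byte switch name from two 32-bit words that hold
 * one byte per field, low byte first, as laid out above. */
static void unpack_switch_name(uint32_t word137, uint32_t word138,
			       uint8_t name[8])
{
	for (int i = 0; i < 4; i++) {
		name[i]     = (word137 >> (8 * i)) & 0xff;
		name[i + 4] = (word138 >> (8 * i)) & 0xff;
	}
}

int main(void)
{
	uint8_t name[8];

	unpack_switch_name(0x44332211, 0x88776655, name);
	for (int i = 0; i < 8; i++)
		printf("%02x", name[i]);
	printf("\n"); /* prints 1122334455667788 */
	return 0;
}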
struct lpfc_mbx_read_fcf_tbl {
@ -1385,20 +1436,17 @@ struct lpfc_mbx_unreg_vfi {
struct lpfc_mbx_resume_rpi {
uint32_t word1;
#define lpfc_resume_rpi_rpi_SHIFT 0
#define lpfc_resume_rpi_rpi_MASK 0x0000FFFF
#define lpfc_resume_rpi_rpi_WORD word1
#define lpfc_resume_rpi_index_SHIFT 0
#define lpfc_resume_rpi_index_MASK 0x0000FFFF
#define lpfc_resume_rpi_index_WORD word1
#define lpfc_resume_rpi_ii_SHIFT 30
#define lpfc_resume_rpi_ii_MASK 0x00000003
#define lpfc_resume_rpi_ii_WORD word1
#define RESUME_INDEX_RPI 0
#define RESUME_INDEX_VPI 1
#define RESUME_INDEX_VFI 2
#define RESUME_INDEX_FCFI 3
uint32_t event_tag;
uint32_t word3_rsvd;
uint32_t word4_rsvd;
uint32_t word5_rsvd;
uint32_t word6;
#define lpfc_resume_rpi_vpi_SHIFT 0
#define lpfc_resume_rpi_vpi_MASK 0x0000FFFF
#define lpfc_resume_rpi_vpi_WORD word6
#define lpfc_resume_rpi_vfi_SHIFT 16
#define lpfc_resume_rpi_vfi_MASK 0x0000FFFF
#define lpfc_resume_rpi_vfi_WORD word6
};
#define REG_FCF_INVALID_QID 0xFFFF

View File

@ -211,7 +211,7 @@ lpfc_config_port_prep(struct lpfc_hba *phba)
goto out_free_mbox;
do {
lpfc_dump_mem(phba, pmb, offset);
lpfc_dump_mem(phba, pmb, offset, DMP_REGION_VPD);
rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
if (rc != MBX_SUCCESS) {
@ -425,6 +425,9 @@ lpfc_config_port_post(struct lpfc_hba *phba)
return -EIO;
}
/* Check if the port is disabled */
lpfc_sli_read_link_ste(phba);
/* Reset the DFT_HBA_Q_DEPTH to the max xri */
if (phba->cfg_hba_queue_depth > (mb->un.varRdConfig.max_xri+1))
phba->cfg_hba_queue_depth =
@ -524,27 +527,46 @@ lpfc_config_port_post(struct lpfc_hba *phba)
/* Set up error attention (ERATT) polling timer */
mod_timer(&phba->eratt_poll, jiffies + HZ * LPFC_ERATT_POLL_INTERVAL);
lpfc_init_link(phba, pmb, phba->cfg_topology, phba->cfg_link_speed);
pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
lpfc_set_loopback_flag(phba);
rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
if (rc != MBX_SUCCESS) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
if (phba->hba_flag & LINK_DISABLED) {
lpfc_printf_log(phba,
KERN_ERR, LOG_INIT,
"2598 Adapter Link is disabled.\n");
lpfc_down_link(phba, pmb);
pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
lpfc_printf_log(phba,
KERN_ERR, LOG_INIT,
"2599 Adapter failed to issue DOWN_LINK"
" mbox command rc 0x%x\n", rc);
mempool_free(pmb, phba->mbox_mem_pool);
return -EIO;
}
} else {
lpfc_init_link(phba, pmb, phba->cfg_topology,
phba->cfg_link_speed);
pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
lpfc_set_loopback_flag(phba);
rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
if (rc != MBX_SUCCESS) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"0454 Adapter failed to init, mbxCmd x%x "
"INIT_LINK, mbxStatus x%x\n",
mb->mbxCommand, mb->mbxStatus);
/* Clear all interrupt enable conditions */
writel(0, phba->HCregaddr);
readl(phba->HCregaddr); /* flush */
/* Clear all pending interrupts */
writel(0xffffffff, phba->HAregaddr);
readl(phba->HAregaddr); /* flush */
/* Clear all interrupt enable conditions */
writel(0, phba->HCregaddr);
readl(phba->HCregaddr); /* flush */
/* Clear all pending interrupts */
writel(0xffffffff, phba->HAregaddr);
readl(phba->HAregaddr); /* flush */
phba->link_state = LPFC_HBA_ERROR;
if (rc != MBX_BUSY)
mempool_free(pmb, phba->mbox_mem_pool);
return -EIO;
phba->link_state = LPFC_HBA_ERROR;
if (rc != MBX_BUSY)
mempool_free(pmb, phba->mbox_mem_pool);
return -EIO;
}
}
/* MBOX buffer will be freed in mbox compl */
pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
@ -558,7 +580,7 @@ lpfc_config_port_post(struct lpfc_hba *phba)
KERN_ERR,
LOG_INIT,
"0456 Adapter failed to issue "
"ASYNCEVT_ENABLE mbox status x%x \n.",
"ASYNCEVT_ENABLE mbox status x%x\n",
rc);
mempool_free(pmb, phba->mbox_mem_pool);
}
@ -572,7 +594,7 @@ lpfc_config_port_post(struct lpfc_hba *phba)
if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0435 Adapter failed "
"to get Option ROM version status x%x\n.", rc);
"to get Option ROM version status x%x\n", rc);
mempool_free(pmb, phba->mbox_mem_pool);
}
@ -2133,6 +2155,8 @@ lpfc_online(struct lpfc_hba *phba)
vports[i]->fc_flag &= ~FC_OFFLINE_MODE;
if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
if (phba->sli_rev == LPFC_SLI_REV4)
vports[i]->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
spin_unlock_irq(shost->host_lock);
}
lpfc_destroy_vport_work_array(phba, vports);
@ -2807,6 +2831,7 @@ lpfc_sli4_async_link_evt(struct lpfc_hba *phba,
att_type = lpfc_sli4_parse_latt_type(phba, acqe_link);
if (att_type != AT_LINK_DOWN && att_type != AT_LINK_UP)
return;
phba->fcoe_eventtag = acqe_link->event_tag;
pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!pmb) {
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
@ -2894,18 +2919,20 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,
uint8_t event_type = bf_get(lpfc_acqe_fcoe_event_type, acqe_fcoe);
int rc;
phba->fcoe_eventtag = acqe_fcoe->event_tag;
switch (event_type) {
case LPFC_FCOE_EVENT_TYPE_NEW_FCF:
lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
"2546 New FCF found index 0x%x tag 0x%x \n",
"2546 New FCF found index 0x%x tag 0x%x\n",
acqe_fcoe->fcf_index,
acqe_fcoe->event_tag);
/*
* If the current FCF is in discovered state,
* do nothing.
* If the current FCF is in discovered state, or
* FCF discovery is in progress do nothing.
*/
spin_lock_irq(&phba->hbalock);
if (phba->fcf.fcf_flag & FCF_DISCOVERED) {
if ((phba->fcf.fcf_flag & FCF_DISCOVERED) ||
(phba->hba_flag & FCF_DISC_INPROGRESS)) {
spin_unlock_irq(&phba->hbalock);
break;
}
@ -2922,7 +2949,7 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,
case LPFC_FCOE_EVENT_TYPE_FCF_TABLE_FULL:
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"2548 FCF Table full count 0x%x tag 0x%x \n",
"2548 FCF Table full count 0x%x tag 0x%x\n",
bf_get(lpfc_acqe_fcoe_fcf_count, acqe_fcoe),
acqe_fcoe->event_tag);
break;
@ -2930,7 +2957,7 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,
case LPFC_FCOE_EVENT_TYPE_FCF_DEAD:
lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
"2549 FCF disconnected fron network index 0x%x"
" tag 0x%x \n", acqe_fcoe->fcf_index,
" tag 0x%x\n", acqe_fcoe->fcf_index,
acqe_fcoe->event_tag);
/* If the event is not for currently used fcf do nothing */
if (phba->fcf.fcf_indx != acqe_fcoe->fcf_index)
@ -4130,8 +4157,7 @@ lpfc_hba_alloc(struct pci_dev *pdev)
/* Allocate memory for HBA structure */
phba = kzalloc(sizeof(struct lpfc_hba), GFP_KERNEL);
if (!phba) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"1417 Failed to allocate hba struct.\n");
dev_err(&pdev->dev, "failed to allocate hba struct\n");
return NULL;
}
@ -4145,6 +4171,9 @@ lpfc_hba_alloc(struct pci_dev *pdev)
return NULL;
}
mutex_init(&phba->ct_event_mutex);
INIT_LIST_HEAD(&phba->ct_ev_waiters);
return phba;
}
@ -4489,23 +4518,6 @@ lpfc_sli4_post_status_check(struct lpfc_hba *phba)
if (!phba->sli4_hba.STAregaddr)
return -ENODEV;
/* With unrecoverable error, log the error message and return error */
onlnreg0 = readl(phba->sli4_hba.ONLINE0regaddr);
onlnreg1 = readl(phba->sli4_hba.ONLINE1regaddr);
if ((onlnreg0 != LPFC_ONLINE_NERR) || (onlnreg1 != LPFC_ONLINE_NERR)) {
uerrlo_reg.word0 = readl(phba->sli4_hba.UERRLOregaddr);
uerrhi_reg.word0 = readl(phba->sli4_hba.UERRHIregaddr);
if (uerrlo_reg.word0 || uerrhi_reg.word0) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"1422 HBA Unrecoverable error: "
"uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, "
"online0_reg=0x%x, online1_reg=0x%x\n",
uerrlo_reg.word0, uerrhi_reg.word0,
onlnreg0, onlnreg1);
}
return -ENODEV;
}
/* Wait up to 30 seconds for the SLI Port POST done and ready */
for (i = 0; i < 3000; i++) {
sta_reg.word0 = readl(phba->sli4_hba.STAregaddr);
@ -4545,6 +4557,23 @@ lpfc_sli4_post_status_check(struct lpfc_hba *phba)
bf_get(lpfc_scratchpad_featurelevel1, &scratchpad),
bf_get(lpfc_scratchpad_featurelevel2, &scratchpad));
/* With unrecoverable error, log the error message and return error */
onlnreg0 = readl(phba->sli4_hba.ONLINE0regaddr);
onlnreg1 = readl(phba->sli4_hba.ONLINE1regaddr);
if ((onlnreg0 != LPFC_ONLINE_NERR) || (onlnreg1 != LPFC_ONLINE_NERR)) {
uerrlo_reg.word0 = readl(phba->sli4_hba.UERRLOregaddr);
uerrhi_reg.word0 = readl(phba->sli4_hba.UERRHIregaddr);
if (uerrlo_reg.word0 || uerrhi_reg.word0) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"1422 HBA Unrecoverable error: "
"uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, "
"online0_reg=0x%x, online1_reg=0x%x\n",
uerrlo_reg.word0, uerrhi_reg.word0,
onlnreg0, onlnreg1);
}
return -ENODEV;
}
return port_error;
}
@ -7347,6 +7376,9 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
/* Perform post initialization setup */
lpfc_post_init_setup(phba);
/* Check if there are static vports to be created. */
lpfc_create_static_vport(phba);
return 0;
out_disable_intr:
@ -7636,19 +7668,17 @@ static int __devinit
lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
{
int rc;
uint16_t dev_id;
struct lpfc_sli_intf intf;
if (pci_read_config_word(pdev, PCI_DEVICE_ID, &dev_id))
if (pci_read_config_dword(pdev, LPFC_SLIREV_CONF_WORD, &intf.word0))
return -ENODEV;
switch (dev_id) {
case PCI_DEVICE_ID_TIGERSHARK:
if ((bf_get(lpfc_sli_intf_valid, &intf) == LPFC_SLI_INTF_VALID) &&
(bf_get(lpfc_sli_intf_rev, &intf) == LPFC_SLIREV_CONF_SLI4))
rc = lpfc_pci_probe_one_s4(pdev, pid);
break;
default:
else
rc = lpfc_pci_probe_one_s3(pdev, pid);
break;
}
return rc;
}


@ -52,48 +52,85 @@
* This routine prepares the mailbox command for dumping the list of
* static vports to be created.
**/
void
int
lpfc_dump_static_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb,
uint16_t offset)
{
MAILBOX_t *mb;
void *ctx;
struct lpfc_dmabuf *mp;
mb = &pmb->u.mb;
ctx = pmb->context2;
/* Setup to dump vport info region */
memset(pmb, 0, sizeof(LPFC_MBOXQ_t));
mb->mbxCommand = MBX_DUMP_MEMORY;
mb->un.varDmp.cv = 1;
mb->un.varDmp.type = DMP_NV_PARAMS;
mb->un.varDmp.entry_index = offset;
mb->un.varDmp.region_id = DMP_REGION_VPORT;
mb->un.varDmp.word_cnt = DMP_RSP_SIZE/sizeof(uint32_t);
mb->un.varDmp.co = 0;
mb->un.varDmp.resp_offset = 0;
pmb->context2 = ctx;
mb->mbxOwner = OWN_HOST;
return;
/* For SLI3 HBAs the data is embedded in the mailbox */
if (phba->sli_rev != LPFC_SLI_REV4) {
mb->un.varDmp.cv = 1;
mb->un.varDmp.word_cnt = DMP_RSP_SIZE/sizeof(uint32_t);
return 0;
}
/* For SLI4 HBAs the driver needs to allocate memory */
mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
if (mp)
mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
if (!mp || !mp->virt) {
kfree(mp);
lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
"2605 lpfc_dump_static_vport: memory"
" allocation failed\n");
return 1;
}
memset(mp->virt, 0, LPFC_BPL_SIZE);
INIT_LIST_HEAD(&mp->list);
/* save address for completion */
pmb->context2 = (uint8_t *) mp;
mb->un.varWords[3] = putPaddrLow(mp->phys);
mb->un.varWords[4] = putPaddrHigh(mp->phys);
mb->un.varDmp.sli4_length = sizeof(struct static_vport_info);
return 0;
}
/**
* lpfc_dump_mem - Prepare a mailbox command for retrieving HBA's VPD memory
* lpfc_down_link - Bring down the HBA's link.
* @phba: pointer to lpfc hba data structure.
* @pmb: pointer to the driver internal queue element for mailbox command.
* @offset: offset for dumping VPD memory mailbox command.
*
* This routine prepares a mailbox command to bring down the HBA link.
**/
void
lpfc_down_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
MAILBOX_t *mb;
memset(pmb, 0, sizeof(LPFC_MBOXQ_t));
mb = &pmb->u.mb;
mb->mbxCommand = MBX_DOWN_LINK;
mb->mbxOwner = OWN_HOST;
}
/**
* lpfc_dump_mem - Prepare a mailbox command for reading a region.
* @phba: pointer to lpfc hba data structure.
* @pmb: pointer to the driver internal queue element for mailbox command.
* @offset: offset into the region.
* @region_id: config region id.
*
* The dump mailbox command provides a method for the device driver to obtain
* various types of information from the HBA device.
*
* This routine prepares the mailbox command for dumping HBA Vital Product
* Data (VPD) memory. This mailbox command is to be used for retrieving a
* portion (DMP_RSP_SIZE bytes) of a HBA's VPD from the HBA at an address
* offset specified by the offset parameter.
* This routine prepares the mailbox command for dumping HBA's config region.
**/
void
lpfc_dump_mem(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb, uint16_t offset)
lpfc_dump_mem(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb, uint16_t offset,
uint16_t region_id)
{
MAILBOX_t *mb;
void *ctx;
@ -107,7 +144,7 @@ lpfc_dump_mem(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb, uint16_t offset)
mb->un.varDmp.cv = 1;
mb->un.varDmp.type = DMP_NV_PARAMS;
mb->un.varDmp.entry_index = offset;
mb->un.varDmp.region_id = DMP_REGION_VPD;
mb->un.varDmp.region_id = region_id;
mb->un.varDmp.word_cnt = (DMP_RSP_SIZE / sizeof (uint32_t));
mb->un.varDmp.co = 0;
mb->un.varDmp.resp_offset = 0;
@ -1789,6 +1826,7 @@ lpfc_reg_vfi(struct lpfcMboxq *mbox, struct lpfc_vport *vport, dma_addr_t phys)
/**
* lpfc_init_vpi - Initialize the INIT_VPI mailbox command
* @phba: pointer to the hba structure to init the VPI for.
* @mbox: pointer to lpfc mbox command to initialize.
* @vpi: VPI to be initialized.
*
@ -1799,11 +1837,14 @@ lpfc_reg_vfi(struct lpfcMboxq *mbox, struct lpfc_vport *vport, dma_addr_t phys)
* successful virtual NPort login.
**/
void
lpfc_init_vpi(struct lpfcMboxq *mbox, uint16_t vpi)
lpfc_init_vpi(struct lpfc_hba *phba, struct lpfcMboxq *mbox, uint16_t vpi)
{
memset(mbox, 0, sizeof(*mbox));
bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_INIT_VPI);
bf_set(lpfc_init_vpi_vpi, &mbox->u.mqe.un.init_vpi, vpi);
bf_set(lpfc_init_vpi_vpi, &mbox->u.mqe.un.init_vpi,
vpi + phba->vpi_base);
bf_set(lpfc_init_vpi_vfi, &mbox->u.mqe.un.init_vpi,
phba->pport->vfi + phba->vfi_base);
}
/**
@ -1852,7 +1893,7 @@ lpfc_dump_fcoe_param(struct lpfc_hba *phba,
/* dump_fcoe_param failed to allocate memory */
lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
"2569 lpfc_dump_fcoe_param: memory"
" allocation failed \n");
" allocation failed\n");
return 1;
}
@ -1864,8 +1905,8 @@ lpfc_dump_fcoe_param(struct lpfc_hba *phba,
mb->mbxCommand = MBX_DUMP_MEMORY;
mb->un.varDmp.type = DMP_NV_PARAMS;
mb->un.varDmp.region_id = DMP_REGION_FCOEPARAM;
mb->un.varDmp.sli4_length = DMP_FCOEPARAM_RGN_SIZE;
mb->un.varDmp.region_id = DMP_REGION_23;
mb->un.varDmp.sli4_length = DMP_RGN23_SIZE;
mb->un.varWords[3] = putPaddrLow(mp->phys);
mb->un.varWords[4] = putPaddrHigh(mp->phys);
return 0;
@ -1938,9 +1979,7 @@ lpfc_resume_rpi(struct lpfcMboxq *mbox, struct lpfc_nodelist *ndlp)
memset(mbox, 0, sizeof(*mbox));
resume_rpi = &mbox->u.mqe.un.resume_rpi;
bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_RESUME_RPI);
bf_set(lpfc_resume_rpi_rpi, resume_rpi, ndlp->nlp_rpi);
bf_set(lpfc_resume_rpi_vpi, resume_rpi,
ndlp->vport->vpi + ndlp->vport->phba->vpi_base);
bf_set(lpfc_resume_rpi_vfi, resume_rpi,
ndlp->vport->vfi + ndlp->vport->phba->vfi_base);
bf_set(lpfc_resume_rpi_index, resume_rpi, ndlp->nlp_rpi);
bf_set(lpfc_resume_rpi_ii, resume_rpi, RESUME_INDEX_RPI);
resume_rpi->event_tag = ndlp->phba->fc_eventTag;
}


@ -110,17 +110,28 @@ lpfc_mem_alloc(struct lpfc_hba *phba, int align)
sizeof(struct lpfc_nodelist));
if (!phba->nlp_mem_pool)
goto fail_free_mbox_pool;
phba->lpfc_hrb_pool = pci_pool_create("lpfc_hrb_pool",
if (phba->sli_rev == LPFC_SLI_REV4) {
phba->lpfc_hrb_pool = pci_pool_create("lpfc_hrb_pool",
phba->pcidev,
LPFC_HDR_BUF_SIZE, align, 0);
if (!phba->lpfc_hrb_pool)
goto fail_free_nlp_mem_pool;
phba->lpfc_drb_pool = pci_pool_create("lpfc_drb_pool",
if (!phba->lpfc_hrb_pool)
goto fail_free_nlp_mem_pool;
phba->lpfc_drb_pool = pci_pool_create("lpfc_drb_pool",
phba->pcidev,
LPFC_DATA_BUF_SIZE, align, 0);
if (!phba->lpfc_drb_pool)
goto fail_free_hbq_pool;
if (!phba->lpfc_drb_pool)
goto fail_free_hrb_pool;
phba->lpfc_hbq_pool = NULL;
} else {
phba->lpfc_hbq_pool = pci_pool_create("lpfc_hbq_pool",
phba->pcidev, LPFC_BPL_SIZE, align, 0);
if (!phba->lpfc_hbq_pool)
goto fail_free_nlp_mem_pool;
phba->lpfc_hrb_pool = NULL;
phba->lpfc_drb_pool = NULL;
}
/* vpi zero is reserved for the physical port so add 1 to max */
longs = ((phba->max_vpi + 1) + BITS_PER_LONG - 1) / BITS_PER_LONG;
phba->vpi_bmask = kzalloc(longs * sizeof(unsigned long), GFP_KERNEL);
@ -132,7 +143,7 @@ lpfc_mem_alloc(struct lpfc_hba *phba, int align)
fail_free_dbq_pool:
pci_pool_destroy(phba->lpfc_drb_pool);
phba->lpfc_drb_pool = NULL;
fail_free_hbq_pool:
fail_free_hrb_pool:
pci_pool_destroy(phba->lpfc_hrb_pool);
phba->lpfc_hrb_pool = NULL;
fail_free_nlp_mem_pool:
@ -176,11 +187,17 @@ lpfc_mem_free(struct lpfc_hba *phba)
/* Free HBQ pools */
lpfc_sli_hbqbuf_free_all(phba);
pci_pool_destroy(phba->lpfc_drb_pool);
if (phba->lpfc_drb_pool)
pci_pool_destroy(phba->lpfc_drb_pool);
phba->lpfc_drb_pool = NULL;
pci_pool_destroy(phba->lpfc_hrb_pool);
if (phba->lpfc_hrb_pool)
pci_pool_destroy(phba->lpfc_hrb_pool);
phba->lpfc_hrb_pool = NULL;
if (phba->lpfc_hbq_pool)
pci_pool_destroy(phba->lpfc_hbq_pool);
phba->lpfc_hbq_pool = NULL;
/* Free NLP memory pool */
mempool_destroy(phba->nlp_mem_pool);
phba->nlp_mem_pool = NULL;
@ -380,7 +397,7 @@ lpfc_els_hbq_alloc(struct lpfc_hba *phba)
if (!hbqbp)
return NULL;
hbqbp->dbuf.virt = pci_pool_alloc(phba->lpfc_hrb_pool, GFP_KERNEL,
hbqbp->dbuf.virt = pci_pool_alloc(phba->lpfc_hbq_pool, GFP_KERNEL,
&hbqbp->dbuf.phys);
if (!hbqbp->dbuf.virt) {
kfree(hbqbp);
@ -405,7 +422,7 @@ lpfc_els_hbq_alloc(struct lpfc_hba *phba)
void
lpfc_els_hbq_free(struct lpfc_hba *phba, struct hbq_dmabuf *hbqbp)
{
pci_pool_free(phba->lpfc_hrb_pool, hbqbp->dbuf.virt, hbqbp->dbuf.phys);
pci_pool_free(phba->lpfc_hbq_pool, hbqbp->dbuf.virt, hbqbp->dbuf.phys);
kfree(hbqbp);
return;
}


@ -177,3 +177,23 @@ struct temp_event {
uint32_t data;
};
/* bsg definitions */
#define LPFC_BSG_VENDOR_SET_CT_EVENT 1
#define LPFC_BSG_VENDOR_GET_CT_EVENT 2
struct set_ct_event {
uint32_t command;
uint32_t ev_req_id;
uint32_t ev_reg_id;
};
struct get_ct_event {
uint32_t command;
uint32_t ev_reg_id;
uint32_t ev_req_id;
};
struct get_ct_event_reply {
uint32_t immed_data;
uint32_t type;
};
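These structures define the payload of the new bsg vendor commands. A hypothetical user-space sketch of filling the event-registration request, assuming the three fields shown above are the complete layout (the id values are illustrative only):

#include <stdint.h>
#include <string.h>

#define LPFC_BSG_VENDOR_SET_CT_EVENT 1

struct set_ct_event {
	uint32_t command;
	uint32_t ev_req_id;
	uint32_t ev_reg_id;
};

/* Fill a CT event registration request before handing it to bsg. */
static void fill_ct_event_request(struct set_ct_event *ev)
{
	memset(ev, 0, sizeof(*ev));
	ev->command   = LPFC_BSG_VENDOR_SET_CT_EVENT;
	ev->ev_req_id = 0x1;  /* example request id */
	ev->ev_reg_id = 0x10; /* example registration id */
}

int main(void)
{
	struct set_ct_event ev;

	fill_ct_event_request(&ev);
	return ev.command == LPFC_BSG_VENDOR_SET_CT_EVENT ? 0 : 1;
}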


@ -2142,7 +2142,7 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
} else if (resp_info & RESID_OVER) {
lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
"9028 FCP command x%x residual overrun error. "
"Data: x%x x%x \n", cmnd->cmnd[0],
"Data: x%x x%x\n", cmnd->cmnd[0],
scsi_bufflen(cmnd), scsi_get_resid(cmnd));
host_status = DID_ERROR;
@ -2843,7 +2843,7 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
dif_op_str[scsi_get_prot_op(cmnd)]);
lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
"9034 BLKGRD: CDB: %02x %02x %02x %02x %02x "
"%02x %02x %02x %02x %02x \n",
"%02x %02x %02x %02x %02x\n",
cmnd->cmnd[0], cmnd->cmnd[1], cmnd->cmnd[2],
cmnd->cmnd[3], cmnd->cmnd[4], cmnd->cmnd[5],
cmnd->cmnd[6], cmnd->cmnd[7], cmnd->cmnd[8],
@ -2871,7 +2871,7 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
dif_op_str[scsi_get_prot_op(cmnd)]);
lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
"9039 BLKGRD: CDB: %02x %02x %02x %02x %02x "
"%02x %02x %02x %02x %02x \n",
"%02x %02x %02x %02x %02x\n",
cmnd->cmnd[0], cmnd->cmnd[1], cmnd->cmnd[2],
cmnd->cmnd[3], cmnd->cmnd[4], cmnd->cmnd[5],
cmnd->cmnd[6], cmnd->cmnd[7], cmnd->cmnd[8],
@ -3584,6 +3584,7 @@ struct scsi_host_template lpfc_template = {
.use_clustering = ENABLE_CLUSTERING,
.shost_attrs = lpfc_hba_attrs,
.max_sectors = 0xFFFF,
.vendor_id = LPFC_NL_VENDOR_ID,
};
struct scsi_host_template lpfc_vport_template = {


@ -4139,7 +4139,7 @@ lpfc_sli4_read_fcoe_params(struct lpfc_hba *phba,
return -EIO;
}
data_length = mqe->un.mb_words[5];
if (data_length > DMP_FCOEPARAM_RGN_SIZE) {
if (data_length > DMP_RGN23_SIZE) {
lpfc_mbuf_free(phba, mp->virt, mp->phys);
kfree(mp);
return -EIO;
@ -4304,7 +4304,7 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
*/
if (lpfc_sli4_read_fcoe_params(phba, mboxq))
lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
"2570 Failed to read FCoE parameters \n");
"2570 Failed to read FCoE parameters\n");
/* Issue READ_REV to collect vpd and FW information. */
vpd_size = PAGE_SIZE;
@ -4522,12 +4522,8 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
lpfc_sli4_rb_setup(phba);
/* Start the ELS watchdog timer */
/*
* The driver for SLI4 is not yet ready to process timeouts
* or interrupts. Once it is, the comment bars can be removed.
*/
/* mod_timer(&vport->els_tmofunc,
* jiffies + HZ * (phba->fc_ratov*2)); */
mod_timer(&vport->els_tmofunc,
jiffies + HZ * (phba->fc_ratov * 2));
/* Start heart beat timer */
mod_timer(&phba->hb_tmofunc,
@ -4706,13 +4702,13 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
spin_lock_irqsave(&phba->hbalock, drvr_flag);
if (!pmbox) {
phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
/* processing mbox queue from intr_handler */
if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
return MBX_SUCCESS;
}
processing_queue = 1;
phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
pmbox = lpfc_mbox_get(phba);
if (!pmbox) {
spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
@ -5279,6 +5275,18 @@ lpfc_sli_issue_mbox_s4(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
unsigned long iflags;
int rc;
rc = lpfc_mbox_dev_check(phba);
if (unlikely(rc)) {
lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
"(%d):2544 Mailbox command x%x (x%x) "
"cannot issue Data: x%x x%x\n",
mboxq->vport ? mboxq->vport->vpi : 0,
mboxq->u.mb.mbxCommand,
lpfc_sli4_mbox_opcode_get(phba, mboxq),
psli->sli_flag, flag);
goto out_not_finished;
}
/* Detect polling mode and jump to a handler */
if (!phba->sli4_hba.intr_enable) {
if (flag == MBX_POLL)
@ -5338,17 +5346,6 @@ lpfc_sli_issue_mbox_s4(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
psli->sli_flag, flag);
goto out_not_finished;
}
rc = lpfc_mbox_dev_check(phba);
if (unlikely(rc)) {
lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
"(%d):2544 Mailbox command x%x (x%x) "
"cannot issue Data: x%x x%x\n",
mboxq->vport ? mboxq->vport->vpi : 0,
mboxq->u.mb.mbxCommand,
lpfc_sli4_mbox_opcode_get(phba, mboxq),
psli->sli_flag, flag);
goto out_not_finished;
}
/* Put the mailbox command to the driver internal FIFO */
psli->slistat.mbox_busy++;
@ -5817,19 +5814,21 @@ lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
/**
* lpfc_sli4_scmd_to_wqidx_distr - scsi command to SLI4 WQ index distribution
* @phba: Pointer to HBA context object.
* @piocb: Pointer to command iocb.
*
* This routine performs a round robin SCSI command to SLI4 FCP WQ index
* distribution.
* distribution. This is called by __lpfc_sli_issue_iocb_s4() with the hbalock
* held.
*
* Return: index into SLI4 fast-path FCP queue index.
**/
static uint32_t
lpfc_sli4_scmd_to_wqidx_distr(struct lpfc_hba *phba, struct lpfc_iocbq *piocb)
lpfc_sli4_scmd_to_wqidx_distr(struct lpfc_hba *phba)
{
static uint32_t fcp_qidx;
++phba->fcp_qidx;
if (phba->fcp_qidx >= phba->cfg_fcp_wq_count)
phba->fcp_qidx = 0;
return fcp_qidx++ % phba->cfg_fcp_wq_count;
return phba->fcp_qidx;
}
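The rework above replaces a function-local static with per-HBA state, so the rotation is no longer shared across adapters and is advanced under the hbalock noted in the comment. A stand-alone sketch of the same rotation (a plain variable stands in for phba->fcp_qidx; wq_count is a made-up example value):

#include <stdio.h>
#include <stdint.h>

static uint32_t fcp_qidx;

/* Advance the round-robin work-queue index with wrap-around. */
static uint32_t next_wq_index(uint32_t wq_count)
{
	if (++fcp_qidx >= wq_count)
		fcp_qidx = 0;
	return fcp_qidx;
}

int main(void)
{
	for (int i = 0; i < 6; i++)
		printf("%u ", next_wq_index(4)); /* prints 1 2 3 0 1 2 */
	printf("\n");
	return 0;
}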
/**
@ -6156,7 +6155,7 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
return IOCB_ERROR;
if (piocb->iocb_flag & LPFC_IO_FCP) {
fcp_wqidx = lpfc_sli4_scmd_to_wqidx_distr(phba, piocb);
fcp_wqidx = lpfc_sli4_scmd_to_wqidx_distr(phba);
if (lpfc_sli4_wq_put(phba->sli4_hba.fcp_wq[fcp_wqidx], &wqe))
return IOCB_ERROR;
} else {
@ -6327,7 +6326,7 @@ lpfc_sli_async_event_handler(struct lpfc_hba * phba,
KERN_ERR,
LOG_SLI,
"0346 Ring %d handler: unexpected ASYNC_STATUS"
" evt_code 0x%x \n"
" evt_code 0x%x\n"
"W0 0x%08x W1 0x%08x W2 0x%08x W3 0x%08x\n"
"W4 0x%08x W5 0x%08x W6 0x%08x W7 0x%08x\n"
"W8 0x%08x W9 0x%08x W10 0x%08x W11 0x%08x\n"
@ -6789,6 +6788,33 @@ lpfc_sli_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt)
}
/**
* lpfc_sli_bemem_bcopy - SLI memory copy function
* @srcp: Source memory pointer.
* @destp: Destination memory pointer.
* @cnt: Number of bytes to be copied (copied one 32-bit word at a time).
*
* This function is used for copying data from a data structure with
* big endian representation to local endianness.
* This function can be called with or without a lock held.
**/
void
lpfc_sli_bemem_bcopy(void *srcp, void *destp, uint32_t cnt)
{
uint32_t *src = srcp;
uint32_t *dest = destp;
uint32_t ldata;
int i;
for (i = 0; i < (int)cnt; i += sizeof(uint32_t)) {
ldata = *src;
ldata = be32_to_cpu(ldata);
*dest = ldata;
src++;
dest++;
}
}
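For reference, the same conversion can be expressed in user space with the C library byte-order helpers in place of the kernel's be32_to_cpu(); this is only a sketch with a made-up payload, not the driver's code:

#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h> /* ntohl(): network (big-endian) to host */

static void bemem_bcopy(const void *srcp, void *destp, uint32_t cnt)
{
	const uint32_t *src = srcp;
	uint32_t *dest = destp;

	/* cnt is a byte count; copy one 32-bit word per iteration. */
	for (uint32_t i = 0; i < cnt; i += sizeof(uint32_t))
		*dest++ = ntohl(*src++);
}

int main(void)
{
	uint32_t be[2] = { htonl(0x11223344), htonl(0x55667788) };
	uint32_t host[2];

	bemem_bcopy(be, host, sizeof(be));
	printf("0x%08x 0x%08x\n", host[0], host[1]);
	return 0;
}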
/**
* lpfc_sli_ringpostbuf_put - Function to add a buffer to postbufq
* @phba: Pointer to HBA context object.
@ -7678,12 +7704,6 @@ lpfc_sli4_eratt_read(struct lpfc_hba *phba)
"online0_reg=0x%x, online1_reg=0x%x\n",
uerr_sta_lo, uerr_sta_hi,
onlnreg0, onlnreg1);
/* TEMP: as the driver error recover logic is not
* fully developed, we just log the error message
* and the device error attention action is now
* temporarily disabled.
*/
return 0;
phba->work_status[0] = uerr_sta_lo;
phba->work_status[1] = uerr_sta_hi;
/* Set the driver HA work bitmap */
@ -9499,8 +9519,7 @@ lpfc_eq_create(struct lpfc_hba *phba, struct lpfc_queue *eq, uint16_t imax)
eq->host_index = 0;
eq->hba_index = 0;
if (rc != MBX_TIMEOUT)
mempool_free(mbox, phba->mbox_mem_pool);
mempool_free(mbox, phba->mbox_mem_pool);
return status;
}
@ -9604,10 +9623,9 @@ lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response);
cq->host_index = 0;
cq->hba_index = 0;
out:
if (rc != MBX_TIMEOUT)
mempool_free(mbox, phba->mbox_mem_pool);
out:
mempool_free(mbox, phba->mbox_mem_pool);
return status;
}
@ -9712,8 +9730,7 @@ lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq,
/* link the mq onto the parent cq child list */
list_add_tail(&mq->list, &cq->child_list);
out:
if (rc != MBX_TIMEOUT)
mempool_free(mbox, phba->mbox_mem_pool);
mempool_free(mbox, phba->mbox_mem_pool);
return status;
}
@ -9795,8 +9812,7 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
/* link the wq onto the parent cq child list */
list_add_tail(&wq->list, &cq->child_list);
out:
if (rc != MBX_TIMEOUT)
mempool_free(mbox, phba->mbox_mem_pool);
mempool_free(mbox, phba->mbox_mem_pool);
return status;
}
@ -9970,8 +9986,7 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
list_add_tail(&drq->list, &cq->child_list);
out:
if (rc != MBX_TIMEOUT)
mempool_free(mbox, phba->mbox_mem_pool);
mempool_free(mbox, phba->mbox_mem_pool);
return status;
}
@ -10026,8 +10041,7 @@ lpfc_eq_destroy(struct lpfc_hba *phba, struct lpfc_queue *eq)
/* Remove eq from any list */
list_del_init(&eq->list);
if (rc != MBX_TIMEOUT)
mempool_free(mbox, eq->phba->mbox_mem_pool);
mempool_free(mbox, eq->phba->mbox_mem_pool);
return status;
}
@ -10080,8 +10094,7 @@ lpfc_cq_destroy(struct lpfc_hba *phba, struct lpfc_queue *cq)
}
/* Remove cq from any list */
list_del_init(&cq->list);
if (rc != MBX_TIMEOUT)
mempool_free(mbox, cq->phba->mbox_mem_pool);
mempool_free(mbox, cq->phba->mbox_mem_pool);
return status;
}
@ -10134,8 +10147,7 @@ lpfc_mq_destroy(struct lpfc_hba *phba, struct lpfc_queue *mq)
}
/* Remove mq from any list */
list_del_init(&mq->list);
if (rc != MBX_TIMEOUT)
mempool_free(mbox, mq->phba->mbox_mem_pool);
mempool_free(mbox, mq->phba->mbox_mem_pool);
return status;
}
@ -10187,8 +10199,7 @@ lpfc_wq_destroy(struct lpfc_hba *phba, struct lpfc_queue *wq)
}
/* Remove wq from any list */
list_del_init(&wq->list);
if (rc != MBX_TIMEOUT)
mempool_free(mbox, wq->phba->mbox_mem_pool);
mempool_free(mbox, wq->phba->mbox_mem_pool);
return status;
}
@ -10258,8 +10269,7 @@ lpfc_rq_destroy(struct lpfc_hba *phba, struct lpfc_queue *hrq,
}
list_del_init(&hrq->list);
list_del_init(&drq->list);
if (rc != MBX_TIMEOUT)
mempool_free(mbox, hrq->phba->mbox_mem_pool);
mempool_free(mbox, hrq->phba->mbox_mem_pool);
return status;
}
@ -10933,6 +10943,7 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
first_iocbq = lpfc_sli_get_iocbq(vport->phba);
if (first_iocbq) {
/* Initialize the first IOCB. */
first_iocbq->iocb.unsli3.rcvsli3.acc_len = 0;
first_iocbq->iocb.ulpStatus = IOSTAT_SUCCESS;
first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_SEQ64_CX;
first_iocbq->iocb.ulpContext = be16_to_cpu(fc_hdr->fh_ox_id);
@ -10945,6 +10956,8 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
first_iocbq->iocb.un.cont64[0].tus.f.bdeSize =
LPFC_DATA_BUF_SIZE;
first_iocbq->iocb.un.rcvels.remoteID = sid;
first_iocbq->iocb.unsli3.rcvsli3.acc_len +=
bf_get(lpfc_rcqe_length, &seq_dmabuf->rcqe);
}
iocbq = first_iocbq;
/*
@ -10961,6 +10974,8 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
iocbq->iocb.ulpBdeCount++;
iocbq->iocb.unsli3.rcvsli3.bde2.tus.f.bdeSize =
LPFC_DATA_BUF_SIZE;
first_iocbq->iocb.unsli3.rcvsli3.acc_len +=
bf_get(lpfc_rcqe_length, &seq_dmabuf->rcqe);
} else {
iocbq = lpfc_sli_get_iocbq(vport->phba);
if (!iocbq) {
@ -10978,6 +10993,8 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
iocbq->iocb.ulpBdeCount = 1;
iocbq->iocb.un.cont64[0].tus.f.bdeSize =
LPFC_DATA_BUF_SIZE;
first_iocbq->iocb.unsli3.rcvsli3.acc_len +=
bf_get(lpfc_rcqe_length, &seq_dmabuf->rcqe);
iocbq->iocb.un.rcvels.remoteID = sid;
list_add_tail(&iocbq->list, &first_iocbq->list);
}
@ -11324,7 +11341,7 @@ lpfc_sli4_init_vpi(struct lpfc_hba *phba, uint16_t vpi)
mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!mboxq)
return -ENOMEM;
lpfc_init_vpi(mboxq, vpi);
lpfc_init_vpi(phba, mboxq, vpi);
mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_INIT_VPI);
rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
if (rc != MBX_TIMEOUT)
@ -11519,6 +11536,7 @@ lpfc_sli4_read_fcf_record(struct lpfc_hba *phba, uint16_t fcf_index)
uint32_t alloc_len, req_len;
struct lpfc_mbx_read_fcf_tbl *read_fcf;
phba->fcoe_eventtag_at_fcf_scan = phba->fcoe_eventtag;
mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!mboxq) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@ -11570,7 +11588,140 @@ lpfc_sli4_read_fcf_record(struct lpfc_hba *phba, uint16_t fcf_index)
if (rc == MBX_NOT_FINISHED) {
lpfc_sli4_mbox_cmd_free(phba, mboxq);
error = -EIO;
} else
} else {
spin_lock_irq(&phba->hbalock);
phba->hba_flag |= FCF_DISC_INPROGRESS;
spin_unlock_irq(&phba->hbalock);
error = 0;
}
return error;
}
/**
* lpfc_sli_read_link_ste - Read region 23 to decide if link is disabled.
* @phba: pointer to lpfc hba data structure.
*
* This function reads region 23 and parses the TLV for port status to
* decide if the user disabled the port. If the TLV indicates the
* port is disabled, the hba_flag is set accordingly.
**/
void
lpfc_sli_read_link_ste(struct lpfc_hba *phba)
{
LPFC_MBOXQ_t *pmb = NULL;
MAILBOX_t *mb;
uint8_t *rgn23_data = NULL;
uint32_t offset = 0, data_size, sub_tlv_len, tlv_offset;
int rc;
pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!pmb) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"2600 lpfc_sli_read_serdes_param failed to"
" allocate mailbox memory\n");
goto out;
}
mb = &pmb->u.mb;
/* Get adapter Region 23 data */
rgn23_data = kzalloc(DMP_RGN23_SIZE, GFP_KERNEL);
if (!rgn23_data)
goto out;
do {
lpfc_dump_mem(phba, pmb, offset, DMP_REGION_23);
rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
if (rc != MBX_SUCCESS) {
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
"2601 lpfc_sli_read_link_ste failed to"
" read config region 23 rc 0x%x Status 0x%x\n",
rc, mb->mbxStatus);
mb->un.varDmp.word_cnt = 0;
}
/*
* dump mem may return zero when finished, or we may have hit a
* mailbox error; either way we are done.
*/
if (mb->un.varDmp.word_cnt == 0)
break;
if (mb->un.varDmp.word_cnt > DMP_RGN23_SIZE - offset)
mb->un.varDmp.word_cnt = DMP_RGN23_SIZE - offset;
lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
rgn23_data + offset,
mb->un.varDmp.word_cnt);
offset += mb->un.varDmp.word_cnt;
} while (mb->un.varDmp.word_cnt && offset < DMP_RGN23_SIZE);
data_size = offset;
offset = 0;
if (!data_size)
goto out;
/* Check the region signature first */
if (memcmp(&rgn23_data[offset], LPFC_REGION23_SIGNATURE, 4)) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"2619 Config region 23 has bad signature\n");
goto out;
}
offset += 4;
/* Check the data structure version */
if (rgn23_data[offset] != LPFC_REGION23_VERSION) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"2620 Config region 23 has bad version\n");
goto out;
}
offset += 4;
/* Parse TLV entries in the region */
while (offset < data_size) {
if (rgn23_data[offset] == LPFC_REGION23_LAST_REC)
break;
/*
* If the TLV is not a driver-specific TLV or the driver id is
* not the Linux driver id, skip the record.
*/
if ((rgn23_data[offset] != DRIVER_SPECIFIC_TYPE) ||
(rgn23_data[offset + 2] != LINUX_DRIVER_ID) ||
(rgn23_data[offset + 3] != 0)) {
offset += rgn23_data[offset + 1] * 4 + 4;
continue;
}
/* Driver found a driver specific TLV in the config region */
sub_tlv_len = rgn23_data[offset + 1] * 4;
offset += 4;
tlv_offset = 0;
/*
* Search for configured port state sub-TLV.
*/
while ((offset < data_size) &&
(tlv_offset < sub_tlv_len)) {
if (rgn23_data[offset] == LPFC_REGION23_LAST_REC) {
offset += 4;
tlv_offset += 4;
break;
}
if (rgn23_data[offset] != PORT_STE_TYPE) {
offset += rgn23_data[offset + 1] * 4 + 4;
tlv_offset += rgn23_data[offset + 1] * 4 + 4;
continue;
}
/* This HBA contains PORT_STE configured */
if (!rgn23_data[offset + 2])
phba->hba_flag |= LINK_DISABLED;
goto out;
}
}
out:
if (pmb)
mempool_free(pmb, phba->mbox_mem_pool);
kfree(rgn23_data);
return;
}
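The record walk above follows a simple TLV layout: a one-byte type, a one-byte payload length counted in 32-bit words, two record-specific id bytes, then the payload, with 0xff terminating the region. A condensed sketch of the same walk (the fixed 8-byte signature/version prefix reflects the code above; the helper and sample data are illustrative):

#include <stdint.h>
#include <stdio.h>

#define LPFC_REGION23_LAST_REC 0xff

/* Return the offset of the first record of the wanted type, or -1. */
static int find_record(const uint8_t *rgn, int size, uint8_t want)
{
	int offset = 8; /* skip the 4-byte signature and 4-byte version */

	while (offset < size) {
		if (rgn[offset] == LPFC_REGION23_LAST_REC)
			break;
		if (rgn[offset] == want)
			return offset;
		offset += rgn[offset + 1] * 4 + 4; /* header + payload */
	}
	return -1;
}

int main(void)
{
	/* "RG23", version 1, one 4-byte record of type 0xA2, terminator */
	uint8_t rgn[] = { 'R', 'G', '2', '3', 1, 0, 0, 0,
			  0xA2, 0x01, 0x20, 0x00, 0, 0, 0, 0, 0xff };

	printf("driver record at %d\n",
	       find_record(rgn, (int)sizeof(rgn), 0xA2));
	return 0;
}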


@ -132,6 +132,7 @@ struct lpfc_sli4_link {
struct lpfc_fcf {
uint8_t fabric_name[8];
uint8_t switch_name[8];
uint8_t mac_addr[6];
uint16_t fcf_indx;
uint16_t fcfi;
@ -150,6 +151,10 @@ struct lpfc_fcf {
#define LPFC_REGION23_SIGNATURE "RG23"
#define LPFC_REGION23_VERSION 1
#define LPFC_REGION23_LAST_REC 0xff
#define DRIVER_SPECIFIC_TYPE 0xA2
#define LINUX_DRIVER_ID 0x20
#define PORT_STE_TYPE 0x1
struct lpfc_fip_param_hdr {
uint8_t type;
#define FCOE_PARAM_TYPE 0xA0


@ -18,7 +18,7 @@
* included with this package. *
*******************************************************************/
#define LPFC_DRIVER_VERSION "8.3.3"
#define LPFC_DRIVER_VERSION "8.3.4"
#define LPFC_DRIVER_NAME "lpfc"
#define LPFC_SP_DRIVER_HANDLER_NAME "lpfc:sp"


@ -313,22 +313,6 @@ lpfc_vport_create(struct fc_vport *fc_vport, bool disable)
goto error_out;
}
/*
* In SLI4, the vpi must be activated before it can be used
* by the port.
*/
if (phba->sli_rev == LPFC_SLI_REV4) {
rc = lpfc_sli4_init_vpi(phba, vpi);
if (rc) {
lpfc_printf_log(phba, KERN_ERR, LOG_VPORT,
"1838 Failed to INIT_VPI on vpi %d "
"status %d\n", vpi, rc);
rc = VPORT_NORESOURCES;
lpfc_free_vpi(phba, vpi);
goto error_out;
}
}
/* Assign an unused board number */
if ((instance = lpfc_get_instance()) < 0) {
lpfc_printf_log(phba, KERN_ERR, LOG_VPORT,
@ -367,12 +351,8 @@ lpfc_vport_create(struct fc_vport *fc_vport, bool disable)
goto error_out;
}
memcpy(vport->fc_portname.u.wwn, vport->fc_sparam.portName.u.wwn, 8);
memcpy(vport->fc_nodename.u.wwn, vport->fc_sparam.nodeName.u.wwn, 8);
if (fc_vport->node_name != 0)
u64_to_wwn(fc_vport->node_name, vport->fc_nodename.u.wwn);
if (fc_vport->port_name != 0)
u64_to_wwn(fc_vport->port_name, vport->fc_portname.u.wwn);
u64_to_wwn(fc_vport->node_name, vport->fc_nodename.u.wwn);
u64_to_wwn(fc_vport->port_name, vport->fc_portname.u.wwn);
memcpy(&vport->fc_sparam.portName, vport->fc_portname.u.wwn, 8);
memcpy(&vport->fc_sparam.nodeName, vport->fc_nodename.u.wwn, 8);
@ -404,7 +384,34 @@ lpfc_vport_create(struct fc_vport *fc_vport, bool disable)
*(struct lpfc_vport **)fc_vport->dd_data = vport;
vport->fc_vport = fc_vport;
/*
* In SLI4, the vpi must be activated before it can be used
* by the port.
*/
if ((phba->sli_rev == LPFC_SLI_REV4) &&
(pport->vfi_state & LPFC_VFI_REGISTERED)) {
rc = lpfc_sli4_init_vpi(phba, vpi);
if (rc) {
lpfc_printf_log(phba, KERN_ERR, LOG_VPORT,
"1838 Failed to INIT_VPI on vpi %d "
"status %d\n", vpi, rc);
rc = VPORT_NORESOURCES;
lpfc_free_vpi(phba, vpi);
goto error_out;
}
} else if (phba->sli_rev == LPFC_SLI_REV4) {
/*
* Driver cannot INIT_VPI now. Set the flags to
* init_vpi when reg_vfi complete.
*/
vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
lpfc_vport_set_state(vport, FC_VPORT_LINKDOWN);
rc = VPORT_OK;
goto out;
}
if ((phba->link_state < LPFC_LINK_UP) ||
(pport->port_state < LPFC_FABRIC_CFG_LINK) ||
(phba->fc_topology == TOPOLOGY_LOOP)) {
lpfc_vport_set_state(vport, FC_VPORT_LINKDOWN);
rc = VPORT_OK;
@ -661,7 +668,7 @@ lpfc_vport_delete(struct fc_vport *fc_vport)
lpfc_printf_log(vport->phba, KERN_WARNING,
LOG_VPORT,
"1829 CT command failed to "
"delete objects on fabric. \n");
"delete objects on fabric\n");
}
/* First look for the Fabric ndlp */
ndlp = lpfc_findnode_did(vport, Fabric_DID);


@ -94,7 +94,7 @@ _base_fault_reset_work(struct work_struct *work)
int rc;
spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
if (ioc->ioc_reset_in_progress)
if (ioc->shost_recovery)
goto rearm_timer;
spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
@ -687,6 +687,14 @@ _base_unmask_interrupts(struct MPT2SAS_ADAPTER *ioc)
ioc->mask_interrupts = 0;
}
union reply_descriptor {
u64 word;
struct {
u32 low;
u32 high;
} u;
};
/**
* _base_interrupt - MPT adapter (IOC) specific interrupt handler.
* @irq: irq number (not used)
@ -698,47 +706,38 @@ _base_unmask_interrupts(struct MPT2SAS_ADAPTER *ioc)
static irqreturn_t
_base_interrupt(int irq, void *bus_id)
{
union reply_descriptor {
u64 word;
struct {
u32 low;
u32 high;
} u;
};
union reply_descriptor rd;
u32 post_index, post_index_next, completed_cmds;
u32 completed_cmds;
u8 request_desript_type;
u16 smid;
u8 cb_idx;
u32 reply;
u8 VF_ID;
int i;
struct MPT2SAS_ADAPTER *ioc = bus_id;
Mpi2ReplyDescriptorsUnion_t *rpf;
if (ioc->mask_interrupts)
return IRQ_NONE;
post_index = ioc->reply_post_host_index;
request_desript_type = ioc->reply_post_free[post_index].
Default.ReplyFlags & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
rpf = &ioc->reply_post_free[ioc->reply_post_host_index];
request_desript_type = rpf->Default.ReplyFlags
& MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
if (request_desript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
return IRQ_NONE;
completed_cmds = 0;
do {
rd.word = ioc->reply_post_free[post_index].Words;
rd.word = rpf->Words;
if (rd.u.low == UINT_MAX || rd.u.high == UINT_MAX)
goto out;
reply = 0;
cb_idx = 0xFF;
smid = le16_to_cpu(ioc->reply_post_free[post_index].
Default.DescriptorTypeDependent1);
VF_ID = ioc->reply_post_free[post_index].
Default.VF_ID;
smid = le16_to_cpu(rpf->Default.DescriptorTypeDependent1);
VF_ID = rpf->Default.VF_ID;
if (request_desript_type ==
MPI2_RPY_DESCRIPT_FLAGS_ADDRESS_REPLY) {
reply = le32_to_cpu(ioc->reply_post_free[post_index].
AddressReply.ReplyFrameAddress);
reply = le32_to_cpu
(rpf->AddressReply.ReplyFrameAddress);
} else if (request_desript_type ==
MPI2_RPY_DESCRIPT_FLAGS_TARGET_COMMAND_BUFFER)
goto next;
@ -765,21 +764,27 @@ _base_interrupt(int irq, void *bus_id)
0 : ioc->reply_free_host_index + 1;
ioc->reply_free[ioc->reply_free_host_index] =
cpu_to_le32(reply);
wmb();
writel(ioc->reply_free_host_index,
&ioc->chip->ReplyFreeHostIndex);
wmb();
}
next:
post_index_next = (post_index == (ioc->reply_post_queue_depth -
1)) ? 0 : post_index + 1;
rpf->Words = ULLONG_MAX;
ioc->reply_post_host_index = (ioc->reply_post_host_index ==
(ioc->reply_post_queue_depth - 1)) ? 0 :
ioc->reply_post_host_index + 1;
request_desript_type =
ioc->reply_post_free[post_index_next].Default.ReplyFlags
& MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
ioc->reply_post_free[ioc->reply_post_host_index].Default.
ReplyFlags & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
completed_cmds++;
if (request_desript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
goto out;
post_index = post_index_next;
if (!ioc->reply_post_host_index)
rpf = ioc->reply_post_free;
else
rpf++;
} while (1);
out:
@ -787,19 +792,8 @@ _base_interrupt(int irq, void *bus_id)
if (!completed_cmds)
return IRQ_NONE;
/* reply post descriptor handling */
post_index_next = ioc->reply_post_host_index;
for (i = 0 ; i < completed_cmds; i++) {
post_index = post_index_next;
/* poison the reply post descriptor */
ioc->reply_post_free[post_index_next].Words = ULLONG_MAX;
post_index_next = (post_index ==
(ioc->reply_post_queue_depth - 1))
? 0 : post_index + 1;
}
ioc->reply_post_host_index = post_index_next;
writel(post_index_next, &ioc->chip->ReplyPostHostIndex);
wmb();
writel(ioc->reply_post_host_index, &ioc->chip->ReplyPostHostIndex);
return IRQ_HANDLED;
}
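The net effect of the interrupt rework above is that the handler keeps a running pointer into the reply-post ring and poisons each descriptor as it is consumed, rather than recomputing array offsets and doing a second poisoning pass at the end. A stripped-down sketch of that index/pointer advance (the queue depth is an arbitrary example value):

#include <stdint.h>
#include <stdio.h>

#define QUEUE_DEPTH 8

static uint64_t ring[QUEUE_DEPTH];

int main(void)
{
	uint32_t host_index = 0;
	uint64_t *rpf = &ring[host_index];

	for (int i = 0; i < 10; i++) {
		*rpf = ~0ULL; /* poison the consumed descriptor */
		host_index = (host_index == QUEUE_DEPTH - 1) ?
				0 : host_index + 1;
		/* re-point at the ring base when the index wraps */
		rpf = host_index ? rpf + 1 : ring;
		printf("next index %u\n", host_index);
	}
	return 0;
}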
@ -1542,6 +1536,8 @@ _base_display_ioc_capabilities(struct MPT2SAS_ADAPTER *ioc)
(ioc->bios_pg3.BiosVersion & 0x0000FF00) >> 8,
ioc->bios_pg3.BiosVersion & 0x000000FF);
_base_display_dell_branding(ioc);
printk(MPT2SAS_INFO_FMT "Protocol=(", ioc->name);
if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_SCSI_INITIATOR) {
@ -1554,8 +1550,6 @@ _base_display_ioc_capabilities(struct MPT2SAS_ADAPTER *ioc)
i++;
}
_base_display_dell_branding(ioc);
i = 0;
printk("), ");
printk("Capabilities=(");
@ -1627,6 +1621,9 @@ _base_static_config_pages(struct MPT2SAS_ADAPTER *ioc)
u32 iounit_pg1_flags;
mpt2sas_config_get_manufacturing_pg0(ioc, &mpi_reply, &ioc->manu_pg0);
if (ioc->ir_firmware)
mpt2sas_config_get_manufacturing_pg10(ioc, &mpi_reply,
&ioc->manu_pg10);
mpt2sas_config_get_bios_pg2(ioc, &mpi_reply, &ioc->bios_pg2);
mpt2sas_config_get_bios_pg3(ioc, &mpi_reply, &ioc->bios_pg3);
mpt2sas_config_get_ioc_pg8(ioc, &mpi_reply, &ioc->ioc_pg8);
@ -1647,7 +1644,7 @@ _base_static_config_pages(struct MPT2SAS_ADAPTER *ioc)
iounit_pg1_flags |=
MPI2_IOUNITPAGE1_DISABLE_TASK_SET_FULL_HANDLING;
ioc->iounit_pg1.Flags = cpu_to_le32(iounit_pg1_flags);
mpt2sas_config_set_iounit_pg1(ioc, &mpi_reply, ioc->iounit_pg1);
mpt2sas_config_set_iounit_pg1(ioc, &mpi_reply, &ioc->iounit_pg1);
}
/**
@ -3303,13 +3300,11 @@ mpt2sas_base_attach(struct MPT2SAS_ADAPTER *ioc)
ioc->tm_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
ioc->tm_cmds.status = MPT2_CMD_NOT_USED;
mutex_init(&ioc->tm_cmds.mutex);
init_completion(&ioc->tm_cmds.done);
/* config page internal command bits */
ioc->config_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
ioc->config_cmds.status = MPT2_CMD_NOT_USED;
mutex_init(&ioc->config_cmds.mutex);
init_completion(&ioc->config_cmds.done);
/* ctl module internal command bits */
ioc->ctl_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
@ -3433,6 +3428,7 @@ _base_reset_handler(struct MPT2SAS_ADAPTER *ioc, int reset_phase)
if (ioc->config_cmds.status & MPT2_CMD_PENDING) {
ioc->config_cmds.status |= MPT2_CMD_RESET;
mpt2sas_base_free_smid(ioc, ioc->config_cmds.smid);
ioc->config_cmds.smid = USHORT_MAX;
complete(&ioc->config_cmds.done);
}
break;
@ -3501,20 +3497,13 @@ mpt2sas_base_hard_reset_handler(struct MPT2SAS_ADAPTER *ioc, int sleep_flag,
__func__));
spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
if (ioc->ioc_reset_in_progress) {
if (ioc->shost_recovery) {
spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
printk(MPT2SAS_ERR_FMT "%s: busy\n",
ioc->name, __func__);
return -EBUSY;
}
ioc->ioc_reset_in_progress = 1;
ioc->shost_recovery = 1;
if (ioc->shost->shost_state == SHOST_RUNNING) {
/* set back to SHOST_RUNNING in mpt2sas_scsih.c */
scsi_host_set_state(ioc->shost, SHOST_RECOVERY);
printk(MPT2SAS_INFO_FMT "putting controller into "
"SHOST_RECOVERY\n", ioc->name);
}
spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
_base_reset_handler(ioc, MPT2_IOC_PRE_RESET);
@ -3534,7 +3523,10 @@ mpt2sas_base_hard_reset_handler(struct MPT2SAS_ADAPTER *ioc, int sleep_flag,
ioc->name, __func__, ((r == 0) ? "SUCCESS" : "FAILED")));
spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
ioc->ioc_reset_in_progress = 0;
ioc->shost_recovery = 0;
spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
if (!r)
_base_reset_handler(ioc, MPT2_IOC_RUNNING);
return r;
}


@ -69,10 +69,10 @@
#define MPT2SAS_DRIVER_NAME "mpt2sas"
#define MPT2SAS_AUTHOR "LSI Corporation <DL-MPTFusionLinux@lsi.com>"
#define MPT2SAS_DESCRIPTION "LSI MPT Fusion SAS 2.0 Device Driver"
#define MPT2SAS_DRIVER_VERSION "01.100.04.00"
#define MPT2SAS_DRIVER_VERSION "01.100.06.00"
#define MPT2SAS_MAJOR_VERSION 01
#define MPT2SAS_MINOR_VERSION 100
#define MPT2SAS_BUILD_VERSION 04
#define MPT2SAS_BUILD_VERSION 06
#define MPT2SAS_RELEASE_VERSION 00
/*
@ -119,6 +119,7 @@
#define MPT2_IOC_PRE_RESET 1 /* prior to host reset */
#define MPT2_IOC_AFTER_RESET 2 /* just after host reset */
#define MPT2_IOC_DONE_RESET 3 /* links re-initialized */
#define MPT2_IOC_RUNNING 4 /* shost running */
/*
* logging format
@ -196,6 +197,38 @@ struct MPT2SAS_TARGET {
* @block: device is in SDEV_BLOCK state
* @tlr_snoop_check: flag used in determining whether to disable TLR
*/
/* OEM Identifiers */
#define MFG10_OEM_ID_INVALID (0x00000000)
#define MFG10_OEM_ID_DELL (0x00000001)
#define MFG10_OEM_ID_FSC (0x00000002)
#define MFG10_OEM_ID_SUN (0x00000003)
#define MFG10_OEM_ID_IBM (0x00000004)
/* GENERIC Flags 0*/
#define MFG10_GF0_OCE_DISABLED (0x00000001)
#define MFG10_GF0_R1E_DRIVE_COUNT (0x00000002)
#define MFG10_GF0_R10_DISPLAY (0x00000004)
#define MFG10_GF0_SSD_DATA_SCRUB_DISABLE (0x00000008)
#define MFG10_GF0_SINGLE_DRIVE_R0 (0x00000010)
/* OEM Specific Flags will come from OEM specific header files */
typedef struct _MPI2_CONFIG_PAGE_MAN_10 {
MPI2_CONFIG_PAGE_HEADER Header; /* 00h */
U8 OEMIdentifier; /* 04h */
U8 Reserved1; /* 05h */
U16 Reserved2; /* 06h */
U32 Reserved3; /* 08h */
U32 GenericFlags0; /* 0Ch */
U32 GenericFlags1; /* 10h */
U32 Reserved4; /* 14h */
U32 OEMSpecificFlags0; /* 18h */
U32 OEMSpecificFlags1; /* 1Ch */
U32 Reserved5[18]; /* 20h-64h */
} MPI2_CONFIG_PAGE_MAN_10,
MPI2_POINTER PTR_MPI2_CONFIG_PAGE_MAN_10,
Mpi2ManufacturingPage10_t, MPI2_POINTER pMpi2ManufacturingPage10_t;
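Manufacturing page 10 is consumed later in this diff when _scsih_slave_configure() decides whether to present a RAID1E volume as RAID10: OEM id set, the display flag present, and an even physical-disk count. A hedged sketch of that decision using only the flag shown above (the helper and sample arguments are illustrative, not driver code):

#include <stdint.h>
#include <stdio.h>

#define MFG10_GF0_R10_DISPLAY (0x00000004)

/* Mirror of the RAID1E/RAID10 naming decision from the scsih diff. */
static const char *raid1e_level_name(uint32_t oem_id, uint32_t gf0,
				     uint32_t num_pds)
{
	if (oem_id && (gf0 & MFG10_GF0_R10_DISPLAY) && !(num_pds % 2))
		return "RAID10";
	return "RAID1E";
}

int main(void)
{
	printf("%s\n", raid1e_level_name(0x1 /* example OEM id */,
					 MFG10_GF0_R10_DISPLAY, 4));
	return 0;
}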
struct MPT2SAS_DEVICE {
struct MPT2SAS_TARGET *sas_target;
unsigned int lun;
@ -431,7 +464,7 @@ typedef void (*MPT_ADD_SGE)(void *paddr, u32 flags_length, dma_addr_t dma_addr);
* @fw_event_list: list of fw events
* @aen_event_read_flag: event log was read
* @broadcast_aen_busy: broadcast aen waiting to be serviced
* @ioc_reset_in_progress: host reset in progress
* @shost_recovery: host reset in progress
* @ioc_reset_in_progress_lock:
* @ioc_link_reset_in_progress: phy/hard reset in progress
* @ignore_loginfos: ignore loginfos during task management
@ -460,6 +493,7 @@ typedef void (*MPT_ADD_SGE)(void *paddr, u32 flags_length, dma_addr_t dma_addr);
* @facts: static facts data
* @pfacts: static port facts data
* @manu_pg0: static manufacturing page 0
* @manu_pg10: static manufacturing page 10
* @bios_pg2: static bios page 2
* @bios_pg3: static bios page 3
* @ioc_pg8: static ioc page 8
@ -544,7 +578,6 @@ struct MPT2SAS_ADAPTER {
/* misc flags */
int aen_event_read_flag;
u8 broadcast_aen_busy;
u8 ioc_reset_in_progress;
u8 shost_recovery;
spinlock_t ioc_reset_in_progress_lock;
u8 ioc_link_reset_in_progress;
@ -663,6 +696,7 @@ struct MPT2SAS_ADAPTER {
dma_addr_t diag_buffer_dma[MPI2_DIAG_BUF_TYPE_COUNT];
u8 diag_buffer_status[MPI2_DIAG_BUF_TYPE_COUNT];
u32 unique_id[MPI2_DIAG_BUF_TYPE_COUNT];
Mpi2ManufacturingPage10_t manu_pg10;
u32 product_specific[MPI2_DIAG_BUF_TYPE_COUNT][23];
u32 diagnostic_flags[MPI2_DIAG_BUF_TYPE_COUNT];
};
@ -734,6 +768,8 @@ void mpt2sas_config_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 VF_ID, u32 re
int mpt2sas_config_get_number_hba_phys(struct MPT2SAS_ADAPTER *ioc, u8 *num_phys);
int mpt2sas_config_get_manufacturing_pg0(struct MPT2SAS_ADAPTER *ioc,
Mpi2ConfigReply_t *mpi_reply, Mpi2ManufacturingPage0_t *config_page);
int mpt2sas_config_get_manufacturing_pg10(struct MPT2SAS_ADAPTER *ioc,
Mpi2ConfigReply_t *mpi_reply, Mpi2ManufacturingPage10_t *config_page);
int mpt2sas_config_get_bios_pg2(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
*mpi_reply, Mpi2BiosPage2_t *config_page);
int mpt2sas_config_get_bios_pg3(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
@ -749,7 +785,7 @@ int mpt2sas_config_get_sas_iounit_pg0(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigRep
int mpt2sas_config_get_iounit_pg1(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
*mpi_reply, Mpi2IOUnitPage1_t *config_page);
int mpt2sas_config_set_iounit_pg1(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
*mpi_reply, Mpi2IOUnitPage1_t config_page);
*mpi_reply, Mpi2IOUnitPage1_t *config_page);
int mpt2sas_config_get_sas_iounit_pg1(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
*mpi_reply, Mpi2SasIOUnitPage1_t *config_page, u16 sz);
int mpt2sas_config_get_ioc_pg8(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
@ -776,7 +812,6 @@ int mpt2sas_config_get_volume_handle(struct MPT2SAS_ADAPTER *ioc, u16 pd_handle,
u16 *volume_handle);
int mpt2sas_config_get_volume_wwid(struct MPT2SAS_ADAPTER *ioc, u16 volume_handle,
u64 *wwid);
/* ctl shared API */
extern struct device_attribute *mpt2sas_host_attrs[];
extern struct device_attribute *mpt2sas_dev_attrs[];
@ -798,9 +833,11 @@ int mpt2sas_transport_add_host_phy(struct MPT2SAS_ADAPTER *ioc, struct _sas_phy
*mpt2sas_phy, Mpi2SasPhyPage0_t phy_pg0, struct device *parent_dev);
int mpt2sas_transport_add_expander_phy(struct MPT2SAS_ADAPTER *ioc, struct _sas_phy
*mpt2sas_phy, Mpi2ExpanderPage1_t expander_pg1, struct device *parent_dev);
void mpt2sas_transport_update_phy_link_change(struct MPT2SAS_ADAPTER *ioc, u16 handle,
void mpt2sas_transport_update_links(struct MPT2SAS_ADAPTER *ioc, u16 handle,
u16 attached_handle, u8 phy_number, u8 link_rate);
extern struct sas_function_template mpt2sas_transport_functions;
extern struct scsi_transport_template *mpt2sas_transport_template;
extern int scsi_internal_device_block(struct scsi_device *sdev);
extern int scsi_internal_device_unblock(struct scsi_device *sdev);
#endif /* MPT2SAS_BASE_H_INCLUDED */

File diff suppressed because it is too large


@ -1963,7 +1963,6 @@ _ctl_ioctl_main(struct file *file, unsigned int cmd, void __user *arg)
{
enum block_state state;
long ret = -EINVAL;
unsigned long flags;
state = (file->f_flags & O_NONBLOCK) ? NON_BLOCKING :
BLOCKING;
@ -1989,13 +1988,8 @@ _ctl_ioctl_main(struct file *file, unsigned int cmd, void __user *arg)
!ioc)
return -ENODEV;
spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
if (ioc->shost_recovery) {
spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock,
flags);
if (ioc->shost_recovery)
return -EAGAIN;
}
spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
if (_IOC_SIZE(cmd) == sizeof(struct mpt2_ioctl_command)) {
uarg = arg;
@ -2098,7 +2092,6 @@ _ctl_compat_mpt_command(struct file *file, unsigned cmd, unsigned long arg)
struct mpt2_ioctl_command karg;
struct MPT2SAS_ADAPTER *ioc;
enum block_state state;
unsigned long flags;
if (_IOC_SIZE(cmd) != sizeof(struct mpt2_ioctl_command32))
return -EINVAL;
@ -2113,13 +2106,8 @@ _ctl_compat_mpt_command(struct file *file, unsigned cmd, unsigned long arg)
if (_ctl_verify_adapter(karg32.hdr.ioc_number, &ioc) == -1 || !ioc)
return -ENODEV;
spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
if (ioc->shost_recovery) {
spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock,
flags);
if (ioc->shost_recovery)
return -EAGAIN;
}
spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
memset(&karg, 0, sizeof(struct mpt2_ioctl_command));
karg.hdr.ioc_number = karg32.hdr.ioc_number;


@ -103,7 +103,6 @@ struct sense_info {
};
#define MPT2SAS_RESCAN_AFTER_HOST_RESET (0xFFFF)
/**
* struct fw_event_work - firmware event struct
* @list: link list framework
@ -1502,7 +1501,13 @@ _scsih_slave_configure(struct scsi_device *sdev)
break;
case MPI2_RAID_VOL_TYPE_RAID1E:
qdepth = MPT2SAS_RAID_QUEUE_DEPTH;
r_level = "RAID1E";
if (ioc->manu_pg10.OEMIdentifier &&
(ioc->manu_pg10.GenericFlags0 &
MFG10_GF0_R10_DISPLAY) &&
!(raid_device->num_pds % 2))
r_level = "RAID10";
else
r_level = "RAID1E";
break;
case MPI2_RAID_VOL_TYPE_RAID1:
qdepth = MPT2SAS_RAID_QUEUE_DEPTH;
@ -1786,17 +1791,18 @@ mpt2sas_scsih_issue_tm(struct MPT2SAS_ADAPTER *ioc, u16 handle, uint lun,
u32 ioc_state;
unsigned long timeleft;
u8 VF_ID = 0;
unsigned long flags;
spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
if (ioc->tm_cmds.status != MPT2_CMD_NOT_USED ||
ioc->shost_recovery) {
spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
if (ioc->tm_cmds.status != MPT2_CMD_NOT_USED) {
printk(MPT2SAS_INFO_FMT "%s: tm_cmd busy!!!\n",
__func__, ioc->name);
return;
}
if (ioc->shost_recovery) {
printk(MPT2SAS_INFO_FMT "%s: host reset in progress!\n",
__func__, ioc->name);
return;
}
spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
ioc_state = mpt2sas_base_get_iocstate(ioc, 0);
if (ioc_state & MPI2_DOORBELL_USED) {
@ -1830,6 +1836,7 @@ mpt2sas_scsih_issue_tm(struct MPT2SAS_ADAPTER *ioc, u16 handle, uint lun,
mpi_request->TaskMID = cpu_to_le16(smid_task);
int_to_scsilun(lun, (struct scsi_lun *)mpi_request->LUN);
mpt2sas_scsih_set_tm_flag(ioc, handle);
init_completion(&ioc->tm_cmds.done);
mpt2sas_base_put_smid_hi_priority(ioc, smid, VF_ID);
timeleft = wait_for_completion_timeout(&ioc->tm_cmds.done, timeout*HZ);
mpt2sas_scsih_clear_tm_flag(ioc, handle);
@ -2222,7 +2229,7 @@ _scsih_ublock_io_device(struct MPT2SAS_ADAPTER *ioc, u16 handle)
MPT2SAS_INFO_FMT "SDEV_RUNNING: "
"handle(0x%04x)\n", ioc->name, handle));
sas_device_priv_data->block = 0;
scsi_device_set_state(sdev, SDEV_RUNNING);
scsi_internal_device_unblock(sdev);
}
}
}
@ -2251,7 +2258,7 @@ _scsih_block_io_device(struct MPT2SAS_ADAPTER *ioc, u16 handle)
MPT2SAS_INFO_FMT "SDEV_BLOCK: "
"handle(0x%04x)\n", ioc->name, handle));
sas_device_priv_data->block = 1;
scsi_device_set_state(sdev, SDEV_BLOCK);
scsi_internal_device_block(sdev);
}
}
}
@ -2327,6 +2334,7 @@ _scsih_block_io_to_children_attached_directly(struct MPT2SAS_ADAPTER *ioc,
u16 handle;
u16 reason_code;
u8 phy_number;
u8 link_rate;
for (i = 0; i < event_data->NumEntries; i++) {
handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle);
@ -2337,6 +2345,11 @@ _scsih_block_io_to_children_attached_directly(struct MPT2SAS_ADAPTER *ioc,
MPI2_EVENT_SAS_TOPO_RC_MASK;
if (reason_code == MPI2_EVENT_SAS_TOPO_RC_DELAY_NOT_RESPONDING)
_scsih_block_io_device(ioc, handle);
if (reason_code == MPI2_EVENT_SAS_TOPO_RC_PHY_CHANGED) {
link_rate = event_data->PHY[i].LinkRate >> 4;
if (link_rate >= MPI2_SAS_NEG_LINK_RATE_1_5)
_scsih_ublock_io_device(ioc, handle);
}
}
}
@ -2404,27 +2417,6 @@ _scsih_check_topo_delete_events(struct MPT2SAS_ADAPTER *ioc,
spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
}
/**
* _scsih_queue_rescan - queue a topology rescan from user context
* @ioc: per adapter object
*
* Return nothing.
*/
static void
_scsih_queue_rescan(struct MPT2SAS_ADAPTER *ioc)
{
struct fw_event_work *fw_event;
if (ioc->wait_for_port_enable_to_complete)
return;
fw_event = kzalloc(sizeof(struct fw_event_work), GFP_ATOMIC);
if (!fw_event)
return;
fw_event->event = MPT2SAS_RESCAN_AFTER_HOST_RESET;
fw_event->ioc = ioc;
_scsih_fw_event_add(ioc, fw_event);
}
/**
* _scsih_flush_running_cmds - completing outstanding commands.
* @ioc: per adapter object
@ -2455,46 +2447,6 @@ _scsih_flush_running_cmds(struct MPT2SAS_ADAPTER *ioc)
ioc->name, count));
}
/**
* mpt2sas_scsih_reset_handler - reset callback handler (for scsih)
* @ioc: per adapter object
* @reset_phase: phase
*
* The handler for doing any required cleanup or initialization.
*
* The reset phase can be MPT2_IOC_PRE_RESET, MPT2_IOC_AFTER_RESET,
* MPT2_IOC_DONE_RESET
*
* Return nothing.
*/
void
mpt2sas_scsih_reset_handler(struct MPT2SAS_ADAPTER *ioc, int reset_phase)
{
switch (reset_phase) {
case MPT2_IOC_PRE_RESET:
dtmprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: "
"MPT2_IOC_PRE_RESET\n", ioc->name, __func__));
_scsih_fw_event_off(ioc);
break;
case MPT2_IOC_AFTER_RESET:
dtmprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: "
"MPT2_IOC_AFTER_RESET\n", ioc->name, __func__));
if (ioc->tm_cmds.status & MPT2_CMD_PENDING) {
ioc->tm_cmds.status |= MPT2_CMD_RESET;
mpt2sas_base_free_smid(ioc, ioc->tm_cmds.smid);
complete(&ioc->tm_cmds.done);
}
_scsih_fw_event_on(ioc);
_scsih_flush_running_cmds(ioc);
break;
case MPT2_IOC_DONE_RESET:
dtmprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: "
"MPT2_IOC_DONE_RESET\n", ioc->name, __func__));
_scsih_queue_rescan(ioc);
break;
}
}
/**
* _scsih_setup_eedp - setup MPI request for EEDP transfer
* @scmd: pointer to scsi command object
@ -2615,7 +2567,6 @@ _scsih_qcmd(struct scsi_cmnd *scmd, void (*done)(struct scsi_cmnd *))
Mpi2SCSIIORequest_t *mpi_request;
u32 mpi_control;
u16 smid;
unsigned long flags;
scmd->scsi_done = done;
sas_device_priv_data = scmd->device->hostdata;
@ -2634,13 +2585,10 @@ _scsih_qcmd(struct scsi_cmnd *scmd, void (*done)(struct scsi_cmnd *))
}
/* see if we are busy with task management stuff */
spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
if (sas_target_priv_data->tm_busy ||
ioc->shost_recovery || ioc->ioc_link_reset_in_progress) {
spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
if (sas_target_priv_data->tm_busy)
return SCSI_MLQUEUE_DEVICE_BUSY;
else if (ioc->shost_recovery || ioc->ioc_link_reset_in_progress)
return SCSI_MLQUEUE_HOST_BUSY;
}
spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
if (scmd->sc_data_direction == DMA_FROM_DEVICE)
mpi_control = MPI2_SCSIIO_CONTROL_READ;
@ -3188,25 +3136,6 @@ _scsih_io_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 VF_ID, u32 reply)
scmd->scsi_done(scmd);
}
/**
* _scsih_link_change - process phy link changes
* @ioc: per adapter object
* @handle: phy handle
* @attached_handle: valid for devices attached to link
* @phy_number: phy number
* @link_rate: new link rate
* Context: user.
*
* Return nothing.
*/
static void
_scsih_link_change(struct MPT2SAS_ADAPTER *ioc, u16 handle, u16 attached_handle,
u8 phy_number, u8 link_rate)
{
mpt2sas_transport_update_phy_link_change(ioc, handle, attached_handle,
phy_number, link_rate);
}
/**
* _scsih_sas_host_refresh - refreshing sas host object contents
* @ioc: per adapter object
@ -3251,7 +3180,8 @@ _scsih_sas_host_refresh(struct MPT2SAS_ADAPTER *ioc, u8 update)
le16_to_cpu(sas_iounit_pg0->PhyData[i].
ControllerDevHandle);
if (update)
_scsih_link_change(ioc,
mpt2sas_transport_update_links(
ioc,
ioc->sas_hba.phy[i].handle,
le16_to_cpu(sas_iounit_pg0->PhyData[i].
AttachedDevHandle), i,
@ -3436,6 +3366,9 @@ _scsih_expander_add(struct MPT2SAS_ADAPTER *ioc, u16 handle)
if (!handle)
return -1;
if (ioc->shost_recovery)
return -1;
if ((mpt2sas_config_get_expander_pg0(ioc, &mpi_reply, &expander_pg0,
MPI2_SAS_EXPAND_PGAD_FORM_HNDL, handle))) {
printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
@ -3572,6 +3505,9 @@ _scsih_expander_remove(struct MPT2SAS_ADAPTER *ioc, u16 handle)
struct _sas_node *sas_expander;
unsigned long flags;
if (ioc->shost_recovery)
return;
spin_lock_irqsave(&ioc->sas_node_lock, flags);
sas_expander = mpt2sas_scsih_expander_find_by_handle(ioc, handle);
spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
@ -3743,6 +3679,8 @@ _scsih_remove_device(struct MPT2SAS_ADAPTER *ioc, u16 handle)
mutex_unlock(&ioc->tm_cmds.mutex);
dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "issue target reset "
"done: handle(0x%04x)\n", ioc->name, device_handle));
if (ioc->shost_recovery)
goto out;
}
/* SAS_IO_UNIT_CNTR - send REMOVE_DEVICE */
@ -3765,6 +3703,9 @@ _scsih_remove_device(struct MPT2SAS_ADAPTER *ioc, u16 handle)
le32_to_cpu(mpi_reply.IOCLogInfo)));
out:
_scsih_ublock_io_device(ioc, handle);
mpt2sas_transport_port_remove(ioc, sas_device->sas_address,
sas_device->parent_handle);
@ -3908,6 +3849,8 @@ _scsih_sas_topology_change_event(struct MPT2SAS_ADAPTER *ioc, u8 VF_ID,
"expander event\n", ioc->name));
return;
}
if (ioc->shost_recovery)
return;
if (event_data->PHY[i].PhyStatus &
MPI2_EVENT_SAS_TOPO_PHYSTATUS_VACANT)
continue;
@ -3923,9 +3866,10 @@ _scsih_sas_topology_change_event(struct MPT2SAS_ADAPTER *ioc, u8 VF_ID,
case MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED:
if (!parent_handle) {
if (phy_number < ioc->sas_hba.num_phys)
_scsih_link_change(ioc,
ioc->sas_hba.phy[phy_number].handle,
handle, phy_number, link_rate_);
mpt2sas_transport_update_links(
ioc,
ioc->sas_hba.phy[phy_number].handle,
handle, phy_number, link_rate_);
} else {
spin_lock_irqsave(&ioc->sas_node_lock, flags);
sas_expander =
@ -3935,17 +3879,14 @@ _scsih_sas_topology_change_event(struct MPT2SAS_ADAPTER *ioc, u8 VF_ID,
flags);
if (sas_expander) {
if (phy_number < sas_expander->num_phys)
_scsih_link_change(ioc,
sas_expander->
phy[phy_number].handle,
handle, phy_number,
link_rate_);
mpt2sas_transport_update_links(
ioc,
sas_expander->
phy[phy_number].handle,
handle, phy_number,
link_rate_);
}
}
if (reason_code == MPI2_EVENT_SAS_TOPO_RC_PHY_CHANGED) {
if (link_rate_ >= MPI2_SAS_NEG_LINK_RATE_1_5)
_scsih_ublock_io_device(ioc, handle);
}
if (reason_code == MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED) {
if (link_rate_ < MPI2_SAS_NEG_LINK_RATE_1_5)
break;
@ -4455,7 +4396,7 @@ _scsih_sas_pd_add(struct MPT2SAS_ADAPTER *ioc,
return;
}
_scsih_link_change(ioc,
mpt2sas_transport_update_links(ioc,
le16_to_cpu(sas_device_pg0.ParentDevHandle),
handle, sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5);
@ -4744,7 +4685,7 @@ _scsih_sas_ir_physical_disk_event(struct MPT2SAS_ADAPTER *ioc, u8 VF_ID,
return;
}
_scsih_link_change(ioc,
mpt2sas_transport_update_links(ioc,
le16_to_cpu(sas_device_pg0.ParentDevHandle),
handle, sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5);
@ -5156,22 +5097,9 @@ static void
_scsih_remove_unresponding_devices(struct MPT2SAS_ADAPTER *ioc)
{
struct _sas_device *sas_device, *sas_device_next;
struct _sas_node *sas_expander, *sas_expander_next;
struct _sas_node *sas_expander;
struct _raid_device *raid_device, *raid_device_next;
unsigned long flags;
_scsih_search_responding_sas_devices(ioc);
_scsih_search_responding_raid_devices(ioc);
_scsih_search_responding_expanders(ioc);
spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
ioc->shost_recovery = 0;
if (ioc->shost->shost_state == SHOST_RECOVERY) {
printk(MPT2SAS_INFO_FMT "putting controller into "
"SHOST_RUNNING\n", ioc->name);
scsi_host_set_state(ioc->shost, SHOST_RUNNING);
}
spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
list_for_each_entry_safe(sas_device, sas_device_next,
&ioc->sas_device_list, list) {
@ -5207,16 +5135,63 @@ _scsih_remove_unresponding_devices(struct MPT2SAS_ADAPTER *ioc)
_scsih_raid_device_remove(ioc, raid_device);
}
list_for_each_entry_safe(sas_expander, sas_expander_next,
&ioc->sas_expander_list, list) {
retry_expander_search:
sas_expander = NULL;
list_for_each_entry(sas_expander, &ioc->sas_expander_list, list) {
if (sas_expander->responding) {
sas_expander->responding = 0;
continue;
}
printk("\tremoving expander: handle(0x%04x), "
" sas_addr(0x%016llx)\n", sas_expander->handle,
(unsigned long long)sas_expander->sas_address);
_scsih_expander_remove(ioc, sas_expander->handle);
goto retry_expander_search;
}
}
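The goto-driven rescan replaces list_for_each_entry_safe() because _scsih_expander_remove() can tear down more entries than the one being visited (an expander takes its attached children with it), leaving even the _safe iterator pointing at freed memory. The general restart-scan idiom, sketched with hypothetical helpers:

/* Sketch: restart from the head after every removal, since one removal
 * may free an arbitrary number of list entries. */
restart:
list_for_each_entry(node, &head, list) {
        if (node->responding)
                continue;
        remove_subtree(node);          /* hypothetical: may free several */
        goto restart;
}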
/**
* mpt2sas_scsih_reset_handler - reset callback handler (for scsih)
* @ioc: per adapter object
* @reset_phase: phase
*
* The handler for doing any required cleanup or initialization.
*
* The reset phase can be MPT2_IOC_PRE_RESET, MPT2_IOC_AFTER_RESET,
* MPT2_IOC_DONE_RESET
*
* Return nothing.
*/
void
mpt2sas_scsih_reset_handler(struct MPT2SAS_ADAPTER *ioc, int reset_phase)
{
switch (reset_phase) {
case MPT2_IOC_PRE_RESET:
dtmprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: "
"MPT2_IOC_PRE_RESET\n", ioc->name, __func__));
_scsih_fw_event_off(ioc);
break;
case MPT2_IOC_AFTER_RESET:
dtmprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: "
"MPT2_IOC_AFTER_RESET\n", ioc->name, __func__));
if (ioc->tm_cmds.status & MPT2_CMD_PENDING) {
ioc->tm_cmds.status |= MPT2_CMD_RESET;
mpt2sas_base_free_smid(ioc, ioc->tm_cmds.smid);
complete(&ioc->tm_cmds.done);
}
_scsih_fw_event_on(ioc);
_scsih_flush_running_cmds(ioc);
break;
case MPT2_IOC_DONE_RESET:
dtmprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: "
"MPT2_IOC_DONE_RESET\n", ioc->name, __func__));
_scsih_sas_host_refresh(ioc, 0);
_scsih_search_responding_sas_devices(ioc);
_scsih_search_responding_raid_devices(ioc);
_scsih_search_responding_expanders(ioc);
break;
case MPT2_IOC_RUNNING:
dtmprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: "
"MPT2_IOC_RUNNING\n", ioc->name, __func__));
_scsih_remove_unresponding_devices(ioc);
break;
}
}
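With the rescan logic folded into the reset handler, a single host reset now walks this callback through four phases; a hedged trace of the sequence (assumption: mpt2sas_base dispatches the phases in this order):

/* Hypothetical call sequence for one hard reset: */
mpt2sas_scsih_reset_handler(ioc, MPT2_IOC_PRE_RESET);   /* stop fw events  */
mpt2sas_scsih_reset_handler(ioc, MPT2_IOC_AFTER_RESET); /* flush commands  */
mpt2sas_scsih_reset_handler(ioc, MPT2_IOC_DONE_RESET);  /* mark responders */
mpt2sas_scsih_reset_handler(ioc, MPT2_IOC_RUNNING);     /* prune the rest  */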
@ -5236,14 +5211,6 @@ _firmware_event_work(struct work_struct *work)
unsigned long flags;
struct MPT2SAS_ADAPTER *ioc = fw_event->ioc;
/* This is invoked by calling _scsih_queue_rescan(). */
if (fw_event->event == MPT2SAS_RESCAN_AFTER_HOST_RESET) {
_scsih_fw_event_free(ioc, fw_event);
_scsih_sas_host_refresh(ioc, 1);
_scsih_remove_unresponding_devices(ioc);
return;
}
/* the queue is being flushed so ignore this event */
spin_lock_irqsave(&ioc->fw_event_lock, flags);
if (ioc->fw_events_off || ioc->remove_host) {
@ -5253,13 +5220,10 @@ _firmware_event_work(struct work_struct *work)
}
spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
if (ioc->shost_recovery) {
spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
_scsih_fw_event_requeue(ioc, fw_event, 1000);
return;
}
spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
switch (fw_event->event) {
case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
@ -5461,6 +5425,8 @@ _scsih_expander_node_remove(struct MPT2SAS_ADAPTER *ioc,
if (!sas_device)
continue;
_scsih_remove_device(ioc, sas_device->handle);
if (ioc->shost_recovery)
return;
goto retry_device_search;
}
}
@ -5482,6 +5448,8 @@ _scsih_expander_node_remove(struct MPT2SAS_ADAPTER *ioc,
if (!expander_sibling)
continue;
_scsih_expander_remove(ioc, expander_sibling->handle);
if (ioc->shost_recovery)
return;
goto retry_expander_search;
}
}

View File

@ -140,11 +140,18 @@ _transport_set_identify(struct MPT2SAS_ADAPTER *ioc, u16 handle,
u32 device_info;
u32 ioc_status;
if (ioc->shost_recovery) {
printk(MPT2SAS_INFO_FMT "%s: host reset in progress!\n",
__func__, ioc->name);
return -EFAULT;
}
if ((mpt2sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
ioc->name, __FILE__, __LINE__, __func__);
return -1;
return -ENXIO;
}
ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
@ -153,7 +160,7 @@ _transport_set_identify(struct MPT2SAS_ADAPTER *ioc, u16 handle,
printk(MPT2SAS_ERR_FMT "handle(0x%04x), ioc_status(0x%04x)"
"\nfailure at %s:%d/%s()!\n", ioc->name, handle, ioc_status,
__FILE__, __LINE__, __func__);
return -1;
return -EIO;
}
memset(identify, 0, sizeof(identify));
@ -288,21 +295,17 @@ _transport_expander_report_manufacture(struct MPT2SAS_ADAPTER *ioc,
void *psge;
u32 sgl_flags;
u8 issue_reset = 0;
unsigned long flags;
void *data_out = NULL;
dma_addr_t data_out_dma;
u32 sz;
u64 *sas_address_le;
u16 wait_state_count;
spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
if (ioc->ioc_reset_in_progress) {
spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
if (ioc->shost_recovery) {
printk(MPT2SAS_INFO_FMT "%s: host reset in progress!\n",
__func__, ioc->name);
return -EFAULT;
}
spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
mutex_lock(&ioc->transport_cmds.mutex);
@ -789,7 +792,7 @@ mpt2sas_transport_add_expander_phy(struct MPT2SAS_ADAPTER *ioc, struct _sas_phy
}
/**
* mpt2sas_transport_update_phy_link_change - refreshing phy link changes and attached devices
* mpt2sas_transport_update_links - refreshing phy link changes
* @ioc: per adapter object
* @handle: handle to sas_host or expander
* @attached_handle: attached device handle
@ -799,13 +802,19 @@ mpt2sas_transport_add_expander_phy(struct MPT2SAS_ADAPTER *ioc, struct _sas_phy
* Returns nothing.
*/
void
mpt2sas_transport_update_phy_link_change(struct MPT2SAS_ADAPTER *ioc,
mpt2sas_transport_update_links(struct MPT2SAS_ADAPTER *ioc,
u16 handle, u16 attached_handle, u8 phy_number, u8 link_rate)
{
unsigned long flags;
struct _sas_node *sas_node;
struct _sas_phy *mpt2sas_phy;
if (ioc->shost_recovery) {
printk(MPT2SAS_INFO_FMT "%s: host reset in progress!\n",
__func__, ioc->name);
return;
}
spin_lock_irqsave(&ioc->sas_node_lock, flags);
sas_node = _transport_sas_node_find_by_handle(ioc, handle);
spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
@ -1025,7 +1034,6 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
void *psge;
u32 sgl_flags;
u8 issue_reset = 0;
unsigned long flags;
dma_addr_t dma_addr_in = 0;
dma_addr_t dma_addr_out = 0;
u16 wait_state_count;
@ -1045,14 +1053,11 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
return -EINVAL;
}
spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
if (ioc->ioc_reset_in_progress) {
spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
if (ioc->shost_recovery) {
printk(MPT2SAS_INFO_FMT "%s: host reset in progress!\n",
__func__, ioc->name);
return -EFAULT;
}
spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
rc = mutex_lock_interruptible(&ioc->transport_cmds.mutex);
if (rc)

View File

@ -1713,7 +1713,7 @@ static int nsp_cs_config(struct pcmcia_device *link)
nsp_dbg(NSP_DEBUG_INIT, "in");
cfg_mem = kzalloc(sizeof(cfg_mem), GFP_KERNEL);
cfg_mem = kzalloc(sizeof(*cfg_mem), GFP_KERNEL);
if (!cfg_mem)
return -ENOMEM;
cfg_mem->data = data;
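The one-character nsp_cs fix above is the classic sizeof(pointer) bug: the old call zero-allocated only a pointer's worth of bytes, not the structure it points to. Illustrated with a hypothetical type:

struct cfg_data *cfg_mem;                           /* hypothetical type   */
cfg_mem = kzalloc(sizeof(cfg_mem), GFP_KERNEL);     /* wrong: 4 or 8 bytes */
cfg_mem = kzalloc(sizeof(*cfg_mem), GFP_KERNEL);    /* right: whole struct */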

File diff suppressed because it is too large

File diff suppressed because it is too large


@ -1670,7 +1670,7 @@ qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
qla24xx_vport_disable(fc_vport, disable);
if (ql2xmultique_tag) {
if (ha->flags.cpu_affinity_enabled) {
req = ha->req_q_map[1];
goto vport_queue;
} else if (ql2xmaxqueues == 1 || !ha->npiv_info)
@ -1736,6 +1736,11 @@ qla24xx_vport_delete(struct fc_vport *fc_vport)
qla24xx_deallocate_vp_id(vha);
mutex_lock(&ha->vport_lock);
ha->cur_vport_count--;
clear_bit(vha->vp_idx, ha->vp_idx_map);
mutex_unlock(&ha->vport_lock);
if (vha->timer_active) {
qla2x00_vp_stop_timer(vha);
DEBUG15(printk ("scsi(%ld): timer for the vport[%d] = %p "
@ -1743,7 +1748,7 @@ qla24xx_vport_delete(struct fc_vport *fc_vport)
vha->host_no, vha->vp_idx, vha));
}
if (vha->req->id && !ql2xmultique_tag) {
if (vha->req->id && !ha->flags.cpu_affinity_enabled) {
if (qla25xx_delete_req_que(vha, vha->req) != QLA_SUCCESS)
qla_printk(KERN_WARNING, ha,
"Queue delete failed.\n");


@ -189,6 +189,7 @@ struct req_que;
*/
typedef struct srb {
struct fc_port *fcport;
uint32_t handle;
struct scsi_cmnd *cmd; /* Linux SCSI command pkt */
@ -196,6 +197,8 @@ typedef struct srb {
uint32_t request_sense_length;
uint8_t *request_sense_ptr;
void *ctx;
} srb_t;
/*
@ -203,6 +206,28 @@ typedef struct srb {
*/
#define SRB_DMA_VALID BIT_0 /* Command sent to ISP */
/*
* SRB extensions.
*/
struct srb_ctx {
#define SRB_LOGIN_CMD 1
#define SRB_LOGOUT_CMD 2
uint16_t type;
struct timer_list timer;
void (*free)(srb_t *sp);
void (*timeout)(srb_t *sp);
};
struct srb_logio {
struct srb_ctx ctx;
#define SRB_LOGIN_RETRIED BIT_0
#define SRB_LOGIN_COND_PLOGI BIT_1
#define SRB_LOGIN_SKIP_PRLI BIT_2
uint16_t flags;
};
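Because struct srb_ctx is the first member of struct srb_logio, the same untyped sp->ctx pointer serves both views; a sketch of the aliasing this layout permits:

/* Sketch: first-member embedding makes the two views interchangeable. */
struct srb_logio *lio = sp->ctx;  /* extension-specific view            */
struct srb_ctx *ctx = sp->ctx;    /* generic view: type/timer/callbacks */
ctx->type = SRB_LOGIN_CMD;
lio->flags |= SRB_LOGIN_COND_PLOGI;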
/*
* ISP I/O Register Set structure definitions.
*/
@ -1482,7 +1507,7 @@ typedef union {
uint8_t domain;
uint8_t area;
uint8_t al_pa;
#elif __LITTLE_ENDIAN
#elif defined(__LITTLE_ENDIAN)
uint8_t al_pa;
uint8_t area;
uint8_t domain;
@ -1565,6 +1590,7 @@ typedef struct fc_port {
#define FCF_FABRIC_DEVICE BIT_0
#define FCF_LOGIN_NEEDED BIT_1
#define FCF_TAPE_PRESENT BIT_2
#define FCF_FCP2_DEVICE BIT_3
/* No loop ID flag. */
#define FC_NO_LOOP_ID 0x1000
@ -2093,6 +2119,10 @@ struct qla_msix_entry {
enum qla_work_type {
QLA_EVT_AEN,
QLA_EVT_IDC_ACK,
QLA_EVT_ASYNC_LOGIN,
QLA_EVT_ASYNC_LOGIN_DONE,
QLA_EVT_ASYNC_LOGOUT,
QLA_EVT_ASYNC_LOGOUT_DONE,
};
@ -2111,6 +2141,11 @@ struct qla_work_evt {
#define QLA_IDC_ACK_REGS 7
uint16_t mb[QLA_IDC_ACK_REGS];
} idc_ack;
struct {
struct fc_port *fcport;
#define QLA_LOGIO_LOGIN_RETRIED BIT_0
u16 data[2];
} logio;
} u;
};
@ -2224,6 +2259,7 @@ struct qla_hw_data {
uint32_t chip_reset_done :1;
uint32_t port0 :1;
uint32_t running_gold_fw :1;
uint32_t cpu_affinity_enabled :1;
} flags;
/* This spinlock is used to protect "io transactions", you must
@ -2350,6 +2386,7 @@ struct qla_hw_data {
(ha)->flags.msix_enabled)
#define IS_FAC_REQUIRED(ha) (IS_QLA81XX(ha))
#define IS_NOCACHE_VPD_TYPE(ha) (IS_QLA81XX(ha))
#define IS_ALOGIO_CAPABLE(ha) (IS_QLA23XX(ha) || IS_FWI2_CAPABLE(ha))
#define IS_IIDMA_CAPABLE(ha) ((ha)->device_type & DT_IIDMA)
#define IS_FWI2_CAPABLE(ha) ((ha)->device_type & DT_FWI2)


@ -1126,7 +1126,7 @@ struct vp_config_entry_24xx {
uint16_t id;
uint16_t reserved_4;
uint16_t hopct;
uint8_t reserved_5;
uint8_t reserved_5[2];
};
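Widening reserved_5 to two bytes is a padding fix that restores the IOCB to its expected size; request-queue entries on these ISPs are fixed 64-byte records. A compile-time check (not in the patch, shown as a sketch) would catch a short layout:

/* Sketch only: assert the 64-byte IOCB size at build time. */
BUILD_BUG_ON(sizeof(struct vp_config_entry_24xx) != 64);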
#define VP_RPT_ID_IOCB_TYPE 0x32 /* Report ID Acquisition entry. */


@ -52,6 +52,14 @@ extern void qla2x00_try_to_stop_firmware(scsi_qla_host_t *);
extern void qla84xx_put_chip(struct scsi_qla_host *);
extern int qla2x00_async_login(struct scsi_qla_host *, fc_port_t *,
uint16_t *);
extern int qla2x00_async_logout(struct scsi_qla_host *, fc_port_t *);
extern int qla2x00_async_login_done(struct scsi_qla_host *, fc_port_t *,
uint16_t *);
extern int qla2x00_async_logout_done(struct scsi_qla_host *, fc_port_t *,
uint16_t *);
/*
* Global Data in qla_os.c source file.
*/
@ -76,6 +84,15 @@ extern void qla2x00_abort_all_cmds(scsi_qla_host_t *, int);
extern int qla2x00_post_aen_work(struct scsi_qla_host *, enum
fc_host_event_code, u32);
extern int qla2x00_post_idc_ack_work(struct scsi_qla_host *, uint16_t *);
extern int qla2x00_post_async_login_work(struct scsi_qla_host *, fc_port_t *,
uint16_t *);
extern int qla2x00_post_async_login_done_work(struct scsi_qla_host *,
fc_port_t *, uint16_t *);
extern int qla2x00_post_async_logout_work(struct scsi_qla_host *, fc_port_t *,
uint16_t *);
extern int qla2x00_post_async_logout_done_work(struct scsi_qla_host *,
fc_port_t *, uint16_t *);
extern int qla81xx_restart_mpi_firmware(scsi_qla_host_t *);
extern void qla2x00_abort_fcport_cmds(fc_port_t *);
@ -83,6 +100,8 @@ extern struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *,
struct qla_hw_data *);
extern void qla2x00_free_host(struct scsi_qla_host *);
extern void qla2x00_relogin(struct scsi_qla_host *);
extern void qla2x00_do_work(struct scsi_qla_host *);
/*
* Global Functions in qla_mid.c source file.
*/
@ -135,6 +154,7 @@ int qla2x00_marker(struct scsi_qla_host *, struct req_que *, struct rsp_que *,
uint16_t, uint16_t, uint8_t);
int __qla2x00_marker(struct scsi_qla_host *, struct req_que *, struct rsp_que *,
uint16_t, uint16_t, uint8_t);
extern int qla2x00_start_sp(srb_t *);
/*
* Global Function Prototypes in qla_mbx.c source file.


@ -1674,6 +1674,10 @@ int
qla2x00_fdmi_register(scsi_qla_host_t *vha)
{
int rval;
struct qla_hw_data *ha = vha->hw;
if (IS_QLA2100(ha) || IS_QLA2200(ha))
return QLA_FUNCTION_FAILED;
rval = qla2x00_mgmt_svr_login(vha);
if (rval)


@ -40,6 +40,210 @@ static struct qla_chip_state_84xx *qla84xx_get_chip(struct scsi_qla_host *);
static int qla84xx_init_chip(scsi_qla_host_t *);
static int qla25xx_init_queues(struct qla_hw_data *);
/* SRB Extensions ---------------------------------------------------------- */
static void
qla2x00_ctx_sp_timeout(unsigned long __data)
{
srb_t *sp = (srb_t *)__data;
struct srb_ctx *ctx;
fc_port_t *fcport = sp->fcport;
struct qla_hw_data *ha = fcport->vha->hw;
struct req_que *req;
unsigned long flags;
spin_lock_irqsave(&ha->hardware_lock, flags);
req = ha->req_q_map[0];
req->outstanding_cmds[sp->handle] = NULL;
ctx = sp->ctx;
ctx->timeout(sp);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
ctx->free(sp);
}
static void
qla2x00_ctx_sp_free(srb_t *sp)
{
struct srb_ctx *ctx = sp->ctx;
kfree(ctx);
mempool_free(sp, sp->fcport->vha->hw->srb_mempool);
}
inline srb_t *
qla2x00_get_ctx_sp(scsi_qla_host_t *vha, fc_port_t *fcport, size_t size,
unsigned long tmo)
{
srb_t *sp;
struct qla_hw_data *ha = vha->hw;
struct srb_ctx *ctx;
sp = mempool_alloc(ha->srb_mempool, GFP_KERNEL);
if (!sp)
goto done;
ctx = kzalloc(size, GFP_KERNEL);
if (!ctx) {
mempool_free(sp, ha->srb_mempool);
goto done;
}
memset(sp, 0, sizeof(*sp));
sp->fcport = fcport;
sp->ctx = ctx;
ctx->free = qla2x00_ctx_sp_free;
init_timer(&ctx->timer);
if (!tmo)
goto done;
ctx->timer.expires = jiffies + tmo * HZ;
ctx->timer.data = (unsigned long)sp;
ctx->timer.function = qla2x00_ctx_sp_timeout;
add_timer(&ctx->timer);
done:
return sp;
}
/* Asynchronous Login/Logout Routines -------------------------------------- */
#define ELS_TMO_2_RATOV(ha) ((ha)->r_a_tov / 10 * 2)
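ELS_TMO_2_RATOV() feeds the tmo argument of qla2x00_get_ctx_sp(), which is in seconds (it is multiplied by HZ above), so the macro appears to assume r_a_tov is kept in 100 ms units: dividing by 10 converts to seconds, and doubling gives 2 x R_A_TOV. A worked example under that assumption:

/* Assumption: ha->r_a_tov == 100 means R_A_TOV = 10 s, so        */
/* ELS_TMO_2_RATOV(ha) = 100 / 10 * 2 = 20 s, plus the extra 2 s  */
/* of slack added at the call sites below.                        */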
static void
qla2x00_async_logio_timeout(srb_t *sp)
{
fc_port_t *fcport = sp->fcport;
struct srb_logio *lio = sp->ctx;
DEBUG2(printk(KERN_WARNING
"scsi(%ld:%x): Async-%s timeout.\n",
fcport->vha->host_no, sp->handle,
lio->ctx.type == SRB_LOGIN_CMD ? "login": "logout"));
if (lio->ctx.type == SRB_LOGIN_CMD)
qla2x00_post_async_logout_work(fcport->vha, fcport, NULL);
}
int
qla2x00_async_login(struct scsi_qla_host *vha, fc_port_t *fcport,
uint16_t *data)
{
struct qla_hw_data *ha = vha->hw;
srb_t *sp;
struct srb_logio *lio;
int rval;
rval = QLA_FUNCTION_FAILED;
sp = qla2x00_get_ctx_sp(vha, fcport, sizeof(struct srb_logio),
ELS_TMO_2_RATOV(ha) + 2);
if (!sp)
goto done;
lio = sp->ctx;
lio->ctx.type = SRB_LOGIN_CMD;
lio->ctx.timeout = qla2x00_async_logio_timeout;
lio->flags |= SRB_LOGIN_COND_PLOGI;
if (data[1] & QLA_LOGIO_LOGIN_RETRIED)
lio->flags |= SRB_LOGIN_RETRIED;
rval = qla2x00_start_sp(sp);
if (rval != QLA_SUCCESS)
goto done_free_sp;
DEBUG2(printk(KERN_DEBUG
"scsi(%ld:%x): Async-login - loop-id=%x portid=%02x%02x%02x "
"retries=%d.\n", fcport->vha->host_no, sp->handle, fcport->loop_id,
fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa,
fcport->login_retry));
return rval;
done_free_sp:
del_timer_sync(&lio->ctx.timer);
lio->ctx.free(sp);
done:
return rval;
}
int
qla2x00_async_logout(struct scsi_qla_host *vha, fc_port_t *fcport)
{
struct qla_hw_data *ha = vha->hw;
srb_t *sp;
struct srb_logio *lio;
int rval;
rval = QLA_FUNCTION_FAILED;
sp = qla2x00_get_ctx_sp(vha, fcport, sizeof(struct srb_logio),
ELS_TMO_2_RATOV(ha) + 2);
if (!sp)
goto done;
lio = sp->ctx;
lio->ctx.type = SRB_LOGOUT_CMD;
lio->ctx.timeout = qla2x00_async_logio_timeout;
rval = qla2x00_start_sp(sp);
if (rval != QLA_SUCCESS)
goto done_free_sp;
DEBUG2(printk(KERN_DEBUG
"scsi(%ld:%x): Async-logout - loop-id=%x portid=%02x%02x%02x.\n",
fcport->vha->host_no, sp->handle, fcport->loop_id,
fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa));
return rval;
done_free_sp:
del_timer_sync(&lio->ctx.timer);
lio->ctx.free(sp);
done:
return rval;
}
int
qla2x00_async_login_done(struct scsi_qla_host *vha, fc_port_t *fcport,
uint16_t *data)
{
int rval;
uint8_t opts = 0;
switch (data[0]) {
case MBS_COMMAND_COMPLETE:
if (fcport->flags & FCF_TAPE_PRESENT)
opts |= BIT_1;
rval = qla2x00_get_port_database(vha, fcport, opts);
if (rval != QLA_SUCCESS)
qla2x00_mark_device_lost(vha, fcport, 1, 0);
else
qla2x00_update_fcport(vha, fcport);
break;
case MBS_COMMAND_ERROR:
if (data[1] & QLA_LOGIO_LOGIN_RETRIED)
set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
else
qla2x00_mark_device_lost(vha, fcport, 1, 0);
break;
case MBS_PORT_ID_USED:
fcport->loop_id = data[1];
qla2x00_post_async_login_work(vha, fcport, NULL);
break;
case MBS_LOOP_ID_USED:
fcport->loop_id++;
rval = qla2x00_find_new_loop_id(vha, fcport);
if (rval != QLA_SUCCESS) {
qla2x00_mark_device_lost(vha, fcport, 1, 0);
break;
}
qla2x00_post_async_login_work(vha, fcport, NULL);
break;
}
return QLA_SUCCESS;
}
int
qla2x00_async_logout_done(struct scsi_qla_host *vha, fc_port_t *fcport,
uint16_t *data)
{
qla2x00_mark_device_lost(vha, fcport, 1, 0);
return QLA_SUCCESS;
}
/****************************************************************************/
/* QLogic ISP2x00 Hardware Support Functions. */
/****************************************************************************/
@ -987,7 +1191,6 @@ qla2x00_setup_chip(scsi_qla_host_t *vha)
ha->phy_version);
if (rval != QLA_SUCCESS)
goto failed;
ha->flags.npiv_supported = 0;
if (IS_QLA2XXX_MIDTYPE(ha) &&
(ha->fw_attributes & BIT_2)) {
@ -1591,7 +1794,8 @@ qla2x00_set_model_info(scsi_qla_host_t *vha, uint8_t *model, size_t len,
char *st, *en;
uint16_t index;
struct qla_hw_data *ha = vha->hw;
int use_tbl = !IS_QLA25XX(ha) && !IS_QLA81XX(ha);
int use_tbl = !IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha) &&
!IS_QLA81XX(ha);
if (memcmp(model, BINZERO, len) != 0) {
strncpy(ha->model_number, model, len);
@ -1978,7 +2182,7 @@ qla2x00_rport_del(void *data)
struct fc_rport *rport;
spin_lock_irq(fcport->vha->host->host_lock);
rport = fcport->drport;
rport = fcport->drport ? fcport->drport: fcport->rport;
fcport->drport = NULL;
spin_unlock_irq(fcport->vha->host->host_lock);
if (rport)
@ -2345,8 +2549,7 @@ qla2x00_reg_remote_port(scsi_qla_host_t *vha, fc_port_t *fcport)
struct fc_rport *rport;
struct qla_hw_data *ha = vha->hw;
if (fcport->drport)
qla2x00_rport_del(fcport);
qla2x00_rport_del(fcport);
rport_ids.node_name = wwn_to_u64(fcport->node_name);
rport_ids.port_name = wwn_to_u64(fcport->port_name);
@ -3039,6 +3242,12 @@ qla2x00_fabric_dev_login(scsi_qla_host_t *vha, fc_port_t *fcport,
rval = QLA_SUCCESS;
retry = 0;
if (IS_ALOGIO_CAPABLE(ha)) {
rval = qla2x00_post_async_login_work(vha, fcport, NULL);
if (!rval)
return rval;
}
rval = qla2x00_fabric_login(vha, fcport, next_loopid);
if (rval == QLA_SUCCESS) {
/* Send an ADISC to tape devices.*/
@ -3133,7 +3342,7 @@ qla2x00_fabric_login(scsi_qla_host_t *vha, fc_port_t *fcport,
} else {
fcport->port_type = FCT_TARGET;
if (mb[1] & BIT_1) {
fcport->flags |= FCF_TAPE_PRESENT;
fcport->flags |= FCF_FCP2_DEVICE;
}
}
@ -3244,7 +3453,7 @@ qla2x00_loop_resync(scsi_qla_host_t *vha)
struct req_que *req;
struct rsp_que *rsp;
if (ql2xmultique_tag)
if (vha->hw->flags.cpu_affinity_enabled)
req = vha->hw->req_q_map[0];
else
req = vha->req;
@ -3286,15 +3495,17 @@ qla2x00_loop_resync(scsi_qla_host_t *vha)
}
void
qla2x00_update_fcports(scsi_qla_host_t *vha)
qla2x00_update_fcports(scsi_qla_host_t *base_vha)
{
fc_port_t *fcport;
struct scsi_qla_host *tvp, *vha;
/* Go with deferred removal of rport references. */
list_for_each_entry(fcport, &vha->vp_fcports, list)
if (fcport && fcport->drport &&
atomic_read(&fcport->state) != FCS_UNCONFIGURED)
qla2x00_rport_del(fcport);
list_for_each_entry_safe(vha, tvp, &base_vha->hw->vp_list, list)
list_for_each_entry(fcport, &vha->vp_fcports, list)
if (fcport && fcport->drport &&
atomic_read(&fcport->state) != FCS_UNCONFIGURED)
qla2x00_rport_del(fcport);
}
/*
@ -3331,8 +3542,6 @@ qla2x00_abort_isp(scsi_qla_host_t *vha)
if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
atomic_set(&vha->loop_state, LOOP_DOWN);
qla2x00_mark_all_devices_lost(vha, 0);
list_for_each_entry_safe(vp, tvp, &ha->vp_list, list)
qla2x00_mark_all_devices_lost(vp, 0);
} else {
if (!atomic_read(&vha->loop_down_timer))
atomic_set(&vha->loop_down_timer,
@ -4264,7 +4473,7 @@ qla24xx_configure_vhba(scsi_qla_host_t *vha)
return -EINVAL;
rval = qla2x00_fw_ready(base_vha);
if (ql2xmultique_tag)
if (ha->flags.cpu_affinity_enabled)
req = ha->req_q_map[0];
else
req = vha->req;


@ -350,6 +350,7 @@ qla2x00_start_scsi(srb_t *sp)
/* Build command packet */
req->current_outstanding_cmd = handle;
req->outstanding_cmds[handle] = sp;
sp->handle = handle;
sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
req->cnt -= req_cnt;
@ -778,6 +779,7 @@ qla24xx_start_scsi(srb_t *sp)
/* Build command packet. */
req->current_outstanding_cmd = handle;
req->outstanding_cmds[handle] = sp;
sp->handle = handle;
sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
req->cnt -= req_cnt;
@ -852,9 +854,211 @@ static void qla25xx_set_que(srb_t *sp, struct rsp_que **rsp)
struct qla_hw_data *ha = sp->fcport->vha->hw;
int affinity = cmd->request->cpu;
if (ql2xmultique_tag && affinity >= 0 &&
if (ha->flags.cpu_affinity_enabled && affinity >= 0 &&
affinity < ha->max_rsp_queues - 1)
*rsp = ha->rsp_q_map[affinity + 1];
else
*rsp = ha->rsp_q_map[0];
}
/* Generic Control-SRB manipulation functions. */
static void *
qla2x00_alloc_iocbs(srb_t *sp)
{
scsi_qla_host_t *vha = sp->fcport->vha;
struct qla_hw_data *ha = vha->hw;
struct req_que *req = ha->req_q_map[0];
device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);
uint32_t index, handle;
request_t *pkt;
uint16_t cnt, req_cnt;
pkt = NULL;
req_cnt = 1;
/* Check for room in outstanding command list. */
handle = req->current_outstanding_cmd;
for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
handle++;
if (handle == MAX_OUTSTANDING_COMMANDS)
handle = 1;
if (!req->outstanding_cmds[handle])
break;
}
if (index == MAX_OUTSTANDING_COMMANDS)
goto queuing_error;
/* Check for room on request queue. */
if (req->cnt < req_cnt) {
if (ha->mqenable)
cnt = RD_REG_DWORD(&reg->isp25mq.req_q_out);
else if (IS_FWI2_CAPABLE(ha))
cnt = RD_REG_DWORD(&reg->isp24.req_q_out);
else
cnt = qla2x00_debounce_register(
ISP_REQ_Q_OUT(ha, &reg->isp));
if (req->ring_index < cnt)
req->cnt = cnt - req->ring_index;
else
req->cnt = req->length -
(req->ring_index - cnt);
}
if (req->cnt < req_cnt)
goto queuing_error;
/* Prep packet */
req->current_outstanding_cmd = handle;
req->outstanding_cmds[handle] = sp;
req->cnt -= req_cnt;
pkt = req->ring_ptr;
memset(pkt, 0, REQUEST_ENTRY_SIZE);
pkt->entry_count = req_cnt;
pkt->handle = handle;
sp->handle = handle;
queuing_error:
return pkt;
}
static void
qla2x00_start_iocbs(srb_t *sp)
{
struct qla_hw_data *ha = sp->fcport->vha->hw;
struct req_que *req = ha->req_q_map[0];
device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);
struct device_reg_2xxx __iomem *ioreg = &ha->iobase->isp;
/* Adjust ring index. */
req->ring_index++;
if (req->ring_index == req->length) {
req->ring_index = 0;
req->ring_ptr = req->ring;
} else
req->ring_ptr++;
/* Set chip new ring index. */
if (ha->mqenable) {
WRT_REG_DWORD(&reg->isp25mq.req_q_in, req->ring_index);
RD_REG_DWORD(&ioreg->hccr);
} else if (IS_FWI2_CAPABLE(ha)) {
WRT_REG_DWORD(&reg->isp24.req_q_in, req->ring_index);
RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
} else {
WRT_REG_WORD(ISP_REQ_Q_IN(ha, &reg->isp), req->ring_index);
RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, &reg->isp));
}
}
static void
qla24xx_login_iocb(srb_t *sp, struct logio_entry_24xx *logio)
{
struct srb_logio *lio = sp->ctx;
logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
logio->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI);
if (lio->flags & SRB_LOGIN_COND_PLOGI)
logio->control_flags |= cpu_to_le16(LCF_COND_PLOGI);
if (lio->flags & SRB_LOGIN_SKIP_PRLI)
logio->control_flags |= cpu_to_le16(LCF_SKIP_PRLI);
logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
logio->port_id[0] = sp->fcport->d_id.b.al_pa;
logio->port_id[1] = sp->fcport->d_id.b.area;
logio->port_id[2] = sp->fcport->d_id.b.domain;
logio->vp_index = sp->fcport->vp_idx;
}
static void
qla2x00_login_iocb(srb_t *sp, struct mbx_entry *mbx)
{
struct qla_hw_data *ha = sp->fcport->vha->hw;
struct srb_logio *lio = sp->ctx;
uint16_t opts;
mbx->entry_type = MBX_IOCB_TYPE;
SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
mbx->mb0 = cpu_to_le16(MBC_LOGIN_FABRIC_PORT);
opts = lio->flags & SRB_LOGIN_COND_PLOGI ? BIT_0: 0;
opts |= lio->flags & SRB_LOGIN_SKIP_PRLI ? BIT_1: 0;
if (HAS_EXTENDED_IDS(ha)) {
mbx->mb1 = cpu_to_le16(sp->fcport->loop_id);
mbx->mb10 = cpu_to_le16(opts);
} else {
mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | opts);
}
mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
sp->fcport->d_id.b.al_pa);
mbx->mb9 = cpu_to_le16(sp->fcport->vp_idx);
}
static void
qla24xx_logout_iocb(srb_t *sp, struct logio_entry_24xx *logio)
{
logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
logio->control_flags =
cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO);
logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
logio->port_id[0] = sp->fcport->d_id.b.al_pa;
logio->port_id[1] = sp->fcport->d_id.b.area;
logio->port_id[2] = sp->fcport->d_id.b.domain;
logio->vp_index = sp->fcport->vp_idx;
}
static void
qla2x00_logout_iocb(srb_t *sp, struct mbx_entry *mbx)
{
struct qla_hw_data *ha = sp->fcport->vha->hw;
mbx->entry_type = MBX_IOCB_TYPE;
SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
mbx->mb0 = cpu_to_le16(MBC_LOGOUT_FABRIC_PORT);
mbx->mb1 = HAS_EXTENDED_IDS(ha) ?
cpu_to_le16(sp->fcport->loop_id):
cpu_to_le16(sp->fcport->loop_id << 8);
mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
sp->fcport->d_id.b.al_pa);
mbx->mb9 = cpu_to_le16(sp->fcport->vp_idx);
/* Implicit: mbx->mbx10 = 0. */
}
int
qla2x00_start_sp(srb_t *sp)
{
int rval;
struct qla_hw_data *ha = sp->fcport->vha->hw;
void *pkt;
struct srb_ctx *ctx = sp->ctx;
unsigned long flags;
rval = QLA_FUNCTION_FAILED;
spin_lock_irqsave(&ha->hardware_lock, flags);
pkt = qla2x00_alloc_iocbs(sp);
if (!pkt)
goto done;
rval = QLA_SUCCESS;
switch (ctx->type) {
case SRB_LOGIN_CMD:
IS_FWI2_CAPABLE(ha) ?
qla24xx_login_iocb(sp, pkt):
qla2x00_login_iocb(sp, pkt);
break;
case SRB_LOGOUT_CMD:
IS_FWI2_CAPABLE(ha) ?
qla24xx_logout_iocb(sp, pkt):
qla2x00_logout_iocb(sp, pkt);
break;
default:
break;
}
wmb();
qla2x00_start_iocbs(sp);
done:
spin_unlock_irqrestore(&ha->hardware_lock, flags);
return rval;
}


@ -598,9 +598,54 @@ skip_rio:
break;
case MBA_PORT_UPDATE: /* Port database update */
/* Only handle SCNs for our Vport index. */
if (vha->vp_idx && vha->vp_idx != (mb[3] & 0xff))
/*
* Handle only global and vn-port update events
*
* Relevant inputs:
* mb[1] = N_Port handle of changed port
* OR 0xffff for global event
* mb[2] = New login state
* 7 = Port logged out
* mb[3] = LSB is vp_idx, 0xff = all vps
*
* Skip processing if:
* Event is global, vp_idx is NOT all vps,
* vp_idx does not match
* Event is not global, vp_idx does not match
*/
if ((mb[1] == 0xffff && (mb[3] & 0xff) != 0xff)
|| (mb[1] != 0xffff)) {
if (vha->vp_idx != (mb[3] & 0xff))
break;
}
/* Global event -- port logout or port unavailable. */
if (mb[1] == 0xffff && mb[2] == 0x7) {
DEBUG2(printk("scsi(%ld): Asynchronous PORT UPDATE.\n",
vha->host_no));
DEBUG(printk(KERN_INFO
"scsi(%ld): Port unavailable %04x %04x %04x.\n",
vha->host_no, mb[1], mb[2], mb[3]));
if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
atomic_set(&vha->loop_state, LOOP_DOWN);
atomic_set(&vha->loop_down_timer,
LOOP_DOWN_TIME);
vha->device_flags |= DFLG_NO_CABLE;
qla2x00_mark_all_devices_lost(vha, 1);
}
if (vha->vp_idx) {
atomic_set(&vha->vp_state, VP_FAILED);
fc_vport_set_state(vha->fc_vport,
FC_VPORT_FAILED);
qla2x00_mark_all_devices_lost(vha, 1);
}
vha->flags.management_server_logged_in = 0;
ha->link_data_rate = PORT_SPEED_UNKNOWN;
break;
}
/*
* If PORT UPDATE is global (received LIP_OCCURRED/LIP_RESET
@ -640,8 +685,9 @@ skip_rio:
if (vha->vp_idx && test_bit(VP_SCR_NEEDED, &vha->vp_flags))
break;
/* Only handle SCNs for our Vport index. */
if (vha->vp_idx && vha->vp_idx != (mb[3] & 0xff))
if (ha->flags.npiv_supported && vha->vp_idx != (mb[3] & 0xff))
break;
DEBUG2(printk("scsi(%ld): Asynchronous RSCR UPDATE.\n",
vha->host_no));
DEBUG(printk(KERN_INFO
@ -874,6 +920,249 @@ qla2x00_process_completed_request(struct scsi_qla_host *vha,
}
}
static srb_t *
qla2x00_get_sp_from_handle(scsi_qla_host_t *vha, const char *func,
struct req_que *req, void *iocb)
{
struct qla_hw_data *ha = vha->hw;
sts_entry_t *pkt = iocb;
srb_t *sp = NULL;
uint16_t index;
index = LSW(pkt->handle);
if (index >= MAX_OUTSTANDING_COMMANDS) {
qla_printk(KERN_WARNING, ha,
"%s: Invalid completion handle (%x).\n", func, index);
set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
goto done;
}
sp = req->outstanding_cmds[index];
if (!sp) {
qla_printk(KERN_WARNING, ha,
"%s: Invalid completion handle (%x) -- timed-out.\n", func,
index);
return sp;
}
if (sp->handle != index) {
qla_printk(KERN_WARNING, ha,
"%s: SRB handle (%x) mismatch %x.\n", func, sp->handle,
index);
return NULL;
}
req->outstanding_cmds[index] = NULL;
done:
return sp;
}
static void
qla2x00_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
struct mbx_entry *mbx)
{
const char func[] = "MBX-IOCB";
const char *type;
struct qla_hw_data *ha = vha->hw;
fc_port_t *fcport;
srb_t *sp;
struct srb_logio *lio;
uint16_t data[2];
sp = qla2x00_get_sp_from_handle(vha, func, req, mbx);
if (!sp)
return;
type = NULL;
lio = sp->ctx;
switch (lio->ctx.type) {
case SRB_LOGIN_CMD:
type = "login";
break;
case SRB_LOGOUT_CMD:
type = "logout";
break;
default:
qla_printk(KERN_WARNING, ha,
"%s: Unrecognized SRB: (%p) type=%d.\n", func, sp,
lio->ctx.type);
return;
}
del_timer(&lio->ctx.timer);
fcport = sp->fcport;
data[0] = data[1] = 0;
if (mbx->entry_status) {
DEBUG2(printk(KERN_WARNING
"scsi(%ld:%x): Async-%s error entry - entry-status=%x "
"status=%x state-flag=%x status-flags=%x.\n",
fcport->vha->host_no, sp->handle, type,
mbx->entry_status, le16_to_cpu(mbx->status),
le16_to_cpu(mbx->state_flags),
le16_to_cpu(mbx->status_flags)));
DEBUG2(qla2x00_dump_buffer((uint8_t *)mbx, sizeof(*mbx)));
data[0] = MBS_COMMAND_ERROR;
data[1] = lio->flags & SRB_LOGIN_RETRIED ?
QLA_LOGIO_LOGIN_RETRIED: 0;
goto done_post_logio_done_work;
}
if (!mbx->status && le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE) {
DEBUG2(printk(KERN_DEBUG
"scsi(%ld:%x): Async-%s complete - mbx1=%x.\n",
fcport->vha->host_no, sp->handle, type,
le16_to_cpu(mbx->mb1)));
data[0] = MBS_COMMAND_COMPLETE;
if (lio->ctx.type == SRB_LOGIN_CMD && le16_to_cpu(mbx->mb1) & BIT_1)
fcport->flags |= FCF_FCP2_DEVICE;
goto done_post_logio_done_work;
}
data[0] = le16_to_cpu(mbx->mb0);
switch (data[0]) {
case MBS_PORT_ID_USED:
data[1] = le16_to_cpu(mbx->mb1);
break;
case MBS_LOOP_ID_USED:
break;
default:
data[0] = MBS_COMMAND_ERROR;
data[1] = lio->flags & SRB_LOGIN_RETRIED ?
QLA_LOGIO_LOGIN_RETRIED: 0;
break;
}
DEBUG2(printk(KERN_WARNING
"scsi(%ld:%x): Async-%s failed - status=%x mb0=%x mb1=%x mb2=%x "
"mb6=%x mb7=%x.\n",
fcport->vha->host_no, sp->handle, type, le16_to_cpu(mbx->status),
le16_to_cpu(mbx->mb0), le16_to_cpu(mbx->mb1),
le16_to_cpu(mbx->mb2), le16_to_cpu(mbx->mb6),
le16_to_cpu(mbx->mb7)));
done_post_logio_done_work:
lio->ctx.type == SRB_LOGIN_CMD ?
qla2x00_post_async_login_done_work(fcport->vha, fcport, data):
qla2x00_post_async_logout_done_work(fcport->vha, fcport, data);
lio->ctx.free(sp);
}
static void
qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
struct logio_entry_24xx *logio)
{
const char func[] = "LOGIO-IOCB";
const char *type;
struct qla_hw_data *ha = vha->hw;
fc_port_t *fcport;
srb_t *sp;
struct srb_logio *lio;
uint16_t data[2];
uint32_t iop[2];
sp = qla2x00_get_sp_from_handle(vha, func, req, logio);
if (!sp)
return;
type = NULL;
lio = sp->ctx;
switch (lio->ctx.type) {
case SRB_LOGIN_CMD:
type = "login";
break;
case SRB_LOGOUT_CMD:
type = "logout";
break;
default:
qla_printk(KERN_WARNING, ha,
"%s: Unrecognized SRB: (%p) type=%d.\n", func, sp,
lio->ctx.type);
return;
}
del_timer(&lio->ctx.timer);
fcport = sp->fcport;
data[0] = data[1] = 0;
if (logio->entry_status) {
DEBUG2(printk(KERN_WARNING
"scsi(%ld:%x): Async-%s error entry - entry-status=%x.\n",
fcport->vha->host_no, sp->handle, type,
logio->entry_status));
DEBUG2(qla2x00_dump_buffer((uint8_t *)logio, sizeof(*logio)));
data[0] = MBS_COMMAND_ERROR;
data[1] = lio->flags & SRB_LOGIN_RETRIED ?
QLA_LOGIO_LOGIN_RETRIED: 0;
goto done_post_logio_done_work;
}
if (le16_to_cpu(logio->comp_status) == CS_COMPLETE) {
DEBUG2(printk(KERN_DEBUG
"scsi(%ld:%x): Async-%s complete - iop0=%x.\n",
fcport->vha->host_no, sp->handle, type,
le32_to_cpu(logio->io_parameter[0])));
data[0] = MBS_COMMAND_COMPLETE;
if (lio->ctx.type == SRB_LOGOUT_CMD)
goto done_post_logio_done_work;
iop[0] = le32_to_cpu(logio->io_parameter[0]);
if (iop[0] & BIT_4) {
fcport->port_type = FCT_TARGET;
if (iop[0] & BIT_8)
fcport->flags |= FCF_FCP2_DEVICE;
}
if (iop[0] & BIT_5)
fcport->port_type = FCT_INITIATOR;
if (logio->io_parameter[7] || logio->io_parameter[8])
fcport->supported_classes |= FC_COS_CLASS2;
if (logio->io_parameter[9] || logio->io_parameter[10])
fcport->supported_classes |= FC_COS_CLASS3;
goto done_post_logio_done_work;
}
iop[0] = le32_to_cpu(logio->io_parameter[0]);
iop[1] = le32_to_cpu(logio->io_parameter[1]);
switch (iop[0]) {
case LSC_SCODE_PORTID_USED:
data[0] = MBS_PORT_ID_USED;
data[1] = LSW(iop[1]);
break;
case LSC_SCODE_NPORT_USED:
data[0] = MBS_LOOP_ID_USED;
break;
case LSC_SCODE_CMD_FAILED:
if ((iop[1] & 0xff) == 0x05) {
data[0] = MBS_NOT_LOGGED_IN;
break;
}
/* Fall through. */
default:
data[0] = MBS_COMMAND_ERROR;
data[1] = lio->flags & SRB_LOGIN_RETRIED ?
QLA_LOGIO_LOGIN_RETRIED: 0;
break;
}
DEBUG2(printk(KERN_WARNING
"scsi(%ld:%x): Async-%s failed - comp=%x iop0=%x iop1=%x.\n",
fcport->vha->host_no, sp->handle, type,
le16_to_cpu(logio->comp_status),
le32_to_cpu(logio->io_parameter[0]),
le32_to_cpu(logio->io_parameter[1])));
done_post_logio_done_work:
lio->ctx.type == SRB_LOGIN_CMD ?
qla2x00_post_async_login_done_work(fcport->vha, fcport, data):
qla2x00_post_async_logout_done_work(fcport->vha, fcport, data);
lio->ctx.free(sp);
}
/**
* qla2x00_process_response_queue() - Process response queue entries.
* @ha: SCSI driver HA context
@ -935,6 +1224,9 @@ qla2x00_process_response_queue(struct rsp_que *rsp)
case STATUS_CONT_TYPE:
qla2x00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt);
break;
case MBX_IOCB_TYPE:
qla2x00_mbx_iocb_entry(vha, rsp->req,
(struct mbx_entry *)pkt);
break;
default:
/* Type Not Supported. */
DEBUG4(printk(KERN_WARNING
@ -1223,6 +1515,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
cp->device->id, cp->device->lun, resid,
scsi_bufflen(cp)));
scsi_set_resid(cp, resid);
cp->result = DID_ERROR << 16;
break;
}
@ -1544,6 +1837,10 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha,
qla24xx_report_id_acquisition(vha,
(struct vp_rpt_id_entry_24xx *)pkt);
break;
case LOGINOUT_PORT_IOCB_TYPE:
qla24xx_logio_entry(vha, rsp->req,
(struct logio_entry_24xx *)pkt);
break;
default:
/* Type Not Supported. */
DEBUG4(printk(KERN_WARNING
@ -1723,8 +2020,10 @@ qla24xx_msix_rsp_q(int irq, void *dev_id)
vha = qla25xx_get_host(rsp);
qla24xx_process_response_queue(vha, rsp);
WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
if (!ha->mqenable) {
WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
RD_REG_DWORD_RELAXED(&reg->hccr);
}
spin_unlock_irq(&ha->hardware_lock);
return IRQ_HANDLED;


@ -1507,7 +1507,7 @@ qla24xx_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
if (ql2xmultique_tag)
if (ha->flags.cpu_affinity_enabled)
req = ha->req_q_map[0];
else
req = vha->req;
@ -2324,7 +2324,7 @@ __qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport,
vha = fcport->vha;
ha = vha->hw;
req = vha->req;
if (ql2xmultique_tag)
if (ha->flags.cpu_affinity_enabled)
rsp = ha->rsp_q_map[tag + 1];
else
rsp = req->rsp;
@ -2746,7 +2746,8 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
if (rptid_entry->format == 0) {
DEBUG15(printk("%s:format 0 : scsi(%ld) number of VPs setup %d,"
" number of VPs acquired %d\n", __func__, vha->host_no,
MSB(rptid_entry->vp_count), LSB(rptid_entry->vp_count)));
MSB(le16_to_cpu(rptid_entry->vp_count)),
LSB(le16_to_cpu(rptid_entry->vp_count))));
DEBUG15(printk("%s primary port id %02x%02x%02x\n", __func__,
rptid_entry->port_id[2], rptid_entry->port_id[1],
rptid_entry->port_id[0]));


@ -42,7 +42,6 @@ qla24xx_allocate_vp_id(scsi_qla_host_t *vha)
set_bit(vp_id, ha->vp_idx_map);
ha->num_vhosts++;
ha->cur_vport_count++;
vha->vp_idx = vp_id;
list_add_tail(&vha->list, &ha->vp_list);
mutex_unlock(&ha->vport_lock);
@ -58,7 +57,6 @@ qla24xx_deallocate_vp_id(scsi_qla_host_t *vha)
mutex_lock(&ha->vport_lock);
vp_id = vha->vp_idx;
ha->num_vhosts--;
ha->cur_vport_count--;
clear_bit(vp_id, ha->vp_idx_map);
list_del(&vha->list);
mutex_unlock(&ha->vport_lock);
@ -235,7 +233,11 @@ qla2x00_vp_abort_isp(scsi_qla_host_t *vha)
atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
}
/* To exclusively reset vport, we need to log it out first.*/
/*
* To exclusively reset the vport, we need to log it out first. Note:
* this control_vp can fail if an ISP reset has already been issued;
* that is expected, since the vp would already have been logged out by
* the ISP reset.
*/
if (!test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags))
qla24xx_control_vp(vha, VCE_COMMAND_DISABLE_VPS_LOGO_ALL);
@ -247,18 +249,11 @@ qla2x00_vp_abort_isp(scsi_qla_host_t *vha)
static int
qla2x00_do_dpc_vp(scsi_qla_host_t *vha)
{
struct qla_hw_data *ha = vha->hw;
scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
qla2x00_do_work(vha);
if (test_and_clear_bit(VP_IDX_ACQUIRED, &vha->vp_flags)) {
/* VP acquired. complete port configuration */
if (atomic_read(&base_vha->loop_state) == LOOP_READY) {
qla24xx_configure_vp(vha);
} else {
set_bit(VP_IDX_ACQUIRED, &vha->vp_flags);
set_bit(VP_DPC_NEEDED, &base_vha->dpc_flags);
}
qla24xx_configure_vp(vha);
return 0;
}
@ -309,6 +304,9 @@ qla2x00_do_dpc_all_vps(scsi_qla_host_t *vha)
clear_bit(VP_DPC_NEEDED, &vha->dpc_flags);
if (!(ha->current_topology & ISP_CFG_F))
return;
list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) {
if (vp->vp_idx)
ret = qla2x00_do_dpc_vp(vp);
@ -413,6 +411,11 @@ qla24xx_create_vhost(struct fc_vport *fc_vport)
vha->flags.init_done = 1;
mutex_lock(&ha->vport_lock);
set_bit(vha->vp_idx, ha->vp_idx_map);
ha->cur_vport_count++;
mutex_unlock(&ha->vport_lock);
return vha;
create_vhost_failed:


@ -287,9 +287,12 @@ static int qla25xx_setup_mode(struct scsi_qla_host *vha)
int ques, req, ret;
struct qla_hw_data *ha = vha->hw;
if (!(ha->fw_attributes & BIT_6)) {
qla_printk(KERN_INFO, ha,
"Firmware is not multi-queue capable\n");
goto fail;
}
if (ql2xmultique_tag) {
/* CPU affinity mode */
ha->wq = create_workqueue("qla2xxx_wq");
/* create a request queue for IO */
options |= BIT_7;
req = qla25xx_create_req_que(ha, options, 0, 0, -1,
@ -299,6 +302,7 @@ static int qla25xx_setup_mode(struct scsi_qla_host *vha)
"Can't create request queue\n");
goto fail;
}
ha->wq = create_workqueue("qla2xxx_wq");
vha->req = ha->req_q_map[req];
options |= BIT_1;
for (ques = 1; ques < ha->max_rsp_queues; ques++) {
@ -309,6 +313,8 @@ static int qla25xx_setup_mode(struct scsi_qla_host *vha)
goto fail2;
}
}
ha->flags.cpu_affinity_enabled = 1;
DEBUG2(qla_printk(KERN_INFO, ha,
"CPU affinity mode enabled, no. of response"
" queues:%d, no. of request queues:%d\n",
@ -317,8 +323,13 @@ static int qla25xx_setup_mode(struct scsi_qla_host *vha)
return 0;
fail2:
qla25xx_delete_queues(vha);
destroy_workqueue(ha->wq);
ha->wq = NULL;
fail:
ha->mqenable = 0;
kfree(ha->req_q_map);
kfree(ha->rsp_q_map);
ha->max_req_queues = ha->max_rsp_queues = 1;
return 1;
}
@ -462,6 +473,7 @@ qla2x00_get_new_sp(scsi_qla_host_t *vha, fc_port_t *fcport,
sp->flags = 0;
CMD_SP(cmd) = (void *)sp;
cmd->scsi_done = done;
sp->ctx = NULL;
return sp;
}
@ -556,11 +568,8 @@ qla2x00_eh_wait_on_command(struct scsi_cmnd *cmd)
unsigned long wait_iter = ABORT_WAIT_ITER;
int ret = QLA_SUCCESS;
while (CMD_SP(cmd)) {
while (CMD_SP(cmd) && wait_iter--) {
msleep(ABORT_POLLING_PERIOD);
if (--wait_iter)
break;
}
if (CMD_SP(cmd))
ret = QLA_FUNCTION_FAILED;
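The rewritten condition fixes an inverted poll: in the old body, the "if (--wait_iter) break;" test left the loop after the first sleep whenever iterations remained, so the wait never exceeded one polling period. A trace of the first pass through the old code, assuming a large ABORT_WAIT_ITER:

/* Old body, first iteration (wait_iter = 1000, say): */
msleep(ABORT_POLLING_PERIOD);
if (--wait_iter)        /* 999: non-zero, so the branch is taken */
        break;          /* loop exits after a single poll        */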
@ -698,6 +707,8 @@ qla2x00_abort_fcport_cmds(fc_port_t *fcport)
continue;
if (sp->fcport != fcport)
continue;
if (sp->ctx)
continue;
spin_unlock_irqrestore(&ha->hardware_lock, flags);
if (ha->isp_ops->abort_command(sp)) {
@ -783,7 +794,8 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
if (sp == NULL)
continue;
if (sp->ctx)
continue;
if (sp->cmd != cmd)
continue;
@ -848,7 +860,8 @@ qla2x00_eh_wait_for_pending_commands(scsi_qla_host_t *vha, unsigned int t,
sp = req->outstanding_cmds[cnt];
if (!sp)
continue;
if (sp->ctx)
continue;
if (vha->vp_idx != sp->fcport->vha->vp_idx)
continue;
match = 0;
@ -1106,8 +1119,7 @@ qla2x00_loop_reset(scsi_qla_host_t *vha)
struct fc_port *fcport;
struct qla_hw_data *ha = vha->hw;
if (ha->flags.enable_lip_full_login && !vha->vp_idx &&
!IS_QLA81XX(ha)) {
if (ha->flags.enable_lip_full_login && !IS_QLA81XX(ha)) {
ret = qla2x00_full_login_lip(vha);
if (ret != QLA_SUCCESS) {
DEBUG2_3(printk("%s(%ld): failed: "
@ -1120,7 +1132,7 @@ qla2x00_loop_reset(scsi_qla_host_t *vha)
qla2x00_wait_for_loop_ready(vha);
}
if (ha->flags.enable_lip_reset && !vha->vp_idx) {
if (ha->flags.enable_lip_reset) {
ret = qla2x00_lip_reset(vha);
if (ret != QLA_SUCCESS) {
DEBUG2_3(printk("%s(%ld): failed: "
@ -1154,6 +1166,7 @@ qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res)
int que, cnt;
unsigned long flags;
srb_t *sp;
struct srb_ctx *ctx;
struct qla_hw_data *ha = vha->hw;
struct req_que *req;
@ -1166,8 +1179,14 @@ qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res)
sp = req->outstanding_cmds[cnt];
if (sp) {
req->outstanding_cmds[cnt] = NULL;
sp->cmd->result = res;
qla2x00_sp_compl(ha, sp);
if (!sp->ctx) {
sp->cmd->result = res;
qla2x00_sp_compl(ha, sp);
} else {
ctx = sp->ctx;
del_timer_sync(&ctx->timer);
ctx->free(sp);
}
}
}
}
@ -1193,6 +1212,7 @@ qla2xxx_slave_configure(struct scsi_device *sdev)
scsi_qla_host_t *vha = shost_priv(sdev->host);
struct qla_hw_data *ha = vha->hw;
struct fc_rport *rport = starget_to_rport(sdev->sdev_target);
fc_port_t *fcport = *(fc_port_t **)rport->dd_data;
struct req_que *req = vha->req;
if (sdev->tagged_supported)
@ -1201,6 +1221,8 @@ qla2xxx_slave_configure(struct scsi_device *sdev)
scsi_deactivate_tcq(sdev, req->max_q_depth);
rport->dev_loss_tmo = ha->port_down_retry_count;
if (sdev->type == TYPE_TAPE)
fcport->flags |= FCF_TAPE_PRESENT;
return 0;
}
@ -1923,6 +1945,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
if (ret)
goto probe_init_failed;
/* Alloc arrays of request and response ring ptrs */
que_init:
if (!qla2x00_alloc_queues(ha)) {
qla_printk(KERN_WARNING, ha,
"[ERROR] Failed to allocate memory for queue"
@ -1959,11 +1982,14 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
goto probe_failed;
}
if (ha->mqenable)
if (qla25xx_setup_mode(base_vha))
if (ha->mqenable) {
if (qla25xx_setup_mode(base_vha)) {
qla_printk(KERN_WARNING, ha,
"Can't create queues, falling back to single"
" queue mode\n");
goto que_init;
}
}
if (ha->flags.running_gold_fw)
goto skip_dpc;
@ -2155,17 +2181,19 @@ qla2x00_schedule_rport_del(struct scsi_qla_host *vha, fc_port_t *fcport,
int defer)
{
struct fc_rport *rport;
scsi_qla_host_t *base_vha;
if (!fcport->rport)
return;
rport = fcport->rport;
if (defer) {
base_vha = pci_get_drvdata(vha->hw->pdev);
spin_lock_irq(vha->host->host_lock);
fcport->drport = rport;
spin_unlock_irq(vha->host->host_lock);
set_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags);
qla2xxx_wake_dpc(vha);
set_bit(FCPORT_UPDATE_NEEDED, &base_vha->dpc_flags);
qla2xxx_wake_dpc(base_vha);
} else
fc_remote_port_delete(rport);
}
@ -2237,8 +2265,9 @@ qla2x00_mark_all_devices_lost(scsi_qla_host_t *vha, int defer)
fc_port_t *fcport;
list_for_each_entry(fcport, &vha->vp_fcports, list) {
if (vha->vp_idx != fcport->vp_idx)
if (vha->vp_idx != 0 && vha->vp_idx != fcport->vp_idx)
continue;
/*
* No point in marking the device as lost, if the device is
* already DEAD.
@ -2246,10 +2275,12 @@ qla2x00_mark_all_devices_lost(scsi_qla_host_t *vha, int defer)
if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD)
continue;
if (atomic_read(&fcport->state) == FCS_ONLINE) {
atomic_set(&fcport->state, FCS_DEVICE_LOST);
qla2x00_schedule_rport_del(vha, fcport, defer);
} else
atomic_set(&fcport->state, FCS_DEVICE_LOST);
if (defer)
qla2x00_schedule_rport_del(vha, fcport, defer);
else if (vha->vp_idx == fcport->vp_idx)
qla2x00_schedule_rport_del(vha, fcport, defer);
}
atomic_set(&fcport->state, FCS_DEVICE_LOST);
}
}
@ -2598,7 +2629,31 @@ qla2x00_post_idc_ack_work(struct scsi_qla_host *vha, uint16_t *mb)
return qla2x00_post_work(vha, e);
}
static void
#define qla2x00_post_async_work(name, type) \
int qla2x00_post_async_##name##_work( \
struct scsi_qla_host *vha, \
fc_port_t *fcport, uint16_t *data) \
{ \
struct qla_work_evt *e; \
\
e = qla2x00_alloc_work(vha, type); \
if (!e) \
return QLA_FUNCTION_FAILED; \
\
e->u.logio.fcport = fcport; \
if (data) { \
e->u.logio.data[0] = data[0]; \
e->u.logio.data[1] = data[1]; \
} \
return qla2x00_post_work(vha, e); \
}
qla2x00_post_async_work(login, QLA_EVT_ASYNC_LOGIN);
qla2x00_post_async_work(login_done, QLA_EVT_ASYNC_LOGIN_DONE);
qla2x00_post_async_work(logout, QLA_EVT_ASYNC_LOGOUT);
qla2x00_post_async_work(logout_done, QLA_EVT_ASYNC_LOGOUT_DONE);
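qla2x00_post_async_work() stamps out the four work-posting helpers declared in qla_gbl.h above. Substituting (login, QLA_EVT_ASYNC_LOGIN), the first invocation expands to:

int qla2x00_post_async_login_work(struct scsi_qla_host *vha,
    fc_port_t *fcport, uint16_t *data)
{
        struct qla_work_evt *e;

        e = qla2x00_alloc_work(vha, QLA_EVT_ASYNC_LOGIN);
        if (!e)
                return QLA_FUNCTION_FAILED;

        e->u.logio.fcport = fcport;
        if (data) {
                e->u.logio.data[0] = data[0];
                e->u.logio.data[1] = data[1];
        }
        return qla2x00_post_work(vha, e);
}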
void
qla2x00_do_work(struct scsi_qla_host *vha)
{
struct qla_work_evt *e, *tmp;
@ -2620,6 +2675,21 @@ qla2x00_do_work(struct scsi_qla_host *vha)
case QLA_EVT_IDC_ACK:
qla81xx_idc_ack(vha, e->u.idc_ack.mb);
break;
case QLA_EVT_ASYNC_LOGIN:
qla2x00_async_login(vha, e->u.logio.fcport,
e->u.logio.data);
break;
case QLA_EVT_ASYNC_LOGIN_DONE:
qla2x00_async_login_done(vha, e->u.logio.fcport,
e->u.logio.data);
break;
case QLA_EVT_ASYNC_LOGOUT:
qla2x00_async_logout(vha, e->u.logio.fcport);
break;
case QLA_EVT_ASYNC_LOGOUT_DONE:
qla2x00_async_logout_done(vha, e->u.logio.fcport,
e->u.logio.data);
break;
}
if (e->flags & QLA_EVT_FLAG_FREE)
kfree(e);
@ -2635,6 +2705,7 @@ void qla2x00_relogin(struct scsi_qla_host *vha)
int status;
uint16_t next_loopid = 0;
struct qla_hw_data *ha = vha->hw;
uint16_t data[2];
list_for_each_entry(fcport, &vha->vp_fcports, list) {
/*
@ -2644,6 +2715,7 @@ void qla2x00_relogin(struct scsi_qla_host *vha)
if (atomic_read(&fcport->state) !=
FCS_ONLINE && fcport->login_retry) {
fcport->login_retry--;
if (fcport->flags & FCF_FABRIC_DEVICE) {
if (fcport->flags & FCF_TAPE_PRESENT)
ha->isp_ops->fabric_logout(vha,
@ -2652,13 +2724,22 @@ void qla2x00_relogin(struct scsi_qla_host *vha)
fcport->d_id.b.area,
fcport->d_id.b.al_pa);
status = qla2x00_fabric_login(vha, fcport,
&next_loopid);
if (IS_ALOGIO_CAPABLE(ha)) {
data[0] = 0;
data[1] = QLA_LOGIO_LOGIN_RETRIED;
status = qla2x00_post_async_login_work(
vha, fcport, data);
if (status == QLA_SUCCESS)
continue;
/* Attempt a retry. */
status = 1;
} else
status = qla2x00_fabric_login(vha,
fcport, &next_loopid);
} else
status = qla2x00_local_device_login(vha,
fcport);
fcport->login_retry--;
if (status == QLA_SUCCESS) {
fcport->old_loop_id = fcport->loop_id;
@ -2831,6 +2912,9 @@ qla2x00_do_dpc(void *data)
*/
ha->dpc_active = 0;
/* Cleanup any residual CTX SRBs. */
qla2x00_abort_all_cmds(base_vha, DID_NO_CONNECT << 16);
return 0;
}
@ -2971,6 +3055,8 @@ qla2x00_timer(scsi_qla_host_t *vha)
sp = req->outstanding_cmds[index];
if (!sp)
continue;
if (sp->ctx)
continue;
sfcp = sp->fcport;
if (!(sfcp->flags & FCF_TAPE_PRESENT))
continue;
@ -2987,8 +3073,7 @@ qla2x00_timer(scsi_qla_host_t *vha)
/* if the loop has been down for 4 minutes, reinit adapter */
if (atomic_dec_and_test(&vha->loop_down_timer) != 0) {
if (!(vha->device_flags & DFLG_NO_CABLE) &&
!vha->vp_idx) {
if (!(vha->device_flags & DFLG_NO_CABLE)) {
DEBUG(printk("scsi(%ld): Loop down - "
"aborting ISP.\n",
vha->host_no));


@ -7,7 +7,7 @@
/*
* Driver version
*/
#define QLA2XXX_VERSION "8.03.01-k4"
#define QLA2XXX_VERSION "8.03.01-k6"
#define QLA_DRIVER_MAJOR_VER 8
#define QLA_DRIVER_MINOR_VER 3


@ -227,11 +227,11 @@ static void qla4xxx_status_entry(struct scsi_qla_host *ha,
case SCS_DATA_UNDERRUN:
case SCS_DATA_OVERRUN:
if ((sts_entry->iscsiFlags & ISCSI_FLAG_RESIDUAL_OVER) ||
(sts_entry->completionStatus == SCS_DATA_OVERRUN)) {
DEBUG2(printk("scsi%ld:%d:%d:%d: %s: " "Data overrun, "
"residual = 0x%x\n", ha->host_no,
(sts_entry->completionStatus == SCS_DATA_OVERRUN)) {
DEBUG2(printk("scsi%ld:%d:%d:%d: %s: " "Data overrun\n",
ha->host_no,
cmd->device->channel, cmd->device->id,
cmd->device->lun, __func__, residual));
cmd->device->lun, __func__));
cmd->result = DID_ERROR << 16;
break;


@ -994,7 +994,7 @@ static int scsi_vpd_inquiry(struct scsi_device *sdev, unsigned char *buffer,
* all the existing users tried this hard.
*/
result = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buffer,
len + 4, NULL, 30 * HZ, 3, NULL);
len, NULL, 30 * HZ, 3, NULL);
if (result)
return result;
@ -1021,13 +1021,14 @@ unsigned char *scsi_get_vpd_page(struct scsi_device *sdev, u8 page)
{
int i, result;
unsigned int len;
unsigned char *buf = kmalloc(259, GFP_KERNEL);
const unsigned int init_vpd_len = 255;
unsigned char *buf = kmalloc(init_vpd_len, GFP_KERNEL);
if (!buf)
return NULL;
/* Ask for all the pages supported by this device */
result = scsi_vpd_inquiry(sdev, buf, 0, 255);
result = scsi_vpd_inquiry(sdev, buf, 0, init_vpd_len);
if (result)
goto fail;
@@ -1050,12 +1051,12 @@ unsigned char *scsi_get_vpd_page(struct scsi_device *sdev, u8 page)
 	 * Some pages are longer than 255 bytes. The actual length of
 	 * the page is returned in the header.
 	 */
-	len = (buf[2] << 8) | buf[3];
-	if (len <= 255)
+	len = ((buf[2] << 8) | buf[3]) + 4;
+	if (len <= init_vpd_len)
 		return buf;
 
 	kfree(buf);
-	buf = kmalloc(len + 4, GFP_KERNEL);
+	buf = kmalloc(len, GFP_KERNEL);
 	result = scsi_vpd_inquiry(sdev, buf, page, len);
 	if (result)
 		goto fail;
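The scsi_get_vpd_page() change above folds the 4-byte VPD header into the length once: bytes 2 and 3 of the header give the payload length excluding the header, so the full page is that value plus 4, rather than adding 4 separately at each transfer and allocation site. A stand-alone sketch of the arithmetic (hypothetical helper, not the kernel API):

#include <stdio.h>

static unsigned int vpd_total_len(const unsigned char *hdr)
{
	/* hdr[2]/hdr[3]: big-endian payload length, header not included */
	return (((unsigned int)hdr[2] << 8) | hdr[3]) + 4;
}

int main(void)
{
	/* A page 0x83 header claiming a 0xfc-byte payload */
	unsigned char hdr[4] = { 0x00, 0x83, 0x00, 0xfc };

	printf("allocate and request %u bytes\n", vpd_total_len(hdr));
	return 0;
}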


@@ -382,9 +382,13 @@ static int scsi_eh_completed_normally(struct scsi_cmnd *scmd)
 		 * who knows? FIXME(eric)
 		 */
 		return SUCCESS;
+	case RESERVATION_CONFLICT:
+		/*
+		 * let issuer deal with this, it could be just fine
+		 */
+		return SUCCESS;
 	case BUSY:
 	case QUEUE_FULL:
-	case RESERVATION_CONFLICT:
 	default:
 		return FAILED;
 	}
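The scsi_eh_completed_normally() change above reclassifies a reservation conflict: it is a valid completion that only the issuer can interpret, so the error handler now reports SUCCESS instead of escalating. A stand-alone sketch of the resulting disposition mapping, using SAM status values and hypothetical names rather than the kernel's definitions:

#include <stdio.h>

enum disposition { SUCCESS, FAILED };

/* SAM-defined status code values */
enum {
	SAM_STAT_GOOD			= 0x00,
	SAM_STAT_BUSY			= 0x08,
	SAM_STAT_RESERVATION_CONFLICT	= 0x18,
	SAM_STAT_TASK_SET_FULL		= 0x28,
};

static enum disposition eh_completed_normally(int status)
{
	switch (status) {
	case SAM_STAT_GOOD:
		return SUCCESS;
	case SAM_STAT_RESERVATION_CONFLICT:
		/* let the issuer deal with it; it could be just fine */
		return SUCCESS;
	case SAM_STAT_BUSY:
	case SAM_STAT_TASK_SET_FULL:
	default:
		return FAILED;
	}
}

int main(void)
{
	printf("reservation conflict -> %s\n",
	       eh_completed_normally(SAM_STAT_RESERVATION_CONFLICT) == SUCCESS ?
	       "SUCCESS" : "FAILED");
	return 0;
}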


@@ -896,6 +896,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
 				scsi_print_result(cmd);
 				if (driver_byte(result) & DRIVER_SENSE)
 					scsi_print_sense("", cmd);
+				scsi_print_command(cmd);
 			}
 			blk_end_request_all(req, -EIO);
 			scsi_next_command(cmd);


@@ -291,7 +291,7 @@ static void fc_scsi_scan_rport(struct work_struct *work);
 #define FC_STARGET_NUM_ATTRS	3
 #define FC_RPORT_NUM_ATTRS	10
 #define FC_VPORT_NUM_ATTRS	9
-#define FC_HOST_NUM_ATTRS	21
+#define FC_HOST_NUM_ATTRS	22
 
 struct fc_internal {
 	struct scsi_transport_template t;
@ -3432,7 +3432,7 @@ fc_bsg_jobdone(struct fc_bsg_job *job)
/**
* fc_bsg_softirq_done - softirq done routine for destroying the bsg requests
* @req: BSG request that holds the job to be destroyed
* @rq: BSG request that holds the job to be destroyed
*/
static void fc_bsg_softirq_done(struct request *rq)
{


@@ -36,6 +36,38 @@
 #define ISCSI_TRANSPORT_VERSION "2.0-870"
 
+static int dbg_session;
+module_param_named(debug_session, dbg_session, int,
+		   S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(debug_session,
+		 "Turn on debugging for sessions in scsi_transport_iscsi "
+		 "module. Set to 1 to turn on, and zero to turn off. Default "
+		 "is off.");
+
+static int dbg_conn;
+module_param_named(debug_conn, dbg_conn, int,
+		   S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(debug_conn,
+		 "Turn on debugging for connections in scsi_transport_iscsi "
+		 "module. Set to 1 to turn on, and zero to turn off. Default "
+		 "is off.");
+
+#define ISCSI_DBG_TRANS_SESSION(_session, dbg_fmt, arg...)		\
+	do {								\
+		if (dbg_session)					\
+			iscsi_cls_session_printk(KERN_INFO, _session,	\
+						 "%s: " dbg_fmt,	\
+						 __func__, ##arg);	\
+	} while (0);
+
+#define ISCSI_DBG_TRANS_CONN(_conn, dbg_fmt, arg...)			\
+	do {								\
+		if (dbg_conn)						\
+			iscsi_cls_conn_printk(KERN_INFO, _conn,		\
+					      "%s: " dbg_fmt,		\
+					      __func__, ##arg);		\
+	} while (0);
+
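The ISCSI_DBG_TRANS_* macros added above follow the usual conditional-debug idiom: a module parameter gates an otherwise ordinary printk, and the do { ... } while (0) wrapper keeps the macro safe as a single statement inside un-braced if/else bodies. A user-space analogue of the same idiom (hypothetical names, stderr in place of printk; `##__VA_ARGS__` is the gcc extension matching the kernel's `arg...` form):

#include <stdio.h>

static int dbg_session = 1;	/* stand-in for the module parameter */

#define DBG_SESSION(fmt, ...)						\
	do {								\
		if (dbg_session)					\
			fprintf(stderr, "session: %s: " fmt,		\
				__func__, ##__VA_ARGS__);		\
	} while (0)

static void unblock_session(int id)
{
	DBG_SESSION("Unblocking session %d\n", id);
	/* ... the actual work would go here ... */
	DBG_SESSION("Completed unblocking session %d\n", id);
}

int main(void)
{
	unblock_session(7);
	return 0;
}

Because the parameters are registered with S_IRUGO | S_IWUSR, the kernel versions can be toggled at runtime by writing the files under /sys/module/scsi_transport_iscsi/parameters/.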
struct iscsi_internal {
struct scsi_transport_template t;
struct iscsi_transport *iscsi_transport;
@@ -377,6 +409,7 @@ static void iscsi_session_release(struct device *dev)
 	shost = iscsi_session_to_shost(session);
 	scsi_host_put(shost);
+	ISCSI_DBG_TRANS_SESSION(session, "Completing session release\n");
 	kfree(session);
 }
@@ -441,6 +474,9 @@ static int iscsi_user_scan_session(struct device *dev, void *data)
 		return 0;
 
 	session = iscsi_dev_to_session(dev);
+
+	ISCSI_DBG_TRANS_SESSION(session, "Scanning session\n");
+
 	shost = iscsi_session_to_shost(session);
 	ihost = shost->shost_data;
@@ -448,8 +484,7 @@ static int iscsi_user_scan_session(struct device *dev, void *data)
 	spin_lock_irqsave(&session->lock, flags);
 	if (session->state != ISCSI_SESSION_LOGGED_IN) {
 		spin_unlock_irqrestore(&session->lock, flags);
-		mutex_unlock(&ihost->mutex);
-		return 0;
+		goto user_scan_exit;
 	}
id = session->target_id;
spin_unlock_irqrestore(&session->lock, flags);
@@ -462,7 +497,10 @@ static int iscsi_user_scan_session(struct device *dev, void *data)
 			scsi_scan_target(&session->dev, 0, id,
 					 scan_data->lun, 1);
 	}
+
+user_scan_exit:
 	mutex_unlock(&ihost->mutex);
+	ISCSI_DBG_TRANS_SESSION(session, "Completed session scan\n");
 	return 0;
 }
@@ -522,7 +560,9 @@ static void session_recovery_timedout(struct work_struct *work)
 	if (session->transport->session_recovery_timedout)
 		session->transport->session_recovery_timedout(session);
 
+	ISCSI_DBG_TRANS_SESSION(session, "Unblocking SCSI target\n");
 	scsi_target_unblock(&session->dev);
+	ISCSI_DBG_TRANS_SESSION(session, "Completed unblocking SCSI target\n");
 }
static void __iscsi_unblock_session(struct work_struct *work)
@@ -534,6 +574,7 @@ static void __iscsi_unblock_session(struct work_struct *work)
 	struct iscsi_cls_host *ihost = shost->shost_data;
 	unsigned long flags;
 
+	ISCSI_DBG_TRANS_SESSION(session, "Unblocking session\n");
 	/*
 	 * The recovery and unblock work get run from the same workqueue,
 	 * so try to cancel it if it was going to run after this unblock.
@@ -553,6 +594,7 @@ static void __iscsi_unblock_session(struct work_struct *work)
 		if (scsi_queue_work(shost, &session->scan_work))
 			atomic_inc(&ihost->nr_scans);
 	}
+	ISCSI_DBG_TRANS_SESSION(session, "Completed unblocking session\n");
 }
/**
@@ -579,10 +621,12 @@ static void __iscsi_block_session(struct work_struct *work)
 			block_work);
 	unsigned long flags;
 
+	ISCSI_DBG_TRANS_SESSION(session, "Blocking session\n");
 	spin_lock_irqsave(&session->lock, flags);
 	session->state = ISCSI_SESSION_FAILED;
 	spin_unlock_irqrestore(&session->lock, flags);
 	scsi_target_block(&session->dev);
+	ISCSI_DBG_TRANS_SESSION(session, "Completed SCSI target blocking\n");
 	queue_delayed_work(iscsi_eh_timer_workq, &session->recovery_work,
 			   session->recovery_tmo * HZ);
 }
@@ -602,6 +646,8 @@ static void __iscsi_unbind_session(struct work_struct *work)
 	struct iscsi_cls_host *ihost = shost->shost_data;
 	unsigned long flags;
 
+	ISCSI_DBG_TRANS_SESSION(session, "Unbinding session\n");
+
 	/* Prevent new scans and make sure scanning is not in progress */
 	mutex_lock(&ihost->mutex);
 	spin_lock_irqsave(&session->lock, flags);
@@ -616,6 +662,7 @@ static void __iscsi_unbind_session(struct work_struct *work)
 	scsi_remove_target(&session->dev);
 	iscsi_session_event(session, ISCSI_KEVENT_UNBIND_SESSION);
+	ISCSI_DBG_TRANS_SESSION(session, "Completed target removal\n");
 }
struct iscsi_cls_session *
@@ -647,6 +694,8 @@ iscsi_alloc_session(struct Scsi_Host *shost, struct iscsi_transport *transport,
 	device_initialize(&session->dev);
 	if (dd_size)
 		session->dd_data = &session[1];
+
+	ISCSI_DBG_TRANS_SESSION(session, "Completed session allocation\n");
 	return session;
 }
EXPORT_SYMBOL_GPL(iscsi_alloc_session);
@@ -712,6 +761,7 @@ int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id)
 	spin_unlock_irqrestore(&sesslock, flags);
 
 	iscsi_session_event(session, ISCSI_KEVENT_CREATE_SESSION);
+	ISCSI_DBG_TRANS_SESSION(session, "Completed session adding\n");
 	return 0;
 
 release_host:
@@ -752,6 +802,7 @@ static void iscsi_conn_release(struct device *dev)
 	struct iscsi_cls_conn *conn = iscsi_dev_to_conn(dev);
 	struct device *parent = conn->dev.parent;
 
+	ISCSI_DBG_TRANS_CONN(conn, "Releasing conn\n");
 	kfree(conn);
 	put_device(parent);
 }
@@ -774,6 +825,8 @@ void iscsi_remove_session(struct iscsi_cls_session *session)
 	unsigned long flags;
 	int err;
 
+	ISCSI_DBG_TRANS_SESSION(session, "Removing session\n");
+
 	spin_lock_irqsave(&sesslock, flags);
 	list_del(&session->sess_list);
 	spin_unlock_irqrestore(&sesslock, flags);
@@ -807,12 +860,15 @@ void iscsi_remove_session(struct iscsi_cls_session *session)
 				    "for session. Error %d.\n", err);
 
 	transport_unregister_device(&session->dev);
+
+	ISCSI_DBG_TRANS_SESSION(session, "Completing session removal\n");
 	device_del(&session->dev);
 }
 EXPORT_SYMBOL_GPL(iscsi_remove_session);
 
 void iscsi_free_session(struct iscsi_cls_session *session)
 {
+	ISCSI_DBG_TRANS_SESSION(session, "Freeing session\n");
 	iscsi_session_event(session, ISCSI_KEVENT_DESTROY_SESSION);
 	put_device(&session->dev);
 }
@@ -828,6 +884,7 @@ EXPORT_SYMBOL_GPL(iscsi_free_session);
 int iscsi_destroy_session(struct iscsi_cls_session *session)
 {
 	iscsi_remove_session(session);
+	ISCSI_DBG_TRANS_SESSION(session, "Completing session destruction\n");
 	iscsi_free_session(session);
 	return 0;
 }
@@ -885,6 +942,8 @@ iscsi_create_conn(struct iscsi_cls_session *session, int dd_size, uint32_t cid)
 	list_add(&conn->conn_list, &connlist);
 	conn->active = 1;
 	spin_unlock_irqrestore(&connlock, flags);
+
+	ISCSI_DBG_TRANS_CONN(conn, "Completed conn creation\n");
 	return conn;
 
 release_parent_ref:
@@ -912,6 +971,7 @@ int iscsi_destroy_conn(struct iscsi_cls_conn *conn)
 	spin_unlock_irqrestore(&connlock, flags);
 
 	transport_unregister_device(&conn->dev);
+	ISCSI_DBG_TRANS_CONN(conn, "Completing conn destruction\n");
 	device_unregister(&conn->dev);
 	return 0;
 }
@@ -1200,6 +1260,9 @@ int iscsi_session_event(struct iscsi_cls_session *session,
 				    "Cannot notify userspace of session "
 				    "event %u. Check iscsi daemon\n",
 				    event);
+
+	ISCSI_DBG_TRANS_SESSION(session, "Completed handling event %d rc %d\n",
+				event, rc);
 	return rc;
 }
 EXPORT_SYMBOL_GPL(iscsi_session_event);
@@ -1221,6 +1284,8 @@ iscsi_if_create_session(struct iscsi_internal *priv, struct iscsi_endpoint *ep,
 	shost = iscsi_session_to_shost(session);
 	ev->r.c_session_ret.host_no = shost->host_no;
 	ev->r.c_session_ret.sid = session->sid;
+	ISCSI_DBG_TRANS_SESSION(session,
+				"Completed creating transport session\n");
 	return 0;
 }
@@ -1246,6 +1311,8 @@ iscsi_if_create_conn(struct iscsi_transport *transport, struct iscsi_uevent *ev)
 	ev->r.c_conn_ret.sid = session->sid;
 	ev->r.c_conn_ret.cid = conn->cid;
 
+	ISCSI_DBG_TRANS_CONN(conn, "Completed creating transport conn\n");
+
 	return 0;
 }
@@ -1258,8 +1325,10 @@ iscsi_if_destroy_conn(struct iscsi_transport *transport, struct iscsi_uevent *ev
 	if (!conn)
 		return -EINVAL;
 
+	ISCSI_DBG_TRANS_CONN(conn, "Destroying transport conn\n");
+
 	if (transport->destroy_conn)
 		transport->destroy_conn(conn);
 	return 0;
 }


@@ -1691,10 +1691,6 @@ sas_attach_transport(struct sas_function_template *ft)
 	i->f = ft;
 
-	count = 0;
-	SETUP_PORT_ATTRIBUTE(num_phys);
-	i->host_attrs[count] = NULL;
-
 	count = 0;
 	SETUP_PHY_ATTRIBUTE(initiator_port_protocols);
 	SETUP_PHY_ATTRIBUTE(target_port_protocols);
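The sas_attach_transport() hunk above appears to drop a stray attribute block. For context, the SETUP_*_ATTRIBUTE macros build counted, NULL-terminated attribute arrays, with one array and one counter run per attribute class, and each run is terminated once after its last entry. A simplified stand-alone sketch of the idiom (hypothetical names, plain strings instead of device attributes):

#include <stdio.h>

#define MAX_ATTRS 8

static const char *phy_attrs[MAX_ATTRS + 1];
static int count;

/* Stand-in for the kernel's SETUP_PHY_ATTRIBUTE() macro family. */
#define SETUP_PHY_ATTRIBUTE(name)	\
	(phy_attrs[count++] = #name)

int main(void)
{
	count = 0;
	SETUP_PHY_ATTRIBUTE(initiator_port_protocols);
	SETUP_PHY_ATTRIBUTE(target_port_protocols);
	phy_attrs[count] = NULL;	/* terminate the run once, at the end */

	for (int i = 0; phy_attrs[i]; i++)
		printf("phy attr: %s\n", phy_attrs[i]);
	return 0;
}

A terminator or counter reset dropped into the middle of a run would truncate or overwrite earlier entries, which is why such blocks only belong at run boundaries.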


@@ -2021,6 +2021,7 @@ static void sd_probe_async(void *data, async_cookie_t cookie)
 	sd_printk(KERN_NOTICE, sdkp, "Attached SCSI %sdisk\n",
 		  sdp->removable ? "removable " : "");
+	put_device(&sdkp->dev);
 }
 
 /**
@@ -2106,6 +2107,7 @@ static int sd_probe(struct device *dev)
 	get_device(&sdp->sdev_gendev);
+	get_device(&sdkp->dev);	/* prevent release before async_schedule */
 	async_schedule(sd_probe_async, sdkp);
 
 	return 0;
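The paired sd.c hunks above close a window in which the scsi_disk could be released while its async probe was still queued: sd_probe() takes an extra reference before async_schedule(), and sd_probe_async() drops it when the work completes. A stand-alone sketch of the same reference-across-async pattern (hypothetical names, pthreads and a toy refcount standing in for async_schedule() and struct kref):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct disk {
	int refs;		/* toy refcount; the kernel would use a kref */
	pthread_mutex_t lock;
};

static void disk_get(struct disk *d)
{
	pthread_mutex_lock(&d->lock);
	d->refs++;
	pthread_mutex_unlock(&d->lock);
}

static void disk_put(struct disk *d)
{
	pthread_mutex_lock(&d->lock);
	int refs = --d->refs;
	pthread_mutex_unlock(&d->lock);
	if (refs == 0) {
		printf("releasing disk\n");	/* runs exactly once, on the last put */
		free(d);
	}
}

static void *probe_async(void *data)
{
	struct disk *d = data;

	printf("async probe running\n");
	disk_put(d);	/* matches the get taken before the work was scheduled */
	return NULL;
}

int main(void)
{
	struct disk *d = calloc(1, sizeof(*d));
	pthread_t t;

	pthread_mutex_init(&d->lock, NULL);
	d->refs = 1;			/* the probe path's own reference */

	disk_get(d);			/* prevent release before the worker runs */
	pthread_create(&t, NULL, probe_async, d);

	disk_put(d);			/* probe path drops its own reference */
	pthread_join(t, NULL);
	return 0;
}

Whichever side drops the last reference performs the release, so the object outlives both the submitter and the queued work regardless of scheduling order.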

Some files were not shown because too many files have changed in this diff.