1
0
Fork 0

SCSI misc on 20170303

This is the set of stuff that didn't quite make the initial pull and a
 set of fixes for stuff which did.  The new stuff is basically lpfc
 (nvme), qedi and aacraid.  The fixes cover a lot of previously
 submitted stuff, the most important of which probably covers some of
 the failing irq vectors allocation and other fallout from having the
 SCSI command allocated as part of the block allocation functions.
 
 Signed-off-by: James E.J. Bottomley <jejb@linux.vnet.ibm.com>
 -----BEGIN PGP SIGNATURE-----
 Version: GnuPG v2
 
 iQIcBAABAgAGBQJYug/3AAoJEAVr7HOZEZN4bRwP/RvAW/NWlvs812gUeHNahUHx
 HF3EtY5YHneZev1L4fa1Myd8cNcB3gic53e1WvQhN5f+LczeI7iXHH8xIQK/B4GT
 OI5W7ZCGlhIamgsU8tUFGrtIOM6N/LIMZPMsaE62dcev0jC0Db4p/5yv5bUg1yuL
 JOLWqIaTYTx7b5FwR2cXKv7KGYpXonvcoVUDV6s8aMYToJhJiQ6JtWmjaAA+/OJ7
 HTcqNX/KSWQBG3ERjE3A/rGBFxPR9KPq8kpqVJlw39KEN9tMjXb0QAGvAeauNye2
 xaIjV41tXH1Ys3aoh6ZU+TpzN3HLlH/gegRe8671fUwqu/RnSzFC8Eidoddzgqne
 z5BF0Dy36FA5ol2HTFEwS5WM1gRM+z6YGnbhpE/XfyTL2sLHRynSXKxb02IdjRNi
 gCdV89AL8xdfPPFJjsx4hGWUJhalW/RwuPayAupePKGQHePabHa19McI2ssg0WBg
 OC6uuEk7vT95xuTZVJlrwXTJPnc9dxWJwzh21oEnvMfWM0VMW06EKT4AcX8FOsjL
 RswiYp9wl8JDmfNxyJnQJj0+QXeIE/gh35vBQCGsLVKY4+xzbi3yZfb1sZB0XOum
 yLK2LFQ+Ltk1TaPPSZsgXQZfodot0dFJPTnWr+whOfY1kepNlFRwQHltXnCWzgRs
 QG//WHsk0u1Mj2gehwmQ
 =e+6v
 -----END PGP SIGNATURE-----

Merge tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi

Pull more SCSI updates from James Bottomley:
 "This is the set of stuff that didn't quite make the initial pull and a
  set of fixes for stuff which did.

  The new stuff is basically lpfc (nvme), qedi and aacraid. The fixes
  cover a lot of previously submitted stuff, the most important of which
  probably covers some of the failing irq vectors allocation and other
  fallout from having the SCSI command allocated as part of the block
  allocation functions"

* tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi: (59 commits)
  scsi: qedi: Fix memory leak in tmf response processing.
  scsi: aacraid: remove redundant zero check on ret
  scsi: lpfc: use proper format string for dma_addr_t
  scsi: lpfc: use div_u64 for 64-bit division
  scsi: mac_scsi: Fix MAC_SCSI=m option when SCSI=m
  scsi: cciss: correct check map error.
  scsi: qla2xxx: fix spelling mistake: "seperator" -> "separator"
  scsi: aacraid: Fixed expander hotplug for SMART family
  scsi: mpt3sas: switch to pci_alloc_irq_vectors
  scsi: qedf: fixup compilation warning about atomic_t usage
  scsi: remove scsi_execute_req_flags
  scsi: merge __scsi_execute into scsi_execute
  scsi: simplify scsi_execute_req_flags
  scsi: make the sense header argument to scsi_test_unit_ready mandatory
  scsi: sd: improve TUR handling in sd_check_events
  scsi: always zero sshdr in scsi_normalize_sense
  scsi: scsi_dh_emc: return success in clariion_std_inquiry()
  scsi: fix memory leak of sdpk on when gd fails to allocate
  scsi: sd: make sd_devt_release() static
  scsi: qedf: Add QLogic FastLinQ offload FCoE driver framework.
  ...
hifive-unleashed-5.1
Linus Torvalds 2017-03-03 21:36:56 -08:00
commit a3b4924b02
87 changed files with 22412 additions and 3228 deletions

View File

@ -10336,6 +10336,12 @@ L: linux-scsi@vger.kernel.org
S: Supported
F: drivers/scsi/qedi/
QLOGIC QL41xxx FCOE DRIVER
M: QLogic-Storage-Upstream@cavium.com
L: linux-scsi@vger.kernel.org
S: Supported
F: drivers/scsi/qedf/
QNX4 FILESYSTEM
M: Anders Larsen <al@alarsen.net>
W: http://www.alarsen.net/linux/qnx4fs/

View File

@ -600,6 +600,7 @@ int ata_cmd_ioctl(struct scsi_device *scsidev, void __user *arg)
u8 args[4], *argbuf = NULL, *sensebuf = NULL;
int argsize = 0;
enum dma_data_direction data_dir;
struct scsi_sense_hdr sshdr;
int cmd_result;
if (arg == NULL)
@ -648,7 +649,7 @@ int ata_cmd_ioctl(struct scsi_device *scsidev, void __user *arg)
/* Good values for timeout and retries? Values below
from scsi_ioctl_send_command() for default case... */
cmd_result = scsi_execute(scsidev, scsi_cmd, data_dir, argbuf, argsize,
sensebuf, (10*HZ), 5, 0, NULL);
sensebuf, &sshdr, (10*HZ), 5, 0, 0, NULL);
if (driver_byte(cmd_result) == DRIVER_SENSE) {/* sense data available */
u8 *desc = sensebuf + 8;
@ -657,9 +658,6 @@ int ata_cmd_ioctl(struct scsi_device *scsidev, void __user *arg)
/* If we set cc then ATA pass-through will cause a
* check condition even if no error. Filter that. */
if (cmd_result & SAM_STAT_CHECK_CONDITION) {
struct scsi_sense_hdr sshdr;
scsi_normalize_sense(sensebuf, SCSI_SENSE_BUFFERSIZE,
&sshdr);
if (sshdr.sense_key == RECOVERED_ERROR &&
sshdr.asc == 0 && sshdr.ascq == 0x1d)
cmd_result &= ~SAM_STAT_CHECK_CONDITION;
@ -707,6 +705,7 @@ int ata_task_ioctl(struct scsi_device *scsidev, void __user *arg)
int rc = 0;
u8 scsi_cmd[MAX_COMMAND_SIZE];
u8 args[7], *sensebuf = NULL;
struct scsi_sense_hdr sshdr;
int cmd_result;
if (arg == NULL)
@ -734,7 +733,7 @@ int ata_task_ioctl(struct scsi_device *scsidev, void __user *arg)
/* Good values for timeout and retries? Values below
from scsi_ioctl_send_command() for default case... */
cmd_result = scsi_execute(scsidev, scsi_cmd, DMA_NONE, NULL, 0,
sensebuf, (10*HZ), 5, 0, NULL);
sensebuf, &sshdr, (10*HZ), 5, 0, 0, NULL);
if (driver_byte(cmd_result) == DRIVER_SENSE) {/* sense data available */
u8 *desc = sensebuf + 8;
@ -743,9 +742,6 @@ int ata_task_ioctl(struct scsi_device *scsidev, void __user *arg)
/* If we set cc then ATA pass-through will cause a
* check condition even if no error. Filter that. */
if (cmd_result & SAM_STAT_CHECK_CONDITION) {
struct scsi_sense_hdr sshdr;
scsi_normalize_sense(sensebuf, SCSI_SENSE_BUFFERSIZE,
&sshdr);
if (sshdr.sense_key == RECOVERED_ERROR &&
sshdr.asc == 0 && sshdr.ascq == 0x1d)
cmd_result &= ~SAM_STAT_CHECK_CONDITION;

View File

@ -348,7 +348,7 @@ static void cciss_unmap_sg_chain_block(ctlr_info_t *h, CommandList_struct *c)
pci_unmap_single(h->pdev, temp64.val, chain_sg->Len, PCI_DMA_TODEVICE);
}
static void cciss_map_sg_chain_block(ctlr_info_t *h, CommandList_struct *c,
static int cciss_map_sg_chain_block(ctlr_info_t *h, CommandList_struct *c,
SGDescriptor_struct *chain_block, int len)
{
SGDescriptor_struct *chain_sg;
@ -359,8 +359,16 @@ static void cciss_map_sg_chain_block(ctlr_info_t *h, CommandList_struct *c,
chain_sg->Len = len;
temp64.val = pci_map_single(h->pdev, chain_block, len,
PCI_DMA_TODEVICE);
if (dma_mapping_error(&h->pdev->dev, temp64.val)) {
dev_warn(&h->pdev->dev,
"%s: error mapping chain block for DMA\n",
__func__);
return -1;
}
chain_sg->Addr.lower = temp64.val32.lower;
chain_sg->Addr.upper = temp64.val32.upper;
return 0;
}
#include "cciss_scsi.c" /* For SCSI tape support */
@ -3369,15 +3377,31 @@ static void do_cciss_request(struct request_queue *q)
temp64.val = (__u64) pci_map_page(h->pdev, sg_page(&tmp_sg[i]),
tmp_sg[i].offset,
tmp_sg[i].length, dir);
if (dma_mapping_error(&h->pdev->dev, temp64.val)) {
dev_warn(&h->pdev->dev,
"%s: error mapping page for DMA\n", __func__);
creq->errors = make_status_bytes(SAM_STAT_GOOD,
0, DRIVER_OK,
DID_SOFT_ERROR);
cmd_free(h, c);
return;
}
curr_sg[sg_index].Addr.lower = temp64.val32.lower;
curr_sg[sg_index].Addr.upper = temp64.val32.upper;
curr_sg[sg_index].Ext = 0; /* we are not chaining */
++sg_index;
}
if (chained)
cciss_map_sg_chain_block(h, c, h->cmd_sg_list[c->cmdindex],
if (chained) {
if (cciss_map_sg_chain_block(h, c, h->cmd_sg_list[c->cmdindex],
(seg - (h->max_cmd_sgentries - 1)) *
sizeof(SGDescriptor_struct));
sizeof(SGDescriptor_struct))) {
creq->errors = make_status_bytes(SAM_STAT_GOOD,
0, DRIVER_OK,
DID_SOFT_ERROR);
cmd_free(h, c);
return;
}
}
/* track how many SG entries we are using */
if (seg > h->maxSG)

View File

@ -1235,11 +1235,13 @@ config SCSI_QLOGICPTI
source "drivers/scsi/qla2xxx/Kconfig"
source "drivers/scsi/qla4xxx/Kconfig"
source "drivers/scsi/qedi/Kconfig"
source "drivers/scsi/qedf/Kconfig"
config SCSI_LPFC
tristate "Emulex LightPulse Fibre Channel Support"
depends on PCI && SCSI
depends on SCSI_FC_ATTRS
depends on NVME_FC && NVME_TARGET_FC
select CRC_T10DIF
help
This lpfc driver supports the Emulex LightPulse
@ -1478,7 +1480,7 @@ config ATARI_SCSI
config MAC_SCSI
tristate "Macintosh NCR5380 SCSI"
depends on MAC && SCSI=y
depends on MAC && SCSI
select SCSI_SPI_ATTRS
help
This is the NCR 5380 SCSI controller included on most of the 68030

View File

@ -41,6 +41,7 @@ obj-$(CONFIG_FCOE) += fcoe/
obj-$(CONFIG_FCOE_FNIC) += fnic/
obj-$(CONFIG_SCSI_SNIC) += snic/
obj-$(CONFIG_SCSI_BNX2X_FCOE) += libfc/ fcoe/ bnx2fc/
obj-$(CONFIG_QEDF) += qedf/
obj-$(CONFIG_ISCSI_TCP) += libiscsi.o libiscsi_tcp.o iscsi_tcp.o
obj-$(CONFIG_INFINIBAND_ISER) += libiscsi.o
obj-$(CONFIG_ISCSI_BOOT_SYSFS) += iscsi_boot_sysfs.o

View File

@ -294,6 +294,10 @@ MODULE_PARM_DESC(aif_timeout, "The duration of time in seconds to wait for"
"deregistering them. This is typically adjusted for heavily burdened"
" systems.");
int aac_fib_dump;
module_param(aac_fib_dump, int, 0644);
MODULE_PARM_DESC(aac_fib_dump, "Dump controller fibs prior to IOP_RESET 0=off, 1=on");
int numacb = -1;
module_param(numacb, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(numacb, "Request a limit to the number of adapter control"
@ -311,7 +315,7 @@ module_param(update_interval, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(update_interval, "Interval in seconds between time sync"
" updates issued to adapter.");
int check_interval = 24 * 60 * 60;
int check_interval = 60;
module_param(check_interval, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(check_interval, "Interval in seconds between adapter health"
" checks.");
@ -483,7 +487,7 @@ int aac_get_containers(struct aac_dev *dev)
if (status >= 0) {
dresp = (struct aac_get_container_count_resp *)fib_data(fibptr);
maximum_num_containers = le32_to_cpu(dresp->ContainerSwitchEntries);
if (fibptr->dev->supplement_adapter_info.SupportedOptions2 &
if (fibptr->dev->supplement_adapter_info.supported_options2 &
AAC_OPTION_SUPPORTED_240_VOLUMES) {
maximum_num_containers =
le32_to_cpu(dresp->MaxSimpleVolumes);
@ -639,13 +643,16 @@ static void _aac_probe_container2(void * context, struct fib * fibptr)
fsa_dev_ptr = fibptr->dev->fsa_dev;
if (fsa_dev_ptr) {
struct aac_mount * dresp = (struct aac_mount *) fib_data(fibptr);
__le32 sup_options2;
fsa_dev_ptr += scmd_id(scsicmd);
sup_options2 =
fibptr->dev->supplement_adapter_info.supported_options2;
if ((le32_to_cpu(dresp->status) == ST_OK) &&
(le32_to_cpu(dresp->mnt[0].vol) != CT_NONE) &&
(le32_to_cpu(dresp->mnt[0].state) != FSCS_HIDDEN)) {
if (!(fibptr->dev->supplement_adapter_info.SupportedOptions2 &
AAC_OPTION_VARIABLE_BLOCK_SIZE)) {
if (!(sup_options2 & AAC_OPTION_VARIABLE_BLOCK_SIZE)) {
dresp->mnt[0].fileinfo.bdevinfo.block_size = 0x200;
fsa_dev_ptr->block_size = 0x200;
} else {
@ -688,7 +695,7 @@ static void _aac_probe_container1(void * context, struct fib * fibptr)
int status;
dresp = (struct aac_mount *) fib_data(fibptr);
if (!(fibptr->dev->supplement_adapter_info.SupportedOptions2 &
if (!(fibptr->dev->supplement_adapter_info.supported_options2 &
AAC_OPTION_VARIABLE_BLOCK_SIZE))
dresp->mnt[0].capacityhigh = 0;
if ((le32_to_cpu(dresp->status) != ST_OK) ||
@ -705,7 +712,7 @@ static void _aac_probe_container1(void * context, struct fib * fibptr)
dinfo = (struct aac_query_mount *)fib_data(fibptr);
if (fibptr->dev->supplement_adapter_info.SupportedOptions2 &
if (fibptr->dev->supplement_adapter_info.supported_options2 &
AAC_OPTION_VARIABLE_BLOCK_SIZE)
dinfo->command = cpu_to_le32(VM_NameServeAllBlk);
else
@ -745,7 +752,7 @@ static int _aac_probe_container(struct scsi_cmnd * scsicmd, int (*callback)(stru
dinfo = (struct aac_query_mount *)fib_data(fibptr);
if (fibptr->dev->supplement_adapter_info.SupportedOptions2 &
if (fibptr->dev->supplement_adapter_info.supported_options2 &
AAC_OPTION_VARIABLE_BLOCK_SIZE)
dinfo->command = cpu_to_le32(VM_NameServeAllBlk);
else
@ -896,12 +903,14 @@ char * get_container_type(unsigned tindex)
static void setinqstr(struct aac_dev *dev, void *data, int tindex)
{
struct scsi_inq *str;
struct aac_supplement_adapter_info *sup_adap_info;
sup_adap_info = &dev->supplement_adapter_info;
str = (struct scsi_inq *)(data); /* cast data to scsi inq block */
memset(str, ' ', sizeof(*str));
if (dev->supplement_adapter_info.AdapterTypeText[0]) {
char * cp = dev->supplement_adapter_info.AdapterTypeText;
if (sup_adap_info->adapter_type_text[0]) {
char *cp = sup_adap_info->adapter_type_text;
int c;
if ((cp[0] == 'A') && (cp[1] == 'O') && (cp[2] == 'C'))
inqstrcpy("SMC", str->vid);
@ -911,8 +920,7 @@ static void setinqstr(struct aac_dev *dev, void *data, int tindex)
++cp;
c = *cp;
*cp = '\0';
inqstrcpy (dev->supplement_adapter_info.AdapterTypeText,
str->vid);
inqstrcpy(sup_adap_info->adapter_type_text, str->vid);
*cp = c;
while (*cp && *cp != ' ')
++cp;
@ -1675,8 +1683,8 @@ int aac_issue_bmic_identify(struct aac_dev *dev, u32 bus, u32 target)
if (!identify_resp)
goto fib_free_ptr;
vbus = (u32)le16_to_cpu(dev->supplement_adapter_info.VirtDeviceBus);
vid = (u32)le16_to_cpu(dev->supplement_adapter_info.VirtDeviceTarget);
vbus = (u32)le16_to_cpu(dev->supplement_adapter_info.virt_device_bus);
vid = (u32)le16_to_cpu(dev->supplement_adapter_info.virt_device_target);
aac_fib_init(fibptr);
@ -1815,9 +1823,9 @@ int aac_report_phys_luns(struct aac_dev *dev, struct fib *fibptr, int rescan)
}
vbus = (u32) le16_to_cpu(
dev->supplement_adapter_info.VirtDeviceBus);
dev->supplement_adapter_info.virt_device_bus);
vid = (u32) le16_to_cpu(
dev->supplement_adapter_info.VirtDeviceTarget);
dev->supplement_adapter_info.virt_device_target);
aac_fib_init(fibptr);
@ -1893,7 +1901,7 @@ int aac_get_adapter_info(struct aac_dev* dev)
}
memcpy(&dev->adapter_info, info, sizeof(*info));
dev->supplement_adapter_info.VirtDeviceBus = 0xffff;
dev->supplement_adapter_info.virt_device_bus = 0xffff;
if (dev->adapter_info.options & AAC_OPT_SUPPLEMENT_ADAPTER_INFO) {
struct aac_supplement_adapter_info * sinfo;
@ -1961,7 +1969,7 @@ int aac_get_adapter_info(struct aac_dev* dev)
}
if (!dev->sync_mode && dev->sa_firmware &&
dev->supplement_adapter_info.VirtDeviceBus != 0xffff) {
dev->supplement_adapter_info.virt_device_bus != 0xffff) {
/* Thor SA Firmware -> CISS_REPORT_PHYSICAL_LUNS */
rcode = aac_report_phys_luns(dev, fibptr, AAC_INIT);
}
@ -1976,8 +1984,8 @@ int aac_get_adapter_info(struct aac_dev* dev)
(tmp>>16)&0xff,
tmp&0xff,
le32_to_cpu(dev->adapter_info.kernelbuild),
(int)sizeof(dev->supplement_adapter_info.BuildDate),
dev->supplement_adapter_info.BuildDate);
(int)sizeof(dev->supplement_adapter_info.build_date),
dev->supplement_adapter_info.build_date);
tmp = le32_to_cpu(dev->adapter_info.monitorrev);
printk(KERN_INFO "%s%d: monitor %d.%d-%d[%d]\n",
dev->name, dev->id,
@ -1993,14 +2001,15 @@ int aac_get_adapter_info(struct aac_dev* dev)
shost_to_class(dev->scsi_host_ptr), buffer))
printk(KERN_INFO "%s%d: serial %s",
dev->name, dev->id, buffer);
if (dev->supplement_adapter_info.VpdInfo.Tsid[0]) {
if (dev->supplement_adapter_info.vpd_info.tsid[0]) {
printk(KERN_INFO "%s%d: TSID %.*s\n",
dev->name, dev->id,
(int)sizeof(dev->supplement_adapter_info.VpdInfo.Tsid),
dev->supplement_adapter_info.VpdInfo.Tsid);
(int)sizeof(dev->supplement_adapter_info
.vpd_info.tsid),
dev->supplement_adapter_info.vpd_info.tsid);
}
if (!aac_check_reset || ((aac_check_reset == 1) &&
(dev->supplement_adapter_info.SupportedOptions2 &
(dev->supplement_adapter_info.supported_options2 &
AAC_OPTION_IGNORE_RESET))) {
printk(KERN_INFO "%s%d: Reset Adapter Ignored\n",
dev->name, dev->id);
@ -2008,7 +2017,7 @@ int aac_get_adapter_info(struct aac_dev* dev)
}
dev->cache_protected = 0;
dev->jbod = ((dev->supplement_adapter_info.FeatureBits &
dev->jbod = ((dev->supplement_adapter_info.feature_bits &
AAC_FEATURE_JBOD) != 0);
dev->nondasd_support = 0;
dev->raid_scsi_mode = 0;
@ -2631,7 +2640,7 @@ static int aac_start_stop(struct scsi_cmnd *scsicmd)
struct scsi_device *sdev = scsicmd->device;
struct aac_dev *aac = (struct aac_dev *)sdev->host->hostdata;
if (!(aac->supplement_adapter_info.SupportedOptions2 &
if (!(aac->supplement_adapter_info.supported_options2 &
AAC_OPTION_POWER_MANAGEMENT)) {
scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
SAM_STAT_GOOD;

View File

@ -97,7 +97,7 @@ enum {
#define PMC_GLOBAL_INT_BIT0 0x00000001
#ifndef AAC_DRIVER_BUILD
# define AAC_DRIVER_BUILD 50740
# define AAC_DRIVER_BUILD 50792
# define AAC_DRIVER_BRANCH "-custom"
#endif
#define MAXIMUM_NUM_CONTAINERS 32
@ -1380,57 +1380,57 @@ struct aac_adapter_info
struct aac_supplement_adapter_info
{
u8 AdapterTypeText[17+1];
u8 Pad[2];
__le32 FlashMemoryByteSize;
__le32 FlashImageId;
__le32 MaxNumberPorts;
__le32 Version;
__le32 FeatureBits;
u8 SlotNumber;
u8 ReservedPad0[3];
u8 BuildDate[12];
__le32 CurrentNumberPorts;
u8 adapter_type_text[17+1];
u8 pad[2];
__le32 flash_memory_byte_size;
__le32 flash_image_id;
__le32 max_number_ports;
__le32 version;
__le32 feature_bits;
u8 slot_number;
u8 reserved_pad0[3];
u8 build_date[12];
__le32 current_number_ports;
struct {
u8 AssemblyPn[8];
u8 FruPn[8];
u8 BatteryFruPn[8];
u8 EcVersionString[8];
u8 Tsid[12];
} VpdInfo;
__le32 FlashFirmwareRevision;
__le32 FlashFirmwareBuild;
__le32 RaidTypeMorphOptions;
__le32 FlashFirmwareBootRevision;
__le32 FlashFirmwareBootBuild;
u8 MfgPcbaSerialNo[12];
u8 MfgWWNName[8];
__le32 SupportedOptions2;
__le32 StructExpansion;
u8 assembly_pn[8];
u8 fru_pn[8];
u8 battery_fru_pn[8];
u8 ec_version_string[8];
u8 tsid[12];
} vpd_info;
__le32 flash_firmware_revision;
__le32 flash_firmware_build;
__le32 raid_type_morph_options;
__le32 flash_firmware_boot_revision;
__le32 flash_firmware_boot_build;
u8 mfg_pcba_serial_no[12];
u8 mfg_wwn_name[8];
__le32 supported_options2;
__le32 struct_expansion;
/* StructExpansion == 1 */
__le32 FeatureBits3;
__le32 SupportedPerformanceModes;
u8 HostBusType; /* uses HOST_BUS_TYPE_xxx defines */
u8 HostBusWidth; /* actual width in bits or links */
u16 HostBusSpeed; /* actual bus speed/link rate in MHz */
u8 MaxRRCDrives; /* max. number of ITP-RRC drives/pool */
u8 MaxDiskXtasks; /* max. possible num of DiskX Tasks */
__le32 feature_bits3;
__le32 supported_performance_modes;
u8 host_bus_type; /* uses HOST_BUS_TYPE_xxx defines */
u8 host_bus_width; /* actual width in bits or links */
u16 host_bus_speed; /* actual bus speed/link rate in MHz */
u8 max_rrc_drives; /* max. number of ITP-RRC drives/pool */
u8 max_disk_xtasks; /* max. possible num of DiskX Tasks */
u8 CpldVerLoaded;
u8 CpldVerInFlash;
u8 cpld_ver_loaded;
u8 cpld_ver_in_flash;
__le64 MaxRRCCapacity;
__le32 CompiledMaxHistLogLevel;
u8 CustomBoardName[12];
u16 SupportedCntlrMode; /* identify supported controller mode */
u16 ReservedForFuture16;
__le32 SupportedOptions3; /* reserved for future options */
__le64 max_rrc_capacity;
__le32 compiled_max_hist_log_level;
u8 custom_board_name[12];
u16 supported_cntlr_mode; /* identify supported controller mode */
u16 reserved_for_future16;
__le32 supported_options3; /* reserved for future options */
__le16 VirtDeviceBus; /* virt. SCSI device for Thor */
__le16 VirtDeviceTarget;
__le16 VirtDeviceLUN;
__le16 Unused;
__le32 ReservedForFutureGrowth[68];
__le16 virt_device_bus; /* virt. SCSI device for Thor */
__le16 virt_device_target;
__le16 virt_device_lun;
__le16 unused;
__le32 reserved_for_future_growth[68];
};
#define AAC_FEATURE_FALCON cpu_to_le32(0x00000010)
@ -1444,6 +1444,10 @@ struct aac_supplement_adapter_info
#define AAC_OPTION_VARIABLE_BLOCK_SIZE cpu_to_le32(0x00040000)
/* 240 simple volume support */
#define AAC_OPTION_SUPPORTED_240_VOLUMES cpu_to_le32(0x10000000)
/*
* Supports FIB dump sync command send prior to IOP_RESET
*/
#define AAC_OPTION_SUPPORTED3_IOP_RESET_FIB_DUMP cpu_to_le32(0x00004000)
#define AAC_SIS_VERSION_V3 3
#define AAC_SIS_SLOT_UNKNOWN 0xFF
@ -2483,6 +2487,7 @@ struct aac_hba_info {
#define GET_DRIVER_BUFFER_PROPERTIES 0x00000023
#define RCV_TEMP_READINGS 0x00000025
#define GET_COMM_PREFERRED_SETTINGS 0x00000026
#define IOP_RESET_FW_FIB_DUMP 0x00000034
#define IOP_RESET 0x00001000
#define IOP_RESET_ALWAYS 0x00001001
#define RE_INIT_ADAPTER 0x000000ee
@ -2639,6 +2644,7 @@ void aac_hba_callback(void *context, struct fib *fibptr);
#define fib_data(fibctx) ((void *)(fibctx)->hw_fib_va->data)
struct aac_dev *aac_init_adapter(struct aac_dev *dev);
void aac_src_access_devreg(struct aac_dev *dev, int mode);
void aac_set_intx_mode(struct aac_dev *dev);
int aac_get_config_status(struct aac_dev *dev, int commit_flag);
int aac_get_containers(struct aac_dev *dev);
int aac_scsi_cmd(struct scsi_cmnd *cmd);
@ -2685,4 +2691,5 @@ extern int aac_commit;
extern int update_interval;
extern int check_interval;
extern int aac_check_reset;
extern int aac_fib_dump;
#endif

View File

@ -580,7 +580,7 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
goto cleanup;
}
chn = aac_logical_to_phys(user_srbcmd->channel);
chn = user_srbcmd->channel;
if (chn < AAC_MAX_BUSES && user_srbcmd->id < AAC_MAX_TARGETS &&
dev->hba_map[chn][user_srbcmd->id].devtype ==
AAC_DEVTYPE_NATIVE_RAW) {

View File

@ -330,7 +330,7 @@ int aac_send_shutdown(struct aac_dev * dev)
dev->pdev->device == PMC_DEVICE_S8 ||
dev->pdev->device == PMC_DEVICE_S9) &&
dev->msi_enabled)
aac_src_access_devreg(dev, AAC_ENABLE_INTX);
aac_set_intx_mode(dev);
return status;
}

View File

@ -95,12 +95,20 @@ static int fib_map_alloc(struct aac_dev *dev)
void aac_fib_map_free(struct aac_dev *dev)
{
if (dev->hw_fib_va && dev->max_cmd_size) {
pci_free_consistent(dev->pdev,
(dev->max_cmd_size *
(dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB)),
dev->hw_fib_va, dev->hw_fib_pa);
}
size_t alloc_size;
size_t fib_size;
int num_fibs;
if(!dev->hw_fib_va || !dev->max_cmd_size)
return;
num_fibs = dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB;
fib_size = dev->max_fib_size + sizeof(struct aac_fib_xporthdr);
alloc_size = fib_size * num_fibs + ALIGN32 - 1;
pci_free_consistent(dev->pdev, alloc_size, dev->hw_fib_va,
dev->hw_fib_pa);
dev->hw_fib_va = NULL;
dev->hw_fib_pa = 0;
}
@ -153,22 +161,20 @@ int aac_fib_setup(struct aac_dev * dev)
if (i<0)
return -ENOMEM;
/* 32 byte alignment for PMC */
hw_fib_pa = (dev->hw_fib_pa + (ALIGN32 - 1)) & ~(ALIGN32 - 1);
dev->hw_fib_va = (struct hw_fib *)((unsigned char *)dev->hw_fib_va +
(hw_fib_pa - dev->hw_fib_pa));
dev->hw_fib_pa = hw_fib_pa;
memset(dev->hw_fib_va, 0,
(dev->max_cmd_size + sizeof(struct aac_fib_xporthdr)) *
(dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB));
/* add Xport header */
dev->hw_fib_va = (struct hw_fib *)((unsigned char *)dev->hw_fib_va +
sizeof(struct aac_fib_xporthdr));
dev->hw_fib_pa += sizeof(struct aac_fib_xporthdr);
/* 32 byte alignment for PMC */
hw_fib_pa = (dev->hw_fib_pa + (ALIGN32 - 1)) & ~(ALIGN32 - 1);
hw_fib = (struct hw_fib *)((unsigned char *)dev->hw_fib_va +
(hw_fib_pa - dev->hw_fib_pa));
/* add Xport header */
hw_fib = (struct hw_fib *)((unsigned char *)hw_fib +
sizeof(struct aac_fib_xporthdr));
hw_fib_pa += sizeof(struct aac_fib_xporthdr);
hw_fib = dev->hw_fib_va;
hw_fib_pa = dev->hw_fib_pa;
/*
* Initialise the fibs
*/
@ -461,6 +467,35 @@ int aac_queue_get(struct aac_dev * dev, u32 * index, u32 qid, struct hw_fib * hw
return 0;
}
#ifdef CONFIG_EEH
/*
 * aac_check_eeh_failure - probe the EEH layer for a pending PCI failure
 * @dev: adapter whose PCI device should be checked
 *
 * Must be called before any PCI operation (i.e. before
 * aac_adapter_check_health()).  If a failure is pending, the EEH
 * machinery itself will handle the error and reset the device when
 * necessary.
 *
 * Returns 1 when eeh_dev_check_failure() reports an EEH error for this
 * device, 0 otherwise.
 */
static inline int aac_check_eeh_failure(struct aac_dev *dev)
{
	struct eeh_dev *edev;

	edev = pci_dev_to_eeh_dev(dev->pdev);
	return eeh_dev_check_failure(edev) ? 1 : 0;
}
#else
/* Without CONFIG_EEH there is nothing to check; never report a failure. */
static inline int aac_check_eeh_failure(struct aac_dev *dev)
{
	return 0;
}
#endif
/*
* Define the highest level of host to adapter communication routines.
 * These routines will support host to adapter FS communication. These
@ -496,9 +531,12 @@ int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size,
unsigned long mflags = 0;
unsigned long sflags = 0;
if (!(hw_fib->header.XferState & cpu_to_le32(HostOwned)))
return -EBUSY;
if (hw_fib->header.XferState & cpu_to_le32(AdapterProcessed))
return -EINVAL;
/*
* There are 5 cases with the wait and response requested flags.
* The only invalid cases are if the caller requests to wait and
@ -662,6 +700,10 @@ int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size,
}
return -ETIMEDOUT;
}
if (aac_check_eeh_failure(dev))
return -EFAULT;
if ((blink = aac_adapter_check_health(dev)) > 0) {
if (wait == -1) {
printk(KERN_ERR "aacraid: aac_fib_send: adapter blinkLED 0x%x.\n"
@ -755,7 +797,12 @@ int aac_hba_send(u8 command, struct fib *fibptr, fib_callback callback,
FIB_COUNTER_INCREMENT(aac_config.NativeSent);
if (wait) {
spin_unlock_irqrestore(&fibptr->event_lock, flags);
if (aac_check_eeh_failure(dev))
return -EFAULT;
/* Only set for first known interruptable command */
if (down_interruptible(&fibptr->event_wait)) {
fibptr->done = 2;
@ -1590,11 +1637,29 @@ static int _aac_reset_adapter(struct aac_dev *aac, int forced, u8 reset_type)
command->SCp.phase = AAC_OWNER_ERROR_HANDLER;
command->scsi_done(command);
}
/*
* Any Device that was already marked offline needs to be cleaned up
*/
__shost_for_each_device(dev, host) {
if (!scsi_device_online(dev)) {
sdev_printk(KERN_INFO, dev, "Removing offline device\n");
scsi_remove_device(dev);
scsi_device_put(dev);
}
}
retval = 0;
out:
aac->in_reset = 0;
scsi_unblock_requests(host);
/*
* Issue bus rescan to catch any configuration that might have
* occurred
*/
if (!retval) {
dev_info(&aac->pdev->dev, "Issuing bus rescan\n");
scsi_scan_host(host);
}
if (jafo) {
spin_lock_irq(host->host_lock);
}
@ -1815,7 +1880,7 @@ int aac_check_health(struct aac_dev * aac)
printk(KERN_ERR "%s: Host adapter BLINK LED 0x%x\n", aac->name, BlinkLED);
if (!aac_check_reset || ((aac_check_reset == 1) &&
(aac->supplement_adapter_info.SupportedOptions2 &
(aac->supplement_adapter_info.supported_options2 &
AAC_OPTION_IGNORE_RESET)))
goto out;
host = aac->scsi_host_ptr;
@ -1843,9 +1908,6 @@ static void aac_resolve_luns(struct aac_dev *dev)
for (bus = 0; bus < AAC_MAX_BUSES; bus++) {
for (target = 0; target < AAC_MAX_TARGETS; target++) {
if (aac_phys_to_logical(bus) == ENCLOSURE_CHANNEL)
continue;
if (bus == CONTAINER_CHANNEL)
channel = CONTAINER_CHANNEL;
else
@ -1857,7 +1919,7 @@ static void aac_resolve_luns(struct aac_dev *dev)
sdev = scsi_device_lookup(dev->scsi_host_ptr, channel,
target, 0);
if (!sdev && devtype)
if (!sdev && new_devtype)
scsi_add_device(dev->scsi_host_ptr, channel,
target, 0);
else if (sdev && new_devtype != devtype)
@ -2150,7 +2212,7 @@ static void aac_process_events(struct aac_dev *dev)
/* Thor AIF */
aac_handle_sa_aif(dev, fib);
aac_fib_adapter_complete(fib, (u16)sizeof(u32));
continue;
goto free_fib;
}
/*
* We will process the FIB here or pass it to a
@ -2264,8 +2326,8 @@ static int aac_send_wellness_command(struct aac_dev *dev, char *wellness_str,
aac_fib_init(fibptr);
vbus = (u32)le16_to_cpu(dev->supplement_adapter_info.VirtDeviceBus);
vid = (u32)le16_to_cpu(dev->supplement_adapter_info.VirtDeviceTarget);
vbus = (u32)le16_to_cpu(dev->supplement_adapter_info.virt_device_bus);
vid = (u32)le16_to_cpu(dev->supplement_adapter_info.virt_device_target);
srbcmd = (struct aac_srb *)fib_data(fibptr);
@ -2434,7 +2496,7 @@ int aac_command_thread(void *data)
/* Don't even try to talk to adapter if its sick */
ret = aac_check_health(dev);
if (!dev->queues)
if (ret || !dev->queues)
break;
next_check_jiffies = jiffies
+ ((long)(unsigned)check_interval)
@ -2446,8 +2508,7 @@ int aac_command_thread(void *data)
&& (now.tv_usec > (1000000 / HZ)))
difference = (((1000000 - now.tv_usec) * HZ)
+ 500000) / 1000000;
else if (ret == 0) {
else {
if (now.tv_usec > 500000)
++now.tv_sec;
@ -2458,9 +2519,6 @@ int aac_command_thread(void *data)
ret = aac_send_hosttime(dev, &now);
difference = (long)(unsigned)update_interval*HZ;
} else {
/* retry shortly */
difference = 10 * HZ;
}
next_jiffies = jiffies + difference;
if (time_before(next_check_jiffies,next_jiffies))

View File

@ -891,13 +891,13 @@ static int aac_eh_reset(struct scsi_cmnd* cmd)
* Adapters that support a register, instead of a commanded,
* reset.
*/
if (((aac->supplement_adapter_info.SupportedOptions2 &
if (((aac->supplement_adapter_info.supported_options2 &
AAC_OPTION_MU_RESET) ||
(aac->supplement_adapter_info.SupportedOptions2 &
(aac->supplement_adapter_info.supported_options2 &
AAC_OPTION_DOORBELL_RESET)) &&
aac_check_reset &&
((aac_check_reset != 1) ||
!(aac->supplement_adapter_info.SupportedOptions2 &
!(aac->supplement_adapter_info.supported_options2 &
AAC_OPTION_IGNORE_RESET))) {
/* Bypass wait for command quiesce */
aac_reset_adapter(aac, 2, IOP_HWSOFT_RESET);
@ -1029,8 +1029,8 @@ static ssize_t aac_show_model(struct device *device,
struct aac_dev *dev = (struct aac_dev*)class_to_shost(device)->hostdata;
int len;
if (dev->supplement_adapter_info.AdapterTypeText[0]) {
char * cp = dev->supplement_adapter_info.AdapterTypeText;
if (dev->supplement_adapter_info.adapter_type_text[0]) {
char *cp = dev->supplement_adapter_info.adapter_type_text;
while (*cp && *cp != ' ')
++cp;
while (*cp == ' ')
@ -1046,18 +1046,20 @@ static ssize_t aac_show_vendor(struct device *device,
struct device_attribute *attr, char *buf)
{
struct aac_dev *dev = (struct aac_dev*)class_to_shost(device)->hostdata;
struct aac_supplement_adapter_info *sup_adap_info;
int len;
if (dev->supplement_adapter_info.AdapterTypeText[0]) {
char * cp = dev->supplement_adapter_info.AdapterTypeText;
sup_adap_info = &dev->supplement_adapter_info;
if (sup_adap_info->adapter_type_text[0]) {
char *cp = sup_adap_info->adapter_type_text;
while (*cp && *cp != ' ')
++cp;
len = snprintf(buf, PAGE_SIZE, "%.*s\n",
(int)(cp - (char *)dev->supplement_adapter_info.AdapterTypeText),
dev->supplement_adapter_info.AdapterTypeText);
(int)(cp - (char *)sup_adap_info->adapter_type_text),
sup_adap_info->adapter_type_text);
} else
len = snprintf(buf, PAGE_SIZE, "%s\n",
aac_drivers[dev->cardtype].vname);
aac_drivers[dev->cardtype].vname);
return len;
}
@ -1078,7 +1080,7 @@ static ssize_t aac_show_flags(struct device *cdev,
"SAI_READ_CAPACITY_16\n");
if (dev->jbod)
len += snprintf(buf + len, PAGE_SIZE - len, "SUPPORTED_JBOD\n");
if (dev->supplement_adapter_info.SupportedOptions2 &
if (dev->supplement_adapter_info.supported_options2 &
AAC_OPTION_POWER_MANAGEMENT)
len += snprintf(buf + len, PAGE_SIZE - len,
"SUPPORTED_POWER_MANAGEMENT\n");
@ -1129,6 +1131,13 @@ static ssize_t aac_show_bios_version(struct device *device,
return len;
}
static ssize_t aac_show_driver_version(struct device *device,
struct device_attribute *attr,
char *buf)
{
return snprintf(buf, PAGE_SIZE, "%s\n", aac_driver_version);
}
static ssize_t aac_show_serial_number(struct device *device,
struct device_attribute *attr, char *buf)
{
@ -1139,12 +1148,12 @@ static ssize_t aac_show_serial_number(struct device *device,
len = snprintf(buf, 16, "%06X\n",
le32_to_cpu(dev->adapter_info.serial[0]));
if (len &&
!memcmp(&dev->supplement_adapter_info.MfgPcbaSerialNo[
sizeof(dev->supplement_adapter_info.MfgPcbaSerialNo)-len],
!memcmp(&dev->supplement_adapter_info.mfg_pcba_serial_no[
sizeof(dev->supplement_adapter_info.mfg_pcba_serial_no)-len],
buf, len-1))
len = snprintf(buf, 16, "%.*s\n",
(int)sizeof(dev->supplement_adapter_info.MfgPcbaSerialNo),
dev->supplement_adapter_info.MfgPcbaSerialNo);
(int)sizeof(dev->supplement_adapter_info.mfg_pcba_serial_no),
dev->supplement_adapter_info.mfg_pcba_serial_no);
return min(len, 16);
}
@ -1239,6 +1248,13 @@ static struct device_attribute aac_bios_version = {
},
.show = aac_show_bios_version,
};
static struct device_attribute aac_lld_version = {
.attr = {
.name = "driver_version",
.mode = 0444,
},
.show = aac_show_driver_version,
};
static struct device_attribute aac_serial_number = {
.attr = {
.name = "serial_number",
@ -1276,6 +1292,7 @@ static struct device_attribute *aac_attrs[] = {
&aac_kernel_version,
&aac_monitor_version,
&aac_bios_version,
&aac_lld_version,
&aac_serial_number,
&aac_max_channel,
&aac_max_id,

View File

@ -475,7 +475,7 @@ static int aac_rx_restart_adapter(struct aac_dev *dev, int bled, u8 reset_type)
{
u32 var = 0;
if (!(dev->supplement_adapter_info.SupportedOptions2 &
if (!(dev->supplement_adapter_info.supported_options2 &
AAC_OPTION_MU_RESET) || (bled >= 0) || (bled == -2)) {
if (bled)
printk(KERN_ERR "%s%d: adapter kernel panic'd %x.\n",

View File

@ -436,17 +436,24 @@ static int aac_src_check_health(struct aac_dev *dev)
{
u32 status = src_readl(dev, MUnit.OMR);
/*
* Check to see if the board failed any self tests.
*/
if (unlikely(status & SELF_TEST_FAILED))
return -1;
/*
* Check to see if the board panic'd.
*/
if (unlikely(status & KERNEL_PANIC))
return (status >> 16) & 0xFF;
goto err_blink;
/*
* Check to see if the board failed any self tests.
*/
if (unlikely(status & SELF_TEST_FAILED))
goto err_out;
/*
* Check to see if the board failed any self tests.
*/
if (unlikely(status & MONITOR_PANIC))
goto err_out;
/*
* Wait for the adapter to be up and running.
*/
@ -456,6 +463,12 @@ static int aac_src_check_health(struct aac_dev *dev)
* Everything is OK
*/
return 0;
err_out:
return -1;
err_blink:
return (status > 16) & 0xFF;
}
static inline u32 aac_get_vector(struct aac_dev *dev)
@ -657,7 +670,7 @@ static int aac_srcv_ioremap(struct aac_dev *dev, u32 size)
return 0;
}
static void aac_set_intx_mode(struct aac_dev *dev)
void aac_set_intx_mode(struct aac_dev *dev)
{
if (dev->msi_enabled) {
aac_src_access_devreg(dev, AAC_ENABLE_INTX);
@ -666,10 +679,27 @@ static void aac_set_intx_mode(struct aac_dev *dev)
}
}
static void aac_dump_fw_fib_iop_reset(struct aac_dev *dev)
{
__le32 supported_options3;
if (!aac_fib_dump)
return;
supported_options3 = dev->supplement_adapter_info.supported_options3;
if (!(supported_options3 & AAC_OPTION_SUPPORTED3_IOP_RESET_FIB_DUMP))
return;
aac_adapter_sync_cmd(dev, IOP_RESET_FW_FIB_DUMP,
0, 0, 0, 0, 0, 0, NULL, NULL, NULL, NULL, NULL);
}
static void aac_send_iop_reset(struct aac_dev *dev, int bled)
{
u32 var, reset_mask;
aac_dump_fw_fib_iop_reset(dev);
bled = aac_adapter_sync_cmd(dev, IOP_RESET_ALWAYS,
0, 0, 0, 0, 0, 0, &var,
&reset_mask, NULL, NULL, NULL);
@ -684,7 +714,7 @@ static void aac_send_iop_reset(struct aac_dev *dev, int bled)
aac_set_intx_mode(dev);
if (!bled && (dev->supplement_adapter_info.SupportedOptions2 &
if (!bled && (dev->supplement_adapter_info.supported_options2 &
AAC_OPTION_DOORBELL_RESET)) {
src_writel(dev, MUnit.IDR, reset_mask);
} else {
@ -714,6 +744,12 @@ static int aac_src_restart_adapter(struct aac_dev *dev, int bled, u8 reset_type)
pr_err("%s%d: adapter kernel panic'd %x.\n",
dev->name, dev->id, bled);
/*
* When there is a BlinkLED, IOP_RESET has not effect
*/
if (bled >= 2 && dev->sa_firmware && reset_type & HW_IOP_RESET)
reset_type &= ~HW_IOP_RESET;
dev->a_ops.adapter_enable_int = aac_src_disable_interrupt;
switch (reset_type) {

View File

@ -2259,6 +2259,8 @@ static struct dev_dependent_vals dev_corsa_vals = { CXLFLASH_MAX_SECTORS,
0ULL };
static struct dev_dependent_vals dev_flash_gt_vals = { CXLFLASH_MAX_SECTORS,
CXLFLASH_NOTIFY_SHUTDOWN };
static struct dev_dependent_vals dev_briard_vals = { CXLFLASH_MAX_SECTORS,
CXLFLASH_NOTIFY_SHUTDOWN };
/*
* PCI device binding table
@ -2268,6 +2270,8 @@ static struct pci_device_id cxlflash_pci_table[] = {
PCI_ANY_ID, PCI_ANY_ID, 0, 0, (kernel_ulong_t)&dev_corsa_vals},
{PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_FLASH_GT,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, (kernel_ulong_t)&dev_flash_gt_vals},
{PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_BRIARD,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, (kernel_ulong_t)&dev_briard_vals},
{}
};

View File

@ -25,6 +25,7 @@
#define PCI_DEVICE_ID_IBM_CORSA 0x04F0
#define PCI_DEVICE_ID_IBM_FLASH_GT 0x0600
#define PCI_DEVICE_ID_IBM_BRIARD 0x0624
/* Since there is only one target, make it 0 */
#define CXLFLASH_TARGET 0

View File

@ -305,6 +305,7 @@ static int read_cap16(struct scsi_device *sdev, struct llun_info *lli)
struct cxlflash_cfg *cfg = shost_priv(sdev->host);
struct device *dev = &cfg->dev->dev;
struct glun_info *gli = lli->parent;
struct scsi_sense_hdr sshdr;
u8 *cmd_buf = NULL;
u8 *scsi_cmd = NULL;
u8 *sense_buf = NULL;
@ -332,7 +333,8 @@ retry:
/* Drop the ioctl read semahpore across lengthy call */
up_read(&cfg->ioctl_rwsem);
result = scsi_execute(sdev, scsi_cmd, DMA_FROM_DEVICE, cmd_buf,
CMD_BUFSIZE, sense_buf, to, CMD_RETRIES, 0, NULL);
CMD_BUFSIZE, sense_buf, &sshdr, to, CMD_RETRIES,
0, 0, NULL);
down_read(&cfg->ioctl_rwsem);
rc = check_state(cfg);
if (rc) {
@ -345,10 +347,6 @@ retry:
if (driver_byte(result) == DRIVER_SENSE) {
result &= ~(0xFF<<24); /* DRIVER_SENSE is not an error */
if (result & SAM_STAT_CHECK_CONDITION) {
struct scsi_sense_hdr sshdr;
scsi_normalize_sense(sense_buf, SCSI_SENSE_BUFFERSIZE,
&sshdr);
switch (sshdr.sense_key) {
case NO_SENSE:
case RECOVERED_ERROR:

View File

@ -453,8 +453,8 @@ static int write_same16(struct scsi_device *sdev,
/* Drop the ioctl read semahpore across lengthy call */
up_read(&cfg->ioctl_rwsem);
result = scsi_execute(sdev, scsi_cmd, DMA_TO_DEVICE, cmd_buf,
CMD_BUFSIZE, sense_buf, to, CMD_RETRIES,
0, NULL);
CMD_BUFSIZE, sense_buf, NULL, to,
CMD_RETRIES, 0, 0, NULL);
down_read(&cfg->ioctl_rwsem);
rc = check_state(cfg);
if (rc) {

View File

@ -151,11 +151,9 @@ static int submit_rtpg(struct scsi_device *sdev, unsigned char *buff,
cdb[1] = MI_REPORT_TARGET_PGS;
put_unaligned_be32(bufflen, &cdb[6]);
return scsi_execute_req_flags(sdev, cdb, DMA_FROM_DEVICE,
buff, bufflen, sshdr,
ALUA_FAILOVER_TIMEOUT * HZ,
ALUA_FAILOVER_RETRIES, NULL,
req_flags, 0);
return scsi_execute(sdev, cdb, DMA_FROM_DEVICE, buff, bufflen, NULL,
sshdr, ALUA_FAILOVER_TIMEOUT * HZ,
ALUA_FAILOVER_RETRIES, req_flags, 0, NULL);
}
/*
@ -185,11 +183,9 @@ static int submit_stpg(struct scsi_device *sdev, int group_id,
cdb[1] = MO_SET_TARGET_PGS;
put_unaligned_be32(stpg_len, &cdb[6]);
return scsi_execute_req_flags(sdev, cdb, DMA_TO_DEVICE,
stpg_data, stpg_len,
sshdr, ALUA_FAILOVER_TIMEOUT * HZ,
ALUA_FAILOVER_RETRIES, NULL,
req_flags, 0);
return scsi_execute(sdev, cdb, DMA_TO_DEVICE, stpg_data, stpg_len, NULL,
sshdr, ALUA_FAILOVER_TIMEOUT * HZ,
ALUA_FAILOVER_RETRIES, req_flags, 0, NULL);
}
static struct alua_port_group *alua_find_get_pg(char *id_str, size_t id_size,

View File

@ -276,10 +276,9 @@ static int send_trespass_cmd(struct scsi_device *sdev,
BUG_ON((len > CLARIION_BUFFER_SIZE));
memcpy(csdev->buffer, page22, len);
err = scsi_execute_req_flags(sdev, cdb, DMA_TO_DEVICE,
csdev->buffer, len, &sshdr,
CLARIION_TIMEOUT * HZ, CLARIION_RETRIES,
NULL, req_flags, 0);
err = scsi_execute(sdev, cdb, DMA_TO_DEVICE, csdev->buffer, len, NULL,
&sshdr, CLARIION_TIMEOUT * HZ, CLARIION_RETRIES,
req_flags, 0, NULL);
if (err) {
if (scsi_sense_valid(&sshdr))
res = trespass_endio(sdev, &sshdr);
@ -358,7 +357,7 @@ static int clariion_prep_fn(struct scsi_device *sdev, struct request *req)
static int clariion_std_inquiry(struct scsi_device *sdev,
struct clariion_dh_data *csdev)
{
int err;
int err = SCSI_DH_OK;
char *sp_model;
sp_model = parse_sp_model(sdev, sdev->inquiry);

View File

@ -100,9 +100,8 @@ static int hp_sw_tur(struct scsi_device *sdev, struct hp_sw_dh_data *h)
REQ_FAILFAST_DRIVER;
retry:
res = scsi_execute_req_flags(sdev, cmd, DMA_NONE, NULL, 0, &sshdr,
HP_SW_TIMEOUT, HP_SW_RETRIES,
NULL, req_flags, 0);
res = scsi_execute(sdev, cmd, DMA_NONE, NULL, 0, NULL, &sshdr,
HP_SW_TIMEOUT, HP_SW_RETRIES, req_flags, 0, NULL);
if (res) {
if (scsi_sense_valid(&sshdr))
ret = tur_done(sdev, h, &sshdr);
@ -139,9 +138,8 @@ static int hp_sw_start_stop(struct hp_sw_dh_data *h)
REQ_FAILFAST_DRIVER;
retry:
res = scsi_execute_req_flags(sdev, cmd, DMA_NONE, NULL, 0, &sshdr,
HP_SW_TIMEOUT, HP_SW_RETRIES,
NULL, req_flags, 0);
res = scsi_execute(sdev, cmd, DMA_NONE, NULL, 0, NULL, &sshdr,
HP_SW_TIMEOUT, HP_SW_RETRIES, req_flags, 0, NULL);
if (res) {
if (!scsi_sense_valid(&sshdr)) {
sdev_printk(KERN_WARNING, sdev,

View File

@ -555,10 +555,9 @@ static void send_mode_select(struct work_struct *work)
(char *) h->ctlr->array_name, h->ctlr->index,
(retry_cnt == RDAC_RETRY_COUNT) ? "queueing" : "retrying");
if (scsi_execute_req_flags(sdev, cdb, DMA_TO_DEVICE,
&h->ctlr->mode_select, data_size, &sshdr,
RDAC_TIMEOUT * HZ,
RDAC_RETRIES, NULL, req_flags, 0)) {
if (scsi_execute(sdev, cdb, DMA_TO_DEVICE, &h->ctlr->mode_select,
data_size, NULL, &sshdr, RDAC_TIMEOUT * HZ,
RDAC_RETRIES, req_flags, 0, NULL)) {
err = mode_select_handle_sense(sdev, &sshdr);
if (err == SCSI_DH_RETRY && retry_cnt--)
goto retry;

View File

@ -1,9 +1,11 @@
#/*******************************************************************
# * This file is part of the Emulex Linux Device Driver for *
# * Fibre Channel Host Bus Adapters. *
# * Copyright (C) 2017 Broadcom. All Rights Reserved. The term *
# * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. *
# * Copyright (C) 2004-2012 Emulex. All rights reserved. *
# * EMULEX and SLI are trademarks of Emulex. *
# * www.emulex.com *
# * www.broadcom.com *
# * *
# * This program is free software; you can redistribute it and/or *
# * modify it under the terms of version 2 of the GNU General *
@ -28,6 +30,7 @@ endif
obj-$(CONFIG_SCSI_LPFC) := lpfc.o
lpfc-objs := lpfc_mem.o lpfc_sli.o lpfc_ct.o lpfc_els.o lpfc_hbadisc.o \
lpfc_init.o lpfc_mbox.o lpfc_nportdisc.o lpfc_scsi.o lpfc_attr.o \
lpfc_vport.o lpfc_debugfs.o lpfc_bsg.o
lpfc-objs := lpfc_mem.o lpfc_sli.o lpfc_ct.o lpfc_els.o \
lpfc_hbadisc.o lpfc_init.o lpfc_mbox.o lpfc_nportdisc.o \
lpfc_scsi.o lpfc_attr.o lpfc_vport.o lpfc_debugfs.o lpfc_bsg.o \
lpfc_nvme.o lpfc_nvmet.o

View File

@ -1,9 +1,11 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
* Copyright (C) 2017 Broadcom. All Rights Reserved. The term *
* Broadcom refers to Broadcom Limited and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* www.broadcom.com *
* Portions Copyright (C) 2004-2005 Christoph Hellwig *
* *
* This program is free software; you can redistribute it and/or *
@ -20,6 +22,7 @@
*******************************************************************/
#include <scsi/scsi_host.h>
#include <linux/ktime.h>
#if defined(CONFIG_DEBUG_FS) && !defined(CONFIG_SCSI_LPFC_DEBUG_FS)
#define CONFIG_SCSI_LPFC_DEBUG_FS
@ -53,6 +56,7 @@ struct lpfc_sli2_slim;
#define LPFC_MAX_SG_SEG_CNT 4096 /* sg element count per scsi cmnd */
#define LPFC_MAX_SGL_SEG_CNT 512 /* SGL element count per scsi cmnd */
#define LPFC_MAX_BPL_SEG_CNT 4096 /* BPL element count per scsi cmnd */
#define LPFC_MIN_NVME_SEG_CNT 254
#define LPFC_MAX_SGE_SIZE 0x80000000 /* Maximum data allowed in a SGE */
#define LPFC_IOCB_LIST_CNT 2250 /* list of IOCBs for fast-path usage. */
@ -114,6 +118,20 @@ enum lpfc_polling_flags {
DISABLE_FCP_RING_INT = 0x2
};
struct perf_prof {
uint16_t cmd_cpu[40];
uint16_t rsp_cpu[40];
uint16_t qh_cpu[40];
uint16_t wqidx[40];
};
/*
* Provide for FC4 TYPE x28 - NVME. The
* bit mask for FCP and NVME is 0x8 identically
* because they are 32 bit positions distance.
*/
#define LPFC_FC4_TYPE_BITMASK 0x00000100
/* Provide DMA memory definitions the driver uses per port instance. */
struct lpfc_dmabuf {
struct list_head list;
@ -131,10 +149,24 @@ struct lpfc_dma_pool {
struct hbq_dmabuf {
struct lpfc_dmabuf hbuf;
struct lpfc_dmabuf dbuf;
uint32_t size;
uint16_t total_size;
uint16_t bytes_recv;
uint32_t tag;
struct lpfc_cq_event cq_event;
unsigned long time_stamp;
void *context;
};
struct rqb_dmabuf {
struct lpfc_dmabuf hbuf;
struct lpfc_dmabuf dbuf;
uint16_t total_size;
uint16_t bytes_recv;
void *context;
struct lpfc_iocbq *iocbq;
struct lpfc_sglq *sglq;
struct lpfc_queue *hrq; /* ptr to associated Header RQ */
struct lpfc_queue *drq; /* ptr to associated Data RQ */
};
/* Priority bit. Set value to exceed low water mark in lpfc_mem. */
@ -367,7 +399,8 @@ struct lpfc_vport {
int32_t stopped; /* HBA has not been restarted since last ERATT */
uint8_t fc_linkspeed; /* Link speed after last READ_LA */
uint32_t num_disc_nodes; /*in addition to hba_state */
uint32_t num_disc_nodes; /* in addition to hba_state */
uint32_t gidft_inp; /* cnt of outstanding GID_FTs */
uint32_t fc_nlp_cnt; /* outstanding NODELIST requests */
uint32_t fc_rscn_id_cnt; /* count of RSCNs payloads in list */
@ -420,7 +453,6 @@ struct lpfc_vport {
uint32_t cfg_max_scsicmpl_time;
uint32_t cfg_tgt_queue_depth;
uint32_t cfg_first_burst_size;
uint32_t dev_loss_tmo_changed;
struct fc_vport *fc_vport;
@ -428,6 +460,9 @@ struct lpfc_vport {
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
struct dentry *debug_disc_trc;
struct dentry *debug_nodelist;
struct dentry *debug_nvmestat;
struct dentry *debug_nvmektime;
struct dentry *debug_cpucheck;
struct dentry *vport_debugfs_root;
struct lpfc_debugfs_trc *disc_trc;
atomic_t disc_trc_cnt;
@ -442,6 +477,11 @@ struct lpfc_vport {
uint16_t fdmi_num_disc;
uint32_t fdmi_hba_mask;
uint32_t fdmi_port_mask;
/* There is a single nvme instance per vport. */
struct nvme_fc_local_port *localport;
uint8_t nvmei_support; /* driver supports NVME Initiator */
uint32_t last_fcp_wqidx;
};
struct hbq_s {
@ -459,10 +499,9 @@ struct hbq_s {
struct hbq_dmabuf *);
};
#define LPFC_MAX_HBQS 4
/* this matches the position in the lpfc_hbq_defs array */
#define LPFC_ELS_HBQ 0
#define LPFC_EXTRA_HBQ 1
#define LPFC_MAX_HBQS 1
enum hba_temp_state {
HBA_NORMAL_TEMP,
@ -652,6 +691,8 @@ struct lpfc_hba {
* Firmware supports Forced Link Speed
* capability
*/
#define HBA_NVME_IOQ_FLUSH 0x80000 /* NVME IO queues flushed. */
uint32_t fcp_ring_in_use; /* When polling test if intr-hndlr active*/
struct lpfc_dmabuf slim2p;
@ -700,6 +741,9 @@ struct lpfc_hba {
uint8_t wwpn[8];
uint32_t RandomData[7];
uint8_t fcp_embed_io;
uint8_t nvme_support; /* Firmware supports NVME */
uint8_t nvmet_support; /* driver supports NVMET */
#define LPFC_NVMET_MAX_PORTS 32
uint8_t mds_diags_support;
/* HBA Config Parameters */
@ -725,6 +769,14 @@ struct lpfc_hba {
uint32_t cfg_fcp_imax;
uint32_t cfg_fcp_cpu_map;
uint32_t cfg_fcp_io_channel;
uint32_t cfg_suppress_rsp;
uint32_t cfg_nvme_oas;
uint32_t cfg_nvme_io_channel;
uint32_t cfg_nvmet_mrq;
uint32_t cfg_nvmet_mrq_post;
uint32_t cfg_enable_nvmet;
uint32_t cfg_nvme_enable_fb;
uint32_t cfg_nvmet_fb_size;
uint32_t cfg_total_seg_cnt;
uint32_t cfg_sg_seg_cnt;
uint32_t cfg_sg_dma_buf_size;
@ -770,6 +822,13 @@ struct lpfc_hba {
#define LPFC_FDMI_SUPPORT 1 /* FDMI supported? */
uint32_t cfg_enable_SmartSAN;
uint32_t cfg_enable_mds_diags;
uint32_t cfg_enable_fc4_type;
uint32_t cfg_xri_split;
#define LPFC_ENABLE_FCP 1
#define LPFC_ENABLE_NVME 2
#define LPFC_ENABLE_BOTH 3
uint32_t io_channel_irqs; /* number of irqs for io channels */
struct nvmet_fc_target_port *targetport;
lpfc_vpd_t vpd; /* vital product data */
struct pci_dev *pcidev;
@ -784,11 +843,11 @@ struct lpfc_hba {
unsigned long data_flags;
uint32_t hbq_in_use; /* HBQs in use flag */
struct list_head rb_pend_list; /* Received buffers to be processed */
uint32_t hbq_count; /* Count of configured HBQs */
struct hbq_s hbqs[LPFC_MAX_HBQS]; /* local copy of hbq indicies */
atomic_t fcp_qidx; /* next work queue to post work to */
atomic_t fcp_qidx; /* next FCP WQ (RR Policy) */
atomic_t nvme_qidx; /* next NVME WQ (RR Policy) */
phys_addr_t pci_bar0_map; /* Physical address for PCI BAR0 */
phys_addr_t pci_bar1_map; /* Physical address for PCI BAR1 */
@ -843,9 +902,17 @@ struct lpfc_hba {
/*
* stat counters
*/
uint64_t fc4InputRequests;
uint64_t fc4OutputRequests;
uint64_t fc4ControlRequests;
uint64_t fc4ScsiInputRequests;
uint64_t fc4ScsiOutputRequests;
uint64_t fc4ScsiControlRequests;
uint64_t fc4ScsiIoCmpls;
uint64_t fc4NvmeInputRequests;
uint64_t fc4NvmeOutputRequests;
uint64_t fc4NvmeControlRequests;
uint64_t fc4NvmeIoCmpls;
uint64_t fc4NvmeLsRequests;
uint64_t fc4NvmeLsCmpls;
uint64_t bg_guard_err_cnt;
uint64_t bg_apptag_err_cnt;
uint64_t bg_reftag_err_cnt;
@ -856,17 +923,23 @@ struct lpfc_hba {
struct list_head lpfc_scsi_buf_list_get;
struct list_head lpfc_scsi_buf_list_put;
uint32_t total_scsi_bufs;
spinlock_t nvme_buf_list_get_lock; /* NVME buf alloc list lock */
spinlock_t nvme_buf_list_put_lock; /* NVME buf free list lock */
struct list_head lpfc_nvme_buf_list_get;
struct list_head lpfc_nvme_buf_list_put;
uint32_t total_nvme_bufs;
struct list_head lpfc_iocb_list;
uint32_t total_iocbq_bufs;
struct list_head active_rrq_list;
spinlock_t hbalock;
/* pci_mem_pools */
struct pci_pool *lpfc_scsi_dma_buf_pool;
struct pci_pool *lpfc_sg_dma_buf_pool;
struct pci_pool *lpfc_mbuf_pool;
struct pci_pool *lpfc_hrb_pool; /* header receive buffer pool */
struct pci_pool *lpfc_drb_pool; /* data receive buffer pool */
struct pci_pool *lpfc_hbq_pool; /* SLI3 hbq buffer pool */
struct pci_pool *txrdy_payload_pool;
struct lpfc_dma_pool lpfc_mbuf_safety_pool;
mempool_t *mbox_mem_pool;
@ -878,8 +951,6 @@ struct lpfc_hba {
enum intr_type_t intr_type;
uint32_t intr_mode;
#define LPFC_INTR_ERROR 0xFFFFFFFF
struct msix_entry msix_entries[LPFC_MSIX_VECTORS];
struct list_head port_list;
struct lpfc_vport *pport; /* physical lpfc_vport pointer */
uint16_t max_vpi; /* Maximum virtual nports */
@ -925,6 +996,12 @@ struct lpfc_hba {
struct dentry *debug_readApp; /* inject read app_tag errors */
struct dentry *debug_readRef; /* inject read ref_tag errors */
struct dentry *debug_nvmeio_trc;
struct lpfc_debugfs_nvmeio_trc *nvmeio_trc;
atomic_t nvmeio_trc_cnt;
uint32_t nvmeio_trc_size;
uint32_t nvmeio_trc_output_idx;
/* T10 DIF error injection */
uint32_t lpfc_injerr_wgrd_cnt;
uint32_t lpfc_injerr_wapp_cnt;
@ -950,7 +1027,9 @@ struct lpfc_hba {
struct dentry *idiag_ctl_acc;
struct dentry *idiag_mbx_acc;
struct dentry *idiag_ext_acc;
uint8_t lpfc_idiag_last_eq;
#endif
uint16_t nvmeio_trc_on;
/* Used for deferred freeing of ELS data buffers */
struct list_head elsbuf;
@ -1023,6 +1102,53 @@ struct lpfc_hba {
#define LPFC_TRANSGRESSION_LOW_RXPOWER 0x4000
uint16_t sfp_alarm;
uint16_t sfp_warning;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
#define LPFC_CHECK_CPU_CNT 32
uint32_t cpucheck_rcv_io[LPFC_CHECK_CPU_CNT];
uint32_t cpucheck_xmt_io[LPFC_CHECK_CPU_CNT];
uint32_t cpucheck_cmpl_io[LPFC_CHECK_CPU_CNT];
uint32_t cpucheck_ccmpl_io[LPFC_CHECK_CPU_CNT];
uint16_t cpucheck_on;
#define LPFC_CHECK_OFF 0
#define LPFC_CHECK_NVME_IO 1
#define LPFC_CHECK_NVMET_RCV 2
#define LPFC_CHECK_NVMET_IO 4
uint16_t ktime_on;
uint64_t ktime_data_samples;
uint64_t ktime_status_samples;
uint64_t ktime_last_cmd;
uint64_t ktime_seg1_total;
uint64_t ktime_seg1_min;
uint64_t ktime_seg1_max;
uint64_t ktime_seg2_total;
uint64_t ktime_seg2_min;
uint64_t ktime_seg2_max;
uint64_t ktime_seg3_total;
uint64_t ktime_seg3_min;
uint64_t ktime_seg3_max;
uint64_t ktime_seg4_total;
uint64_t ktime_seg4_min;
uint64_t ktime_seg4_max;
uint64_t ktime_seg5_total;
uint64_t ktime_seg5_min;
uint64_t ktime_seg5_max;
uint64_t ktime_seg6_total;
uint64_t ktime_seg6_min;
uint64_t ktime_seg6_max;
uint64_t ktime_seg7_total;
uint64_t ktime_seg7_min;
uint64_t ktime_seg7_max;
uint64_t ktime_seg8_total;
uint64_t ktime_seg8_min;
uint64_t ktime_seg8_max;
uint64_t ktime_seg9_total;
uint64_t ktime_seg9_min;
uint64_t ktime_seg9_max;
uint64_t ktime_seg10_total;
uint64_t ktime_seg10_min;
uint64_t ktime_seg10_max;
#endif
};
static inline struct Scsi_Host *
@ -1093,3 +1219,11 @@ lpfc_sli_read_hs(struct lpfc_hba *phba)
return 0;
}
static inline struct lpfc_sli_ring *
lpfc_phba_elsring(struct lpfc_hba *phba)
{
if (phba->sli_rev == LPFC_SLI_REV4)
return phba->sli4_hba.els_wq->pring;
return &phba->sli.sli3_ring[LPFC_ELS_RING];
}

View File

@ -1,9 +1,11 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
* Copyright (C) 2017 Broadcom. All Rights Reserved. The term *
* Broadcom refers to Broadcom Limited and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* www.broadcom.com *
* Portions Copyright (C) 2004-2005 Christoph Hellwig *
* *
* This program is free software; you can redistribute it and/or *
@ -35,14 +37,18 @@
#include <scsi/scsi_transport_fc.h>
#include <scsi/fc/fc_fs.h>
#include <linux/nvme-fc-driver.h>
#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
#include "lpfc_scsi.h"
#include "lpfc_nvme.h"
#include "lpfc_nvmet.h"
#include "lpfc_logmsg.h"
#include "lpfc_version.h"
#include "lpfc_compat.h"
@ -50,9 +56,13 @@
#include "lpfc_vport.h"
#include "lpfc_attr.h"
#define LPFC_DEF_DEVLOSS_TMO 30
#define LPFC_MIN_DEVLOSS_TMO 1
#define LPFC_MAX_DEVLOSS_TMO 255
#define LPFC_DEF_DEVLOSS_TMO 30
#define LPFC_MIN_DEVLOSS_TMO 1
#define LPFC_MAX_DEVLOSS_TMO 255
#define LPFC_DEF_MRQ_POST 256
#define LPFC_MIN_MRQ_POST 32
#define LPFC_MAX_MRQ_POST 512
/*
* Write key size should be multiple of 4. If write key is changed
@ -129,6 +139,211 @@ lpfc_enable_fip_show(struct device *dev, struct device_attribute *attr,
return snprintf(buf, PAGE_SIZE, "0\n");
}
static ssize_t
lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct Scsi_Host *shost = class_to_shost(dev);
struct lpfc_vport *vport = shost_priv(shost);
struct lpfc_hba *phba = vport->phba;
struct lpfc_nvmet_tgtport *tgtp;
struct nvme_fc_local_port *localport;
struct lpfc_nvme_lport *lport;
struct lpfc_nvme_rport *rport;
struct nvme_fc_remote_port *nrport;
char *statep;
int len = 0;
if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)) {
len += snprintf(buf, PAGE_SIZE, "NVME Disabled\n");
return len;
}
if (phba->nvmet_support) {
if (!phba->targetport) {
len = snprintf(buf, PAGE_SIZE,
"NVME Target: x%llx is not allocated\n",
wwn_to_u64(vport->fc_portname.u.wwn));
return len;
}
/* Port state is only one of two values for now. */
if (phba->targetport->port_id)
statep = "REGISTERED";
else
statep = "INIT";
len += snprintf(buf + len, PAGE_SIZE - len,
"NVME Target: Enabled State %s\n",
statep);
len += snprintf(buf + len, PAGE_SIZE - len,
"%s%d WWPN x%llx WWNN x%llx DID x%06x\n",
"NVME Target: lpfc",
phba->brd_no,
wwn_to_u64(vport->fc_portname.u.wwn),
wwn_to_u64(vport->fc_nodename.u.wwn),
phba->targetport->port_id);
len += snprintf(buf + len, PAGE_SIZE,
"\nNVME Target: Statistics\n");
tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
len += snprintf(buf+len, PAGE_SIZE-len,
"LS: Rcv %08x Drop %08x Abort %08x\n",
atomic_read(&tgtp->rcv_ls_req_in),
atomic_read(&tgtp->rcv_ls_req_drop),
atomic_read(&tgtp->xmt_ls_abort));
if (atomic_read(&tgtp->rcv_ls_req_in) !=
atomic_read(&tgtp->rcv_ls_req_out)) {
len += snprintf(buf+len, PAGE_SIZE-len,
"Rcv LS: in %08x != out %08x\n",
atomic_read(&tgtp->rcv_ls_req_in),
atomic_read(&tgtp->rcv_ls_req_out));
}
len += snprintf(buf+len, PAGE_SIZE-len,
"LS: Xmt %08x Drop %08x Cmpl %08x Err %08x\n",
atomic_read(&tgtp->xmt_ls_rsp),
atomic_read(&tgtp->xmt_ls_drop),
atomic_read(&tgtp->xmt_ls_rsp_cmpl),
atomic_read(&tgtp->xmt_ls_rsp_error));
len += snprintf(buf+len, PAGE_SIZE-len,
"FCP: Rcv %08x Drop %08x\n",
atomic_read(&tgtp->rcv_fcp_cmd_in),
atomic_read(&tgtp->rcv_fcp_cmd_drop));
if (atomic_read(&tgtp->rcv_fcp_cmd_in) !=
atomic_read(&tgtp->rcv_fcp_cmd_out)) {
len += snprintf(buf+len, PAGE_SIZE-len,
"Rcv FCP: in %08x != out %08x\n",
atomic_read(&tgtp->rcv_fcp_cmd_in),
atomic_read(&tgtp->rcv_fcp_cmd_out));
}
len += snprintf(buf+len, PAGE_SIZE-len,
"FCP Rsp: RD %08x rsp %08x WR %08x rsp %08x\n",
atomic_read(&tgtp->xmt_fcp_read),
atomic_read(&tgtp->xmt_fcp_read_rsp),
atomic_read(&tgtp->xmt_fcp_write),
atomic_read(&tgtp->xmt_fcp_rsp));
len += snprintf(buf+len, PAGE_SIZE-len,
"FCP Rsp: abort %08x drop %08x\n",
atomic_read(&tgtp->xmt_fcp_abort),
atomic_read(&tgtp->xmt_fcp_drop));
len += snprintf(buf+len, PAGE_SIZE-len,
"FCP Rsp Cmpl: %08x err %08x drop %08x\n",
atomic_read(&tgtp->xmt_fcp_rsp_cmpl),
atomic_read(&tgtp->xmt_fcp_rsp_error),
atomic_read(&tgtp->xmt_fcp_rsp_drop));
len += snprintf(buf+len, PAGE_SIZE-len,
"ABORT: Xmt %08x Err %08x Cmpl %08x",
atomic_read(&tgtp->xmt_abort_rsp),
atomic_read(&tgtp->xmt_abort_rsp_error),
atomic_read(&tgtp->xmt_abort_cmpl));
len += snprintf(buf+len, PAGE_SIZE-len, "\n");
return len;
}
localport = vport->localport;
if (!localport) {
len = snprintf(buf, PAGE_SIZE,
"NVME Initiator x%llx is not allocated\n",
wwn_to_u64(vport->fc_portname.u.wwn));
return len;
}
len = snprintf(buf, PAGE_SIZE, "NVME Initiator Enabled\n");
spin_lock_irq(shost->host_lock);
lport = (struct lpfc_nvme_lport *)localport->private;
/* Port state is only one of two values for now. */
if (localport->port_id)
statep = "ONLINE";
else
statep = "UNKNOWN ";
len += snprintf(buf + len, PAGE_SIZE - len,
"%s%d WWPN x%llx WWNN x%llx DID x%06x %s\n",
"NVME LPORT lpfc",
phba->brd_no,
wwn_to_u64(vport->fc_portname.u.wwn),
wwn_to_u64(vport->fc_nodename.u.wwn),
localport->port_id, statep);
list_for_each_entry(rport, &lport->rport_list, list) {
/* local short-hand pointer. */
nrport = rport->remoteport;
/* Port state is only one of two values for now. */
switch (nrport->port_state) {
case FC_OBJSTATE_ONLINE:
statep = "ONLINE";
break;
case FC_OBJSTATE_UNKNOWN:
statep = "UNKNOWN ";
break;
default:
statep = "UNSUPPORTED";
break;
}
/* Tab in to show lport ownership. */
len += snprintf(buf + len, PAGE_SIZE - len,
"NVME RPORT ");
if (phba->brd_no >= 10)
len += snprintf(buf + len, PAGE_SIZE - len, " ");
len += snprintf(buf + len, PAGE_SIZE - len, "WWPN x%llx ",
nrport->port_name);
len += snprintf(buf + len, PAGE_SIZE - len, "WWNN x%llx ",
nrport->node_name);
len += snprintf(buf + len, PAGE_SIZE - len, "DID x%06x ",
nrport->port_id);
switch (nrport->port_role) {
case FC_PORT_ROLE_NVME_INITIATOR:
len += snprintf(buf + len, PAGE_SIZE - len,
"INITIATOR ");
break;
case FC_PORT_ROLE_NVME_TARGET:
len += snprintf(buf + len, PAGE_SIZE - len,
"TARGET ");
break;
case FC_PORT_ROLE_NVME_DISCOVERY:
len += snprintf(buf + len, PAGE_SIZE - len,
"DISCOVERY ");
break;
default:
len += snprintf(buf + len, PAGE_SIZE - len,
"UNKNOWN_ROLE x%x",
nrport->port_role);
break;
}
len += snprintf(buf + len, PAGE_SIZE - len, "%s ", statep);
/* Terminate the string. */
len += snprintf(buf + len, PAGE_SIZE - len, "\n");
}
spin_unlock_irq(shost->host_lock);
len += snprintf(buf + len, PAGE_SIZE, "\nNVME Statistics\n");
len += snprintf(buf+len, PAGE_SIZE-len,
"LS: Xmt %016llx Cmpl %016llx\n",
phba->fc4NvmeLsRequests,
phba->fc4NvmeLsCmpls);
len += snprintf(buf+len, PAGE_SIZE-len,
"FCP: Rd %016llx Wr %016llx IO %016llx\n",
phba->fc4NvmeInputRequests,
phba->fc4NvmeOutputRequests,
phba->fc4NvmeControlRequests);
len += snprintf(buf+len, PAGE_SIZE-len,
" Cmpl %016llx\n", phba->fc4NvmeIoCmpls);
return len;
}
static ssize_t
lpfc_bg_info_show(struct device *dev, struct device_attribute *attr,
char *buf)
@ -675,6 +890,28 @@ lpfc_issue_lip(struct Scsi_Host *shost)
return 0;
}
int
lpfc_emptyq_wait(struct lpfc_hba *phba, struct list_head *q, spinlock_t *lock)
{
int cnt = 0;
spin_lock_irq(lock);
while (!list_empty(q)) {
spin_unlock_irq(lock);
msleep(20);
if (cnt++ > 250) { /* 5 secs */
lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
"0466 %s %s\n",
"Outstanding IO when ",
"bringing Adapter offline\n");
return 0;
}
spin_lock_irq(lock);
}
spin_unlock_irq(lock);
return 1;
}
/**
* lpfc_do_offline - Issues a mailbox command to bring the link down
* @phba: lpfc_hba pointer.
@ -694,10 +931,10 @@ static int
lpfc_do_offline(struct lpfc_hba *phba, uint32_t type)
{
struct completion online_compl;
struct lpfc_queue *qp = NULL;
struct lpfc_sli_ring *pring;
struct lpfc_sli *psli;
int status = 0;
int cnt = 0;
int i;
int rc;
@ -717,20 +954,24 @@ lpfc_do_offline(struct lpfc_hba *phba, uint32_t type)
/* Wait a little for things to settle down, but not
* long enough for dev loss timeout to expire.
*/
for (i = 0; i < psli->num_rings; i++) {
pring = &psli->ring[i];
while (!list_empty(&pring->txcmplq)) {
msleep(10);
if (cnt++ > 500) { /* 5 secs */
lpfc_printf_log(phba,
KERN_WARNING, LOG_INIT,
"0466 Outstanding IO when "
"bringing Adapter offline\n");
break;
}
if (phba->sli_rev != LPFC_SLI_REV4) {
for (i = 0; i < psli->num_rings; i++) {
pring = &psli->sli3_ring[i];
if (!lpfc_emptyq_wait(phba, &pring->txcmplq,
&phba->hbalock))
goto out;
}
} else {
list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
pring = qp->pring;
if (!pring)
continue;
if (!lpfc_emptyq_wait(phba, &pring->txcmplq,
&pring->ring_lock))
goto out;
}
}
out:
init_completion(&online_compl);
rc = lpfc_workq_post_event(phba, &status, &online_compl, type);
if (rc == 0)
@ -1945,6 +2186,7 @@ lpfc_##attr##_store(struct device *dev, struct device_attribute *attr, \
}
static DEVICE_ATTR(nvme_info, 0444, lpfc_nvme_info_show, NULL);
static DEVICE_ATTR(bg_info, S_IRUGO, lpfc_bg_info_show, NULL);
static DEVICE_ATTR(bg_guard_err, S_IRUGO, lpfc_bg_guard_err_show, NULL);
static DEVICE_ATTR(bg_apptag_err, S_IRUGO, lpfc_bg_apptag_err_show, NULL);
@ -2751,6 +2993,13 @@ lpfc_oas_lun_store(struct device *dev, struct device_attribute *attr,
static DEVICE_ATTR(lpfc_xlane_lun, S_IRUGO | S_IWUSR,
lpfc_oas_lun_show, lpfc_oas_lun_store);
int lpfc_enable_nvmet_cnt;
unsigned long long lpfc_enable_nvmet[LPFC_NVMET_MAX_PORTS] = {
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
module_param_array(lpfc_enable_nvmet, ullong, &lpfc_enable_nvmet_cnt, 0444);
MODULE_PARM_DESC(lpfc_enable_nvmet, "Enable HBA port(s) WWPN as a NVME Target");
static int lpfc_poll = 0;
module_param(lpfc_poll, int, S_IRUGO);
MODULE_PARM_DESC(lpfc_poll, "FCP ring polling mode control:"
@ -2816,9 +3065,9 @@ lpfc_txq_hw_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct Scsi_Host *shost = class_to_shost(dev);
struct lpfc_hba *phba = ((struct lpfc_vport *) shost->hostdata)->phba;
struct lpfc_sli_ring *pring = lpfc_phba_elsring(phba);
return snprintf(buf, PAGE_SIZE, "%d\n",
phba->sli.ring[LPFC_ELS_RING].txq_max);
return snprintf(buf, PAGE_SIZE, "%d\n", pring->txq_max);
}
static DEVICE_ATTR(txq_hw, S_IRUGO,
@ -2829,9 +3078,9 @@ lpfc_txcmplq_hw_show(struct device *dev, struct device_attribute *attr,
{
struct Scsi_Host *shost = class_to_shost(dev);
struct lpfc_hba *phba = ((struct lpfc_vport *) shost->hostdata)->phba;
struct lpfc_sli_ring *pring = lpfc_phba_elsring(phba);
return snprintf(buf, PAGE_SIZE, "%d\n",
phba->sli.ring[LPFC_ELS_RING].txcmplq_max);
return snprintf(buf, PAGE_SIZE, "%d\n", pring->txcmplq_max);
}
static DEVICE_ATTR(txcmplq_hw, S_IRUGO,
@ -3029,6 +3278,59 @@ lpfc_vport_param_store(devloss_tmo)
static DEVICE_ATTR(lpfc_devloss_tmo, S_IRUGO | S_IWUSR,
lpfc_devloss_tmo_show, lpfc_devloss_tmo_store);
/*
* lpfc_suppress_rsp: Enable suppress rsp feature is firmware supports it
* lpfc_suppress_rsp = 0 Disable
* lpfc_suppress_rsp = 1 Enable (default)
*
*/
LPFC_ATTR_R(suppress_rsp, 1, 0, 1,
"Enable suppress rsp feature is firmware supports it");
/*
* lpfc_nvmet_mrq: Specify number of RQ pairs for processing NVMET cmds
* lpfc_nvmet_mrq = 1 use a single RQ pair
* lpfc_nvmet_mrq >= 2 use specified RQ pairs for MRQ
*
*/
LPFC_ATTR_R(nvmet_mrq,
1, 1, 16,
"Specify number of RQ pairs for processing NVMET cmds");
/*
* lpfc_nvmet_mrq_post: Specify number buffers to post on every MRQ
*
*/
LPFC_ATTR_R(nvmet_mrq_post, LPFC_DEF_MRQ_POST,
LPFC_MIN_MRQ_POST, LPFC_MAX_MRQ_POST,
"Specify number of buffers to post on every MRQ");
/*
* lpfc_enable_fc4_type: Defines what FC4 types are supported.
* Supported Values: 1 - register just FCP
* 3 - register both FCP and NVME
* Supported values are [1,3]. Default value is 3
*/
LPFC_ATTR_R(enable_fc4_type, LPFC_ENABLE_BOTH,
LPFC_ENABLE_FCP, LPFC_ENABLE_BOTH,
"Define fc4 type to register with fabric.");
/*
* lpfc_xri_split: Defines the division of XRI resources between SCSI and NVME
* This parameter is only used if:
* lpfc_enable_fc4_type is 3 - register both FCP and NVME and
* port is not configured for NVMET.
*
* ELS/CT always get 10% of XRIs, up to a maximum of 250
* The remaining XRIs get split up based on lpfc_xri_split per port:
*
* Supported Values are in percentages
* the xri_split value is the percentage the SCSI port will get. The remaining
* percentage will go to NVME.
*/
LPFC_ATTR_R(xri_split, 50, 10, 90,
"Division of XRI resources between SCSI and NVME");
/*
# lpfc_log_verbose: Only turn this flag on if you are willing to risk being
# deluged with LOTS of information.
@ -4143,13 +4445,14 @@ lpfc_fcp_imax_store(struct device *dev, struct device_attribute *attr,
/*
* Value range for the HBA is [5000,5000000]
* The value for each EQ depends on how many EQs are configured.
* Allow value == 0
*/
if (val < LPFC_MIN_IMAX || val > LPFC_MAX_IMAX)
if (val && (val < LPFC_MIN_IMAX || val > LPFC_MAX_IMAX))
return -EINVAL;
phba->cfg_fcp_imax = (uint32_t)val;
for (i = 0; i < phba->cfg_fcp_io_channel; i += LPFC_MAX_EQ_DELAY)
lpfc_modify_fcp_eq_delay(phba, i);
for (i = 0; i < phba->io_channel_irqs; i++)
lpfc_modify_hba_eq_delay(phba, i);
return strlen(buf);
}
@ -4187,7 +4490,8 @@ lpfc_fcp_imax_init(struct lpfc_hba *phba, int val)
return 0;
}
if (val >= LPFC_MIN_IMAX && val <= LPFC_MAX_IMAX) {
if ((val >= LPFC_MIN_IMAX && val <= LPFC_MAX_IMAX) ||
(val == 0)) {
phba->cfg_fcp_imax = val;
return 0;
}
@ -4376,6 +4680,32 @@ LPFC_VPORT_ATTR_RW(use_adisc, 0, 0, 1,
LPFC_VPORT_ATTR_RW(first_burst_size, 0, 0, 65536,
"First burst size for Targets that support first burst");
/*
* lpfc_nvmet_fb_size: NVME Target mode supported first burst size.
* When the driver is configured as an NVME target, this value is
* communicated to the NVME initiator in the PRLI response. It is
* used only when the lpfc_nvme_enable_fb and lpfc_nvmet_support
* parameters are set and the target is sending the PRLI RSP.
* Parameter supported on physical port only - no NPIV support.
* Value range is [0,65536]. Default value is 0.
*/
LPFC_ATTR_RW(nvmet_fb_size, 0, 0, 65536,
"NVME Target mode first burst size in 512B increments.");
/*
* lpfc_nvme_enable_fb: Enable NVME first burst on I and T functions.
* For the Initiator (I), enabling this parameter means that an NVMET
* PRLI response with FBA enabled and an FB_SIZE set to a nonzero value will be
* processed by the initiator for subsequent NVME FCP IO. For the target
* function (T), enabling this parameter qualifies the lpfc_nvmet_fb_size
* driver parameter as the target function's first burst size returned to the
* initiator in the target's NVME PRLI response. Parameter supported on physical
* port only - no NPIV support.
* Value range is [0,1]. Default value is 0 (disabled).
*/
LPFC_ATTR_RW(nvme_enable_fb, 0, 0, 1,
"Enable First Burst feature on I and T functions.");
/*
# lpfc_max_scsicmpl_time: Use scsi command completion time to control I/O queue
# depth. Default value is 0. When the value of this parameter is zero the
@ -4423,17 +4753,25 @@ static DEVICE_ATTR(lpfc_max_scsicmpl_time, S_IRUGO | S_IWUSR,
LPFC_ATTR_R(ack0, 0, 0, 1, "Enable ACK0 support");
/*
# lpfc_fcp_io_sched: Determine scheduling algrithmn for issuing FCP cmds
# range is [0,1]. Default value is 0.
# For [0], FCP commands are issued to Work Queues ina round robin fashion.
# For [1], FCP commands are issued to a Work Queue associated with the
# current CPU.
# It would be set to 1 by the driver if it's able to set up cpu affinity
# for FCP I/Os through Work Queue associated with the current CPU. Otherwise,
# roundrobin scheduling of FCP I/Os through WQs will be used.
*/
LPFC_ATTR_RW(fcp_io_sched, 0, 0, 1, "Determine scheduling algorithm for "
"issuing commands [0] - Round Robin, [1] - Current CPU");
* lpfc_io_sched: Determine scheduling algrithmn for issuing FCP cmds
* range is [0,1]. Default value is 0.
* For [0], FCP commands are issued to Work Queues ina round robin fashion.
* For [1], FCP commands are issued to a Work Queue associated with the
* current CPU.
*
* LPFC_FCP_SCHED_ROUND_ROBIN == 0
* LPFC_FCP_SCHED_BY_CPU == 1
*
* The driver dynamically sets this to 1 (BY_CPU) if it's able to set up cpu
* affinity for FCP/NVME I/Os through Work Queues associated with the current
* CPU. Otherwise, the default 0 (Round Robin) scheduling of FCP/NVME I/Os
* through WQs will be used.
*/
LPFC_ATTR_RW(fcp_io_sched, LPFC_FCP_SCHED_ROUND_ROBIN,
LPFC_FCP_SCHED_ROUND_ROBIN,
LPFC_FCP_SCHED_BY_CPU,
"Determine scheduling algorithm for "
"issuing commands [0] - Round Robin, [1] - Current CPU");
/*
# lpfc_fcp2_no_tgt_reset: Determine bus reset behavior
@ -4560,14 +4898,53 @@ LPFC_ATTR_R(use_msi, 2, 0, 2, "Use Message Signaled Interrupts (1) or "
"MSI-X (2), if possible");
/*
# lpfc_fcp_io_channel: Set the number of FCP EQ/CQ/WQ IO channels
#
# Value range is [1,7]. Default value is 4.
*/
LPFC_ATTR_R(fcp_io_channel, LPFC_FCP_IO_CHAN_DEF, LPFC_FCP_IO_CHAN_MIN,
LPFC_FCP_IO_CHAN_MAX,
* lpfc_nvme_oas: Use the oas bit when sending NVME/NVMET IOs
*
* 0 = NVME OAS disabled
* 1 = NVME OAS enabled
*
* Value range is [0,1]. Default value is 0.
*/
LPFC_ATTR_RW(nvme_oas, 0, 0, 1,
"Use OAS bit on NVME IOs");
/*
* lpfc_fcp_io_channel: Set the number of FCP IO channels the driver
* will advertise it supports to the SCSI layer. This also will map to
* the number of WQs the driver will create.
*
* 0 = Configure the number of io channels to the number of active CPUs.
* 1,32 = Manually specify how many io channels to use.
*
* Value range is [0,32]. Default value is 4.
*/
LPFC_ATTR_R(fcp_io_channel,
LPFC_FCP_IO_CHAN_DEF,
LPFC_HBA_IO_CHAN_MIN, LPFC_HBA_IO_CHAN_MAX,
"Set the number of FCP I/O channels");
/*
* lpfc_nvme_io_channel: Set the number of IO hardware queues the driver
* will advertise it supports to the NVME layer. This also will map to
* the number of WQs the driver will create.
*
* This module parameter is valid when lpfc_enable_fc4_type is set
* to support NVME.
*
* The NVME Layer will try to create this many, plus 1 administrative
* hardware queue. The administrative queue will always map to WQ 0
* A hardware IO queue maps (qidx) to a specific driver WQ.
*
* 0 = Configure the number of io channels to the number of active CPUs.
* 1,32 = Manually specify how many io channels to use.
*
* Value range is [0,32]. Default value is 0.
*/
LPFC_ATTR_R(nvme_io_channel,
LPFC_NVME_IO_CHAN_DEF,
LPFC_HBA_IO_CHAN_MIN, LPFC_HBA_IO_CHAN_MAX,
"Set the number of NVME I/O channels");
/*
# lpfc_enable_hba_reset: Allow or prevent HBA resets to the hardware.
# 0 = HBA resets disabled
@ -4692,6 +5069,7 @@ LPFC_ATTR_R(sg_seg_cnt, LPFC_DEFAULT_SG_SEG_CNT, LPFC_DEFAULT_SG_SEG_CNT,
LPFC_ATTR_R(enable_mds_diags, 0, 0, 1, "Enable MDS Diagnostics");
struct device_attribute *lpfc_hba_attrs[] = {
&dev_attr_nvme_info,
&dev_attr_bg_info,
&dev_attr_bg_guard_err,
&dev_attr_bg_apptag_err,
@ -4718,6 +5096,8 @@ struct device_attribute *lpfc_hba_attrs[] = {
&dev_attr_lpfc_peer_port_login,
&dev_attr_lpfc_nodev_tmo,
&dev_attr_lpfc_devloss_tmo,
&dev_attr_lpfc_enable_fc4_type,
&dev_attr_lpfc_xri_split,
&dev_attr_lpfc_fcp_class,
&dev_attr_lpfc_use_adisc,
&dev_attr_lpfc_first_burst_size,
@ -4752,9 +5132,16 @@ struct device_attribute *lpfc_hba_attrs[] = {
&dev_attr_lpfc_poll_tmo,
&dev_attr_lpfc_task_mgmt_tmo,
&dev_attr_lpfc_use_msi,
&dev_attr_lpfc_nvme_oas,
&dev_attr_lpfc_fcp_imax,
&dev_attr_lpfc_fcp_cpu_map,
&dev_attr_lpfc_fcp_io_channel,
&dev_attr_lpfc_suppress_rsp,
&dev_attr_lpfc_nvme_io_channel,
&dev_attr_lpfc_nvmet_mrq,
&dev_attr_lpfc_nvmet_mrq_post,
&dev_attr_lpfc_nvme_enable_fb,
&dev_attr_lpfc_nvmet_fb_size,
&dev_attr_lpfc_enable_bg,
&dev_attr_lpfc_soft_wwnn,
&dev_attr_lpfc_soft_wwpn,
@ -5764,15 +6151,17 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
lpfc_fdmi_on_init(phba, lpfc_fdmi_on);
lpfc_enable_SmartSAN_init(phba, lpfc_enable_SmartSAN);
lpfc_use_msi_init(phba, lpfc_use_msi);
lpfc_nvme_oas_init(phba, lpfc_nvme_oas);
lpfc_fcp_imax_init(phba, lpfc_fcp_imax);
lpfc_fcp_cpu_map_init(phba, lpfc_fcp_cpu_map);
lpfc_fcp_io_channel_init(phba, lpfc_fcp_io_channel);
lpfc_enable_hba_reset_init(phba, lpfc_enable_hba_reset);
lpfc_enable_hba_heartbeat_init(phba, lpfc_enable_hba_heartbeat);
lpfc_EnableXLane_init(phba, lpfc_EnableXLane);
if (phba->sli_rev != LPFC_SLI_REV4)
phba->cfg_EnableXLane = 0;
lpfc_XLanePriority_init(phba, lpfc_XLanePriority);
memset(phba->cfg_oas_tgt_wwpn, 0, (8 * sizeof(uint8_t)));
memset(phba->cfg_oas_vpt_wwpn, 0, (8 * sizeof(uint8_t)));
phba->cfg_oas_lun_state = 0;
@ -5786,9 +6175,48 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
phba->cfg_poll = 0;
else
phba->cfg_poll = lpfc_poll;
lpfc_suppress_rsp_init(phba, lpfc_suppress_rsp);
lpfc_enable_fc4_type_init(phba, lpfc_enable_fc4_type);
lpfc_nvmet_mrq_init(phba, lpfc_nvmet_mrq);
lpfc_nvmet_mrq_post_init(phba, lpfc_nvmet_mrq_post);
/* Initialize first burst. Target vs Initiator are different. */
lpfc_nvme_enable_fb_init(phba, lpfc_nvme_enable_fb);
lpfc_nvmet_fb_size_init(phba, lpfc_nvmet_fb_size);
lpfc_fcp_io_channel_init(phba, lpfc_fcp_io_channel);
lpfc_nvme_io_channel_init(phba, lpfc_nvme_io_channel);
if (phba->sli_rev != LPFC_SLI_REV4) {
/* NVME only supported on SLI4 */
phba->nvmet_support = 0;
phba->cfg_enable_fc4_type = LPFC_ENABLE_FCP;
} else {
/* We MUST have FCP support */
if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
phba->cfg_enable_fc4_type |= LPFC_ENABLE_FCP;
}
/* A value of 0 means use the number of CPUs found in the system */
if (phba->cfg_fcp_io_channel == 0)
phba->cfg_fcp_io_channel = phba->sli4_hba.num_present_cpu;
if (phba->cfg_nvme_io_channel == 0)
phba->cfg_nvme_io_channel = phba->sli4_hba.num_present_cpu;
if (phba->cfg_enable_fc4_type == LPFC_ENABLE_NVME)
phba->cfg_fcp_io_channel = 0;
if (phba->cfg_enable_fc4_type == LPFC_ENABLE_FCP)
phba->cfg_nvme_io_channel = 0;
if (phba->cfg_fcp_io_channel > phba->cfg_nvme_io_channel)
phba->io_channel_irqs = phba->cfg_fcp_io_channel;
else
phba->io_channel_irqs = phba->cfg_nvme_io_channel;
phba->cfg_soft_wwnn = 0L;
phba->cfg_soft_wwpn = 0L;
lpfc_xri_split_init(phba, lpfc_xri_split);
lpfc_sg_seg_cnt_init(phba, lpfc_sg_seg_cnt);
lpfc_hba_queue_depth_init(phba, lpfc_hba_queue_depth);
lpfc_hba_log_verbose_init(phba, lpfc_log_verbose);
@ -5804,6 +6232,60 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
return;
}
/**
* lpfc_nvme_mod_param_dep - Adjust module parameter value based on
* dependencies between protocols and roles.
* @phba: lpfc_hba pointer.
**/
void
lpfc_nvme_mod_param_dep(struct lpfc_hba *phba)
{
if (phba->cfg_nvme_io_channel > phba->sli4_hba.num_present_cpu)
phba->cfg_nvme_io_channel = phba->sli4_hba.num_present_cpu;
if (phba->cfg_fcp_io_channel > phba->sli4_hba.num_present_cpu)
phba->cfg_fcp_io_channel = phba->sli4_hba.num_present_cpu;
if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME &&
phba->nvmet_support) {
phba->cfg_enable_fc4_type &= ~LPFC_ENABLE_FCP;
phba->cfg_fcp_io_channel = 0;
lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
"6013 %s x%x fb_size x%x, fb_max x%x\n",
"NVME Target PRLI ACC enable_fb ",
phba->cfg_nvme_enable_fb,
phba->cfg_nvmet_fb_size,
LPFC_NVMET_FB_SZ_MAX);
if (phba->cfg_nvme_enable_fb == 0)
phba->cfg_nvmet_fb_size = 0;
else {
if (phba->cfg_nvmet_fb_size > LPFC_NVMET_FB_SZ_MAX)
phba->cfg_nvmet_fb_size = LPFC_NVMET_FB_SZ_MAX;
}
/* Adjust lpfc_nvmet_mrq to avoid running out of WQE slots */
if (phba->cfg_nvmet_mrq > phba->cfg_nvme_io_channel) {
phba->cfg_nvmet_mrq = phba->cfg_nvme_io_channel;
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
"6018 Adjust lpfc_nvmet_mrq to %d\n",
phba->cfg_nvmet_mrq);
}
} else {
/* Not NVME Target mode. Turn off Target parameters. */
phba->nvmet_support = 0;
phba->cfg_nvmet_mrq = 0;
phba->cfg_nvmet_mrq_post = 0;
phba->cfg_nvmet_fb_size = 0;
}
if (phba->cfg_fcp_io_channel > phba->cfg_nvme_io_channel)
phba->io_channel_irqs = phba->cfg_fcp_io_channel;
else
phba->io_channel_irqs = phba->cfg_nvme_io_channel;
}
/**
* lpfc_get_vport_cfgparam - Used during port create, init the vport structure
* @vport: lpfc_vport pointer.

View File

@ -1,9 +1,11 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
* Copyright (C) 2017 Broadcom. All Rights Reserved. The term *
* Broadcom refers to Broadcom Limited and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* www.broadcom.com *
* Portions Copyright (C) 2004-2005 Christoph Hellwig *
* *
* This program is free software; you can redistribute it and/or *

View File

@ -1,9 +1,11 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
* Copyright (C) 2017 Broadcom. All Rights Reserved. The term *
* Broadcom refers to Broadcom Limited and/or its subsidiaries. *
* Copyright (C) 2009-2015 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* www.broadcom.com *
* *
* This program is free software; you can redistribute it and/or *
* modify it under the terms of version 2 of the GNU General *
@ -1704,6 +1706,7 @@ lpfc_bsg_diag_mode_enter(struct lpfc_hba *phba)
struct lpfc_vport **vports;
struct Scsi_Host *shost;
struct lpfc_sli *psli;
struct lpfc_queue *qp = NULL;
struct lpfc_sli_ring *pring;
int i = 0;
@ -1711,9 +1714,6 @@ lpfc_bsg_diag_mode_enter(struct lpfc_hba *phba)
if (!psli)
return -ENODEV;
pring = &psli->ring[LPFC_FCP_RING];
if (!pring)
return -ENODEV;
if ((phba->link_state == LPFC_HBA_ERROR) ||
(psli->sli_flag & LPFC_BLOCK_MGMT_IO) ||
@ -1732,10 +1732,18 @@ lpfc_bsg_diag_mode_enter(struct lpfc_hba *phba)
scsi_block_requests(shost);
}
while (!list_empty(&pring->txcmplq)) {
if (i++ > 500) /* wait up to 5 seconds */
if (phba->sli_rev != LPFC_SLI_REV4) {
pring = &psli->sli3_ring[LPFC_FCP_RING];
lpfc_emptyq_wait(phba, &pring->txcmplq, &phba->hbalock);
return 0;
}
list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
pring = qp->pring;
if (!pring || (pring->ringno != LPFC_FCP_RING))
continue;
if (!lpfc_emptyq_wait(phba, &pring->txcmplq,
&pring->ring_lock))
break;
msleep(10);
}
return 0;
}
@ -2703,7 +2711,7 @@ err_get_xri_exit:
* lpfc_bsg_dma_page_alloc - allocate a bsg mbox page sized dma buffers
* @phba: Pointer to HBA context object
*
* This function allocates BSG_MBOX_SIZE (4KB) page size dma buffer and.
* This function allocates BSG_MBOX_SIZE (4KB) page size dma buffer and
* returns the pointer to the buffer.
**/
static struct lpfc_dmabuf *
@ -2875,8 +2883,7 @@ out:
static int lpfcdiag_loop_post_rxbufs(struct lpfc_hba *phba, uint16_t rxxri,
size_t len)
{
struct lpfc_sli *psli = &phba->sli;
struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING];
struct lpfc_sli_ring *pring;
struct lpfc_iocbq *cmdiocbq;
IOCB_t *cmd = NULL;
struct list_head head, *curr, *next;
@ -2890,6 +2897,8 @@ static int lpfcdiag_loop_post_rxbufs(struct lpfc_hba *phba, uint16_t rxxri,
int iocb_stat;
int i = 0;
pring = lpfc_phba_elsring(phba);
cmdiocbq = lpfc_sli_get_iocbq(phba);
rxbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
if (rxbmp != NULL) {
@ -5403,13 +5412,15 @@ lpfc_bsg_timeout(struct bsg_job *job)
struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
struct lpfc_hba *phba = vport->phba;
struct lpfc_iocbq *cmdiocb;
struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
struct lpfc_sli_ring *pring;
struct bsg_job_data *dd_data;
unsigned long flags;
int rc = 0;
LIST_HEAD(completions);
struct lpfc_iocbq *check_iocb, *next_iocb;
pring = lpfc_phba_elsring(phba);
/* if job's driver data is NULL, the command completed or is in the
* the process of completing. In this case, return status to request
* so the timeout is retried. This avoids double completion issues

View File

@ -1,9 +1,11 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
* Copyright (C) 2010-2015 Emulex. All rights reserved. *
* Copyright (C) 2017 Broadcom. All Rights Reserved. The term *
* Broadcom refers to Broadcom Limited and/or its subsidiaries. *
* Copyright (C) 2010-2015 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* www.broadcom.com *
* *
* This program is free software; you can redistribute it and/or *
* modify it under the terms of version 2 of the GNU General *

View File

@ -1,9 +1,11 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
* Copyright (C) 2017 Broadcom. All Rights Reserved. The term *
* Broadcom refers to Broadcom Limited and/or its subsidiaries. *
* Copyright (C) 2004-2011 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* www.broadcom.com *
* *
* This program is free software; you can redistribute it and/or *
* modify it under the terms of version 2 of the GNU General *

View File

@ -1,9 +1,11 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
* Copyright (C) 2017 Broadcom. All Rights Reserved. The term *
* Broadcom refers to Broadcom Limited and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* www.broadcom.com *
* *
* This program is free software; you can redistribute it and/or *
* modify it under the terms of version 2 of the GNU General *
@ -21,6 +23,7 @@
typedef int (*node_filter)(struct lpfc_nodelist *, void *);
struct fc_rport;
struct fc_frame_header;
void lpfc_down_link(struct lpfc_hba *, LPFC_MBOXQ_t *);
void lpfc_sli_read_link_ste(struct lpfc_hba *);
void lpfc_dump_mem(struct lpfc_hba *, LPFC_MBOXQ_t *, uint16_t, uint16_t);
@ -167,6 +170,8 @@ void lpfc_hb_timeout_handler(struct lpfc_hba *);
void lpfc_ct_unsol_event(struct lpfc_hba *, struct lpfc_sli_ring *,
struct lpfc_iocbq *);
int lpfc_ct_handle_unsol_abort(struct lpfc_hba *, struct hbq_dmabuf *);
int lpfc_issue_gidft(struct lpfc_vport *vport);
int lpfc_get_gidft_type(struct lpfc_vport *vport, struct lpfc_iocbq *iocbq);
int lpfc_ns_cmd(struct lpfc_vport *, int, uint8_t, uint32_t);
int lpfc_fdmi_cmd(struct lpfc_vport *, struct lpfc_nodelist *, int, uint32_t);
void lpfc_fdmi_num_disc_check(struct lpfc_vport *);
@ -186,6 +191,8 @@ void lpfc_unblock_mgmt_io(struct lpfc_hba *);
void lpfc_offline_prep(struct lpfc_hba *, int);
void lpfc_offline(struct lpfc_hba *);
void lpfc_reset_hba(struct lpfc_hba *);
int lpfc_emptyq_wait(struct lpfc_hba *phba, struct list_head *hd,
spinlock_t *slock);
int lpfc_fof_queue_create(struct lpfc_hba *);
int lpfc_fof_queue_setup(struct lpfc_hba *);
@ -193,7 +200,11 @@ int lpfc_fof_queue_destroy(struct lpfc_hba *);
irqreturn_t lpfc_sli4_fof_intr_handler(int, void *);
int lpfc_sli_setup(struct lpfc_hba *);
int lpfc_sli_queue_setup(struct lpfc_hba *);
int lpfc_sli4_setup(struct lpfc_hba *phba);
void lpfc_sli_queue_init(struct lpfc_hba *phba);
void lpfc_sli4_queue_init(struct lpfc_hba *phba);
struct lpfc_sli_ring *lpfc_sli4_calc_ring(struct lpfc_hba *phba,
struct lpfc_iocbq *iocbq);
void lpfc_handle_eratt(struct lpfc_hba *);
void lpfc_handle_latt(struct lpfc_hba *);
@ -220,6 +231,7 @@ void lpfc_reg_vfi(struct lpfcMboxq *, struct lpfc_vport *, dma_addr_t);
void lpfc_init_vpi(struct lpfc_hba *, struct lpfcMboxq *, uint16_t);
void lpfc_unreg_vfi(struct lpfcMboxq *, struct lpfc_vport *);
void lpfc_reg_fcfi(struct lpfc_hba *, struct lpfcMboxq *);
void lpfc_reg_fcfi_mrq(struct lpfc_hba *phba, struct lpfcMboxq *mbox, int mode);
void lpfc_unreg_fcfi(struct lpfcMboxq *, uint16_t);
void lpfc_resume_rpi(struct lpfcMboxq *, struct lpfc_nodelist *);
int lpfc_check_pending_fcoe_event(struct lpfc_hba *, uint8_t);
@ -231,8 +243,15 @@ struct hbq_dmabuf *lpfc_els_hbq_alloc(struct lpfc_hba *);
void lpfc_els_hbq_free(struct lpfc_hba *, struct hbq_dmabuf *);
struct hbq_dmabuf *lpfc_sli4_rb_alloc(struct lpfc_hba *);
void lpfc_sli4_rb_free(struct lpfc_hba *, struct hbq_dmabuf *);
struct rqb_dmabuf *lpfc_sli4_nvmet_alloc(struct lpfc_hba *phba);
void lpfc_sli4_nvmet_free(struct lpfc_hba *phba, struct rqb_dmabuf *dmab);
void lpfc_sli4_build_dflt_fcf_record(struct lpfc_hba *, struct fcf_record *,
uint16_t);
int lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq,
struct lpfc_rqe *hrqe, struct lpfc_rqe *drqe);
int lpfc_post_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *hq,
struct lpfc_queue *dq, int count);
int lpfc_free_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *hq);
void lpfc_unregister_fcf(struct lpfc_hba *);
void lpfc_unregister_fcf_rescan(struct lpfc_hba *);
void lpfc_unregister_unused_fcf(struct lpfc_hba *);
@ -287,6 +306,11 @@ void lpfc_sli_def_mbox_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *);
void lpfc_sli4_unreg_rpi_cmpl_clr(struct lpfc_hba *, LPFC_MBOXQ_t *);
int lpfc_sli_issue_iocb(struct lpfc_hba *, uint32_t,
struct lpfc_iocbq *, uint32_t);
int lpfc_sli4_issue_wqe(struct lpfc_hba *phba, uint32_t rnum,
struct lpfc_iocbq *iocbq);
struct lpfc_sglq *__lpfc_clear_active_sglq(struct lpfc_hba *phba, uint16_t xri);
struct lpfc_sglq *__lpfc_sli_get_nvmet_sglq(struct lpfc_hba *phba,
struct lpfc_iocbq *piocbq);
void lpfc_sli_pcimem_bcopy(void *, void *, uint32_t);
void lpfc_sli_bemem_bcopy(void *, void *, uint32_t);
void lpfc_sli_abort_iocb_ring(struct lpfc_hba *, struct lpfc_sli_ring *);
@ -336,8 +360,13 @@ void lpfc_sli_free_hbq(struct lpfc_hba *, struct hbq_dmabuf *);
void *lpfc_mbuf_alloc(struct lpfc_hba *, int, dma_addr_t *);
void __lpfc_mbuf_free(struct lpfc_hba *, void *, dma_addr_t);
void lpfc_mbuf_free(struct lpfc_hba *, void *, dma_addr_t);
void *lpfc_nvmet_buf_alloc(struct lpfc_hba *phba, int flags,
dma_addr_t *handle);
void lpfc_nvmet_buf_free(struct lpfc_hba *phba, void *virtp, dma_addr_t dma);
void lpfc_in_buf_free(struct lpfc_hba *, struct lpfc_dmabuf *);
void lpfc_rq_buf_free(struct lpfc_hba *phba, struct lpfc_dmabuf *mp);
/* Function prototypes. */
const char* lpfc_info(struct Scsi_Host *);
int lpfc_scan_finished(struct Scsi_Host *, unsigned long);
@ -356,6 +385,7 @@ extern struct device_attribute *lpfc_hba_attrs[];
extern struct device_attribute *lpfc_vport_attrs[];
extern struct scsi_host_template lpfc_template;
extern struct scsi_host_template lpfc_template_s3;
extern struct scsi_host_template lpfc_template_nvme;
extern struct scsi_host_template lpfc_vport_template;
extern struct fc_function_template lpfc_transport_functions;
extern struct fc_function_template lpfc_vport_transport_functions;
@ -375,9 +405,11 @@ void lpfc_host_attrib_init(struct Scsi_Host *);
extern void lpfc_debugfs_initialize(struct lpfc_vport *);
extern void lpfc_debugfs_terminate(struct lpfc_vport *);
extern void lpfc_debugfs_disc_trc(struct lpfc_vport *, int, char *, uint32_t,
uint32_t, uint32_t);
uint32_t, uint32_t);
extern void lpfc_debugfs_slow_ring_trc(struct lpfc_hba *, char *, uint32_t,
uint32_t, uint32_t);
uint32_t, uint32_t);
extern void lpfc_debugfs_nvme_trc(struct lpfc_hba *phba, char *fmt,
uint16_t data1, uint16_t data2, uint32_t data3);
extern struct lpfc_hbq_init *lpfc_hbq_defs[];
/* SLI4 if_type 2 externs. */
@ -471,7 +503,10 @@ int lpfc_issue_unreg_vfi(struct lpfc_vport *);
int lpfc_selective_reset(struct lpfc_hba *);
int lpfc_sli4_read_config(struct lpfc_hba *);
void lpfc_sli4_node_prep(struct lpfc_hba *);
int lpfc_sli4_xri_sgl_update(struct lpfc_hba *);
int lpfc_sli4_els_sgl_update(struct lpfc_hba *phba);
int lpfc_sli4_nvmet_sgl_update(struct lpfc_hba *phba);
int lpfc_sli4_scsi_sgl_update(struct lpfc_hba *phba);
int lpfc_sli4_nvme_sgl_update(struct lpfc_hba *phba);
void lpfc_free_sgl_list(struct lpfc_hba *, struct list_head *);
uint32_t lpfc_sli_port_speed_get(struct lpfc_hba *);
int lpfc_sli4_request_firmware_update(struct lpfc_hba *, uint8_t);
@ -496,3 +531,26 @@ bool lpfc_find_next_oas_lun(struct lpfc_hba *, struct lpfc_name *,
uint32_t *, uint32_t *);
int lpfc_sli4_dump_page_a0(struct lpfc_hba *phba, struct lpfcMboxq *mbox);
void lpfc_mbx_cmpl_rdp_page_a0(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb);
/* NVME interfaces. */
void lpfc_nvme_unregister_port(struct lpfc_vport *vport,
struct lpfc_nodelist *ndlp);
int lpfc_nvme_register_port(struct lpfc_vport *vport,
struct lpfc_nodelist *ndlp);
int lpfc_nvme_create_localport(struct lpfc_vport *vport);
void lpfc_nvme_destroy_localport(struct lpfc_vport *vport);
void lpfc_nvme_update_localport(struct lpfc_vport *vport);
int lpfc_nvmet_create_targetport(struct lpfc_hba *phba);
int lpfc_nvmet_update_targetport(struct lpfc_hba *phba);
void lpfc_nvmet_destroy_targetport(struct lpfc_hba *phba);
void lpfc_nvmet_unsol_ls_event(struct lpfc_hba *phba,
struct lpfc_sli_ring *pring, struct lpfc_iocbq *piocb);
void lpfc_nvmet_unsol_fcp_event(struct lpfc_hba *phba,
struct lpfc_sli_ring *pring,
struct rqb_dmabuf *nvmebuf, uint64_t isr_ts);
void lpfc_nvme_mod_param_dep(struct lpfc_hba *phba);
void lpfc_nvme_abort_fcreq_cmpl(struct lpfc_hba *phba,
struct lpfc_iocbq *cmdiocb,
struct lpfc_wcqe_complete *abts_cmpl);
extern int lpfc_enable_nvmet_cnt;
extern unsigned long long lpfc_enable_nvmet[];

View File

@ -1,9 +1,11 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
* Copyright (C) 2017 Broadcom. All Rights Reserved. The term *
* Broadcom refers to Broadcom Limited and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* www.broadcom.com *
* *
* This program is free software; you can redistribute it and/or *
* modify it under the terms of version 2 of the GNU General *
@ -40,8 +42,9 @@
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
#include "lpfc_scsi.h"
#include "lpfc_nvme.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_version.h"
@ -453,8 +456,90 @@ lpfc_find_vport_by_did(struct lpfc_hba *phba, uint32_t did) {
return NULL;
}
static void
lpfc_prep_node_fc4type(struct lpfc_vport *vport, uint32_t Did, uint8_t fc4_type)
{
struct lpfc_nodelist *ndlp;
if ((vport->port_type != LPFC_NPIV_PORT) ||
!(vport->ct_flags & FC_CT_RFF_ID) || !vport->cfg_restrict_login) {
ndlp = lpfc_setup_disc_node(vport, Did);
if (ndlp && NLP_CHK_NODE_ACT(ndlp)) {
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT,
"Parse GID_FTrsp: did:x%x flg:x%x x%x",
Did, ndlp->nlp_flag, vport->fc_flag);
/* By default, the driver expects to support FCP FC4 */
if (fc4_type == FC_TYPE_FCP)
ndlp->nlp_fc4_type |= NLP_FC4_FCP;
if (fc4_type == FC_TYPE_NVME)
ndlp->nlp_fc4_type |= NLP_FC4_NVME;
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
"0238 Process x%06x NameServer Rsp "
"Data: x%x x%x x%x x%x\n", Did,
ndlp->nlp_flag, ndlp->nlp_fc4_type,
vport->fc_flag,
vport->fc_rscn_id_cnt);
} else {
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT,
"Skip1 GID_FTrsp: did:x%x flg:x%x cnt:%d",
Did, vport->fc_flag, vport->fc_rscn_id_cnt);
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
"0239 Skip x%06x NameServer Rsp "
"Data: x%x x%x\n", Did,
vport->fc_flag,
vport->fc_rscn_id_cnt);
}
} else {
if (!(vport->fc_flag & FC_RSCN_MODE) ||
lpfc_rscn_payload_check(vport, Did)) {
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT,
"Query GID_FTrsp: did:x%x flg:x%x cnt:%d",
Did, vport->fc_flag, vport->fc_rscn_id_cnt);
/*
* This NPortID was previously a FCP target,
* Don't even bother to send GFF_ID.
*/
ndlp = lpfc_findnode_did(vport, Did);
if (ndlp && NLP_CHK_NODE_ACT(ndlp))
ndlp->nlp_fc4_type = fc4_type;
if (ndlp && NLP_CHK_NODE_ACT(ndlp)) {
ndlp->nlp_fc4_type = fc4_type;
if (ndlp->nlp_type & NLP_FCP_TARGET)
lpfc_setup_disc_node(vport, Did);
else if (lpfc_ns_cmd(vport, SLI_CTNS_GFF_ID,
0, Did) == 0)
vport->num_disc_nodes++;
else
lpfc_setup_disc_node(vport, Did);
}
} else {
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT,
"Skip2 GID_FTrsp: did:x%x flg:x%x cnt:%d",
Did, vport->fc_flag, vport->fc_rscn_id_cnt);
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
"0245 Skip x%06x NameServer Rsp "
"Data: x%x x%x\n", Did,
vport->fc_flag,
vport->fc_rscn_id_cnt);
}
}
}
static int
lpfc_ns_rsp(struct lpfc_vport *vport, struct lpfc_dmabuf *mp, uint32_t Size)
lpfc_ns_rsp(struct lpfc_vport *vport, struct lpfc_dmabuf *mp, uint8_t fc4_type,
uint32_t Size)
{
struct lpfc_hba *phba = vport->phba;
struct lpfc_sli_ct_request *Response =
@ -499,97 +584,12 @@ lpfc_ns_rsp(struct lpfc_vport *vport, struct lpfc_dmabuf *mp, uint32_t Size)
*/
if ((Did != vport->fc_myDID) &&
((lpfc_find_vport_by_did(phba, Did) == NULL) ||
vport->cfg_peer_port_login)) {
if ((vport->port_type != LPFC_NPIV_PORT) ||
(!(vport->ct_flags & FC_CT_RFF_ID)) ||
(!vport->cfg_restrict_login)) {
ndlp = lpfc_setup_disc_node(vport, Did);
if (ndlp && NLP_CHK_NODE_ACT(ndlp)) {
lpfc_debugfs_disc_trc(vport,
LPFC_DISC_TRC_CT,
"Parse GID_FTrsp: "
"did:x%x flg:x%x x%x",
Did, ndlp->nlp_flag,
vport->fc_flag);
vport->cfg_peer_port_login))
lpfc_prep_node_fc4type(vport, Did, fc4_type);
lpfc_printf_vlog(vport,
KERN_INFO,
LOG_DISCOVERY,
"0238 Process "
"x%x NameServer Rsp"
"Data: x%x x%x x%x\n",
Did, ndlp->nlp_flag,
vport->fc_flag,
vport->fc_rscn_id_cnt);
} else {
lpfc_debugfs_disc_trc(vport,
LPFC_DISC_TRC_CT,
"Skip1 GID_FTrsp: "
"did:x%x flg:x%x cnt:%d",
Did, vport->fc_flag,
vport->fc_rscn_id_cnt);
lpfc_printf_vlog(vport,
KERN_INFO,
LOG_DISCOVERY,
"0239 Skip x%x "
"NameServer Rsp Data: "
"x%x x%x\n",
Did, vport->fc_flag,
vport->fc_rscn_id_cnt);
}
} else {
if (!(vport->fc_flag & FC_RSCN_MODE) ||
(lpfc_rscn_payload_check(vport, Did))) {
lpfc_debugfs_disc_trc(vport,
LPFC_DISC_TRC_CT,
"Query GID_FTrsp: "
"did:x%x flg:x%x cnt:%d",
Did, vport->fc_flag,
vport->fc_rscn_id_cnt);
/* This NPortID was previously
* a FCP target, * Don't even
* bother to send GFF_ID.
*/
ndlp = lpfc_findnode_did(vport,
Did);
if (ndlp &&
NLP_CHK_NODE_ACT(ndlp)
&& (ndlp->nlp_type &
NLP_FCP_TARGET))
lpfc_setup_disc_node
(vport, Did);
else if (lpfc_ns_cmd(vport,
SLI_CTNS_GFF_ID,
0, Did) == 0)
vport->num_disc_nodes++;
else
lpfc_setup_disc_node
(vport, Did);
}
else {
lpfc_debugfs_disc_trc(vport,
LPFC_DISC_TRC_CT,
"Skip2 GID_FTrsp: "
"did:x%x flg:x%x cnt:%d",
Did, vport->fc_flag,
vport->fc_rscn_id_cnt);
lpfc_printf_vlog(vport,
KERN_INFO,
LOG_DISCOVERY,
"0245 Skip x%x "
"NameServer Rsp Data: "
"x%x x%x\n",
Did, vport->fc_flag,
vport->fc_rscn_id_cnt);
}
}
}
if (CTentry & (cpu_to_be32(SLI_CT_LAST_ENTRY)))
goto nsout1;
Cnt -= sizeof(uint32_t);
}
ctptr = NULL;
@ -609,16 +609,18 @@ lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
IOCB_t *irsp;
struct lpfc_dmabuf *outp;
struct lpfc_dmabuf *inp;
struct lpfc_sli_ct_request *CTrsp;
struct lpfc_sli_ct_request *CTreq;
struct lpfc_nodelist *ndlp;
int rc;
int rc, type;
/* First save ndlp, before we overwrite it */
ndlp = cmdiocb->context_un.ndlp;
/* we pass cmdiocb to state machine which needs rspiocb as well */
cmdiocb->context_un.rsp_iocb = rspiocb;
inp = (struct lpfc_dmabuf *) cmdiocb->context1;
outp = (struct lpfc_dmabuf *) cmdiocb->context2;
irsp = &rspiocb->iocb;
@ -656,9 +658,14 @@ lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
IOERR_NO_RESOURCES)
vport->fc_ns_retry++;
type = lpfc_get_gidft_type(vport, cmdiocb);
if (type == 0)
goto out;
/* CT command is being retried */
vport->gidft_inp--;
rc = lpfc_ns_cmd(vport, SLI_CTNS_GID_FT,
vport->fc_ns_retry, 0);
vport->fc_ns_retry, type);
if (rc == 0)
goto out;
}
@ -670,13 +677,18 @@ lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
irsp->ulpStatus, vport->fc_ns_retry);
} else {
/* Good status, continue checking */
CTreq = (struct lpfc_sli_ct_request *) inp->virt;
CTrsp = (struct lpfc_sli_ct_request *) outp->virt;
if (CTrsp->CommandResponse.bits.CmdRsp ==
cpu_to_be16(SLI_CT_RESPONSE_FS_ACC)) {
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
"0208 NameServer Rsp Data: x%x\n",
vport->fc_flag);
lpfc_ns_rsp(vport, outp,
"0208 NameServer Rsp Data: x%x x%x\n",
vport->fc_flag,
CTreq->un.gid.Fc4Type);
lpfc_ns_rsp(vport,
outp,
CTreq->un.gid.Fc4Type,
(uint32_t) (irsp->un.genreq64.bdl.bdeSize));
} else if (CTrsp->CommandResponse.bits.CmdRsp ==
be16_to_cpu(SLI_CT_RESPONSE_FS_RJT)) {
@ -731,9 +743,11 @@ lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
(uint32_t) CTrsp->ReasonCode,
(uint32_t) CTrsp->Explanation);
}
vport->gidft_inp--;
}
/* Link up / RSCN discovery */
if (vport->num_disc_nodes == 0) {
if ((vport->num_disc_nodes == 0) &&
(vport->gidft_inp == 0)) {
/*
* The driver has cycled through all Nports in the RSCN payload.
* Complete the handling by cleaning up and marking the
@ -881,6 +895,60 @@ out:
return;
}
static void
lpfc_cmpl_ct_cmd_gft_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_iocbq *rspiocb)
{
struct lpfc_vport *vport = cmdiocb->vport;
IOCB_t *irsp = &rspiocb->iocb;
struct lpfc_dmabuf *inp = (struct lpfc_dmabuf *)cmdiocb->context1;
struct lpfc_dmabuf *outp = (struct lpfc_dmabuf *)cmdiocb->context2;
struct lpfc_sli_ct_request *CTrsp;
int did;
struct lpfc_nodelist *ndlp;
uint32_t fc4_data_0, fc4_data_1;
did = ((struct lpfc_sli_ct_request *)inp->virt)->un.gft.PortId;
did = be32_to_cpu(did);
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT,
"GFT_ID cmpl: status:x%x/x%x did:x%x",
irsp->ulpStatus, irsp->un.ulpWord[4], did);
if (irsp->ulpStatus == IOSTAT_SUCCESS) {
/* Good status, continue checking */
CTrsp = (struct lpfc_sli_ct_request *)outp->virt;
fc4_data_0 = be32_to_cpu(CTrsp->un.gft_acc.fc4_types[0]);
fc4_data_1 = be32_to_cpu(CTrsp->un.gft_acc.fc4_types[1]);
lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
"3062 DID x%06x GFT Wd0 x%08x Wd1 x%08x\n",
did, fc4_data_0, fc4_data_1);
ndlp = lpfc_findnode_did(vport, did);
if (ndlp) {
/* The bitmask value for FCP and NVME FCP types is
* the same because they are 32 bits distant from
* each other in word0 and word0.
*/
if (fc4_data_0 & LPFC_FC4_TYPE_BITMASK)
ndlp->nlp_fc4_type |= NLP_FC4_FCP;
if (fc4_data_1 & LPFC_FC4_TYPE_BITMASK)
ndlp->nlp_fc4_type |= NLP_FC4_NVME;
lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
"3064 Setting ndlp %p, DID x%06x with "
"FC4 x%08x, Data: x%08x x%08x\n",
ndlp, did, ndlp->nlp_fc4_type,
FC_TYPE_FCP, FC_TYPE_NVME);
}
ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE);
lpfc_issue_els_prli(vport, ndlp, 0);
} else
lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
"3065 GFT_ID failed x%08x\n", irsp->ulpStatus);
lpfc_ct_free_iocb(phba, cmdiocb);
}
static void
lpfc_cmpl_ct(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
@ -1071,31 +1139,27 @@ lpfc_cmpl_ct_cmd_rff_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
return;
}
/*
* Although the symbolic port name is thought to be an integer
* as of January 18, 2016, leave it as a string until more of
* the record state becomes defined.
*/
int
lpfc_vport_symbolic_port_name(struct lpfc_vport *vport, char *symbol,
size_t size)
{
int n;
uint8_t *wwn = vport->phba->wwpn;
n = snprintf(symbol, size,
"Emulex PPN-%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x",
wwn[0], wwn[1], wwn[2], wwn[3],
wwn[4], wwn[5], wwn[6], wwn[7]);
if (vport->port_type == LPFC_PHYSICAL_PORT)
return n;
if (n < size)
n += snprintf(symbol + n, size - n, " VPort-%d", vport->vpi);
if (n < size &&
strlen(vport->fc_vport->symbolic_name))
n += snprintf(symbol + n, size - n, " VName-%s",
vport->fc_vport->symbolic_name);
/*
* Use the lpfc board number as the Symbolic Port
* Name object. NPIV is not in play so this integer
* value is sufficient and unique per FC-ID.
*/
n = snprintf(symbol, size, "%d", vport->phba->brd_no);
return n;
}
int
lpfc_vport_symbolic_node_name(struct lpfc_vport *vport, char *symbol,
size_t size)
@ -1106,24 +1170,26 @@ lpfc_vport_symbolic_node_name(struct lpfc_vport *vport, char *symbol,
lpfc_decode_firmware_rev(vport->phba, fwrev, 0);
n = snprintf(symbol, size, "Emulex %s", vport->phba->ModelName);
if (size < n)
return n;
n += snprintf(symbol + n, size - n, " FV%s", fwrev);
if (size < n)
return n;
n += snprintf(symbol + n, size - n, " DV%s", lpfc_release_version);
n += snprintf(symbol + n, size - n, " DV%s.",
lpfc_release_version);
if (size < n)
return n;
n += snprintf(symbol + n, size - n, " HN:%s.",
init_utsname()->nodename);
if (size < n)
return n;
n += snprintf(symbol + n, size - n, " HN:%s", init_utsname()->nodename);
/* Note :- OS name is "Linux" */
if (size < n)
return n;
n += snprintf(symbol + n, size - n, " OS:%s", init_utsname()->sysname);
n += snprintf(symbol + n, size - n, " OS:%s\n",
init_utsname()->sysname);
return n;
}
@ -1147,6 +1213,27 @@ lpfc_find_map_node(struct lpfc_vport *vport)
return cnt;
}
/*
* This routine will return the FC4 Type associated with the CT
* GID_FT command.
*/
int
lpfc_get_gidft_type(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb)
{
struct lpfc_sli_ct_request *CtReq;
struct lpfc_dmabuf *mp;
uint32_t type;
mp = cmdiocb->context1;
if (mp == NULL)
return 0;
CtReq = (struct lpfc_sli_ct_request *)mp->virt;
type = (uint32_t)CtReq->un.gid.Fc4Type;
if ((type != SLI_CTPT_FCP) && (type != SLI_CTPT_NVME))
return 0;
return type;
}
/*
* lpfc_ns_cmd
* Description:
@ -1207,8 +1294,9 @@ lpfc_ns_cmd(struct lpfc_vport *vport, int cmdcode,
/* NameServer Req */
lpfc_printf_vlog(vport, KERN_INFO ,LOG_DISCOVERY,
"0236 NameServer Req Data: x%x x%x x%x\n",
cmdcode, vport->fc_flag, vport->fc_rscn_id_cnt);
"0236 NameServer Req Data: x%x x%x x%x x%x\n",
cmdcode, vport->fc_flag, vport->fc_rscn_id_cnt,
context);
bpl = (struct ulp_bde64 *) bmp->virt;
memset(bpl, 0, sizeof(struct ulp_bde64));
@ -1219,6 +1307,8 @@ lpfc_ns_cmd(struct lpfc_vport *vport, int cmdcode,
bpl->tus.f.bdeSize = GID_REQUEST_SZ;
else if (cmdcode == SLI_CTNS_GFF_ID)
bpl->tus.f.bdeSize = GFF_REQUEST_SZ;
else if (cmdcode == SLI_CTNS_GFT_ID)
bpl->tus.f.bdeSize = GFT_REQUEST_SZ;
else if (cmdcode == SLI_CTNS_RFT_ID)
bpl->tus.f.bdeSize = RFT_REQUEST_SZ;
else if (cmdcode == SLI_CTNS_RNN_ID)
@ -1246,7 +1336,8 @@ lpfc_ns_cmd(struct lpfc_vport *vport, int cmdcode,
case SLI_CTNS_GID_FT:
CtReq->CommandResponse.bits.CmdRsp =
cpu_to_be16(SLI_CTNS_GID_FT);
CtReq->un.gid.Fc4Type = SLI_CTPT_FCP;
CtReq->un.gid.Fc4Type = context;
if (vport->port_state < LPFC_NS_QRY)
vport->port_state = LPFC_NS_QRY;
lpfc_set_disctmo(vport);
@ -1261,12 +1352,32 @@ lpfc_ns_cmd(struct lpfc_vport *vport, int cmdcode,
cmpl = lpfc_cmpl_ct_cmd_gff_id;
break;
case SLI_CTNS_GFT_ID:
CtReq->CommandResponse.bits.CmdRsp =
cpu_to_be16(SLI_CTNS_GFT_ID);
CtReq->un.gft.PortId = cpu_to_be32(context);
cmpl = lpfc_cmpl_ct_cmd_gft_id;
break;
case SLI_CTNS_RFT_ID:
vport->ct_flags &= ~FC_CT_RFT_ID;
CtReq->CommandResponse.bits.CmdRsp =
cpu_to_be16(SLI_CTNS_RFT_ID);
CtReq->un.rft.PortId = cpu_to_be32(vport->fc_myDID);
CtReq->un.rft.fcpReg = 1;
/* Register FC4 FCP type if enabled. */
if ((phba->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
(phba->cfg_enable_fc4_type == LPFC_ENABLE_FCP))
CtReq->un.rft.fcpReg = 1;
/* Register NVME type if enabled. Defined LE and swapped.
* rsvd[0] is used as word1 because of the hard-coded
* word0 usage in the ct_request data structure.
*/
if ((phba->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
(phba->cfg_enable_fc4_type == LPFC_ENABLE_NVME))
CtReq->un.rft.rsvd[0] = cpu_to_be32(0x00000100);
cmpl = lpfc_cmpl_ct_cmd_rft_id;
break;
@ -1316,7 +1427,31 @@ lpfc_ns_cmd(struct lpfc_vport *vport, int cmdcode,
cpu_to_be16(SLI_CTNS_RFF_ID);
CtReq->un.rff.PortId = cpu_to_be32(vport->fc_myDID);
CtReq->un.rff.fbits = FC4_FEATURE_INIT;
CtReq->un.rff.type_code = FC_TYPE_FCP;
/* The driver always supports FC_TYPE_FCP. However, the
* caller can specify NVME (type x28) as well. But only
* these that FC4 type is supported.
*/
if (((phba->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
(phba->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) &&
(context == FC_TYPE_NVME)) {
if ((vport == phba->pport) && phba->nvmet_support) {
CtReq->un.rff.fbits = (FC4_FEATURE_TARGET |
FC4_FEATURE_NVME_DISC);
lpfc_nvmet_update_targetport(phba);
} else {
lpfc_nvme_update_localport(vport);
}
CtReq->un.rff.type_code = context;
} else if (((phba->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
(phba->cfg_enable_fc4_type == LPFC_ENABLE_FCP)) &&
(context == FC_TYPE_FCP))
CtReq->un.rff.type_code = context;
else
goto ns_cmd_free_bmpvirt;
cmpl = lpfc_cmpl_ct_cmd_rff_id;
break;
}
@ -1337,6 +1472,7 @@ lpfc_ns_cmd(struct lpfc_vport *vport, int cmdcode,
*/
lpfc_nlp_put(ndlp);
ns_cmd_free_bmpvirt:
lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
ns_cmd_free_bmp:
kfree(bmp);

File diff suppressed because it is too large Load Diff

View File

@ -1,9 +1,11 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
* Copyright (C) 2017 Broadcom. All Rights Reserved. The term *
* Broadcom refers to Broadcom Limited and/or its subsidiaries. *
* Copyright (C) 2007-2011 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* www.broadcom.com *
* *
* This program is free software; you can redistribute it and/or *
* modify it under the terms of version 2 of the GNU General *
@ -42,6 +44,22 @@
/* hbqinfo output buffer size */
#define LPFC_HBQINFO_SIZE 8192
enum {
DUMP_FCP,
DUMP_NVME,
DUMP_MBX,
DUMP_ELS,
DUMP_NVMELS,
};
/* nvmestat output buffer size */
#define LPFC_NVMESTAT_SIZE 8192
#define LPFC_NVMEKTIME_SIZE 8192
#define LPFC_CPUCHECK_SIZE 8192
#define LPFC_NVMEIO_TRC_SIZE 8192
#define LPFC_DEBUG_OUT_LINE_SZ 80
/*
* For SLI4 iDiag debugfs diagnostics tool
*/
@ -188,6 +206,12 @@
#define SIZE_U16 sizeof(uint16_t)
#define SIZE_U32 sizeof(uint32_t)
#define lpfc_nvmeio_data(phba, fmt, arg...) \
{ \
if (phba->nvmeio_trc_on) \
lpfc_debugfs_nvme_trc(phba, fmt, ##arg); \
}
struct lpfc_debug {
char *i_private;
char op;
@ -206,6 +230,13 @@ struct lpfc_debugfs_trc {
unsigned long jif;
};
struct lpfc_debugfs_nvmeio_trc {
char *fmt;
uint16_t data1;
uint16_t data2;
uint32_t data3;
};
struct lpfc_idiag_offset {
uint32_t last_rd;
};
@ -358,58 +389,111 @@ lpfc_debug_dump_q(struct lpfc_queue *q)
}
/**
* lpfc_debug_dump_fcp_wq - dump all entries from a fcp work queue
* lpfc_debug_dump_wq - dump all entries from the fcp or nvme work queue
* @phba: Pointer to HBA context object.
* @fcp_wqidx: Index to a FCP work queue.
* @wqidx: Index to a FCP or NVME work queue.
*
* This function dumps all entries from a FCP work queue specified by the
* @fcp_wqidx.
* This function dumps all entries from a FCP or NVME work queue specified
* by the wqidx.
**/
static inline void
lpfc_debug_dump_fcp_wq(struct lpfc_hba *phba, int fcp_wqidx)
lpfc_debug_dump_wq(struct lpfc_hba *phba, int qtype, int wqidx)
{
/* sanity check */
if (fcp_wqidx >= phba->cfg_fcp_io_channel)
struct lpfc_queue *wq;
char *qtypestr;
if (qtype == DUMP_FCP) {
wq = phba->sli4_hba.fcp_wq[wqidx];
qtypestr = "FCP";
} else if (qtype == DUMP_NVME) {
wq = phba->sli4_hba.nvme_wq[wqidx];
qtypestr = "NVME";
} else if (qtype == DUMP_MBX) {
wq = phba->sli4_hba.mbx_wq;
qtypestr = "MBX";
} else if (qtype == DUMP_ELS) {
wq = phba->sli4_hba.els_wq;
qtypestr = "ELS";
} else if (qtype == DUMP_NVMELS) {
wq = phba->sli4_hba.nvmels_wq;
qtypestr = "NVMELS";
} else
return;
printk(KERN_ERR "FCP WQ: WQ[Idx:%d|Qid:%d]\n",
fcp_wqidx, phba->sli4_hba.fcp_wq[fcp_wqidx]->queue_id);
lpfc_debug_dump_q(phba->sli4_hba.fcp_wq[fcp_wqidx]);
if (qtype == DUMP_FCP || qtype == DUMP_NVME)
pr_err("%s WQ: WQ[Idx:%d|Qid:%d]\n",
qtypestr, wqidx, wq->queue_id);
else
pr_err("%s WQ: WQ[Qid:%d]\n",
qtypestr, wq->queue_id);
lpfc_debug_dump_q(wq);
}
/**
* lpfc_debug_dump_fcp_cq - dump all entries from a fcp work queue's cmpl queue
* lpfc_debug_dump_cq - dump all entries from a fcp or nvme work queue's
* cmpl queue
* @phba: Pointer to HBA context object.
* @fcp_wqidx: Index to a FCP work queue.
* @wqidx: Index to a FCP work queue.
*
* This function dumps all entries from a FCP complete queue which is
* associated to the FCP work queue specified by the @fcp_wqidx.
* This function dumps all entries from a FCP or NVME completion queue
* which is associated to the work queue specified by the @wqidx.
**/
static inline void
lpfc_debug_dump_fcp_cq(struct lpfc_hba *phba, int fcp_wqidx)
lpfc_debug_dump_cq(struct lpfc_hba *phba, int qtype, int wqidx)
{
int fcp_cqidx, fcp_cqid;
struct lpfc_queue *wq, *cq, *eq;
char *qtypestr;
int eqidx;
/* sanity check */
if (fcp_wqidx >= phba->cfg_fcp_io_channel)
/* fcp/nvme wq and cq are 1:1, thus same indexes */
if (qtype == DUMP_FCP) {
wq = phba->sli4_hba.fcp_wq[wqidx];
cq = phba->sli4_hba.fcp_cq[wqidx];
qtypestr = "FCP";
} else if (qtype == DUMP_NVME) {
wq = phba->sli4_hba.nvme_wq[wqidx];
cq = phba->sli4_hba.nvme_cq[wqidx];
qtypestr = "NVME";
} else if (qtype == DUMP_MBX) {
wq = phba->sli4_hba.mbx_wq;
cq = phba->sli4_hba.mbx_cq;
qtypestr = "MBX";
} else if (qtype == DUMP_ELS) {
wq = phba->sli4_hba.els_wq;
cq = phba->sli4_hba.els_cq;
qtypestr = "ELS";
} else if (qtype == DUMP_NVMELS) {
wq = phba->sli4_hba.nvmels_wq;
cq = phba->sli4_hba.nvmels_cq;
qtypestr = "NVMELS";
} else
return;
fcp_cqid = phba->sli4_hba.fcp_wq[fcp_wqidx]->assoc_qid;
for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_io_channel; fcp_cqidx++)
if (phba->sli4_hba.fcp_cq[fcp_cqidx]->queue_id == fcp_cqid)
for (eqidx = 0; eqidx < phba->io_channel_irqs; eqidx++) {
eq = phba->sli4_hba.hba_eq[eqidx];
if (cq->assoc_qid == eq->queue_id)
break;
if (phba->intr_type == MSIX) {
if (fcp_cqidx >= phba->cfg_fcp_io_channel)
return;
} else {
if (fcp_cqidx > 0)
return;
}
if (eqidx == phba->io_channel_irqs) {
pr_err("Couldn't find EQ for CQ. Using EQ[0]\n");
eqidx = 0;
eq = phba->sli4_hba.hba_eq[0];
}
printk(KERN_ERR "FCP CQ: WQ[Idx:%d|Qid%d]->CQ[Idx%d|Qid%d]:\n",
fcp_wqidx, phba->sli4_hba.fcp_wq[fcp_wqidx]->queue_id,
fcp_cqidx, fcp_cqid);
lpfc_debug_dump_q(phba->sli4_hba.fcp_cq[fcp_cqidx]);
if (qtype == DUMP_FCP || qtype == DUMP_NVME)
pr_err("%s CQ: WQ[Idx:%d|Qid%d]->CQ[Idx%d|Qid%d]"
"->EQ[Idx:%d|Qid:%d]:\n",
qtypestr, wqidx, wq->queue_id, wqidx, cq->queue_id,
eqidx, eq->queue_id);
else
pr_err("%s CQ: WQ[Qid:%d]->CQ[Qid:%d]"
"->EQ[Idx:%d|Qid:%d]:\n",
qtypestr, wq->queue_id, cq->queue_id,
eqidx, eq->queue_id);
lpfc_debug_dump_q(cq);
}
/**
@ -421,64 +505,15 @@ lpfc_debug_dump_fcp_cq(struct lpfc_hba *phba, int fcp_wqidx)
* associated to the FCP work queue specified by the @fcp_wqidx.
**/
static inline void
lpfc_debug_dump_hba_eq(struct lpfc_hba *phba, int fcp_wqidx)
lpfc_debug_dump_hba_eq(struct lpfc_hba *phba, int qidx)
{
struct lpfc_queue *qdesc;
int fcp_eqidx, fcp_eqid;
int fcp_cqidx, fcp_cqid;
struct lpfc_queue *qp;
/* sanity check */
if (fcp_wqidx >= phba->cfg_fcp_io_channel)
return;
fcp_cqid = phba->sli4_hba.fcp_wq[fcp_wqidx]->assoc_qid;
for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_io_channel; fcp_cqidx++)
if (phba->sli4_hba.fcp_cq[fcp_cqidx]->queue_id == fcp_cqid)
break;
if (phba->intr_type == MSIX) {
if (fcp_cqidx >= phba->cfg_fcp_io_channel)
return;
} else {
if (fcp_cqidx > 0)
return;
}
qp = phba->sli4_hba.hba_eq[qidx];
fcp_eqidx = fcp_cqidx;
fcp_eqid = phba->sli4_hba.hba_eq[fcp_eqidx]->queue_id;
qdesc = phba->sli4_hba.hba_eq[fcp_eqidx];
pr_err("EQ[Idx:%d|Qid:%d]\n", qidx, qp->queue_id);
printk(KERN_ERR "FCP EQ: WQ[Idx:%d|Qid:%d]->CQ[Idx:%d|Qid:%d]->"
"EQ[Idx:%d|Qid:%d]\n",
fcp_wqidx, phba->sli4_hba.fcp_wq[fcp_wqidx]->queue_id,
fcp_cqidx, fcp_cqid, fcp_eqidx, fcp_eqid);
lpfc_debug_dump_q(qdesc);
}
/**
* lpfc_debug_dump_els_wq - dump all entries from the els work queue
* @phba: Pointer to HBA context object.
*
* This function dumps all entries from the ELS work queue.
**/
static inline void
lpfc_debug_dump_els_wq(struct lpfc_hba *phba)
{
printk(KERN_ERR "ELS WQ: WQ[Qid:%d]:\n",
phba->sli4_hba.els_wq->queue_id);
lpfc_debug_dump_q(phba->sli4_hba.els_wq);
}
/**
* lpfc_debug_dump_mbx_wq - dump all entries from the mbox work queue
* @phba: Pointer to HBA context object.
*
* This function dumps all entries from the MBOX work queue.
**/
static inline void
lpfc_debug_dump_mbx_wq(struct lpfc_hba *phba)
{
printk(KERN_ERR "MBX WQ: WQ[Qid:%d]\n",
phba->sli4_hba.mbx_wq->queue_id);
lpfc_debug_dump_q(phba->sli4_hba.mbx_wq);
lpfc_debug_dump_q(qp);
}
/**
@ -509,36 +544,6 @@ lpfc_debug_dump_hdr_rq(struct lpfc_hba *phba)
lpfc_debug_dump_q(phba->sli4_hba.hdr_rq);
}
/**
* lpfc_debug_dump_els_cq - dump all entries from the els complete queue
* @phba: Pointer to HBA context object.
*
* This function dumps all entries from the els complete queue.
**/
static inline void
lpfc_debug_dump_els_cq(struct lpfc_hba *phba)
{
printk(KERN_ERR "ELS CQ: WQ[Qid:%d]->CQ[Qid:%d]\n",
phba->sli4_hba.els_wq->queue_id,
phba->sli4_hba.els_cq->queue_id);
lpfc_debug_dump_q(phba->sli4_hba.els_cq);
}
/**
* lpfc_debug_dump_mbx_cq - dump all entries from the mbox complete queue
* @phba: Pointer to HBA context object.
*
* This function dumps all entries from the mbox complete queue.
**/
static inline void
lpfc_debug_dump_mbx_cq(struct lpfc_hba *phba)
{
printk(KERN_ERR "MBX CQ: WQ[Qid:%d]->CQ[Qid:%d]\n",
phba->sli4_hba.mbx_wq->queue_id,
phba->sli4_hba.mbx_cq->queue_id);
lpfc_debug_dump_q(phba->sli4_hba.mbx_cq);
}
/**
* lpfc_debug_dump_wq_by_id - dump all entries from a work queue by queue id
* @phba: Pointer to HBA context object.
@ -556,14 +561,29 @@ lpfc_debug_dump_wq_by_id(struct lpfc_hba *phba, int qid)
if (phba->sli4_hba.fcp_wq[wq_idx]->queue_id == qid)
break;
if (wq_idx < phba->cfg_fcp_io_channel) {
printk(KERN_ERR "FCP WQ[Idx:%d|Qid:%d]\n", wq_idx, qid);
pr_err("FCP WQ[Idx:%d|Qid:%d]\n", wq_idx, qid);
lpfc_debug_dump_q(phba->sli4_hba.fcp_wq[wq_idx]);
return;
}
for (wq_idx = 0; wq_idx < phba->cfg_nvme_io_channel; wq_idx++)
if (phba->sli4_hba.nvme_wq[wq_idx]->queue_id == qid)
break;
if (wq_idx < phba->cfg_nvme_io_channel) {
pr_err("NVME WQ[Idx:%d|Qid:%d]\n", wq_idx, qid);
lpfc_debug_dump_q(phba->sli4_hba.nvme_wq[wq_idx]);
return;
}
if (phba->sli4_hba.els_wq->queue_id == qid) {
printk(KERN_ERR "ELS WQ[Qid:%d]\n", qid);
pr_err("ELS WQ[Qid:%d]\n", qid);
lpfc_debug_dump_q(phba->sli4_hba.els_wq);
return;
}
if (phba->sli4_hba.nvmels_wq->queue_id == qid) {
pr_err("NVME LS WQ[Qid:%d]\n", qid);
lpfc_debug_dump_q(phba->sli4_hba.nvmels_wq);
}
}
@ -617,27 +637,42 @@ lpfc_debug_dump_rq_by_id(struct lpfc_hba *phba, int qid)
static inline void
lpfc_debug_dump_cq_by_id(struct lpfc_hba *phba, int qid)
{
int cq_idx = 0;
int cq_idx;
do {
for (cq_idx = 0; cq_idx < phba->cfg_fcp_io_channel; cq_idx++)
if (phba->sli4_hba.fcp_cq[cq_idx]->queue_id == qid)
break;
} while (++cq_idx < phba->cfg_fcp_io_channel);
if (cq_idx < phba->cfg_fcp_io_channel) {
printk(KERN_ERR "FCP CQ[Idx:%d|Qid:%d]\n", cq_idx, qid);
pr_err("FCP CQ[Idx:%d|Qid:%d]\n", cq_idx, qid);
lpfc_debug_dump_q(phba->sli4_hba.fcp_cq[cq_idx]);
return;
}
for (cq_idx = 0; cq_idx < phba->cfg_nvme_io_channel; cq_idx++)
if (phba->sli4_hba.nvme_cq[cq_idx]->queue_id == qid)
break;
if (cq_idx < phba->cfg_nvme_io_channel) {
pr_err("NVME CQ[Idx:%d|Qid:%d]\n", cq_idx, qid);
lpfc_debug_dump_q(phba->sli4_hba.nvme_cq[cq_idx]);
return;
}
if (phba->sli4_hba.els_cq->queue_id == qid) {
printk(KERN_ERR "ELS CQ[Qid:%d]\n", qid);
pr_err("ELS CQ[Qid:%d]\n", qid);
lpfc_debug_dump_q(phba->sli4_hba.els_cq);
return;
}
if (phba->sli4_hba.nvmels_cq->queue_id == qid) {
pr_err("NVME LS CQ[Qid:%d]\n", qid);
lpfc_debug_dump_q(phba->sli4_hba.nvmels_cq);
return;
}
if (phba->sli4_hba.mbx_cq->queue_id == qid) {
printk(KERN_ERR "MBX CQ[Qid:%d]\n", qid);
pr_err("MBX CQ[Qid:%d]\n", qid);
lpfc_debug_dump_q(phba->sli4_hba.mbx_cq);
}
}
@ -655,17 +690,15 @@ lpfc_debug_dump_eq_by_id(struct lpfc_hba *phba, int qid)
{
int eq_idx;
for (eq_idx = 0; eq_idx < phba->cfg_fcp_io_channel; eq_idx++) {
for (eq_idx = 0; eq_idx < phba->io_channel_irqs; eq_idx++)
if (phba->sli4_hba.hba_eq[eq_idx]->queue_id == qid)
break;
}
if (eq_idx < phba->cfg_fcp_io_channel) {
if (eq_idx < phba->io_channel_irqs) {
printk(KERN_ERR "FCP EQ[Idx:%d|Qid:%d]\n", eq_idx, qid);
lpfc_debug_dump_q(phba->sli4_hba.hba_eq[eq_idx]);
return;
}
}
void lpfc_debug_dump_all_queues(struct lpfc_hba *);

View File

@ -1,9 +1,11 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
* Copyright (C) 2017 Broadcom. All Rights Reserved. The term *
* Broadcom refers to Broadcom Limited and/or its subsidiaries. *
* Copyright (C) 2004-2013 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* www.broadcom.com *
* *
* This program is free software; you can redistribute it and/or *
* modify it under the terms of version 2 of the GNU General *
@ -86,6 +88,17 @@ struct lpfc_nodelist {
#define NLP_FABRIC 0x4 /* entry rep a Fabric entity */
#define NLP_FCP_TARGET 0x8 /* entry is an FCP target */
#define NLP_FCP_INITIATOR 0x10 /* entry is an FCP Initiator */
#define NLP_NVME_TARGET 0x20 /* entry is a NVME Target */
#define NLP_NVME_INITIATOR 0x40 /* entry is a NVME Initiator */
uint16_t nlp_fc4_type; /* FC types node supports. */
/* Assigned from GID_FF, only
* FCP (0x8) and NVME (0x28)
* supported.
*/
#define NLP_FC4_NONE 0x0
#define NLP_FC4_FCP 0x1 /* FC4 Type FCP (value x8)) */
#define NLP_FC4_NVME 0x2 /* FC4 TYPE NVME (value x28) */
uint16_t nlp_rpi;
uint16_t nlp_state; /* state transition indicator */
@ -107,8 +120,8 @@ struct lpfc_nodelist {
struct timer_list nlp_delayfunc; /* Used for delayed ELS cmds */
struct lpfc_hba *phba;
struct fc_rport *rport; /* Corresponding FC transport
port structure */
struct fc_rport *rport; /* scsi_transport_fc port structure */
struct lpfc_nvme_rport *nrport; /* nvme transport rport struct. */
struct lpfc_vport *vport;
struct lpfc_work_evt els_retry_evt;
struct lpfc_work_evt dev_loss_evt;
@ -118,6 +131,10 @@ struct lpfc_nodelist {
unsigned long last_change_time;
unsigned long *active_rrqs_xri_bitmap;
struct lpfc_scsicmd_bkt *lat_data; /* Latency data */
uint32_t fc4_prli_sent;
uint32_t upcall_flags;
uint32_t nvme_fb_size; /* NVME target's supported byte cnt */
#define NVME_FB_BIT_SHIFT 9 /* PRLI Rsp first burst in 512B units. */
};
struct lpfc_node_rrq {
struct list_head list;
@ -133,6 +150,7 @@ struct lpfc_node_rrq {
/* Defines for nlp_flag (uint32) */
#define NLP_IGNR_REG_CMPL 0x00000001 /* Rcvd rscn before we cmpl reg login */
#define NLP_REG_LOGIN_SEND 0x00000002 /* sent reglogin to adapter */
#define NLP_SUPPRESS_RSP 0x00000010 /* Remote NPort supports suppress rsp */
#define NLP_PLOGI_SND 0x00000020 /* sent PLOGI request for this entry */
#define NLP_PRLI_SND 0x00000040 /* sent PRLI request for this entry */
#define NLP_ADISC_SND 0x00000080 /* sent ADISC request for this entry */

View File

@ -1,9 +1,11 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
* Copyright (C) 2017 Broadcom. All Rights Reserved. The term *
* Broadcom refers to Broadcom Limited and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* www.broadcom.com *
* Portions Copyright (C) 2004-2005 Christoph Hellwig *
* *
* This program is free software; you can redistribute it and/or *
@ -29,7 +31,6 @@
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>
#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
@ -1323,7 +1324,7 @@ lpfc_els_abort_flogi(struct lpfc_hba *phba)
"0201 Abort outstanding I/O on NPort x%x\n",
Fabric_DID);
pring = &phba->sli.ring[LPFC_ELS_RING];
pring = lpfc_phba_elsring(phba);
/*
* Check the txcmplq for an iocb that matches the nport the driver is
@ -1513,7 +1514,7 @@ static struct lpfc_nodelist *
lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
struct lpfc_nodelist *ndlp)
{
struct lpfc_vport *vport = ndlp->vport;
struct lpfc_vport *vport = ndlp->vport;
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_nodelist *new_ndlp;
struct lpfc_rport_data *rdata;
@ -1868,10 +1869,12 @@ lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
/* PLOGI completes to NPort <nlp_DID> */
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"0102 PLOGI completes to NPort x%x "
"0102 PLOGI completes to NPort x%06x "
"Data: x%x x%x x%x x%x x%x\n",
ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4],
irsp->ulpTimeout, disc, vport->num_disc_nodes);
ndlp->nlp_DID, ndlp->nlp_fc4_type,
irsp->ulpStatus, irsp->un.ulpWord[4],
disc, vport->num_disc_nodes);
/* Check to see if link went down during discovery */
if (lpfc_els_chk_latt(vport)) {
spin_lock_irq(shost->host_lock);
@ -2000,12 +2003,21 @@ lpfc_issue_els_plogi(struct lpfc_vport *vport, uint32_t did, uint8_t retry)
sp->cmn.fcphHigh = FC_PH3;
sp->cmn.valid_vendor_ver_level = 0;
memset(sp->vendorVersion, 0, sizeof(sp->vendorVersion));
memset(sp->un.vendorVersion, 0, sizeof(sp->un.vendorVersion));
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
"Issue PLOGI: did:x%x",
did, 0, 0);
/* If our firmware supports this feature, convey that
* information to the target using the vendor specific field.
*/
if (phba->sli.sli_flag & LPFC_SLI_SUPPRESS_RSP) {
sp->cmn.valid_vendor_ver_level = 1;
sp->un.vv.vid = cpu_to_be32(LPFC_VV_EMLX_ID);
sp->un.vv.flags = cpu_to_be32(LPFC_VV_SUPPRESS_RSP);
}
phba->fc_stat.elsXmitPLOGI++;
elsiocb->iocb_cmpl = lpfc_cmpl_els_plogi;
ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
@ -2052,14 +2064,17 @@ lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
"PRLI cmpl: status:x%x/x%x did:x%x",
irsp->ulpStatus, irsp->un.ulpWord[4],
ndlp->nlp_DID);
/* Ddriver supports multiple FC4 types. Counters matter. */
vport->fc_prli_sent--;
/* PRLI completes to NPort <nlp_DID> */
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"0103 PRLI completes to NPort x%x "
"0103 PRLI completes to NPort x%06x "
"Data: x%x x%x x%x x%x\n",
ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4],
irsp->ulpTimeout, vport->num_disc_nodes);
vport->num_disc_nodes, ndlp->fc4_prli_sent);
vport->fc_prli_sent--;
/* Check to see if link went down during discovery */
if (lpfc_els_chk_latt(vport))
goto out;
@ -2068,6 +2083,7 @@ lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
/* Check for retry */
if (lpfc_els_retry(phba, cmdiocb, rspiocb)) {
/* ELS command is being retried */
ndlp->fc4_prli_sent--;
goto out;
}
/* PRLI failed */
@ -2082,9 +2098,14 @@ lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
lpfc_disc_state_machine(vport, ndlp, cmdiocb,
NLP_EVT_CMPL_PRLI);
} else
/* Good status, call state machine */
/* Good status, call state machine. However, if another
* PRLI is outstanding, don't call the state machine
* because final disposition to Mapped or Unmapped is
* completed there.
*/
lpfc_disc_state_machine(vport, ndlp, cmdiocb,
NLP_EVT_CMPL_PRLI);
out:
lpfc_els_free_iocb(phba, cmdiocb);
return;
@ -2118,42 +2139,100 @@ lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_hba *phba = vport->phba;
PRLI *npr;
struct lpfc_nvme_prli *npr_nvme;
struct lpfc_iocbq *elsiocb;
uint8_t *pcmd;
uint16_t cmdsize;
u32 local_nlp_type, elscmd;
cmdsize = (sizeof(uint32_t) + sizeof(PRLI));
local_nlp_type = ndlp->nlp_fc4_type;
send_next_prli:
if (local_nlp_type & NLP_FC4_FCP) {
/* Payload is 4 + 16 = 20 x14 bytes. */
cmdsize = (sizeof(uint32_t) + sizeof(PRLI));
elscmd = ELS_CMD_PRLI;
} else if (local_nlp_type & NLP_FC4_NVME) {
/* Payload is 4 + 20 = 24 x18 bytes. */
cmdsize = (sizeof(uint32_t) + sizeof(struct lpfc_nvme_prli));
elscmd = ELS_CMD_NVMEPRLI;
} else {
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
"3083 Unknown FC_TYPE x%x ndlp x%06x\n",
ndlp->nlp_fc4_type, ndlp->nlp_DID);
return 1;
}
elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
ndlp->nlp_DID, ELS_CMD_PRLI);
ndlp->nlp_DID, elscmd);
if (!elsiocb)
return 1;
pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
/* For PRLI request, remainder of payload is service parameters */
memset(pcmd, 0, (sizeof(PRLI) + sizeof(uint32_t)));
*((uint32_t *) (pcmd)) = ELS_CMD_PRLI;
pcmd += sizeof(uint32_t);
memset(pcmd, 0, cmdsize);
/* For PRLI, remainder of payload is PRLI parameter page */
npr = (PRLI *) pcmd;
/*
* If our firmware version is 3.20 or later,
* set the following bits for FC-TAPE support.
*/
if (phba->vpd.rev.feaLevelHigh >= 0x02) {
npr->ConfmComplAllowed = 1;
npr->Retry = 1;
npr->TaskRetryIdReq = 1;
if (local_nlp_type & NLP_FC4_FCP) {
/* Remainder of payload is FCP PRLI parameter page.
* Note: this data structure is defined as
* BE/LE in the structure definition so no
* byte swap call is made.
*/
*((uint32_t *)(pcmd)) = ELS_CMD_PRLI;
pcmd += sizeof(uint32_t);
npr = (PRLI *)pcmd;
/*
* If our firmware version is 3.20 or later,
* set the following bits for FC-TAPE support.
*/
if (phba->vpd.rev.feaLevelHigh >= 0x02) {
npr->ConfmComplAllowed = 1;
npr->Retry = 1;
npr->TaskRetryIdReq = 1;
}
npr->estabImagePair = 1;
npr->readXferRdyDis = 1;
if (vport->cfg_first_burst_size)
npr->writeXferRdyDis = 1;
/* For FCP support */
npr->prliType = PRLI_FCP_TYPE;
npr->initiatorFunc = 1;
elsiocb->iocb_flag |= LPFC_PRLI_FCP_REQ;
/* Remove FCP type - processed. */
local_nlp_type &= ~NLP_FC4_FCP;
} else if (local_nlp_type & NLP_FC4_NVME) {
/* Remainder of payload is NVME PRLI parameter page.
* This data structure is the newer definition that
* uses bf macros so a byte swap is required.
*/
*((uint32_t *)(pcmd)) = ELS_CMD_NVMEPRLI;
pcmd += sizeof(uint32_t);
npr_nvme = (struct lpfc_nvme_prli *)pcmd;
bf_set(prli_type_code, npr_nvme, PRLI_NVME_TYPE);
bf_set(prli_estabImagePair, npr_nvme, 0); /* Should be 0 */
/* Only initiators request first burst. */
if ((phba->cfg_nvme_enable_fb) &&
!phba->nvmet_support)
bf_set(prli_fba, npr_nvme, 1);
if (phba->nvmet_support) {
bf_set(prli_tgt, npr_nvme, 1);
bf_set(prli_disc, npr_nvme, 1);
} else {
bf_set(prli_init, npr_nvme, 1);
}
npr_nvme->word1 = cpu_to_be32(npr_nvme->word1);
npr_nvme->word4 = cpu_to_be32(npr_nvme->word4);
elsiocb->iocb_flag |= LPFC_PRLI_NVME_REQ;
/* Remove NVME type - processed. */
local_nlp_type &= ~NLP_FC4_NVME;
}
npr->estabImagePair = 1;
npr->readXferRdyDis = 1;
if (vport->cfg_first_burst_size)
npr->writeXferRdyDis = 1;
/* For FCP support */
npr->prliType = PRLI_FCP_TYPE;
npr->initiatorFunc = 1;
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
"Issue PRLI: did:x%x",
@ -2172,7 +2251,20 @@ lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
lpfc_els_free_iocb(phba, elsiocb);
return 1;
}
/* The vport counters are used for lpfc_scan_finished, but
* the ndlp is used to track outstanding PRLIs for different
* FC4 types.
*/
vport->fc_prli_sent++;
ndlp->fc4_prli_sent++;
/* The driver supports 2 FC4 types. Make sure
* a PRLI is issued for all types before exiting.
*/
if (local_nlp_type & (NLP_FC4_FCP | NLP_FC4_NVME))
goto send_next_prli;
return 0;
}
@ -2543,6 +2635,15 @@ out:
if ((vport->fc_flag & FC_PT2PT) &&
!(vport->fc_flag & FC_PT2PT_PLOGI)) {
phba->pport->fc_myDID = 0;
if ((phba->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
(phba->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) {
if (phba->nvmet_support)
lpfc_nvmet_update_targetport(phba);
else
lpfc_nvme_update_localport(phba->pport);
}
mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (mbox) {
lpfc_config_link(phba, mbox);
@ -3055,6 +3156,7 @@ lpfc_els_retry_delay_handler(struct lpfc_nodelist *ndlp)
}
break;
case ELS_CMD_PRLI:
case ELS_CMD_NVMEPRLI:
if (!lpfc_issue_els_prli(vport, ndlp, retry)) {
ndlp->nlp_prev_state = ndlp->nlp_state;
lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE);
@ -3245,7 +3347,8 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
break;
}
if ((cmd == ELS_CMD_PLOGI) ||
(cmd == ELS_CMD_PRLI)) {
(cmd == ELS_CMD_PRLI) ||
(cmd == ELS_CMD_NVMEPRLI)) {
delay = 1000;
maxretry = lpfc_max_els_tries + 1;
retry = 1;
@ -3265,7 +3368,8 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
case LSRJT_LOGICAL_BSY:
if ((cmd == ELS_CMD_PLOGI) ||
(cmd == ELS_CMD_PRLI)) {
(cmd == ELS_CMD_PRLI) ||
(cmd == ELS_CMD_NVMEPRLI)) {
delay = 1000;
maxretry = 48;
} else if (cmd == ELS_CMD_FDISC) {
@ -3399,7 +3503,8 @@ out_retry:
spin_unlock_irq(shost->host_lock);
ndlp->nlp_prev_state = ndlp->nlp_state;
if (cmd == ELS_CMD_PRLI)
if ((cmd == ELS_CMD_PRLI) ||
(cmd == ELS_CMD_NVMEPRLI))
lpfc_nlp_set_state(vport, ndlp,
NLP_STE_PRLI_ISSUE);
else
@ -3430,6 +3535,7 @@ out_retry:
lpfc_issue_els_adisc(vport, ndlp, cmdiocb->retry);
return 1;
case ELS_CMD_PRLI:
case ELS_CMD_NVMEPRLI:
ndlp->nlp_prev_state = ndlp->nlp_state;
lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE);
lpfc_issue_els_prli(vport, ndlp, cmdiocb->retry);
@ -3995,7 +4101,18 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag,
sizeof(struct serv_parm));
sp->cmn.valid_vendor_ver_level = 0;
memset(sp->vendorVersion, 0, sizeof(sp->vendorVersion));
memset(sp->un.vendorVersion, 0,
sizeof(sp->un.vendorVersion));
/* If our firmware supports this feature, convey that
* info to the target using the vendor specific field.
*/
if (phba->sli.sli_flag & LPFC_SLI_SUPPRESS_RSP) {
sp->cmn.valid_vendor_ver_level = 1;
sp->un.vv.vid = cpu_to_be32(LPFC_VV_EMLX_ID);
sp->un.vv.flags =
cpu_to_be32(LPFC_VV_SUPPRESS_RSP);
}
}
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
@ -4231,17 +4348,43 @@ lpfc_els_rsp_prli_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
{
struct lpfc_hba *phba = vport->phba;
PRLI *npr;
struct lpfc_nvme_prli *npr_nvme;
lpfc_vpd_t *vpd;
IOCB_t *icmd;
IOCB_t *oldcmd;
struct lpfc_iocbq *elsiocb;
uint8_t *pcmd;
uint16_t cmdsize;
uint32_t prli_fc4_req, *req_payload;
struct lpfc_dmabuf *req_buf;
int rc;
u32 elsrspcmd;
/* Need the incoming PRLI payload to determine if the ACC is for an
* FC4 or NVME PRLI type. The PRLI type is at word 1.
*/
req_buf = (struct lpfc_dmabuf *)oldiocb->context2;
req_payload = (((uint32_t *)req_buf->virt) + 1);
/* PRLI type payload is at byte 3 for FCP or NVME. */
prli_fc4_req = be32_to_cpu(*req_payload);
prli_fc4_req = (prli_fc4_req >> 24) & 0xff;
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"6127 PRLI_ACC: Req Type x%x, Word1 x%08x\n",
prli_fc4_req, *((uint32_t *)req_payload));
if (prli_fc4_req == PRLI_FCP_TYPE) {
cmdsize = sizeof(uint32_t) + sizeof(PRLI);
elsrspcmd = (ELS_CMD_ACC | (ELS_CMD_PRLI & ~ELS_RSP_MASK));
} else if (prli_fc4_req & PRLI_NVME_TYPE) {
cmdsize = sizeof(uint32_t) + sizeof(struct lpfc_nvme_prli);
elsrspcmd = (ELS_CMD_ACC | (ELS_CMD_NVMEPRLI & ~ELS_RSP_MASK));
} else {
return 1;
}
cmdsize = sizeof(uint32_t) + sizeof(PRLI);
elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
ndlp->nlp_DID, (ELS_CMD_ACC | (ELS_CMD_PRLI & ~ELS_RSP_MASK)));
ndlp->nlp_DID, elsrspcmd);
if (!elsiocb)
return 1;
@ -4258,33 +4401,71 @@ lpfc_els_rsp_prli_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
ndlp->nlp_rpi);
pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
memset(pcmd, 0, cmdsize);
*((uint32_t *) (pcmd)) = (ELS_CMD_ACC | (ELS_CMD_PRLI & ~ELS_RSP_MASK));
pcmd += sizeof(uint32_t);
/* For PRLI, remainder of payload is PRLI parameter page */
memset(pcmd, 0, sizeof(PRLI));
npr = (PRLI *) pcmd;
vpd = &phba->vpd;
/*
* If the remote port is a target and our firmware version is 3.20 or
* later, set the following bits for FC-TAPE support.
*/
if ((ndlp->nlp_type & NLP_FCP_TARGET) &&
(vpd->rev.feaLevelHigh >= 0x02)) {
if (prli_fc4_req == PRLI_FCP_TYPE) {
/*
* If the remote port is a target and our firmware version
* is 3.20 or later, set the following bits for FC-TAPE
* support.
*/
npr = (PRLI *) pcmd;
if ((ndlp->nlp_type & NLP_FCP_TARGET) &&
(vpd->rev.feaLevelHigh >= 0x02)) {
npr->ConfmComplAllowed = 1;
npr->Retry = 1;
npr->TaskRetryIdReq = 1;
}
npr->acceptRspCode = PRLI_REQ_EXECUTED;
npr->estabImagePair = 1;
npr->readXferRdyDis = 1;
npr->ConfmComplAllowed = 1;
npr->Retry = 1;
npr->TaskRetryIdReq = 1;
}
npr->prliType = PRLI_FCP_TYPE;
npr->initiatorFunc = 1;
} else if (prli_fc4_req & PRLI_NVME_TYPE) {
/* Respond with an NVME PRLI Type */
npr_nvme = (struct lpfc_nvme_prli *) pcmd;
bf_set(prli_type_code, npr_nvme, PRLI_NVME_TYPE);
bf_set(prli_estabImagePair, npr_nvme, 0); /* Should be 0 */
bf_set(prli_acc_rsp_code, npr_nvme, PRLI_REQ_EXECUTED);
if (phba->nvmet_support) {
bf_set(prli_tgt, npr_nvme, 1);
bf_set(prli_disc, npr_nvme, 1);
if (phba->cfg_nvme_enable_fb) {
bf_set(prli_fba, npr_nvme, 1);
npr->acceptRspCode = PRLI_REQ_EXECUTED;
npr->estabImagePair = 1;
npr->readXferRdyDis = 1;
npr->ConfmComplAllowed = 1;
/* TBD. Target mode needs to post buffers
* that support the configured first burst
* byte size.
*/
bf_set(prli_fb_sz, npr_nvme,
phba->cfg_nvmet_fb_size);
}
} else {
bf_set(prli_init, npr_nvme, 1);
}
npr->prliType = PRLI_FCP_TYPE;
npr->initiatorFunc = 1;
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
"6015 NVME issue PRLI ACC word1 x%08x "
"word4 x%08x word5 x%08x flag x%x, "
"fcp_info x%x nlp_type x%x\n",
npr_nvme->word1, npr_nvme->word4,
npr_nvme->word5, ndlp->nlp_flag,
ndlp->nlp_fcp_info, ndlp->nlp_type);
npr_nvme->word1 = cpu_to_be32(npr_nvme->word1);
npr_nvme->word4 = cpu_to_be32(npr_nvme->word4);
npr_nvme->word5 = cpu_to_be32(npr_nvme->word5);
} else
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
"6128 Unknown FC_TYPE x%x x%x ndlp x%06x\n",
prli_fc4_req, ndlp->nlp_fc4_type,
ndlp->nlp_DID);
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
"Issue ACC PRLI: did:x%x flg:x%x",
@ -4411,7 +4592,7 @@ lpfc_els_rsp_rnid_acc(struct lpfc_vport *vport, uint8_t format,
**/
static void
lpfc_els_clear_rrq(struct lpfc_vport *vport,
struct lpfc_iocbq *iocb, struct lpfc_nodelist *ndlp)
struct lpfc_iocbq *iocb, struct lpfc_nodelist *ndlp)
{
struct lpfc_hba *phba = vport->phba;
uint8_t *pcmd;
@ -4909,7 +5090,7 @@ lpfc_rdp_res_opd_desc(struct fc_rdp_opd_sfp_desc *desc,
memcpy(desc->opd_info.vendor_name, &page_a0[SSF_VENDOR_NAME], 16);
memcpy(desc->opd_info.model_number, &page_a0[SSF_VENDOR_PN], 16);
memcpy(desc->opd_info.serial_number, &page_a0[SSF_VENDOR_SN], 16);
memcpy(desc->opd_info.revision, &page_a0[SSF_VENDOR_REV], 2);
memcpy(desc->opd_info.revision, &page_a0[SSF_VENDOR_REV], 4);
memcpy(desc->opd_info.date, &page_a0[SSF_DATE_CODE], 8);
desc->length = cpu_to_be32(sizeof(desc->opd_info));
return sizeof(struct fc_rdp_opd_sfp_desc);
@ -5004,7 +5185,7 @@ lpfc_rdp_res_diag_port_names(struct fc_rdp_port_name_desc *desc,
memcpy(desc->port_names.wwnn, phba->wwnn,
sizeof(desc->port_names.wwnn));
memcpy(desc->port_names.wwpn, &phba->wwpn,
memcpy(desc->port_names.wwpn, phba->wwpn,
sizeof(desc->port_names.wwpn));
desc->length = cpu_to_be32(sizeof(desc->port_names));
@ -5233,9 +5414,8 @@ lpfc_els_rcv_rdp(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
struct ls_rjt stat;
if (phba->sli_rev < LPFC_SLI_REV4 ||
(bf_get(lpfc_sli_intf_if_type,
&phba->sli4_hba.sli_intf) !=
LPFC_SLI_INTF_IF_TYPE_2)) {
bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
LPFC_SLI_INTF_IF_TYPE_2) {
rjt_err = LSRJT_UNABLE_TPC;
rjt_expl = LSEXP_REQ_UNSUPPORTED;
goto error;
@ -5687,6 +5867,8 @@ lpfc_rscn_recovery_check(struct lpfc_vport *vport)
(ndlp->nlp_state == NLP_STE_UNUSED_NODE) ||
!lpfc_rscn_payload_check(vport, ndlp->nlp_DID))
continue;
if (vport->phba->nvmet_support)
continue;
lpfc_disc_state_machine(vport, ndlp, NULL,
NLP_EVT_DEVICE_RECOVERY);
lpfc_cancel_retry_delay_tmo(vport, ndlp);
@ -5976,9 +6158,11 @@ lpfc_els_handle_rscn(struct lpfc_vport *vport)
if (ndlp && NLP_CHK_NODE_ACT(ndlp)
&& ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) {
/* Good ndlp, issue CT Request to NameServer */
if (lpfc_ns_cmd(vport, SLI_CTNS_GID_FT, 0, 0) == 0)
vport->gidft_inp = 0;
if (lpfc_issue_gidft(vport) == 0)
/* Wait for NameServer query cmpl before we can
continue */
* continue
*/
return 1;
} else {
/* If login to NameServer does not exist, issue one */
@ -6082,7 +6266,6 @@ lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
(void) lpfc_check_sparm(vport, ndlp, sp, CLASS3, 1);
/*
* If our portname is greater than the remote portname,
* then we initiate Nport login.
@ -7155,7 +7338,8 @@ lpfc_els_timeout_handler(struct lpfc_vport *vport)
timeout = (uint32_t)(phba->fc_ratov << 1);
pring = &phba->sli.ring[LPFC_ELS_RING];
pring = lpfc_phba_elsring(phba);
if ((phba->pport->load_flag & FC_UNLOADING))
return;
spin_lock_irq(&phba->hbalock);
@ -7224,7 +7408,7 @@ lpfc_els_timeout_handler(struct lpfc_vport *vport)
spin_unlock_irq(&phba->hbalock);
}
if (!list_empty(&phba->sli.ring[LPFC_ELS_RING].txcmplq))
if (!list_empty(&pring->txcmplq))
if (!(phba->pport->load_flag & FC_UNLOADING))
mod_timer(&vport->els_tmofunc,
jiffies + msecs_to_jiffies(1000 * timeout));
@ -7255,7 +7439,7 @@ lpfc_els_flush_cmd(struct lpfc_vport *vport)
{
LIST_HEAD(abort_list);
struct lpfc_hba *phba = vport->phba;
struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
struct lpfc_sli_ring *pring;
struct lpfc_iocbq *tmp_iocb, *piocb;
IOCB_t *cmd = NULL;
@ -7267,6 +7451,7 @@ lpfc_els_flush_cmd(struct lpfc_vport *vport)
* a working list and release the locks before calling the abort.
*/
spin_lock_irq(&phba->hbalock);
pring = lpfc_phba_elsring(phba);
if (phba->sli_rev == LPFC_SLI_REV4)
spin_lock(&pring->ring_lock);
@ -7777,6 +7962,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
lpfc_els_rcv_fan(vport, elsiocb, ndlp);
break;
case ELS_CMD_PRLI:
case ELS_CMD_NVMEPRLI:
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
"RCV PRLI: did:x%x/ste:x%x flg:x%x",
did, vport->port_state, ndlp->nlp_flag);
@ -8881,8 +9067,7 @@ lpfc_cmpl_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
break;
}
if (atomic_read(&phba->fabric_iocb_count) == 0)
BUG();
BUG_ON(atomic_read(&phba->fabric_iocb_count) == 0);
cmdiocb->iocb_cmpl = cmdiocb->fabric_iocb_cmpl;
cmdiocb->fabric_iocb_cmpl = NULL;
@ -8927,8 +9112,7 @@ lpfc_issue_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *iocb)
int ready;
int ret;
if (atomic_read(&phba->fabric_iocb_count) > 1)
BUG();
BUG_ON(atomic_read(&phba->fabric_iocb_count) > 1);
spin_lock_irqsave(&phba->hbalock, iflags);
ready = atomic_read(&phba->fabric_iocb_count) == 0 &&
@ -9013,7 +9197,9 @@ void lpfc_fabric_abort_nport(struct lpfc_nodelist *ndlp)
LIST_HEAD(completions);
struct lpfc_hba *phba = ndlp->phba;
struct lpfc_iocbq *tmp_iocb, *piocb;
struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
struct lpfc_sli_ring *pring;
pring = lpfc_phba_elsring(phba);
spin_lock_irq(&phba->hbalock);
list_for_each_entry_safe(piocb, tmp_iocb, &phba->fabric_iocb_list,
@ -9069,13 +9255,13 @@ lpfc_sli4_vport_delete_els_xri_aborted(struct lpfc_vport *vport)
unsigned long iflag = 0;
spin_lock_irqsave(&phba->hbalock, iflag);
spin_lock(&phba->sli4_hba.abts_sgl_list_lock);
spin_lock(&phba->sli4_hba.sgl_list_lock);
list_for_each_entry_safe(sglq_entry, sglq_next,
&phba->sli4_hba.lpfc_abts_els_sgl_list, list) {
if (sglq_entry->ndlp && sglq_entry->ndlp->vport == vport)
sglq_entry->ndlp = NULL;
}
spin_unlock(&phba->sli4_hba.abts_sgl_list_lock);
spin_unlock(&phba->sli4_hba.sgl_list_lock);
spin_unlock_irqrestore(&phba->hbalock, iflag);
return;
}
@ -9099,22 +9285,22 @@ lpfc_sli4_els_xri_aborted(struct lpfc_hba *phba,
struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
unsigned long iflag = 0;
struct lpfc_nodelist *ndlp;
struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
struct lpfc_sli_ring *pring;
pring = lpfc_phba_elsring(phba);
spin_lock_irqsave(&phba->hbalock, iflag);
spin_lock(&phba->sli4_hba.abts_sgl_list_lock);
spin_lock(&phba->sli4_hba.sgl_list_lock);
list_for_each_entry_safe(sglq_entry, sglq_next,
&phba->sli4_hba.lpfc_abts_els_sgl_list, list) {
if (sglq_entry->sli4_xritag == xri) {
list_del(&sglq_entry->list);
ndlp = sglq_entry->ndlp;
sglq_entry->ndlp = NULL;
spin_lock(&pring->ring_lock);
list_add_tail(&sglq_entry->list,
&phba->sli4_hba.lpfc_sgl_list);
&phba->sli4_hba.lpfc_els_sgl_list);
sglq_entry->state = SGL_FREED;
spin_unlock(&pring->ring_lock);
spin_unlock(&phba->sli4_hba.abts_sgl_list_lock);
spin_unlock(&phba->sli4_hba.sgl_list_lock);
spin_unlock_irqrestore(&phba->hbalock, iflag);
lpfc_set_rrq_active(phba, ndlp,
sglq_entry->sli4_lxritag,
@ -9126,21 +9312,21 @@ lpfc_sli4_els_xri_aborted(struct lpfc_hba *phba,
return;
}
}
spin_unlock(&phba->sli4_hba.abts_sgl_list_lock);
spin_unlock(&phba->sli4_hba.sgl_list_lock);
lxri = lpfc_sli4_xri_inrange(phba, xri);
if (lxri == NO_XRI) {
spin_unlock_irqrestore(&phba->hbalock, iflag);
return;
}
spin_lock(&pring->ring_lock);
spin_lock(&phba->sli4_hba.sgl_list_lock);
sglq_entry = __lpfc_get_active_sglq(phba, lxri);
if (!sglq_entry || (sglq_entry->sli4_xritag != xri)) {
spin_unlock(&pring->ring_lock);
spin_unlock(&phba->sli4_hba.sgl_list_lock);
spin_unlock_irqrestore(&phba->hbalock, iflag);
return;
}
sglq_entry->state = SGL_XRI_ABORTED;
spin_unlock(&pring->ring_lock);
spin_unlock(&phba->sli4_hba.sgl_list_lock);
spin_unlock_irqrestore(&phba->hbalock, iflag);
return;
}

View File

@ -1,9 +1,11 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
* Copyright (C) 2017 Broadcom. All Rights Reserved. The term *
* Broadcom refers to Broadcom Limited and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* www.broadcom.com *
* Portions Copyright (C) 2004-2005 Christoph Hellwig *
* *
* This program is free software; you can redistribute it and/or *
@ -31,6 +33,9 @@
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/fc/fc_fs.h>
#include <linux/nvme-fc-driver.h>
#include "lpfc_hw4.h"
#include "lpfc_hw.h"
@ -38,8 +43,9 @@
#include "lpfc_disc.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
#include "lpfc_scsi.h"
#include "lpfc_nvme.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
@ -93,7 +99,7 @@ lpfc_terminate_rport_io(struct fc_rport *rport)
if (ndlp->nlp_sid != NLP_NO_SID) {
lpfc_sli_abort_iocb(ndlp->vport,
&phba->sli.ring[phba->sli.fcp_ring],
&phba->sli.sli3_ring[LPFC_FCP_RING],
ndlp->nlp_sid, 0, LPFC_CTX_TGT);
}
}
@ -247,8 +253,8 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
if (ndlp->nlp_sid != NLP_NO_SID) {
/* flush the target */
lpfc_sli_abort_iocb(vport,
&phba->sli.ring[phba->sli.fcp_ring],
ndlp->nlp_sid, 0, LPFC_CTX_TGT);
&phba->sli.sli3_ring[LPFC_FCP_RING],
ndlp->nlp_sid, 0, LPFC_CTX_TGT);
}
put_node = rdata->pnode != NULL;
rdata->pnode = NULL;
@ -283,7 +289,7 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
if (ndlp->nlp_sid != NLP_NO_SID) {
warn_on = 1;
lpfc_sli_abort_iocb(vport, &phba->sli.ring[phba->sli.fcp_ring],
lpfc_sli_abort_iocb(vport, &phba->sli.sli3_ring[LPFC_FCP_RING],
ndlp->nlp_sid, 0, LPFC_CTX_TGT);
}
@ -495,11 +501,12 @@ lpfc_send_fastpath_evt(struct lpfc_hba *phba,
return;
}
fc_host_post_vendor_event(shost,
fc_get_event_number(),
evt_data_size,
evt_data,
LPFC_NL_VENDOR_ID);
if (phba->cfg_enable_fc4_type != LPFC_ENABLE_NVME)
fc_host_post_vendor_event(shost,
fc_get_event_number(),
evt_data_size,
evt_data,
LPFC_NL_VENDOR_ID);
lpfc_free_fast_evt(phba, fast_evt_data);
return;
@ -682,7 +689,7 @@ lpfc_work_done(struct lpfc_hba *phba)
}
lpfc_destroy_vport_work_array(phba, vports);
pring = &phba->sli.ring[LPFC_ELS_RING];
pring = lpfc_phba_elsring(phba);
status = (ha_copy & (HA_RXMASK << (4*LPFC_ELS_RING)));
status >>= (4*LPFC_ELS_RING);
if ((status & HA_RXMASK) ||
@ -852,9 +859,12 @@ lpfc_port_link_failure(struct lpfc_vport *vport)
void
lpfc_linkdown_port(struct lpfc_vport *vport)
{
struct lpfc_hba *phba = vport->phba;
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
fc_host_post_event(shost, fc_get_event_number(), FCH_EVT_LINKDOWN, 0);
if (phba->cfg_enable_fc4_type != LPFC_ENABLE_NVME)
fc_host_post_event(shost, fc_get_event_number(),
FCH_EVT_LINKDOWN, 0);
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
"Link Down: state:x%x rtry:x%x flg:x%x",
@ -894,11 +904,22 @@ lpfc_linkdown(struct lpfc_hba *phba)
spin_unlock_irq(shost->host_lock);
}
vports = lpfc_create_vport_work_array(phba);
if (vports != NULL)
if (vports != NULL) {
for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
/* Issue a LINK DOWN event to all nodes */
lpfc_linkdown_port(vports[i]);
vports[i]->fc_myDID = 0;
if ((phba->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
(phba->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) {
if (phba->nvmet_support)
lpfc_nvmet_update_targetport(phba);
else
lpfc_nvme_update_localport(vports[i]);
}
}
}
lpfc_destroy_vport_work_array(phba, vports);
/* Clean up any firmware default rpi's */
mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
@ -914,7 +935,6 @@ lpfc_linkdown(struct lpfc_hba *phba)
/* Setup myDID for link up if we are in pt2pt mode */
if (phba->pport->fc_flag & FC_PT2PT) {
phba->pport->fc_myDID = 0;
mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (mb) {
lpfc_config_link(phba, mb);
@ -929,7 +949,6 @@ lpfc_linkdown(struct lpfc_hba *phba)
phba->pport->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI);
spin_unlock_irq(shost->host_lock);
}
return 0;
}
@ -977,7 +996,9 @@ lpfc_linkup_port(struct lpfc_vport *vport)
(vport != phba->pport))
return;
fc_host_post_event(shost, fc_get_event_number(), FCH_EVT_LINKUP, 0);
if (phba->cfg_enable_fc4_type != LPFC_ENABLE_NVME)
fc_host_post_event(shost, fc_get_event_number(),
FCH_EVT_LINKUP, 0);
spin_lock_irq(shost->host_lock);
vport->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI | FC_ABORT_DISCOVERY |
@ -1016,7 +1037,7 @@ lpfc_linkup(struct lpfc_hba *phba)
* This routine handles processing a CLEAR_LA mailbox
* command upon completion. It is setup in the LPFC_MBOXQ
* as the completion routine when the command is
* handed off to the SLI layer.
* handed off to the SLI layer. SLI3 only.
*/
static void
lpfc_mbx_cmpl_clear_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
@ -1028,9 +1049,8 @@ lpfc_mbx_cmpl_clear_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
uint32_t control;
/* Since we don't do discovery right now, turn these off here */
psli->ring[psli->extra_ring].flag &= ~LPFC_STOP_IOCB_EVENT;
psli->ring[psli->fcp_ring].flag &= ~LPFC_STOP_IOCB_EVENT;
psli->ring[psli->next_ring].flag &= ~LPFC_STOP_IOCB_EVENT;
psli->sli3_ring[LPFC_EXTRA_RING].flag &= ~LPFC_STOP_IOCB_EVENT;
psli->sli3_ring[LPFC_FCP_RING].flag &= ~LPFC_STOP_IOCB_EVENT;
/* Check for error */
if ((mb->mbxStatus) && (mb->mbxStatus != 0x1601)) {
@ -3277,7 +3297,7 @@ lpfc_mbx_issue_link_down(struct lpfc_hba *phba)
* This routine handles processing a READ_TOPOLOGY mailbox
* command upon completion. It is setup in the LPFC_MBOXQ
* as the completion routine when the command is
* handed off to the SLI layer.
* handed off to the SLI layer. SLI4 only.
*/
void
lpfc_mbx_cmpl_read_topology(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
@ -3285,11 +3305,14 @@ lpfc_mbx_cmpl_read_topology(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
struct lpfc_vport *vport = pmb->vport;
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_mbx_read_top *la;
struct lpfc_sli_ring *pring;
MAILBOX_t *mb = &pmb->u.mb;
struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
/* Unblock ELS traffic */
phba->sli.ring[LPFC_ELS_RING].flag &= ~LPFC_STOP_IOCB_EVENT;
pring = lpfc_phba_elsring(phba);
pring->flag &= ~LPFC_STOP_IOCB_EVENT;
/* Check for error */
if (mb->mbxStatus) {
lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
@ -3458,6 +3481,14 @@ lpfc_mbx_cmpl_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
spin_lock_irq(shost->host_lock);
ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
spin_unlock_irq(shost->host_lock);
/*
* We cannot leave the RPI registered because
* if we go thru discovery again for this ndlp
* a subsequent REG_RPI will fail.
*/
ndlp->nlp_flag |= NLP_RPI_REGISTERED;
lpfc_unreg_rpi(vport, ndlp);
}
/* Call state machine */
@ -3556,6 +3587,14 @@ lpfc_mbx_cmpl_reg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
spin_unlock_irq(shost->host_lock);
vport->fc_myDID = 0;
if ((phba->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
(phba->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) {
if (phba->nvmet_support)
lpfc_nvmet_update_targetport(phba);
else
lpfc_nvme_update_localport(vport);
}
goto out;
}
@ -3805,6 +3844,52 @@ lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
return;
}
/*
* This routine will issue a GID_FT for each FC4 Type supported
* by the driver. ALL GID_FTs must complete before discovery is started.
*/
int
lpfc_issue_gidft(struct lpfc_vport *vport)
{
struct lpfc_hba *phba = vport->phba;
/* Good status, issue CT Request to NameServer */
if ((phba->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
(phba->cfg_enable_fc4_type == LPFC_ENABLE_FCP)) {
if (lpfc_ns_cmd(vport, SLI_CTNS_GID_FT, 0, SLI_CTPT_FCP)) {
/* Cannot issue NameServer FCP Query, so finish up
* discovery
*/
lpfc_printf_vlog(vport, KERN_ERR, LOG_SLI,
"0604 %s FC TYPE %x %s\n",
"Failed to issue GID_FT to ",
FC_TYPE_FCP,
"Finishing discovery.");
return 0;
}
vport->gidft_inp++;
}
if ((phba->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
(phba->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) {
if (lpfc_ns_cmd(vport, SLI_CTNS_GID_FT, 0, SLI_CTPT_NVME)) {
/* Cannot issue NameServer NVME Query, so finish up
* discovery
*/
lpfc_printf_vlog(vport, KERN_ERR, LOG_SLI,
"0605 %s FC_TYPE %x %s %d\n",
"Failed to issue GID_FT to ",
FC_TYPE_NVME,
"Finishing discovery: gidftinp ",
vport->gidft_inp);
if (vport->gidft_inp == 0)
return 0;
} else
vport->gidft_inp++;
}
return vport->gidft_inp;
}
/*
* This routine handles processing a NameServer REG_LOGIN mailbox
* command upon completion. It is setup in the LPFC_MBOXQ
@ -3821,12 +3906,14 @@ lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
pmb->context1 = NULL;
pmb->context2 = NULL;
vport->gidft_inp = 0;
if (mb->mbxStatus) {
out:
lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
"0260 Register NameServer error: 0x%x\n",
mb->mbxStatus);
out:
/* decrement the node reference count held for this
* callback function.
*/
@ -3870,20 +3957,29 @@ out:
lpfc_ns_cmd(vport, SLI_CTNS_RSNN_NN, 0, 0);
lpfc_ns_cmd(vport, SLI_CTNS_RSPN_ID, 0, 0);
lpfc_ns_cmd(vport, SLI_CTNS_RFT_ID, 0, 0);
lpfc_ns_cmd(vport, SLI_CTNS_RFF_ID, 0, 0);
if ((phba->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
(phba->cfg_enable_fc4_type == LPFC_ENABLE_FCP))
lpfc_ns_cmd(vport, SLI_CTNS_RFF_ID, 0, FC_TYPE_FCP);
if ((phba->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
(phba->cfg_enable_fc4_type == LPFC_ENABLE_NVME))
lpfc_ns_cmd(vport, SLI_CTNS_RFF_ID, 0,
FC_TYPE_NVME);
/* Issue SCR just before NameServer GID_FT Query */
lpfc_issue_els_scr(vport, SCR_DID, 0);
}
vport->fc_ns_retry = 0;
/* Good status, issue CT Request to NameServer */
if (lpfc_ns_cmd(vport, SLI_CTNS_GID_FT, 0, 0)) {
/* Cannot issue NameServer Query, so finish up discovery */
if (lpfc_issue_gidft(vport) == 0)
goto out;
}
/* decrement the node reference count held for this
/*
* At this point in time we may need to wait for multiple
* SLI_CTNS_GID_FT CT commands to complete before we start discovery.
*
* decrement the node reference count held for this
* callback function.
*/
lpfc_nlp_put(ndlp);
@ -3903,6 +3999,9 @@ lpfc_register_remote_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
struct fc_rport_identifiers rport_ids;
struct lpfc_hba *phba = vport->phba;
if (phba->cfg_enable_fc4_type == LPFC_ENABLE_NVME)
return;
/* Remote port has reappeared. Re-register w/ FC transport */
rport_ids.node_name = wwn_to_u64(ndlp->nlp_nodename.u.wwn);
rport_ids.port_name = wwn_to_u64(ndlp->nlp_portname.u.wwn);
@ -3972,12 +4071,17 @@ static void
lpfc_unregister_remote_port(struct lpfc_nodelist *ndlp)
{
struct fc_rport *rport = ndlp->rport;
struct lpfc_vport *vport = ndlp->vport;
struct lpfc_hba *phba = vport->phba;
lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_RPORT,
if (phba->cfg_enable_fc4_type == LPFC_ENABLE_NVME)
return;
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
"rport delete: did:x%x flg:x%x type x%x",
ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);
lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
"3184 rport unregister x%06x, rport %p\n",
ndlp->nlp_DID, rport);
@ -4029,6 +4133,7 @@ lpfc_nlp_state_cleanup(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
int old_state, int new_state)
{
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_hba *phba = vport->phba;
if (new_state == NLP_STE_UNMAPPED_NODE) {
ndlp->nlp_flag &= ~NLP_NODEV_REMOVE;
@ -4039,23 +4144,56 @@ lpfc_nlp_state_cleanup(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
if (new_state == NLP_STE_NPR_NODE)
ndlp->nlp_flag &= ~NLP_RCV_PLOGI;
/* Transport interface */
if (ndlp->rport && (old_state == NLP_STE_MAPPED_NODE ||
old_state == NLP_STE_UNMAPPED_NODE)) {
vport->phba->nport_event_cnt++;
lpfc_unregister_remote_port(ndlp);
/* FCP and NVME Transport interface */
if ((old_state == NLP_STE_MAPPED_NODE ||
old_state == NLP_STE_UNMAPPED_NODE)) {
if (ndlp->rport) {
vport->phba->nport_event_cnt++;
lpfc_unregister_remote_port(ndlp);
}
/* Notify the NVME transport of this rport's loss */
if (((phba->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
(phba->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) &&
(vport->phba->nvmet_support == 0) &&
((ndlp->nlp_fc4_type & NLP_FC4_NVME) ||
(ndlp->nlp_DID == Fabric_DID))) {
vport->phba->nport_event_cnt++;
lpfc_nvme_unregister_port(vport, ndlp);
}
}
/* FCP and NVME Transport interfaces */
if (new_state == NLP_STE_MAPPED_NODE ||
new_state == NLP_STE_UNMAPPED_NODE) {
vport->phba->nport_event_cnt++;
/*
* Tell the fc transport about the port, if we haven't
* already. If we have, and it's a scsi entity, be
* sure to unblock any attached scsi devices
*/
lpfc_register_remote_port(vport, ndlp);
if ((ndlp->nlp_fc4_type & NLP_FC4_FCP) ||
(ndlp->nlp_DID == Fabric_DID)) {
vport->phba->nport_event_cnt++;
/*
* Tell the fc transport about the port, if we haven't
* already. If we have, and it's a scsi entity, be
*/
lpfc_register_remote_port(vport, ndlp);
}
/* Notify the NVME transport of this new rport. */
if (ndlp->nlp_fc4_type & NLP_FC4_NVME) {
if (vport->phba->nvmet_support == 0) {
/* Register this rport with the transport.
* Initiators take the NDLP ref count in
* the register.
*/
vport->phba->nport_event_cnt++;
lpfc_nvme_register_port(vport, ndlp);
} else {
/* Just take an NDLP ref count since the
* target does not register rports.
*/
lpfc_nlp_get(ndlp);
}
}
}
if ((new_state == NLP_STE_MAPPED_NODE) &&
(vport->stat_data_enabled)) {
/*
@ -4073,12 +4211,13 @@ lpfc_nlp_state_cleanup(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
"0x%x\n", ndlp->nlp_DID);
}
/*
* if we added to Mapped list, but the remote port
* registration failed or assigned a target id outside
* our presentable range - move the node to the
* Unmapped List
* If the node just added to Mapped list was an FCP target,
* but the remote port registration failed or assigned a target
* id outside the presentable range - move the node to the
* Unmapped List.
*/
if (new_state == NLP_STE_MAPPED_NODE &&
if ((new_state == NLP_STE_MAPPED_NODE) &&
(ndlp->nlp_type & NLP_FCP_TARGET) &&
(!ndlp->rport ||
ndlp->rport->scsi_target_id == -1 ||
ndlp->rport->scsi_target_id >= LPFC_MAX_TARGET)) {
@ -4212,6 +4351,7 @@ lpfc_initialize_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
ndlp->vport = vport;
ndlp->phba = vport->phba;
ndlp->nlp_sid = NLP_NO_SID;
ndlp->nlp_fc4_type = NLP_FC4_NONE;
kref_init(&ndlp->kref);
NLP_INT_NODE_ACT(ndlp);
atomic_set(&ndlp->cmd_pending, 0);
@ -4394,7 +4534,6 @@ lpfc_check_sli_ndlp(struct lpfc_hba *phba,
struct lpfc_iocbq *iocb,
struct lpfc_nodelist *ndlp)
{
struct lpfc_sli *psli = &phba->sli;
IOCB_t *icmd = &iocb->iocb;
struct lpfc_vport *vport = ndlp->vport;
@ -4413,9 +4552,7 @@ lpfc_check_sli_ndlp(struct lpfc_hba *phba,
if (iocb->context1 == (uint8_t *) ndlp)
return 1;
}
} else if (pring->ringno == psli->extra_ring) {
} else if (pring->ringno == psli->fcp_ring) {
} else if (pring->ringno == LPFC_FCP_RING) {
/* Skip match check if waiting to relogin to FCP target */
if ((ndlp->nlp_type & NLP_FCP_TARGET) &&
(ndlp->nlp_flag & NLP_DELAY_TMO)) {
@ -4424,12 +4561,58 @@ lpfc_check_sli_ndlp(struct lpfc_hba *phba,
if (icmd->ulpContext == (volatile ushort)ndlp->nlp_rpi) {
return 1;
}
} else if (pring->ringno == psli->next_ring) {
}
return 0;
}
static void
__lpfc_dequeue_nport_iocbs(struct lpfc_hba *phba,
struct lpfc_nodelist *ndlp, struct lpfc_sli_ring *pring,
struct list_head *dequeue_list)
{
struct lpfc_iocbq *iocb, *next_iocb;
list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
/* Check to see if iocb matches the nport */
if (lpfc_check_sli_ndlp(phba, pring, iocb, ndlp))
/* match, dequeue */
list_move_tail(&iocb->list, dequeue_list);
}
}
static void
lpfc_sli3_dequeue_nport_iocbs(struct lpfc_hba *phba,
struct lpfc_nodelist *ndlp, struct list_head *dequeue_list)
{
struct lpfc_sli *psli = &phba->sli;
uint32_t i;
spin_lock_irq(&phba->hbalock);
for (i = 0; i < psli->num_rings; i++)
__lpfc_dequeue_nport_iocbs(phba, ndlp, &psli->sli3_ring[i],
dequeue_list);
spin_unlock_irq(&phba->hbalock);
}
static void
lpfc_sli4_dequeue_nport_iocbs(struct lpfc_hba *phba,
struct lpfc_nodelist *ndlp, struct list_head *dequeue_list)
{
struct lpfc_sli_ring *pring;
struct lpfc_queue *qp = NULL;
spin_lock_irq(&phba->hbalock);
list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
pring = qp->pring;
if (!pring)
continue;
spin_lock_irq(&pring->ring_lock);
__lpfc_dequeue_nport_iocbs(phba, ndlp, pring, dequeue_list);
spin_unlock_irq(&pring->ring_lock);
}
spin_unlock_irq(&phba->hbalock);
}
/*
* Free resources / clean up outstanding I/Os
* associated with nlp_rpi in the LPFC_NODELIST entry.
@ -4438,10 +4621,6 @@ static int
lpfc_no_rpi(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
{
LIST_HEAD(completions);
struct lpfc_sli *psli;
struct lpfc_sli_ring *pring;
struct lpfc_iocbq *iocb, *next_iocb;
uint32_t i;
lpfc_fabric_abort_nport(ndlp);
@ -4449,29 +4628,11 @@ lpfc_no_rpi(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
* Everything that matches on txcmplq will be returned
* by firmware with a no rpi error.
*/
psli = &phba->sli;
if (ndlp->nlp_flag & NLP_RPI_REGISTERED) {
/* Now process each ring */
for (i = 0; i < psli->num_rings; i++) {
pring = &psli->ring[i];
spin_lock_irq(&phba->hbalock);
list_for_each_entry_safe(iocb, next_iocb, &pring->txq,
list) {
/*
* Check to see if iocb matches the nport we are
* looking for
*/
if ((lpfc_check_sli_ndlp(phba, pring, iocb,
ndlp))) {
/* It matches, so deque and call compl
with an error */
list_move_tail(&iocb->list,
&completions);
}
}
spin_unlock_irq(&phba->hbalock);
}
if (phba->sli_rev != LPFC_SLI_REV4)
lpfc_sli3_dequeue_nport_iocbs(phba, ndlp, &completions);
else
lpfc_sli4_dequeue_nport_iocbs(phba, ndlp, &completions);
}
/* Cancel all the IOCBs from the completions list */
@ -4950,6 +5111,8 @@ lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did)
return NULL;
lpfc_nlp_init(vport, ndlp, did);
lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
if (vport->phba->nvmet_support)
return ndlp;
spin_lock_irq(shost->host_lock);
ndlp->nlp_flag |= NLP_NPR_2B_DISC;
spin_unlock_irq(shost->host_lock);
@ -4958,6 +5121,8 @@ lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did)
ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_NPR_NODE);
if (!ndlp)
return NULL;
if (vport->phba->nvmet_support)
return ndlp;
spin_lock_irq(shost->host_lock);
ndlp->nlp_flag |= NLP_NPR_2B_DISC;
spin_unlock_irq(shost->host_lock);
@ -4977,6 +5142,8 @@ lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did)
* delay timeout is not needed.
*/
lpfc_cancel_retry_delay_tmo(vport, ndlp);
if (vport->phba->nvmet_support)
return ndlp;
spin_lock_irq(shost->host_lock);
ndlp->nlp_flag |= NLP_NPR_2B_DISC;
spin_unlock_irq(shost->host_lock);
@ -4992,6 +5159,8 @@ lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did)
ndlp->nlp_flag & NLP_RCV_PLOGI)
return NULL;
lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
if (vport->phba->nvmet_support)
return ndlp;
spin_lock_irq(shost->host_lock);
ndlp->nlp_flag |= NLP_NPR_2B_DISC;
spin_unlock_irq(shost->host_lock);
@ -5040,14 +5209,14 @@ lpfc_disc_list_loopmap(struct lpfc_vport *vport)
return;
}
/* SLI3 only */
void
lpfc_issue_clear_la(struct lpfc_hba *phba, struct lpfc_vport *vport)
{
LPFC_MBOXQ_t *mbox;
struct lpfc_sli *psli = &phba->sli;
struct lpfc_sli_ring *extra_ring = &psli->ring[psli->extra_ring];
struct lpfc_sli_ring *fcp_ring = &psli->ring[psli->fcp_ring];
struct lpfc_sli_ring *next_ring = &psli->ring[psli->next_ring];
struct lpfc_sli_ring *extra_ring = &psli->sli3_ring[LPFC_EXTRA_RING];
struct lpfc_sli_ring *fcp_ring = &psli->sli3_ring[LPFC_FCP_RING];
int rc;
/*
@ -5071,7 +5240,6 @@ lpfc_issue_clear_la(struct lpfc_hba *phba, struct lpfc_vport *vport)
lpfc_disc_flush_list(vport);
extra_ring->flag &= ~LPFC_STOP_IOCB_EVENT;
fcp_ring->flag &= ~LPFC_STOP_IOCB_EVENT;
next_ring->flag &= ~LPFC_STOP_IOCB_EVENT;
phba->link_state = LPFC_HBA_ERROR;
}
}
@ -5207,7 +5375,7 @@ lpfc_free_tx(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
struct lpfc_sli_ring *pring;
psli = &phba->sli;
pring = &psli->ring[LPFC_ELS_RING];
pring = lpfc_phba_elsring(phba);
/* Error matching iocb on txq or txcmplq
* First check the txq.
@ -5331,12 +5499,13 @@ lpfc_disc_timeout_handler(struct lpfc_vport *vport)
switch (vport->port_state) {
case LPFC_LOCAL_CFG_LINK:
/* port_state is identically LPFC_LOCAL_CFG_LINK while waiting for
* FAN
*/
/* FAN timeout */
/*
* port_state is identically LPFC_LOCAL_CFG_LINK while
* waiting for FAN timeout
*/
lpfc_printf_vlog(vport, KERN_WARNING, LOG_DISCOVERY,
"0221 FAN timeout\n");
/* Start discovery by sending FLOGI, clean up old rpis */
list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes,
nlp_listp) {
@ -5407,8 +5576,8 @@ lpfc_disc_timeout_handler(struct lpfc_vport *vport)
if (vport->fc_ns_retry < LPFC_MAX_NS_RETRY) {
/* Try it one more time */
vport->fc_ns_retry++;
rc = lpfc_ns_cmd(vport, SLI_CTNS_GID_FT,
vport->fc_ns_retry, 0);
vport->gidft_inp = 0;
rc = lpfc_issue_gidft(vport);
if (rc == 0)
break;
}
@ -5523,12 +5692,14 @@ restart_disc:
if (clrlaerr) {
lpfc_disc_flush_list(vport);
psli->ring[(psli->extra_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
psli->ring[(psli->fcp_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
psli->ring[(psli->next_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
if (phba->sli_rev != LPFC_SLI_REV4) {
psli->sli3_ring[(LPFC_EXTRA_RING)].flag &=
~LPFC_STOP_IOCB_EVENT;
psli->sli3_ring[LPFC_FCP_RING].flag &=
~LPFC_STOP_IOCB_EVENT;
}
vport->port_state = LPFC_VPORT_READY;
}
return;
}

View File

@ -1,9 +1,11 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
* Copyright (C) 2017 Broadcom. All Rights Reserved. The term *
* Broadcom refers to Broadcom Limited and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* www.broadcom.com *
* *
* This program is free software; you can redistribute it and/or *
* modify it under the terms of version 2 of the GNU General *
@ -44,8 +46,6 @@
#define LPFC_FCP_RING 0 /* ring 0 for FCP initiator commands */
#define LPFC_EXTRA_RING 1 /* ring 1 for other protocols */
#define LPFC_ELS_RING 2 /* ring 2 for ELS commands */
#define LPFC_FCP_NEXT_RING 3
#define LPFC_FCP_OAS_RING 3
#define SLI2_IOCB_CMD_R0_ENTRIES 172 /* SLI-2 FCP command ring entries */
#define SLI2_IOCB_RSP_R0_ENTRIES 134 /* SLI-2 FCP response ring entries */
@ -92,8 +92,10 @@ union CtCommandResponse {
uint32_t word;
};
#define FC4_FEATURE_INIT 0x2
#define FC4_FEATURE_TARGET 0x1
/* FC4 Feature bits for RFF_ID */
#define FC4_FEATURE_TARGET 0x1
#define FC4_FEATURE_INIT 0x2
#define FC4_FEATURE_NVME_DISC 0x4
struct lpfc_sli_ct_request {
/* Structure is in Big Endian format */
@ -117,6 +119,16 @@ struct lpfc_sli_ct_request {
uint8_t AreaScope;
uint8_t Fc4Type; /* for GID_FT requests */
} gid;
struct gid_ff {
uint8_t Flags;
uint8_t DomainScope;
uint8_t AreaScope;
uint8_t rsvd1;
uint8_t rsvd2;
uint8_t rsvd3;
uint8_t Fc4FBits;
uint8_t Fc4Type;
} gid_ff;
struct rft {
uint32_t PortId; /* For RFT_ID requests */
@ -161,6 +173,12 @@ struct lpfc_sli_ct_request {
struct gff_acc {
uint8_t fbits[128];
} gff_acc;
struct gft {
uint32_t PortId;
} gft;
struct gft_acc {
uint32_t fc4_types[8];
} gft_acc;
#define FCP_TYPE_FEATURE_OFFSET 7
struct rff {
uint32_t PortId;
@ -176,8 +194,12 @@ struct lpfc_sli_ct_request {
#define SLI_CT_REVISION 1
#define GID_REQUEST_SZ (offsetof(struct lpfc_sli_ct_request, un) + \
sizeof(struct gid))
#define GIDFF_REQUEST_SZ (offsetof(struct lpfc_sli_ct_request, un) + \
sizeof(struct gid_ff))
#define GFF_REQUEST_SZ (offsetof(struct lpfc_sli_ct_request, un) + \
sizeof(struct gff))
#define GFT_REQUEST_SZ (offsetof(struct lpfc_sli_ct_request, un) + \
sizeof(struct gft))
#define RFT_REQUEST_SZ (offsetof(struct lpfc_sli_ct_request, un) + \
sizeof(struct rft))
#define RFF_REQUEST_SZ (offsetof(struct lpfc_sli_ct_request, un) + \
@ -273,6 +295,7 @@ struct lpfc_sli_ct_request {
#define SLI_CTNS_GNN_IP 0x0153
#define SLI_CTNS_GIPA_IP 0x0156
#define SLI_CTNS_GID_FT 0x0171
#define SLI_CTNS_GID_FF 0x01F1
#define SLI_CTNS_GID_PT 0x01A1
#define SLI_CTNS_RPN_ID 0x0212
#define SLI_CTNS_RNN_ID 0x0213
@ -290,15 +313,16 @@ struct lpfc_sli_ct_request {
* Port Types
*/
#define SLI_CTPT_N_PORT 0x01
#define SLI_CTPT_NL_PORT 0x02
#define SLI_CTPT_FNL_PORT 0x03
#define SLI_CTPT_IP 0x04
#define SLI_CTPT_FCP 0x08
#define SLI_CTPT_NX_PORT 0x7F
#define SLI_CTPT_F_PORT 0x81
#define SLI_CTPT_FL_PORT 0x82
#define SLI_CTPT_E_PORT 0x84
#define SLI_CTPT_N_PORT 0x01
#define SLI_CTPT_NL_PORT 0x02
#define SLI_CTPT_FNL_PORT 0x03
#define SLI_CTPT_IP 0x04
#define SLI_CTPT_FCP 0x08
#define SLI_CTPT_NVME 0x28
#define SLI_CTPT_NX_PORT 0x7F
#define SLI_CTPT_F_PORT 0x81
#define SLI_CTPT_FL_PORT 0x82
#define SLI_CTPT_E_PORT 0x84
#define SLI_CT_LAST_ENTRY 0x80000000
@ -339,6 +363,7 @@ struct lpfc_name {
uint8_t IEEE[6]; /* FC IEEE address */
} s;
uint8_t wwn[8];
uint64_t name;
} u;
};
@ -492,7 +517,15 @@ struct serv_parm { /* Structure is in Big Endian format */
struct class_parms cls2;
struct class_parms cls3;
struct class_parms cls4;
uint8_t vendorVersion[16];
union {
uint8_t vendorVersion[16];
struct {
uint32_t vid;
#define LPFC_VV_EMLX_ID 0x454d4c58 /* EMLX */
uint32_t flags;
#define LPFC_VV_SUPPRESS_RSP 1
} vv;
} un;
};
/*
@ -551,6 +584,7 @@ struct fc_vft_header {
#define ELS_CMD_REC 0x13000000
#define ELS_CMD_RDP 0x18000000
#define ELS_CMD_PRLI 0x20100014
#define ELS_CMD_NVMEPRLI 0x20140018
#define ELS_CMD_PRLO 0x21100014
#define ELS_CMD_PRLO_ACC 0x02100014
#define ELS_CMD_PDISC 0x50000000
@ -590,6 +624,7 @@ struct fc_vft_header {
#define ELS_CMD_REC 0x13
#define ELS_CMD_RDP 0x18
#define ELS_CMD_PRLI 0x14001020
#define ELS_CMD_NVMEPRLI 0x18001420
#define ELS_CMD_PRLO 0x14001021
#define ELS_CMD_PRLO_ACC 0x14001002
#define ELS_CMD_PDISC 0x50
@ -686,6 +721,7 @@ typedef struct _PRLI { /* Structure is in Big Endian format */
uint8_t prliType; /* FC Parm Word 0, bit 24:31 */
#define PRLI_FCP_TYPE 0x08
#define PRLI_NVME_TYPE 0x28
uint8_t word0Reserved1; /* FC Parm Word 0, bit 16:23 */
#ifdef __BIG_ENDIAN_BITFIELD
@ -1245,8 +1281,7 @@ struct fc_rdp_opd_sfp_info {
uint8_t vendor_name[16];
uint8_t model_number[16];
uint8_t serial_number[16];
uint8_t revision[2];
uint8_t reserved[2];
uint8_t revision[4];
uint8_t date[8];
};
@ -1265,14 +1300,14 @@ struct fc_rdp_req_frame {
struct fc_rdp_res_frame {
uint32_t reply_sequence; /* FC word0 LS_ACC or LS_RJT */
uint32_t length; /* FC Word 1 */
struct fc_rdp_link_service_desc link_service_desc; /* Word 2 -4 */
struct fc_rdp_sfp_desc sfp_desc; /* Word 5 -9 */
struct fc_rdp_port_speed_desc portspeed_desc; /* Word 10-12 */
struct fc_rdp_link_error_status_desc link_error_desc; /* Word 13-21 */
struct fc_rdp_port_name_desc diag_port_names_desc; /* Word 22-27 */
struct fc_rdp_port_name_desc attached_port_names_desc;/* Word 28-33 */
uint32_t reply_sequence; /* FC word0 LS_ACC or LS_RJT */
uint32_t length; /* FC Word 1 */
struct fc_rdp_link_service_desc link_service_desc; /* Word 2 -4 */
struct fc_rdp_sfp_desc sfp_desc; /* Word 5 -9 */
struct fc_rdp_port_speed_desc portspeed_desc; /* Word 10 -12 */
struct fc_rdp_link_error_status_desc link_error_desc; /* Word 13 -21 */
struct fc_rdp_port_name_desc diag_port_names_desc; /* Word 22 -27 */
struct fc_rdp_port_name_desc attached_port_names_desc;/* Word 28 -33 */
struct fc_fec_rdp_desc fec_desc; /* FC word 34-37*/
struct fc_rdp_bbc_desc bbc_desc; /* FC Word 38-42*/
struct fc_rdp_oed_sfp_desc oed_temp_desc; /* FC Word 43-47*/
@ -1791,6 +1826,7 @@ typedef struct { /* FireFly BIU registers */
#define MBX_INIT_VFI 0xA3
#define MBX_INIT_VPI 0xA4
#define MBX_ACCESS_VDATA 0xA5
#define MBX_REG_FCFI_MRQ 0xAF
#define MBX_AUTH_PORT 0xF8
#define MBX_SECURITY_MGMT 0xF9

View File

@ -1,9 +1,11 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
* Copyright (C) 2009-2016 Emulex. All rights reserved. *
* Copyright (C) 2017 Broadcom. All Rights Reserved. The term *
* Broadcom refers to Broadcom Limited and/or its subsidiaries. *
* Copyright (C) 2009-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* www.broadcom.com *
* *
* This program is free software; you can redistribute it and/or *
* modify it under the terms of version 2 of the GNU General *
@ -108,6 +110,7 @@ struct lpfc_sli_intf {
#define LPFC_MAX_MQ_PAGE 8
#define LPFC_MAX_WQ_PAGE_V0 4
#define LPFC_MAX_WQ_PAGE 8
#define LPFC_MAX_RQ_PAGE 8
#define LPFC_MAX_CQ_PAGE 4
#define LPFC_MAX_EQ_PAGE 8
@ -198,7 +201,7 @@ struct lpfc_sli_intf {
/* Configuration of Interrupts / sec for entire HBA port */
#define LPFC_MIN_IMAX 5000
#define LPFC_MAX_IMAX 5000000
#define LPFC_DEF_IMAX 50000
#define LPFC_DEF_IMAX 150000
#define LPFC_MIN_CPU_MAP 0
#define LPFC_MAX_CPU_MAP 2
@ -348,6 +351,7 @@ struct lpfc_cqe {
#define CQE_CODE_RECEIVE 0x4
#define CQE_CODE_XRI_ABORTED 0x5
#define CQE_CODE_RECEIVE_V1 0x9
#define CQE_CODE_NVME_ERSP 0xd
/*
* Define mask value for xri_aborted and wcqe completed CQE extended status.
@ -367,6 +371,9 @@ struct lpfc_wcqe_complete {
#define lpfc_wcqe_c_hw_status_SHIFT 0
#define lpfc_wcqe_c_hw_status_MASK 0x000000FF
#define lpfc_wcqe_c_hw_status_WORD word0
#define lpfc_wcqe_c_ersp0_SHIFT 0
#define lpfc_wcqe_c_ersp0_MASK 0x0000FFFF
#define lpfc_wcqe_c_ersp0_WORD word0
uint32_t total_data_placed;
uint32_t parameter;
#define lpfc_wcqe_c_bg_edir_SHIFT 5
@ -400,6 +407,9 @@ struct lpfc_wcqe_complete {
#define lpfc_wcqe_c_code_SHIFT lpfc_cqe_code_SHIFT
#define lpfc_wcqe_c_code_MASK lpfc_cqe_code_MASK
#define lpfc_wcqe_c_code_WORD lpfc_cqe_code_WORD
#define lpfc_wcqe_c_sqhead_SHIFT 0
#define lpfc_wcqe_c_sqhead_MASK 0x0000FFFF
#define lpfc_wcqe_c_sqhead_WORD word3
};
/* completion queue entry for wqe release */
@ -954,6 +964,7 @@ struct mbox_header {
#define LPFC_MBOX_OPCODE_FCOE_DELETE_FCF 0x0A
#define LPFC_MBOX_OPCODE_FCOE_POST_HDR_TEMPLATE 0x0B
#define LPFC_MBOX_OPCODE_FCOE_REDISCOVER_FCF 0x10
#define LPFC_MBOX_OPCODE_FCOE_CQ_CREATE_SET 0x1D
#define LPFC_MBOX_OPCODE_FCOE_SET_FCLINK_SETTINGS 0x21
#define LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_STATE 0x22
#define LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_LOOPBACK 0x23
@ -1135,6 +1146,116 @@ struct lpfc_mbx_cq_create {
} u;
};
struct lpfc_mbx_cq_create_set {
union lpfc_sli4_cfg_shdr cfg_shdr;
union {
struct {
uint32_t word0;
#define lpfc_mbx_cq_create_set_page_size_SHIFT 16 /* Version 2 Only */
#define lpfc_mbx_cq_create_set_page_size_MASK 0x000000FF
#define lpfc_mbx_cq_create_set_page_size_WORD word0
#define lpfc_mbx_cq_create_set_num_pages_SHIFT 0
#define lpfc_mbx_cq_create_set_num_pages_MASK 0x0000FFFF
#define lpfc_mbx_cq_create_set_num_pages_WORD word0
uint32_t word1;
#define lpfc_mbx_cq_create_set_evt_SHIFT 31
#define lpfc_mbx_cq_create_set_evt_MASK 0x00000001
#define lpfc_mbx_cq_create_set_evt_WORD word1
#define lpfc_mbx_cq_create_set_valid_SHIFT 29
#define lpfc_mbx_cq_create_set_valid_MASK 0x00000001
#define lpfc_mbx_cq_create_set_valid_WORD word1
#define lpfc_mbx_cq_create_set_cqe_cnt_SHIFT 27
#define lpfc_mbx_cq_create_set_cqe_cnt_MASK 0x00000003
#define lpfc_mbx_cq_create_set_cqe_cnt_WORD word1
#define lpfc_mbx_cq_create_set_cqe_size_SHIFT 25
#define lpfc_mbx_cq_create_set_cqe_size_MASK 0x00000003
#define lpfc_mbx_cq_create_set_cqe_size_WORD word1
#define lpfc_mbx_cq_create_set_auto_SHIFT 15
#define lpfc_mbx_cq_create_set_auto_MASK 0x0000001
#define lpfc_mbx_cq_create_set_auto_WORD word1
#define lpfc_mbx_cq_create_set_nodelay_SHIFT 14
#define lpfc_mbx_cq_create_set_nodelay_MASK 0x00000001
#define lpfc_mbx_cq_create_set_nodelay_WORD word1
#define lpfc_mbx_cq_create_set_clswm_SHIFT 12
#define lpfc_mbx_cq_create_set_clswm_MASK 0x00000003
#define lpfc_mbx_cq_create_set_clswm_WORD word1
uint32_t word2;
#define lpfc_mbx_cq_create_set_arm_SHIFT 31
#define lpfc_mbx_cq_create_set_arm_MASK 0x00000001
#define lpfc_mbx_cq_create_set_arm_WORD word2
#define lpfc_mbx_cq_create_set_num_cq_SHIFT 0
#define lpfc_mbx_cq_create_set_num_cq_MASK 0x0000FFFF
#define lpfc_mbx_cq_create_set_num_cq_WORD word2
uint32_t word3;
#define lpfc_mbx_cq_create_set_eq_id1_SHIFT 16
#define lpfc_mbx_cq_create_set_eq_id1_MASK 0x0000FFFF
#define lpfc_mbx_cq_create_set_eq_id1_WORD word3
#define lpfc_mbx_cq_create_set_eq_id0_SHIFT 0
#define lpfc_mbx_cq_create_set_eq_id0_MASK 0x0000FFFF
#define lpfc_mbx_cq_create_set_eq_id0_WORD word3
uint32_t word4;
#define lpfc_mbx_cq_create_set_eq_id3_SHIFT 16
#define lpfc_mbx_cq_create_set_eq_id3_MASK 0x0000FFFF
#define lpfc_mbx_cq_create_set_eq_id3_WORD word4
#define lpfc_mbx_cq_create_set_eq_id2_SHIFT 0
#define lpfc_mbx_cq_create_set_eq_id2_MASK 0x0000FFFF
#define lpfc_mbx_cq_create_set_eq_id2_WORD word4
uint32_t word5;
#define lpfc_mbx_cq_create_set_eq_id5_SHIFT 16
#define lpfc_mbx_cq_create_set_eq_id5_MASK 0x0000FFFF
#define lpfc_mbx_cq_create_set_eq_id5_WORD word5
#define lpfc_mbx_cq_create_set_eq_id4_SHIFT 0
#define lpfc_mbx_cq_create_set_eq_id4_MASK 0x0000FFFF
#define lpfc_mbx_cq_create_set_eq_id4_WORD word5
uint32_t word6;
#define lpfc_mbx_cq_create_set_eq_id7_SHIFT 16
#define lpfc_mbx_cq_create_set_eq_id7_MASK 0x0000FFFF
#define lpfc_mbx_cq_create_set_eq_id7_WORD word6
#define lpfc_mbx_cq_create_set_eq_id6_SHIFT 0
#define lpfc_mbx_cq_create_set_eq_id6_MASK 0x0000FFFF
#define lpfc_mbx_cq_create_set_eq_id6_WORD word6
uint32_t word7;
#define lpfc_mbx_cq_create_set_eq_id9_SHIFT 16
#define lpfc_mbx_cq_create_set_eq_id9_MASK 0x0000FFFF
#define lpfc_mbx_cq_create_set_eq_id9_WORD word7
#define lpfc_mbx_cq_create_set_eq_id8_SHIFT 0
#define lpfc_mbx_cq_create_set_eq_id8_MASK 0x0000FFFF
#define lpfc_mbx_cq_create_set_eq_id8_WORD word7
uint32_t word8;
#define lpfc_mbx_cq_create_set_eq_id11_SHIFT 16
#define lpfc_mbx_cq_create_set_eq_id11_MASK 0x0000FFFF
#define lpfc_mbx_cq_create_set_eq_id11_WORD word8
#define lpfc_mbx_cq_create_set_eq_id10_SHIFT 0
#define lpfc_mbx_cq_create_set_eq_id10_MASK 0x0000FFFF
#define lpfc_mbx_cq_create_set_eq_id10_WORD word8
uint32_t word9;
#define lpfc_mbx_cq_create_set_eq_id13_SHIFT 16
#define lpfc_mbx_cq_create_set_eq_id13_MASK 0x0000FFFF
#define lpfc_mbx_cq_create_set_eq_id13_WORD word9
#define lpfc_mbx_cq_create_set_eq_id12_SHIFT 0
#define lpfc_mbx_cq_create_set_eq_id12_MASK 0x0000FFFF
#define lpfc_mbx_cq_create_set_eq_id12_WORD word9
uint32_t word10;
#define lpfc_mbx_cq_create_set_eq_id15_SHIFT 16
#define lpfc_mbx_cq_create_set_eq_id15_MASK 0x0000FFFF
#define lpfc_mbx_cq_create_set_eq_id15_WORD word10
#define lpfc_mbx_cq_create_set_eq_id14_SHIFT 0
#define lpfc_mbx_cq_create_set_eq_id14_MASK 0x0000FFFF
#define lpfc_mbx_cq_create_set_eq_id14_WORD word10
struct dma_address page[1];
} request;
struct {
uint32_t word0;
#define lpfc_mbx_cq_create_set_num_alloc_SHIFT 16
#define lpfc_mbx_cq_create_set_num_alloc_MASK 0x0000FFFF
#define lpfc_mbx_cq_create_set_num_alloc_WORD word0
#define lpfc_mbx_cq_create_set_base_id_SHIFT 0
#define lpfc_mbx_cq_create_set_base_id_MASK 0x0000FFFF
#define lpfc_mbx_cq_create_set_base_id_WORD word0
} response;
} u;
};
struct lpfc_mbx_cq_destroy {
struct mbox_header header;
union {
@ -1186,6 +1307,7 @@ struct lpfc_mbx_wq_create {
#define lpfc_mbx_wq_create_page_size_SHIFT 0
#define lpfc_mbx_wq_create_page_size_MASK 0x000000FF
#define lpfc_mbx_wq_create_page_size_WORD word1
#define LPFC_WQ_PAGE_SIZE_4096 0x1
#define lpfc_mbx_wq_create_wqe_size_SHIFT 8
#define lpfc_mbx_wq_create_wqe_size_MASK 0x0000000F
#define lpfc_mbx_wq_create_wqe_size_WORD word1
@ -1243,10 +1365,10 @@ struct rq_context {
#define LPFC_RQ_RING_SIZE_1024 10 /* 1024 entries */
#define LPFC_RQ_RING_SIZE_2048 11 /* 2048 entries */
#define LPFC_RQ_RING_SIZE_4096 12 /* 4096 entries */
#define lpfc_rq_context_rqe_count_1_SHIFT 16 /* Version 1 Only */
#define lpfc_rq_context_rqe_count_1_SHIFT 16 /* Version 1-2 Only */
#define lpfc_rq_context_rqe_count_1_MASK 0x0000FFFF
#define lpfc_rq_context_rqe_count_1_WORD word0
#define lpfc_rq_context_rqe_size_SHIFT 8 /* Version 1 Only */
#define lpfc_rq_context_rqe_size_SHIFT 8 /* Version 1-2 Only */
#define lpfc_rq_context_rqe_size_MASK 0x0000000F
#define lpfc_rq_context_rqe_size_WORD word0
#define LPFC_RQE_SIZE_8 2
@ -1257,7 +1379,14 @@ struct rq_context {
#define lpfc_rq_context_page_size_SHIFT 0 /* Version 1 Only */
#define lpfc_rq_context_page_size_MASK 0x000000FF
#define lpfc_rq_context_page_size_WORD word0
uint32_t reserved1;
#define LPFC_RQ_PAGE_SIZE_4096 0x1
uint32_t word1;
#define lpfc_rq_context_data_size_SHIFT 16 /* Version 2 Only */
#define lpfc_rq_context_data_size_MASK 0x0000FFFF
#define lpfc_rq_context_data_size_WORD word1
#define lpfc_rq_context_hdr_size_SHIFT 0 /* Version 2 Only */
#define lpfc_rq_context_hdr_size_MASK 0x0000FFFF
#define lpfc_rq_context_hdr_size_WORD word1
uint32_t word2;
#define lpfc_rq_context_cq_id_SHIFT 16
#define lpfc_rq_context_cq_id_MASK 0x000003FF
@ -1265,6 +1394,9 @@ struct rq_context {
#define lpfc_rq_context_buf_size_SHIFT 0
#define lpfc_rq_context_buf_size_MASK 0x0000FFFF
#define lpfc_rq_context_buf_size_WORD word2
#define lpfc_rq_context_base_cq_SHIFT 0 /* Version 2 Only */
#define lpfc_rq_context_base_cq_MASK 0x0000FFFF
#define lpfc_rq_context_base_cq_WORD word2
uint32_t buffer_size; /* Version 1 Only */
};
@ -1286,10 +1418,65 @@ struct lpfc_mbx_rq_create {
#define lpfc_mbx_rq_create_ulp_num_MASK 0x000000FF
#define lpfc_mbx_rq_create_ulp_num_WORD word0
struct rq_context context;
struct dma_address page[LPFC_MAX_WQ_PAGE];
struct dma_address page[LPFC_MAX_RQ_PAGE];
} request;
struct {
uint32_t word0;
#define lpfc_mbx_rq_create_q_cnt_v2_SHIFT 16
#define lpfc_mbx_rq_create_q_cnt_v2_MASK 0x0000FFFF
#define lpfc_mbx_rq_create_q_cnt_v2_WORD word0
#define lpfc_mbx_rq_create_q_id_SHIFT 0
#define lpfc_mbx_rq_create_q_id_MASK 0x0000FFFF
#define lpfc_mbx_rq_create_q_id_WORD word0
uint32_t doorbell_offset;
uint32_t word2;
#define lpfc_mbx_rq_create_bar_set_SHIFT 0
#define lpfc_mbx_rq_create_bar_set_MASK 0x0000FFFF
#define lpfc_mbx_rq_create_bar_set_WORD word2
#define lpfc_mbx_rq_create_db_format_SHIFT 16
#define lpfc_mbx_rq_create_db_format_MASK 0x0000FFFF
#define lpfc_mbx_rq_create_db_format_WORD word2
} response;
} u;
};
struct lpfc_mbx_rq_create_v2 {
union lpfc_sli4_cfg_shdr cfg_shdr;
union {
struct {
uint32_t word0;
#define lpfc_mbx_rq_create_num_pages_SHIFT 0
#define lpfc_mbx_rq_create_num_pages_MASK 0x0000FFFF
#define lpfc_mbx_rq_create_num_pages_WORD word0
#define lpfc_mbx_rq_create_rq_cnt_SHIFT 16
#define lpfc_mbx_rq_create_rq_cnt_MASK 0x000000FF
#define lpfc_mbx_rq_create_rq_cnt_WORD word0
#define lpfc_mbx_rq_create_dua_SHIFT 16
#define lpfc_mbx_rq_create_dua_MASK 0x00000001
#define lpfc_mbx_rq_create_dua_WORD word0
#define lpfc_mbx_rq_create_bqu_SHIFT 17
#define lpfc_mbx_rq_create_bqu_MASK 0x00000001
#define lpfc_mbx_rq_create_bqu_WORD word0
#define lpfc_mbx_rq_create_ulp_num_SHIFT 24
#define lpfc_mbx_rq_create_ulp_num_MASK 0x000000FF
#define lpfc_mbx_rq_create_ulp_num_WORD word0
#define lpfc_mbx_rq_create_dim_SHIFT 29
#define lpfc_mbx_rq_create_dim_MASK 0x00000001
#define lpfc_mbx_rq_create_dim_WORD word0
#define lpfc_mbx_rq_create_dfd_SHIFT 30
#define lpfc_mbx_rq_create_dfd_MASK 0x00000001
#define lpfc_mbx_rq_create_dfd_WORD word0
#define lpfc_mbx_rq_create_dnb_SHIFT 31
#define lpfc_mbx_rq_create_dnb_MASK 0x00000001
#define lpfc_mbx_rq_create_dnb_WORD word0
struct rq_context context;
struct dma_address page[1];
} request;
struct {
uint32_t word0;
#define lpfc_mbx_rq_create_q_cnt_v2_SHIFT 16
#define lpfc_mbx_rq_create_q_cnt_v2_MASK 0x0000FFFF
#define lpfc_mbx_rq_create_q_cnt_v2_WORD word0
#define lpfc_mbx_rq_create_q_id_SHIFT 0
#define lpfc_mbx_rq_create_q_id_MASK 0x0000FFFF
#define lpfc_mbx_rq_create_q_id_WORD word0
@ -2203,6 +2390,160 @@ struct lpfc_mbx_reg_fcfi {
#define lpfc_reg_fcfi_vlan_tag_WORD word8
};
struct lpfc_mbx_reg_fcfi_mrq {
uint32_t word1;
#define lpfc_reg_fcfi_mrq_info_index_SHIFT 0
#define lpfc_reg_fcfi_mrq_info_index_MASK 0x0000FFFF
#define lpfc_reg_fcfi_mrq_info_index_WORD word1
#define lpfc_reg_fcfi_mrq_fcfi_SHIFT 16
#define lpfc_reg_fcfi_mrq_fcfi_MASK 0x0000FFFF
#define lpfc_reg_fcfi_mrq_fcfi_WORD word1
uint32_t word2;
#define lpfc_reg_fcfi_mrq_rq_id1_SHIFT 0
#define lpfc_reg_fcfi_mrq_rq_id1_MASK 0x0000FFFF
#define lpfc_reg_fcfi_mrq_rq_id1_WORD word2
#define lpfc_reg_fcfi_mrq_rq_id0_SHIFT 16
#define lpfc_reg_fcfi_mrq_rq_id0_MASK 0x0000FFFF
#define lpfc_reg_fcfi_mrq_rq_id0_WORD word2
uint32_t word3;
#define lpfc_reg_fcfi_mrq_rq_id3_SHIFT 0
#define lpfc_reg_fcfi_mrq_rq_id3_MASK 0x0000FFFF
#define lpfc_reg_fcfi_mrq_rq_id3_WORD word3
#define lpfc_reg_fcfi_mrq_rq_id2_SHIFT 16
#define lpfc_reg_fcfi_mrq_rq_id2_MASK 0x0000FFFF
#define lpfc_reg_fcfi_mrq_rq_id2_WORD word3
uint32_t word4;
#define lpfc_reg_fcfi_mrq_type_match0_SHIFT 24
#define lpfc_reg_fcfi_mrq_type_match0_MASK 0x000000FF
#define lpfc_reg_fcfi_mrq_type_match0_WORD word4
#define lpfc_reg_fcfi_mrq_type_mask0_SHIFT 16
#define lpfc_reg_fcfi_mrq_type_mask0_MASK 0x000000FF
#define lpfc_reg_fcfi_mrq_type_mask0_WORD word4
#define lpfc_reg_fcfi_mrq_rctl_match0_SHIFT 8
#define lpfc_reg_fcfi_mrq_rctl_match0_MASK 0x000000FF
#define lpfc_reg_fcfi_mrq_rctl_match0_WORD word4
#define lpfc_reg_fcfi_mrq_rctl_mask0_SHIFT 0
#define lpfc_reg_fcfi_mrq_rctl_mask0_MASK 0x000000FF
#define lpfc_reg_fcfi_mrq_rctl_mask0_WORD word4
uint32_t word5;
#define lpfc_reg_fcfi_mrq_type_match1_SHIFT 24
#define lpfc_reg_fcfi_mrq_type_match1_MASK 0x000000FF
#define lpfc_reg_fcfi_mrq_type_match1_WORD word5
#define lpfc_reg_fcfi_mrq_type_mask1_SHIFT 16
#define lpfc_reg_fcfi_mrq_type_mask1_MASK 0x000000FF
#define lpfc_reg_fcfi_mrq_type_mask1_WORD word5
#define lpfc_reg_fcfi_mrq_rctl_match1_SHIFT 8
#define lpfc_reg_fcfi_mrq_rctl_match1_MASK 0x000000FF
#define lpfc_reg_fcfi_mrq_rctl_match1_WORD word5
#define lpfc_reg_fcfi_mrq_rctl_mask1_SHIFT 0
#define lpfc_reg_fcfi_mrq_rctl_mask1_MASK 0x000000FF
#define lpfc_reg_fcfi_mrq_rctl_mask1_WORD word5
uint32_t word6;
#define lpfc_reg_fcfi_mrq_type_match2_SHIFT 24
#define lpfc_reg_fcfi_mrq_type_match2_MASK 0x000000FF
#define lpfc_reg_fcfi_mrq_type_match2_WORD word6
#define lpfc_reg_fcfi_mrq_type_mask2_SHIFT 16
#define lpfc_reg_fcfi_mrq_type_mask2_MASK 0x000000FF
#define lpfc_reg_fcfi_mrq_type_mask2_WORD word6
#define lpfc_reg_fcfi_mrq_rctl_match2_SHIFT 8
#define lpfc_reg_fcfi_mrq_rctl_match2_MASK 0x000000FF
#define lpfc_reg_fcfi_mrq_rctl_match2_WORD word6
#define lpfc_reg_fcfi_mrq_rctl_mask2_SHIFT 0
#define lpfc_reg_fcfi_mrq_rctl_mask2_MASK 0x000000FF
#define lpfc_reg_fcfi_mrq_rctl_mask2_WORD word6
uint32_t word7;
#define lpfc_reg_fcfi_mrq_type_match3_SHIFT 24
#define lpfc_reg_fcfi_mrq_type_match3_MASK 0x000000FF
#define lpfc_reg_fcfi_mrq_type_match3_WORD word7
#define lpfc_reg_fcfi_mrq_type_mask3_SHIFT 16
#define lpfc_reg_fcfi_mrq_type_mask3_MASK 0x000000FF
#define lpfc_reg_fcfi_mrq_type_mask3_WORD word7
#define lpfc_reg_fcfi_mrq_rctl_match3_SHIFT 8
#define lpfc_reg_fcfi_mrq_rctl_match3_MASK 0x000000FF
#define lpfc_reg_fcfi_mrq_rctl_match3_WORD word7
#define lpfc_reg_fcfi_mrq_rctl_mask3_SHIFT 0
#define lpfc_reg_fcfi_mrq_rctl_mask3_MASK 0x000000FF
#define lpfc_reg_fcfi_mrq_rctl_mask3_WORD word7
uint32_t word8;
#define lpfc_reg_fcfi_mrq_ptc7_SHIFT 31
#define lpfc_reg_fcfi_mrq_ptc7_MASK 0x00000001
#define lpfc_reg_fcfi_mrq_ptc7_WORD word8
#define lpfc_reg_fcfi_mrq_ptc6_SHIFT 30
#define lpfc_reg_fcfi_mrq_ptc6_MASK 0x00000001
#define lpfc_reg_fcfi_mrq_ptc6_WORD word8
#define lpfc_reg_fcfi_mrq_ptc5_SHIFT 29
#define lpfc_reg_fcfi_mrq_ptc5_MASK 0x00000001
#define lpfc_reg_fcfi_mrq_ptc5_WORD word8
#define lpfc_reg_fcfi_mrq_ptc4_SHIFT 28
#define lpfc_reg_fcfi_mrq_ptc4_MASK 0x00000001
#define lpfc_reg_fcfi_mrq_ptc4_WORD word8
#define lpfc_reg_fcfi_mrq_ptc3_SHIFT 27
#define lpfc_reg_fcfi_mrq_ptc3_MASK 0x00000001
#define lpfc_reg_fcfi_mrq_ptc3_WORD word8
#define lpfc_reg_fcfi_mrq_ptc2_SHIFT 26
#define lpfc_reg_fcfi_mrq_ptc2_MASK 0x00000001
#define lpfc_reg_fcfi_mrq_ptc2_WORD word8
#define lpfc_reg_fcfi_mrq_ptc1_SHIFT 25
#define lpfc_reg_fcfi_mrq_ptc1_MASK 0x00000001
#define lpfc_reg_fcfi_mrq_ptc1_WORD word8
#define lpfc_reg_fcfi_mrq_ptc0_SHIFT 24
#define lpfc_reg_fcfi_mrq_ptc0_MASK 0x00000001
#define lpfc_reg_fcfi_mrq_ptc0_WORD word8
#define lpfc_reg_fcfi_mrq_pt7_SHIFT 23
#define lpfc_reg_fcfi_mrq_pt7_MASK 0x00000001
#define lpfc_reg_fcfi_mrq_pt7_WORD word8
#define lpfc_reg_fcfi_mrq_pt6_SHIFT 22
#define lpfc_reg_fcfi_mrq_pt6_MASK 0x00000001
#define lpfc_reg_fcfi_mrq_pt6_WORD word8
#define lpfc_reg_fcfi_mrq_pt5_SHIFT 21
#define lpfc_reg_fcfi_mrq_pt5_MASK 0x00000001
#define lpfc_reg_fcfi_mrq_pt5_WORD word8
#define lpfc_reg_fcfi_mrq_pt4_SHIFT 20
#define lpfc_reg_fcfi_mrq_pt4_MASK 0x00000001
#define lpfc_reg_fcfi_mrq_pt4_WORD word8
#define lpfc_reg_fcfi_mrq_pt3_SHIFT 19
#define lpfc_reg_fcfi_mrq_pt3_MASK 0x00000001
#define lpfc_reg_fcfi_mrq_pt3_WORD word8
#define lpfc_reg_fcfi_mrq_pt2_SHIFT 18
#define lpfc_reg_fcfi_mrq_pt2_MASK 0x00000001
#define lpfc_reg_fcfi_mrq_pt2_WORD word8
#define lpfc_reg_fcfi_mrq_pt1_SHIFT 17
#define lpfc_reg_fcfi_mrq_pt1_MASK 0x00000001
#define lpfc_reg_fcfi_mrq_pt1_WORD word8
#define lpfc_reg_fcfi_mrq_pt0_SHIFT 16
#define lpfc_reg_fcfi_mrq_pt0_MASK 0x00000001
#define lpfc_reg_fcfi_mrq_pt0_WORD word8
#define lpfc_reg_fcfi_mrq_xmv_SHIFT 15
#define lpfc_reg_fcfi_mrq_xmv_MASK 0x00000001
#define lpfc_reg_fcfi_mrq_xmv_WORD word8
#define lpfc_reg_fcfi_mrq_mode_SHIFT 13
#define lpfc_reg_fcfi_mrq_mode_MASK 0x00000001
#define lpfc_reg_fcfi_mrq_mode_WORD word8
#define lpfc_reg_fcfi_mrq_vv_SHIFT 12
#define lpfc_reg_fcfi_mrq_vv_MASK 0x00000001
#define lpfc_reg_fcfi_mrq_vv_WORD word8
#define lpfc_reg_fcfi_mrq_vlan_tag_SHIFT 0
#define lpfc_reg_fcfi_mrq_vlan_tag_MASK 0x00000FFF
#define lpfc_reg_fcfi_mrq_vlan_tag_WORD word8
uint32_t word9;
#define lpfc_reg_fcfi_mrq_policy_SHIFT 12
#define lpfc_reg_fcfi_mrq_policy_MASK 0x0000000F
#define lpfc_reg_fcfi_mrq_policy_WORD word9
#define lpfc_reg_fcfi_mrq_filter_SHIFT 8
#define lpfc_reg_fcfi_mrq_filter_MASK 0x0000000F
#define lpfc_reg_fcfi_mrq_filter_WORD word9
#define lpfc_reg_fcfi_mrq_npairs_SHIFT 0
#define lpfc_reg_fcfi_mrq_npairs_MASK 0x000000FF
#define lpfc_reg_fcfi_mrq_npairs_WORD word9
uint32_t word10;
uint32_t word11;
uint32_t word12;
uint32_t word13;
uint32_t word14;
uint32_t word15;
uint32_t word16;
};
struct lpfc_mbx_unreg_fcfi {
uint32_t word1_rsv;
uint32_t word2;
@ -2382,6 +2723,9 @@ struct lpfc_mbx_request_features {
#define lpfc_mbx_rq_ftr_rq_perfh_SHIFT 11
#define lpfc_mbx_rq_ftr_rq_perfh_MASK 0x00000001
#define lpfc_mbx_rq_ftr_rq_perfh_WORD word2
#define lpfc_mbx_rq_ftr_rq_mrqp_SHIFT 16
#define lpfc_mbx_rq_ftr_rq_mrqp_MASK 0x00000001
#define lpfc_mbx_rq_ftr_rq_mrqp_WORD word2
uint32_t word3;
#define lpfc_mbx_rq_ftr_rsp_iaab_SHIFT 0
#define lpfc_mbx_rq_ftr_rsp_iaab_MASK 0x00000001
@ -2410,6 +2754,9 @@ struct lpfc_mbx_request_features {
#define lpfc_mbx_rq_ftr_rsp_perfh_SHIFT 11
#define lpfc_mbx_rq_ftr_rsp_perfh_MASK 0x00000001
#define lpfc_mbx_rq_ftr_rsp_perfh_WORD word3
#define lpfc_mbx_rq_ftr_rsp_mrqp_SHIFT 16
#define lpfc_mbx_rq_ftr_rsp_mrqp_MASK 0x00000001
#define lpfc_mbx_rq_ftr_rsp_mrqp_WORD word3
};
struct lpfc_mbx_supp_pages {
@ -2839,12 +3186,18 @@ struct lpfc_sli4_parameters {
#define cfg_mqv_WORD word6
uint32_t word7;
uint32_t word8;
#define cfg_wqpcnt_SHIFT 0
#define cfg_wqpcnt_MASK 0x0000000f
#define cfg_wqpcnt_WORD word8
#define cfg_wqsize_SHIFT 8
#define cfg_wqsize_MASK 0x0000000f
#define cfg_wqsize_WORD word8
#define cfg_wqv_SHIFT 14
#define cfg_wqv_MASK 0x00000003
#define cfg_wqv_WORD word8
#define cfg_wqpsize_SHIFT 16
#define cfg_wqpsize_MASK 0x000000ff
#define cfg_wqpsize_WORD word8
uint32_t word9;
uint32_t word10;
#define cfg_rqv_SHIFT 14
@ -2895,6 +3248,12 @@ struct lpfc_sli4_parameters {
#define cfg_mds_diags_SHIFT 1
#define cfg_mds_diags_MASK 0x00000001
#define cfg_mds_diags_WORD word19
#define cfg_nvme_SHIFT 3
#define cfg_nvme_MASK 0x00000001
#define cfg_nvme_WORD word19
#define cfg_xib_SHIFT 4
#define cfg_xib_MASK 0x00000001
#define cfg_xib_WORD word19
};
#define LPFC_SET_UE_RECOVERY 0x10
@ -3290,14 +3649,17 @@ struct lpfc_mqe {
struct lpfc_mbx_del_fcf_tbl_entry del_fcf_entry;
struct lpfc_mbx_redisc_fcf_tbl redisc_fcf_tbl;
struct lpfc_mbx_reg_fcfi reg_fcfi;
struct lpfc_mbx_reg_fcfi_mrq reg_fcfi_mrq;
struct lpfc_mbx_unreg_fcfi unreg_fcfi;
struct lpfc_mbx_mq_create mq_create;
struct lpfc_mbx_mq_create_ext mq_create_ext;
struct lpfc_mbx_eq_create eq_create;
struct lpfc_mbx_modify_eq_delay eq_delay;
struct lpfc_mbx_cq_create cq_create;
struct lpfc_mbx_cq_create_set cq_create_set;
struct lpfc_mbx_wq_create wq_create;
struct lpfc_mbx_rq_create rq_create;
struct lpfc_mbx_rq_create_v2 rq_create_v2;
struct lpfc_mbx_mq_destroy mq_destroy;
struct lpfc_mbx_eq_destroy eq_destroy;
struct lpfc_mbx_cq_destroy cq_destroy;
@ -3657,6 +4019,9 @@ struct wqe_common {
#define wqe_ebde_cnt_SHIFT 0
#define wqe_ebde_cnt_MASK 0x0000000f
#define wqe_ebde_cnt_WORD word10
#define wqe_nvme_SHIFT 4
#define wqe_nvme_MASK 0x00000001
#define wqe_nvme_WORD word10
#define wqe_oas_SHIFT 6
#define wqe_oas_MASK 0x00000001
#define wqe_oas_WORD word10
@ -3717,9 +4082,18 @@ struct wqe_common {
#define LPFC_ELS_ID_FDISC 2
#define LPFC_ELS_ID_LOGO 1
#define LPFC_ELS_ID_DEFAULT 0
#define wqe_irsp_SHIFT 4
#define wqe_irsp_MASK 0x00000001
#define wqe_irsp_WORD word11
#define wqe_sup_SHIFT 6
#define wqe_sup_MASK 0x00000001
#define wqe_sup_WORD word11
#define wqe_wqec_SHIFT 7
#define wqe_wqec_MASK 0x00000001
#define wqe_wqec_WORD word11
#define wqe_irsplen_SHIFT 8
#define wqe_irsplen_MASK 0x0000000f
#define wqe_irsplen_WORD word11
#define wqe_cqid_SHIFT 16
#define wqe_cqid_MASK 0x0000ffff
#define wqe_cqid_WORD word11
@ -3897,6 +4271,50 @@ struct gen_req64_wqe {
uint32_t max_response_payload_len;
};
/* Define NVME PRLI request to fabric. NVME is a
* fabric-only protocol.
* Updated to red-lined v1.08 on Sept 16, 2016
*/
struct lpfc_nvme_prli {
uint32_t word1;
/* The Response Code is defined in the FCP PRLI lpfc_hw.h */
#define prli_acc_rsp_code_SHIFT 8
#define prli_acc_rsp_code_MASK 0x0000000f
#define prli_acc_rsp_code_WORD word1
#define prli_estabImagePair_SHIFT 13
#define prli_estabImagePair_MASK 0x00000001
#define prli_estabImagePair_WORD word1
#define prli_type_code_ext_SHIFT 16
#define prli_type_code_ext_MASK 0x000000ff
#define prli_type_code_ext_WORD word1
#define prli_type_code_SHIFT 24
#define prli_type_code_MASK 0x000000ff
#define prli_type_code_WORD word1
uint32_t word_rsvd2;
uint32_t word_rsvd3;
uint32_t word4;
#define prli_fba_SHIFT 0
#define prli_fba_MASK 0x00000001
#define prli_fba_WORD word4
#define prli_disc_SHIFT 3
#define prli_disc_MASK 0x00000001
#define prli_disc_WORD word4
#define prli_tgt_SHIFT 4
#define prli_tgt_MASK 0x00000001
#define prli_tgt_WORD word4
#define prli_init_SHIFT 5
#define prli_init_MASK 0x00000001
#define prli_init_WORD word4
#define prli_recov_SHIFT 8
#define prli_recov_MASK 0x00000001
#define prli_recov_WORD word4
uint32_t word5;
#define prli_fb_sz_SHIFT 0
#define prli_fb_sz_MASK 0x0000ffff
#define prli_fb_sz_WORD word5
#define LPFC_NVMET_FB_SZ_MAX 65536 /* Driver target mode only. */
};
struct create_xri_wqe {
uint32_t rsrvd[5]; /* words 0-4 */
struct wqe_did wqe_dest; /* word 5 */
@ -3969,6 +4387,35 @@ struct fcp_icmnd64_wqe {
uint32_t rsvd_12_15[4]; /* word 12-15 */
};
struct fcp_trsp64_wqe {
struct ulp_bde64 bde;
uint32_t response_len;
uint32_t rsvd_4_5[2];
struct wqe_common wqe_com; /* words 6-11 */
uint32_t rsvd_12_15[4]; /* word 12-15 */
};
/* Target-mode FCP send WQE layout (see CMD_FCP_TSEND64_WQE). */
struct fcp_tsend64_wqe {
	struct ulp_bde64 bde;
	uint32_t payload_offset_len;
	uint32_t relative_offset;
	uint32_t reserved;
	struct wqe_common wqe_com; /* words 6-11 */
	uint32_t fcp_data_len; /* word 12 */
	uint32_t rsvd_13_15[3]; /* word 13-15 */
};
/* Target-mode FCP receive WQE layout (see CMD_FCP_TRECEIVE64_WQE).
 * Same shape as fcp_tsend64_wqe; kept distinct to match the command set.
 */
struct fcp_treceive64_wqe {
	struct ulp_bde64 bde;
	uint32_t payload_offset_len;
	uint32_t relative_offset;
	uint32_t reserved;
	struct wqe_common wqe_com; /* words 6-11 */
	uint32_t fcp_data_len; /* word 12 */
	uint32_t rsvd_13_15[3]; /* word 13-15 */
};
#define TXRDY_PAYLOAD_LEN 12
union lpfc_wqe {
uint32_t words[16];
@ -3984,6 +4431,10 @@ union lpfc_wqe {
struct xmit_els_rsp64_wqe xmit_els_rsp;
struct els_request64_wqe els_req;
struct gen_req64_wqe gen_req;
struct fcp_trsp64_wqe fcp_trsp;
struct fcp_tsend64_wqe fcp_tsend;
struct fcp_treceive64_wqe fcp_treceive;
};
union lpfc_wqe128 {
@ -3992,6 +4443,9 @@ union lpfc_wqe128 {
struct fcp_icmnd64_wqe fcp_icmd;
struct fcp_iread64_wqe fcp_iread;
struct fcp_iwrite64_wqe fcp_iwrite;
struct fcp_trsp64_wqe fcp_trsp;
struct fcp_tsend64_wqe fcp_tsend;
struct fcp_treceive64_wqe fcp_treceive;
struct xmit_seq64_wqe xmit_sequence;
struct gen_req64_wqe gen_req;
};
@ -4015,11 +4469,39 @@ struct lpfc_grp_hdr {
uint8_t revision[32];
};
#define FCP_COMMAND 0x0
#define FCP_COMMAND_DATA_OUT 0x1
#define ELS_COMMAND_NON_FIP 0xC
#define ELS_COMMAND_FIP 0xD
#define OTHER_COMMAND 0x8
/* Defines for WQE command type */
#define FCP_COMMAND 0x0
#define NVME_READ_CMD 0x0
#define FCP_COMMAND_DATA_OUT 0x1
#define NVME_WRITE_CMD 0x1
#define FCP_COMMAND_TRECEIVE 0x2
#define FCP_COMMAND_TRSP 0x3
#define FCP_COMMAND_TSEND 0x7
#define OTHER_COMMAND 0x8
#define ELS_COMMAND_NON_FIP 0xC
#define ELS_COMMAND_FIP 0xD
#define LPFC_NVME_EMBED_CMD 0x0
#define LPFC_NVME_EMBED_WRITE 0x1
#define LPFC_NVME_EMBED_READ 0x2
/* WQE Commands */
#define CMD_ABORT_XRI_WQE 0x0F
#define CMD_XMIT_SEQUENCE64_WQE 0x82
#define CMD_XMIT_BCAST64_WQE 0x84
#define CMD_ELS_REQUEST64_WQE 0x8A
#define CMD_XMIT_ELS_RSP64_WQE 0x95
#define CMD_XMIT_BLS_RSP64_WQE 0x97
#define CMD_FCP_IWRITE64_WQE 0x98
#define CMD_FCP_IREAD64_WQE 0x9A
#define CMD_FCP_ICMND64_WQE 0x9C
#define CMD_FCP_TSEND64_WQE 0x9F
#define CMD_FCP_TRECEIVE64_WQE 0xA1
#define CMD_FCP_TRSP64_WQE 0xA3
#define CMD_GEN_REQUEST64_WQE 0xC2
#define CMD_WQE_MASK 0xff
#define LPFC_FW_DUMP 1
#define LPFC_FW_RESET 2

View File

@ -1,9 +1,11 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
* Copyright (C) 2017 Broadcom. All Rights Reserved. The term *
* Broadcom refers to Broadcom Limited and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* www.broadcom.com *
* Portions Copyright (C) 2004-2005 Christoph Hellwig *
* *
* This program is free software; you can redistribute it and/or *

File diff suppressed because it is too large Load Diff

View File

@ -1,9 +1,11 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
* Copyright (C) 2017 Broadcom. All Rights Reserved. The term *
* Broadcom refers to Broadcom Limited and/or its subsidiaries. *
* Copyright (C) 2004-2009 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* www.broadcom.com *
* *
* This program is free software; you can redistribute it and/or *
* modify it under the terms of version 2 of the GNU General *
@ -38,6 +40,10 @@
#define LOG_FIP 0x00020000 /* FIP events */
#define LOG_FCP_UNDER 0x00040000 /* FCP underruns errors */
#define LOG_SCSI_CMD 0x00080000 /* ALL SCSI commands */
#define LOG_NVME 0x00100000 /* NVME general events. */
#define LOG_NVME_DISC 0x00200000 /* NVME Discovery/Connect events. */
#define LOG_NVME_ABTS 0x00400000 /* NVME ABTS events. */
#define LOG_NVME_IOERR 0x00800000 /* NVME IO Error events. */
#define LOG_ALL_MSG 0xffffffff /* LOG all messages */
#define lpfc_printf_vlog(vport, level, mask, fmt, arg...) \

View File

@ -1,9 +1,11 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
* Copyright (C) 2017 Broadcom. All Rights Reserved. The term *
* Broadcom refers to Broadcom Limited and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* www.broadcom.com *
* Portions Copyright (C) 2004-2005 Christoph Hellwig *
* *
* This program is free software; you can redistribute it and/or *
@ -954,7 +956,7 @@ lpfc_config_pcb_setup(struct lpfc_hba * phba)
pcbp->maxRing = (psli->num_rings - 1);
for (i = 0; i < psli->num_rings; i++) {
pring = &psli->ring[i];
pring = &psli->sli3_ring[i];
pring->sli.sli3.sizeCiocb =
phba->sli_rev == 3 ? SLI3_IOCB_CMD_SIZE :
@ -1217,7 +1219,7 @@ lpfc_config_ring(struct lpfc_hba * phba, int ring, LPFC_MBOXQ_t * pmb)
mb->un.varCfgRing.recvNotify = 1;
psli = &phba->sli;
pring = &psli->ring[ring];
pring = &psli->sli3_ring[ring];
mb->un.varCfgRing.numMask = pring->num_mask;
mb->mbxCommand = MBX_CONFIG_RING;
mb->mbxOwner = OWN_HOST;
@ -2081,6 +2083,9 @@ lpfc_request_features(struct lpfc_hba *phba, struct lpfcMboxq *mboxq)
if (phba->max_vpi && phba->cfg_enable_npiv)
bf_set(lpfc_mbx_rq_ftr_rq_npiv, &mboxq->u.mqe.un.req_ftrs, 1);
if (phba->nvmet_support)
bf_set(lpfc_mbx_rq_ftr_rq_mrqp, &mboxq->u.mqe.un.req_ftrs, 1);
return;
}
@ -2434,14 +2439,45 @@ lpfc_reg_fcfi(struct lpfc_hba *phba, struct lpfcMboxq *mbox)
memset(mbox, 0, sizeof(*mbox));
reg_fcfi = &mbox->u.mqe.un.reg_fcfi;
bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_REG_FCFI);
bf_set(lpfc_reg_fcfi_rq_id0, reg_fcfi, phba->sli4_hba.hdr_rq->queue_id);
bf_set(lpfc_reg_fcfi_rq_id1, reg_fcfi, REG_FCF_INVALID_QID);
if (phba->nvmet_support == 0) {
bf_set(lpfc_reg_fcfi_rq_id0, reg_fcfi,
phba->sli4_hba.hdr_rq->queue_id);
/* Match everything - rq_id0 */
bf_set(lpfc_reg_fcfi_type_match0, reg_fcfi, 0);
bf_set(lpfc_reg_fcfi_type_mask0, reg_fcfi, 0);
bf_set(lpfc_reg_fcfi_rctl_match0, reg_fcfi, 0);
bf_set(lpfc_reg_fcfi_rctl_mask0, reg_fcfi, 0);
bf_set(lpfc_reg_fcfi_rq_id1, reg_fcfi, REG_FCF_INVALID_QID);
/* addr mode is bit wise inverted value of fcf addr_mode */
bf_set(lpfc_reg_fcfi_mam, reg_fcfi,
(~phba->fcf.addr_mode) & 0x3);
} else {
/* This is ONLY for NVMET MRQ == 1 */
if (phba->cfg_nvmet_mrq != 1)
return;
bf_set(lpfc_reg_fcfi_rq_id0, reg_fcfi,
phba->sli4_hba.nvmet_mrq_hdr[0]->queue_id);
/* Match type FCP - rq_id0 */
bf_set(lpfc_reg_fcfi_type_match0, reg_fcfi, FC_TYPE_FCP);
bf_set(lpfc_reg_fcfi_type_mask0, reg_fcfi, 0xff);
bf_set(lpfc_reg_fcfi_rctl_match0, reg_fcfi,
FC_RCTL_DD_UNSOL_CMD);
bf_set(lpfc_reg_fcfi_rq_id1, reg_fcfi,
phba->sli4_hba.hdr_rq->queue_id);
/* Match everything else - rq_id1 */
bf_set(lpfc_reg_fcfi_type_match1, reg_fcfi, 0);
bf_set(lpfc_reg_fcfi_type_mask1, reg_fcfi, 0);
bf_set(lpfc_reg_fcfi_rctl_match1, reg_fcfi, 0);
bf_set(lpfc_reg_fcfi_rctl_mask1, reg_fcfi, 0);
}
bf_set(lpfc_reg_fcfi_rq_id2, reg_fcfi, REG_FCF_INVALID_QID);
bf_set(lpfc_reg_fcfi_rq_id3, reg_fcfi, REG_FCF_INVALID_QID);
bf_set(lpfc_reg_fcfi_info_index, reg_fcfi,
phba->fcf.current_rec.fcf_indx);
/* reg_fcf addr mode is bit wise inverted value of fcf addr_mode */
bf_set(lpfc_reg_fcfi_mam, reg_fcfi, (~phba->fcf.addr_mode) & 0x3);
if (phba->fcf.current_rec.vlan_id != LPFC_FCOE_NULL_VID) {
bf_set(lpfc_reg_fcfi_vv, reg_fcfi, 1);
bf_set(lpfc_reg_fcfi_vlan_tag, reg_fcfi,
@ -2449,6 +2485,70 @@ lpfc_reg_fcfi(struct lpfc_hba *phba, struct lpfcMboxq *mbox)
}
}
/**
* lpfc_reg_fcfi_mrq - Initialize the REG_FCFI_MRQ mailbox command
* @phba: pointer to the hba structure containing the FCF index and RQ ID.
* @mbox: pointer to lpfc mbox command to initialize.
* @mode: 0 to register FCFI, 1 to register MRQs
*
* The REG_FCFI_MRQ mailbox command supports Fibre Channel Forwarders (FCFs).
* The SLI Host uses the command to activate an FCF after it has acquired FCF
* information via a READ_FCF mailbox command. This mailbox command also is used
* to indicate where received unsolicited frames from this FCF will be sent. By
* default this routine will set up the FCF to forward all unsolicited frames
to the RQ ID passed in the @phba. This can be overridden by the caller for
* more complicated setups.
**/
void
lpfc_reg_fcfi_mrq(struct lpfc_hba *phba, struct lpfcMboxq *mbox, int mode)
{
	struct lpfc_mbx_reg_fcfi_mrq *reg_fcfi;

	/* REG_FCFI_MRQ only applies when more than one NVMET MRQ is
	 * configured; the single-queue case is handled by lpfc_reg_fcfi().
	 */
	if (phba->cfg_nvmet_mrq <= 1)
		return;

	memset(mbox, 0, sizeof(*mbox));
	reg_fcfi = &mbox->u.mqe.un.reg_fcfi_mrq;
	bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_REG_FCFI_MRQ);
	if (mode == 0) {
		/* mode 0: register the FCF index (and VLAN, if valid)
		 * without programming the RQ filters yet.
		 */
		bf_set(lpfc_reg_fcfi_mrq_info_index, reg_fcfi,
		       phba->fcf.current_rec.fcf_indx);
		if (phba->fcf.current_rec.vlan_id != LPFC_FCOE_NULL_VID) {
			bf_set(lpfc_reg_fcfi_mrq_vv, reg_fcfi, 1);
			bf_set(lpfc_reg_fcfi_mrq_vlan_tag, reg_fcfi,
			       phba->fcf.current_rec.vlan_id);
		}
		return;
	}

	/* mode 1: program the multi-RQ filters. Filter 0 steers NVME
	 * traffic to the NVMET MRQ set; filter 1 catches everything else.
	 */
	bf_set(lpfc_reg_fcfi_mrq_rq_id0, reg_fcfi,
	       phba->sli4_hba.nvmet_mrq_hdr[0]->queue_id);
	/* Match NVME frames of type FCP (protocol NVME) - rq_id0 */
	bf_set(lpfc_reg_fcfi_mrq_type_match0, reg_fcfi, FC_TYPE_FCP);
	bf_set(lpfc_reg_fcfi_mrq_type_mask0, reg_fcfi, 0xff);
	bf_set(lpfc_reg_fcfi_mrq_rctl_match0, reg_fcfi, FC_RCTL_DD_UNSOL_CMD);
	bf_set(lpfc_reg_fcfi_mrq_rctl_mask0, reg_fcfi, 0xff);
	bf_set(lpfc_reg_fcfi_mrq_ptc0, reg_fcfi, 1);
	bf_set(lpfc_reg_fcfi_mrq_pt0, reg_fcfi, 1);
	bf_set(lpfc_reg_fcfi_mrq_policy, reg_fcfi, 3); /* NVME connection id */
	bf_set(lpfc_reg_fcfi_mrq_mode, reg_fcfi, 1);
	bf_set(lpfc_reg_fcfi_mrq_filter, reg_fcfi, 1); /* rq_id0 */
	bf_set(lpfc_reg_fcfi_mrq_npairs, reg_fcfi, phba->cfg_nvmet_mrq);
	bf_set(lpfc_reg_fcfi_mrq_rq_id1, reg_fcfi,
	       phba->sli4_hba.hdr_rq->queue_id);
	/* Match everything - rq_id1 */
	bf_set(lpfc_reg_fcfi_mrq_type_match1, reg_fcfi, 0);
	bf_set(lpfc_reg_fcfi_mrq_type_mask1, reg_fcfi, 0);
	bf_set(lpfc_reg_fcfi_mrq_rctl_match1, reg_fcfi, 0);
	bf_set(lpfc_reg_fcfi_mrq_rctl_mask1, reg_fcfi, 0);
	/* Remaining RQ pairs are unused */
	bf_set(lpfc_reg_fcfi_mrq_rq_id2, reg_fcfi, REG_FCF_INVALID_QID);
	bf_set(lpfc_reg_fcfi_mrq_rq_id3, reg_fcfi, REG_FCF_INVALID_QID);
}
/**
* lpfc_unreg_fcfi - Initialize the UNREG_FCFI mailbox command
* @mbox: pointer to lpfc mbox command to initialize.

View File

@ -1,9 +1,11 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
* Copyright (C) 2017 Broadcom. All Rights Reserved. The term *
* Broadcom refers to Broadcom Limited and/or its subsidiaries. *
* Copyright (C) 2004-2014 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* www.broadcom.com *
* Portions Copyright (C) 2004-2005 Christoph Hellwig *
* *
* This program is free software; you can redistribute it and/or *
@ -24,10 +26,12 @@
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/fc/fc_fs.h>
#include <scsi/scsi.h>
#include <linux/nvme-fc-driver.h>
#include "lpfc_hw4.h"
#include "lpfc_hw.h"
@ -35,8 +39,10 @@
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
#include "lpfc_scsi.h"
#include "lpfc_nvme.h"
#include "lpfc_nvmet.h"
#include "lpfc_crtn.h"
#include "lpfc_logmsg.h"
@ -66,7 +72,7 @@ lpfc_mem_alloc_active_rrq_pool_s4(struct lpfc_hba *phba) {
* lpfc_mem_alloc - create and allocate all PCI and memory pools
* @phba: HBA to allocate pools for
*
* Description: Creates and allocates PCI pools lpfc_scsi_dma_buf_pool,
* Description: Creates and allocates PCI pools lpfc_sg_dma_buf_pool,
* lpfc_mbuf_pool, lpfc_hrb_pool. Creates and allocates kmalloc-backed mempools
* for LPFC_MBOXQ_t and lpfc_nodelist. Also allocates the VPI bitmask.
*
@ -90,21 +96,23 @@ lpfc_mem_alloc(struct lpfc_hba *phba, int align)
else
i = SLI4_PAGE_SIZE;
phba->lpfc_scsi_dma_buf_pool =
pci_pool_create("lpfc_scsi_dma_buf_pool",
phba->pcidev,
phba->cfg_sg_dma_buf_size,
i,
0);
} else {
phba->lpfc_scsi_dma_buf_pool =
pci_pool_create("lpfc_scsi_dma_buf_pool",
phba->pcidev, phba->cfg_sg_dma_buf_size,
align, 0);
}
phba->lpfc_sg_dma_buf_pool =
pci_pool_create("lpfc_sg_dma_buf_pool",
phba->pcidev,
phba->cfg_sg_dma_buf_size,
i, 0);
if (!phba->lpfc_sg_dma_buf_pool)
goto fail;
if (!phba->lpfc_scsi_dma_buf_pool)
goto fail;
} else {
phba->lpfc_sg_dma_buf_pool =
pci_pool_create("lpfc_sg_dma_buf_pool",
phba->pcidev, phba->cfg_sg_dma_buf_size,
align, 0);
if (!phba->lpfc_sg_dma_buf_pool)
goto fail;
}
phba->lpfc_mbuf_pool = pci_pool_create("lpfc_mbuf_pool", phba->pcidev,
LPFC_BPL_SIZE,
@ -170,12 +178,15 @@ lpfc_mem_alloc(struct lpfc_hba *phba, int align)
LPFC_DEVICE_DATA_POOL_SIZE,
sizeof(struct lpfc_device_data));
if (!phba->device_data_mem_pool)
goto fail_free_hrb_pool;
goto fail_free_drb_pool;
} else {
phba->device_data_mem_pool = NULL;
}
return 0;
fail_free_drb_pool:
pci_pool_destroy(phba->lpfc_drb_pool);
phba->lpfc_drb_pool = NULL;
fail_free_hrb_pool:
pci_pool_destroy(phba->lpfc_hrb_pool);
phba->lpfc_hrb_pool = NULL;
@ -197,8 +208,8 @@ lpfc_mem_alloc(struct lpfc_hba *phba, int align)
pci_pool_destroy(phba->lpfc_mbuf_pool);
phba->lpfc_mbuf_pool = NULL;
fail_free_dma_buf_pool:
pci_pool_destroy(phba->lpfc_scsi_dma_buf_pool);
phba->lpfc_scsi_dma_buf_pool = NULL;
pci_pool_destroy(phba->lpfc_sg_dma_buf_pool);
phba->lpfc_sg_dma_buf_pool = NULL;
fail:
return -ENOMEM;
}
@ -227,6 +238,9 @@ lpfc_mem_free(struct lpfc_hba *phba)
if (phba->lpfc_hrb_pool)
pci_pool_destroy(phba->lpfc_hrb_pool);
phba->lpfc_hrb_pool = NULL;
if (phba->txrdy_payload_pool)
pci_pool_destroy(phba->txrdy_payload_pool);
phba->txrdy_payload_pool = NULL;
if (phba->lpfc_hbq_pool)
pci_pool_destroy(phba->lpfc_hbq_pool);
@ -258,8 +272,8 @@ lpfc_mem_free(struct lpfc_hba *phba)
phba->lpfc_mbuf_pool = NULL;
/* Free DMA buffer memory pool */
pci_pool_destroy(phba->lpfc_scsi_dma_buf_pool);
phba->lpfc_scsi_dma_buf_pool = NULL;
pci_pool_destroy(phba->lpfc_sg_dma_buf_pool);
phba->lpfc_sg_dma_buf_pool = NULL;
/* Free Device Data memory pool */
if (phba->device_data_mem_pool) {
@ -282,7 +296,7 @@ lpfc_mem_free(struct lpfc_hba *phba)
* @phba: HBA to free memory for
*
* Description: Free memory from PCI and driver memory pools and also those
* used : lpfc_scsi_dma_buf_pool, lpfc_mbuf_pool, lpfc_hrb_pool. Frees
* used : lpfc_sg_dma_buf_pool, lpfc_mbuf_pool, lpfc_hrb_pool. Frees
* kmalloc-backed mempools for LPFC_MBOXQ_t and lpfc_nodelist. Also frees
* the VPI bitmask.
*
@ -430,6 +444,44 @@ lpfc_mbuf_free(struct lpfc_hba * phba, void *virt, dma_addr_t dma)
return;
}
/**
* lpfc_nvmet_buf_alloc - Allocate an nvmet_buf from the
* lpfc_sg_dma_buf_pool PCI pool
* @phba: HBA which owns the pool to allocate from
* @mem_flags: indicates if this is a priority (MEM_PRI) allocation
* @handle: used to return the DMA-mapped address of the nvmet_buf
*
* Description: Allocates a DMA-mapped buffer from the lpfc_sg_dma_buf_pool
* PCI pool. Allocates from generic pci_pool_alloc function.
*
* Returns:
* pointer to the allocated nvmet_buf on success
* NULL on failure
**/
void *
lpfc_nvmet_buf_alloc(struct lpfc_hba *phba, int mem_flags, dma_addr_t *handle)
{
	/* Straight pass-through to the shared sg DMA pool. The mem_flags
	 * argument is accepted for API symmetry with the other pool
	 * allocators but is not consulted here - allocation is always
	 * GFP_KERNEL. NOTE(review): confirm callers never pass MEM_PRI
	 * expecting different behavior.
	 */
	return pci_pool_alloc(phba->lpfc_sg_dma_buf_pool, GFP_KERNEL, handle);
}
/**
* lpfc_nvmet_buf_free - Free an nvmet_buf from the lpfc_sg_dma_buf_pool
* PCI pool
* @phba: HBA which owns the pool to return to
* @virt: nvmet_buf to free
* @dma: the DMA-mapped address of the lpfc_sg_dma_buf_pool to be freed
*
* Returns: None
**/
void
lpfc_nvmet_buf_free(struct lpfc_hba *phba, void *virt, dma_addr_t dma)
{
	/* Return the buffer to the same pool lpfc_nvmet_buf_alloc()
	 * drew it from.
	 */
	pci_pool_free(phba->lpfc_sg_dma_buf_pool, virt, dma);
}
/**
* lpfc_els_hbq_alloc - Allocate an HBQ buffer
* @phba: HBA to allocate HBQ buffer for
@ -458,7 +510,7 @@ lpfc_els_hbq_alloc(struct lpfc_hba *phba)
kfree(hbqbp);
return NULL;
}
hbqbp->size = LPFC_BPL_SIZE;
hbqbp->total_size = LPFC_BPL_SIZE;
return hbqbp;
}
@ -518,7 +570,7 @@ lpfc_sli4_rb_alloc(struct lpfc_hba *phba)
kfree(dma_buf);
return NULL;
}
dma_buf->size = LPFC_BPL_SIZE;
dma_buf->total_size = LPFC_DATA_BUF_SIZE;
return dma_buf;
}
@ -540,7 +592,134 @@ lpfc_sli4_rb_free(struct lpfc_hba *phba, struct hbq_dmabuf *dmab)
pci_pool_free(phba->lpfc_hrb_pool, dmab->hbuf.virt, dmab->hbuf.phys);
pci_pool_free(phba->lpfc_drb_pool, dmab->dbuf.virt, dmab->dbuf.phys);
kfree(dmab);
return;
}
/**
* lpfc_sli4_nvmet_alloc - Allocate an SLI4 Receive buffer
* @phba: HBA to allocate a receive buffer for
*
* Description: Allocates a DMA-mapped receive buffer from the lpfc_hrb_pool PCI
* pool along with a non-DMA-mapped container for it.
*
* Notes: Not interrupt-safe. Must be called with no locks held.
*
* Returns:
* pointer to HBQ on success
* NULL on failure
**/
struct rqb_dmabuf *
lpfc_sli4_nvmet_alloc(struct lpfc_hba *phba)
{
struct rqb_dmabuf *dma_buf;
struct lpfc_iocbq *nvmewqe;
union lpfc_wqe128 *wqe;
dma_buf = kzalloc(sizeof(struct rqb_dmabuf), GFP_KERNEL);
if (!dma_buf)
return NULL;
dma_buf->hbuf.virt = pci_pool_alloc(phba->lpfc_hrb_pool, GFP_KERNEL,
&dma_buf->hbuf.phys);
if (!dma_buf->hbuf.virt) {
kfree(dma_buf);
return NULL;
}
dma_buf->dbuf.virt = pci_pool_alloc(phba->lpfc_drb_pool, GFP_KERNEL,
&dma_buf->dbuf.phys);
if (!dma_buf->dbuf.virt) {
pci_pool_free(phba->lpfc_hrb_pool, dma_buf->hbuf.virt,
dma_buf->hbuf.phys);
kfree(dma_buf);
return NULL;
}
dma_buf->total_size = LPFC_DATA_BUF_SIZE;
dma_buf->context = kzalloc(sizeof(struct lpfc_nvmet_rcv_ctx),
GFP_KERNEL);
if (!dma_buf->context) {
pci_pool_free(phba->lpfc_drb_pool, dma_buf->dbuf.virt,
dma_buf->dbuf.phys);
pci_pool_free(phba->lpfc_hrb_pool, dma_buf->hbuf.virt,
dma_buf->hbuf.phys);
kfree(dma_buf);
return NULL;
}
dma_buf->iocbq = lpfc_sli_get_iocbq(phba);
dma_buf->iocbq->iocb_flag = LPFC_IO_NVMET;
if (!dma_buf->iocbq) {
kfree(dma_buf->context);
pci_pool_free(phba->lpfc_drb_pool, dma_buf->dbuf.virt,
dma_buf->dbuf.phys);
pci_pool_free(phba->lpfc_hrb_pool, dma_buf->hbuf.virt,
dma_buf->hbuf.phys);
kfree(dma_buf);
lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
"2621 Ran out of nvmet iocb/WQEs\n");
return NULL;
}
nvmewqe = dma_buf->iocbq;
wqe = (union lpfc_wqe128 *)&nvmewqe->wqe;
/* Initialize WQE */
memset(wqe, 0, sizeof(union lpfc_wqe));
/* Word 7 */
bf_set(wqe_ct, &wqe->generic.wqe_com, SLI4_CT_RPI);
bf_set(wqe_class, &wqe->generic.wqe_com, CLASS3);
bf_set(wqe_pu, &wqe->generic.wqe_com, 1);
/* Word 10 */
bf_set(wqe_nvme, &wqe->fcp_tsend.wqe_com, 1);
bf_set(wqe_ebde_cnt, &wqe->generic.wqe_com, 0);
bf_set(wqe_qosd, &wqe->generic.wqe_com, 0);
dma_buf->iocbq->context1 = NULL;
spin_lock(&phba->sli4_hba.sgl_list_lock);
dma_buf->sglq = __lpfc_sli_get_nvmet_sglq(phba, dma_buf->iocbq);
spin_unlock(&phba->sli4_hba.sgl_list_lock);
if (!dma_buf->sglq) {
lpfc_sli_release_iocbq(phba, dma_buf->iocbq);
kfree(dma_buf->context);
pci_pool_free(phba->lpfc_drb_pool, dma_buf->dbuf.virt,
dma_buf->dbuf.phys);
pci_pool_free(phba->lpfc_hrb_pool, dma_buf->hbuf.virt,
dma_buf->hbuf.phys);
kfree(dma_buf);
lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
"6132 Ran out of nvmet XRIs\n");
return NULL;
}
return dma_buf;
}
/**
* lpfc_sli4_nvmet_free - Frees a receive buffer
* @phba: HBA buffer was allocated for
* @dmab: DMA Buffer container returned by lpfc_sli4_nvmet_alloc
*
* Description: Frees both the container and the DMA-mapped buffers returned by
* lpfc_sli4_nvmet_alloc.
*
* Notes: Can be called with or without locks held.
*
* Returns: None
**/
void
lpfc_sli4_nvmet_free(struct lpfc_hba *phba, struct rqb_dmabuf *dmab)
{
	unsigned long flags;

	/* Mark the sglq inactive/free before putting it back on the
	 * NVMET sgl list under sgl_list_lock.
	 */
	__lpfc_clear_active_sglq(phba, dmab->sglq->sli4_lxritag);
	dmab->sglq->state = SGL_FREED;
	dmab->sglq->ndlp = NULL;

	spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock, flags);
	list_add_tail(&dmab->sglq->list, &phba->sli4_hba.lpfc_nvmet_sgl_list);
	spin_unlock_irqrestore(&phba->sli4_hba.sgl_list_lock, flags);

	/* Release in reverse order of lpfc_sli4_nvmet_alloc():
	 * iocbq, receive context, then the two DMA buffers and container.
	 */
	lpfc_sli_release_iocbq(phba, dmab->iocbq);
	kfree(dmab->context);
	pci_pool_free(phba->lpfc_hrb_pool, dmab->hbuf.virt, dmab->hbuf.phys);
	pci_pool_free(phba->lpfc_drb_pool, dmab->dbuf.virt, dmab->dbuf.phys);
	kfree(dmab);
}
/**
@ -565,13 +744,13 @@ lpfc_in_buf_free(struct lpfc_hba *phba, struct lpfc_dmabuf *mp)
return;
if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
hbq_entry = container_of(mp, struct hbq_dmabuf, dbuf);
/* Check whether HBQ is still in use */
spin_lock_irqsave(&phba->hbalock, flags);
if (!phba->hbq_in_use) {
spin_unlock_irqrestore(&phba->hbalock, flags);
return;
}
hbq_entry = container_of(mp, struct hbq_dmabuf, dbuf);
list_del(&hbq_entry->dbuf.list);
if (hbq_entry->tag == -1) {
(phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer)
@ -586,3 +765,48 @@ lpfc_in_buf_free(struct lpfc_hba *phba, struct lpfc_dmabuf *mp)
}
return;
}
/**
* lpfc_rq_buf_free - Free a RQ DMA buffer
* @phba: HBA buffer is associated with
* @mp: Buffer to free
*
* Description: Returns the given DMA buffer to its associated RQ by
* reposting it, so the buffer can be reused.
*
* Notes: Takes phba->hbalock. Can be called with or without other locks held.
*
* Returns: None
**/
void
lpfc_rq_buf_free(struct lpfc_hba *phba, struct lpfc_dmabuf *mp)
{
	struct lpfc_rqb *rqbp;
	struct lpfc_rqe hrqe;
	struct lpfc_rqe drqe;
	struct rqb_dmabuf *rqb_entry;
	unsigned long flags;
	int rc;

	if (!mp)
		return;

	/* Recover the containing rqb_dmabuf from its header-buffer member */
	rqb_entry = container_of(mp, struct rqb_dmabuf, hbuf);
	rqbp = rqb_entry->hrq->rqbp;

	spin_lock_irqsave(&phba->hbalock, flags);
	list_del(&rqb_entry->hbuf.list);
	/* Build header/data RQEs from the buffer's DMA addresses and
	 * repost the pair to the hardware RQ.
	 */
	hrqe.address_lo = putPaddrLow(rqb_entry->hbuf.phys);
	hrqe.address_hi = putPaddrHigh(rqb_entry->hbuf.phys);
	drqe.address_lo = putPaddrLow(rqb_entry->dbuf.phys);
	drqe.address_hi = putPaddrHigh(rqb_entry->dbuf.phys);
	rc = lpfc_sli4_rq_put(rqb_entry->hrq, rqb_entry->drq, &hrqe, &drqe);
	if (rc < 0) {
		/* Repost failed: hand the buffer back to its free routine */
		(rqbp->rqb_free_buffer)(phba, rqb_entry);
	} else {
		/* Reposted: track it on the software buffer list again */
		list_add_tail(&rqb_entry->hbuf.list, &rqbp->rqb_buffer_list);
		rqbp->buffer_count++;
	}
	spin_unlock_irqrestore(&phba->hbalock, flags);
}

View File

@ -1,9 +1,11 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
* Copyright (C) 2017 Broadcom. All Rights Reserved. The term *
* Broadcom refers to Broadcom Limited and/or its subsidiaries. *
* Copyright (C) 2010 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* www.broadcom.com *
* *
* This program is free software; you can redistribute it and/or *
* modify it under the terms of version 2 of the GNU General *

View File

@ -1,9 +1,11 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
* Copyright (C) 2017 Broadcom. All Rights Reserved. The term *
* Broadcom refers to Broadcom Limited and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* www.broadcom.com *
* Portions Copyright (C) 2004-2005 Christoph Hellwig *
* *
* This program is free software; you can redistribute it and/or *
@ -28,6 +30,9 @@
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/fc/fc_fs.h>
#include <linux/nvme-fc-driver.h>
#include "lpfc_hw4.h"
#include "lpfc_hw.h"
@ -35,8 +40,9 @@
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
#include "lpfc_scsi.h"
#include "lpfc_nvme.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
@ -204,10 +210,11 @@ int
lpfc_els_abort(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
{
LIST_HEAD(abort_list);
struct lpfc_sli *psli = &phba->sli;
struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING];
struct lpfc_sli_ring *pring;
struct lpfc_iocbq *iocb, *next_iocb;
pring = lpfc_phba_elsring(phba);
/* Abort outstanding I/O on NPort <nlp_DID> */
lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_DISCOVERY,
"2819 Abort outstanding I/O on NPort x%x "
@ -283,6 +290,7 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
uint32_t ed_tov;
LPFC_MBOXQ_t *mbox;
struct ls_rjt stat;
uint32_t vid, flag;
int rc;
memset(&stat, 0, sizeof (struct ls_rjt));
@ -418,6 +426,15 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
lpfc_can_disctmo(vport);
}
ndlp->nlp_flag &= ~NLP_SUPPRESS_RSP;
if ((phba->sli.sli_flag & LPFC_SLI_SUPPRESS_RSP) &&
sp->cmn.valid_vendor_ver_level) {
vid = be32_to_cpu(sp->un.vv.vid);
flag = be32_to_cpu(sp->un.vv.flags);
if ((vid == LPFC_VV_EMLX_ID) && (flag & LPFC_VV_SUPPRESS_RSP))
ndlp->nlp_flag |= NLP_SUPPRESS_RSP;
}
mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!mbox)
goto out;
@ -707,6 +724,7 @@ static void
lpfc_rcv_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
struct lpfc_iocbq *cmdiocb)
{
struct lpfc_hba *phba = vport->phba;
struct lpfc_dmabuf *pcmd;
uint32_t *lp;
PRLI *npr;
@ -720,16 +738,32 @@ lpfc_rcv_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR);
ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
ndlp->nlp_flag &= ~NLP_FIRSTBURST;
if (npr->prliType == PRLI_FCP_TYPE) {
if (npr->initiatorFunc)
ndlp->nlp_type |= NLP_FCP_INITIATOR;
if ((npr->prliType == PRLI_FCP_TYPE) ||
(npr->prliType == PRLI_NVME_TYPE)) {
if (npr->initiatorFunc) {
if (npr->prliType == PRLI_FCP_TYPE)
ndlp->nlp_type |= NLP_FCP_INITIATOR;
if (npr->prliType == PRLI_NVME_TYPE)
ndlp->nlp_type |= NLP_NVME_INITIATOR;
}
if (npr->targetFunc) {
ndlp->nlp_type |= NLP_FCP_TARGET;
if (npr->prliType == PRLI_FCP_TYPE)
ndlp->nlp_type |= NLP_FCP_TARGET;
if (npr->prliType == PRLI_NVME_TYPE)
ndlp->nlp_type |= NLP_NVME_TARGET;
if (npr->writeXferRdyDis)
ndlp->nlp_flag |= NLP_FIRSTBURST;
}
if (npr->Retry)
ndlp->nlp_fcp_info |= NLP_FCP_2_DEVICE;
/* If this driver is in nvme target mode, set the ndlp's fc4
* type to NVME provided the PRLI response claims NVME FC4
* type. Target mode does not issue gft_id so doesn't get
* the fc4 type set until now.
*/
if ((phba->nvmet_support) && (npr->prliType == PRLI_NVME_TYPE))
ndlp->nlp_fc4_type |= NLP_FC4_NVME;
}
if (rport) {
/* We need to update the rport role values */
@ -743,7 +777,8 @@ lpfc_rcv_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
"rport rolechg: role:x%x did:x%x flg:x%x",
roles, ndlp->nlp_DID, ndlp->nlp_flag);
fc_remote_port_rolechg(rport, roles);
if (phba->cfg_enable_fc4_type != LPFC_ENABLE_NVME)
fc_remote_port_rolechg(rport, roles);
}
}
@ -1026,6 +1061,7 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport,
struct lpfc_iocbq *cmdiocb, *rspiocb;
struct lpfc_dmabuf *pcmd, *prsp, *mp;
uint32_t *lp;
uint32_t vid, flag;
IOCB_t *irsp;
struct serv_parm *sp;
uint32_t ed_tov;
@ -1094,6 +1130,16 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport,
ed_tov = (phba->fc_edtov + 999999) / 1000000;
}
ndlp->nlp_flag &= ~NLP_SUPPRESS_RSP;
if ((phba->sli.sli_flag & LPFC_SLI_SUPPRESS_RSP) &&
sp->cmn.valid_vendor_ver_level) {
vid = be32_to_cpu(sp->un.vv.vid);
flag = be32_to_cpu(sp->un.vv.flags);
if ((vid == LPFC_VV_EMLX_ID) &&
(flag & LPFC_VV_SUPPRESS_RSP))
ndlp->nlp_flag |= NLP_SUPPRESS_RSP;
}
/*
* Use the larger EDTOV
* RATOV = 2 * EDTOV for pt-to-pt
@ -1489,8 +1535,38 @@ lpfc_rcv_prli_reglogin_issue(struct lpfc_vport *vport,
uint32_t evt)
{
struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
struct ls_rjt stat;
if (vport->phba->nvmet_support) {
/* NVME Target mode. Handle and respond to the PRLI and
* transition to UNMAPPED provided the RPI has completed
* registration.
*/
if (ndlp->nlp_flag & NLP_RPI_REGISTERED) {
lpfc_rcv_prli(vport, ndlp, cmdiocb);
lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp);
lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
} else {
/* RPI registration has not completed. Reject the PRLI
* to prevent an illegal state transition when the
* rpi registration does complete.
*/
lpfc_printf_vlog(vport, KERN_WARNING, LOG_NVME_DISC,
"6115 NVMET ndlp rpi %d state "
"unknown, state x%x flags x%08x\n",
ndlp->nlp_rpi, ndlp->nlp_state,
ndlp->nlp_flag);
memset(&stat, 0, sizeof(struct ls_rjt));
stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
stat.un.b.lsRjtRsnCodeExp = LSEXP_CMD_IN_PROGRESS;
lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb,
ndlp, NULL);
}
} else {
/* Initiator mode. */
lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp);
}
lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp);
return ndlp->nlp_state;
}
@ -1573,9 +1649,11 @@ lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_vport *vport,
uint32_t evt)
{
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_hba *phba = vport->phba;
LPFC_MBOXQ_t *pmb = (LPFC_MBOXQ_t *) arg;
MAILBOX_t *mb = &pmb->u.mb;
uint32_t did = mb->un.varWords[1];
int rc = 0;
if (mb->mbxStatus) {
/* RegLogin failed */
@ -1610,19 +1688,55 @@ lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_vport *vport,
}
/* SLI4 ports have preallocated logical rpis. */
if (vport->phba->sli_rev < LPFC_SLI_REV4)
if (phba->sli_rev < LPFC_SLI_REV4)
ndlp->nlp_rpi = mb->un.varWords[0];
ndlp->nlp_flag |= NLP_RPI_REGISTERED;
/* Only if we are not a fabric nport do we issue PRLI */
if (!(ndlp->nlp_type & NLP_FABRIC)) {
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
"3066 RegLogin Complete on x%x x%x x%x\n",
did, ndlp->nlp_type, ndlp->nlp_fc4_type);
if (!(ndlp->nlp_type & NLP_FABRIC) &&
(phba->nvmet_support == 0)) {
/* The driver supports FCP and NVME concurrently. If the
* ndlp's nlp_fc4_type is still zero, the driver doesn't
* know what PRLI to send yet. Figure that out now and
* call PRLI depending on the outcome.
*/
if (vport->fc_flag & FC_PT2PT) {
/* If we are pt2pt, there is no Fabric to determine
* the FC4 type of the remote nport. So if NVME
* is configured try it.
*/
ndlp->nlp_fc4_type |= NLP_FC4_FCP;
if ((phba->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
(phba->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) {
ndlp->nlp_fc4_type |= NLP_FC4_NVME;
/* We need to update the localport also */
lpfc_nvme_update_localport(vport);
}
} else if (ndlp->nlp_fc4_type == 0) {
rc = lpfc_ns_cmd(vport, SLI_CTNS_GFT_ID,
0, ndlp->nlp_DID);
return ndlp->nlp_state;
}
ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE);
lpfc_issue_els_prli(vport, ndlp, 0);
} else {
ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
if ((vport->fc_flag & FC_PT2PT) && phba->nvmet_support)
phba->targetport->port_id = vport->fc_myDID;
/* Only Fabric ports should transition. NVME target
* must complete PRLI.
*/
if (ndlp->nlp_type & NLP_FABRIC) {
ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
}
}
return ndlp->nlp_state;
}
@ -1663,7 +1777,14 @@ lpfc_device_recov_reglogin_issue(struct lpfc_vport *vport,
ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
spin_lock_irq(shost->host_lock);
ndlp->nlp_flag |= NLP_IGNR_REG_CMPL;
/* If we are a target we won't immediately transition into PRLI,
* so if REG_LOGIN already completed we don't need to ignore it.
*/
if (!(ndlp->nlp_flag & NLP_RPI_REGISTERED) ||
!vport->phba->nvmet_support)
ndlp->nlp_flag |= NLP_IGNR_REG_CMPL;
ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
spin_unlock_irq(shost->host_lock);
lpfc_disc_set_adisc(vport, ndlp);
@ -1739,10 +1860,23 @@ lpfc_cmpl_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
struct lpfc_hba *phba = vport->phba;
IOCB_t *irsp;
PRLI *npr;
struct lpfc_nvme_prli *nvpr;
void *temp_ptr;
cmdiocb = (struct lpfc_iocbq *) arg;
rspiocb = cmdiocb->context_un.rsp_iocb;
npr = (PRLI *)lpfc_check_elscmpl_iocb(phba, cmdiocb, rspiocb);
/* A solicited PRLI is either FCP or NVME. The PRLI cmd/rsp
* format is different so NULL the two PRLI types so that the
* driver correctly gets the correct context.
*/
npr = NULL;
nvpr = NULL;
temp_ptr = lpfc_check_elscmpl_iocb(phba, cmdiocb, rspiocb);
if (cmdiocb->iocb_flag & LPFC_PRLI_FCP_REQ)
npr = (PRLI *) temp_ptr;
else if (cmdiocb->iocb_flag & LPFC_PRLI_NVME_REQ)
nvpr = (struct lpfc_nvme_prli *) temp_ptr;
irsp = &rspiocb->iocb;
if (irsp->ulpStatus) {
@ -1750,7 +1884,21 @@ lpfc_cmpl_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
vport->cfg_restrict_login) {
goto out;
}
/* The LS Req had some error. Don't let this be a
* target.
*/
if ((ndlp->fc4_prli_sent == 1) &&
(ndlp->nlp_state == NLP_STE_PRLI_ISSUE) &&
(ndlp->nlp_type & (NLP_FCP_TARGET | NLP_FCP_INITIATOR)))
/* The FCP PRLI completed successfully but
* the NVME PRLI failed. Since they are sent in
* succession, allow the FCP to complete.
*/
goto out_err;
ndlp->nlp_prev_state = NLP_STE_PRLI_ISSUE;
ndlp->nlp_type |= NLP_FCP_INITIATOR;
lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
return ndlp->nlp_state;
}
@ -1758,9 +1906,16 @@ lpfc_cmpl_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
/* Check out PRLI rsp */
ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR);
ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
/* NVME or FCP first burst must be negotiated for each PRLI. */
ndlp->nlp_flag &= ~NLP_FIRSTBURST;
if ((npr->acceptRspCode == PRLI_REQ_EXECUTED) &&
ndlp->nvme_fb_size = 0;
if (npr && (npr->acceptRspCode == PRLI_REQ_EXECUTED) &&
(npr->prliType == PRLI_FCP_TYPE)) {
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
"6028 FCP NPR PRLI Cmpl Init %d Target %d\n",
npr->initiatorFunc,
npr->targetFunc);
if (npr->initiatorFunc)
ndlp->nlp_type |= NLP_FCP_INITIATOR;
if (npr->targetFunc) {
@ -1770,6 +1925,49 @@ lpfc_cmpl_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
}
if (npr->Retry)
ndlp->nlp_fcp_info |= NLP_FCP_2_DEVICE;
/* PRLI completed. Decrement count. */
ndlp->fc4_prli_sent--;
} else if (nvpr &&
(bf_get_be32(prli_acc_rsp_code, nvpr) ==
PRLI_REQ_EXECUTED) &&
(bf_get_be32(prli_type_code, nvpr) ==
PRLI_NVME_TYPE)) {
/* Complete setting up the remote ndlp personality. */
if (bf_get_be32(prli_init, nvpr))
ndlp->nlp_type |= NLP_NVME_INITIATOR;
/* Target driver cannot solicit NVME FB. */
if (bf_get_be32(prli_tgt, nvpr)) {
ndlp->nlp_type |= NLP_NVME_TARGET;
if ((bf_get_be32(prli_fba, nvpr) == 1) &&
(bf_get_be32(prli_fb_sz, nvpr) > 0) &&
(phba->cfg_nvme_enable_fb) &&
(!phba->nvmet_support)) {
/* Both sides support FB. The target's first
* burst size is a 512 byte encoded value.
*/
ndlp->nlp_flag |= NLP_FIRSTBURST;
ndlp->nvme_fb_size = bf_get_be32(prli_fb_sz,
nvpr);
}
}
if (bf_get_be32(prli_recov, nvpr))
ndlp->nlp_fcp_info |= NLP_FCP_2_DEVICE;
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
"6029 NVME PRLI Cmpl w1 x%08x "
"w4 x%08x w5 x%08x flag x%x, "
"fcp_info x%x nlp_type x%x\n",
be32_to_cpu(nvpr->word1),
be32_to_cpu(nvpr->word4),
be32_to_cpu(nvpr->word5),
ndlp->nlp_flag, ndlp->nlp_fcp_info,
ndlp->nlp_type);
/* PRLI completed. Decrement count. */
ndlp->fc4_prli_sent--;
}
if (!(ndlp->nlp_type & NLP_FCP_TARGET) &&
(vport->port_type == LPFC_NPIV_PORT) &&
@ -1785,11 +1983,24 @@ out:
return ndlp->nlp_state;
}
ndlp->nlp_prev_state = NLP_STE_PRLI_ISSUE;
if (ndlp->nlp_type & NLP_FCP_TARGET)
lpfc_nlp_set_state(vport, ndlp, NLP_STE_MAPPED_NODE);
else
lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
out_err:
/* The ndlp state cannot move to MAPPED or UNMAPPED before all PRLIs
* are complete.
*/
if (ndlp->fc4_prli_sent == 0) {
ndlp->nlp_prev_state = NLP_STE_PRLI_ISSUE;
if (ndlp->nlp_type & (NLP_FCP_TARGET | NLP_NVME_TARGET))
lpfc_nlp_set_state(vport, ndlp, NLP_STE_MAPPED_NODE);
else
lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
} else
lpfc_printf_vlog(vport,
KERN_INFO, LOG_ELS,
"3067 PRLI's still outstanding "
"on x%06x - count %d, Pend Node Mode "
"transition...\n",
ndlp->nlp_DID, ndlp->fc4_prli_sent);
return ndlp->nlp_state;
}
@ -2104,7 +2315,7 @@ lpfc_rcv_prlo_mapped_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
/* flush the target */
lpfc_sli_abort_iocb(vport, &phba->sli.ring[phba->sli.fcp_ring],
lpfc_sli_abort_iocb(vport, &phba->sli.sli3_ring[LPFC_FCP_RING],
ndlp->nlp_sid, 0, LPFC_CTX_TGT);
/* Treat like rcv logo */

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,103 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
* Copyright (C) 2017 Broadcom. All Rights Reserved. The term *
* Broadcom refers to Broadcom Limited and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.broadcom.com *
* Portions Copyright (C) 2004-2005 Christoph Hellwig *
* *
* This program is free software; you can redistribute it and/or *
* modify it under the terms of version 2 of the GNU General *
* Public License as published by the Free Software Foundation. *
* This program is distributed in the hope that it will be useful. *
* ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
* WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
* FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
* DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
* TO BE LEGALLY INVALID. See the GNU General Public License for *
* more details, a copy of which can be found in the file COPYING *
* included with this package. *
********************************************************************/
/*
 * NVME initiator sizing constants: scatter-gather segment limits per I/O,
 * NVMET post-buffer counts, WQ entry count and encapsulated-response length.
 * (Exact hardware meaning of each value not visible here — see lpfc_nvme.c.)
 */
#define LPFC_NVME_MIN_SEGS 16
#define LPFC_NVME_DEFAULT_SEGS 66 /* 256K IOs - 64 + 2 */
#define LPFC_NVME_MAX_SEGS 510
#define LPFC_NVMET_MIN_POSTBUF 16
#define LPFC_NVMET_DEFAULT_POSTBUF 1024
#define LPFC_NVMET_MAX_POSTBUF 4096
#define LPFC_NVME_WQSIZE 256

#define LPFC_NVME_ERSP_LEN 0x20
/*
 * Per-queue handle: associates a driver WQ index with the queue index
 * passed at create time and the CPU that performed the create.
 */
struct lpfc_nvme_qhandle {
	uint32_t index;		/* WQ index to use */
	uint32_t qidx;		/* queue index passed to create */
	uint32_t cpu_id;	/* current cpu id at time of create */
};
/* Declare nvme-based local and remote port definitions. */

/*
 * Driver-private state for an NVME local port, tied to a vport.
 * rport_list presumably links lpfc_nvme_rport entries — confirm against
 * the registration code in lpfc_nvme.c.
 */
struct lpfc_nvme_lport {
	struct lpfc_vport *vport;		/* owning virtual port */
	struct list_head rport_list;		/* list of remote ports */
	struct completion lport_unreg_done;	/* completion for lport unregister */
	/* Add stats counters here */
};
/*
 * Driver-private state for an NVME remote port: links the FC transport's
 * remote-port object to the driver's discovery node (ndlp) and owning lport.
 */
struct lpfc_nvme_rport {
	struct list_head list;			/* linkage, presumably on lport->rport_list */
	struct lpfc_nvme_lport *lport;		/* owning local port */
	struct nvme_fc_remote_port *remoteport;	/* FC-NVME transport remote port */
	struct lpfc_nodelist *ndlp;		/* associated discovery node */
	struct completion rport_unreg_done;	/* completion for rport unregister */
};
/*
 * Per-I/O buffer context for an NVME initiator command (the NVME analogue
 * of struct lpfc_scsi_buf): carries the transport request, DMA buffer,
 * SGL, the embedded iocbq and completion status fields.
 */
struct lpfc_nvme_buf {
	struct list_head list;			/* buffer list linkage */
	struct nvmefc_fcp_req *nvmeCmd;		/* transport FCP request */
	struct lpfc_nvme_rport *nrport;		/* target remote port */

	uint32_t timeout;

	uint16_t flags;	/* TBD convert exch_busy to flags */
#define LPFC_SBUF_XBUSY		0x1	/* SLI4 hba reported XB on WCQE cmpl */
	uint16_t exch_busy;	/* SLI4 hba reported XB on complete WCQE */
	uint16_t status;	/* From IOCB Word 7- ulpStatus */
	uint16_t cpu;
	uint16_t qidx;
	uint16_t sqid;
	uint32_t result;	/* From IOCB Word 4. */

	uint32_t seg_cnt;	/* Number of scatter-gather segments returned by
				 * dma_map_sg. The driver needs this for calls
				 * to dma_unmap_sg.
				 */
	dma_addr_t nonsg_phys;	/* Non scatter-gather physical address. */

	/*
	 * data and dma_handle are the kernel virtual and bus address of the
	 * dma-able buffer containing the fcp_cmd, fcp_rsp and a scatter
	 * gather bde list that supports the sg_tablesize value.
	 */
	void *data;
	dma_addr_t dma_handle;

	struct sli4_sge *nvme_sgl;	/* virtual address of the SGL */
	dma_addr_t dma_phys_sgl;	/* bus address of the SGL */

	/* cur_iocbq has phys of the dma-able buffer.
	 * Iotag is in here
	 */
	struct lpfc_iocbq cur_iocbq;

	wait_queue_head_t *waitq;	/* waiter to wake on completion, if any */
	unsigned long start_time;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	/* Timestamps for per-I/O latency tracing (debugfs builds only). */
	uint64_t ts_cmd_start;
	uint64_t ts_last_cmd;
	uint64_t ts_cmd_wqput;
	uint64_t ts_isr_cmpl;
	uint64_t ts_data_nvme;
#endif
};

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,116 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
* Copyright (C) 2017 Broadcom. All Rights Reserved. The term *
* Broadcom refers to Broadcom Limited and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.broadcom.com *
* Portions Copyright (C) 2004-2005 Christoph Hellwig *
* *
* This program is free software; you can redistribute it and/or *
* modify it under the terms of version 2 of the GNU General *
* Public License as published by the Free Software Foundation. *
* This program is distributed in the hope that it will be useful. *
* ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
* WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
* FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
* DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
* TO BE LEGALLY INVALID. See the GNU General Public License for *
* more details, a copy of which can be found in the file COPYING *
* included with this package. *
********************************************************************/
/* NVME target sizing constants (scatter-gather segment limits per I/O). */
#define LPFC_NVMET_MIN_SEGS 16
#define LPFC_NVMET_DEFAULT_SEGS 64 /* 256K IOs */
#define LPFC_NVMET_MAX_SEGS 510
#define LPFC_NVMET_SUCCESS_LEN 12
/* Used for NVME Target */

/*
 * Driver-private context for the NVME target port.  Mostly atomic
 * statistics counters; each group's comment names the function that
 * updates it.
 */
struct lpfc_nvmet_tgtport {
	struct lpfc_hba *phba;			/* owning adapter */
	struct completion tport_unreg_done;	/* completion for tgtport unregister */

	/* Stats counters - lpfc_nvmet_unsol_ls_buffer */
	atomic_t rcv_ls_req_in;
	atomic_t rcv_ls_req_out;
	atomic_t rcv_ls_req_drop;
	atomic_t xmt_ls_abort;

	/* Stats counters - lpfc_nvmet_xmt_ls_rsp */
	atomic_t xmt_ls_rsp;
	atomic_t xmt_ls_drop;

	/* Stats counters - lpfc_nvmet_xmt_ls_rsp_cmp */
	atomic_t xmt_ls_rsp_error;
	atomic_t xmt_ls_rsp_cmpl;

	/* Stats counters - lpfc_nvmet_unsol_fcp_buffer */
	atomic_t rcv_fcp_cmd_in;
	atomic_t rcv_fcp_cmd_out;
	atomic_t rcv_fcp_cmd_drop;

	/* Stats counters - lpfc_nvmet_xmt_fcp_op */
	atomic_t xmt_fcp_abort;
	atomic_t xmt_fcp_drop;
	atomic_t xmt_fcp_read_rsp;
	atomic_t xmt_fcp_read;
	atomic_t xmt_fcp_write;
	atomic_t xmt_fcp_rsp;

	/* Stats counters - lpfc_nvmet_xmt_fcp_op_cmp */
	atomic_t xmt_fcp_rsp_cmpl;
	atomic_t xmt_fcp_rsp_error;
	atomic_t xmt_fcp_rsp_drop;

	/* Stats counters - lpfc_nvmet_unsol_issue_abort */
	atomic_t xmt_abort_rsp;
	atomic_t xmt_abort_rsp_error;

	/* Stats counters - lpfc_nvmet_xmt_abort_cmp */
	atomic_t xmt_abort_cmpl;
};
/*
 * Per-exchange receive context for NVMET: wraps the transport's LS or FCP
 * request (union ctx), the WQEs used to respond/abort, and the driver's
 * state machine (LPFC_NVMET_STE_*) and flags for the exchange.
 */
struct lpfc_nvmet_rcv_ctx {
	union {
		struct nvmefc_tgt_ls_req ls_req;	/* unsolicited LS request */
		struct nvmefc_tgt_fcp_req fcp_req;	/* unsolicited FCP command */
	} ctx;
	struct lpfc_hba *phba;		/* owning adapter */
	struct lpfc_iocbq *wqeq;	/* WQE used for the response */
	struct lpfc_iocbq *abort_wqeq;	/* WQE reserved for abort */
	dma_addr_t txrdy_phys;		/* bus address of txrdy payload */
	uint32_t *txrdy;		/* virtual address of txrdy payload */
	uint32_t sid;			/* presumably source FC ID — confirm in lpfc_nvmet.c */
	uint32_t offset;
	uint16_t oxid;			/* originator exchange id of the command */
	uint16_t size;
	uint16_t entry_cnt;
	uint16_t cpu;
	uint16_t state;
	/* States */
#define LPFC_NVMET_STE_FREE		0
#define LPFC_NVMET_STE_RCV		1
#define LPFC_NVMET_STE_DATA		2
#define LPFC_NVMET_STE_ABORT		3
#define LPFC_NVMET_STE_RSP		4
#define LPFC_NVMET_STE_DONE		5
	uint16_t flag;
#define LPFC_NVMET_IO_INP		1	/* I/O in progress on exchange */
#define LPFC_NVMET_ABORT_OP		2	/* abort outstanding on exchange */
	struct rqb_dmabuf *rqb_buffer;	/* RQ buffer holding the received cmd */
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	/* Timestamps for per-exchange latency tracing (debugfs builds only). */
	uint64_t ts_isr_cmd;
	uint64_t ts_cmd_nvme;
	uint64_t ts_nvme_data;
	uint64_t ts_data_wqput;
	uint64_t ts_isr_data;
	uint64_t ts_data_nvme;
	uint64_t ts_nvme_status;
	uint64_t ts_status_wqput;
	uint64_t ts_isr_status;
	uint64_t ts_status_nvme;
#endif
};

View File

@ -1,9 +1,11 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
* Copyright (C) 2017 Broadcom. All Rights Reserved. The term *
* Broadcom refers to Broadcom Limited and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* www.broadcom.com *
* Portions Copyright (C) 2004-2005 Christoph Hellwig *
* *
* This program is free software; you can redistribute it and/or *
@ -413,7 +415,7 @@ lpfc_new_scsi_buf_s3(struct lpfc_vport *vport, int num_to_alloc)
* struct fcp_cmnd, struct fcp_rsp and the number of bde's
* necessary to support the sg_tablesize.
*/
psb->data = pci_pool_zalloc(phba->lpfc_scsi_dma_buf_pool,
psb->data = pci_pool_zalloc(phba->lpfc_sg_dma_buf_pool,
GFP_KERNEL, &psb->dma_handle);
if (!psb->data) {
kfree(psb);
@ -424,8 +426,8 @@ lpfc_new_scsi_buf_s3(struct lpfc_vport *vport, int num_to_alloc)
/* Allocate iotag for psb->cur_iocbq. */
iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq);
if (iotag == 0) {
pci_pool_free(phba->lpfc_scsi_dma_buf_pool,
psb->data, psb->dma_handle);
pci_pool_free(phba->lpfc_sg_dma_buf_pool,
psb->data, psb->dma_handle);
kfree(psb);
break;
}
@ -522,6 +524,8 @@ lpfc_sli4_vport_delete_fcp_xri_aborted(struct lpfc_vport *vport)
struct lpfc_scsi_buf *psb, *next_psb;
unsigned long iflag = 0;
if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
return;
spin_lock_irqsave(&phba->hbalock, iflag);
spin_lock(&phba->sli4_hba.abts_scsi_buf_list_lock);
list_for_each_entry_safe(psb, next_psb,
@ -554,8 +558,10 @@ lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *phba,
int i;
struct lpfc_nodelist *ndlp;
int rrq_empty = 0;
struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
struct lpfc_sli_ring *pring = phba->sli4_hba.els_wq->pring;
if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
return;
spin_lock_irqsave(&phba->hbalock, iflag);
spin_lock(&phba->sli4_hba.abts_scsi_buf_list_lock);
list_for_each_entry_safe(psb, next_psb,
@ -819,7 +825,7 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
* for the struct fcp_cmnd, struct fcp_rsp and the number
* of bde's necessary to support the sg_tablesize.
*/
psb->data = pci_pool_zalloc(phba->lpfc_scsi_dma_buf_pool,
psb->data = pci_pool_zalloc(phba->lpfc_sg_dma_buf_pool,
GFP_KERNEL, &psb->dma_handle);
if (!psb->data) {
kfree(psb);
@ -832,7 +838,7 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
*/
if (phba->cfg_enable_bg && (((unsigned long)(psb->data) &
(unsigned long)(SLI4_PAGE_SIZE - 1)) != 0)) {
pci_pool_free(phba->lpfc_scsi_dma_buf_pool,
pci_pool_free(phba->lpfc_sg_dma_buf_pool,
psb->data, psb->dma_handle);
kfree(psb);
break;
@ -841,8 +847,8 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
lxri = lpfc_sli4_next_xritag(phba);
if (lxri == NO_XRI) {
pci_pool_free(phba->lpfc_scsi_dma_buf_pool,
psb->data, psb->dma_handle);
pci_pool_free(phba->lpfc_sg_dma_buf_pool,
psb->data, psb->dma_handle);
kfree(psb);
break;
}
@ -850,8 +856,8 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
/* Allocate iotag for psb->cur_iocbq. */
iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq);
if (iotag == 0) {
pci_pool_free(phba->lpfc_scsi_dma_buf_pool,
psb->data, psb->dma_handle);
pci_pool_free(phba->lpfc_sg_dma_buf_pool,
psb->data, psb->dma_handle);
kfree(psb);
lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
"3368 Failed to allocate IOTAG for"
@ -920,7 +926,7 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
phba->sli4_hba.scsi_xri_cnt++;
spin_unlock_irq(&phba->scsi_buf_list_get_lock);
}
lpfc_printf_log(phba, KERN_INFO, LOG_BG,
lpfc_printf_log(phba, KERN_INFO, LOG_BG | LOG_FCP,
"3021 Allocate %d out of %d requested new SCSI "
"buffers\n", bcnt, num_to_alloc);
@ -3894,7 +3900,7 @@ int lpfc_sli4_scmd_to_wqidx_distr(struct lpfc_hba *phba,
}
}
chann = atomic_add_return(1, &phba->fcp_qidx);
chann = (chann % phba->cfg_fcp_io_channel);
chann = chann % phba->cfg_fcp_io_channel;
return chann;
}
@ -3925,6 +3931,8 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
struct Scsi_Host *shost;
uint32_t logit = LOG_FCP;
phba->fc4ScsiIoCmpls++;
/* Sanity check on return of outstanding command */
cmd = lpfc_cmd->pCmd;
if (!cmd)
@ -3967,6 +3975,7 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
lpfc_cmd->prot_data_segment = NULL;
}
#endif
if (pnode && NLP_CHK_NODE_ACT(pnode))
atomic_dec(&pnode->cmd_pending);
@ -4241,19 +4250,19 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
vport->cfg_first_burst_size;
}
fcp_cmnd->fcpCntl3 = WRITE_DATA;
phba->fc4OutputRequests++;
phba->fc4ScsiOutputRequests++;
} else {
iocb_cmd->ulpCommand = CMD_FCP_IREAD64_CR;
iocb_cmd->ulpPU = PARM_READ_CHECK;
fcp_cmnd->fcpCntl3 = READ_DATA;
phba->fc4InputRequests++;
phba->fc4ScsiInputRequests++;
}
} else {
iocb_cmd->ulpCommand = CMD_FCP_ICMND64_CR;
iocb_cmd->un.fcpi.fcpi_parm = 0;
iocb_cmd->ulpPU = 0;
fcp_cmnd->fcpCntl3 = 0;
phba->fc4ControlRequests++;
phba->fc4ScsiControlRequests++;
}
if (phba->sli_rev == 3 &&
!(phba->sli3_options & LPFC_SLI3_BG_ENABLED))
@ -4467,7 +4476,7 @@ static __inline__ void lpfc_poll_rearm_timer(struct lpfc_hba * phba)
unsigned long poll_tmo_expires =
(jiffies + msecs_to_jiffies(phba->cfg_poll_tmo));
if (!list_empty(&phba->sli.ring[LPFC_FCP_RING].txcmplq))
if (!list_empty(&phba->sli.sli3_ring[LPFC_FCP_RING].txcmplq))
mod_timer(&phba->fcp_poll_timer,
poll_tmo_expires);
}
@ -4497,7 +4506,7 @@ void lpfc_poll_timeout(unsigned long ptr)
if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
lpfc_sli_handle_fast_ring_event(phba,
&phba->sli.ring[LPFC_FCP_RING], HA_R0RE_REQ);
&phba->sli.sli3_ring[LPFC_FCP_RING], HA_R0RE_REQ);
if (phba->cfg_poll & DISABLE_FCP_RING_INT)
lpfc_poll_rearm_timer(phba);
@ -4561,7 +4570,7 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
if (lpfc_cmd == NULL) {
lpfc_rampdown_queue_depth(phba);
lpfc_printf_vlog(vport, KERN_INFO, LOG_MISC,
lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP_ERROR,
"0707 driver's buffer pool is empty, "
"IO busied\n");
goto out_host_busy;
@ -4636,7 +4645,7 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
}
if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
lpfc_sli_handle_fast_ring_event(phba,
&phba->sli.ring[LPFC_FCP_RING], HA_R0RE_REQ);
&phba->sli.sli3_ring[LPFC_FCP_RING], HA_R0RE_REQ);
if (phba->cfg_poll & DISABLE_FCP_RING_INT)
lpfc_poll_rearm_timer(phba);
@ -4681,7 +4690,7 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
IOCB_t *cmd, *icmd;
int ret = SUCCESS, status = 0;
struct lpfc_sli_ring *pring_s4;
int ring_number, ret_val;
int ret_val;
unsigned long flags, iflags;
DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq);
@ -4769,7 +4778,7 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
icmd->ulpClass = cmd->ulpClass;
/* ABTS WQE must go to the same WQ as the WQE to be aborted */
abtsiocb->fcp_wqidx = iocb->fcp_wqidx;
abtsiocb->hba_wqidx = iocb->hba_wqidx;
abtsiocb->iocb_flag |= LPFC_USE_FCPWQIDX;
if (iocb->iocb_flag & LPFC_IO_FOF)
abtsiocb->iocb_flag |= LPFC_IO_FOF;
@ -4782,8 +4791,11 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
abtsiocb->vport = vport;
if (phba->sli_rev == LPFC_SLI_REV4) {
ring_number = MAX_SLI3_CONFIGURED_RINGS + iocb->fcp_wqidx;
pring_s4 = &phba->sli.ring[ring_number];
pring_s4 = lpfc_sli4_calc_ring(phba, iocb);
if (pring_s4 == NULL) {
ret = FAILED;
goto out_unlock;
}
/* Note: both hbalock and ring_lock must be set here */
spin_lock_irqsave(&pring_s4->ring_lock, iflags);
ret_val = __lpfc_sli_issue_iocb(phba, pring_s4->ringno,
@ -4805,7 +4817,7 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
if (phba->cfg_poll & DISABLE_FCP_RING_INT)
lpfc_sli_handle_fast_ring_event(phba,
&phba->sli.ring[LPFC_FCP_RING], HA_R0RE_REQ);
&phba->sli.sli3_ring[LPFC_FCP_RING], HA_R0RE_REQ);
wait_for_cmpl:
lpfc_cmd->waitq = &waitq;
@ -5105,7 +5117,7 @@ lpfc_reset_flush_io_context(struct lpfc_vport *vport, uint16_t tgt_id,
cnt = lpfc_sli_sum_iocb(vport, tgt_id, lun_id, context);
if (cnt)
lpfc_sli_abort_taskmgmt(vport,
&phba->sli.ring[phba->sli.fcp_ring],
&phba->sli.sli3_ring[LPFC_FCP_RING],
tgt_id, lun_id, context);
later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
while (time_after(later, jiffies) && cnt) {
@ -5323,7 +5335,8 @@ lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
continue;
if (ndlp->nlp_state == NLP_STE_MAPPED_NODE &&
ndlp->nlp_sid == i &&
ndlp->rport) {
ndlp->rport &&
ndlp->nlp_type & NLP_FCP_TARGET) {
match = 1;
break;
}
@ -5534,7 +5547,7 @@ lpfc_slave_configure(struct scsi_device *sdev)
if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
lpfc_sli_handle_fast_ring_event(phba,
&phba->sli.ring[LPFC_FCP_RING], HA_R0RE_REQ);
&phba->sli.sli3_ring[LPFC_FCP_RING], HA_R0RE_REQ);
if (phba->cfg_poll & DISABLE_FCP_RING_INT)
lpfc_poll_rearm_timer(phba);
}
@ -5898,6 +5911,48 @@ lpfc_disable_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
return false;
}
/*
 * lpfc_no_command - stub queuecommand entry point
 *
 * Used by the NVME-only host template (lpfc_template_nvme below); always
 * pushes the command back to the midlayer as host-busy so no SCSI I/O is
 * ever serviced.
 */
static int
lpfc_no_command(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
{
	return SCSI_MLQUEUE_HOST_BUSY;
}
/*
 * lpfc_no_handler - stub SCSI error-handler entry point
 *
 * Shared by all eh_*_handler slots of lpfc_template_nvme; SCSI error
 * recovery is not applicable on an NVME-only port, so always report
 * failure.
 */
static int
lpfc_no_handler(struct scsi_cmnd *cmnd)
{
	return FAILED;
}
/*
 * lpfc_no_slave - stub slave_alloc/slave_configure entry point
 *
 * Rejects SCSI device instantiation on an NVME-only port.
 */
static int
lpfc_no_slave(struct scsi_device *sdev)
{
	return -ENODEV;
}
/*
 * SCSI host template used when the adapter runs NVME only: every I/O and
 * error-handling entry point is stubbed (lpfc_no_*) so the SCSI midlayer
 * can bind a host without any SCSI command ever being accepted.
 */
struct scsi_host_template lpfc_template_nvme = {
	.module			= THIS_MODULE,
	.name			= LPFC_DRIVER_NAME,
	.proc_name		= LPFC_DRIVER_NAME,
	.info			= lpfc_info,
	.queuecommand		= lpfc_no_command,
	.eh_abort_handler	= lpfc_no_handler,
	.eh_device_reset_handler = lpfc_no_handler,
	.eh_target_reset_handler = lpfc_no_handler,
	.eh_bus_reset_handler	= lpfc_no_handler,
	.eh_host_reset_handler	= lpfc_no_handler,
	.slave_alloc		= lpfc_no_slave,
	.slave_configure	= lpfc_no_slave,
	.scan_finished		= lpfc_scan_finished,
	.this_id		= -1,
	.sg_tablesize		= 1,
	.cmd_per_lun		= 1,
	.use_clustering		= ENABLE_CLUSTERING,
	.shost_attrs		= lpfc_hba_attrs,
	.max_sectors		= 0xFFFF,
	.vendor_id		= LPFC_NL_VENDOR_ID,
	.track_queue_depth	= 0,
};
struct scsi_host_template lpfc_template_s3 = {
.module = THIS_MODULE,
.name = LPFC_DRIVER_NAME,

View File

@ -1,9 +1,11 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
* Copyright (C) 2017 Broadcom. All Rights Reserved. The term *
* Broadcom refers to Broadcom Limited and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* www.broadcom.com *
* *
* This program is free software; you can redistribute it and/or *
* modify it under the terms of version 2 of the GNU General *
@ -135,6 +137,8 @@ struct lpfc_scsi_buf {
uint32_t timeout;
uint16_t flags; /* TBD convert exch_busy to flags */
#define LPFC_SBUF_XBUSY 0x1 /* SLI4 hba reported XB on WCQE cmpl */
uint16_t exch_busy; /* SLI4 hba reported XB on complete WCQE */
uint16_t status; /* From IOCB Word 7- ulpStatus */
uint32_t result; /* From IOCB Word 4. */
@ -164,6 +168,8 @@ struct lpfc_scsi_buf {
* Iotag is in here
*/
struct lpfc_iocbq cur_iocbq;
uint16_t cpu;
wait_queue_head_t *waitq;
unsigned long start_time;
@ -178,13 +184,15 @@ struct lpfc_scsi_buf {
#endif
};
#define LPFC_SCSI_DMA_EXT_SIZE 264
#define LPFC_BPL_SIZE 1024
#define MDAC_DIRECT_CMD 0x22
#define LPFC_SCSI_DMA_EXT_SIZE 264
#define LPFC_BPL_SIZE 1024
#define MDAC_DIRECT_CMD 0x22
#define FIND_FIRST_OAS_LUN 0
#define NO_MORE_OAS_LUN -1
#define NOT_OAS_ENABLED_LUN NO_MORE_OAS_LUN
#define FIND_FIRST_OAS_LUN 0
#define NO_MORE_OAS_LUN -1
#define NOT_OAS_ENABLED_LUN NO_MORE_OAS_LUN
#define TXRDY_PAYLOAD_LEN 12
int lpfc_sli4_scmd_to_wqidx_distr(struct lpfc_hba *phba,
struct lpfc_scsi_buf *lpfc_cmd);

File diff suppressed because it is too large Load Diff

View File

@ -1,9 +1,11 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
* Copyright (C) 2017 Broadcom. All Rights Reserved. The term *
* Broadcom refers to Broadcom Limited and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* www.broadcom.com *
* *
* This program is free software; you can redistribute it and/or *
* modify it under the terms of version 2 of the GNU General *
@ -54,9 +56,16 @@ struct lpfc_iocbq {
uint16_t iotag; /* pre-assigned IO tag */
uint16_t sli4_lxritag; /* logical pre-assigned XRI. */
uint16_t sli4_xritag; /* pre-assigned XRI, (OXID) tag. */
uint16_t hba_wqidx; /* index to HBA work queue */
struct lpfc_cq_event cq_event;
struct lpfc_wcqe_complete wcqe_cmpl; /* WQE cmpl */
uint64_t isr_timestamp;
IOCB_t iocb; /* IOCB cmd */
/* Be careful here */
union lpfc_wqe wqe; /* WQE cmd */
IOCB_t iocb; /* For IOCB cmd or if we want 128 byte WQE */
uint8_t rsvd2;
uint8_t priority; /* OAS priority */
uint8_t retry; /* retry counter for IOCB cmd - if needed */
uint32_t iocb_flag;
@ -82,9 +91,13 @@ struct lpfc_iocbq {
#define LPFC_IO_OAS 0x10000 /* OAS FCP IO */
#define LPFC_IO_FOF 0x20000 /* FOF FCP IO */
#define LPFC_IO_LOOPBACK 0x40000 /* Loopback IO */
#define LPFC_PRLI_NVME_REQ 0x80000 /* This is an NVME PRLI. */
#define LPFC_PRLI_FCP_REQ 0x100000 /* This is an NVME PRLI. */
#define LPFC_IO_NVME 0x200000 /* NVME FCP command */
#define LPFC_IO_NVME_LS 0x400000 /* NVME LS command */
#define LPFC_IO_NVMET 0x800000 /* NVMET command */
uint32_t drvrTimeout; /* driver timeout in seconds */
uint32_t fcp_wqidx; /* index to FCP work queue */
struct lpfc_vport *vport;/* virtual port pointer */
void *context1; /* caller context information */
void *context2; /* caller context information */
@ -97,12 +110,14 @@ struct lpfc_iocbq {
struct lpfc_node_rrq *rrq;
} context_un;
void (*fabric_iocb_cmpl) (struct lpfc_hba *, struct lpfc_iocbq *,
void (*fabric_iocb_cmpl)(struct lpfc_hba *, struct lpfc_iocbq *,
struct lpfc_iocbq *);
void (*wait_iocb_cmpl) (struct lpfc_hba *, struct lpfc_iocbq *,
void (*wait_iocb_cmpl)(struct lpfc_hba *, struct lpfc_iocbq *,
struct lpfc_iocbq *);
void (*iocb_cmpl) (struct lpfc_hba *, struct lpfc_iocbq *,
void (*iocb_cmpl)(struct lpfc_hba *, struct lpfc_iocbq *,
struct lpfc_iocbq *);
void (*wqe_cmpl)(struct lpfc_hba *, struct lpfc_iocbq *,
struct lpfc_wcqe_complete *);
};
#define SLI_IOCB_RET_IOCB 1 /* Return IOCB if cmd ring full */
@ -112,6 +127,14 @@ struct lpfc_iocbq {
#define IOCB_ERROR 2
#define IOCB_TIMEDOUT 3
#define SLI_WQE_RET_WQE 1 /* Return WQE if cmd ring full */
#define WQE_SUCCESS 0
#define WQE_BUSY 1
#define WQE_ERROR 2
#define WQE_TIMEDOUT 3
#define WQE_ABORTED 4
#define LPFC_MBX_WAKE 1
#define LPFC_MBX_IMED_UNREG 2
@ -297,12 +320,9 @@ struct lpfc_sli {
#define LPFC_BLOCK_MGMT_IO 0x800 /* Don't allow mgmt mbx or iocb cmds */
#define LPFC_MENLO_MAINT 0x1000 /* need for menl fw download */
#define LPFC_SLI_ASYNC_MBX_BLK 0x2000 /* Async mailbox is blocked */
#define LPFC_SLI_SUPPRESS_RSP 0x4000 /* Suppress RSP feature is supported */
struct lpfc_sli_ring *ring;
int fcp_ring; /* ring used for FCP initiator commands */
int next_ring;
int extra_ring; /* extra ring used for other protocols */
struct lpfc_sli_ring *sli3_ring;
struct lpfc_sli_stat slistat; /* SLI statistical info */
struct list_head mboxq;

View File

@ -1,9 +1,11 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
* Copyright (C) 2017 Broadcom. All Rights Reserved. The term *
* Broadcom refers to Broadcom Limited and/or its subsidiaries. *
* Copyright (C) 2009-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* www.broadcom.com *
* *
* This program is free software; you can redistribute it and/or *
* modify it under the terms of version 2 of the GNU General *
@ -35,9 +37,10 @@
#define LPFC_NEMBED_MBOX_SGL_CNT 254
/* Multi-queue arrangement for FCP EQ/CQ/WQ tuples */
#define LPFC_FCP_IO_CHAN_DEF 4
#define LPFC_FCP_IO_CHAN_MIN 1
#define LPFC_FCP_IO_CHAN_MAX 16
#define LPFC_HBA_IO_CHAN_MIN 0
#define LPFC_HBA_IO_CHAN_MAX 32
#define LPFC_FCP_IO_CHAN_DEF 4
#define LPFC_NVME_IO_CHAN_DEF 0
/* Number of channels used for Flash Optimized Fabric (FOF) operations */
@ -107,6 +110,9 @@ enum lpfc_sli4_queue_subtype {
LPFC_MBOX,
LPFC_FCP,
LPFC_ELS,
LPFC_NVME,
LPFC_NVMET,
LPFC_NVME_LS,
LPFC_USOL
};
@ -125,25 +131,41 @@ union sli4_qe {
struct lpfc_rqe *rqe;
};
/* RQ buffer list */

/*
 * Receive-queue buffer pool bookkeeping: slot and posted-buffer counts,
 * the list of posted buffers, and driver-supplied alloc/free callbacks.
 */
struct lpfc_rqb {
	uint16_t entry_count;	/* Current number of RQ slots */
	uint16_t buffer_count;	/* Current number of buffers posted */
	struct list_head rqb_buffer_list; /* buffers assigned to this HBQ */
	/* Callback for HBQ buffer allocation */
	struct rqb_dmabuf *(*rqb_alloc_buffer)(struct lpfc_hba *);
	/* Callback for HBQ buffer free */
	void (*rqb_free_buffer)(struct lpfc_hba *,
				struct rqb_dmabuf *);
};
struct lpfc_queue {
struct list_head list;
struct list_head wq_list;
enum lpfc_sli4_queue_type type;
enum lpfc_sli4_queue_subtype subtype;
struct lpfc_hba *phba;
struct list_head child_list;
struct list_head page_list;
struct list_head sgl_list;
uint32_t entry_count; /* Number of entries to support on the queue */
uint32_t entry_size; /* Size of each queue entry. */
uint32_t entry_repost; /* Count of entries before doorbell is rung */
#define LPFC_QUEUE_MIN_REPOST 8
uint32_t queue_id; /* Queue ID assigned by the hardware */
uint32_t assoc_qid; /* Queue ID associated with, for CQ/WQ/MQ */
struct list_head page_list;
uint32_t page_count; /* Number of pages allocated for this queue */
uint32_t host_index; /* The host's index for putting or getting */
uint32_t hba_index; /* The last known hba index for get or put */
struct lpfc_sli_ring *pring; /* ptr to io ring associated with q */
struct lpfc_rqb *rqbp; /* ptr to RQ buffers */
uint16_t sgl_list_cnt;
uint16_t db_format;
#define LPFC_DB_RING_FORMAT 0x01
#define LPFC_DB_LIST_FORMAT 0x02
@ -176,6 +198,8 @@ struct lpfc_queue {
#define RQ_buf_trunc q_cnt_3
#define RQ_rcv_buf q_cnt_4
uint64_t isr_timestamp;
struct lpfc_queue *assoc_qp;
union sli4_qe qe[1]; /* array to index entries (must be last) */
};
@ -338,6 +362,7 @@ struct lpfc_bmbx {
#define LPFC_CQE_DEF_COUNT 1024
#define LPFC_WQE_DEF_COUNT 256
#define LPFC_WQE128_DEF_COUNT 128
#define LPFC_WQE128_MAX_COUNT 256
#define LPFC_MQE_DEF_COUNT 16
#define LPFC_RQE_DEF_COUNT 512
@ -379,10 +404,14 @@ struct lpfc_max_cfg_param {
struct lpfc_hba;
/* SLI4 HBA multi-fcp queue handler struct */
struct lpfc_fcp_eq_hdl {
struct lpfc_hba_eq_hdl {
uint32_t idx;
struct lpfc_hba *phba;
atomic_t fcp_eq_in_use;
atomic_t hba_eq_in_use;
struct cpumask *cpumask;
/* CPU affinitsed to or 0xffffffff if multiple */
uint32_t cpu;
#define LPFC_MULTI_CPU_AFFINITY 0xffffffff
};
/* Port Capabilities for SLI4 Parameters */
@ -427,6 +456,7 @@ struct lpfc_pc_sli4_params {
uint8_t wqsize;
#define LPFC_WQ_SZ64_SUPPORT 1
#define LPFC_WQ_SZ128_SUPPORT 2
uint8_t wqpcnt;
};
struct lpfc_iov {
@ -445,7 +475,7 @@ struct lpfc_sli4_lnk_info {
uint8_t optic_state;
};
#define LPFC_SLI4_HANDLER_CNT (LPFC_FCP_IO_CHAN_MAX+ \
#define LPFC_SLI4_HANDLER_CNT (LPFC_HBA_IO_CHAN_MAX+ \
LPFC_FOF_IO_CHAN_NUM)
#define LPFC_SLI4_HANDLER_NAME_SZ 16
@ -515,23 +545,34 @@ struct lpfc_sli4_hba {
uint32_t ue_to_rp;
struct lpfc_register sli_intf;
struct lpfc_pc_sli4_params pc_sli4_params;
struct msix_entry *msix_entries;
uint8_t handler_name[LPFC_SLI4_HANDLER_CNT][LPFC_SLI4_HANDLER_NAME_SZ];
struct lpfc_fcp_eq_hdl *fcp_eq_hdl; /* FCP per-WQ handle */
struct lpfc_hba_eq_hdl *hba_eq_hdl; /* HBA per-WQ handle */
/* Pointers to the constructed SLI4 queues */
struct lpfc_queue **hba_eq;/* Event queues for HBA */
struct lpfc_queue **fcp_cq;/* Fast-path FCP compl queue */
struct lpfc_queue **fcp_wq;/* Fast-path FCP work queue */
struct lpfc_queue **hba_eq; /* Event queues for HBA */
struct lpfc_queue **fcp_cq; /* Fast-path FCP compl queue */
struct lpfc_queue **nvme_cq; /* Fast-path NVME compl queue */
struct lpfc_queue **nvmet_cqset; /* Fast-path NVMET CQ Set queues */
struct lpfc_queue **nvmet_mrq_hdr; /* Fast-path NVMET hdr MRQs */
struct lpfc_queue **nvmet_mrq_data; /* Fast-path NVMET data MRQs */
struct lpfc_queue **fcp_wq; /* Fast-path FCP work queue */
struct lpfc_queue **nvme_wq; /* Fast-path NVME work queue */
uint16_t *fcp_cq_map;
uint16_t *nvme_cq_map;
struct list_head lpfc_wq_list;
struct lpfc_queue *mbx_cq; /* Slow-path mailbox complete queue */
struct lpfc_queue *els_cq; /* Slow-path ELS response complete queue */
struct lpfc_queue *nvmels_cq; /* NVME LS complete queue */
struct lpfc_queue *mbx_wq; /* Slow-path MBOX work queue */
struct lpfc_queue *els_wq; /* Slow-path ELS work queue */
struct lpfc_queue *nvmels_wq; /* NVME LS work queue */
struct lpfc_queue *hdr_rq; /* Slow-path Header Receive queue */
struct lpfc_queue *dat_rq; /* Slow-path Data Receive queue */
struct lpfc_name wwnn;
struct lpfc_name wwpn;
uint32_t fw_func_mode; /* FW function protocol mode */
uint32_t ulp0_mode; /* ULP0 protocol mode */
uint32_t ulp1_mode; /* ULP1 protocol mode */
@ -568,14 +609,20 @@ struct lpfc_sli4_hba {
uint16_t rpi_hdrs_in_use; /* must post rpi hdrs if set. */
uint16_t next_xri; /* last_xri - max_cfg_param.xri_base = used */
uint16_t next_rpi;
uint16_t nvme_xri_max;
uint16_t nvme_xri_cnt;
uint16_t nvme_xri_start;
uint16_t scsi_xri_max;
uint16_t scsi_xri_cnt;
uint16_t els_xri_cnt;
uint16_t scsi_xri_start;
struct list_head lpfc_free_sgl_list;
struct list_head lpfc_sgl_list;
uint16_t els_xri_cnt;
uint16_t nvmet_xri_cnt;
struct list_head lpfc_els_sgl_list;
struct list_head lpfc_abts_els_sgl_list;
struct list_head lpfc_nvmet_sgl_list;
struct list_head lpfc_abts_nvmet_sgl_list;
struct list_head lpfc_abts_scsi_buf_list;
struct list_head lpfc_abts_nvme_buf_list;
struct lpfc_sglq **lpfc_sglq_active_list;
struct list_head lpfc_rpi_hdr_list;
unsigned long *rpi_bmask;
@ -602,8 +649,10 @@ struct lpfc_sli4_hba {
#define LPFC_SLI4_PPNAME_NON 0
#define LPFC_SLI4_PPNAME_GET 1
struct lpfc_iov iov;
spinlock_t abts_nvme_buf_list_lock; /* list of aborted SCSI IOs */
spinlock_t abts_scsi_buf_list_lock; /* list of aborted SCSI IOs */
spinlock_t abts_sgl_list_lock; /* list of aborted els IOs */
spinlock_t sgl_list_lock; /* list of aborted els IOs */
spinlock_t nvmet_io_lock;
uint32_t physical_port;
/* CPU to vector mapping information */
@ -611,11 +660,14 @@ struct lpfc_sli4_hba {
uint16_t num_online_cpu;
uint16_t num_present_cpu;
uint16_t curr_disp_cpu;
uint16_t nvmet_mrq_post_idx;
};
enum lpfc_sge_type {
GEN_BUFF_TYPE,
SCSI_BUFF_TYPE
SCSI_BUFF_TYPE,
NVMET_BUFF_TYPE
};
enum lpfc_sgl_state {
@ -694,15 +746,21 @@ struct lpfc_queue *lpfc_sli4_queue_alloc(struct lpfc_hba *, uint32_t,
uint32_t);
void lpfc_sli4_queue_free(struct lpfc_queue *);
int lpfc_eq_create(struct lpfc_hba *, struct lpfc_queue *, uint32_t);
int lpfc_modify_fcp_eq_delay(struct lpfc_hba *, uint32_t);
int lpfc_modify_hba_eq_delay(struct lpfc_hba *phba, uint32_t startq);
int lpfc_cq_create(struct lpfc_hba *, struct lpfc_queue *,
struct lpfc_queue *, uint32_t, uint32_t);
int lpfc_cq_create_set(struct lpfc_hba *phba, struct lpfc_queue **cqp,
struct lpfc_queue **eqp, uint32_t type,
uint32_t subtype);
int32_t lpfc_mq_create(struct lpfc_hba *, struct lpfc_queue *,
struct lpfc_queue *, uint32_t);
int lpfc_wq_create(struct lpfc_hba *, struct lpfc_queue *,
struct lpfc_queue *, uint32_t);
int lpfc_rq_create(struct lpfc_hba *, struct lpfc_queue *,
struct lpfc_queue *, struct lpfc_queue *, uint32_t);
int lpfc_mrq_create(struct lpfc_hba *phba, struct lpfc_queue **hrqp,
struct lpfc_queue **drqp, struct lpfc_queue **cqp,
uint32_t subtype);
void lpfc_rq_adjust_repost(struct lpfc_hba *, struct lpfc_queue *, int);
int lpfc_eq_destroy(struct lpfc_hba *, struct lpfc_queue *);
int lpfc_cq_destroy(struct lpfc_hba *, struct lpfc_queue *);
@ -714,6 +772,7 @@ int lpfc_sli4_queue_setup(struct lpfc_hba *);
void lpfc_sli4_queue_unset(struct lpfc_hba *);
int lpfc_sli4_post_sgl(struct lpfc_hba *, dma_addr_t, dma_addr_t, uint16_t);
int lpfc_sli4_repost_scsi_sgl_list(struct lpfc_hba *);
int lpfc_repost_nvme_sgl_list(struct lpfc_hba *phba);
uint16_t lpfc_sli4_next_xritag(struct lpfc_hba *);
void lpfc_sli4_free_xri(struct lpfc_hba *, int);
int lpfc_sli4_post_async_mbox(struct lpfc_hba *);
@ -746,6 +805,7 @@ int lpfc_sli4_brdreset(struct lpfc_hba *);
int lpfc_sli4_add_fcf_record(struct lpfc_hba *, struct fcf_record *);
void lpfc_sli_remove_dflt_fcf(struct lpfc_hba *);
int lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *);
int lpfc_sli4_get_iocb_cnt(struct lpfc_hba *phba);
int lpfc_sli4_init_vpi(struct lpfc_vport *);
uint32_t lpfc_sli4_cq_release(struct lpfc_queue *, bool);
uint32_t lpfc_sli4_eq_release(struct lpfc_queue *, bool);

View File

@ -1,9 +1,11 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
* Copyright (C) 2017 Broadcom. All Rights Reserved. The term *
* Broadcom refers to Broadcom Limited and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* www.broadcom.com *
* *
* This program is free software; you can redistribute it and/or *
* modify it under the terms of version 2 of the GNU General *
@ -18,7 +20,7 @@
* included with this package. *
*******************************************************************/
#define LPFC_DRIVER_VERSION "11.2.0.4"
#define LPFC_DRIVER_VERSION "11.2.0.7"
#define LPFC_DRIVER_NAME "lpfc"
/* Used for SLI 2/3 */
@ -30,4 +32,6 @@
#define LPFC_MODULE_DESC "Emulex LightPulse Fibre Channel SCSI driver " \
LPFC_DRIVER_VERSION
#define LPFC_COPYRIGHT "Copyright(c) 2004-2016 Emulex. All rights reserved."
#define LPFC_COPYRIGHT "Copyright (C) 2017 Broadcom. All Rights Reserved. " \
"The term \"Broadcom\" refers to Broadcom Limited " \
"and/or its subsidiaries."

View File

@ -1,9 +1,11 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
* Copyright (C) 2017 Broadcom. All Rights Reserved. The term *
* Broadcom refers to Broadcom Limited and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* www.broadcom.com *
* Portions Copyright (C) 2004-2005 Christoph Hellwig *
* *
* This program is free software; you can redistribute it and/or *
@ -34,6 +36,7 @@
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>
#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
@ -403,6 +406,22 @@ lpfc_vport_create(struct fc_vport *fc_vport, bool disable)
vport->fdmi_port_mask = phba->pport->fdmi_port_mask;
}
if ((phba->nvmet_support == 0) &&
((phba->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
(phba->cfg_enable_fc4_type == LPFC_ENABLE_NVME))) {
/* Create NVME binding with nvme_fc_transport. This
* ensures the vport is initialized.
*/
rc = lpfc_nvme_create_localport(vport);
if (rc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"6003 %s status x%x\n",
"NVME registration failed, ",
rc);
goto error_out;
}
}
/*
* In SLI4, the vpi must be activated before it can be used
* by the port.

View File

@ -1,9 +1,11 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
* Copyright (C) 2017 Broadcom. All Rights Reserved. The term *
* Broadcom refers to Broadcom Limited and/or its subsidiaries. *
* Copyright (C) 2004-2006 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* www.broadcom.com *
* Portions Copyright (C) 2004-2005 Christoph Hellwig *
* *
* This program is free software; you can redistribute it and/or *

View File

@ -1148,7 +1148,7 @@ mpt3sas_base_sync_reply_irqs(struct MPT3SAS_ADAPTER *ioc)
/* TMs are on msix_index == 0 */
if (reply_q->msix_index == 0)
continue;
synchronize_irq(reply_q->vector);
synchronize_irq(pci_irq_vector(ioc->pdev, reply_q->msix_index));
}
}
@ -1837,11 +1837,8 @@ _base_free_irq(struct MPT3SAS_ADAPTER *ioc)
list_for_each_entry_safe(reply_q, next, &ioc->reply_queue_list, list) {
list_del(&reply_q->list);
if (smp_affinity_enable) {
irq_set_affinity_hint(reply_q->vector, NULL);
free_cpumask_var(reply_q->affinity_hint);
}
free_irq(reply_q->vector, reply_q);
free_irq(pci_irq_vector(ioc->pdev, reply_q->msix_index),
reply_q);
kfree(reply_q);
}
}
@ -1850,13 +1847,13 @@ _base_free_irq(struct MPT3SAS_ADAPTER *ioc)
* _base_request_irq - request irq
* @ioc: per adapter object
* @index: msix index into vector table
* @vector: irq vector
*
* Inserting respective reply_queue into the list.
*/
static int
_base_request_irq(struct MPT3SAS_ADAPTER *ioc, u8 index, u32 vector)
_base_request_irq(struct MPT3SAS_ADAPTER *ioc, u8 index)
{
struct pci_dev *pdev = ioc->pdev;
struct adapter_reply_queue *reply_q;
int r;
@ -1868,14 +1865,6 @@ _base_request_irq(struct MPT3SAS_ADAPTER *ioc, u8 index, u32 vector)
}
reply_q->ioc = ioc;
reply_q->msix_index = index;
reply_q->vector = vector;
if (smp_affinity_enable) {
if (!zalloc_cpumask_var(&reply_q->affinity_hint, GFP_KERNEL)) {
kfree(reply_q);
return -ENOMEM;
}
}
atomic_set(&reply_q->busy, 0);
if (ioc->msix_enable)
@ -1884,12 +1873,11 @@ _base_request_irq(struct MPT3SAS_ADAPTER *ioc, u8 index, u32 vector)
else
snprintf(reply_q->name, MPT_NAME_LENGTH, "%s%d",
ioc->driver_name, ioc->id);
r = request_irq(vector, _base_interrupt, IRQF_SHARED, reply_q->name,
reply_q);
r = request_irq(pci_irq_vector(pdev, index), _base_interrupt,
IRQF_SHARED, reply_q->name, reply_q);
if (r) {
pr_err(MPT3SAS_FMT "unable to allocate interrupt %d!\n",
reply_q->name, vector);
free_cpumask_var(reply_q->affinity_hint);
reply_q->name, pci_irq_vector(pdev, index));
kfree(reply_q);
return -EBUSY;
}
@ -1925,6 +1913,21 @@ _base_assign_reply_queues(struct MPT3SAS_ADAPTER *ioc)
if (!nr_msix)
return;
if (smp_affinity_enable) {
list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
const cpumask_t *mask = pci_irq_get_affinity(ioc->pdev,
reply_q->msix_index);
if (!mask) {
pr_warn(MPT3SAS_FMT "no affinity for msi %x\n",
ioc->name, reply_q->msix_index);
continue;
}
for_each_cpu(cpu, mask)
ioc->cpu_msix_table[cpu] = reply_q->msix_index;
}
return;
}
cpu = cpumask_first(cpu_online_mask);
list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
@ -1938,18 +1941,9 @@ _base_assign_reply_queues(struct MPT3SAS_ADAPTER *ioc)
group++;
for (i = 0 ; i < group ; i++) {
ioc->cpu_msix_table[cpu] = index;
if (smp_affinity_enable)
cpumask_or(reply_q->affinity_hint,
reply_q->affinity_hint, get_cpu_mask(cpu));
ioc->cpu_msix_table[cpu] = reply_q->msix_index;
cpu = cpumask_next(cpu, cpu_online_mask);
}
if (smp_affinity_enable)
if (irq_set_affinity_hint(reply_q->vector,
reply_q->affinity_hint))
dinitprintk(ioc, pr_info(MPT3SAS_FMT
"Err setting affinity hint to irq vector %d\n",
ioc->name, reply_q->vector));
index++;
}
}
@ -1976,10 +1970,10 @@ _base_disable_msix(struct MPT3SAS_ADAPTER *ioc)
static int
_base_enable_msix(struct MPT3SAS_ADAPTER *ioc)
{
struct msix_entry *entries, *a;
int r;
int i, local_max_msix_vectors;
u8 try_msix = 0;
unsigned int irq_flags = PCI_IRQ_MSIX;
if (msix_disable == -1 || msix_disable == 0)
try_msix = 1;
@ -1991,7 +1985,7 @@ _base_enable_msix(struct MPT3SAS_ADAPTER *ioc)
goto try_ioapic;
ioc->reply_queue_count = min_t(int, ioc->cpu_count,
ioc->msix_vector_count);
ioc->msix_vector_count);
printk(MPT3SAS_FMT "MSI-X vectors supported: %d, no of cores"
": %d, max_msix_vectors: %d\n", ioc->name, ioc->msix_vector_count,
@ -2002,56 +1996,51 @@ _base_enable_msix(struct MPT3SAS_ADAPTER *ioc)
else
local_max_msix_vectors = max_msix_vectors;
if (local_max_msix_vectors > 0) {
if (local_max_msix_vectors > 0)
ioc->reply_queue_count = min_t(int, local_max_msix_vectors,
ioc->reply_queue_count);
ioc->msix_vector_count = ioc->reply_queue_count;
} else if (local_max_msix_vectors == 0)
else if (local_max_msix_vectors == 0)
goto try_ioapic;
if (ioc->msix_vector_count < ioc->cpu_count)
smp_affinity_enable = 0;
entries = kcalloc(ioc->reply_queue_count, sizeof(struct msix_entry),
GFP_KERNEL);
if (!entries) {
dfailprintk(ioc, pr_info(MPT3SAS_FMT
"kcalloc failed @ at %s:%d/%s() !!!\n",
ioc->name, __FILE__, __LINE__, __func__));
goto try_ioapic;
}
if (smp_affinity_enable)
irq_flags |= PCI_IRQ_AFFINITY;
for (i = 0, a = entries; i < ioc->reply_queue_count; i++, a++)
a->entry = i;
r = pci_enable_msix_exact(ioc->pdev, entries, ioc->reply_queue_count);
if (r) {
r = pci_alloc_irq_vectors(ioc->pdev, 1, ioc->reply_queue_count,
irq_flags);
if (r < 0) {
dfailprintk(ioc, pr_info(MPT3SAS_FMT
"pci_enable_msix_exact failed (r=%d) !!!\n",
"pci_alloc_irq_vectors failed (r=%d) !!!\n",
ioc->name, r));
kfree(entries);
goto try_ioapic;
}
ioc->msix_enable = 1;
for (i = 0, a = entries; i < ioc->reply_queue_count; i++, a++) {
r = _base_request_irq(ioc, i, a->vector);
ioc->reply_queue_count = r;
for (i = 0; i < ioc->reply_queue_count; i++) {
r = _base_request_irq(ioc, i);
if (r) {
_base_free_irq(ioc);
_base_disable_msix(ioc);
kfree(entries);
goto try_ioapic;
}
}
kfree(entries);
return 0;
/* failback to io_apic interrupt routing */
try_ioapic:
ioc->reply_queue_count = 1;
r = _base_request_irq(ioc, 0, ioc->pdev->irq);
r = pci_alloc_irq_vectors(ioc->pdev, 1, 1, PCI_IRQ_LEGACY);
if (r < 0) {
dfailprintk(ioc, pr_info(MPT3SAS_FMT
"pci_alloc_irq_vector(legacy) failed (r=%d) !!!\n",
ioc->name, r));
} else
r = _base_request_irq(ioc, 0);
return r;
}
@ -2222,7 +2211,8 @@ mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc)
list_for_each_entry(reply_q, &ioc->reply_queue_list, list)
pr_info(MPT3SAS_FMT "%s: IRQ %d\n",
reply_q->name, ((ioc->msix_enable) ? "PCI-MSI-X enabled" :
"IO-APIC enabled"), reply_q->vector);
"IO-APIC enabled"),
pci_irq_vector(ioc->pdev, reply_q->msix_index));
pr_info(MPT3SAS_FMT "iomem(0x%016llx), mapped(0x%p), size(%d)\n",
ioc->name, (unsigned long long)chip_phys, ioc->chip, memap_sz);
@ -5357,7 +5347,8 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
sizeof(resource_size_t *), GFP_KERNEL);
if (!ioc->reply_post_host_index) {
dfailprintk(ioc, pr_info(MPT3SAS_FMT "allocation "
"for cpu_msix_table failed!!!\n", ioc->name));
"for reply_post_host_index failed!!!\n",
ioc->name));
r = -ENOMEM;
goto out_free_resources;
}

View File

@ -731,12 +731,10 @@ struct _event_ack_list {
struct adapter_reply_queue {
struct MPT3SAS_ADAPTER *ioc;
u8 msix_index;
unsigned int vector;
u32 reply_post_host_index;
Mpi2ReplyDescriptorsUnion_t *reply_post_free;
char name[MPT_NAME_LENGTH];
atomic_t busy;
cpumask_var_t affinity_hint;
struct list_head list;
};

View File

@ -372,6 +372,7 @@ EXPORT_SYMBOL(osduld_device_same);
static int __detect_osd(struct osd_uld_device *oud)
{
struct scsi_device *scsi_device = oud->od.scsi_device;
struct scsi_sense_hdr sense_hdr;
char caps[OSD_CAP_LEN];
int error;
@ -380,7 +381,7 @@ static int __detect_osd(struct osd_uld_device *oud)
*/
OSD_DEBUG("start scsi_test_unit_ready %p %p %p\n",
oud, scsi_device, scsi_device->request_queue);
error = scsi_test_unit_ready(scsi_device, 10*HZ, 5, NULL);
error = scsi_test_unit_ready(scsi_device, 10*HZ, 5, &sense_hdr);
if (error)
OSD_ERR("warning: scsi_test_unit_ready failed\n");

View File

@ -0,0 +1,11 @@
config QEDF
tristate "QLogic QEDF 25/40/100Gb FCoE Initiator Driver Support"
depends on PCI && SCSI
depends on QED
depends on LIBFC
depends on LIBFCOE
select QED_LL2
select QED_FCOE
---help---
This driver supports FCoE offload for the QLogic FastLinQ
41000 Series Converged Network Adapters.

View File

@ -0,0 +1,5 @@
obj-$(CONFIG_QEDF) := qedf.o
qedf-y = qedf_dbg.o qedf_main.o qedf_io.o qedf_fip.o \
qedf_attr.o qedf_els.o
qedf-$(CONFIG_DEBUG_FS) += qedf_debugfs.o

View File

@ -0,0 +1,545 @@
/*
* QLogic FCoE Offload Driver
* Copyright (c) 2016 Cavium Inc.
*
* This software is available under the terms of the GNU General Public License
* (GPL) Version 2, available from the file COPYING in the main directory of
* this source tree.
*/
#ifndef _QEDFC_H_
#define _QEDFC_H_
#include <scsi/libfcoe.h>
#include <scsi/libfc.h>
#include <scsi/fc/fc_fip.h>
#include <scsi/fc/fc_fc2.h>
#include <scsi/scsi_tcq.h>
#include <scsi/fc_encode.h>
#include <linux/version.h>
/* qedf_hsi.h needs to before included any qed includes */
#include "qedf_hsi.h"
#include <linux/qed/qed_if.h>
#include <linux/qed/qed_fcoe_if.h>
#include <linux/qed/qed_ll2_if.h>
#include "qedf_version.h"
#include "qedf_dbg.h"
/* Helpers to extract upper and lower 32-bits of pointer */
#define U64_HI(val) ((u32)(((u64)(val)) >> 32))
#define U64_LO(val) ((u32)(((u64)(val)) & 0xffffffff))
#define QEDF_DESCR "QLogic FCoE Offload Driver"
#define QEDF_MODULE_NAME "qedf"
#define QEDF_MIN_XID 0
#define QEDF_MAX_SCSI_XID (NUM_TASKS_PER_CONNECTION - 1)
#define QEDF_MAX_ELS_XID 4095
#define QEDF_FLOGI_RETRY_CNT 3
#define QEDF_RPORT_RETRY_CNT 255
#define QEDF_MAX_SESSIONS 1024
#define QEDF_MAX_PAYLOAD 2048
#define QEDF_MAX_BDS_PER_CMD 256
#define QEDF_MAX_BD_LEN 0xffff
#define QEDF_BD_SPLIT_SZ 0x1000
#define QEDF_PAGE_SIZE 4096
#define QED_HW_DMA_BOUNDARY 0xfff
#define QEDF_MAX_SGLEN_FOR_CACHESGL ((1U << 16) - 1)
#define QEDF_MFS (QEDF_MAX_PAYLOAD + \
sizeof(struct fc_frame_header))
#define QEDF_MAX_NPIV 64
#define QEDF_TM_TIMEOUT 10
#define QEDF_ABORT_TIMEOUT 10
#define QEDF_CLEANUP_TIMEOUT 10
#define QEDF_MAX_CDB_LEN 16
#define UPSTREAM_REMOVE 1
#define UPSTREAM_KEEP 1
struct qedf_mp_req {
uint8_t tm_flags;
uint32_t req_len;
void *req_buf;
dma_addr_t req_buf_dma;
struct fcoe_sge *mp_req_bd;
dma_addr_t mp_req_bd_dma;
struct fc_frame_header req_fc_hdr;
uint32_t resp_len;
void *resp_buf;
dma_addr_t resp_buf_dma;
struct fcoe_sge *mp_resp_bd;
dma_addr_t mp_resp_bd_dma;
struct fc_frame_header resp_fc_hdr;
};
struct qedf_els_cb_arg {
struct qedf_ioreq *aborted_io_req;
struct qedf_ioreq *io_req;
u8 op; /* Used to keep track of ELS op */
uint16_t l2_oxid;
u32 offset; /* Used for sequence cleanup */
u8 r_ctl; /* Used for sequence cleanup */
};
enum qedf_ioreq_event {
QEDF_IOREQ_EV_ABORT_SUCCESS,
QEDF_IOREQ_EV_ABORT_FAILED,
QEDF_IOREQ_EV_SEND_RRQ,
QEDF_IOREQ_EV_ELS_TMO,
QEDF_IOREQ_EV_ELS_ERR_DETECT,
QEDF_IOREQ_EV_ELS_FLUSH,
QEDF_IOREQ_EV_CLEANUP_SUCCESS,
QEDF_IOREQ_EV_CLEANUP_FAILED,
};
#define FC_GOOD 0
#define FCOE_FCP_RSP_FLAGS_FCP_RESID_OVER (0x1<<2)
#define FCOE_FCP_RSP_FLAGS_FCP_RESID_UNDER (0x1<<3)
#define CMD_SCSI_STATUS(Cmnd) ((Cmnd)->SCp.Status)
#define FCOE_FCP_RSP_FLAGS_FCP_RSP_LEN_VALID (0x1<<0)
#define FCOE_FCP_RSP_FLAGS_FCP_SNS_LEN_VALID (0x1<<1)
struct qedf_ioreq {
struct list_head link;
uint16_t xid;
struct scsi_cmnd *sc_cmd;
bool use_slowpath; /* Use slow SGL for this I/O */
#define QEDF_SCSI_CMD 1
#define QEDF_TASK_MGMT_CMD 2
#define QEDF_ABTS 3
#define QEDF_ELS 4
#define QEDF_CLEANUP 5
#define QEDF_SEQ_CLEANUP 6
u8 cmd_type;
#define QEDF_CMD_OUTSTANDING 0x0
#define QEDF_CMD_IN_ABORT 0x1
#define QEDF_CMD_IN_CLEANUP 0x2
#define QEDF_CMD_SRR_SENT 0x3
u8 io_req_flags;
struct qedf_rport *fcport;
unsigned long flags;
enum qedf_ioreq_event event;
size_t data_xfer_len;
struct kref refcount;
struct qedf_cmd_mgr *cmd_mgr;
struct io_bdt *bd_tbl;
struct delayed_work timeout_work;
struct completion tm_done;
struct completion abts_done;
struct fcoe_task_context *task;
int idx;
/*
* Need to allocate enough room for both sense data and FCP response data
* which has a max length of 8 bytes according to spec.
*/
#define QEDF_SCSI_SENSE_BUFFERSIZE (SCSI_SENSE_BUFFERSIZE + 8)
uint8_t *sense_buffer;
dma_addr_t sense_buffer_dma;
u32 fcp_resid;
u32 fcp_rsp_len;
u32 fcp_sns_len;
u8 cdb_status;
u8 fcp_status;
u8 fcp_rsp_code;
u8 scsi_comp_flags;
#define QEDF_MAX_REUSE 0xfff
u16 reuse_count;
struct qedf_mp_req mp_req;
void (*cb_func)(struct qedf_els_cb_arg *cb_arg);
struct qedf_els_cb_arg *cb_arg;
int fp_idx;
unsigned int cpu;
unsigned int int_cpu;
#define QEDF_IOREQ_SLOW_SGE 0
#define QEDF_IOREQ_SINGLE_SGE 1
#define QEDF_IOREQ_FAST_SGE 2
u8 sge_type;
struct delayed_work rrq_work;
/* Used for sequence level recovery; i.e. REC/SRR */
uint32_t rx_buf_off;
uint32_t tx_buf_off;
uint32_t rx_id;
uint32_t task_retry_identifier;
/*
* Used to tell if we need to return a SCSI command
* during some form of error processing.
*/
bool return_scsi_cmd_on_abts;
};
extern struct workqueue_struct *qedf_io_wq;
struct qedf_rport {
spinlock_t rport_lock;
#define QEDF_RPORT_SESSION_READY 1
#define QEDF_RPORT_UPLOADING_CONNECTION 2
unsigned long flags;
unsigned long retry_delay_timestamp;
struct fc_rport *rport;
struct fc_rport_priv *rdata;
struct qedf_ctx *qedf;
u32 handle; /* Handle from qed */
u32 fw_cid; /* fw_cid from qed */
void __iomem *p_doorbell;
/* Send queue management */
atomic_t free_sqes;
atomic_t num_active_ios;
struct fcoe_wqe *sq;
dma_addr_t sq_dma;
u16 sq_prod_idx;
u16 fw_sq_prod_idx;
u16 sq_con_idx;
u32 sq_mem_size;
void *sq_pbl;
dma_addr_t sq_pbl_dma;
u32 sq_pbl_size;
u32 sid;
#define QEDF_RPORT_TYPE_DISK 1
#define QEDF_RPORT_TYPE_TAPE 2
uint dev_type; /* Disk or tape */
struct list_head peers;
};
/* Used to contain LL2 skb's in ll2_skb_list */
struct qedf_skb_work {
struct work_struct work;
struct sk_buff *skb;
struct qedf_ctx *qedf;
};
struct qedf_fastpath {
#define QEDF_SB_ID_NULL 0xffff
u16 sb_id;
struct qed_sb_info *sb_info;
struct qedf_ctx *qedf;
/* Keep track of number of completions on this fastpath */
unsigned long completions;
uint32_t cq_num_entries;
};
/* Used to pass fastpath information needed to process CQEs */
struct qedf_io_work {
struct work_struct work;
struct fcoe_cqe cqe;
struct qedf_ctx *qedf;
struct fc_frame *fp;
};
struct qedf_glbl_q_params {
u64 hw_p_cq; /* Completion queue PBL */
u64 hw_p_rq; /* Request queue PBL */
u64 hw_p_cmdq; /* Command queue PBL */
};
struct global_queue {
struct fcoe_cqe *cq;
dma_addr_t cq_dma;
u32 cq_mem_size;
u32 cq_cons_idx; /* Completion queue consumer index */
u32 cq_prod_idx;
void *cq_pbl;
dma_addr_t cq_pbl_dma;
u32 cq_pbl_size;
};
/* I/O tracing entry */
#define QEDF_IO_TRACE_SIZE 2048
struct qedf_io_log {
#define QEDF_IO_TRACE_REQ 0
#define QEDF_IO_TRACE_RSP 1
uint8_t direction;
uint16_t task_id;
uint32_t port_id; /* Remote port fabric ID */
int lun;
char op; /* SCSI CDB */
uint8_t lba[4];
unsigned int bufflen; /* SCSI buffer length */
unsigned int sg_count; /* Number of SG elements */
int result; /* Result passed back to mid-layer */
unsigned long jiffies; /* Time stamp when I/O logged */
int refcount; /* Reference count for task id */
unsigned int req_cpu; /* CPU that the task is queued on */
unsigned int int_cpu; /* Interrupt CPU that the task is received on */
unsigned int rsp_cpu; /* CPU that task is returned on */
u8 sge_type; /* Did we take the slow, single or fast SGE path */
};
/* Number of entries in BDQ */
#define QEDF_BDQ_SIZE 256
#define QEDF_BDQ_BUF_SIZE 2072
/* DMA coherent buffers for BDQ */
struct qedf_bdq_buf {
void *buf_addr;
dma_addr_t buf_dma;
};
/* Main adapter struct */
struct qedf_ctx {
struct qedf_dbg_ctx dbg_ctx;
struct fcoe_ctlr ctlr;
struct fc_lport *lport;
u8 data_src_addr[ETH_ALEN];
#define QEDF_LINK_DOWN 0
#define QEDF_LINK_UP 1
atomic_t link_state;
#define QEDF_DCBX_PENDING 0
#define QEDF_DCBX_DONE 1
atomic_t dcbx;
uint16_t max_scsi_xid;
uint16_t max_els_xid;
#define QEDF_NULL_VLAN_ID -1
#define QEDF_FALLBACK_VLAN 1002
#define QEDF_DEFAULT_PRIO 3
int vlan_id;
uint vlan_hw_insert:1;
struct qed_dev *cdev;
struct qed_dev_fcoe_info dev_info;
struct qed_int_info int_info;
uint16_t last_command;
spinlock_t hba_lock;
struct pci_dev *pdev;
u64 wwnn;
u64 wwpn;
u8 __aligned(16) mac[ETH_ALEN];
struct list_head fcports;
atomic_t num_offloads;
unsigned int curr_conn_id;
struct workqueue_struct *ll2_recv_wq;
struct workqueue_struct *link_update_wq;
struct delayed_work link_update;
struct delayed_work link_recovery;
struct completion flogi_compl;
struct completion fipvlan_compl;
/*
* Used to tell if we're in the window where we are waiting for
* the link to come back up before informting fcoe that the link is
* done.
*/
atomic_t link_down_tmo_valid;
#define QEDF_TIMER_INTERVAL (1 * HZ)
struct timer_list timer; /* One second book keeping timer */
#define QEDF_DRAIN_ACTIVE 1
#define QEDF_LL2_STARTED 2
#define QEDF_UNLOADING 3
#define QEDF_GRCDUMP_CAPTURE 4
#define QEDF_IN_RECOVERY 5
#define QEDF_DBG_STOP_IO 6
unsigned long flags; /* Miscellaneous state flags */
int fipvlan_retries;
u8 num_queues;
struct global_queue **global_queues;
/* Pointer to array of queue structures */
struct qedf_glbl_q_params *p_cpuq;
/* Physical address of array of queue structures */
dma_addr_t hw_p_cpuq;
struct qedf_bdq_buf bdq[QEDF_BDQ_SIZE];
void *bdq_pbl;
dma_addr_t bdq_pbl_dma;
size_t bdq_pbl_mem_size;
void *bdq_pbl_list;
dma_addr_t bdq_pbl_list_dma;
u8 bdq_pbl_list_num_entries;
void __iomem *bdq_primary_prod;
void __iomem *bdq_secondary_prod;
uint16_t bdq_prod_idx;
/* Structure for holding all the fastpath for this qedf_ctx */
struct qedf_fastpath *fp_array;
struct qed_fcoe_tid tasks;
struct qedf_cmd_mgr *cmd_mgr;
/* Holds the PF parameters we pass to qed to start he FCoE function */
struct qed_pf_params pf_params;
/* Used to time middle path ELS and TM commands */
struct workqueue_struct *timer_work_queue;
#define QEDF_IO_WORK_MIN 64
mempool_t *io_mempool;
struct workqueue_struct *dpc_wq;
u32 slow_sge_ios;
u32 fast_sge_ios;
u32 single_sge_ios;
uint8_t *grcdump;
uint32_t grcdump_size;
struct qedf_io_log io_trace_buf[QEDF_IO_TRACE_SIZE];
spinlock_t io_trace_lock;
uint16_t io_trace_idx;
bool stop_io_on_error;
u32 flogi_cnt;
u32 flogi_failed;
/* Used for fc statistics */
u64 input_requests;
u64 output_requests;
u64 control_requests;
u64 packet_aborts;
u64 alloc_failures;
};
struct io_bdt {
struct qedf_ioreq *io_req;
struct fcoe_sge *bd_tbl;
dma_addr_t bd_tbl_dma;
u16 bd_valid;
};
struct qedf_cmd_mgr {
struct qedf_ctx *qedf;
u16 idx;
struct io_bdt **io_bdt_pool;
#define FCOE_PARAMS_NUM_TASKS 4096
struct qedf_ioreq cmds[FCOE_PARAMS_NUM_TASKS];
spinlock_t lock;
atomic_t free_list_cnt;
};
/* Stolen from qed_cxt_api.h and adapted for qed_fcoe_info
* Usage:
*
* void *ptr;
* ptr = qedf_get_task_mem(&qedf->tasks, 128);
*/
static inline void *qedf_get_task_mem(struct qed_fcoe_tid *info, u32 tid)
{
return (void *)(info->blocks[tid / info->num_tids_per_block] +
(tid % info->num_tids_per_block) * info->size);
}
static inline void qedf_stop_all_io(struct qedf_ctx *qedf)
{
set_bit(QEDF_DBG_STOP_IO, &qedf->flags);
}
/*
* Externs
*/
#define QEDF_DEFAULT_LOG_MASK 0x3CFB6
extern const struct qed_fcoe_ops *qed_ops;
extern uint qedf_dump_frames;
extern uint qedf_io_tracing;
extern uint qedf_stop_io_on_error;
extern uint qedf_link_down_tmo;
#define QEDF_RETRY_DELAY_MAX 20 /* 2 seconds */
extern bool qedf_retry_delay;
extern uint qedf_debug;
extern struct qedf_cmd_mgr *qedf_cmd_mgr_alloc(struct qedf_ctx *qedf);
extern void qedf_cmd_mgr_free(struct qedf_cmd_mgr *cmgr);
extern int qedf_queuecommand(struct Scsi_Host *host,
struct scsi_cmnd *sc_cmd);
extern void qedf_fip_send(struct fcoe_ctlr *fip, struct sk_buff *skb);
extern void qedf_update_src_mac(struct fc_lport *lport, u8 *addr);
extern u8 *qedf_get_src_mac(struct fc_lport *lport);
extern void qedf_fip_recv(struct qedf_ctx *qedf, struct sk_buff *skb);
extern void qedf_fcoe_send_vlan_req(struct qedf_ctx *qedf);
extern void qedf_scsi_completion(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
struct qedf_ioreq *io_req);
extern void qedf_process_warning_compl(struct qedf_ctx *qedf,
struct fcoe_cqe *cqe, struct qedf_ioreq *io_req);
extern void qedf_process_error_detect(struct qedf_ctx *qedf,
struct fcoe_cqe *cqe, struct qedf_ioreq *io_req);
extern void qedf_flush_active_ios(struct qedf_rport *fcport, int lun);
extern void qedf_release_cmd(struct kref *ref);
extern int qedf_initiate_abts(struct qedf_ioreq *io_req,
bool return_scsi_cmd_on_abts);
extern void qedf_process_abts_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
struct qedf_ioreq *io_req);
extern struct qedf_ioreq *qedf_alloc_cmd(struct qedf_rport *fcport,
u8 cmd_type);
extern struct device_attribute *qedf_host_attrs[];
extern void qedf_cmd_timer_set(struct qedf_ctx *qedf, struct qedf_ioreq *io_req,
unsigned int timer_msec);
extern int qedf_init_mp_req(struct qedf_ioreq *io_req);
extern void qedf_init_mp_task(struct qedf_ioreq *io_req,
struct fcoe_task_context *task_ctx);
extern void qedf_add_to_sq(struct qedf_rport *fcport, u16 xid,
u32 ptu_invalidate, enum fcoe_task_type req_type, u32 offset);
extern void qedf_ring_doorbell(struct qedf_rport *fcport);
extern void qedf_process_els_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
struct qedf_ioreq *els_req);
extern int qedf_send_rrq(struct qedf_ioreq *aborted_io_req);
extern int qedf_send_adisc(struct qedf_rport *fcport, struct fc_frame *fp);
extern int qedf_initiate_cleanup(struct qedf_ioreq *io_req,
bool return_scsi_cmd_on_abts);
extern void qedf_process_cleanup_compl(struct qedf_ctx *qedf,
struct fcoe_cqe *cqe, struct qedf_ioreq *io_req);
extern int qedf_initiate_tmf(struct scsi_cmnd *sc_cmd, u8 tm_flags);
extern void qedf_process_tmf_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
struct qedf_ioreq *io_req);
extern void qedf_process_cqe(struct qedf_ctx *qedf, struct fcoe_cqe *cqe);
extern void qedf_scsi_done(struct qedf_ctx *qedf, struct qedf_ioreq *io_req,
int result);
extern void qedf_set_vlan_id(struct qedf_ctx *qedf, int vlan_id);
extern void qedf_create_sysfs_ctx_attr(struct qedf_ctx *qedf);
extern void qedf_remove_sysfs_ctx_attr(struct qedf_ctx *qedf);
extern void qedf_capture_grc_dump(struct qedf_ctx *qedf);
extern void qedf_wait_for_upload(struct qedf_ctx *qedf);
extern void qedf_process_unsol_compl(struct qedf_ctx *qedf, uint16_t que_idx,
struct fcoe_cqe *cqe);
extern void qedf_restart_rport(struct qedf_rport *fcport);
extern int qedf_send_rec(struct qedf_ioreq *orig_io_req);
extern int qedf_post_io_req(struct qedf_rport *fcport,
struct qedf_ioreq *io_req);
extern void qedf_process_seq_cleanup_compl(struct qedf_ctx *qedf,
struct fcoe_cqe *cqe, struct qedf_ioreq *io_req);
extern int qedf_send_flogi(struct qedf_ctx *qedf);
extern void qedf_fp_io_handler(struct work_struct *work);
#define FCOE_WORD_TO_BYTE 4
#define QEDF_MAX_TASK_NUM 0xFFFF
struct fip_vlan {
struct ethhdr eth;
struct fip_header fip;
struct {
struct fip_mac_desc mac;
struct fip_wwn_desc wwnn;
} desc;
};
/* SQ/CQ Sizes */
#define GBL_RSVD_TASKS 16
#define NUM_TASKS_PER_CONNECTION 1024
#define NUM_RW_TASKS_PER_CONNECTION 512
#define FCOE_PARAMS_CQ_NUM_ENTRIES FCOE_PARAMS_NUM_TASKS
#define FCOE_PARAMS_CMDQ_NUM_ENTRIES FCOE_PARAMS_NUM_TASKS
#define SQ_NUM_ENTRIES NUM_TASKS_PER_CONNECTION
#define QEDF_FCOE_PARAMS_GL_RQ_PI 0
#define QEDF_FCOE_PARAMS_GL_CMD_PI 1
#define QEDF_READ (1 << 1)
#define QEDF_WRITE (1 << 0)
#define MAX_FIBRE_LUNS 0xffffffff
#define QEDF_MAX_NUM_CQS 8
/*
* PCI function probe defines
*/
/* Probe/remove called during normal PCI probe */
#define QEDF_MODE_NORMAL 0
/* Probe/remove called from qed error recovery */
#define QEDF_MODE_RECOVERY 1
#define SUPPORTED_25000baseKR_Full (1<<27)
#define SUPPORTED_50000baseKR2_Full (1<<28)
#define SUPPORTED_100000baseKR4_Full (1<<29)
#define SUPPORTED_100000baseCR4_Full (1<<30)
#endif

View File

@ -0,0 +1,165 @@
/*
* QLogic FCoE Offload Driver
* Copyright (c) 2016 Cavium Inc.
*
* This software is available under the terms of the GNU General Public License
* (GPL) Version 2, available from the file COPYING in the main directory of
* this source tree.
*/
#include "qedf.h"
static ssize_t
qedf_fcoe_mac_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct fc_lport *lport = shost_priv(class_to_shost(dev));
u32 port_id;
u8 lport_src_id[3];
u8 fcoe_mac[6];
port_id = fc_host_port_id(lport->host);
lport_src_id[2] = (port_id & 0x000000FF);
lport_src_id[1] = (port_id & 0x0000FF00) >> 8;
lport_src_id[0] = (port_id & 0x00FF0000) >> 16;
fc_fcoe_set_mac(fcoe_mac, lport_src_id);
return scnprintf(buf, PAGE_SIZE, "%pM\n", fcoe_mac);
}
static DEVICE_ATTR(fcoe_mac, S_IRUGO, qedf_fcoe_mac_show, NULL);
struct device_attribute *qedf_host_attrs[] = {
&dev_attr_fcoe_mac,
NULL,
};
extern const struct qed_fcoe_ops *qed_ops;
inline bool qedf_is_vport(struct qedf_ctx *qedf)
{
return (!(qedf->lport->vport == NULL));
}
/* Get base qedf for physical port from vport */
static struct qedf_ctx *qedf_get_base_qedf(struct qedf_ctx *qedf)
{
struct fc_lport *lport;
struct fc_lport *base_lport;
if (!(qedf_is_vport(qedf)))
return NULL;
lport = qedf->lport;
base_lport = shost_priv(vport_to_shost(lport->vport));
return (struct qedf_ctx *)(lport_priv(base_lport));
}
void qedf_capture_grc_dump(struct qedf_ctx *qedf)
{
struct qedf_ctx *base_qedf;
/* Make sure we use the base qedf to take the GRC dump */
if (qedf_is_vport(qedf))
base_qedf = qedf_get_base_qedf(qedf);
else
base_qedf = qedf;
if (test_bit(QEDF_GRCDUMP_CAPTURE, &base_qedf->flags)) {
QEDF_INFO(&(base_qedf->dbg_ctx), QEDF_LOG_INFO,
"GRC Dump already captured.\n");
return;
}
qedf_get_grc_dump(base_qedf->cdev, qed_ops->common,
&base_qedf->grcdump, &base_qedf->grcdump_size);
QEDF_ERR(&(base_qedf->dbg_ctx), "GRC Dump captured.\n");
set_bit(QEDF_GRCDUMP_CAPTURE, &base_qedf->flags);
qedf_uevent_emit(base_qedf->lport->host, QEDF_UEVENT_CODE_GRCDUMP,
NULL);
}
/*
 * Binary sysfs read handler for "grcdump": stream out the captured dump,
 * or log an error and return 0 bytes when no dump has been taken yet.
 */
static ssize_t
qedf_sysfs_read_grcdump(struct file *filep, struct kobject *kobj,
	struct bin_attribute *ba, char *buf, loff_t off,
	size_t count)
{
	struct fc_lport *lport = shost_priv(dev_to_shost(container_of(kobj,
	    struct device, kobj)));
	struct qedf_ctx *qedf = lport_priv(lport);

	/* Nothing to hand out until a dump has actually been captured. */
	if (!test_bit(QEDF_GRCDUMP_CAPTURE, &qedf->flags)) {
		QEDF_ERR(&(qedf->dbg_ctx), "GRC Dump not captured!\n");
		return 0;
	}

	return memory_read_from_buffer(buf, count, &off, qedf->grcdump,
	    qedf->grcdump_size);
}
/*
 * Binary sysfs write handler for "grcdump". Writing '0' clears the
 * captured dump and re-arms capture; writing '1' triggers a capture.
 * Any other value is ignored. Only offset-0 writes are honoured.
 *
 * Fixes: removed the dead 40-byte "msg" buffer (memset but never used)
 * and made the ignored-value case an explicit switch default.
 */
static ssize_t
qedf_sysfs_write_grcdump(struct file *filep, struct kobject *kobj,
	struct bin_attribute *ba, char *buf, loff_t off,
	size_t count)
{
	struct fc_lport *lport = NULL;
	struct qedf_ctx *qedf = NULL;
	long reading;
	int ret = 0;

	if (off != 0)
		return ret;

	lport = shost_priv(dev_to_shost(container_of(kobj,
	    struct device, kobj)));
	qedf = lport_priv(lport);

	/* Only the first character is significant; terminate after it. */
	buf[1] = 0;
	ret = kstrtol(buf, 10, &reading);
	if (ret) {
		QEDF_ERR(&(qedf->dbg_ctx), "Invalid input, err(%d)\n", ret);
		return ret;
	}

	switch (reading) {
	case 0:
		/* Discard the captured dump and allow a new capture. */
		memset(qedf->grcdump, 0, qedf->grcdump_size);
		clear_bit(QEDF_GRCDUMP_CAPTURE, &qedf->flags);
		break;
	case 1:
		/* Trigger a GRC dump capture now. */
		qedf_capture_grc_dump(qedf);
		break;
	default:
		/* Any other value is silently ignored. */
		break;
	}

	return count;
}
/* Binary sysfs attribute backing the per-host "grcdump" file (0600). */
static struct bin_attribute sysfs_grcdump_attr = {
.attr = {
.name = "grcdump",
.mode = S_IRUSR | S_IWUSR,
},
.size = 0,
.read = qedf_sysfs_read_grcdump,
.write = qedf_sysfs_write_grcdump,
};
/* NULL-terminated table walked by qedf_create/remove_sysfs_attr(). */
static struct sysfs_bin_attrs bin_file_entries[] = {
{"grcdump", &sysfs_grcdump_attr},
{NULL},
};
/* Register the per-host binary sysfs files (grcdump) for this adapter. */
void qedf_create_sysfs_ctx_attr(struct qedf_ctx *qedf)
{
qedf_create_sysfs_attr(qedf->lport->host, bin_file_entries);
}
/* Tear down the per-host binary sysfs files created above. */
void qedf_remove_sysfs_ctx_attr(struct qedf_ctx *qedf)
{
qedf_remove_sysfs_attr(qedf->lport->host, bin_file_entries);
}

View File

@ -0,0 +1,195 @@
/*
* QLogic FCoE Offload Driver
* Copyright (c) 2016 Cavium Inc.
*
* This software is available under the terms of the GNU General Public License
* (GPL) Version 2, available from the file COPYING in the main directory of
* this source tree.
*/
#include "qedf_dbg.h"
#include <linux/vmalloc.h>
/*
 * Log an error message tagged with the PCI device name, calling function
 * and line. Errors print unconditionally (not gated on qedf_debug).
 *
 * Fix: the old memcpy(nfunc, func, sizeof(nfunc) - 1) always read 31
 * bytes from @func, past the end of shorter __func__ string literals.
 * strncpy stops at the source NUL; the memset guarantees termination.
 */
void
qedf_dbg_err(struct qedf_dbg_ctx *qedf, const char *func, u32 line,
	const char *fmt, ...)
{
	va_list va;
	struct va_format vaf;
	char nfunc[32];

	memset(nfunc, 0, sizeof(nfunc));
	strncpy(nfunc, func, sizeof(nfunc) - 1);

	va_start(va, fmt);
	vaf.fmt = fmt;
	vaf.va = &va;

	if (likely(qedf) && likely(qedf->pdev))
		pr_err("[%s]:[%s:%d]:%d: %pV", dev_name(&(qedf->pdev->dev)),
			nfunc, line, qedf->host_no, &vaf);
	else
		pr_err("[0000:00:00.0]:[%s:%d]: %pV", nfunc, line, &vaf);

	va_end(va);
}
/*
 * Log a warning, gated on the QEDF_LOG_WARN bit of qedf_debug.
 *
 * Fix: bounded function-name copy (see qedf_dbg_err) — the fixed-size
 * memcpy could read past the end of short __func__ literals.
 */
void
qedf_dbg_warn(struct qedf_dbg_ctx *qedf, const char *func, u32 line,
	const char *fmt, ...)
{
	va_list va;
	struct va_format vaf;
	char nfunc[32];

	memset(nfunc, 0, sizeof(nfunc));
	strncpy(nfunc, func, sizeof(nfunc) - 1);

	va_start(va, fmt);
	vaf.fmt = fmt;
	vaf.va = &va;

	if (!(qedf_debug & QEDF_LOG_WARN))
		goto ret;

	if (likely(qedf) && likely(qedf->pdev))
		pr_warn("[%s]:[%s:%d]:%d: %pV", dev_name(&(qedf->pdev->dev)),
			nfunc, line, qedf->host_no, &vaf);
	else
		pr_warn("[0000:00:00.0]:[%s:%d]: %pV", nfunc, line, &vaf);

ret:
	va_end(va);
}
/*
 * Log a notice, gated on the QEDF_LOG_NOTICE bit of qedf_debug.
 *
 * Fix: bounded function-name copy (see qedf_dbg_err) — the fixed-size
 * memcpy could read past the end of short __func__ literals.
 */
void
qedf_dbg_notice(struct qedf_dbg_ctx *qedf, const char *func, u32 line,
	const char *fmt, ...)
{
	va_list va;
	struct va_format vaf;
	char nfunc[32];

	memset(nfunc, 0, sizeof(nfunc));
	strncpy(nfunc, func, sizeof(nfunc) - 1);

	va_start(va, fmt);
	vaf.fmt = fmt;
	vaf.va = &va;

	if (!(qedf_debug & QEDF_LOG_NOTICE))
		goto ret;

	if (likely(qedf) && likely(qedf->pdev))
		pr_notice("[%s]:[%s:%d]:%d: %pV",
			dev_name(&(qedf->pdev->dev)), nfunc, line,
			qedf->host_no, &vaf);
	else
		pr_notice("[0000:00:00.0]:[%s:%d]: %pV", nfunc, line, &vaf);

ret:
	va_end(va);
}
/*
 * Log an informational message, gated on @level against qedf_debug.
 *
 * Fix: bounded function-name copy (see qedf_dbg_err) — the fixed-size
 * memcpy could read past the end of short __func__ literals.
 */
void
qedf_dbg_info(struct qedf_dbg_ctx *qedf, const char *func, u32 line,
	u32 level, const char *fmt, ...)
{
	va_list va;
	struct va_format vaf;
	char nfunc[32];

	memset(nfunc, 0, sizeof(nfunc));
	strncpy(nfunc, func, sizeof(nfunc) - 1);

	va_start(va, fmt);
	vaf.fmt = fmt;
	vaf.va = &va;

	if (!(qedf_debug & level))
		goto ret;

	if (likely(qedf) && likely(qedf->pdev))
		pr_info("[%s]:[%s:%d]:%d: %pV", dev_name(&(qedf->pdev->dev)),
			nfunc, line, qedf->host_no, &vaf);
	else
		pr_info("[0000:00:00.0]:[%s:%d]: %pV", nfunc, line, &vaf);

ret:
	va_end(va);
}
/*
 * Allocate a zeroed buffer of @len bytes for a GRC dump.
 * Returns 0 on success, -ENOMEM on allocation failure.
 */
int
qedf_alloc_grc_dump_buf(u8 **buf, uint32_t len)
{
	/* vzalloc() combines the old vmalloc() + memset() pair. */
	*buf = vzalloc(len);
	if (!(*buf))
		return -ENOMEM;
	return 0;
}
/*
 * Free a GRC dump buffer allocated by qedf_alloc_grc_dump_buf() and NULL
 * the caller's pointer to guard against double-free/use-after-free.
 * vfree(NULL) is a no-op, so calling this on an empty buffer is safe.
 */
void
qedf_free_grc_dump_buf(uint8_t **buf)
{
vfree(*buf);
*buf = NULL;
}
/*
 * Collect a GRC dump from the qed core into *buf, which must already be
 * allocated (see qedf_alloc_grc_dump_buf). On return *grcsize holds the
 * dump size. Returns -EINVAL for a NULL buffer, otherwise the result of
 * the qed dbg_grc() callback.
 */
int
qedf_get_grc_dump(struct qed_dev *cdev, const struct qed_common_ops *common,
u8 **buf, uint32_t *grcsize)
{
if (!*buf)
return -EINVAL;
return common->dbg_grc(cdev, *buf, grcsize);
}
/*
 * Emit a KOBJ_CHANGE uevent on the Scsi_Host for @code. If @msg is given
 * it is used verbatim as the environment string, otherwise a default
 * "GRCDUMP=<host_no>" string is generated. Unknown codes emit an empty
 * environment string.
 *
 * Fix: the old strncpy(event_string, msg, strlen(msg)) bounded the copy
 * by the SOURCE length and could overflow the 40-byte event_string when
 * @msg was longer; snprintf bounds by the destination and always
 * NUL-terminates.
 */
void
qedf_uevent_emit(struct Scsi_Host *shost, u32 code, char *msg)
{
	char event_string[40];
	char *envp[] = {event_string, NULL};

	memset(event_string, 0, sizeof(event_string));
	switch (code) {
	case QEDF_UEVENT_CODE_GRCDUMP:
		if (msg) {
			snprintf(event_string, sizeof(event_string), "%s",
			    msg);
		} else {
			snprintf(event_string, sizeof(event_string),
			    "GRCDUMP=%u", shost->host_no);
		}
		break;
	default:
		/* do nothing */
		break;
	}

	kobject_uevent_env(&shost->shost_gendev.kobj, KOBJ_CHANGE, envp);
}
/*
 * Create every binary sysfs file in the NULL-name-terminated @iter table
 * under the host's generic device. Failures are logged but do not stop
 * the loop; the status of the last attempted file is returned.
 */
int
qedf_create_sysfs_attr(struct Scsi_Host *shost, struct sysfs_bin_attrs *iter)
{
	int ret = 0;

	while (iter->name) {
		ret = sysfs_create_bin_file(&shost->shost_gendev.kobj,
		    iter->attr);
		if (ret)
			pr_err("Unable to create sysfs %s attr, err(%d).\n",
			    iter->name, ret);
		iter++;
	}
	return ret;
}
/* Remove every binary sysfs file previously registered from @iter. */
void
qedf_remove_sysfs_attr(struct Scsi_Host *shost, struct sysfs_bin_attrs *iter)
{
	while (iter->name) {
		sysfs_remove_bin_file(&shost->shost_gendev.kobj, iter->attr);
		iter++;
	}
}

View File

@ -0,0 +1,154 @@
/*
* QLogic FCoE Offload Driver
* Copyright (c) 2016 Cavium Inc.
*
* This software is available under the terms of the GNU General Public License
* (GPL) Version 2, available from the file COPYING in the main directory of
* this source tree.
*/
#ifndef _QEDF_DBG_H_
#define _QEDF_DBG_H_
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/compiler.h>
#include <linux/string.h>
#include <linux/version.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <scsi/scsi_transport.h>
#include <linux/fs.h>
#include <linux/qed/common_hsi.h>
#include <linux/qed/qed_if.h>
extern uint qedf_debug;
/* Debug print level definitions */
#define QEDF_LOG_DEFAULT 0x1 /* Set default logging mask */
#define QEDF_LOG_INFO 0x2 /*
* Informational logs,
* MAC address, WWPN, WWNN
*/
#define QEDF_LOG_DISC 0x4 /* Init, discovery, rport */
#define QEDF_LOG_LL2 0x8 /* LL2, VLAN logs */
#define QEDF_LOG_CONN 0x10 /* Connection setup, cleanup */
#define QEDF_LOG_EVT 0x20 /* Events, link, mtu */
#define QEDF_LOG_TIMER 0x40 /* Timer events */
#define QEDF_LOG_MP_REQ 0x80 /* Middle Path (MP) logs */
#define QEDF_LOG_SCSI_TM 0x100 /* SCSI Aborts, Task Mgmt */
#define QEDF_LOG_UNSOL 0x200 /* unsolicited event logs */
#define QEDF_LOG_IO 0x400 /* scsi cmd, completion */
#define QEDF_LOG_MQ 0x800 /* Multi Queue logs */
#define QEDF_LOG_BSG 0x1000 /* BSG logs */
#define QEDF_LOG_DEBUGFS 0x2000 /* debugFS logs */
#define QEDF_LOG_LPORT 0x4000 /* lport logs */
#define QEDF_LOG_ELS 0x8000 /* ELS logs */
#define QEDF_LOG_NPIV 0x10000 /* NPIV logs */
#define QEDF_LOG_SESS 0x20000 /* Connection setup, cleanup */
#define QEDF_LOG_TID 0x80000 /*
* FW TID context acquire
* free
*/
#define QEDF_TRACK_TID 0x100000 /*
* Track TID state. To be
* enabled only at module load
* and not run-time.
*/
#define QEDF_TRACK_CMD_LIST 0x300000 /*
* Track active cmd list nodes,
* done with reference to TID,
* hence TRACK_TID also enabled.
*/
#define QEDF_LOG_NOTICE 0x40000000 /* Notice logs */
#define QEDF_LOG_WARN 0x80000000 /* Warning logs */
/*
 * Per-adapter debug context, embedded in struct qedf_ctx. Carries the
 * identifiers used to tag log output (SCSI host number, PCI device) and,
 * when debugfs is enabled, the host's debugfs directory handle.
 */
struct qedf_dbg_ctx {
unsigned int host_no;
struct pci_dev *pdev;
#ifdef CONFIG_DEBUG_FS
struct dentry *bdf_dentry;
#endif
};
#define QEDF_ERR(pdev, fmt, ...) \
qedf_dbg_err(pdev, __func__, __LINE__, fmt, ## __VA_ARGS__)
#define QEDF_WARN(pdev, fmt, ...) \
qedf_dbg_warn(pdev, __func__, __LINE__, fmt, ## __VA_ARGS__)
#define QEDF_NOTICE(pdev, fmt, ...) \
qedf_dbg_notice(pdev, __func__, __LINE__, fmt, ## __VA_ARGS__)
#define QEDF_INFO(pdev, level, fmt, ...) \
qedf_dbg_info(pdev, __func__, __LINE__, level, fmt, \
## __VA_ARGS__)
extern void qedf_dbg_err(struct qedf_dbg_ctx *qedf, const char *func, u32 line,
const char *fmt, ...);
extern void qedf_dbg_warn(struct qedf_dbg_ctx *qedf, const char *func, u32 line,
const char *, ...);
extern void qedf_dbg_notice(struct qedf_dbg_ctx *qedf, const char *func,
u32 line, const char *, ...);
extern void qedf_dbg_info(struct qedf_dbg_ctx *qedf, const char *func, u32 line,
u32 info, const char *fmt, ...);
/* GRC Dump related defines */
struct Scsi_Host;
#define QEDF_UEVENT_CODE_GRCDUMP 0
struct sysfs_bin_attrs {
char *name;
struct bin_attribute *attr;
};
extern int qedf_alloc_grc_dump_buf(uint8_t **buf, uint32_t len);
extern void qedf_free_grc_dump_buf(uint8_t **buf);
extern int qedf_get_grc_dump(struct qed_dev *cdev,
const struct qed_common_ops *common, uint8_t **buf,
uint32_t *grcsize);
extern void qedf_uevent_emit(struct Scsi_Host *shost, u32 code, char *msg);
extern int qedf_create_sysfs_attr(struct Scsi_Host *shost,
struct sysfs_bin_attrs *iter);
extern void qedf_remove_sysfs_attr(struct Scsi_Host *shost,
struct sysfs_bin_attrs *iter);
#ifdef CONFIG_DEBUG_FS
/* DebugFS related code */
struct qedf_list_of_funcs {
char *oper_str;
ssize_t (*oper_func)(struct qedf_dbg_ctx *qedf);
};
struct qedf_debugfs_ops {
char *name;
struct qedf_list_of_funcs *qedf_funcs;
};
#define qedf_dbg_fileops(drv, ops) \
{ \
.owner = THIS_MODULE, \
.open = simple_open, \
.read = drv##_dbg_##ops##_cmd_read, \
.write = drv##_dbg_##ops##_cmd_write \
}
/* Used for debugfs sequential files */
#define qedf_dbg_fileops_seq(drv, ops) \
{ \
.owner = THIS_MODULE, \
.open = drv##_dbg_##ops##_open, \
.read = seq_read, \
.llseek = seq_lseek, \
.release = single_release, \
}
extern void qedf_dbg_host_init(struct qedf_dbg_ctx *qedf,
struct qedf_debugfs_ops *dops,
struct file_operations *fops);
extern void qedf_dbg_host_exit(struct qedf_dbg_ctx *qedf);
extern void qedf_dbg_init(char *drv_name);
extern void qedf_dbg_exit(void);
#endif /* CONFIG_DEBUG_FS */
#endif /* _QEDF_DBG_H_ */

View File

@ -0,0 +1,460 @@
/*
* QLogic FCoE Offload Driver
* Copyright (c) 2016 QLogic Corporation
*
* This software is available under the terms of the GNU General Public License
* (GPL) Version 2, available from the file COPYING in the main directory of
* this source tree.
*/
#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include "qedf.h"
#include "qedf_dbg.h"
static struct dentry *qedf_dbg_root;
/**
 * qedf_dbg_host_init - create the debugfs directory and files for one host
 * @qedf: debug context of the host (PF) that is starting up
 * @dops: table of file names, terminated by a NULL name
 * @fops: parallel array of file_operations, advanced in lockstep with @dops
 **/
void
qedf_dbg_host_init(struct qedf_dbg_ctx *qedf,
struct qedf_debugfs_ops *dops,
struct file_operations *fops)
{
char host_dirname[32];
struct dentry *file_dentry = NULL;
QEDF_INFO(qedf, QEDF_LOG_DEBUGFS, "Creating debugfs host node\n");
/* create pf dir */
sprintf(host_dirname, "host%u", qedf->host_no);
qedf->bdf_dentry = debugfs_create_dir(host_dirname, qedf_dbg_root);
if (!qedf->bdf_dentry)
return;
/* create debugfs files; any failure tears down the whole host dir */
while (dops) {
if (!(dops->name))
break;
file_dentry = debugfs_create_file(dops->name, 0600,
qedf->bdf_dentry, qedf,
fops);
if (!file_dentry) {
QEDF_INFO(qedf, QEDF_LOG_DEBUGFS,
"Debugfs entry %s creation failed\n",
dops->name);
debugfs_remove_recursive(qedf->bdf_dentry);
return;
}
dops++;
fops++;
}
}
/**
 * qedf_dbg_host_exit - remove one host's debugfs directory and contents
 * @qedf: debug context of the host that is stopping
 **/
void
qedf_dbg_host_exit(struct qedf_dbg_ctx *qedf)
{
QEDF_INFO(qedf, QEDF_LOG_DEBUGFS, "Destroying debugfs host "
"entry\n");
/* remove debugfs entries of this PF */
debugfs_remove_recursive(qedf->bdf_dentry);
qedf->bdf_dentry = NULL;
}
/**
 * qedf_dbg_init - create the driver-wide debugfs root directory
 * @drv_name: name of the directory to create under the debugfs root
 **/
void
qedf_dbg_init(char *drv_name)
{
QEDF_INFO(NULL, QEDF_LOG_DEBUGFS, "Creating debugfs root node\n");
/* create qed dir in root of debugfs. NULL means debugfs root */
qedf_dbg_root = debugfs_create_dir(drv_name, NULL);
if (!qedf_dbg_root)
QEDF_INFO(NULL, QEDF_LOG_DEBUGFS, "Init of debugfs "
"failed\n");
}
/**
 * qedf_dbg_exit - remove the driver-wide debugfs root directory
 **/
void
qedf_dbg_exit(void)
{
QEDF_INFO(NULL, QEDF_LOG_DEBUGFS, "Destroying debugfs root "
"entry\n");
/* remove qed dir in root of debugfs */
debugfs_remove_recursive(qedf_dbg_root);
qedf_dbg_root = NULL;
}
/*
 * Names of the per-host debugfs files. qedf_dbg_host_init() walks this
 * table in lockstep with qedf_dbg_fops[] below, so the two must stay in
 * the same order. Terminated by a NULL name.
 */
struct qedf_debugfs_ops qedf_debugfs_ops[] = {
{ "fp_int", NULL },
{ "io_trace", NULL },
{ "debug", NULL },
{ "stop_io_on_error", NULL},
{ "driver_stats", NULL},
{ "clear_stats", NULL},
{ "offload_stats", NULL},
/* This must be last */
{ NULL, NULL }
};
DECLARE_PER_CPU(struct qedf_percpu_iothread_s, qedf_percpu_iothreads);
/*
 * debugfs read handler for "fp_int": one line per active fastpath queue
 * with its completion count.
 *
 * Fix: the old code sprintf()'d directly into the __user buffer, which
 * bypasses copy_to_user() and is an invalid user-space access. Format
 * into a kernel buffer and copy out with simple_read_from_buffer().
 */
static ssize_t
qedf_dbg_fp_int_cmd_read(struct file *filp, char __user *buffer, size_t count,
	loff_t *ppos)
{
	size_t cnt = 0;
	const size_t cbuf_size = 32 * 1024; /* room for one line per queue */
	char *cbuf;
	int id;
	ssize_t ret;
	struct qedf_fastpath *fp = NULL;
	struct qedf_dbg_ctx *qedf_dbg =
		(struct qedf_dbg_ctx *)filp->private_data;
	struct qedf_ctx *qedf = container_of(qedf_dbg,
	    struct qedf_ctx, dbg_ctx);

	QEDF_INFO(qedf_dbg, QEDF_LOG_DEBUGFS, "entered\n");

	cbuf = vmalloc(cbuf_size);
	if (!cbuf)
		return 0;

	cnt = scnprintf(cbuf, cbuf_size, "\nFastpath I/O completions\n\n");

	for (id = 0; id < qedf->num_queues; id++) {
		fp = &(qedf->fp_array[id]);
		if (fp->sb_id == QEDF_SB_ID_NULL)
			continue;
		cnt += scnprintf(cbuf + cnt, cbuf_size - cnt, "#%d: %lu\n",
		    id, fp->completions);
	}

	ret = simple_read_from_buffer(buffer, count, ppos, cbuf, cnt);
	vfree(cbuf);
	return ret;
}
/* "fp_int" is effectively read-only; writes are accepted and discarded. */
static ssize_t
qedf_dbg_fp_int_cmd_write(struct file *filp, const char __user *buffer,
	size_t count, loff_t *ppos)
{
	if (*ppos || count == 0)
		return 0;

	return count;
}
/*
 * debugfs read handler for "debug": report the current qedf_debug mask.
 *
 * Fix: the old code sprintf()'d directly into the __user buffer (no
 * copy_to_user). Format into a small kernel buffer and copy out with
 * simple_read_from_buffer(), which also handles *ppos correctly.
 */
static ssize_t
qedf_dbg_debug_cmd_read(struct file *filp, char __user *buffer, size_t count,
	loff_t *ppos)
{
	int cnt;
	char cbuf[32];
	struct qedf_dbg_ctx *qedf =
		(struct qedf_dbg_ctx *)filp->private_data;

	QEDF_INFO(qedf, QEDF_LOG_DEBUGFS, "entered\n");
	cnt = scnprintf(cbuf, sizeof(cbuf), "debug mask = 0x%x\n",
	    qedf_debug);

	return simple_read_from_buffer(buffer, count, ppos, cbuf, cnt);
}
/*
 * debugfs write handler for "debug": set the qedf_debug mask. Writing
 * the decimal value 1 restores the driver's default log mask.
 *
 * Fix: memdup_user() does not NUL-terminate the copy, so kstrtouint()
 * could scan past the end of the allocation. memdup_user_nul() appends
 * the terminator.
 */
static ssize_t
qedf_dbg_debug_cmd_write(struct file *filp, const char __user *buffer,
	size_t count, loff_t *ppos)
{
	uint32_t val;
	void *kern_buf;
	int rval;
	struct qedf_dbg_ctx *qedf =
		(struct qedf_dbg_ctx *)filp->private_data;

	if (!count || *ppos)
		return 0;

	kern_buf = memdup_user_nul(buffer, count);
	if (IS_ERR(kern_buf))
		return PTR_ERR(kern_buf);

	rval = kstrtouint(kern_buf, 10, &val);
	kfree(kern_buf);
	if (rval)
		return rval;

	if (val == 1)
		qedf_debug = QEDF_DEFAULT_LOG_MASK;
	else
		qedf_debug = val;

	QEDF_INFO(qedf, QEDF_LOG_DEBUGFS, "Setting debug=0x%x.\n", val);
	return count;
}
/*
 * debugfs read handler for "stop_io_on_error": prints "true" or "false".
 *
 * Fix: the old code sprintf()'d directly into the __user buffer (no
 * copy_to_user). Use a small kernel buffer ("false\n" plus NUL needs 7
 * bytes) and simple_read_from_buffer().
 */
static ssize_t
qedf_dbg_stop_io_on_error_cmd_read(struct file *filp, char __user *buffer,
	size_t count, loff_t *ppos)
{
	int cnt;
	char cbuf[7];
	struct qedf_dbg_ctx *qedf_dbg =
		(struct qedf_dbg_ctx *)filp->private_data;
	struct qedf_ctx *qedf = container_of(qedf_dbg,
	    struct qedf_ctx, dbg_ctx);

	QEDF_INFO(qedf_dbg, QEDF_LOG_DEBUGFS, "entered\n");
	cnt = scnprintf(cbuf, sizeof(cbuf), "%s\n",
	    qedf->stop_io_on_error ? "true" : "false");

	return simple_read_from_buffer(buffer, count, ppos, cbuf, cnt);
}
/*
 * debugfs write handler for "stop_io_on_error": accepts "true", "false"
 * or "now" (the latter immediately flags all I/O on this host to stop).
 *
 * Fix: the old memdup_user(buffer, 6) copied a fixed six bytes even when
 * the user wrote fewer, reading past the end of the user buffer. Copy at
 * most count bytes into a zeroed stack buffer so strncmp() always sees a
 * NUL-terminated string.
 */
static ssize_t
qedf_dbg_stop_io_on_error_cmd_write(struct file *filp,
	const char __user *buffer, size_t count,
	loff_t *ppos)
{
	char kbuf[8] = { 0 };
	size_t len;
	struct qedf_dbg_ctx *qedf_dbg =
		(struct qedf_dbg_ctx *)filp->private_data;
	struct qedf_ctx *qedf = container_of(qedf_dbg, struct qedf_ctx,
	    dbg_ctx);

	QEDF_INFO(qedf_dbg, QEDF_LOG_DEBUGFS, "entered\n");

	if (!count || *ppos)
		return 0;

	len = min_t(size_t, count, sizeof(kbuf) - 1);
	if (copy_from_user(kbuf, buffer, len))
		return -EFAULT;

	if (strncmp(kbuf, "false", 5) == 0)
		qedf->stop_io_on_error = false;
	else if (strncmp(kbuf, "true", 4) == 0)
		qedf->stop_io_on_error = true;
	else if (strncmp(kbuf, "now", 3) == 0)
		/* Trigger from user to stop all I/O on this host */
		set_bit(QEDF_DBG_STOP_IO, &qedf->flags);

	return count;
}
/*
 * seq_file show handler for "io_trace": dump the circular I/O trace
 * buffer, oldest entry first, one colon-separated record per line.
 */
static int
qedf_io_trace_show(struct seq_file *s, void *unused)
{
int i, idx = 0;
struct qedf_ctx *qedf = s->private;
struct qedf_dbg_ctx *qedf_dbg = &qedf->dbg_ctx;
struct qedf_io_log *io_log;
unsigned long flags;
if (!qedf_io_tracing) {
seq_puts(s, "I/O tracing not enabled.\n");
goto out;
}
QEDF_INFO(qedf_dbg, QEDF_LOG_DEBUGFS, "entered\n");
/* Hold the trace lock so entries are not overwritten mid-dump. */
spin_lock_irqsave(&qedf->io_trace_lock, flags);
/* io_trace_idx is the next write slot, i.e. the oldest entry. */
idx = qedf->io_trace_idx;
for (i = 0; i < QEDF_IO_TRACE_SIZE; i++) {
io_log = &qedf->io_trace_buf[idx];
seq_printf(s, "%d:", io_log->direction);
seq_printf(s, "0x%x:", io_log->task_id);
seq_printf(s, "0x%06x:", io_log->port_id);
seq_printf(s, "%d:", io_log->lun);
seq_printf(s, "0x%02x:", io_log->op);
seq_printf(s, "0x%02x%02x%02x%02x:", io_log->lba[0],
io_log->lba[1], io_log->lba[2], io_log->lba[3]);
seq_printf(s, "%d:", io_log->bufflen);
seq_printf(s, "%d:", io_log->sg_count);
seq_printf(s, "0x%08x:", io_log->result);
seq_printf(s, "%lu:", io_log->jiffies);
seq_printf(s, "%d:", io_log->refcount);
seq_printf(s, "%d:", io_log->req_cpu);
seq_printf(s, "%d:", io_log->int_cpu);
seq_printf(s, "%d:", io_log->rsp_cpu);
seq_printf(s, "%d\n", io_log->sge_type);
/* Wrap around the circular buffer. */
idx++;
if (idx == QEDF_IO_TRACE_SIZE)
idx = 0;
}
spin_unlock_irqrestore(&qedf->io_trace_lock, flags);
out:
return 0;
}
/* debugfs open: bind the io_trace seq_file to this adapter's qedf_ctx. */
static int
qedf_dbg_io_trace_open(struct inode *inode, struct file *file)
{
	struct qedf_dbg_ctx *dbg = inode->i_private;

	return single_open(file, qedf_io_trace_show,
	    container_of(dbg, struct qedf_ctx, dbg_ctx));
}
/*
 * seq_file show handler for "driver_stats": print the command manager's
 * free request count, the SGE I/O counters, and per-offloaded-rport
 * queue depths.
 */
static int
qedf_driver_stats_show(struct seq_file *s, void *unused)
{
struct qedf_ctx *qedf = s->private;
struct qedf_rport *fcport;
struct fc_rport_priv *rdata;
seq_printf(s, "cmg_mgr free io_reqs: %d\n",
atomic_read(&qedf->cmd_mgr->free_list_cnt));
seq_printf(s, "slow SGEs: %d\n", qedf->slow_sge_ios);
seq_printf(s, "single SGEs: %d\n", qedf->single_sge_ios);
seq_printf(s, "fast SGEs: %d\n\n", qedf->fast_sge_ios);
seq_puts(s, "Offloaded ports:\n\n");
/* The fcports list is traversed under RCU. */
rcu_read_lock();
list_for_each_entry_rcu(fcport, &qedf->fcports, peers) {
rdata = fcport->rdata;
if (rdata == NULL)
continue;
seq_printf(s, "%06x: free_sqes: %d, num_active_ios: %d\n",
rdata->ids.port_id, atomic_read(&fcport->free_sqes),
atomic_read(&fcport->num_active_ios));
}
rcu_read_unlock();
return 0;
}
/* debugfs open: bind the driver_stats seq_file to this adapter. */
static int
qedf_dbg_driver_stats_open(struct inode *inode, struct file *file)
{
	struct qedf_dbg_ctx *dbg = inode->i_private;

	return single_open(file, qedf_driver_stats_show,
	    container_of(dbg, struct qedf_ctx, dbg_ctx));
}
/* "clear_stats" is write-only; reads always report EOF (0 bytes). */
static ssize_t
qedf_dbg_clear_stats_cmd_read(struct file *filp, char __user *buffer,
	size_t count, loff_t *ppos)
{
	int len = min_t(int, count, 0 - *ppos);

	*ppos += len;
	return len;
}
/* Writing anything to "clear_stats" zeroes the SGE I/O counters. */
static ssize_t
qedf_dbg_clear_stats_cmd_write(struct file *filp,
	const char __user *buffer, size_t count,
	loff_t *ppos)
{
	struct qedf_dbg_ctx *dbg =
		(struct qedf_dbg_ctx *)filp->private_data;
	struct qedf_ctx *qedf = container_of(dbg, struct qedf_ctx,
	    dbg_ctx);

	QEDF_INFO(dbg, QEDF_LOG_DEBUGFS, "Clearing stat counters.\n");

	if (!count || *ppos)
		return 0;

	/* Clear stat counters exposed by 'stats' node */
	qedf->slow_sge_ios = 0;
	qedf->single_sge_ios = 0;
	qedf->fast_sge_ios = 0;

	return count;
}
/*
 * seq_file show handler for "offload_stats": query the qed core for the
 * firmware's FCoE counters and print them one per line.
 */
static int
qedf_offload_stats_show(struct seq_file *s, void *unused)
{
struct qedf_ctx *qedf = s->private;
struct qed_fcoe_stats *fw_fcoe_stats;
fw_fcoe_stats = kmalloc(sizeof(struct qed_fcoe_stats), GFP_KERNEL);
if (!fw_fcoe_stats) {
QEDF_ERR(&(qedf->dbg_ctx), "Could not allocate memory for "
"fw_fcoe_stats.\n");
goto out;
}
/* Query firmware for offload stats */
qed_ops->get_stats(qedf->cdev, fw_fcoe_stats);
seq_printf(s, "fcoe_rx_byte_cnt=%llu\n"
"fcoe_rx_data_pkt_cnt=%llu\n"
"fcoe_rx_xfer_pkt_cnt=%llu\n"
"fcoe_rx_other_pkt_cnt=%llu\n"
"fcoe_silent_drop_pkt_cmdq_full_cnt=%u\n"
"fcoe_silent_drop_pkt_crc_error_cnt=%u\n"
"fcoe_silent_drop_pkt_task_invalid_cnt=%u\n"
"fcoe_silent_drop_total_pkt_cnt=%u\n"
"fcoe_silent_drop_pkt_rq_full_cnt=%u\n"
"fcoe_tx_byte_cnt=%llu\n"
"fcoe_tx_data_pkt_cnt=%llu\n"
"fcoe_tx_xfer_pkt_cnt=%llu\n"
"fcoe_tx_other_pkt_cnt=%llu\n",
fw_fcoe_stats->fcoe_rx_byte_cnt,
fw_fcoe_stats->fcoe_rx_data_pkt_cnt,
fw_fcoe_stats->fcoe_rx_xfer_pkt_cnt,
fw_fcoe_stats->fcoe_rx_other_pkt_cnt,
fw_fcoe_stats->fcoe_silent_drop_pkt_cmdq_full_cnt,
fw_fcoe_stats->fcoe_silent_drop_pkt_crc_error_cnt,
fw_fcoe_stats->fcoe_silent_drop_pkt_task_invalid_cnt,
fw_fcoe_stats->fcoe_silent_drop_total_pkt_cnt,
fw_fcoe_stats->fcoe_silent_drop_pkt_rq_full_cnt,
fw_fcoe_stats->fcoe_tx_byte_cnt,
fw_fcoe_stats->fcoe_tx_data_pkt_cnt,
fw_fcoe_stats->fcoe_tx_xfer_pkt_cnt,
fw_fcoe_stats->fcoe_tx_other_pkt_cnt);
kfree(fw_fcoe_stats);
out:
return 0;
}
/* debugfs open: bind the offload_stats seq_file to this adapter. */
static int
qedf_dbg_offload_stats_open(struct inode *inode, struct file *file)
{
	struct qedf_dbg_ctx *dbg = inode->i_private;

	return single_open(file, qedf_offload_stats_show,
	    container_of(dbg, struct qedf_ctx, dbg_ctx));
}
/*
 * file_operations for the per-host debugfs nodes; must stay in the same
 * order as qedf_debugfs_ops[] above (qedf_dbg_host_init walks both
 * tables in lockstep).
 */
const struct file_operations qedf_dbg_fops[] = {
qedf_dbg_fileops(qedf, fp_int),
qedf_dbg_fileops_seq(qedf, io_trace),
qedf_dbg_fileops(qedf, debug),
qedf_dbg_fileops(qedf, stop_io_on_error),
qedf_dbg_fileops_seq(qedf, driver_stats),
qedf_dbg_fileops(qedf, clear_stats),
qedf_dbg_fileops_seq(qedf, offload_stats),
/* This must be last */
{ NULL, NULL },
};
#else /* CONFIG_DEBUG_FS */
/*
 * NOTE(review): these are bare declarations, not inline stub definitions,
 * so with CONFIG_DEBUG_FS unset callers will fail to link unless stubs
 * exist elsewhere. Also, qedf_dbg_host_init is declared here with one
 * parameter but defined above with three — confirm against the header.
 */
void qedf_dbg_host_init(struct qedf_dbg_ctx *);
void qedf_dbg_host_exit(struct qedf_dbg_ctx *);
void qedf_dbg_init(char *);
void qedf_dbg_exit(void);
#endif /* CONFIG_DEBUG_FS */

View File

@ -0,0 +1,949 @@
/*
* QLogic FCoE Offload Driver
* Copyright (c) 2016 Cavium Inc.
*
* This software is available under the terms of the GNU General Public License
* (GPL) Version 2, available from the file COPYING in the main directory of
* this source tree.
*/
#include "qedf.h"
/*
 * Build and send an offloaded ELS request of type @op for @fcport.
 * @data/@data_len is the ELS payload, @cb_func/@cb_arg the completion
 * callback (invoked from qedf_process_els_compl), and @timer_msec an
 * optional command timeout. Returns 0 on success or a negative errno;
 * on failure the callback is not invoked and the caller keeps @cb_arg.
 *
 * It's assumed that the lock is held when calling this function.
 *
 * Fix: the allocation retry loop used mdelay(20 * USEC_PER_MSEC), i.e. a
 * 20,000 ms busy-wait per retry — mdelay() already takes milliseconds.
 * The loop intends ~10 s total in 20 ms steps. Also dropped a redundant
 * "else rc = 0" (rc is already 0 on that path).
 */
static int qedf_initiate_els(struct qedf_rport *fcport, unsigned int op,
	void *data, uint32_t data_len,
	void (*cb_func)(struct qedf_els_cb_arg *cb_arg),
	struct qedf_els_cb_arg *cb_arg, uint32_t timer_msec)
{
	struct qedf_ctx *qedf = fcport->qedf;
	struct fc_lport *lport = qedf->lport;
	struct qedf_ioreq *els_req;
	struct qedf_mp_req *mp_req;
	struct fc_frame_header *fc_hdr;
	struct fcoe_task_context *task;
	int rc = 0;
	uint32_t did, sid;
	uint16_t xid;
	uint32_t start_time = jiffies / HZ;
	uint32_t current_time;

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Sending ELS\n");

	rc = fc_remote_port_chkready(fcport->rport);
	if (rc) {
		QEDF_ERR(&(qedf->dbg_ctx), "els 0x%x: rport not ready\n", op);
		rc = -EAGAIN;
		goto els_err;
	}
	if (lport->state != LPORT_ST_READY || !(lport->link_up)) {
		QEDF_ERR(&(qedf->dbg_ctx), "els 0x%x: link is not ready\n",
		    op);
		rc = -EAGAIN;
		goto els_err;
	}
	if (!(test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags))) {
		QEDF_ERR(&(qedf->dbg_ctx), "els 0x%x: fcport not ready\n", op);
		rc = -EINVAL;
		goto els_err;
	}

retry_els:
	els_req = qedf_alloc_cmd(fcport, QEDF_ELS);
	if (!els_req) {
		/* Retry for up to ~10 seconds in 20 ms steps. */
		current_time = jiffies / HZ;
		if ((current_time - start_time) > 10) {
			QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
			    "els: Failed els 0x%x\n", op);
			rc = -ENOMEM;
			goto els_err;
		}
		mdelay(20);
		goto retry_els;
	}

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "initiate_els els_req = "
	    "0x%p cb_arg = %p xid = %x\n", els_req, cb_arg,
	    els_req->xid);
	els_req->sc_cmd = NULL;
	els_req->cmd_type = QEDF_ELS;
	els_req->fcport = fcport;
	els_req->cb_func = cb_func;
	cb_arg->io_req = els_req;
	cb_arg->op = op;
	els_req->cb_arg = cb_arg;
	els_req->data_xfer_len = data_len;

	/* Record which cpu this request is associated with */
	els_req->cpu = smp_processor_id();

	mp_req = (struct qedf_mp_req *)&(els_req->mp_req);
	rc = qedf_init_mp_req(els_req);
	if (rc) {
		QEDF_ERR(&(qedf->dbg_ctx), "ELS MP request init failed\n");
		kref_put(&els_req->refcount, qedf_release_cmd);
		goto els_err;
	}

	/* Fill ELS Payload */
	if ((op >= ELS_LS_RJT) && (op <= ELS_AUTH_ELS)) {
		memcpy(mp_req->req_buf, data, data_len);
	} else {
		QEDF_ERR(&(qedf->dbg_ctx), "Invalid ELS op 0x%x\n", op);
		els_req->cb_func = NULL;
		els_req->cb_arg = NULL;
		kref_put(&els_req->refcount, qedf_release_cmd);
		rc = -EINVAL;
	}
	if (rc)
		goto els_err;

	/* Fill FC header */
	fc_hdr = &(mp_req->req_fc_hdr);
	did = fcport->rdata->ids.port_id;
	sid = fcport->sid;
	__fc_fill_fc_hdr(fc_hdr, FC_RCTL_ELS_REQ, sid, did,
	    FC_TYPE_ELS, FC_FC_FIRST_SEQ | FC_FC_END_SEQ |
	    FC_FC_SEQ_INIT, 0);

	/* Obtain exchange id */
	xid = els_req->xid;

	/* Initialize task context for this IO request */
	task = qedf_get_task_mem(&qedf->tasks, xid);
	qedf_init_mp_task(els_req, task);

	/* Put timer on original I/O request */
	if (timer_msec)
		qedf_cmd_timer_set(qedf, els_req, timer_msec);

	qedf_add_to_sq(fcport, xid, 0, FCOE_TASK_TYPE_MIDPATH, 0);

	/* Ring doorbell */
	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Ringing doorbell for ELS "
	    "req\n");
	qedf_ring_doorbell(fcport);
els_err:
	return rc;
}
/*
 * Completion handler for an ELS midpath request: stop its timer, record
 * the response length from the CQE, invoke the registered callback (which
 * owns freeing cb_arg), and drop the request's reference.
 */
void qedf_process_els_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
struct qedf_ioreq *els_req)
{
struct fcoe_task_context *task_ctx;
struct scsi_cmnd *sc_cmd;
uint16_t xid;
struct fcoe_cqe_midpath_info *mp_info;
QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Entered with xid = 0x%x"
" cmd_type = %d.\n", els_req->xid, els_req->cmd_type);
/* Kill the ELS timer */
cancel_delayed_work(&els_req->timeout_work);
xid = els_req->xid;
/* NOTE(review): task_ctx and sc_cmd are fetched but never used below. */
task_ctx = qedf_get_task_mem(&qedf->tasks, xid);
sc_cmd = els_req->sc_cmd;
/* Get ELS response length from CQE */
mp_info = &cqe->cqe_info.midpath_info;
els_req->mp_req.resp_len = mp_info->data_placement_size;
/* Parse ELS response */
if ((els_req->cb_func) && (els_req->cb_arg)) {
els_req->cb_func(els_req->cb_arg);
els_req->cb_arg = NULL;
}
kref_put(&els_req->refcount, qedf_release_cmd);
}
/*
 * Completion callback for a sent RRQ: drop the reference held on the
 * aborted original I/O request (returning it to the command pool) and
 * free the callback argument.
 */
static void qedf_rrq_compl(struct qedf_els_cb_arg *cb_arg)
{
	struct qedf_ioreq *rrq_req = cb_arg->io_req;
	struct qedf_ioreq *orig_io_req;
	struct qedf_ctx *qedf = rrq_req->fcport->qedf;
	int refcount;

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Entered.\n");

	orig_io_req = cb_arg->aborted_io_req;
	if (!orig_io_req)
		goto out_free;

	/* The original request's timer only still runs on the normal path. */
	if (rrq_req->event != QEDF_IOREQ_EV_ELS_TMO &&
	    rrq_req->event != QEDF_IOREQ_EV_ELS_ERR_DETECT)
		cancel_delayed_work_sync(&orig_io_req->timeout_work);

	refcount = kref_read(&orig_io_req->refcount);
	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "rrq_compl: orig io = %p,"
	    " orig xid = 0x%x, rrq_xid = 0x%x, refcount=%d\n",
	    orig_io_req, orig_io_req->xid, rrq_req->xid, refcount);

	/* This should return the aborted io_req to the command pool */
	kref_put(&orig_io_req->refcount, qedf_release_cmd);

out_free:
	kfree(cb_arg);
}
/*
 * Send an ELS RRQ (Reinstate Recovery Qualifier) for an aborted I/O
 * request so its exchange can eventually be reused.
 *
 * Assumes kref is already held by caller on @aborted_io_req; that
 * reference is dropped here only on failure — the success path releases
 * it later in qedf_rrq_compl().
 */
int qedf_send_rrq(struct qedf_ioreq *aborted_io_req)
{
struct fc_els_rrq rrq;
struct qedf_rport *fcport;
struct fc_lport *lport;
struct qedf_els_cb_arg *cb_arg = NULL;
struct qedf_ctx *qedf;
uint32_t sid;
uint32_t r_a_tov;
int rc;
if (!aborted_io_req) {
QEDF_ERR(NULL, "abort_io_req is NULL.\n");
return -EINVAL;
}
fcport = aborted_io_req->fcport;
/* Check that fcport is still offloaded */
if (!(test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags))) {
QEDF_ERR(NULL, "fcport is no longer offloaded.\n");
return -EINVAL;
}
if (!fcport->qedf) {
QEDF_ERR(NULL, "fcport->qedf is NULL.\n");
return -EINVAL;
}
qedf = fcport->qedf;
lport = qedf->lport;
sid = fcport->sid;
r_a_tov = lport->r_a_tov;
QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Sending RRQ orig "
"io = %p, orig_xid = 0x%x\n", aborted_io_req,
aborted_io_req->xid);
memset(&rrq, 0, sizeof(rrq));
/* cb_arg is freed by qedf_rrq_compl() on the success path. */
cb_arg = kzalloc(sizeof(struct qedf_els_cb_arg), GFP_NOIO);
if (!cb_arg) {
QEDF_ERR(&(qedf->dbg_ctx), "Unable to allocate cb_arg for "
"RRQ\n");
rc = -ENOMEM;
goto rrq_err;
}
cb_arg->aborted_io_req = aborted_io_req;
/* Build the RRQ payload: our S_ID plus the aborted exchange's IDs. */
rrq.rrq_cmd = ELS_RRQ;
hton24(rrq.rrq_s_id, sid);
rrq.rrq_ox_id = htons(aborted_io_req->xid);
rrq.rrq_rx_id =
htons(aborted_io_req->task->tstorm_st_context.read_write.rx_id);
rc = qedf_initiate_els(fcport, ELS_RRQ, &rrq, sizeof(rrq),
qedf_rrq_compl, cb_arg, r_a_tov);
rrq_err:
if (rc) {
QEDF_ERR(&(qedf->dbg_ctx), "RRQ failed - release orig io "
"req 0x%x\n", aborted_io_req->xid);
/* On failure, free cb_arg and drop the caller's kref here. */
kfree(cb_arg);
kref_put(&aborted_io_req->refcount, qedf_release_cmd);
}
return rc;
}
/*
 * Hand a firmware-completed ELS response back to libfc as an L2 frame:
 * rebuild the FC header fields, restore libfc's OX_ID, set the frame
 * attributes and feed the frame into fc_exch_recv().
 */
static void qedf_process_l2_frame_compl(struct qedf_rport *fcport,
struct fc_frame *fp,
u16 l2_oxid)
{
struct fc_lport *lport = fcport->qedf->lport;
struct fc_frame_header *fh;
u32 crc;
fh = (struct fc_frame_header *)fc_frame_header_get(fp);
/* Set the OXID we return to what libfc used */
if (l2_oxid != FC_XID_UNKNOWN)
fh->fh_ox_id = htons(l2_oxid);
/* Setup header fields */
fh->fh_r_ctl = FC_RCTL_ELS_REP;
fh->fh_type = FC_TYPE_ELS;
/* Last sequence, end sequence */
fh->fh_f_ctl[0] = 0x98;
hton24(fh->fh_d_id, lport->port_id);
hton24(fh->fh_s_id, fcport->rdata->ids.port_id);
fh->fh_rx_id = 0xffff;
/* Set frame attributes */
/* NOTE(review): crc is computed before fc_frame_init() — presumably
 * init would disturb the frame state used by fcoe_fc_crc(); confirm. */
crc = fcoe_fc_crc(fp);
fc_frame_init(fp);
fr_dev(fp) = lport;
fr_sof(fp) = FC_SOF_I3;
fr_eof(fp) = FC_EOF_T;
fr_crc(fp) = cpu_to_le32(~crc);
/* Send completed request to libfc */
fc_exch_recv(lport, fp);
}
/*
 * In instances where an ELS command times out we may need to restart the
 * rport by logging out and then logging back in.
 */
void qedf_restart_rport(struct qedf_rport *fcport)
{
	struct fc_lport *lport;
	struct fc_rport_priv *rdata;
	u32 port_id;

	if (!fcport)
		return;

	rdata = fcport->rdata;
	if (!rdata)
		return;

	lport = fcport->qedf->lport;
	port_id = rdata->ids.port_id;
	QEDF_ERR(&(fcport->qedf->dbg_ctx),
	    "LOGO port_id=%x.\n", port_id);
	fc_rport_logoff(rdata);

	/* Recreate the rport and log back in */
	rdata = fc_rport_create(lport, port_id);
	if (rdata)
		fc_rport_login(rdata);
}
/*
 * Completion callback for ELS commands sent on behalf of libfc: wrap the
 * firmware response into a struct fc_frame and return it to libfc via
 * qedf_process_l2_frame_compl(). This callback owns @cb_arg and must
 * free it on every exit path.
 *
 * Fix: the ELS-timeout path and the fc_frame_alloc() failure path used a
 * bare "return", leaking cb_arg; both now fall through to kfree(cb_arg).
 */
static void qedf_l2_els_compl(struct qedf_els_cb_arg *cb_arg)
{
	struct qedf_ioreq *els_req;
	struct qedf_rport *fcport;
	struct qedf_mp_req *mp_req;
	struct fc_frame *fp;
	struct fc_frame_header *fh, *mp_fc_hdr;
	void *resp_buf, *fc_payload;
	u32 resp_len;
	u16 l2_oxid;

	l2_oxid = cb_arg->l2_oxid;
	els_req = cb_arg->io_req;
	if (!els_req) {
		QEDF_ERR(NULL, "els_req is NULL.\n");
		goto free_arg;
	}

	/*
	 * If we are flushing the command just free the cb_arg as none of the
	 * response data will be valid.
	 */
	if (els_req->event == QEDF_IOREQ_EV_ELS_FLUSH)
		goto free_arg;

	fcport = els_req->fcport;
	mp_req = &(els_req->mp_req);
	mp_fc_hdr = &(mp_req->resp_fc_hdr);
	resp_len = mp_req->resp_len;
	resp_buf = mp_req->resp_buf;

	/*
	 * If a middle path ELS command times out, don't try to return
	 * the command but rather do any internal cleanup and then libfc
	 * timeout the command and clean up its internal resources.
	 */
	if (els_req->event == QEDF_IOREQ_EV_ELS_TMO) {
		/*
		 * If ADISC times out, libfc will timeout the exchange and then
		 * try to send a PLOGI which will timeout since the session is
		 * still offloaded. Force libfc to logout the session which
		 * will offload the connection and allow the PLOGI response to
		 * flow over the LL2 path.
		 */
		if (cb_arg->op == ELS_ADISC)
			qedf_restart_rport(fcport);
		goto free_arg;	/* was a bare return: leaked cb_arg */
	}

	if (sizeof(struct fc_frame_header) + resp_len > QEDF_PAGE_SIZE) {
		QEDF_ERR(&(fcport->qedf->dbg_ctx), "resp_len is "
		    "beyond page size.\n");
		goto free_arg;
	}

	fp = fc_frame_alloc(fcport->qedf->lport, resp_len);
	if (!fp) {
		QEDF_ERR(&(fcport->qedf->dbg_ctx),
		    "fc_frame_alloc failure.\n");
		goto free_arg;	/* was a bare return: leaked cb_arg */
	}

	/* Copy frame header from firmware into fp */
	fh = (struct fc_frame_header *)fc_frame_header_get(fp);
	memcpy(fh, mp_fc_hdr, sizeof(struct fc_frame_header));

	/* Copy payload from firmware into fp */
	fc_payload = fc_frame_payload_get(fp, resp_len);
	memcpy(fc_payload, resp_buf, resp_len);

	QEDF_INFO(&(fcport->qedf->dbg_ctx), QEDF_LOG_ELS,
	    "Completing OX_ID 0x%x back to libfc.\n", l2_oxid);
	qedf_process_l2_frame_compl(fcport, fp, l2_oxid);

free_arg:
	kfree(cb_arg);
}
/*
 * Send an offloaded ADISC built from the libfc frame @fp for @fcport.
 * The original OX_ID is stashed in cb_arg so the response can be handed
 * back to libfc by qedf_l2_els_compl().
 */
int qedf_send_adisc(struct qedf_rport *fcport, struct fc_frame *fp)
{
	struct fc_els_adisc *adisc;
	struct fc_frame_header *fh = fc_frame_header_get(fp);
	struct fc_lport *lport = fcport->qedf->lport;
	struct qedf_ctx *qedf = fcport->qedf;
	struct qedf_els_cb_arg *cb_arg;
	uint32_t r_a_tov = lport->r_a_tov;
	int rc;

	cb_arg = kzalloc(sizeof(struct qedf_els_cb_arg), GFP_NOIO);
	if (!cb_arg) {
		QEDF_ERR(&(qedf->dbg_ctx), "Unable to allocate cb_arg for "
		    "ADISC\n");
		rc = -ENOMEM;
		goto adisc_err;
	}
	cb_arg->l2_oxid = ntohs(fh->fh_ox_id);

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
	    "Sending ADISC ox_id=0x%x.\n", cb_arg->l2_oxid);

	adisc = fc_frame_payload_get(fp, sizeof(*adisc));
	rc = qedf_initiate_els(fcport, ELS_ADISC, adisc, sizeof(*adisc),
	    qedf_l2_els_compl, cb_arg, r_a_tov);

adisc_err:
	if (rc) {
		QEDF_ERR(&(qedf->dbg_ctx), "ADISC failed.\n");
		kfree(cb_arg);
	}
	return rc;
}
/*
 * qedf_srr_compl() - Completion handler for an SRR ELS request.
 * @cb_arg: callback context carrying the SRR request and the original I/O.
 *
 * Normalizes the firmware response into a struct fc_frame, aborts the
 * original I/O on LS_RJT, and drops the reference taken by
 * qedf_send_srr(). On SRR timeout only the resources are released.
 *
 * Fix: the original code jumped to a single out_free label when
 * cb_arg->aborted_io_req was NULL, which then executed
 * kref_put(&orig_io_req->refcount, ...) on the NULL pointer. The labels
 * are now split so the kref_put is only reached with a valid pointer.
 */
static void qedf_srr_compl(struct qedf_els_cb_arg *cb_arg)
{
	struct qedf_ioreq *orig_io_req;
	struct qedf_ioreq *srr_req;
	struct qedf_mp_req *mp_req;
	struct fc_frame_header *mp_fc_hdr, *fh;
	struct fc_frame *fp;
	void *resp_buf, *fc_payload;
	u32 resp_len;
	struct fc_lport *lport;
	struct qedf_ctx *qedf;
	int refcount;
	u8 opcode;

	srr_req = cb_arg->io_req;
	qedf = srr_req->fcport->qedf;
	lport = qedf->lport;

	orig_io_req = cb_arg->aborted_io_req;

	/* No original I/O: nothing to complete, no reference to drop */
	if (!orig_io_req)
		goto out_free;

	clear_bit(QEDF_CMD_SRR_SENT, &orig_io_req->flags);

	if (srr_req->event != QEDF_IOREQ_EV_ELS_TMO &&
	    srr_req->event != QEDF_IOREQ_EV_ELS_ERR_DETECT)
		cancel_delayed_work_sync(&orig_io_req->timeout_work);

	refcount = kref_read(&orig_io_req->refcount);
	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Entered: orig_io=%p,"
	    " orig_io_xid=0x%x, rec_xid=0x%x, refcount=%d\n",
	    orig_io_req, orig_io_req->xid, srr_req->xid, refcount);

	/* If a SRR times out, simply free resources */
	if (srr_req->event == QEDF_IOREQ_EV_ELS_TMO)
		goto out_put;

	/* Normalize response data into struct fc_frame */
	mp_req = &(srr_req->mp_req);
	mp_fc_hdr = &(mp_req->resp_fc_hdr);
	resp_len = mp_req->resp_len;
	resp_buf = mp_req->resp_buf;

	fp = fc_frame_alloc(lport, resp_len);
	if (!fp) {
		QEDF_ERR(&(qedf->dbg_ctx),
		    "fc_frame_alloc failure.\n");
		goto out_put;
	}

	/* Copy frame header from firmware into fp */
	fh = (struct fc_frame_header *)fc_frame_header_get(fp);
	memcpy(fh, mp_fc_hdr, sizeof(struct fc_frame_header));

	/* Copy payload from firmware into fp */
	fc_payload = fc_frame_payload_get(fp, resp_len);
	memcpy(fc_payload, resp_buf, resp_len);

	opcode = fc_frame_payload_op(fp);
	switch (opcode) {
	case ELS_LS_ACC:
		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
		    "SRR success.\n");
		break;
	case ELS_LS_RJT:
		QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_ELS,
		    "SRR rejected.\n");
		/* Target refused retransmission; abort the original I/O */
		qedf_initiate_abts(orig_io_req, true);
		break;
	}

	fc_frame_free(fp);
out_put:
	/* Put reference for original command since SRR completed */
	kref_put(&orig_io_req->refcount, qedf_release_cmd);
out_free:
	kfree(cb_arg);
}
/*
 * qedf_send_srr() - Send an FCP SRR (Sequence Retransmission Request).
 * @orig_io_req: original I/O whose sequence is to be retransmitted
 * @offset:      relative offset at which retransmission should resume
 * @r_ctl:       R_CTL of the information unit being requested again
 *
 * Takes a reference on @orig_io_req that is dropped by qedf_srr_compl()
 * on completion, or here if submission fails (in which case an ABTS is
 * issued for the original I/O).
 *
 * Fix: removed the dead local `sid` — it was assigned from fcport->sid
 * but never used (the SRR payload does not carry an S_ID field).
 *
 * Return: 0 on success, negative errno on failure.
 */
static int qedf_send_srr(struct qedf_ioreq *orig_io_req, u32 offset, u8 r_ctl)
{
	struct fcp_srr srr;
	struct qedf_ctx *qedf;
	struct qedf_rport *fcport;
	struct fc_lport *lport;
	struct qedf_els_cb_arg *cb_arg = NULL;
	u32 r_a_tov;
	int rc;

	if (!orig_io_req) {
		QEDF_ERR(NULL, "orig_io_req is NULL.\n");
		return -EINVAL;
	}

	fcport = orig_io_req->fcport;

	/* Check that fcport is still offloaded */
	if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
		QEDF_ERR(NULL, "fcport is no longer offloaded.\n");
		return -EINVAL;
	}

	if (!fcport->qedf) {
		QEDF_ERR(NULL, "fcport->qedf is NULL.\n");
		return -EINVAL;
	}

	/* Take reference until SRR command completion */
	kref_get(&orig_io_req->refcount);

	qedf = fcport->qedf;
	lport = qedf->lport;
	r_a_tov = lport->r_a_tov;

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Sending SRR orig_io=%p, "
	    "orig_xid=0x%x\n", orig_io_req, orig_io_req->xid);
	memset(&srr, 0, sizeof(srr));

	cb_arg = kzalloc(sizeof(struct qedf_els_cb_arg), GFP_NOIO);
	if (!cb_arg) {
		QEDF_ERR(&(qedf->dbg_ctx), "Unable to allocate cb_arg for "
			  "SRR\n");
		rc = -ENOMEM;
		goto srr_err;
	}

	cb_arg->aborted_io_req = orig_io_req;

	srr.srr_op = ELS_SRR;
	srr.srr_ox_id = htons(orig_io_req->xid);
	srr.srr_rx_id = htons(orig_io_req->rx_id);
	srr.srr_rel_off = htonl(offset);
	srr.srr_r_ctl = r_ctl;

	rc = qedf_initiate_els(fcport, ELS_SRR, &srr, sizeof(srr),
	    qedf_srr_compl, cb_arg, r_a_tov);

srr_err:
	if (rc) {
		QEDF_ERR(&(qedf->dbg_ctx), "SRR failed - release orig_io_req"
			  "=0x%x\n", orig_io_req->xid);
		kfree(cb_arg);
		/* If we fail to queue SRR, send ABTS to orig_io */
		qedf_initiate_abts(orig_io_req, true);
		/* Drop the reference taken above */
		kref_put(&orig_io_req->refcount, qedf_release_cmd);
	} else {
		/* Tell other threads that SRR is in progress */
		set_bit(QEDF_CMD_SRR_SENT, &orig_io_req->flags);
	}

	return rc;
}
/*
 * qedf_initiate_seq_cleanup() - Queue a firmware sequence-cleanup task.
 * @orig_io_req: I/O whose sequence should be cleaned up
 * @offset:      sequence offset at which cleanup starts
 * @r_ctl:       R_CTL value saved for a possible follow-up SRR
 *
 * Takes an extra reference on @orig_io_req for the cleanup request and
 * arms a cleanup timeout; the completion is handled by
 * qedf_process_seq_cleanup_compl(). On cb_arg allocation failure the
 * request is silently skipped (best-effort recovery).
 */
static void qedf_initiate_seq_cleanup(struct qedf_ioreq *orig_io_req,
u32 offset, u8 r_ctl)
{
struct qedf_rport *fcport;
unsigned long flags;
struct qedf_els_cb_arg *cb_arg;
fcport = orig_io_req->fcport;
QEDF_INFO(&(fcport->qedf->dbg_ctx), QEDF_LOG_ELS,
"Doing sequence cleanup for xid=0x%x offset=%u.\n",
orig_io_req->xid, offset);
cb_arg = kzalloc(sizeof(struct qedf_els_cb_arg), GFP_NOIO);
if (!cb_arg) {
QEDF_ERR(&(fcport->qedf->dbg_ctx), "Unable to allocate cb_arg "
"for sequence cleanup\n");
return;
}
/* Get reference for cleanup request */
kref_get(&orig_io_req->refcount);
orig_io_req->cmd_type = QEDF_SEQ_CLEANUP;
/* Saved so the completion handler can issue an SRR afterwards */
cb_arg->offset = offset;
cb_arg->r_ctl = r_ctl;
orig_io_req->cb_arg = cb_arg;
qedf_cmd_timer_set(fcport->qedf, orig_io_req,
QEDF_CLEANUP_TIMEOUT * HZ);
/* Post the cleanup task and ring the doorbell under the rport lock */
spin_lock_irqsave(&fcport->rport_lock, flags);
qedf_add_to_sq(fcport, orig_io_req->xid, 0,
FCOE_TASK_TYPE_SEQUENCE_CLEANUP, offset);
qedf_ring_doorbell(fcport);
spin_unlock_irqrestore(&fcport->rport_lock, flags);
}
/*
 * qedf_process_seq_cleanup_compl() - Handle sequence-cleanup completion.
 * @qedf:   adapter context (used for logging)
 * @cqe:    completion queue entry, or NULL if the request timed out
 * @io_req: the I/O the cleanup was issued for
 *
 * On a real completion, cancels the cleanup timer and follows up with an
 * SRR using the offset/r_ctl saved in the callback argument. On timeout
 * (or missing CQE) only the resources are released. In all cases the
 * cb_arg is freed and the cleanup reference on io_req is dropped.
 */
void qedf_process_seq_cleanup_compl(struct qedf_ctx *qedf,
	struct fcoe_cqe *cqe, struct qedf_ioreq *io_req)
{
	struct qedf_els_cb_arg *cb_arg = io_req->cb_arg;

	/* Only attempt the SRR when firmware actually completed cleanup */
	if (cqe && io_req->event != QEDF_IOREQ_EV_ELS_TMO) {
		/* Kill the timer we put on the request */
		cancel_delayed_work_sync(&io_req->timeout_work);

		if (qedf_send_srr(io_req, cb_arg->offset, cb_arg->r_ctl))
			QEDF_ERR(&(qedf->dbg_ctx), "Unable to send SRR, I/O will "
			    "abort, xid=0x%x.\n", io_req->xid);
	}

	kfree(cb_arg);
	kref_put(&io_req->refcount, qedf_release_cmd);
}
static bool qedf_requeue_io_req(struct qedf_ioreq *orig_io_req)
{
struct qedf_rport *fcport;
struct qedf_ioreq *new_io_req;
unsigned long flags;
bool rc = false;
fcport = orig_io_req->fcport;
if (!fcport) {
QEDF_ERR(NULL, "fcport is NULL.\n");
goto out;
}
if (!orig_io_req->sc_cmd) {
QEDF_ERR(&(fcport->qedf->dbg_ctx), "sc_cmd is NULL for "
"xid=0x%x.\n", orig_io_req->xid);
goto out;
}
new_io_req = qedf_alloc_cmd(fcport, QEDF_SCSI_CMD);
if (!new_io_req) {
QEDF_ERR(&(fcport->qedf->dbg_ctx), "Could not allocate new "
"io_req.\n");
goto out;
}
new_io_req->sc_cmd = orig_io_req->sc_cmd;
/*
* This keeps the sc_cmd struct from being returned to the tape
* driver and being requeued twice. We do need to put a reference
* for the original I/O request since we will not do a SCSI completion
* for it.
*/
orig_io_req->sc_cmd = NULL;
kref_put(&orig_io_req->refcount, qedf_release_cmd);
spin_lock_irqsave(&fcport->rport_lock, flags);
/* kref for new command released in qedf_post_io_req on error */
if (qedf_post_io_req(fcport, new_io_req)) {
QEDF_ERR(&(fcport->qedf->dbg_ctx), "Unable to post io_req\n");
/* Return SQE to pool */
atomic_inc(&fcport->free_sqes);
} else {
QEDF_INFO(&(fcport->qedf->dbg_ctx), QEDF_LOG_ELS,
"Reissued SCSI command from orig_xid=0x%x on "
"new_xid=0x%x.\n", orig_io_req->xid, new_io_req->xid);
/*
* Abort the original I/O but do not return SCSI command as
* it has been reissued on another OX_ID.
*/
spin_unlock_irqrestore(&fcport->rport_lock, flags);
qedf_initiate_abts(orig_io_req, false);
goto out;
}
spin_unlock_irqrestore(&fcport->rport_lock, flags);
out:
return rc;
}
/*
 * qedf_rec_compl() - Completion handler for a REC ELS request.
 * @cb_arg: callback context carrying the REC request and the original I/O.
 *
 * Interprets the REC response: an LS_RJT with reason LOGIC/UNAB and
 * explanation OXID_RXID means the target lost the exchange, so the
 * command is requeued on a new exchange; an LS_ACC drives SRR or
 * firmware sequence-cleanup recovery depending on who holds the
 * sequence initiative and how much data was transferred. Always drops
 * the reference taken by qedf_send_rec() and frees cb_arg.
 *
 * Fix: the original code jumped to a single out_free label when
 * cb_arg->aborted_io_req was NULL, which then executed
 * kref_put(&orig_io_req->refcount, ...) on the NULL pointer. The labels
 * are now split (out_put drops the reference, out_free only frees).
 */
static void qedf_rec_compl(struct qedf_els_cb_arg *cb_arg)
{
	struct qedf_ioreq *orig_io_req;
	struct qedf_ioreq *rec_req;
	struct qedf_mp_req *mp_req;
	struct fc_frame_header *mp_fc_hdr, *fh;
	struct fc_frame *fp;
	void *resp_buf, *fc_payload;
	u32 resp_len;
	struct fc_lport *lport;
	struct qedf_ctx *qedf;
	int refcount;
	enum fc_rctl r_ctl;
	struct fc_els_ls_rjt *rjt;
	struct fc_els_rec_acc *acc;
	u8 opcode;
	u32 offset, e_stat;
	struct scsi_cmnd *sc_cmd;
	bool srr_needed = false;

	rec_req = cb_arg->io_req;
	qedf = rec_req->fcport->qedf;
	lport = qedf->lport;

	orig_io_req = cb_arg->aborted_io_req;

	/* No original I/O: nothing to complete, no reference to drop */
	if (!orig_io_req)
		goto out_free;

	if (rec_req->event != QEDF_IOREQ_EV_ELS_TMO &&
	    rec_req->event != QEDF_IOREQ_EV_ELS_ERR_DETECT)
		cancel_delayed_work_sync(&orig_io_req->timeout_work);

	refcount = kref_read(&orig_io_req->refcount);
	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Entered: orig_io=%p,"
	    " orig_io_xid=0x%x, rec_xid=0x%x, refcount=%d\n",
	    orig_io_req, orig_io_req->xid, rec_req->xid, refcount);

	/* If a REC times out, free resources */
	if (rec_req->event == QEDF_IOREQ_EV_ELS_TMO)
		goto out_put;

	/* Normalize response data into struct fc_frame */
	mp_req = &(rec_req->mp_req);
	mp_fc_hdr = &(mp_req->resp_fc_hdr);
	resp_len = mp_req->resp_len;
	acc = resp_buf = mp_req->resp_buf;

	fp = fc_frame_alloc(lport, resp_len);
	if (!fp) {
		QEDF_ERR(&(qedf->dbg_ctx),
		    "fc_frame_alloc failure.\n");
		goto out_put;
	}

	/* Copy frame header from firmware into fp */
	fh = (struct fc_frame_header *)fc_frame_header_get(fp);
	memcpy(fh, mp_fc_hdr, sizeof(struct fc_frame_header));

	/* Copy payload from firmware into fp */
	fc_payload = fc_frame_payload_get(fp, resp_len);
	memcpy(fc_payload, resp_buf, resp_len);

	opcode = fc_frame_payload_op(fp);
	if (opcode == ELS_LS_RJT) {
		rjt = fc_frame_payload_get(fp, sizeof(*rjt));
		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
		    "Received LS_RJT for REC: er_reason=0x%x, "
		    "er_explan=0x%x.\n", rjt->er_reason, rjt->er_explan);
		/*
		 * The following response(s) mean that we need to reissue the
		 * request on another exchange. We need to do this without
		 * informing the upper layers lest it cause an application
		 * error.
		 */
		if ((rjt->er_reason == ELS_RJT_LOGIC ||
		    rjt->er_reason == ELS_RJT_UNAB) &&
		    rjt->er_explan == ELS_EXPL_OXID_RXID) {
			QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
			    "Handle CMD LOST case.\n");
			qedf_requeue_io_req(orig_io_req);
		}
	} else if (opcode == ELS_LS_ACC) {
		offset = ntohl(acc->reca_fc4value);
		e_stat = ntohl(acc->reca_e_stat);
		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
		    "Received LS_ACC for REC: offset=0x%x, e_stat=0x%x.\n",
		    offset, e_stat);
		if (e_stat & ESB_ST_SEQ_INIT) {
			QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
			    "Target has the seq init\n");
			goto out_free_frame;
		}
		sc_cmd = orig_io_req->sc_cmd;
		if (!sc_cmd) {
			QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
			    "sc_cmd is NULL for xid=0x%x.\n",
			    orig_io_req->xid);
			goto out_free_frame;
		}
		/* SCSI write case */
		if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) {
			if (offset == orig_io_req->data_xfer_len) {
				QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
				    "WRITE - response lost.\n");
				r_ctl = FC_RCTL_DD_CMD_STATUS;
				srr_needed = true;
				offset = 0;
			} else {
				QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
				    "WRITE - XFER_RDY/DATA lost.\n");
				r_ctl = FC_RCTL_DD_DATA_DESC;
				/* Use data from warning CQE instead of REC */
				offset = orig_io_req->tx_buf_off;
			}
		/* SCSI read case */
		} else {
			if (orig_io_req->rx_buf_off ==
			    orig_io_req->data_xfer_len) {
				QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
				    "READ - response lost.\n");
				srr_needed = true;
				r_ctl = FC_RCTL_DD_CMD_STATUS;
				offset = 0;
			} else {
				QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
				    "READ - DATA lost.\n");
				/*
				 * For read case we always set the offset to 0
				 * for sequence recovery task.
				 */
				offset = 0;
				r_ctl = FC_RCTL_DD_SOL_DATA;
			}
		}

		if (srr_needed)
			qedf_send_srr(orig_io_req, offset, r_ctl);
		else
			qedf_initiate_seq_cleanup(orig_io_req, offset, r_ctl);
	}

out_free_frame:
	fc_frame_free(fp);
out_put:
	/* Put reference for original command since REC completed */
	kref_put(&orig_io_req->refcount, qedf_release_cmd);
out_free:
	kfree(cb_arg);
}
/* Assumes kref is already held by caller */
/*
 * qedf_send_rec() - Send a REC (Read Exchange Concise) ELS to query the
 * target's view of the exchange for @orig_io_req.
 *
 * Takes an additional reference on @orig_io_req that qedf_rec_compl()
 * drops on completion; the reference is dropped here if submission
 * fails. Returns 0 on success or a negative errno.
 */
int qedf_send_rec(struct qedf_ioreq *orig_io_req)
{
struct fc_els_rec rec;
struct qedf_rport *fcport;
struct fc_lport *lport;
struct qedf_els_cb_arg *cb_arg = NULL;
struct qedf_ctx *qedf;
uint32_t sid;
uint32_t r_a_tov;
int rc;
if (!orig_io_req) {
QEDF_ERR(NULL, "orig_io_req is NULL.\n");
return -EINVAL;
}
fcport = orig_io_req->fcport;
/* Check that fcport is still offloaded */
if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
QEDF_ERR(NULL, "fcport is no longer offloaded.\n");
return -EINVAL;
}
if (!fcport->qedf) {
QEDF_ERR(NULL, "fcport->qedf is NULL.\n");
return -EINVAL;
}
/* Take reference until REC command completion */
kref_get(&orig_io_req->refcount);
qedf = fcport->qedf;
lport = qedf->lport;
sid = fcport->sid;
r_a_tov = lport->r_a_tov;
memset(&rec, 0, sizeof(rec));
cb_arg = kzalloc(sizeof(struct qedf_els_cb_arg), GFP_NOIO);
if (!cb_arg) {
QEDF_ERR(&(qedf->dbg_ctx), "Unable to allocate cb_arg for "
"REC\n");
rc = -ENOMEM;
goto rec_err;
}
cb_arg->aborted_io_req = orig_io_req;
/* Build the REC payload: our S_ID plus the exchange identifiers */
rec.rec_cmd = ELS_REC;
hton24(rec.rec_s_id, sid);
rec.rec_ox_id = htons(orig_io_req->xid);
/* RX_ID comes from the firmware task context for this exchange */
rec.rec_rx_id =
htons(orig_io_req->task->tstorm_st_context.read_write.rx_id);
QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Sending REC orig_io=%p, "
"orig_xid=0x%x rx_id=0x%x\n", orig_io_req,
orig_io_req->xid, rec.rec_rx_id);
rc = qedf_initiate_els(fcport, ELS_REC, &rec, sizeof(rec),
qedf_rec_compl, cb_arg, r_a_tov);
rec_err:
if (rc) {
QEDF_ERR(&(qedf->dbg_ctx), "REC failed - release orig_io_req"
"=0x%x\n", orig_io_req->xid);
kfree(cb_arg);
kref_put(&orig_io_req->refcount, qedf_release_cmd);
}
return rc;
}

View File

@ -0,0 +1,269 @@
/*
* QLogic FCoE Offload Driver
* Copyright (c) 2016 Cavium Inc.
*
* This software is available under the terms of the GNU General Public License
* (GPL) Version 2, available from the file COPYING in the main directory of
* this source tree.
*/
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include "qedf.h"
extern const struct qed_fcoe_ops *qed_ops;
/*
* FIP VLAN functions that will eventually move to libfcoe.
*/
/*
 * qedf_fcoe_send_vlan_req() - Build and transmit a FIP VLAN discovery
 * request to the ALL-FCF-MACs multicast address over the LL2 path.
 *
 * Fire-and-forget: skb allocation failure or link-down simply drops the
 * request (the FIP VLAN state machine retries elsewhere).
 */
void qedf_fcoe_send_vlan_req(struct qedf_ctx *qedf)
{
struct sk_buff *skb;
char *eth_fr;
int fr_len;
struct fip_vlan *vlan;
#define MY_FIP_ALL_FCF_MACS ((__u8[6]) { 1, 0x10, 0x18, 1, 0, 2 })
static u8 my_fcoe_all_fcfs[ETH_ALEN] = MY_FIP_ALL_FCF_MACS;
skb = dev_alloc_skb(sizeof(struct fip_vlan));
if (!skb)
return;
/* Build the entire FIP VLAN request in place in the skb data area */
fr_len = sizeof(*vlan);
eth_fr = (char *)skb->data;
vlan = (struct fip_vlan *)eth_fr;
memset(vlan, 0, sizeof(*vlan));
ether_addr_copy(vlan->eth.h_source, qedf->mac);
ether_addr_copy(vlan->eth.h_dest, my_fcoe_all_fcfs);
vlan->eth.h_proto = htons(ETH_P_FIP);
/* FIP header: VLAN request operation */
vlan->fip.fip_ver = FIP_VER_ENCAPS(FIP_VER);
vlan->fip.fip_op = htons(FIP_OP_VLAN);
vlan->fip.fip_subcode = FIP_SC_VL_REQ;
vlan->fip.fip_dl_len = htons(sizeof(vlan->desc) / FIP_BPW);
/* MAC descriptor identifying this initiator */
vlan->desc.mac.fd_desc.fip_dtype = FIP_DT_MAC;
vlan->desc.mac.fd_desc.fip_dlen = sizeof(vlan->desc.mac) / FIP_BPW;
ether_addr_copy(vlan->desc.mac.fd_mac, qedf->mac);
/* Node-name (WWNN) descriptor */
vlan->desc.wwnn.fd_desc.fip_dtype = FIP_DT_NAME;
vlan->desc.wwnn.fd_desc.fip_dlen = sizeof(vlan->desc.wwnn) / FIP_BPW;
put_unaligned_be64(qedf->lport->wwnn, &vlan->desc.wwnn.fd_wwn);
skb_put(skb, sizeof(*vlan));
skb->protocol = htons(ETH_P_FIP);
skb_reset_mac_header(skb);
skb_reset_network_header(skb);
QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "Sending FIP VLAN "
"request.");
/* NOTE(review): link state is only checked after the frame is fully
 * built — harmless, but the frame is freed here if the link is down. */
if (atomic_read(&qedf->link_state) != QEDF_LINK_UP) {
QEDF_WARN(&(qedf->dbg_ctx), "Cannot send vlan request "
"because link is not up.\n");
kfree_skb(skb);
return;
}
qed_ops->ll2->start_xmit(qedf->cdev, skb);
}
static void qedf_fcoe_process_vlan_resp(struct qedf_ctx *qedf,
struct sk_buff *skb)
{
struct fip_header *fiph;
struct fip_desc *desc;
u16 vid = 0;
ssize_t rlen;
size_t dlen;
fiph = (struct fip_header *)(((void *)skb->data) + 2 * ETH_ALEN + 2);
rlen = ntohs(fiph->fip_dl_len) * 4;
desc = (struct fip_desc *)(fiph + 1);
while (rlen > 0) {
dlen = desc->fip_dlen * FIP_BPW;
switch (desc->fip_dtype) {
case FIP_DT_VLAN:
vid = ntohs(((struct fip_vlan_desc *)desc)->fd_vlan);
break;
}
desc = (struct fip_desc *)((char *)desc + dlen);
rlen -= dlen;
}
QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "VLAN response, "
"vid=0x%x.\n", vid);
if (vid > 0 && qedf->vlan_id != vid) {
qedf_set_vlan_id(qedf, vid);
/* Inform waiter that it's ok to call fcoe_ctlr_link up() */
complete(&qedf->fipvlan_compl);
}
}
/*
 * qedf_fip_send() - Transmit a libfcoe-generated FIP frame on the LL2
 * path, inserting an 802.1Q tag in software when the hardware does not
 * perform VLAN insertion. Consumes @skb in all paths.
 */
void qedf_fip_send(struct fcoe_ctlr *fip, struct sk_buff *skb)
{
struct qedf_ctx *qedf = container_of(fip, struct qedf_ctx, ctlr);
struct ethhdr *eth_hdr;
struct vlan_ethhdr *vlan_hdr;
struct fip_header *fiph;
u16 op, vlan_tci = 0;
u8 sub;
if (!test_bit(QEDF_LL2_STARTED, &qedf->flags)) {
QEDF_WARN(&(qedf->dbg_ctx), "LL2 not started\n");
kfree_skb(skb);
return;
}
/* FIP header follows dest MAC, source MAC and ethertype */
fiph = (struct fip_header *) ((void *)skb->data + 2 * ETH_ALEN + 2);
eth_hdr = (struct ethhdr *)skb_mac_header(skb);
op = ntohs(fiph->fip_op);
sub = fiph->fip_subcode;
/* Software VLAN insertion: grow the headroom by the 4-byte tag and
 * rebuild the Ethernet header as a VLAN header. */
if (!qedf->vlan_hw_insert) {
vlan_hdr = (struct vlan_ethhdr *)skb_push(skb, sizeof(*vlan_hdr)
- sizeof(*eth_hdr));
/* NOTE(review): dst (pushed header) and src (old header) overlap
 * by the 4-byte tag; memmove would be the strictly-correct call
 * here — confirm this memcpy is safe on supported arches. */
memcpy(vlan_hdr, eth_hdr, 2 * ETH_ALEN);
vlan_hdr->h_vlan_proto = htons(ETH_P_8021Q);
vlan_hdr->h_vlan_encapsulated_proto = eth_hdr->h_proto;
vlan_hdr->h_vlan_TCI = vlan_tci = htons(qedf->vlan_id);
}
/* Update eth_hdr since we added a VLAN tag */
eth_hdr = (struct ethhdr *)skb_mac_header(skb);
QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_LL2, "FIP frame send: "
"dest=%pM op=%x sub=%x vlan=%04x.", eth_hdr->h_dest, op, sub,
ntohs(vlan_tci));
if (qedf_dump_frames)
print_hex_dump(KERN_WARNING, "fip ", DUMP_PREFIX_OFFSET, 16, 1,
skb->data, skb->len, false);
qed_ops->ll2->start_xmit(qedf->cdev, skb);
}
/* Process incoming FIP frames. */
/*
 * qedf_fip_recv() - Dispatch an incoming FIP frame.
 * @qedf: adapter context
 * @skb:  received frame; consumed on every path (freed here or handed
 *        to fcoe_ctlr_recv()).
 *
 * VLAN notifications are handled by the driver, Clear-Virtual-Link is
 * matched against the selected FCF and this port's FC_ID, everything
 * else is passed to libfcoe.
 *
 * Fixes vs. original:
 *  - format-argument mismatch: "fd_mac=%pM" was passed __func__ as the
 *    %pM argument, logging garbage instead of the FCF MAC;
 *  - skb leak: the "FCF not selected" path returned without freeing skb;
 *  - descriptor walk validated against zero-length / over-length
 *    descriptors (infinite loop / out-of-bounds read on malformed frames).
 */
void qedf_fip_recv(struct qedf_ctx *qedf, struct sk_buff *skb)
{
	struct ethhdr *eth_hdr;
	struct fip_header *fiph;
	struct fip_desc *desc;
	struct fip_mac_desc *mp;
	struct fip_wwn_desc *wp;
	struct fip_vn_desc *vp;
	size_t rlen, dlen;
	uint32_t cvl_port_id;
	__u8 cvl_mac[ETH_ALEN];
	u16 op;
	u8 sub;

	eth_hdr = (struct ethhdr *)skb_mac_header(skb);
	fiph = (struct fip_header *) ((void *)skb->data + 2 * ETH_ALEN + 2);
	op = ntohs(fiph->fip_op);
	sub = fiph->fip_subcode;

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_LL2, "FIP frame received: "
	    "skb=%p fiph=%p source=%pM op=%x sub=%x", skb, fiph,
	    eth_hdr->h_source, op, sub);

	if (qedf_dump_frames)
		print_hex_dump(KERN_WARNING, "fip ", DUMP_PREFIX_OFFSET, 16, 1,
		    skb->data, skb->len, false);

	/* Handle FIP VLAN resp in the driver */
	if (op == FIP_OP_VLAN && sub == FIP_SC_VL_NOTE) {
		qedf_fcoe_process_vlan_resp(qedf, skb);
		qedf->vlan_hw_insert = 0;
		kfree_skb(skb);
	} else if (op == FIP_OP_CTRL && sub == FIP_SC_CLR_VLINK) {
		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "Clear virtual "
		    "link received.\n");

		/* Check that an FCF has been selected by fcoe */
		if (qedf->ctlr.sel_fcf == NULL) {
			QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
			    "Dropping CVL since FCF has not been selected "
			    "yet.");
			/* Fix: free the dropped frame (was leaked) */
			kfree_skb(skb);
			return;
		}

		cvl_port_id = 0;
		memset(cvl_mac, 0, ETH_ALEN);

		/*
		 * We need to loop through the CVL descriptors to determine
		 * if we want to reset the fcoe link
		 */
		rlen = ntohs(fiph->fip_dl_len) * FIP_BPW;
		desc = (struct fip_desc *)(fiph + 1);
		while (rlen >= sizeof(*desc)) {
			dlen = desc->fip_dlen * FIP_BPW;
			/* Fix: drop malformed descriptors (zero-length would
			 * loop forever, over-length reads past the frame) */
			if (dlen < sizeof(*desc) || dlen > rlen)
				break;
			switch (desc->fip_dtype) {
			case FIP_DT_MAC:
				mp = (struct fip_mac_desc *)desc;
				/* Fix: dropped stray __func__ argument that
				 * was consumed by %pM instead of fd_mac */
				QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_LL2,
				    "fd_mac=%pM.\n", mp->fd_mac);
				ether_addr_copy(cvl_mac, mp->fd_mac);
				break;
			case FIP_DT_NAME:
				wp = (struct fip_wwn_desc *)desc;
				QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_LL2,
				    "fc_wwpn=%016llx.\n",
				    get_unaligned_be64(&wp->fd_wwn));
				break;
			case FIP_DT_VN_ID:
				vp = (struct fip_vn_desc *)desc;
				QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_LL2,
				    "fd_fc_id=%x.\n", ntoh24(vp->fd_fc_id));
				cvl_port_id = ntoh24(vp->fd_fc_id);
				break;
			default:
				/* Ignore anything else */
				break;
			}
			desc = (struct fip_desc *)((char *)desc + dlen);
			rlen -= dlen;
		}

		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_LL2,
		    "cvl_port_id=%06x cvl_mac=%pM.\n", cvl_port_id,
		    cvl_mac);

		/* Only honor the CVL if it names our port and selected FCF */
		if (cvl_port_id == qedf->lport->port_id &&
		    ether_addr_equal(cvl_mac,
		    qedf->ctlr.sel_fcf->fcf_mac)) {
			fcoe_ctlr_link_down(&qedf->ctlr);
			qedf_wait_for_upload(qedf);
			fcoe_ctlr_link_up(&qedf->ctlr);
		}
		kfree_skb(skb);
	} else {
		/* Everything else is handled by libfcoe */
		__skb_pull(skb, ETH_HLEN);
		fcoe_ctlr_recv(&qedf->ctlr, skb);
	}
}
/*
 * qedf_update_src_mac() - Record the fabric-granted source MAC.
 * @lport: local port the address was granted to
 * @addr:  6-byte MAC address to remember as data_src_addr
 *
 * Stores the address so subsequent data frames use the fabric-assigned
 * source MAC.
 */
void qedf_update_src_mac(struct fc_lport *lport, u8 *addr)
{
	struct qedf_ctx *qedf = lport_priv(lport);

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
	    "Setting data_src_addr=%pM.\n", addr);
	memcpy(qedf->data_src_addr, addr, ETH_ALEN);
}
/*
 * qedf_get_src_mac() - Return the source MAC for this lport's frames.
 * @lport: local port to look up
 *
 * If no fabric-granted address has been recorded yet, derive the FPMA
 * from the lport's FC_ID and push it through the controller's
 * update_mac callback before returning data_src_addr.
 */
u8 *qedf_get_src_mac(struct fc_lport *lport)
{
	struct qedf_ctx *qedf = lport_priv(lport);
	u8 derived_mac[ETH_ALEN];
	u8 fc_id[3];

	/* We need to use the lport port_id to create the data_src_addr */
	if (is_zero_ether_addr(qedf->data_src_addr)) {
		hton24(fc_id, lport->port_id);
		fc_fcoe_set_mac(derived_mac, fc_id);
		qedf->ctlr.update_mac(lport, derived_mac);
	}
	return qedf->data_src_addr;
}

View File

@ -0,0 +1,422 @@
/*
* QLogic FCoE Offload Driver
* Copyright (c) 2016 Cavium Inc.
*
* This software is available under the terms of the GNU General Public License
* (GPL) Version 2, available from the file COPYING in the main directory of
* this source tree.
*/
#ifndef __QEDF_HSI__
#define __QEDF_HSI__
/*
* Add include to common target
*/
#include <linux/qed/common_hsi.h>
/*
* Add include to common storage target
*/
#include <linux/qed/storage_common.h>
/*
* Add include to common fcoe target for both eCore and protocol driver
*/
#include <linux/qed/fcoe_common.h>
/*
* FCoE CQ element ABTS information
*/
struct fcoe_abts_info {
u8 r_ctl /* R_CTL in the ABTS response frame */;
u8 reserved0;
__le16 rx_id;
__le32 reserved2[2];
__le32 fc_payload[3] /* ABTS FC payload response frame */;
};
/*
* FCoE class type
*/
enum fcoe_class_type {
FCOE_TASK_CLASS_TYPE_3,
FCOE_TASK_CLASS_TYPE_2,
MAX_FCOE_CLASS_TYPE
};
/*
* FCoE CMDQ element control information
*/
struct fcoe_cmdqe_control {
__le16 conn_id;
u8 num_additional_cmdqes;
u8 cmdType;
/* true for ABTS request cmdqe. used in Target mode */
#define FCOE_CMDQE_CONTROL_ABTSREQCMD_MASK 0x1
#define FCOE_CMDQE_CONTROL_ABTSREQCMD_SHIFT 0
#define FCOE_CMDQE_CONTROL_RESERVED1_MASK 0x7F
#define FCOE_CMDQE_CONTROL_RESERVED1_SHIFT 1
u8 reserved2[4];
};
/*
* FCoE control + payload CMDQ element
*/
struct fcoe_cmdqe {
struct fcoe_cmdqe_control hdr;
u8 fc_header[24];
__le32 fcp_cmd_payload[8];
};
/*
* FCP RSP flags
*/
struct fcoe_fcp_rsp_flags {
u8 flags;
#define FCOE_FCP_RSP_FLAGS_FCP_RSP_LEN_VALID_MASK 0x1
#define FCOE_FCP_RSP_FLAGS_FCP_RSP_LEN_VALID_SHIFT 0
#define FCOE_FCP_RSP_FLAGS_FCP_SNS_LEN_VALID_MASK 0x1
#define FCOE_FCP_RSP_FLAGS_FCP_SNS_LEN_VALID_SHIFT 1
#define FCOE_FCP_RSP_FLAGS_FCP_RESID_OVER_MASK 0x1
#define FCOE_FCP_RSP_FLAGS_FCP_RESID_OVER_SHIFT 2
#define FCOE_FCP_RSP_FLAGS_FCP_RESID_UNDER_MASK 0x1
#define FCOE_FCP_RSP_FLAGS_FCP_RESID_UNDER_SHIFT 3
#define FCOE_FCP_RSP_FLAGS_FCP_CONF_REQ_MASK 0x1
#define FCOE_FCP_RSP_FLAGS_FCP_CONF_REQ_SHIFT 4
#define FCOE_FCP_RSP_FLAGS_FCP_BIDI_FLAGS_MASK 0x7
#define FCOE_FCP_RSP_FLAGS_FCP_BIDI_FLAGS_SHIFT 5
};
/*
* FCoE CQ element response information
*/
struct fcoe_cqe_rsp_info {
struct fcoe_fcp_rsp_flags rsp_flags;
u8 scsi_status_code;
__le16 retry_delay_timer;
__le32 fcp_resid;
__le32 fcp_sns_len;
__le32 fcp_rsp_len;
__le16 rx_id;
u8 fw_error_flags;
#define FCOE_CQE_RSP_INFO_FW_UNDERRUN_MASK 0x1 /* FW detected underrun */
#define FCOE_CQE_RSP_INFO_FW_UNDERRUN_SHIFT 0
#define FCOE_CQE_RSP_INFO_RESREVED_MASK 0x7F
#define FCOE_CQE_RSP_INFO_RESREVED_SHIFT 1
u8 reserved;
__le32 fw_residual /* Residual bytes calculated by FW */;
};
/*
* FCoE CQ element Target completion information
*/
struct fcoe_cqe_target_info {
__le16 rx_id;
__le16 reserved0;
__le32 reserved1[5];
};
/*
* FCoE error/warning reporting entry
*/
struct fcoe_err_report_entry {
__le32 err_warn_bitmap_lo /* Error bitmap lower 32 bits */;
__le32 err_warn_bitmap_hi /* Error bitmap higher 32 bits */;
/* Buffer offset the beginning of the Sequence last transmitted */
__le32 tx_buf_off;
/* Buffer offset from the beginning of the Sequence last received */
__le32 rx_buf_off;
__le16 rx_id /* RX_ID of the associated task */;
__le16 reserved1;
__le32 reserved2;
};
/*
* FCoE CQ element middle path information
*/
struct fcoe_cqe_midpath_info {
__le32 data_placement_size;
__le16 rx_id;
__le16 reserved0;
__le32 reserved1[4];
};
/*
* FCoE CQ element unsolicited information
*/
struct fcoe_unsolic_info {
/* BD information: Physical address and opaque data */
struct scsi_bd bd_info;
__le16 conn_id /* Connection ID the frame is associated to */;
__le16 pkt_len /* Packet length */;
u8 reserved1[4];
};
/*
* FCoE warning reporting entry
*/
struct fcoe_warning_report_entry {
/* BD information: Physical address and opaque data */
struct scsi_bd bd_info;
/* Buffer offset the beginning of the Sequence last transmitted */
__le32 buf_off;
__le16 rx_id /* RX_ID of the associated task */;
__le16 reserved1;
};
/*
* FCoE CQ element information
*/
union fcoe_cqe_info {
struct fcoe_cqe_rsp_info rsp_info /* Response completion information */;
/* Target completion information */
struct fcoe_cqe_target_info target_info;
/* Error completion information */
struct fcoe_err_report_entry err_info;
struct fcoe_abts_info abts_info /* ABTS completion information */;
/* Middle path completion information */
struct fcoe_cqe_midpath_info midpath_info;
/* Unsolicited packet completion information */
struct fcoe_unsolic_info unsolic_info;
/* Warning completion information (Rec Tov expiration) */
struct fcoe_warning_report_entry warn_info;
};
/*
* FCoE CQ element
*/
struct fcoe_cqe {
__le32 cqe_data;
/* The task identifier (OX_ID) to be completed */
#define FCOE_CQE_TASK_ID_MASK 0xFFFF
#define FCOE_CQE_TASK_ID_SHIFT 0
/*
* The CQE type: 0x0 Indicating on a pending work request completion.
* 0x1 - Indicating on an unsolicited event notification. use enum
* fcoe_cqe_type (use enum fcoe_cqe_type)
*/
#define FCOE_CQE_CQE_TYPE_MASK 0xF
#define FCOE_CQE_CQE_TYPE_SHIFT 16
#define FCOE_CQE_RESERVED0_MASK 0xFFF
#define FCOE_CQE_RESERVED0_SHIFT 20
__le16 reserved1;
__le16 fw_cq_prod;
union fcoe_cqe_info cqe_info;
};
/*
* FCoE CQE type
*/
/* Firmware-ABI enum: do not reorder — values are shared with firmware. */
enum fcoe_cqe_type {
/* solicited response on a R/W or middle-path SQE */
FCOE_GOOD_COMPLETION_CQE_TYPE,
FCOE_UNSOLIC_CQE_TYPE /* unsolicited packet, RQ consumed */,
FCOE_ERROR_DETECTION_CQE_TYPE /* timer expiration, validation error */,
FCOE_WARNING_CQE_TYPE /* rec_tov or rr_tov timer expiration */,
FCOE_EXCH_CLEANUP_CQE_TYPE /* task cleanup completed */,
FCOE_ABTS_CQE_TYPE /* ABTS received and task cleaned */,
FCOE_DUMMY_CQE_TYPE /* just increment SQ CONS */,
/* Task was completed right after sending a pkt to the target */
FCOE_LOCAL_COMP_CQE_TYPE,
MAX_FCOE_CQE_TYPE
};
/*
* FCoE device type
*/
enum fcoe_device_type {
FCOE_TASK_DEV_TYPE_DISK,
FCOE_TASK_DEV_TYPE_TAPE,
MAX_FCOE_DEVICE_TYPE
};
/*
* FCoE fast path error codes
*/
enum fcoe_fp_error_warning_code {
FCOE_ERROR_CODE_XFER_OOO_RO /* XFER error codes */,
FCOE_ERROR_CODE_XFER_RO_NOT_ALIGNED,
FCOE_ERROR_CODE_XFER_NULL_BURST_LEN,
FCOE_ERROR_CODE_XFER_RO_GREATER_THAN_DATA2TRNS,
FCOE_ERROR_CODE_XFER_INVALID_PAYLOAD_SIZE,
FCOE_ERROR_CODE_XFER_TASK_TYPE_NOT_WRITE,
FCOE_ERROR_CODE_XFER_PEND_XFER_SET,
FCOE_ERROR_CODE_XFER_OPENED_SEQ,
FCOE_ERROR_CODE_XFER_FCTL,
FCOE_ERROR_CODE_FCP_RSP_BIDI_FLAGS_SET /* FCP RSP error codes */,
FCOE_ERROR_CODE_FCP_RSP_INVALID_LENGTH_FIELD,
FCOE_ERROR_CODE_FCP_RSP_INVALID_SNS_FIELD,
FCOE_ERROR_CODE_FCP_RSP_INVALID_PAYLOAD_SIZE,
FCOE_ERROR_CODE_FCP_RSP_PEND_XFER_SET,
FCOE_ERROR_CODE_FCP_RSP_OPENED_SEQ,
FCOE_ERROR_CODE_FCP_RSP_FCTL,
FCOE_ERROR_CODE_FCP_RSP_LAST_SEQ_RESET,
FCOE_ERROR_CODE_FCP_RSP_CONF_REQ_NOT_SUPPORTED_YET,
FCOE_ERROR_CODE_DATA_OOO_RO /* FCP DATA error codes */,
FCOE_ERROR_CODE_DATA_EXCEEDS_DEFINED_MAX_FRAME_SIZE,
FCOE_ERROR_CODE_DATA_EXCEEDS_DATA2TRNS,
FCOE_ERROR_CODE_DATA_SOFI3_SEQ_ACTIVE_SET,
FCOE_ERROR_CODE_DATA_SOFN_SEQ_ACTIVE_RESET,
FCOE_ERROR_CODE_DATA_EOFN_END_SEQ_SET,
FCOE_ERROR_CODE_DATA_EOFT_END_SEQ_RESET,
FCOE_ERROR_CODE_DATA_TASK_TYPE_NOT_READ,
FCOE_ERROR_CODE_DATA_FCTL_INITIATIR,
FCOE_ERROR_CODE_MIDPATH_INVALID_TYPE /* Middle path error codes */,
FCOE_ERROR_CODE_MIDPATH_SOFI3_SEQ_ACTIVE_SET,
FCOE_ERROR_CODE_MIDPATH_SOFN_SEQ_ACTIVE_RESET,
FCOE_ERROR_CODE_MIDPATH_EOFN_END_SEQ_SET,
FCOE_ERROR_CODE_MIDPATH_EOFT_END_SEQ_RESET,
FCOE_ERROR_CODE_MIDPATH_REPLY_FCTL,
FCOE_ERROR_CODE_MIDPATH_INVALID_REPLY,
FCOE_ERROR_CODE_MIDPATH_ELS_REPLY_RCTL,
FCOE_ERROR_CODE_COMMON_MIDDLE_FRAME_WITH_PAD /* Common error codes */,
FCOE_ERROR_CODE_COMMON_SEQ_INIT_IN_TCE,
FCOE_ERROR_CODE_COMMON_FC_HDR_RX_ID_MISMATCH,
FCOE_ERROR_CODE_COMMON_INCORRECT_SEQ_CNT,
FCOE_ERROR_CODE_COMMON_DATA_FC_HDR_FCP_TYPE_MISMATCH,
FCOE_ERROR_CODE_COMMON_DATA_NO_MORE_SGES,
FCOE_ERROR_CODE_COMMON_OPTIONAL_FC_HDR,
FCOE_ERROR_CODE_COMMON_READ_TCE_OX_ID_TOO_BIG,
FCOE_ERROR_CODE_COMMON_DATA_WAS_NOT_TRANSMITTED,
FCOE_ERROR_CODE_COMMON_TASK_DDF_RCTL_INFO_FIELD,
FCOE_ERROR_CODE_COMMON_TASK_INVALID_RCTL,
FCOE_ERROR_CODE_COMMON_TASK_RCTL_GENERAL_MISMATCH,
FCOE_ERROR_CODE_E_D_TOV_TIMER_EXPIRATION /* Timer error codes */,
FCOE_WARNING_CODE_REC_TOV_TIMER_EXPIRATION /* Timer error codes */,
FCOE_ERROR_CODE_RR_TOV_TIMER_EXPIRATION /* Timer error codes */,
/* ABTSrsp pckt arrived unexpected */
FCOE_ERROR_CODE_ABTS_REPLY_UNEXPECTED,
FCOE_ERROR_CODE_TARGET_MODE_FCP_RSP,
FCOE_ERROR_CODE_TARGET_MODE_FCP_XFER,
FCOE_ERROR_CODE_TARGET_MODE_DATA_TASK_TYPE_NOT_WRITE,
FCOE_ERROR_CODE_DATA_FCTL_TARGET,
FCOE_ERROR_CODE_TARGET_DATA_SIZE_NO_MATCH_XFER,
FCOE_ERROR_CODE_TARGET_DIF_CRC_CHECKSUM_ERROR,
FCOE_ERROR_CODE_TARGET_DIF_REF_TAG_ERROR,
FCOE_ERROR_CODE_TARGET_DIF_APP_TAG_ERROR,
MAX_FCOE_FP_ERROR_WARNING_CODE
};
/*
* FCoE RESPQ element
*/
/* Firmware-ABI layout: field order and widths must match firmware. */
struct fcoe_respqe {
__le16 ox_id /* OX_ID that is located in the FCP_RSP FC header */;
__le16 rx_id /* RX_ID that is located in the FCP_RSP FC header */;
__le32 additional_info;
/* PARAM that is located in the FCP_RSP FC header */
#define FCOE_RESPQE_PARAM_MASK 0xFFFFFF
#define FCOE_RESPQE_PARAM_SHIFT 0
/* Indication whether it is Target-auto-rsp mode or not */
#define FCOE_RESPQE_TARGET_AUTO_RSP_MASK 0xFF
#define FCOE_RESPQE_TARGET_AUTO_RSP_SHIFT 24
};
/*
* FCoE slow path error codes
*/
enum fcoe_sp_error_code {
/* Error codes for Error Reporting in slow path flows */
FCOE_ERROR_CODE_SLOW_PATH_TOO_MANY_FUNCS,
FCOE_ERROR_SLOW_PATH_CODE_NO_LICENSE,
MAX_FCOE_SP_ERROR_CODE
};
/*
* FCoE SQE request type
*/
enum fcoe_sqe_request_type {
SEND_FCOE_CMD,
SEND_FCOE_MIDPATH,
SEND_FCOE_ABTS_REQUEST,
FCOE_EXCHANGE_CLEANUP,
FCOE_SEQUENCE_RECOVERY,
SEND_FCOE_XFER_RDY,
SEND_FCOE_RSP,
SEND_FCOE_RSP_WITH_SENSE_DATA,
SEND_FCOE_TARGET_DATA,
SEND_FCOE_INITIATOR_DATA,
/*
* Xfer Continuation (==1) ready to be sent. Previous XFERs data
* received successfully.
*/
SEND_FCOE_XFER_CONTINUATION_RDY,
SEND_FCOE_TARGET_ABTS_RSP,
MAX_FCOE_SQE_REQUEST_TYPE
};
/*
* FCoE task TX state
*/
enum fcoe_task_tx_state {
/* Initiate state after driver has initialized the task */
FCOE_TASK_TX_STATE_NORMAL,
/* Updated by TX path after complete transmitting unsolicited packet */
FCOE_TASK_TX_STATE_UNSOLICITED_COMPLETED,
/*
* Updated by TX path after start processing the task requesting the
* cleanup/abort operation
*/
FCOE_TASK_TX_STATE_CLEAN_REQ,
FCOE_TASK_TX_STATE_ABTS /* Updated by TX path during abort procedure */,
/* Updated by TX path during exchange cleanup procedure */
FCOE_TASK_TX_STATE_EXCLEANUP,
/*
* Updated by TX path during exchange cleanup continuation task
* procedure
*/
FCOE_TASK_TX_STATE_EXCLEANUP_TARGET_WRITE_CONT,
/* Updated by TX path during exchange cleanup first xfer procedure */
FCOE_TASK_TX_STATE_EXCLEANUP_TARGET_WRITE,
/* Updated by TX path during exchange cleanup read task in Target */
FCOE_TASK_TX_STATE_EXCLEANUP_TARGET_READ_OR_RSP,
/* Updated by TX path during target exchange cleanup procedure */
FCOE_TASK_TX_STATE_EXCLEANUP_TARGET_WRITE_LAST_CYCLE,
/* Updated by TX path during sequence recovery procedure */
FCOE_TASK_TX_STATE_SEQRECOVERY,
MAX_FCOE_TASK_TX_STATE
};
/*
* FCoE task type
*/
enum fcoe_task_type {
FCOE_TASK_TYPE_WRITE_INITIATOR,
FCOE_TASK_TYPE_READ_INITIATOR,
FCOE_TASK_TYPE_MIDPATH,
FCOE_TASK_TYPE_UNSOLICITED,
FCOE_TASK_TYPE_ABTS,
FCOE_TASK_TYPE_EXCHANGE_CLEANUP,
FCOE_TASK_TYPE_SEQUENCE_CLEANUP,
FCOE_TASK_TYPE_WRITE_TARGET,
FCOE_TASK_TYPE_READ_TARGET,
FCOE_TASK_TYPE_RSP,
FCOE_TASK_TYPE_RSP_SENSE_DATA,
FCOE_TASK_TYPE_ABTS_TARGET,
FCOE_TASK_TYPE_ENUM_SIZE,
MAX_FCOE_TASK_TYPE
};
struct scsi_glbl_queue_entry {
/* Start physical address for the RQ (receive queue) PBL. */
struct regpair rq_pbl_addr;
/* Start physical address for the CQ (completion queue) PBL. */
struct regpair cq_pbl_addr;
/* Start physical address for the CMDQ (command queue) PBL. */
struct regpair cmdq_pbl_addr;
};
#endif /* __QEDF_HSI__ */

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,15 @@
/*
 * QLogic FCoE Offload Driver
 * Copyright (c) 2016 Cavium Inc.
 *
 * This software is available under the terms of the GNU General Public License
 * (GPL) Version 2, available from the file COPYING in the main directory of
 * this source tree.
 */
/* Driver version string is MAJOR.MINOR.REV.ENG — keep it in sync with
 * the individual component macros below. */
#define QEDF_VERSION "8.10.7.0"
#define QEDF_DRIVER_MAJOR_VER 8
#define QEDF_DRIVER_MINOR_VER 10
#define QEDF_DRIVER_REV_VER 7
#define QEDF_DRIVER_ENG_VER 0

View File

@ -165,10 +165,9 @@ static void qedi_tmf_resp_work(struct work_struct *work)
iscsi_block_session(session->cls_session);
rval = qedi_cleanup_all_io(qedi, qedi_conn, qedi_cmd->task, true);
if (rval) {
clear_bit(QEDI_CONN_FW_CLEANUP, &qedi_conn->flags);
qedi_clear_task_idx(qedi, qedi_cmd->task_id);
iscsi_unblock_session(session->cls_session);
return;
goto exit_tmf_resp;
}
iscsi_unblock_session(session->cls_session);
@ -177,6 +176,8 @@ static void qedi_tmf_resp_work(struct work_struct *work)
spin_lock(&session->back_lock);
__iscsi_complete_pdu(conn, (struct iscsi_hdr *)resp_hdr_ptr, NULL, 0);
spin_unlock(&session->back_lock);
exit_tmf_resp:
kfree(resp_hdr_ptr);
clear_bit(QEDI_CONN_FW_CLEANUP, &qedi_conn->flags);
}

View File

@ -3148,14 +3148,17 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
int i, ret;
struct qla_msix_entry *qentry;
scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
int min_vecs = QLA_BASE_VECTORS;
struct irq_affinity desc = {
.pre_vectors = QLA_BASE_VECTORS,
};
if (QLA_TGT_MODE_ENABLED() && IS_ATIO_MSIX_CAPABLE(ha))
if (QLA_TGT_MODE_ENABLED() && IS_ATIO_MSIX_CAPABLE(ha)) {
desc.pre_vectors++;
min_vecs++;
}
ret = pci_alloc_irq_vectors_affinity(ha->pdev, QLA_BASE_VECTORS,
ret = pci_alloc_irq_vectors_affinity(ha->pdev, min_vecs,
ha->msix_count, PCI_IRQ_MSIX | PCI_IRQ_AFFINITY,
&desc);

View File

@ -1849,6 +1849,7 @@ skip_pio:
/* Determine queue resources */
ha->max_req_queues = ha->max_rsp_queues = 1;
ha->msix_count = QLA_BASE_VECTORS;
if (!ql2xmqsupport || (!IS_QLA25XX(ha) && !IS_QLA81XX(ha)))
goto mqiobase_exit;
@ -1876,9 +1877,8 @@ skip_pio:
"BAR 3 not enabled.\n");
mqiobase_exit:
ha->msix_count = ha->max_rsp_queues + 1;
ql_dbg_pci(ql_dbg_init, ha->pdev, 0x001c,
"MSIX Count:%d.\n", ha->msix_count);
"MSIX Count: %d.\n", ha->msix_count);
return (0);
iospace_error_exit:
@ -1926,6 +1926,7 @@ qla83xx_iospace_config(struct qla_hw_data *ha)
/* 83XX 26XX always use MQ type access for queues
* - mbar 2, a.k.a region 4 */
ha->max_req_queues = ha->max_rsp_queues = 1;
ha->msix_count = QLA_BASE_VECTORS;
ha->mqiobase = ioremap(pci_resource_start(ha->pdev, 4),
pci_resource_len(ha->pdev, 4));
@ -1949,12 +1950,13 @@ qla83xx_iospace_config(struct qla_hw_data *ha)
if (ql2xmqsupport) {
/* MB interrupt uses 1 vector */
ha->max_req_queues = ha->msix_count - 1;
ha->max_rsp_queues = ha->max_req_queues;
/* ATIOQ needs 1 vector. That's 1 less QPair */
if (QLA_TGT_MODE_ENABLED())
ha->max_req_queues--;
ha->max_rsp_queues = ha->max_req_queues;
/* Queue pairs is the max value minus
* the base queue pair */
ha->max_qpairs = ha->max_req_queues - 1;
@ -1968,14 +1970,8 @@ qla83xx_iospace_config(struct qla_hw_data *ha)
"BAR 1 not enabled.\n");
mqiobase_exit:
ha->msix_count = ha->max_rsp_queues + 1;
if (QLA_TGT_MODE_ENABLED())
ha->msix_count++;
qlt_83xx_iospace_config(ha);
ql_dbg_pci(ql_dbg_init, ha->pdev, 0x011f,
"MSIX Count:%d.\n", ha->msix_count);
"MSIX Count: %d.\n", ha->msix_count);
return 0;
iospace_error_exit:

View File

@ -1792,7 +1792,7 @@ static struct se_wwn *tcm_qla2xxx_npiv_make_lport(
p = strchr(tmp, '@');
if (!p) {
pr_err("Unable to locate NPIV '@' seperator\n");
pr_err("Unable to locate NPIV '@' separator\n");
return ERR_PTR(-EINVAL);
}
*p++ = '\0';

View File

@ -137,11 +137,11 @@ EXPORT_SYMBOL(int_to_scsilun);
bool scsi_normalize_sense(const u8 *sense_buffer, int sb_len,
struct scsi_sense_hdr *sshdr)
{
memset(sshdr, 0, sizeof(struct scsi_sense_hdr));
if (!sense_buffer || !sb_len)
return false;
memset(sshdr, 0, sizeof(struct scsi_sense_hdr));
sshdr->response_code = (sense_buffer[0] & 0x7f);
if (!scsi_sense_valid(sshdr))

View File

@ -219,20 +219,6 @@ int scsi_unregister_device_handler(struct scsi_device_handler *scsi_dh)
}
EXPORT_SYMBOL_GPL(scsi_unregister_device_handler);
static struct scsi_device *get_sdev_from_queue(struct request_queue *q)
{
struct scsi_device *sdev;
unsigned long flags;
spin_lock_irqsave(q->queue_lock, flags);
sdev = q->queuedata;
if (!sdev || !get_device(&sdev->sdev_gendev))
sdev = NULL;
spin_unlock_irqrestore(q->queue_lock, flags);
return sdev;
}
/*
* scsi_dh_activate - activate the path associated with the scsi_device
* corresponding to the given request queue.
@ -251,7 +237,7 @@ int scsi_dh_activate(struct request_queue *q, activate_complete fn, void *data)
struct scsi_device *sdev;
int err = SCSI_DH_NOSYS;
sdev = get_sdev_from_queue(q);
sdev = scsi_device_from_queue(q);
if (!sdev) {
if (fn)
fn(data, err);
@ -298,7 +284,7 @@ int scsi_dh_set_params(struct request_queue *q, const char *params)
struct scsi_device *sdev;
int err = -SCSI_DH_NOSYS;
sdev = get_sdev_from_queue(q);
sdev = scsi_device_from_queue(q);
if (!sdev)
return err;
@ -321,7 +307,7 @@ int scsi_dh_attach(struct request_queue *q, const char *name)
struct scsi_device_handler *scsi_dh;
int err = 0;
sdev = get_sdev_from_queue(q);
sdev = scsi_device_from_queue(q);
if (!sdev)
return -ENODEV;
@ -359,7 +345,7 @@ const char *scsi_dh_attached_handler_name(struct request_queue *q, gfp_t gfp)
struct scsi_device *sdev;
const char *handler_name = NULL;
sdev = get_sdev_from_queue(q);
sdev = scsi_device_from_queue(q);
if (!sdev)
return NULL;

View File

@ -199,6 +199,7 @@ static int scsi_ioctl_get_pci(struct scsi_device *sdev, void __user *arg)
int scsi_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
{
char scsi_cmd[MAX_COMMAND_SIZE];
struct scsi_sense_hdr sense_hdr;
/* Check for deprecated ioctls ... all the ioctls which don't
* follow the new unique numbering scheme are deprecated */
@ -243,7 +244,7 @@ int scsi_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
return scsi_set_medium_removal(sdev, SCSI_REMOVAL_ALLOW);
case SCSI_IOCTL_TEST_UNIT_READY:
return scsi_test_unit_ready(sdev, IOCTL_NORMAL_TIMEOUT,
NORMAL_RETRIES, NULL);
NORMAL_RETRIES, &sense_hdr);
case SCSI_IOCTL_START_UNIT:
scsi_cmd[0] = START_STOP;
scsi_cmd[1] = 0;

View File

@ -213,10 +213,30 @@ void scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
__scsi_queue_insert(cmd, reason, 1);
}
static int __scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
/**
* scsi_execute - insert request and wait for the result
* @sdev: scsi device
* @cmd: scsi command
* @data_direction: data direction
* @buffer: data buffer
* @bufflen: len of buffer
* @sense: optional sense buffer
* @sshdr: optional decoded sense header
* @timeout: request timeout in seconds
* @retries: number of times to retry request
* @flags: flags for ->cmd_flags
* @rq_flags: flags for ->rq_flags
* @resid: optional residual length
*
* returns the req->errors value which is the scsi_cmnd result
* field.
*/
int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
int data_direction, void *buffer, unsigned bufflen,
unsigned char *sense, int timeout, int retries, u64 flags,
req_flags_t rq_flags, int *resid)
unsigned char *sense, struct scsi_sense_hdr *sshdr,
int timeout, int retries, u64 flags, req_flags_t rq_flags,
int *resid)
{
struct request *req;
struct scsi_request *rq;
@ -259,62 +279,16 @@ static int __scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
*resid = rq->resid_len;
if (sense && rq->sense_len)
memcpy(sense, rq->sense, SCSI_SENSE_BUFFERSIZE);
if (sshdr)
scsi_normalize_sense(rq->sense, rq->sense_len, sshdr);
ret = req->errors;
out:
blk_put_request(req);
return ret;
}
/**
* scsi_execute - insert request and wait for the result
* @sdev: scsi device
* @cmd: scsi command
* @data_direction: data direction
* @buffer: data buffer
* @bufflen: len of buffer
* @sense: optional sense buffer
* @timeout: request timeout in seconds
* @retries: number of times to retry request
* @flags: or into request flags;
* @resid: optional residual length
*
* returns the req->errors value which is the scsi_cmnd result
* field.
*/
int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
int data_direction, void *buffer, unsigned bufflen,
unsigned char *sense, int timeout, int retries, u64 flags,
int *resid)
{
return __scsi_execute(sdev, cmd, data_direction, buffer, bufflen, sense,
timeout, retries, flags, 0, resid);
}
EXPORT_SYMBOL(scsi_execute);
int scsi_execute_req_flags(struct scsi_device *sdev, const unsigned char *cmd,
int data_direction, void *buffer, unsigned bufflen,
struct scsi_sense_hdr *sshdr, int timeout, int retries,
int *resid, u64 flags, req_flags_t rq_flags)
{
char *sense = NULL;
int result;
if (sshdr) {
sense = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_NOIO);
if (!sense)
return DRIVER_ERROR << 24;
}
result = __scsi_execute(sdev, cmd, data_direction, buffer, bufflen,
sense, timeout, retries, flags, rq_flags, resid);
if (sshdr)
scsi_normalize_sense(sense, SCSI_SENSE_BUFFERSIZE, sshdr);
kfree(sense);
return result;
}
EXPORT_SYMBOL(scsi_execute_req_flags);
/*
* Function: scsi_init_cmd_errh()
*
@ -2231,6 +2205,29 @@ void scsi_mq_destroy_tags(struct Scsi_Host *shost)
blk_mq_free_tag_set(&shost->tag_set);
}
/**
* scsi_device_from_queue - return sdev associated with a request_queue
* @q: The request queue to return the sdev from
*
* Return the sdev associated with a request queue or NULL if the
* request_queue does not reference a SCSI device.
*/
struct scsi_device *scsi_device_from_queue(struct request_queue *q)
{
struct scsi_device *sdev = NULL;
if (q->mq_ops) {
if (q->mq_ops == &scsi_mq_ops)
sdev = q->queuedata;
} else if (q->request_fn == scsi_request_fn)
sdev = q->queuedata;
if (!sdev || !get_device(&sdev->sdev_gendev))
sdev = NULL;
return sdev;
}
EXPORT_SYMBOL_GPL(scsi_device_from_queue);
/*
* Function: scsi_block_requests()
*
@ -2497,28 +2494,20 @@ EXPORT_SYMBOL(scsi_mode_sense);
* @sdev: scsi device to change the state of.
* @timeout: command timeout
* @retries: number of retries before failing
* @sshdr_external: Optional pointer to struct scsi_sense_hdr for
* returning sense. Make sure that this is cleared before passing
* in.
* @sshdr: outpout pointer for decoded sense information.
*
* Returns zero if unsuccessful or an error if TUR failed. For
* removable media, UNIT_ATTENTION sets ->changed flag.
**/
int
scsi_test_unit_ready(struct scsi_device *sdev, int timeout, int retries,
struct scsi_sense_hdr *sshdr_external)
struct scsi_sense_hdr *sshdr)
{
char cmd[] = {
TEST_UNIT_READY, 0, 0, 0, 0, 0,
};
struct scsi_sense_hdr *sshdr;
int result;
if (!sshdr_external)
sshdr = kzalloc(sizeof(*sshdr), GFP_KERNEL);
else
sshdr = sshdr_external;
/* try to eat the UNIT_ATTENTION if there are enough retries */
do {
result = scsi_execute_req(sdev, cmd, DMA_NONE, NULL, 0, sshdr,
@ -2529,8 +2518,6 @@ scsi_test_unit_ready(struct scsi_device *sdev, int timeout, int retries,
} while (scsi_sense_valid(sshdr) &&
sshdr->sense_key == UNIT_ATTENTION && --retries);
if (!sshdr_external)
kfree(sshdr);
return result;
}
EXPORT_SYMBOL(scsi_test_unit_ready);

View File

@ -123,25 +123,21 @@ static int spi_execute(struct scsi_device *sdev, const void *cmd,
{
int i, result;
unsigned char sense[SCSI_SENSE_BUFFERSIZE];
struct scsi_sense_hdr sshdr_tmp;
if (!sshdr)
sshdr = &sshdr_tmp;
for(i = 0; i < DV_RETRIES; i++) {
result = scsi_execute(sdev, cmd, dir, buffer, bufflen,
sense, DV_TIMEOUT, /* retries */ 1,
result = scsi_execute(sdev, cmd, dir, buffer, bufflen, sense,
sshdr, DV_TIMEOUT, /* retries */ 1,
REQ_FAILFAST_DEV |
REQ_FAILFAST_TRANSPORT |
REQ_FAILFAST_DRIVER,
NULL);
if (driver_byte(result) & DRIVER_SENSE) {
struct scsi_sense_hdr sshdr_tmp;
if (!sshdr)
sshdr = &sshdr_tmp;
if (scsi_normalize_sense(sense, SCSI_SENSE_BUFFERSIZE,
sshdr)
&& sshdr->sense_key == UNIT_ATTENTION)
continue;
}
break;
0, NULL);
if (!(driver_byte(result) & DRIVER_SENSE) ||
sshdr->sense_key != UNIT_ATTENTION)
break;
}
return result;
}

View File

@ -1425,7 +1425,6 @@ static unsigned int sd_check_events(struct gendisk *disk, unsigned int clearing)
{
struct scsi_disk *sdkp = scsi_disk_get(disk);
struct scsi_device *sdp;
struct scsi_sense_hdr *sshdr = NULL;
int retval;
if (!sdkp)
@ -1454,22 +1453,21 @@ static unsigned int sd_check_events(struct gendisk *disk, unsigned int clearing)
* by sd_spinup_disk() from sd_revalidate_disk(), which happens whenever
* sd_revalidate() is called.
*/
retval = -ENODEV;
if (scsi_block_when_processing_errors(sdp)) {
sshdr = kzalloc(sizeof(*sshdr), GFP_KERNEL);
struct scsi_sense_hdr sshdr = { 0, };
retval = scsi_test_unit_ready(sdp, SD_TIMEOUT, SD_MAX_RETRIES,
sshdr);
}
&sshdr);
/* failed to execute TUR, assume media not present */
if (host_byte(retval)) {
set_media_not_present(sdkp);
goto out;
}
/* failed to execute TUR, assume media not present */
if (host_byte(retval)) {
set_media_not_present(sdkp);
goto out;
}
if (media_not_present(sdkp, sshdr))
goto out;
if (media_not_present(sdkp, &sshdr))
goto out;
}
/*
* For removable scsi disk we have to recognise the presence
@ -1485,7 +1483,6 @@ out:
* Medium present state has changed in either direction.
* Device has indicated UNIT_ATTENTION.
*/
kfree(sshdr);
retval = sdp->changed ? DISK_EVENT_MEDIA_CHANGE : 0;
sdp->changed = 0;
scsi_disk_put(sdkp);
@ -1511,9 +1508,8 @@ static int sd_sync_cache(struct scsi_disk *sdkp)
* Leave the rest of the command zero to indicate
* flush everything.
*/
res = scsi_execute_req_flags(sdp, cmd, DMA_NONE, NULL, 0,
&sshdr, timeout, SD_MAX_RETRIES,
NULL, 0, RQF_PM);
res = scsi_execute(sdp, cmd, DMA_NONE, NULL, 0, NULL, &sshdr,
timeout, SD_MAX_RETRIES, 0, RQF_PM, NULL);
if (res == 0)
break;
}
@ -3084,7 +3080,7 @@ struct sd_devt {
struct disk_devt disk_devt;
};
void sd_devt_release(struct disk_devt *disk_devt)
static void sd_devt_release(struct disk_devt *disk_devt)
{
struct sd_devt *sd_devt = container_of(disk_devt, struct sd_devt,
disk_devt);
@ -3213,10 +3209,10 @@ static int sd_probe(struct device *dev)
sd_devt = NULL;
out_put:
put_disk(gd);
out_free:
kfree(sdkp);
out_free_devt:
kfree(sd_devt);
out_free:
kfree(sdkp);
out:
scsi_autopm_put_device(sdp);
return error;
@ -3299,8 +3295,8 @@ static int sd_start_stop_device(struct scsi_disk *sdkp, int start)
if (!scsi_device_online(sdp))
return -ENODEV;
res = scsi_execute_req_flags(sdp, cmd, DMA_NONE, NULL, 0, &sshdr,
SD_TIMEOUT, SD_MAX_RETRIES, NULL, 0, RQF_PM);
res = scsi_execute(sdp, cmd, DMA_NONE, NULL, 0, NULL, &sshdr,
SD_TIMEOUT, SD_MAX_RETRIES, 0, RQF_PM, NULL);
if (res) {
sd_print_result(sdkp, "Start/Stop Unit failed", res);
if (driver_byte(res) & DRIVER_SENSE)

View File

@ -534,8 +534,7 @@ static int pqi_write_current_time_to_host_wellness(
size_t buffer_length;
time64_t local_time;
unsigned int year;
struct timeval time;
struct rtc_time tm;
struct tm tm;
buffer_length = sizeof(*buffer);
@ -552,9 +551,8 @@ static int pqi_write_current_time_to_host_wellness(
put_unaligned_le16(sizeof(buffer->time),
&buffer->time_length);
do_gettimeofday(&time);
local_time = time.tv_sec - (sys_tz.tz_minuteswest * 60);
rtc_time64_to_tm(local_time, &tm);
local_time = ktime_get_real_seconds();
time64_to_tm(local_time, -sys_tz.tz_minuteswest * 60, &tm);
year = tm.tm_year + 1900;
buffer->time[0] = bin2bcd(tm.tm_hour);

View File

@ -187,30 +187,19 @@ int sr_do_ioctl(Scsi_CD *cd, struct packet_command *cgc)
struct scsi_device *SDev;
struct scsi_sense_hdr sshdr;
int result, err = 0, retries = 0;
struct request_sense *sense = cgc->sense;
SDev = cd->device;
if (!sense) {
sense = kmalloc(SCSI_SENSE_BUFFERSIZE, GFP_KERNEL);
if (!sense) {
err = -ENOMEM;
goto out;
}
}
retry:
if (!scsi_block_when_processing_errors(SDev)) {
err = -ENODEV;
goto out;
}
memset(sense, 0, sizeof(*sense));
result = scsi_execute(SDev, cgc->cmd, cgc->data_direction,
cgc->buffer, cgc->buflen, (char *)sense,
cgc->timeout, IOCTL_RETRIES, 0, NULL);
scsi_normalize_sense((char *)sense, sizeof(*sense), &sshdr);
cgc->buffer, cgc->buflen,
(unsigned char *)cgc->sense, &sshdr,
cgc->timeout, IOCTL_RETRIES, 0, 0, NULL);
/* Minimal error checking. Ignore cases we know about, and report the rest. */
if (driver_byte(result) != 0) {
@ -261,8 +250,6 @@ int sr_do_ioctl(Scsi_CD *cd, struct packet_command *cgc)
/* Wake up a process waiting for device */
out:
if (!cgc->sense)
kfree(sense);
cgc->stat = err;
return err;
}

View File

@ -1523,18 +1523,6 @@ static bool ufs_qcom_testbus_cfg_is_ok(struct ufs_qcom_host *host)
return false;
}
/*
* Not performing check for each individual select_major
* mappings of select_minor, since there is no harm in
* configuring a non-existent select_minor
*/
if (host->testbus.select_minor > 0xFF) {
dev_err(host->hba->dev,
"%s: 0x%05X is not a legal testbus option\n",
__func__, host->testbus.select_minor);
return false;
}
return true;
}

View File

@ -6915,9 +6915,9 @@ ufshcd_send_request_sense(struct ufs_hba *hba, struct scsi_device *sdp)
goto out;
}
ret = scsi_execute_req_flags(sdp, cmd, DMA_FROM_DEVICE, buffer,
UFSHCD_REQ_SENSE_SIZE, NULL,
msecs_to_jiffies(1000), 3, NULL, 0, RQF_PM);
ret = scsi_execute(sdp, cmd, DMA_FROM_DEVICE, buffer,
UFSHCD_REQ_SENSE_SIZE, NULL, NULL,
msecs_to_jiffies(1000), 3, 0, RQF_PM, NULL);
if (ret)
pr_err("%s: failed with err %d\n", __func__, ret);
@ -6982,8 +6982,8 @@ static int ufshcd_set_dev_pwr_mode(struct ufs_hba *hba,
* callbacks hence set the RQF_PM flag so that it doesn't resume the
* already suspended childs.
*/
ret = scsi_execute_req_flags(sdp, cmd, DMA_NONE, NULL, 0, &sshdr,
START_STOP_TIMEOUT, 0, NULL, 0, RQF_PM);
ret = scsi_execute(sdp, cmd, DMA_NONE, NULL, 0, NULL, &sshdr,
START_STOP_TIMEOUT, 0, 0, RQF_PM, NULL);
if (ret) {
sdev_printk(KERN_WARNING, sdp,
"START_STOP failed for power mode: %d, result %x\n",

View File

@ -315,6 +315,7 @@ extern void scsi_remove_device(struct scsi_device *);
extern int scsi_unregister_device_handler(struct scsi_device_handler *scsi_dh);
void scsi_attach_vpd(struct scsi_device *sdev);
extern struct scsi_device *scsi_device_from_queue(struct request_queue *q);
extern int scsi_device_get(struct scsi_device *);
extern void scsi_device_put(struct scsi_device *);
extern struct scsi_device *scsi_device_lookup(struct Scsi_Host *,
@ -409,19 +410,16 @@ extern int scsi_is_target_device(const struct device *);
extern void scsi_sanitize_inquiry_string(unsigned char *s, int len);
extern int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
int data_direction, void *buffer, unsigned bufflen,
unsigned char *sense, int timeout, int retries,
u64 flags, int *resid);
extern int scsi_execute_req_flags(struct scsi_device *sdev,
const unsigned char *cmd, int data_direction, void *buffer,
unsigned bufflen, struct scsi_sense_hdr *sshdr, int timeout,
int retries, int *resid, u64 flags, req_flags_t rq_flags);
unsigned char *sense, struct scsi_sense_hdr *sshdr,
int timeout, int retries, u64 flags,
req_flags_t rq_flags, int *resid);
static inline int scsi_execute_req(struct scsi_device *sdev,
const unsigned char *cmd, int data_direction, void *buffer,
unsigned bufflen, struct scsi_sense_hdr *sshdr, int timeout,
int retries, int *resid)
{
return scsi_execute_req_flags(sdev, cmd, data_direction, buffer,
bufflen, sshdr, timeout, retries, resid, 0, 0);
return scsi_execute(sdev, cmd, data_direction, buffer,
bufflen, NULL, sshdr, timeout, retries, 0, 0, resid);
}
extern void sdev_disable_disk_events(struct scsi_device *sdev);
extern void sdev_enable_disk_events(struct scsi_device *sdev);