1
0
Fork 0

snic: driver for Cisco SCSI HBA

Cisco has developed a new PCI HBA interface called sNIC, which stands for
SCSI NIC. This is a new storage feature supported on a specialized network
adapter. The new PCI function provides a uniform host interface and abstracts
the backend storage.

[jejb: fix up checkpatch errors]
Signed-off-by: Narsimhulu Musini <nmusini@cisco.com>
Signed-off-by: Sesidhar Baddela <sebaddel@cisco.com>
Reviewed-by: Hannes Reinecke <hare@suse.de>
Signed-off-by: James Bottomley <JBottomley@Odin.com>
hifive-unleashed-5.1
Narsimhulu Musini 2015-05-29 01:04:01 -07:00 committed by James Bottomley
parent 8d2b21db49
commit c8806b6c9e
37 changed files with 10263 additions and 0 deletions

View File

@ -2590,6 +2590,13 @@ L: linux-scsi@vger.kernel.org
S: Supported
F: drivers/scsi/fnic/
CISCO SCSI HBA DRIVER
M: Narsimhulu Musini <nmusini@cisco.com>
M: Sesidhar Baddela <sebaddel@cisco.com>
L: linux-scsi@vger.kernel.org
S: Supported
F: drivers/scsi/snic/
CMPC ACPI DRIVER
M: Thadeu Lima de Souza Cascardo <cascardo@holoscopio.com>
M: Daniel Oliveira Nascimento <don@syst.com.br>

View File

@ -634,6 +634,23 @@ config FCOE_FNIC
<file:Documentation/scsi/scsi.txt>.
The module will be called fnic.
config SCSI_SNIC
tristate "Cisco SNIC Driver"
depends on PCI && SCSI
help
This is support for the Cisco PCI-Express SCSI HBA.
To compile this driver as a module, choose M here and read
<file:Documentation/scsi/scsi.txt>.
The module will be called snic.
config SCSI_SNIC_DEBUG_FS
bool "Cisco SNIC Driver Debugfs Support"
depends on SCSI_SNIC && DEBUG_FS
help
This enables listing of debugging information from the SNIC driver,
available via the debugfs file system.
config SCSI_DMX3191D
tristate "DMX3191D SCSI support"
depends on PCI && SCSI

View File

@ -39,6 +39,7 @@ obj-$(CONFIG_LIBFC) += libfc/
obj-$(CONFIG_LIBFCOE) += fcoe/
obj-$(CONFIG_FCOE) += fcoe/
obj-$(CONFIG_FCOE_FNIC) += fnic/
obj-$(CONFIG_SCSI_SNIC) += snic/
obj-$(CONFIG_SCSI_BNX2X_FCOE) += libfc/ fcoe/ bnx2fc/
obj-$(CONFIG_ISCSI_TCP) += libiscsi.o libiscsi_tcp.o iscsi_tcp.o
obj-$(CONFIG_INFINIBAND_ISER) += libiscsi.o

View File

@ -0,0 +1,17 @@
obj-$(CONFIG_SCSI_SNIC) += snic.o
snic-y := \
snic_attrs.o \
snic_main.o \
snic_res.o \
snic_isr.o \
snic_ctl.o \
snic_io.o \
snic_scsi.o \
snic_disc.o \
vnic_cq.o \
vnic_intr.o \
vnic_dev.o \
vnic_wq.o
snic-$(CONFIG_SCSI_SNIC_DEBUG_FS) += snic_debugfs.o snic_trc.o

View File

@ -0,0 +1,77 @@
/*
* Copyright 2014 Cisco Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef _CQ_DESC_H_
#define _CQ_DESC_H_
/*
 * Completion queue descriptor types.
 *
 * Reported in the low CQ_DESC_TYPE_BITS of cq_desc.type_color
 * (see cq_desc_dec()).
 */
enum cq_desc_types {
	CQ_DESC_TYPE_WQ_ENET = 0,
	CQ_DESC_TYPE_DESC_COPY = 1,
	CQ_DESC_TYPE_WQ_EXCH = 2,
	CQ_DESC_TYPE_RQ_ENET = 3,
	CQ_DESC_TYPE_RQ_FCP = 4,
};
/* Completion queue descriptor: 16B
 *
 * All completion queues have this basic layout. The
 * type_specific area is unique for each completion
 * queue type.
 */
struct cq_desc {
	__le16 completed_index;	/* only the low CQ_DESC_COMP_NDX_BITS are valid */
	__le16 q_number;	/* only the low CQ_DESC_Q_NUM_BITS are valid */
	u8 type_specific[11];	/* per-type payload */
	u8 type_color;		/* type in low CQ_DESC_TYPE_BITS; color in bit
				 * CQ_DESC_COLOR_SHIFT, written last by hardware
				 */
};
#define CQ_DESC_TYPE_BITS 4
#define CQ_DESC_TYPE_MASK ((1 << CQ_DESC_TYPE_BITS) - 1)
#define CQ_DESC_COLOR_MASK 1
#define CQ_DESC_COLOR_SHIFT 7
#define CQ_DESC_Q_NUM_BITS 10
#define CQ_DESC_Q_NUM_MASK ((1 << CQ_DESC_Q_NUM_BITS) - 1)
#define CQ_DESC_COMP_NDX_BITS 12
#define CQ_DESC_COMP_NDX_MASK ((1 << CQ_DESC_COMP_NDX_BITS) - 1)
/*
 * cq_desc_dec - decode a completion descriptor into its fields.
 *
 * Splits @desc_arg->type_color into @type and @color, and extracts
 * @q_number and @completed_index with the unused high bits masked off.
 * The read of the color bit is ordered before the other reads; see the
 * comment at the rmb() below.
 */
static inline void cq_desc_dec(const struct cq_desc *desc_arg,
	u8 *type, u8 *color, u16 *q_number, u16 *completed_index)
{
	const struct cq_desc *desc = desc_arg;
	const u8 type_color = desc->type_color;

	*color = (type_color >> CQ_DESC_COLOR_SHIFT) & CQ_DESC_COLOR_MASK;

	/*
	 * Make sure color bit is read from desc *before* other fields
	 * are read from desc. Hardware guarantees color bit is last
	 * bit (byte) written. Adding the rmb() prevents the compiler
	 * and/or CPU from reordering the reads which would potentially
	 * result in reading stale values.
	 */
	rmb();

	*type = type_color & CQ_DESC_TYPE_MASK;
	*q_number = le16_to_cpu(desc->q_number) & CQ_DESC_Q_NUM_MASK;
	*completed_index = le16_to_cpu(desc->completed_index) &
		CQ_DESC_COMP_NDX_MASK;
}
#endif /* _CQ_DESC_H_ */

View File

@ -0,0 +1,38 @@
/*
* Copyright 2014 Cisco Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef _CQ_ENET_DESC_H_
#define _CQ_ENET_DESC_H_
#include "cq_desc.h"
/* Ethernet completion queue descriptor: 16B
 *
 * Layout-compatible with struct cq_desc: the reserved area sits where
 * cq_desc keeps its type_specific bytes, which is what allows
 * cq_enet_wq_desc_dec() to cast and reuse the generic decoder.
 */
struct cq_enet_wq_desc {
	__le16 completed_index;
	__le16 q_number;
	u8 reserved[11];
	u8 type_color;
};
/*
 * cq_enet_wq_desc_dec - decode an ethernet WQ completion descriptor.
 *
 * struct cq_enet_wq_desc shares its layout with struct cq_desc, so the
 * decode is delegated to the generic cq_desc_dec() helper.
 */
static inline void cq_enet_wq_desc_dec(struct cq_enet_wq_desc *desc,
	u8 *type, u8 *color, u16 *q_number, u16 *completed_index)
{
	const struct cq_desc *cq = (struct cq_desc *)desc;

	cq_desc_dec(cq, type, color, q_number, completed_index);
}
#endif /* _CQ_ENET_DESC_H_ */

View File

@ -0,0 +1,414 @@
/*
* Copyright 2014 Cisco Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef _SNIC_H_
#define _SNIC_H_
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/workqueue.h>
#include <linux/bitops.h>
#include <linux/mempool.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include "snic_disc.h"
#include "snic_io.h"
#include "snic_res.h"
#include "snic_trc.h"
#include "snic_stats.h"
#include "vnic_dev.h"
#include "vnic_wq.h"
#include "vnic_cq.h"
#include "vnic_intr.h"
#include "vnic_stats.h"
#include "vnic_snic.h"
#define SNIC_DRV_NAME "snic"
#define SNIC_DRV_DESCRIPTION "Cisco SCSI NIC Driver"
#define SNIC_DRV_VERSION "0.0.1.18"
#define PFX SNIC_DRV_NAME ":"
#define DFX SNIC_DRV_NAME "%d: "
#define DESC_CLEAN_LOW_WATERMARK 8
#define SNIC_UCSM_DFLT_THROTTLE_CNT_BLD 16 /* UCSM default throttle count */
#define SNIC_MAX_IO_REQ 50 /* scsi_cmnd tag map entries */
#define SNIC_MIN_IO_REQ 8 /* Min IO throttle count */
#define SNIC_IO_LOCKS 64 /* IO locks: power of 2 */
#define SNIC_DFLT_QUEUE_DEPTH 32 /* Default Queue Depth */
#define SNIC_MAX_QUEUE_DEPTH 64 /* Max Queue Depth */
#define SNIC_DFLT_CMD_TIMEOUT 90 /* Extended tmo for FW */
/*
* Tag bits used for special requests.
*/
#define SNIC_TAG_ABORT BIT(30) /* Tag indicating abort */
#define SNIC_TAG_DEV_RST BIT(29) /* Tag for device reset */
#define SNIC_TAG_IOCTL_DEV_RST BIT(28) /* Tag for User Device Reset */
#define SNIC_TAG_MASK (BIT(24) - 1) /* Mask for lookup */
#define SNIC_NO_TAG -1
/*
* Command flags to identify the type of command and for other future use
*/
#define SNIC_NO_FLAGS 0
#define SNIC_IO_INITIALIZED BIT(0)
#define SNIC_IO_ISSUED BIT(1)
#define SNIC_IO_DONE BIT(2)
#define SNIC_IO_REQ_NULL BIT(3)
#define SNIC_IO_ABTS_PENDING BIT(4)
#define SNIC_IO_ABORTED BIT(5)
#define SNIC_IO_ABTS_ISSUED BIT(6)
#define SNIC_IO_TERM_ISSUED BIT(7)
#define SNIC_IO_ABTS_TIMEDOUT BIT(8)
#define SNIC_IO_ABTS_TERM_DONE BIT(9)
#define SNIC_IO_ABTS_TERM_REQ_NULL BIT(10)
#define SNIC_IO_ABTS_TERM_TIMEDOUT BIT(11)
#define SNIC_IO_INTERNAL_TERM_PENDING BIT(12)
#define SNIC_IO_INTERNAL_TERM_ISSUED BIT(13)
#define SNIC_DEVICE_RESET BIT(14)
#define SNIC_DEV_RST_ISSUED BIT(15)
#define SNIC_DEV_RST_TIMEDOUT BIT(16)
#define SNIC_DEV_RST_ABTS_ISSUED BIT(17)
#define SNIC_DEV_RST_TERM_ISSUED BIT(18)
#define SNIC_DEV_RST_DONE BIT(19)
#define SNIC_DEV_RST_REQ_NULL BIT(20)
#define SNIC_DEV_RST_ABTS_DONE BIT(21)
#define SNIC_DEV_RST_TERM_DONE BIT(22)
#define SNIC_DEV_RST_ABTS_PENDING BIT(23)
#define SNIC_DEV_RST_PENDING BIT(24)
#define SNIC_DEV_RST_NOTSUP BIT(25)
#define SNIC_SCSI_CLEANUP BIT(26)
#define SNIC_HOST_RESET_ISSUED BIT(27)
#define SNIC_ABTS_TIMEOUT 30000 /* msec */
#define SNIC_LUN_RESET_TIMEOUT 30000 /* msec */
#define SNIC_HOST_RESET_TIMEOUT 30000 /* msec */
/*
* These are protected by the hashed req_lock.
*/
#define CMD_SP(Cmnd) \
(((struct snic_internal_io_state *)scsi_cmd_priv(Cmnd))->rqi)
#define CMD_STATE(Cmnd) \
(((struct snic_internal_io_state *)scsi_cmd_priv(Cmnd))->state)
#define CMD_ABTS_STATUS(Cmnd) \
(((struct snic_internal_io_state *)scsi_cmd_priv(Cmnd))->abts_status)
#define CMD_LR_STATUS(Cmnd) \
(((struct snic_internal_io_state *)scsi_cmd_priv(Cmnd))->lr_status)
#define CMD_FLAGS(Cmnd) \
(((struct snic_internal_io_state *)scsi_cmd_priv(Cmnd))->flags)
#define SNIC_INVALID_CODE 0x100 /* Hdr Status val unused by firmware */
#define SNIC_MAX_TARGET 256
#define SNIC_FLAGS_NONE (0)
/* snic module params */
extern unsigned int snic_max_qdepth;
/* snic debugging */
extern unsigned int snic_log_level;
#define SNIC_MAIN_LOGGING 0x1
#define SNIC_SCSI_LOGGING 0x2
#define SNIC_ISR_LOGGING 0x8
#define SNIC_DESC_LOGGING 0x10
#define SNIC_CHECK_LOGGING(LEVEL, CMD) \
do { \
if (unlikely(snic_log_level & LEVEL)) \
do { \
CMD; \
} while (0); \
} while (0)
#define SNIC_MAIN_DBG(host, fmt, args...) \
SNIC_CHECK_LOGGING(SNIC_MAIN_LOGGING, \
shost_printk(KERN_INFO, host, fmt, ## args);)
#define SNIC_SCSI_DBG(host, fmt, args...) \
SNIC_CHECK_LOGGING(SNIC_SCSI_LOGGING, \
shost_printk(KERN_INFO, host, fmt, ##args);)
#define SNIC_DISC_DBG(host, fmt, args...) \
SNIC_CHECK_LOGGING(SNIC_SCSI_LOGGING, \
shost_printk(KERN_INFO, host, fmt, ##args);)
#define SNIC_ISR_DBG(host, fmt, args...) \
SNIC_CHECK_LOGGING(SNIC_ISR_LOGGING, \
shost_printk(KERN_INFO, host, fmt, ##args);)
#define SNIC_HOST_ERR(host, fmt, args...) \
shost_printk(KERN_ERR, host, fmt, ##args)
#define SNIC_HOST_INFO(host, fmt, args...) \
shost_printk(KERN_INFO, host, fmt, ##args)
#define SNIC_INFO(fmt, args...) \
pr_info(PFX fmt, ## args)
#define SNIC_DBG(fmt, args...) \
pr_info(PFX fmt, ## args)
#define SNIC_ERR(fmt, args...) \
pr_err(PFX fmt, ## args)
#ifdef DEBUG
#define SNIC_BUG_ON(EXPR) \
({ \
if (EXPR) { \
SNIC_ERR("SNIC BUG(%s)\n", #EXPR); \
BUG_ON(EXPR); \
} \
})
#else
#define SNIC_BUG_ON(EXPR) \
({ \
if (EXPR) { \
SNIC_ERR("SNIC BUG(%s) at %s : %d\n", \
#EXPR, __func__, __LINE__); \
WARN_ON_ONCE(EXPR); \
} \
})
#endif
/* Soft assert */
#define SNIC_ASSERT_NOT_IMPL(EXPR) \
({ \
if (EXPR) {\
SNIC_INFO("Functionality not impl'ed at %s:%d\n", \
__func__, __LINE__); \
WARN_ON_ONCE(EXPR); \
} \
})
extern const char *snic_state_str[];
enum snic_intx_intr_index {
SNIC_INTX_WQ_RQ_COPYWQ,
SNIC_INTX_ERR,
SNIC_INTX_NOTIFY,
SNIC_INTX_INTR_MAX,
};
enum snic_msix_intr_index {
SNIC_MSIX_WQ,
SNIC_MSIX_IO_CMPL,
SNIC_MSIX_ERR_NOTIFY,
SNIC_MSIX_INTR_MAX,
};
/* Per-vector MSI-X bookkeeping (one entry per snic_msix_intr_index) */
struct snic_msix_entry {
	int requested;		/* presumably non-zero once the vector's IRQ
				 * has been requested -- confirm in snic_isr.c
				 */
	char devname[IFNAMSIZ];	/* name associated with this vector */
	irqreturn_t (*isr)(int, void *);	/* interrupt service routine */
	void *devid;		/* cookie handed to the ISR */
};
enum snic_state {
SNIC_INIT = 0,
SNIC_ERROR,
SNIC_ONLINE,
SNIC_OFFLINE,
SNIC_FWRESET,
};
#define SNIC_WQ_MAX 1
#define SNIC_CQ_IO_CMPL_MAX 1
#define SNIC_CQ_MAX (SNIC_WQ_MAX + SNIC_CQ_IO_CMPL_MAX)
/* firmware version information */
struct snic_fw_info {
u32 fw_ver;
u32 hid; /* u16 hid | u16 vnic id */
u32 max_concur_ios; /* max concurrent ios */
u32 max_sgs_per_cmd; /* max sgls per IO */
u32 max_io_sz; /* max io size supported */
u32 hba_cap; /* hba capabilities */
u32 max_tgts; /* max tgts supported */
u16 io_tmo; /* FW Extended timeout */
struct completion *wait; /* protected by snic lock*/
};
/*
* snic_work item : defined to process asynchronous events
*/
struct snic_work {
struct work_struct work;
u16 ev_id;
u64 *ev_data;
};
/*
* snic structure to represent SCSI vNIC
*/
struct snic {
/* snic specific members */
struct list_head list;
char name[IFNAMSIZ];
atomic_t state;
spinlock_t snic_lock;
struct completion *remove_wait;
bool in_remove;
bool stop_link_events; /* stop processing link events */
/* discovery related */
struct snic_disc disc;
/* Scsi Host info */
struct Scsi_Host *shost;
/* vnic related structures */
struct vnic_dev_bar bar0;
struct vnic_stats *stats;
unsigned long stats_time;
unsigned long stats_reset_time;
struct vnic_dev *vdev;
/* hw resource info */
unsigned int wq_count;
unsigned int cq_count;
unsigned int intr_count;
unsigned int err_intr_offset;
int link_status; /* retrieved from svnic_dev_link_status() */
u32 link_down_cnt;
/* pci related */
struct pci_dev *pdev;
struct msix_entry msix_entry[SNIC_MSIX_INTR_MAX];
struct snic_msix_entry msix[SNIC_MSIX_INTR_MAX];
/* io related info */
mempool_t *req_pool[SNIC_REQ_MAX_CACHES]; /* (??) */
____cacheline_aligned spinlock_t io_req_lock[SNIC_IO_LOCKS];
/* Maintain snic specific commands, cmds with no tag in spl_cmd_list */
____cacheline_aligned spinlock_t spl_cmd_lock;
struct list_head spl_cmd_list;
unsigned int max_tag_id;
atomic_t ios_inflight; /* io in flight counter */
struct vnic_snic_config config;
struct work_struct link_work;
/* firmware information */
struct snic_fw_info fwinfo;
/* Work for processing Target related work */
struct work_struct tgt_work;
/* Work for processing Discovery */
struct work_struct disc_work;
/* stats related */
unsigned int reset_stats;
atomic64_t io_cmpl_skip;
struct snic_stats s_stats; /* Per SNIC driver stats */
/* platform specific */
#ifdef CONFIG_SCSI_SNIC_DEBUG_FS
struct dentry *stats_host; /* Per snic debugfs root */
struct dentry *stats_file; /* Per snic debugfs file */
struct dentry *reset_stats_file;/* Per snic reset stats file */
#endif
/* completion queue cache line section */
____cacheline_aligned struct vnic_cq cq[SNIC_CQ_MAX];
/* work queue cache line section */
____cacheline_aligned struct vnic_wq wq[SNIC_WQ_MAX];
spinlock_t wq_lock[SNIC_WQ_MAX];
/* interrupt resource cache line section */
____cacheline_aligned struct vnic_intr intr[SNIC_MSIX_INTR_MAX];
}; /* end of snic structure */
/*
* SNIC Driver's Global Data
*/
struct snic_global {
struct list_head snic_list;
spinlock_t snic_list_lock;
struct kmem_cache *req_cache[SNIC_REQ_MAX_CACHES];
struct workqueue_struct *event_q;
#ifdef CONFIG_SCSI_SNIC_DEBUG_FS
/* debugfs related global data */
struct dentry *trc_root;
struct dentry *stats_root;
struct snic_trc trc ____cacheline_aligned;
#endif
};
extern struct snic_global *snic_glob;
int snic_glob_init(void);
void snic_glob_cleanup(void);
extern struct workqueue_struct *snic_event_queue;
extern struct device_attribute *snic_attrs[];
int snic_queuecommand(struct Scsi_Host *, struct scsi_cmnd *);
int snic_abort_cmd(struct scsi_cmnd *);
int snic_device_reset(struct scsi_cmnd *);
int snic_host_reset(struct scsi_cmnd *);
int snic_reset(struct Scsi_Host *, struct scsi_cmnd *);
void snic_shutdown_scsi_cleanup(struct snic *);
int snic_request_intr(struct snic *);
void snic_free_intr(struct snic *);
int snic_set_intr_mode(struct snic *);
void snic_clear_intr_mode(struct snic *);
int snic_fwcq_cmpl_handler(struct snic *, int);
int snic_wq_cmpl_handler(struct snic *, int);
void snic_free_wq_buf(struct vnic_wq *, struct vnic_wq_buf *);
void snic_log_q_error(struct snic *);
void snic_handle_link_event(struct snic *);
void snic_handle_link(struct work_struct *);
int snic_queue_exch_ver_req(struct snic *);
int snic_io_exch_ver_cmpl_handler(struct snic *, struct snic_fw_req *);
int snic_queue_wq_desc(struct snic *, void *os_buf, u16 len);
void snic_handle_untagged_req(struct snic *, struct snic_req_info *);
void snic_release_untagged_req(struct snic *, struct snic_req_info *);
void snic_free_all_untagged_reqs(struct snic *);
int snic_get_conf(struct snic *);
void snic_set_state(struct snic *, enum snic_state);
int snic_get_state(struct snic *);
const char *snic_state_to_str(unsigned int);
void snic_hex_dump(char *, char *, int);
void snic_print_desc(const char *fn, char *os_buf, int len);
const char *show_opcode_name(int val);
#endif /* _SNIC_H_ */

View File

@ -0,0 +1,77 @@
/*
* Copyright 2014 Cisco Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/string.h>
#include <linux/device.h>
#include "snic.h"
static ssize_t
snic_show_sym_name(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct snic *snic = shost_priv(class_to_shost(dev));
return snprintf(buf, PAGE_SIZE, "%s\n", snic->name);
}
static ssize_t
snic_show_state(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct snic *snic = shost_priv(class_to_shost(dev));
return snprintf(buf, PAGE_SIZE, "%s\n",
snic_state_str[snic_get_state(snic)]);
}
static ssize_t
snic_show_drv_version(struct device *dev,
struct device_attribute *attr,
char *buf)
{
return snprintf(buf, PAGE_SIZE, "%s\n", SNIC_DRV_VERSION);
}
static ssize_t
snic_show_link_state(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct snic *snic = shost_priv(class_to_shost(dev));
if (snic->config.xpt_type == SNIC_DAS)
snic->link_status = svnic_dev_link_status(snic->vdev);
return snprintf(buf, PAGE_SIZE, "%s\n",
(snic->link_status) ? "Link Up" : "Link Down");
}
static DEVICE_ATTR(snic_sym_name, S_IRUGO, snic_show_sym_name, NULL);
static DEVICE_ATTR(snic_state, S_IRUGO, snic_show_state, NULL);
static DEVICE_ATTR(drv_version, S_IRUGO, snic_show_drv_version, NULL);
static DEVICE_ATTR(link_state, S_IRUGO, snic_show_link_state, NULL);
struct device_attribute *snic_attrs[] = {
&dev_attr_snic_sym_name,
&dev_attr_snic_state,
&dev_attr_drv_version,
&dev_attr_link_state,
NULL,
};

View File

@ -0,0 +1,279 @@
/*
* Copyright 2014 Cisco Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/mempool.h>
#include <scsi/scsi_tcq.h>
#include <linux/ctype.h>
#include "snic_io.h"
#include "snic.h"
#include "cq_enet_desc.h"
#include "snic_fwint.h"
/*
 * snic_handle_link : Handles link flaps.
 *
 * Work handler scheduled via snic->link_work. For SNIC_DAS transports it
 * refreshes the cached link state; for any other transport type link
 * events are not implemented and only logged.
 */
void
snic_handle_link(struct work_struct *work)
{
	struct snic *snic = container_of(work, struct snic, link_work);

	if (snic->config.xpt_type != SNIC_DAS) {
		SNIC_HOST_INFO(snic->shost, "Link Event Received.\n");
		SNIC_ASSERT_NOT_IMPL(1);

		return;
	}

	/* Refresh cached link status and down-count from the vnic device */
	snic->link_status = svnic_dev_link_status(snic->vdev);
	snic->link_down_cnt = svnic_dev_link_down_cnt(snic->vdev);
	SNIC_HOST_INFO(snic->shost, "Link Event: Link %s.\n",
		       ((snic->link_status) ? "Up" : "Down"));
}
/*
 * snic_ver_enc : Encodes version str to int
 * version string is similar to netmask string, i.e. "a.b.c.d" with each
 * component in [0, 0xff].
 *
 * Returns the packed 32-bit version (a<<24 | b<<16 | c<<8 | d), or -1 if
 * the string is malformed.
 */
static int
snic_ver_enc(const char *s)
{
	int v[4] = {0};
	int i = 0, x = 0;
	char c;
	const char *p = s;

	/* validate version string */
	if ((strlen(s) > 15) || (strlen(s) < 7))
		goto end;

	while ((c = *p++)) {
		if (c == '.') {
			i++;
			continue;
		}

		/*
		 * Only four dotted components (v[0]..v[3]) are valid.
		 * The bound must be "i > 3": the previous "i > 4" check
		 * let a fifth component write past the end of v[].
		 */
		if (i > 3 || !isdigit(c))
			goto end;

		v[i] = v[i] * 10 + (c - '0');
	}

	/* validate sub version numbers */
	for (i = 3; i >= 0; i--)
		if (v[i] > 0xff)
			goto end;

	x |= (v[0] << 24) | v[1] << 16 | v[2] << 8 | v[3];

end:
	if (x == 0) {
		SNIC_ERR("Invalid version string [%s].\n", s);

		return -1;
	}

	return x;
} /* end of snic_ver_enc */
/*
 * snic_queue_exch_ver_req :
 *
 * Queues Exchange Version Request, to communicate host information
 * in return, it gets firmware version details
 *
 * Returns 0 on success, -ENOMEM if no request can be allocated, or the
 * error from snic_queue_wq_desc().
 */
int
snic_queue_exch_ver_req(struct snic *snic)
{
	struct snic_req_info *rqi = NULL;
	struct snic_host_req *req = NULL;
	u32 ver = 0;
	int ret = 0;

	SNIC_HOST_INFO(snic->shost, "Exch Ver Req Preparing...\n");

	rqi = snic_req_init(snic, 0);
	if (!rqi) {
		/*
		 * Assign ret before logging; previously the message was
		 * emitted while ret was still 0, reporting "err = 0".
		 */
		ret = -ENOMEM;
		SNIC_HOST_ERR(snic->shost,
			      "Queuing Exch Ver Req failed, err = %d\n",
			      ret);

		goto error;
	}

	req = rqi_to_req(rqi);

	/* Initialize snic_host_req */
	snic_io_hdr_enc(&req->hdr, SNIC_REQ_EXCH_VER, 0, SCSI_NO_TAG,
			snic->config.hid, 0, (ulong)rqi);
	ver = snic_ver_enc(SNIC_DRV_VERSION);
	req->u.exch_ver.drvr_ver = cpu_to_le32(ver);
	req->u.exch_ver.os_type = cpu_to_le32(SNIC_OS_LINUX);

	/* Track this untagged request so it can be cleaned up later */
	snic_handle_untagged_req(snic, rqi);

	ret = snic_queue_wq_desc(snic, req, sizeof(*req));
	if (ret) {
		snic_release_untagged_req(snic, rqi);
		SNIC_HOST_ERR(snic->shost,
			      "Queuing Exch Ver Req failed, err = %d\n",
			      ret);

		goto error;
	}

	SNIC_HOST_INFO(snic->shost, "Exch Ver Req is issued. ret = %d\n", ret);

error:
	return ret;
} /* end of snic_queue_exch_ver_req */
/*
 * snic_io_exch_ver_cmpl_handler : completion handler for the exchange
 * version request.
 *
 * Decodes the firmware response header; on success, caches the firmware
 * limits (version, hid, IO/sg sizes, timeouts) into snic->fwinfo under
 * snic_lock, clamps the SCSI host's sg_tablesize/can_queue/max_sectors
 * accordingly, and completes any waiter registered in fwinfo.wait (see
 * snic_get_conf()). The untagged request is released in all cases.
 */
int
snic_io_exch_ver_cmpl_handler(struct snic *snic, struct snic_fw_req *fwreq)
{
	struct snic_req_info *rqi = NULL;
	struct snic_exch_ver_rsp *exv_cmpl = &fwreq->u.exch_ver_cmpl;
	u8 typ, hdr_stat;
	u32 cmnd_id, hid, max_sgs;
	ulong ctx = 0;
	unsigned long flags;
	int ret = 0;

	SNIC_HOST_INFO(snic->shost, "Exch Ver Compl Received.\n");
	snic_io_hdr_dec(&fwreq->hdr, &typ, &hdr_stat, &cmnd_id, &hid, &ctx);
	SNIC_BUG_ON(snic->config.hid != hid);
	/* ctx carries the rqi pointer encoded in snic_queue_exch_ver_req() */
	rqi = (struct snic_req_info *) ctx;

	if (hdr_stat) {
		SNIC_HOST_ERR(snic->shost,
			      "Exch Ver Completed w/ err status %d\n",
			      hdr_stat);

		goto exch_cmpl_end;
	}

	spin_lock_irqsave(&snic->snic_lock, flags);
	snic->fwinfo.fw_ver = le32_to_cpu(exv_cmpl->version);
	snic->fwinfo.hid = le32_to_cpu(exv_cmpl->hid);
	snic->fwinfo.max_concur_ios = le32_to_cpu(exv_cmpl->max_concur_ios);
	snic->fwinfo.max_sgs_per_cmd = le32_to_cpu(exv_cmpl->max_sgs_per_cmd);
	snic->fwinfo.max_io_sz = le32_to_cpu(exv_cmpl->max_io_sz);
	snic->fwinfo.max_tgts = le32_to_cpu(exv_cmpl->max_tgts);
	snic->fwinfo.io_tmo = le16_to_cpu(exv_cmpl->io_timeout);
	/* NOTE(review): hba_cap is only logged below and never stored in
	 * fwinfo.hba_cap -- confirm whether that is intentional.
	 */

	SNIC_HOST_INFO(snic->shost,
		       "vers %u hid %u max_concur_ios %u max_sgs_per_cmd %u max_io_sz %u max_tgts %u fw tmo %u\n",
		       snic->fwinfo.fw_ver,
		       snic->fwinfo.hid,
		       snic->fwinfo.max_concur_ios,
		       snic->fwinfo.max_sgs_per_cmd,
		       snic->fwinfo.max_io_sz,
		       snic->fwinfo.max_tgts,
		       snic->fwinfo.io_tmo);

	SNIC_HOST_INFO(snic->shost,
		       "HBA Capabilities = 0x%x\n",
		       le32_to_cpu(exv_cmpl->hba_cap));

	/* Updating SGList size */
	max_sgs = snic->fwinfo.max_sgs_per_cmd;
	if (max_sgs && max_sgs < SNIC_MAX_SG_DESC_CNT) {
		snic->shost->sg_tablesize = max_sgs;
		SNIC_HOST_INFO(snic->shost, "Max SGs set to %d\n",
			       snic->shost->sg_tablesize);
	} else if (max_sgs > snic->shost->sg_tablesize) {
		SNIC_HOST_INFO(snic->shost,
			       "Target type %d Supports Larger Max SGList %d than driver's Max SG List %d.\n",
			       snic->config.xpt_type, max_sgs,
			       snic->shost->sg_tablesize);
	}

	/* Never allow more queued commands than firmware can take in flight */
	if (snic->shost->can_queue > snic->fwinfo.max_concur_ios)
		snic->shost->can_queue = snic->fwinfo.max_concur_ios;

	snic->shost->max_sectors = snic->fwinfo.max_io_sz >> 9; /* bytes -> 512B sectors */

	if (snic->fwinfo.wait)
		complete(snic->fwinfo.wait);
	spin_unlock_irqrestore(&snic->snic_lock, flags);

exch_cmpl_end:
	snic_release_untagged_req(snic, rqi);

	SNIC_HOST_INFO(snic->shost, "Exch_cmpl Done, hdr_stat %d.\n", hdr_stat);

	return ret;
} /* end of snic_io_exch_ver_cmpl_handler */
/*
 * snic_get_conf
 *
 * Synchronous call, and Retrieves snic params.
 *
 * Issues an exchange-version request and sleeps on an on-stack completion
 * that snic_io_exch_ver_cmpl_handler() signals once fwinfo is populated.
 * Retries up to nr_retries times since firmware may ignore the request
 * during HW resource initialization.
 *
 * Returns 0 on success, -ETIMEDOUT if firmware never responded, or the
 * error from snic_queue_exch_ver_req().
 */
int
snic_get_conf(struct snic *snic)
{
	DECLARE_COMPLETION_ONSTACK(wait);
	unsigned long flags;
	int ret;
	int nr_retries = 3;	/* total attempts at the handshake */

	SNIC_HOST_INFO(snic->shost, "Retrieving snic params.\n");

	/* Register the on-stack completion for the cmpl handler to signal */
	spin_lock_irqsave(&snic->snic_lock, flags);
	memset(&snic->fwinfo, 0, sizeof(snic->fwinfo));
	snic->fwinfo.wait = &wait;
	spin_unlock_irqrestore(&snic->snic_lock, flags);

	/* Additional delay to handle HW Resource initialization. */
	msleep(50);

	/*
	 * Exch ver req can be ignored by FW, if HW Resource initialization
	 * is in progress, Hence retry.
	 */
	do {
		ret = snic_queue_exch_ver_req(snic);
		if (ret)
			return ret;

		wait_for_completion_timeout(&wait, msecs_to_jiffies(2000));

		spin_lock_irqsave(&snic->snic_lock, flags);
		/* fw_ver != 0 means the completion handler filled fwinfo in */
		ret = (snic->fwinfo.fw_ver != 0) ? 0 : -ETIMEDOUT;
		if (ret)
			SNIC_HOST_ERR(snic->shost,
				      "Failed to retrieve snic params,\n");

		/* Unset fwinfo.wait, on success or on last retry */
		if (ret == 0 || nr_retries == 1)
			snic->fwinfo.wait = NULL;

		spin_unlock_irqrestore(&snic->snic_lock, flags);
	} while (ret && --nr_retries);

	return ret;
} /* end of snic_get_conf */

View File

@ -0,0 +1,560 @@
/*
* Copyright 2014 Cisco Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/debugfs.h>
#include "snic.h"
/*
 * snic_debugfs_init - Initialize debugfs for snic debug logging
 *
 * Description:
 * When debugfs is configured, this routine creates the top-level "snic"
 * directory (stored as the trace root) and a "statistics" directory
 * beneath it for stats logging.
 *
 * Returns 0 on success, -1 if either directory cannot be created.
 */
int
snic_debugfs_init(void)
{
	struct dentry *dent;

	dent = debugfs_create_dir("snic", NULL);
	if (!dent) {
		SNIC_DBG("Cannot create debugfs root\n");

		return -1;
	}
	snic_glob->trc_root = dent;

	dent = debugfs_create_dir("statistics", snic_glob->trc_root);
	if (!dent) {
		SNIC_DBG("Cannot create Statistics directory\n");

		return -1;
	}
	snic_glob->stats_root = dent;

	return 0;
} /* end of snic_debugfs_init */
/*
 * snic_debugfs_term - Tear down debugfs infrastructure
 *
 * Description:
 * When debugfs is configured this routine removes debugfs file system
 * elements that are specific to snic
 */
void
snic_debugfs_term(void)
{
	debugfs_remove(snic_glob->stats_root);
	snic_glob->stats_root = NULL;

	debugfs_remove(snic_glob->trc_root);
	snic_glob->trc_root = NULL;
}
/*
 * snic_reset_stats_open - Open the reset_stats file
 *
 * Stashes inode->i_private (the struct snic *, as consumed by the read
 * and write handlers) into filp->private_data.
 */
static int
snic_reset_stats_open(struct inode *inode, struct file *filp)
{
	SNIC_BUG_ON(!inode->i_private);
	filp->private_data = inode->i_private;

	return 0;
}
/*
 * snic_reset_stats_read - Read a reset_stats debugfs file
 * @filp: The file pointer to read from.
 * @ubuf: The buffer to copy the data to.
 * @cnt: The number of bytes to read.
 * @ppos: The position in the file to start reading from.
 *
 * Description:
 * This routine reads value of variable reset_stats
 * and stores into local @buf. It will start reading file at @ppos and
 * copy up to @cnt of data to @ubuf from @buf.
 *
 * Returns:
 * This function returns the amount of data that was read.
 */
static ssize_t
snic_reset_stats_read(struct file *filp,
		      char __user *ubuf,
		      size_t cnt,
		      loff_t *ppos)
{
	struct snic *snic = (struct snic *) filp->private_data;
	char buf[64];
	int len;

	len = sprintf(buf, "%u\n", snic->reset_stats);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, len);
}
/*
 * snic_reset_stats_write - Write to reset_stats debugfs file
 * @filp: The file pointer to write from
 * @ubuf: The buffer to copy the data from.
 * @cnt: The number of bytes to write.
 * @ppos: The position in the file to start writing to.
 *
 * Description:
 * This routine writes data from user buffer @ubuf to buffer @buf and
 * resets cumulative stats of snic when a non-zero value is written.
 *
 * Returns:
 * This function returns the amount of data that was written, or a
 * negative errno on a bad user buffer or non-numeric input.
 */
static ssize_t
snic_reset_stats_write(struct file *filp,
		       const char __user *ubuf,
		       size_t cnt,
		       loff_t *ppos)
{
	struct snic *snic = (struct snic *) filp->private_data;
	struct snic_stats *stats = &snic->s_stats;
	u64 *io_stats_p = (u64 *) &stats->io;
	u64 *fw_stats_p = (u64 *) &stats->fw;
	char buf[64];
	unsigned long val;
	int ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = '\0';

	ret = kstrtoul(buf, 10, &val);
	if (ret < 0)
		return ret;

	snic->reset_stats = val;

	if (snic->reset_stats) {
		/* Skip variable is used to avoid discrepancies between Num IOs
		 * and IO Completions stats. Skip incrementing No IO Compls
		 * for pending active IOs after reset_stats
		 */
		atomic64_set(&snic->io_cmpl_skip,
			     atomic64_read(&stats->io.active));
		memset(&stats->abts, 0, sizeof(struct snic_abort_stats));
		memset(&stats->reset, 0, sizeof(struct snic_reset_stats));
		memset(&stats->misc, 0, sizeof(struct snic_misc_stats));
		/* Zero the io/fw stats blocks but keep their first u64 --
		 * NOTE(review): assumes the counter to preserve (active IOs)
		 * is the first member of each struct; confirm in snic_stats.h
		 */
		memset(io_stats_p+1,
		       0,
		       sizeof(struct snic_io_stats) - sizeof(u64));
		memset(fw_stats_p+1,
		       0,
		       sizeof(struct snic_fw_stats) - sizeof(u64));
	}

	(*ppos)++;
	SNIC_HOST_INFO(snic->shost, "Reset Op: Driver statistics.\n");

	return cnt;
}
/*
 * snic_reset_stats_release - Release (close) the reset_stats file
 */
static int
snic_reset_stats_release(struct inode *inode, struct file *filp)
{
	filp->private_data = NULL;

	return 0;
}
/*
 * snic_stats_show - Formats and prints per host specific driver stats.
 *
 * seq_file ->show() callback for the "stats" debugfs node; sfp->private
 * carries the struct snic bound at file creation. Dumps IO, abort,
 * reset, firmware and miscellaneous counters, all read via atomic64.
 * Always returns 0 (success).
 */
static int
snic_stats_show(struct seq_file *sfp, void *data)
{
	struct snic *snic = (struct snic *) sfp->private;
	struct snic_stats *stats = &snic->s_stats;
	struct timespec last_isr_tms, last_ack_tms;
	u64 maxio_tm;
	int i;

	/* Dump IO Stats */
	seq_printf(sfp,
		   "------------------------------------------\n"
		   "\t\t IO Statistics\n"
		   "------------------------------------------\n");

	maxio_tm = (u64) atomic64_read(&stats->io.max_time);
	seq_printf(sfp,
		   "Active IOs : %lld\n"
		   "Max Active IOs : %lld\n"
		   "Total IOs : %lld\n"
		   "IOs Completed : %lld\n"
		   "IOs Failed : %lld\n"
		   "IOs Not Found : %lld\n"
		   "Memory Alloc Failures : %lld\n"
		   "REQs Null : %lld\n"
		   "SCSI Cmd Pointers Null : %lld\n"
		   "Max SGL for any IO : %lld\n"
		   "Max IO Size : %lld Sectors\n"
		   "Max Queuing Time : %lld\n"
		   "Max Completion Time : %lld\n"
		   "Max IO Process Time(FW) : %lld (%u msec)\n",
		   (u64) atomic64_read(&stats->io.active),
		   (u64) atomic64_read(&stats->io.max_active),
		   (u64) atomic64_read(&stats->io.num_ios),
		   (u64) atomic64_read(&stats->io.compl),
		   (u64) atomic64_read(&stats->io.fail),
		   (u64) atomic64_read(&stats->io.io_not_found),
		   (u64) atomic64_read(&stats->io.alloc_fail),
		   (u64) atomic64_read(&stats->io.req_null),
		   (u64) atomic64_read(&stats->io.sc_null),
		   (u64) atomic64_read(&stats->io.max_sgl),
		   (u64) atomic64_read(&stats->io.max_io_sz),
		   (u64) atomic64_read(&stats->io.max_qtime),
		   (u64) atomic64_read(&stats->io.max_cmpl_time),
		   maxio_tm,
		   jiffies_to_msecs(maxio_tm));

	/* Histogram of SG element counts, eight entries per output line. */
	seq_puts(sfp, "\nSGL Counters\n");

	for (i = 0; i < SNIC_MAX_SG_DESC_CNT; i++) {
		seq_printf(sfp,
			   "%10lld ",
			   (u64) atomic64_read(&stats->io.sgl_cnt[i]));

		if ((i + 1) % 8 == 0)
			seq_puts(sfp, "\n");
	}

	/* Dump Abort Stats */
	seq_printf(sfp,
		   "\n-------------------------------------------\n"
		   "\t\t Abort Statistics\n"
		   "---------------------------------------------\n");

	seq_printf(sfp,
		   "Aborts : %lld\n"
		   "Aborts Fail : %lld\n"
		   "Aborts Driver Timeout : %lld\n"
		   "Abort FW Timeout : %lld\n"
		   "Abort IO NOT Found : %lld\n",
		   (u64) atomic64_read(&stats->abts.num),
		   (u64) atomic64_read(&stats->abts.fail),
		   (u64) atomic64_read(&stats->abts.drv_tmo),
		   (u64) atomic64_read(&stats->abts.fw_tmo),
		   (u64) atomic64_read(&stats->abts.io_not_found));

	/* Dump Reset Stats */
	seq_printf(sfp,
		   "\n-------------------------------------------\n"
		   "\t\t Reset Statistics\n"
		   "---------------------------------------------\n");

	seq_printf(sfp,
		   "HBA Resets : %lld\n"
		   "HBA Reset Cmpls : %lld\n"
		   "HBA Reset Fail : %lld\n",
		   (u64) atomic64_read(&stats->reset.hba_resets),
		   (u64) atomic64_read(&stats->reset.hba_reset_cmpl),
		   (u64) atomic64_read(&stats->reset.hba_reset_fail));

	/* Dump Firmware Stats */
	seq_printf(sfp,
		   "\n-------------------------------------------\n"
		   "\t\t Firmware Statistics\n"
		   "---------------------------------------------\n");

	seq_printf(sfp,
		   "Active FW Requests : %lld\n"
		   "Max FW Requests : %lld\n"
		   "FW Out Of Resource Errs : %lld\n"
		   "FW IO Errors : %lld\n"
		   "FW SCSI Errors : %lld\n",
		   (u64) atomic64_read(&stats->fw.actv_reqs),
		   (u64) atomic64_read(&stats->fw.max_actv_reqs),
		   (u64) atomic64_read(&stats->fw.out_of_res),
		   (u64) atomic64_read(&stats->fw.io_errs),
		   (u64) atomic64_read(&stats->fw.scsi_errs));

	/* Dump Miscellenous Stats */
	seq_printf(sfp,
		   "\n---------------------------------------------\n"
		   "\t\t Other Statistics\n"
		   "\n---------------------------------------------\n");

	/* ISR/Ack timestamps are stored in jiffies; shown raw and as secs. */
	jiffies_to_timespec(stats->misc.last_isr_time, &last_isr_tms);
	jiffies_to_timespec(stats->misc.last_ack_time, &last_ack_tms);

	seq_printf(sfp,
		   "Last ISR Time : %llu (%8lu.%8lu)\n"
		   "Last Ack Time : %llu (%8lu.%8lu)\n"
		   "ISRs : %llu\n"
		   "Max CQ Entries : %lld\n"
		   "Data Count Mismatch : %lld\n"
		   "IOs w/ Timeout Status : %lld\n"
		   "IOs w/ Aborted Status : %lld\n"
		   "IOs w/ SGL Invalid Stat : %lld\n"
		   "WQ Desc Alloc Fail : %lld\n"
		   "Queue Full : %lld\n"
		   "Target Not Ready : %lld\n",
		   (u64) stats->misc.last_isr_time,
		   last_isr_tms.tv_sec, last_isr_tms.tv_nsec,
		   (u64)stats->misc.last_ack_time,
		   last_ack_tms.tv_sec, last_ack_tms.tv_nsec,
		   (u64) atomic64_read(&stats->misc.isr_cnt),
		   (u64) atomic64_read(&stats->misc.max_cq_ents),
		   (u64) atomic64_read(&stats->misc.data_cnt_mismat),
		   (u64) atomic64_read(&stats->misc.io_tmo),
		   (u64) atomic64_read(&stats->misc.io_aborted),
		   (u64) atomic64_read(&stats->misc.sgl_inval),
		   (u64) atomic64_read(&stats->misc.wq_alloc_fail),
		   (u64) atomic64_read(&stats->misc.qfull),
		   (u64) atomic64_read(&stats->misc.tgt_not_rdy));

	return 0;
}
/*
 * snic_stats_open - Open the stats file for specific host
 *
 * Binds the seq_file machinery to snic_stats_show(); inode->i_private
 * holds the struct snic pointer stored at file-creation time.
 */
static int
snic_stats_open(struct inode *inode, struct file *filp)
{
	void *priv = inode->i_private;

	return single_open(filp, snic_stats_show, priv);
}
/* File operations for the read-only per-host "stats" debugfs node. */
static const struct file_operations snic_stats_fops = {
	.owner = THIS_MODULE,
	.open = snic_stats_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

/* File operations for the read/write per-host "reset_stats" debugfs node. */
static const struct file_operations snic_reset_stats_fops = {
	.owner = THIS_MODULE,
	.open = snic_reset_stats_open,
	.read = snic_reset_stats_read,
	.write = snic_reset_stats_write,
	.release = snic_reset_stats_release,
};
/*
 * snic_stats_debugfs_init - Create the per-host statistics debugfs files
 * @snic: adapter instance to expose
 *
 * Creates statistics/host<n>/{stats,reset_stats} under the global stats
 * root. The created dentries are cached on @snic so that
 * snic_stats_debugfs_remove() can tear them down later.
 *
 * Returns 0 on success, -1 if the root is missing or creation fails.
 */
int
snic_stats_debugfs_init(struct snic *snic)
{
	struct dentry *dent;
	char name[16];

	snprintf(name, sizeof(name), "host%d", snic->shost->host_no);

	if (!snic_glob->stats_root) {
		SNIC_DBG("snic_stats root doesn't exist\n");

		return -1;
	}

	dent = debugfs_create_dir(name, snic_glob->stats_root);
	if (!dent) {
		SNIC_DBG("Cannot create host directory\n");

		return -1;
	}
	snic->stats_host = dent;

	dent = debugfs_create_file("stats",
				   S_IFREG|S_IRUGO,
				   snic->stats_host,
				   snic,
				   &snic_stats_fops);
	if (!dent) {
		SNIC_DBG("Cannot create host's stats file\n");

		return -1;
	}
	snic->stats_file = dent;

	dent = debugfs_create_file("reset_stats",
				   S_IFREG|S_IRUGO|S_IWUSR,
				   snic->stats_host,
				   snic,
				   &snic_reset_stats_fops);
	if (!dent) {
		SNIC_DBG("Cannot create host's reset_stats file\n");

		return -1;
	}
	snic->reset_stats_file = dent;

	return 0;
} /* end of snic_stats_debugfs_init */
/*
 * snic_stats_debugfs_remove - Tear down debugfs infrastructure of stats
 * @snic: adapter whose stats files are being removed
 *
 * Removes the per-host stats and reset_stats files, then the host
 * directory itself, clearing each cached dentry as it goes.
 */
void
snic_stats_debugfs_remove(struct snic *snic)
{
	/* Children first, then the host directory that contains them. */
	debugfs_remove(snic->stats_file);
	snic->stats_file = NULL;

	debugfs_remove(snic->reset_stats_file);
	snic->reset_stats_file = NULL;

	debugfs_remove(snic->stats_host);
	snic->stats_host = NULL;
}
/* Trace Facility related API */

/* seq_file ->start(): always positions on the global trace context. */
static void *
snic_trc_seq_start(struct seq_file *sfp, loff_t *pos)
{
	return &snic_glob->trc;
}

/* seq_file ->next(): no further positions — always NULL. */
static void *
snic_trc_seq_next(struct seq_file *sfp, void *data, loff_t *pos)
{
	return NULL;
}

/* seq_file ->stop(): nothing to release. */
static void
snic_trc_seq_stop(struct seq_file *sfp, void *data)
{
}
#define SNIC_TRC_PBLEN 256
/* seq_file ->show(): print one formatted trace record, if any remain. */
static int
snic_trc_seq_show(struct seq_file *sfp, void *data)
{
	char tmp[SNIC_TRC_PBLEN];

	if (snic_get_trc_data(tmp, SNIC_TRC_PBLEN) > 0)
		seq_printf(sfp, "%s\n", tmp);

	return 0;
}
/* seq_file iterator over the global trace buffer. */
static const struct seq_operations snic_trc_seq_ops = {
	.start = snic_trc_seq_start,
	.next = snic_trc_seq_next,
	.stop = snic_trc_seq_stop,
	.show = snic_trc_seq_show,
};

/* Open handler for the global "trace" debugfs file. */
static int
snic_trc_open(struct inode *inode, struct file *filp)
{
	return seq_open(filp, &snic_trc_seq_ops);
}

/* File operations for the global "trace" debugfs node. */
static const struct file_operations snic_trc_fops = {
	.owner = THIS_MODULE,
	.open = snic_trc_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};
/*
 * snic_trc_debugfs_init : creates trace/tracing_enable files for trace
 * under debugfs
 *
 * Creates "tracing_enable" (bound to snic_glob->trc.enable) and "trace"
 * (served by snic_trc_fops) under the global trace root. The dentries
 * are cached in snic_glob->trc for later teardown.
 *
 * Returns 0 on success, -1 if the root is missing or creation fails.
 */
int
snic_trc_debugfs_init(void)
{
	struct dentry *de = NULL;
	int ret = -1;

	if (!snic_glob->trc_root) {
		SNIC_ERR("Debugfs root directory for snic doesn't exist.\n");

		return ret;
	}

	de = debugfs_create_bool("tracing_enable",
				 S_IFREG | S_IRUGO | S_IWUSR,
				 snic_glob->trc_root,
				 &snic_glob->trc.enable);
	if (!de) {
		SNIC_ERR("Can't create trace_enable file.\n");

		return ret;
	}
	snic_glob->trc.trc_enable = de;

	de = debugfs_create_file("trace",
				 S_IFREG | S_IRUGO | S_IWUSR,
				 snic_glob->trc_root,
				 NULL,
				 &snic_trc_fops);
	if (!de) {
		SNIC_ERR("Cannot create trace file.\n");

		return ret;
	}
	snic_glob->trc.trc_file = de;
	ret = 0;

	return ret;
} /* end of snic_trc_debugfs_init */
/*
 * snic_trc_debugfs_term : cleans up the files created for trace under debugfs
 */
void
snic_trc_debugfs_term(void)
{
	/* debugfs_remove() tolerates NULL, so repeated teardown is safe. */
	debugfs_remove(snic_glob->trc.trc_file);
	snic_glob->trc.trc_file = NULL;

	debugfs_remove(snic_glob->trc.trc_enable);
	snic_glob->trc.trc_enable = NULL;
}

View File

@ -0,0 +1,551 @@
/*
* Copyright 2014 Cisco Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/errno.h>
#include <linux/mempool.h>
#include <scsi/scsi_tcq.h>
#include "snic_disc.h"
#include "snic.h"
#include "snic_io.h"
/* snic target types: printable names, indexed by enum snic_tgt_type */
static const char * const snic_tgt_type_str[] = {
	[SNIC_TGT_DAS] = "DAS",
	[SNIC_TGT_SAN] = "SAN",
};
/* Map a snic_tgt_type value to its printable name. */
static inline const char *
snic_tgt_type_to_str(int typ)
{
	if (typ > SNIC_TGT_NONE && typ <= SNIC_TGT_SAN)
		return snic_tgt_type_str[typ];

	return "Unknown";
}
/* Printable names for enum snic_tgt_state values. */
static const char * const snic_tgt_state_str[] = {
	[SNIC_TGT_STAT_INIT] = "INIT",
	[SNIC_TGT_STAT_ONLINE] = "ONLINE",
	[SNIC_TGT_STAT_OFFLINE] = "OFFLINE",
	[SNIC_TGT_STAT_DEL] = "DELETION IN PROGRESS",
};
/* Map a snic_tgt_state value to its printable name. */
const char *
snic_tgt_state_to_str(int state)
{
	if (state >= SNIC_TGT_STAT_INIT && state <= SNIC_TGT_STAT_DEL)
		return snic_tgt_state_str[state];

	return "UNKNOWN";
}
/*
 * snic_report_tgt_init - Build a REPORT_TARGETS request descriptor
 * @req:        host request to populate
 * @hid:        host id to encode in the request header
 * @buf:        response buffer virtual address
 * @len:        response buffer length in bytes
 * @rsp_buf_pa: DMA address of the response buffer
 * @ctx:        initiator context echoed back by firmware on completion
 */
static void
snic_report_tgt_init(struct snic_host_req *req, u32 hid, u8 *buf, u32 len,
		     dma_addr_t rsp_buf_pa, ulong ctx)
{
	struct snic_sg_desc *sgd;

	snic_io_hdr_enc(&req->hdr, SNIC_REQ_REPORT_TGTS, 0, SCSI_NO_TAG, hid,
			1, ctx);

	/* Single SG element pointing at the DMA-mapped response buffer. */
	req->u.rpt_tgts.sg_cnt = cpu_to_le16(1);
	sgd = req_to_sgl(req);
	sgd[0].addr = cpu_to_le64(rsp_buf_pa);
	sgd[0].len = cpu_to_le32(len);
	sgd[0]._resvd = 0;
	req->u.rpt_tgts.sg_addr = cpu_to_le64((ulong)sgd);
}
/*
 * snic_queue_report_tgt_req: Queues report target request.
 *
 * Allocates a request context and a DMA-able response buffer sized for
 * the maximum number of targets, then posts a REPORT_TARGETS request to
 * firmware. On success the response buffer is owned by the completion
 * path (snic_report_tgt_cmpl_handler); on failure everything allocated
 * here is unwound before returning a negative errno.
 */
static int
snic_queue_report_tgt_req(struct snic *snic)
{
	struct snic_req_info *rqi = NULL;
	u32 ntgts, buf_len = 0;
	u8 *buf = NULL;
	dma_addr_t pa = 0;
	int ret = 0;

	rqi = snic_req_init(snic, 1);
	if (!rqi) {
		ret = -ENOMEM;
		goto error;
	}

	/* Cap the target count by the firmware limit, when known. */
	if (snic->fwinfo.max_tgts)
		ntgts = min_t(u32, snic->fwinfo.max_tgts, snic->shost->max_id);
	else
		ntgts = snic->shost->max_id;

	/* Allocate Response Buffer */
	SNIC_BUG_ON(ntgts == 0);
	/* Extra SNIC_SG_DESC_ALIGN bytes guarantee alignment headroom. */
	buf_len = ntgts * sizeof(struct snic_tgt_id) + SNIC_SG_DESC_ALIGN;

	buf = kzalloc(buf_len, GFP_KERNEL|GFP_DMA);
	if (!buf) {
		snic_req_free(snic, rqi);
		SNIC_HOST_ERR(snic->shost, "Resp Buf Alloc Failed.\n");

		ret = -ENOMEM;
		goto error;
	}

	SNIC_BUG_ON((((unsigned long)buf) % SNIC_SG_DESC_ALIGN) != 0);

	pa = pci_map_single(snic->pdev, buf, buf_len, PCI_DMA_FROMDEVICE);
	if (pci_dma_mapping_error(snic->pdev, pa)) {
		kfree(buf);
		snic_req_free(snic, rqi);
		SNIC_HOST_ERR(snic->shost,
			      "Rpt-tgt rspbuf %p: PCI DMA Mapping Failed\n",
			      buf);

		ret = -EINVAL;
		goto error;
	}

	SNIC_BUG_ON(pa == 0);
	/* Stash the buffer VA so the completion handler can find it. */
	rqi->sge_va = (ulong) buf;

	snic_report_tgt_init(rqi->req,
			     snic->config.hid,
			     buf,
			     buf_len,
			     pa,
			     (ulong)rqi);

	snic_handle_untagged_req(snic, rqi);

	ret = snic_queue_wq_desc(snic, rqi->req, rqi->req_len);
	if (ret) {
		/* Undo mapping, buffer and request bookkeeping in order. */
		pci_unmap_single(snic->pdev, pa, buf_len, PCI_DMA_FROMDEVICE);
		kfree(buf);
		rqi->sge_va = 0;
		snic_release_untagged_req(snic, rqi);
		SNIC_HOST_ERR(snic->shost, "Queuing Report Tgts Failed.\n");

		goto error;
	}

	SNIC_DISC_DBG(snic->shost, "Report Targets Issued.\n");

	return ret;

error:
	SNIC_HOST_ERR(snic->shost,
		      "Queuing Report Targets Failed, err = %d\n",
		      ret);

	return ret;
} /* end of snic_queue_report_tgt_req */
/*
 * snic_scsi_scan_tgt - Work handler that calls into the SCSI midlayer
 * to scan all LUNs of a newly discovered target, then clears the
 * target's SCAN_PENDING flag under the host lock.
 */
static void
snic_scsi_scan_tgt(struct work_struct *work)
{
	struct snic_tgt *tgt = container_of(work, struct snic_tgt, scan_work);
	struct Scsi_Host *shost = dev_to_shost(&tgt->dev);
	unsigned long flags;

	SNIC_HOST_INFO(shost, "Scanning Target id 0x%x\n", tgt->id);

	/* Wildcard LUN scan, rescan allowed. */
	scsi_scan_target(&tgt->dev,
			 tgt->channel,
			 tgt->scsi_tgt_id,
			 SCAN_WILD_CARD,
			 1);

	spin_lock_irqsave(shost->host_lock, flags);
	tgt->flags &= ~SNIC_TGT_SCAN_PENDING;
	spin_unlock_irqrestore(shost->host_lock, flags);
} /* end of snic_scsi_scan_tgt */
/*
* snic_tgt_lookup :
*/
static struct snic_tgt *
snic_tgt_lookup(struct snic *snic, struct snic_tgt_id *tgtid)
{
struct list_head *cur, *nxt;
struct snic_tgt *tgt = NULL;
list_for_each_safe(cur, nxt, &snic->disc.tgt_list) {
tgt = list_entry(cur, struct snic_tgt, list);
if (tgt->id == le32_to_cpu(tgtid->tgt_id))
return tgt;
tgt = NULL;
}
return tgt;
} /* end of snic_tgt_lookup */
/*
 * snic_tgt_dev_release : Called on dropping last ref for snic_tgt object
 *
 * Final put_device() callback: logs the deletion and frees the target.
 */
void
snic_tgt_dev_release(struct device *dev)
{
	struct snic_tgt *tgt = dev_to_tgt(dev);

	SNIC_HOST_INFO(snic_tgt_to_shost(tgt),
		       "Target Device ID %d (%s) Permanently Deleted.\n",
		       tgt->id,
		       dev_name(dev));

	/* A target must already be unlinked from the discovery list. */
	SNIC_BUG_ON(!list_empty(&tgt->list));

	kfree(tgt);
}
/*
 * snic_tgt_del : work function to delete snic_tgt
 *
 * Teardown order matters here: finish any pending scan, block new IO,
 * abort outstanding IO, unblock to flush, then remove the SCSI target
 * and drop the device reference taken at creation.
 */
static void
snic_tgt_del(struct work_struct *work)
{
	struct snic_tgt *tgt = container_of(work, struct snic_tgt, del_work);
	struct Scsi_Host *shost = snic_tgt_to_shost(tgt);

	/* Let an in-flight scan for this target complete first. */
	if (tgt->flags & SNIC_TGT_SCAN_PENDING)
		scsi_flush_work(shost);

	/* Block IOs on child devices, stops new IOs */
	scsi_target_block(&tgt->dev);

	/* Cleanup IOs */
	snic_tgt_scsi_abort_io(tgt);

	/* Unblock IOs now, to flush if there are any. */
	scsi_target_unblock(&tgt->dev, SDEV_TRANSPORT_OFFLINE);

	/* Delete SCSI Target and sdevs */
	scsi_remove_target(&tgt->dev);  /* ?? */
	device_del(&tgt->dev);
	/* Drops the creation reference; release hook frees the target. */
	put_device(&tgt->dev);
} /* end of snic_tgt_del */
/* snic_tgt_create: checks for existence of snic_tgt, if it doesn't
 * it creates one.
 *
 * Returns the (existing or new) snic_tgt, or NULL on allocation or
 * device_add() failure. On success the target is linked on
 * snic->disc.tgt_list, marked ONLINE and a LUN scan is queued.
 */
static struct snic_tgt *
snic_tgt_create(struct snic *snic, struct snic_tgt_id *tgtid)
{
	struct snic_tgt *tgt = NULL;
	unsigned long flags;
	int ret;

	tgt = snic_tgt_lookup(snic, tgtid);
	if (tgt) {
		/* update the information if required */
		return tgt;
	}

	tgt = kzalloc(sizeof(*tgt), GFP_KERNEL);
	if (!tgt) {
		SNIC_HOST_ERR(snic->shost, "Failure to allocate snic_tgt.\n");
		ret = -ENOMEM;

		return tgt;
	}

	INIT_LIST_HEAD(&tgt->list);
	tgt->id = le32_to_cpu(tgtid->tgt_id);
	tgt->channel = 0;
	SNIC_BUG_ON(le16_to_cpu(tgtid->tgt_type) > SNIC_TGT_SAN);
	tgt->tdata.typ = le16_to_cpu(tgtid->tgt_type);

	/*
	 * Plugging into SML Device Tree
	 */
	tgt->tdata.disc_id = 0;
	tgt->state = SNIC_TGT_STAT_INIT;
	device_initialize(&tgt->dev);
	tgt->dev.parent = get_device(&snic->shost->shost_gendev);
	tgt->dev.release = snic_tgt_dev_release;
	INIT_WORK(&tgt->scan_work, snic_scsi_scan_tgt);
	INIT_WORK(&tgt->del_work, snic_tgt_del);
	switch (tgt->tdata.typ) {
	case SNIC_TGT_DAS:
		dev_set_name(&tgt->dev, "snic_das_tgt:%d:%d-%d",
			     snic->shost->host_no, tgt->channel, tgt->id);
		break;

	case SNIC_TGT_SAN:
		dev_set_name(&tgt->dev, "snic_san_tgt:%d:%d-%d",
			     snic->shost->host_no, tgt->channel, tgt->id);
		break;

	default:
		SNIC_HOST_INFO(snic->shost, "Target type Unknown Detected.\n");
		dev_set_name(&tgt->dev, "snic_das_tgt:%d:%d-%d",
			     snic->shost->host_no, tgt->channel, tgt->id);
		break;
	}

	spin_lock_irqsave(snic->shost->host_lock, flags);
	list_add_tail(&tgt->list, &snic->disc.tgt_list);
	tgt->scsi_tgt_id = snic->disc.nxt_tgt_id++;
	tgt->state = SNIC_TGT_STAT_ONLINE;
	spin_unlock_irqrestore(snic->shost->host_lock, flags);

	SNIC_HOST_INFO(snic->shost,
		       "Tgt %d, type = %s detected. Adding..\n",
		       tgt->id, snic_tgt_type_to_str(tgt->tdata.typ));

	ret = device_add(&tgt->dev);
	if (ret) {
		SNIC_HOST_ERR(snic->shost,
			      "Snic Tgt: device_add, with err = %d\n",
			      ret);

		/*
		 * The target was already published on the discovery list
		 * above; unlink it before releasing so no dangling pointer
		 * is left behind (the original kfree()d while still listed).
		 */
		spin_lock_irqsave(snic->shost->host_lock, flags);
		list_del_init(&tgt->list);
		spin_unlock_irqrestore(snic->shost->host_lock, flags);

		put_device(&snic->shost->shost_gendev);
		/*
		 * tgt->dev was device_initialize()d, so release it through
		 * put_device() (which invokes snic_tgt_dev_release() and
		 * frees tgt) rather than kfree()ing an initialized device.
		 */
		put_device(&tgt->dev);
		tgt = NULL;

		return tgt;
	}

	SNIC_HOST_INFO(snic->shost, "Scanning %s.\n", dev_name(&tgt->dev));

	scsi_queue_work(snic->shost, &tgt->scan_work);

	return tgt;
} /* end of snic_tgt_create */
/* Handler for discovery
 *
 * Work function (snic->tgt_work) that consumes the raw report-targets
 * response buffer (disc.rtgt_info) and instantiates a snic_tgt for each
 * reported id. Frees the response buffer on every exit path; restarts
 * discovery if another request arrived while this one was in flight.
 */
void
snic_handle_tgt_disc(struct work_struct *work)
{
	struct snic *snic = container_of(work, struct snic, tgt_work);
	struct snic_tgt_id *tgtid = NULL;
	struct snic_tgt *tgt = NULL;
	unsigned long flags;
	int i;

	/* Adapter is being removed: just drop the response buffer. */
	spin_lock_irqsave(&snic->snic_lock, flags);
	if (snic->in_remove) {
		spin_unlock_irqrestore(&snic->snic_lock, flags);
		kfree(snic->disc.rtgt_info);

		return;
	}
	spin_unlock_irqrestore(&snic->snic_lock, flags);

	mutex_lock(&snic->disc.mutex);
	/* Discover triggered during disc in progress */
	if (snic->disc.req_cnt) {
		snic->disc.state = SNIC_DISC_DONE;
		snic->disc.req_cnt = 0;
		mutex_unlock(&snic->disc.mutex);
		kfree(snic->disc.rtgt_info);
		snic->disc.rtgt_info = NULL;
		SNIC_HOST_INFO(snic->shost, "tgt_disc: Discovery restart.\n");

		/* Start Discovery Again */
		snic_disc_start(snic);

		return;
	}

	tgtid = (struct snic_tgt_id *)snic->disc.rtgt_info;
	SNIC_BUG_ON(snic->disc.rtgt_cnt == 0 || tgtid == NULL);

	/* Create (or find) a snic_tgt for each reported target id. */
	for (i = 0; i < snic->disc.rtgt_cnt; i++) {
		tgt = snic_tgt_create(snic, &tgtid[i]);
		if (!tgt) {
			int buf_sz = snic->disc.rtgt_cnt * sizeof(*tgtid);

			SNIC_HOST_ERR(snic->shost, "Failed to create tgt.\n");
			snic_hex_dump("rpt_tgt_rsp", (char *)tgtid, buf_sz);
			break;
		}
	}

	snic->disc.rtgt_info = NULL;
	snic->disc.state = SNIC_DISC_DONE;
	mutex_unlock(&snic->disc.mutex);

	SNIC_HOST_INFO(snic->shost, "Discovery Completed.\n");

	/* tgtid aliases disc.rtgt_info; the buffer is consumed here. */
	kfree(tgtid);
} /* end of snic_handle_tgt_disc */
/*
 * snic_report_tgt_cmpl_handler - Firmware completion for REPORT_TARGETS.
 *
 * Decodes the completion header (ctx carries the originating
 * snic_req_info), logs the reported target ids and hands the response
 * buffer over to snic->tgt_work for target creation. Returns 0 when the
 * work was queued, 1 when firmware reported zero targets (the buffer is
 * freed here in that case).
 */
int
snic_report_tgt_cmpl_handler(struct snic *snic, struct snic_fw_req *fwreq)
{
	u8 typ, cmpl_stat;
	u32 cmnd_id, hid, tgt_cnt = 0;
	ulong ctx;
	struct snic_req_info *rqi = NULL;
	struct snic_tgt_id *tgtid;
	int i, ret = 0;

	snic_io_hdr_dec(&fwreq->hdr, &typ, &cmpl_stat, &cmnd_id, &hid, &ctx);
	rqi = (struct snic_req_info *) ctx;
	/* sge_va was set to the response buffer VA when the req was queued */
	tgtid = (struct snic_tgt_id *) rqi->sge_va;

	tgt_cnt = le32_to_cpu(fwreq->u.rpt_tgts_cmpl.tgt_cnt);
	if (tgt_cnt == 0) {
		SNIC_HOST_ERR(snic->shost, "No Targets Found on this host.\n");
		ret = 1;

		goto end;
	}

	/* printing list of targets here */
	SNIC_HOST_INFO(snic->shost, "Target Count = %d\n", tgt_cnt);

	SNIC_BUG_ON(tgt_cnt > snic->fwinfo.max_tgts);

	for (i = 0; i < tgt_cnt; i++)
		SNIC_HOST_INFO(snic->shost,
			       "Tgt id = 0x%x\n",
			       le32_to_cpu(tgtid[i].tgt_id));

	/*
	 * Queue work for further processing,
	 * Response Buffer Memory is freed after creating targets
	 */
	snic->disc.rtgt_cnt = tgt_cnt;
	snic->disc.rtgt_info = (u8 *) tgtid;
	queue_work(snic_glob->event_q, &snic->tgt_work);
	ret = 0;

end:
	/* Unmap Response Buffer */
	snic_pci_unmap_rsp_buf(snic, rqi);
	/* On error no work was queued, so the buffer dies here. */
	if (ret)
		kfree(tgtid);

	rqi->sge_va = 0;
	snic_release_untagged_req(snic, rqi);

	return ret;
} /* end of snic_report_tgt_cmpl_handler */
/* Discovery init fn: reset discovery bookkeeping to a pristine state. */
void
snic_disc_init(struct snic_disc *disc)
{
	INIT_LIST_HEAD(&disc->tgt_list);
	mutex_init(&disc->mutex);

	disc->state = SNIC_DISC_INIT;
	disc->disc_id = 0;
	disc->nxt_tgt_id = 0;
	disc->req_cnt = 0;
	disc->rtgt_cnt = 0;
	disc->rtgt_info = NULL;
	disc->cb = NULL;
} /* end of snic_disc_init */
/* Discovery, uninit fn: drop any rediscovery requests queued behind the
 * active one.
 */
void
snic_disc_term(struct snic *snic)
{
	struct snic_disc *disc = &snic->disc;

	mutex_lock(&disc->mutex);
	if (disc->req_cnt) {
		disc->req_cnt = 0;
		SNIC_SCSI_DBG(snic->shost, "Terminating Discovery.\n");
	}
	mutex_unlock(&disc->mutex);
}
/*
 * snic_disc_start: Discovery Start ...
 *
 * Posts a report-targets request unless a discovery is already pending,
 * in which case the request is recorded (req_cnt) and replayed when the
 * current run completes. Returns 0 on success or when coalesced.
 */
int
snic_disc_start(struct snic *snic)
{
	struct snic_disc *disc = &snic->disc;
	int ret;

	SNIC_SCSI_DBG(snic->shost, "Discovery Start.\n");

	mutex_lock(&disc->mutex);
	if (disc->state == SNIC_DISC_PENDING) {
		/* One is in flight already; remember this request. */
		disc->req_cnt++;
		mutex_unlock(&disc->mutex);

		return 0;
	}
	disc->state = SNIC_DISC_PENDING;
	mutex_unlock(&disc->mutex);

	ret = snic_queue_report_tgt_req(snic);
	if (ret)
		SNIC_HOST_INFO(snic->shost, "Discovery Failed, err=%d.\n", ret);

	return ret;
} /* end of snic_disc_start */
/*
 * snic_handle_disc - Work handler (snic->disc_work) that kicks off
 * discovery and logs a failure if it could not be started.
 *
 * Bug fix: the original "goto disc_err" jumped to the immediately
 * following label, so the "Discovery Failed" error was logged even when
 * snic_disc_start() succeeded (with err = 0). Return early on success
 * instead.
 */
void
snic_handle_disc(struct work_struct *work)
{
	struct snic *snic = container_of(work, struct snic, disc_work);
	int ret = 0;

	SNIC_HOST_INFO(snic->shost, "disc_work: Discovery\n");

	ret = snic_disc_start(snic);
	if (!ret)
		return;

	SNIC_HOST_ERR(snic->shost,
		      "disc_work: Discovery Failed w/ err = %d\n",
		      ret);
} /* end of snic_disc_work */
/*
 * snic_tgt_del_all : cleanup all snic targets
 * Called on unbinding the interface
 *
 * Unlinks every target from the discovery list under the host lock and
 * queues its del_work; actual teardown runs in snic_tgt_del() work
 * context. The Scsi_Host workqueue is flushed before returning.
 */
void
snic_tgt_del_all(struct snic *snic)
{
	struct snic_tgt *tgt = NULL;
	struct list_head *cur, *nxt;
	unsigned long flags;

	mutex_lock(&snic->disc.mutex);
	spin_lock_irqsave(snic->shost->host_lock, flags);

	list_for_each_safe(cur, nxt, &snic->disc.tgt_list) {
		tgt = list_entry(cur, struct snic_tgt, list);
		tgt->state = SNIC_TGT_STAT_DEL;
		list_del_init(&tgt->list);
		SNIC_HOST_INFO(snic->shost, "Tgt %d q'ing for del\n", tgt->id);
		queue_work(snic_glob->event_q, &tgt->del_work);
		tgt = NULL;
	}
	spin_unlock_irqrestore(snic->shost->host_lock, flags);

	/* Drain pending scan work on the Scsi_Host workqueue. */
	scsi_flush_work(snic->shost);
	mutex_unlock(&snic->disc.mutex);
} /* end of snic_tgt_del_all */

View File

@ -0,0 +1,124 @@
/*
* Copyright 2014 Cisco Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef __SNIC_DISC_H
#define __SNIC_DISC_H

#include "snic_fwint.h"

/* Discovery state machine for an adapter. */
enum snic_disc_state {
	SNIC_DISC_NONE,		/* default/unused state */
	SNIC_DISC_INIT,		/* initialized, idle */
	SNIC_DISC_PENDING,	/* report-targets request outstanding */
	SNIC_DISC_DONE		/* last discovery completed */
};

struct snic;

/* Per-adapter discovery bookkeeping; state transitions under @mutex. */
struct snic_disc {
	struct list_head tgt_list;	/* discovered snic_tgt objects */
	enum snic_disc_state state;
	struct mutex mutex;
	u16	disc_id;
	u8	req_cnt;	/* discoveries requested while one is pending */
	u32	nxt_tgt_id;	/* next scsi target id to hand out */
	u32	rtgt_cnt;	/* # of entries in rtgt_info */
	u8	*rtgt_info;	/* raw report-targets response buffer */
	struct delayed_work disc_timeout;
	void	(*cb)(struct snic *);
};
#define SNIC_TGT_NAM_LEN 16

/* Lifecycle states of a snic_tgt. */
enum snic_tgt_state {
	SNIC_TGT_STAT_NONE,
	SNIC_TGT_STAT_INIT,
	SNIC_TGT_STAT_ONLINE,	/* Target is Online */
	SNIC_TGT_STAT_OFFLINE,	/* Target is Offline */
	SNIC_TGT_STAT_DEL,	/* deletion in progress */
};

/* Target-type specific data embedded in struct snic_tgt. */
struct snic_tgt_priv {
	struct list_head list;
	enum snic_tgt_type typ;
	u16	disc_id;
	/*
	 * NOTE(review): declared as an array of SNIC_TGT_NAM_LEN char
	 * pointers; a fixed-size name buffer would normally be
	 * "char name[SNIC_TGT_NAM_LEN]" — confirm intent before use.
	 */
	char	*name[SNIC_TGT_NAM_LEN];

	union {
		/*DAS Target specific info */
		/*SAN Target specific info */
		u8	dummmy;
	} u;
};
/* snic tgt flags */
#define SNIC_TGT_SCAN_PENDING	0x01	/* scan_work queued, not finished */

/* One discovered backend target, hooked into the SML device tree. */
struct snic_tgt {
	struct list_head list;		/* entry on snic->disc.tgt_list */
	u16	id;			/* firmware-assigned target id */
	u16	channel;		/* always 0 at creation */
	u32	flags;			/* SNIC_TGT_* */
	u32	scsi_tgt_id;		/* id handed to the SCSI midlayer */
	enum snic_tgt_state state;
	struct device dev;
	struct work_struct scan_work;	/* runs snic_scsi_scan_tgt() */
	struct work_struct del_work;	/* runs snic_tgt_del() */
	struct snic_tgt_priv tdata;
};


struct snic_fw_req;

void snic_disc_init(struct snic_disc *);
int snic_disc_start(struct snic *);
void snic_disc_term(struct snic *);
int snic_report_tgt_cmpl_handler(struct snic *, struct snic_fw_req *);
int snic_tgtinfo_cmpl_handler(struct snic *snic, struct snic_fw_req *fwreq);
void snic_process_report_tgts_rsp(struct work_struct *);
void snic_handle_tgt_disc(struct work_struct *);
void snic_handle_disc(struct work_struct *);
void snic_tgt_dev_release(struct device *);
void snic_tgt_del_all(struct snic *);
#define dev_to_tgt(d) \
	container_of(d, struct snic_tgt, dev)

/* True iff @dev is a snic target device (identified by its release hook). */
static inline int
is_snic_target(struct device *dev)
{
	return dev->release == snic_tgt_dev_release;
}

/* Map a scsi_target back to its snic_tgt, or NULL if not snic-owned. */
#define starget_to_tgt(st)	\
	(is_snic_target(((struct scsi_target *) st)->dev.parent) ? \
		dev_to_tgt(st->dev.parent) : NULL)

#define snic_tgt_to_shost(t)	\
	dev_to_shost(t->dev.parent)
/*
 * snic_tgt_chkready - readiness check used before issuing IO.
 *
 * Returns 0 when the target is online, otherwise DID_NO_CONNECT packed
 * into the scsi_cmnd result's host-byte position.
 */
static inline int
snic_tgt_chkready(struct snic_tgt *tgt)
{
	return (tgt->state == SNIC_TGT_STAT_ONLINE) ?
		0 : (DID_NO_CONNECT << 16);
}
const char *snic_tgt_state_to_str(int);
int snic_tgt_scsi_abort_io(struct snic_tgt *);
#endif /* end of __SNIC_DISC_H */

View File

@ -0,0 +1,525 @@
/*
* Copyright 2014 Cisco Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef __SNIC_FWINT_H
#define __SNIC_FWINT_H

#define SNIC_CDB_LEN	32	/* SCSI CDB size 32, can be used for 16 bytes */
#define LUN_ADDR_LEN	8

/*
 * Command entry type
 *
 * Note: response opcodes mirror their request counterparts offset by
 * 0x10 (e.g. REPORT_TGTS 0x2 -> REPORT_TGTS_CMPL 0x12).
 */
enum snic_io_type {
	/*
	 * Initiator request types
	 */
	SNIC_REQ_REPORT_TGTS = 0x2,	/* Report Targets */
	SNIC_REQ_ICMND,			/* Initiator command for SCSI IO */
	SNIC_REQ_ITMF,			/* Initiator command for Task Mgmt */
	SNIC_REQ_HBA_RESET,		/* SNIC Reset */
	SNIC_REQ_EXCH_VER,		/* Exchange Version Information */
	SNIC_REQ_TGT_INFO,		/* Backend/Target Information */
	SNIC_REQ_BOOT_LUNS,

	/*
	 * Response type
	 */
	SNIC_RSP_REPORT_TGTS_CMPL = 0x12,/* Report Targets Completion */
	SNIC_RSP_ICMND_CMPL,		/* SCSI IO Completion */
	SNIC_RSP_ITMF_CMPL,		/* Task Management Completion */
	SNIC_RSP_HBA_RESET_CMPL,	/* SNIC Reset Completion */
	SNIC_RSP_EXCH_VER_CMPL,		/* Exchange Version Completion*/
	SNIC_RSP_BOOT_LUNS_CMPL,

	/*
	 * Misc Request types
	 */
	SNIC_MSG_ACK = 0x80,		/* Ack: snic_notify_msg */
	SNIC_MSG_ASYNC_EVNOTIFY,	/* Asynchronous Event Notification */
}; /* end of enum snic_io_type */
/*
 * Header status codes from firmware
 */
enum snic_io_status {
	SNIC_STAT_IO_SUCCESS = 0,	/* request was successful */

	/*
	 * If a request to the fw is rejected, the original request header
	 * will be returned with the status set to one of the following:
	 */
	SNIC_STAT_INVALID_HDR,	/* header contains invalid data */
	SNIC_STAT_OUT_OF_RES,	/* out of resources to complete request */
	SNIC_STAT_INVALID_PARM,	/* some parameter in request is not valid */
	SNIC_STAT_REQ_NOT_SUP,	/* req type is not supported */
	SNIC_STAT_IO_NOT_FOUND,	/* requested IO was not found */

	/*
	 * Once a request is processed, the fw will usually return
	 * a cmpl message type. In cases where errors occurred,
	 * the header status would be filled in with one of the following:
	 */
	SNIC_STAT_ABORTED,		/* req was aborted */
	SNIC_STAT_TIMEOUT,		/* req was timed out */
	SNIC_STAT_SGL_INVALID,		/* req was aborted due to sgl error */
	SNIC_STAT_DATA_CNT_MISMATCH,	/*recv/sent more/less data than expec */
	SNIC_STAT_FW_ERR,		/* req was terminated due to fw error */
	SNIC_STAT_ITMF_REJECT,		/* itmf req was rejected by target */
	SNIC_STAT_ITMF_FAIL,		/* itmf req was failed */
	SNIC_STAT_ITMF_INCORRECT_LUN,	/* itmf req has incorrect LUN id*/
	SNIC_STAT_CMND_REJECT,		/* req was invalid and rejected */
	SNIC_STAT_DEV_OFFLINE,		/* req sent to offline device */
	SNIC_STAT_NO_BOOTLUN,
	SNIC_STAT_SCSI_ERR,		/* SCSI error returned by Target. */
	SNIC_STAT_NOT_READY,		/* sNIC Subsystem is not ready */
	SNIC_STAT_FATAL_ERROR,		/* sNIC is in unrecoverable state */
}; /* end of enum snic_io_status */
/*
 * snic_io_hdr : host <--> firmare
 *
 * for any other message that will be queued to firmware should
 * have the following request header
 */
struct snic_io_hdr {
	__le32	hid;
	__le32	cmnd_id;	/* tag here */
	/*
	 * NOTE(review): ulong differs in size between 32- and 64-bit
	 * builds; since this header is shared with firmware, confirm the
	 * intended wire layout against the firmware spec.
	 */
	ulong	init_ctx;	/* initiator context */
	u8	type;		/* request/response type */
	u8	status;		/* header status entry */
	u8	protocol;	/* Protocol specific, may needed for RoCE*/
	u8	flags;
	__le16	sg_cnt;
	u16	resvd;
};
/* Helper to populate a snic_io_hdr before posting a request to firmware. */
static inline void
snic_io_hdr_enc(struct snic_io_hdr *hdr, u8 typ, u8 status, u32 id, u32 hid,
		u16 sg_cnt, ulong ctx)
{
	/* Fields written in declaration order; protocol/flags cleared. */
	hdr->hid = cpu_to_le32(hid);
	hdr->cmnd_id = cpu_to_le32(id);
	hdr->init_ctx = ctx;
	hdr->type = typ;
	hdr->status = status;
	hdr->protocol = 0;
	hdr->flags = 0;
	hdr->sg_cnt = cpu_to_le16(sg_cnt);
}
/* Helper to pull the common fields out of a firmware response header.
 * Note: sg_cnt is intentionally not decoded here.
 */
static inline void
snic_io_hdr_dec(struct snic_io_hdr *hdr, u8 *typ, u8 *stat, u32 *cmnd_id,
		u32 *hid, ulong *ctx)
{
	*hid = le32_to_cpu(hdr->hid);
	*cmnd_id = le32_to_cpu(hdr->cmnd_id);
	*ctx = hdr->init_ctx;
	*typ = hdr->type;
	*stat = hdr->status;
}
/*
 * snic_host_info: host -> firmware
 *
 * Used for sending host information to firmware, and request fw version
 */
struct snic_exch_ver_req {
	__le32	drvr_ver;	/* for debugging, when fw dump captured */
	__le32	os_type;	/* for OS specific features */
};

/*
 * os_type flags
 * Bit 0-7 : OS information
 * Bit 8-31: Feature/Capability Information
 */
#define SNIC_OS_LINUX	0x1
#define SNIC_OS_WIN	0x2
#define SNIC_OS_ESX	0x3

/*
 * HBA Capabilities
 * Bit 1: Reserved.
 * Bit 2: Dynamic Discovery of LUNs.
 * Bit 3: Async event notifications on tgt online/offline events.
 * Bit 4: IO timeout support in FW.
 * Bit 5-31: Reserved.
 */
#define SNIC_HBA_CAP_DDL	0x02	/* Supports Dynamic Discovery of LUNs */
#define SNIC_HBA_CAP_AEN	0x04	/* Supports Async Event Notification */
#define SNIC_HBA_CAP_TMO	0x08	/* Supports IO timeout in FW */
/*
 * snic_exch_ver_rsp : firmware -> host
 *
 * Used by firmware to send response to version request
 */
struct snic_exch_ver_rsp {
	__le32	version;
	__le32	hid;
	__le32	max_concur_ios;		/* max concurrent ios */
	__le32	max_sgs_per_cmd;	/* max sgls per IO */
	__le32	max_io_sz;		/* max io size supported */
	__le32	hba_cap;		/* hba capabilities */
	__le32	max_tgts;		/* max tgts supported */
	__le16	io_timeout;		/* FW extended timeout */
	u16	rsvd;
};
/*
 * snic_report_tgts : host -> firmware request
 *
 * Used by the host to request list of targets
 */
struct snic_report_tgts {
	__le16	sg_cnt;
	__le16	flags;		/* specific flags from fw */
	u8	_resvd[4];
	__le64	sg_addr;	/* Points to SGL */
	__le64	sense_addr;
};

/* Adapter personality. */
enum snic_type {
	SNIC_NONE = 0x0,
	SNIC_DAS,
	SNIC_SAN,
};


/* Report Target Response */
enum snic_tgt_type {
	SNIC_TGT_NONE = 0x0,
	SNIC_TGT_DAS,	/* DAS Target */
	SNIC_TGT_SAN,	/* SAN Target */
};

/* target id format */
struct snic_tgt_id {
	__le32	tgt_id;		/* target id */
	__le16	tgt_type;	/* tgt type */
	__le16	vnic_id;	/* corresponding vnic id */
};

/*
 * snic_report_tgts_cmpl : firmware -> host response
 *
 * Used by firmware to send response to Report Targets request
 */
struct snic_report_tgts_cmpl {
	__le32	tgt_cnt;	/* Number of Targets accessible */
	u32	_resvd;
};
/*
 * Command flags
 *
 * Bit 0: Read flags
 * Bit 1: Write flag
 * Bit 2: ESGL - sg/esg array contains extended sg
 *	  ESGE - is a host buffer contains sg elements
 * Bit 3-4: Task Attributes
 *	    00b - simple
 *	    01b - head of queue
 *	    10b - ordered
 * Bit 5-7: Priority - future use
 * Bit 8-15: Reserved
 */

#define SNIC_ICMND_WR		0x01	/* write command */
#define SNIC_ICMND_RD		0x02	/* read command */
#define SNIC_ICMND_ESGL		0x04	/* SGE/ESGE array contains valid data*/

/*
 * Priority/Task Attribute settings
 *
 * NOTE(review): the comment block above places task attributes at bits
 * 3-4 while SNIC_ICMND_TSK_SHIFT is 2, and the mask expression
 * "& ~(0xffff)" clears the low 16 bits rather than isolating the task
 * attribute field — confirm against the firmware spec before relying
 * on SNIC_ICMND_TSK_MASK().
 */
#define SNIC_ICMND_TSK_SHIFT		2	/* task attr starts at bit 2 */
#define SNIC_ICMND_TSK_MASK(x)		((x>>SNIC_ICMND_TSK_SHIFT) & ~(0xffff))
#define SNIC_ICMND_TSK_SIMPLE		0	/* simple task attr */
#define SNIC_ICMND_TSK_HEAD_OF_QUEUE	1	/* head of queue task attr */
#define SNIC_ICMND_TSK_ORDERED		2	/* ordered task attr */

#define SNIC_ICMND_PRI_SHIFT		5	/* prio val starts at bit 5 */
/*
 * snic_icmnd : host-> firmware request
 *
 * used for sending out an initiator SCSI 16/32-byte command
 */
struct snic_icmnd {
	__le16	sg_cnt;		/* Number of SG Elements */
	__le16	flags;		/* flags */
	__le32	sense_len;	/* Sense buffer length */
	__le64	tgt_id;		/* Destination Target ID */
	__le64	lun_id;		/* Destination LUN ID */
	u8	cdb_len;
	u8	_resvd;
	__le16	time_out;	/* ms time for Res allocations fw to handle io*/
	__le32	data_len;	/* Total number of bytes to be transferred */
	u8	cdb[SNIC_CDB_LEN];
	__le64	sg_addr;	/* Points to SG List */
	__le64	sense_addr;	/* Sense buffer address */
};

/* Response flags */
/* Bit 0: Under run
 * Bit 1: Over Run
 * Bit 2-7: Reserved
 */
#define SNIC_ICMND_CMPL_UNDR_RUN	0x01	/* resid under and valid */
#define SNIC_ICMND_CMPL_OVER_RUN	0x02	/* resid over and valid */

/*
 * snic_icmnd_cmpl: firmware -> host response
 *
 * Used for sending the host a response to an icmnd (initiator command)
 */
struct snic_icmnd_cmpl {
	u8	scsi_status;	/* value as per SAM */
	u8	flags;		/* SNIC_ICMND_CMPL_* under/over-run bits */
	__le16	sense_len;	/* Sense Length */
	__le32	resid;		/* Residue : # bytes under or over run */
};
/*
 * snic_itmf: host->firmware request
 *
 * used for requesting the firmware to abort a request and/or send out
 * a task management function
 *
 * the req_id field is valid in case of abort task and clear task
 */
struct snic_itmf {
	u8 tm_type;	/* SCSI Task Management request (snic_itmf_tm_type) */
	u8 resvd;
	__le16 flags;	/* flags */
	__le32 req_id;	/* Command id of snic req to be aborted */
	__le64 tgt_id;	/* Target ID */
	__le64 lun_id;	/* Destination LUN ID */
	__le16 timeout;	/* in sec */
};

/*
 * Task Management Request
 */
enum snic_itmf_tm_type {
	SNIC_ITMF_ABTS_TASK = 0x01,	/* Abort Task */
	SNIC_ITMF_ABTS_TASK_SET,	/* Abort Task Set */
	SNIC_ITMF_CLR_TASK,		/* Clear Task */
	SNIC_ITMF_CLR_TASKSET,		/* Clear Task Set */
	SNIC_ITMF_LUN_RESET,		/* Lun Reset */
	SNIC_ITMF_ABTS_TASK_TERM,	/* Supported for SAN Targets */
};

/*
 * snic_itmf_cmpl: firmware -> host response
 *
 * used for sending the host a response for a itmf request
 */
struct snic_itmf_cmpl {
	__le32 nterminated;	/* # IOs terminated as a result of tmf */
	u8 flags;		/* flags */
	u8 _resvd[3];
};

/*
 * itmfl_cmpl flags
 * Bit 0 : 1 - Num terminated field valid
 * Bit 1 - 7 : Reserved
 */
#define SNIC_NUM_TERM_VALID	0x01	/* Number of IOs terminated */
/*
 * snic_hba_reset: host -> firmware request
 *
 * used for requesting firmware to reset snic
 */
struct snic_hba_reset {
	__le16 flags;	/* flags */
	u8 _resvd[6];
};

/*
 * snic_hba_reset_cmpl: firmware -> host response
 *
 * Used by firmware to respond to the host's hba reset request
 */
struct snic_hba_reset_cmpl {
	u8 flags;	/* flags : more info needs to be added*/
	u8 _resvd[7];
};

/*
 * snic_notify_msg: firmware -> host response
 *
 * Used by firmware to notify host of the last work queue entry received
 */
struct snic_notify_msg {
	__le32 wqe_num;	/* wq entry number */
	u8 flags;	/* flags, macros */
	u8 _resvd[4];
};

#define SNIC_EVDATA_LEN	24	/* in bytes */
/* snic_async_evnotify: firmware -> host notification
 *
 * Used by firmware to notify the host about configuration/state changes
 */
struct snic_async_evnotify {
	u8 FLS_EVENT_DESC;
	u8 vnic;			/* vnic id */
	u8 _resvd[2];
	__le32 ev_id;			/* Event ID (snic_ev_type) */
	u8 ev_data[SNIC_EVDATA_LEN];	/* Event Data */
	u8 _resvd2[4];
};

/* async event flags */
enum snic_ev_type {
	SNIC_EV_TGT_OFFLINE = 0x01, /* Target Offline, PL contains TGT ID */
	SNIC_EV_TGT_ONLINE,	/* Target Online, PL contains TGT ID */
	SNIC_EV_LUN_OFFLINE,	/* LUN Offline, PL contains LUN ID */
	SNIC_EV_LUN_ONLINE,	/* LUN Online, PL contains LUN ID */
	SNIC_EV_CONF_CHG,	/* Dev Config/Attr Change Event */
	SNIC_EV_TGT_ADDED,	/* Target Added */
	SNIC_EV_TGT_DELTD,	/* Target Del'd, PL contains TGT ID */
	SNIC_EV_LUN_ADDED,	/* LUN Added */
	SNIC_EV_LUN_DELTD,	/* LUN Del'd, PL cont. TGT & LUN ID */

	SNIC_EV_DISC_CMPL = 0x10, /* Discovery Completed Event */
};
#define SNIC_HOST_REQ_LEN	128	/*Exp length of host req, wq desc sz*/
/* Payload 88 bytes = 128 - 24 - 16 */
#define SNIC_HOST_REQ_PAYLOAD	((int)(SNIC_HOST_REQ_LEN -	\
					sizeof(struct snic_io_hdr) -	\
					(2 * sizeof(u64))))

/*
 * snic_host_req: host -> firmware request
 *
 * Basic structure for all snic requests that are sent from the host to
 * firmware. They are 128 bytes in size.
 */
struct snic_host_req {
	u64 ctrl_data[2];	/*16 bytes - Control Data */
	struct snic_io_hdr hdr;
	union {
		/*
		 * Entry specific space, last byte contains color
		 */
		u8 buf[SNIC_HOST_REQ_PAYLOAD];

		/*
		 * Exchange firmware version
		 */
		struct snic_exch_ver_req exch_ver;

		/* report targets */
		struct snic_report_tgts rpt_tgts;

		/* io request */
		struct snic_icmnd icmnd;

		/* task management request */
		struct snic_itmf itmf;

		/* hba reset */
		struct snic_hba_reset reset;
	} u;
}; /* end of snic_host_req structure */

#define SNIC_FW_REQ_LEN		64 /* Expected length of fw req */
/*
 * snic_fw_req: firmware -> host request/response, 64 bytes on the wire.
 * The last byte of the descriptor carries the color bit (see
 * snic_color_enc/snic_color_dec below).
 */
struct snic_fw_req {
	struct snic_io_hdr hdr;
	union {
		/*
		 * Entry specific space, last byte contains color
		 */
		u8 buf[SNIC_FW_REQ_LEN - sizeof(struct snic_io_hdr)];

		/* Exchange Version Response */
		struct snic_exch_ver_rsp exch_ver_cmpl;

		/* Report Targets Response */
		struct snic_report_tgts_cmpl rpt_tgts_cmpl;

		/* scsi response */
		struct snic_icmnd_cmpl icmnd_cmpl;

		/* task management response */
		struct snic_itmf_cmpl itmf_cmpl;

		/* hba reset response */
		struct snic_hba_reset_cmpl reset_cmpl;

		/* notify message */
		struct snic_notify_msg ack;

		/* async notification event */
		struct snic_async_evnotify async_ev;
	} u;
}; /* end of snic_fw_req structure */

/*
 * Auxiliary macros to verify specific snic req/cmpl structures are
 * aligned to 64 bit and do not collide with the color bit field.
 * Currently empty placeholders.
 */
#define VERIFY_REQ_SZ(x)
#define VERIFY_CMPL_SZ(x)
/*
 * snic_color_enc : sets/clears the color bit, which lives in the most
 * significant bit of the descriptor's last byte.
 */
static inline void
snic_color_enc(struct snic_fw_req *req, u8 color)
{
	u8 *last_byte = (u8 *)req + sizeof(struct snic_fw_req) - 1;

	/* Clear the color bit, then set it again when requested. */
	*last_byte = (*last_byte & ~0x80) | (color ? 0x80 : 0);
}
/*
 * snic_color_dec : reads the color bit (MSB of the descriptor's last
 * byte) into *color, with a read barrier so subsequent field reads are
 * not reordered before the color check.
 */
static inline void
snic_color_dec(struct snic_fw_req *req, u8 *color)
{
	u8 *c = ((u8 *) req) + sizeof(struct snic_fw_req) - 1;

	*color = *c >> 7;

	/* Make sure color bit is read from desc *before* other fields
	 * are read from desc. Hardware guarantees color bit is last
	 * bit (byte) written. Adding the rmb() prevents the compiler
	 * and/or CPU from reordering the reads which would potentially
	 * result in reading stale values.
	 */
	rmb();
}
#endif /* end of __SNIC_FWINT_H */

View File

@ -0,0 +1,518 @@
/*
* Copyright 2014 Cisco Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/mempool.h>
#include <scsi/scsi_tcq.h>
#include "snic_io.h"
#include "snic.h"
#include "cq_enet_desc.h"
#include "snic_fwint.h"
/*
 * snic_wq_cmpl_frame_send : per-buffer completion callback invoked by
 * svnic_wq_service(); unmaps and releases the ack'd host request buffer.
 */
static void
snic_wq_cmpl_frame_send(struct vnic_wq *wq,
			struct cq_desc *cq_desc,
			struct vnic_wq_buf *buf,
			void *opaque)
{
	struct snic *sn = svnic_dev_priv(wq->vdev);

	SNIC_BUG_ON(buf->os_buf == NULL);

	pci_unmap_single(sn->pdev, buf->dma_addr, buf->len, PCI_DMA_TODEVICE);

	if (snic_log_level & SNIC_DESC_LOGGING)
		SNIC_HOST_INFO(sn->shost,
			       "Ack received for snic_host_req %p.\n",
			       buf->os_buf);

	SNIC_TRC(sn->shost->host_no, 0, 0,
		 ((ulong)(buf->os_buf) - sizeof(struct snic_req_info)), 0, 0,
		 0);

	buf->os_buf = NULL;
}
/*
 * snic_wq_cmpl_handler_cont : CQ-entry continuation; services the work
 * queue under its lock.  Only queue 0 exists today.
 */
static int
snic_wq_cmpl_handler_cont(struct vnic_dev *vdev,
			  struct cq_desc *cq_desc,
			  u8 type,
			  u16 q_num,
			  u16 cmpl_idx,
			  void *opaque)
{
	struct snic *sn = svnic_dev_priv(vdev);
	unsigned long lock_flags;

	SNIC_BUG_ON(q_num != 0);

	spin_lock_irqsave(&sn->wq_lock[q_num], lock_flags);
	svnic_wq_service(&sn->wq[q_num], cq_desc, cmpl_idx,
			 snic_wq_cmpl_frame_send, NULL);
	spin_unlock_irqrestore(&sn->wq_lock[q_num], lock_flags);

	return 0;
} /* end of snic_cmpl_handler_cont */
/*
 * snic_wq_cmpl_handler : drains up to work_to_do WQ completions from
 * every completion queue; returns the number of entries processed.
 */
int
snic_wq_cmpl_handler(struct snic *snic, int work_to_do)
{
	unsigned int done = 0;
	unsigned int q;

	snic->s_stats.misc.last_ack_time = jiffies;

	for (q = 0; q < snic->wq_count; q++)
		done += svnic_cq_service(&snic->cq[q], work_to_do,
					 snic_wq_cmpl_handler_cont, NULL);

	return done;
} /* end of snic_wq_cmpl_handler */
/*
 * snic_free_wq_buf : frees an untagged host request still sitting on a
 * work queue (driver-unload path).  Unmaps the DMA buffer, unlinks the
 * owning rqi from spl_cmd_list and frees it, together with any mapped
 * response buffer hanging off sge_va.
 */
void
snic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf)
{
	struct snic_host_req *req = buf->os_buf;
	struct snic *snic = svnic_dev_priv(wq->vdev);
	struct snic_req_info *rqi = NULL;
	unsigned long flags;

	pci_unmap_single(snic->pdev, buf->dma_addr, buf->len, PCI_DMA_TODEVICE);

	rqi = req_to_rqi(req);
	spin_lock_irqsave(&snic->spl_cmd_lock, flags);
	if (list_empty(&rqi->list)) {
		/* Already unlinked elsewhere; nothing to free here. */
		spin_unlock_irqrestore(&snic->spl_cmd_lock, flags);
		goto end;
	}

	SNIC_BUG_ON(rqi->list.next == NULL); /* if not added to spl_cmd_list */
	list_del_init(&rqi->list);
	spin_unlock_irqrestore(&snic->spl_cmd_lock, flags);

	if (rqi->sge_va) {
		/* Release the response buffer mapped for this request. */
		snic_pci_unmap_rsp_buf(snic, rqi);
		kfree((void *)rqi->sge_va);
		rqi->sge_va = 0;
	}
	snic_req_free(snic, rqi);
	SNIC_HOST_INFO(snic->shost, "snic_free_wq_buf .. freed.\n");

end:
	return;
}
/*
 * snic_select_wq : work-queue selection policy.  Single queue only for
 * now, so queue 0 is always chosen; the BUILD_BUG_ON forces this policy
 * to be revisited if the WQ array ever grows.
 */
static int
snic_select_wq(struct snic *snic)
{
	BUILD_BUG_ON(SNIC_WQ_MAX > 1);

	return 0;
}
/*
 * snic_queue_wq_desc : maps the request buffer for DMA and posts it on
 * the selected work queue.
 *
 * Returns 0 on success, -ENOMEM when the DMA mapping fails or the
 * selected WQ has no free descriptors.
 *
 * Fix: descriptor availability was checked on snic->wq (always queue 0)
 * instead of the queue actually selected by snic_select_wq(); harmless
 * today with a single queue, but wrong once multi-queue is enabled.
 */
int
snic_queue_wq_desc(struct snic *snic, void *os_buf, u16 len)
{
	dma_addr_t pa = 0;
	unsigned long flags;
	struct snic_fw_stats *fwstats = &snic->s_stats.fw;
	long act_reqs;
	int q_num = 0;

	snic_print_desc(__func__, os_buf, len);

	/* Map request buffer */
	pa = pci_map_single(snic->pdev, os_buf, len, PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(snic->pdev, pa)) {
		SNIC_HOST_ERR(snic->shost, "qdesc: PCI DMA Mapping Fail.\n");

		return -ENOMEM;
	}

	q_num = snic_select_wq(snic);

	spin_lock_irqsave(&snic->wq_lock[q_num], flags);
	/* Check the queue we are about to post on, not implicitly queue 0. */
	if (!svnic_wq_desc_avail(&snic->wq[q_num])) {
		pci_unmap_single(snic->pdev, pa, len, PCI_DMA_TODEVICE);
		spin_unlock_irqrestore(&snic->wq_lock[q_num], flags);
		atomic64_inc(&snic->s_stats.misc.wq_alloc_fail);
		SNIC_DBG("host = %d, WQ is Full\n", snic->shost->host_no);

		return -ENOMEM;
	}

	snic_queue_wq_eth_desc(&snic->wq[q_num], os_buf, pa, len, 0, 0, 1);
	spin_unlock_irqrestore(&snic->wq_lock[q_num], flags);

	/* Update stats */
	act_reqs = atomic64_inc_return(&fwstats->actv_reqs);
	if (act_reqs > atomic64_read(&fwstats->max_actv_reqs))
		atomic64_set(&fwstats->max_actv_reqs, act_reqs);

	return 0;
} /* end of snic_queue_wq_desc() */
/*
 * snic_handle_untagged_req: Adds snic specific requests to spl_cmd_list.
 * Purpose : Used during driver unload to clean up the requests.
 */
void
snic_handle_untagged_req(struct snic *snic, struct snic_req_info *rqi)
{
	unsigned long lock_flags;

	INIT_LIST_HEAD(&rqi->list);

	spin_lock_irqsave(&snic->spl_cmd_lock, lock_flags);
	list_add_tail(&rqi->list, &snic->spl_cmd_list);
	spin_unlock_irqrestore(&snic->spl_cmd_lock, lock_flags);
}
/*
 * snic_req_init:
 * Allocates snic_req_info + snic_host_req + sgl data from the mempool
 * matching sg_cnt and initializes it.  Returns NULL on allocation
 * failure (GFP_ATOMIC).
 *
 * Fix: the original assigned rqi->req = (struct snic_host_req *)(rqi + 1)
 * twice; the redundant second assignment is removed.
 */
struct snic_req_info *
snic_req_init(struct snic *snic, int sg_cnt)
{
	u8 typ;
	struct snic_req_info *rqi = NULL;

	/* Pick the smallest request cache whose SGL fits sg_cnt. */
	typ = (sg_cnt <= SNIC_REQ_CACHE_DFLT_SGL) ?
		SNIC_REQ_CACHE_DFLT_SGL : SNIC_REQ_CACHE_MAX_SGL;

	rqi = mempool_alloc(snic->req_pool[typ], GFP_ATOMIC);
	if (!rqi) {
		atomic64_inc(&snic->s_stats.io.alloc_fail);
		SNIC_HOST_ERR(snic->shost,
			      "Failed to allocate memory from snic req pool id = %d\n",
			      typ);
		return rqi;
	}

	memset(rqi, 0, sizeof(*rqi));
	rqi->rq_pool_type = typ;
	rqi->start_time = jiffies;
	/* The host request is carved out immediately after the rqi. */
	rqi->req = (struct snic_host_req *) (rqi + 1);
	rqi->req_len = sizeof(struct snic_host_req);
	rqi->snic = snic;

	if (sg_cnt == 0)
		goto end;

	rqi->req_len += (sg_cnt * sizeof(struct snic_sg_desc));

	if (sg_cnt > atomic64_read(&snic->s_stats.io.max_sgl))
		atomic64_set(&snic->s_stats.io.max_sgl, sg_cnt);

	SNIC_BUG_ON(sg_cnt > SNIC_MAX_SG_DESC_CNT);
	atomic64_inc(&snic->s_stats.io.sgl_cnt[sg_cnt - 1]);

end:
	memset(rqi->req, 0, rqi->req_len);

	/* pre initialization of init_ctx to support req_to_rqi */
	rqi->req->hdr.init_ctx = (ulong) rqi;

	SNIC_SCSI_DBG(snic->shost, "Req_alloc:rqi = %p allocatd.\n", rqi);

	return rqi;
} /* end of snic_req_init */
/*
 * snic_abort_req_init : allocates (or reuses) the abort request tied to
 * the given rqi.  Returns NULL on mempool exhaustion.
 */
struct snic_host_req *
snic_abort_req_init(struct snic *snic, struct snic_req_info *rqi)
{
	struct snic_host_req *abtreq;

	SNIC_BUG_ON(!rqi);

	/* If an abort is issued a second time, reuse the first request. */
	if (rqi->abort_req)
		return rqi->abort_req;

	abtreq = mempool_alloc(snic->req_pool[SNIC_REQ_TM_CACHE], GFP_ATOMIC);
	if (!abtreq) {
		SNIC_HOST_ERR(snic->shost, "abts:Failed to alloc tm req.\n");
		WARN_ON_ONCE(1);

		return NULL;
	}

	memset(abtreq, 0, sizeof(struct snic_host_req));
	rqi->abort_req = abtreq;
	/* pre initialization of init_ctx to support req_to_rqi */
	abtreq->hdr.init_ctx = (ulong) rqi;

	return abtreq;
} /* end of snic_abort_req_init */
/*
 * snic_dr_req_init : allocates the device-reset request for the given
 * rqi (must not already have one).  Returns NULL on mempool exhaustion.
 */
struct snic_host_req *
snic_dr_req_init(struct snic *snic, struct snic_req_info *rqi)
{
	struct snic_host_req *dr;

	SNIC_BUG_ON(!rqi);

	dr = mempool_alloc(snic->req_pool[SNIC_REQ_TM_CACHE], GFP_ATOMIC);
	if (!dr) {
		SNIC_HOST_ERR(snic->shost, "dr:Failed to alloc tm req.\n");
		WARN_ON_ONCE(1);

		return NULL;
	}

	SNIC_BUG_ON(rqi->dr_req != NULL);
	memset(dr, 0, sizeof(struct snic_host_req));
	rqi->dr_req = dr;
	/* pre initialization of init_ctx to support req_to_rqi */
	dr->hdr.init_ctx = (ulong) rqi;

	return dr;
} /* end of snic_dr_req_init */
/*
 * snic_req_free : returns the rqi and any attached abort/device-reset
 * requests to their mempools.  Caller must already have unmapped and
 * freed the response buffer (sge_va == 0).
 */
void
snic_req_free(struct snic *snic, struct snic_req_info *rqi)
{
	SNIC_BUG_ON(rqi->req == rqi->abort_req);
	SNIC_BUG_ON(rqi->req == rqi->dr_req);
	SNIC_BUG_ON(rqi->sge_va != 0);

	SNIC_SCSI_DBG(snic->shost,
		      "Req_free:rqi %p:ioreq %p:abt %p:dr %p\n",
		      rqi, rqi->req, rqi->abort_req, rqi->dr_req);

	if (rqi->dr_req)
		mempool_free(rqi->dr_req, snic->req_pool[SNIC_REQ_TM_CACHE]);

	if (rqi->abort_req)
		mempool_free(rqi->abort_req, snic->req_pool[SNIC_REQ_TM_CACHE]);

	mempool_free(rqi, snic->req_pool[rqi->rq_pool_type]);
}
/*
 * snic_pci_unmap_rsp_buf : unmaps the response buffer described by the
 * first SG element of the request attached to rqi.
 */
void
snic_pci_unmap_rsp_buf(struct snic *snic, struct snic_req_info *rqi)
{
	struct snic_sg_desc *sgd;

	sgd = req_to_sgl(rqi_to_req(rqi));
	SNIC_BUG_ON(sgd[0].addr == 0);
	pci_unmap_single(snic->pdev,
			 le64_to_cpu(sgd[0].addr),
			 le32_to_cpu(sgd[0].len),
			 PCI_DMA_FROMDEVICE);
}
/*
* snic_free_all_untagged_reqs: Walks through untagged reqs and frees them.
*/
void
snic_free_all_untagged_reqs(struct snic *snic)
{
struct snic_req_info *rqi;
struct list_head *cur, *nxt;
unsigned long flags;
spin_lock_irqsave(&snic->spl_cmd_lock, flags);
list_for_each_safe(cur, nxt, &snic->spl_cmd_list) {
rqi = list_entry(cur, struct snic_req_info, list);
list_del_init(&rqi->list);
if (rqi->sge_va) {
snic_pci_unmap_rsp_buf(snic, rqi);
kfree((void *)rqi->sge_va);
rqi->sge_va = 0;
}
snic_req_free(snic, rqi);
}
spin_unlock_irqrestore(&snic->spl_cmd_lock, flags);
}
/*
 * snic_release_untagged_req : Unlinks the untagged req and frees it.
 */
void
snic_release_untagged_req(struct snic *snic, struct snic_req_info *rqi)
{
	unsigned long lock_flags;

	/* During unload, snic_free_all_untagged_reqs() owns the cleanup. */
	spin_lock_irqsave(&snic->snic_lock, lock_flags);
	if (snic->in_remove) {
		spin_unlock_irqrestore(&snic->snic_lock, lock_flags);

		return;
	}
	spin_unlock_irqrestore(&snic->snic_lock, lock_flags);

	spin_lock_irqsave(&snic->spl_cmd_lock, lock_flags);
	if (list_empty(&rqi->list)) {
		/* Already unlinked (and being freed) by someone else. */
		spin_unlock_irqrestore(&snic->spl_cmd_lock, lock_flags);

		return;
	}
	list_del_init(&rqi->list);
	spin_unlock_irqrestore(&snic->spl_cmd_lock, lock_flags);

	snic_req_free(snic, rqi);
}
/* snic_hex_dump : byte-stream dump of a buffer, for debugging only */
void
snic_hex_dump(char *pfx, char *data, int len)
{
	SNIC_INFO("%s Dumping Data of Len = %d\n", pfx, len);

	print_hex_dump_bytes(pfx, DUMP_PREFIX_NONE, data, len);
}
#define LINE_BUFSZ	128	/* for snic_print_desc fn */

/*
 * snic_dump_desc : logs a one-line summary of a host request or firmware
 * response descriptor, and optionally hex-dumps the raw bytes.
 * Debug-only; called via snic_print_desc() when SNIC_DESC_LOGGING is set.
 */
static void
snic_dump_desc(const char *fn, char *os_buf, int len)
{
	struct snic_host_req *req = (struct snic_host_req *) os_buf;
	struct snic_fw_req *fwreq = (struct snic_fw_req *) os_buf;
	struct snic_req_info *rqi = NULL;
	char line[LINE_BUFSZ] = { '\0' };
	char *cmd_str = NULL;

	/*
	 * Responses (types >= SNIC_RSP_REPORT_TGTS_CMPL) carry the rqi in
	 * the fw_req header; host requests carry it in the host_req header.
	 */
	if (req->hdr.type >= SNIC_RSP_REPORT_TGTS_CMPL)
		rqi = (struct snic_req_info *) fwreq->hdr.init_ctx;
	else
		rqi = (struct snic_req_info *) req->hdr.init_ctx;

	SNIC_BUG_ON(rqi == NULL || rqi->req == NULL);
	switch (req->hdr.type) {
	case SNIC_REQ_REPORT_TGTS:
		cmd_str = "report-tgt : ";
		snprintf(line, LINE_BUFSZ, "SNIC_REQ_REPORT_TGTS :");
		break;

	case SNIC_REQ_ICMND:
		cmd_str = "icmnd : ";
		/* include the SCSI opcode for IO requests */
		snprintf(line, LINE_BUFSZ, "SNIC_REQ_ICMND : 0x%x :",
			 req->u.icmnd.cdb[0]);
		break;

	case SNIC_REQ_ITMF:
		cmd_str = "itmf : ";
		snprintf(line, LINE_BUFSZ, "SNIC_REQ_ITMF :");
		break;

	case SNIC_REQ_HBA_RESET:
		cmd_str = "hba reset :";
		snprintf(line, LINE_BUFSZ, "SNIC_REQ_HBA_RESET :");
		break;

	case SNIC_REQ_EXCH_VER:
		cmd_str = "exch ver : ";
		snprintf(line, LINE_BUFSZ, "SNIC_REQ_EXCH_VER :");
		break;

	case SNIC_REQ_TGT_INFO:
		cmd_str = "tgt info : ";
		break;

	case SNIC_RSP_REPORT_TGTS_CMPL:
		cmd_str = "report tgt cmpl : ";
		snprintf(line, LINE_BUFSZ, "SNIC_RSP_REPORT_TGTS_CMPL :");
		break;

	case SNIC_RSP_ICMND_CMPL:
		cmd_str = "icmnd_cmpl : ";
		/* opcode comes from the original request held in rqi */
		snprintf(line, LINE_BUFSZ, "SNIC_RSP_ICMND_CMPL : 0x%x :",
			 rqi->req->u.icmnd.cdb[0]);
		break;

	case SNIC_RSP_ITMF_CMPL:
		cmd_str = "itmf_cmpl : ";
		snprintf(line, LINE_BUFSZ, "SNIC_RSP_ITMF_CMPL :");
		break;

	case SNIC_RSP_HBA_RESET_CMPL:
		cmd_str = "hba_reset_cmpl : ";
		snprintf(line, LINE_BUFSZ, "SNIC_RSP_HBA_RESET_CMPL :");
		break;

	case SNIC_RSP_EXCH_VER_CMPL:
		cmd_str = "exch_ver_cmpl : ";
		snprintf(line, LINE_BUFSZ, "SNIC_RSP_EXCH_VER_CMPL :");
		break;

	case SNIC_MSG_ACK:
		cmd_str = "msg ack : ";
		snprintf(line, LINE_BUFSZ, "SNIC_MSG_ACK :");
		break;

	case SNIC_MSG_ASYNC_EVNOTIFY:
		cmd_str = "async notify : ";
		snprintf(line, LINE_BUFSZ, "SNIC_MSG_ASYNC_EVNOTIFY :");
		break;

	default:
		cmd_str = "unknown : ";
		SNIC_BUG_ON(1);
		break;
	}

	SNIC_INFO("%s:%s >>cmndid=%x:sg_cnt = %x:status = %x:ctx = %lx.\n",
		  fn, line, req->hdr.cmnd_id, req->hdr.sg_cnt, req->hdr.status,
		  req->hdr.init_ctx);

	/* Enable it, to dump byte stream */
	if (snic_log_level & 0x20)
		snic_hex_dump(cmd_str, os_buf, len);
} /* end of snic_dump_desc */
/* snic_print_desc : descriptor logging, opt-in via SNIC_DESC_LOGGING */
void
snic_print_desc(const char *fn, char *os_buf, int len)
{
	if (!(snic_log_level & SNIC_DESC_LOGGING))
		return;

	snic_dump_desc(fn, os_buf, len);
}
/*
 * snic_calc_io_process_time : records the worst-case IO turnaround
 * (in jiffies) seen so far in the io stats.
 */
void
snic_calc_io_process_time(struct snic *snic, struct snic_req_info *rqi)
{
	u64 elapsed = jiffies - rqi->start_time;

	if (elapsed > atomic64_read(&snic->s_stats.io.max_time))
		atomic64_set(&snic->s_stats.io.max_time, elapsed);
}

View File

@ -0,0 +1,118 @@
/*
* Copyright 2014 Cisco Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef _SNIC_IO_H
#define _SNIC_IO_H

#define SNIC_DFLT_SG_DESC_CNT	32	/* Default descriptors for sgl */
#define SNIC_MAX_SG_DESC_CNT	60	/* Max descriptor for sgl */
#define SNIC_SG_DESC_ALIGN	16	/* Descriptor address alignment */

/* SG descriptor for snic, little-endian on the wire */
struct snic_sg_desc {
	__le64 addr;
	__le32 len;
	u32 _resvd;
};

/* SGL sized for the common case (SNIC_REQ_CACHE_DFLT_SGL pool) */
struct snic_dflt_sgl {
	struct snic_sg_desc sg_desc[SNIC_DFLT_SG_DESC_CNT];
};

/* SGL sized for the largest supported IO (SNIC_REQ_CACHE_MAX_SGL pool) */
struct snic_max_sgl {
	struct snic_sg_desc sg_desc[SNIC_MAX_SG_DESC_CNT];
};

enum snic_req_cache_type {
	SNIC_REQ_CACHE_DFLT_SGL = 0,	/* cache with default size sgl */
	SNIC_REQ_CACHE_MAX_SGL,		/* cache with max size sgl */
	SNIC_REQ_TM_CACHE,		/* cache for task mgmt reqs contains
					   snic_host_req objects only*/

	SNIC_REQ_MAX_CACHES		/* number of sgl caches */
};

/* Per IO internal state */
struct snic_internal_io_state {
	char	*rqi;		/* back-pointer to the owning snic_req_info */
	u64	flags;
	u32	state;		/* snic_ioreq_state */
	u32	abts_status;	/* Abort completion status */
	u32	lr_status;	/* device reset completion status */
};

/* IO state machine */
enum snic_ioreq_state {
	SNIC_IOREQ_NOT_INITED = 0,
	SNIC_IOREQ_PENDING,
	SNIC_IOREQ_ABTS_PENDING,
	SNIC_IOREQ_ABTS_COMPLETE,
	SNIC_IOREQ_LR_PENDING,
	SNIC_IOREQ_LR_COMPLETE,
	SNIC_IOREQ_COMPLETE,
};
struct snic;
struct snic_host_req;

/*
 * snic_req_info : Contains info about IO, one per scsi command.
 * Notes: Make sure that the structure is aligned to 16 B
 * this helps in easy access to snic_req_info from snic_host_req
 */
struct snic_req_info {
	struct list_head list;		/* entry on snic->spl_cmd_list */
	struct snic_host_req *req;	/* carved out right after this struct */
	u64	start_time;		/* start time in jiffies */
	u16	rq_pool_type;		/* notion of request pool type */
	u16	req_len;		/* buf len passing to fw (req + sgl)*/
	u32	tgt_id;
	u32	tm_tag;
	u8	io_cmpl:1;		/* sets to 1 when fw completes IO */
	u8	resvd[3];
	struct scsi_cmnd *sc;		/* Associated scsi cmd */
	struct snic	*snic;		/* Associated snic */
	ulong	sge_va;			/* Pointer to Resp Buffer */
	u64	snsbuf_va;

	struct snic_host_req *abort_req;	/* TM: abort request */
	struct completion *abts_done;
	struct snic_host_req *dr_req;		/* TM: device reset request */
	struct completion *dr_done;
};

/* conversions between the co-allocated rqi/req pair and the trailing SGL */
#define rqi_to_req(rqi)	\
	((struct snic_host_req *) (((struct snic_req_info *)rqi)->req))

#define req_to_rqi(req)	\
	((struct snic_req_info *) (((struct snic_host_req *)req)->hdr.init_ctx))

#define req_to_sgl(req)	\
	((struct snic_sg_desc *) (((struct snic_host_req *)req)+1))

struct snic_req_info *
snic_req_init(struct snic *, int sg_cnt);
void snic_req_free(struct snic *, struct snic_req_info *);
void snic_calc_io_process_time(struct snic *, struct snic_req_info *);
void snic_pci_unmap_rsp_buf(struct snic *, struct snic_req_info *);
struct snic_host_req *
snic_abort_req_init(struct snic *, struct snic_req_info *);
struct snic_host_req *
snic_dr_req_init(struct snic *, struct snic_req_info *);
#endif /* _SNIC_IO_H */

View File

@ -0,0 +1,204 @@
/*
* Copyright 2014 Cisco Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include "vnic_dev.h"
#include "vnic_intr.h"
#include "vnic_stats.h"
#include "snic_io.h"
#include "snic.h"
/*
 * snic_isr_msix_wq : MSIx ISR for work queue.
 */
static irqreturn_t
snic_isr_msix_wq(int irq, void *data)
{
	struct snic *snic = data;
	unsigned long nwq_done;

	snic->s_stats.misc.last_isr_time = jiffies;
	atomic64_inc(&snic->s_stats.misc.isr_cnt);

	/* Drain all pending WQ completions, then return the credits. */
	nwq_done = snic_wq_cmpl_handler(snic, -1);
	svnic_intr_return_credits(&snic->intr[SNIC_MSIX_WQ],
				  nwq_done,
				  1 /* unmask intr */,
				  1 /* reset intr timer */);

	return IRQ_HANDLED;
} /* end of snic_isr_msix_wq */
/* snic_isr_msix_io_cmpl : MSIx ISR for firmware IO completions */
static irqreturn_t
snic_isr_msix_io_cmpl(int irq, void *data)
{
	struct snic *snic = data;
	unsigned long ncmpl_done;

	snic->s_stats.misc.last_isr_time = jiffies;
	atomic64_inc(&snic->s_stats.misc.isr_cnt);

	/* Drain the firmware completion queue, then return the credits. */
	ncmpl_done = snic_fwcq_cmpl_handler(snic, -1);
	svnic_intr_return_credits(&snic->intr[SNIC_MSIX_IO_CMPL],
				  ncmpl_done,
				  1 /* unmask intr */,
				  1 /* reset intr timer */);

	return IRQ_HANDLED;
} /* end of snic_isr_msix_io_cmpl */
/* snic_isr_msix_err_notify : MSIx ISR for queue errors and link events */
static irqreturn_t
snic_isr_msix_err_notify(int irq, void *data)
{
	struct snic *snic = data;

	snic->s_stats.misc.last_isr_time = jiffies;
	atomic64_inc(&snic->s_stats.misc.isr_cnt);

	svnic_intr_return_all_credits(&snic->intr[SNIC_MSIX_ERR_NOTIFY]);
	snic_log_q_error(snic);

	/* Handling link events */
	snic_handle_link_event(snic);

	return IRQ_HANDLED;
} /* end of snic_isr_msix_err_notify */
/* snic_free_intr : releases every IRQ previously requested (MSI-X only) */
void
snic_free_intr(struct snic *snic)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(snic->msix); i++)
		if (snic->msix[i].requested)
			free_irq(snic->msix_entry[i].vector,
				 snic->msix[i].devid);
} /* end of snic_free_intr */
/*
 * snic_request_intr : names and requests the three MSI-X vectors
 * (WQ ack, IO completion, error/notify).  On any request_irq failure,
 * already-requested vectors are freed and the error is returned.
 *
 * Fix: the failure log said "requrest_irq"; corrected to "request_irq"
 * so the message matches the failing call.
 */
int
snic_request_intr(struct snic *snic)
{
	int ret = 0, i;
	enum vnic_dev_intr_mode intr_mode;

	intr_mode = svnic_dev_get_intr_mode(snic->vdev);
	SNIC_BUG_ON(intr_mode != VNIC_DEV_INTR_MODE_MSIX);

	/*
	 * Currently HW supports single WQ and CQ. So passing devid as snic.
	 * When hardware supports multiple WQs and CQs, one idea is
	 * to pass devid as corresponding WQ or CQ ptr and retrieve snic
	 * from queue ptr.
	 * Except for err_notify, which is always one.
	 */
	sprintf(snic->msix[SNIC_MSIX_WQ].devname,
		"%.11s-scsi-wq",
		snic->name);
	snic->msix[SNIC_MSIX_WQ].isr = snic_isr_msix_wq;
	snic->msix[SNIC_MSIX_WQ].devid = snic;

	sprintf(snic->msix[SNIC_MSIX_IO_CMPL].devname,
		"%.11s-io-cmpl",
		snic->name);
	snic->msix[SNIC_MSIX_IO_CMPL].isr = snic_isr_msix_io_cmpl;
	snic->msix[SNIC_MSIX_IO_CMPL].devid = snic;

	sprintf(snic->msix[SNIC_MSIX_ERR_NOTIFY].devname,
		"%.11s-err-notify",
		snic->name);
	snic->msix[SNIC_MSIX_ERR_NOTIFY].isr = snic_isr_msix_err_notify;
	snic->msix[SNIC_MSIX_ERR_NOTIFY].devid = snic;

	for (i = 0; i < ARRAY_SIZE(snic->msix); i++) {
		ret = request_irq(snic->msix_entry[i].vector,
				  snic->msix[i].isr,
				  0,
				  snic->msix[i].devname,
				  snic->msix[i].devid);
		if (ret) {
			SNIC_HOST_ERR(snic->shost,
				      "MSI-X: request_irq(%d) failed %d\n",
				      i,
				      ret);
			snic_free_intr(snic);
			break;
		}
		snic->msix[i].requested = 1;
	}

	return ret;
} /* end of snic_request_intr */
/*
 * snic_set_intr_mode : enables MSI-X (the only supported mode) with
 * n WQ + m CQ + 1 error/notify vectors.  Returns 0 on success,
 * -EINVAL when resources are insufficient or MSI-X cannot be enabled.
 */
int
snic_set_intr_mode(struct snic *snic)
{
	unsigned int n = ARRAY_SIZE(snic->wq);
	unsigned int m = SNIC_CQ_IO_CMPL_MAX;
	unsigned int i;

	/*
	 * We need n WQs, m CQs, and n+m+1 INTRs
	 * (last INTR is used for WQ/CQ errors and notification area)
	 */
	BUILD_BUG_ON((ARRAY_SIZE(snic->wq) + SNIC_CQ_IO_CMPL_MAX) >
		     ARRAY_SIZE(snic->intr));
	SNIC_BUG_ON(ARRAY_SIZE(snic->msix_entry) < (n + m + 1));

	for (i = 0; i < (n + m + 1); i++)
		snic->msix_entry[i].entry = i;

	if (snic->wq_count >= n && snic->cq_count >= (n + m) &&
	    !pci_enable_msix(snic->pdev, snic->msix_entry, (n + m + 1))) {
		snic->wq_count = n;
		snic->cq_count = n + m;
		snic->intr_count = n + m + 1;
		snic->err_intr_offset = SNIC_MSIX_ERR_NOTIFY;

		SNIC_ISR_DBG(snic->shost, "Using MSI-X Interrupts\n");
		svnic_dev_set_intr_mode(snic->vdev, VNIC_DEV_INTR_MODE_MSIX);

		return 0;
	}

	svnic_dev_set_intr_mode(snic->vdev, VNIC_DEV_INTR_MODE_UNKNOWN);

	return -EINVAL;
} /* end of snic_set_intr_mode */
/* snic_clear_intr_mode : disables MSI-X and falls back to INTx mode */
void
snic_clear_intr_mode(struct snic *snic)
{
	pci_disable_msix(snic->pdev);

	svnic_dev_set_intr_mode(snic->vdev, VNIC_DEV_INTR_MODE_INTX);
}

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,295 @@
/*
* Copyright 2014 Cisco Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/pci.h>
#include "wq_enet_desc.h"
#include "cq_enet_desc.h"
#include "vnic_resource.h"
#include "vnic_dev.h"
#include "vnic_wq.h"
#include "vnic_cq.h"
#include "vnic_intr.h"
#include "vnic_stats.h"
#include "snic.h"
/*
 * snic_get_vnic_config : reads every vNIC configuration field from
 * device space, clamps each into its supported min/max range, and logs
 * the result.  Returns 0 on success or the svnic_dev_spec() error.
 */
int
snic_get_vnic_config(struct snic *snic)
{
	struct vnic_snic_config *c = &snic->config;
	int ret;

	/* Fetch one field of struct vnic_snic_config; bails on first error. */
#define GET_CONFIG(m) \
	do { \
		ret = svnic_dev_spec(snic->vdev, \
				     offsetof(struct vnic_snic_config, m), \
				     sizeof(c->m), \
				     &c->m); \
		if (ret) { \
			SNIC_HOST_ERR(snic->shost, \
				      "Error getting %s, %d\n", #m, ret); \
			return ret; \
		} \
	} while (0)

	GET_CONFIG(wq_enet_desc_count);
	GET_CONFIG(maxdatafieldsize);
	GET_CONFIG(intr_timer);
	GET_CONFIG(intr_timer_type);
	GET_CONFIG(flags);
	GET_CONFIG(io_throttle_count);
	GET_CONFIG(port_down_timeout);
	GET_CONFIG(port_down_io_retries);
	GET_CONFIG(luns_per_tgt);
	GET_CONFIG(xpt_type);
	GET_CONFIG(hid);

	/* Clamp each value into its supported range. */
	c->wq_enet_desc_count = min_t(u32,
				      VNIC_SNIC_WQ_DESCS_MAX,
				      max_t(u32,
					    VNIC_SNIC_WQ_DESCS_MIN,
					    c->wq_enet_desc_count));

	c->wq_enet_desc_count = ALIGN(c->wq_enet_desc_count, 16);

	c->maxdatafieldsize = min_t(u32,
				    VNIC_SNIC_MAXDATAFIELDSIZE_MAX,
				    max_t(u32,
					  VNIC_SNIC_MAXDATAFIELDSIZE_MIN,
					  c->maxdatafieldsize));

	c->io_throttle_count = min_t(u32,
				     VNIC_SNIC_IO_THROTTLE_COUNT_MAX,
				     max_t(u32,
					   VNIC_SNIC_IO_THROTTLE_COUNT_MIN,
					   c->io_throttle_count));

	c->port_down_timeout = min_t(u32,
				     VNIC_SNIC_PORT_DOWN_TIMEOUT_MAX,
				     c->port_down_timeout);

	c->port_down_io_retries = min_t(u32,
					VNIC_SNIC_PORT_DOWN_IO_RETRIES_MAX,
					c->port_down_io_retries);

	c->luns_per_tgt = min_t(u32,
				VNIC_SNIC_LUNS_PER_TARGET_MAX,
				max_t(u32,
				      VNIC_SNIC_LUNS_PER_TARGET_MIN,
				      c->luns_per_tgt));

	c->intr_timer = min_t(u32, VNIC_INTR_TIMER_MAX, c->intr_timer);

	SNIC_INFO("vNIC resources wq %d\n", c->wq_enet_desc_count);
	SNIC_INFO("vNIC mtu %d intr timer %d\n",
		  c->maxdatafieldsize,
		  c->intr_timer);
	SNIC_INFO("vNIC flags 0x%x luns per tgt %d\n",
		  c->flags,
		  c->luns_per_tgt);
	SNIC_INFO("vNIC io throttle count %d\n", c->io_throttle_count);
	SNIC_INFO("vNIC port down timeout %d port down io retries %d\n",
		  c->port_down_timeout,
		  c->port_down_io_retries);
	SNIC_INFO("vNIC back end type = %d\n", c->xpt_type);
	SNIC_INFO("vNIC hid = %d\n", c->hid);

	return 0;
}
/*
 * snic_get_res_counts : queries the device for the number of WQ, CQ,
 * and interrupt-control resources; all three must be non-zero.
 */
void
snic_get_res_counts(struct snic *snic)
{
	snic->wq_count = svnic_dev_get_res_count(snic->vdev, RES_TYPE_WQ);
	snic->cq_count = svnic_dev_get_res_count(snic->vdev, RES_TYPE_CQ);
	snic->intr_count = svnic_dev_get_res_count(snic->vdev,
						   RES_TYPE_INTR_CTRL);

	SNIC_BUG_ON(snic->wq_count == 0);
	SNIC_BUG_ON(snic->cq_count == 0);
	SNIC_BUG_ON(snic->intr_count == 0);
}
/* snic_free_vnic_res : frees all allocated WQ, CQ, and INTR resources */
void
snic_free_vnic_res(struct snic *snic)
{
	unsigned int idx;

	for (idx = 0; idx < snic->wq_count; idx++)
		svnic_wq_free(&snic->wq[idx]);

	for (idx = 0; idx < snic->cq_count; idx++)
		svnic_cq_free(&snic->cq[idx]);

	for (idx = 0; idx < snic->intr_count; idx++)
		svnic_intr_free(&snic->intr[idx]);
}
/*
 * snic_alloc_vnic_res : allocates and initializes all vNIC resources:
 * one WQ per work queue, one CQ per WQ plus one firmware->host CQ per
 * WQ, and the MSI-X interrupt controls.  On any failure everything
 * allocated so far is freed and the error is returned.
 */
int
snic_alloc_vnic_res(struct snic *snic)
{
	enum vnic_dev_intr_mode intr_mode;
	unsigned int mask_on_assertion;
	unsigned int intr_offset;
	unsigned int err_intr_enable;
	unsigned int err_intr_offset;
	unsigned int i;
	int ret;

	intr_mode = svnic_dev_get_intr_mode(snic->vdev);
	SNIC_INFO("vNIC interrupt mode: %s\n",
		  ((intr_mode == VNIC_DEV_INTR_MODE_INTX) ?
		   "Legacy PCI INTx" :
		   ((intr_mode == VNIC_DEV_INTR_MODE_MSI) ?
		    "MSI" :
		    ((intr_mode == VNIC_DEV_INTR_MODE_MSIX) ?
		     "MSI-X" : "Unknown"))));

	/* only MSI-X is supported */
	SNIC_BUG_ON(intr_mode != VNIC_DEV_INTR_MODE_MSIX);

	SNIC_INFO("wq %d cq %d intr %d\n", snic->wq_count,
		  snic->cq_count,
		  snic->intr_count);

	/* Allocate WQs used for SCSI IOs */
	for (i = 0; i < snic->wq_count; i++) {
		ret = svnic_wq_alloc(snic->vdev,
				     &snic->wq[i],
				     i,
				     snic->config.wq_enet_desc_count,
				     sizeof(struct wq_enet_desc));
		if (ret)
			goto error_cleanup;
	}

	/* CQ for each WQ */
	for (i = 0; i < snic->wq_count; i++) {
		ret = svnic_cq_alloc(snic->vdev,
				     &snic->cq[i],
				     i,
				     snic->config.wq_enet_desc_count,
				     sizeof(struct cq_enet_wq_desc));
		if (ret)
			goto error_cleanup;
	}

	SNIC_BUG_ON(snic->cq_count != 2 * snic->wq_count);
	/* CQ for FW TO host, sized 3x the WQ descriptor count */
	for (i = snic->wq_count; i < snic->cq_count; i++) {
		ret = svnic_cq_alloc(snic->vdev,
				     &snic->cq[i],
				     i,
				     (snic->config.wq_enet_desc_count * 3),
				     sizeof(struct snic_fw_req));
		if (ret)
			goto error_cleanup;
	}

	for (i = 0; i < snic->intr_count; i++) {
		ret = svnic_intr_alloc(snic->vdev, &snic->intr[i], i);
		if (ret)
			goto error_cleanup;
	}

	/*
	 * Init WQ Resources.
	 * WQ[0 to n] points to CQ[0 to n-1]
	 * firmware to host comm points to CQ[n to m+1]
	 */
	err_intr_enable = 1;
	err_intr_offset = snic->err_intr_offset;

	for (i = 0; i < snic->wq_count; i++) {
		svnic_wq_init(&snic->wq[i],
			      i,
			      err_intr_enable,
			      err_intr_offset);
	}

	for (i = 0; i < snic->cq_count; i++) {
		intr_offset = i;
		svnic_cq_init(&snic->cq[i],
			      0 /* flow_control_enable */,
			      1 /* color_enable */,
			      0 /* cq_head */,
			      0 /* cq_tail */,
			      1 /* cq_tail_color */,
			      1 /* interrupt_enable */,
			      1 /* cq_entry_enable */,
			      0 /* cq_message_enable */,
			      intr_offset,
			      0 /* cq_message_addr */);
	}

	/*
	 * Init INTR resources
	 * Assumption : snic is always in MSI-X mode
	 */
	SNIC_BUG_ON(intr_mode != VNIC_DEV_INTR_MODE_MSIX);
	mask_on_assertion = 1;

	for (i = 0; i < snic->intr_count; i++) {
		svnic_intr_init(&snic->intr[i],
				snic->config.intr_timer,
				snic->config.intr_timer_type,
				mask_on_assertion);
	}

	/* init the stats memory by making the first call here */
	ret = svnic_dev_stats_dump(snic->vdev, &snic->stats);
	if (ret) {
		SNIC_HOST_ERR(snic->shost,
			      "svnic_dev_stats_dump failed - x%x\n",
			      ret);
		goto error_cleanup;
	}

	/* Clear LIF stats */
	svnic_dev_stats_clear(snic->vdev);
	ret = 0;

	return ret;

error_cleanup:
	snic_free_vnic_res(snic);

	return ret;
}
/* snic_log_q_error : logs any non-zero WQ hardware error status */
void
snic_log_q_error(struct snic *snic)
{
	unsigned int q;
	u32 err_status;

	for (q = 0; q < snic->wq_count; q++) {
		err_status = ioread32(&snic->wq[q].ctrl->error_status);
		if (!err_status)
			continue;

		SNIC_HOST_ERR(snic->shost,
			      "WQ[%d] error status %d\n",
			      q,
			      err_status);
	}
} /* end of snic_log_q_error */

View File

@ -0,0 +1,97 @@
/*
* Copyright 2014 Cisco Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef __SNIC_RES_H
#define __SNIC_RES_H
#include "snic_io.h"
#include "wq_enet_desc.h"
#include "vnic_wq.h"
#include "snic_fwint.h"
#include "vnic_cq_fw.h"
/*
 * snic_icmnd_init : Build a SCSI initiator-command (ICMND) host request.
 * Encodes the common request header, then fills the icmnd payload with
 * target/LUN addressing, CDB, data/SGL and sense-buffer details; all
 * multi-byte fields are converted to wire (little-endian) order.
 */
static inline void
snic_icmnd_init(struct snic_host_req *req, u32 cmnd_id, u32 host_id, u64 ctx,
		u16 flags, u64 tgt_id, u8 *lun, u8 *scsi_cdb, u8 cdb_len,
		u32 data_len, u16 sg_cnt, ulong sgl_addr,
		dma_addr_t sns_addr_pa, u32 sense_len)
{
	snic_io_hdr_enc(&req->hdr, SNIC_REQ_ICMND, 0, cmnd_id, host_id, sg_cnt,
			ctx);

	/* target addressing */
	req->u.icmnd.tgt_id = cpu_to_le64(tgt_id);
	memcpy(&req->u.icmnd.lun_id, lun, LUN_ADDR_LEN);
	req->u.icmnd.flags = cpu_to_le16(flags);

	/* CDB: clear the full field first so unused tail bytes stay zero */
	memset(req->u.icmnd.cdb, 0, SNIC_CDB_LEN);
	memcpy(req->u.icmnd.cdb, scsi_cdb, cdb_len);
	req->u.icmnd.cdb_len = cdb_len;

	/* data buffer / scatter-gather list */
	req->u.icmnd.data_len = cpu_to_le32(data_len);
	req->u.icmnd.sg_addr = cpu_to_le64(sgl_addr);

	/* sense buffer for check-condition data */
	req->u.icmnd.sense_addr = cpu_to_le64(sns_addr_pa);
	req->u.icmnd.sense_len = cpu_to_le32(sense_len);
}
/*
 * snic_itmf_init : Build a task-management (ITMF) host request for the
 * given target/LUN.  req_id names the outstanding request being acted
 * upon and is meaningful only for abort-task / clear-task TMFs.
 */
static inline void
snic_itmf_init(struct snic_host_req *req, u32 cmnd_id, u32 host_id, ulong ctx,
	       u16 flags, u32 req_id, u64 tgt_id, u8 *lun, u8 tm_type)
{
	snic_io_hdr_enc(&req->hdr, SNIC_REQ_ITMF, 0, cmnd_id, host_id, 0, ctx);

	req->u.itmf.flags = cpu_to_le16(flags);
	req->u.itmf.tm_type = tm_type;
	/* req_id valid only in abort, clear task */
	req->u.itmf.req_id = cpu_to_le32(req_id);

	/* target addressing */
	memcpy(&req->u.itmf.lun_id, lun, LUN_ADDR_LEN);
	req->u.itmf.tgt_id = cpu_to_le64(tgt_id);
}
/*
 * snic_queue_wq_eth_desc : Encode an ethernet-style WQ descriptor for the
 * buffer at dma_addr/len and post it to the work queue as a single-frame
 * (sop + eop) entry.
 */
static inline void
snic_queue_wq_eth_desc(struct vnic_wq *wq,
		       void *os_buf,
		       dma_addr_t dma_addr,
		       unsigned int len,
		       int vlan_tag_insert,
		       unsigned int vlan_tag,
		       int cq_entry)
{
	struct wq_enet_desc *desc = svnic_wq_next_desc(wq);
	u64 paddr = (u64)dma_addr | VNIC_PADDR_TARGET;

	wq_enet_desc_enc(desc,
			 paddr,
			 (u16)len,
			 0, /* mss_or_csum_offset */
			 0, /* fc_eof */
			 0, /* offload mode */
			 1, /* eop */
			 (u8)cq_entry,
			 0, /* fcoe_encap */
			 (u8)vlan_tag_insert,
			 (u16)vlan_tag,
			 0 /* loopback */);

	svnic_wq_post(wq, os_buf, dma_addr, len, 1, 1);
}
struct snic;
int snic_get_vnic_config(struct snic *);
int snic_alloc_vnic_res(struct snic *);
void snic_free_vnic_res(struct snic *);
void snic_get_res_counts(struct snic *);
void snic_log_q_error(struct snic *);
int snic_get_vnic_resources_size(struct snic *);
#endif /* __SNIC_RES_H */

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,123 @@
/*
* Copyright 2014 Cisco Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef __SNIC_STATS_H
#define __SNIC_STATS_H
/* IO fast-path counters; all fields atomic, updated without locks */
struct snic_io_stats {
	atomic64_t active;		/* Active IOs */
	atomic64_t max_active;		/* Max # active IOs */
	atomic64_t max_sgl;		/* Max # SGLs for any IO */
	atomic64_t max_time;		/* Max time to process IO */
	atomic64_t max_qtime;		/* Max time to Queue the IO */
	atomic64_t max_cmpl_time;	/* Max time to complete the IO */
	atomic64_t sgl_cnt[SNIC_MAX_SG_DESC_CNT]; /* SGL Counters */
	atomic64_t max_io_sz;		/* Max IO Size */
	atomic64_t compl;		/* IO Completions */
	atomic64_t fail;		/* IO Failures */
	atomic64_t req_null;		/* req or req info is NULL */
	atomic64_t alloc_fail;		/* Alloc Failures */
	atomic64_t sc_null;		/* scsi_cmnd lookup returned NULL */
	atomic64_t io_not_found;	/* IO Not Found */
	atomic64_t num_ios;		/* Number of IOs */
};
/* Abort (task management) path counters */
struct snic_abort_stats {
	atomic64_t num;		/* Abort counter */
	atomic64_t fail;	/* Abort Failure Counter */
	atomic64_t drv_tmo;	/* Abort Driver Timeouts */
	atomic64_t fw_tmo;	/* Abort Firmware Timeouts */
	atomic64_t io_not_found;/* Abort IO Not Found */
};
/* Device/HBA/driver reset counters */
struct snic_reset_stats {
	atomic64_t dev_resets;		/* Device Reset Counter */
	atomic64_t dev_reset_fail;	/* Device Reset Failures */
	atomic64_t dev_reset_aborts;	/* Device Reset Aborts */
	atomic64_t dev_reset_tmo;	/* Device Reset Timeout */
	atomic64_t dev_reset_terms;	/* Device Reset terminate */
	atomic64_t hba_resets;		/* hba/firmware resets */
	atomic64_t hba_reset_cmpl;	/* hba/firmware reset completions */
	atomic64_t hba_reset_fail;	/* hba/firmware failures */
	atomic64_t snic_resets;		/* snic resets */
	atomic64_t snic_reset_compl;	/* snic reset completions */
	atomic64_t snic_reset_fail;	/* snic reset failures */
};
/* Firmware-reported status counters */
struct snic_fw_stats {
	atomic64_t actv_reqs;		/* Active Requests */
	atomic64_t max_actv_reqs;	/* Max Active Requests */
	atomic64_t out_of_res;		/* Firmware Out Of Resources */
	atomic64_t io_errs;		/* Firmware IO Firmware Errors */
	atomic64_t scsi_errs;		/* Target hits check condition */
};
/* Miscellaneous event counters (ISR activity, alloc failures, etc.) */
struct snic_misc_stats {
	u64 last_isr_time;	/* jiffies of most recent interrupt */
	u64 last_ack_time;	/* jiffies of most recent completion ack */
	atomic64_t isr_cnt;
	atomic64_t max_cq_ents;		/* Max CQ Entries */
	atomic64_t data_cnt_mismat;	/* Data Count Mismatch */
	atomic64_t io_tmo;
	atomic64_t io_aborted;
	atomic64_t sgl_inval;		/* SGL Invalid */
	atomic64_t abts_wq_alloc_fail;	/* Abort Path WQ desc alloc failure */
	atomic64_t devrst_wq_alloc_fail;/* Device Reset - WQ desc alloc fail */
	atomic64_t wq_alloc_fail;	/* IO WQ desc alloc failure */
	atomic64_t no_icmnd_itmf_cmpls;
	atomic64_t io_under_run;
	atomic64_t qfull;
	atomic64_t tgt_not_rdy;
};
/* Top-level per-host statistics container embedded in struct snic */
struct snic_stats {
	struct snic_io_stats io;
	struct snic_abort_stats abts;
	struct snic_reset_stats reset;
	struct snic_fw_stats fw;
	struct snic_misc_stats misc;
	/* when set, the next completion decrements this instead of io.compl */
	atomic64_t io_cmpl_skip;
};
int snic_stats_debugfs_init(struct snic *);
void snic_stats_debugfs_remove(struct snic *);
/* Auxillary function to update active IO counter */
static inline void
snic_stats_update_active_ios(struct snic_stats *s_stats)
{
struct snic_io_stats *io = &s_stats->io;
u32 nr_active_ios;
nr_active_ios = atomic64_inc_return(&io->active);
if (atomic64_read(&io->max_active) < nr_active_ios)
atomic64_set(&io->max_active, nr_active_ios);
atomic64_inc(&io->num_ios);
}
/* Auxillary function to update IO completion counter */
static inline void
snic_stats_update_io_cmpl(struct snic_stats *s_stats)
{
atomic64_dec(&s_stats->io.active);
if (unlikely(atomic64_read(&s_stats->io_cmpl_skip)))
atomic64_dec(&s_stats->io_cmpl_skip);
else
atomic64_inc(&s_stats->io.compl);
}
#endif /* __SNIC_STATS_H */

View File

@ -0,0 +1,181 @@
/*
* Copyright 2014 Cisco Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/module.h>
#include <linux/mempool.h>
#include <linux/errno.h>
#include <linux/vmalloc.h>
#include "snic_io.h"
#include "snic.h"
/*
* snic_get_trc_buf : Allocates a trace record and returns.
*/
/*
 * snic_get_trc_buf : Reserve the next slot in the circular trace buffer
 * and return it.  When the write index catches up with the read index,
 * the oldest record is dropped by advancing the read index, and the
 * reserved record's timestamp is zeroed to mark it as in-flight.
 */
struct snic_trc_data *
snic_get_trc_buf(void)
{
	struct snic_trc *trc = &snic_glob->trc;
	struct snic_trc_data *td;
	unsigned long flags;

	spin_lock_irqsave(&trc->lock, flags);
	td = &trc->buf[trc->wr_idx];
	if (++trc->wr_idx == trc->max_idx)
		trc->wr_idx = 0;

	if (trc->wr_idx == trc->rd_idx) {
		/* Ring is full: overwrite (skip past) the oldest record. */
		if (++trc->rd_idx == trc->max_idx)
			trc->rd_idx = 0;

		/* ts == 0 marks a record whose data is not yet complete. */
		td->ts = 0;
	}
	spin_unlock_irqrestore(&trc->lock, flags);

	return td;
} /* end of snic_get_trc_buf */
/*
* snic_fmt_trc_data : Formats trace data for printing.
*/
/*
 * snic_fmt_trc_data : Format one trace record into buf (at most buf_sz
 * bytes, NUL-terminated) and return the number of characters that would
 * have been written.
 */
static int
snic_fmt_trc_data(struct snic_trc_data *td, char *buf, int buf_sz)
{
	int len = 0;
	struct timespec tmspec;

	jiffies_to_timespec(td->ts, &tmspec);

	/*
	 * tv_nsec is the fractional part of the timestamp, so it must be
	 * zero-padded to nine digits ("%09lu").  The previous "%10lu"
	 * space-padded it, rendering e.g. 1s + 1500000ns as "1.   1500000".
	 */
	len += snprintf(buf, buf_sz,
			"%lu.%09lu %-25s %3d %4x %16llx %16llx %16llx %16llx %16llx\n",
			tmspec.tv_sec,
			tmspec.tv_nsec,
			td->fn,
			td->hno,
			td->tag,
			td->data[0], td->data[1], td->data[2], td->data[3],
			td->data[4]);

	return len;
} /* end of snic_fmt_trc_data */
/*
* snic_get_trc_data : Returns a formatted trace buffer.
*/
/*
 * snic_get_trc_data : Pop the oldest complete trace record and format it
 * into buf.  Returns the formatted length, or -1 when the ring is empty
 * or the head record is still being written (ts == 0, see
 * snic_get_trc_buf()).
 */
int
snic_get_trc_data(char *buf, int buf_sz)
{
	struct snic_trc_data *td = NULL;
	struct snic_trc *trc = &snic_glob->trc;
	unsigned long flags;
	spin_lock_irqsave(&trc->lock, flags);
	/* rd_idx == wr_idx means no records are pending */
	if (trc->rd_idx == trc->wr_idx) {
		spin_unlock_irqrestore(&trc->lock, flags);
		return -1;
	}
	td = &trc->buf[trc->rd_idx];
	if (td->ts == 0) {
		/* write in progress. */
		spin_unlock_irqrestore(&trc->lock, flags);
		return -1;
	}
	/* consume the record: advance and wrap the read index */
	trc->rd_idx++;
	if (trc->rd_idx == trc->max_idx)
		trc->rd_idx = 0;
	spin_unlock_irqrestore(&trc->lock, flags);
	/*
	 * NOTE(review): td is formatted after the lock is dropped; a writer
	 * that wraps the ring could overwrite it concurrently — presumably
	 * acceptable for debug tracing, but worth confirming.
	 */
	return snic_fmt_trc_data(td, buf, buf_sz);
} /* end of snic_get_trc_data */
/*
* snic_trc_init() : Configures Trace Functionality for snic.
*/
/*
 * snic_trc_init() : Allocate the trace ring buffer, create its debugfs
 * files and enable tracing.  Returns 0 on success or a negative errno;
 * on failure the trace facility is simply left disabled.
 */
int
snic_trc_init(void)
{
	struct snic_trc *trc = &snic_glob->trc;
	void *tbuf = NULL;
	int tbuf_sz = 0, ret;

	tbuf_sz = (snic_trace_max_pages * PAGE_SIZE);
	/* vzalloc() zero-fills, replacing the open-coded vmalloc+memset */
	tbuf = vzalloc(tbuf_sz);
	if (!tbuf) {
		SNIC_ERR("Failed to Allocate Trace Buffer Size. %d\n", tbuf_sz);
		SNIC_ERR("Trace Facility not enabled.\n");
		ret = -ENOMEM;

		return ret;
	}

	trc->buf = (struct snic_trc_data *) tbuf;
	spin_lock_init(&trc->lock);

	ret = snic_trc_debugfs_init();
	if (ret) {
		SNIC_ERR("Failed to create Debugfs Files.\n");

		goto error;
	}

	/* each SNIC_TRC_ENTRY_SZ-byte slot holds one snic_trc_data record */
	trc->max_idx = (tbuf_sz / SNIC_TRC_ENTRY_SZ);
	trc->rd_idx = trc->wr_idx = 0;
	trc->enable = 1;
	SNIC_INFO("Trace Facility Enabled.\n Trace Buffer SZ %lu Pages.\n",
		  tbuf_sz / PAGE_SIZE);
	ret = 0;

	return ret;

error:
	snic_trc_free();

	return ret;
} /* end of snic_trc_init */
/*
* snic_trc_free : Releases the trace buffer and disables the tracing.
*/
/*
 * snic_trc_free : Disable tracing, tear down the debugfs files and
 * release the trace buffer.  Safe to call even if snic_trc_init()
 * failed part-way (vfree(NULL) is a no-op, so no guard is needed).
 */
void
snic_trc_free(void)
{
	struct snic_trc *trc = &snic_glob->trc;

	trc->enable = 0;
	snic_trc_debugfs_term();

	vfree(trc->buf);
	trc->buf = NULL;

	SNIC_INFO("Trace Facility Disabled.\n");
} /* end of snic_trc_free */

View File

@ -0,0 +1,121 @@
/*
* Copyright 2014 Cisco Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef __SNIC_TRC_H
#define __SNIC_TRC_H
#ifdef CONFIG_SCSI_SNIC_DEBUG_FS
extern ssize_t simple_read_from_buffer(void __user *to,
size_t count,
loff_t *ppos,
const void *from,
size_t available);
extern unsigned int snic_trace_max_pages;
/* Global Data structure for trace to manage trace functionality */
/* Global Data structure for trace to manage trace functionality */
/* One fixed-size trace record; packed so records tile the ring exactly */
struct snic_trc_data {
	u64	ts;		/* Time Stamp; 0 = record write in progress */
	char	*fn;		/* Ptr to Function Name */
	u32	hno;		/* SCSI Host ID */
	u32	tag;		/* Command Tag */
	u64	data[5];	/* caller-supplied trace words */
} __attribute__((__packed__));
#define SNIC_TRC_ENTRY_SZ  64	/* in Bytes */
/* Circular trace buffer; indices wrap at max_idx, guarded by lock */
struct snic_trc {
	spinlock_t	lock;
	struct snic_trc_data *buf;	/* Trace Buffer */
	u32	max_idx;		/* Max Index into trace buffer */
	u32	rd_idx;			/* next record to consume */
	u32	wr_idx;			/* next record to fill */
	u32	enable;			/* Control Variable for Tracing */
	struct dentry *trc_enable;	/* debugfs file object */
	struct dentry *trc_file;
};
int snic_trc_init(void);
void snic_trc_free(void);
int snic_trc_debugfs_init(void);
void snic_trc_debugfs_term(void);
struct snic_trc_data *snic_get_trc_buf(void);
int snic_get_trc_data(char *buf, int buf_sz);
int snic_debugfs_init(void);
void snic_debugfs_term(void);
/*
 * snic_trace : Record one trace event.  The timestamp is stored last so
 * that ts == 0 flags a record still being written (readers in
 * snic_get_trc_data() skip such records).
 */
static inline void
snic_trace(char *fn, u16 hno, u32 tag, u64 d1, u64 d2, u64 d3, u64 d4, u64 d5)
{
	struct snic_trc_data *rec = snic_get_trc_buf();

	if (!rec)
		return;

	rec->fn = (char *)fn;
	rec->hno = hno;
	rec->tag = tag;
	rec->data[0] = d1;
	rec->data[1] = d2;
	rec->data[2] = d3;
	rec->data[3] = d4;
	rec->data[4] = d5;
	/* Update time stamp at last */
	rec->ts = jiffies;
}
#define SNIC_TRC(_hno, _tag, d1, d2, d3, d4, d5) \
do { \
if (unlikely(snic_glob->trc.enable)) \
snic_trace((char *)__func__, \
(u16)(_hno), \
(u32)(_tag), \
(u64)(d1), \
(u64)(d2), \
(u64)(d3), \
(u64)(d4), \
(u64)(d5)); \
} while (0)
#else
#define SNIC_TRC(_hno, _tag, d1, d2, d3, d4, d5) \
do { \
if (unlikely(snic_log_level & 0x2)) \
SNIC_DBG("SnicTrace: %s %2u %2u %llx %llx %llx %llx %llx", \
(char *)__func__, \
(u16)(_hno), \
(u32)(_tag), \
(u64)(d1), \
(u64)(d2), \
(u64)(d3), \
(u64)(d4), \
(u64)(d5)); \
} while (0)
#endif /* end of CONFIG_SCSI_SNIC_DEBUG_FS */
#define SNIC_TRC_CMD(sc) \
((u64)sc->cmnd[0] << 56 | (u64)sc->cmnd[7] << 40 | \
(u64)sc->cmnd[8] << 32 | (u64)sc->cmnd[2] << 24 | \
(u64)sc->cmnd[3] << 16 | (u64)sc->cmnd[4] << 8 | \
(u64)sc->cmnd[5])
#define SNIC_TRC_CMD_STATE_FLAGS(sc) \
((u64) CMD_FLAGS(sc) << 32 | CMD_STATE(sc))
#endif /* end of __SNIC_TRC_H */

View File

@ -0,0 +1,86 @@
/*
* Copyright 2014 Cisco Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/pci.h>
#include "vnic_dev.h"
#include "vnic_cq.h"
/*
 * svnic_cq_free : Detach the CQ's control registers and release its
 * descriptor ring.
 */
void svnic_cq_free(struct vnic_cq *cq)
{
	cq->ctrl = NULL;
	svnic_dev_free_desc_ring(cq->vdev, &cq->ring);
}
/*
 * svnic_cq_alloc : Hook the CQ's memory-mapped control registers and
 * allocate its DMA descriptor ring.  Returns 0 on success, -EINVAL if
 * the CQ resource is missing, or the ring allocator's error code.
 */
int svnic_cq_alloc(struct vnic_dev *vdev, struct vnic_cq *cq,
	unsigned int index, unsigned int desc_count, unsigned int desc_size)
{
	cq->index = index;
	cq->vdev = vdev;

	cq->ctrl = svnic_dev_get_res(vdev, RES_TYPE_CQ, index);
	if (!cq->ctrl) {
		pr_err("Failed to hook CQ[%d] resource\n", index);

		return -EINVAL;
	}

	return svnic_dev_alloc_desc_ring(vdev, &cq->ring, desc_count,
					 desc_size);
}
/*
 * svnic_cq_init : Program the CQ's hardware control registers: ring base
 * (DMA address tagged with VNIC_PADDR_TARGET) and size, then the per-CQ
 * feature/index registers.  Field meanings mirror the parameter names;
 * see struct vnic_cq_ctrl for the register layout.
 */
void svnic_cq_init(struct vnic_cq *cq, unsigned int flow_control_enable,
	unsigned int color_enable, unsigned int cq_head, unsigned int cq_tail,
	unsigned int cq_tail_color, unsigned int interrupt_enable,
	unsigned int cq_entry_enable, unsigned int cq_message_enable,
	unsigned int interrupt_offset, u64 cq_message_addr)
{
	u64 paddr;
	paddr = (u64)cq->ring.base_addr | VNIC_PADDR_TARGET;
	writeq(paddr, &cq->ctrl->ring_base);
	iowrite32(cq->ring.desc_count, &cq->ctrl->ring_size);
	iowrite32(flow_control_enable, &cq->ctrl->flow_control_enable);
	iowrite32(color_enable, &cq->ctrl->color_enable);
	iowrite32(cq_head, &cq->ctrl->cq_head);
	iowrite32(cq_tail, &cq->ctrl->cq_tail);
	iowrite32(cq_tail_color, &cq->ctrl->cq_tail_color);
	iowrite32(interrupt_enable, &cq->ctrl->interrupt_enable);
	iowrite32(cq_entry_enable, &cq->ctrl->cq_entry_enable);
	iowrite32(cq_message_enable, &cq->ctrl->cq_message_enable);
	iowrite32(interrupt_offset, &cq->ctrl->interrupt_offset);
	writeq(cq_message_addr, &cq->ctrl->cq_message_addr);
}
/*
 * svnic_cq_clean : Reset the CQ to its initial state — software cursor
 * and color back to zero, hardware head/tail indices cleared, and the
 * descriptor ring zeroed so stale color bits cannot be mistaken for new
 * completions.
 */
void svnic_cq_clean(struct vnic_cq *cq)
{
	cq->to_clean = 0;
	cq->last_color = 0;
	iowrite32(0, &cq->ctrl->cq_head);
	iowrite32(0, &cq->ctrl->cq_tail);
	iowrite32(1, &cq->ctrl->cq_tail_color);
	svnic_dev_clear_desc_ring(&cq->ring);
}

View File

@ -0,0 +1,110 @@
/*
* Copyright 2014 Cisco Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef _VNIC_CQ_H_
#define _VNIC_CQ_H_
#include "cq_desc.h"
#include "vnic_dev.h"
/* Completion queue control */
/* Completion queue control */
/*
 * Memory-mapped CQ register block.  The offset comments give each
 * register's byte offset; the pad fields keep registers on 64-bit
 * strides to match the hardware layout.
 */
struct vnic_cq_ctrl {
	u64 ring_base;			/* 0x00 */
	u32 ring_size;			/* 0x08 */
	u32 pad0;
	u32 flow_control_enable;	/* 0x10 */
	u32 pad1;
	u32 color_enable;		/* 0x18 */
	u32 pad2;
	u32 cq_head;			/* 0x20 */
	u32 pad3;
	u32 cq_tail;			/* 0x28 */
	u32 pad4;
	u32 cq_tail_color;		/* 0x30 */
	u32 pad5;
	u32 interrupt_enable;		/* 0x38 */
	u32 pad6;
	u32 cq_entry_enable;		/* 0x40 */
	u32 pad7;
	u32 cq_message_enable;		/* 0x48 */
	u32 pad8;
	u32 interrupt_offset;		/* 0x50 */
	u32 pad9;
	u64 cq_message_addr;		/* 0x58 */
	u32 pad10;
};
/* Software state for one completion queue */
struct vnic_cq {
	unsigned int index;			/* CQ number on the device */
	struct vnic_dev *vdev;
	struct vnic_cq_ctrl __iomem *ctrl;	/* memory-mapped */
	struct vnic_dev_ring ring;		/* DMA descriptor ring */
	unsigned int to_clean;			/* next descriptor to service */
	unsigned int last_color;		/* expected color bit this pass */
};
/*
 * svnic_cq_service : Drain up to work_to_do completed descriptors from
 * the CQ, calling q_service on each.  Ownership is tracked by a color
 * bit: a descriptor whose color differs from cq->last_color was written
 * by hardware and not yet consumed.  Stops early if q_service returns
 * non-zero.  Returns the number of descriptors processed.
 */
static inline unsigned int svnic_cq_service(struct vnic_cq *cq,
	unsigned int work_to_do,
	int (*q_service)(struct vnic_dev *vdev, struct cq_desc *cq_desc,
	u8 type, u16 q_number, u16 completed_index, void *opaque),
	void *opaque)
{
	struct cq_desc *cq_desc;
	unsigned int work_done = 0;
	u16 q_number, completed_index;
	u8 type, color;
	cq_desc = (struct cq_desc *)((u8 *)cq->ring.descs +
		cq->ring.desc_size * cq->to_clean);
	cq_desc_dec(cq_desc, &type, &color,
		&q_number, &completed_index);
	while (color != cq->last_color) {
		if ((*q_service)(cq->vdev, cq_desc, type,
			q_number, completed_index, opaque))
			break;
		cq->to_clean++;
		if (cq->to_clean == cq->ring.desc_count) {
			cq->to_clean = 0;
			/* wrapped: the expected color flips each pass */
			cq->last_color = cq->last_color ? 0 : 1;
		}
		cq_desc = (struct cq_desc *)((u8 *)cq->ring.descs +
			cq->ring.desc_size * cq->to_clean);
		cq_desc_dec(cq_desc, &type, &color,
			&q_number, &completed_index);
		work_done++;
		if (work_done >= work_to_do)
			break;
	}
	return work_done;
}
void svnic_cq_free(struct vnic_cq *cq);
int svnic_cq_alloc(struct vnic_dev *vdev, struct vnic_cq *cq,
unsigned int index, unsigned int desc_count, unsigned int desc_size);
void svnic_cq_init(struct vnic_cq *cq, unsigned int flow_control_enable,
unsigned int color_enable, unsigned int cq_head, unsigned int cq_tail,
unsigned int cq_tail_color, unsigned int interrupt_enable,
unsigned int cq_entry_enable, unsigned int message_enable,
unsigned int interrupt_offset, u64 message_addr);
void svnic_cq_clean(struct vnic_cq *cq);
#endif /* _VNIC_CQ_H_ */

View File

@ -0,0 +1,62 @@
/*
* Copyright 2014 Cisco Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef _VNIC_CQ_FW_H_
#define _VNIC_CQ_FW_H_
#include "snic_fwint.h"
/*
 * vnic_cq_fw_service : Drain up to work_to_do firmware request
 * descriptors from the CQ, calling q_service on each.  Same color-bit
 * ownership protocol as svnic_cq_service(), but the descriptors are
 * decoded as struct snic_fw_req.  Stops early if q_service returns
 * non-zero.  Returns the number of descriptors processed.
 */
static inline unsigned int
vnic_cq_fw_service(struct vnic_cq *cq,
		   int (*q_service)(struct vnic_dev *vdev,
				    unsigned int index,
				    struct snic_fw_req *desc),
		   unsigned int work_to_do)
{
	struct snic_fw_req *desc;
	unsigned int work_done = 0;
	u8 color;
	desc = (struct snic_fw_req *)((u8 *)cq->ring.descs +
		cq->ring.desc_size * cq->to_clean);
	snic_color_dec(desc, &color);
	while (color != cq->last_color) {
		if ((*q_service)(cq->vdev, cq->index, desc))
			break;
		cq->to_clean++;
		if (cq->to_clean == cq->ring.desc_count) {
			cq->to_clean = 0;
			/* wrapped: the expected color flips each pass */
			cq->last_color = cq->last_color ? 0 : 1;
		}
		desc = (struct snic_fw_req *)((u8 *)cq->ring.descs +
			cq->ring.desc_size * cq->to_clean);
		snic_color_dec(desc, &color);
		work_done++;
		if (work_done >= work_to_do)
			break;
	}
	return work_done;
}
#endif /* _VNIC_CQ_FW_H_ */

View File

@ -0,0 +1,748 @@
/*
* Copyright 2014 Cisco Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/if_ether.h>
#include <linux/slab.h>
#include "vnic_resource.h"
#include "vnic_devcmd.h"
#include "vnic_dev.h"
#include "vnic_stats.h"
#include "vnic_wq.h"
#define VNIC_DVCMD_TMO 10000 /* Devcmd Timeout value */
#define VNIC_NOTIFY_INTR_MASK 0x0000ffff00000000ULL
/* State for the devcmd2 (work-queue based) device command channel */
struct devcmd2_controller {
	struct vnic_wq_ctrl __iomem *wq_ctrl;	/* WQ posted/fetch index regs */
	struct vnic_dev_ring results_ring;	/* DMA ring firmware writes results to */
	struct vnic_wq wq;			/* WQ carrying command descriptors */
	struct vnic_devcmd2 *cmd_ring;		/* command slots (WQ ring memory) */
	struct devcmd2_result *result;		/* result slots (results_ring memory) */
	u16 next_result;			/* next result slot to poll */
	u16 result_size;			/* number of result slots */
	int color;				/* expected result color bit */
};
/* One discovered BAR resource: mapped address and element count */
struct vnic_res {
	void __iomem *vaddr;
	unsigned int count;
};
/* Per-device vNIC state: discovered resources, devcmd channel, DMA areas */
struct vnic_dev {
	void *priv;				/* opaque owner (snic) pointer */
	struct pci_dev *pdev;
	struct vnic_res res[RES_TYPE_MAX];	/* resources found by discover_res */
	enum vnic_dev_intr_mode intr_mode;
	struct vnic_devcmd __iomem *devcmd;	/* legacy devcmd registers */
	struct vnic_devcmd_notify *notify;
	struct vnic_devcmd_notify notify_copy;
	dma_addr_t notify_pa;
	u32 *linkstatus;
	dma_addr_t linkstatus_pa;
	struct vnic_stats *stats;		/* DMA stats area (lazy alloc) */
	dma_addr_t stats_pa;
	struct vnic_devcmd_fw_info *fw_info;	/* cached firmware info (lazy alloc) */
	dma_addr_t fw_info_pa;
	u64 args[VNIC_DEVCMD_NARGS];		/* in/out args for devcmd calls */
	struct devcmd2_controller *devcmd2;
	/* dispatch to the active command transport (devcmd2 here) */
	int (*devcmd_rtn)(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
			  int wait);
};
#define VNIC_MAX_RES_HDR_SIZE \
(sizeof(struct vnic_resource_header) + \
sizeof(struct vnic_resource) * RES_TYPE_MAX)
#define VNIC_RES_STRIDE 128
/* svnic_dev_priv : Return the opaque owner pointer stored at create time. */
void *svnic_dev_priv(struct vnic_dev *vdev)
{
	return vdev->priv;
}
/*
 * vnic_dev_discover_res : Walk the resource table that firmware exposes
 * at the start of BAR0 (magic/version header followed by an
 * RES_TYPE_EOL-terminated entry list) and record each resource's mapped
 * address and count in vdev->res[].  Returns 0 on success, -EINVAL on a
 * malformed header or out-of-bounds resource.
 */
static int vnic_dev_discover_res(struct vnic_dev *vdev,
	struct vnic_dev_bar *bar, unsigned int num_bars)
{
	struct vnic_resource_header __iomem *rh;
	struct vnic_resource __iomem *r;
	u8 type;
	if (num_bars == 0)
		return -EINVAL;
	if (bar->len < VNIC_MAX_RES_HDR_SIZE) {
		pr_err("vNIC BAR0 res hdr length error\n");
		return -EINVAL;
	}
	rh = bar->vaddr;
	if (!rh) {
		pr_err("vNIC BAR0 res hdr not mem-mapped\n");
		return -EINVAL;
	}
	if (ioread32(&rh->magic) != VNIC_RES_MAGIC ||
	    ioread32(&rh->version) != VNIC_RES_VERSION) {
		pr_err("vNIC BAR0 res magic/version error exp (%lx/%lx) curr (%x/%x)\n",
			VNIC_RES_MAGIC, VNIC_RES_VERSION,
			ioread32(&rh->magic), ioread32(&rh->version));
		return -EINVAL;
	}
	/* resource entries start immediately after the header */
	r = (struct vnic_resource __iomem *)(rh + 1);
	while ((type = ioread8(&r->type)) != RES_TYPE_EOL) {
		u8 bar_num = ioread8(&r->bar);
		u32 bar_offset = ioread32(&r->bar_offset);
		u32 count = ioread32(&r->count);
		u32 len;
		r++;
		/* skip entries in BARs we did not map */
		if (bar_num >= num_bars)
			continue;
		if (!bar[bar_num].len || !bar[bar_num].vaddr)
			continue;
		switch (type) {
		case RES_TYPE_WQ:
		case RES_TYPE_RQ:
		case RES_TYPE_CQ:
		case RES_TYPE_INTR_CTRL:
			/* each count is stride bytes long */
			len = count * VNIC_RES_STRIDE;
			/*
			 * NOTE(review): bounds are checked against bar->len
			 * (BAR0) even when the entry names bar_num != 0 —
			 * presumably all resources live in BAR0 here; confirm
			 * before relying on multi-BAR layouts.
			 */
			if (len + bar_offset > bar->len) {
				pr_err("vNIC BAR0 resource %d out-of-bounds, offset 0x%x + size 0x%x > bar len 0x%lx\n",
					type, bar_offset,
					len,
					bar->len);
				return -EINVAL;
			}
			break;
		case RES_TYPE_INTR_PBA_LEGACY:
		case RES_TYPE_DEVCMD:
		case RES_TYPE_DEVCMD2:
			len = count;
			break;
		default:
			continue;
		}
		/* only the last entry of a given type is retained */
		vdev->res[type].count = count;
		vdev->res[type].vaddr = (char __iomem *)bar->vaddr + bar_offset;
	}
	return 0;
}
/*
 * svnic_dev_get_res_count : Number of instances of a resource type found
 * during discovery (0 if the device does not expose it).
 */
unsigned int svnic_dev_get_res_count(struct vnic_dev *vdev,
	enum vnic_res_type type)
{
	return vdev->res[type].count;
}
/*
 * svnic_dev_get_res : Mapped address of instance 'index' of a resource.
 * Queue/interrupt resources are arrays of VNIC_RES_STRIDE-byte register
 * blocks, so they are indexed; other types have a single base address
 * and 'index' is ignored.  Returns NULL if the type was not discovered.
 */
void __iomem *svnic_dev_get_res(struct vnic_dev *vdev, enum vnic_res_type type,
	unsigned int index)
{
	if (!vdev->res[type].vaddr)
		return NULL;
	switch (type) {
	case RES_TYPE_WQ:
	case RES_TYPE_RQ:
	case RES_TYPE_CQ:
	case RES_TYPE_INTR_CTRL:
		return (char __iomem *)vdev->res[type].vaddr +
					index * VNIC_RES_STRIDE;
	default:
		return (char __iomem *)vdev->res[type].vaddr;
	}
}
/*
 * svnic_dev_desc_ring_size : Fill in the ring's aligned geometry (count,
 * descriptor size, total size) from the requested values and return the
 * unaligned allocation size, which includes slack for 512-byte base
 * alignment.
 */
unsigned int svnic_dev_desc_ring_size(struct vnic_dev_ring *ring,
				      unsigned int desc_count,
				      unsigned int desc_size)
{
	/* The base address of the desc rings must be 512 byte aligned.
	 * Descriptor count is aligned to groups of 32 descriptors.  A
	 * count of 0 means the maximum 4096 descriptors.  Descriptor
	 * size is aligned to 16 bytes.
	 */
	unsigned int count_align = 32;
	unsigned int desc_align = 16;
	ring->base_align = 512;
	if (desc_count == 0)
		desc_count = 4096;
	ring->desc_count = ALIGN(desc_count, count_align);
	ring->desc_size = ALIGN(desc_size, desc_align);
	ring->size = ring->desc_count * ring->desc_size;
	ring->size_unaligned = ring->size + ring->base_align;
	return ring->size_unaligned;
}
/* svnic_dev_clear_desc_ring : Zero the aligned portion of the ring. */
void svnic_dev_clear_desc_ring(struct vnic_dev_ring *ring)
{
	memset(ring->descs, 0, ring->size);
}
/*
 * svnic_dev_alloc_desc_ring : Allocate a coherent DMA descriptor ring.
 * Over-allocates by base_align bytes, then derives the 512-byte-aligned
 * base address/pointer from the unaligned allocation.  Returns 0 or
 * -ENOMEM.
 */
int svnic_dev_alloc_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring,
	unsigned int desc_count, unsigned int desc_size)
{
	svnic_dev_desc_ring_size(ring, desc_count, desc_size);
	ring->descs_unaligned = pci_alloc_consistent(vdev->pdev,
		ring->size_unaligned,
		&ring->base_addr_unaligned);
	if (!ring->descs_unaligned) {
		pr_err("Failed to allocate ring (size=%d), aborting\n",
			(int)ring->size);
		return -ENOMEM;
	}
	/* align the DMA address, then offset the CPU pointer to match */
	ring->base_addr = ALIGN(ring->base_addr_unaligned,
		ring->base_align);
	ring->descs = (u8 *)ring->descs_unaligned +
		(ring->base_addr - ring->base_addr_unaligned);
	svnic_dev_clear_desc_ring(ring);
	/* one descriptor is kept unused to distinguish full from empty */
	ring->desc_avail = ring->desc_count - 1;
	return 0;
}
/*
 * svnic_dev_free_desc_ring : Release a ring allocated by
 * svnic_dev_alloc_desc_ring().  The descs test guards against freeing a
 * ring that was never (or already) allocated, since the unaligned
 * fields would then be stale.
 */
void svnic_dev_free_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring)
{
	if (ring->descs) {
		pci_free_consistent(vdev->pdev,
			ring->size_unaligned,
			ring->descs_unaligned,
			ring->base_addr_unaligned);
		ring->descs = NULL;
	}
}
/*
 * _svnic_dev_cmd2 : Issue a device command over the devcmd2 (work-queue
 * based) channel.  The command descriptor is written into the slot at
 * the hardware posted index; results are polled from the results ring,
 * with ownership signalled by a color bit that flips on every ring
 * wrap.  'wait' is the poll budget in 100us ticks.  Arguments are
 * passed in/out through vdev->args.  Returns 0, a positive firmware
 * error code, or a negative errno.
 */
static int _svnic_dev_cmd2(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
	int wait)
{
	struct devcmd2_controller *dc2c = vdev->devcmd2;
	struct devcmd2_result *result = dc2c->result + dc2c->next_result;
	unsigned int i;
	int delay;
	int err;
	u32 posted;
	u32 new_posted;
	posted = ioread32(&dc2c->wq_ctrl->posted_index);
	if (posted == 0xFFFFFFFF) { /* check for hardware gone */
		/* Hardware surprise removal: return error */
		return -ENODEV;
	}
	new_posted = (posted + 1) % DEVCMD2_RING_SIZE;
	dc2c->cmd_ring[posted].cmd = cmd;
	dc2c->cmd_ring[posted].flags = 0;
	if ((_CMD_FLAGS(cmd) & _CMD_FLAGS_NOWAIT))
		dc2c->cmd_ring[posted].flags |= DEVCMD2_FNORESULT;
	if (_CMD_DIR(cmd) & _CMD_DIR_WRITE) {
		/* copy input arguments into the command descriptor */
		for (i = 0; i < VNIC_DEVCMD_NARGS; i++)
			dc2c->cmd_ring[posted].args[i] = vdev->args[i];
	}
	/* Adding write memory barrier prevents compiler and/or CPU
	 * reordering, thus avoiding descriptor posting before
	 * descriptor is initialized. Otherwise, hardware can read
	 * stale descriptor fields.
	 */
	wmb();
	iowrite32(new_posted, &dc2c->wq_ctrl->posted_index);
	/* fire-and-forget commands carry no result to poll for */
	if (dc2c->cmd_ring[posted].flags & DEVCMD2_FNORESULT)
		return 0;
	for (delay = 0; delay < wait; delay++) {
		udelay(100);
		if (result->color == dc2c->color) {
			/* consume this result slot; flip expected color on wrap */
			dc2c->next_result++;
			if (dc2c->next_result == dc2c->result_size) {
				dc2c->next_result = 0;
				dc2c->color = dc2c->color ? 0 : 1;
			}
			if (result->error) {
				err = (int) result->error;
				/* CMD_CAPABILITY probes may be legitimately unknown */
				if (err != ERR_ECMDUNKNOWN ||
				    cmd != CMD_CAPABILITY)
					pr_err("Error %d devcmd %d\n",
						err, _CMD_N(cmd));
				return err;
			}
			if (_CMD_DIR(cmd) & _CMD_DIR_READ) {
				/*
				 * Adding the rmb() prevents the compiler
				 * and/or CPU from reordering the reads which
				 * would potentially result in reading stale
				 * values.
				 */
				rmb();
				for (i = 0; i < VNIC_DEVCMD_NARGS; i++)
					vdev->args[i] = result->results[i];
			}
			return 0;
		}
	}
	pr_err("Timed out devcmd %d\n", _CMD_N(cmd));
	return -ETIMEDOUT;
}
/*
 * svnic_dev_init_devcmd2 : Bring up the devcmd2 command channel —
 * allocate the command WQ and results ring, hand the results ring's DMA
 * address to firmware via CMD_INITIALIZE_DEVCMD2, and install
 * _svnic_dev_cmd2 as the command dispatch routine.  Idempotent; returns
 * 0 on success or a negative errno with everything torn back down.
 */
static int svnic_dev_init_devcmd2(struct vnic_dev *vdev)
{
	struct devcmd2_controller *dc2c = NULL;
	unsigned int fetch_idx;
	int ret;
	void __iomem *p;
	if (vdev->devcmd2)
		return 0;
	p = svnic_dev_get_res(vdev, RES_TYPE_DEVCMD2, 0);
	if (!p)
		return -ENODEV;
	dc2c = kzalloc(sizeof(*dc2c), GFP_ATOMIC);
	if (!dc2c)
		return -ENOMEM;
	vdev->devcmd2 = dc2c;
	/* firmware writes results with color starting at 1 */
	dc2c->color = 1;
	dc2c->result_size = DEVCMD2_RING_SIZE;
	ret = vnic_wq_devcmd2_alloc(vdev,
				    &dc2c->wq,
				    DEVCMD2_RING_SIZE,
				    DEVCMD2_DESC_SIZE);
	if (ret)
		goto err_free_devcmd2;
	fetch_idx = ioread32(&dc2c->wq.ctrl->fetch_index);
	if (fetch_idx == 0xFFFFFFFF) { /* check for hardware gone */
		/* Hardware surprise removal: reset fetch_index */
		fetch_idx = 0;
	}
	/*
	 * Don't change fetch_index ever and
	 * set posted_index same as fetch_index
	 * when setting up the WQ for devcmd2.
	 */
	vnic_wq_init_start(&dc2c->wq, 0, fetch_idx, fetch_idx, 0, 0);
	svnic_wq_enable(&dc2c->wq);
	ret = svnic_dev_alloc_desc_ring(vdev,
					&dc2c->results_ring,
					DEVCMD2_RING_SIZE,
					DEVCMD2_DESC_SIZE);
	if (ret)
		goto err_free_wq;
	dc2c->result = (struct devcmd2_result *) dc2c->results_ring.descs;
	dc2c->cmd_ring = (struct vnic_devcmd2 *) dc2c->wq.ring.descs;
	dc2c->wq_ctrl = dc2c->wq.ctrl;
	/* tell firmware where to write command results */
	vdev->args[0] = (u64) dc2c->results_ring.base_addr | VNIC_PADDR_TARGET;
	vdev->args[1] = DEVCMD2_RING_SIZE;
	ret = _svnic_dev_cmd2(vdev, CMD_INITIALIZE_DEVCMD2, VNIC_DVCMD_TMO);
	if (ret < 0)
		goto err_free_desc_ring;
	vdev->devcmd_rtn = &_svnic_dev_cmd2;
	pr_info("DEVCMD2 Initialized.\n");
	return ret;
err_free_desc_ring:
	svnic_dev_free_desc_ring(vdev, &dc2c->results_ring);
err_free_wq:
	svnic_wq_disable(&dc2c->wq);
	svnic_wq_free(&dc2c->wq);
err_free_devcmd2:
	kfree(dc2c);
	vdev->devcmd2 = NULL;
	return ret;
} /* end of svnic_dev_init_devcmd2 */
/*
 * vnic_dev_deinit_devcmd2 : Tear down the devcmd2 channel — detach it
 * from the vdev first so no further commands are dispatched, then free
 * the results ring and command WQ.
 */
static void vnic_dev_deinit_devcmd2(struct vnic_dev *vdev)
{
	struct devcmd2_controller *dc2c = vdev->devcmd2;
	vdev->devcmd2 = NULL;
	vdev->devcmd_rtn = NULL;
	svnic_dev_free_desc_ring(vdev, &dc2c->results_ring);
	svnic_wq_disable(&dc2c->wq);
	svnic_wq_free(&dc2c->wq);
	kfree(dc2c);
}
/*
 * svnic_dev_cmd : Public two-argument devcmd wrapper.  Marshals a0/a1
 * into vdev->args, dispatches through the installed transport
 * (devcmd_rtn), and copies the (possibly updated) first two args back
 * out.  Returns the transport's status code.
 */
int svnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
	u64 *a0, u64 *a1, int wait)
{
	int err;
	memset(vdev->args, 0, sizeof(vdev->args));
	vdev->args[0] = *a0;
	vdev->args[1] = *a1;
	err = (*vdev->devcmd_rtn)(vdev, cmd, wait);
	*a0 = vdev->args[0];
	*a1 = vdev->args[1];
	return err;
}
/*
 * svnic_dev_fw_info : Return firmware version info.  On first call,
 * allocates a coherent DMA buffer and asks firmware to fill it
 * (CMD_MCPU_FW_INFO); subsequent calls return the cached buffer with
 * err == 0.  NOTE(review): if the devcmd fails on the first call, the
 * buffer is still cached and handed back — verify callers treat a
 * non-zero return as "contents invalid".
 */
int svnic_dev_fw_info(struct vnic_dev *vdev,
	struct vnic_devcmd_fw_info **fw_info)
{
	u64 a0, a1 = 0;
	int wait = VNIC_DVCMD_TMO;
	int err = 0;
	if (!vdev->fw_info) {
		vdev->fw_info = pci_alloc_consistent(vdev->pdev,
			sizeof(struct vnic_devcmd_fw_info),
			&vdev->fw_info_pa);
		if (!vdev->fw_info)
			return -ENOMEM;
		a0 = vdev->fw_info_pa;
		/* only get fw_info once and cache it */
		err = svnic_dev_cmd(vdev, CMD_MCPU_FW_INFO, &a0, &a1, wait);
	}
	*fw_info = vdev->fw_info;
	return err;
}
/*
 * Read one field from the device-specific config region via CMD_DEV_SPEC.
 * @offset: byte offset within the region.
 * @size:   field width in bytes; must be 1, 2, 4 or 8 -- any other
 *          value hits BUG().
 * @value:  destination, written with the width selected by @size.
 * Returns the devcmd status; @value is stored from a0 regardless.
 */
int svnic_dev_spec(struct vnic_dev *vdev, unsigned int offset,
	unsigned int size, void *value)
{
	u64 a0, a1;
	int wait = VNIC_DVCMD_TMO;
	int err;

	a0 = offset;
	a1 = size;

	err = svnic_dev_cmd(vdev, CMD_DEV_SPEC, &a0, &a1, wait);

	/* Narrow the 64-bit result to the caller's field width. */
	switch (size) {
	case 1:
		*(u8 *)value = (u8)a0;
		break;
	case 2:
		*(u16 *)value = (u16)a0;
		break;
	case 4:
		*(u32 *)value = (u32)a0;
		break;
	case 8:
		*(u64 *)value = a0;
		break;
	default:
		BUG();
		break;
	}

	return err;
}
/* Ask firmware to zero the device statistics counters. */
int svnic_dev_stats_clear(struct vnic_dev *vdev)
{
	u64 arg0 = 0;
	u64 arg1 = 0;

	return svnic_dev_cmd(vdev, CMD_STATS_CLEAR, &arg0, &arg1,
			     VNIC_DVCMD_TMO);
}
/*
 * Have firmware DMA the current statistics into a host buffer and hand
 * the caller a pointer to it.  The buffer is allocated lazily on first
 * use and reused for every subsequent dump.
 */
int svnic_dev_stats_dump(struct vnic_dev *vdev, struct vnic_stats **stats)
{
	u64 arg0, arg1;

	if (!vdev->stats) {
		vdev->stats = pci_alloc_consistent(vdev->pdev,
			sizeof(struct vnic_stats), &vdev->stats_pa);
		if (!vdev->stats)
			return -ENOMEM;
	}

	*stats = vdev->stats;
	arg0 = vdev->stats_pa;
	arg1 = sizeof(struct vnic_stats);

	return svnic_dev_cmd(vdev, CMD_STATS_DUMP, &arg0, &arg1,
			     VNIC_DVCMD_TMO);
}
/* Close the vNIC (CMD_CLOSE); no arguments in or out. */
int svnic_dev_close(struct vnic_dev *vdev)
{
	u64 arg0 = 0;
	u64 arg1 = 0;

	return svnic_dev_cmd(vdev, CMD_CLOSE, &arg0, &arg1, VNIC_DVCMD_TMO);
}
/*
 * Enable the virtual link, preferring the waiting variant.  Firmware
 * that does not know CMD_ENABLE_WAIT reports ERR_ECMDUNKNOWN, in which
 * case we fall back to the plain CMD_ENABLE.
 */
int svnic_dev_enable_wait(struct vnic_dev *vdev)
{
	u64 arg0 = 0;
	u64 arg1 = 0;
	int ret;

	ret = svnic_dev_cmd(vdev, CMD_ENABLE_WAIT, &arg0, &arg1,
			    VNIC_DVCMD_TMO);
	if (ret != ERR_ECMDUNKNOWN)
		return ret;

	return svnic_dev_cmd(vdev, CMD_ENABLE, &arg0, &arg1, VNIC_DVCMD_TMO);
}
/* Disable the virtual link (CMD_DISABLE). */
int svnic_dev_disable(struct vnic_dev *vdev)
{
	u64 arg0 = 0;
	u64 arg1 = 0;

	return svnic_dev_cmd(vdev, CMD_DISABLE, &arg0, &arg1, VNIC_DVCMD_TMO);
}
/* Start the open sequence; @arg carries CMD_OPENF_* flags. */
int svnic_dev_open(struct vnic_dev *vdev, int arg)
{
	u64 arg0 = (u32)arg;
	u64 arg1 = 0;

	return svnic_dev_cmd(vdev, CMD_OPEN, &arg0, &arg1, VNIC_DVCMD_TMO);
}
/*
 * Poll CMD_OPEN_STATUS.  On success *done is 1 when the open sequence
 * has completed (firmware reports a0 == 0) and 0 while still pending.
 */
int svnic_dev_open_done(struct vnic_dev *vdev, int *done)
{
	u64 arg0 = 0;
	u64 arg1 = 0;
	int ret;

	*done = 0;

	ret = svnic_dev_cmd(vdev, CMD_OPEN_STATUS, &arg0, &arg1,
			    VNIC_DVCMD_TMO);
	if (ret)
		return ret;

	*done = (arg0 == 0);

	return 0;
}
/*
 * Register the async-notify buffer with firmware and bind it to
 * interrupt @intr.  The DMA buffer is allocated once and reused; it is
 * freed in svnic_dev_unregister().
 */
int svnic_dev_notify_set(struct vnic_dev *vdev, u16 intr)
{
	u64 arg0, arg1;

	if (!vdev->notify) {
		vdev->notify = pci_alloc_consistent(vdev->pdev,
			sizeof(struct vnic_devcmd_notify),
			&vdev->notify_pa);
		if (!vdev->notify)
			return -ENOMEM;
	}

	arg0 = vdev->notify_pa;
	/* Upper half of a1 = interrupt number, lower half = buffer size. */
	arg1 = ((u64)intr << 32) & VNIC_NOTIFY_INTR_MASK;
	arg1 += sizeof(struct vnic_devcmd_notify);

	return svnic_dev_cmd(vdev, CMD_NOTIFY, &arg0, &arg1, VNIC_DVCMD_TMO);
}
/*
 * Unregister the async-notify buffer: paddr 0 unsets the buffer and an
 * intr number of -1 (all mask bits set) unregisters the interrupt.
 */
void svnic_dev_notify_unset(struct vnic_dev *vdev)
{
	u64 arg0 = 0;
	u64 arg1 = VNIC_NOTIFY_INTR_MASK + sizeof(struct vnic_devcmd_notify);

	svnic_dev_cmd(vdev, CMD_NOTIFY, &arg0, &arg1, VNIC_DVCMD_TMO);
}
/*
 * Snapshot the firmware-written notify area into notify_copy.
 *
 * The device may update the buffer at any time, so the copy is retried
 * until the first word (a checksum over all following words) matches
 * the sum of the rest -- i.e. until a self-consistent snapshot is read.
 * Returns 1 when notify_copy holds a valid snapshot, 0 if notify was
 * never registered.
 */
static int vnic_dev_notify_ready(struct vnic_dev *vdev)
{
	u32 *words;
	unsigned int nwords = sizeof(struct vnic_devcmd_notify) / 4;
	unsigned int i;
	u32 csum;

	if (!vdev->notify)
		return 0;

	do {
		csum = 0;
		memcpy(&vdev->notify_copy, vdev->notify,
			sizeof(struct vnic_devcmd_notify));
		words = (u32 *)&vdev->notify_copy;
		/* words[0] is the checksum itself; sum the rest. */
		for (i = 1; i < nwords; i++)
			csum += words[i];
	} while (csum != words[0]);

	return 1;
}
/* Initialize the virtual link; @arg carries CMD_INITF_* flags. */
int svnic_dev_init(struct vnic_dev *vdev, int arg)
{
	u64 arg0 = (u32)arg;
	u64 arg1 = 0;

	return svnic_dev_cmd(vdev, CMD_INIT, &arg0, &arg1, VNIC_DVCMD_TMO);
}
/*
 * Report link state: prefer the dedicated linkstatus DMA word when
 * present, otherwise consult a fresh notify snapshot (0 if none).
 */
int svnic_dev_link_status(struct vnic_dev *vdev)
{
	if (vdev->linkstatus)
		return *vdev->linkstatus;

	return vnic_dev_notify_ready(vdev) ? vdev->notify_copy.link_state : 0;
}
/* Running count of link-down transitions from the notify area (0 if unset). */
u32 svnic_dev_link_down_cnt(struct vnic_dev *vdev)
{
	return vnic_dev_notify_ready(vdev) ?
		vdev->notify_copy.link_down_cnt : 0;
}
/* Record which interrupt mode (INTx/MSI/MSI-X) the driver selected. */
void svnic_dev_set_intr_mode(struct vnic_dev *vdev,
	enum vnic_dev_intr_mode intr_mode)
{
	vdev->intr_mode = intr_mode;
}
/* Return the interrupt mode previously set via svnic_dev_set_intr_mode(). */
enum vnic_dev_intr_mode svnic_dev_get_intr_mode(struct vnic_dev *vdev)
{
	return vdev->intr_mode;
}
/*
 * Free everything owned by a vnic_dev: lazily-allocated DMA buffers
 * (notify, linkstatus, stats, fw_info), the devcmd2 transport, and the
 * vnic_dev itself.  Safe to call with a NULL @vdev.
 */
void svnic_dev_unregister(struct vnic_dev *vdev)
{
	if (vdev) {
		if (vdev->notify)
			pci_free_consistent(vdev->pdev,
				sizeof(struct vnic_devcmd_notify),
				vdev->notify,
				vdev->notify_pa);
		if (vdev->linkstatus)
			pci_free_consistent(vdev->pdev,
				sizeof(u32),
				vdev->linkstatus,
				vdev->linkstatus_pa);
		if (vdev->stats)
			pci_free_consistent(vdev->pdev,
				sizeof(struct vnic_stats),
				vdev->stats, vdev->stats_pa);
		if (vdev->fw_info)
			pci_free_consistent(vdev->pdev,
				sizeof(struct vnic_devcmd_fw_info),
				vdev->fw_info, vdev->fw_info_pa);
		/* Tear down the devcmd2 transport last among resources. */
		if (vdev->devcmd2)
			vnic_dev_deinit_devcmd2(vdev);

		kfree(vdev);
	}
}
/*
 * Allocate a vnic_dev (unless the caller supplies one) and discover the
 * device resources behind @bar.  On discovery failure the vnic_dev --
 * including a caller-supplied one -- is released via
 * svnic_dev_unregister() and NULL is returned.
 */
struct vnic_dev *svnic_dev_alloc_discover(struct vnic_dev *vdev,
					  void *priv,
					  struct pci_dev *pdev,
					  struct vnic_dev_bar *bar,
					  unsigned int num_bars)
{
	struct vnic_dev *dev = vdev;

	if (!dev) {
		dev = kzalloc(sizeof(*dev), GFP_ATOMIC);
		if (!dev)
			return NULL;
	}

	dev->priv = priv;
	dev->pdev = pdev;

	if (vnic_dev_discover_res(dev, bar, num_bars)) {
		svnic_dev_unregister(dev);

		return NULL;
	}

	return dev;
} /* end of svnic_dev_alloc_discover */
/*
* fallback option is left to keep the interface common for other vnics.
*/
int svnic_dev_cmd_init(struct vnic_dev *vdev, int fallback)
{
int err = -ENODEV;
void __iomem *p;
p = svnic_dev_get_res(vdev, RES_TYPE_DEVCMD2, 0);
if (p)
err = svnic_dev_init_devcmd2(vdev);
else
pr_err("DEVCMD2 resource not found.\n");
return err;
} /* end of svnic_dev_cmd_init */

View File

@ -0,0 +1,110 @@
/*
* Copyright 2014 Cisco Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
/* Public interface of the snic vNIC device layer (vnic_dev.c). */
#ifndef _VNIC_DEV_H_
#define _VNIC_DEV_H_

#include "vnic_resource.h"
#include "vnic_devcmd.h"

#ifndef VNIC_PADDR_TARGET
#define VNIC_PADDR_TARGET	0x0000000000000000ULL
#endif

#ifndef readq
/*
 * 64-bit MMIO fallback built from two 32-bit accesses (high word at
 * reg + 4).  Not a single bus transaction, so the two halves are not
 * read/written atomically.
 */
static inline u64 readq(void __iomem *reg)
{
	return ((u64)readl(reg + 0x4UL) << 32) | (u64)readl(reg);
}

static inline void writeq(u64 val, void __iomem *reg)
{
	writel(lower_32_bits(val), reg);
	writel(upper_32_bits(val), reg + 0x4UL);
}
#endif

/* Interrupt delivery mode chosen at probe time. */
enum vnic_dev_intr_mode {
	VNIC_DEV_INTR_MODE_UNKNOWN,
	VNIC_DEV_INTR_MODE_INTX,
	VNIC_DEV_INTR_MODE_MSI,
	VNIC_DEV_INTR_MODE_MSIX,
};

/* One mapped PCI BAR handed to svnic_dev_alloc_discover(). */
struct vnic_dev_bar {
	void __iomem *vaddr;
	dma_addr_t bus_addr;
	unsigned long len;
};

/*
 * A DMA descriptor ring.  The *_unaligned fields track the raw
 * allocation; descs/base_addr point at the aligned region inside it.
 */
struct vnic_dev_ring {
	void *descs;
	size_t size;
	dma_addr_t base_addr;
	size_t base_align;
	void *descs_unaligned;
	size_t size_unaligned;
	dma_addr_t base_addr_unaligned;
	unsigned int desc_size;
	unsigned int desc_count;
	unsigned int desc_avail;
};

struct vnic_dev;
struct vnic_stats;

void *svnic_dev_priv(struct vnic_dev *vdev);
unsigned int svnic_dev_get_res_count(struct vnic_dev *vdev,
	enum vnic_res_type type);
void __iomem *svnic_dev_get_res(struct vnic_dev *vdev, enum vnic_res_type type,
	unsigned int index);
unsigned int svnic_dev_desc_ring_size(struct vnic_dev_ring *ring,
	unsigned int desc_count,
	unsigned int desc_size);
void svnic_dev_clear_desc_ring(struct vnic_dev_ring *ring);
int svnic_dev_alloc_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring,
	unsigned int desc_count, unsigned int desc_size);
void svnic_dev_free_desc_ring(struct vnic_dev *vdev,
	struct vnic_dev_ring *ring);
int svnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
	u64 *a0, u64 *a1, int wait);
int svnic_dev_fw_info(struct vnic_dev *vdev,
	struct vnic_devcmd_fw_info **fw_info);
int svnic_dev_spec(struct vnic_dev *vdev, unsigned int offset,
	unsigned int size, void *value);
int svnic_dev_stats_clear(struct vnic_dev *vdev);
int svnic_dev_stats_dump(struct vnic_dev *vdev, struct vnic_stats **stats);
int svnic_dev_notify_set(struct vnic_dev *vdev, u16 intr);
void svnic_dev_notify_unset(struct vnic_dev *vdev);
int svnic_dev_link_status(struct vnic_dev *vdev);
u32 svnic_dev_link_down_cnt(struct vnic_dev *vdev);
int svnic_dev_close(struct vnic_dev *vdev);
int svnic_dev_enable_wait(struct vnic_dev *vdev);
int svnic_dev_disable(struct vnic_dev *vdev);
int svnic_dev_open(struct vnic_dev *vdev, int arg);
int svnic_dev_open_done(struct vnic_dev *vdev, int *done);
int svnic_dev_init(struct vnic_dev *vdev, int arg);
struct vnic_dev *svnic_dev_alloc_discover(struct vnic_dev *vdev,
	void *priv, struct pci_dev *pdev,
	struct vnic_dev_bar *bar,
	unsigned int num_bars);
void svnic_dev_set_intr_mode(struct vnic_dev *vdev,
	enum vnic_dev_intr_mode intr_mode);
enum vnic_dev_intr_mode svnic_dev_get_intr_mode(struct vnic_dev *vdev);
void svnic_dev_unregister(struct vnic_dev *vdev);
int svnic_dev_cmd_init(struct vnic_dev *vdev, int fallback);

#endif /* _VNIC_DEV_H_ */

View File

@ -0,0 +1,270 @@
/*
* Copyright 2014 Cisco Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef _VNIC_DEVCMD_H_
#define _VNIC_DEVCMD_H_

/*
 * A devcmd opcode is a packed bit-field:
 *   [dir:2][flags:6][vtype:10][nr:14]  (MSB to LSB)
 * built with _CMDCF()/_CMDC()/_CMDCNW() and decoded with _CMD_*() below.
 */
#define _CMD_NBITS	14
#define _CMD_VTYPEBITS	10
#define _CMD_FLAGSBITS	6
#define _CMD_DIRBITS	2

#define _CMD_NMASK	((1 << _CMD_NBITS)-1)
#define _CMD_VTYPEMASK	((1 << _CMD_VTYPEBITS)-1)
#define _CMD_FLAGSMASK	((1 << _CMD_FLAGSBITS)-1)
#define _CMD_DIRMASK	((1 << _CMD_DIRBITS)-1)

#define _CMD_NSHIFT	0
#define _CMD_VTYPESHIFT	(_CMD_NSHIFT+_CMD_NBITS)
#define _CMD_FLAGSSHIFT	(_CMD_VTYPESHIFT+_CMD_VTYPEBITS)
#define _CMD_DIRSHIFT	(_CMD_FLAGSSHIFT+_CMD_FLAGSBITS)

/*
 * Direction bits (from host perspective).
 */
#define _CMD_DIR_NONE	0U
#define _CMD_DIR_WRITE	1U
#define _CMD_DIR_READ	2U
#define _CMD_DIR_RW	(_CMD_DIR_WRITE | _CMD_DIR_READ)

/*
 * Flag bits.
 */
#define _CMD_FLAGS_NONE		0U
#define _CMD_FLAGS_NOWAIT	1U

/*
 * vNIC type bits.
 */
#define _CMD_VTYPE_NONE	0U
#define _CMD_VTYPE_ENET	1U
#define _CMD_VTYPE_FC	2U
#define _CMD_VTYPE_SCSI	4U
#define _CMD_VTYPE_ALL	(_CMD_VTYPE_ENET | _CMD_VTYPE_FC | _CMD_VTYPE_SCSI)

/*
 * Used to create cmds..
 */
#define _CMDCF(dir, flags, vtype, nr)	\
	(((dir) << _CMD_DIRSHIFT) |	\
	((flags) << _CMD_FLAGSSHIFT) |	\
	((vtype) << _CMD_VTYPESHIFT) |	\
	((nr) << _CMD_NSHIFT))
#define _CMDC(dir, vtype, nr)	_CMDCF(dir, 0, vtype, nr)
#define _CMDCNW(dir, vtype, nr)	_CMDCF(dir, _CMD_FLAGS_NOWAIT, vtype, nr)

/*
 * Used to decode cmds..
 */
#define _CMD_DIR(cmd)	(((cmd) >> _CMD_DIRSHIFT) & _CMD_DIRMASK)
#define _CMD_FLAGS(cmd)	(((cmd) >> _CMD_FLAGSSHIFT) & _CMD_FLAGSMASK)
#define _CMD_VTYPE(cmd)	(((cmd) >> _CMD_VTYPESHIFT) & _CMD_VTYPEMASK)
#define _CMD_N(cmd)	(((cmd) >> _CMD_NSHIFT) & _CMD_NMASK)
/*
 * Device command opcodes.  Per-entry comments document the a0/a1
 * argument/result conventions used by svnic_dev_cmd().
 */
enum vnic_devcmd_cmd {
	CMD_NONE = _CMDC(_CMD_DIR_NONE, _CMD_VTYPE_NONE, 0),

	/* mcpu fw info in mem: (u64)a0=paddr to struct vnic_devcmd_fw_info */
	CMD_MCPU_FW_INFO = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 1),

	/* dev-specific block member:
	 * in: (u16)a0=offset,(u8)a1=size
	 * out: a0=value */
	CMD_DEV_SPEC = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 2),

	/* stats clear */
	CMD_STATS_CLEAR = _CMDCNW(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 3),

	/* stats dump in mem: (u64)a0=paddr to stats area,
	 * (u16)a1=sizeof stats area */
	CMD_STATS_DUMP = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 4),

	/* nic_cfg in (u32)a0 */
	CMD_NIC_CFG = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 16),

	/* set struct vnic_devcmd_notify buffer in mem:
	 * in:
	 *   (u64)a0=paddr to notify (set paddr=0 to unset)
	 *   (u32)a1 & 0x00000000ffffffff=sizeof(struct vnic_devcmd_notify)
	 *   (u16)a1 & 0x0000ffff00000000=intr num (-1 for no intr)
	 * out:
	 *   (u32)a1 = effective size
	 */
	CMD_NOTIFY = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 21),

	/* initiate open sequence (u32)a0=flags (see CMD_OPENF_*) */
	CMD_OPEN = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 23),

	/* open status:
	 * out: a0=0 open complete, a0=1 open in progress */
	CMD_OPEN_STATUS = _CMDC(_CMD_DIR_READ, _CMD_VTYPE_ALL, 24),

	/* close vnic */
	CMD_CLOSE = _CMDC(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 25),

	/* initialize virtual link: (u32)a0=flags (see CMD_INITF_*) */
	CMD_INIT = _CMDCNW(_CMD_DIR_READ, _CMD_VTYPE_ALL, 26),

	/* enable virtual link */
	CMD_ENABLE = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 28),

	/* enable virtual link, waiting variant. */
	CMD_ENABLE_WAIT = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 28),

	/* disable virtual link */
	CMD_DISABLE = _CMDC(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 29),

	/* stats dump all vnics on uplink in mem: (u64)a0=paddr (u32)a1=uif */
	CMD_STATS_DUMP_ALL = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 30),

	/* init status:
	 * out: a0=0 init complete, a0=1 init in progress
	 * if a0=0, a1=errno */
	CMD_INIT_STATUS = _CMDC(_CMD_DIR_READ, _CMD_VTYPE_ALL, 31),

	/* undo initialize of virtual link */
	CMD_DEINIT = _CMDCNW(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 34),

	/* check fw capability of a cmd:
	 * in: (u32)a0=cmd
	 * out: (u32)a0=errno, 0:valid cmd, a1=supported VNIC_STF_* bits */
	CMD_CAPABILITY = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 36),

	/*
	 * Initialization for the devcmd2 interface.
	 * in: (u64) a0=host result buffer physical address
	 * in: (u16) a1=number of entries in result buffer
	 */
	CMD_INITIALIZE_DEVCMD2 = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 57)
};
/* flags for CMD_OPEN */
#define CMD_OPENF_OPROM		0x1	/* open coming from option rom */

/* flags for CMD_INIT */
#define CMD_INITF_DEFAULT_MAC	0x1	/* init with default mac addr */

/* flags for CMD_PACKET_FILTER */
#define CMD_PFILTER_DIRECTED		0x01
#define CMD_PFILTER_MULTICAST		0x02
#define CMD_PFILTER_BROADCAST		0x04
#define CMD_PFILTER_PROMISCUOUS		0x08
#define CMD_PFILTER_ALL_MULTICAST	0x10

/* Bits in the devcmd status register. */
enum vnic_devcmd_status {
	STAT_NONE = 0,
	STAT_BUSY = 1 << 0,	/* cmd in progress */
	STAT_ERROR = 1 << 1,	/* last cmd caused error (code in a0) */
};

/* Error codes firmware returns in a0 when STAT_ERROR is set. */
enum vnic_devcmd_error {
	ERR_SUCCESS = 0,
	ERR_EINVAL = 1,
	ERR_EFAULT = 2,
	ERR_EPERM = 3,
	ERR_EBUSY = 4,
	ERR_ECMDUNKNOWN = 5,
	ERR_EBADSTATE = 6,
	ERR_ENOMEM = 7,
	ERR_ETIMEDOUT = 8,
	ERR_ELINKDOWN = 9,
};
/* Filled by firmware in response to CMD_MCPU_FW_INFO. */
struct vnic_devcmd_fw_info {
	char fw_version[32];
	char fw_build[32];
	char hw_version[32];
	char hw_serial_number[32];
};

/*
 * Async notification area written by firmware (see CMD_NOTIFY).
 * csum is a sum over all following u32 words; readers retry until it
 * matches (see vnic_dev_notify_ready()).
 */
struct vnic_devcmd_notify {
	u32 csum;		/* checksum over following words */

	u32 link_state;		/* link up == 1 */
	u32 port_speed;		/* effective port speed (rate limit) */
	u32 mtu;		/* MTU */
	u32 msglvl;		/* requested driver msg lvl */
	u32 uif;		/* uplink interface */
	u32 status;		/* status bits (see VNIC_STF_*) */
	u32 error;		/* error code (see ERR_*) for first ERR */
	u32 link_down_cnt;	/* running count of link down transitions */
};
#define VNIC_STF_FATAL_ERR	0x0001	/* fatal fw error */

struct vnic_devcmd_provinfo {
	u8 oui[3];
	u8 type;
	u8 data[0];
};

/*
 * Writing cmd register causes STAT_BUSY to get set in status register.
 * When cmd completes, STAT_BUSY will be cleared.
 *
 * If cmd completed successfully STAT_ERROR will be clear
 * and args registers contain cmd-specific results.
 *
 * If cmd error, STAT_ERROR will be set and args[0] contains error code.
 *
 * status register is read-only.  While STAT_BUSY is set,
 * all other register contents are read-only.
 */

/* Make sizeof(vnic_devcmd) a power-of-2 for I/O BAR. */
#define VNIC_DEVCMD_NARGS 15
struct vnic_devcmd {
	u32 status;			/* RO */
	u32 cmd;			/* RW */
	u64 args[VNIC_DEVCMD_NARGS];	/* RW cmd args (little-endian) */
};

/*
 * Version 2 of the interface.
 *
 * Some things are carried over, notably the vnic_devcmd_cmd enum.
 */

/*
 * Flags for vnic_devcmd2.flags
 */
#define DEVCMD2_FNORESULT	0x1	/* Don't copy result to host */

#define VNIC_DEVCMD2_NARGS	VNIC_DEVCMD_NARGS
/* One posted command in the devcmd2 work queue ring. */
struct vnic_devcmd2 {
	u16 pad;
	u16 flags;
	u32 cmd;	/* same command #defines as original */
	u64 args[VNIC_DEVCMD2_NARGS];
};

#define VNIC_DEVCMD2_NRESULTS	VNIC_DEVCMD_NARGS
/* One completion entry in the devcmd2 results ring. */
struct devcmd2_result {
	u64 results[VNIC_DEVCMD2_NRESULTS];
	u32 pad;
	u16 completed_index;	/* into copy WQ */
	u8 error;		/* same error codes as original */
	u8 color;		/* 0 or 1 as with completion queues */
};

#define DEVCMD2_RING_SIZE	32
#define DEVCMD2_DESC_SIZE	128

#define DEVCMD2_RESULTS_SIZE_MAX	((1 << 16) - 1)

#endif /* _VNIC_DEVCMD_H_ */

View File

@ -0,0 +1,59 @@
/*
* Copyright 2014 Cisco Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include "vnic_dev.h"
#include "vnic_intr.h"
/* Drop the mapping to the interrupt control registers (no HW access). */
void svnic_intr_free(struct vnic_intr *intr)
{
	intr->ctrl = NULL;
}
/*
 * Bind @intr to the interrupt-control register block at @index.
 * Returns 0 on success or -EINVAL when the resource is absent.
 */
int svnic_intr_alloc(struct vnic_dev *vdev, struct vnic_intr *intr,
	unsigned int index)
{
	intr->index = index;
	intr->vdev = vdev;

	intr->ctrl = svnic_dev_get_res(vdev, RES_TYPE_INTR_CTRL, index);
	if (!intr->ctrl) {
		pr_err("Failed to hook INTR[%d].ctrl resource\n", index);

		return -EINVAL;
	}

	return 0;
}
/*
 * Program the coalescing parameters and auto-mask behavior for one
 * interrupt, and clear any pending credits.
 */
void svnic_intr_init(struct vnic_intr *intr, unsigned int coalescing_timer,
	unsigned int coalescing_type, unsigned int mask_on_assertion)
{
	iowrite32(coalescing_timer, &intr->ctrl->coalescing_timer);
	iowrite32(coalescing_type, &intr->ctrl->coalescing_type);
	iowrite32(mask_on_assertion, &intr->ctrl->mask_on_assertion);
	iowrite32(0, &intr->ctrl->int_credits);
}
/* Discard any accumulated interrupt credits. */
void svnic_intr_clean(struct vnic_intr *intr)
{
	iowrite32(0, &intr->ctrl->int_credits);
}

View File

@ -0,0 +1,105 @@
/*
* Copyright 2014 Cisco Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
/* Interrupt control interface for the snic vNIC (vnic_intr.c). */
#ifndef _VNIC_INTR_H_
#define _VNIC_INTR_H_

#include <linux/pci.h>

#include "vnic_dev.h"

#define VNIC_INTR_TIMER_MAX		0xffff

#define VNIC_INTR_TIMER_TYPE_ABS	0
#define VNIC_INTR_TIMER_TYPE_QUIET	1

/*
 * Interrupt control -- memory-mapped register layout; offsets noted in
 * the per-field comments, pad words keep each register 8 bytes apart.
 */
struct vnic_intr_ctrl {
	u32 coalescing_timer;		/* 0x00 */
	u32 pad0;
	u32 coalescing_value;		/* 0x08 */
	u32 pad1;
	u32 coalescing_type;		/* 0x10 */
	u32 pad2;
	u32 mask_on_assertion;		/* 0x18 */
	u32 pad3;
	u32 mask;			/* 0x20 */
	u32 pad4;
	u32 int_credits;		/* 0x28 */
	u32 pad5;
	u32 int_credit_return;		/* 0x30 */
	u32 pad6;
};

struct vnic_intr {
	unsigned int index;
	struct vnic_dev *vdev;
	struct vnic_intr_ctrl __iomem *ctrl;	/* memory-mapped */
};

/* Unmask: allow the device to raise this interrupt. */
static inline void
svnic_intr_unmask(struct vnic_intr *intr)
{
	iowrite32(0, &intr->ctrl->mask);
}

/* Mask: suppress this interrupt. */
static inline void
svnic_intr_mask(struct vnic_intr *intr)
{
	iowrite32(1, &intr->ctrl->mask);
}

/*
 * Return up to 0xffff consumed credits to the device, optionally
 * unmasking the interrupt and/or resetting the coalescing timer via the
 * two flag bits packed above the credit count.
 */
static inline void
svnic_intr_return_credits(struct vnic_intr *intr,
			  unsigned int credits,
			  int unmask,
			  int reset_timer)
{
#define VNIC_INTR_UNMASK_SHIFT		16
#define VNIC_INTR_RESET_TIMER_SHIFT	17

	u32 int_credit_return = (credits & 0xffff) |
		(unmask ? (1 << VNIC_INTR_UNMASK_SHIFT) : 0) |
		(reset_timer ? (1 << VNIC_INTR_RESET_TIMER_SHIFT) : 0);

	iowrite32(int_credit_return, &intr->ctrl->int_credit_return);
}

/* Read the number of credits currently accumulated by the device. */
static inline unsigned int
svnic_intr_credits(struct vnic_intr *intr)
{
	return ioread32(&intr->ctrl->int_credits);
}

/* Return all outstanding credits, unmasking and resetting the timer. */
static inline void
svnic_intr_return_all_credits(struct vnic_intr *intr)
{
	unsigned int credits = svnic_intr_credits(intr);
	int unmask = 1;
	int reset_timer = 1;

	svnic_intr_return_credits(intr, credits, unmask, reset_timer);
}

void svnic_intr_free(struct vnic_intr *);
int svnic_intr_alloc(struct vnic_dev *, struct vnic_intr *, unsigned int);
void svnic_intr_init(struct vnic_intr *intr,
		     unsigned int coalescing_timer,
		     unsigned int coalescing_type,
		     unsigned int mask_on_assertion);
void svnic_intr_clean(struct vnic_intr *);

#endif /* _VNIC_INTR_H_ */

View File

@ -0,0 +1,68 @@
/*
* Copyright 2014 Cisco Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
/*
 * vNIC resource directory: the device exposes a table of
 * vnic_resource entries (validated by the magic/version header) that
 * tells the driver where each register block lives in the BARs.
 */
#ifndef _VNIC_RESOURCE_H_
#define _VNIC_RESOURCE_H_

#define VNIC_RES_MAGIC		0x766E6963L	/* 'vnic' */
#define VNIC_RES_VERSION	0x00000000L

/* vNIC resource types */
enum vnic_res_type {
	RES_TYPE_EOL,			/* End-of-list */
	RES_TYPE_WQ,			/* Work queues */
	RES_TYPE_RQ,			/* Receive queues */
	RES_TYPE_CQ,			/* Completion queues */
	RES_TYPE_RSVD1,
	RES_TYPE_NIC_CFG,		/* Enet NIC config registers */
	RES_TYPE_RSVD2,
	RES_TYPE_RSVD3,
	RES_TYPE_RSVD4,
	RES_TYPE_RSVD5,
	RES_TYPE_INTR_CTRL,		/* Interrupt ctrl table */
	RES_TYPE_INTR_TABLE,		/* MSI/MSI-X Interrupt table */
	RES_TYPE_INTR_PBA,		/* MSI/MSI-X PBA table */
	RES_TYPE_INTR_PBA_LEGACY,	/* Legacy intr status */
	RES_TYPE_RSVD6,
	RES_TYPE_RSVD7,
	RES_TYPE_DEVCMD,		/* Device command region */
	RES_TYPE_PASS_THRU_PAGE,	/* Pass-thru page */
	RES_TYPE_SUBVNIC,		/* subvnic resource type */
	RES_TYPE_MQ_WQ,			/* MQ Work queues */
	RES_TYPE_MQ_RQ,			/* MQ Receive queues */
	RES_TYPE_MQ_CQ,			/* MQ Completion queues */
	RES_TYPE_DEPRECATED1,		/* Old version of devcmd 2 */
	RES_TYPE_DEPRECATED2,		/* Old version of devcmd 2 */
	RES_TYPE_DEVCMD2,		/* Device control region */

	RES_TYPE_MAX,			/* Count of resource types */
};

/* Header at the start of the resource table. */
struct vnic_resource_header {
	u32 magic;
	u32 version;
};

/* One entry in the resource table: where a register block lives. */
struct vnic_resource {
	u8 type;
	u8 bar;
	u8 pad[2];
	u32 bar_offset;
	u32 count;
};

#endif /* _VNIC_RESOURCE_H_ */

View File

@ -0,0 +1,54 @@
/*
* Copyright 2014 Cisco Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
/* Limits and layout for the sNIC device-specific config region. */
#ifndef _VNIC_SNIC_H_
#define _VNIC_SNIC_H_

#define VNIC_SNIC_WQ_DESCS_MIN			64
#define VNIC_SNIC_WQ_DESCS_MAX			1024

#define VNIC_SNIC_MAXDATAFIELDSIZE_MIN		256
#define VNIC_SNIC_MAXDATAFIELDSIZE_MAX		2112

#define VNIC_SNIC_IO_THROTTLE_COUNT_MIN		1
#define VNIC_SNIC_IO_THROTTLE_COUNT_MAX		1024

#define VNIC_SNIC_PORT_DOWN_TIMEOUT_MIN		0
#define VNIC_SNIC_PORT_DOWN_TIMEOUT_MAX		240000

#define VNIC_SNIC_PORT_DOWN_IO_RETRIES_MIN	0
#define VNIC_SNIC_PORT_DOWN_IO_RETRIES_MAX	255

#define VNIC_SNIC_LUNS_PER_TARGET_MIN		1
#define VNIC_SNIC_LUNS_PER_TARGET_MAX		1024

/* Device-specific region: scsi configuration */
/* NOTE(review): field layout presumably mirrors the on-device config
 * block read via CMD_DEV_SPEC -- do not reorder without confirming. */
struct vnic_snic_config {
	u32 flags;
	u32 wq_enet_desc_count;
	u32 io_throttle_count;
	u32 port_down_timeout;
	u32 port_down_io_retries;
	u32 luns_per_tgt;
	u16 maxdatafieldsize;
	u16 intr_timer;
	u8 intr_timer_type;
	u8 _resvd2;
	u8 xpt_type;
	u8 hid;
};

#endif /* _VNIC_SNIC_H_ */

View File

@ -0,0 +1,68 @@
/*
* Copyright 2014 Cisco Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
/*
 * Statistics area layout DMA'd by firmware in response to
 * CMD_STATS_DUMP (see svnic_dev_stats_dump()).
 */
#ifndef _VNIC_STATS_H_
#define _VNIC_STATS_H_

/* Tx statistics */
struct vnic_tx_stats {
	u64 tx_frames_ok;
	u64 tx_unicast_frames_ok;
	u64 tx_multicast_frames_ok;
	u64 tx_broadcast_frames_ok;
	u64 tx_bytes_ok;
	u64 tx_unicast_bytes_ok;
	u64 tx_multicast_bytes_ok;
	u64 tx_broadcast_bytes_ok;
	u64 tx_drops;
	u64 tx_errors;
	u64 tx_tso;
	u64 rsvd[16];
};

/* Rx statistics */
struct vnic_rx_stats {
	u64 rx_frames_ok;
	u64 rx_frames_total;
	u64 rx_unicast_frames_ok;
	u64 rx_multicast_frames_ok;
	u64 rx_broadcast_frames_ok;
	u64 rx_bytes_ok;
	u64 rx_unicast_bytes_ok;
	u64 rx_multicast_bytes_ok;
	u64 rx_broadcast_bytes_ok;
	u64 rx_drop;
	u64 rx_no_bufs;
	u64 rx_errors;
	u64 rx_rss;
	u64 rx_crc_errors;
	u64 rx_frames_64;
	u64 rx_frames_127;
	u64 rx_frames_255;
	u64 rx_frames_511;
	u64 rx_frames_1023;
	u64 rx_frames_1518;
	u64 rx_frames_to_max;
	u64 rsvd[16];
};

struct vnic_stats {
	struct vnic_tx_stats tx;
	struct vnic_rx_stats rx;
};

#endif /* _VNIC_STATS_H_ */

View File

@ -0,0 +1,237 @@
/*
* Copyright 2014 Cisco Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include "vnic_dev.h"
#include "vnic_wq.h"
/*
 * Map the WQ control registers for resource @res_type/@index onto
 * wq->ctrl.  Returns -EINVAL when the resource is not present.
 */
static inline int vnic_wq_get_ctrl(struct vnic_dev *vdev, struct vnic_wq *wq,
	unsigned int index, enum vnic_res_type res_type)
{
	wq->ctrl = svnic_dev_get_res(vdev, res_type, index);

	return wq->ctrl ? 0 : -EINVAL;
}
/*
 * Allocate the descriptor ring backing @wq.
 * @index is not used in the body; presumably kept so the signature
 * parallels vnic_wq_get_ctrl() -- TODO confirm before removing.
 */
static inline int vnic_wq_alloc_ring(struct vnic_dev *vdev, struct vnic_wq *wq,
	unsigned int index, unsigned int desc_count, unsigned int desc_size)
{
	return svnic_dev_alloc_desc_ring(vdev, &wq->ring, desc_count,
					 desc_size);
}
/*
 * Allocate the bookkeeping buffers for every WQ descriptor, in blocks
 * of VNIC_WQ_BUF_DFLT_BLK_ENTRIES, and chain them through ->next into a
 * circular list: the entry for the last descriptor links back to
 * bufs[0].  Each buf->desc points at its descriptor in the ring.
 *
 * On allocation failure returns -ENOMEM without freeing earlier blocks;
 * the caller (svnic_wq_alloc) releases them via svnic_wq_free().
 */
static int vnic_wq_alloc_bufs(struct vnic_wq *wq)
{
	struct vnic_wq_buf *buf;
	unsigned int i, j, count = wq->ring.desc_count;
	unsigned int blks = VNIC_WQ_BUF_BLKS_NEEDED(count);

	for (i = 0; i < blks; i++) {
		wq->bufs[i] = kzalloc(VNIC_WQ_BUF_BLK_SZ, GFP_ATOMIC);
		if (!wq->bufs[i]) {
			pr_err("Failed to alloc wq_bufs\n");

			return -ENOMEM;
		}
	}

	for (i = 0; i < blks; i++) {
		buf = wq->bufs[i];
		for (j = 0; j < VNIC_WQ_BUF_DFLT_BLK_ENTRIES; j++) {
			buf->index = i * VNIC_WQ_BUF_DFLT_BLK_ENTRIES + j;
			buf->desc = (u8 *)wq->ring.descs +
				wq->ring.desc_size * buf->index;
			if (buf->index + 1 == count) {
				/* Last descriptor: close the circle. */
				buf->next = wq->bufs[0];
				break;
			} else if (j + 1 == VNIC_WQ_BUF_DFLT_BLK_ENTRIES) {
				/* End of block: link into the next block. */
				buf->next = wq->bufs[i + 1];
			} else {
				buf->next = buf + 1;
				buf++;
			}
		}
	}

	wq->to_use = wq->to_clean = wq->bufs[0];

	return 0;
}
/*
 * Release everything owned by @wq: the descriptor ring and every buf
 * block.  kfree(NULL) is a no-op, so partially-allocated buf arrays
 * (from a failed vnic_wq_alloc_bufs) are handled too.
 */
void svnic_wq_free(struct vnic_wq *wq)
{
	unsigned int blk;

	svnic_dev_free_desc_ring(wq->vdev, &wq->ring);

	for (blk = 0; blk < VNIC_WQ_BUF_BLKS_MAX; blk++) {
		kfree(wq->bufs[blk]);
		wq->bufs[blk] = NULL;
	}

	wq->ctrl = NULL;
}
/*
 * Set up the special devcmd2 work queue: hook the RES_TYPE_DEVCMD2
 * control registers, make sure the queue is quiesced, then allocate its
 * descriptor ring.  No per-descriptor bufs are needed for devcmd2.
 */
int vnic_wq_devcmd2_alloc(struct vnic_dev *vdev, struct vnic_wq *wq,
	unsigned int desc_count, unsigned int desc_size)
{
	int ret;

	wq->index = 0;
	wq->vdev = vdev;

	ret = vnic_wq_get_ctrl(vdev, wq, 0, RES_TYPE_DEVCMD2);
	if (ret) {
		pr_err("Failed to get devcmd2 resource\n");

		return ret;
	}

	svnic_wq_disable(wq);

	return vnic_wq_alloc_ring(vdev, wq, 0, desc_count, desc_size);
}
/*
 * Set up a regular work queue: hook control registers, quiesce, then
 * allocate the descriptor ring and its bookkeeping bufs.  On buf
 * allocation failure everything acquired so far is released.
 */
int svnic_wq_alloc(struct vnic_dev *vdev, struct vnic_wq *wq,
	unsigned int index, unsigned int desc_count, unsigned int desc_size)
{
	int ret;

	wq->index = index;
	wq->vdev = vdev;

	ret = vnic_wq_get_ctrl(vdev, wq, index, RES_TYPE_WQ);
	if (ret) {
		pr_err("Failed to hook WQ[%d] resource\n", index);

		return ret;
	}

	svnic_wq_disable(wq);

	ret = vnic_wq_alloc_ring(vdev, wq, index, desc_count, desc_size);
	if (ret)
		return ret;

	ret = vnic_wq_alloc_bufs(wq);
	if (ret) {
		svnic_wq_free(wq);

		return ret;
	}

	return 0;
}
/*
 * Program the WQ hardware registers and reset the software cursors.
 * fetch_index/posted_index may be non-zero (the devcmd2 path preserves
 * the hardware's current fetch index); to_use/to_clean are pointed at
 * the buf entry corresponding to fetch_index.
 */
void vnic_wq_init_start(struct vnic_wq *wq, unsigned int cq_index,
	unsigned int fetch_index, unsigned int posted_index,
	unsigned int error_interrupt_enable,
	unsigned int error_interrupt_offset)
{
	u64 paddr;
	unsigned int count = wq->ring.desc_count;

	/* Hand the ring's DMA base to the device. */
	paddr = (u64)wq->ring.base_addr | VNIC_PADDR_TARGET;
	writeq(paddr, &wq->ctrl->ring_base);
	iowrite32(count, &wq->ctrl->ring_size);
	iowrite32(fetch_index, &wq->ctrl->fetch_index);
	iowrite32(posted_index, &wq->ctrl->posted_index);
	iowrite32(cq_index, &wq->ctrl->cq_index);
	iowrite32(error_interrupt_enable, &wq->ctrl->error_interrupt_enable);
	iowrite32(error_interrupt_offset, &wq->ctrl->error_interrupt_offset);
	iowrite32(0, &wq->ctrl->error_status);

	wq->to_use = wq->to_clean =
		&wq->bufs[fetch_index / VNIC_WQ_BUF_BLK_ENTRIES(count)]
		[fetch_index % VNIC_WQ_BUF_BLK_ENTRIES(count)];
}
/* Initialize a WQ from scratch: both ring indexes start at zero. */
void svnic_wq_init(struct vnic_wq *wq, unsigned int cq_index,
	unsigned int error_interrupt_enable,
	unsigned int error_interrupt_offset)
{
	vnic_wq_init_start(wq, cq_index, 0, 0,
			   error_interrupt_enable,
			   error_interrupt_offset);
}
/* Read the WQ hardware error status register. */
unsigned int svnic_wq_error_status(struct vnic_wq *wq)
{
	return ioread32(&wq->ctrl->error_status);
}
/* Tell the hardware to start processing this work queue. */
void svnic_wq_enable(struct vnic_wq *wq)
{
	iowrite32(1, &wq->ctrl->enable);
}
/*
 * Request WQ disable and poll (up to ~100us) for the hardware to stop
 * running.  Returns 0 on success, -ETIMEDOUT if the queue never stops.
 */
int svnic_wq_disable(struct vnic_wq *wq)
{
	unsigned int spins;

	iowrite32(0, &wq->ctrl->enable);

	/* Wait for HW to ACK disable request */
	for (spins = 0; spins < 100; spins++) {
		if (!ioread32(&wq->ctrl->running))
			return 0;

		udelay(1);
	}

	pr_err("Failed to disable WQ[%d]\n", wq->index);

	return -ETIMEDOUT;
}
/*
 * Drain every in-flight buffer from a *disabled* WQ (BUG if still
 * enabled), invoking @buf_clean on each so the owner can unmap/free it,
 * then reset the software cursors, hardware indexes, and ring contents.
 */
void svnic_wq_clean(struct vnic_wq *wq,
	void (*buf_clean)(struct vnic_wq *wq, struct vnic_wq_buf *buf))
{
	struct vnic_wq_buf *buf;

	BUG_ON(ioread32(&wq->ctrl->enable));

	buf = wq->to_clean;

	while (svnic_wq_desc_used(wq) > 0) {
		(*buf_clean)(wq, buf);

		buf = wq->to_clean = buf->next;
		wq->ring.desc_avail++;
	}

	wq->to_use = wq->to_clean = wq->bufs[0];

	iowrite32(0, &wq->ctrl->fetch_index);
	iowrite32(0, &wq->ctrl->posted_index);
	iowrite32(0, &wq->ctrl->error_status);

	svnic_dev_clear_desc_ring(&wq->ring);
}

/* ==== scrape artifact: boundary between files in the original commit.
 * The following content is a new file (vnic_wq.h, 170 lines added). ==== */
/*
* Copyright 2014 Cisco Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef _VNIC_WQ_H_
#define _VNIC_WQ_H_
#include <linux/pci.h>
#include "vnic_dev.h"
#include "vnic_cq.h"
/* Work queue control */
/*
 * Memory-mapped WQ control register block, accessed via ioread32/
 * iowrite32 and writeq (see vnic_wq.c).  Each 32-bit register occupies
 * an 8-byte slot (hence the padN fillers); trailing comments give the
 * byte offset.  The layout mirrors the adapter hardware - do not
 * reorder or repack.
 */
struct vnic_wq_ctrl {
	u64 ring_base;			/* 0x00: descriptor ring DMA base */
	u32 ring_size;			/* 0x08: number of descriptors */
	u32 pad0;
	u32 posted_index;		/* 0x10: written by svnic_wq_post() */
	u32 pad1;
	u32 cq_index;			/* 0x18: bound completion queue */
	u32 pad2;
	u32 enable;			/* 0x20: 1 = run, 0 = request stop */
	u32 pad3;
	u32 running;			/* 0x28: polled for disable ACK */
	u32 pad4;
	u32 fetch_index;		/* 0x30: initial HW fetch position */
	u32 pad5;
	u32 dca_value;			/* 0x38 */
	u32 pad6;
	u32 error_interrupt_enable;	/* 0x40 */
	u32 pad7;
	u32 error_interrupt_offset;	/* 0x48 */
	u32 pad8;
	u32 error_status;		/* 0x50: cleared by writing 0 */
	u32 pad9;
};
/*
 * Per-descriptor SW bookkeeping entry.  Entries are chained through
 * 'next' into a circular list that parallels the HW descriptor ring
 * (see svnic_wq_post()/svnic_wq_service()).
 */
struct vnic_wq_buf {
	struct vnic_wq_buf *next;	/* next entry in the circular list */
	dma_addr_t dma_addr;		/* DMA address of the posted fragment */
	void *os_buf;			/* caller's buffer; set only on EOP */
	unsigned int len;		/* length of the posted fragment */
	unsigned int index;		/* descriptor index within the ring */
	int sop;			/* start-of-packet flag */
	void *desc;			/* HW descriptor slot for this entry;
					 * set at alloc time (not shown here) */
};
/* Break the vnic_wq_buf allocations into blocks of 64 entries */
#define VNIC_WQ_BUF_MIN_BLK_ENTRIES 32
#define VNIC_WQ_BUF_DFLT_BLK_ENTRIES 64
/*
 * Entries per bookkeeping block for a ring of the given descriptor
 * count: rings smaller than 64 descriptors use 32-entry blocks, all
 * others use 64-entry blocks.  The argument is parenthesized and the
 * cast covers the whole conditional (the original cast applied only to
 * the boolean comparison, which worked by accident).
 */
#define VNIC_WQ_BUF_BLK_ENTRIES(entries) \
	((unsigned int)((entries) < VNIC_WQ_BUF_DFLT_BLK_ENTRIES ? \
		VNIC_WQ_BUF_MIN_BLK_ENTRIES : VNIC_WQ_BUF_DFLT_BLK_ENTRIES))
/* Byte size of one full-sized bookkeeping block. */
#define VNIC_WQ_BUF_BLK_SZ \
	(VNIC_WQ_BUF_DFLT_BLK_ENTRIES * sizeof(struct vnic_wq_buf))
/* Number of blocks needed to cover 'entries' descriptors.
 * (The original defined this macro twice, identically; the duplicate
 * definition has been removed.)
 */
#define VNIC_WQ_BUF_BLKS_NEEDED(entries) \
	DIV_ROUND_UP(entries, VNIC_WQ_BUF_DFLT_BLK_ENTRIES)
#define VNIC_WQ_BUF_BLKS_MAX VNIC_WQ_BUF_BLKS_NEEDED(4096)
/*
 * Work queue state: the MMIO control block, the HW descriptor ring, and
 * the block-allocated vnic_wq_buf bookkeeping list with its two cursors.
 */
struct vnic_wq {
	unsigned int index;		/* WQ number (used in log messages) */
	struct vnic_dev *vdev;
	struct vnic_wq_ctrl __iomem *ctrl; /* memory-mapped */
	struct vnic_dev_ring ring;
	struct vnic_wq_buf *bufs[VNIC_WQ_BUF_BLKS_MAX]; /* bookkeeping blocks */
	struct vnic_wq_buf *to_use;	/* next buf to post (producer cursor) */
	struct vnic_wq_buf *to_clean;	/* next buf to reclaim (consumer cursor) */
	unsigned int pkts_outstanding;
};
/* Number of descriptors SW may still post on this WQ. */
static inline unsigned int svnic_wq_desc_avail(struct vnic_wq *wq)
{
	/* how many does SW own? */
	return wq->ring.desc_avail;
}
/*
 * Number of descriptors currently posted to HW.  One descriptor is
 * always held back (the "- 1") -- presumably so a full ring can be
 * distinguished from an empty one; TODO confirm against the adapter
 * spec.
 */
static inline unsigned int svnic_wq_desc_used(struct vnic_wq *wq)
{
	/* how many does HW own? */
	return wq->ring.desc_count - wq->ring.desc_avail - 1;
}
/* HW descriptor slot that the next svnic_wq_post() will fill. */
static inline void *svnic_wq_next_desc(struct vnic_wq *wq)
{
	return wq->to_use->desc;
}
/*
 * svnic_wq_post - record one fragment in the current buf and, on the
 * final fragment of a packet (eop != 0), publish the new posted_index
 * to the hardware.
 *
 * os_buf is stored only on the EOP fragment, so completion processing
 * can find the caller's buffer there.  Note that the cursor is advanced
 * *before* the MMIO write: posted_index is written with the index of
 * the buffer one past the last filled descriptor -- presumably HW
 * treats posted_index as the next-to-post position; confirm against
 * the adapter spec.
 */
static inline void svnic_wq_post(struct vnic_wq *wq,
	void *os_buf, dma_addr_t dma_addr,
	unsigned int len, int sop, int eop)
{
	struct vnic_wq_buf *buf = wq->to_use;

	buf->sop = sop;
	buf->os_buf = eop ? os_buf : NULL;
	buf->dma_addr = dma_addr;
	buf->len = len;

	buf = buf->next;
	if (eop) {
		/* Adding write memory barrier prevents compiler and/or CPU
		 * reordering, thus avoiding descriptor posting before
		 * descriptor is initialized. Otherwise, hardware can read
		 * stale descriptor fields.
		 */
		wmb();
		iowrite32(buf->index, &wq->ctrl->posted_index);
	}
	wq->to_use = buf;

	wq->ring.desc_avail--;
}
/*
 * svnic_wq_service - reclaim completed descriptors up to and including
 * completed_index.
 *
 * Walks the buf list from to_clean, invoking @buf_service on each entry
 * and returning its descriptor to the available pool, stopping after the
 * entry whose index equals @completed_index.
 *
 * NOTE(review): if completed_index is not within the outstanding range
 * the loop never terminates -- callers must pass an index taken from a
 * WQ completion.
 */
static inline void svnic_wq_service(struct vnic_wq *wq,
	struct cq_desc *cq_desc, u16 completed_index,
	void (*buf_service)(struct vnic_wq *wq,
	struct cq_desc *cq_desc, struct vnic_wq_buf *buf, void *opaque),
	void *opaque)
{
	struct vnic_wq_buf *buf;

	buf = wq->to_clean;
	while (1) {
		(*buf_service)(wq, cq_desc, buf, opaque);
		wq->ring.desc_avail++;
		wq->to_clean = buf->next;
		if (buf->index == completed_index)
			break;

		buf = wq->to_clean;
	}
}
/* Allocation/teardown of WQ resources (implemented in vnic_wq.c). */
void svnic_wq_free(struct vnic_wq *wq);
int svnic_wq_alloc(struct vnic_dev *vdev, struct vnic_wq *wq,
	unsigned int index, unsigned int desc_count, unsigned int desc_size);
int vnic_wq_devcmd2_alloc(struct vnic_dev *vdev, struct vnic_wq *wq,
	unsigned int desc_count, unsigned int desc_size);

/* HW programming and queue control (implemented in vnic_wq.c). */
void vnic_wq_init_start(struct vnic_wq *wq, unsigned int cq_index,
	unsigned int fetch_index, unsigned int post_index,
	unsigned int error_interrupt_enable,
	unsigned int error_interrupt_offset);
void svnic_wq_init(struct vnic_wq *wq, unsigned int cq_index,
	unsigned int error_interrupt_enable,
	unsigned int error_interrupt_offset);
unsigned int svnic_wq_error_status(struct vnic_wq *wq);
void svnic_wq_enable(struct vnic_wq *wq);
int svnic_wq_disable(struct vnic_wq *wq);
void svnic_wq_clean(struct vnic_wq *wq,
	void (*buf_clean)(struct vnic_wq *wq, struct vnic_wq_buf *buf));

#endif /* _VNIC_WQ_H_ */

/* ==== scrape artifact: boundary between files in the original commit.
 * The following content is a new file (wq_enet_desc.h, 96 lines added). ==== */
/*
* Copyright 2014 Cisco Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef _WQ_ENET_DESC_H_
#define _WQ_ENET_DESC_H_
/* Ethernet work queue descriptor: 16B */
/*
 * Bit layout (little-endian, derived from wq_enet_desc_enc/_dec below):
 *   length:              bits 13:0 of 'length'
 *   mss:                 bits 15:2 of 'mss_loopback'
 *   loopback:            bit 1 of 'mss_loopback'
 *   header_length:       bits 9:0 of 'header_length_flags'
 *   offload_mode:        bits 11:10 of 'header_length_flags'
 *   eop/cq_entry/fcoe_encap/vlan_tag_insert:
 *                        bits 12/13/14/15 of 'header_length_flags'
 */
struct wq_enet_desc {
	__le64 address;			/* buffer DMA address */
	__le16 length;			/* fragment length */
	__le16 mss_loopback;		/* MSS | loopback flag */
	__le16 header_length_flags;	/* header length | OM | flag bits */
	__le16 vlan_tag;		/* VLAN tag value */
};

/* Field widths, masks and shift positions for the packed fields above. */
#define WQ_ENET_ADDR_BITS 64
#define WQ_ENET_LEN_BITS 14
#define WQ_ENET_LEN_MASK ((1 << WQ_ENET_LEN_BITS) - 1)
#define WQ_ENET_MSS_BITS 14
#define WQ_ENET_MSS_MASK ((1 << WQ_ENET_MSS_BITS) - 1)
#define WQ_ENET_MSS_SHIFT 2
#define WQ_ENET_LOOPBACK_SHIFT 1
#define WQ_ENET_HDRLEN_BITS 10
#define WQ_ENET_HDRLEN_MASK ((1 << WQ_ENET_HDRLEN_BITS) - 1)
#define WQ_ENET_FLAGS_OM_BITS 2
#define WQ_ENET_FLAGS_OM_MASK ((1 << WQ_ENET_FLAGS_OM_BITS) - 1)
#define WQ_ENET_FLAGS_EOP_SHIFT 12
#define WQ_ENET_FLAGS_CQ_ENTRY_SHIFT 13
#define WQ_ENET_FLAGS_FCOE_ENCAP_SHIFT 14
#define WQ_ENET_FLAGS_VLAN_TAG_INSERT_SHIFT 15

/* Values for the 2-bit offload_mode field. */
#define WQ_ENET_OFFLOAD_MODE_CSUM 0
#define WQ_ENET_OFFLOAD_MODE_RESERVED 1
#define WQ_ENET_OFFLOAD_MODE_CSUM_L4 2
#define WQ_ENET_OFFLOAD_MODE_TSO 3
/*
 * wq_enet_desc_enc - pack the given fields into a little-endian
 * Ethernet WQ descriptor.
 *
 * Each value is masked to its field width and shifted into position as
 * described by the WQ_ENET_* layout macros; the result is stored in
 * little-endian byte order.
 */
static inline void wq_enet_desc_enc(struct wq_enet_desc *desc,
	u64 address, u16 length, u16 mss, u16 header_length,
	u8 offload_mode, u8 eop, u8 cq_entry, u8 fcoe_encap,
	u8 vlan_tag_insert, u16 vlan_tag, u8 loopback)
{
	u16 mss_lb;
	u16 hdr_flags;

	mss_lb = ((mss & WQ_ENET_MSS_MASK) << WQ_ENET_MSS_SHIFT) |
		((loopback & 1) << WQ_ENET_LOOPBACK_SHIFT);

	hdr_flags = (header_length & WQ_ENET_HDRLEN_MASK) |
		((offload_mode & WQ_ENET_FLAGS_OM_MASK) <<
			WQ_ENET_HDRLEN_BITS) |
		((eop & 1) << WQ_ENET_FLAGS_EOP_SHIFT) |
		((cq_entry & 1) << WQ_ENET_FLAGS_CQ_ENTRY_SHIFT) |
		((fcoe_encap & 1) << WQ_ENET_FLAGS_FCOE_ENCAP_SHIFT) |
		((vlan_tag_insert & 1) << WQ_ENET_FLAGS_VLAN_TAG_INSERT_SHIFT);

	desc->address = cpu_to_le64(address);
	desc->length = cpu_to_le16(length & WQ_ENET_LEN_MASK);
	desc->mss_loopback = cpu_to_le16(mss_lb);
	desc->header_length_flags = cpu_to_le16(hdr_flags);
	desc->vlan_tag = cpu_to_le16(vlan_tag);
}
/*
 * wq_enet_desc_dec - unpack a little-endian Ethernet WQ descriptor into
 * its individual fields (the inverse of wq_enet_desc_enc()).
 *
 * The two packed halfwords are converted to CPU byte order once, then
 * each field is extracted with the WQ_ENET_* shift/mask macros.
 */
static inline void wq_enet_desc_dec(struct wq_enet_desc *desc,
	u64 *address, u16 *length, u16 *mss, u16 *header_length,
	u8 *offload_mode, u8 *eop, u8 *cq_entry, u8 *fcoe_encap,
	u8 *vlan_tag_insert, u16 *vlan_tag, u8 *loopback)
{
	u16 mss_lb = le16_to_cpu(desc->mss_loopback);
	u16 hdr_flags = le16_to_cpu(desc->header_length_flags);

	*address = le64_to_cpu(desc->address);
	*length = le16_to_cpu(desc->length) & WQ_ENET_LEN_MASK;
	*mss = (mss_lb >> WQ_ENET_MSS_SHIFT) & WQ_ENET_MSS_MASK;
	*loopback = (u8)((mss_lb >> WQ_ENET_LOOPBACK_SHIFT) & 1);
	*header_length = hdr_flags & WQ_ENET_HDRLEN_MASK;
	*offload_mode =
		(u8)((hdr_flags >> WQ_ENET_HDRLEN_BITS) & WQ_ENET_FLAGS_OM_MASK);
	*eop = (u8)((hdr_flags >> WQ_ENET_FLAGS_EOP_SHIFT) & 1);
	*cq_entry = (u8)((hdr_flags >> WQ_ENET_FLAGS_CQ_ENTRY_SHIFT) & 1);
	*fcoe_encap = (u8)((hdr_flags >> WQ_ENET_FLAGS_FCOE_ENCAP_SHIFT) & 1);
	*vlan_tag_insert =
		(u8)((hdr_flags >> WQ_ENET_FLAGS_VLAN_TAG_INSERT_SHIFT) & 1);
	*vlan_tag = le16_to_cpu(desc->vlan_tag);
}
#endif /* _WQ_ENET_DESC_H_ */