[SCSI] cnic: Add new Broadcom CNIC driver.

The CNIC driver controls BNX2 hardware rings and resources used by
iSCSI.  Most hardware resources for iSCSI are separate from those
used for Ethernet networking.
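
As an illustration of the interface this commit adds, here is a minimal
sketch of how an upper-layer protocol (ULP) module such as an iSCSI
offload driver might attach to the CNIC core.  Only cnic_register_driver(),
cnic_unregister_driver(), struct cnic_ulp_ops and CNIC_ULP_ISCSI come from
cnic_if.h below; the callback bodies and module wrapper are illustrative,
not part of this commit.

/* hypothetical ULP module attaching to the CNIC core */
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/in.h>
#include <linux/in6.h>
#include "cnic_if.h"

static void my_ulp_cnic_init(struct cnic_dev *dev)
{
	/* called once per CNIC device when this ULP registers */
}

static void my_ulp_cnic_exit(struct cnic_dev *dev)
{
	/* called when the CNIC device goes away */
}

static struct cnic_ulp_ops my_ulp_ops = {
	.cnic_init	= my_ulp_cnic_init,
	.cnic_exit	= my_ulp_cnic_exit,
	.owner		= THIS_MODULE,
};

static int __init my_ulp_init(void)
{
	/* claim the iSCSI ULP slot */
	return cnic_register_driver(CNIC_ULP_ISCSI, &my_ulp_ops);
}

static void __exit my_ulp_exit(void)
{
	cnic_unregister_driver(CNIC_ULP_ISCSI);
}

module_init(my_ulp_init);
module_exit(my_ulp_exit);
MODULE_LICENSE("GPL");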

iSCSI uses a separate MAC address and IP address.  The CNIC driver
creates a UIO interface so that non-offloaded packets, such as ARP,
can be handled in userspace.
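
A minimal userspace sketch of the UIO usage pattern follows.  The device
node index (/dev/uio0), the mapping size and the layout of mapping 0 are
assumptions; the real consumer is a dedicated iSCSI userspace daemon that
is not part of this commit.

/* hypothetical userspace consumer of the CNIC UIO device */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/dev/uio0", O_RDWR);
	if (fd < 0) {
		perror("open /dev/uio0");
		return 1;
	}

	/* map the first memory region exported by the driver (size assumed) */
	size_t len = 4096;
	void *ring = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (ring == MAP_FAILED) {
		perror("mmap");
		close(fd);
		return 1;
	}

	/* a blocking read returns the interrupt count when the device
	 * signals that new non-offloaded packets are ready */
	uint32_t irq_count;
	if (read(fd, &irq_count, sizeof(irq_count)) == sizeof(irq_count))
		printf("interrupt #%u, ring mapped at %p\n", irq_count, ring);

	munmap(ring, len);
	close(fd);
	return 0;
}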

Signed-off-by: Michael Chan <mchan@broadcom.com>
Acked-by: David S. Miller <davem@davemloft.net>
Signed-off-by: James Bottomley <James.Bottomley@HansenPartnership.com>
commit a463696039
parent 4edd473f20
Author:    Michael Chan <mchan@broadcom.com>  2009-06-08 18:14:43 -07:00
Committer: James Bottomley
6 files changed, 3901 insertions(+), 0 deletions(-)

drivers/net/Kconfig

@@ -2264,6 +2264,17 @@ config BNX2
	  To compile this driver as a module, choose M here: the module
	  will be called bnx2.  This is recommended.

config CNIC
	tristate "Broadcom CNIC support"
	depends on BNX2
	depends on UIO
	help
	  This driver supports offload features of Broadcom NetXtremeII
	  gigabit Ethernet cards.

	  To compile this driver as a module, choose M here: the module
	  will be called cnic.  This is recommended.

config SPIDER_NET
	tristate "Spider Gigabit Ethernet driver"
	depends on PCI && (PPC_IBM_CELL_BLADE || PPC_CELLEB)

drivers/net/Makefile

@@ -73,6 +73,7 @@ obj-$(CONFIG_STNIC) += stnic.o 8390.o
obj-$(CONFIG_FEALNX) += fealnx.o
obj-$(CONFIG_TIGON3) += tg3.o
obj-$(CONFIG_BNX2) += bnx2.o
obj-$(CONFIG_CNIC) += cnic.o
obj-$(CONFIG_BNX2X) += bnx2x.o
bnx2x-objs := bnx2x_main.o bnx2x_link.o
spidernet-y += spider_net.o spider_net_ethtool.o

drivers/net/cnic.c (new file, 2711 lines)

File diff suppressed because it is too large.

drivers/net/cnic.h (new file, 299 lines)

@@ -0,0 +1,299 @@
/* cnic.h: Broadcom CNIC core network driver.
*
* Copyright (c) 2006-2009 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation.
*
*/
#ifndef CNIC_H
#define CNIC_H
#define KWQ_PAGE_CNT 4
#define KCQ_PAGE_CNT 16
#define KWQ_CID 24
#define KCQ_CID 25
/*
* krnlq_context definition
*/
#define L5_KRNLQ_FLAGS 0x00000000
#define L5_KRNLQ_SIZE 0x00000000
#define L5_KRNLQ_TYPE 0x00000000
#define KRNLQ_FLAGS_PG_SZ (0xf<<0)
#define KRNLQ_FLAGS_PG_SZ_256 (0<<0)
#define KRNLQ_FLAGS_PG_SZ_512 (1<<0)
#define KRNLQ_FLAGS_PG_SZ_1K (2<<0)
#define KRNLQ_FLAGS_PG_SZ_2K (3<<0)
#define KRNLQ_FLAGS_PG_SZ_4K (4<<0)
#define KRNLQ_FLAGS_PG_SZ_8K (5<<0)
#define KRNLQ_FLAGS_PG_SZ_16K (6<<0)
#define KRNLQ_FLAGS_PG_SZ_32K (7<<0)
#define KRNLQ_FLAGS_PG_SZ_64K (8<<0)
#define KRNLQ_FLAGS_PG_SZ_128K (9<<0)
#define KRNLQ_FLAGS_PG_SZ_256K (10<<0)
#define KRNLQ_FLAGS_PG_SZ_512K (11<<0)
#define KRNLQ_FLAGS_PG_SZ_1M (12<<0)
#define KRNLQ_FLAGS_PG_SZ_2M (13<<0)
#define KRNLQ_FLAGS_QE_SELF_SEQ (1<<15)
#define KRNLQ_SIZE_TYPE_SIZE ((((0x28 + 0x1f) & ~0x1f) / 0x20) << 16)
#define KRNLQ_TYPE_TYPE (0xf<<28)
#define KRNLQ_TYPE_TYPE_EMPTY (0<<28)
#define KRNLQ_TYPE_TYPE_KRNLQ (6<<28)
#define L5_KRNLQ_HOST_QIDX 0x00000004
#define L5_KRNLQ_HOST_FW_QIDX 0x00000008
#define L5_KRNLQ_NX_QE_SELF_SEQ 0x0000000c
#define L5_KRNLQ_QE_SELF_SEQ_MAX 0x0000000c
#define L5_KRNLQ_NX_QE_HADDR_HI 0x00000010
#define L5_KRNLQ_NX_QE_HADDR_LO 0x00000014
#define L5_KRNLQ_PGTBL_PGIDX 0x00000018
#define L5_KRNLQ_NX_PG_QIDX 0x00000018
#define L5_KRNLQ_PGTBL_NPAGES 0x0000001c
#define L5_KRNLQ_QIDX_INCR 0x0000001c
#define L5_KRNLQ_PGTBL_HADDR_HI 0x00000020
#define L5_KRNLQ_PGTBL_HADDR_LO 0x00000024
#define BNX2_PG_CTX_MAP 0x1a0034
#define BNX2_ISCSI_CTX_MAP 0x1a0074
struct cnic_redirect_entry {
struct dst_entry *old_dst;
struct dst_entry *new_dst;
};
#define MAX_COMPLETED_KCQE 64
#define MAX_CNIC_L5_CONTEXT 256
#define MAX_CM_SK_TBL_SZ MAX_CNIC_L5_CONTEXT
#define MAX_ISCSI_TBL_SZ 256
#define CNIC_LOCAL_PORT_MIN 60000
#define CNIC_LOCAL_PORT_MAX 61000
#define CNIC_LOCAL_PORT_RANGE (CNIC_LOCAL_PORT_MAX - CNIC_LOCAL_PORT_MIN)
#define KWQE_CNT (BCM_PAGE_SIZE / sizeof(struct kwqe))
#define KCQE_CNT (BCM_PAGE_SIZE / sizeof(struct kcqe))
#define MAX_KWQE_CNT (KWQE_CNT - 1)
#define MAX_KCQE_CNT (KCQE_CNT - 1)
#define MAX_KWQ_IDX ((KWQ_PAGE_CNT * KWQE_CNT) - 1)
#define MAX_KCQ_IDX ((KCQ_PAGE_CNT * KCQE_CNT) - 1)
#define KWQ_PG(x) (((x) & ~MAX_KWQE_CNT) >> (BCM_PAGE_BITS - 5))
#define KWQ_IDX(x) ((x) & MAX_KWQE_CNT)
#define KCQ_PG(x) (((x) & ~MAX_KCQE_CNT) >> (BCM_PAGE_BITS - 5))
#define KCQ_IDX(x) ((x) & MAX_KCQE_CNT)
#define BNX2X_NEXT_KCQE(x) (((x) & (MAX_KCQE_CNT - 1)) == \
(MAX_KCQE_CNT - 1)) ? \
(x) + 2 : (x) + 1
#define BNX2X_KWQ_DATA_PG(cp, x) ((x) / (cp)->kwq_16_data_pp)
#define BNX2X_KWQ_DATA_IDX(cp, x) ((x) % (cp)->kwq_16_data_pp)
#define BNX2X_KWQ_DATA(cp, x) \
&(cp)->kwq_16_data[BNX2X_KWQ_DATA_PG(cp, x)][BNX2X_KWQ_DATA_IDX(cp, x)]
#define DEF_IPID_COUNT 0xc001
#define DEF_KA_TIMEOUT 10000
#define DEF_KA_INTERVAL 300000
#define DEF_KA_MAX_PROBE_COUNT 3
#define DEF_TOS 0
#define DEF_TTL 0xfe
#define DEF_SND_SEQ_SCALE 0
#define DEF_RCV_BUF 0xffff
#define DEF_SND_BUF 0xffff
#define DEF_SEED 0
#define DEF_MAX_RT_TIME 500
#define DEF_MAX_DA_COUNT 2
#define DEF_SWS_TIMER 1000
#define DEF_MAX_CWND 0xffff
struct cnic_ctx {
u32 cid;
void *ctx;
dma_addr_t mapping;
};
#define BNX2_MAX_CID 0x2000
struct cnic_dma {
int num_pages;
void **pg_arr;
dma_addr_t *pg_map_arr;
int pgtbl_size;
u32 *pgtbl;
dma_addr_t pgtbl_map;
};
struct cnic_id_tbl {
spinlock_t lock;
u32 start;
u32 max;
u32 next;
unsigned long *table;
};
#define CNIC_KWQ16_DATA_SIZE 128
struct kwqe_16_data {
u8 data[CNIC_KWQ16_DATA_SIZE];
};
struct cnic_iscsi {
struct cnic_dma task_array_info;
struct cnic_dma r2tq_info;
struct cnic_dma hq_info;
};
struct cnic_context {
u32 cid;
struct kwqe_16_data *kwqe_data;
dma_addr_t kwqe_data_mapping;
wait_queue_head_t waitq;
int wait_cond;
unsigned long timestamp;
u32 ctx_flags;
#define CTX_FL_OFFLD_START 0x00000001
u8 ulp_proto_id;
union {
struct cnic_iscsi *iscsi;
} proto;
};
struct cnic_local {
spinlock_t cnic_ulp_lock;
void *ulp_handle[MAX_CNIC_ULP_TYPE];
unsigned long ulp_flags[MAX_CNIC_ULP_TYPE];
#define ULP_F_INIT 0
#define ULP_F_START 1
struct cnic_ulp_ops *ulp_ops[MAX_CNIC_ULP_TYPE];
/* protected by ulp_lock */
u32 cnic_local_flags;
#define CNIC_LCL_FL_KWQ_INIT 0x00000001
struct cnic_dev *dev;
struct cnic_eth_dev *ethdev;
void *l2_ring;
dma_addr_t l2_ring_map;
int l2_ring_size;
int l2_rx_ring_size;
void *l2_buf;
dma_addr_t l2_buf_map;
int l2_buf_size;
int l2_single_buf_size;
u16 *rx_cons_ptr;
u16 *tx_cons_ptr;
u16 rx_cons;
u16 tx_cons;
u32 kwq_cid_addr;
u32 kcq_cid_addr;
struct cnic_dma kwq_info;
struct kwqe **kwq;
struct cnic_dma kwq_16_data_info;
u16 max_kwq_idx;
u16 kwq_prod_idx;
u32 kwq_io_addr;
u16 *kwq_con_idx_ptr;
u16 kwq_con_idx;
struct cnic_dma kcq_info;
struct kcqe **kcq;
u16 kcq_prod_idx;
u32 kcq_io_addr;
void *status_blk;
struct status_block_msix *bnx2_status_blk;
struct host_status_block *bnx2x_status_blk;
u32 status_blk_num;
u32 int_num;
u32 last_status_idx;
struct tasklet_struct cnic_irq_task;
struct kcqe *completed_kcq[MAX_COMPLETED_KCQE];
struct cnic_sock *csk_tbl;
struct cnic_id_tbl csk_port_tbl;
struct cnic_dma conn_buf_info;
struct cnic_dma gbl_buf_info;
struct cnic_iscsi *iscsi_tbl;
struct cnic_context *ctx_tbl;
struct cnic_id_tbl cid_tbl;
int max_iscsi_conn;
atomic_t iscsi_conn;
/* per connection parameters */
int num_iscsi_tasks;
int num_ccells;
int task_array_size;
int r2tq_size;
int hq_size;
int num_cqs;
struct cnic_ctx *ctx_arr;
int ctx_blks;
int ctx_blk_size;
int cids_per_blk;
u32 chip_id;
int func;
u32 shmem_base;
u32 uio_dev;
struct uio_info *cnic_uinfo;
struct cnic_ops *cnic_ops;
int (*start_hw)(struct cnic_dev *);
void (*stop_hw)(struct cnic_dev *);
void (*setup_pgtbl)(struct cnic_dev *,
struct cnic_dma *);
int (*alloc_resc)(struct cnic_dev *);
void (*free_resc)(struct cnic_dev *);
int (*start_cm)(struct cnic_dev *);
void (*stop_cm)(struct cnic_dev *);
void (*enable_int)(struct cnic_dev *);
void (*disable_int_sync)(struct cnic_dev *);
void (*ack_int)(struct cnic_dev *);
void (*close_conn)(struct cnic_sock *, u32 opcode);
u16 (*next_idx)(u16);
u16 (*hw_idx)(u16);
};
struct bnx2x_bd_chain_next {
u32 addr_lo;
u32 addr_hi;
u8 reserved[8];
};
#define ISCSI_RAMROD_CMD_ID_UPDATE_CONN (ISCSI_KCQE_OPCODE_UPDATE_CONN)
#define ISCSI_RAMROD_CMD_ID_INIT (ISCSI_KCQE_OPCODE_INIT)
#define CDU_REGION_NUMBER_XCM_AG 2
#define CDU_REGION_NUMBER_UCM_AG 4
#endif
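
The KWQ/KCQ index macros in cnic.h split a 16-bit producer index into a
page number and an offset within that page.  A standalone sketch of the
arithmetic, assuming 4 KiB BCM pages (BCM_PAGE_BITS == 12) and 32-byte
KWQEs (128 entries per page) as on BNX2 hardware; this program is not part
of the driver.

/* worked example of the KWQ_PG()/KWQ_IDX() ring-index arithmetic */
#include <stdio.h>

#define BCM_PAGE_BITS	12			/* assumed, as on BNX2 */
#define BCM_PAGE_SIZE	(1 << BCM_PAGE_BITS)
#define KWQE_SIZE	32			/* sizeof(struct kwqe) */
#define KWQE_CNT	(BCM_PAGE_SIZE / KWQE_SIZE)
#define MAX_KWQE_CNT	(KWQE_CNT - 1)

/* same arithmetic as KWQ_PG()/KWQ_IDX() in cnic.h */
#define KWQ_PG(x)	(((x) & ~MAX_KWQE_CNT) >> (BCM_PAGE_BITS - 5))
#define KWQ_IDX(x)	((x) & MAX_KWQE_CNT)

int main(void)
{
	unsigned int prod = 130;	/* example producer index */

	/* 130 = 1 * 128 + 2  ->  page 1, entry 2 */
	printf("prod %u -> page %u, idx %u\n",
	       prod, KWQ_PG(prod), KWQ_IDX(prod));
	return 0;
}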

drivers/net/cnic_defs.h (new file, 580 lines)

@@ -0,0 +1,580 @@
/* cnic.c: Broadcom CNIC core network driver.
*
* Copyright (c) 2006-2009 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation.
*
*/
#ifndef CNIC_DEFS_H
#define CNIC_DEFS_H
/* KWQ (kernel work queue) request op codes */
#define L2_KWQE_OPCODE_VALUE_FLUSH (4)
#define L4_KWQE_OPCODE_VALUE_CONNECT1 (50)
#define L4_KWQE_OPCODE_VALUE_CONNECT2 (51)
#define L4_KWQE_OPCODE_VALUE_CONNECT3 (52)
#define L4_KWQE_OPCODE_VALUE_RESET (53)
#define L4_KWQE_OPCODE_VALUE_CLOSE (54)
#define L4_KWQE_OPCODE_VALUE_UPDATE_SECRET (60)
#define L4_KWQE_OPCODE_VALUE_INIT_ULP (61)
#define L4_KWQE_OPCODE_VALUE_OFFLOAD_PG (1)
#define L4_KWQE_OPCODE_VALUE_UPDATE_PG (9)
#define L4_KWQE_OPCODE_VALUE_UPLOAD_PG (14)
#define L5CM_RAMROD_CMD_ID_BASE (0x80)
#define L5CM_RAMROD_CMD_ID_TCP_CONNECT (L5CM_RAMROD_CMD_ID_BASE + 3)
#define L5CM_RAMROD_CMD_ID_CLOSE (L5CM_RAMROD_CMD_ID_BASE + 12)
#define L5CM_RAMROD_CMD_ID_ABORT (L5CM_RAMROD_CMD_ID_BASE + 13)
#define L5CM_RAMROD_CMD_ID_SEARCHER_DELETE (L5CM_RAMROD_CMD_ID_BASE + 14)
#define L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD (L5CM_RAMROD_CMD_ID_BASE + 15)
/* KCQ (kernel completion queue) response op codes */
#define L4_KCQE_OPCODE_VALUE_CLOSE_COMP (53)
#define L4_KCQE_OPCODE_VALUE_RESET_COMP (54)
#define L4_KCQE_OPCODE_VALUE_FW_TCP_UPDATE (55)
#define L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE (56)
#define L4_KCQE_OPCODE_VALUE_RESET_RECEIVED (57)
#define L4_KCQE_OPCODE_VALUE_CLOSE_RECEIVED (58)
#define L4_KCQE_OPCODE_VALUE_INIT_ULP (61)
#define L4_KCQE_OPCODE_VALUE_OFFLOAD_PG (1)
#define L4_KCQE_OPCODE_VALUE_UPDATE_PG (9)
#define L4_KCQE_OPCODE_VALUE_UPLOAD_PG (14)
/* KCQ (kernel completion queue) completion status */
#define L4_KCQE_COMPLETION_STATUS_SUCCESS (0)
#define L4_KCQE_COMPLETION_STATUS_TIMEOUT (0x93)
#define L4_LAYER_CODE (4)
#define L2_LAYER_CODE (2)
/*
* L4 KCQ CQE
*/
struct l4_kcq {
u32 cid;
u32 pg_cid;
u32 conn_id;
u32 pg_host_opaque;
#if defined(__BIG_ENDIAN)
u16 status;
u16 reserved1;
#elif defined(__LITTLE_ENDIAN)
u16 reserved1;
u16 status;
#endif
u32 reserved2[2];
#if defined(__BIG_ENDIAN)
u8 flags;
#define L4_KCQ_RESERVED3 (0x7<<0)
#define L4_KCQ_RESERVED3_SHIFT 0
#define L4_KCQ_RAMROD_COMPLETION (0x1<<3) /* Everest only */
#define L4_KCQ_RAMROD_COMPLETION_SHIFT 3
#define L4_KCQ_LAYER_CODE (0x7<<4)
#define L4_KCQ_LAYER_CODE_SHIFT 4
#define L4_KCQ_RESERVED4 (0x1<<7)
#define L4_KCQ_RESERVED4_SHIFT 7
u8 op_code;
u16 qe_self_seq;
#elif defined(__LITTLE_ENDIAN)
u16 qe_self_seq;
u8 op_code;
u8 flags;
#define L4_KCQ_RESERVED3 (0xF<<0)
#define L4_KCQ_RESERVED3_SHIFT 0
#define L4_KCQ_RAMROD_COMPLETION (0x1<<3) /* Everest only */
#define L4_KCQ_RAMROD_COMPLETION_SHIFT 3
#define L4_KCQ_LAYER_CODE (0x7<<4)
#define L4_KCQ_LAYER_CODE_SHIFT 4
#define L4_KCQ_RESERVED4 (0x1<<7)
#define L4_KCQ_RESERVED4_SHIFT 7
#endif
};
/*
* L4 KCQ CQE PG upload
*/
struct l4_kcq_upload_pg {
u32 pg_cid;
#if defined(__BIG_ENDIAN)
u16 pg_status;
u16 pg_ipid_count;
#elif defined(__LITTLE_ENDIAN)
u16 pg_ipid_count;
u16 pg_status;
#endif
u32 reserved1[5];
#if defined(__BIG_ENDIAN)
u8 flags;
#define L4_KCQ_UPLOAD_PG_RESERVED3 (0xF<<0)
#define L4_KCQ_UPLOAD_PG_RESERVED3_SHIFT 0
#define L4_KCQ_UPLOAD_PG_LAYER_CODE (0x7<<4)
#define L4_KCQ_UPLOAD_PG_LAYER_CODE_SHIFT 4
#define L4_KCQ_UPLOAD_PG_RESERVED4 (0x1<<7)
#define L4_KCQ_UPLOAD_PG_RESERVED4_SHIFT 7
u8 op_code;
u16 qe_self_seq;
#elif defined(__LITTLE_ENDIAN)
u16 qe_self_seq;
u8 op_code;
u8 flags;
#define L4_KCQ_UPLOAD_PG_RESERVED3 (0xF<<0)
#define L4_KCQ_UPLOAD_PG_RESERVED3_SHIFT 0
#define L4_KCQ_UPLOAD_PG_LAYER_CODE (0x7<<4)
#define L4_KCQ_UPLOAD_PG_LAYER_CODE_SHIFT 4
#define L4_KCQ_UPLOAD_PG_RESERVED4 (0x1<<7)
#define L4_KCQ_UPLOAD_PG_RESERVED4_SHIFT 7
#endif
};
/*
* Gracefully close the connection request
*/
struct l4_kwq_close_req {
#if defined(__BIG_ENDIAN)
u8 flags;
#define L4_KWQ_CLOSE_REQ_RESERVED1 (0xF<<0)
#define L4_KWQ_CLOSE_REQ_RESERVED1_SHIFT 0
#define L4_KWQ_CLOSE_REQ_LAYER_CODE (0x7<<4)
#define L4_KWQ_CLOSE_REQ_LAYER_CODE_SHIFT 4
#define L4_KWQ_CLOSE_REQ_LINKED_WITH_NEXT (0x1<<7)
#define L4_KWQ_CLOSE_REQ_LINKED_WITH_NEXT_SHIFT 7
u8 op_code;
u16 reserved0;
#elif defined(__LITTLE_ENDIAN)
u16 reserved0;
u8 op_code;
u8 flags;
#define L4_KWQ_CLOSE_REQ_RESERVED1 (0xF<<0)
#define L4_KWQ_CLOSE_REQ_RESERVED1_SHIFT 0
#define L4_KWQ_CLOSE_REQ_LAYER_CODE (0x7<<4)
#define L4_KWQ_CLOSE_REQ_LAYER_CODE_SHIFT 4
#define L4_KWQ_CLOSE_REQ_LINKED_WITH_NEXT (0x1<<7)
#define L4_KWQ_CLOSE_REQ_LINKED_WITH_NEXT_SHIFT 7
#endif
u32 cid;
u32 reserved2[6];
};
/*
* The first request to be passed in order to establish connection in option2
*/
struct l4_kwq_connect_req1 {
#if defined(__BIG_ENDIAN)
u8 flags;
#define L4_KWQ_CONNECT_REQ1_RESERVED1 (0xF<<0)
#define L4_KWQ_CONNECT_REQ1_RESERVED1_SHIFT 0
#define L4_KWQ_CONNECT_REQ1_LAYER_CODE (0x7<<4)
#define L4_KWQ_CONNECT_REQ1_LAYER_CODE_SHIFT 4
#define L4_KWQ_CONNECT_REQ1_LINKED_WITH_NEXT (0x1<<7)
#define L4_KWQ_CONNECT_REQ1_LINKED_WITH_NEXT_SHIFT 7
u8 op_code;
u8 reserved0;
u8 conn_flags;
#define L4_KWQ_CONNECT_REQ1_IS_PG_HOST_OPAQUE (0x1<<0)
#define L4_KWQ_CONNECT_REQ1_IS_PG_HOST_OPAQUE_SHIFT 0
#define L4_KWQ_CONNECT_REQ1_IP_V6 (0x1<<1)
#define L4_KWQ_CONNECT_REQ1_IP_V6_SHIFT 1
#define L4_KWQ_CONNECT_REQ1_PASSIVE_FLAG (0x1<<2)
#define L4_KWQ_CONNECT_REQ1_PASSIVE_FLAG_SHIFT 2
#define L4_KWQ_CONNECT_REQ1_RSRV (0x1F<<3)
#define L4_KWQ_CONNECT_REQ1_RSRV_SHIFT 3
#elif defined(__LITTLE_ENDIAN)
u8 conn_flags;
#define L4_KWQ_CONNECT_REQ1_IS_PG_HOST_OPAQUE (0x1<<0)
#define L4_KWQ_CONNECT_REQ1_IS_PG_HOST_OPAQUE_SHIFT 0
#define L4_KWQ_CONNECT_REQ1_IP_V6 (0x1<<1)
#define L4_KWQ_CONNECT_REQ1_IP_V6_SHIFT 1
#define L4_KWQ_CONNECT_REQ1_PASSIVE_FLAG (0x1<<2)
#define L4_KWQ_CONNECT_REQ1_PASSIVE_FLAG_SHIFT 2
#define L4_KWQ_CONNECT_REQ1_RSRV (0x1F<<3)
#define L4_KWQ_CONNECT_REQ1_RSRV_SHIFT 3
u8 reserved0;
u8 op_code;
u8 flags;
#define L4_KWQ_CONNECT_REQ1_RESERVED1 (0xF<<0)
#define L4_KWQ_CONNECT_REQ1_RESERVED1_SHIFT 0
#define L4_KWQ_CONNECT_REQ1_LAYER_CODE (0x7<<4)
#define L4_KWQ_CONNECT_REQ1_LAYER_CODE_SHIFT 4
#define L4_KWQ_CONNECT_REQ1_LINKED_WITH_NEXT (0x1<<7)
#define L4_KWQ_CONNECT_REQ1_LINKED_WITH_NEXT_SHIFT 7
#endif
u32 cid;
u32 pg_cid;
u32 src_ip;
u32 dst_ip;
#if defined(__BIG_ENDIAN)
u16 dst_port;
u16 src_port;
#elif defined(__LITTLE_ENDIAN)
u16 src_port;
u16 dst_port;
#endif
#if defined(__BIG_ENDIAN)
u8 rsrv1[3];
u8 tcp_flags;
#define L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK (0x1<<0)
#define L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK_SHIFT 0
#define L4_KWQ_CONNECT_REQ1_KEEP_ALIVE (0x1<<1)
#define L4_KWQ_CONNECT_REQ1_KEEP_ALIVE_SHIFT 1
#define L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE (0x1<<2)
#define L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE_SHIFT 2
#define L4_KWQ_CONNECT_REQ1_TIME_STAMP (0x1<<3)
#define L4_KWQ_CONNECT_REQ1_TIME_STAMP_SHIFT 3
#define L4_KWQ_CONNECT_REQ1_SACK (0x1<<4)
#define L4_KWQ_CONNECT_REQ1_SACK_SHIFT 4
#define L4_KWQ_CONNECT_REQ1_SEG_SCALING (0x1<<5)
#define L4_KWQ_CONNECT_REQ1_SEG_SCALING_SHIFT 5
#define L4_KWQ_CONNECT_REQ1_RESERVED2 (0x3<<6)
#define L4_KWQ_CONNECT_REQ1_RESERVED2_SHIFT 6
#elif defined(__LITTLE_ENDIAN)
u8 tcp_flags;
#define L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK (0x1<<0)
#define L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK_SHIFT 0
#define L4_KWQ_CONNECT_REQ1_KEEP_ALIVE (0x1<<1)
#define L4_KWQ_CONNECT_REQ1_KEEP_ALIVE_SHIFT 1
#define L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE (0x1<<2)
#define L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE_SHIFT 2
#define L4_KWQ_CONNECT_REQ1_TIME_STAMP (0x1<<3)
#define L4_KWQ_CONNECT_REQ1_TIME_STAMP_SHIFT 3
#define L4_KWQ_CONNECT_REQ1_SACK (0x1<<4)
#define L4_KWQ_CONNECT_REQ1_SACK_SHIFT 4
#define L4_KWQ_CONNECT_REQ1_SEG_SCALING (0x1<<5)
#define L4_KWQ_CONNECT_REQ1_SEG_SCALING_SHIFT 5
#define L4_KWQ_CONNECT_REQ1_RESERVED2 (0x3<<6)
#define L4_KWQ_CONNECT_REQ1_RESERVED2_SHIFT 6
u8 rsrv1[3];
#endif
u32 rsrv2;
};
/*
* The second ( optional )request to be passed in order to establish
* connection in option2 - for IPv6 only
*/
struct l4_kwq_connect_req2 {
#if defined(__BIG_ENDIAN)
u8 flags;
#define L4_KWQ_CONNECT_REQ2_RESERVED1 (0xF<<0)
#define L4_KWQ_CONNECT_REQ2_RESERVED1_SHIFT 0
#define L4_KWQ_CONNECT_REQ2_LAYER_CODE (0x7<<4)
#define L4_KWQ_CONNECT_REQ2_LAYER_CODE_SHIFT 4
#define L4_KWQ_CONNECT_REQ2_LINKED_WITH_NEXT (0x1<<7)
#define L4_KWQ_CONNECT_REQ2_LINKED_WITH_NEXT_SHIFT 7
u8 op_code;
u8 reserved0;
u8 rsrv;
#elif defined(__LITTLE_ENDIAN)
u8 rsrv;
u8 reserved0;
u8 op_code;
u8 flags;
#define L4_KWQ_CONNECT_REQ2_RESERVED1 (0xF<<0)
#define L4_KWQ_CONNECT_REQ2_RESERVED1_SHIFT 0
#define L4_KWQ_CONNECT_REQ2_LAYER_CODE (0x7<<4)
#define L4_KWQ_CONNECT_REQ2_LAYER_CODE_SHIFT 4
#define L4_KWQ_CONNECT_REQ2_LINKED_WITH_NEXT (0x1<<7)
#define L4_KWQ_CONNECT_REQ2_LINKED_WITH_NEXT_SHIFT 7
#endif
u32 reserved2;
u32 src_ip_v6_2;
u32 src_ip_v6_3;
u32 src_ip_v6_4;
u32 dst_ip_v6_2;
u32 dst_ip_v6_3;
u32 dst_ip_v6_4;
};
/*
* The third ( and last )request to be passed in order to establish
* connection in option2
*/
struct l4_kwq_connect_req3 {
#if defined(__BIG_ENDIAN)
u8 flags;
#define L4_KWQ_CONNECT_REQ3_RESERVED1 (0xF<<0)
#define L4_KWQ_CONNECT_REQ3_RESERVED1_SHIFT 0
#define L4_KWQ_CONNECT_REQ3_LAYER_CODE (0x7<<4)
#define L4_KWQ_CONNECT_REQ3_LAYER_CODE_SHIFT 4
#define L4_KWQ_CONNECT_REQ3_LINKED_WITH_NEXT (0x1<<7)
#define L4_KWQ_CONNECT_REQ3_LINKED_WITH_NEXT_SHIFT 7
u8 op_code;
u16 reserved0;
#elif defined(__LITTLE_ENDIAN)
u16 reserved0;
u8 op_code;
u8 flags;
#define L4_KWQ_CONNECT_REQ3_RESERVED1 (0xF<<0)
#define L4_KWQ_CONNECT_REQ3_RESERVED1_SHIFT 0
#define L4_KWQ_CONNECT_REQ3_LAYER_CODE (0x7<<4)
#define L4_KWQ_CONNECT_REQ3_LAYER_CODE_SHIFT 4
#define L4_KWQ_CONNECT_REQ3_LINKED_WITH_NEXT (0x1<<7)
#define L4_KWQ_CONNECT_REQ3_LINKED_WITH_NEXT_SHIFT 7
#endif
u32 ka_timeout;
u32 ka_interval ;
#if defined(__BIG_ENDIAN)
u8 snd_seq_scale;
u8 ttl;
u8 tos;
u8 ka_max_probe_count;
#elif defined(__LITTLE_ENDIAN)
u8 ka_max_probe_count;
u8 tos;
u8 ttl;
u8 snd_seq_scale;
#endif
#if defined(__BIG_ENDIAN)
u16 pmtu;
u16 mss;
#elif defined(__LITTLE_ENDIAN)
u16 mss;
u16 pmtu;
#endif
u32 rcv_buf;
u32 snd_buf;
u32 seed;
};
/*
* a KWQE request to offload a PG connection
*/
struct l4_kwq_offload_pg {
#if defined(__BIG_ENDIAN)
u8 flags;
#define L4_KWQ_OFFLOAD_PG_RESERVED1 (0xF<<0)
#define L4_KWQ_OFFLOAD_PG_RESERVED1_SHIFT 0
#define L4_KWQ_OFFLOAD_PG_LAYER_CODE (0x7<<4)
#define L4_KWQ_OFFLOAD_PG_LAYER_CODE_SHIFT 4
#define L4_KWQ_OFFLOAD_PG_LINKED_WITH_NEXT (0x1<<7)
#define L4_KWQ_OFFLOAD_PG_LINKED_WITH_NEXT_SHIFT 7
u8 op_code;
u16 reserved0;
#elif defined(__LITTLE_ENDIAN)
u16 reserved0;
u8 op_code;
u8 flags;
#define L4_KWQ_OFFLOAD_PG_RESERVED1 (0xF<<0)
#define L4_KWQ_OFFLOAD_PG_RESERVED1_SHIFT 0
#define L4_KWQ_OFFLOAD_PG_LAYER_CODE (0x7<<4)
#define L4_KWQ_OFFLOAD_PG_LAYER_CODE_SHIFT 4
#define L4_KWQ_OFFLOAD_PG_LINKED_WITH_NEXT (0x1<<7)
#define L4_KWQ_OFFLOAD_PG_LINKED_WITH_NEXT_SHIFT 7
#endif
#if defined(__BIG_ENDIAN)
u8 l2hdr_nbytes;
u8 pg_flags;
#define L4_KWQ_OFFLOAD_PG_SNAP_ENCAP (0x1<<0)
#define L4_KWQ_OFFLOAD_PG_SNAP_ENCAP_SHIFT 0
#define L4_KWQ_OFFLOAD_PG_VLAN_TAGGING (0x1<<1)
#define L4_KWQ_OFFLOAD_PG_VLAN_TAGGING_SHIFT 1
#define L4_KWQ_OFFLOAD_PG_RESERVED2 (0x3F<<2)
#define L4_KWQ_OFFLOAD_PG_RESERVED2_SHIFT 2
u8 da0;
u8 da1;
#elif defined(__LITTLE_ENDIAN)
u8 da1;
u8 da0;
u8 pg_flags;
#define L4_KWQ_OFFLOAD_PG_SNAP_ENCAP (0x1<<0)
#define L4_KWQ_OFFLOAD_PG_SNAP_ENCAP_SHIFT 0
#define L4_KWQ_OFFLOAD_PG_VLAN_TAGGING (0x1<<1)
#define L4_KWQ_OFFLOAD_PG_VLAN_TAGGING_SHIFT 1
#define L4_KWQ_OFFLOAD_PG_RESERVED2 (0x3F<<2)
#define L4_KWQ_OFFLOAD_PG_RESERVED2_SHIFT 2
u8 l2hdr_nbytes;
#endif
#if defined(__BIG_ENDIAN)
u8 da2;
u8 da3;
u8 da4;
u8 da5;
#elif defined(__LITTLE_ENDIAN)
u8 da5;
u8 da4;
u8 da3;
u8 da2;
#endif
#if defined(__BIG_ENDIAN)
u8 sa0;
u8 sa1;
u8 sa2;
u8 sa3;
#elif defined(__LITTLE_ENDIAN)
u8 sa3;
u8 sa2;
u8 sa1;
u8 sa0;
#endif
#if defined(__BIG_ENDIAN)
u8 sa4;
u8 sa5;
u16 etype;
#elif defined(__LITTLE_ENDIAN)
u16 etype;
u8 sa5;
u8 sa4;
#endif
#if defined(__BIG_ENDIAN)
u16 vlan_tag;
u16 ipid_start;
#elif defined(__LITTLE_ENDIAN)
u16 ipid_start;
u16 vlan_tag;
#endif
#if defined(__BIG_ENDIAN)
u16 ipid_count;
u16 reserved3;
#elif defined(__LITTLE_ENDIAN)
u16 reserved3;
u16 ipid_count;
#endif
u32 host_opaque;
};
/*
* Abortively close the connection request
*/
struct l4_kwq_reset_req {
#if defined(__BIG_ENDIAN)
u8 flags;
#define L4_KWQ_RESET_REQ_RESERVED1 (0xF<<0)
#define L4_KWQ_RESET_REQ_RESERVED1_SHIFT 0
#define L4_KWQ_RESET_REQ_LAYER_CODE (0x7<<4)
#define L4_KWQ_RESET_REQ_LAYER_CODE_SHIFT 4
#define L4_KWQ_RESET_REQ_LINKED_WITH_NEXT (0x1<<7)
#define L4_KWQ_RESET_REQ_LINKED_WITH_NEXT_SHIFT 7
u8 op_code;
u16 reserved0;
#elif defined(__LITTLE_ENDIAN)
u16 reserved0;
u8 op_code;
u8 flags;
#define L4_KWQ_RESET_REQ_RESERVED1 (0xF<<0)
#define L4_KWQ_RESET_REQ_RESERVED1_SHIFT 0
#define L4_KWQ_RESET_REQ_LAYER_CODE (0x7<<4)
#define L4_KWQ_RESET_REQ_LAYER_CODE_SHIFT 4
#define L4_KWQ_RESET_REQ_LINKED_WITH_NEXT (0x1<<7)
#define L4_KWQ_RESET_REQ_LINKED_WITH_NEXT_SHIFT 7
#endif
u32 cid;
u32 reserved2[6];
};
/*
* a KWQE request to update a PG connection
*/
struct l4_kwq_update_pg {
#if defined(__BIG_ENDIAN)
u8 flags;
#define L4_KWQ_UPDATE_PG_RESERVED1 (0xF<<0)
#define L4_KWQ_UPDATE_PG_RESERVED1_SHIFT 0
#define L4_KWQ_UPDATE_PG_LAYER_CODE (0x7<<4)
#define L4_KWQ_UPDATE_PG_LAYER_CODE_SHIFT 4
#define L4_KWQ_UPDATE_PG_LINKED_WITH_NEXT (0x1<<7)
#define L4_KWQ_UPDATE_PG_LINKED_WITH_NEXT_SHIFT 7
u8 opcode;
u16 oper16;
#elif defined(__LITTLE_ENDIAN)
u16 oper16;
u8 opcode;
u8 flags;
#define L4_KWQ_UPDATE_PG_RESERVED1 (0xF<<0)
#define L4_KWQ_UPDATE_PG_RESERVED1_SHIFT 0
#define L4_KWQ_UPDATE_PG_LAYER_CODE (0x7<<4)
#define L4_KWQ_UPDATE_PG_LAYER_CODE_SHIFT 4
#define L4_KWQ_UPDATE_PG_LINKED_WITH_NEXT (0x1<<7)
#define L4_KWQ_UPDATE_PG_LINKED_WITH_NEXT_SHIFT 7
#endif
u32 pg_cid;
u32 pg_host_opaque;
#if defined(__BIG_ENDIAN)
u8 pg_valids;
#define L4_KWQ_UPDATE_PG_VALIDS_IPID_COUNT (0x1<<0)
#define L4_KWQ_UPDATE_PG_VALIDS_IPID_COUNT_SHIFT 0
#define L4_KWQ_UPDATE_PG_VALIDS_DA (0x1<<1)
#define L4_KWQ_UPDATE_PG_VALIDS_DA_SHIFT 1
#define L4_KWQ_UPDATE_PG_RESERVERD2 (0x3F<<2)
#define L4_KWQ_UPDATE_PG_RESERVERD2_SHIFT 2
u8 pg_unused_a;
u16 pg_ipid_count;
#elif defined(__LITTLE_ENDIAN)
u16 pg_ipid_count;
u8 pg_unused_a;
u8 pg_valids;
#define L4_KWQ_UPDATE_PG_VALIDS_IPID_COUNT (0x1<<0)
#define L4_KWQ_UPDATE_PG_VALIDS_IPID_COUNT_SHIFT 0
#define L4_KWQ_UPDATE_PG_VALIDS_DA (0x1<<1)
#define L4_KWQ_UPDATE_PG_VALIDS_DA_SHIFT 1
#define L4_KWQ_UPDATE_PG_RESERVERD2 (0x3F<<2)
#define L4_KWQ_UPDATE_PG_RESERVERD2_SHIFT 2
#endif
#if defined(__BIG_ENDIAN)
u16 reserverd3;
u8 da0;
u8 da1;
#elif defined(__LITTLE_ENDIAN)
u8 da1;
u8 da0;
u16 reserverd3;
#endif
#if defined(__BIG_ENDIAN)
u8 da2;
u8 da3;
u8 da4;
u8 da5;
#elif defined(__LITTLE_ENDIAN)
u8 da5;
u8 da4;
u8 da3;
u8 da2;
#endif
u32 reserved4;
u32 reserved5;
};
/*
* a KWQE request to upload a PG or L4 context
*/
struct l4_kwq_upload {
#if defined(__BIG_ENDIAN)
u8 flags;
#define L4_KWQ_UPLOAD_RESERVED1 (0xF<<0)
#define L4_KWQ_UPLOAD_RESERVED1_SHIFT 0
#define L4_KWQ_UPLOAD_LAYER_CODE (0x7<<4)
#define L4_KWQ_UPLOAD_LAYER_CODE_SHIFT 4
#define L4_KWQ_UPLOAD_LINKED_WITH_NEXT (0x1<<7)
#define L4_KWQ_UPLOAD_LINKED_WITH_NEXT_SHIFT 7
u8 opcode;
u16 oper16;
#elif defined(__LITTLE_ENDIAN)
u16 oper16;
u8 opcode;
u8 flags;
#define L4_KWQ_UPLOAD_RESERVED1 (0xF<<0)
#define L4_KWQ_UPLOAD_RESERVED1_SHIFT 0
#define L4_KWQ_UPLOAD_LAYER_CODE (0x7<<4)
#define L4_KWQ_UPLOAD_LAYER_CODE_SHIFT 4
#define L4_KWQ_UPLOAD_LINKED_WITH_NEXT (0x1<<7)
#define L4_KWQ_UPLOAD_LINKED_WITH_NEXT_SHIFT 7
#endif
u32 cid;
u32 reserved2[6];
};
#endif /* CNIC_DEFS_H */
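
The flags byte of each L4 work-queue entry carries the layer code in bits
4-6, per the masks and shifts defined above.  A hedged sketch of how a
graceful-close request could be populated; the actual construction lives in
cnic.c, whose diff is suppressed above, so treat this as illustrative only.

/* illustrative helper, not taken from cnic.c */
#include <linux/types.h>
#include <linux/string.h>
#include "cnic_defs.h"

static void build_close_req(struct l4_kwq_close_req *req, u32 cid)
{
	memset(req, 0, sizeof(*req));

	/* opcode 54: L4 "close" work request */
	req->op_code = L4_KWQE_OPCODE_VALUE_CLOSE;

	/* tag the entry as belonging to the L4 (TCP) layer */
	req->flags = L4_LAYER_CODE << L4_KWQ_CLOSE_REQ_LAYER_CODE_SHIFT;

	/* hardware connection ID to tear down */
	req->cid = cid;
}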

drivers/net/cnic_if.h (new file, 299 lines)

@@ -0,0 +1,299 @@
/* cnic_if.h: Broadcom CNIC core network driver.
*
* Copyright (c) 2006 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation.
*
*/
#ifndef CNIC_IF_H
#define CNIC_IF_H
#define CNIC_MODULE_VERSION "2.0.0"
#define CNIC_MODULE_RELDATE "May 21, 2009"
#define CNIC_ULP_RDMA 0
#define CNIC_ULP_ISCSI 1
#define CNIC_ULP_L4 2
#define MAX_CNIC_ULP_TYPE_EXT 2
#define MAX_CNIC_ULP_TYPE 3
struct kwqe {
u32 kwqe_op_flag;
#define KWQE_OPCODE_MASK 0x00ff0000
#define KWQE_OPCODE_SHIFT 16
#define KWQE_FLAGS_LAYER_SHIFT 28
#define KWQE_OPCODE(x) ((x & KWQE_OPCODE_MASK) >> KWQE_OPCODE_SHIFT)
u32 kwqe_info0;
u32 kwqe_info1;
u32 kwqe_info2;
u32 kwqe_info3;
u32 kwqe_info4;
u32 kwqe_info5;
u32 kwqe_info6;
};
struct kwqe_16 {
u32 kwqe_info0;
u32 kwqe_info1;
u32 kwqe_info2;
u32 kwqe_info3;
};
struct kcqe {
u32 kcqe_info0;
u32 kcqe_info1;
u32 kcqe_info2;
u32 kcqe_info3;
u32 kcqe_info4;
u32 kcqe_info5;
u32 kcqe_info6;
u32 kcqe_op_flag;
#define KCQE_RAMROD_COMPLETION (0x1<<27) /* Everest */
#define KCQE_FLAGS_LAYER_MASK (0x7<<28)
#define KCQE_FLAGS_LAYER_MASK_MISC (0<<28)
#define KCQE_FLAGS_LAYER_MASK_L2 (2<<28)
#define KCQE_FLAGS_LAYER_MASK_L3 (3<<28)
#define KCQE_FLAGS_LAYER_MASK_L4 (4<<28)
#define KCQE_FLAGS_LAYER_MASK_L5_RDMA (5<<28)
#define KCQE_FLAGS_LAYER_MASK_L5_ISCSI (6<<28)
#define KCQE_FLAGS_NEXT (1<<31)
#define KCQE_FLAGS_OPCODE_MASK (0xff<<16)
#define KCQE_FLAGS_OPCODE_SHIFT (16)
#define KCQE_OPCODE(op) \
(((op) & KCQE_FLAGS_OPCODE_MASK) >> KCQE_FLAGS_OPCODE_SHIFT)
};
#define MAX_CNIC_CTL_DATA 64
#define MAX_DRV_CTL_DATA 64
#define CNIC_CTL_STOP_CMD 1
#define CNIC_CTL_START_CMD 2
#define CNIC_CTL_COMPLETION_CMD 3
#define DRV_CTL_IO_WR_CMD 0x101
#define DRV_CTL_IO_RD_CMD 0x102
#define DRV_CTL_CTX_WR_CMD 0x103
#define DRV_CTL_CTXTBL_WR_CMD 0x104
#define DRV_CTL_COMPLETION_CMD 0x105
struct cnic_ctl_completion {
u32 cid;
};
struct drv_ctl_completion {
u32 comp_count;
};
struct cnic_ctl_info {
int cmd;
union {
struct cnic_ctl_completion comp;
char bytes[MAX_CNIC_CTL_DATA];
} data;
};
struct drv_ctl_io {
u32 cid_addr;
u32 offset;
u32 data;
dma_addr_t dma_addr;
};
struct drv_ctl_info {
int cmd;
union {
struct drv_ctl_completion comp;
struct drv_ctl_io io;
char bytes[MAX_DRV_CTL_DATA];
} data;
};
struct cnic_ops {
struct module *cnic_owner;
/* Calls to these functions are protected by RCU. When
* unregistering, we wait for any calls to complete before
* continuing.
*/
int (*cnic_handler)(void *, void *);
int (*cnic_ctl)(void *, struct cnic_ctl_info *);
};
#define MAX_CNIC_VEC 8
struct cnic_irq {
unsigned int vector;
void *status_blk;
u32 status_blk_num;
u32 irq_flags;
#define CNIC_IRQ_FL_MSIX 0x00000001
};
struct cnic_eth_dev {
struct module *drv_owner;
u32 drv_state;
#define CNIC_DRV_STATE_REGD 0x00000001
#define CNIC_DRV_STATE_USING_MSIX 0x00000002
u32 chip_id;
u32 max_kwqe_pending;
struct pci_dev *pdev;
void __iomem *io_base;
u32 ctx_tbl_offset;
u32 ctx_tbl_len;
int ctx_blk_size;
u32 starting_cid;
u32 max_iscsi_conn;
u32 max_fcoe_conn;
u32 max_rdma_conn;
u32 reserved0[2];
int num_irq;
struct cnic_irq irq_arr[MAX_CNIC_VEC];
int (*drv_register_cnic)(struct net_device *,
struct cnic_ops *, void *);
int (*drv_unregister_cnic)(struct net_device *);
int (*drv_submit_kwqes_32)(struct net_device *,
struct kwqe *[], u32);
int (*drv_submit_kwqes_16)(struct net_device *,
struct kwqe_16 *[], u32);
int (*drv_ctl)(struct net_device *, struct drv_ctl_info *);
unsigned long reserved1[2];
};
struct cnic_sockaddr {
union {
struct sockaddr_in v4;
struct sockaddr_in6 v6;
} local;
union {
struct sockaddr_in v4;
struct sockaddr_in6 v6;
} remote;
};
struct cnic_sock {
struct cnic_dev *dev;
void *context;
u32 src_ip[4];
u32 dst_ip[4];
u16 src_port;
u16 dst_port;
u16 vlan_id;
unsigned char old_ha[6];
unsigned char ha[6];
u32 mtu;
u32 cid;
u32 l5_cid;
u32 pg_cid;
int ulp_type;
u32 ka_timeout;
u32 ka_interval;
u8 ka_max_probe_count;
u8 tos;
u8 ttl;
u8 snd_seq_scale;
u32 rcv_buf;
u32 snd_buf;
u32 seed;
unsigned long tcp_flags;
#define SK_TCP_NO_DELAY_ACK 0x1
#define SK_TCP_KEEP_ALIVE 0x2
#define SK_TCP_NAGLE 0x4
#define SK_TCP_TIMESTAMP 0x8
#define SK_TCP_SACK 0x10
#define SK_TCP_SEG_SCALING 0x20
unsigned long flags;
#define SK_F_INUSE 0
#define SK_F_OFFLD_COMPLETE 1
#define SK_F_OFFLD_SCHED 2
#define SK_F_PG_OFFLD_COMPLETE 3
#define SK_F_CONNECT_START 4
#define SK_F_IPV6 5
#define SK_F_CLOSING 7
atomic_t ref_count;
u32 state;
struct kwqe kwqe1;
struct kwqe kwqe2;
struct kwqe kwqe3;
};
struct cnic_dev {
struct net_device *netdev;
struct pci_dev *pcidev;
void __iomem *regview;
struct list_head list;
int (*register_device)(struct cnic_dev *dev, int ulp_type,
void *ulp_ctx);
int (*unregister_device)(struct cnic_dev *dev, int ulp_type);
int (*submit_kwqes)(struct cnic_dev *dev, struct kwqe *wqes[],
u32 num_wqes);
int (*submit_kwqes_16)(struct cnic_dev *dev, struct kwqe_16 *wqes[],
u32 num_wqes);
int (*cm_create)(struct cnic_dev *, int, u32, u32, struct cnic_sock **,
void *);
int (*cm_destroy)(struct cnic_sock *);
int (*cm_connect)(struct cnic_sock *, struct cnic_sockaddr *);
int (*cm_abort)(struct cnic_sock *);
int (*cm_close)(struct cnic_sock *);
struct cnic_dev *(*cm_select_dev)(struct sockaddr_in *, int ulp_type);
int (*iscsi_nl_msg_recv)(struct cnic_dev *dev, u32 msg_type,
char *data, u16 data_size);
unsigned long flags;
#define CNIC_F_CNIC_UP 1
#define CNIC_F_BNX2_CLASS 3
#define CNIC_F_BNX2X_CLASS 4
atomic_t ref_count;
u8 mac_addr[6];
int max_iscsi_conn;
int max_fcoe_conn;
int max_rdma_conn;
void *cnic_priv;
};
#define CNIC_WR(dev, off, val) writel(val, dev->regview + off)
#define CNIC_WR16(dev, off, val) writew(val, dev->regview + off)
#define CNIC_WR8(dev, off, val) writeb(val, dev->regview + off)
#define CNIC_RD(dev, off) readl(dev->regview + off)
#define CNIC_RD16(dev, off) readw(dev->regview + off)
struct cnic_ulp_ops {
/* Calls to these functions are protected by RCU. When
* unregistering, we wait for any calls to complete before
* continuing.
*/
void (*cnic_init)(struct cnic_dev *dev);
void (*cnic_exit)(struct cnic_dev *dev);
void (*cnic_start)(void *ulp_ctx);
void (*cnic_stop)(void *ulp_ctx);
void (*indicate_kcqes)(void *ulp_ctx, struct kcqe *cqes[],
u32 num_cqes);
void (*indicate_netevent)(void *ulp_ctx, unsigned long event);
void (*cm_connect_complete)(struct cnic_sock *);
void (*cm_close_complete)(struct cnic_sock *);
void (*cm_abort_complete)(struct cnic_sock *);
void (*cm_remote_close)(struct cnic_sock *);
void (*cm_remote_abort)(struct cnic_sock *);
void (*iscsi_nl_send_msg)(struct cnic_dev *dev, u32 msg_type,
char *data, u16 data_size);
struct module *owner;
};
extern int cnic_register_driver(int ulp_type, struct cnic_ulp_ops *ulp_ops);
extern int cnic_unregister_driver(int ulp_type);
#endif
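
The comments in struct cnic_ops and struct cnic_ulp_ops note that the
callbacks are protected by RCU.  A minimal sketch of the implied dispatch
pattern, using the cnic_local bookkeeping from cnic.h; the real dispatch
code is in the suppressed cnic.c, so this is only an approximation.

/* hypothetical KCQE delivery path, invoking a ULP callback under RCU */
#include <linux/rcupdate.h>
#include "cnic_if.h"
#include "cnic.h"

static void deliver_kcqes(struct cnic_dev *dev, int ulp_type,
			  struct kcqe *cqes[], u32 num_cqes)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_ulp_ops *ulp_ops;

	rcu_read_lock();
	ulp_ops = rcu_dereference(cp->ulp_ops[ulp_type]);
	if (ulp_ops)
		ulp_ops->indicate_kcqes(cp->ulp_handle[ulp_type],
					cqes, num_cqes);
	rcu_read_unlock();
}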