Merge branch 'bnx2x-next'

Yuval Mintz says:

====================
bnx2x: SR-IOV patch series

With the exception of the first patch, this series contains IOV-related
patches. The main changes concern the driver's IOV-support backbone: it adds
a new workqueue for IOV-related tasks and removes the vfop mechanism from
the driver.

Please consider applying this series to `net-next'.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
David S. Miller 2014-03-25 21:07:43 -04:00
commit e74dbb7327
8 changed files with 948 additions and 2070 deletions
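
The backbone change described above boils down to a flag-and-workqueue pattern
that is visible in the hunks below: producers (the EQ handler for VF-PF
messages, the attention handler for FLR) set a bit in bp->iov_task_state and
queue bp->iov_task on the dedicated bnx2x_iov_wq, and the worker drains those
bits outside the regular slow-path task. The handler bodies live in the sriov
code whose diff is suppressed below as too large, so the following is only a
minimal sketch reconstructed from the declarations and call sites shown on
this page, not the verbatim driver code; memory barriers, logging and state
checks are omitted.

void bnx2x_schedule_iov_task(struct bnx2x *bp, enum bnx2x_iov_flag flag)
{
	/* Record which IOV event needs servicing... */
	set_bit(flag, &bp->iov_task_state);
	/* ...and kick the dedicated IOV workqueue without delay. */
	queue_delayed_work(bnx2x_iov_wq, &bp->iov_task, 0);
}

void bnx2x_iov_task(struct work_struct *work)
{
	struct bnx2x *bp = container_of(work, struct bnx2x, iov_task.work);

	/* FLR cleanup, previously run directly from the attention path */
	if (test_and_clear_bit(BNX2X_IOV_HANDLE_FLR, &bp->iov_task_state))
		bnx2x_vf_handle_flr_event(bp);

	/* Drain every VF->PF message recorded by bnx2x_vf_mbx_schedule() */
	if (test_and_clear_bit(BNX2X_IOV_HANDLE_VF_MSG, &bp->iov_task_state))
		bnx2x_vf_mbx(bp);
}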

View file

@ -1155,10 +1155,6 @@ struct bnx2x_port {
(offsetof(struct bnx2x_eth_stats, stat_name) / 4)
/* slow path */
/* slow path work-queue */
extern struct workqueue_struct *bnx2x_wq;
#define BNX2X_MAX_NUM_OF_VFS 64
#define BNX2X_VF_CID_WND 4 /* log num of queues per VF. HW config. */
#define BNX2X_CIDS_PER_VF (1 << BNX2X_VF_CID_WND)
@ -1413,6 +1409,12 @@ enum sp_rtnl_flag {
BNX2X_SP_RTNL_RX_MODE,
BNX2X_SP_RTNL_HYPERVISOR_VLAN,
BNX2X_SP_RTNL_TX_STOP,
BNX2X_SP_RTNL_GET_DRV_VERSION,
};
enum bnx2x_iov_flag {
BNX2X_IOV_HANDLE_VF_MSG,
BNX2X_IOV_HANDLE_FLR,
};
struct bnx2x_prev_path_list {
@ -1613,6 +1615,8 @@ struct bnx2x {
int mrrs;
struct delayed_work sp_task;
struct delayed_work iov_task;
atomic_t interrupt_occurred;
struct delayed_work sp_rtnl_task;
@ -1703,6 +1707,10 @@ struct bnx2x {
struct bnx2x_slowpath *slowpath;
dma_addr_t slowpath_mapping;
/* Mechanism protecting the drv_info_to_mcp */
struct mutex drv_info_mutex;
bool drv_info_mng_owner;
/* Total number of FW statistics requests */
u8 fw_stats_num;
@ -1892,6 +1900,9 @@ struct bnx2x {
/* operation indication for the sp_rtnl task */
unsigned long sp_rtnl_state;
/* Indication of the IOV tasks */
unsigned long iov_task_state;
/* DCBX Negotiation results */
struct dcbx_features dcbx_local_feat;
u32 dcbx_error;
@ -2535,6 +2546,8 @@ enum {
void bnx2x_set_local_cmng(struct bnx2x *bp);
void bnx2x_update_mng_version(struct bnx2x *bp);
#define MCPR_SCRATCH_BASE(bp) \
(CHIP_IS_E1x(bp) ? MCP_REG_MCPR_SCRATCH : MCP_A_REG_MCPR_SCRATCH)

View file

@ -2804,6 +2804,9 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
if (CNIC_ENABLED(bp))
bnx2x_load_cnic(bp);
if (IS_PF(bp))
bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_GET_DRV_VERSION, 0);
if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
/* mark driver is loaded in shmem2 */
u32 val;
@ -3030,6 +3033,10 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
bp->state = BNX2X_STATE_CLOSED;
bp->cnic_loaded = false;
/* Clear driver version indication in shmem */
if (IS_PF(bp))
bnx2x_update_mng_version(bp);
/* Check if there are pending parity attentions. If there are - set
* RECOVERY_IN_PROGRESS.
*/

View file

@ -2969,8 +2969,9 @@ static void bnx2x_self_test(struct net_device *dev,
#define IS_PORT_STAT(i) \
((bnx2x_stats_arr[i].flags & STATS_FLAGS_BOTH) == STATS_FLAGS_PORT)
#define IS_FUNC_STAT(i) (bnx2x_stats_arr[i].flags & STATS_FLAGS_FUNC)
#define IS_MF_MODE_STAT(bp) \
(IS_MF(bp) && !(bp->msg_enable & BNX2X_MSG_STATS))
#define HIDE_PORT_STAT(bp) \
((IS_MF(bp) && !(bp->msg_enable & BNX2X_MSG_STATS)) || \
IS_VF(bp))
/* ethtool statistics are displayed for all regular ethernet queues and the
* fcoe L2 queue if not disabled
@ -2992,7 +2993,7 @@ static int bnx2x_get_sset_count(struct net_device *dev, int stringset)
BNX2X_NUM_Q_STATS;
} else
num_strings = 0;
if (IS_MF_MODE_STAT(bp)) {
if (HIDE_PORT_STAT(bp)) {
for (i = 0; i < BNX2X_NUM_STATS; i++)
if (IS_FUNC_STAT(i))
num_strings++;
@ -3047,7 +3048,7 @@ static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
}
for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
if (IS_MF_MODE_STAT(bp) && IS_PORT_STAT(i))
if (HIDE_PORT_STAT(bp) && IS_PORT_STAT(i))
continue;
strcpy(buf + (k + j)*ETH_GSTRING_LEN,
bnx2x_stats_arr[i].string);
@ -3105,7 +3106,7 @@ static void bnx2x_get_ethtool_stats(struct net_device *dev,
hw_stats = (u32 *)&bp->eth_stats;
for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
if (IS_MF_MODE_STAT(bp) && IS_PORT_STAT(i))
if (HIDE_PORT_STAT(bp) && IS_PORT_STAT(i))
continue;
if (bnx2x_stats_arr[i].size == 0) {
/* skip this counter */

View file

@ -2003,6 +2003,23 @@ struct shmem_lfa {
#define SHMEM_LFA_DONT_CLEAR_STAT (1<<24)
};
/* Used to support NCSI get OS driver version:
* on driver load the version value will be set;
* on driver unload a value of 0x0 will be set.
*/
struct os_drv_ver {
#define DRV_VER_NOT_LOADED 0
/* personalities order is important */
#define DRV_PERS_ETHERNET 0
#define DRV_PERS_ISCSI 1
#define DRV_PERS_FCOE 2
/* shmem2 struct is constant, can't add more personalities here */
#define MAX_DRV_PERS 3
u32 versions[MAX_DRV_PERS];
};
struct ncsi_oem_fcoe_features {
u32 fcoe_features1;
#define FCOE_FEATURES1_IOS_PER_CONNECTION_MASK 0x0000FFFF
@ -2217,6 +2234,18 @@ struct shmem2_region {
u32 reserved4; /* Offset 0x150 */
u32 link_attr_sync[PORT_MAX]; /* Offset 0x154 */
#define LINK_ATTR_SYNC_KR2_ENABLE (1<<0)
u32 reserved5[2];
u32 reserved6[PORT_MAX];
/* driver version for each personality */
struct os_drv_ver func_os_drv_ver[E2_FUNC_MAX]; /* Offset 0x16c */
/* Flag to the driver that PF's drv_info_host_addr buffer was read */
u32 mfw_drv_indication;
/* We use indication for each PF (0..3) */
#define MFW_DRV_IND_READ_DONE_OFFSET(_pf_) (1 << (_pf_))
};

View file

@ -120,7 +120,8 @@ static int debug;
module_param(debug, int, S_IRUGO);
MODULE_PARM_DESC(debug, " Default debug msglevel");
struct workqueue_struct *bnx2x_wq;
static struct workqueue_struct *bnx2x_wq;
struct workqueue_struct *bnx2x_iov_wq;
struct bnx2x_mac_vals {
u32 xmac_addr;
@ -1856,8 +1857,6 @@ void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe)
#else
return;
#endif
/* SRIOV: reschedule any 'in_progress' operations */
bnx2x_iov_sp_event(bp, cid, true);
smp_mb__before_atomic_inc();
atomic_inc(&bp->cq_spq_left);
@ -3482,10 +3481,15 @@ static void bnx2x_handle_eee_event(struct bnx2x *bp)
bnx2x_fw_command(bp, DRV_MSG_CODE_EEE_RESULTS_ACK, 0);
}
#define BNX2X_UPDATE_DRV_INFO_IND_LENGTH (20)
#define BNX2X_UPDATE_DRV_INFO_IND_COUNT (25)
static void bnx2x_handle_drv_info_req(struct bnx2x *bp)
{
enum drv_info_opcode op_code;
u32 drv_info_ctl = SHMEM2_RD(bp, drv_info_control);
bool release = false;
int wait;
/* if drv_info version supported by MFW doesn't match - send NACK */
if ((drv_info_ctl & DRV_INFO_CONTROL_VER_MASK) != DRV_INFO_CUR_VER) {
@ -3496,6 +3500,9 @@ static void bnx2x_handle_drv_info_req(struct bnx2x *bp)
op_code = (drv_info_ctl & DRV_INFO_CONTROL_OP_CODE_MASK) >>
DRV_INFO_CONTROL_OP_CODE_SHIFT;
/* Must prevent other flows from accessing drv_info_to_mcp */
mutex_lock(&bp->drv_info_mutex);
memset(&bp->slowpath->drv_info_to_mcp, 0,
sizeof(union drv_info_to_mcp));
@ -3512,7 +3519,7 @@ static void bnx2x_handle_drv_info_req(struct bnx2x *bp)
default:
/* if op code isn't supported - send NACK */
bnx2x_fw_command(bp, DRV_MSG_CODE_DRV_INFO_NACK, 0);
return;
goto out;
}
/* if we got drv_info attn from MFW then these fields are defined in
@ -3524,6 +3531,106 @@ static void bnx2x_handle_drv_info_req(struct bnx2x *bp)
U64_HI(bnx2x_sp_mapping(bp, drv_info_to_mcp)));
bnx2x_fw_command(bp, DRV_MSG_CODE_DRV_INFO_ACK, 0);
/* Since management may want both this and get_driver_version, we need
* to wait until management notifies us that it has finished utilizing
* the buffer.
*/
if (!SHMEM2_HAS(bp, mfw_drv_indication)) {
DP(BNX2X_MSG_MCP, "Management does not support indication\n");
} else if (!bp->drv_info_mng_owner) {
u32 bit = MFW_DRV_IND_READ_DONE_OFFSET((BP_ABS_FUNC(bp) >> 1));
for (wait = 0; wait < BNX2X_UPDATE_DRV_INFO_IND_COUNT; wait++) {
u32 indication = SHMEM2_RD(bp, mfw_drv_indication);
/* Management is done; need to clear indication */
if (indication & bit) {
SHMEM2_WR(bp, mfw_drv_indication,
indication & ~bit);
release = true;
break;
}
msleep(BNX2X_UPDATE_DRV_INFO_IND_LENGTH);
}
}
if (!release) {
DP(BNX2X_MSG_MCP, "Management did not release indication\n");
bp->drv_info_mng_owner = true;
}
out:
mutex_unlock(&bp->drv_info_mutex);
}
static u32 bnx2x_update_mng_version_utility(u8 *version, bool bnx2x_format)
{
u8 vals[4];
int i = 0;
if (bnx2x_format) {
i = sscanf(version, "1.%c%hhd.%hhd.%hhd",
&vals[0], &vals[1], &vals[2], &vals[3]);
if (i > 0)
vals[0] -= '0';
} else {
i = sscanf(version, "%hhd.%hhd.%hhd.%hhd",
&vals[0], &vals[1], &vals[2], &vals[3]);
}
while (i < 4)
vals[i++] = 0;
return (vals[0] << 24) | (vals[1] << 16) | (vals[2] << 8) | vals[3];
}
void bnx2x_update_mng_version(struct bnx2x *bp)
{
u32 iscsiver = DRV_VER_NOT_LOADED;
u32 fcoever = DRV_VER_NOT_LOADED;
u32 ethver = DRV_VER_NOT_LOADED;
int idx = BP_FW_MB_IDX(bp);
u8 *version;
if (!SHMEM2_HAS(bp, func_os_drv_ver))
return;
mutex_lock(&bp->drv_info_mutex);
/* Must not proceed when `bnx2x_handle_drv_info_req' is feasible */
if (bp->drv_info_mng_owner)
goto out;
if (bp->state != BNX2X_STATE_OPEN)
goto out;
/* Parse ethernet driver version */
ethver = bnx2x_update_mng_version_utility(DRV_MODULE_VERSION, true);
if (!CNIC_LOADED(bp))
goto out;
/* Try getting storage driver version via cnic */
memset(&bp->slowpath->drv_info_to_mcp, 0,
sizeof(union drv_info_to_mcp));
bnx2x_drv_info_iscsi_stat(bp);
version = bp->slowpath->drv_info_to_mcp.iscsi_stat.version;
iscsiver = bnx2x_update_mng_version_utility(version, false);
memset(&bp->slowpath->drv_info_to_mcp, 0,
sizeof(union drv_info_to_mcp));
bnx2x_drv_info_fcoe_stat(bp);
version = bp->slowpath->drv_info_to_mcp.fcoe_stat.version;
fcoever = bnx2x_update_mng_version_utility(version, false);
out:
SHMEM2_WR(bp, func_os_drv_ver[idx].versions[DRV_PERS_ETHERNET], ethver);
SHMEM2_WR(bp, func_os_drv_ver[idx].versions[DRV_PERS_ISCSI], iscsiver);
SHMEM2_WR(bp, func_os_drv_ver[idx].versions[DRV_PERS_FCOE], fcoever);
mutex_unlock(&bp->drv_info_mutex);
DP(BNX2X_MSG_MCP, "Setting driver version: ETH [%08x] iSCSI [%08x] FCoE [%08x]\n",
ethver, iscsiver, fcoever);
}
static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
@ -4052,7 +4159,8 @@ static void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
bnx2x_handle_drv_info_req(bp);
if (val & DRV_STATUS_VF_DISABLED)
bnx2x_vf_handle_flr_event(bp);
bnx2x_schedule_iov_task(bp,
BNX2X_IOV_HANDLE_FLR);
if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
bnx2x_pmf_update(bp);
@ -5243,8 +5351,8 @@ static void bnx2x_eq_int(struct bnx2x *bp)
/* handle eq element */
switch (opcode) {
case EVENT_RING_OPCODE_VF_PF_CHANNEL:
DP(BNX2X_MSG_IOV, "vf pf channel element on eq\n");
bnx2x_vf_mbx(bp, &elem->message.data.vf_pf_event);
bnx2x_vf_mbx_schedule(bp,
&elem->message.data.vf_pf_event);
continue;
case EVENT_RING_OPCODE_STAT_QUERY:
@ -5459,13 +5567,6 @@ static void bnx2x_sp_task(struct work_struct *work)
le16_to_cpu(bp->def_att_idx), IGU_INT_ENABLE, 1);
}
/* must be called after the EQ processing (since eq leads to sriov
* ramrod completion flows).
* This flow may have been scheduled by the arrival of a ramrod
* completion, or by the sriov code rescheduling itself.
*/
bnx2x_iov_sp_task(bp);
/* afex - poll to check if VIFSET_ACK should be sent to MFW */
if (test_and_clear_bit(BNX2X_AFEX_PENDING_VIFSET_MCP_ACK,
&bp->sp_state)) {
@ -8882,6 +8983,7 @@ static int bnx2x_func_wait_started(struct bnx2x *bp)
synchronize_irq(bp->pdev->irq);
flush_workqueue(bnx2x_wq);
flush_workqueue(bnx2x_iov_wq);
while (bnx2x_func_get_state(bp, &bp->func_obj) !=
BNX2X_F_STATE_STARTED && tout--)
@ -9807,6 +9909,10 @@ sp_rtnl_not_reset:
bnx2x_dcbx_resume_hw_tx(bp);
}
if (test_and_clear_bit(BNX2X_SP_RTNL_GET_DRV_VERSION,
&bp->sp_rtnl_state))
bnx2x_update_mng_version(bp);
/* work which needs rtnl lock not-taken (as it takes the lock itself and
* can be called from other contexts as well)
*/
@ -11757,12 +11863,15 @@ static int bnx2x_init_bp(struct bnx2x *bp)
mutex_init(&bp->port.phy_mutex);
mutex_init(&bp->fw_mb_mutex);
mutex_init(&bp->drv_info_mutex);
bp->drv_info_mng_owner = false;
spin_lock_init(&bp->stats_lock);
sema_init(&bp->stats_sema, 1);
INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
INIT_DELAYED_WORK(&bp->sp_rtnl_task, bnx2x_sp_rtnl_task);
INIT_DELAYED_WORK(&bp->period_task, bnx2x_period_task);
INIT_DELAYED_WORK(&bp->iov_task, bnx2x_iov_task);
if (IS_PF(bp)) {
rc = bnx2x_get_hwinfo(bp);
if (rc)
@ -13385,11 +13494,18 @@ static int __init bnx2x_init(void)
pr_err("Cannot create workqueue\n");
return -ENOMEM;
}
bnx2x_iov_wq = create_singlethread_workqueue("bnx2x_iov");
if (!bnx2x_iov_wq) {
pr_err("Cannot create iov workqueue\n");
destroy_workqueue(bnx2x_wq);
return -ENOMEM;
}
ret = pci_register_driver(&bnx2x_pci_driver);
if (ret) {
pr_err("Cannot register driver\n");
destroy_workqueue(bnx2x_wq);
destroy_workqueue(bnx2x_iov_wq);
}
return ret;
}
@ -13401,6 +13517,7 @@ static void __exit bnx2x_cleanup(void)
pci_unregister_driver(&bnx2x_pci_driver);
destroy_workqueue(bnx2x_wq);
destroy_workqueue(bnx2x_iov_wq);
/* Free globally allocated resources */
list_for_each_safe(pos, q, &bnx2x_prev_list) {
@ -13794,6 +13911,7 @@ static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
REG_WR(bp, scratch_offset + i,
*(host_addr + i/4));
}
bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_GET_DRV_VERSION, 0);
break;
}
@ -13811,6 +13929,7 @@ static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
cap &= ~DRV_FLAGS_CAPABILITIES_LOADED_FCOE;
SHMEM2_WR(bp, drv_capabilities_flag[idx], cap);
}
bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_GET_DRV_VERSION, 0);
break;
}
@ -13916,6 +14035,9 @@ static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops,
rcu_assign_pointer(bp->cnic_ops, ops);
/* Schedule driver to read CNIC driver versions */
bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_GET_DRV_VERSION, 0);
return 0;
}

File diff suppressed because it is too large

View file

@ -30,6 +30,8 @@ enum sample_bulletin_result {
#ifdef CONFIG_BNX2X_SRIOV
extern struct workqueue_struct *bnx2x_iov_wq;
/* The bnx2x device structure holds vfdb structure described below.
* The VF array is indexed by the relative vfid.
*/
@ -86,113 +88,32 @@ struct bnx2x_vf_queue {
bool sp_initialized;
};
/* struct bnx2x_vfop_qctor_params - prepare queue construction parameters:
* q-init, q-setup and SB index
/* struct bnx2x_vf_queue_construct_params - prepare queue construction
* parameters: q-init, q-setup and SB index
*/
struct bnx2x_vfop_qctor_params {
struct bnx2x_vf_queue_construct_params {
struct bnx2x_queue_state_params qstate;
struct bnx2x_queue_setup_params prep_qsetup;
};
/* VFOP parameters (one copy per VF) */
union bnx2x_vfop_params {
struct bnx2x_vlan_mac_ramrod_params vlan_mac;
struct bnx2x_rx_mode_ramrod_params rx_mode;
struct bnx2x_mcast_ramrod_params mcast;
struct bnx2x_config_rss_params rss;
struct bnx2x_vfop_qctor_params qctor;
struct bnx2x_queue_state_params qstate;
};
/* forward */
struct bnx2x_virtf;
/* VFOP definitions */
typedef void (*vfop_handler_t)(struct bnx2x *bp, struct bnx2x_virtf *vf);
struct bnx2x_vfop_cmd {
vfop_handler_t done;
bool block;
};
/* VFOP queue filters command additional arguments */
struct bnx2x_vfop_filter {
struct list_head link;
struct bnx2x_vf_mac_vlan_filter {
int type;
#define BNX2X_VFOP_FILTER_MAC 1
#define BNX2X_VFOP_FILTER_VLAN 2
#define BNX2X_VF_FILTER_MAC 1
#define BNX2X_VF_FILTER_VLAN 2
bool add;
u8 *mac;
u16 vid;
};
struct bnx2x_vfop_filters {
int add_cnt;
struct list_head head;
struct bnx2x_vfop_filter filters[];
};
/* transient list allocated, built and saved until its
* passed to the SP-VERBs layer.
*/
struct bnx2x_vfop_args_mcast {
int mc_num;
struct bnx2x_mcast_list_elem *mc;
};
struct bnx2x_vfop_args_qctor {
int qid;
u16 sb_idx;
};
struct bnx2x_vfop_args_qdtor {
int qid;
struct eth_context *cxt;
};
struct bnx2x_vfop_args_defvlan {
int qid;
bool enable;
u16 vid;
u8 prio;
};
struct bnx2x_vfop_args_qx {
int qid;
bool en_add;
};
struct bnx2x_vfop_args_filters {
struct bnx2x_vfop_filters *multi_filter;
atomic_t *credit; /* non NULL means 'don't consume credit' */
};
struct bnx2x_vfop_args_tpa {
int qid;
dma_addr_t sge_map[PFVF_MAX_QUEUES_PER_VF];
};
union bnx2x_vfop_args {
struct bnx2x_vfop_args_mcast mc_list;
struct bnx2x_vfop_args_qctor qctor;
struct bnx2x_vfop_args_qdtor qdtor;
struct bnx2x_vfop_args_defvlan defvlan;
struct bnx2x_vfop_args_qx qx;
struct bnx2x_vfop_args_filters filters;
struct bnx2x_vfop_args_tpa tpa;
};
struct bnx2x_vfop {
struct list_head link;
int rc; /* return code */
int state; /* next state */
union bnx2x_vfop_args args; /* extra arguments */
union bnx2x_vfop_params *op_p; /* ramrod params */
/* state machine callbacks */
vfop_handler_t transition;
vfop_handler_t done;
struct bnx2x_vf_mac_vlan_filters {
int count;
struct bnx2x_vf_mac_vlan_filter filters[];
};
/* vf context */
@ -212,15 +133,7 @@ struct bnx2x_virtf {
#define VF_ENABLED 2 /* VF Enabled */
#define VF_RESET 3 /* VF FLR'd, pending cleanup */
/* non 0 during flr cleanup */
u8 flr_clnup_stage;
#define VF_FLR_CLN 1 /* reclaim resources and do 'final cleanup'
* sans the end-wait
*/
#define VF_FLR_ACK 2 /* ACK flr notification */
#define VF_FLR_EPILOG 3 /* wait for VF remnants to dissipate in the HW
* ~ final cleanup' end wait
*/
bool flr_clnup_stage; /* true during flr cleanup */
/* dma */
dma_addr_t fw_stat_map; /* valid iff VF_CFG_STATS */
@ -284,11 +197,6 @@ struct bnx2x_virtf {
struct bnx2x_rss_config_obj rss_conf_obj;
/* slow-path operations */
atomic_t op_in_progress;
int op_rc;
bool op_wait_blocking;
struct list_head op_list_head;
union bnx2x_vfop_params op_params;
struct mutex op_mutex; /* one vfop at a time mutex */
enum channel_tlvs op_current;
};
@ -346,11 +254,6 @@ struct bnx2x_vf_mbx {
u32 vf_addr_hi;
struct vfpf_first_tlv first_tlv; /* saved VF request header */
u8 flags;
#define VF_MSG_INPROCESS 0x1 /* failsafe - the FW should prevent
* more then one pending msg
*/
};
struct bnx2x_vf_sp {
@ -427,6 +330,10 @@ struct bnx2x_vfdb {
/* the number of msix vectors belonging to this PF designated for VFs */
u16 vf_sbs_pool;
u16 first_vf_igu_entry;
/* sp_rtnl synchronization */
struct mutex event_mutex;
u64 event_occur;
};
/* queue access */
@ -476,13 +383,13 @@ void bnx2x_iov_init_dq(struct bnx2x *bp);
void bnx2x_iov_init_dmae(struct bnx2x *bp);
void bnx2x_iov_set_queue_sp_obj(struct bnx2x *bp, int vf_cid,
struct bnx2x_queue_sp_obj **q_obj);
void bnx2x_iov_sp_event(struct bnx2x *bp, int vf_cid, bool queue_work);
int bnx2x_iov_eq_sp_event(struct bnx2x *bp, union event_ring_elem *elem);
void bnx2x_iov_adjust_stats_req(struct bnx2x *bp);
void bnx2x_iov_storm_stats_update(struct bnx2x *bp);
void bnx2x_iov_sp_task(struct bnx2x *bp);
/* global vf mailbox routines */
void bnx2x_vf_mbx(struct bnx2x *bp, struct vf_pf_event_data *vfpf_event);
void bnx2x_vf_mbx(struct bnx2x *bp);
void bnx2x_vf_mbx_schedule(struct bnx2x *bp,
struct vf_pf_event_data *vfpf_event);
void bnx2x_vf_enable_mbx(struct bnx2x *bp, u8 abs_vfid);
/* CORE VF API */
@ -495,162 +402,6 @@ int bnx2x_vf_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf,
int bnx2x_vf_init(struct bnx2x *bp, struct bnx2x_virtf *vf,
dma_addr_t *sb_map);
/* VFOP generic helpers */
#define bnx2x_vfop_default(state) do { \
BNX2X_ERR("Bad state %d\n", (state)); \
vfop->rc = -EINVAL; \
goto op_err; \
} while (0)
enum {
VFOP_DONE,
VFOP_CONT,
VFOP_VERIFY_PEND,
};
#define bnx2x_vfop_finalize(vf, rc, next) do { \
if ((rc) < 0) \
goto op_err; \
else if ((rc) > 0) \
goto op_pending; \
else if ((next) == VFOP_DONE) \
goto op_done; \
else if ((next) == VFOP_VERIFY_PEND) \
BNX2X_ERR("expected pending\n"); \
else { \
DP(BNX2X_MSG_IOV, "no ramrod. Scheduling\n"); \
atomic_set(&vf->op_in_progress, 1); \
queue_delayed_work(bnx2x_wq, &bp->sp_task, 0); \
return; \
} \
} while (0)
#define bnx2x_vfop_opset(first_state, trans_hndlr, done_hndlr) \
do { \
vfop->state = first_state; \
vfop->op_p = &vf->op_params; \
vfop->transition = trans_hndlr; \
vfop->done = done_hndlr; \
} while (0)
static inline struct bnx2x_vfop *bnx2x_vfop_cur(struct bnx2x *bp,
struct bnx2x_virtf *vf)
{
WARN(!mutex_is_locked(&vf->op_mutex), "about to access vf op linked list but mutex was not locked!");
WARN_ON(list_empty(&vf->op_list_head));
return list_first_entry(&vf->op_list_head, struct bnx2x_vfop, link);
}
static inline struct bnx2x_vfop *bnx2x_vfop_add(struct bnx2x *bp,
struct bnx2x_virtf *vf)
{
struct bnx2x_vfop *vfop = kzalloc(sizeof(*vfop), GFP_KERNEL);
WARN(!mutex_is_locked(&vf->op_mutex), "about to access vf op linked list but mutex was not locked!");
if (vfop) {
INIT_LIST_HEAD(&vfop->link);
list_add(&vfop->link, &vf->op_list_head);
}
return vfop;
}
static inline void bnx2x_vfop_end(struct bnx2x *bp, struct bnx2x_virtf *vf,
struct bnx2x_vfop *vfop)
{
/* rc < 0 - error, otherwise set to 0 */
DP(BNX2X_MSG_IOV, "rc was %d\n", vfop->rc);
if (vfop->rc >= 0)
vfop->rc = 0;
DP(BNX2X_MSG_IOV, "rc is now %d\n", vfop->rc);
/* unlink the current op context and propagate error code
* must be done before invoking the 'done()' handler
*/
WARN(!mutex_is_locked(&vf->op_mutex),
"about to access vf op linked list but mutex was not locked!");
list_del(&vfop->link);
if (list_empty(&vf->op_list_head)) {
DP(BNX2X_MSG_IOV, "list was empty %d\n", vfop->rc);
vf->op_rc = vfop->rc;
DP(BNX2X_MSG_IOV, "copying rc vf->op_rc %d, vfop->rc %d\n",
vf->op_rc, vfop->rc);
} else {
struct bnx2x_vfop *cur_vfop;
DP(BNX2X_MSG_IOV, "list not empty %d\n", vfop->rc);
cur_vfop = bnx2x_vfop_cur(bp, vf);
cur_vfop->rc = vfop->rc;
DP(BNX2X_MSG_IOV, "copying rc vf->op_rc %d, vfop->rc %d\n",
vf->op_rc, vfop->rc);
}
/* invoke done handler */
if (vfop->done) {
DP(BNX2X_MSG_IOV, "calling done handler\n");
vfop->done(bp, vf);
} else {
/* there is no done handler for the operation to unlock
* the mutex. Must have gotten here from PF initiated VF RELEASE
*/
bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_RELEASE_VF);
}
DP(BNX2X_MSG_IOV, "done handler complete. vf->op_rc %d, vfop->rc %d\n",
vf->op_rc, vfop->rc);
/* if this is the last nested op reset the wait_blocking flag
* to release any blocking wrappers, only after 'done()' is invoked
*/
if (list_empty(&vf->op_list_head)) {
DP(BNX2X_MSG_IOV, "list was empty after done %d\n", vfop->rc);
vf->op_wait_blocking = false;
}
kfree(vfop);
}
static inline int bnx2x_vfop_wait_blocking(struct bnx2x *bp,
struct bnx2x_virtf *vf)
{
/* can take a while if any port is running */
int cnt = 5000;
might_sleep();
while (cnt--) {
if (vf->op_wait_blocking == false) {
#ifdef BNX2X_STOP_ON_ERROR
DP(BNX2X_MSG_IOV, "exit (cnt %d)\n", 5000 - cnt);
#endif
return 0;
}
usleep_range(1000, 2000);
if (bp->panic)
return -EIO;
}
/* timeout! */
#ifdef BNX2X_STOP_ON_ERROR
bnx2x_panic();
#endif
return -EBUSY;
}
static inline int bnx2x_vfop_transition(struct bnx2x *bp,
struct bnx2x_virtf *vf,
vfop_handler_t transition,
bool block)
{
if (block)
vf->op_wait_blocking = true;
transition(bp, vf);
if (block)
return bnx2x_vfop_wait_blocking(bp, vf);
return 0;
}
/* VFOP queue construction helpers */
void bnx2x_vfop_qctor_dump_tx(struct bnx2x *bp, struct bnx2x_virtf *vf,
struct bnx2x_queue_init_params *init_params,
@ -665,64 +416,41 @@ void bnx2x_vfop_qctor_dump_rx(struct bnx2x *bp, struct bnx2x_virtf *vf,
void bnx2x_vfop_qctor_prep(struct bnx2x *bp,
struct bnx2x_virtf *vf,
struct bnx2x_vf_queue *q,
struct bnx2x_vfop_qctor_params *p,
struct bnx2x_vf_queue_construct_params *p,
unsigned long q_type);
int bnx2x_vfop_mac_list_cmd(struct bnx2x *bp,
struct bnx2x_virtf *vf,
struct bnx2x_vfop_cmd *cmd,
struct bnx2x_vfop_filters *macs,
int qid, bool drv_only);
int bnx2x_vfop_vlan_list_cmd(struct bnx2x *bp,
struct bnx2x_virtf *vf,
struct bnx2x_vfop_cmd *cmd,
struct bnx2x_vfop_filters *vlans,
int qid, bool drv_only);
int bnx2x_vf_mac_vlan_config_list(struct bnx2x *bp, struct bnx2x_virtf *vf,
struct bnx2x_vf_mac_vlan_filters *filters,
int qid, bool drv_only);
int bnx2x_vfop_qsetup_cmd(struct bnx2x *bp,
struct bnx2x_virtf *vf,
struct bnx2x_vfop_cmd *cmd,
int qid);
int bnx2x_vf_queue_setup(struct bnx2x *bp, struct bnx2x_virtf *vf, int qid,
struct bnx2x_vf_queue_construct_params *qctor);
int bnx2x_vfop_qdown_cmd(struct bnx2x *bp,
struct bnx2x_virtf *vf,
struct bnx2x_vfop_cmd *cmd,
int qid);
int bnx2x_vf_queue_teardown(struct bnx2x *bp, struct bnx2x_virtf *vf, int qid);
int bnx2x_vfop_mcast_cmd(struct bnx2x *bp,
struct bnx2x_virtf *vf,
struct bnx2x_vfop_cmd *cmd,
bnx2x_mac_addr_t *mcasts,
int mcast_num, bool drv_only);
int bnx2x_vf_mcast(struct bnx2x *bp, struct bnx2x_virtf *vf,
bnx2x_mac_addr_t *mcasts, int mc_num, bool drv_only);
int bnx2x_vfop_rxmode_cmd(struct bnx2x *bp,
struct bnx2x_virtf *vf,
struct bnx2x_vfop_cmd *cmd,
int qid, unsigned long accept_flags);
int bnx2x_vf_rxmode(struct bnx2x *bp, struct bnx2x_virtf *vf,
int qid, unsigned long accept_flags);
int bnx2x_vfop_close_cmd(struct bnx2x *bp,
struct bnx2x_virtf *vf,
struct bnx2x_vfop_cmd *cmd);
int bnx2x_vf_close(struct bnx2x *bp, struct bnx2x_virtf *vf);
int bnx2x_vfop_release_cmd(struct bnx2x *bp,
struct bnx2x_virtf *vf,
struct bnx2x_vfop_cmd *cmd);
int bnx2x_vf_free(struct bnx2x *bp, struct bnx2x_virtf *vf);
int bnx2x_vfop_rss_cmd(struct bnx2x *bp,
struct bnx2x_virtf *vf,
struct bnx2x_vfop_cmd *cmd);
int bnx2x_vf_rss_update(struct bnx2x *bp, struct bnx2x_virtf *vf,
struct bnx2x_config_rss_params *rss);
int bnx2x_vfop_tpa_cmd(struct bnx2x *bp,
struct bnx2x_virtf *vf,
struct bnx2x_vfop_cmd *cmd,
struct vfpf_tpa_tlv *tpa_tlv);
int bnx2x_vf_tpa_update(struct bnx2x *bp, struct bnx2x_virtf *vf,
struct vfpf_tpa_tlv *tlv,
struct bnx2x_queue_update_tpa_params *params);
/* VF release ~ VF close + VF release-resources
*
* Release is the ultimate SW shutdown and is called whenever an
* irrecoverable error is encountered.
*/
void bnx2x_vf_release(struct bnx2x *bp, struct bnx2x_virtf *vf, bool block);
int bnx2x_vf_release(struct bnx2x *bp, struct bnx2x_virtf *vf);
int bnx2x_vf_idx_by_abs_fid(struct bnx2x *bp, u16 abs_vfid);
u8 bnx2x_vf_max_queue_cnt(struct bnx2x *bp, struct bnx2x_virtf *vf);
@ -785,18 +513,20 @@ void bnx2x_pf_set_vfs_vlan(struct bnx2x *bp);
int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs);
void bnx2x_iov_channel_down(struct bnx2x *bp);
void bnx2x_iov_task(struct work_struct *work);
void bnx2x_schedule_iov_task(struct bnx2x *bp, enum bnx2x_iov_flag flag);
#else /* CONFIG_BNX2X_SRIOV */
static inline void bnx2x_iov_set_queue_sp_obj(struct bnx2x *bp, int vf_cid,
struct bnx2x_queue_sp_obj **q_obj) {}
static inline void bnx2x_iov_sp_event(struct bnx2x *bp, int vf_cid,
bool queue_work) {}
static inline void bnx2x_vf_handle_flr_event(struct bnx2x *bp) {}
static inline int bnx2x_iov_eq_sp_event(struct bnx2x *bp,
union event_ring_elem *elem) {return 1; }
static inline void bnx2x_iov_sp_task(struct bnx2x *bp) {}
static inline void bnx2x_vf_mbx(struct bnx2x *bp,
struct vf_pf_event_data *vfpf_event) {}
static inline void bnx2x_vf_mbx(struct bnx2x *bp) {}
static inline void bnx2x_vf_mbx_schedule(struct bnx2x *bp,
struct vf_pf_event_data *vfpf_event) {}
static inline int bnx2x_iov_init_ilt(struct bnx2x *bp, u16 line) {return line; }
static inline void bnx2x_iov_init_dq(struct bnx2x *bp) {}
static inline int bnx2x_iov_alloc_mem(struct bnx2x *bp) {return 0; }
@ -843,5 +573,8 @@ static inline void bnx2x_pf_set_vfs_vlan(struct bnx2x *bp) {}
static inline int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs) {return 0; }
static inline void bnx2x_iov_channel_down(struct bnx2x *bp) {}
static inline void bnx2x_iov_task(struct work_struct *work) {}
static inline void bnx2x_schedule_iov_task(struct bnx2x *bp, enum bnx2x_iov_flag flag) {}
#endif /* CONFIG_BNX2X_SRIOV */
#endif /* bnx2x_sriov.h */

View file

@ -673,6 +673,7 @@ static int bnx2x_vfpf_teardown_queue(struct bnx2x *bp, int qidx)
out:
bnx2x_vfpf_finalize(bp, &req->first_tlv);
return rc;
}
@ -895,29 +896,16 @@ int bnx2x_vfpf_storm_rx_mode(struct bnx2x *bp)
DP(NETIF_MSG_IFUP, "Rx mode is %d\n", mode);
switch (mode) {
case BNX2X_RX_MODE_NONE: /* no Rx */
/* Ignore everything except MODE_NONE */
if (mode == BNX2X_RX_MODE_NONE) {
req->rx_mask = VFPF_RX_MASK_ACCEPT_NONE;
break;
case BNX2X_RX_MODE_NORMAL:
} else {
/* Current PF driver will not look at the specific flags,
* but they are required when working with older drivers on hv.
*/
req->rx_mask = VFPF_RX_MASK_ACCEPT_MATCHED_MULTICAST;
req->rx_mask |= VFPF_RX_MASK_ACCEPT_MATCHED_UNICAST;
req->rx_mask |= VFPF_RX_MASK_ACCEPT_BROADCAST;
break;
case BNX2X_RX_MODE_ALLMULTI:
req->rx_mask = VFPF_RX_MASK_ACCEPT_ALL_MULTICAST;
req->rx_mask |= VFPF_RX_MASK_ACCEPT_MATCHED_UNICAST;
req->rx_mask |= VFPF_RX_MASK_ACCEPT_BROADCAST;
break;
case BNX2X_RX_MODE_PROMISC:
req->rx_mask = VFPF_RX_MASK_ACCEPT_ALL_UNICAST;
req->rx_mask |= VFPF_RX_MASK_ACCEPT_ALL_MULTICAST;
req->rx_mask |= VFPF_RX_MASK_ACCEPT_BROADCAST;
break;
default:
BNX2X_ERR("BAD rx mode (%d)\n", mode);
rc = -EINVAL;
goto out;
}
req->flags |= VFPF_SET_Q_FILTERS_RX_MASK_CHANGED;
@ -938,7 +926,7 @@ int bnx2x_vfpf_storm_rx_mode(struct bnx2x *bp)
BNX2X_ERR("Set Rx mode failed: %d\n", resp->hdr.status);
rc = -EINVAL;
}
out:
bnx2x_vfpf_finalize(bp, &req->first_tlv);
return rc;
@ -1048,7 +1036,8 @@ static void bnx2x_vf_mbx_resp_single_tlv(struct bnx2x *bp,
}
static void bnx2x_vf_mbx_resp_send_msg(struct bnx2x *bp,
struct bnx2x_virtf *vf)
struct bnx2x_virtf *vf,
int vf_rc)
{
struct bnx2x_vf_mbx *mbx = BP_VF_MBX(bp, vf->index);
struct pfvf_general_resp_tlv *resp = &mbx->msg->resp.general_resp;
@ -1060,7 +1049,7 @@ static void bnx2x_vf_mbx_resp_send_msg(struct bnx2x *bp,
DP(BNX2X_MSG_IOV, "mailbox vf address hi 0x%x, lo 0x%x, offset 0x%x\n",
mbx->vf_addr_hi, mbx->vf_addr_lo, mbx->first_tlv.resp_msg_offset);
resp->hdr.status = bnx2x_pfvf_status_codes(vf->op_rc);
resp->hdr.status = bnx2x_pfvf_status_codes(vf_rc);
/* send response */
vf_addr = HILO_U64(mbx->vf_addr_hi, mbx->vf_addr_lo) +
@ -1089,9 +1078,6 @@ static void bnx2x_vf_mbx_resp_send_msg(struct bnx2x *bp,
storm_memset_vf_mbx_ack(bp, vf->abs_vfid);
mmiowb();
/* initiate dmae to send the response */
mbx->flags &= ~VF_MSG_INPROCESS;
/* copy the response header including status-done field,
* must be last dmae, must be after FW is acked
*/
@ -1111,14 +1097,15 @@ static void bnx2x_vf_mbx_resp_send_msg(struct bnx2x *bp,
return;
mbx_error:
bnx2x_vf_release(bp, vf, false); /* non blocking */
bnx2x_vf_release(bp, vf);
}
static void bnx2x_vf_mbx_resp(struct bnx2x *bp,
struct bnx2x_virtf *vf)
struct bnx2x_virtf *vf,
int rc)
{
bnx2x_vf_mbx_resp_single_tlv(bp, vf);
bnx2x_vf_mbx_resp_send_msg(bp, vf);
bnx2x_vf_mbx_resp_send_msg(bp, vf, rc);
}
static void bnx2x_vf_mbx_resp_phys_port(struct bnx2x *bp,
@ -1242,8 +1229,7 @@ static void bnx2x_vf_mbx_acquire_resp(struct bnx2x *bp, struct bnx2x_virtf *vf,
sizeof(struct channel_list_end_tlv));
/* send the response */
vf->op_rc = vfop_status;
bnx2x_vf_mbx_resp_send_msg(bp, vf);
bnx2x_vf_mbx_resp_send_msg(bp, vf, vfop_status);
}
static void bnx2x_vf_mbx_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf,
@ -1275,19 +1261,20 @@ static void bnx2x_vf_mbx_init_vf(struct bnx2x *bp, struct bnx2x_virtf *vf,
struct bnx2x_vf_mbx *mbx)
{
struct vfpf_init_tlv *init = &mbx->msg->req.init;
int rc;
/* record ghost addresses from vf message */
vf->spq_map = init->spq_addr;
vf->fw_stat_map = init->stats_addr;
vf->stats_stride = init->stats_stride;
vf->op_rc = bnx2x_vf_init(bp, vf, (dma_addr_t *)init->sb_addr);
rc = bnx2x_vf_init(bp, vf, (dma_addr_t *)init->sb_addr);
/* set VF multiqueue statistics collection mode */
if (init->flags & VFPF_INIT_FLG_STATS_COALESCE)
vf->cfg_flags |= VF_CFG_STATS_COALESCE;
/* response */
bnx2x_vf_mbx_resp(bp, vf);
bnx2x_vf_mbx_resp(bp, vf, rc);
}
/* convert MBX queue-flags to standard SP queue-flags */
@ -1322,16 +1309,14 @@ static void bnx2x_vf_mbx_setup_q(struct bnx2x *bp, struct bnx2x_virtf *vf,
struct bnx2x_vf_mbx *mbx)
{
struct vfpf_setup_q_tlv *setup_q = &mbx->msg->req.setup_q;
struct bnx2x_vfop_cmd cmd = {
.done = bnx2x_vf_mbx_resp,
.block = false,
};
struct bnx2x_vf_queue_construct_params qctor;
int rc = 0;
/* verify vf_qid */
if (setup_q->vf_qid >= vf_rxq_count(vf)) {
BNX2X_ERR("vf_qid %d invalid, max queue count is %d\n",
setup_q->vf_qid, vf_rxq_count(vf));
vf->op_rc = -EINVAL;
rc = -EINVAL;
goto response;
}
@ -1349,9 +1334,10 @@ static void bnx2x_vf_mbx_setup_q(struct bnx2x *bp, struct bnx2x_virtf *vf,
bnx2x_leading_vfq_init(bp, vf, q);
/* re-init the VF operation context */
memset(&vf->op_params.qctor, 0 , sizeof(vf->op_params.qctor));
setup_p = &vf->op_params.qctor.prep_qsetup;
init_p = &vf->op_params.qctor.qstate.params.init;
memset(&qctor, 0 ,
sizeof(struct bnx2x_vf_queue_construct_params));
setup_p = &qctor.prep_qsetup;
init_p = &qctor.qstate.params.init;
/* activate immediately */
__set_bit(BNX2X_Q_FLG_ACTIVE, &setup_p->flags);
@ -1437,44 +1423,34 @@ static void bnx2x_vf_mbx_setup_q(struct bnx2x *bp, struct bnx2x_virtf *vf,
q->index, q->sb_idx);
}
/* complete the preparations */
bnx2x_vfop_qctor_prep(bp, vf, q, &vf->op_params.qctor, q_type);
bnx2x_vfop_qctor_prep(bp, vf, q, &qctor, q_type);
vf->op_rc = bnx2x_vfop_qsetup_cmd(bp, vf, &cmd, q->index);
if (vf->op_rc)
rc = bnx2x_vf_queue_setup(bp, vf, q->index, &qctor);
if (rc)
goto response;
return;
}
response:
bnx2x_vf_mbx_resp(bp, vf);
bnx2x_vf_mbx_resp(bp, vf, rc);
}
enum bnx2x_vfop_filters_state {
BNX2X_VFOP_MBX_Q_FILTERS_MACS,
BNX2X_VFOP_MBX_Q_FILTERS_VLANS,
BNX2X_VFOP_MBX_Q_FILTERS_RXMODE,
BNX2X_VFOP_MBX_Q_FILTERS_MCAST,
BNX2X_VFOP_MBX_Q_FILTERS_DONE
};
static int bnx2x_vf_mbx_macvlan_list(struct bnx2x *bp,
struct bnx2x_virtf *vf,
struct vfpf_set_q_filters_tlv *tlv,
struct bnx2x_vfop_filters **pfl,
struct bnx2x_vf_mac_vlan_filters **pfl,
u32 type_flag)
{
int i, j;
struct bnx2x_vfop_filters *fl = NULL;
struct bnx2x_vf_mac_vlan_filters *fl = NULL;
size_t fsz;
fsz = tlv->n_mac_vlan_filters * sizeof(struct bnx2x_vfop_filter) +
sizeof(struct bnx2x_vfop_filters);
fsz = tlv->n_mac_vlan_filters *
sizeof(struct bnx2x_vf_mac_vlan_filter) +
sizeof(struct bnx2x_vf_mac_vlan_filters);
fl = kzalloc(fsz, GFP_KERNEL);
if (!fl)
return -ENOMEM;
INIT_LIST_HEAD(&fl->head);
for (i = 0, j = 0; i < tlv->n_mac_vlan_filters; i++) {
struct vfpf_q_mac_vlan_filter *msg_filter = &tlv->filters[i];
@ -1482,17 +1458,17 @@ static int bnx2x_vf_mbx_macvlan_list(struct bnx2x *bp,
continue;
if (type_flag == VFPF_Q_FILTER_DEST_MAC_VALID) {
fl->filters[j].mac = msg_filter->mac;
fl->filters[j].type = BNX2X_VFOP_FILTER_MAC;
fl->filters[j].type = BNX2X_VF_FILTER_MAC;
} else {
fl->filters[j].vid = msg_filter->vlan_tag;
fl->filters[j].type = BNX2X_VFOP_FILTER_VLAN;
fl->filters[j].type = BNX2X_VF_FILTER_VLAN;
}
fl->filters[j].add =
(msg_filter->flags & VFPF_Q_FILTER_SET_MAC) ?
true : false;
list_add_tail(&fl->filters[j++].link, &fl->head);
fl->count++;
}
if (list_empty(&fl->head))
if (!fl->count)
kfree(fl);
else
*pfl = fl;
@ -1532,168 +1508,88 @@ static void bnx2x_vf_mbx_dp_q_filters(struct bnx2x *bp, int msglvl,
#define VFPF_MAC_FILTER VFPF_Q_FILTER_DEST_MAC_VALID
#define VFPF_VLAN_FILTER VFPF_Q_FILTER_VLAN_TAG_VALID
static void bnx2x_vfop_mbx_qfilters(struct bnx2x *bp, struct bnx2x_virtf *vf)
static int bnx2x_vf_mbx_qfilters(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
int rc;
int rc = 0;
struct vfpf_set_q_filters_tlv *msg =
&BP_VF_MBX(bp, vf->index)->msg->req.set_q_filters;
struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
enum bnx2x_vfop_filters_state state = vfop->state;
/* check for any mac/vlan changes */
if (msg->flags & VFPF_SET_Q_FILTERS_MAC_VLAN_CHANGED) {
/* build mac list */
struct bnx2x_vf_mac_vlan_filters *fl = NULL;
struct bnx2x_vfop_cmd cmd = {
.done = bnx2x_vfop_mbx_qfilters,
.block = false,
};
rc = bnx2x_vf_mbx_macvlan_list(bp, vf, msg, &fl,
VFPF_MAC_FILTER);
if (rc)
goto op_err;
DP(BNX2X_MSG_IOV, "STATE: %d\n", state);
if (fl) {
if (vfop->rc < 0)
goto op_err;
switch (state) {
case BNX2X_VFOP_MBX_Q_FILTERS_MACS:
/* next state */
vfop->state = BNX2X_VFOP_MBX_Q_FILTERS_VLANS;
/* check for any vlan/mac changes */
if (msg->flags & VFPF_SET_Q_FILTERS_MAC_VLAN_CHANGED) {
/* build mac list */
struct bnx2x_vfop_filters *fl = NULL;
vfop->rc = bnx2x_vf_mbx_macvlan_list(bp, vf, msg, &fl,
VFPF_MAC_FILTER);
if (vfop->rc)
/* set mac list */
rc = bnx2x_vf_mac_vlan_config_list(bp, vf, fl,
msg->vf_qid,
false);
if (rc)
goto op_err;
if (fl) {
/* set mac list */
rc = bnx2x_vfop_mac_list_cmd(bp, vf, &cmd, fl,
msg->vf_qid,
false);
if (rc) {
vfop->rc = rc;
goto op_err;
}
return;
}
}
/* fall through */
case BNX2X_VFOP_MBX_Q_FILTERS_VLANS:
/* next state */
vfop->state = BNX2X_VFOP_MBX_Q_FILTERS_RXMODE;
/* build vlan list */
fl = NULL;
/* check for any vlan/mac changes */
if (msg->flags & VFPF_SET_Q_FILTERS_MAC_VLAN_CHANGED) {
/* build vlan list */
struct bnx2x_vfop_filters *fl = NULL;
rc = bnx2x_vf_mbx_macvlan_list(bp, vf, msg, &fl,
VFPF_VLAN_FILTER);
if (rc)
goto op_err;
vfop->rc = bnx2x_vf_mbx_macvlan_list(bp, vf, msg, &fl,
VFPF_VLAN_FILTER);
if (vfop->rc)
if (fl) {
/* set vlan list */
rc = bnx2x_vf_mac_vlan_config_list(bp, vf, fl,
msg->vf_qid,
false);
if (rc)
goto op_err;
if (fl) {
/* set vlan list */
rc = bnx2x_vfop_vlan_list_cmd(bp, vf, &cmd, fl,
msg->vf_qid,
false);
if (rc) {
vfop->rc = rc;
goto op_err;
}
return;
}
}
/* fall through */
}
case BNX2X_VFOP_MBX_Q_FILTERS_RXMODE:
/* next state */
vfop->state = BNX2X_VFOP_MBX_Q_FILTERS_MCAST;
if (msg->flags & VFPF_SET_Q_FILTERS_RX_MASK_CHANGED) {
unsigned long accept = 0;
struct pf_vf_bulletin_content *bulletin =
BP_VF_BULLETIN(bp, vf->index);
if (msg->flags & VFPF_SET_Q_FILTERS_RX_MASK_CHANGED) {
unsigned long accept = 0;
struct pf_vf_bulletin_content *bulletin =
BP_VF_BULLETIN(bp, vf->index);
/* covert VF-PF if mask to bnx2x accept flags */
if (msg->rx_mask & VFPF_RX_MASK_ACCEPT_MATCHED_UNICAST)
__set_bit(BNX2X_ACCEPT_UNICAST, &accept);
if (msg->rx_mask &
VFPF_RX_MASK_ACCEPT_MATCHED_MULTICAST)
__set_bit(BNX2X_ACCEPT_MULTICAST, &accept);
if (msg->rx_mask & VFPF_RX_MASK_ACCEPT_ALL_UNICAST)
__set_bit(BNX2X_ACCEPT_ALL_UNICAST, &accept);
if (msg->rx_mask & VFPF_RX_MASK_ACCEPT_ALL_MULTICAST)
__set_bit(BNX2X_ACCEPT_ALL_MULTICAST, &accept);
if (msg->rx_mask & VFPF_RX_MASK_ACCEPT_BROADCAST)
__set_bit(BNX2X_ACCEPT_BROADCAST, &accept);
/* A packet arriving the vf's mac should be accepted
* with any vlan, unless a vlan has already been
* configured.
*/
if (!(bulletin->valid_bitmap & (1 << VLAN_VALID)))
__set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept);
/* set rx-mode */
rc = bnx2x_vfop_rxmode_cmd(bp, vf, &cmd,
msg->vf_qid, accept);
if (rc) {
vfop->rc = rc;
goto op_err;
}
return;
/* Ignore VF requested mode; instead set a regular mode */
if (msg->rx_mask != VFPF_RX_MASK_ACCEPT_NONE) {
__set_bit(BNX2X_ACCEPT_UNICAST, &accept);
__set_bit(BNX2X_ACCEPT_MULTICAST, &accept);
__set_bit(BNX2X_ACCEPT_BROADCAST, &accept);
}
/* fall through */
case BNX2X_VFOP_MBX_Q_FILTERS_MCAST:
/* next state */
vfop->state = BNX2X_VFOP_MBX_Q_FILTERS_DONE;
/* A packet arriving at the VF's MAC should be accepted
* with any VLAN, unless a VLAN has already been
* configured.
*/
if (!(bulletin->valid_bitmap & (1 << VLAN_VALID)))
__set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept);
if (msg->flags & VFPF_SET_Q_FILTERS_MULTICAST_CHANGED) {
/* set mcasts */
rc = bnx2x_vfop_mcast_cmd(bp, vf, &cmd, msg->multicast,
msg->n_multicast, false);
if (rc) {
vfop->rc = rc;
goto op_err;
}
return;
}
/* fall through */
op_done:
case BNX2X_VFOP_MBX_Q_FILTERS_DONE:
bnx2x_vfop_end(bp, vf, vfop);
return;
/* set rx-mode */
rc = bnx2x_vf_rxmode(bp, vf, msg->vf_qid, accept);
if (rc)
goto op_err;
}
if (msg->flags & VFPF_SET_Q_FILTERS_MULTICAST_CHANGED) {
/* set mcasts */
rc = bnx2x_vf_mcast(bp, vf, msg->multicast,
msg->n_multicast, false);
if (rc)
goto op_err;
}
op_err:
BNX2X_ERR("QFILTERS[%d:%d] error: rc %d\n",
vf->abs_vfid, msg->vf_qid, vfop->rc);
goto op_done;
default:
bnx2x_vfop_default(state);
}
}
static int bnx2x_vfop_mbx_qfilters_cmd(struct bnx2x *bp,
struct bnx2x_virtf *vf,
struct bnx2x_vfop_cmd *cmd)
{
struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
if (vfop) {
bnx2x_vfop_opset(BNX2X_VFOP_MBX_Q_FILTERS_MACS,
bnx2x_vfop_mbx_qfilters, cmd->done);
return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_mbx_qfilters,
cmd->block);
}
return -ENOMEM;
if (rc)
BNX2X_ERR("QFILTERS[%d:%d] error: rc %d\n",
vf->abs_vfid, msg->vf_qid, rc);
return rc;
}
static int bnx2x_filters_validate_mac(struct bnx2x *bp,
@ -1713,7 +1609,6 @@ static int bnx2x_filters_validate_mac(struct bnx2x *bp,
if (filters->n_mac_vlan_filters > 1) {
BNX2X_ERR("VF[%d] requested the addition of multiple macs after set_vf_mac ndo was called\n",
vf->abs_vfid);
vf->op_rc = -EPERM;
rc = -EPERM;
goto response;
}
@ -1724,7 +1619,6 @@ static int bnx2x_filters_validate_mac(struct bnx2x *bp,
BNX2X_ERR("VF[%d] requested the addition of a mac address not matching the one configured by set_vf_mac ndo\n",
vf->abs_vfid);
vf->op_rc = -EPERM;
rc = -EPERM;
goto response;
}
@ -1751,7 +1645,6 @@ static int bnx2x_filters_validate_vlan(struct bnx2x *bp,
VFPF_Q_FILTER_VLAN_TAG_VALID) {
BNX2X_ERR("VF[%d] attempted to configure vlan but one was already set by Hypervisor. Aborting request\n",
vf->abs_vfid);
vf->op_rc = -EPERM;
rc = -EPERM;
goto response;
}
@ -1773,15 +1666,14 @@ static void bnx2x_vf_mbx_set_q_filters(struct bnx2x *bp,
struct bnx2x_vf_mbx *mbx)
{
struct vfpf_set_q_filters_tlv *filters = &mbx->msg->req.set_q_filters;
struct bnx2x_vfop_cmd cmd = {
.done = bnx2x_vf_mbx_resp,
.block = false,
};
int rc;
if (bnx2x_filters_validate_mac(bp, vf, filters))
rc = bnx2x_filters_validate_mac(bp, vf, filters);
if (rc)
goto response;
if (bnx2x_filters_validate_vlan(bp, vf, filters))
rc = bnx2x_filters_validate_vlan(bp, vf, filters);
if (rc)
goto response;
DP(BNX2X_MSG_IOV, "VF[%d] Q_FILTERS: queue[%d]\n",
@ -1791,125 +1683,105 @@ static void bnx2x_vf_mbx_set_q_filters(struct bnx2x *bp,
/* print q_filter message */
bnx2x_vf_mbx_dp_q_filters(bp, BNX2X_MSG_IOV, filters);
vf->op_rc = bnx2x_vfop_mbx_qfilters_cmd(bp, vf, &cmd);
if (vf->op_rc)
goto response;
return;
rc = bnx2x_vf_mbx_qfilters(bp, vf);
response:
bnx2x_vf_mbx_resp(bp, vf);
bnx2x_vf_mbx_resp(bp, vf, rc);
}
static void bnx2x_vf_mbx_teardown_q(struct bnx2x *bp, struct bnx2x_virtf *vf,
struct bnx2x_vf_mbx *mbx)
{
int qid = mbx->msg->req.q_op.vf_qid;
struct bnx2x_vfop_cmd cmd = {
.done = bnx2x_vf_mbx_resp,
.block = false,
};
int rc;
DP(BNX2X_MSG_IOV, "VF[%d] Q_TEARDOWN: vf_qid=%d\n",
vf->abs_vfid, qid);
vf->op_rc = bnx2x_vfop_qdown_cmd(bp, vf, &cmd, qid);
if (vf->op_rc)
bnx2x_vf_mbx_resp(bp, vf);
rc = bnx2x_vf_queue_teardown(bp, vf, qid);
bnx2x_vf_mbx_resp(bp, vf, rc);
}
static void bnx2x_vf_mbx_close_vf(struct bnx2x *bp, struct bnx2x_virtf *vf,
struct bnx2x_vf_mbx *mbx)
{
struct bnx2x_vfop_cmd cmd = {
.done = bnx2x_vf_mbx_resp,
.block = false,
};
int rc;
DP(BNX2X_MSG_IOV, "VF[%d] VF_CLOSE\n", vf->abs_vfid);
vf->op_rc = bnx2x_vfop_close_cmd(bp, vf, &cmd);
if (vf->op_rc)
bnx2x_vf_mbx_resp(bp, vf);
rc = bnx2x_vf_close(bp, vf);
bnx2x_vf_mbx_resp(bp, vf, rc);
}
static void bnx2x_vf_mbx_release_vf(struct bnx2x *bp, struct bnx2x_virtf *vf,
struct bnx2x_vf_mbx *mbx)
{
struct bnx2x_vfop_cmd cmd = {
.done = bnx2x_vf_mbx_resp,
.block = false,
};
int rc;
DP(BNX2X_MSG_IOV, "VF[%d] VF_RELEASE\n", vf->abs_vfid);
vf->op_rc = bnx2x_vfop_release_cmd(bp, vf, &cmd);
if (vf->op_rc)
bnx2x_vf_mbx_resp(bp, vf);
rc = bnx2x_vf_free(bp, vf);
bnx2x_vf_mbx_resp(bp, vf, rc);
}
static void bnx2x_vf_mbx_update_rss(struct bnx2x *bp, struct bnx2x_virtf *vf,
struct bnx2x_vf_mbx *mbx)
{
struct bnx2x_vfop_cmd cmd = {
.done = bnx2x_vf_mbx_resp,
.block = false,
};
struct bnx2x_config_rss_params *vf_op_params = &vf->op_params.rss;
struct bnx2x_config_rss_params rss;
struct vfpf_rss_tlv *rss_tlv = &mbx->msg->req.update_rss;
int rc = 0;
if (rss_tlv->ind_table_size != T_ETH_INDIRECTION_TABLE_SIZE ||
rss_tlv->rss_key_size != T_ETH_RSS_KEY) {
BNX2X_ERR("failing rss configuration of vf %d due to size mismatch\n",
vf->index);
vf->op_rc = -EINVAL;
rc = -EINVAL;
goto mbx_resp;
}
memset(&rss, 0, sizeof(struct bnx2x_config_rss_params));
/* set vfop params according to rss tlv */
memcpy(vf_op_params->ind_table, rss_tlv->ind_table,
memcpy(rss.ind_table, rss_tlv->ind_table,
T_ETH_INDIRECTION_TABLE_SIZE);
memcpy(vf_op_params->rss_key, rss_tlv->rss_key,
sizeof(rss_tlv->rss_key));
vf_op_params->rss_obj = &vf->rss_conf_obj;
vf_op_params->rss_result_mask = rss_tlv->rss_result_mask;
memcpy(rss.rss_key, rss_tlv->rss_key, sizeof(rss_tlv->rss_key));
rss.rss_obj = &vf->rss_conf_obj;
rss.rss_result_mask = rss_tlv->rss_result_mask;
/* flags handled individually for backward/forward compatibility */
vf_op_params->rss_flags = 0;
vf_op_params->ramrod_flags = 0;
rss.rss_flags = 0;
rss.ramrod_flags = 0;
if (rss_tlv->rss_flags & VFPF_RSS_MODE_DISABLED)
__set_bit(BNX2X_RSS_MODE_DISABLED, &vf_op_params->rss_flags);
__set_bit(BNX2X_RSS_MODE_DISABLED, &rss.rss_flags);
if (rss_tlv->rss_flags & VFPF_RSS_MODE_REGULAR)
__set_bit(BNX2X_RSS_MODE_REGULAR, &vf_op_params->rss_flags);
__set_bit(BNX2X_RSS_MODE_REGULAR, &rss.rss_flags);
if (rss_tlv->rss_flags & VFPF_RSS_SET_SRCH)
__set_bit(BNX2X_RSS_SET_SRCH, &vf_op_params->rss_flags);
__set_bit(BNX2X_RSS_SET_SRCH, &rss.rss_flags);
if (rss_tlv->rss_flags & VFPF_RSS_IPV4)
__set_bit(BNX2X_RSS_IPV4, &vf_op_params->rss_flags);
__set_bit(BNX2X_RSS_IPV4, &rss.rss_flags);
if (rss_tlv->rss_flags & VFPF_RSS_IPV4_TCP)
__set_bit(BNX2X_RSS_IPV4_TCP, &vf_op_params->rss_flags);
__set_bit(BNX2X_RSS_IPV4_TCP, &rss.rss_flags);
if (rss_tlv->rss_flags & VFPF_RSS_IPV4_UDP)
__set_bit(BNX2X_RSS_IPV4_UDP, &vf_op_params->rss_flags);
__set_bit(BNX2X_RSS_IPV4_UDP, &rss.rss_flags);
if (rss_tlv->rss_flags & VFPF_RSS_IPV6)
__set_bit(BNX2X_RSS_IPV6, &vf_op_params->rss_flags);
__set_bit(BNX2X_RSS_IPV6, &rss.rss_flags);
if (rss_tlv->rss_flags & VFPF_RSS_IPV6_TCP)
__set_bit(BNX2X_RSS_IPV6_TCP, &vf_op_params->rss_flags);
__set_bit(BNX2X_RSS_IPV6_TCP, &rss.rss_flags);
if (rss_tlv->rss_flags & VFPF_RSS_IPV6_UDP)
__set_bit(BNX2X_RSS_IPV6_UDP, &vf_op_params->rss_flags);
__set_bit(BNX2X_RSS_IPV6_UDP, &rss.rss_flags);
if ((!(rss_tlv->rss_flags & VFPF_RSS_IPV4_TCP) &&
rss_tlv->rss_flags & VFPF_RSS_IPV4_UDP) ||
(!(rss_tlv->rss_flags & VFPF_RSS_IPV6_TCP) &&
rss_tlv->rss_flags & VFPF_RSS_IPV6_UDP)) {
BNX2X_ERR("about to hit a FW assert. aborting...\n");
vf->op_rc = -EINVAL;
rc = -EINVAL;
goto mbx_resp;
}
vf->op_rc = bnx2x_vfop_rss_cmd(bp, vf, &cmd);
rc = bnx2x_vf_rss_update(bp, vf, &rss);
mbx_resp:
if (vf->op_rc)
bnx2x_vf_mbx_resp(bp, vf);
bnx2x_vf_mbx_resp(bp, vf, rc);
}
static int bnx2x_validate_tpa_params(struct bnx2x *bp,
@ -1938,47 +1810,42 @@ static int bnx2x_validate_tpa_params(struct bnx2x *bp,
static void bnx2x_vf_mbx_update_tpa(struct bnx2x *bp, struct bnx2x_virtf *vf,
struct bnx2x_vf_mbx *mbx)
{
struct bnx2x_vfop_cmd cmd = {
.done = bnx2x_vf_mbx_resp,
.block = false,
};
struct bnx2x_queue_update_tpa_params *vf_op_params =
&vf->op_params.qstate.params.update_tpa;
struct bnx2x_queue_update_tpa_params vf_op_params;
struct vfpf_tpa_tlv *tpa_tlv = &mbx->msg->req.update_tpa;
int rc = 0;
memset(vf_op_params, 0, sizeof(*vf_op_params));
memset(&vf_op_params, 0, sizeof(vf_op_params));
if (bnx2x_validate_tpa_params(bp, tpa_tlv))
goto mbx_resp;
vf_op_params->complete_on_both_clients =
vf_op_params.complete_on_both_clients =
tpa_tlv->tpa_client_info.complete_on_both_clients;
vf_op_params->dont_verify_thr =
vf_op_params.dont_verify_thr =
tpa_tlv->tpa_client_info.dont_verify_thr;
vf_op_params->max_agg_sz =
vf_op_params.max_agg_sz =
tpa_tlv->tpa_client_info.max_agg_size;
vf_op_params->max_sges_pkt =
vf_op_params.max_sges_pkt =
tpa_tlv->tpa_client_info.max_sges_for_packet;
vf_op_params->max_tpa_queues =
vf_op_params.max_tpa_queues =
tpa_tlv->tpa_client_info.max_tpa_queues;
vf_op_params->sge_buff_sz =
vf_op_params.sge_buff_sz =
tpa_tlv->tpa_client_info.sge_buff_size;
vf_op_params->sge_pause_thr_high =
vf_op_params.sge_pause_thr_high =
tpa_tlv->tpa_client_info.sge_pause_thr_high;
vf_op_params->sge_pause_thr_low =
vf_op_params.sge_pause_thr_low =
tpa_tlv->tpa_client_info.sge_pause_thr_low;
vf_op_params->tpa_mode =
vf_op_params.tpa_mode =
tpa_tlv->tpa_client_info.tpa_mode;
vf_op_params->update_ipv4 =
vf_op_params.update_ipv4 =
tpa_tlv->tpa_client_info.update_ipv4;
vf_op_params->update_ipv6 =
vf_op_params.update_ipv6 =
tpa_tlv->tpa_client_info.update_ipv6;
vf->op_rc = bnx2x_vfop_tpa_cmd(bp, vf, &cmd, tpa_tlv);
rc = bnx2x_vf_tpa_update(bp, vf, tpa_tlv, &vf_op_params);
mbx_resp:
if (vf->op_rc)
bnx2x_vf_mbx_resp(bp, vf);
bnx2x_vf_mbx_resp(bp, vf, rc);
}
/* dispatch request */
@ -2042,11 +1909,8 @@ static void bnx2x_vf_mbx_request(struct bnx2x *bp, struct bnx2x_virtf *vf,
/* can we respond to VF (do we have an address for it?) */
if (vf->state == VF_ACQUIRED || vf->state == VF_ENABLED) {
/* mbx_resp uses the op_rc of the VF */
vf->op_rc = PFVF_STATUS_NOT_SUPPORTED;
/* notify the VF that we do not support this request */
bnx2x_vf_mbx_resp(bp, vf);
bnx2x_vf_mbx_resp(bp, vf, PFVF_STATUS_NOT_SUPPORTED);
} else {
/* can't send a response since this VF is unknown to us
* just ack the FW to release the mailbox and unlock
@ -2059,13 +1923,10 @@ static void bnx2x_vf_mbx_request(struct bnx2x *bp, struct bnx2x_virtf *vf,
}
}
/* handle new vf-pf message */
void bnx2x_vf_mbx(struct bnx2x *bp, struct vf_pf_event_data *vfpf_event)
void bnx2x_vf_mbx_schedule(struct bnx2x *bp,
struct vf_pf_event_data *vfpf_event)
{
struct bnx2x_virtf *vf;
struct bnx2x_vf_mbx *mbx;
u8 vf_idx;
int rc;
DP(BNX2X_MSG_IOV,
"vf pf event received: vfid %d, address_hi %x, address lo %x",
@ -2077,50 +1938,73 @@ void bnx2x_vf_mbx(struct bnx2x *bp, struct vf_pf_event_data *vfpf_event)
BNX2X_NR_VIRTFN(bp)) {
BNX2X_ERR("Illegal vf_id %d max allowed: %d\n",
vfpf_event->vf_id, BNX2X_NR_VIRTFN(bp));
goto mbx_done;
return;
}
vf_idx = bnx2x_vf_idx_by_abs_fid(bp, vfpf_event->vf_id);
mbx = BP_VF_MBX(bp, vf_idx);
/* verify an event is not currently being processed -
* debug failsafe only
*/
if (mbx->flags & VF_MSG_INPROCESS) {
BNX2X_ERR("Previous message is still being processed, vf_id %d\n",
vfpf_event->vf_id);
goto mbx_done;
/* Update VFDB with current message and schedule its handling */
mutex_lock(&BP_VFDB(bp)->event_mutex);
BP_VF_MBX(bp, vf_idx)->vf_addr_hi = vfpf_event->msg_addr_hi;
BP_VF_MBX(bp, vf_idx)->vf_addr_lo = vfpf_event->msg_addr_lo;
BP_VFDB(bp)->event_occur |= (1ULL << vf_idx);
mutex_unlock(&BP_VFDB(bp)->event_mutex);
bnx2x_schedule_iov_task(bp, BNX2X_IOV_HANDLE_VF_MSG);
}
/* handle new vf-pf messages */
void bnx2x_vf_mbx(struct bnx2x *bp)
{
struct bnx2x_vfdb *vfdb = BP_VFDB(bp);
u64 events;
u8 vf_idx;
int rc;
if (!vfdb)
return;
mutex_lock(&vfdb->event_mutex);
events = vfdb->event_occur;
vfdb->event_occur = 0;
mutex_unlock(&vfdb->event_mutex);
for_each_vf(bp, vf_idx) {
struct bnx2x_vf_mbx *mbx = BP_VF_MBX(bp, vf_idx);
struct bnx2x_virtf *vf = BP_VF(bp, vf_idx);
/* Handle VFs which have pending events */
if (!(events & (1ULL << vf_idx)))
continue;
DP(BNX2X_MSG_IOV,
"Handling vf pf event vfid %d, address: [%x:%x], resp_offset 0x%x\n",
vf_idx, mbx->vf_addr_hi, mbx->vf_addr_lo,
mbx->first_tlv.resp_msg_offset);
/* dmae to get the VF request */
rc = bnx2x_copy32_vf_dmae(bp, true, mbx->msg_mapping,
vf->abs_vfid, mbx->vf_addr_hi,
mbx->vf_addr_lo,
sizeof(union vfpf_tlvs)/4);
if (rc) {
BNX2X_ERR("Failed to copy request VF %d\n",
vf->abs_vfid);
bnx2x_vf_release(bp, vf);
return;
}
/* process the VF message header */
mbx->first_tlv = mbx->msg->req.first_tlv;
/* Clean response buffer to refrain from falsely
* seeing chains.
*/
memset(&mbx->msg->resp, 0, sizeof(union pfvf_tlvs));
/* dispatch the request (will prepare the response) */
bnx2x_vf_mbx_request(bp, vf, mbx);
}
vf = BP_VF(bp, vf_idx);
/* save the VF message address */
mbx->vf_addr_hi = vfpf_event->msg_addr_hi;
mbx->vf_addr_lo = vfpf_event->msg_addr_lo;
DP(BNX2X_MSG_IOV, "mailbox vf address hi 0x%x, lo 0x%x, offset 0x%x\n",
mbx->vf_addr_hi, mbx->vf_addr_lo, mbx->first_tlv.resp_msg_offset);
/* dmae to get the VF request */
rc = bnx2x_copy32_vf_dmae(bp, true, mbx->msg_mapping, vf->abs_vfid,
mbx->vf_addr_hi, mbx->vf_addr_lo,
sizeof(union vfpf_tlvs)/4);
if (rc) {
BNX2X_ERR("Failed to copy request VF %d\n", vf->abs_vfid);
goto mbx_error;
}
/* process the VF message header */
mbx->first_tlv = mbx->msg->req.first_tlv;
/* Clean response buffer to refrain from falsely seeing chains */
memset(&mbx->msg->resp, 0, sizeof(union pfvf_tlvs));
/* dispatch the request (will prepare the response) */
bnx2x_vf_mbx_request(bp, vf, mbx);
goto mbx_done;
mbx_error:
bnx2x_vf_release(bp, vf, false); /* non blocking */
mbx_done:
return;
}
/* propagate local bulletin board to vf */