bnx2x: VF fastpath
When the VF driver is transmitting, it must supply the correct MAC address in the parsing BD. This is used for firmware validation and enforcement, and also for tx-switching. Also refactor the interrupt ack flow to allow for the different BAR addresses of the hardware in the PF BAR vs. the VF BAR. Signed-off-by: Ariel Elior <ariele@broadcom.com> Signed-off-by: Eilon Greenstein <eilong@broadcom.com> Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
parent
381ac16b10
commit
dc1ba59146
|
@ -3472,8 +3472,18 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
|
|||
cpu_to_le16(vlan_tx_tag_get(skb));
|
||||
tx_start_bd->bd_flags.as_bitfield |=
|
||||
(X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
|
||||
} else
|
||||
tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
|
||||
} else {
|
||||
/* when transmitting in a vf, start bd must hold the ethertype
|
||||
* for fw to enforce it
|
||||
*/
|
||||
if (IS_VF(bp)) {
|
||||
tx_start_bd->vlan_or_ethertype =
|
||||
cpu_to_le16(ntohs(eth->h_proto));
|
||||
} else {
|
||||
/* used by FW for packet accounting */
|
||||
tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
|
||||
}
|
||||
}
|
||||
|
||||
/* turn on parsing and get a BD */
|
||||
bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
|
||||
|
@ -3489,9 +3499,9 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
|
|||
hlen = bnx2x_set_pbd_csum_e2(bp, skb,
|
||||
&pbd_e2_parsing_data,
|
||||
xmit_type);
|
||||
if (IS_MF_SI(bp)) {
|
||||
/*
|
||||
* fill in the MAC addresses in the PBD - for local
|
||||
|
||||
if (IS_MF_SI(bp) || IS_VF(bp)) {
|
||||
/* fill in the MAC addresses in the PBD - for local
|
||||
* switching
|
||||
*/
|
||||
bnx2x_set_fw_mac_addr(&pbd_e2->src_mac_addr_hi,
|
||||
|
|
|
@ -499,6 +499,39 @@ int bnx2x_setup_tc(struct net_device *dev, u8 num_tc);
|
|||
/* select_queue callback */
|
||||
u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb);
|
||||
|
||||
/* Publish new Rx producer values (BD, CQE and SGE producers) to the
 * device so the FW can consume the freshly posted buffers.
 * The producers are packed into a struct ustorm_eth_rx_producers and
 * written dword-by-dword at the per-queue offset fp->ustorm_rx_prods_offset.
 * NOTE(review): this inline writes relative to fp->ustorm_rx_prods_offset
 * only (no BAR base added here) — presumably the offset already encodes
 * the correct BAR for PF vs. VF; confirm against the callers.
 */
static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
					struct bnx2x_fastpath *fp,
					u16 bd_prod, u16 rx_comp_prod,
					u16 rx_sge_prod)
{
	struct ustorm_eth_rx_producers rx_prods = {0};
	u32 i;

	/* Update producers */
	rx_prods.bd_prod = bd_prod;
	rx_prods.cqe_prod = rx_comp_prod;
	rx_prods.sge_prod = rx_sge_prod;

	/* Make sure that the BD and SGE data is updated before updating the
	 * producers since FW might read the BD/SGE right after the producer
	 * is updated.
	 * This is only applicable for weak-ordered memory model archs such
	 * as IA-64. The following barrier is also mandatory since FW will
	 * assumes BDs must have buffers.
	 */
	wmb();

	/* Write the producer struct out one 32-bit register at a time */
	for (i = 0; i < sizeof(rx_prods)/4; i++)
		REG_WR(bp, fp->ustorm_rx_prods_offset + i*4,
		       ((u32 *)&rx_prods)[i]);

	mmiowb(); /* keep prod updates ordered */

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]: wrote bd_prod %u cqe_prod %u sge_prod %u\n",
	   fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
}
|
||||
|
||||
/* reload helper */
|
||||
int bnx2x_reload_if_running(struct net_device *dev);
|
||||
|
||||
|
@ -507,9 +540,6 @@ int bnx2x_change_mac_addr(struct net_device *dev, void *p);
|
|||
/* NAPI poll Rx part */
|
||||
int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget);
|
||||
|
||||
void bnx2x_update_rx_prod(struct bnx2x *bp, struct bnx2x_fastpath *fp,
|
||||
u16 bd_prod, u16 rx_comp_prod, u16 rx_sge_prod);
|
||||
|
||||
/* NAPI poll Tx part */
|
||||
int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata);
|
||||
|
||||
|
@ -612,38 +642,6 @@ static inline void bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
|
|||
fp->fp_hc_idx = fp->sb_running_index[SM_RX_ID];
|
||||
}
|
||||
|
||||
/* Generic form of the Rx-producer update: identical to
 * bnx2x_update_rx_prod() except that the caller supplies the absolute
 * register address base in @start instead of it being derived from
 * fp->ustorm_rx_prods_offset. The three producers are packed into a
 * struct ustorm_eth_rx_producers and written dword-by-dword at @start.
 */
static inline void bnx2x_update_rx_prod_gen(struct bnx2x *bp,
			struct bnx2x_fastpath *fp, u16 bd_prod,
			u16 rx_comp_prod, u16 rx_sge_prod, u32 start)
{
	struct ustorm_eth_rx_producers rx_prods = {0};
	u32 i;

	/* Update producers */
	rx_prods.bd_prod = bd_prod;
	rx_prods.cqe_prod = rx_comp_prod;
	rx_prods.sge_prod = rx_sge_prod;

	/*
	 * Make sure that the BD and SGE data is updated before updating the
	 * producers since FW might read the BD/SGE right after the producer
	 * is updated.
	 * This is only applicable for weak-ordered memory model archs such
	 * as IA-64. The following barrier is also mandatory since FW will
	 * assumes BDs must have buffers.
	 */
	wmb();

	/* Write the producer struct out one 32-bit register at a time */
	for (i = 0; i < sizeof(rx_prods)/4; i++)
		REG_WR(bp, start + i*4, ((u32 *)&rx_prods)[i]);

	mmiowb(); /* keep prod updates ordered */

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]: wrote bd_prod %u cqe_prod %u sge_prod %u\n",
	   fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
}
|
||||
|
||||
static inline void bnx2x_igu_ack_sb_gen(struct bnx2x *bp, u8 igu_sb_id,
|
||||
u8 segment, u16 index, u8 op,
|
||||
u8 update, u32 igu_addr)
|
||||
|
|
|
@ -1697,15 +1697,6 @@ void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe)
|
|||
return;
|
||||
}
|
||||
|
||||
/* Thin wrapper around bnx2x_update_rx_prod_gen(): computes the absolute
 * ustorm producer address for this queue (BAR_USTRORM_INTMEM plus the
 * per-queue offset) and delegates the actual register writes.
 */
void bnx2x_update_rx_prod(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			  u16 bd_prod, u16 rx_comp_prod, u16 rx_sge_prod)
{
	bnx2x_update_rx_prod_gen(bp, fp, bd_prod, rx_comp_prod, rx_sge_prod,
				 BAR_USTRORM_INTMEM +
				 fp->ustorm_rx_prods_offset);
}
|
||||
|
||||
irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
|
||||
{
|
||||
struct bnx2x *bp = netdev_priv(dev_instance);
|
||||
|
@ -4620,8 +4611,8 @@ static void bnx2x_attn_int(struct bnx2x *bp)
|
|||
void bnx2x_igu_ack_sb(struct bnx2x *bp, u8 igu_sb_id, u8 segment,
|
||||
u16 index, u8 op, u8 update)
|
||||
{
|
||||
u32 igu_addr = BAR_IGU_INTMEM + (IGU_CMD_INT_ACK_BASE + igu_sb_id)*8;
|
||||
|
||||
u32 igu_addr = bp->igu_base_addr;
|
||||
igu_addr += (IGU_CMD_INT_ACK_BASE + igu_sb_id)*8;
|
||||
bnx2x_igu_ack_sb_gen(bp, igu_sb_id, segment, index, op, update,
|
||||
igu_addr);
|
||||
}
|
||||
|
|
Loading…
Reference in a new issue