i40evf: allocate ring structs dynamically

Instead of awkwardly keeping a fixed array of pointers in the adapter
struct and then allocating ring structs individually, just keep a single
pointer and allocate a single blob for the arrays. This simplifies code,
shrinks the adapter structure, and future-proofs the driver by not
limiting the number of rings we can handle.

Change-ID: I31334ff911a6474954232cfe4bc98ccca3c769ff
Signed-off-by: Mitch Williams <mitch.a.williams@intel.com>
Tested-by: Andrew Bowers <andrewx.bowers@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Mitch Williams 2015-10-26 19:44:40 -04:00 committed by Jeff Kirsher
parent 7d96ba1a8b
commit 0dd438d8ad
5 changed files with 40 additions and 41 deletions
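
Before the per-file diffs, here is a minimal userspace sketch of the allocation pattern the patch moves to. It is illustrative only: the demo_* names are invented for this summary, and the driver itself uses kcalloc()/kfree() with GFP_KERNEL rather than calloc()/free().

#include <stdlib.h>

/* stand-in for struct i40e_ring; real fields omitted */
struct demo_ring {
        int queue_index;
        int count;
};

/* before: struct demo_ring *tx_rings[I40E_MAX_VSI_QP];   fixed array of pointers
 * after:  struct demo_ring *tx_rings;                     one pointer, one allocation
 */
struct demo_adapter {
        int num_active_queues;
        struct demo_ring *tx_rings;
        struct demo_ring *rx_rings;
};

int demo_alloc_queues(struct demo_adapter *adapter)
{
        /* one zeroed block per ring array instead of an allocation per queue */
        adapter->tx_rings = calloc(adapter->num_active_queues,
                                   sizeof(*adapter->tx_rings));
        adapter->rx_rings = calloc(adapter->num_active_queues,
                                   sizeof(*adapter->rx_rings));
        if (!adapter->tx_rings || !adapter->rx_rings) {
                free(adapter->tx_rings);        /* free(NULL) is a no-op */
                free(adapter->rx_rings);
                adapter->tx_rings = NULL;
                adapter->rx_rings = NULL;
                return -1;
        }

        for (int i = 0; i < adapter->num_active_queues; i++) {
                adapter->tx_rings[i].queue_index = i;   /* element is a struct, not a pointer */
                adapter->rx_rings[i].queue_index = i;
        }
        return 0;
}

void demo_free_queues(struct demo_adapter *adapter)
{
        /* teardown shrinks to one free per array */
        free(adapter->tx_rings);
        free(adapter->rx_rings);
}

With the rings stored by value in a single block, accesses such as adapter->tx_rings[i]->count become adapter->tx_rings[i].count, and callers that need a ring pointer take &adapter->tx_rings[i]; that is exactly the transformation the hunks below apply.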


@@ -2063,7 +2063,7 @@ out_drop:
 netdev_tx_t i40evf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 {
         struct i40evf_adapter *adapter = netdev_priv(netdev);
-        struct i40e_ring *tx_ring = adapter->tx_rings[skb->queue_mapping];
+        struct i40e_ring *tx_ring = &adapter->tx_rings[skb->queue_mapping];
 
         /* hardware can't handle really short frames, hardware padding works
          * beyond this point


@@ -196,13 +196,13 @@ struct i40evf_adapter {
         int num_active_queues;
 
         /* TX */
-        struct i40e_ring *tx_rings[I40E_MAX_VSI_QP];
+        struct i40e_ring *tx_rings;
         u32 tx_timeout_count;
         struct list_head mac_filter_list;
         u32 tx_desc_count;
 
         /* RX */
-        struct i40e_ring *rx_rings[I40E_MAX_VSI_QP];
+        struct i40e_ring *rx_rings;
         u64 hw_csum_rx_error;
         u32 rx_desc_count;
         int num_msix_vectors;


@@ -121,12 +121,12 @@ static void i40evf_get_ethtool_stats(struct net_device *netdev,
                 data[i] = *(u64 *)p;
         }
         for (j = 0; j < adapter->num_active_queues; j++) {
-                data[i++] = adapter->tx_rings[j]->stats.packets;
-                data[i++] = adapter->tx_rings[j]->stats.bytes;
+                data[i++] = adapter->tx_rings[j].stats.packets;
+                data[i++] = adapter->tx_rings[j].stats.bytes;
         }
         for (j = 0; j < adapter->num_active_queues; j++) {
-                data[i++] = adapter->rx_rings[j]->stats.packets;
-                data[i++] = adapter->rx_rings[j]->stats.bytes;
+                data[i++] = adapter->rx_rings[j].stats.packets;
+                data[i++] = adapter->rx_rings[j].stats.bytes;
         }
 }


@@ -348,7 +348,7 @@ static void
 i40evf_map_vector_to_rxq(struct i40evf_adapter *adapter, int v_idx, int r_idx)
 {
         struct i40e_q_vector *q_vector = &adapter->q_vectors[v_idx];
-        struct i40e_ring *rx_ring = adapter->rx_rings[r_idx];
+        struct i40e_ring *rx_ring = &adapter->rx_rings[r_idx];
 
         rx_ring->q_vector = q_vector;
         rx_ring->next = q_vector->rx.ring;
@@ -369,7 +369,7 @@ static void
 i40evf_map_vector_to_txq(struct i40evf_adapter *adapter, int v_idx, int t_idx)
 {
         struct i40e_q_vector *q_vector = &adapter->q_vectors[v_idx];
-        struct i40e_ring *tx_ring = adapter->tx_rings[t_idx];
+        struct i40e_ring *tx_ring = &adapter->tx_rings[t_idx];
 
         tx_ring->q_vector = q_vector;
         tx_ring->next = q_vector->tx.ring;
@@ -610,7 +610,7 @@ static void i40evf_configure_tx(struct i40evf_adapter *adapter)
         int i;
 
         for (i = 0; i < adapter->num_active_queues; i++)
-                adapter->tx_rings[i]->tail = hw->hw_addr + I40E_QTX_TAIL1(i);
+                adapter->tx_rings[i].tail = hw->hw_addr + I40E_QTX_TAIL1(i);
 }
 
 /**
@@ -655,8 +655,8 @@ static void i40evf_configure_rx(struct i40evf_adapter *adapter)
         }
 
         for (i = 0; i < adapter->num_active_queues; i++) {
-                adapter->rx_rings[i]->tail = hw->hw_addr + I40E_QRX_TAIL1(i);
-                adapter->rx_rings[i]->rx_buf_len = rx_buf_len;
+                adapter->rx_rings[i].tail = hw->hw_addr + I40E_QRX_TAIL1(i);
+                adapter->rx_rings[i].rx_buf_len = rx_buf_len;
         }
 }
@@ -991,7 +991,7 @@ static void i40evf_configure(struct i40evf_adapter *adapter)
         adapter->aq_required |= I40EVF_FLAG_AQ_CONFIGURE_QUEUES;
 
         for (i = 0; i < adapter->num_active_queues; i++) {
-                struct i40e_ring *ring = adapter->rx_rings[i];
+                struct i40e_ring *ring = &adapter->rx_rings[i];
 
                 i40evf_alloc_rx_buffers_1buf(ring, ring->count);
                 ring->next_to_use = ring->count - 1;
@@ -1111,16 +1111,10 @@ i40evf_acquire_msix_vectors(struct i40evf_adapter *adapter, int vectors)
 **/
 static void i40evf_free_queues(struct i40evf_adapter *adapter)
 {
-        int i;
-
         if (!adapter->vsi_res)
                 return;
-        for (i = 0; i < adapter->num_active_queues; i++) {
-                if (adapter->tx_rings[i])
-                        kfree_rcu(adapter->tx_rings[i], rcu);
-                adapter->tx_rings[i] = NULL;
-                adapter->rx_rings[i] = NULL;
-        }
+        kfree(adapter->tx_rings);
+        kfree(adapter->rx_rings);
 }
 
 /**
@@ -1135,13 +1129,20 @@ static int i40evf_alloc_queues(struct i40evf_adapter *adapter)
 {
         int i;
 
+        adapter->tx_rings = kcalloc(adapter->num_active_queues,
+                                    sizeof(struct i40e_ring), GFP_KERNEL);
+        if (!adapter->tx_rings)
+                goto err_out;
+        adapter->rx_rings = kcalloc(adapter->num_active_queues,
+                                    sizeof(struct i40e_ring), GFP_KERNEL);
+        if (!adapter->rx_rings)
+                goto err_out;
+
         for (i = 0; i < adapter->num_active_queues; i++) {
                 struct i40e_ring *tx_ring;
                 struct i40e_ring *rx_ring;
 
-                tx_ring = kzalloc(sizeof(*tx_ring) * 2, GFP_KERNEL);
-                if (!tx_ring)
-                        goto err_out;
+                tx_ring = &adapter->tx_rings[i];
 
                 tx_ring->queue_index = i;
                 tx_ring->netdev = adapter->netdev;
@@ -1149,14 +1150,12 @@ static int i40evf_alloc_queues(struct i40evf_adapter *adapter)
                 tx_ring->count = adapter->tx_desc_count;
                 if (adapter->flags & I40E_FLAG_WB_ON_ITR_CAPABLE)
                         tx_ring->flags |= I40E_TXR_FLAGS_WB_ON_ITR;
-                adapter->tx_rings[i] = tx_ring;
 
-                rx_ring = &tx_ring[1];
+                rx_ring = &adapter->rx_rings[i];
                 rx_ring->queue_index = i;
                 rx_ring->netdev = adapter->netdev;
                 rx_ring->dev = &adapter->pdev->dev;
                 rx_ring->count = adapter->rx_desc_count;
-                adapter->rx_rings[i] = rx_ring;
         }
 
         return 0;
@@ -1487,7 +1486,7 @@ static int i40evf_alloc_q_vectors(struct i40evf_adapter *adapter)
         struct i40e_q_vector *q_vector;
 
         num_q_vectors = adapter->num_msix_vectors - NONQ_VECS;
-        adapter->q_vectors = kzalloc(sizeof(*q_vector) * num_q_vectors,
+        adapter->q_vectors = kcalloc(num_q_vectors, sizeof(*q_vector),
                                      GFP_KERNEL);
         if (!adapter->q_vectors)
                 goto err_out;
@@ -2035,8 +2034,8 @@ void i40evf_free_all_tx_resources(struct i40evf_adapter *adapter)
         int i;
 
         for (i = 0; i < adapter->num_active_queues; i++)
-                if (adapter->tx_rings[i]->desc)
-                        i40evf_free_tx_resources(adapter->tx_rings[i]);
+                if (adapter->tx_rings[i].desc)
+                        i40evf_free_tx_resources(&adapter->tx_rings[i]);
 }
 
 /**
@@ -2054,8 +2053,8 @@ static int i40evf_setup_all_tx_resources(struct i40evf_adapter *adapter)
         int i, err = 0;
 
         for (i = 0; i < adapter->num_active_queues; i++) {
-                adapter->tx_rings[i]->count = adapter->tx_desc_count;
-                err = i40evf_setup_tx_descriptors(adapter->tx_rings[i]);
+                adapter->tx_rings[i].count = adapter->tx_desc_count;
+                err = i40evf_setup_tx_descriptors(&adapter->tx_rings[i]);
                 if (!err)
                         continue;
                 dev_err(&adapter->pdev->dev,
@@ -2081,8 +2080,8 @@ static int i40evf_setup_all_rx_resources(struct i40evf_adapter *adapter)
         int i, err = 0;
 
         for (i = 0; i < adapter->num_active_queues; i++) {
-                adapter->rx_rings[i]->count = adapter->rx_desc_count;
-                err = i40evf_setup_rx_descriptors(adapter->rx_rings[i]);
+                adapter->rx_rings[i].count = adapter->rx_desc_count;
+                err = i40evf_setup_rx_descriptors(&adapter->rx_rings[i]);
                 if (!err)
                         continue;
                 dev_err(&adapter->pdev->dev,
@@ -2103,8 +2102,8 @@ void i40evf_free_all_rx_resources(struct i40evf_adapter *adapter)
         int i;
 
         for (i = 0; i < adapter->num_active_queues; i++)
-                if (adapter->rx_rings[i]->desc)
-                        i40evf_free_rx_resources(adapter->rx_rings[i]);
+                if (adapter->rx_rings[i].desc)
+                        i40evf_free_rx_resources(&adapter->rx_rings[i]);
 }
 
 /**


@@ -255,19 +255,19 @@ void i40evf_configure_queues(struct i40evf_adapter *adapter)
         for (i = 0; i < pairs; i++) {
                 vqpi->txq.vsi_id = vqci->vsi_id;
                 vqpi->txq.queue_id = i;
-                vqpi->txq.ring_len = adapter->tx_rings[i]->count;
-                vqpi->txq.dma_ring_addr = adapter->tx_rings[i]->dma;
+                vqpi->txq.ring_len = adapter->tx_rings[i].count;
+                vqpi->txq.dma_ring_addr = adapter->tx_rings[i].dma;
                 vqpi->txq.headwb_enabled = 1;
                 vqpi->txq.dma_headwb_addr = vqpi->txq.dma_ring_addr +
                     (vqpi->txq.ring_len * sizeof(struct i40e_tx_desc));
 
                 vqpi->rxq.vsi_id = vqci->vsi_id;
                 vqpi->rxq.queue_id = i;
-                vqpi->rxq.ring_len = adapter->rx_rings[i]->count;
-                vqpi->rxq.dma_ring_addr = adapter->rx_rings[i]->dma;
+                vqpi->rxq.ring_len = adapter->rx_rings[i].count;
+                vqpi->rxq.dma_ring_addr = adapter->rx_rings[i].dma;
                 vqpi->rxq.max_pkt_size = adapter->netdev->mtu
                                         + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN;
-                vqpi->rxq.databuffer_size = adapter->rx_rings[i]->rx_buf_len;
+                vqpi->rxq.databuffer_size = adapter->rx_rings[i].rx_buf_len;
                 vqpi++;
         }