
net: mvneta: Use the new hwbm framework

Now that the hardware buffer management framework has been introduced,
let's use it.

Tested-by: Sebastian Careba <nitroshift@yahoo.com>
Signed-off-by: Gregory CLEMENT <gregory.clement@free-electrons.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Gregory CLEMENT 2016-03-14 09:39:05 +01:00 committed by David S. Miller
parent 8cb2d8bf57
commit baa11ebc0c
4 changed files with 49 additions and 112 deletions
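
For reference, the conversion follows the pattern the hwbm framework expects from a driver: embed a struct hwbm_pool inside the driver's own pool structure, provide a ->construct() callback that makes each freshly allocated buffer usable by the hardware, and let hwbm_pool_add()/hwbm_pool_refill() drive allocation. The sketch below is not part of the commit; it is a minimal illustration using only the hwbm calls and fields visible in this diff (hwbm_pool_add, hwbm_pool_refill, and the size/buf_num/frag_size/construct/priv fields). The driver-side names (my_pool, my_construct, my_pool_setup) and the DMA handling are hypothetical.

#include <linux/dma-mapping.h>
#include <linux/skbuff.h>
#include <net/hwbm.h>

/* Hypothetical driver pool wrapping the generic hwbm state. */
struct my_pool {
	struct hwbm_pool hwbm_pool;	/* generic pool: size, buf_num, frag_size, ... */
	struct device *dev;		/* device used for DMA mapping */
	int buf_size;			/* bytes visible to the hardware per buffer */
};

/* ->construct() is called by hwbm for every buffer it allocates; here it
 * only DMA-maps the buffer and would hand the address to the buffer
 * manager hardware (omitted).
 */
static int my_construct(struct hwbm_pool *hwbm_pool, void *buf)
{
	struct my_pool *pool = hwbm_pool->priv;
	dma_addr_t phys_addr;

	phys_addr = dma_map_single(pool->dev, buf, pool->buf_size,
				   DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(pool->dev, phys_addr)))
		return -ENOMEM;

	/* give phys_addr to the HW pool here */
	return 0;
}

/* Wire up the callbacks, then fill the pool. */
static int my_pool_setup(struct my_pool *pool, int pkt_size, int nbufs)
{
	struct hwbm_pool *hwbm_pool = &pool->hwbm_pool;
	int num;

	pool->buf_size = pkt_size;
	hwbm_pool->size = nbufs;
	hwbm_pool->frag_size = SKB_DATA_ALIGN(pkt_size) +
			       SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	hwbm_pool->construct = my_construct;
	hwbm_pool->priv = pool;

	/* hwbm_pool_add() returns how many buffers were actually added. */
	num = hwbm_pool_add(hwbm_pool, hwbm_pool->size, GFP_ATOMIC);
	return num == hwbm_pool->size ? 0 : -ENOMEM;
}

On the RX path a single buffer is then replenished with hwbm_pool_refill(&pool->hwbm_pool, GFP_ATOMIC), which is exactly what mvneta switches to below.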

drivers/net/ethernet/marvell/Kconfig

@@ -43,6 +43,7 @@ config MVMDIO
 config MVNETA_BM
         tristate "Marvell Armada 38x/XP network interface BM support"
         depends on MVNETA
+        select HWBM
         ---help---
           This driver supports auxiliary block of the network
           interface units in the Marvell ARMADA XP and ARMADA 38x SoC

drivers/net/ethernet/marvell/mvneta.c

@@ -30,6 +30,7 @@
 #include <linux/phy.h>
 #include <linux/platform_device.h>
 #include <linux/skbuff.h>
+#include <net/hwbm.h>
 #include "mvneta_bm.h"
 #include <net/ip.h>
 #include <net/ipv6.h>
@@ -1026,11 +1027,12 @@ static int mvneta_bm_port_init(struct platform_device *pdev,
 static void mvneta_bm_update_mtu(struct mvneta_port *pp, int mtu)
 {
         struct mvneta_bm_pool *bm_pool = pp->pool_long;
+        struct hwbm_pool *hwbm_pool = &bm_pool->hwbm_pool;
         int num;
 
         /* Release all buffers from long pool */
         mvneta_bm_bufs_free(pp->bm_priv, bm_pool, 1 << pp->id);
-        if (bm_pool->buf_num) {
+        if (hwbm_pool->buf_num) {
                 WARN(1, "cannot free all buffers in pool %d\n",
                      bm_pool->id);
                 goto bm_mtu_err;
@@ -1038,14 +1040,14 @@ static void mvneta_bm_update_mtu(struct mvneta_port *pp, int mtu)
 
         bm_pool->pkt_size = MVNETA_RX_PKT_SIZE(mtu);
         bm_pool->buf_size = MVNETA_RX_BUF_SIZE(bm_pool->pkt_size);
-        bm_pool->frag_size = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
+        hwbm_pool->frag_size = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
                              SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(bm_pool->pkt_size));
 
         /* Fill entire long pool */
-        num = mvneta_bm_bufs_add(pp->bm_priv, bm_pool, bm_pool->size);
-        if (num != bm_pool->size) {
+        num = hwbm_pool_add(hwbm_pool, hwbm_pool->size, GFP_ATOMIC);
+        if (num != hwbm_pool->size) {
                 WARN(1, "pool %d: %d of %d allocated\n",
-                     bm_pool->id, num, bm_pool->size);
+                     bm_pool->id, num, hwbm_pool->size);
                 goto bm_mtu_err;
         }
 
         mvneta_bm_pool_bufsize_set(pp, bm_pool->buf_size, bm_pool->id);
@@ -2066,14 +2068,14 @@ err_drop_frame:
                 }
 
                 /* Refill processing */
-                err = mvneta_bm_pool_refill(pp->bm_priv, bm_pool);
+                err = hwbm_pool_refill(&bm_pool->hwbm_pool, GFP_ATOMIC);
                 if (err) {
                         netdev_err(dev, "Linux processing - Can't refill\n");
                         rxq->missed++;
                         goto err_drop_frame_ret_pool;
                 }
 
-                frag_size = bm_pool->frag_size;
+                frag_size = bm_pool->hwbm_pool.frag_size;
 
                 skb = build_skb(data, frag_size > PAGE_SIZE ? 0 : frag_size);

drivers/net/ethernet/marvell/mvneta_bm.c

@@ -10,16 +10,17 @@
  * warranty of any kind, whether express or implied.
  */
 
-#include <linux/kernel.h>
+#include <linux/clk.h>
 #include <linux/genalloc.h>
-#include <linux/platform_device.h>
-#include <linux/netdevice.h>
-#include <linux/skbuff.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
 #include <linux/mbus.h>
 #include <linux/module.h>
-#include <linux/io.h>
+#include <linux/netdevice.h>
 #include <linux/of.h>
-#include <linux/clk.h>
+#include <linux/platform_device.h>
+#include <linux/skbuff.h>
+#include <net/hwbm.h>
 #include "mvneta_bm.h"
 
 #define MVNETA_BM_DRIVER_NAME "mvneta_bm"
@@ -88,17 +89,13 @@ static void mvneta_bm_pool_target_set(struct mvneta_bm *priv, int pool_id,
         mvneta_bm_write(priv, MVNETA_BM_XBAR_POOL_REG(pool_id), val);
 }
 
-/* Allocate skb for BM pool */
-void *mvneta_buf_alloc(struct mvneta_bm *priv, struct mvneta_bm_pool *bm_pool,
-                       dma_addr_t *buf_phys_addr)
+int mvneta_bm_construct(struct hwbm_pool *hwbm_pool, void *buf)
 {
-        void *buf;
+        struct mvneta_bm_pool *bm_pool =
+                (struct mvneta_bm_pool *)hwbm_pool->priv;
+        struct mvneta_bm *priv = bm_pool->priv;
         dma_addr_t phys_addr;
 
-        buf = mvneta_frag_alloc(bm_pool->frag_size);
-        if (!buf)
-                return NULL;
-
         /* In order to update buf_cookie field of RX descriptor properly,
          * BM hardware expects buf virtual address to be placed in the
          * first four bytes of mapped buffer.
@@ -106,75 +103,13 @@ void *mvneta_buf_alloc(struct mvneta_bm *priv, struct mvneta_bm_pool *bm_pool,
          */
         *(u32 *)buf = (u32)buf;
         phys_addr = dma_map_single(&priv->pdev->dev, buf, bm_pool->buf_size,
                                    DMA_FROM_DEVICE);
-        if (unlikely(dma_mapping_error(&priv->pdev->dev, phys_addr))) {
-                mvneta_frag_free(bm_pool->frag_size, buf);
-                return NULL;
-        }
-
-        *buf_phys_addr = phys_addr;
-
-        return buf;
-}
-
-/* Refill processing for HW buffer management */
-int mvneta_bm_pool_refill(struct mvneta_bm *priv,
-                          struct mvneta_bm_pool *bm_pool)
-{
-        dma_addr_t buf_phys_addr;
-        void *buf;
-
-        buf = mvneta_buf_alloc(priv, bm_pool, &buf_phys_addr);
-        if (!buf)
+        if (unlikely(dma_mapping_error(&priv->pdev->dev, phys_addr)))
                 return -ENOMEM;
 
-        mvneta_bm_pool_put_bp(priv, bm_pool, buf_phys_addr);
-
+        mvneta_bm_pool_put_bp(priv, bm_pool, phys_addr);
         return 0;
 }
-EXPORT_SYMBOL_GPL(mvneta_bm_pool_refill);
-
-/* Allocate buffers for the pool */
-int mvneta_bm_bufs_add(struct mvneta_bm *priv, struct mvneta_bm_pool *bm_pool,
-                       int buf_num)
-{
-        int err, i;
-
-        if (bm_pool->buf_num == bm_pool->size) {
-                dev_dbg(&priv->pdev->dev, "pool %d already filled\n",
-                        bm_pool->id);
-                return bm_pool->buf_num;
-        }
-
-        if (buf_num < 0 ||
-            (buf_num + bm_pool->buf_num > bm_pool->size)) {
-                dev_err(&priv->pdev->dev,
-                        "cannot allocate %d buffers for pool %d\n",
-                        buf_num, bm_pool->id);
-                return 0;
-        }
-
-        for (i = 0; i < buf_num; i++) {
-                err = mvneta_bm_pool_refill(priv, bm_pool);
-                if (err < 0)
-                        break;
-        }
-
-        /* Update BM driver with number of buffers added to pool */
-        bm_pool->buf_num += i;
-
-        dev_dbg(&priv->pdev->dev,
-                "%s pool %d: pkt_size=%4d, buf_size=%4d, frag_size=%4d\n",
-                bm_pool->type == MVNETA_BM_SHORT ? "short" : "long",
-                bm_pool->id, bm_pool->pkt_size, bm_pool->buf_size,
-                bm_pool->frag_size);
-
-        dev_dbg(&priv->pdev->dev,
-                "%s pool %d: %d of %d buffers added\n",
-                bm_pool->type == MVNETA_BM_SHORT ? "short" : "long",
-                bm_pool->id, i, buf_num);
-
-        return i;
-}
-EXPORT_SYMBOL_GPL(mvneta_bm_bufs_add);
+EXPORT_SYMBOL_GPL(mvneta_bm_construct);
 
 /* Create pool */
 static int mvneta_bm_pool_create(struct mvneta_bm *priv,
@@ -183,8 +118,7 @@ static int mvneta_bm_pool_create(struct mvneta_bm *priv,
         struct platform_device *pdev = priv->pdev;
         u8 target_id, attr;
         int size_bytes, err;
-
-        size_bytes = sizeof(u32) * bm_pool->size;
+        size_bytes = sizeof(u32) * bm_pool->hwbm_pool.size;
         bm_pool->virt_addr = dma_alloc_coherent(&pdev->dev, size_bytes,
                                                 &bm_pool->phys_addr,
                                                 GFP_KERNEL);
@@ -245,11 +179,16 @@ struct mvneta_bm_pool *mvneta_bm_pool_use(struct mvneta_bm *priv, u8 pool_id,
 
         /* Allocate buffers in case BM pool hasn't been used yet */
         if (new_pool->type == MVNETA_BM_FREE) {
+                struct hwbm_pool *hwbm_pool = &new_pool->hwbm_pool;
+
+                new_pool->priv = priv;
                 new_pool->type = type;
                 new_pool->buf_size = MVNETA_RX_BUF_SIZE(new_pool->pkt_size);
-                new_pool->frag_size =
+                hwbm_pool->frag_size =
                         SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(new_pool->pkt_size)) +
                         SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+                hwbm_pool->construct = mvneta_bm_construct;
+                hwbm_pool->priv = new_pool;
 
                 /* Create new pool */
                 err = mvneta_bm_pool_create(priv, new_pool);
@@ -260,10 +199,10 @@ struct mvneta_bm_pool *mvneta_bm_pool_use(struct mvneta_bm *priv, u8 pool_id,
                 }
 
                 /* Allocate buffers for this pool */
-                num = mvneta_bm_bufs_add(priv, new_pool, new_pool->size);
-                if (num != new_pool->size) {
+                num = hwbm_pool_add(hwbm_pool, hwbm_pool->size, GFP_ATOMIC);
+                if (num != hwbm_pool->size) {
                         WARN(1, "pool %d: %d of %d allocated\n",
-                             new_pool->id, num, new_pool->size);
+                             new_pool->id, num, hwbm_pool->size);
                         return NULL;
                 }
         }
@@ -284,7 +223,7 @@ void mvneta_bm_bufs_free(struct mvneta_bm *priv, struct mvneta_bm_pool *bm_pool,
         mvneta_bm_config_set(priv, MVNETA_BM_EMPTY_LIMIT_MASK);
 
-        for (i = 0; i < bm_pool->buf_num; i++) {
+        for (i = 0; i < bm_pool->hwbm_pool.buf_num; i++) {
                 dma_addr_t buf_phys_addr;
                 u32 *vaddr;
@@ -303,13 +242,13 @@ void mvneta_bm_bufs_free(struct mvneta_bm *priv, struct mvneta_bm_pool *bm_pool,
                 dma_unmap_single(&priv->pdev->dev, buf_phys_addr,
                                  bm_pool->buf_size, DMA_FROM_DEVICE);
-                mvneta_frag_free(bm_pool->frag_size, vaddr);
+                hwbm_buf_free(&bm_pool->hwbm_pool, vaddr);
         }
 
         mvneta_bm_config_clear(priv, MVNETA_BM_EMPTY_LIMIT_MASK);
 
         /* Update BM driver with number of buffers removed from pool */
-        bm_pool->buf_num -= i;
+        bm_pool->hwbm_pool.buf_num -= i;
 }
 EXPORT_SYMBOL_GPL(mvneta_bm_bufs_free);
@@ -317,6 +256,7 @@ EXPORT_SYMBOL_GPL(mvneta_bm_bufs_free);
 void mvneta_bm_pool_destroy(struct mvneta_bm *priv,
                             struct mvneta_bm_pool *bm_pool, u8 port_map)
 {
+        struct hwbm_pool *hwbm_pool = &bm_pool->hwbm_pool;
         bm_pool->port_map &= ~port_map;
         if (bm_pool->port_map)
                 return;
@@ -324,11 +264,12 @@ void mvneta_bm_pool_destroy(struct mvneta_bm *priv,
         bm_pool->type = MVNETA_BM_FREE;
 
         mvneta_bm_bufs_free(priv, bm_pool, port_map);
-        if (bm_pool->buf_num)
+        if (hwbm_pool->buf_num)
                 WARN(1, "cannot free all buffers in pool %d\n", bm_pool->id);
 
         if (bm_pool->virt_addr) {
-                dma_free_coherent(&priv->pdev->dev, sizeof(u32) * bm_pool->size,
+                dma_free_coherent(&priv->pdev->dev,
+                                  sizeof(u32) * hwbm_pool->size,
                                   bm_pool->virt_addr, bm_pool->phys_addr);
                 bm_pool->virt_addr = NULL;
         }
@@ -381,10 +322,10 @@ static void mvneta_bm_pools_init(struct mvneta_bm *priv)
                                              MVNETA_BM_POOL_CAP_ALIGN));
                         size = ALIGN(size, MVNETA_BM_POOL_CAP_ALIGN);
                 }
-                bm_pool->size = size;
+                bm_pool->hwbm_pool.size = size;
 
                 mvneta_bm_write(priv, MVNETA_BM_POOL_SIZE_REG(i),
-                                bm_pool->size);
+                                bm_pool->hwbm_pool.size);
 
                 /* Obtain custom pkt_size from DT */
                 sprintf(prop, "pool%d,pkt-size", i);

drivers/net/ethernet/marvell/mvneta_bm.h

@@ -108,20 +108,15 @@ struct mvneta_bm {
 };
 
 struct mvneta_bm_pool {
+        struct hwbm_pool hwbm_pool;
         /* Pool number in the range 0-3 */
         u8 id;
         enum mvneta_bm_type type;
 
-        /* Buffer Pointers Pool External (BPPE) size in number of bytes */
-        int size;
-        /* Number of buffers used by this pool */
-        int buf_num;
-        /* Pool buffer size */
-        int buf_size;
         /* Packet size */
         int pkt_size;
-        /* Single frag size */
-        u32 frag_size;
+        /* Size of the buffer acces through DMA*/
+        u32 buf_size;
 
         /* BPPE virtual base address */
         u32 *virt_addr;
@@ -143,8 +138,7 @@ void mvneta_bm_pool_destroy(struct mvneta_bm *priv,
                             struct mvneta_bm_pool *bm_pool, u8 port_map);
 void mvneta_bm_bufs_free(struct mvneta_bm *priv, struct mvneta_bm_pool *bm_pool,
                          u8 port_map);
-int mvneta_bm_bufs_add(struct mvneta_bm *priv, struct mvneta_bm_pool *bm_pool,
-                       int buf_num);
+int mvneta_bm_construct(struct hwbm_pool *hwbm_pool, void *buf);
 int mvneta_bm_pool_refill(struct mvneta_bm *priv,
                           struct mvneta_bm_pool *bm_pool);
 struct mvneta_bm_pool *mvneta_bm_pool_use(struct mvneta_bm *priv, u8 pool_id,
@@ -170,8 +164,7 @@ void mvneta_bm_pool_destroy(struct mvneta_bm *priv,
                             struct mvneta_bm_pool *bm_pool, u8 port_map) {}
 void mvneta_bm_bufs_free(struct mvneta_bm *priv, struct mvneta_bm_pool *bm_pool,
                          u8 port_map) {}
-int mvneta_bm_bufs_add(struct mvneta_bm *priv, struct mvneta_bm_pool *bm_pool,
-                       int buf_num) { return 0; }
+int mvneta_bm_construct(struct hwbm_pool *hwbm_pool, void *buf) { return 0; }
 int mvneta_bm_pool_refill(struct mvneta_bm *priv,
                           struct mvneta_bm_pool *bm_pool) {return 0; }
 struct mvneta_bm_pool *mvneta_bm_pool_use(struct mvneta_bm *priv, u8 pool_id,