xsk: Fold xp_assign_dev and __xp_assign_dev

Fold xp_assign_dev and __xp_assign_dev. The former directly calls the
latter.

Signed-off-by: Björn Töpel <bjorn.topel@intel.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Reviewed-by: Maciej Fijalkowski <maciej.fijalkowski@intel.com>
Link: https://lore.kernel.org/bpf/20210122105351.11751-3-bjorn.topel@gmail.com
master
Björn Töpel 2021-01-22 11:53:50 +01:00 committed by Daniel Borkmann
parent 458f727234
commit f0863eab96
1 changed file with 3 additions and 9 deletions

net/xdp/xsk_buff_pool.c

@@ -119,8 +119,8 @@ static void xp_disable_drv_zc(struct xsk_buff_pool *pool)
 	}
 }
 
-static int __xp_assign_dev(struct xsk_buff_pool *pool,
-			   struct net_device *netdev, u16 queue_id, u16 flags)
+int xp_assign_dev(struct xsk_buff_pool *pool,
+		  struct net_device *netdev, u16 queue_id, u16 flags)
 {
 	bool force_zc, force_copy;
 	struct netdev_bpf bpf;
@@ -191,12 +191,6 @@ err_unreg_pool:
 	return err;
 }
 
-int xp_assign_dev(struct xsk_buff_pool *pool, struct net_device *dev,
-		  u16 queue_id, u16 flags)
-{
-	return __xp_assign_dev(pool, dev, queue_id, flags);
-}
-
 int xp_assign_dev_shared(struct xsk_buff_pool *pool, struct xdp_umem *umem,
 			 struct net_device *dev, u16 queue_id)
 {
@@ -210,7 +204,7 @@ int xp_assign_dev_shared(struct xsk_buff_pool *pool, struct xdp_umem *umem,
 	if (pool->uses_need_wakeup)
 		flags |= XDP_USE_NEED_WAKEUP;
 
-	return __xp_assign_dev(pool, dev, queue_id, flags);
+	return xp_assign_dev(pool, dev, queue_id, flags);
 }
 
 void xp_clear_dev(struct xsk_buff_pool *pool)
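
For readers skimming the diff, here is a minimal userspace C sketch of the pattern this commit applies: a public function that only forwarded its arguments to a static helper is folded into that helper, and remaining callers keep using the single surviving entry point. All names and types below are hypothetical stand-ins for illustration, not the kernel API.

#include <stdio.h>

/* Stand-in type; the real code operates on struct xsk_buff_pool and
 * struct net_device inside net/xdp/xsk_buff_pool.c. */
struct pool {
	int queue_id;
	unsigned int flags;
};

/* After the fold there is one entry point: the body that used to live in a
 * static __assign_dev() helper is now the function callers see directly. */
static int assign_dev(struct pool *p, int queue_id, unsigned int flags)
{
	p->queue_id = queue_id;
	p->flags = flags;
	return 0;
}

/* The second caller keeps working unchanged, mirroring how
 * xp_assign_dev_shared() now calls xp_assign_dev() instead of the
 * removed __xp_assign_dev() wrapper target. */
static int assign_dev_shared(struct pool *p, int queue_id, unsigned int flags)
{
	return assign_dev(p, queue_id, flags);
}

int main(void)
{
	struct pool p = { 0, 0 };

	if (assign_dev_shared(&p, 3, 0x1))
		return 1;
	printf("queue %d, flags 0x%x\n", p.queue_id, p.flags);
	return 0;
}

The design point is the one stated in the commit message: the wrapper added no behavior of its own, so folding it removes an extra function without changing what any caller observes.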