packet: switch kvzalloc to allocate memory

This patch includes the following changes:

* Use the modern kvzalloc()/kvfree() helpers instead of the custom
  allocation fallback chain (see the sketch after this list).

* Remove the 'order' argument from alloc_pg_vec(); the allocation
  size can be taken directly from req.

* Remove the 'order' argument from free_pg_vec(); it now uses
  kvfree(), which does not need an order argument.

* Remove pg_vec_order from struct packet_ring_buffer; there is no
  longer any need to save/restore 'order'.

* Remove the local variable 'order' from packet_set_ring(); it is
  now unused.
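
For background (illustration only, not part of this patch): kvzalloc()
already performs the kmalloc()-then-vmalloc() fallback that the old code
open-coded, and kvfree() can release either kind of buffer. A minimal
sketch of the resulting pattern, using the hypothetical names
alloc_block()/free_block():

	#include <linux/mm.h>	/* kvzalloc()/kvfree() (kvmalloc family) */

	/* Hypothetical helper mirroring the patched alloc_one_pg_vec_page():
	 * one cheap attempt first, then a harder attempt that may still fail.
	 */
	static char *alloc_block(unsigned long size)
	{
		char *buffer;

		/* kvzalloc() tries kmalloc() and transparently falls back
		 * to vmalloc() for large or fragmented requests, zeroing
		 * the buffer either way.
		 */
		buffer = kvzalloc(size, GFP_KERNEL);
		if (buffer)
			return buffer;

		/* __GFP_RETRY_MAYFAIL asks the allocator to try harder
		 * (reclaim, swap) while still returning NULL on failure.
		 */
		return kvzalloc(size, GFP_KERNEL | __GFP_RETRY_MAYFAIL);
	}

	static void free_block(char *buffer)
	{
		kvfree(buffer);	/* valid for kmalloc()- and vmalloc()-backed memory */
	}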

Signed-off-by: Zhang Yu <zhangyu31@baidu.com>
Signed-off-by: Li RongQing <lirongqing@baidu.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Li RongQing 2018-08-13 10:42:46 +08:00 committed by David S. Miller
parent 0192e7d46c
commit 71e4128620
2 changed files with 13 additions and 32 deletions

net/packet/af_packet.c

@@ -4137,52 +4137,36 @@ static const struct vm_operations_struct packet_mmap_ops = {
 	.close	= packet_mm_close,
 };
 
-static void free_pg_vec(struct pgv *pg_vec, unsigned int order,
-			unsigned int len)
+static void free_pg_vec(struct pgv *pg_vec, unsigned int len)
 {
 	int i;
 
 	for (i = 0; i < len; i++) {
 		if (likely(pg_vec[i].buffer)) {
-			if (is_vmalloc_addr(pg_vec[i].buffer))
-				vfree(pg_vec[i].buffer);
-			else
-				free_pages((unsigned long)pg_vec[i].buffer,
-					   order);
+			kvfree(pg_vec[i].buffer);
 			pg_vec[i].buffer = NULL;
 		}
 	}
 	kfree(pg_vec);
 }
 
-static char *alloc_one_pg_vec_page(unsigned long order)
+static char *alloc_one_pg_vec_page(unsigned long size)
 {
 	char *buffer;
-	gfp_t gfp_flags = GFP_KERNEL | __GFP_COMP |
-			  __GFP_ZERO | __GFP_NOWARN | __GFP_NORETRY;
 
-	buffer = (char *) __get_free_pages(gfp_flags, order);
+	buffer = kvzalloc(size, GFP_KERNEL);
 	if (buffer)
 		return buffer;
 
-	/* __get_free_pages failed, fall back to vmalloc */
-	buffer = vzalloc(array_size((1 << order), PAGE_SIZE));
-	if (buffer)
-		return buffer;
+	buffer = kvzalloc(size, GFP_KERNEL | __GFP_RETRY_MAYFAIL);
 
-	/* vmalloc failed, lets dig into swap here */
-	gfp_flags &= ~__GFP_NORETRY;
-	buffer = (char *) __get_free_pages(gfp_flags, order);
-	if (buffer)
-		return buffer;
-
-	/* complete and utter failure */
-	return NULL;
+	return buffer;
 }
 
-static struct pgv *alloc_pg_vec(struct tpacket_req *req, int order)
+static struct pgv *alloc_pg_vec(struct tpacket_req *req)
 {
 	unsigned int block_nr = req->tp_block_nr;
+	unsigned long size = req->tp_block_size;
 	struct pgv *pg_vec;
 	int i;
 
@@ -4191,7 +4175,7 @@ static struct pgv *alloc_pg_vec(struct tpacket_req *req, int order)
 		goto out;
 
 	for (i = 0; i < block_nr; i++) {
-		pg_vec[i].buffer = alloc_one_pg_vec_page(order);
+		pg_vec[i].buffer = alloc_one_pg_vec_page(size);
 		if (unlikely(!pg_vec[i].buffer))
 			goto out_free_pgvec;
 	}
@@ -4200,7 +4184,7 @@ out:
 	return pg_vec;
 
 out_free_pgvec:
-	free_pg_vec(pg_vec, order, block_nr);
+	free_pg_vec(pg_vec, block_nr);
 	pg_vec = NULL;
 	goto out;
 }
@@ -4210,9 +4194,9 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
 {
 	struct pgv *pg_vec = NULL;
 	struct packet_sock *po = pkt_sk(sk);
-	int was_running, order = 0;
 	struct packet_ring_buffer *rb;
 	struct sk_buff_head *rb_queue;
+	int was_running;
 	__be16 num;
 	int err = -EINVAL;
 	/* Added to avoid minimal code churn */
@@ -4274,8 +4258,7 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
 			goto out;
 
 		err = -ENOMEM;
-		order = get_order(req->tp_block_size);
-		pg_vec = alloc_pg_vec(req, order);
+		pg_vec = alloc_pg_vec(req);
 		if (unlikely(!pg_vec))
 			goto out;
 		switch (po->tp_version) {
@@ -4329,7 +4312,6 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
 		rb->frame_size = req->tp_frame_size;
 		spin_unlock_bh(&rb_queue->lock);
 
-		swap(rb->pg_vec_order, order);
 		swap(rb->pg_vec_len, req->tp_block_nr);
 
 		rb->pg_vec_pages = req->tp_block_size/PAGE_SIZE;
@@ -4355,7 +4337,7 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
 	}
 
 	if (pg_vec)
-		free_pg_vec(pg_vec, order, req->tp_block_nr);
+		free_pg_vec(pg_vec, req->tp_block_nr);
 out:
 	return err;
 }
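
Aside (not part of the diff): free_pg_vec() can drop its 'order'
argument because kvfree() dispatches on the pointer itself; its core in
mm/util.c is essentially the branch this patch removes:

	/* Simplified; declared in <linux/mm.h> */
	void kvfree(const void *addr)
	{
		if (is_vmalloc_addr(addr))	/* vmalloc()-backed? */
			vfree(addr);
		else
			kfree(addr);
	}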

net/packet/internal.h

@@ -64,7 +64,6 @@ struct packet_ring_buffer {
 	unsigned int		frame_size;
 	unsigned int		frame_max;
 
-	unsigned int		pg_vec_order;
 	unsigned int		pg_vec_pages;
 	unsigned int		pg_vec_len;