ixgbe: avoid premature Rx buffer reuse
[ Upstream commit a06316dc87 ]

The page recycle code incorrectly relied on a page fragment never
being freed inside xdp_do_redirect(). Under that assumption, page
fragments still in use by the stack/XDP redirect path can be reused
and overwritten. To avoid this, store the page count prior to
invoking xdp_do_redirect().

Fixes: 6453073987 ("ixgbe: add initial support for xdp redirect")
Reported-and-analyzed-by: Li RongQing <lirongqing@baidu.com>
Signed-off-by: Björn Töpel <bjorn.topel@intel.com>
Tested-by: Sandeep Penigalapati <sandeep.penigalapati@intel.com>
Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
Signed-off-by: Sasha Levin <sashal@kernel.org>
parent 75bbe7bd90
commit 0e2b048ffe
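Why the ordering matters: previously the page refcount was read after
xdp_do_redirect() had already run. If the redirect path freed the
fragment right away (e.g. a fast transmit completion on another CPU),
the live count could show the driver as sole owner even though an
earlier fragment of the same page was still held by the stack, and
that half would be recycled and overwritten. The stand-alone C model
below (hypothetical types and counts, not kernel code) mirrors the
fixed ixgbe_can_reuse_rx_page() check from the diff to show how the
pre-redirect snapshot catches the outstanding fragment that the live
count misses:

/*
 * Illustrative model only. More than one reference beyond the
 * driver's pagecnt_bias means some fragment of the page is still
 * held elsewhere, so the page must not be recycled.
 */
#include <stdbool.h>
#include <stdio.h>

struct page_model { int refcount; };

struct rx_buffer_model {
	struct page_model *page;
	int pagecnt_bias;		/* references the driver still owns */
};

static bool can_reuse_rx_page(const struct rx_buffer_model *rx, int pgcnt)
{
	return (pgcnt - rx->pagecnt_bias) <= 1;
}

int main(void)
{
	/* refcount 3 = driver bias (1) + a previous fragment still held
	 * by the stack (1) + the fragment about to be redirected (1). */
	struct page_model page = { .refcount = 3 };
	struct rx_buffer_model rx = { .page = &page, .pagecnt_bias = 1 };

	int snapshot = page.refcount;	/* taken before xdp_do_redirect() */

	page.refcount--;	/* redirect path frees our fragment at once */

	/* Live count: (2 - 1) <= 1, so the page looks exclusively owned
	 * and the driver would flip to the other half, overwriting the
	 * fragment the stack still holds. */
	printf("live count allows reuse: %d (the bug)\n",
	       can_reuse_rx_page(&rx, page.refcount));

	/* Snapshot: (3 - 1) > 1, so reuse is correctly refused. */
	printf("snapshot allows reuse:   %d (the fix)\n",
	       can_reuse_rx_page(&rx, snapshot));
	return 0;
}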
@@ -1947,7 +1947,8 @@ static inline bool ixgbe_page_is_reserved(struct page *page)
 	return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
 }
 
-static bool ixgbe_can_reuse_rx_page(struct ixgbe_rx_buffer *rx_buffer)
+static bool ixgbe_can_reuse_rx_page(struct ixgbe_rx_buffer *rx_buffer,
+				    int rx_buffer_pgcnt)
 {
 	unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
 	struct page *page = rx_buffer->page;
@@ -1958,7 +1959,7 @@ static bool ixgbe_can_reuse_rx_page(struct ixgbe_rx_buffer *rx_buffer)
 
 #if (PAGE_SIZE < 8192)
 	/* if we are only owner of page we can reuse it */
-	if (unlikely((page_ref_count(page) - pagecnt_bias) > 1))
+	if (unlikely((rx_buffer_pgcnt - pagecnt_bias) > 1))
 		return false;
 #else
 	/* The last offset is a bit aggressive in that we assume the
@@ -2023,11 +2024,18 @@ static void ixgbe_add_rx_frag(struct ixgbe_ring *rx_ring,
 static struct ixgbe_rx_buffer *ixgbe_get_rx_buffer(struct ixgbe_ring *rx_ring,
 						   union ixgbe_adv_rx_desc *rx_desc,
 						   struct sk_buff **skb,
-						   const unsigned int size)
+						   const unsigned int size,
+						   int *rx_buffer_pgcnt)
 {
 	struct ixgbe_rx_buffer *rx_buffer;
 
 	rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
+	*rx_buffer_pgcnt =
+#if (PAGE_SIZE < 8192)
+		page_count(rx_buffer->page);
+#else
+		0;
+#endif
 	prefetchw(rx_buffer->page);
 	*skb = rx_buffer->skb;
 
@@ -2057,9 +2065,10 @@ skip_sync:
 
 static void ixgbe_put_rx_buffer(struct ixgbe_ring *rx_ring,
 				struct ixgbe_rx_buffer *rx_buffer,
-				struct sk_buff *skb)
+				struct sk_buff *skb,
+				int rx_buffer_pgcnt)
 {
-	if (ixgbe_can_reuse_rx_page(rx_buffer)) {
+	if (ixgbe_can_reuse_rx_page(rx_buffer, rx_buffer_pgcnt)) {
 		/* hand second half of page back to the ring */
 		ixgbe_reuse_rx_page(rx_ring, rx_buffer);
 	} else {
@@ -2295,6 +2304,7 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
 		union ixgbe_adv_rx_desc *rx_desc;
 		struct ixgbe_rx_buffer *rx_buffer;
 		struct sk_buff *skb;
+		int rx_buffer_pgcnt;
 		unsigned int size;
 
 		/* return some buffers to hardware, one at a time is too slow */
@@ -2314,7 +2324,7 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
 		 */
 		dma_rmb();
 
-		rx_buffer = ixgbe_get_rx_buffer(rx_ring, rx_desc, &skb, size);
+		rx_buffer = ixgbe_get_rx_buffer(rx_ring, rx_desc, &skb, size, &rx_buffer_pgcnt);
 
 		/* retrieve a buffer from the ring */
 		if (!skb) {
@@ -2356,7 +2366,7 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
 			break;
 		}
 
-		ixgbe_put_rx_buffer(rx_ring, rx_buffer, skb);
+		ixgbe_put_rx_buffer(rx_ring, rx_buffer, skb, rx_buffer_pgcnt);
 		cleaned_count++;
 
 		/* place incomplete frames back on ring for completion */
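A note on the PAGE_SIZE split visible above: the refcount snapshot is
only meaningful on the (PAGE_SIZE < 8192) path, where the driver
recycles by alternating page halves and decides reuse from the
reference count. On larger pages the #else path decides reuse from the
remaining offset headroom instead (the "last offset is a bit
aggressive" branch), so the snapshot is never consulted there and
ixgbe_get_rx_buffer() simply stores 0.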