
net: busy-poll: return busypolling status to drivers

NAPI drivers use napi_complete_done() or napi_complete() when they
have drained the RX ring, right before re-enabling device interrupts.

In busy polling, we can avoid having interrupts delivered, since
we are polling the RX ring in a controlled loop.

Drivers can choose to use the napi_complete_done() return value
to reduce interrupt overhead while busy polling is active.

This is optional; legacy drivers should work fine even
if not updated.
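
As an illustration only (not part of this patch), a driver poll
routine could consume the return value as in the sketch below;
foo_poll(), foo_clean_rx_ring(), foo_enable_irq() and struct foo_priv
are hypothetical names:

	static int foo_poll(struct napi_struct *napi, int budget)
	{
		struct foo_priv *priv = container_of(napi, struct foo_priv, napi);
		int work_done = foo_clean_rx_ring(priv, budget);

		if (work_done < budget) {
			/* napi_complete_done() returns false while busy
			 * polling owns this napi context; only re-arm
			 * device interrupts when it returns true.
			 */
			if (napi_complete_done(napi, work_done))
				foo_enable_irq(priv);
		}
		return work_done;
	}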

Signed-off-by: Eric Dumazet <edumazet@google.com>
Cc: Willem de Bruijn <willemb@google.com>
Cc: Adam Belay <abelay@google.com>
Cc: Tariq Toukan <tariqt@mellanox.com>
Cc: Yuval Mintz <Yuval.Mintz@cavium.com>
Cc: Ariel Elior <ariel.elior@cavium.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Eric Dumazet 2016-11-15 10:15:13 -08:00 committed by David S. Miller
parent 21cb84c48c
commit 364b605573
2 changed files with 10 additions and 7 deletions

--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h

@@ -463,16 +463,17 @@ static inline bool napi_reschedule(struct napi_struct *napi)
 	return false;
 }
 
-void __napi_complete(struct napi_struct *n);
-void napi_complete_done(struct napi_struct *n, int work_done);
+bool __napi_complete(struct napi_struct *n);
+bool napi_complete_done(struct napi_struct *n, int work_done);
 /**
  *	napi_complete - NAPI processing complete
  *	@n: NAPI context
  *
  *	Mark NAPI processing as complete.
  *	Consider using napi_complete_done() instead.
+ *	Return false if device should avoid rearming interrupts.
  */
-static inline void napi_complete(struct napi_struct *n)
+static inline bool napi_complete(struct napi_struct *n)
 {
 	return napi_complete_done(n, 0);
 }

--- a/net/core/dev.c
+++ b/net/core/dev.c

@@ -4898,7 +4898,7 @@ void __napi_schedule_irqoff(struct napi_struct *n)
 }
 EXPORT_SYMBOL(__napi_schedule_irqoff);
 
-void __napi_complete(struct napi_struct *n)
+bool __napi_complete(struct napi_struct *n)
 {
 	BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
 
@@ -4906,15 +4906,16 @@ void __napi_complete(struct napi_struct *n)
 	 * napi_complete_done().
 	 */
 	if (unlikely(test_bit(NAPI_STATE_IN_BUSY_POLL, &n->state)))
-		return;
+		return false;
 
 	list_del_init(&n->poll_list);
 	smp_mb__before_atomic();
 	clear_bit(NAPI_STATE_SCHED, &n->state);
+	return true;
 }
 EXPORT_SYMBOL(__napi_complete);
 
-void napi_complete_done(struct napi_struct *n, int work_done)
+bool napi_complete_done(struct napi_struct *n, int work_done)
 {
 	unsigned long flags;
 
@@ -4926,7 +4927,7 @@ void napi_complete_done(struct napi_struct *n, int work_done)
 	 */
 	if (unlikely(n->state & (NAPIF_STATE_NPSVC |
 				 NAPIF_STATE_IN_BUSY_POLL)))
-		return;
+		return false;
 
 	if (n->gro_list) {
 		unsigned long timeout = 0;
@@ -4948,6 +4949,7 @@ void napi_complete_done(struct napi_struct *n, int work_done)
 		__napi_complete(n);
 		local_irq_restore(flags);
 	}
+	return true;
 }
 EXPORT_SYMBOL(napi_complete_done);