
lockdep: Annotate irq_work

Mark irq_work items that must be invoked in hardirq context, even on
PREEMPT_RT, with IRQ_WORK_HARD_IRQ. irq_work items without this flag
are invoked in softirq context on PREEMPT_RT.

Set ->irq_config to 1 for the irq_work items that are invoked in softirq
context, so lockdep knows that these can safely acquire a spinlock_t.
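[ A minimal sketch (not part of this patch) of how a user opts a work item
into hard-irq execution; the handler name my_hard_work_fn is hypothetical,
while the initializer style mirrors the tick-sched hunk below:

	#include <linux/irq_work.h>

	/* Hypothetical handler: with IRQ_WORK_HARD_IRQ set it runs in hard
	 * interrupt context even on PREEMPT_RT, so it must not acquire a
	 * sleeping spinlock_t (only raw_spinlock_t). */
	static void my_hard_work_fn(struct irq_work *work)
	{
	}

	static struct irq_work my_hard_work = {
		.func  = my_hard_work_fn,
		.flags = ATOMIC_INIT(IRQ_WORK_HARD_IRQ),
	};

For an item initialized at runtime with init_irq_work(), the flag can be set
afterwards with atomic_set(&work->flags, IRQ_WORK_HARD_IRQ), as the RCU hunk
below does. ]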

Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lkml.kernel.org/r/20200321113242.643576700@linutronix.de
Sebastian Andrzej Siewior 2020-03-21 12:26:03 +01:00 committed by Peter Zijlstra
parent 40db173965
commit 49915ac35c
5 changed files with 19 additions and 0 deletions

include/linux/irq_work.h

@@ -18,6 +18,8 @@
 /* Doesn't want IPI, wait for tick: */
 #define IRQ_WORK_LAZY		BIT(2)
+/* Run hard IRQ context, even on RT */
+#define IRQ_WORK_HARD_IRQ	BIT(3)
 #define IRQ_WORK_CLAIMED	(IRQ_WORK_PENDING | IRQ_WORK_BUSY)
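[ For context (reconstructed from include/linux/irq_work.h, not part of the
diff), the complete flag layout after this patch; IRQ_WORK_HARD_IRQ takes the
next free bit and, unlike IRQ_WORK_PENDING and IRQ_WORK_BUSY, is not driven
by the claim/run state machine but set once by the owner of the work item:

	#define IRQ_WORK_PENDING	BIT(0)
	#define IRQ_WORK_BUSY		BIT(1)
	/* Doesn't want IPI, wait for tick: */
	#define IRQ_WORK_LAZY		BIT(2)
	/* Run hard IRQ context, even on RT */
	#define IRQ_WORK_HARD_IRQ	BIT(3)

	#define IRQ_WORK_CLAIMED	(IRQ_WORK_PENDING | IRQ_WORK_BUSY)
]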

include/linux/irqflags.h

@@ -69,6 +69,17 @@ do { \
 		current->irq_config = 0;	\
 	} while (0)
+# define lockdep_irq_work_enter(__work)			\
+	do {							\
+		if (!(atomic_read(&__work->flags) & IRQ_WORK_HARD_IRQ))\
+			current->irq_config = 1;		\
+	} while (0)
+# define lockdep_irq_work_exit(__work)				\
+	do {							\
+		if (!(atomic_read(&__work->flags) & IRQ_WORK_HARD_IRQ))\
+			current->irq_config = 0;		\
+	} while (0)
 #else
 # define trace_hardirqs_on()		do { } while (0)
 # define trace_hardirqs_off()		do { } while (0)
@@ -83,6 +94,8 @@ do { \
 # define lockdep_softirq_exit()		do { } while (0)
 # define lockdep_hrtimer_enter(__hrtimer)	do { } while (0)
 # define lockdep_hrtimer_exit(__hrtimer)	do { } while (0)
+# define lockdep_irq_work_enter(__work)	do { } while (0)
+# define lockdep_irq_work_exit(__work)		do { } while (0)
 #endif
 #if defined(CONFIG_IRQSOFF_TRACER) || \
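[ To illustrate what the annotation enables (hypothetical names, not from
this patch): an irq_work callback may take a spinlock_t on PREEMPT_RT only
because such work runs in softirq context there; irq_config == 1 tells
lockdep to accept that, whereas the same acquisition in an IRQ_WORK_HARD_IRQ
callback can now be reported as invalid:

	static DEFINE_SPINLOCK(my_lock);	/* spinlock_t: a sleeping lock on RT */

	static void my_soft_work_fn(struct irq_work *work)
	{
		/* Legitimate only because, without IRQ_WORK_HARD_IRQ, this
		 * callback runs in softirq context on PREEMPT_RT. */
		spin_lock(&my_lock);
		spin_unlock(&my_lock);
	}
]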

kernel/irq_work.c

@@ -153,7 +153,9 @@ static void irq_work_run_list(struct llist_head *list)
 		 */
 		flags = atomic_fetch_andnot(IRQ_WORK_PENDING, &work->flags);
+		lockdep_irq_work_enter(work);
 		work->func(work);
+		lockdep_irq_work_exit(work);
 		/*
 		 * Clear the BUSY bit and return to the free state if
 		 * no-one else claimed it meanwhile.
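[ The enter/exit hooks bracket only the callback invocation, so ->irq_config
is set for exactly the window in which work->func() runs. ]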

kernel/rcu/tree.c

@@ -1113,6 +1113,7 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
 	    !rdp->rcu_iw_pending && rdp->rcu_iw_gp_seq != rnp->gp_seq &&
 	    (rnp->ffmask & rdp->grpmask)) {
 		init_irq_work(&rdp->rcu_iw, rcu_iw_handler);
+		atomic_set(&rdp->rcu_iw.flags, IRQ_WORK_HARD_IRQ);
 		rdp->rcu_iw_pending = true;
 		rdp->rcu_iw_gp_seq = rnp->gp_seq;
 		irq_work_queue_on(&rdp->rcu_iw, rdp->cpu);
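[ rdp->rcu_iw is marked as hard-irq work here, presumably because the
quiescent-state probe must still interrupt the holdout CPU promptly on
PREEMPT_RT rather than be deferred to softirq context. ]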

kernel/time/tick-sched.c

@@ -245,6 +245,7 @@ static void nohz_full_kick_func(struct irq_work *work)
 static DEFINE_PER_CPU(struct irq_work, nohz_full_kick_work) = {
 	.func = nohz_full_kick_func,
+	.flags = ATOMIC_INIT(IRQ_WORK_HARD_IRQ),
 };
 /*