
rcu-tasks: Make RCU Tasks Trace make use of RCU scheduler hooks

This commit makes the calls to rcu_tasks_qs() detect and report
quiescent states for RCU Tasks Trace.  If the task is in a quiescent
state and if ->trc_reader_checked is not yet set, the task sets its own
->trc_reader_checked.  This will cause the grace-period kthread to
remove it from the holdout list if it still remains there.
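
In essence, the scheduler-hook path now performs the following check.
This is a minimal C sketch of the rcu_tasks_trace_qs() macro added
below; the function name is hypothetical, and the real code is the
macro in the rcupdate.h hunk:

	/* Sketch only: mirrors rcu_tasks_trace_qs() in the diff below. */
	static inline void rcu_tasks_trace_qs_sketch(struct task_struct *t)
	{
		/* In a reader, or already reported?  Then nothing to do. */
		if (READ_ONCE(t->trc_reader_checked) ||
		    READ_ONCE(t->trc_reader_nesting))
			return;
		/* Report this task's own quiescent state... */
		smp_store_release(&t->trc_reader_checked, true);
		/* ...and order it against the grace-period kthread's scan. */
		smp_mb();
	}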

[ paulmck: Fix conditional compilation per kbuild test robot feedback. ]
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
Paul E. McKenney 2020-03-16 20:38:29 -07:00
parent af051ca4e4
commit 43766c3ead
4 changed files with 43 additions and 14 deletions

include/linux/rcupdate.h

@@ -131,20 +131,50 @@ static inline void rcu_init_nohz(void) { }
  * This is a macro rather than an inline function to avoid #include hell.
  */
 #ifdef CONFIG_TASKS_RCU_GENERIC
-#define rcu_tasks_qs(t) \
-	do { \
-		if (READ_ONCE((t)->rcu_tasks_holdout)) \
-			WRITE_ONCE((t)->rcu_tasks_holdout, false); \
+# ifdef CONFIG_TASKS_RCU
+# define rcu_tasks_classic_qs(t, preempt) \
+	do { \
+		if (!(preempt) && READ_ONCE((t)->rcu_tasks_holdout)) \
+			WRITE_ONCE((t)->rcu_tasks_holdout, false); \
 	} while (0)
-#define rcu_note_voluntary_context_switch(t) rcu_tasks_qs(t)
 void call_rcu_tasks(struct rcu_head *head, rcu_callback_t func);
 void synchronize_rcu_tasks(void);
+# else
+# define rcu_tasks_classic_qs(t, preempt) do { } while (0)
+# define call_rcu_tasks call_rcu
+# define synchronize_rcu_tasks synchronize_rcu
+# endif
+# ifdef CONFIG_TASKS_RCU_TRACE
+# define rcu_tasks_trace_qs(t) \
+	do { \
+		if (!likely(READ_ONCE((t)->trc_reader_checked)) && \
+		    !unlikely(READ_ONCE((t)->trc_reader_nesting))) { \
+			smp_store_release(&(t)->trc_reader_checked, true); \
+			smp_mb(); /* Readers partitioned by store. */ \
+		} \
+	} while (0)
+# else
+# define rcu_tasks_trace_qs(t) do { } while (0)
+# endif
+#define rcu_tasks_qs(t, preempt) \
+do { \
+	rcu_tasks_classic_qs((t), (preempt)); \
+	rcu_tasks_trace_qs((t)); \
+} while (0)
+# ifdef CONFIG_TASKS_RUDE_RCU
+void call_rcu_tasks_rude(struct rcu_head *head, rcu_callback_t func);
+void synchronize_rcu_tasks_rude(void);
+# endif
+#define rcu_note_voluntary_context_switch(t) rcu_tasks_qs(t, false)
 void exit_tasks_rcu_start(void);
 void exit_tasks_rcu_finish(void);
 #else /* #ifdef CONFIG_TASKS_RCU_GENERIC */
-#define rcu_tasks_qs(t) do { } while (0)
+#define rcu_tasks_qs(t, preempt) do { } while (0)
 #define rcu_note_voluntary_context_switch(t) do { } while (0)
 #define call_rcu_tasks call_rcu
 #define synchronize_rcu_tasks synchronize_rcu
@@ -161,7 +191,7 @@ static inline void exit_tasks_rcu_finish(void) { }
  */
 #define cond_resched_tasks_rcu_qs() \
 do { \
-	rcu_tasks_qs(current); \
+	rcu_tasks_qs(current, false); \
 	cond_resched(); \
 } while (0)
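
For context, cond_resched_tasks_rcu_qs() is the hook that long-running
kernel loops use to report a voluntary quiescent state; after this
change a single call covers both the classic and the trace flavors.  A
minimal usage sketch (the kthread and its work helper are hypothetical):

	#include <linux/kthread.h>
	#include <linux/rcupdate.h>

	static int example_scan_kthread(void *unused)
	{
		while (!kthread_should_stop()) {
			do_one_unit_of_work();	/* hypothetical helper */
			/* Reports rcu_tasks_qs(current, false), then may resched. */
			cond_resched_tasks_rcu_qs();
		}
		return 0;
	}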

include/linux/rcutiny.h

@@ -49,7 +49,7 @@ static inline void rcu_softirq_qs(void)
 #define rcu_note_context_switch(preempt) \
 	do { \
 		rcu_qs(); \
-		rcu_tasks_qs(current); \
+		rcu_tasks_qs(current, (preempt)); \
 	} while (0)
 static inline int rcu_needs_cpu(u64 basemono, u64 *nextevt)

kernel/rcu/tasks.h

@@ -180,7 +180,7 @@ static int __noreturn rcu_tasks_kthread(void *arg)
 		/* Pick up any new callbacks. */
 		raw_spin_lock_irqsave(&rtp->cbs_lock, flags);
-		smp_mb__after_unlock_lock(); // Order updates vs. GP.
+		smp_mb__after_spinlock(); // Order updates vs. GP.
 		list = rtp->cbs_head;
 		rtp->cbs_head = NULL;
 		rtp->cbs_tail = &rtp->cbs_head;
@@ -874,7 +874,7 @@ static void rcu_tasks_trace_pertask(struct task_struct *t,
 					    struct list_head *hop)
 {
 	WRITE_ONCE(t->trc_reader_need_end, false);
-	t->trc_reader_checked = false;
+	WRITE_ONCE(t->trc_reader_checked, false);
 	t->trc_ipi_to_cpu = -1;
 	trc_wait_for_one_reader(t, hop);
 }
@@ -983,6 +983,7 @@ static void rcu_tasks_trace_postgp(struct rcu_tasks *rtp)
 		pr_err("\t%d holdouts\n", atomic_read(&trc_n_readers_need_end));
 	}
 	smp_mb(); // Caller's code must be ordered after wakeup.
+		  // Pairs with pretty much every ordering primitive.
 }
 /* Report any needed quiescent state for this exiting task. */
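
For reference, the holdout-list scan described in the commit message
then drops any task whose ->trc_reader_checked is set.  A simplified
sketch follows; the helper name and exact bookkeeping are illustrative,
not the kernel's verbatim code:

	/* Sketch: drop tasks that have reported their own quiescent state. */
	static void scan_holdouts_sketch(struct list_head *hop)
	{
		struct task_struct *t, *tmp;

		list_for_each_entry_safe(t, tmp, hop, trc_holdout_list) {
			if (smp_load_acquire(&t->trc_reader_checked))
				list_del_init(&t->trc_holdout_list); /* QS observed. */
		}
	}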

kernel/rcu/tree_plugin.h

@@ -331,8 +331,7 @@ void rcu_note_context_switch(bool preempt)
 	rcu_qs();
 	if (rdp->exp_deferred_qs)
 		rcu_report_exp_rdp(rdp);
-	if (!preempt)
-		rcu_tasks_qs(current);
+	rcu_tasks_qs(current, preempt);
 	trace_rcu_utilization(TPS("End context switch"));
 }
 EXPORT_SYMBOL_GPL(rcu_note_context_switch);
@@ -841,8 +840,7 @@ void rcu_note_context_switch(bool preempt)
 	this_cpu_write(rcu_data.rcu_urgent_qs, false);
 	if (unlikely(raw_cpu_read(rcu_data.rcu_need_heavy_qs)))
 		rcu_momentary_dyntick_idle();
-	if (!preempt)
-		rcu_tasks_qs(current);
+	rcu_tasks_qs(current, preempt);
 out:
 	trace_rcu_utilization(TPS("End context switch"));
 }