rcu: Reduce dyntick-idle state space

Both extended-quiescent-state entry and exit first update the nesting
counter and then adjust the dyntick-idle state.  This means that there
are four states: (1) Both nesting and dyntick idle indicate idle,
(2) Nesting indicates idle but dyntick idle does not, (3) Nesting indicates
non-idle while dyntick idle still indicates idle, and (4) Both nesting and
dyntick idle indicate non-idle.  This commit simplifies the state space by
eliminating #3: reversing the order of the updates on exit from extended
quiescent state means that the transient state passed through on exit is
#2, the same transient state already passed through on entry.
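
For illustration only (this sketch is not part of the patch; nesting_idle,
dynticks_idle, and show() are made-up stand-ins for the real nesting and
dyntick-idle counters, which are counters rather than booleans), the effect
of the reordering on the transient exit state can be shown with a tiny
standalone C program:

  #include <stdio.h>

  static int nesting_idle;   /* 1 iff the nesting counter indicates idle. */
  static int dynticks_idle;  /* 1 iff the dyntick-idle state indicates idle. */

  static void show(const char *when)
  {
          printf("%-12s nesting_idle=%d dynticks_idle=%d\n",
                 when, nesting_idle, dynticks_idle);
  }

  int main(void)
  {
          /* Exit from idle, old order: nesting updated first. */
          nesting_idle = 1; dynticks_idle = 1;
          nesting_idle = 0;  show("old exit/1");  /* transient: state #3 */
          dynticks_idle = 0; show("old exit/2");  /* final:     state #4 */

          /* Exit from idle, new order: dyntick-idle updated first. */
          nesting_idle = 1; dynticks_idle = 1;
          dynticks_idle = 0; show("new exit/1");  /* transient: state #2 */
          nesting_idle = 0;  show("new exit/2");  /* final:     state #4 */
          return 0;
  }

With the new order, the only transient state ever observed is #2, which
entry into the extended quiescent state already produces.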

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Branch: hifive-unleashed-5.1
Author: Paul E. McKenney
Date:   2017-10-02 19:45:10 -07:00
parent 2e672ab2d4
commit a0eb22bf64
1 changed file with 11 additions and 12 deletions

@@ -928,21 +928,21 @@ void rcu_irq_exit_irqson(void)
  * we really have exited idle, and must do the appropriate accounting.
  * The caller must have disabled interrupts.
  */
-static void rcu_eqs_exit_common(long long oldval, int user)
+static void rcu_eqs_exit_common(long long newval, int user)
 {
 	RCU_TRACE(struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);)
 
 	rcu_dynticks_task_exit();
 	rcu_dynticks_eqs_exit();
 	rcu_cleanup_after_idle();
-	trace_rcu_dyntick(TPS("End"), oldval, rdtp->dynticks_nesting);
+	trace_rcu_dyntick(TPS("End"), rdtp->dynticks_nesting, newval);
 	if (IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
 	    !user && !is_idle_task(current)) {
 		struct task_struct *idle __maybe_unused =
 					idle_task(smp_processor_id());
 
 		trace_rcu_dyntick(TPS("Error on exit: not idle task"),
-				  oldval, rdtp->dynticks_nesting);
+				  rdtp->dynticks_nesting, newval);
 		rcu_ftrace_dump(DUMP_ORIG);
 		WARN_ONCE(1, "Current pid: %d comm: %s / Idle pid: %d comm: %s",
 			  current->pid, current->comm,
@@ -967,8 +967,8 @@ static void rcu_eqs_exit(bool user)
 		rdtp->dynticks_nesting += DYNTICK_TASK_NEST_VALUE;
 	} else {
 		__this_cpu_inc(disable_rcu_irq_enter);
+		rcu_eqs_exit_common(DYNTICK_TASK_EXIT_IDLE, user);
 		rdtp->dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
-		rcu_eqs_exit_common(oldval, user);
 		__this_cpu_dec(disable_rcu_irq_enter);
 	}
 }
@@ -1037,7 +1037,7 @@ void rcu_user_exit(void)
 void rcu_irq_enter(void)
 {
 	struct rcu_dynticks *rdtp;
-	long long oldval;
+	long long newval;
 
 	lockdep_assert_irqs_disabled();
 	rdtp = this_cpu_ptr(&rcu_dynticks);
@@ -1046,14 +1046,13 @@ void rcu_irq_enter(void)
 	if (rdtp->dynticks_nmi_nesting)
 		return;
 
-	oldval = rdtp->dynticks_nesting;
-	rdtp->dynticks_nesting++;
-	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
-		     rdtp->dynticks_nesting == 0);
-	if (oldval)
-		trace_rcu_dyntick(TPS("++="), oldval, rdtp->dynticks_nesting);
+	newval = rdtp->dynticks_nesting + 1;
+	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && newval == 0);
+	if (rdtp->dynticks_nesting)
+		trace_rcu_dyntick(TPS("++="), rdtp->dynticks_nesting, newval);
 	else
-		rcu_eqs_exit_common(oldval, true);
+		rcu_eqs_exit_common(newval, true);
+	rdtp->dynticks_nesting++;
 }
 
 /*