rcu: Remove rcu_needs_cpu_flush() to avoid false quiescent states
The purpose of rcu_needs_cpu_flush() was to iterate on pushing the current grace period in order to help the current CPU enter dyntick-idle mode. However, this can result in failures if the CPU starts entering dyntick-idle mode, but then backs out. In this case, the call to rcu_pending() from rcu_needs_cpu_flush() might end up announcing a non-existent quiescent state. This commit therefore removes rcu_needs_cpu_flush() in favor of letting the dyntick-idle machinery at the end of the softirq handler push the loop along via its call to rcu_pending().

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>

hifive-unleashed-5.1
parent
5b61b0baa9
commit
e90c53d3e2
|
@ -1528,9 +1528,6 @@ static void rcu_process_callbacks(struct softirq_action *unused)
|
||||||
&__get_cpu_var(rcu_sched_data));
|
&__get_cpu_var(rcu_sched_data));
|
||||||
__rcu_process_callbacks(&rcu_bh_state, &__get_cpu_var(rcu_bh_data));
|
__rcu_process_callbacks(&rcu_bh_state, &__get_cpu_var(rcu_bh_data));
|
||||||
rcu_preempt_process_callbacks();
|
rcu_preempt_process_callbacks();
|
||||||
|
|
||||||
/* If we are last CPU on way to dyntick-idle mode, accelerate it. */
|
|
||||||
rcu_needs_cpu_flush();
|
|
||||||
trace_rcu_utilization("End RCU core");
|
trace_rcu_utilization("End RCU core");
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -458,7 +458,6 @@ static int rcu_preempt_needs_cpu(int cpu);
|
||||||
static void __cpuinit rcu_preempt_init_percpu_data(int cpu);
|
static void __cpuinit rcu_preempt_init_percpu_data(int cpu);
|
||||||
static void rcu_preempt_send_cbs_to_online(void);
|
static void rcu_preempt_send_cbs_to_online(void);
|
||||||
static void __init __rcu_init_preempt(void);
|
static void __init __rcu_init_preempt(void);
|
||||||
static void rcu_needs_cpu_flush(void);
|
|
||||||
static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags);
|
static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags);
|
||||||
static void rcu_preempt_boost_start_gp(struct rcu_node *rnp);
|
static void rcu_preempt_boost_start_gp(struct rcu_node *rnp);
|
||||||
static void invoke_rcu_callbacks_kthread(void);
|
static void invoke_rcu_callbacks_kthread(void);
|
||||||
|
|
|
@ -1948,15 +1948,6 @@ int rcu_needs_cpu(int cpu)
|
||||||
return rcu_needs_cpu_quick_check(cpu);
|
return rcu_needs_cpu_quick_check(cpu);
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
|
||||||
* Check to see if we need to continue a callback-flush operations to
|
|
||||||
* allow the last CPU to enter dyntick-idle mode. But fast dyntick-idle
|
|
||||||
* entry is not configured, so we never do need to.
|
|
||||||
*/
|
|
||||||
static void rcu_needs_cpu_flush(void)
|
|
||||||
{
|
|
||||||
}
|
|
||||||
|
|
||||||
#else /* #if !defined(CONFIG_RCU_FAST_NO_HZ) */
|
#else /* #if !defined(CONFIG_RCU_FAST_NO_HZ) */
|
||||||
|
|
||||||
#define RCU_NEEDS_CPU_FLUSHES 5
|
#define RCU_NEEDS_CPU_FLUSHES 5
|
||||||
|
@ -2032,20 +2023,4 @@ int rcu_needs_cpu(int cpu)
|
||||||
return c;
|
return c;
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
|
||||||
* Check to see if we need to continue a callback-flush operations to
|
|
||||||
* allow the last CPU to enter dyntick-idle mode.
|
|
||||||
*/
|
|
||||||
static void rcu_needs_cpu_flush(void)
|
|
||||||
{
|
|
||||||
int cpu = smp_processor_id();
|
|
||||||
unsigned long flags;
|
|
||||||
|
|
||||||
if (per_cpu(rcu_dyntick_drain, cpu) <= 0)
|
|
||||||
return;
|
|
||||||
local_irq_save(flags);
|
|
||||||
(void)rcu_needs_cpu(cpu);
|
|
||||||
local_irq_restore(flags);
|
|
||||||
}
|
|
||||||
|
|
||||||
#endif /* #else #if !defined(CONFIG_RCU_FAST_NO_HZ) */
|
#endif /* #else #if !defined(CONFIG_RCU_FAST_NO_HZ) */
|
||||||
|
|
Loading…
Reference in New Issue