rcu: Eliminate rcu_irq_enter_disabled()

Now that the irq path uses the rcu_nmi_{enter,exit}() algorithm,
rcu_irq_enter() and rcu_irq_exit() may be used from any context.  There is
thus no need for rcu_irq_enter_disabled() and for the checks using it.
This commit therefore eliminates rcu_irq_enter_disabled().
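
For illustration, here is a minimal sketch (not part of this patch; the
hook names are made up) of what the change means for a caller that needs
RCU to be watching:

	#include <linux/bug.h>
	#include <linux/rcupdate.h>

	/* Hypothetical example, not in the kernel tree. */

	/* Before: rcu_irq_enter() could not be used in a narrow window, so
	 * callers had to bail out when rcu_irq_enter_disabled() said so. */
	static void old_style_hook(void)
	{
		if (WARN_ON_ONCE(rcu_irq_enter_disabled()))
			return;
		rcu_irq_enter_irqson();
		/* ... work that requires RCU to be watching ... */
		rcu_irq_exit_irqson();
	}

	/* After: the irq path uses the rcu_nmi_{enter,exit}() algorithm, so
	 * rcu_irq_enter_irqson() is safe from any context and the check
	 * disappears. */
	static void new_style_hook(void)
	{
		rcu_irq_enter_irqson();
		/* ... work that requires RCU to be watching ... */
		rcu_irq_exit_irqson();
	}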

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Paul E. McKenney 2017-10-03 16:51:47 -07:00
parent 51a1fd30f1
commit 844ccdd7dc
5 changed files with 3 additions and 37 deletions

@@ -111,7 +111,6 @@ static inline void rcu_cpu_stall_reset(void) { }
 static inline void rcu_idle_enter(void) { }
 static inline void rcu_idle_exit(void) { }
 static inline void rcu_irq_enter(void) { }
-static inline bool rcu_irq_enter_disabled(void) { return false; }
 static inline void rcu_irq_exit_irqson(void) { }
 static inline void rcu_irq_enter_irqson(void) { }
 static inline void rcu_irq_exit(void) { }

@@ -85,7 +85,6 @@ void rcu_irq_enter(void);
 void rcu_irq_exit(void);
 void rcu_irq_enter_irqson(void);
 void rcu_irq_exit_irqson(void);
-bool rcu_irq_enter_disabled(void);
 void exit_rcu(void);

@@ -137,11 +137,8 @@ extern void syscall_unregfunc(void);
 \
 		if (!(cond)) \
 			return; \
-		if (rcucheck) { \
-			if (WARN_ON_ONCE(rcu_irq_enter_disabled())) \
-				return; \
+		if (rcucheck) \
 			rcu_irq_enter_irqson(); \
-		} \
 		rcu_read_lock_sched_notrace(); \
 		it_func_ptr = rcu_dereference_sched((tp)->funcs); \
 		if (it_func_ptr) { \

@@ -270,20 +270,6 @@ static DEFINE_PER_CPU(struct rcu_dynticks, rcu_dynticks) = {
 	.dynticks = ATOMIC_INIT(RCU_DYNTICK_CTRL_CTR),
 };
-/*
- * There's a few places, currently just in the tracing infrastructure,
- * that uses rcu_irq_enter() to make sure RCU is watching. But there's
- * a small location where that will not even work. In those cases
- * rcu_irq_enter_disabled() needs to be checked to make sure rcu_irq_enter()
- * can be called.
- */
-static DEFINE_PER_CPU(bool, disable_rcu_irq_enter);
-bool rcu_irq_enter_disabled(void)
-{
-	return this_cpu_read(disable_rcu_irq_enter);
-}
 /*
  * Record entry into an extended quiescent state. This is only to be
  * called when not already in an extended quiescent state.
@@ -792,10 +778,8 @@ static void rcu_eqs_enter_common(bool user)
 		do_nocb_deferred_wakeup(rdp);
 	}
 	rcu_prepare_for_idle();
-	__this_cpu_inc(disable_rcu_irq_enter);
-	rdtp->dynticks_nesting = 0; /* Breaks tracing momentarily. */
-	rcu_dynticks_eqs_enter(); /* After this, tracing works again. */
-	__this_cpu_dec(disable_rcu_irq_enter);
+	rdtp->dynticks_nesting = 0;
+	rcu_dynticks_eqs_enter();
 	rcu_dynticks_task_enter();
 	/*
@@ -1001,10 +985,8 @@ static void rcu_eqs_exit(bool user)
 	if (oldval) {
 		rdtp->dynticks_nesting++;
 	} else {
-		__this_cpu_inc(disable_rcu_irq_enter);
 		rcu_eqs_exit_common(1, user);
 		rdtp->dynticks_nesting = 1;
-		__this_cpu_dec(disable_rcu_irq_enter);
 		WRITE_ONCE(rdtp->dynticks_nmi_nesting, DYNTICK_IRQ_NONIDLE);
 	}
 }

@@ -2682,17 +2682,6 @@ void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
 	if (unlikely(in_nmi()))
 		return;
-	/*
-	 * It is possible that a function is being traced in a
-	 * location that RCU is not watching. A call to
-	 * rcu_irq_enter() will make sure that it is, but there's
-	 * a few internal rcu functions that could be traced
-	 * where that wont work either. In those cases, we just
-	 * do nothing.
-	 */
-	if (unlikely(rcu_irq_enter_disabled()))
-		return;
 	rcu_irq_enter_irqson();
 	__ftrace_trace_stack(buffer, flags, skip, pc, NULL);
 	rcu_irq_exit_irqson();