rcu: Use rsp->expedited_wq instead of sync_rcu_preempt_exp_wq
Now that there is an ->expedited_wq waitqueue in each rcu_state structure, there is no need for the sync_rcu_preempt_exp_wq global variable. This commit therefore substitutes ->expedited_wq for sync_rcu_preempt_exp_wq. It also initializes ->expedited_wq only once at boot instead of at the start of each expedited grace period.

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
This commit is contained in:
parent: 19a5ecde08
commit: f4ecea309d
@@ -3556,7 +3556,6 @@ void synchronize_sched_expedited(void)
 	rcu_exp_gp_seq_start(rsp);

 	/* Stop each CPU that is online, non-idle, and not us. */
-	init_waitqueue_head(&rsp->expedited_wq);
 	atomic_set(&rsp->expedited_need_qs, 1); /* Extra count avoids race. */
 	for_each_online_cpu(cpu) {
 		struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
@@ -4179,6 +4178,7 @@ static void __init rcu_init_one(struct rcu_state *rsp,
 	}

 	init_waitqueue_head(&rsp->gp_wq);
+	init_waitqueue_head(&rsp->expedited_wq);
 	rnp = rsp->level[rcu_num_lvls - 1];
 	for_each_possible_cpu(i) {
 		while (i > rnp->grphi)
@@ -535,8 +535,6 @@ void synchronize_rcu(void)
 }
 EXPORT_SYMBOL_GPL(synchronize_rcu);

-static DECLARE_WAIT_QUEUE_HEAD(sync_rcu_preempt_exp_wq);
-
 /*
  * Return non-zero if there are any tasks in RCU read-side critical
  * sections blocking the current preemptible-RCU expedited grace period.
@@ -590,7 +588,7 @@ static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
 			raw_spin_unlock_irqrestore(&rnp->lock, flags);
 			if (wake) {
 				smp_mb(); /* EGP done before wake_up(). */
-				wake_up(&sync_rcu_preempt_exp_wq);
+				wake_up(&rsp->expedited_wq);
 			}
 			break;
 		}
@@ -729,7 +727,7 @@ void synchronize_rcu_expedited(void)

 	/* Wait for snapshotted ->blkd_tasks lists to drain. */
 	rnp = rcu_get_root(rsp);
-	wait_event(sync_rcu_preempt_exp_wq,
+	wait_event(rsp->expedited_wq,
 		   sync_rcu_preempt_exp_done(rnp));

 	/* Clean up and exit. */
Loading…
Reference in a new issue