diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 775d36cc0050..53d66ebb4811 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -3556,7 +3556,6 @@ void synchronize_sched_expedited(void)
 	rcu_exp_gp_seq_start(rsp);
 
 	/* Stop each CPU that is online, non-idle, and not us. */
-	init_waitqueue_head(&rsp->expedited_wq);
 	atomic_set(&rsp->expedited_need_qs, 1); /* Extra count avoids race. */
 	for_each_online_cpu(cpu) {
 		struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
@@ -4179,6 +4178,7 @@ static void __init rcu_init_one(struct rcu_state *rsp,
 	}
 
 	init_waitqueue_head(&rsp->gp_wq);
+	init_waitqueue_head(&rsp->expedited_wq);
 	rnp = rsp->level[rcu_num_lvls - 1];
 	for_each_possible_cpu(i) {
 		while (i > rnp->grphi)
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index b2bf3963a0ae..72df006de798 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -535,8 +535,6 @@ void synchronize_rcu(void)
 }
 EXPORT_SYMBOL_GPL(synchronize_rcu);
 
-static DECLARE_WAIT_QUEUE_HEAD(sync_rcu_preempt_exp_wq);
-
 /*
  * Return non-zero if there are any tasks in RCU read-side critical
  * sections blocking the current preemptible-RCU expedited grace period.
@@ -590,7 +588,7 @@ static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
 			raw_spin_unlock_irqrestore(&rnp->lock, flags);
 			if (wake) {
 				smp_mb(); /* EGP done before wake_up(). */
-				wake_up(&sync_rcu_preempt_exp_wq);
+				wake_up(&rsp->expedited_wq);
 			}
 			break;
 		}
@@ -729,7 +727,7 @@ void synchronize_rcu_expedited(void)
 
 	/* Wait for snapshotted ->blkd_tasks lists to drain. */
 	rnp = rcu_get_root(rsp);
-	wait_event(sync_rcu_preempt_exp_wq,
+	wait_event(rsp->expedited_wq,
 		   sync_rcu_preempt_exp_done(rnp));
 
 	/* Clean up and exit. */
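
For context, a minimal sketch of the wait-queue idiom this patch settles on: the queue lives in the shared state structure, init_waitqueue_head() runs exactly once at init time (never on a hot path where waiters may already be queued), and every waker/waiter pair then agrees on that one queue. This is not code from the patch; the demo_* names are hypothetical, chosen only to illustrate the pattern.

	/* Hypothetical illustration of the init-once wait-queue pattern. */
	#include <linux/wait.h>
	#include <linux/atomic.h>

	struct demo_state {
		atomic_t		done;	/* condition the waiter polls */
		wait_queue_head_t	wq;	/* single shared wait queue */
	};

	static void demo_init(struct demo_state *sp)
	{
		atomic_set(&sp->done, 0);
		init_waitqueue_head(&sp->wq);	/* init time only, like rcu_init_one() */
	}

	static void demo_complete(struct demo_state *sp)
	{
		atomic_set(&sp->done, 1);
		smp_mb();			/* publish ->done before wake_up(), as the patch does */
		wake_up(&sp->wq);
	}

	static void demo_wait(struct demo_state *sp)
	{
		/* wait_event() re-checks the condition, so a racing wakeup is not lost. */
		wait_event(sp->wq, atomic_read(&sp->done) != 0);
	}

Hanging the queue off the shared state (rsp->expedited_wq) rather than a file-static (sync_rcu_preempt_exp_wq) lets both expedited paths share one per-rcu_state queue, and moving init_waitqueue_head() into rcu_init_one() makes initialization a strictly boot-time operation instead of something repeated on every synchronize_sched_expedited() call.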