
x86, mce: fix a race condition about mce_callin and no_way_out

If one CPU has no_way_out == 1, all other CPUs should see no_way_out
== 1 as well. But although global_nwo is read after mce_callin, it is
also updated after mce_callin. So one CPU may read global_nwo before
another CPU has updated it, leaving no_way_out == 1 on some CPUs while
no_way_out == 0 on others.

This patch fixes the race by moving the mce_callin update after the
global_nwo update, with an smp_wmb() between the two stores. An
smp_rmb() is likewise added between the corresponding reads.

Signed-off-by: Huang Ying <ying.huang@intel.com>
Acked-by: Andi Kleen <ak@linux.intel.com>
Acked-by: Hidetoshi Seto <seto.hidetoshi@jp.fujitsu.com>
Huang Ying 2009-06-15 15:37:07 +08:00 committed by H. Peter Anvin
parent 300df7dc89
commit 184e1fdfea
1 changed file with 10 additions and 2 deletions
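
The ordering the patch enforces can be sketched outside the kernel with C11 atomics. The snippet below is illustrative only: nwo, callin, cpu_thread and NCPUS are made-up stand-ins for global_nwo, mce_callin and the CPUs entering the handler, and atomic_thread_fence(memory_order_release) / atomic_thread_fence(memory_order_acquire) play the roles of smp_wmb() / smp_rmb().

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define NCPUS 4

static atomic_int nwo;		/* stand-in for global_nwo  */
static atomic_int callin;	/* stand-in for mce_callin  */

static void *cpu_thread(void *arg)
{
	int my_nwo = (arg != NULL);	/* pretend one CPU saw a fatal error */

	atomic_fetch_add_explicit(&nwo, my_nwo, memory_order_relaxed);
	/* smp_wmb() analogue: make the nwo update visible before announcing via callin */
	atomic_thread_fence(memory_order_release);
	atomic_fetch_add_explicit(&callin, 1, memory_order_relaxed);

	/* wait for everyone, as mce_start() does */
	while (atomic_load_explicit(&callin, memory_order_relaxed) != NCPUS)
		;
	/* smp_rmb() analogue: read callin before reading nwo */
	atomic_thread_fence(memory_order_acquire);

	/* every thread now agrees on the no_way_out decision */
	printf("nwo seen: %d\n",
	       atomic_load_explicit(&nwo, memory_order_relaxed));
	return NULL;
}

int main(void)
{
	pthread_t t[NCPUS];
	int i;

	for (i = 0; i < NCPUS; i++)
		pthread_create(&t[i], NULL, cpu_thread, i == 0 ? (void *)1 : NULL);
	for (i = 0; i < NCPUS; i++)
		pthread_join(t[i], NULL);
	return 0;
}

Without the paired release/acquire fences, a thread could observe callin reach NCPUS yet still read a stale nwo of 0, which is exactly the disagreement described in the commit message. Build with something like cc -pthread.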


@@ -703,6 +703,11 @@ static int mce_start(int no_way_out, int *order)
 	}
 
 	atomic_add(no_way_out, &global_nwo);
+	/*
+	 * global_nwo should be updated before mce_callin
+	 */
+	smp_wmb();
+	*order = atomic_add_return(1, &mce_callin);
 
 	/*
 	 * Wait for everyone.
@@ -716,6 +721,10 @@ static int mce_start(int no_way_out, int *order)
 		ndelay(SPINUNIT);
 	}
 
+	/*
+	 * mce_callin should be read before global_nwo
+	 */
+	smp_rmb();
 	/*
 	 * Cache the global no_way_out state.
 	 */
@@ -862,7 +871,7 @@ void do_machine_check(struct pt_regs *regs, long error_code)
 	 * Establish sequential order between the CPUs entering the machine
 	 * check handler.
 	 */
-	int order;
+	int order = -1;
 
 	/*
 	 * If no_way_out gets set, there is no safe way to recover from this
@@ -887,7 +896,6 @@ void do_machine_check(struct pt_regs *regs, long error_code)
 	if (!banks)
 		goto out;
 
-	order = atomic_add_return(1, &mce_callin);
 	mce_setup(&m);
 
 	m.mcgstatus = mce_rdmsrl(MSR_IA32_MCG_STATUS);