
locking, ACPI: Annotate c3_lock as raw

We cannot preempt this lock on -rt as we are in an
interrupt disabled region and about to go into deep sleep.

In mainline this change documents the low level nature of
the lock - otherwise there's no functional difference. Lockdep
and Sparse checking will work as usual.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Len Brown <len.brown@intel.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Thomas Gleixner 2009-07-25 18:14:37 +02:00 committed by Ingo Molnar
parent 2d21a29fb6
commit e12f65f7a4
1 changed file with 5 additions and 5 deletions


@@ -852,7 +852,7 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev,
 }
 
 static int c3_cpu_count;
-static DEFINE_SPINLOCK(c3_lock);
+static DEFINE_RAW_SPINLOCK(c3_lock);
 
 /**
  * acpi_idle_enter_bm - enters C3 with proper BM handling
@@ -930,12 +930,12 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
 	 * without doing anything.
 	 */
 	if (pr->flags.bm_check && pr->flags.bm_control) {
-		spin_lock(&c3_lock);
+		raw_spin_lock(&c3_lock);
 		c3_cpu_count++;
 		/* Disable bus master arbitration when all CPUs are in C3 */
 		if (c3_cpu_count == num_online_cpus())
 			acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 1);
-		spin_unlock(&c3_lock);
+		raw_spin_unlock(&c3_lock);
 	} else if (!pr->flags.bm_check) {
 		ACPI_FLUSH_CPU_CACHE();
 	}
@@ -944,10 +944,10 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
 
 	/* Re-enable bus master arbitration */
 	if (pr->flags.bm_check && pr->flags.bm_control) {
-		spin_lock(&c3_lock);
+		raw_spin_lock(&c3_lock);
 		acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 0);
 		c3_cpu_count--;
-		spin_unlock(&c3_lock);
+		raw_spin_unlock(&c3_lock);
 	}
 	kt2 = ktime_get_real();
 	idle_time_ns = ktime_to_ns(ktime_sub(kt2, kt1));
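
For readers unfamiliar with the distinction: on PREEMPT_RT a spinlock_t is converted into a sleeping lock, which is not allowed in a region that runs with interrupts disabled and is about to enter a deep C-state; raw_spinlock_t always keeps spinning with preemption disabled. The snippet below is a minimal illustrative sketch of the same pattern, not code from the patch - the identifiers my_cpu_count, my_lock and my_idle_enter() are invented for the example, while DEFINE_RAW_SPINLOCK(), raw_spin_lock()/raw_spin_unlock(), num_online_cpus() and local_irq_disable()/local_irq_enable() are the real kernel interfaces seen in the diff above.

#include <linux/spinlock.h>
#include <linux/cpumask.h>
#include <linux/irqflags.h>

/* Illustrative counterpart of c3_cpu_count/c3_lock above. */
static int my_cpu_count;
static DEFINE_RAW_SPINLOCK(my_lock);	/* stays a spinning lock even on -rt */

static void my_idle_enter(void)
{
	local_irq_disable();		/* the real idle path already runs with IRQs off */

	raw_spin_lock(&my_lock);	/* never sleeps, safe with interrupts disabled */
	my_cpu_count++;
	if (my_cpu_count == num_online_cpus()) {
		/* last CPU entering: this is where ARB_DISABLE is written above */
	}
	raw_spin_unlock(&my_lock);

	/* ... enter the deep sleep state here ... */

	raw_spin_lock(&my_lock);
	my_cpu_count--;			/* leaving the deep state again */
	raw_spin_unlock(&my_lock);

	local_irq_enable();
}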