
sched: eliminate scd->prev_raw

eliminate prev_raw and use tick_raw instead.

It's enough to base the current time on the scheduler tick timestamp
alone - the monotonicity and maximum checks will prevent any damage.
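In other words, __update_sched_clock() now advances the clock by the raw delta
since the last tick and clamps the result into the window that tick established.
A minimal, simplified userspace sketch of that clamping follows; the
max_clock = min_clock + TICK_NSEC bound and the TICK_NSEC value used here are
assumptions for illustration, not part of this patch:

#include <stdint.h>
#include <stdio.h>

typedef uint64_t u64;
typedef int64_t s64;

#define TICK_NSEC 1000000ULL	/* illustrative 1 ms tick length */

/* Clamp a raw-delta-based clock into the window set by the last tick. */
static u64 clamp_clock(u64 clock, s64 delta, u64 tick_gtod, long delta_jiffies)
{
	u64 min_clock = tick_gtod + delta_jiffies * TICK_NSEC;
	u64 max_clock = min_clock + TICK_NSEC;	/* assumed upper bound */

	if (delta > 0)
		clock += delta;		/* raw delta measured from tick_raw */
	if (clock > max_clock)		/* maximum check */
		clock = max_clock;
	if (clock < min_clock)		/* monotonicity check */
		clock = min_clock;

	return clock;
}

int main(void)
{
	/* A raw clock that jumped far ahead of the tick still yields a bounded value. */
	printf("%llu\n", (unsigned long long)clamp_clock(5000000, 90000000, 4000000, 1));
	return 0;
}

Because the clamp is anchored to tick_gtod and tick_raw, tracking a separate
prev_raw timestamp adds nothing.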

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Acked-by: Mike Galbraith <efault@gmx.de>
Ingo Molnar 2008-07-30 10:13:35 +02:00
parent 50526968e9
commit 18e4e36c66
1 changed file with 1 addition and 5 deletions


@@ -53,7 +53,6 @@ struct sched_clock_data {
 	raw_spinlock_t		lock;
 	unsigned long		tick_jiffies;
-	u64			prev_raw;
 	u64			tick_raw;
 	u64			tick_gtod;
 	u64			clock;
@@ -84,7 +83,6 @@ void sched_clock_init(void)
 		scd->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
 		scd->tick_jiffies = now_jiffies;
-		scd->prev_raw = 0;
 		scd->tick_raw = 0;
 		scd->tick_gtod = ktime_now;
 		scd->clock = ktime_now;
@@ -105,7 +103,7 @@ static void __update_sched_clock(struct sched_clock_data *scd, u64 now)
 	long delta_jiffies = now_jiffies - scd->tick_jiffies;
 	u64 clock = scd->clock;
 	u64 min_clock, max_clock;
-	s64 delta = now - scd->prev_raw;
+	s64 delta = now - scd->tick_raw;
 	WARN_ON_ONCE(!irqs_disabled());
 	min_clock = scd->tick_gtod + delta_jiffies * TICK_NSEC;
@@ -130,7 +128,6 @@ static void __update_sched_clock(struct sched_clock_data *scd, u64 now)
 	if (unlikely(clock < min_clock))
 		clock = min_clock;
-	scd->prev_raw = now;
 	scd->tick_jiffies = now_jiffies;
 	scd->clock = clock;
 }
@@ -234,7 +231,6 @@ void sched_clock_idle_wakeup_event(u64 delta_ns)
 	 * rq clock:
 	 */
 	__raw_spin_lock(&scd->lock);
-	scd->prev_raw = now;
 	scd->clock += delta_ns;
 	__raw_spin_unlock(&scd->lock);