
Merge master.kernel.org:/pub/scm/linux/kernel/git/tglx/hrtimer-2.6

Linus Torvalds 2006-01-12 10:22:11 -08:00
commit 661dd5c840
3 changed files with 25 additions and 25 deletions

View File

@@ -49,8 +49,6 @@ struct hrtimer_base;
* struct hrtimer - the basic hrtimer structure
*
* @node: red black tree node for time ordered insertion
-* @list: list head for easier access to the time ordered list,
-* without walking the red black tree.
* @expires: the absolute expiry time in the hrtimers internal
* representation. The time is related to the clock on
* which the timer is based.
@@ -63,7 +61,6 @@ struct hrtimer_base;
*/
struct hrtimer {
struct rb_node node;
-struct list_head list;
ktime_t expires;
enum hrtimer_state state;
int (*function)(void *);
@@ -78,7 +75,7 @@ struct hrtimer {
* to a base on another cpu.
* @lock: lock protecting the base and associated timers
* @active: red black tree root node for the active timers
-* @pending: list of pending timers for simple time ordered access
+* @first: pointer to the timer node which expires first
* @resolution: the resolution of the clock, in nanoseconds
* @get_time: function to retrieve the current time of the clock
* @curr_timer: the timer which is executing a callback right now
@@ -87,8 +84,8 @@ struct hrtimer_base {
clockid_t index;
spinlock_t lock;
struct rb_root active;
-struct list_head pending;
-unsigned long resolution;
+struct rb_node *first;
+ktime_t resolution;
ktime_t (*get_time)(void);
struct hrtimer *curr_timer;
};
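
For orientation, the two structures read roughly as follows once the hunks above are applied. Members outside the diff context are omitted and the types (ktime_t, rb_node, spinlock_t, clockid_t) come from the usual kernel headers, so this is a partial reading aid rather than the full definitions: the per-timer list_head is gone, and the per-clock base now caches the leftmost (earliest-expiring) rbtree node in first.

    struct hrtimer {
        struct rb_node     node;       /* position in the per-base rbtree, ordered by expiry */
        ktime_t            expires;    /* absolute expiry time in the clock's own time domain */
        enum hrtimer_state state;
        int (*function)(void *);
        /* ... members outside this diff's context omitted ... */
    };

    struct hrtimer_base {
        clockid_t       index;         /* which clock this base represents */
        spinlock_t      lock;          /* protects the base and its timers */
        struct rb_root  active;        /* all pending timers, ordered by expiry */
        struct rb_node  *first;        /* cached leftmost node = next timer to expire */
        ktime_t         resolution;    /* clock resolution, now a ktime_t instead of a plain ns count */
        ktime_t         (*get_time)(void);
        struct hrtimer  *curr_timer;   /* timer whose callback is currently running */
    };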
@@ -125,8 +122,7 @@ static inline int hrtimer_active(const struct hrtimer *timer)
}
/* Forward a hrtimer so it expires after now: */
-extern unsigned long hrtimer_forward(struct hrtimer *timer,
-const ktime_t interval);
+extern unsigned long hrtimer_forward(struct hrtimer *timer, ktime_t interval);
/* Precise sleep: */
extern long hrtimer_nanosleep(struct timespec *rqtp,

View File

@@ -272,8 +272,8 @@ static inline u64 ktime_to_ns(const ktime_t kt)
* idea of the (in)accuracy of timers. Timer values are rounded up to
* this resolution values.
*/
-#define KTIME_REALTIME_RES (NSEC_PER_SEC/HZ)
-#define KTIME_MONOTONIC_RES (NSEC_PER_SEC/HZ)
+#define KTIME_REALTIME_RES (ktime_t){ .tv64 = TICK_NSEC }
+#define KTIME_MONOTONIC_RES (ktime_t){ .tv64 = TICK_NSEC }
/* Get the monotonic time in timespec format: */
extern void ktime_get_ts(struct timespec *ts);
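
The old macros expanded to a bare nanosecond count (NSEC_PER_SEC/HZ); the new ones build a ktime_t compound literal from TICK_NSEC, which matches the ktime_t resolution field introduced above and lets callers compare via .tv64 directly. A minimal userspace sketch of that usage, with a toy ktime_t and an assumed HZ of 250 (both stand-ins for the real kernel definitions):

    /*
     * Userspace model only: the real ktime_t, HZ and TICK_NSEC come from the
     * kernel headers, so the values below are illustrative assumptions.
     */
    #include <stdio.h>
    #include <stdint.h>

    typedef struct { int64_t tv64; } ktime_t;     /* toy stand-in for the kernel's ktime_t */

    #define NSEC_PER_SEC 1000000000L
    #define HZ           250                      /* assumed tick rate */
    #define TICK_NSEC    (NSEC_PER_SEC / HZ)      /* rough stand-in for the kernel's TICK_NSEC */

    /* Old style: a bare nanosecond count.  New style: a ktime_t compound literal. */
    #define KTIME_MONOTONIC_RES  (ktime_t){ .tv64 = TICK_NSEC }

    int main(void)
    {
        ktime_t interval = { .tv64 = 1000000 };   /* 1 ms requested interval */
        ktime_t res = KTIME_MONOTONIC_RES;

        /* The ktime_t form compares directly via .tv64, e.g. to clamp an interval: */
        if (interval.tv64 < res.tv64)
            interval.tv64 = res.tv64;

        printf("clamped interval: %lld ns\n", (long long)interval.tv64);  /* 4000000 with HZ=250 */
        return 0;
    }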

View File

@@ -275,7 +275,7 @@ void unlock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags)
* The number of overruns is added to the overrun field.
*/
unsigned long
-hrtimer_forward(struct hrtimer *timer, const ktime_t interval)
+hrtimer_forward(struct hrtimer *timer, ktime_t interval)
{
unsigned long orun = 1;
ktime_t delta, now;
@@ -287,6 +287,9 @@ hrtimer_forward(struct hrtimer *timer, const ktime_t interval)
if (delta.tv64 < 0)
return 0;
+if (interval.tv64 < timer->base->resolution.tv64)
+interval.tv64 = timer->base->resolution.tv64;
if (unlikely(delta.tv64 >= interval.tv64)) {
nsec_t incr = ktime_to_ns(interval);
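
The added lines clamp the forwarding interval to the clock resolution before the overrun calculation. As a rough illustration, here is the forwarding arithmetic with ktime_t flattened to a plain int64 nanosecond count; only the clamp is taken verbatim from the hunk, the rest paraphrases what hrtimer_forward does (push expires past now in whole multiples of interval and report how many periods were skipped), so treat it as a sketch rather than the kernel code:

    #include <stdio.h>
    #include <stdint.h>

    static uint64_t forward_ns(int64_t *expires, int64_t now,
                               int64_t interval, int64_t resolution)
    {
        uint64_t orun = 1;
        int64_t delta = now - *expires;

        if (delta < 0)                  /* not expired yet: nothing to do */
            return 0;

        if (interval < resolution)      /* the clamp added by this patch */
            interval = resolution;

        if (delta >= interval) {        /* more than one period was skipped */
            orun = delta / interval;
            *expires += orun * interval;
            if (*expires > now)
                return orun;
            orun++;                     /* correction when we still land at or behind 'now' */
        }
        *expires += interval;
        return orun;
    }

    int main(void)
    {
        int64_t expires = 1000000;      /* timer was due at t = 1 ms */
        uint64_t orun = forward_ns(&expires, 10500000, 4000000, 1000000);

        /* now = 10.5 ms, interval = 4 ms: the expiries at 1, 5 and 9 ms were
         * all missed, so orun = 3 and the new expiry lands at 13 ms. */
        printf("overruns = %llu, new expiry = %lld ns\n",
               (unsigned long long)orun, (long long)expires);
        return 0;
    }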
@@ -314,7 +317,6 @@ hrtimer_forward(struct hrtimer *timer, const ktime_t interval)
static void enqueue_hrtimer(struct hrtimer *timer, struct hrtimer_base *base)
{
struct rb_node **link = &base->active.rb_node;
-struct list_head *prev = &base->pending;
struct rb_node *parent = NULL;
struct hrtimer *entry;
@@ -330,22 +332,23 @@ static void enqueue_hrtimer(struct hrtimer *timer, struct hrtimer_base *base)
*/
if (timer->expires.tv64 < entry->expires.tv64)
link = &(*link)->rb_left;
-else {
+else
link = &(*link)->rb_right;
-prev = &entry->list;
-}
}
/*
-* Insert the timer to the rbtree and to the sorted list:
+* Insert the timer to the rbtree and check whether it
+* replaces the first pending timer
*/
rb_link_node(&timer->node, parent, link);
rb_insert_color(&timer->node, &base->active);
-list_add(&timer->list, prev);
timer->state = HRTIMER_PENDING;
-}
+if (!base->first || timer->expires.tv64 <
+rb_entry(base->first, struct hrtimer, node)->expires.tv64)
+base->first = &timer->node;
+}
/*
* __remove_hrtimer - internal function to remove a timer
@@ -355,9 +358,11 @@ static void enqueue_hrtimer(struct hrtimer *timer, struct hrtimer_base *base)
static void __remove_hrtimer(struct hrtimer *timer, struct hrtimer_base *base)
{
/*
-* Remove the timer from the sorted list and from the rbtree:
+* Remove the timer from the rbtree and replace the
+* first entry pointer if necessary.
*/
-list_del(&timer->list);
+if (base->first == &timer->node)
+base->first = rb_next(&timer->node);
rb_erase(&timer->node, &base->active);
}
@@ -516,9 +521,8 @@ int hrtimer_get_res(const clockid_t which_clock, struct timespec *tp)
{
struct hrtimer_base *bases;
-tp->tv_sec = 0;
bases = per_cpu(hrtimer_bases, raw_smp_processor_id());
-tp->tv_nsec = bases[which_clock].resolution;
+*tp = ktime_to_timespec(bases[which_clock].resolution);
return 0;
}
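
With resolution now a ktime_t, the hand-written tv_sec = 0 / tv_nsec assignment no longer fits, and ktime_to_timespec performs the full split. The split itself is the usual nanoseconds-to-timespec conversion, sketched here in plain C for illustration (the kernel helper additionally deals with ktime_t's differing 32/64-bit internal representations):

    #include <stdio.h>
    #include <stdint.h>
    #include <time.h>

    #define NSEC_PER_SEC 1000000000L

    /* Illustrative helper, not the kernel's ktime_to_timespec itself. */
    static struct timespec ns_to_timespec(int64_t ns)
    {
        struct timespec ts = {
            .tv_sec  = ns / NSEC_PER_SEC,
            .tv_nsec = ns % NSEC_PER_SEC,
        };
        return ts;
    }

    int main(void)
    {
        struct timespec ts = ns_to_timespec(4000000);   /* e.g. a 4 ms resolution */
        printf("res = %lds %ldns\n", (long)ts.tv_sec, (long)ts.tv_nsec);
        return 0;
    }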
@@ -529,16 +533,17 @@ int hrtimer_get_res(const clockid_t which_clock, struct timespec *tp)
static inline void run_hrtimer_queue(struct hrtimer_base *base)
{
ktime_t now = base->get_time();
+struct rb_node *node;
spin_lock_irq(&base->lock);
-while (!list_empty(&base->pending)) {
+while ((node = base->first)) {
struct hrtimer *timer;
int (*fn)(void *);
int restart;
void *data;
-timer = list_entry(base->pending.next, struct hrtimer, list);
+timer = rb_entry(node, struct hrtimer, node);
if (now.tv64 <= timer->expires.tv64)
break;
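
Taken together, the enqueue, remove and run-queue hunks replace the old sorted list with a single cached pointer to the earliest timer. The toy below models only that bookkeeping in userspace: the fixed pool and the linear rescan stand in for the kernel's red-black tree and rb_next(), and every name in it is made up for illustration:

    #include <stdio.h>
    #include <stdint.h>
    #include <stddef.h>

    struct toy_timer {
        int64_t expires;        /* absolute expiry, ns */
        int     armed;
    };

    static struct toy_timer pool[8];
    static struct toy_timer *first;     /* plays the role of base->first */

    static void toy_enqueue(struct toy_timer *t, int64_t expires)
    {
        t->expires = expires;
        t->armed = 1;
        /* Same rule as enqueue_hrtimer: a new timer becomes 'first' only if
         * there is none yet or it expires earlier than the current one. */
        if (!first || t->expires < first->expires)
            first = t;
    }

    static void toy_remove(struct toy_timer *t)
    {
        t->armed = 0;
        /* __remove_hrtimer: only when the removed timer *is* 'first' does the
         * cache have to move on to the next-earliest entry (the kernel gets
         * that from rb_next(); the rescan here is just the toy equivalent). */
        if (t == first) {
            first = NULL;
            for (size_t i = 0; i < sizeof(pool) / sizeof(pool[0]); i++)
                if (pool[i].armed && (!first || pool[i].expires < first->expires))
                    first = &pool[i];
        }
    }

    static void toy_run_queue(int64_t now)
    {
        struct toy_timer *t;
        /* run_hrtimer_queue: keep taking the earliest timer until it is no
         * longer past 'now'. */
        while ((t = first)) {
            if (now <= t->expires)
                break;
            printf("expired timer due at %lld ns\n", (long long)t->expires);
            toy_remove(t);
        }
    }

    int main(void)
    {
        toy_enqueue(&pool[0], 3000000);
        toy_enqueue(&pool[1], 1000000);
        toy_enqueue(&pool[2], 9000000);
        toy_run_queue(5000000);         /* expires the 1 ms and 3 ms timers */
        return 0;
    }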
@@ -732,7 +737,6 @@ static void __devinit init_hrtimers_cpu(int cpu)
for (i = 0; i < MAX_HRTIMER_BASES; i++) {
spin_lock_init(&base->lock);
-INIT_LIST_HEAD(&base->pending);
base++;
}
}