sched/wait: Introduce wait_var_event()

As a replacement for the wait_on_atomic_t() API, provide the
wait_var_event() API.

The wait_var_event() API is based on the very same hashed-waitqueue
idea, but doesn't care about the type (atomic_t) or the specific
condition (atomic_read() == 0). IOW, it's much more widely
applicable/flexible.

It shares all the benefits/disadvantages of a hashed-waitqueue
approach with the existing wait_on_atomic_t/wait_on_bit() APIs.

The API is modeled after the existing wait_event() API, but instead of
taking a wait_queue_head, it takes an address. This address is
hashed to obtain a wait_queue_head from the bit_wait_table.

Similar to the wait_event() API, it takes a condition expression as
second argument and will wait until this expression becomes true.
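
For example (a hypothetical caller, not part of this patch): because
the condition expression is arbitrary, a plain flag works just as
well as an atomic_t:

	static bool done;

	/* waiter: blocks until the expression becomes true */
	wait_var_event(&done, READ_ONCE(done));

	/* waker: make the condition true, then wake the hashed waitqueue */
	WRITE_ONCE(done, true);
	wake_up_var(&done);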

The following are (mostly) identical replacements:

	wait_on_atomic_t(&my_atomic, atomic_t_wait, TASK_UNINTERRUPTIBLE);
	wake_up_atomic_t(&my_atomic);

	wait_var_event(&my_atomic, !atomic_read(&my_atomic));
	wake_up_var(&my_atomic);

The only difference is that wake_up_var() is an unconditional wakeup
and doesn't check the previously hard-coded (atomic_read() == 0)
condition. This is of little consequence, since most callers are
already conditional on atomic_dec_and_test(), and the ones that are
not are trivial to make so.
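
For illustration (reusing the hypothetical my_atomic from above), a
refcount-style user simply keeps the wakeup conditional itself:

	/* put side: only the final decrement issues the wakeup */
	if (atomic_dec_and_test(&my_atomic))
		wake_up_var(&my_atomic);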

Tested-by: Dan Williams <dan.j.williams@intel.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: David Howells <dhowells@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Peter Zijlstra 2018-03-15 11:40:33 +01:00 committed by Ingo Molnar
parent fc4c5a3828
commit 6b2bb7265f
2 changed files with 118 additions and 0 deletions

include/linux/wait_bit.h

@@ -262,4 +262,74 @@ int wait_on_atomic_t(atomic_t *val, wait_atomic_t_action_f action, unsigned mode
	return out_of_line_wait_on_atomic_t(val, action, mode);
}

extern void init_wait_var_entry(struct wait_bit_queue_entry *wbq_entry, void *var, int flags);
extern void wake_up_var(void *var);
extern wait_queue_head_t *__var_waitqueue(void *p);

#define ___wait_var_event(var, condition, state, exclusive, ret, cmd)	\
({									\
	__label__ __out;						\
	struct wait_queue_head *__wq_head = __var_waitqueue(var);	\
	struct wait_bit_queue_entry __wbq_entry;			\
	long __ret = ret; /* explicit shadow */				\
									\
	init_wait_var_entry(&__wbq_entry, var,				\
			    exclusive ? WQ_FLAG_EXCLUSIVE : 0);		\
	for (;;) {							\
		long __int = prepare_to_wait_event(__wq_head,		\
						   &__wbq_entry.wq_entry, \
						   state);		\
		if (condition)						\
			break;						\
									\
		if (___wait_is_interruptible(state) && __int) {		\
			__ret = __int;					\
			goto __out;					\
		}							\
									\
		cmd;							\
	}								\
	finish_wait(__wq_head, &__wbq_entry.wq_entry);			\
__out:	__ret;								\
})

#define __wait_var_event(var, condition)				\
	___wait_var_event(var, condition, TASK_UNINTERRUPTIBLE, 0, 0,	\
			  schedule())

#define wait_var_event(var, condition)					\
do {									\
	might_sleep();							\
	if (condition)							\
		break;							\
	__wait_var_event(var, condition);				\
} while (0)

#define __wait_var_event_killable(var, condition)			\
	___wait_var_event(var, condition, TASK_KILLABLE, 0, 0,		\
			  schedule())

#define wait_var_event_killable(var, condition)				\
({									\
	int __ret = 0;							\
	might_sleep();							\
	if (!(condition))						\
		__ret = __wait_var_event_killable(var, condition);	\
	__ret;								\
})

#define __wait_var_event_timeout(var, condition, timeout)		\
	___wait_var_event(var, ___wait_cond_timeout(condition),		\
			  TASK_UNINTERRUPTIBLE, 0, timeout,		\
			  __ret = schedule_timeout(__ret))

#define wait_var_event_timeout(var, condition, timeout)			\
({									\
	long __ret = timeout;						\
	might_sleep();							\
	if (!___wait_cond_timeout(condition))				\
		__ret = __wait_var_event_timeout(var, condition, timeout); \
	__ret;								\
})

#endif /* _LINUX_WAIT_BIT_H */
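
As a usage sketch of the variants above (hypothetical code, not part
of this patch): like wait_event_timeout(), wait_var_event_timeout()
returns the remaining jiffies (at least 1) once the condition turns
true, or 0 on timeout; wait_var_event_killable() returns 0, or
-ERESTARTSYS when interrupted by a fatal signal.

	/* wait up to one second for all pending work items to drain */
	static atomic_t pending;

	long wait_for_drain(void)
	{
		return wait_var_event_timeout(&pending,
					      !atomic_read(&pending), HZ);
	}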

kernel/sched/wait_bit.c

@@ -149,6 +149,54 @@ void wake_up_bit(void *word, int bit)
}
EXPORT_SYMBOL(wake_up_bit);

wait_queue_head_t *__var_waitqueue(void *p)
{
	if (BITS_PER_LONG == 64) {
		unsigned long q = (unsigned long)p;

		return bit_waitqueue((void *)(q & ~1), q & 1);
	}
	return bit_waitqueue(p, 0);
}
EXPORT_SYMBOL(__var_waitqueue);
static int
var_wake_function(struct wait_queue_entry *wq_entry, unsigned int mode,
		  int sync, void *arg)
{
	struct wait_bit_key *key = arg;
	struct wait_bit_queue_entry *wbq_entry =
		container_of(wq_entry, struct wait_bit_queue_entry, wq_entry);

	if (wbq_entry->key.flags != key->flags ||
	    wbq_entry->key.bit_nr != key->bit_nr)
		return 0;

	return autoremove_wake_function(wq_entry, mode, sync, key);
}

void init_wait_var_entry(struct wait_bit_queue_entry *wbq_entry, void *var, int flags)
{
	*wbq_entry = (struct wait_bit_queue_entry){
		.key = {
			.flags	= (var),
			.bit_nr = -1,
		},
		.wq_entry = {
			.private = current,
			.func	 = var_wake_function,
			.entry	 = LIST_HEAD_INIT(wbq_entry->wq_entry.entry),
		},
	};
}
EXPORT_SYMBOL(init_wait_var_entry);

void wake_up_var(void *var)
{
	__wake_up_bit(__var_waitqueue(var), var, -1);
}
EXPORT_SYMBOL(wake_up_var);
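
Taken together, these are the primitives that ___wait_var_event() in
the header open-codes. Roughly (an illustrative expansion, not code
from this patch):

	static void wait_until_zero(atomic_t *v)
	{
		struct wait_queue_head *wq_head = __var_waitqueue(v);
		struct wait_bit_queue_entry entry;

		init_wait_var_entry(&entry, v, 0);
		for (;;) {
			prepare_to_wait_event(wq_head, &entry.wq_entry,
					      TASK_UNINTERRUPTIBLE);
			if (!atomic_read(v))
				break;
			schedule();
		}
		finish_wait(wq_head, &entry.wq_entry);
	}

var_wake_function() exists because the waitqueue is shared by hash:
it compares the stored variable address (key.flags) and bit_nr (-1)
so that a wake_up_var() on a different address hashing to the same
bucket does not falsely wake this waiter.
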
/*
* Manipulate the atomic_t address to produce a better bit waitqueue table hash
* index (we're keying off bit -1, but that would produce a horrible hash