
sched/wait: Standardize internal naming of wait-queue entries

So the various wait-queue entry variables in include/linux/wait.h
and kernel/sched/wait.c are named in a colorfully inconsistent
way:

	wait_queue_entry_t *wait
	wait_queue_entry_t *__wait	(even in plain C code!)
	wait_queue_entry_t *q		(!)
	wait_queue_entry_t *new		(making anyone who knows C++ cringe)
	wait_queue_entry_t *old

I think part of the reason for the inconsistency is the constant
apparent confusion about what a wait queue 'head' versus 'entry' is.

( Some of the documentation talks about a 'wait descriptor', which is
  the wait-queue entry itself - further adding to the confusion. )

The most common name is 'wait', but that is itself ambiguous, as it
does not make clear whether it refers to a wait-queue entry or to a
wait-queue head.

To improve all this, name the wait-queue entry structure parameters
and variables consistently and push this naming through all of the
wait.h and wait.c code:

	struct wait_queue_entry *wq_entry

The 'wq_' prefix makes it easy to grep for, and we also take the
opportunity to move away from the typedef to plain 'struct' naming:
in the kernel we typically reserve typedefs for cases where a
C structure is really small and somewhat opaque - such as pte_t.

Wait-queue entries are neither small nor opaque, so use the more
standard 'struct xxx_entry' list-management code nomenclature instead.
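
For example, the prepare_to_wait() prototype (taken verbatim from the
wait.h hunk further below) changes from:

	void prepare_to_wait(wait_queue_head_t *q, wait_queue_entry_t *wait, int state);

to:

	void prepare_to_wait(wait_queue_head_t *q, struct wait_queue_entry *wq_entry, int state);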

( We don't touch external users, and we preserve the typedef as well
  for actual wait-queue users, to reduce unnecessary churn. )
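
As an illustration only (not part of this patch), a hypothetical
external user that relies on the preserved typedef continues to build
unchanged:

	wait_queue_entry_t wait;		/* the typedef is still available */

	init_waitqueue_entry(&wait, current);
	add_wait_queue(&my_wq, &wait);		/* 'my_wq': an assumed wait_queue_head_t */

	/* ... sleep until the awaited condition holds ... */

	remove_wait_queue(&my_wq, &wait);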

Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Ingo Molnar 2017-03-05 10:33:16 +01:00
parent ac6424b981
commit 50816c4899
2 changed files with 91 additions and 91 deletions

include/linux/wait.h

@@ -11,8 +11,9 @@
#include <uapi/linux/wait.h>
typedef struct wait_queue_entry wait_queue_entry_t;
typedef int (*wait_queue_func_t)(wait_queue_entry_t *wait, unsigned mode, int flags, void *key);
int default_wake_function(wait_queue_entry_t *wait, unsigned mode, int flags, void *key);
typedef int (*wait_queue_func_t)(struct wait_queue_entry *wq_entry, unsigned mode, int flags, void *key);
int default_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int flags, void *key);
/* wait_queue_entry::flags */
#define WQ_FLAG_EXCLUSIVE 0x01
@@ -37,7 +38,7 @@ struct wait_bit_key {
struct wait_bit_queue {
struct wait_bit_key key;
wait_queue_entry_t wait;
struct wait_queue_entry wait;
};
struct __wait_queue_head {
@@ -58,7 +59,7 @@ struct task_struct;
.task_list = { NULL, NULL } }
#define DECLARE_WAITQUEUE(name, tsk) \
wait_queue_entry_t name = __WAITQUEUE_INITIALIZER(name, tsk)
struct wait_queue_entry name = __WAITQUEUE_INITIALIZER(name, tsk)
#define __WAIT_QUEUE_HEAD_INITIALIZER(name) { \
.lock = __SPIN_LOCK_UNLOCKED(name.lock), \
@@ -91,19 +92,19 @@ extern void __init_waitqueue_head(wait_queue_head_t *q, const char *name, struct
# define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) DECLARE_WAIT_QUEUE_HEAD(name)
#endif
static inline void init_waitqueue_entry(wait_queue_entry_t *q, struct task_struct *p)
static inline void init_waitqueue_entry(struct wait_queue_entry *wq_entry, struct task_struct *p)
{
q->flags = 0;
q->private = p;
q->func = default_wake_function;
wq_entry->flags = 0;
wq_entry->private = p;
wq_entry->func = default_wake_function;
}
static inline void
init_waitqueue_func_entry(wait_queue_entry_t *q, wait_queue_func_t func)
init_waitqueue_func_entry(struct wait_queue_entry *wq_entry, wait_queue_func_t func)
{
q->flags = 0;
q->private = NULL;
q->func = func;
wq_entry->flags = 0;
wq_entry->private = NULL;
wq_entry->func = func;
}
/**
@@ -162,42 +163,41 @@ static inline bool wq_has_sleeper(wait_queue_head_t *wq)
return waitqueue_active(wq);
}
extern void add_wait_queue(wait_queue_head_t *q, wait_queue_entry_t *wait);
extern void add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_entry_t *wait);
extern void remove_wait_queue(wait_queue_head_t *q, wait_queue_entry_t *wait);
extern void add_wait_queue(wait_queue_head_t *q, struct wait_queue_entry *wq_entry);
extern void add_wait_queue_exclusive(wait_queue_head_t *q, struct wait_queue_entry *wq_entry);
extern void remove_wait_queue(wait_queue_head_t *q, struct wait_queue_entry *wq_entry);
static inline void __add_wait_queue(wait_queue_head_t *head, wait_queue_entry_t *new)
static inline void __add_wait_queue(wait_queue_head_t *head, struct wait_queue_entry *wq_entry)
{
list_add(&new->task_list, &head->task_list);
list_add(&wq_entry->task_list, &head->task_list);
}
/*
* Used for wake-one threads:
*/
static inline void
__add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_entry_t *wait)
__add_wait_queue_exclusive(wait_queue_head_t *q, struct wait_queue_entry *wq_entry)
{
wait->flags |= WQ_FLAG_EXCLUSIVE;
__add_wait_queue(q, wait);
wq_entry->flags |= WQ_FLAG_EXCLUSIVE;
__add_wait_queue(q, wq_entry);
}
static inline void __add_wait_queue_entry_tail(wait_queue_head_t *head,
wait_queue_entry_t *new)
static inline void __add_wait_queue_entry_tail(wait_queue_head_t *head, struct wait_queue_entry *wq_entry)
{
list_add_tail(&new->task_list, &head->task_list);
list_add_tail(&wq_entry->task_list, &head->task_list);
}
static inline void
__add_wait_queue_entry_tail_exclusive(wait_queue_head_t *q, wait_queue_entry_t *wait)
__add_wait_queue_entry_tail_exclusive(wait_queue_head_t *q, struct wait_queue_entry *wq_entry)
{
wait->flags |= WQ_FLAG_EXCLUSIVE;
__add_wait_queue_entry_tail(q, wait);
wq_entry->flags |= WQ_FLAG_EXCLUSIVE;
__add_wait_queue_entry_tail(q, wq_entry);
}
static inline void
__remove_wait_queue(wait_queue_head_t *head, wait_queue_entry_t *old)
__remove_wait_queue(wait_queue_head_t *head, struct wait_queue_entry *wq_entry)
{
list_del(&old->task_list);
list_del(&wq_entry->task_list);
}
typedef int wait_bit_action_f(struct wait_bit_key *, int mode);
@@ -252,7 +252,7 @@ wait_queue_head_t *bit_waitqueue(void *, int);
(!__builtin_constant_p(state) || \
state == TASK_INTERRUPTIBLE || state == TASK_KILLABLE) \
extern void init_wait_entry(wait_queue_entry_t *__wait, int flags);
extern void init_wait_entry(struct wait_queue_entry *wq_entry, int flags);
/*
* The below macro ___wait_event() has an explicit shadow of the __ret
@@ -269,12 +269,12 @@ extern void init_wait_entry(wait_queue_entry_t *__wait, int flags);
#define ___wait_event(wq, condition, state, exclusive, ret, cmd) \
({ \
__label__ __out; \
wait_queue_entry_t __wait; \
struct wait_queue_entry __wq_entry; \
long __ret = ret; /* explicit shadow */ \
\
init_wait_entry(&__wait, exclusive ? WQ_FLAG_EXCLUSIVE : 0); \
init_wait_entry(&__wq_entry, exclusive ? WQ_FLAG_EXCLUSIVE : 0);\
for (;;) { \
long __int = prepare_to_wait_event(&wq, &__wait, state);\
long __int = prepare_to_wait_event(&wq, &__wq_entry, state);\
\
if (condition) \
break; \
@@ -286,7 +286,7 @@ extern void init_wait_entry(wait_queue_entry_t *__wait, int flags);
\
cmd; \
} \
finish_wait(&wq, &__wait); \
finish_wait(&wq, &__wq_entry); \
__out: __ret; \
})
@@ -970,17 +970,17 @@ do { \
/*
* Waitqueues which are removed from the waitqueue_head at wakeup time
*/
void prepare_to_wait(wait_queue_head_t *q, wait_queue_entry_t *wait, int state);
void prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_entry_t *wait, int state);
long prepare_to_wait_event(wait_queue_head_t *q, wait_queue_entry_t *wait, int state);
void finish_wait(wait_queue_head_t *q, wait_queue_entry_t *wait);
long wait_woken(wait_queue_entry_t *wait, unsigned mode, long timeout);
int woken_wake_function(wait_queue_entry_t *wait, unsigned mode, int sync, void *key);
int autoremove_wake_function(wait_queue_entry_t *wait, unsigned mode, int sync, void *key);
int wake_bit_function(wait_queue_entry_t *wait, unsigned mode, int sync, void *key);
void prepare_to_wait(wait_queue_head_t *q, struct wait_queue_entry *wq_entry, int state);
void prepare_to_wait_exclusive(wait_queue_head_t *q, struct wait_queue_entry *wq_entry, int state);
long prepare_to_wait_event(wait_queue_head_t *q, struct wait_queue_entry *wq_entry, int state);
void finish_wait(wait_queue_head_t *q, struct wait_queue_entry *wq_entry);
long wait_woken(struct wait_queue_entry *wq_entry, unsigned mode, long timeout);
int woken_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key);
int autoremove_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key);
int wake_bit_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key);
#define DEFINE_WAIT_FUNC(name, function) \
wait_queue_entry_t name = { \
struct wait_queue_entry name = { \
.private = current, \
.func = function, \
.task_list = LIST_HEAD_INIT((name).task_list), \

kernel/sched/wait.c

@@ -21,34 +21,34 @@ void __init_waitqueue_head(wait_queue_head_t *q, const char *name, struct lock_c
EXPORT_SYMBOL(__init_waitqueue_head);
void add_wait_queue(wait_queue_head_t *q, wait_queue_entry_t *wait)
void add_wait_queue(wait_queue_head_t *q, struct wait_queue_entry *wq_entry)
{
unsigned long flags;
wait->flags &= ~WQ_FLAG_EXCLUSIVE;
wq_entry->flags &= ~WQ_FLAG_EXCLUSIVE;
spin_lock_irqsave(&q->lock, flags);
__add_wait_queue(q, wait);
__add_wait_queue(q, wq_entry);
spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL(add_wait_queue);
void add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_entry_t *wait)
void add_wait_queue_exclusive(wait_queue_head_t *q, struct wait_queue_entry *wq_entry)
{
unsigned long flags;
wait->flags |= WQ_FLAG_EXCLUSIVE;
wq_entry->flags |= WQ_FLAG_EXCLUSIVE;
spin_lock_irqsave(&q->lock, flags);
__add_wait_queue_entry_tail(q, wait);
__add_wait_queue_entry_tail(q, wq_entry);
spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL(add_wait_queue_exclusive);
void remove_wait_queue(wait_queue_head_t *q, wait_queue_entry_t *wait)
void remove_wait_queue(wait_queue_head_t *q, struct wait_queue_entry *wq_entry)
{
unsigned long flags;
spin_lock_irqsave(&q->lock, flags);
__remove_wait_queue(q, wait);
__remove_wait_queue(q, wq_entry);
spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL(remove_wait_queue);
@@ -170,43 +170,43 @@ EXPORT_SYMBOL_GPL(__wake_up_sync); /* For internal use only */
* loads to move into the critical region).
*/
void
prepare_to_wait(wait_queue_head_t *q, wait_queue_entry_t *wait, int state)
prepare_to_wait(wait_queue_head_t *q, struct wait_queue_entry *wq_entry, int state)
{
unsigned long flags;
wait->flags &= ~WQ_FLAG_EXCLUSIVE;
wq_entry->flags &= ~WQ_FLAG_EXCLUSIVE;
spin_lock_irqsave(&q->lock, flags);
if (list_empty(&wait->task_list))
__add_wait_queue(q, wait);
if (list_empty(&wq_entry->task_list))
__add_wait_queue(q, wq_entry);
set_current_state(state);
spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL(prepare_to_wait);
void
prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_entry_t *wait, int state)
prepare_to_wait_exclusive(wait_queue_head_t *q, struct wait_queue_entry *wq_entry, int state)
{
unsigned long flags;
wait->flags |= WQ_FLAG_EXCLUSIVE;
wq_entry->flags |= WQ_FLAG_EXCLUSIVE;
spin_lock_irqsave(&q->lock, flags);
if (list_empty(&wait->task_list))
__add_wait_queue_entry_tail(q, wait);
if (list_empty(&wq_entry->task_list))
__add_wait_queue_entry_tail(q, wq_entry);
set_current_state(state);
spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL(prepare_to_wait_exclusive);
void init_wait_entry(wait_queue_entry_t *wait, int flags)
void init_wait_entry(struct wait_queue_entry *wq_entry, int flags)
{
wait->flags = flags;
wait->private = current;
wait->func = autoremove_wake_function;
INIT_LIST_HEAD(&wait->task_list);
wq_entry->flags = flags;
wq_entry->private = current;
wq_entry->func = autoremove_wake_function;
INIT_LIST_HEAD(&wq_entry->task_list);
}
EXPORT_SYMBOL(init_wait_entry);
long prepare_to_wait_event(wait_queue_head_t *q, wait_queue_entry_t *wait, int state)
long prepare_to_wait_event(wait_queue_head_t *q, struct wait_queue_entry *wq_entry, int state)
{
unsigned long flags;
long ret = 0;
@@ -225,14 +225,14 @@ long prepare_to_wait_event(wait_queue_head_t *q, wait_queue_entry_t *wait, int s
* can't see us, it should wake up another exclusive waiter if
* we fail.
*/
list_del_init(&wait->task_list);
list_del_init(&wq_entry->task_list);
ret = -ERESTARTSYS;
} else {
if (list_empty(&wait->task_list)) {
if (wait->flags & WQ_FLAG_EXCLUSIVE)
__add_wait_queue_entry_tail(q, wait);
if (list_empty(&wq_entry->task_list)) {
if (wq_entry->flags & WQ_FLAG_EXCLUSIVE)
__add_wait_queue_entry_tail(q, wq_entry);
else
__add_wait_queue(q, wait);
__add_wait_queue(q, wq_entry);
}
set_current_state(state);
}
@@ -284,13 +284,13 @@ EXPORT_SYMBOL(do_wait_intr_irq);
/**
* finish_wait - clean up after waiting in a queue
* @q: waitqueue waited on
* @wait: wait descriptor
* @wq_entry: wait descriptor
*
* Sets current thread back to running state and removes
* the wait descriptor from the given waitqueue if still
* queued.
*/
void finish_wait(wait_queue_head_t *q, wait_queue_entry_t *wait)
void finish_wait(wait_queue_head_t *q, struct wait_queue_entry *wq_entry)
{
unsigned long flags;
@@ -308,20 +308,20 @@ void finish_wait(wait_queue_head_t *q, wait_queue_entry_t *wait)
* have _one_ other CPU that looks at or modifies
* the list).
*/
if (!list_empty_careful(&wait->task_list)) {
if (!list_empty_careful(&wq_entry->task_list)) {
spin_lock_irqsave(&q->lock, flags);
list_del_init(&wait->task_list);
list_del_init(&wq_entry->task_list);
spin_unlock_irqrestore(&q->lock, flags);
}
}
EXPORT_SYMBOL(finish_wait);
int autoremove_wake_function(wait_queue_entry_t *wait, unsigned mode, int sync, void *key)
int autoremove_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key)
{
int ret = default_wake_function(wait, mode, sync, key);
int ret = default_wake_function(wq_entry, mode, sync, key);
if (ret)
list_del_init(&wait->task_list);
list_del_init(&wq_entry->task_list);
return ret;
}
EXPORT_SYMBOL(autoremove_wake_function);
@@ -341,17 +341,17 @@ static inline bool is_kthread_should_stop(void)
*
* p->state = mode; condition = true;
* smp_mb(); // A smp_wmb(); // C
* if (!wait->flags & WQ_FLAG_WOKEN) wait->flags |= WQ_FLAG_WOKEN;
* if (!wq_entry->flags & WQ_FLAG_WOKEN) wq_entry->flags |= WQ_FLAG_WOKEN;
* schedule() try_to_wake_up();
* p->state = TASK_RUNNING; ~~~~~~~~~~~~~~~~~~
* wait->flags &= ~WQ_FLAG_WOKEN; condition = true;
* wq_entry->flags &= ~WQ_FLAG_WOKEN; condition = true;
* smp_mb() // B smp_wmb(); // C
* wait->flags |= WQ_FLAG_WOKEN;
* wq_entry->flags |= WQ_FLAG_WOKEN;
* }
* remove_wait_queue(&wq, &wait);
*
*/
long wait_woken(wait_queue_entry_t *wait, unsigned mode, long timeout)
long wait_woken(struct wait_queue_entry *wq_entry, unsigned mode, long timeout)
{
set_current_state(mode); /* A */
/*
@@ -359,7 +359,7 @@ long wait_woken(wait_queue_entry_t *wait, unsigned mode, long timeout)
* woken_wake_function() such that if we observe WQ_FLAG_WOKEN we must
* also observe all state before the wakeup.
*/
if (!(wait->flags & WQ_FLAG_WOKEN) && !is_kthread_should_stop())
if (!(wq_entry->flags & WQ_FLAG_WOKEN) && !is_kthread_should_stop())
timeout = schedule_timeout(timeout);
__set_current_state(TASK_RUNNING);
@@ -369,13 +369,13 @@ long wait_woken(wait_queue_entry_t *wait, unsigned mode, long timeout)
* condition being true _OR_ WQ_FLAG_WOKEN such that we will not miss
* an event.
*/
smp_store_mb(wait->flags, wait->flags & ~WQ_FLAG_WOKEN); /* B */
smp_store_mb(wq_entry->flags, wq_entry->flags & ~WQ_FLAG_WOKEN); /* B */
return timeout;
}
EXPORT_SYMBOL(wait_woken);
int woken_wake_function(wait_queue_entry_t *wait, unsigned mode, int sync, void *key)
int woken_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key)
{
/*
* Although this function is called under waitqueue lock, LOCK
@@ -385,24 +385,24 @@ int woken_wake_function(wait_queue_entry_t *wait, unsigned mode, int sync, void
* and is paired with smp_store_mb() in wait_woken().
*/
smp_wmb(); /* C */
wait->flags |= WQ_FLAG_WOKEN;
wq_entry->flags |= WQ_FLAG_WOKEN;
return default_wake_function(wait, mode, sync, key);
return default_wake_function(wq_entry, mode, sync, key);
}
EXPORT_SYMBOL(woken_wake_function);
int wake_bit_function(wait_queue_entry_t *wait, unsigned mode, int sync, void *arg)
int wake_bit_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *arg)
{
struct wait_bit_key *key = arg;
struct wait_bit_queue *wait_bit
= container_of(wait, struct wait_bit_queue, wait);
= container_of(wq_entry, struct wait_bit_queue, wait);
if (wait_bit->key.flags != key->flags ||
wait_bit->key.bit_nr != key->bit_nr ||
test_bit(key->bit_nr, key->flags))
return 0;
else
return autoremove_wake_function(wait, mode, sync, key);
return autoremove_wake_function(wq_entry, mode, sync, key);
}
EXPORT_SYMBOL(wake_bit_function);
@@ -534,19 +534,19 @@ static inline wait_queue_head_t *atomic_t_waitqueue(atomic_t *p)
return bit_waitqueue(p, 0);
}
static int wake_atomic_t_function(wait_queue_entry_t *wait, unsigned mode, int sync,
static int wake_atomic_t_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync,
void *arg)
{
struct wait_bit_key *key = arg;
struct wait_bit_queue *wait_bit
= container_of(wait, struct wait_bit_queue, wait);
= container_of(wq_entry, struct wait_bit_queue, wait);
atomic_t *val = key->flags;
if (wait_bit->key.flags != key->flags ||
wait_bit->key.bit_nr != key->bit_nr ||
atomic_read(val) != 0)
return 0;
return autoremove_wake_function(wait, mode, sync, key);
return autoremove_wake_function(wq_entry, mode, sync, key);
}
/*