
lockdep: Split header file into lockdep and lockdep_types

There is a header file inclusion loop between asm-generic/bug.h
and linux/kernel.h.  This causes potential compile failures depending
on which file is included first.  One way of breaking this loop
is to stop spinlock_types.h from including lockdep.h.  This patch
splits lockdep.h into two files for this purpose.
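
As a sketch of the pattern (a.h, b.h and b_types.h here are hypothetical
examples, not the kernel headers): before the split, two headers each
needed the other, and include guards made the result depend on inclusion
order; moving the bare struct into a *_types.h header breaks the loop the
same way lockdep_types.h does below.

/* b_types.h - only the data layout; safe for low-level headers */
#ifndef B_TYPES_H
#define B_TYPES_H
struct b { int state; };
#endif

/* b.h - the full API; free to pull in heavyweight headers */
#ifndef B_H
#define B_H
#include "b_types.h"
#include "a.h"
void b_init(struct b *x);
#endif

/* a.h - previously included b.h and closed the loop */
#ifndef A_H
#define A_H
#include "b_types.h"	/* only struct b's layout is needed here */
struct a { struct b inner; };
#endif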

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Sergey Senozhatsky <sergey.senozhatsky@gmail.com>
Reviewed-by: Andy Shevchenko <andy.shevchenko@gmail.com>
Acked-by: Petr Mladek <pmladek@suse.com>
Acked-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
Link: https://lkml.kernel.org/r/E1jlSJz-0003hE-8g@fornost.hmeau.com
Herbert Xu 2020-06-17 17:17:19 +10:00 committed by Peter Zijlstra
parent b3a9e3b962
commit c935cd62d3
4 changed files with 200 additions and 177 deletions

include/linux/lockdep.h

@ -10,181 +10,20 @@
#ifndef __LINUX_LOCKDEP_H
#define __LINUX_LOCKDEP_H
#include <linux/lockdep_types.h>
struct task_struct;
struct lockdep_map;
/* for sysctl */
extern int prove_locking;
extern int lock_stat;
#define MAX_LOCKDEP_SUBCLASSES 8UL
#include <linux/types.h>
enum lockdep_wait_type {
LD_WAIT_INV = 0, /* not checked, catch all */
LD_WAIT_FREE, /* wait free, rcu etc.. */
LD_WAIT_SPIN, /* spin loops, raw_spinlock_t etc.. */
#ifdef CONFIG_PROVE_RAW_LOCK_NESTING
LD_WAIT_CONFIG, /* CONFIG_PREEMPT_LOCK, spinlock_t etc.. */
#else
LD_WAIT_CONFIG = LD_WAIT_SPIN,
#endif
LD_WAIT_SLEEP, /* sleeping locks, mutex_t etc.. */
LD_WAIT_MAX, /* must be last */
};
#ifdef CONFIG_LOCKDEP
#include <linux/linkage.h>
#include <linux/list.h>
#include <linux/debug_locks.h>
#include <linux/stacktrace.h>
/*
* We'd rather not expose kernel/lockdep_states.h this wide, but we do need
* the total number of states... :-(
*/
#define XXX_LOCK_USAGE_STATES (1+2*4)
/*
* NR_LOCKDEP_CACHING_CLASSES ... Number of classes
* cached in the instance of lockdep_map
*
* Currently the main class (subclass == 0) and the single-depth subclass
* are cached in lockdep_map. This optimization mainly targets rq->lock,
* which double_rq_lock() acquires heavily with a single-depth subclass.
*/
#define NR_LOCKDEP_CACHING_CLASSES 2
/*
* A lockdep key is associated with each lock object. For static locks we use
* the lock address itself as the key. Dynamically allocated lock objects can
* have a statically or dynamically allocated key. Dynamically allocated lock
* keys must be registered before being used and must be unregistered before
* the key memory is freed.
*/
struct lockdep_subclass_key {
char __one_byte;
} __attribute__ ((__packed__));
/* hash_entry is used to keep track of dynamically allocated keys. */
struct lock_class_key {
union {
struct hlist_node hash_entry;
struct lockdep_subclass_key subkeys[MAX_LOCKDEP_SUBCLASSES];
};
};
extern struct lock_class_key __lockdep_no_validate__;
struct lock_trace;
#define LOCKSTAT_POINTS 4
/*
* The lock-class itself. The order of the structure members matters.
* reinit_class() zeroes the key member and all subsequent members.
*/
struct lock_class {
/*
* class-hash:
*/
struct hlist_node hash_entry;
/*
* Entry in all_lock_classes when in use. Entry in free_lock_classes
* when not in use. Instances that are being freed are on one of the
* zapped_classes lists.
*/
struct list_head lock_entry;
/*
* These fields represent a directed graph of lock dependencies,
* to every node we attach a list of "forward" and a list of
* "backward" graph nodes.
*/
struct list_head locks_after, locks_before;
const struct lockdep_subclass_key *key;
unsigned int subclass;
unsigned int dep_gen_id;
/*
* IRQ/softirq usage tracking bits:
*/
unsigned long usage_mask;
const struct lock_trace *usage_traces[XXX_LOCK_USAGE_STATES];
/*
* Generation counter, when doing certain classes of graph walking,
* to ensure that we check one node only once:
*/
int name_version;
const char *name;
short wait_type_inner;
short wait_type_outer;
#ifdef CONFIG_LOCK_STAT
unsigned long contention_point[LOCKSTAT_POINTS];
unsigned long contending_point[LOCKSTAT_POINTS];
#endif
} __no_randomize_layout;
#ifdef CONFIG_LOCK_STAT
struct lock_time {
s64 min;
s64 max;
s64 total;
unsigned long nr;
};
enum bounce_type {
bounce_acquired_write,
bounce_acquired_read,
bounce_contended_write,
bounce_contended_read,
nr_bounce_types,
bounce_acquired = bounce_acquired_write,
bounce_contended = bounce_contended_write,
};
struct lock_class_stats {
unsigned long contention_point[LOCKSTAT_POINTS];
unsigned long contending_point[LOCKSTAT_POINTS];
struct lock_time read_waittime;
struct lock_time write_waittime;
struct lock_time read_holdtime;
struct lock_time write_holdtime;
unsigned long bounces[nr_bounce_types];
};
struct lock_class_stats lock_stats(struct lock_class *class);
void clear_lock_stats(struct lock_class *class);
#endif
/*
* Map the lock object (the lock instance) to the lock-class object.
* This is embedded into specific lock instances:
*/
struct lockdep_map {
struct lock_class_key *key;
struct lock_class *class_cache[NR_LOCKDEP_CACHING_CLASSES];
const char *name;
short wait_type_outer; /* can be taken in this context */
short wait_type_inner; /* represents this context */
#ifdef CONFIG_LOCK_STAT
int cpu;
unsigned long ip;
#endif
};
static inline void lockdep_copy_map(struct lockdep_map *to,
struct lockdep_map *from)
{
@ -440,8 +279,6 @@ static inline void lock_set_subclass(struct lockdep_map *lock,
extern void lock_downgrade(struct lockdep_map *lock, unsigned long ip);
struct pin_cookie { unsigned int val; };
#define NIL_COOKIE (struct pin_cookie){ .val = 0U, }
extern struct pin_cookie lock_pin_lock(struct lockdep_map *lock);
@ -520,10 +357,6 @@ static inline void lockdep_set_selftest_task(struct task_struct *task)
# define lockdep_reset() do { debug_locks = 1; } while (0)
# define lockdep_free_key_range(start, size) do { } while (0)
# define lockdep_sys_exit() do { } while (0)
/*
* The class key takes no space if lockdep is disabled:
*/
struct lock_class_key { };
static inline void lockdep_register_key(struct lock_class_key *key)
{
@ -533,11 +366,6 @@ static inline void lockdep_unregister_key(struct lock_class_key *key)
{
}
/*
* The lockdep_map takes no space if lockdep is disabled:
*/
struct lockdep_map { };
#define lockdep_depth(tsk) (0)
#define lockdep_is_held_type(l, r) (1)
@ -549,8 +377,6 @@ struct lockdep_map { };
#define lockdep_recursing(tsk) (0)
struct pin_cookie { };
#define NIL_COOKIE (struct pin_cookie){ }
#define lockdep_pin_lock(l) ({ struct pin_cookie cookie = { }; cookie; })
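
For context on how the structures above are consumed (this usage is not
part of the patch, and mydev is a hypothetical example), a static
lock_class_key typically names a lock class for dynamically initialized
locks:

#include <linux/spinlock.h>

/* The key's address, not its contents, identifies the class. */
static struct lock_class_key mydev_lock_key;

struct mydev {			/* hypothetical driver structure */
	spinlock_t lock;
};

static void mydev_lock_init(struct mydev *d)
{
	spin_lock_init(&d->lock);
	/* Group all mydev locks into one class, distinct from other users. */
	lockdep_set_class(&d->lock, &mydev_lock_key);
}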

include/linux/lockdep_types.h

@ -0,0 +1,196 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Runtime locking correctness validator
*
* Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
* Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
*
* see Documentation/locking/lockdep-design.rst for more details.
*/
#ifndef __LINUX_LOCKDEP_TYPES_H
#define __LINUX_LOCKDEP_TYPES_H
#include <linux/types.h>
#define MAX_LOCKDEP_SUBCLASSES 8UL
enum lockdep_wait_type {
LD_WAIT_INV = 0, /* not checked, catch all */
LD_WAIT_FREE, /* wait free, rcu etc.. */
LD_WAIT_SPIN, /* spin loops, raw_spinlock_t etc.. */
#ifdef CONFIG_PROVE_RAW_LOCK_NESTING
LD_WAIT_CONFIG, /* CONFIG_PREEMPT_LOCK, spinlock_t etc.. */
#else
LD_WAIT_CONFIG = LD_WAIT_SPIN,
#endif
LD_WAIT_SLEEP, /* sleeping locks, mutex_t etc.. */
LD_WAIT_MAX, /* must be last */
};
#ifdef CONFIG_LOCKDEP
#include <linux/list.h>
/*
* We'd rather not expose kernel/lockdep_states.h this wide, but we do need
* the total number of states... :-(
*/
#define XXX_LOCK_USAGE_STATES (1+2*4)
/*
* NR_LOCKDEP_CACHING_CLASSES ... Number of classes
* cached in the instance of lockdep_map
*
* Currently the main class (subclass == 0) and the single-depth subclass
* are cached in lockdep_map. This optimization mainly targets rq->lock,
* which double_rq_lock() acquires heavily with a single-depth subclass.
*/
#define NR_LOCKDEP_CACHING_CLASSES 2
/*
* A lockdep key is associated with each lock object. For static locks we use
* the lock address itself as the key. Dynamically allocated lock objects can
* have a statically or dynamically allocated key. Dynamically allocated lock
* keys must be registered before being used and must be unregistered before
* the key memory is freed.
*/
struct lockdep_subclass_key {
char __one_byte;
} __attribute__ ((__packed__));
/* hash_entry is used to keep track of dynamically allocated keys. */
struct lock_class_key {
union {
struct hlist_node hash_entry;
struct lockdep_subclass_key subkeys[MAX_LOCKDEP_SUBCLASSES];
};
};
extern struct lock_class_key __lockdep_no_validate__;
struct lock_trace;
#define LOCKSTAT_POINTS 4
/*
* The lock-class itself. The order of the structure members matters.
* reinit_class() zeroes the key member and all subsequent members.
*/
struct lock_class {
/*
* class-hash:
*/
struct hlist_node hash_entry;
/*
* Entry in all_lock_classes when in use. Entry in free_lock_classes
* when not in use. Instances that are being freed are on one of the
* zapped_classes lists.
*/
struct list_head lock_entry;
/*
* These fields represent a directed graph of lock dependencies,
* to every node we attach a list of "forward" and a list of
* "backward" graph nodes.
*/
struct list_head locks_after, locks_before;
const struct lockdep_subclass_key *key;
unsigned int subclass;
unsigned int dep_gen_id;
/*
* IRQ/softirq usage tracking bits:
*/
unsigned long usage_mask;
const struct lock_trace *usage_traces[XXX_LOCK_USAGE_STATES];
/*
* Generation counter, when doing certain classes of graph walking,
* to ensure that we check one node only once:
*/
int name_version;
const char *name;
short wait_type_inner;
short wait_type_outer;
#ifdef CONFIG_LOCK_STAT
unsigned long contention_point[LOCKSTAT_POINTS];
unsigned long contending_point[LOCKSTAT_POINTS];
#endif
} __no_randomize_layout;
#ifdef CONFIG_LOCK_STAT
struct lock_time {
s64 min;
s64 max;
s64 total;
unsigned long nr;
};
enum bounce_type {
bounce_acquired_write,
bounce_acquired_read,
bounce_contended_write,
bounce_contended_read,
nr_bounce_types,
bounce_acquired = bounce_acquired_write,
bounce_contended = bounce_contended_write,
};
struct lock_class_stats {
unsigned long contention_point[LOCKSTAT_POINTS];
unsigned long contending_point[LOCKSTAT_POINTS];
struct lock_time read_waittime;
struct lock_time write_waittime;
struct lock_time read_holdtime;
struct lock_time write_holdtime;
unsigned long bounces[nr_bounce_types];
};
struct lock_class_stats lock_stats(struct lock_class *class);
void clear_lock_stats(struct lock_class *class);
#endif
/*
* Map the lock object (the lock instance) to the lock-class object.
* This is embedded into specific lock instances:
*/
struct lockdep_map {
struct lock_class_key *key;
struct lock_class *class_cache[NR_LOCKDEP_CACHING_CLASSES];
const char *name;
short wait_type_outer; /* can be taken in this context */
short wait_type_inner; /* represents this context */
#ifdef CONFIG_LOCK_STAT
int cpu;
unsigned long ip;
#endif
};
struct pin_cookie { unsigned int val; };
#else /* !CONFIG_LOCKDEP */
/*
* The class key takes no space if lockdep is disabled:
*/
struct lock_class_key { };
/*
* The lockdep_map takes no space if lockdep is disabled:
*/
struct lockdep_map { };
struct pin_cookie { };
#endif /* !CONFIG_LOCKDEP */
#endif /* __LINUX_LOCKDEP_TYPES_H */
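
The empty #else definitions above are what keep unconditional struct
members and initializers compiling when lockdep is off; the same
zero-size-struct pattern in isolation (dbg_info, DBG_INIT and widget are
hypothetical names):

#ifdef CONFIG_LOCKDEP
struct dbg_info { const char *name; };
#define DBG_INIT(n)	{ .name = (n) }
#else
struct dbg_info { };	/* zero size: the member costs nothing */
#define DBG_INIT(n)	{ }
#endif

struct widget {
	int value;
	struct dbg_info dbg;	/* declared unconditionally either way */
};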

include/linux/spinlock.h

@ -56,6 +56,7 @@
#include <linux/kernel.h>
#include <linux/stringify.h>
#include <linux/bottom_half.h>
#include <linux/lockdep.h>
#include <asm/barrier.h>
#include <asm/mmiowb.h>

include/linux/spinlock_types.h

@ -15,7 +15,7 @@
# include <linux/spinlock_types_up.h>
#endif
#include <linux/lockdep.h>
#include <linux/lockdep_types.h>
typedef struct raw_spinlock {
arch_spinlock_t raw_lock;
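
The swap works because spinlock_types.h only embeds struct lockdep_map by
value and never calls the lockdep API; roughly (a simplified sketch of the
struct shown above, with the other debug fields omitted):

typedef struct raw_spinlock {
	arch_spinlock_t raw_lock;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map dep_map;	/* layout comes from lockdep_types.h */
#endif
} raw_spinlock_t;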