
Merge branch 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull locking updates from Ingo Molnar:
 "The main changes in this cycle were:

   - Add CONFIG_REFCOUNT_FULL to make the 'full' (robustness-checked)
     refcount_t implementation optional; leaving it disabled selects an
     unchecked atomic_t based variant with slightly lower runtime
     overhead. (Kees Cook)

     The lighter-weight variant is the default. The two variants use the
     same API. Having this option was a precondition set by some
     maintainers for merging refcount_t cleanups.

   - Add lockdep support for rtmutexes (Peter Zijlstra)

   - liblockdep fixes and improvements (Sasha Levin, Ben Hutchings)

   - ... misc fixes and improvements"

* 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (30 commits)
  locking/refcount: Remove the half-implemented refcount_sub() API
  locking/refcount: Create unchecked atomic_t implementation
  locking/rtmutex: Don't initialize lockdep when not required
  locking/selftest: Add RT-mutex support
  locking/selftest: Remove the bad unlock ordering test
  rt_mutex: Add lockdep annotations
  MAINTAINERS: Claim atomic*_t maintainership
  locking/x86: Remove the unused atomic_inc_short() method
  tools/lib/lockdep: Remove private kernel headers
  tools/lib/lockdep: Hide liblockdep output from test results
  tools/lib/lockdep: Add dummy current_gfp_context()
  tools/include: Add IS_ERR_OR_NULL to err.h
  tools/lib/lockdep: Add empty __is_[module,kernel]_percpu_address
  tools/lib/lockdep: Include err.h
  tools/include: Add (mostly) empty include/linux/sched/mm.h
  tools/lib/lockdep: Use LDFLAGS
  tools/lib/lockdep: Remove double-quotes from soname
  tools/lib/lockdep: Fix object file paths used in an out-of-tree build
  tools/lib/lockdep: Fix compilation for 4.11
  tools/lib/lockdep: Don't mix fd-based and stream IO
  ...
Linus Torvalds 2017-07-03 12:14:18 -07:00
commit 892ad5acca
72 changed files with 641 additions and 234 deletions


@ -2322,6 +2322,15 @@ F: Documentation/devicetree/bindings/input/atmel,maxtouch.txt
F: drivers/input/touchscreen/atmel_mxt_ts.c
F: include/linux/platform_data/atmel_mxt_ts.h
ATOMIC INFRASTRUCTURE
M: Will Deacon <will.deacon@arm.com>
M: Peter Zijlstra <peterz@infradead.org>
R: Boqun Feng <boqun.feng@gmail.com>
L: linux-kernel@vger.kernel.org
S: Maintained
F: arch/*/include/asm/atomic*.h
F: include/*/atomic*.h
ATTO EXPRESSSAS SAS/SATA RAID SCSI DRIVER
M: Bradley Grove <linuxdrivers@attotech.com>
L: linux-scsi@vger.kernel.org
@ -7555,7 +7564,7 @@ S: Maintained
F: drivers/ata/sata_promise.*
LIBLOCKDEP
M: Sasha Levin <sasha.levin@oracle.com>
M: Sasha Levin <alexander.levin@verizon.com>
S: Maintained
F: tools/lib/lockdep/


@ -867,4 +867,13 @@ config STRICT_MODULE_RWX
config ARCH_WANT_RELAX_ORDER
bool
config REFCOUNT_FULL
bool "Perform full reference count validation at the expense of speed"
help
Enabling this switches the refcounting infrastructure from a fast
unchecked atomic_t implementation to a fully state checked
implementation, which can be (slightly) slower but provides protections
against various use-after-free conditions that can be used in
security flaw exploits.
source "kernel/gcov/Kconfig"


@ -24,8 +24,7 @@
* has an opportunity to return -EFAULT to the user if needed.
* The 64-bit routines just return a "long long" with the value,
* since they are only used from kernel space and don't expect to fault.
* Support for 16-bit ops is included in the framework but we don't provide
* any (x86_64 has an atomic_inc_short(), so we might want to some day).
* Support for 16-bit ops is included in the framework but we don't provide any.
*
* Note that the caller is advised to issue a suitable L1 or L2
* prefetch on the address being manipulated to avoid extra stalls.


@ -246,19 +246,6 @@ static __always_inline int __atomic_add_unless(atomic_t *v, int a, int u)
return c;
}
/**
* atomic_inc_short - increment of a short integer
* @v: pointer to type int
*
* Atomically adds 1 to @v
* Returns the new value of @u
*/
static __always_inline short int atomic_inc_short(short int *v)
{
asm(LOCK_PREFIX "addw $1, %0" : "+m" (*v));
return *v;
}
#ifdef CONFIG_X86_32
# include <asm/atomic64_32.h>
#else


@ -41,6 +41,7 @@ static inline unsigned int refcount_read(const refcount_t *r)
return atomic_read(&r->refs);
}
#ifdef CONFIG_REFCOUNT_FULL
extern __must_check bool refcount_add_not_zero(unsigned int i, refcount_t *r);
extern void refcount_add(unsigned int i, refcount_t *r);
@ -48,10 +49,45 @@ extern __must_check bool refcount_inc_not_zero(refcount_t *r);
extern void refcount_inc(refcount_t *r);
extern __must_check bool refcount_sub_and_test(unsigned int i, refcount_t *r);
extern void refcount_sub(unsigned int i, refcount_t *r);
extern __must_check bool refcount_dec_and_test(refcount_t *r);
extern void refcount_dec(refcount_t *r);
#else
static inline __must_check bool refcount_add_not_zero(unsigned int i, refcount_t *r)
{
return atomic_add_unless(&r->refs, i, 0);
}
static inline void refcount_add(unsigned int i, refcount_t *r)
{
atomic_add(i, &r->refs);
}
static inline __must_check bool refcount_inc_not_zero(refcount_t *r)
{
return atomic_add_unless(&r->refs, 1, 0);
}
static inline void refcount_inc(refcount_t *r)
{
atomic_inc(&r->refs);
}
static inline __must_check bool refcount_sub_and_test(unsigned int i, refcount_t *r)
{
return atomic_sub_and_test(i, &r->refs);
}
static inline __must_check bool refcount_dec_and_test(refcount_t *r)
{
return atomic_dec_and_test(&r->refs);
}
static inline void refcount_dec(refcount_t *r)
{
atomic_dec(&r->refs);
}
#endif /* CONFIG_REFCOUNT_FULL */
extern __must_check bool refcount_dec_if_one(refcount_t *r);
extern __must_check bool refcount_dec_not_one(refcount_t *r);
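Both the full and the unchecked variant above export the same calls, so callers are written once. A minimal usage sketch under that assumption — struct foo, foo_alloc(), foo_get() and foo_put() are hypothetical names; only refcount_set()/refcount_inc()/refcount_dec_and_test() from this header plus kzalloc()/kfree() are assumed:

#include <linux/refcount.h>
#include <linux/slab.h>

struct foo {				/* hypothetical refcounted object */
	refcount_t refs;
	/* ... payload ... */
};

static struct foo *foo_alloc(void)
{
	struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);

	if (f)
		refcount_set(&f->refs, 1);	/* initial reference */
	return f;
}

static struct foo *foo_get(struct foo *f)
{
	refcount_inc(&f->refs);		/* REFCOUNT_FULL=y warns on increment-from-zero */
	return f;
}

static void foo_put(struct foo *f)
{
	if (refcount_dec_and_test(&f->refs))
		kfree(f);		/* last reference dropped */
}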


@ -37,6 +37,9 @@ struct rt_mutex {
int line;
void *magic;
#endif
#ifdef CONFIG_DEBUG_LOCK_ALLOC
struct lockdep_map dep_map;
#endif
};
struct rt_mutex_waiter;
@ -58,19 +61,33 @@ struct hrtimer_sleeper;
#ifdef CONFIG_DEBUG_RT_MUTEXES
# define __DEBUG_RT_MUTEX_INITIALIZER(mutexname) \
, .name = #mutexname, .file = __FILE__, .line = __LINE__
# define rt_mutex_init(mutex) __rt_mutex_init(mutex, __func__)
# define rt_mutex_init(mutex) \
do { \
static struct lock_class_key __key; \
__rt_mutex_init(mutex, __func__, &__key); \
} while (0)
extern void rt_mutex_debug_task_free(struct task_struct *tsk);
#else
# define __DEBUG_RT_MUTEX_INITIALIZER(mutexname)
# define rt_mutex_init(mutex) __rt_mutex_init(mutex, NULL)
# define rt_mutex_init(mutex) __rt_mutex_init(mutex, NULL, NULL)
# define rt_mutex_debug_task_free(t) do { } while (0)
#endif
#ifdef CONFIG_DEBUG_LOCK_ALLOC
#define __DEP_MAP_RT_MUTEX_INITIALIZER(mutexname) \
, .dep_map = { .name = #mutexname }
#else
#define __DEP_MAP_RT_MUTEX_INITIALIZER(mutexname)
#endif
#define __RT_MUTEX_INITIALIZER(mutexname) \
{ .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(mutexname.wait_lock) \
, .waiters = RB_ROOT \
, .owner = NULL \
__DEBUG_RT_MUTEX_INITIALIZER(mutexname)}
__DEBUG_RT_MUTEX_INITIALIZER(mutexname) \
__DEP_MAP_RT_MUTEX_INITIALIZER(mutexname)}
#define DEFINE_RT_MUTEX(mutexname) \
struct rt_mutex mutexname = __RT_MUTEX_INITIALIZER(mutexname)
@ -86,7 +103,7 @@ static inline int rt_mutex_is_locked(struct rt_mutex *lock)
return lock->owner != NULL;
}
extern void __rt_mutex_init(struct rt_mutex *lock, const char *name);
extern void __rt_mutex_init(struct rt_mutex *lock, const char *name, struct lock_class_key *key);
extern void rt_mutex_destroy(struct rt_mutex *lock);
extern void rt_mutex_lock(struct rt_mutex *lock);
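To illustrate what the new dep_map enables, here is a sketch (not from this series — both locks and both functions are invented) of an ABBA ordering that lockdep can now report for rt_mutexes once the acquire/release hooks below are in place:

#include <linux/rtmutex.h>

static DEFINE_RT_MUTEX(rtm_a);		/* illustrative locks */
static DEFINE_RT_MUTEX(rtm_b);

static void path_one(void)
{
	rt_mutex_lock(&rtm_a);
	rt_mutex_lock(&rtm_b);
	rt_mutex_unlock(&rtm_b);
	rt_mutex_unlock(&rtm_a);
}

static void path_two(void)
{
	rt_mutex_lock(&rtm_b);
	rt_mutex_lock(&rtm_a);		/* with PROVE_LOCKING, lockdep flags the A-B vs. B-A order */
	rt_mutex_unlock(&rtm_a);
	rt_mutex_unlock(&rtm_b);
}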


@ -166,12 +166,16 @@ void debug_rt_mutex_free_waiter(struct rt_mutex_waiter *waiter)
memset(waiter, 0x22, sizeof(*waiter));
}
void debug_rt_mutex_init(struct rt_mutex *lock, const char *name)
void debug_rt_mutex_init(struct rt_mutex *lock, const char *name, struct lock_class_key *key)
{
/*
* Make sure we are not reinitializing a held lock:
*/
debug_check_no_locks_freed((void *)lock, sizeof(*lock));
lock->name = name;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
lockdep_init_map(&lock->dep_map, name, key, 0);
#endif
}


@ -11,7 +11,7 @@
extern void debug_rt_mutex_init_waiter(struct rt_mutex_waiter *waiter);
extern void debug_rt_mutex_free_waiter(struct rt_mutex_waiter *waiter);
extern void debug_rt_mutex_init(struct rt_mutex *lock, const char *name);
extern void debug_rt_mutex_init(struct rt_mutex *lock, const char *name, struct lock_class_key *key);
extern void debug_rt_mutex_lock(struct rt_mutex *lock);
extern void debug_rt_mutex_unlock(struct rt_mutex *lock);
extern void debug_rt_mutex_proxy_lock(struct rt_mutex *lock,


@ -1481,6 +1481,7 @@ void __sched rt_mutex_lock(struct rt_mutex *lock)
{
might_sleep();
mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_);
rt_mutex_fastlock(lock, TASK_UNINTERRUPTIBLE, rt_mutex_slowlock);
}
EXPORT_SYMBOL_GPL(rt_mutex_lock);
@ -1496,9 +1497,16 @@ EXPORT_SYMBOL_GPL(rt_mutex_lock);
*/
int __sched rt_mutex_lock_interruptible(struct rt_mutex *lock)
{
int ret;
might_sleep();
return rt_mutex_fastlock(lock, TASK_INTERRUPTIBLE, rt_mutex_slowlock);
mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_);
ret = rt_mutex_fastlock(lock, TASK_INTERRUPTIBLE, rt_mutex_slowlock);
if (ret)
mutex_release(&lock->dep_map, 1, _RET_IP_);
return ret;
}
EXPORT_SYMBOL_GPL(rt_mutex_lock_interruptible);
@ -1526,11 +1534,18 @@ int __sched rt_mutex_futex_trylock(struct rt_mutex *lock)
int
rt_mutex_timed_lock(struct rt_mutex *lock, struct hrtimer_sleeper *timeout)
{
int ret;
might_sleep();
return rt_mutex_timed_fastlock(lock, TASK_INTERRUPTIBLE, timeout,
mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_);
ret = rt_mutex_timed_fastlock(lock, TASK_INTERRUPTIBLE, timeout,
RT_MUTEX_MIN_CHAINWALK,
rt_mutex_slowlock);
if (ret)
mutex_release(&lock->dep_map, 1, _RET_IP_);
return ret;
}
EXPORT_SYMBOL_GPL(rt_mutex_timed_lock);
@ -1547,10 +1562,16 @@ EXPORT_SYMBOL_GPL(rt_mutex_timed_lock);
*/
int __sched rt_mutex_trylock(struct rt_mutex *lock)
{
int ret;
if (WARN_ON_ONCE(in_irq() || in_nmi() || in_serving_softirq()))
return 0;
return rt_mutex_fasttrylock(lock, rt_mutex_slowtrylock);
ret = rt_mutex_fasttrylock(lock, rt_mutex_slowtrylock);
if (ret)
mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
return ret;
}
EXPORT_SYMBOL_GPL(rt_mutex_trylock);
@ -1561,6 +1582,7 @@ EXPORT_SYMBOL_GPL(rt_mutex_trylock);
*/
void __sched rt_mutex_unlock(struct rt_mutex *lock)
{
mutex_release(&lock->dep_map, 1, _RET_IP_);
rt_mutex_fastunlock(lock, rt_mutex_slowunlock);
}
EXPORT_SYMBOL_GPL(rt_mutex_unlock);
@ -1620,7 +1642,6 @@ void rt_mutex_destroy(struct rt_mutex *lock)
lock->magic = NULL;
#endif
}
EXPORT_SYMBOL_GPL(rt_mutex_destroy);
/**
@ -1632,14 +1653,16 @@ EXPORT_SYMBOL_GPL(rt_mutex_destroy);
*
* Initializing of a locked rt lock is not allowed
*/
void __rt_mutex_init(struct rt_mutex *lock, const char *name)
void __rt_mutex_init(struct rt_mutex *lock, const char *name,
struct lock_class_key *key)
{
lock->owner = NULL;
raw_spin_lock_init(&lock->wait_lock);
lock->waiters = RB_ROOT;
lock->waiters_leftmost = NULL;
debug_rt_mutex_init(lock, name);
if (name && key)
debug_rt_mutex_init(lock, name, key);
}
EXPORT_SYMBOL_GPL(__rt_mutex_init);
@ -1660,7 +1683,7 @@ EXPORT_SYMBOL_GPL(__rt_mutex_init);
void rt_mutex_init_proxy_locked(struct rt_mutex *lock,
struct task_struct *proxy_owner)
{
__rt_mutex_init(lock, NULL);
__rt_mutex_init(lock, NULL, NULL);
debug_rt_mutex_proxy_lock(lock, proxy_owner);
rt_mutex_set_owner(lock, proxy_owner);
}


@ -17,7 +17,7 @@
#define debug_rt_mutex_proxy_lock(l,p) do { } while (0)
#define debug_rt_mutex_proxy_unlock(l) do { } while (0)
#define debug_rt_mutex_unlock(l) do { } while (0)
#define debug_rt_mutex_init(m, n) do { } while (0)
#define debug_rt_mutex_init(m, n, k) do { } while (0)
#define debug_rt_mutex_deadlock(d, a ,l) do { } while (0)
#define debug_rt_mutex_print_deadlock(w) do { } while (0)
#define debug_rt_mutex_reset_waiter(w) do { } while (0)


@ -1052,6 +1052,7 @@ config DEBUG_LOCK_ALLOC
depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
select DEBUG_SPINLOCK
select DEBUG_MUTEXES
select DEBUG_RT_MUTEXES if RT_MUTEXES
select LOCKDEP
help
This feature will check whether any held lock (spinlock, rwlock,
@ -1067,6 +1068,7 @@ config PROVE_LOCKING
select LOCKDEP
select DEBUG_SPINLOCK
select DEBUG_MUTEXES
select DEBUG_RT_MUTEXES if RT_MUTEXES
select DEBUG_LOCK_ALLOC
select TRACE_IRQFLAGS
default n
@ -1121,6 +1123,7 @@ config LOCK_STAT
select LOCKDEP
select DEBUG_SPINLOCK
select DEBUG_MUTEXES
select DEBUG_RT_MUTEXES if RT_MUTEXES
select DEBUG_LOCK_ALLOC
default n
help


@ -0,0 +1,11 @@
#undef LOCK
#define LOCK RTL
#undef UNLOCK
#define UNLOCK RTU
#undef RLOCK
#undef WLOCK
#undef INIT
#define INIT RTI
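This header points the shared selftest templates at rt_mutexes; RTL(), RTU() and RTI() are defined further down in the lib/locking-selftest.c hunk, and RLOCK/WLOCK stay undefined because rt_mutexes have no reader side. Roughly, a template statement then expands as in this illustrative sketch:

	LOCK(X1);	/* -> RTL(X1) -> rt_mutex_lock(&rtmutex_X1)   */
	UNLOCK(X1);	/* -> RTU(X1) -> rt_mutex_unlock(&rtmutex_X1) */
	INIT(X1);	/* -> RTI(X1) -> rt_mutex_init(&rtmutex_X1)   */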


@ -21,6 +21,7 @@
#include <linux/interrupt.h>
#include <linux/debug_locks.h>
#include <linux/irqflags.h>
#include <linux/rtmutex.h>
/*
* Change this to 1 if you want to see the failure printouts:
@ -46,6 +47,7 @@ __setup("debug_locks_verbose=", setup_debug_locks_verbose);
#define LOCKTYPE_MUTEX 0x4
#define LOCKTYPE_RWSEM 0x8
#define LOCKTYPE_WW 0x10
#define LOCKTYPE_RTMUTEX 0x20
static struct ww_acquire_ctx t, t2;
static struct ww_mutex o, o2, o3;
@ -74,6 +76,15 @@ static DECLARE_RWSEM(rwsem_B);
static DECLARE_RWSEM(rwsem_C);
static DECLARE_RWSEM(rwsem_D);
#ifdef CONFIG_RT_MUTEXES
static DEFINE_RT_MUTEX(rtmutex_A);
static DEFINE_RT_MUTEX(rtmutex_B);
static DEFINE_RT_MUTEX(rtmutex_C);
static DEFINE_RT_MUTEX(rtmutex_D);
#endif
/*
* Locks that we initialize dynamically as well so that
* e.g. X1 and X2 becomes two instances of the same class,
@ -108,6 +119,17 @@ static DECLARE_RWSEM(rwsem_Y2);
static DECLARE_RWSEM(rwsem_Z1);
static DECLARE_RWSEM(rwsem_Z2);
#ifdef CONFIG_RT_MUTEXES
static DEFINE_RT_MUTEX(rtmutex_X1);
static DEFINE_RT_MUTEX(rtmutex_X2);
static DEFINE_RT_MUTEX(rtmutex_Y1);
static DEFINE_RT_MUTEX(rtmutex_Y2);
static DEFINE_RT_MUTEX(rtmutex_Z1);
static DEFINE_RT_MUTEX(rtmutex_Z2);
#endif
/*
* non-inlined runtime initializers, to let separate locks share
* the same lock-class:
@ -129,6 +151,17 @@ INIT_CLASS_FUNC(Z)
static void init_shared_classes(void)
{
#ifdef CONFIG_RT_MUTEXES
static struct lock_class_key rt_X, rt_Y, rt_Z;
__rt_mutex_init(&rtmutex_X1, __func__, &rt_X);
__rt_mutex_init(&rtmutex_X2, __func__, &rt_X);
__rt_mutex_init(&rtmutex_Y1, __func__, &rt_Y);
__rt_mutex_init(&rtmutex_Y2, __func__, &rt_Y);
__rt_mutex_init(&rtmutex_Z1, __func__, &rt_Z);
__rt_mutex_init(&rtmutex_Z2, __func__, &rt_Z);
#endif
init_class_X(&lock_X1, &rwlock_X1, &mutex_X1, &rwsem_X1);
init_class_X(&lock_X2, &rwlock_X2, &mutex_X2, &rwsem_X2);
@ -193,6 +226,10 @@ static void init_shared_classes(void)
#define MU(x) mutex_unlock(&mutex_##x)
#define MI(x) mutex_init(&mutex_##x)
#define RTL(x) rt_mutex_lock(&rtmutex_##x)
#define RTU(x) rt_mutex_unlock(&rtmutex_##x)
#define RTI(x) rt_mutex_init(&rtmutex_##x)
#define WSL(x) down_write(&rwsem_##x)
#define WSU(x) up_write(&rwsem_##x)
@ -264,6 +301,11 @@ GENERATE_TESTCASE(AA_wsem)
#include "locking-selftest-rsem.h"
GENERATE_TESTCASE(AA_rsem)
#ifdef CONFIG_RT_MUTEXES
#include "locking-selftest-rtmutex.h"
GENERATE_TESTCASE(AA_rtmutex);
#endif
#undef E
/*
@ -345,6 +387,11 @@ GENERATE_TESTCASE(ABBA_wsem)
#include "locking-selftest-rsem.h"
GENERATE_TESTCASE(ABBA_rsem)
#ifdef CONFIG_RT_MUTEXES
#include "locking-selftest-rtmutex.h"
GENERATE_TESTCASE(ABBA_rtmutex);
#endif
#undef E
/*
@ -373,6 +420,11 @@ GENERATE_TESTCASE(ABBCCA_wsem)
#include "locking-selftest-rsem.h"
GENERATE_TESTCASE(ABBCCA_rsem)
#ifdef CONFIG_RT_MUTEXES
#include "locking-selftest-rtmutex.h"
GENERATE_TESTCASE(ABBCCA_rtmutex);
#endif
#undef E
/*
@ -401,6 +453,11 @@ GENERATE_TESTCASE(ABCABC_wsem)
#include "locking-selftest-rsem.h"
GENERATE_TESTCASE(ABCABC_rsem)
#ifdef CONFIG_RT_MUTEXES
#include "locking-selftest-rtmutex.h"
GENERATE_TESTCASE(ABCABC_rtmutex);
#endif
#undef E
/*
@ -430,6 +487,11 @@ GENERATE_TESTCASE(ABBCCDDA_wsem)
#include "locking-selftest-rsem.h"
GENERATE_TESTCASE(ABBCCDDA_rsem)
#ifdef CONFIG_RT_MUTEXES
#include "locking-selftest-rtmutex.h"
GENERATE_TESTCASE(ABBCCDDA_rtmutex);
#endif
#undef E
/*
@ -458,6 +520,11 @@ GENERATE_TESTCASE(ABCDBDDA_wsem)
#include "locking-selftest-rsem.h"
GENERATE_TESTCASE(ABCDBDDA_rsem)
#ifdef CONFIG_RT_MUTEXES
#include "locking-selftest-rtmutex.h"
GENERATE_TESTCASE(ABCDBDDA_rtmutex);
#endif
#undef E
/*
@ -486,6 +553,11 @@ GENERATE_TESTCASE(ABCDBCDA_wsem)
#include "locking-selftest-rsem.h"
GENERATE_TESTCASE(ABCDBCDA_rsem)
#ifdef CONFIG_RT_MUTEXES
#include "locking-selftest-rtmutex.h"
GENERATE_TESTCASE(ABCDBCDA_rtmutex);
#endif
#undef E
/*
@ -513,33 +585,10 @@ GENERATE_TESTCASE(double_unlock_wsem)
#include "locking-selftest-rsem.h"
GENERATE_TESTCASE(double_unlock_rsem)
#undef E
/*
* Bad unlock ordering:
*/
#define E() \
\
LOCK(A); \
LOCK(B); \
UNLOCK(A); /* fail */ \
UNLOCK(B);
/*
* 6 testcases:
*/
#include "locking-selftest-spin.h"
GENERATE_TESTCASE(bad_unlock_order_spin)
#include "locking-selftest-wlock.h"
GENERATE_TESTCASE(bad_unlock_order_wlock)
#include "locking-selftest-rlock.h"
GENERATE_TESTCASE(bad_unlock_order_rlock)
#include "locking-selftest-mutex.h"
GENERATE_TESTCASE(bad_unlock_order_mutex)
#include "locking-selftest-wsem.h"
GENERATE_TESTCASE(bad_unlock_order_wsem)
#include "locking-selftest-rsem.h"
GENERATE_TESTCASE(bad_unlock_order_rsem)
#ifdef CONFIG_RT_MUTEXES
#include "locking-selftest-rtmutex.h"
GENERATE_TESTCASE(double_unlock_rtmutex);
#endif
#undef E
@ -567,6 +616,11 @@ GENERATE_TESTCASE(init_held_wsem)
#include "locking-selftest-rsem.h"
GENERATE_TESTCASE(init_held_rsem)
#ifdef CONFIG_RT_MUTEXES
#include "locking-selftest-rtmutex.h"
GENERATE_TESTCASE(init_held_rtmutex);
#endif
#undef E
/*
@ -916,6 +970,9 @@ GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion_soft)
# define I_MUTEX(x) lockdep_reset_lock(&mutex_##x.dep_map)
# define I_RWSEM(x) lockdep_reset_lock(&rwsem_##x.dep_map)
# define I_WW(x) lockdep_reset_lock(&x.dep_map)
#ifdef CONFIG_RT_MUTEXES
# define I_RTMUTEX(x) lockdep_reset_lock(&rtmutex_##x.dep_map)
#endif
#else
# define I_SPINLOCK(x)
# define I_RWLOCK(x)
@ -924,12 +981,23 @@ GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion_soft)
# define I_WW(x)
#endif
#ifndef I_RTMUTEX
# define I_RTMUTEX(x)
#endif
#ifdef CONFIG_RT_MUTEXES
#define I2_RTMUTEX(x) rt_mutex_init(&rtmutex_##x)
#else
#define I2_RTMUTEX(x)
#endif
#define I1(x) \
do { \
I_SPINLOCK(x); \
I_RWLOCK(x); \
I_MUTEX(x); \
I_RWSEM(x); \
I_RTMUTEX(x); \
} while (0)
#define I2(x) \
@ -938,6 +1006,7 @@ GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion_soft)
rwlock_init(&rwlock_##x); \
mutex_init(&mutex_##x); \
init_rwsem(&rwsem_##x); \
I2_RTMUTEX(x); \
} while (0)
static void reset_locks(void)
@ -1013,6 +1082,12 @@ static void dotest(void (*testcase_fn)(void), int expected, int lockclass_mask)
reset_locks();
}
#ifdef CONFIG_RT_MUTEXES
#define dotest_rt(fn, e, m) dotest((fn), (e), (m))
#else
#define dotest_rt(fn, e, m)
#endif
static inline void print_testname(const char *testname)
{
printk("%33s:", testname);
@ -1050,6 +1125,7 @@ static inline void print_testname(const char *testname)
dotest(name##_mutex, FAILURE, LOCKTYPE_MUTEX); \
dotest(name##_wsem, FAILURE, LOCKTYPE_RWSEM); \
dotest(name##_rsem, FAILURE, LOCKTYPE_RWSEM); \
dotest_rt(name##_rtmutex, FAILURE, LOCKTYPE_RTMUTEX); \
pr_cont("\n");
#define DO_TESTCASE_6_SUCCESS(desc, name) \
@ -1060,6 +1136,7 @@ static inline void print_testname(const char *testname)
dotest(name##_mutex, SUCCESS, LOCKTYPE_MUTEX); \
dotest(name##_wsem, SUCCESS, LOCKTYPE_RWSEM); \
dotest(name##_rsem, SUCCESS, LOCKTYPE_RWSEM); \
dotest_rt(name##_rtmutex, SUCCESS, LOCKTYPE_RTMUTEX); \
pr_cont("\n");
/*
@ -1073,6 +1150,7 @@ static inline void print_testname(const char *testname)
dotest(name##_mutex, FAILURE, LOCKTYPE_MUTEX); \
dotest(name##_wsem, FAILURE, LOCKTYPE_RWSEM); \
dotest(name##_rsem, FAILURE, LOCKTYPE_RWSEM); \
dotest_rt(name##_rtmutex, FAILURE, LOCKTYPE_RTMUTEX); \
pr_cont("\n");
#define DO_TESTCASE_2I(desc, name, nr) \
@ -1825,7 +1903,6 @@ void locking_selftest(void)
DO_TESTCASE_6R("A-B-C-D-B-C-D-A deadlock", ABCDBCDA);
DO_TESTCASE_6("double unlock", double_unlock);
DO_TESTCASE_6("initialize held", init_held);
DO_TESTCASE_6_SUCCESS("bad unlock order", bad_unlock_order);
printk(" --------------------------------------------------------------------------\n");
print_testname("recursive read-lock");


@ -37,6 +37,8 @@
#include <linux/refcount.h>
#include <linux/bug.h>
#ifdef CONFIG_REFCOUNT_FULL
/**
* refcount_add_not_zero - add a value to a refcount unless it is 0
* @i: the value to add to the refcount
@ -225,6 +227,7 @@ void refcount_dec(refcount_t *r)
WARN_ONCE(refcount_dec_and_test(r), "refcount_t: decrement hit 0; leaking memory.\n");
}
EXPORT_SYMBOL(refcount_dec);
#endif /* CONFIG_REFCOUNT_FULL */
/**
* refcount_dec_if_one - decrement a refcount if it is 1


@ -19,6 +19,7 @@ help:
@echo ' kvm_stat - top-like utility for displaying kvm statistics'
@echo ' leds - LEDs tools'
@echo ' lguest - a minimal 32-bit x86 hypervisor'
@echo ' liblockdep - user-space wrapper for kernel locking-validator'
@echo ' net - misc networking tools'
@echo ' perf - Linux performance measurement and analysis tool'
@echo ' selftests - various kernel selftests'
@ -89,7 +90,7 @@ freefall: FORCE
kvm_stat: FORCE
$(call descend,kvm/$@)
all: acpi cgroup cpupower gpio hv firewire lguest \
all: acpi cgroup cpupower gpio hv firewire lguest liblockdep \
perf selftests turbostat usb \
virtio vm net x86_energy_perf_policy \
tmon freefall objtool kvm_stat
@ -103,6 +104,9 @@ cpupower_install:
cgroup_install firewire_install gpio_install hv_install lguest_install perf_install usb_install virtio_install vm_install net_install objtool_install:
$(call descend,$(@:_install=),install)
liblockdep_install:
$(call descend,lib/lockdep,install)
selftests_install:
$(call descend,testing/$(@:_install=),install)
@ -119,7 +123,7 @@ kvm_stat_install:
$(call descend,kvm/$(@:_install=),install)
install: acpi_install cgroup_install cpupower_install gpio_install \
hv_install firewire_install lguest_install \
hv_install firewire_install lguest_install liblockdep_install \
perf_install selftests_install turbostat_install usb_install \
virtio_install vm_install net_install x86_energy_perf_policy_install \
tmon_install freefall_install objtool_install kvm_stat_install


@ -0,0 +1,4 @@
#ifndef __TOOLS_INCLUDE_LINUX_ASM_SECTIONS_H
#define __TOOLS_INCLUDE_LINUX_ASM_SECTIONS_H
#endif /* __TOOLS_INCLUDE_LINUX_ASM_SECTIONS_H */


@ -61,4 +61,14 @@ static inline unsigned fls_long(unsigned long l)
return fls64(l);
}
/**
* rol32 - rotate a 32-bit value left
* @word: value to rotate
* @shift: bits to roll
*/
static inline __u32 rol32(__u32 word, unsigned int shift)
{
return (word << shift) | (word >> ((-shift) & 31));
}
#endif


@ -45,6 +45,10 @@
# define __maybe_unused __attribute__((unused))
#endif
#ifndef __used
# define __used __attribute__((__unused__))
#endif
#ifndef __packed
# define __packed __attribute__((__packed__))
#endif
@ -65,6 +69,14 @@
# define unlikely(x) __builtin_expect(!!(x), 0)
#endif
#ifndef __init
# define __init
#endif
#ifndef noinline
# define noinline
#endif
#define uninitialized_var(x) x = *(&(x))
#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))


@ -3,8 +3,9 @@
#include <stddef.h>
#include <linux/compiler.h>
#include <asm/bug.h>
#define DEBUG_LOCKS_WARN_ON(x) (x)
#define DEBUG_LOCKS_WARN_ON(x) WARN_ON(x)
extern bool debug_locks;
extern bool debug_locks_silent;


@ -0,0 +1,4 @@
#ifndef _TOOLS_INCLUDE_LINUX_DELAY_H
#define _TOOLS_INCLUDE_LINUX_DELAY_H
#endif /* _TOOLS_INCLUDE_LINUX_DELAY_H */


@ -46,4 +46,9 @@ static inline bool __must_check IS_ERR(__force const void *ptr)
return IS_ERR_VALUE((unsigned long)ptr);
}
static inline bool __must_check IS_ERR_OR_NULL(__force const void *ptr)
{
return unlikely(!ptr) || IS_ERR_VALUE((unsigned long)ptr);
}
#endif /* _LINUX_ERR_H */


@ -0,0 +1,4 @@
#ifndef _TOOLS_INCLUDE_LINUX_FTRACE_H
#define _TOOLS_INCLUDE_LINUX_FTRACE_H
#endif /* _TOOLS_INCLUDE_LINUX_FTRACE_H */


@ -0,0 +1,4 @@
#ifndef _TOOLS_INCLUDE_LINUX_GFP_H
#define _TOOLS_INCLUDE_LINUX_GFP_H
#endif /* _TOOLS_INCLUDE_LINUX_GFP_H */


@ -0,0 +1,4 @@
#ifndef _TOOLS_INCLUDE_LINUX_INTERRUPT_H
#define _TOOLS_INCLUDE_LINUX_INTERRUPT_H
#endif /* _TOOLS_INCLUDE_LINUX_INTERRUPT_H */


@ -17,19 +17,19 @@
#define raw_local_irq_disable() do { } while (0)
#define raw_local_irq_enable() do { } while (0)
#define raw_local_irq_save(flags) ((flags) = 0)
#define raw_local_irq_restore(flags) do { } while (0)
#define raw_local_irq_restore(flags) ((void)(flags))
#define raw_local_save_flags(flags) ((flags) = 0)
#define raw_irqs_disabled_flags(flags) do { } while (0)
#define raw_irqs_disabled_flags(flags) ((void)(flags))
#define raw_irqs_disabled() 0
#define raw_safe_halt()
#define local_irq_enable() do { } while (0)
#define local_irq_disable() do { } while (0)
#define local_irq_save(flags) ((flags) = 0)
#define local_irq_restore(flags) do { } while (0)
#define local_irq_restore(flags) ((void)(flags))
#define local_save_flags(flags) ((flags) = 0)
#define irqs_disabled() (1)
#define irqs_disabled_flags(flags) (0)
#define irqs_disabled_flags(flags) ((void)(flags), 0)
#define safe_halt() do { } while (0)
#define trace_lock_release(x, y)


@ -0,0 +1,175 @@
#ifndef _LINUX_JHASH_H
#define _LINUX_JHASH_H
/* jhash.h: Jenkins hash support.
*
* Copyright (C) 2006. Bob Jenkins (bob_jenkins@burtleburtle.net)
*
* http://burtleburtle.net/bob/hash/
*
* These are the credits from Bob's sources:
*
* lookup3.c, by Bob Jenkins, May 2006, Public Domain.
*
* These are functions for producing 32-bit hashes for hash table lookup.
* hashword(), hashlittle(), hashlittle2(), hashbig(), mix(), and final()
* are externally useful functions. Routines to test the hash are included
* if SELF_TEST is defined. You can use this free for any purpose. It's in
* the public domain. It has no warranty.
*
* Copyright (C) 2009-2010 Jozsef Kadlecsik (kadlec@blackhole.kfki.hu)
*
* I've modified Bob's hash to be useful in the Linux kernel, and
* any bugs present are my fault.
* Jozsef
*/
#include <linux/bitops.h>
#include <linux/unaligned/packed_struct.h>
/* Best hash sizes are of power of two */
#define jhash_size(n) ((u32)1<<(n))
/* Mask the hash value, i.e (value & jhash_mask(n)) instead of (value % n) */
#define jhash_mask(n) (jhash_size(n)-1)
/* __jhash_mix -- mix 3 32-bit values reversibly. */
#define __jhash_mix(a, b, c) \
{ \
a -= c; a ^= rol32(c, 4); c += b; \
b -= a; b ^= rol32(a, 6); a += c; \
c -= b; c ^= rol32(b, 8); b += a; \
a -= c; a ^= rol32(c, 16); c += b; \
b -= a; b ^= rol32(a, 19); a += c; \
c -= b; c ^= rol32(b, 4); b += a; \
}
/* __jhash_final - final mixing of 3 32-bit values (a,b,c) into c */
#define __jhash_final(a, b, c) \
{ \
c ^= b; c -= rol32(b, 14); \
a ^= c; a -= rol32(c, 11); \
b ^= a; b -= rol32(a, 25); \
c ^= b; c -= rol32(b, 16); \
a ^= c; a -= rol32(c, 4); \
b ^= a; b -= rol32(a, 14); \
c ^= b; c -= rol32(b, 24); \
}
/* An arbitrary initial parameter */
#define JHASH_INITVAL 0xdeadbeef
/* jhash - hash an arbitrary key
* @k: sequence of bytes as key
* @length: the length of the key
* @initval: the previous hash, or an arbitrary value
*
* The generic version, hashes an arbitrary sequence of bytes.
* No alignment or length assumptions are made about the input key.
*
* Returns the hash value of the key. The result depends on endianness.
*/
static inline u32 jhash(const void *key, u32 length, u32 initval)
{
u32 a, b, c;
const u8 *k = key;
/* Set up the internal state */
a = b = c = JHASH_INITVAL + length + initval;
/* All but the last block: affect some 32 bits of (a,b,c) */
while (length > 12) {
a += __get_unaligned_cpu32(k);
b += __get_unaligned_cpu32(k + 4);
c += __get_unaligned_cpu32(k + 8);
__jhash_mix(a, b, c);
length -= 12;
k += 12;
}
/* Last block: affect all 32 bits of (c) */
/* All the case statements fall through */
switch (length) {
case 12: c += (u32)k[11]<<24;
case 11: c += (u32)k[10]<<16;
case 10: c += (u32)k[9]<<8;
case 9: c += k[8];
case 8: b += (u32)k[7]<<24;
case 7: b += (u32)k[6]<<16;
case 6: b += (u32)k[5]<<8;
case 5: b += k[4];
case 4: a += (u32)k[3]<<24;
case 3: a += (u32)k[2]<<16;
case 2: a += (u32)k[1]<<8;
case 1: a += k[0];
__jhash_final(a, b, c);
case 0: /* Nothing left to add */
break;
}
return c;
}
/* jhash2 - hash an array of u32's
* @k: the key which must be an array of u32's
* @length: the number of u32's in the key
* @initval: the previous hash, or an arbitrary value
*
* Returns the hash value of the key.
*/
static inline u32 jhash2(const u32 *k, u32 length, u32 initval)
{
u32 a, b, c;
/* Set up the internal state */
a = b = c = JHASH_INITVAL + (length<<2) + initval;
/* Handle most of the key */
while (length > 3) {
a += k[0];
b += k[1];
c += k[2];
__jhash_mix(a, b, c);
length -= 3;
k += 3;
}
/* Handle the last 3 u32's: all the case statements fall through */
switch (length) {
case 3: c += k[2];
case 2: b += k[1];
case 1: a += k[0];
__jhash_final(a, b, c);
case 0: /* Nothing left to add */
break;
}
return c;
}
/* __jhash_nwords - hash exactly 3, 2 or 1 word(s) */
static inline u32 __jhash_nwords(u32 a, u32 b, u32 c, u32 initval)
{
a += initval;
b += initval;
c += initval;
__jhash_final(a, b, c);
return c;
}
static inline u32 jhash_3words(u32 a, u32 b, u32 c, u32 initval)
{
return __jhash_nwords(a, b, c, initval + JHASH_INITVAL + (3 << 2));
}
static inline u32 jhash_2words(u32 a, u32 b, u32 initval)
{
return __jhash_nwords(a, b, 0, initval + JHASH_INITVAL + (2 << 2));
}
static inline u32 jhash_1word(u32 a, u32 initval)
{
return __jhash_nwords(a, 0, 0, initval + JHASH_INITVAL + (1 << 2));
}
#endif /* _LINUX_JHASH_H */


@ -3,6 +3,7 @@
#include <linux/kernel.h>
#include <stdio.h>
#include <unistd.h>
#define KSYM_NAME_LEN 128
@ -24,7 +25,7 @@ static inline void print_ip_sym(unsigned long ip)
name = backtrace_symbols((void **)&ip, 1);
printf("%s\n", *name);
dprintf(STDOUT_FILENO, "%s\n", *name);
free(name);
}


@ -32,6 +32,7 @@
(type *)((char *)__mptr - offsetof(type, member)); })
#endif
#define BUILD_BUG_ON(condition) ((void)sizeof(char[1 - 2*!!(condition)]))
#define BUILD_BUG_ON_ZERO(e) (sizeof(struct { int:-!!(e); }))
#ifndef max
@ -89,4 +90,7 @@ int scnprintf(char * buf, size_t size, const char * fmt, ...);
#define round_up(x, y) ((((x)-1) | __round_mask(x, y))+1)
#define round_down(x, y) ((x) & ~__round_mask(x, y))
#define current_gfp_context(k) 0
#define synchronize_sched()
#endif


@ -0,0 +1,4 @@
#ifndef _TOOLS_INCLUDE_LINUX_LINKAGE_H
#define _TOOLS_INCLUDE_LINUX_LINKAGE_H
#endif /* _TOOLS_INCLUDE_LINUX_LINKAGE_H */


@ -7,8 +7,15 @@
#include <limits.h>
#include <linux/utsname.h>
#include <linux/compiler.h>
#include <linux/export.h>
#include <linux/kern_levels.h>
#include <linux/err.h>
#include <linux/rcu.h>
#include <linux/list.h>
#include <linux/hardirq.h>
#include <unistd.h>
#define MAX_LOCK_DEPTH 2000UL
#define MAX_LOCK_DEPTH 63UL
#define asmlinkage
#define __visible
@ -29,31 +36,32 @@ extern struct task_struct *__curr(void);
#define current (__curr())
#define debug_locks_off() 1
static inline int debug_locks_off(void)
{
return 1;
}
#define task_pid_nr(tsk) ((tsk)->pid)
#define KSYM_NAME_LEN 128
#define printk printf
#define printk(...) dprintf(STDOUT_FILENO, __VA_ARGS__)
#define pr_err(format, ...) fprintf (stderr, format, ## __VA_ARGS__)
#define pr_warn pr_err
#define list_del_rcu list_del
#define atomic_t unsigned long
#define atomic_inc(x) ((*(x))++)
static struct new_utsname *init_utsname(void)
{
static struct new_utsname n = (struct new_utsname) {
.release = "liblockdep",
.version = LIBLOCKDEP_VERSION,
};
return &n;
}
#define print_tainted() ""
#define static_obj(x) 1
#define debug_show_all_locks()
extern void debug_check_no_locks_held(void);
static __used bool __is_kernel_percpu_address(unsigned long addr, void *can_addr)
{
return false;
}
#endif


@ -3,4 +3,9 @@
#define module_param(name, type, perm)
static inline bool __is_module_percpu_address(unsigned long addr, unsigned long *can_addr)
{
return false;
}
#endif


@ -0,0 +1,4 @@
#ifndef _TOOLS_INCLUDE_LINUX_MUTEX_H
#define _TOOLS_INCLUDE_LINUX_MUTEX_H
#endif /* _TOOLS_INCLUDE_LINUX_MUTEX_H */


@ -0,0 +1,4 @@
#ifndef _TOOLS_INCLUDE_LINUX_PROC_FS_H
#define _TOOLS_INCLUDE_LINUX_PROC_FS_H
#endif /* _TOOLS_INCLUDE_LINUX_PROC_FS_H */


@ -18,4 +18,7 @@ static inline bool rcu_is_watching(void)
return false;
}
#define rcu_assign_pointer(p, v) ((p) = (v))
#define RCU_INIT_POINTER(p, v) p=(v)
#endif


@ -0,0 +1,4 @@
#ifndef _TOOLS_PERF_LINUX_SCHED_CLOCK_H
#define _TOOLS_PERF_LINUX_SCHED_CLOCK_H
#endif /* _TOOLS_PERF_LINUX_SCHED_CLOCK_H */


@ -0,0 +1,4 @@
#ifndef _TOOLS_PERF_LINUX_SCHED_MM_H
#define _TOOLS_PERF_LINUX_SCHED_MM_H
#endif /* _TOOLS_PERF_LINUX_SCHED_MM_H */


@ -0,0 +1,4 @@
#ifndef _TOOLS_PERF_LINUX_SCHED_TASK_H
#define _TOOLS_PERF_LINUX_SCHED_TASK_H
#endif /* _TOOLS_PERF_LINUX_SCHED_TASK_H */


@ -0,0 +1,4 @@
#ifndef _TOOLS_INCLUDE_LINUX_SEQ_FILE_H
#define _TOOLS_INCLUDE_LINUX_SEQ_FILE_H
#endif /* _TOOLS_INCLUDE_LINUX_SEQ_FILE_H */


@ -1,5 +1,31 @@
#ifndef __LINUX_SPINLOCK_H_
#define __LINUX_SPINLOCK_H_
#include <pthread.h>
#include <stdbool.h>
#define spinlock_t pthread_mutex_t
#define DEFINE_SPINLOCK(x) pthread_mutex_t x = PTHREAD_MUTEX_INITIALIZER;
#define spin_lock_irqsave(x, f) (void)f, pthread_mutex_lock(x)
#define spin_unlock_irqrestore(x, f) (void)f, pthread_mutex_unlock(x)
#define arch_spinlock_t pthread_mutex_t
#define __ARCH_SPIN_LOCK_UNLOCKED PTHREAD_MUTEX_INITIALIZER
static inline void arch_spin_lock(arch_spinlock_t *mutex)
{
pthread_mutex_lock(mutex);
}
static inline void arch_spin_unlock(arch_spinlock_t *mutex)
{
pthread_mutex_unlock(mutex);
}
static inline bool arch_spin_is_locked(arch_spinlock_t *mutex)
{
return true;
}
#endif


@ -0,0 +1,46 @@
#ifndef _LINUX_UNALIGNED_PACKED_STRUCT_H
#define _LINUX_UNALIGNED_PACKED_STRUCT_H
#include <linux/kernel.h>
struct __una_u16 { u16 x; } __packed;
struct __una_u32 { u32 x; } __packed;
struct __una_u64 { u64 x; } __packed;
static inline u16 __get_unaligned_cpu16(const void *p)
{
const struct __una_u16 *ptr = (const struct __una_u16 *)p;
return ptr->x;
}
static inline u32 __get_unaligned_cpu32(const void *p)
{
const struct __una_u32 *ptr = (const struct __una_u32 *)p;
return ptr->x;
}
static inline u64 __get_unaligned_cpu64(const void *p)
{
const struct __una_u64 *ptr = (const struct __una_u64 *)p;
return ptr->x;
}
static inline void __put_unaligned_cpu16(u16 val, void *p)
{
struct __una_u16 *ptr = (struct __una_u16 *)p;
ptr->x = val;
}
static inline void __put_unaligned_cpu32(u32 val, void *p)
{
struct __una_u32 *ptr = (struct __una_u32 *)p;
ptr->x = val;
}
static inline void __put_unaligned_cpu64(u64 val, void *p)
{
struct __una_u64 *ptr = (struct __una_u64 *)p;
ptr->x = val;
}
#endif /* _LINUX_UNALIGNED_PACKED_STRUCT_H */


@ -0,0 +1,4 @@
#ifndef _TOOLS_INCLUDE_TRACE_EVENTS_LOCK_H
#define _TOOLS_INCLUDE_TRACE_EVENTS_LOCK_H
#endif /* _TOOLS_INCLUDE_TRACE_EVENTS_LOCK_H */


@ -79,6 +79,7 @@ INCLUDES = -I. -I./uinclude -I./include -I../../include $(CONFIG_INCLUDES)
# Set compile option CFLAGS if not set elsewhere
CFLAGS ?= -g -DCONFIG_LOCKDEP -DCONFIG_STACKTRACE -DCONFIG_PROVE_LOCKING -DBITS_PER_LONG=__WORDSIZE -DLIBLOCKDEP_VERSION='"$(LIBLOCKDEP_VERSION)"' -rdynamic -O0 -g
CFLAGS += -fPIC
CFLAGS += -Wall
override CFLAGS += $(CONFIG_FLAGS) $(INCLUDES) $(PLUGIN_DIR_SQ)
@ -100,7 +101,7 @@ include $(srctree)/tools/build/Makefile.include
do_compile_shared_library = \
($(print_shared_lib_compile) \
$(CC) --shared $^ -o $@ -lpthread -ldl -Wl,-soname='"$@"';$(shell ln -sf $@ liblockdep.so))
$(CC) $(LDFLAGS) --shared $^ -o $@ -lpthread -ldl -Wl,-soname='$(@F)';$(shell ln -sf $(@F) $(@D)/liblockdep.so))
do_build_static_lib = \
($(print_static_lib_build) \
@ -118,10 +119,10 @@ all_cmd: $(CMD_TARGETS)
$(LIB_IN): force
$(Q)$(MAKE) $(build)=liblockdep
liblockdep.so.$(LIBLOCKDEP_VERSION): $(LIB_IN)
$(OUTPUT)liblockdep.so.$(LIBLOCKDEP_VERSION): $(LIB_IN)
$(Q)$(do_compile_shared_library)
liblockdep.a: $(LIB_IN)
$(OUTPUT)liblockdep.a: $(LIB_IN)
$(Q)$(do_build_static_lib)
tags: force
@ -149,7 +150,7 @@ install_lib: all_cmd
install: install_lib
clean:
$(RM) *.o *~ $(TARGETS) *.a *liblockdep*.so* $(VERSION_FILES) .*.d .*.cmd
$(RM) $(OUTPUT)*.o *~ $(TARGETS) $(OUTPUT)*.a $(OUTPUT)*liblockdep*.so* $(VERSION_FILES) $(OUTPUT).*.d $(OUTPUT).*.cmd
$(RM) tags TAGS
PHONY += force


@ -1,8 +1,27 @@
#include <linux/lockdep.h>
#include <stdlib.h>
/* Trivial API wrappers, we don't (yet) have RCU in user-space: */
#define hlist_for_each_entry_rcu hlist_for_each_entry
#define hlist_add_head_rcu hlist_add_head
#define hlist_del_rcu hlist_del
#define list_for_each_entry_rcu list_for_each_entry
#define list_add_tail_rcu list_add_tail
u32 prandom_u32(void)
{
/* Used only by lock_pin_lock() which is dead code */
abort();
}
static struct new_utsname *init_utsname(void)
{
static struct new_utsname n = (struct new_utsname) {
.release = "liblockdep",
.version = LIBLOCKDEP_VERSION,
};
return &n;
}
#include "../../../kernel/locking/lockdep.c"


@ -4,6 +4,7 @@
#include <dlfcn.h>
#include <stdlib.h>
#include <sysexits.h>
#include <unistd.h>
#include "include/liblockdep/mutex.h"
#include "../../include/linux/rbtree.h"
@ -122,8 +123,6 @@ static struct rb_node **__get_lock_node(void *lock, struct rb_node **parent)
#define LIBLOCKDEP_STATIC_ENTRIES 1024
#endif
#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))
static struct lock_lookup __locks[LIBLOCKDEP_STATIC_ENTRIES];
static int __locks_nr;
@ -149,7 +148,7 @@ static struct lock_lookup *alloc_lock(void)
int idx = __locks_nr++;
if (idx >= ARRAY_SIZE(__locks)) {
fprintf(stderr,
dprintf(STDERR_FILENO,
"LOCKDEP error: insufficient LIBLOCKDEP_STATIC_ENTRIES\n");
exit(EX_UNAVAILABLE);
}


@ -1 +1 @@
#include "../../../lib/rbtree.c"
#include "../../lib/rbtree.c"


@ -4,9 +4,9 @@ make &> /dev/null
for i in `ls tests/*.c`; do
testname=$(basename "$i" .c)
gcc -o tests/$testname -pthread -lpthread $i liblockdep.a -Iinclude -D__USE_LIBLOCKDEP &> /dev/null
gcc -o tests/$testname -pthread $i liblockdep.a -Iinclude -D__USE_LIBLOCKDEP &> /dev/null
echo -ne "$testname... "
if [ $(timeout 1 ./tests/$testname | wc -l) -gt 0 ]; then
if [ $(timeout 1 ./tests/$testname 2>&1 | wc -l) -gt 0 ]; then
echo "PASSED!"
else
echo "FAILED!"
@ -18,9 +18,9 @@ done
for i in `ls tests/*.c`; do
testname=$(basename "$i" .c)
gcc -o tests/$testname -pthread -lpthread -Iinclude $i &> /dev/null
gcc -o tests/$testname -pthread -Iinclude $i &> /dev/null
echo -ne "(PRELOAD) $testname... "
if [ $(timeout 1 ./lockdep ./tests/$testname | wc -l) -gt 0 ]; then
if [ $(timeout 1 ./lockdep ./tests/$testname 2>&1 | wc -l) -gt 0 ]; then
echo "PASSED!"
else
echo "FAILED!"


@ -1,6 +0,0 @@
#ifndef __ASM_GENERIC_HASH_H
#define __ASM_GENERIC_HASH_H
/* Stub */
#endif /* __ASM_GENERIC_HASH_H */


@ -1,3 +0,0 @@
/* empty file */


@ -1,3 +0,0 @@
/* empty file */


@ -1,3 +0,0 @@
/* empty file */


@ -1,10 +0,0 @@
#ifndef _LIBLOCKDEP_LINUX_COMPILER_H_
#define _LIBLOCKDEP_LINUX_COMPILER_H_
#define __used __attribute__((__unused__))
#define unlikely
#define READ_ONCE(x) (x)
#define WRITE_ONCE(x, val) x=(val)
#define RCU_INIT_POINTER(p, v) p=(v)
#endif


@ -1,3 +0,0 @@
/* empty file */


@ -1,3 +0,0 @@
/* empty file */


@ -1,3 +0,0 @@
/* empty file */


@ -1 +0,0 @@
#include "../../../include/linux/hash.h"


@ -1,3 +0,0 @@
/* empty file */


@ -1,47 +0,0 @@
#ifndef _LIBLOCKDEP_LINUX_KERNEL_H_
#define _LIBLOCKDEP_LINUX_KERNEL_H_
#include <linux/export.h>
#include <linux/types.h>
#include <linux/rcu.h>
#include <linux/hardirq.h>
#include <linux/kern_levels.h>
#ifndef container_of
#define container_of(ptr, type, member) ({ \
const typeof(((type *)0)->member) * __mptr = (ptr); \
(type *)((char *)__mptr - offsetof(type, member)); })
#endif
#define max(x, y) ({ \
typeof(x) _max1 = (x); \
typeof(y) _max2 = (y); \
(void) (&_max1 == &_max2); \
_max1 > _max2 ? _max1 : _max2; })
#define BUILD_BUG_ON(condition) ((void)sizeof(char[1 - 2*!!(condition)]))
#define WARN_ON(x) (x)
#define WARN_ON_ONCE(x) (x)
#define likely(x) (x)
#define WARN(x, y...) (x)
#define uninitialized_var(x) x
#define __init
#define noinline
#define list_add_tail_rcu list_add_tail
#define list_for_each_entry_rcu list_for_each_entry
#define barrier()
#define synchronize_sched()
#ifndef CALLER_ADDR0
#define CALLER_ADDR0 ((unsigned long)__builtin_return_address(0))
#endif
#ifndef _RET_IP_
#define _RET_IP_ CALLER_ADDR0
#endif
#ifndef _THIS_IP_
#define _THIS_IP_ ({ __label__ __here; __here: (unsigned long)&&__here; })
#endif
#endif


@ -1,3 +0,0 @@
/* empty file */


@ -1 +0,0 @@
#include "../../../include/linux/list.h"


@ -1,3 +0,0 @@
/* empty file */


@ -1 +0,0 @@
#include "../../../include/linux/poison.h"


@ -1,6 +0,0 @@
#ifndef _LIBLOCKDEP_LINUX_PREFETCH_H_
#define _LIBLOCKDEP_LINUX_PREFETCH_H
static inline void prefetch(void *a __attribute__((unused))) { }
#endif


@ -1,3 +0,0 @@
/* empty file */


@ -1,2 +0,0 @@
#define __always_inline
#include "../../../include/linux/rbtree_augmented.h"


@ -1,3 +0,0 @@
/* empty file */


@ -1,25 +0,0 @@
#ifndef _LIBLOCKDEP_SPINLOCK_H_
#define _LIBLOCKDEP_SPINLOCK_H_
#include <pthread.h>
#include <stdbool.h>
#define arch_spinlock_t pthread_mutex_t
#define __ARCH_SPIN_LOCK_UNLOCKED PTHREAD_MUTEX_INITIALIZER
static inline void arch_spin_lock(arch_spinlock_t *mutex)
{
pthread_mutex_lock(mutex);
}
static inline void arch_spin_unlock(arch_spinlock_t *mutex)
{
pthread_mutex_unlock(mutex);
}
static inline bool arch_spin_is_locked(arch_spinlock_t *mutex)
{
return true;
}
#endif


@ -1,7 +0,0 @@
#ifndef _LIBLOCKDEP_LINUX_STRINGIFY_H_
#define _LIBLOCKDEP_LINUX_STRINGIFY_H_
#define __stringify_1(x...) #x
#define __stringify(x...) __stringify_1(x)
#endif


@ -1,3 +0,0 @@
/* empty file */