1
0
Fork 0

ANDROID: binder: Don't BUG_ON(!spin_is_locked()).

Because spin_is_locked() always returns false on UP
(uniprocessor) systems.

Use assert_spin_locked() instead, and remove the
WARN_ON() instances, since those were easy to verify.

Signed-off-by: Martijn Coenen <maco@android.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
hifive-unleashed-5.1
Martijn Coenen 2017-08-31 10:04:26 +02:00 committed by Greg Kroah-Hartman
parent abcc61537e
commit 858b271968
1 changed file with 10 additions and 16 deletions

View File

@ -987,7 +987,7 @@ binder_select_thread_ilocked(struct binder_proc *proc)
{
struct binder_thread *thread;
BUG_ON(!spin_is_locked(&proc->inner_lock));
assert_spin_locked(&proc->inner_lock);
thread = list_first_entry_or_null(&proc->waiting_threads,
struct binder_thread,
waiting_thread_node);
@ -1018,7 +1018,7 @@ static void binder_wakeup_thread_ilocked(struct binder_proc *proc,
struct binder_thread *thread,
bool sync)
{
BUG_ON(!spin_is_locked(&proc->inner_lock));
assert_spin_locked(&proc->inner_lock);
if (thread) {
if (sync)
@ -1075,7 +1075,7 @@ static struct binder_node *binder_get_node_ilocked(struct binder_proc *proc,
struct rb_node *n = proc->nodes.rb_node;
struct binder_node *node;
BUG_ON(!spin_is_locked(&proc->inner_lock));
assert_spin_locked(&proc->inner_lock);
while (n) {
node = rb_entry(n, struct binder_node, rb_node);
@ -1120,7 +1120,8 @@ static struct binder_node *binder_init_node_ilocked(
binder_uintptr_t cookie = fp ? fp->cookie : 0;
__u32 flags = fp ? fp->flags : 0;
BUG_ON(!spin_is_locked(&proc->inner_lock));
assert_spin_locked(&proc->inner_lock);
while (*p) {
parent = *p;
@ -1195,9 +1196,9 @@ static int binder_inc_node_nilocked(struct binder_node *node, int strong,
{
struct binder_proc *proc = node->proc;
BUG_ON(!spin_is_locked(&node->lock));
assert_spin_locked(&node->lock);
if (proc)
BUG_ON(!spin_is_locked(&proc->inner_lock));
assert_spin_locked(&proc->inner_lock);
if (strong) {
if (internal) {
if (target_list == NULL &&
@ -1248,9 +1249,9 @@ static bool binder_dec_node_nilocked(struct binder_node *node,
{
struct binder_proc *proc = node->proc;
BUG_ON(!spin_is_locked(&node->lock));
assert_spin_locked(&node->lock);
if (proc)
BUG_ON(!spin_is_locked(&proc->inner_lock));
assert_spin_locked(&proc->inner_lock);
if (strong) {
if (internal)
node->internal_strong_refs--;
@ -1774,7 +1775,7 @@ static void binder_pop_transaction_ilocked(struct binder_thread *target_thread,
struct binder_transaction *t)
{
BUG_ON(!target_thread);
BUG_ON(!spin_is_locked(&target_thread->proc->inner_lock));
assert_spin_locked(&target_thread->proc->inner_lock);
BUG_ON(target_thread->transaction_stack != t);
BUG_ON(target_thread->transaction_stack->from != target_thread);
target_thread->transaction_stack =
@ -4896,7 +4897,6 @@ static void print_binder_transaction_ilocked(struct seq_file *m,
struct binder_proc *to_proc;
struct binder_buffer *buffer = t->buffer;
WARN_ON(!spin_is_locked(&proc->inner_lock));
spin_lock(&t->lock);
to_proc = t->to_proc;
seq_printf(m,
@ -4984,7 +4984,6 @@ static void print_binder_thread_ilocked(struct seq_file *m,
size_t start_pos = m->count;
size_t header_pos;
WARN_ON(!spin_is_locked(&thread->proc->inner_lock));
seq_printf(m, " thread %d: l %02x need_return %d tr %d\n",
thread->pid, thread->looper,
thread->looper_need_return,
@ -5021,10 +5020,6 @@ static void print_binder_node_nilocked(struct seq_file *m,
struct binder_work *w;
int count;
WARN_ON(!spin_is_locked(&node->lock));
if (node->proc)
WARN_ON(!spin_is_locked(&node->proc->inner_lock));
count = 0;
hlist_for_each_entry(ref, &node->refs, node_entry)
count++;
@ -5050,7 +5045,6 @@ static void print_binder_node_nilocked(struct seq_file *m,
static void print_binder_ref_olocked(struct seq_file *m,
struct binder_ref *ref)
{
WARN_ON(!spin_is_locked(&ref->proc->outer_lock));
binder_node_lock(ref->node);
seq_printf(m, " ref %d: desc %d %snode %d s %d w %d d %pK\n",
ref->data.debug_id, ref->data.desc,