bpf: Use recursion prevention helpers in hashtab code

The required protection is that the caller cannot be migrated to a
different CPU as these places take either a hash bucket lock or might
trigger a kprobe inside the memory allocator. Both scenarios can lead to
deadlocks. Deadlock prevention is implemented per CPU by incrementing a
per-CPU variable which temporarily blocks the invocation of BPF programs
from perf and kprobes.

Replace the open-coded preempt_disable/enable() and this_cpu_inc/dec()
pairs with the new recursion prevention helpers to prepare BPF to work on
PREEMPT_RT enabled kernels. On a non-RT kernel the migrate disable/enable
in the helpers map to preempt_disable/enable(), i.e. no functional change.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Link: https://lore.kernel.org/bpf/20200224145644.211208533@linutronix.de
This commit is contained in:
Thomas Gleixner 2020-02-24 15:01:48 +01:00 committed by Alexei Starovoitov
parent c518cfa0c5
commit 085fee1a72

View file

@ -1333,8 +1333,7 @@ alloc:
}
again:
preempt_disable();
this_cpu_inc(bpf_prog_active);
bpf_disable_instrumentation();
rcu_read_lock();
again_nocopy:
dst_key = keys;
@ -1362,8 +1361,7 @@ again_nocopy:
*/
raw_spin_unlock_irqrestore(&b->lock, flags);
rcu_read_unlock();
this_cpu_dec(bpf_prog_active);
preempt_enable();
bpf_enable_instrumentation();
goto after_loop;
}
@ -1374,8 +1372,7 @@ again_nocopy:
*/
raw_spin_unlock_irqrestore(&b->lock, flags);
rcu_read_unlock();
this_cpu_dec(bpf_prog_active);
preempt_enable();
bpf_enable_instrumentation();
kvfree(keys);
kvfree(values);
goto alloc;
@ -1445,8 +1442,7 @@ next_batch:
}
rcu_read_unlock();
this_cpu_dec(bpf_prog_active);
preempt_enable();
bpf_enable_instrumentation();
if (bucket_cnt && (copy_to_user(ukeys + total * key_size, keys,
key_size * bucket_cnt) ||
copy_to_user(uvalues + total * value_size, values,