Merge branch 'bkl/core' of git://git.kernel.org/pub/scm/linux/kernel/git/frederic/random-tracing

* 'bkl/core' of git://git.kernel.org/pub/scm/linux/kernel/git/frederic/random-tracing:
  do_coredump: Do not take BKL
  init: Remove the BKL from startup code
Linus Torvalds 2010-08-07 17:06:54 -07:00
commit 78417334b5
3 changed files with 0 additions and 20 deletions
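
The 20 deleted lines are all Big Kernel Lock boilerplate: lock_kernel()/unlock_kernel() calls, the comments justifying them, and the <linux/smp_lock.h> include that provided them. A minimal sketch of the removal pattern, using hypothetical function names that do not appear in this commit:

#include <linux/smp_lock.h>	/* declared lock_kernel()/unlock_kernel() */

static int do_the_work(void);	/* hypothetical helper, stands in for the real work */

/* Before: the section ran under the Big Kernel Lock. */
static int frobnicate_locked(void)	/* hypothetical example, not from this commit */
{
	int ret;

	lock_kernel();
	ret = do_the_work();
	unlock_kernel();

	return ret;
}

/* After: the BKL calls (and, once nothing else needs it, the smp_lock.h
 * include) are simply deleted, because nothing in the section relies on
 * the BKL any more. */
static int frobnicate_unlocked(void)	/* hypothetical example, not from this commit */
{
	return do_the_work();
}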

fs/exec.c

@@ -28,7 +28,6 @@
 #include <linux/mm.h>
 #include <linux/stat.h>
 #include <linux/fcntl.h>
-#include <linux/smp_lock.h>
 #include <linux/swap.h>
 #include <linux/string.h>
 #include <linux/init.h>
@@ -1892,13 +1891,7 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
 	 */
 	clear_thread_flag(TIF_SIGPENDING);
 
-	/*
-	 * lock_kernel() because format_corename() is controlled by sysctl, which
-	 * uses lock_kernel()
-	 */
-	lock_kernel();
 	ispipe = format_corename(corename, signr);
-	unlock_kernel();
 
 	if (ispipe) {
 		int dump_count;

init/main.c

@@ -441,7 +441,6 @@ static noinline void __init_refok rest_init(void)
 	kthreadd_task = find_task_by_pid_ns(pid, &init_pid_ns);
 	rcu_read_unlock();
 	complete(&kthreadd_done);
-	unlock_kernel();
 
 	/*
 	 * The boot idle thread must execute schedule()
@@ -563,7 +562,6 @@ asmlinkage void __init start_kernel(void)
  * Interrupts are still disabled. Do necessary setups, then
  * enable them
  */
-	lock_kernel();
 	tick_init();
 	boot_cpu_init();
 	page_address_init();
@@ -820,7 +818,6 @@ static noinline int init_post(void)
 	/* need to finish all async __init code before freeing the memory */
 	async_synchronize_full();
 	free_initmem();
-	unlock_kernel();
 	mark_rodata_ro();
 	system_state = SYSTEM_RUNNING;
 	numa_default_policy();
@@ -860,8 +857,6 @@ static int __init kernel_init(void * unused)
 	 * Wait until kthreadd is all set-up.
 	 */
 	wait_for_completion(&kthreadd_done);
-	lock_kernel();
-
 	/*
 	 * init can allocate pages on any node
 	 */
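
Taken together, the init/main.c hunks remove a BKL hand-off that used to span the boot path: start_kernel() took the lock, rest_init() and init_post() released it, and kernel_init() re-took it after waiting for kthreadd. A condensed sketch of that pre-merge hand-off, reduced to just the BKL calls (function bodies heavily abridged, everything else elided):

/* Pre-merge boot flow, BKL calls only; all other work is elided. */

asmlinkage void __init start_kernel(void)
{
	lock_kernel();		/* early setup ran with the BKL held */
	/* ... subsystem setup, eventually reaching rest_init() ... */
}

static noinline void __init_refok rest_init(void)
{
	/* ... kernel_init and kthreadd are spawned ... */
	unlock_kernel();	/* the boot idle thread dropped the BKL before schedule() */
}

static int __init kernel_init(void * unused)
{
	/* ... wait_for_completion(&kthreadd_done) ... */
	lock_kernel();		/* init re-took the BKL for the remainder of boot */
	/* ... initcalls run; init_post() dropped the lock again late in boot ... */
	return 0;
}

After this merge the startup code no longer takes the BKL at all, which is what the "init: Remove the BKL from startup code" commit in this merge does.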

kernel/trace/trace.c

@@ -741,13 +741,6 @@ __acquires(kernel_lock)
 		return -1;
 	}
 
-	/*
-	 * When this gets called we hold the BKL which means that
-	 * preemption is disabled. Various trace selftests however
-	 * need to disable and enable preemption for successful tests.
-	 * So we drop the BKL here and grab it after the tests again.
-	 */
-	unlock_kernel();
 	mutex_lock(&trace_types_lock);
 
 	tracing_selftest_running = true;
@@ -829,7 +822,6 @@
 #endif
 
  out_unlock:
-	lock_kernel();
 	return ret;
 }
 
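
The deleted comment explains the old constraint: register_tracer() used to be reached with the BKL held (hence the __acquires(kernel_lock) annotation in the hunk header), and since holding the BKL disabled preemption, the lock had to be dropped around the tracer selftests and re-taken before returning. With the BKL gone from the boot path that dance disappears and only trace_types_lock is left in these hunks. A condensed sketch of the remaining locking, with the actual registration and selftest work elided (illustrative only, not the full function):

int register_tracer(struct tracer *type)
{
	int ret = 0;

	/* ... sanity checks on type ... */
	mutex_lock(&trace_types_lock);
	tracing_selftest_running = true;
	/* ... add the tracer to the list, run its startup selftest ... */
	tracing_selftest_running = false;
	mutex_unlock(&trace_types_lock);

	return ret;
}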