tracing: Fix race in trace_open and buffer resize call
The following race can occur if trace_open and a resize of
the cpu buffer are running in parallel on different cpus:
CPUX CPUY
ring_buffer_resize
atomic_read(&buffer->resize_disabled)
tracing_open
tracing_reset_online_cpus
ring_buffer_reset_cpu
rb_reset_cpu
rb_update_pages
remove/insert pages
resetting pointer
This race can cause a data abort or sometimes an infinite loop in
rb_remove_pages and rb_insert_pages while checking pages
for sanity.
Take the buffer lock to fix this.
Link: https://lkml.kernel.org/r/1601976833-24377-1-git-send-email-gkohli@codeaurora.org
Cc: stable@vger.kernel.org
Fixes: b23d7a5f4a
("ring-buffer: speed up buffer resets by avoiding synchronize_rcu for each CPU")
Signed-off-by: Gaurav Kohli <gkohli@codeaurora.org>
Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
zero-sugar-mainline-defconfig
parent
6d9bd13945
commit
bbeb97464e
|
@ -4866,6 +4866,9 @@ void ring_buffer_reset_cpu(struct trace_buffer *buffer, int cpu)
|
||||||
if (!cpumask_test_cpu(cpu, buffer->cpumask))
|
if (!cpumask_test_cpu(cpu, buffer->cpumask))
|
||||||
return;
|
return;
|
||||||
|
|
||||||
|
/* prevent another thread from changing buffer sizes */
|
||||||
|
mutex_lock(&buffer->mutex);
|
||||||
|
|
||||||
atomic_inc(&cpu_buffer->resize_disabled);
|
atomic_inc(&cpu_buffer->resize_disabled);
|
||||||
atomic_inc(&cpu_buffer->record_disabled);
|
atomic_inc(&cpu_buffer->record_disabled);
|
||||||
|
|
||||||
|
@ -4876,6 +4879,8 @@ void ring_buffer_reset_cpu(struct trace_buffer *buffer, int cpu)
|
||||||
|
|
||||||
atomic_dec(&cpu_buffer->record_disabled);
|
atomic_dec(&cpu_buffer->record_disabled);
|
||||||
atomic_dec(&cpu_buffer->resize_disabled);
|
atomic_dec(&cpu_buffer->resize_disabled);
|
||||||
|
|
||||||
|
mutex_unlock(&buffer->mutex);
|
||||||
}
|
}
|
||||||
EXPORT_SYMBOL_GPL(ring_buffer_reset_cpu);
|
EXPORT_SYMBOL_GPL(ring_buffer_reset_cpu);
|
||||||
|
|
||||||
|
@ -4889,6 +4894,9 @@ void ring_buffer_reset_online_cpus(struct trace_buffer *buffer)
|
||||||
struct ring_buffer_per_cpu *cpu_buffer;
|
struct ring_buffer_per_cpu *cpu_buffer;
|
||||||
int cpu;
|
int cpu;
|
||||||
|
|
||||||
|
/* prevent another thread from changing buffer sizes */
|
||||||
|
mutex_lock(&buffer->mutex);
|
||||||
|
|
||||||
for_each_online_buffer_cpu(buffer, cpu) {
|
for_each_online_buffer_cpu(buffer, cpu) {
|
||||||
cpu_buffer = buffer->buffers[cpu];
|
cpu_buffer = buffer->buffers[cpu];
|
||||||
|
|
||||||
|
@ -4907,6 +4915,8 @@ void ring_buffer_reset_online_cpus(struct trace_buffer *buffer)
|
||||||
atomic_dec(&cpu_buffer->record_disabled);
|
atomic_dec(&cpu_buffer->record_disabled);
|
||||||
atomic_dec(&cpu_buffer->resize_disabled);
|
atomic_dec(&cpu_buffer->resize_disabled);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
mutex_unlock(&buffer->mutex);
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
|
|
Loading…
Reference in New Issue