treewide: Rename rcu_dereference_raw_notrace() to _check()
The rcu_dereference_raw_notrace() API name is confusing. It is equivalent to rcu_dereference_raw() except that it also does sparse pointer checking. There are only a few users of rcu_dereference_raw_notrace(). This patch renames all of them to rcu_dereference_raw_check(), with the "_check()" suffix indicating sparse checking. Signed-off-by: Joel Fernandes (Google) <joel@joelfernandes.org> [ paulmck: Fix checkpatch warnings about parentheses. ] Signed-off-by: Paul E. McKenney <paulmck@linux.ibm.com>alistair/sunxi64-5.4-dsi
parent
609488bc97
commit
0a5b99f578
|
@@ -2512,7 +2512,7 @@ disabled across the entire RCU read-side critical section.
|
||||||
<p>
|
<p>
|
||||||
It is possible to use tracing on RCU code, but tracing itself
|
It is possible to use tracing on RCU code, but tracing itself
|
||||||
uses RCU.
|
uses RCU.
|
||||||
For this reason, <tt>rcu_dereference_raw_notrace()</tt>
|
For this reason, <tt>rcu_dereference_raw_check()</tt>
|
||||||
is provided for use by tracing, which avoids the destructive
|
is provided for use by tracing, which avoids the destructive
|
||||||
recursion that could otherwise ensue.
|
recursion that could otherwise ensue.
|
||||||
This API is also used by virtualization in some architectures,
|
This API is also used by virtualization in some architectures,
|
||||||
|
|
|
@@ -535,7 +535,7 @@ static inline void note_hpte_modification(struct kvm *kvm,
|
||||||
*/
|
*/
|
||||||
static inline struct kvm_memslots *kvm_memslots_raw(struct kvm *kvm)
|
static inline struct kvm_memslots *kvm_memslots_raw(struct kvm *kvm)
|
||||||
{
|
{
|
||||||
return rcu_dereference_raw_notrace(kvm->memslots[0]);
|
return rcu_dereference_raw_check(kvm->memslots[0]);
|
||||||
}
|
}
|
||||||
|
|
||||||
extern void kvmppc_mmu_debugfs_init(struct kvm *kvm);
|
extern void kvmppc_mmu_debugfs_init(struct kvm *kvm);
|
||||||
|
|
|
@@ -622,7 +622,7 @@ static inline void hlist_add_behind_rcu(struct hlist_node *n,
|
||||||
* as long as the traversal is guarded by rcu_read_lock().
|
* as long as the traversal is guarded by rcu_read_lock().
|
||||||
*/
|
*/
|
||||||
#define hlist_for_each_entry_rcu(pos, head, member) \
|
#define hlist_for_each_entry_rcu(pos, head, member) \
|
||||||
for (pos = hlist_entry_safe (rcu_dereference_raw(hlist_first_rcu(head)),\
|
for (pos = hlist_entry_safe(rcu_dereference_raw(hlist_first_rcu(head)),\
|
||||||
typeof(*(pos)), member); \
|
typeof(*(pos)), member); \
|
||||||
pos; \
|
pos; \
|
||||||
pos = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu(\
|
pos = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu(\
|
||||||
|
@@ -642,10 +642,10 @@ static inline void hlist_add_behind_rcu(struct hlist_node *n,
|
||||||
* not do any RCU debugging or tracing.
|
* not do any RCU debugging or tracing.
|
||||||
*/
|
*/
|
||||||
#define hlist_for_each_entry_rcu_notrace(pos, head, member) \
|
#define hlist_for_each_entry_rcu_notrace(pos, head, member) \
|
||||||
for (pos = hlist_entry_safe (rcu_dereference_raw_notrace(hlist_first_rcu(head)),\
|
for (pos = hlist_entry_safe(rcu_dereference_raw_check(hlist_first_rcu(head)),\
|
||||||
typeof(*(pos)), member); \
|
typeof(*(pos)), member); \
|
||||||
pos; \
|
pos; \
|
||||||
pos = hlist_entry_safe(rcu_dereference_raw_notrace(hlist_next_rcu(\
|
pos = hlist_entry_safe(rcu_dereference_raw_check(hlist_next_rcu(\
|
||||||
&(pos)->member)), typeof(*(pos)), member))
|
&(pos)->member)), typeof(*(pos)), member))
|
||||||
|
|
||||||
/**
|
/**
|
||||||
|
|
|
@@ -476,7 +476,7 @@ do { \
|
||||||
* The no-tracing version of rcu_dereference_raw() must not call
|
* The no-tracing version of rcu_dereference_raw() must not call
|
||||||
* rcu_read_lock_held().
|
* rcu_read_lock_held().
|
||||||
*/
|
*/
|
||||||
#define rcu_dereference_raw_notrace(p) __rcu_dereference_check((p), 1, __rcu)
|
#define rcu_dereference_raw_check(p) __rcu_dereference_check((p), 1, __rcu)
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* rcu_dereference_protected() - fetch RCU pointer when updates prevented
|
* rcu_dereference_protected() - fetch RCU pointer when updates prevented
|
||||||
|
|
|
@@ -6,22 +6,22 @@
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Traverse the ftrace_global_list, invoking all entries. The reason that we
|
* Traverse the ftrace_global_list, invoking all entries. The reason that we
|
||||||
* can use rcu_dereference_raw_notrace() is that elements removed from this list
|
* can use rcu_dereference_raw_check() is that elements removed from this list
|
||||||
* are simply leaked, so there is no need to interact with a grace-period
|
* are simply leaked, so there is no need to interact with a grace-period
|
||||||
* mechanism. The rcu_dereference_raw_notrace() calls are needed to handle
|
* mechanism. The rcu_dereference_raw_check() calls are needed to handle
|
||||||
* concurrent insertions into the ftrace_global_list.
|
* concurrent insertions into the ftrace_global_list.
|
||||||
*
|
*
|
||||||
* Silly Alpha and silly pointer-speculation compiler optimizations!
|
* Silly Alpha and silly pointer-speculation compiler optimizations!
|
||||||
*/
|
*/
|
||||||
#define do_for_each_ftrace_op(op, list) \
|
#define do_for_each_ftrace_op(op, list) \
|
||||||
op = rcu_dereference_raw_notrace(list); \
|
op = rcu_dereference_raw_check(list); \
|
||||||
do
|
do
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Optimized for just a single item in the list (as that is the normal case).
|
* Optimized for just a single item in the list (as that is the normal case).
|
||||||
*/
|
*/
|
||||||
#define while_for_each_ftrace_op(op) \
|
#define while_for_each_ftrace_op(op) \
|
||||||
while (likely(op = rcu_dereference_raw_notrace((op)->next)) && \
|
while (likely(op = rcu_dereference_raw_check((op)->next)) && \
|
||||||
unlikely((op) != &ftrace_list_end))
|
unlikely((op) != &ftrace_list_end))
|
||||||
|
|
||||||
extern struct ftrace_ops __rcu *ftrace_ops_list;
|
extern struct ftrace_ops __rcu *ftrace_ops_list;
|
||||||
|
|
|
@@ -2642,10 +2642,10 @@ static void ftrace_exports(struct ring_buffer_event *event)
|
||||||
|
|
||||||
preempt_disable_notrace();
|
preempt_disable_notrace();
|
||||||
|
|
||||||
export = rcu_dereference_raw_notrace(ftrace_exports_list);
|
export = rcu_dereference_raw_check(ftrace_exports_list);
|
||||||
while (export) {
|
while (export) {
|
||||||
trace_process_export(export, event);
|
trace_process_export(export, event);
|
||||||
export = rcu_dereference_raw_notrace(export->next);
|
export = rcu_dereference_raw_check(export->next);
|
||||||
}
|
}
|
||||||
|
|
||||||
preempt_enable_notrace();
|
preempt_enable_notrace();
|
||||||
|
|
Loading…
Reference in New Issue