Merge tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux

Pull arm64 fixes from Catalin Marinas:

 - smp_mb__before_spinlock() changed to smp_mb() on arm64, since the
   generic definition (a plain smp_wmb()) is not sufficient (see the
   sketch below)

 - avoid a recursive loop with the graph tracer by using
   preempt_(enable|disable)_notrace in _percpu_(read|write)

* tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux:
  arm64: use preempt_disable_notrace in _percpu_read/write
  arm64: spinlocks: implement smp_mb__before_spinlock() as smp_mb()
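
A minimal sketch of the ordering problem behind the first fix; the lock, the
variables and do_wake() below are illustrative assumptions, not code from this
merge. arm64's arch_spin_lock() has only acquire semantics, so a store issued
before spin_lock() may be reordered past the lock acquisition and sink below a
load inside the critical section. The old generic definition, smp_wmb(), only
orders stores against later stores; keeping a store ahead of a later load
needs the full barrier smp_mb().

	#include <linux/spinlock.h>		/* kernel context assumed */

	static DEFINE_SPINLOCK(lock);		/* illustrative lock/data */
	static int cond, task_state;

	static void do_wake(void) { }		/* hypothetical wake-up action */

	static void waker(void)
	{
		WRITE_ONCE(cond, 1);		/* store the sleeper checks */
		smp_mb__before_spinlock();	/* full barrier: the store above
						   must not be reordered with
						   the load below; a write
						   barrier cannot enforce that */
		spin_lock(&lock);
		if (READ_ONCE(task_state) == 1)	/* load inside the section */
			do_wake();
		spin_unlock(&lock);
	}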
commit e45eeb43e6 (merged by Linus Torvalds, 2016-09-09 10:54:29 -07:00)
2 changed files with 14 additions and 4 deletions

diff --git a/arch/arm64/include/asm/percpu.h b/arch/arm64/include/asm/percpu.h
--- a/arch/arm64/include/asm/percpu.h
+++ b/arch/arm64/include/asm/percpu.h
@@ -199,19 +199,19 @@ static inline unsigned long __percpu_xchg(void *ptr, unsigned long val,
 #define _percpu_read(pcp) \
 ({ \
 	typeof(pcp) __retval; \
-	preempt_disable(); \
+	preempt_disable_notrace(); \
 	__retval = (typeof(pcp))__percpu_read(raw_cpu_ptr(&(pcp)), \
 					      sizeof(pcp)); \
-	preempt_enable(); \
+	preempt_enable_notrace(); \
 	__retval; \
 })
 
 #define _percpu_write(pcp, val) \
 do { \
-	preempt_disable(); \
+	preempt_disable_notrace(); \
 	__percpu_write(raw_cpu_ptr(&(pcp)), (unsigned long)(val), \
 				sizeof(pcp)); \
-	preempt_enable(); \
+	preempt_enable_notrace(); \
 } while(0) \
 
 #define _pcp_protect(operation, pcp, val) \
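
Why the _notrace variants matter here, as a hedged sketch; the call chain
below is inferred from the commit subject, not shown in this diff:

	/*
	 * With the function graph tracer enabled, plain preempt_disable()
	 * and preempt_enable() go through the traced preempt_count
	 * accounting. If the tracer's entry code itself touches a per-cpu
	 * variable, _percpu_read() re-enters itself:
	 *
	 *   this_cpu_read(v)
	 *     -> _percpu_read()
	 *          -> preempt_disable()            (traced)
	 *               -> preempt_count_add()
	 *                    -> graph tracer entry code
	 *                         -> per-cpu access
	 *                              -> _percpu_read()   (recursion)
	 *
	 * preempt_disable_notrace()/preempt_enable_notrace() adjust the
	 * preempt count without the traced hooks, breaking the loop.
	 */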

diff --git a/arch/arm64/include/asm/spinlock.h b/arch/arm64/include/asm/spinlock.h
--- a/arch/arm64/include/asm/spinlock.h
+++ b/arch/arm64/include/asm/spinlock.h
@@ -363,4 +363,14 @@ static inline int arch_read_trylock(arch_rwlock_t *rw)
 #define arch_read_relax(lock)	cpu_relax()
 #define arch_write_relax(lock)	cpu_relax()
 
+/*
+ * Accesses appearing in program order before a spin_lock() operation
+ * can be reordered with accesses inside the critical section, by virtue
+ * of arch_spin_lock being constructed using acquire semantics.
+ *
+ * In cases where this is problematic (e.g. try_to_wake_up), an
+ * smp_mb__before_spinlock() can restore the required ordering.
+ */
+#define smp_mb__before_spinlock()	smp_mb()
+
 #endif /* __ASM_SPINLOCK_H */
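
For context, the call-site pattern the new full barrier serves; this
paraphrases the try_to_wake_up() usage named in the comment above from
memory (p, state, cond and flags stand in for the scheduler's variables):

	WRITE_ONCE(cond, 1);		/* the condition the sleeper tests */
	smp_mb__before_spinlock();	/* now smp_mb() on arm64: the store
					   above must not be reordered with
					   the ->state load below */
	raw_spin_lock_irqsave(&p->pi_lock, flags);
	if (!(p->state & state))	/* would miss a concurrent sleeper
					   without the full barrier */
		goto out;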