locking,arch,xtensa: Fold atomic_ops

Many of the atomic op implementations are the same except for one
instruction; fold the lot into a few CPP macros and reduce LoC.

This also prepares for easy addition of new ops.

Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Cc: Chris Zankel <chris@zankel.net>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Max Filippov <jcmvbkbc@gmail.com>
Cc: Geert Uytterhoeven <geert@linux-m68k.org>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: linux-xtensa@linux-xtensa.org
Link: http://lkml.kernel.org/r/20140508135852.879575796@infradead.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
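
For illustration only, a minimal user-space sketch of the same fold-into-CPP-macros pattern the patch applies (this is not the kernel code): the demo_t type, the demo_* names and the GCC __atomic builtins are hypothetical stand-ins for atomic_t and the xtensa assembly in the diff below.

#include <stdio.h>

typedef struct { int counter; } demo_t;

/* One template generates demo_add()/demo_sub(). */
#define DEMO_OP(op)							\
static inline void demo_##op(int i, demo_t *v)				\
{									\
	__atomic_fetch_##op(&v->counter, i, __ATOMIC_RELAXED);		\
}

/* One template generates demo_add_return()/demo_sub_return(). */
#define DEMO_OP_RETURN(op)						\
static inline int demo_##op##_return(int i, demo_t *v)			\
{									\
	return __atomic_##op##_fetch(&v->counter, i, __ATOMIC_RELAXED);\
}

#define DEMO_OPS(op) DEMO_OP(op) DEMO_OP_RETURN(op)

DEMO_OPS(add)	/* demo_add(), demo_add_return() */
DEMO_OPS(sub)	/* demo_sub(), demo_sub_return() */

#undef DEMO_OPS
#undef DEMO_OP_RETURN
#undef DEMO_OP

int main(void)
{
	demo_t v = { .counter = 0 };

	demo_add(5, &v);
	printf("%d\n", demo_sub_return(2, &v));	/* prints 3 */
	return 0;
}

Adding a new operation then costs one extra DEMO_OPS()/ATOMIC_OPS() line, which is the "easy addition of new ops" the changelog refers to.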
--- a/arch/xtensa/include/asm/atomic.h
+++ b/arch/xtensa/include/asm/atomic.h
@@ -58,165 +58,96 @@
*/
#define atomic_set(v,i) ((v)->counter = (i))
-/**
- * atomic_add - add integer to atomic variable
- * @i: integer value to add
- * @v: pointer of type atomic_t
- *
- * Atomically adds @i to @v.
- */
-static inline void atomic_add(int i, atomic_t * v)
-{
 #if XCHAL_HAVE_S32C1I
-	unsigned long tmp;
-	int result;
+#define ATOMIC_OP(op)						\
+static inline void atomic_##op(int i, atomic_t * v)		\
+{								\
+	unsigned long tmp;					\
+	int result;						\
+								\
+	__asm__ __volatile__(					\
+			"1: l32i %1, %3, 0\n"			\
+			" wsr %1, scompare1\n"			\
+			" " #op " %0, %1, %2\n"			\
+			" s32c1i %0, %3, 0\n"			\
+			" bne %0, %1, 1b\n"			\
+			: "=&a" (result), "=&a" (tmp)		\
+			: "a" (i), "a" (v)			\
+			: "memory"				\
+			);					\
+}								\
-	__asm__ __volatile__(
-			"1: l32i %1, %3, 0\n"
-			" wsr %1, scompare1\n"
-			" add %0, %1, %2\n"
-			" s32c1i %0, %3, 0\n"
-			" bne %0, %1, 1b\n"
-			: "=&a" (result), "=&a" (tmp)
-			: "a" (i), "a" (v)
-			: "memory"
-			);
-#else
-	unsigned int vval;
-	__asm__ __volatile__(
-			" rsil a15, "__stringify(LOCKLEVEL)"\n"
-			" l32i %0, %2, 0\n"
-			" add %0, %0, %1\n"
-			" s32i %0, %2, 0\n"
-			" wsr a15, ps\n"
-			" rsync\n"
-			: "=&a" (vval)
-			: "a" (i), "a" (v)
-			: "a15", "memory"
-			);
-#endif
-}
+#define ATOMIC_OP_RETURN(op)					\
+static inline int atomic_##op##_return(int i, atomic_t * v)	\
+{								\
+	unsigned long tmp;					\
+	int result;						\
+								\
+	__asm__ __volatile__(					\
+			"1: l32i %1, %3, 0\n"			\
+			" wsr %1, scompare1\n"			\
+			" " #op " %0, %1, %2\n"			\
+			" s32c1i %0, %3, 0\n"			\
+			" bne %0, %1, 1b\n"			\
+			" " #op " %0, %0, %2\n"			\
+			: "=&a" (result), "=&a" (tmp)		\
+			: "a" (i), "a" (v)			\
+			: "memory"				\
+			);					\
+								\
+	return result;						\
+}
-/**
- * atomic_sub - subtract the atomic variable
- * @i: integer value to subtract
- * @v: pointer of type atomic_t
- *
- * Atomically subtracts @i from @v.
- */
-static inline void atomic_sub(int i, atomic_t *v)
-{
-#if XCHAL_HAVE_S32C1I
-	unsigned long tmp;
-	int result;
+#else /* XCHAL_HAVE_S32C1I */
-	__asm__ __volatile__(
-			"1: l32i %1, %3, 0\n"
-			" wsr %1, scompare1\n"
-			" sub %0, %1, %2\n"
-			" s32c1i %0, %3, 0\n"
-			" bne %0, %1, 1b\n"
-			: "=&a" (result), "=&a" (tmp)
-			: "a" (i), "a" (v)
-			: "memory"
-			);
-#else
-	unsigned int vval;
+#define ATOMIC_OP(op)						\
+static inline void atomic_##op(int i, atomic_t * v)		\
+{								\
+	unsigned int vval;					\
+								\
+	__asm__ __volatile__(					\
+			" rsil a15, "__stringify(LOCKLEVEL)"\n"\
+			" l32i %0, %2, 0\n"			\
+			" " #op " %0, %0, %1\n"			\
+			" s32i %0, %2, 0\n"			\
+			" wsr a15, ps\n"			\
+			" rsync\n"				\
+			: "=&a" (vval)				\
+			: "a" (i), "a" (v)			\
+			: "a15", "memory"			\
+			);					\
+}								\
-	__asm__ __volatile__(
-			" rsil a15, "__stringify(LOCKLEVEL)"\n"
-			" l32i %0, %2, 0\n"
-			" sub %0, %0, %1\n"
-			" s32i %0, %2, 0\n"
-			" wsr a15, ps\n"
-			" rsync\n"
-			: "=&a" (vval)
-			: "a" (i), "a" (v)
-			: "a15", "memory"
-			);
-#endif
-}
+#define ATOMIC_OP_RETURN(op)					\
+static inline int atomic_##op##_return(int i, atomic_t * v)	\
+{								\
+	unsigned int vval;					\
+								\
+	__asm__ __volatile__(					\
+			" rsil a15,"__stringify(LOCKLEVEL)"\n"	\
+			" l32i %0, %2, 0\n"			\
+			" " #op " %0, %0, %1\n"			\
+			" s32i %0, %2, 0\n"			\
+			" wsr a15, ps\n"			\
+			" rsync\n"				\
+			: "=&a" (vval)				\
+			: "a" (i), "a" (v)			\
+			: "a15", "memory"			\
+			);					\
+								\
+	return vval;						\
+}
-/*
- * We use atomic_{add|sub}_return to define other functions.
- */
+#endif /* XCHAL_HAVE_S32C1I */
-static inline int atomic_add_return(int i, atomic_t * v)
-{
-#if XCHAL_HAVE_S32C1I
-	unsigned long tmp;
-	int result;
+#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op)
-	__asm__ __volatile__(
-			"1: l32i %1, %3, 0\n"
-			" wsr %1, scompare1\n"
-			" add %0, %1, %2\n"
-			" s32c1i %0, %3, 0\n"
-			" bne %0, %1, 1b\n"
-			" add %0, %0, %2\n"
-			: "=&a" (result), "=&a" (tmp)
-			: "a" (i), "a" (v)
-			: "memory"
-			);
+ATOMIC_OPS(add)
+ATOMIC_OPS(sub)
-	return result;
-#else
-	unsigned int vval;
-	__asm__ __volatile__(
-			" rsil a15,"__stringify(LOCKLEVEL)"\n"
-			" l32i %0, %2, 0\n"
-			" add %0, %0, %1\n"
-			" s32i %0, %2, 0\n"
-			" wsr a15, ps\n"
-			" rsync\n"
-			: "=&a" (vval)
-			: "a" (i), "a" (v)
-			: "a15", "memory"
-			);
-	return vval;
-#endif
-}
-static inline int atomic_sub_return(int i, atomic_t * v)
-{
-#if XCHAL_HAVE_S32C1I
-	unsigned long tmp;
-	int result;
-	__asm__ __volatile__(
-			"1: l32i %1, %3, 0\n"
-			" wsr %1, scompare1\n"
-			" sub %0, %1, %2\n"
-			" s32c1i %0, %3, 0\n"
-			" bne %0, %1, 1b\n"
-			" sub %0, %0, %2\n"
-			: "=&a" (result), "=&a" (tmp)
-			: "a" (i), "a" (v)
-			: "memory"
-			);
-	return result;
-#else
-	unsigned int vval;
-	__asm__ __volatile__(
-			" rsil a15,"__stringify(LOCKLEVEL)"\n"
-			" l32i %0, %2, 0\n"
-			" sub %0, %0, %1\n"
-			" s32i %0, %2, 0\n"
-			" wsr a15, ps\n"
-			" rsync\n"
-			: "=&a" (vval)
-			: "a" (i), "a" (v)
-			: "a15", "memory"
-			);
-	return vval;
-#endif
-}
+#undef ATOMIC_OPS
+#undef ATOMIC_OP_RETURN
+#undef ATOMIC_OP
/**
* atomic_sub_and_test - subtract value from variable and test result