1
0
Fork 0

nios2: Switch to generic __xchg()

The generic __xchg() implementation present in asm-generic/cmpxchg.h is
correct on nios2 and even generates the same code. Switch to this generic
implementation to trim down the number of ad-hoc copies of the code.

Signed-off-by: Marek Vasut <marex@denx.de>
Acked-by: Ley Foon Tan <lftan@altera.com>
hifive-unleashed-5.1
Marek Vasut 2015-09-30 22:08:00 +08:00 committed by Ley Foon Tan
parent 4db2196d0c
commit 713e9b802e
1 changed file with 0 additions and 47 deletions

View File

@ -9,53 +9,6 @@
#ifndef _ASM_NIOS2_CMPXCHG_H
#define _ASM_NIOS2_CMPXCHG_H
#include <linux/irqflags.h>
/*
 * xchg() - atomically exchange *ptr with x; evaluates to the old value,
 * cast back to the type of *ptr. Dispatches on sizeof(*(ptr)) to the
 * size-specific asm sequence in __xchg() below.
 */
#define xchg(ptr, x) \
((__typeof__(*(ptr)))__xchg((unsigned long)(x), (ptr), sizeof(*(ptr))))
/*
 * __xg() wraps a pointer in a deliberately oversized dummy struct so that
 * the "m" asm constraint on *__xg(ptr) tells the compiler the whole
 * pointed-to object may be accessed, preventing it from caching any part
 * of it across the asm statement. The 100-element array is never
 * instantiated; only the pointer cast is used.
 */
struct __xchg_dummy { unsigned long a[100]; };
#define __xg(x) ((volatile struct __xchg_dummy *)(x))
/*
 * __xchg - atomically exchange a value with a memory location.
 * @x:    new value to store
 * @ptr:  location to exchange with
 * @size: width of the access in bytes (1, 2 or 4)
 *
 * Returns the previous value at @ptr, zero-extended to unsigned long.
 *
 * Atomicity is achieved by masking interrupts around the load/store pair
 * rather than with a hardware swap instruction.
 * NOTE(review): this is only sufficient on a uniprocessor — assumes no
 * SMP on this configuration; confirm before reuse.
 *
 * Fix over the previous version: sizes other than 1/2/4 used to fall
 * through the switch and return an uninitialized tmp (undefined
 * behavior); tmp is now zero-initialized and an explicit default case
 * documents the unsupported path.
 */
static inline unsigned long __xchg(unsigned long x, volatile void *ptr,
	int size)
{
	unsigned long tmp = 0, flags;

	local_irq_save(flags);

	switch (size) {
	case 1:
		/* Load old byte into tmp, then store the new one. */
		__asm__ __volatile__(
			"ldb %0, %2\n"
			"stb %1, %2\n"
			: "=&r" (tmp)
			: "r" (x), "m" (*__xg(ptr))
			: "memory");
		break;
	case 2:
		/* Halfword variant of the same load/store pair. */
		__asm__ __volatile__(
			"ldh %0, %2\n"
			"sth %1, %2\n"
			: "=&r" (tmp)
			: "r" (x), "m" (*__xg(ptr))
			: "memory");
		break;
	case 4:
		/* Word variant of the same load/store pair. */
		__asm__ __volatile__(
			"ldw %0, %2\n"
			"stw %1, %2\n"
			: "=&r" (tmp)
			: "r" (x), "m" (*__xg(ptr))
			: "memory");
		break;
	default:
		/* Unsupported access size: nothing exchanged, return 0. */
		break;
	}

	local_irq_restore(flags);
	return tmp;
}
#include <asm-generic/cmpxchg.h>
#include <asm-generic/cmpxchg-local.h>
#endif /* _ASM_NIOS2_CMPXCHG_H */