1
0
Fork 0

x86: Use REP BSF unconditionally

Make "REP BSF" unconditional, as per the suggestion of hpa
and Linus, this removes the insane BSF_PREFIX conditional
and simplifies the logic.

Suggested-by: "H. Peter Anvin" <hpa@zytor.com>
Suggested-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Jan Beulich <jbeulich@suse.com>
Link: http://lkml.kernel.org/r/5058741E020000780009C014@nat28.tlf.novell.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
hifive-unleashed-5.1
Jan Beulich 2012-09-18 12:16:14 +01:00 committed by Ingo Molnar
parent 5870661c09
commit e26a44a2d6
1 changed file with 2 additions and 17 deletions

View File

@ -347,19 +347,6 @@ static int test_bit(int nr, const volatile unsigned long *addr);
? constant_test_bit((nr), (addr)) \
: variable_test_bit((nr), (addr)))
#if (defined(CONFIG_X86_GENERIC) || defined(CONFIG_GENERIC_CPU)) \
&& !defined(CONFIG_CC_OPTIMIZE_FOR_SIZE)
/*
* Since BSF and TZCNT have sufficiently similar semantics for the purposes
* for which we use them here, BMI-capable hardware will decode the prefixed
* variant as 'tzcnt ...' and may execute that faster than 'bsf ...', while
* older hardware will ignore the REP prefix and decode it as 'bsf ...'.
*/
# define BSF_PREFIX "rep;"
#else
# define BSF_PREFIX
#endif
/**
* __ffs - find first set bit in word
* @word: The word to search
@ -368,7 +355,7 @@ static int test_bit(int nr, const volatile unsigned long *addr);
*/
static inline unsigned long __ffs(unsigned long word)
{
asm(BSF_PREFIX "bsf %1,%0"
asm("rep; bsf %1,%0"
: "=r" (word)
: "rm" (word));
return word;
@ -382,14 +369,12 @@ static inline unsigned long __ffs(unsigned long word)
*/
static inline unsigned long ffz(unsigned long word)
{
asm(BSF_PREFIX "bsf %1,%0"
asm("rep; bsf %1,%0"
: "=r" (word)
: "r" (~word));
return word;
}
#undef BSF_PREFIX
/*
* __fls: find last set bit in word
* @word: The word to search