1
0
Fork 0

hash: add explicit u32 and u64 versions of hash

The 32-bit version is more efficient (and apparently gives better hash
results than the 64-bit version), so users who are only hashing a 32-bit
quantity can now opt to use the 32-bit version explicitly, rather than
promoting to a long.

Signed-off-by: Matthew Wilcox <willy@linux.intel.com>
Cc: William Lee Irwin III <wli@holomorphy.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
hifive-unleashed-5.1
Matthew Wilcox 2008-02-06 01:36:14 -08:00 committed by Linus Torvalds
parent d9ae90ac4b
commit 4e701482d1
1 changed files with 28 additions and 16 deletions

View File

@ -1,6 +1,6 @@
#ifndef _LINUX_HASH_H #ifndef _LINUX_HASH_H
#define _LINUX_HASH_H #define _LINUX_HASH_H
/* Fast hashing routine for a long. /* Fast hashing routine for ints, longs and pointers.
(C) 2002 William Lee Irwin III, IBM */ (C) 2002 William Lee Irwin III, IBM */
/* /*
@ -13,23 +13,30 @@
* them can use shifts and additions instead of multiplications for * them can use shifts and additions instead of multiplications for
* machines where multiplications are slow. * machines where multiplications are slow.
*/ */
#if BITS_PER_LONG == 32
#include <asm/types.h>
/* 2^31 + 2^29 - 2^25 + 2^22 - 2^19 - 2^16 + 1 */ /* 2^31 + 2^29 - 2^25 + 2^22 - 2^19 - 2^16 + 1 */
#define GOLDEN_RATIO_PRIME 0x9e370001UL #define GOLDEN_RATIO_PRIME_32 0x9e370001UL
#elif BITS_PER_LONG == 64
/* 2^63 + 2^61 - 2^57 + 2^54 - 2^51 - 2^18 + 1 */ /* 2^63 + 2^61 - 2^57 + 2^54 - 2^51 - 2^18 + 1 */
#define GOLDEN_RATIO_PRIME 0x9e37fffffffc0001UL #define GOLDEN_RATIO_PRIME_64 0x9e37fffffffc0001UL
#if BITS_PER_LONG == 32
#define GOLDEN_RATIO_PRIME GOLDEN_RATIO_PRIME_32
#define hash_long(val, bits) hash_32(val, bits)
#elif BITS_PER_LONG == 64
#define hash_long(val, bits) hash_64(val, bits)
#define GOLDEN_RATIO_PRIME GOLDEN_RATIO_PRIME_64
#else #else
#error Define GOLDEN_RATIO_PRIME for your wordsize. #error Wordsize not 32 or 64
#endif #endif
/*
 * NOTE(review): this span is a side-by-side diff rendering scraped from a
 * web commit view — each physical line fuses the OLD text (left half) with
 * the NEW text (right half), and the "@ -42,15 +49,20" hunk header below
 * elides part of the shift/add chain, so the function body shown here is
 * incomplete as displayed.
 *
 * Old side: hash_long(unsigned long val, unsigned int bits) — uses the
 * shift/add sequence only under BITS_PER_LONG == 64, else a plain multiply
 * by GOLDEN_RATIO_PRIME (see the #else branch fragment below).
 * New side: hash_64(u64 val, unsigned int bits) — always 64-bit; the
 * shift/add sequence is an inlined multiply by GOLDEN_RATIO_PRIME_64
 * (gcc reportedly cannot derive it on its own for 64 bits, per the
 * "Sigh" comment), and the result keeps the top @bits bits, which are
 * the best-mixed. TODO confirm against the clean upstream hash.h.
 */
static inline unsigned long hash_long(unsigned long val, unsigned int bits) static inline u64 hash_64(u64 val, unsigned int bits)
{ {
unsigned long hash = val; u64 hash = val;
#if BITS_PER_LONG == 64
/* Sigh, gcc can't optimise this alone like it does for 32 bits. */ /* Sigh, gcc can't optimise this alone like it does for 32 bits. */
unsigned long n = hash; u64 n = hash;
n <<= 18; n <<= 18;
hash -= n; hash -= n;
n <<= 33; n <<= 33;
@ -42,15 +49,20 @@ static inline unsigned long hash_long(unsigned long val, unsigned int bits)
hash += n; hash += n;
n <<= 2; n <<= 2;
hash += n; hash += n;
#else
/* On some cpus multiply is faster, on others gcc will do shifts */
hash *= GOLDEN_RATIO_PRIME;
#endif
/* High bits are more random, so use them. */ /* High bits are more random, so use them. */
return hash >> (BITS_PER_LONG - bits); return hash >> (64 - bits);
} }
/*
 * hash_32 - hash a 32-bit value down to a @bits-bit index
 * @val:  value to hash
 * @bits: width of the wanted hash index, in bits
 *
 * One multiply by the 32-bit golden-ratio prime scrambles the input
 * (on some cpus the multiply is fastest; on others gcc lowers it to
 * shifts and adds).  The high bits of the product are the best mixed,
 * so the result is taken from the top of the word.
 */
static inline u32 hash_32(u32 val, unsigned int bits)
{
	return (GOLDEN_RATIO_PRIME_32 * val) >> (32 - bits);
}
static inline unsigned long hash_ptr(void *ptr, unsigned int bits) static inline unsigned long hash_ptr(void *ptr, unsigned int bits)
{ {
return hash_long((unsigned long)ptr, bits); return hash_long((unsigned long)ptr, bits);