Add support for VNNI

Adds support for Vector Neural Network Instructions (AVX-512 VNNI), as available on Intel Cascade Lake CPUs.

The _mm512_dpbusd_epi32() intrinsic (vpdpbusd instruction) is tailor-made for NNUE.
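
For intuition, here is a scalar model of what one 32-bit lane of vpdpbusd computes (my illustration, not part of the patch): it fuses the _mm512_maddubs_epi16 / _mm512_madd_epi16 / _mm512_add_epi32 sequence of the plain avx512 path into a single instruction.

    #include <cstdint>

    // One 32-bit lane of vpdpbusd: acc += sum of four u8*i8 products.
    // Unlike _mm512_maddubs_epi16, there is no intermediate 16-bit
    // saturation; products are widened and summed in 32 bits.
    int32_t dpbusd_lane(int32_t acc, const uint8_t a[4], const int8_t b[4]) {
        for (int k = 0; k < 4; ++k)
            acc += int32_t(a[k]) * int32_t(b[k]);
        return acc;
    }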

On a Cascade Lake CPU (AWS c5.24xlarge, gcc 10), NNUE eval reaches roughly 78% of the nps of classical eval (single-core test).

bench 1024 1 24 default depth:

target     classical       NNUE    NNUE/classical (%)
vnni         2207232    1725987    78.20
avx512       2216789    1671734    75.41
avx2         2194006    1611263    73.44
modern       2185001    1352469    61.90

closes https://github.com/official-stockfish/Stockfish/pull/2987

No functional change
pull/2996/head
mstembera 2020-08-11 12:59:39 -07:00 committed by Joost VandeVondele
parent 6bc0256292
commit dd63b98fb0
3 changed files with 41 additions and 1 deletion

src/Makefile

@@ -73,6 +73,7 @@ endif
# avx2 = yes/no --- -mavx2 --- Use Intel Advanced Vector Extensions 2
# pext = yes/no --- -DUSE_PEXT --- Use pext x86_64 asm-instruction
# avx512 = yes/no --- -mavx512bw --- Use Intel Advanced Vector Extensions 512
# vnni = yes/no --- -mavx512vnni --- Use Intel Vector Neural Network Instructions 512
# neon = yes/no --- -DUSE_NEON --- Use ARM SIMD architecture
#
# Note that Makefile is space sensitive, so when adding new architectures
@@ -93,6 +94,7 @@ sse41 = no
avx2 = no
pext = no
avx512 = no
vnni = no
neon = no
ARCH = x86-64-modern
@@ -190,6 +192,19 @@ ifeq ($(ARCH),x86-64-avx512)
avx512 = yes
endif
ifeq ($(ARCH),x86-64-vnni)
arch = x86_64
prefetch = yes
popcnt = yes
sse = yes
ssse3 = yes
sse41 = yes
avx2 = yes
pext = yes
avx512 = yes
vnni = yes
endif
ifeq ($(ARCH),armv7)
arch = armv7
prefetch = yes
@@ -420,6 +435,13 @@ ifeq ($(avx512),yes)
endif
endif
ifeq ($(vnni),yes)
CXXFLAGS += -DUSE_VNNI
ifeq ($(comp),$(filter $(comp),gcc clang mingw))
CXXFLAGS += -mavx512vnni -mavx512dq -mavx512vl
endif
endif
ifeq ($(sse41),yes)
CXXFLAGS += -DUSE_SSE41
ifeq ($(comp),$(filter $(comp),gcc clang mingw))
@@ -522,6 +544,7 @@ help:
@echo ""
@echo "Supported archs:"
@echo ""
@echo "x86-64-vnni > x86 64-bit with vnni support"
@echo "x86-64-avx512 > x86 64-bit with avx512 support"
@echo "x86-64-bmi2 > x86 64-bit with bmi2 support"
@echo "x86-64-avx2 > x86 64-bit with avx2 support"
@@ -640,6 +663,7 @@ config-sanity:
@echo "avx2: '$(avx2)'"
@echo "pext: '$(pext)'"
@echo "avx512: '$(avx512)'"
@echo "vnni: '$(vnni)'"
@echo "neon: '$(neon)'"
@echo ""
@echo "Flags:"
@@ -664,6 +688,7 @@ config-sanity:
@test "$(avx2)" = "yes" || test "$(avx2)" = "no"
@test "$(pext)" = "yes" || test "$(pext)" = "no"
@test "$(avx512)" = "yes" || test "$(avx512)" = "no"
@test "$(vnni)" = "yes" || test "$(vnni)" = "no"
@test "$(neon)" = "yes" || test "$(neon)" = "no"
@test "$(comp)" = "gcc" || test "$(comp)" = "icc" || test "$(comp)" = "mingw" || test "$(comp)" = "clang"

src/misc.cpp

@@ -219,6 +219,9 @@ const std::string compiler_info() {
compiler += "\nCompilation settings include: ";
compiler += (Is64Bit ? " 64bit" : " 32bit");
#if defined(USE_VNNI)
compiler += " VNNI";
#endif
#if defined(USE_AVX512)
compiler += " AVX512";
#endif

src/nnue/layers/affine_transform.h

@@ -79,8 +79,10 @@ namespace Eval::NNUE::Layers {
#if defined(USE_AVX512)
constexpr IndexType kNumChunks = kPaddedInputDimensions / (kSimdWidth * 2);
const auto input_vector = reinterpret_cast<const __m512i*>(input);
#if !defined(USE_VNNI)
const __m512i kOnes = _mm512_set1_epi16(1);
#endif
#elif defined(USE_AVX2)
constexpr IndexType kNumChunks = kPaddedInputDimensions / kSimdWidth;
@@ -113,9 +115,13 @@ namespace Eval::NNUE::Layers {
__m512i sum = _mm512_setzero_si512();
const auto row = reinterpret_cast<const __m512i*>(&weights_[offset]);
for (IndexType j = 0; j < kNumChunks; ++j) {
#if defined(USE_VNNI)
sum = _mm512_dpbusd_epi32(sum, _mm512_loadA_si512(&input_vector[j]), _mm512_load_si512(&row[j]));
#else
__m512i product = _mm512_maddubs_epi16(_mm512_loadA_si512(&input_vector[j]), _mm512_load_si512(&row[j]));
product = _mm512_madd_epi16(product, kOnes);
sum = _mm512_add_epi32(sum, product);
#endif
}
// Note: Changing kMaxSimdWidth from 32 to 64 breaks loading existing networks.
@@ -125,8 +131,14 @@ namespace Eval::NNUE::Layers {
{
const auto iv256 = reinterpret_cast<const __m256i*>(&input_vector[kNumChunks]);
const auto row256 = reinterpret_cast<const __m256i*>(&row[kNumChunks]);
#if defined(USE_VNNI)
__m256i product256 = _mm256_dpbusd_epi32(
_mm512_castsi512_si256(sum), _mm256_loadA_si256(&iv256[0]), _mm256_load_si256(&row256[0]));
sum = _mm512_inserti32x8(sum, product256, 0);
#else
__m256i product256 = _mm256_maddubs_epi16(_mm256_loadA_si256(&iv256[0]), _mm256_load_si256(&row256[0]));
sum = _mm512_add_epi32(sum, _mm512_cvtepi16_epi32(product256));
#endif
}
output[i] = _mm512_reduce_add_epi32(sum) + biases_[i];
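
A note on why this is safely "No functional change" (my reading, not stated in the patch): _mm512_maddubs_epi16 saturates each 16-bit pairwise sum, while vpdpbusd widens the u8*i8 products to 32 bits before accumulating, so the two paths can differ in general; two products can reach 2 * 255 * 127 = 64770 > 32767. The inputs here are clipped-ReLU outputs bounded by 127, so a pair sum is at most 2 * 127 * 127 = 32258, just inside int16 range, and the fallback path never actually saturates.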