
Optimize and tidy up affine transform code.

The new network initially caused some issues due to the very narrow neuron set between the first two FC layers; the changes needed to make it work were hacked together. This patch is a more mature approach that makes the affine transform code faster, more readable, and easier to maintain should the layer sizes change again.

The following changes were made:

* ClippedReLU always produces a multiple of 32 outputs. This is about as good a solution for AffineTransform's SIMD requirements as can be had without a bigger rewrite; see the padding sketch after this list.

* All self-contained SIMD helpers are moved to a separate file (simd.h). Inline asm is used to work around GCC's issues with code generation and register assignment. See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=101693, https://godbolt.org/z/da76fY1n7

* AffineTransform has 2 specializations. While this means more lines of code due to the boilerplate, the logic in each is significantly simpler, as the two cases are impossible to combine nicely into one.
 1) The first specialization is for cases with >=128 inputs. It uses a different approach to perform the affine transform and can make full use of AVX512 without any edge cases. It also has higher theoretical throughput because fewer loads are needed in the hot path; only a fixed number of instructions for horizontal additions is required at the end, and their cost is amortized over the large number of inputs.
 2) The second specialization handles smaller layers, where performance is still necessary but edge cases need to be handled. An AVX512 implementation for this was omitted by mistake, a remnant from the temporary implementation for the new... It could easily be reintroduced if needed. A slightly more detailed description of both implementations is in the code.
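
The padding amounts to rounding each layer's output count up to the next multiple of 32 and zero-filling the tail. A minimal sketch of the idea; the exact definition of ceil_to_multiple below is an assumption matching the name used in the diff, not copied from the patch:

    #include <cstdint>

    // Round n up to the nearest multiple of base (assumed semantics of the
    // ceil_to_multiple helper referenced in the diff).
    template <typename IntType>
    constexpr IntType ceil_to_multiple(IntType n, IntType base) {
        return (n + base - 1) / base * base;
    }

    static_assert(ceil_to_multiple<std::uint32_t>( 8, 32) == 32, "padded to one SIMD chunk");
    static_assert(ceil_to_multiple<std::uint32_t>(32, 32) == 32, "already aligned");
    static_assert(ceil_to_multiple<std::uint32_t>(33, 32) == 64, "padded to two chunks");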

Overall it should be a minor speedup, as shown on fishtest:

passed STC:
LLR: 2.96 (-2.94,2.94) <-0.50,2.50>
Total: 51520 W: 4074 L: 3888 D: 43558
Ptnml(0-2): 111, 3136, 19097, 3288, 128

and various tests shown in the pull request

closes https://github.com/official-stockfish/Stockfish/pull/3663

No functional change
Tomasz Sobczyk 2021-08-16 12:19:26 +02:00 committed by Joost VandeVondele
parent ccf0239bc4
commit 18dcf1f097
3 changed files with 778 additions and 506 deletions



@@ -35,9 +35,10 @@ namespace Stockfish::Eval::NNUE::Layers {
    static_assert(std::is_same<InputType, std::int32_t>::value, "");

    // Number of input/output dimensions
-   static constexpr IndexType InputDimensions =
-     PreviousLayer::OutputDimensions;
+   static constexpr IndexType InputDimensions = PreviousLayer::OutputDimensions;
    static constexpr IndexType OutputDimensions = InputDimensions;
+   static constexpr IndexType PaddedOutputDimensions =
+     ceil_to_multiple<IndexType>(OutputDimensions, 32);

    // Size of forward propagation buffer used in this layer
    static constexpr std::size_t SelfBufferSize =
@@ -179,6 +180,15 @@ namespace Stockfish::Eval::NNUE::Layers {
        output[i] = static_cast<OutputType>(
            std::max(0, std::min(127, input[i] >> WeightScaleBits)));
      }
+
+     // Affine transform layers expect that there are at least
+     // ceil_to_multiple(OutputDimensions, 32) initialized values.
+     // We cannot do this in the affine transform because it requires
+     // preallocating space here.
+     for (IndexType i = OutputDimensions; i < PaddedOutputDimensions; ++i) {
+       output[i] = 0;
+     }
+
      return output;
    }

src/simd.h 100644

@@ -0,0 +1,341 @@
/*
Stockfish, a UCI chess playing engine derived from Glaurung 2.1
Copyright (C) 2004-2021 The Stockfish developers (see AUTHORS file)
Stockfish is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Stockfish is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef STOCKFISH_SIMD_H_INCLUDED
#define STOCKFISH_SIMD_H_INCLUDED
#if defined(USE_AVX2)
# include <immintrin.h>
#elif defined(USE_SSE41)
# include <smmintrin.h>
#elif defined(USE_SSSE3)
# include <tmmintrin.h>
#elif defined(USE_SSE2)
# include <emmintrin.h>
#elif defined(USE_MMX)
# include <mmintrin.h>
#elif defined(USE_NEON)
# include <arm_neon.h>
#endif
// The inline asm is only safe for GCC, where it is necessary to get good codegen.
// See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=101693
// Clang does fine without it.
// Play around here: https://godbolt.org/z/7EWqrYq51
#if (defined(__GNUC__) && !defined(__clang__) && !defined(__INTEL_COMPILER))
#define USE_INLINE_ASM
#endif
namespace Stockfish::Simd {
#if defined (USE_AVX512)
[[maybe_unused]] static int m512_hadd(__m512i sum, int bias) {
return _mm512_reduce_add_epi32(sum) + bias;
}
/*
Parameters:
sum0 = [zmm0.i128[0], zmm0.i128[1], zmm0.i128[2], zmm0.i128[3]]
sum1 = [zmm1.i128[0], zmm1.i128[1], zmm1.i128[2], zmm1.i128[3]]
sum2 = [zmm2.i128[0], zmm2.i128[1], zmm2.i128[2], zmm2.i128[3]]
sum3 = [zmm3.i128[0], zmm3.i128[1], zmm3.i128[2], zmm3.i128[3]]
Returns:
ret = [
reduce_add_epi32(zmm0.i128[0]), reduce_add_epi32(zmm1.i128[0]), reduce_add_epi32(zmm2.i128[0]), reduce_add_epi32(zmm3.i128[0]),
reduce_add_epi32(zmm0.i128[1]), reduce_add_epi32(zmm1.i128[1]), reduce_add_epi32(zmm2.i128[1]), reduce_add_epi32(zmm3.i128[1]),
reduce_add_epi32(zmm0.i128[2]), reduce_add_epi32(zmm1.i128[2]), reduce_add_epi32(zmm2.i128[2]), reduce_add_epi32(zmm3.i128[2]),
reduce_add_epi32(zmm0.i128[3]), reduce_add_epi32(zmm1.i128[3]), reduce_add_epi32(zmm2.i128[3]), reduce_add_epi32(zmm3.i128[3])
]
*/
[[maybe_unused]] static __m512i m512_hadd128x16_interleave(
__m512i sum0, __m512i sum1, __m512i sum2, __m512i sum3) {
__m512i sum01a = _mm512_unpacklo_epi32(sum0, sum1);
__m512i sum01b = _mm512_unpackhi_epi32(sum0, sum1);
__m512i sum23a = _mm512_unpacklo_epi32(sum2, sum3);
__m512i sum23b = _mm512_unpackhi_epi32(sum2, sum3);
__m512i sum01 = _mm512_add_epi32(sum01a, sum01b);
__m512i sum23 = _mm512_add_epi32(sum23a, sum23b);
__m512i sum0123a = _mm512_unpacklo_epi64(sum01, sum23);
__m512i sum0123b = _mm512_unpackhi_epi64(sum01, sum23);
return _mm512_add_epi32(sum0123a, sum0123b);
}
[[maybe_unused]] static __m128i m512_haddx4(
__m512i sum0, __m512i sum1, __m512i sum2, __m512i sum3,
__m128i bias) {
__m512i sum = m512_hadd128x16_interleave(sum0, sum1, sum2, sum3);
__m256i sum256lo = _mm512_castsi512_si256(sum);
__m256i sum256hi = _mm512_extracti64x4_epi64(sum, 1);
sum256lo = _mm256_add_epi32(sum256lo, sum256hi);
__m128i sum128lo = _mm256_castsi256_si128(sum256lo);
__m128i sum128hi = _mm256_extracti128_si256(sum256lo, 1);
return _mm_add_epi32(_mm_add_epi32(sum128lo, sum128hi), bias);
}
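// Computes acc.i32[i] += dot(a.u8[4i..4i+3], b.i8[4i..4i+3]) for each
// 32-bit lane i. Note that the non-VNNI fallback saturates the
// intermediate 16-bit sums (vpmaddubsw), so it can differ from the VNNI
// path for extreme inputs.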
[[maybe_unused]] static void m512_add_dpbusd_epi32(
__m512i& acc,
__m512i a,
__m512i b) {
# if defined (USE_VNNI)
# if defined (USE_INLINE_ASM)
asm(
"vpdpbusd %[b], %[a], %[acc]\n\t"
: [acc]"+v"(acc)
: [a]"v"(a), [b]"vm"(b)
);
# else
acc = _mm512_dpbusd_epi32(acc, a, b);
# endif
# else
# if defined (USE_INLINE_ASM)
__m512i tmp = _mm512_maddubs_epi16(a, b);
asm(
"vpmaddwd %[tmp], %[ones], %[tmp]\n\t"
"vpaddd %[acc], %[tmp], %[acc]\n\t"
: [acc]"+v"(acc), [tmp]"+&v"(tmp)
: [ones]"v"(_mm512_set1_epi16(1))
);
# else
__m512i product0 = _mm512_maddubs_epi16(a, b);
product0 = _mm512_madd_epi16(product0, _mm512_set1_epi16(1));
acc = _mm512_add_epi32(acc, product0);
# endif
# endif
}
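// As above, but folds two (a, b) pairs into one accumulator update. The
// non-VNNI fallback adds the two 16-bit intermediates with saturation
// (vpaddsw) before widening, so it is not guaranteed to be bit-identical
// to two separate calls.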
[[maybe_unused]] static void m512_add_dpbusd_epi32x2(
__m512i& acc,
__m512i a0, __m512i b0,
__m512i a1, __m512i b1) {
# if defined (USE_VNNI)
# if defined (USE_INLINE_ASM)
asm(
"vpdpbusd %[b0], %[a0], %[acc]\n\t"
"vpdpbusd %[b1], %[a1], %[acc]\n\t"
: [acc]"+v"(acc)
: [a0]"v"(a0), [b0]"vm"(b0), [a1]"v"(a1), [b1]"vm"(b1)
);
# else
acc = _mm512_dpbusd_epi32(acc, a0, b0);
acc = _mm512_dpbusd_epi32(acc, a1, b1);
# endif
# else
# if defined (USE_INLINE_ASM)
__m512i tmp0 = _mm512_maddubs_epi16(a0, b0);
__m512i tmp1 = _mm512_maddubs_epi16(a1, b1);
asm(
"vpaddsw %[tmp0], %[tmp1], %[tmp0]\n\t"
"vpmaddwd %[tmp0], %[ones], %[tmp0]\n\t"
"vpaddd %[acc], %[tmp0], %[acc]\n\t"
: [acc]"+v"(acc), [tmp0]"+&v"(tmp0)
: [tmp1]"v"(tmp1), [ones]"v"(_mm512_set1_epi16(1))
);
# else
__m512i product0 = _mm512_maddubs_epi16(a0, b0);
__m512i product1 = _mm512_maddubs_epi16(a1, b1);
product0 = _mm512_adds_epi16(product0, product1);
product0 = _mm512_madd_epi16(product0, _mm512_set1_epi16(1));
acc = _mm512_add_epi32(acc, product0);
# endif
# endif
}
#endif
#if defined (USE_AVX2)
[[maybe_unused]] static int m256_hadd(__m256i sum, int bias) {
__m128i sum128 = _mm_add_epi32(_mm256_castsi256_si128(sum), _mm256_extracti128_si256(sum, 1));
sum128 = _mm_add_epi32(sum128, _mm_shuffle_epi32(sum128, _MM_PERM_BADC));
sum128 = _mm_add_epi32(sum128, _mm_shuffle_epi32(sum128, _MM_PERM_CDAB));
return _mm_cvtsi128_si32(sum128) + bias;
}
[[maybe_unused]] static __m128i m256_haddx4(
__m256i sum0, __m256i sum1, __m256i sum2, __m256i sum3,
__m128i bias) {
sum0 = _mm256_hadd_epi32(sum0, sum1);
sum2 = _mm256_hadd_epi32(sum2, sum3);
sum0 = _mm256_hadd_epi32(sum0, sum2);
__m128i sum128lo = _mm256_castsi256_si128(sum0);
__m128i sum128hi = _mm256_extracti128_si256(sum0, 1);
return _mm_add_epi32(_mm_add_epi32(sum128lo, sum128hi), bias);
}
[[maybe_unused]] static void m256_add_dpbusd_epi32(
__m256i& acc,
__m256i a,
__m256i b) {
# if defined (USE_VNNI)
# if defined (USE_INLINE_ASM)
asm(
"vpdpbusd %[b], %[a], %[acc]\n\t"
: [acc]"+v"(acc)
: [a]"v"(a), [b]"vm"(b)
);
# else
acc = _mm256_dpbusd_epi32(acc, a, b);
# endif
# else
# if defined (USE_INLINE_ASM)
__m256i tmp = _mm256_maddubs_epi16(a, b);
asm(
"vpmaddwd %[tmp], %[ones], %[tmp]\n\t"
"vpaddd %[acc], %[tmp], %[acc]\n\t"
: [acc]"+v"(acc), [tmp]"+&v"(tmp)
: [ones]"v"(_mm256_set1_epi16(1))
);
# else
__m256i product0 = _mm256_maddubs_epi16(a, b);
product0 = _mm256_madd_epi16(product0, _mm256_set1_epi16(1));
acc = _mm256_add_epi32(acc, product0);
# endif
# endif
}
[[maybe_unused]] static void m256_add_dpbusd_epi32x2(
__m256i& acc,
__m256i a0, __m256i b0,
__m256i a1, __m256i b1) {
# if defined (USE_VNNI)
# if defined (USE_INLINE_ASM)
asm(
"vpdpbusd %[b0], %[a0], %[acc]\n\t"
"vpdpbusd %[b1], %[a1], %[acc]\n\t"
: [acc]"+v"(acc)
: [a0]"v"(a0), [b0]"vm"(b0), [a1]"v"(a1), [b1]"vm"(b1)
);
# else
acc = _mm256_dpbusd_epi32(acc, a0, b0);
acc = _mm256_dpbusd_epi32(acc, a1, b1);
# endif
# else
# if defined (USE_INLINE_ASM)
__m256i tmp0 = _mm256_maddubs_epi16(a0, b0);
__m256i tmp1 = _mm256_maddubs_epi16(a1, b1);
asm(
"vpaddsw %[tmp0], %[tmp1], %[tmp0]\n\t"
"vpmaddwd %[tmp0], %[ones], %[tmp0]\n\t"
"vpaddd %[acc], %[tmp0], %[acc]\n\t"
: [acc]"+v"(acc), [tmp0]"+&v"(tmp0)
: [tmp1]"v"(tmp1), [ones]"v"(_mm256_set1_epi16(1))
);
# else
__m256i product0 = _mm256_maddubs_epi16(a0, b0);
__m256i product1 = _mm256_maddubs_epi16(a1, b1);
product0 = _mm256_adds_epi16(product0, product1);
product0 = _mm256_madd_epi16(product0, _mm256_set1_epi16(1));
acc = _mm256_add_epi32(acc, product0);
# endif
# endif
}
#endif
#if defined (USE_SSSE3)
[[maybe_unused]] static int m128_hadd(__m128i sum, int bias) {
sum = _mm_add_epi32(sum, _mm_shuffle_epi32(sum, 0x4E)); //_MM_PERM_BADC
sum = _mm_add_epi32(sum, _mm_shuffle_epi32(sum, 0xB1)); //_MM_PERM_CDAB
return _mm_cvtsi128_si32(sum) + bias;
}
[[maybe_unused]] static __m128i m128_haddx4(
__m128i sum0, __m128i sum1, __m128i sum2, __m128i sum3,
__m128i bias) {
sum0 = _mm_hadd_epi32(sum0, sum1);
sum2 = _mm_hadd_epi32(sum2, sum3);
sum0 = _mm_hadd_epi32(sum0, sum2);
return _mm_add_epi32(sum0, bias);
}
[[maybe_unused]] static void m128_add_dpbusd_epi32(
__m128i& acc,
__m128i a,
__m128i b) {
# if defined (USE_INLINE_ASM)
__m128i tmp = _mm_maddubs_epi16(a, b);
asm(
"pmaddwd %[ones], %[tmp]\n\t"
"paddd %[tmp], %[acc]\n\t"
: [acc]"+v"(acc), [tmp]"+&v"(tmp)
: [ones]"v"(_mm_set1_epi16(1))
);
# else
__m128i product0 = _mm_maddubs_epi16(a, b);
product0 = _mm_madd_epi16(product0, _mm_set1_epi16(1));
acc = _mm_add_epi32(acc, product0);
# endif
}
[[maybe_unused]] static void m128_add_dpbusd_epi32x2(
__m128i& acc,
__m128i a0, __m128i b0,
__m128i a1, __m128i b1) {
# if defined (USE_INLINE_ASM)
__m128i tmp0 = _mm_maddubs_epi16(a0, b0);
__m128i tmp1 = _mm_maddubs_epi16(a1, b1);
asm(
"paddsw %[tmp1], %[tmp0]\n\t"
"pmaddwd %[ones], %[tmp0]\n\t"
"paddd %[tmp0], %[acc]\n\t"
: [acc]"+v"(acc), [tmp0]"+&v"(tmp0)
: [tmp1]"v"(tmp1), [ones]"v"(_mm_set1_epi16(1))
);
# else
__m128i product0 = _mm_maddubs_epi16(a0, b0);
__m128i product1 = _mm_maddubs_epi16(a1, b1);
product0 = _mm_adds_epi16(product0, product1);
product0 = _mm_madd_epi16(product0, _mm_set1_epi16(1));
acc = _mm_add_epi32(acc, product0);
# endif
}
#endif
}
#endif // STOCKFISH_SIMD_H_INCLUDED
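
For illustration, this is how the helpers above compose in the large-input affine path: accumulate int8 dot products into a vector accumulator, then do a fixed-cost horizontal reduction at the end. A minimal sketch, not the patch's actual AffineTransform code; it assumes AVX2 (USE_AVX2) and an input count that is a multiple of 32, which the ClippedReLU padding guarantees:

    #include <cstdint>
    #include <immintrin.h>
    #include "simd.h"

    // One output neuron: dot(input, weights) + bias, 32 bytes at a time.
    static std::int32_t affine_one_output(const std::uint8_t* input,   // clipped activations, 0..127
                                          const std::int8_t*  weights, // this output's weight row
                                          std::int32_t        bias,
                                          int                 inputDimensions) {
        __m256i acc = _mm256_setzero_si256();
        for (int i = 0; i < inputDimensions; i += 32) {
            const __m256i in = _mm256_loadu_si256(reinterpret_cast<const __m256i*>(input + i));
            const __m256i w  = _mm256_loadu_si256(reinterpret_cast<const __m256i*>(weights + i));
            Stockfish::Simd::m256_add_dpbusd_epi32(acc, in, w); // acc += per-lane dot products
        }
        // Fixed number of horizontal additions, amortized over the inputs.
        return Stockfish::Simd::m256_hadd(acc, bias);
    }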