
Split manually vectorized code into separate files.

pull/3598/head
Tomasz Sobczyk 2021-06-29 15:51:53 +02:00
parent 2275923d3c
commit 458a8056a9
12 changed files with 1637 additions and 1029 deletions

View File: src/nnue/layers/affine_transform.h

@@ -21,14 +21,16 @@
#ifndef NNUE_LAYERS_AFFINE_TRANSFORM_H_INCLUDED
#define NNUE_LAYERS_AFFINE_TRANSFORM_H_INCLUDED
-#include <iostream>
 #include "../nnue_common.h"
+#include <iostream>
+#include <cstdint>
namespace Stockfish::Eval::NNUE::Layers {
// Affine transformation layer
template <typename PreviousLayer, IndexType OutDims>
-class AffineTransform {
+class AffineTransform_Base {
public:
// Input/output type
using InputType = typename PreviousLayer::OutputType;
@@ -36,16 +38,10 @@ namespace Stockfish::Eval::NNUE::Layers {
static_assert(std::is_same<InputType, std::uint8_t>::value, "");
// Number of input/output dimensions
-static constexpr IndexType InputDimensions =
-    PreviousLayer::OutputDimensions;
+static constexpr IndexType InputDimensions = PreviousLayer::OutputDimensions;
static constexpr IndexType OutputDimensions = OutDims;
 static constexpr IndexType PaddedInputDimensions =
-    ceil_to_multiple<IndexType>(InputDimensions, MaxSimdWidth);
-#if defined (USE_AVX512)
-static constexpr const IndexType OutputSimdWidth = SimdWidth / 2;
-#elif defined (USE_SSSE3)
-static constexpr const IndexType OutputSimdWidth = SimdWidth / 4;
-#endif
+    ceil_to_multiple<IndexType>(InputDimensions, 32);
// Size of forward propagation buffer used in this layer
static constexpr std::size_t SelfBufferSize =
@@ -64,363 +60,7 @@ namespace Stockfish::Eval::NNUE::Layers {
return hashValue;
}
// Read network parameters
bool read_parameters(std::istream& stream) {
if (!previousLayer.read_parameters(stream)) return false;
for (std::size_t i = 0; i < OutputDimensions; ++i)
biases[i] = read_little_endian<BiasType>(stream);
for (std::size_t i = 0; i < OutputDimensions * PaddedInputDimensions; ++i)
#if !defined (USE_SSSE3)
weights[i] = read_little_endian<WeightType>(stream);
#else
weights[
(i / 4) % (PaddedInputDimensions / 4) * OutputDimensions * 4 +
i / PaddedInputDimensions * 4 +
i % 4
] = read_little_endian<WeightType>(stream);
#endif
return !stream.fail();
}
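A minimal standalone sketch of the scrambled index used above (names hypothetical, not part of this commit). Writing i = row * PaddedInputDimensions + col, the expression maps each weight to (col / 4) * OutputDimensions * 4 + row * 4 + (col % 4), so the 4 weight bytes every output needs for one group of 4 inputs become contiguous; the check below confirms the mapping is a permutation:

#include <cassert>
#include <cstddef>

constexpr std::size_t scrambled_index(std::size_t i, std::size_t outputs,
                                      std::size_t paddedInputs) {
    return (i / 4) % (paddedInputs / 4) * outputs * 4   // which 4-input chunk
         + i / paddedInputs * 4                         // which output row
         + i % 4;                                       // byte within the chunk
}

int main() {
    constexpr std::size_t Outputs = 8, PaddedInputs = 32;  // illustrative sizes
    bool seen[Outputs * PaddedInputs] = {};
    for (std::size_t i = 0; i < Outputs * PaddedInputs; ++i) {
        const std::size_t j = scrambled_index(i, Outputs, PaddedInputs);
        assert(j < Outputs * PaddedInputs && !seen[j]);  // in range, bijective
        seen[j] = true;
    }
}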
// Write network parameters
bool write_parameters(std::ostream& stream) const {
if (!previousLayer.write_parameters(stream)) return false;
for (std::size_t i = 0; i < OutputDimensions; ++i)
write_little_endian<BiasType>(stream, biases[i]);
#if !defined (USE_SSSE3)
for (std::size_t i = 0; i < OutputDimensions * PaddedInputDimensions; ++i)
write_little_endian<WeightType>(stream, weights[i]);
#else
std::unique_ptr<WeightType[]> unscrambledWeights = std::make_unique<WeightType[]>(OutputDimensions * PaddedInputDimensions);
for (std::size_t i = 0; i < OutputDimensions * PaddedInputDimensions; ++i) {
unscrambledWeights[i] =
weights[
(i / 4) % (PaddedInputDimensions / 4) * OutputDimensions * 4 +
i / PaddedInputDimensions * 4 +
i % 4
];
}
for (std::size_t i = 0; i < OutputDimensions * PaddedInputDimensions; ++i)
write_little_endian<WeightType>(stream, unscrambledWeights[i]);
#endif
return !stream.fail();
}
// Forward propagation
const OutputType* propagate(
const TransformedFeatureType* transformedFeatures, char* buffer) const {
const auto input = previousLayer.propagate(
transformedFeatures, buffer + SelfBufferSize);
#if defined (USE_AVX512)
[[maybe_unused]] const __m512i Ones512 = _mm512_set1_epi16(1);
[[maybe_unused]] auto m512_hadd = [](__m512i sum, int bias) -> int {
return _mm512_reduce_add_epi32(sum) + bias;
};
[[maybe_unused]] auto m512_add_dpbusd_epi32 = [=](__m512i& acc, __m512i a, __m512i b) {
#if defined (USE_VNNI)
acc = _mm512_dpbusd_epi32(acc, a, b);
#else
__m512i product0 = _mm512_maddubs_epi16(a, b);
product0 = _mm512_madd_epi16(product0, Ones512);
acc = _mm512_add_epi32(acc, product0);
#endif
};
[[maybe_unused]] auto m512_add_dpbusd_epi32x4 = [=](__m512i& acc, __m512i a0, __m512i b0, __m512i a1, __m512i b1,
__m512i a2, __m512i b2, __m512i a3, __m512i b3) {
#if defined (USE_VNNI)
acc = _mm512_dpbusd_epi32(acc, a0, b0);
acc = _mm512_dpbusd_epi32(acc, a1, b1);
acc = _mm512_dpbusd_epi32(acc, a2, b2);
acc = _mm512_dpbusd_epi32(acc, a3, b3);
#else
__m512i product0 = _mm512_maddubs_epi16(a0, b0);
__m512i product1 = _mm512_maddubs_epi16(a1, b1);
__m512i product2 = _mm512_maddubs_epi16(a2, b2);
__m512i product3 = _mm512_maddubs_epi16(a3, b3);
product0 = _mm512_adds_epi16(product0, product1);
product0 = _mm512_madd_epi16(product0, Ones512);
product2 = _mm512_adds_epi16(product2, product3);
product2 = _mm512_madd_epi16(product2, Ones512);
acc = _mm512_add_epi32(acc, _mm512_add_epi32(product0, product2));
#endif
};
#endif
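Per 32-bit lane, both the VNNI instruction and the maddubs/madd emulation above compute a dot product of 4 unsigned input bytes with 4 signed weight bytes. A scalar model (a sketch only; it ignores that the emulated path saturates the intermediate 16-bit pair sums):

#include <cstdint>

// acc += a[0]*b[0] + a[1]*b[1] + a[2]*b[2] + a[3]*b[3], one lane's worth
inline void add_dpbusd_lane(std::int32_t& acc,
                            const std::uint8_t a[4], const std::int8_t b[4]) {
    for (int k = 0; k < 4; ++k)
        acc += std::int32_t(a[k]) * std::int32_t(b[k]);
}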
#if defined (USE_AVX2)
[[maybe_unused]] const __m256i Ones256 = _mm256_set1_epi16(1);
[[maybe_unused]] auto m256_hadd = [](__m256i sum, int bias) -> int {
__m128i sum128 = _mm_add_epi32(_mm256_castsi256_si128(sum), _mm256_extracti128_si256(sum, 1));
sum128 = _mm_add_epi32(sum128, _mm_shuffle_epi32(sum128, _MM_PERM_BADC));
sum128 = _mm_add_epi32(sum128, _mm_shuffle_epi32(sum128, _MM_PERM_CDAB));
return _mm_cvtsi128_si32(sum128) + bias;
};
[[maybe_unused]] auto m256_add_dpbusd_epi32 = [=](__m256i& acc, __m256i a, __m256i b) {
#if defined (USE_VNNI)
acc = _mm256_dpbusd_epi32(acc, a, b);
#else
__m256i product0 = _mm256_maddubs_epi16(a, b);
product0 = _mm256_madd_epi16(product0, Ones256);
acc = _mm256_add_epi32(acc, product0);
#endif
};
[[maybe_unused]] auto m256_add_dpbusd_epi32x4 = [=](__m256i& acc, __m256i a0, __m256i b0, __m256i a1, __m256i b1,
__m256i a2, __m256i b2, __m256i a3, __m256i b3) {
#if defined (USE_VNNI)
acc = _mm256_dpbusd_epi32(acc, a0, b0);
acc = _mm256_dpbusd_epi32(acc, a1, b1);
acc = _mm256_dpbusd_epi32(acc, a2, b2);
acc = _mm256_dpbusd_epi32(acc, a3, b3);
#else
__m256i product0 = _mm256_maddubs_epi16(a0, b0);
__m256i product1 = _mm256_maddubs_epi16(a1, b1);
__m256i product2 = _mm256_maddubs_epi16(a2, b2);
__m256i product3 = _mm256_maddubs_epi16(a3, b3);
product0 = _mm256_adds_epi16(product0, product1);
product0 = _mm256_madd_epi16(product0, Ones256);
product2 = _mm256_adds_epi16(product2, product3);
product2 = _mm256_madd_epi16(product2, Ones256);
acc = _mm256_add_epi32(acc, _mm256_add_epi32(product0, product2));
#endif
};
#endif
#if defined (USE_SSSE3)
[[maybe_unused]] const __m128i Ones128 = _mm_set1_epi16(1);
[[maybe_unused]] auto m128_hadd = [](__m128i sum, int bias) -> int {
sum = _mm_add_epi32(sum, _mm_shuffle_epi32(sum, 0x4E)); //_MM_PERM_BADC
sum = _mm_add_epi32(sum, _mm_shuffle_epi32(sum, 0xB1)); //_MM_PERM_CDAB
return _mm_cvtsi128_si32(sum) + bias;
};
[[maybe_unused]] auto m128_add_dpbusd_epi32 = [=](__m128i& acc, __m128i a, __m128i b) {
__m128i product0 = _mm_maddubs_epi16(a, b);
product0 = _mm_madd_epi16(product0, Ones128);
acc = _mm_add_epi32(acc, product0);
};
[[maybe_unused]] auto m128_add_dpbusd_epi32x4 = [=](__m128i& acc, __m128i a0, __m128i b0, __m128i a1, __m128i b1,
__m128i a2, __m128i b2, __m128i a3, __m128i b3) {
__m128i product0 = _mm_maddubs_epi16(a0, b0);
__m128i product1 = _mm_maddubs_epi16(a1, b1);
__m128i product2 = _mm_maddubs_epi16(a2, b2);
__m128i product3 = _mm_maddubs_epi16(a3, b3);
product0 = _mm_adds_epi16(product0, product1);
product0 = _mm_madd_epi16(product0, Ones128);
product2 = _mm_adds_epi16(product2, product3);
product2 = _mm_madd_epi16(product2, Ones128);
acc = _mm_add_epi32(acc, _mm_add_epi32(product0, product2));
};
#endif
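The m128/m256/m512 hadd helpers all do the same job at different widths: reduce a vector of 32-bit partial sums to one scalar and add the bias. A scalar model (a sketch, not part of the commit):

#include <cstdint>

inline std::int32_t hadd_ref(const std::int32_t* lanes, int n, std::int32_t bias) {
    std::int32_t sum = bias;
    for (int i = 0; i < n; ++i)   // n = 4, 8 or 16 lanes depending on width
        sum += lanes[i];
    return sum;
}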
#if defined (USE_AVX512)
using vec_t = __m512i;
#define vec_setzero _mm512_setzero_si512
#define vec_set_32 _mm512_set1_epi32
auto& vec_add_dpbusd_32 = m512_add_dpbusd_epi32;
auto& vec_add_dpbusd_32x4 = m512_add_dpbusd_epi32x4;
auto& vec_hadd = m512_hadd;
#elif defined (USE_AVX2)
using vec_t = __m256i;
#define vec_setzero _mm256_setzero_si256
#define vec_set_32 _mm256_set1_epi32
auto& vec_add_dpbusd_32 = m256_add_dpbusd_epi32;
auto& vec_add_dpbusd_32x4 = m256_add_dpbusd_epi32x4;
auto& vec_hadd = m256_hadd;
#elif defined (USE_SSSE3)
using vec_t = __m128i;
#define vec_setzero _mm_setzero_si128
#define vec_set_32 _mm_set1_epi32
auto& vec_add_dpbusd_32 = m128_add_dpbusd_epi32;
auto& vec_add_dpbusd_32x4 = m128_add_dpbusd_epi32x4;
auto& vec_hadd = m128_hadd;
#endif
#if defined (USE_SSSE3)
// Different layout: we always process 4 inputs at a time.
static_assert(InputDimensions % 4 == 0);
const auto output = reinterpret_cast<OutputType*>(buffer);
const auto inputVector = reinterpret_cast<const vec_t*>(input);
static_assert(OutputDimensions % OutputSimdWidth == 0 || OutputDimensions == 1);
// OutputDimensions is either 1 or a multiple of SimdWidth
// because then it is also an input dimension.
if constexpr (OutputDimensions % OutputSimdWidth == 0)
{
constexpr IndexType NumChunks = InputDimensions / 4;
const auto input32 = reinterpret_cast<const std::int32_t*>(input);
vec_t* outptr = reinterpret_cast<vec_t*>(output);
std::memcpy(output, biases, OutputDimensions * sizeof(OutputType));
for (int i = 0; i < (int)NumChunks - 3; i += 4)
{
const vec_t in0 = vec_set_32(input32[i + 0]);
const vec_t in1 = vec_set_32(input32[i + 1]);
const vec_t in2 = vec_set_32(input32[i + 2]);
const vec_t in3 = vec_set_32(input32[i + 3]);
const auto col0 = reinterpret_cast<const vec_t*>(&weights[(i + 0) * OutputDimensions * 4]);
const auto col1 = reinterpret_cast<const vec_t*>(&weights[(i + 1) * OutputDimensions * 4]);
const auto col2 = reinterpret_cast<const vec_t*>(&weights[(i + 2) * OutputDimensions * 4]);
const auto col3 = reinterpret_cast<const vec_t*>(&weights[(i + 3) * OutputDimensions * 4]);
for (int j = 0; j * OutputSimdWidth < OutputDimensions; ++j)
vec_add_dpbusd_32x4(outptr[j], in0, col0[j], in1, col1[j], in2, col2[j], in3, col3[j]);
}
}
else if constexpr (OutputDimensions == 1)
{
#if defined (USE_AVX512)
if constexpr (PaddedInputDimensions % (SimdWidth * 2) != 0)
{
constexpr IndexType NumChunks = PaddedInputDimensions / SimdWidth;
const auto inputVector256 = reinterpret_cast<const __m256i*>(input);
__m256i sum0 = _mm256_setzero_si256();
const auto row0 = reinterpret_cast<const __m256i*>(&weights[0]);
for (int j = 0; j < (int)NumChunks; ++j)
{
const __m256i in = inputVector256[j];
m256_add_dpbusd_epi32(sum0, in, row0[j]);
}
output[0] = m256_hadd(sum0, biases[0]);
}
else
#endif
{
#if defined (USE_AVX512)
constexpr IndexType NumChunks = PaddedInputDimensions / (SimdWidth * 2);
#else
constexpr IndexType NumChunks = PaddedInputDimensions / SimdWidth;
#endif
vec_t sum0 = vec_setzero();
const auto row0 = reinterpret_cast<const vec_t*>(&weights[0]);
for (int j = 0; j < (int)NumChunks; ++j)
{
const vec_t in = inputVector[j];
vec_add_dpbusd_32(sum0, in, row0[j]);
}
output[0] = vec_hadd(sum0, biases[0]);
}
}
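Ignoring the SIMD blocking, the multiple-output path above is equivalent to this scalar reference (names hypothetical): broadcast 4 input bytes per step and accumulate them against the scrambled column groups laid out by read_parameters():

#include <cstdint>

void affine_ref(std::int32_t* output, const std::int32_t* biases,
                const std::int8_t* weights,   // scrambled layout, as stored above
                const std::uint8_t* input, int inputs, int outputs) {
    for (int o = 0; o < outputs; ++o)
        output[o] = biases[o];
    for (int chunk = 0; chunk < inputs / 4; ++chunk)    // 4 inputs per step
        for (int o = 0; o < outputs; ++o)
            for (int k = 0; k < 4; ++k)
                output[o] += std::int32_t(input[chunk * 4 + k])
                           * std::int32_t(weights[chunk * outputs * 4 + o * 4 + k]);
}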
#else
// Use old implementation for the other architectures.
auto output = reinterpret_cast<OutputType*>(buffer);
#if defined(USE_SSE2)
// At least a multiple of 16, with SSE2.
static_assert(InputDimensions % SimdWidth == 0);
constexpr IndexType NumChunks = InputDimensions / SimdWidth;
const __m128i Zeros = _mm_setzero_si128();
const auto inputVector = reinterpret_cast<const __m128i*>(input);
#elif defined(USE_MMX)
static_assert(InputDimensions % SimdWidth == 0);
constexpr IndexType NumChunks = InputDimensions / SimdWidth;
const __m64 Zeros = _mm_setzero_si64();
const auto inputVector = reinterpret_cast<const __m64*>(input);
#elif defined(USE_NEON)
static_assert(InputDimensions % SimdWidth == 0);
constexpr IndexType NumChunks = InputDimensions / SimdWidth;
const auto inputVector = reinterpret_cast<const int8x8_t*>(input);
#endif
for (IndexType i = 0; i < OutputDimensions; ++i) {
const IndexType offset = i * PaddedInputDimensions;
#if defined(USE_SSE2)
__m128i sumLo = _mm_cvtsi32_si128(biases[i]);
__m128i sumHi = Zeros;
const auto row = reinterpret_cast<const __m128i*>(&weights[offset]);
for (IndexType j = 0; j < NumChunks; ++j) {
__m128i row_j = _mm_load_si128(&row[j]);
__m128i input_j = _mm_load_si128(&inputVector[j]);
__m128i extendedRowLo = _mm_srai_epi16(_mm_unpacklo_epi8(row_j, row_j), 8);
__m128i extendedRowHi = _mm_srai_epi16(_mm_unpackhi_epi8(row_j, row_j), 8);
__m128i extendedInputLo = _mm_unpacklo_epi8(input_j, Zeros);
__m128i extendedInputHi = _mm_unpackhi_epi8(input_j, Zeros);
__m128i productLo = _mm_madd_epi16(extendedRowLo, extendedInputLo);
__m128i productHi = _mm_madd_epi16(extendedRowHi, extendedInputHi);
sumLo = _mm_add_epi32(sumLo, productLo);
sumHi = _mm_add_epi32(sumHi, productHi);
}
__m128i sum = _mm_add_epi32(sumLo, sumHi);
__m128i sumHigh_64 = _mm_shuffle_epi32(sum, _MM_SHUFFLE(1, 0, 3, 2));
sum = _mm_add_epi32(sum, sumHigh_64);
__m128i sum_second_32 = _mm_shufflelo_epi16(sum, _MM_SHUFFLE(1, 0, 3, 2));
sum = _mm_add_epi32(sum, sum_second_32);
output[i] = _mm_cvtsi128_si32(sum);
#elif defined(USE_MMX)
__m64 sumLo = _mm_cvtsi32_si64(biases[i]);
__m64 sumHi = Zeros;
const auto row = reinterpret_cast<const __m64*>(&weights[offset]);
for (IndexType j = 0; j < NumChunks; ++j) {
__m64 row_j = row[j];
__m64 input_j = inputVector[j];
__m64 extendedRowLo = _mm_srai_pi16(_mm_unpacklo_pi8(row_j, row_j), 8);
__m64 extendedRowHi = _mm_srai_pi16(_mm_unpackhi_pi8(row_j, row_j), 8);
__m64 extendedInputLo = _mm_unpacklo_pi8(input_j, Zeros);
__m64 extendedInputHi = _mm_unpackhi_pi8(input_j, Zeros);
__m64 productLo = _mm_madd_pi16(extendedRowLo, extendedInputLo);
__m64 productHi = _mm_madd_pi16(extendedRowHi, extendedInputHi);
sumLo = _mm_add_pi32(sumLo, productLo);
sumHi = _mm_add_pi32(sumHi, productHi);
}
__m64 sum = _mm_add_pi32(sumLo, sumHi);
sum = _mm_add_pi32(sum, _mm_unpackhi_pi32(sum, sum));
output[i] = _mm_cvtsi64_si32(sum);
#elif defined(USE_NEON)
int32x4_t sum = {biases[i]};
const auto row = reinterpret_cast<const int8x8_t*>(&weights[offset]);
for (IndexType j = 0; j < NumChunks; ++j) {
int16x8_t product = vmull_s8(inputVector[j * 2], row[j * 2]);
product = vmlal_s8(product, inputVector[j * 2 + 1], row[j * 2 + 1]);
sum = vpadalq_s16(sum, product);
}
output[i] = sum[0] + sum[1] + sum[2] + sum[3];
#else
OutputType sum = biases[i];
for (IndexType j = 0; j < InputDimensions; ++j) {
sum += weights[offset + j] * input[j];
}
output[i] = sum;
#endif
}
#if defined(USE_MMX)
_mm_empty();
#endif
#endif
return output;
}
-private:
+protected:
using BiasType = OutputType;
using WeightType = std::int8_t;
@@ -432,4 +72,24 @@ namespace Stockfish::Eval::NNUE::Layers {
} // namespace Stockfish::Eval::NNUE::Layers
#include "affine_transform_vec.h"
#if defined (AFFINE_TRANSFORM_NO_VEC)
# include "affine_transform_scalar.h"
namespace Stockfish::Eval::NNUE::Layers {
template <typename PreviousLayer, IndexType OutDims>
using AffineTransform = AffineTransform_Scalar<PreviousLayer, OutDims>;
}
#else
namespace Stockfish::Eval::NNUE::Layers {
template <typename PreviousLayer, IndexType OutDims>
using AffineTransform = AffineTransform_Vec<PreviousLayer, OutDims>;
}
#endif
#endif // #ifndef NNUE_LAYERS_AFFINE_TRANSFORM_H_INCLUDED

View File: src/nnue/layers/affine_transform_scalar.h (new file)

@@ -0,0 +1,99 @@
/*
Stockfish, a UCI chess playing engine derived from Glaurung 2.1
Copyright (C) 2004-2021 The Stockfish developers (see AUTHORS file)
Stockfish is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Stockfish is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
// Definition of layer AffineTransform of NNUE evaluation function
#ifndef NNUE_LAYERS_AFFINE_TRANSFORM_SCALAR_H_INCLUDED
#define NNUE_LAYERS_AFFINE_TRANSFORM_SCALAR_H_INCLUDED
#if !defined (NNUE_LAYERS_AFFINE_TRANSFORM_H_INCLUDED)
#error "This file can only be included through affine_transform.h"
#endif
#include <iostream>
namespace Stockfish::Eval::NNUE::Layers {
// Affine transformation layer
template <typename PreviousLayer, IndexType OutDims>
class AffineTransform_Scalar : public AffineTransform_Base<PreviousLayer, OutDims> {
public:
using BaseType = AffineTransform_Base<PreviousLayer, OutDims>;
using InputType = typename BaseType::InputType;
using OutputType = typename BaseType::OutputType;
static constexpr auto InputDimensions = BaseType::InputDimensions;
static constexpr auto OutputDimensions = BaseType::OutputDimensions;
static constexpr auto PaddedInputDimensions = BaseType::PaddedInputDimensions;
static constexpr auto SelfBufferSize = BaseType::SelfBufferSize;
static constexpr auto BufferSize = BaseType::BufferSize;
// Read network parameters
bool read_parameters(std::istream& stream) {
if (!BaseType::previousLayer.read_parameters(stream)) return false;
for (std::size_t i = 0; i < OutputDimensions; ++i)
biases[i] = read_little_endian<BiasType>(stream);
for (std::size_t i = 0; i < OutputDimensions * PaddedInputDimensions; ++i)
weights[i] = read_little_endian<WeightType>(stream);
return !stream.fail();
}
// Write network parameters
bool write_parameters(std::ostream& stream) const {
if (!BaseType::previousLayer.write_parameters(stream)) return false;
for (std::size_t i = 0; i < OutputDimensions; ++i)
write_little_endian<BiasType>(stream, biases[i]);
for (std::size_t i = 0; i < OutputDimensions * PaddedInputDimensions; ++i)
write_little_endian<WeightType>(stream, weights[i]);
return !stream.fail();
}
// Forward propagation
const OutputType* propagate(
const TransformedFeatureType* transformedFeatures,
char* buffer) const {
const auto input = BaseType::previousLayer.propagate(
transformedFeatures, buffer + SelfBufferSize);
const auto output = reinterpret_cast<OutputType*>(buffer);
for (IndexType i = 0; i < OutputDimensions; ++i) {
const IndexType offset = i * PaddedInputDimensions;
OutputType sum = biases[i];
for (IndexType j = 0; j < InputDimensions; ++j) {
sum += weights[offset + j] * input[j];
}
output[i] = sum;
}
return output;
}
private:
using BiasType = typename BaseType::BiasType;
using WeightType = typename BaseType::WeightType;
using BaseType::biases;
using BaseType::weights;
};
} // namespace Stockfish::Eval::NNUE::Layers
#endif // #ifndef NNUE_LAYERS_AFFINE_TRANSFORM_SCALAR_H_INCLUDED

View File: src/nnue/layers/affine_transform_vec.h (new file)

@@ -0,0 +1,439 @@
/*
Stockfish, a UCI chess playing engine derived from Glaurung 2.1
Copyright (C) 2004-2021 The Stockfish developers (see AUTHORS file)
Stockfish is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Stockfish is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
// Definition of layer AffineTransform of NNUE evaluation function
#ifndef NNUE_LAYERS_AFFINE_TRANSFORM_VEC_H_INCLUDED
#define NNUE_LAYERS_AFFINE_TRANSFORM_VEC_H_INCLUDED
#if !defined (NNUE_LAYERS_AFFINE_TRANSFORM_H_INCLUDED)
#error "This file can only be included through affine_transform.h"
#endif
#if defined (USE_MMX) || defined (USE_SSE2) || defined (USE_NEON)
#include <iostream>
#include <memory>
#include <cstring>
namespace Stockfish::Eval::NNUE::Layers {
// Affine transformation layer
template <typename PreviousLayer, IndexType OutDims>
class AffineTransform_Vec : public AffineTransform_Base<PreviousLayer, OutDims> {
public:
using BaseType = AffineTransform_Base<PreviousLayer, OutDims>;
using InputType = typename BaseType::InputType;
using OutputType = typename BaseType::OutputType;
static constexpr auto InputDimensions = BaseType::InputDimensions;
static constexpr auto OutputDimensions = BaseType::OutputDimensions;
static constexpr auto PaddedInputDimensions = BaseType::PaddedInputDimensions;
static constexpr auto SelfBufferSize = BaseType::SelfBufferSize;
static constexpr auto BufferSize = BaseType::BufferSize;
#if defined(USE_AVX512)
static constexpr std::size_t SimdWidth = 64;
#elif defined(USE_AVX2)
static constexpr std::size_t SimdWidth = 32;
#elif defined(USE_SSE2)
static constexpr std::size_t SimdWidth = 16;
#elif defined(USE_MMX)
static constexpr std::size_t SimdWidth = 8;
#elif defined(USE_NEON)
static constexpr std::size_t SimdWidth = 16;
#endif
static constexpr const IndexType OutputSimdWidth = SimdWidth / 4;
// Read network parameters
bool read_parameters(std::istream& stream) {
if (!BaseType::previousLayer.read_parameters(stream)) return false;
for (std::size_t i = 0; i < OutputDimensions; ++i)
biases[i] = read_little_endian<BiasType>(stream);
for (std::size_t i = 0; i < OutputDimensions * PaddedInputDimensions; ++i)
#if !defined (USE_SSSE3)
weights[i] = read_little_endian<WeightType>(stream);
#else
weights[
(i / 4) % (PaddedInputDimensions / 4) * OutputDimensions * 4 +
i / PaddedInputDimensions * 4 +
i % 4
] = read_little_endian<WeightType>(stream);
#endif
return !stream.fail();
}
// Write network parameters
bool write_parameters(std::ostream& stream) const {
if (!BaseType::previousLayer.write_parameters(stream)) return false;
for (std::size_t i = 0; i < BaseType::OutputDimensions; ++i)
write_little_endian<BiasType>(stream, biases[i]);
#if !defined (USE_SSSE3)
for (std::size_t i = 0; i < OutputDimensions * PaddedInputDimensions; ++i)
write_little_endian<WeightType>(stream, weights[i]);
#else
std::unique_ptr<WeightType[]> unscrambledWeights =
std::make_unique<WeightType[]>(OutputDimensions * PaddedInputDimensions);
for (std::size_t i = 0; i < OutputDimensions * PaddedInputDimensions; ++i) {
unscrambledWeights[i] =
weights[
(i / 4) % (PaddedInputDimensions / 4) * OutputDimensions * 4 +
i / PaddedInputDimensions * 4 +
i % 4
];
}
for (std::size_t i = 0; i < OutputDimensions * PaddedInputDimensions; ++i)
write_little_endian<WeightType>(stream, unscrambledWeights[i]);
#endif
return !stream.fail();
}
// Forward propagation
const OutputType* propagate(
const TransformedFeatureType* transformedFeatures,
char* buffer) const {
const auto input = BaseType::previousLayer.propagate(
transformedFeatures, buffer + SelfBufferSize);
#if defined (USE_AVX512)
[[maybe_unused]] const __m512i Ones512 = _mm512_set1_epi16(1);
[[maybe_unused]] auto m512_hadd = [](__m512i sum, int bias) -> int {
return _mm512_reduce_add_epi32(sum) + bias;
};
[[maybe_unused]] auto m512_add_dpbusd_epi32 = [=](__m512i& acc, __m512i a, __m512i b) {
# if defined (USE_VNNI)
acc = _mm512_dpbusd_epi32(acc, a, b);
# else
__m512i product0 = _mm512_maddubs_epi16(a, b);
product0 = _mm512_madd_epi16(product0, Ones512);
acc = _mm512_add_epi32(acc, product0);
# endif
};
[[maybe_unused]] auto m512_add_dpbusd_epi32x4 = [=](__m512i& acc, __m512i a0, __m512i b0, __m512i a1, __m512i b1,
__m512i a2, __m512i b2, __m512i a3, __m512i b3) {
# if defined (USE_VNNI)
acc = _mm512_dpbusd_epi32(acc, a0, b0);
acc = _mm512_dpbusd_epi32(acc, a1, b1);
acc = _mm512_dpbusd_epi32(acc, a2, b2);
acc = _mm512_dpbusd_epi32(acc, a3, b3);
# else
__m512i product0 = _mm512_maddubs_epi16(a0, b0);
__m512i product1 = _mm512_maddubs_epi16(a1, b1);
__m512i product2 = _mm512_maddubs_epi16(a2, b2);
__m512i product3 = _mm512_maddubs_epi16(a3, b3);
product0 = _mm512_adds_epi16(product0, product1);
product0 = _mm512_madd_epi16(product0, Ones512);
product2 = _mm512_adds_epi16(product2, product3);
product2 = _mm512_madd_epi16(product2, Ones512);
acc = _mm512_add_epi32(acc, _mm512_add_epi32(product0, product2));
# endif
};
#endif
#if defined (USE_AVX2)
[[maybe_unused]] const __m256i Ones256 = _mm256_set1_epi16(1);
[[maybe_unused]] auto m256_hadd = [](__m256i sum, int bias) -> int {
__m128i sum128 = _mm_add_epi32(_mm256_castsi256_si128(sum), _mm256_extracti128_si256(sum, 1));
sum128 = _mm_add_epi32(sum128, _mm_shuffle_epi32(sum128, _MM_PERM_BADC));
sum128 = _mm_add_epi32(sum128, _mm_shuffle_epi32(sum128, _MM_PERM_CDAB));
return _mm_cvtsi128_si32(sum128) + bias;
};
[[maybe_unused]] auto m256_add_dpbusd_epi32 = [=](__m256i& acc, __m256i a, __m256i b) {
# if defined (USE_VNNI)
acc = _mm256_dpbusd_epi32(acc, a, b);
# else
__m256i product0 = _mm256_maddubs_epi16(a, b);
product0 = _mm256_madd_epi16(product0, Ones256);
acc = _mm256_add_epi32(acc, product0);
# endif
};
[[maybe_unused]] auto m256_add_dpbusd_epi32x4 = [=](__m256i& acc, __m256i a0, __m256i b0, __m256i a1, __m256i b1,
__m256i a2, __m256i b2, __m256i a3, __m256i b3) {
# if defined (USE_VNNI)
acc = _mm256_dpbusd_epi32(acc, a0, b0);
acc = _mm256_dpbusd_epi32(acc, a1, b1);
acc = _mm256_dpbusd_epi32(acc, a2, b2);
acc = _mm256_dpbusd_epi32(acc, a3, b3);
# else
__m256i product0 = _mm256_maddubs_epi16(a0, b0);
__m256i product1 = _mm256_maddubs_epi16(a1, b1);
__m256i product2 = _mm256_maddubs_epi16(a2, b2);
__m256i product3 = _mm256_maddubs_epi16(a3, b3);
product0 = _mm256_adds_epi16(product0, product1);
product0 = _mm256_madd_epi16(product0, Ones256);
product2 = _mm256_adds_epi16(product2, product3);
product2 = _mm256_madd_epi16(product2, Ones256);
acc = _mm256_add_epi32(acc, _mm256_add_epi32(product0, product2));
# endif
};
#endif
#if defined (USE_SSSE3)
[[maybe_unused]] const __m128i Ones128 = _mm_set1_epi16(1);
[[maybe_unused]] auto m128_hadd = [](__m128i sum, int bias) -> int {
sum = _mm_add_epi32(sum, _mm_shuffle_epi32(sum, 0x4E)); //_MM_PERM_BADC
sum = _mm_add_epi32(sum, _mm_shuffle_epi32(sum, 0xB1)); //_MM_PERM_CDAB
return _mm_cvtsi128_si32(sum) + bias;
};
[[maybe_unused]] auto m128_add_dpbusd_epi32 = [=](__m128i& acc, __m128i a, __m128i b) {
__m128i product0 = _mm_maddubs_epi16(a, b);
product0 = _mm_madd_epi16(product0, Ones128);
acc = _mm_add_epi32(acc, product0);
};
[[maybe_unused]] auto m128_add_dpbusd_epi32x4 = [=](__m128i& acc, __m128i a0, __m128i b0, __m128i a1, __m128i b1,
__m128i a2, __m128i b2, __m128i a3, __m128i b3) {
__m128i product0 = _mm_maddubs_epi16(a0, b0);
__m128i product1 = _mm_maddubs_epi16(a1, b1);
__m128i product2 = _mm_maddubs_epi16(a2, b2);
__m128i product3 = _mm_maddubs_epi16(a3, b3);
product0 = _mm_adds_epi16(product0, product1);
product0 = _mm_madd_epi16(product0, Ones128);
product2 = _mm_adds_epi16(product2, product3);
product2 = _mm_madd_epi16(product2, Ones128);
acc = _mm_add_epi32(acc, _mm_add_epi32(product0, product2));
};
#endif
#if defined (USE_AVX512)
using vec_t = __m512i;
#define vec_setzero _mm512_setzero_si512
#define vec_set_32 _mm512_set1_epi32
auto& vec_add_dpbusd_32 = m512_add_dpbusd_epi32;
auto& vec_add_dpbusd_32x4 = m512_add_dpbusd_epi32x4;
auto& vec_hadd = m512_hadd;
#elif defined (USE_AVX2)
using vec_t = __m256i;
#define vec_setzero _mm256_setzero_si256
#define vec_set_32 _mm256_set1_epi32
auto& vec_add_dpbusd_32 = m256_add_dpbusd_epi32;
auto& vec_add_dpbusd_32x4 = m256_add_dpbusd_epi32x4;
auto& vec_hadd = m256_hadd;
#elif defined (USE_SSSE3)
using vec_t = __m128i;
#define vec_setzero _mm_setzero_si128
#define vec_set_32 _mm_set1_epi32
auto& vec_add_dpbusd_32 = m128_add_dpbusd_epi32;
auto& vec_add_dpbusd_32x4 = m128_add_dpbusd_epi32x4;
auto& vec_hadd = m128_hadd;
#endif
#if defined (USE_SSSE3)
// Different layout: we always process 4 inputs at a time.
static_assert(InputDimensions % 4 == 0);
const auto output = reinterpret_cast<OutputType*>(buffer);
const auto inputVector = reinterpret_cast<const vec_t*>(input);
static_assert(OutputDimensions % OutputSimdWidth == 0 || OutputDimensions == 1);
// OutputDimensions is either 1 or a multiple of SimdWidth
// because then it is also an input dimension.
if constexpr (OutputDimensions % OutputSimdWidth == 0)
{
constexpr IndexType NumChunks = InputDimensions / 4;
const auto input32 = reinterpret_cast<const std::int32_t*>(input);
vec_t* outptr = reinterpret_cast<vec_t*>(output);
std::memcpy(output, biases, OutputDimensions * sizeof(OutputType));
for (int i = 0; i < (int)NumChunks - 3; i += 4)
{
const vec_t in0 = vec_set_32(input32[i + 0]);
const vec_t in1 = vec_set_32(input32[i + 1]);
const vec_t in2 = vec_set_32(input32[i + 2]);
const vec_t in3 = vec_set_32(input32[i + 3]);
const auto col0 = reinterpret_cast<const vec_t*>(&weights[(i + 0) * OutputDimensions * 4]);
const auto col1 = reinterpret_cast<const vec_t*>(&weights[(i + 1) * OutputDimensions * 4]);
const auto col2 = reinterpret_cast<const vec_t*>(&weights[(i + 2) * OutputDimensions * 4]);
const auto col3 = reinterpret_cast<const vec_t*>(&weights[(i + 3) * OutputDimensions * 4]);
for (int j = 0; j * OutputSimdWidth < OutputDimensions; ++j)
vec_add_dpbusd_32x4(outptr[j], in0, col0[j], in1, col1[j], in2, col2[j], in3, col3[j]);
}
}
else if constexpr (OutputDimensions == 1)
{
# if defined (USE_AVX512)
if constexpr (PaddedInputDimensions % SimdWidth != 0)
{
constexpr IndexType NumChunks = PaddedInputDimensions / (SimdWidth / 2);
const auto inputVector256 = reinterpret_cast<const __m256i*>(input);
__m256i sum0 = _mm256_setzero_si256();
const auto row0 = reinterpret_cast<const __m256i*>(&weights[0]);
for (int j = 0; j < (int)NumChunks; ++j)
{
const __m256i in = inputVector256[j];
m256_add_dpbusd_epi32(sum0, in, row0[j]);
}
output[0] = m256_hadd(sum0, biases[0]);
}
else
# endif
{
constexpr IndexType NumChunks = PaddedInputDimensions / SimdWidth;
vec_t sum0 = vec_setzero();
const auto row0 = reinterpret_cast<const vec_t*>(&weights[0]);
for (int j = 0; j < (int)NumChunks; ++j)
{
const vec_t in = inputVector[j];
vec_add_dpbusd_32(sum0, in, row0[j]);
}
output[0] = vec_hadd(sum0, biases[0]);
}
}
#else
// Use old implementation for the other architectures.
auto output = reinterpret_cast<OutputType*>(buffer);
# if defined(USE_SSE2)
// At least a multiple of 16, with SSE2.
static_assert(InputDimensions % SimdWidth == 0);
constexpr IndexType NumChunks = InputDimensions / SimdWidth;
const __m128i Zeros = _mm_setzero_si128();
const auto inputVector = reinterpret_cast<const __m128i*>(input);
# elif defined(USE_MMX)
static_assert(InputDimensions % SimdWidth == 0);
constexpr IndexType NumChunks = InputDimensions / SimdWidth;
const __m64 Zeros = _mm_setzero_si64();
const auto inputVector = reinterpret_cast<const __m64*>(input);
# elif defined(USE_NEON)
static_assert(InputDimensions % SimdWidth == 0);
constexpr IndexType NumChunks = InputDimensions / SimdWidth;
const auto inputVector = reinterpret_cast<const int8x8_t*>(input);
# endif
for (IndexType i = 0; i < OutputDimensions; ++i) {
const IndexType offset = i * PaddedInputDimensions;
# if defined(USE_SSE2)
__m128i sumLo = _mm_cvtsi32_si128(biases[i]);
__m128i sumHi = Zeros;
const auto row = reinterpret_cast<const __m128i*>(&weights[offset]);
for (IndexType j = 0; j < NumChunks; ++j) {
__m128i row_j = _mm_load_si128(&row[j]);
__m128i input_j = _mm_load_si128(&inputVector[j]);
__m128i extendedRowLo = _mm_srai_epi16(_mm_unpacklo_epi8(row_j, row_j), 8);
__m128i extendedRowHi = _mm_srai_epi16(_mm_unpackhi_epi8(row_j, row_j), 8);
__m128i extendedInputLo = _mm_unpacklo_epi8(input_j, Zeros);
__m128i extendedInputHi = _mm_unpackhi_epi8(input_j, Zeros);
__m128i productLo = _mm_madd_epi16(extendedRowLo, extendedInputLo);
__m128i productHi = _mm_madd_epi16(extendedRowHi, extendedInputHi);
sumLo = _mm_add_epi32(sumLo, productLo);
sumHi = _mm_add_epi32(sumHi, productHi);
}
__m128i sum = _mm_add_epi32(sumLo, sumHi);
__m128i sumHigh_64 = _mm_shuffle_epi32(sum, _MM_SHUFFLE(1, 0, 3, 2));
sum = _mm_add_epi32(sum, sumHigh_64);
__m128i sum_second_32 = _mm_shufflelo_epi16(sum, _MM_SHUFFLE(1, 0, 3, 2));
sum = _mm_add_epi32(sum, sum_second_32);
output[i] = _mm_cvtsi128_si32(sum);
# elif defined(USE_MMX)
__m64 sumLo = _mm_cvtsi32_si64(biases[i]);
__m64 sumHi = Zeros;
const auto row = reinterpret_cast<const __m64*>(&weights[offset]);
for (IndexType j = 0; j < NumChunks; ++j) {
__m64 row_j = row[j];
__m64 input_j = inputVector[j];
__m64 extendedRowLo = _mm_srai_pi16(_mm_unpacklo_pi8(row_j, row_j), 8);
__m64 extendedRowHi = _mm_srai_pi16(_mm_unpackhi_pi8(row_j, row_j), 8);
__m64 extendedInputLo = _mm_unpacklo_pi8(input_j, Zeros);
__m64 extendedInputHi = _mm_unpackhi_pi8(input_j, Zeros);
__m64 productLo = _mm_madd_pi16(extendedRowLo, extendedInputLo);
__m64 productHi = _mm_madd_pi16(extendedRowHi, extendedInputHi);
sumLo = _mm_add_pi32(sumLo, productLo);
sumHi = _mm_add_pi32(sumHi, productHi);
}
__m64 sum = _mm_add_pi32(sumLo, sumHi);
sum = _mm_add_pi32(sum, _mm_unpackhi_pi32(sum, sum));
output[i] = _mm_cvtsi64_si32(sum);
# elif defined(USE_NEON)
int32x4_t sum = {biases[i]};
const auto row = reinterpret_cast<const int8x8_t*>(&weights[offset]);
for (IndexType j = 0; j < NumChunks; ++j) {
int16x8_t product = vmull_s8(inputVector[j * 2], row[j * 2]);
product = vmlal_s8(product, inputVector[j * 2 + 1], row[j * 2 + 1]);
sum = vpadalq_s16(sum, product);
}
output[i] = sum[0] + sum[1] + sum[2] + sum[3];
# else
# error "No vectorization possible but vectorization path entered."
# endif
}
# if defined(USE_MMX)
_mm_empty();
# endif
#endif
return output;
}
private:
using BiasType = typename BaseType::BiasType;
using WeightType = typename BaseType::WeightType;
using BaseType::biases;
using BaseType::weights;
};
} // namespace Stockfish::Eval::NNUE::Layers
#else
#define AFFINE_TRANSFORM_NO_VEC
#endif
#endif // #ifndef NNUE_LAYERS_AFFINE_TRANSFORM_VEC_H_INCLUDED

View File: src/nnue/layers/clipped_relu.h

@@ -23,11 +23,14 @@
#include "../nnue_common.h"
#include <cstdint>
#include <iostream>
namespace Stockfish::Eval::NNUE::Layers {
// Clipped ReLU
template <typename PreviousLayer>
-class ClippedReLU {
+class ClippedReLU_Base {
public:
// Input/output type
using InputType = typename PreviousLayer::OutputType;
@@ -35,8 +38,7 @@ namespace Stockfish::Eval::NNUE::Layers {
static_assert(std::is_same<InputType, std::int32_t>::value, "");
// Number of input/output dimensions
-static constexpr IndexType InputDimensions =
-    PreviousLayer::OutputDimensions;
+static constexpr IndexType InputDimensions = PreviousLayer::OutputDimensions;
static constexpr IndexType OutputDimensions = InputDimensions;
// Size of forward propagation buffer used in this layer
@@ -64,128 +66,30 @@ namespace Stockfish::Eval::NNUE::Layers {
return previousLayer.write_parameters(stream);
}
// Forward propagation
const OutputType* propagate(
const TransformedFeatureType* transformedFeatures, char* buffer) const {
const auto input = previousLayer.propagate(
transformedFeatures, buffer + SelfBufferSize);
const auto output = reinterpret_cast<OutputType*>(buffer);
#if defined(USE_AVX2)
if constexpr (InputDimensions % SimdWidth == 0) {
constexpr IndexType NumChunks = InputDimensions / SimdWidth;
const __m256i Zero = _mm256_setzero_si256();
const __m256i Offsets = _mm256_set_epi32(7, 3, 6, 2, 5, 1, 4, 0);
const auto in = reinterpret_cast<const __m256i*>(input);
const auto out = reinterpret_cast<__m256i*>(output);
for (IndexType i = 0; i < NumChunks; ++i) {
const __m256i words0 = _mm256_srai_epi16(_mm256_packs_epi32(
_mm256_load_si256(&in[i * 4 + 0]),
_mm256_load_si256(&in[i * 4 + 1])), WeightScaleBits);
const __m256i words1 = _mm256_srai_epi16(_mm256_packs_epi32(
_mm256_load_si256(&in[i * 4 + 2]),
_mm256_load_si256(&in[i * 4 + 3])), WeightScaleBits);
_mm256_store_si256(&out[i], _mm256_permutevar8x32_epi32(_mm256_max_epi8(
_mm256_packs_epi16(words0, words1), Zero), Offsets));
}
} else {
constexpr IndexType NumChunks = InputDimensions / (SimdWidth / 2);
const __m128i Zero = _mm_setzero_si128();
const auto in = reinterpret_cast<const __m128i*>(input);
const auto out = reinterpret_cast<__m128i*>(output);
for (IndexType i = 0; i < NumChunks; ++i) {
const __m128i words0 = _mm_srai_epi16(_mm_packs_epi32(
_mm_load_si128(&in[i * 4 + 0]),
_mm_load_si128(&in[i * 4 + 1])), WeightScaleBits);
const __m128i words1 = _mm_srai_epi16(_mm_packs_epi32(
_mm_load_si128(&in[i * 4 + 2]),
_mm_load_si128(&in[i * 4 + 3])), WeightScaleBits);
const __m128i packedbytes = _mm_packs_epi16(words0, words1);
_mm_store_si128(&out[i], _mm_max_epi8(packedbytes, Zero));
}
}
constexpr IndexType Start =
InputDimensions % SimdWidth == 0
? InputDimensions / SimdWidth * SimdWidth
: InputDimensions / (SimdWidth / 2) * (SimdWidth / 2);
#elif defined(USE_SSE2)
constexpr IndexType NumChunks = InputDimensions / SimdWidth;
#ifdef USE_SSE41
const __m128i Zero = _mm_setzero_si128();
#else
const __m128i k0x80s = _mm_set1_epi8(-128);
#endif
const auto in = reinterpret_cast<const __m128i*>(input);
const auto out = reinterpret_cast<__m128i*>(output);
for (IndexType i = 0; i < NumChunks; ++i) {
const __m128i words0 = _mm_srai_epi16(_mm_packs_epi32(
_mm_load_si128(&in[i * 4 + 0]),
_mm_load_si128(&in[i * 4 + 1])), WeightScaleBits);
const __m128i words1 = _mm_srai_epi16(_mm_packs_epi32(
_mm_load_si128(&in[i * 4 + 2]),
_mm_load_si128(&in[i * 4 + 3])), WeightScaleBits);
const __m128i packedbytes = _mm_packs_epi16(words0, words1);
_mm_store_si128(&out[i],
#ifdef USE_SSE41
_mm_max_epi8(packedbytes, Zero)
#else
_mm_subs_epi8(_mm_adds_epi8(packedbytes, k0x80s), k0x80s)
#endif
);
}
constexpr IndexType Start = NumChunks * SimdWidth;
#elif defined(USE_MMX)
constexpr IndexType NumChunks = InputDimensions / SimdWidth;
const __m64 k0x80s = _mm_set1_pi8(-128);
const auto in = reinterpret_cast<const __m64*>(input);
const auto out = reinterpret_cast<__m64*>(output);
for (IndexType i = 0; i < NumChunks; ++i) {
const __m64 words0 = _mm_srai_pi16(
_mm_packs_pi32(in[i * 4 + 0], in[i * 4 + 1]),
WeightScaleBits);
const __m64 words1 = _mm_srai_pi16(
_mm_packs_pi32(in[i * 4 + 2], in[i * 4 + 3]),
WeightScaleBits);
const __m64 packedbytes = _mm_packs_pi16(words0, words1);
out[i] = _mm_subs_pi8(_mm_adds_pi8(packedbytes, k0x80s), k0x80s);
}
_mm_empty();
constexpr IndexType Start = NumChunks * SimdWidth;
#elif defined(USE_NEON)
constexpr IndexType NumChunks = InputDimensions / (SimdWidth / 2);
const int8x8_t Zero = {0};
const auto in = reinterpret_cast<const int32x4_t*>(input);
const auto out = reinterpret_cast<int8x8_t*>(output);
for (IndexType i = 0; i < NumChunks; ++i) {
int16x8_t shifted;
const auto pack = reinterpret_cast<int16x4_t*>(&shifted);
pack[0] = vqshrn_n_s32(in[i * 2 + 0], WeightScaleBits);
pack[1] = vqshrn_n_s32(in[i * 2 + 1], WeightScaleBits);
out[i] = vmax_s8(vqmovn_s16(shifted), Zero);
}
constexpr IndexType Start = NumChunks * (SimdWidth / 2);
#else
constexpr IndexType Start = 0;
#endif
for (IndexType i = Start; i < InputDimensions; ++i) {
output[i] = static_cast<OutputType>(
std::max(0, std::min(127, input[i] >> WeightScaleBits)));
}
return output;
}
-private:
+protected:
PreviousLayer previousLayer;
};
} // namespace Stockfish::Eval::NNUE::Layers
#include "clipped_relu_vec.h"
#if defined (CLIPPED_RELU_NO_VEC)
# include "clipped_relu_scalar.h"
namespace Stockfish::Eval::NNUE::Layers {
template <typename PreviousLayer>
using ClippedReLU = ClippedReLU_Scalar<PreviousLayer>;
}
#else
namespace Stockfish::Eval::NNUE::Layers {
template <typename PreviousLayer>
using ClippedReLU = ClippedReLU_Vec<PreviousLayer>;
}
#endif
#endif // NNUE_LAYERS_CLIPPED_RELU_H_INCLUDED

View File: src/nnue/layers/clipped_relu_scalar.h (new file)

@@ -0,0 +1,66 @@
/*
Stockfish, a UCI chess playing engine derived from Glaurung 2.1
Copyright (C) 2004-2021 The Stockfish developers (see AUTHORS file)
Stockfish is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Stockfish is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
// Definition of layer ClippedReLU of NNUE evaluation function
#ifndef NNUE_LAYERS_CLIPPED_RELU_SCALAR_H_INCLUDED
#define NNUE_LAYERS_CLIPPED_RELU_SCALAR_H_INCLUDED
#if !defined (NNUE_LAYERS_CLIPPED_RELU_H_INCLUDED)
#error "This file can only be included through clipped_relu.h"
#endif
namespace Stockfish::Eval::NNUE::Layers {
// Clipped ReLU
template <typename PreviousLayer>
class ClippedReLU_Scalar : public ClippedReLU_Base<PreviousLayer> {
public:
using BaseType = ClippedReLU_Base<PreviousLayer>;
using InputType = typename BaseType::InputType;
using OutputType = typename BaseType::OutputType;
static constexpr auto InputDimensions = BaseType::InputDimensions;
static constexpr auto OutputDimensions = BaseType::OutputDimensions;
static constexpr auto SelfBufferSize = BaseType::SelfBufferSize;
static constexpr auto BufferSize = BaseType::BufferSize;
// Forward propagation
const OutputType* propagate(
const TransformedFeatureType* transformedFeatures,
char* buffer) const {
const auto input = BaseType::previousLayer.propagate(
transformedFeatures, buffer + SelfBufferSize);
const auto output = reinterpret_cast<OutputType*>(buffer);
for (IndexType i = 0; i < InputDimensions; ++i) {
int x = input[i] >> WeightScaleBits;
if (x < 0) x = 0;
if (x > 127) x = 127;
output[i] = static_cast<OutputType>(x);
}
return output;
}
};
} // namespace Stockfish::Eval::NNUE::Layers
#endif // NNUE_LAYERS_CLIPPED_RELU_SCALAR_H_INCLUDED
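For concreteness: WeightScaleBits is defined as 6 in nnue_common.h, so the scalar layer above computes clamp(x >> 6, 0, 127). A few worked values, as a self-contained sketch:

#include <algorithm>
#include <cassert>
#include <cstdint>

std::uint8_t clipped_relu_ref(std::int32_t x) {
    return static_cast<std::uint8_t>(std::max(0, std::min(127, x >> 6)));
}

int main() {
    assert(clipped_relu_ref(-500) == 0);       // negative inputs clamp to 0
    assert(clipped_relu_ref(320) == 5);        // 320 >> 6 == 5
    assert(clipped_relu_ref(1 << 20) == 127);  // large inputs saturate at 127
}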

View File: src/nnue/layers/clipped_relu_vec.h (new file)

@@ -0,0 +1,197 @@
/*
Stockfish, a UCI chess playing engine derived from Glaurung 2.1
Copyright (C) 2004-2021 The Stockfish developers (see AUTHORS file)
Stockfish is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Stockfish is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
// Definition of layer ClippedReLU of NNUE evaluation function
#ifndef NNUE_LAYERS_CLIPPED_RELU_VEC_H_INCLUDED
#define NNUE_LAYERS_CLIPPED_RELU_VEC_H_INCLUDED
#if !defined (NNUE_LAYERS_CLIPPED_RELU_H_INCLUDED)
#error "This file can only be included through clipped_relu.h"
#endif
#if defined (USE_MMX) || defined (USE_SSE2) || defined (USE_NEON)
namespace Stockfish::Eval::NNUE::Layers {
// Clipped ReLU
template <typename PreviousLayer>
class ClippedReLU_Vec : public ClippedReLU_Base<PreviousLayer> {
public:
using BaseType = ClippedReLU_Base<PreviousLayer>;
using InputType = typename BaseType::InputType;
using OutputType = typename BaseType::OutputType;
static constexpr auto InputDimensions = BaseType::InputDimensions;
static constexpr auto OutputDimensions = BaseType::OutputDimensions;
static constexpr auto SelfBufferSize = BaseType::SelfBufferSize;
static constexpr auto BufferSize = BaseType::BufferSize;
// SIMD width (in bytes)
#if defined(USE_AVX2)
static constexpr std::size_t SimdWidth = 32;
#elif defined(USE_SSE2)
static constexpr std::size_t SimdWidth = 16;
#elif defined(USE_MMX)
static constexpr std::size_t SimdWidth = 8;
#elif defined(USE_NEON)
static constexpr std::size_t SimdWidth = 16;
#endif
// Forward propagation
const OutputType* propagate(
const TransformedFeatureType* transformedFeatures,
char* buffer) const {
const auto input = BaseType::previousLayer.propagate(
transformedFeatures, buffer + SelfBufferSize);
const auto output = reinterpret_cast<OutputType*>(buffer);
#if defined(USE_AVX2)
if constexpr (InputDimensions % SimdWidth == 0) {
constexpr IndexType NumChunks = InputDimensions / SimdWidth;
const __m256i Zero = _mm256_setzero_si256();
const __m256i Offsets = _mm256_set_epi32(7, 3, 6, 2, 5, 1, 4, 0);
const auto in = reinterpret_cast<const __m256i*>(input);
const auto out = reinterpret_cast<__m256i*>(output);
for (IndexType i = 0; i < NumChunks; ++i) {
const __m256i words0 = _mm256_srai_epi16(_mm256_packs_epi32(
_mm256_load_si256(&in[i * 4 + 0]),
_mm256_load_si256(&in[i * 4 + 1])), WeightScaleBits);
const __m256i words1 = _mm256_srai_epi16(_mm256_packs_epi32(
_mm256_load_si256(&in[i * 4 + 2]),
_mm256_load_si256(&in[i * 4 + 3])), WeightScaleBits);
_mm256_store_si256(&out[i], _mm256_permutevar8x32_epi32(_mm256_max_epi8(
_mm256_packs_epi16(words0, words1), Zero), Offsets));
}
} else {
constexpr IndexType NumChunks = InputDimensions / (SimdWidth / 2);
const __m128i Zero = _mm_setzero_si128();
const auto in = reinterpret_cast<const __m128i*>(input);
const auto out = reinterpret_cast<__m128i*>(output);
for (IndexType i = 0; i < NumChunks; ++i) {
const __m128i words0 = _mm_srai_epi16(_mm_packs_epi32(
_mm_load_si128(&in[i * 4 + 0]),
_mm_load_si128(&in[i * 4 + 1])), WeightScaleBits);
const __m128i words1 = _mm_srai_epi16(_mm_packs_epi32(
_mm_load_si128(&in[i * 4 + 2]),
_mm_load_si128(&in[i * 4 + 3])), WeightScaleBits);
const __m128i packedbytes = _mm_packs_epi16(words0, words1);
_mm_store_si128(&out[i], _mm_max_epi8(packedbytes, Zero));
}
}
constexpr IndexType Start =
InputDimensions % SimdWidth == 0
? InputDimensions / SimdWidth * SimdWidth
: InputDimensions / (SimdWidth / 2) * (SimdWidth / 2);
#elif defined(USE_SSE2)
constexpr IndexType NumChunks = InputDimensions / SimdWidth;
# ifdef USE_SSE41
const __m128i Zero = _mm_setzero_si128();
# else
const __m128i k0x80s = _mm_set1_epi8(-128);
# endif
const auto in = reinterpret_cast<const __m128i*>(input);
const auto out = reinterpret_cast<__m128i*>(output);
for (IndexType i = 0; i < NumChunks; ++i) {
const __m128i words0 = _mm_srai_epi16(_mm_packs_epi32(
_mm_load_si128(&in[i * 4 + 0]),
_mm_load_si128(&in[i * 4 + 1])), WeightScaleBits);
const __m128i words1 = _mm_srai_epi16(_mm_packs_epi32(
_mm_load_si128(&in[i * 4 + 2]),
_mm_load_si128(&in[i * 4 + 3])), WeightScaleBits);
const __m128i packedbytes = _mm_packs_epi16(words0, words1);
_mm_store_si128(&out[i],
# ifdef USE_SSE41
_mm_max_epi8(packedbytes, Zero)
# else
_mm_subs_epi8(_mm_adds_epi8(packedbytes, k0x80s), k0x80s)
# endif
);
}
constexpr IndexType Start = NumChunks * SimdWidth;
#elif defined(USE_MMX)
constexpr IndexType NumChunks = InputDimensions / SimdWidth;
const __m64 k0x80s = _mm_set1_pi8(-128);
const auto in = reinterpret_cast<const __m64*>(input);
const auto out = reinterpret_cast<__m64*>(output);
for (IndexType i = 0; i < NumChunks; ++i) {
const __m64 words0 = _mm_srai_pi16(
_mm_packs_pi32(in[i * 4 + 0], in[i * 4 + 1]),
WeightScaleBits);
const __m64 words1 = _mm_srai_pi16(
_mm_packs_pi32(in[i * 4 + 2], in[i * 4 + 3]),
WeightScaleBits);
const __m64 packedbytes = _mm_packs_pi16(words0, words1);
out[i] = _mm_subs_pi8(_mm_adds_pi8(packedbytes, k0x80s), k0x80s);
}
_mm_empty();
constexpr IndexType Start = NumChunks * SimdWidth;
#elif defined(USE_NEON)
constexpr IndexType NumChunks = InputDimensions / (SimdWidth / 2);
const int8x8_t Zero = {0};
const auto in = reinterpret_cast<const int32x4_t*>(input);
const auto out = reinterpret_cast<int8x8_t*>(output);
for (IndexType i = 0; i < NumChunks; ++i) {
int16x8_t shifted;
const auto pack = reinterpret_cast<int16x4_t*>(&shifted);
pack[0] = vqshrn_n_s32(in[i * 2 + 0], WeightScaleBits);
pack[1] = vqshrn_n_s32(in[i * 2 + 1], WeightScaleBits);
out[i] = vmax_s8(vqmovn_s16(shifted), Zero);
}
constexpr IndexType Start = NumChunks * (SimdWidth / 2);
#else
# error "No vectorization possible but vectorization path entered."
#endif
for (IndexType i = Start; i < InputDimensions; ++i) {
int x = input[i] >> WeightScaleBits;
if (x < 0) x = 0;
if (x > 127) x = 127;
output[i] = static_cast<OutputType>(x);
}
return output;
}
};
} // namespace Stockfish::Eval::NNUE::Layers
#else
#define CLIPPED_RELU_NO_VEC
#endif
#endif // NNUE_LAYERS_CLIPPED_RELU_VEC_H_INCLUDED

View File: src/nnue/layers/input_slice.h

@@ -30,7 +30,7 @@ template <IndexType OutDims, IndexType Offset = 0>
class InputSlice {
public:
// Need to maintain alignment
-static_assert(Offset % MaxSimdWidth == 0, "");
+static_assert(Offset % 32 == 0, "");
// Output type
using OutputType = TransformedFeatureType;

View File: src/nnue/nnue_architecture.h

@@ -51,7 +51,6 @@ namespace Stockfish::Eval::NNUE {
using Network = Layers::OutputLayer;
-static_assert(TransformedFeatureDimensions % MaxSimdWidth == 0, "");
static_assert(Network::OutputDimensions == 1, "");
static_assert(std::is_same<Network::OutputType, std::int32_t>::value, "");

View File: src/nnue/nnue_common.h

@@ -57,22 +57,6 @@ namespace Stockfish::Eval::NNUE {
// Size of cache line (in bytes)
constexpr std::size_t CacheLineSize = 64;
-// SIMD width (in bytes)
-#if defined(USE_AVX2)
-constexpr std::size_t SimdWidth = 32;
-#elif defined(USE_SSE2)
-constexpr std::size_t SimdWidth = 16;
-#elif defined(USE_MMX)
-constexpr std::size_t SimdWidth = 8;
-#elif defined(USE_NEON)
-constexpr std::size_t SimdWidth = 16;
-#endif
 constexpr std::size_t MaxSimdWidth = 32;
// Type of input feature after conversion
using TransformedFeatureType = std::uint8_t;
using IndexType = std::uint32_t;

View File: src/nnue/nnue_feature_transformer.h

@@ -24,159 +24,27 @@
#include "nnue_common.h"
#include "nnue_architecture.h"
#include <cstring> // std::memset()
#include "../position.h"
#include <iostream>
#include <cstdint>
namespace Stockfish::Eval::NNUE {
using BiasType = std::int16_t;
using WeightType = std::int16_t;
using PSQTWeightType = std::int32_t;
// If vector instructions are enabled, we update and refresh the
// accumulator tile by tile such that each tile fits in the CPU's
// vector registers.
#define VECTOR
static_assert(PSQTBuckets % 8 == 0,
"Per-feature PSQT values cannot be processed at a granularity lower than 8 at a time.");
#ifdef USE_AVX512
typedef __m512i vec_t;
typedef __m256i psqt_vec_t;
#define vec_load(a) _mm512_load_si512(a)
#define vec_store(a,b) _mm512_store_si512(a,b)
#define vec_add_16(a,b) _mm512_add_epi16(a,b)
#define vec_sub_16(a,b) _mm512_sub_epi16(a,b)
#define vec_load_psqt(a) _mm256_load_si256(a)
#define vec_store_psqt(a,b) _mm256_store_si256(a,b)
#define vec_add_psqt_32(a,b) _mm256_add_epi32(a,b)
#define vec_sub_psqt_32(a,b) _mm256_sub_epi32(a,b)
#define vec_zero_psqt() _mm256_setzero_si256()
#define NumRegistersSIMD 32
#elif USE_AVX2
typedef __m256i vec_t;
typedef __m256i psqt_vec_t;
#define vec_load(a) _mm256_load_si256(a)
#define vec_store(a,b) _mm256_store_si256(a,b)
#define vec_add_16(a,b) _mm256_add_epi16(a,b)
#define vec_sub_16(a,b) _mm256_sub_epi16(a,b)
#define vec_load_psqt(a) _mm256_load_si256(a)
#define vec_store_psqt(a,b) _mm256_store_si256(a,b)
#define vec_add_psqt_32(a,b) _mm256_add_epi32(a,b)
#define vec_sub_psqt_32(a,b) _mm256_sub_epi32(a,b)
#define vec_zero_psqt() _mm256_setzero_si256()
#define NumRegistersSIMD 16
#elif USE_SSE2
typedef __m128i vec_t;
typedef __m128i psqt_vec_t;
#define vec_load(a) (*(a))
#define vec_store(a,b) *(a)=(b)
#define vec_add_16(a,b) _mm_add_epi16(a,b)
#define vec_sub_16(a,b) _mm_sub_epi16(a,b)
#define vec_load_psqt(a) (*(a))
#define vec_store_psqt(a,b) *(a)=(b)
#define vec_add_psqt_32(a,b) _mm_add_epi32(a,b)
#define vec_sub_psqt_32(a,b) _mm_sub_epi32(a,b)
#define vec_zero_psqt() _mm_setzero_si128()
#define NumRegistersSIMD (Is64Bit ? 16 : 8)
#elif USE_MMX
typedef __m64 vec_t;
typedef __m64 psqt_vec_t;
#define vec_load(a) (*(a))
#define vec_store(a,b) *(a)=(b)
#define vec_add_16(a,b) _mm_add_pi16(a,b)
#define vec_sub_16(a,b) _mm_sub_pi16(a,b)
#define vec_load_psqt(a) (*(a))
#define vec_store_psqt(a,b) *(a)=(b)
#define vec_add_psqt_32(a,b) _mm_add_pi32(a,b)
#define vec_sub_psqt_32(a,b) _mm_sub_pi32(a,b)
#define vec_zero_psqt() _mm_setzero_si64()
#define NumRegistersSIMD 8
#elif USE_NEON
typedef int16x8_t vec_t;
typedef int32x4_t psqt_vec_t;
#define vec_load(a) (*(a))
#define vec_store(a,b) *(a)=(b)
#define vec_add_16(a,b) vaddq_s16(a,b)
#define vec_sub_16(a,b) vsubq_s16(a,b)
#define vec_load_psqt(a) (*(a))
#define vec_store_psqt(a,b) *(a)=(b)
#define vec_add_psqt_32(a,b) vaddq_s32(a,b)
#define vec_sub_psqt_32(a,b) vsubq_s32(a,b)
#define vec_zero_psqt() psqt_vec_t{0}
#define NumRegistersSIMD 16
#else
#undef VECTOR
#endif
#ifdef VECTOR
// Compute optimal SIMD register count for feature transformer accumulation.
// We use __m* types as template arguments, which causes GCC to emit warnings
// about losing some attribute information. This is irrelevant to us as we
// only take their size, so the following pragmas are harmless.
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wignored-attributes"
template <typename SIMDRegisterType,
typename LaneType,
int NumLanes,
int MaxRegisters>
static constexpr int BestRegisterCount()
{
#define RegisterSize sizeof(SIMDRegisterType)
#define LaneSize sizeof(LaneType)
static_assert(RegisterSize >= LaneSize);
static_assert(MaxRegisters <= NumRegistersSIMD);
static_assert(MaxRegisters > 0);
static_assert(NumRegistersSIMD > 0);
static_assert(RegisterSize % LaneSize == 0);
static_assert((NumLanes * LaneSize) % RegisterSize == 0);
const int ideal = (NumLanes * LaneSize) / RegisterSize;
if (ideal <= MaxRegisters)
return ideal;
// Look for the largest divisor of the ideal register count that does not exceed MaxRegisters
for (int divisor = MaxRegisters; divisor > 1; --divisor)
if (ideal % divisor == 0)
return divisor;
return 1;
}
static constexpr int NumRegs = BestRegisterCount<vec_t, WeightType, TransformedFeatureDimensions, NumRegistersSIMD>();
static constexpr int NumPsqtRegs = BestRegisterCount<psqt_vec_t, PSQTWeightType, PSQTBuckets, NumRegistersSIMD>();
#pragma GCC diagnostic pop
#endif
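The register-count computation above, restated with plain integers plus two worked cases (the lane counts are illustrative, not taken from this commit):

constexpr int best_register_count(int registerSize, int laneSize,
                                  int numLanes, int maxRegisters) {
    const int ideal = (numLanes * laneSize) / registerSize;
    if (ideal <= maxRegisters)
        return ideal;
    for (int divisor = maxRegisters; divisor > 1; --divisor)
        if (ideal % divisor == 0)    // largest divisor of 'ideal' that fits
            return divisor;
    return 1;
}

// AVX2: 32-byte registers, 2-byte lanes, 1024 lanes, 16 registers usable:
// ideal = 1024 * 2 / 32 = 64 > 16; largest divisor of 64 not above 16 -> 16.
static_assert(best_register_count(32, 2, 1024, 16) == 16, "");
// AVX-512: 64-byte registers, 32 registers usable: ideal = 32 -> 32.
static_assert(best_register_count(64, 2, 1024, 32) == 32, "");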
static_assert(TransformedFeatureDimensions % 32 == 0, "");
// Input feature converter
-class FeatureTransformer {
+class FeatureTransformer_Base {
protected:
using BiasType = std::int16_t;
using WeightType = std::int16_t;
using PSQTWeightType = std::int32_t;
private:
// Number of output dimensions for one side
static constexpr IndexType HalfDimensions = TransformedFeatureDimensions;
#ifdef VECTOR
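// (A tile is one register file's worth of data: NumRegs registers of
// sizeof(vec_t) bytes each, divided by the element size: 2 bytes per int16
// accumulator entry, 4 bytes per int32 PSQT entry.)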
static constexpr IndexType TileHeight = NumRegs * sizeof(vec_t) / 2;
static constexpr IndexType PsqtTileHeight = NumPsqtRegs * sizeof(psqt_vec_t) / 4;
static_assert(HalfDimensions % TileHeight == 0, "TileHeight must divide HalfDimensions");
static_assert(PSQTBuckets % PsqtTileHeight == 0, "PsqtTileHeight must divide PSQTBuckets");
#endif
public:
// Output type
using OutputType = TransformedFeatureType;
@@ -214,173 +82,13 @@ namespace Stockfish::Eval::NNUE {
return !stream.fail();
}
// Convert input features
std::int32_t transform(const Position& pos, OutputType* output, int bucket) const {
update_accumulator(pos, WHITE);
update_accumulator(pos, BLACK);
protected:
alignas(CacheLineSize) BiasType biases[HalfDimensions];
alignas(CacheLineSize) WeightType weights[HalfDimensions * InputDimensions];
alignas(CacheLineSize) PSQTWeightType psqtWeights[InputDimensions * PSQTBuckets];
const Color perspectives[2] = {pos.side_to_move(), ~pos.side_to_move()};
const auto& accumulation = pos.state()->accumulator.accumulation;
const auto& psqtAccumulation = pos.state()->accumulator.psqtAccumulation;
const auto psqt = (
psqtAccumulation[perspectives[0]][bucket]
- psqtAccumulation[perspectives[1]][bucket]
) / 2;
#if defined(USE_AVX512)
constexpr IndexType NumChunks = HalfDimensions / (SimdWidth * 2);
static_assert(HalfDimensions % (SimdWidth * 2) == 0);
const __m512i Control = _mm512_setr_epi64(0, 2, 4, 6, 1, 3, 5, 7);
const __m512i Zero = _mm512_setzero_si512();
for (IndexType p = 0; p < 2; ++p)
{
const IndexType offset = HalfDimensions * p;
auto out = reinterpret_cast<__m512i*>(&output[offset]);
for (IndexType j = 0; j < NumChunks; ++j)
{
__m512i sum0 = _mm512_load_si512(&reinterpret_cast<const __m512i*>
(accumulation[perspectives[p]])[j * 2 + 0]);
__m512i sum1 = _mm512_load_si512(&reinterpret_cast<const __m512i*>
(accumulation[perspectives[p]])[j * 2 + 1]);
_mm512_store_si512(&out[j], _mm512_permutexvar_epi64(Control,
_mm512_max_epi8(_mm512_packs_epi16(sum0, sum1), Zero)));
}
}
return psqt;
#elif defined(USE_AVX2)
constexpr IndexType NumChunks = HalfDimensions / SimdWidth;
constexpr int Control = 0b11011000;
const __m256i Zero = _mm256_setzero_si256();
for (IndexType p = 0; p < 2; ++p)
{
const IndexType offset = HalfDimensions * p;
auto out = reinterpret_cast<__m256i*>(&output[offset]);
for (IndexType j = 0; j < NumChunks; ++j)
{
__m256i sum0 = _mm256_load_si256(&reinterpret_cast<const __m256i*>
(accumulation[perspectives[p]])[j * 2 + 0]);
__m256i sum1 = _mm256_load_si256(&reinterpret_cast<const __m256i*>
(accumulation[perspectives[p]])[j * 2 + 1]);
_mm256_store_si256(&out[j], _mm256_permute4x64_epi64(
_mm256_max_epi8(_mm256_packs_epi16(sum0, sum1), Zero), Control));
}
}
return psqt;
#elif defined(USE_SSE2)
#ifdef USE_SSE41
constexpr IndexType NumChunks = HalfDimensions / SimdWidth;
const __m128i Zero = _mm_setzero_si128();
#else
constexpr IndexType NumChunks = HalfDimensions / SimdWidth;
const __m128i k0x80s = _mm_set1_epi8(-128);
#endif
for (IndexType p = 0; p < 2; ++p)
{
const IndexType offset = HalfDimensions * p;
auto out = reinterpret_cast<__m128i*>(&output[offset]);
for (IndexType j = 0; j < NumChunks; ++j)
{
__m128i sum0 = _mm_load_si128(&reinterpret_cast<const __m128i*>
(accumulation[perspectives[p]])[j * 2 + 0]);
__m128i sum1 = _mm_load_si128(&reinterpret_cast<const __m128i*>
(accumulation[perspectives[p]])[j * 2 + 1]);
const __m128i packedbytes = _mm_packs_epi16(sum0, sum1);
#ifdef USE_SSE41
_mm_store_si128(&out[j], _mm_max_epi8(packedbytes, Zero));
#else
_mm_store_si128(&out[j], _mm_subs_epi8(_mm_adds_epi8(packedbytes, k0x80s), k0x80s));
#endif
}
}
return psqt;
#elif defined(USE_MMX)
constexpr IndexType NumChunks = HalfDimensions / SimdWidth;
const __m64 k0x80s = _mm_set1_pi8(-128);
for (IndexType p = 0; p < 2; ++p)
{
const IndexType offset = HalfDimensions * p;
auto out = reinterpret_cast<__m64*>(&output[offset]);
for (IndexType j = 0; j < NumChunks; ++j)
{
__m64 sum0 = *(&reinterpret_cast<const __m64*>(accumulation[perspectives[p]])[j * 2 + 0]);
__m64 sum1 = *(&reinterpret_cast<const __m64*>(accumulation[perspectives[p]])[j * 2 + 1]);
const __m64 packedbytes = _mm_packs_pi16(sum0, sum1);
out[j] = _mm_subs_pi8(_mm_adds_pi8(packedbytes, k0x80s), k0x80s);
}
}
_mm_empty();
return psqt;
#elif defined(USE_NEON)
constexpr IndexType NumChunks = HalfDimensions / (SimdWidth / 2);
const int8x8_t Zero = {0};
for (IndexType p = 0; p < 2; ++p)
{
const IndexType offset = HalfDimensions * p;
const auto out = reinterpret_cast<int8x8_t*>(&output[offset]);
for (IndexType j = 0; j < NumChunks; ++j)
{
int16x8_t sum = reinterpret_cast<const int16x8_t*>(accumulation[perspectives[p]])[j];
out[j] = vmax_s8(vqmovn_s16(sum), Zero);
}
}
return psqt;
#else
for (IndexType p = 0; p < 2; ++p)
{
const IndexType offset = HalfDimensions * p;
for (IndexType j = 0; j < HalfDimensions; ++j)
{
BiasType sum = accumulation[perspectives[p]][j];
output[offset + j] = static_cast<OutputType>(std::max<int>(0, std::min<int>(127, sum)));
}
}
return psqt;
#endif
} // end of function transform()
private:
void update_accumulator(const Position& pos, const Color perspective) const {
// The size must be enough to contain the largest possible update.
// That might depend on the feature set and generally relies on the
// feature set's update cost calculation to be correct and never
// allow updates with more added/removed features than MaxActiveDimensions.
using IndexList = ValueList<IndexType, FeatureSet::MaxActiveDimensions>;
#ifdef VECTOR
// Gcc-10.2 unnecessarily spills AVX2 registers if this array
// is defined in the VECTOR code below, once in each branch
vec_t acc[NumRegs];
psqt_vec_t psqt[NumPsqtRegs];
#endif
// Look for a usable accumulator of an earlier position. We keep track
// of the estimated gain in terms of features to be added/subtracted.
std::pair<StateInfo*, StateInfo*> try_search_for_computed(const Position& pos, Color perspective) const
{
StateInfo *st = pos.state(), *next = nullptr;
int gain = FeatureSet::refresh_cost(pos);
while (st->previous && !st->accumulator.computed[perspective])
@@ -393,223 +101,28 @@ namespace Stockfish::Eval::NNUE {
next = st;
st = st->previous;
}
if (st->accumulator.computed[perspective])
{
if (next == nullptr)
return;
// Update incrementally in two steps. First, we update the "next"
// accumulator. Then, we update the current accumulator (pos.state()).
// Gather all features to be updated.
const Square ksq = pos.square<KING>(perspective);
IndexList removed[2], added[2];
FeatureSet::append_changed_indices(
ksq, next, perspective, removed[0], added[0]);
for (StateInfo *st2 = pos.state(); st2 != next; st2 = st2->previous)
FeatureSet::append_changed_indices(
ksq, st2, perspective, removed[1], added[1]);
// Mark the accumulators as computed.
next->accumulator.computed[perspective] = true;
pos.state()->accumulator.computed[perspective] = true;
// Now update the accumulators listed in states_to_update[], where the last element is a sentinel.
StateInfo *states_to_update[3] =
{ next, next == pos.state() ? nullptr : pos.state(), nullptr };
#ifdef VECTOR
for (IndexType j = 0; j < HalfDimensions / TileHeight; ++j)
{
// Load accumulator
auto accTile = reinterpret_cast<vec_t*>(
&st->accumulator.accumulation[perspective][j * TileHeight]);
for (IndexType k = 0; k < NumRegs; ++k)
acc[k] = vec_load(&accTile[k]);
for (IndexType i = 0; states_to_update[i]; ++i)
{
// Difference calculation for the deactivated features
for (const auto index : removed[i])
{
const IndexType offset = HalfDimensions * index + j * TileHeight;
auto column = reinterpret_cast<const vec_t*>(&weights[offset]);
for (IndexType k = 0; k < NumRegs; ++k)
acc[k] = vec_sub_16(acc[k], column[k]);
}
// Difference calculation for the activated features
for (const auto index : added[i])
{
const IndexType offset = HalfDimensions * index + j * TileHeight;
auto column = reinterpret_cast<const vec_t*>(&weights[offset]);
for (IndexType k = 0; k < NumRegs; ++k)
acc[k] = vec_add_16(acc[k], column[k]);
}
// Store accumulator
accTile = reinterpret_cast<vec_t*>(
&states_to_update[i]->accumulator.accumulation[perspective][j * TileHeight]);
for (IndexType k = 0; k < NumRegs; ++k)
vec_store(&accTile[k], acc[k]);
}
}
for (IndexType j = 0; j < PSQTBuckets / PsqtTileHeight; ++j)
{
// Load accumulator
auto accTilePsqt = reinterpret_cast<psqt_vec_t*>(
&st->accumulator.psqtAccumulation[perspective][j * PsqtTileHeight]);
for (std::size_t k = 0; k < NumPsqtRegs; ++k)
psqt[k] = vec_load_psqt(&accTilePsqt[k]);
for (IndexType i = 0; states_to_update[i]; ++i)
{
// Difference calculation for the deactivated features
for (const auto index : removed[i])
{
const IndexType offset = PSQTBuckets * index + j * PsqtTileHeight;
auto columnPsqt = reinterpret_cast<const psqt_vec_t*>(&psqtWeights[offset]);
for (std::size_t k = 0; k < NumPsqtRegs; ++k)
psqt[k] = vec_sub_psqt_32(psqt[k], columnPsqt[k]);
}
// Difference calculation for the activated features
for (const auto index : added[i])
{
const IndexType offset = PSQTBuckets * index + j * PsqtTileHeight;
auto columnPsqt = reinterpret_cast<const psqt_vec_t*>(&psqtWeights[offset]);
for (std::size_t k = 0; k < NumPsqtRegs; ++k)
psqt[k] = vec_add_psqt_32(psqt[k], columnPsqt[k]);
}
// Store accumulator
accTilePsqt = reinterpret_cast<psqt_vec_t*>(
&states_to_update[i]->accumulator.psqtAccumulation[perspective][j * PsqtTileHeight]);
for (std::size_t k = 0; k < NumPsqtRegs; ++k)
vec_store_psqt(&accTilePsqt[k], psqt[k]);
}
}
#else
for (IndexType i = 0; states_to_update[i]; ++i)
{
std::memcpy(states_to_update[i]->accumulator.accumulation[perspective],
st->accumulator.accumulation[perspective],
HalfDimensions * sizeof(BiasType));
for (std::size_t k = 0; k < PSQTBuckets; ++k)
states_to_update[i]->accumulator.psqtAccumulation[perspective][k] = st->accumulator.psqtAccumulation[perspective][k];
st = states_to_update[i];
// Difference calculation for the deactivated features
for (const auto index : removed[i])
{
const IndexType offset = HalfDimensions * index;
for (IndexType j = 0; j < HalfDimensions; ++j)
st->accumulator.accumulation[perspective][j] -= weights[offset + j];
for (std::size_t k = 0; k < PSQTBuckets; ++k)
st->accumulator.psqtAccumulation[perspective][k] -= psqtWeights[index * PSQTBuckets + k];
}
// Difference calculation for the activated features
for (const auto index : added[i])
{
const IndexType offset = HalfDimensions * index;
for (IndexType j = 0; j < HalfDimensions; ++j)
st->accumulator.accumulation[perspective][j] += weights[offset + j];
for (std::size_t k = 0; k < PSQTBuckets; ++k)
st->accumulator.psqtAccumulation[perspective][k] += psqtWeights[index * PSQTBuckets + k];
}
}
#endif
}
else
{
// Refresh the accumulator
auto& accumulator = pos.state()->accumulator;
accumulator.computed[perspective] = true;
IndexList active;
FeatureSet::append_active_indices(pos, perspective, active);
#ifdef VECTOR
for (IndexType j = 0; j < HalfDimensions / TileHeight; ++j)
{
auto biasesTile = reinterpret_cast<const vec_t*>(
&biases[j * TileHeight]);
for (IndexType k = 0; k < NumRegs; ++k)
acc[k] = biasesTile[k];
for (const auto index : active)
{
const IndexType offset = HalfDimensions * index + j * TileHeight;
auto column = reinterpret_cast<const vec_t*>(&weights[offset]);
for (unsigned k = 0; k < NumRegs; ++k)
acc[k] = vec_add_16(acc[k], column[k]);
}
auto accTile = reinterpret_cast<vec_t*>(
&accumulator.accumulation[perspective][j * TileHeight]);
for (unsigned k = 0; k < NumRegs; k++)
vec_store(&accTile[k], acc[k]);
}
for (IndexType j = 0; j < PSQTBuckets / PsqtTileHeight; ++j)
{
for (std::size_t k = 0; k < NumPsqtRegs; ++k)
psqt[k] = vec_zero_psqt();
for (const auto index : active)
{
const IndexType offset = PSQTBuckets * index + j * PsqtTileHeight;
auto columnPsqt = reinterpret_cast<const psqt_vec_t*>(&psqtWeights[offset]);
for (std::size_t k = 0; k < NumPsqtRegs; ++k)
psqt[k] = vec_add_psqt_32(psqt[k], columnPsqt[k]);
}
auto accTilePsqt = reinterpret_cast<psqt_vec_t*>(
&accumulator.psqtAccumulation[perspective][j * PsqtTileHeight]);
for (std::size_t k = 0; k < NumPsqtRegs; ++k)
vec_store_psqt(&accTilePsqt[k], psqt[k]);
}
#else
std::memcpy(accumulator.accumulation[perspective], biases,
HalfDimensions * sizeof(BiasType));
for (std::size_t k = 0; k < PSQTBuckets; ++k)
accumulator.psqtAccumulation[perspective][k] = 0;
for (const auto index : active)
{
const IndexType offset = HalfDimensions * index;
for (IndexType j = 0; j < HalfDimensions; ++j)
accumulator.accumulation[perspective][j] += weights[offset + j];
for (std::size_t k = 0; k < PSQTBuckets; ++k)
accumulator.psqtAccumulation[perspective][k] += psqtWeights[index * PSQTBuckets + k];
}
#endif
}
#if defined(USE_MMX)
_mm_empty();
#endif
return { st, next };
}
alignas(CacheLineSize) BiasType biases[HalfDimensions];
alignas(CacheLineSize) WeightType weights[HalfDimensions * InputDimensions];
alignas(CacheLineSize) PSQTWeightType psqtWeights[InputDimensions * PSQTBuckets];
};
} // namespace Stockfish::Eval::NNUE
#include "nnue_feature_transformer_vec.h"
#if defined (FEATURE_TRANSFORMER_NO_VEC)
# include "nnue_feature_transformer_scalar.h"
namespace Stockfish::Eval::NNUE {
using FeatureTransformer = FeatureTransformer_Scalar;
}
#else
namespace Stockfish::Eval::NNUE {
using FeatureTransformer = FeatureTransformer_Vec;
}
#endif
#endif // #ifndef NNUE_FEATURE_TRANSFORMER_H_INCLUDED

nnue_feature_transformer_scalar.h

@@ -0,0 +1,194 @@
/*
Stockfish, a UCI chess playing engine derived from Glaurung 2.1
Copyright (C) 2004-2021 The Stockfish developers (see AUTHORS file)
Stockfish is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Stockfish is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
// A class that converts the input features of the NNUE evaluation function
#ifndef NNUE_FEATURE_TRANSFORMER_SCALAR_H_INCLUDED
#define NNUE_FEATURE_TRANSFORMER_SCALAR_H_INCLUDED
#if !defined (NNUE_FEATURE_TRANSFORMER_H_INCLUDED)
#error "This file can only be included through nnue_feature_transformer.h"
#endif
#include <cstring>
namespace Stockfish::Eval::NNUE {
// Input feature converter
class FeatureTransformer_Scalar : public FeatureTransformer_Base {
public:
using BaseType = FeatureTransformer_Base;
private:
using BiasType = typename FeatureTransformer_Base::BiasType;
using WeightType = typename FeatureTransformer_Base::WeightType;
using PSQTWeightType = typename FeatureTransformer_Base::PSQTWeightType;
// Number of output dimensions for one side
static constexpr auto HalfDimensions = FeatureTransformer_Base::HalfDimensions;
public:
// Output type
using OutputType = typename FeatureTransformer_Base::OutputType;
// Number of input/output dimensions
static constexpr auto InputDimensions = FeatureTransformer_Base::InputDimensions;
static constexpr auto OutputDimensions = FeatureTransformer_Base::OutputDimensions;
// Size of forward propagation buffer
static constexpr auto BufferSize = FeatureTransformer_Base::BufferSize;
// Convert input features
std::int32_t transform(const Position& pos, OutputType* output, int bucket) const {
update_accumulator(pos, WHITE);
update_accumulator(pos, BLACK);
const Color perspectives[2] = {pos.side_to_move(), ~pos.side_to_move()};
const auto& accumulation = pos.state()->accumulator.accumulation;
const auto& psqtAccumulation = pos.state()->accumulator.psqtAccumulation;
const auto psqt = (
psqtAccumulation[perspectives[0]][bucket]
- psqtAccumulation[perspectives[1]][bucket]
) / 2;
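// The two perspectives' PSQT sums estimate the same score with opposite
// signs, so halving their difference averages the two estimates into a
// side-to-move-relative value. The loop below then applies the clipped ReLU,
// clamping every int16 accumulator lane to [0, 127] before narrowing it to
// the uint8 output, side to move first.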
for (IndexType p = 0; p < 2; ++p)
{
const IndexType offset = HalfDimensions * p;
for (IndexType j = 0; j < HalfDimensions; ++j)
{
BiasType sum = accumulation[perspectives[p]][j];
if (sum < 0) sum = 0;
if (sum > 127) sum = 127;
output[offset + j] = static_cast<OutputType>(sum);
}
}
return psqt;
} // end of function transform()
private:
void update_accumulator(const Position& pos, const Color perspective) const {
// The size must be enough to contain the largest possible update.
// That might depend on the feature set and generally relies on the
// feature set's update cost calculation to be correct and never
// allow updates with more added/removed features than MaxActiveDimensions.
using IndexList = ValueList<IndexType, FeatureSet::MaxActiveDimensions>;
// Look for a usable accumulator of an earlier position. We keep track
// of the estimated gain in terms of features to be added/subtracted.
auto [st, next] = BaseType::try_search_for_computed(pos, perspective);
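// 'st' is now the nearest earlier state whose accumulator is computed for
// this perspective (or the oldest reachable state if none is), and 'next'
// is its successor on the path back to pos.state(), i.e. the first
// accumulator to bring up to date; 'next' is nullptr when pos.state()
// itself was already computed.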
if (st->accumulator.computed[perspective])
{
if (next == nullptr)
return;
// Update incrementally in two steps. First, we update the "next"
// accumulator. Then, we update the current accumulator (pos.state()).
// Gather all features to be updated.
const Square ksq = pos.square<KING>(perspective);
IndexList removed[2], added[2];
FeatureSet::append_changed_indices(
ksq, next, perspective, removed[0], added[0]);
for (StateInfo *st2 = pos.state(); st2 != next; st2 = st2->previous)
FeatureSet::append_changed_indices(
ksq, st2, perspective, removed[1], added[1]);
// Mark the accumulators as computed.
next->accumulator.computed[perspective] = true;
pos.state()->accumulator.computed[perspective] = true;
// Now update the accumulators listed in states_to_update[], where the last element is a sentinel.
StateInfo *states_to_update[3] =
{ next, next == pos.state() ? nullptr : pos.state(), nullptr };
for (IndexType i = 0; states_to_update[i]; ++i)
{
std::memcpy(states_to_update[i]->accumulator.accumulation[perspective],
st->accumulator.accumulation[perspective],
HalfDimensions * sizeof(BiasType));
for (std::size_t k = 0; k < PSQTBuckets; ++k)
states_to_update[i]->accumulator.psqtAccumulation[perspective][k] = st->accumulator.psqtAccumulation[perspective][k];
st = states_to_update[i];
// Difference calculation for the deactivated features
for (const auto index : removed[i])
{
const IndexType offset = HalfDimensions * index;
for (IndexType j = 0; j < HalfDimensions; ++j)
st->accumulator.accumulation[perspective][j] -= weights[offset + j];
for (std::size_t k = 0; k < PSQTBuckets; ++k)
st->accumulator.psqtAccumulation[perspective][k] -= psqtWeights[index * PSQTBuckets + k];
}
// Difference calculation for the activated features
for (const auto index : added[i])
{
const IndexType offset = HalfDimensions * index;
for (IndexType j = 0; j < HalfDimensions; ++j)
st->accumulator.accumulation[perspective][j] += weights[offset + j];
for (std::size_t k = 0; k < PSQTBuckets; ++k)
st->accumulator.psqtAccumulation[perspective][k] += psqtWeights[index * PSQTBuckets + k];
}
}
}
else
{
// Refresh the accumulator
auto& accumulator = pos.state()->accumulator;
accumulator.computed[perspective] = true;
IndexList active;
FeatureSet::append_active_indices(pos, perspective, active);
std::memcpy(accumulator.accumulation[perspective], biases,
HalfDimensions * sizeof(BiasType));
for (std::size_t k = 0; k < PSQTBuckets; ++k)
accumulator.psqtAccumulation[perspective][k] = 0;
for (const auto index : active)
{
const IndexType offset = HalfDimensions * index;
for (IndexType j = 0; j < HalfDimensions; ++j)
accumulator.accumulation[perspective][j] += weights[offset + j];
for (std::size_t k = 0; k < PSQTBuckets; ++k)
accumulator.psqtAccumulation[perspective][k] += psqtWeights[index * PSQTBuckets + k];
}
}
}
using BaseType::biases;
using BaseType::weights;
using BaseType::psqtWeights;
};
} // namespace Stockfish::Eval::NNUE
#endif // #ifndef NNUE_FEATURE_TRANSFORMER_SCALAR_H_INCLUDED

nnue_feature_transformer_vec.h

@@ -0,0 +1,553 @@
/*
Stockfish, a UCI chess playing engine derived from Glaurung 2.1
Copyright (C) 2004-2021 The Stockfish developers (see AUTHORS file)
Stockfish is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Stockfish is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
// A class that converts the input features of the NNUE evaluation function
#ifndef NNUE_FEATURE_TRANSFORMER_VEC_H_INCLUDED
#define NNUE_FEATURE_TRANSFORMER_VEC_H_INCLUDED
#if !defined (NNUE_FEATURE_TRANSFORMER_H_INCLUDED)
#error "This file can only be included through nnue_feature_transformer.h"
#endif
#if defined (USE_MMX) || defined (USE_SSE2) || defined (USE_NEON)
#include <cstring>
namespace Stockfish::Eval::NNUE {
#if defined (USE_AVX512)
using vec_t = __m512i;
using psqt_vec_t = __m256i;
# define vec_load(a) _mm512_load_si512(a)
# define vec_store(a,b) _mm512_store_si512(a,b)
# define vec_add_16(a,b) _mm512_add_epi16(a,b)
# define vec_sub_16(a,b) _mm512_sub_epi16(a,b)
# define vec_load_psqt(a) _mm256_load_si256(a)
# define vec_store_psqt(a,b) _mm256_store_si256(a,b)
# define vec_add_psqt_32(a,b) _mm256_add_epi32(a,b)
# define vec_sub_psqt_32(a,b) _mm256_sub_epi32(a,b)
# define vec_zero_psqt() _mm256_setzero_si256()
static constexpr inline IndexType NumRegistersSIMD = 32;
#elif defined (USE_AVX2)
using vec_t = __m256i;
using psqt_vec_t = __m256i;
# define vec_load(a) _mm256_load_si256(a)
# define vec_store(a,b) _mm256_store_si256(a,b)
# define vec_add_16(a,b) _mm256_add_epi16(a,b)
# define vec_sub_16(a,b) _mm256_sub_epi16(a,b)
# define vec_load_psqt(a) _mm256_load_si256(a)
# define vec_store_psqt(a,b) _mm256_store_si256(a,b)
# define vec_add_psqt_32(a,b) _mm256_add_epi32(a,b)
# define vec_sub_psqt_32(a,b) _mm256_sub_epi32(a,b)
# define vec_zero_psqt() _mm256_setzero_si256()
static constexpr inline IndexType NumRegistersSIMD = 16;
#elif defined (USE_SSE2)
using vec_t = __m128i;
using psqt_vec_t = __m128i;
# define vec_load(a) (*(a))
# define vec_store(a,b) *(a)=(b)
# define vec_add_16(a,b) _mm_add_epi16(a,b)
# define vec_sub_16(a,b) _mm_sub_epi16(a,b)
# define vec_load_psqt(a) (*(a))
# define vec_store_psqt(a,b) *(a)=(b)
# define vec_add_psqt_32(a,b) _mm_add_epi32(a,b)
# define vec_sub_psqt_32(a,b) _mm_sub_epi32(a,b)
# define vec_zero_psqt() _mm_setzero_si128()
static constexpr inline IndexType NumRegistersSIMD = (Is64Bit ? 16 : 8);
#elif defined (USE_MMX)
using vec_t = __m64;
using psqt_vec_t = __m64;
# define vec_load(a) (*(a))
# define vec_store(a,b) *(a)=(b)
# define vec_add_16(a,b) _mm_add_pi16(a,b)
# define vec_sub_16(a,b) _mm_sub_pi16(a,b)
# define vec_load_psqt(a) (*(a))
# define vec_store_psqt(a,b) *(a)=(b)
# define vec_add_psqt_32(a,b) _mm_add_pi32(a,b)
# define vec_sub_psqt_32(a,b) _mm_sub_pi32(a,b)
# define vec_zero_psqt() _mm_setzero_si64()
static constexpr inline IndexType NumRegistersSIMD = 8;
#elif defined (USE_NEON)
using vec_t = int16x8_t;
using psqt_vec_t = int32x4_t;
# define vec_load(a) (*(a))
# define vec_store(a,b) *(a)=(b)
# define vec_add_16(a,b) vaddq_s16(a,b)
# define vec_sub_16(a,b) vsubq_s16(a,b)
# define vec_load_psqt(a) (*(a))
# define vec_store_psqt(a,b) *(a)=(b)
# define vec_add_psqt_32(a,b) vaddq_s32(a,b)
# define vec_sub_psqt_32(a,b) vsubq_s32(a,b)
# define vec_zero_psqt() psqt_vec_t{0}
static constexpr inline IndexType NumRegistersSIMD = 16;
#else
# error "No vectorization possible but vectorization path entered."
#endif
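// NumRegistersSIMD is the number of architectural SIMD registers on the
// target: 32 zmm registers with AVX-512, 16 ymm with AVX2, 16 xmm on x86-64
// (8 on 32-bit x86), 8 mm registers with MMX, and 16 q registers on 32-bit
// NEON (also used as a conservative count on AArch64, which has 32).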
// We use __m* types as template arguments, which causes GCC to emit warnings
// about losing some attribute information. This is irrelevant to us as we
// only take their size, so the following pragmas are harmless.
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wignored-attributes"
template <typename SIMDRegisterType,
typename LaneType,
int NumLanes,
int MaxRegisters>
static constexpr int BestRegisterCount()
{
constexpr int RegisterSize = sizeof(SIMDRegisterType);
constexpr int LaneSize = sizeof(LaneType);
static_assert(RegisterSize >= LaneSize);
static_assert(MaxRegisters <= NumRegistersSIMD);
static_assert(MaxRegisters > 0);
static_assert(NumRegistersSIMD > 0);
static_assert(RegisterSize % LaneSize == 0);
static_assert((NumLanes * LaneSize) % RegisterSize == 0);
const int ideal = (NumLanes * LaneSize) / RegisterSize;
if (ideal <= MaxRegisters)
return ideal;
// Look for the largest divisor of the ideal register count that is not larger than MaxRegisters
for (int divisor = MaxRegisters; divisor > 1; --divisor)
if (ideal % divisor == 0)
return divisor;
return 1;
}
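// A worked example with illustrative numbers (the feature dimensions below
// are assumptions, not tied to any particular net): on AVX2, vec_t is
// __m256i (32 bytes) and the lane type is std::int16_t (2 bytes). With
// NumLanes == 1024 the ideal count is 1024 * 2 / 32 == 64 registers, which
// exceeds MaxRegisters == 16; the largest divisor of 64 not above 16 is 16,
// so 16 registers are used and each tile covers 16 * 32 / 2 == 256 lanes.
// For the PSQT side with 8 std::int32_t buckets, 8 * 4 / 32 == 1, so a
// single register suffices. A commented-out sanity check of the first case:
//   static_assert(BestRegisterCount<__m256i, std::int16_t, 1024, 16>() == 16);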
#pragma GCC diagnostic pop
// Input feature converter
class FeatureTransformer_Vec : public FeatureTransformer_Base {
private:
// Number of output dimensions for one side
static constexpr auto HalfDimensions = FeatureTransformer_Base::HalfDimensions;
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wignored-attributes"
// Compute optimal SIMD register count for feature transformer accumulation.
static constexpr IndexType NumRegs = BestRegisterCount<vec_t, WeightType, TransformedFeatureDimensions, NumRegistersSIMD>();
static constexpr IndexType NumPsqtRegs = BestRegisterCount<psqt_vec_t, PSQTWeightType, PSQTBuckets, NumRegistersSIMD>();
#pragma GCC diagnostic pop
static constexpr IndexType TileHeight = NumRegs * sizeof(vec_t) / 2;
static constexpr IndexType PsqtTileHeight = NumPsqtRegs * sizeof(psqt_vec_t) / 4;
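// The divisors are sizeof(std::int16_t) and sizeof(std::int32_t): a tile is
// measured in accumulator lanes, not bytes, and spans what the whole
// register set covers in one pass.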
static_assert(HalfDimensions % TileHeight == 0, "TileHeight must divide HalfDimensions");
static_assert(PSQTBuckets % PsqtTileHeight == 0, "PsqtTileHeight must divide PSQTBuckets");
using BiasType = typename FeatureTransformer_Base::BiasType;
using WeightType = typename FeatureTransformer_Base::WeightType;
using PSQTWeightType = typename FeatureTransformer_Base::PSQTWeightType;
#if defined(USE_AVX512)
static constexpr std::size_t SimdWidth = 64;
#elif defined(USE_AVX2)
static constexpr std::size_t SimdWidth = 32;
#elif defined(USE_SSE2)
static constexpr std::size_t SimdWidth = 16;
#elif defined(USE_MMX)
static constexpr std::size_t SimdWidth = 8;
#elif defined(USE_NEON)
static constexpr std::size_t SimdWidth = 16;
#endif
public:
using BaseType = FeatureTransformer_Base;
// Output type
using OutputType = typename FeatureTransformer_Base::OutputType;
// Number of input/output dimensions
static constexpr auto InputDimensions = FeatureTransformer_Base::InputDimensions;
static constexpr auto OutputDimensions = FeatureTransformer_Base::OutputDimensions;
// Size of forward propagation buffer
static constexpr auto BufferSize = FeatureTransformer_Base::BufferSize;
// Read network parameters
bool read_parameters(std::istream& stream) {
read_little_endian<BiasType >(stream, biases , HalfDimensions );
read_little_endian<WeightType >(stream, weights , HalfDimensions * InputDimensions);
read_little_endian<PSQTWeightType>(stream, psqtWeights, PSQTBuckets * InputDimensions);
return !stream.fail();
}
// Write network parameters
bool write_parameters(std::ostream& stream) const {
write_little_endian<BiasType >(stream, biases , HalfDimensions );
write_little_endian<WeightType >(stream, weights , HalfDimensions * InputDimensions);
write_little_endian<PSQTWeightType>(stream, psqtWeights, PSQTBuckets * InputDimensions);
return !stream.fail();
}
// Convert input features
std::int32_t transform(const Position& pos, OutputType* output, int bucket) const {
update_accumulator(pos, WHITE);
update_accumulator(pos, BLACK);
const Color perspectives[2] = {pos.side_to_move(), ~pos.side_to_move()};
const auto& accumulation = pos.state()->accumulator.accumulation;
const auto& psqtAccumulation = pos.state()->accumulator.psqtAccumulation;
const auto psqt = (
psqtAccumulation[perspectives[0]][bucket]
- psqtAccumulation[perspectives[1]][bucket]
) / 2;
#if defined (USE_AVX512)
constexpr IndexType NumChunks = HalfDimensions / SimdWidth;
static_assert(HalfDimensions % SimdWidth == 0);
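// _mm512_packs_epi16 packs within each 128-bit lane, so the bytes come out
// interleaved; the 64-bit-element permutation below restores linear order
// before the store.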
const __m512i Control = _mm512_setr_epi64(0, 2, 4, 6, 1, 3, 5, 7);
const __m512i Zero = _mm512_setzero_si512();
for (IndexType p = 0; p < 2; ++p)
{
const IndexType offset = HalfDimensions * p;
auto out = reinterpret_cast<__m512i*>(&output[offset]);
for (IndexType j = 0; j < NumChunks; ++j)
{
__m512i sum0 = _mm512_load_si512(&reinterpret_cast<const __m512i*>
(accumulation[perspectives[p]])[j * 2 + 0]);
__m512i sum1 = _mm512_load_si512(&reinterpret_cast<const __m512i*>
(accumulation[perspectives[p]])[j * 2 + 1]);
_mm512_store_si512(&out[j], _mm512_permutexvar_epi64(Control,
_mm512_max_epi8(_mm512_packs_epi16(sum0, sum1), Zero)));
}
}
#elif defined (USE_AVX2)
constexpr IndexType NumChunks = HalfDimensions / SimdWidth;
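// Same lane-interleaving issue as in the AVX-512 path: _mm256_packs_epi16
// packs per 128-bit lane, and Control = 0b11011000 swaps the two middle
// 64-bit elements back into linear order.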
constexpr int Control = 0b11011000;
const __m256i Zero = _mm256_setzero_si256();
for (IndexType p = 0; p < 2; ++p)
{
const IndexType offset = HalfDimensions * p;
auto out = reinterpret_cast<__m256i*>(&output[offset]);
for (IndexType j = 0; j < NumChunks; ++j)
{
__m256i sum0 = _mm256_load_si256(&reinterpret_cast<const __m256i*>
(accumulation[perspectives[p]])[j * 2 + 0]);
__m256i sum1 = _mm256_load_si256(&reinterpret_cast<const __m256i*>
(accumulation[perspectives[p]])[j * 2 + 1]);
_mm256_store_si256(&out[j], _mm256_permute4x64_epi64(
_mm256_max_epi8(_mm256_packs_epi16(sum0, sum1), Zero), Control));
}
}
#elif defined (USE_SSE2)
constexpr IndexType NumChunks = HalfDimensions / SimdWidth;
# if defined (USE_SSE41)
const __m128i Zero = _mm_setzero_si128();
# else
const __m128i k0x80s = _mm_set1_epi8(-128);
# endif
for (IndexType p = 0; p < 2; ++p)
{
const IndexType offset = HalfDimensions * p;
auto out = reinterpret_cast<__m128i*>(&output[offset]);
for (IndexType j = 0; j < NumChunks; ++j)
{
__m128i sum0 = _mm_load_si128(&reinterpret_cast<const __m128i*>
(accumulation[perspectives[p]])[j * 2 + 0]);
__m128i sum1 = _mm_load_si128(&reinterpret_cast<const __m128i*>
(accumulation[perspectives[p]])[j * 2 + 1]);
const __m128i packedbytes = _mm_packs_epi16(sum0, sum1);
#ifdef USE_SSE41
_mm_store_si128(&out[j], _mm_max_epi8(packedbytes, Zero));
#else
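// Without SSE4.1's _mm_max_epi8, clamp to [0, 127] with saturating
// arithmetic: adding -128 saturates every negative lane to -128, and
// subtracting -128 again maps x >= 0 back to x and all negatives to 0.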
_mm_store_si128(&out[j], _mm_subs_epi8(_mm_adds_epi8(packedbytes, k0x80s), k0x80s));
#endif
}
}
#elif defined (USE_MMX)
constexpr IndexType NumChunks = HalfDimensions / SimdWidth;
const __m64 k0x80s = _mm_set1_pi8(-128);
for (IndexType p = 0; p < 2; ++p)
{
const IndexType offset = HalfDimensions * p;
auto out = reinterpret_cast<__m64*>(&output[offset]);
for (IndexType j = 0; j < NumChunks; ++j)
{
__m64 sum0 = *(&reinterpret_cast<const __m64*>(accumulation[perspectives[p]])[j * 2 + 0]);
__m64 sum1 = *(&reinterpret_cast<const __m64*>(accumulation[perspectives[p]])[j * 2 + 1]);
const __m64 packedbytes = _mm_packs_pi16(sum0, sum1);
out[j] = _mm_subs_pi8(_mm_adds_pi8(packedbytes, k0x80s), k0x80s);
}
}
_mm_empty();
#elif defined (USE_NEON)
constexpr IndexType NumChunks = HalfDimensions / (SimdWidth / 2);
const int8x8_t Zero = {0};
for (IndexType p = 0; p < 2; ++p)
{
const IndexType offset = HalfDimensions * p;
const auto out = reinterpret_cast<int8x8_t*>(&output[offset]);
for (IndexType j = 0; j < NumChunks; ++j)
{
int16x8_t sum = reinterpret_cast<const int16x8_t*>(accumulation[perspectives[p]])[j];
out[j] = vmax_s8(vqmovn_s16(sum), Zero);
}
}
#else
# error "No vectorization possible but vectorization path entered."
#endif
return psqt;
} // end of function transform()
private:
void update_accumulator(const Position& pos, const Color perspective) const {
// The size must be enough to contain the largest possible update.
// That might depend on the feature set and generally relies on the
// feature set's update cost calculation to be correct and never
// allow updates with more added/removed features than MaxActiveDimensions.
using IndexList = ValueList<IndexType, FeatureSet::MaxActiveDimensions>;
// Gcc-10.2 unnecessarily spills AVX2 registers if these arrays
// are defined inside the branches below, once in each branch
vec_t acc[NumRegs];
psqt_vec_t psqt[NumPsqtRegs];
// Look for a usable accumulator of an earlier position. We keep track
// of the estimated gain in terms of features to be added/subtracted.
auto [st, next] = BaseType::try_search_for_computed(pos, perspective);
if (st->accumulator.computed[perspective])
{
if (next == nullptr)
return;
// Update incrementally in two steps. First, we update the "next"
// accumulator. Then, we update the current accumulator (pos.state()).
// Gather all features to be updated.
const Square ksq = pos.square<KING>(perspective);
IndexList removed[2], added[2];
FeatureSet::append_changed_indices(
ksq, next, perspective, removed[0], added[0]);
for (StateInfo *st2 = pos.state(); st2 != next; st2 = st2->previous)
FeatureSet::append_changed_indices(
ksq, st2, perspective, removed[1], added[1]);
// Mark the accumulators as computed.
next->accumulator.computed[perspective] = true;
pos.state()->accumulator.computed[perspective] = true;
// Now update the accumulators listed in states_to_update[], where the last element is a sentinel.
StateInfo *states_to_update[3] =
{ next, next == pos.state() ? nullptr : pos.state(), nullptr };
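// Walk the accumulator in register-sized tiles: each tile of TileHeight
// int16 lanes is loaded once into the NumRegs registers, updated for every
// state and changed feature, and stored only at the end, so the running
// sums stay in registers throughout.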
for (IndexType j = 0; j < HalfDimensions / TileHeight; ++j)
{
// Load accumulator
auto accTile = reinterpret_cast<vec_t*>(
&st->accumulator.accumulation[perspective][j * TileHeight]);
for (IndexType k = 0; k < NumRegs; ++k)
acc[k] = vec_load(&accTile[k]);
for (IndexType i = 0; states_to_update[i]; ++i)
{
// Difference calculation for the deactivated features
for (const auto index : removed[i])
{
const IndexType offset = HalfDimensions * index + j * TileHeight;
auto column = reinterpret_cast<const vec_t*>(&weights[offset]);
for (IndexType k = 0; k < NumRegs; ++k)
acc[k] = vec_sub_16(acc[k], column[k]);
}
// Difference calculation for the activated features
for (const auto index : added[i])
{
const IndexType offset = HalfDimensions * index + j * TileHeight;
auto column = reinterpret_cast<const vec_t*>(&weights[offset]);
for (IndexType k = 0; k < NumRegs; ++k)
acc[k] = vec_add_16(acc[k], column[k]);
}
// Store accumulator
accTile = reinterpret_cast<vec_t*>(
&states_to_update[i]->accumulator.accumulation[perspective][j * TileHeight]);
for (IndexType k = 0; k < NumRegs; ++k)
vec_store(&accTile[k], acc[k]);
}
}
for (IndexType j = 0; j < PSQTBuckets / PsqtTileHeight; ++j)
{
// Load accumulator
auto accTilePsqt = reinterpret_cast<psqt_vec_t*>(
&st->accumulator.psqtAccumulation[perspective][j * PsqtTileHeight]);
for (std::size_t k = 0; k < NumPsqtRegs; ++k)
psqt[k] = vec_load_psqt(&accTilePsqt[k]);
for (IndexType i = 0; states_to_update[i]; ++i)
{
// Difference calculation for the deactivated features
for (const auto index : removed[i])
{
const IndexType offset = PSQTBuckets * index + j * PsqtTileHeight;
auto columnPsqt = reinterpret_cast<const psqt_vec_t*>(&psqtWeights[offset]);
for (std::size_t k = 0; k < NumPsqtRegs; ++k)
psqt[k] = vec_sub_psqt_32(psqt[k], columnPsqt[k]);
}
// Difference calculation for the activated features
for (const auto index : added[i])
{
const IndexType offset = PSQTBuckets * index + j * PsqtTileHeight;
auto columnPsqt = reinterpret_cast<const psqt_vec_t*>(&psqtWeights[offset]);
for (std::size_t k = 0; k < NumPsqtRegs; ++k)
psqt[k] = vec_add_psqt_32(psqt[k], columnPsqt[k]);
}
// Store accumulator
accTilePsqt = reinterpret_cast<psqt_vec_t*>(
&states_to_update[i]->accumulator.psqtAccumulation[perspective][j * PsqtTileHeight]);
for (std::size_t k = 0; k < NumPsqtRegs; ++k)
vec_store_psqt(&accTilePsqt[k], psqt[k]);
}
}
}
else
{
// Refresh the accumulator
auto& accumulator = pos.state()->accumulator;
accumulator.computed[perspective] = true;
IndexList active;
FeatureSet::append_active_indices(pos, perspective, active);
for (IndexType j = 0; j < HalfDimensions / TileHeight; ++j)
{
auto biasesTile = reinterpret_cast<const vec_t*>(
&biases[j * TileHeight]);
for (IndexType k = 0; k < NumRegs; ++k)
acc[k] = biasesTile[k];
for (const auto index : active)
{
const IndexType offset = HalfDimensions * index + j * TileHeight;
auto column = reinterpret_cast<const vec_t*>(&weights[offset]);
for (unsigned k = 0; k < NumRegs; ++k)
acc[k] = vec_add_16(acc[k], column[k]);
}
auto accTile = reinterpret_cast<vec_t*>(
&accumulator.accumulation[perspective][j * TileHeight]);
for (unsigned k = 0; k < NumRegs; k++)
vec_store(&accTile[k], acc[k]);
}
for (IndexType j = 0; j < PSQTBuckets / PsqtTileHeight; ++j)
{
for (std::size_t k = 0; k < NumPsqtRegs; ++k)
psqt[k] = vec_zero_psqt();
for (const auto index : active)
{
const IndexType offset = PSQTBuckets * index + j * PsqtTileHeight;
auto columnPsqt = reinterpret_cast<const psqt_vec_t*>(&psqtWeights[offset]);
for (std::size_t k = 0; k < NumPsqtRegs; ++k)
psqt[k] = vec_add_psqt_32(psqt[k], columnPsqt[k]);
}
auto accTilePsqt = reinterpret_cast<psqt_vec_t*>(
&accumulator.psqtAccumulation[perspective][j * PsqtTileHeight]);
for (std::size_t k = 0; k < NumPsqtRegs; ++k)
vec_store_psqt(&accTilePsqt[k], psqt[k]);
}
}
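// MMX aliases the x87 floating-point registers, so the FPU tag word must be
// cleared with EMMS before any later floating-point code runs.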
#if defined (USE_MMX)
_mm_empty();
#endif
}
using BaseType::biases;
using BaseType::weights;
using BaseType::psqtWeights;
};
} // namespace Stockfish::Eval::NNUE
#undef vec_load
#undef vec_store
#undef vec_add_16
#undef vec_sub_16
#undef vec_load_psqt
#undef vec_store_psqt
#undef vec_add_psqt_32
#undef vec_sub_psqt_32
#undef vec_zero_psqt
#else
#define FEATURE_TRANSFORMER_NO_VEC
#endif
#endif // #ifndef NNUE_FEATURE_TRANSFORMER_VEC_H_INCLUDED