
Avoid special casing for MinGW

After some testing, no version of MinGW/gcc has been found where this code is still necessary.
It was probably needed only for older toolchains (pre-C++17?).

closes https://github.com/official-stockfish/Stockfish/pull/2891

No functional change
Dariusz Orzechowski 2020-08-09 14:32:24 -07:00 committed by Joost VandeVondele
parent 2bfde55429
commit a6e89293df
3 changed files with 14 additions and 98 deletions
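For reference, the two intrinsic families involved differ only in their alignment contract: the aligned forms require a 32-byte (or 64-byte for AVX-512) aligned address, while the unaligned forms accept any address. A minimal standalone sketch, not Stockfish code:

#include <immintrin.h>
#include <cstdint>

alignas(32) static std::int8_t buffer[32];   // 32-byte-aligned storage for one __m256i

// Safe only because `buffer` is 32-byte aligned; faults on a misaligned address.
__m256i load_aligned()   { return _mm256_load_si256(reinterpret_cast<const __m256i*>(buffer)); }

// Accepts any address; this is what the removed MinGW branches fell back to.
__m256i load_unaligned() { return _mm256_loadu_si256(reinterpret_cast<const __m256i*>(buffer)); }

The hack being removed used the unaligned form on MinGW because, per the old comments, some MSYS2/g++ builds reportedly did not honor alignas on these buffers.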

View file

@@ -104,13 +104,8 @@ namespace Eval::NNUE::Layers {
       __m512i sum = _mm512_setzero_si512();
       const auto row = reinterpret_cast<const __m512i*>(&weights_[offset]);
       for (IndexType j = 0; j < kNumChunks; ++j) {
-#if defined(__MINGW32__) || defined(__MINGW64__)
-        __m512i product = _mm512_maddubs_epi16(_mm512_loadu_si512(&input_vector[j]), _mm512_load_si512(&row[j]));
-#else
-        __m512i product = _mm512_maddubs_epi16(_mm512_load_si512(&input_vector[j]), _mm512_load_si512(&row[j]));
-#endif
+        __m512i product = _mm512_maddubs_epi16(
+            _mm512_load_si512(&input_vector[j]), _mm512_load_si512(&row[j]));
         product = _mm512_madd_epi16(product, kOnes);
         sum = _mm512_add_epi32(sum, product);
       }
@@ -125,12 +120,8 @@ namespace Eval::NNUE::Layers {
       const auto row_256 = reinterpret_cast<const __m256i*>(&weights_[offset]);
       int j = kNumChunks * 2;
-#if defined(__MINGW32__) || defined(__MINGW64__)  // See HACK comment below in AVX2.
-      __m256i sum256 = _mm256_maddubs_epi16(_mm256_loadu_si256(&iv_256[j]), _mm256_load_si256(&row_256[j]));
-#else
-      __m256i sum256 = _mm256_maddubs_epi16(_mm256_load_si256(&iv_256[j]), _mm256_load_si256(&row_256[j]));
-#endif
+      __m256i sum256 = _mm256_maddubs_epi16(
+          _mm256_load_si256(&iv_256[j]), _mm256_load_si256(&row_256[j]));
       sum256 = _mm256_madd_epi16(sum256, _mm256_set1_epi16(1));
       sum256 = _mm256_hadd_epi32(sum256, sum256);
       sum256 = _mm256_hadd_epi32(sum256, sum256);
@@ -144,17 +135,7 @@ namespace Eval::NNUE::Layers {
       const auto row = reinterpret_cast<const __m256i*>(&weights_[offset]);
       for (IndexType j = 0; j < kNumChunks; ++j) {
         __m256i product = _mm256_maddubs_epi16(
-#if defined(__MINGW32__) || defined(__MINGW64__)
-          // HACK: Use _mm256_loadu_si256() instead of _mm256_load_si256. Because the binary
-          // compiled with g++ in MSYS2 crashes here because the output memory is not aligned
-          // even though alignas is specified.
-          _mm256_loadu_si256
-#else
-          _mm256_load_si256
-#endif
-          (&input_vector[j]), _mm256_load_si256(&row[j]));
+          _mm256_load_si256(&input_vector[j]), _mm256_load_si256(&row[j]));
         product = _mm256_madd_epi16(product, kOnes);
         sum = _mm256_add_epi32(sum, product);
       }
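All three hunks above simplify the same int8 dot-product idiom. A standalone sketch of the AVX2 step, with illustrative names (dot_accumulate is not an engine function):

#include <immintrin.h>
#include <cstdint>

// One loop step: multiply u8 inputs by i8 weights into adjacent-pair i16 sums,
// fold the i16 pairs into i32 lanes, and accumulate. Both pointers are assumed
// 32-byte aligned, which is exactly what the removed branches worked around.
__m256i dot_accumulate(__m256i sum, const std::uint8_t* input, const std::int8_t* weights) {
    const __m256i in = _mm256_load_si256(reinterpret_cast<const __m256i*>(input));
    const __m256i w  = _mm256_load_si256(reinterpret_cast<const __m256i*>(weights));
    __m256i product  = _mm256_maddubs_epi16(in, w);                       // u8*i8 -> i16 pair sums
    product          = _mm256_madd_epi16(product, _mm256_set1_epi16(1));  // i16 pairs -> i32 lanes
    return _mm256_add_epi32(sum, product);                                // running i32 sums
}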

View file

@@ -74,50 +74,13 @@ namespace Eval::NNUE::Layers {
       const auto out = reinterpret_cast<__m256i*>(output);
       for (IndexType i = 0; i < kNumChunks; ++i) {
         const __m256i words0 = _mm256_srai_epi16(_mm256_packs_epi32(
-#if defined(__MINGW32__) || defined(__MINGW64__)
-          // HACK: Use _mm256_loadu_si256() instead of _mm256_load_si256. Because the binary
-          // compiled with g++ in MSYS2 crashes here because the output memory is not aligned
-          // even though alignas is specified.
-          _mm256_loadu_si256
-#else
-          _mm256_load_si256
-#endif
-          (&in[i * 4 + 0]),
-#if defined(__MINGW32__) || defined(__MINGW64__)
-          _mm256_loadu_si256
-#else
-          _mm256_load_si256
-#endif
-          (&in[i * 4 + 1])), kWeightScaleBits);
+          _mm256_load_si256(&in[i * 4 + 0]),
+          _mm256_load_si256(&in[i * 4 + 1])), kWeightScaleBits);
         const __m256i words1 = _mm256_srai_epi16(_mm256_packs_epi32(
-#if defined(__MINGW32__) || defined(__MINGW64__)
-          _mm256_loadu_si256
-#else
-          _mm256_load_si256
-#endif
-          (&in[i * 4 + 2]),
-#if defined(__MINGW32__) || defined(__MINGW64__)
-          _mm256_loadu_si256
-#else
-          _mm256_load_si256
-#endif
-          (&in[i * 4 + 3])), kWeightScaleBits);
-#if defined(__MINGW32__) || defined(__MINGW64__)
-        _mm256_storeu_si256
-#else
-        _mm256_store_si256
-#endif
-        (&out[i], _mm256_permutevar8x32_epi32(_mm256_max_epi8(
+          _mm256_load_si256(&in[i * 4 + 2]),
+          _mm256_load_si256(&in[i * 4 + 3])), kWeightScaleBits);
+        _mm256_store_si256(
+          &out[i], _mm256_permutevar8x32_epi32(_mm256_max_epi8(
           _mm256_packs_epi16(words0, words1), kZero), kOffsets));
       }
       constexpr IndexType kStart = kNumChunks * kSimdWidth;
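The hunk above is the usual int32-to-clipped-int8 conversion. A standalone sketch in which the kWeightScaleBits and kOffsets values are assumptions, not taken from the engine:

#include <immintrin.h>

// Pack i32 to i16 with an arithmetic right shift for rescaling, pack i16 to i8
// with saturation, clamp negatives to zero (the ReLU), then permute to undo the
// per-128-bit-lane interleaving that the pack instructions introduce.
__m256i clip_to_int8(__m256i in0, __m256i in1, __m256i in2, __m256i in3) {
    constexpr int kWeightScaleBits = 6;                                 // assumed scale
    const __m256i kZero    = _mm256_setzero_si256();
    const __m256i kOffsets = _mm256_set_epi32(7, 3, 6, 2, 5, 1, 4, 0);  // lane un-interleave
    const __m256i words0 = _mm256_srai_epi16(_mm256_packs_epi32(in0, in1), kWeightScaleBits);
    const __m256i words1 = _mm256_srai_epi16(_mm256_packs_epi32(in2, in3), kWeightScaleBits);
    return _mm256_permutevar8x32_epi32(
        _mm256_max_epi8(_mm256_packs_epi16(words0, words1), kZero), kOffsets);
}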

View file

@@ -110,36 +110,12 @@ namespace Eval::NNUE {
       auto out = reinterpret_cast<__m256i*>(&output[offset]);
       for (IndexType j = 0; j < kNumChunks; ++j) {
         __m256i sum0 =
-#if defined(__MINGW32__) || defined(__MINGW64__)
-          // HACK: Use _mm256_loadu_si256() instead of _mm256_load_si256. Because the binary
-          // compiled with g++ in MSYS2 crashes here because the output memory is not aligned
-          // even though alignas is specified.
-          _mm256_loadu_si256
-#else
-          _mm256_load_si256
-#endif
-          (&reinterpret_cast<const __m256i*>(
+          _mm256_load_si256(&reinterpret_cast<const __m256i*>(
             accumulation[perspectives[p]][0])[j * 2 + 0]);
         __m256i sum1 =
-#if defined(__MINGW32__) || defined(__MINGW64__)
-          _mm256_loadu_si256
-#else
-          _mm256_load_si256
-#endif
-          (&reinterpret_cast<const __m256i*>(
+          _mm256_load_si256(&reinterpret_cast<const __m256i*>(
             accumulation[perspectives[p]][0])[j * 2 + 1]);
-#if defined(__MINGW32__) || defined(__MINGW64__)
-        _mm256_storeu_si256
-#else
-        _mm256_store_si256
-#endif
-        (&out[j], _mm256_permute4x64_epi64(_mm256_max_epi8(
+        _mm256_store_si256(&out[j], _mm256_permute4x64_epi64(_mm256_max_epi8(
           _mm256_packs_epi16(sum0, sum1), kZero), kControl));
       }
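The output path above uses the same pack-and-clamp trick but repairs the lane order with a 64-bit permute. A sketch in which kControl's value is an assumption:

#include <immintrin.h>

// Pack two i16 accumulator registers to i8 with saturation, clamp at zero,
// then reorder the 64-bit lanes that _mm256_packs_epi16 interleaves.
__m256i pack_accumulators(__m256i sum0, __m256i sum1) {
    constexpr int kControl = 0b11011000;            // select 64-bit lanes 0,2,1,3
    const __m256i kZero = _mm256_setzero_si256();
    return _mm256_permute4x64_epi64(
        _mm256_max_epi8(_mm256_packs_epi16(sum0, sum1), kZero), kControl);
}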
@@ -202,11 +178,7 @@ namespace Eval::NNUE {
       auto column = reinterpret_cast<const __m256i*>(&weights_[offset]);
       constexpr IndexType kNumChunks = kHalfDimensions / (kSimdWidth / 2);
       for (IndexType j = 0; j < kNumChunks; ++j) {
-#if defined(__MINGW32__) || defined(__MINGW64__)
-        _mm256_storeu_si256(&accumulation[j], _mm256_add_epi16(_mm256_loadu_si256(&accumulation[j]), column[j]));
-#else
         accumulation[j] = _mm256_add_epi16(accumulation[j], column[j]);
-#endif
       }
 #elif defined(USE_SSE2)
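With the workaround gone, the accumulator update above is a plain aligned vector add. A minimal standalone sketch with illustrative names:

#include <immintrin.h>

// With `accumulation` known to be 32-byte aligned, indexing __m256i directly
// lets the compiler emit aligned moves; the removed branch spelled out an
// explicit loadu/storeu round trip instead.
void add_column(__m256i* accumulation, const __m256i* column, int num_chunks) {
    for (int j = 0; j < num_chunks; ++j)
        accumulation[j] = _mm256_add_epi16(accumulation[j], column[j]);
}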