diff --git a/src/eval/nnue/layers/affine_transform.h b/src/eval/nnue/layers/affine_transform.h
index 9b227270..d8101ba4 100644
--- a/src/eval/nnue/layers/affine_transform.h
+++ b/src/eval/nnue/layers/affine_transform.h
@@ -101,7 +101,15 @@ class AffineTransform {
       const auto row = reinterpret_cast<const __m256i*>(&weights_[offset]);
       for (IndexType j = 0; j < kNumChunks; ++j) {
         __m256i product = _mm256_maddubs_epi16(
-          _mm256_load_si256(&input_vector[j]), _mm256_load_si256(&row[j]));
+#if defined(__MINGW32__) || defined(__MINGW64__)
+          // HACK: Use _mm256_loadu_si256() instead of _mm256_load_si256. Because the binary
+          //       compiled with g++ in MSYS2 crashes here because the output memory is not aligned
+          //       even though alignas is specified.
+          _mm256_loadu_si256
+#else
+          _mm256_load_si256
+#endif
+          (&input_vector[j]), _mm256_load_si256(&row[j]));
         product = _mm256_madd_epi16(product, kOnes);
         sum = _mm256_add_epi32(sum, product);
       }
diff --git a/src/eval/nnue/layers/clipped_relu.h b/src/eval/nnue/layers/clipped_relu.h
index f904de74..5877fc32 100644
--- a/src/eval/nnue/layers/clipped_relu.h
+++ b/src/eval/nnue/layers/clipped_relu.h
@@ -73,12 +73,40 @@ class ClippedReLU {
     const auto out = reinterpret_cast<__m256i*>(output);
     for (IndexType i = 0; i < kNumChunks; ++i) {
       const __m256i words0 = _mm256_srai_epi16(_mm256_packs_epi32(
-          _mm256_load_si256(&in[i * 4 + 0]),
-          _mm256_load_si256(&in[i * 4 + 1])), kWeightScaleBits);
+#if defined(__MINGW32__) || defined(__MINGW64__)
+          // HACK: Use _mm256_loadu_si256() instead of _mm256_load_si256. Because the binary
+          //       compiled with g++ in MSYS2 crashes here because the output memory is not aligned
+          //       even though alignas is specified.
+          _mm256_loadu_si256
+#else
+          _mm256_load_si256
+#endif
+          (&in[i * 4 + 0]),
+#if defined(__MINGW32__) || defined(__MINGW64__)
+          _mm256_loadu_si256
+#else
+          _mm256_load_si256
+#endif
+          (&in[i * 4 + 1])), kWeightScaleBits);
       const __m256i words1 = _mm256_srai_epi16(_mm256_packs_epi32(
-          _mm256_load_si256(&in[i * 4 + 2]),
-          _mm256_load_si256(&in[i * 4 + 3])), kWeightScaleBits);
-      _mm256_store_si256(&out[i], _mm256_permutevar8x32_epi32(_mm256_max_epi8(
+#if defined(__MINGW32__) || defined(__MINGW64__)
+          _mm256_loadu_si256
+#else
+          _mm256_load_si256
+#endif
+          (&in[i * 4 + 2]),
+#if defined(__MINGW32__) || defined(__MINGW64__)
+          _mm256_loadu_si256
+#else
+          _mm256_load_si256
+#endif
+          (&in[i * 4 + 3])), kWeightScaleBits);
+#if defined(__MINGW32__) || defined(__MINGW64__)
+      _mm256_storeu_si256
+#else
+      _mm256_store_si256
+#endif
+      (&out[i], _mm256_permutevar8x32_epi32(_mm256_max_epi8(
           _mm256_packs_epi16(words0, words1), kZero), kOffsets));
     }
     constexpr IndexType kStart = kNumChunks * kSimdWidth;
diff --git a/src/eval/nnue/nnue_feature_transformer.h b/src/eval/nnue/nnue_feature_transformer.h
index f7c2080f..57d25310 100644
--- a/src/eval/nnue/nnue_feature_transformer.h
+++ b/src/eval/nnue/nnue_feature_transformer.h
@@ -100,9 +100,24 @@ class FeatureTransformer {
 #if defined(USE_AVX2)
       auto out = reinterpret_cast<__m256i*>(&output[offset]);
       for (IndexType j = 0; j < kNumChunks; ++j) {
-        __m256i sum0 = _mm256_load_si256(&reinterpret_cast<const __m256i*>(
+        __m256i sum0 =
+#if defined(__MINGW32__) || defined(__MINGW64__)
+          // HACK: Use _mm256_loadu_si256() instead of _mm256_load_si256. Because the binary
+          //       compiled with g++ in MSYS2 crashes here because the output memory is not aligned
+          //       even though alignas is specified.
+          _mm256_loadu_si256
+#else
+          _mm256_load_si256
+#endif
+          (&reinterpret_cast<const __m256i*>(
             accumulation[perspectives[p]][0])[j * 2 + 0]);
-        __m256i sum1 = _mm256_load_si256(&reinterpret_cast<const __m256i*>(
+        __m256i sum1 =
+#if defined(__MINGW32__) || defined(__MINGW64__)
+          _mm256_loadu_si256
+#else
+          _mm256_load_si256
+#endif
+          (&reinterpret_cast<const __m256i*>(
             accumulation[perspectives[p]][0])[j * 2 + 1]);
         for (IndexType i = 1; i < kRefreshTriggers.size(); ++i) {
           sum0 = _mm256_add_epi16(sum0, reinterpret_cast<const __m256i*>(
@@ -110,7 +125,12 @@ class FeatureTransformer {
           sum1 = _mm256_add_epi16(sum1, reinterpret_cast<const __m256i*>(
               accumulation[perspectives[p]][i])[j * 2 + 1]);
         }
-        _mm256_store_si256(&out[j], _mm256_permute4x64_epi64(_mm256_max_epi8(
+#if defined(__MINGW32__) || defined(__MINGW64__)
+        _mm256_storeu_si256
+#else
+        _mm256_store_si256
+#endif
+        (&out[j], _mm256_permute4x64_epi64(_mm256_max_epi8(
             _mm256_packs_epi16(sum0, sum1), kZero), kControl));
       }
 #elif defined(USE_SSE41)
@@ -177,7 +197,11 @@ class FeatureTransformer {
         auto column = reinterpret_cast<const __m256i*>(&weights_[offset]);
         constexpr IndexType kNumChunks = kHalfDimensions / (kSimdWidth / 2);
         for (IndexType j = 0; j < kNumChunks; ++j) {
+#if defined(__MINGW32__) || defined(__MINGW64__)
+          _mm256_storeu_si256(&accumulation[j], _mm256_add_epi16(_mm256_loadu_si256(&accumulation[j]), column[j]));
+#else
           accumulation[j] = _mm256_add_epi16(accumulation[j], column[j]);
+#endif
         }
 #elif defined(USE_SSE2)
         auto accumulation = reinterpret_cast<__m128i*>(
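Taken together, the three files above switch every aligned AVX2 load/store that touches the alignas-qualified accumulator and output buffers to the unaligned variant when building under MinGW/MSYS2. The sketch below is not taken from the patched files; it is a minimal standalone illustration of the same pattern, with invented buffer names and sizes (accumulator, column, kNumValues), showing why _mm256_load_si256 can fault where _mm256_loadu_si256 cannot. Compile with -mavx2.

    // Minimal sketch of the aligned-vs-unaligned load/store pattern used above.
    // All identifiers here are invented for illustration only.
    #include <immintrin.h>
    #include <cstdint>
    #include <cstdio>

    constexpr int kNumValues = 32;             // two 256-bit chunks of int16_t
    constexpr int kNumChunks = kNumValues / 16; // 16 int16_t per 256-bit chunk
    alignas(32) std::int16_t accumulator[kNumValues];
    alignas(32) std::int16_t column[kNumValues];

    void AccumulateChunks() {
      auto acc = reinterpret_cast<__m256i*>(accumulator);
      auto col = reinterpret_cast<const __m256i*>(column);
      for (int j = 0; j < kNumChunks; ++j) {
    #if defined(__MINGW32__) || defined(__MINGW64__)
        // Unaligned forms: valid for any address, so they cannot fault even if
        // the compiler fails to honor alignas(32) for these buffers, which is
        // the situation the HACK comments in the patch describe.
        _mm256_storeu_si256(&acc[j],
            _mm256_add_epi16(_mm256_loadu_si256(&acc[j]),
                             _mm256_loadu_si256(&col[j])));
    #else
        // Dereferencing __m256i* compiles to aligned moves, which raise a
        // general-protection fault (the reported crash) on a misaligned address.
        acc[j] = _mm256_add_epi16(acc[j], col[j]);
    #endif
      }
    }

    int main() {
      for (int i = 0; i < kNumValues; ++i) {
        accumulator[i] = static_cast<std::int16_t>(i);
        column[i] = 1;
      }
      AccumulateChunks();
      std::printf("accumulator[5] = %d\n", accumulator[5]);  // prints 6
      return 0;
    }

On AVX2-capable CPUs the unaligned forms are generally as fast as the aligned ones when the address happens to be aligned, so the MinGW fallback costs little; it only gives up the hard fault that the aligned instructions would raise on a misaligned address.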