Mirror of https://github.com/sockspls/badfish
Added a hack to avoid crash with binaries compiled by g++ on MSYS2.
Commit 09e529edd3 (parent 89e846c476)

3 changed files with 69 additions and 9 deletions
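For context: `_mm256_load_si256` requires a 32-byte-aligned address and faults otherwise, while `_mm256_loadu_si256` accepts any address. Under g++ on MSYS2 (a MinGW-w64 toolchain), objects declared `alignas(32)` have not always actually landed on 32-byte boundaries, so the aligned forms crash even though the source asks for the right alignment. A minimal standalone sketch of the failure mode (buffer name is hypothetical; build with `-mavx2`):

```cpp
#include <immintrin.h>
#include <cstdint>
#include <cstdio>

int main() {
    // alignas(32) requests 32-byte alignment, but the whole point of this
    // commit's workaround is that MinGW g++ has not reliably honoured it.
    alignas(32) std::int8_t buf[32] = {};

    std::printf("buf %% 32 == %u\n",
                unsigned(reinterpret_cast<std::uintptr_t>(buf) % 32));

    // Aligned load: faults at runtime if buf is not really 32-byte aligned.
    __m256i a = _mm256_load_si256(reinterpret_cast<const __m256i*>(buf));

    // Unaligned load: valid for any address; this is what the hack
    // substitutes on MinGW targets.
    __m256i u = _mm256_loadu_si256(reinterpret_cast<const __m256i*>(buf));

    return _mm256_extract_epi32(_mm256_add_epi8(a, u), 0);
}
```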
In `AffineTransform`, the aligned load of the input vector becomes unaligned on MinGW targets:

```diff
@@ -101,7 +101,15 @@ class AffineTransform {
       const auto row = reinterpret_cast<const __m256i*>(&weights_[offset]);
       for (IndexType j = 0; j < kNumChunks; ++j) {
         __m256i product = _mm256_maddubs_epi16(
-            _mm256_load_si256(&input_vector[j]), _mm256_load_si256(&row[j]));
+#if defined(__MINGW32__) || defined(__MINGW64__)
+            // HACK: Use _mm256_loadu_si256() instead of _mm256_load_si256. Because the binary
+            // compiled with g++ in MSYS2 crashes here because the output memory is not aligned
+            // even though alignas is specified.
+            _mm256_loadu_si256
+#else
+            _mm256_load_si256
+#endif
+            (&input_vector[j]), _mm256_load_si256(&row[j]));
         product = _mm256_madd_epi16(product, kOnes);
         sum = _mm256_add_epi32(sum, product);
       }
```
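The same preprocessor block recurs around every affected intrinsic in the hunks below. A sketch of an equivalent formulation that centralizes the choice (the wrapper names are invented here, not part of this commit):

```cpp
#include <immintrin.h>

// Hypothetical wrappers: aligned loads/stores normally, unaligned ones on
// MinGW targets where alignas(32) is not reliably honoured.
inline __m256i vec_load(const __m256i* p) {
#if defined(__MINGW32__) || defined(__MINGW64__)
    return _mm256_loadu_si256(p);
#else
    return _mm256_load_si256(p);
#endif
}

inline void vec_store(__m256i* p, __m256i v) {
#if defined(__MINGW32__) || defined(__MINGW64__)
    _mm256_storeu_si256(p, v);
#else
    _mm256_store_si256(p, v);
#endif
}
```

With such helpers the loop body above would read `_mm256_maddubs_epi16(vec_load(&input_vector[j]), _mm256_load_si256(&row[j]))`, with no preprocessor noise at the call site. Note that the commit leaves the weight loads (`&row[j]`) on the aligned path; only the buffers actually observed to be under-aligned get the unaligned forms. On AVX2-capable CPUs an unaligned load of an address that happens to be aligned runs at the same speed as an aligned load, so the MinGW fallback costs essentially nothing when the alignment is in fact correct.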
In `ClippedReLU`, all four input loads and the output store get the same treatment:

```diff
@@ -73,12 +73,40 @@ class ClippedReLU {
     const auto out = reinterpret_cast<__m256i*>(output);
     for (IndexType i = 0; i < kNumChunks; ++i) {
       const __m256i words0 = _mm256_srai_epi16(_mm256_packs_epi32(
-          _mm256_load_si256(&in[i * 4 + 0]),
-          _mm256_load_si256(&in[i * 4 + 1])), kWeightScaleBits);
+#if defined(__MINGW32__) || defined(__MINGW64__)
+          // HACK: Use _mm256_loadu_si256() instead of _mm256_load_si256. Because the binary
+          // compiled with g++ in MSYS2 crashes here because the output memory is not aligned
+          // even though alignas is specified.
+          _mm256_loadu_si256
+#else
+          _mm256_load_si256
+#endif
+          (&in[i * 4 + 0]),
+#if defined(__MINGW32__) || defined(__MINGW64__)
+          _mm256_loadu_si256
+#else
+          _mm256_load_si256
+#endif
+          (&in[i * 4 + 1])), kWeightScaleBits);
       const __m256i words1 = _mm256_srai_epi16(_mm256_packs_epi32(
-          _mm256_load_si256(&in[i * 4 + 2]),
-          _mm256_load_si256(&in[i * 4 + 3])), kWeightScaleBits);
-      _mm256_store_si256(&out[i], _mm256_permutevar8x32_epi32(_mm256_max_epi8(
+#if defined(__MINGW32__) || defined(__MINGW64__)
+          _mm256_loadu_si256
+#else
+          _mm256_load_si256
+#endif
+          (&in[i * 4 + 2]),
+#if defined(__MINGW32__) || defined(__MINGW64__)
+          _mm256_loadu_si256
+#else
+          _mm256_load_si256
+#endif
+          (&in[i * 4 + 3])), kWeightScaleBits);
+#if defined(__MINGW32__) || defined(__MINGW64__)
+      _mm256_storeu_si256
+#else
+      _mm256_store_si256
+#endif
+      (&out[i], _mm256_permutevar8x32_epi32(_mm256_max_epi8(
           _mm256_packs_epi16(words0, words1), kZero), kOffsets));
     }
     constexpr IndexType kStart = kNumChunks * kSimdWidth;
```
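When hunting this kind of crash, it can help to assert the alignment explicitly before the faulting intrinsic runs, turning a hard fault into a readable message. A debugging sketch (the helper name is hypothetical, not part of the commit):

```cpp
#include <cassert>
#include <cstddef>
#include <cstdint>

// Hypothetical debug helper: fail loudly if p is less aligned than the
// alignas() on its declaration promised.
template <std::size_t kAlignment, typename T>
const T* checked_aligned(const T* p) {
    assert(reinterpret_cast<std::uintptr_t>(p) % kAlignment == 0 &&
           "pointer is less aligned than expected");
    return p;
}
```

Wrapping a load as `_mm256_load_si256(checked_aligned<32>(&in[i * 4 + 0]))` in a debug build would have reported the misalignment on MSYS2 instead of crashing.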
In `FeatureTransformer`, the accumulator loads in the AVX2 output path:

```diff
@@ -100,9 +100,24 @@ class FeatureTransformer {
 #if defined(USE_AVX2)
       auto out = reinterpret_cast<__m256i*>(&output[offset]);
       for (IndexType j = 0; j < kNumChunks; ++j) {
-        __m256i sum0 = _mm256_load_si256(&reinterpret_cast<const __m256i*>(
+        __m256i sum0 =
+#if defined(__MINGW32__) || defined(__MINGW64__)
+        // HACK: Use _mm256_loadu_si256() instead of _mm256_load_si256. Because the binary
+        // compiled with g++ in MSYS2 crashes here because the output memory is not aligned
+        // even though alignas is specified.
+        _mm256_loadu_si256
+#else
+        _mm256_load_si256
+#endif
+        (&reinterpret_cast<const __m256i*>(
             accumulation[perspectives[p]][0])[j * 2 + 0]);
-        __m256i sum1 = _mm256_load_si256(&reinterpret_cast<const __m256i*>(
+        __m256i sum1 =
+#if defined(__MINGW32__) || defined(__MINGW64__)
+        _mm256_loadu_si256
+#else
+        _mm256_load_si256
+#endif
+        (&reinterpret_cast<const __m256i*>(
             accumulation[perspectives[p]][0])[j * 2 + 1]);
         for (IndexType i = 1; i < kRefreshTriggers.size(); ++i) {
           sum0 = _mm256_add_epi16(sum0, reinterpret_cast<const __m256i*>(
```
…and the matching output store:

```diff
@@ -110,7 +125,12 @@ class FeatureTransformer {
           sum1 = _mm256_add_epi16(sum1, reinterpret_cast<const __m256i*>(
               accumulation[perspectives[p]][i])[j * 2 + 1]);
         }
-        _mm256_store_si256(&out[j], _mm256_permute4x64_epi64(_mm256_max_epi8(
+#if defined(__MINGW32__) || defined(__MINGW64__)
+        _mm256_storeu_si256
+#else
+        _mm256_store_si256
+#endif
+        (&out[j], _mm256_permute4x64_epi64(_mm256_max_epi8(
             _mm256_packs_epi16(sum0, sum1), kZero), kControl));
       }
 #elif defined(USE_SSE41)
```
Finally, the accumulator update loop, where the in-place add is rewritten on MinGW as an explicit unaligned load, add, and store:

```diff
@@ -177,7 +197,11 @@ class FeatureTransformer {
       auto column = reinterpret_cast<const __m256i*>(&weights_[offset]);
       constexpr IndexType kNumChunks = kHalfDimensions / (kSimdWidth / 2);
       for (IndexType j = 0; j < kNumChunks; ++j) {
+#if defined(__MINGW32__) || defined(__MINGW64__)
+        _mm256_storeu_si256(&accumulation[j], _mm256_add_epi16(_mm256_loadu_si256(&accumulation[j]), column[j]));
+#else
         accumulation[j] = _mm256_add_epi16(accumulation[j], column[j]);
+#endif
       }
 #elif defined(USE_SSE2)
       auto accumulation = reinterpret_cast<__m128i*>(
```
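An alternative to scattering `#if defined(__MINGW32__)` checks is to guarantee the alignment at allocation time, so the aligned intrinsics are always safe. A sketch using C++17's `std::aligned_alloc` (the helper is illustrative, not what this commit does):

```cpp
#include <cstdlib>
#include <cstddef>
#include <new>

// Hypothetical helper: heap storage whose 32-byte alignment does not depend
// on the compiler honouring alignas() on stack or static objects.
template <typename T>
T* make_aligned_buffer(std::size_t count) {
    // std::aligned_alloc requires the size to be a multiple of the alignment,
    // hence the round-up.
    std::size_t bytes = ((count * sizeof(T) + 31) / 32) * 32;
    void* p = std::aligned_alloc(32, bytes);
    if (!p) throw std::bad_alloc();
    return static_cast<T*>(p);  // release later with std::free()
}
```

Note that `std::aligned_alloc` is not provided by MSVC, so fully portable code would also need a fallback such as `_aligned_malloc`; the preprocessor hack above has the virtue of touching only the MinGW builds that actually crashed.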