BadFish/src/nnue/layers/sqr_clipped_relu.h
Tomasz Sobczyk c079acc26f Update NNUE architecture to SFNNv5. Update network to nn-3c0aa92af1da.nnue.
Architecture changes:

    Duplicated the activation after the 1024->15 layer: a squared crelu is applied alongside the regular crelu (so the 15 outputs become 15*2). As proposed by vondele.
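
To make the change concrete, here is a minimal float-side sketch of the two activations (function names are illustrative, not taken from the trainer or the engine):

    #include <algorithm>

    // Regular clipped ReLU: clamp to [0, 1]
    float crelu(float v)     { return std::clamp(v, 0.0f, 1.0f); }

    // Squared clipped ReLU: same clamp, then square (result stays in [0, 1])
    float sqr_crelu(float v) { return crelu(v) * crelu(v); }

Both activations read the same 15 outputs of the 1024->15 layer, so the following layer sees 15*2 = 30 inputs.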

Trainer changes:

    Added a bias to the L1 factorization, which was previously missing (no measurable improvement, but at least neutral in principle).
    For retraining, linearly reduce the lambda parameter from 1.0 at epoch 0 to 0.75 at epoch 800; see the sketch after this list.
    Reduced max_skipping_rate from 15 to 10 (compared to vondele's open PR).
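
A minimal sketch of the linear lambda schedule (the function name and the exact interpolation form are assumptions, not trainer code):

    #include <algorithm>

    // Linearly interpolate lambda from 1.0 at epoch 0 down to 0.75 at epoch 800
    double lambda_at(int epoch) {
        double t = std::min(1.0, epoch / 800.0);
        return 1.0 + t * (0.75 - 1.0);
    }
    // lambda_at(0) == 1.0, lambda_at(400) == 0.875, lambda_at(800) == 0.75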

Note: This network was trained with a ~0.8% quantization error in the newly added activation function.
      This will be fixed in the released trainer version. Expect a trainer PR tomorrow.
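      For scale: the fast integer kernel below computes x^2 >> 19 (a shift by
      2 * WeightScaleBits = 12, then a division by 128), while exact
      dequantization would divide by 127 * 2^12 rather than by 2^19. The ratio
      2^19 / (127 * 2^12) = 128/127 ~ 1.0079, which plausibly accounts for the ~0.8%.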

Note: The inference implementation cuts a corner to merge the results of the two activation functions.
      This could be handled more cleanly in the future. An AVX2 implementation is likely unnecessary, but a NEON one is still missing.
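
One plausible reading of the cut corner, sketched with hypothetical buffer and layer names (the engine's real layer stack may differ in detail): both activations consume the same 15-wide affine output and write into one shared 30-wide buffer at different offsets, so no separate concatenation pass is needed:

    ac_sqr_0.propagate(fc_0_out, merged_out);              // fills slots [0, 15)
    ac_0.propagate(fc_0_out, merged_out + FC_0_OUTPUTS);   // fills slots [15, 30)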

First training session invocation:

python3 train.py \
    ../nnue-pytorch-training/data/nodes5000pv2_UHO.binpack \
    ../nnue-pytorch-training/data/nodes5000pv2_UHO.binpack \
    --gpus "$3," \
    --threads 4 \
    --num-workers 8 \
    --batch-size 16384 \
    --progress_bar_refresh_rate 20 \
    --random-fen-skipping 3 \
    --features=HalfKAv2_hm^ \
    --lambda=1.0 \
    --max_epochs=400 \
    --default_root_dir ../nnue-pytorch-training/experiment_$1/run_$2
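
In the nnue-pytorch trainer, lambda blends the search-score target with the game-result target, so --lambda=1.0 trains purely on the search score. A hedged one-line sketch of that blend (the function name is an assumption):

    // lambda = 1.0 -> pure eval target, lambda = 0.0 -> pure game-result target
    double blended_target(double evalTarget, double resultTarget, double lambda) {
        return lambda * evalTarget + (1.0 - lambda) * resultTarget;
    }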

Second training session invocation:

python3 train.py \
    ../nnue-pytorch-training/data/T60T70wIsRightFarseerT60T74T75T76.binpack \
    ../nnue-pytorch-training/data/T60T70wIsRightFarseerT60T74T75T76.binpack \
    --gpus "$3," \
    --threads 4 \
    --num-workers 8 \
    --batch-size 16384 \
    --progress_bar_refresh_rate 20 \
    --random-fen-skipping 3 \
    --features=HalfKAv2_hm^ \
    --start-lambda=1.0 \
    --end-lambda=0.75 \
    --gamma=0.995 \
    --lr=4.375e-4 \
    --max_epochs=800 \
    --resume-from-model /data/sopel/nnue/nnue-pytorch-training/data/exp367/nn-exp367-run3-epoch399.pt \
    --default_root_dir ../nnue-pytorch-training/experiment_$1/run_$2
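
Assuming --gamma acts as the usual per-epoch multiplicative learning-rate decay, the schedule is lr(epoch) = 4.375e-4 * 0.995^epoch; as a one-line sketch:

    #include <cmath>

    double lr_at(int epoch) { return 4.375e-4 * std::pow(0.995, epoch); }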

Passed STC:
LLR: 2.95 (-2.94,2.94) <0.00,2.50>
Total: 27288 W: 7445 L: 7178 D: 12665
Ptnml(0-2): 159, 3002, 7054, 3271, 158
https://tests.stockfishchess.org/tests/view/627e8c001919125939623644

Passed LTC:
LLR: 2.95 (-2.94,2.94) <0.50,3.00>
Total: 21792 W: 5969 L: 5727 D: 10096
Ptnml(0-2): 25, 2152, 6294, 2406, 19
https://tests.stockfishchess.org/tests/view/627f2a855734b18b2e2ece47

closes https://github.com/official-stockfish/Stockfish/pull/4020

Bench: 6481017
2022-05-14 12:47:22 +02:00


/*
  Stockfish, a UCI chess playing engine derived from Glaurung 2.1
  Copyright (C) 2004-2022 The Stockfish developers (see AUTHORS file)

  Stockfish is free software: you can redistribute it and/or modify
  it under the terms of the GNU General Public License as published by
  the Free Software Foundation, either version 3 of the License, or
  (at your option) any later version.

  Stockfish is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  GNU General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with this program.  If not, see <http://www.gnu.org/licenses/>.
*/

// Definition of layer SqrClippedReLU of NNUE evaluation function

#ifndef NNUE_LAYERS_SQR_CLIPPED_RELU_H_INCLUDED
#define NNUE_LAYERS_SQR_CLIPPED_RELU_H_INCLUDED

#include "../nnue_common.h"

namespace Stockfish::Eval::NNUE::Layers {

  // Squared clipped ReLU
  template <IndexType InDims>
  class SqrClippedReLU {
   public:
    // Input/output type
    using InputType = std::int32_t;
    using OutputType = std::uint8_t;

    // Number of input/output dimensions
    static constexpr IndexType InputDimensions = InDims;
    static constexpr IndexType OutputDimensions = InputDimensions;
    static constexpr IndexType PaddedOutputDimensions =
        ceil_to_multiple<IndexType>(OutputDimensions, 32);

    using OutputBuffer = OutputType[PaddedOutputDimensions];

    // Hash value embedded in the evaluation file: a layer-specific
    // constant chained with the hash of the preceding layer
    static constexpr std::uint32_t get_hash_value(std::uint32_t prevHash) {
      std::uint32_t hashValue = 0x538D24C7u;
      hashValue += prevHash;
      return hashValue;
    }

    // Read network parameters (this layer has none to read)
    bool read_parameters(std::istream&) {
      return true;
    }

    // Write network parameters (this layer has none to write)
    bool write_parameters(std::ostream&) const {
      return true;
    }

    // Forward propagation
    const OutputType* propagate(
        const InputType* input, OutputType* output) const {

#if defined(USE_SSE2)
      constexpr IndexType NumChunks = InputDimensions / 16;

#ifdef USE_SSE41
      const __m128i Zero = _mm_setzero_si128();
#else
      const __m128i k0x80s = _mm_set1_epi8(-128);
#endif

      static_assert(WeightScaleBits == 6);

      const auto in = reinterpret_cast<const __m128i*>(input);
      const auto out = reinterpret_cast<__m128i*>(output);
      for (IndexType i = 0; i < NumChunks; ++i) {
        // Saturate the int32 inputs down to int16
        __m128i words0 = _mm_packs_epi32(
            _mm_load_si128(&in[i * 4 + 0]),
            _mm_load_si128(&in[i * 4 + 1]));
        __m128i words1 = _mm_packs_epi32(
            _mm_load_si128(&in[i * 4 + 2]),
            _mm_load_si128(&in[i * 4 + 3]));

        // We want x^2 >> 19: a shift by 2 * WeightScaleBits = 12 plus a
        // division by 128 (another shift by 7). mulhi already discards the
        // low 16 bits of the square, so only 3 more shifts are needed.
        words0 = _mm_srli_epi16(_mm_mulhi_epi16(words0, words0), 3);
        words1 = _mm_srli_epi16(_mm_mulhi_epi16(words1, words1), 3);

        const __m128i packedbytes = _mm_packs_epi16(words0, words1);

        // Clamp the packed int8 results to [0, 127]
        _mm_store_si128(&out[i],
#ifdef USE_SSE41
          _mm_max_epi8(packedbytes, Zero)
#else
          _mm_subs_epi8(_mm_adds_epi8(packedbytes, k0x80s), k0x80s)
#endif
        );
      }
      constexpr IndexType Start = NumChunks * 16;
#else
      constexpr IndexType Start = 0;
#endif

      for (IndexType i = Start; i < InputDimensions; ++i) {
        output[i] = static_cast<OutputType>(
            // really should be /127 but we need to make it fast
            // needs to be accounted for in the trainer
            std::max(0ll, std::min(127ll,
                (((long long)input[i] * input[i]) >> (2 * WeightScaleBits)) / 128)));
      }

      return output;
    }
  };

}  // namespace Stockfish::Eval::NNUE::Layers

#endif // NNUE_LAYERS_SQR_CLIPPED_RELU_H_INCLUDED
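
For a quick sanity check of the scalar path, a hedged standalone sketch (not part of the engine; assumes WeightScaleBits = 6 as asserted above):

    #include <algorithm>
    #include <cstdint>
    #include <cstdio>

    // Reference kernel: (x^2 >> 12) / 128 == x^2 >> 19, clamped to [0, 127]
    std::uint8_t sqr_crelu_ref(std::int32_t x) {
        long long v = (((long long)x * x) >> 12) / 128;
        return (std::uint8_t)std::max(0ll, std::min(127ll, v));
    }

    int main() {
        // 127 * 64 represents 1.0 on the quantized input scale; the exact
        // result would be 127, the fast kernel yields 126 (the ~0.8% gap).
        std::printf("%d\n", sqr_crelu_ref(127 * 64));   // prints 126
        return 0;
    }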