Cleanup comments
Tests used to derive some Elo worth comments:
https://tests.stockfishchess.org/tests/view/656a7f4e136acbc573555a31
https://tests.stockfishchess.org/tests/view/6585fb455457644dc984620f

closes https://github.com/official-stockfish/Stockfish/pull/4945

No functional change
This commit is contained in:
parent 4f99dfcae2
commit 833a2e2bc0

11 changed files with 37 additions and 37 deletions

.github/ISSUE_TEMPLATE/config.yml (vendored, 2 changes)

@@ -2,7 +2,7 @@ blank_issues_enabled: false
 contact_links:
 - name: Discord server
 url: https://discord.gg/GWDRS3kU6R
-about: Feel free to ask for support or have a chat with us in our Discord server!
+about: Feel free to ask for support or have a chat with us on our Discord server!
 - name: Discussions, Q&A, ideas, show us something...
 url: https://github.com/official-stockfish/Stockfish/discussions/new
 about: Do you have an idea for Stockfish? Do you want to show something that you made? Please open a discussion about it!

.github/workflows/codeql.yml (vendored, 2 changes)

@@ -23,7 +23,7 @@ jobs:
 matrix:
 language: [ 'cpp' ]
 # CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python', 'ruby' ]
-# Use only 'java' to analyze code written in Java, Kotlin or both
+# Use only 'java' to analyze code written in Java, Kotlin, or both
 # Use only 'javascript' to analyze code written in JavaScript, TypeScript or both
 # Learn more about CodeQL language support at https://aka.ms/codeql-docs/language-support

.github/workflows/stockfish_binaries.yml (vendored, 6 changes)

@@ -172,8 +172,8 @@ jobs:
 name: stockfish-${{ matrix.config.os }}-${{ matrix.binaries }}
 path: stockfish-${{ matrix.config.simple_name }}-${{ matrix.binaries }}.tar

-# Artifacts automatically get zipped
-# to avoid double zipping, we use the unzipped directory
+# Artifacts automatically get zipped.
+# To avoid double-zipping, we use the unzipped directory
 - name: Upload binaries
 if: runner.os == 'Windows'
 uses: actions/upload-artifact@v3

@@ -195,7 +195,7 @@ jobs:
 id: commit_date
 run: echo "COMMIT_DATE=$(git show -s --date=format:'%Y%m%d' --format=%cd HEAD)" >> $GITHUB_ENV

-# Make sure that an old ci which still runs on master doesn't recreate a prerelease
+# Make sure that an old ci that still runs on master doesn't recreate a prerelease
 - name: Check Pullable Commits
 id: check_commits
 run: |

@@ -3,8 +3,8 @@
 * @author Dale Weiler
 * @brief Utility for including binary files
 *
-* Facilities for including binary files into the current translation unit and
-* making use from them externally in other translation units.
+* Facilities for including binary files into the current translation unit
+* and making use of them externally in other translation units.
 */
 #ifndef INCBIN_HDR
 #define INCBIN_HDR

@@ -139,7 +139,7 @@
 #endif

 #if defined(__APPLE__)
-/* The directives are different for Apple branded compilers */
+/* The directives are different for Apple-branded compilers */
 # define INCBIN_SECTION INCBIN_OUTPUT_SECTION "\n"
 # define INCBIN_GLOBAL(NAME) ".globl " INCBIN_MANGLE INCBIN_STRINGIZE(INCBIN_PREFIX) #NAME "\n"
 # define INCBIN_INT ".long "

@@ -261,8 +261,8 @@
 INCBIN_STRINGIZE( \
 INCBIN_STYLE_IDENT(TYPE)) \

-/* Generate the global labels by indirectly invoking the macro with our style
- * type and concatenating the name against them. */
+/* Generate the global labels by indirectly invoking the macro
+ * with our style type and concatenate the name against them. */
 #define INCBIN_GLOBAL_LABELS(NAME, TYPE) \
 INCBIN_INVOKE( \
 INCBIN_GLOBAL, \

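The hunks above only touch incbin's file-level description. For readers who haven't used it, a minimal usage sketch follows; the file name and identifier are made up for illustration, and the gExampleNet* symbols come from incbin's default "g" prefix rather than anything in this commit.

#include <cstdio>
#include "incbin.h"

// Embeds example.nnue (hypothetical file) at compile time; INCBIN then exposes
// gExampleNetData, gExampleNetEnd and gExampleNetSize to this and to other
// translation units, which is the "making use of them externally" the comment describes.
INCBIN(ExampleNet, "example.nnue");

int main() {
    std::printf("embedded %u bytes\n", gExampleNetSize);
}
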
@@ -34,11 +34,11 @@ class Position;

 namespace Stockfish::Eval::NNUE::Features {

-// Feature HalfKAv2_hm: Combination of the position of own king
-// and the position of pieces. Position mirrored such that king always on e..h files.
+// Feature HalfKAv2_hm: Combination of the position of own king and the
+// position of pieces. Position mirrored such that king is always on e..h files.
 class HalfKAv2_hm {

-// unique number for each piece type on each square
+// Unique number for each piece type on each square
 enum {
 PS_NONE = 0,
 PS_W_PAWN = 0,

@@ -56,8 +56,8 @@ class HalfKAv2_hm {
 };

 static constexpr IndexType PieceSquareIndex[COLOR_NB][PIECE_NB] = {
-// convention: W - us, B - them
-// viewed from other side, W and B are reversed
+// Convention: W - us, B - them
+// Viewed from other side, W and B are reversed
 {PS_NONE, PS_W_PAWN, PS_W_KNIGHT, PS_W_BISHOP, PS_W_ROOK, PS_W_QUEEN, PS_KING, PS_NONE,
 PS_NONE, PS_B_PAWN, PS_B_KNIGHT, PS_B_BISHOP, PS_B_ROOK, PS_B_QUEEN, PS_KING, PS_NONE},
 {PS_NONE, PS_B_PAWN, PS_B_KNIGHT, PS_B_BISHOP, PS_B_ROOK, PS_B_QUEEN, PS_KING, PS_NONE,

@@ -140,8 +140,8 @@ class HalfKAv2_hm {
 static int update_cost(const StateInfo* st);
 static int refresh_cost(const Position& pos);

-// Returns whether the change stored in this StateInfo means that
-// a full accumulator refresh is required.
+// Returns whether the change stored in this StateInfo means
+// that a full accumulator refresh is required.
 static bool requires_refresh(const StateInfo* st, Color perspective);
 };

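The reworded feature comment says the position is mirrored so that the king always sits on files e..h. A small sketch of that mirroring idea, using the common 0..63 square numbering (a1 = 0, h8 = 63); this illustrates the general technique only, not Stockfish's actual index computation.

#include <cstdio>

// XOR-ing a 0..63 square with 7 flips its file (a<->h, b<->g, ...), so a position whose
// king stands on files a..d can be mirrored to put the king on files e..h.
int mirror_if_needed(int sq, int king_sq) {
    const bool king_on_a_to_d = (king_sq & 7) < 4;   // file index 0..3
    return king_on_a_to_d ? sq ^ 7 : sq;
}

int main() {
    // King on c1 (square 2): c1 maps to f1 (5), and a piece on e4 (28) maps to d4 (27).
    std::printf("%d %d\n", mirror_if_needed(2, 2), mirror_if_needed(28, 2));
}
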
@@ -235,7 +235,7 @@ class AffineTransformSparseInput {

 const auto input32 = reinterpret_cast<const std::int32_t*>(input);

-// Find indices of nonzero 32bit blocks
+// Find indices of nonzero 32-bit blocks
 find_nnz<NumChunks>(input32, nnz, count);

 const outvec_t* biasvec = reinterpret_cast<const outvec_t*>(biases);

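find_nnz itself is SIMD code; as a reference for what the retouched comment describes, here is a plain scalar equivalent of "find indices of nonzero 32-bit blocks" (the name find_nnz_scalar and the exact parameter types are illustrative, not Stockfish's signature).

#include <cstddef>
#include <cstdint>

// Collect the indices of the 32-bit chunks of the input that are nonzero, together
// with how many there are, so the sparse affine transform only visits those columns.
template<std::size_t NumChunks>
void find_nnz_scalar(const std::int32_t* input32, std::uint16_t* out, std::size_t& count) {
    count = 0;
    for (std::size_t i = 0; i < NumChunks; ++i)
        if (input32[i] != 0)
            out[count++] = static_cast<std::uint16_t>(i);
}

Used as find_nnz_scalar<8>(input32, nnz, count) on a small buffer, it fills nnz with the chunk indices to process and count with their number.
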
@@ -91,7 +91,7 @@ class SqrClippedReLU {
 for (IndexType i = Start; i < InputDimensions; ++i)
 {
 output[i] = static_cast<OutputType>(
-// Really should be /127 but we need to make it fast so we right shift
+// Really should be /127 but we need to make it fast so we right-shift
 // by an extra 7 bits instead. Needs to be accounted for in the trainer.
 std::min(127ll, ((long long) (input[i]) * input[i]) >> (2 * WeightScaleBits + 7)));
 }

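The hyphenation fix above sits in a comment describing a real shortcut: instead of dividing the squared input by 127, the code right-shifts by 7 extra bits, i.e. divides by 128. A quick numeric check of that approximation (WeightScaleBits = 6 is an assumption about the current nets; the expressions mirror the comment, not the exact surrounding code):

#include <algorithm>
#include <cstdio>

int main() {
    const int       WeightScaleBits = 6;     // assumed value, for illustration only
    const long long x               = 5000;  // example pre-activation value

    // "Really should be /127": divide the squared value by 127 * 2^(2*WeightScaleBits).
    long long exact   = std::min(127ll, (x * x) / (127ll << (2 * WeightScaleBits)));
    // What the code does instead: shift right by 2*WeightScaleBits + 7, i.e. divide by 2^19.
    long long shifted = std::min(127ll, (x * x) >> (2 * WeightScaleBits + 7));

    std::printf("exact=%lld shifted=%lld\n", exact, shifted);   // prints exact=48 shifted=47
}

The two results differ by under 1%, which is why the comment notes that the trainer has to account for the extra shift.
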
@@ -112,7 +112,7 @@ inline IntType read_little_endian(std::istream& stream) {

 // Utility to write an integer (signed or unsigned, any size)
 // to a stream in little-endian order. We swap the byte order before the write if
-// necessary to always write in little endian order, independently of the byte
+// necessary to always write in little-endian order, independently of the byte
 // ordering of the compiling machine.
 template<typename IntType>
 inline void write_little_endian(std::ostream& stream, IntType value) {

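The comment fixed above describes writing integers in little-endian order regardless of the host byte order. A minimal sketch of that idea, independent of Stockfish's actual implementation (the name write_le_sketch is made up):

#include <cstddef>
#include <ostream>
#include <type_traits>

// Write the value one byte at a time, least significant byte first, so the on-disk
// format is little-endian no matter what the compiling machine's byte order is.
template<typename IntType>
void write_le_sketch(std::ostream& stream, IntType value) {
    using Unsigned = typename std::make_unsigned<IntType>::type;
    Unsigned u = static_cast<Unsigned>(value);
    for (std::size_t i = 0; i < sizeof(IntType); ++i) {
        char byte = static_cast<char>(u & 0xFF);
        stream.write(&byte, 1);
        u = static_cast<Unsigned>(u >> 8);
    }
}

Reading it back is the mirror operation: accumulate each byte shifted left by 8*i, which is what the read_little_endian helpers in this header do.
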
@@ -141,8 +141,8 @@ inline void write_little_endian(std::ostream& stream, IntType value) {
 }


-// Read integers in bulk from a little indian stream.
-// This reads N integers from stream s and put them in array out.
+// Read integers in bulk from a little-endian stream.
+// This reads N integers from stream s and puts them in array out.
 template<typename IntType>
 inline void read_little_endian(std::istream& stream, IntType* out, std::size_t count) {
 if (IsLittleEndian)

@@ -153,7 +153,7 @@ inline void read_little_endian(std::istream& stream, IntType* out, std::size_t c
 }


-// Write integers in bulk to a little indian stream.
+// Write integers in bulk to a little-endian stream.
 // This takes N integers from array values and writes them on stream s.
 template<typename IntType>
 inline void write_little_endian(std::ostream& stream, const IntType* values, std::size_t count) {

@@ -165,8 +165,8 @@ inline void write_little_endian(std::ostream& stream, const IntType* values, std
 }


-// Read N signed integers from the stream s, putting them in
-// the array out. The stream is assumed to be compressed using the signed LEB128 format.
+// Read N signed integers from the stream s, putting them in the array out.
+// The stream is assumed to be compressed using the signed LEB128 format.
 // See https://en.wikipedia.org/wiki/LEB128 for a description of the compression scheme.
 template<typename IntType>
 inline void read_leb_128(std::istream& stream, IntType* out, std::size_t count) {

@@ -216,8 +216,8 @@ inline void read_leb_128(std::istream& stream, IntType* out, std::size_t count)


 // Write signed integers to a stream with LEB128 compression.
-// This takes N integers from array values, compress them with the LEB128 algorithm and
-// writes the result on the stream s.
+// This takes N integers from array values, compresses them with
+// the LEB128 algorithm and writes the result on the stream s.
 // See https://en.wikipedia.org/wiki/LEB128 for a description of the compression scheme.
 template<typename IntType>
 inline void write_leb_128(std::ostream& stream, const IntType* values, std::size_t count) {

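Both LEB128 comments above point to https://en.wikipedia.org/wiki/LEB128. For reference, here is a self-contained sketch of signed LEB128 for a single integer; Stockfish's read_leb_128/write_leb_128 operate on whole arrays with their own buffering, so this only illustrates the encoding itself.

#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <vector>

// Emit 7 payload bits per byte, with the high bit set while more bytes follow; stop once
// the remaining value is all sign bits and the sign is already captured in bit 6.
static void sleb128_encode(std::int64_t v, std::vector<std::uint8_t>& out) {
    for (bool more = true; more;) {
        std::uint8_t byte = v & 0x7F;
        v >>= 7;   // arithmetic shift keeps the sign for negative values
        more = !((v == 0 && !(byte & 0x40)) || (v == -1 && (byte & 0x40)));
        out.push_back(more ? std::uint8_t(byte | 0x80) : byte);
    }
}

// Reassemble the 7-bit groups and sign-extend if the last byte had bit 6 set.
static std::int64_t sleb128_decode(const std::vector<std::uint8_t>& in) {
    std::int64_t result = 0;
    int          shift  = 0;
    std::uint8_t byte   = 0;
    for (std::size_t i = 0; i < in.size(); ++i) {
        byte    = in[i];
        result |= std::int64_t(byte & 0x7F) << shift;
        shift  += 7;
        if (!(byte & 0x80))
            break;
    }
    if (shift < 64 && (byte & 0x40))
        result |= -(std::int64_t(1) << shift);
    return result;
}

int main() {
    std::vector<std::uint8_t> buf;
    sleb128_encode(-123456, buf);
    std::printf("%d bytes, round trip = %lld\n", int(buf.size()),
                (long long) sleb128_decode(buf));   // 3 bytes, round trip = -123456
}
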
@@ -366,14 +366,14 @@ class FeatureTransformer {

 // The size must be enough to contain the largest possible update.
 // That might depend on the feature set and generally relies on the
-// feature set's update cost calculation to be correct and never
-// allow updates with more added/removed features than MaxActiveDimensions.
+// feature set's update cost calculation to be correct and never allow
+// updates with more added/removed features than MaxActiveDimensions.
 FeatureSet::IndexList removed[N - 1], added[N - 1];

 {
 int i =
 N
-- 2; // last potential state to update. Skip last element because it must be nullptr.
+- 2; // Last potential state to update. Skip last element because it must be nullptr.
 while (states_to_update[i] == nullptr)
 --i;

@@ -747,7 +747,7 @@ Value search(Position& pos, Stack* ss, Value alpha, Value beta, Depth depth, boo
 tte->save(posKey, VALUE_NONE, ss->ttPv, BOUND_NONE, DEPTH_NONE, MOVE_NONE, eval);
 }

-// Use static evaluation difference to improve quiet move ordering (~4 Elo)
+// Use static evaluation difference to improve quiet move ordering (~9 Elo)
 if (is_ok((ss - 1)->currentMove) && !(ss - 1)->inCheck && !priorCapture)
 {
 int bonus = std::clamp(-13 * int((ss - 1)->staticEval + ss->staticEval), -1652, 1546);

@@ -1201,6 +1201,7 @@ moves_loop: // When in check, search starts here
 if (newDepth > d)
 value = -search<NonPV>(pos, ss + 1, -(alpha + 1), -alpha, newDepth, !cutNode);

+// Post LMR continuation history updates (~1 Elo)
 int bonus = value <= alpha ? -stat_malus(newDepth)
 : value >= beta ? stat_bonus(newDepth)
 : 0;

@@ -1216,7 +1217,7 @@ moves_loop: // When in check, search starts here
 if (!ttMove)
 r += 2;

-// Note that if expected reduction is high, we reduce search depth by 1 here
+// Note that if expected reduction is high, we reduce search depth by 1 here (~9 Elo)
 value = -search<NonPV>(pos, ss + 1, -(alpha + 1), -alpha, newDepth - (r > 3), !cutNode);
 }

@@ -1644,8 +1645,7 @@ Value value_to_tt(Value v, int ply) {
 // from the transposition table (which refers to the plies to mate/be mated from
 // current position) to "plies to mate/be mated (TB win/loss) from the root".
 // However, to avoid potentially false mate or TB scores related to the 50 moves rule
-// and the graph history interaction, we return highest non-TB score instead.
+// and the graph history interaction, we return the highest non-TB score instead.

 Value value_from_tt(Value v, int ply, int r50c) {

 if (v == VALUE_NONE)

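The comment above concerns translating mate/TB scores between "plies from the current position" (how they are stored in the transposition table) and "plies from the root" (how the search uses them). A hedged sketch of that ply adjustment, with an illustrative threshold constant rather than Stockfish's real bounds or its 50-move-rule handling:

// VALUE_MATE_LIKE is a made-up threshold marking "mate-like / TB-like" scores.
constexpr int VALUE_MATE_LIKE = 30000;

// Root-relative -> node-relative, the kind of adjustment made when storing a score in the TT.
int to_tt_sketch(int v, int ply) {
    return v >= VALUE_MATE_LIKE ? v + ply : v <= -VALUE_MATE_LIKE ? v - ply : v;
}

// Node-relative -> root-relative, the kind of adjustment made when probing the TT.
int from_tt_sketch(int v, int ply) {
    return v >= VALUE_MATE_LIKE ? v - ply : v <= -VALUE_MATE_LIKE ? v + ply : v;
}

The real value_from_tt additionally falls back to the highest non-TB score when the 50-move counter makes a stored mate/TB score unreliable, which is what the reworded comment describes.
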
@@ -1,5 +1,5 @@
 #!/bin/bash
-# check for errors under valgrind or sanitizers.
+# check for errors under Valgrind or sanitizers.

 error()
 {

@@ -151,7 +151,7 @@ cat << EOF > game.exp
 send "quit\n"
 expect eof

-# return error code of the spawned program, useful for valgrind
+# return error code of the spawned program, useful for Valgrind
 lassign [wait] pid spawnid os_error_flag value
 exit \$value
 EOF

@@ -179,7 +179,7 @@ cat << EOF > syzygy.exp
 send "quit\n"
 expect eof

-# return error code of the spawned program, useful for valgrind
+# return error code of the spawned program, useful for Valgrind
 lassign [wait] pid spawnid os_error_flag value
 exit \$value
 EOF