
VVLTC search tune

Parameters were tuned in 3 stages:

* Using an earlier L1-3072 net, with the triple extension margin manually set to 0: https://tests.stockfishchess.org/tests/view/65ffdf5d0ec64f0526c544f2 (~30k games)
* Continuing the tune, but with the previous master net (L1-2560): https://tests.stockfishchess.org/tests/view/660663f00ec64f0526c59c41 (~27k games)
* Starting from the stage 2 parameters, using the current L1-3072 net and allowing the triple extension margin to be tuned from 0 (sketched below): https://tests.stockfishchess.org/tests/view/660c16b8216a13d9498e7536 (40k games)
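
A hypothetical sketch of how a search constant such as the triple extension margin can be exposed to an SPSA tuner as a settable option. The registry, names, and ranges below are illustrative only and are not Stockfish's actual tune.h interface; in the diff further down, the tuned margin ends up as the "singularBeta - 11" term of the extension condition.

// Hypothetical sketch: SPSA-tunable search constants exposed by name so a
// tuning worker can set them per game. Not Stockfish's real tuning hooks.
#include <algorithm>
#include <map>
#include <string>

struct Tunable {
    int value, min, max;
};

std::map<std::string, Tunable> tunables = {
    // Stage 1 pinned this margin at 0; stage 3 let SPSA move it again.
    {"TripleExtensionMargin", {0, 0, 64}},
};

void set_tunable(const std::string& name, int v) {
    auto it = tunables.find(name);
    if (it != tunables.end())
        it->second.value = std::clamp(v, it->second.min, it->second.max);
}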

Passed VVLTC 1st sprt: https://tests.stockfishchess.org/tests/view/66115eacbfeb43334bf7eddd
LLR: 2.95 (-2.94,2.94) <0.00,2.00>
Total: 27138 W: 7045 L: 6789 D: 13304
Ptnml(0-2): 1, 2421, 8471, 2673, 3

Passed VVLTC 2nd sprt: https://tests.stockfishchess.org/tests/view/661483623eb00c8ccc0049c1
LLR: 2.94 (-2.94,2.94) <0.50,2.50>
Total: 26242 W: 6807 L: 6535 D: 12900
Ptnml(0-2): 0, 2353, 8143, 2625, 0
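
For reference, the LLR bounds of (-2.94, 2.94) above follow from alpha = beta = 0.05. The sketch below shows only the generic SPRT stopping rule; fishtest computes the LLR itself with a pentanomial GSPRT over the game-pair counts, which is not reproduced here.

// Generic SPRT stopping rule for error rates alpha = beta = 0.05.
#include <cmath>
#include <cstdio>

int main() {
    const double alpha = 0.05, beta = 0.05;
    const double upper = std::log((1 - beta) / alpha);  //  log(19) ~  2.94, accept H1
    const double lower = std::log(beta / (1 - alpha));  // -log(19) ~ -2.94, accept H0

    const double llr = 2.94;  // value reported by fishtest for the 2nd sprt
    if (llr >= upper)
        std::printf("stop: pass (consistent with Elo bounds <0.50, 2.50>)\n");
    else if (llr <= lower)
        std::printf("stop: fail\n");
    else
        std::printf("keep playing games\n");
}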

STC Elo estimate: https://tests.stockfishchess.org/tests/view/66175ca55a4693796d96608c
Elo: -10.53 ± 2.4 (95%) LOS: 0.0%
Total: 21584 W: 5294 L: 5948 D: 10342
Ptnml(0-2): 102, 2937, 5363, 2293, 97
nElo: -19.99 ± 4.7 (95%) PairsRatio: 0.79
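
The headline numbers can be sanity-checked from the raw counts: a logistic Elo estimate from the W/L/D totals and a pairs ratio from the Ptnml line reproduce -10.53 and 0.79. The nElo value and error bars come from fishtest's pentanomial model and are not reproduced in this sketch.

// Sanity check of the reported Elo estimate and PairsRatio from the raw counts.
#include <cmath>
#include <cstdio>

int main() {
    const double W = 5294, L = 5948, D = 10342;
    const double N     = W + L + D;                                   // 21584 games
    const double score = (W + 0.5 * D) / N;                           // ~0.4849
    const double elo   = 400.0 * std::log10(score / (1.0 - score));   // ~-10.53

    // Ptnml(0-2): game pairs scoring 0, 0.5, 1, 1.5 and 2 points.
    const double ptnml[5]   = {102, 2937, 5363, 2293, 97};
    const double pairsRatio = (ptnml[3] + ptnml[4]) / (ptnml[0] + ptnml[1]);  // ~0.79

    std::printf("Elo %.2f  PairsRatio %.2f\n", elo, pairsRatio);
}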

closes https://github.com/official-stockfish/Stockfish/pull/5162

Bench: 1381387
Author: Muzhen Gaming, 2024-04-06 22:39:29 +08:00 (committed by Joost VandeVondele)
Parent: 9032c6cbe7
Commit: 1adf8e1ae6
2 changed files with 39 additions and 39 deletions

src/evaluate.h

@@ -29,7 +29,7 @@ class Position;
 namespace Eval {
-constexpr inline int SmallNetThreshold = 1165, PsqtOnlyThreshold = 2500;
+constexpr inline int SmallNetThreshold = 1274, PsqtOnlyThreshold = 2389;
 // The default net name MUST follow the format nn-[SHA256 first 12 digits].nnue
 // for the build process (profile-build and fishtest) to work. Do not change the
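
For context on the hunk above: these thresholds gate which evaluation path is used, based on a cheap material/PSQT estimate of the position. The sketch below is a simplified illustration of that idea under stated assumptions, not the actual evaluate.cpp logic.

// Simplified illustration only: the more lopsided the cheap estimate,
// the cheaper the evaluation path chosen.
#include <cstdlib>

constexpr int SmallNetThreshold = 1274, PsqtOnlyThreshold = 2389;

enum class EvalPath { BigNet, SmallNet, PsqtOnly };

EvalPath choose_path(int simpleEval) {
    if (std::abs(simpleEval) > PsqtOnlyThreshold)
        return EvalPath::PsqtOnly;
    if (std::abs(simpleEval) > SmallNetThreshold)
        return EvalPath::SmallNet;
    return EvalPath::BigNet;
}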

src/search.cpp

@@ -58,8 +58,8 @@ static constexpr double EvalLevel[10] = {1.043, 1.017, 0.952, 1.009, 0.971,
 // Futility margin
 Value futility_margin(Depth d, bool noTtCutNode, bool improving, bool oppWorsening) {
     Value futilityMult       = 118 - 44 * noTtCutNode;
-    Value improvingDeduction = 53 * improving * futilityMult / 32;
-    Value worseningDeduction = (309 + 47 * improving) * oppWorsening * futilityMult / 1024;
+    Value improvingDeduction = 52 * improving * futilityMult / 32;
+    Value worseningDeduction = (310 + 48 * improving) * oppWorsening * futilityMult / 1024;
     return futilityMult * d - improvingDeduction - worseningDeduction;
 }
@@ -71,15 +71,15 @@ constexpr int futility_move_count(bool improving, Depth depth) {
 // Add correctionHistory value to raw staticEval and guarantee evaluation does not hit the tablebase range
 Value to_corrected_static_eval(Value v, const Worker& w, const Position& pos) {
     auto cv = w.correctionHistory[pos.side_to_move()][pawn_structure_index<Correction>(pos)];
-    v += cv * std::abs(cv) / 11175;
+    v += cv * std::abs(cv) / 9260;
     return std::clamp(v, VALUE_TB_LOSS_IN_MAX_PLY + 1, VALUE_TB_WIN_IN_MAX_PLY - 1);
 }
 // History and stats update bonus, based on depth
-int stat_bonus(Depth d) { return std::clamp(245 * d - 320, 0, 1296); }
+int stat_bonus(Depth d) { return std::clamp(211 * d - 315, 0, 1291); }
 // History and stats update malus, based on depth
-int stat_malus(Depth d) { return (d < 4 ? 554 * d - 303 : 1203); }
+int stat_malus(Depth d) { return (d < 4 ? 572 * d - 285 : 1372); }
 // Add a small random component to draw evaluations to avoid 3-fold blindness
 Value value_draw(size_t nodes) { return VALUE_DRAW - 1 + Value(nodes & 0x2); }
@@ -303,12 +303,12 @@ void Search::Worker::iterative_deepening() {
             // Reset aspiration window starting size
             Value avg = rootMoves[pvIdx].averageScore;
-            delta     = 10 + avg * avg / 12493;
+            delta     = 11 + avg * avg / 11254;
             alpha     = std::max(avg - delta, -VALUE_INFINITE);
             beta      = std::min(avg + delta, VALUE_INFINITE);
             // Adjust optimism based on root move's averageScore (~4 Elo)
-            optimism[us]  = 132 * avg / (std::abs(avg) + 89);
+            optimism[us]  = 125 * avg / (std::abs(avg) + 91);
             optimism[~us] = -optimism[us];
             // Start with a small aspiration window and, in the case of a fail
@@ -496,10 +496,10 @@ void Search::Worker::clear() {
         for (StatsType c : {NoCaptures, Captures})
             for (auto& to : continuationHistory[inCheck][c])
                 for (auto& h : to)
-                    h->fill(-67);
+                    h->fill(-65);
     for (size_t i = 1; i < reductions.size(); ++i)
-        reductions[i] = int((19.80 + std::log(size_t(options["Threads"])) / 2) * std::log(i));
+        reductions[i] = int((20.14 + std::log(size_t(options["Threads"])) / 2) * std::log(i));
 }
@@ -731,7 +731,7 @@ Value Search::Worker::search(
     // Use static evaluation difference to improve quiet move ordering (~9 Elo)
     if (((ss - 1)->currentMove).is_ok() && !(ss - 1)->inCheck && !priorCapture)
     {
-        int bonus = std::clamp(-13 * int((ss - 1)->staticEval + ss->staticEval), -1578, 1291);
+        int bonus = std::clamp(-14 * int((ss - 1)->staticEval + ss->staticEval), -1644, 1384);
         bonus = bonus > 0 ? 2 * bonus : bonus / 2;
         thisThread->mainHistory[~us][((ss - 1)->currentMove).from_to()] << bonus;
         if (type_of(pos.piece_on(prevSq)) != PAWN && ((ss - 1)->currentMove).type_of() != PROMOTION)
@@ -754,7 +754,7 @@ Value Search::Worker::search(
     // If eval is really low check with qsearch if it can exceed alpha, if it can't,
     // return a fail low.
     // Adjust razor margin according to cutoffCnt. (~1 Elo)
-    if (eval < alpha - 488 - (289 - 142 * ((ss + 1)->cutoffCnt > 3)) * depth * depth)
+    if (eval < alpha - 471 - (276 - 148 * ((ss + 1)->cutoffCnt > 3)) * depth * depth)
     {
         value = qsearch<NonPV>(pos, ss, alpha - 1, alpha);
         if (value < alpha)
@@ -765,21 +765,21 @@ Value Search::Worker::search(
     // The depth condition is important for mate finding.
     if (!ss->ttPv && depth < 12
         && eval - futility_margin(depth, cutNode && !ss->ttHit, improving, opponentWorsening)
-               - (ss - 1)->statScore / 267
+               - (ss - 1)->statScore / 284
              >= beta
         && eval >= beta && eval < VALUE_TB_WIN_IN_MAX_PLY && (!ttMove || ttCapture))
         return beta > VALUE_TB_LOSS_IN_MAX_PLY ? (eval + beta) / 2 : eval;
     // Step 9. Null move search with verification search (~35 Elo)
-    if (!PvNode && (ss - 1)->currentMove != Move::null() && (ss - 1)->statScore < 16878
-        && eval >= beta && ss->staticEval >= beta - 20 * depth + 314 && !excludedMove
+    if (!PvNode && (ss - 1)->currentMove != Move::null() && (ss - 1)->statScore < 18001
+        && eval >= beta && ss->staticEval >= beta - 21 * depth + 315 && !excludedMove
         && pos.non_pawn_material(us) && ss->ply >= thisThread->nmpMinPly
         && beta > VALUE_TB_LOSS_IN_MAX_PLY)
     {
         assert(eval - beta >= 0);
         // Null move dynamic reduction based on depth and eval
-        Depth R = std::min(int(eval - beta) / 144, 6) + depth / 3 + 4;
+        Depth R = std::min(int(eval - beta) / 152, 6) + depth / 3 + 4;
         ss->currentMove         = Move::null();
         ss->continuationHistory = &thisThread->continuationHistory[0][0][NO_PIECE][0];
@@ -827,7 +827,7 @@ Value Search::Worker::search(
     // Step 11. ProbCut (~10 Elo)
     // If we have a good enough capture (or queen promotion) and a reduced search returns a value
     // much above beta, we can (almost) safely prune the previous move.
-    probCutBeta = beta + 170 - 64 * improving;
+    probCutBeta = beta + 169 - 63 * improving;
     if (
       !PvNode && depth > 3
       && std::abs(beta) < VALUE_TB_WIN_IN_MAX_PLY
@@ -883,7 +883,7 @@ moves_loop: // When in check, search starts here
 moves_loop: // When in check, search starts here
     // Step 12. A small Probcut idea, when we are in check (~4 Elo)
-    probCutBeta = beta + 409;
+    probCutBeta = beta + 436;
     if (ss->inCheck && !PvNode && ttCapture && (tte->bound() & BOUND_LOWER)
         && tte->depth() >= depth - 4 && ttValue >= probCutBeta
         && std::abs(ttValue) < VALUE_TB_WIN_IN_MAX_PLY && std::abs(beta) < VALUE_TB_WIN_IN_MAX_PLY)
@@ -967,7 +967,7 @@ moves_loop: // When in check, search starts here
             {
                 Piece capturedPiece = pos.piece_on(move.to_sq());
                 int futilityEval =
-                  ss->staticEval + 297 + 284 * lmrDepth + PieceValue[capturedPiece]
+                  ss->staticEval + 287 + 277 * lmrDepth + PieceValue[capturedPiece]
                   + thisThread->captureHistory[movedPiece][move.to_sq()][type_of(capturedPiece)]
                       / 7;
                 if (futilityEval < alpha)
@@ -975,7 +975,7 @@ moves_loop: // When in check, search starts here
             }
             // SEE based pruning for captures and checks (~11 Elo)
-            if (!pos.see_ge(move, -203 * depth))
+            if (!pos.see_ge(move, -199 * depth))
                 continue;
         }
         else
@@ -987,15 +987,15 @@ moves_loop: // When in check, search starts here
               + thisThread->pawnHistory[pawn_structure_index(pos)][movedPiece][move.to_sq()];
             // Continuation history based pruning (~2 Elo)
-            if (lmrDepth < 6 && history < -4040 * depth)
+            if (lmrDepth < 6 && history < -4173 * depth)
                 continue;
             history += 2 * thisThread->mainHistory[us][move.from_to()];
-            lmrDepth += history / 5637;
+            lmrDepth += history / 5285;
             Value futilityValue =
-              ss->staticEval + (bestValue < ss->staticEval - 59 ? 141 : 58) + 125 * lmrDepth;
+              ss->staticEval + (bestValue < ss->staticEval - 54 ? 128 : 58) + 131 * lmrDepth;
             // Futility pruning: parent node (~13 Elo)
             if (!ss->inCheck && lmrDepth < 15 && futilityValue <= alpha)
@@ -1009,7 +1009,7 @@ moves_loop: // When in check, search starts here
             lmrDepth = std::max(lmrDepth, 0);
             // Prune moves with negative SEE (~4 Elo)
-            if (!pos.see_ge(move, -27 * lmrDepth * lmrDepth))
+            if (!pos.see_ge(move, -26 * lmrDepth * lmrDepth))
                 continue;
         }
     }
@@ -1029,11 +1029,11 @@ moves_loop: // When in check, search starts here
         // so changing them requires tests at these types of time controls.
         // Recursive singular search is avoided.
         if (!rootNode && move == ttMove && !excludedMove
-            && depth >= 4 - (thisThread->completedDepth > 30) + ss->ttPv
+            && depth >= 4 - (thisThread->completedDepth > 32) + ss->ttPv
            && std::abs(ttValue) < VALUE_TB_WIN_IN_MAX_PLY && (tte->bound() & BOUND_LOWER)
            && tte->depth() >= depth - 3)
         {
-            Value singularBeta  = ttValue - (58 + 58 * (ss->ttPv && !PvNode)) * depth / 64;
+            Value singularBeta  = ttValue - (64 + 59 * (ss->ttPv && !PvNode)) * depth / 64;
             Depth singularDepth = newDepth / 2;
             ss->excludedMove = move;
@@ -1048,11 +1048,11 @@ moves_loop: // When in check, search starts here
                 // We make sure to limit the extensions in some way to avoid a search explosion
                 if (!PvNode && ss->multipleExtensions <= 16)
                 {
-                    extension = 2 + (value < singularBeta - 22 && !ttCapture);
+                    extension = 2 + (value < singularBeta - 11 && !ttCapture);
                     depth += depth < 14;
                 }
                 if (PvNode && !ttCapture && ss->multipleExtensions <= 5
-                    && value < singularBeta - 37)
+                    && value < singularBeta - 38)
                     extension = 2;
             }
@@ -1087,7 +1087,7 @@ moves_loop: // When in check, search starts here
             else if (PvNode && move == ttMove && move.to_sq() == prevSq
                      && thisThread->captureHistory[movedPiece][move.to_sq()]
                                                   [type_of(pos.piece_on(move.to_sq()))]
-                          > 4026)
+                          > 3807)
                 extension = 1;
         }
@@ -1137,10 +1137,10 @@ moves_loop: // When in check, search starts here
         ss->statScore = 2 * thisThread->mainHistory[us][move.from_to()]
                       + (*contHist[0])[movedPiece][move.to_sq()]
                       + (*contHist[1])[movedPiece][move.to_sq()]
-                      + (*contHist[3])[movedPiece][move.to_sq()] - 4723;
+                      + (*contHist[3])[movedPiece][move.to_sq()] - 5007;
         // Decrease/increase reduction for moves with a good/bad history (~8 Elo)
-        r -= ss->statScore / 13659;
+        r -= ss->statScore / 12901;
         // Step 17. Late moves reduction / extension (LMR, ~117 Elo)
         if (depth >= 2 && moveCount > 1 + rootNode)
@@ -1159,7 +1159,7 @@ moves_loop: // When in check, search starts here
             {
                 // Adjust full-depth search based on LMR results - if the result
                 // was good enough search deeper, if it was bad enough search shallower.
-                const bool doDeeperSearch    = value > (bestValue + 47 + 2 * newDepth);  // (~1 Elo)
+                const bool doDeeperSearch    = value > (bestValue + 42 + 2 * newDepth);  // (~1 Elo)
                 const bool doShallowerSearch = value < bestValue + newDepth;             // (~2 Elo)
                 newDepth += doDeeperSearch - doShallowerSearch;
@@ -1277,7 +1277,7 @@ moves_loop: // When in check, search starts here
         else
         {
             // Reduce other moves if we have found at least one score improvement (~2 Elo)
-            if (depth > 2 && depth < 12 && beta < 14206 && value > -12077)
+            if (depth > 2 && depth < 12 && beta < 13132 && value > -13295)
                 depth -= 2;
             assert(depth > 0);
@@ -1320,9 +1320,9 @@ moves_loop: // When in check, search starts here
     // Bonus for prior countermove that caused the fail low
     else if (!priorCapture && prevSq != SQ_NONE)
     {
-        int bonus = (depth > 5) + (PvNode || cutNode) + ((ss - 1)->statScore < -14963)
+        int bonus = (depth > 5) + (PvNode || cutNode) + ((ss - 1)->statScore < -14761)
                   + ((ss - 1)->moveCount > 11)
-                  + (!ss->inCheck && bestValue <= ss->staticEval - 150);
+                  + (!ss->inCheck && bestValue <= ss->staticEval - 144);
         update_continuation_histories(ss - 1, pos.piece_on(prevSq), prevSq,
                                       stat_bonus(depth) * bonus);
         thisThread->mainHistory[~us][((ss - 1)->currentMove).from_to()]
@@ -1480,7 +1480,7 @@ Value Search::Worker::qsearch(Position& pos, Stack* ss, Value alpha, Value beta,
         if (bestValue > alpha)
             alpha = bestValue;
-        futilityBase = ss->staticEval + 226;
+        futilityBase = ss->staticEval + 246;
     }
     const PieceToHistory* contHist[] = {(ss - 1)->continuationHistory,
@@ -1560,7 +1560,7 @@ Value Search::Worker::qsearch(Position& pos, Stack* ss, Value alpha, Value beta,
                 continue;
             // Do not search moves with bad enough SEE values (~5 Elo)
-            if (!pos.see_ge(move, -78))
+            if (!pos.see_ge(move, -79))
                 continue;
         }
@@ -1628,7 +1628,7 @@ Value Search::Worker::qsearch(Position& pos, Stack* ss, Value alpha, Value beta,
 Depth Search::Worker::reduction(bool i, Depth d, int mn, int delta) {
     int reductionScale = reductions[d] * reductions[mn];
-    return (reductionScale + 1107 - delta * 725 / rootDelta) / 1024 + (!i && reductionScale > 956);
+    return (reductionScale + 1123 - delta * 832 / rootDelta) / 1024 + (!i && reductionScale > 1025);
 }
 namespace {
@@ -1717,7 +1717,7 @@ void update_all_stats(const Position& pos,
     if (!pos.capture_stage(bestMove))
     {
-        int bestMoveBonus = bestValue > beta + 168 ? quietMoveBonus   // larger bonus
+        int bestMoveBonus = bestValue > beta + 185 ? quietMoveBonus   // larger bonus
                                                    : stat_bonus(depth);  // smaller bonus
         // Increase stats for the best move in case it was a quiet move