Mirror of https://github.com/sockspls/badfish, synced 2025-04-29 16:23:09 +00:00
VVLTC search tune
Result of 32k games of tuning at 60+0.6, 8 threads.
Link to the tuning attempt: https://tests.stockfishchess.org/tests/view/65def7b04b19edc854ebdec8

Passed VVLTC first SPRT:
https://tests.stockfishchess.org/tests/view/65e51b53416ecd92c162ab7f
LLR: 2.94 (-2.94,2.94) <0.00,2.00>
Total: 37570 W: 9613 L: 9342 D: 18615
Ptnml(0-2): 2, 3454, 11601, 3727, 1

Passed VVLTC second SPRT:
https://tests.stockfishchess.org/tests/view/65e87d1c0ec64f0526c3eb39
LLR: 2.94 (-2.94,2.94) <0.50,2.50>
Total: 123158 W: 31463 L: 31006 D: 60689
Ptnml(0-2): 5, 11589, 37935, 12044, 6

Note: The small net and psqt-only thresholds have been moved to evaluate.h. The reasoning is that these values are used in both `evaluate.cpp` and `evaluate_nnue.cpp`, so unifying their usage avoids inconsistencies during testing, where one occurrence is changed without the other (this happened during the search tune SPRT).

closes https://github.com/official-stockfish/Stockfish/pull/5101

Bench: 1741218
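To illustrate the note about the unified thresholds, here is a minimal, self-contained sketch (not the actual Stockfish sources; `simple_eval_stub` and its input are made up for illustration) of how a single pair of constants declared in evaluate.h can be consumed by both call sites:

```cpp
// sketch_thresholds.cpp -- illustrative only; simple_eval_stub() stands in for Stockfish's simple_eval().
#include <cstdlib>
#include <iostream>

namespace Eval {
// Shared constants, as introduced in evaluate.h by this commit.
constexpr inline int SmallNetThreshold = 1139, PsqtOnlyThreshold = 2500;
}

// Hypothetical stand-in for the material-count based simple evaluation.
int simple_eval_stub(int materialDiff) { return materialDiff; }

int main() {
    int  simpleEval = simple_eval_stub(1500);
    bool smallNet   = std::abs(simpleEval) > Eval::SmallNetThreshold;  // as in evaluate.cpp
    bool psqtOnly   = std::abs(simpleEval) > Eval::PsqtOnlyThreshold;  // also checked in evaluate_nnue.cpp
    std::cout << smallNet << ' ' << psqtOnly << '\n';                  // prints: 1 0
}
```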
This commit is contained in: parent b6dfd6bd54, commit 10e2732978
4 changed files with 43 additions and 42 deletions
src/evaluate.cpp
@@ -193,8 +193,8 @@ Value Eval::evaluate(const Position& pos, int optimism) {

     assert(!pos.checkers());

     int  simpleEval = simple_eval(pos, pos.side_to_move());
-    bool smallNet   = std::abs(simpleEval) > 1050;
-    bool psqtOnly   = std::abs(simpleEval) > 2500;
+    bool smallNet   = std::abs(simpleEval) > SmallNetThreshold;
+    bool psqtOnly   = std::abs(simpleEval) > PsqtOnlyThreshold;

     int nnueComplexity;
src/evaluate.h
@@ -31,6 +31,8 @@ class OptionsMap;

 namespace Eval {

+constexpr inline int SmallNetThreshold = 1139, PsqtOnlyThreshold = 2500;
+
 std::string trace(Position& pos);

 int simple_eval(const Position& pos, Color c);
src/nnue/evaluate_nnue.cpp
@@ -179,8 +179,8 @@ write_parameters(std::ostream& stream, NetSize netSize, const std::string& netDe
 void hint_common_parent_position(const Position& pos) {

     int simpleEvalAbs = std::abs(simple_eval(pos, pos.side_to_move()));
-    if (simpleEvalAbs > 1050)
-        featureTransformerSmall->hint_common_access(pos, simpleEvalAbs > 2500);
+    if (simpleEvalAbs > Eval::SmallNetThreshold)
+        featureTransformerSmall->hint_common_access(pos, simpleEvalAbs > Eval::PsqtOnlyThreshold);
     else
         featureTransformerBig->hint_common_access(pos, false);
 }
src/search.cpp
@@ -55,7 +55,7 @@ namespace {

 // Futility margin
 Value futility_margin(Depth d, bool noTtCutNode, bool improving, bool oppWorsening) {
-    Value futilityMult       = 117 - 44 * noTtCutNode;
+    Value futilityMult       = 121 - 43 * noTtCutNode;
     Value improvingDeduction = 3 * improving * futilityMult / 2;
     Value worseningDeduction = (331 + 45 * improving) * oppWorsening * futilityMult / 1024;
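For a sense of scale, here is a standalone recomputation of the retuned components above. The final combination into a margin (futilityMult * d minus the two deductions) is not visible in this hunk and is assumed from this Stockfish version; sample inputs are made up:

```cpp
// Recomputes the tuned futility components for one sample node.
#include <iostream>

int main() {
    int  d            = 6;      // sample depth
    bool noTtCutNode  = false;
    bool improving    = true;
    bool oppWorsening = false;

    int futilityMult       = 121 - 43 * noTtCutNode;                              // was 117 - 44 * ...
    int improvingDeduction = 3 * improving * futilityMult / 2;                     // 181 here
    int worseningDeduction = (331 + 45 * improving) * oppWorsening * futilityMult / 1024;

    // Assumed combination, matching this version's futility_margin():
    int margin = futilityMult * d - improvingDeduction - worseningDeduction;
    std::cout << margin << '\n';  // 121*6 - 181 - 0 = 545
}
```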
@@ -69,15 +69,15 @@ constexpr int futility_move_count(bool improving, Depth depth) {

 // Add correctionHistory value to raw staticEval and guarantee evaluation does not hit the tablebase range
 Value to_corrected_static_eval(Value v, const Worker& w, const Position& pos) {
     auto cv = w.correctionHistory[pos.side_to_move()][pawn_structure_index<Correction>(pos)];
-    v += cv * std::abs(cv) / 12475;
+    v += cv * std::abs(cv) / 10759;
     return std::clamp(v, VALUE_TB_LOSS_IN_MAX_PLY + 1, VALUE_TB_WIN_IN_MAX_PLY - 1);
 }

 // History and stats update bonus, based on depth
-int stat_bonus(Depth d) { return std::min(246 * d - 351, 1136); }
+int stat_bonus(Depth d) { return std::min(249 * d - 327, 1192); }

 // History and stats update malus, based on depth
-int stat_malus(Depth d) { return std::min(519 * d - 306, 1258); }
+int stat_malus(Depth d) { return std::min(516 * d - 299, 1432); }

 // Add a small random component to draw evaluations to avoid 3-fold blindness
 Value value_draw(size_t nodes) { return VALUE_DRAW - 1 + Value(nodes & 0x2); }
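A quick standalone check of where the retuned bonus/malus formulas saturate (a straight recomputation of the two one-liners above, for a few sample depths):

```cpp
// Evaluates the retuned stat_bonus/stat_malus caps for depths 1..7.
#include <algorithm>
#include <iostream>

int stat_bonus(int d) { return std::min(249 * d - 327, 1192); }
int stat_malus(int d) { return std::min(516 * d - 299, 1432); }

int main() {
    for (int d = 1; d <= 7; ++d)
        std::cout << d << ": bonus " << stat_bonus(d)
                  << ", malus " << stat_malus(d) << '\n';
    // bonus caps at 1192 from depth 7 on (249*7 - 327 = 1416 > 1192),
    // malus caps at 1432 from depth 4 on (516*4 - 299 = 1765 > 1432).
}
```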
@@ -300,12 +300,12 @@ void Search::Worker::iterative_deepening() {

             // Reset aspiration window starting size
             Value avg = rootMoves[pvIdx].averageScore;
-            delta     = 9 + avg * avg / 12487;
+            delta     = 9 + avg * avg / 12804;
             alpha     = std::max(avg - delta, -VALUE_INFINITE);
             beta      = std::min(avg + delta, VALUE_INFINITE);

             // Adjust optimism based on root move's averageScore (~4 Elo)
-            optimism[us]  = 134 * avg / (std::abs(avg) + 97);
+            optimism[us]  = 131 * avg / (std::abs(avg) + 90);
             optimism[~us] = -optimism[us];

             // Start with a small aspiration window and, in the case of a fail
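A standalone recomputation of the retuned aspiration-window and optimism terms for a few sample average scores (the scores are made up for illustration; values are in internal evaluation units):

```cpp
// Recomputes delta and optimism for sample root average scores.
#include <cstdlib>
#include <iostream>

int main() {
    for (int avg : {0, 50, 200}) {
        int delta    = 9 + avg * avg / 12804;             // was / 12487
        int optimism = 131 * avg / (std::abs(avg) + 90);   // was 134 * avg / (|avg| + 97)
        std::cout << "avg " << avg << ": delta " << delta
                  << ", optimism " << optimism << '\n';
    }
    // avg 0   -> delta 9,  optimism 0
    // avg 50  -> delta 9,  optimism 46
    // avg 200 -> delta 12, optimism 90
}
```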
@@ -500,7 +500,7 @@ void Search::Worker::clear() {
                    h->fill(-71);

    for (size_t i = 1; i < reductions.size(); ++i)
-        reductions[i] = int((18.79 + std::log(size_t(options["Threads"])) / 2) * std::log(i));
+        reductions[i] = int((19.02 + std::log(size_t(options["Threads"])) / 2) * std::log(i));
 }
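A standalone recomputation of a few entries of the retuned reductions table for the single-thread case (the table indices are chosen for illustration):

```cpp
// Rebuilds a few LMR reductions-table entries with the new 19.02 constant.
#include <cmath>
#include <iostream>

int main() {
    int threads = 1;  // with one thread the log(Threads)/2 term vanishes
    for (int i : {1, 2, 8, 32, 64}) {
        int r = int((19.02 + std::log(double(threads)) / 2) * std::log(double(i)));
        std::cout << "reductions[" << i << "] = " << r << '\n';
    }
    // e.g. reductions[64] = int(19.02 * ln 64) = 79
}
```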
@@ -731,7 +731,7 @@ Value Search::Worker::search(
     // Use static evaluation difference to improve quiet move ordering (~9 Elo)
     if (((ss - 1)->currentMove).is_ok() && !(ss - 1)->inCheck && !priorCapture)
     {
-        int bonus = std::clamp(-14 * int((ss - 1)->staticEval + ss->staticEval), -1723, 1455);
+        int bonus = std::clamp(-14 * int((ss - 1)->staticEval + ss->staticEval), -1621, 1237);
         bonus = bonus > 0 ? 2 * bonus : bonus / 2;
         thisThread->mainHistory[~us][((ss - 1)->currentMove).from_to()] << bonus;
         if (type_of(pos.piece_on(prevSq)) != PAWN && ((ss - 1)->currentMove).type_of() != PROMOTION)
@@ -754,7 +754,7 @@ Value Search::Worker::search(
     // If eval is really low check with qsearch if it can exceed alpha, if it can't,
     // return a fail low.
     // Adjust razor margin according to cutoffCnt. (~1 Elo)
-    if (eval < alpha - 438 - (332 - 154 * ((ss + 1)->cutoffCnt > 3)) * depth * depth)
+    if (eval < alpha - 462 - (296 - 145 * ((ss + 1)->cutoffCnt > 3)) * depth * depth)
     {
         value = qsearch<NonPV>(pos, ss, alpha - 1, alpha);
         if (value < alpha)
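A standalone recomputation of the retuned razoring threshold for a couple of sample depths (the depths are chosen for illustration):

```cpp
// Shows how far below alpha the eval must be for razoring at the new constants.
#include <iostream>

int main() {
    for (int depth : {1, 2, 3}) {
        for (bool manyCutoffs : {false, true}) {
            int margin = 462 + (296 - 145 * manyCutoffs) * depth * depth;  // was 438 + (332 - 154*...)*d*d
            std::cout << "depth " << depth << (manyCutoffs ? " (cutoffCnt > 3)" : "")
                      << ": razor if eval < alpha - " << margin << '\n';
        }
    }
    // depth 3, no cutoffs: alpha - 3126; with (ss+1)->cutoffCnt > 3: alpha - 1821.
}
```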
@@ -763,24 +763,23 @@ Value Search::Worker::search(

     // Step 8. Futility pruning: child node (~40 Elo)
     // The depth condition is important for mate finding.
-    if (!ss->ttPv && depth < 11
+    if (!ss->ttPv && depth < 12
         && eval - futility_margin(depth, cutNode && !ss->ttHit, improving, opponentWorsening)
-               - (ss - 1)->statScore / 314
+               - (ss - 1)->statScore / 287
             >= beta
-        && eval >= beta && eval < 30016 // smaller than TB wins
-        && (!ttMove || ttCapture))
+        && eval >= beta && eval < VALUE_TB_WIN_IN_MAX_PLY && (!ttMove || ttCapture))
         return beta > VALUE_TB_LOSS_IN_MAX_PLY ? (eval + beta) / 2 : eval;

     // Step 9. Null move search with verification search (~35 Elo)
-    if (!PvNode && (ss - 1)->currentMove != Move::null() && (ss - 1)->statScore < 16620
-        && eval >= beta && eval >= ss->staticEval && ss->staticEval >= beta - 21 * depth + 330
+    if (!PvNode && (ss - 1)->currentMove != Move::null() && (ss - 1)->statScore < 16211
+        && eval >= beta && eval >= ss->staticEval && ss->staticEval >= beta - 20 * depth + 314
         && !excludedMove && pos.non_pawn_material(us) && ss->ply >= thisThread->nmpMinPly
         && beta > VALUE_TB_LOSS_IN_MAX_PLY)
     {
         assert(eval - beta >= 0);

         // Null move dynamic reduction based on depth and eval
-        Depth R = std::min(int(eval - beta) / 154, 6) + depth / 3 + 4;
+        Depth R = std::min(int(eval - beta) / 151, 6) + depth / 3 + 4;

         ss->currentMove         = Move::null();
         ss->continuationHistory = &thisThread->continuationHistory[0][0][NO_PIECE][0];
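A standalone recomputation of the retuned null-move reduction R for sample depths and eval-beta gaps (sample values only; as in the code above, the gap is assumed non-negative):

```cpp
// Recomputes the null-move reduction with the new /151 divisor.
#include <algorithm>
#include <iostream>

int main() {
    for (int depth : {6, 12}) {
        for (int gap : {0, 100, 1200}) {                       // gap = eval - beta
            int R = std::min(gap / 151, 6) + depth / 3 + 4;     // was gap / 154
            std::cout << "depth " << depth << ", eval-beta " << gap
                      << ": R = " << R << '\n';
        }
    }
    // e.g. depth 12, eval-beta 1200: R = 6 + 4 + 4 = 14 (the eval term is capped at 6).
}
```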
@@ -828,7 +827,7 @@ Value Search::Worker::search(
     // Step 11. ProbCut (~10 Elo)
     // If we have a good enough capture (or queen promotion) and a reduced search returns a value
     // much above beta, we can (almost) safely prune the previous move.
-    probCutBeta = beta + 181 - 68 * improving;
+    probCutBeta = beta + 164 - 62 * improving;
     if (
        !PvNode && depth > 3
        && std::abs(beta) < VALUE_TB_WIN_IN_MAX_PLY
@@ -884,7 +883,7 @@ Value Search::Worker::search(
 moves_loop: // When in check, search starts here

     // Step 12. A small Probcut idea, when we are in check (~4 Elo)
-    probCutBeta = beta + 452;
+    probCutBeta = beta + 410;
     if (ss->inCheck && !PvNode && ttCapture && (tte->bound() & BOUND_LOWER)
         && tte->depth() >= depth - 4 && ttValue >= probCutBeta
         && std::abs(ttValue) < VALUE_TB_WIN_IN_MAX_PLY && std::abs(beta) < VALUE_TB_WIN_IN_MAX_PLY)
@@ -967,7 +966,7 @@ moves_loop: // When in check, search starts here
                {
                    Piece capturedPiece = pos.piece_on(move.to_sq());
                    int   futilityEval =
-                      ss->staticEval + 277 + 292 * lmrDepth + PieceValue[capturedPiece]
+                      ss->staticEval + 298 + 288 * lmrDepth + PieceValue[capturedPiece]
                      + thisThread->captureHistory[movedPiece][move.to_sq()][type_of(capturedPiece)]
                            / 7;
                    if (futilityEval < alpha)
@@ -975,7 +974,7 @@ moves_loop: // When in check, search starts here
                }

                // SEE based pruning for captures and checks (~11 Elo)
-                if (!pos.see_ge(move, -197 * depth))
+                if (!pos.see_ge(move, -202 * depth))
                    continue;
            }
            else
@@ -987,17 +986,17 @@ moves_loop: // When in check, search starts here
                  + thisThread->pawnHistory[pawn_structure_index(pos)][movedPiece][move.to_sq()];

                // Continuation history based pruning (~2 Elo)
-                if (lmrDepth < 6 && history < -4211 * depth)
+                if (lmrDepth < 6 && history < -4125 * depth)
                    continue;

                history += 2 * thisThread->mainHistory[us][move.from_to()];

-                lmrDepth += history / 6437;
+                lmrDepth += history / 5686;

                // Futility pruning: parent node (~13 Elo)
                if (!ss->inCheck && lmrDepth < 15
-                    && ss->staticEval + (bestValue < ss->staticEval - 57 ? 144 : 57)
-                               + 121 * lmrDepth
+                    && ss->staticEval + (bestValue < ss->staticEval - 55 ? 153 : 58)
+                               + 118 * lmrDepth
                        <= alpha)
                    continue;
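A standalone recomputation of the retuned parent futility-pruning threshold, with made-up sample values, just to show the shape of the comparison:

```cpp
// Evaluates the parent futility-pruning condition with the new constants.
#include <iostream>

int main() {
    int staticEval = 80, alpha = 300, lmrDepth = 3;
    int bestValue  = 40;  // sample values only

    int base      = (bestValue < staticEval - 55) ? 153 : 58;  // was (... - 57 ? 144 : 57)
    int threshold = staticEval + base + 118 * lmrDepth;        // was + 121 * lmrDepth
    std::cout << (threshold <= alpha ? "pruned" : "searched")
              << " (threshold " << threshold << ", alpha " << alpha << ")\n";
    // base = 58 since 40 >= 80 - 55; threshold = 80 + 58 + 354 = 492 > 300, so the move is searched.
}
```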
@@ -1024,11 +1023,11 @@ moves_loop: // When in check, search starts here
        // so changing them requires tests at these types of time controls.
        // Recursive singular search is avoided.
        if (!rootNode && move == ttMove && !excludedMove
-            && depth >= 4 - (thisThread->completedDepth > 30) + ss->ttPv
+            && depth >= 4 - (thisThread->completedDepth > 29) + ss->ttPv
            && std::abs(ttValue) < VALUE_TB_WIN_IN_MAX_PLY && (tte->bound() & BOUND_LOWER)
            && tte->depth() >= depth - 3)
        {
-            Value singularBeta  = ttValue - (60 + 54 * (ss->ttPv && !PvNode)) * depth / 64;
+            Value singularBeta  = ttValue - (58 + 55 * (ss->ttPv && !PvNode)) * depth / 64;
            Depth singularDepth = newDepth / 2;

            ss->excludedMove = move;
@@ -1044,7 +1043,7 @@ moves_loop: // When in check, search starts here
                if (!PvNode && ss->multipleExtensions <= 16)
                {
                    extension = 2 + (value < singularBeta - 78 && !ttCapture);
-                    depth += depth < 16;
+                    depth += depth < 14;
                }
                if (PvNode && !ttCapture && ss->multipleExtensions <= 5
                    && value < singularBeta - 50)
@@ -1082,7 +1081,7 @@ moves_loop: // When in check, search starts here
        else if (PvNode && move == ttMove && move.to_sq() == prevSq
                 && thisThread->captureHistory[movedPiece][move.to_sq()]
                                              [type_of(pos.piece_on(move.to_sq()))]
-                      > 4394)
+                      > 4315)
            extension = 1;
        }
@@ -1136,10 +1135,10 @@ moves_loop: // When in check, search starts here
        ss->statScore = 2 * thisThread->mainHistory[us][move.from_to()]
                      + (*contHist[0])[movedPiece][move.to_sq()]
                      + (*contHist[1])[movedPiece][move.to_sq()]
-                      + (*contHist[3])[movedPiece][move.to_sq()] - 4392;
+                      + (*contHist[3])[movedPiece][move.to_sq()] - 4587;

        // Decrease/increase reduction for moves with a good/bad history (~8 Elo)
-        r -= ss->statScore / 14189;
+        r -= ss->statScore / 12372;

        // Step 17. Late moves reduction / extension (LMR, ~117 Elo)
        if (depth >= 2 && moveCount > 1 + rootNode)
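A standalone recomputation of the retuned history-based reduction adjustment for a few sample statScore values (integer division truncates toward zero):

```cpp
// Shows how much the reduction moves for various statScore values at the new divisor.
#include <iostream>

int main() {
    for (int statScore : {-20000, -4587, 0, 12372, 25000}) {
        int adjust = statScore / 12372;   // r -= adjust; divisor was 14189
        std::cout << "statScore " << statScore << ": r -= " << adjust << '\n';
    }
    // Only |statScore| >= 12372 changes the reduction by a full unit; the -4587 offset alone does not.
}
```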
@@ -1158,7 +1157,7 @@ moves_loop: // When in check, search starts here
            {
                // Adjust full-depth search based on LMR results - if the result
                // was good enough search deeper, if it was bad enough search shallower.
-                const bool doDeeperSearch    = value > (bestValue + 49 + 2 * newDepth);  // (~1 Elo)
+                const bool doDeeperSearch    = value > (bestValue + 48 + 2 * newDepth);  // (~1 Elo)
                const bool doShallowerSearch = value < bestValue + newDepth;              // (~2 Elo)

                newDepth += doDeeperSearch - doShallowerSearch;
@@ -1277,7 +1276,7 @@ moves_loop: // When in check, search starts here
            else
            {
                // Reduce other moves if we have found at least one score improvement (~2 Elo)
-                if (depth > 2 && depth < 13 && beta < 13652 && value > -12761)
+                if (depth > 2 && depth < 12 && beta < 13665 && value > -12276)
                    depth -= 2;

                assert(depth > 0);
@@ -1320,8 +1319,8 @@ moves_loop: // When in check, search starts here
    // Bonus for prior countermove that caused the fail low
    else if (!priorCapture && prevSq != SQ_NONE)
    {
-        int bonus = (depth > 5) + (PvNode || cutNode) + ((ss - 1)->statScore < -15736)
-                  + ((ss - 1)->moveCount > 11);
+        int bonus = (depth > 5) + (PvNode || cutNode) + ((ss - 1)->statScore < -14446)
+                  + ((ss - 1)->moveCount > 10);
        update_continuation_histories(ss - 1, pos.piece_on(prevSq), prevSq,
                                      stat_bonus(depth) * bonus);
        thisThread->mainHistory[~us][((ss - 1)->currentMove).from_to()]
@@ -1478,7 +1477,7 @@ Value Search::Worker::qsearch(Position& pos, Stack* ss, Value alpha, Value beta,
        if (bestValue > alpha)
            alpha = bestValue;

-        futilityBase = ss->staticEval + 206;
+        futilityBase = ss->staticEval + 221;
    }

    const PieceToHistory* contHist[] = {(ss - 1)->continuationHistory,
@@ -1558,7 +1557,7 @@ Value Search::Worker::qsearch(Position& pos, Stack* ss, Value alpha, Value beta,
                continue;

            // Do not search moves with bad enough SEE values (~5 Elo)
-            if (!pos.see_ge(move, -74))
+            if (!pos.see_ge(move, -79))
                continue;
        }
@@ -1626,7 +1625,7 @@ Value Search::Worker::qsearch(Position& pos, Stack* ss, Value alpha, Value beta,

 Depth Search::Worker::reduction(bool i, Depth d, int mn, int delta) {
     int reductionScale = reductions[d] * reductions[mn];
-    return (reductionScale + 1118 - delta * 793 / rootDelta) / 1024 + (!i && reductionScale > 863);
+    return (reductionScale + 1091 - delta * 759 / rootDelta) / 1024 + (!i && reductionScale > 952);
 }

 namespace {
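A standalone recomputation of the retuned rounding in reduction() for sample table values (rootDelta, delta and the scale values are made up for illustration; the scale stands in for reductions[d] * reductions[mn]):

```cpp
// Evaluates the reduction() formula with the new 1091/759/952 constants.
#include <iostream>

int main() {
    int  rootDelta = 800, delta = 400;   // sample window sizes
    bool improving = false;              // corresponds to the function's bool i
    for (int reductionScale : {500, 1000, 2000}) {
        int r = (reductionScale + 1091 - delta * 759 / rootDelta) / 1024
              + (!improving && reductionScale > 952);
        std::cout << "scale " << reductionScale << ": r = " << r << '\n';
    }
    // delta*759/rootDelta = 379 here; e.g. scale 2000 -> (2000 + 1091 - 379)/1024 + 1 = 3.
}
```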
@@ -1715,7 +1714,7 @@ void update_all_stats(const Position& pos,

    if (!pos.capture_stage(bestMove))
    {
-        int bestMoveBonus = bestValue > beta + 166 ? quietMoveBonus      // larger bonus
+        int bestMoveBonus = bestValue > beta + 167 ? quietMoveBonus      // larger bonus
                                                   : stat_bonus(depth);  // smaller bonus

        // Increase stats for the best move in case it was a quiet move