
Tuned values for search constants

Tuned search constants after many search patches since the last
successful tune.

1st LTC @ 60+0.6 th 1 :
LLR: 2.97 (-2.94,2.94) {0.25,1.75}
Total: 57656 W: 7369 L: 7036 D: 43251
Ptnml(0-2): 393, 5214, 17336, 5437, 448
https://tests.stockfishchess.org/tests/view/5ee1e074f29b40b0fc95af19

SMP LTC @ 20+0.2 th 8 :
LLR: 2.95 (-2.94,2.94) {0.25,1.75}
Total: 83576 W: 9731 L: 9341 D: 64504
Ptnml(0-2): 464, 7062, 26369, 7406, 487
https://tests.stockfishchess.org/tests/view/5ee35a21f29b40b0fc95b008

The changes were rebased on top of a successful patch by Viz (see #2734)
and two different ways of doing this were tested. The successful test
modified the constants in the patch by Viz in a similar manner to the
tuning run:

LTC (rebased) @ 60+0.6 th 1 :
LLR: 2.94 (-2.94,2.94) {0.25,1.75}
Total: 193384 W: 24241 L: 23521 D: 145622
Ptnml(0-2): 1309, 17497, 58472, 17993, 1421
https://tests.stockfishchess.org/tests/view/5ee43319ca6c451633a995f9
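
For a rough sense of scale (a point estimate only; the SPRT decision itself uses the
pentanomial model and this ignores the error bars): the rebased LTC run scores
s = (24241 + 145622/2) / 193384 ≈ 0.5019, which converts to roughly
400 * log10(s / (1 - s)) ≈ +1.3 Elo, inside the {0.25,1.75} test bounds.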

Further work: the recent patch to quantize the eval (#2733) affects search
quite a bit, so doing another tune in, say, three months' time might be a
good idea.

closes https://github.com/official-stockfish/Stockfish/pull/2735

Bench 4246971
xoto10 2020-06-13 09:54:07 +01:00 committed by Stéphane Nicolet
parent 4d657618e9
commit 42b7dbcb5e

@@ -65,9 +65,9 @@ namespace {
   constexpr uint64_t TtHitAverageResolution = 1024;

   // Razor and futility margins
-  constexpr int RazorMargin = 531;
+  constexpr int RazorMargin = 516;
   Value futility_margin(Depth d, bool improving) {
-    return Value(217 * (d - improving));
+    return Value(224 * (d - improving));
   }

   // Reductions lookup table, initialized at startup
@@ -75,16 +75,16 @@ namespace {
   Depth reduction(bool i, Depth d, int mn) {
     int r = Reductions[d] * Reductions[mn];
-    return (r + 511) / 1024 + (!i && r > 1007);
+    return (r + 529) / 1024 + (!i && r > 1050);
   }

   constexpr int futility_move_count(bool improving, Depth depth) {
-    return (4 + depth * depth) / (2 - improving);
+    return (3 + depth * depth) / (2 - improving);
   }

   // History and stats update bonus, based on depth
   int stat_bonus(Depth d) {
-    return d > 15 ? -8 : 19 * d * d + 155 * d - 132;
+    return d > 15 ? 28 : 19 * d * d + 135 * d - 136;
   }

   // Add a small random component to draw evaluations to avoid 3fold-blindness
@@ -194,7 +194,7 @@ namespace {
 void Search::init() {

   for (int i = 1; i < MAX_MOVES; ++i)
-      Reductions[i] = int((24.8 + std::log(Threads.size())) * std::log(i));
+      Reductions[i] = int((24.9 + std::log(Threads.size())) * std::log(i));
 }
@@ -408,7 +408,7 @@ void Thread::search() {
               beta = std::min(prev + delta, VALUE_INFINITE);

               // Adjust contempt based on root move's previousScore (dynamic contempt)
-              int dct = ct + (102 - ct / 2) * prev / (abs(prev) + 157);
+              int dct = ct + (104 - ct / 2) * prev / (abs(prev) + 143);

               contempt = (us == WHITE ?  make_score(dct, dct / 2)
                                       : -make_score(dct, dct / 2));
@@ -506,13 +506,13 @@ void Thread::search() {
           && !Threads.stop
           && !mainThread->stopOnPonderhit)
       {
-          double fallingEval = (332 + 6 * (mainThread->bestPreviousScore - bestValue)
-                                    + 6 * (mainThread->iterValue[iterIdx] - bestValue)) / 704.0;
+          double fallingEval = (293 + 6 * (mainThread->bestPreviousScore - bestValue)
+                                    + 6 * (mainThread->iterValue[iterIdx] - bestValue)) / 742.0;
           fallingEval = Utility::clamp(fallingEval, 0.5, 1.5);

           // If the bestMove is stable over several iterations, reduce time accordingly
-          timeReduction = lastBestMoveDepth + 9 < completedDepth ? 1.94 : 0.91;
-          double reduction = (1.41 + mainThread->previousTimeReduction) / (2.27 * timeReduction);
+          timeReduction = lastBestMoveDepth + 10 < completedDepth ? 1.93 : 0.96;
+          double reduction = (1.36 + mainThread->previousTimeReduction) / (2.21 * timeReduction);

           // Use part of the gained time from a previous stable move for the current move
           for (Thread* th : Threads)
@ -537,7 +537,7 @@ void Thread::search() {
} }
else if ( Threads.increaseDepth else if ( Threads.increaseDepth
&& !mainThread->ponder && !mainThread->ponder
&& Time.elapsed() > totalTime * 0.6) && Time.elapsed() > totalTime * 0.57)
Threads.increaseDepth = false; Threads.increaseDepth = false;
else else
Threads.increaseDepth = true; Threads.increaseDepth = true;
@@ -819,10 +819,10 @@ namespace {
     // Step 9. Null move search with verification search (~40 Elo)
     if (   !PvNode
         && (ss-1)->currentMove != MOVE_NULL
-        && (ss-1)->statScore < 23397
+        && (ss-1)->statScore < 24714
         &&  eval >= beta
         &&  eval >= ss->staticEval
-        &&  ss->staticEval >= beta - 32 * depth - 30 * improving + 120 * ttPv + 292
+        &&  ss->staticEval >= beta - 29 * depth - 31 * improving + 119 * ttPv + 299
         && !excludedMove
         &&  pos.non_pawn_material(us)
         && (ss->ply >= thisThread->nmpMinPly || us != thisThread->nmpColor))
@@ -830,7 +830,7 @@ namespace {
         assert(eval - beta >= 0);

         // Null move dynamic reduction based on depth and value
-        Depth R = (854 + 68 * depth) / 258 + std::min(int(eval - beta) / 192, 3);
+        Depth R = (793 + 70 * depth) / 252 + std::min(int(eval - beta) / 192, 3);

         ss->currentMove = MOVE_NULL;
         ss->continuationHistory = &thisThread->continuationHistory[0][0][NO_PIECE][0];
@@ -870,10 +870,10 @@ namespace {
     // If we have a good enough capture and a reduced search returns a value
     // much above beta, we can (almost) safely prune the previous move.
     if (   !PvNode
-        &&  depth >= 5
+        &&  depth > 5
         &&  abs(beta) < VALUE_TB_WIN_IN_MAX_PLY)
     {
-        Value raisedBeta = beta + 189 - 45 * improving;
+        Value raisedBeta = beta + 182 - 48 * improving;
         assert(raisedBeta < VALUE_INFINITE);
         MovePicker mp(pos, ttMove, raisedBeta - ss->staticEval, &captureHistory);
         int probCutCount = 0;
@@ -904,7 +904,7 @@ namespace {
             // If the qsearch held, perform the regular search
             if (value >= raisedBeta)
-                value = -search<NonPV>(pos, ss+1, -raisedBeta, -raisedBeta+1, depth - 4, !cutNode);
+                value = -search<NonPV>(pos, ss+1, -raisedBeta, -raisedBeta+1, depth - 5, !cutNode);

             pos.undo_move(move);
@@ -1003,15 +1003,15 @@ moves_loop: // When in check, search starts from here
               // Futility pruning: parent node (~5 Elo)
               if (   lmrDepth < 6
                   && !ss->inCheck
-                  && ss->staticEval + 235 + 172 * lmrDepth <= alpha
+                  && ss->staticEval + 252 + 176 * lmrDepth <= alpha
                   &&  (*contHist[0])[movedPiece][to_sq(move)]
                     + (*contHist[1])[movedPiece][to_sq(move)]
                     + (*contHist[3])[movedPiece][to_sq(move)]
-                    + (*contHist[5])[movedPiece][to_sq(move)] / 2 < 31400)
+                    + (*contHist[5])[movedPiece][to_sq(move)] / 2 < 30251)
                   continue;

               // Prune moves with negative SEE (~20 Elo)
-              if (!pos.see_ge(move, Value(-(32 - std::min(lmrDepth, 18)) * lmrDepth * lmrDepth)))
+              if (!pos.see_ge(move, Value(-(31 - std::min(lmrDepth, 18)) * lmrDepth * lmrDepth)))
                   continue;
           }
           else
@@ -1027,11 +1027,11 @@ moves_loop: // When in check, search starts from here
                   && lmrDepth < 6
                   && !(PvNode && abs(bestValue) < 2)
                   && !ss->inCheck
-                  && ss->staticEval + 270 + 384 * lmrDepth + PieceValue[MG][type_of(pos.piece_on(to_sq(move)))] <= alpha)
+                  && ss->staticEval + 264 + 397 * lmrDepth + PieceValue[MG][type_of(pos.piece_on(to_sq(move)))] <= alpha)
                   continue;

               // See based pruning
-              if (!pos.see_ge(move, Value(-194) * depth)) // (~25 Elo)
+              if (!pos.see_ge(move, Value(-192) * depth)) // (~25 Elo)
                   continue;
           }
       }
@@ -1144,12 +1144,12 @@ moves_loop: // When in check, search starts from here
               || moveCountPruning
               || ss->staticEval + PieceValue[EG][pos.captured_piece()] <= alpha
               || cutNode
-              || thisThread->ttHitAverage < 375 * TtHitAverageResolution * TtHitAverageWindow / 1024))
+              || thisThread->ttHitAverage < 399 * TtHitAverageResolution * TtHitAverageWindow / 1024))
       {
           Depth r = reduction(improving, depth, moveCount);

           // Decrease reduction if the ttHit running average is large
-          if (thisThread->ttHitAverage > 500 * TtHitAverageResolution * TtHitAverageWindow / 1024)
+          if (thisThread->ttHitAverage > 492 * TtHitAverageResolution * TtHitAverageWindow / 1024)
               r--;

           // Reduction if other threads are searching this position.
@@ -1195,14 +1195,14 @@ moves_loop: // When in check, search starts from here
                          - 4926;

           // Decrease/increase reduction by comparing opponent's stat score (~10 Elo)
-          if (ss->statScore >= -102 && (ss-1)->statScore < -114)
+          if (ss->statScore >= -99 && (ss-1)->statScore < -116)
               r--;

-          else if ((ss-1)->statScore >= -116 && ss->statScore < -154)
+          else if ((ss-1)->statScore >= -117 && ss->statScore < -150)
               r++;

           // Decrease/increase reduction for moves with a good/bad history (~30 Elo)
-          r -= ss->statScore / 16434;
+          r -= ss->statScore / 15896;
       }
       else
       {
@@ -1474,7 +1474,7 @@ moves_loop: // When in check, search starts from here
         if (PvNode && bestValue > alpha)
             alpha = bestValue;

-        futilityBase = bestValue + 154;
+        futilityBase = bestValue + 138;
     }

     const PieceToHistory* contHist[] = { (ss-1)->continuationHistory, (ss-2)->continuationHistory,
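
For readers comparing the retuned constants, here is a minimal standalone sketch
(not part of the patch; it simply re-evaluates the futility margin and futility
move-count formulas from the diff above with the old and the new constants, for
the non-improving case) showing how the pruning thresholds shift at shallow depths:

    // Illustration only: old vs new futility formulas with the constants from this diff.
    #include <cstdio>

    int futility_margin_old(int d, bool improving)     { return 217 * (d - improving); }
    int futility_margin_new(int d, bool improving)     { return 224 * (d - improving); }
    int futility_move_count_old(bool improving, int d) { return (4 + d * d) / (2 - improving); }
    int futility_move_count_new(bool improving, int d) { return (3 + d * d) / (2 - improving); }

    int main() {
        for (int d = 1; d <= 6; ++d)
            std::printf("depth %d: margin %3d -> %3d, move count %2d -> %2d\n",
                        d,
                        futility_margin_old(d, false), futility_margin_new(d, false),
                        futility_move_count_old(false, d), futility_move_count_new(false, d));
        return 0;
    }

The margin grows slightly per depth while the move-count limit tightens at depth 1,
consistent with the small shifts the tuner settled on.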