Big search tuning (version 2)
One more tuning - this one includes newly introduced heuristics and some other parameters that were not included in the previous one. Result of 400k games at 20+0.2 "as is". Tuning is continuing since there is probably a lot more Elo to gain.

STC:
https://tests.stockfishchess.org/tests/view/620782edd71106ed12a497d1
LLR: 2.99 (-2.94,2.94) <0.00,2.50>
Total: 38504 W: 10260 L: 9978 D: 18266
Ptnml(0-2): 142, 4249, 10230, 4447, 184

LTC:
https://tests.stockfishchess.org/tests/view/6207a243d71106ed12a49d07
LLR: 2.94 (-2.94,2.94) <0.50,3.00>
Total: 25176 W: 6793 L: 6546 D: 11837
Ptnml(0-2): 20, 2472, 7360, 2713, 23

closes https://github.com/official-stockfish/Stockfish/pull/3931

Bench: 4784796
parent cb9c2594fc
commit 3ec6e1d245
1 changed file with 49 additions and 49 deletions
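For context, the STC and LTC lines in the commit message are fishtest SPRT results. The sketch below (illustrative only, not part of the patch) shows where the (-2.94, 2.94) interval in those logs comes from, assuming the standard alpha = beta = 0.05 setup; the <0.00,2.50> and <0.50,3.00> brackets are the Elo hypotheses (elo0, elo1) that the log-likelihood ratio is tested against.

    // Illustrative sketch, not part of this patch: SPRT stopping bounds
    // for alpha = beta = 0.05, matching the (-2.94, 2.94) interval in the
    // quoted test results. The test passes once the log-likelihood ratio
    // reaches the upper bound and fails once it drops to the lower bound.
    #include <cmath>
    #include <cstdio>

    int main() {
        const double alpha = 0.05, beta = 0.05;
        const double upper = std::log((1.0 - beta) / alpha);  //  ~ 2.944
        const double lower = std::log(beta / (1.0 - alpha));  // ~ -2.944
        std::printf("SPRT bounds: (%.2f, %.2f)\n", lower, upper);
        return 0;
    }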
@@ -63,7 +63,7 @@ namespace {
   // Futility margin
   Value futility_margin(Depth d, bool improving) {
-    return Value(171 * (d - improving));
+    return Value(147 * (d - improving));
   }

   // Reductions lookup table, initialized at startup
@@ -71,7 +71,7 @@ namespace {
   Depth reduction(bool i, Depth d, int mn, Value delta, Value rootDelta) {
     int r = Reductions[d] * Reductions[mn];
-    return (r + 1575 - int(delta) * 1024 / int(rootDelta)) / 1024 + (!i && r > 1011);
+    return (r + 1627 - int(delta) * 1024 / int(rootDelta)) / 1024 + (!i && r > 992);
   }

   constexpr int futility_move_count(bool improving, Depth depth) {
@@ -80,7 +80,7 @@ namespace {
   // History and stats update bonus, based on depth
   int stat_bonus(Depth d) {
-    return std::min((7 * d + 254) * d - 206 , 1990);
+    return std::min((8 * d + 281) * d - 241 , 1949);
   }

   // Add a small random component to draw evaluations to avoid 3-fold blindness
@@ -157,7 +157,7 @@ namespace {
 void Search::init() {

   for (int i = 1; i < MAX_MOVES; ++i)
-      Reductions[i] = int((21.5 + std::log(Threads.size()) / 2) * std::log(i));
+      Reductions[i] = int((21.14 + std::log(Threads.size()) / 2) * std::log(i));
 }

@@ -303,10 +303,10 @@ void Thread::search() {
   multiPV = std::min(multiPV, rootMoves.size());

-  complexityAverage.set(190, 1);
+  complexityAverage.set(211, 1);

   trend = SCORE_ZERO;
-  optimism[ us] = Value(34);
+  optimism[ us] = Value(33);
   optimism[~us] = -optimism[us];

   int searchAgainCounter = 0;
@@ -349,16 +349,16 @@ void Thread::search() {
           if (rootDepth >= 4)
           {
               Value prev = rootMoves[pvIdx].averageScore;
-              delta = Value(16) + int(prev) * prev / 16384;
+              delta = Value(19) + int(prev) * prev / 18321;
               alpha = std::max(prev - delta,-VALUE_INFINITE);
               beta  = std::min(prev + delta, VALUE_INFINITE);

               // Adjust trend and optimism based on root move's previousScore
-              int tr = sigmoid(prev, 6, 13, 96, 110, 1);
+              int tr = sigmoid(prev, 4, 11, 92, 119, 1);
               trend = (us == WHITE ?  make_score(tr, tr / 2)
                                    : -make_score(tr, tr / 2));

-              int opt = sigmoid(prev, 7, 21, 94, 14786, 221);
+              int opt = sigmoid(prev, 9, 18, 115, 12250, 187);
               optimism[ us] = Value(opt);
               optimism[~us] = -optimism[us];
           }
@@ -413,7 +413,7 @@ void Thread::search() {
               else
                   break;

-              delta += delta / 4 + 3;
+              delta += delta / 4 + 2;

               assert(alpha >= -VALUE_INFINITE && beta <= VALUE_INFINITE);
           }
@@ -459,17 +459,17 @@ void Thread::search() {
           && !Threads.stop
           && !mainThread->stopOnPonderhit)
       {
-          double fallingEval = (87 + 12 * (mainThread->bestPreviousAverageScore - bestValue)
-                                   +  6 * (mainThread->iterValue[iterIdx] - bestValue)) / 777.20;
+          double fallingEval = (66 + 12 * (mainThread->bestPreviousAverageScore - bestValue)
+                                   +  6 * (mainThread->iterValue[iterIdx] - bestValue)) / 809.70;
           fallingEval = std::clamp(fallingEval, 0.5, 1.5);

           // If the bestMove is stable over several iterations, reduce time accordingly
-          timeReduction = lastBestMoveDepth + 8 < completedDepth ? 1.70 : 0.91;
-          double reduction = (1.59 + mainThread->previousTimeReduction) / (2.33 * timeReduction);
+          timeReduction = lastBestMoveDepth + 8 < completedDepth ? 1.73 : 0.94;
+          double reduction = (1.66 + mainThread->previousTimeReduction) / (2.35 * timeReduction);
           double bestMoveInstability = 1.073 + std::max(1.0, 2.25 - 9.9 / rootDepth)
                                              * totBestMoveChanges / Threads.size();
           int complexity = mainThread->complexityAverage.value();
-          double complexPosition = std::clamp(1.0 + (complexity - 312) / 1750.0, 0.5, 1.5);
+          double complexPosition = std::clamp(1.0 + (complexity - 293) / 1525.0, 0.5, 1.5);

           double totalTime = Time.optimum() * fallingEval * reduction * bestMoveInstability * complexPosition;

@@ -490,7 +490,7 @@ void Thread::search() {
           }
           else if (   Threads.increaseDepth
                    && !mainThread->ponder
-                   && Time.elapsed() > totalTime * 0.55)
+                   && Time.elapsed() > totalTime * 0.49)
                    Threads.increaseDepth = false;
           else
                    Threads.increaseDepth = true;
@@ -766,7 +766,7 @@ namespace {
    // margin and the improving flag are used in various pruning heuristics.
    improvement =   (ss-2)->staticEval != VALUE_NONE ? ss->staticEval - (ss-2)->staticEval
                  : (ss-4)->staticEval != VALUE_NONE ? ss->staticEval - (ss-4)->staticEval
-                 :                                    200;
+                 :                                    184;

    improving = improvement > 0;
    complexity = abs(ss->staticEval - (us == WHITE ? eg_value(pos.psq_score()) : -eg_value(pos.psq_score())));
@@ -778,7 +778,7 @@ namespace {
    // return a fail low.
    if (   !PvNode
        && depth <= 6
-       && eval < alpha - 400 - 300 * depth * depth)
+       && eval < alpha - 486 - 314 * depth * depth)
    {
        value = qsearch<NonPV>(pos, ss, alpha - 1, alpha);
        if (value < alpha)
@@ -791,16 +791,16 @@ namespace {
        && depth < 8
        && eval - futility_margin(depth, improving) - (ss-1)->statScore / 256 >= beta
        && eval >= beta
-       && eval < 17548) // 50% larger than VALUE_KNOWN_WIN, but smaller than TB wins.
+       && eval < 22266) // larger than VALUE_KNOWN_WIN, but smaller than TB wins.
        return eval;

    // Step 9. Null move search with verification search (~22 Elo)
    if (   !PvNode
        && (ss-1)->currentMove != MOVE_NULL
-       && (ss-1)->statScore < 13706
+       && (ss-1)->statScore < 15075
        && eval >= beta
        && eval >= ss->staticEval
-       && ss->staticEval >= beta - 19 * depth - improvement / 15 + 200 + complexity / 25
+       && ss->staticEval >= beta - 18 * depth - improvement / 19 + 215 + complexity / 30
        && !excludedMove
        && pos.non_pawn_material(us)
        && (ss->ply >= thisThread->nmpMinPly || us != thisThread->nmpColor))
@@ -808,7 +808,7 @@ namespace {
        assert(eval - beta >= 0);

        // Null move dynamic reduction based on depth, eval and complexity of position
-       Depth R = std::min(int(eval - beta) / 205, 3) + depth / 3 + 4 - (complexity > 500);
+       Depth R = std::min(int(eval - beta) / 184, 4) + depth / 3 + 4 - (complexity > 799);

        ss->currentMove = MOVE_NULL;
        ss->continuationHistory = &thisThread->continuationHistory[0][0][NO_PIECE][0];
@@ -844,13 +844,13 @@ namespace {
        }
    }

-   probCutBeta = beta + 229 - 47 * improving;
+   probCutBeta = beta + 204 - 52 * improving;

    // Step 10. ProbCut (~4 Elo)
    // If we have a good enough capture and a reduced search returns a value
    // much above beta, we can (almost) safely prune the previous move.
    if (   !PvNode
-       && depth > 3
+       && depth > 4
        && abs(beta) < VALUE_TB_WIN_IN_MAX_PLY
        // if value from transposition table is lower than probCutBeta, don't attempt probCut
        // there and in further interactions with transposition table cutoff depth is set to depth - 3
@@ -908,12 +908,12 @@ namespace {
    // Step 11. If the position is not in TT, decrease depth by 2 or 1 depending on node type (~3 Elo)
    if (   PvNode
-       && depth >= 4
+       && depth >= 3
        && !ttMove)
        depth -= 2;

    if (   cutNode
-       && depth >= 7
+       && depth >= 8
        && !ttMove)
        depth--;

@@ -923,7 +923,7 @@ moves_loop: // When in check, search starts here
    probCutBeta = beta + 401;
    if (   ss->inCheck
        && !PvNode
-       && depth >= 4
+       && depth >= 2
        && ttCapture
        && (tte->bound() & BOUND_LOWER)
        && tte->depth() >= depth - 3
@@ -1014,14 +1014,14 @@ moves_loop: // When in check, search starts here
          if (   !pos.empty(to_sq(move))
              && !givesCheck
              && !PvNode
-             && lmrDepth < 6
+             && lmrDepth < 7
              && !ss->inCheck
-             && ss->staticEval + 392 + 207 * lmrDepth + PieceValue[EG][pos.piece_on(to_sq(move))]
-              + captureHistory[movedPiece][to_sq(move)][type_of(pos.piece_on(to_sq(move)))] / 8 < alpha)
+             && ss->staticEval + 424 + 138 * lmrDepth + PieceValue[EG][pos.piece_on(to_sq(move))]
+              + captureHistory[movedPiece][to_sq(move)][type_of(pos.piece_on(to_sq(move)))] / 7 < alpha)
              continue;

          // SEE based pruning (~9 Elo)
-         if (!pos.see_ge(move, Value(-200) * depth))
+         if (!pos.see_ge(move, Value(-214) * depth))
              continue;
      }
      else
@@ -1040,11 +1040,11 @@ moves_loop: // When in check, search starts here
          // Futility pruning: parent node (~9 Elo)
          if (   !ss->inCheck
              && lmrDepth < 11
-             && ss->staticEval + 131 + 137 * lmrDepth + history / 64 <= alpha)
+             && ss->staticEval + 147 + 125 * lmrDepth + history / 64 <= alpha)
              continue;

          // Prune moves with negative SEE (~3 Elo)
-         if (!pos.see_ge(move, Value(-25 * lmrDepth * lmrDepth - 29 * lmrDepth)))
+         if (!pos.see_ge(move, Value(-23 * lmrDepth * lmrDepth - 31 * lmrDepth)))
              continue;
      }
  }
@@ -1067,7 +1067,7 @@ moves_loop: // When in check, search starts here
          && (tte->bound() & BOUND_LOWER)
          && tte->depth() >= depth - 3)
      {
-         Value singularBeta = ttValue - 3 * depth;
+         Value singularBeta = ttValue - 4 * depth;
          Depth singularDepth = (depth - 1) / 2;

          ss->excludedMove = move;
@@ -1080,8 +1080,8 @@ moves_loop: // When in check, search starts here
              // Avoid search explosion by limiting the number of double extensions
              if (   !PvNode
-                 && value < singularBeta - 71
-                 && ss->doubleExtensions <= 6)
+                 && value < singularBeta - 52
+                 && ss->doubleExtensions <= 8)
                  extension = 2;
          }

@@ -1100,15 +1100,15 @@ moves_loop: // When in check, search starts here
          // Check extensions (~1 Elo)
          else if (   givesCheck
-                  && depth > 7
-                  && abs(ss->staticEval) > 128)
+                  && depth > 8
+                  && abs(ss->staticEval) > 81)
              extension = 1;

          // Quiet ttMove extensions (~0 Elo)
          else if (   PvNode
                   && move == ttMove
                   && move == ss->killers[0]
-                  && (*contHist[0])[movedPiece][to_sq(move)] >= 8932)
+                  && (*contHist[0])[movedPiece][to_sq(move)] >= 7546)
              extension = 1;
      }

@@ -1145,7 +1145,7 @@ moves_loop: // When in check, search starts here
      // Decrease reduction at some PvNodes (~2 Elo)
      if (   PvNode
-         && bestMoveCount <= 4)
+         && bestMoveCount <= 3)
          r--;

      // Decrease reduction if position is or has been on the PV
@@ -1170,18 +1170,18 @@ moves_loop: // When in check, search starts here
                     + (*contHist[0])[movedPiece][to_sq(move)]
                     + (*contHist[1])[movedPiece][to_sq(move)]
                     + (*contHist[3])[movedPiece][to_sq(move)]
-                    - 4142;
+                    - 4123;

      // Decrease/increase reduction for moves with a good/bad history (~30 Elo)
-     r -= ss->statScore / 15328;
+     r -= ss->statScore / 17417;

      // In general we want to cap the LMR depth search at newDepth. But if reductions
      // are really negative and movecount is low, we allow this move to be searched
      // deeper than the first move (this may lead to hidden double extensions).
      int deeper =   r >= -1                   ? 0
                   : moveCount <= 5            ? 2
-                  : PvNode && depth > 4       ? 1
-                  : cutNode && moveCount <= 5 ? 1
+                  : PvNode && depth > 3       ? 1
+                  : cutNode && moveCount <= 7 ? 1
                   :                             0;

      Depth d = std::clamp(newDepth - r, 1, newDepth + deeper);
@@ -1190,7 +1190,7 @@ moves_loop: // When in check, search starts here
          // If the son is reduced and fails high it will be re-searched at full depth
          doFullDepthSearch = value > alpha && d < newDepth;
-         doDeeperSearch = value > (alpha + 80 + 20 * (newDepth - d));
+         doDeeperSearch = value > (alpha + 76 + 11 * (newDepth - d));
          didLMR = true;
      }
      else
@@ -1211,7 +1211,7 @@ moves_loop: // When in check, search starts here
                       : -stat_bonus(newDepth);

          if (captureOrPromotion)
-             bonus /= 5;
+             bonus /= 6;

          update_continuation_histories(ss, movedPiece, to_sq(move), bonus);
      }
@@ -1335,14 +1335,14 @@ moves_loop: // When in check, search starts here
                     quietsSearched, quietCount, capturesSearched, captureCount, depth);

    // Bonus for prior countermove that caused the fail low
-   else if (   (depth >= 3 || PvNode)
+   else if (   (depth >= 4 || PvNode)
             && !priorCapture)
    {
        //Assign extra bonus if current node is PvNode or cutNode
        //or fail low was really bad
        bool extraBonus =    PvNode
                          || cutNode
-                         || bestValue < alpha - 99 * depth;
+                         || bestValue < alpha - 71 * depth;

        update_continuation_histories(ss-1, pos.piece_on(prevSq), prevSq, stat_bonus(depth) * (1 + extraBonus));
    }
@@ -1473,7 +1473,7 @@ moves_loop: // When in check, search starts here
        if (PvNode && bestValue > alpha)
            alpha = bestValue;

-       futilityBase = bestValue + 127;
+       futilityBase = bestValue + 139;
    }

    const PieceToHistory* contHist[] = { (ss-1)->continuationHistory, (ss-2)->continuationHistory,