diff --git a/src/search.cpp b/src/search.cpp
index c807f1bd..2a2331cb 100644
--- a/src/search.cpp
+++ b/src/search.cpp
@@ -67,7 +67,7 @@ namespace {
 
 // Futility margin
 Value futility_margin(Depth d, bool noTtCutNode, bool improving, bool oppWorsening) {
-    Value futilityMult       = 118 - 33 * noTtCutNode;
+    Value futilityMult       = 109 - 27 * noTtCutNode;
     Value improvingDeduction = improving * futilityMult * 2;
     Value worseningDeduction = oppWorsening * futilityMult / 3;
 
@@ -94,16 +94,16 @@ Value to_corrected_static_eval(Value v, const Worker& w, const Position& pos, St
         cntcv = int((*(ss - 2)->continuationCorrectionHistory)[pos.piece_on(m.to_sq())][m.to_sq()]);
 
     const auto cv =
-      (5932 * pcv + 3269 * macv + 5660 * micv + 6666 * (wnpcv + bnpcv) + 5555 * cntcv) / 131072;
+      (6384 * pcv + 3583 * macv + 6492 * micv + 6725 * (wnpcv + bnpcv) + cntcv * 5880) / 131072;
     v += cv;
     return std::clamp(v, VALUE_TB_LOSS_IN_MAX_PLY + 1, VALUE_TB_WIN_IN_MAX_PLY - 1);
 }
 
 // History and stats update bonus, based on depth
-int stat_bonus(Depth d) { return std::min(179 * d - 108, 1598); }
+int stat_bonus(Depth d) { return std::min(168 * d - 100, 1718); }
 
 // History and stats update malus, based on depth
-int stat_malus(Depth d) { return std::min(820 * d - 261, 2246); }
+int stat_malus(Depth d) { return std::min(768 * d - 257, 2351); }
 
 // Add a small random component to draw evaluations to avoid 3-fold blindness
 Value value_draw(size_t nodes) { return VALUE_DRAW - 1 + Value(nodes & 0x2); }
@@ -311,13 +311,13 @@ void Search::Worker::iterative_deepening() {
             selDepth = 0;
 
             // Reset aspiration window starting size
-            delta     = 5 + std::abs(rootMoves[pvIdx].meanSquaredScore) / 13797;
+            delta     = 5 + std::abs(rootMoves[pvIdx].meanSquaredScore) / 13461;
             Value avg = rootMoves[pvIdx].averageScore;
             alpha     = std::max(avg - delta, -VALUE_INFINITE);
             beta      = std::min(avg + delta, VALUE_INFINITE);
 
             // Adjust optimism based on root move's averageScore (~4 Elo)
-            optimism[us]  = 132 * avg / (std::abs(avg) + 89);
+            optimism[us]  = 150 * avg / (std::abs(avg) + 85);
             optimism[~us] = -optimism[us];
 
             // Start with a small aspiration window and, in the case of a fail
@@ -502,8 +502,8 @@ void Search::Worker::iterative_deepening() {
 void Search::Worker::clear() {
     mainHistory.fill(0);
     lowPlyHistory.fill(0);
-    captureHistory.fill(-753);
-    pawnHistory.fill(-1152);
+    captureHistory.fill(-758);
+    pawnHistory.fill(-1158);
     pawnCorrectionHistory.fill(0);
     majorPieceCorrectionHistory.fill(0);
     minorPieceCorrectionHistory.fill(0);
@@ -518,10 +518,10 @@ void Search::Worker::clear() {
         for (StatsType c : {NoCaptures, Captures})
             for (auto& to : continuationHistory[inCheck][c])
                 for (auto& h : to)
-                    h->fill(-678);
+                    h->fill(-645);
 
     for (size_t i = 1; i < reductions.size(); ++i)
-        reductions[i] = int((18.43 + std::log(size_t(options["Threads"])) / 2) * std::log(i));
+        reductions[i] = int((19.43 + std::log(size_t(options["Threads"])) / 2) * std::log(i));
 
     refreshTable.clear(networks[numaAccessToken]);
 }
@@ -760,7 +760,7 @@ Value Search::Worker::search(
     // Use static evaluation difference to improve quiet move ordering (~9 Elo)
     if (((ss - 1)->currentMove).is_ok() && !(ss - 1)->inCheck && !priorCapture)
     {
-        int bonus = std::clamp(-10 * int((ss - 1)->staticEval + ss->staticEval), -1641, 1423) + 760;
+        int bonus = std::clamp(-10 * int((ss - 1)->staticEval + ss->staticEval), -1831, 1428) + 623;
         thisThread->mainHistory[~us][((ss - 1)->currentMove).from_to()] << bonus;
         if (type_of(pos.piece_on(prevSq)) != PAWN && ((ss - 1)->currentMove).type_of() != PROMOTION)
             thisThread->pawnHistory[pawn_structure_index(pos)][pos.piece_on(prevSq)][prevSq]
@@ -778,7 +778,7 @@ Value Search::Worker::search(
     // Step 7. Razoring (~1 Elo)
     // If eval is really low, check with qsearch if we can exceed alpha. If the
     // search suggests we cannot exceed alpha, return a speculative fail low.
-    if (eval < alpha - 501 - 272 * depth * depth)
+    if (eval < alpha - 469 - 307 * depth * depth)
     {
         value = qsearch(pos, ss, alpha - 1, alpha);
         if (value < alpha && std::abs(value) < VALUE_TB_WIN_IN_MAX_PLY)
@@ -787,9 +787,9 @@ Value Search::Worker::search(
 
     // Step 8. Futility pruning: child node (~40 Elo)
     // The depth condition is important for mate finding.
-    if (!ss->ttPv && depth < 13
+    if (!ss->ttPv && depth < 14
         && eval - futility_margin(depth, cutNode && !ss->ttHit, improving, opponentWorsening)
-                 - (ss - 1)->statScore / 272
+                 - (ss - 1)->statScore / 290
              >= beta
         && eval >= beta && (!ttData.move || ttCapture) && beta > VALUE_TB_LOSS_IN_MAX_PLY
         && eval < VALUE_TB_WIN_IN_MAX_PLY)
@@ -799,13 +799,13 @@ Value Search::Worker::search(
 
     // Step 9. Null move search with verification search (~35 Elo)
     if (cutNode && (ss - 1)->currentMove != Move::null() && eval >= beta
-        && ss->staticEval >= beta - 23 * depth + 400 && !excludedMove && pos.non_pawn_material(us)
+        && ss->staticEval >= beta - 21 * depth + 421 && !excludedMove && pos.non_pawn_material(us)
         && ss->ply >= thisThread->nmpMinPly && beta > VALUE_TB_LOSS_IN_MAX_PLY)
     {
         assert(eval - beta >= 0);
 
         // Null move dynamic reduction based on depth and eval
-        Depth R = std::min(int(eval - beta) / 209, 6) + depth / 3 + 5;
+        Depth R = std::min(int(eval - beta) / 235, 7) + depth / 3 + 5;
 
         ss->currentMove         = Move::null();
         ss->continuationHistory = &thisThread->continuationHistory[0][0][NO_PIECE][0];
@@ -855,7 +855,7 @@ Value Search::Worker::search(
     // Step 11. ProbCut (~10 Elo)
     // If we have a good enough capture (or queen promotion) and a reduced search
    // returns a value much above beta, we can (almost) safely prune the previous move.
-    probCutBeta = beta + 189 - 53 * improving - 30 * opponentWorsening;
+    probCutBeta = beta + 187 - 53 * improving - 27 * opponentWorsening;
     if (!PvNode && depth > 3
         && std::abs(beta) < VALUE_TB_WIN_IN_MAX_PLY
         // If value from transposition table is lower than probCutBeta, don't attempt
@@ -926,7 +926,7 @@ moves_loop:  // When in check, search starts here
 
     // Step 12. A small Probcut idea (~4 Elo)
-    probCutBeta = beta + 379;
+    probCutBeta = beta + 417;
     if ((ttData.bound & BOUND_LOWER) && ttData.depth >= depth - 4 && ttData.value >= probCutBeta
         && std::abs(beta) < VALUE_TB_WIN_IN_MAX_PLY && std::abs(ttData.value) < VALUE_TB_WIN_IN_MAX_PLY)
@@ -1010,15 +1010,15 @@ moves_loop:  // When in check, search starts here
                 // Futility pruning for captures (~2 Elo)
                 if (!givesCheck && lmrDepth < 7 && !ss->inCheck)
                 {
-                    Value futilityValue = ss->staticEval + 300 + 238 * lmrDepth
+                    Value futilityValue = ss->staticEval + 287 + 253 * lmrDepth
                                         + PieceValue[capturedPiece] + captHist / 7;
                     if (futilityValue <= alpha)
                         continue;
                 }
 
                 // SEE based pruning for captures and checks (~11 Elo)
-                int seeHist = std::clamp(captHist / 32, -159 * depth, 160 * depth);
-                if (!pos.see_ge(move, -167 * depth - seeHist))
+                int seeHist = std::clamp(captHist / 33, -161 * depth, 156 * depth);
+                if (!pos.see_ge(move, -162 * depth - seeHist))
                     continue;
             }
             else
@@ -1029,15 +1029,15 @@ moves_loop:  // When in check, search starts here
                   + thisThread->pawnHistory[pawn_structure_index(pos)][movedPiece][move.to_sq()];
 
                 // Continuation history based pruning (~2 Elo)
-                if (history < -4071 * depth)
+                if (history < -3884 * depth)
                     continue;
 
                 history += 2 * thisThread->mainHistory[us][move.from_to()];
 
-                lmrDepth += history / 3653;
+                lmrDepth += history / 3609;
 
                 Value futilityValue =
-                  ss->staticEval + (bestValue < ss->staticEval - 51 ? 145 : 49) + 144 * lmrDepth;
+                  ss->staticEval + (bestValue < ss->staticEval - 45 ? 140 : 43) + 141 * lmrDepth;
 
                 // Futility pruning: parent node (~13 Elo)
                 if (!ss->inCheck && lmrDepth < 12 && futilityValue <= alpha)
@@ -1051,7 +1051,7 @@ moves_loop:  // When in check, search starts here
                 lmrDepth = std::max(lmrDepth, 0);
 
                 // Prune moves with negative SEE (~4 Elo)
-                if (!pos.see_ge(move, -24 * lmrDepth * lmrDepth))
+                if (!pos.see_ge(move, -25 * lmrDepth * lmrDepth))
                     continue;
             }
         }
@@ -1074,11 +1074,11 @@ moves_loop:  // When in check, search starts here
         // and lower extension margins scale well.
         if (!rootNode && move == ttData.move && !excludedMove
-            && depth >= 4 - (thisThread->completedDepth > 36) + ss->ttPv
+            && depth >= 4 - (thisThread->completedDepth > 33) + ss->ttPv
            && std::abs(ttData.value) < VALUE_TB_WIN_IN_MAX_PLY && (ttData.bound & BOUND_LOWER)
            && ttData.depth >= depth - 3)
         {
-            Value singularBeta  = ttData.value - (54 + 77 * (ss->ttPv && !PvNode)) * depth / 64;
+            Value singularBeta  = ttData.value - (56 + 79 * (ss->ttPv && !PvNode)) * depth / 64;
             Depth singularDepth = newDepth / 2;
 
             ss->excludedMove = move;
@@ -1088,8 +1088,8 @@ moves_loop:  // When in check, search starts here
 
             if (value < singularBeta)
             {
-                int doubleMargin = 262 * PvNode - 204 * !ttCapture;
-                int tripleMargin = 97 + 266 * PvNode - 255 * !ttCapture + 94 * ss->ttPv;
+                int doubleMargin = 249 * PvNode - 194 * !ttCapture;
+                int tripleMargin = 94 + 287 * PvNode - 249 * !ttCapture + 99 * ss->ttPv;
 
                 extension = 1 + (value < singularBeta - doubleMargin)
                           + (value < singularBeta - tripleMargin);
@@ -1127,7 +1127,7 @@ moves_loop:  // When in check, search starts here
         else if (PvNode && move.to_sq() == prevSq
                  && thisThread->captureHistory[movedPiece][move.to_sq()]
                                               [type_of(pos.piece_on(move.to_sq()))]
-                      > 4299)
+                      > 4321)
             extension = 1;
     }
 
@@ -1182,7 +1182,7 @@ moves_loop:  // When in check, search starts here
 
         ss->statScore = 2 * thisThread->mainHistory[us][move.from_to()]
                       + (*contHist[0])[movedPiece][move.to_sq()]
-                      + (*contHist[1])[movedPiece][move.to_sq()] - 4410;
+                      + (*contHist[1])[movedPiece][move.to_sq()] - 3996;
 
         // Decrease/increase reduction for moves with a good/bad history (~8 Elo)
         r -= ss->statScore * 1287 / 16384;
@@ -1204,8 +1204,8 @@ moves_loop:  // When in check, search starts here
         {
             // Adjust full-depth search based on LMR results - if the result was
             // good enough search deeper, if it was bad enough search shallower.
-            const bool doDeeperSearch    = value > (bestValue + 38 + 2 * newDepth);  // (~1 Elo)
-            const bool doShallowerSearch = value < bestValue + 8;                    // (~2 Elo)
+            const bool doDeeperSearch    = value > (bestValue + 42 + 2 * newDepth);  // (~1 Elo)
+            const bool doShallowerSearch = value < bestValue + 10;                   // (~2 Elo)
 
             newDepth += doDeeperSearch - doShallowerSearch;
 
@@ -1377,29 +1377,29 @@ moves_loop:  // When in check, search starts here
     // Bonus for prior countermove that caused the fail low
     else if (!priorCapture && prevSq != SQ_NONE)
     {
-        int bonus = (118 * (depth > 5) + 38 * !allNode + 169 * ((ss - 1)->moveCount > 8)
-                     + 116 * (!ss->inCheck && bestValue <= ss->staticEval - 101)
-                     + 133 * (!(ss - 1)->inCheck && bestValue <= -(ss - 1)->staticEval - 92));
+        int bonus = (117 * (depth > 5) + 39 * !allNode + 168 * ((ss - 1)->moveCount > 8)
+                     + 115 * (!ss->inCheck && bestValue <= ss->staticEval - 108)
+                     + 119 * (!(ss - 1)->inCheck && bestValue <= -(ss - 1)->staticEval - 83));
 
         // Proportional to "how much damage we have to undo"
-        bonus += std::min(-(ss - 1)->statScore / 102, 305);
+        bonus += std::min(-(ss - 1)->statScore / 113, 300);
 
         bonus = std::max(bonus, 0);
 
         update_continuation_histories(ss - 1, pos.piece_on(prevSq), prevSq,
-                                      stat_bonus(depth) * bonus / 107);
+                                      stat_bonus(depth) * bonus / 93);
         thisThread->mainHistory[~us][((ss - 1)->currentMove).from_to()]
-          << stat_bonus(depth) * bonus / 174;
+          << stat_bonus(depth) * bonus / 179;
 
         if (type_of(pos.piece_on(prevSq)) != PAWN && ((ss - 1)->currentMove).type_of() != PROMOTION)
             thisThread->pawnHistory[pawn_structure_index(pos)][pos.piece_on(prevSq)][prevSq]
-              << stat_bonus(depth) * bonus / 25;
+              << stat_bonus(depth) * bonus / 24;
     }
 
     // Bonus when search fails low and there is a TT move
     else if (ttData.move && !allNode)
-        thisThread->mainHistory[us][ttData.move.from_to()] << stat_bonus(depth) / 4;
+        thisThread->mainHistory[us][ttData.move.from_to()] << stat_bonus(depth) * 23 / 100;
 
     if (PvNode)
         bestValue = std::min(bestValue, maxValue);
@@ -1428,13 +1428,13 @@ moves_loop:  // When in check, search starts here
         auto bonus = std::clamp(int(bestValue - ss->staticEval) * depth / 8,
                                 -CORRECTION_HISTORY_LIMIT / 4, CORRECTION_HISTORY_LIMIT / 4);
         thisThread->pawnCorrectionHistory[us][pawn_structure_index(pos)]
-          << bonus * 101 / 128;
-        thisThread->majorPieceCorrectionHistory[us][major_piece_index(pos)] << bonus * 157 / 128;
-        thisThread->minorPieceCorrectionHistory[us][minor_piece_index(pos)] << bonus * 153 / 128;
+          << bonus * 107 / 128;
+        thisThread->majorPieceCorrectionHistory[us][major_piece_index(pos)] << bonus * 162 / 128;
+        thisThread->minorPieceCorrectionHistory[us][minor_piece_index(pos)] << bonus * 148 / 128;
         thisThread->nonPawnCorrectionHistory[WHITE][us][non_pawn_index(pos)]
-          << bonus * 123 / 128;
+          << bonus * 122 / 128;
         thisThread->nonPawnCorrectionHistory[BLACK][us][non_pawn_index(pos)]
-          << bonus * 165 / 128;
+          << bonus * 185 / 128;
 
         if (m.is_ok())
             (*(ss - 2)->continuationCorrectionHistory)[pos.piece_on(m.to_sq())][m.to_sq()] << bonus;
@@ -1566,7 +1566,7 @@ Value Search::Worker::qsearch(Position& pos, Stack* ss, Value alpha, Value beta)
         if (bestValue > alpha)
             alpha = bestValue;
 
-        futilityBase = ss->staticEval + 280;
+        futilityBase = ss->staticEval + 306;
     }
 
     const PieceToHistory* contHist[] = {(ss - 1)->continuationHistory,
@@ -1629,11 +1629,11 @@ Value Search::Worker::qsearch(Position& pos, Stack* ss, Value alpha, Value beta)
                        + (*contHist[1])[pos.moved_piece(move)][move.to_sq()]
                        + thisThread->pawnHistory[pawn_structure_index(pos)][pos.moved_piece(move)]
                                                 [move.to_sq()]
-                     <= 5036)
+                     <= 5095)
                 continue;
 
             // Do not search moves with bad enough SEE values (~5 Elo)
-            if (!pos.see_ge(move, -82))
+            if (!pos.see_ge(move, -83))
                 continue;
         }
 
@@ -1701,7 +1701,7 @@ Value Search::Worker::qsearch(Position& pos, Stack* ss, Value alpha, Value beta)
 Depth Search::Worker::reduction(bool i, Depth d, int mn, int delta) const {
     int reductionScale = reductions[d] * reductions[mn];
-    return (reductionScale + 1239 - delta * 795 / rootDelta) + (!i && reductionScale > 1341) * 1135;
+    return (reductionScale + 1304 - delta * 814 / rootDelta) + (!i && reductionScale > 1423) * 1135;
 }
 
 // elapsed() returns the time elapsed since the search started. If the
@@ -1832,7 +1832,7 @@ void update_all_stats(const Position& pos,
 // at ply -1, -2, -3, -4, and -6 with current move.
 void update_continuation_histories(Stack* ss, Piece pc, Square to, int bonus) {
-    bonus = bonus * 53 / 64;
+    bonus = bonus * 50 / 64;
 
     for (int i : {1, 2, 3, 4, 6})
     {
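
Not part of the patch itself: below is a minimal standalone sketch that tabulates the old versus new stat_bonus/stat_malus curves, using only the constants visible in the hunks above (the file name and everything outside those constants are illustrative assumptions). It can help eyeball how a retune like this shifts the history updates per depth before the patch goes to testing.

// sketch_stat_bonus.cpp -- illustrative only, not part of src/search.cpp.
// Prints the history bonus/malus formulas before and after this patch;
// the constants are copied from the stat_bonus/stat_malus hunks above.
#include <algorithm>
#include <cstdio>

static int stat_bonus_old(int d) { return std::min(179 * d - 108, 1598); }
static int stat_bonus_new(int d) { return std::min(168 * d - 100, 1718); }
static int stat_malus_old(int d) { return std::min(820 * d - 261, 2246); }
static int stat_malus_new(int d) { return std::min(768 * d - 257, 2351); }

int main() {
    std::printf("depth   bonus old->new    malus old->new\n");
    for (int d = 1; d <= 12; ++d)
        std::printf("%5d   %5d -> %-5d    %5d -> %-5d\n", d, stat_bonus_old(d),
                    stat_bonus_new(d), stat_malus_old(d), stat_malus_new(d));
    return 0;
}

Compiled with any recent C++ compiler, the table shows the tuned bonus formula giving slightly smaller values at low depth but a higher cap (1598 to 1718), while the malus slope drops (820 to 768 per depth) with a higher cap (2246 to 2351).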