Mirror of https://github.com/sockspls/badfish
Further LTC tuning of search parameters
Tuning done by bigpenor with some hand adjustments on top by Viz.

Had a good performance at fixed games 180+1.8:
https://tests.stockfishchess.org/tests/view/631836b437f41b13973d7da1
Elo: 1.35 +-1.2 (95%) LOS: 98.6%
Total: 60000 W: 16422 L: 16189 D: 27389
Ptnml(0-2): 39, 5335, 18992, 5622, 12
nElo: 3.13 +-2.8 (95%) PairsRatio: 1.05

Passed 60+0.6 8 threads SPRT:
https://tests.stockfishchess.org/tests/view/631ba0ff74bc4fe483a99db3
LLR: 2.95 (-2.94,2.94) <0.50,2.50>
Total: 29712 W: 8301 L: 8039 D: 13372
Ptnml(0-2): 12, 2318, 9925, 2598, 3

closes https://github.com/official-stockfish/Stockfish/pull/4160

bench 3938073
This commit is contained in:
parent 5eeb96d0e7
commit eaf2c8207f

1 changed file with 46 additions and 46 deletions
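As a quick illustration of what the retuned constants do (this sketch is not part of the commit and is not engine code; plain ints stand in for Stockfish's Depth and Value types), the following standalone program evaluates the futility margin and stat bonus formulas from the diff below with both the old and the new coefficients:

// Illustrative only: old and new formulas copied from this diff,
// with int standing in for Depth/Value.
#include <algorithm>
#include <iostream>

int futility_margin_old(int d, bool improving) { return 174 * (d - improving); }
int futility_margin_new(int d, bool improving) { return 165 * (d - improving); }

int stat_bonus_old(int d) { return std::min((8 * d + 240) * d - 276, 1907); }
int stat_bonus_new(int d) { return std::min((12 * d + 282) * d - 349, 1594); }

int main() {
    for (int d = 1; d <= 8; ++d)
        std::cout << "depth " << d
                  << ": futility margin " << futility_margin_old(d, false)
                  << " -> " << futility_margin_new(d, false)
                  << ", stat bonus " << stat_bonus_old(d)
                  << " -> " << stat_bonus_new(d) << '\n';
}

At depth 8, for example, the non-improving futility margin drops from 1392 to 1320, while the stat bonus now saturates at its lower cap of 1594 instead of 1907.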
src/search.cpp

@@ -63,7 +63,7 @@ namespace {

   // Futility margin
   Value futility_margin(Depth d, bool improving) {
-    return Value(174 * (d - improving));
+    return Value(165 * (d - improving));
   }

   // Reductions lookup table, initialized at startup
@@ -71,7 +71,7 @@ namespace {

   Depth reduction(bool i, Depth d, int mn, Value delta, Value rootDelta) {
     int r = Reductions[d] * Reductions[mn];
-    return (r + 1463 - int(delta) * 1024 / int(rootDelta)) / 1024 + (!i && r > 1010);
+    return (r + 1642 - int(delta) * 1024 / int(rootDelta)) / 1024 + (!i && r > 916);
   }

   constexpr int futility_move_count(bool improving, Depth depth) {
@@ -81,7 +81,7 @@ namespace {

   // History and stats update bonus, based on depth
   int stat_bonus(Depth d) {
-    return std::min((8 * d + 240) * d - 276 , 1907);
+    return std::min((12 * d + 282) * d - 349 , 1594);
   }

   // Add a small random component to draw evaluations to avoid 3-fold blindness
@@ -158,7 +158,7 @@ namespace {
 void Search::init() {

   for (int i = 1; i < MAX_MOVES; ++i)
-      Reductions[i] = int((20.81 + std::log(Threads.size()) / 2) * std::log(i));
+      Reductions[i] = int((20.26 + std::log(Threads.size()) / 2) * std::log(i));
 }


@@ -307,10 +307,10 @@ void Thread::search() {

   multiPV = std::min(multiPV, rootMoves.size());

-  complexityAverage.set(174, 1);
+  complexityAverage.set(155, 1);

   trend = SCORE_ZERO;
-  optimism[ us] = Value(39);
+  optimism[ us] = Value(37);
   optimism[~us] = -optimism[us];

   int searchAgainCounter = 0;
@@ -353,16 +353,16 @@ void Thread::search() {
           if (rootDepth >= 4)
           {
               Value prev = rootMoves[pvIdx].averageScore;
-              delta = Value(16) + int(prev) * prev / 19178;
+              delta = Value(10) + int(prev) * prev / 15620;
               alpha = std::max(prev - delta,-VALUE_INFINITE);
               beta = std::min(prev + delta, VALUE_INFINITE);

               // Adjust trend and optimism based on root move's previousScore
-              int tr = sigmoid(prev, 3, 8, 90, 125, 1);
+              int tr = sigmoid(prev, 3, 10, 89, 116, 1);
               trend = (us == WHITE ?  make_score(tr, tr / 2)
                                    : -make_score(tr, tr / 2));

-              int opt = sigmoid(prev, 8, 17, 144, 15012, 183);
+              int opt = sigmoid(prev, 7, 20, 169, 19350, 164);
               optimism[ us] = Value(opt);
               optimism[~us] = -optimism[us];
           }
@@ -465,16 +465,16 @@ void Thread::search() {
           && !Threads.stop
           && !mainThread->stopOnPonderhit)
       {
-          double fallingEval = (69 + 12 * (mainThread->bestPreviousAverageScore - bestValue)
-                                   + 6 * (mainThread->iterValue[iterIdx] - bestValue)) / 781.4;
+          double fallingEval = (71 + 12 * (mainThread->bestPreviousAverageScore - bestValue)
+                                   + 6 * (mainThread->iterValue[iterIdx] - bestValue)) / 656.7;
           fallingEval = std::clamp(fallingEval, 0.5, 1.5);

           // If the bestMove is stable over several iterations, reduce time accordingly
-          timeReduction = lastBestMoveDepth + 10 < completedDepth ? 1.63 : 0.73;
-          double reduction = (1.56 + mainThread->previousTimeReduction) / (2.20 * timeReduction);
+          timeReduction = lastBestMoveDepth + 9 < completedDepth ? 1.37 : 0.65;
+          double reduction = (1.4 + mainThread->previousTimeReduction) / (2.15 * timeReduction);
           double bestMoveInstability = 1 + 1.7 * totBestMoveChanges / Threads.size();
           int complexity = mainThread->complexityAverage.value();
-          double complexPosition = std::min(1.0 + (complexity - 277) / 1819.1, 1.5);
+          double complexPosition = std::min(1.0 + (complexity - 261) / 1738.7, 1.5);

           double totalTime = Time.optimum() * fallingEval * reduction * bestMoveInstability * complexPosition;

@@ -495,7 +495,7 @@ void Thread::search() {
           }
           else if (   Threads.increaseDepth
                    && !mainThread->ponder
-                   && Time.elapsed() > totalTime * 0.43)
+                   && Time.elapsed() > totalTime * 0.53)
                    Threads.increaseDepth = false;
           else
                    Threads.increaseDepth = true;
@@ -760,7 +760,7 @@ namespace {
     // Use static evaluation difference to improve quiet move ordering (~3 Elo)
     if (is_ok((ss-1)->currentMove) && !(ss-1)->inCheck && !priorCapture)
     {
-        int bonus = std::clamp(-16 * int((ss-1)->staticEval + ss->staticEval), -2000, 2000);
+        int bonus = std::clamp(-19 * int((ss-1)->staticEval + ss->staticEval), -1914, 1914);
         thisThread->mainHistory[~us][from_to((ss-1)->currentMove)] << bonus;
     }

@@ -770,7 +770,7 @@ namespace {
     // margin and the improving flag are used in various pruning heuristics.
     improvement = (ss-2)->staticEval != VALUE_NONE ? ss->staticEval - (ss-2)->staticEval
                 : (ss-4)->staticEval != VALUE_NONE ? ss->staticEval - (ss-4)->staticEval
-                : 175;
+                : 168;
     improving = improvement > 0;

     // Step 7. Razoring.
@@ -778,7 +778,7 @@ namespace {
     // return a fail low.
     if (   !PvNode
         && depth <= 7
-        && eval < alpha - 341 - 267 * depth * depth)
+        && eval < alpha - 369 - 254 * depth * depth)
     {
         value = qsearch<NonPV>(pos, ss, alpha - 1, alpha);
         if (value < alpha)
@@ -789,18 +789,18 @@ namespace {
     // The depth condition is important for mate finding.
     if (   !ss->ttPv
         && depth < 8
-        && eval - futility_margin(depth, improving) - (ss-1)->statScore / 256 >= beta
+        && eval - futility_margin(depth, improving) - (ss-1)->statScore / 303 >= beta
         && eval >= beta
-        && eval < 26305) // larger than VALUE_KNOWN_WIN, but smaller than TB wins.
+        && eval < 28031) // larger than VALUE_KNOWN_WIN, but smaller than TB wins.
         return eval;

     // Step 9. Null move search with verification search (~22 Elo)
     if (   !PvNode
         && (ss-1)->currentMove != MOVE_NULL
-        && (ss-1)->statScore < 15344
+        && (ss-1)->statScore < 17139
         && eval >= beta
         && eval >= ss->staticEval
-        && ss->staticEval >= beta - 15 * depth - improvement / 15 + 201 + complexity / 24
+        && ss->staticEval >= beta - 20 * depth - improvement / 13 + 233 + complexity / 25
         && !excludedMove
         && pos.non_pawn_material(us)
         && (ss->ply >= thisThread->nmpMinPly || us != thisThread->nmpColor))
@@ -808,7 +808,7 @@ namespace {
         assert(eval - beta >= 0);

         // Null move dynamic reduction based on depth, eval and complexity of position
-        Depth R = std::min(int(eval - beta) / 152, 5) + depth / 3 + 4 - (complexity > 650);
+        Depth R = std::min(int(eval - beta) / 168, 7) + depth / 3 + 4 - (complexity > 861);

         ss->currentMove = MOVE_NULL;
         ss->continuationHistory = &thisThread->continuationHistory[0][0][NO_PIECE][0];
@@ -844,7 +844,7 @@ namespace {
         }
     }

-    probCutBeta = beta + 173 - 46 * improving;
+    probCutBeta = beta + 191 - 54 * improving;

     // Step 10. ProbCut (~4 Elo)
     // If we have a good enough capture and a reduced search returns a value
@@ -913,7 +913,7 @@ namespace {
 moves_loop: // When in check, search starts here

     // Step 12. A small Probcut idea, when we are in check (~0 Elo)
-    probCutBeta = beta + 481;
+    probCutBeta = beta + 417;
     if (   ss->inCheck
         && !PvNode
         && depth >= 2
@@ -1007,14 +1007,14 @@ moves_loop: // When in check, search starts here
           if (   !pos.empty(to_sq(move))
               && !givesCheck
               && !PvNode
-              && lmrDepth < 6
+              && lmrDepth < 7
               && !ss->inCheck
-              && ss->staticEval + 277 + 187 * lmrDepth + PieceValue[EG][pos.piece_on(to_sq(move))]
+              && ss->staticEval + 180 + 201 * lmrDepth + PieceValue[EG][pos.piece_on(to_sq(move))]
                + captureHistory[movedPiece][to_sq(move)][type_of(pos.piece_on(to_sq(move)))] / 6 < alpha)
               continue;

           // SEE based pruning (~9 Elo)
-          if (!pos.see_ge(move, Value(-203) * depth))
+          if (!pos.see_ge(move, Value(-222) * depth))
               continue;
       }
       else
@@ -1032,12 +1032,12 @@ moves_loop: // When in check, search starts here

           // Futility pruning: parent node (~9 Elo)
           if (   !ss->inCheck
-              && lmrDepth < 11
-              && ss->staticEval + 122 + 138 * lmrDepth + history / 60 <= alpha)
+              && lmrDepth < 13
+              && ss->staticEval + 106 + 145 * lmrDepth + history / 52 <= alpha)
               continue;

           // Prune moves with negative SEE (~3 Elo)
-          if (!pos.see_ge(move, Value(-25 * lmrDepth * lmrDepth - 20 * lmrDepth)))
+          if (!pos.see_ge(move, Value(-24 * lmrDepth * lmrDepth - 15 * lmrDepth)))
               continue;
       }
   }
@@ -1052,7 +1052,7 @@ moves_loop: // When in check, search starts here
       // a reduced search on all the other moves but the ttMove and if the
       // result is lower than ttValue minus a margin, then we will extend the ttMove.
       if (   !rootNode
-          && depth >= 4 - (thisThread->previousDepth > 27) + 2 * (PvNode && tte->is_pv())
+          && depth >= 4 - (thisThread->previousDepth > 24) + 2 * (PvNode && tte->is_pv())
           && move == ttMove
           && !excludedMove // Avoid recursive singular search
        /* && ttValue != VALUE_NONE Already implicit in the next condition */
@@ -1074,8 +1074,8 @@ moves_loop: // When in check, search starts here

               // Avoid search explosion by limiting the number of double extensions
               if (   !PvNode
-                  && value < singularBeta - 26
-                  && ss->doubleExtensions <= 8)
+                  && value < singularBeta - 25
+                  && ss->doubleExtensions <= 9)
                   extension = 2;
           }

@@ -1099,14 +1099,14 @@ moves_loop: // When in check, search starts here
       // Check extensions (~1 Elo)
       else if (   givesCheck
                && depth > 9
-               && abs(ss->staticEval) > 71)
+               && abs(ss->staticEval) > 82)
           extension = 1;

       // Quiet ttMove extensions (~0 Elo)
       else if (   PvNode
                && move == ttMove
                && move == ss->killers[0]
-               && (*contHist[0])[movedPiece][to_sq(move)] >= 5491)
+               && (*contHist[0])[movedPiece][to_sq(move)] >= 5177)
           extension = 1;
       }

@@ -1159,7 +1159,7 @@ moves_loop: // When in check, search starts here

       // Decrease reduction for PvNodes based on depth
       if (PvNode)
-          r -= 1 + 15 / (3 + depth);
+          r -= 1 + 11 / (3 + depth);

       // Decrease reduction if ttMove has been singularly extended (~1 Elo)
       if (singularQuietLMR)
@@ -1173,10 +1173,10 @@ moves_loop: // When in check, search starts here
                      + (*contHist[0])[movedPiece][to_sq(move)]
                      + (*contHist[1])[movedPiece][to_sq(move)]
                      + (*contHist[3])[movedPiece][to_sq(move)]
-                     - 4560;
+                     - 4433;

       // Decrease/increase reduction for moves with a good/bad history (~30 Elo)
-      r -= ss->statScore / 15914;
+      r -= ss->statScore / 13628;

       // In general we want to cap the LMR depth search at newDepth, but when
       // reduction is negative, we allow this move a limited search extension
@@ -1188,7 +1188,7 @@ moves_loop: // When in check, search starts here
           // Do full depth search when reduced LMR search fails high
           if (value > alpha && d < newDepth)
           {
-              const bool doDeeperSearch = value > (alpha + 73 + 12 * (newDepth - d));
+              const bool doDeeperSearch = value > (alpha + 64 + 11 * (newDepth - d));
               value = -search<NonPV>(pos, ss+1, -(alpha+1), -alpha, newDepth + doDeeperSearch, !cutNode);

               int bonus = value > alpha ? stat_bonus(newDepth)
@@ -1280,8 +1280,8 @@ moves_loop: // When in check, search starts here
                   alpha = value;

                   // Reduce other moves if we have found at least one score improvement
-                  if (   depth > 2
-                      && depth < 7
+                  if (   depth > 1
+                      && depth < 6
                       && beta < VALUE_KNOWN_WIN
                       && alpha > -VALUE_KNOWN_WIN)
                       depth -= 1;
@@ -1344,7 +1344,7 @@ moves_loop: // When in check, search starts here
         //or fail low was really bad
         bool extraBonus =    PvNode
                           || cutNode
-                          || bestValue < alpha - 66 * depth;
+                          || bestValue < alpha - 62 * depth;

         update_continuation_histories(ss-1, pos.piece_on(prevSq), prevSq, stat_bonus(depth) * (1 + extraBonus));
     }
@@ -1471,7 +1471,7 @@ moves_loop: // When in check, search starts here
         if (PvNode && bestValue > alpha)
             alpha = bestValue;

-        futilityBase = bestValue + 118;
+        futilityBase = bestValue + 153;
     }

     const PieceToHistory* contHist[] = { (ss-1)->continuationHistory, (ss-2)->continuationHistory,
@@ -1675,7 +1675,7 @@ moves_loop: // When in check, search starts here

     if (!pos.capture(bestMove))
     {
-        int bonus2 = bestValue > beta + PawnValueMg ? bonus1 // larger bonus
+        int bonus2 = bestValue > beta + 137 ? bonus1 // larger bonus
                    : stat_bonus(depth); // smaller bonus

         // Increase stats for the best move in case it was a quiet move
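The late move reduction changes can be illustrated in the same spirit with a standalone sketch (again not part of the commit; the sample inputs are arbitrary and the Reductions[] entries are rebuilt here for a single thread), comparing the old and new table multiplier (20.81 -> 20.26), offset (1463 -> 1642) and non-improving threshold (1010 -> 916):

// Illustrative only: rebuilds single-thread Reductions[] entries and applies
// the reduction() formula with the old and the new constants from this diff.
#include <cmath>
#include <iostream>

int reductions_entry(double mult, int i) { return int(mult * std::log(i)); }

int reduction(double mult, int bias, int threshold,
              bool improving, int d, int mn, int delta, int rootDelta) {
    int r = reductions_entry(mult, d) * reductions_entry(mult, mn);
    return (r + bias - delta * 1024 / rootDelta) / 1024 + (!improving && r > threshold);
}

int main() {
    // Sample inputs, chosen only for illustration.
    int d = 12, mn = 10, delta = 30, rootDelta = 60;
    std::cout << "old reduction: "
              << reduction(20.81, 1463, 1010, false, d, mn, delta, rootDelta) << '\n'
              << "new reduction: "
              << reduction(20.26, 1642,  916, false, d, mn, delta, rootDelta) << '\n';
}

For these particular inputs both versions land on the same integer reduction; the differences only show up near the rounding and threshold boundaries.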