1
0
Fork 0
mirror of https://github.com/sockspls/badfish synced 2025-04-29 08:13:08 +00:00

Introduce Fractional LMR

Tuning Run (90k Games):
https://tests.stockfishchess.org/tests/view/67202b1c86d5ee47d953d442

Passed STC:
LLR: 2.94 (-2.94,2.94) <0.00,2.00>
Total: 241024 W: 62616 L: 62001 D: 116407
Ptnml(0-2): 716, 28231, 62015, 28822, 728
https://tests.stockfishchess.org/tests/view/6725196786d5ee47d953d9f2

Passed LTC:
LLR: 2.95 (-2.94,2.94) <0.50,2.50>
Total: 92532 W: 23678 L: 23246 D: 45608
Ptnml(0-2): 45, 9981, 25797, 10383, 60
https://tests.stockfishchess.org/tests/view/6727d3cb86d5ee47d953db9d

closes https://github.com/official-stockfish/Stockfish/pull/5667

Bench: 1066071
This commit is contained in:
Shawn Xu 2024-10-27 14:07:03 -07:00 committed by Disservin
parent f77bac3dca
commit cc5c67c564

View file

@@ -999,7 +999,7 @@ moves_loop: // When in check, search starts here
mp.skip_quiet_moves();
// Reduced depth of the next LMR search
- int lmrDepth = newDepth - r;
+ int lmrDepth = newDepth - r / 1024;
if (capture || givesCheck)
{
@@ -1156,36 +1156,36 @@ moves_loop: // When in check, search starts here
// Decrease reduction if position is or has been on the PV (~7 Elo)
if (ss->ttPv)
- r -= 1 + (ttData.value > alpha) + (ttData.depth >= depth);
+ r -= 1024 + (ttData.value > alpha) * 1024 + (ttData.depth >= depth) * 1024;
// Decrease reduction for PvNodes (~0 Elo on STC, ~2 Elo on LTC)
if (PvNode)
- r--;
+ r -= 1024;
// These reduction adjustments have no proven non-linear scaling
// Increase reduction for cut nodes (~4 Elo)
if (cutNode)
- r += 2 - (ttData.depth >= depth && ss->ttPv);
+ r += 2518 - (ttData.depth >= depth && ss->ttPv) * 991;
// Increase reduction if ttMove is a capture but the current move is not a capture (~3 Elo)
if (ttCapture && !capture)
- r += 1 + (depth < 8);
+ r += 1043 + (depth < 8) * 999;
// Increase reduction if next ply has a lot of fail high (~5 Elo)
if ((ss + 1)->cutoffCnt > 3)
- r += 1 + allNode;
+ r += 938 + allNode * 960;
// For first picked move (ttMove) reduce reduction (~3 Elo)
else if (move == ttData.move)
- r -= 2;
+ r -= 1879;
ss->statScore = 2 * thisThread->mainHistory[us][move.from_to()]
+ (*contHist[0])[movedPiece][move.to_sq()]
+ (*contHist[1])[movedPiece][move.to_sq()] - 4410;
// Decrease/increase reduction for moves with a good/bad history (~8 Elo)
- r -= ss->statScore / 11016;
+ r -= ss->statScore * 1287 / 16384;
// Step 17. Late moves reduction / extension (LMR, ~117 Elo)
if (depth >= 2 && moveCount > 1)
@@ -1195,7 +1195,7 @@ moves_loop: // When in check, search starts here
// beyond the first move depth.
// To prevent problems when the max value is less than the min value,
// std::clamp has been replaced by a more robust implementation.
- Depth d = std::max(1, std::min(newDepth - r, newDepth + !allNode));
+ Depth d = std::max(1, std::min(newDepth - r / 1024, newDepth + !allNode));
value = -search<NonPV>(pos, ss + 1, -(alpha + 1), -alpha, d, true);
@@ -1223,10 +1223,11 @@ moves_loop: // When in check, search starts here
{
// Increase reduction if ttMove is not present (~6 Elo)
if (!ttData.move)
- r += 2;
+ r += 2037;
// Note that if expected reduction is high, we reduce search depth by 1 here (~9 Elo)
- value = -search<NonPV>(pos, ss + 1, -(alpha + 1), -alpha, newDepth - (r > 3), !cutNode);
+ value =
+     -search<NonPV>(pos, ss + 1, -(alpha + 1), -alpha, newDepth - (r > 2983), !cutNode);
}
// For PV nodes only, do a full PV search on the first move or after a fail high,
@@ -1700,7 +1701,7 @@ Value Search::Worker::qsearch(Position& pos, Stack* ss, Value alpha, Value beta)
Depth Search::Worker::reduction(bool i, Depth d, int mn, int delta) const {
int reductionScale = reductions[d] * reductions[mn];
- return (reductionScale + 1239 - delta * 795 / rootDelta) / 1024 + (!i && reductionScale > 1341);
+ return (reductionScale + 1239 - delta * 795 / rootDelta) + (!i && reductionScale > 1341) * 1135;
}
// elapsed() returns the time elapsed since the search started. If the