Mirror of https://github.com/sockspls/badfish, synced 2025-04-29 16:23:09 +00:00
Use exploration rate for reductions
This patch measures how frequently search is exploring new configurations. This is done by computing a running average of ttHit. The ttHitAverage rate is somewhat low (e.g. 30% for startpos) in the normal case, while it can be very high if no progress is made (e.g. 90% for the fortress I used for testing). This information can be used to influence search; in this patch, by adjusting reductions when the rate exceeds 50%.

A first version (using a low ttHitAverageResolution and this 50% threshold) passed testing:

STC
LLR: 2.96 (-2.94,2.94) [-1.50,4.50]
Total: 26425 W: 5837 L: 5650 D: 14938
http://tests.stockfishchess.org/tests/view/5dcede8b0ebc5902563258fa

LTC
LLR: 2.96 (-2.94,2.94) [0.00,3.50]
Total: 32313 W: 5392 L: 5128 D: 21793
http://tests.stockfishchess.org/tests/view/5dcefb1f0ebc590256325c0e

However, as discussed in pull request 2414, using a larger ttHitAverageResolution gives a better approximation of the underlying distributions. This needs a slight adjustment of the threshold, since the new distributions are shifted a bit compared to the older ones, and the threshold appears to be sensitive (we used 0.53125 here).
https://github.com/official-stockfish/Stockfish/pull/2414

This final version also passed testing and is used for the patch:

STC
LLR: 2.95 (-2.94,2.94) [-1.50,4.50]
Total: 16025 W: 3555 L: 3399 D: 9071
http://tests.stockfishchess.org/tests/view/5dd070b90ebc5902579e20c2

LTC
LLR: 2.96 (-2.94,2.94) [0.00,3.50]
Total: 37576 W: 6277 L: 5998 D: 25301
http://tests.stockfishchess.org/tests/view/5dd0f58e6f544e798086f224

Closes https://github.com/official-stockfish/Stockfish/pull/2414

Bench: 4989584
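A self-contained sketch of the scheme described above: the constants, initialization, update formula, and threshold are taken from the diff below, while the simulated hit stream and the demo main() are hypothetical, only there to show the moving average converging to the observed rate.

#include <cstdint>
#include <iostream>

// From the patch: the average is stored as a fixed-point value in
// [0, ttHitAverageWindow * ttHitAverageResolution], i.e. rate * window * resolution.
constexpr uint64_t ttHitAverageWindow     = 4096;
constexpr uint64_t ttHitAverageResolution = 1024;

int main() {

  // Thread::search() starts the average at a 50% exploration rate
  uint64_t ttHitAverage = ttHitAverageWindow * ttHitAverageResolution / 2;

  // Hypothetical stream of ttHit observations: 90% hits, roughly the
  // fortress case mentioned in the commit message
  for (int i = 0; i < 100000; ++i)
  {
      bool ttHit = (i % 10) != 0;

      // Exponential moving average over a window of ~4096 nodes,
      // scaled by the resolution (this is the update from the patch)
      ttHitAverage =   (ttHitAverageWindow - 1) * ttHitAverage / ttHitAverageWindow
                     + ttHitAverageResolution * ttHit;
  }

  // The reduction tweak fires once the rate exceeds 544/1024 = 0.53125
  double rate = double(ttHitAverage) / (ttHitAverageWindow * ttHitAverageResolution);
  bool decreaseReduction =
      ttHitAverage > 544 * ttHitAverageResolution * ttHitAverageWindow / 1024;

  std::cout << "rate = " << rate << ", decrease reduction: "
            << std::boolalpha << decreaseReduction << '\n';
}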
This commit is contained in:
parent 3468138210
commit fe124896b2
2 changed files with 12 additions and 0 deletions
src/search.cpp

@@ -61,6 +61,9 @@ namespace {
   // Different node types, used as a template parameter
   enum NodeType { NonPV, PV };
 
+  constexpr uint64_t ttHitAverageWindow     = 4096;
+  constexpr uint64_t ttHitAverageResolution = 1024;
+
   // Razor and futility margins
   constexpr int RazorMargin = 661;
   Value futility_margin(Depth d, bool improving) {
@@ -363,6 +366,7 @@ void Thread::search() {
       multiPV = std::max(multiPV, (size_t)4);
 
   multiPV = std::min(multiPV, rootMoves.size());
+  ttHitAverage = ttHitAverageWindow * ttHitAverageResolution / 2;
 
   int ct = int(Options["Contempt"]) * PawnValueEg / 100; // From centipawns
 
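A quick sanity check on this starting value, reading the stored quantity as rate times window times resolution:

\[
\mathrm{ttHitAverage}_0 = \frac{4096 \cdot 1024}{2} = 2097152 = 0.5 \cdot \mathrm{ttHitAverageWindow} \cdot \mathrm{ttHitAverageResolution},
\]

i.e. each thread begins as if the hit rate so far had been exactly 50%.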
@@ -665,6 +669,9 @@ namespace {
     ttMove =  rootNode ? thisThread->rootMoves[thisThread->pvIdx].pv[0]
             : ttHit    ? tte->move() : MOVE_NONE;
     ttPv = PvNode || (ttHit && tte->is_pv());
+    // thisThread->ttHitAverage can be used to approximate the running average of ttHit
+    thisThread->ttHitAverage =   (ttHitAverageWindow - 1) * thisThread->ttHitAverage / ttHitAverageWindow
+                               + ttHitAverageResolution * ttHit;
 
     // At non-PV nodes we check for an early TT cutoff
     if (  !PvNode
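A sketch of why this update tracks the hit rate: assuming (for the sake of the argument) a stationary probability p that a node produces a ttHit, and writing W = ttHitAverageWindow and R = ttHitAverageResolution, the fixed point A* of the update satisfies

\[
A^* = \frac{W-1}{W}\,A^* + R\,p \quad\Longrightarrow\quad A^* = W R\, p,
\]

so dividing the stored value by W·R recovers the running hit rate.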
@@ -1082,6 +1089,10 @@ moves_loop: // When in check, search starts from here
       {
           Depth r = reduction(improving, depth, moveCount);
 
+          // Decrease reduction if the ttHit running average is large
+          if (thisThread->ttHitAverage > 544 * ttHitAverageResolution * ttHitAverageWindow / 1024)
+              r--;
+
           // Reduction if other threads are searching this position.
           if (th.marked())
               r++;
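Using the fixed-point relation above, the threshold constant maps back to the 0.53125 rate mentioned in the commit message:

\[
\frac{544 \cdot R \cdot W / 1024}{W \cdot R} = \frac{544}{1024} = 0.53125 .
\]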
src/thread.h

@@ -61,6 +61,7 @@ public:
   Pawns::Table pawnsTable;
   Material::Table materialTable;
   size_t pvIdx, pvLast;
+  uint64_t ttHitAverage;
   int selDepth, nmpMinPly;
   Color nmpColor;
   std::atomic<uint64_t> nodes, tbHits, bestMoveChanges;