Mirror of https://github.com/sockspls/badfish, synced 2025-05-02 01:29:36 +00:00
Simplify limiting extensions.

Replace the current method for limiting extensions, used to avoid the search
getting stuck, with a much simpler one. The test position in 73018a0337 can
still be searched without the search getting stuck.

Fixes #3815: the search now makes progress in rootDepth, and shows robust
behavior in a depth 10 search over 1M positions.
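
In essence, the per-thread explosion-state machinery is replaced by a single
guard around the extension block. A minimal sketch of the idea (identifiers as
in the Stockfish sources; the surrounding search code is elided):

    // Before: track a running average of double extensions per colour and,
    // once a thread enters MUST_CALM_DOWN, clamp the depth at deep plies.
    // After: simply allow extensions only while the current ply is within
    // twice the root depth, which bounds the selective search depth directly.
    if (ss->ply < thisThread->rootDepth * 2)
    {
        // ... singular, check and quiet ttMove extensions, as before ...
    }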
passed STC
https://tests.stockfishchess.org/tests/view/61e303e3babab931824dfb18
LLR: 2.94 (-2.94,2.94) <-2.25,0.25>
Total: 57568 W: 15449 L: 15327 D: 26792
Ptnml(0-2): 243, 6211, 15779, 6283, 268
passed LTC
https://tests.stockfishchess.org/tests/view/61e3586cbabab931824e091c
LLR: 2.96 (-2.94,2.94) <-2.25,0.25>
Total: 128200 W: 34632 L: 34613 D: 58955
Ptnml(0-2): 124, 12559, 38710, 12588, 119
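
For a rough reading of the LTC figures above, the W/L/D counts can be turned
into a plain logistic Elo estimate (only an approximation, not fishtest's own
SPRT statistic, but it shows the patch is roughly Elo-neutral, as expected for
a simplification tested with <-2.25, 0.25> bounds):

    #include <cmath>
    #include <cstdio>

    int main() {
        // LTC result: W 34632, L 34613, D 58955 (128200 games)
        const double W = 34632, L = 34613, D = 58955, N = W + L + D;
        const double score = (W + D / 2) / N;                   // ~0.50007
        const double elo   = -400 * std::log10(1 / score - 1);  // ~ +0.05 Elo
        std::printf("score %.5f  elo %+.2f\n", score, elo);
        return 0;
    }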
closes https://github.com/official-stockfish/Stockfish/pull/3899
Bench: 4550528
Parent: 77cf5704b6
Commit: 9083050be6
2 changed files with 59 additions and 100 deletions

src/search.cpp (152 changed lines)

@@ -88,30 +88,6 @@ namespace {
     return VALUE_DRAW + Value(2 * (thisThread->nodes & 1) - 1);
   }
 
-  // Check if the current thread is in a search explosion
-  ExplosionState search_explosion(Thread* thisThread) {
-
-    uint64_t nodesNow = thisThread->nodes;
-    bool explosive =    thisThread->doubleExtensionAverage[WHITE].is_greater(2, 100)
-                     || thisThread->doubleExtensionAverage[BLACK].is_greater(2, 100);
-
-    if (explosive)
-        thisThread->nodesLastExplosive = nodesNow;
-    else
-        thisThread->nodesLastNormal = nodesNow;
-
-    if (   explosive
-        && thisThread->state == EXPLOSION_NONE
-        && nodesNow - thisThread->nodesLastNormal > 6000000)
-        thisThread->state = MUST_CALM_DOWN;
-
-    if (   thisThread->state == MUST_CALM_DOWN
-        && nodesNow - thisThread->nodesLastExplosive > 6000000)
-        thisThread->state = EXPLOSION_NONE;
-
-    return thisThread->state;
-  }
-
   // Skill structure is used to implement strength limit. If we have an uci_elo then
   // we convert it to a suitable fractional skill level using anchoring to CCRL Elo
   // (goldfish 1.13 = 2000) and a fit through Ordo derived Elo for match (TC 60+0.6)
@@ -327,16 +303,11 @@ void Thread::search() {
 
   multiPV = std::min(multiPV, rootMoves.size());
 
-  doubleExtensionAverage[WHITE].set(0, 100);  // initialize the running average at 0%
-  doubleExtensionAverage[BLACK].set(0, 100);  // initialize the running average at 0%
   complexityAverage.set(232, 1);
 
-  nodesLastExplosive = nodes;
-  nodesLastNormal = nodes;
-  state = EXPLOSION_NONE;
   trend = SCORE_ZERO;
   optimism[ us] = Value(25);
   optimism[~us] = -optimism[us];
 
   int searchAgainCounter = 0;
 
@@ -548,14 +519,6 @@ namespace {
   template <NodeType nodeType>
   Value search(Position& pos, Stack* ss, Value alpha, Value beta, Depth depth, bool cutNode) {
 
-    Thread* thisThread = pos.this_thread();
-
-    // Step 0. Limit search explosion
-    if (   ss->ply > 10
-        && search_explosion(thisThread) == MUST_CALM_DOWN
-        && depth > (ss-1)->depth)
-        depth = (ss-1)->depth;
-
     constexpr bool PvNode = nodeType != NonPV;
     constexpr bool rootNode = nodeType == Root;
     const Depth maxNextDepth = rootNode ? depth : depth + 1;
@@ -596,6 +559,7 @@ namespace {
     int moveCount, captureCount, quietCount, bestMoveCount, improvement, complexity;
 
     // Step 1. Initialize node
+    Thread* thisThread = pos.this_thread();
     ss->inCheck = pos.checkers();
     priorCapture = pos.captured_piece();
     Color us = pos.side_to_move();
@@ -643,9 +607,6 @@ namespace {
     ss->depth = depth;
     Square prevSq = to_sq((ss-1)->currentMove);
 
-    // Update the running average statistics for double extensions
-    thisThread->doubleExtensionAverage[us].update(ss->depth > (ss-1)->depth);
-
     // Initialize statScore to zero for the grandchildren of the current position.
     // So statScore is shared between all grandchildren and only the first grandchild
     // starts with statScore = 0. Later grandchildren start with the last calculated
@@ -1077,65 +1038,68 @@ moves_loop: // When in check, search starts here
       }
 
       // Step 14. Extensions (~66 Elo)
-
-      // Singular extension search (~58 Elo). If all moves but one fail low on a
-      // search of (alpha-s, beta-s), and just one fails high on (alpha, beta),
-      // then that move is singular and should be extended. To verify this we do
-      // a reduced search on all the other moves but the ttMove and if the
-      // result is lower than ttValue minus a margin, then we will extend the ttMove.
-      if (   !rootNode
-          &&  depth >= 6 + 2 * (PvNode && tte->is_pv())
-          &&  move == ttMove
-          && !excludedMove // Avoid recursive singular search
-       /* &&  ttValue != VALUE_NONE Already implicit in the next condition */
-          &&  abs(ttValue) < VALUE_KNOWN_WIN
-          && (tte->bound() & BOUND_LOWER)
-          &&  tte->depth() >= depth - 3)
-      {
-          Value singularBeta = ttValue - 3 * depth;
-          Depth singularDepth = (depth - 1) / 2;
-
-          ss->excludedMove = move;
-          value = search<NonPV>(pos, ss, singularBeta - 1, singularBeta, singularDepth, cutNode);
-          ss->excludedMove = MOVE_NONE;
-
-          if (value < singularBeta)
-          {
-              extension = 1;
-
-              // Avoid search explosion by limiting the number of double extensions
-              if (   !PvNode
-                  && value < singularBeta - 75
-                  && ss->doubleExtensions <= 6)
-                  extension = 2;
-          }
-
-          // Multi-cut pruning
-          // Our ttMove is assumed to fail high, and now we failed high also on a reduced
-          // search without the ttMove. So we assume this expected Cut-node is not singular,
-          // that multiple moves fail high, and we can prune the whole subtree by returning
-          // a soft bound.
-          else if (singularBeta >= beta)
-              return singularBeta;
-
-          // If the eval of ttMove is greater than beta, we reduce it (negative extension)
-          else if (ttValue >= beta)
-              extension = -2;
-      }
-
-      // Check extensions (~1 Elo)
-      else if (   givesCheck
-               && depth > 6
-               && abs(ss->staticEval) > 100)
-          extension = 1;
-
-      // Quiet ttMove extensions (~0 Elo)
-      else if (   PvNode
-               && move == ttMove
-               && move == ss->killers[0]
-               && (*contHist[0])[movedPiece][to_sq(move)] >= 10000)
-          extension = 1;
+      // We take care to not overdo to avoid search getting stuck.
+      if (ss->ply < thisThread->rootDepth * 2)
+      {
+          // Singular extension search (~58 Elo). If all moves but one fail low on a
+          // search of (alpha-s, beta-s), and just one fails high on (alpha, beta),
+          // then that move is singular and should be extended. To verify this we do
+          // a reduced search on all the other moves but the ttMove and if the
+          // result is lower than ttValue minus a margin, then we will extend the ttMove.
+          if (   !rootNode
+              &&  depth >= 6 + 2 * (PvNode && tte->is_pv())
+              &&  move == ttMove
+              && !excludedMove // Avoid recursive singular search
+           /* &&  ttValue != VALUE_NONE Already implicit in the next condition */
+              &&  abs(ttValue) < VALUE_KNOWN_WIN
+              && (tte->bound() & BOUND_LOWER)
+              &&  tte->depth() >= depth - 3)
+          {
+              Value singularBeta = ttValue - 3 * depth;
+              Depth singularDepth = (depth - 1) / 2;
+
+              ss->excludedMove = move;
+              value = search<NonPV>(pos, ss, singularBeta - 1, singularBeta, singularDepth, cutNode);
+              ss->excludedMove = MOVE_NONE;
+
+              if (value < singularBeta)
+              {
+                  extension = 1;
+
+                  // Avoid search explosion by limiting the number of double extensions
+                  if (   !PvNode
+                      && value < singularBeta - 75
+                      && ss->doubleExtensions <= 6)
+                      extension = 2;
+              }
+
+              // Multi-cut pruning
+              // Our ttMove is assumed to fail high, and now we failed high also on a reduced
+              // search without the ttMove. So we assume this expected Cut-node is not singular,
+              // that multiple moves fail high, and we can prune the whole subtree by returning
+              // a soft bound.
+              else if (singularBeta >= beta)
+                  return singularBeta;
+
+              // If the eval of ttMove is greater than beta, we reduce it (negative extension)
+              else if (ttValue >= beta)
+                  extension = -2;
+          }
+
+          // Check extensions (~1 Elo)
+          else if (   givesCheck
+                   && depth > 6
+                   && abs(ss->staticEval) > 100)
+              extension = 1;
+
+          // Quiet ttMove extensions (~0 Elo)
+          else if (   PvNode
+                   && move == ttMove
+                   && move == ss->killers[0]
+                   && (*contHist[0])[movedPiece][to_sq(move)] >= 10000)
+              extension = 1;
+      }
 
       // Add extension to new depth
       newDepth += extension;
       ss->doubleExtensions = (ss-1)->doubleExtensions + (extension == 2);
src/thread.h

@@ -60,16 +60,11 @@ public:
   Pawns::Table pawnsTable;
   Material::Table materialTable;
   size_t pvIdx, pvLast;
-  RunningAverage doubleExtensionAverage[COLOR_NB];
   RunningAverage complexityAverage;
-  uint64_t nodesLastExplosive;
-  uint64_t nodesLastNormal;
   std::atomic<uint64_t> nodes, tbHits, bestMoveChanges;
-  Value bestValue;
   int selDepth, nmpMinPly;
   Color nmpColor;
-  ExplosionState state;
-  Value optimism[COLOR_NB];
+  Value bestValue, optimism[COLOR_NB];
 
   Position rootPos;
   StateInfo rootState;