Mirror of https://github.com/sockspls/badfish, synced 2025-05-02 01:29:36 +00:00
Revert "doEvenDeeperSearch + tuning"
This reverts commit 98965c139d.

The increase in depth could lead to search explosions, most visible with
tablebases (TB).
fixes https://github.com/official-stockfish/Stockfish/issues/4276
closes https://github.com/official-stockfish/Stockfish/pull/4256
Bench: 3872306
parent 8f817ef082
commit 955edf1d1d

2 changed files with 10 additions and 11 deletions
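For orientation before the diff, here is a minimal, self-contained sketch (not Stockfish code and not part of this commit) of the depth adjustment being reverted. The 64, 11 and 582 margins and the doDeeperSearch / doEvenDeeperSearch / doShallowerSearch flags come from the hunks below; the concrete depth, reduction, alpha and value numbers are invented for illustration. It shows how the extra term lets the full-depth re-search run one ply deeper than the node that requested it, which, repeated along lines of inflated tablebase-bound scores, is the explosion path the commit message describes.

// Toy illustration only -- not Stockfish code. The 64 / 11 / 582 constants
// come from the diff below; depth, reduction, alpha and value are invented.
#include <iostream>

int main() {
    int depth    = 10;           // remaining depth at the current node
    int newDepth = depth - 1;    // nominal depth of the full re-search
    int d        = newDepth - 3; // depth of the reduced LMR search (reduction r = 3)
    int alpha    = 0;
    int value    = 700;          // LMR result; TB-bound scores are routinely this large

    bool doDeeperSearch     = value > alpha + 64 + 11 * (newDepth - d);
    bool doEvenDeeperSearch = value > alpha + 582;  // the term removed by this revert
    bool doShallowerSearch  = false;                // stand-in for: value < bestValue + newDepth

    // Reverted adjustment: the re-search can run at depth + 1, deeper than the
    // node that spawned it, so remaining depth need not shrink along such lines.
    int withTerm    = newDepth + doDeeperSearch - doShallowerSearch + doEvenDeeperSearch;

    // Restored adjustment: the re-search depth is capped at the node's own depth.
    int withoutTerm = newDepth + doDeeperSearch - doShallowerSearch;

    std::cout << "re-search depth with doEvenDeeperSearch:    " << withTerm    << '\n'
              << "re-search depth without doEvenDeeperSearch: " << withoutTerm << '\n';
}

With these numbers the sketch prints 11 versus 10, i.e. the reverted variant schedules a re-search deeper than the originating node's remaining depth of 10.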
src/evaluate.cpp

@@ -1063,7 +1063,7 @@ Value Eval::evaluate(const Position& pos, int* complexity) {
   else
   {
       int nnueComplexity;
-      int scale = 1076 + 96 * pos.non_pawn_material() / 5120;
+      int scale = 1064 + 106 * pos.non_pawn_material() / 5120;

       Color stm = pos.side_to_move();
       Value optimism = pos.this_thread()->optimism[stm];
@@ -1071,21 +1071,21 @@ Value Eval::evaluate(const Position& pos, int* complexity) {
       Value nnue = NNUE::evaluate(pos, true, &nnueComplexity);

       // Blend nnue complexity with (semi)classical complexity
-      nnueComplexity = (  412 * nnueComplexity
-                        + 428 * abs(psq - nnue)
+      nnueComplexity = (  416 * nnueComplexity
+                        + 424 * abs(psq - nnue)
                         + (optimism > 0 ? int(optimism) * int(psq - nnue) : 0)
-                       ) / 1026;
+                       ) / 1024;

       // Return hybrid NNUE complexity to caller
       if (complexity)
           *complexity = nnueComplexity;

-      optimism = optimism * (278 + nnueComplexity) / 256;
-      v = (nnue * scale + optimism * (scale - 755)) / 1024;
+      optimism = optimism * (269 + nnueComplexity) / 256;
+      v = (nnue * scale + optimism * (scale - 754)) / 1024;
   }

   // Damp down the evaluation linearly when shuffling
-  v = v * (197 - pos.rule50_count()) / 214;
+  v = v * (195 - pos.rule50_count()) / 211;

   // Guarantee evaluation does not hit the tablebase range
   v = std::clamp(v, VALUE_TB_LOSS_IN_MAX_PLY + 1, VALUE_TB_WIN_IN_MAX_PLY - 1);
src/search.cpp

@@ -81,7 +81,7 @@ namespace {

   // History and stats update bonus, based on depth
   int stat_bonus(Depth d) {
-    return std::min((12 * d + 282) * d - 349 , 1480);
+    return std::min((12 * d + 282) * d - 349 , 1594);
   }

   // Add a small random component to draw evaluations to avoid 3-fold blindness
@@ -1175,7 +1175,7 @@ moves_loop: // When in check, search starts here
                         - 4433;

           // Decrease/increase reduction for moves with a good/bad history (~30 Elo)
-          r -= ss->statScore / (13000 + 4152 * (depth > 7 && depth < 19));
+          r -= ss->statScore / (13628 + 4000 * (depth > 7 && depth < 19));

           // In general we want to cap the LMR depth search at newDepth, but when
           // reduction is negative, we allow this move a limited search extension
@@ -1190,10 +1190,9 @@ moves_loop: // When in check, search starts here
           // Adjust full depth search based on LMR results - if result
           // was good enough search deeper, if it was bad enough search shallower
           const bool doDeeperSearch = value > (alpha + 64 + 11 * (newDepth - d));
-          const bool doEvenDeeperSearch = value > alpha + 582;
           const bool doShallowerSearch = value < bestValue + newDepth;

-          newDepth += doDeeperSearch - doShallowerSearch + doEvenDeeperSearch;
+          newDepth += doDeeperSearch - doShallowerSearch;

           if (newDepth > d)
               value = -search<NonPV>(pos, ss+1, -(alpha+1), -alpha, newDepth, !cutNode);