
Remove PSQT-only mode

Passed STC:
LLR: 2.94 (-2.94,2.94) <-1.75,0.25>
Total: 94208 W: 24270 L: 24112 D: 45826
Ptnml(0-2): 286, 11186, 24009, 11330, 293
https://tests.stockfishchess.org/tests/view/6635ddd773559a8aa8582826

Passed LTC:
LLR: 2.95 (-2.94,2.94) <-1.75,0.25>
Total: 114960 W: 29107 L: 28982 D: 56871
Ptnml(0-2): 37, 12683, 31924, 12790, 46
https://tests.stockfishchess.org/tests/view/663604a973559a8aa85881ed

closes #5214

Bench 1653939
Author: cj5716 (2024-05-04 09:52:27 +08:00), committed by Joost VandeVondele
Parent: be026bdcb2
Commit: 8ee9905d8b
8 changed files with 172 additions and 244 deletions
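
For orientation: this change collapses the former three-way choice (PSQT-only / small net / big net) in Eval::evaluate into a plain small-net/big-net dispatch. A minimal sketch reassembled from the new side of the first hunk below (declarations of pos, networks and caches assumed from the surrounding function):

    // Sketch only; names and signatures are the ones visible in the diff below.
    int  simpleEval = simple_eval(pos, pos.side_to_move());
    bool smallNet   = std::abs(simpleEval) > SmallNetThreshold;  // PsqtOnlyThreshold no longer exists

    int   nnueComplexity;
    Value nnue = smallNet ? networks.small.evaluate(pos, &caches.small, true, &nnueComplexity)
                          : networks.big.evaluate(pos, &caches.big, true, &nnueComplexity);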


@@ -56,13 +56,11 @@ Value Eval::evaluate(const Eval::NNUE::Networks& networks,
     int  simpleEval = simple_eval(pos, pos.side_to_move());
     bool smallNet   = std::abs(simpleEval) > SmallNetThreshold;
-    bool psqtOnly   = std::abs(simpleEval) > PsqtOnlyThreshold;
     int  nnueComplexity;
     int  v;

-    Value nnue = smallNet
-                   ? networks.small.evaluate(pos, &caches.small, true, &nnueComplexity, psqtOnly)
-                   : networks.big.evaluate(pos, &caches.big, true, &nnueComplexity, false);
+    Value nnue = smallNet ? networks.small.evaluate(pos, &caches.small, true, &nnueComplexity)
+                          : networks.big.evaluate(pos, &caches.big, true, &nnueComplexity);

     const auto adjustEval = [&](int optDiv, int nnueDiv, int pawnCountConstant, int pawnCountMul,
                                 int npmConstant, int evalDiv, int shufflingConstant,
@@ -83,8 +81,6 @@ Value Eval::evaluate(const Eval::NNUE::Networks& networks,
     if (!smallNet)
         adjustEval(524, 32395, 942, 11, 139, 1058, 178, 204);
-    else if (psqtOnly)
-        adjustEval(517, 32857, 908, 7, 155, 1006, 224, 238);
     else
         adjustEval(515, 32793, 944, 9, 140, 1067, 206, 206);


@@ -29,7 +29,7 @@ class Position;

 namespace Eval {

-constexpr inline int SmallNetThreshold = 1274, PsqtOnlyThreshold = 2389;
+constexpr inline int SmallNetThreshold = 1274;

 // The default net name MUST follow the format nn-[SHA256 first 12 digits].nnue
 // for the build process (profile-build and fishtest) to work. Do not change the


@@ -189,8 +189,7 @@ template<typename Arch, typename Transformer>
 Value Network<Arch, Transformer>::evaluate(const Position& pos,
                                            AccumulatorCaches::Cache<FTDimensions>* cache,
                                            bool adjusted,
-                                           int* complexity,
-                                           bool psqtOnly) const {
+                                           int* complexity) const {

     // We manually align the arrays on the stack because with gcc < 9.3
     // overaligning stack variables with alignas() doesn't work correctly.
@@ -211,12 +210,11 @@ Value Network<Arch, Transformer>::evaluate(const Position&
     ASSERT_ALIGNED(transformedFeatures, alignment);

     const int  bucket = (pos.count<ALL_PIECES>() - 1) / 4;
-    const auto psqt =
-      featureTransformer->transform(pos, cache, transformedFeatures, bucket, psqtOnly);
-    const auto positional = !psqtOnly ? (network[bucket]->propagate(transformedFeatures)) : 0;
+    const auto psqt       = featureTransformer->transform(pos, cache, transformedFeatures, bucket);
+    const auto positional = network[bucket]->propagate(transformedFeatures);

     if (complexity)
-        *complexity = !psqtOnly ? std::abs(psqt - positional) / OutputScale : 0;
+        *complexity = std::abs(psqt - positional) / OutputScale;

     // Give more value to positional evaluation when adjusted flag is set
     if (adjusted)
@@ -261,10 +259,9 @@ void Network<Arch, Transformer>::verify(std::string evalfilePath) const {

 template<typename Arch, typename Transformer>
-void Network<Arch, Transformer>::hint_common_access(const Position& pos,
-                                                    AccumulatorCaches::Cache<FTDimensions>* cache,
-                                                    bool psqtOnly) const {
-    featureTransformer->hint_common_access(pos, cache, psqtOnly);
+void Network<Arch, Transformer>::hint_common_access(
+  const Position& pos, AccumulatorCaches::Cache<FTDimensions>* cache) const {
+    featureTransformer->hint_common_access(pos, cache);
 }

 template<typename Arch, typename Transformer>
@@ -293,7 +290,7 @@ Network<Arch, Transformer>::trace_evaluate(const Position&
     for (IndexType bucket = 0; bucket < LayerStacks; ++bucket)
     {
         const auto materialist =
-          featureTransformer->transform(pos, cache, transformedFeatures, bucket, false);
+          featureTransformer->transform(pos, cache, transformedFeatures, bucket);
         const auto positional = network[bucket]->propagate(transformedFeatures);

         t.psqt[bucket] = static_cast<Value>(materialist / OutputScale);


@@ -56,13 +56,11 @@ class Network {
     Value evaluate(const Position& pos,
                    AccumulatorCaches::Cache<FTDimensions>* cache,
                    bool adjusted = false,
-                   int* complexity = nullptr,
-                   bool psqtOnly = false) const;
+                   int* complexity = nullptr) const;

     void hint_common_access(const Position& pos,
-                            AccumulatorCaches::Cache<FTDimensions>* cache,
-                            bool psqtOnly) const;
+                            AccumulatorCaches::Cache<FTDimensions>* cache) const;

     void verify(std::string evalfilePath) const;
     NnueEvalTrace trace_evaluate(const Position& pos,


@@ -38,7 +38,6 @@ struct alignas(CacheLineSize) Accumulator {
     std::int16_t accumulation[COLOR_NB][Size];
     std::int32_t psqtAccumulation[COLOR_NB][PSQTBuckets];
     bool         computed[COLOR_NB];
-    bool         computedPSQT[COLOR_NB];
 };
@@ -63,7 +62,6 @@ struct AccumulatorCaches {
         PSQTWeightType psqtAccumulation[PSQTBuckets];
         Bitboard       byColorBB[COLOR_NB];
         Bitboard       byTypeBB[PIECE_TYPE_NB];
-        bool           psqtOnly;

         // To initialize a refresh entry, we set all its bitboards empty,
         // so we put the biases in the accumulation, without any weights on top


@@ -309,10 +309,9 @@ class FeatureTransformer {
     std::int32_t transform(const Position& pos,
                            AccumulatorCaches::Cache<HalfDimensions>* cache,
                            OutputType* output,
-                           int bucket,
-                           bool psqtOnly) const {
-        update_accumulator<WHITE>(pos, cache, psqtOnly);
-        update_accumulator<BLACK>(pos, cache, psqtOnly);
+                           int bucket) const {
+        update_accumulator<WHITE>(pos, cache);
+        update_accumulator<BLACK>(pos, cache);

         const Color perspectives[2] = {pos.side_to_move(), ~pos.side_to_move()};
         const auto& psqtAccumulation = (pos.state()->*accPtr).psqtAccumulation;
@@ -320,9 +319,6 @@ class FeatureTransformer {
           (psqtAccumulation[perspectives[0]][bucket] - psqtAccumulation[perspectives[1]][bucket])
           / 2;

-        if (psqtOnly)
-            return psqt;
-
         const auto& accumulation = (pos.state()->*accPtr).accumulation;

         for (IndexType p = 0; p < 2; ++p)
@@ -375,23 +371,20 @@ class FeatureTransformer {
     }  // end of function transform()

     void hint_common_access(const Position& pos,
-                            AccumulatorCaches::Cache<HalfDimensions>* cache,
-                            bool psqtOnly) const {
-        hint_common_access_for_perspective<WHITE>(pos, cache, psqtOnly);
-        hint_common_access_for_perspective<BLACK>(pos, cache, psqtOnly);
+                            AccumulatorCaches::Cache<HalfDimensions>* cache) const {
+        hint_common_access_for_perspective<WHITE>(pos, cache);
+        hint_common_access_for_perspective<BLACK>(pos, cache);
     }

    private:
     template<Color Perspective>
     [[nodiscard]] std::pair<StateInfo*, StateInfo*>
-    try_find_computed_accumulator(const Position& pos, bool psqtOnly) const {
+    try_find_computed_accumulator(const Position& pos) const {
         // Look for a usable accumulator of an earlier position. We keep track
         // of the estimated gain in terms of features to be added/subtracted.
         StateInfo *st = pos.state(), *next = nullptr;
         int gain = FeatureSet::refresh_cost(pos);
-        while (st->previous
-               && (!(st->*accPtr).computedPSQT[Perspective]
-                   || (!psqtOnly && !(st->*accPtr).computed[Perspective])))
+        while (st->previous && !(st->*accPtr).computed[Perspective])
         {
             // This governs when a full feature refresh is needed and how many
             // updates are better than just one full refresh.
@@ -412,8 +405,7 @@ class FeatureTransformer {
     template<Color Perspective, size_t N>
     void update_accumulator_incremental(const Position& pos,
                                         StateInfo* computed_st,
-                                        StateInfo* states_to_update[N],
-                                        bool psqtOnly) const {
+                                        StateInfo* states_to_update[N]) const {
         static_assert(N > 0);
         assert([&]() {
             for (size_t i = 0; i < N; ++i)
@@ -443,8 +435,7 @@ class FeatureTransformer {
         for (int i = N - 1; i >= 0; --i)
         {
-            (states_to_update[i]->*accPtr).computed[Perspective]     = !psqtOnly;
-            (states_to_update[i]->*accPtr).computedPSQT[Perspective] = true;
+            (states_to_update[i]->*accPtr).computed[Perspective] = true;

             const StateInfo* end_state = i == 0 ? computed_st : states_to_update[i - 1];
@@ -462,8 +453,6 @@ class FeatureTransformer {
             {
                 assert(states_to_update[0]);

-                if (!psqtOnly)
-                {
                 auto accIn =
                   reinterpret_cast<const vec_t*>(&(st->*accPtr).accumulation[Perspective][0]);
                 auto accOut = reinterpret_cast<vec_t*>(
@@ -490,7 +479,6 @@ class FeatureTransformer {
                     accOut[k] = vec_sub_16(vec_add_16(accIn[k], columnA[k]),
                                            vec_add_16(columnR0[k], columnR1[k]));
                 }
-                }

                 auto accPsqtIn =
                   reinterpret_cast<const psqt_vec_t*>(&(st->*accPtr).psqtAccumulation[Perspective][0]);
@@ -523,7 +511,6 @@ class FeatureTransformer {
             }
             else
             {
-                if (!psqtOnly)
                 for (IndexType j = 0; j < HalfDimensions / TileHeight; ++j)
                 {
                     // Load accumulator
@@ -553,9 +540,8 @@ class FeatureTransformer {
                     }

                     // Store accumulator
-                    auto accTileOut =
-                      reinterpret_cast<vec_t*>(&(states_to_update[i]->*accPtr)
-                                                  .accumulation[Perspective][j * TileHeight]);
+                    auto accTileOut = reinterpret_cast<vec_t*>(
+                      &(states_to_update[i]->*accPtr).accumulation[Perspective][j * TileHeight]);
                     for (IndexType k = 0; k < NumRegs; ++k)
                         vec_store(&accTileOut[k], acc[k]);
                 }
@@ -601,10 +587,8 @@ class FeatureTransformer {
 #else
         for (IndexType i = 0; i < N; ++i)
         {
-            if (!psqtOnly)
             std::memcpy((states_to_update[i]->*accPtr).accumulation[Perspective],
-                        (st->*accPtr).accumulation[Perspective],
-                        HalfDimensions * sizeof(BiasType));
+                        (st->*accPtr).accumulation[Perspective], HalfDimensions * sizeof(BiasType));

             for (std::size_t k = 0; k < PSQTBuckets; ++k)
                 (states_to_update[i]->*accPtr).psqtAccumulation[Perspective][k] =
@@ -614,13 +598,10 @@ class FeatureTransformer {
             // Difference calculation for the deactivated features
             for (const auto index : removed[i])
-            {
-                if (!psqtOnly)
             {
                 const IndexType offset = HalfDimensions * index;
                 for (IndexType j = 0; j < HalfDimensions; ++j)
                     (st->*accPtr).accumulation[Perspective][j] -= weights[offset + j];
-                }

                 for (std::size_t k = 0; k < PSQTBuckets; ++k)
                     (st->*accPtr).psqtAccumulation[Perspective][k] -=
@@ -629,13 +610,10 @@ class FeatureTransformer {
             // Difference calculation for the activated features
             for (const auto index : added[i])
-            {
-                if (!psqtOnly)
             {
                 const IndexType offset = HalfDimensions * index;
                 for (IndexType j = 0; j < HalfDimensions; ++j)
                     (st->*accPtr).accumulation[Perspective][j] += weights[offset + j];
-                }

                 for (std::size_t k = 0; k < PSQTBuckets; ++k)
                     (st->*accPtr).psqtAccumulation[Perspective][k] +=
@@ -647,21 +625,13 @@ class FeatureTransformer {
     template<Color Perspective>
     void update_accumulator_refresh_cache(const Position& pos,
-                                          AccumulatorCaches::Cache<HalfDimensions>* cache,
-                                          bool psqtOnly) const {
+                                          AccumulatorCaches::Cache<HalfDimensions>* cache) const {
         assert(cache != nullptr);

         Square ksq = pos.square<KING>(Perspective);
         auto& entry = (*cache)[ksq][Perspective];
         FeatureSet::IndexList removed, added;

-        if (entry.psqtOnly && !psqtOnly)
-        {
-            entry.clear(biases);
-            FeatureSet::append_active_indices<Perspective>(pos, added);
-        }
-        else
-        {
         for (Color c : {WHITE, BLACK})
         {
             for (PieceType pt = PAWN; pt <= KING; ++pt)
@@ -684,17 +654,14 @@ class FeatureTransformer {
                 }
             }
         }
-        }

         auto& accumulator = pos.state()->*accPtr;
-        accumulator.computed[Perspective]     = !psqtOnly;
-        accumulator.computedPSQT[Perspective] = true;
+        accumulator.computed[Perspective] = true;

 #ifdef VECTOR
         vec_t      acc[NumRegs];
         psqt_vec_t psqt[NumPsqtRegs];

-        if (!psqtOnly)
         for (IndexType j = 0; j < HalfDimensions / TileHeight; ++j)
         {
             auto entryTile = reinterpret_cast<vec_t*>(&entry.accumulation[j * TileHeight]);
@@ -770,25 +737,19 @@ class FeatureTransformer {
 #else

         for (const auto index : removed)
-        {
-            if (!psqtOnly)
         {
             const IndexType offset = HalfDimensions * index;
             for (IndexType j = 0; j < HalfDimensions; ++j)
                 entry.accumulation[j] -= weights[offset + j];
-            }

             for (std::size_t k = 0; k < PSQTBuckets; ++k)
                 entry.psqtAccumulation[k] -= psqtWeights[index * PSQTBuckets + k];
         }
         for (const auto index : added)
-        {
-            if (!psqtOnly)
         {
             const IndexType offset = HalfDimensions * index;
             for (IndexType j = 0; j < HalfDimensions; ++j)
                 entry.accumulation[j] += weights[offset + j];
-            }

             for (std::size_t k = 0; k < PSQTBuckets; ++k)
                 entry.psqtAccumulation[k] += psqtWeights[index * PSQTBuckets + k];
@@ -799,7 +760,6 @@ class FeatureTransformer {
         // The accumulator of the refresh entry has been updated.
         // Now copy its content to the actual accumulator we were refreshing

-        if (!psqtOnly)
         std::memcpy(accumulator.accumulation[Perspective], entry.accumulation,
                     sizeof(BiasType) * HalfDimensions);
@@ -811,14 +771,11 @@ class FeatureTransformer {
         for (PieceType pt = PAWN; pt <= KING; ++pt)
             entry.byTypeBB[pt] = pos.pieces(pt);
-        entry.psqtOnly = psqtOnly;
     }

     template<Color Perspective>
     void hint_common_access_for_perspective(const Position& pos,
-                                            AccumulatorCaches::Cache<HalfDimensions>* cache,
-                                            bool psqtOnly) const {
+                                            AccumulatorCaches::Cache<HalfDimensions>* cache) const {

         // Works like update_accumulator, but performs less work.
         // Updates ONLY the accumulator for pos.
@@ -826,33 +783,28 @@ class FeatureTransformer {
         // Look for a usable accumulator of an earlier position. We keep track
         // of the estimated gain in terms of features to be added/subtracted.

         // Fast early exit.
-        if ((pos.state()->*accPtr).computed[Perspective]
-            || (psqtOnly && (pos.state()->*accPtr).computedPSQT[Perspective]))
+        if ((pos.state()->*accPtr).computed[Perspective])
             return;

-        auto [oldest_st, _] = try_find_computed_accumulator<Perspective>(pos, psqtOnly);
+        auto [oldest_st, _] = try_find_computed_accumulator<Perspective>(pos);

-        if ((oldest_st->*accPtr).computed[Perspective]
-            || (psqtOnly && (oldest_st->*accPtr).computedPSQT[Perspective]))
+        if ((oldest_st->*accPtr).computed[Perspective])
         {
             // Only update current position accumulator to minimize work.
             StateInfo* states_to_update[1] = {pos.state()};
-            update_accumulator_incremental<Perspective, 1>(pos, oldest_st, states_to_update,
-                                                           psqtOnly);
+            update_accumulator_incremental<Perspective, 1>(pos, oldest_st, states_to_update);
         }
         else
-            update_accumulator_refresh_cache<Perspective>(pos, cache, psqtOnly);
+            update_accumulator_refresh_cache<Perspective>(pos, cache);
     }

     template<Color Perspective>
     void update_accumulator(const Position& pos,
-                            AccumulatorCaches::Cache<HalfDimensions>* cache,
-                            bool psqtOnly) const {
+                            AccumulatorCaches::Cache<HalfDimensions>* cache) const {

-        auto [oldest_st, next] = try_find_computed_accumulator<Perspective>(pos, psqtOnly);
+        auto [oldest_st, next] = try_find_computed_accumulator<Perspective>(pos);

-        if ((oldest_st->*accPtr).computed[Perspective]
-            || (psqtOnly && (oldest_st->*accPtr).computedPSQT[Perspective]))
+        if ((oldest_st->*accPtr).computed[Perspective])
         {
             if (next == nullptr)
                 return;
@@ -866,19 +818,17 @@ class FeatureTransformer {
             {
                 StateInfo* states_to_update[1] = {next};
-                update_accumulator_incremental<Perspective, 1>(pos, oldest_st, states_to_update,
-                                                               psqtOnly);
+                update_accumulator_incremental<Perspective, 1>(pos, oldest_st, states_to_update);
             }
             else
             {
                 StateInfo* states_to_update[2] = {next, pos.state()};
-                update_accumulator_incremental<Perspective, 2>(pos, oldest_st, states_to_update,
-                                                               psqtOnly);
+                update_accumulator_incremental<Perspective, 2>(pos, oldest_st, states_to_update);
             }
         }
         else
-            update_accumulator_refresh_cache<Perspective>(pos, cache, psqtOnly);
+            update_accumulator_refresh_cache<Perspective>(pos, cache);
     }

     template<IndexType Size>

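The feature-transformer hunks above all follow the same pattern: each psqtOnly branch collapses into the unconditional path, and the separate computedPSQT flag gives way to the single computed flag. As an illustration, the hint path reassembled from the new side of the diff (a sketch, not a literal copy of the file):

    // With PSQT-only mode gone, one "computed" flag per perspective is enough.
    template<Color Perspective>
    void hint_common_access_for_perspective(const Position& pos,
                                            AccumulatorCaches::Cache<HalfDimensions>* cache) const {
        if ((pos.state()->*accPtr).computed[Perspective])  // fast early exit
            return;

        auto [oldest_st, _] = try_find_computed_accumulator<Perspective>(pos);

        if ((oldest_st->*accPtr).computed[Perspective])
        {
            // Only update the current position's accumulator to minimize work.
            StateInfo* states_to_update[1] = {pos.state()};
            update_accumulator_incremental<Perspective, 1>(pos, oldest_st, states_to_update);
        }
        else
            update_accumulator_refresh_cache<Perspective>(pos, cache);
    }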

@@ -48,10 +48,9 @@ void hint_common_parent_position(const Position& pos,
     int simpleEvalAbs = std::abs(simple_eval(pos, pos.side_to_move()));
     if (simpleEvalAbs > Eval::SmallNetThreshold)
-        networks.small.hint_common_access(pos, &caches.small,
-                                          simpleEvalAbs > Eval::PsqtOnlyThreshold);
+        networks.small.hint_common_access(pos, &caches.small);
     else
-        networks.big.hint_common_access(pos, &caches.big, false);
+        networks.big.hint_common_access(pos, &caches.big);
 }

 namespace {
@@ -149,18 +148,14 @@ trace(Position& pos, const Eval::NNUE::Networks& networks, Eval::NNUE::Accumulat
                 auto st = pos.state();

                 pos.remove_piece(sq);
-                st->accumulatorBig.computed[WHITE] = st->accumulatorBig.computed[BLACK] =
-                  st->accumulatorBig.computedPSQT[WHITE] = st->accumulatorBig.computedPSQT[BLACK] =
-                    false;
+                st->accumulatorBig.computed[WHITE] = st->accumulatorBig.computed[BLACK] = false;

                 Value eval = networks.big.evaluate(pos, &caches.big);
                 eval       = pos.side_to_move() == WHITE ? eval : -eval;
                 v          = base - eval;

                 pos.put_piece(pc, sq);
-                st->accumulatorBig.computed[WHITE] = st->accumulatorBig.computed[BLACK] =
-                  st->accumulatorBig.computedPSQT[WHITE] = st->accumulatorBig.computedPSQT[BLACK] =
-                    false;
+                st->accumulatorBig.computed[WHITE] = st->accumulatorBig.computed[BLACK] = false;
             }

             writeSquare(f, r, pc, v);


@@ -681,10 +681,7 @@ void Position::do_move(Move m, StateInfo& newSt, bool givesCheck) {

     // Used by NNUE
     st->accumulatorBig.computed[WHITE] = st->accumulatorBig.computed[BLACK] =
-      st->accumulatorBig.computedPSQT[WHITE] = st->accumulatorBig.computedPSQT[BLACK] =
-        st->accumulatorSmall.computed[WHITE] = st->accumulatorSmall.computed[BLACK] =
-          st->accumulatorSmall.computedPSQT[WHITE] = st->accumulatorSmall.computedPSQT[BLACK] =
-            false;
+      st->accumulatorSmall.computed[WHITE] = st->accumulatorSmall.computed[BLACK] = false;

     auto& dp     = st->dirtyPiece;
     dp.dirty_num = 1;
@@ -971,10 +968,7 @@ void Position::do_null_move(StateInfo& newSt, TranspositionTable& tt) {
     st->dirtyPiece.dirty_num = 0;
     st->dirtyPiece.piece[0]  = NO_PIECE;  // Avoid checks in UpdateAccumulator()
     st->accumulatorBig.computed[WHITE] = st->accumulatorBig.computed[BLACK] =
-      st->accumulatorBig.computedPSQT[WHITE] = st->accumulatorBig.computedPSQT[BLACK] =
-        st->accumulatorSmall.computed[WHITE] = st->accumulatorSmall.computed[BLACK] =
-          st->accumulatorSmall.computedPSQT[WHITE] = st->accumulatorSmall.computedPSQT[BLACK] =
-            false;
+      st->accumulatorSmall.computed[WHITE] = st->accumulatorSmall.computed[BLACK] = false;

     if (st->epSquare != SQ_NONE)
     {