
Fix endless reparenting loop

The check for detecting whether a split point still has all of its
slaves running is:

   slavesMask == allSlavesMask

When a thread reparents, slavesMask is increased; then, if that same
thread finishes because there are no more moves, slavesMask returns to
its original value and the above condition becomes true again. The
just-finished thread therefore immediately reparents to the same split
point, starts, and immediately exits, in a tight loop that ends only
when a second slave finishes and clears its bit from slavesMask, making
the condition false. This yields a spuriously and anomalously high
number of fake reparents.
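For illustration only, a minimal standalone sketch (not part of this
commit; thread indices and masks are chosen arbitrarily) of why the old
check retriggers:

    #include <cstdint>
    #include <cstdio>

    int main() {
        // Threads 0..2 are booked as slaves when the split point is created.
        uint64_t allSlavesMask = 0x7;
        uint64_t slavesMask    = 0x7;      // all booked slaves still running

        slavesMask |= 1ULL << 3;           // thread 3 reparents: masks now differ
        slavesMask &= ~(1ULL << 3);        // no moves left, thread 3 finishes at once

        // The old check is true again, so thread 3 reparents to the same
        // split point and spins until a second slave clears its own bit.
        std::printf("old check: %s\n", slavesMask == allSlavesMask ? "true" : "false");
        return 0;
    }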

This patch rewrites the logic to avoid this pitfall; with it, the
reparenting success rate drops to a more realistic 5-10% in the
4 threads case.

As a side effect, note that the maxThreadsPerSplitPoint limit no longer
applies when reparenting.
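The patched scheme can be sketched in isolation as follows (a reduced
model, not the actual Thread/SplitPoint code; locking and the
more_than_one() test are omitted). The flag, once cleared, never goes
back to true, so the loop above cannot occur:

    #include <cstdint>
    #include <cstdio>

    // Reduced model: the flag is set when the split point is created and
    // cleared whenever any slave finishes.
    struct MiniSplitPoint {
        uint64_t slavesMask;
        bool     allSlavesRunning;
    };

    int main() {
        MiniSplitPoint sp{0x7, true};        // created with slaves 0..2 running

        sp.slavesMask |= 1ULL << 3;          // thread 3 reparents
        sp.allSlavesRunning = false;         // thread 3 finishes (no moves left)
        sp.slavesMask &= ~(1ULL << 3);

        // The new check stays false, so the just-finished thread does not
        // re-enter the same split point.
        std::printf("new check: %s\n", sp.allSlavesRunning ? "true" : "false");
        return 0;
    }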

No functional change.

Signed-off-by: Marco Costalba <mcostalba@gmail.com>
Marco Costalba 2012-04-17 09:11:34 +02:00
parent 5392007a24
commit ce159b16b9
3 changed files with 10 additions and 14 deletions


@@ -1844,6 +1844,7 @@ void Thread::idle_loop(SplitPoint* sp_master) {
           assert(is_searching);
           is_searching = false;
+          sp->allSlavesRunning = false;
           sp->slavesMask &= ~(1ULL << idx);
           sp->nodes += pos.nodes_searched();
@@ -1860,20 +1861,19 @@ void Thread::idle_loop(SplitPoint* sp_master) {
           // unsafe because if we are exiting there is a chance are already freed.
           lock_release(sp->lock);
-          // Try to reparent to another split point
+          // Try to reparent to the first split point, with still all slaves
+          // running, where we are available as a possible slave.
           for (int i = 0; i < Threads.size(); i++)
           {
               Thread* th = &Threads[i];
               int spCnt = th->splitPointsCnt;
               SplitPoint* latest = &th->splitPoints[spCnt ? spCnt - 1 : 0];
-              // Find the first split point with still all slaves running
-              // where we are available as a possible slave.
               if (    this->is_available_to(th)
                   &&  spCnt > 0
                   && !th->cutoff_occurred()
-                  &&  latest->slavesMask == latest->allSlavesMask
-                  &&  more_than_one(latest->allSlavesMask))
+                  &&  latest->allSlavesRunning
+                  &&  more_than_one(latest->slavesMask))
               {
                   lock_grab(latest->lock);
                   lock_grab(Threads.splitLock);
@@ -1883,10 +1883,10 @@ void Thread::idle_loop(SplitPoint* sp_master) {
                   if (    this->is_available_to(th)
                       &&  spCnt == th->splitPointsCnt
                       && !th->cutoff_occurred()
-                      &&  latest->slavesMask == latest->allSlavesMask
-                      &&  more_than_one(latest->allSlavesMask))
+                      &&  latest->allSlavesRunning
+                      &&  more_than_one(latest->slavesMask))
                   {
-                      latest->slavesMask |= 1ULL << idx; // allSlavesMask is not updated
+                      latest->slavesMask |= 1ULL << idx;
                       curSplitPoint = latest;
                       is_searching = true;
                   }


@@ -319,7 +319,7 @@ Value ThreadsManager::split(Position& pos, Stack* ss, Value alpha, Value beta,
   sp->master = master;
   sp->cutoff = false;
   sp->slavesMask = 1ULL << master->idx;
-  sp->allSlavesMask = 1ULL << master->idx;
+  sp->allSlavesRunning = true;
   sp->depth = depth;
   sp->bestMove = *bestMove;
   sp->threatMove = threatMove;
@@ -348,7 +348,6 @@ Value ThreadsManager::split(Position& pos, Stack* ss, Value alpha, Value beta,
       if (threads[i]->is_available_to(master))
       {
           sp->slavesMask |= 1ULL << i;
-          sp->allSlavesMask |= 1ULL << i;
           threads[i]->curSplitPoint = sp;
           threads[i]->is_searching = true; // Slave leaves idle_loop()
@@ -356,10 +355,7 @@ Value ThreadsManager::split(Position& pos, Stack* ss, Value alpha, Value beta,
           threads[i]->wake_up();
           if (++slavesCnt + 1 >= maxThreadsPerSplitPoint) // Master is always included
-          {
-              sp->allSlavesMask = 0; // Disable reparenting to this split point
              break;
-          }
       }
   master->splitPointsCnt++;


@@ -51,13 +51,13 @@ struct SplitPoint {
   // Shared data
   Lock lock;
   volatile uint64_t slavesMask;
-  volatile uint64_t allSlavesMask;
   volatile int64_t nodes;
   volatile Value alpha;
   volatile Value bestValue;
   volatile Move bestMove;
   volatile int moveCount;
   volatile bool cutoff;
+  volatile bool allSlavesRunning;
 };