mirror of
https://github.com/sockspls/badfish
synced 2025-05-01 01:03:09 +00:00
Do not sleep, but yield
During the search, do not block on a condition variable; instead, use std::this_thread::yield(). Clear gain with 16 threads. Again, results vary highly depending on hardware, but on average it's a clear gain. ELO: 12.17 +-4.3 (95%) LOS: 100.0% Total: 7998 W: 1407 L: 1127 D: 5464. There is no functional change in single-thread mode. Resolves #294
This commit is contained in:
parent
a4b98a052e
commit
f04f50b368
2 changed files with 21 additions and 28 deletions
|
@ -1595,8 +1595,25 @@ void Thread::idle_loop() {
|
||||||
|
|
||||||
assert(!this_sp || (this_sp->master == this && searching));
|
assert(!this_sp || (this_sp->master == this && searching));
|
||||||
|
|
||||||
while (!exit)
|
while ( !exit
|
||||||
|
&& !(this_sp && this_sp->slavesMask.none()))
|
||||||
{
|
{
|
||||||
|
// If there is nothing to do, sleep.
|
||||||
|
while( !exit
|
||||||
|
&& !(this_sp && this_sp->slavesMask.none())
|
||||||
|
&& !searching)
|
||||||
|
{
|
||||||
|
if ( !this_sp
|
||||||
|
&& !Threads.main()->thinking)
|
||||||
|
{
|
||||||
|
std::unique_lock<Mutex> lk(mutex);
|
||||||
|
while (!exit && !Threads.main()->thinking)
|
||||||
|
sleepCondition.wait(lk);
|
||||||
|
}
|
||||||
|
else
|
||||||
|
std::this_thread::yield();
|
||||||
|
}
|
||||||
|
|
||||||
// If this thread has been assigned work, launch a search
|
// If this thread has been assigned work, launch a search
|
||||||
while (searching)
|
while (searching)
|
||||||
{
|
{
|
||||||
|
@ -1639,15 +1656,6 @@ void Thread::idle_loop() {
|
||||||
sp->allSlavesSearching = false;
|
sp->allSlavesSearching = false;
|
||||||
sp->nodes += pos.nodes_searched();
|
sp->nodes += pos.nodes_searched();
|
||||||
|
|
||||||
// Wake up the master thread so to allow it to return from the idle
|
|
||||||
// loop in case we are the last slave of the split point.
|
|
||||||
if (this != sp->master && sp->slavesMask.none())
|
|
||||||
{
|
|
||||||
assert(!sp->master->searching);
|
|
||||||
|
|
||||||
sp->master->notify_one();
|
|
||||||
}
|
|
||||||
|
|
||||||
// After releasing the lock we can't access any SplitPoint related data
|
// After releasing the lock we can't access any SplitPoint related data
|
||||||
// in a safe way because it could have been released under our feet by
|
// in a safe way because it could have been released under our feet by
|
||||||
// the sp master.
|
// the sp master.
|
||||||
|
@ -1711,21 +1719,6 @@ void Thread::idle_loop() {
|
||||||
sp->mutex.unlock();
|
sp->mutex.unlock();
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Avoid races with notify_one() fired from last slave of the split point
|
|
||||||
std::unique_lock<Mutex> lk(mutex);
|
|
||||||
|
|
||||||
// If we are master and all slaves have finished then exit idle_loop
|
|
||||||
if (this_sp && this_sp->slavesMask.none())
|
|
||||||
{
|
|
||||||
assert(!searching);
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
|
|
||||||
// If we are not searching, wait for a condition to be signaled instead of
|
|
||||||
// wasting CPU time polling for work.
|
|
||||||
if (!searching && !exit)
|
|
||||||
sleepCondition.wait(lk);
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -184,8 +184,6 @@ void Thread::split(Position& pos, Stack* ss, Value alpha, Value beta, Value* bes
|
||||||
}
|
}
|
||||||
|
|
||||||
slave->allocMutex.unlock();
|
slave->allocMutex.unlock();
|
||||||
|
|
||||||
slave->notify_one(); // Could be sleeping
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Everything is set up. The master thread enters the idle loop, from which
|
// Everything is set up. The master thread enters the idle loop, from which
|
||||||
|
@ -375,5 +373,7 @@ void ThreadPool::start_thinking(const Position& pos, const LimitsType& limits,
|
||||||
RootMoves.push_back(RootMove(m));
|
RootMoves.push_back(RootMove(m));
|
||||||
|
|
||||||
main()->thinking = true;
|
main()->thinking = true;
|
||||||
main()->notify_one(); // Starts main thread
|
|
||||||
|
for (Thread* th : *this)
|
||||||
|
th->notify_one();
|
||||||
}
|
}
|
||||||
|
|
Loading…
Add table
Reference in a new issue