Mirror of https://github.com/sockspls/badfish, synced 2025-04-30 08:43:09 +00:00
Tidy up comments in thread.cpp
No functional change.

Signed-off-by: Marco Costalba <mcostalba@gmail.com>
parent eabba1119f
commit dafd5b5864

2 changed files with 43 additions and 38 deletions
src/search.cpp

@@ -409,7 +409,8 @@ bool think(Position& pos, const SearchLimits& limits, Move searchMoves[]) {
   read_evaluation_uci_options(pos.side_to_move());
   Threads.read_uci_options();
 
-  // If needed allocate pawn and material hash tables and adjust TT size
+  // Allocate pawn and material hash tables if number of active threads
+  // increased and set a new TT size if changed.
   Threads.init_hash_tables();
   TT.set_size(Options["Hash"].value<int>());
 
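The rewritten comment pins down when these calls do real work: hash tables are reallocated only if the number of active threads grew, and the TT is resized only if the "Hash" option changed. A minimal sketch of such an idempotent resize, assuming a hypothetical TranspositionTable type rather than the engine's real class:

```cpp
#include <cstddef>
#include <vector>

struct TranspositionTable {
    std::vector<char> table;
    std::size_t sizeMB = 0;

    // Reallocate only when the requested size differs from the current
    // one, so calling this before every search costs nothing.
    void set_size(std::size_t mbRequested) {
        if (mbRequested == sizeMB)
            return;

        sizeMB = mbRequested;
        table.assign(sizeMB * 1024 * 1024, 0); // reallocate and clear
    }
};
```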
@@ -2142,7 +2143,7 @@ void ThreadsManager::idle_loop(int threadID, SplitPoint* sp) {
 
   while (true)
   {
-      // Slave threads can exit as soon as AllThreadsShouldExit raises,
+      // Slave threads can exit as soon as allThreadsShouldExit flag raises,
       // master should exit as last one.
       if (allThreadsShouldExit)
       {
@@ -2151,7 +2152,7 @@ void ThreadsManager::idle_loop(int threadID, SplitPoint* sp) {
           return;
       }
 
-      // If we are not thinking, wait for a condition to be signaled
+      // If we are not searching, wait for a condition to be signaled
       // instead of wasting CPU time polling for work.
       while (   threadID >= activeThreads
              || threads[threadID].state == Thread::INITIALIZING
@@ -2166,7 +2167,7 @@ void ThreadsManager::idle_loop(int threadID, SplitPoint* sp) {
           // Grab the lock to avoid races with Thread::wake_up()
           lock_grab(&threads[threadID].sleepLock);
 
-          // If we are master and all slaves have finished do not go to sleep
+          // If we are master and all slaves have finished don't go to sleep
           for (i = 0; sp && i < activeThreads && !sp->is_slave[i]; i++) {}
           allFinished = (i == activeThreads);
 
@@ -2176,7 +2177,10 @@ void ThreadsManager::idle_loop(int threadID, SplitPoint* sp) {
               break;
           }
 
-          // Do sleep here after retesting sleep conditions
+          // Do sleep after retesting sleep conditions under lock protection, in
+          // particular we need to avoid a deadlock in case a master thread has,
+          // in the meanwhile, allocated us and sent the wake_up() call before we
+          // had the chance to grab the lock.
           if (threadID >= activeThreads || threads[threadID].state == Thread::AVAILABLE)
               cond_wait(&threads[threadID].sleepCond, &threads[threadID].sleepLock);
 
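The expanded comment documents a classic lost-wakeup defence: the sleep condition is retested while the sleep lock is held, so a master that books this thread and fires wake_up() between the unlocked test and the wait cannot have its signal dropped. A minimal sketch of the same pattern, using C++11 primitives in place of the engine's lock_grab()/cond_wait() wrappers:

```cpp
#include <condition_variable>
#include <mutex>

struct Worker {
    std::mutex sleepLock;
    std::condition_variable sleepCond;
    bool hasWork = false;

    void idle_wait() {
        std::unique_lock<std::mutex> lk(sleepLock); // grab the lock first
        while (!hasWork)                            // retest under the lock
            sleepCond.wait(lk);                     // atomically unlock and sleep
    }

    void wake_up() {
        {
            std::lock_guard<std::mutex> lk(sleepLock);
            hasWork = true; // publish work under the same lock
        }
        sleepCond.notify_one(); // waiter either saw hasWork or is inside wait()
    }
};
```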
@@ -2191,7 +2195,6 @@ void ThreadsManager::idle_loop(int threadID, SplitPoint* sp) {
           threads[threadID].state = Thread::SEARCHING;
 
           // Copy split point position and search stack and call search()
-          // with SplitPoint template parameter set to true.
           SearchStack ss[PLY_MAX_PLUS_2];
           SplitPoint* tsp = threads[threadID].splitPoint;
           Position pos(*tsp->pos, threadID);
@@ -2227,14 +2230,10 @@ void ThreadsManager::idle_loop(int threadID, SplitPoint* sp) {
 
       if (allFinished)
       {
-          // Because sp->slaves[] is reset under lock protection,
+          // Because sp->is_slave[] is reset under lock protection,
           // be sure sp->lock has been released before to return.
           lock_grab(&(sp->lock));
           lock_release(&(sp->lock));
-
-          // In helpful master concept a master can help only a sub-tree, and
-          // because here is all finished is not possible master is booked.
-          assert(threads[threadID].state == Thread::AVAILABLE);
           return;
       }
   }
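The empty lock_grab()/lock_release() pair kept by this hunk is a barrier, not a critical section: the last slave resets sp->is_slave[] while holding sp->lock, so acquiring and immediately dropping that lock guarantees the slave has fully left its critical section before the master returns and the split point becomes reusable. A sketch of the idiom, with a hypothetical SplitPointLite type standing in for the real struct:

```cpp
#include <mutex>

struct SplitPointLite {
    std::mutex lock;
    bool is_slave[16]; // reset by slaves while holding 'lock'
};

// Block until the last writer of is_slave[] has released the mutex;
// after this returns the split point can be safely recycled.
void drain_writers(SplitPointLite& sp) {
    sp.lock.lock();   // waits out any slave still inside its critical section
    sp.lock.unlock(); // nothing to do inside: the acquire itself is the barrier
}
```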
src/thread.cpp

@@ -132,9 +132,10 @@ void ThreadsManager::init() {
   // Allocate pawn and material hash tables for main thread
   init_hash_tables();
 
+  // Initialize threads lock, used when allocating slaves during splitting
   lock_init(&threadsLock);
 
-  // Initialize thread and split point locks
+  // Initialize sleep and split point locks
   for (int i = 0; i < MAX_THREADS; i++)
   {
       lock_init(&threads[i].sleepLock);
@@ -169,7 +170,7 @@ void ThreadsManager::init() {
 }
 
 
-// exit() is called to cleanly exit the threads when the program finishes
+// exit() is called to cleanly terminate the threads when the program finishes
 
 void ThreadsManager::exit() {
 
@@ -178,14 +179,14 @@ void ThreadsManager::exit() {
 
   for (int i = 0; i < MAX_THREADS; i++)
   {
-      // Wake up all the threads and waits for termination
+      // Wake up all the threads and wait for termination
       if (i != 0)
       {
           threads[i].wake_up();
           while (threads[i].state != Thread::TERMINATED) {}
       }
 
-      // Now we can safely destroy the locks and wait conditions
+      // Now we can safely destroy locks and wait conditions
       lock_destroy(&threads[i].sleepLock);
       cond_destroy(&threads[i].sleepCond);
 
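The corrected comments make the shutdown order explicit: wake each slave, wait until it reports Thread::TERMINATED, and only then destroy its lock and condition variable, since those objects must never be torn down while a thread could still touch them. A minimal sketch of that ordering with C++11 primitives and a hypothetical WorkerThread type:

```cpp
#include <atomic>
#include <thread>

struct WorkerThread {
    std::atomic<bool> exitRequested{false}; // plays the role of allThreadsShouldExit
    std::atomic<bool> terminated{false};    // set by the thread on its way out
    std::thread th;
};

void shutdown(WorkerThread& w) {
    w.exitRequested = true;        // the wake-up signal, simplified to a flag here
    while (!w.terminated)          // same spirit as the TERMINATED spin above
        std::this_thread::yield();
    w.th.join();                   // only now is it safe to destroy locks/conds
}
```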
@@ -258,25 +259,25 @@ Value ThreadsManager::split(Position& pos, SearchStack* ss, Value alpha, Value b
       return bestValue;
 
   // Pick the next available split point object from the split point stack
-  SplitPoint& splitPoint = masterThread.splitPoints[masterThread.activeSplitPoints];
+  SplitPoint* sp = masterThread.splitPoints + masterThread.activeSplitPoints;
 
   // Initialize the split point object
-  splitPoint.parent = masterThread.splitPoint;
-  splitPoint.master = master;
-  splitPoint.is_betaCutoff = false;
-  splitPoint.depth = depth;
-  splitPoint.threatMove = threatMove;
-  splitPoint.alpha = alpha;
-  splitPoint.beta = beta;
-  splitPoint.nodeType = nodeType;
-  splitPoint.bestValue = bestValue;
-  splitPoint.mp = mp;
-  splitPoint.moveCount = moveCount;
-  splitPoint.pos = &pos;
-  splitPoint.nodes = 0;
-  splitPoint.ss = ss;
+  sp->parent = masterThread.splitPoint;
+  sp->master = master;
+  sp->is_betaCutoff = false;
+  sp->depth = depth;
+  sp->threatMove = threatMove;
+  sp->alpha = alpha;
+  sp->beta = beta;
+  sp->nodeType = nodeType;
+  sp->bestValue = bestValue;
+  sp->mp = mp;
+  sp->moveCount = moveCount;
+  sp->pos = &pos;
+  sp->nodes = 0;
+  sp->ss = ss;
   for (i = 0; i < activeThreads; i++)
-      splitPoint.is_slave[i] = false;
+      sp->is_slave[i] = false;
 
   // If we are here it means we are not available
   assert(masterThread.state == Thread::SEARCHING);
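Switching from a splitPoint reference to an sp pointer changes no behaviour; both pick the same slot from the per-thread split point stack, where activeSplitPoints acts as the stack top. A minimal sketch of that layout, with a hypothetical, trimmed-down field set:

```cpp
const int MAX_ACTIVE_SPLIT_POINTS = 8; // illustrative bound, not the real constant

struct SplitPoint {
    SplitPoint* parent; // enclosing split point of the same master
    int nodes;          // nodes searched under this split point
    // ... alpha, beta, depth, is_slave[], etc. omitted
};

struct ThreadLite {
    SplitPoint* splitPoint;                           // innermost active split point
    SplitPoint  splitPoints[MAX_ACTIVE_SPLIT_POINTS]; // pre-allocated stack
    int         activeSplitPoints;                    // stack top

    // The next free slot, exactly as split() picks it above
    SplitPoint* next_split_point() { return splitPoints + activeSplitPoints; }
};
```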
@@ -292,8 +293,8 @@ Value ThreadsManager::split(Position& pos, SearchStack* ss, Value alpha, Value b
       if (i != master && threads[i].is_available_to(master))
       {
           workersCnt++;
-          splitPoint.is_slave[i] = true;
-          threads[i].splitPoint = &splitPoint;
+          sp->is_slave[i] = true;
+          threads[i].splitPoint = sp;
 
           // This makes the slave to exit from idle_loop()
           threads[i].state = Thread::WORKISWAITING;
@@ -308,7 +309,7 @@ Value ThreadsManager::split(Position& pos, SearchStack* ss, Value alpha, Value b
   if (!Fake && workersCnt == 1)
       return bestValue;
 
-  masterThread.splitPoint = &splitPoint;
+  masterThread.splitPoint = sp;
   masterThread.activeSplitPoints++;
   masterThread.state = Thread::WORKISWAITING;
 
@@ -317,7 +318,11 @@ Value ThreadsManager::split(Position& pos, SearchStack* ss, Value alpha, Value b
   // Thread::WORKISWAITING. We send the split point as a second parameter to
   // the idle loop, which means that the main thread will return from the idle
   // loop when all threads have finished their work at this split point.
-  idle_loop(master, &splitPoint);
+  idle_loop(master, sp);
 
+  // In helpful master concept a master can help only a sub-tree, and
+  // because here is all finished is not possible master is booked.
+  assert(masterThread.state == Thread::AVAILABLE);
+
   // We have returned from the idle loop, which means that all threads are
   // finished. Note that changing state and decreasing activeSplitPoints is done
@@ -326,12 +331,13 @@ Value ThreadsManager::split(Position& pos, SearchStack* ss, Value alpha, Value b
 
   masterThread.state = Thread::SEARCHING;
   masterThread.activeSplitPoints--;
-  masterThread.splitPoint = splitPoint.parent;
 
   lock_release(&threadsLock);
 
-  pos.set_nodes_searched(pos.nodes_searched() + splitPoint.nodes);
-  return splitPoint.bestValue;
+  masterThread.splitPoint = sp->parent;
+  pos.set_nodes_searched(pos.nodes_searched() + sp->nodes);
+
+  return sp->bestValue;
 }
 
 // Explicit template instantiations