
Tidy up comments in thread.cpp

No functional change.

Signed-off-by: Marco Costalba <mcostalba@gmail.com>
Marco Costalba 2011-08-08 10:53:52 +01:00
parent eabba1119f
commit dafd5b5864
2 changed files with 43 additions and 38 deletions

View file

@@ -409,7 +409,8 @@ bool think(Position& pos, const SearchLimits& limits, Move searchMoves[]) {
  read_evaluation_uci_options(pos.side_to_move());
  Threads.read_uci_options();
- // If needed allocate pawn and material hash tables and adjust TT size
+ // Allocate pawn and material hash tables if number of active threads
+ // increased and set a new TT size if changed.
  Threads.init_hash_tables();
  TT.set_size(Options["Hash"].value<int>());
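
The rewritten comment describes a lazy-reallocation pattern: the tables are only rebuilt when the relevant setting actually changed. A minimal sketch of that idiom, with illustrative stand-in types (HashTable and Entry are not the engine's TranspositionTable classes):

    #include <cstddef>
    #include <vector>

    // Sketch of "resize only if the requested size changed".
    class HashTable {
    public:
        void set_size(std::size_t mbSize) {
            std::size_t newEntries = mbSize * 1024 * 1024 / sizeof(Entry);
            if (newEntries == entries.size())
                return;                          // size unchanged: nothing to do
            entries.assign(newEntries, Entry()); // otherwise reallocate and clear
        }
    private:
        struct Entry { int key = 0; int value = 0; };
        std::vector<Entry> entries;
    };
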
@@ -2142,7 +2143,7 @@ void ThreadsManager::idle_loop(int threadID, SplitPoint* sp) {
  while (true)
  {
- // Slave threads can exit as soon as AllThreadsShouldExit raises,
+ // Slave threads can exit as soon as allThreadsShouldExit flag raises,
  // master should exit as last one.
  if (allThreadsShouldExit)
  {
@@ -2151,7 +2152,7 @@ void ThreadsManager::idle_loop(int threadID, SplitPoint* sp) {
  return;
  }
- // If we are not thinking, wait for a condition to be signaled
+ // If we are not searching, wait for a condition to be signaled
  // instead of wasting CPU time polling for work.
  while ( threadID >= activeThreads
  || threads[threadID].state == Thread::INITIALIZING
@@ -2166,7 +2167,7 @@ void ThreadsManager::idle_loop(int threadID, SplitPoint* sp) {
  // Grab the lock to avoid races with Thread::wake_up()
  lock_grab(&threads[threadID].sleepLock);
- // If we are master and all slaves have finished do not go to sleep
+ // If we are master and all slaves have finished don't go to sleep
  for (i = 0; sp && i < activeThreads && !sp->is_slave[i]; i++) {}
  allFinished = (i == activeThreads);
@@ -2176,7 +2177,10 @@ void ThreadsManager::idle_loop(int threadID, SplitPoint* sp) {
  break;
  }
- // Do sleep here after retesting sleep conditions
+ // Do sleep after retesting sleep conditions under lock protection, in
+ // particular we need to avoid a deadlock in case a master thread has,
+ // in the meanwhile, allocated us and sent the wake_up() call before we
+ // had the chance to grab the lock.
  if (threadID >= activeThreads || threads[threadID].state == Thread::AVAILABLE)
  cond_wait(&threads[threadID].sleepCond, &threads[threadID].sleepLock);
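
The expanded comment states the standard condition-variable rule: re-test the wait predicate while holding the lock, so a wake_up() issued between the test and the wait cannot be lost. A small sketch of that pattern using standard C++ primitives instead of the engine's lock_grab()/cond_wait() wrappers (Worker and workIsWaiting are illustrative names, not engine identifiers):

    #include <condition_variable>
    #include <mutex>

    struct Worker {
        std::mutex sleepLock;
        std::condition_variable sleepCond;
        bool workIsWaiting = false;   // set by the master under sleepLock

        void idle_wait() {
            std::unique_lock<std::mutex> lk(sleepLock);   // grab the lock first
            // Re-test the condition while holding the lock: if the master
            // already set workIsWaiting and signalled before we got here,
            // we skip the wait instead of sleeping forever.
            while (!workIsWaiting)
                sleepCond.wait(lk);
        }

        void wake_up() {              // called by the master thread
            std::lock_guard<std::mutex> lk(sleepLock);
            workIsWaiting = true;
            sleepCond.notify_one();
        }
    };
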
@@ -2191,7 +2195,6 @@ void ThreadsManager::idle_loop(int threadID, SplitPoint* sp) {
  threads[threadID].state = Thread::SEARCHING;
  // Copy split point position and search stack and call search()
- // with SplitPoint template parameter set to true.
  SearchStack ss[PLY_MAX_PLUS_2];
  SplitPoint* tsp = threads[threadID].splitPoint;
  Position pos(*tsp->pos, threadID);
@@ -2227,14 +2230,10 @@ void ThreadsManager::idle_loop(int threadID, SplitPoint* sp) {
  if (allFinished)
  {
- // Because sp->slaves[] is reset under lock protection,
+ // Because sp->is_slave[] is reset under lock protection,
  // be sure sp->lock has been released before to return.
  lock_grab(&(sp->lock));
  lock_release(&(sp->lock));
- // In helpful master concept a master can help only a sub-tree, and
- // because here is all finished is not possible master is booked.
- assert(threads[threadID].state == Thread::AVAILABLE);
  return;
  }
  }
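
The paired lock_grab()/lock_release() kept here acts as a handshake: because the master clears sp->is_slave[] while holding sp->lock, acquiring and immediately releasing that same lock guarantees the master is done touching the split point before the slave returns. A hedged sketch of the idea, using std::mutex and simplified stand-in names (SplitPointLite is not the engine's SplitPoint):

    #include <mutex>

    struct SplitPointLite {
        std::mutex lock;
        bool is_slave[8] = {};
    };

    // Master side: slave flags are cleared while holding the split point lock.
    void master_unbook_slave(SplitPointLite& sp, int slaveId) {
        std::lock_guard<std::mutex> lk(sp.lock);
        sp.is_slave[slaveId] = false;
    }

    // Slave side: grab and release the same lock before returning, so any
    // master still inside master_unbook_slave() has finished with sp.
    void slave_return_barrier(SplitPointLite& sp) {
        sp.lock.lock();
        sp.lock.unlock();
    }
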

View file

@@ -132,9 +132,10 @@ void ThreadsManager::init() {
  // Allocate pawn and material hash tables for main thread
  init_hash_tables();
+ // Initialize threads lock, used when allocating slaves during splitting
  lock_init(&threadsLock);
- // Initialize thread and split point locks
+ // Initialize sleep and split point locks
  for (int i = 0; i < MAX_THREADS; i++)
  {
  lock_init(&threads[i].sleepLock);
@@ -169,7 +170,7 @@ void ThreadsManager::init() {
  }
- // exit() is called to cleanly exit the threads when the program finishes
+ // exit() is called to cleanly terminate the threads when the program finishes
  void ThreadsManager::exit() {
@@ -178,14 +179,14 @@ void ThreadsManager::exit() {
  for (int i = 0; i < MAX_THREADS; i++)
  {
- // Wake up all the threads and waits for termination
+ // Wake up all the threads and wait for termination
  if (i != 0)
  {
  threads[i].wake_up();
  while (threads[i].state != Thread::TERMINATED) {}
  }
- // Now we can safely destroy the locks and wait conditions
+ // Now we can safely destroy locks and wait conditions
  lock_destroy(&threads[i].sleepLock);
  cond_destroy(&threads[i].sleepCond);
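
The exit() hunk documents a simple shutdown protocol: wake each thread, spin until it reports TERMINATED, and only then destroy its lock and condition variable. A self-contained sketch of that ordering, using std::thread and std::atomic in place of the engine's primitives (all names here are illustrative):

    #include <atomic>
    #include <chrono>
    #include <thread>

    std::atomic<bool> shouldExit{false};
    std::atomic<bool> terminated{false};

    void worker_loop() {
        while (!shouldExit.load())
            std::this_thread::sleep_for(std::chrono::milliseconds(1)); // idle
        terminated.store(true);  // signal that cleanup may proceed
    }

    int main() {
        std::thread worker(worker_loop);
        shouldExit.store(true);            // ask the worker to stop ("wake up")
        while (!terminated.load()) {}      // busy-wait for termination, as in exit()
        worker.join();                     // now safe to destroy its resources
        return 0;
    }
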
@@ -258,25 +259,25 @@ Value ThreadsManager::split(Position& pos, SearchStack* ss, Value alpha, Value b
  return bestValue;
  // Pick the next available split point object from the split point stack
- SplitPoint& splitPoint = masterThread.splitPoints[masterThread.activeSplitPoints];
+ SplitPoint* sp = masterThread.splitPoints + masterThread.activeSplitPoints;
  // Initialize the split point object
- splitPoint.parent = masterThread.splitPoint;
- splitPoint.master = master;
- splitPoint.is_betaCutoff = false;
- splitPoint.depth = depth;
- splitPoint.threatMove = threatMove;
- splitPoint.alpha = alpha;
- splitPoint.beta = beta;
- splitPoint.nodeType = nodeType;
- splitPoint.bestValue = bestValue;
- splitPoint.mp = mp;
- splitPoint.moveCount = moveCount;
- splitPoint.pos = &pos;
- splitPoint.nodes = 0;
- splitPoint.ss = ss;
+ sp->parent = masterThread.splitPoint;
+ sp->master = master;
+ sp->is_betaCutoff = false;
+ sp->depth = depth;
+ sp->threatMove = threatMove;
+ sp->alpha = alpha;
+ sp->beta = beta;
+ sp->nodeType = nodeType;
+ sp->bestValue = bestValue;
+ sp->mp = mp;
+ sp->moveCount = moveCount;
+ sp->pos = &pos;
+ sp->nodes = 0;
+ sp->ss = ss;
  for (i = 0; i < activeThreads; i++)
- splitPoint.is_slave[i] = false;
+ sp->is_slave[i] = false;
  // If we are here it means we are not available
  assert(masterThread.state == Thread::SEARCHING);
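
This hunk replaces the splitPoint reference with a pointer taken directly into the per-thread splitPoints array, so every field assignment becomes sp->field. A condensed sketch of that pointer-into-stack idiom with a reduced field set (SplitPointLite and ThreadLite are stand-ins, not the engine's structs, and a free slot is assumed to exist):

    constexpr int MAX_THREADS = 8;
    constexpr int MAX_ACTIVE_SPLIT_POINTS = 8;

    struct SplitPointLite {
        SplitPointLite* parent = nullptr;
        int master = 0;
        int depth = 0;
        int alpha = 0, beta = 0;
        bool is_slave[MAX_THREADS] = {};
    };

    struct ThreadLite {
        SplitPointLite splitPoints[MAX_ACTIVE_SPLIT_POINTS];
        SplitPointLite* splitPoint = nullptr;   // currently active split point
        int activeSplitPoints = 0;
    };

    // Pick the next free slot from the thread's split point stack and fill it,
    // mirroring the pointer-based style introduced by the diff.
    SplitPointLite* pick_and_init(ThreadLite& masterThread, int master,
                                  int depth, int alpha, int beta, int activeThreads) {
        SplitPointLite* sp = masterThread.splitPoints + masterThread.activeSplitPoints;
        sp->parent = masterThread.splitPoint;
        sp->master = master;
        sp->depth  = depth;
        sp->alpha  = alpha;
        sp->beta   = beta;
        for (int i = 0; i < activeThreads; i++)
            sp->is_slave[i] = false;
        return sp;
    }
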
@@ -292,8 +293,8 @@ Value ThreadsManager::split(Position& pos, SearchStack* ss, Value alpha, Value b
  if (i != master && threads[i].is_available_to(master))
  {
  workersCnt++;
- splitPoint.is_slave[i] = true;
- threads[i].splitPoint = &splitPoint;
+ sp->is_slave[i] = true;
+ threads[i].splitPoint = sp;
  // This makes the slave to exit from idle_loop()
  threads[i].state = Thread::WORKISWAITING;
@@ -308,7 +309,7 @@ Value ThreadsManager::split(Position& pos, SearchStack* ss, Value alpha, Value b
  if (!Fake && workersCnt == 1)
  return bestValue;
- masterThread.splitPoint = &splitPoint;
+ masterThread.splitPoint = sp;
  masterThread.activeSplitPoints++;
  masterThread.state = Thread::WORKISWAITING;
@@ -317,7 +318,11 @@ Value ThreadsManager::split(Position& pos, SearchStack* ss, Value alpha, Value b
  // Thread::WORKISWAITING. We send the split point as a second parameter to
  // the idle loop, which means that the main thread will return from the idle
  // loop when all threads have finished their work at this split point.
- idle_loop(master, &splitPoint);
+ idle_loop(master, sp);
+ // In the helpful master concept a master can help only a sub-tree, and
+ // because everything is finished here, the master cannot still be booked.
+ assert(masterThread.state == Thread::AVAILABLE);
  // We have returned from the idle loop, which means that all threads are
  // finished. Note that changing state and decreasing activeSplitPoints is done
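
The relocated assert follows from the helpful-master rule: a master entering idle_loop() with its own split point may only help inside that sub-tree, so once every slave has detached from sp it must come back AVAILABLE. A rough sketch of the master-side wait, with simplified names and a spin loop standing in for the engine's real helping logic:

    #include <atomic>

    constexpr int MAX_THREADS = 8;

    struct SplitPointLite {
        std::atomic<bool> is_slave[MAX_THREADS] = {};
    };

    // Illustrative only: return when no slave is still registered on sp.
    void master_idle_loop(SplitPointLite* sp, int activeThreads) {
        for (;;) {
            int i = 0;
            while (i < activeThreads && !sp->is_slave[i].load())
                i++;
            if (i == activeThreads)
                return;     // every slave detached: all work at sp is finished
            // ...a real implementation would help with split point work here...
        }
    }
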
@@ -326,12 +331,13 @@ Value ThreadsManager::split(Position& pos, SearchStack* ss, Value alpha, Value b
  masterThread.state = Thread::SEARCHING;
  masterThread.activeSplitPoints--;
- masterThread.splitPoint = splitPoint.parent;
  lock_release(&threadsLock);
- pos.set_nodes_searched(pos.nodes_searched() + splitPoint.nodes);
- return splitPoint.bestValue;
+ masterThread.splitPoint = sp->parent;
+ pos.set_nodes_searched(pos.nodes_searched() + sp->nodes);
+ return sp->bestValue;
  }
  // Explicit template instantiations
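
As the closing hunk appears to arrange it, only the fields other threads may inspect through Thread::is_available_to() (the state and the split point count) are updated under threadsLock, while the parent pointer, node-count accumulation, and return value are handled after the lock is released. A minimal sketch of that ordering with simplified stand-in types and std::mutex in place of the engine's lock wrappers:

    #include <mutex>

    enum ThreadState { SEARCHING, AVAILABLE };

    struct SplitPointLite {
        SplitPointLite* parent = nullptr;
        long nodes = 0;
        int bestValue = 0;
    };

    struct ThreadLite {
        ThreadState state = AVAILABLE;
        int activeSplitPoints = 0;
        SplitPointLite* splitPoint = nullptr;
    };

    std::mutex threadsLock;

    int finish_split(ThreadLite& masterThread, SplitPointLite* sp, long& nodesSearched) {
        {
            // Shared bookkeeping is updated while holding threadsLock.
            std::lock_guard<std::mutex> lk(threadsLock);
            masterThread.state = SEARCHING;
            masterThread.activeSplitPoints--;
        }
        // Master-private bookkeeping can happen after the lock is released.
        masterThread.splitPoint = sp->parent;
        nodesSearched += sp->nodes;
        return sp->bestValue;
    }
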