Mirror of https://github.com/sockspls/badfish, synced 2025-04-29 16:23:09 +00:00
Move SplitPoint array under its thread
Also clean up and rename that part of the code. No functional change, including with faked split enabled.

Signed-off-by: Marco Costalba <mcostalba@gmail.com>
parent 2dfec0f614
commit 04e1ba8aa2
2 changed files with 42 additions and 43 deletions
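In short, the commit removes the global two-dimensional SplitPointStack and gives each Thread its own embedded splitPoints[] array, renaming ACTIVE_SPLIT_POINTS_MAX to MAX_ACTIVE_SPLIT_POINTS along the way. A minimal sketch of the layout change, assembled from the hunks below (SplitPoint and ThreadState are stubbed here; the real definitions carry the full search state):

#include <cstdint>

const int MAX_THREADS = 8;
const int MAX_ACTIVE_SPLIT_POINTS = 8;  // was ACTIVE_SPLIT_POINTS_MAX

struct SplitPoint {
    SplitPoint* parent;
    volatile int slaves[MAX_THREADS];   // set to 1 for each thread helping here
    // ... position, bounds, move picker, search stacks ...
};

enum ThreadState { THREAD_AVAILABLE, THREAD_BOOKED /* , ... */ };

// Before: split points lived outside the threads, in a global array
// indexed by thread id:
//
//     SplitPoint SplitPointStack[MAX_THREADS][ACTIVE_SPLIT_POINTS_MAX];
//
// After: each Thread owns its split points, so all per-thread data sits
// together and the old pad[64] spacer is dropped.
struct Thread {
    uint64_t nodes;
    uint64_t betaCutOffs[2];
    volatile ThreadState state;
    SplitPoint* volatile splitPoint;    // innermost active split point
    volatile int activeSplitPoints;     // used slots in splitPoints[]
    SplitPoint splitPoints[MAX_ACTIVE_SPLIT_POINTS];
};

Thread threads[MAX_THREADS];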
@@ -98,7 +98,6 @@ namespace {
   int ActiveThreads;
   volatile bool AllThreadsShouldExit, AllThreadsShouldSleep;
   Thread threads[MAX_THREADS];
-  SplitPoint SplitPointStack[MAX_THREADS][ACTIVE_SPLIT_POINTS_MAX];

   Lock MPLock, WaitLock;

@@ -2436,10 +2435,10 @@
       SitIdleEvent[i] = CreateEvent(0, FALSE, FALSE, 0);
 #endif

-  // Initialize SplitPointStack locks
+  // Initialize splitPoints[] locks
   for (i = 0; i < MAX_THREADS; i++)
-      for (int j = 0; j < ACTIVE_SPLIT_POINTS_MAX; j++)
-          lock_init(&(SplitPointStack[i][j].lock), NULL);
+      for (int j = 0; j < MAX_ACTIVE_SPLIT_POINTS; j++)
+          lock_init(&(threads[i].splitPoints[j].lock), NULL);

   // Will be set just before program exits to properly end the threads
   AllThreadsShouldExit = false;
@@ -2493,8 +2492,8 @@

   // Now we can safely destroy the locks
   for (int i = 0; i < MAX_THREADS; i++)
-      for (int j = 0; j < ACTIVE_SPLIT_POINTS_MAX; j++)
-          lock_destroy(&(SplitPointStack[i][j].lock));
+      for (int j = 0; j < MAX_ACTIVE_SPLIT_POINTS; j++)
+          lock_destroy(&(threads[i].splitPoints[j].lock));

   lock_destroy(&WaitLock);
   lock_destroy(&MPLock);
@@ -2547,7 +2546,7 @@
   // Apply the "helpful master" concept if possible. Use localActiveSplitPoints
   // that is known to be > 0, instead of threads[slave].activeSplitPoints that
   // could have been set to 0 by another thread leading to an out of bound access.
-  if (SplitPointStack[slave][localActiveSplitPoints - 1].slaves[master])
+  if (threads[slave].splitPoints[localActiveSplitPoints - 1].slaves[master])
       return true;

   return false;
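An aside on the "helpful master" hunk just above: thread_is_available() is the one place where a thread indexes another thread's split point array, which is why the counter is snapshotted into localActiveSplitPoints before being used as an index. A hedged sketch of that check, reusing the stub types from the first sketch; only the final test is taken from this commit, the earlier conditions are assumptions about the rest of the function:

// Hypothetical reconstruction of the availability check; ActiveThreads
// and threads[] are the globals from the diff, the rest is assumption.
bool thread_is_available(int slave, int master)
{
    if (threads[slave].state != THREAD_AVAILABLE || slave == master)
        return false;

    // Snapshot once: another thread may concurrently reset the counter
    // to 0, and re-reading it for the index below could go out of bounds.
    int localActiveSplitPoints = threads[slave].activeSplitPoints;

    if (localActiveSplitPoints == 0)
        return true;  // no split points of its own: freely available

    // Helpful master: a candidate that still has split points (it is
    // itself a waiting master) may only help a thread already working
    // for it, i.e. one registered in its newest split point.
    if (threads[slave].splitPoints[localActiveSplitPoints - 1].slaves[master])
        return true;

    return false;
}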
@@ -2594,54 +2593,54 @@
   assert(p.thread() >= 0 && p.thread() < ActiveThreads);
   assert(ActiveThreads > 1);

-  int master = p.thread();
+  int i, master = p.thread();
+  Thread& masterThread = threads[master];

   lock_grab(&MPLock);

   // If no other thread is available to help us, or if we have too many
   // active split points, don't split.
   if (   !available_thread_exists(master)
-      || threads[master].activeSplitPoints >= ACTIVE_SPLIT_POINTS_MAX)
+      || masterThread.activeSplitPoints >= MAX_ACTIVE_SPLIT_POINTS)
   {
       lock_release(&MPLock);
       return;
   }

   // Pick the next available split point object from the split point stack
-  SplitPoint* splitPoint = &SplitPointStack[master][threads[master].activeSplitPoints];
+  SplitPoint& splitPoint = masterThread.splitPoints[masterThread.activeSplitPoints++];

   // Initialize the split point object
-  splitPoint->parent = threads[master].splitPoint;
-  splitPoint->stopRequest = false;
-  splitPoint->ply = ply;
-  splitPoint->depth = depth;
-  splitPoint->mateThreat = mateThreat;
-  splitPoint->alpha = *alpha;
-  splitPoint->beta = beta;
-  splitPoint->pvNode = pvNode;
-  splitPoint->bestValue = *bestValue;
-  splitPoint->mp = mp;
-  splitPoint->moveCount = *moveCount;
-  splitPoint->pos = &p;
-  splitPoint->parentSstack = ss;
-  for (int i = 0; i < ActiveThreads; i++)
-      splitPoint->slaves[i] = 0;
+  splitPoint.parent = masterThread.splitPoint;
+  splitPoint.stopRequest = false;
+  splitPoint.ply = ply;
+  splitPoint.depth = depth;
+  splitPoint.mateThreat = mateThreat;
+  splitPoint.alpha = *alpha;
+  splitPoint.beta = beta;
+  splitPoint.pvNode = pvNode;
+  splitPoint.bestValue = *bestValue;
+  splitPoint.mp = mp;
+  splitPoint.moveCount = *moveCount;
+  splitPoint.pos = &p;
+  splitPoint.parentSstack = ss;
+  for (i = 0; i < ActiveThreads; i++)
+      splitPoint.slaves[i] = 0;

-  threads[master].splitPoint = splitPoint;
-  threads[master].activeSplitPoints++;
+  masterThread.splitPoint = &splitPoint;

   // If we are here it means we are not available
-  assert(threads[master].state != THREAD_AVAILABLE);
+  assert(masterThread.state != THREAD_AVAILABLE);

   int workersCnt = 1; // At least the master is included

   // Allocate available threads setting state to THREAD_BOOKED
-  for (int i = 0; !Fake && i < ActiveThreads && workersCnt < MaxThreadsPerSplitPoint; i++)
+  for (i = 0; !Fake && i < ActiveThreads && workersCnt < MaxThreadsPerSplitPoint; i++)
       if (thread_is_available(i, master))
       {
           threads[i].state = THREAD_BOOKED;
-          threads[i].splitPoint = splitPoint;
-          splitPoint->slaves[i] = 1;
+          threads[i].splitPoint = &splitPoint;
+          splitPoint.slaves[i] = 1;
           workersCnt++;
       }

@@ -2652,10 +2651,10 @@

   // Tell the threads that they have work to do. This will make them leave
   // their idle loop. But before copy search stack tail for each thread.
-  for (int i = 0; i < ActiveThreads; i++)
-      if (i == master || splitPoint->slaves[i])
+  for (i = 0; i < ActiveThreads; i++)
+      if (i == master || splitPoint.slaves[i])
       {
-          memcpy(splitPoint->sstack[i], ss - 1, 4 * sizeof(SearchStack));
+          memcpy(splitPoint.sstack[i], ss - 1, 4 * sizeof(SearchStack));

           assert(i == master || threads[i].state == THREAD_BOOKED);

@@ -2667,16 +2666,16 @@
   // THREAD_WORKISWAITING. We send the split point as a second parameter to the
   // idle loop, which means that the main thread will return from the idle
   // loop when all threads have finished their work at this split point.
-  idle_loop(master, splitPoint);
+  idle_loop(master, &splitPoint);

   // We have returned from the idle loop, which means that all threads are
   // finished. Update alpha and bestValue, and return.
   lock_grab(&MPLock);

-  *alpha = splitPoint->alpha;
-  *bestValue = splitPoint->bestValue;
-  threads[master].activeSplitPoints--;
-  threads[master].splitPoint = splitPoint->parent;
+  *alpha = splitPoint.alpha;
+  *bestValue = splitPoint.bestValue;
+  masterThread.activeSplitPoints--;
+  masterThread.splitPoint = splitPoint.parent;

   lock_release(&MPLock);
 }
@@ -39,7 +39,7 @@
 ////

 const int MAX_THREADS = 8;
-const int ACTIVE_SPLIT_POINTS_MAX = 8;
+const int MAX_ACTIVE_SPLIT_POINTS = 8;


 ////
@@ -83,12 +83,12 @@ enum ThreadState
 };

 struct Thread {
-  SplitPoint* volatile splitPoint;
-  volatile int activeSplitPoints;
   uint64_t nodes;
   uint64_t betaCutOffs[2];
   volatile ThreadState state;
-  unsigned char pad[64]; // set some distance among local data for each thread
+  SplitPoint* volatile splitPoint;
+  volatile int activeSplitPoints;
+  SplitPoint splitPoints[MAX_ACTIVE_SPLIT_POINTS];
 };

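Beyond the rename, the most visible cleanup in split() is that the split point is now grabbed by reference from the master's own array, with the counter bump folded into the pickup and the matching decrement releasing the slot on the way out. A compact sketch of that bookkeeping, reusing the stub types above; split_bookkeeping is a hypothetical stand-in for the real split(), and a std::mutex fakes the MPLock / lock_grab / lock_release machinery:

#include <mutex>

std::mutex MPLock;  // stand-in for the real Lock + lock_grab/lock_release

// Hypothetical helper, not the real split(): only the slot bookkeeping.
void split_bookkeeping(Thread& masterThread)
{
    std::lock_guard<std::mutex> guard(MPLock);

    // Too many active split points: refuse to split (the real code also
    // checks available_thread_exists(master) here).
    if (masterThread.activeSplitPoints >= MAX_ACTIVE_SPLIT_POINTS)
        return;

    // New idiom: one expression picks the next free slot in the thread's
    // own array and marks it used. Before the commit this was a pointer
    // into the global stack plus a separate activeSplitPoints++.
    SplitPoint& splitPoint = masterThread.splitPoints[masterThread.activeSplitPoints++];
    splitPoint.parent = masterThread.splitPoint;
    masterThread.splitPoint = &splitPoint;

    // ... the real code books slaves, drops the lock and runs
    // idle_loop(master, &splitPoint) until the split point is done ...

    // On the way out the slot is handed back and the enclosing split
    // point becomes current again.
    masterThread.activeSplitPoints--;
    masterThread.splitPoint = splitPoint.parent;
}

Because the array now belongs to the master thread, the pickup needs no cross-thread indexing; MPLock still guards the counter because other threads read it in thread_is_available().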