
Revert previous patch

It seems we have a speed regression under Linux; anyhow,
commit and then revert, to leave some documentation in case
we want to try again in the future.

Signed-off-by: Marco Costalba <mcostalba@gmail.com>
Marco Costalba 2010-10-24 09:51:49 +01:00
parent 96e589646d
commit 5b445cdf59
2 changed files with 15 additions and 25 deletions


@@ -2212,9 +2212,6 @@ split_point_start: // At split points actual search starts from here
 
   assert(threadID >= 0 && threadID < MAX_THREADS);
 
-  int i;
-  bool allFinished = false;
-
   while (true)
   {
       // Slave threads can exit as soon as AllThreadsShouldExit raises,
@@ -2230,23 +2227,23 @@ split_point_start: // At split points actual search starts from here
       // instead of wasting CPU time polling for work.
       while (   threadID >= ActiveThreads
              || threads[threadID].state == THREAD_INITIALIZING
-             || threads[threadID].state == THREAD_AVAILABLE)
+             || (!sp && threads[threadID].state == THREAD_AVAILABLE))
       {
+          assert(!sp);
+          assert(threadID != 0);
+
+          if (AllThreadsShouldExit)
+              break;
+
           lock_grab(&MPLock);
 
-          // Test with lock held to avoid races with wake_sleeping_thread()
-          for (i = 0; sp && i < ActiveThreads && !sp->slaves[i]; i++) {}
-          allFinished = (i == ActiveThreads);
-
-          // Retest sleep conditions under lock protection
-          if (   AllThreadsShouldExit
-              || allFinished
-              || !(   threadID >= ActiveThreads
-                   || threads[threadID].state == THREAD_INITIALIZING
-                   || threads[threadID].state == THREAD_AVAILABLE))
+          // Retest condition under lock protection
+          if (!(   threadID >= ActiveThreads
+                || threads[threadID].state == THREAD_INITIALIZING
+                || (!sp && threads[threadID].state == THREAD_AVAILABLE)))
           {
               lock_release(&MPLock);
-              break;
+              continue;
           }
 
           // Put thread to sleep
@@ -2276,19 +2273,14 @@ split_point_start: // At split points actual search starts from here
           assert(threads[threadID].state == THREAD_SEARCHING);
 
           threads[threadID].state = THREAD_AVAILABLE;
 
-          // Wake up master thread so to allow it to return from the idle loop in
-          // case we are the last slave of the split point.
-          if (threadID != tsp->master && threads[tsp->master].state == THREAD_AVAILABLE)
-              wake_sleeping_thread(tsp->master);
       }
 
       // If this thread is the master of a split point and all slaves have
       // finished their work at this split point, return from the idle loop.
-      for (i = 0; sp && i < ActiveThreads && !sp->slaves[i]; i++) {}
-      allFinished = (i == ActiveThreads);
-
-      if (allFinished)
+      int i = 0;
+      for ( ; sp && i < ActiveThreads && !sp->slaves[i]; i++) {}
+      if (i == ActiveThreads)
       {
           // Because sp->slaves[] is reset under lock protection,
           // be sure sp->lock has been released before to return.
@@ -2495,7 +2487,6 @@ split_point_start: // At split points actual search starts from here
 
   // Initialize the split point object
   splitPoint.parent = masterThread.splitPoint;
-  splitPoint.master = master;
   splitPoint.stopRequest = false;
   splitPoint.ply = ply;
   splitPoint.depth = depth;


@@ -55,7 +55,6 @@ struct SplitPoint {
 
   bool pvNode, mateThreat;
   Value beta;
   int ply;
-  int master;
   Move threatMove;
   SearchStack sstack[MAX_THREADS][PLY_MAX_PLUS_2];
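
For context on the restored "Retest condition under lock protection" logic: this is the standard defence against lost wake-ups. The sleep condition is evaluated once more after the lock has been taken, so a state change made by a waking thread under that same lock cannot slip in between the unlocked test and the moment the thread actually goes to sleep. Below is a minimal, self-contained sketch of that pattern using C++11 std::mutex and std::condition_variable rather than the engine's lock_grab()/cond_wait() wrappers; every name in it (WorkerPool, wake_with_work, has_work, ...) is an illustrative assumption, not an identifier from this repository.

// Minimal sketch of the "retest under lock before sleeping" pattern used by
// the idle loop above. All names are illustrative, not taken from the engine.
#include <condition_variable>
#include <iostream>
#include <mutex>
#include <thread>

class WorkerPool {
public:
    void idle_loop() {
        std::unique_lock<std::mutex> lk(mutex);
        while (!exit_requested) {
            // The predicate is evaluated with the mutex held, so a wake-up
            // that arrives between the test and the actual sleep cannot be
            // lost: notify_one() is only called after the flags have been
            // set under this same mutex.
            cv.wait(lk, [this] { return has_work || exit_requested; });
            if (has_work) {
                has_work = false;
                lk.unlock();
                std::cout << "worker: doing work\n"; // searching would happen here
                lk.lock();
            }
        }
    }

    void wake_with_work() {
        {
            std::lock_guard<std::mutex> lk(mutex);
            has_work = true;       // set the condition under the lock...
        }
        cv.notify_one();           // ...then signal the sleeping thread
    }

    void request_exit() {
        {
            std::lock_guard<std::mutex> lk(mutex);
            exit_requested = true;
        }
        cv.notify_one();
    }

private:
    std::mutex mutex;
    std::condition_variable cv;
    bool has_work = false;
    bool exit_requested = false;
};

int main() {
    WorkerPool pool;
    std::thread worker(&WorkerPool::idle_loop, &pool);
    pool.wake_with_work();
    pool.request_exit();
    worker.join();
}

The reverted patch replaced this retest-and-continue loop with an allFinished flag computed under the lock and a break out of the sleep loop, plus an explicit wake of the split-point master by its last slave; the revert restores the simpler scheme shown above.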