Mirror of https://github.com/sockspls/badfish (synced 2025-05-02 01:29:36 +00:00)
Use acquire() and release() for spinlocks
It is more idiomatic than lock() and unlock(). No functional change.
parent bae4679de2
commit 13d4df95cd

4 changed files with 31 additions and 30 deletions
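The change is a pure rename, in three parts: Spinlock::lock()/unlock() become acquire()/release(), SplitPoint::mutex becomes SplitPoint::spinlock, and Thread::allocMutex moves up into ThreadBase as spinlock. One side effect worth noting (an observation about the rename, not a claim made by the commit itself): with the old names, Spinlock satisfied the standard BasicLockable requirements, so generic wrappers such as std::lock_guard would accept it silently. The snippet below uses a reduced stand-in for the pre-commit class to illustrate this; after the rename the marked line no longer compiles, which keeps every spinlock call site explicit.

#include <atomic>
#include <mutex>
#include <thread>

// Reduced stand-in for the pre-commit Spinlock, for illustration only.
class Spinlock {
  std::atomic_int _lock{1};
public:
  void lock() {
      while (_lock.fetch_sub(1, std::memory_order_acquire) != 1)
          while (_lock.load(std::memory_order_relaxed) <= 0)
              std::this_thread::yield();
  }
  void unlock() { _lock.store(1, std::memory_order_release); }
};

int main() {
  Spinlock sl;
  std::lock_guard<Spinlock> guard(sl); // Compiles: lock()/unlock() satisfy
                                       // BasicLockable. With acquire()/release()
                                       // this line becomes a compile-time error.
}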
src/movepick.cpp

@@ -162,9 +162,9 @@ void MovePicker::score<CAPTURES>() {
 
 template<>
 void MovePicker::score<QUIETS>() {
-  Square prevMoveSq = to_sq((ss-1)->currentMove);
-  Piece prevMovePiece = pos.piece_on(prevMoveSq);
-  const HistoryStats &cmh = counterMovesHistory[prevMovePiece][prevMoveSq];
+  Square prevSq = to_sq((ss-1)->currentMove);
+  const HistoryStats& cmh = counterMovesHistory[pos.piece_on(prevSq)][prevSq];
 
   for (auto& m : *this)
       m.value = history[pos.moved_piece(m)][to_sq(m)]
src/search.cpp

@@ -826,7 +826,7 @@ moves_loop: // When in check and at SpNode search starts from here
               continue;
 
           moveCount = ++splitPoint->moveCount;
-          splitPoint->mutex.unlock();
+          splitPoint->spinlock.release();
       }
       else
           ++moveCount;

@@ -895,7 +895,7 @@ moves_loop: // When in check and at SpNode search starts from here
                   && moveCount >= FutilityMoveCounts[improving][depth])
               {
                   if (SpNode)
-                      splitPoint->mutex.lock();
+                      splitPoint->spinlock.acquire();
 
                   continue;
               }

@@ -914,7 +914,7 @@ moves_loop: // When in check and at SpNode search starts from here
 
                       if (SpNode)
                       {
-                          splitPoint->mutex.lock();
+                          splitPoint->spinlock.acquire();
                           if (bestValue > splitPoint->bestValue)
                               splitPoint->bestValue = bestValue;
                       }

@@ -926,7 +926,7 @@ moves_loop: // When in check and at SpNode search starts from here
               if (predictedDepth < 4 * ONE_PLY && pos.see_sign(move) < VALUE_ZERO)
               {
                   if (SpNode)
-                      splitPoint->mutex.lock();
+                      splitPoint->spinlock.acquire();
 
                   continue;
               }

@@ -1026,7 +1026,7 @@ moves_loop: // When in check and at SpNode search starts from here
       // Step 18. Check for new best move
       if (SpNode)
       {
-          splitPoint->mutex.lock();
+          splitPoint->spinlock.acquire();
           bestValue = splitPoint->bestValue;
           alpha = splitPoint->alpha;
       }

@@ -1630,7 +1630,7 @@ void Thread::idle_loop() {
           std::memcpy(ss-2, sp->ss-2, 5 * sizeof(Stack));
           ss->splitPoint = sp;
 
-          sp->mutex.lock();
+          sp->spinlock.acquire();
 
           assert(activePosition == nullptr);
 

@@ -1659,7 +1659,7 @@ void Thread::idle_loop() {
           // After releasing the lock we can't access any SplitPoint related data
           // in a safe way because it could have been released under our feet by
           // the sp master.
-          sp->mutex.unlock();
+          sp->spinlock.release();
 
           // Try to late join to another split point if none of its slaves has
           // already finished.

@@ -1699,12 +1699,12 @@ void Thread::idle_loop() {
               sp = bestSp;
 
               // Recheck the conditions under lock protection
-              sp->mutex.lock();
+              sp->spinlock.acquire();
 
               if (   sp->allSlavesSearching
                   && sp->slavesMask.count() < MAX_SLAVES_PER_SPLITPOINT)
               {
-                  allocMutex.lock();
+                  spinlock.acquire();
 
                   if (can_join(sp))
                   {

@@ -1713,10 +1713,10 @@ void Thread::idle_loop() {
                       searching = true;
                   }
 
-                  allocMutex.unlock();
+                  spinlock.release();
               }
 
-              sp->mutex.unlock();
+              sp->spinlock.release();
           }
       }
   }

@@ -1767,7 +1767,7 @@ void check_time() {
       {
           SplitPoint& sp = th->splitPoints[i];
 
-          sp.mutex.lock();
+          sp.spinlock.acquire();
 
           nodes += sp.nodes;
 

@@ -1775,7 +1775,7 @@ void check_time() {
               if (sp.slavesMask.test(idx) && Threads[idx]->activePosition)
                   nodes += Threads[idx]->activePosition->nodes_searched();
 
-          sp.mutex.unlock();
+          sp.spinlock.release();
       }
 
       if (nodes >= Limits.nodes)
src/thread.cpp

@@ -144,7 +144,7 @@ void Thread::split(Position& pos, Stack* ss, Value alpha, Value beta, Value* bes
   // Pick and init the next available split point
   SplitPoint& sp = splitPoints[splitPointsSize];
 
-  sp.mutex.lock(); // No contention here until we don't increment splitPointsSize
+  sp.spinlock.acquire(); // No contention here until we don't increment splitPointsSize
 
   sp.master = this;
   sp.parentSplitPoint = activeSplitPoint;

@@ -174,7 +174,7 @@ void Thread::split(Position& pos, Stack* ss, Value alpha, Value beta, Value* bes
   while (   sp.slavesMask.count() < MAX_SLAVES_PER_SPLITPOINT
          && (slave = Threads.available_slave(&sp)) != nullptr)
   {
-      slave->allocMutex.lock();
+      slave->spinlock.acquire();
 
       if (slave->can_join(activeSplitPoint))
       {

@@ -183,14 +183,14 @@ void Thread::split(Position& pos, Stack* ss, Value alpha, Value beta, Value* bes
           slave->searching = true;
       }
 
-      slave->allocMutex.unlock();
+      slave->spinlock.release();
   }
 
   // Everything is set up. The master thread enters the idle loop, from which
   // it will instantly launch a search, because its 'searching' flag is set.
   // The thread will return from the idle loop when all slaves have finished
   // their work at this split point.
-  sp.mutex.unlock();
+  sp.spinlock.release();
 
   Thread::idle_loop(); // Force a call to base class idle_loop()
 
@@ -205,7 +205,7 @@ void Thread::split(Position& pos, Stack* ss, Value alpha, Value beta, Value* bes
   // We have returned from the idle loop, which means that all threads are
   // finished. Note that decreasing splitPointsSize must be done under lock
   // protection to avoid a race with Thread::can_join().
-  sp.mutex.lock();
+  sp.spinlock.acquire();
 
   --splitPointsSize;
   activeSplitPoint = sp.parentSplitPoint;
@@ -214,7 +214,7 @@ void Thread::split(Position& pos, Stack* ss, Value alpha, Value beta, Value* bes
   *bestMove = sp.bestMove;
   *bestValue = sp.bestValue;
 
-  sp.mutex.unlock();
+  sp.spinlock.release();
 }
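A note on the comment in the @@ -205 hunk above: splitPointsSize is read concurrently by other threads from inside Thread::can_join() when they look for a split point to late-join, which is why the master may only decrement it while holding sp.spinlock. The sketch below shows the shape of the reader side of that race; it is a plausible reconstruction for illustration, not a verbatim excerpt from this revision.

// Hypothetical sketch of Thread::can_join(), assuming it indexes
// splitPoints[] by the volatile splitPointsSize counter. If the master
// decremented the counter outside the spinlock, a concurrent reader could
// index a slot that is being torn down.
bool Thread::can_join(const SplitPoint* sp) const {

  if (searching)
      return false;

  // Read the volatile counter once, so the test and the array access
  // below see the same value.
  const size_t size = splitPointsSize;

  // A thread with no active split points can help anyone; otherwise it may
  // only join work hanging off its most recent split point.
  return !size || splitPoints[size - 1].slavesMask.test(sp->master->idx);
}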
17 src/thread.h

@@ -41,16 +41,17 @@ const size_t MAX_SPLITPOINTS_PER_THREAD = 8;
 const size_t MAX_SLAVES_PER_SPLITPOINT = 4;
 
 class Spinlock {
-  std::atomic_int _lock;
+
+  std::atomic_int lock;
 
 public:
-  Spinlock() { _lock = 1; } // Init here to workaround a bug with MSVC 2013
-  void lock() {
-      while (_lock.fetch_sub(1, std::memory_order_acquire) != 1)
-          for (int cnt = 0; _lock.load(std::memory_order_relaxed) <= 0; ++cnt)
+  Spinlock() { lock = 1; } // Init here to workaround a bug with MSVC 2013
+  void acquire() {
+      while (lock.fetch_sub(1, std::memory_order_acquire) != 1)
+          for (int cnt = 0; lock.load(std::memory_order_relaxed) <= 0; ++cnt)
               if (cnt >= 10000) std::this_thread::yield(); // Be nice to hyperthreading
   }
-  void unlock() { _lock.store(1, std::memory_order_release); }
+  void release() { lock.store(1, std::memory_order_release); }
 };
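Before the remaining thread.h hunks, a note on how this lock works. It is a test-and-test-and-set spinlock: lock holds 1 when the lock is free. acquire() atomically decrements it; the thread that observes a previous value of 1 owns the lock, while every other thread falls into the inner loop and spins on cheap relaxed loads (which generate no write traffic on the cache line) until the value turns positive again, then retries the decrement. release() stores 1 with release ordering, so writes made inside the critical section become visible to the next owner, and the periodic yield() keeps a spinning thread from starving a hyperthreading sibling. Below is a self-contained demo: the class body is assembled from the hunk above, while the main() driver is added here purely for illustration.

#include <atomic>
#include <iostream>
#include <thread>

class Spinlock {

  std::atomic_int lock;

public:
  Spinlock() { lock = 1; } // Init here to workaround a bug with MSVC 2013
  void acquire() {
      while (lock.fetch_sub(1, std::memory_order_acquire) != 1)
          for (int cnt = 0; lock.load(std::memory_order_relaxed) <= 0; ++cnt)
              if (cnt >= 10000) std::this_thread::yield(); // Be nice to hyperthreading
  }
  void release() { lock.store(1, std::memory_order_release); }
};

int main() {

  Spinlock sl;
  long counter = 0;

  auto work = [&] {
      for (int i = 0; i < 100000; ++i)
      {
          sl.acquire();
          ++counter;   // Critical section guarded by the spinlock
          sl.release();
      }
  };

  std::thread t1(work), t2(work);
  t1.join();
  t2.join();

  std::cout << counter << std::endl; // Prints 200000 every run
  return 0;
}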
@@ -73,7 +74,7 @@ struct SplitPoint {
   SplitPoint* parentSplitPoint;
 
   // Shared variable data
-  Spinlock mutex;
+  Spinlock spinlock;
   std::bitset<MAX_THREADS> slavesMask;
   volatile bool allSlavesSearching;
   volatile uint64_t nodes;

@@ -97,6 +98,7 @@ struct ThreadBase {
 
   std::thread nativeThread;
   Mutex mutex;
+  Spinlock spinlock;
   ConditionVariable sleepCondition;
   volatile bool exit = false;
 };

@@ -127,7 +129,6 @@ struct Thread : public ThreadBase {
   SplitPoint* volatile activeSplitPoint;
   volatile size_t splitPointsSize;
   volatile bool searching;
-  Spinlock allocMutex;
 };