Use acquire() and release() for spinlocks
It is more idiomatic than lock() and unlock(). No functional change.
This commit is contained in:
parent
bae4679de2
commit
13d4df95cd
4 changed files with 31 additions and 30 deletions
@@ -162,9 +162,9 @@ void MovePicker::score<CAPTURES>() {

 template<>
 void MovePicker::score<QUIETS>() {

-  Square prevMoveSq = to_sq((ss-1)->currentMove);
-  Piece prevMovePiece = pos.piece_on(prevMoveSq);
-  const HistoryStats &cmh = counterMovesHistory[prevMovePiece][prevMoveSq];
+  Square prevSq = to_sq((ss-1)->currentMove);
+  const HistoryStats& cmh = counterMovesHistory[pos.piece_on(prevSq)][prevSq];

   for (auto& m : *this)
       m.value = history[pos.moved_piece(m)][to_sq(m)]

@@ -826,7 +826,7 @@ moves_loop: // When in check and at SpNode search starts from here
              continue;

          moveCount = ++splitPoint->moveCount;
-         splitPoint->mutex.unlock();
+         splitPoint->spinlock.release();
      }
      else
          ++moveCount;

@@ -895,7 +895,7 @@ moves_loop: // When in check and at SpNode search starts from here
          && moveCount >= FutilityMoveCounts[improving][depth])
      {
          if (SpNode)
-             splitPoint->mutex.lock();
+             splitPoint->spinlock.acquire();

          continue;
      }

@@ -914,7 +914,7 @@ moves_loop: // When in check and at SpNode search starts from here

      if (SpNode)
      {
-         splitPoint->mutex.lock();
+         splitPoint->spinlock.acquire();
          if (bestValue > splitPoint->bestValue)
              splitPoint->bestValue = bestValue;
      }

@@ -926,7 +926,7 @@ moves_loop: // When in check and at SpNode search starts from here
      if (predictedDepth < 4 * ONE_PLY && pos.see_sign(move) < VALUE_ZERO)
      {
          if (SpNode)
-             splitPoint->mutex.lock();
+             splitPoint->spinlock.acquire();

          continue;
      }

@@ -1026,7 +1026,7 @@ moves_loop: // When in check and at SpNode search starts from here
      // Step 18. Check for new best move
      if (SpNode)
      {
-         splitPoint->mutex.lock();
+         splitPoint->spinlock.acquire();
          bestValue = splitPoint->bestValue;
          alpha = splitPoint->alpha;
      }

@@ -1630,7 +1630,7 @@ void Thread::idle_loop() {
          std::memcpy(ss-2, sp->ss-2, 5 * sizeof(Stack));
          ss->splitPoint = sp;

-         sp->mutex.lock();
+         sp->spinlock.acquire();

          assert(activePosition == nullptr);

@@ -1659,7 +1659,7 @@ void Thread::idle_loop() {
          // After releasing the lock we can't access any SplitPoint related data
          // in a safe way because it could have been released under our feet by
          // the sp master.
-         sp->mutex.unlock();
+         sp->spinlock.release();

          // Try to late join to another split point if none of its slaves has
          // already finished.

@@ -1699,12 +1699,12 @@ void Thread::idle_loop() {
              sp = bestSp;

              // Recheck the conditions under lock protection
-             sp->mutex.lock();
+             sp->spinlock.acquire();

              if (   sp->allSlavesSearching
                  && sp->slavesMask.count() < MAX_SLAVES_PER_SPLITPOINT)
              {
-                 allocMutex.lock();
+                 spinlock.acquire();

                  if (can_join(sp))
                  {

@@ -1713,10 +1713,10 @@ void Thread::idle_loop() {
                      searching = true;
                  }

-                 allocMutex.unlock();
+                 spinlock.release();
              }

-             sp->mutex.unlock();
+             sp->spinlock.release();
          }
      }
  }

@@ -1767,7 +1767,7 @@ void check_time() {
          {
              SplitPoint& sp = th->splitPoints[i];

-             sp.mutex.lock();
+             sp.spinlock.acquire();

              nodes += sp.nodes;

@@ -1775,7 +1775,7 @@ void check_time() {
              if (sp.slavesMask.test(idx) && Threads[idx]->activePosition)
                  nodes += Threads[idx]->activePosition->nodes_searched();

-             sp.mutex.unlock();
+             sp.spinlock.release();
          }

          if (nodes >= Limits.nodes)

@@ -144,7 +144,7 @@ void Thread::split(Position& pos, Stack* ss, Value alpha, Value beta, Value* bes
  // Pick and init the next available split point
  SplitPoint& sp = splitPoints[splitPointsSize];

- sp.mutex.lock(); // No contention here until we don't increment splitPointsSize
+ sp.spinlock.acquire(); // No contention here until we don't increment splitPointsSize

  sp.master = this;
  sp.parentSplitPoint = activeSplitPoint;

@@ -174,7 +174,7 @@ void Thread::split(Position& pos, Stack* ss, Value alpha, Value beta, Value* bes
  while (    sp.slavesMask.count() < MAX_SLAVES_PER_SPLITPOINT
         && (slave = Threads.available_slave(&sp)) != nullptr)
  {
-     slave->allocMutex.lock();
+     slave->spinlock.acquire();

      if (slave->can_join(activeSplitPoint))
      {

@@ -183,14 +183,14 @@ void Thread::split(Position& pos, Stack* ss, Value alpha, Value beta, Value* bes
          slave->searching = true;
      }

-     slave->allocMutex.unlock();
+     slave->spinlock.release();
  }

  // Everything is set up. The master thread enters the idle loop, from which
  // it will instantly launch a search, because its 'searching' flag is set.
  // The thread will return from the idle loop when all slaves have finished
  // their work at this split point.
- sp.mutex.unlock();
+ sp.spinlock.release();

  Thread::idle_loop(); // Force a call to base class idle_loop()

@@ -205,7 +205,7 @@ void Thread::split(Position& pos, Stack* ss, Value alpha, Value beta, Value* bes
  // We have returned from the idle loop, which means that all threads are
  // finished. Note that decreasing splitPointsSize must be done under lock
  // protection to avoid a race with Thread::can_join().
- sp.mutex.lock();
+ sp.spinlock.acquire();

  --splitPointsSize;
  activeSplitPoint = sp.parentSplitPoint;

@@ -214,7 +214,7 @@ void Thread::split(Position& pos, Stack* ss, Value alpha, Value beta, Value* bes
  *bestMove = sp.bestMove;
  *bestValue = sp.bestValue;

- sp.mutex.unlock();
+ sp.spinlock.release();
 }

17 src/thread.h
@@ -41,16 +41,17 @@ const size_t MAX_SPLITPOINTS_PER_THREAD = 8;
 const size_t MAX_SLAVES_PER_SPLITPOINT = 4;

 class Spinlock {

-  std::atomic_int _lock;
+  std::atomic_int lock;

 public:
-  Spinlock() { _lock = 1; } // Init here to workaround a bug with MSVC 2013
-  void lock() {
-      while (_lock.fetch_sub(1, std::memory_order_acquire) != 1)
-          for (int cnt = 0; _lock.load(std::memory_order_relaxed) <= 0; ++cnt)
+  Spinlock() { lock = 1; } // Init here to workaround a bug with MSVC 2013
+  void acquire() {
+      while (lock.fetch_sub(1, std::memory_order_acquire) != 1)
+          for (int cnt = 0; lock.load(std::memory_order_relaxed) <= 0; ++cnt)
               if (cnt >= 10000) std::this_thread::yield(); // Be nice to hyperthreading
   }
-  void unlock() { _lock.store(1, std::memory_order_release); }
+  void release() { lock.store(1, std::memory_order_release); }
 };

@@ -73,7 +74,7 @@ struct SplitPoint {
   SplitPoint* parentSplitPoint;

   // Shared variable data
-  Spinlock mutex;
+  Spinlock spinlock;
   std::bitset<MAX_THREADS> slavesMask;
   volatile bool allSlavesSearching;
   volatile uint64_t nodes;

@@ -97,6 +98,7 @@ struct ThreadBase {

   std::thread nativeThread;
   Mutex mutex;
+  Spinlock spinlock;
   ConditionVariable sleepCondition;
   volatile bool exit = false;
 };

@@ -127,7 +129,6 @@ struct Thread : public ThreadBase {
   SplitPoint* volatile activeSplitPoint;
   volatile size_t splitPointsSize;
   volatile bool searching;
-  Spinlock allocMutex;
 };
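
For context, here is a minimal standalone sketch (not part of this commit) of how the renamed interface reads at a call site. The Spinlock body is copied from the src/thread.h hunk above; main(), the worker threads, and the shared counter are invented purely for illustration.

#include <atomic>
#include <iostream>
#include <thread>
#include <vector>

class Spinlock {

  std::atomic_int lock;

public:
  Spinlock() { lock = 1; } // Init here to workaround a bug with MSVC 2013
  void acquire() {
      while (lock.fetch_sub(1, std::memory_order_acquire) != 1)
          for (int cnt = 0; lock.load(std::memory_order_relaxed) <= 0; ++cnt)
              if (cnt >= 10000) std::this_thread::yield(); // Be nice to hyperthreading
  }
  void release() { lock.store(1, std::memory_order_release); }
};

int main() {

  Spinlock sl;      // hypothetical lock for this sketch, not one of the engine's spinlocks
  long counter = 0; // shared state protected by sl

  std::vector<std::thread> workers;
  for (int i = 0; i < 4; ++i)
      workers.emplace_back([&] {
          for (int j = 0; j < 100000; ++j)
          {
              sl.acquire();   // was sl.lock() before this commit
              ++counter;      // critical section
              sl.release();   // was sl.unlock()
          }
      });

  for (auto& t : workers)
      t.join();

  std::cout << counter << std::endl; // prints 400000: no lost updates
  return 0;
}

The call sites are unchanged except for the method names; acquire()/release() also mirrors the std::memory_order_acquire / std::memory_order_release semantics the implementation already uses, which is presumably why the rename is considered more idiomatic than lock()/unlock().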
|
Loading…
Add table
Reference in a new issue