1
0
Fork 0
mirror of https://github.com/sockspls/badfish synced 2025-07-11 19:49:14 +00:00

Retire slavesPositions

Save the current active position in each Thread
instead of keeping a centralized array in struct
SplitPoint.

This allows us to skip a memset() call at each split.

No functional change.
This commit is contained in:
Marco Costalba 2013-02-08 11:04:07 +01:00
parent 880726c13a
commit e5bc79fb9c
3 changed files with 12 additions and 10 deletions

View file

@ -1672,9 +1672,9 @@ void Thread::idle_loop() {
sp->mutex.lock();
assert(sp->slavesPositions[idx] == NULL);
assert(activePosition == NULL);
sp->slavesPositions[idx] = &pos;
activePosition = &pos;
switch (sp->nodeType) {
case Root:
@ -1693,7 +1693,7 @@ void Thread::idle_loop() {
assert(searching);
searching = false;
sp->slavesPositions[idx] = NULL;
activePosition = NULL;
sp->slavesMask &= ~(1ULL << idx);
sp->nodes += pos.nodes_searched();
@ -1742,7 +1742,7 @@ void check_time() {
nodes = RootPos.nodes_searched();
// Loop across all split points and sum accumulated SplitPoint nodes plus
// all the currently active slaves positions.
// all the currently active positions nodes.
for (size_t i = 0; i < Threads.size(); i++)
for (int j = 0; j < Threads[i]->splitPointsSize; j++)
{
@ -1754,8 +1754,9 @@ void check_time() {
Bitboard sm = sp.slavesMask;
while (sm)
{
Position* pos = sp.slavesPositions[pop_lsb(&sm)];
nodes += pos ? pos->nodes_searched() : 0;
Position* pos = Threads[pop_lsb(&sm)]->activePosition;
if (pos)
nodes += pos->nodes_searched();
}
sp.mutex.unlock();

View file

@ -19,7 +19,6 @@
#include <algorithm> // For std::count
#include <cassert>
#include <cstring> // For memset
#include <iostream>
#include "movegen.h"
@ -49,6 +48,7 @@ Thread::Thread() /* : splitPoints() */ { // Value-initialization bug in MSVC
searching = exit = false;
maxPly = splitPointsSize = 0;
activeSplitPoint = NULL;
activePosition = NULL;
idx = Threads.size();
if (!thread_create(handle, start_routine, this))
@ -281,8 +281,6 @@ void Thread::split(Position& pos, Stack* ss, Value alpha, Value beta, Value* bes
sp.cutoff = false;
sp.ss = ss;
memset(sp.slavesPositions, 0, sizeof(sp.slavesPositions));
// Try to allocate available threads and ask them to start searching setting
// 'searching' flag. This must be done under lock protection to avoid concurrent
// allocation of the same slave by another master.
@ -291,6 +289,7 @@ void Thread::split(Position& pos, Stack* ss, Value alpha, Value beta, Value* bes
splitPointsSize++;
activeSplitPoint = &sp;
activePosition = NULL;
size_t slavesCnt = 1; // This thread is always included
Thread* slave;
@ -318,6 +317,7 @@ void Thread::split(Position& pos, Stack* ss, Value alpha, Value beta, Value* bes
// In helpful master concept a master can help only a sub-tree of its split
// point, and because here is all finished is not possible master is booked.
assert(!searching);
assert(!activePosition);
}
// We have returned from the idle loop, which means that all threads are
@ -329,6 +329,7 @@ void Thread::split(Position& pos, Stack* ss, Value alpha, Value beta, Value* bes
searching = true;
splitPointsSize--;
activeSplitPoint = sp.parentSplitPoint;
activePosition = &pos;
pos.set_nodes_searched(pos.nodes_searched() + sp.nodes);
*bestMove = sp.bestMove;
*bestValue = sp.bestValue;

View file

@ -75,7 +75,6 @@ struct SplitPoint {
// Shared data
Mutex mutex;
Position* slavesPositions[MAX_THREADS];
volatile uint64_t slavesMask;
volatile int64_t nodes;
volatile Value alpha;
@ -110,6 +109,7 @@ struct Thread {
Material::Table materialTable;
Endgames endgames;
Pawns::Table pawnsTable;
Position* activePosition;
size_t idx;
int maxPly;
Mutex mutex;