1
0
Fork 0
mirror of https://github.com/sockspls/badfish synced 2025-05-03 18:19:35 +00:00

[Cluster] Move IO to the root.

Fixes one TODO, by moving the IO related to bestmove to the root, even if this move is found by a different rank.

This is needed to make sure IO from different ranks is ordered properly. If this is not done it is possible that e.g. a bestmove arrives before all info lines have been received, leading to output that confuses tools and humans alike (see e.g. https://github.com/cutechess/cutechess/issues/472)
This commit is contained in:
Joost VandeVondele 2019-01-03 15:15:50 +01:00 committed by Stéphane Nicolet
parent 267ca781cd
commit 8a3f8e21ae
3 changed files with 54 additions and 17 deletions

View file

@@ -77,11 +77,12 @@ void init() {
MPI_Comm_rank(MPI_COMM_WORLD, &world_rank); MPI_Comm_rank(MPI_COMM_WORLD, &world_rank);
MPI_Comm_size(MPI_COMM_WORLD, &world_size); MPI_Comm_size(MPI_COMM_WORLD, &world_size);
const std::array<MPI_Aint, 4> MIdisps = {offsetof(MoveInfo, move), const std::array<MPI_Aint, 5> MIdisps = {offsetof(MoveInfo, move),
offsetof(MoveInfo, ponder),
offsetof(MoveInfo, depth), offsetof(MoveInfo, depth),
offsetof(MoveInfo, score), offsetof(MoveInfo, score),
offsetof(MoveInfo, rank)}; offsetof(MoveInfo, rank)};
MPI_Type_create_hindexed_block(4, 1, MIdisps.data(), MPI_INT, &MIDatatype); MPI_Type_create_hindexed_block(5, 1, MIdisps.data(), MPI_INT, &MIDatatype);
MPI_Type_commit(&MIDatatype); MPI_Type_commit(&MIDatatype);
MPI_Comm_dup(MPI_COMM_WORLD, &InputComm); MPI_Comm_dup(MPI_COMM_WORLD, &InputComm);
@@ -311,7 +312,7 @@ void save(Thread* thread, TTEntry* tte,
// TODO update to the scheme in master.. can this use aggregation of votes? // TODO update to the scheme in master.. can this use aggregation of votes?
void pick_moves(MoveInfo& mi) { void pick_moves(MoveInfo& mi, std::string& PVLine) {
MoveInfo* pMoveInfo = NULL; MoveInfo* pMoveInfo = NULL;
if (is_root()) if (is_root())
@@ -344,7 +345,28 @@ void pick_moves(MoveInfo& mi) {
} }
free(pMoveInfo); free(pMoveInfo);
} }
// Send around the final result
MPI_Bcast(&mi, 1, MIDatatype, 0, MoveComm); MPI_Bcast(&mi, 1, MIDatatype, 0, MoveComm);
// Send PV line to root as needed
if (mi.rank != 0 && mi.rank == rank()) {
int size;
std::vector<char> vec;
vec.assign(PVLine.begin(), PVLine.end());
size = vec.size();
MPI_Send(&size, 1, MPI_INT, 0, 42, MoveComm);
MPI_Send(vec.data(), size, MPI_CHAR, 0, 42, MoveComm);
}
if (mi.rank != 0 && is_root()) {
int size;
std::vector<char> vec;
MPI_Recv(&size, 1, MPI_INT, mi.rank, 42, MoveComm, MPI_STATUS_IGNORE);
vec.resize(size);
MPI_Recv(vec.data(), size, MPI_CHAR, mi.rank, 42, MoveComm, MPI_STATUS_IGNORE);
PVLine.assign(vec.begin(), vec.end());
}
} }
uint64_t nodes_searched() { uint64_t nodes_searched() {

View file

@@ -34,6 +34,7 @@ namespace Cluster {
struct MoveInfo { struct MoveInfo {
int move; int move;
int ponder;
int depth; int depth;
int score; int score;
int rank; int rank;
@@ -73,7 +74,7 @@ int size();
int rank(); int rank();
inline bool is_root() { return rank() == 0; } inline bool is_root() { return rank() == 0; }
void save(Thread* thread, TTEntry* tte, Key k, Value v, Bound b, Depth d, Move m, Value ev); void save(Thread* thread, TTEntry* tte, Key k, Value v, Bound b, Depth d, Move m, Value ev);
void pick_moves(MoveInfo& mi); void pick_moves(MoveInfo& mi, std::string& PVLine);
void ttRecvBuff_resize(size_t nThreads); void ttRecvBuff_resize(size_t nThreads);
uint64_t nodes_searched(); uint64_t nodes_searched();
uint64_t tb_hits(); uint64_t tb_hits();
@@ -90,7 +91,7 @@ constexpr int size() { return 1; }
constexpr int rank() { return 0; } constexpr int rank() { return 0; }
constexpr bool is_root() { return true; } constexpr bool is_root() { return true; }
inline void save(Thread*, TTEntry* tte, Key k, Value v, Bound b, Depth d, Move m, Value ev) { tte->save(k, v, b, d, m, ev); } inline void save(Thread*, TTEntry* tte, Key k, Value v, Bound b, Depth d, Move m, Value ev) { tte->save(k, v, b, d, m, ev); }
inline void pick_moves(MoveInfo&) { } inline void pick_moves(MoveInfo&, std::string&) { }
inline void ttRecvBuff_resize(size_t) { } inline void ttRecvBuff_resize(size_t) { }
uint64_t nodes_searched(); uint64_t nodes_searched();
uint64_t tb_hits(); uint64_t tb_hits();

View file

@@ -288,27 +288,41 @@ void MainThread::search() {
} }
} }
Cluster::MoveInfo mi{bestThread->rootMoves[0].pv[0],
// Prepare PVLine and ponder move
std::string PVLine = UCI::pv(bestThread->rootPos, bestThread->completedDepth, -VALUE_INFINITE, VALUE_INFINITE);
Move bestMove = bestThread->rootMoves[0].pv[0];
Move ponderMove = MOVE_NONE;
if (bestThread->rootMoves[0].pv.size() > 1 || bestThread->rootMoves[0].extract_ponder_from_tt(rootPos))
ponderMove = bestThread->rootMoves[0].pv[1];
// Exchange info as needed
Cluster::MoveInfo mi{bestMove,
ponderMove,
bestThread->completedDepth, bestThread->completedDepth,
bestThread->rootMoves[0].score, bestThread->rootMoves[0].score,
Cluster::rank()}; Cluster::rank()};
Cluster::pick_moves(mi); Cluster::pick_moves(mi, PVLine);
previousScore = static_cast<Value>(mi.score); previousScore = static_cast<Value>(mi.score);
// TODO output should be done on the cluster_root if (Cluster::is_root())
if (mi.rank == Cluster::rank()) { {
// Send again PV info if we have a new best thread // Send again PV info if we have a new best thread/rank
if (!Cluster::is_root() || bestThread != this) if (bestThread != this || mi.rank != 0)
sync_cout << UCI::pv(bestThread->rootPos, bestThread->completedDepth, -VALUE_INFINITE, VALUE_INFINITE) << sync_endl; sync_cout << PVLine << sync_endl;
sync_cout << "bestmove " << UCI::move(bestThread->rootMoves[0].pv[0], rootPos.is_chess960()); bestMove = static_cast<Move>(mi.move);
ponderMove = static_cast<Move>(mi.ponder);
if (bestThread->rootMoves[0].pv.size() > 1 || bestThread->rootMoves[0].extract_ponder_from_tt(rootPos)) if (ponderMove != MOVE_NONE)
std::cout << " ponder " << UCI::move(bestThread->rootMoves[0].pv[1], rootPos.is_chess960()); sync_cout << "bestmove " << UCI::move(bestMove, rootPos.is_chess960())
<< " ponder " << UCI::move(ponderMove, rootPos.is_chess960()) << sync_endl;
std::cout << sync_endl; else
sync_cout << "bestmove " << UCI::move(bestMove, rootPos.is_chess960()) << sync_endl;
} }
} }