From 8a3f8e21ae6b1dfb1e513bfca5df4e2d618fd55a Mon Sep 17 00:00:00 2001 From: Joost VandeVondele Date: Thu, 3 Jan 2019 15:15:50 +0100 Subject: [PATCH] [Cluster] Move IO to the root. Fixes one TODO, by moving the IO related to bestmove to the root, even if this move is found by a different rank. This is needed to make sure IO from different ranks is ordered properly. If this is not done it is possible that e.g. a bestmove arrives before all info lines have been received, leading to output that confuses tools and humans alike (see e.g. https://github.com/cutechess/cutechess/issues/472) --- src/cluster.cpp | 28 +++++++++++++++++++++++++--- src/cluster.h | 5 +++-- src/search.cpp | 38 ++++++++++++++++++++++++++------------ 3 files changed, 54 insertions(+), 17 deletions(-) diff --git a/src/cluster.cpp b/src/cluster.cpp index d01a05e5..de8551e0 100644 --- a/src/cluster.cpp +++ b/src/cluster.cpp @@ -77,11 +77,12 @@ void init() { MPI_Comm_rank(MPI_COMM_WORLD, &world_rank); MPI_Comm_size(MPI_COMM_WORLD, &world_size); - const std::array<MPI_Aint, 4> MIdisps = {offsetof(MoveInfo, move), + const std::array<MPI_Aint, 5> MIdisps = {offsetof(MoveInfo, move), + offsetof(MoveInfo, ponder), offsetof(MoveInfo, depth), offsetof(MoveInfo, score), offsetof(MoveInfo, rank)}; - MPI_Type_create_hindexed_block(4, 1, MIdisps.data(), MPI_INT, &MIDatatype); + MPI_Type_create_hindexed_block(5, 1, MIdisps.data(), MPI_INT, &MIDatatype); MPI_Type_commit(&MIDatatype); MPI_Comm_dup(MPI_COMM_WORLD, &InputComm); @@ -311,7 +312,7 @@ void save(Thread* thread, TTEntry* tte, // TODO update to the scheme in master.. can this use aggregation of votes?
-void pick_moves(MoveInfo& mi) { +void pick_moves(MoveInfo& mi, std::string& PVLine) { MoveInfo* pMoveInfo = NULL; if (is_root()) @@ -344,7 +345,28 @@ void pick_moves(MoveInfo& mi) { } free(pMoveInfo); } + + // Send around the final result MPI_Bcast(&mi, 1, MIDatatype, 0, MoveComm); + + // Send PV line to root as needed + if (mi.rank != 0 && mi.rank == rank()) { + int size; + std::vector<char> vec; + vec.assign(PVLine.begin(), PVLine.end()); + size = vec.size(); + MPI_Send(&size, 1, MPI_INT, 0, 42, MoveComm); + MPI_Send(vec.data(), size, MPI_CHAR, 0, 42, MoveComm); + } + if (mi.rank != 0 && is_root()) { + int size; + std::vector<char> vec; + MPI_Recv(&size, 1, MPI_INT, mi.rank, 42, MoveComm, MPI_STATUS_IGNORE); + vec.resize(size); + MPI_Recv(vec.data(), size, MPI_CHAR, mi.rank, 42, MoveComm, MPI_STATUS_IGNORE); + PVLine.assign(vec.begin(), vec.end()); + } + } uint64_t nodes_searched() { diff --git a/src/cluster.h b/src/cluster.h index b4bc7649..6648a676 100644 --- a/src/cluster.h +++ b/src/cluster.h @@ -34,6 +34,7 @@ namespace Cluster { struct MoveInfo { int move; + int ponder; int depth; int score; int rank; @@ -73,7 +74,7 @@ int size(); int rank(); inline bool is_root() { return rank() == 0; } void save(Thread* thread, TTEntry* tte, Key k, Value v, Bound b, Depth d, Move m, Value ev); -void pick_moves(MoveInfo& mi); +void pick_moves(MoveInfo& mi, std::string& PVLine); void ttRecvBuff_resize(size_t nThreads); uint64_t nodes_searched(); uint64_t tb_hits(); @@ -90,7 +91,7 @@ constexpr int size() { return 1; } constexpr int rank() { return 0; } constexpr bool is_root() { return true; } inline void save(Thread*, TTEntry* tte, Key k, Value v, Bound b, Depth d, Move m, Value ev) { tte->save(k, v, b, d, m, ev); } -inline void pick_moves(MoveInfo&) { } +inline void pick_moves(MoveInfo&, std::string&) { } inline void ttRecvBuff_resize(size_t) { } uint64_t nodes_searched(); uint64_t tb_hits(); diff --git a/src/search.cpp b/src/search.cpp index 569a9c4a..3dc03d04 100644 ---
a/src/search.cpp +++ b/src/search.cpp @@ -288,27 +288,41 @@ void MainThread::search() { } } - Cluster::MoveInfo mi{bestThread->rootMoves[0].pv[0], + + // Prepare PVLine and ponder move + std::string PVLine = UCI::pv(bestThread->rootPos, bestThread->completedDepth, -VALUE_INFINITE, VALUE_INFINITE); + + Move bestMove = bestThread->rootMoves[0].pv[0]; + Move ponderMove = MOVE_NONE; + if (bestThread->rootMoves[0].pv.size() > 1 || bestThread->rootMoves[0].extract_ponder_from_tt(rootPos)) + ponderMove = bestThread->rootMoves[0].pv[1]; + + // Exchange info as needed + Cluster::MoveInfo mi{bestMove, + ponderMove, bestThread->completedDepth, bestThread->rootMoves[0].score, Cluster::rank()}; - Cluster::pick_moves(mi); + Cluster::pick_moves(mi, PVLine); previousScore = static_cast<Value>(mi.score); - // TODO output should be done on the cluster_root - if (mi.rank == Cluster::rank()) { - // Send again PV info if we have a new best thread - if (!Cluster::is_root() || bestThread != this) - sync_cout << UCI::pv(bestThread->rootPos, bestThread->completedDepth, -VALUE_INFINITE, VALUE_INFINITE) << sync_endl; + if (Cluster::is_root()) + { + // Send again PV info if we have a new best thread/rank + if (bestThread != this || mi.rank != 0) + sync_cout << PVLine << sync_endl; - sync_cout << "bestmove " << UCI::move(bestThread->rootMoves[0].pv[0], rootPos.is_chess960()); + bestMove = static_cast<Move>(mi.move); + ponderMove = static_cast<Move>(mi.ponder); - if (bestThread->rootMoves[0].pv.size() > 1 || bestThread->rootMoves[0].extract_ponder_from_tt(rootPos)) - std::cout << " ponder " << UCI::move(bestThread->rootMoves[0].pv[1], rootPos.is_chess960()); - - std::cout << sync_endl; + if (ponderMove != MOVE_NONE) + sync_cout << "bestmove " << UCI::move(bestMove, rootPos.is_chess960()) + << " ponder " << UCI::move(ponderMove, rootPos.is_chess960()) << sync_endl; + else + sync_cout << "bestmove " << UCI::move(bestMove, rootPos.is_chess960()) << sync_endl; } + }