From 271181bb31e8a0c2a2d260762aae542147064ccf Mon Sep 17 00:00:00 2001
From: Joost VandeVondele
Date: Wed, 5 Dec 2018 07:26:08 +0100
Subject: [PATCH] [cluster] Add depth condition to cluster TT saves.

Since the logic for saving moves in the send buffer and the associated
rehashing is expensive, only do it for TT stores of sufficient depth.

Quite some gain in local testing with 4 ranks against the previous
version:

Elo difference: 288.84 +/- 21.98

This starts to make the branch useful, but for on-node runs a
difference to standard threading remains.
---
 src/cluster.cpp | 76 ++++++++++++++++++++++++++-----------------------
 1 file changed, 40 insertions(+), 36 deletions(-)

diff --git a/src/cluster.cpp b/src/cluster.cpp
index a0a2be71..5dbfcaf3 100644
--- a/src/cluster.cpp
+++ b/src/cluster.cpp
@@ -175,51 +175,55 @@ int rank() {
 void save(Thread* thread, TTEntry* tte,
           Key k, Value v, Bound b, Depth d, Move m, Value ev) {
 
+  tte->save(k, v, b, d, m, ev);
 
-  // Try to add to thread's send buffer
+  if (d > 5 * ONE_PLY)
   {
-      std::lock_guard<Mutex> lk(thread->ttBuffer.mutex);
-      thread->ttBuffer.buffer.replace(KeyedTTEntry(k,*tte));
-  }
+      // Try to add to thread's send buffer
+      {
+          std::lock_guard<Mutex> lk(thread->ttBuffer.mutex);
+          thread->ttBuffer.buffer.replace(KeyedTTEntry(k,*tte));
+      }
 
-  // Communicate on main search thread
-  if (thread == Threads.main()) {
-      static MPI_Request req = MPI_REQUEST_NULL;
-      static TTSendBuffer<TTSendBufferSize> send_buff = {};
-      int flag;
-      bool found;
-      TTEntry* replace_tte;
+      // Communicate on main search thread
+      if (thread == Threads.main()) {
+          static MPI_Request req = MPI_REQUEST_NULL;
+          static TTSendBuffer<TTSendBufferSize> send_buff = {};
+          int flag;
+          bool found;
+          TTEntry* replace_tte;
 
-      // Test communication status
-      MPI_Test(&req, &flag, MPI_STATUS_IGNORE);
+          // Test communication status
+          MPI_Test(&req, &flag, MPI_STATUS_IGNORE);
 
-      // Current communication is complete
-      if (flag) {
-          // Save all recieved entries
-          for (auto&& e : TTBuff) {
-              replace_tte = TT.probe(e.first, found);
-              replace_tte->save(e.first, e.second.value(), e.second.bound(), e.second.depth(),
-                                e.second.move(), e.second.eval());
-          }
+          // Current communication is complete
+          if (flag) {
+              // Save all recieved entries
+              for (auto&& e : TTBuff) {
+                  replace_tte = TT.probe(e.first, found);
+                  replace_tte->save(e.first, e.second.value(), e.second.bound(), e.second.depth(),
+                                    e.second.move(), e.second.eval());
+              }
 
-          // Reset send buffer
-          send_buff = {};
+              // Reset send buffer
+              send_buff = {};
 
-          // Build up new send buffer: best 16 found across all threads
-          for (auto&& th : Threads) {
-              std::lock_guard<Mutex> lk(th->ttBuffer.mutex);
-              for (auto&& e : th->ttBuffer.buffer)
-                  send_buff.replace(e);
-              // Reset thread's send buffer
-              th->ttBuffer.buffer = {};
-          }
+              // Build up new send buffer: best 16 found across all threads
+              for (auto&& th : Threads) {
+                  std::lock_guard<Mutex> lk(th->ttBuffer.mutex);
+                  for (auto&& e : th->ttBuffer.buffer)
+                      send_buff.replace(e);
+                  // Reset thread's send buffer
+                  th->ttBuffer.buffer = {};
+              }
 
-          // Start next communication
-          MPI_Iallgather(send_buff.data(), send_buff.size(), TTEntryDatatype,
-                         TTBuff.data(), TTSendBufferSize, TTEntryDatatype,
-                         TTComm, &req);
-      }
+              // Start next communication
+              MPI_Iallgather(send_buff.data(), send_buff.size(), TTEntryDatatype,
+                             TTBuff.data(), TTSendBufferSize, TTEntryDatatype,
+                             TTComm, &req);
+          }
+      }
   }
 }
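
A note on the send-buffer side: the patch relies on
thread->ttBuffer.buffer.replace(KeyedTTEntry(k,*tte)) and on send_buff
keeping only the "best 16 found across all threads". The branch's own
types are not shown in this hunk, so below is a minimal sketch of one
way such a fixed-capacity best-N buffer can work, assuming entries are
ranked by search depth; BestBuffer, KeyedEntry and ShallowerFirst are
illustrative names, not the cluster branch's API.

    #include <algorithm>
    #include <array>
    #include <cstddef>
    #include <cstdint>

    struct KeyedEntry {
        std::uint64_t key;
        int           depth;
    };

    // Orders the heap so the shallowest entry sits at the front.
    struct ShallowerFirst {
        bool operator()(const KeyedEntry& a, const KeyedEntry& b) const {
            return b.depth < a.depth;
        }
    };

    template<std::size_t N>
    class BestBuffer {
        std::array<KeyedEntry, N> entries{};
        std::size_t count = 0;

    public:
        // Keep e only if there is room, or it beats the weakest entry so far.
        bool replace(const KeyedEntry& e) {
            if (count < N) {
                entries[count++] = e;
                std::push_heap(entries.begin(), entries.begin() + count, ShallowerFirst{});
                return true;
            }
            if (e.depth <= entries[0].depth)
                return false;
            std::pop_heap(entries.begin(), entries.end(), ShallowerFirst{});
            entries[N - 1] = e;
            std::push_heap(entries.begin(), entries.end(), ShallowerFirst{});
            return true;
        }

        const KeyedEntry* begin() const { return entries.data(); }
        const KeyedEntry* end()   const { return entries.data() + count; }
        const KeyedEntry* data()  const { return entries.data(); }
        std::size_t       size()  const { return count; }
    };

With N = 16 this reproduces the bound mentioned in the diff comment;
replace() costs O(log N) on success and a single comparison on
rejection.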
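
On the communication side, the pattern in the diff is: the main thread
polls the outstanding request with MPI_Test (a request equal to
MPI_REQUEST_NULL tests as complete, which bootstraps the first round)
and, once it completes, consumes the gathered entries and immediately
posts the next MPI_Iallgather. The sketch below isolates that
poll-and-repost loop in runnable form, assuming fixed-size records
shipped as MPI_BYTE; Record, kBufSize and the round count are
stand-ins for illustration, not names from the branch.

    #include <mpi.h>

    #include <cstdint>
    #include <cstdio>
    #include <vector>

    struct Record {
        std::uint64_t key;
        int           depth;
    };

    int main(int argc, char** argv) {

        MPI_Init(&argc, &argv);

        int size, rank;
        MPI_Comm_size(MPI_COMM_WORLD, &size);
        MPI_Comm_rank(MPI_COMM_WORLD, &rank);

        const int kBufSize = 16;                    // records contributed per rank, per round
        std::vector<Record> send(kBufSize);         // this rank's current best entries
        std::vector<Record> recv(kBufSize * size);  // entries gathered from all ranks

        MPI_Request req = MPI_REQUEST_NULL;         // a null request tests as complete

        for (int round = 0; round < 1000; ++round)
        {
            int flag;
            MPI_Test(&req, &flag, MPI_STATUS_IGNORE);

            if (flag)
            {
                // The previous exchange (if any) finished: recv holds kBufSize
                // records per rank. Merge them, refill send, post the next round.
                send[0] = Record{ (std::uint64_t)rank, round };
                MPI_Iallgather(send.data(), kBufSize * (int)sizeof(Record), MPI_BYTE,
                               recv.data(), kBufSize * (int)sizeof(Record), MPI_BYTE,
                               MPI_COMM_WORLD, &req);
            }

            // Local work (the search, in Stockfish's case) continues here
            // while the exchange progresses in the background.
        }

        MPI_Wait(&req, MPI_STATUS_IGNORE);          // drain the last outstanding request

        if (rank == 0)
            std::printf("completed on %d ranks\n", size);

        MPI_Finalize();
        return 0;
    }

Because at most one request is ever outstanding, a TT store on the main
thread costs one MPI_Test when no exchange has finished, so the search
never blocks waiting for communication.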
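
Finally, MPI_Iallgather above ships entries through a TTEntryDatatype
handle rather than raw bytes. The diff does not show how that datatype
is built; one conventional way to register such a struct type is
sketched below, where the WireEntry layout is purely illustrative and
not Stockfish's actual TTEntry packing.

    #include <mpi.h>

    #include <cstddef>
    #include <cstdint>

    // Illustrative key + entry layout; the real TTEntry packs differently.
    struct WireEntry {
        std::uint64_t key;
        std::int16_t  value;
        std::int16_t  eval;
        std::uint16_t move;
        std::int8_t   depth;
        std::uint8_t  bound;
    };

    static MPI_Datatype make_entry_datatype() {

        int          lens[6]  = {1, 1, 1, 1, 1, 1};
        MPI_Aint     disps[6] = {offsetof(WireEntry, key),   offsetof(WireEntry, value),
                                 offsetof(WireEntry, eval),  offsetof(WireEntry, move),
                                 offsetof(WireEntry, depth), offsetof(WireEntry, bound)};
        MPI_Datatype types[6] = {MPI_UINT64_T, MPI_INT16_T, MPI_INT16_T,
                                 MPI_UINT16_T, MPI_INT8_T,  MPI_UINT8_T};

        MPI_Datatype raw, entry;
        MPI_Type_create_struct(6, lens, disps, types, &raw);

        // Resize so consecutive array elements stride by sizeof(WireEntry),
        // covering any trailing padding the compiler inserts.
        MPI_Type_create_resized(raw, 0, sizeof(WireEntry), &entry);
        MPI_Type_commit(&entry);
        MPI_Type_free(&raw);
        return entry;
    }

The MPI_Type_create_resized step matters whenever the struct carries
trailing padding, since the gathered arrays must stride by
sizeof(WireEntry).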