
[cluster] Add depth condition to cluster TT saves.

Since the logic for saving moves in the send buffer and the associated rehashing are expensive, only do it for TT stores of sufficient depth.

This gives a substantial gain in local testing with 4 ranks against the previous version:
Elo difference: 288.84 +/- 21.98

This starts to make the branch useful, but for on-node runs a gap to the standard threaded version remains.
Joost VandeVondele 2018-12-05 07:26:08 +01:00 committed by Stéphane Nicolet
parent 66b2c6b9f1
commit 271181bb31
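
The idea in the commit message is to keep the unconditional, cheap TT store while skipping the mutex-protected send-buffer insertion for shallow entries, which make up the bulk of all TT stores. A minimal standalone sketch of that pattern, using purely illustrative names (Entry, DepthGatedSaver, storeLocally) rather than the branch's real types:

    #include <mutex>
    #include <vector>

    struct Entry { unsigned long long key; int value; int depth; };

    struct DepthGatedSaver {
        std::mutex mutex;
        std::vector<Entry> sendBuffer;

        void storeLocally(const Entry&) { /* cheap local TT store, not shown */ }

        // The local store always happens; the locked send-buffer insertion
        // (the expensive part) is only paid for sufficiently deep entries.
        void save(const Entry& e, int minDepth) {
            storeLocally(e);
            if (e.depth > minDepth) {
                std::lock_guard<std::mutex> lk(mutex);
                sendBuffer.push_back(e);   // stands in for ttBuffer.buffer.replace(...)
            }
        }
    };

    int main() {
        DepthGatedSaver s;
        s.save({0x1234u, 10, 3}, 5);   // shallow: local store only
        s.save({0x5678u, 25, 12}, 5);  // deep: also queued for cluster exchange
    }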


@@ -175,51 +175,55 @@ int rank() {
 void save(Thread* thread, TTEntry* tte,
           Key k, Value v, Bound b, Depth d, Move m, Value ev) {
   tte->save(k, v, b, d, m, ev);
+  if (d > 5 * ONE_PLY)
+  {
   // Try to add to thread's send buffer
   {
       std::lock_guard<Mutex> lk(thread->ttBuffer.mutex);
       thread->ttBuffer.buffer.replace(KeyedTTEntry(k,*tte));
   }

   // Communicate on main search thread
   if (thread == Threads.main()) {
       static MPI_Request req = MPI_REQUEST_NULL;
       static TTSendBuffer<TTSendBufferSize> send_buff = {};
       int flag;
       bool found;
       TTEntry* replace_tte;

       // Test communication status
       MPI_Test(&req, &flag, MPI_STATUS_IGNORE);

       // Current communication is complete
       if (flag) {
           // Save all received entries
           for (auto&& e : TTBuff) {
               replace_tte = TT.probe(e.first, found);
               replace_tte->save(e.first, e.second.value(), e.second.bound(), e.second.depth(),
                                 e.second.move(), e.second.eval());
           }

           // Reset send buffer
           send_buff = {};

           // Build up new send buffer: best 16 found across all threads
           for (auto&& th : Threads) {
               std::lock_guard<Mutex> lk(th->ttBuffer.mutex);
               for (auto&& e : th->ttBuffer.buffer)
                   send_buff.replace(e);
               // Reset thread's send buffer
               th->ttBuffer.buffer = {};
           }

           // Start next communication
           MPI_Iallgather(send_buff.data(), send_buff.size(), TTEntryDatatype,
                          TTBuff.data(), TTSendBufferSize, TTEntryDatatype,
                          TTComm, &req);
       }
   }
+  }
 }
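
The diff relies on TTSendBuffer::replace() retaining only the most valuable entries (the comment in the diff says the best 16 found across all threads), and the commit message's mention of rehashing suggests the actual implementation is hash-based. As a rough illustration only, not the branch's implementation, a bounded buffer that keeps the N deepest entries could look like this:

    #include <algorithm>
    #include <array>
    #include <cstddef>

    struct KeyedEntry { unsigned long long key; int depth; };  // stand-in for KeyedTTEntry

    template <std::size_t N>
    class BestNBuffer {
        std::array<KeyedEntry, N> entries{};
        std::size_t count = 0;
    public:
        // Insert e; once full, evict the shallowest stored entry if e is deeper.
        void replace(const KeyedEntry& e) {
            if (count < N) {
                entries[count++] = e;
                return;
            }
            auto shallowest = std::min_element(entries.begin(), entries.begin() + count,
                [](const KeyedEntry& a, const KeyedEntry& b) { return a.depth < b.depth; });
            if (e.depth > shallowest->depth)
                *shallowest = e;
        }

        const KeyedEntry* data() const { return entries.data(); }
        std::size_t size() const { return count; }
    };

    int main() {
        BestNBuffer<16> buf;            // "best 16" as in the comment in the diff
        buf.replace({0xABCDu, 7});
        buf.replace({0x1234u, 12});
    }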