mirror of https://github.com/sockspls/badfish synced 2025-04-30 08:43:09 +00:00

Bishop pins only

Gary Linscott 2013-02-11 10:26:18 -05:00
commit a7cdf7299a
17 changed files with 512 additions and 582 deletions

View file

@ -471,6 +471,29 @@ ScaleFactor Endgame<KBPsK>::operator()(const Position& pos) const {
return SCALE_FACTOR_DRAW;
}
}
// All pawns on same B or G file? Then potential draw
if ( (pawnFile == FILE_B || pawnFile == FILE_G)
&& !(pos.pieces(PAWN) & ~file_bb(pawnFile))
&& pos.non_pawn_material(weakerSide) == 0
&& pos.piece_count(weakerSide, PAWN) >= 1)
{
// Get weaker pawn closest to opponent's queening square
Bitboard wkPawns = pos.pieces(weakerSide, PAWN);
Square weakerPawnSq = strongerSide == WHITE ? msb(wkPawns) : lsb(wkPawns);
Square strongerKingSq = pos.king_square(strongerSide);
Square weakerKingSq = pos.king_square(weakerSide);
Square bishopSq = pos.piece_list(strongerSide, BISHOP)[0];
// Draw if weaker pawn is on rank 7, bishop can't attack the pawn, and
// weaker king can stop the opponent's king from penetrating.
if ( relative_rank(strongerSide, weakerPawnSq) == RANK_7
&& opposite_colors(bishopSq, weakerPawnSq)
&& square_distance(weakerPawnSq, weakerKingSq) <= square_distance(weakerPawnSq, strongerKingSq))
return SCALE_FACTOR_DRAW;
}
return SCALE_FACTOR_NONE;
}
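The opposite_colors(bishopSq, weakerPawnSq) test above is the classic "wrong bishop" condition: a bishop that does not travel on the colour of the blockading pawn's square can never attack that pawn. Below is a minimal standalone sketch of that colour test, assuming the conventional 0..63 square indexing with a1 = 0; the helpers are illustrative stand-ins, not the engine's own implementation.

#include <cassert>

typedef int Square;   // assumed 0..63 indexing, a1 = 0, h8 = 63

// Colour of a square is the parity of file + rank (a1 is a dark square).
inline bool is_dark(Square s) { return (((s % 8) + (s / 8)) % 2) == 0; }

// Stand-in for the opposite_colors() call used in the hunk above.
inline bool on_opposite_colors(Square s1, Square s2) { return is_dark(s1) != is_dark(s2); }

int main() {
  const Square C1 = 2;    // dark square, home of a dark-squared bishop
  const Square B7 = 49;   // light square, where the defending pawn sits
  // A dark-squared bishop can never capture the pawn on b7, so the blockade holds.
  assert(on_opposite_colors(C1, B7));
  return 0;
}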

View file

@ -577,16 +577,15 @@ Value do_evaluate(const Position& pos, Value& margin) {
mobility += MobilityBonus[Piece][mob];
if (Piece == BISHOP && (PseudoAttacks[Piece][pos.king_square(Them)] & s)) {
const Bitboard between = BetweenBB[s][pos.king_square(Them)] & pos.pieces();
if (!more_than_one(between))
score += make_score(25, 25);
}
// Decrease score if we are attacked by an enemy pawn. Remaining part
// of threat evaluation must be done later when we have full attack info.
if (ei.attackedBy[Them][PAWN] & s)
score -= ThreatenedByPawnPenalty[Piece];
else if (Piece == BISHOP && (PseudoAttacks[Piece][pos.king_square(Them)] & s)) {
const Bitboard between = BetweenBB[s][pos.king_square(Them)] & pos.pieces();
if (!more_than_one(between))
score += make_score(15, 25);
}
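Both versions of the bonus above rely on the same test: the bishop pseudo-attacks the enemy king square and at most one piece stands in between, i.e. the bishop gives a direct x-ray or pins exactly one piece (the point of this "Bishop pins only" change). A self-contained sketch of that test on a plain 0..63 board follows; the hand-rolled diagonal walk replaces the engine's PseudoAttacks/BetweenBB tables and is illustrative only.

#include <cassert>
#include <cstdint>
#include <cstdlib>

typedef uint64_t Bitboard;
typedef int Square;   // 0..63, a1 = 0

inline int file_of(Square s) { return s % 8; }
inline int rank_of(Square s) { return s / 8; }
inline bool more_than_one(Bitboard b) { return b & (b - 1); }

// Occupied squares strictly between two squares that share a diagonal
// (0 if they do not). A tiny stand-in for BetweenBB[s1][s2] & pos.pieces().
Bitboard occupied_between(Square a, Square b, Bitboard occ) {
  int df = file_of(b) - file_of(a), dr = rank_of(b) - rank_of(a);
  if (df == 0 || std::abs(df) != std::abs(dr))
      return 0;
  int step = (dr > 0 ? 8 : -8) + (df > 0 ? 1 : -1);
  Bitboard between = 0;
  for (Square s = a + step; s != b; s += step)
      between |= 1ULL << s;
  return between & occ;
}

// True if the bishop x-rays the king directly or pins exactly one piece.
bool bishop_pins_or_xrays(Square bishopSq, Square kingSq, Bitboard occupancy) {
  int df = std::abs(file_of(bishopSq) - file_of(kingSq));
  int dr = std::abs(rank_of(bishopSq) - rank_of(kingSq));
  if (df != dr || df == 0)   // not on a common diagonal: the PseudoAttacks test fails
      return false;
  return !more_than_one(occupied_between(bishopSq, kingSq, occupancy));
}

int main() {
  const Square C1 = 2, E3 = 20, F4 = 29, H6 = 47;   // c1-d2-e3-f4-g5-h6 diagonal
  Bitboard occ = 1ULL << F4;                        // one piece between: a pin, bonus applies
  assert(bishop_pins_or_xrays(C1, H6, occ));
  occ |= 1ULL << E3;                                // two pieces between: no bonus
  assert(!bishop_pins_or_xrays(C1, H6, occ));
  return 0;
}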
// Bishop and knight outposts squares
if ( (Piece == BISHOP || Piece == KNIGHT)
@ -693,7 +692,8 @@ Value do_evaluate(const Position& pos, Value& margin) {
& ~ei.attackedBy[Them][0];
if (undefendedMinors)
score += UndefendedMinorPenalty;
score += more_than_one(undefendedMinors) ? UndefendedMinorPenalty * 2
: UndefendedMinorPenalty;
// Enemy pieces not defended by a pawn and under our attack
weakEnemies = pos.pieces(Them)

View file

@ -1,72 +0,0 @@
/*
Stockfish, a UCI chess playing engine derived from Glaurung 2.1
Copyright (C) 2004-2008 Tord Romstad (Glaurung author)
Copyright (C) 2008-2012 Marco Costalba, Joona Kiiski, Tord Romstad
Stockfish is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Stockfish is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#if !defined(HISTORY_H_INCLUDED)
#define HISTORY_H_INCLUDED
#include <algorithm>
#include <cstring>
#include "types.h"
/// The History class stores statistics about how often different moves
/// have been successful or unsuccessful during the current search. These
/// statistics are used for reduction and move ordering decisions. History
/// entries are stored according only to moving piece and destination square,
/// in particular two moves with different origin but same destination and
/// same piece will be considered identical.
class History {
public:
void clear();
Value value(Piece p, Square to) const;
void add(Piece p, Square to, Value bonus);
Value gain(Piece p, Square to) const;
void update_gain(Piece p, Square to, Value g);
static const Value MaxValue = Value(2000);
private:
Value history[PIECE_NB][SQUARE_NB];
Value maxGains[PIECE_NB][SQUARE_NB];
};
inline void History::clear() {
memset(history, 0, 16 * 64 * sizeof(Value));
memset(maxGains, 0, 16 * 64 * sizeof(Value));
}
inline Value History::value(Piece p, Square to) const {
return history[p][to];
}
inline void History::add(Piece p, Square to, Value bonus) {
if (abs(history[p][to] + bonus) < MaxValue) history[p][to] += bonus;
}
inline Value History::gain(Piece p, Square to) const {
return maxGains[p][to];
}
inline void History::update_gain(Piece p, Square to, Value g) {
maxGains[p][to] = std::max(g, maxGains[p][to] - 1);
}
#endif // !defined(HISTORY_H_INCLUDED)

View file

@ -249,41 +249,38 @@ namespace {
}
FORCE_INLINE MoveStack* generate_king_moves(const Position& pos, MoveStack* mlist,
Color us, Bitboard target) {
Square from = pos.king_square(us);
Bitboard b = pos.attacks_from<KING>(from) & target;
SERIALIZE(b);
return mlist;
}
template<GenType Type> FORCE_INLINE
MoveStack* generate_all_moves(const Position& pos, MoveStack* mlist, Color us,
MoveStack* generate_all(const Position& pos, MoveStack* mlist, Color us,
Bitboard target, const CheckInfo* ci = NULL) {
const bool Checks = Type == QUIET_CHECKS;
mlist = (us == WHITE ? generate_pawn_moves<WHITE, Type>(pos, mlist, target, ci)
: generate_pawn_moves<BLACK, Type>(pos, mlist, target, ci));
mlist = generate_moves<KNIGHT, Type == QUIET_CHECKS>(pos, mlist, us, target, ci);
mlist = generate_moves<BISHOP, Type == QUIET_CHECKS>(pos, mlist, us, target, ci);
mlist = generate_moves<ROOK, Type == QUIET_CHECKS>(pos, mlist, us, target, ci);
mlist = generate_moves<QUEEN, Type == QUIET_CHECKS>(pos, mlist, us, target, ci);
mlist = generate_moves<KNIGHT, Checks>(pos, mlist, us, target, ci);
mlist = generate_moves<BISHOP, Checks>(pos, mlist, us, target, ci);
mlist = generate_moves<ROOK, Checks>(pos, mlist, us, target, ci);
mlist = generate_moves<QUEEN, Checks>(pos, mlist, us, target, ci);
if (Type != QUIET_CHECKS && Type != EVASIONS)
mlist = generate_king_moves(pos, mlist, us, target);
{
Square from = pos.king_square(us);
Bitboard b = pos.attacks_from<KING>(from) & target;
SERIALIZE(b);
}
if (Type != CAPTURES && Type != EVASIONS && pos.can_castle(us))
{
if (pos.is_chess960())
{
mlist = generate_castle<KING_SIDE, Type == QUIET_CHECKS, true>(pos, mlist, us);
mlist = generate_castle<QUEEN_SIDE, Type == QUIET_CHECKS, true>(pos, mlist, us);
mlist = generate_castle<KING_SIDE, Checks, true>(pos, mlist, us);
mlist = generate_castle<QUEEN_SIDE, Checks, true>(pos, mlist, us);
}
else
{
mlist = generate_castle<KING_SIDE, Type == QUIET_CHECKS, false>(pos, mlist, us);
mlist = generate_castle<QUEEN_SIDE, Type == QUIET_CHECKS, false>(pos, mlist, us);
mlist = generate_castle<KING_SIDE, Checks, false>(pos, mlist, us);
mlist = generate_castle<QUEEN_SIDE, Checks, false>(pos, mlist, us);
}
}
@ -310,18 +307,12 @@ MoveStack* generate(const Position& pos, MoveStack* mlist) {
assert(!pos.checkers());
Color us = pos.side_to_move();
Bitboard target;
if (Type == CAPTURES)
target = pos.pieces(~us);
Bitboard target = Type == CAPTURES ? pos.pieces(~us)
: Type == QUIETS ? ~pos.pieces()
: Type == NON_EVASIONS ? ~pos.pieces(us) : 0;
else if (Type == QUIETS)
target = ~pos.pieces();
else if (Type == NON_EVASIONS)
target = ~pos.pieces(us);
return generate_all_moves<Type>(pos, mlist, us, target);
return generate_all<Type>(pos, mlist, us, target);
}
// Explicit template instantiations
@ -337,7 +328,6 @@ MoveStack* generate<QUIET_CHECKS>(const Position& pos, MoveStack* mlist) {
assert(!pos.checkers());
Color us = pos.side_to_move();
CheckInfo ci(pos);
Bitboard dc = ci.dcCandidates;
@ -357,7 +347,7 @@ MoveStack* generate<QUIET_CHECKS>(const Position& pos, MoveStack* mlist) {
SERIALIZE(b);
}
return generate_all_moves<QUIET_CHECKS>(pos, mlist, us, ~pos.pieces(), &ci);
return generate_all<QUIET_CHECKS>(pos, mlist, pos.side_to_move(), ~pos.pieces(), &ci);
}
@ -419,7 +409,7 @@ MoveStack* generate<EVASIONS>(const Position& pos, MoveStack* mlist) {
// Generate blocking evasions or captures of the checking piece
Bitboard target = between_bb(checksq, ksq) | pos.checkers();
return generate_all_moves<EVASIONS>(pos, mlist, us, target);
return generate_all<EVASIONS>(pos, mlist, us, target);
}

View file

@ -18,10 +18,8 @@
along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <algorithm>
#include <cassert>
#include "movegen.h"
#include "movepick.h"
#include "thread.h"
@ -37,6 +35,20 @@ namespace {
STOP
};
// Our insertion sort, guaranteed to be stable, as is needed
void insertion_sort(MoveStack* begin, MoveStack* end)
{
MoveStack tmp, *p, *q;
for (p = begin + 1; p < end; ++p)
{
tmp = *p;
for (q = p; q != begin && *(q-1) < tmp; --q)
*q = *(q-1);
*q = tmp;
}
}
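The "guaranteed to be stable" remark matters because moves that end up with equal scores should keep the order in which the generator produced them, which std::sort does not promise. A tiny self-contained check of that property, using a stripped-down stand-in for MoveStack (the real struct and its comparison live in movegen.h):

#include <cassert>

struct MoveStack { int move; int score; };   // minimal stand-in for the demo
inline bool operator<(const MoveStack& a, const MoveStack& b) { return a.score < b.score; }

// Same algorithm as above: stable insertion sort, highest scores first.
void insertion_sort(MoveStack* begin, MoveStack* end) {
  MoveStack tmp, *p, *q;
  for (p = begin + 1; p < end; ++p)
  {
      tmp = *p;
      for (q = p; q != begin && *(q-1) < tmp; --q)
          *q = *(q-1);
      *q = tmp;
  }
}

int main() {
  MoveStack m[] = { {1, 10}, {2, 50}, {3, 10}, {4, 50} };
  insertion_sort(m, m + 4);
  // Scores come out descending, and the equal-scored pairs (2,4) and (1,3)
  // keep their original generation order.
  assert(m[0].move == 2 && m[1].move == 4 && m[2].move == 1 && m[3].move == 3);
  return 0;
}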
// Unary predicate used by std::partition to split positive scores from the
// remaining ones, so that the two sets can be sorted separately, with the second sort delayed.
inline bool has_positive_score(const MoveStack& ms) { return ms.score > 0; }
@ -59,7 +71,7 @@ namespace {
/// move ordering is at the current node.
MovePicker::MovePicker(const Position& p, Move ttm, Depth d, const History& h,
Search::Stack* s, Value beta) : pos(p), H(h), depth(d) {
Search::Stack* s, Value beta) : pos(p), Hist(h), depth(d) {
assert(d > DEPTH_ZERO);
@ -92,7 +104,7 @@ MovePicker::MovePicker(const Position& p, Move ttm, Depth d, const History& h,
}
MovePicker::MovePicker(const Position& p, Move ttm, Depth d, const History& h,
Square sq) : pos(p), H(h), cur(moves), end(moves) {
Square sq) : pos(p), Hist(h), cur(moves), end(moves) {
assert(d <= DEPTH_ZERO);
@ -124,7 +136,7 @@ MovePicker::MovePicker(const Position& p, Move ttm, Depth d, const History& h,
}
MovePicker::MovePicker(const Position& p, Move ttm, const History& h, PieceType pt)
: pos(p), H(h), cur(moves), end(moves) {
: pos(p), Hist(h), cur(moves), end(moves) {
assert(!pos.checkers());
@ -141,12 +153,10 @@ MovePicker::MovePicker(const Position& p, Move ttm, const History& h, PieceType
}
/// MovePicker::score_captures(), MovePicker::score_noncaptures() and
/// MovePicker::score_evasions() assign a numerical move ordering score
/// to each move in a move list. The moves with highest scores will be
/// picked first by next_move().
void MovePicker::score_captures() {
/// score() assigns a numerical move ordering score to each move in a move list.
/// The moves with the highest scores will be picked first.
template<>
void MovePicker::score<CAPTURES>() {
// Winning and equal captures in the main search are ordered by MVV/LVA.
// Surprisingly, this appears to perform slightly better than SEE based
// move ordering. The reason is probably that in a position with a winning
@ -169,47 +179,50 @@ void MovePicker::score_captures() {
- type_of(pos.piece_moved(m));
if (type_of(m) == PROMOTION)
it->score += PieceValue[MG][promotion_type(m)];
it->score += PieceValue[MG][promotion_type(m)] - PieceValue[MG][PAWN];
else if (type_of(m) == ENPASSANT)
it->score += PieceValue[MG][PAWN];
}
}
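To put numbers on the MVV/LVA rule used above: the score is the middlegame value of the captured piece minus the type index of the attacker, so the most valuable victim comes first and, for equal victims, the cheapest attacker breaks the tie. A short sketch with round, illustrative piece values (not the engine's actual PieceValue table):

#include <cassert>

enum PieceType { NO_PIECE_TYPE, PAWN, KNIGHT, BISHOP, ROOK, QUEEN, KING };

// Illustrative centipawn values only; the real table lives in types.h.
const int ValueMg[KING + 1] = { 0, 100, 325, 325, 500, 975, 0 };

// Same formula as score<CAPTURES>() above: victim value minus attacker type.
int mvv_lva(PieceType victim, PieceType attacker) {
  return ValueMg[victim] - attacker;
}

int main() {
  // Any capture of a queen outranks any capture of a rook ...
  assert(mvv_lva(QUEEN, QUEEN) > mvv_lva(ROOK, PAWN));
  // ... and among captures of the same victim the cheapest attacker goes first.
  assert(mvv_lva(QUEEN, PAWN) > mvv_lva(QUEEN, ROOK));
  return 0;
}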
void MovePicker::score_noncaptures() {
template<>
void MovePicker::score<QUIETS>() {
Move m;
for (MoveStack* it = moves; it != end; ++it)
{
m = it->move;
it->score = H.value(pos.piece_moved(m), to_sq(m));
it->score = Hist[pos.piece_moved(m)][to_sq(m)];
}
}
void MovePicker::score_evasions() {
template<>
void MovePicker::score<EVASIONS>() {
// Try good captures ordered by MVV/LVA, then non-captures if destination square
// is not under attack, ordered by history value, then bad-captures and quiet
// moves with a negative SEE. This last group is ordered by the SEE score.
Move m;
int seeScore;
if (end < moves + 2)
return;
for (MoveStack* it = moves; it != end; ++it)
{
m = it->move;
if ((seeScore = pos.see_sign(m)) < 0)
it->score = seeScore - History::MaxValue; // Be sure we are at the bottom
it->score = seeScore - History::Max; // At the bottom
else if (pos.is_capture(m))
it->score = PieceValue[MG][pos.piece_on(to_sq(m))]
- type_of(pos.piece_moved(m)) + History::MaxValue;
- type_of(pos.piece_moved(m)) + History::Max;
else
it->score = H.value(pos.piece_moved(m), to_sq(m));
it->score = Hist[pos.piece_moved(m)][to_sq(m)];
}
}
/// MovePicker::generate_next() generates, scores and sorts the next bunch of moves,
/// when there are no more moves to try for the current phase.
/// generate_next() generates, scores and sorts the next bunch of moves, when
/// there are no more moves to try for the current phase.
void MovePicker::generate_next() {
@ -219,7 +232,7 @@ void MovePicker::generate_next() {
case CAPTURES_S1: case CAPTURES_S3: case CAPTURES_S4: case CAPTURES_S5: case CAPTURES_S6:
end = generate<CAPTURES>(pos, moves);
score_captures();
score<CAPTURES>();
return;
case KILLERS_S1:
@ -229,16 +242,16 @@ void MovePicker::generate_next() {
case QUIETS_1_S1:
endQuiets = end = generate<QUIETS>(pos, moves);
score_noncaptures();
score<QUIETS>();
end = std::partition(cur, end, has_positive_score);
sort<MoveStack>(cur, end);
insertion_sort(cur, end);
return;
case QUIETS_2_S1:
cur = end;
end = endQuiets;
if (depth >= 3 * ONE_PLY)
sort<MoveStack>(cur, end);
insertion_sort(cur, end);
return;
case BAD_CAPTURES_S1:
@ -249,7 +262,8 @@ void MovePicker::generate_next() {
case EVASIONS_S2:
end = generate<EVASIONS>(pos, moves);
score_evasions();
if (end > moves + 1)
score<EVASIONS>();
return;
case QUIET_CHECKS_S3:
@ -268,11 +282,10 @@ void MovePicker::generate_next() {
}
/// MovePicker::next_move() is the most important method of the MovePicker class.
/// It returns a new pseudo legal move every time it is called, until there
/// are no more moves left. It picks the move with the biggest score from a list
/// of generated moves taking care not to return the tt move if has already been
/// searched previously.
/// next_move() is the most important method of the MovePicker class. It returns
/// a new pseudo legal move every time it is called, until there are no more moves
/// left. It picks the move with the biggest score from a list of generated moves,
/// taking care not to return the ttMove if it has already been searched previously.
template<>
Move MovePicker::next_move<false>() {
@ -359,6 +372,6 @@ Move MovePicker::next_move<false>() {
/// Version of next_move() to use at split point nodes where the move is grabbed
/// from the split point's shared MovePicker object. This function is not thread
/// safe so should be lock protected by the caller.
/// safe so must be lock protected by the caller.
template<>
Move MovePicker::next_move<true>() { return ss->sp->mp->next_move<false>(); }
Move MovePicker::next_move<true>() { return ss->sp->movePicker->next_move<false>(); }

View file

@ -20,12 +20,48 @@
#if !defined MOVEPICK_H_INCLUDED
#define MOVEPICK_H_INCLUDED
#include "history.h"
#include <algorithm> // For std::max
#include <cstring> // For memset
#include "movegen.h"
#include "position.h"
#include "search.h"
#include "types.h"
/// The Stats struct stores move statistics. Depending on the template parameter
/// it can store either History or Gains statistics. History records how often
/// different moves have been successful or unsuccessful during the current search
/// and is used for reduction and move ordering decisions. Gains records the move's
/// best evaluation gain from one ply to the next and is used for pruning decisions.
/// Entries are indexed only by moving piece and destination square; in particular,
/// two moves with different origins but the same destination and the same piece
/// are considered identical.
template<bool Gain>
struct Stats {
static const Value Max = Value(2000);
const Value* operator[](Piece p) const { return &table[p][0]; }
void clear() { memset(table, 0, sizeof(table)); }
void update(Piece p, Square to, Value v) {
if (Gain)
table[p][to] = std::max(v, table[p][to] - 1);
else if (abs(table[p][to] + v) < Max)
table[p][to] += v;
}
private:
Value table[PIECE_NB][SQUARE_NB];
};
typedef Stats<false> History;
typedef Stats<true> Gains;
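A quick sketch of how the two instantiations behave, mirroring the update() logic above with plain ints and a smaller table (the piece and square indices below are placeholders, and this stand-in is illustrative, not the engine's class): History is a bounded accumulator per (piece, destination), Gains a slowly decaying maximum of the observed evaluation swing.

#include <algorithm>
#include <cassert>
#include <cstdlib>
#include <cstring>

template<bool Gain>
struct MiniStats {
  static const int Max = 2000;
  const int* operator[](int p) const { return &table[p][0]; }
  void clear() { std::memset(table, 0, sizeof(table)); }
  void update(int p, int to, int v) {
    if (Gain)
        table[p][to] = std::max(v, table[p][to] - 1);   // decaying maximum
    else if (std::abs(table[p][to] + v) < Max)
        table[p][to] += v;                              // bounded accumulator
  }
private:
  int table[16][64];
};

int main() {
  MiniStats<false> hist;   // plays the role of History
  MiniStats<true>  gain;   // plays the role of Gains
  hist.clear(); gain.clear();

  const int piece = 5, to = 28;    // placeholder indices
  hist.update(piece, to, 9);       // e.g. the depth*depth cut-off bonus at depth 3
  hist.update(piece, to, 9);
  assert(hist[piece][to] == 18);

  gain.update(piece, to, 40);      // a 40cp swing observed for this piece/square ...
  gain.update(piece, to, 10);      // ... later only 10cp: keep max(10, 40 - 1)
  assert(gain[piece][to] == 39);
  return 0;
}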
/// MovePicker class is used to pick one pseudo legal move at a time from the
/// current position. The most important method is next_move(), which returns a
/// new pseudo legal move each time it is called, until there are no moves left,
@ -44,13 +80,11 @@ public:
template<bool SpNode> Move next_move();
private:
void score_captures();
void score_noncaptures();
void score_evasions();
template<GenType> void score();
void generate_next();
const Position& pos;
const History& H;
const History& Hist;
Search::Stack* ss;
Depth depth;
Move ttMove;

View file

@ -110,8 +110,7 @@ const string move_to_san(Position& pos, Move m) {
assert(MoveList<LEGAL>(pos).contains(m));
Bitboard attackers;
bool ambiguousMove, ambiguousFile, ambiguousRank;
Bitboard others, b;
string san;
Color us = pos.side_to_move();
Square from = from_sq(m);
@ -127,31 +126,23 @@ const string move_to_san(Position& pos, Move m) {
{
san = PieceToChar[WHITE][pt]; // Upper case
// Disambiguation if we have more than one piece with destination 'to';
// note that it is not needed for pawns because the starting file is explicit.
ambiguousMove = ambiguousFile = ambiguousRank = false;
// Disambiguation if we have more than one piece of type 'pt' that can
// reach 'to' with a legal move.
others = b = (pos.attacks_from(pc, to) & pos.pieces(us, pt)) ^ from;
attackers = (pos.attacks_from(pc, to) & pos.pieces(us, pt)) ^ from;
while (attackers)
while (b)
{
Square sq = pop_lsb(&attackers);
// If the move is illegal, the piece is not included in the sub-set
if (!pos.pl_move_is_legal(make_move(sq, to), pos.pinned_pieces()))
continue;
ambiguousFile |= file_of(sq) == file_of(from);
ambiguousRank |= rank_of(sq) == rank_of(from);
ambiguousMove = true;
Move move = make_move(pop_lsb(&b), to);
if (!pos.pl_move_is_legal(move, pos.pinned_pieces()))
others ^= from_sq(move);
}
if (ambiguousMove)
if (others)
{
if (!ambiguousFile)
if (!(others & file_bb(from)))
san += file_to_char(file_of(from));
else if (!ambiguousRank)
else if (!(others & rank_bb(from)))
san += rank_to_char(rank_of(from));
else
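The if/else chain above implements SAN's usual fallback: add the origin file if that alone is unique, otherwise the origin rank, otherwise the full origin square; `others` holds the other same-type pieces that could legally reach `to`, so no disambiguator is emitted when it is empty. A hypothetical sketch of just that decision (the helper name and flags are mine, not the engine's):

#include <cassert>
#include <string>

// Decide which part of the origin square a SAN move needs to be unambiguous.
std::string disambiguator(bool othersExist, bool othersShareFile, bool othersShareRank,
                          char fromFile, char fromRank) {
  if (!othersExist)
      return "";                               // e.g. Nd2 when only one knight can reach d2
  if (!othersShareFile)
      return std::string(1, fromFile);         // e.g. Nbd2 vs Nfd2
  if (!othersShareRank)
      return std::string(1, fromRank);         // e.g. R1a3 vs R5a3
  return std::string() + fromFile + fromRank;  // full square for the rare triple ambiguity
}

int main() {
  // Knights on b1 and f3 can both reach d2: their files differ, so "Nbd2".
  assert(disambiguator(true, false, false, 'b', '1') == "b");
  // Rooks on a1 and a5 can both reach a3: same file, different rank, so "R1a3".
  assert(disambiguator(true, true, false, 'a', '1') == "1");
  return 0;
}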

View file

@ -740,13 +740,6 @@ void Position::do_move(Move m, StateInfo& newSt, const CheckInfo& ci, bool moveI
st->rule50++;
st->pliesFromNull++;
if (type_of(m) == CASTLE)
{
st->key = k;
do_castle_move<true>(m);
return;
}
Color us = sideToMove;
Color them = ~us;
Square from = from_sq(m);
@ -756,9 +749,25 @@ void Position::do_move(Move m, StateInfo& newSt, const CheckInfo& ci, bool moveI
PieceType capture = type_of(m) == ENPASSANT ? PAWN : type_of(piece_on(to));
assert(color_of(piece) == us);
assert(piece_on(to) == NO_PIECE || color_of(piece_on(to)) == them);
assert(piece_on(to) == NO_PIECE || color_of(piece_on(to)) == them || type_of(m) == CASTLE);
assert(capture != KING);
if (type_of(m) == CASTLE)
{
assert(piece == make_piece(us, KING));
bool kingSide = to > from;
Square rfrom = to; // Castle is encoded as "king captures friendly rook"
Square rto = relative_square(us, kingSide ? SQ_F1 : SQ_D1);
to = relative_square(us, kingSide ? SQ_G1 : SQ_C1);
capture = NO_PIECE_TYPE;
do_castle(from, to, rfrom, rto);
st->psqScore += psq_delta(make_piece(us, ROOK), rfrom, rto);
k ^= Zobrist::psq[us][ROOK][rfrom] ^ Zobrist::psq[us][ROOK][rto];
}
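With the "king captures friendly rook" encoding, from_sq() and to_sq() of a CASTLE move are the king's and rook's starting squares; the real destinations are derived as in the block above. A small sketch of that mapping for White, using the standard 0..63 layout (a1 = 0) and a hypothetical helper name; Black would go through relative_square() as in the engine code.

#include <cassert>

typedef int Square;   // 0..63, a1 = 0
enum { SQ_A1 = 0, SQ_C1 = 2, SQ_D1 = 3, SQ_E1 = 4, SQ_F1 = 5, SQ_G1 = 6, SQ_H1 = 7 };

// Derive the real king and rook destinations from a castle move encoded as
// "king captures friendly rook" (White's back rank only, for the demo).
void castle_destinations(Square from, Square to, Square& kto, Square& rto) {
  bool kingSide = to > from;          // rook on the king's right means O-O
  kto = kingSide ? SQ_G1 : SQ_C1;
  rto = kingSide ? SQ_F1 : SQ_D1;
}

int main() {
  Square kto, rto;
  castle_destinations(SQ_E1, SQ_H1, kto, rto);   // white O-O is encoded as e1h1
  assert(kto == SQ_G1 && rto == SQ_F1);
  castle_destinations(SQ_E1, SQ_A1, kto, rto);   // white O-O-O is encoded as e1a1
  assert(kto == SQ_C1 && rto == SQ_D1);
  return 0;
}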
if (capture)
{
Square capsq = to;
@ -793,7 +802,7 @@ void Position::do_move(Move m, StateInfo& newSt, const CheckInfo& ci, bool moveI
// Update piece list, move the last piece at index[capsq] position and
// shrink the list.
//
// WARNING: This is a not revresible operation. When we will reinsert the
// WARNING: This is a not reversible operation. When we will reinsert the
// captured piece in undo_move() we will put it at the end of the list and
// not in its original place, it means index[] and pieceList[] are not
// guaranteed to be invariant to a do_move() + undo_move() sequence.
@ -802,9 +811,10 @@ void Position::do_move(Move m, StateInfo& newSt, const CheckInfo& ci, bool moveI
pieceList[them][capture][index[lastSquare]] = lastSquare;
pieceList[them][capture][pieceCount[them][capture]] = SQ_NONE;
// Update hash keys
// Update material hash key and prefetch access to materialTable
k ^= Zobrist::psq[them][capture][capsq];
st->materialKey ^= Zobrist::psq[them][capture][pieceCount[them][capture]];
prefetch((char*)thisThread->materialTable[st->materialKey]);
// Update incremental scores
st->psqScore -= pieceSquareTable[make_piece(them, capture)][capsq];
@ -831,22 +841,25 @@ void Position::do_move(Move m, StateInfo& newSt, const CheckInfo& ci, bool moveI
st->castleRights &= ~cr;
}
// Prefetch TT access as soon as we know key is updated
// Prefetch TT access as soon as we know the new hash key
prefetch((char*)TT.first_entry(k));
// Move the piece
// Move the piece. The tricky Chess960 castle is handled earlier
if (type_of(m) != CASTLE)
{
Bitboard from_to_bb = SquareBB[from] ^ SquareBB[to];
byTypeBB[ALL_PIECES] ^= from_to_bb;
byTypeBB[pt] ^= from_to_bb;
byColorBB[us] ^= from_to_bb;
board[to] = board[from];
board[from] = NO_PIECE;
board[to] = piece;
// Update piece lists, index[from] is not updated and becomes stale. This
// works as long as index[] is accessed just by known occupied squares.
index[to] = index[from];
pieceList[us][pt][index[to]] = to;
}
// If the moving piece is a pawn do some special extra work
if (pt == PAWN)
@ -894,17 +907,14 @@ void Position::do_move(Move m, StateInfo& newSt, const CheckInfo& ci, bool moveI
st->npMaterial[us] += PieceValue[MG][promotion];
}
// Update pawn hash key
// Update pawn hash key and prefetch access to pawnsTable
st->pawnKey ^= Zobrist::psq[us][PAWN][from] ^ Zobrist::psq[us][PAWN][to];
prefetch((char*)thisThread->pawnsTable[st->pawnKey]);
// Reset rule 50 draw counter
st->rule50 = 0;
}
// Prefetch pawn and material hash tables
prefetch((char*)thisThread->pawnsTable[st->pawnKey]);
prefetch((char*)thisThread->materialTable[st->materialKey]);
// Update incremental scores
st->psqScore += psq_delta(piece, from, to);
@ -954,22 +964,14 @@ void Position::undo_move(Move m) {
sideToMove = ~sideToMove;
if (type_of(m) == CASTLE)
{
do_castle_move<false>(m);
return;
}
Color us = sideToMove;
Color them = ~us;
Square from = from_sq(m);
Square to = to_sq(m);
Piece piece = piece_on(to);
PieceType pt = type_of(piece);
PieceType pt = type_of(piece_on(to));
PieceType capture = st->capturedType;
assert(is_empty(from));
assert(color_of(piece) == us);
assert(is_empty(from) || type_of(m) == CASTLE);
assert(capture != KING);
if (type_of(m) == PROMOTION)
@ -997,19 +999,32 @@ void Position::undo_move(Move m) {
pt = PAWN;
}
if (type_of(m) == CASTLE)
{
bool kingSide = to > from;
Square rfrom = to; // Castle is encoded as "king captures friendly rook"
Square rto = relative_square(us, kingSide ? SQ_F1 : SQ_D1);
to = relative_square(us, kingSide ? SQ_G1 : SQ_C1);
capture = NO_PIECE_TYPE;
pt = KING;
do_castle(to, from, rto, rfrom);
}
else
{
// Put the piece back at the source square
Bitboard from_to_bb = SquareBB[from] ^ SquareBB[to];
byTypeBB[ALL_PIECES] ^= from_to_bb;
byTypeBB[pt] ^= from_to_bb;
byColorBB[us] ^= from_to_bb;
board[from] = board[to];
board[to] = NO_PIECE;
board[from] = make_piece(us, pt);
// Update piece lists, index[to] is not updated and becomes stale. This
// works as long as index[] is accessed just by known occupied squares.
index[from] = index[to];
pieceList[us][pt][index[from]] = from;
}
if (capture)
{
@ -1044,44 +1059,12 @@ void Position::undo_move(Move m) {
}
/// Position::do_castle_move() is a private method used to do/undo a castling
/// move. Note that castling moves are encoded as "king captures friendly rook"
/// moves, for instance white short castling in a non-Chess960 game is encoded
/// as e1h1.
template<bool Do>
void Position::do_castle_move(Move m) {
/// Position::do_castle() is a helper used to do/undo a castling move. This
/// is a bit tricky, especially in Chess960.
assert(is_ok(m));
assert(type_of(m) == CASTLE);
Square kto, kfrom, rfrom, rto, kAfter, rAfter;
void Position::do_castle(Square kfrom, Square kto, Square rfrom, Square rto) {
Color us = sideToMove;
Square kBefore = from_sq(m);
Square rBefore = to_sq(m);
// Find after-castle squares for king and rook
if (rBefore > kBefore) // O-O
{
kAfter = relative_square(us, SQ_G1);
rAfter = relative_square(us, SQ_F1);
}
else // O-O-O
{
kAfter = relative_square(us, SQ_C1);
rAfter = relative_square(us, SQ_D1);
}
kfrom = Do ? kBefore : kAfter;
rfrom = Do ? rBefore : rAfter;
kto = Do ? kAfter : kBefore;
rto = Do ? rAfter : rBefore;
assert(piece_on(kfrom) == make_piece(us, KING));
assert(piece_on(rfrom) == make_piece(us, ROOK));
// Move the pieces, with some care; in chess960 could be kto == rfrom
Bitboard k_from_to_bb = SquareBB[kfrom] ^ SquareBB[kto];
Bitboard r_from_to_bb = SquareBB[rfrom] ^ SquareBB[rto];
byTypeBB[KING] ^= k_from_to_bb;
@ -1089,98 +1072,56 @@ void Position::do_castle_move(Move m) {
byTypeBB[ALL_PIECES] ^= k_from_to_bb ^ r_from_to_bb;
byColorBB[us] ^= k_from_to_bb ^ r_from_to_bb;
// Update board
Piece king = make_piece(us, KING);
Piece rook = make_piece(us, ROOK);
// Could be from == to, so first set NO_PIECE then KING and ROOK
board[kfrom] = board[rfrom] = NO_PIECE;
board[kto] = king;
board[rto] = rook;
board[kto] = make_piece(us, KING);
board[rto] = make_piece(us, ROOK);
// Update piece lists
pieceList[us][KING][index[kfrom]] = kto;
pieceList[us][ROOK][index[rfrom]] = rto;
int tmp = index[rfrom]; // In Chess960 could be kto == rfrom
index[kto] = index[kfrom];
index[rto] = tmp;
// Could be kfrom == rto, so use a 'tmp' variable
int tmp = index[kfrom];
index[rto] = index[rfrom];
index[kto] = tmp;
pieceList[us][KING][index[kto]] = kto;
pieceList[us][ROOK][index[rto]] = rto;
}
if (Do)
{
// Reset capture field
st->capturedType = NO_PIECE_TYPE;
// Update incremental scores
st->psqScore += psq_delta(king, kfrom, kto);
st->psqScore += psq_delta(rook, rfrom, rto);
/// Position::do(undo)_null_move() is used to do(undo) a "null move": It flips
/// the side to move without executing any move on the board.
// Update hash key
st->key ^= Zobrist::psq[us][KING][kfrom] ^ Zobrist::psq[us][KING][kto];
st->key ^= Zobrist::psq[us][ROOK][rfrom] ^ Zobrist::psq[us][ROOK][rto];
void Position::do_null_move(StateInfo& newSt) {
assert(!checkers());
memcpy(&newSt, st, sizeof(StateInfo)); // Fully copy here
newSt.previous = st;
st = &newSt;
// Clear en passant square
if (st->epSquare != SQ_NONE)
{
st->key ^= Zobrist::enpassant[file_of(st->epSquare)];
st->epSquare = SQ_NONE;
}
// Update castling rights
st->key ^= Zobrist::castle[st->castleRights & castleRightsMask[kfrom]];
st->castleRights &= ~castleRightsMask[kfrom];
// Update checkers BB
st->checkersBB = attackers_to(king_square(~us)) & pieces(us);
sideToMove = ~sideToMove;
}
else
// Undo: point our state pointer back to the previous state
st = st->previous;
assert(pos_is_ok());
}
/// Position::do_null_move() is used to do/undo a "null move": It flips the side
/// to move and updates the hash key without executing any move on the board.
template<bool Do>
void Position::do_null_move(StateInfo& backupSt) {
assert(!checkers());
// Back up the information necessary to undo the null move to the supplied
// StateInfo object. Note that differently from normal case here backupSt
// is actually used as a backup storage not as the new state. This reduces
// the number of fields to be copied.
StateInfo* src = Do ? st : &backupSt;
StateInfo* dst = Do ? &backupSt : st;
dst->key = src->key;
dst->epSquare = src->epSquare;
dst->psqScore = src->psqScore;
dst->rule50 = src->rule50;
dst->pliesFromNull = src->pliesFromNull;
sideToMove = ~sideToMove;
if (Do)
{
if (st->epSquare != SQ_NONE)
st->key ^= Zobrist::enpassant[file_of(st->epSquare)];
st->key ^= Zobrist::side;
prefetch((char*)TT.first_entry(st->key));
st->epSquare = SQ_NONE;
st->rule50++;
st->pliesFromNull = 0;
}
sideToMove = ~sideToMove;
assert(pos_is_ok());
}
// Explicit template instantiations
template void Position::do_null_move<false>(StateInfo& backupSt);
template void Position::do_null_move<true>(StateInfo& backupSt);
void Position::undo_null_move() {
assert(!checkers());
st = st->previous;
sideToMove = ~sideToMove;
}
/// Position::see() is a static exchange evaluator: It tries to estimate the
@ -1422,31 +1363,36 @@ Value Position::compute_non_pawn_material(Color c) const {
/// Position::is_draw() tests whether the position is drawn by material,
/// repetition, or the 50 moves rule. It does not detect stalemates, this
/// must be done by the search.
template<bool CheckRepetition, bool CheckThreeFold>
template<bool SkipRepetition>
bool Position::is_draw() const {
// Draw by material?
if ( !pieces(PAWN)
&& (non_pawn_material(WHITE) + non_pawn_material(BLACK) <= BishopValueMg))
return true;
// Draw by the 50 moves rule?
if (st->rule50 > 99 && (!checkers() || MoveList<LEGAL>(*this).size()))
return true;
if (CheckRepetition)
// Draw by repetition?
if (!SkipRepetition)
{
int i = 4, e = std::min(st->rule50, st->pliesFromNull), cnt;
int i = 4, e = std::min(st->rule50, st->pliesFromNull);
if (i <= e)
{
StateInfo* stp = st->previous->previous;
for (cnt = 0; i <= e; i += 2)
{
do {
stp = stp->previous->previous;
if (stp->key == st->key && (!CheckThreeFold || ++cnt >= 2))
if (stp->key == st->key)
return true;
}
i += 2;
} while (i <= e);
}
}
@ -1454,9 +1400,8 @@ bool Position::is_draw() const {
}
// Explicit template instantiations
template bool Position::is_draw<true, true>() const;
template bool Position::is_draw<true, false>() const;
template bool Position::is_draw<false,false>() const;
template bool Position::is_draw<false>() const;
template bool Position::is_draw<true>() const;
/// Position::flip() flips position with the white and black sides reversed. This

View file

@ -154,7 +154,8 @@ public:
void do_move(Move m, StateInfo& st);
void do_move(Move m, StateInfo& st, const CheckInfo& ci, bool moveIsCheck);
void undo_move(Move m);
template<bool Do> void do_null_move(StateInfo& st);
void do_null_move(StateInfo& st);
void undo_null_move();
// Static exchange evaluation
int see(Move m) const;
@ -178,7 +179,7 @@ public:
Thread* this_thread() const;
int64_t nodes_searched() const;
void set_nodes_searched(int64_t n);
template<bool CheckRepetition, bool CheckThreeFold> bool is_draw() const;
template<bool SkipRepetition> bool is_draw() const;
// Position consistency check, for debugging
bool pos_is_ok(int* failedStep = NULL) const;
@ -190,8 +191,8 @@ private:
void put_piece(Piece p, Square s);
void set_castle_right(Color c, Square rfrom);
// Helper template functions
template<bool Do> void do_castle_move(Move m);
// Helper functions
void do_castle(Square kfrom, Square kto, Square rfrom, Square rto);
template<bool FindPinned> Bitboard hidden_checkers() const;
// Computing hash keys from scratch (for initialization and debugging)

View file

@ -26,7 +26,6 @@
#include "book.h"
#include "evaluate.h"
#include "history.h"
#include "movegen.h"
#include "movepick.h"
#include "notation.h"
@ -87,7 +86,8 @@ namespace {
TimeManager TimeMgr;
int BestMoveChanges;
Value DrawValue[COLOR_NB];
History H;
History Hist;
Gains Gain;
template <NodeType NT>
Value search(Position& pos, Stack* ss, Value alpha, Value beta, Depth depth);
@ -98,8 +98,9 @@ namespace {
void id_loop(Position& pos);
Value value_to_tt(Value v, int ply);
Value value_from_tt(Value v, int ply);
bool check_is_dangerous(Position& pos, Move move, Value futilityBase, Value beta);
bool prevents_move(const Position& pos, Move first, Move second);
bool check_is_dangerous(const Position& pos, Move move, Value futilityBase, Value beta);
bool allows(const Position& pos, Move first, Move second);
bool refutes(const Position& pos, Move first, Move second);
string uci_pv(const Position& pos, int depth, Value alpha, Value beta);
struct Skill {
@ -228,22 +229,22 @@ void Search::think() {
// Reset the threads, still sleeping: they will be woken up at split time
for (size_t i = 0; i < Threads.size(); i++)
Threads[i].maxPly = 0;
Threads[i]->maxPly = 0;
Threads.sleepWhileIdle = Options["Use Sleeping Threads"];
// Set best timer interval to avoid lagging under time pressure. Timer is
// used to check for remaining available thinking time.
Threads.timer_thread()->msec =
Threads.timer->msec =
Limits.use_time_management() ? std::min(100, std::max(TimeMgr.available_time() / 16, TimerResolution)) :
Limits.nodes ? 2 * TimerResolution
: 100;
Threads.timer_thread()->notify_one(); // Wake up the recurring timer
Threads.timer->notify_one(); // Wake up the recurring timer
id_loop(RootPos); // Let's start searching !
Threads.timer_thread()->msec = 0; // Stop the timer
Threads.timer->msec = 0; // Stop the timer
Threads.sleepWhileIdle = true; // Send idle threads to sleep
if (Options["Use Search Log"])
@ -299,7 +300,8 @@ namespace {
bestValue = delta = -VALUE_INFINITE;
ss->currentMove = MOVE_NULL; // Hack to skip update gains
TT.new_search();
H.clear();
Hist.clear();
Gain.clear();
PVSize = Options["MultiPV"];
Skill skill(Options["Skill Level"]);
@ -352,7 +354,7 @@ namespace {
// we want to keep the same order for all the moves but the new
// PV that goes to the front. Note that in case of MultiPV search
// the already searched PV lines are preserved.
sort<RootMove>(RootMoves.begin() + PVIdx, RootMoves.end());
std::stable_sort(RootMoves.begin() + PVIdx, RootMoves.end());
// Write PV back to transposition table in case the relevant
// entries have been overwritten during the search.
@ -397,7 +399,8 @@ namespace {
}
// Sort the PV lines searched so far and update the GUI
sort<RootMove>(RootMoves.begin(), RootMoves.begin() + PVIdx + 1);
std::stable_sort(RootMoves.begin(), RootMoves.begin() + PVIdx + 1);
if (PVIdx + 1 == PVSize || Time::now() - SearchTime > 3000)
sync_cout << uci_pv(pos, depth, alpha, beta) << sync_endl;
}
@ -534,7 +537,7 @@ namespace {
if (!RootNode)
{
// Step 2. Check for aborted search and immediate draw
if (Signals.stop || pos.is_draw<true, PvNode>() || ss->ply > MAX_PLY)
if (Signals.stop || pos.is_draw<false>() || ss->ply > MAX_PLY)
return DrawValue[pos.side_to_move()];
// Step 3. Mate distance pruning. Even if we mate at the next move our score
@ -591,8 +594,8 @@ namespace {
else if (tte)
{
// Never assume anything on values stored in TT
if ( (ss->staticEval = eval = tte->static_value()) == VALUE_NONE
||(ss->evalMargin = tte->static_value_margin()) == VALUE_NONE)
if ( (ss->staticEval = eval = tte->eval_value()) == VALUE_NONE
||(ss->evalMargin = tte->eval_margin()) == VALUE_NONE)
eval = ss->staticEval = evaluate(pos, ss->evalMargin);
// Can ttValue be used as a better position evaluation?
@ -617,7 +620,7 @@ namespace {
&& type_of(move) == NORMAL)
{
Square to = to_sq(move);
H.update_gain(pos.piece_on(to), to, -(ss-1)->staticEval - ss->staticEval);
Gain.update(pos.piece_on(to), to, -(ss-1)->staticEval - ss->staticEval);
}
// Step 6. Razoring (is omitted in PV nodes)
@ -667,12 +670,12 @@ namespace {
if (eval - PawnValueMg > beta)
R += ONE_PLY;
pos.do_null_move<true>(st);
pos.do_null_move(st);
(ss+1)->skipNullMove = true;
nullValue = depth-R < ONE_PLY ? -qsearch<NonPV, false>(pos, ss+1, -beta, -alpha, DEPTH_ZERO)
: - search<NonPV>(pos, ss+1, -beta, -alpha, depth-R);
(ss+1)->skipNullMove = false;
pos.do_null_move<false>(st);
pos.undo_null_move();
if (nullValue >= beta)
{
@ -692,9 +695,21 @@ namespace {
return nullValue;
}
else
{
// The null move failed low, which means that we may be faced with
// some kind of threat.
// some kind of threat. If the previous move was reduced, check if
// the move that refuted the null move was somehow connected to the
// move which was reduced. If a connection is found, return a fail
// low score (which will cause the reduced move to fail high in the
// parent node, which will trigger a re-search with full depth).
threatMove = (ss+1)->currentMove;
if ( depth < 5 * ONE_PLY
&& (ss-1)->reduction
&& threatMove != MOVE_NONE
&& allows(pos, (ss-1)->currentMove, threatMove))
return beta - 1;
}
}
// Step 9. ProbCut (is omitted in PV nodes)
@ -715,7 +730,7 @@ namespace {
assert((ss-1)->currentMove != MOVE_NONE);
assert((ss-1)->currentMove != MOVE_NULL);
MovePicker mp(pos, ttMove, H, pos.captured_piece_type());
MovePicker mp(pos, ttMove, Hist, pos.captured_piece_type());
CheckInfo ci(pos);
while ((move = mp.next_move<false>()) != MOVE_NONE)
@ -747,7 +762,7 @@ namespace {
split_point_start: // At split points actual search starts from here
MovePicker mp(pos, ttMove, depth, H, ss, PvNode ? -VALUE_INFINITE : beta);
MovePicker mp(pos, ttMove, depth, Hist, ss, PvNode ? -VALUE_INFINITE : beta);
CheckInfo ci(pos);
value = bestValue; // Workaround a bogus 'uninitialized' warning under gcc
singularExtensionNode = !RootNode
@ -847,12 +862,13 @@ split_point_start: // At split points actual search starts from here
&& !inCheck
&& !dangerous
&& move != ttMove
&& (!threatMove || !prevents_move(pos, move, threatMove))
&& (bestValue > VALUE_MATED_IN_MAX_PLY || ( bestValue == -VALUE_INFINITE
&& alpha > VALUE_MATED_IN_MAX_PLY)))
{
// Move count based pruning
if (depth < 16 * ONE_PLY && moveCount >= FutilityMoveCounts[depth])
if ( depth < 16 * ONE_PLY
&& moveCount >= FutilityMoveCounts[depth]
&& (!threatMove || !refutes(pos, move, threatMove)))
{
if (SpNode)
sp->mutex.lock();
@ -865,7 +881,7 @@ split_point_start: // At split points actual search starts from here
// but fixing this made program slightly weaker.
Depth predictedDepth = newDepth - reduction<PvNode>(depth, moveCount);
futilityValue = ss->staticEval + ss->evalMargin + futility_margin(predictedDepth, moveCount)
+ H.gain(pos.piece_moved(move), to_sq(move));
+ Gain[pos.piece_moved(move)][to_sq(move)];
if (futilityValue < beta)
{
@ -907,8 +923,9 @@ split_point_start: // At split points actual search starts from here
&& !pvMove
&& !captureOrPromotion
&& !dangerous
&& ss->killers[0] != move
&& ss->killers[1] != move)
&& move != ttMove
&& move != ss->killers[0]
&& move != ss->killers[1])
{
ss->reduction = reduction<PvNode>(depth, moveCount);
Depth d = std::max(newDepth - ss->reduction, ONE_PLY);
@ -1008,13 +1025,13 @@ split_point_start: // At split points actual search starts from here
// Step 19. Check for splitting the search
if ( !SpNode
&& depth >= Threads.minimumSplitDepth
&& Threads.slave_available(thisThread)
&& Threads.available_slave(thisThread)
&& thisThread->splitPointsSize < MAX_SPLITPOINTS_PER_THREAD)
{
assert(bestValue < beta);
bestValue = Threads.split<FakeSplit>(pos, ss, alpha, beta, bestValue, &bestMove,
depth, threatMove, moveCount, mp, NT);
thisThread->split<FakeSplit>(pos, ss, alpha, beta, &bestValue, &bestMove,
depth, threatMove, moveCount, &mp, NT);
if (bestValue >= beta)
break;
}
@ -1057,13 +1074,13 @@ split_point_start: // At split points actual search starts from here
// Increase history value of the cut-off move
Value bonus = Value(int(depth) * int(depth));
H.add(pos.piece_moved(bestMove), to_sq(bestMove), bonus);
Hist.update(pos.piece_moved(bestMove), to_sq(bestMove), bonus);
// Decrease history of all the other played non-capture moves
for (int i = 0; i < playedMoveCount - 1; i++)
{
Move m = movesSearched[i];
H.add(pos.piece_moved(m), to_sq(m), -bonus);
Hist.update(pos.piece_moved(m), to_sq(m), -bonus);
}
}
}
@ -1109,7 +1126,7 @@ split_point_start: // At split points actual search starts from here
ss->ply = (ss-1)->ply + 1;
// Check for an instant draw or maximum ply reached
if (pos.is_draw<false, false>() || ss->ply > MAX_PLY)
if (pos.is_draw<true>() || ss->ply > MAX_PLY)
return DrawValue[pos.side_to_move()];
// Transposition table lookup. At PV nodes, we don't use the TT for
@ -1147,8 +1164,8 @@ split_point_start: // At split points actual search starts from here
if (tte)
{
// Never assume anything on values stored in TT
if ( (ss->staticEval = bestValue = tte->static_value()) == VALUE_NONE
||(ss->evalMargin = tte->static_value_margin()) == VALUE_NONE)
if ( (ss->staticEval = bestValue = tte->eval_value()) == VALUE_NONE
||(ss->evalMargin = tte->eval_margin()) == VALUE_NONE)
ss->staticEval = bestValue = evaluate(pos, ss->evalMargin);
}
else
@ -1175,7 +1192,7 @@ split_point_start: // At split points actual search starts from here
// to search the moves. Because the depth is <= 0 here, only captures,
// queen promotions and checks (only if depth >= DEPTH_QS_CHECKS) will
// be generated.
MovePicker mp(pos, ttMove, depth, H, to_sq((ss-1)->currentMove));
MovePicker mp(pos, ttMove, depth, Hist, to_sq((ss-1)->currentMove));
CheckInfo ci(pos);
// Loop through the moves until no moves remain or a beta cutoff occurs
@ -1318,7 +1335,7 @@ split_point_start: // At split points actual search starts from here
// check_is_dangerous() tests if a checking move can be pruned in qsearch()
bool check_is_dangerous(Position& pos, Move move, Value futilityBase, Value beta)
bool check_is_dangerous(const Position& pos, Move move, Value futilityBase, Value beta)
{
Piece pc = pos.piece_moved(move);
Square from = from_sq(move);
@ -1343,6 +1360,7 @@ split_point_start: // At split points actual search starts from here
Bitboard b = (enemies ^ ksq) & newAtt & ~oldAtt;
while (b)
{
// Note that here we generate illegal "double move"!
if (futilityBase + PieceValue[EG][pos.piece_on(pop_lsb(&b))] >= beta)
return true;
}
@ -1351,12 +1369,52 @@ split_point_start: // At split points actual search starts from here
}
// prevents_move() tests whether a move (first) is able to defend against an
// opponent's move (second). In this case will not be pruned. Normally the
// second move is the threat move (the best move returned from a null search
// that fails low).
// allows() tests whether the 'first' move at the previous ply somehow makes the
// 'second' move possible, for instance if the moving piece is the same in
// both moves. Normally the second move is the threat (the best move returned
// from a null search that fails low).
bool prevents_move(const Position& pos, Move first, Move second) {
bool allows(const Position& pos, Move first, Move second) {
assert(is_ok(first));
assert(is_ok(second));
assert(color_of(pos.piece_on(from_sq(second))) == ~pos.side_to_move());
assert(color_of(pos.piece_on(to_sq(first))) == ~pos.side_to_move());
Square m1from = from_sq(first);
Square m2from = from_sq(second);
Square m1to = to_sq(first);
Square m2to = to_sq(second);
// The piece is the same or second's destination was vacated by the first move
if (m1to == m2from || m2to == m1from)
return true;
// Second one moves through the square vacated by first one
if (between_bb(m2from, m2to) & m1from)
return true;
// Second's destination is defended by the first move's piece
Bitboard m1att = pos.attacks_from(pos.piece_on(m1to), m1to, pos.pieces() ^ m2from);
if (m1att & m2to)
return true;
// Second move gives a discovered check through the first's checking piece
if (m1att & pos.king_square(pos.side_to_move()))
{
assert(between_bb(m1to, pos.king_square(pos.side_to_move())) & m2from);
return true;
}
return false;
}
// refutes() tests whether a 'first' move is able to defend against a 'second'
// opponent's move. In that case the first move will not be pruned. Normally the
// second move is the threat (the best move returned from a null search that fails low).
bool refutes(const Position& pos, Move first, Move second) {
assert(is_ok(first));
assert(is_ok(second));
@ -1455,8 +1513,8 @@ split_point_start: // At split points actual search starts from here
int selDepth = 0;
for (size_t i = 0; i < Threads.size(); i++)
if (Threads[i].maxPly > selDepth)
selDepth = Threads[i].maxPly;
if (Threads[i]->maxPly > selDepth)
selDepth = Threads[i]->maxPly;
for (size_t i = 0; i < uciPVSize; i++)
{
@ -1516,7 +1574,7 @@ void RootMove::extract_pv_from_tt(Position& pos) {
&& pos.is_pseudo_legal(m = tte->move()) // Local copy, TT could change
&& pos.pl_move_is_legal(m, pos.pinned_pieces())
&& ply < MAX_PLY
&& (!pos.is_draw<true, true>() || ply < 2));
&& (!pos.is_draw<false>() || ply < 2));
pv.push_back(MOVE_NONE); // Must be zero-terminating
@ -1558,7 +1616,7 @@ void Thread::idle_loop() {
// at the thread creation. So it means we are the split point's master.
const SplitPoint* this_sp = splitPointsSize ? activeSplitPoint : NULL;
assert(!this_sp || (this_sp->master == this && searching));
assert(!this_sp || (this_sp->masterThread == this && searching));
// If this thread is the master of a split point and all slaves have finished
// their work at this split point, return from the idle loop.
@ -1614,9 +1672,9 @@ void Thread::idle_loop() {
sp->mutex.lock();
assert(sp->slavesPositions[idx] == NULL);
assert(activePosition == NULL);
sp->slavesPositions[idx] = &pos;
activePosition = &pos;
switch (sp->nodeType) {
case Root:
@ -1635,18 +1693,18 @@ void Thread::idle_loop() {
assert(searching);
searching = false;
sp->slavesPositions[idx] = NULL;
activePosition = NULL;
sp->slavesMask &= ~(1ULL << idx);
sp->nodes += pos.nodes_searched();
// Wake up the master thread to allow it to return from the idle loop
// in case we are the last slave of the split point.
if ( Threads.sleepWhileIdle
&& this != sp->master
&& this != sp->masterThread
&& !sp->slavesMask)
{
assert(!sp->master->searching);
sp->master->notify_one();
assert(!sp->masterThread->searching);
sp->masterThread->notify_one();
}
// After releasing the lock we cannot access anymore any SplitPoint
@ -1684,11 +1742,11 @@ void check_time() {
nodes = RootPos.nodes_searched();
// Loop across all split points and sum accumulated SplitPoint nodes plus
// all the currently active slaves positions.
// all the currently active positions nodes.
for (size_t i = 0; i < Threads.size(); i++)
for (int j = 0; j < Threads[i].splitPointsSize; j++)
for (int j = 0; j < Threads[i]->splitPointsSize; j++)
{
SplitPoint& sp = Threads[i].splitPoints[j];
SplitPoint& sp = Threads[i]->splitPoints[j];
sp.mutex.lock();
@ -1696,8 +1754,9 @@ void check_time() {
Bitboard sm = sp.slavesMask;
while (sm)
{
Position* pos = sp.slavesPositions[pop_lsb(&sm)];
nodes += pos ? pos->nodes_searched() : 0;
Position* pos = Threads[pop_lsb(&sm)]->activePosition;
if (pos)
nodes += pos->nodes_searched();
}
sp.mutex.unlock();

View file

@ -56,12 +56,11 @@ struct Stack {
/// all non-pv moves.
struct RootMove {
RootMove(){} // Needed by sort()
RootMove(Move m) : score(-VALUE_INFINITE), prevScore(-VALUE_INFINITE) {
pv.push_back(m); pv.push_back(MOVE_NONE);
}
bool operator<(const RootMove& m) const { return score < m.score; }
bool operator<(const RootMove& m) const { return score > m.score; } // Ascending sort
bool operator==(const Move& m) const { return pv[0] == m; }
void extract_pv_from_tt(Position& pos);

View file

@ -17,6 +17,7 @@
along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <algorithm> // For std::count
#include <cassert>
#include <iostream>
@ -42,11 +43,12 @@ namespace { extern "C" {
// Thread c'tor starts a newly-created thread of execution that will call
// the virtual function idle_loop(), going immediately to sleep.
Thread::Thread() : splitPoints() {
Thread::Thread() /* : splitPoints() */ { // Value-initialization bug in MSVC
searching = exit = false;
maxPly = splitPointsSize = 0;
activeSplitPoint = NULL;
activePosition = NULL;
idx = Threads.size();
if (!thread_create(handle, start_routine, this))
@ -146,7 +148,7 @@ void Thread::wait_for(volatile const bool& b) {
bool Thread::cutoff_occurred() const {
for (SplitPoint* sp = activeSplitPoint; sp; sp = sp->parent)
for (SplitPoint* sp = activeSplitPoint; sp; sp = sp->parentSplitPoint)
if (sp->cutoff)
return true;
@ -185,7 +187,7 @@ void ThreadPool::init() {
sleepWhileIdle = true;
timer = new TimerThread();
threads.push_back(new MainThread());
push_back(new MainThread());
read_uci_options();
}
@ -196,8 +198,8 @@ void ThreadPool::exit() {
delete timer; // As first because check_time() accesses threads data
for (size_t i = 0; i < threads.size(); i++)
delete threads[i];
for (iterator it = begin(); it != end(); ++it)
delete *it;
}
@ -214,13 +216,13 @@ void ThreadPool::read_uci_options() {
assert(requested > 0);
while (threads.size() < requested)
threads.push_back(new Thread());
while (size() < requested)
push_back(new Thread());
while (threads.size() > requested)
while (size() > requested)
{
delete threads.back();
threads.pop_back();
delete back();
pop_back();
}
}
@ -228,13 +230,13 @@ void ThreadPool::read_uci_options() {
// slave_available() tries to find an idle thread which is available as a slave
// for the thread 'master'.
bool ThreadPool::slave_available(Thread* master) const {
Thread* ThreadPool::available_slave(Thread* master) const {
for (size_t i = 0; i < threads.size(); i++)
if (threads[i]->is_available_to(master))
return true;
for (const_iterator it = begin(); it != end(); ++it)
if ((*it)->is_available_to(master))
return *it;
return false;
return NULL;
}
@ -248,34 +250,31 @@ bool ThreadPool::slave_available(Thread* master) const {
// search() then split() returns.
template <bool Fake>
Value ThreadPool::split(Position& pos, Stack* ss, Value alpha, Value beta,
Value bestValue, Move* bestMove, Depth depth, Move threatMove,
int moveCount, MovePicker& mp, int nodeType) {
void Thread::split(Position& pos, Stack* ss, Value alpha, Value beta, Value* bestValue,
Move* bestMove, Depth depth, Move threatMove, int moveCount,
MovePicker* movePicker, int nodeType) {
assert(pos.pos_is_ok());
assert(bestValue <= alpha && alpha < beta && beta <= VALUE_INFINITE);
assert(bestValue > -VALUE_INFINITE);
assert(*bestValue <= alpha && alpha < beta && beta <= VALUE_INFINITE);
assert(*bestValue > -VALUE_INFINITE);
assert(depth >= Threads.minimumSplitDepth);
Thread* master = pos.this_thread();
assert(master->searching);
assert(master->splitPointsSize < MAX_SPLITPOINTS_PER_THREAD);
assert(searching);
assert(splitPointsSize < MAX_SPLITPOINTS_PER_THREAD);
// Pick the next available split point from the split point stack
SplitPoint& sp = master->splitPoints[master->splitPointsSize];
SplitPoint& sp = splitPoints[splitPointsSize];
sp.master = master;
sp.parent = master->activeSplitPoint;
sp.slavesMask = 1ULL << master->idx;
sp.masterThread = this;
sp.parentSplitPoint = activeSplitPoint;
sp.slavesMask = 1ULL << idx;
sp.depth = depth;
sp.bestValue = *bestValue;
sp.bestMove = *bestMove;
sp.threatMove = threatMove;
sp.alpha = alpha;
sp.beta = beta;
sp.nodeType = nodeType;
sp.bestValue = bestValue;
sp.mp = &mp;
sp.movePicker = movePicker;
sp.moveCount = moveCount;
sp.pos = &pos;
sp.nodes = 0;
@ -285,25 +284,27 @@ Value ThreadPool::split(Position& pos, Stack* ss, Value alpha, Value beta,
// Try to allocate available threads and ask them to start searching setting
// 'searching' flag. This must be done under lock protection to avoid concurrent
// allocation of the same slave by another master.
mutex.lock();
Threads.mutex.lock();
sp.mutex.lock();
master->splitPointsSize++;
master->activeSplitPoint = &sp;
splitPointsSize++;
activeSplitPoint = &sp;
activePosition = NULL;
size_t slavesCnt = 1; // Master is always included
size_t slavesCnt = 1; // This thread is always included
Thread* slave;
for (size_t i = 0; i < threads.size() && !Fake; ++i)
if (threads[i]->is_available_to(master) && ++slavesCnt <= maxThreadsPerSplitPoint)
while ( (slave = Threads.available_slave(this)) != NULL
&& ++slavesCnt <= Threads.maxThreadsPerSplitPoint && !Fake)
{
sp.slavesMask |= 1ULL << threads[i]->idx;
threads[i]->activeSplitPoint = &sp;
threads[i]->searching = true; // Slave leaves idle_loop()
threads[i]->notify_one(); // Could be sleeping
sp.slavesMask |= 1ULL << slave->idx;
slave->activeSplitPoint = &sp;
slave->searching = true; // Slave leaves idle_loop()
slave->notify_one(); // Could be sleeping
}
sp.mutex.unlock();
mutex.unlock();
Threads.mutex.unlock();
// Everything is set up. The master thread enters the idle loop, from which
// it will instantly launch a search, because its 'searching' flag is set.
@ -311,34 +312,35 @@ Value ThreadPool::split(Position& pos, Stack* ss, Value alpha, Value beta,
// their work at this split point.
if (slavesCnt > 1 || Fake)
{
master->Thread::idle_loop(); // Force a call to base class idle_loop()
Thread::idle_loop(); // Force a call to base class idle_loop()
// In the helpful-master concept a master can help only a sub-tree of its own
// split point, and because everything is finished here the master cannot be booked.
assert(!master->searching);
assert(!searching);
assert(!activePosition);
}
// We have returned from the idle loop, which means that all threads are
// finished. Note that setting 'searching' and decreasing splitPointsSize is
// done under lock protection to avoid a race with Thread::is_available_to().
mutex.lock();
Threads.mutex.lock();
sp.mutex.lock();
master->searching = true;
master->splitPointsSize--;
master->activeSplitPoint = sp.parent;
searching = true;
splitPointsSize--;
activeSplitPoint = sp.parentSplitPoint;
activePosition = &pos;
pos.set_nodes_searched(pos.nodes_searched() + sp.nodes);
*bestMove = sp.bestMove;
*bestValue = sp.bestValue;
sp.mutex.unlock();
mutex.unlock();
return sp.bestValue;
Threads.mutex.unlock();
}
// Explicit template instantiations
template Value ThreadPool::split<false>(Position&, Stack*, Value, Value, Value, Move*, Depth, Move, int, MovePicker&, int);
template Value ThreadPool::split<true>(Position&, Stack*, Value, Value, Value, Move*, Depth, Move, int, MovePicker&, int);
template void Thread::split<false>(Position&, Stack*, Value, Value, Value*, Move*, Depth, Move, int, MovePicker*, int);
template void Thread::split< true>(Position&, Stack*, Value, Value, Value*, Move*, Depth, Move, int, MovePicker*, int);
// wait_for_think_finished() waits for main thread to go to sleep then returns
@ -370,7 +372,8 @@ void ThreadPool::start_thinking(const Position& pos, const LimitsType& limits,
RootMoves.clear();
for (MoveList<LEGAL> ml(pos); !ml.end(); ++ml)
if (searchMoves.empty() || count(searchMoves.begin(), searchMoves.end(), ml.move()))
if ( searchMoves.empty()
|| std::count(searchMoves.begin(), searchMoves.end(), ml.move()))
RootMoves.push_back(RootMove(ml.move()));
main_thread()->thinking = true;

View file

@ -63,19 +63,18 @@ struct SplitPoint {
// Const data after split point has been setup
const Position* pos;
const Search::Stack* ss;
Thread* master;
Thread* masterThread;
Depth depth;
Value beta;
int nodeType;
Move threatMove;
// Const pointers to shared data
MovePicker* mp;
SplitPoint* parent;
MovePicker* movePicker;
SplitPoint* parentSplitPoint;
// Shared data
Mutex mutex;
Position* slavesPositions[MAX_THREADS];
volatile uint64_t slavesMask;
volatile int64_t nodes;
volatile Value alpha;
@ -102,10 +101,15 @@ struct Thread {
bool is_available_to(Thread* master) const;
void wait_for(volatile const bool& b);
template <bool Fake>
void split(Position& pos, Search::Stack* ss, Value alpha, Value beta, Value* bestValue, Move* bestMove,
Depth depth, Move threatMove, int moveCount, MovePicker* movePicker, int nodeType);
SplitPoint splitPoints[MAX_SPLITPOINTS_PER_THREAD];
Material::Table materialTable;
Endgames endgames;
Pawns::Table pawnsTable;
Position* activePosition;
size_t idx;
int maxPly;
Mutex mutex;
@ -134,40 +138,28 @@ struct TimerThread : public Thread {
};
/// ThreadPool class handles all the threads related stuff like init, starting,
/// ThreadPool struct handles all the threads related stuff like init, starting,
/// parking and, most importantly, launching a slave thread at a split point.
/// All the access to shared thread data is done through this class.
class ThreadPool {
struct ThreadPool : public std::vector<Thread*> {
public:
void init(); // No c'tor and d'tor, threads rely on globals that should
void exit(); // be initialized and valid during the whole thread lifetime.
Thread& operator[](size_t id) { return *threads[id]; }
size_t size() const { return threads.size(); }
MainThread* main_thread() { return static_cast<MainThread*>(threads[0]); }
TimerThread* timer_thread() { return timer; }
MainThread* main_thread() { return static_cast<MainThread*>((*this)[0]); }
void read_uci_options();
bool slave_available(Thread* master) const;
Thread* available_slave(Thread* master) const;
void wait_for_think_finished();
void start_thinking(const Position&, const Search::LimitsType&,
const std::vector<Move>&, Search::StateStackPtr&);
template <bool Fake>
Value split(Position& pos, Search::Stack* ss, Value alpha, Value beta, Value bestValue, Move* bestMove,
Depth depth, Move threatMove, int moveCount, MovePicker& mp, int nodeType);
bool sleepWhileIdle;
Depth minimumSplitDepth;
size_t maxThreadsPerSplitPoint;
Mutex mutex;
ConditionVariable sleepCondition;
private:
std::vector<Thread*> threads;
TimerThread* timer;
size_t maxThreadsPerSplitPoint;
};
extern ThreadPool Threads;

View file

@ -25,35 +25,25 @@
TranspositionTable TT; // Our global transposition table
TranspositionTable::TranspositionTable() {
size = generation = 0;
entries = NULL;
}
TranspositionTable::~TranspositionTable() {
delete [] entries;
}
/// TranspositionTable::set_size() sets the size of the transposition table,
/// measured in megabytes. The transposition table consists of a power of 2 number of
/// TTCluster and each cluster consists of ClusterSize number of TTEntries. Each
/// non-empty entry contains information about exactly one position.
/// measured in megabytes. The transposition table consists of a power of 2 number
/// of clusters and each cluster consists of ClusterSize number of TTEntry objects.
void TranspositionTable::set_size(size_t mbSize) {
size_t newSize = 1ULL << msb((mbSize << 20) / sizeof(TTCluster));
assert(msb((mbSize << 20) / sizeof(TTEntry)) < 32);
if (newSize == size)
uint32_t size = ClusterSize << msb((mbSize << 20) / sizeof(TTEntry[ClusterSize]));
if (hashMask == size - ClusterSize)
return;
size = newSize;
delete [] entries;
entries = new (std::nothrow) TTCluster[size];
hashMask = size - ClusterSize;
delete [] table;
table = new (std::nothrow) TTEntry[size];
if (!entries)
if (!table)
{
std::cerr << "Failed to allocate " << mbSize
<< "MB for transposition table." << std::endl;
@ -70,7 +60,7 @@ void TranspositionTable::set_size(size_t mbSize) {
void TranspositionTable::clear() {
memset(entries, 0, size * sizeof(TTCluster));
memset(table, 0, (hashMask + ClusterSize) * sizeof(TTEntry));
}
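set_size() above rounds the requested megabytes down to a power-of-two entry count and stores hashMask = size - ClusterSize, so the two low bits of the mask are zero and first_entry() always lands on a cluster boundary. A self-contained worked example of that arithmetic for a 128 MB request, assuming sizeof(TTEntry) == 16 bytes (my assumption, consistent with the field list in tt.h further down):

#include <cassert>
#include <cstddef>
#include <cstdint>

// Worked example of the sizing arithmetic in set_size() for mbSize == 128,
// with an assumed 16-byte TTEntry.
int main() {
    const unsigned    ClusterSize = 4;
    const std::size_t entrySize   = 16;   // assumed sizeof(TTEntry)
    const std::size_t mbSize      = 128;

    // msb(x): index of the highest set bit, a stand-in for the engine's msb()
    auto msb = [](std::size_t x) { unsigned b = 0; while (x >>= 1) ++b; return b; };

    uint32_t size     = ClusterSize << msb((mbSize << 20) / (entrySize * ClusterSize));
    uint32_t hashMask = size - ClusterSize;

    assert(size == (1u << 23));                   // 2^23 entries, i.e. 2^21 clusters
    assert((hashMask & (ClusterSize - 1)) == 0);  // low bits clear, so a masked key
    return 0;                                     // always hits a cluster boundary
}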
@ -82,23 +72,23 @@ void TranspositionTable::clear() {
/// more valuable than a TTEntry t2 if t1 is from the current search and t2 is from
/// a previous search, or if the depth of t1 is greater than the depth of t2.
void TranspositionTable::store(const Key posKey, Value v, Bound t, Depth d, Move m, Value statV, Value kingD) {
void TranspositionTable::store(const Key key, Value v, Bound t, Depth d, Move m, Value statV, Value kingD) {
int c1, c2, c3;
TTEntry *tte, *replace;
uint32_t posKey32 = posKey >> 32; // Use the high 32 bits as key inside the cluster
uint32_t key32 = key >> 32; // Use the high 32 bits as key inside the cluster
tte = replace = first_entry(posKey);
tte = replace = first_entry(key);
for (int i = 0; i < ClusterSize; i++, tte++)
for (unsigned i = 0; i < ClusterSize; i++, tte++)
{
if (!tte->key() || tte->key() == posKey32) // Empty or overwrite old
if (!tte->key() || tte->key() == key32) // Empty or overwrite old
{
// Preserve any existing ttMove
if (m == MOVE_NONE)
m = tte->move();
tte->save(posKey32, v, t, d, m, generation, statV, kingD);
tte->save(key32, v, t, d, m, generation, statV, kingD);
return;
}
@ -110,7 +100,7 @@ void TranspositionTable::store(const Key posKey, Value v, Bound t, Depth d, Move
if (c1 + c2 + c3 > 0)
replace = tte;
}
replace->save(posKey32, v, t, d, m, generation, statV, kingD);
replace->save(key32, v, t, d, m, generation, statV, kingD);
}
@ -118,24 +108,14 @@ void TranspositionTable::store(const Key posKey, Value v, Bound t, Depth d, Move
/// transposition table. Returns a pointer to the TTEntry or NULL if the
/// position is not found.
TTEntry* TranspositionTable::probe(const Key posKey) const {
TTEntry* TranspositionTable::probe(const Key key) const {
uint32_t posKey32 = posKey >> 32;
TTEntry* tte = first_entry(posKey);
TTEntry* tte = first_entry(key);
uint32_t key32 = key >> 32;
for (int i = 0; i < ClusterSize; i++, tte++)
if (tte->key() == posKey32)
for (unsigned i = 0; i < ClusterSize; i++, tte++)
if (tte->key() == key32)
return tte;
return NULL;
}
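store() scans the four entries of the target cluster, overwriting an empty slot or one that already holds the same 32-bit key, and only otherwise falls back to the replacement scoring elided by the hunk above. A toy, self-contained model of that selection; the Slot struct and the depth-only fallback are assumptions for illustration, not the engine's code:

#include <cstdint>

struct Slot { uint32_t key32 = 0; int depth = 0; };   // toy stand-in for TTEntry

// Return the slot that a store would write: an empty slot or one with the same
// key wins immediately; otherwise some replacement policy picks a victim.
Slot* pick_slot(Slot cluster[4], uint32_t key32) {
    Slot* replace = &cluster[0];
    for (int i = 0; i < 4; ++i) {
        if (!cluster[i].key32 || cluster[i].key32 == key32)
            return &cluster[i];                       // empty or overwrite old
        if (cluster[i].depth < replace->depth)        // simplistic fallback; the real
            replace = &cluster[i];                    // scoring also weighs generation
    }                                                 // and bound type
    return replace;
}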
/// TranspositionTable::new_search() is called at the beginning of every new
/// search. It increments the "generation" variable, which is used to
/// distinguish transposition table entries from previous searches from
/// entries from the current search.
void TranspositionTable::new_search() {
generation++;
}

View file

@ -44,7 +44,7 @@
class TTEntry {
public:
void save(uint32_t k, Value v, Bound b, Depth d, Move m, int g, Value statV, Value statM) {
void save(uint32_t k, Value v, Bound b, Depth d, Move m, int g, Value ev, Value em) {
key32 = (uint32_t)k;
move16 = (uint16_t)m;
@ -52,8 +52,8 @@ public:
generation8 = (uint8_t)g;
value16 = (int16_t)v;
depth16 = (int16_t)d;
staticValue = (int16_t)statV;
staticMargin = (int16_t)statM;
evalValue = (int16_t)ev;
evalMargin = (int16_t)em;
}
void set_generation(int g) { generation8 = (uint8_t)g; }
@ -63,52 +63,41 @@ public:
Value value() const { return (Value)value16; }
Bound type() const { return (Bound)bound; }
int generation() const { return (int)generation8; }
Value static_value() const { return (Value)staticValue; }
Value static_value_margin() const { return (Value)staticMargin; }
Value eval_value() const { return (Value)evalValue; }
Value eval_margin() const { return (Value)evalMargin; }
private:
uint32_t key32;
uint16_t move16;
uint8_t bound, generation8;
int16_t value16, depth16, staticValue, staticMargin;
int16_t value16, depth16, evalValue, evalMargin;
};
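Given the field list above, a TTEntry packs into 16 bytes, so a cluster of four entries fills exactly one 64-byte cache line, which is what the ClusterSize comments below rely on. A small sketch that checks this arithmetic with a mirror struct; the mirror and its lack of padding are assumptions about typical alignment, not the engine's actual type:

#include <cstdint>

// Mirror of the TTEntry field layout shown above, used only to check sizes.
struct EntryMirror {
    uint32_t key32;                                   // 4 bytes
    uint16_t move16;                                  // 2 bytes
    uint8_t  bound, generation8;                      // 2 bytes
    int16_t  value16, depth16, evalValue, evalMargin; // 8 bytes
};

static_assert(sizeof(EntryMirror) == 16, "entry expected to pack into 16 bytes");
static_assert(4 * sizeof(EntryMirror) == 64, "a 4-entry cluster spans one cache line");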
/// This is the number of TTEntry slots for each cluster
const int ClusterSize = 4;
/// TTCluster consists of ClusterSize number of TTEntries. The size of a TTCluster
/// must not be bigger than a cache line. If it is smaller, it should
/// be padded to guarantee always-aligned accesses.
struct TTCluster {
TTEntry data[ClusterSize];
};
/// The transposition table class. This is basically just a huge array containing
/// TTCluster objects, and a few methods for writing and reading entries.
/// A TranspositionTable consists of a power of 2 number of clusters and each
/// cluster consists of ClusterSize number of TTEntry objects. Each non-empty entry
/// contains information about exactly one position. The size of a cluster must not be
/// bigger than a cache line. If it is smaller, it should be padded to
/// guarantee always-aligned accesses.
class TranspositionTable {
TranspositionTable(const TranspositionTable&);
TranspositionTable& operator=(const TranspositionTable&);
static const unsigned ClusterSize = 4; // A cluster is 64 Bytes
public:
TranspositionTable();
~TranspositionTable();
~TranspositionTable() { delete [] table; }
void new_search() { generation++; }
TTEntry* probe(const Key key) const;
TTEntry* first_entry(const Key key) const;
void refresh(const TTEntry* tte) const;
void set_size(size_t mbSize);
void clear();
void store(const Key posKey, Value v, Bound type, Depth d, Move m, Value statV, Value kingD);
TTEntry* probe(const Key posKey) const;
void new_search();
TTEntry* first_entry(const Key posKey) const;
void refresh(const TTEntry* tte) const;
void store(const Key key, Value v, Bound type, Depth d, Move m, Value statV, Value kingD);
private:
size_t size;
TTCluster* entries;
uint32_t hashMask;
TTEntry* table;
uint8_t generation; // Size must not be bigger than TTEntry::generation8
};
@ -119,9 +108,9 @@ extern TranspositionTable TT;
/// a cluster given a position. The lowest order bits of the key are used to
/// get the index of the cluster.
inline TTEntry* TranspositionTable::first_entry(const Key posKey) const {
inline TTEntry* TranspositionTable::first_entry(const Key key) const {
return entries[((uint32_t)posKey) & (size - 1)].data;
return table + ((uint32_t)key & hashMask);
}
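The rewritten first_entry() folds the old two-step lookup (index a TTCluster, then return its data array) into a single pointer offset: since hashMask has its two low bits cleared, key & hashMask always points at the first entry of a cluster. A self-contained check of that property, reusing the same assumed sizes as the earlier sizing example:

#include <cassert>
#include <cstdint>

int main() {
    const uint32_t ClusterSize = 4;
    const uint32_t size        = 1u << 23;      // number of entries, as assumed above
    const uint32_t hashMask    = size - ClusterSize;

    for (uint32_t key = 0; key < (1u << 20); ++key) {
        uint32_t index = key & hashMask;
        assert(index % ClusterSize == 0);       // always the first entry of a cluster
        assert(index + ClusterSize <= size);    // the whole cluster stays in bounds
    }
    return 0;
}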

View file

@ -489,21 +489,4 @@ inline const std::string square_to_string(Square s) {
return ch;
}
/// Our insertion sort implementation; it works with pointers and iterators and is
/// guaranteed to be stable, as is needed.
template<typename T, typename K>
void sort(K begin, K end)
{
T tmp;
K p, q;
for (p = begin + 1; p < end; p++)
{
tmp = *p;
for (q = p; q != begin && *(q-1) < tmp; --q)
*q = *(q-1);
*q = tmp;
}
}
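The diff drops this hand-rolled stable sort from types.h, and the hunk does not show what its callers now use, so the following is only an illustration of an equivalent standard-library call, not the engine's replacement. Note that the removed template orders elements in descending order (it shifts past smaller elements), so the equivalent comparison is std::greater:

#include <algorithm>
#include <functional>
#include <vector>

int main() {
    std::vector<int> scores = {3, 1, 3, 2};
    // std::stable_sort preserves the relative order of equal elements,
    // which is the property the removed sort() template guaranteed.
    std::stable_sort(scores.begin(), scores.end(), std::greater<int>());
    return 0;
}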
#endif // !defined(TYPES_H_INCLUDED)

View file

@ -44,7 +44,7 @@ namespace {
void set_option(istringstream& up);
void set_position(Position& pos, istringstream& up);
void go(Position& pos, istringstream& up);
void go(const Position& pos, istringstream& up);
}
@ -182,7 +182,7 @@ namespace {
// the thinking time and other parameters from the input string, and starts
// the search.
void go(Position& pos, istringstream& is) {
void go(const Position& pos, istringstream& is) {
Search::LimitsType limits;
vector<Move> searchMoves;