mirror of
https://github.com/sockspls/badfish
synced 2025-04-30 16:53:09 +00:00
Better fix for gcc optimization issue
According to the standard, the compiler is free to choose the enum type as long as it can keep its data. Also, a cast to short and a right shift are implementation defined in the case of a signed integer. Normally all compilers implement this stuff in the "usual" way, but gcc with -O3 and -O2 aggressively pushes the language to its limits to squeeze out even the last bit of speed. And this broke our not-100%-standard-conforming code. The fix is to rewrite the Score enum and the 16-bit word extracting functions in a way that is 100% standard compliant, with no speed regression on gcc or on the other compilers. Verified it works on all compilers and with equivalent functionality. Signed-off-by: Marco Costalba <mcostalba@gmail.com>
This commit is contained in:
parent
0973cc2ef6
commit
8d724220a7
2 changed files with 15 additions and 6 deletions
|
@ -25,11 +25,8 @@ EXE = stockfish
|
|||
### ==========================================================================
|
||||
### Compiler speed switches for both GCC and ICC. These settings are generally
|
||||
### fast on a broad range of systems, but may be changed experimentally
|
||||
###
|
||||
### NOTE: Some versions of gcc miscompile value.h with -O2 or -O3, this is the
|
||||
### safe setup, try changing to -O3 or -O2 and verify it works for you.
|
||||
### ==========================================================================
|
||||
GCCFLAGS = -O1 -msse
|
||||
GCCFLAGS = -O3 -msse
|
||||
ICCFLAGS = -fast -msse
|
||||
ICCFLAGS-OSX = -fast -mdynamic-no-pic
|
||||
|
||||
|
|
16
src/value.h
16
src/value.h
|
@ -56,10 +56,22 @@ enum Value {
|
|||
/// integer (enum), first LSB 16 bits are used to store endgame
|
||||
/// value, while upper bits are used for midgame value.
|
||||
|
||||
// NOTE(review): '1 << 31' left-shifts a 1 into the sign bit of int, which is
// undefined behavior (the value is not representable in a signed 32-bit int).
// This is exactly the construct the replacement definition below avoids.
enum Score { ENSURE_32_BIT_SIZE = 1 << 31 };
|
||||
// Compiler is free to choose the enum type as long as it can keep
|
||||
// its data, so ensure Score is an integer type.
|
||||
// Both a positive and a negative out-of-16-bit-range enumerator are listed,
// so any conforming compiler must pick a signed underlying type of at least
// 32 bits for Score.
enum Score {
    ENSURE_32_BITS_SIZE_P =  (1 << 16),
    ENSURE_32_BITS_SIZE_N = -(1 << 16)
};
|
||||
|
||||
// Extracting the _signed_ lower and upper 16 bits is not so trivial
|
||||
// because according to the standard a simple cast to short is
|
||||
// implementation defined and so is a right shift of a signed integer.
|
||||
// Midgame value: the signed upper 16 bits of the packed score. Adding 32768
// before masking rounds toward the correct 0x10000 multiple, and the signed
// division (well defined, unlike a signed right shift) recovers the value.
inline Value mg_value(Score s) {
    int biased = int(s) + 32768;
    return Value((biased & ~0xffff) / 0x10000);
}
|
||||
|
||||
// Unfortunately on Intel 64 bit we have a small speed regression, so use faster code in
|
||||
// this case, although not 100% standard compliant it seems to work for Intel and MSVC.
|
||||
#if defined(IS_64BIT) && (!defined(__GNUC__) || defined(__INTEL_COMPILER))
|
||||
inline Value eg_value(Score s) { return Value(int16_t(s & 0xffff)); }
|
||||
inline Value mg_value(Score s) { return Value((int(s) + 32768) >> 16); }
|
||||
#else
|
||||
inline Value eg_value(Score s) { return Value((int)(unsigned(s) & 0x7fffu) - (int)(unsigned(s) & 0x8000u)); }
|
||||
#endif
|
||||
|
||||
/// make_score() packs a midgame and an endgame value into a single Score:
/// eg occupies the lower 16 bits, mg the upper 16 bits.
///
/// Multiplication is used instead of 'mg << 16' because left-shifting a
/// negative signed value is undefined behavior, defeating the standard-
/// compliance goal of the surrounding code; 'mg * 0x10000' computes the
/// same value (for the in-range scores used here) and compilers emit the
/// same shift instruction for it.
inline Score make_score(int mg, int eg) { return Score(mg * 0x10000 + eg); }
|
||||
|
||||
|
|
Loading…
Add table
Reference in a new issue