- cd src
script:
+ # Download net
+ - make net
+
# Obtain bench reference from git log
- git log HEAD | grep "\b[Bb]ench[ :]\+[0-9]\{7\}" | head -n 1 | sed "s/[^0-9]*\([0-9]*\).*/\1/g" > git_sig
- export benchref=$(cat git_sig)
- echo "Reference bench:" $benchref
- #
# Compiler version string
- $COMPILER -v
- #
+ # test help target
+ - make help
+
# Verify bench number against various builds
- export CXXFLAGS="-Werror -D_GLIBCXX_DEBUG"
- - make clean && make -j2 ARCH=x86-64 optimize=no debug=yes build && ../tests/signature.sh $benchref
+ - make clean && make -j2 ARCH=x86-64-modern optimize=no debug=yes build && ../tests/signature.sh $benchref
+ - export CXXFLAGS="-Werror"
+ - make clean && make -j2 ARCH=x86-64-modern build && ../tests/signature.sh $benchref
+ - make clean && make -j2 ARCH=x86-64-ssse3 build && ../tests/signature.sh $benchref
+ - make clean && make -j2 ARCH=x86-64-sse3-popcnt build && ../tests/signature.sh $benchref
+ - make clean && make -j2 ARCH=x86-64 build && ../tests/signature.sh $benchref
+ - if [[ "$TRAVIS_OS_NAME" == "linux" ]]; then make clean && make -j2 ARCH=general-64 build && ../tests/signature.sh $benchref; fi
- if [[ "$TRAVIS_OS_NAME" == "linux" ]]; then make clean && make -j2 ARCH=x86-32 optimize=no debug=yes build && ../tests/signature.sh $benchref; fi
+ - if [[ "$TRAVIS_OS_NAME" == "linux" ]]; then make clean && make -j2 ARCH=x86-32-sse41-popcnt build && ../tests/signature.sh $benchref; fi
+ - if [[ "$TRAVIS_OS_NAME" == "linux" ]]; then make clean && make -j2 ARCH=x86-32-sse2 build && ../tests/signature.sh $benchref; fi
- if [[ "$TRAVIS_OS_NAME" == "linux" ]]; then make clean && make -j2 ARCH=x86-32 build && ../tests/signature.sh $benchref; fi
+ - if [[ "$TRAVIS_OS_NAME" == "linux" ]]; then make clean && make -j2 ARCH=general-32 build && ../tests/signature.sh $benchref; fi
+ # workaround: exclude a custom version of llvm+clang, which doesn't find llvm-profdata on ubuntu
+ - if [[ "$TRAVIS_OS_NAME" != "linux" || "$COMP" == "gcc" ]]; then make clean && make -j2 ARCH=x86-64-modern profile-build && ../tests/signature.sh $benchref; fi
+
+ # compile only for some more advanced architectures (might not run in travis)
+ - make clean && make -j2 ARCH=x86-64-avx2 build
+ - make clean && make -j2 ARCH=x86-64-bmi2 build
+ - make clean && make -j2 ARCH=x86-64-avx512 build
+ - make clean && make -j2 ARCH=x86-64-vnni512 build
+ - make clean && make -j2 ARCH=x86-64-vnni256 build
#
# Check perft and reproducible search
- - export CXXFLAGS="-Werror"
- - make clean && make -j2 ARCH=x86-64 build
+ - make clean && make -j2 ARCH=x86-64-modern build
- ../tests/perft.sh
- ../tests/reprosearch.sh
# Valgrind
#
- export CXXFLAGS="-O1 -fno-inline"
- - if [ -x "$(command -v valgrind )" ]; then make clean && make -j2 ARCH=x86-64 debug=yes optimize=no build > /dev/null && ../tests/instrumented.sh --valgrind; fi
+ - if [ -x "$(command -v valgrind )" ]; then make clean && make -j2 ARCH=x86-64-modern debug=yes optimize=no build > /dev/null && ../tests/instrumented.sh --valgrind; fi
- if [ -x "$(command -v valgrind )" ]; then ../tests/instrumented.sh --valgrind-thread; fi
#
# Sanitizer
#
- - if [[ "$TRAVIS_OS_NAME" == "linux" ]]; then make clean && make -j2 ARCH=x86-64 sanitize=undefined optimize=no debug=yes build > /dev/null && ../tests/instrumented.sh --sanitizer-undefined; fi
- - if [[ "$TRAVIS_OS_NAME" == "linux" ]]; then make clean && make -j2 ARCH=x86-64 sanitize=thread optimize=no debug=yes build > /dev/null && ../tests/instrumented.sh --sanitizer-thread; fi
+ - if [[ "$TRAVIS_OS_NAME" == "linux" ]]; then make clean && make -j2 ARCH=x86-64-modern sanitize=undefined optimize=no debug=yes build > /dev/null && ../tests/instrumented.sh --sanitizer-undefined; fi
+ - if [[ "$TRAVIS_OS_NAME" == "linux" ]]; then make clean && make -j2 ARCH=x86-64-modern sanitize=thread optimize=no debug=yes build > /dev/null && ../tests/instrumented.sh --sanitizer-thread; fi
Alayan Feh (Alayan-stk-2)
Alexander Kure
Alexander Pagel (Lolligerhans)
+Alfredo Menezes (lonfom169)
Ali AlZhrani (Cooffe)
Andrew Grant (AndyGrant)
Andrey Neporada (nepal)
candirufish
Chess13234
Chris Cain (ceebo)
+Dale Weiler (graphitemaster)
Dan Schmidt (dfannius)
Daniel Axtens (daxtens)
Daniel Dugovic (ddugovic)
-Dariusz Orzechowski
+Dariusz Orzechowski (dorzechowski)
David Zar
Daylen Yang (daylen)
+Deshawn Mohan-Smith (GoldenRare)
DiscanX
Dominik Schlösser (domschl)
double-beep
Linmiao Xu (linrock)
Fabian Beuke (madnight)
Fabian Fichter (ianfab)
+Fanael Linithien (Fanael)
fanon
Fauzi Akram Dabat (FauziAkram)
Felix Wittmann
gamander
Gary Heckman (gheckman)
+George Sobala (gsobala)
gguliash
Gian-Carlo Pascutto (gcp)
Gontran Lemaire (gonlem)
Jean-Francois Romang (jromang)
Jekaa
Jerry Donald Watson (jerrydonaldwatson)
+jjoshua2
Jonathan Calovski (Mysseno)
-Jonathan Dumale (SFisGOD)
+Jonathan Buladas Dumale (SFisGOD)
Joost VandeVondele (vondele)
Jörg Oster (joergoster)
Joseph Ellis (jhellis3)
marotear
Matthew Lai (matthewlai)
Matthew Sullivan (Matt14916)
+Maxim Molchanov (Maxim)
Michael An (man)
Michael Byrne (MichaelB7)
Michael Chaly (Vizvezdenec)
Nikolay Kostov (NikolayIT)
Nguyen Pham (nguyenpham)
Norman Schmidt (FireFather)
+notruck
Ondrej Mosnáček (WOnder93)
Oskar Werkelin Ahlin
Pablo Vazquez
[](https://ci.appveyor.com/project/mcostalba/stockfish/branch/master)
[Stockfish](https://stockfishchess.org) is a free, powerful UCI chess engine
-derived from Glaurung 2.1. It features two evaluation functions, the classical
-evaluation based on handcrafted terms, and the NNUE evaluation based on
-efficiently updateable neural networks. The classical evaluation runs efficiently
-on most 64bit CPU architectures, while the NNUE evaluation benefits strongly from the
-vector intrinsics available on modern CPUs (avx2 or similar).
+derived from Glaurung 2.1. Stockfish is not a complete chess program and requires a
+UCI-compatible graphical user interface (GUI) (e.g. XBoard with PolyGlot, Scid,
+Cute Chess, eboard, Arena, Sigma Chess, Shredder, Chess Partner or Fritz) in order
+to be used comfortably. Read the documentation for your GUI of choice for information
+about how to use Stockfish with it.
-Stockfish is not a complete chess program and requires a
-UCI-compatible GUI (e.g. XBoard with PolyGlot, Scid, Cute Chess, eboard, Arena,
-Sigma Chess, Shredder, Chess Partner or Fritz) in order to be used comfortably.
-Read the documentation for your GUI of choice for information about how to use
-Stockfish with it.
+The Stockfish engine features two evaluation functions for chess, the classical
+evaluation based on handcrafted terms, and the NNUE evaluation based on efficiently
+updatable neural networks. The classical evaluation runs efficiently on almost all
+CPU architectures, while the NNUE evaluation benefits from the vector
+intrinsics available on most CPUs (sse2, avx2, neon, or similar).
## Files
* src, a subdirectory containing the full source code, including a Makefile
that can be used to compile Stockfish on Unix-like systems.
-To use the NNUE evaluation an additional data file with neural network parameters
-needs to be downloaded. The filename for the default set can be found as the default
-value of the `EvalFile` UCI option, with the format
-`nn-[SHA256 first 12 digits].nnue` (e.g. nn-c157e0a5755b.nnue). This file can be downloaded from
+ * a file with the .nnue extension, storing the neural network for the NNUE
+ evaluation. Binary distributions will have this file embedded.
+
+Note: to use the NNUE evaluation, the additional data file with neural network parameters
+needs to be available. Normally, this file is already embedded in the binary or it can be downloaded.
+The filename for the default (recommended) net can be found as the default
+value of the `EvalFile` UCI option, with the format `nn-[SHA256 first 12 digits].nnue`
+(for instance, `nn-c157e0a5755b.nnue`). This file can be downloaded from
```
https://tests.stockfishchess.org/api/nn/[filename]
```
* #### Use NNUE
Toggle between the NNUE and classical evaluation functions. If set to "true",
- the network parameters must be availabe to load from file (see also EvalFile).
+ the network parameters must be available to load from file (see also EvalFile),
+ if they are not embedded in the binary.
* #### EvalFile
The name of the file of the NNUE evaluation parameters. Depending on the GUI the
- filename should include the full path to the folder/directory that contains the file.
-
- * #### Contempt
- A positive value for contempt favors middle game positions and avoids draws,
- effective for the classical evaluation only.
-
- * #### Analysis Contempt
- By default, contempt is set to prefer the side to move. Set this option to "White"
- or "Black" to analyse with contempt for that side, or "Off" to disable contempt.
+ filename might have to include the full path to the folder/directory that contains the file.
+ Other locations, such as the directory that contains the binary and the working directory,
+ are also searched.
* #### UCI_AnalyseMode
An option handled by your GUI.
* #### SyzygyProbeDepth
Minimum remaining search depth for which a position is probed. Set this option
- to a higher value to probe less agressively if you experience too much slowdown
+ to a higher value to probe less aggressively if you experience too much slowdown
(in terms of nps) due to TB probing.
* #### Syzygy50MoveRule
Limit Syzygy tablebase probing to positions with at most this many pieces left
(including kings and pawns).
+ * #### Contempt
+ A positive value for contempt favors middle game positions and avoids draws,
+ effective for the classical evaluation only.
+
+ * #### Analysis Contempt
+ By default, contempt is set to prefer the side to move. Set this option to "White"
+ or "Black" to analyse with contempt for that side, or "Off" to disable contempt.
+
* #### Move Overhead
Assume a time delay of x ms due to network and GUI overheads. This is useful to
avoid losses on time in those cases.
* #### Debug Log File
Write all communication to and from the engine into a text file.
-## classical and NNUE evaluation
+## A note on classical and NNUE evaluation
Both approaches assign a value to a position that is used in alpha-beta (PVS) search
to find the best move. The classical evaluation computes this value as a function
of various chess concepts, handcrafted by experts, tested and tuned using fishtest.
The NNUE evaluation computes this value with a neural network based on basic
inputs (e.g. piece positions only). The network is optimized and trained
-on the evalutions of millions of positions at moderate search depth.
+on the evaluations of millions of positions at moderate search depth.
The NNUE evaluation was first introduced in shogi, and ported to Stockfish afterward.
It can be evaluated efficiently on CPUs, and exploits the fact that only parts
If the engine is searching a position that is not in the tablebases (e.g.
a position with 8 pieces), it will access the tablebases during the search.
-If the engine reports a very large score (typically 153.xx), this means
-that it has found a winning line into a tablebase position.
+If the engine reports a very large score (typically 153.xx), this means
+it has found a winning line into a tablebase position.
If the engine is given a position to search that is in the tablebases, it
will use the tablebases at the beginning of the search to preselect all
taking into account the 50-move rule.
It will then perform a search only on those moves. **The engine will not move
immediately**, unless there is only a single good move. **The engine likely
-will not report a mate score even if the position is known to be won.**
+will not report a mate score, even if the position is known to be won.**
It is therefore clear that this behaviour is not identical to what one might
be used to with Nalimov tablebases. There are technical reasons for this
Stockfish supports large pages on Linux and Windows. Large pages make
the hash access more efficient, improving the engine speed, especially
-on large hash sizes. Typical increases are 5..10% in terms of nps, but
-speed increases up to 30% have been measured. The support is
+on large hash sizes. Typical increases are 5..10% in terms of nodes per
+second, but speed increases up to 30% have been measured. The support is
automatic. Stockfish attempts to use large pages when available and
will fall back to regular memory allocation when this is not the case.
Large page support on Linux is obtained by the Linux kernel
transparent huge pages functionality. Typically, transparent huge pages
-are already enabled and no configuration is needed.
+are already enabled, and no configuration is needed.
### Support on Windows
The use of large pages requires "Lock Pages in Memory" privilege. See
[Enable the Lock Pages in Memory Option (Windows)](https://docs.microsoft.com/en-us/sql/database-engine/configure-windows/enable-the-lock-pages-in-memory-option-windows)
-on how to enable this privilege. Logout/login may be needed
-afterwards. Due to memory fragmentation, it may not always be
-possible to allocate large pages even when enabled. A reboot
-might alleviate this problem. To determine whether large pages
-are in use, see the engine log.
+on how to enable this privilege, then run [RAMMap](https://docs.microsoft.com/en-us/sysinternals/downloads/rammap)
+to double-check that large pages are used. We suggest that you reboot
+your computer after you have enabled large pages, because long Windows
+sessions suffer from memory fragmentation, which may prevent Stockfish
+from getting large pages: a fresh session is better in this regard.
## Compiling Stockfish yourself from the sources
```
cd src
make help
+ make net
make build ARCH=x86-64-modern
```
-When not using the Makefile to compile (for instance with Microsoft MSVC) you
+When not using the Makefile to compile (for instance, with Microsoft MSVC) you
need to manually set/unset some switches in the compiler command line; see
file *types.h* for a quick reference.
be found by typing the following commands in a console:
```
- ./stockfish
- compiler
+ ./stockfish compiler
```
## Understanding the code base and participating in the project
## Terms of use
Stockfish is free, and distributed under the **GNU General Public License version 3**
-(GPL v3). Essentially, this means that you are free to do almost exactly
+(GPL v3). Essentially, this means you are free to do almost exactly
what you want with the program, including distributing it among your
-friends, making it available for download from your web site, selling
+friends, making it available for download from your website, selling
it (either by itself or as part of some bigger software package), or
using it as the starting point for a software project of your own.
-Contributors with >10,000 CPU hours as of January 7, 2020
+Contributors with >10,000 CPU hours as of Sept 2, 2020
Thank you!
Username CPU Hours Games played
--------------------------------------------------
-noobpwnftw 9305707 695548021
-mlang 780050 61648867
-dew 621626 43921547
-mibere 524702 42238645
-crunchy 354587 27344275
-cw 354495 27274181
-fastgm 332801 22804359
-JojoM 295750 20437451
-CSU_Dynasty 262015 21828122
-Fisherman 232181 18939229
-ctoks 218866 17622052
-glinscott 201989 13780820
-tvijlbrief 201204 15337115
-velislav 188630 14348485
-gvreuls 187164 15149976
-bking_US 180289 11876016
-nordlandia 172076 13467830
-leszek 157152 11443978
-Thanar 148021 12365359
-spams 141975 10319326
-drabel 138073 11121749
-vdv 137850 9394330
-mgrabiak 133578 10454324
-TueRens 132485 10878471
-bcross 129683 11557084
-marrco 126078 9356740
-sqrt2 125830 9724586
-robal 122873 9593418
-vdbergh 120766 8926915
-malala 115926 8002293
-CoffeeOne 114241 5004100
-dsmith 113189 7570238
-BrunoBanani 104644 7436849
-Data 92328 8220352
-mhoram 89333 6695109
-davar 87924 7009424
-xoto 81094 6869316
-ElbertoOne 80899 7023771
-grandphish2 78067 6160199
-brabos 77212 6186135
-psk 75733 5984901
-BRAVONE 73875 5054681
-sunu 70771 5597972
-sterni1971 70605 5590573
-MaZePallas 66886 5188978
-Vizvezdenec 63708 4967313
-nssy 63462 5259388
-jromang 61634 4940891
-teddybaer 61231 5407666
-Pking_cda 60099 5293873
-solarlight 57469 5028306
-dv8silencer 56913 3883992
-tinker 54936 4086118
-renouve 49732 3501516
-Freja 49543 3733019
-robnjr 46972 4053117
-rap 46563 3219146
-Bobo1239 46036 3817196
-ttruscott 45304 3649765
-racerschmacer 44881 3975413
-finfish 44764 3370515
-eva42 41783 3599691
-biffhero 40263 3111352
-bigpen0r 39817 3291647
-mhunt 38871 2691355
-ronaldjerum 38820 3240695
-Antihistamine 38785 2761312
-pb00067 38038 3086320
-speedycpu 37591 3003273
-rkl 37207 3289580
-VoyagerOne 37050 3441673
-jbwiebe 35320 2805433
-cuistot 34191 2146279
-homyur 33927 2850481
-manap 32873 2327384
-gri 32538 2515779
-oryx 31267 2899051
-EthanOConnor 30959 2090311
-SC 30832 2730764
-csnodgrass 29505 2688994
-jmdana 29458 2205261
-strelock 28219 2067805
-jkiiski 27832 1904470
-Pyafue 27533 1902349
-Garf 27515 2747562
-eastorwest 27421 2317535
-slakovv 26903 2021889
-Prcuvu 24835 2170122
-anst 24714 2190091
-hyperbolic.tom 24319 2017394
-Patrick_G 23687 1801617
-Sharaf_DG 22896 1786697
-nabildanial 22195 1519409
-chriswk 21931 1868317
-achambord 21665 1767323
-Zirie 20887 1472937
-team-oh 20217 1636708
-Isidor 20096 1680691
-ncfish1 19931 1520927
-nesoneg 19875 1463031
-Spprtr 19853 1548165
-JanErik 19849 1703875
-agg177 19478 1395014
-SFTUser 19231 1567999
-xor12 19017 1680165
-sg4032 18431 1641865
-rstoesser 18118 1293588
-MazeOfGalious 17917 1629593
-j3corre 17743 941444
-cisco2015 17725 1690126
-ianh2105 17706 1632562
-dex 17678 1467203
-jundery 17194 1115855
-iisiraider 17019 1101015
-horst.prack 17012 1465656
-Adrian.Schmidt123 16563 1281436
-purplefishies 16342 1092533
-wei 16274 1745989
-ville 16144 1384026
-eudhan 15712 1283717
-OuaisBla 15581 972000
-DragonLord 15559 1162790
-dju 14716 875569
-chris 14479 1487385
-0xB00B1ES 14079 1001120
-OssumOpossum 13776 1007129
-enedene 13460 905279
-bpfliegel 13346 884523
-Ente 13198 1156722
-IgorLeMasson 13087 1147232
-jpulman 13000 870599
-ako027ako 12775 1173203
-Nikolay.IT 12352 1068349
-Andrew Grant 12327 895539
-joster 12008 950160
-AdrianSA 11996 804972
-Nesa92 11455 1111993
-fatmurphy 11345 853210
-Dark_wizzie 11108 1007152
-modolief 10869 896470
-mschmidt 10757 803401
-infinity 10594 727027
-mabichito 10524 749391
-Thomas A. Anderson 10474 732094
-thijsk 10431 719357
-Flopzee 10339 894821
-crocogoat 10104 1013854
-SapphireBrand 10104 969604
-stocky 10017 699440
+noobpwnftw 19352969 1231459677
+mlang 957168 61657446
+dew 949885 56893432
+mibere 703817 46865007
+crunchy 427035 27344275
+cw 416006 27521077
+JojoM 415904 24479564
+fastgm 404873 23953472
+CSU_Dynasty 335774 22850550
+tvijlbrief 335199 21871270
+Fisherman 325053 21786603
+gvreuls 311480 20751516
+ctoks 275877 18710423
+velislav 241267 15596372
+glinscott 217799 13780820
+nordlandia 211692 13484886
+bcross 206213 14934233
+bking_US 198894 11876016
+leszek 189170 11446821
+mgrabiak 183896 11778092
+drabel 181408 12489478
+TueRens 181349 12192000
+Thanar 179852 12365359
+vdv 175171 9881246
+robal 166948 10702862
+spams 157128 10319326
+marrco 149947 9376421
+sqrt2 147963 9724586
+vdbergh 137041 8926915
+CoffeeOne 136294 5004100
+malala 136182 8002293
+mhoram 128934 8177193
+davar 122092 7960001
+dsmith 122059 7570238
+xoto 119696 8222144
+grandphish2 116481 7582197
+Data 113305 8220352
+BrunoBanani 112960 7436849
+ElbertoOne 99028 7023771
+MaZePallas 98571 6362619
+brabos 92118 6186135
+psk 89957 5984901
+sunu 88463 6007033
+sterni1971 86948 5613788
+Vizvezdenec 83752 5343724
+BRAVONE 81239 5054681
+nssy 76497 5259388
+teddybaer 75125 5407666
+Pking_cda 73776 5293873
+jromang 70695 4940891
+solarlight 70517 5028306
+dv8silencer 70287 3883992
+Bobo1239 68515 4652287
+racerschmacer 67468 4935996
+manap 66273 4121774
+tinker 63458 4213726
+linrock 59082 4516053
+robnjr 57262 4053117
+Freja 56938 3733019
+ttruscott 56005 3679485
+renouve 53811 3501516
+cuistot 52532 3014920
+finfish 51360 3370515
+eva42 51272 3599691
+rkl 50759 3840947
+rap 49985 3219146
+pb00067 49727 3298270
+ronaldjerum 47654 3240695
+bigpen0r 47278 3291647
+biffhero 46564 3111352
+VoyagerOne 45386 3445881
+speedycpu 43842 3003273
+jbwiebe 43305 2805433
+Antihistamine 41788 2761312
+mhunt 41735 2691355
+eastorwest 40387 2812173
+homyur 39893 2850481
+gri 39871 2515779
+oryx 38228 2941656
+0x3C33 37773 2529097
+SC 37290 2731014
+csnodgrass 36207 2688994
+jmdana 36108 2205261
+strelock 34716 2074055
+Garf 33800 2747562
+EthanOConnor 33370 2090311
+slakovv 32915 2021889
+Spprtr 32591 2139601
+Prcuvu 30377 2170122
+anst 30301 2190091
+jkiiski 30136 1904470
+hyperbolic.tom 29840 2017394
+Pyafue 29650 1902349
+OuaisBla 27629 1578000
+chriswk 26902 1868317
+achambord 26582 1767323
+Patrick_G 26276 1801617
+yorkman 26193 1992080
+SFTUser 25182 1675689
+nabildanial 24942 1519409
+Sharaf_DG 24765 1786697
+ncfish1 24411 1520927
+agg177 23890 1395014
+JanErik 23408 1703875
+Isidor 23388 1680691
+Norabor 22976 1587862
+cisco2015 22880 1759669
+Zirie 22542 1472937
+team-oh 22272 1636708
+MazeOfGalious 21978 1629593
+sg4032 21945 1643065
+ianh2105 21725 1632562
+xor12 21628 1680365
+dex 21612 1467203
+nesoneg 21494 1463031
+horst.prack 20878 1465656
+0xB00B1ES 20590 1208666
+j3corre 20405 941444
+Adrian.Schmidt123 20316 1281436
+wei 19973 1745989
+rstoesser 19569 1293588
+eudhan 19274 1283717
+Ente 19070 1373058
+jundery 18445 1115855
+iisiraider 18247 1101015
+ville 17883 1384026
+chris 17698 1487385
+purplefishies 17595 1092533
+DragonLord 17014 1162790
+dju 16515 929427
+IgorLeMasson 16064 1147232
+ako027ako 15671 1173203
+Nikolay.IT 15154 1068349
+Andrew Grant 15114 895539
+yurikvelo 15027 1165616
+OssumOpossum 14857 1007129
+enedene 14476 905279
+bpfliegel 14298 884523
+jpulman 13982 870599
+joster 13794 950160
+Nesa92 13786 1114691
+Dark_wizzie 13422 1007152
+Hjax 13350 900887
+Fifis 13313 965473
+mabichito 12903 749391
+thijsk 12886 722107
+crocogoat 12876 1048802
+AdrianSA 12860 804972
+Flopzee 12698 894821
+fatmurphy 12547 853210
+SapphireBrand 12416 969604
+modolief 12386 896470
+scuzzi 12362 833465
+pgontarz 12151 848794
+stocky 11954 699440
+mschmidt 11941 803401
+infinity 11470 727027
+torbjo 11387 728873
+Thomas A. Anderson 11372 732094
+snicolet 11106 869170
+amicic 10779 733593
+rpngn 10712 688203
+d64 10680 771144
+basepi 10637 744851
+jjoshua2 10559 670905
+dzjp 10343 732529
+ols 10259 570669
+lbraesch 10252 647825
build_script:
- cmake --build . --config %CONFIGURATION% -- /verbosity:minimal
+ - ps: |
+ # Download default NNUE net from fishtest
+ $nnuenet = Get-Content -Path src\evaluate.h | Select-String -CaseSensitive -Pattern "EvalFileDefaultName" | Select-String -CaseSensitive -Pattern "nn-[a-z0-9]{12}.nnue"
+ $dummy = $nnuenet -match "(?<nnuenet>nn-[a-z0-9]{12}.nnue)"
+ $nnuenet = $Matches.nnuenet
+ Write-Host "Default net:" $nnuenet
+ $nnuedownloadurl = "https://tests.stockfishchess.org/api/nn/$nnuenet"
+ $nnuefilepath = "src\${env:CONFIGURATION}\$nnuenet"
+ if (Test-Path -Path $nnuefilepath) {
+ Write-Host "Already available."
+ } else {
+ Write-Host "Downloading $nnuedownloadurl to $nnuefilepath"
+ Invoke-WebRequest -Uri $nnuedownloadurl -OutFile $nnuefilepath
+ }
before_test:
- cd src/%CONFIGURATION%
SRCS = benchmark.cpp bitbase.cpp bitboard.cpp endgame.cpp evaluate.cpp main.cpp \
material.cpp misc.cpp movegen.cpp movepick.cpp pawns.cpp position.cpp psqt.cpp \
search.cpp thread.cpp timeman.cpp tt.cpp uci.cpp ucioption.cpp tune.cpp syzygy/tbprobe.cpp \
- nnue/evaluate_nnue.cpp nnue/features/half_kp.cpp
+ nnue/evaluate_nnue.cpp nnue/features/half_kp.cpp \
+ hashprobe.grpc.pb.cc hashprobe.pb.cc
+CLISRCS = client.cpp hashprobe.grpc.pb.cc hashprobe.pb.cc uci.cpp
OBJS = $(notdir $(SRCS:.cpp=.o))
+CLIOBJS = $(notdir $(CLISRCS:.cpp=.o))
VPATH = syzygy:nnue:nnue/features
# bits = 64/32 --- -DIS_64BIT --- 64-/32-bit operating system
# prefetch = yes/no --- -DUSE_PREFETCH --- Use prefetch asm-instruction
# popcnt = yes/no --- -DUSE_POPCNT --- Use popcnt asm-instruction
+# pext = yes/no --- -DUSE_PEXT --- Use pext x86_64 asm-instruction
# sse = yes/no --- -msse --- Use Intel Streaming SIMD Extensions
-# sse3 = yes/no --- -msse3 --- Use Intel Streaming SIMD Extensions 3
+# mmx = yes/no --- -mmmx --- Use Intel MMX instructions
+# sse2 = yes/no --- -msse2 --- Use Intel Streaming SIMD Extensions 2
# ssse3 = yes/no --- -mssse3 --- Use Intel Supplemental Streaming SIMD Extensions 3
# sse41 = yes/no --- -msse4.1 --- Use Intel Streaming SIMD Extensions 4.1
-# sse42 = yes/no --- -msse4.2 --- Use Intel Streaming SIMD Extensions 4.2
# avx2 = yes/no --- -mavx2 --- Use Intel Advanced Vector Extensions 2
-# pext = yes/no --- -DUSE_PEXT --- Use pext x86_64 asm-instruction
# avx512 = yes/no --- -mavx512bw --- Use Intel Advanced Vector Extensions 512
+# vnni256 = yes/no --- -mavx512vnni --- Use Intel Vector Neural Network Instructions 256
+# vnni512 = yes/no --- -mavx512vnni --- Use Intel Vector Neural Network Instructions 512
# neon = yes/no --- -DUSE_NEON --- Use ARM SIMD architecture
#
# Note that Makefile is space sensitive, so when adding new architectures
# at the end of the line for flag values.
### 2.1. General and architecture defaults
+
+ifeq ($(ARCH),)
+ ARCH = x86-64-modern
+ help_skip_sanity = yes
+endif
+# explicitly check for the list of supported architectures (as listed with make help),
+# the user can override with `make ARCH=x86-32-vnni256 SUPPORTED_ARCH=true`
+ifeq ($(ARCH), $(filter $(ARCH), \
+ x86-64-vnni512 x86-64-vnni256 x86-64-avx512 x86-64-bmi2 x86-64-avx2 \
+ x86-64-sse41-popcnt x86-64-modern x86-64-ssse3 x86-64-sse3-popcnt \
+ x86-64 x86-32-sse41-popcnt x86-32-sse2 x86-32 ppc-64 ppc-32 \
+ armv7 armv7-neon armv8 apple-silicon general-64 general-32))
+ SUPPORTED_ARCH=true
+else
+ SUPPORTED_ARCH=false
+endif
+
optimize = yes
debug = no
sanitize = no
bits = 64
prefetch = no
popcnt = no
+pext = no
sse = no
-sse3 = no
+mmx = no
+sse2 = no
ssse3 = no
sse41 = no
-sse42 = no
avx2 = no
-pext = no
avx512 = no
+vnni256 = no
+vnni512 = no
neon = no
-ARCH = x86-64-modern
+STRIP = strip
### 2.2 Architecture specific
-ifeq ($(ARCH),general-32)
- arch = any
- bits = 32
-endif
-ifeq ($(ARCH),x86-32-old)
+ifeq ($(findstring x86,$(ARCH)),x86)
+
+# x86-32/64
+
+ifeq ($(findstring x86-32,$(ARCH)),x86-32)
arch = i386
bits = 32
+ sse = yes
+ mmx = yes
+else
+ arch = x86_64
+ sse = yes
+ sse2 = yes
endif
-ifeq ($(ARCH),x86-32)
- arch = i386
- bits = 32
- prefetch = yes
+ifeq ($(findstring -sse,$(ARCH)),-sse)
sse = yes
endif
-ifeq ($(ARCH),general-64)
- arch = any
+ifeq ($(findstring -popcnt,$(ARCH)),-popcnt)
+ popcnt = yes
endif
-ifeq ($(ARCH),x86-64)
- arch = x86_64
- prefetch = yes
- sse = yes
+ifeq ($(findstring -mmx,$(ARCH)),-mmx)
+ mmx = yes
endif
-ifeq ($(ARCH),x86-64-sse3)
- arch = x86_64
- prefetch = yes
+ifeq ($(findstring -sse2,$(ARCH)),-sse2)
sse = yes
- sse3 = yes
+ sse2 = yes
endif
-ifeq ($(ARCH),x86-64-sse3-popcnt)
- arch = x86_64
- prefetch = yes
+ifeq ($(findstring -ssse3,$(ARCH)),-ssse3)
sse = yes
- sse3 = yes
- popcnt = yes
+ sse2 = yes
+ ssse3 = yes
endif
-ifeq ($(ARCH),x86-64-ssse3)
- arch = x86_64
- prefetch = yes
+ifeq ($(findstring -sse41,$(ARCH)),-sse41)
sse = yes
- sse3 = yes
+ sse2 = yes
ssse3 = yes
+ sse41 = yes
endif
-ifeq ($(ARCH),x86-64-sse41)
- arch = x86_64
- prefetch = yes
+ifeq ($(findstring -modern,$(ARCH)),-modern)
popcnt = yes
sse = yes
- sse3 = yes
+ sse2 = yes
ssse3 = yes
sse41 = yes
endif
-ifeq ($(ARCH),x86-64-modern)
- arch = x86_64
- prefetch = yes
+ifeq ($(findstring -avx2,$(ARCH)),-avx2)
popcnt = yes
sse = yes
- sse3 = yes
+ sse2 = yes
ssse3 = yes
sse41 = yes
+ avx2 = yes
endif
-ifeq ($(ARCH),x86-64-sse42)
- arch = x86_64
- prefetch = yes
+ifeq ($(findstring -bmi2,$(ARCH)),-bmi2)
popcnt = yes
sse = yes
- sse3 = yes
+ sse2 = yes
ssse3 = yes
sse41 = yes
- sse42 = yes
+ avx2 = yes
+ pext = yes
endif
-ifeq ($(ARCH),x86-64-avx2)
- arch = x86_64
- prefetch = yes
+ifeq ($(findstring -avx512,$(ARCH)),-avx512)
popcnt = yes
sse = yes
- sse3 = yes
+ sse2 = yes
ssse3 = yes
sse41 = yes
- sse42 = yes
avx2 = yes
+ pext = yes
+ avx512 = yes
endif
-ifeq ($(ARCH),x86-64-bmi2)
- arch = x86_64
- prefetch = yes
+ifeq ($(findstring -vnni256,$(ARCH)),-vnni256)
popcnt = yes
sse = yes
- sse3 = yes
+ sse2 = yes
ssse3 = yes
sse41 = yes
- sse42 = yes
avx2 = yes
pext = yes
+ vnni256 = yes
endif
-ifeq ($(ARCH),x86-64-avx512)
- arch = x86_64
- prefetch = yes
+ifeq ($(findstring -vnni512,$(ARCH)),-vnni512)
popcnt = yes
sse = yes
- sse3 = yes
+ sse2 = yes
ssse3 = yes
sse41 = yes
- sse42 = yes
avx2 = yes
pext = yes
avx512 = yes
+ vnni512 = yes
+endif
+
+ifeq ($(sse),yes)
+ prefetch = yes
+endif
+
+# 64-bit pext is not available on x86-32
+ifeq ($(bits),32)
+ pext = no
+endif
+
+else
+
+# all other architectures
+
+ifeq ($(ARCH),general-32)
+ arch = any
+ bits = 32
+endif
+
+ifeq ($(ARCH),general-64)
+ arch = any
endif
ifeq ($(ARCH),armv7)
bits = 32
endif
+ifeq ($(ARCH),armv7-neon)
+ arch = armv7
+ prefetch = yes
+ popcnt = yes
+ neon = yes
+ bits = 32
+endif
+
ifeq ($(ARCH),armv8)
- arch = armv8-a
+ arch = armv8
prefetch = yes
popcnt = yes
neon = yes
prefetch = yes
endif
+endif
+
### ==========================================================================
### Section 3. Low-level Configuration
### ==========================================================================
ifeq ($(COMP),gcc)
comp=gcc
CXX=g++
- CXXFLAGS += -pedantic -Wextra -Wshadow
+ CXXFLAGS += -pedantic -Wextra
- ifeq ($(ARCH),$(filter $(ARCH),armv7 armv8))
+ ifeq ($(arch),$(filter $(arch),armv7 armv8))
ifeq ($(OS),Android)
CXXFLAGS += -m$(bits)
LDFLAGS += -m$(bits)
LDFLAGS += -m$(bits)
endif
+ ifeq ($(arch),$(filter $(arch),armv7))
+ LDFLAGS += -latomic
+ endif
+
ifneq ($(KERNEL),Darwin)
LDFLAGS += -Wl,--no-as-needed
endif
endif
endif
- ifeq ($(ARCH),$(filter $(ARCH),armv7 armv8))
+ ifeq ($(arch),$(filter $(arch),armv7 armv8))
ifeq ($(OS),Android)
CXXFLAGS += -m$(bits)
LDFLAGS += -m$(bits)
endif
endif
+ifeq ($(KERNEL),Darwin)
+ CXXFLAGS += -arch $(arch) -mmacosx-version-min=10.14
+ LDFLAGS += -arch $(arch) -mmacosx-version-min=10.14
+ XCRUN = xcrun
+endif
+
+# To cross-compile for Android, NDK version r21 or later is recommended.
+# In earlier NDK versions, you'll need to pass -fno-addrsig if using GNU binutils.
+# Currently we don't know how to make PGO builds with the NDK yet.
+ifeq ($(COMP),ndk)
+ CXXFLAGS += -stdlib=libc++ -fPIE
+ comp=clang
+ ifeq ($(arch),armv7)
+ CXX=armv7a-linux-androideabi16-clang++
+ CXXFLAGS += -mthumb -march=armv7-a -mfloat-abi=softfp -mfpu=neon
+ STRIP=arm-linux-androideabi-strip
+ endif
+ ifeq ($(arch),armv8)
+ CXX=aarch64-linux-android21-clang++
+ STRIP=aarch64-linux-android-strip
+ endif
+ LDFLAGS += -static-libstdc++ -pie -lm -latomic
+endif
+
ifeq ($(comp),icc)
profile_make = icc-profile-make
profile_use = icc-profile-use
-else
-ifeq ($(comp),clang)
+else ifeq ($(comp),clang)
profile_make = clang-profile-make
profile_use = clang-profile-use
else
profile_make = gcc-profile-make
profile_use = gcc-profile-use
endif
-endif
-
-ifeq ($(KERNEL),Darwin)
- CXXFLAGS += -arch $(arch) -mmacosx-version-min=10.15
- LDFLAGS += -arch $(arch) -mmacosx-version-min=10.15
-endif
### Travis CI script uses COMPILER to overwrite CXX
ifdef COMPILER
CXX=$(COMPCXX)
endif
+### Sometimes gcc is really clang
+ifeq ($(COMP),gcc)
+ gccversion = $(shell $(CXX) --version)
+ gccisclang = $(findstring clang,$(gccversion))
+ ifneq ($(gccisclang),)
+ profile_make = clang-profile-make
+ profile_use = clang-profile-use
+ endif
+endif
+
### On mingw use Windows threads, otherwise POSIX
ifneq ($(comp),mingw)
+ CXXFLAGS += -DUSE_PTHREADS
# On Android Bionic's C library comes with its own pthread implementation bundled in
ifneq ($(OS),Android)
# Haiku has pthreads in its libroot, so only link it in on other platforms
ifneq ($(KERNEL),Haiku)
- LDFLAGS += -lpthread
+ ifneq ($(COMP),ndk)
+ LDFLAGS += -lpthread
+ endif
endif
endif
endif
### 3.3 Optimization
ifeq ($(optimize),yes)
- CXXFLAGS += -O3
+ CXXFLAGS += -O3 -g
ifeq ($(comp),gcc)
ifeq ($(OS), Android)
ifeq ($(prefetch),yes)
ifeq ($(sse),yes)
CXXFLAGS += -msse
- DEPENDFLAGS += -msse
endif
else
CXXFLAGS += -DNO_PREFETCH
### 3.6 popcnt
ifeq ($(popcnt),yes)
- ifeq ($(arch),$(filter $(arch),ppc64 armv8-a arm64))
+ ifeq ($(arch),$(filter $(arch),ppc64 armv7 armv8 arm64))
CXXFLAGS += -DUSE_POPCNT
else ifeq ($(comp),icc)
CXXFLAGS += -msse3 -DUSE_POPCNT
endif
endif
+
ifeq ($(avx2),yes)
CXXFLAGS += -DUSE_AVX2
ifeq ($(comp),$(filter $(comp),gcc clang mingw))
ifeq ($(avx512),yes)
CXXFLAGS += -DUSE_AVX512
ifeq ($(comp),$(filter $(comp),gcc clang mingw))
- CXXFLAGS += -mavx512bw
+ CXXFLAGS += -mavx512f -mavx512bw
endif
endif
-ifeq ($(sse42),yes)
- CXXFLAGS += -DUSE_SSE42
+ifeq ($(vnni256),yes)
+ CXXFLAGS += -DUSE_VNNI
ifeq ($(comp),$(filter $(comp),gcc clang mingw))
- CXXFLAGS += -msse4.2
+ CXXFLAGS += -mavx512f -mavx512bw -mavx512vnni -mavx512dq -mavx512vl -mprefer-vector-width=256
+ endif
+endif
+
+ifeq ($(vnni512),yes)
+ CXXFLAGS += -DUSE_VNNI
+ ifeq ($(comp),$(filter $(comp),gcc clang mingw))
+ CXXFLAGS += -mavx512vnni -mavx512dq -mavx512vl
endif
endif
endif
endif
-ifeq ($(sse3),yes)
- CXXFLAGS += -DUSE_SSE3
+ifeq ($(sse2),yes)
+ CXXFLAGS += -DUSE_SSE2
ifeq ($(comp),$(filter $(comp),gcc clang mingw))
- CXXFLAGS += -msse3
+ CXXFLAGS += -msse2
endif
endif
-ifeq ($(neon),yes)
- CXXFLAGS += -DUSE_NEON
+ifeq ($(mmx),yes)
+ CXXFLAGS += -DUSE_MMX
+ ifeq ($(comp),$(filter $(comp),gcc clang mingw))
+ CXXFLAGS += -mmmx
+ endif
endif
-ifeq ($(arch),x86_64)
- CXXFLAGS += -DUSE_SSE2
+ifeq ($(neon),yes)
+ CXXFLAGS += -DUSE_NEON
+ ifeq ($(KERNEL),Linux)
+ ifneq ($(COMP),ndk)
+ ifneq ($(arch),armv8)
+ CXXFLAGS += -mfpu=neon
+ endif
+ endif
+ endif
endif
### 3.7 pext
### needs access to the optimization flags.
ifeq ($(optimize),yes)
ifeq ($(debug), no)
- ifeq ($(comp),$(filter $(comp),gcc clang))
+ ifeq ($(comp),clang)
+ CXXFLAGS += -flto=thin
+ ifneq ($(findstring MINGW,$(KERNEL)),)
+ CXXFLAGS += -fuse-ld=lld
+ else ifneq ($(findstring MSYS,$(KERNEL)),)
+ CXXFLAGS += -fuse-ld=lld
+ endif
+ LDFLAGS += $(CXXFLAGS)
+
+# GCC and CLANG use different methods for parallelizing LTO and CLANG pretends to be
+# GCC on some systems.
+ else ifeq ($(comp),gcc)
+ ifeq ($(gccisclang),)
CXXFLAGS += -flto
+ LDFLAGS += $(CXXFLAGS) -flto=jobserver
+ ifneq ($(findstring MINGW,$(KERNEL)),)
+ LDFLAGS += -save-temps
+ else ifneq ($(findstring MSYS,$(KERNEL)),)
+ LDFLAGS += -save-temps
+ endif
+ else
+ CXXFLAGS += -flto=thin
LDFLAGS += $(CXXFLAGS)
endif
# To use LTO and static linking on windows, the tool chain requires a recent gcc:
-# gcc version 10.1 in msys2 or TDM-GCC version 9.2 are know to work, older might not.
+# gcc version 10.1 in msys2 or TDM-GCC version 9.2 are known to work, older might not.
# So, only enable it for a cross from Linux by default.
- ifeq ($(comp),mingw)
+ else ifeq ($(comp),mingw)
ifeq ($(KERNEL),Linux)
+ ifneq ($(arch),i386)
CXXFLAGS += -flto
- LDFLAGS += $(CXXFLAGS)
+ LDFLAGS += $(CXXFLAGS) -flto=jobserver
+ endif
endif
endif
endif
### Section 4. Public Targets
### ==========================================================================
+
help:
@echo ""
@echo "To compile stockfish, type: "
@echo ""
@echo "Supported targets:"
@echo ""
+ @echo "help > Display architecture details"
@echo "build > Standard build"
- @echo "profile-build > Standard build with PGO"
+ @echo "net > Download the default nnue net"
+ @echo "profile-build > Faster build (with profile-guided optimization)"
@echo "strip > Strip executable"
@echo "install > Install executable"
@echo "clean > Clean up"
- @echo "net > Download the default nnue net"
@echo ""
@echo "Supported archs:"
@echo ""
+ @echo "x86-64-vnni512 > x86 64-bit with vnni support 512bit wide"
+ @echo "x86-64-vnni256 > x86 64-bit with vnni support 256bit wide"
@echo "x86-64-avx512 > x86 64-bit with avx512 support"
@echo "x86-64-bmi2 > x86 64-bit with bmi2 support"
@echo "x86-64-avx2 > x86 64-bit with avx2 support"
- @echo "x86-64-sse42 > x86 64-bit with sse42 support"
- @echo "x86-64-modern > x86 64-bit with sse41 support (x86-64-sse41)"
- @echo "x86-64-sse41 > x86 64-bit with sse41 support"
+ @echo "x86-64-sse41-popcnt > x86 64-bit with sse41 and popcnt support"
+ @echo "x86-64-modern > common modern CPU, currently x86-64-sse41-popcnt"
@echo "x86-64-ssse3 > x86 64-bit with ssse3 support"
@echo "x86-64-sse3-popcnt > x86 64-bit with sse3 and popcnt support"
- @echo "x86-64-sse3 > x86 64-bit with sse3 support"
- @echo "x86-64 > x86 64-bit generic"
- @echo "x86-32 > x86 32-bit (also enables SSE)"
- @echo "x86-32-old > x86 32-bit fall back for old hardware"
+ @echo "x86-64 > x86 64-bit generic (with sse2 support)"
+ @echo "x86-32-sse41-popcnt > x86 32-bit with sse41 and popcnt support"
+ @echo "x86-32-sse2 > x86 32-bit with sse2 support"
+ @echo "x86-32 > x86 32-bit generic (with mmx and sse support)"
@echo "ppc-64 > PPC 64-bit"
@echo "ppc-32 > PPC 32-bit"
@echo "armv7 > ARMv7 32-bit"
- @echo "armv8 > ARMv8 64-bit"
+ @echo "armv7-neon > ARMv7 32-bit with popcnt and neon"
+ @echo "armv8 > ARMv8 64-bit with popcnt and neon"
@echo "apple-silicon > Apple silicon ARM64"
@echo "general-64 > unspecified 64-bit"
@echo "general-32 > unspecified 32-bit"
@echo "mingw > Gnu compiler with MinGW under Windows"
@echo "clang > LLVM Clang compiler"
@echo "icc > Intel compiler"
+ @echo "ndk > Google NDK to cross-compile for Android"
@echo ""
@echo "Simple examples. If you don't know what to do, you likely want to run: "
@echo ""
- @echo "make -j build ARCH=x86-64 (This is for 64-bit systems)"
- @echo "make -j build ARCH=x86-32 (This is for 32-bit systems)"
- @echo ""
- @echo "Advanced examples, for experienced users: "
+ @echo "make -j build ARCH=x86-64 (A portable, slow compile for 64-bit systems)"
+ @echo "make -j build ARCH=x86-32 (A portable, slow compile for 32-bit systems)"
@echo ""
- @echo "make -j build ARCH=x86-64-modern COMP=clang"
- @echo "make -j profile-build ARCH=x86-64-bmi2 COMP=gcc COMPCXX=g++-4.8"
+ @echo "Advanced examples, for experienced users looking for performance: "
@echo ""
- @echo "The selected architecture $(ARCH) enables the following configuration: "
+ @echo "make help ARCH=x86-64-bmi2"
+ @echo "make -j profile-build ARCH=x86-64-bmi2 COMP=gcc COMPCXX=g++-9.0"
+ @echo "make -j build ARCH=x86-64-ssse3 COMP=clang"
@echo ""
+ @echo "-------------------------------"
+ifeq ($(SUPPORTED_ARCH)$(help_skip_sanity), true)
+ @echo "The selected architecture $(ARCH) will enable the following configuration: "
@$(MAKE) ARCH=$(ARCH) COMP=$(COMP) config-sanity
+else
+ @echo "Specify a supported architecture with the ARCH option for more details"
+ @echo ""
+endif
.PHONY: help build profile-build strip install clean net objclean profileclean \
config-sanity icc-profile-use icc-profile-make gcc-profile-use gcc-profile-make \
clang-profile-use clang-profile-make
-build: config-sanity
+build: net config-sanity
$(MAKE) ARCH=$(ARCH) COMP=$(COMP) all
-profile-build: config-sanity objclean profileclean
+profile-build: net config-sanity objclean profileclean
@echo ""
@echo "Step 1/4. Building instrumented executable ..."
$(MAKE) ARCH=$(ARCH) COMP=$(COMP) $(profile_make)
$(MAKE) ARCH=$(ARCH) COMP=$(COMP) profileclean
strip:
- strip $(EXE)
+ $(STRIP) $(EXE)
install:
-mkdir -p -m 755 $(BINDIR)
-cp $(EXE) $(BINDIR)
-strip $(BINDIR)/$(EXE)
-#clean all
+# clean all
clean: objclean profileclean
@rm -f .depend *~ core
+# evaluation network (nnue)
net:
- $(eval nnuenet := $(shell grep EvalFile ucioption.cpp | grep Option | sed 's/.*\(nn-[a-z0-9]\{12\}.nnue\).*/\1/'))
+ $(eval nnuenet := $(shell grep EvalFileDefaultName evaluate.h | grep define | sed 's/.*\(nn-[a-z0-9]\{12\}.nnue\).*/\1/'))
@echo "Default net: $(nnuenet)"
$(eval nnuedownloadurl := https://tests.stockfishchess.org/api/nn/$(nnuenet))
- $(eval curl_or_wget := $(shell if hash curl 2>/dev/null; then echo "curl -sL"; elif hash wget 2>/dev/null; then echo "wget -qO-"; fi))
- @if test -f "$(nnuenet)"; then echo "Already available."; else echo "Downloading $(nnuedownloadurl)"; $(curl_or_wget) $(nnuedownloadurl) > $(nnuenet); fi
+ $(eval curl_or_wget := $(shell if hash curl 2>/dev/null; then echo "curl -skL"; elif hash wget 2>/dev/null; then echo "wget -qO-"; fi))
+ @if test -f "$(nnuenet)"; then \
+ echo "Already available."; \
+ else \
+ if [ "x$(curl_or_wget)" = "x" ]; then \
+ echo "Automatic download failed: neither curl nor wget is installed. Install one of these tools or download the net manually"; exit 1; \
+ else \
+ echo "Downloading $(nnuedownloadurl)"; $(curl_or_wget) $(nnuedownloadurl) > $(nnuenet);\
+ fi; \
+ fi;
+ $(eval shasum_command := $(shell if hash shasum 2>/dev/null; then echo "shasum -a 256 "; elif hash sha256sum 2>/dev/null; then echo "sha256sum "; fi))
+ @if [ "x$(shasum_command)" != "x" ]; then \
+ if [ "$(nnuenet)" != "nn-"`$(shasum_command) $(nnuenet) | cut -c1-12`".nnue" ]; then \
+ echo "Failed download or $(nnuenet) corrupted, please delete!"; exit 1; \
+ fi \
+ else \
+ echo "shasum / sha256sum not found, skipping net validation"; \
+ fi
# clean binaries and objects
objclean:
# clean auxiliary profiling files
profileclean:
@rm -rf profdir
- @rm -f bench.txt *.gcda *.gcno ./syzygy/*.gcda ./nnue/*.gcda ./nnue/features/*.gcda
+ @rm -f bench.txt *.gcda *.gcno ./syzygy/*.gcda ./nnue/*.gcda ./nnue/features/*.gcda *.s
@rm -f stockfish.profdata *.profraw
default:
### Section 5. Private Targets
### ==========================================================================
-all: $(EXE) .depend
+all: $(EXE) client .depend
-config-sanity:
+config-sanity: net
@echo ""
@echo "Config:"
@echo "debug: '$(debug)'"
@echo "os: '$(OS)'"
@echo "prefetch: '$(prefetch)'"
@echo "popcnt: '$(popcnt)'"
+ @echo "pext: '$(pext)'"
@echo "sse: '$(sse)'"
- @echo "sse3: '$(sse3)'"
+ @echo "mmx: '$(mmx)'"
+ @echo "sse2: '$(sse2)'"
@echo "ssse3: '$(ssse3)'"
@echo "sse41: '$(sse41)'"
- @echo "sse42: '$(sse42)'"
@echo "avx2: '$(avx2)'"
- @echo "pext: '$(pext)'"
@echo "avx512: '$(avx512)'"
+ @echo "vnni256: '$(vnni256)'"
+ @echo "vnni512: '$(vnni512)'"
@echo "neon: '$(neon)'"
@echo ""
@echo "Flags:"
@test "$(debug)" = "yes" || test "$(debug)" = "no"
@test "$(sanitize)" = "undefined" || test "$(sanitize)" = "thread" || test "$(sanitize)" = "address" || test "$(sanitize)" = "no"
@test "$(optimize)" = "yes" || test "$(optimize)" = "no"
+ @test "$(SUPPORTED_ARCH)" = "true"
@test "$(arch)" = "any" || test "$(arch)" = "x86_64" || test "$(arch)" = "i386" || \
test "$(arch)" = "ppc64" || test "$(arch)" = "ppc" || \
- test "$(arch)" = "armv7" || test "$(arch)" = "armv8-a" || test "$(arch)" = "arm64"
+ test "$(arch)" = "armv7" || test "$(arch)" = "armv8" || test "$(arch)" = "arm64"
@test "$(bits)" = "32" || test "$(bits)" = "64"
@test "$(prefetch)" = "yes" || test "$(prefetch)" = "no"
@test "$(popcnt)" = "yes" || test "$(popcnt)" = "no"
+ @test "$(pext)" = "yes" || test "$(pext)" = "no"
@test "$(sse)" = "yes" || test "$(sse)" = "no"
- @test "$(sse3)" = "yes" || test "$(sse3)" = "no"
+ @test "$(mmx)" = "yes" || test "$(mmx)" = "no"
+ @test "$(sse2)" = "yes" || test "$(sse2)" = "no"
@test "$(ssse3)" = "yes" || test "$(ssse3)" = "no"
@test "$(sse41)" = "yes" || test "$(sse41)" = "no"
- @test "$(sse42)" = "yes" || test "$(sse42)" = "no"
@test "$(avx2)" = "yes" || test "$(avx2)" = "no"
- @test "$(pext)" = "yes" || test "$(pext)" = "no"
@test "$(avx512)" = "yes" || test "$(avx512)" = "no"
+ @test "$(vnni256)" = "yes" || test "$(vnni256)" = "no"
+ @test "$(vnni512)" = "yes" || test "$(vnni512)" = "no"
@test "$(neon)" = "yes" || test "$(neon)" = "no"
- @test "$(comp)" = "gcc" || test "$(comp)" = "icc" || test "$(comp)" = "mingw" || test "$(comp)" = "clang"
+ @test "$(comp)" = "gcc" || test "$(comp)" = "icc" || test "$(comp)" = "mingw" || test "$(comp)" = "clang" \
+ || test "$(comp)" = "armv7a-linux-androideabi16-clang" || test "$(comp)" = "aarch64-linux-android21-clang"
$(EXE): $(OBJS)
- $(CXX) -o $@ $(OBJS) $(LDFLAGS)
+ +$(CXX) -o $@ $(OBJS) $(LDFLAGS)
clang-profile-make:
$(MAKE) ARCH=$(ARCH) COMP=$(COMP) \
all
clang-profile-use:
- llvm-profdata merge -output=stockfish.profdata *.profraw
+ $(XCRUN) llvm-profdata merge -output=stockfish.profdata *.profraw
$(MAKE) ARCH=$(ARCH) COMP=$(COMP) \
EXTRACXXFLAGS='-fprofile-instr-use=stockfish.profdata' \
EXTRALDFLAGS='-fprofile-use ' \
EXTRACXXFLAGS='-prof_use -prof_dir ./profdir' \
all
+### GRPC
+
+PROTOS_PATH = .
+PROTOC = protoc
+GRPC_CPP_PLUGIN = grpc_cpp_plugin
+GRPC_CPP_PLUGIN_PATH ?= `which $(GRPC_CPP_PLUGIN)`
+
+%.grpc.pb.h %.grpc.pb.cc: %.proto
+ $(PROTOC) -I $(PROTOS_PATH) --grpc_out=. --plugin=protoc-gen-grpc=$(GRPC_CPP_PLUGIN_PATH) $<
+
+# Rename generated .cc sources to .cpp so the existing object-file rules pick them up
+%.cpp: %.cc
+ cp $< $@
+
+%.pb.h %.pb.cc: %.proto
+ $(PROTOC) -I $(PROTOS_PATH) --cpp_out=. $<
+
+#LDFLAGS += -Wl,-Bstatic -Wl,-\( -lprotobuf -lgrpc++_unsecure -lgrpc_unsecure -lgrpc -lz -Wl,-\) -Wl,-Bdynamic -ldl
+LDFLAGS += /usr/lib/x86_64-linux-gnu/libprotobuf.a /usr/lib/x86_64-linux-gnu/libgrpc++_unsecure.a /usr/lib/x86_64-linux-gnu/libgrpc_unsecure.a /usr/lib/x86_64-linux-gnu/libgrpc.a /usr/lib/x86_64-linux-gnu/libcares.a -ldl -lz
+#LDFLAGS += /usr/lib/x86_64-linux-gnu/libprotobuf.a /usr/lib/libgrpc++_unsecure.a /usr/lib/libgrpc_unsecure.a /usr/lib/libgrpc.a /usr/lib/x86_64-linux-gnu/libcares.a -ldl -lz
+
+client: $(CLIOBJS)
+ $(CXX) -o $@ $(CLIOBJS) $(LDFLAGS)
+
+# Other stuff
+
.depend:
-@$(CXX) $(DEPENDFLAGS) -MM $(SRCS) > $@ 2> /dev/null
/// setup_bench() builds a list of UCI commands to be run by bench. There
/// are five parameters: TT size in MB, number of search threads that
/// should be used, the limit value spent for each position, a file name
-/// where to look for positions in FEN format and the type of the limit:
-/// depth, perft, nodes and movetime (in millisecs).
+/// where to look for positions in FEN format, the type of the limit:
+/// depth, perft, nodes and movetime (in millisecs), and evaluation type
+/// mixed (default), classical, NNUE.
///
/// bench -> search default positions up to depth 13
/// bench 64 1 15 -> search default positions up to depth 15 (TT = 64MB)
string limit = (is >> token) ? token : "13";
string fenFile = (is >> token) ? token : "default";
string limitType = (is >> token) ? token : "depth";
+ string evalType = (is >> token) ? token : "mixed";
go = limitType == "eval" ? "eval" : "go " + limitType + " " + limit;
list.emplace_back("setoption name Hash value " + ttSize);
list.emplace_back("ucinewgame");
+ size_t posCounter = 0;
+
for (const string& fen : fens)
if (fen.find("setoption") != string::npos)
list.emplace_back(fen);
else
{
+ if (evalType == "classical" || (evalType == "mixed" && posCounter % 2 == 0))
+ list.emplace_back("setoption name Use NNUE value false");
+ else if (evalType == "NNUE" || (evalType == "mixed" && posCounter % 2 != 0))
+ list.emplace_back("setoption name Use NNUE value true");
list.emplace_back("position fen " + fen);
list.emplace_back(go);
+ ++posCounter;
}
+ list.emplace_back("setoption name Use NNUE value true");
+
return list;
}
Bitboard BishopTable[0x1480]; // To store bishop attacks
void init_magics(PieceType pt, Bitboard table[], Magic magics[]);
+
+}
+
+
+/// safe_destination() returns the bitboard of target square for the given step
+/// from the given square. If the step is off the board, returns empty bitboard.
+
+inline Bitboard safe_destination(Square s, int step) {
+ Square to = Square(s + step);
+ return is_ok(to) && distance(s, to) <= 2 ? square_bb(to) : Bitboard(0);
}
Direction RookDirections[4] = {NORTH, SOUTH, EAST, WEST};
Direction BishopDirections[4] = {NORTH_EAST, SOUTH_EAST, SOUTH_WEST, NORTH_WEST};
- for(Direction d : (pt == ROOK ? RookDirections : BishopDirections))
+ for (Direction d : (pt == ROOK ? RookDirections : BishopDirections))
{
Square s = sq;
while(safe_destination(s, d) && !(occupied & s))
inline int edge_distance(Rank r) { return std::min(r, Rank(RANK_8 - r)); }
-/// safe_destination() returns the bitboard of target square for the given step
-/// from the given square. If the step is off the board, returns empty bitboard.
-
-inline Bitboard safe_destination(Square s, int step)
-{
- Square to = Square(s + step);
- return is_ok(to) && distance(s, to) <= 2 ? square_bb(to) : Bitboard(0);
-}
-
-
/// attacks_bb(Square) returns the pseudo attacks of the give piece type
/// assuming an empty board.
--- /dev/null
+#include <iostream>
+#include <memory>
+#include <string>
+
+#include <grpc++/grpc++.h>
+
+#include "hashprobe.grpc.pb.h"
+#include "types.h"
+#include "uci.h"
+
+using grpc::Channel;
+using grpc::ClientContext;
+using grpc::Status;
+using namespace hashprobe;
+
+std::string FormatMove(const HashProbeMove &move) {
+ if (move.pretty().empty()) return "MOVE_NONE";
+ return move.pretty();
+}
+
+int main(int argc, char** argv) {
+ std::shared_ptr<Channel> channel(grpc::CreateChannel(
+ "localhost:50051", grpc::InsecureChannelCredentials()));
+ std::unique_ptr<HashProbe::Stub> stub(HashProbe::NewStub(channel));
+
+ for ( ;; ) {
+ char buf[256];
+ if (fgets(buf, sizeof(buf), stdin) == nullptr || buf[0] == '\n') {
+ exit(0);
+ }
+
+ char *ptr = strchr(buf, '\n');
+ if (ptr != nullptr) *ptr = 0;
+
+ HashProbeRequest request;
+ request.set_fen(buf);
+
+ HashProbeResponse response;
+ ClientContext context;
+ Status status = stub->Probe(&context, request, &response);
+
+ if (status.ok()) {
+ for (const HashProbeLine &line : response.line()) {
+ std::cout << FormatMove(line.move()) << " ";
+ std::cout << line.found() << " ";
+ for (const HashProbeMove &move : line.pv()) {
+ std::cout << FormatMove(move) << ",";
+ }
+ std::cout << " ";
+ switch (line.bound()) {
+ case HashProbeLine::BOUND_NONE:
+ std::cout << "?";
+ break;
+ case HashProbeLine::BOUND_EXACT:
+ std::cout << "==";
+ break;
+ case HashProbeLine::BOUND_UPPER:
+ std::cout << "<=";
+ break;
+ case HashProbeLine::BOUND_LOWER:
+ std::cout << ">=";
+ break;
+ }
+ switch (line.value().score_type()) {
+ case HashProbeScore::SCORE_CP:
+ std::cout << " cp " << line.value().score_cp() << " ";
+ break;
+ case HashProbeScore::SCORE_MATE:
+ std::cout << " mate " << line.value().score_mate() << " ";
+ break;
+ }
+ std::cout << line.depth() << std::endl;
+ }
+ std::cout << "END" << std::endl;
+ } else {
+ std::cout << "ERROR" << std::endl;
+ }
+ }
+
+ return 0;
+}
assert(verify_material(pos, strongSide, RookValueMg, 2));
assert(verify_material(pos, weakSide, RookValueMg, 1));
- Square strongPawn1 = pos.squares<PAWN>(strongSide)[0];
- Square strongPawn2 = pos.squares<PAWN>(strongSide)[1];
+ Square strongPawn1 = lsb(pos.pieces(strongSide, PAWN));
+ Square strongPawn2 = msb(pos.pieces(strongSide, PAWN));
Square weakKing = pos.square<KING>(weakSide);
// Does the stronger side have a passed pawn?
return SCALE_FACTOR_NONE;
Square weakKing = pos.square<KING>(weakSide);
- Square strongPawn1 = pos.squares<PAWN>(strongSide)[0];
- Square strongPawn2 = pos.squares<PAWN>(strongSide)[1];
+ Square strongPawn1 = lsb(pos.pieces(strongSide, PAWN));
+ Square strongPawn2 = msb(pos.pieces(strongSide, PAWN));
Square blockSq1, blockSq2;
if (relative_rank(strongSide, strongPawn1) > relative_rank(strongSide, strongPawn2))
#include <cassert>
#include <cstdlib>
#include <cstring> // For std::memset
+#include <fstream>
#include <iomanip>
#include <sstream>
#include <iostream>
+#include <streambuf>
+#include <vector>
#include "bitboard.h"
#include "evaluate.h"
#include "material.h"
+#include "misc.h"
#include "pawns.h"
#include "thread.h"
#include "uci.h"
+#include "incbin/incbin.h"
+
+
+// Macro to embed the default NNUE file data in the engine binary (using incbin.h, by Dale Weiler).
+// This macro invocation will declare the following three variables
+// const unsigned char gEmbeddedNNUEData[]; // a pointer to the embedded data
+// const unsigned char *const gEmbeddedNNUEEnd; // a marker to the end
+// const unsigned int gEmbeddedNNUESize; // the size of the embedded file
+// Note that this does not work in Microsoft Visual Studio.
+#if !defined(_MSC_VER) && !defined(NNUE_EMBEDDING_OFF)
+ INCBIN(EmbeddedNNUE, EvalFileDefaultName);
+#else
+ const unsigned char gEmbeddedNNUEData[1] = {0x0};
+ const unsigned char *const gEmbeddedNNUEEnd = &gEmbeddedNNUEData[1];
+ const unsigned int gEmbeddedNNUESize = 1;
+#endif
+
+
+using namespace std;
+using namespace Eval::NNUE;
namespace Eval {
bool useNNUE;
- std::string eval_file_loaded="None";
+ string eval_file_loaded = "None";
- void init_NNUE() {
+ /// NNUE::init() tries to load a nnue network at startup time, or when the engine
+ /// receives a UCI command "setoption name EvalFile value nn-[a-z0-9]{12}.nnue"
+ /// The name of the nnue network is always retrieved from the EvalFile option.
+ /// We search the given network in three locations: internally (the default
+ /// network may be embedded in the binary), in the active working directory and
+ /// in the engine directory. Distro packagers may define the DEFAULT_NNUE_DIRECTORY
+ /// variable to have the engine search in a special directory in their distro.
+
+ void NNUE::init() {
useNNUE = Options["Use NNUE"];
- std::string eval_file = std::string(Options["EvalFile"]);
- if (useNNUE && eval_file_loaded != eval_file)
- if (Eval::NNUE::load_eval_file(eval_file))
- eval_file_loaded = eval_file;
+ if (!useNNUE)
+ return;
+
+ string eval_file = string(Options["EvalFile"]);
+
+ #if defined(DEFAULT_NNUE_DIRECTORY)
+ #define stringify2(x) #x
+ #define stringify(x) stringify2(x)
+ vector<string> dirs = { "<internal>" , "" , CommandLine::binaryDirectory , stringify(DEFAULT_NNUE_DIRECTORY) };
+ #else
+ vector<string> dirs = { "<internal>" , "" , CommandLine::binaryDirectory };
+ #endif
+
+ for (string directory : dirs)
+ if (eval_file_loaded != eval_file)
+ {
+ if (directory != "<internal>")
+ {
+ ifstream stream(directory + eval_file, ios::binary);
+ if (load_eval(eval_file, stream))
+ eval_file_loaded = eval_file;
+ }
+
+ if (directory == "<internal>" && eval_file == EvalFileDefaultName)
+ {
+ // C++ way to prepare a buffer for a memory stream
+ class MemoryBuffer : public basic_streambuf<char> {
+ public: MemoryBuffer(char* p, size_t n) { setg(p, p, p + n); setp(p, p + n); }
+ };
+
+ MemoryBuffer buffer(const_cast<char*>(reinterpret_cast<const char*>(gEmbeddedNNUEData)),
+ size_t(gEmbeddedNNUESize));
+
+ istream stream(&buffer);
+ if (load_eval(eval_file, stream))
+ eval_file_loaded = eval_file;
+ }
+ }
}
- void verify_NNUE() {
+ /// NNUE::verify() verifies that the last net used was loaded successfully
+ void NNUE::verify() {
+
+ string eval_file = string(Options["EvalFile"]);
- std::string eval_file = std::string(Options["EvalFile"]);
if (useNNUE && eval_file_loaded != eval_file)
{
- std::cerr << "Use of NNUE evaluation, but the file " << eval_file << " was not loaded successfully. "
- << "These network evaluation parameters must be available, compatible with this version of the code. "
- << "The UCI option EvalFile might need to specify the full path, including the directory/folder name, to the file." << std::endl;
- std::exit(EXIT_FAILURE);
+ UCI::OptionsMap defaults;
+ UCI::init(defaults);
+
+ string msg1 = "If the UCI option \"Use NNUE\" is set to true, network evaluation parameters compatible with the engine must be available.";
+ string msg2 = "The option is set to true, but the network file " + eval_file + " was not loaded successfully.";
+ string msg3 = "The UCI option EvalFile might need to specify the full path, including the directory name, to the network file.";
+ string msg4 = "The default net can be downloaded from: https://tests.stockfishchess.org/api/nn/" + string(defaults["EvalFile"]);
+ string msg5 = "The engine will be terminated now.";
+
+ sync_cout << "info string ERROR: " << msg1 << sync_endl;
+ sync_cout << "info string ERROR: " << msg2 << sync_endl;
+ sync_cout << "info string ERROR: " << msg3 << sync_endl;
+ sync_cout << "info string ERROR: " << msg4 << sync_endl;
+ sync_cout << "info string ERROR: " << msg5 << sync_endl;
+
+ exit(EXIT_FAILURE);
}
if (useNNUE)
- sync_cout << "info string NNUE evaluation using " << eval_file << " enabled." << sync_endl;
+ sync_cout << "info string NNUE evaluation using " << eval_file << " enabled" << sync_endl;
else
- sync_cout << "info string classical evaluation enabled." << sync_endl;
+ sync_cout << "info string classical evaluation enabled" << sync_endl;
}
}
namespace {
// Threshold for lazy and space evaluation
- constexpr Value LazyThreshold1 = Value(1400);
- constexpr Value LazyThreshold2 = Value(1300);
- constexpr Value SpaceThreshold = Value(12222);
- constexpr Value NNUEThreshold = Value(520);
+ constexpr Value LazyThreshold1 = Value(1565);
+ constexpr Value LazyThreshold2 = Value(1102);
+ constexpr Value SpaceThreshold = Value(11551);
+ constexpr Value NNUEThreshold1 = Value(682);
+ constexpr Value NNUEThreshold2 = Value(176);
// KingAttackWeights[PieceType] contains king attack weights by piece type
constexpr int KingAttackWeights[PIECE_TYPE_NB] = { 0, 0, 81, 52, 44, 10 };
// SafeCheck[PieceType][single/multiple] contains safe check bonus by piece type,
// higher if multiple safe checks are possible for that piece type.
constexpr int SafeCheck[][2] = {
- {}, {}, {792, 1283}, {645, 967}, {1084, 1897}, {772, 1119}
+ {}, {}, {803, 1292}, {639, 974}, {1087, 1878}, {759, 1132}
};
#define S(mg, eg) make_score(mg, eg)
// MobilityBonus[PieceType-2][attacked] contains bonuses for middle and end game,
// indexed by piece type and number of attacked squares in the mobility area.
constexpr Score MobilityBonus[][32] = {
- { S(-62,-81), S(-53,-56), S(-12,-31), S( -4,-16), S( 3, 5), S( 13, 11), // Knight
- S( 22, 17), S( 28, 20), S( 33, 25) },
- { S(-48,-59), S(-20,-23), S( 16, -3), S( 26, 13), S( 38, 24), S( 51, 42), // Bishop
- S( 55, 54), S( 63, 57), S( 63, 65), S( 68, 73), S( 81, 78), S( 81, 86),
- S( 91, 88), S( 98, 97) },
- { S(-60,-78), S(-20,-17), S( 2, 23), S( 3, 39), S( 3, 70), S( 11, 99), // Rook
- S( 22,103), S( 31,121), S( 40,134), S( 40,139), S( 41,158), S( 48,164),
- S( 57,168), S( 57,169), S( 62,172) },
- { S(-30,-48), S(-12,-30), S( -8, -7), S( -9, 19), S( 20, 40), S( 23, 55), // Queen
- S( 23, 59), S( 35, 75), S( 38, 78), S( 53, 96), S( 64, 96), S( 65,100),
- S( 65,121), S( 66,127), S( 67,131), S( 67,133), S( 72,136), S( 72,141),
- S( 77,147), S( 79,150), S( 93,151), S(108,168), S(108,168), S(108,171),
- S(110,182), S(114,182), S(114,192), S(116,219) }
+ { S(-62,-79), S(-53,-57), S(-12,-31), S( -3,-17), S( 3, 7), S( 12, 13), // Knight
+ S( 21, 16), S( 28, 21), S( 37, 26) },
+ { S(-47,-59), S(-20,-25), S( 14, -8), S( 29, 12), S( 39, 21), S( 53, 40), // Bishop
+ S( 53, 56), S( 60, 58), S( 62, 65), S( 69, 72), S( 78, 78), S( 83, 87),
+ S( 91, 88), S( 96, 98) },
+ { S(-60,-82), S(-24,-15), S( 0, 17) ,S( 3, 43), S( 4, 72), S( 14,100), // Rook
+ S( 20,102), S( 30,122), S( 41,133), S(41 ,139), S( 41,153), S( 45,160),
+ S( 57,165), S( 58,170), S( 67,175) },
+ { S(-29,-49), S(-16,-29), S( -8, -8), S( -8, 17), S( 18, 39), S( 25, 54), // Queen
+ S( 23, 59), S( 37, 73), S( 41, 76), S( 54, 95), S( 65, 95) ,S( 68,101),
+ S( 69,124), S( 70,128), S( 70,132), S( 70,133) ,S( 71,136), S( 72,140),
+ S( 74,147), S( 76,149), S( 90,153), S(104,169), S(105,171), S(106,171),
+ S(112,178), S(114,185), S(114,187), S(119,221) }
+ };
+
+ // BishopPawns[distance from edge] contains a file-dependent penalty for pawns on
+ // squares of the same color as our bishop.
+ constexpr Score BishopPawns[int(FILE_NB) / 2] = {
+ S(3, 8), S(3, 9), S(1, 8), S(3, 7)
};
// KingProtector[knight/bishop] contains penalty for each distance unit to own king
// Outpost[knight/bishop] contains bonuses for each knight or bishop occupying a
// pawn protected square on rank 4 to 6 which is also safe from a pawn attack.
- constexpr Score Outpost[] = { S(56, 36), S(30, 23) };
+ constexpr Score Outpost[] = { S(56, 34), S(31, 23) };
// PassedRank[Rank] contains a bonus according to the rank of a passed pawn
constexpr Score PassedRank[RANK_NB] = {
- S(0, 0), S(10, 28), S(17, 33), S(15, 41), S(62, 72), S(168, 177), S(276, 260)
+ S(0, 0), S(7, 27), S(16, 32), S(17, 40), S(64, 71), S(170, 174), S(278, 262)
};
- // RookOnFile[semiopen/open] contains bonuses for each rook when there is
- // no (friendly) pawn on the rook file.
- constexpr Score RookOnFile[] = { S(19, 7), S(48, 29) };
+ constexpr Score RookOnClosedFile = S(10, 5);
+ constexpr Score RookOnOpenFile[] = { S(19, 6), S(47, 26) };
// ThreatByMinor/ByRook[attacked PieceType] contains bonuses according to
// which piece type attacks which one. Attacks on lesser pieces which are
// pawn-defended are not considered.
constexpr Score ThreatByMinor[PIECE_TYPE_NB] = {
- S(0, 0), S(5, 32), S(57, 41), S(77, 56), S(88, 119), S(79, 161)
+ S(0, 0), S(5, 32), S(55, 41), S(77, 56), S(89, 119), S(79, 162)
};
constexpr Score ThreatByRook[PIECE_TYPE_NB] = {
- S(0, 0), S(3, 46), S(37, 68), S(42, 60), S(0, 38), S(58, 41)
+ S(0, 0), S(3, 44), S(37, 68), S(42, 60), S(0, 39), S(58, 43)
};
// Assorted bonuses and penalties
constexpr Score BadOutpost = S( -7, 36);
constexpr Score BishopOnKingRing = S( 24, 0);
- constexpr Score BishopPawns = S( 3, 7);
constexpr Score BishopXRayPawns = S( 4, 5);
constexpr Score CorneredBishop = S( 50, 50);
constexpr Score FlankAttacks = S( 8, 0);
constexpr Score ReachableOutpost = S( 31, 22);
constexpr Score RestrictedPiece = S( 7, 7);
constexpr Score RookOnKingRing = S( 16, 0);
- constexpr Score RookOnQueenFile = S( 6, 11);
constexpr Score SliderOnQueen = S( 60, 18);
constexpr Score ThreatByKing = S( 24, 89);
constexpr Score ThreatByPawnPush = S( 48, 39);
attackedBy2[Us] = dblAttackByPawn | (attackedBy[Us][KING] & attackedBy[Us][PAWN]);
// Init our king safety tables
- Square s = make_square(Utility::clamp(file_of(ksq), FILE_B, FILE_G),
- Utility::clamp(rank_of(ksq), RANK_2, RANK_7));
+ Square s = make_square(std::clamp(file_of(ksq), FILE_B, FILE_G),
+ std::clamp(rank_of(ksq), RANK_2, RANK_7));
kingRing[Us] = attacks_bb<KING>(s) | s;
kingAttackersCount[Them] = popcount(kingRing[Us] & pe->pawn_attacks(Them));
constexpr Direction Down = -pawn_push(Us);
constexpr Bitboard OutpostRanks = (Us == WHITE ? Rank4BB | Rank5BB | Rank6BB
: Rank5BB | Rank4BB | Rank3BB);
- const Square* pl = pos.squares<Pt>(Us);
-
+ Bitboard b1 = pos.pieces(Us, Pt);
Bitboard b, bb;
Score score = SCORE_ZERO;
attackedBy[Us][Pt] = 0;
- for (Square s = *pl; s != SQ_NONE; s = *++pl)
- {
+ while (b1) {
+ Square s = pop_lsb(&b1);
+
// Find attacked squares, including x-ray attacks for bishops and rooks
b = Pt == BISHOP ? attacks_bb<BISHOP>(s, pos.pieces() ^ pos.pieces(QUEEN))
: Pt == ROOK ? attacks_bb< ROOK>(s, pos.pieces() ^ pos.pieces(QUEEN) ^ pos.pieces(Us, ROOK))
// when the bishop is outside the pawn chain.
Bitboard blocked = pos.pieces(Us, PAWN) & shift<Down>(pos.pieces());
- score -= BishopPawns * pos.pawns_on_same_color_squares(Us, s)
+ score -= BishopPawns[edge_distance(file_of(s))] * pos.pawns_on_same_color_squares(Us, s)
* (!(attackedBy[Us][PAWN] & s) + popcount(blocked & CenterFiles));
// Penalty for all enemy pawns x-rayed
if (Pt == ROOK)
{
- // Bonus for rook on the same file as a queen
- if (file_bb(s) & pos.pieces(QUEEN))
- score += RookOnQueenFile;
-
- // Bonus for rook on an open or semi-open file
+ // Bonuses for rook on a (semi-)open or closed file
if (pos.is_on_semiopen_file(Us, s))
- score += RookOnFile[pos.is_on_semiopen_file(Them, s)];
-
- // Penalty when trapped by the king, even more if the king cannot castle
- else if (mob <= 3)
{
- File kf = file_of(pos.square<KING>(Us));
- if ((kf < FILE_E) == (file_of(s) < kf))
- score -= TrappedRook * (1 + !pos.castling_rights(Us));
+ score += RookOnOpenFile[pos.is_on_semiopen_file(Them, s)];
+ }
+ else
+ {
+ // If our pawn on this file is blocked, increase penalty
+ if ( pos.pieces(Us, PAWN)
+ & shift<Down>(pos.pieces())
+ & file_bb(s))
+ {
+ score -= RookOnClosedFile;
+ }
+
+ // Penalty when trapped by the king, even more if the king cannot castle
+ if (mob <= 3)
+ {
+ File kf = file_of(pos.square<KING>(Us));
+ if ((kf < FILE_E) == (file_of(s) < kf))
+ score -= TrappedRook * (1 + !pos.castling_rights(Us));
+ }
}
}
int kingFlankAttack = popcount(b1) + popcount(b2);
int kingFlankDefense = popcount(b3);
- kingDanger += kingAttackersCount[Them] * kingAttackersWeight[Them]
- + 185 * popcount(kingRing[Us] & weak)
- + 148 * popcount(unsafeChecks)
- + 98 * popcount(pos.blockers_for_king(Us))
- + 69 * kingAttacksCount[Them]
- + 3 * kingFlankAttack * kingFlankAttack / 8
- + mg_value(mobility[Them] - mobility[Us])
- - 873 * !pos.count<QUEEN>(Them)
- - 100 * bool(attackedBy[Us][KNIGHT] & attackedBy[Us][KING])
- - 6 * mg_value(score) / 8
- - 4 * kingFlankDefense
- + 37;
+ kingDanger += kingAttackersCount[Them] * kingAttackersWeight[Them] // (~10 Elo)
+ + 183 * popcount(kingRing[Us] & weak) // (~15 Elo)
+ + 148 * popcount(unsafeChecks) // (~4 Elo)
+ + 98 * popcount(pos.blockers_for_king(Us)) // (~2 Elo)
+ + 69 * kingAttacksCount[Them] // (~0.5 Elo)
+ + 3 * kingFlankAttack * kingFlankAttack / 8 // (~0.5 Elo)
+ + mg_value(mobility[Them] - mobility[Us]) // (~0.5 Elo)
+ - 873 * !pos.count<QUEEN>(Them) // (~24 Elo)
+ - 100 * bool(attackedBy[Us][KNIGHT] & attackedBy[Us][KING]) // (~5 Elo)
+ - 6 * mg_value(score) / 8 // (~8 Elo)
+ - 4 * kingFlankDefense // (~5 Elo)
+ + 37; // (~0.5 Elo)
// Transform the kingDanger units into a Score, and subtract it from the evaluation
if (kingDanger > 100)
Square blockSq = s + Up;
// Adjust bonus based on the king's proximity
- bonus += make_score(0, ( (king_proximity(Them, blockSq) * 19) / 4
- - king_proximity(Us, blockSq) * 2) * w);
+ bonus += make_score(0, ( king_proximity(Them, blockSq) * 19 / 4
+ - king_proximity(Us, blockSq) * 2) * w);
// If blockSq is not the queening square then consider also a second push
if (r != RANK_7)
bb = forward_file_bb(Them, s) & pos.pieces(ROOK, QUEEN);
if (!(pos.pieces(Them) & bb))
- unsafeSquares &= attackedBy[Them][ALL_PIECES];
+ unsafeSquares &= attackedBy[Them][ALL_PIECES] | pos.pieces(Them);
- // If there are no enemy attacks on passed pawn span, assign a big bonus.
+ // If there are no enemy pieces or attacks on passed pawn span, assign a big bonus.
// Otherwise assign a smaller bonus if the path to queen is not attacked
// and even smaller bonus if it is attacked but block square is not.
int k = !unsafeSquares ? 35 :
// Evaluation::space() computes a space evaluation for a given side, aiming to improve game
- // play in the opening. It is based on the number of safe squares on the 4 central files
+ // play in the opening. It is based on the number of safe squares on the four central files
// on ranks 2 to 4. Completely safe squares behind a friendly pawn are counted twice.
// Finally, the space bonus is multiplied by a weight which decreases according to occupancy.
behind |= shift<Down>(behind);
behind |= shift<Down+Down>(behind);
+ // Compute space score based on the number of safe squares and the number of our pieces,
+ // increased by the total number of blocked pawns in the position.
int bonus = popcount(safe) + popcount(behind & safe & ~attackedBy[Them][ALL_PIECES]);
int weight = pos.count<ALL_PIECES>(Us) - 3 + std::min(pe->blocked_count(), 9);
Score score = make_score(bonus * weight * weight / 16, 0);
// Now apply the bonus: note that we find the attacking side by extracting the
// sign of the midgame or endgame values, and that we carefully cap the bonus
// so that the midgame and endgame scores do not change sign after the bonus.
- int u = ((mg > 0) - (mg < 0)) * Utility::clamp(complexity + 50, -abs(mg), 0);
+ int u = ((mg > 0) - (mg < 0)) * std::clamp(complexity + 50, -abs(mg), 0);
int v = ((eg > 0) - (eg < 0)) * std::max(complexity, -abs(eg));
mg += u;
{
if (pos.opposite_bishops())
{
+ // For pure opposite colored bishops endgames use scale factor
+ // based on the number of passed pawns of the strong side.
if ( pos.non_pawn_material(WHITE) == BishopValueMg
&& pos.non_pawn_material(BLACK) == BishopValueMg)
sf = 18 + 4 * popcount(pe->passed_pawns(strongSide));
+ // For all other opposite colored bishops endgames use a scale factor
+ // based on the number of all pieces of the strong side.
else
sf = 22 + 3 * pos.count<ALL_PIECES>(strongSide);
}
+ // For rook endgames, use a lower scale factor when the strong side has at most
+ // one extra pawn, all of its pawns are on a single flank, and the weak side's
+ // king is defending its pawns.
else if ( pos.non_pawn_material(WHITE) == RookValueMg
&& pos.non_pawn_material(BLACK) == RookValueMg
&& pos.count<PAWN>(strongSide) - pos.count<PAWN>(~strongSide) <= 1
&& bool(KingSide & pos.pieces(strongSide, PAWN)) != bool(QueenSide & pos.pieces(strongSide, PAWN))
&& (attacks_bb<KING>(pos.square<KING>(~strongSide)) & pos.pieces(~strongSide, PAWN)))
sf = 36;
+ // For queen vs no queen endgames use a scale factor
+ // based on the number of minor pieces of the side without the queen.
else if (pos.count<QUEEN>() == 1)
sf = 37 + 3 * (pos.count<QUEEN>(WHITE) == 1 ? pos.count<BISHOP>(BLACK) + pos.count<KNIGHT>(BLACK)
: pos.count<BISHOP>(WHITE) + pos.count<KNIGHT>(WHITE));
+ // In every other case use scale factor based on
+ // the number of pawns of the strong side reduced if pawns are on a single flank.
else
- sf = std::min(sf, 36 + 7 * pos.count<PAWN>(strongSide));
+ sf = std::min(sf, 36 + 7 * pos.count<PAWN>(strongSide)) - 4 * !pawnsOnBothFlanks;
+
+ // Reduce scale factor in case of pawns being on a single flank
+ sf -= 4 * !pawnsOnBothFlanks;
}
// Interpolate between the middlegame and (scaled by 'sf') endgame score
// Side to move point of view
v = (pos.side_to_move() == WHITE ? v : -v) + Tempo;
- // Damp down the evaluation linearly when shuffling
- v = v * (100 - pos.rule50_count()) / 100;
-
return v;
}
Value Eval::evaluate(const Position& pos) {
- if (Eval::useNNUE)
+ Value v;
+
+ if (!Eval::useNNUE)
+ v = Evaluation<NO_TRACE>(pos).value();
+ else
{
- Value v = eg_value(pos.psq_score());
- // Take NNUE eval only on balanced positions
- if (abs(v) < NNUEThreshold)
- return NNUE::evaluate(pos) + Tempo;
+ // Scale and shift NNUE for compatibility with search and classical evaluation
+ auto adjusted_NNUE = [&](){
+ int mat = pos.non_pawn_material() + PawnValueMg * pos.count<PAWN>();
+ return NNUE::evaluate(pos) * (679 + mat / 32) / 1024 + Tempo;
+ };
+
+ // If the PSQ imbalance is large, use classical eval; if the imbalance is only
+ // moderate, use classical eval with small probability
+ Value psq = Value(abs(eg_value(pos.psq_score())));
+ int r50 = 16 + pos.rule50_count();
+ bool largePsq = psq * 16 > (NNUEThreshold1 + pos.non_pawn_material() / 64) * r50;
+ bool classical = largePsq || (psq > PawnValueMg / 4 && !(pos.this_thread()->nodes & 0xB));
+
+ // Use classical evaluation for really low piece endgames.
+ // The most critical case is a bishop + A/H file pawn vs naked king draw.
+ bool strongClassical = pos.non_pawn_material() < 2 * RookValueMg && pos.count<PAWN>() < 2;
+
+ v = classical || strongClassical ? Evaluation<NO_TRACE>(pos).value() : adjusted_NNUE();
+
+ // If the classical eval is small and imbalance large, use NNUE nevertheless.
+ // For the case of opposite colored bishops, switch to NNUE eval with
+ // small probability if the classical eval is less than the threshold.
+ if ( largePsq && !strongClassical
+ && ( abs(v) * 16 < NNUEThreshold2 * r50
+ || ( pos.opposite_bishops()
+ && abs(v) * 16 < (NNUEThreshold1 + pos.non_pawn_material() / 64) * r50
+ && !(pos.this_thread()->nodes & 0xB))))
+ v = adjusted_NNUE();
}
- return Evaluation<NO_TRACE>(pos).value();
+
+ // Damp down the evaluation linearly when shuffling
+ v = v * (100 - pos.rule50_count()) / 100;
+
+ // Guarantee evaluation does not hit the tablebase range
+ v = std::clamp(v, VALUE_TB_LOSS_IN_MAX_PLY + 1, VALUE_TB_WIN_IN_MAX_PLY - 1);
+
+ return v;
}
/// trace() is like evaluate(), but instead of returning a value, it returns
Value v;
+ std::memset(scores, 0, sizeof(scores));
+
+ pos.this_thread()->contempt = SCORE_ZERO; // Reset any dynamic contempt
+
+ v = Evaluation<TRACE>(pos).value();
+
+ ss << std::showpoint << std::noshowpos << std::fixed << std::setprecision(2)
+ << " Term | White | Black | Total \n"
+ << " | MG EG | MG EG | MG EG \n"
+ << " ------------+-------------+-------------+------------\n"
+ << " Material | " << Term(MATERIAL)
+ << " Imbalance | " << Term(IMBALANCE)
+ << " Pawns | " << Term(PAWN)
+ << " Knights | " << Term(KNIGHT)
+ << " Bishops | " << Term(BISHOP)
+ << " Rooks | " << Term(ROOK)
+ << " Queens | " << Term(QUEEN)
+ << " Mobility | " << Term(MOBILITY)
+ << " King safety | " << Term(KING)
+ << " Threats | " << Term(THREAT)
+ << " Passed | " << Term(PASSED)
+ << " Space | " << Term(SPACE)
+ << " Winnable | " << Term(WINNABLE)
+ << " ------------+-------------+-------------+------------\n"
+ << " Total | " << Term(TOTAL);
+
+ v = pos.side_to_move() == WHITE ? v : -v;
+
+ ss << "\nClassical evaluation: " << to_cp(v) << " (white side)\n";
+
if (Eval::useNNUE)
{
v = NNUE::evaluate(pos);
- }
- else
- {
- std::memset(scores, 0, sizeof(scores));
-
- pos.this_thread()->contempt = SCORE_ZERO; // Reset any dynamic contempt
-
- v = Evaluation<TRACE>(pos).value();
-
- ss << std::showpoint << std::noshowpos << std::fixed << std::setprecision(2)
- << " Term | White | Black | Total \n"
- << " | MG EG | MG EG | MG EG \n"
- << " ------------+-------------+-------------+------------\n"
- << " Material | " << Term(MATERIAL)
- << " Imbalance | " << Term(IMBALANCE)
- << " Pawns | " << Term(PAWN)
- << " Knights | " << Term(KNIGHT)
- << " Bishops | " << Term(BISHOP)
- << " Rooks | " << Term(ROOK)
- << " Queens | " << Term(QUEEN)
- << " Mobility | " << Term(MOBILITY)
- << " King safety | " << Term(KING)
- << " Threats | " << Term(THREAT)
- << " Passed | " << Term(PASSED)
- << " Space | " << Term(SPACE)
- << " Winnable | " << Term(WINNABLE)
- << " ------------+-------------+-------------+------------\n"
- << " Total | " << Term(TOTAL);
+ v = pos.side_to_move() == WHITE ? v : -v;
+ ss << "\nNNUE evaluation: " << to_cp(v) << " (white side)\n";
}
+ v = evaluate(pos);
v = pos.side_to_move() == WHITE ? v : -v;
-
- ss << "\nFinal evaluation: " << to_cp(v) << " (white side)\n";
+ ss << "\nFinal evaluation: " << to_cp(v) << " (white side)\n";
return ss.str();
}
extern bool useNNUE;
extern std::string eval_file_loaded;
- void init_NNUE();
- void verify_NNUE();
+
+ // The default net name MUST follow the format nn-[SHA256 first 12 digits].nnue
+ // for the build process (profile-build and fishtest) to work. Do not change the
+ // name of the macro, as it is used in the Makefile.
+ #define EvalFileDefaultName "nn-62ef826d1a6d.nnue"
namespace NNUE {
Value evaluate(const Position& pos);
- Value compute_eval(const Position& pos);
- void update_eval(const Position& pos);
- bool load_eval_file(const std::string& evalFile);
+ bool load_eval(std::string name, std::istream& stream);
+ void init();
+ void verify();
} // namespace NNUE
--- /dev/null
+#ifndef HASHPROBE_H_INCLUDED
+#define HASHPROBE_H_INCLUDED
+
+#include "types.h"
+
+#include <deque>
+#include <string>
+
+#include <grpc/grpc.h>
+#include <grpc++/server.h>
+#include <grpc++/server_builder.h>
+#include "hashprobe.grpc.pb.h"
+
+class HashProbeImpl final : public hashprobe::HashProbe::Service {
+public:
+ grpc::Status Probe(grpc::ServerContext* context,
+ const hashprobe::HashProbeRequest* request,
+ hashprobe::HashProbeResponse *response);
+
+private:
+ void FillMove(Position* pos, Move move, hashprobe::HashProbeMove* decoded);
+ void ProbeMove(Position* pos, std::deque<StateInfo>* setup_states, bool invert, hashprobe::HashProbeLine* response);
+ void FillValue(Value value, hashprobe::HashProbeScore* score);
+};
+
+class HashProbeThread {
+public:
+ HashProbeThread(const std::string &server_address);
+ void Shutdown();
+
+private:
+ HashProbeImpl service;
+ grpc::ServerBuilder builder;
+ std::unique_ptr<grpc::Server> server;
+};
+
+#endif
--- /dev/null
+syntax = "proto3";
+package hashprobe;
+
+message HashProbeRequest {
+ string fen = 1;
+}
+message HashProbeResponse {
+ HashProbeLine root = 2;
+ repeated HashProbeLine line = 1;
+}
+message HashProbeLine {
+ HashProbeMove move = 1;
+ bool found = 2;
+
+ repeated HashProbeMove pv = 3;
+ HashProbeScore value = 4; // Dynamic eval (may be inexact, see the "bound" field)
+ HashProbeScore eval = 5; // Static eval
+ int32 depth = 6;
+
+ enum ValueBound {
+ BOUND_NONE = 0;
+ BOUND_UPPER = 1;
+ BOUND_LOWER = 2;
+ BOUND_EXACT = 3;
+ };
+ ValueBound bound = 7;
+}
+
+message HashProbeMove {
+ string from_sq = 1; // a1, a2, etc.
+ string to_sq = 2;
+ string promotion = 3; // Q, R, etc.
+
+ string pretty = 4; // e.g. Rxf6+
+}
+message HashProbeScore {
+ enum ScoreType {
+ SCORE_NONE = 0;
+ SCORE_CP = 1;
+ SCORE_MATE = 2;
+ }
+ ScoreType score_type = 1;
+ int32 score_cp = 2;
+ int32 score_mate = 3;
+}
+
+service HashProbe {
+ rpc Probe(HashProbeRequest) returns (HashProbeResponse) {}
+}
--- /dev/null
+The file "incbin.h" is free and unencumbered software released into
+the public domain by Dale Weiler, see:
+ <https://github.com/graphitemaster/incbin>
+
+Anyone is free to copy, modify, publish, use, compile, sell, or
+distribute this software, either in source code form or as a compiled
+binary, for any purpose, commercial or non-commercial, and by any
+means.
+
+In jurisdictions that recognize copyright laws, the author or authors
+of this software dedicate any and all copyright interest in the
+software to the public domain. We make this dedication for the benefit
+of the public at large and to the detriment of our heirs and
+successors. We intend this dedication to be an overt act of
+relinquishment in perpetuity of all present and future rights to this
+software under copyright law.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+OTHER DEALINGS IN THE SOFTWARE.
+
+For more information, please refer to <http://unlicense.org/>
--- /dev/null
+/**
+ * @file incbin.h
+ * @author Dale Weiler
+ * @brief Utility for including binary files
+ *
+ * Facilities for including binary files into the current translation unit and
+ * making use from them externally in other translation units.
+ */
+#ifndef INCBIN_HDR
+#define INCBIN_HDR
+#include <limits.h>
+#if defined(__AVX512BW__) || \
+ defined(__AVX512CD__) || \
+ defined(__AVX512DQ__) || \
+ defined(__AVX512ER__) || \
+ defined(__AVX512PF__) || \
+ defined(__AVX512VL__) || \
+ defined(__AVX512F__)
+# define INCBIN_ALIGNMENT_INDEX 6
+#elif defined(__AVX__) || \
+ defined(__AVX2__)
+# define INCBIN_ALIGNMENT_INDEX 5
+#elif defined(__SSE__) || \
+ defined(__SSE2__) || \
+ defined(__SSE3__) || \
+ defined(__SSSE3__) || \
+ defined(__SSE4_1__) || \
+ defined(__SSE4_2__) || \
+ defined(__neon__)
+# define INCBIN_ALIGNMENT_INDEX 4
+#elif ULONG_MAX != 0xffffffffu
+# define INCBIN_ALIGNMENT_INDEX 3
+# else
+# define INCBIN_ALIGNMENT_INDEX 2
+#endif
+
+/* Lookup table of (1 << n) where `n' is `INCBIN_ALIGNMENT_INDEX' */
+#define INCBIN_ALIGN_SHIFT_0 1
+#define INCBIN_ALIGN_SHIFT_1 2
+#define INCBIN_ALIGN_SHIFT_2 4
+#define INCBIN_ALIGN_SHIFT_3 8
+#define INCBIN_ALIGN_SHIFT_4 16
+#define INCBIN_ALIGN_SHIFT_5 32
+#define INCBIN_ALIGN_SHIFT_6 64
+
+/* Actual alignment value */
+#define INCBIN_ALIGNMENT \
+ INCBIN_CONCATENATE( \
+ INCBIN_CONCATENATE(INCBIN_ALIGN_SHIFT, _), \
+ INCBIN_ALIGNMENT_INDEX)
+
+/* Stringize */
+#define INCBIN_STR(X) \
+ #X
+#define INCBIN_STRINGIZE(X) \
+ INCBIN_STR(X)
+/* Concatenate */
+#define INCBIN_CAT(X, Y) \
+ X ## Y
+#define INCBIN_CONCATENATE(X, Y) \
+ INCBIN_CAT(X, Y)
+/* Deferred macro expansion */
+#define INCBIN_EVAL(X) \
+ X
+#define INCBIN_INVOKE(N, ...) \
+ INCBIN_EVAL(N(__VA_ARGS__))
+
+/* Green Hills uses a different directive for including binary data */
+#if defined(__ghs__)
+# if (__ghs_asm == 2)
+# define INCBIN_MACRO ".file"
+/* Or consider the ".myrawdata" entry in the ld file */
+# else
+# define INCBIN_MACRO "\tINCBIN"
+# endif
+#else
+# define INCBIN_MACRO ".incbin"
+#endif
+
+#ifndef _MSC_VER
+# define INCBIN_ALIGN \
+ __attribute__((aligned(INCBIN_ALIGNMENT)))
+#else
+# define INCBIN_ALIGN __declspec(align(INCBIN_ALIGNMENT))
+#endif
+
+#if defined(__arm__) || /* GNU C and RealView */ \
+ defined(__arm) || /* Diab */ \
+ defined(_ARM) /* ImageCraft */
+# define INCBIN_ARM
+#endif
+
+#ifdef __GNUC__
+/* Utilize .balign where supported */
+# define INCBIN_ALIGN_HOST ".balign " INCBIN_STRINGIZE(INCBIN_ALIGNMENT) "\n"
+# define INCBIN_ALIGN_BYTE ".balign 1\n"
+#elif defined(INCBIN_ARM)
+/*
+ * On arm assemblers, the alignment value is calculated as (1 << n) where `n' is
+ * the shift count. This is the value passed to `.align'
+ */
+# define INCBIN_ALIGN_HOST ".align " INCBIN_STRINGIZE(INCBIN_ALIGNMENT_INDEX) "\n"
+# define INCBIN_ALIGN_BYTE ".align 0\n"
+#else
+/* We assume other inline assembler's treat `.align' as `.balign' */
+# define INCBIN_ALIGN_HOST ".align " INCBIN_STRINGIZE(INCBIN_ALIGNMENT) "\n"
+# define INCBIN_ALIGN_BYTE ".align 1\n"
+#endif
+
+/* INCBIN_CONST is used by incbin.c generated files */
+#if defined(__cplusplus)
+# define INCBIN_EXTERNAL extern "C"
+# define INCBIN_CONST extern const
+#else
+# define INCBIN_EXTERNAL extern
+# define INCBIN_CONST const
+#endif
+
+/**
+ * @brief Optionally override the linker section into which data is emitted.
+ *
+ * @warning If you use this facility, you'll have to deal with platform-specific linker output
+ * section naming on your own
+ *
+ * Overriding the default linker output section, e.g for esp8266/Arduino:
+ * @code
+ * #define INCBIN_OUTPUT_SECTION ".irom.text"
+ * #include "incbin.h"
+ * INCBIN(Foo, "foo.txt");
+ * // Data is emitted into program memory that never gets copied to RAM
+ * @endcode
+ */
+#if !defined(INCBIN_OUTPUT_SECTION)
+# if defined(__APPLE__)
+# define INCBIN_OUTPUT_SECTION ".const_data"
+# else
+# define INCBIN_OUTPUT_SECTION ".rodata"
+# endif
+#endif
+
+#if defined(__APPLE__)
+/* The directives are different for Apple branded compilers */
+# define INCBIN_SECTION INCBIN_OUTPUT_SECTION "\n"
+# define INCBIN_GLOBAL(NAME) ".globl " INCBIN_MANGLE INCBIN_STRINGIZE(INCBIN_PREFIX) #NAME "\n"
+# define INCBIN_INT ".long "
+# define INCBIN_MANGLE "_"
+# define INCBIN_BYTE ".byte "
+# define INCBIN_TYPE(...)
+#else
+# define INCBIN_SECTION ".section " INCBIN_OUTPUT_SECTION "\n"
+# define INCBIN_GLOBAL(NAME) ".global " INCBIN_STRINGIZE(INCBIN_PREFIX) #NAME "\n"
+# if defined(__ghs__)
+# define INCBIN_INT ".word "
+# else
+# define INCBIN_INT ".int "
+# endif
+# if defined(__USER_LABEL_PREFIX__)
+# define INCBIN_MANGLE INCBIN_STRINGIZE(__USER_LABEL_PREFIX__)
+# else
+# define INCBIN_MANGLE ""
+# endif
+# if defined(INCBIN_ARM)
+/* On arm assemblers, `@' is used as a line comment token */
+# define INCBIN_TYPE(NAME) ".type " INCBIN_STRINGIZE(INCBIN_PREFIX) #NAME ", %object\n"
+# elif defined(__MINGW32__) || defined(__MINGW64__)
+/* Mingw doesn't support this directive either */
+# define INCBIN_TYPE(NAME)
+# else
+/* It's safe to use `@' on other architectures */
+# define INCBIN_TYPE(NAME) ".type " INCBIN_STRINGIZE(INCBIN_PREFIX) #NAME ", @object\n"
+# endif
+# define INCBIN_BYTE ".byte "
+#endif
+
+/* List of style types used for symbol names */
+#define INCBIN_STYLE_CAMEL 0
+#define INCBIN_STYLE_SNAKE 1
+
+/**
+ * @brief Specify the prefix to use for symbol names.
+ *
+ * By default this is `g', producing symbols of the form:
+ * @code
+ * #include "incbin.h"
+ * INCBIN(Foo, "foo.txt");
+ *
+ * // Now you have the following symbols:
+ * // const unsigned char gFooData[];
+ * // const unsigned char *const gFooEnd;
+ * // const unsigned int gFooSize;
+ * @endcode
+ *
+ * If however you specify a prefix before including: e.g:
+ * @code
+ * #define INCBIN_PREFIX incbin
+ * #include "incbin.h"
+ * INCBIN(Foo, "foo.txt");
+ *
+ * // Now you have the following symbols instead:
+ * // const unsigned char incbinFooData[];
+ * // const unsigned char *const incbinFooEnd;
+ * // const unsigned int incbinFooSize;
+ * @endcode
+ */
+#if !defined(INCBIN_PREFIX)
+# define INCBIN_PREFIX g
+#endif
+
+/**
+ * @brief Specify the style used for symbol names.
+ *
+ * Possible options are
+ * - INCBIN_STYLE_CAMEL "CamelCase"
+ * - INCBIN_STYLE_SNAKE "snake_case"
+ *
+ * Default option is *INCBIN_STYLE_CAMEL* producing symbols of the form:
+ * @code
+ * #include "incbin.h"
+ * INCBIN(Foo, "foo.txt");
+ *
+ * // Now you have the following symbols:
+ * // const unsigned char <prefix>FooData[];
+ * // const unsigned char *const <prefix>FooEnd;
+ * // const unsigned int <prefix>FooSize;
+ * @endcode
+ *
+ * If however you specify a style before including: e.g:
+ * @code
+ * #define INCBIN_STYLE INCBIN_STYLE_SNAKE
+ * #include "incbin.h"
+ * INCBIN(foo, "foo.txt");
+ *
+ * // Now you have the following symbols:
+ * // const unsigned char <prefix>foo_data[];
+ * // const unsigned char *const <prefix>foo_end;
+ * // const unsigned int <prefix>foo_size;
+ * @endcode
+ */
+#if !defined(INCBIN_STYLE)
+# define INCBIN_STYLE INCBIN_STYLE_CAMEL
+#endif
+
+/* Style lookup tables */
+#define INCBIN_STYLE_0_DATA Data
+#define INCBIN_STYLE_0_END End
+#define INCBIN_STYLE_0_SIZE Size
+#define INCBIN_STYLE_1_DATA _data
+#define INCBIN_STYLE_1_END _end
+#define INCBIN_STYLE_1_SIZE _size
+
+/* Style lookup: returning identifier */
+#define INCBIN_STYLE_IDENT(TYPE) \
+ INCBIN_CONCATENATE( \
+ INCBIN_STYLE_, \
+ INCBIN_CONCATENATE( \
+ INCBIN_EVAL(INCBIN_STYLE), \
+ INCBIN_CONCATENATE(_, TYPE)))
+
+/* Style lookup: returning string literal */
+#define INCBIN_STYLE_STRING(TYPE) \
+ INCBIN_STRINGIZE( \
+ INCBIN_STYLE_IDENT(TYPE)) \
+
+/* Generate the global labels by indirectly invoking the macro with our style
+ * type and concatenating the name against them. */
+#define INCBIN_GLOBAL_LABELS(NAME, TYPE) \
+ INCBIN_INVOKE( \
+ INCBIN_GLOBAL, \
+ INCBIN_CONCATENATE( \
+ NAME, \
+ INCBIN_INVOKE( \
+ INCBIN_STYLE_IDENT, \
+ TYPE))) \
+ INCBIN_INVOKE( \
+ INCBIN_TYPE, \
+ INCBIN_CONCATENATE( \
+ NAME, \
+ INCBIN_INVOKE( \
+ INCBIN_STYLE_IDENT, \
+ TYPE)))
+
+/**
+ * @brief Externally reference binary data included in another translation unit.
+ *
+ * Produces three external symbols that reference the binary data included in
+ * another translation unit.
+ *
+ * The symbol names are a concatenation of `INCBIN_PREFIX' before *NAME*; with
+ * "Data", as well as "End" and "Size" after. An example is provided below.
+ *
+ * @param NAME The name given for the binary data
+ *
+ * @code
+ * INCBIN_EXTERN(Foo);
+ *
+ * // Now you have the following symbols:
+ * // extern const unsigned char <prefix>FooData[];
+ * // extern const unsigned char *const <prefix>FooEnd;
+ * // extern const unsigned int <prefix>FooSize;
+ * @endcode
+ */
+#define INCBIN_EXTERN(NAME) \
+ INCBIN_EXTERNAL const INCBIN_ALIGN unsigned char \
+ INCBIN_CONCATENATE( \
+ INCBIN_CONCATENATE(INCBIN_PREFIX, NAME), \
+ INCBIN_STYLE_IDENT(DATA))[]; \
+ INCBIN_EXTERNAL const INCBIN_ALIGN unsigned char *const \
+ INCBIN_CONCATENATE( \
+ INCBIN_CONCATENATE(INCBIN_PREFIX, NAME), \
+ INCBIN_STYLE_IDENT(END)); \
+ INCBIN_EXTERNAL const unsigned int \
+ INCBIN_CONCATENATE( \
+ INCBIN_CONCATENATE(INCBIN_PREFIX, NAME), \
+ INCBIN_STYLE_IDENT(SIZE))
+
+/**
+ * @brief Include a binary file into the current translation unit.
+ *
+ * Includes a binary file into the current translation unit, producing three symbols
+ * for objects that encode the data and size respectively.
+ *
+ * The symbol names are a concatenation of `INCBIN_PREFIX' before *NAME*; with
+ * "Data", as well as "End" and "Size" after. An example is provided below.
+ *
+ * @param NAME The name to associate with this binary data (as an identifier.)
+ * @param FILENAME The file to include (as a string literal.)
+ *
+ * @code
+ * INCBIN(Icon, "icon.png");
+ *
+ * // Now you have the following symbols:
+ * // const unsigned char <prefix>IconData[];
+ * // const unsigned char *const <prefix>IconEnd;
+ * // const unsigned int <prefix>IconSize;
+ * @endcode
+ *
+ * @warning This must be used in global scope
+ * @warning The identifiers may be different if INCBIN_STYLE is not default
+ *
+ * To externally reference the data included by this in another translation unit
+ * please @see INCBIN_EXTERN.
+ */
+#ifdef _MSC_VER
+#define INCBIN(NAME, FILENAME) \
+ INCBIN_EXTERN(NAME)
+#else
+#define INCBIN(NAME, FILENAME) \
+ __asm__(INCBIN_SECTION \
+ INCBIN_GLOBAL_LABELS(NAME, DATA) \
+ INCBIN_ALIGN_HOST \
+ INCBIN_MANGLE INCBIN_STRINGIZE(INCBIN_PREFIX) #NAME INCBIN_STYLE_STRING(DATA) ":\n" \
+ INCBIN_MACRO " \"" FILENAME "\"\n" \
+ INCBIN_GLOBAL_LABELS(NAME, END) \
+ INCBIN_ALIGN_BYTE \
+ INCBIN_MANGLE INCBIN_STRINGIZE(INCBIN_PREFIX) #NAME INCBIN_STYLE_STRING(END) ":\n" \
+ INCBIN_BYTE "1\n" \
+ INCBIN_GLOBAL_LABELS(NAME, SIZE) \
+ INCBIN_ALIGN_HOST \
+ INCBIN_MANGLE INCBIN_STRINGIZE(INCBIN_PREFIX) #NAME INCBIN_STYLE_STRING(SIZE) ":\n" \
+ INCBIN_INT INCBIN_MANGLE INCBIN_STRINGIZE(INCBIN_PREFIX) #NAME INCBIN_STYLE_STRING(END) " - " \
+ INCBIN_MANGLE INCBIN_STRINGIZE(INCBIN_PREFIX) #NAME INCBIN_STYLE_STRING(DATA) "\n" \
+ INCBIN_ALIGN_HOST \
+ ".text\n" \
+ ); \
+ INCBIN_EXTERN(NAME)
+
+#endif
+#endif
along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#include <deque>
#include <iostream>
+#include <stack>
+#include <thread>
#include "bitboard.h"
#include "endgame.h"
#include "uci.h"
#include "syzygy/tbprobe.h"
+#include <grpc/grpc.h>
+#include <grpc++/server.h>
+#include <grpc++/server_builder.h>
+#include "hashprobe.h"
+#include "hashprobe.grpc.pb.h"
+#include "tt.h"
+
+using grpc::Server;
+using grpc::ServerBuilder;
+using grpc::ServerContext;
+using grpc::Status;
+using grpc::StatusCode;
+using namespace hashprobe;
+
+Status HashProbeImpl::Probe(ServerContext* context,
+ const HashProbeRequest* request,
+ HashProbeResponse *response) {
+ Position pos;
+ StateInfo st;
+ pos.set(request->fen(), /*isChess960=*/false, &st, Threads.main());
+ if (!pos.pos_is_ok()) {
+ return Status(StatusCode::INVALID_ARGUMENT, "Invalid FEN");
+ }
+
+ bool invert = (pos.side_to_move() == BLACK);
+ StateListPtr setup_states = StateListPtr(new std::deque<StateInfo>(1));
+
+ ProbeMove(&pos, setup_states.get(), invert, response->mutable_root());
+
+ MoveList<LEGAL> moves(pos);
+ for (const ExtMove* em = moves.begin(); em != moves.end(); ++em) {
+ HashProbeLine *line = response->add_line();
+ FillMove(&pos, em->move, line->mutable_move());
+ setup_states->push_back(StateInfo());
+ pos.do_move(em->move, setup_states->back());
+ ProbeMove(&pos, setup_states.get(), !invert, line);
+ pos.undo_move(em->move);
+ }
+
+ return Status::OK;
+}
+
+void HashProbeImpl::FillMove(Position *pos, Move move, HashProbeMove* decoded) {
+ if (!is_ok(move)) return;
+
+ Square from = from_sq(move);
+ Square to = to_sq(move);
+
+ if (type_of(move) == CASTLING) {
+ to = make_square(to > from ? FILE_G : FILE_C, rank_of(from));
+ }
+
+ Piece moved_piece = pos->moved_piece(move);
+ std::string pretty;
+ if (type_of(move) == CASTLING) {
+ if (to > from) {
+ pretty = "O-O";
+ } else {
+ pretty = "O-O-O";
+ }
+ } else if (type_of(moved_piece) == PAWN) {
+ if (type_of(move) == ENPASSANT || pos->piece_on(to) != NO_PIECE) {
+ // Capture.
+ pretty = char('a' + file_of(from));
+ pretty += "x";
+ }
+ pretty += UCI::square(to);
+ if (type_of(move) == PROMOTION) {
+ pretty += "=";
+ pretty += " PNBRQK"[promotion_type(move)];
+ }
+ } else {
+ pretty = " PNBRQK"[type_of(moved_piece)];
+ Bitboard attackers = pos->attackers_to(to) & pos->pieces(color_of(moved_piece), type_of(moved_piece));
+ if (more_than_one(attackers)) {
+ // Remove all illegal moves to disambiguate.
+ Bitboard att_copy = attackers;
+ while (att_copy) {
+ Square s = pop_lsb(&att_copy);
+ Move m = make_move(s, to);
+ if (!pos->pseudo_legal(m) || !pos->legal(m)) {
+ attackers &= ~SquareBB[s];
+ }
+ }
+ }
+ if (more_than_one(attackers)) {
+ // Disambiguate by file if possible.
+ Bitboard attackers_this_file = attackers & file_bb(file_of(from));
+ if (attackers != attackers_this_file) {
+ pretty += char('a' + file_of(from));
+ attackers = attackers_this_file;
+ }
+ if (more_than_one(attackers)) {
+ // Still ambiguous, so need to disambiguate by rank.
+ pretty += char('1' + rank_of(from));
+ }
+ }
+
+ if (type_of(move) == ENPASSANT || pos->piece_on(to) != NO_PIECE) {
+ pretty += "x";
+ }
+
+ pretty += UCI::square(to);
+ }
+
+ if (pos->gives_check(move)) {
+ // Check if mate.
+ StateInfo si;
+ pos->do_move(move, si, true);
+ if (MoveList<LEGAL>(*pos).size() > 0) {
+ pretty += "+";
+ } else {
+ pretty += "#";
+ }
+ pos->undo_move(move);
+ }
+
+ decoded->set_pretty(pretty);
+}
+
+void HashProbeImpl::ProbeMove(Position* pos, std::deque<StateInfo>* setup_states, bool invert, HashProbeLine* response) {
+ bool found;
+ TTEntry *entry = TT.probe(pos->key(), found);
+ response->set_found(found);
+ if (found) {
+ Value value = entry->value();
+ Value eval = entry->eval();
+ Bound bound = entry->bound();
+
+ if (invert) {
+ value = -value;
+ eval = -eval;
+ if (bound == BOUND_UPPER) {
+ bound = BOUND_LOWER;
+ } else if (bound == BOUND_LOWER) {
+ bound = BOUND_UPPER;
+ }
+ }
+
+ response->set_depth(entry->depth());
+ FillValue(eval, response->mutable_eval());
+ if (entry->depth() > DEPTH_NONE) {
+ FillValue(value, response->mutable_value());
+ }
+ response->set_bound(HashProbeLine::ValueBound(bound));
+
+ // Follow the PV until we hit an illegal move.
+ std::stack<Move> pv;
+ std::set<Key> seen;
+ while (found && is_ok(entry->move()) &&
+ pos->pseudo_legal(entry->move()) &&
+ pos->legal(entry->move())) {
+ FillMove(pos, entry->move(), response->add_pv());
+ if (seen.count(pos->key())) break;
+ pv.push(entry->move());
+ seen.insert(pos->key());
+ setup_states->push_back(StateInfo());
+ pos->do_move(entry->move(), setup_states->back());
+ entry = TT.probe(pos->key(), found);
+ }
+
+ // Unroll the PV back again, so the Position object remains unchanged.
+ while (!pv.empty()) {
+ pos->undo_move(pv.top());
+ pv.pop();
+ }
+ }
+}
+
+void HashProbeImpl::FillValue(Value value, HashProbeScore* score) {
+ if (abs(value) < VALUE_MATE - MAX_PLY) {
+ score->set_score_type(HashProbeScore::SCORE_CP);
+ score->set_score_cp(value * 100 / PawnValueEg);
+ } else {
+ score->set_score_type(HashProbeScore::SCORE_MATE);
+ score->set_score_mate((value > 0 ? VALUE_MATE - value + 1 : -VALUE_MATE - value) / 2);
+ }
+}
+
+HashProbeThread::HashProbeThread(const std::string &server_address) {
+ builder.AddListeningPort(server_address, grpc::InsecureServerCredentials());
+ builder.RegisterService(&service);
+ server = std::move(builder.BuildAndStart());
+ std::cout << "Server listening on " << server_address << std::endl;
+ std::thread([this]{ server->Wait(); }).detach();
+}
+
+void HashProbeThread::Shutdown() {
+ server->Shutdown();
+}
+
namespace PSQT {
void init();
}
std::cout << engine_info() << std::endl;
+ CommandLine::init(argc, argv);
UCI::init(Options);
Tune::init();
PSQT::init();
Endgames::init();
Threads.set(size_t(Options["Threads"]));
Search::clear(); // After threads are up
- Eval::init_NNUE();
+ Eval::NNUE::init();
UCI::loop(argc, argv);
using namespace std;
namespace {
+ #define S(mg, eg) make_score(mg, eg)
// Polynomial material imbalance parameters
- constexpr int QuadraticOurs[][PIECE_TYPE_NB] = {
+ constexpr Score QuadraticOurs[][PIECE_TYPE_NB] = {
// OUR PIECES
// pair pawn knight bishop rook queen
- {1438 }, // Bishop pair
- { 40, 38 }, // Pawn
- { 32, 255, -62 }, // Knight OUR PIECES
- { 0, 104, 4, 0 }, // Bishop
- { -26, -2, 47, 105, -208 }, // Rook
- {-189, 24, 117, 133, -134, -6 } // Queen
+ {S(1419, 1455) }, // Bishop pair
+ {S( 101, 28), S( 37, 39) }, // Pawn
+ {S( 57, 64), S(249, 187), S(-49, -62) }, // Knight OUR PIECES
+ {S( 0, 0), S(118, 137), S( 10, 27), S( 0, 0) }, // Bishop
+ {S( -63, -68), S( -5, 3), S(100, 81), S(132, 118), S(-246, -244) }, // Rook
+ {S(-210, -211), S( 37, 14), S(147, 141), S(161, 105), S(-158, -174), S(-9,-31) } // Queen
};
- constexpr int QuadraticTheirs[][PIECE_TYPE_NB] = {
+ constexpr Score QuadraticTheirs[][PIECE_TYPE_NB] = {
// THEIR PIECES
// pair pawn knight bishop rook queen
- { }, // Bishop pair
- { 36, }, // Pawn
- { 9, 63, }, // Knight OUR PIECES
- { 59, 65, 42, }, // Bishop
- { 46, 39, 24, -24, }, // Rook
- { 97, 100, -42, 137, 268, } // Queen
+ { }, // Bishop pair
+ {S( 33, 30) }, // Pawn
+ {S( 46, 18), S(106, 84) }, // Knight OUR PIECES
+ {S( 75, 35), S( 59, 44), S( 60, 15) }, // Bishop
+ {S( 26, 35), S( 6, 22), S( 38, 39), S(-12, -2) }, // Rook
+ {S( 97, 93), S(100, 163), S(-58, -91), S(112, 192), S(276, 225) } // Queen
};
+ #undef S
+
// Endgame evaluation and scaling functions are accessed directly and not through
// the function maps because they correspond to more than one material hash key.
Endgame<KXK> EvaluateKXK[] = { Endgame<KXK>(WHITE), Endgame<KXK>(BLACK) };
/// piece type for both colors.
template<Color Us>
- int imbalance(const int pieceCount[][PIECE_TYPE_NB]) {
+ Score imbalance(const int pieceCount[][PIECE_TYPE_NB]) {
constexpr Color Them = ~Us;
- int bonus = 0;
+ Score bonus = SCORE_ZERO;
// Second-degree polynomial material imbalance, by Tord Romstad
for (int pt1 = NO_PIECE_TYPE; pt1 <= QUEEN; ++pt1)
Value npm_w = pos.non_pawn_material(WHITE);
Value npm_b = pos.non_pawn_material(BLACK);
- Value npm = Utility::clamp(npm_w + npm_b, EndgameLimit, MidgameLimit);
+ Value npm = std::clamp(npm_w + npm_b, EndgameLimit, MidgameLimit);
// Map total non-pawn material into [PHASE_ENDGAME, PHASE_MIDGAME]
e->gamePhase = Phase(((npm - EndgameLimit) * PHASE_MIDGAME) / (MidgameLimit - EndgameLimit));
{ pos.count<BISHOP>(BLACK) > 1, pos.count<PAWN>(BLACK), pos.count<KNIGHT>(BLACK),
pos.count<BISHOP>(BLACK) , pos.count<ROOK>(BLACK), pos.count<QUEEN >(BLACK) } };
- e->value = int16_t((imbalance<WHITE>(pieceCount) - imbalance<BLACK>(pieceCount)) / 16);
+ e->score = (imbalance<WHITE>(pieceCount) - imbalance<BLACK>(pieceCount)) / 16;
return e;
}
struct Entry {
- Score imbalance() const { return make_score(value, value); }
- Phase game_phase() const { return gamePhase; }
+ Score imbalance() const { return score; }
+ Phase game_phase() const { return (Phase)gamePhase; }
bool specialized_eval_exists() const { return evaluationFunction != nullptr; }
Value evaluate(const Position& pos) const { return (*evaluationFunction)(pos); }
const EndgameBase<Value>* evaluationFunction;
const EndgameBase<ScaleFactor>* scalingFunction[COLOR_NB]; // Could be one for each
// side (e.g. KPKP, KBPsK)
- int16_t value;
+ Score score;
+ int16_t gamePhase;
uint8_t factor[COLOR_NB];
- Phase gamePhase;
};
typedef HashTable<Entry, 8192> Table;
#include <sys/mman.h>
#endif
+#if defined(__APPLE__) || defined(__ANDROID__) || defined(__OpenBSD__) || (defined(__GLIBCXX__) && !defined(_GLIBCXX_HAVE_ALIGNED_ALLOC) && !defined(_WIN32))
+#define POSIXALIGNEDALLOC
+#include <stdlib.h>
+#endif
+
#include "misc.h"
#include "thread.h"
} // namespace
+
/// engine_info() returns the full name of the current Stockfish version. This
/// will be either "Stockfish <Tag> DD-MM-YY" (where DD-MM-YY is the date when
/// the program was compiled) or "Stockfish <Version>", depending on whether
{
date >> month >> day >> year;
ss << setw(2) << day << setw(2) << (1 + months.find(month) / 4) << year.substr(2);
+ ss << "-asn";
}
ss << (to_uci ? "\nid author ": " by ")
compiler += "\nCompilation settings include: ";
compiler += (Is64Bit ? " 64bit" : " 32bit");
+ #if defined(USE_VNNI)
+ compiler += " VNNI";
+ #endif
#if defined(USE_AVX512)
compiler += " AVX512";
#endif
+ compiler += (HasPext ? " BMI2" : "");
#if defined(USE_AVX2)
compiler += " AVX2";
#endif
- #if defined(USE_SSE42)
- compiler += " SSE42";
- #endif
#if defined(USE_SSE41)
compiler += " SSE41";
#endif
#if defined(USE_SSSE3)
compiler += " SSSE3";
#endif
- #if defined(USE_SSE3)
- compiler += " SSE3";
+ #if defined(USE_SSE2)
+ compiler += " SSE2";
#endif
- compiler += (HasPext ? " BMI2" : "");
- compiler += (HasPopCnt ? " POPCNT" : "");
+ compiler += (HasPopCnt ? " POPCNT" : "");
+ #if defined(USE_MMX)
+ compiler += " MMX";
+ #endif
+ #if defined(USE_NEON)
+ compiler += " NEON";
+ #endif
+
#if !defined(NDEBUG)
compiler += " DEBUG";
#endif
#endif
-/// Wrappers for systems where the c++17 implementation doesn't guarantee the availability of aligned_alloc.
-/// Memory allocated with std_aligned_alloc must be freed with std_aligned_free.
-///
+
+/// std_aligned_alloc() is our wrapper for systems where the c++17 implementation
+/// does not guarantee the availability of aligned_alloc(). Memory allocated with
+/// std_aligned_alloc() must be freed with std_aligned_free().
void* std_aligned_alloc(size_t alignment, size_t size) {
-#if defined(__APPLE__)
- return aligned_alloc(alignment, size);
+
+#if defined(POSIXALIGNEDALLOC)
+ void *mem;
+ return posix_memalign(&mem, alignment, size) ? nullptr : mem;
#elif defined(_WIN32)
return _mm_malloc(size, alignment);
#else
}
void std_aligned_free(void* ptr) {
-#if defined(__APPLE__)
+
+#if defined(POSIXALIGNEDALLOC)
free(ptr);
#elif defined(_WIN32)
_mm_free(ptr);
#endif
}
-/// aligned_ttmem_alloc() will return suitably aligned memory, and if possible use large pages.
-/// The returned pointer is the aligned one, while the mem argument is the one that needs
-/// to be passed to free. With c++17 some of this functionality could be simplified.
-
-#if defined(__linux__) && !defined(__ANDROID__)
-
-void* aligned_ttmem_alloc(size_t allocSize, void*& mem) {
+/// aligned_large_pages_alloc() will return suitably aligned memory, if possible using large pages.
- constexpr size_t alignment = 2 * 1024 * 1024; // assumed 2MB page sizes
- size_t size = ((allocSize + alignment - 1) / alignment) * alignment; // multiple of alignment
- if (posix_memalign(&mem, alignment, size))
- mem = nullptr;
- madvise(mem, allocSize, MADV_HUGEPAGE);
- return mem;
-}
+#if defined(_WIN32)
-#elif defined(_WIN64)
-
-static void* aligned_ttmem_alloc_large_pages(size_t allocSize) {
+static void* aligned_large_pages_alloc_win(size_t allocSize) {
HANDLE hProcessToken { };
LUID luid { };
return mem;
}
-void* aligned_ttmem_alloc(size_t allocSize, void*& mem) {
-
- static bool firstCall = true;
+void* aligned_large_pages_alloc(size_t allocSize) {
// Try to allocate large pages
- mem = aligned_ttmem_alloc_large_pages(allocSize);
-
- // Suppress info strings on the first call. The first call occurs before 'uci'
- // is received and in that case this output confuses some GUIs.
- if (!firstCall)
- {
- if (mem)
- sync_cout << "info string Hash table allocation: Windows large pages used." << sync_endl;
- else
- sync_cout << "info string Hash table allocation: Windows large pages not used." << sync_endl;
- }
- firstCall = false;
+ void* mem = aligned_large_pages_alloc_win(allocSize);
// Fall back to regular, page aligned, allocation if necessary
if (!mem)
#else
-void* aligned_ttmem_alloc(size_t allocSize, void*& mem) {
+void* aligned_large_pages_alloc(size_t allocSize) {
- constexpr size_t alignment = 64; // assumed cache line size
- size_t size = allocSize + alignment - 1; // allocate some extra space
- mem = malloc(size);
- void* ret = reinterpret_cast<void*>((uintptr_t(mem) + alignment - 1) & ~uintptr_t(alignment - 1));
- return ret;
+#if defined(__linux__)
+ constexpr size_t alignment = 2 * 1024 * 1024; // assumed 2MB page size
+#else
+ constexpr size_t alignment = 4096; // assumed small page size
+#endif
+
+ // round up to multiples of alignment
+ size_t size = ((allocSize + alignment - 1) / alignment) * alignment;
+ void *mem = std_aligned_alloc(alignment, size);
+#if defined(MADV_HUGEPAGE)
+ madvise(mem, size, MADV_HUGEPAGE);
+#endif
+ return mem;
}
#endif
-/// aligned_ttmem_free() will free the previously allocated ttmem
+/// aligned_large_pages_free() will free the memory previously allocated with aligned_large_pages_alloc()
-#if defined(_WIN64)
+#if defined(_WIN32)
-void aligned_ttmem_free(void* mem) {
+void aligned_large_pages_free(void* mem) {
if (mem && !VirtualFree(mem, 0, MEM_RELEASE))
{
#else
-void aligned_ttmem_free(void *mem) {
- free(mem);
+void aligned_large_pages_free(void *mem) {
+ std_aligned_free(mem);
}
#endif
#endif
} // namespace WinProcGroup
+
+#ifdef _WIN32
+#include <direct.h>
+#define GETCWD _getcwd
+#else
+#include <unistd.h>
+#define GETCWD getcwd
+#endif
+
+namespace CommandLine {
+
+string argv0; // path+name of the executable binary, as given by argv[0]
+string binaryDirectory; // path of the executable directory
+string workingDirectory; // path of the working directory
+
+void init(int argc, char* argv[]) {
+ (void)argc;
+ string pathSeparator;
+
+ // extract the path+name of the executable binary
+ argv0 = argv[0];
+
+#ifdef _WIN32
+ pathSeparator = "\\";
+ #ifdef _MSC_VER
+ // Under windows argv[0] may not have the extension. Also _get_pgmptr() had
+ // issues in some windows 10 versions, so check returned values carefully.
+ char* pgmptr = nullptr;
+ if (!_get_pgmptr(&pgmptr) && pgmptr != nullptr && *pgmptr)
+ argv0 = pgmptr;
+ #endif
+#else
+ pathSeparator = "/";
+#endif
+
+ // extract the working directory
+ workingDirectory = "";
+ char buff[40000];
+ char* cwd = GETCWD(buff, 40000);
+ if (cwd)
+ workingDirectory = cwd;
+
+ // extract the binary directory path from argv0
+ binaryDirectory = argv0;
+ size_t pos = binaryDirectory.find_last_of("\\/");
+ if (pos == std::string::npos)
+ binaryDirectory = "." + pathSeparator;
+ else
+ binaryDirectory.resize(pos + 1);
+
+ // pattern replacement: "./" at the start of path is replaced by the working directory
+ if (binaryDirectory.find("." + pathSeparator) == 0)
+ binaryDirectory.replace(0, 1, workingDirectory);
+}
+
+
+} // namespace CommandLine
#include <ostream>
#include <string>
#include <vector>
+#include <cstdint>
#include "types.h"
void start_logger(const std::string& fname);
void* std_aligned_alloc(size_t alignment, size_t size);
void std_aligned_free(void* ptr);
-void* aligned_ttmem_alloc(size_t size, void*& mem);
-void aligned_ttmem_free(void* mem); // nop if mem == nullptr
+void* aligned_large_pages_alloc(size_t size); // memory aligned by page size, min alignment: 4096 bytes
+void aligned_large_pages_free(void* mem); // nop if mem == nullptr
void dbg_hit_on(bool b);
void dbg_hit_on(bool c, bool b);
void dbg_print();
typedef std::chrono::milliseconds::rep TimePoint; // A value in milliseconds
-
static_assert(sizeof(TimePoint) == sizeof(int64_t), "TimePoint should be 64 bits");
-
inline TimePoint now() {
return std::chrono::duration_cast<std::chrono::milliseconds>
(std::chrono::steady_clock::now().time_since_epoch()).count();
#define sync_cout std::cout << IO_LOCK
#define sync_endl std::endl << IO_UNLOCK
-namespace Utility {
-
-/// Clamp a value between lo and hi. Available in c++17.
-template<class T> constexpr const T& clamp(const T& v, const T& lo, const T& hi) {
- return v < lo ? lo : v > hi ? hi : v;
-}
+// `ptr` must point to an array of size at least
+// `sizeof(T) * N + alignment` bytes, where `N` is the
+// number of elements in the array.
+template <uintptr_t Alignment, typename T>
+T* align_ptr_up(T* ptr)
+{
+ static_assert(alignof(T) < Alignment);
+ const uintptr_t ptrint = reinterpret_cast<uintptr_t>(reinterpret_cast<char*>(ptr));
+ return reinterpret_cast<T*>(reinterpret_cast<char*>((ptrint + (Alignment - 1)) / Alignment * Alignment));
}
/// xorshift64star Pseudo-Random Number Generator
void bindThisThread(size_t idx);
}
+namespace CommandLine {
+ void init(int argc, char* argv[]);
+
+ extern std::string binaryDirectory; // path of the executable directory
+ extern std::string workingDirectory; // path of the working directory
+}
+
#endif // #ifndef MISC_H_INCLUDED
static_assert(Pt != KING && Pt != PAWN, "Unsupported piece type in generate_moves()");
- const Square* pl = pos.squares<Pt>(Us);
+ Bitboard bb = pos.pieces(Us, Pt);
+
+ while (bb) {
+ Square from = pop_lsb(&bb);
- for (Square from = *pl; from != SQ_NONE; from = *++pl)
- {
if (Checks)
{
if ( (Pt == BISHOP || Pt == ROOK || Pt == QUEEN)
*moveList++ = make_move(ksq, pop_lsb(&b));
if ((Type != CAPTURES) && pos.can_castle(Us & ANY_CASTLING))
- for(CastlingRights cr : { Us & KING_SIDE, Us & QUEEN_SIDE } )
+ for (CastlingRights cr : { Us & KING_SIDE, Us & QUEEN_SIDE } )
if (!pos.castling_impeded(cr) && pos.can_castle(cr))
*moveList++ = make<CASTLING>(ksq, pos.castling_rook_square(cr));
}
assert(d <= 0);
stage = (pos.checkers() ? EVASION_TT : QSEARCH_TT) +
- !(ttm && (depth > DEPTH_QS_RECAPTURES || to_sq(ttm) == recaptureSquare)
- && pos.pseudo_legal(ttm));
+ !( ttm
+ && (pos.checkers() || depth > DEPTH_QS_RECAPTURES || to_sq(ttm) == recaptureSquare)
+ && pos.pseudo_legal(ttm));
}
/// MovePicker constructor for ProbCut: we generate captures with SEE greater
--endMoves;
++stage;
- /* fallthrough */
+ [[fallthrough]];
case REFUTATION:
if (select<Next>([&](){ return *cur != MOVE_NONE
&& pos.pseudo_legal(*cur); }))
return *(cur - 1);
++stage;
- /* fallthrough */
+ [[fallthrough]];
case QUIET_INIT:
if (!skipQuiets)
}
++stage;
- /* fallthrough */
+ [[fallthrough]];
case QUIET:
if ( !skipQuiets
endMoves = endBadCaptures;
++stage;
- /* fallthrough */
+ [[fallthrough]];
case BAD_CAPTURE:
return select<Next>([](){ return true; });
score<EVASIONS>();
++stage;
- /* fallthrough */
+ [[fallthrough]];
case EVASION:
return select<Best>([](){ return true; });
return MOVE_NONE;
++stage;
- /* fallthrough */
+ [[fallthrough]];
case QCHECK_INIT:
cur = moves;
endMoves = generate<QUIET_CHECKS>(pos, cur);
++stage;
- /* fallthrough */
+ [[fallthrough]];
case QCHECK:
return select<Next>([](){ return true; });
/// the move's from and to squares, see www.chessprogramming.org/Butterfly_Boards
typedef Stats<int16_t, 10692, COLOR_NB, int(SQUARE_NB) * int(SQUARE_NB)> ButterflyHistory;
-/// At higher depths LowPlyHistory records successful quiet moves near the root and quiet
-/// moves which are/were in the PV (ttPv)
-/// It is cleared with each new search and filled during iterative deepening
+/// At higher depths LowPlyHistory records successful quiet moves near the root
+/// and quiet moves which are/were in the PV (ttPv). It is cleared with each new
+/// search and filled during iterative deepening.
constexpr int MAX_LPH = 4;
typedef Stats<int16_t, 10692, MAX_LPH, int(SQUARE_NB) * int(SQUARE_NB)> LowPlyHistory;
// Code for calculating NNUE evaluation function
-#include <fstream>
#include <iostream>
#include <set>
#include "../position.h"
#include "../misc.h"
#include "../uci.h"
+#include "../types.h"
#include "evaluate_nnue.h"
-ExtPieceSquare kpp_board_index[PIECE_NB] = {
- // convention: W - us, B - them
- // viewed from other side, W and B are reversed
- { PS_NONE, PS_NONE },
- { PS_W_PAWN, PS_B_PAWN },
- { PS_W_KNIGHT, PS_B_KNIGHT },
- { PS_W_BISHOP, PS_B_BISHOP },
- { PS_W_ROOK, PS_B_ROOK },
- { PS_W_QUEEN, PS_B_QUEEN },
- { PS_W_KING, PS_B_KING },
- { PS_NONE, PS_NONE },
- { PS_NONE, PS_NONE },
- { PS_B_PAWN, PS_W_PAWN },
- { PS_B_KNIGHT, PS_W_KNIGHT },
- { PS_B_BISHOP, PS_W_BISHOP },
- { PS_B_ROOK, PS_W_ROOK },
- { PS_B_QUEEN, PS_W_QUEEN },
- { PS_B_KING, PS_W_KING },
- { PS_NONE, PS_NONE }
-};
-
-
namespace Eval::NNUE {
// Input feature converter
- AlignedPtr<FeatureTransformer> feature_transformer;
+ LargePagePtr<FeatureTransformer> feature_transformer;
// Evaluation function
AlignedPtr<Network> network;
std::memset(pointer.get(), 0, sizeof(T));
}
+ template <typename T>
+ void Initialize(LargePagePtr<T>& pointer) {
+
+ static_assert(alignof(T) <= 4096, "aligned_large_pages_alloc() may fail for such a big alignment requirement of T");
+ pointer.reset(reinterpret_cast<T*>(aligned_large_pages_alloc(sizeof(T))));
+ std::memset(pointer.get(), 0, sizeof(T));
+ }
+
// Read evaluation function parameters
template <typename T>
- bool ReadParameters(std::istream& stream, const AlignedPtr<T>& pointer) {
+ bool ReadParameters(std::istream& stream, T& reference) {
std::uint32_t header;
- stream.read(reinterpret_cast<char*>(&header), sizeof(header));
+ header = read_little_endian<std::uint32_t>(stream);
if (!stream || header != T::GetHashValue()) return false;
- return pointer->ReadParameters(stream);
+ return reference.ReadParameters(stream);
}
} // namespace Detail
}
// Read network header
- bool ReadHeader(std::istream& stream,
- std::uint32_t* hash_value, std::string* architecture) {
-
+ bool ReadHeader(std::istream& stream, std::uint32_t* hash_value, std::string* architecture)
+ {
std::uint32_t version, size;
- stream.read(reinterpret_cast<char*>(&version), sizeof(version));
- stream.read(reinterpret_cast<char*>(hash_value), sizeof(*hash_value));
- stream.read(reinterpret_cast<char*>(&size), sizeof(size));
+
+ version = read_little_endian<std::uint32_t>(stream);
+ *hash_value = read_little_endian<std::uint32_t>(stream);
+ size = read_little_endian<std::uint32_t>(stream);
if (!stream || version != kVersion) return false;
architecture->resize(size);
stream.read(&(*architecture)[0], size);
std::string architecture;
if (!ReadHeader(stream, &hash_value, &architecture)) return false;
if (hash_value != kHashValue) return false;
- if (!Detail::ReadParameters(stream, feature_transformer)) return false;
- if (!Detail::ReadParameters(stream, network)) return false;
+ if (!Detail::ReadParameters(stream, *feature_transformer)) return false;
+ if (!Detail::ReadParameters(stream, *network)) return false;
return stream && stream.peek() == std::ios::traits_type::eof();
}
- // Proceed with the difference calculation if possible
- static void UpdateAccumulatorIfPossible(const Position& pos) {
-
- feature_transformer->UpdateAccumulatorIfPossible(pos);
- }
-
- // Calculate the evaluation value
- static Value ComputeScore(const Position& pos, bool refresh) {
-
- auto& accumulator = pos.state()->accumulator;
- if (!refresh && accumulator.computed_score) {
- return accumulator.score;
- }
-
- alignas(kCacheLineSize) TransformedFeatureType
- transformed_features[FeatureTransformer::kBufferSize];
- feature_transformer->Transform(pos, transformed_features, refresh);
- alignas(kCacheLineSize) char buffer[Network::kBufferSize];
- const auto output = network->Propagate(transformed_features, buffer);
+ // Evaluation function. Perform differential calculation.
+ Value evaluate(const Position& pos) {
- auto score = static_cast<Value>(output[0] / FV_SCALE);
+ // We manually align the arrays on the stack because with gcc < 9.3
+ // overaligning stack variables with alignas() doesn't work correctly.
- accumulator.score = score;
- accumulator.computed_score = true;
- return accumulator.score;
- }
+ constexpr uint64_t alignment = kCacheLineSize;
- // Load the evaluation function file
- bool load_eval_file(const std::string& evalFile) {
+#if defined(ALIGNAS_ON_STACK_VARIABLES_BROKEN)
+ TransformedFeatureType transformed_features_unaligned[
+ FeatureTransformer::kBufferSize + alignment / sizeof(TransformedFeatureType)];
+ char buffer_unaligned[Network::kBufferSize + alignment];
- Initialize();
- fileName = evalFile;
-
- std::ifstream stream(evalFile, std::ios::binary);
+ auto* transformed_features = align_ptr_up<alignment>(&transformed_features_unaligned[0]);
+ auto* buffer = align_ptr_up<alignment>(&buffer_unaligned[0]);
+#else
+ alignas(alignment)
+ TransformedFeatureType transformed_features[FeatureTransformer::kBufferSize];
+ alignas(alignment) char buffer[Network::kBufferSize];
+#endif
- const bool result = ReadParameters(stream);
+ ASSERT_ALIGNED(transformed_features, alignment);
+ ASSERT_ALIGNED(buffer, alignment);
- return result;
- }
-
- // Evaluation function. Perform differential calculation.
- Value evaluate(const Position& pos) {
- Value v = ComputeScore(pos, false);
- v = Utility::clamp(v, VALUE_TB_LOSS_IN_MAX_PLY + 1, VALUE_TB_WIN_IN_MAX_PLY - 1);
+ feature_transformer->Transform(pos, transformed_features);
+ const auto output = network->Propagate(transformed_features, buffer);
- return v;
+ return static_cast<Value>(output[0] / FV_SCALE);
}
- // Evaluation function. Perform full calculation.
- Value compute_eval(const Position& pos) {
- return ComputeScore(pos, true);
- }
+ // Load eval, from a file stream or a memory stream
+ bool load_eval(std::string name, std::istream& stream) {
- // Proceed with the difference calculation if possible
- void update_eval(const Position& pos) {
- UpdateAccumulatorIfPossible(pos);
+ Initialize();
+ fileName = name;
+ return ReadParameters(stream);
}
} // namespace Eval::NNUE
}
};
+ template <typename T>
+ struct LargePageDeleter {
+ void operator()(T* ptr) const {
+ ptr->~T();
+ aligned_large_pages_free(ptr);
+ }
+ };
+
template <typename T>
using AlignedPtr = std::unique_ptr<T, AlignedDeleter<T>>;
+ template <typename T>
+ using LargePagePtr = std::unique_ptr<T, LargePageDeleter<T>>;
+
} // namespace Eval::NNUE
#endif // #ifndef NNUE_EVALUATE_NNUE_H_INCLUDED
template <typename Derived>
class FeatureSetBase {
- public:
- // Get a list of indices for active features
- template <typename IndexListType>
- static void AppendActiveIndices(
- const Position& pos, TriggerEvent trigger, IndexListType active[2]) {
-
- for (Color perspective : { WHITE, BLACK }) {
- Derived::CollectActiveIndices(
- pos, trigger, perspective, &active[perspective]);
- }
- }
-
- // Get a list of indices for recently changed features
- template <typename PositionType, typename IndexListType>
- static void AppendChangedIndices(
- const PositionType& pos, TriggerEvent trigger,
- IndexListType removed[2], IndexListType added[2], bool reset[2]) {
-
- const auto& dp = pos.state()->dirtyPiece;
- if (dp.dirty_num == 0) return;
-
- for (Color perspective : { WHITE, BLACK }) {
- reset[perspective] = false;
- switch (trigger) {
- case TriggerEvent::kFriendKingMoved:
- reset[perspective] =
- dp.pieceId[0] == PIECE_ID_KING + perspective;
- break;
- default:
- assert(false);
- break;
- }
- if (reset[perspective]) {
- Derived::CollectActiveIndices(
- pos, trigger, perspective, &added[perspective]);
- } else {
- Derived::CollectChangedIndices(
- pos, trigger, perspective,
- &removed[perspective], &added[perspective]);
- }
- }
- }
};
// Class template that represents the feature set
CompileTimeList<TriggerEvent, FeatureType::kRefreshTrigger>;
static constexpr auto kRefreshTriggers = SortedTriggerSet::kValues;
- private:
- // Get a list of indices for active features
- static void CollectActiveIndices(
- const Position& pos, const TriggerEvent trigger, const Color perspective,
- IndexList* const active) {
- if (FeatureType::kRefreshTrigger == trigger) {
- FeatureType::AppendActiveIndices(pos, perspective, active);
- }
- }
-
- // Get a list of indices for recently changed features
- static void CollectChangedIndices(
- const Position& pos, const TriggerEvent trigger, const Color perspective,
- IndexList* const removed, IndexList* const added) {
-
- if (FeatureType::kRefreshTrigger == trigger) {
- FeatureType::AppendChangedIndices(pos, perspective, removed, added);
- }
- }
-
- // Make the base class and the class template that recursively uses itself a friend
- friend class FeatureSetBase<FeatureSet>;
- template <typename... FeatureTypes>
- friend class FeatureSet;
};
} // namespace Eval::NNUE::Features
namespace Eval::NNUE::Features {
- // Find the index of the feature quantity from the king position and PieceSquare
- template <Side AssociatedKing>
- inline IndexType HalfKP<AssociatedKing>::MakeIndex(Square sq_k, PieceSquare p) {
- return static_cast<IndexType>(PS_END) * static_cast<IndexType>(sq_k) + p;
+ // Orient a square according to perspective (rotates by 180 for black)
+ inline Square orient(Color perspective, Square s) {
+ return Square(int(s) ^ (bool(perspective) * 63));
}
- // Get pieces information
- template <Side AssociatedKing>
- inline void HalfKP<AssociatedKing>::GetPieces(
- const Position& pos, Color perspective,
- PieceSquare** pieces, Square* sq_target_k) {
-
- *pieces = (perspective == BLACK) ?
- pos.eval_list()->piece_list_fb() :
- pos.eval_list()->piece_list_fw();
- const PieceId target = (AssociatedKing == Side::kFriend) ?
- static_cast<PieceId>(PIECE_ID_KING + perspective) :
- static_cast<PieceId>(PIECE_ID_KING + ~perspective);
- *sq_target_k = static_cast<Square>(((*pieces)[target] - PS_W_KING) % SQUARE_NB);
+ // Index of a feature for a given king position and another piece on some square
+ inline IndexType make_index(Color perspective, Square s, Piece pc, Square ksq) {
+ return IndexType(orient(perspective, s) + kpp_board_index[perspective][pc] + PS_END * ksq);
}
// Get a list of indices for active features
void HalfKP<AssociatedKing>::AppendActiveIndices(
const Position& pos, Color perspective, IndexList* active) {
- // Do nothing if array size is small to avoid compiler warning
- if (RawFeatures::kMaxActiveDimensions < kMaxActiveDimensions) return;
-
- PieceSquare* pieces;
- Square sq_target_k;
- GetPieces(pos, perspective, &pieces, &sq_target_k);
- for (PieceId i = PIECE_ID_ZERO; i < PIECE_ID_KING; ++i) {
- if (pieces[i] != PS_NONE) {
- active->push_back(MakeIndex(sq_target_k, pieces[i]));
- }
+ Square ksq = orient(perspective, pos.square<KING>(perspective));
+ Bitboard bb = pos.pieces() & ~pos.pieces(KING);
+ while (bb) {
+ Square s = pop_lsb(&bb);
+ active->push_back(make_index(perspective, s, pos.piece_on(s), ksq));
}
}
// Get a list of indices for recently changed features
template <Side AssociatedKing>
void HalfKP<AssociatedKing>::AppendChangedIndices(
- const Position& pos, Color perspective,
+ const Position& pos, const DirtyPiece& dp, Color perspective,
IndexList* removed, IndexList* added) {
- PieceSquare* pieces;
- Square sq_target_k;
- GetPieces(pos, perspective, &pieces, &sq_target_k);
- const auto& dp = pos.state()->dirtyPiece;
+ Square ksq = orient(perspective, pos.square<KING>(perspective));
for (int i = 0; i < dp.dirty_num; ++i) {
- if (dp.pieceId[i] >= PIECE_ID_KING) continue;
- const auto old_p = static_cast<PieceSquare>(
- dp.old_piece[i].from[perspective]);
- if (old_p != PS_NONE) {
- removed->push_back(MakeIndex(sq_target_k, old_p));
- }
- const auto new_p = static_cast<PieceSquare>(
- dp.new_piece[i].from[perspective]);
- if (new_p != PS_NONE) {
- added->push_back(MakeIndex(sq_target_k, new_p));
- }
+ Piece pc = dp.piece[i];
+ if (type_of(pc) == KING) continue;
+ if (dp.from[i] != SQ_NONE)
+ removed->push_back(make_index(perspective, dp.from[i], pc, ksq));
+ if (dp.to[i] != SQ_NONE)
+ added->push_back(make_index(perspective, dp.to[i], pc, ksq));
}
}
static constexpr IndexType kDimensions =
static_cast<IndexType>(SQUARE_NB) * static_cast<IndexType>(PS_END);
// Maximum number of simultaneously active features
- static constexpr IndexType kMaxActiveDimensions = PIECE_ID_KING;
+ static constexpr IndexType kMaxActiveDimensions = 30; // Kings don't count
// Trigger for full calculation instead of difference calculation
static constexpr TriggerEvent kRefreshTrigger = TriggerEvent::kFriendKingMoved;
IndexList* active);
// Get a list of indices for recently changed features
- static void AppendChangedIndices(const Position& pos, Color perspective,
+ static void AppendChangedIndices(const Position& pos, const DirtyPiece& dp, Color perspective,
IndexList* removed, IndexList* added);
-
- // Index of a feature for a given king position and another piece on some square
- static IndexType MakeIndex(Square sq_k, PieceSquare p);
-
- private:
- // Get pieces information
- static void GetPieces(const Position& pos, Color perspective,
- PieceSquare** pieces, Square* sq_target_k);
};
} // namespace Eval::NNUE::Features
// Read network parameters
bool ReadParameters(std::istream& stream) {
if (!previous_layer_.ReadParameters(stream)) return false;
- stream.read(reinterpret_cast<char*>(biases_),
- kOutputDimensions * sizeof(BiasType));
- stream.read(reinterpret_cast<char*>(weights_),
- kOutputDimensions * kPaddedInputDimensions *
- sizeof(WeightType));
+ for (std::size_t i = 0; i < kOutputDimensions; ++i)
+ biases_[i] = read_little_endian<BiasType>(stream);
+ for (std::size_t i = 0; i < kOutputDimensions * kPaddedInputDimensions; ++i)
+ weights_[i] = read_little_endian<WeightType>(stream);
+
+#if defined (USE_SSSE3)
+ // Determine if quadruplets of weight and input products can be summed using 16 bits
+ // without saturation. We assume worst case combinations of 0 and 127 for all inputs.
+ if (!stream.fail())
+ {
+ auto can_saturate = [](const WeightType* w, int idx[4]) {
+ int pSum = 0, nSum = 0;
+ for (int p = 0; p < 4; ++p)
+ if (w[idx[p]] > 0)
+ pSum += w[idx[p]];
+ else
+ nSum += w[idx[p]];
+
+ return pSum > 258 || nSum < -258;
+ };
+
+ for (IndexType i = 0; i < kOutputDimensions; ++i)
+ {
+ canSaturate16[i] = false;
+ const WeightType* w = &weights_[i * kPaddedInputDimensions];
+#if defined (USE_AVX512)
+ for (IndexType j = 0; j < (kPaddedInputDimensions & ~127) && !canSaturate16[i]; j += 128)
+ for (int k = 0; k < 64 && !canSaturate16[i]; k += 2)
+ {
+ int spacing[4] = { 0, 1, 64, 65 };
+ canSaturate16[i] = can_saturate(&w[j + k], spacing);
+ }
+#elif defined (USE_AVX2)
+ for (IndexType j = 0; j < (kPaddedInputDimensions & ~63) && !canSaturate16[i]; j += 64)
+ for (int k = 0; k < 32 && !canSaturate16[i]; k += 2)
+ {
+ int spacing[4] = { 0, 1, 32, 33 };
+ canSaturate16[i] = can_saturate(&w[j + k], spacing);
+ }
+#elif defined (USE_SSSE3)
+ for (IndexType j = 0; j < (kPaddedInputDimensions & ~31) && !canSaturate16[i]; j += 32)
+ for (int k = 0; k < 16 && !canSaturate16[i]; k += 2)
+ {
+ int spacing[4] = { 0, 1, 16, 17 };
+ canSaturate16[i] = can_saturate(&w[j + k], spacing);
+ }
+#endif
+ }
+ }
+#endif
+
return !stream.fail();
}
const TransformedFeatureType* transformed_features, char* buffer) const {
const auto input = previous_layer_.Propagate(
transformed_features, buffer + kSelfBufferSize);
+
+#if defined (USE_AVX512)
+
+ [[maybe_unused]] const __m512i kOnes512 = _mm512_set1_epi16(1);
+
+ [[maybe_unused]] auto m512_hadd = [](__m512i sum, int bias) -> int {
+ return _mm512_reduce_add_epi32(sum) + bias;
+ };
+
+ // This function takes
+ // sum0 = [xmm0a, xmm0b, xmm0c, xmm0d]
+ // sum1 = [xmm1a, xmm1b, xmm1c, xmm1d]
+ // sum2 = [xmm2a, xmm2b, xmm2c, xmm2d]
+ // sum3 = [xmm3a, xmm3b, xmm3c, xmm3d]
+ // and returns
+ // ret = [
+ // reduce_add_epi32(xmm0a), reduce_add_epi32(xmm1a), reduce_add_epi32(xmm2a), reduce_add_epi32(xmm3a),
+ // reduce_add_epi32(xmm0b), reduce_add_epi32(xmm1b), reduce_add_epi32(xmm2b), reduce_add_epi32(xmm3b),
+ // reduce_add_epi32(xmm0c), reduce_add_epi32(xmm1c), reduce_add_epi32(xmm2c), reduce_add_epi32(xmm3c),
+ // reduce_add_epi32(xmm0d), reduce_add_epi32(xmm1d), reduce_add_epi32(xmm2d), reduce_add_epi32(xmm3d)
+ // ]
+ [[maybe_unused]] auto m512_hadd128x16_interleave = [](
+ __m512i sum0, __m512i sum1, __m512i sum2, __m512i sum3) -> __m512i {
+
+ __m512i sum01a = _mm512_unpacklo_epi32(sum0, sum1);
+ __m512i sum01b = _mm512_unpackhi_epi32(sum0, sum1);
+
+ __m512i sum23a = _mm512_unpacklo_epi32(sum2, sum3);
+ __m512i sum23b = _mm512_unpackhi_epi32(sum2, sum3);
+
+ __m512i sum01 = _mm512_add_epi32(sum01a, sum01b);
+ __m512i sum23 = _mm512_add_epi32(sum23a, sum23b);
+
+ __m512i sum0123a = _mm512_unpacklo_epi64(sum01, sum23);
+ __m512i sum0123b = _mm512_unpackhi_epi64(sum01, sum23);
+
+ return _mm512_add_epi32(sum0123a, sum0123b);
+ };
+
+ [[maybe_unused]] auto m512_haddx4 = [m512_hadd128x16_interleave](
+ __m512i sum0, __m512i sum1, __m512i sum2, __m512i sum3, __m128i bias) -> __m128i {
+
+ __m512i sum = m512_hadd128x16_interleave(sum0, sum1, sum2, sum3);
+
+ __m256i sum256lo = _mm512_castsi512_si256(sum);
+ __m256i sum256hi = _mm512_extracti64x4_epi64(sum, 1);
+
+ sum256lo = _mm256_add_epi32(sum256lo, sum256hi);
+
+ __m128i sum128lo = _mm256_castsi256_si128(sum256lo);
+ __m128i sum128hi = _mm256_extracti128_si256(sum256lo, 1);
+
+ return _mm_add_epi32(_mm_add_epi32(sum128lo, sum128hi), bias);
+ };
+
+ [[maybe_unused]] auto m512_haddx8 = [m512_hadd128x16_interleave](
+ __m512i sum0, __m512i sum1, __m512i sum2, __m512i sum3,
+ __m512i sum4, __m512i sum5, __m512i sum6, __m512i sum7, __m256i bias) -> __m256i {
+
+ __m512i suma = m512_hadd128x16_interleave(sum0, sum1, sum2, sum3);
+ __m512i sumb = m512_hadd128x16_interleave(sum4, sum5, sum6, sum7);
+
+ __m512i indices0 = _mm512_setr_epi64(0, 1, 8, 9, 4, 5, 12, 13);
+ __m512i indices1 = _mm512_setr_epi64(2, 3, 10, 11, 6, 7, 14, 15);
+ __m512i x = _mm512_add_epi32(
+ _mm512_permutex2var_epi64(suma, indices0, sumb),
+ _mm512_permutex2var_epi64(suma, indices1, sumb));
+
+ __m256i sum256lo = _mm512_castsi512_si256(x);
+ __m256i sum256hi = _mm512_extracti64x4_epi64(x, 1);
+
+ return _mm256_add_epi32(_mm256_add_epi32(sum256lo, sum256hi), bias);
+ };
+
+ [[maybe_unused]] auto m512_hadd256x8 =[m512_hadd128x16_interleave](
+ __m512i sum0, __m512i sum1, __m512i sum2, __m512i sum3, __m256i bias) -> __m256i {
+
+ __m512i sum = m512_hadd128x16_interleave(sum0, sum1, sum2, sum3);
+
+ __m512i indices = _mm512_setr_epi32(
+ 0, 4, 8, 12, 2, 6, 10, 14,
+ 1, 5, 9, 13, 3, 7, 11, 15);
+ sum = _mm512_permutexvar_epi32(indices, sum);
+
+ __m256i sum256lo = _mm512_castsi512_si256(sum);
+ __m256i sum256hi = _mm512_extracti64x4_epi64(sum, 1);
+
+ return _mm256_add_epi32(_mm256_hadd_epi32(sum256lo, sum256hi), bias);
+ };
+
+ [[maybe_unused]] auto m512_hadd256x16 = [m512_hadd128x16_interleave](
+ __m512i sum0, __m512i sum1, __m512i sum2, __m512i sum3,
+ __m512i sum4, __m512i sum5, __m512i sum6, __m512i sum7, __m512i bias) -> __m512i {
+
+ __m512i suma = m512_hadd128x16_interleave(sum0, sum1, sum2, sum3);
+ __m512i sumb = m512_hadd128x16_interleave(sum4, sum5, sum6, sum7);
+
+ __m512i indices0 = _mm512_setr_epi64(0, 1, 8, 9, 4, 5, 12, 13);
+ __m512i indices1 = _mm512_setr_epi64(2, 3, 10, 11, 6, 7, 14, 15);
+ __m512i x = _mm512_add_epi32(
+ _mm512_permutex2var_epi64(suma, indices0, sumb),
+ _mm512_permutex2var_epi64(suma, indices1, sumb));
+
+ __m512i indices = _mm512_setr_epi32(0, 8, 1, 9, 2, 10, 3, 11, 4, 12, 5, 13, 6, 14, 7, 15);
+ return _mm512_add_epi32(_mm512_permutexvar_epi32(indices, x), bias);
+ };
+
+ [[maybe_unused]] auto m512_add_dpbusd_epi32 = [=](__m512i& acc, __m512i a, __m512i b) {
+#if defined (USE_VNNI)
+ acc = _mm512_dpbusd_epi32(acc, a, b);
+#else
+ __m512i product0 = _mm512_maddubs_epi16(a, b);
+ product0 = _mm512_madd_epi16(product0, kOnes512);
+ acc = _mm512_add_epi32(acc, product0);
+#endif
+ };
+
+ [[maybe_unused]] auto m512_add_dpbusd_epi32x2 = [=](__m512i& acc, __m512i a0, __m512i b0, __m512i a1, __m512i b1) {
+#if defined (USE_VNNI)
+ acc = _mm512_dpbusd_epi32(acc, a0, b0);
+ acc = _mm512_dpbusd_epi32(acc, a1, b1);
+#else
+ __m512i product0 = _mm512_maddubs_epi16(a0, b0);
+ __m512i product1 = _mm512_maddubs_epi16(a1, b1);
+ product0 = _mm512_adds_epi16(product0, product1);
+ product0 = _mm512_madd_epi16(product0, kOnes512);
+ acc = _mm512_add_epi32(acc, product0);
+#endif
+ };
+
+#endif
+#if defined (USE_AVX2)
+
+ [[maybe_unused]] const __m256i kOnes256 = _mm256_set1_epi16(1);
+
+ [[maybe_unused]] auto m256_hadd = [](__m256i sum, int bias) -> int {
+ __m128i sum128 = _mm_add_epi32(_mm256_castsi256_si128(sum), _mm256_extracti128_si256(sum, 1));
+ sum128 = _mm_add_epi32(sum128, _mm_shuffle_epi32(sum128, _MM_PERM_BADC));
+ sum128 = _mm_add_epi32(sum128, _mm_shuffle_epi32(sum128, _MM_PERM_CDAB));
+ return _mm_cvtsi128_si32(sum128) + bias;
+ };
+
+ [[maybe_unused]] auto m256_haddx4 = [](__m256i sum0, __m256i sum1, __m256i sum2, __m256i sum3, __m128i bias) -> __m128i {
+ sum0 = _mm256_hadd_epi32(sum0, sum1);
+ sum2 = _mm256_hadd_epi32(sum2, sum3);
+
+ sum0 = _mm256_hadd_epi32(sum0, sum2);
+
+ __m128i sum128lo = _mm256_castsi256_si128(sum0);
+ __m128i sum128hi = _mm256_extracti128_si256(sum0, 1);
+
+ return _mm_add_epi32(_mm_add_epi32(sum128lo, sum128hi), bias);
+ };
+
+ [[maybe_unused]] auto m256_add_dpbusd_epi32 = [=](__m256i& acc, __m256i a, __m256i b) {
+#if defined (USE_VNNI)
+ acc = _mm256_dpbusd_epi32(acc, a, b);
+#else
+ __m256i product0 = _mm256_maddubs_epi16(a, b);
+ product0 = _mm256_madd_epi16(product0, kOnes256);
+ acc = _mm256_add_epi32(acc, product0);
+#endif
+ };
+
+ [[maybe_unused]] auto m256_add_dpbusd_epi32x2 = [=](__m256i& acc, __m256i a0, __m256i b0, __m256i a1, __m256i b1) {
+#if defined (USE_VNNI)
+ acc = _mm256_dpbusd_epi32(acc, a0, b0);
+ acc = _mm256_dpbusd_epi32(acc, a1, b1);
+#else
+ __m256i product0 = _mm256_maddubs_epi16(a0, b0);
+ __m256i product1 = _mm256_maddubs_epi16(a1, b1);
+ product0 = _mm256_adds_epi16(product0, product1);
+ product0 = _mm256_madd_epi16(product0, kOnes256);
+ acc = _mm256_add_epi32(acc, product0);
+#endif
+ };
+
+#endif
+
+#if defined (USE_SSSE3)
+
+ [[maybe_unused]] const __m128i kOnes128 = _mm_set1_epi16(1);
+
+ [[maybe_unused]] auto m128_hadd = [](__m128i sum, int bias) -> int {
+ sum = _mm_add_epi32(sum, _mm_shuffle_epi32(sum, 0x4E)); //_MM_PERM_BADC
+ sum = _mm_add_epi32(sum, _mm_shuffle_epi32(sum, 0xB1)); //_MM_PERM_CDAB
+ return _mm_cvtsi128_si32(sum) + bias;
+ };
+
+ [[maybe_unused]] auto m128_haddx4 = [](__m128i sum0, __m128i sum1, __m128i sum2, __m128i sum3, __m128i bias) -> __m128i {
+ sum0 = _mm_hadd_epi32(sum0, sum1);
+ sum2 = _mm_hadd_epi32(sum2, sum3);
+
+ sum0 = _mm_hadd_epi32(sum0, sum2);
+
+ return _mm_add_epi32(sum0, bias);
+ };
+
+ [[maybe_unused]] auto m128_add_dpbusd_epi32 = [=](__m128i& acc, __m128i a, __m128i b) {
+ __m128i product0 = _mm_maddubs_epi16(a, b);
+ product0 = _mm_madd_epi16(product0, kOnes128);
+ acc = _mm_add_epi32(acc, product0);
+ };
+
+ [[maybe_unused]] auto m128_add_dpbusd_epi32x2 = [=](__m128i& acc, __m128i a0, __m128i b0, __m128i a1, __m128i b1) {
+ __m128i product0 = _mm_maddubs_epi16(a0, b0);
+ __m128i product1 = _mm_maddubs_epi16(a1, b1);
+ product0 = _mm_adds_epi16(product0, product1);
+ product0 = _mm_madd_epi16(product0, kOnes128);
+ acc = _mm_add_epi32(acc, product0);
+ };
+
+#endif
+
+#if defined (USE_AVX512)
+
+ constexpr IndexType kNumChunks512 = kPaddedInputDimensions / (kSimdWidth * 2);
+ constexpr IndexType kNumChunks256 = kPaddedInputDimensions / kSimdWidth;
+
const auto output = reinterpret_cast<OutputType*>(buffer);
- #if defined(USE_AVX512)
- constexpr IndexType kNumChunks = kPaddedInputDimensions / (kSimdWidth * 2);
- const __m512i kOnes = _mm512_set1_epi16(1);
- const auto input_vector = reinterpret_cast<const __m512i*>(input);
+ // Since to saturate a zmm register it takes 64 bytes we
+ // cannot use AVX512 for the smaller affine transforms.
+ // Instead we fallback to a AVX2 implementation if the
+ // kInputDimensions isn't a multiple of 64.
+ // Note that this means that for example for
+ // kInputDimensions of 96 we fallback to AVX2 even though
+ // the first 64 elements could be processed with AVX512.
+ // This is caused by mixing the __m256 and __m512 variables
+ // required to better handle that case and it would
+ // require handling more cases statically not to lose performance.
+ // This should be revisited if such input dimensions are to be considered.
+ [[maybe_unused]] const auto input_vector512 = reinterpret_cast<const __m512i*>(input);
+ [[maybe_unused]] const auto input_vector256 = reinterpret_cast<const __m256i*>(input);
+
+ // kOutputDimensions is either 1 or a multiple of kSimdWidth
+ // because then it is also an input dimension.
+ if constexpr (kOutputDimensions % 16 == 0 && kNumChunks256 == 1)
+ {
+ for (IndexType i = 0; i < kOutputDimensions; i += 16)
+ {
+ const IndexType offset01a = (i + 0) * kPaddedInputDimensions;
+ const IndexType offset23a = (i + 2) * kPaddedInputDimensions;
+ const IndexType offset45a = (i + 4) * kPaddedInputDimensions;
+ const IndexType offset67a = (i + 6) * kPaddedInputDimensions;
+ const IndexType offset01b = (i + 8) * kPaddedInputDimensions;
+ const IndexType offset23b = (i + 10) * kPaddedInputDimensions;
+ const IndexType offset45b = (i + 12) * kPaddedInputDimensions;
+ const IndexType offset67b = (i + 14) * kPaddedInputDimensions;
+
+ const __m512i bias = *reinterpret_cast<const __m512i*>(&biases_[i]);
+ __m512i* outptr = reinterpret_cast<__m512i*>(&output[i]);
+
+ __m512i sum01a = _mm512_setzero_si512();
+ __m512i sum23a = _mm512_setzero_si512();
+ __m512i sum45a = _mm512_setzero_si512();
+ __m512i sum67a = _mm512_setzero_si512();
+ __m512i sum01b = _mm512_setzero_si512();
+ __m512i sum23b = _mm512_setzero_si512();
+ __m512i sum45b = _mm512_setzero_si512();
+ __m512i sum67b = _mm512_setzero_si512();
+
+ const auto row01a = *reinterpret_cast<const __m512i*>(&weights_[offset01a]);
+ const auto row23a = *reinterpret_cast<const __m512i*>(&weights_[offset23a]);
+ const auto row45a = *reinterpret_cast<const __m512i*>(&weights_[offset45a]);
+ const auto row67a = *reinterpret_cast<const __m512i*>(&weights_[offset67a]);
+ const auto row01b = *reinterpret_cast<const __m512i*>(&weights_[offset01b]);
+ const auto row23b = *reinterpret_cast<const __m512i*>(&weights_[offset23b]);
+ const auto row45b = *reinterpret_cast<const __m512i*>(&weights_[offset45b]);
+ const auto row67b = *reinterpret_cast<const __m512i*>(&weights_[offset67b]);
+
+ const __m256i in256 = input_vector256[0];
+ const __m512i in = _mm512_inserti64x4(_mm512_castsi256_si512(in256), in256, 1);
+
+ m512_add_dpbusd_epi32(sum01a, in, row01a);
+ m512_add_dpbusd_epi32(sum23a, in, row23a);
+ m512_add_dpbusd_epi32(sum45a, in, row45a);
+ m512_add_dpbusd_epi32(sum67a, in, row67a);
+ m512_add_dpbusd_epi32(sum01b, in, row01b);
+ m512_add_dpbusd_epi32(sum23b, in, row23b);
+ m512_add_dpbusd_epi32(sum45b, in, row45b);
+ m512_add_dpbusd_epi32(sum67b, in, row67b);
+
+ *outptr = m512_hadd256x16(
+ sum01a, sum23a, sum45a, sum67a,
+ sum01b, sum23b, sum45b, sum67b, bias);
+ }
+ }
+ else if constexpr (kOutputDimensions % 4 == 0)
+ {
+ for (IndexType i = 0; i < kOutputDimensions; i += 4)
+ {
+ const IndexType offset0 = (i + 0) * kPaddedInputDimensions;
+ const IndexType offset1 = (i + 1) * kPaddedInputDimensions;
+ const IndexType offset2 = (i + 2) * kPaddedInputDimensions;
+ const IndexType offset3 = (i + 3) * kPaddedInputDimensions;
+
+ const __m128i bias = *reinterpret_cast<const __m128i*>(&biases_[i]);
+ __m128i* outptr = reinterpret_cast<__m128i*>(&output[i]);
+
+ if constexpr (kPaddedInputDimensions % (kSimdWidth * 2) == 0)
+ {
+ __m512i sum0 = _mm512_setzero_si512();
+ __m512i sum1 = _mm512_setzero_si512();
+ __m512i sum2 = _mm512_setzero_si512();
+ __m512i sum3 = _mm512_setzero_si512();
+
+ const auto row0 = reinterpret_cast<const __m512i*>(&weights_[offset0]);
+ const auto row1 = reinterpret_cast<const __m512i*>(&weights_[offset1]);
+ const auto row2 = reinterpret_cast<const __m512i*>(&weights_[offset2]);
+ const auto row3 = reinterpret_cast<const __m512i*>(&weights_[offset3]);
+
+ int j = 0;
+ if (!canSaturate16x4[i / 4])
+ {
+ for (; j < (int)kNumChunks512 - 1; j += 2)
+ {
+ const __m512i in0 = input_vector512[j];
+ const __m512i in1 = input_vector512[j + 1];
+
+ m512_add_dpbusd_epi32x2(sum0, in0, row0[j], in1, row0[j + 1]);
+ m512_add_dpbusd_epi32x2(sum1, in0, row1[j], in1, row1[j + 1]);
+ m512_add_dpbusd_epi32x2(sum2, in0, row2[j], in1, row2[j + 1]);
+ m512_add_dpbusd_epi32x2(sum3, in0, row3[j], in1, row3[j + 1]);
+ }
+ }
+ for (; j < (int)kNumChunks512; ++j)
+ {
+ const __m512i in = input_vector512[j];
+
+ m512_add_dpbusd_epi32(sum0, in, row0[j]);
+ m512_add_dpbusd_epi32(sum1, in, row1[j]);
+ m512_add_dpbusd_epi32(sum2, in, row2[j]);
+ m512_add_dpbusd_epi32(sum3, in, row3[j]);
+ }
+
+ *outptr = m512_haddx4(sum0, sum1, sum2, sum3, bias);
+ }
+ else
+ {
+ __m256i sum0 = _mm256_setzero_si256();
+ __m256i sum1 = _mm256_setzero_si256();
+ __m256i sum2 = _mm256_setzero_si256();
+ __m256i sum3 = _mm256_setzero_si256();
+
+ const auto row0 = reinterpret_cast<const __m256i*>(&weights_[offset0]);
+ const auto row1 = reinterpret_cast<const __m256i*>(&weights_[offset1]);
+ const auto row2 = reinterpret_cast<const __m256i*>(&weights_[offset2]);
+ const auto row3 = reinterpret_cast<const __m256i*>(&weights_[offset3]);
+
+ for (IndexType j = 0; j < kNumChunks256; ++j)
+ {
+ const __m256i in = input_vector256[j];
+
+ m256_add_dpbusd_epi32(sum0, in, row0[j]);
+ m256_add_dpbusd_epi32(sum1, in, row1[j]);
+ m256_add_dpbusd_epi32(sum2, in, row2[j]);
+ m256_add_dpbusd_epi32(sum3, in, row3[j]);
+ }
+
+ *outptr = m256_haddx4(sum0, sum1, sum2, sum3, bias);
+ }
+ }
+ }
+ else if constexpr (kOutputDimensions == 1)
+ {
+ if constexpr (kPaddedInputDimensions % (kSimdWidth * 2) == 0)
+ {
+ __m512i sum0 = _mm512_setzero_si512();
+
+ const auto row0 = reinterpret_cast<const __m512i*>(&weights_[0]);
+
+ for (IndexType j = 0; j < kNumChunks512; ++j)
+ {
+ const __m512i in = input_vector512[j];
+
+ m512_add_dpbusd_epi32(sum0, in, row0[j]);
+ }
+
+ output[0] = m512_hadd(sum0, biases_[0]);
+ }
+ else
+ {
+ __m256i sum0 = _mm256_setzero_si256();
+
+ const auto row0 = reinterpret_cast<const __m256i*>(&weights_[0]);
+
+ for (IndexType j = 0; j < kNumChunks256; ++j)
+ {
+ const __m256i in = input_vector256[j];
+
+ m256_add_dpbusd_epi32(sum0, in, row0[j]);
+ }
+
+ output[0] = m256_hadd(sum0, biases_[0]);
+ }
+ }
+ else
+ {
+ // This case can never happen because kOutputDimensions
+ // is always 1 or a multiple of kSimdWidth.
+ assert(false);
+ }
+
+#elif defined (USE_AVX2)
- #elif defined(USE_AVX2)
constexpr IndexType kNumChunks = kPaddedInputDimensions / kSimdWidth;
- const __m256i kOnes = _mm256_set1_epi16(1);
+
+ const auto output = reinterpret_cast<OutputType*>(buffer);
const auto input_vector = reinterpret_cast<const __m256i*>(input);
- #elif defined(USE_SSSE3)
+ // kOutputDimensions is either 1 or a multiple of kSimdWidth
+ // because then it is also an input dimension.
+ if constexpr (kOutputDimensions % 4 == 0)
+ {
+ for (IndexType i = 0; i < kOutputDimensions; i += 4)
+ {
+ const IndexType offset0 = (i + 0) * kPaddedInputDimensions;
+ const IndexType offset1 = (i + 1) * kPaddedInputDimensions;
+ const IndexType offset2 = (i + 2) * kPaddedInputDimensions;
+ const IndexType offset3 = (i + 3) * kPaddedInputDimensions;
+
+ const __m128i bias = *reinterpret_cast<const __m128i*>(&biases_[i]);
+ __m128i* outptr = reinterpret_cast<__m128i*>(&output[i]);
+
+ __m256i sum0 = _mm256_setzero_si256();
+ __m256i sum1 = _mm256_setzero_si256();
+ __m256i sum2 = _mm256_setzero_si256();
+ __m256i sum3 = _mm256_setzero_si256();
+
+ const auto row0 = reinterpret_cast<const __m256i*>(&weights_[offset0]);
+ const auto row1 = reinterpret_cast<const __m256i*>(&weights_[offset1]);
+ const auto row2 = reinterpret_cast<const __m256i*>(&weights_[offset2]);
+ const auto row3 = reinterpret_cast<const __m256i*>(&weights_[offset3]);
+
+ int j = 0;
+ if (!canSaturate16x4[i / 4])
+ {
+ for (; j < (int)kNumChunks - 1; j += 2)
+ {
+ const __m256i in0 = input_vector[j];
+ const __m256i in1 = input_vector[j + 1];
+
+ m256_add_dpbusd_epi32x2(sum0, in0, row0[j], in1, row0[j + 1]);
+ m256_add_dpbusd_epi32x2(sum1, in0, row1[j], in1, row1[j + 1]);
+ m256_add_dpbusd_epi32x2(sum2, in0, row2[j], in1, row2[j + 1]);
+ m256_add_dpbusd_epi32x2(sum3, in0, row3[j], in1, row3[j + 1]);
+ }
+ }
+ for (; j < (int)kNumChunks; ++j)
+ {
+ const __m256i in = input_vector[j];
+
+ m256_add_dpbusd_epi32(sum0, in, row0[j]);
+ m256_add_dpbusd_epi32(sum1, in, row1[j]);
+ m256_add_dpbusd_epi32(sum2, in, row2[j]);
+ m256_add_dpbusd_epi32(sum3, in, row3[j]);
+ }
+
+ *outptr = m256_haddx4(sum0, sum1, sum2, sum3, bias);
+ }
+ }
+ else if constexpr (kOutputDimensions == 1)
+ {
+ __m256i sum0 = _mm256_setzero_si256();
+
+ const auto row0 = reinterpret_cast<const __m256i*>(&weights_[0]);
+
+ for (IndexType j = 0; j < kNumChunks; ++j)
+ {
+ const __m256i in = input_vector[j];
+
+ m256_add_dpbusd_epi32(sum0, in, row0[j]);
+ }
+
+ output[0] = m256_hadd(sum0, biases_[0]);
+ }
+ else
+ {
+ // This case can never happen because kOutputDimensions
+ // is always 1 or a multiple of kSimdWidth.
+ assert(false);
+ }
+
+#elif defined (USE_SSSE3)
+
constexpr IndexType kNumChunks = kPaddedInputDimensions / kSimdWidth;
+
+ auto output = reinterpret_cast<OutputType*>(buffer);
+ const auto input_vector = reinterpret_cast<const __m128i*>(input);
+
+ // kOutputDimensions is either 1 or a multiple of kSimdWidth
+ // because then it is also an input dimension.
+ if constexpr (kOutputDimensions % 4 == 0)
+ {
+ for (IndexType i = 0; i < kOutputDimensions; i += 4)
+ {
+ const IndexType offset0 = (i + 0) * kPaddedInputDimensions;
+ const IndexType offset1 = (i + 1) * kPaddedInputDimensions;
+ const IndexType offset2 = (i + 2) * kPaddedInputDimensions;
+ const IndexType offset3 = (i + 3) * kPaddedInputDimensions;
+
+ const __m128i bias = *reinterpret_cast<const __m128i*>(&biases_[i]);
+ __m128i* outptr = reinterpret_cast<__m128i*>(&output[i]);
+
+ __m128i sum0 = _mm_setzero_si128();
+ __m128i sum1 = _mm_setzero_si128();
+ __m128i sum2 = _mm_setzero_si128();
+ __m128i sum3 = _mm_setzero_si128();
+
+ const auto row0 = reinterpret_cast<const __m128i*>(&weights_[offset0]);
+ const auto row1 = reinterpret_cast<const __m128i*>(&weights_[offset1]);
+ const auto row2 = reinterpret_cast<const __m128i*>(&weights_[offset2]);
+ const auto row3 = reinterpret_cast<const __m128i*>(&weights_[offset3]);
+
+ int j = 0;
+ if (!canSaturate16x4[i / 4])
+ {
+ for (; j < (int)kNumChunks - 1; j += 2)
+ {
+ const __m128i in0 = input_vector[j];
+ const __m128i in1 = input_vector[j + 1];
+
+ m128_add_dpbusd_epi32x2(sum0, in0, row0[j], in1, row0[j + 1]);
+ m128_add_dpbusd_epi32x2(sum1, in0, row1[j], in1, row1[j + 1]);
+ m128_add_dpbusd_epi32x2(sum2, in0, row2[j], in1, row2[j + 1]);
+ m128_add_dpbusd_epi32x2(sum3, in0, row3[j], in1, row3[j + 1]);
+ }
+ }
+ for (; j < (int)kNumChunks; ++j)
+ {
+ const __m128i in = input_vector[j];
+
+ m128_add_dpbusd_epi32(sum0, in, row0[j]);
+ m128_add_dpbusd_epi32(sum1, in, row1[j]);
+ m128_add_dpbusd_epi32(sum2, in, row2[j]);
+ m128_add_dpbusd_epi32(sum3, in, row3[j]);
+ }
+
+ *outptr = m128_haddx4(sum0, sum1, sum2, sum3, bias);
+ }
+ }
+ else if constexpr (kOutputDimensions == 1)
+ {
+ __m128i sum0 = _mm_setzero_si128();
+
+ const auto row0 = reinterpret_cast<const __m128i*>(&weights_[0]);
+
+ for (int j = 0; j < (int)kNumChunks; ++j)
+ {
+ const __m128i in = input_vector[j];
+
+ m128_add_dpbusd_epi32(sum0, in, row0[j]);
+ }
+
+ output[0] = m128_hadd(sum0, biases_[0]);
+ }
+ else
+ {
+ // This case can never happen because kOutputDimensions
+ // is always 1 or a multiple of kSimdWidth.
+ assert(false);
+ }
+
+#else
+
+// Use old implementation for the other architectures.
+
+ auto output = reinterpret_cast<OutputType*>(buffer);
+
+#if defined(USE_SSE2)
+ constexpr IndexType kNumChunks = kPaddedInputDimensions / kSimdWidth;
+#ifndef USE_SSSE3
+ const __m128i kZeros = _mm_setzero_si128();
+#else
const __m128i kOnes = _mm_set1_epi16(1);
+#endif
const auto input_vector = reinterpret_cast<const __m128i*>(input);
- #elif defined(USE_NEON)
+#elif defined(USE_MMX)
+ constexpr IndexType kNumChunks = kPaddedInputDimensions / kSimdWidth;
+ const __m64 kZeros = _mm_setzero_si64();
+ const auto input_vector = reinterpret_cast<const __m64*>(input);
+
+#elif defined(USE_NEON)
constexpr IndexType kNumChunks = kPaddedInputDimensions / kSimdWidth;
const auto input_vector = reinterpret_cast<const int8x8_t*>(input);
- #endif
+#endif
for (IndexType i = 0; i < kOutputDimensions; ++i) {
const IndexType offset = i * kPaddedInputDimensions;
- #if defined(USE_AVX512)
- __m512i sum = _mm512_setzero_si512();
- const auto row = reinterpret_cast<const __m512i*>(&weights_[offset]);
+#if defined(USE_SSE2)
+ __m128i sum_lo = _mm_cvtsi32_si128(biases_[i]);
+ __m128i sum_hi = kZeros;
+ const auto row = reinterpret_cast<const __m128i*>(&weights_[offset]);
for (IndexType j = 0; j < kNumChunks; ++j) {
-
- #if defined(__MINGW32__) || defined(__MINGW64__)
- __m512i product = _mm512_maddubs_epi16(_mm512_loadu_si512(&input_vector[j]), _mm512_load_si512(&row[j]));
- #else
- __m512i product = _mm512_maddubs_epi16(_mm512_load_si512(&input_vector[j]), _mm512_load_si512(&row[j]));
- #endif
-
- product = _mm512_madd_epi16(product, kOnes);
- sum = _mm512_add_epi32(sum, product);
- }
- output[i] = _mm512_reduce_add_epi32(sum) + biases_[i];
-
- // Note: Changing kMaxSimdWidth from 32 to 64 breaks loading existing networks.
- // As a result kPaddedInputDimensions may not be an even multiple of 64(512bit)
- // and we have to do one more 256bit chunk.
- if (kPaddedInputDimensions != kNumChunks * kSimdWidth * 2)
- {
- const auto iv_256 = reinterpret_cast<const __m256i*>(input);
- const auto row_256 = reinterpret_cast<const __m256i*>(&weights_[offset]);
- int j = kNumChunks * 2;
-
- #if defined(__MINGW32__) || defined(__MINGW64__) // See HACK comment below in AVX2.
- __m256i sum256 = _mm256_maddubs_epi16(_mm256_loadu_si256(&iv_256[j]), _mm256_load_si256(&row_256[j]));
- #else
- __m256i sum256 = _mm256_maddubs_epi16(_mm256_load_si256(&iv_256[j]), _mm256_load_si256(&row_256[j]));
- #endif
-
- sum256 = _mm256_madd_epi16(sum256, _mm256_set1_epi16(1));
- sum256 = _mm256_hadd_epi32(sum256, sum256);
- sum256 = _mm256_hadd_epi32(sum256, sum256);
- const __m128i lo = _mm256_extracti128_si256(sum256, 0);
- const __m128i hi = _mm256_extracti128_si256(sum256, 1);
- output[i] += _mm_cvtsi128_si32(lo) + _mm_cvtsi128_si32(hi);
+ __m128i row_j = _mm_load_si128(&row[j]);
+ __m128i input_j = _mm_load_si128(&input_vector[j]);
+ __m128i extended_row_lo = _mm_srai_epi16(_mm_unpacklo_epi8(row_j, row_j), 8);
+ __m128i extended_row_hi = _mm_srai_epi16(_mm_unpackhi_epi8(row_j, row_j), 8);
+ __m128i extended_input_lo = _mm_unpacklo_epi8(input_j, kZeros);
+ __m128i extended_input_hi = _mm_unpackhi_epi8(input_j, kZeros);
+ __m128i product_lo = _mm_madd_epi16(extended_row_lo, extended_input_lo);
+ __m128i product_hi = _mm_madd_epi16(extended_row_hi, extended_input_hi);
+ sum_lo = _mm_add_epi32(sum_lo, product_lo);
+ sum_hi = _mm_add_epi32(sum_hi, product_hi);
}
+ __m128i sum = _mm_add_epi32(sum_lo, sum_hi);
+ __m128i sum_high_64 = _mm_shuffle_epi32(sum, _MM_SHUFFLE(1, 0, 3, 2));
+ sum = _mm_add_epi32(sum, sum_high_64);
+ __m128i sum_second_32 = _mm_shufflelo_epi16(sum, _MM_SHUFFLE(1, 0, 3, 2));
+ sum = _mm_add_epi32(sum, sum_second_32);
+ output[i] = _mm_cvtsi128_si32(sum);
- #elif defined(USE_AVX2)
- __m256i sum = _mm256_setzero_si256();
- const auto row = reinterpret_cast<const __m256i*>(&weights_[offset]);
+#elif defined(USE_MMX)
+ __m64 sum_lo = _mm_cvtsi32_si64(biases_[i]);
+ __m64 sum_hi = kZeros;
+ const auto row = reinterpret_cast<const __m64*>(&weights_[offset]);
for (IndexType j = 0; j < kNumChunks; ++j) {
- __m256i product = _mm256_maddubs_epi16(
-
- #if defined(__MINGW32__) || defined(__MINGW64__)
- // HACK: Use _mm256_loadu_si256() instead of _mm256_load_si256. Because the binary
- // compiled with g++ in MSYS2 crashes here because the output memory is not aligned
- // even though alignas is specified.
- _mm256_loadu_si256
- #else
- _mm256_load_si256
- #endif
-
- (&input_vector[j]), _mm256_load_si256(&row[j]));
- product = _mm256_madd_epi16(product, kOnes);
- sum = _mm256_add_epi32(sum, product);
+ __m64 row_j = row[j];
+ __m64 input_j = input_vector[j];
+ __m64 extended_row_lo = _mm_srai_pi16(_mm_unpacklo_pi8(row_j, row_j), 8);
+ __m64 extended_row_hi = _mm_srai_pi16(_mm_unpackhi_pi8(row_j, row_j), 8);
+ __m64 extended_input_lo = _mm_unpacklo_pi8(input_j, kZeros);
+ __m64 extended_input_hi = _mm_unpackhi_pi8(input_j, kZeros);
+ __m64 product_lo = _mm_madd_pi16(extended_row_lo, extended_input_lo);
+ __m64 product_hi = _mm_madd_pi16(extended_row_hi, extended_input_hi);
+ sum_lo = _mm_add_pi32(sum_lo, product_lo);
+ sum_hi = _mm_add_pi32(sum_hi, product_hi);
}
- sum = _mm256_hadd_epi32(sum, sum);
- sum = _mm256_hadd_epi32(sum, sum);
- const __m128i lo = _mm256_extracti128_si256(sum, 0);
- const __m128i hi = _mm256_extracti128_si256(sum, 1);
- output[i] = _mm_cvtsi128_si32(lo) + _mm_cvtsi128_si32(hi) + biases_[i];
-
- #elif defined(USE_SSSE3)
- __m128i sum = _mm_cvtsi32_si128(biases_[i]);
- const auto row = reinterpret_cast<const __m128i*>(&weights_[offset]);
- for (IndexType j = 0; j < kNumChunks; ++j) {
- __m128i product = _mm_maddubs_epi16(
- _mm_load_si128(&input_vector[j]), _mm_load_si128(&row[j]));
- product = _mm_madd_epi16(product, kOnes);
- sum = _mm_add_epi32(sum, product);
- }
- sum = _mm_hadd_epi32(sum, sum);
- sum = _mm_hadd_epi32(sum, sum);
- output[i] = _mm_cvtsi128_si32(sum);
+ __m64 sum = _mm_add_pi32(sum_lo, sum_hi);
+ sum = _mm_add_pi32(sum, _mm_unpackhi_pi32(sum, sum));
+ output[i] = _mm_cvtsi64_si32(sum);
- #elif defined(USE_NEON)
+#elif defined(USE_NEON)
int32x4_t sum = {biases_[i]};
const auto row = reinterpret_cast<const int8x8_t*>(&weights_[offset]);
for (IndexType j = 0; j < kNumChunks; ++j) {
}
output[i] = sum[0] + sum[1] + sum[2] + sum[3];
- #else
+#else
OutputType sum = biases_[i];
for (IndexType j = 0; j < kInputDimensions; ++j) {
sum += weights_[offset + j] * input[j];
}
output[i] = sum;
- #endif
+#endif
}
+#if defined(USE_MMX)
+ _mm_empty();
+#endif
+
+#endif
+
return output;
}
PreviousLayer previous_layer_;
alignas(kCacheLineSize) BiasType biases_[kOutputDimensions];
- alignas(kCacheLineSize)
- WeightType weights_[kOutputDimensions * kPaddedInputDimensions];
+ alignas(kCacheLineSize) WeightType weights_[kOutputDimensions * kPaddedInputDimensions];
+ union {
+ uint32_t canSaturate16x4[(kOutputDimensions + 3) / 4];
+ bool canSaturate16[kOutputDimensions];
+ };
};
} // namespace Eval::NNUE::Layers
const auto out = reinterpret_cast<__m256i*>(output);
for (IndexType i = 0; i < kNumChunks; ++i) {
const __m256i words0 = _mm256_srai_epi16(_mm256_packs_epi32(
-
- #if defined(__MINGW32__) || defined(__MINGW64__)
- // HACK: Use _mm256_loadu_si256() instead of _mm256_load_si256. Because the binary
- // compiled with g++ in MSYS2 crashes here because the output memory is not aligned
- // even though alignas is specified.
- _mm256_loadu_si256
- #else
- _mm256_load_si256
- #endif
-
- (&in[i * 4 + 0]),
-
- #if defined(__MINGW32__) || defined(__MINGW64__)
- _mm256_loadu_si256
- #else
- _mm256_load_si256
- #endif
-
- (&in[i * 4 + 1])), kWeightScaleBits);
+ _mm256_load_si256(&in[i * 4 + 0]),
+ _mm256_load_si256(&in[i * 4 + 1])), kWeightScaleBits);
const __m256i words1 = _mm256_srai_epi16(_mm256_packs_epi32(
-
- #if defined(__MINGW32__) || defined(__MINGW64__)
- _mm256_loadu_si256
- #else
- _mm256_load_si256
- #endif
-
- (&in[i * 4 + 2]),
-
- #if defined(__MINGW32__) || defined(__MINGW64__)
- _mm256_loadu_si256
- #else
- _mm256_load_si256
- #endif
-
- (&in[i * 4 + 3])), kWeightScaleBits);
-
- #if defined(__MINGW32__) || defined(__MINGW64__)
- _mm256_storeu_si256
- #else
- _mm256_store_si256
- #endif
-
- (&out[i], _mm256_permutevar8x32_epi32(_mm256_max_epi8(
+ _mm256_load_si256(&in[i * 4 + 2]),
+ _mm256_load_si256(&in[i * 4 + 3])), kWeightScaleBits);
+ _mm256_store_si256(&out[i], _mm256_permutevar8x32_epi32(_mm256_max_epi8(
_mm256_packs_epi16(words0, words1), kZero), kOffsets));
}
constexpr IndexType kStart = kNumChunks * kSimdWidth;
- #elif defined(USE_SSSE3)
+ #elif defined(USE_SSE2)
constexpr IndexType kNumChunks = kInputDimensions / kSimdWidth;
#ifdef USE_SSE41
}
constexpr IndexType kStart = kNumChunks * kSimdWidth;
+ #elif defined(USE_MMX)
+ constexpr IndexType kNumChunks = kInputDimensions / kSimdWidth;
+ const __m64 k0x80s = _mm_set1_pi8(-128);
+ const auto in = reinterpret_cast<const __m64*>(input);
+ const auto out = reinterpret_cast<__m64*>(output);
+ for (IndexType i = 0; i < kNumChunks; ++i) {
+ const __m64 words0 = _mm_srai_pi16(
+ _mm_packs_pi32(in[i * 4 + 0], in[i * 4 + 1]),
+ kWeightScaleBits);
+ const __m64 words1 = _mm_srai_pi16(
+ _mm_packs_pi32(in[i * 4 + 2], in[i * 4 + 3]),
+ kWeightScaleBits);
+ const __m64 packedbytes = _mm_packs_pi16(words0, words1);
+ out[i] = _mm_subs_pi8(_mm_adds_pi8(packedbytes, k0x80s), k0x80s);
+ }
+ _mm_empty();
+ constexpr IndexType kStart = kNumChunks * kSimdWidth;
+
#elif defined(USE_NEON)
constexpr IndexType kNumChunks = kInputDimensions / (kSimdWidth / 2);
const int8x8_t kZero = {0};
namespace Eval::NNUE {
+ // The accumulator of a StateInfo without parent is set to the INIT state
+ enum AccumulatorState { EMPTY, COMPUTED, INIT };
+
// Class that holds the result of affine transformation of input features
- struct alignas(32) Accumulator {
+ struct alignas(kCacheLineSize) Accumulator {
std::int16_t
accumulation[2][kRefreshTriggers.size()][kTransformedFeatureDimensions];
- Value score;
- bool computed_accumulation;
- bool computed_score;
+ AccumulatorState state[2];
};
} // namespace Eval::NNUE
#ifndef NNUE_COMMON_H_INCLUDED
#define NNUE_COMMON_H_INCLUDED
+#include <cstring>
+#include <iostream>
+
#if defined(USE_AVX2)
#include <immintrin.h>
#elif defined(USE_SSE2)
#include <emmintrin.h>
+#elif defined(USE_MMX)
+#include <mmintrin.h>
+
#elif defined(USE_NEON)
#include <arm_neon.h>
#endif
#elif defined(USE_SSE2)
constexpr std::size_t kSimdWidth = 16;
+ #elif defined(USE_MMX)
+ constexpr std::size_t kSimdWidth = 8;
+
#elif defined(USE_NEON)
constexpr std::size_t kSimdWidth = 16;
#endif
constexpr std::size_t kMaxSimdWidth = 32;
+ // unique number for each piece type on each square
+ enum {
+ PS_NONE = 0,
+ PS_W_PAWN = 1,
+ PS_B_PAWN = 1 * SQUARE_NB + 1,
+ PS_W_KNIGHT = 2 * SQUARE_NB + 1,
+ PS_B_KNIGHT = 3 * SQUARE_NB + 1,
+ PS_W_BISHOP = 4 * SQUARE_NB + 1,
+ PS_B_BISHOP = 5 * SQUARE_NB + 1,
+ PS_W_ROOK = 6 * SQUARE_NB + 1,
+ PS_B_ROOK = 7 * SQUARE_NB + 1,
+ PS_W_QUEEN = 8 * SQUARE_NB + 1,
+ PS_B_QUEEN = 9 * SQUARE_NB + 1,
+ PS_W_KING = 10 * SQUARE_NB + 1,
+ PS_END = PS_W_KING, // pieces without kings (pawns included)
+ PS_B_KING = 11 * SQUARE_NB + 1,
+ PS_END2 = 12 * SQUARE_NB + 1
+ };
+
+ constexpr uint32_t kpp_board_index[COLOR_NB][PIECE_NB] = {
+ // convention: W - us, B - them
+ // viewed from other side, W and B are reversed
+ { PS_NONE, PS_W_PAWN, PS_W_KNIGHT, PS_W_BISHOP, PS_W_ROOK, PS_W_QUEEN, PS_W_KING, PS_NONE,
+ PS_NONE, PS_B_PAWN, PS_B_KNIGHT, PS_B_BISHOP, PS_B_ROOK, PS_B_QUEEN, PS_B_KING, PS_NONE },
+ { PS_NONE, PS_B_PAWN, PS_B_KNIGHT, PS_B_BISHOP, PS_B_ROOK, PS_B_QUEEN, PS_B_KING, PS_NONE,
+ PS_NONE, PS_W_PAWN, PS_W_KNIGHT, PS_W_BISHOP, PS_W_ROOK, PS_W_QUEEN, PS_W_KING, PS_NONE }
+ };
+
// Type of input feature after conversion
using TransformedFeatureType = std::uint8_t;
using IndexType = std::uint32_t;
// Round n up to be a multiple of base
template <typename IntType>
constexpr IntType CeilToMultiple(IntType n, IntType base) {
- return (n + base - 1) / base * base;
+ return (n + base - 1) / base * base;
+ }
+
+ // read_little_endian() is our utility to read an integer (signed or unsigned, any size)
+ // from a stream in little-endian order. We swap the byte order after the read if
+ // necessary to return a result with the byte ordering of the compiling machine.
+ template <typename IntType>
+ inline IntType read_little_endian(std::istream& stream) {
+
+ IntType result;
+ std::uint8_t u[sizeof(IntType)];
+ typename std::make_unsigned<IntType>::type v = 0;
+
+ stream.read(reinterpret_cast<char*>(u), sizeof(IntType));
+ for (std::size_t i = 0; i < sizeof(IntType); ++i)
+ v = (v << 8) | u[sizeof(IntType) - i - 1];
+
+ std::memcpy(&result, &v, sizeof(IntType));
+ return result;
}
} // namespace Eval::NNUE
namespace Eval::NNUE {
+ // If vector instructions are enabled, we update and refresh the
+ // accumulator tile by tile such that each tile fits in the CPU's
+ // vector registers.
+ #define VECTOR
+
+ #ifdef USE_AVX512
+ typedef __m512i vec_t;
+ #define vec_load(a) _mm512_load_si512(a)
+ #define vec_store(a,b) _mm512_store_si512(a,b)
+ #define vec_add_16(a,b) _mm512_add_epi16(a,b)
+ #define vec_sub_16(a,b) _mm512_sub_epi16(a,b)
+ static constexpr IndexType kNumRegs = 8; // only 8 are needed
+
+ #elif USE_AVX2
+ typedef __m256i vec_t;
+ #define vec_load(a) _mm256_load_si256(a)
+ #define vec_store(a,b) _mm256_store_si256(a,b)
+ #define vec_add_16(a,b) _mm256_add_epi16(a,b)
+ #define vec_sub_16(a,b) _mm256_sub_epi16(a,b)
+ static constexpr IndexType kNumRegs = 16;
+
+ #elif USE_SSE2
+ typedef __m128i vec_t;
+ #define vec_load(a) (*(a))
+ #define vec_store(a,b) *(a)=(b)
+ #define vec_add_16(a,b) _mm_add_epi16(a,b)
+ #define vec_sub_16(a,b) _mm_sub_epi16(a,b)
+ static constexpr IndexType kNumRegs = Is64Bit ? 16 : 8;
+
+ #elif USE_MMX
+ typedef __m64 vec_t;
+ #define vec_load(a) (*(a))
+ #define vec_store(a,b) *(a)=(b)
+ #define vec_add_16(a,b) _mm_add_pi16(a,b)
+ #define vec_sub_16(a,b) _mm_sub_pi16(a,b)
+ static constexpr IndexType kNumRegs = 8;
+
+ #elif USE_NEON
+ typedef int16x8_t vec_t;
+ #define vec_load(a) (*(a))
+ #define vec_store(a,b) *(a)=(b)
+ #define vec_add_16(a,b) vaddq_s16(a,b)
+ #define vec_sub_16(a,b) vsubq_s16(a,b)
+ static constexpr IndexType kNumRegs = 16;
+
+ #else
+ #undef VECTOR
+
+ #endif
+
// Input feature converter
class FeatureTransformer {
// Number of output dimensions for one side
static constexpr IndexType kHalfDimensions = kTransformedFeatureDimensions;
+ #ifdef VECTOR
+ static constexpr IndexType kTileHeight = kNumRegs * sizeof(vec_t) / 2;
+ static_assert(kHalfDimensions % kTileHeight == 0, "kTileHeight must divide kHalfDimensions");
+ #endif
+
public:
// Output type
using OutputType = TransformedFeatureType;
// Hash value embedded in the evaluation file
static constexpr std::uint32_t GetHashValue() {
+
return RawFeatures::kHashValue ^ kOutputDimensions;
}
// Read network parameters
bool ReadParameters(std::istream& stream) {
- stream.read(reinterpret_cast<char*>(biases_),
- kHalfDimensions * sizeof(BiasType));
- stream.read(reinterpret_cast<char*>(weights_),
- kHalfDimensions * kInputDimensions * sizeof(WeightType));
- return !stream.fail();
- }
- // Proceed with the difference calculation if possible
- bool UpdateAccumulatorIfPossible(const Position& pos) const {
- const auto now = pos.state();
- if (now->accumulator.computed_accumulation) {
- return true;
- }
- const auto prev = now->previous;
- if (prev && prev->accumulator.computed_accumulation) {
- UpdateAccumulator(pos);
- return true;
- }
- return false;
+ for (std::size_t i = 0; i < kHalfDimensions; ++i)
+ biases_[i] = read_little_endian<BiasType>(stream);
+ for (std::size_t i = 0; i < kHalfDimensions * kInputDimensions; ++i)
+ weights_[i] = read_little_endian<WeightType>(stream);
+ return !stream.fail();
}
// Convert input features
- void Transform(const Position& pos, OutputType* output, bool refresh) const {
- if (refresh || !UpdateAccumulatorIfPossible(pos)) {
- RefreshAccumulator(pos);
- }
+ void Transform(const Position& pos, OutputType* output) const {
+
+ UpdateAccumulator(pos, WHITE);
+ UpdateAccumulator(pos, BLACK);
+
const auto& accumulation = pos.state()->accumulator.accumulation;
- #if defined(USE_AVX2)
+ #if defined(USE_AVX512)
+ constexpr IndexType kNumChunks = kHalfDimensions / (kSimdWidth * 2);
+ static_assert(kHalfDimensions % (kSimdWidth * 2) == 0);
+ const __m512i kControl = _mm512_setr_epi64(0, 2, 4, 6, 1, 3, 5, 7);
+ const __m512i kZero = _mm512_setzero_si512();
+
+ #elif defined(USE_AVX2)
constexpr IndexType kNumChunks = kHalfDimensions / kSimdWidth;
constexpr int kControl = 0b11011000;
const __m256i kZero = _mm256_setzero_si256();
- #elif defined(USE_SSSE3)
+ #elif defined(USE_SSE2)
constexpr IndexType kNumChunks = kHalfDimensions / kSimdWidth;
#ifdef USE_SSE41
const __m128i k0x80s = _mm_set1_epi8(-128);
#endif
+ #elif defined(USE_MMX)
+ constexpr IndexType kNumChunks = kHalfDimensions / kSimdWidth;
+ const __m64 k0x80s = _mm_set1_pi8(-128);
+
#elif defined(USE_NEON)
constexpr IndexType kNumChunks = kHalfDimensions / (kSimdWidth / 2);
const int8x8_t kZero = {0};
for (IndexType p = 0; p < 2; ++p) {
const IndexType offset = kHalfDimensions * p;
- #if defined(USE_AVX2)
- auto out = reinterpret_cast<__m256i*>(&output[offset]);
+ #if defined(USE_AVX512)
+ auto out = reinterpret_cast<__m512i*>(&output[offset]);
for (IndexType j = 0; j < kNumChunks; ++j) {
- __m256i sum0 =
-
- #if defined(__MINGW32__) || defined(__MINGW64__)
- // HACK: Use _mm256_loadu_si256() instead of _mm256_load_si256. Because the binary
- // compiled with g++ in MSYS2 crashes here because the output memory is not aligned
- // even though alignas is specified.
- _mm256_loadu_si256
- #else
- _mm256_load_si256
- #endif
-
- (&reinterpret_cast<const __m256i*>(
- accumulation[perspectives[p]][0])[j * 2 + 0]);
- __m256i sum1 =
-
- #if defined(__MINGW32__) || defined(__MINGW64__)
- _mm256_loadu_si256
- #else
- _mm256_load_si256
- #endif
-
- (&reinterpret_cast<const __m256i*>(
- accumulation[perspectives[p]][0])[j * 2 + 1]);
-
- #if defined(__MINGW32__) || defined(__MINGW64__)
- _mm256_storeu_si256
- #else
- _mm256_store_si256
- #endif
+ __m512i sum0 = _mm512_load_si512(
+ &reinterpret_cast<const __m512i*>(accumulation[perspectives[p]][0])[j * 2 + 0]);
+ __m512i sum1 = _mm512_load_si512(
+ &reinterpret_cast<const __m512i*>(accumulation[perspectives[p]][0])[j * 2 + 1]);
+ _mm512_store_si512(&out[j], _mm512_permutexvar_epi64(kControl,
+ _mm512_max_epi8(_mm512_packs_epi16(sum0, sum1), kZero)));
+ }
- (&out[j], _mm256_permute4x64_epi64(_mm256_max_epi8(
+ #elif defined(USE_AVX2)
+ auto out = reinterpret_cast<__m256i*>(&output[offset]);
+ for (IndexType j = 0; j < kNumChunks; ++j) {
+ __m256i sum0 = _mm256_load_si256(
+ &reinterpret_cast<const __m256i*>(accumulation[perspectives[p]][0])[j * 2 + 0]);
+ __m256i sum1 = _mm256_load_si256(
+ &reinterpret_cast<const __m256i*>(accumulation[perspectives[p]][0])[j * 2 + 1]);
+ _mm256_store_si256(&out[j], _mm256_permute4x64_epi64(_mm256_max_epi8(
_mm256_packs_epi16(sum0, sum1), kZero), kControl));
}
- #elif defined(USE_SSSE3)
+ #elif defined(USE_SSE2)
auto out = reinterpret_cast<__m128i*>(&output[offset]);
for (IndexType j = 0; j < kNumChunks; ++j) {
__m128i sum0 = _mm_load_si128(&reinterpret_cast<const __m128i*>(
_mm_store_si128(&out[j],
#ifdef USE_SSE41
- _mm_max_epi8(packedbytes, kZero)
+ _mm_max_epi8(packedbytes, kZero)
#else
- _mm_subs_epi8(_mm_adds_epi8(packedbytes, k0x80s), k0x80s)
+ _mm_subs_epi8(_mm_adds_epi8(packedbytes, k0x80s), k0x80s)
#endif
);
}
+ #elif defined(USE_MMX)
+ auto out = reinterpret_cast<__m64*>(&output[offset]);
+ for (IndexType j = 0; j < kNumChunks; ++j) {
+ __m64 sum0 = *(&reinterpret_cast<const __m64*>(
+ accumulation[perspectives[p]][0])[j * 2 + 0]);
+ __m64 sum1 = *(&reinterpret_cast<const __m64*>(
+ accumulation[perspectives[p]][0])[j * 2 + 1]);
+ const __m64 packedbytes = _mm_packs_pi16(sum0, sum1);
+ out[j] = _mm_subs_pi8(_mm_adds_pi8(packedbytes, k0x80s), k0x80s);
+ }
+
#elif defined(USE_NEON)
const auto out = reinterpret_cast<int8x8_t*>(&output[offset]);
for (IndexType j = 0; j < kNumChunks; ++j) {
#endif
}
+ #if defined(USE_MMX)
+ _mm_empty();
+ #endif
}
private:
- // Calculate cumulative value without using difference calculation
- void RefreshAccumulator(const Position& pos) const {
- auto& accumulator = pos.state()->accumulator;
- IndexType i = 0;
- Features::IndexList active_indices[2];
- RawFeatures::AppendActiveIndices(pos, kRefreshTriggers[i],
- active_indices);
- for (Color perspective : { WHITE, BLACK }) {
- std::memcpy(accumulator.accumulation[perspective][i], biases_,
- kHalfDimensions * sizeof(BiasType));
- for (const auto index : active_indices[perspective]) {
- const IndexType offset = kHalfDimensions * index;
+ void UpdateAccumulator(const Position& pos, const Color c) const {
- #if defined(USE_AVX2)
- auto accumulation = reinterpret_cast<__m256i*>(
- &accumulator.accumulation[perspective][i][0]);
- auto column = reinterpret_cast<const __m256i*>(&weights_[offset]);
- constexpr IndexType kNumChunks = kHalfDimensions / (kSimdWidth / 2);
- for (IndexType j = 0; j < kNumChunks; ++j) {
- #if defined(__MINGW32__) || defined(__MINGW64__)
- _mm256_storeu_si256(&accumulation[j], _mm256_add_epi16(_mm256_loadu_si256(&accumulation[j]), column[j]));
- #else
- accumulation[j] = _mm256_add_epi16(accumulation[j], column[j]);
+ #ifdef VECTOR
+ // Gcc-10.2 unnecessarily spills AVX2 registers if this array
+ // is defined in the VECTOR code below, once in each branch
+ vec_t acc[kNumRegs];
#endif
- }
-
- #elif defined(USE_SSE2)
- auto accumulation = reinterpret_cast<__m128i*>(
- &accumulator.accumulation[perspective][i][0]);
- auto column = reinterpret_cast<const __m128i*>(&weights_[offset]);
- constexpr IndexType kNumChunks = kHalfDimensions / (kSimdWidth / 2);
- for (IndexType j = 0; j < kNumChunks; ++j) {
- accumulation[j] = _mm_add_epi16(accumulation[j], column[j]);
- }
- #elif defined(USE_NEON)
- auto accumulation = reinterpret_cast<int16x8_t*>(
- &accumulator.accumulation[perspective][i][0]);
- auto column = reinterpret_cast<const int16x8_t*>(&weights_[offset]);
- constexpr IndexType kNumChunks = kHalfDimensions / (kSimdWidth / 2);
- for (IndexType j = 0; j < kNumChunks; ++j) {
- accumulation[j] = vaddq_s16(accumulation[j], column[j]);
- }
-
- #else
- for (IndexType j = 0; j < kHalfDimensions; ++j) {
- accumulator.accumulation[perspective][i][j] += weights_[offset + j];
- }
- #endif
-
- }
+ // Look for a usable accumulator of an earlier position. We keep track
+ // of the estimated gain in terms of features to be added/subtracted.
+ StateInfo *st = pos.state(), *next = nullptr;
+ int gain = pos.count<ALL_PIECES>() - 2;
+ while (st->accumulator.state[c] == EMPTY)
+ {
+ auto& dp = st->dirtyPiece;
+ // The first condition tests whether an incremental update is
+ // possible at all: if this side's king has moved, it is not possible.
+ static_assert(std::is_same_v<RawFeatures::SortedTriggerSet,
+ Features::CompileTimeList<Features::TriggerEvent, Features::TriggerEvent::kFriendKingMoved>>,
+ "Current code assumes that only kFriendlyKingMoved refresh trigger is being used.");
+ if ( dp.piece[0] == make_piece(c, KING)
+ || (gain -= dp.dirty_num + 1) < 0)
+ break;
+ next = st;
+ st = st->previous;
}
- accumulator.computed_accumulation = true;
- accumulator.computed_score = false;
- }
+ if (st->accumulator.state[c] == COMPUTED)
+ {
+ if (next == nullptr)
+ return;
+
+ // Update incrementally in two steps. First, we update the "next"
+ // accumulator. Then, we update the current accumulator (pos.state()).
+
+ // Gather all features to be updated. This code assumes HalfKP features
+ // only and doesn't support refresh triggers.
+ static_assert(std::is_same_v<Features::FeatureSet<Features::HalfKP<Features::Side::kFriend>>,
+ RawFeatures>);
+ Features::IndexList removed[2], added[2];
+ Features::HalfKP<Features::Side::kFriend>::AppendChangedIndices(pos,
+ next->dirtyPiece, c, &removed[0], &added[0]);
+ for (StateInfo *st2 = pos.state(); st2 != next; st2 = st2->previous)
+ Features::HalfKP<Features::Side::kFriend>::AppendChangedIndices(pos,
+ st2->dirtyPiece, c, &removed[1], &added[1]);
+
+ // Mark the accumulators as computed.
+ next->accumulator.state[c] = COMPUTED;
+ pos.state()->accumulator.state[c] = COMPUTED;
+
+ // Now update the accumulators listed in info[], where the last element is a sentinel.
+ StateInfo *info[3] =
+ { next, next == pos.state() ? nullptr : pos.state(), nullptr };
+ #ifdef VECTOR
+ for (IndexType j = 0; j < kHalfDimensions / kTileHeight; ++j)
+ {
+ // Load accumulator
+ auto accTile = reinterpret_cast<vec_t*>(
+ &st->accumulator.accumulation[c][0][j * kTileHeight]);
+ for (IndexType k = 0; k < kNumRegs; ++k)
+ acc[k] = vec_load(&accTile[k]);
+
+ for (IndexType i = 0; info[i]; ++i)
+ {
+ // Difference calculation for the deactivated features
+ for (const auto index : removed[i])
+ {
+ const IndexType offset = kHalfDimensions * index + j * kTileHeight;
+ auto column = reinterpret_cast<const vec_t*>(&weights_[offset]);
+ for (IndexType k = 0; k < kNumRegs; ++k)
+ acc[k] = vec_sub_16(acc[k], column[k]);
+ }
- // Calculate cumulative value using difference calculation
- void UpdateAccumulator(const Position& pos) const {
- const auto prev_accumulator = pos.state()->previous->accumulator;
- auto& accumulator = pos.state()->accumulator;
- IndexType i = 0;
- Features::IndexList removed_indices[2], added_indices[2];
- bool reset[2];
- RawFeatures::AppendChangedIndices(pos, kRefreshTriggers[i],
- removed_indices, added_indices, reset);
- for (Color perspective : { WHITE, BLACK }) {
-
- #if defined(USE_AVX2)
- constexpr IndexType kNumChunks = kHalfDimensions / (kSimdWidth / 2);
- auto accumulation = reinterpret_cast<__m256i*>(
- &accumulator.accumulation[perspective][i][0]);
+ // Difference calculation for the activated features
+ for (const auto index : added[i])
+ {
+ const IndexType offset = kHalfDimensions * index + j * kTileHeight;
+ auto column = reinterpret_cast<const vec_t*>(&weights_[offset]);
+ for (IndexType k = 0; k < kNumRegs; ++k)
+ acc[k] = vec_add_16(acc[k], column[k]);
+ }
- #elif defined(USE_SSE2)
- constexpr IndexType kNumChunks = kHalfDimensions / (kSimdWidth / 2);
- auto accumulation = reinterpret_cast<__m128i*>(
- &accumulator.accumulation[perspective][i][0]);
+ // Store accumulator
+ accTile = reinterpret_cast<vec_t*>(
+ &info[i]->accumulator.accumulation[c][0][j * kTileHeight]);
+ for (IndexType k = 0; k < kNumRegs; ++k)
+ vec_store(&accTile[k], acc[k]);
+ }
+ }
- #elif defined(USE_NEON)
- constexpr IndexType kNumChunks = kHalfDimensions / (kSimdWidth / 2);
- auto accumulation = reinterpret_cast<int16x8_t*>(
- &accumulator.accumulation[perspective][i][0]);
- #endif
+ #else
+ for (IndexType i = 0; info[i]; ++i)
+ {
+ std::memcpy(info[i]->accumulator.accumulation[c][0],
+ st->accumulator.accumulation[c][0],
+ kHalfDimensions * sizeof(BiasType));
+ st = info[i];
- if (reset[perspective]) {
- std::memcpy(accumulator.accumulation[perspective][i], biases_,
- kHalfDimensions * sizeof(BiasType));
- } else {
- std::memcpy(accumulator.accumulation[perspective][i],
- prev_accumulator.accumulation[perspective][i],
- kHalfDimensions * sizeof(BiasType));
// Difference calculation for the deactivated features
- for (const auto index : removed_indices[perspective]) {
+ for (const auto index : removed[i])
+ {
const IndexType offset = kHalfDimensions * index;
- #if defined(USE_AVX2)
- auto column = reinterpret_cast<const __m256i*>(&weights_[offset]);
- for (IndexType j = 0; j < kNumChunks; ++j) {
- accumulation[j] = _mm256_sub_epi16(accumulation[j], column[j]);
- }
-
- #elif defined(USE_SSE2)
- auto column = reinterpret_cast<const __m128i*>(&weights_[offset]);
- for (IndexType j = 0; j < kNumChunks; ++j) {
- accumulation[j] = _mm_sub_epi16(accumulation[j], column[j]);
- }
-
- #elif defined(USE_NEON)
- auto column = reinterpret_cast<const int16x8_t*>(&weights_[offset]);
- for (IndexType j = 0; j < kNumChunks; ++j) {
- accumulation[j] = vsubq_s16(accumulation[j], column[j]);
- }
+ for (IndexType j = 0; j < kHalfDimensions; ++j)
+ st->accumulator.accumulation[c][0][j] -= weights_[offset + j];
+ }
- #else
- for (IndexType j = 0; j < kHalfDimensions; ++j) {
- accumulator.accumulation[perspective][i][j] -=
- weights_[offset + j];
- }
- #endif
+ // Difference calculation for the activated features
+ for (const auto index : added[i])
+ {
+ const IndexType offset = kHalfDimensions * index;
+ for (IndexType j = 0; j < kHalfDimensions; ++j)
+ st->accumulator.accumulation[c][0][j] += weights_[offset + j];
}
}
- { // Difference calculation for the activated features
- for (const auto index : added_indices[perspective]) {
- const IndexType offset = kHalfDimensions * index;
-
- #if defined(USE_AVX2)
- auto column = reinterpret_cast<const __m256i*>(&weights_[offset]);
- for (IndexType j = 0; j < kNumChunks; ++j) {
- accumulation[j] = _mm256_add_epi16(accumulation[j], column[j]);
- }
-
- #elif defined(USE_SSE2)
- auto column = reinterpret_cast<const __m128i*>(&weights_[offset]);
- for (IndexType j = 0; j < kNumChunks; ++j) {
- accumulation[j] = _mm_add_epi16(accumulation[j], column[j]);
- }
+ #endif
+ }
+ else
+ {
+ // Refresh the accumulator
+ auto& accumulator = pos.state()->accumulator;
+ accumulator.state[c] = COMPUTED;
+ Features::IndexList active;
+ Features::HalfKP<Features::Side::kFriend>::AppendActiveIndices(pos, c, &active);
+
+ #ifdef VECTOR
+ for (IndexType j = 0; j < kHalfDimensions / kTileHeight; ++j)
+ {
+ auto biasesTile = reinterpret_cast<const vec_t*>(
+ &biases_[j * kTileHeight]);
+ for (IndexType k = 0; k < kNumRegs; ++k)
+ acc[k] = biasesTile[k];
+
+ for (const auto index : active)
+ {
+ const IndexType offset = kHalfDimensions * index + j * kTileHeight;
+ auto column = reinterpret_cast<const vec_t*>(&weights_[offset]);
+
+ for (unsigned k = 0; k < kNumRegs; ++k)
+ acc[k] = vec_add_16(acc[k], column[k]);
+ }
- #elif defined(USE_NEON)
- auto column = reinterpret_cast<const int16x8_t*>(&weights_[offset]);
- for (IndexType j = 0; j < kNumChunks; ++j) {
- accumulation[j] = vaddq_s16(accumulation[j], column[j]);
- }
+ auto accTile = reinterpret_cast<vec_t*>(
+ &accumulator.accumulation[c][0][j * kTileHeight]);
+ for (unsigned k = 0; k < kNumRegs; k++)
+ vec_store(&accTile[k], acc[k]);
+ }
#else
- for (IndexType j = 0; j < kHalfDimensions; ++j) {
- accumulator.accumulation[perspective][i][j] +=
- weights_[offset + j];
- }
- #endif
+ std::memcpy(accumulator.accumulation[c][0], biases_,
+ kHalfDimensions * sizeof(BiasType));
- }
+ for (const auto index : active)
+ {
+ const IndexType offset = kHalfDimensions * index;
+
+ for (IndexType j = 0; j < kHalfDimensions; ++j)
+ accumulator.accumulation[c][0][j] += weights_[offset + j];
}
+ #endif
}
- accumulator.computed_accumulation = true;
- accumulator.computed_score = false;
+ #if defined(USE_MMX)
+ _mm_empty();
+ #endif
}
using BiasType = std::int16_t;
#define S(mg, eg) make_score(mg, eg)
// Pawn penalties
- constexpr Score Backward = S( 8, 27);
- constexpr Score Doubled = S(11, 55);
- constexpr Score Isolated = S( 5, 17);
- constexpr Score WeakLever = S( 2, 54);
- constexpr Score WeakUnopposed = S(15, 25);
+ constexpr Score Backward = S( 8, 25);
+ constexpr Score Doubled = S(10, 55);
+ constexpr Score Isolated = S( 3, 15);
+ constexpr Score WeakLever = S( 3, 55);
+ constexpr Score WeakUnopposed = S(13, 25);
// Bonus for blocked pawns at 5th or 6th rank
- constexpr Score BlockedPawn[2] = { S(-13, -4), S(-4, 3) };
+ constexpr Score BlockedPawn[2] = { S(-15, -3), S(-6, 3) };
constexpr Score BlockedStorm[RANK_NB] = {
- S(0, 0), S(0, 0), S(76, 78), S(-10, 15), S(-7, 10), S(-4, 6), S(-1, 2)
+ S(0, 0), S(0, 0), S(75, 78), S(-8, 16), S(-6, 10), S(-6, 6), S(0, 2)
};
// Connected pawn bonus
- constexpr int Connected[RANK_NB] = { 0, 7, 8, 11, 24, 45, 85 };
+ constexpr int Connected[RANK_NB] = { 0, 5, 7, 11, 24, 48, 86 };
// Strength of pawn shelter for our king by [distance from edge][rank].
// RANK_1 = 0 is used for files where we have no pawn, or pawn is behind our king.
constexpr Value ShelterStrength[int(FILE_NB) / 2][RANK_NB] = {
- { V( -6), V( 81), V( 93), V( 58), V( 39), V( 18), V( 25) },
- { V(-43), V( 61), V( 35), V(-49), V(-29), V(-11), V( -63) },
- { V(-10), V( 75), V( 23), V( -2), V( 32), V( 3), V( -45) },
- { V(-39), V(-13), V(-29), V(-52), V(-48), V(-67), V(-166) }
+ { V( -5), V( 82), V( 92), V( 54), V( 36), V( 22), V( 28) },
+ { V(-44), V( 63), V( 33), V(-50), V(-30), V(-12), V( -62) },
+ { V(-11), V( 77), V( 22), V( -6), V( 31), V( 8), V( -45) },
+ { V(-39), V(-12), V(-29), V(-50), V(-43), V(-68), V(-164) }
};
// Danger of enemy pawns moving toward our king by [distance from edge][rank].
// is behind our king. Note that UnblockedStorm[0][1-2] accommodate opponent pawn
// on edge, likely blocked by our king.
constexpr Value UnblockedStorm[int(FILE_NB) / 2][RANK_NB] = {
- { V( 85), V(-289), V(-166), V(97), V(50), V( 45), V( 50) },
- { V( 46), V( -25), V( 122), V(45), V(37), V(-10), V( 20) },
- { V( -6), V( 51), V( 168), V(34), V(-2), V(-22), V(-14) },
- { V(-15), V( -11), V( 101), V( 4), V(11), V(-15), V(-29) }
+ { V( 87), V(-288), V(-168), V( 96), V( 47), V( 44), V( 46) },
+ { V( 42), V( -25), V( 120), V( 45), V( 34), V( -9), V( 24) },
+ { V( -8), V( 51), V( 167), V( 35), V( -4), V(-16), V(-12) },
+ { V(-17), V( -13), V( 100), V( 4), V( 9), V(-16), V(-31) }
};
+
+ // KingOnFile[semi-open Us][semi-open Them] contains bonuses/penalties
+ // for king when the king is on a semi-open or open file.
+ constexpr Score KingOnFile[2][2] = {{ S(-19,12), S(-6, 7) },
+ { S( 0, 2), S( 6,-5) }};
+
#undef S
#undef V
Square s;
bool backward, passed, doubled;
Score score = SCORE_ZERO;
- const Square* pl = pos.squares<PAWN>(Us);
+ Bitboard b = pos.pieces(Us, PAWN);
Bitboard ourPawns = pos.pieces( Us, PAWN);
Bitboard theirPawns = pos.pieces(Them, PAWN);
e->blockedCount += popcount(shift<Up>(ourPawns) & (theirPawns | doubleAttackThem));
// Loop through all pawns of the current color and score each pawn
- while ((s = *pl++) != SQ_NONE)
- {
+ while (b) {
+ s = pop_lsb(&b);
+
assert(pos.piece_on(s) == make_piece(Us, PAWN));
Rank r = relative_rank(Us, s);
if (support | phalanx)
{
int v = Connected[r] * (2 + bool(phalanx) - bool(opposed))
- + 21 * popcount(support);
+ + 22 * popcount(support);
score += make_score(v, v * (r - 2) / 4);
}
score -= Doubled * doubled
+ WeakLever * more_than_one(lever);
- if (blocked && r > RANK_4)
- score += BlockedPawn[r-4];
+ if (blocked && r >= RANK_5)
+ score += BlockedPawn[r - RANK_5];
}
return score;
Score bonus = make_score(5, 5);
- File center = Utility::clamp(file_of(ksq), FILE_B, FILE_G);
+ File center = std::clamp(file_of(ksq), FILE_B, FILE_G);
for (File f = File(center - 1); f <= File(center + 1); ++f)
{
b = ourPawns & file_bb(f);
bonus -= make_score(UnblockedStorm[d][theirRank], 0);
}
+ // King On File
+ bonus -= KingOnFile[pos.is_on_semiopen_file(Us, ksq)][pos.is_on_semiopen_file(Them, ksq)];
+
return bonus;
}
&& !pos.can_castle(ANY_CASTLING))
{
StateInfo st;
+ ASSERT_ALIGNED(&st, Eval::NNUE::kCacheLineSize);
+
Position p;
p.set(pos.fen(), pos.is_chess960(), &st, pos.this_thread());
Tablebases::ProbeState s1, s2;
std::memset(this, 0, sizeof(Position));
std::memset(si, 0, sizeof(StateInfo));
- std::fill_n(&pieceList[0][0], sizeof(pieceList) / sizeof(Square), SQ_NONE);
st = si;
- // Each piece on board gets a unique ID used to track the piece later
- PieceId piece_id, next_piece_id = PIECE_ID_ZERO;
-
ss >> std::noskipws;
// 1. Piece placement
else if (token == '/')
sq += 2 * SOUTH;
- else if ((idx = PieceToChar.find(token)) != string::npos)
- {
- auto pc = Piece(idx);
- put_piece(pc, sq);
-
- if (Eval::useNNUE)
- {
- // Kings get a fixed ID, other pieces get ID in order of placement
- piece_id =
- (idx == W_KING) ? PIECE_ID_WKING :
- (idx == B_KING) ? PIECE_ID_BKING :
- next_piece_id++;
- evalList.put_piece(piece_id, sq, pc);
- }
-
+ else if ((idx = PieceToChar.find(token)) != string::npos) {
+ put_piece(Piece(idx), sq);
++sq;
}
}
chess960 = isChess960;
thisThread = th;
set_state(st);
-
- assert(pos_is_ok());
+ st->accumulator.state[WHITE] = Eval::NNUE::INIT;
+ st->accumulator.state[BLACK] = Eval::NNUE::INIT;
return *this;
}
++st->pliesFromNull;
// Used by NNUE
- st->accumulator.computed_accumulation = false;
- st->accumulator.computed_score = false;
- PieceId dp0 = PIECE_ID_NONE;
- PieceId dp1 = PIECE_ID_NONE;
+ st->accumulator.state[WHITE] = Eval::NNUE::EMPTY;
+ st->accumulator.state[BLACK] = Eval::NNUE::EMPTY;
auto& dp = st->dirtyPiece;
dp.dirty_num = 1;
if (Eval::useNNUE)
{
- dp.dirty_num = 2; // 2 pieces moved
- dp1 = piece_id_on(capsq);
- dp.pieceId[1] = dp1;
- dp.old_piece[1] = evalList.piece_with_id(dp1);
- evalList.put_piece(dp1, capsq, NO_PIECE);
- dp.new_piece[1] = evalList.piece_with_id(dp1);
+ dp.dirty_num = 2; // 1 piece moved, 1 piece captured
+ dp.piece[1] = captured;
+ dp.from[1] = capsq;
+ dp.to[1] = SQ_NONE;
}
// Update board and piece lists
{
if (Eval::useNNUE)
{
- dp0 = piece_id_on(from);
- dp.pieceId[0] = dp0;
- dp.old_piece[0] = evalList.piece_with_id(dp0);
- evalList.put_piece(dp0, to, pc);
- dp.new_piece[0] = evalList.piece_with_id(dp0);
+ dp.piece[0] = pc;
+ dp.from[0] = from;
+ dp.to[0] = to;
}
move_piece(from, to);
if (Eval::useNNUE)
{
- dp0 = piece_id_on(to);
- evalList.put_piece(dp0, to, promotion);
- dp.new_piece[0] = evalList.piece_with_id(dp0);
+ // Promoting pawn to SQ_NONE, promoted piece from SQ_NONE
+ dp.to[0] = SQ_NONE;
+ dp.piece[dp.dirty_num] = promotion;
+ dp.from[dp.dirty_num] = SQ_NONE;
+ dp.to[dp.dirty_num] = to;
+ dp.dirty_num++;
}
// Update hash keys
{
move_piece(to, from); // Put the piece back at the source square
- if (Eval::useNNUE)
- {
- PieceId dp0 = st->dirtyPiece.pieceId[0];
- evalList.put_piece(dp0, from, pc);
- }
-
if (st->capturedPiece)
{
Square capsq = to;
}
put_piece(st->capturedPiece, capsq); // Restore the captured piece
-
- if (Eval::useNNUE)
- {
- PieceId dp1 = st->dirtyPiece.pieceId[1];
- assert(evalList.piece_with_id(dp1).from[WHITE] == PS_NONE);
- assert(evalList.piece_with_id(dp1).from[BLACK] == PS_NONE);
- evalList.put_piece(dp1, capsq, st->capturedPiece);
- }
}
}
rto = relative_square(us, kingSide ? SQ_F1 : SQ_D1);
to = relative_square(us, kingSide ? SQ_G1 : SQ_C1);
- if (Eval::useNNUE)
+ if (Do && Eval::useNNUE)
{
- PieceId dp0, dp1;
auto& dp = st->dirtyPiece;
- dp.dirty_num = 2; // 2 pieces moved
-
- if (Do)
- {
- dp0 = piece_id_on(from);
- dp1 = piece_id_on(rfrom);
- dp.pieceId[0] = dp0;
- dp.old_piece[0] = evalList.piece_with_id(dp0);
- evalList.put_piece(dp0, to, make_piece(us, KING));
- dp.new_piece[0] = evalList.piece_with_id(dp0);
- dp.pieceId[1] = dp1;
- dp.old_piece[1] = evalList.piece_with_id(dp1);
- evalList.put_piece(dp1, rto, make_piece(us, ROOK));
- dp.new_piece[1] = evalList.piece_with_id(dp1);
- }
- else
- {
- dp0 = piece_id_on(to);
- dp1 = piece_id_on(rto);
- evalList.put_piece(dp0, from, make_piece(us, KING));
- evalList.put_piece(dp1, rfrom, make_piece(us, ROOK));
- }
+ dp.piece[0] = make_piece(us, KING);
+ dp.from[0] = from;
+ dp.to[0] = to;
+ dp.piece[1] = make_piece(us, ROOK);
+ dp.from[1] = rfrom;
+ dp.to[1] = rto;
+ dp.dirty_num = 2;
}
// Remove both pieces first since squares could overlap in Chess960
assert(!checkers());
assert(&newSt != st);
- if (Eval::useNNUE)
- {
- std::memcpy(&newSt, st, sizeof(StateInfo));
- st->accumulator.computed_score = false;
- }
- else
- std::memcpy(&newSt, st, offsetof(StateInfo, accumulator));
+ std::memcpy(&newSt, st, offsetof(StateInfo, accumulator));
newSt.previous = st;
st = &newSt;
+ st->dirtyPiece.dirty_num = 0;
+ st->dirtyPiece.piece[0] = NO_PIECE; // Avoid checks in UpdateAccumulator()
+ st->accumulator.state[WHITE] = Eval::NNUE::EMPTY;
+ st->accumulator.state[BLACK] = Eval::NNUE::EMPTY;
+
if (st->epSquare != SQ_NONE)
{
st->key ^= Zobrist::enpassant[file_of(st->epSquare)];
// Don't allow pinned pieces to attack (except the king) as long as
// there are pinners on their original square.
- if (st->pinners[~stm] & occupied)
- stmAttackers &= ~st->blockersForKing[stm];
+ if (pinners(~stm) & occupied)
+ stmAttackers &= ~blockers_for_king(stm);
if (!stmAttackers)
break;
assert(0 && "pos_is_ok: Bitboards");
StateInfo si = *st;
+ ASSERT_ALIGNED(&si, Eval::NNUE::kCacheLineSize);
+
set_state(&si);
if (std::memcmp(&si, st, sizeof(StateInfo)))
assert(0 && "pos_is_ok: State");
for (Piece pc : Pieces)
- {
if ( pieceCount[pc] != popcount(pieces(color_of(pc), type_of(pc)))
|| pieceCount[pc] != std::count(board, board + SQUARE_NB, pc))
assert(0 && "pos_is_ok: Pieces");
- for (int i = 0; i < pieceCount[pc]; ++i)
- if (board[pieceList[pc][i]] != pc || index[pieceList[pc][i]] != i)
- assert(0 && "pos_is_ok: Index");
- }
-
for (Color c : { WHITE, BLACK })
for (CastlingRights cr : {c & KING_SIDE, c & QUEEN_SIDE})
{
bool empty(Square s) const;
template<PieceType Pt> int count(Color c) const;
template<PieceType Pt> int count() const;
- template<PieceType Pt> const Square* squares(Color c) const;
template<PieceType Pt> Square square(Color c) const;
bool is_on_semiopen_file(Color c, Square s) const;
Bitboard checkers() const;
Bitboard blockers_for_king(Color c) const;
Bitboard check_squares(PieceType pt) const;
+ Bitboard pinners(Color c) const;
bool is_discovery_check_on_king(Color c, Move m) const;
// Attacks to/from a given square
// Used by NNUE
StateInfo* state() const;
- const EvalList* eval_list() const;
private:
// Initialization helpers (used while setting up a position)
template<bool Do>
void do_castling(Color us, Square from, Square& to, Square& rfrom, Square& rto);
- // ID of a piece on a given square
- PieceId piece_id_on(Square sq) const;
-
// Data members
Piece board[SQUARE_NB];
Bitboard byTypeBB[PIECE_TYPE_NB];
Bitboard byColorBB[COLOR_NB];
int pieceCount[PIECE_NB];
- Square pieceList[PIECE_NB][16];
- int index[SQUARE_NB];
int castlingRightsMask[SQUARE_NB];
Square castlingRookSquare[CASTLING_RIGHT_NB];
Bitboard castlingPath[CASTLING_RIGHT_NB];
Thread* thisThread;
StateInfo* st;
bool chess960;
-
- // List of pieces used in NNUE evaluation function
- EvalList evalList;
};
namespace PSQT {
return count<Pt>(WHITE) + count<Pt>(BLACK);
}
-template<PieceType Pt> inline const Square* Position::squares(Color c) const {
- return pieceList[make_piece(c, Pt)];
-}
-
template<PieceType Pt> inline Square Position::square(Color c) const {
- assert(pieceCount[make_piece(c, Pt)] == 1);
- return squares<Pt>(c)[0];
+ assert(count<Pt>(c) == 1);
+ return lsb(pieces(c, Pt));
}
inline Square Position::ep_square() const {
return st->blockersForKing[c];
}
+inline Bitboard Position::pinners(Color c) const {
+ return st->pinners[c];
+}
+
inline Bitboard Position::check_squares(PieceType pt) const {
return st->checkSquares[pt];
}
board[s] = pc;
byTypeBB[ALL_PIECES] |= byTypeBB[type_of(pc)] |= s;
byColorBB[color_of(pc)] |= s;
- index[s] = pieceCount[pc]++;
- pieceList[pc][index[s]] = s;
+ pieceCount[pc]++;
pieceCount[make_piece(color_of(pc), ALL_PIECES)]++;
psq += PSQT::psq[pc][s];
}
inline void Position::remove_piece(Square s) {
- // WARNING: This is not a reversible operation. If we remove a piece in
- // do_move() and then replace it in undo_move() we will put it at the end of
- // the list and not in its original place, it means index[] and pieceList[]
- // are not invariant to a do_move() + undo_move() sequence.
Piece pc = board[s];
byTypeBB[ALL_PIECES] ^= s;
byTypeBB[type_of(pc)] ^= s;
byColorBB[color_of(pc)] ^= s;
/* board[s] = NO_PIECE; Not needed, overwritten by the capturing one */
- Square lastSquare = pieceList[pc][--pieceCount[pc]];
- index[lastSquare] = index[s];
- pieceList[pc][index[lastSquare]] = lastSquare;
- pieceList[pc][pieceCount[pc]] = SQ_NONE;
+ pieceCount[pc]--;
pieceCount[make_piece(color_of(pc), ALL_PIECES)]--;
psq -= PSQT::psq[pc][s];
}
inline void Position::move_piece(Square from, Square to) {
- // index[from] is not updated and becomes stale. This works as long as index[]
- // is accessed just by known occupied squares.
Piece pc = board[from];
Bitboard fromTo = from | to;
byTypeBB[ALL_PIECES] ^= fromTo;
byColorBB[color_of(pc)] ^= fromTo;
board[from] = NO_PIECE;
board[to] = pc;
- index[to] = index[from];
- pieceList[pc][index[to]] = to;
psq += PSQT::psq[pc][to] - PSQT::psq[pc][from];
}
return st;
}
-inline const EvalList* Position::eval_list() const {
-
- return &evalList;
-}
-
-inline PieceId Position::piece_id_on(Square sq) const
-{
-
- assert(piece_on(sq) != NO_PIECE);
-
- PieceId pid = evalList.piece_id_list[sq];
- assert(is_ok(pid));
-
- return pid;
-}
-
#endif // #ifndef POSITION_H_INCLUDED
constexpr uint64_t TtHitAverageResolution = 1024;
// Razor and futility margins
- constexpr int RazorMargin = 527;
+ constexpr int RazorMargin = 510;
Value futility_margin(Depth d, bool improving) {
- return Value(227 * (d - improving));
+ return Value(234 * (d - improving));
}
// Reductions lookup table, initialized at startup
Depth reduction(bool i, Depth d, int mn) {
int r = Reductions[d] * Reductions[mn];
- return (r + 570) / 1024 + (!i && r > 1018);
+ return (r + 503) / 1024 + (!i && r > 915);
}
constexpr int futility_move_count(bool improving, Depth depth) {
// History and stats update bonus, based on depth
int stat_bonus(Depth d) {
- return d > 15 ? 27 : 17 * d * d + 133 * d - 134;
+ return d > 13 ? 29 : 17 * d * d + 134 * d - 134;
}
// Add a small random component to draw evaluations to avoid 3fold-blindness
uint64_t perft(Position& pos, Depth depth) {
StateInfo st;
+ ASSERT_ALIGNED(&st, Eval::NNUE::kCacheLineSize);
+
uint64_t cnt, nodes = 0;
const bool leaf = (depth == 2);
void Search::init() {
for (int i = 1; i < MAX_MOVES; ++i)
- Reductions[i] = int((24.8 + std::log(Threads.size())) * std::log(i));
+ Reductions[i] = int((21.3 + 2 * std::log(Threads.size())) * std::log(i + 0.25 * std::log(i)));
}
Time.init(Limits, us, rootPos.game_ply());
TT.new_search();
- Eval::verify_NNUE();
+ Eval::NNUE::verify();
if (rootMoves.empty())
{
// for match (TC 60+0.6) results spanning a wide range of k values.
PRNG rng(now());
double floatLevel = Options["UCI_LimitStrength"] ?
- Utility::clamp(std::pow((Options["UCI_Elo"] - 1346.6) / 143.4, 1 / 0.806), 0.0, 20.0) :
+ std::clamp(std::pow((Options["UCI_Elo"] - 1346.6) / 143.4, 1 / 0.806), 0.0, 20.0) :
double(Options["Skill Level"]);
int intLevel = int(floatLevel) +
((floatLevel - int(floatLevel)) * 1024 > rng.rand<unsigned>() % 1024 ? 1 : 0);
if (rootDepth >= 4)
{
Value prev = rootMoves[pvIdx].previousScore;
- delta = Value(19);
+ delta = Value(17);
alpha = std::max(prev - delta,-VALUE_INFINITE);
beta = std::min(prev + delta, VALUE_INFINITE);
// Adjust contempt based on root move's previousScore (dynamic contempt)
- int dct = ct + (110 - ct / 2) * prev / (abs(prev) + 140);
+ int dct = ct + (113 - ct / 2) * prev / (abs(prev) + 147);
contempt = (us == WHITE ? make_score(dct, dct / 2)
: -make_score(dct, dct / 2));
// Start with a small aspiration window and, in the case of a fail
// high/low, re-search with a bigger window until we don't fail
// high/low anymore.
- int failedHighCnt = 0;
+ failedHighCnt = 0;
while (true)
{
Depth adjustedDepth = std::max(1, rootDepth - failedHighCnt - searchAgainCounter);
++failedHighCnt;
}
else
- {
- ++rootMoves[pvIdx].bestMoveCount;
break;
- }
delta += delta / 4 + 5;
&& !Threads.stop
&& !mainThread->stopOnPonderhit)
{
- double fallingEval = (296 + 6 * (mainThread->bestPreviousScore - bestValue)
- + 6 * (mainThread->iterValue[iterIdx] - bestValue)) / 725.0;
- fallingEval = Utility::clamp(fallingEval, 0.5, 1.5);
+ double fallingEval = (318 + 6 * (mainThread->bestPreviousScore - bestValue)
+ + 6 * (mainThread->iterValue[iterIdx] - bestValue)) / 825.0;
+ fallingEval = std::clamp(fallingEval, 0.5, 1.5);
// If the bestMove is stable over several iterations, reduce time accordingly
- timeReduction = lastBestMoveDepth + 10 < completedDepth ? 1.92 : 0.95;
- double reduction = (1.47 + mainThread->previousTimeReduction) / (2.22 * timeReduction);
+ timeReduction = lastBestMoveDepth + 9 < completedDepth ? 1.92 : 0.95;
+ double reduction = (1.47 + mainThread->previousTimeReduction) / (2.32 * timeReduction);
// Use part of the gained time from a previous stable move for the current move
for (Thread* th : Threads)
totBestMoveChanges += th->bestMoveChanges;
th->bestMoveChanges = 0;
}
- double bestMoveInstability = 1 + totBestMoveChanges / Threads.size();
+ double bestMoveInstability = 1 + 2 * totBestMoveChanges / Threads.size();
- double totalTime = rootMoves.size() == 1 ? 0 :
- Time.optimum() * fallingEval * reduction * bestMoveInstability;
+ double totalTime = Time.optimum() * fallingEval * reduction * bestMoveInstability;
- // Stop the search if we have exceeded the totalTime, at least 1ms search
+ // Cap used time in case of a single legal move for a better viewer experience in tournaments
+ // yielding correct scores and sufficiently fast moves.
+ if (rootMoves.size() == 1)
+ totalTime = std::min(500.0, totalTime);
+
+ // Stop the search if we have exceeded the totalTime
if (Time.elapsed() > totalTime)
{
// If we are allowed to ponder do not stop the search now but
}
else if ( Threads.increaseDepth
&& !mainThread->ponder
- && Time.elapsed() > totalTime * 0.56)
+ && Time.elapsed() > totalTime * 0.58)
Threads.increaseDepth = false;
else
Threads.increaseDepth = true;
constexpr bool PvNode = NT == PV;
const bool rootNode = PvNode && ss->ply == 0;
+ const Depth maxNextDepth = rootNode ? depth : depth + 1;
// Check if we have an upcoming move which draws by repetition, or
// if the opponent had an alternative move earlier to this position.
Move pv[MAX_PLY+1], capturesSearched[32], quietsSearched[64];
StateInfo st;
+ ASSERT_ALIGNED(&st, Eval::NNUE::kCacheLineSize);
+
TTEntry* tte;
Key posKey;
Move ttMove, move, excludedMove, bestMove;
Depth extension, newDepth;
- Value bestValue, value, ttValue, eval, maxValue, probcutBeta;
- bool ttHit, ttPv, formerPv, givesCheck, improving, didLMR, priorCapture;
+ Value bestValue, value, ttValue, eval, maxValue, probCutBeta;
+ bool formerPv, givesCheck, improving, didLMR, priorCapture;
bool captureOrPromotion, doFullDepthSearch, moveCountPruning,
ttCapture, singularQuietLMR;
Piece movedPiece;
assert(0 <= ss->ply && ss->ply < MAX_PLY);
(ss+1)->ply = ss->ply + 1;
+ (ss+1)->ttPv = false;
(ss+1)->excludedMove = bestMove = MOVE_NONE;
(ss+2)->killers[0] = (ss+2)->killers[1] = MOVE_NONE;
Square prevSq = to_sq((ss-1)->currentMove);
// starts with statScore = 0. Later grandchildren start with the last calculated
// statScore of the previous grandchild. This influences the reduction rules in
// LMR which are based on the statScore of parent position.
- if (rootNode)
- (ss+4)->statScore = 0;
- else
+ if (!rootNode)
(ss+2)->statScore = 0;
// Step 4. Transposition table lookup. We don't want the score of a partial
// position key in case of an excluded move.
excludedMove = ss->excludedMove;
posKey = excludedMove == MOVE_NONE ? pos.key() : pos.key() ^ make_key(excludedMove);
- tte = TT.probe(posKey, ttHit);
- ttValue = ttHit ? value_from_tt(tte->value(), ss->ply, pos.rule50_count()) : VALUE_NONE;
+ tte = TT.probe(posKey, ss->ttHit);
+ ttValue = ss->ttHit ? value_from_tt(tte->value(), ss->ply, pos.rule50_count()) : VALUE_NONE;
ttMove = rootNode ? thisThread->rootMoves[thisThread->pvIdx].pv[0]
- : ttHit ? tte->move() : MOVE_NONE;
- ttPv = PvNode || (ttHit && tte->is_pv());
- formerPv = ttPv && !PvNode;
+ : ss->ttHit ? tte->move() : MOVE_NONE;
+ if (!excludedMove)
+ ss->ttPv = PvNode || (ss->ttHit && tte->is_pv());
+ formerPv = ss->ttPv && !PvNode;
- if ( ttPv
+ // Update low ply history for previous move if we are near root and position is or has been in PV
+ if ( ss->ttPv
&& depth > 12
&& ss->ply - 1 < MAX_LPH
&& !priorCapture
// thisThread->ttHitAverage can be used to approximate the running average of ttHit
thisThread->ttHitAverage = (TtHitAverageWindow - 1) * thisThread->ttHitAverage / TtHitAverageWindow
- + TtHitAverageResolution * ttHit;
+ + TtHitAverageResolution * ss->ttHit;
// At non-PV nodes we check for an early TT cutoff
if ( !PvNode
- && ttHit
+ && ss->ttHit
&& tte->depth() >= depth
&& ttValue != VALUE_NONE // Possible in case of TT access race
&& (ttValue >= beta ? (tte->bound() & BOUND_LOWER)
{
if (ttValue >= beta)
{
+ // Bonus for a quiet ttMove that fails high
if (!pos.capture_or_promotion(ttMove))
update_quiet_stats(pos, ss, ttMove, stat_bonus(depth), depth);
}
}
+ // Partial workaround for the graph history interaction problem
+ // For high rule50 counts don't produce transposition table cutoffs.
if (pos.rule50_count() < 90)
return ttValue;
}
if ( b == BOUND_EXACT
|| (b == BOUND_LOWER ? value >= beta : value <= alpha))
{
- tte->save(posKey, value_to_tt(value, ss->ply), ttPv, b,
+ tte->save(posKey, value_to_tt(value, ss->ply), ss->ttPv, b,
std::min(MAX_PLY - 1, depth + 6),
MOVE_NONE, VALUE_NONE);
improving = false;
goto moves_loop;
}
- else if (ttHit)
+ else if (ss->ttHit)
{
// Never assume anything about values stored in TT
ss->staticEval = eval = tte->eval();
if (eval == VALUE_NONE)
ss->staticEval = eval = evaluate(pos);
+ // Randomize draw evaluation
if (eval == VALUE_DRAW)
eval = value_draw(thisThread);
}
else
{
+ // In case of null move search use previous static eval with a different sign
+ // and addition of two tempos
if ((ss-1)->currentMove != MOVE_NULL)
- {
- int bonus = -(ss-1)->statScore / 512;
-
- ss->staticEval = eval = evaluate(pos) + bonus;
- }
+ ss->staticEval = eval = evaluate(pos);
else
ss->staticEval = eval = -(ss-1)->staticEval + 2 * Tempo;
- tte->save(posKey, VALUE_NONE, ttPv, BOUND_NONE, DEPTH_NONE, MOVE_NONE, eval);
+ // Save static evaluation into transposition table
+ tte->save(posKey, VALUE_NONE, ss->ttPv, BOUND_NONE, DEPTH_NONE, MOVE_NONE, eval);
+ }
+
+ // Use static evaluation difference to improve quiet move ordering
+ if (is_ok((ss-1)->currentMove) && !(ss-1)->inCheck && !priorCapture)
+ {
+ int bonus = std::clamp(-depth * 4 * int((ss-1)->staticEval + ss->staticEval - 2 * Tempo), -1000, 1000);
+ thisThread->mainHistory[~us][from_to((ss-1)->currentMove)] << bonus;
}
// Step 7. Razoring (~1 Elo)
&& eval <= alpha - RazorMargin)
return qsearch<NT>(pos, ss, alpha, beta);
- improving = (ss-2)->staticEval == VALUE_NONE ? (ss->staticEval > (ss-4)->staticEval
- || (ss-4)->staticEval == VALUE_NONE) : ss->staticEval > (ss-2)->staticEval;
+ // Set up improving flag that is used in various pruning heuristics
+ // We define position as improving if static evaluation of position is better
+ // than the previous static evaluation at our turn
+ // In case of us being in check at our previous move we look at move prior to it
+ improving = (ss-2)->staticEval == VALUE_NONE
+ ? ss->staticEval > (ss-4)->staticEval || (ss-4)->staticEval == VALUE_NONE
+ : ss->staticEval > (ss-2)->staticEval;
// Step 8. Futility pruning: child node (~50 Elo)
if ( !PvNode
// Step 9. Null move search with verification search (~40 Elo)
if ( !PvNode
&& (ss-1)->currentMove != MOVE_NULL
- && (ss-1)->statScore < 23824
+ && (ss-1)->statScore < 22977
&& eval >= beta
&& eval >= ss->staticEval
- && ss->staticEval >= beta - 33 * depth - 33 * improving + 112 * ttPv + 311
+ && ss->staticEval >= beta - 30 * depth - 28 * improving + 84 * ss->ttPv + 168
&& !excludedMove
&& pos.non_pawn_material(us)
&& (ss->ply >= thisThread->nmpMinPly || us != thisThread->nmpColor))
assert(eval - beta >= 0);
// Null move dynamic reduction based on depth and value
- Depth R = (737 + 77 * depth) / 246 + std::min(int(eval - beta) / 192, 3);
+ Depth R = (1015 + 85 * depth) / 256 + std::min(int(eval - beta) / 191, 3);
ss->currentMove = MOVE_NULL;
ss->continuationHistory = &thisThread->continuationHistory[0][0][NO_PIECE][0];
if (nullValue >= VALUE_TB_WIN_IN_MAX_PLY)
nullValue = beta;
- if (thisThread->nmpMinPly || (abs(beta) < VALUE_KNOWN_WIN && depth < 13))
+ if (thisThread->nmpMinPly || (abs(beta) < VALUE_KNOWN_WIN && depth < 14))
return nullValue;
assert(!thisThread->nmpMinPly); // Recursive verification is not allowed
}
}
- probcutBeta = beta + 176 - 49 * improving;
+ probCutBeta = beta + 183 - 49 * improving;
// Step 10. ProbCut (~10 Elo)
// If we have a good enough capture and a reduced search returns a value
if ( !PvNode
&& depth > 4
&& abs(beta) < VALUE_TB_WIN_IN_MAX_PLY
- && !( ttHit
+ // if value from transposition table is lower than probCutBeta, don't attempt probCut
+ // there and in further interactions with transposition table cutoff depth is set to depth - 3
+ // because probCut search has depth set to depth - 4 but we also do a move before it
+ // so effective depth is equal to depth - 3
+ && !( ss->ttHit
&& tte->depth() >= depth - 3
&& ttValue != VALUE_NONE
- && ttValue < probcutBeta))
+ && ttValue < probCutBeta))
{
- if ( ttHit
+ // if ttMove is a capture and value from transposition table is good enough produce probCut
+ // cutoff without digging into actual probCut search
+ if ( ss->ttHit
&& tte->depth() >= depth - 3
&& ttValue != VALUE_NONE
- && ttValue >= probcutBeta
+ && ttValue >= probCutBeta
&& ttMove
&& pos.capture_or_promotion(ttMove))
- return probcutBeta;
+ return probCutBeta;
- assert(probcutBeta < VALUE_INFINITE);
- MovePicker mp(pos, ttMove, probcutBeta - ss->staticEval, &captureHistory);
+ assert(probCutBeta < VALUE_INFINITE);
+ MovePicker mp(pos, ttMove, probCutBeta - ss->staticEval, &captureHistory);
int probCutCount = 0;
+ bool ttPv = ss->ttPv;
+ ss->ttPv = false;
while ( (move = mp.next_move()) != MOVE_NONE
&& probCutCount < 2 + 2 * cutNode)
pos.do_move(move, st);
// Perform a preliminary qsearch to verify that the move holds
- value = -qsearch<NonPV>(pos, ss+1, -probcutBeta, -probcutBeta+1);
+ value = -qsearch<NonPV>(pos, ss+1, -probCutBeta, -probCutBeta+1);
// If the qsearch held, perform the regular search
- if (value >= probcutBeta)
- value = -search<NonPV>(pos, ss+1, -probcutBeta, -probcutBeta+1, depth - 4, !cutNode);
+ if (value >= probCutBeta)
+ value = -search<NonPV>(pos, ss+1, -probCutBeta, -probCutBeta+1, depth - 4, !cutNode);
pos.undo_move(move);
- if (value >= probcutBeta)
+ if (value >= probCutBeta)
{
- if ( !(ttHit
+ // If the transposition table doesn't have info of equal or greater depth, write probCut data into it
+ if ( !(ss->ttHit
&& tte->depth() >= depth - 3
&& ttValue != VALUE_NONE))
tte->save(posKey, value_to_tt(value, ss->ply), ttPv,
return value;
}
}
+ ss->ttPv = ttPv;
}
- // Step 11. Internal iterative deepening (~1 Elo)
- if (depth >= 7 && !ttMove)
- {
- search<NT>(pos, ss, alpha, beta, depth - 7, cutNode);
-
- tte = TT.probe(posKey, ttHit);
- ttValue = ttHit ? value_from_tt(tte->value(), ss->ply, pos.rule50_count()) : VALUE_NONE;
- ttMove = ttHit ? tte->move() : MOVE_NONE;
- }
+ // Step 11. If the position is not in TT, decrease depth by 2
+ if ( PvNode
+ && depth >= 6
+ && !ttMove)
+ depth -= 2;
moves_loop: // When in check, search starts from here
continue;
// Futility pruning: parent node (~5 Elo)
- if ( lmrDepth < 8
+ if ( lmrDepth < 7
&& !ss->inCheck
- && ss->staticEval + 284 + 188 * lmrDepth <= alpha
+ && ss->staticEval + 266 + 170 * lmrDepth <= alpha
&& (*contHist[0])[movedPiece][to_sq(move)]
+ (*contHist[1])[movedPiece][to_sq(move)]
+ (*contHist[3])[movedPiece][to_sq(move)]
- + (*contHist[5])[movedPiece][to_sq(move)] / 2 < 28388)
+ + (*contHist[5])[movedPiece][to_sq(move)] / 2 < 27376)
continue;
// Prune moves with negative SEE (~20 Elo)
- if (!pos.see_ge(move, Value(-(29 - std::min(lmrDepth, 17)) * lmrDepth * lmrDepth)))
+ if (!pos.see_ge(move, Value(-(30 - std::min(lmrDepth, 18)) * lmrDepth * lmrDepth)))
continue;
}
else
&& captureHistory[movedPiece][to_sq(move)][type_of(pos.piece_on(to_sq(move)))] < 0)
continue;
- // Futility pruning for captures
- if ( !givesCheck
- && lmrDepth < 6
- && !(PvNode && abs(bestValue) < 2)
- && PieceValue[MG][type_of(movedPiece)] >= PieceValue[MG][type_of(pos.piece_on(to_sq(move)))]
- && !ss->inCheck
- && ss->staticEval + 267 + 391 * lmrDepth
- + PieceValue[MG][type_of(pos.piece_on(to_sq(move)))] <= alpha)
- continue;
-
- // See based pruning
- if (!pos.see_ge(move, Value(-202) * depth)) // (~25 Elo)
+ // SEE based pruning
+ if (!pos.see_ge(move, Value(-213) * depth)) // (~25 Elo)
continue;
}
}
// then that move is singular and should be extended. To verify this we do
// a reduced search on all the other moves but the ttMove and if the
// result is lower than ttValue minus a margin, then we will extend the ttMove.
- if ( depth >= 6
+ if ( depth >= 7
&& move == ttMove
&& !rootNode
&& !excludedMove // Avoid recursive singular search
/* && ttValue != VALUE_NONE Already implicit in the next condition */
&& abs(ttValue) < VALUE_KNOWN_WIN
&& (tte->bound() & BOUND_LOWER)
- && tte->depth() >= depth - 3
- && pos.legal(move))
+ && tte->depth() >= depth - 3)
{
Value singularBeta = ttValue - ((formerPv + 4) * depth) / 2;
Depth singularDepth = (depth - 1 + 3 * formerPv) / 2;
&& (pos.is_discovery_check_on_king(~us, move) || pos.see_ge(move)))
extension = 1;
- // Passed pawn extension
- else if ( move == ss->killers[0]
- && pos.advanced_pawn_push(move)
- && pos.pawn_passed(us, to_sq(move)))
- extension = 1;
-
// Last captures extension
else if ( PieceValue[EG][pos.captured_piece()] > PawnValueEg
&& pos.non_pawn_material() <= 2 * RookValueMg)
extension = 1;
- // Castling extension
- if (type_of(move) == CASTLING)
- extension = 1;
-
// Late irreversible move extension
if ( move == ttMove
&& pos.rule50_count() > 80
// re-searched at full depth.
if ( depth >= 3
&& moveCount > 1 + 2 * rootNode
- && (!rootNode || thisThread->best_move_count(move) == 0)
&& ( !captureOrPromotion
|| moveCountPruning
|| ss->staticEval + PieceValue[EG][pos.captured_piece()] <= alpha
|| cutNode
- || thisThread->ttHitAverage < 415 * TtHitAverageResolution * TtHitAverageWindow / 1024))
+ || thisThread->ttHitAverage < 432 * TtHitAverageResolution * TtHitAverageWindow / 1024))
{
Depth r = reduction(improving, depth, moveCount);
- // Decrease reduction at non-check cut nodes for second move at low depths
- if ( cutNode
- && depth <= 10
- && moveCount <= 2
- && !ss->inCheck)
- r--;
-
// Decrease reduction if the ttHit running average is large
- if (thisThread->ttHitAverage > 473 * TtHitAverageResolution * TtHitAverageWindow / 1024)
+ if (thisThread->ttHitAverage > 537 * TtHitAverageResolution * TtHitAverageWindow / 1024)
r--;
- // Reduction if other threads are searching this position
+ // Increase reduction if other threads are searching this position
if (th.marked())
r++;
// Decrease reduction if position is or has been on the PV (~10 Elo)
- if (ttPv)
+ if (ss->ttPv)
r -= 2;
+ // Increase reduction at root and non-PV nodes when the best move does not change frequently
+ if ((rootNode || !PvNode) && thisThread->rootDepth > 10 && thisThread->bestMoveChanges <= 2)
+ r++;
+
+ // More reductions for late moves if position was not in previous PV
if (moveCountPruning && !formerPv)
r++;
// Decrease reduction if ttMove has been singularly extended (~3 Elo)
if (singularQuietLMR)
- r -= 1 + formerPv;
+ r--;
if (!captureOrPromotion)
{
if (ttCapture)
r++;
+ // Increase reduction at root if failing high
+ r += rootNode ? thisThread->failedHighCnt * thisThread->failedHighCnt * moveCount / 512 : 0;
+
// Increase reduction for cut nodes (~10 Elo)
if (cutNode)
r += 2;
// hence break make_move(). (~2 Elo)
else if ( type_of(move) == NORMAL
&& !pos.see_ge(reverse_move(move)))
- r -= 2 + ttPv - (type_of(movedPiece) == PAWN);
+ r -= 2 + ss->ttPv - (type_of(movedPiece) == PAWN);
ss->statScore = thisThread->mainHistory[us][from_to(move)]
+ (*contHist[0])[movedPiece][to_sq(move)]
+ (*contHist[1])[movedPiece][to_sq(move)]
+ (*contHist[3])[movedPiece][to_sq(move)]
- - 4826;
+ - 5287;
// Decrease/increase reduction by comparing opponent's stat score (~10 Elo)
- if (ss->statScore >= -100 && (ss-1)->statScore < -112)
+ if (ss->statScore >= -105 && (ss-1)->statScore < -103)
r--;
- else if ((ss-1)->statScore >= -125 && ss->statScore < -138)
+ else if ((ss-1)->statScore >= -122 && ss->statScore < -129)
r++;
// Decrease/increase reduction for moves with a good/bad history (~30 Elo)
- r -= ss->statScore / 14615;
+ r -= ss->statScore / 14884;
}
else
{
- // Increase reduction for captures/promotions if late move and at low depth
- if (depth < 8 && moveCount > 2)
- r++;
-
- // Unless giving check, this capture is likely bad
- if ( !givesCheck
- && ss->staticEval + PieceValue[EG][pos.captured_piece()] + 211 * depth <= alpha)
- r++;
+ // Unless giving check, this capture is likely bad
+ if ( !givesCheck
+ && ss->staticEval + PieceValue[EG][pos.captured_piece()] + 210 * depth <= alpha)
+ r++;
}
- Depth d = Utility::clamp(newDepth - r, 1, newDepth);
+ Depth d = std::clamp(newDepth - r, 1, newDepth);
value = -search<NonPV>(pos, ss+1, -(alpha+1), -alpha, d, true);
{
value = -search<NonPV>(pos, ss+1, -(alpha+1), -alpha, newDepth, !cutNode);
+ // If the move passed LMR update its stats
if (didLMR && !captureOrPromotion)
{
int bonus = value > alpha ? stat_bonus(newDepth)
: -stat_bonus(newDepth);
- if (move == ss->killers[0])
- bonus += bonus / 4;
-
update_continuation_histories(ss, movedPiece, to_sq(move), bonus);
}
}
(ss+1)->pv = pv;
(ss+1)->pv[0] = MOVE_NONE;
- value = -search<PV>(pos, ss+1, -beta, -alpha, newDepth, false);
+ value = -search<PV>(pos, ss+1, -beta, -alpha,
+ std::min(maxNextDepth, newDepth), false);
}
// Step 18. Undo move
rm.pv.push_back(*m);
// We record how often the best move has been changed in each
- // iteration. This information is used for time management: when
- // the best move changes frequently, we allocate some more time.
+ // iteration. This information is used for time management and LMR
if (moveCount > 1)
++thisThread->bestMoveChanges;
}
}
}
+ // If the move is worse than some previously searched move, remember it to update its stats later
if (move != bestMove)
{
if (captureOrPromotion && captureCount < 32)
bestValue = excludedMove ? alpha
: ss->inCheck ? mated_in(ss->ply) : VALUE_DRAW;
+ // If there is a move which produces search value greater than alpha we update stats of searched moves
else if (bestMove)
update_all_stats(pos, ss, bestMove, bestValue, beta, prevSq,
quietsSearched, quietCount, capturesSearched, captureCount, depth);
if (PvNode)
bestValue = std::min(bestValue, maxValue);
+ // If no good move is found and the previous position was ttPv, then the previous
+ // opponent move is probably good and the new position is added to the search tree.
+ if (bestValue <= alpha)
+ ss->ttPv = ss->ttPv || ((ss-1)->ttPv && depth > 3);
+ // Otherwise, a counter move has been found and if the position is the last leaf
+ // in the search tree, remove the position from the search tree.
+ else if (depth > 3)
+ ss->ttPv = ss->ttPv && (ss+1)->ttPv;
+
+ // Write gathered information in transposition table
if (!excludedMove && !(rootNode && thisThread->pvIdx))
- tte->save(posKey, value_to_tt(bestValue, ss->ply), ttPv,
+ tte->save(posKey, value_to_tt(bestValue, ss->ply), ss->ttPv,
bestValue >= beta ? BOUND_LOWER :
PvNode && bestMove ? BOUND_EXACT : BOUND_UPPER,
depth, bestMove, ss->staticEval);
Move pv[MAX_PLY+1];
StateInfo st;
+ ASSERT_ALIGNED(&st, Eval::NNUE::kCacheLineSize);
+
TTEntry* tte;
Key posKey;
Move ttMove, move, bestMove;
Depth ttDepth;
Value bestValue, value, ttValue, futilityValue, futilityBase, oldAlpha;
- bool ttHit, pvHit, givesCheck, captureOrPromotion;
+ bool pvHit, givesCheck, captureOrPromotion;
int moveCount;
if (PvNode)
: DEPTH_QS_NO_CHECKS;
// Transposition table lookup
posKey = pos.key();
- tte = TT.probe(posKey, ttHit);
- ttValue = ttHit ? value_from_tt(tte->value(), ss->ply, pos.rule50_count()) : VALUE_NONE;
- ttMove = ttHit ? tte->move() : MOVE_NONE;
- pvHit = ttHit && tte->is_pv();
+ tte = TT.probe(posKey, ss->ttHit);
+ ttValue = ss->ttHit ? value_from_tt(tte->value(), ss->ply, pos.rule50_count()) : VALUE_NONE;
+ ttMove = ss->ttHit ? tte->move() : MOVE_NONE;
+ pvHit = ss->ttHit && tte->is_pv();
if ( !PvNode
- && ttHit
+ && ss->ttHit
&& tte->depth() >= ttDepth
&& ttValue != VALUE_NONE // Only in case of TT access race
&& (ttValue >= beta ? (tte->bound() & BOUND_LOWER)
}
else
{
- if (ttHit)
+ if (ss->ttHit)
{
// Never assume anything about values stored in TT
if ((ss->staticEval = bestValue = tte->eval()) == VALUE_NONE)
bestValue = ttValue;
}
else
+ // In case of null move search use previous static eval with a different sign
+ // and addition of two tempos
ss->staticEval = bestValue =
(ss-1)->currentMove != MOVE_NULL ? evaluate(pos)
: -(ss-1)->staticEval + 2 * Tempo;
// Stand pat. Return immediately if static value is at least beta
if (bestValue >= beta)
{
- if (!ttHit)
+ // Save gathered info in transposition table
+ if (!ss->ttHit)
tte->save(posKey, value_to_tt(bestValue, ss->ply), false, BOUND_LOWER,
DEPTH_NONE, MOVE_NONE, ss->staticEval);
if (PvNode && bestValue > alpha)
alpha = bestValue;
- futilityBase = bestValue + 141;
+ futilityBase = bestValue + 155;
}
const PieceToHistory* contHist[] = { (ss-1)->continuationHistory, (ss-2)->continuationHistory,
moveCount++;
// Futility pruning
- if ( !ss->inCheck
+ if ( bestValue > VALUE_TB_LOSS_IN_MAX_PLY
&& !givesCheck
&& futilityBase > -VALUE_KNOWN_WIN
&& !pos.advanced_pawn_push(move))
{
assert(type_of(move) != ENPASSANT); // Due to !pos.advanced_pawn_push
+ // moveCount pruning
+ if (moveCount > 2)
+ continue;
+
futilityValue = futilityBase + PieceValue[EG][pos.piece_on(to_sq(move))];
if (futilityValue <= alpha)
}
// Do not search moves with negative SEE values
- if ( !ss->inCheck && !pos.see_ge(move))
+ if ( bestValue > VALUE_TB_LOSS_IN_MAX_PLY
+ && !pos.see_ge(move))
continue;
// Speculative prefetch as early as possible
[pos.moved_piece(move)]
[to_sq(move)];
+ // CounterMove based pruning
+ if ( !captureOrPromotion
+ && bestValue > VALUE_TB_LOSS_IN_MAX_PLY
+ && (*contHist[0])[pos.moved_piece(move)][to_sq(move)] < CounterMovePruneThreshold
+ && (*contHist[1])[pos.moved_piece(move)][to_sq(move)] < CounterMovePruneThreshold)
+ continue;
+
// Make and search the move
pos.do_move(move, st, givesCheck);
value = -qsearch<NT>(pos, ss+1, -beta, -alpha, depth - 1);
// All legal moves have been searched. A special case: if we're in check
// and no legal moves were found, it is checkmate.
if (ss->inCheck && bestValue == -VALUE_INFINITE)
+ {
+ assert(!MoveList<LEGAL>(pos).size());
+
return mated_in(ss->ply); // Plies to mate from the root
+ }
+ // Save gathered info in transposition table
tte->save(posKey, value_to_tt(bestValue, ss->ply), pvHit,
bestValue >= beta ? BOUND_LOWER :
PvNode && bestValue > oldAlpha ? BOUND_EXACT : BOUND_UPPER,
if (!pos.capture_or_promotion(bestMove))
{
+ // Increase stats for the best move in case it was a quiet move
update_quiet_stats(pos, ss, bestMove, bonus2, depth);
- // Decrease all the non-best quiet moves
+ // Decrease stats for all non-best quiet moves
for (int i = 0; i < quietCount; ++i)
{
thisThread->mainHistory[us][from_to(quietsSearched[i])] << -bonus2;
}
}
else
+ // Increase stats for the best move in case it was a capture move
captureHistory[moved_piece][to_sq(bestMove)][captured] << bonus1;
- // Extra penalty for a quiet TT or main killer move in previous ply when it gets refuted
- if ( ((ss-1)->moveCount == 1 || ((ss-1)->currentMove == (ss-1)->killers[0]))
+ // Extra penalty for a quiet early move that was not a TT move or
+ // main killer move in previous ply when it gets refuted.
+ if ( ((ss-1)->moveCount == 1 + (ss-1)->ttHit || ((ss-1)->currentMove == (ss-1)->killers[0]))
&& !pos.captured_piece())
update_continuation_histories(ss-1, pos.piece_on(prevSq), prevSq, -bonus1);
- // Decrease all the non-best capture moves
+ // Decrease stats for all non-best capture moves
for (int i = 0; i < captureCount; ++i)
{
moved_piece = pos.moved_piece(capturesSearched[i]);
for (int i : {1, 2, 4, 6})
{
+ // Only update first 2 continuation histories if we are in check
if (ss->inCheck && i > 2)
break;
if (is_ok((ss-i)->currentMove))
void update_quiet_stats(const Position& pos, Stack* ss, Move move, int bonus, int depth) {
+ // Update killers
if (ss->killers[0] != move)
{
ss->killers[1] = ss->killers[0];
thisThread->mainHistory[us][from_to(move)] << bonus;
update_continuation_histories(ss, pos.moved_piece(move), to_sq(move), bonus);
+ // Penalty for reversed move in case of moved piece not being a pawn
if (type_of(pos.moved_piece(move)) != PAWN)
thisThread->mainHistory[us][from_to(reverse_move(move))] << -bonus;
+ // Update countermove history
if (is_ok((ss-1)->currentMove))
{
Square prevSq = to_sq((ss-1)->currentMove);
thisThread->counterMoves[pos.piece_on(prevSq)][prevSq] = move;
}
+ // Update low ply history
if (depth > 11 && ss->ply < MAX_LPH)
- thisThread->lowPlyHistory[ss->ply][from_to(move)] << stat_bonus(depth - 6);
+ thisThread->lowPlyHistory[ss->ply][from_to(move)] << stat_bonus(depth - 7);
}
// When playing with strength handicap, choose best move among a set of RootMoves
{
bool updated = rootMoves[i].score != -VALUE_INFINITE;
- if (depth == 1 && !updated)
+ if (depth == 1 && !updated && i > 0)
continue;
- Depth d = updated ? depth : depth - 1;
+ Depth d = updated ? depth : std::max(1, depth - 1);
Value v = updated ? rootMoves[i].score : rootMoves[i].previousScore;
+ if (v == -VALUE_INFINITE)
+ v = VALUE_ZERO;
+
bool tb = TB::RootInTB && abs(v) < VALUE_MATE_IN_MAX_PLY;
v = tb ? rootMoves[i].tbScore : v;
bool RootMove::extract_ponder_from_tt(Position& pos) {
StateInfo st;
+ ASSERT_ALIGNED(&st, Eval::NNUE::kCacheLineSize);
+
bool ttHit;
assert(pv.size() == 1);
if (RootInTB)
{
// Sort moves according to TB rank
- std::sort(rootMoves.begin(), rootMoves.end(),
+ std::stable_sort(rootMoves.begin(), rootMoves.end(),
[](const RootMove &a, const RootMove &b) { return a.tbRank > b.tbRank; } );
// Probe during search only if DTZ is not available and we are winning
int statScore;
int moveCount;
bool inCheck;
+ bool ttPv;
+ bool ttHit;
};
Value previousScore = -VALUE_INFINITE;
int selDepth = 0;
int tbRank = 0;
- int bestMoveCount = 0;
Value tbScore;
std::vector<Move> pv;
};
int MapA1D1D4[SQUARE_NB];
int MapKK[10][SQUARE_NB]; // [MapA1D1D4][SQUARE_NB]
-int Binomial[6][SQUARE_NB]; // [k][n] k elements from a set of n elements
+int Binomial[7][SQUARE_NB]; // [k][n] k elements from a set of n elements
int LeadPawnIdx[6][SQUARE_NB]; // [leadPawnsCnt][SQUARE_NB]
int LeadPawnsSize[6][4]; // [leadPawnsCnt][FILE_A..FILE_D]
*mapping = statbuf.st_size;
*baseAddress = mmap(nullptr, statbuf.st_size, PROT_READ, MAP_SHARED, fd, 0);
+#if defined(MADV_RANDOM)
madvise(*baseAddress, statbuf.st_size, MADV_RANDOM);
+#endif
::close(fd);
if (*baseAddress == MAP_FAILED)
if (entry->hasPawns) {
idx = LeadPawnIdx[leadPawnsCnt][squares[0]];
- std::sort(squares + 1, squares + leadPawnsCnt, pawns_comp);
+ std::stable_sort(squares + 1, squares + leadPawnsCnt, pawns_comp);
for (int i = 1; i < leadPawnsCnt; ++i)
idx += Binomial[i][MapPawns[squares[i]]];
while (d->groupLen[++next])
{
- std::sort(groupSq, groupSq + d->groupLen[next]);
+ std::stable_sort(groupSq, groupSq + d->groupLen[next]);
uint64_t n = 0;
// Map down a square if "comes later" than a square in the previous
Binomial[0][0] = 1;
for (int n = 1; n < 64; n++) // Squares
- for (int k = 0; k < 6 && k <= n; ++k) // Pieces
+ for (int k = 0; k < 7 && k <= n; ++k) // Pieces
Binomial[k][n] = (k > 0 ? Binomial[k - 1][n - 1] : 0)
+ (k < n ? Binomial[k ][n - 1] : 0);
}
-/// Thread::bestMoveCount(Move move) return best move counter for the given root move
-
-int Thread::best_move_count(Move move) const {
-
- auto rm = std::find(rootMoves.begin() + pvIdx,
- rootMoves.begin() + pvLast, move);
-
- return rm != rootMoves.begin() + pvLast ? rm->bestMoveCount : 0;
-}
-
-
/// Thread::clear() reset histories, usually before a new game
void Thread::clear() {
// We use Position::set() to set root position across threads. But there are
// some StateInfo fields (previous, pliesFromNull, capturedPiece) that cannot
- // be deduced from a fen string, so set() clears them and to not lose the info
- // we need to backup and later restore setupStates->back(). Note that setupStates
- // is shared by threads but is accessed in read-only mode.
- StateInfo tmp = setupStates->back();
-
+ // be deduced from a fen string, so set() clears them and they are set from
+ // setupStates->back() later. The rootState is per thread, earlier states are shared
+ // since they are read-only.
for (Thread* th : *this)
{
th->nodes = th->tbHits = th->nmpMinPly = th->bestMoveChanges = 0;
th->rootDepth = th->completedDepth = 0;
th->rootMoves = rootMoves;
- th->rootPos.set(pos.fen(), pos.is_chess960(), &setupStates->back(), th);
+ th->rootPos.set(pos.fen(), pos.is_chess960(), &th->rootState, th);
+ th->rootState = setupStates->back();
}
- setupStates->back() = tmp;
-
main()->start_searching();
}
votes[th->rootMoves[0].pv[0]] +=
(th->rootMoves[0].score - minScore + 14) * int(th->completedDepth);
- if (abs(bestThread->rootMoves[0].score) >= VALUE_TB_WIN_IN_MAX_PLY)
- {
- // Make sure we pick the shortest mate / TB conversion or stave off mate the longest
- if (th->rootMoves[0].score > bestThread->rootMoves[0].score)
- bestThread = th;
- }
- else if ( th->rootMoves[0].score >= VALUE_TB_WIN_IN_MAX_PLY
- || ( th->rootMoves[0].score > VALUE_TB_LOSS_IN_MAX_PLY
- && votes[th->rootMoves[0].pv[0]] > votes[bestThread->rootMoves[0].pv[0]]))
- bestThread = th;
+ if (abs(bestThread->rootMoves[0].score) >= VALUE_TB_WIN_IN_MAX_PLY)
+ {
+ // Make sure we pick the shortest mate / TB conversion or stave off mate the longest
+ if (th->rootMoves[0].score > bestThread->rootMoves[0].score)
+ bestThread = th;
+ }
+ else if ( th->rootMoves[0].score >= VALUE_TB_WIN_IN_MAX_PLY
+ || ( th->rootMoves[0].score > VALUE_TB_LOSS_IN_MAX_PLY
+ && votes[th->rootMoves[0].pv[0]] > votes[bestThread->rootMoves[0].pv[0]]))
+ bestThread = th;
}
return bestThread;
void idle_loop();
void start_searching();
void wait_for_search_finished();
- int best_move_count(Move move) const;
Pawns::Table pawnsTable;
Material::Table materialTable;
std::atomic<uint64_t> nodes, tbHits, bestMoveChanges;
Position rootPos;
+ StateInfo rootState;
Search::RootMoves rootMoves;
Depth rootDepth, completedDepth;
CounterMoveHistory counterMoves;
CapturePieceToHistory captureHistory;
ContinuationHistory continuationHistory[2][2];
Score contempt;
+ int failedHighCnt;
};
/// The implementation calls pthread_create() with the stack size parameter
/// equal to the linux 8MB default, on platforms that support it.
-#if defined(__APPLE__) || defined(__MINGW32__) || defined(__MINGW64__)
+#if defined(__APPLE__) || defined(__MINGW32__) || defined(__MINGW64__) || defined(USE_PTHREADS)
#include <pthread.h>
TimePoint slowMover = TimePoint(Options["Slow Mover"]);
TimePoint npmsec = TimePoint(Options["nodestime"]);
- // opt_scale is a percentage of available time to use for the current move.
- // max_scale is a multiplier applied to optimumTime.
- double opt_scale, max_scale;
+ // optScale is a percentage of available time to use for the current move.
+ // maxScale is a multiplier applied to optimumTime.
+ double optScale, maxScale;
// If we have to play in 'nodes as time' mode, then convert from time
// to nodes, and use resulting values in time management formulas.