strategy:
matrix:
config:
+ # set the variable for the required tests:
+ # run_expensive_tests: true
+ # run_32bit_tests: true
+ # run_64bit_tests: true
+ # run_armv8_tests: true
+ # run_armv7_tests: true
- {
name: "Ubuntu 20.04 Clang",
os: ubuntu-20.04,
compiler: clang++,
comp: clang,
- run_expensive_tests: false,
run_32bit_tests: true,
run_64bit_tests: true,
shell: 'bash {0}'
}
+ - {
+ name: "Ubuntu 20.04 NDK armv8",
+ os: ubuntu-20.04,
+ compiler: aarch64-linux-android21-clang++,
+ comp: ndk,
+ run_armv8_tests: true,
+ shell: 'bash {0}'
+ }
+ - {
+ name: "Ubuntu 20.04 NDK armv7",
+ os: ubuntu-20.04,
+ compiler: armv7a-linux-androideabi21-clang++,
+ comp: ndk,
+ run_armv7_tests: true,
+ shell: 'bash {0}'
+ }
- {
name: "MacOS 10.15 Apple Clang",
os: macos-10.15,
compiler: clang++,
comp: clang,
- run_expensive_tests: false,
- run_32bit_tests: false,
run_64bit_tests: true,
shell: 'bash {0}'
}
os: macos-10.15,
compiler: g++-10,
comp: gcc,
- run_expensive_tests: false,
- run_32bit_tests: false,
run_64bit_tests: true,
shell: 'bash {0}'
}
- {
- name: "Windows 2019 Mingw-w64 GCC x86_64",
- os: windows-2019,
+ name: "Windows 2022 Mingw-w64 GCC x86_64",
+ os: windows-2022,
compiler: g++,
- comp: gcc,
- run_expensive_tests: false,
- run_32bit_tests: false,
+ comp: mingw,
run_64bit_tests: true,
msys_sys: 'mingw64',
- msys_env: 'x86_64',
+ msys_env: 'x86_64-gcc',
shell: 'msys2 {0}'
}
- {
- name: "Windows 2019 Mingw-w64 GCC i686",
- os: windows-2019,
+ name: "Windows 2022 Mingw-w64 GCC i686",
+ os: windows-2022,
compiler: g++,
- comp: gcc,
- run_expensive_tests: false,
+ comp: mingw,
run_32bit_tests: true,
- run_64bit_tests: false,
msys_sys: 'mingw32',
- msys_env: 'i686',
+ msys_env: 'i686-gcc',
+ shell: 'msys2 {0}'
+ }
+ - {
+ name: "Windows 2022 Mingw-w64 Clang x86_64",
+ os: windows-2022,
+ compiler: clang++,
+ comp: clang,
+ run_64bit_tests: true,
+ msys_sys: 'clang64',
+ msys_env: 'clang-x86_64-clang',
shell: 'msys2 {0}'
}
if: runner.os == 'Linux'
run: |
sudo apt update
- sudo apt install expect valgrind g++-multilib
+ sudo apt install expect valgrind g++-multilib qemu-user
- name: Setup msys and install required packages
if: runner.os == 'Windows'
uses: msys2/setup-msys2@v2
with:
msystem: ${{matrix.config.msys_sys}}
- install: mingw-w64-${{matrix.config.msys_env}}-gcc make git expect
+ install: mingw-w64-${{matrix.config.msys_env}} make git expect
- name: Download the used network from the fishtest framework
run: |
- name: Check compiler
run: |
+ export PATH=$PATH:$ANDROID_NDK_HOME/toolchains/llvm/prebuilt/linux-x86_64/bin
$COMPILER -v
- name: Test help target
make clean
make -j2 ARCH=x86-64-vnni256 build
+ # armv8 tests
+
+ - name: Test armv8 build
+ if: ${{ matrix.config.run_armv8_tests }}
+ run: |
+ ANDROID_ROOT=/usr/local/lib/android
+ ANDROID_SDK_ROOT=${ANDROID_ROOT}/sdk
+ SDKMANAGER=${ANDROID_SDK_ROOT}/cmdline-tools/latest/bin/sdkmanager
+ echo "y" | $SDKMANAGER "ndk;21.4.7075529"
+ ANDROID_NDK_ROOT=${ANDROID_SDK_ROOT}/ndk-bundle
+ ln -sfn $ANDROID_SDK_ROOT/ndk/21.4.7075529 $ANDROID_NDK_ROOT
+ export PATH=$ANDROID_NDK_HOME/toolchains/llvm/prebuilt/linux-x86_64/bin:$PATH
+ export LDFLAGS="-static -Wno-unused-command-line-argument"
+ make clean
+ make -j2 ARCH=armv8 build
+ ../tests/signature.sh $benchref
+
+ # armv7 tests
+
+ - name: Test armv7 build
+ if: ${{ matrix.config.run_armv7_tests }}
+ run: |
+ ANDROID_ROOT=/usr/local/lib/android
+ ANDROID_SDK_ROOT=${ANDROID_ROOT}/sdk
+ SDKMANAGER=${ANDROID_SDK_ROOT}/cmdline-tools/latest/bin/sdkmanager
+ echo "y" | $SDKMANAGER "ndk;21.4.7075529"
+ ANDROID_NDK_ROOT=${ANDROID_SDK_ROOT}/ndk-bundle
+ ln -sfn $ANDROID_SDK_ROOT/ndk/21.4.7075529 $ANDROID_NDK_ROOT
+ export PATH=$ANDROID_NDK_HOME/toolchains/llvm/prebuilt/linux-x86_64/bin:$PATH
+ export LDFLAGS="-static -Wno-unused-command-line-argument"
+ make clean
+ make -j2 ARCH=armv7 build
+ ../tests/signature.sh $benchref
+
+ - name: Test armv7-neon build
+ if: ${{ matrix.config.run_armv7_tests }}
+ run: |
+ ANDROID_ROOT=/usr/local/lib/android
+ ANDROID_SDK_ROOT=${ANDROID_ROOT}/sdk
+ SDKMANAGER=${ANDROID_SDK_ROOT}/cmdline-tools/latest/bin/sdkmanager
+ echo "y" | $SDKMANAGER "ndk;21.4.7075529"
+ ANDROID_NDK_ROOT=${ANDROID_SDK_ROOT}/ndk-bundle
+ ln -sfn $ANDROID_SDK_ROOT/ndk/21.4.7075529 $ANDROID_NDK_ROOT
+ export PATH=$ANDROID_NDK_HOME/toolchains/llvm/prebuilt/linux-x86_64/bin:$PATH
+ export LDFLAGS="-static -Wno-unused-command-line-argument"
+ make clean
+ make -j2 ARCH=armv7-neon build
+ ../tests/signature.sh $benchref
+
# Other tests
- name: Check perft and search reproducibility
Artem Solopiy (EntityFX)
Auguste Pop
Balint Pfliegel
+Ben Chaney (Chaneybenjamini)
Ben Koshy (BKSpurgeon)
Bill Henry (VoyagerOne)
Bojun Guo (noobpwnftw, Nooby)
+Boštjan Mejak (PedanticHacker)
braich
Brian Sheppard (SapphireBrand, briansheppard-toast)
Bruno de Melo Costa (BM123499)
Dominik Schlösser (domschl)
double-beep
Douglas Matos Gomes (dsmsgms)
+Dubslow
Eduardo Cáceres (eduherminio)
Eelco de Groot (KingDefender)
Elvin Liu (solarlight2)
Justin Blanchard (UncombedCoconut)
Kelly Wilson
Ken Takusagawa
+Kian E (KJE-98)
kinderchocolate
Kiran Panditrao (Krgp)
Kojirion
Matt Ginsberg (mattginsberg)
Matthew Lai (matthewlai)
Matthew Sullivan (Matt14916)
+Max A. (Disservin)
Maxim Molchanov (Maxim)
Michael An (man)
Michael Byrne (MichaelB7)
Pasquale Pigazzini (ppigazzini)
Patrick Jansen (mibere)
pellanda
+Peter Schneider (pschneider1968)
Peter Zsifkovits (CoffeeOne)
Praveen Kumar Tummala (praveentml)
Rahul Dsilva (silversolver1)
-## Overview
-
-[![Build Status](https://github.com/official-stockfish/Stockfish/actions/workflows/stockfish.yml/badge.svg)](https://github.com/official-stockfish/Stockfish/actions)
-[![Build Status](https://ci.appveyor.com/api/projects/status/github/official-stockfish/Stockfish?branch=master&svg=true)](https://ci.appveyor.com/project/mcostalba/stockfish/branch/master)
+<div align="center">
+
+ [![Stockfish][stockfish128-logo]][website-link]
+
+ [![Build][build-badge]][build-link]
+ [![License][license-badge]][license-link]
+ <br>
+ [![Release][release-badge]][release-link]
+ [![Commits][commits-badge]][commits-link]
+ <br>
+ [![Website][website-badge]][website-link]
+ [![Fishtest][fishtest-badge]][fishtest-link]
+ [![Discord][discord-badge]][discord-link]
+
+</div>
-[Stockfish](https://stockfishchess.org) is a free, powerful UCI chess engine
-derived from Glaurung 2.1. Stockfish is not a complete chess program and requires a
-UCI-compatible graphical user interface (GUI) (e.g. XBoard with PolyGlot, Scid,
-Cute Chess, eboard, Arena, Sigma Chess, Shredder, Chess Partner or Fritz) in order
-to be used comfortably. Read the documentation for your GUI of choice for information
-about how to use Stockfish with it.
+## Overview
-The Stockfish engine features two evaluation functions for chess, the classical
-evaluation based on handcrafted terms, and the NNUE evaluation based on efficiently
-updatable neural networks. The classical evaluation runs efficiently on almost all
-CPU architectures, while the NNUE evaluation benefits from the vector
-intrinsics available on most CPUs (sse2, avx2, neon, or similar).
+[Stockfish][website-link] is a free, powerful UCI chess engine derived from
+Glaurung 2.1. Stockfish is not a complete chess program and requires a UCI-compatible
+graphical user interface (GUI) (e.g. XBoard with PolyGlot, Scid, Cute Chess, eboard,
+Arena, Sigma Chess, Shredder, Chess Partner or Fritz) in order to be used comfortably.
+Read the documentation for your GUI of choice for information about how to use
+Stockfish with it.
+The Stockfish engine features two evaluation functions for chess. The efficiently
+updatable neural network (NNUE) based evaluation is the default and by far the strongest.
+The classical evaluation based on handcrafted terms remains available. The strongest
+network is integrated in the binary and downloaded automatically during the build process.
+The NNUE evaluation benefits from the vector intrinsics available on most CPUs (sse2,
+avx2, neon, or similar).
## Files
This distribution of Stockfish consists of the following files:
- * [Readme.md](https://github.com/official-stockfish/Stockfish/blob/master/README.md), the file you are currently reading.
+ * [README.md][readme-link], the file you are currently reading.
- * [Copying.txt](https://github.com/official-stockfish/Stockfish/blob/master/Copying.txt), a text file containing the GNU General Public License version 3.
+ * [Copying.txt][license-link], a text file containing the GNU General Public License
+ version 3.
- * [AUTHORS](https://github.com/official-stockfish/Stockfish/blob/master/AUTHORS), a text file with the list of authors for the project
+ * [AUTHORS][authors-link], a text file with the list of authors for the project.
- * [src](https://github.com/official-stockfish/Stockfish/tree/master/src), a subdirectory containing the full source code, including a Makefile
+ * [src][src-link], a subdirectory containing the full source code, including a Makefile
that can be used to compile Stockfish on Unix-like systems.
- * a file with the .nnue extension, storing the neural network for the NNUE
- evaluation. Binary distributions will have this file embedded.
+ * a file with the .nnue extension, storing the neural network for the NNUE evaluation.
+ Binary distributions will have this file embedded.
## The UCI protocol and available options
The Universal Chess Interface (UCI) is a standard protocol used to communicate with
a chess engine, and is the recommended way to do so for typical graphical user interfaces
-(GUI) or chess tools. Stockfish implements the majority of it options as described
-in [the UCI protocol](https://www.shredderchess.com/download/div/uci.zip).
+(GUI) or chess tools. Stockfish implements the majority of its options as described
+in [the UCI protocol][uci-link].
Developers can see the default values for UCI options available in Stockfish by typing
`./stockfish uci` in a terminal, but the majority of users will typically see them and
* #### EvalFile
The name of the file of the NNUE evaluation parameters. Depending on the GUI the
- filename might have to include the full path to the folder/directory that contains the file.
- Other locations, such as the directory that contains the binary and the working directory,
- are also searched.
+ filename might have to include the full path to the folder/directory that contains
+ the file. Other locations, such as the directory that contains the binary and the
+ working directory, are also searched.
* #### UCI_AnalyseMode
An option handled by your GUI.
Example: `C:\tablebases\wdl345;C:\tablebases\wdl6;D:\tablebases\dtz345;D:\tablebases\dtz6`
It is recommended to store .rtbw files on an SSD. There is no loss in storing
- the .rtbz files on a regular HD. It is recommended to verify all md5 checksums
+ the .rtbz files on a regular HDD. It is recommended to verify all md5 checksums
of the downloaded tablebase files (`md5sum -c checksum.md5`) as corruption will
lead to engine crashes.
For developers the following non-standard commands might be of interest, mainly useful for debugging:
* #### bench *ttSize threads limit fenFile limitType evalType*
- Performs a standard benchmark using various options. The signature of a version (standard node
- count) is obtained using all defaults. `bench` is currently `bench 16 1 13 default depth mixed`.
+ Performs a standard benchmark using various options. The signature of a version
+ (standard node count) is obtained using all defaults. `bench` is currently
+ `bench 16 1 13 default depth mixed`.
* #### compiler
Give information about the compiler and environment used for building a binary.
The NNUE evaluation was first introduced in shogi, and ported to Stockfish afterward.
It can be evaluated efficiently on CPUs, and exploits the fact that only parts
of the neural network need to be updated after a typical chess move.
-[The nodchip repository](https://github.com/nodchip/Stockfish) provided the first version of
-the needed tools to train and develop the NNUE networks. Today, more advanced training tools are available
-in [the nnue-pytorch repository](https://github.com/glinscott/nnue-pytorch/), while data generation tools
-are available in [a dedicated branch](https://github.com/official-stockfish/Stockfish/tree/tools).
+[The nodchip repository][nodchip-link] provided the first version of the needed tools
+to train and develop the NNUE networks. Today, more advanced training tools are
+available in [the nnue-pytorch repository][pytorch-link], while data generation tools
+are available in [a dedicated branch][tools-link].
-On CPUs supporting modern vector instructions
-(avx2 and similar), the NNUE evaluation results in much stronger playing strength, even
-if the nodes per second computed by the engine is somewhat lower (roughly 80% of nps
-is typical).
+On CPUs supporting modern vector instructions (avx2 and similar), the NNUE evaluation
+results in much stronger playing strength, even if the nodes per second computed by
+the engine is somewhat lower (roughly 80% of nps is typical).
Notes:
-1) the NNUE evaluation depends on the Stockfish binary and the network parameter
-file (see the EvalFile UCI option). Not every parameter file is compatible with a given
-Stockfish binary, but the default value of the EvalFile UCI option is the name of a network
-that is guaranteed to be compatible with that binary.
+1) the NNUE evaluation depends on the Stockfish binary and the network parameter file
+(see the EvalFile UCI option). Not every parameter file is compatible with a given
+Stockfish binary, but the default value of the EvalFile UCI option is the name of a
+network that is guaranteed to be compatible with that binary.
2) to use the NNUE evaluation, the additional data file with neural network parameters
-needs to be available. Normally, this file is already embedded in the binary or it
-can be downloaded. The filename for the default (recommended) net can be found as the default
+needs to be available. Normally, this file is already embedded in the binary or it can
+be downloaded. The filename for the default (recommended) net can be found as the default
value of the `EvalFile` UCI option, with the format `nn-[SHA256 first 12 digits].nnue`
(for instance, `nn-c157e0a5755b.nnue`). This file can be downloaded from
```
### Support on Windows
The use of large pages requires "Lock Pages in Memory" privilege. See
-[Enable the Lock Pages in Memory Option (Windows)](https://docs.microsoft.com/en-us/sql/database-engine/configure-windows/enable-the-lock-pages-in-memory-option-windows)
-on how to enable this privilege, then run [RAMMap](https://docs.microsoft.com/en-us/sysinternals/downloads/rammap)
+[Enable the Lock Pages in Memory Option (Windows)][lockpages-link]
+on how to enable this privilege, then run [RAMMap][rammap-link]
to double-check that large pages are used. We suggest that you reboot
your computer after you have enabled large pages, because long Windows
sessions suffer from memory fragmentation, which may prevent Stockfish
### Donating hardware
Improving Stockfish requires a massive amount of testing. You can donate
-your hardware resources by installing the [Fishtest Worker](https://github.com/glinscott/fishtest/wiki/Running-the-worker:-overview)
-and view the current tests on [Fishtest](https://tests.stockfishchess.org/tests).
+your hardware resources by installing the [Fishtest Worker][worker-link]
+and view the current tests on [Fishtest][fishtest-link].
### Improving the code
If you want to help improve the code, there are several valuable resources:
-* [In this wiki,](https://www.chessprogramming.org) many techniques used in
+* [In this wiki,][programming-link] many techniques used in
Stockfish are explained with a lot of background information.
-* [The section on Stockfish](https://www.chessprogramming.org/Stockfish)
+* [The section on Stockfish][programmingsf-link]
describes many features and techniques used by Stockfish. However, it is
generic rather than being focused on Stockfish's precise implementation.
Nevertheless, a helpful resource.
-* The latest source can always be found on [GitHub](https://github.com/official-stockfish/Stockfish).
-Discussions about Stockfish take place these days mainly in the [FishCooking](https://groups.google.com/forum/#!forum/fishcooking)
-group and on the [Stockfish Discord channel](https://discord.gg/nv8gDtt).
-The engine testing is done on [Fishtest](https://tests.stockfishchess.org/tests).
-If you want to help improve Stockfish, please read this [guideline](https://github.com/glinscott/fishtest/wiki/Creating-my-first-test)
+* The latest source can always be found on [GitHub][github-link].
+Discussions about Stockfish take place these days mainly in the [FishCooking][fishcooking-link]
+group and on the [Stockfish Discord channel][discord-link].
+The engine testing is done on [Fishtest][fishtest-link].
+If you want to help improve Stockfish, please read this [guideline][guideline-link]
first, where the basics of Stockfish development are explained.
using it as the starting point for a software project of your own.
The only real limitation is that whenever you distribute Stockfish in
-some way, you MUST always include the full source code, or a pointer
-to where the source code can be found, to generate the exact binary
-you are distributing. If you make any changes to the source code,
-these changes must also be made available under the GPL.
+some way, you MUST always include the license and the full source code
+(or a pointer to where the source code can be found) to generate the
+exact binary you are distributing. If you make any changes to the
+source code, these changes must also be made available under the GPL v3.
For full details, read the copy of the GPL v3 found in the file named
-[*Copying.txt*](https://github.com/official-stockfish/Stockfish/blob/master/Copying.txt).
+[*Copying.txt*][license-link].
+
+[authors-link]:https://github.com/official-stockfish/Stockfish/blob/master/AUTHORS
+[build-badge]:https://img.shields.io/github/workflow/status/official-stockfish/Stockfish/Stockfish?style=for-the-badge&label=stockfish&logo=github
+[build-link]:https://github.com/official-stockfish/Stockfish/actions/workflows/stockfish.yml
+[commits-badge]:https://img.shields.io/github/commits-since/official-stockfish/Stockfish/latest?style=for-the-badge
+[commits-link]:https://github.com/official-stockfish/Stockfish/commits/master
+[discord-badge]:https://img.shields.io/discord/435943710472011776?style=for-the-badge&label=discord&logo=Discord
+[discord-link]:https://discord.com/invite/aefaxmq
+[fishcooking-link]:https://groups.google.com/g/fishcooking
+[fishtest-badge]:https://img.shields.io/website?style=for-the-badge&down_color=red&down_message=Offline&label=Fishtest&up_color=success&up_message=Online&url=https%3A%2F%2Ftests.stockfishchess.org%2Ftests
+[fishtest-link]:https://tests.stockfishchess.org/tests
+[github-link]:https://github.com/official-stockfish/Stockfish
+[guideline-link]:https://github.com/glinscott/fishtest/wiki/Creating-my-first-test
+[license-badge]:https://img.shields.io/github/license/official-stockfish/Stockfish?style=for-the-badge&label=license&color=success
+[license-link]:https://github.com/official-stockfish/Stockfish/blob/master/Copying.txt
+[lockpages-link]:https://docs.microsoft.com/en-us/sql/database-engine/configure-windows/enable-the-lock-pages-in-memory-option-windows
+[nodchip-link]:https://github.com/nodchip/Stockfish
+[programming-link]:https://www.chessprogramming.org/Main_Page
+[programmingsf-link]:https://www.chessprogramming.org/Stockfish
+[pytorch-link]:https://github.com/glinscott/nnue-pytorch
+[rammap-link]:https://docs.microsoft.com/en-us/sysinternals/downloads/rammap
+[readme-link]:https://github.com/official-stockfish/Stockfish/blob/master/README.md
+[release-badge]:https://img.shields.io/github/v/release/official-stockfish/Stockfish?style=for-the-badge&label=official%20release
+[release-link]:https://github.com/official-stockfish/Stockfish/releases/latest
+[src-link]:https://github.com/official-stockfish/Stockfish/tree/master/src
+[stockfish128-logo]:https://stockfishchess.org/images/logo/icon_128x128.png
+[tools-link]:https://github.com/official-stockfish/Stockfish/tree/tools
+[uci-link]:https://www.shredderchess.com/download/div/uci.zip
+[website-badge]:https://img.shields.io/website?style=for-the-badge&down_color=red&down_message=Offline&label=website&up_color=success&up_message=Online&url=https%3A%2F%2Fstockfishchess.org
+[website-link]:https://stockfishchess.org
+[worker-link]:https://github.com/glinscott/fishtest/wiki/Running-the-worker:-overview
-Contributors to Fishtest with >10,000 CPU hours, as of 2022-01-08.
+Contributors to Fishtest with >10,000 CPU hours, as of 2022-07-31.
Thank you!
Username CPU Hours Games played
------------------------------------------------------------------
-noobpwnftw 30323785 2111752181
-mlang 2597136 178003354
-dew 1598255 95747056
-technologov 1395130 59347018
-grandphish2 1028906 63396841
+noobpwnftw 33202707 2423743815
+technologov 5064327 270208248
+mlang 2963357 198937430
+dew 1677196 99717674
+grandphish2 1231326 74551309
+okrout 1102747 98977462
+TueRens 925904 57404676
+pemo 911980 35581261
tvijlbrief 795993 51894442
-TueRens 737922 46359276
-okrout 719183 57150314
+JojoM 774270 47311084
mibere 703840 46867607
-JojoM 689134 42001146
-linrock 594355 16779359
-pemo 575248 28386103
-gvreuls 509219 33205908
-cw 500695 33575803
-fastgm 479238 28830588
+linrock 697283 18804969
+gvreuls 564284 36392236
+cw 515739 34775505
+fastgm 500949 30101898
+oz 439015 31794460
+CSU_Dynasty 438017 29369136
crunchy 427035 27344275
-CSU_Dynasty 410969 27877556
-ctoks 393901 26299629
-oz 354661 26331020
+ctoks 422671 27812261
+bcross 363335 25108521
+leszek 360149 22674005
+velislav 333325 21444360
Fisherman 327231 21829379
-bcross 325119 22871639
-velislav 320581 20663382
-leszek 291605 18475167
-Dantist 239411 15236750
-mgrabiak 229336 15004308
+Dantist 292327 17951982
+mgrabiak 247220 16137378
+nordlandia 226543 14601042
+robal 224740 14314972
glinscott 217799 13780820
-robal 211837 13563250
-nordlandia 211692 13484886
-drabel 200377 13730626
+ncfish1 207751 13909639
+drabel 203884 13922680
+mhoram 200022 12533963
bking_US 198894 11876016
+rpngn 191764 12236583
Thanar 179852 12365359
-vdv 175535 9904264
-mhoram 173134 11257113
+vdv 175544 9904472
spams 157128 10319326
marrco 150300 9402229
sqrt2 147963 9724586
-vdbergh 137425 8954767
+vdbergh 137480 8958795
CoffeeOne 137100 5024116
malala 136182 8002293
xoto 133759 9159372
-davar 122113 7961971
+davar 128645 8367253
+DesolatedDodo 124877 8056482
dsmith 122059 7570238
-amicic 119659 7937885
-rpngn 118952 8100045
+amicic 119661 7938029
Data 113305 8220352
BrunoBanani 112960 7436849
CypressChess 108321 7759588
MaZePallas 102823 6633619
+skiminki 102168 6778440
sterni1971 100532 5880772
sunu 100167 7040199
ElbertoOne 99028 7023771
-skiminki 98123 6478402
-DesolatedDodo 93686 6139198
+zeryl 96984 6162287
brabos 92118 6186135
-cuistot 90357 5350988
+cuistot 91738 5447070
psk 89957 5984901
racerschmacer 85712 6119648
Vizvezdenec 83761 5344740
+sschnee 83003 4840890
0x3C33 82614 5271253
+armo9494 82501 5806056
BRAVONE 81239 5054681
-sschnee 78091 4678078
nssy 76497 5259388
+thirdlife 76478 1544524
+Calis007 76457 4281018
+jromang 75885 5230523
teddybaer 75125 5407666
Pking_cda 73776 5293873
-jromang 72192 5057715
+Wolfgang 72750 4538670
+sebastronomy 70784 1329428
solarlight 70517 5028306
dv8silencer 70287 3883992
Bobo1239 68515 4652287
-zeryl 68203 4516139
+yurikvelo 67651 4578970
manap 66273 4121774
tinker 64333 4268790
-yurikvelo 61692 4262042
qurashee 61208 3429862
robnjr 57262 4053117
+megaman7de 57023 3525850
Freja 56938 3733019
+MaxKlaxxMiner 56279 3410158
ttruscott 56010 3680085
rkl 55132 4164467
-Wolfgang 54087 3415872
renouve 53811 3501516
+tolkki963 53294 3354682
+DMBK 52963 3933332
finfish 51360 3370515
eva42 51272 3599691
-eastorwest 51055 3451203
+Spprtr 51139 3299983
+eastorwest 51058 3451555
rap 49985 3219146
pb00067 49727 3298270
-Spprtr 48260 3141959
bigpen0r 47667 3336927
ronaldjerum 47654 3240695
-MaxKlaxxMiner 47584 2972142
biffhero 46564 3111352
Fifis 45843 3088497
VoyagerOne 45476 3452465
speedycpu 43842 3003273
jbwiebe 43305 2805433
-megaman7de 43042 2823256
Antihistamine 41788 2761312
mhunt 41735 2691355
homyur 39893 2850481
gri 39871 2515779
-oryx 38867 2976992
+oryx 39602 3024830
SC 37299 2731694
Garf 37213 2986270
+Dubslow 36714 2409254
csnodgrass 36207 2688994
jmdana 36157 2210661
+markkulix 35994 2226860
strelock 34716 2074055
EthanOConnor 33370 2090311
slakovv 32915 2021889
-Calis007 32024 2163604
+gopeto 31078 2033362
manapbk 30987 1810399
-DMBK 30675 2383552
Prcuvu 30377 2170122
anst 30301 2190091
-armo9494 30198 2438202
jkiiski 30136 1904470
-tolkki963 29918 1822290
hyperbolic.tom 29840 2017394
chuckstablers 29659 2093438
Pyafue 29650 1902349
-gopeto 28881 1896862
+MarcusTullius 28611 1646671
+spcc 28241 1821198
+belzedar94 27935 1789106
OuaisBla 27636 1578800
chriswk 26902 1868317
achambord 26582 1767323
SFTUser 25182 1675689
nabildanial 24942 1519409
Sharaf_DG 24765 1786697
-ncfish1 24411 1520927
-rodneyc 24275 1410450
+rodneyc 24375 1416258
+Ulysses 24017 1626140
agg177 23890 1395014
JanErik 23408 1703875
+Ente 23403 1660988
+kdave 23392 1630462
Isidor 23388 1680691
Norabor 23339 1602636
cisco2015 22897 1762669
-Ente 22810 1628234
+Wencey 22573 1121406
Zirie 22542 1472937
team-oh 22272 1636708
MazeOfGalious 21978 1629593
xor12 21628 1680365
dex 21612 1467203
nesoneg 21494 1463031
+Roady 21323 1433822
sphinx 21211 1384728
+user213718 21196 1397710
jjoshua2 21001 1423089
horst.prack 20878 1465656
0xB00B1ES 20590 1208666
j3corre 20405 941444
Adrian.Schmidt123 20316 1281436
+jcAEie 20221 1504162
wei 19973 1745989
-belzedar94 19818 1434252
-user213718 19608 1334650
rstoesser 19569 1293588
eudhan 19274 1283717
+fishtester 19145 1242668
vulcan 18871 1729392
jundery 18445 1115855
iisiraider 18247 1101015
chris 17698 1487385
purplefishies 17595 1092533
dju 17353 978595
+AndreasKrug 17191 1317997
DragonLord 17014 1162790
+Jopo12321 16966 944924
+GPUex 16744 1077826
+xwziegtm 16608 1276372
IgorLeMasson 16064 1147232
-Roady 15677 1121476
ako027ako 15671 1173203
-kdave 15539 1160356
+jsys14 15474 917092
Nikolay.IT 15154 1068349
Andrew Grant 15114 895539
+scuzzi 15112 960373
OssumOpossum 14857 1007129
-spcc 14838 1034050
Karby 14808 867120
enedene 14476 905279
-fishtester 14411 1016252
-jsys14 14340 844792
bpfliegel 14298 884523
-AndreasKrug 14096 1126301
mpx86 14019 759568
jpulman 13982 870599
-Ulysses 13977 1073410
+Naven94 13879 811552
+Karpovbot 13808 734276
crocogoat 13803 1117422
joster 13794 950160
Nesa92 13786 1114691
mabichito 12903 749391
thijsk 12886 722107
AdrianSA 12860 804972
+infinigon 12807 937332
Flopzee 12698 894821
-infinigon 12638 933684
+pirt 12551 965597
fatmurphy 12547 853210
-scuzzi 12511 845761
SapphireBrand 12416 969604
modolief 12386 896470
Farseer 12249 694108
pgontarz 12151 848794
stocky 11954 699440
mschmidt 11941 803401
+Oakwen 11925 818865
+MooTheCow 11851 772628
+deflectooor 11642 565132
+dbernier 11609 818636
+Skiff84 11604 602786
Maxim 11543 836024
infinity 11470 727027
-pirt 11434 889369
+FormazChar 11430 856559
aga 11409 695071
+Jackfish 11403 750526
torbjo 11395 729145
Thomas A. Anderson 11372 732094
savage84 11358 670860
-FormazChar 11304 847663
-dbernier 11274 806566
d64 11263 789184
-MooTheCow 11237 720174
+qoo_charly_cai 11127 671959
snicolet 11106 869170
ali-al-zhrani 11098 768494
+whelanh 11067 235676
basepi 10637 744851
Cubox 10621 826448
+Alb11747 10558 689794
michaelrpg 10509 739239
OIVAS7572 10420 995586
+Garruk 10343 704723
dzjp 10343 732529
-Garruk 10332 703905
ols 10259 570669
lbraesch 10252 647825
+Karmatron 10195 661432
### Section 1. General Configuration
### ==========================================================================
+### Establish the operating system name
+KERNEL = $(shell uname -s)
+ifeq ($(KERNEL),Linux)
+ OS = $(shell uname -o)
+endif
+
+### Target Windows OS
+ifeq ($(OS),Windows_NT)
+ ifneq ($(COMP),ndk)
+ target_windows = yes
+ endif
+else ifeq ($(COMP),mingw)
+ target_windows = yes
+ ifeq ($(WINE_PATH),)
+ WINE_PATH = $(shell which wine)
+ endif
+endif
+
### Executable name
-ifeq ($(COMP),mingw)
-EXE = stockfish.exe
+ifeq ($(target_windows),yes)
+ EXE = stockfish.exe
else
-EXE = stockfish
+ EXE = stockfish
endif
### Installation dir definitions
### Built-in benchmark for pgo-builds
ifeq ($(SDE_PATH),)
- PGOBENCH = ./$(EXE) bench
+ PGOBENCH = $(WINE_PATH) ./$(EXE) bench
else
- PGOBENCH = $(SDE_PATH) -- ./$(EXE) bench
+ PGOBENCH = $(SDE_PATH) -- $(WINE_PATH) ./$(EXE) bench
endif
### Source and object files
VPATH = syzygy:nnue:nnue/features
-### Establish the operating system name
-KERNEL = $(shell uname -s)
-ifeq ($(KERNEL),Linux)
- OS = $(shell uname -o)
-endif
-
### ==========================================================================
### Section 2. High-level Configuration
### ==========================================================================
ifeq ($(findstring x86-32,$(ARCH)),x86-32)
arch = i386
bits = 32
- sse = yes
+ sse = no
mmx = yes
else
arch = x86_64
### ==========================================================================
### 3.1 Selecting compiler (default = gcc)
-CXXFLAGS += -Wall -Wcast-qual -fno-exceptions -std=c++17 $(EXTRACXXFLAGS)
-DEPENDFLAGS += -std=c++17
-LDFLAGS += $(EXTRALDFLAGS)
+ifeq ($(MAKELEVEL),0)
+ export ENV_CXXFLAGS := $(CXXFLAGS)
+ export ENV_DEPENDFLAGS := $(DEPENDFLAGS)
+ export ENV_LDFLAGS := $(LDFLAGS)
+endif
+
+CXXFLAGS = $(ENV_CXXFLAGS) -Wall -Wcast-qual -fno-exceptions -std=c++17 $(EXTRACXXFLAGS)
+DEPENDFLAGS = $(ENV_DEPENDFLAGS) -std=c++17
+LDFLAGS = $(ENV_LDFLAGS) $(EXTRALDFLAGS)
ifeq ($(COMP),)
COMP=gcc
endif
endif
+ifeq ($(target_windows),yes)
+ LDFLAGS += -static
+endif
+
ifeq ($(COMP),mingw)
comp=mingw
- ifeq ($(KERNEL),Linux)
- ifeq ($(bits),64)
- ifeq ($(shell which x86_64-w64-mingw32-c++-posix),)
- CXX=x86_64-w64-mingw32-c++
- else
- CXX=x86_64-w64-mingw32-c++-posix
- endif
+ ifeq ($(bits),64)
+ ifeq ($(shell which x86_64-w64-mingw32-c++-posix 2> /dev/null),)
+ CXX=x86_64-w64-mingw32-c++
else
- ifeq ($(shell which i686-w64-mingw32-c++-posix),)
- CXX=i686-w64-mingw32-c++
- else
- CXX=i686-w64-mingw32-c++-posix
- endif
+ CXX=x86_64-w64-mingw32-c++-posix
endif
else
- CXX=g++
+ ifeq ($(shell which i686-w64-mingw32-c++-posix 2> /dev/null),)
+ CXX=i686-w64-mingw32-c++
+ else
+ CXX=i686-w64-mingw32-c++-posix
+ endif
endif
-
CXXFLAGS += -pedantic -Wextra -Wshadow
- LDFLAGS += -static
endif
ifeq ($(COMP),icc)
ifeq ($(COMP),clang)
comp=clang
CXX=clang++
+ ifeq ($(target_windows),yes)
+ CXX=x86_64-w64-mingw32-clang++
+ endif
+
CXXFLAGS += -pedantic -Wextra -Wshadow
- ifneq ($(KERNEL),Darwin)
- ifneq ($(KERNEL),OpenBSD)
- ifneq ($(KERNEL),FreeBSD)
- ifneq ($(findstring MINGW,$(KERNEL)),MINGW)
+ ifeq ($(filter $(KERNEL),Darwin OpenBSD FreeBSD),)
+ ifeq ($(target_windows),)
ifneq ($(RTLIB),compiler-rt)
LDFLAGS += -latomic
endif
endif
endif
- endif
- endif
ifeq ($(arch),$(filter $(arch),armv7 armv8))
ifeq ($(OS),Android)
CXXFLAGS += -m$(bits)
LDFLAGS += -m$(bits)
endif
-
- ifeq ($(findstring MINGW,$(KERNEL)),MINGW)
- LDFLAGS += -static
- endif
-
endif
ifeq ($(KERNEL),Darwin)
ifeq ($(arch),armv7)
CXX=armv7a-linux-androideabi16-clang++
CXXFLAGS += -mthumb -march=armv7-a -mfloat-abi=softfp -mfpu=neon
- STRIP=arm-linux-androideabi-strip
+ ifneq ($(shell which arm-linux-androideabi-strip 2>/dev/null),)
+ STRIP=arm-linux-androideabi-strip
+ else
+ STRIP=llvm-strip
+ endif
endif
ifeq ($(arch),armv8)
CXX=aarch64-linux-android21-clang++
- STRIP=aarch64-linux-android-strip
+ ifneq ($(shell which aarch64-linux-android-strip 2>/dev/null),)
+ STRIP=aarch64-linux-android-strip
+ else
+ STRIP=llvm-strip
+ endif
endif
LDFLAGS += -static-libstdc++ -pie -lm -latomic
endif
endif
endif
- ifeq ($(KERNEL),Darwin)
- ifeq ($(comp),$(filter $(comp),clang icc))
- CXXFLAGS += -mdynamic-no-pic
- endif
+ ifeq ($(KERNEL),Darwin)
+ ifeq ($(comp),$(filter $(comp),clang icc))
+ CXXFLAGS += -mdynamic-no-pic
+ endif
- ifeq ($(comp),gcc)
- ifneq ($(arch),arm64)
- CXXFLAGS += -mdynamic-no-pic
- endif
- endif
- endif
+ ifeq ($(comp),gcc)
+ ifneq ($(arch),arm64)
+ CXXFLAGS += -mdynamic-no-pic
+ endif
+ endif
+ endif
ifeq ($(comp),clang)
CXXFLAGS += -fexperimental-new-pass-manager
ifeq ($(debug), no)
ifeq ($(comp),clang)
CXXFLAGS += -flto
- ifneq ($(findstring MINGW,$(KERNEL)),)
- CXXFLAGS += -fuse-ld=lld
- else ifneq ($(findstring MSYS,$(KERNEL)),)
+ ifeq ($(target_windows),yes)
CXXFLAGS += -fuse-ld=lld
endif
LDFLAGS += $(CXXFLAGS)
ifeq ($(gccisclang),)
CXXFLAGS += -flto
LDFLAGS += $(CXXFLAGS) -flto=jobserver
- ifneq ($(findstring MINGW,$(KERNEL)),)
- LDFLAGS += -save-temps
- else ifneq ($(findstring MSYS,$(KERNEL)),)
- LDFLAGS += -save-temps
- endif
else
CXXFLAGS += -flto
LDFLAGS += $(CXXFLAGS)
endif
-# To use LTO and static linking on windows, the tool chain requires a recent gcc:
-# gcc version 10.1 in msys2 or TDM-GCC version 9.2 are known to work, older might not.
-# So, only enable it for a cross from Linux by default.
+# To use LTO and static linking on Windows,
+# the tool chain requires gcc version 10.1 or later.
else ifeq ($(comp),mingw)
- ifeq ($(KERNEL),Linux)
ifneq ($(arch),i386)
CXXFLAGS += -flto
- LDFLAGS += $(CXXFLAGS) -flto=jobserver
- endif
+ LDFLAGS += $(CXXFLAGS) -save-temps
endif
endif
endif
install:
-mkdir -p -m 755 $(BINDIR)
-cp $(EXE) $(BINDIR)
- -strip $(BINDIR)/$(EXE)
+ $(STRIP) $(BINDIR)/$(EXE)
# clean all
clean: objclean profileclean
net:
$(eval nnuenet := $(shell grep EvalFileDefaultName evaluate.h | grep define | sed 's/.*\(nn-[a-z0-9]\{12\}.nnue\).*/\1/'))
@echo "Default net: $(nnuenet)"
- $(eval nnuedownloadurl := https://tests.stockfishchess.org/api/nn/$(nnuenet))
+ $(eval nnuedownloadurl1 := https://tests.stockfishchess.org/api/nn/$(nnuenet))
+ $(eval nnuedownloadurl2 := https://github.com/official-stockfish/networks/raw/master/$(nnuenet))
$(eval curl_or_wget := $(shell if hash curl 2>/dev/null; then echo "curl -skL"; elif hash wget 2>/dev/null; then echo "wget -qO-"; fi))
- @if test -f "$(nnuenet)"; then \
- echo "Already available."; \
- else \
- if [ "x$(curl_or_wget)" = "x" ]; then \
- echo "Automatic download failed: neither curl nor wget is installed. Install one of these tools or download the net manually"; exit 1; \
- else \
- echo "Downloading $(nnuedownloadurl)"; $(curl_or_wget) $(nnuedownloadurl) > $(nnuenet);\
- fi; \
- fi;
+ @if [ "x$(curl_or_wget)" = "x" ]; then \
+ echo "Automatic download failed: neither curl nor wget is installed. Install one of these tools or download the net manually"; exit 1; \
+ fi
$(eval shasum_command := $(shell if hash shasum 2>/dev/null; then echo "shasum -a 256 "; elif hash sha256sum 2>/dev/null; then echo "sha256sum "; fi))
- @if [ "x$(shasum_command)" != "x" ]; then \
- if [ "$(nnuenet)" != "nn-"`$(shasum_command) $(nnuenet) | cut -c1-12`".nnue" ]; then \
- echo "Failed download or $(nnuenet) corrupted, please delete!"; exit 1; \
- fi \
- else \
+ @if [ "x$(shasum_command)" = "x" ]; then \
echo "shasum / sha256sum not found, skipping net validation"; \
fi
+ @for nnuedownloadurl in "$(nnuedownloadurl1)" "$(nnuedownloadurl2)"; do \
+ if test -f "$(nnuenet)"; then \
+ echo "$(nnuenet) available."; \
+ else \
+ if [ "x$(curl_or_wget)" != "x" ]; then \
+ echo "Downloading $${nnuedownloadurl}"; $(curl_or_wget) $${nnuedownloadurl} > $(nnuenet);\
+ fi; \
+ fi; \
+ if [ "x$(shasum_command)" != "x" ]; then \
+ if [ "$(nnuenet)" != "nn-"`$(shasum_command) $(nnuenet) | cut -c1-12`".nnue" ]; then \
+ echo "Removing failed download"; rm -f $(nnuenet); \
+ else \
+ echo "Network validated"; break; \
+ fi; \
+ fi; \
+ done
+ @if ! test -f "$(nnuenet)"; then \
+ echo "Failed to download $(nnuenet)."; \
+ fi
# clean binaries and objects
objclean:
- @rm -f $(EXE) *.o ./syzygy/*.o ./nnue/*.o ./nnue/features/*.o
+ @rm -f stockfish stockfish.exe *.o ./syzygy/*.o ./nnue/*.o ./nnue/features/*.o
# clean auxiliary profiling files
profileclean:
@rm -rf profdir
@rm -f bench.txt *.gcda *.gcno ./syzygy/*.gcda ./nnue/*.gcda ./nnue/features/*.gcda *.s
@rm -f stockfish.profdata *.profraw
- @rm -f stockfish.exe.lto_wrapper_args
- @rm -f stockfish.exe.ltrans.out
+ @rm -f stockfish.*args*
+ @rm -f stockfish.*lt*
+ @rm -f stockfish.res
@rm -f ./-lstdc++.res
default:
MemoryBuffer buffer(const_cast<char*>(reinterpret_cast<const char*>(gEmbeddedNNUEData)),
size_t(gEmbeddedNNUESize));
+ (void) gEmbeddedNNUEEnd; // Silence warning on unused variable
istream stream(&buffer);
if (load_eval(eval_file, stream))
constexpr Value SpaceThreshold = Value(11551);
// KingAttackWeights[PieceType] contains king attack weights by piece type
- constexpr int KingAttackWeights[PIECE_TYPE_NB] = { 0, 0, 81, 52, 44, 10 };
+ constexpr int KingAttackWeights[PIECE_TYPE_NB] = { 0, 0, 76, 46, 45, 14 };
// SafeCheck[PieceType][single/multiple] contains safe check bonus by piece type,
// higher if multiple safe checks are possible for that piece type.
constexpr int SafeCheck[][2] = {
- {}, {}, {803, 1292}, {639, 974}, {1087, 1878}, {759, 1132}
+ {}, {}, {805, 1292}, {650, 984}, {1071, 1886}, {730, 1128}
};
#define S(mg, eg) make_score(mg, eg)
// BishopPawns[distance from edge] contains a file-dependent penalty for pawns on
// squares of the same color as our bishop.
constexpr Score BishopPawns[int(FILE_NB) / 2] = {
- S(3, 8), S(3, 9), S(2, 8), S(3, 8)
+ S(3, 8), S(3, 9), S(2, 7), S(3, 7)
};
// KingProtector[knight/bishop] contains penalty for each distance unit to own king
- constexpr Score KingProtector[] = { S(8, 9), S(6, 9) };
+ constexpr Score KingProtector[] = { S(9, 9), S(7, 9) };
// Outpost[knight/bishop] contains bonuses for each knight or bishop occupying a
// pawn protected square on rank 4 to 6 which is also safe from a pawn attack.
- constexpr Score Outpost[] = { S(57, 38), S(31, 24) };
+ constexpr Score Outpost[] = { S(54, 34), S(31, 25) };
// PassedRank[Rank] contains a bonus according to the rank of a passed pawn
constexpr Score PassedRank[RANK_NB] = {
- S(0, 0), S(7, 27), S(16, 32), S(17, 40), S(64, 71), S(170, 174), S(278, 262)
+ S(0, 0), S(2, 38), S(15, 36), S(22, 50), S(64, 81), S(166, 184), S(284, 269)
};
constexpr Score RookOnClosedFile = S(10, 5);
- constexpr Score RookOnOpenFile[] = { S(19, 6), S(47, 26) };
+ constexpr Score RookOnOpenFile[] = { S(18, 8), S(49, 26) };
// ThreatByMinor/ByRook[attacked PieceType] contains bonuses according to
// which piece type attacks which one. Attacks on lesser pieces which are
// pawn-defended are not considered.
constexpr Score ThreatByMinor[PIECE_TYPE_NB] = {
- S(0, 0), S(5, 32), S(55, 41), S(77, 56), S(89, 119), S(79, 162)
+ S(0, 0), S(6, 37), S(64, 50), S(82, 57), S(103, 130), S(81, 163)
};
constexpr Score ThreatByRook[PIECE_TYPE_NB] = {
- S(0, 0), S(3, 44), S(37, 68), S(42, 60), S(0, 39), S(58, 43)
+ S(0, 0), S(3, 44), S(36, 71), S(44, 59), S(0, 39), S(60, 39)
};
constexpr Value CorneredBishop = Value(50);
// Assorted bonuses and penalties
- constexpr Score UncontestedOutpost = S( 1, 10);
+ constexpr Score UncontestedOutpost = S( 0, 10);
constexpr Score BishopOnKingRing = S( 24, 0);
constexpr Score BishopXRayPawns = S( 4, 5);
constexpr Score FlankAttacks = S( 8, 0);
- constexpr Score Hanging = S( 69, 36);
+ constexpr Score Hanging = S( 72, 40);
constexpr Score KnightOnQueen = S( 16, 11);
constexpr Score LongDiagonalBishop = S( 45, 0);
constexpr Score MinorBehindPawn = S( 18, 3);
- constexpr Score PassedFile = S( 11, 8);
- constexpr Score PawnlessFlank = S( 17, 95);
- constexpr Score ReachableOutpost = S( 31, 22);
- constexpr Score RestrictedPiece = S( 7, 7);
+ constexpr Score PassedFile = S( 13, 8);
+ constexpr Score PawnlessFlank = S( 19, 97);
+ constexpr Score ReachableOutpost = S( 33, 19);
+ constexpr Score RestrictedPiece = S( 6, 7);
constexpr Score RookOnKingRing = S( 16, 0);
- constexpr Score SliderOnQueen = S( 60, 18);
- constexpr Score ThreatByKing = S( 24, 89);
+ constexpr Score SliderOnQueen = S( 62, 21);
+ constexpr Score ThreatByKing = S( 24, 87);
constexpr Score ThreatByPawnPush = S( 48, 39);
- constexpr Score ThreatBySafePawn = S(173, 94);
+ constexpr Score ThreatBySafePawn = S(167, 99);
constexpr Score TrappedRook = S( 55, 13);
constexpr Score WeakQueenProtection = S( 14, 0);
- constexpr Score WeakQueen = S( 56, 15);
+ constexpr Score WeakQueen = S( 57, 19);
#undef S
return v;
}
-
- /// Fisher Random Chess: correction for cornered bishops, to fix chess960 play with NNUE
-
- Value fix_FRC(const Position& pos) {
-
- constexpr Bitboard Corners = 1ULL << SQ_A1 | 1ULL << SQ_H1 | 1ULL << SQ_A8 | 1ULL << SQ_H8;
-
- if (!(pos.pieces(BISHOP) & Corners))
- return VALUE_ZERO;
-
- int correction = 0;
-
- if ( pos.piece_on(SQ_A1) == W_BISHOP
- && pos.piece_on(SQ_B2) == W_PAWN)
- correction -= CorneredBishop;
-
- if ( pos.piece_on(SQ_H1) == W_BISHOP
- && pos.piece_on(SQ_G2) == W_PAWN)
- correction -= CorneredBishop;
-
- if ( pos.piece_on(SQ_A8) == B_BISHOP
- && pos.piece_on(SQ_B7) == B_PAWN)
- correction += CorneredBishop;
-
- if ( pos.piece_on(SQ_H8) == B_BISHOP
- && pos.piece_on(SQ_G7) == B_PAWN)
- correction += CorneredBishop;
-
- return pos.side_to_move() == WHITE ? Value(3 * correction)
- : -Value(3 * correction);
- }
-
} // namespace Eval
/// evaluate() is the evaluator for the outer world. It returns a static
/// evaluation of the position from the point of view of the side to move.
-Value Eval::evaluate(const Position& pos) {
+Value Eval::evaluate(const Position& pos, int* complexity) {
Value v;
- bool useClassical = false;
-
- // Deciding between classical and NNUE eval (~10 Elo): for high PSQ imbalance we use classical,
- // but we switch to NNUE during long shuffling or with high material on the board.
- if ( !useNNUE
- || abs(eg_value(pos.psq_score())) * 5 > (849 + pos.non_pawn_material() / 64) * (5 + pos.rule50_count()))
+ Color stm = pos.side_to_move();
+ Value psq = pos.psq_eg_stm();
+
+ // We use the much less accurate but faster Classical eval when the NNUE
+ // option is set to false. Otherwise we use the NNUE eval unless the
+ // PSQ advantage is decisive and several pieces remain (~3 Elo)
+ bool useClassical = !useNNUE || (pos.count<ALL_PIECES>() > 7 && abs(psq) > 1760);
+ if (useClassical)
+ v = Evaluation<NO_TRACE>(pos).value();
+ else
{
- v = Evaluation<NO_TRACE>(pos).value(); // classical
- useClassical = abs(v) >= 298;
- }
-
- // If result of a classical evaluation is much lower than threshold fall back to NNUE
- if (useNNUE && !useClassical)
- {
- Value nnue = NNUE::evaluate(pos, true); // NNUE
- int scale = 1136 + 20 * pos.non_pawn_material() / 1024;
- Color stm = pos.side_to_move();
- Value optimism = pos.this_thread()->optimism[stm];
- Value psq = (stm == WHITE ? 1 : -1) * eg_value(pos.psq_score());
- int complexity = 35 * abs(nnue - psq) / 256;
-
- optimism = optimism * (44 + complexity) / 32;
- v = (nnue + optimism) * scale / 1024 - optimism;
-
- if (pos.is_chess960())
- v += fix_FRC(pos);
+ int nnueComplexity;
+ int scale = 1064 + 106 * pos.non_pawn_material() / 5120;
+ Value optimism = pos.this_thread()->optimism[stm];
+
+ Value nnue = NNUE::evaluate(pos, true, &nnueComplexity);
+ // Blend nnue complexity with (semi)classical complexity
+ nnueComplexity = (104 * nnueComplexity + 131 * abs(nnue - psq)) / 256;
+ if (complexity) // Return hybrid NNUE complexity to caller
+ *complexity = nnueComplexity;
+
+ optimism = optimism * (269 + nnueComplexity) / 256;
+ v = (nnue * scale + optimism * (scale - 754)) / 1024;
}
// Damp down the evaluation linearly when shuffling
- v = v * (208 - pos.rule50_count()) / 208;
+ v = v * (195 - pos.rule50_count()) / 211;
// Guarantee evaluation does not hit the tablebase range
v = std::clamp(v, VALUE_TB_LOSS_IN_MAX_PLY + 1, VALUE_TB_WIN_IN_MAX_PLY - 1);
+ // When not using NNUE, return classical complexity to caller
+ if (complexity && (!useNNUE || useClassical))
+ *complexity = abs(v - psq);
+
return v;
}
namespace Eval {
std::string trace(Position& pos);
- Value evaluate(const Position& pos);
+ Value evaluate(const Position& pos, int* complexity = nullptr);
extern bool useNNUE;
extern std::string currentEvalFileName;
// The default net name MUST follow the format nn-[SHA256 first 12 digits].nnue
// for the build process (profile-build and fishtest) to work. Do not change the
// name of the macro, as it is used in the Makefile.
- #define EvalFileDefaultName "nn-ac07bd334b62.nnue"
+ #define EvalFileDefaultName "nn-ad9b42354671.nnue"
namespace NNUE {
std::string trace(Position& pos);
- Value evaluate(const Position& pos, bool adjusted = false);
+ Value evaluate(const Position& pos, bool adjusted = false, int* complexity = nullptr);
void init();
void verify();
#if defined(_WIN32)
-static void* aligned_large_pages_alloc_windows(size_t allocSize) {
+static void* aligned_large_pages_alloc_windows([[maybe_unused]] size_t allocSize) {
#if !defined(_WIN64)
- (void)allocSize; // suppress unused-parameter compiler warning
return nullptr;
#else
string binaryDirectory; // path of the executable directory
string workingDirectory; // path of the working directory
-void init(int argc, char* argv[]) {
- (void)argc;
+void init([[maybe_unused]] int argc, char* argv[]) {
string pathSeparator;
// extract the path+name of the executable binary
class RunningAverage {
public:
- // Constructor
- RunningAverage() {}
-
// Reset the running average to rational value p / q
void set(int64_t p, int64_t q)
{ average = p * PERIOD * RESOLUTION / q; }
{ average = RESOLUTION * v + (PERIOD - 1) * average / PERIOD; }
// Test if average is strictly greater than rational a / b
- bool is_greater(int64_t a, int64_t b)
- { return b * average > a * PERIOD * RESOLUTION ; }
+ bool is_greater(int64_t a, int64_t b) const
+ { return b * average > a * (PERIOD * RESOLUTION); }
+
+ int64_t value() const
+ { return average / (PERIOD * RESOLUTION); }
private :
static constexpr int64_t PERIOD = 4096;
public:
std::size_t size() const { return size_; }
- void resize(std::size_t newSize) { size_ = newSize; }
void push_back(const T& value) { values_[size_++] = value; }
- T& operator[](std::size_t index) { return values_[index]; }
- T* begin() { return values_; }
- T* end() { return values_ + size_; }
- const T& operator[](std::size_t index) const { return values_[index]; }
const T* begin() const { return values_; }
const T* end() const { return values_ + size_; }
- void swap(ValueList& other) {
- const std::size_t maxSize = std::max(size_, other.size_);
- for (std::size_t i = 0; i < maxSize; ++i) {
- std::swap(values_[i], other.values_[i]);
- }
- std::swap(size_, other.size_);
- }
-
private:
T values_[MaxSize];
std::size_t size_ = 0;
};
-/// sigmoid(t, x0, y0, C, P, Q) implements a sigmoid-like function using only integers,
-/// with the following properties:
-///
-/// - sigmoid is centered in (x0, y0)
-/// - sigmoid has amplitude [-P/Q , P/Q] instead of [-1 , +1]
-/// - limit is (y0 - P/Q) when t tends to -infinity
-/// - limit is (y0 + P/Q) when t tends to +infinity
-/// - the slope can be adjusted using C > 0, smaller C giving a steeper sigmoid
-/// - the slope of the sigmoid when t = x0 is P/(Q*C)
-/// - sigmoid is increasing with t when P > 0 and Q > 0
-/// - to get a decreasing sigmoid, call with -t, or change sign of P
-/// - mean value of the sigmoid is y0
-///
-/// Use <https://www.desmos.com/calculator/jhh83sqq92> to draw the sigmoid
-
-inline int64_t sigmoid(int64_t t, int64_t x0,
- int64_t y0,
- int64_t C,
- int64_t P,
- int64_t Q)
-{
- assert(C > 0);
- return y0 + P * (t-x0) / (Q * (std::abs(t-x0) + C)) ;
-}
-
-
/// xorshift64star Pseudo-Random Number Generator
/// This class is based on original code written and dedicated
/// to the public domain by Sebastiano Vigna (2014).
#include <cassert>
+#include "bitboard.h"
#include "movepick.h"
namespace Stockfish {
stage = (pos.checkers() ? EVASION_TT : MAIN_TT) +
!(ttm && pos.pseudo_legal(ttm));
+ threatenedPieces = 0;
}
/// MovePicker constructor for quiescence search
/// MovePicker constructor for ProbCut: we generate captures with SEE greater
/// than or equal to the given threshold.
-MovePicker::MovePicker(const Position& p, Move ttm, Value th, const CapturePieceToHistory* cph)
- : pos(p), captureHistory(cph), ttMove(ttm), threshold(th)
+MovePicker::MovePicker(const Position& p, Move ttm, Value th, Depth d, const CapturePieceToHistory* cph)
+ : pos(p), captureHistory(cph), ttMove(ttm), threshold(th), depth(d)
{
assert(!pos.checkers());
static_assert(Type == CAPTURES || Type == QUIETS || Type == EVASIONS, "Wrong type");
+ [[maybe_unused]] Bitboard threatenedByPawn, threatenedByMinor, threatenedByRook;
+ if constexpr (Type == QUIETS)
+ {
+ Color us = pos.side_to_move();
+
+ threatenedByPawn = pos.attacks_by<PAWN>(~us);
+ threatenedByMinor = pos.attacks_by<KNIGHT>(~us) | pos.attacks_by<BISHOP>(~us) | threatenedByPawn;
+ threatenedByRook = pos.attacks_by<ROOK>(~us) | threatenedByMinor;
+
+ // Pieces threatened by pieces of lesser material value
+ threatenedPieces = (pos.pieces(us, QUEEN) & threatenedByRook)
+ | (pos.pieces(us, ROOK) & threatenedByMinor)
+ | (pos.pieces(us, KNIGHT, BISHOP) & threatenedByPawn);
+ }
+
for (auto& m : *this)
if constexpr (Type == CAPTURES)
- m.value = int(PieceValue[MG][pos.piece_on(to_sq(m))]) * 6
- + (*captureHistory)[pos.moved_piece(m)][to_sq(m)][type_of(pos.piece_on(to_sq(m)))];
+ m.value = 6 * int(PieceValue[MG][pos.piece_on(to_sq(m))])
+ + (*captureHistory)[pos.moved_piece(m)][to_sq(m)][type_of(pos.piece_on(to_sq(m)))];
else if constexpr (Type == QUIETS)
- m.value = (*mainHistory)[pos.side_to_move()][from_to(m)]
+ m.value = 2 * (*mainHistory)[pos.side_to_move()][from_to(m)]
+ 2 * (*continuationHistory[0])[pos.moved_piece(m)][to_sq(m)]
+ (*continuationHistory[1])[pos.moved_piece(m)][to_sq(m)]
+ (*continuationHistory[3])[pos.moved_piece(m)][to_sq(m)]
- + (*continuationHistory[5])[pos.moved_piece(m)][to_sq(m)];
-
+ + (*continuationHistory[5])[pos.moved_piece(m)][to_sq(m)]
+ + (threatenedPieces & from_sq(m) ?
+ (type_of(pos.moved_piece(m)) == QUEEN && !(to_sq(m) & threatenedByRook) ? 50000
+ : type_of(pos.moved_piece(m)) == ROOK && !(to_sq(m) & threatenedByMinor) ? 25000
+ : !(to_sq(m) & threatenedByPawn) ? 15000
+ : 0)
+ : 0)
+ + bool(pos.check_squares(type_of(pos.moved_piece(m))) & to_sq(m)) * 16384;
else // Type == EVASIONS
{
if (pos.capture(m))
m.value = PieceValue[MG][pos.piece_on(to_sq(m))]
- - Value(type_of(pos.moved_piece(m)));
+ - Value(type_of(pos.moved_piece(m)))
+ + (1 << 28);
else
- m.value = (*mainHistory)[pos.side_to_move()][from_to(m)]
- + 2 * (*continuationHistory[0])[pos.moved_piece(m)][to_sq(m)]
- - (1 << 28);
+ m.value = (*mainHistory)[pos.side_to_move()][from_to(m)]
+ + (*continuationHistory[0])[pos.moved_piece(m)][to_sq(m)];
}
}
endMoves = generate<CAPTURES>(pos, cur);
score<CAPTURES>();
+ partial_insertion_sort(cur, endMoves, -3000 * depth);
++stage;
goto top;
case GOOD_CAPTURE:
- if (select<Best>([&](){
+ if (select<Next>([&](){
return pos.see_ge(*cur, Value(-69 * cur->value / 1024)) ?
// Move losing capture to endBadCaptures to be tried later
true : (*endBadCaptures++ = *cur, false); }))
return select<Best>([](){ return true; });
case PROBCUT:
- return select<Best>([&](){ return pos.see_ge(*cur, threshold); });
+ return select<Next>([&](){ return pos.see_ge(*cur, threshold); });
case QCAPTURE:
- if (select<Best>([&](){ return depth > DEPTH_QS_RECAPTURES
+ if (select<Next>([&](){ return depth > DEPTH_QS_RECAPTURES
|| to_sq(*cur) == recaptureSquare; }))
return *(cur - 1);
/// unsuccessful during the current search, and is used for reduction and move
/// ordering decisions. It uses 2 tables (one for each color) indexed by
/// the move's from and to squares, see www.chessprogramming.org/Butterfly_Boards
-typedef Stats<int16_t, 14365, COLOR_NB, int(SQUARE_NB) * int(SQUARE_NB)> ButterflyHistory;
+/// (~11 elo)
+typedef Stats<int16_t, 7183, COLOR_NB, int(SQUARE_NB) * int(SQUARE_NB)> ButterflyHistory;
/// CounterMoveHistory stores counter moves indexed by [piece][to] of the previous
/// move, see www.chessprogramming.org/Countermove_Heuristic
/// ContinuationHistory is the combined history of a given pair of moves, usually
/// the current one given a previous one. The nested history table is based on
/// PieceToHistory instead of ButterflyBoards.
+/// (~63 elo)
typedef Stats<PieceToHistory, NOT_USED, PIECE_NB, SQUARE_NB> ContinuationHistory;
const CapturePieceToHistory*,
const PieceToHistory**,
Square);
- MovePicker(const Position&, Move, Value, const CapturePieceToHistory*);
+ MovePicker(const Position&, Move, Value, Depth, const CapturePieceToHistory*);
Move next_move(bool skipQuiets = false);
+ Bitboard threatenedPieces;
+
private:
template<PickType T, typename Pred> Move select(Pred);
template<GenType> void score();
{
write_little_endian<std::uint32_t>(stream, Version);
write_little_endian<std::uint32_t>(stream, hashValue);
- write_little_endian<std::uint32_t>(stream, desc.size());
+ write_little_endian<std::uint32_t>(stream, (std::uint32_t)desc.size());
stream.write(&desc[0], desc.size());
return !stream.fail();
}
}
// Evaluation function. Perform differential calculation.
- Value evaluate(const Position& pos, bool adjusted) {
+ Value evaluate(const Position& pos, bool adjusted, int* complexity) {
// We manually align the arrays on the stack because with gcc < 9.3
// overaligning stack variables with alignas() doesn't work correctly.
constexpr uint64_t alignment = CacheLineSize;
- int delta = 7;
+ int delta = 24 - pos.non_pawn_material() / 9560;
#if defined(ALIGNAS_ON_STACK_VARIABLES_BROKEN)
TransformedFeatureType transformedFeaturesUnaligned[
FeatureTransformer::BufferSize + alignment / sizeof(TransformedFeatureType)];
- char bufferUnaligned[Network::BufferSize + alignment];
auto* transformedFeatures = align_ptr_up<alignment>(&transformedFeaturesUnaligned[0]);
- auto* buffer = align_ptr_up<alignment>(&bufferUnaligned[0]);
#else
alignas(alignment)
TransformedFeatureType transformedFeatures[FeatureTransformer::BufferSize];
- alignas(alignment) char buffer[Network::BufferSize];
#endif
ASSERT_ALIGNED(transformedFeatures, alignment);
- ASSERT_ALIGNED(buffer, alignment);
- const std::size_t bucket = (pos.count<ALL_PIECES>() - 1) / 4;
+ const int bucket = (pos.count<ALL_PIECES>() - 1) / 4;
const auto psqt = featureTransformer->transform(pos, transformedFeatures, bucket);
- const auto positional = network[bucket]->propagate(transformedFeatures, buffer)[0];
+ const auto positional = network[bucket]->propagate(transformedFeatures);
+
+ if (complexity)
+ *complexity = abs(psqt - positional) / OutputScale;
// Give more value to positional evaluation when adjusted flag is set
if (adjusted)
- return static_cast<Value>(((128 - delta) * psqt + (128 + delta) * positional) / 128 / OutputScale);
+ return static_cast<Value>(((1024 - delta) * psqt + (1024 + delta) * positional) / (1024 * OutputScale));
else
return static_cast<Value>((psqt + positional) / OutputScale);
}
#if defined(ALIGNAS_ON_STACK_VARIABLES_BROKEN)
TransformedFeatureType transformedFeaturesUnaligned[
FeatureTransformer::BufferSize + alignment / sizeof(TransformedFeatureType)];
- char bufferUnaligned[Network::BufferSize + alignment];
auto* transformedFeatures = align_ptr_up<alignment>(&transformedFeaturesUnaligned[0]);
- auto* buffer = align_ptr_up<alignment>(&bufferUnaligned[0]);
#else
alignas(alignment)
TransformedFeatureType transformedFeatures[FeatureTransformer::BufferSize];
- alignas(alignment) char buffer[Network::BufferSize];
#endif
ASSERT_ALIGNED(transformedFeatures, alignment);
- ASSERT_ALIGNED(buffer, alignment);
NnueEvalTrace t{};
t.correctBucket = (pos.count<ALL_PIECES>() - 1) / 4;
- for (std::size_t bucket = 0; bucket < LayerStacks; ++bucket) {
- const auto psqt = featureTransformer->transform(pos, transformedFeatures, bucket);
- const auto output = network[bucket]->propagate(transformedFeatures, buffer);
-
- int materialist = psqt;
- int positional = output[0];
+ for (IndexType bucket = 0; bucket < LayerStacks; ++bucket) {
+ const auto materialist = featureTransformer->transform(pos, transformedFeatures, bucket);
+ const auto positional = network[bucket]->propagate(transformedFeatures);
t.psqt[bucket] = static_cast<Value>( materialist / OutputScale );
t.positional[bucket] = static_cast<Value>( positional / OutputScale );
#include <algorithm>
#include <type_traits>
#include "../nnue_common.h"
-#include "../../simd.h"
+#include "simd.h"
/*
This file contains the definition for a fully connected layer (aka affine transform).
{
# if defined(USE_SSE2)
// At least a multiple of 16, with SSE2.
- static_assert(PaddedInputDimensions % 16 == 0);
- constexpr IndexType NumChunks = PaddedInputDimensions / 16;
+ constexpr IndexType NumChunks = ceil_to_multiple<IndexType>(InputDimensions, 16) / 16;
const __m128i Zeros = _mm_setzero_si128();
const auto inputVector = reinterpret_cast<const __m128i*>(input);
# elif defined(USE_MMX)
- static_assert(InputDimensions % 8 == 0);
- constexpr IndexType NumChunks = InputDimensions / 8;
+ constexpr IndexType NumChunks = ceil_to_multiple<IndexType>(InputDimensions, 8) / 8;
const __m64 Zeros = _mm_setzero_si64();
const auto inputVector = reinterpret_cast<const __m64*>(input);
# elif defined(USE_NEON)
- constexpr IndexType NumChunks = (InputDimensions + 15) / 16;
+ constexpr IndexType NumChunks = ceil_to_multiple<IndexType>(InputDimensions, 16) / 16;
const auto inputVector = reinterpret_cast<const int8x8_t*>(input);
# endif
}
#endif
- template <typename PreviousLayer, IndexType OutDims, typename Enabled = void>
+ template <IndexType InDims, IndexType OutDims, typename Enabled = void>
class AffineTransform;
+#if defined (USE_AVX512)
+ constexpr IndexType LargeInputSize = 2 * 64;
+#else
+ constexpr IndexType LargeInputSize = std::numeric_limits<IndexType>::max();
+#endif
+
// A specialization for large inputs.
- template <typename PreviousLayer, IndexType OutDims>
- class AffineTransform<PreviousLayer, OutDims, std::enable_if_t<(PreviousLayer::OutputDimensions >= 2*64-1)>> {
+ template <IndexType InDims, IndexType OutDims>
+ class AffineTransform<InDims, OutDims, std::enable_if_t<(ceil_to_multiple<IndexType>(InDims, MaxSimdWidth) >= LargeInputSize)>> {
public:
// Input/output type
- using InputType = typename PreviousLayer::OutputType;
+ using InputType = std::uint8_t;
using OutputType = std::int32_t;
- static_assert(std::is_same<InputType, std::uint8_t>::value, "");
// Number of input/output dimensions
- static constexpr IndexType InputDimensions = PreviousLayer::OutputDimensions;
+ static constexpr IndexType InputDimensions = InDims;
static constexpr IndexType OutputDimensions = OutDims;
static constexpr IndexType PaddedInputDimensions =
ceil_to_multiple<IndexType>(InputDimensions, MaxSimdWidth);
+ static constexpr IndexType PaddedOutputDimensions =
+ ceil_to_multiple<IndexType>(OutputDimensions, MaxSimdWidth);
- static_assert(PaddedInputDimensions >= 128, "Something went wrong. This specialization should not have been chosen.");
+ using OutputBuffer = OutputType[PaddedOutputDimensions];
+
+ static_assert(PaddedInputDimensions >= LargeInputSize, "Something went wrong. This specialization should not have been chosen.");
#if defined (USE_AVX512)
static constexpr const IndexType InputSimdWidth = 64;
static_assert(OutputDimensions % NumOutputRegs == 0);
- // Size of forward propagation buffer used in this layer
- static constexpr std::size_t SelfBufferSize =
- ceil_to_multiple(OutputDimensions * sizeof(OutputType), CacheLineSize);
-
- // Size of the forward propagation buffer used from the input layer to this layer
- static constexpr std::size_t BufferSize =
- PreviousLayer::BufferSize + SelfBufferSize;
-
// Hash value embedded in the evaluation file
- static constexpr std::uint32_t get_hash_value() {
+ static constexpr std::uint32_t get_hash_value(std::uint32_t prevHash) {
std::uint32_t hashValue = 0xCC03DAE4u;
hashValue += OutputDimensions;
- hashValue ^= PreviousLayer::get_hash_value() >> 1;
- hashValue ^= PreviousLayer::get_hash_value() << 31;
+ hashValue ^= prevHash >> 1;
+ hashValue ^= prevHash << 31;
return hashValue;
}
// Read network parameters
bool read_parameters(std::istream& stream) {
- if (!previousLayer.read_parameters(stream)) return false;
- for (std::size_t i = 0; i < OutputDimensions; ++i)
+ for (IndexType i = 0; i < OutputDimensions; ++i)
biases[i] = read_little_endian<BiasType>(stream);
- for (std::size_t i = 0; i < OutputDimensions * PaddedInputDimensions; ++i)
+ for (IndexType i = 0; i < OutputDimensions * PaddedInputDimensions; ++i)
weights[get_weight_index(i)] = read_little_endian<WeightType>(stream);
return !stream.fail();
// Write network parameters
bool write_parameters(std::ostream& stream) const {
- if (!previousLayer.write_parameters(stream)) return false;
- for (std::size_t i = 0; i < OutputDimensions; ++i)
+ for (IndexType i = 0; i < OutputDimensions; ++i)
write_little_endian<BiasType>(stream, biases[i]);
- for (std::size_t i = 0; i < OutputDimensions * PaddedInputDimensions; ++i)
+ for (IndexType i = 0; i < OutputDimensions * PaddedInputDimensions; ++i)
write_little_endian<WeightType>(stream, weights[get_weight_index(i)]);
return !stream.fail();
// Forward propagation
const OutputType* propagate(
- const TransformedFeatureType* transformedFeatures, char* buffer) const {
- const auto input = previousLayer.propagate(
- transformedFeatures, buffer + SelfBufferSize);
- OutputType* output = reinterpret_cast<OutputType*>(buffer);
+ const InputType* input, OutputType* output) const {
#if defined (USE_AVX512)
using acc_vec_t = __m512i;
#if defined (USE_SSSE3) || defined (USE_NEON)
const in_vec_t* invec = reinterpret_cast<const in_vec_t*>(input);
-
// Perform accumulation to registers for each big block
for (IndexType bigBlock = 0; bigBlock < NumBigBlocks; ++bigBlock)
{
using BiasType = OutputType;
using WeightType = std::int8_t;
- PreviousLayer previousLayer;
-
alignas(CacheLineSize) BiasType biases[OutputDimensions];
alignas(CacheLineSize) WeightType weights[OutputDimensions * PaddedInputDimensions];
};
- template <typename PreviousLayer, IndexType OutDims>
- class AffineTransform<PreviousLayer, OutDims, std::enable_if_t<(PreviousLayer::OutputDimensions < 2*64-1)>> {
+ template <IndexType InDims, IndexType OutDims>
+ class AffineTransform<InDims, OutDims, std::enable_if_t<(ceil_to_multiple<IndexType>(InDims, MaxSimdWidth) < LargeInputSize)>> {
public:
// Input/output type
- using InputType = typename PreviousLayer::OutputType;
+ using InputType = std::uint8_t;
using OutputType = std::int32_t;
- static_assert(std::is_same<InputType, std::uint8_t>::value, "");
// Number of input/output dimensions
- static constexpr IndexType InputDimensions =
- PreviousLayer::OutputDimensions;
+ static constexpr IndexType InputDimensions = InDims;
static constexpr IndexType OutputDimensions = OutDims;
+
static constexpr IndexType PaddedInputDimensions =
- ceil_to_multiple<IndexType>(InputDimensions, MaxSimdWidth);
+ ceil_to_multiple<IndexType>(InputDimensions, MaxSimdWidth);
+ static constexpr IndexType PaddedOutputDimensions =
+ ceil_to_multiple<IndexType>(OutputDimensions, MaxSimdWidth);
- static_assert(PaddedInputDimensions < 128, "Something went wrong. This specialization should not have been chosen.");
+ using OutputBuffer = OutputType[PaddedOutputDimensions];
+
+ static_assert(PaddedInputDimensions < LargeInputSize, "Something went wrong. This specialization should not have been chosen.");
#if defined (USE_SSSE3)
static constexpr const IndexType OutputSimdWidth = SimdWidth / 4;
static constexpr const IndexType InputSimdWidth = SimdWidth;
#endif
- // Size of forward propagation buffer used in this layer
- static constexpr std::size_t SelfBufferSize =
- ceil_to_multiple(OutputDimensions * sizeof(OutputType), CacheLineSize);
-
- // Size of the forward propagation buffer used from the input layer to this layer
- static constexpr std::size_t BufferSize =
- PreviousLayer::BufferSize + SelfBufferSize;
-
// Hash value embedded in the evaluation file
- static constexpr std::uint32_t get_hash_value() {
+ static constexpr std::uint32_t get_hash_value(std::uint32_t prevHash) {
std::uint32_t hashValue = 0xCC03DAE4u;
hashValue += OutputDimensions;
- hashValue ^= PreviousLayer::get_hash_value() >> 1;
- hashValue ^= PreviousLayer::get_hash_value() << 31;
+ hashValue ^= prevHash >> 1;
+ hashValue ^= prevHash << 31;
return hashValue;
}
// Read network parameters
bool read_parameters(std::istream& stream) {
- if (!previousLayer.read_parameters(stream)) return false;
- for (std::size_t i = 0; i < OutputDimensions; ++i)
+ for (IndexType i = 0; i < OutputDimensions; ++i)
biases[i] = read_little_endian<BiasType>(stream);
- for (std::size_t i = 0; i < OutputDimensions * PaddedInputDimensions; ++i)
+ for (IndexType i = 0; i < OutputDimensions * PaddedInputDimensions; ++i)
weights[get_weight_index(i)] = read_little_endian<WeightType>(stream);
return !stream.fail();
// Write network parameters
bool write_parameters(std::ostream& stream) const {
- if (!previousLayer.write_parameters(stream)) return false;
- for (std::size_t i = 0; i < OutputDimensions; ++i)
+ for (IndexType i = 0; i < OutputDimensions; ++i)
write_little_endian<BiasType>(stream, biases[i]);
- for (std::size_t i = 0; i < OutputDimensions * PaddedInputDimensions; ++i)
+ for (IndexType i = 0; i < OutputDimensions * PaddedInputDimensions; ++i)
write_little_endian<WeightType>(stream, weights[get_weight_index(i)]);
return !stream.fail();
}
// Forward propagation
const OutputType* propagate(
- const TransformedFeatureType* transformedFeatures, char* buffer) const {
- const auto input = previousLayer.propagate(
- transformedFeatures, buffer + SelfBufferSize);
- const auto output = reinterpret_cast<OutputType*>(buffer);
+ const InputType* input, OutputType* output) const {
#if defined (USE_AVX2)
using vec_t = __m256i;
#if defined (USE_SSSE3)
const auto inputVector = reinterpret_cast<const vec_t*>(input);
- static_assert(InputDimensions % 8 == 0);
static_assert(OutputDimensions % OutputSimdWidth == 0 || OutputDimensions == 1);
if constexpr (OutputDimensions % OutputSimdWidth == 0)
{
- constexpr IndexType NumChunks = InputDimensions / 4;
+ constexpr IndexType NumChunks = ceil_to_multiple<IndexType>(InputDimensions, 8) / 4;
constexpr IndexType NumRegs = OutputDimensions / OutputSimdWidth;
const auto input32 = reinterpret_cast<const std::int32_t*>(input);
using BiasType = OutputType;
using WeightType = std::int8_t;
- PreviousLayer previousLayer;
-
alignas(CacheLineSize) BiasType biases[OutputDimensions];
alignas(CacheLineSize) WeightType weights[OutputDimensions * PaddedInputDimensions];
};
namespace Stockfish::Eval::NNUE::Layers {
// Clipped ReLU
- template <typename PreviousLayer>
+ template <IndexType InDims>
class ClippedReLU {
public:
// Input/output type
- using InputType = typename PreviousLayer::OutputType;
+ using InputType = std::int32_t;
using OutputType = std::uint8_t;
- static_assert(std::is_same<InputType, std::int32_t>::value, "");
// Number of input/output dimensions
- static constexpr IndexType InputDimensions = PreviousLayer::OutputDimensions;
+ static constexpr IndexType InputDimensions = InDims;
static constexpr IndexType OutputDimensions = InputDimensions;
static constexpr IndexType PaddedOutputDimensions =
ceil_to_multiple<IndexType>(OutputDimensions, 32);
- // Size of forward propagation buffer used in this layer
- static constexpr std::size_t SelfBufferSize =
- ceil_to_multiple(OutputDimensions * sizeof(OutputType), CacheLineSize);
-
- // Size of the forward propagation buffer used from the input layer to this layer
- static constexpr std::size_t BufferSize =
- PreviousLayer::BufferSize + SelfBufferSize;
+ using OutputBuffer = OutputType[PaddedOutputDimensions];
// Hash value embedded in the evaluation file
- static constexpr std::uint32_t get_hash_value() {
+ static constexpr std::uint32_t get_hash_value(std::uint32_t prevHash) {
std::uint32_t hashValue = 0x538D24C7u;
- hashValue += PreviousLayer::get_hash_value();
+ hashValue += prevHash;
return hashValue;
}
// Read network parameters
- bool read_parameters(std::istream& stream) {
- return previousLayer.read_parameters(stream);
+ bool read_parameters(std::istream&) {
+ return true;
}
// Write network parameters
- bool write_parameters(std::ostream& stream) const {
- return previousLayer.write_parameters(stream);
+ bool write_parameters(std::ostream&) const {
+ return true;
}
// Forward propagation
const OutputType* propagate(
- const TransformedFeatureType* transformedFeatures, char* buffer) const {
- const auto input = previousLayer.propagate(
- transformedFeatures, buffer + SelfBufferSize);
- const auto output = reinterpret_cast<OutputType*>(buffer);
+ const InputType* input, OutputType* output) const {
#if defined(USE_AVX2)
if constexpr (InputDimensions % SimdWidth == 0) {
std::max(0, std::min(127, input[i] >> WeightScaleBits)));
}
- // Affine transform layers expect that there is at least
- // ceil_to_multiple(OutputDimensions, 32) initialized values.
- // We cannot do this in the affine transform because it requires
- // preallocating space here.
- for (IndexType i = OutputDimensions; i < PaddedOutputDimensions; ++i) {
- output[i] = 0;
- }
-
return output;
}
-
- private:
- PreviousLayer previousLayer;
};
} // namespace Stockfish::Eval::NNUE::Layers
+++ /dev/null
-/*
- Stockfish, a UCI chess playing engine derived from Glaurung 2.1
- Copyright (C) 2004-2022 The Stockfish developers (see AUTHORS file)
-
- Stockfish is free software: you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation, either version 3 of the License, or
- (at your option) any later version.
-
- Stockfish is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program. If not, see <http://www.gnu.org/licenses/>.
-*/
-
-// NNUE evaluation function layer InputSlice definition
-
-#ifndef NNUE_LAYERS_INPUT_SLICE_H_INCLUDED
-#define NNUE_LAYERS_INPUT_SLICE_H_INCLUDED
-
-#include "../nnue_common.h"
-
-namespace Stockfish::Eval::NNUE::Layers {
-
-// Input layer
-template <IndexType OutDims, IndexType Offset = 0>
-class InputSlice {
- public:
- // Need to maintain alignment
- static_assert(Offset % MaxSimdWidth == 0, "");
-
- // Output type
- using OutputType = TransformedFeatureType;
-
- // Output dimensionality
- static constexpr IndexType OutputDimensions = OutDims;
-
- // Size of forward propagation buffer used from the input layer to this layer
- static constexpr std::size_t BufferSize = 0;
-
- // Hash value embedded in the evaluation file
- static constexpr std::uint32_t get_hash_value() {
- std::uint32_t hashValue = 0xEC42E90Du;
- hashValue ^= OutputDimensions ^ (Offset << 10);
- return hashValue;
- }
-
- // Read network parameters
- bool read_parameters(std::istream& /*stream*/) {
- return true;
- }
-
- // Write network parameters
- bool write_parameters(std::ostream& /*stream*/) const {
- return true;
- }
-
- // Forward propagation
- const OutputType* propagate(
- const TransformedFeatureType* transformedFeatures,
- char* /*buffer*/) const {
- return transformedFeatures + Offset;
- }
-
- private:
-};
-
-} // namespace Stockfish::Eval::NNUE::Layers
-
-#endif // #ifndef NNUE_LAYERS_INPUT_SLICE_H_INCLUDED
--- /dev/null
+/*
+ Stockfish, a UCI chess playing engine derived from Glaurung 2.1
+ Copyright (C) 2004-2022 The Stockfish developers (see AUTHORS file)
+
+ Stockfish is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ Stockfish is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+// Definition of layer SqrClippedReLU of NNUE evaluation function
+
+#ifndef NNUE_LAYERS_SQR_CLIPPED_RELU_H_INCLUDED
+#define NNUE_LAYERS_SQR_CLIPPED_RELU_H_INCLUDED
+
+#include "../nnue_common.h"
+
+namespace Stockfish::Eval::NNUE::Layers {
+
+  // Squared Clipped ReLU (clips to [0, 127] and squares the input)
+ template <IndexType InDims>
+ class SqrClippedReLU {
+ public:
+ // Input/output type
+ using InputType = std::int32_t;
+ using OutputType = std::uint8_t;
+
+ // Number of input/output dimensions
+ static constexpr IndexType InputDimensions = InDims;
+ static constexpr IndexType OutputDimensions = InputDimensions;
+ static constexpr IndexType PaddedOutputDimensions =
+ ceil_to_multiple<IndexType>(OutputDimensions, 32);
+
+ using OutputBuffer = OutputType[PaddedOutputDimensions];
+
+ // Hash value embedded in the evaluation file
+ static constexpr std::uint32_t get_hash_value(std::uint32_t prevHash) {
+ std::uint32_t hashValue = 0x538D24C7u;
+ hashValue += prevHash;
+ return hashValue;
+ }
+
+ // Read network parameters
+ bool read_parameters(std::istream&) {
+ return true;
+ }
+
+ // Write network parameters
+ bool write_parameters(std::ostream&) const {
+ return true;
+ }
+
+ // Forward propagation
+ const OutputType* propagate(
+ const InputType* input, OutputType* output) const {
+
+ #if defined(USE_SSE2)
+ constexpr IndexType NumChunks = InputDimensions / 16;
+
+ #ifdef USE_SSE41
+ const __m128i Zero = _mm_setzero_si128();
+ #else
+ const __m128i k0x80s = _mm_set1_epi8(-128);
+ #endif
+
+ static_assert(WeightScaleBits == 6);
+ const auto in = reinterpret_cast<const __m128i*>(input);
+ const auto out = reinterpret_cast<__m128i*>(output);
+ for (IndexType i = 0; i < NumChunks; ++i) {
+ __m128i words0 = _mm_packs_epi32(
+ _mm_load_si128(&in[i * 4 + 0]),
+ _mm_load_si128(&in[i * 4 + 1]));
+ __m128i words1 = _mm_packs_epi32(
+ _mm_load_si128(&in[i * 4 + 2]),
+ _mm_load_si128(&in[i * 4 + 3]));
+
+          // _mm_mulhi_epi16(x, x) yields (x*x) >> 16; shifting right 3 more
+          // gives (x*x) >> 19, i.e. >> (2 * WeightScaleBits) followed by / 128,
+          // matching the scalar fallback below (WeightScaleBits == 6 asserted above).
+ words0 = _mm_srli_epi16(_mm_mulhi_epi16(words0, words0), 3);
+ words1 = _mm_srli_epi16(_mm_mulhi_epi16(words1, words1), 3);
+
+ const __m128i packedbytes = _mm_packs_epi16(words0, words1);
+
+ _mm_store_si128(&out[i],
+
+ #ifdef USE_SSE41
+ _mm_max_epi8(packedbytes, Zero)
+ #else
+ _mm_subs_epi8(_mm_adds_epi8(packedbytes, k0x80s), k0x80s)
+ #endif
+
+ );
+ }
+ constexpr IndexType Start = NumChunks * 16;
+
+ #else
+ constexpr IndexType Start = 0;
+ #endif
+
+ for (IndexType i = Start; i < InputDimensions; ++i) {
+ output[i] = static_cast<OutputType>(
+          // really should be / 127, but we use / 128 to keep it fast;
+ // needs to be accounted for in the trainer
+ std::max(0ll, std::min(127ll, (((long long)input[i] * input[i]) >> (2 * WeightScaleBits)) / 128)));
+ }
+
+ return output;
+ }
+ };
+
+} // namespace Stockfish::Eval::NNUE::Layers
+
+#endif // NNUE_LAYERS_SQR_CLIPPED_RELU_H_INCLUDED
#ifndef NNUE_ARCHITECTURE_H_INCLUDED
#define NNUE_ARCHITECTURE_H_INCLUDED
+#include <memory>
+
#include "nnue_common.h"
#include "features/half_ka_v2_hm.h"
-#include "layers/input_slice.h"
#include "layers/affine_transform.h"
#include "layers/clipped_relu.h"
+#include "layers/sqr_clipped_relu.h"
-namespace Stockfish::Eval::NNUE {
-
- // Input features used in evaluation function
- using FeatureSet = Features::HalfKAv2_hm;
-
- // Number of input feature dimensions after conversion
- constexpr IndexType TransformedFeatureDimensions = 1024;
- constexpr IndexType PSQTBuckets = 8;
- constexpr IndexType LayerStacks = 8;
+#include "../misc.h"
- namespace Layers {
-
- // Define network structure
- using InputLayer = InputSlice<TransformedFeatureDimensions * 2>;
- using HiddenLayer1 = ClippedReLU<AffineTransform<InputLayer, 8>>;
- using HiddenLayer2 = ClippedReLU<AffineTransform<HiddenLayer1, 32>>;
- using OutputLayer = AffineTransform<HiddenLayer2, 1>;
-
- } // namespace Layers
-
- using Network = Layers::OutputLayer;
+namespace Stockfish::Eval::NNUE {
- static_assert(TransformedFeatureDimensions % MaxSimdWidth == 0, "");
- static_assert(Network::OutputDimensions == 1, "");
- static_assert(std::is_same<Network::OutputType, std::int32_t>::value, "");
+// Input features used in evaluation function
+using FeatureSet = Features::HalfKAv2_hm;
+
+// Number of input feature dimensions after conversion
+constexpr IndexType TransformedFeatureDimensions = 1024;
+constexpr IndexType PSQTBuckets = 8;
+constexpr IndexType LayerStacks = 8;
+
+struct Network
+{
+ static constexpr int FC_0_OUTPUTS = 15;
+ static constexpr int FC_1_OUTPUTS = 32;
+
+ Layers::AffineTransform<TransformedFeatureDimensions, FC_0_OUTPUTS + 1> fc_0;
+ Layers::SqrClippedReLU<FC_0_OUTPUTS + 1> ac_sqr_0;
+ Layers::ClippedReLU<FC_0_OUTPUTS + 1> ac_0;
+ Layers::AffineTransform<FC_0_OUTPUTS * 2, FC_1_OUTPUTS> fc_1;
+ Layers::ClippedReLU<FC_1_OUTPUTS> ac_1;
+ Layers::AffineTransform<FC_1_OUTPUTS, 1> fc_2;
+
+ // Hash value embedded in the evaluation file
+ static constexpr std::uint32_t get_hash_value() {
+ // input slice hash
+ std::uint32_t hashValue = 0xEC42E90Du;
+ hashValue ^= TransformedFeatureDimensions * 2;
+
+ hashValue = decltype(fc_0)::get_hash_value(hashValue);
+ hashValue = decltype(ac_0)::get_hash_value(hashValue);
+ hashValue = decltype(fc_1)::get_hash_value(hashValue);
+ hashValue = decltype(ac_1)::get_hash_value(hashValue);
+ hashValue = decltype(fc_2)::get_hash_value(hashValue);
+
+ return hashValue;
+ }
+
+ // Read network parameters
+ bool read_parameters(std::istream& stream) {
+ if (!fc_0.read_parameters(stream)) return false;
+ if (!ac_0.read_parameters(stream)) return false;
+ if (!fc_1.read_parameters(stream)) return false;
+ if (!ac_1.read_parameters(stream)) return false;
+ if (!fc_2.read_parameters(stream)) return false;
+ return true;
+ }
+
+  // Write network parameters
+ bool write_parameters(std::ostream& stream) const {
+ if (!fc_0.write_parameters(stream)) return false;
+ if (!ac_0.write_parameters(stream)) return false;
+ if (!fc_1.write_parameters(stream)) return false;
+ if (!ac_1.write_parameters(stream)) return false;
+ if (!fc_2.write_parameters(stream)) return false;
+ return true;
+ }
+
+ std::int32_t propagate(const TransformedFeatureType* transformedFeatures)
+ {
+ struct alignas(CacheLineSize) Buffer
+ {
+ alignas(CacheLineSize) decltype(fc_0)::OutputBuffer fc_0_out;
+ alignas(CacheLineSize) decltype(ac_sqr_0)::OutputType ac_sqr_0_out[ceil_to_multiple<IndexType>(FC_0_OUTPUTS * 2, 32)];
+ alignas(CacheLineSize) decltype(ac_0)::OutputBuffer ac_0_out;
+ alignas(CacheLineSize) decltype(fc_1)::OutputBuffer fc_1_out;
+ alignas(CacheLineSize) decltype(ac_1)::OutputBuffer ac_1_out;
+ alignas(CacheLineSize) decltype(fc_2)::OutputBuffer fc_2_out;
+
+ Buffer()
+ {
+ std::memset(this, 0, sizeof(*this));
+ }
+ };
+
+#if defined(__clang__) && (__APPLE__)
+ // workaround for a bug reported with xcode 12
+ static thread_local auto tlsBuffer = std::make_unique<Buffer>();
+ // Access TLS only once, cache result.
+ Buffer& buffer = *tlsBuffer;
+#else
+ alignas(CacheLineSize) static thread_local Buffer buffer;
+#endif
+
+ fc_0.propagate(transformedFeatures, buffer.fc_0_out);
+ ac_sqr_0.propagate(buffer.fc_0_out, buffer.ac_sqr_0_out);
+ ac_0.propagate(buffer.fc_0_out, buffer.ac_0_out);
+ std::memcpy(buffer.ac_sqr_0_out + FC_0_OUTPUTS, buffer.ac_0_out, FC_0_OUTPUTS * sizeof(decltype(ac_0)::OutputType));
+ fc_1.propagate(buffer.ac_sqr_0_out, buffer.fc_1_out);
+ ac_1.propagate(buffer.fc_1_out, buffer.ac_1_out);
+ fc_2.propagate(buffer.ac_1_out, buffer.fc_2_out);
+
+ // buffer.fc_0_out[FC_0_OUTPUTS] is such that 1.0 is equal to 127*(1<<WeightScaleBits) in quantized form
+ // but we want 1.0 to be equal to 600*OutputScale
+ std::int32_t fwdOut = int(buffer.fc_0_out[FC_0_OUTPUTS]) * (600*OutputScale) / (127*(1<<WeightScaleBits));
+ std::int32_t outputValue = buffer.fc_2_out[0] + fwdOut;
+
+ return outputValue;
+ }
+};
} // namespace Stockfish::Eval::NNUE
{
for (; i + 1 < sizeof(IntType); ++i)
{
- u[i] = v;
+ u[i] = (std::uint8_t)v;
v >>= 8;
}
}
- u[i] = v;
+ u[i] = (std::uint8_t)v;
stream.write(reinterpret_cast<char*>(u), sizeof(IntType));
}
#define vec_store(a,b) _mm512_store_si512(a,b)
#define vec_add_16(a,b) _mm512_add_epi16(a,b)
#define vec_sub_16(a,b) _mm512_sub_epi16(a,b)
+ #define vec_mul_16(a,b) _mm512_mullo_epi16(a,b)
+ #define vec_zero() _mm512_setzero_epi32()
+ #define vec_set_16(a) _mm512_set1_epi16(a)
+ #define vec_max_16(a,b) _mm512_max_epi16(a,b)
+ #define vec_min_16(a,b) _mm512_min_epi16(a,b)
+ inline vec_t vec_msb_pack_16(vec_t a, vec_t b){
+ vec_t compacted = _mm512_packs_epi16(_mm512_srli_epi16(a,7),_mm512_srli_epi16(b,7));
+ return _mm512_permutexvar_epi64(_mm512_setr_epi64(0, 2, 4, 6, 1, 3, 5, 7), compacted);
+ }
#define vec_load_psqt(a) _mm256_load_si256(a)
#define vec_store_psqt(a,b) _mm256_store_si256(a,b)
#define vec_add_psqt_32(a,b) _mm256_add_epi32(a,b)
#define vec_sub_psqt_32(a,b) _mm256_sub_epi32(a,b)
#define vec_zero_psqt() _mm256_setzero_si256()
#define NumRegistersSIMD 32
+ #define MaxChunkSize 64
#elif USE_AVX2
typedef __m256i vec_t;
#define vec_store(a,b) _mm256_store_si256(a,b)
#define vec_add_16(a,b) _mm256_add_epi16(a,b)
#define vec_sub_16(a,b) _mm256_sub_epi16(a,b)
+ #define vec_mul_16(a,b) _mm256_mullo_epi16(a,b)
+ #define vec_zero() _mm256_setzero_si256()
+ #define vec_set_16(a) _mm256_set1_epi16(a)
+ #define vec_max_16(a,b) _mm256_max_epi16(a,b)
+ #define vec_min_16(a,b) _mm256_min_epi16(a,b)
+ inline vec_t vec_msb_pack_16(vec_t a, vec_t b){
+ vec_t compacted = _mm256_packs_epi16(_mm256_srli_epi16(a,7), _mm256_srli_epi16(b,7));
+ return _mm256_permute4x64_epi64(compacted, 0b11011000);
+ }
#define vec_load_psqt(a) _mm256_load_si256(a)
#define vec_store_psqt(a,b) _mm256_store_si256(a,b)
#define vec_add_psqt_32(a,b) _mm256_add_epi32(a,b)
#define vec_sub_psqt_32(a,b) _mm256_sub_epi32(a,b)
#define vec_zero_psqt() _mm256_setzero_si256()
#define NumRegistersSIMD 16
+ #define MaxChunkSize 32
#elif USE_SSE2
typedef __m128i vec_t;
#define vec_store(a,b) *(a)=(b)
#define vec_add_16(a,b) _mm_add_epi16(a,b)
#define vec_sub_16(a,b) _mm_sub_epi16(a,b)
+ #define vec_mul_16(a,b) _mm_mullo_epi16(a,b)
+ #define vec_zero() _mm_setzero_si128()
+ #define vec_set_16(a) _mm_set1_epi16(a)
+ #define vec_max_16(a,b) _mm_max_epi16(a,b)
+ #define vec_min_16(a,b) _mm_min_epi16(a,b)
+ #define vec_msb_pack_16(a,b) _mm_packs_epi16(_mm_srli_epi16(a,7),_mm_srli_epi16(b,7))
#define vec_load_psqt(a) (*(a))
#define vec_store_psqt(a,b) *(a)=(b)
#define vec_add_psqt_32(a,b) _mm_add_epi32(a,b)
#define vec_sub_psqt_32(a,b) _mm_sub_epi32(a,b)
#define vec_zero_psqt() _mm_setzero_si128()
#define NumRegistersSIMD (Is64Bit ? 16 : 8)
+ #define MaxChunkSize 16
#elif USE_MMX
typedef __m64 vec_t;
#define vec_store(a,b) *(a)=(b)
#define vec_add_16(a,b) _mm_add_pi16(a,b)
#define vec_sub_16(a,b) _mm_sub_pi16(a,b)
+ #define vec_mul_16(a,b) _mm_mullo_pi16(a,b)
+ #define vec_zero() _mm_setzero_si64()
+ #define vec_set_16(a) _mm_set1_pi16(a)
+ inline vec_t vec_max_16(vec_t a,vec_t b){
+ vec_t comparison = _mm_cmpgt_pi16(a,b);
+ return _mm_or_si64(_mm_and_si64(comparison, a), _mm_andnot_si64(comparison, b));
+ }
+ inline vec_t vec_min_16(vec_t a,vec_t b){
+ vec_t comparison = _mm_cmpgt_pi16(a,b);
+ return _mm_or_si64(_mm_and_si64(comparison, b), _mm_andnot_si64(comparison, a));
+ }
+ #define vec_msb_pack_16(a,b) _mm_packs_pi16(_mm_srli_pi16(a,7),_mm_srli_pi16(b,7))
#define vec_load_psqt(a) (*(a))
#define vec_store_psqt(a,b) *(a)=(b)
#define vec_add_psqt_32(a,b) _mm_add_pi32(a,b)
#define vec_sub_psqt_32(a,b) _mm_sub_pi32(a,b)
#define vec_zero_psqt() _mm_setzero_si64()
+ #define vec_cleanup() _mm_empty()
#define NumRegistersSIMD 8
+ #define MaxChunkSize 8
#elif USE_NEON
typedef int16x8_t vec_t;
#define vec_store(a,b) *(a)=(b)
#define vec_add_16(a,b) vaddq_s16(a,b)
#define vec_sub_16(a,b) vsubq_s16(a,b)
+ #define vec_mul_16(a,b) vmulq_s16(a,b)
+ #define vec_zero() vec_t{0}
+ #define vec_set_16(a) vdupq_n_s16(a)
+ #define vec_max_16(a,b) vmaxq_s16(a,b)
+ #define vec_min_16(a,b) vminq_s16(a,b)
+ inline vec_t vec_msb_pack_16(vec_t a, vec_t b){
+ const int8x8_t shifta = vshrn_n_s16(a, 7);
+ const int8x8_t shiftb = vshrn_n_s16(b, 7);
+ const int8x16_t compacted = vcombine_s8(shifta,shiftb);
+ return *reinterpret_cast<const vec_t*> (&compacted);
+ }
#define vec_load_psqt(a) (*(a))
#define vec_store_psqt(a,b) *(a)=(b)
#define vec_add_psqt_32(a,b) vaddq_s32(a,b)
#define vec_sub_psqt_32(a,b) vsubq_s32(a,b)
#define vec_zero_psqt() psqt_vec_t{0}
#define NumRegistersSIMD 16
+ #define MaxChunkSize 16
#else
#undef VECTOR
// We use __m* types as template arguments, which causes GCC to emit warnings
// about losing some attribute information. This is irrelevant to us as we
// only take their size, so the following pragma are harmless.
+ #if defined(__GNUC__)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wignored-attributes"
+ #endif
template <typename SIMDRegisterType,
typename LaneType,
static constexpr int NumRegs = BestRegisterCount<vec_t, WeightType, TransformedFeatureDimensions, NumRegistersSIMD>();
static constexpr int NumPsqtRegs = BestRegisterCount<psqt_vec_t, PSQTWeightType, PSQTBuckets, NumRegistersSIMD>();
-
+ #if defined(__GNUC__)
#pragma GCC diagnostic pop
-
+ #endif
#endif
// Number of input/output dimensions
static constexpr IndexType InputDimensions = FeatureSet::Dimensions;
- static constexpr IndexType OutputDimensions = HalfDimensions * 2;
+ static constexpr IndexType OutputDimensions = HalfDimensions;
// Size of forward propagation buffer
static constexpr std::size_t BufferSize =
// Hash value embedded in the evaluation file
static constexpr std::uint32_t get_hash_value() {
- return FeatureSet::HashValue ^ OutputDimensions;
+ return FeatureSet::HashValue ^ (OutputDimensions * 2);
}
// Read network parameters
) / 2;
- #if defined(USE_AVX512)
-
- constexpr IndexType NumChunks = HalfDimensions / (SimdWidth * 2);
- static_assert(HalfDimensions % (SimdWidth * 2) == 0);
- const __m512i Control = _mm512_setr_epi64(0, 2, 4, 6, 1, 3, 5, 7);
- const __m512i Zero = _mm512_setzero_si512();
-
for (IndexType p = 0; p < 2; ++p)
{
- const IndexType offset = HalfDimensions * p;
- auto out = reinterpret_cast<__m512i*>(&output[offset]);
- for (IndexType j = 0; j < NumChunks; ++j)
- {
- __m512i sum0 = _mm512_load_si512(&reinterpret_cast<const __m512i*>
- (accumulation[perspectives[p]])[j * 2 + 0]);
- __m512i sum1 = _mm512_load_si512(&reinterpret_cast<const __m512i*>
- (accumulation[perspectives[p]])[j * 2 + 1]);
+ const IndexType offset = (HalfDimensions / 2) * p;
- _mm512_store_si512(&out[j], _mm512_permutexvar_epi64(Control,
- _mm512_max_epi8(_mm512_packs_epi16(sum0, sum1), Zero)));
- }
- }
- return psqt;
+#if defined(VECTOR)
- #elif defined(USE_AVX2)
+ constexpr IndexType OutputChunkSize = MaxChunkSize;
+ static_assert((HalfDimensions / 2) % OutputChunkSize == 0);
+ constexpr IndexType NumOutputChunks = HalfDimensions / 2 / OutputChunkSize;
- constexpr IndexType NumChunks = HalfDimensions / SimdWidth;
- constexpr int Control = 0b11011000;
- const __m256i Zero = _mm256_setzero_si256();
+ vec_t Zero = vec_zero();
+ vec_t One = vec_set_16(127);
- for (IndexType p = 0; p < 2; ++p)
- {
- const IndexType offset = HalfDimensions * p;
- auto out = reinterpret_cast<__m256i*>(&output[offset]);
- for (IndexType j = 0; j < NumChunks; ++j)
- {
- __m256i sum0 = _mm256_load_si256(&reinterpret_cast<const __m256i*>
- (accumulation[perspectives[p]])[j * 2 + 0]);
- __m256i sum1 = _mm256_load_si256(&reinterpret_cast<const __m256i*>
- (accumulation[perspectives[p]])[j * 2 + 1]);
+ const vec_t* in0 = reinterpret_cast<const vec_t*>(&(accumulation[perspectives[p]][0]));
+ const vec_t* in1 = reinterpret_cast<const vec_t*>(&(accumulation[perspectives[p]][HalfDimensions / 2]));
+ vec_t* out = reinterpret_cast< vec_t*>(output + offset);
- _mm256_store_si256(&out[j], _mm256_permute4x64_epi64(
- _mm256_max_epi8(_mm256_packs_epi16(sum0, sum1), Zero), Control));
- }
- }
- return psqt;
-
- #elif defined(USE_SSE2)
-
- #ifdef USE_SSE41
- constexpr IndexType NumChunks = HalfDimensions / SimdWidth;
- const __m128i Zero = _mm_setzero_si128();
- #else
- constexpr IndexType NumChunks = HalfDimensions / SimdWidth;
- const __m128i k0x80s = _mm_set1_epi8(-128);
- #endif
-
- for (IndexType p = 0; p < 2; ++p)
- {
- const IndexType offset = HalfDimensions * p;
- auto out = reinterpret_cast<__m128i*>(&output[offset]);
- for (IndexType j = 0; j < NumChunks; ++j)
+ for (IndexType j = 0; j < NumOutputChunks; j += 1)
{
- __m128i sum0 = _mm_load_si128(&reinterpret_cast<const __m128i*>
- (accumulation[perspectives[p]])[j * 2 + 0]);
- __m128i sum1 = _mm_load_si128(&reinterpret_cast<const __m128i*>
- (accumulation[perspectives[p]])[j * 2 + 1]);
- const __m128i packedbytes = _mm_packs_epi16(sum0, sum1);
-
- #ifdef USE_SSE41
- _mm_store_si128(&out[j], _mm_max_epi8(packedbytes, Zero));
- #else
- _mm_store_si128(&out[j], _mm_subs_epi8(_mm_adds_epi8(packedbytes, k0x80s), k0x80s));
- #endif
- }
- }
- return psqt;
+ const vec_t sum0a = vec_max_16(vec_min_16(in0[j * 2 + 0], One), Zero);
+ const vec_t sum0b = vec_max_16(vec_min_16(in0[j * 2 + 1], One), Zero);
+ const vec_t sum1a = vec_max_16(vec_min_16(in1[j * 2 + 0], One), Zero);
+ const vec_t sum1b = vec_max_16(vec_min_16(in1[j * 2 + 1], One), Zero);
- #elif defined(USE_MMX)
+ const vec_t pa = vec_mul_16(sum0a, sum1a);
+ const vec_t pb = vec_mul_16(sum0b, sum1b);
- constexpr IndexType NumChunks = HalfDimensions / SimdWidth;
- const __m64 k0x80s = _mm_set1_pi8(-128);
-
- for (IndexType p = 0; p < 2; ++p)
- {
- const IndexType offset = HalfDimensions * p;
- auto out = reinterpret_cast<__m64*>(&output[offset]);
- for (IndexType j = 0; j < NumChunks; ++j)
- {
- __m64 sum0 = *(&reinterpret_cast<const __m64*>(accumulation[perspectives[p]])[j * 2 + 0]);
- __m64 sum1 = *(&reinterpret_cast<const __m64*>(accumulation[perspectives[p]])[j * 2 + 1]);
- const __m64 packedbytes = _mm_packs_pi16(sum0, sum1);
- out[j] = _mm_subs_pi8(_mm_adds_pi8(packedbytes, k0x80s), k0x80s);
+ out[j] = vec_msb_pack_16(pa, pb);
}
- }
- _mm_empty();
- return psqt;
-
- #elif defined(USE_NEON)
-
- constexpr IndexType NumChunks = HalfDimensions / (SimdWidth / 2);
- const int8x8_t Zero = {0};
-
- for (IndexType p = 0; p < 2; ++p)
- {
- const IndexType offset = HalfDimensions * p;
- const auto out = reinterpret_cast<int8x8_t*>(&output[offset]);
- constexpr IndexType UnrollFactor = 16;
- static_assert(UnrollFactor % UnrollFactor == 0);
- for (IndexType j = 0; j < NumChunks; j += UnrollFactor)
- {
- int16x8_t sums[UnrollFactor];
- for (IndexType i = 0; i < UnrollFactor; ++i)
- sums[i] = reinterpret_cast<const int16x8_t*>(accumulation[perspectives[p]])[j+i];
+#else
- for (IndexType i = 0; i < UnrollFactor; ++i)
- out[j+i] = vmax_s8(vqmovn_s16(sums[i]), Zero);
+ for (IndexType j = 0; j < HalfDimensions / 2; ++j) {
+ BiasType sum0 = accumulation[static_cast<int>(perspectives[p])][j + 0];
+ BiasType sum1 = accumulation[static_cast<int>(perspectives[p])][j + HalfDimensions / 2];
+ sum0 = std::max<int>(0, std::min<int>(127, sum0));
+ sum1 = std::max<int>(0, std::min<int>(127, sum1));
+ output[offset + j] = static_cast<OutputType>(sum0 * sum1 / 128);
}
+
+#endif
}
- return psqt;
- #else
+#if defined(vec_cleanup)
+ vec_cleanup();
+#endif
- for (IndexType p = 0; p < 2; ++p)
- {
- const IndexType offset = HalfDimensions * p;
- for (IndexType j = 0; j < HalfDimensions; ++j)
- {
- BiasType sum = accumulation[perspectives[p]][j];
- output[offset + j] = static_cast<OutputType>(std::max<int>(0, std::min<int>(127, sum)));
- }
- }
return psqt;
- #endif
-
} // end of function transform()
#define S(mg, eg) make_score(mg, eg)
// Pawn penalties
- constexpr Score Backward = S( 9, 22);
- constexpr Score Doubled = S(13, 51);
- constexpr Score DoubledEarly = S(20, 7);
- constexpr Score Isolated = S( 3, 15);
- constexpr Score WeakLever = S( 4, 58);
- constexpr Score WeakUnopposed = S(13, 24);
+ constexpr Score Backward = S( 6, 19);
+ constexpr Score Doubled = S(11, 51);
+ constexpr Score DoubledEarly = S(17, 7);
+ constexpr Score Isolated = S( 1, 20);
+ constexpr Score WeakLever = S( 2, 57);
+ constexpr Score WeakUnopposed = S(15, 18);
// Bonus for blocked pawns at 5th or 6th rank
- constexpr Score BlockedPawn[2] = { S(-17, -6), S(-9, 2) };
+ constexpr Score BlockedPawn[2] = { S(-19, -8), S(-7, 3) };
constexpr Score BlockedStorm[RANK_NB] = {
- S(0, 0), S(0, 0), S(75, 78), S(-8, 16), S(-6, 10), S(-6, 6), S(0, 2)
+ S(0, 0), S(0, 0), S(64, 75), S(-3, 14), S(-12, 19), S(-7, 4), S(-10, 5)
};
// Connected pawn bonus
- constexpr int Connected[RANK_NB] = { 0, 5, 7, 11, 23, 48, 87 };
+ constexpr int Connected[RANK_NB] = { 0, 3, 7, 7, 15, 54, 86 };
// Strength of pawn shelter for our king by [distance from edge][rank].
// RANK_1 = 0 is used for files where we have no pawn, or pawn is behind our king.
constexpr Value ShelterStrength[int(FILE_NB) / 2][RANK_NB] = {
- { V( -5), V( 82), V( 92), V( 54), V( 36), V( 22), V( 28) },
- { V(-44), V( 63), V( 33), V(-50), V(-30), V(-12), V( -62) },
- { V(-11), V( 77), V( 22), V( -6), V( 31), V( 8), V( -45) },
- { V(-39), V(-12), V(-29), V(-50), V(-43), V(-68), V(-164) }
+ { V(-2), V(85), V(95), V(53), V(39), V(23), V(25) },
+ { V(-55), V(64), V(32), V(-55), V(-30), V(-11), V(-61) },
+ { V(-11), V(75), V(19), V(-6), V(26), V(9), V(-47) },
+ { V(-41), V(-11), V(-27), V(-58), V(-42), V(-66), V(-163) }
};
// Danger of enemy pawns moving toward our king by [distance from edge][rank].
// is behind our king. Note that UnblockedStorm[0][1-2] accommodate opponent pawn
// on edge, likely blocked by our king.
constexpr Value UnblockedStorm[int(FILE_NB) / 2][RANK_NB] = {
- { V( 87), V(-288), V(-168), V( 96), V( 47), V( 44), V( 46) },
- { V( 42), V( -25), V( 120), V( 45), V( 34), V( -9), V( 24) },
- { V( -8), V( 51), V( 167), V( 35), V( -4), V(-16), V(-12) },
- { V(-17), V( -13), V( 100), V( 4), V( 9), V(-16), V(-31) }
+ { V(94), V(-280), V(-170), V(90), V(59), V(47), V(53) },
+ { V(43), V(-17), V(128), V(39), V(26), V(-17), V(15) },
+ { V(-9), V(62), V(170), V(34), V(-5), V(-20), V(-11) },
+ { V(-27), V(-19), V(106), V(10), V(2), V(-13), V(-24) }
};
// KingOnFile[semi-open Us][semi-open Them] contains bonuses/penalties
// for king when the king is on a semi-open or open file.
- constexpr Score KingOnFile[2][2] = {{ S(-21,10), S(-7, 1) },
- { S( 0,-3), S( 9,-4) }};
+ constexpr Score KingOnFile[2][2] = {{ S(-18,11), S(-6,-3) },
+ { S( 0, 0), S( 5,-4) }};
#undef S
#undef V
if (captured)
k ^= Zobrist::psq[captured][to];
- return k ^ Zobrist::psq[pc][to] ^ Zobrist::psq[pc][from];
+ k ^= Zobrist::psq[pc][to] ^ Zobrist::psq[pc][from];
+
+ return (captured || type_of(pc) == PAWN)
+ ? k : adjust_key50<true>(k);
}
// Don't allow pinned pieces to attack as long as there are
// pinners on their original square.
if (pinners(~stm) & occupied)
+ {
stmAttackers &= ~blockers_for_king(stm);
- if (!stmAttackers)
- break;
+ if (!stmAttackers)
+ break;
+ }
res ^= 1;
Bitboard attackers_to(Square s) const;
Bitboard attackers_to(Square s, Bitboard occupied) const;
Bitboard slider_blockers(Bitboard sliders, Square s, Bitboard& pinners) const;
+ template<PieceType Pt> Bitboard attacks_by(Color c) const;
// Properties of moves
bool legal(Move m) const;
bool pseudo_legal(const Move m) const;
bool capture(Move m) const;
- bool capture_or_promotion(Move m) const;
bool gives_check(Move m) const;
Piece moved_piece(Move m) const;
Piece captured_piece() const;
bool has_repeated() const;
int rule50_count() const;
Score psq_score() const;
+ Value psq_eg_stm() const;
Value non_pawn_material(Color c) const;
Value non_pawn_material() const;
void move_piece(Square from, Square to);
template<bool Do>
void do_castling(Color us, Square from, Square& to, Square& rfrom, Square& rto);
+ template<bool AfterMove>
+ Key adjust_key50(Key k) const;
// Data members
Piece board[SQUARE_NB];
return attackers_to(s, pieces());
}
+template<PieceType Pt>
+inline Bitboard Position::attacks_by(Color c) const {
+
+ if constexpr (Pt == PAWN)
+ return c == WHITE ? pawn_attacks_bb<WHITE>(pieces(WHITE, PAWN))
+ : pawn_attacks_bb<BLACK>(pieces(BLACK, PAWN));
+ else
+ {
+ Bitboard threats = 0;
+ Bitboard attackers = pieces(c, Pt);
+ while (attackers)
+ threats |= attacks_bb<Pt>(pop_lsb(attackers), pieces());
+ return threats;
+ }
+}
+
inline Bitboard Position::checkers() const {
return st->checkersBB;
}
}
inline Key Position::key() const {
- return st->rule50 < 14 ? st->key
- : st->key ^ make_key((st->rule50 - 14) / 8);
+ return adjust_key50<false>(st->key);
+}
+
+template<bool AfterMove>
+inline Key Position::adjust_key50(Key k) const
+{
+ return st->rule50 < 14 - AfterMove
+ ? k : k ^ make_key((st->rule50 - (14 - AfterMove)) / 8);
}
inline Key Position::pawn_key() const {
return psq;
}
+inline Value Position::psq_eg_stm() const {
+ return (sideToMove == WHITE ? 1 : -1) * eg_value(psq);
+}
+
inline Value Position::non_pawn_material(Color c) const {
return st->nonPawnMaterial[c];
}
return chess960;
}
-inline bool Position::capture_or_promotion(Move m) const {
- assert(is_ok(m));
- return type_of(m) != NORMAL ? type_of(m) != CASTLING : !empty(to_sq(m));
-}
-
inline bool Position::capture(Move m) const {
assert(is_ok(m));
// Castling is encoded as "king captures rook"
// Futility margin
Value futility_margin(Depth d, bool improving) {
- return Value(214 * (d - improving));
+ return Value(165 * (d - improving));
}
// Reductions lookup table, initialized at startup
Depth reduction(bool i, Depth d, int mn, Value delta, Value rootDelta) {
int r = Reductions[d] * Reductions[mn];
- return (r + 1358 - int(delta) * 1024 / int(rootDelta)) / 1024 + (!i && r > 904);
+ return (r + 1642 - int(delta) * 1024 / int(rootDelta)) / 1024 + (!i && r > 916);
}
constexpr int futility_move_count(bool improving, Depth depth) {
- return (3 + depth * depth) / (2 - improving);
+ return improving ? (3 + depth * depth)
+ : (3 + depth * depth) / 2;
}
// History and stats update bonus, based on depth
int stat_bonus(Depth d) {
- return std::min((6 * d + 229) * d - 215 , 2000);
+ return std::min((12 * d + 282) * d - 349 , 1594);
}
// Add a small random component to draw evaluations to avoid 3-fold blindness
- Value value_draw(Thread* thisThread) {
- return VALUE_DRAW + Value(2 * (thisThread->nodes & 1) - 1);
- }
-
- // Check if the current thread is in a search explosion
- ExplosionState search_explosion(Thread* thisThread) {
-
- uint64_t nodesNow = thisThread->nodes;
- bool explosive = thisThread->doubleExtensionAverage[WHITE].is_greater(2, 100)
- || thisThread->doubleExtensionAverage[BLACK].is_greater(2, 100);
-
- if (explosive)
- thisThread->nodesLastExplosive = nodesNow;
- else
- thisThread->nodesLastNormal = nodesNow;
-
- if ( explosive
- && thisThread->state == EXPLOSION_NONE
- && nodesNow - thisThread->nodesLastNormal > 6000000)
- thisThread->state = MUST_CALM_DOWN;
-
- if ( thisThread->state == MUST_CALM_DOWN
- && nodesNow - thisThread->nodesLastExplosive > 6000000)
- thisThread->state = EXPLOSION_NONE;
-
- return thisThread->state;
+ Value value_draw(const Thread* thisThread) {
+ return VALUE_DRAW - 1 + Value(thisThread->nodes & 0x2);
}
// Skill structure is used to implement strength limit. If we have an uci_elo then
Value value_to_tt(Value v, int ply);
Value value_from_tt(Value v, int ply, int r50c);
- void update_pv(Move* pv, Move move, Move* childPv);
+ void update_pv(Move* pv, Move move, const Move* childPv);
void update_continuation_histories(Stack* ss, Piece pc, Square to, int bonus);
void update_quiet_stats(const Position& pos, Stack* ss, Move move, int bonus);
void update_all_stats(const Position& pos, Stack* ss, Move bestMove, Value bestValue, Value beta, Square prevSq,
void Search::init() {
for (int i = 1; i < MAX_MOVES; ++i)
- Reductions[i] = int((21.9 + std::log(Threads.size()) / 2) * std::log(i));
+ Reductions[i] = int((20.26 + std::log(Threads.size()) / 2) * std::log(i));
}
bestPreviousScore = bestThread->rootMoves[0].score;
bestPreviousAverageScore = bestThread->rootMoves[0].averageScore;
+ for (Thread* th : Threads)
+ th->previousDepth = bestThread->completedDepth;
+
// Send again PV info if we have a new best thread
if (bestThread != this)
sync_cout << UCI::pv(bestThread->rootPos, bestThread->completedDepth, -VALUE_INFINITE, VALUE_INFINITE) << sync_endl;
multiPV = std::min(multiPV, rootMoves.size());
- doubleExtensionAverage[WHITE].set(0, 100); // initialize the running average at 0%
- doubleExtensionAverage[BLACK].set(0, 100); // initialize the running average at 0%
+ complexityAverage.set(155, 1);
- nodesLastExplosive = nodes;
- nodesLastNormal = nodes;
- state = EXPLOSION_NONE;
- trend = SCORE_ZERO;
- optimism[ us] = Value(25);
- optimism[~us] = -optimism[us];
+ trend = SCORE_ZERO;
+ optimism[us] = optimism[~us] = VALUE_ZERO;
int searchAgainCounter = 0;
if (rootDepth >= 4)
{
Value prev = rootMoves[pvIdx].averageScore;
- delta = Value(17) + int(prev) * prev / 16384;
+ delta = Value(10) + int(prev) * prev / 15620;
alpha = std::max(prev - delta,-VALUE_INFINITE);
beta = std::min(prev + delta, VALUE_INFINITE);
// Adjust trend and optimism based on root move's previousScore
- int tr = sigmoid(prev, 0, 0, 147, 113, 1);
+ int tr = 116 * prev / (std::abs(prev) + 89);
trend = (us == WHITE ? make_score(tr, tr / 2)
: -make_score(tr, tr / 2));
- int opt = sigmoid(prev, 0, 25, 147, 14464, 256);
+ int opt = 118 * prev / (std::abs(prev) + 169);
optimism[ us] = Value(opt);
optimism[~us] = -optimism[us];
}
int failedHighCnt = 0;
while (true)
{
- Depth adjustedDepth = std::max(1, rootDepth - failedHighCnt - searchAgainCounter);
+          // Adjust the effective depth searched, while ensuring at least one effective
+          // increment for every four searchAgain steps (see issue #2717).
+ Depth adjustedDepth = std::max(1, rootDepth - failedHighCnt - 3 * (searchAgainCounter + 1) / 4);
bestValue = Stockfish::search<Root>(rootPos, ss, alpha, beta, adjustedDepth, false);
// Bring the best move to the front. It is critical that sorting
else
break;
- delta += delta / 4 + 5;
+ delta += delta / 4 + 2;
assert(alpha >= -VALUE_INFINITE && beta <= VALUE_INFINITE);
}
&& !Threads.stop
&& !mainThread->stopOnPonderhit)
{
- double fallingEval = (142 + 12 * (mainThread->bestPreviousAverageScore - bestValue)
- + 6 * (mainThread->iterValue[iterIdx] - bestValue)) / 825.0;
+ double fallingEval = (71 + 12 * (mainThread->bestPreviousAverageScore - bestValue)
+ + 6 * (mainThread->iterValue[iterIdx] - bestValue)) / 656.7;
fallingEval = std::clamp(fallingEval, 0.5, 1.5);
// If the bestMove is stable over several iterations, reduce time accordingly
- timeReduction = lastBestMoveDepth + 9 < completedDepth ? 1.92 : 0.95;
- double reduction = (1.47 + mainThread->previousTimeReduction) / (2.32 * timeReduction);
- double bestMoveInstability = 1.073 + std::max(1.0, 2.25 - 9.9 / rootDepth)
- * totBestMoveChanges / Threads.size();
- double totalTime = Time.optimum() * fallingEval * reduction * bestMoveInstability;
+ timeReduction = lastBestMoveDepth + 9 < completedDepth ? 1.37 : 0.65;
+ double reduction = (1.4 + mainThread->previousTimeReduction) / (2.15 * timeReduction);
+ double bestMoveInstability = 1 + 1.7 * totBestMoveChanges / Threads.size();
+ int complexity = mainThread->complexityAverage.value();
+ double complexPosition = std::min(1.0 + (complexity - 261) / 1738.7, 1.5);
+
+ double totalTime = Time.optimum() * fallingEval * reduction * bestMoveInstability * complexPosition;
// Cap used time in case of a single legal move for a better viewer experience in tournaments
// yielding correct scores and sufficiently fast moves.
}
else if ( Threads.increaseDepth
&& !mainThread->ponder
- && Time.elapsed() > totalTime * 0.58)
+ && Time.elapsed() > totalTime * 0.53)
Threads.increaseDepth = false;
else
Threads.increaseDepth = true;
template <NodeType nodeType>
Value search(Position& pos, Stack* ss, Value alpha, Value beta, Depth depth, bool cutNode) {
- Thread* thisThread = pos.this_thread();
-
- // Step 0. Limit search explosion
- if ( ss->ply > 10
- && search_explosion(thisThread) == MUST_CALM_DOWN
- && depth > (ss-1)->depth)
- depth = (ss-1)->depth;
-
constexpr bool PvNode = nodeType != NonPV;
constexpr bool rootNode = nodeType == Root;
const Depth maxNextDepth = rootNode ? depth : depth + 1;
Move ttMove, move, excludedMove, bestMove;
Depth extension, newDepth;
Value bestValue, value, ttValue, eval, maxValue, probCutBeta;
- bool givesCheck, improving, didLMR, priorCapture;
- bool captureOrPromotion, doFullDepthSearch, moveCountPruning, ttCapture;
+ bool givesCheck, improving, priorCapture, singularQuietLMR;
+ bool capture, moveCountPruning, ttCapture;
Piece movedPiece;
- int moveCount, captureCount, quietCount, bestMoveCount, improvement, complexity;
+ int moveCount, captureCount, quietCount, improvement, complexity;
// Step 1. Initialize node
+ Thread* thisThread = pos.this_thread();
ss->inCheck = pos.checkers();
priorCapture = pos.captured_piece();
Color us = pos.side_to_move();
- moveCount = bestMoveCount = captureCount = quietCount = ss->moveCount = 0;
+ moveCount = captureCount = quietCount = ss->moveCount = 0;
bestValue = -VALUE_INFINITE;
maxValue = VALUE_INFINITE;
(ss+1)->ttPv = false;
(ss+1)->excludedMove = bestMove = MOVE_NONE;
(ss+2)->killers[0] = (ss+2)->killers[1] = MOVE_NONE;
+ (ss+2)->cutoffCnt = 0;
ss->doubleExtensions = (ss-1)->doubleExtensions;
- ss->depth = depth;
Square prevSq = to_sq((ss-1)->currentMove);
- // Update the running average statistics for double extensions
- thisThread->doubleExtensionAverage[us].update(ss->depth > (ss-1)->depth);
-
// Initialize statScore to zero for the grandchildren of the current position.
// So statScore is shared between all grandchildren and only the first grandchild
// starts with statScore = 0. Later grandchildren start with the last calculated
ttValue = ss->ttHit ? value_from_tt(tte->value(), ss->ply, pos.rule50_count()) : VALUE_NONE;
ttMove = rootNode ? thisThread->rootMoves[thisThread->pvIdx].pv[0]
: ss->ttHit ? tte->move() : MOVE_NONE;
- ttCapture = ttMove && pos.capture_or_promotion(ttMove);
+ ttCapture = ttMove && pos.capture(ttMove);
if (!excludedMove)
ss->ttPv = PvNode || (ss->ttHit && tte->is_pv());
// At non-PV nodes we check for an early TT cutoff
if ( !PvNode
&& ss->ttHit
- && tte->depth() > depth - (thisThread->id() % 2 == 1)
+ && tte->depth() > depth - (tte->bound() == BOUND_EXACT)
&& ttValue != VALUE_NONE // Possible in case of TT access race
- && (ttValue >= beta ? (tte->bound() & BOUND_LOWER)
- : (tte->bound() & BOUND_UPPER)))
+ && (tte->bound() & (ttValue >= beta ? BOUND_LOWER : BOUND_UPPER)))
{
// If ttMove is quiet, update move sorting heuristics on TT hit (~1 Elo)
if (ttMove)
// Never assume anything about values stored in TT
ss->staticEval = eval = tte->eval();
if (eval == VALUE_NONE)
- ss->staticEval = eval = evaluate(pos);
-
- // Randomize draw evaluation
- if (eval == VALUE_DRAW)
- eval = value_draw(thisThread);
+ ss->staticEval = eval = evaluate(pos, &complexity);
+ else // Fall back to (semi)classical complexity for TT hits, the NNUE complexity is lost
+ complexity = abs(ss->staticEval - pos.psq_eg_stm());
// ttValue can be used as a better position evaluation (~4 Elo)
if ( ttValue != VALUE_NONE
}
else
{
- ss->staticEval = eval = evaluate(pos);
+ ss->staticEval = eval = evaluate(pos, &complexity);
// Save static evaluation into transposition table
if (!excludedMove)
tte->save(posKey, VALUE_NONE, ss->ttPv, BOUND_NONE, DEPTH_NONE, MOVE_NONE, eval);
}
+ thisThread->complexityAverage.update(complexity);
+
// Use static evaluation difference to improve quiet move ordering (~3 Elo)
if (is_ok((ss-1)->currentMove) && !(ss-1)->inCheck && !priorCapture)
{
- int bonus = std::clamp(-16 * int((ss-1)->staticEval + ss->staticEval), -2000, 2000);
+ int bonus = std::clamp(-19 * int((ss-1)->staticEval + ss->staticEval), -1914, 1914);
thisThread->mainHistory[~us][from_to((ss-1)->currentMove)] << bonus;
}
// margin and the improving flag are used in various pruning heuristics.
improvement = (ss-2)->staticEval != VALUE_NONE ? ss->staticEval - (ss-2)->staticEval
: (ss-4)->staticEval != VALUE_NONE ? ss->staticEval - (ss-4)->staticEval
- : 200;
-
+ : 168;
improving = improvement > 0;
- complexity = abs(ss->staticEval - (us == WHITE ? eg_value(pos.psq_score()) : -eg_value(pos.psq_score())));
- // Step 7. Futility pruning: child node (~25 Elo).
+ // Step 7. Razoring.
+    // If eval is really low, check with qsearch whether it can exceed alpha;
+    // if it can't, return a fail low.
+ if ( depth <= 7
+ && eval < alpha - 369 - 254 * depth * depth)
+ {
+ value = qsearch<NonPV>(pos, ss, alpha - 1, alpha);
+ if (value < alpha)
+ return value;
+ }
+
+ // Step 8. Futility pruning: child node (~25 Elo).
// The depth condition is important for mate finding.
if ( !ss->ttPv
- && depth < 9
- && eval - futility_margin(depth, improving) >= beta
- && eval < 15000) // 50% larger than VALUE_KNOWN_WIN, but smaller than TB wins.
+ && depth < 8
+ && eval - futility_margin(depth, improving) - (ss-1)->statScore / 303 >= beta
+ && eval >= beta
+ && eval < 28031) // larger than VALUE_KNOWN_WIN, but smaller than TB wins
return eval;
- // Step 8. Null move search with verification search (~22 Elo)
+ // Step 9. Null move search with verification search (~22 Elo)
if ( !PvNode
&& (ss-1)->currentMove != MOVE_NULL
- && (ss-1)->statScore < 23767
+ && (ss-1)->statScore < 17139
&& eval >= beta
&& eval >= ss->staticEval
- && ss->staticEval >= beta - 20 * depth - improvement / 15 + 204 + complexity / 25
+ && ss->staticEval >= beta - 20 * depth - improvement / 13 + 233 + complexity / 25
&& !excludedMove
&& pos.non_pawn_material(us)
&& (ss->ply >= thisThread->nmpMinPly || us != thisThread->nmpColor))
{
assert(eval - beta >= 0);
- // Null move dynamic reduction based on depth and value
- Depth R = std::min(int(eval - beta) / 205, 3) + depth / 3 + 4;
+ // Null move dynamic reduction based on depth, eval and complexity of position
+ Depth R = std::min(int(eval - beta) / 168, 7) + depth / 3 + 4 - (complexity > 861);
ss->currentMove = MOVE_NULL;
ss->continuationHistory = &thisThread->continuationHistory[0][0][NO_PIECE][0];
}
}
- probCutBeta = beta + 209 - 44 * improving;
+ probCutBeta = beta + 191 - 54 * improving;
- // Step 9. ProbCut (~4 Elo)
+ // Step 10. ProbCut (~4 Elo)
// If we have a good enough capture and a reduced search returns a value
// much above beta, we can (almost) safely prune the previous move.
if ( !PvNode
{
assert(probCutBeta < VALUE_INFINITE);
- MovePicker mp(pos, ttMove, probCutBeta - ss->staticEval, &captureHistory);
- bool ttPv = ss->ttPv;
- ss->ttPv = false;
+ MovePicker mp(pos, ttMove, probCutBeta - ss->staticEval, depth - 3, &captureHistory);
while ((move = mp.next_move()) != MOVE_NONE)
if (move != excludedMove && pos.legal(move))
{
- assert(pos.capture_or_promotion(move));
- assert(depth >= 5);
-
- captureOrPromotion = true;
+ assert(pos.capture(move) || promotion_type(move) == QUEEN);
ss->currentMove = move;
ss->continuationHistory = &thisThread->continuationHistory[ss->inCheck]
- [captureOrPromotion]
+ [true]
[pos.moved_piece(move)]
[to_sq(move)];
if (value >= probCutBeta)
{
- // if transposition table doesn't have equal or more deep info write probCut data into it
- if ( !(ss->ttHit
- && tte->depth() >= depth - 3
- && ttValue != VALUE_NONE))
- tte->save(posKey, value_to_tt(value, ss->ply), ttPv,
- BOUND_LOWER,
- depth - 3, move, ss->staticEval);
+ // Save ProbCut data into transposition table
+ tte->save(posKey, value_to_tt(value, ss->ply), ss->ttPv, BOUND_LOWER, depth - 3, move, ss->staticEval);
return value;
}
}
- ss->ttPv = ttPv;
}
- // Step 10. If the position is not in TT, decrease depth by 2 or 1 depending on node type (~3 Elo)
- if ( PvNode
- && depth >= 6
+ // Step 11. If the position is not in TT, decrease depth by 3.
+ // Use qsearch if depth is equal or below zero (~4 Elo)
+ if ( PvNode
&& !ttMove)
- depth -= 2;
+ depth -= 3;
+
+ if (depth <= 0)
+ return qsearch<PV>(pos, ss, alpha, beta);
- if ( cutNode
- && depth >= 9
+ if ( cutNode
+ && depth >= 9
&& !ttMove)
- depth--;
+ depth -= 2;
moves_loop: // When in check, search starts here
- // Step 11. A small Probcut idea, when we are in check (~0 Elo)
- probCutBeta = beta + 409;
+ // Step 12. A small Probcut idea, when we are in check (~0 Elo)
+ probCutBeta = beta + 417;
if ( ss->inCheck
&& !PvNode
- && depth >= 4
+ && depth >= 2
&& ttCapture
&& (tte->bound() & BOUND_LOWER)
&& tte->depth() >= depth - 3
ss->killers);
value = bestValue;
- moveCountPruning = false;
+ moveCountPruning = singularQuietLMR = false;
// Indicate PvNodes that will probably fail low if the node was searched
// at a depth equal or greater than the current depth, and the result of this search was a fail low.
&& (tte->bound() & BOUND_UPPER)
&& tte->depth() >= depth;
- // Step 12. Loop through all pseudo-legal moves until no moves remain
+ // Step 13. Loop through all pseudo-legal moves until no moves remain
// or a beta cutoff occurs.
while ((move = mp.next_move(moveCountPruning)) != MOVE_NONE)
{
(ss+1)->pv = nullptr;
extension = 0;
- captureOrPromotion = pos.capture_or_promotion(move);
+ capture = pos.capture(move);
movedPiece = pos.moved_piece(move);
givesCheck = pos.gives_check(move);
Value delta = beta - alpha;
- // Step 13. Pruning at shallow depth (~98 Elo). Depth conditions are important for mate finding.
+ // Step 14. Pruning at shallow depth (~98 Elo). Depth conditions are important for mate finding.
if ( !rootNode
&& pos.non_pawn_material(us)
&& bestValue > VALUE_TB_LOSS_IN_MAX_PLY)
// Reduced depth of the next LMR search
int lmrDepth = std::max(newDepth - reduction(improving, depth, moveCount, delta, thisThread->rootDelta), 0);
- if ( captureOrPromotion
+ if ( capture
|| givesCheck)
{
// Futility pruning for captures (~0 Elo)
- if ( !pos.empty(to_sq(move))
- && !givesCheck
+ if ( !givesCheck
&& !PvNode
- && lmrDepth < 6
+ && lmrDepth < 7
&& !ss->inCheck
- && ss->staticEval + 342 + 238 * lmrDepth + PieceValue[EG][pos.piece_on(to_sq(move))]
- + captureHistory[movedPiece][to_sq(move)][type_of(pos.piece_on(to_sq(move)))] / 8 < alpha)
+ && ss->staticEval + 180 + 201 * lmrDepth + PieceValue[EG][pos.piece_on(to_sq(move))]
+ + captureHistory[movedPiece][to_sq(move)][type_of(pos.piece_on(to_sq(move)))] / 6 < alpha)
continue;
// SEE based pruning (~9 Elo)
- if (!pos.see_ge(move, Value(-217) * depth))
+ if (!pos.see_ge(move, Value(-222) * depth))
continue;
}
else
&& history < -3875 * (depth - 1))
continue;
- history += thisThread->mainHistory[us][from_to(move)];
+ history += 2 * thisThread->mainHistory[us][from_to(move)];
// Futility pruning: parent node (~9 Elo)
if ( !ss->inCheck
- && lmrDepth < 8
- && ss->staticEval + 138 + 137 * lmrDepth + history / 64 <= alpha)
+ && lmrDepth < 13
+ && ss->staticEval + 106 + 145 * lmrDepth + history / 52 <= alpha)
continue;
// Prune moves with negative SEE (~3 Elo)
- if (!pos.see_ge(move, Value(-21 * lmrDepth * lmrDepth - 21 * lmrDepth)))
+ if (!pos.see_ge(move, Value(-24 * lmrDepth * lmrDepth - 15 * lmrDepth)))
continue;
}
}
- // Step 14. Extensions (~66 Elo)
-
- // Singular extension search (~58 Elo). If all moves but one fail low on a
- // search of (alpha-s, beta-s), and just one fails high on (alpha, beta),
- // then that move is singular and should be extended. To verify this we do
- // a reduced search on all the other moves but the ttMove and if the
- // result is lower than ttValue minus a margin, then we will extend the ttMove.
- if ( !rootNode
- && depth >= 6 + 2 * (PvNode && tte->is_pv())
- && move == ttMove
- && !excludedMove // Avoid recursive singular search
- /* && ttValue != VALUE_NONE Already implicit in the next condition */
- && abs(ttValue) < VALUE_KNOWN_WIN
- && (tte->bound() & BOUND_LOWER)
- && tte->depth() >= depth - 3)
+ // Step 15. Extensions (~66 Elo)
+ // We take care to not overdo to avoid search getting stuck.
+ if (ss->ply < thisThread->rootDepth * 2)
{
- Value singularBeta = ttValue - 3 * depth;
- Depth singularDepth = (depth - 1) / 2;
+ // Singular extension search (~58 Elo). If all moves but one fail low on a
+ // search of (alpha-s, beta-s), and just one fails high on (alpha, beta),
+ // then that move is singular and should be extended. To verify this we do
+ // a reduced search on all the other moves but the ttMove and if the
+ // result is lower than ttValue minus a margin, then we will extend the ttMove.
+ if ( !rootNode
+ && depth >= 4 - (thisThread->previousDepth > 24) + 2 * (PvNode && tte->is_pv())
+ && move == ttMove
+ && !excludedMove // Avoid recursive singular search
+ /* && ttValue != VALUE_NONE Already implicit in the next condition */
+ && abs(ttValue) < VALUE_KNOWN_WIN
+ && (tte->bound() & BOUND_LOWER)
+ && tte->depth() >= depth - 3)
+ {
+ Value singularBeta = ttValue - (3 + (ss->ttPv && !PvNode)) * depth;
+ Depth singularDepth = (depth - 1) / 2;
- ss->excludedMove = move;
- value = search<NonPV>(pos, ss, singularBeta - 1, singularBeta, singularDepth, cutNode);
- ss->excludedMove = MOVE_NONE;
+ ss->excludedMove = move;
+ value = search<NonPV>(pos, ss, singularBeta - 1, singularBeta, singularDepth, cutNode);
+ ss->excludedMove = MOVE_NONE;
- if (value < singularBeta)
- {
- extension = 1;
+ if (value < singularBeta)
+ {
+ extension = 1;
+ singularQuietLMR = !ttCapture;
+
+ // Avoid search explosion by limiting the number of double extensions
+ if ( !PvNode
+ && value < singularBeta - 25
+ && ss->doubleExtensions <= 9)
+ extension = 2;
+ }
- // Avoid search explosion by limiting the number of double extensions
- if ( !PvNode
- && value < singularBeta - 75
- && ss->doubleExtensions <= 6)
- extension = 2;
+ // Multi-cut pruning
+ // Our ttMove is assumed to fail high, and now we failed high also on a reduced
+ // search without the ttMove. So we assume this expected Cut-node is not singular,
+ // that multiple moves fail high, and we can prune the whole subtree by returning
+ // a soft bound.
+ else if (singularBeta >= beta)
+ return singularBeta;
+
+ // If the eval of ttMove is greater than beta, we reduce it (negative extension)
+ else if (ttValue >= beta)
+ extension = -2;
+
+ // If the eval of ttMove is less than alpha and value, we reduce it (negative extension)
+ else if (ttValue <= alpha && ttValue <= value)
+ extension = -1;
}
- // Multi-cut pruning
- // Our ttMove is assumed to fail high, and now we failed high also on a reduced
- // search without the ttMove. So we assume this expected Cut-node is not singular,
- // that multiple moves fail high, and we can prune the whole subtree by returning
- // a soft bound.
- else if (singularBeta >= beta)
- return singularBeta;
-
- // If the eval of ttMove is greater than beta, we reduce it (negative extension)
- else if (ttValue >= beta)
- extension = -2;
- }
-
- // Check extensions (~1 Elo)
- else if ( givesCheck
- && depth > 6
- && abs(ss->staticEval) > 100)
- extension = 1;
+ // Check extensions (~1 Elo)
+ else if ( givesCheck
+ && depth > 9
+ && abs(ss->staticEval) > 82)
+ extension = 1;
- // Quiet ttMove extensions (~0 Elo)
- else if ( PvNode
- && move == ttMove
- && move == ss->killers[0]
- && (*contHist[0])[movedPiece][to_sq(move)] >= 10000)
- extension = 1;
+ // Quiet ttMove extensions (~0 Elo)
+ else if ( PvNode
+ && move == ttMove
+ && move == ss->killers[0]
+ && (*contHist[0])[movedPiece][to_sq(move)] >= 5177)
+ extension = 1;
+ }
// Add extension to new depth
newDepth += extension;
// Update the current move (this must be done after singular extension search)
ss->currentMove = move;
ss->continuationHistory = &thisThread->continuationHistory[ss->inCheck]
- [captureOrPromotion]
+ [capture]
[movedPiece]
[to_sq(move)];
- // Step 15. Make the move
+ // Step 16. Make the move
pos.do_move(move, st, givesCheck);
- bool doDeeperSearch = false;
-
- // Step 16. Late moves reduction / extension (LMR, ~98 Elo)
+ // Step 17. Late moves reduction / extension (LMR, ~98 Elo)
// We use various heuristics for the sons of a node after the first son has
// been searched. In general we would like to reduce them, but there are many
// cases where we extend a son if it has good chances to be "interesting".
- if ( depth >= 3
- && moveCount > 1 + 2 * rootNode
+ if ( depth >= 2
+ && moveCount > 1 + (PvNode && ss->ply <= 1)
&& ( !ss->ttPv
- || !captureOrPromotion
+ || !capture
|| (cutNode && (ss-1)->moveCount > 1)))
{
Depth r = reduction(improving, depth, moveCount, delta, thisThread->rootDelta);
- // Decrease reduction at some PvNodes (~2 Elo)
- if ( PvNode
- && bestMoveCount <= 3)
- r--;
-
// Decrease reduction if position is or has been on the PV
// and node is not likely to fail low. (~3 Elo)
if ( ss->ttPv
r -= 2;
// Decrease reduction if opponent's move count is high (~1 Elo)
- if ((ss-1)->moveCount > 13)
+ if ((ss-1)->moveCount > 7)
r--;
// Increase reduction for cut nodes (~3 Elo)
- if (cutNode && move != ss->killers[0])
+ if (cutNode)
r += 2;
// Increase reduction if ttMove is a capture (~3 Elo)
if (ttCapture)
r++;
- ss->statScore = thisThread->mainHistory[us][from_to(move)]
+ // Decrease reduction for PvNodes based on depth
+ if (PvNode)
+ r -= 1 + 11 / (3 + depth);
+
+ // Decrease reduction if ttMove has been singularly extended (~1 Elo)
+ if (singularQuietLMR)
+ r--;
+
+          // Decrease reduction if we move a threatened piece (~1 Elo)
+ if ( depth > 9
+ && (mp.threatenedPieces & from_sq(move)))
+ r--;
+
+ // Increase reduction if next ply has a lot of fail high
+ if ((ss+1)->cutoffCnt > 3 && !PvNode)
+ r++;
+
+ ss->statScore = 2 * thisThread->mainHistory[us][from_to(move)]
+ (*contHist[0])[movedPiece][to_sq(move)]
+ (*contHist[1])[movedPiece][to_sq(move)]
+ (*contHist[3])[movedPiece][to_sq(move)]
- - 4923;
+ - 4433;
// Decrease/increase reduction for moves with a good/bad history (~30 Elo)
- r -= ss->statScore / 14721;
-
- // In general we want to cap the LMR depth search at newDepth. But if reductions
- // are really negative and movecount is low, we allow this move to be searched
- // deeper than the first move (this may lead to hidden double extensions).
- int deeper = r >= -1 ? 0
- : moveCount <= 5 ? 2
- : PvNode && depth > 6 ? 1
- : cutNode && moveCount <= 7 ? 1
- : 0;
+ r -= ss->statScore / 13628;
- Depth d = std::clamp(newDepth - r, 1, newDepth + deeper);
+ // In general we want to cap the LMR depth search at newDepth, but when
+ // reduction is negative, we allow this move a limited search extension
+ // beyond the first move depth. This may lead to hidden double extensions.
+ Depth d = std::clamp(newDepth - r, 1, newDepth + 1);
value = -search<NonPV>(pos, ss+1, -(alpha+1), -alpha, d, true);
- // If the son is reduced and fails high it will be re-searched at full depth
- doFullDepthSearch = value > alpha && d < newDepth;
- doDeeperSearch = value > (alpha + 62 + 20 * (newDepth - d));
- didLMR = true;
- }
- else
- {
- doFullDepthSearch = !PvNode || moveCount > 1;
- didLMR = false;
- }
-
- // Step 17. Full depth search when LMR is skipped or fails high
- if (doFullDepthSearch)
- {
- value = -search<NonPV>(pos, ss+1, -(alpha+1), -alpha, newDepth + doDeeperSearch, !cutNode);
-
- // If the move passed LMR update its stats
- if (didLMR && !captureOrPromotion)
+ // Do full depth search when reduced LMR search fails high
+ if (value > alpha && d < newDepth)
{
+ const bool doDeeperSearch = value > (alpha + 64 + 11 * (newDepth - d));
+ value = -search<NonPV>(pos, ss+1, -(alpha+1), -alpha, newDepth + doDeeperSearch, !cutNode);
+
int bonus = value > alpha ? stat_bonus(newDepth)
: -stat_bonus(newDepth);
+ if (capture)
+ bonus /= 6;
+
update_continuation_histories(ss, movedPiece, to_sq(move), bonus);
}
}
+ // Step 18. Full depth search when LMR is skipped
+ else if (!PvNode || moveCount > 1)
+ {
+ value = -search<NonPV>(pos, ss+1, -(alpha+1), -alpha, newDepth, !cutNode);
+ }
+
// For PV nodes only, do a full PV search on the first move or after a fail
// high (in the latter case search only if value < beta), otherwise let the
// parent node fail low with value <= alpha and try another move.
std::min(maxNextDepth, newDepth), false);
}
- // Step 18. Undo move
+ // Step 19. Undo move
pos.undo_move(move);
assert(value > -VALUE_INFINITE && value < VALUE_INFINITE);
- // Step 19. Check for a new best move
+ // Step 20. Check for a new best move
// Finished searching the move. If a stop occurred, the return value of
// the search cannot be trusted, and we return immediately without
// updating best move, PV and TT.
if (PvNode && value < beta) // Update alpha! Always alpha < beta
{
alpha = value;
- bestMoveCount++;
+
+ // Reduce other moves if we have found at least one score improvement
+ if ( depth > 1
+ && depth < 6
+ && beta < VALUE_KNOWN_WIN
+ && alpha > -VALUE_KNOWN_WIN)
+ depth -= 1;
+
+ assert(depth > 0);
}
else
{
+ ss->cutoffCnt++;
assert(value >= beta); // Fail high
break;
}
}
}
+ else
+ ss->cutoffCnt = 0;
+
// If the move is worse than some previously searched move, remember it to update its stats later
if (move != bestMove)
{
- if (captureOrPromotion && captureCount < 32)
+ if (capture && captureCount < 32)
capturesSearched[captureCount++] = move;
- else if (!captureOrPromotion && quietCount < 64)
+ else if (!capture && quietCount < 64)
quietsSearched[quietCount++] = move;
}
}
return VALUE_DRAW;
*/
- // Step 20. Check for mate and stalemate
+ // Step 21. Check for mate and stalemate
// All legal moves have been searched and if there are no legal moves, it
// must be a mate or a stalemate. If we are in a singular extension search then
// return a fail low score.
quietsSearched, quietCount, capturesSearched, captureCount, depth);
// Bonus for prior countermove that caused the fail low
- else if ( (depth >= 3 || PvNode)
+ else if ( (depth >= 5 || PvNode)
&& !priorCapture)
{
//Assign extra bonus if current node is PvNode or cutNode
//or fail low was really bad
bool extraBonus = PvNode
|| cutNode
- || bestValue < alpha - 94 * depth;
+ || bestValue < alpha - 62 * depth;
update_continuation_histories(ss-1, pos.piece_on(prevSq), prevSq, stat_bonus(depth) * (1 + extraBonus));
}
// opponent move is probably good and the new position is added to the search tree.
if (bestValue <= alpha)
ss->ttPv = ss->ttPv || ((ss-1)->ttPv && depth > 3);
- // Otherwise, a counter move has been found and if the position is the last leaf
- // in the search tree, remove the position from the search tree.
- else if (depth > 3)
- ss->ttPv = ss->ttPv && (ss+1)->ttPv;
// Write gathered information in transposition table
if (!excludedMove && !(rootNode && thisThread->pvIdx))
// qsearch() is the quiescence search function, which is called by the main search
// function with zero depth, or recursively with further decreasing depth per call.
+  // (~155 Elo)
template <NodeType nodeType>
Value qsearch(Position& pos, Stack* ss, Value alpha, Value beta, Depth depth) {
Move ttMove, move, bestMove;
Depth ttDepth;
Value bestValue, value, ttValue, futilityValue, futilityBase;
- bool pvHit, givesCheck, captureOrPromotion;
+ bool pvHit, givesCheck, capture;
int moveCount;
if (PvNode)
&& ss->ttHit
&& tte->depth() >= ttDepth
&& ttValue != VALUE_NONE // Only in case of TT access race
- && (ttValue >= beta ? (tte->bound() & BOUND_LOWER)
- : (tte->bound() & BOUND_UPPER)))
+ && (tte->bound() & (ttValue >= beta ? BOUND_LOWER : BOUND_UPPER)))
return ttValue;
// Evaluate the position statically
if (PvNode && bestValue > alpha)
alpha = bestValue;
- futilityBase = bestValue + 155;
+ futilityBase = bestValue + 153;
}
const PieceToHistory* contHist[] = { (ss-1)->continuationHistory, (ss-2)->continuationHistory,
contHist,
prevSq);
+ int quietCheckEvasions = 0;
+
// Loop through the moves until no moves remain or a beta cutoff occurs
while ((move = mp.next_move()) != MOVE_NONE)
{
continue;
givesCheck = pos.gives_check(move);
- captureOrPromotion = pos.capture_or_promotion(move);
+ capture = pos.capture(move);
moveCount++;
ss->currentMove = move;
ss->continuationHistory = &thisThread->continuationHistory[ss->inCheck]
- [captureOrPromotion]
+ [capture]
[pos.moved_piece(move)]
[to_sq(move)];
// Continuation history based pruning (~2 Elo)
- if ( !captureOrPromotion
+ if ( !capture
&& bestValue > VALUE_TB_LOSS_IN_MAX_PLY
- && (*contHist[0])[pos.moved_piece(move)][to_sq(move)] < CounterMovePruneThreshold
- && (*contHist[1])[pos.moved_piece(move)][to_sq(move)] < CounterMovePruneThreshold)
+ && (*contHist[0])[pos.moved_piece(move)][to_sq(move)] < 0
+ && (*contHist[1])[pos.moved_piece(move)][to_sq(move)] < 0)
continue;
+      // Movecount pruning for quiet check evasions
+ if ( bestValue > VALUE_TB_LOSS_IN_MAX_PLY
+ && quietCheckEvasions > 1
+ && !capture
+ && ss->inCheck)
+ continue;
+
+ quietCheckEvasions += !capture && ss->inCheck;
+
// Make and search the move
pos.do_move(move, st, givesCheck);
value = -qsearch<nodeType>(pos, ss+1, -beta, -alpha, depth - 1);
// update_pv() adds current move and appends child pv[]
- void update_pv(Move* pv, Move move, Move* childPv) {
+ void update_pv(Move* pv, Move move, const Move* childPv) {
for (*pv++ = move; childPv && *childPv != MOVE_NONE; )
*pv++ = *childPv++;
void update_all_stats(const Position& pos, Stack* ss, Move bestMove, Value bestValue, Value beta, Square prevSq,
Move* quietsSearched, int quietCount, Move* capturesSearched, int captureCount, Depth depth) {
- int bonus1, bonus2;
Color us = pos.side_to_move();
Thread* thisThread = pos.this_thread();
CapturePieceToHistory& captureHistory = thisThread->captureHistory;
Piece moved_piece = pos.moved_piece(bestMove);
PieceType captured = type_of(pos.piece_on(to_sq(bestMove)));
+ int bonus1 = stat_bonus(depth + 1);
- bonus1 = stat_bonus(depth + 1);
- bonus2 = bestValue > beta + PawnValueMg ? bonus1 // larger bonus
+ if (!pos.capture(bestMove))
+ {
+ int bonus2 = bestValue > beta + 137 ? bonus1 // larger bonus
: stat_bonus(depth); // smaller bonus
- if (!pos.capture_or_promotion(bestMove))
- {
// Increase stats for the best move in case it was a quiet move
update_quiet_stats(pos, ss, bestMove, bonus2);
ss << (v >= beta ? " lowerbound" : v <= alpha ? " upperbound" : "");
ss << " nodes " << nodesSearched
- << " nps " << nodesSearched * 1000 / elapsed;
-
- if (elapsed > 1000) // Earlier makes little sense
- ss << " hashfull " << TT.hashfull();
-
- ss << " tbhits " << tbHits
+ << " nps " << nodesSearched * 1000 / elapsed
+ << " hashfull " << TT.hashfull()
+ << " tbhits " << tbHits
<< " time " << elapsed
<< " pv";
namespace Search {
-/// Threshold used for countermoves based pruning
-constexpr int CounterMovePruneThreshold = 0;
-
/// Stack struct keeps track of the information we need to remember from nodes
/// shallower and deeper in the tree during the search. Each search thread has
Move excludedMove;
Move killers[2];
Value staticEval;
- Depth depth;
int statScore;
int moveCount;
bool inCheck;
bool ttPv;
bool ttHit;
int doubleExtensions;
+ int cutoffCnt;
};
for (auto s : diagonal)
MapA1D1D4[s] = code++;
- // MapKK[] encodes all the 461 possible legal positions of two kings where
+ // MapKK[] encodes all the 462 possible legal positions of two kings where
// the first is in the a1-d1-d4 triangle. If the first king is on the a1-d4
// diagonal, the other one shall not to be above the a1-h8 diagonal.
std::vector<std::pair<int, Square>> bothOnDiagonal;
counterMoves.fill(MOVE_NONE);
mainHistory.fill(0);
captureHistory.fill(0);
+ previousDepth = 0;
for (bool inCheck : { false, true })
for (StatsType c : { NoCaptures, Captures })
- {
for (auto& to : continuationHistory[inCheck][c])
- for (auto& h : to)
- h->fill(-71);
- continuationHistory[inCheck][c][NO_PIECE][0]->fill(Search::CounterMovePruneThreshold - 1);
- }
+ for (auto& h : to)
+ h->fill(-71);
}
}
else if ( th->rootMoves[0].score >= VALUE_TB_WIN_IN_MAX_PLY
|| ( th->rootMoves[0].score > VALUE_TB_LOSS_IN_MAX_PLY
- && votes[th->rootMoves[0].pv[0]] > votes[bestThread->rootMoves[0].pv[0]]))
+ && ( votes[th->rootMoves[0].pv[0]] > votes[bestThread->rootMoves[0].pv[0]]
+ || ( votes[th->rootMoves[0].pv[0]] == votes[bestThread->rootMoves[0].pv[0]]
+ && th->rootMoves[0].pv.size() > bestThread->rootMoves[0].pv.size()))))
bestThread = th;
}
Pawns::Table pawnsTable;
Material::Table materialTable;
size_t pvIdx, pvLast;
- RunningAverage doubleExtensionAverage[COLOR_NB];
- uint64_t nodesLastExplosive;
- uint64_t nodesLastNormal;
+ RunningAverage complexityAverage;
std::atomic<uint64_t> nodes, tbHits, bestMoveChanges;
- Value bestValue;
int selDepth, nmpMinPly;
Color nmpColor;
- ExplosionState state;
- Value optimism[COLOR_NB];
+ Value bestValue, optimism[COLOR_NB];
Position rootPos;
StateInfo rootState;
Search::RootMoves rootMoves;
- Depth rootDepth, completedDepth;
+ Depth rootDepth, completedDepth, previousDepth;
Value rootDelta;
CounterMoveHistory counterMoves;
ButterflyHistory mainHistory;
BOUND_EXACT = BOUND_UPPER | BOUND_LOWER
};
-enum ExplosionState {
- EXPLOSION_NONE,
- MUST_CALM_DOWN
-};
-
enum Value : int {
VALUE_ZERO = 0,
VALUE_DRAW = 0,
}
constexpr int from_to(Move m) {
- return m & 0xFFF;
+ return m & 0xFFF;
}
constexpr MoveType type_of(Move m) {
namespace {
- // FEN string of the initial position, normal chess
+ // FEN string for the initial position in standard chess
const char* StartFEN = "rnbqkbnr/pppppppp/8/8/8/8/PPPPPPPP/RNBQKBNR w KQkq - 0 1";
- // position() is called when engine receives the "position" UCI command.
- // The function sets up the position described in the given FEN string ("fen")
- // or the starting position ("startpos") and then makes the moves given in the
- // following move list ("moves").
+ // position() is called when the engine receives the "position" UCI command.
+ // It sets up the position that is described in the given FEN string ("fen") or
+ // the initial position ("startpos") and then makes the moves given in the following
+ // move list ("moves").
void position(Position& pos, istringstream& is, StateListPtr& states) {
if (token == "startpos")
{
fen = StartFEN;
- is >> token; // Consume "moves" token if any
+ is >> token; // Consume the "moves" token, if any
}
else if (token == "fen")
while (is >> token && token != "moves")
else
return;
- states = StateListPtr(new std::deque<StateInfo>(1)); // Drop old and create a new one
+ states = StateListPtr(new std::deque<StateInfo>(1)); // Drop the old state and create a new one
pos.set(fen, Options["UCI_Chess960"], &states->back(), Threads.main());
- // Parse move list (if any)
+ // Parse the move list, if any
while (is >> token && (m = UCI::to_move(pos, token)) != MOVE_NONE)
{
states->emplace_back();
}
}
- // trace_eval() prints the evaluation for the current position, consistent with the UCI
- // options set so far.
+ // trace_eval() prints the evaluation of the current position, consistent with
+ // the UCI options set so far.
void trace_eval(Position& pos) {
}
- // setoption() is called when engine receives the "setoption" UCI command. The
- // function updates the UCI option ("name") to the given value ("value").
+ // setoption() is called when the engine receives the "setoption" UCI command.
+ // The function updates the UCI option ("name") to the given value ("value").
void setoption(istringstream& is) {
string token, name, value;
- is >> token; // Consume "name" token
+ is >> token; // Consume the "name" token
- // Read option name (can contain spaces)
+ // Read the option name (can contain spaces)
while (is >> token && token != "value")
name += (name.empty() ? "" : " ") + token;
- // Read option value (can contain spaces)
+ // Read the option value (can contain spaces)
while (is >> token)
value += (value.empty() ? "" : " ") + token;
}
- // go() is called when engine receives the "go" UCI command. The function sets
- // the thinking time and other parameters from the input string, then starts
- // the search.
+ // go() is called when the engine receives the "go" UCI command. The function
+ // sets the thinking time and other parameters from the input string, then
+ // starts the search.
void go(Position& pos, istringstream& is, StateListPtr& states) {
string token;
bool ponderMode = false;
- limits.startTime = now(); // As early as possible!
+ limits.startTime = now(); // The search starts as early as possible
while (is >> token)
if (token == "searchmoves") // Needs to be the last command on the line
}
- // bench() is called when engine receives the "bench" command. Firstly
- // a list of UCI commands is setup according to bench parameters, then
- // it is run one by one printing a summary at the end.
+ // bench() is called when the engine receives the "bench" command.
+ // Firstly, a list of UCI commands is set up according to the bench
+ // parameters, then it is run one by one, printing a summary at the end.
void bench(Position& pos, istream& args, StateListPtr& states) {
}
else if (token == "setoption") setoption(is);
else if (token == "position") position(pos, is, states);
- else if (token == "ucinewgame") { Search::clear(); elapsed = now(); } // Search::clear() may take some while
+ else if (token == "ucinewgame") { Search::clear(); elapsed = now(); } // Search::clear() may take a while
}
elapsed = now() - elapsed + 1; // Ensure positivity to avoid a 'divide by zero'
- dbg_print(); // Just before exiting
+ dbg_print();
cerr << "\n==========================="
<< "\nTotal time (ms) : " << elapsed
<< "\nNodes/second : " << 1000 * nodes / elapsed << endl;
}
- // The win rate model returns the probability (per mille) of winning given an eval
- // and a game-ply. The model fits rather accurately the LTC fishtest statistics.
+ // The win rate model returns the probability of winning (in per mille units) given an
+ // eval and a game ply. It fits the LTC fishtest statistics rather accurately.
int win_rate_model(Value v, int ply) {
- // The model captures only up to 240 plies, so limit input (and rescale)
+ // The model only captures up to 240 plies, so limit the input and then rescale
double m = std::min(240, ply) / 64.0;
- // Coefficients of a 3rd order polynomial fit based on fishtest data
- // for two parameters needed to transform eval to the argument of a
- // logistic function.
- double as[] = {-3.68389304, 30.07065921, -60.52878723, 149.53378557};
- double bs[] = {-2.0181857, 15.85685038, -29.83452023, 47.59078827};
+ // The coefficients of a third-order polynomial fit are based on the fishtest data
+ // for the two parameters that are needed to transform the eval to the argument of
+ // a logistic function.
+ double as[] = { 0.50379905, -4.12755858, 18.95487051, 152.00733652};
+ double bs[] = {-1.71790378, 10.71543602, -17.05515898, 41.15680404};
double a = (((as[0] * m + as[1]) * m + as[2]) * m) + as[3];
double b = (((bs[0] * m + bs[1]) * m + bs[2]) * m) + bs[3];
- // Transform eval to centipawns with limited range
+ // Transform the eval to centipawns with limited range
double x = std::clamp(double(100 * v) / PawnValueEg, -2000.0, 2000.0);
- // Return win rate in per mille (rounded to nearest)
+ // Return the win rate in per mille units rounded to the nearest value
return int(0.5 + 1000 / (1 + std::exp((a - x) / b)));
}
} // namespace
-/// UCI::loop() waits for a command from stdin, parses it and calls the appropriate
-/// function. Also intercepts EOF from stdin to ensure gracefully exiting if the
-/// GUI dies unexpectedly. When called with some command line arguments, e.g. to
-/// run 'bench', once the command is executed the function returns immediately.
-/// In addition to the UCI ones, also some additional debug commands are supported.
+/// UCI::loop() waits for a command from the stdin, parses it and then calls the appropriate
+/// function. It also intercepts an end-of-file (EOF) indication from the stdin to ensure a
+/// graceful exit if the GUI dies unexpectedly. When called with some command-line arguments,
+/// like running 'bench', the function returns immediately after the command is executed.
+/// In addition to the UCI ones, some additional debug commands are also supported.
void UCI::loop(int argc, char* argv[]) {
cmd += std::string(argv[i]) + " ";
do {
- if (argc == 1 && !getline(cin, cmd)) // Block here waiting for input or EOF
+ if (argc == 1 && !getline(cin, cmd)) // Wait for an input or an end-of-file (EOF) indication
cmd = "quit";
istringstream is(cmd);
- token.clear(); // Avoid a stale if getline() returns empty or blank line
+ token.clear(); // Avoid a stale if getline() returns nothing or a blank line
is >> skipws >> token;
if ( token == "quit"
|| token == "stop")
Threads.stop = true;
- // The GUI sends 'ponderhit' to tell us the user has played the expected move.
- // So 'ponderhit' will be sent if we were told to ponder on the same move the
- // user has played. We should continue searching but switch from pondering to
- // normal search.
+ // The GUI sends 'ponderhit' to tell that the user has played the expected move.
+ // So, 'ponderhit' is sent if pondering was done on the same move that the user
+ // has played. The search should continue, but should also switch from pondering
+ // to the normal search.
else if (token == "ponderhit")
- Threads.main()->ponder = false; // Switch to normal search
+ Threads.main()->ponder = false; // Switch to the normal search
else if (token == "uci")
sync_cout << "id name " << engine_info(true)
else if (token == "ucinewgame") Search::clear();
else if (token == "isready") sync_cout << "readyok" << sync_endl;
- // Additional custom non-UCI commands, mainly for debugging.
- // Do not use these commands during a search!
+ // Add custom non-UCI commands, mainly for debugging purposes.
+ // These commands must not be used during a search!
else if (token == "flip") pos.flip();
else if (token == "bench") bench(pos, is, states);
else if (token == "d") sync_cout << pos << sync_endl;
filename = f;
Eval::NNUE::save_eval(filename);
}
+ else if (token == "--help" || token == "help" || token == "--license" || token == "license")
+ sync_cout << "\nStockfish is a powerful chess engine for playing and analyzing."
+ "\nIt is released as free software licensed under the GNU GPLv3 License."
+ "\nStockfish is normally used with a graphical user interface (GUI) and implements"
+ "\nthe Universal Chess Interface (UCI) protocol to communicate with a GUI, an API, etc."
+ "\nFor any further information, visit https://github.com/official-stockfish/Stockfish#readme"
+ "\nor read the corresponding README.md and Copying.txt files distributed along with this program.\n" << sync_endl;
else if (!token.empty() && token[0] != '#')
- sync_cout << "Unknown command: " << cmd << sync_endl;
+ sync_cout << "Unknown command: '" << cmd << "'. Type help for more information." << sync_endl;
- } while (token != "quit" && argc == 1); // Command line args are one-shot
+ } while (token != "quit" && argc == 1); // The command-line arguments are one-shot
}
-/// UCI::value() converts a Value to a string suitable for use with the UCI
-/// protocol specification:
+/// UCI::value() converts a Value to a string by adhering to the UCI protocol specification:
///
/// cp <x> The score from the engine's point of view in centipawns.
-/// mate <y> Mate in y moves, not plies. If the engine is getting mated
-/// use negative values for y.
+/// mate <y> Mate in 'y' moves (not plies). If the engine is getting mated,
+/// negative values are used for 'y'.
string UCI::value(Value v) {
}
-/// UCI::wdl() report WDL statistics given an evaluation and a game ply, based on
-/// data gathered for fishtest LTC games.
+/// UCI::wdl() reports the win-draw-loss (WDL) statistics given an evaluation
+/// and a game ply based on the data gathered for fishtest LTC games.
string UCI::wdl(Value v, int ply) {
/// UCI::move() converts a Move to a string in coordinate notation (g1f3, a7a8q).
-/// The only special case is castling, where we print in the e1g1 notation in
-/// normal chess mode, and in e1h1 notation in chess960 mode. Internally all
-/// castling moves are always encoded as 'king captures rook'.
+/// The only special case is castling where the e1g1 notation is printed in
+/// standard chess mode and the e1h1 notation in Chess960 mode. Internally,
+/// all castling moves are always encoded as 'king captures rook'.
string UCI::move(Move m, bool chess960) {
Move UCI::to_move(const Position& pos, string& str) {
- if (str.length() == 5) // Junior could send promotion piece in uppercase
- str[4] = char(tolower(str[4]));
+ if (str.length() == 5)
+ str[4] = char(tolower(str[4])); // The promotion piece character must be lowercased
for (const auto& m : MoveList<LEGAL>(pos))
if (str == UCI::move(m, pos.is_chess960()))
class Option;
-/// Custom comparator because UCI options should be case insensitive
+/// Define a custom comparator, because the UCI options should be case-insensitive
struct CaseInsensitiveLess {
bool operator() (const std::string&, const std::string&) const;
};
-/// Our options container is actually a std::map
+/// The options container is defined as a std::map
typedef std::map<std::string, Option, CaseInsensitiveLess> OptionsMap;
-/// Option class implements an option as defined by UCI protocol
+/// The Option class implements each option as specified by the UCI protocol
class Option {
typedef void (*OnChange)(const Option&);
constexpr int MaxHashMB = Is64Bit ? 33554432 : 2048;
o["Debug Log File"] << Option("", on_logger);
- o["Threads"] << Option(1, 1, 512, on_threads);
+ o["Threads"] << Option(1, 1, 1024, on_threads);
o["Hash"] << Option(16, 1, MaxHashMB, on_hash_size);
o["Clear Hash"] << Option(on_clear_hash);
o["Ponder"] << Option(false);