From 3d9410377f831959fac3cfc43969b194e5f506be Mon Sep 17 00:00:00 2001 From: Odysseas Gabrielides Date: Mon, 4 Dec 2023 16:56:49 +0200 Subject: [PATCH 01/16] chore: bump version to 20.0.2 --- configure.ac | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/configure.ac b/configure.ac index 4c88518214..1dd19c2bde 100644 --- a/configure.ac +++ b/configure.ac @@ -1,7 +1,7 @@ AC_PREREQ([2.69]) define(_CLIENT_VERSION_MAJOR, 20) define(_CLIENT_VERSION_MINOR, 0) -define(_CLIENT_VERSION_BUILD, 1) +define(_CLIENT_VERSION_BUILD, 2) define(_CLIENT_VERSION_RC, 0) define(_CLIENT_VERSION_IS_RELEASE, true) define(_COPYRIGHT_YEAR, 2023) From a8573c942f53dcb65808410bd3162ea1a8c335e0 Mon Sep 17 00:00:00 2001 From: UdjinM6 Date: Mon, 20 Nov 2023 19:08:48 +0300 Subject: [PATCH 02/16] fix: avoid crashes on "corrupted db" reindex attempts (#5717) ## Issue being fixed or feature implemented Should fix crashes like ``` : Corrupted block database detected. Please restart with -reindex or -reindex-chainstate to recover. Assertion failure: assertion: globalInstance == nullptr file: mnhftx.cpp, line: 43 function: CMNHFManager 0#: (0x105ADA27C) stacktraces.cpp:629 - __assert_rtn 1#: (0x104945794) mnhftx.cpp:43 - CMNHFManager::CMNHFManager(CEvoDB&) 2#: (0x10499DA90) compressed_pair.h:40 - std::__1::__unique_if::__unique_single std::__1::make_unique[abi:v15006](CEvoDB&) 3#: (0x10499753C) init.cpp:1915 - AppInitMain(std::__1::variant, std::__1::reference_wrapper, std::__1::reference_wrapper, std::__1::reference_wrapper, std::__1::reference_wrapper, std::__1::reference_wrapper> const&, NodeContext&, interfaces::BlockAndHeaderTipInfo*) ``` ## What was done? ## How Has This Been Tested? 
## Breaking Changes n/a ## Checklist: - [x] I have performed a self-review of my own code - [ ] I have commented my code, particularly in hard-to-understand areas - [ ] I have added or updated relevant unit/integration/functional/e2e tests - [ ] I have made corresponding changes to the documentation - [x] I have assigned this pull request to a milestone _(for repository code-owners and collaborators only)_ --- src/init.cpp | 1 + 1 file changed, 1 insertion(+) diff --git a/src/init.cpp b/src/init.cpp index 8a5d30fa27..518bb71c76 100644 --- a/src/init.cpp +++ b/src/init.cpp @@ -1912,6 +1912,7 @@ bool AppInitMain(const CoreContext& context, NodeContext& node, interfaces::Bloc LOCK(cs_main); node.evodb.reset(); node.evodb = std::make_unique(nEvoDbCache, false, fReset || fReindexChainState); + node.mnhf_manager.reset(); node.mnhf_manager = std::make_unique(*node.evodb); chainman.Reset(); From 7c966c9db0d083605fdd83dd09a5b6458ef34b31 Mon Sep 17 00:00:00 2001 From: PastaPastaPasta <6443210+PastaPastaPasta@users.noreply.github.com> Date: Fri, 24 Nov 2023 11:24:06 -0600 Subject: [PATCH 03/16] Merge pull request #5718 from knst/mac-improvements backport: bitcoin#24603, #26694, #24669, #22546, #22199, #25817 (mac build) --- Makefile.am | 17 +- ci/test/00_setup_env_native_qt5.sh | 2 +- contrib/guix/libexec/build.sh | 2 +- contrib/macdeploy/detached-sig-apply.sh | 27 --- contrib/macdeploy/macdeployqtplus | 221 +++++------------- depends/packages/qt.mk | 3 + .../feature_backwards_compatibility.py | 49 +++- test/get_previous_releases.py | 52 ++++- 8 files changed, 161 insertions(+), 212 deletions(-) delete mode 100755 contrib/macdeploy/detached-sig-apply.sh diff --git a/Makefile.am b/Makefile.am index 2ea6bb1f52..7cd688ba25 100644 --- a/Makefile.am +++ b/Makefile.am @@ -36,7 +36,6 @@ OSX_APP=Dash-Qt.app OSX_VOLNAME = $(subst $(space),-,$(PACKAGE_NAME)) OSX_DMG = $(OSX_VOLNAME).dmg OSX_TEMP_ISO = $(OSX_DMG:.dmg=).temp.iso 
-OSX_BACKGROUND_IMAGE=$(top_srcdir)/contrib/macdeploy/background.tiff OSX_DEPLOY_SCRIPT=$(top_srcdir)/contrib/macdeploy/macdeployqtplus OSX_INSTALLER_ICONS=$(top_srcdir)/src/qt/res/icons/dash.icns OSX_PLIST=$(top_builddir)/share/qt/Info.plist #not installed @@ -60,7 +59,6 @@ WINDOWS_PACKAGING = $(top_srcdir)/share/pixmaps/dash.ico \ $(top_srcdir)/doc/README_windows.txt OSX_PACKAGING = $(OSX_DEPLOY_SCRIPT) $(OSX_INSTALLER_ICONS) \ - $(top_srcdir)/contrib/macdeploy/detached-sig-apply.sh \ $(top_srcdir)/contrib/macdeploy/detached-sig-create.sh COVERAGE_INFO = baseline.info \ @@ -125,28 +123,17 @@ $(OSX_DMG): $(OSX_APP_BUILT) $(OSX_PACKAGING) deploydir: $(OSX_DMG) else !BUILD_DARWIN APP_DIST_DIR=$(top_builddir)/dist -APP_DIST_EXTRAS=$(APP_DIST_DIR)/.background/background.tiff $(APP_DIST_DIR)/.DS_Store $(APP_DIST_DIR)/Applications -$(APP_DIST_DIR)/Applications: - @rm -f $@ - @cd $(@D); $(LN_S) /Applications $(@F) - -$(APP_DIST_EXTRAS): $(APP_DIST_DIR)/$(OSX_APP)/Contents/MacOS/Dash-Qt - -$(OSX_TEMP_ISO): $(APP_DIST_EXTRAS) +$(OSX_TEMP_ISO): $(APP_DIST_DIR)/$(OSX_APP)/Contents/MacOS/Dash-Qt $(XORRISOFS) -D -l -V "$(OSX_VOLNAME)" -no-pad -r -dir-mode 0755 -o $@ $(APP_DIST_DIR) -- $(if $(SOURCE_DATE_EPOCH),-volume_date all_file_dates =$(SOURCE_DATE_EPOCH)) $(OSX_DMG): $(OSX_TEMP_ISO) $(DMG) dmg "$<" "$@" -$(APP_DIST_DIR)/.background/background.tiff: - $(MKDIR_P) $(@D) - cp $(OSX_BACKGROUND_IMAGE) $@ - $(APP_DIST_DIR)/$(OSX_APP)/Contents/MacOS/Dash-Qt: $(OSX_APP_BUILT) $(OSX_PACKAGING) INSTALLNAMETOOL=$(INSTALLNAMETOOL) OTOOL=$(OTOOL) STRIP=$(STRIP) $(PYTHON) $(OSX_DEPLOY_SCRIPT) $(OSX_APP) $(OSX_VOLNAME) -translations-dir=$(QT_TRANSLATION_DIR) -deploydir: $(APP_DIST_EXTRAS) +deploydir: $(APP_DIST_DIR)/$(OSX_APP)/Contents/MacOS/Dash-Qt endif !BUILD_DARWIN appbundle: $(OSX_APP_BUILT) diff --git a/ci/test/00_setup_env_native_qt5.sh b/ci/test/00_setup_env_native_qt5.sh index 72c30e1336..0938352753 100755 --- a/ci/test/00_setup_env_native_qt5.sh +++ 
b/ci/test/00_setup_env_native_qt5.sh @@ -15,5 +15,5 @@ export RUN_UNIT_TESTS_SEQUENTIAL="true" export RUN_UNIT_TESTS="false" export GOAL="install" export TEST_PREVIOUS_RELEASES=true -export PREVIOUS_RELEASES_TO_DOWNLOAD="v0.15.0.0 v0.16.1.1 v0.17.0.3 v18.2.2 v19.3.0" +export PREVIOUS_RELEASES_TO_DOWNLOAD="v0.15.0.0 v0.16.1.1 v0.17.0.3 v18.2.2 v19.3.0 v20.0.1" export BITCOIN_CONFIG="--enable-zmq --enable-reduce-exports --disable-fuzz-binary LDFLAGS=-static-libstdc++" diff --git a/contrib/guix/libexec/build.sh b/contrib/guix/libexec/build.sh index 395f885e31..a85d9bc921 100644 --- a/contrib/guix/libexec/build.sh +++ b/contrib/guix/libexec/build.sh @@ -328,7 +328,7 @@ mkdir -p "$DISTSRC" mkdir -p "unsigned-app-${HOST}" cp --target-directory="unsigned-app-${HOST}" \ osx_volname \ - contrib/macdeploy/detached-sig-{apply,create}.sh \ + contrib/macdeploy/detached-sig-create.sh \ "${BASEPREFIX}/${HOST}"/native/bin/dmg mv --target-directory="unsigned-app-${HOST}" dist ( diff --git a/contrib/macdeploy/detached-sig-apply.sh b/contrib/macdeploy/detached-sig-apply.sh deleted file mode 100755 index ae83ccc3e1..0000000000 --- a/contrib/macdeploy/detached-sig-apply.sh +++ /dev/null @@ -1,27 +0,0 @@ -#!/bin/sh -# Copyright (c) 2014-2015 The Bitcoin Core developers -# Distributed under the MIT software license, see the accompanying -# file COPYING or http://www.opensource.org/licenses/mit-license.php. - -export LC_ALL=C -set -e - -UNSIGNED="$1" -SIGNATURE="$2" -ROOTDIR=dist -OUTDIR=signed-app -SIGNAPPLE=signapple - -if [ -z "$UNSIGNED" ]; then - echo "usage: $0 " - exit 1 -fi - -if [ -z "$SIGNATURE" ]; then - echo "usage: $0 " - exit 1 -fi - -${SIGNAPPLE} apply ${UNSIGNED} ${SIGNATURE} -mv ${ROOTDIR} ${OUTDIR} -echo "Signed: ${OUTDIR}" diff --git a/contrib/macdeploy/macdeployqtplus b/contrib/macdeploy/macdeployqtplus index 249ce9c187..293e0d0934 100755 --- a/contrib/macdeploy/macdeployqtplus +++ b/contrib/macdeploy/macdeployqtplus @@ -16,8 +16,7 @@ # along with this program. 
If not, see . # -import plistlib -import sys, re, os, shutil, stat, os.path +import sys, re, os, platform, shutil, stat, subprocess, os.path from argparse import ArgumentParser from ds_store import DSStore from mac_alias import Alias @@ -53,7 +52,7 @@ class FrameworkInfo(object): return False def __str__(self): - return f""" Framework name: {frameworkName} + return f""" Framework name: {self.frameworkName} Framework directory: {self.frameworkDirectory} Framework path: {self.frameworkPath} Binary name: {self.binaryName} @@ -85,8 +84,8 @@ class FrameworkInfo(object): if line == "": return None - # Don't deploy system libraries (exception for libQtuitools and libQtlucene). - if line.startswith("/System/Library/") or line.startswith("@executable_path") or (line.startswith("/usr/lib/") and "libQt" not in line): + # Don't deploy system libraries + if line.startswith("/System/Library/") or line.startswith("@executable_path") or line.startswith("/usr/lib/"): return None m = cls.reOLine.match(line) @@ -246,56 +245,46 @@ def copyFramework(framework: FrameworkInfo, path: str, verbose: int) -> Optional toDir = os.path.join(path, framework.destinationDirectory) toPath = os.path.join(toDir, framework.binaryName) - if not os.path.exists(fromPath): - raise RuntimeError(f"No file at {fromPath}") + if framework.isDylib(): + if not os.path.exists(fromPath): + raise RuntimeError(f"No file at {fromPath}") - if os.path.exists(toPath): - return None # Already there + if os.path.exists(toPath): + return None # Already there - if not os.path.exists(toDir): - os.makedirs(toDir) + if not os.path.exists(toDir): + os.makedirs(toDir) - shutil.copy2(fromPath, toPath) - if verbose: - print("Copied:", fromPath) - print(" to:", toPath) + shutil.copy2(fromPath, toPath) + if verbose: + print("Copied:", fromPath) + print(" to:", toPath) + else: + to_dir = os.path.join(path, "Contents", "Frameworks", framework.frameworkName) + if os.path.exists(to_dir): + return None # Already there + + from_dir = 
framework.frameworkPath + if not os.path.exists(from_dir): + raise RuntimeError(f"No directory at {from_dir}") + + shutil.copytree(from_dir, to_dir, symlinks=True) + if verbose: + print("Copied:", from_dir) + print(" to:", to_dir) + + headers_link = os.path.join(to_dir, "Headers") + if os.path.exists(headers_link): + os.unlink(headers_link) + + headers_dir = os.path.join(to_dir, framework.binaryDirectory, "Headers") + if os.path.exists(headers_dir): + shutil.rmtree(headers_dir) permissions = os.stat(toPath) if not permissions.st_mode & stat.S_IWRITE: os.chmod(toPath, permissions.st_mode | stat.S_IWRITE) - if not framework.isDylib(): # Copy resources for real frameworks - - linkfrom = os.path.join(path, "Contents","Frameworks", framework.frameworkName, "Versions", "Current") - linkto = framework.version - if not os.path.exists(linkfrom): - os.symlink(linkto, linkfrom) - print("Linked:", linkfrom, "->", linkto) - fromResourcesDir = framework.sourceResourcesDirectory - if os.path.exists(fromResourcesDir): - toResourcesDir = os.path.join(path, framework.destinationResourcesDirectory) - shutil.copytree(fromResourcesDir, toResourcesDir, symlinks=True) - if verbose: - print("Copied resources:", fromResourcesDir) - print(" to:", toResourcesDir) - fromContentsDir = framework.sourceVersionContentsDirectory - if not os.path.exists(fromContentsDir): - fromContentsDir = framework.sourceContentsDirectory - if os.path.exists(fromContentsDir): - toContentsDir = os.path.join(path, framework.destinationVersionContentsDirectory) - shutil.copytree(fromContentsDir, toContentsDir, symlinks=True) - if verbose: - print("Copied Contents:", fromContentsDir) - print(" to:", toContentsDir) - elif framework.frameworkName.startswith("libQtGui"): # Copy qt_menu.nib (applies to non-framework layout) - qtMenuNibSourcePath = os.path.join(framework.frameworkDirectory, "Resources", "qt_menu.nib") - qtMenuNibDestinationPath = os.path.join(path, "Contents", "Resources", "qt_menu.nib") - if 
os.path.exists(qtMenuNibSourcePath) and not os.path.exists(qtMenuNibDestinationPath): - shutil.copytree(qtMenuNibSourcePath, qtMenuNibDestinationPath, symlinks=True) - if verbose: - print("Copied for libQtGui:", qtMenuNibSourcePath) - print(" to:", qtMenuNibDestinationPath) - return toPath def deployFrameworks(frameworks: List[FrameworkInfo], bundlePath: str, binaryPath: str, strip: bool, verbose: int, deploymentInfo: Optional[DeploymentInfo] = None) -> DeploymentInfo: @@ -351,115 +340,20 @@ def deployFrameworksForAppBundle(applicationBundle: ApplicationBundleInfo, strip return deployFrameworks(frameworks, applicationBundle.path, applicationBundle.binaryPath, strip, verbose) def deployPlugins(appBundleInfo: ApplicationBundleInfo, deploymentInfo: DeploymentInfo, strip: bool, verbose: int): - # Lookup available plugins, exclude unneeded plugins = [] if deploymentInfo.pluginPath is None: return for dirpath, dirnames, filenames in os.walk(deploymentInfo.pluginPath): pluginDirectory = os.path.relpath(dirpath, deploymentInfo.pluginPath) - if pluginDirectory == "designer": - # Skip designer plugins + + if pluginDirectory not in ['styles', 'platforms']: continue - elif pluginDirectory == "printsupport": - # Skip printsupport plugins - continue - elif pluginDirectory == "imageformats": - # Skip imageformats plugins - continue - elif pluginDirectory == "sqldrivers": - # Deploy the sql plugins only if QtSql is in use - if not deploymentInfo.usesFramework("QtSql"): - continue - elif pluginDirectory == "script": - # Deploy the script plugins only if QtScript is in use - if not deploymentInfo.usesFramework("QtScript"): - continue - elif pluginDirectory == "qmltooling" or pluginDirectory == "qml1tooling": - # Deploy the qml plugins only if QtDeclarative is in use - if not deploymentInfo.usesFramework("QtDeclarative"): - continue - elif pluginDirectory == "bearer": - # Deploy the bearer plugins only if QtNetwork is in use - if not deploymentInfo.usesFramework("QtNetwork"): - 
continue - elif pluginDirectory == "position": - # Deploy the position plugins only if QtPositioning is in use - if not deploymentInfo.usesFramework("QtPositioning"): - continue - elif pluginDirectory == "sensors" or pluginDirectory == "sensorgestures": - # Deploy the sensor plugins only if QtSensors is in use - if not deploymentInfo.usesFramework("QtSensors"): - continue - elif pluginDirectory == "audio" or pluginDirectory == "playlistformats": - # Deploy the audio plugins only if QtMultimedia is in use - if not deploymentInfo.usesFramework("QtMultimedia"): - continue - elif pluginDirectory == "mediaservice": - # Deploy the mediaservice plugins only if QtMultimediaWidgets is in use - if not deploymentInfo.usesFramework("QtMultimediaWidgets"): - continue - elif pluginDirectory == "canbus": - # Deploy the canbus plugins only if QtSerialBus is in use - if not deploymentInfo.usesFramework("QtSerialBus"): - continue - elif pluginDirectory == "webview": - # Deploy the webview plugins only if QtWebView is in use - if not deploymentInfo.usesFramework("QtWebView"): - continue - elif pluginDirectory == "gamepads": - # Deploy the webview plugins only if QtGamepad is in use - if not deploymentInfo.usesFramework("QtGamepad"): - continue - elif pluginDirectory == "geoservices": - # Deploy the webview plugins only if QtLocation is in use - if not deploymentInfo.usesFramework("QtLocation"): - continue - elif pluginDirectory == "texttospeech": - # Deploy the texttospeech plugins only if QtTextToSpeech is in use - if not deploymentInfo.usesFramework("QtTextToSpeech"): - continue - elif pluginDirectory == "virtualkeyboard": - # Deploy the virtualkeyboard plugins only if QtVirtualKeyboard is in use - if not deploymentInfo.usesFramework("QtVirtualKeyboard"): - continue - elif pluginDirectory == "sceneparsers": - # Deploy the virtualkeyboard plugins only if Qt3DCore is in use - if not deploymentInfo.usesFramework("Qt3DCore"): - continue - elif pluginDirectory == "renderplugins": - # 
Deploy the renderplugins plugins only if Qt3DCore is in use - if not deploymentInfo.usesFramework("Qt3DCore"): - continue - elif pluginDirectory == "geometryloaders": - # Deploy the geometryloaders plugins only if Qt3DCore is in use - if not deploymentInfo.usesFramework("Qt3DCore"): - continue for pluginName in filenames: pluginPath = os.path.join(pluginDirectory, pluginName) - if pluginName.endswith("_debug.dylib"): - # Skip debug plugins + + if pluginName.split('.')[0] not in ['libqminimal', 'libqcocoa', 'libqmacstyle']: continue - elif pluginPath == "imageformats/libqsvg.dylib" or pluginPath == "iconengines/libqsvgicon.dylib": - # Deploy the svg plugins only if QtSvg is in use - if not deploymentInfo.usesFramework("QtSvg"): - continue - elif pluginPath == "accessible/libqtaccessiblecompatwidgets.dylib": - # Deploy accessibility for Qt3Support only if the Qt3Support is in use - if not deploymentInfo.usesFramework("Qt3Support"): - continue - elif pluginPath == "graphicssystems/libqglgraphicssystem.dylib": - # Deploy the opengl graphicssystem plugin only if QtOpenGL is in use - if not deploymentInfo.usesFramework("QtOpenGL"): - continue - elif pluginPath == "accessible/libqtaccessiblequick.dylib": - # Deploy the accessible qtquick plugin only if QtQuick is in use - if not deploymentInfo.usesFramework("QtQuick"): - continue - elif pluginPath == "platforminputcontexts/libqtvirtualkeyboardplugin.dylib": - # Deploy the virtualkeyboardplugin plugin only if QtVirtualKeyboard is in use - if not deploymentInfo.usesFramework("QtVirtualKeyboard"): - continue plugins.append((pluginDirectory, pluginName)) @@ -527,6 +421,9 @@ if os.path.exists(appname + ".dmg"): print("+ Removing existing DMG +") os.unlink(appname + ".dmg") +if os.path.exists(appname + ".temp.dmg"): + os.unlink(appname + ".temp.dmg") + # ------------------------------------------------ target = os.path.join("dist", "Dash-Qt.app") @@ -644,6 +541,25 @@ ds.close() # ------------------------------------------------ 
+if platform.system() == "Darwin": + subprocess.check_call(f"codesign --deep --force --sign - {target}", shell=True) + +print("+ Installing background.tiff +") + +bg_path = os.path.join('dist', '.background', 'background.tiff') +os.mkdir(os.path.dirname(bg_path)) + +tiff_path = os.path.join('contrib', 'macdeploy', 'background.tiff') +shutil.copy2(tiff_path, bg_path) + +# ------------------------------------------------ + +print("+ Generating symlink for /Applications +") + +os.symlink("/Applications", os.path.join('dist', "Applications")) + +# ------------------------------------------------ + if config.dmg is not None: print("+ Preparing .dmg disk image +") @@ -667,19 +583,6 @@ if config.dmg is not None: print("Attaching temp image...") output = run(["hdiutil", "attach", tempname, "-readwrite"], check=True, universal_newlines=True, stdout=PIPE).stdout - m = re.search(r"/Volumes/(.+$)", output) - disk_root = m.group(0) - - print("+ Applying fancy settings +") - - bg_path = os.path.join(disk_root, ".background", os.path.basename('background.tiff')) - os.mkdir(os.path.dirname(bg_path)) - if verbose: - print('background.tiff', "->", bg_path) - shutil.copy2('contrib/macdeploy/background.tiff', bg_path) - - os.symlink("/Applications", os.path.join(disk_root, "Applications")) - print("+ Finalizing .dmg disk image +") run(["hdiutil", "detach", f"/Volumes/{appname}"], universal_newlines=True) diff --git a/depends/packages/qt.mk b/depends/packages/qt.mk index 37fbb28943..80b2300f73 100644 --- a/depends/packages/qt.mk +++ b/depends/packages/qt.mk @@ -134,6 +134,9 @@ $(package)_config_opts_darwin += -no-feature-corewlan $(package)_config_opts_darwin += -no-freetype $(package)_config_opts_darwin += QMAKE_MACOSX_DEPLOYMENT_TARGET=$(OSX_MIN_VERSION) +# Optimizing using > -O1 causes non-determinism when building across arches. 
+$(package)_config_opts_aarch64_darwin += "QMAKE_CFLAGS_OPTIMIZE_FULL = -O1" + ifneq ($(build_os),darwin) $(package)_config_opts_darwin += -xplatform macx-clang-linux $(package)_config_opts_darwin += -device-option MAC_SDK_PATH=$(OSX_SDK) diff --git a/test/functional/feature_backwards_compatibility.py b/test/functional/feature_backwards_compatibility.py index 8fe6585f80..1aff7c337a 100755 --- a/test/functional/feature_backwards_compatibility.py +++ b/test/functional/feature_backwards_compatibility.py @@ -30,11 +30,12 @@ from test_framework.util import ( class BackwardsCompatibilityTest(BitcoinTestFramework): def set_test_params(self): self.setup_clean_chain = True - self.num_nodes = 6 + self.num_nodes = 7 # Add new version after each release: self.extra_args = [ [], # Pre-release: use to mine blocks ["-nowallet"], # Pre-release: use to receive coins, swap wallets, etc + ["-nowallet"], # v20.0.1 ["-nowallet"], # v19.3.0 ["-nowallet"], # v18.2.2 ["-nowallet"], # v0.17.0.3 @@ -51,6 +52,7 @@ class BackwardsCompatibilityTest(BitcoinTestFramework): None, None, 19030000, + 19030000, 18020200, 170003, 160101, @@ -68,7 +70,8 @@ class BackwardsCompatibilityTest(BitcoinTestFramework): res = self.nodes[self.num_nodes - 1].getblockchaininfo() assert_equal(res['blocks'], 101) - node_master = self.nodes[self.num_nodes - 5] + node_master = self.nodes[self.num_nodes - 6] + node_v20 = self.nodes[self.num_nodes - 5] node_v19 = self.nodes[self.num_nodes - 4] node_v18 = self.nodes[self.num_nodes - 3] node_v17 = self.nodes[self.num_nodes - 2] @@ -117,6 +120,13 @@ class BackwardsCompatibilityTest(BitcoinTestFramework): assert info['private_keys_enabled'] == False assert info['keypoolsize'] == 0 + # w1_v20: regular wallet, created with v20.0 + node_v20.createwallet(wallet_name="w1_v20") + wallet = node_v20.get_wallet_rpc("w1_v20") + info = wallet.getwalletinfo() + assert info['private_keys_enabled'] + assert info['keypoolsize'] > 0 + # w2_v19: wallet with private keys disabled, created 
with v0.19 node_v19.createwallet(wallet_name="w2_v19", disable_private_keys=True) wallet = node_v19.get_wallet_rpc("w2_v19") @@ -155,6 +165,7 @@ class BackwardsCompatibilityTest(BitcoinTestFramework): # Copy the wallets to older nodes: node_master_wallets_dir = os.path.join(node_master.datadir, "regtest/wallets") + node_v20_wallets_dir = os.path.join(node_v20.datadir, "regtest/wallets") node_v19_wallets_dir = os.path.join(node_v19.datadir, "regtest/wallets") node_v18_wallets_dir = os.path.join(node_v18.datadir, "regtest/wallets") node_v17_wallets_dir = os.path.join(node_v17.datadir, "regtest/wallets") @@ -199,6 +210,34 @@ class BackwardsCompatibilityTest(BitcoinTestFramework): os.path.join(node_v19_wallets_dir, wallet) ) + # Copy wallets to v0.20 + for wallet in os.listdir(node_master_wallets_dir): + shutil.copytree( + os.path.join(node_master_wallets_dir, wallet), + os.path.join(node_v20_wallets_dir, wallet) + ) + + # Open the wallets in v0.20 + node_v20.loadwallet("w1") + wallet = node_v20.get_wallet_rpc("w1") + info = wallet.getwalletinfo() + assert info['private_keys_enabled'] + assert info['keypoolsize'] > 0 + txs = wallet.listtransactions() + assert_equal(len(txs), 1) + + node_v20.loadwallet("w2") + wallet = node_v20.get_wallet_rpc("w2") + info = wallet.getwalletinfo() + assert info['private_keys_enabled'] == False + assert info['keypoolsize'] == 0 + + node_v20.loadwallet("w3") + wallet = node_v20.get_wallet_rpc("w3") + info = wallet.getwalletinfo() + assert info['private_keys_enabled'] + assert info['keypoolsize'] == 0 + # Open the wallets in v0.19 node_v19.loadwallet("w1") wallet = node_v19.get_wallet_rpc("w1") @@ -277,15 +316,15 @@ class BackwardsCompatibilityTest(BitcoinTestFramework): # assert_raises_rpc_error(-4, "Wallet loading failed.", node_v17.loadwallet, 'w3_v18') # Instead, we stop node and try to launch it with the wallet: - self.stop_node(4) + self.stop_node(5) # it expected to fail with error 'DBErrors::TOO_NEW' but Dash Core can open v18 by 
version 17 # can be implemented in future if there's any incompatible versions #node_v17.assert_start_raises_init_error(["-wallet=w3_v18"], "Error: Error loading w3_v18: Wallet requires newer version of Dash Core") #node_v17.assert_start_raises_init_error(["-wallet=w3"], "Error: Error loading w3: Wallet requires newer version of Dash Core") - self.start_node(4) + self.start_node(5) # Open most recent wallet in v0.16 (no loadwallet RPC) - self.restart_node(5, extra_args=["-wallet=w2"]) + self.restart_node(6, extra_args=["-wallet=w2"]) wallet = node_v16.get_wallet_rpc("w2") info = wallet.getwalletinfo() assert info['keypoolsize'] == 1 diff --git a/test/get_previous_releases.py b/test/get_previous_releases.py index 77988fe068..0ad41648de 100755 --- a/test/get_previous_releases.py +++ b/test/get_previous_releases.py @@ -21,6 +21,18 @@ import hashlib SHA256_SUMS = { + "d1f7121a7d7bdd4077709284076860389d6a0f4481a934ad9acb85cae3d7b83e": "dashcore-20.0.1-aarch64-linux-gnu.tar.gz", + "37375229e5ab18d7050b729fb016df24acdd72d60bc3fa074270d89030a27827": "dashcore-20.0.1-arm-linux-gnueabihf.tar.gz", + "ab530f72d2bfbfcd7fca0644e3ea5c5b279e2204fe50ff7bd9cc452a0d413c65": "dashcore-20.0.1-arm64-apple-darwin.dmg", + "8f4b55e4a3d6bb38a0c1f51ece14f387fd4dcffa000aeecfbbd1f751da8b4446": "dashcore-20.0.1-arm64-apple-darwin.tar.gz", + "1d9cdb00d93e8babe9f54d0ecb04c55f2cd6fd6cfaa85466aa7f95a6976d040d": "dashcore-20.0.1-riscv64-linux-gnu.tar.gz", + "f722954c38d5b18f8290e41ca9dd833929258dcf68c9396cbbc81d241285947b": "dashcore-20.0.1-win64-setup.exe", + "bb6d59a3eadac316e86e073a9f7ca4d28f3a2e8a59b7109d509a7368675a6f5f": "dashcore-20.0.1-win64.zip", + "5373a84f49e4f76bd04987806f5fcde0b537fa1408e1f98370680f3f5134970f": "dashcore-20.0.1-x86_64-apple-darwin.dmg", + "0c9344961ae5800f54ffc90af63826cdbf61acc5c442f3fab6527d528f2d9323": "dashcore-20.0.1-x86_64-apple-darwin.tar.gz", + "7c82bdbd1c2de515d6c7245886d8c0b0044a4a9b6f74166b8d58c82cd4ae3270": "dashcore-20.0.1-x86_64-linux-gnu.tar.gz", + 
"bb898a8e4c54fd5989f114673e1fee5116bf6ffa257c63397993035c390de806": "dashcore-20.0.1.tar.gz", + # "a4b555b47f5f9a5a01fc5d3b543731088bd10a65dd7fa81fb552818146e424b5": "dashcore-19.3.0-aarch64-linux-gnu.tar.gz", "531bb188c1aea808ef6f3533d71182a51958136f6e43d9fcadaef1a5fcdd0468": "dashcore-19.3.0-osx.dmg", "1b4673a2bd71f9f2b593c2d71386e60f4744b59b57142707f0045ed49c92024b": "dashcore-19.3.0-osx64.tar.gz", @@ -105,8 +117,11 @@ def download_binary(tag, args) -> int: if match: bin_path = 'releases/download/test.{}'.format( match.group(1), match.group(2)) + platform = args.platform + if tag < "v20" and platform in ["x86_64-apple-darwin", "aarch64-apple-darwin"]: + platform = "osx64" tarball = 'dashcore-{tag}-{platform}.tar.gz'.format( - tag=tag[1:], platform=args.platform) + tag=tag[1:], platform=platform) tarballUrl = 'https://github.com/dashpay/dash/{bin_path}/{tarball}'.format( bin_path=bin_path, tarball=tarball) @@ -147,10 +162,39 @@ def download_binary(tag, args) -> int: ret = subprocess.run(['tar', '-zxf', tarball, '-C', tag, '--strip-components=1', 'dashcore-{tag}'.format(tag=filename, platform=args.platform)]).returncode - if ret: + if ret != 0: + print(f"Failed to extract the {tag} tarball") return ret Path(tarball).unlink() + + if tag >= "v19" and platform == "arm64-apple-darwin": + # Starting with v23 there are arm64 binaries for ARM (e.g. M1, M2) macs, but they have to be signed to run + binary_path = f'{os.getcwd()}/{tag}/bin/' + + for arm_binary in os.listdir(binary_path): + # Is it already signed? 
+ ret = subprocess.run( + ['codesign', '-v', binary_path + arm_binary], + stderr=subprocess.DEVNULL, # Suppress expected stderr output + ).returncode + if ret == 1: + # Have to self-sign the binary + ret = subprocess.run( + ['codesign', '-s', '-', binary_path + arm_binary] + ).returncode + if ret != 0: + print(f"Failed to self-sign {tag} {arm_binary} arm64 binary") + return 1 + + # Confirm success + ret = subprocess.run( + ['codesign', '-v', binary_path + arm_binary] + ).returncode + if ret != 0: + print(f"Failed to verify the self-signed {tag} {arm_binary} arm64 binary") + return 1 + return 0 @@ -212,8 +256,8 @@ def check_host(args) -> int: platforms = { 'aarch64-*-linux*': 'aarch64-linux-gnu', 'x86_64-*-linux*': 'x86_64-linux-gnu', - 'x86_64-apple-darwin*': 'osx64', - 'aarch64-apple-darwin*': 'osx64', + 'x86_64-apple-darwin*': 'x86_64-apple-darwin', + 'aarch64-apple-darwin*': 'aarch64-apple-darwin', } args.platform = '' for pattern, target in platforms.items(): From 330c5f9451fde5a6335fb32f38f3b29d05fb19e3 Mon Sep 17 00:00:00 2001 From: UdjinM6 Date: Tue, 21 Nov 2023 16:53:55 +0300 Subject: [PATCH 04/16] fix: should avoid implicit conversions in `pushKV` params (#5719) ## Issue being fixed or feature implemented Should fix compilation errors like ``` masternode/meta.cpp:43:9: error: call to member function 'pushKV' is ambiguous ret.pushKV("lastOutboundAttemptElapsed", now - lastOutboundAttempt); ^~ masternode/meta.cpp:45:9: error: call to member function 'pushKV' is ambiguous ret.pushKV("lastOutboundSuccessElapsed", now - lastOutboundSuccess); ^~ ``` on FreeBSD + clang-15 kudos to @MrDefacto for finding the issue and testing the fix ## What was done? Specify `now` variable type explicitly instead of relying on `auto` ## How Has This Been Tested? 
MrDefacto confirmed it compiles with no issues on FreeBSD now ## Breaking Changes n/a ## Checklist: - [x] I have performed a self-review of my own code - [ ] I have commented my code, particularly in hard-to-understand areas - [ ] I have added or updated relevant unit/integration/functional/e2e tests - [ ] I have made corresponding changes to the documentation - [x] I have assigned this pull request to a milestone _(for repository code-owners and collaborators only)_ --- src/masternode/meta.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/masternode/meta.cpp b/src/masternode/meta.cpp index 987fc3ded8..44b94a80f9 100644 --- a/src/masternode/meta.cpp +++ b/src/masternode/meta.cpp @@ -34,7 +34,7 @@ UniValue CMasternodeMetaInfo::ToJson() const { UniValue ret(UniValue::VOBJ); - auto now = GetTime().count(); + int64_t now = GetTime().count(); ret.pushKV("lastDSQ", nLastDsq); ret.pushKV("mixingTxCount", nMixingTxCount); From 4660170eaed3bdf009efbd533098dd0d9af4ccdc Mon Sep 17 00:00:00 2001 From: PastaPastaPasta <6443210+PastaPastaPasta@users.noreply.github.com> Date: Sun, 26 Nov 2023 15:10:38 -0600 Subject: [PATCH 05/16] Merge pull request #5726 from UdjinM6/bp_18742_28150 backport: bitcoin#18742, #28150 --- src/rpc/mining.cpp | 12 ++++----- src/test/validation_block_tests.cpp | 14 +++++----- src/test/validationinterface_tests.cpp | 36 +++++++++++++++++++++++++- src/validationinterface.cpp | 35 +++++++++++++++---------- src/validationinterface.h | 14 +++++----- 5 files changed, 76 insertions(+), 35 deletions(-) diff --git a/src/rpc/mining.cpp b/src/rpc/mining.cpp index 4a4cb503dc..13538afa41 100644 --- a/src/rpc/mining.cpp +++ b/src/rpc/mining.cpp @@ -955,7 +955,7 @@ static UniValue getblocktemplate(const JSONRPCRequest& request) return result; } -class submitblock_StateCatcher : public CValidationInterface +class submitblock_StateCatcher final : public CValidationInterface { public: uint256 hash; @@ -1016,17 +1016,17 @@ static UniValue 
submitblock(const JSONRPCRequest& request) } bool new_block; - submitblock_StateCatcher sc(block.GetHash()); - RegisterValidationInterface(&sc); + auto sc = std::make_shared(block.GetHash()); + RegisterSharedValidationInterface(sc); bool accepted = chainman.ProcessNewBlock(Params(), blockptr, /* fForceProcessing */ true, /* fNewBlock */ &new_block); - UnregisterValidationInterface(&sc); + UnregisterSharedValidationInterface(sc); if (!new_block && accepted) { return "duplicate"; } - if (!sc.found) { + if (!sc->found) { return "inconclusive"; } - return BIP22ValidationResult(sc.state); + return BIP22ValidationResult(sc->state); } static UniValue submitheader(const JSONRPCRequest& request) diff --git a/src/test/validation_block_tests.cpp b/src/test/validation_block_tests.cpp index ec19142166..9f6a56c15b 100644 --- a/src/test/validation_block_tests.cpp +++ b/src/test/validation_block_tests.cpp @@ -40,7 +40,7 @@ static const std::vector V_OP_TRUE{OP_TRUE}; BOOST_FIXTURE_TEST_SUITE(validation_block_tests, MinerTestingSetup) -struct TestSubscriber : public CValidationInterface { +struct TestSubscriber final : public CValidationInterface { uint256 m_expected_tip; explicit TestSubscriber(uint256 tip) : m_expected_tip(tip) {} @@ -179,8 +179,8 @@ BOOST_AUTO_TEST_CASE(processnewblock_signals_ordering) LOCK(cs_main); initial_tip = ::ChainActive().Tip(); } - TestSubscriber sub(initial_tip->GetBlockHash()); - RegisterValidationInterface(&sub); + auto sub = std::make_shared(initial_tip->GetBlockHash()); + RegisterSharedValidationInterface(sub); // create a bunch of threads that repeatedly process a block generated above at random // this will create parallelism and randomness inside validation - the ValidationInterface @@ -208,14 +208,12 @@ BOOST_AUTO_TEST_CASE(processnewblock_signals_ordering) for (auto& t : threads) { t.join(); } - while (GetMainSignals().CallbacksPending() > 0) { - UninterruptibleSleep(std::chrono::milliseconds{100}); - } + SyncWithValidationInterfaceQueue(); - 
UnregisterValidationInterface(&sub); + UnregisterSharedValidationInterface(sub); LOCK(cs_main); - BOOST_CHECK_EQUAL(sub.m_expected_tip, ::ChainActive().Tip()->GetBlockHash()); + BOOST_CHECK_EQUAL(sub->m_expected_tip, ::ChainActive().Tip()->GetBlockHash()); } /** diff --git a/src/test/validationinterface_tests.cpp b/src/test/validationinterface_tests.cpp index 208be92852..1e935a0ba1 100644 --- a/src/test/validationinterface_tests.cpp +++ b/src/test/validationinterface_tests.cpp @@ -10,7 +10,41 @@ #include #include -BOOST_FIXTURE_TEST_SUITE(validationinterface_tests, TestingSetup) +BOOST_FIXTURE_TEST_SUITE(validationinterface_tests, ChainTestingSetup) + +struct TestSubscriberNoop final : public CValidationInterface { + void BlockChecked(const CBlock&, const BlockValidationState&) override {} +}; + +BOOST_AUTO_TEST_CASE(unregister_validation_interface_race) +{ + std::atomic generate{true}; + + // Start thread to generate notifications + std::thread gen{[&] { + const CBlock block_dummy; + const BlockValidationState state_dummy; + while (generate) { + GetMainSignals().BlockChecked(block_dummy, state_dummy); + } + }}; + + // Start thread to consume notifications + std::thread sub{[&] { + // keep going for about 1 sec, which is 250k iterations + for (int i = 0; i < 250000; i++) { + auto sub = std::make_shared(); + RegisterSharedValidationInterface(sub); + UnregisterSharedValidationInterface(sub); + } + // tell the other thread we are done + generate = false; + }}; + + gen.join(); + sub.join(); + BOOST_CHECK(!generate); +} class TestInterface : public CValidationInterface { diff --git a/src/validationinterface.cpp b/src/validationinterface.cpp index 1dc40f3aad..ed014cb752 100644 --- a/src/validationinterface.cpp +++ b/src/validationinterface.cpp @@ -94,22 +94,26 @@ public: static CMainSignals g_signals; -void CMainSignals::RegisterBackgroundSignalScheduler(CScheduler& scheduler) { +void CMainSignals::RegisterBackgroundSignalScheduler(CScheduler& scheduler) +{ 
assert(!m_internals); m_internals.reset(new MainSignalsInstance(&scheduler)); } -void CMainSignals::UnregisterBackgroundSignalScheduler() { +void CMainSignals::UnregisterBackgroundSignalScheduler() +{ m_internals.reset(nullptr); } -void CMainSignals::FlushBackgroundCallbacks() { +void CMainSignals::FlushBackgroundCallbacks() +{ if (m_internals) { m_internals->m_schedulerClient.EmptyQueue(); } } -size_t CMainSignals::CallbacksPending() { +size_t CMainSignals::CallbacksPending() +{ if (!m_internals) return 0; return m_internals->m_schedulerClient.CallbacksPending(); } @@ -119,10 +123,11 @@ CMainSignals& GetMainSignals() return g_signals; } -void RegisterSharedValidationInterface(std::shared_ptr pwalletIn) { - // Each connection captures pwalletIn to ensure that each callback is - // executed before pwalletIn is destroyed. For more details see #18338. - g_signals.m_internals->Register(std::move(pwalletIn)); +void RegisterSharedValidationInterface(std::shared_ptr callbacks) +{ + // Each connection captures the shared_ptr to ensure that each callback is + // executed before the subscriber is destroyed. For more details see #18338. 
+ g_signals.m_internals->Register(std::move(callbacks)); } void RegisterValidationInterface(CValidationInterface* callbacks) @@ -137,24 +142,28 @@ void UnregisterSharedValidationInterface(std::shared_ptr c UnregisterValidationInterface(callbacks.get()); } -void UnregisterValidationInterface(CValidationInterface* pwalletIn) { +void UnregisterValidationInterface(CValidationInterface* callbacks) +{ if (g_signals.m_internals) { - g_signals.m_internals->Unregister(pwalletIn); + g_signals.m_internals->Unregister(callbacks); } } -void UnregisterAllValidationInterfaces() { +void UnregisterAllValidationInterfaces() +{ if (!g_signals.m_internals) { return; } g_signals.m_internals->Clear(); } -void CallFunctionInValidationInterfaceQueue(std::function func) { +void CallFunctionInValidationInterfaceQueue(std::function func) +{ g_signals.m_internals->m_schedulerClient.AddToProcessQueue(std::move(func)); } -void SyncWithValidationInterfaceQueue() { +void SyncWithValidationInterfaceQueue() +{ AssertLockNotHeld(cs_main); // Block until the validation queue drains std::promise promise; diff --git a/src/validationinterface.h b/src/validationinterface.h index 018d858ef2..90a5729239 100644 --- a/src/validationinterface.h +++ b/src/validationinterface.h @@ -33,20 +33,20 @@ namespace llmq { class CRecoveredSig; } // namespace llmq -// These functions dispatch to one or all registered wallets - -/** Register a wallet to receive updates from core */ -void RegisterValidationInterface(CValidationInterface* pwalletIn); -/** Unregister a wallet from core */ -void UnregisterValidationInterface(CValidationInterface* pwalletIn); -/** Unregister all wallets from core */ +/** Register subscriber */ +void RegisterValidationInterface(CValidationInterface* callbacks); +/** Unregister subscriber. DEPRECATED. This is not safe to use when the RPC server or main message handler thread is running. 
*/ +void UnregisterValidationInterface(CValidationInterface* callbacks); +/** Unregister all subscribers */ void UnregisterAllValidationInterfaces(); // Alternate registration functions that release a shared_ptr after the last // notification is sent. These are useful for race-free cleanup, since // unregistration is nonblocking and can return before the last notification is // processed. +/** Register subscriber */ void RegisterSharedValidationInterface(std::shared_ptr callbacks); +/** Unregister subscriber */ void UnregisterSharedValidationInterface(std::shared_ptr callbacks); /** From 96d4a3051087300e349d286f3472e0176f6a2c23 Mon Sep 17 00:00:00 2001 From: UdjinM6 Date: Fri, 1 Dec 2023 18:11:35 +0300 Subject: [PATCH 06/16] ci: Bump Guix build timeout and implement cacheing (#5727) ## Issue being fixed or feature implemented Hopefully fixes issues like >The job running on runner ubuntu-core-x64_i-05ed4263b8e049c7a has exceeded the maximum execution time of 360 minutes https://github.com/dashpay/dash/actions/runs/6932017275 https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepstimeout-minutes https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idtimeout-minutes ## What was done? Bump timeouts for the job itself and for the corresponding step. Also, implemented caching for `.cache` and `depends` folders. ## How Has This Been Tested? 
#5729 https://github.com/dashpay/dash/actions/runs/6996271543/job/19031968814?pr=5729 ## Breaking Changes n/a ## Checklist: - [x] I have performed a self-review of my own code - [ ] I have commented my code, particularly in hard-to-understand areas - [ ] I have added or updated relevant unit/integration/functional/e2e tests - [ ] I have made corresponding changes to the documentation - [x] I have assigned this pull request to a milestone _(for repository code-owners and collaborators only)_ --- .github/workflows/guix-build.yml | 42 +++++++++++++++++++++++++----- contrib/containers/guix/Dockerfile | 11 +++++--- 2 files changed, 44 insertions(+), 9 deletions(-) diff --git a/.github/workflows/guix-build.yml b/.github/workflows/guix-build.yml index ec3a5ff992..9b91fb2bea 100644 --- a/.github/workflows/guix-build.yml +++ b/.github/workflows/guix-build.yml @@ -9,11 +9,13 @@ jobs: build: runs-on: [ "self-hosted", "linux", "x64", "ubuntu-core" ] if: contains(github.event.pull_request.labels.*.name, 'guix-build') + timeout-minutes: 480 steps: - name: Checkout uses: actions/checkout@v4 with: ref: ${{ github.event.pull_request.head.sha }} + path: dash - name: Set up Docker Buildx uses: docker/setup-buildx-action@v3 @@ -21,31 +23,48 @@ jobs: - name: Commit variables id: dockerfile run: | - echo "hash=$(sha256sum ./contrib/containers/guix/Dockerfile | cut -d ' ' -f1)" >> $GITHUB_OUTPUT + echo "hash=$(sha256sum ./dash/contrib/containers/guix/Dockerfile | cut -d ' ' -f1)" >> $GITHUB_OUTPUT echo "host_user_id=$(id -u)" >> $GITHUB_OUTPUT echo "host_group_id=$(id -g)" >> $GITHUB_OUTPUT - name: Build Docker image uses: docker/build-push-action@v5 with: - context: ${{ github.workspace }} + context: ${{ github.workspace }}/dash build-args: | USER_ID=${{ steps.dockerfile.outputs.host_user_id }} GROUP_ID=${{ steps.dockerfile.outputs.host_group_id }} build-contexts: | - docker_root=${{ github.workspace }}/contrib/containers/guix - file: ./contrib/containers/guix/Dockerfile + 
docker_root=${{ github.workspace }}/dash/contrib/containers/guix + file: ./dash/contrib/containers/guix/Dockerfile load: true tags: guix_ubuntu:latest cache-from: type=gha cache-to: type=gha,mode=max + - name: Restore Guix cache and depends + id: guix-cache-restore + uses: actions/cache/restore@v3 + with: + path: | + ${{ github.workspace }}/.cache + ${{ github.workspace }}/dash/depends/built + ${{ github.workspace }}/dash/depends/sources + ${{ github.workspace }}/dash/depends/work + key: ${{ runner.os }}-guix + + - name: Create .cache folder if missing + if: steps.guix-cache-restore.outputs.cache-hit != 'true' + run: mkdir -p .cache + - name: Run Guix build + timeout-minutes: 480 run: | docker run --privileged -d --rm -t \ --name guix-daemon \ -e ADDITIONAL_GUIX_COMMON_FLAGS="--max-jobs=$(nproc --all)" \ - -v ${{ github.workspace }}:/src/dash \ + -v ${{ github.workspace }}/dash:/src/dash \ + -v ${{ github.workspace }}/.cache:/home/ubuntu/.cache \ -w /src/dash \ guix_ubuntu:latest && \ docker exec guix-daemon bash -c '/usr/local/bin/guix-start' @@ -57,6 +76,17 @@ jobs: exit 1 fi + - name: Save Guix cache and depends + id: guix-cache-save + uses: actions/cache/save@v3 + with: + path: | + ${{ github.workspace }}/.cache + ${{ github.workspace }}/dash/depends/built + ${{ github.workspace }}/dash/depends/sources + ${{ github.workspace }}/dash/depends/work + key: ${{ steps.guix-cache-restore.outputs.cache-primary-key }} + - name: Compute SHA256 checksums run: | - ./contrib/containers/guix/scripts/guix-check ${{ github.workspace }} + ./dash/contrib/containers/guix/scripts/guix-check ${{ github.workspace }}/dash diff --git a/contrib/containers/guix/Dockerfile b/contrib/containers/guix/Dockerfile index 12b531a715..aa42ce41c2 100644 --- a/contrib/containers/guix/Dockerfile +++ b/contrib/containers/guix/Dockerfile @@ -79,9 +79,14 @@ COPY --from=docker_root ./scripts/entrypoint /usr/local/bin/entrypoint COPY --from=docker_root ./scripts/guix-check /usr/local/bin/guix-check COPY 
--from=docker_root ./scripts/guix-start /usr/local/bin/guix-start -# Create directory for mounting and grant necessary permissions -RUN mkdir -p /src/dash && \ - chown -R ${USER_ID}:${GROUP_ID} /src +# Create directories for mounting to save/restore cache and grant necessary permissions +RUN mkdir -p \ + /home/${USERNAME}/.cache \ + /src/dash/depends/{built,sources,work} && \ + chown -R ${USER_ID}:${GROUP_ID} \ + /home/${USERNAME}/.cache \ + /src + WORKDIR "/src/dash" # Switch to unprivileged context From 4092abc10d4953c759ad05ffa4067835f7da85a5 Mon Sep 17 00:00:00 2001 From: UdjinM6 Date: Wed, 29 Nov 2023 17:17:58 +0300 Subject: [PATCH 07/16] fix: Improve quorum data caching and cleanup (#5731) ## Issue being fixed or feature implemented ## What was done? ## How Has This Been Tested? ## Breaking Changes ## Checklist: - [x] I have performed a self-review of my own code - [x] I have commented my code, particularly in hard-to-understand areas - [ ] I have added or updated relevant unit/integration/functional/e2e tests - [ ] I have made corresponding changes to the documentation - [x] I have assigned this pull request to a milestone _(for repository code-owners and collaborators only)_ --------- Co-authored-by: PastaPastaPasta <6443210+PastaPastaPasta@users.noreply.github.com> --- src/llmq/params.h | 21 +++++++++++++++ src/llmq/quorums.cpp | 61 +++++++++++++++++++++++++++----------------- src/llmq/quorums.h | 2 +- src/llmq/utils.cpp | 12 ++++----- src/llmq/utils.h | 2 +- 5 files changed, 67 insertions(+), 31 deletions(-) diff --git a/src/llmq/params.h b/src/llmq/params.h index 3076674fdb..a9cb118929 100644 --- a/src/llmq/params.h +++ b/src/llmq/params.h @@ -104,6 +104,13 @@ struct LLMQParams { // For rotated quorums it should be equal to 2 x active quorums set. int keepOldConnections; + // The number of quorums for which we should keep keys. Usually it's equal to keepOldConnections. 
+ // Unlike for other quorum types we want to keep data (secret key shares and vvec) + // for Platform quorums for much longer because Platform can be restarted and + // it must be able to re-sign stuff. + + int keepOldKeys; + // How many members should we try to send all sigShares to before we give up. int recoveryMembers; }; @@ -138,6 +145,7 @@ static constexpr std::array available_llmqs = { .signingActiveQuorumCount = 2, // just a few ones to allow easier testing .keepOldConnections = 3, + .keepOldKeys = 3, .recoveryMembers = 3, }, @@ -163,6 +171,7 @@ static constexpr std::array available_llmqs = { .signingActiveQuorumCount = 2, // just a few ones to allow easier testing .keepOldConnections = 3, + .keepOldKeys = 3, .recoveryMembers = 3, }, @@ -188,6 +197,7 @@ static constexpr std::array available_llmqs = { .signingActiveQuorumCount = 2, // just a few ones to allow easier testing .keepOldConnections = 3, + .keepOldKeys = 3, .recoveryMembers = 3, }, @@ -213,6 +223,7 @@ static constexpr std::array available_llmqs = { .signingActiveQuorumCount = 2, // just a few ones to allow easier testing .keepOldConnections = 4, + .keepOldKeys = 4, .recoveryMembers = 3, }, @@ -238,6 +249,7 @@ static constexpr std::array available_llmqs = { .signingActiveQuorumCount = 2, // just a few ones to allow easier testing .keepOldConnections = 4, + .keepOldKeys = 24 * 30 * 2, // 2 months of quorums .recoveryMembers = 3, }, @@ -263,6 +275,7 @@ static constexpr std::array available_llmqs = { .signingActiveQuorumCount = 4, // just a few ones to allow easier testing .keepOldConnections = 5, + .keepOldKeys = 5, .recoveryMembers = 6, }, @@ -288,6 +301,7 @@ static constexpr std::array available_llmqs = { .signingActiveQuorumCount = 2, // just a few ones to allow easier testing .keepOldConnections = 4, + .keepOldKeys = 4, .recoveryMembers = 4, }, @@ -313,6 +327,7 @@ static constexpr std::array available_llmqs = { .signingActiveQuorumCount = 4, // just a few ones to allow easier testing 
.keepOldConnections = 5, + .keepOldKeys = 24 * 30 * 2, // 2 months of quorums .recoveryMembers = 6, }, @@ -338,6 +353,7 @@ static constexpr std::array available_llmqs = { .signingActiveQuorumCount = 24, // a full day worth of LLMQs .keepOldConnections = 25, + .keepOldKeys = 25, .recoveryMembers = 25, }, @@ -363,6 +379,7 @@ static constexpr std::array available_llmqs = { .signingActiveQuorumCount = 32, .keepOldConnections = 64, + .keepOldKeys = 64, .recoveryMembers = 25, }, @@ -389,6 +406,7 @@ static constexpr std::array available_llmqs = { .signingActiveQuorumCount = 4, // two days worth of LLMQs .keepOldConnections = 5, + .keepOldKeys = 5, .recoveryMembers = 100, }, @@ -416,6 +434,7 @@ static constexpr std::array available_llmqs = { .signingActiveQuorumCount = 4, // four days worth of LLMQs .keepOldConnections = 5, + .keepOldKeys = 5, .recoveryMembers = 100, }, @@ -443,6 +462,7 @@ static constexpr std::array available_llmqs = { .signingActiveQuorumCount = 24, // a full day worth of LLMQs .keepOldConnections = 25, + .keepOldKeys = 24 * 30 * 2, // 2 months of quorums .recoveryMembers = 50, }, @@ -470,6 +490,7 @@ static constexpr std::array available_llmqs = { .signingActiveQuorumCount = 24, // a full day worth of LLMQs .keepOldConnections = 25, + .keepOldKeys = 24 * 30 * 2, // 2 months of quorums .recoveryMembers = 12, }, diff --git a/src/llmq/quorums.cpp b/src/llmq/quorums.cpp index 1d0991f942..a5f58c922a 100644 --- a/src/llmq/quorums.cpp +++ b/src/llmq/quorums.cpp @@ -200,8 +200,8 @@ CQuorumManager::CQuorumManager(CBLSWorker& _blsWorker, CChainState& chainstate, m_mn_sync(mn_sync), m_peerman(peerman) { - utils::InitQuorumsCache(mapQuorumsCache); - utils::InitQuorumsCache(scanQuorumsCache); + utils::InitQuorumsCache(mapQuorumsCache, false); + utils::InitQuorumsCache(scanQuorumsCache, false); quorumThreadInterrupt.reset(); } @@ -296,7 +296,7 @@ void CQuorumManager::UpdatedBlockTip(const CBlockIndex* pindexNew, bool fInitial } 
TriggerQuorumDataRecoveryThreads(pindexNew); - CleanupOldQuorumData(pindexNew); + StartCleanupOldQuorumDataThread(pindexNew); } void CQuorumManager::CheckQuorumConnections(const Consensus::LLMQParams& llmqParams, const CBlockIndex* pindexNew) const @@ -956,7 +956,7 @@ void CQuorumManager::StartQuorumDataRecoveryThread(const CQuorumCPtr pQuorum, co }); } -static void DataCleanupHelper(CDBWrapper& db, std::set skip_list) +static void DataCleanupHelper(CDBWrapper& db, std::set skip_list, bool compact = false) { const auto prefixes = {DB_QUORUM_QUORUM_VVEC, DB_QUORUM_SK_SHARE}; @@ -990,39 +990,54 @@ static void DataCleanupHelper(CDBWrapper& db, std::set skip_list) db.WriteBatch(batch); - LogPrint(BCLog::LLMQ, "CQuorumManager::%d -- %s removed %d\n", __func__, prefix, count); + LogPrint(BCLog::LLMQ, "CQuorumManager::%s -- %s removed %d\n", __func__, prefix, count); } pcursor.reset(); - db.CompactFull(); + + if (compact) { + // Avoid using this on regular cleanups, use on db migrations only + LogPrint(BCLog::LLMQ, "CQuorumManager::%s -- compact start\n", __func__); + db.CompactFull(); + LogPrint(BCLog::LLMQ, "CQuorumManager::%s -- compact end\n", __func__); + } } -void CQuorumManager::CleanupOldQuorumData(const CBlockIndex* pIndex) const +void CQuorumManager::StartCleanupOldQuorumDataThread(const CBlockIndex* pIndex) const { - if (!fMasternodeMode || pIndex == nullptr || (pIndex->nHeight % 576 != 0)) { + // Note: this function is CPU heavy and we don't want it to be running during DKGs. + // The largest dkgMiningWindowStart for a related quorum type is 42 (LLMQ_60_75). + // At the same time most quorums use dkgInterval = 24 so the next DKG for them + // (after block 576 + 42) will start at block 576 + 24 * 2. That's only a 6 blocks + // window and it's better to have more room so we pick next cycle. + // dkgMiningWindowStart for small quorums is 10 i.e. a safe block to start + // these calculations is at height 576 + 24 * 2 + 10 = 576 + 58. 
+ if (!fMasternodeMode || pIndex == nullptr || (pIndex->nHeight % 576 != 58)) { return; } - std::set dbKeysToSkip; + cxxtimer::Timer t(/*start=*/ true); + LogPrint(BCLog::LLMQ, "CQuorumManager::%s -- start\n", __func__); - LogPrint(BCLog::LLMQ, "CQuorumManager::%d -- start\n", __func__); - // Platform quorums in all networks are created every 24 blocks (~1h). - // Unlike for other quorum types we want to keep data (secret key shares and vvec) - // for Platform quorums for at least 2 months because Platform can be restarted and - // it must be able to re-sign stuff. During a month, 24 * 30 quorums are created. - constexpr auto numPlatformQuorumsDataToKeep = 24 * 30 * 2; + // do not block the caller thread + workerPool.push([pIndex, t, this](int threadId) { + std::set dbKeysToSkip; - for (const auto& params : Params().GetConsensus().llmqs) { - auto nQuorumsToKeep = params.type == Params().GetConsensus().llmqTypePlatform ? numPlatformQuorumsDataToKeep : params.keepOldConnections; - const auto vecQuorums = ScanQuorums(params.type, pIndex, nQuorumsToKeep); - for (const auto& pQuorum : vecQuorums) { - dbKeysToSkip.insert(MakeQuorumKey(*pQuorum)); + for (const auto& params : Params().GetConsensus().llmqs) { + if (quorumThreadInterrupt) { + break; + } + for (const auto& pQuorum : ScanQuorums(params.type, pIndex, params.keepOldKeys)) { + dbKeysToSkip.insert(MakeQuorumKey(*pQuorum)); + } } - } - DataCleanupHelper(m_evoDb.GetRawDB(), dbKeysToSkip); + if (!quorumThreadInterrupt) { + DataCleanupHelper(m_evoDb.GetRawDB(), dbKeysToSkip); + } - LogPrint(BCLog::LLMQ, "CQuorumManager::%d -- done\n", __func__); + LogPrint(BCLog::LLMQ, "CQuorumManager::StartCleanupOldQuorumDataThread -- done. 
time=%d\n", t.count()); + }); } } // namespace llmq diff --git a/src/llmq/quorums.h b/src/llmq/quorums.h index bc2eba6ae4..976ba0f300 100644 --- a/src/llmq/quorums.h +++ b/src/llmq/quorums.h @@ -277,7 +277,7 @@ private: void StartCachePopulatorThread(const CQuorumCPtr pQuorum) const; void StartQuorumDataRecoveryThread(const CQuorumCPtr pQuorum, const CBlockIndex* pIndex, uint16_t nDataMask) const; - void CleanupOldQuorumData(const CBlockIndex* pIndex) const; + void StartCleanupOldQuorumDataThread(const CBlockIndex* pIndex) const; }; extern std::unique_ptr quorumManager; diff --git a/src/llmq/utils.cpp b/src/llmq/utils.cpp index 83fbe9b214..b4af8d5bdf 100644 --- a/src/llmq/utils.cpp +++ b/src/llmq/utils.cpp @@ -1110,17 +1110,17 @@ std::map GetEnabledQuorumVvecSyncEntries() } template -void InitQuorumsCache(CacheType& cache) +void InitQuorumsCache(CacheType& cache, bool limit_by_connections) { for (const auto& llmq : Params().GetConsensus().llmqs) { cache.emplace(std::piecewise_construct, std::forward_as_tuple(llmq.type), - std::forward_as_tuple(llmq.keepOldConnections)); + std::forward_as_tuple(limit_by_connections ? 
llmq.keepOldConnections : llmq.keepOldKeys)); } } -template void InitQuorumsCache>>(std::map>& cache); -template void InitQuorumsCache, StaticSaltedHasher>>>(std::map, StaticSaltedHasher>>& cache); -template void InitQuorumsCache, StaticSaltedHasher, 0ul, 0ul>, std::less, std::allocator, StaticSaltedHasher, 0ul, 0ul>>>>>(std::map, StaticSaltedHasher, 0ul, 0ul>, std::less, std::allocator, StaticSaltedHasher, 0ul, 0ul>>>>&); -template void InitQuorumsCache>>(std::map>& cache); +template void InitQuorumsCache>>(std::map>& cache, bool limit_by_connections); +template void InitQuorumsCache, StaticSaltedHasher>>>(std::map, StaticSaltedHasher>>& cache, bool limit_by_connections); +template void InitQuorumsCache, StaticSaltedHasher, 0ul, 0ul>, std::less, std::allocator, StaticSaltedHasher, 0ul, 0ul>>>>>(std::map, StaticSaltedHasher, 0ul, 0ul>, std::less, std::allocator, StaticSaltedHasher, 0ul, 0ul>>>>&cache, bool limit_by_connections); +template void InitQuorumsCache>>(std::map>& cache, bool limit_by_connections); } // namespace utils diff --git a/src/llmq/utils.h b/src/llmq/utils.h index 535e279afc..02037feadc 100644 --- a/src/llmq/utils.h +++ b/src/llmq/utils.h @@ -120,7 +120,7 @@ void IterateNodesRandom(NodesContainer& nodeStates, Continue&& cont, Callback&& } template -void InitQuorumsCache(CacheType& cache); +void InitQuorumsCache(CacheType& cache, bool limit_by_connections = true); } // namespace utils From 5132c11cb07f6aae3e78b295a7cc340f8ed0ac0b Mon Sep 17 00:00:00 2001 From: UdjinM6 Date: Mon, 27 Nov 2023 21:13:06 +0300 Subject: [PATCH 08/16] fix: use correct interruption condition in `StartCachePopulatorThread` (#5732) ## Issue being fixed or feature implemented https://github.com/dashpay/dash/pull/4788#discussion_r854468664 noticed while working on #5731 ## What was done? ## How Has This Been Tested? run a node, check logs - there is a meaningful time span between `start` and `done` now and not just zeros all the time. 
## Breaking Changes ## Checklist: - [x] I have performed a self-review of my own code - [ ] I have commented my code, particularly in hard-to-understand areas - [ ] I have added or updated relevant unit/integration/functional/e2e tests - [ ] I have made corresponding changes to the documentation - [x] I have assigned this pull request to a milestone _(for repository code-owners and collaborators only)_ --- src/llmq/quorums.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/llmq/quorums.cpp b/src/llmq/quorums.cpp index a5f58c922a..49c6fcd215 100644 --- a/src/llmq/quorums.cpp +++ b/src/llmq/quorums.cpp @@ -824,7 +824,7 @@ void CQuorumManager::StartCachePopulatorThread(const CQuorumCPtr pQuorum) const // when then later some other thread tries to get keys, it will be much faster workerPool.push([pQuorum, t, this](int threadId) { for (const auto i : irange::range(pQuorum->members.size())) { - if (!quorumThreadInterrupt) { + if (quorumThreadInterrupt) { break; } if (pQuorum->qc->validMembers[i]) { From f8e88adadcc981dc218d60d53898f3865240a5a0 Mon Sep 17 00:00:00 2001 From: UdjinM6 Date: Thu, 30 Nov 2023 23:16:45 +0300 Subject: [PATCH 09/16] fix: avoid a crash on -reindex-chainstate (#5746) ## Issue being fixed or feature implemented Avoid a crash on -reindex-chainstate. ## What was done? `ResetBlockFailureFlags` is crashing when `m_chain.Tip()` is null. Call `ResetBlockFailureFlags` inside `if (!is_coinsview_empty(chainstate)) {...}` block - we know `m_chain.Tip()` is not null there. ## How Has This Been Tested? Try running a node with `-reindex-chainstate` cmd-line param w/ and w/out this patch. 
## Breaking Changes n/a ## Checklist: - [x] I have performed a self-review of my own code - [ ] I have commented my code, particularly in hard-to-understand areas - [ ] I have added or updated relevant unit/integration/functional/e2e tests - [ ] I have made corresponding changes to the documentation - [x] I have assigned this pull request to a milestone _(for repository code-owners and collaborators only)_ --- src/init.cpp | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/init.cpp b/src/init.cpp index 518bb71c76..9b139e4b5a 100644 --- a/src/init.cpp +++ b/src/init.cpp @@ -2121,6 +2121,10 @@ bool AppInitMain(const CoreContext& context, NodeContext& node, interfaces::Bloc LogPrintf("%s: bls_legacy_scheme=%d\n", __func__, bls::bls_legacy_scheme.load()); } + if (args.GetArg("-checklevel", DEFAULT_CHECKLEVEL) >= 3) { + chainstate->ResetBlockFailureFlags(nullptr); + } + } else { // TODO: CEvoDB instance should probably be a part of CChainState // (for multiple chainstates to actually work in parallel) @@ -2132,10 +2136,6 @@ bool AppInitMain(const CoreContext& context, NodeContext& node, interfaces::Bloc break; } } - - if (args.GetArg("-checklevel", DEFAULT_CHECKLEVEL) >= 3) { - ::ChainstateActive().ResetBlockFailureFlags(nullptr); - } } } catch (const std::exception& e) { LogPrintf("%s\n", e.what()); From 5e8140d23f0698c82497364f1c1e60df6a151915 Mon Sep 17 00:00:00 2001 From: UdjinM6 Date: Mon, 4 Dec 2023 13:38:32 +0300 Subject: [PATCH 10/16] fix: Redefine `keepOldKeys` and align quorum and dkgsession key storage depths (#5748) ## Issue being fixed or feature implemented When DKG data recovery is triggered by `qgetdata` the data we use to construct `qdata` reply is actually the one handled by `CDKGSessionManager`, not by `CQuorumManager`. Not storing the data long enough in `CDKGSessionManager` will result in this data simply not being recoverable. 
Also, the formula in `CDKGSessionManager::CleanupOldContributions()` is broken for quorums which use rotation (the depth is way too large). ## What was done? Fix both issues by redefining `keepOldKeys` and aligning key storage depths in both modules. ## How Has This Been Tested? ## Breaking Changes n/a ## Checklist: - [x] I have performed a self-review of my own code - [ ] I have commented my code, particularly in hard-to-understand areas - [ ] I have added or updated relevant unit/integration/functional/e2e tests - [ ] I have made corresponding changes to the documentation - [x] I have assigned this pull request to a milestone _(for repository code-owners and collaborators only)_ --- src/llmq/dkgsessionmgr.cpp | 3 ++- src/llmq/params.h | 16 ++++++++-------- 2 files changed, 10 insertions(+), 9 deletions(-) diff --git a/src/llmq/dkgsessionmgr.cpp b/src/llmq/dkgsessionmgr.cpp index fb20dbb368..5f8f5ccee0 100644 --- a/src/llmq/dkgsessionmgr.cpp +++ b/src/llmq/dkgsessionmgr.cpp @@ -466,7 +466,8 @@ void CDKGSessionManager::CleanupOldContributions() const for (const auto& params : Params().GetConsensus().llmqs) { // For how many blocks recent DKG info should be kept - const int MAX_STORE_DEPTH = 2 * params.signingActiveQuorumCount * params.dkgInterval; + const int MAX_CYCLES = params.useRotation ? params.keepOldKeys / params.signingActiveQuorumCount : params.keepOldKeys; + const int MAX_STORE_DEPTH = MAX_CYCLES * params.dkgInterval; LogPrint(BCLog::LLMQ, "CDKGSessionManager::%s -- looking for old entries for llmq type %d\n", __func__, ToUnderlying(params.type)); diff --git a/src/llmq/params.h b/src/llmq/params.h index a9cb118929..4d2cfae9e0 100644 --- a/src/llmq/params.h +++ b/src/llmq/params.h @@ -104,7 +104,7 @@ struct LLMQParams { // For rotated quorums it should be equal to 2 x active quorums set. int keepOldConnections; - // The number of quorums for which we should keep keys. Usually it's equal to keepOldConnections. 
+ // The number of quorums for which we should keep keys. Usually it's equal to signingActiveQuorumCount * 2. // Unlike for other quorum types we want to keep data (secret key shares and vvec) // for Platform quorums for much longer because Platform can be restarted and // it must be able to re-sign stuff. @@ -145,7 +145,7 @@ static constexpr std::array available_llmqs = { .signingActiveQuorumCount = 2, // just a few ones to allow easier testing .keepOldConnections = 3, - .keepOldKeys = 3, + .keepOldKeys = 4, .recoveryMembers = 3, }, @@ -171,7 +171,7 @@ static constexpr std::array available_llmqs = { .signingActiveQuorumCount = 2, // just a few ones to allow easier testing .keepOldConnections = 3, - .keepOldKeys = 3, + .keepOldKeys = 4, .recoveryMembers = 3, }, @@ -197,7 +197,7 @@ static constexpr std::array available_llmqs = { .signingActiveQuorumCount = 2, // just a few ones to allow easier testing .keepOldConnections = 3, - .keepOldKeys = 3, + .keepOldKeys = 4, .recoveryMembers = 3, }, @@ -275,7 +275,7 @@ static constexpr std::array available_llmqs = { .signingActiveQuorumCount = 4, // just a few ones to allow easier testing .keepOldConnections = 5, - .keepOldKeys = 5, + .keepOldKeys = 8, .recoveryMembers = 6, }, @@ -353,7 +353,7 @@ static constexpr std::array available_llmqs = { .signingActiveQuorumCount = 24, // a full day worth of LLMQs .keepOldConnections = 25, - .keepOldKeys = 25, + .keepOldKeys = 48, .recoveryMembers = 25, }, @@ -406,7 +406,7 @@ static constexpr std::array available_llmqs = { .signingActiveQuorumCount = 4, // two days worth of LLMQs .keepOldConnections = 5, - .keepOldKeys = 5, + .keepOldKeys = 8, .recoveryMembers = 100, }, @@ -434,7 +434,7 @@ static constexpr std::array available_llmqs = { .signingActiveQuorumCount = 4, // four days worth of LLMQs .keepOldConnections = 5, - .keepOldKeys = 5, + .keepOldKeys = 8, .recoveryMembers = 100, }, From 18b580591c2fdcc0b531a79cdc656b9cf38aac9d Mon Sep 17 00:00:00 2001 From: PastaPastaPasta 
<6443210+PastaPastaPasta@users.noreply.github.com> Date: Fri, 1 Dec 2023 09:09:47 -0600 Subject: [PATCH 11/16] Merge pull request #5740 from knst/bp-versionbits backport: bitcoin#19438 Introduce deploymentstatus (versionbits improvements) --- src/Makefile.am | 6 +- src/chainparams.cpp | 2 +- src/consensus/params.h | 55 ++++++++++++++- src/deploymentinfo.cpp | 54 +++++++++++++++ src/deploymentinfo.h | 29 ++++++++ src/deploymentstatus.cpp | 17 +++++ src/deploymentstatus.h | 55 +++++++++++++++ src/evo/mnhftx.cpp | 15 +++-- src/evo/mnhftx.h | 2 + src/governance/classes.cpp | 1 + src/llmq/utils.cpp | 10 +-- src/miner.cpp | 7 +- src/node/interfaces.cpp | 1 + src/rpc/blockchain.cpp | 57 ++++++++-------- src/rpc/mining.cpp | 9 +-- .../dynamic_activation_thresholds_tests.cpp | 22 +++--- src/test/versionbits_tests.cpp | 33 ++++----- src/validation.cpp | 67 +++++++------------ src/validation.h | 7 -- src/versionbits.cpp | 36 +++++++--- src/versionbits.h | 35 ++++++---- src/versionbitsinfo.cpp | 22 ------ src/versionbitsinfo.h | 17 ----- 23 files changed, 377 insertions(+), 182 deletions(-) create mode 100644 src/deploymentinfo.cpp create mode 100644 src/deploymentinfo.h create mode 100644 src/deploymentstatus.cpp create mode 100644 src/deploymentstatus.h delete mode 100644 src/versionbitsinfo.cpp delete mode 100644 src/versionbitsinfo.h diff --git a/src/Makefile.am b/src/Makefile.am index c8cdc24e57..a9354896e5 100644 --- a/src/Makefile.am +++ b/src/Makefile.am @@ -169,6 +169,8 @@ BITCOIN_CORE_H = \ cuckoocache.h \ ctpl_stl.h \ cxxtimer.hpp \ + deploymentinfo.h \ + deploymentstatus.h \ evo/assetlocktx.h \ evo/dmn_types.h \ evo/cbtx.h \ @@ -349,7 +351,6 @@ BITCOIN_CORE_H = \ validation.h \ validationinterface.h \ versionbits.h \ - versionbitsinfo.h \ walletinitinterface.h \ wallet/bdb.h \ wallet/coincontrol.h \ @@ -403,6 +404,7 @@ libbitcoin_server_a_SOURCES = \ coinjoin/server.cpp \ consensus/tx_verify.cpp \ dbwrapper.cpp \ + deploymentstatus.cpp \ dsnotificationinterface.cpp 
\ evo/assetlocktx.cpp \ evo/cbtx.cpp \ @@ -696,6 +698,7 @@ libbitcoin_common_a_SOURCES = \ compressor.cpp \ core_read.cpp \ core_write.cpp \ + deploymentinfo.cpp \ key.cpp \ key_io.cpp \ merkleblock.cpp \ @@ -715,7 +718,6 @@ libbitcoin_common_a_SOURCES = \ script/sign.cpp \ script/signingprovider.cpp \ script/standard.cpp \ - versionbitsinfo.cpp \ warnings.cpp \ $(BITCOIN_CORE_H) diff --git a/src/chainparams.cpp b/src/chainparams.cpp index 34ded8f7c2..9e485057de 100644 --- a/src/chainparams.cpp +++ b/src/chainparams.cpp @@ -8,12 +8,12 @@ #include #include +#include #include #include #include #include #include -#include #include diff --git a/src/consensus/params.h b/src/consensus/params.h index 6c0c25c75d..5962b2028e 100644 --- a/src/consensus/params.h +++ b/src/consensus/params.h @@ -14,13 +14,32 @@ namespace Consensus { -enum DeploymentPos { +enum BuriedDeployment : int16_t +{ + DEPLOYMENT_HEIGHTINCB = std::numeric_limits::min(), + DEPLOYMENT_DERSIG, + DEPLOYMENT_CLTV, + DEPLOYMENT_BIP147, + DEPLOYMENT_CSV, + DEPLOYMENT_DIP0001, + DEPLOYMENT_DIP0003, + DEPLOYMENT_DIP0008, + DEPLOYMENT_DIP0020, + DEPLOYMENT_DIP0024, + DEPLOYMENT_BRR, + DEPLOYMENT_V19, +}; +constexpr bool ValidDeployment(BuriedDeployment dep) { return DEPLOYMENT_HEIGHTINCB <= dep && dep <= DEPLOYMENT_V19; } + +enum DeploymentPos : uint16_t +{ DEPLOYMENT_TESTDUMMY, DEPLOYMENT_V20, // Deployment of EHF, LLMQ Randomness Beacon DEPLOYMENT_MN_RR, // Deployment of Masternode Reward Location Reallocation - // NOTE: Also add new deployments to VersionBitsDeploymentInfo in versionbits.cpp + // NOTE: Also add new deployments to VersionBitsDeploymentInfo in deploymentinfo.cpp MAX_VERSION_BITS_DEPLOYMENTS }; +constexpr bool ValidDeployment(DeploymentPos dep) { return DEPLOYMENT_TESTDUMMY <= dep && dep <= DEPLOYMENT_MN_RR; } /** * Struct for each individual consensus rule change using BIP9. 
@@ -145,7 +164,39 @@ struct Params { LLMQType llmqTypePlatform{LLMQType::LLMQ_NONE}; LLMQType llmqTypeMnhf{LLMQType::LLMQ_NONE}; LLMQType llmqTypeAssetLocks{LLMQType::LLMQ_NONE}; + + int DeploymentHeight(BuriedDeployment dep) const + { + switch (dep) { + case DEPLOYMENT_HEIGHTINCB: + return BIP34Height; + case DEPLOYMENT_DERSIG: + return BIP66Height; + case DEPLOYMENT_CLTV: + return BIP65Height; + case DEPLOYMENT_BIP147: + return BIP147Height; + case DEPLOYMENT_CSV: + return CSVHeight; + case DEPLOYMENT_DIP0001: + return DIP0001Height; + case DEPLOYMENT_DIP0003: + return DIP0003Height; + case DEPLOYMENT_DIP0008: + return DIP0008Height; + case DEPLOYMENT_DIP0020: + return DIP0020Height; + case DEPLOYMENT_DIP0024: + return DIP0024Height; + case DEPLOYMENT_BRR: + return BRRHeight; + case DEPLOYMENT_V19: + return V19Height; + } // no default case, so the compiler can warn about missing cases + return std::numeric_limits::max(); + } }; + } // namespace Consensus #endif // BITCOIN_CONSENSUS_PARAMS_H diff --git a/src/deploymentinfo.cpp b/src/deploymentinfo.cpp new file mode 100644 index 0000000000..17da5d3d36 --- /dev/null +++ b/src/deploymentinfo.cpp @@ -0,0 +1,54 @@ +// Copyright (c) 2016-2020 The Bitcoin Core developers +// Distributed under the MIT software license, see the accompanying +// file COPYING or http://www.opensource.org/licenses/mit-license.php. 
+ +#include + +#include + +const struct VBDeploymentInfo VersionBitsDeploymentInfo[Consensus::MAX_VERSION_BITS_DEPLOYMENTS] = { + { + /*.name =*/ "testdummy", + /*.gbt_force =*/ true, + }, + { + /*.name =*/"v20", + /*.gbt_force =*/true, + }, + { + /*.name =*/"mn_rr", + /*.gbt_force =*/true, + }, +}; + +std::string DeploymentName(Consensus::BuriedDeployment dep) +{ + assert(ValidDeployment(dep)); + switch (dep) { + case Consensus::DEPLOYMENT_HEIGHTINCB: + return "bip34"; + case Consensus::DEPLOYMENT_CLTV: + return "bip65"; + case Consensus::DEPLOYMENT_DERSIG: + return "bip66"; + case Consensus::DEPLOYMENT_BIP147: + return "bip147"; + case Consensus::DEPLOYMENT_CSV: + return "csv"; + case Consensus::DEPLOYMENT_DIP0001: + return "dip0001"; + case Consensus::DEPLOYMENT_DIP0003: + return "dip0003"; + case Consensus::DEPLOYMENT_DIP0008: + return "dip0008"; + case Consensus::DEPLOYMENT_DIP0020: + return "dip0020"; + case Consensus::DEPLOYMENT_DIP0024: + return "dip0024"; + case Consensus::DEPLOYMENT_BRR: + return "realloc"; + case Consensus::DEPLOYMENT_V19: + return "v19"; + } // no default case, so the compiler can warn about missing cases + return ""; +} diff --git a/src/deploymentinfo.h b/src/deploymentinfo.h new file mode 100644 index 0000000000..63d58a7da2 --- /dev/null +++ b/src/deploymentinfo.h @@ -0,0 +1,29 @@ +// Copyright (c) 2016-2018 The Bitcoin Core developers +// Distributed under the MIT software license, see the accompanying +// file COPYING or http://www.opensource.org/licenses/mit-license.php. 
+ +#ifndef BITCOIN_DEPLOYMENTINFO_H +#define BITCOIN_DEPLOYMENTINFO_H + +#include + +#include + +struct VBDeploymentInfo { + /** Deployment name */ + const char *name; + /** Whether GBT clients can safely ignore this rule in simplified usage */ + bool gbt_force; +}; + +extern const VBDeploymentInfo VersionBitsDeploymentInfo[Consensus::MAX_VERSION_BITS_DEPLOYMENTS]; + +std::string DeploymentName(Consensus::BuriedDeployment dep); + +inline std::string DeploymentName(Consensus::DeploymentPos pos) +{ + assert(Consensus::ValidDeployment(pos)); + return VersionBitsDeploymentInfo[pos].name; +} + +#endif // BITCOIN_DEPLOYMENTINFO_H diff --git a/src/deploymentstatus.cpp b/src/deploymentstatus.cpp new file mode 100644 index 0000000000..9007800421 --- /dev/null +++ b/src/deploymentstatus.cpp @@ -0,0 +1,17 @@ +// Copyright (c) 2020 The Bitcoin Core developers +// Distributed under the MIT software license, see the accompanying +// file COPYING or http://www.opensource.org/licenses/mit-license.php. + +#include + +#include +#include + +VersionBitsCache g_versionbitscache; + +/* Basic sanity checking for BuriedDeployment/DeploymentPos enums and + * ValidDeployment check */ + +static_assert(ValidDeployment(Consensus::DEPLOYMENT_TESTDUMMY), "sanity check of DeploymentPos failed (TESTDUMMY not valid)"); +static_assert(!ValidDeployment(Consensus::MAX_VERSION_BITS_DEPLOYMENTS), "sanity check of DeploymentPos failed (MAX value considered valid)"); +static_assert(!ValidDeployment(static_cast(Consensus::DEPLOYMENT_TESTDUMMY)), "sanity check of BuriedDeployment failed (overlaps with DeploymentPos)"); diff --git a/src/deploymentstatus.h b/src/deploymentstatus.h new file mode 100644 index 0000000000..84c5e54698 --- /dev/null +++ b/src/deploymentstatus.h @@ -0,0 +1,55 @@ +// Copyright (c) 2020 The Bitcoin Core developers +// Distributed under the MIT software license, see the accompanying +// file COPYING or http://www.opensource.org/licenses/mit-license.php. 
+ +#ifndef BITCOIN_DEPLOYMENTSTATUS_H +#define BITCOIN_DEPLOYMENTSTATUS_H + +#include +#include + +#include + +/** Global cache for versionbits deployment status */ +extern VersionBitsCache g_versionbitscache; + +/** Determine if a deployment is active for the next block */ +inline bool DeploymentActiveAfter(const CBlockIndex* pindexPrev, const Consensus::Params& params, Consensus::BuriedDeployment dep) +{ + assert(Consensus::ValidDeployment(dep)); + return (pindexPrev == nullptr ? 0 : pindexPrev->nHeight + 1) >= params.DeploymentHeight(dep); +} + +inline bool DeploymentActiveAfter(const CBlockIndex* pindexPrev, const Consensus::Params& params, Consensus::DeploymentPos dep) +{ + assert(Consensus::ValidDeployment(dep)); + return ThresholdState::ACTIVE == g_versionbitscache.State(pindexPrev, params, dep); +} + +/** Determine if a deployment is active for this block */ +inline bool DeploymentActiveAt(const CBlockIndex& index, const Consensus::Params& params, Consensus::BuriedDeployment dep) +{ + assert(Consensus::ValidDeployment(dep)); + return index.nHeight >= params.DeploymentHeight(dep); +} + +inline bool DeploymentActiveAt(const CBlockIndex& index, const Consensus::Params& params, Consensus::DeploymentPos dep) +{ + assert(Consensus::ValidDeployment(dep)); + return DeploymentActiveAfter(index.pprev, params, dep); +} + +/** Determine if a deployment is enabled (can ever be active) */ +inline bool DeploymentEnabled(const Consensus::Params& params, Consensus::BuriedDeployment dep) +{ + assert(Consensus::ValidDeployment(dep)); + return params.DeploymentHeight(dep) != std::numeric_limits::max(); +} + +inline bool DeploymentEnabled(const Consensus::Params& params, Consensus::DeploymentPos dep) +{ + assert(Consensus::ValidDeployment(dep)); + return params.vDeployments[dep].nTimeout != 0; +} + +#endif // BITCOIN_DEPLOYMENTSTATUS_H diff --git a/src/evo/mnhftx.cpp b/src/evo/mnhftx.cpp index 34a031e23e..8f648aea45 100644 --- a/src/evo/mnhftx.cpp +++ b/src/evo/mnhftx.cpp @@ 
-3,6 +3,7 @@ // file COPYING or http://www.opensource.org/licenses/mit-license.php. #include +#include #include #include #include @@ -278,10 +279,12 @@ CMNHFManager::Signals CMNHFManager::GetFromCache(const CBlockIndex* const pindex return signals; } } - if (VersionBitsState(pindex->pprev, Params().GetConsensus(), Consensus::DEPLOYMENT_V20, versionbitscache) != ThresholdState::ACTIVE) { + { LOCK(cs_cache); - mnhfCache.insert(blockHash, {}); - return {}; + if (ThresholdState::ACTIVE != v20_activation.State(pindex->pprev, Params().GetConsensus(), Consensus::DEPLOYMENT_V20)) { + mnhfCache.insert(blockHash, {}); + return {}; + } } bool ok = m_evoDb.Read(std::make_pair(DB_SIGNALS, blockHash), signals); assert(ok); @@ -297,8 +300,10 @@ void CMNHFManager::AddToCache(const Signals& signals, const CBlockIndex* const p LOCK(cs_cache); mnhfCache.insert(blockHash, signals); } - if (VersionBitsState(pindex->pprev, Params().GetConsensus(), Consensus::DEPLOYMENT_V20, versionbitscache) != ThresholdState::ACTIVE) { - return; + assert(pindex != nullptr); + { + LOCK(cs_cache); + if (ThresholdState::ACTIVE != v20_activation.State(pindex->pprev, Params().GetConsensus(), Consensus::DEPLOYMENT_V20)) return; } m_evoDb.Write(std::make_pair(DB_SIGNALS, blockHash), signals); } diff --git a/src/evo/mnhftx.h b/src/evo/mnhftx.h index c130045643..aae8c17412 100644 --- a/src/evo/mnhftx.h +++ b/src/evo/mnhftx.h @@ -102,6 +102,8 @@ private: // versionBit <-> height unordered_lru_cache mnhfCache GUARDED_BY(cs_cache) {MNHFCacheSize}; + // This cache is used only for v20 activation to avoid double lock through VersionBitsConditionChecker::SignalHeight + VersionBitsCache v20_activation GUARDED_BY(cs_cache); public: explicit CMNHFManager(CEvoDB& evoDb); ~CMNHFManager(); diff --git a/src/governance/classes.cpp b/src/governance/classes.cpp index d3444a90f0..2182f2ad93 100644 --- a/src/governance/classes.cpp +++ b/src/governance/classes.cpp @@ -17,6 +17,7 @@ #include #include #include +#include #include 
diff --git a/src/llmq/utils.cpp b/src/llmq/utils.cpp index b4af8d5bdf..98daba584a 100644 --- a/src/llmq/utils.cpp +++ b/src/llmq/utils.cpp @@ -709,7 +709,7 @@ bool IsV19Active(gsl::not_null pindex) bool IsV20Active(gsl::not_null pindex) { LOCK(cs_llmq_vbc); - return VersionBitsState(pindex, Params().GetConsensus(), Consensus::DEPLOYMENT_V20, llmq_versionbitscache) == ThresholdState::ACTIVE; + return llmq_versionbitscache.State(pindex, Params().GetConsensus(), Consensus::DEPLOYMENT_V20) == ThresholdState::ACTIVE; } bool IsMNRewardReallocationActive(gsl::not_null pindex) @@ -717,19 +717,19 @@ bool IsMNRewardReallocationActive(gsl::not_null pindex) if (!IsV20Active(pindex)) return false; LOCK(cs_llmq_vbc); - return VersionBitsState(pindex, Params().GetConsensus(), Consensus::DEPLOYMENT_MN_RR, llmq_versionbitscache) == ThresholdState::ACTIVE; + return llmq_versionbitscache.State(pindex, Params().GetConsensus(), Consensus::DEPLOYMENT_MN_RR) == ThresholdState::ACTIVE; } ThresholdState GetV20State(gsl::not_null pindex) { LOCK(cs_llmq_vbc); - return VersionBitsState(pindex, Params().GetConsensus(), Consensus::DEPLOYMENT_V20, llmq_versionbitscache); + return llmq_versionbitscache.State(pindex, Params().GetConsensus(), Consensus::DEPLOYMENT_V20); } int GetV20Since(gsl::not_null pindex) { LOCK(cs_llmq_vbc); - return VersionBitsStateSinceHeight(pindex, Params().GetConsensus(), Consensus::DEPLOYMENT_V20, llmq_versionbitscache); + return llmq_versionbitscache.StateSinceHeight(pindex, Params().GetConsensus(), Consensus::DEPLOYMENT_V20); } bool IsInstantSendLLMQTypeShared() @@ -1006,7 +1006,7 @@ bool IsQuorumTypeEnabledInternal(Consensus::LLMQType llmqType, const CQuorumMana case Consensus::LLMQType::LLMQ_TEST_V17: { LOCK(cs_llmq_vbc); - return VersionBitsState(pindex, consensusParams, Consensus::DEPLOYMENT_TESTDUMMY, llmq_versionbitscache) == ThresholdState::ACTIVE; + return llmq_versionbitscache.State(pindex, consensusParams, Consensus::DEPLOYMENT_TESTDUMMY) == 
ThresholdState::ACTIVE; } case Consensus::LLMQType::LLMQ_100_67: return pindex->nHeight + 1 >= consensusParams.DIP0020Height; diff --git a/src/miner.cpp b/src/miner.cpp index a3b7272022..a9170ca834 100644 --- a/src/miner.cpp +++ b/src/miner.cpp @@ -13,6 +13,7 @@ #include #include #include +#include #include #include #include @@ -132,11 +133,11 @@ std::unique_ptr BlockAssembler::CreateNewBlock(const CScript& sc assert(pindexPrev != nullptr); nHeight = pindexPrev->nHeight + 1; - bool fDIP0003Active_context = nHeight >= chainparams.GetConsensus().DIP0003Height; - bool fDIP0008Active_context = nHeight >= chainparams.GetConsensus().DIP0008Height; + bool fDIP0003Active_context = DeploymentActiveAfter(pindexPrev, chainparams.GetConsensus(), Consensus::DEPLOYMENT_DIP0003); + bool fDIP0008Active_context = DeploymentActiveAfter(pindexPrev, chainparams.GetConsensus(), Consensus::DEPLOYMENT_DIP0008); bool fV20Active_context = llmq::utils::IsV20Active(pindexPrev); - pblock->nVersion = ComputeBlockVersion(pindexPrev, chainparams.GetConsensus()); + pblock->nVersion = g_versionbitscache.ComputeBlockVersion(pindexPrev, chainparams.GetConsensus()); // Non-mainnet only: allow overriding block.nVersion with // -blockversion=N to test forking scenarios if (Params().NetworkIDString() != CBaseChainParams::MAIN) diff --git a/src/node/interfaces.cpp b/src/node/interfaces.cpp index cb94992a27..e016931038 100644 --- a/src/node/interfaces.cpp +++ b/src/node/interfaces.cpp @@ -8,6 +8,7 @@ #include #include #include +#include #include #include #include diff --git a/src/rpc/blockchain.cpp b/src/rpc/blockchain.cpp index de5a74cccb..0f3c3a2caa 100644 --- a/src/rpc/blockchain.cpp +++ b/src/rpc/blockchain.cpp @@ -12,7 +12,10 @@ #include #include #include +#include #include +#include +#include #include #include #include @@ -35,7 +38,7 @@ #include #include #include -#include +#include #include #include @@ -1572,25 +1575,25 @@ static UniValue verifychain(const JSONRPCRequest& request) 
active_chainstate, Params(), active_chainstate.CoinsTip(), *node.evodb, check_level, check_depth); } -static void BuriedForkDescPushBack(UniValue& softforks, const std::string &name, int softfork_height, int tip_height) EXCLUSIVE_LOCKS_REQUIRED(cs_main) +static void SoftForkDescPushBack(const CBlockIndex* active_chain_tip, UniValue& softforks, const Consensus::Params& params, Consensus::BuriedDeployment dep) { // For buried deployments. // A buried deployment is one where the height of the activation has been hardcoded into // the client implementation long after the consensus change has activated. See BIP 90. // Buried deployments with activation height value of // std::numeric_limits::max() are disabled and thus hidden. - if (softfork_height == std::numeric_limits::max()) return; + if (!DeploymentEnabled(params, dep)) return; UniValue rv(UniValue::VOBJ); rv.pushKV("type", "buried"); // getblockchaininfo reports the softfork as active from when the chain height is // one below the activation height - rv.pushKV("active", tip_height + 1 >= softfork_height); - rv.pushKV("height", softfork_height); - softforks.pushKV(name, rv); + rv.pushKV("active", DeploymentActiveAfter(active_chain_tip, params, dep)); + rv.pushKV("height", params.DeploymentHeight(dep)); + softforks.pushKV(DeploymentName(dep), rv); } -static void BIP9SoftForkDescPushBack(const CBlockIndex* active_chain_tip, const std::unordered_map& signals, UniValue& softforks, const std::string &name, const Consensus::Params& consensusParams, Consensus::DeploymentPos id) EXCLUSIVE_LOCKS_REQUIRED(cs_main) +static void SoftForkDescPushBack(const CBlockIndex* active_chain_tip, const std::unordered_map& signals, UniValue& softforks, const Consensus::Params& consensusParams, Consensus::DeploymentPos id) { // For BIP9 deployments. // Deployments (e.g. testdummy) with timeout value before Jan 1, 2009 are hidden. 
@@ -1599,7 +1602,7 @@ static void BIP9SoftForkDescPushBack(const CBlockIndex* active_chain_tip, const if (consensusParams.vDeployments[id].nTimeout <= 1230768000) return; UniValue bip9(UniValue::VOBJ); - const ThresholdState thresholdState = VersionBitsState(active_chain_tip, consensusParams, id, versionbitscache); + const ThresholdState thresholdState = g_versionbitscache.State(active_chain_tip, consensusParams, id); switch (thresholdState) { case ThresholdState::DEFINED: bip9.pushKV("status", "defined"); break; case ThresholdState::STARTED: bip9.pushKV("status", "started"); break; @@ -1617,12 +1620,12 @@ static void BIP9SoftForkDescPushBack(const CBlockIndex* active_chain_tip, const if (auto it = signals.find(consensusParams.vDeployments[id].bit); it != signals.end()) { bip9.pushKV("ehf_height", it->second); } - int64_t since_height = VersionBitsStateSinceHeight(active_chain_tip, consensusParams, id, versionbitscache); + int64_t since_height = g_versionbitscache.StateSinceHeight(active_chain_tip, consensusParams, id); bip9.pushKV("since", since_height); if (ThresholdState::STARTED == thresholdState) { UniValue statsUV(UniValue::VOBJ); - BIP9Stats statsStruct = VersionBitsStatistics(active_chain_tip, consensusParams, id, versionbitscache); + BIP9Stats statsStruct = g_versionbitscache.Statistics(active_chain_tip, consensusParams, id); statsUV.pushKV("period", statsStruct.period); statsUV.pushKV("threshold", statsStruct.threshold); statsUV.pushKV("elapsed", statsStruct.elapsed); @@ -1642,7 +1645,7 @@ static void BIP9SoftForkDescPushBack(const CBlockIndex* active_chain_tip, const } rv.pushKV("active", ThresholdState::ACTIVE == thresholdState); - softforks.pushKV(name, rv); + softforks.pushKV(DeploymentName(id), rv); } UniValue getblockchaininfo(const JSONRPCRequest& request) @@ -1743,23 +1746,23 @@ UniValue getblockchaininfo(const JSONRPCRequest& request) const Consensus::Params& consensusParams = Params().GetConsensus(); UniValue softforks(UniValue::VOBJ); // sorted 
by activation block - BuriedForkDescPushBack(softforks,"bip34", consensusParams.BIP34Height, height); - BuriedForkDescPushBack(softforks,"bip66", consensusParams.BIP66Height, height); - BuriedForkDescPushBack(softforks,"bip65", consensusParams.BIP65Height, height); - BuriedForkDescPushBack(softforks,"bip147", consensusParams.BIP147Height, height); - BuriedForkDescPushBack(softforks, "csv", consensusParams.CSVHeight, height); - BuriedForkDescPushBack(softforks, "dip0001", consensusParams.DIP0001Height, height); - BuriedForkDescPushBack(softforks, "dip0003", consensusParams.DIP0003Height, height); - BuriedForkDescPushBack(softforks, "dip0008", consensusParams.DIP0008Height, height); - BuriedForkDescPushBack(softforks, "dip0020", consensusParams.DIP0020Height, height); - BuriedForkDescPushBack(softforks, "dip0024", consensusParams.DIP0024Height, height); - BuriedForkDescPushBack(softforks, "realloc", consensusParams.BRRHeight, height); - BuriedForkDescPushBack(softforks, "v19", consensusParams.V19Height, height); - BIP9SoftForkDescPushBack(tip, ehfSignals, softforks, "v20", consensusParams, Consensus::DEPLOYMENT_V20); - BIP9SoftForkDescPushBack(tip, ehfSignals, softforks, "mn_rr", consensusParams, Consensus::DEPLOYMENT_MN_RR); - BIP9SoftForkDescPushBack(tip, ehfSignals, softforks, "testdummy", consensusParams, Consensus::DEPLOYMENT_TESTDUMMY); + SoftForkDescPushBack(tip, softforks, consensusParams, Consensus::DEPLOYMENT_HEIGHTINCB); + SoftForkDescPushBack(tip, softforks, consensusParams, Consensus::DEPLOYMENT_DERSIG); + SoftForkDescPushBack(tip, softforks, consensusParams, Consensus::DEPLOYMENT_CLTV); + SoftForkDescPushBack(tip, softforks, consensusParams, Consensus::DEPLOYMENT_BIP147); + SoftForkDescPushBack(tip, softforks, consensusParams, Consensus::DEPLOYMENT_CSV); + SoftForkDescPushBack(tip, softforks, consensusParams, Consensus::DEPLOYMENT_DIP0001); + SoftForkDescPushBack(tip, softforks, consensusParams, Consensus::DEPLOYMENT_DIP0003); + 
SoftForkDescPushBack(tip, softforks, consensusParams, Consensus::DEPLOYMENT_DIP0008); + SoftForkDescPushBack(tip, softforks, consensusParams, Consensus::DEPLOYMENT_DIP0020); + SoftForkDescPushBack(tip, softforks, consensusParams, Consensus::DEPLOYMENT_DIP0024); + SoftForkDescPushBack(tip, softforks, consensusParams, Consensus::DEPLOYMENT_BRR); + SoftForkDescPushBack(tip, softforks, consensusParams, Consensus::DEPLOYMENT_V19); + SoftForkDescPushBack(tip, ehfSignals, softforks, consensusParams, Consensus::DEPLOYMENT_V20); + SoftForkDescPushBack(tip, ehfSignals, softforks, consensusParams, Consensus::DEPLOYMENT_MN_RR); + SoftForkDescPushBack(tip, ehfSignals, softforks, consensusParams, Consensus::DEPLOYMENT_TESTDUMMY); - obj.pushKV("softforks", softforks); + obj.pushKV("softforks", softforks); obj.pushKV("warnings", GetWarnings(false)); return obj; diff --git a/src/rpc/mining.cpp b/src/rpc/mining.cpp index 13538afa41..02057ce65a 100644 --- a/src/rpc/mining.cpp +++ b/src/rpc/mining.cpp @@ -11,6 +11,8 @@ #include #include #include +#include +#include #include #include #include @@ -39,7 +41,6 @@ #include #include #include -#include #include #include @@ -851,7 +852,7 @@ static UniValue getblocktemplate(const JSONRPCRequest& request) UniValue vbavailable(UniValue::VOBJ); for (int j = 0; j < (int)Consensus::MAX_VERSION_BITS_DEPLOYMENTS; ++j) { Consensus::DeploymentPos pos = Consensus::DeploymentPos(j); - ThresholdState state = VersionBitsState(pindexPrev, consensusParams, pos, versionbitscache); + ThresholdState state = g_versionbitscache.State(pindexPrev, consensusParams, pos); switch (state) { case ThresholdState::DEFINED: case ThresholdState::FAILED: @@ -859,7 +860,7 @@ static UniValue getblocktemplate(const JSONRPCRequest& request) break; case ThresholdState::LOCKED_IN: // Ensure bit is set in block version - pblock->nVersion |= VersionBitsMask(consensusParams, pos); + pblock->nVersion |= g_versionbitscache.Mask(consensusParams, pos); // FALL THROUGH to get vbavailable 
set... case ThresholdState::STARTED: { @@ -868,7 +869,7 @@ static UniValue getblocktemplate(const JSONRPCRequest& request) if (setClientRules.find(vbinfo.name) == setClientRules.end()) { if (!vbinfo.gbt_force) { // If the client doesn't support this, don't indicate it in the [default] version - pblock->nVersion &= ~VersionBitsMask(consensusParams, pos); + pblock->nVersion &= ~g_versionbitscache.Mask(consensusParams, pos); } } break; diff --git a/src/test/dynamic_activation_thresholds_tests.cpp b/src/test/dynamic_activation_thresholds_tests.cpp index 7e32d903d2..e002229c37 100644 --- a/src/test/dynamic_activation_thresholds_tests.cpp +++ b/src/test/dynamic_activation_thresholds_tests.cpp @@ -6,6 +6,7 @@ #include #include +#include #include #include #include @@ -16,6 +17,7 @@ #include