Mirror of https://github.com/dashpay/dash.git (synced 2024-12-24 19:42:46 +01:00)

Merge pull request #5750 from ogabrielides/v20.0.2_release
backport: v20.0.2 backports and release
This commit is contained in: commit b1e9e05c7f

42  .github/workflows/guix-build.yml (vendored)
@ -9,11 +9,13 @@ jobs:
|
||||
build:
|
||||
runs-on: [ "self-hosted", "linux", "x64", "ubuntu-core" ]
|
||||
if: contains(github.event.pull_request.labels.*.name, 'guix-build')
|
||||
timeout-minutes: 480
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
ref: ${{ github.event.pull_request.head.sha }}
|
||||
path: dash
|
||||
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v3
|
||||
@ -21,31 +23,48 @@ jobs:
|
||||
- name: Commit variables
|
||||
id: dockerfile
|
||||
run: |
|
||||
echo "hash=$(sha256sum ./contrib/containers/guix/Dockerfile | cut -d ' ' -f1)" >> $GITHUB_OUTPUT
|
||||
echo "hash=$(sha256sum ./dash/contrib/containers/guix/Dockerfile | cut -d ' ' -f1)" >> $GITHUB_OUTPUT
|
||||
echo "host_user_id=$(id -u)" >> $GITHUB_OUTPUT
|
||||
echo "host_group_id=$(id -g)" >> $GITHUB_OUTPUT
|
||||
|
||||
- name: Build Docker image
|
||||
uses: docker/build-push-action@v5
|
||||
with:
|
||||
context: ${{ github.workspace }}
|
||||
context: ${{ github.workspace }}/dash
|
||||
build-args: |
|
||||
USER_ID=${{ steps.dockerfile.outputs.host_user_id }}
|
||||
GROUP_ID=${{ steps.dockerfile.outputs.host_group_id }}
|
||||
build-contexts: |
|
||||
docker_root=${{ github.workspace }}/contrib/containers/guix
|
||||
file: ./contrib/containers/guix/Dockerfile
|
||||
docker_root=${{ github.workspace }}/dash/contrib/containers/guix
|
||||
file: ./dash/contrib/containers/guix/Dockerfile
|
||||
load: true
|
||||
tags: guix_ubuntu:latest
|
||||
cache-from: type=gha
|
||||
cache-to: type=gha,mode=max
|
||||
|
||||
- name: Restore Guix cache and depends
|
||||
id: guix-cache-restore
|
||||
uses: actions/cache/restore@v3
|
||||
with:
|
||||
path: |
|
||||
${{ github.workspace }}/.cache
|
||||
${{ github.workspace }}/dash/depends/built
|
||||
${{ github.workspace }}/dash/depends/sources
|
||||
${{ github.workspace }}/dash/depends/work
|
||||
key: ${{ runner.os }}-guix
|
||||
|
||||
- name: Create .cache folder if missing
|
||||
if: steps.guix-cache-restore.outputs.cache-hit != 'true'
|
||||
run: mkdir -p .cache
|
||||
|
||||
- name: Run Guix build
|
||||
timeout-minutes: 480
|
||||
run: |
|
||||
docker run --privileged -d --rm -t \
|
||||
--name guix-daemon \
|
||||
-e ADDITIONAL_GUIX_COMMON_FLAGS="--max-jobs=$(nproc --all)" \
|
||||
-v ${{ github.workspace }}:/src/dash \
|
||||
-v ${{ github.workspace }}/dash:/src/dash \
|
||||
-v ${{ github.workspace }}/.cache:/home/ubuntu/.cache \
|
||||
-w /src/dash \
|
||||
guix_ubuntu:latest && \
|
||||
docker exec guix-daemon bash -c '/usr/local/bin/guix-start'
|
||||
@ -57,6 +76,17 @@ jobs:
|
||||
exit 1
|
||||
fi
|
||||
|
||||
- name: Save Guix cache and depends
|
||||
id: guix-cache-save
|
||||
uses: actions/cache/save@v3
|
||||
with:
|
||||
path: |
|
||||
${{ github.workspace }}/.cache
|
||||
${{ github.workspace }}/dash/depends/built
|
||||
${{ github.workspace }}/dash/depends/sources
|
||||
${{ github.workspace }}/dash/depends/work
|
||||
key: ${{ steps.guix-cache-restore.outputs.cache-primary-key }}
|
||||
|
||||
- name: Compute SHA256 checksums
|
||||
run: |
|
||||
./contrib/containers/guix/scripts/guix-check ${{ github.workspace }}
|
||||
./dash/contrib/containers/guix/scripts/guix-check ${{ github.workspace }}/dash
|
||||
|
17  Makefile.am
@ -36,7 +36,6 @@ OSX_APP=Dash-Qt.app
|
||||
OSX_VOLNAME = $(subst $(space),-,$(PACKAGE_NAME))
|
||||
OSX_DMG = $(OSX_VOLNAME).dmg
|
||||
OSX_TEMP_ISO = $(OSX_DMG:.dmg=).temp.iso
|
||||
OSX_BACKGROUND_IMAGE=$(top_srcdir)/contrib/macdeploy/background.tiff
|
||||
OSX_DEPLOY_SCRIPT=$(top_srcdir)/contrib/macdeploy/macdeployqtplus
|
||||
OSX_INSTALLER_ICONS=$(top_srcdir)/src/qt/res/icons/dash.icns
|
||||
OSX_PLIST=$(top_builddir)/share/qt/Info.plist #not installed
|
||||
@ -60,7 +59,6 @@ WINDOWS_PACKAGING = $(top_srcdir)/share/pixmaps/dash.ico \
|
||||
$(top_srcdir)/doc/README_windows.txt
|
||||
|
||||
OSX_PACKAGING = $(OSX_DEPLOY_SCRIPT) $(OSX_INSTALLER_ICONS) \
|
||||
$(top_srcdir)/contrib/macdeploy/detached-sig-apply.sh \
|
||||
$(top_srcdir)/contrib/macdeploy/detached-sig-create.sh
|
||||
|
||||
COVERAGE_INFO = baseline.info \
|
||||
@ -125,28 +123,17 @@ $(OSX_DMG): $(OSX_APP_BUILT) $(OSX_PACKAGING)
|
||||
deploydir: $(OSX_DMG)
|
||||
else !BUILD_DARWIN
|
||||
APP_DIST_DIR=$(top_builddir)/dist
|
||||
APP_DIST_EXTRAS=$(APP_DIST_DIR)/.background/background.tiff $(APP_DIST_DIR)/.DS_Store $(APP_DIST_DIR)/Applications
|
||||
|
||||
$(APP_DIST_DIR)/Applications:
|
||||
@rm -f $@
|
||||
@cd $(@D); $(LN_S) /Applications $(@F)
|
||||
|
||||
$(APP_DIST_EXTRAS): $(APP_DIST_DIR)/$(OSX_APP)/Contents/MacOS/Dash-Qt
|
||||
|
||||
$(OSX_TEMP_ISO): $(APP_DIST_EXTRAS)
|
||||
$(OSX_TEMP_ISO): $(APP_DIST_DIR)/$(OSX_APP)/Contents/MacOS/Dash-Qt
|
||||
$(XORRISOFS) -D -l -V "$(OSX_VOLNAME)" -no-pad -r -dir-mode 0755 -o $@ $(APP_DIST_DIR) -- $(if $(SOURCE_DATE_EPOCH),-volume_date all_file_dates =$(SOURCE_DATE_EPOCH))
|
||||
|
||||
$(OSX_DMG): $(OSX_TEMP_ISO)
|
||||
$(DMG) dmg "$<" "$@"
|
||||
|
||||
$(APP_DIST_DIR)/.background/background.tiff:
|
||||
$(MKDIR_P) $(@D)
|
||||
cp $(OSX_BACKGROUND_IMAGE) $@
|
||||
|
||||
$(APP_DIST_DIR)/$(OSX_APP)/Contents/MacOS/Dash-Qt: $(OSX_APP_BUILT) $(OSX_PACKAGING)
|
||||
INSTALLNAMETOOL=$(INSTALLNAMETOOL) OTOOL=$(OTOOL) STRIP=$(STRIP) $(PYTHON) $(OSX_DEPLOY_SCRIPT) $(OSX_APP) $(OSX_VOLNAME) -translations-dir=$(QT_TRANSLATION_DIR)
|
||||
|
||||
deploydir: $(APP_DIST_EXTRAS)
|
||||
deploydir: $(APP_DIST_DIR)/$(OSX_APP)/Contents/MacOS/Dash-Qt
|
||||
endif !BUILD_DARWIN
|
||||
|
||||
appbundle: $(OSX_APP_BUILT)
|
||||
|
@ -15,5 +15,5 @@ export RUN_UNIT_TESTS_SEQUENTIAL="true"
|
||||
export RUN_UNIT_TESTS="false"
|
||||
export GOAL="install"
|
||||
export TEST_PREVIOUS_RELEASES=true
|
||||
export PREVIOUS_RELEASES_TO_DOWNLOAD="v0.15.0.0 v0.16.1.1 v0.17.0.3 v18.2.2 v19.3.0"
|
||||
export PREVIOUS_RELEASES_TO_DOWNLOAD="v0.15.0.0 v0.16.1.1 v0.17.0.3 v18.2.2 v19.3.0 v20.0.1"
|
||||
export BITCOIN_CONFIG="--enable-zmq --enable-reduce-exports --disable-fuzz-binary LDFLAGS=-static-libstdc++"
|
||||
|
@ -1,7 +1,7 @@
|
||||
AC_PREREQ([2.69])
|
||||
define(_CLIENT_VERSION_MAJOR, 20)
|
||||
define(_CLIENT_VERSION_MINOR, 0)
|
||||
define(_CLIENT_VERSION_BUILD, 1)
|
||||
define(_CLIENT_VERSION_BUILD, 2)
|
||||
define(_CLIENT_VERSION_RC, 0)
|
||||
define(_CLIENT_VERSION_IS_RELEASE, true)
|
||||
define(_COPYRIGHT_YEAR, 2023)
|
||||
|
@ -79,9 +79,14 @@ COPY --from=docker_root ./scripts/entrypoint /usr/local/bin/entrypoint
|
||||
COPY --from=docker_root ./scripts/guix-check /usr/local/bin/guix-check
|
||||
COPY --from=docker_root ./scripts/guix-start /usr/local/bin/guix-start
|
||||
|
||||
# Create directory for mounting and grant necessary permissions
|
||||
RUN mkdir -p /src/dash && \
|
||||
chown -R ${USER_ID}:${GROUP_ID} /src
|
||||
# Create directories for mounting to save/restore cache and grant necessary permissions
|
||||
RUN mkdir -p \
|
||||
/home/${USERNAME}/.cache \
|
||||
/src/dash/depends/{built,sources,work} && \
|
||||
chown -R ${USER_ID}:${GROUP_ID} \
|
||||
/home/${USERNAME}/.cache \
|
||||
/src
|
||||
|
||||
WORKDIR "/src/dash"
|
||||
|
||||
# Switch to unprivileged context
|
||||
|
@ -328,7 +328,7 @@ mkdir -p "$DISTSRC"
|
||||
mkdir -p "unsigned-app-${HOST}"
|
||||
cp --target-directory="unsigned-app-${HOST}" \
|
||||
osx_volname \
|
||||
contrib/macdeploy/detached-sig-{apply,create}.sh \
|
||||
contrib/macdeploy/detached-sig-create.sh \
|
||||
"${BASEPREFIX}/${HOST}"/native/bin/dmg
|
||||
mv --target-directory="unsigned-app-${HOST}" dist
|
||||
(
|
||||
|
@ -1,27 +0,0 @@
|
||||
#!/bin/sh
|
||||
# Copyright (c) 2014-2015 The Bitcoin Core developers
|
||||
# Distributed under the MIT software license, see the accompanying
|
||||
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
|
||||
|
||||
export LC_ALL=C
|
||||
set -e
|
||||
|
||||
UNSIGNED="$1"
|
||||
SIGNATURE="$2"
|
||||
ROOTDIR=dist
|
||||
OUTDIR=signed-app
|
||||
SIGNAPPLE=signapple
|
||||
|
||||
if [ -z "$UNSIGNED" ]; then
|
||||
echo "usage: $0 <unsigned app> <signature>"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [ -z "$SIGNATURE" ]; then
|
||||
echo "usage: $0 <unsigned app> <signature>"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
${SIGNAPPLE} apply ${UNSIGNED} ${SIGNATURE}
|
||||
mv ${ROOTDIR} ${OUTDIR}
|
||||
echo "Signed: ${OUTDIR}"
|
@ -16,8 +16,7 @@
|
||||
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
#
|
||||
|
||||
import plistlib
|
||||
import sys, re, os, shutil, stat, os.path
|
||||
import sys, re, os, platform, shutil, stat, subprocess, os.path
|
||||
from argparse import ArgumentParser
|
||||
from ds_store import DSStore
|
||||
from mac_alias import Alias
|
||||
@ -53,7 +52,7 @@ class FrameworkInfo(object):
|
||||
return False
|
||||
|
||||
def __str__(self):
|
||||
return f""" Framework name: {frameworkName}
|
||||
return f""" Framework name: {self.frameworkName}
|
||||
Framework directory: {self.frameworkDirectory}
|
||||
Framework path: {self.frameworkPath}
|
||||
Binary name: {self.binaryName}
|
||||
@ -85,8 +84,8 @@ class FrameworkInfo(object):
|
||||
if line == "":
|
||||
return None
|
||||
|
||||
# Don't deploy system libraries (exception for libQtuitools and libQtlucene).
|
||||
if line.startswith("/System/Library/") or line.startswith("@executable_path") or (line.startswith("/usr/lib/") and "libQt" not in line):
|
||||
# Don't deploy system libraries
|
||||
if line.startswith("/System/Library/") or line.startswith("@executable_path") or line.startswith("/usr/lib/"):
|
||||
return None
|
||||
|
||||
m = cls.reOLine.match(line)
|
||||
@ -246,56 +245,46 @@ def copyFramework(framework: FrameworkInfo, path: str, verbose: int) -> Optional
|
||||
toDir = os.path.join(path, framework.destinationDirectory)
|
||||
toPath = os.path.join(toDir, framework.binaryName)
|
||||
|
||||
if not os.path.exists(fromPath):
|
||||
raise RuntimeError(f"No file at {fromPath}")
|
||||
if framework.isDylib():
|
||||
if not os.path.exists(fromPath):
|
||||
raise RuntimeError(f"No file at {fromPath}")
|
||||
|
||||
if os.path.exists(toPath):
|
||||
return None # Already there
|
||||
if os.path.exists(toPath):
|
||||
return None # Already there
|
||||
|
||||
if not os.path.exists(toDir):
|
||||
os.makedirs(toDir)
|
||||
if not os.path.exists(toDir):
|
||||
os.makedirs(toDir)
|
||||
|
||||
shutil.copy2(fromPath, toPath)
|
||||
if verbose:
|
||||
print("Copied:", fromPath)
|
||||
print(" to:", toPath)
|
||||
shutil.copy2(fromPath, toPath)
|
||||
if verbose:
|
||||
print("Copied:", fromPath)
|
||||
print(" to:", toPath)
|
||||
else:
|
||||
to_dir = os.path.join(path, "Contents", "Frameworks", framework.frameworkName)
|
||||
if os.path.exists(to_dir):
|
||||
return None # Already there
|
||||
|
||||
from_dir = framework.frameworkPath
|
||||
if not os.path.exists(from_dir):
|
||||
raise RuntimeError(f"No directory at {from_dir}")
|
||||
|
||||
shutil.copytree(from_dir, to_dir, symlinks=True)
|
||||
if verbose:
|
||||
print("Copied:", from_dir)
|
||||
print(" to:", to_dir)
|
||||
|
||||
headers_link = os.path.join(to_dir, "Headers")
|
||||
if os.path.exists(headers_link):
|
||||
os.unlink(headers_link)
|
||||
|
||||
headers_dir = os.path.join(to_dir, framework.binaryDirectory, "Headers")
|
||||
if os.path.exists(headers_dir):
|
||||
shutil.rmtree(headers_dir)
|
||||
|
||||
permissions = os.stat(toPath)
|
||||
if not permissions.st_mode & stat.S_IWRITE:
|
||||
os.chmod(toPath, permissions.st_mode | stat.S_IWRITE)
|
||||
|
||||
if not framework.isDylib(): # Copy resources for real frameworks
|
||||
|
||||
linkfrom = os.path.join(path, "Contents","Frameworks", framework.frameworkName, "Versions", "Current")
|
||||
linkto = framework.version
|
||||
if not os.path.exists(linkfrom):
|
||||
os.symlink(linkto, linkfrom)
|
||||
print("Linked:", linkfrom, "->", linkto)
|
||||
fromResourcesDir = framework.sourceResourcesDirectory
|
||||
if os.path.exists(fromResourcesDir):
|
||||
toResourcesDir = os.path.join(path, framework.destinationResourcesDirectory)
|
||||
shutil.copytree(fromResourcesDir, toResourcesDir, symlinks=True)
|
||||
if verbose:
|
||||
print("Copied resources:", fromResourcesDir)
|
||||
print(" to:", toResourcesDir)
|
||||
fromContentsDir = framework.sourceVersionContentsDirectory
|
||||
if not os.path.exists(fromContentsDir):
|
||||
fromContentsDir = framework.sourceContentsDirectory
|
||||
if os.path.exists(fromContentsDir):
|
||||
toContentsDir = os.path.join(path, framework.destinationVersionContentsDirectory)
|
||||
shutil.copytree(fromContentsDir, toContentsDir, symlinks=True)
|
||||
if verbose:
|
||||
print("Copied Contents:", fromContentsDir)
|
||||
print(" to:", toContentsDir)
|
||||
elif framework.frameworkName.startswith("libQtGui"): # Copy qt_menu.nib (applies to non-framework layout)
|
||||
qtMenuNibSourcePath = os.path.join(framework.frameworkDirectory, "Resources", "qt_menu.nib")
|
||||
qtMenuNibDestinationPath = os.path.join(path, "Contents", "Resources", "qt_menu.nib")
|
||||
if os.path.exists(qtMenuNibSourcePath) and not os.path.exists(qtMenuNibDestinationPath):
|
||||
shutil.copytree(qtMenuNibSourcePath, qtMenuNibDestinationPath, symlinks=True)
|
||||
if verbose:
|
||||
print("Copied for libQtGui:", qtMenuNibSourcePath)
|
||||
print(" to:", qtMenuNibDestinationPath)
|
||||
|
||||
return toPath
|
||||
|
||||
def deployFrameworks(frameworks: List[FrameworkInfo], bundlePath: str, binaryPath: str, strip: bool, verbose: int, deploymentInfo: Optional[DeploymentInfo] = None) -> DeploymentInfo:
|
||||
@ -351,115 +340,20 @@ def deployFrameworksForAppBundle(applicationBundle: ApplicationBundleInfo, strip
|
||||
return deployFrameworks(frameworks, applicationBundle.path, applicationBundle.binaryPath, strip, verbose)
|
||||
|
||||
def deployPlugins(appBundleInfo: ApplicationBundleInfo, deploymentInfo: DeploymentInfo, strip: bool, verbose: int):
|
||||
# Lookup available plugins, exclude unneeded
|
||||
plugins = []
|
||||
if deploymentInfo.pluginPath is None:
|
||||
return
|
||||
for dirpath, dirnames, filenames in os.walk(deploymentInfo.pluginPath):
|
||||
pluginDirectory = os.path.relpath(dirpath, deploymentInfo.pluginPath)
|
||||
if pluginDirectory == "designer":
|
||||
# Skip designer plugins
|
||||
|
||||
if pluginDirectory not in ['styles', 'platforms']:
|
||||
continue
|
||||
elif pluginDirectory == "printsupport":
|
||||
# Skip printsupport plugins
|
||||
continue
|
||||
elif pluginDirectory == "imageformats":
|
||||
# Skip imageformats plugins
|
||||
continue
|
||||
elif pluginDirectory == "sqldrivers":
|
||||
# Deploy the sql plugins only if QtSql is in use
|
||||
if not deploymentInfo.usesFramework("QtSql"):
|
||||
continue
|
||||
elif pluginDirectory == "script":
|
||||
# Deploy the script plugins only if QtScript is in use
|
||||
if not deploymentInfo.usesFramework("QtScript"):
|
||||
continue
|
||||
elif pluginDirectory == "qmltooling" or pluginDirectory == "qml1tooling":
|
||||
# Deploy the qml plugins only if QtDeclarative is in use
|
||||
if not deploymentInfo.usesFramework("QtDeclarative"):
|
||||
continue
|
||||
elif pluginDirectory == "bearer":
|
||||
# Deploy the bearer plugins only if QtNetwork is in use
|
||||
if not deploymentInfo.usesFramework("QtNetwork"):
|
||||
continue
|
||||
elif pluginDirectory == "position":
|
||||
# Deploy the position plugins only if QtPositioning is in use
|
||||
if not deploymentInfo.usesFramework("QtPositioning"):
|
||||
continue
|
||||
elif pluginDirectory == "sensors" or pluginDirectory == "sensorgestures":
|
||||
# Deploy the sensor plugins only if QtSensors is in use
|
||||
if not deploymentInfo.usesFramework("QtSensors"):
|
||||
continue
|
||||
elif pluginDirectory == "audio" or pluginDirectory == "playlistformats":
|
||||
# Deploy the audio plugins only if QtMultimedia is in use
|
||||
if not deploymentInfo.usesFramework("QtMultimedia"):
|
||||
continue
|
||||
elif pluginDirectory == "mediaservice":
|
||||
# Deploy the mediaservice plugins only if QtMultimediaWidgets is in use
|
||||
if not deploymentInfo.usesFramework("QtMultimediaWidgets"):
|
||||
continue
|
||||
elif pluginDirectory == "canbus":
|
||||
# Deploy the canbus plugins only if QtSerialBus is in use
|
||||
if not deploymentInfo.usesFramework("QtSerialBus"):
|
||||
continue
|
||||
elif pluginDirectory == "webview":
|
||||
# Deploy the webview plugins only if QtWebView is in use
|
||||
if not deploymentInfo.usesFramework("QtWebView"):
|
||||
continue
|
||||
elif pluginDirectory == "gamepads":
|
||||
# Deploy the gamepad plugins only if QtGamepad is in use
|
||||
if not deploymentInfo.usesFramework("QtGamepad"):
|
||||
continue
|
||||
elif pluginDirectory == "geoservices":
|
||||
# Deploy the geoservices plugins only if QtLocation is in use
|
||||
if not deploymentInfo.usesFramework("QtLocation"):
|
||||
continue
|
||||
elif pluginDirectory == "texttospeech":
|
||||
# Deploy the texttospeech plugins only if QtTextToSpeech is in use
|
||||
if not deploymentInfo.usesFramework("QtTextToSpeech"):
|
||||
continue
|
||||
elif pluginDirectory == "virtualkeyboard":
|
||||
# Deploy the virtualkeyboard plugins only if QtVirtualKeyboard is in use
|
||||
if not deploymentInfo.usesFramework("QtVirtualKeyboard"):
|
||||
continue
|
||||
elif pluginDirectory == "sceneparsers":
|
||||
# Deploy the sceneparsers plugins only if Qt3DCore is in use
|
||||
if not deploymentInfo.usesFramework("Qt3DCore"):
|
||||
continue
|
||||
elif pluginDirectory == "renderplugins":
|
||||
# Deploy the renderplugins plugins only if Qt3DCore is in use
|
||||
if not deploymentInfo.usesFramework("Qt3DCore"):
|
||||
continue
|
||||
elif pluginDirectory == "geometryloaders":
|
||||
# Deploy the geometryloaders plugins only if Qt3DCore is in use
|
||||
if not deploymentInfo.usesFramework("Qt3DCore"):
|
||||
continue
|
||||
|
||||
for pluginName in filenames:
|
||||
pluginPath = os.path.join(pluginDirectory, pluginName)
|
||||
if pluginName.endswith("_debug.dylib"):
|
||||
# Skip debug plugins
|
||||
|
||||
if pluginName.split('.')[0] not in ['libqminimal', 'libqcocoa', 'libqmacstyle']:
|
||||
continue
|
||||
elif pluginPath == "imageformats/libqsvg.dylib" or pluginPath == "iconengines/libqsvgicon.dylib":
|
||||
# Deploy the svg plugins only if QtSvg is in use
|
||||
if not deploymentInfo.usesFramework("QtSvg"):
|
||||
continue
|
||||
elif pluginPath == "accessible/libqtaccessiblecompatwidgets.dylib":
|
||||
# Deploy accessibility for Qt3Support only if the Qt3Support is in use
|
||||
if not deploymentInfo.usesFramework("Qt3Support"):
|
||||
continue
|
||||
elif pluginPath == "graphicssystems/libqglgraphicssystem.dylib":
|
||||
# Deploy the opengl graphicssystem plugin only if QtOpenGL is in use
|
||||
if not deploymentInfo.usesFramework("QtOpenGL"):
|
||||
continue
|
||||
elif pluginPath == "accessible/libqtaccessiblequick.dylib":
|
||||
# Deploy the accessible qtquick plugin only if QtQuick is in use
|
||||
if not deploymentInfo.usesFramework("QtQuick"):
|
||||
continue
|
||||
elif pluginPath == "platforminputcontexts/libqtvirtualkeyboardplugin.dylib":
|
||||
# Deploy the virtualkeyboardplugin plugin only if QtVirtualKeyboard is in use
|
||||
if not deploymentInfo.usesFramework("QtVirtualKeyboard"):
|
||||
continue
|
||||
|
||||
plugins.append((pluginDirectory, pluginName))
|
||||
|
||||
@ -527,6 +421,9 @@ if os.path.exists(appname + ".dmg"):
|
||||
print("+ Removing existing DMG +")
|
||||
os.unlink(appname + ".dmg")
|
||||
|
||||
if os.path.exists(appname + ".temp.dmg"):
|
||||
os.unlink(appname + ".temp.dmg")
|
||||
|
||||
# ------------------------------------------------
|
||||
|
||||
target = os.path.join("dist", "Dash-Qt.app")
|
||||
@ -644,6 +541,25 @@ ds.close()
|
||||
|
||||
# ------------------------------------------------
|
||||
|
||||
if platform.system() == "Darwin":
|
||||
subprocess.check_call(f"codesign --deep --force --sign - {target}", shell=True)
|
||||
|
||||
print("+ Installing background.tiff +")
|
||||
|
||||
bg_path = os.path.join('dist', '.background', 'background.tiff')
|
||||
os.mkdir(os.path.dirname(bg_path))
|
||||
|
||||
tiff_path = os.path.join('contrib', 'macdeploy', 'background.tiff')
|
||||
shutil.copy2(tiff_path, bg_path)
|
||||
|
||||
# ------------------------------------------------
|
||||
|
||||
print("+ Generating symlink for /Applications +")
|
||||
|
||||
os.symlink("/Applications", os.path.join('dist', "Applications"))
|
||||
|
||||
# ------------------------------------------------
|
||||
|
||||
if config.dmg is not None:
|
||||
|
||||
print("+ Preparing .dmg disk image +")
|
||||
@ -667,19 +583,6 @@ if config.dmg is not None:
|
||||
print("Attaching temp image...")
|
||||
output = run(["hdiutil", "attach", tempname, "-readwrite"], check=True, universal_newlines=True, stdout=PIPE).stdout
|
||||
|
||||
m = re.search(r"/Volumes/(.+$)", output)
|
||||
disk_root = m.group(0)
|
||||
|
||||
print("+ Applying fancy settings +")
|
||||
|
||||
bg_path = os.path.join(disk_root, ".background", os.path.basename('background.tiff'))
|
||||
os.mkdir(os.path.dirname(bg_path))
|
||||
if verbose:
|
||||
print('background.tiff', "->", bg_path)
|
||||
shutil.copy2('contrib/macdeploy/background.tiff', bg_path)
|
||||
|
||||
os.symlink("/Applications", os.path.join(disk_root, "Applications"))
|
||||
|
||||
print("+ Finalizing .dmg disk image +")
|
||||
|
||||
run(["hdiutil", "detach", f"/Volumes/{appname}"], universal_newlines=True)
|
||||
|
@ -134,6 +134,9 @@ $(package)_config_opts_darwin += -no-feature-corewlan
|
||||
$(package)_config_opts_darwin += -no-freetype
|
||||
$(package)_config_opts_darwin += QMAKE_MACOSX_DEPLOYMENT_TARGET=$(OSX_MIN_VERSION)
|
||||
|
||||
# Optimizing using > -O1 causes non-determinism when building across arches.
|
||||
$(package)_config_opts_aarch64_darwin += "QMAKE_CFLAGS_OPTIMIZE_FULL = -O1"
|
||||
|
||||
ifneq ($(build_os),darwin)
|
||||
$(package)_config_opts_darwin += -xplatform macx-clang-linux
|
||||
$(package)_config_opts_darwin += -device-option MAC_SDK_PATH=$(OSX_SDK)
|
||||
|
@ -1,4 +1,4 @@
|
||||
# Dash Core version v20.0.1
|
||||
# Dash Core version v20.0.2
|
||||
|
||||
Release is now available from:
|
||||
|
||||
@ -35,17 +35,20 @@ reindex or re-sync the whole chain.
|
||||
|
||||
# Notable changes
|
||||
|
||||
## Qt Testnet Crash
|
||||
## Masternode fix
|
||||
|
||||
A crash has been fixed which has only been seen on testnet and only affects QT clients.
|
||||
A problem has been fixed in the old quorum data cleanup mechanism. It was slowing down masternodes during DKG sessions and causing them to get PoSe scored.
|
||||
|
||||
## Guix Build System Enhancements
|
||||
The Guix build system has been enhanced to enable building with custom options when needed.
|
||||
This will be used to support custom builds such as for nightly builds with extra debug options.
|
||||
## Testnet Crash
|
||||
|
||||
Additionally, the Guix system will now produce debug symbols for MacOS.
|
||||
A fix has been implemented for the reported crash that could occur when upgrading from v19.x to v20.0.0 after v20 activation without re-indexing.
|
||||
|
||||
# v20.0.1 Change log
|
||||
## Other changes
|
||||
|
||||
Improvements were implemented in the GitHub CI and in the macOS build system, and compilation issues on FreeBSD were fixed.
|
||||
|
||||
|
||||
# v20.0.2 Change log
|
||||
|
||||
See detailed [set of changes][set-of-changes].
|
||||
|
||||
@ -54,6 +57,7 @@ See detailed [set of changes][set-of-changes].
|
||||
Thanks to everyone who directly contributed to this release:
|
||||
|
||||
- Konstantin Akimov (knst)
|
||||
- Odysseas Gabrielides (ogabrielides)
|
||||
- PastaPastaPasta
|
||||
- UdjinM6
|
||||
|
||||
@ -82,6 +86,7 @@ Dash Core tree 0.12.1.x was a fork of Bitcoin Core tree 0.12.
|
||||
|
||||
These releases are considered obsolete. Old release notes can be found here:
|
||||
|
||||
- [v20.0.1](https://github.com/dashpay/dash/blob/master/doc/release-notes/dash/release-notes-20.0.1.md) released November/18/2023
|
||||
- [v20.0.0](https://github.com/dashpay/dash/blob/master/doc/release-notes/dash/release-notes-20.0.0.md) released November/15/2023
|
||||
- [v19.3.0](https://github.com/dashpay/dash/blob/master/doc/release-notes/dash/release-notes-19.3.0.md) released July/31/2023
|
||||
- [v19.2.0](https://github.com/dashpay/dash/blob/master/doc/release-notes/dash/release-notes-19.2.0.md) released June/19/2023
|
||||
@ -125,4 +130,4 @@ These release are considered obsolete. Old release notes can be found here:
|
||||
- [v0.10.x](https://github.com/dashpay/dash/blob/master/doc/release-notes/dash/release-notes-0.10.0.md) released Sep/25/2014
|
||||
- [v0.9.x](https://github.com/dashpay/dash/blob/master/doc/release-notes/dash/release-notes-0.9.0.md) released Mar/13/2014
|
||||
|
||||
[set-of-changes]: https://github.com/dashpay/dash/compare/v20.0.0...dashpay:v20.0.1
|
||||
[set-of-changes]: https://github.com/dashpay/dash/compare/v20.0.1...dashpay:v20.0.2
|
||||
|
128  doc/release-notes/dash/release-notes-20.0.1.md (new file)
@ -0,0 +1,128 @@
|
||||
# Dash Core version v20.0.1
|
||||
|
||||
Release is now available from:
|
||||
|
||||
<https://www.dash.org/downloads/#wallets>
|
||||
|
||||
This is a new patch version release, bringing small bug fixes and build system enhancements.
|
||||
|
||||
This release is optional for all nodes.
|
||||
|
||||
Please report bugs using the issue tracker at GitHub:
|
||||
|
||||
<https://github.com/dashpay/dash/issues>
|
||||
|
||||
|
||||
# Upgrading and downgrading
|
||||
|
||||
## How to Upgrade
|
||||
|
||||
If you are running an older version, shut it down. Wait until it has completely
|
||||
shut down (which might take a few minutes for older versions), then run the
|
||||
installer (on Windows) or just copy over /Applications/Dash-Qt (on Mac) or
|
||||
dashd/dash-qt (on Linux). If you upgrade after DIP0003 activation and you were
|
||||
using version < 0.13 you will have to reindex (start with -reindex-chainstate
|
||||
or -reindex) to make sure your wallet has all the new data synced. Upgrading
|
||||
from version 0.13 should not require any additional actions.
|
||||
|
||||
## Downgrade warning
|
||||
|
||||
### Downgrade to a version < v19.2.0
|
||||
|
||||
Downgrading to a version older than v19.2.0 is not supported due to changes
|
||||
in the evodb database. If you need to use an older version, you must either
|
||||
reindex or re-sync the whole chain.
|
||||
|
||||
# Notable changes
|
||||
|
||||
## Qt Testnet Crash
|
||||
|
||||
A crash has been fixed which has only been seen on testnet and only affects QT clients.
|
||||
|
||||
## Guix Build System Enhancements
|
||||
The Guix build system has been enhanced to enable building with custom options when needed.
|
||||
This will be used to support custom builds such as for nightly builds with extra debug options.
|
||||
|
||||
Additionally, the Guix system will now produce debug symbols for macOS.
|
||||
|
||||
# v20.0.1 Change log
|
||||
|
||||
See detailed [set of changes][set-of-changes].
|
||||
|
||||
# Credits
|
||||
|
||||
Thanks to everyone who directly contributed to this release:
|
||||
|
||||
- Konstantin Akimov (knst)
|
||||
- PastaPastaPasta
|
||||
- UdjinM6
|
||||
|
||||
As well as everyone that submitted issues, reviewed pull requests and helped
|
||||
debug the release candidates.
|
||||
|
||||
# Older releases
|
||||
|
||||
Dash was previously known as Darkcoin.
|
||||
|
||||
Darkcoin tree 0.8.x was a fork of Litecoin tree 0.8, original name was XCoin
|
||||
which was first released on Jan/18/2014.
|
||||
|
||||
Darkcoin tree 0.9.x was the open source implementation of masternodes based on
|
||||
the 0.8.x tree and was first released on Mar/13/2014.
|
||||
|
||||
Darkcoin tree 0.10.x used to be the closed source implementation of Darksend
|
||||
which was released open source on Sep/25/2014.
|
||||
|
||||
Dash Core tree 0.11.x was a fork of Bitcoin Core tree 0.9,
|
||||
Darkcoin was rebranded to Dash.
|
||||
|
||||
Dash Core tree 0.12.0.x was a fork of Bitcoin Core tree 0.10.
|
||||
|
||||
Dash Core tree 0.12.1.x was a fork of Bitcoin Core tree 0.12.
|
||||
|
||||
These releases are considered obsolete. Old release notes can be found here:
|
||||
|
||||
- [v20.0.0](https://github.com/dashpay/dash/blob/master/doc/release-notes/dash/release-notes-20.0.0.md) released November/15/2023
|
||||
- [v19.3.0](https://github.com/dashpay/dash/blob/master/doc/release-notes/dash/release-notes-19.3.0.md) released July/31/2023
|
||||
- [v19.2.0](https://github.com/dashpay/dash/blob/master/doc/release-notes/dash/release-notes-19.2.0.md) released June/19/2023
|
||||
- [v19.1.0](https://github.com/dashpay/dash/blob/master/doc/release-notes/dash/release-notes-19.1.0.md) released May/22/2023
|
||||
- [v19.0.0](https://github.com/dashpay/dash/blob/master/doc/release-notes/dash/release-notes-19.0.0.md) released Apr/14/2023
|
||||
- [v18.2.2](https://github.com/dashpay/dash/blob/master/doc/release-notes/dash/release-notes-18.2.2.md) released Mar/21/2023
|
||||
- [v18.2.1](https://github.com/dashpay/dash/blob/master/doc/release-notes/dash/release-notes-18.2.1.md) released Jan/17/2023
|
||||
- [v18.2.0](https://github.com/dashpay/dash/blob/master/doc/release-notes/dash/release-notes-18.2.0.md) released Jan/01/2023
|
||||
- [v18.1.1](https://github.com/dashpay/dash/blob/master/doc/release-notes/dash/release-notes-18.1.1.md) released January/08/2023
|
||||
- [v18.1.0](https://github.com/dashpay/dash/blob/master/doc/release-notes/dash/release-notes-18.1.0.md) released October/09/2022
|
||||
- [v18.0.2](https://github.com/dashpay/dash/blob/master/doc/release-notes/dash/release-notes-18.0.2.md) released October/09/2022
|
||||
- [v18.0.1](https://github.com/dashpay/dash/blob/master/doc/release-notes/dash/release-notes-18.0.1.md) released August/17/2022
|
||||
- [v0.17.0.3](https://github.com/dashpay/dash/blob/master/doc/release-notes/dash/release-notes-0.17.0.3.md) released June/07/2021
|
||||
- [v0.17.0.2](https://github.com/dashpay/dash/blob/master/doc/release-notes/dash/release-notes-0.17.0.2.md) released May/19/2021
|
||||
- [v0.16.1.1](https://github.com/dashpay/dash/blob/master/doc/release-notes/dash/release-notes-0.16.1.1.md) released November/17/2020
|
||||
- [v0.16.1.0](https://github.com/dashpay/dash/blob/master/doc/release-notes/dash/release-notes-0.16.1.0.md) released November/14/2020
|
||||
- [v0.16.0.1](https://github.com/dashpay/dash/blob/master/doc/release-notes/dash/release-notes-0.16.0.1.md) released September/30/2020
|
||||
- [v0.15.0.0](https://github.com/dashpay/dash/blob/master/doc/release-notes/dash/release-notes-0.15.0.0.md) released February/18/2020
|
||||
- [v0.14.0.5](https://github.com/dashpay/dash/blob/master/doc/release-notes/dash/release-notes-0.14.0.5.md) released December/08/2019
|
||||
- [v0.14.0.4](https://github.com/dashpay/dash/blob/master/doc/release-notes/dash/release-notes-0.14.0.4.md) released November/22/2019
|
||||
- [v0.14.0.3](https://github.com/dashpay/dash/blob/master/doc/release-notes/dash/release-notes-0.14.0.3.md) released August/15/2019
|
||||
- [v0.14.0.2](https://github.com/dashpay/dash/blob/master/doc/release-notes/dash/release-notes-0.14.0.2.md) released July/4/2019
|
||||
- [v0.14.0.1](https://github.com/dashpay/dash/blob/master/doc/release-notes/dash/release-notes-0.14.0.1.md) released May/31/2019
|
||||
- [v0.14.0](https://github.com/dashpay/dash/blob/master/doc/release-notes/dash/release-notes-0.14.0.md) released May/22/2019
|
||||
- [v0.13.3](https://github.com/dashpay/dash/blob/master/doc/release-notes/dash/release-notes-0.13.3.md) released Apr/04/2019
|
||||
- [v0.13.2](https://github.com/dashpay/dash/blob/master/doc/release-notes/dash/release-notes-0.13.2.md) released Mar/15/2019
|
||||
- [v0.13.1](https://github.com/dashpay/dash/blob/master/doc/release-notes/dash/release-notes-0.13.1.md) released Feb/9/2019
|
||||
- [v0.13.0](https://github.com/dashpay/dash/blob/master/doc/release-notes/dash/release-notes-0.13.0.md) released Jan/14/2019
|
||||
- [v0.12.3.4](https://github.com/dashpay/dash/blob/master/doc/release-notes/dash/release-notes-0.12.3.4.md) released Dec/14/2018
|
||||
- [v0.12.3.3](https://github.com/dashpay/dash/blob/master/doc/release-notes/dash/release-notes-0.12.3.3.md) released Sep/19/2018
|
||||
- [v0.12.3.2](https://github.com/dashpay/dash/blob/master/doc/release-notes/dash/release-notes-0.12.3.2.md) released Jul/09/2018
|
||||
- [v0.12.3.1](https://github.com/dashpay/dash/blob/master/doc/release-notes/dash/release-notes-0.12.3.1.md) released Jul/03/2018
|
||||
- [v0.12.2.3](https://github.com/dashpay/dash/blob/master/doc/release-notes/dash/release-notes-0.12.2.3.md) released Jan/12/2018
|
||||
- [v0.12.2.2](https://github.com/dashpay/dash/blob/master/doc/release-notes/dash/release-notes-0.12.2.2.md) released Dec/17/2017
|
||||
- [v0.12.2](https://github.com/dashpay/dash/blob/master/doc/release-notes/dash/release-notes-0.12.2.md) released Nov/08/2017
|
||||
- [v0.12.1](https://github.com/dashpay/dash/blob/master/doc/release-notes/dash/release-notes-0.12.1.md) released Feb/06/2017
|
||||
- [v0.12.0](https://github.com/dashpay/dash/blob/master/doc/release-notes/dash/release-notes-0.12.0.md) released Aug/15/2015
|
||||
- [v0.11.2](https://github.com/dashpay/dash/blob/master/doc/release-notes/dash/release-notes-0.11.2.md) released Mar/04/2015
|
||||
- [v0.11.1](https://github.com/dashpay/dash/blob/master/doc/release-notes/dash/release-notes-0.11.1.md) released Feb/10/2015
|
||||
- [v0.11.0](https://github.com/dashpay/dash/blob/master/doc/release-notes/dash/release-notes-0.11.0.md) released Jan/15/2015
|
||||
- [v0.10.x](https://github.com/dashpay/dash/blob/master/doc/release-notes/dash/release-notes-0.10.0.md) released Sep/25/2014
|
||||
- [v0.9.x](https://github.com/dashpay/dash/blob/master/doc/release-notes/dash/release-notes-0.9.0.md) released Mar/13/2014
|
||||
|
||||
[set-of-changes]: https://github.com/dashpay/dash/compare/v20.0.0...dashpay:v20.0.1
|
@ -169,6 +169,8 @@ BITCOIN_CORE_H = \
|
||||
cuckoocache.h \
|
||||
ctpl_stl.h \
|
||||
cxxtimer.hpp \
|
||||
deploymentinfo.h \
|
||||
deploymentstatus.h \
|
||||
evo/assetlocktx.h \
|
||||
evo/dmn_types.h \
|
||||
evo/cbtx.h \
|
||||
@ -349,7 +351,6 @@ BITCOIN_CORE_H = \
|
||||
validation.h \
|
||||
validationinterface.h \
|
||||
versionbits.h \
|
||||
versionbitsinfo.h \
|
||||
walletinitinterface.h \
|
||||
wallet/bdb.h \
|
||||
wallet/coincontrol.h \
|
||||
@ -403,6 +404,7 @@ libbitcoin_server_a_SOURCES = \
|
||||
coinjoin/server.cpp \
|
||||
consensus/tx_verify.cpp \
|
||||
dbwrapper.cpp \
|
||||
deploymentstatus.cpp \
|
||||
dsnotificationinterface.cpp \
|
||||
evo/assetlocktx.cpp \
|
||||
evo/cbtx.cpp \
|
||||
@ -696,6 +698,7 @@ libbitcoin_common_a_SOURCES = \
|
||||
compressor.cpp \
|
||||
core_read.cpp \
|
||||
core_write.cpp \
|
||||
deploymentinfo.cpp \
|
||||
key.cpp \
|
||||
key_io.cpp \
|
||||
merkleblock.cpp \
|
||||
@ -715,7 +718,6 @@ libbitcoin_common_a_SOURCES = \
|
||||
script/sign.cpp \
|
||||
script/signingprovider.cpp \
|
||||
script/standard.cpp \
|
||||
versionbitsinfo.cpp \
|
||||
warnings.cpp \
|
||||
$(BITCOIN_CORE_H)
|
||||
|
||||
|
@ -8,12 +8,12 @@
|
||||
|
||||
#include <chainparamsseeds.h>
|
||||
#include <consensus/merkle.h>
|
||||
#include <deploymentinfo.h>
|
||||
#include <llmq/params.h>
|
||||
#include <util/ranges.h>
|
||||
#include <util/system.h>
|
||||
#include <util/underlying.h>
|
||||
#include <versionbits.h>
|
||||
#include <versionbitsinfo.h>
|
||||
|
||||
#include <arith_uint256.h>
|
||||
|
||||
|
@ -14,13 +14,32 @@
|
||||
|
||||
namespace Consensus {
|
||||
|
||||
enum DeploymentPos {
|
||||
enum BuriedDeployment : int16_t
|
||||
{
|
||||
DEPLOYMENT_HEIGHTINCB = std::numeric_limits<int16_t>::min(),
|
||||
DEPLOYMENT_DERSIG,
|
||||
DEPLOYMENT_CLTV,
|
||||
DEPLOYMENT_BIP147,
|
||||
DEPLOYMENT_CSV,
|
||||
DEPLOYMENT_DIP0001,
|
||||
DEPLOYMENT_DIP0003,
|
||||
DEPLOYMENT_DIP0008,
|
||||
DEPLOYMENT_DIP0020,
|
||||
DEPLOYMENT_DIP0024,
|
||||
DEPLOYMENT_BRR,
|
||||
DEPLOYMENT_V19,
|
||||
};
|
||||
constexpr bool ValidDeployment(BuriedDeployment dep) { return DEPLOYMENT_HEIGHTINCB <= dep && dep <= DEPLOYMENT_V19; }
|
||||
|
||||
enum DeploymentPos : uint16_t
|
||||
{
|
||||
DEPLOYMENT_TESTDUMMY,
|
||||
DEPLOYMENT_V20, // Deployment of EHF, LLMQ Randomness Beacon
|
||||
DEPLOYMENT_MN_RR, // Deployment of Masternode Reward Location Reallocation
|
||||
// NOTE: Also add new deployments to VersionBitsDeploymentInfo in versionbits.cpp
|
||||
// NOTE: Also add new deployments to VersionBitsDeploymentInfo in deploymentinfo.cpp
|
||||
MAX_VERSION_BITS_DEPLOYMENTS
|
||||
};
|
||||
constexpr bool ValidDeployment(DeploymentPos dep) { return DEPLOYMENT_TESTDUMMY <= dep && dep <= DEPLOYMENT_MN_RR; }
|
||||
|
||||
/**
|
||||
* Struct for each individual consensus rule change using BIP9.
|
||||
@ -145,7 +164,39 @@ struct Params {
|
||||
LLMQType llmqTypePlatform{LLMQType::LLMQ_NONE};
|
||||
LLMQType llmqTypeMnhf{LLMQType::LLMQ_NONE};
|
||||
LLMQType llmqTypeAssetLocks{LLMQType::LLMQ_NONE};
|
||||
|
||||
int DeploymentHeight(BuriedDeployment dep) const
|
||||
{
|
||||
switch (dep) {
|
||||
case DEPLOYMENT_HEIGHTINCB:
|
||||
return BIP34Height;
|
||||
case DEPLOYMENT_DERSIG:
|
||||
return BIP66Height;
|
||||
case DEPLOYMENT_CLTV:
|
||||
return BIP65Height;
|
||||
case DEPLOYMENT_BIP147:
|
||||
return BIP147Height;
|
||||
case DEPLOYMENT_CSV:
|
||||
return CSVHeight;
|
||||
case DEPLOYMENT_DIP0001:
|
||||
return DIP0001Height;
|
||||
case DEPLOYMENT_DIP0003:
|
||||
return DIP0003Height;
|
||||
case DEPLOYMENT_DIP0008:
|
||||
return DIP0008Height;
|
||||
case DEPLOYMENT_DIP0020:
|
||||
return DIP0020Height;
|
||||
case DEPLOYMENT_DIP0024:
|
||||
return DIP0024Height;
|
||||
case DEPLOYMENT_BRR:
|
||||
return BRRHeight;
|
||||
case DEPLOYMENT_V19:
|
||||
return V19Height;
|
||||
} // no default case, so the compiler can warn about missing cases
|
||||
return std::numeric_limits<int>::max();
|
||||
}
|
||||
};
|
||||
|
||||
} // namespace Consensus
|
||||
|
||||
#endif // BITCOIN_CONSENSUS_PARAMS_H
|
||||
|
54  src/deploymentinfo.cpp (new file)
@ -0,0 +1,54 @@
|
||||
// Copyright (c) 2016-2020 The Bitcoin Core developers
|
||||
// Distributed under the MIT software license, see the accompanying
|
||||
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
|
||||
|
||||
#include <deploymentinfo.h>
|
||||
|
||||
#include <consensus/params.h>
|
||||
|
||||
const struct VBDeploymentInfo VersionBitsDeploymentInfo[Consensus::MAX_VERSION_BITS_DEPLOYMENTS] = {
|
||||
{
|
||||
/*.name =*/ "testdummy",
|
||||
/*.gbt_force =*/ true,
|
||||
},
|
||||
{
|
||||
/*.name =*/"v20",
|
||||
/*.gbt_force =*/true,
|
||||
},
|
||||
{
|
||||
/*.name =*/"mn_rr",
|
||||
/*.gbt_force =*/true,
|
||||
},
|
||||
};
|
||||
|
||||
std::string DeploymentName(Consensus::BuriedDeployment dep)
|
||||
{
|
||||
assert(ValidDeployment(dep));
|
||||
switch (dep) {
|
||||
case Consensus::DEPLOYMENT_HEIGHTINCB:
|
||||
return "bip34";
|
||||
case Consensus::DEPLOYMENT_CLTV:
|
||||
return "bip65";
|
||||
case Consensus::DEPLOYMENT_DERSIG:
|
||||
return "bip66";
|
||||
case Consensus::DEPLOYMENT_BIP147:
|
||||
return "bip147";
|
||||
case Consensus::DEPLOYMENT_CSV:
|
||||
return "csv";
|
||||
case Consensus::DEPLOYMENT_DIP0001:
|
||||
return "dip0001";
|
||||
case Consensus::DEPLOYMENT_DIP0003:
|
||||
return "dip0003";
|
||||
case Consensus::DEPLOYMENT_DIP0008:
|
||||
return "dip0008";
|
||||
case Consensus::DEPLOYMENT_DIP0020:
|
||||
return "dip0020";
|
||||
case Consensus::DEPLOYMENT_DIP0024:
|
||||
return "dip0024";
|
||||
case Consensus::DEPLOYMENT_BRR:
|
||||
return "realloc";
|
||||
case Consensus::DEPLOYMENT_V19:
|
||||
return "v19";
|
||||
} // no default case, so the compiler can warn about missing cases
|
||||
return "";
|
||||
}
|
29  src/deploymentinfo.h (new file)
@ -0,0 +1,29 @@
|
||||
// Copyright (c) 2016-2018 The Bitcoin Core developers
|
||||
// Distributed under the MIT software license, see the accompanying
|
||||
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
|
||||
|
||||
#ifndef BITCOIN_DEPLOYMENTINFO_H
|
||||
#define BITCOIN_DEPLOYMENTINFO_H
|
||||
|
||||
#include <consensus/params.h>
|
||||
|
||||
#include <string>
|
||||
|
||||
struct VBDeploymentInfo {
|
||||
/** Deployment name */
|
||||
const char *name;
|
||||
/** Whether GBT clients can safely ignore this rule in simplified usage */
|
||||
bool gbt_force;
|
||||
};
|
||||
|
||||
extern const VBDeploymentInfo VersionBitsDeploymentInfo[Consensus::MAX_VERSION_BITS_DEPLOYMENTS];
|
||||
|
||||
std::string DeploymentName(Consensus::BuriedDeployment dep);
|
||||
|
||||
inline std::string DeploymentName(Consensus::DeploymentPos pos)
|
||||
{
|
||||
assert(Consensus::ValidDeployment(pos));
|
||||
return VersionBitsDeploymentInfo[pos].name;
|
||||
}
|
||||
|
||||
#endif // BITCOIN_DEPLOYMENTINFO_H
|
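A hedged usage sketch (not part of this diff): the two DeploymentName overloads above resolve the RPC-facing name for either enum, with the versionbits names coming from the VersionBitsDeploymentInfo table in deploymentinfo.cpp.

// Illustrative only: resolving names for a buried and a versionbits deployment.
#include <deploymentinfo.h>
#include <cassert>
#include <string>

void DeploymentNameExample()
{
    const std::string buried = DeploymentName(Consensus::DEPLOYMENT_DIP0024); // "dip0024"
    const std::string vbits = DeploymentName(Consensus::DEPLOYMENT_V20);      // "v20"
    assert(buried == "dip0024" && vbits == "v20");
}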
17  src/deploymentstatus.cpp (new file)
@ -0,0 +1,17 @@
|
||||
// Copyright (c) 2020 The Bitcoin Core developers
|
||||
// Distributed under the MIT software license, see the accompanying
|
||||
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
|
||||
|
||||
#include <deploymentstatus.h>
|
||||
|
||||
#include <consensus/params.h>
|
||||
#include <versionbits.h>
|
||||
|
||||
VersionBitsCache g_versionbitscache;
|
||||
|
||||
/* Basic sanity checking for BuriedDeployment/DeploymentPos enums and
|
||||
* ValidDeployment check */
|
||||
|
||||
static_assert(ValidDeployment(Consensus::DEPLOYMENT_TESTDUMMY), "sanity check of DeploymentPos failed (TESTDUMMY not valid)");
|
||||
static_assert(!ValidDeployment(Consensus::MAX_VERSION_BITS_DEPLOYMENTS), "sanity check of DeploymentPos failed (MAX value considered valid)");
|
||||
static_assert(!ValidDeployment(static_cast<Consensus::BuriedDeployment>(Consensus::DEPLOYMENT_TESTDUMMY)), "sanity check of BuriedDeployment failed (overlaps with DeploymentPos)");
|
55  src/deploymentstatus.h (new file)
@ -0,0 +1,55 @@
|
||||
// Copyright (c) 2020 The Bitcoin Core developers
|
||||
// Distributed under the MIT software license, see the accompanying
|
||||
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
|
||||
|
||||
#ifndef BITCOIN_DEPLOYMENTSTATUS_H
|
||||
#define BITCOIN_DEPLOYMENTSTATUS_H
|
||||
|
||||
#include <chain.h>
|
||||
#include <versionbits.h>
|
||||
|
||||
#include <limits>
|
||||
|
||||
/** Global cache for versionbits deployment status */
|
||||
extern VersionBitsCache g_versionbitscache;
|
||||
|
||||
/** Determine if a deployment is active for the next block */
|
||||
inline bool DeploymentActiveAfter(const CBlockIndex* pindexPrev, const Consensus::Params& params, Consensus::BuriedDeployment dep)
|
||||
{
|
||||
assert(Consensus::ValidDeployment(dep));
|
||||
return (pindexPrev == nullptr ? 0 : pindexPrev->nHeight + 1) >= params.DeploymentHeight(dep);
|
||||
}
|
||||
|
||||
inline bool DeploymentActiveAfter(const CBlockIndex* pindexPrev, const Consensus::Params& params, Consensus::DeploymentPos dep)
|
||||
{
|
||||
assert(Consensus::ValidDeployment(dep));
|
||||
return ThresholdState::ACTIVE == g_versionbitscache.State(pindexPrev, params, dep);
|
||||
}
|
||||
|
||||
/** Determine if a deployment is active for this block */
|
||||
inline bool DeploymentActiveAt(const CBlockIndex& index, const Consensus::Params& params, Consensus::BuriedDeployment dep)
|
||||
{
|
||||
assert(Consensus::ValidDeployment(dep));
|
||||
return index.nHeight >= params.DeploymentHeight(dep);
|
||||
}
|
||||
|
||||
inline bool DeploymentActiveAt(const CBlockIndex& index, const Consensus::Params& params, Consensus::DeploymentPos dep)
|
||||
{
|
||||
assert(Consensus::ValidDeployment(dep));
|
||||
return DeploymentActiveAfter(index.pprev, params, dep);
|
||||
}
|
||||
|
||||
/** Determine if a deployment is enabled (can ever be active) */
|
||||
inline bool DeploymentEnabled(const Consensus::Params& params, Consensus::BuriedDeployment dep)
|
||||
{
|
||||
assert(Consensus::ValidDeployment(dep));
|
||||
return params.DeploymentHeight(dep) != std::numeric_limits<int>::max();
|
||||
}
|
||||
|
||||
inline bool DeploymentEnabled(const Consensus::Params& params, Consensus::DeploymentPos dep)
|
||||
{
|
||||
assert(Consensus::ValidDeployment(dep));
|
||||
return params.vDeployments[dep].nTimeout != 0;
|
||||
}
|
||||
|
||||
#endif // BITCOIN_DEPLOYMENTSTATUS_H
|
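A minimal caller sketch (assumed call site, not part of this diff) for the helpers above: buried deployments are gated purely by height via Params::DeploymentHeight, while versionbits deployments consult the global g_versionbitscache.

// Illustrative only: gating consensus logic on one buried and one versionbits deployment.
#include <deploymentstatus.h>

bool ExampleDeploymentGate(const CBlockIndex* pindexPrev, const Consensus::Params& params)
{
    // Height-based check for a buried deployment.
    if (!DeploymentActiveAfter(pindexPrev, params, Consensus::DEPLOYMENT_DIP0008)) return false;
    // Versionbits state check through the global cache.
    return DeploymentActiveAfter(pindexPrev, params, Consensus::DEPLOYMENT_V20);
}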
@ -3,12 +3,14 @@
|
||||
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
|
||||
|
||||
#include <consensus/validation.h>
|
||||
#include <deploymentstatus.h>
|
||||
#include <evo/mnhftx.h>
|
||||
#include <evo/specialtx.h>
|
||||
#include <llmq/commitment.h>
|
||||
#include <llmq/signing.h>
|
||||
#include <llmq/utils.h>
|
||||
#include <llmq/quorums.h>
|
||||
#include <node/blockstorage.h>
|
||||
|
||||
#include <chain.h>
|
||||
#include <chainparams.h>
|
||||
@ -16,6 +18,7 @@
|
||||
#include <versionbits.h>
|
||||
|
||||
#include <algorithm>
|
||||
#include <stack>
|
||||
#include <string>
|
||||
#include <vector>
|
||||
|
||||
@ -52,7 +55,7 @@ CMNHFManager::~CMNHFManager()
|
||||
|
||||
CMNHFManager::Signals CMNHFManager::GetSignalsStage(const CBlockIndex* const pindexPrev)
|
||||
{
|
||||
Signals signals = GetFromCache(pindexPrev);
|
||||
Signals signals = GetForBlock(pindexPrev);
|
||||
const int height = pindexPrev->nHeight + 1;
|
||||
for (auto it = signals.begin(); it != signals.end(); ) {
|
||||
bool found{false};
|
||||
@ -98,7 +101,7 @@ bool MNHFTx::Verify(const uint256& quorumHash, const uint256& requestId, const u
|
||||
return true;
|
||||
}
|
||||
|
||||
bool CheckMNHFTx(const CTransaction& tx, const CBlockIndex* pindexPrev, TxValidationState& state) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
|
||||
bool CheckMNHFTx(const CTransaction& tx, const CBlockIndex* pindexPrev, TxValidationState& state)
|
||||
{
|
||||
if (tx.nVersion != 3 || tx.nType != TRANSACTION_MNHF_SIGNAL) {
|
||||
return state.Invalid(TxValidationResult::TX_CONSENSUS, "bad-mnhf-type");
|
||||
@ -113,7 +116,7 @@ bool CheckMNHFTx(const CTransaction& tx, const CBlockIndex* pindexPrev, TxValida
|
||||
return state.Invalid(TxValidationResult::TX_CONSENSUS, "bad-mnhf-version");
|
||||
}
|
||||
|
||||
const CBlockIndex* pindexQuorum = g_chainman.m_blockman.LookupBlockIndex(mnhfTx.signal.quorumHash);
|
||||
const CBlockIndex* pindexQuorum = WITH_LOCK(::cs_main, return g_chainman.m_blockman.LookupBlockIndex(mnhfTx.signal.quorumHash));
|
||||
if (!pindexQuorum) {
|
||||
return state.Invalid(TxValidationResult::TX_CONSENSUS, "bad-mnhf-quorum-hash");
|
||||
}
|
||||
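For context, a minimal sketch of the WITH_LOCK idiom used in the hunk above (illustrative names; assumes the project's sync.h): the lock is held only while the expression evaluates, which is why CheckMNHFTx can drop its EXCLUSIVE_LOCKS_REQUIRED(cs_main) annotation.

// Illustrative only: a value guarded by a mutex, read via WITH_LOCK.
#include <sync.h>

Mutex g_example_mutex;
int g_example_value GUARDED_BY(g_example_mutex) = 0;

int ReadExampleValue()
{
    return WITH_LOCK(g_example_mutex, return g_example_value);
}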
@ -159,8 +162,6 @@ std::optional<uint8_t> extractEHFSignal(const CTransaction& tx)
|
||||
|
||||
static bool extractSignals(const CBlock& block, const CBlockIndex* const pindex, std::vector<uint8_t>& new_signals, BlockValidationState& state)
|
||||
{
|
||||
AssertLockHeld(cs_main);
|
||||
|
||||
// we skip the coinbase
|
||||
for (size_t i = 1; i < block.vtx.size(); ++i) {
|
||||
const CTransaction& tx = *block.vtx[i];
|
||||
@ -189,13 +190,13 @@ static bool extractSignals(const CBlock& block, const CBlockIndex* const pindex,
|
||||
return true;
|
||||
}
|
||||
|
||||
bool CMNHFManager::ProcessBlock(const CBlock& block, const CBlockIndex* const pindex, bool fJustCheck, BlockValidationState& state)
|
||||
std::optional<CMNHFManager::Signals> CMNHFManager::ProcessBlock(const CBlock& block, const CBlockIndex* const pindex, bool fJustCheck, BlockValidationState& state)
|
||||
{
|
||||
try {
|
||||
std::vector<uint8_t> new_signals;
|
||||
if (!extractSignals(block, pindex, new_signals, state)) {
|
||||
// state is set inside extractSignals
|
||||
return false;
|
||||
return std::nullopt;
|
||||
}
|
||||
Signals signals = GetSignalsStage(pindex->pprev);
|
||||
if (new_signals.empty()) {
|
||||
@ -203,25 +204,27 @@ bool CMNHFManager::ProcessBlock(const CBlock& block, const CBlockIndex* const pi
|
||||
AddToCache(signals, pindex);
|
||||
}
|
||||
LogPrint(BCLog::EHF, "CMNHFManager::ProcessBlock: no new signals; number of known signals: %d\n", signals.size());
|
||||
return true;
|
||||
return signals;
|
||||
}
|
||||
|
||||
int mined_height = pindex->nHeight;
|
||||
const int mined_height = pindex->nHeight;
|
||||
|
||||
// Extra validation of signals to be sure that it can succeed
|
||||
for (const auto& versionBit : new_signals) {
|
||||
LogPrintf("CMNHFManager::ProcessBlock: add mnhf bit=%d block:%s number of known signals:%lld\n", versionBit, pindex->GetBlockHash().ToString(), signals.size());
|
||||
if (signals.find(versionBit) != signals.end()) {
|
||||
return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, "bad-mnhf-duplicate");
|
||||
state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, "bad-mnhf-duplicate");
|
||||
return std::nullopt;
|
||||
}
|
||||
|
||||
if (!Params().IsValidMNActivation(versionBit, pindex->GetMedianTimePast())) {
|
||||
return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, "bad-mnhf-non-mn-fork");
|
||||
state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, "bad-mnhf-non-mn-fork");
|
||||
return std::nullopt;
|
||||
}
|
||||
}
|
||||
if (fJustCheck) {
|
||||
// We are done, no need to actually update any params
|
||||
return true;
|
||||
return signals;
|
||||
}
|
||||
for (const auto& versionBit : new_signals) {
|
||||
if (Params().IsValidMNActivation(versionBit, pindex->GetMedianTimePast())) {
|
||||
@ -231,10 +234,11 @@ bool CMNHFManager::ProcessBlock(const CBlock& block, const CBlockIndex* const pi
|
||||
}
|
||||
|
||||
AddToCache(signals, pindex);
|
||||
return true;
|
||||
return signals;
|
||||
} catch (const std::exception& e) {
|
||||
LogPrintf("CMNHFManager::ProcessBlock -- failed: %s\n", e.what());
|
||||
return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, "failed-proc-mnhf-inblock");
|
||||
state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, "failed-proc-mnhf-inblock");
|
||||
return std::nullopt;
|
||||
}
|
||||
}
|
||||
|
||||
@ -250,7 +254,7 @@ bool CMNHFManager::UndoBlock(const CBlock& block, const CBlockIndex* const pinde
|
||||
return true;
|
||||
}
|
||||
|
||||
const Signals signals = GetFromCache(pindex);
|
||||
const Signals signals = GetForBlock(pindex);
|
||||
for (const auto& versionBit : excluded_signals) {
|
||||
LogPrintf("%s: exclude mnhf bit=%d block:%s number of known signals:%lld\n", __func__, versionBit, pindex->GetBlockHash().ToString(), signals.size());
|
||||
assert(signals.find(versionBit) != signals.end());
|
||||
@ -260,34 +264,68 @@ bool CMNHFManager::UndoBlock(const CBlock& block, const CBlockIndex* const pinde
|
||||
return true;
|
||||
}
|
||||
|
||||
CMNHFManager::Signals CMNHFManager::GetFromCache(const CBlockIndex* const pindex)
|
||||
CMNHFManager::Signals CMNHFManager::GetForBlock(const CBlockIndex* pindex)
|
||||
{
|
||||
if (pindex == nullptr) return {};
|
||||
|
||||
std::stack<const CBlockIndex *> to_calculate;
|
||||
|
||||
std::optional<CMNHFManager::Signals> signalsTmp;
|
||||
while (!(signalsTmp = GetFromCache(pindex)).has_value()) {
|
||||
to_calculate.push(pindex);
|
||||
pindex = pindex->pprev;
|
||||
}
|
||||
|
||||
const Consensus::Params& consensusParams{Params().GetConsensus()};
|
||||
while (!to_calculate.empty()) {
|
||||
const CBlockIndex* pindex_top{to_calculate.top()};
|
||||
CBlock block;
|
||||
if (!ReadBlockFromDisk(block, pindex_top, consensusParams)) {
|
||||
throw std::runtime_error("failed-getehfforblock-read");
|
||||
}
|
||||
BlockValidationState state;
|
||||
signalsTmp = ProcessBlock(block, pindex_top, false, state);
|
||||
if (!signalsTmp.has_value()) {
|
||||
LogPrintf("%s: process block failed due to %s\n", __func__, state.ToString());
|
||||
throw std::runtime_error("failed-getehfforblock-construct");
|
||||
}
|
||||
|
||||
to_calculate.pop();
|
||||
}
|
||||
return *signalsTmp;
|
||||
}
|
||||
|
||||
std::optional<CMNHFManager::Signals> CMNHFManager::GetFromCache(const CBlockIndex* const pindex)
|
||||
{
|
||||
Signals signals{};
|
||||
if (pindex == nullptr) return signals;
|
||||
|
||||
// TODO: remove this check of phashBlock to nullptr
|
||||
// This check is needed only because unit test 'versionbits_tests.cpp'
|
||||
// lets `phashBlock` be nullptr
|
||||
if (pindex->phashBlock == nullptr) return {};
|
||||
if (pindex->phashBlock == nullptr) return signals;
|
||||
|
||||
|
||||
const uint256& blockHash = pindex->GetBlockHash();
|
||||
Signals signals{};
|
||||
{
|
||||
LOCK(cs_cache);
|
||||
if (mnhfCache.get(blockHash, signals)) {
|
||||
return signals;
|
||||
}
|
||||
}
|
||||
if (VersionBitsState(pindex->pprev, Params().GetConsensus(), Consensus::DEPLOYMENT_V20, versionbitscache) != ThresholdState::ACTIVE) {
|
||||
{
|
||||
LOCK(cs_cache);
|
||||
mnhfCache.insert(blockHash, {});
|
||||
return {};
|
||||
if (ThresholdState::ACTIVE != v20_activation.State(pindex->pprev, Params().GetConsensus(), Consensus::DEPLOYMENT_V20)) {
|
||||
mnhfCache.insert(blockHash, signals);
|
||||
return signals;
|
||||
}
|
||||
}
|
||||
bool ok = m_evoDb.Read(std::make_pair(DB_SIGNALS, blockHash), signals);
|
||||
assert(ok);
|
||||
LOCK(cs_cache);
|
||||
mnhfCache.insert(blockHash, signals);
|
||||
return signals;
|
||||
if (m_evoDb.Read(std::make_pair(DB_SIGNALS, blockHash), signals)) {
|
||||
LOCK(cs_cache);
|
||||
mnhfCache.insert(blockHash, signals);
|
||||
return signals;
|
||||
}
|
||||
return std::nullopt;
|
||||
}
|
||||
|
||||
void CMNHFManager::AddToCache(const Signals& signals, const CBlockIndex* const pindex)
|
||||
@ -297,15 +335,17 @@ void CMNHFManager::AddToCache(const Signals& signals, const CBlockIndex* const p
|
||||
LOCK(cs_cache);
|
||||
mnhfCache.insert(blockHash, signals);
|
||||
}
|
||||
if (VersionBitsState(pindex->pprev, Params().GetConsensus(), Consensus::DEPLOYMENT_V20, versionbitscache) != ThresholdState::ACTIVE) {
|
||||
return;
|
||||
assert(pindex != nullptr);
|
||||
{
|
||||
LOCK(cs_cache);
|
||||
if (ThresholdState::ACTIVE != v20_activation.State(pindex->pprev, Params().GetConsensus(), Consensus::DEPLOYMENT_V20)) return;
|
||||
}
|
||||
m_evoDb.Write(std::make_pair(DB_SIGNALS, blockHash), signals);
|
||||
}
|
||||
|
||||
void CMNHFManager::AddSignal(const CBlockIndex* const pindex, int bit)
|
||||
{
|
||||
auto signals = GetFromCache(pindex->pprev);
|
||||
auto signals = GetForBlock(pindex->pprev);
|
||||
signals.emplace(bit, pindex->nHeight);
|
||||
AddToCache(signals, pindex);
|
||||
}
|
||||
|
@ -22,7 +22,6 @@ class CBlock;
|
||||
class CBlockIndex;
|
||||
class CEvoDB;
|
||||
class TxValidationState;
|
||||
extern RecursiveMutex cs_main;
|
||||
|
||||
// mnhf signal special transaction
|
||||
class MNHFTx
|
||||
@ -102,20 +101,25 @@ private:
|
||||
// versionBit <-> height
|
||||
unordered_lru_cache<uint256, Signals, StaticSaltedHasher> mnhfCache GUARDED_BY(cs_cache) {MNHFCacheSize};
|
||||
|
||||
// This cache is used only for v20 activation to avoid a double lock through VersionBitsConditionChecker::SignalHeight
|
||||
VersionBitsCache v20_activation GUARDED_BY(cs_cache);
|
||||
public:
|
||||
explicit CMNHFManager(CEvoDB& evoDb);
|
||||
~CMNHFManager();
|
||||
explicit CMNHFManager(const CMNHFManager&) = delete;
|
||||
|
||||
/**
|
||||
* Every new block should be processed when Tip() is updated by calling of CMNHFManager::ProcessBlock
|
||||
* Every new block should be processed when Tip() is updated, by calling CMNHFManager::ProcessBlock.
|
||||
* This function only validates the EHF transactions for this block and updates the internal caches/evodb state
|
||||
*/
|
||||
bool ProcessBlock(const CBlock& block, const CBlockIndex* const pindex, bool fJustCheck, BlockValidationState& state) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
|
||||
std::optional<Signals> ProcessBlock(const CBlock& block, const CBlockIndex* const pindex, bool fJustCheck, BlockValidationState& state);
|
||||
|
||||
/**
|
||||
* Every undo block should be processed when Tip() is updated, by calling CMNHFManager::UndoBlock
|
||||
* This function actually does nothing at the moment, because the status of the ancestor block is already known.
|
||||
* Although it should still be called to perform some sanity checks
|
||||
*/
|
||||
bool UndoBlock(const CBlock& block, const CBlockIndex* const pindex) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
|
||||
bool UndoBlock(const CBlock& block, const CBlockIndex* const pindex);
|
||||
|
||||
|
||||
// Implements interface
|
||||
@ -130,13 +134,20 @@ private:
|
||||
|
||||
/**
|
||||
* This function returns the list of signals available as of the previous block.
|
||||
* if the signals for previous block is not available in cache it would read blocks from disk
|
||||
* until state won't be recovered.
|
||||
* NOTE: that some signals could expired between blocks.
|
||||
* validate them by
|
||||
*/
|
||||
Signals GetFromCache(const CBlockIndex* const pindex);
|
||||
Signals GetForBlock(const CBlockIndex* const pindex);
|
||||
|
||||
/**
|
||||
* This function access to in-memory cache or to evo db but does not calculate anything
|
||||
* NOTE: that some signals could expired between blocks.
|
||||
*/
|
||||
std::optional<Signals> GetFromCache(const CBlockIndex* const pindex);
|
||||
};
|
||||
|
||||
std::optional<uint8_t> extractEHFSignal(const CTransaction& tx);
|
||||
bool CheckMNHFTx(const CTransaction& tx, const CBlockIndex* pindexPrev, TxValidationState& state) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
|
||||
bool CheckMNHFTx(const CTransaction& tx, const CBlockIndex* pindexPrev, TxValidationState& state);
|
||||
|
||||
#endif // BITCOIN_EVO_MNHFTX_H
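
The header above splits the old GetFromCache() into a cache/db-only lookup returning std::optional<Signals> and a GetForBlock() that recovers missing state from disk. For illustration, here is a minimal sketch of what such a recovery loop could look like, assuming the member names from this diff (mnhfCache, m_evoDb, AddToCache) and a hypothetical ReconstructSignalsForBlock() helper standing in for the real per-block replay; this is not the actual Dash Core implementation:

// Illustrative sketch only -- not the code from this pull request.
CMNHFManager::Signals CMNHFManager::GetForBlock(const CBlockIndex* const pindex)
{
    if (pindex == nullptr) return {}; // no previous block: nothing signalled yet

    // Fast path: the LRU cache or the evo db already has the answer.
    if (auto cached = GetFromCache(pindex)) return *cached;

    // Slow path: walk back until a cached ancestor is found, then replay
    // forward so every visited block ends up cached again.
    std::vector<const CBlockIndex*> to_replay;
    const CBlockIndex* walk = pindex;
    std::optional<Signals> base;
    while (walk != nullptr && !(base = GetFromCache(walk))) {
        to_replay.push_back(walk);
        walk = walk->pprev;
    }
    Signals signals = base.value_or(Signals{});
    for (auto it = to_replay.rbegin(); it != to_replay.rend(); ++it) {
        // Hypothetical helper: expire old signals and apply EHF transactions read from disk.
        signals = ReconstructSignalsForBlock(*it, signals);
        AddToCache(signals, *it);
    }
    return signals;
}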
|
||||
|
@ -17,6 +17,7 @@
#include <util/time.h>
#include <util/underlying.h>
#include <validation.h>
#include <versionbits.h>

#include <univalue.h>

18
src/init.cpp
@ -1912,6 +1912,7 @@ bool AppInitMain(const CoreContext& context, NodeContext& node, interfaces::Bloc
|
||||
LOCK(cs_main);
|
||||
node.evodb.reset();
|
||||
node.evodb = std::make_unique<CEvoDB>(nEvoDbCache, false, fReset || fReindexChainState);
|
||||
node.mnhf_manager.reset();
|
||||
node.mnhf_manager = std::make_unique<CMNHFManager>(*node.evodb);
|
||||
|
||||
chainman.Reset();
|
||||
@ -1933,8 +1934,15 @@ bool AppInitMain(const CoreContext& context, NodeContext& node, interfaces::Bloc
|
||||
creditPoolManager.reset(new CCreditPoolManager(*node.evodb));
|
||||
llmq::quorumSnapshotManager.reset();
|
||||
llmq::quorumSnapshotManager.reset(new llmq::CQuorumSnapshotManager(*node.evodb));
|
||||
|
||||
if (node.llmq_ctx) {
|
||||
node.llmq_ctx->Interrupt();
|
||||
node.llmq_ctx->Stop();
|
||||
}
|
||||
node.llmq_ctx.reset();
|
||||
node.llmq_ctx.reset(new LLMQContext(chainman.ActiveChainstate(), *node.connman, *node.evodb, *::sporkManager, *node.mempool, node.peerman, false, fReset || fReindexChainState));
|
||||
// Have to start it early to let VerifyDB check ChainLock signatures in coinbase
|
||||
node.llmq_ctx->Start();
|
||||
|
||||
if (fReset) {
|
||||
pblocktree->WriteReindexing(true);
|
||||
@ -2120,6 +2128,10 @@ bool AppInitMain(const CoreContext& context, NodeContext& node, interfaces::Bloc
|
||||
LogPrintf("%s: bls_legacy_scheme=%d\n", __func__, bls::bls_legacy_scheme.load());
|
||||
}
|
||||
|
||||
if (args.GetArg("-checklevel", DEFAULT_CHECKLEVEL) >= 3) {
|
||||
chainstate->ResetBlockFailureFlags(nullptr);
|
||||
}
|
||||
|
||||
} else {
|
||||
// TODO: CEvoDB instance should probably be a part of CChainState
|
||||
// (for multiple chainstates to actually work in parallel)
|
||||
@ -2131,10 +2143,6 @@ bool AppInitMain(const CoreContext& context, NodeContext& node, interfaces::Bloc
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if (args.GetArg("-checklevel", DEFAULT_CHECKLEVEL) >= 3) {
|
||||
::ChainstateActive().ResetBlockFailureFlags(nullptr);
|
||||
}
|
||||
}
|
||||
} catch (const std::exception& e) {
|
||||
LogPrintf("%s\n", e.what());
|
||||
@ -2306,8 +2314,6 @@ bool AppInitMain(const CoreContext& context, NodeContext& node, interfaces::Bloc
|
||||
node.scheduler->scheduleEvery(std::bind(&PeriodicStats, std::ref(*node.args), std::cref(*node.mempool)), std::chrono::seconds{nStatsPeriod});
|
||||
}
|
||||
|
||||
node.llmq_ctx->Start();
|
||||
|
||||
// ********************************************************* Step 11: import blocks
|
||||
|
||||
if (!CheckDiskSpace(GetDataDir())) {
|
||||
|
@ -60,10 +60,7 @@ LLMQContext::~LLMQContext() {
|
||||
llmq::chainLocksHandler.reset();
|
||||
llmq::quorumManager.reset();
|
||||
llmq::quorumBlockProcessor.reset();
|
||||
{
|
||||
LOCK(llmq::cs_llmq_vbc);
|
||||
llmq::llmq_versionbitscache.Clear();
|
||||
}
|
||||
llmq::llmq_versionbitscache.Clear();
|
||||
}
|
||||
|
||||
void LLMQContext::Interrupt() {
|
||||
|
@ -466,7 +466,8 @@ void CDKGSessionManager::CleanupOldContributions() const

for (const auto& params : Params().GetConsensus().llmqs) {
// For how many blocks recent DKG info should be kept
const int MAX_STORE_DEPTH = 2 * params.signingActiveQuorumCount * params.dkgInterval;
const int MAX_CYCLES = params.useRotation ? params.keepOldKeys / params.signingActiveQuorumCount : params.keepOldKeys;
const int MAX_STORE_DEPTH = MAX_CYCLES * params.dkgInterval;

LogPrint(BCLog::LLMQ, "CDKGSessionManager::%s -- looking for old entries for llmq type %d\n", __func__, ToUnderlying(params.type));

@ -104,6 +104,13 @@ struct LLMQParams {
// For rotated quorums it should be equal to 2 x active quorums set.
int keepOldConnections;

// The number of quorums for which we should keep keys. Usually it's equal to signingActiveQuorumCount * 2.
// Unlike for other quorum types we want to keep data (secret key shares and vvec)
// for Platform quorums for much longer because Platform can be restarted and
// it must be able to re-sign stuff.
int keepOldKeys;

// How many members should we try to send all sigShares to before we give up.
int recoveryMembers;
};
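
The new keepOldKeys field also drives how deep CDKGSessionManager::CleanupOldContributions() looks back (MAX_CYCLES * dkgInterval in the hunk further up). A small self-contained sketch of that arithmetic with made-up parameter values, purely to illustrate the formula:

// Illustrative arithmetic only; the parameter values below are hypothetical.
#include <cstdio>

struct ParamsSketch {
    bool useRotation;
    int signingActiveQuorumCount;
    int keepOldKeys;
    int dkgInterval;
};

static int StoreDepth(const ParamsSketch& p)
{
    // For rotated quorums keepOldKeys counts quorums across the whole active set,
    // so divide by the set size to get the number of DKG cycles worth of data to keep.
    const int max_cycles = p.useRotation ? p.keepOldKeys / p.signingActiveQuorumCount : p.keepOldKeys;
    return max_cycles * p.dkgInterval; // how many blocks of DKG data to keep
}

int main()
{
    std::printf("%d\n", StoreDepth({false, 4, 8, 24})); // non-rotating: 8 * 24 = 192 blocks
    std::printf("%d\n", StoreDepth({true, 4, 8, 24}));  // rotating: (8 / 4) * 24 = 48 blocks
    return 0;
}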
|
||||
@ -138,6 +145,7 @@ static constexpr std::array<LLMQParams, 14> available_llmqs = {
|
||||
.signingActiveQuorumCount = 2, // just a few ones to allow easier testing
|
||||
|
||||
.keepOldConnections = 3,
|
||||
.keepOldKeys = 4,
|
||||
.recoveryMembers = 3,
|
||||
},
|
||||
|
||||
@ -163,6 +171,7 @@ static constexpr std::array<LLMQParams, 14> available_llmqs = {
|
||||
.signingActiveQuorumCount = 2, // just a few ones to allow easier testing
|
||||
|
||||
.keepOldConnections = 3,
|
||||
.keepOldKeys = 4,
|
||||
.recoveryMembers = 3,
|
||||
},
|
||||
|
||||
@ -188,6 +197,7 @@ static constexpr std::array<LLMQParams, 14> available_llmqs = {
|
||||
.signingActiveQuorumCount = 2, // just a few ones to allow easier testing
|
||||
|
||||
.keepOldConnections = 3,
|
||||
.keepOldKeys = 4,
|
||||
.recoveryMembers = 3,
|
||||
},
|
||||
|
||||
@ -213,6 +223,7 @@ static constexpr std::array<LLMQParams, 14> available_llmqs = {
|
||||
.signingActiveQuorumCount = 2, // just a few ones to allow easier testing
|
||||
|
||||
.keepOldConnections = 4,
|
||||
.keepOldKeys = 4,
|
||||
.recoveryMembers = 3,
|
||||
},
|
||||
|
||||
@ -238,6 +249,7 @@ static constexpr std::array<LLMQParams, 14> available_llmqs = {
|
||||
.signingActiveQuorumCount = 2, // just a few ones to allow easier testing
|
||||
|
||||
.keepOldConnections = 4,
|
||||
.keepOldKeys = 24 * 30 * 2, // 2 months of quorums
|
||||
.recoveryMembers = 3,
|
||||
},
|
||||
|
||||
@ -263,6 +275,7 @@ static constexpr std::array<LLMQParams, 14> available_llmqs = {
|
||||
.signingActiveQuorumCount = 4, // just a few ones to allow easier testing
|
||||
|
||||
.keepOldConnections = 5,
|
||||
.keepOldKeys = 8,
|
||||
.recoveryMembers = 6,
|
||||
},
|
||||
|
||||
@ -288,6 +301,7 @@ static constexpr std::array<LLMQParams, 14> available_llmqs = {
|
||||
.signingActiveQuorumCount = 2, // just a few ones to allow easier testing
|
||||
|
||||
.keepOldConnections = 4,
|
||||
.keepOldKeys = 4,
|
||||
.recoveryMembers = 4,
|
||||
},
|
||||
|
||||
@ -313,6 +327,7 @@ static constexpr std::array<LLMQParams, 14> available_llmqs = {
|
||||
.signingActiveQuorumCount = 4, // just a few ones to allow easier testing
|
||||
|
||||
.keepOldConnections = 5,
|
||||
.keepOldKeys = 24 * 30 * 2, // 2 months of quorums
|
||||
.recoveryMembers = 6,
|
||||
},
|
||||
|
||||
@ -338,6 +353,7 @@ static constexpr std::array<LLMQParams, 14> available_llmqs = {
|
||||
|
||||
.signingActiveQuorumCount = 24, // a full day worth of LLMQs
|
||||
.keepOldConnections = 25,
|
||||
.keepOldKeys = 48,
|
||||
.recoveryMembers = 25,
|
||||
},
|
||||
|
||||
@ -363,6 +379,7 @@ static constexpr std::array<LLMQParams, 14> available_llmqs = {
|
||||
|
||||
.signingActiveQuorumCount = 32,
|
||||
.keepOldConnections = 64,
|
||||
.keepOldKeys = 64,
|
||||
.recoveryMembers = 25,
|
||||
},
|
||||
|
||||
@ -389,6 +406,7 @@ static constexpr std::array<LLMQParams, 14> available_llmqs = {
|
||||
.signingActiveQuorumCount = 4, // two days worth of LLMQs
|
||||
|
||||
.keepOldConnections = 5,
|
||||
.keepOldKeys = 8,
|
||||
.recoveryMembers = 100,
|
||||
},
|
||||
|
||||
@ -416,6 +434,7 @@ static constexpr std::array<LLMQParams, 14> available_llmqs = {
|
||||
.signingActiveQuorumCount = 4, // four days worth of LLMQs
|
||||
|
||||
.keepOldConnections = 5,
|
||||
.keepOldKeys = 8,
|
||||
.recoveryMembers = 100,
|
||||
},
|
||||
|
||||
@ -443,6 +462,7 @@ static constexpr std::array<LLMQParams, 14> available_llmqs = {
|
||||
.signingActiveQuorumCount = 24, // a full day worth of LLMQs
|
||||
|
||||
.keepOldConnections = 25,
|
||||
.keepOldKeys = 24 * 30 * 2, // 2 months of quorums
|
||||
.recoveryMembers = 50,
|
||||
},
|
||||
|
||||
@ -470,6 +490,7 @@ static constexpr std::array<LLMQParams, 14> available_llmqs = {
|
||||
.signingActiveQuorumCount = 24, // a full day worth of LLMQs
|
||||
|
||||
.keepOldConnections = 25,
|
||||
.keepOldKeys = 24 * 30 * 2, // 2 months of quorums
|
||||
.recoveryMembers = 12,
|
||||
},
|
||||
|
||||
|
@ -200,8 +200,8 @@ CQuorumManager::CQuorumManager(CBLSWorker& _blsWorker, CChainState& chainstate,
|
||||
m_mn_sync(mn_sync),
|
||||
m_peerman(peerman)
|
||||
{
|
||||
utils::InitQuorumsCache(mapQuorumsCache);
|
||||
utils::InitQuorumsCache(scanQuorumsCache);
|
||||
utils::InitQuorumsCache(mapQuorumsCache, false);
|
||||
utils::InitQuorumsCache(scanQuorumsCache, false);
|
||||
|
||||
quorumThreadInterrupt.reset();
|
||||
}
|
||||
@ -296,7 +296,7 @@ void CQuorumManager::UpdatedBlockTip(const CBlockIndex* pindexNew, bool fInitial
|
||||
}
|
||||
|
||||
TriggerQuorumDataRecoveryThreads(pindexNew);
|
||||
CleanupOldQuorumData(pindexNew);
|
||||
StartCleanupOldQuorumDataThread(pindexNew);
|
||||
}
|
||||
|
||||
void CQuorumManager::CheckQuorumConnections(const Consensus::LLMQParams& llmqParams, const CBlockIndex* pindexNew) const
|
||||
@ -824,7 +824,7 @@ void CQuorumManager::StartCachePopulatorThread(const CQuorumCPtr pQuorum) const
|
||||
// when then later some other thread tries to get keys, it will be much faster
|
||||
workerPool.push([pQuorum, t, this](int threadId) {
|
||||
for (const auto i : irange::range(pQuorum->members.size())) {
|
||||
if (!quorumThreadInterrupt) {
|
||||
if (quorumThreadInterrupt) {
|
||||
break;
|
||||
}
|
||||
if (pQuorum->qc->validMembers[i]) {
|
||||
@ -956,7 +956,7 @@ void CQuorumManager::StartQuorumDataRecoveryThread(const CQuorumCPtr pQuorum, co
|
||||
});
|
||||
}
|
||||
|
||||
static void DataCleanupHelper(CDBWrapper& db, std::set<uint256> skip_list)
static void DataCleanupHelper(CDBWrapper& db, std::set<uint256> skip_list, bool compact = false)
{
const auto prefixes = {DB_QUORUM_QUORUM_VVEC, DB_QUORUM_SK_SHARE};

@ -990,39 +990,54 @@ static void DataCleanupHelper(CDBWrapper& db, std::set<uint256> skip_list)

db.WriteBatch(batch);

LogPrint(BCLog::LLMQ, "CQuorumManager::%d -- %s removed %d\n", __func__, prefix, count);
LogPrint(BCLog::LLMQ, "CQuorumManager::%s -- %s removed %d\n", __func__, prefix, count);
}

pcursor.reset();
db.CompactFull();

if (compact) {
// Avoid using this on regular cleanups, use on db migrations only
LogPrint(BCLog::LLMQ, "CQuorumManager::%s -- compact start\n", __func__);
db.CompactFull();
LogPrint(BCLog::LLMQ, "CQuorumManager::%s -- compact end\n", __func__);
}
}

void CQuorumManager::CleanupOldQuorumData(const CBlockIndex* pIndex) const
void CQuorumManager::StartCleanupOldQuorumDataThread(const CBlockIndex* pIndex) const
{
if (!fMasternodeMode || pIndex == nullptr || (pIndex->nHeight % 576 != 0)) {
// Note: this function is CPU heavy and we don't want it to be running during DKGs.
// The largest dkgMiningWindowStart for a related quorum type is 42 (LLMQ_60_75).
// At the same time most quorums use dkgInterval = 24 so the next DKG for them
// (after block 576 + 42) will start at block 576 + 24 * 2. That's only a 6 blocks
// window and it's better to have more room so we pick next cycle.
// dkgMiningWindowStart for small quorums is 10 i.e. a safe block to start
// these calculations is at height 576 + 24 * 2 + 10 = 576 + 58.
if (!fMasternodeMode || pIndex == nullptr || (pIndex->nHeight % 576 != 58)) {
return;
}

std::set<uint256> dbKeysToSkip;
cxxtimer::Timer t(/*start=*/ true);
LogPrint(BCLog::LLMQ, "CQuorumManager::%s -- start\n", __func__);

LogPrint(BCLog::LLMQ, "CQuorumManager::%d -- start\n", __func__);
// Platform quorums in all networks are created every 24 blocks (~1h).
// Unlike for other quorum types we want to keep data (secret key shares and vvec)
// for Platform quorums for at least 2 months because Platform can be restarted and
// it must be able to re-sign stuff. During a month, 24 * 30 quorums are created.
constexpr auto numPlatformQuorumsDataToKeep = 24 * 30 * 2;
// do not block the caller thread
workerPool.push([pIndex, t, this](int threadId) {
std::set<uint256> dbKeysToSkip;

for (const auto& params : Params().GetConsensus().llmqs) {
auto nQuorumsToKeep = params.type == Params().GetConsensus().llmqTypePlatform ? numPlatformQuorumsDataToKeep : params.keepOldConnections;
const auto vecQuorums = ScanQuorums(params.type, pIndex, nQuorumsToKeep);
for (const auto& pQuorum : vecQuorums) {
dbKeysToSkip.insert(MakeQuorumKey(*pQuorum));
for (const auto& params : Params().GetConsensus().llmqs) {
if (quorumThreadInterrupt) {
break;
}
for (const auto& pQuorum : ScanQuorums(params.type, pIndex, params.keepOldKeys)) {
dbKeysToSkip.insert(MakeQuorumKey(*pQuorum));
}
}
}

DataCleanupHelper(m_evoDb.GetRawDB(), dbKeysToSkip);
if (!quorumThreadInterrupt) {
DataCleanupHelper(m_evoDb.GetRawDB(), dbKeysToSkip);
}

LogPrint(BCLog::LLMQ, "CQuorumManager::%d -- done\n", __func__);
LogPrint(BCLog::LLMQ, "CQuorumManager::StartCleanupOldQuorumDataThread -- done. time=%d\n", t.count());
});
}
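
The scheduling comment above boils down to a single modular check: the cleanup thread is started once per 576-block cycle, at offset 24 * 2 + 10 = 58, so that it lands between DKG sessions. A minimal sketch of that guard, using the same constants as the diff but wrapped in a free function purely for illustration:

// Illustrative sketch of the scheduling guard used above.
constexpr int CLEANUP_CADENCE_BLOCKS = 576;         // roughly one day of blocks
constexpr int CLEANUP_HEIGHT_OFFSET  = 24 * 2 + 10; // == 58, see the comment above

inline bool ShouldStartQuorumDataCleanup(int nHeight, bool fMasternode)
{
    return fMasternode && (nHeight % CLEANUP_CADENCE_BLOCKS == CLEANUP_HEIGHT_OFFSET);
}
// Heights 634, 1210, 1786, ... (i.e. 576 * k + 58) would trigger the cleanup.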
|
||||
|
||||
} // namespace llmq
|
||||
|
@ -277,7 +277,7 @@ private:
|
||||
void StartCachePopulatorThread(const CQuorumCPtr pQuorum) const;
|
||||
void StartQuorumDataRecoveryThread(const CQuorumCPtr pQuorum, const CBlockIndex* pIndex, uint16_t nDataMask) const;
|
||||
|
||||
void CleanupOldQuorumData(const CBlockIndex* pIndex) const;
|
||||
void StartCleanupOldQuorumDataThread(const CBlockIndex* pIndex) const;
|
||||
};
|
||||
|
||||
extern std::unique_ptr<CQuorumManager> quorumManager;
|
||||
|
@ -35,7 +35,6 @@ std::optional<std::pair<CBLSSignature, uint32_t>> GetNonNullCoinbaseChainlock(co
|
||||
namespace llmq
|
||||
{
|
||||
|
||||
Mutex cs_llmq_vbc;
|
||||
VersionBitsCache llmq_versionbitscache;
|
||||
|
||||
namespace utils
|
||||
@ -708,28 +707,24 @@ bool IsV19Active(gsl::not_null<const CBlockIndex*> pindex)
|
||||
|
||||
bool IsV20Active(gsl::not_null<const CBlockIndex*> pindex)
|
||||
{
|
||||
LOCK(cs_llmq_vbc);
|
||||
return VersionBitsState(pindex, Params().GetConsensus(), Consensus::DEPLOYMENT_V20, llmq_versionbitscache) == ThresholdState::ACTIVE;
|
||||
return llmq_versionbitscache.State(pindex, Params().GetConsensus(), Consensus::DEPLOYMENT_V20) == ThresholdState::ACTIVE;
|
||||
}
|
||||
|
||||
bool IsMNRewardReallocationActive(gsl::not_null<const CBlockIndex*> pindex)
|
||||
{
|
||||
if (!IsV20Active(pindex)) return false;
|
||||
|
||||
LOCK(cs_llmq_vbc);
|
||||
return VersionBitsState(pindex, Params().GetConsensus(), Consensus::DEPLOYMENT_MN_RR, llmq_versionbitscache) == ThresholdState::ACTIVE;
|
||||
return llmq_versionbitscache.State(pindex, Params().GetConsensus(), Consensus::DEPLOYMENT_MN_RR) == ThresholdState::ACTIVE;
|
||||
}
|
||||
|
||||
ThresholdState GetV20State(gsl::not_null<const CBlockIndex*> pindex)
|
||||
{
|
||||
LOCK(cs_llmq_vbc);
|
||||
return VersionBitsState(pindex, Params().GetConsensus(), Consensus::DEPLOYMENT_V20, llmq_versionbitscache);
|
||||
return llmq_versionbitscache.State(pindex, Params().GetConsensus(), Consensus::DEPLOYMENT_V20);
|
||||
}
|
||||
|
||||
int GetV20Since(gsl::not_null<const CBlockIndex*> pindex)
|
||||
{
|
||||
LOCK(cs_llmq_vbc);
|
||||
return VersionBitsStateSinceHeight(pindex, Params().GetConsensus(), Consensus::DEPLOYMENT_V20, llmq_versionbitscache);
|
||||
return llmq_versionbitscache.StateSinceHeight(pindex, Params().GetConsensus(), Consensus::DEPLOYMENT_V20);
|
||||
}
|
||||
|
||||
bool IsInstantSendLLMQTypeShared()
|
||||
@ -1005,8 +1000,7 @@ bool IsQuorumTypeEnabledInternal(Consensus::LLMQType llmqType, const CQuorumMana
|
||||
return true;
|
||||
|
||||
case Consensus::LLMQType::LLMQ_TEST_V17: {
|
||||
LOCK(cs_llmq_vbc);
|
||||
return VersionBitsState(pindex, consensusParams, Consensus::DEPLOYMENT_TESTDUMMY, llmq_versionbitscache) == ThresholdState::ACTIVE;
|
||||
return llmq_versionbitscache.State(pindex, consensusParams, Consensus::DEPLOYMENT_TESTDUMMY) == ThresholdState::ACTIVE;
|
||||
}
|
||||
case Consensus::LLMQType::LLMQ_100_67:
|
||||
return pindex->nHeight + 1 >= consensusParams.DIP0020Height;
|
||||
@ -1110,17 +1104,17 @@ std::map<Consensus::LLMQType, QvvecSyncMode> GetEnabledQuorumVvecSyncEntries()
|
||||
}
|
||||
|
||||
template <typename CacheType>
|
||||
void InitQuorumsCache(CacheType& cache)
|
||||
void InitQuorumsCache(CacheType& cache, bool limit_by_connections)
|
||||
{
|
||||
for (const auto& llmq : Params().GetConsensus().llmqs) {
|
||||
cache.emplace(std::piecewise_construct, std::forward_as_tuple(llmq.type),
|
||||
std::forward_as_tuple(llmq.keepOldConnections));
|
||||
std::forward_as_tuple(limit_by_connections ? llmq.keepOldConnections : llmq.keepOldKeys));
|
||||
}
|
||||
}
|
||||
template void InitQuorumsCache<std::map<Consensus::LLMQType, unordered_lru_cache<uint256, bool, StaticSaltedHasher>>>(std::map<Consensus::LLMQType, unordered_lru_cache<uint256, bool, StaticSaltedHasher>>& cache);
|
||||
template void InitQuorumsCache<std::map<Consensus::LLMQType, unordered_lru_cache<uint256, std::vector<CQuorumCPtr>, StaticSaltedHasher>>>(std::map<Consensus::LLMQType, unordered_lru_cache<uint256, std::vector<CQuorumCPtr>, StaticSaltedHasher>>& cache);
|
||||
template void InitQuorumsCache<std::map<Consensus::LLMQType, unordered_lru_cache<uint256, std::shared_ptr<llmq::CQuorum>, StaticSaltedHasher, 0ul, 0ul>, std::less<Consensus::LLMQType>, std::allocator<std::pair<Consensus::LLMQType const, unordered_lru_cache<uint256, std::shared_ptr<llmq::CQuorum>, StaticSaltedHasher, 0ul, 0ul>>>>>(std::map<Consensus::LLMQType, unordered_lru_cache<uint256, std::shared_ptr<llmq::CQuorum>, StaticSaltedHasher, 0ul, 0ul>, std::less<Consensus::LLMQType>, std::allocator<std::pair<Consensus::LLMQType const, unordered_lru_cache<uint256, std::shared_ptr<llmq::CQuorum>, StaticSaltedHasher, 0ul, 0ul>>>>&);
|
||||
template void InitQuorumsCache<std::map<Consensus::LLMQType, unordered_lru_cache<uint256, int, StaticSaltedHasher>>>(std::map<Consensus::LLMQType, unordered_lru_cache<uint256, int, StaticSaltedHasher>>& cache);
|
||||
template void InitQuorumsCache<std::map<Consensus::LLMQType, unordered_lru_cache<uint256, bool, StaticSaltedHasher>>>(std::map<Consensus::LLMQType, unordered_lru_cache<uint256, bool, StaticSaltedHasher>>& cache, bool limit_by_connections);
|
||||
template void InitQuorumsCache<std::map<Consensus::LLMQType, unordered_lru_cache<uint256, std::vector<CQuorumCPtr>, StaticSaltedHasher>>>(std::map<Consensus::LLMQType, unordered_lru_cache<uint256, std::vector<CQuorumCPtr>, StaticSaltedHasher>>& cache, bool limit_by_connections);
|
||||
template void InitQuorumsCache<std::map<Consensus::LLMQType, unordered_lru_cache<uint256, std::shared_ptr<llmq::CQuorum>, StaticSaltedHasher, 0ul, 0ul>, std::less<Consensus::LLMQType>, std::allocator<std::pair<Consensus::LLMQType const, unordered_lru_cache<uint256, std::shared_ptr<llmq::CQuorum>, StaticSaltedHasher, 0ul, 0ul>>>>>(std::map<Consensus::LLMQType, unordered_lru_cache<uint256, std::shared_ptr<llmq::CQuorum>, StaticSaltedHasher, 0ul, 0ul>, std::less<Consensus::LLMQType>, std::allocator<std::pair<Consensus::LLMQType const, unordered_lru_cache<uint256, std::shared_ptr<llmq::CQuorum>, StaticSaltedHasher, 0ul, 0ul>>>>&cache, bool limit_by_connections);
|
||||
template void InitQuorumsCache<std::map<Consensus::LLMQType, unordered_lru_cache<uint256, int, StaticSaltedHasher>>>(std::map<Consensus::LLMQType, unordered_lru_cache<uint256, int, StaticSaltedHasher>>& cache, bool limit_by_connections);
|
||||
|
||||
} // namespace utils
|
||||
|
||||
|
@ -30,10 +30,10 @@ namespace llmq
|
||||
class CQuorumManager;
|
||||
class CQuorumSnapshot;
|
||||
|
||||
// Use a separate cache instance instead of versionbitscache to avoid locking cs_main
|
||||
// A separate cache instance instead of versionbitscache has been introduced to avoid locking cs_main
|
||||
// and dealing with all kinds of deadlocks.
|
||||
extern Mutex cs_llmq_vbc;
|
||||
extern VersionBitsCache llmq_versionbitscache GUARDED_BY(cs_llmq_vbc);
|
||||
// TODO: drop llmq_versionbitscache completely so far as VersionBitsCache do not uses anymore cs_main
|
||||
extern VersionBitsCache llmq_versionbitscache;
|
||||
|
||||
static const bool DEFAULT_ENABLE_QUORUM_DATA_RECOVERY = true;
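
With cs_llmq_vbc gone, callers query llmq_versionbitscache directly; the backported VersionBitsCache methods handle their own locking, which is why the diff drops the external mutex and takes no cs_main. A minimal usage sketch, assuming the same chain params and deployment enum used by the surrounding code:

// Illustrative only: querying v20 status through the per-module cache.
void LogV20Status(const CBlockIndex* pindex)
{
    const auto& params = Params().GetConsensus();
    if (llmq::llmq_versionbitscache.State(pindex, params, Consensus::DEPLOYMENT_V20) == ThresholdState::ACTIVE) {
        const int since = llmq::llmq_versionbitscache.StateSinceHeight(pindex, params, Consensus::DEPLOYMENT_V20);
        LogPrintf("v20 active since height %d\n", since);
    }
}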
|
||||
|
||||
@ -120,7 +120,7 @@ void IterateNodesRandom(NodesContainer& nodeStates, Continue&& cont, Callback&&
|
||||
}
|
||||
|
||||
template <typename CacheType>
|
||||
void InitQuorumsCache(CacheType& cache);
|
||||
void InitQuorumsCache(CacheType& cache, bool limit_by_connections = true);
|
||||
|
||||
} // namespace utils
|
||||
|
||||
|
@ -34,7 +34,7 @@ UniValue CMasternodeMetaInfo::ToJson() const
|
||||
{
|
||||
UniValue ret(UniValue::VOBJ);
|
||||
|
||||
auto now = GetTime<std::chrono::seconds>().count();
|
||||
int64_t now = GetTime<std::chrono::seconds>().count();
|
||||
|
||||
ret.pushKV("lastDSQ", nLastDsq);
|
||||
ret.pushKV("mixingTxCount", nMixingTxCount);
|
||||
|
@ -13,6 +13,7 @@
|
||||
#include <consensus/merkle.h>
|
||||
#include <consensus/tx_verify.h>
|
||||
#include <consensus/validation.h>
|
||||
#include <deploymentstatus.h>
|
||||
#include <policy/feerate.h>
|
||||
#include <policy/policy.h>
|
||||
#include <pow.h>
|
||||
@ -132,11 +133,11 @@ std::unique_ptr<CBlockTemplate> BlockAssembler::CreateNewBlock(const CScript& sc
|
||||
assert(pindexPrev != nullptr);
|
||||
nHeight = pindexPrev->nHeight + 1;
|
||||
|
||||
bool fDIP0003Active_context = nHeight >= chainparams.GetConsensus().DIP0003Height;
|
||||
bool fDIP0008Active_context = nHeight >= chainparams.GetConsensus().DIP0008Height;
|
||||
bool fDIP0003Active_context = DeploymentActiveAfter(pindexPrev, chainparams.GetConsensus(), Consensus::DEPLOYMENT_DIP0003);
|
||||
bool fDIP0008Active_context = DeploymentActiveAfter(pindexPrev, chainparams.GetConsensus(), Consensus::DEPLOYMENT_DIP0008);
|
||||
bool fV20Active_context = llmq::utils::IsV20Active(pindexPrev);
|
||||
|
||||
pblock->nVersion = ComputeBlockVersion(pindexPrev, chainparams.GetConsensus());
|
||||
pblock->nVersion = g_versionbitscache.ComputeBlockVersion(pindexPrev, chainparams.GetConsensus());
|
||||
// Non-mainnet only: allow overriding block.nVersion with
|
||||
// -blockversion=N to test forking scenarios
|
||||
if (Params().NetworkIDString() != CBaseChainParams::MAIN)
|
||||
|
@ -8,6 +8,7 @@
|
||||
#include <banman.h>
|
||||
#include <chain.h>
|
||||
#include <chainparams.h>
|
||||
#include <deploymentstatus.h>
|
||||
#include <evo/deterministicmns.h>
|
||||
#include <governance/governance.h>
|
||||
#include <governance/object.h>
|
||||
|
@ -12,7 +12,10 @@
|
||||
#include <chainparams.h>
|
||||
#include <coins.h>
|
||||
#include <core_io.h>
|
||||
#include <consensus/params.h>
|
||||
#include <consensus/validation.h>
|
||||
#include <deploymentinfo.h>
|
||||
#include <deploymentstatus.h>
|
||||
#include <index/blockfilterindex.h>
|
||||
#include <index/coinstatsindex.h>
|
||||
#include <index/txindex.h>
|
||||
@ -35,7 +38,7 @@
|
||||
#include <util/system.h>
|
||||
#include <validation.h>
|
||||
#include <validationinterface.h>
|
||||
#include <versionbitsinfo.h>
|
||||
#include <versionbits.h>
|
||||
#include <warnings.h>
|
||||
|
||||
#include <evo/specialtx.h>
|
||||
@ -1572,25 +1575,25 @@ static UniValue verifychain(const JSONRPCRequest& request)
|
||||
active_chainstate, Params(), active_chainstate.CoinsTip(), *node.evodb, check_level, check_depth);
|
||||
}
|
||||
|
||||
static void BuriedForkDescPushBack(UniValue& softforks, const std::string &name, int softfork_height, int tip_height) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
static void SoftForkDescPushBack(const CBlockIndex* active_chain_tip, UniValue& softforks, const Consensus::Params& params, Consensus::BuriedDeployment dep)
{
// For buried deployments.
// A buried deployment is one where the height of the activation has been hardcoded into
// the client implementation long after the consensus change has activated. See BIP 90.
// Buried deployments with activation height value of
// std::numeric_limits<int>::max() are disabled and thus hidden.
if (softfork_height == std::numeric_limits<int>::max()) return;
if (!DeploymentEnabled(params, dep)) return;

UniValue rv(UniValue::VOBJ);
rv.pushKV("type", "buried");
// getblockchaininfo reports the softfork as active from when the chain height is
// one below the activation height
rv.pushKV("active", tip_height + 1 >= softfork_height);
rv.pushKV("height", softfork_height);
softforks.pushKV(name, rv);
rv.pushKV("active", DeploymentActiveAfter(active_chain_tip, params, dep));
rv.pushKV("height", params.DeploymentHeight(dep));
softforks.pushKV(DeploymentName(dep), rv);
}
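
With the switch to DeploymentName()/DeploymentHeight(), each buried fork appears in getblockchaininfo under its deployment name. For illustration, a sketch of how the helper is invoked and the rough shape of the resulting entry; SoftForkDescPushBack is file-static, so this presumes the same translation unit, and the name and height in the comment are example values only:

// Illustrative only: building one buried-softfork entry.
static UniValue BuriedSoftForkExample(const CBlockIndex* tip)
{
    UniValue softforks(UniValue::VOBJ);
    SoftForkDescPushBack(tip, softforks, Params().GetConsensus(), Consensus::DEPLOYMENT_DIP0003);
    // Rough shape of the resulting entry (values are examples, not a claim about any network):
    //   "dip0003": { "type": "buried", "active": true, "height": 1028160 }
    return softforks;
}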
|
||||
|
||||
static void BIP9SoftForkDescPushBack(const CBlockIndex* active_chain_tip, const std::unordered_map<uint8_t, int>& signals, UniValue& softforks, const std::string &name, const Consensus::Params& consensusParams, Consensus::DeploymentPos id) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
|
||||
static void SoftForkDescPushBack(const CBlockIndex* active_chain_tip, const std::unordered_map<uint8_t, int>& signals, UniValue& softforks, const Consensus::Params& consensusParams, Consensus::DeploymentPos id)
|
||||
{
|
||||
// For BIP9 deployments.
|
||||
// Deployments (e.g. testdummy) with timeout value before Jan 1, 2009 are hidden.
|
||||
@ -1599,7 +1602,7 @@ static void BIP9SoftForkDescPushBack(const CBlockIndex* active_chain_tip, const
|
||||
if (consensusParams.vDeployments[id].nTimeout <= 1230768000) return;
|
||||
|
||||
UniValue bip9(UniValue::VOBJ);
|
||||
const ThresholdState thresholdState = VersionBitsState(active_chain_tip, consensusParams, id, versionbitscache);
|
||||
const ThresholdState thresholdState = g_versionbitscache.State(active_chain_tip, consensusParams, id);
|
||||
switch (thresholdState) {
|
||||
case ThresholdState::DEFINED: bip9.pushKV("status", "defined"); break;
|
||||
case ThresholdState::STARTED: bip9.pushKV("status", "started"); break;
|
||||
@ -1617,12 +1620,12 @@ static void BIP9SoftForkDescPushBack(const CBlockIndex* active_chain_tip, const
|
||||
if (auto it = signals.find(consensusParams.vDeployments[id].bit); it != signals.end()) {
|
||||
bip9.pushKV("ehf_height", it->second);
|
||||
}
|
||||
int64_t since_height = VersionBitsStateSinceHeight(active_chain_tip, consensusParams, id, versionbitscache);
|
||||
int64_t since_height = g_versionbitscache.StateSinceHeight(active_chain_tip, consensusParams, id);
|
||||
bip9.pushKV("since", since_height);
|
||||
if (ThresholdState::STARTED == thresholdState)
|
||||
{
|
||||
UniValue statsUV(UniValue::VOBJ);
|
||||
BIP9Stats statsStruct = VersionBitsStatistics(active_chain_tip, consensusParams, id, versionbitscache);
|
||||
BIP9Stats statsStruct = g_versionbitscache.Statistics(active_chain_tip, consensusParams, id);
|
||||
statsUV.pushKV("period", statsStruct.period);
|
||||
statsUV.pushKV("threshold", statsStruct.threshold);
|
||||
statsUV.pushKV("elapsed", statsStruct.elapsed);
|
||||
@ -1642,7 +1645,7 @@ static void BIP9SoftForkDescPushBack(const CBlockIndex* active_chain_tip, const
|
||||
}
|
||||
rv.pushKV("active", ThresholdState::ACTIVE == thresholdState);
|
||||
|
||||
softforks.pushKV(name, rv);
|
||||
softforks.pushKV(DeploymentName(id), rv);
|
||||
}
|
||||
|
||||
UniValue getblockchaininfo(const JSONRPCRequest& request)
|
||||
@ -1743,23 +1746,23 @@ UniValue getblockchaininfo(const JSONRPCRequest& request)
|
||||
const Consensus::Params& consensusParams = Params().GetConsensus();
|
||||
UniValue softforks(UniValue::VOBJ);
|
||||
// sorted by activation block
|
||||
BuriedForkDescPushBack(softforks,"bip34", consensusParams.BIP34Height, height);
|
||||
BuriedForkDescPushBack(softforks,"bip66", consensusParams.BIP66Height, height);
|
||||
BuriedForkDescPushBack(softforks,"bip65", consensusParams.BIP65Height, height);
|
||||
BuriedForkDescPushBack(softforks,"bip147", consensusParams.BIP147Height, height);
|
||||
BuriedForkDescPushBack(softforks, "csv", consensusParams.CSVHeight, height);
|
||||
BuriedForkDescPushBack(softforks, "dip0001", consensusParams.DIP0001Height, height);
|
||||
BuriedForkDescPushBack(softforks, "dip0003", consensusParams.DIP0003Height, height);
|
||||
BuriedForkDescPushBack(softforks, "dip0008", consensusParams.DIP0008Height, height);
|
||||
BuriedForkDescPushBack(softforks, "dip0020", consensusParams.DIP0020Height, height);
|
||||
BuriedForkDescPushBack(softforks, "dip0024", consensusParams.DIP0024Height, height);
|
||||
BuriedForkDescPushBack(softforks, "realloc", consensusParams.BRRHeight, height);
|
||||
BuriedForkDescPushBack(softforks, "v19", consensusParams.V19Height, height);
|
||||
BIP9SoftForkDescPushBack(tip, ehfSignals, softforks, "v20", consensusParams, Consensus::DEPLOYMENT_V20);
|
||||
BIP9SoftForkDescPushBack(tip, ehfSignals, softforks, "mn_rr", consensusParams, Consensus::DEPLOYMENT_MN_RR);
|
||||
BIP9SoftForkDescPushBack(tip, ehfSignals, softforks, "testdummy", consensusParams, Consensus::DEPLOYMENT_TESTDUMMY);
|
||||
SoftForkDescPushBack(tip, softforks, consensusParams, Consensus::DEPLOYMENT_HEIGHTINCB);
|
||||
SoftForkDescPushBack(tip, softforks, consensusParams, Consensus::DEPLOYMENT_DERSIG);
|
||||
SoftForkDescPushBack(tip, softforks, consensusParams, Consensus::DEPLOYMENT_CLTV);
|
||||
SoftForkDescPushBack(tip, softforks, consensusParams, Consensus::DEPLOYMENT_BIP147);
|
||||
SoftForkDescPushBack(tip, softforks, consensusParams, Consensus::DEPLOYMENT_CSV);
|
||||
SoftForkDescPushBack(tip, softforks, consensusParams, Consensus::DEPLOYMENT_DIP0001);
|
||||
SoftForkDescPushBack(tip, softforks, consensusParams, Consensus::DEPLOYMENT_DIP0003);
|
||||
SoftForkDescPushBack(tip, softforks, consensusParams, Consensus::DEPLOYMENT_DIP0008);
|
||||
SoftForkDescPushBack(tip, softforks, consensusParams, Consensus::DEPLOYMENT_DIP0020);
|
||||
SoftForkDescPushBack(tip, softforks, consensusParams, Consensus::DEPLOYMENT_DIP0024);
|
||||
SoftForkDescPushBack(tip, softforks, consensusParams, Consensus::DEPLOYMENT_BRR);
|
||||
SoftForkDescPushBack(tip, softforks, consensusParams, Consensus::DEPLOYMENT_V19);
|
||||
SoftForkDescPushBack(tip, ehfSignals, softforks, consensusParams, Consensus::DEPLOYMENT_V20);
|
||||
SoftForkDescPushBack(tip, ehfSignals, softforks, consensusParams, Consensus::DEPLOYMENT_MN_RR);
|
||||
SoftForkDescPushBack(tip, ehfSignals, softforks, consensusParams, Consensus::DEPLOYMENT_TESTDUMMY);
|
||||
|
||||
obj.pushKV("softforks", softforks);
|
||||
obj.pushKV("softforks", softforks);
|
||||
|
||||
obj.pushKV("warnings", GetWarnings(false));
|
||||
return obj;
|
||||
|
@ -11,6 +11,8 @@
|
||||
#include <consensus/params.h>
|
||||
#include <consensus/validation.h>
|
||||
#include <core_io.h>
|
||||
#include <deploymentinfo.h>
|
||||
#include <deploymentstatus.h>
|
||||
#include <key_io.h>
|
||||
#include <llmq/blockprocessor.h>
|
||||
#include <llmq/context.h>
|
||||
@ -39,7 +41,6 @@
|
||||
#include <util/system.h>
|
||||
#include <validation.h>
|
||||
#include <validationinterface.h>
|
||||
#include <versionbitsinfo.h>
|
||||
#include <warnings.h>
|
||||
|
||||
#include <governance/classes.h>
|
||||
@ -851,7 +852,7 @@ static UniValue getblocktemplate(const JSONRPCRequest& request)
|
||||
UniValue vbavailable(UniValue::VOBJ);
|
||||
for (int j = 0; j < (int)Consensus::MAX_VERSION_BITS_DEPLOYMENTS; ++j) {
|
||||
Consensus::DeploymentPos pos = Consensus::DeploymentPos(j);
|
||||
ThresholdState state = VersionBitsState(pindexPrev, consensusParams, pos, versionbitscache);
|
||||
ThresholdState state = g_versionbitscache.State(pindexPrev, consensusParams, pos);
|
||||
switch (state) {
|
||||
case ThresholdState::DEFINED:
|
||||
case ThresholdState::FAILED:
|
||||
@ -859,7 +860,7 @@ static UniValue getblocktemplate(const JSONRPCRequest& request)
|
||||
break;
|
||||
case ThresholdState::LOCKED_IN:
|
||||
// Ensure bit is set in block version
|
||||
pblock->nVersion |= VersionBitsMask(consensusParams, pos);
|
||||
pblock->nVersion |= g_versionbitscache.Mask(consensusParams, pos);
|
||||
// FALL THROUGH to get vbavailable set...
|
||||
case ThresholdState::STARTED:
|
||||
{
|
||||
@ -868,7 +869,7 @@ static UniValue getblocktemplate(const JSONRPCRequest& request)
|
||||
if (setClientRules.find(vbinfo.name) == setClientRules.end()) {
|
||||
if (!vbinfo.gbt_force) {
|
||||
// If the client doesn't support this, don't indicate it in the [default] version
|
||||
pblock->nVersion &= ~VersionBitsMask(consensusParams, pos);
|
||||
pblock->nVersion &= ~g_versionbitscache.Mask(consensusParams, pos);
|
||||
}
|
||||
}
|
||||
break;
|
||||
@ -955,7 +956,7 @@ static UniValue getblocktemplate(const JSONRPCRequest& request)
|
||||
return result;
|
||||
}
|
||||
|
||||
class submitblock_StateCatcher : public CValidationInterface
|
||||
class submitblock_StateCatcher final : public CValidationInterface
|
||||
{
|
||||
public:
|
||||
uint256 hash;
|
||||
@ -1016,17 +1017,17 @@ static UniValue submitblock(const JSONRPCRequest& request)
|
||||
}
|
||||
|
||||
bool new_block;
|
||||
submitblock_StateCatcher sc(block.GetHash());
|
||||
RegisterValidationInterface(&sc);
|
||||
auto sc = std::make_shared<submitblock_StateCatcher>(block.GetHash());
|
||||
RegisterSharedValidationInterface(sc);
|
||||
bool accepted = chainman.ProcessNewBlock(Params(), blockptr, /* fForceProcessing */ true, /* fNewBlock */ &new_block);
|
||||
UnregisterValidationInterface(&sc);
|
||||
UnregisterSharedValidationInterface(sc);
|
||||
if (!new_block && accepted) {
|
||||
return "duplicate";
|
||||
}
|
||||
if (!sc.found) {
|
||||
if (!sc->found) {
|
||||
return "inconclusive";
|
||||
}
|
||||
return BIP22ValidationResult(sc.state);
|
||||
return BIP22ValidationResult(sc->state);
|
||||
}
|
||||
|
||||
static UniValue submitheader(const JSONRPCRequest& request)
|
||||
|
@ -6,6 +6,7 @@
|
||||
|
||||
#include <chainparams.h>
|
||||
#include <consensus/validation.h>
|
||||
#include <deploymentstatus.h>
|
||||
#include <evo/evodb.h>
|
||||
#include <governance/governance.h>
|
||||
#include <llmq/blockprocessor.h>
|
||||
@ -16,6 +17,7 @@
|
||||
#include <script/interpreter.h>
|
||||
#include <spork.h>
|
||||
#include <validation.h>
|
||||
#include <versionbits.h>
|
||||
|
||||
#include <boost/test/unit_test.hpp>
|
||||
|
||||
@ -53,9 +55,9 @@ struct TestChainDATSetup : public TestChainSetup
|
||||
}
|
||||
LOCK(cs_main);
|
||||
if (expected_lockin) {
|
||||
BOOST_CHECK_EQUAL(VersionBitsState(::ChainActive().Tip(), consensus_params, deployment_id, versionbitscache), ThresholdState::LOCKED_IN);
|
||||
BOOST_CHECK_EQUAL(g_versionbitscache.State(::ChainActive().Tip(), consensus_params, deployment_id), ThresholdState::LOCKED_IN);
|
||||
} else {
|
||||
BOOST_CHECK_EQUAL(VersionBitsState(::ChainActive().Tip(), consensus_params, deployment_id, versionbitscache), ThresholdState::STARTED);
|
||||
BOOST_CHECK_EQUAL(g_versionbitscache.State(::ChainActive().Tip(), consensus_params, deployment_id), ThresholdState::STARTED);
|
||||
}
|
||||
}
|
||||
|
||||
@ -67,7 +69,7 @@ struct TestChainDATSetup : public TestChainSetup
|
||||
{
|
||||
LOCK(cs_main);
|
||||
BOOST_CHECK_EQUAL(::ChainActive().Height(), window - 2);
|
||||
BOOST_CHECK_EQUAL(VersionBitsState(::ChainActive().Tip(), consensus_params, deployment_id, versionbitscache), ThresholdState::DEFINED);
|
||||
BOOST_CHECK_EQUAL(g_versionbitscache.State(::ChainActive().Tip(), consensus_params, deployment_id), ThresholdState::DEFINED);
|
||||
}
|
||||
|
||||
CreateAndProcessBlock({}, coinbaseKey);
|
||||
@ -76,8 +78,8 @@ struct TestChainDATSetup : public TestChainSetup
|
||||
LOCK(cs_main);
|
||||
// Advance from DEFINED to STARTED at height = window - 1
|
||||
BOOST_CHECK_EQUAL(::ChainActive().Height(), window - 1);
|
||||
BOOST_CHECK_EQUAL(VersionBitsState(::ChainActive().Tip(), consensus_params, deployment_id, versionbitscache), ThresholdState::STARTED);
|
||||
BOOST_CHECK_EQUAL(VersionBitsStatistics(::ChainActive().Tip(), consensus_params, deployment_id, versionbitscache).threshold, threshold(0));
|
||||
BOOST_CHECK_EQUAL(g_versionbitscache.State(::ChainActive().Tip(), consensus_params, deployment_id), ThresholdState::STARTED);
|
||||
BOOST_CHECK_EQUAL(g_versionbitscache.Statistics(::ChainActive().Tip(), consensus_params, deployment_id).threshold, threshold(0));
|
||||
// Next block should be signaling by default
|
||||
const auto pblocktemplate = BlockAssembler(*sporkManager, *governance, *m_node.llmq_ctx, *m_node.evodb, ::ChainstateActive(), *m_node.mempool, Params()).CreateNewBlock(coinbasePubKey);
|
||||
const uint32_t bitmask = ((uint32_t)1) << consensus_params.vDeployments[deployment_id].bit;
|
||||
@ -93,17 +95,17 @@ struct TestChainDATSetup : public TestChainSetup
|
||||
// Still STARTED but with a (potentially) new threshold
|
||||
LOCK(cs_main);
|
||||
BOOST_CHECK_EQUAL(::ChainActive().Height(), window * (i + 2) - 1);
|
||||
BOOST_CHECK_EQUAL(VersionBitsState(::ChainActive().Tip(), consensus_params, deployment_id, versionbitscache), ThresholdState::STARTED);
|
||||
const auto vbts = VersionBitsStatistics(::ChainActive().Tip(), consensus_params, deployment_id, versionbitscache);
|
||||
BOOST_CHECK_EQUAL(g_versionbitscache.State(::ChainActive().Tip(), consensus_params, deployment_id), ThresholdState::STARTED);
|
||||
const auto vbts = g_versionbitscache.Statistics(::ChainActive().Tip(), consensus_params, deployment_id);
|
||||
BOOST_CHECK_EQUAL(vbts.threshold, threshold(i + 1));
|
||||
BOOST_CHECK(vbts.threshold <= th_start);
|
||||
BOOST_CHECK(vbts.threshold >= th_end);
|
||||
}
|
||||
}
|
||||
if (LOCK(cs_main); check_activation_at_min) {
|
||||
BOOST_CHECK_EQUAL(VersionBitsStatistics(::ChainActive().Tip(), consensus_params, deployment_id, versionbitscache).threshold, th_end);
|
||||
BOOST_CHECK_EQUAL(g_versionbitscache.Statistics(::ChainActive().Tip(), consensus_params, deployment_id).threshold, th_end);
|
||||
} else {
|
||||
BOOST_CHECK(VersionBitsStatistics(::ChainActive().Tip(), consensus_params, deployment_id, versionbitscache).threshold > th_end);
|
||||
BOOST_CHECK(g_versionbitscache.Statistics(::ChainActive().Tip(), consensus_params, deployment_id).threshold > th_end);
|
||||
}
|
||||
|
||||
// activate
|
||||
@ -113,7 +115,7 @@ struct TestChainDATSetup : public TestChainSetup
|
||||
}
|
||||
{
|
||||
LOCK(cs_main);
|
||||
BOOST_CHECK_EQUAL(VersionBitsState(::ChainActive().Tip(), consensus_params, deployment_id, versionbitscache), ThresholdState::ACTIVE);
|
||||
BOOST_CHECK_EQUAL(g_versionbitscache.State(::ChainActive().Tip(), consensus_params, deployment_id), ThresholdState::ACTIVE);
|
||||
}
|
||||
|
||||
}
|
||||
|
@ -40,7 +40,7 @@ static const std::vector<unsigned char> V_OP_TRUE{OP_TRUE};
|
||||
|
||||
BOOST_FIXTURE_TEST_SUITE(validation_block_tests, MinerTestingSetup)
|
||||
|
||||
struct TestSubscriber : public CValidationInterface {
|
||||
struct TestSubscriber final : public CValidationInterface {
|
||||
uint256 m_expected_tip;
|
||||
|
||||
explicit TestSubscriber(uint256 tip) : m_expected_tip(tip) {}
|
||||
@ -179,8 +179,8 @@ BOOST_AUTO_TEST_CASE(processnewblock_signals_ordering)
|
||||
LOCK(cs_main);
|
||||
initial_tip = ::ChainActive().Tip();
|
||||
}
|
||||
TestSubscriber sub(initial_tip->GetBlockHash());
|
||||
RegisterValidationInterface(&sub);
|
||||
auto sub = std::make_shared<TestSubscriber>(initial_tip->GetBlockHash());
|
||||
RegisterSharedValidationInterface(sub);
|
||||
|
||||
// create a bunch of threads that repeatedly process a block generated above at random
|
||||
// this will create parallelism and randomness inside validation - the ValidationInterface
|
||||
@ -208,14 +208,12 @@ BOOST_AUTO_TEST_CASE(processnewblock_signals_ordering)
|
||||
for (auto& t : threads) {
|
||||
t.join();
|
||||
}
|
||||
while (GetMainSignals().CallbacksPending() > 0) {
|
||||
UninterruptibleSleep(std::chrono::milliseconds{100});
|
||||
}
|
||||
SyncWithValidationInterfaceQueue();
|
||||
|
||||
UnregisterValidationInterface(&sub);
|
||||
UnregisterSharedValidationInterface(sub);
|
||||
|
||||
LOCK(cs_main);
|
||||
BOOST_CHECK_EQUAL(sub.m_expected_tip, ::ChainActive().Tip()->GetBlockHash());
|
||||
BOOST_CHECK_EQUAL(sub->m_expected_tip, ::ChainActive().Tip()->GetBlockHash());
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -10,7 +10,41 @@
|
||||
#include <util/check.h>
|
||||
#include <validationinterface.h>
|
||||
|
||||
BOOST_FIXTURE_TEST_SUITE(validationinterface_tests, TestingSetup)
|
||||
BOOST_FIXTURE_TEST_SUITE(validationinterface_tests, ChainTestingSetup)
|
||||
|
||||
struct TestSubscriberNoop final : public CValidationInterface {
|
||||
void BlockChecked(const CBlock&, const BlockValidationState&) override {}
|
||||
};
|
||||
|
||||
BOOST_AUTO_TEST_CASE(unregister_validation_interface_race)
|
||||
{
|
||||
std::atomic<bool> generate{true};
|
||||
|
||||
// Start thread to generate notifications
|
||||
std::thread gen{[&] {
|
||||
const CBlock block_dummy;
|
||||
const BlockValidationState state_dummy;
|
||||
while (generate) {
|
||||
GetMainSignals().BlockChecked(block_dummy, state_dummy);
|
||||
}
|
||||
}};
|
||||
|
||||
// Start thread to consume notifications
|
||||
std::thread sub{[&] {
|
||||
// keep going for about 1 sec, which is 250k iterations
|
||||
for (int i = 0; i < 250000; i++) {
|
||||
auto sub = std::make_shared<TestSubscriberNoop>();
|
||||
RegisterSharedValidationInterface(sub);
|
||||
UnregisterSharedValidationInterface(sub);
|
||||
}
|
||||
// tell the other thread we are done
|
||||
generate = false;
|
||||
}};
|
||||
|
||||
gen.join();
|
||||
sub.join();
|
||||
BOOST_CHECK(!generate);
|
||||
}
|
||||
|
||||
class TestInterface : public CValidationInterface
|
||||
{
|
||||
|
@ -5,6 +5,7 @@
|
||||
#include <chain.h>
|
||||
#include <chainparams.h>
|
||||
#include <consensus/params.h>
|
||||
#include <deploymentstatus.h>
|
||||
#include <test/util/setup_common.h>
|
||||
#include <validation.h>
|
||||
#include <versionbits.h>
|
||||
@ -227,7 +228,7 @@ BOOST_AUTO_TEST_CASE(versionbits_test)
|
||||
const auto chainParams = CreateChainParams(CBaseChainParams::MAIN);
|
||||
const Consensus::Params &mainnetParams = chainParams->GetConsensus();
|
||||
for (int i=0; i<(int) Consensus::MAX_VERSION_BITS_DEPLOYMENTS; i++) {
|
||||
uint32_t bitmask = VersionBitsMask(mainnetParams, static_cast<Consensus::DeploymentPos>(i));
|
||||
uint32_t bitmask = g_versionbitscache.Mask(mainnetParams, static_cast<Consensus::DeploymentPos>(i));
|
||||
// Make sure that no deployment tries to set an invalid bit.
|
||||
BOOST_CHECK_EQUAL(bitmask & ~(uint32_t)VERSIONBITS_TOP_MASK, bitmask);
|
||||
|
||||
@ -239,7 +240,7 @@ BOOST_AUTO_TEST_CASE(versionbits_test)
|
||||
// activated soft fork could be later changed to be earlier to avoid
|
||||
// overlap.)
|
||||
for (int j=i+1; j<(int) Consensus::MAX_VERSION_BITS_DEPLOYMENTS; j++) {
|
||||
if (VersionBitsMask(mainnetParams, static_cast<Consensus::DeploymentPos>(j)) == bitmask) {
|
||||
if (g_versionbitscache.Mask(mainnetParams, static_cast<Consensus::DeploymentPos>(j)) == bitmask) {
|
||||
BOOST_CHECK(mainnetParams.vDeployments[j].nStartTime > mainnetParams.vDeployments[i].nTimeout ||
|
||||
mainnetParams.vDeployments[i].nStartTime > mainnetParams.vDeployments[j].nTimeout);
|
||||
}
|
||||
@ -273,29 +274,29 @@ BOOST_AUTO_TEST_CASE(versionbits_computeblockversion)
|
||||
// should not be set.
|
||||
CBlockIndex *lastBlock = nullptr;
|
||||
lastBlock = firstChain.Mine(mainnetParams.nMinerConfirmationWindow, nTime, VERSIONBITS_LAST_OLD_BLOCK_VERSION).Tip();
|
||||
BOOST_CHECK_EQUAL(ComputeBlockVersion(lastBlock, mainnetParams) & (1<<bit), 0);
|
||||
BOOST_CHECK_EQUAL(g_versionbitscache.ComputeBlockVersion(lastBlock, mainnetParams) & (1<<bit), 0);
|
||||
|
||||
// Mine more blocks (4 less than the adjustment period) at the old time, and check that CBV isn't setting the bit yet.
|
||||
for (uint32_t i = 1; i < mainnetParams.nMinerConfirmationWindow - 4; i++) {
|
||||
lastBlock = firstChain.Mine(mainnetParams.nMinerConfirmationWindow + i, nTime, VERSIONBITS_LAST_OLD_BLOCK_VERSION).Tip();
|
||||
// This works because VERSIONBITS_LAST_OLD_BLOCK_VERSION happens
|
||||
// to be 4, and the bit we're testing happens to be bit 28.
|
||||
BOOST_CHECK_EQUAL(ComputeBlockVersion(lastBlock, mainnetParams) & (1<<bit), 0);
|
||||
BOOST_CHECK_EQUAL(g_versionbitscache.ComputeBlockVersion(lastBlock, mainnetParams) & (1<<bit), 0);
|
||||
}
|
||||
// Now mine 5 more blocks at the start time -- MTP should not have passed yet, so
|
||||
// CBV should still not yet set the bit.
|
||||
nTime = nStartTime;
|
||||
for (uint32_t i = mainnetParams.nMinerConfirmationWindow - 4; i <= mainnetParams.nMinerConfirmationWindow; i++) {
|
||||
lastBlock = firstChain.Mine(mainnetParams.nMinerConfirmationWindow + i, nTime, VERSIONBITS_LAST_OLD_BLOCK_VERSION).Tip();
|
||||
BOOST_CHECK_EQUAL(ComputeBlockVersion(lastBlock, mainnetParams) & (1<<bit), 0);
|
||||
BOOST_CHECK_EQUAL(g_versionbitscache.ComputeBlockVersion(lastBlock, mainnetParams) & (1<<bit), 0);
|
||||
}
|
||||
|
||||
// Advance to the next period and transition to STARTED,
|
||||
lastBlock = firstChain.Mine(mainnetParams.nMinerConfirmationWindow * 3, nTime, VERSIONBITS_LAST_OLD_BLOCK_VERSION).Tip();
|
||||
// so ComputeBlockVersion should now set the bit,
|
||||
BOOST_CHECK((ComputeBlockVersion(lastBlock, mainnetParams) & (1<<bit)) != 0);
|
||||
BOOST_CHECK((g_versionbitscache.ComputeBlockVersion(lastBlock, mainnetParams) & (1<<bit)) != 0);
|
||||
// and should also be using the VERSIONBITS_TOP_BITS.
|
||||
BOOST_CHECK_EQUAL(ComputeBlockVersion(lastBlock, mainnetParams) & VERSIONBITS_TOP_MASK, VERSIONBITS_TOP_BITS);
|
||||
BOOST_CHECK_EQUAL(g_versionbitscache.ComputeBlockVersion(lastBlock, mainnetParams) & VERSIONBITS_TOP_MASK, VERSIONBITS_TOP_BITS);
|
||||
|
||||
// Check that ComputeBlockVersion will set the bit until nTimeout
|
||||
nTime += 600;
|
||||
@ -304,8 +305,8 @@ BOOST_AUTO_TEST_CASE(versionbits_computeblockversion)
|
||||
// These blocks are all before nTimeout is reached.
|
||||
while (nTime < nTimeout && blocksToMine > 0) {
|
||||
lastBlock = firstChain.Mine(nHeight+1, nTime, VERSIONBITS_LAST_OLD_BLOCK_VERSION).Tip();
|
||||
BOOST_CHECK((ComputeBlockVersion(lastBlock, mainnetParams) & (1<<bit)) != 0);
|
||||
BOOST_CHECK_EQUAL(ComputeBlockVersion(lastBlock, mainnetParams) & VERSIONBITS_TOP_MASK, VERSIONBITS_TOP_BITS);
|
||||
BOOST_CHECK((g_versionbitscache.ComputeBlockVersion(lastBlock, mainnetParams) & (1<<bit)) != 0);
|
||||
BOOST_CHECK_EQUAL(g_versionbitscache.ComputeBlockVersion(lastBlock, mainnetParams) & VERSIONBITS_TOP_MASK, VERSIONBITS_TOP_BITS);
|
||||
blocksToMine--;
|
||||
nTime += 600;
|
||||
nHeight += 1;
|
||||
@ -316,12 +317,12 @@ BOOST_AUTO_TEST_CASE(versionbits_computeblockversion)
|
||||
// the bit until the period transition.
|
||||
for (uint32_t i = 0; i < mainnetParams.nMinerConfirmationWindow - 1; i++) {
|
||||
lastBlock = firstChain.Mine(nHeight+1, nTime, VERSIONBITS_LAST_OLD_BLOCK_VERSION).Tip();
|
||||
BOOST_CHECK((ComputeBlockVersion(lastBlock, mainnetParams) & (1<<bit)) != 0);
|
||||
BOOST_CHECK((g_versionbitscache.ComputeBlockVersion(lastBlock, mainnetParams) & (1<<bit)) != 0);
|
||||
nHeight += 1;
|
||||
}
|
||||
// The next block should trigger no longer setting the bit.
|
||||
lastBlock = firstChain.Mine(nHeight+1, nTime, VERSIONBITS_LAST_OLD_BLOCK_VERSION).Tip();
|
||||
BOOST_CHECK_EQUAL(ComputeBlockVersion(lastBlock, mainnetParams) & (1<<bit), 0);
|
||||
BOOST_CHECK_EQUAL(g_versionbitscache.ComputeBlockVersion(lastBlock, mainnetParams) & (1<<bit), 0);
|
||||
|
||||
// On a new chain:
|
||||
// verify that the bit will be set after lock-in, and then stop being set
|
||||
@ -331,24 +332,24 @@ BOOST_AUTO_TEST_CASE(versionbits_computeblockversion)
|
||||
// Mine one period worth of blocks, and check that the bit will be on for the
|
||||
// next period.
|
||||
lastBlock = secondChain.Mine(mainnetParams.nMinerConfirmationWindow, nTime, VERSIONBITS_LAST_OLD_BLOCK_VERSION).Tip();
|
||||
BOOST_CHECK((ComputeBlockVersion(lastBlock, mainnetParams) & (1<<bit)) != 0);
|
||||
BOOST_CHECK((g_versionbitscache.ComputeBlockVersion(lastBlock, mainnetParams) & (1<<bit)) != 0);
|
||||
|
||||
// Mine another period worth of blocks, signaling the new bit.
|
||||
lastBlock = secondChain.Mine(mainnetParams.nMinerConfirmationWindow * 2, nTime, VERSIONBITS_TOP_BITS | (1<<bit)).Tip();
|
||||
// After one period of setting the bit on each block, it should have locked in.
|
||||
// We keep setting the bit for one more period though, until activation.
|
||||
BOOST_CHECK((ComputeBlockVersion(lastBlock, mainnetParams) & (1<<bit)) != 0);
|
||||
BOOST_CHECK((g_versionbitscache.ComputeBlockVersion(lastBlock, mainnetParams) & (1<<bit)) != 0);
|
||||
|
||||
// Now check that we keep mining the block until the end of this period, and
|
||||
// then stop at the beginning of the next period.
|
||||
lastBlock = secondChain.Mine((mainnetParams.nMinerConfirmationWindow * 3) - 1, nTime, VERSIONBITS_LAST_OLD_BLOCK_VERSION).Tip();
|
||||
BOOST_CHECK((ComputeBlockVersion(lastBlock, mainnetParams) & (1 << bit)) != 0);
|
||||
BOOST_CHECK((g_versionbitscache.ComputeBlockVersion(lastBlock, mainnetParams) & (1 << bit)) != 0);
|
||||
lastBlock = secondChain.Mine(mainnetParams.nMinerConfirmationWindow * 3, nTime, VERSIONBITS_LAST_OLD_BLOCK_VERSION).Tip();
|
||||
BOOST_CHECK_EQUAL(ComputeBlockVersion(lastBlock, mainnetParams) & (1<<bit), 0);
|
||||
BOOST_CHECK_EQUAL(g_versionbitscache.ComputeBlockVersion(lastBlock, mainnetParams) & (1<<bit), 0);
|
||||
|
||||
// Finally, verify that after a soft fork has activated, CBV no longer uses
|
||||
// VERSIONBITS_LAST_OLD_BLOCK_VERSION.
|
||||
//BOOST_CHECK_EQUAL(ComputeBlockVersion(lastBlock, mainnetParams) & VERSIONBITS_TOP_MASK, VERSIONBITS_TOP_BITS);
|
||||
//BOOST_CHECK_EQUAL(g_versionbitscache.ComputeBlockVersion(lastBlock, mainnetParams) & VERSIONBITS_TOP_MASK, VERSIONBITS_TOP_BITS);
|
||||
}
|
||||
|
||||
|
||||
|
@ -16,6 +16,7 @@
|
||||
#include <consensus/tx_verify.h>
|
||||
#include <consensus/validation.h>
|
||||
#include <cuckoocache.h>
|
||||
#include <deploymentstatus.h>
|
||||
#include <flatfile.h>
|
||||
#include <hash.h>
|
||||
#include <index/blockfilterindex.h>
|
||||
@ -157,6 +158,7 @@ bool fCheckpointsEnabled = DEFAULT_CHECKPOINTS_ENABLED;
|
||||
uint64_t nPruneTarget = 0;
|
||||
int64_t nMaxTipAge = DEFAULT_MAX_TIP_AGE;
|
||||
|
||||
// TODO: drop this global variable
|
||||
std::atomic<bool> fDIP0001ActiveAtTip{false};
|
||||
|
||||
uint256 hashAssumeValid;
|
||||
@ -1923,24 +1925,6 @@ void StopScriptCheckWorkerThreads()
|
||||
scriptcheckqueue.StopWorkerThreads();
|
||||
}
|
||||
|
||||
VersionBitsCache versionbitscache GUARDED_BY(cs_main);
|
||||
|
||||
int32_t ComputeBlockVersion(const CBlockIndex* pindexPrev, const Consensus::Params& params)
|
||||
{
|
||||
LOCK(cs_main);
|
||||
int32_t nVersion = VERSIONBITS_TOP_BITS;
|
||||
|
||||
for (int i = 0; i < (int)Consensus::MAX_VERSION_BITS_DEPLOYMENTS; i++) {
|
||||
Consensus::DeploymentPos pos = Consensus::DeploymentPos(i);
|
||||
ThresholdState state = VersionBitsState(pindexPrev, params, pos, versionbitscache);
|
||||
if (state == ThresholdState::LOCKED_IN || state == ThresholdState::STARTED) {
|
||||
nVersion |= VersionBitsMask(params, static_cast<Consensus::DeploymentPos>(i));
|
||||
}
|
||||
}
|
||||
|
||||
return nVersion;
|
||||
}
|
||||
|
||||
bool GetBlockHash(uint256& hashRet, int nBlockHeight)
|
||||
{
|
||||
LOCK(cs_main);
|
||||
@ -1973,15 +1957,14 @@ public:
|
||||
return pindex->nHeight >= params.MinBIP9WarningHeight &&
|
||||
((pindex->nVersion & VERSIONBITS_TOP_MASK) == VERSIONBITS_TOP_BITS) &&
|
||||
((pindex->nVersion >> bit) & 1) != 0 &&
|
||||
((ComputeBlockVersion(pindex->pprev, params) >> bit) & 1) == 0;
|
||||
((g_versionbitscache.ComputeBlockVersion(pindex->pprev, params) >> bit) & 1) == 0;
|
||||
}
|
||||
};
|
||||
|
||||
static ThresholdConditionCache warningcache[VERSIONBITS_NUM_BITS] GUARDED_BY(cs_main);
|
||||
|
||||
static unsigned int GetBlockScriptFlags(const CBlockIndex* pindex, const Consensus::Params& consensusparams) EXCLUSIVE_LOCKS_REQUIRED(cs_main) {
|
||||
AssertLockHeld(cs_main);
|
||||
|
||||
static unsigned int GetBlockScriptFlags(const CBlockIndex* pindex, const Consensus::Params& consensusparams)
|
||||
{
|
||||
unsigned int flags = SCRIPT_VERIFY_NONE;
|
||||
|
||||
// Start enforcing P2SH (BIP16)
|
||||
@ -1989,27 +1972,28 @@ static unsigned int GetBlockScriptFlags(const CBlockIndex* pindex, const Consens
|
||||
flags |= SCRIPT_VERIFY_P2SH;
|
||||
}
|
||||
|
||||
// Start enforcing the DERSIG (BIP66) rule
|
||||
if (pindex->nHeight >= consensusparams.BIP66Height) {
|
||||
// Enforce the DERSIG (BIP66) rule
|
||||
if (DeploymentActiveAt(*pindex, consensusparams, Consensus::DEPLOYMENT_DERSIG)) {
|
||||
flags |= SCRIPT_VERIFY_DERSIG;
|
||||
}
|
||||
|
||||
// Start enforcing CHECKLOCKTIMEVERIFY (BIP65) rule
|
||||
if (pindex->nHeight >= consensusparams.BIP65Height) {
|
||||
// Enforce CHECKLOCKTIMEVERIFY (BIP65)
|
||||
if (DeploymentActiveAt(*pindex, consensusparams, Consensus::DEPLOYMENT_CLTV)) {
|
||||
flags |= SCRIPT_VERIFY_CHECKLOCKTIMEVERIFY;
|
||||
}
|
||||
|
||||
// Start enforcing BIP112 (CHECKSEQUENCEVERIFY)
|
||||
if (pindex->nHeight >= consensusparams.CSVHeight) {
|
||||
// Enforce CHECKSEQUENCEVERIFY (BIP112)
|
||||
if (DeploymentActiveAt(*pindex, consensusparams, Consensus::DEPLOYMENT_CSV)) {
|
||||
flags |= SCRIPT_VERIFY_CHECKSEQUENCEVERIFY;
|
||||
}
|
||||
|
||||
// Start enforcing BIP147 (NULLDUMMY) rule using versionbits logic.
|
||||
if (pindex->nHeight >= consensusparams.BIP147Height) {
|
||||
// Enforce BIP147 NULLDUMMY
|
||||
if (DeploymentActiveAt(*pindex, consensusparams, Consensus::DEPLOYMENT_BIP147)) {
|
||||
flags |= SCRIPT_VERIFY_NULLDUMMY;
|
||||
}
|
||||
|
||||
if (pindex->nHeight >= consensusparams.DIP0020Height) {
|
||||
// Enforce DIP0020
|
||||
if (DeploymentActiveAt(*pindex, consensusparams, Consensus::DEPLOYMENT_DIP0020)) {
|
||||
flags |= SCRIPT_ENABLE_DIP0020_OPCODES;
|
||||
}
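
For buried deployments the new helpers reduce to the same height comparison the old code spelled out by hand, so the behaviour of GetBlockScriptFlags() is unchanged. A minimal sketch of that equivalence, under the assumption that DeploymentHeight() maps DEPLOYMENT_DERSIG back to BIP66Height as the buried-deployment machinery used in this diff does:

// Illustrative only: the old and new DERSIG gates should agree at every height.
static bool DersigGatesMatch(const CBlockIndex& index, const Consensus::Params& params)
{
    const bool old_style = index.nHeight >= params.BIP66Height;
    const bool new_style = DeploymentActiveAt(index, params, Consensus::DEPLOYMENT_DERSIG);
    return old_style == new_style; // expected to always hold
}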
|
||||
|
||||
@ -2184,9 +2168,9 @@ bool CChainState::ConnectBlock(const CBlock& block, BlockValidationState& state,
|
||||
}
|
||||
/// END DASH
|
||||
|
||||
// Start enforcing BIP68 (sequence locks)
|
||||
// Enforce BIP68 (sequence locks)
|
||||
int nLockTimeFlags = 0;
|
||||
if (pindex->nHeight >= m_params.GetConsensus().CSVHeight) {
|
||||
if (DeploymentActiveAt(*pindex, m_params.GetConsensus(), Consensus::DEPLOYMENT_CSV)) {
|
||||
nLockTimeFlags |= LOCKTIME_VERIFY_SEQUENCE;
|
||||
}
|
||||
|
||||
@ -3926,12 +3910,13 @@ static bool ContextualCheckBlockHeader(const CBlockHeader& block, BlockValidatio
if (block.GetBlockTime() > nAdjustedTime + MAX_FUTURE_BLOCK_TIME)
return state.Invalid(BlockValidationResult::BLOCK_TIME_FUTURE, "time-too-new", strprintf("block timestamp too far in the future %d %d", block.GetBlockTime(), nAdjustedTime + 2 * 60 * 60));

// check for version 2, 3 and 4 upgrades
if((block.nVersion < 2 && nHeight >= consensusParams.BIP34Height) ||
(block.nVersion < 3 && nHeight >= consensusParams.BIP66Height) ||
(block.nVersion < 4 && nHeight >= consensusParams.BIP65Height))
// Reject blocks with outdated version
if ((block.nVersion < 2 && DeploymentActiveAfter(pindexPrev, consensusParams, Consensus::DEPLOYMENT_HEIGHTINCB)) ||
(block.nVersion < 3 && DeploymentActiveAfter(pindexPrev, consensusParams, Consensus::DEPLOYMENT_DERSIG)) ||
(block.nVersion < 4 && DeploymentActiveAfter(pindexPrev, consensusParams, Consensus::DEPLOYMENT_CLTV))) {
return state.Invalid(BlockValidationResult::BLOCK_INVALID_HEADER, strprintf("bad-version(0x%08x)", block.nVersion),
strprintf("rejected nVersion=0x%08x block", block.nVersion));
}

return true;
}
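DeploymentActiveAfter(pindexPrev, ...) asks whether a rule is active for the block that would follow pindexPrev, so headers that still carry an nVersion older than the active rules are rejected. A simplified, height-based model of that check; UpgradeHeights and IsVersionAcceptable are illustrative names.

#include <cstdint>

// Simplified model: each rule becomes mandatory at a given height, and a
// header building on top of nPrevHeight must carry at least the matching
// version number.
struct UpgradeHeights {
    int BIP34Height; // requires nVersion >= 2
    int BIP66Height; // requires nVersion >= 3
    int BIP65Height; // requires nVersion >= 4
};

bool IsVersionAcceptable(int32_t nVersion, int nPrevHeight, const UpgradeHeights& h)
{
    const int nHeight = nPrevHeight + 1; // "active after pindexPrev"
    if (nVersion < 2 && nHeight >= h.BIP34Height) return false;
    if (nVersion < 3 && nHeight >= h.BIP66Height) return false;
    if (nVersion < 4 && nHeight >= h.BIP65Height) return false;
    return true;
}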
@ -3947,9 +3932,9 @@ static bool ContextualCheckBlock(const CBlock& block, BlockValidationState& stat
AssertLockHeld(cs_main);
const int nHeight = pindexPrev == nullptr ? 0 : pindexPrev->nHeight + 1;

// Start enforcing BIP113 (Median Time Past).
// Enforce BIP113 (Median Time Past).
int nLockTimeFlags = 0;
if (nHeight >= consensusParams.CSVHeight) {
if (DeploymentActiveAfter(pindexPrev, consensusParams, Consensus::DEPLOYMENT_CSV)) {
assert(pindexPrev != nullptr);
nLockTimeFlags |= LOCKTIME_MEDIAN_TIME_PAST;
}
@ -3991,7 +3976,7 @@ static bool ContextualCheckBlock(const CBlock& block, BlockValidationState& stat
// Enforce rule that the coinbase starts with serialized block height
// After DIP3/DIP4 activation, we don't enforce the height in the input script anymore.
// The CbTx special transaction payload will then contain the height, which is checked in CheckCbTx
if (nHeight >= consensusParams.BIP34Height && !fDIP0003Active_context)
if (DeploymentActiveAfter(pindexPrev, consensusParams, Consensus::DEPLOYMENT_HEIGHTINCB) && !fDIP0003Active_context)
{
CScript expect = CScript() << nHeight;
if (block.vtx[0]->vin[0].scriptSig.size() < expect.size() ||
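The coinbase rule compares the start of the scriptSig against CScript() << nHeight. The sketch below approximates how a height is serialized for that comparison: a minimally-encoded little-endian number preceded by its push length. It ignores the dedicated opcodes the real serializer uses for 0 and for the values 1 through 16, so treat it as an approximation.

#include <cstdint>
#include <vector>

// Approximate serialization of a block height as pushed into the coinbase
// scriptSig for the BIP34 check: little-endian, minimally encoded, preceded
// by its push length (heights here are assumed to be larger than 16).
std::vector<unsigned char> SerializeHeightPush(int64_t nHeight)
{
    std::vector<unsigned char> num;
    while (nHeight > 0) {
        num.push_back(static_cast<unsigned char>(nHeight & 0xff));
        nHeight >>= 8;
    }
    // If the top bit of the last byte is set, append 0x00 so the number is
    // not interpreted as negative (CScriptNum convention).
    if (!num.empty() && (num.back() & 0x80)) num.push_back(0x00);

    std::vector<unsigned char> script;
    script.push_back(static_cast<unsigned char>(num.size())); // direct push opcode
    script.insert(script.end(), num.begin(), num.end());
    return script;
}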
@ -5041,7 +5026,7 @@ void UnloadBlockIndex(CTxMemPool* mempool, ChainstateManager& chainman)
nLastBlockFile = 0;
setDirtyBlockIndex.clear();
setDirtyFileInfo.clear();
versionbitscache.Clear();
g_versionbitscache.Clear();
for (int b = 0; b < VERSIONBITS_NUM_BITS; b++) {
warningcache[b].clear();
}
@ -23,7 +23,6 @@
#include <sync.h>
#include <txdb.h>
#include <txmempool.h> // For CTxMemPool::cs
#include <versionbits.h>
#include <serialize.h>
#include <spentindex.h>
#include <util/hasher.h>
@ -1071,12 +1070,6 @@ CChain& ChainActive();
/** Global variable that points to the active block tree (protected by cs_main) */
extern std::unique_ptr<CBlockTreeDB> pblocktree;

extern VersionBitsCache versionbitscache;

/**
* Determine what nVersion a new block should use.
*/
int32_t ComputeBlockVersion(const CBlockIndex* pindexPrev, const Consensus::Params& params);

/**
* Return true if hash can be found in ::ChainActive() at nBlockHeight height.
@ -94,22 +94,26 @@ public:

static CMainSignals g_signals;

void CMainSignals::RegisterBackgroundSignalScheduler(CScheduler& scheduler) {
void CMainSignals::RegisterBackgroundSignalScheduler(CScheduler& scheduler)
{
assert(!m_internals);
m_internals.reset(new MainSignalsInstance(&scheduler));
}

void CMainSignals::UnregisterBackgroundSignalScheduler() {
void CMainSignals::UnregisterBackgroundSignalScheduler()
{
m_internals.reset(nullptr);
}

void CMainSignals::FlushBackgroundCallbacks() {
void CMainSignals::FlushBackgroundCallbacks()
{
if (m_internals) {
m_internals->m_schedulerClient.EmptyQueue();
}
}

size_t CMainSignals::CallbacksPending() {
size_t CMainSignals::CallbacksPending()
{
if (!m_internals) return 0;
return m_internals->m_schedulerClient.CallbacksPending();
}
@ -119,10 +123,11 @@ CMainSignals& GetMainSignals()
return g_signals;
}

void RegisterSharedValidationInterface(std::shared_ptr<CValidationInterface> pwalletIn) {
// Each connection captures pwalletIn to ensure that each callback is
// executed before pwalletIn is destroyed. For more details see #18338.
g_signals.m_internals->Register(std::move(pwalletIn));
void RegisterSharedValidationInterface(std::shared_ptr<CValidationInterface> callbacks)
{
// Each connection captures the shared_ptr to ensure that each callback is
// executed before the subscriber is destroyed. For more details see #18338.
g_signals.m_internals->Register(std::move(callbacks));
}

void RegisterValidationInterface(CValidationInterface* callbacks)
@ -137,24 +142,28 @@ void UnregisterSharedValidationInterface(std::shared_ptr<CValidationInterface> c
UnregisterValidationInterface(callbacks.get());
}

void UnregisterValidationInterface(CValidationInterface* pwalletIn) {
void UnregisterValidationInterface(CValidationInterface* callbacks)
{
if (g_signals.m_internals) {
g_signals.m_internals->Unregister(pwalletIn);
g_signals.m_internals->Unregister(callbacks);
}
}

void UnregisterAllValidationInterfaces() {
void UnregisterAllValidationInterfaces()
{
if (!g_signals.m_internals) {
return;
}
g_signals.m_internals->Clear();
}

void CallFunctionInValidationInterfaceQueue(std::function<void ()> func) {
void CallFunctionInValidationInterfaceQueue(std::function<void()> func)
{
g_signals.m_internals->m_schedulerClient.AddToProcessQueue(std::move(func));
}

void SyncWithValidationInterfaceQueue() {
void SyncWithValidationInterfaceQueue()
{
AssertLockNotHeld(cs_main);
// Block until the validation queue drains
std::promise<void> promise;
@ -33,20 +33,20 @@ namespace llmq {
class CRecoveredSig;
} // namespace llmq

// These functions dispatch to one or all registered wallets

/** Register a wallet to receive updates from core */
void RegisterValidationInterface(CValidationInterface* pwalletIn);
/** Unregister a wallet from core */
void UnregisterValidationInterface(CValidationInterface* pwalletIn);
/** Unregister all wallets from core */
/** Register subscriber */
void RegisterValidationInterface(CValidationInterface* callbacks);
/** Unregister subscriber. DEPRECATED. This is not safe to use when the RPC server or main message handler thread is running. */
void UnregisterValidationInterface(CValidationInterface* callbacks);
/** Unregister all subscribers */
void UnregisterAllValidationInterfaces();

// Alternate registration functions that release a shared_ptr after the last
// notification is sent. These are useful for race-free cleanup, since
// unregistration is nonblocking and can return before the last notification is
// processed.
/** Register subscriber */
void RegisterSharedValidationInterface(std::shared_ptr<CValidationInterface> callbacks);
/** Unregister subscriber */
void UnregisterSharedValidationInterface(std::shared_ptr<CValidationInterface> callbacks);

/**
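The renamed parameters and the shared_ptr registration functions exist so a subscriber stays alive until its last queued callback has run. A hedged sketch of a subscriber built against the interface declared above; TipLogger and StartTipLogger are illustrative names, and the UpdatedBlockTip signature is assumed from the upstream CValidationInterface.

#include <memory>

#include <validationinterface.h>

// TipLogger is a hypothetical subscriber; only the registration pattern matters.
class TipLogger final : public CValidationInterface
{
protected:
    void UpdatedBlockTip(const CBlockIndex* pindexNew, const CBlockIndex* pindexFork,
                         bool fInitialDownload) override
    {
        // React to the new chain tip here.
    }
};

void StartTipLogger()
{
    // The shared_ptr keeps the subscriber alive until its last queued
    // callback has run, which is the point of the shared registration API.
    auto logger = std::make_shared<TipLogger>();
    RegisterSharedValidationInterface(logger);
    // ... later: UnregisterSharedValidationInterface(logger);
}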
@ -249,30 +249,50 @@ public:

} // namespace

ThresholdState VersionBitsState(const CBlockIndex* pindexPrev, const Consensus::Params& params, Consensus::DeploymentPos pos, VersionBitsCache& cache)
ThresholdState VersionBitsCache::State(const CBlockIndex* pindexPrev, const Consensus::Params& params, Consensus::DeploymentPos pos)
{
return VersionBitsConditionChecker(pos).GetStateFor(pindexPrev, params, cache.caches[pos]);
LOCK(m_mutex);
return VersionBitsConditionChecker(pos).GetStateFor(pindexPrev, params, m_caches[pos]);
}

BIP9Stats VersionBitsStatistics(const CBlockIndex* pindexPrev, const Consensus::Params& params, Consensus::DeploymentPos pos, VersionBitsCache& cache)
BIP9Stats VersionBitsCache::Statistics(const CBlockIndex* pindexPrev, const Consensus::Params& params, Consensus::DeploymentPos pos)
{
return VersionBitsConditionChecker(pos).GetStateStatisticsFor(pindexPrev, params, cache.caches[pos]);
LOCK(m_mutex);
return VersionBitsConditionChecker(pos).GetStateStatisticsFor(pindexPrev, params, m_caches[pos]);
}

int VersionBitsStateSinceHeight(const CBlockIndex* pindexPrev, const Consensus::Params& params, Consensus::DeploymentPos pos, VersionBitsCache& cache)
int VersionBitsCache::StateSinceHeight(const CBlockIndex* pindexPrev, const Consensus::Params& params, Consensus::DeploymentPos pos)
{
return VersionBitsConditionChecker(pos).GetStateSinceHeightFor(pindexPrev, params, cache.caches[pos]);
LOCK(m_mutex);
return VersionBitsConditionChecker(pos).GetStateSinceHeightFor(pindexPrev, params, m_caches[pos]);
}

uint32_t VersionBitsMask(const Consensus::Params& params, Consensus::DeploymentPos pos)
uint32_t VersionBitsCache::Mask(const Consensus::Params& params, Consensus::DeploymentPos pos)
{
return VersionBitsConditionChecker(pos).Mask(params);
}

int32_t VersionBitsCache::ComputeBlockVersion(const CBlockIndex* pindexPrev, const Consensus::Params& params)
{
LOCK(m_mutex);
int32_t nVersion = VERSIONBITS_TOP_BITS;

for (int i = 0; i < (int)Consensus::MAX_VERSION_BITS_DEPLOYMENTS; i++) {
Consensus::DeploymentPos pos = static_cast<Consensus::DeploymentPos>(i);
ThresholdState state = VersionBitsConditionChecker(pos).GetStateFor(pindexPrev, params, m_caches[pos]);
if (state == ThresholdState::LOCKED_IN || state == ThresholdState::STARTED) {
nVersion |= Mask(params, pos);
}
}

return nVersion;
}

void VersionBitsCache::Clear()
{
LOCK(m_mutex);
for (unsigned int d = 0; d < Consensus::MAX_VERSION_BITS_DEPLOYMENTS; d++) {
caches[d].clear();
m_caches[d].clear();
}
}
AbstractEHFManager* AbstractEHFManager::globalInstance{nullptr};
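With the cache owning ComputeBlockVersion(), callers ask a VersionBitsCache instance (the global g_versionbitscache in the new code) for the version of the next block instead of calling the former free function. A hedged caller-side sketch that assumes the surrounding Dash headers; VersionForNextBlock is an illustrative name.

#include <chainparams.h>
#include <versionbits.h>

// Hedged sketch: block assembly would ask the cache object for the version
// of the block to be built on top of pindexPrev.
int32_t VersionForNextBlock(VersionBitsCache& cache, const CBlockIndex* pindexPrev, const CChainParams& chainparams)
{
    return cache.ComputeBlockVersion(pindexPrev, chainparams.GetConsensus());
}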
@ -6,6 +6,7 @@
#define BITCOIN_VERSIONBITS_H

#include <chain.h>
#include <sync.h>
#include <map>

/** What block version to use for new blocks (pre versionbits) */
@ -71,23 +72,33 @@ public:
int GetStateSinceHeightFor(const CBlockIndex* pindexPrev, const Consensus::Params& params, ThresholdConditionCache& cache) const;
};

/** BIP 9 allows multiple softforks to be deployed in parallel. We cache per-period state for every one of them
* keyed by the bit position used to signal support. */
struct VersionBitsCache
/** BIP 9 allows multiple softforks to be deployed in parallel. We cache
* per-period state for every one of them. */
class VersionBitsCache
{
ThresholdConditionCache caches[Consensus::MAX_VERSION_BITS_DEPLOYMENTS];
private:
Mutex m_mutex;
ThresholdConditionCache m_caches[Consensus::MAX_VERSION_BITS_DEPLOYMENTS] GUARDED_BY(m_mutex);

public:
/** Get the numerical statistics for a given deployment for the signalling period that includes the block after pindexPrev. */
BIP9Stats Statistics(const CBlockIndex* pindexPrev, const Consensus::Params& params, Consensus::DeploymentPos pos);

static uint32_t Mask(const Consensus::Params& params, Consensus::DeploymentPos pos);

/** Get the BIP9 state for a given deployment for the block after pindexPrev. */
ThresholdState State(const CBlockIndex* pindexPrev, const Consensus::Params& params, Consensus::DeploymentPos pos);

/** Get the block height at which the BIP9 deployment switched into the state for the block after pindexPrev. */
int StateSinceHeight(const CBlockIndex* pindexPrev, const Consensus::Params& params, Consensus::DeploymentPos pos);

/** Determine what nVersion a new block should use
*/
int32_t ComputeBlockVersion(const CBlockIndex* pindexPrev, const Consensus::Params& params);

void Clear();
};

/** Get the BIP9 state for a given deployment at the current tip. */
ThresholdState VersionBitsState(const CBlockIndex* pindexPrev, const Consensus::Params& params, Consensus::DeploymentPos pos, VersionBitsCache& cache);
/** Get the numerical statistics for the BIP9 state for a given deployment at the current tip. */
BIP9Stats VersionBitsStatistics(const CBlockIndex* pindexPrev, const Consensus::Params& params, Consensus::DeploymentPos pos, VersionBitsCache& cache);
/** Get the block height at which the BIP9 deployment switched into the state for the block building on the current tip. */
int VersionBitsStateSinceHeight(const CBlockIndex* pindexPrev, const Consensus::Params& params, Consensus::DeploymentPos pos, VersionBitsCache& cache);
uint32_t VersionBitsMask(const Consensus::Params& params, Consensus::DeploymentPos pos);

class AbstractEHFManager
{
public:
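The cache now keeps its state private behind a Mutex, with GUARDED_BY annotations so Clang's thread-safety analysis can check every access. A minimal standalone illustration of the same ownership pattern, using std::mutex and std::lock_guard in place of the project's Mutex/LOCK wrappers:

#include <map>
#include <mutex>

// Minimal illustration of the pattern adopted by VersionBitsCache: the data
// is private, every public method takes the lock, and nothing escapes by
// reference. (The real code adds GUARDED_BY annotations for static analysis.)
class GuardedCache
{
private:
    std::mutex m_mutex;
    std::map<int, int> m_cache; // guarded by m_mutex

public:
    void Put(int key, int value)
    {
        std::lock_guard<std::mutex> lock(m_mutex);
        m_cache[key] = value;
    }

    bool Get(int key, int& value)
    {
        std::lock_guard<std::mutex> lock(m_mutex);
        auto it = m_cache.find(key);
        if (it == m_cache.end()) return false;
        value = it->second;
        return true;
    }

    void Clear()
    {
        std::lock_guard<std::mutex> lock(m_mutex);
        m_cache.clear();
    }
};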
@ -1,22 +0,0 @@
// Copyright (c) 2016-2018 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.

#include <versionbitsinfo.h>

#include <consensus/params.h>

const struct VBDeploymentInfo VersionBitsDeploymentInfo[Consensus::MAX_VERSION_BITS_DEPLOYMENTS] = {
{
/*.name =*/ "testdummy",
/*.gbt_force =*/ true,
},
{
/*.name =*/"v20",
/*.gbt_force =*/true,
},
{
/*.name =*/"mn_rr",
/*.gbt_force =*/true,
},
};
@ -1,17 +0,0 @@
// Copyright (c) 2016-2018 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.

#ifndef BITCOIN_VERSIONBITSINFO_H
#define BITCOIN_VERSIONBITSINFO_H

struct VBDeploymentInfo {
/** Deployment name */
const char *name;
/** Whether GBT clients can safely ignore this rule in simplified usage */
bool gbt_force;
};

extern const struct VBDeploymentInfo VersionBitsDeploymentInfo[];

#endif // BITCOIN_VERSIONBITSINFO_H
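versionbitsinfo.cpp and versionbitsinfo.h are removed, and the name/gbt_force table lives alongside the deployment definitions instead. For context, the standalone sketch below shows how such a table is typically consumed when naming rules for getblocktemplate-style output; DeploymentInfoLite and RuleName are illustrative names, and the entries mirror the ones listed above.

#include <string>

// Standalone stand-in for the removed VBDeploymentInfo table.
struct DeploymentInfoLite {
    const char* name;
    bool gbt_force; // clients may safely ignore the rule when true
};

static const DeploymentInfoLite kDeployments[] = {
    {"testdummy", true},
    {"v20", true},
    {"mn_rr", true},
};

std::string RuleName(const DeploymentInfoLite& dep)
{
    // By convention, a '!' prefix marks a rule that clients must understand
    // (i.e. gbt_force == false).
    return dep.gbt_force ? std::string(dep.name) : "!" + std::string(dep.name);
}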
@ -30,11 +30,12 @@ from test_framework.util import (
class BackwardsCompatibilityTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 6
self.num_nodes = 7
# Add new version after each release:
self.extra_args = [
[], # Pre-release: use to mine blocks
["-nowallet"], # Pre-release: use to receive coins, swap wallets, etc
["-nowallet"], # v20.0.1
["-nowallet"], # v19.3.0
["-nowallet"], # v18.2.2
["-nowallet"], # v0.17.0.3
@ -51,6 +52,7 @@ class BackwardsCompatibilityTest(BitcoinTestFramework):
None,
None,
19030000,
19030000,
18020200,
170003,
160101,
@ -68,7 +70,8 @@ class BackwardsCompatibilityTest(BitcoinTestFramework):
res = self.nodes[self.num_nodes - 1].getblockchaininfo()
assert_equal(res['blocks'], 101)

node_master = self.nodes[self.num_nodes - 5]
node_master = self.nodes[self.num_nodes - 6]
node_v20 = self.nodes[self.num_nodes - 5]
node_v19 = self.nodes[self.num_nodes - 4]
node_v18 = self.nodes[self.num_nodes - 3]
node_v17 = self.nodes[self.num_nodes - 2]
@ -117,6 +120,13 @@ class BackwardsCompatibilityTest(BitcoinTestFramework):
assert info['private_keys_enabled'] == False
assert info['keypoolsize'] == 0

# w1_v20: regular wallet, created with v20.0
node_v20.createwallet(wallet_name="w1_v20")
wallet = node_v20.get_wallet_rpc("w1_v20")
info = wallet.getwalletinfo()
assert info['private_keys_enabled']
assert info['keypoolsize'] > 0

# w2_v19: wallet with private keys disabled, created with v0.19
node_v19.createwallet(wallet_name="w2_v19", disable_private_keys=True)
wallet = node_v19.get_wallet_rpc("w2_v19")
@ -155,6 +165,7 @@ class BackwardsCompatibilityTest(BitcoinTestFramework):

# Copy the wallets to older nodes:
node_master_wallets_dir = os.path.join(node_master.datadir, "regtest/wallets")
node_v20_wallets_dir = os.path.join(node_v20.datadir, "regtest/wallets")
node_v19_wallets_dir = os.path.join(node_v19.datadir, "regtest/wallets")
node_v18_wallets_dir = os.path.join(node_v18.datadir, "regtest/wallets")
node_v17_wallets_dir = os.path.join(node_v17.datadir, "regtest/wallets")
@ -199,6 +210,34 @@ class BackwardsCompatibilityTest(BitcoinTestFramework):
os.path.join(node_v19_wallets_dir, wallet)
)

# Copy wallets to v0.20
for wallet in os.listdir(node_master_wallets_dir):
shutil.copytree(
os.path.join(node_master_wallets_dir, wallet),
os.path.join(node_v20_wallets_dir, wallet)
)

# Open the wallets in v0.20
node_v20.loadwallet("w1")
wallet = node_v20.get_wallet_rpc("w1")
info = wallet.getwalletinfo()
assert info['private_keys_enabled']
assert info['keypoolsize'] > 0
txs = wallet.listtransactions()
assert_equal(len(txs), 1)

node_v20.loadwallet("w2")
wallet = node_v20.get_wallet_rpc("w2")
info = wallet.getwalletinfo()
assert info['private_keys_enabled'] == False
assert info['keypoolsize'] == 0

node_v20.loadwallet("w3")
wallet = node_v20.get_wallet_rpc("w3")
info = wallet.getwalletinfo()
assert info['private_keys_enabled']
assert info['keypoolsize'] == 0

# Open the wallets in v0.19
node_v19.loadwallet("w1")
wallet = node_v19.get_wallet_rpc("w1")
@ -277,15 +316,15 @@ class BackwardsCompatibilityTest(BitcoinTestFramework):
# assert_raises_rpc_error(-4, "Wallet loading failed.", node_v17.loadwallet, 'w3_v18')

# Instead, we stop node and try to launch it with the wallet:
self.stop_node(4)
self.stop_node(5)
# it is expected to fail with error 'DBErrors::TOO_NEW', but Dash Core v17 can open v18 wallets
# can be implemented in the future if there are any incompatible versions
#node_v17.assert_start_raises_init_error(["-wallet=w3_v18"], "Error: Error loading w3_v18: Wallet requires newer version of Dash Core")
#node_v17.assert_start_raises_init_error(["-wallet=w3"], "Error: Error loading w3: Wallet requires newer version of Dash Core")
self.start_node(4)
self.start_node(5)

# Open most recent wallet in v0.16 (no loadwallet RPC)
self.restart_node(5, extra_args=["-wallet=w2"])
self.restart_node(6, extra_args=["-wallet=w2"])
wallet = node_v16.get_wallet_rpc("w2")
info = wallet.getwalletinfo()
assert info['keypoolsize'] == 1
@ -21,6 +21,18 @@ import hashlib

SHA256_SUMS = {
"d1f7121a7d7bdd4077709284076860389d6a0f4481a934ad9acb85cae3d7b83e": "dashcore-20.0.1-aarch64-linux-gnu.tar.gz",
"37375229e5ab18d7050b729fb016df24acdd72d60bc3fa074270d89030a27827": "dashcore-20.0.1-arm-linux-gnueabihf.tar.gz",
"ab530f72d2bfbfcd7fca0644e3ea5c5b279e2204fe50ff7bd9cc452a0d413c65": "dashcore-20.0.1-arm64-apple-darwin.dmg",
"8f4b55e4a3d6bb38a0c1f51ece14f387fd4dcffa000aeecfbbd1f751da8b4446": "dashcore-20.0.1-arm64-apple-darwin.tar.gz",
"1d9cdb00d93e8babe9f54d0ecb04c55f2cd6fd6cfaa85466aa7f95a6976d040d": "dashcore-20.0.1-riscv64-linux-gnu.tar.gz",
"f722954c38d5b18f8290e41ca9dd833929258dcf68c9396cbbc81d241285947b": "dashcore-20.0.1-win64-setup.exe",
"bb6d59a3eadac316e86e073a9f7ca4d28f3a2e8a59b7109d509a7368675a6f5f": "dashcore-20.0.1-win64.zip",
"5373a84f49e4f76bd04987806f5fcde0b537fa1408e1f98370680f3f5134970f": "dashcore-20.0.1-x86_64-apple-darwin.dmg",
"0c9344961ae5800f54ffc90af63826cdbf61acc5c442f3fab6527d528f2d9323": "dashcore-20.0.1-x86_64-apple-darwin.tar.gz",
"7c82bdbd1c2de515d6c7245886d8c0b0044a4a9b6f74166b8d58c82cd4ae3270": "dashcore-20.0.1-x86_64-linux-gnu.tar.gz",
"bb898a8e4c54fd5989f114673e1fee5116bf6ffa257c63397993035c390de806": "dashcore-20.0.1.tar.gz",
#
"a4b555b47f5f9a5a01fc5d3b543731088bd10a65dd7fa81fb552818146e424b5": "dashcore-19.3.0-aarch64-linux-gnu.tar.gz",
"531bb188c1aea808ef6f3533d71182a51958136f6e43d9fcadaef1a5fcdd0468": "dashcore-19.3.0-osx.dmg",
"1b4673a2bd71f9f2b593c2d71386e60f4744b59b57142707f0045ed49c92024b": "dashcore-19.3.0-osx64.tar.gz",
@ -105,8 +117,11 @@ def download_binary(tag, args) -> int:
if match:
bin_path = 'releases/download/test.{}'.format(
match.group(1), match.group(2))
platform = args.platform
if tag < "v20" and platform in ["x86_64-apple-darwin", "aarch64-apple-darwin"]:
platform = "osx64"
tarball = 'dashcore-{tag}-{platform}.tar.gz'.format(
tag=tag[1:], platform=args.platform)
tag=tag[1:], platform=platform)
tarballUrl = 'https://github.com/dashpay/dash/{bin_path}/{tarball}'.format(
bin_path=bin_path, tarball=tarball)

@ -147,10 +162,39 @@ def download_binary(tag, args) -> int:
ret = subprocess.run(['tar', '-zxf', tarball, '-C', tag,
'--strip-components=1',
'dashcore-{tag}'.format(tag=filename, platform=args.platform)]).returncode
if ret:
if ret != 0:
print(f"Failed to extract the {tag} tarball")
return ret

Path(tarball).unlink()

if tag >= "v19" and platform == "arm64-apple-darwin":
# Starting with v23 there are arm64 binaries for ARM (e.g. M1, M2) macs, but they have to be signed to run
binary_path = f'{os.getcwd()}/{tag}/bin/'

for arm_binary in os.listdir(binary_path):
# Is it already signed?
ret = subprocess.run(
['codesign', '-v', binary_path + arm_binary],
stderr=subprocess.DEVNULL, # Suppress expected stderr output
).returncode
if ret == 1:
# Have to self-sign the binary
ret = subprocess.run(
['codesign', '-s', '-', binary_path + arm_binary]
).returncode
if ret != 0:
print(f"Failed to self-sign {tag} {arm_binary} arm64 binary")
return 1

# Confirm success
ret = subprocess.run(
['codesign', '-v', binary_path + arm_binary]
).returncode
if ret != 0:
print(f"Failed to verify the self-signed {tag} {arm_binary} arm64 binary")
return 1

return 0

@ -212,8 +256,8 @@ def check_host(args) -> int:
platforms = {
'aarch64-*-linux*': 'aarch64-linux-gnu',
'x86_64-*-linux*': 'x86_64-linux-gnu',
'x86_64-apple-darwin*': 'osx64',
'aarch64-apple-darwin*': 'osx64',
'x86_64-apple-darwin*': 'x86_64-apple-darwin',
'aarch64-apple-darwin*': 'aarch64-apple-darwin',
}
args.platform = ''
for pattern, target in platforms.items():