2016-05-06 11:23:48 +02:00
#!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
2017-05-07 15:13:29 +02:00
# Copyright (c) 2014-2019 The Dash Core developers
2014-10-23 03:48:19 +02:00
# Distributed under the MIT software license, see the accompanying
2014-07-08 18:07:23 +02:00
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
2019-01-07 10:55:35 +01:00
""" Base class for RPC testing. """
2014-07-08 18:07:23 +02:00
2017-03-28 11:24:14 +02:00
from collections import deque
2017-06-02 11:32:55 +02:00
from enum import Enum
2016-05-10 18:27:31 +02:00
import logging
import optparse
2014-07-08 18:07:23 +02:00
import os
2017-08-11 18:09:51 +02:00
import pdb
2014-07-08 18:07:23 +02:00
import shutil
2017-05-07 15:13:29 +02:00
import sys
2014-07-08 18:07:23 +02:00
import tempfile
2017-03-22 13:03:26 +01:00
import time
2017-06-29 17:37:19 +02:00
import traceback
2019-01-23 17:36:51 +01:00
from concurrent . futures import ThreadPoolExecutor
2014-07-08 18:07:23 +02:00
2017-06-29 17:37:19 +02:00
from . authproxy import JSONRPCException
from . import coverage
2017-08-15 23:34:07 +02:00
from . test_node import TestNode
2015-10-11 07:41:19 +02:00
from . util import (
2017-05-07 15:13:29 +02:00
PortSeed ,
MAX_NODES ,
2019-06-20 18:37:09 +02:00
assert_equal ,
2017-05-07 15:13:29 +02:00
check_json_precision ,
2015-10-11 07:41:19 +02:00
connect_nodes_bi ,
2018-10-20 16:08:02 +02:00
connect_nodes ,
2019-06-20 18:37:09 +02:00
copy_datadir ,
2017-05-02 20:02:55 +02:00
disconnect_nodes ,
2017-05-07 15:13:29 +02:00
initialize_datadir ,
log_filename ,
p2p_port ,
set_node_times ,
2019-06-20 18:37:09 +02:00
satoshi_round ,
2015-10-11 07:41:19 +02:00
sync_blocks ,
sync_mempools ,
2018-10-20 16:08:02 +02:00
sync_masternodes ,
2019-10-01 16:14:26 +02:00
wait_to_sync ,
wait_until ,
)
2014-07-08 18:07:23 +02:00
2017-06-02 11:32:55 +02:00
class TestStatus(Enum):
    """Outcome of a test run, tracked by BitcoinTestFramework.main()."""
    PASSED = 1
    FAILED = 2
    SKIPPED = 3

# Process exit codes reported back to the test runner.
TEST_EXIT_PASSED = 0
TEST_EXIT_FAILED = 1
TEST_EXIT_SKIPPED = 77

# Timestamp used as the mocktime base for the regtest chain.
GENESISTIME = 1417713337
2014-07-08 18:07:23 +02:00
class BitcoinTestFramework(object):
    """Base class for a bitcoin test script.

    Individual bitcoin test scripts should subclass this class and override the set_test_params() and run_test() methods.

    Individual tests can also override the following methods to customize the test setup:

    - add_options()
    - setup_chain()
    - setup_network()
    - setup_nodes()

    The __init__() and main() methods should not be overridden.

    This class also contains various public and private helper methods."""

    def __init__(self):
        """Sets test framework defaults. Do not override this method. Instead, override the set_test_params() method"""
        # Defaults; subclasses adjust them inside set_test_params().
        self.setup_clean_chain = False
        self.nodes = []
        self.mocktime = 0  # 0 means "mocktime disabled"
        self.set_test_params()

        assert hasattr(self, "num_nodes"), "Test must set self.num_nodes in set_test_params()"
2014-07-08 18:07:23 +02:00
def main ( self ) :
2017-09-01 18:47:13 +02:00
""" Main function. This should not be overridden by the subclass test scripts. """
2014-07-08 18:07:23 +02:00
parser = optparse . OptionParser ( usage = " % prog [options] " )
parser . add_option ( " --nocleanup " , dest = " nocleanup " , default = False , action = " store_true " ,
2016-03-04 08:25:16 +01:00
help = " Leave dashds and test.* datadir on exit or error " )
2015-04-23 14:19:00 +02:00
parser . add_option ( " --noshutdown " , dest = " noshutdown " , default = False , action = " store_true " ,
2016-03-04 08:25:16 +01:00
help = " Don ' t stop dashds after the test execution " )
2017-06-29 17:37:19 +02:00
parser . add_option ( " --srcdir " , dest = " srcdir " , default = os . path . normpath ( os . path . dirname ( os . path . realpath ( __file__ ) ) + " /../../../src " ) ,
2016-03-04 08:25:16 +01:00
help = " Source directory containing dashd/dash-cli (default: %d efault) " )
2017-06-29 17:37:19 +02:00
parser . add_option ( " --cachedir " , dest = " cachedir " , default = os . path . normpath ( os . path . dirname ( os . path . realpath ( __file__ ) ) + " /../../cache " ) ,
2016-08-17 12:12:55 +02:00
help = " Directory for caching pregenerated datadirs " )
2017-05-22 08:59:11 +02:00
parser . add_option ( " --tmpdir " , dest = " tmpdir " , help = " Root directory for datadirs " )
2017-03-09 21:16:20 +01:00
parser . add_option ( " -l " , " --loglevel " , dest = " loglevel " , default = " INFO " ,
help = " log events at this level and higher to the console. Can be set to DEBUG, INFO, WARNING, ERROR or CRITICAL. Passing --loglevel DEBUG will output all logs to console. Note that logs at all levels are always written to the test_framework.log file in the temporary test directory. " )
2014-10-23 19:11:20 +02:00
parser . add_option ( " --tracerpc " , dest = " trace_rpc " , default = False , action = " store_true " ,
help = " Print out all RPC calls as they are made " )
2016-05-10 18:27:31 +02:00
parser . add_option ( " --portseed " , dest = " port_seed " , default = os . getpid ( ) , type = ' int ' ,
help = " The seed to use for assigning port numbers (default: current process id) " )
2015-10-11 07:41:19 +02:00
parser . add_option ( " --coveragedir " , dest = " coveragedir " ,
help = " Write tested RPC commands into this directory " )
2017-05-03 08:37:14 +02:00
parser . add_option ( " --configfile " , dest = " configfile " ,
help = " Location of the test framework config file " )
2017-08-11 18:09:51 +02:00
parser . add_option ( " --pdbonfailure " , dest = " pdbonfailure " , default = False , action = " store_true " ,
help = " Attach a python debugger if test fails " )
2014-07-08 18:07:23 +02:00
self . add_options ( parser )
( self . options , self . args ) = parser . parse_args ( )
2016-05-10 18:27:31 +02:00
PortSeed . n = self . options . port_seed
2017-06-29 17:37:19 +02:00
os . environ [ ' PATH ' ] = self . options . srcdir + " : " + self . options . srcdir + " /qt: " + os . environ [ ' PATH ' ]
2014-07-08 18:07:23 +02:00
check_json_precision ( )
2017-10-18 16:52:44 +02:00
self . options . cachedir = os . path . abspath ( self . options . cachedir )
2017-03-09 21:16:20 +01:00
# Set up temp directory and start logging
2017-05-22 08:59:11 +02:00
if self . options . tmpdir :
2017-10-18 16:52:44 +02:00
self . options . tmpdir = os . path . abspath ( self . options . tmpdir )
2017-05-22 08:59:11 +02:00
os . makedirs ( self . options . tmpdir , exist_ok = False )
else :
self . options . tmpdir = tempfile . mkdtemp ( prefix = " test " )
2017-03-09 21:16:20 +01:00
self . _start_logging ( )
2017-06-02 11:32:55 +02:00
success = TestStatus . FAILED
2017-03-09 21:16:20 +01:00
2014-07-08 18:07:23 +02:00
try :
2014-10-20 14:14:04 +02:00
self . setup_chain ( )
self . setup_network ( )
self . run_test ( )
2017-06-02 11:32:55 +02:00
success = TestStatus . PASSED
2014-07-09 03:24:40 +02:00
except JSONRPCException as e :
2017-03-09 21:16:20 +01:00
self . log . exception ( " JSONRPC error " )
2017-06-02 11:32:55 +02:00
except SkipTest as e :
self . log . warning ( " Test Skipped: %s " % e . message )
success = TestStatus . SKIPPED
2014-07-08 18:07:23 +02:00
except AssertionError as e :
2017-03-09 21:16:20 +01:00
self . log . exception ( " Assertion failed " )
2016-04-14 12:14:24 +02:00
except KeyError as e :
2017-03-09 21:16:20 +01:00
self . log . exception ( " Key error " )
2014-07-08 18:07:23 +02:00
except Exception as e :
2017-03-09 21:16:20 +01:00
self . log . exception ( " Unexpected exception caught during testing " )
2016-05-09 17:01:55 +02:00
except KeyboardInterrupt as e :
2017-03-09 21:16:20 +01:00
self . log . warning ( " Exiting after keyboard interrupt " )
2014-07-08 18:07:23 +02:00
2017-08-11 18:09:51 +02:00
if success == TestStatus . FAILED and self . options . pdbonfailure :
print ( " Testcase failed. Attaching python debugger. Enter ? for help " )
pdb . set_trace ( )
2015-04-23 14:19:00 +02:00
if not self . options . noshutdown :
2017-03-09 21:16:20 +01:00
self . log . info ( " Stopping nodes " )
2019-02-21 19:37:16 +01:00
try :
2017-06-02 11:32:55 +02:00
if self . nodes :
self . stop_nodes ( )
2019-02-21 19:37:16 +01:00
except BaseException as e :
success = False
2019-03-08 09:05:00 +01:00
self . log . exception ( " Unexpected exception caught during shutdown " )
2015-04-23 14:19:00 +02:00
else :
2017-03-09 21:16:20 +01:00
self . log . info ( " Note: dashds were not stopped and may still be running " )
2015-04-20 11:50:33 +02:00
2017-06-02 11:32:55 +02:00
if not self . options . nocleanup and not self . options . noshutdown and success != TestStatus . FAILED :
2017-03-09 21:16:20 +01:00
self . log . info ( " Cleaning up " )
2014-07-08 18:07:23 +02:00
shutil . rmtree ( self . options . tmpdir )
2016-05-25 11:52:25 +02:00
else :
2017-03-09 21:16:20 +01:00
self . log . warning ( " Not cleaning up dir %s " % self . options . tmpdir )
2016-12-02 16:40:50 +01:00
if os . getenv ( " PYTHON_DEBUG " , " " ) :
# Dump the end of the debug logs, to aid in debugging rare
# travis failures.
import glob
2017-03-28 11:24:14 +02:00
filenames = [ self . options . tmpdir + " /test_framework.log " ]
filenames + = glob . glob ( self . options . tmpdir + " /node*/regtest/debug.log " )
2016-12-02 16:40:50 +01:00
MAX_LINES_TO_PRINT = 1000
2017-03-28 11:24:14 +02:00
for fn in filenames :
try :
with open ( fn , ' r ' ) as f :
2017-06-29 17:37:19 +02:00
print ( " From " , fn , " : " )
2017-03-28 11:24:14 +02:00
print ( " " . join ( deque ( f , MAX_LINES_TO_PRINT ) ) )
except OSError :
print ( " Opening file %s failed. " % fn )
traceback . print_exc ( )
2017-06-02 11:32:55 +02:00
if success == TestStatus . PASSED :
2017-03-09 21:16:20 +01:00
self . log . info ( " Tests successful " )
2017-06-02 11:32:55 +02:00
sys . exit ( TEST_EXIT_PASSED )
elif success == TestStatus . SKIPPED :
self . log . info ( " Test skipped " )
sys . exit ( TEST_EXIT_SKIPPED )
2014-07-08 18:07:23 +02:00
else :
2017-03-09 21:16:20 +01:00
self . log . error ( " Test failed. Test logging available at %s /test_framework.log " , self . options . tmpdir )
logging . shutdown ( )
2017-06-02 11:32:55 +02:00
sys . exit ( TEST_EXIT_FAILED )
2015-04-28 18:39:47 +02:00
2017-09-01 18:47:13 +02:00
# Methods to override in subclass test scripts.
def set_test_params ( self ) :
""" Tests must this method to change default values for number of nodes, topology, etc """
raise NotImplementedError
2017-05-07 15:13:29 +02:00
2017-09-01 18:47:13 +02:00
def add_options ( self , parser ) :
""" Override this method to add command-line options to the test """
pass
2017-06-29 17:37:19 +02:00
2017-09-01 18:47:13 +02:00
def setup_chain ( self ) :
""" Override this method to customize blockchain setup """
self . log . info ( " Initializing test directory " + self . options . tmpdir )
if self . setup_clean_chain :
self . _initialize_chain_clean ( )
self . set_genesis_mocktime ( )
else :
self . _initialize_chain ( )
self . set_cache_mocktime ( )
2017-06-29 17:37:19 +02:00
2017-09-01 18:47:13 +02:00
def setup_network ( self ) :
""" Override this method to customize test network topology """
self . setup_nodes ( )
# Connect the nodes as a "chain". This allows us
# to split the network between nodes 1 and 2 to get
# two halves that can work on competing chains.
for i in range ( self . num_nodes - 1 ) :
connect_nodes_bi ( self . nodes , i , i + 1 )
self . sync_all ( )
def setup_nodes ( self ) :
""" Override this method to customize test node setup """
extra_args = None
2019-09-24 00:54:26 +02:00
stderr = None
2017-09-01 18:47:13 +02:00
if hasattr ( self , " extra_args " ) :
extra_args = self . extra_args
2019-09-24 00:54:26 +02:00
if hasattr ( self , " stderr " ) :
stderr = self . stderr
self . add_nodes ( self . num_nodes , extra_args , stderr = stderr )
2017-09-01 18:47:13 +02:00
self . start_nodes ( )
def run_test ( self ) :
""" Tests must override this method to define test logic """
raise NotImplementedError
2017-06-29 17:37:19 +02:00
2017-09-01 18:47:13 +02:00
# Public helper methods. These can be accessed by the subclass test scripts.
2017-06-29 17:37:19 +02:00
2017-09-01 18:47:13 +02:00
def add_nodes ( self , num_nodes , extra_args = None , rpchost = None , timewait = None , binary = None , stderr = None ) :
""" Instantiate TestNode objects """
2017-05-07 15:13:29 +02:00
2017-06-29 17:37:19 +02:00
if extra_args is None :
2017-08-15 23:34:07 +02:00
extra_args = [ [ ] ] * num_nodes
2017-06-29 17:37:19 +02:00
if binary is None :
binary = [ None ] * num_nodes
assert_equal ( len ( extra_args ) , num_nodes )
assert_equal ( len ( binary ) , num_nodes )
2019-09-23 23:15:31 +02:00
old_num_nodes = len ( self . nodes )
2017-09-01 18:47:13 +02:00
for i in range ( num_nodes ) :
2019-09-23 23:15:31 +02:00
self . nodes . append ( TestNode ( old_num_nodes + i , self . options . tmpdir , extra_args [ i ] , rpchost , timewait = timewait , binary = binary [ i ] , stderr = stderr , mocktime = self . mocktime , coverage_dir = self . options . coveragedir ) )
2017-09-01 18:47:13 +02:00
def start_node ( self , i , extra_args = None , stderr = None ) :
2019-09-26 15:50:24 +02:00
""" Start a dashd """
2017-09-01 18:47:13 +02:00
node = self . nodes [ i ]
node . start ( extra_args , stderr )
node . wait_for_rpc_connection ( )
if self . options . coveragedir is not None :
coverage . write_all_rpc_commands ( self . options . coveragedir , node . rpc )
def start_nodes ( self , extra_args = None , stderr = None ) :
""" Start multiple dashds """
if extra_args is None :
extra_args = [ None ] * self . num_nodes
assert_equal ( len ( extra_args ) , self . num_nodes )
2017-06-29 17:37:19 +02:00
try :
2017-09-01 18:47:13 +02:00
for i , node in enumerate ( self . nodes ) :
node . start ( extra_args [ i ] , stderr )
for node in self . nodes :
2017-08-15 23:34:07 +02:00
node . wait_for_rpc_connection ( )
2017-06-29 17:37:19 +02:00
except :
# If one node failed to start, stop the others
self . stop_nodes ( )
raise
2017-08-15 23:34:07 +02:00
if self . options . coveragedir is not None :
2017-09-01 18:47:13 +02:00
for node in self . nodes :
2017-08-15 23:34:07 +02:00
coverage . write_all_rpc_commands ( self . options . coveragedir , node . rpc )
2019-09-23 21:17:14 +02:00
def stop_node ( self , i ) :
2019-08-09 01:21:57 +02:00
""" Stop a dashd test node """
2017-08-15 23:34:07 +02:00
self . nodes [ i ] . stop_node ( )
2017-09-06 20:02:08 +02:00
self . nodes [ i ] . wait_until_stopped ( )
2019-08-09 01:19:22 +02:00
2019-09-23 21:17:14 +02:00
def stop_nodes ( self ) :
2019-08-09 01:21:57 +02:00
""" Stop multiple dashd test nodes """
2017-08-15 23:34:07 +02:00
for node in self . nodes :
# Issue RPC to stop nodes
node . stop_node ( )
2017-06-29 17:37:19 +02:00
2017-08-15 23:34:07 +02:00
for node in self . nodes :
# Wait for nodes to stop
2017-09-06 20:02:08 +02:00
node . wait_until_stopped ( )
2017-06-29 17:37:19 +02:00
2017-09-01 18:47:13 +02:00
    def assert_start_raises_init_error(self, i, extra_args=None, expected_msg=None):
        """Assert that starting node i fails during initialization.

        If expected_msg is given, it must also appear in the node's stderr."""
        with tempfile.SpooledTemporaryFile(max_size=2**16) as log_stderr:
            try:
                self.start_node(i, extra_args, stderr=log_stderr)
                self.stop_node(i)
            except Exception as e:
                assert 'dashd exited' in str(e)  # node must have shutdown
                # Mark the TestNode as down so teardown doesn't try to stop it.
                self.nodes[i].running = False
                self.nodes[i].process = None
                if expected_msg is not None:
                    log_stderr.seek(0)
                    stderr = log_stderr.read().decode('utf-8')
                    if expected_msg not in stderr:
                        raise AssertionError("Expected error \"" + expected_msg + "\" not found in:\n" + stderr)
            else:
                # No exception: the node started fine, which is a failure here.
                if expected_msg is None:
                    assert_msg = "dashd should have exited with an error"
                else:
                    assert_msg = "dashd should have exited with expected error " + expected_msg
                raise AssertionError(assert_msg)
    def wait_for_node_exit(self, i, timeout):
        # Block until node i's dashd process terminates (presumably a
        # subprocess.Popen, so this raises TimeoutExpired after `timeout`
        # seconds — TODO confirm against TestNode).
        self.nodes[i].process.wait(timeout)
2017-05-07 15:13:29 +02:00
def split_network ( self ) :
"""
Split the network of four nodes into nodes 0 / 1 and 2 / 3.
"""
disconnect_nodes ( self . nodes [ 1 ] , 2 )
disconnect_nodes ( self . nodes [ 2 ] , 1 )
self . sync_all ( [ self . nodes [ : 2 ] , self . nodes [ 2 : ] ] )
def join_network ( self ) :
"""
Join the ( previously split ) network halves together .
"""
connect_nodes_bi ( self . nodes , 1 , 2 )
self . sync_all ( )
def sync_all ( self , node_groups = None ) :
if not node_groups :
node_groups = [ self . nodes ]
for group in node_groups :
sync_blocks ( group )
sync_mempools ( group )
2019-08-09 01:14:11 +02:00
    def disable_mocktime(self):
        # 0 means "mocktime disabled" (see __init__).
        self.mocktime = 0
2019-08-09 01:14:11 +02:00
    def bump_mocktime(self, t):
        # Advance the framework's mocktime by t seconds. Callers also push
        # the new time to the nodes via set_node_times() (see setup_network).
        self.mocktime += t
2019-08-09 01:14:11 +02:00
    def set_cache_mocktime(self):
        # For backward compatibility of the python scripts with previous
        # versions of the cache, set MOCKTIME to regtest genesis time plus
        # the 201 cached blocks spaced 156 seconds apart.
        self.mocktime = GENESISTIME + (201 * 156)
2019-08-09 01:14:11 +02:00
    def set_genesis_mocktime(self):
        # Pin mocktime to the regtest genesis block timestamp.
        self.mocktime = GENESISTIME
2017-05-07 15:13:29 +02:00
    # Private helper methods. These should not be accessed by the subclass test scripts.

    def _start_logging(self):
        """Create the 'TestFramework' logger: full DEBUG log to a file, --loglevel-filtered output to stdout."""
        # Add logger and logging handlers
        self.log = logging.getLogger('TestFramework')
        self.log.setLevel(logging.DEBUG)
        # Create file handler to log all messages
        fh = logging.FileHandler(self.options.tmpdir + '/test_framework.log')
        fh.setLevel(logging.DEBUG)
        # Create console handler to log messages to stderr. By default this logs only error messages, but can be configured with --loglevel.
        ch = logging.StreamHandler(sys.stdout)
        # User can provide log level as a number or string (eg DEBUG). loglevel was caught as a string, so try to convert it to an int
        ll = int(self.options.loglevel) if self.options.loglevel.isdigit() else self.options.loglevel.upper()
        ch.setLevel(ll)
        # Format logs the same as dashd's debug.log with microprecision (so log files can be concatenated and sorted)
        formatter = logging.Formatter(fmt='%(asctime)s.%(msecs)03d000 %(name)s (%(levelname)s): %(message)s', datefmt='%Y-%m-%d %H:%M:%S')
        formatter.converter = time.gmtime  # timestamps in UTC, like dashd's log
        fh.setFormatter(formatter)
        ch.setFormatter(formatter)
        # add the handlers to the logger
        self.log.addHandler(fh)
        self.log.addHandler(ch)

        if self.options.trace_rpc:
            rpc_logger = logging.getLogger("BitcoinRPC")
            rpc_logger.setLevel(logging.DEBUG)
            rpc_handler = logging.StreamHandler(sys.stdout)
            rpc_handler.setLevel(logging.DEBUG)
            rpc_logger.addHandler(rpc_handler)
2017-09-01 18:47:13 +02:00
    def _initialize_chain(self, extra_args=None, stderr=None):
        """Initialize a pre-mined blockchain for use by the test.

        Create a cache of a 200-block-long chain (with wallet) for MAX_NODES
        Afterward, create num_nodes copies from the cache."""
        assert self.num_nodes <= MAX_NODES
        # Rebuild the cache if any of the MAX_NODES cached datadirs is missing.
        create_cache = False
        for i in range(MAX_NODES):
            if not os.path.isdir(os.path.join(self.options.cachedir, 'node' + str(i))):
                create_cache = True
                break

        if create_cache:
            self.log.debug("Creating data directories from cached datadir")
            # find and delete old cache directories if any exist
            for i in range(MAX_NODES):
                if os.path.isdir(os.path.join(self.options.cachedir, "node" + str(i))):
                    shutil.rmtree(os.path.join(self.options.cachedir, "node" + str(i)))

            # Create cache directories, run dashds:
            self.set_genesis_mocktime()
            for i in range(MAX_NODES):
                datadir = initialize_datadir(self.options.cachedir, i)
                args = [os.getenv("DASHD", "dashd"), "-server", "-keypool=1", "-datadir=" + datadir, "-discover=0", "-mocktime=" + str(GENESISTIME)]
                if i > 0:
                    # All later nodes connect to node 0.
                    args.append("-connect=127.0.0.1:" + str(p2p_port(0)))
                if extra_args is not None:
                    args.extend(extra_args)
                self.nodes.append(TestNode(i, self.options.cachedir, extra_args=[], rpchost=None, timewait=None, binary=None, stderr=stderr, mocktime=self.mocktime, coverage_dir=None))
                # Override the node's full command line before starting it.
                self.nodes[i].args = args
                self.start_node(i)

            # Wait for RPC connections to be ready
            for node in self.nodes:
                node.wait_for_rpc_connection()

            # Create a 200-block-long chain; each of the 4 first nodes
            # gets 25 mature blocks and 25 immature.
            # Note: To preserve compatibility with older versions of
            # initialize_chain, only 4 nodes will generate coins.
            #
            # Blocks are created with mocktime timestamps 156 seconds apart,
            # starting from the genesis timestamp.
            block_time = GENESISTIME
            for i in range(2):
                for peer in range(4):
                    for j in range(25):
                        set_node_times(self.nodes, block_time)
                        self.nodes[peer].generate(1)
                        block_time += 156
                    # Must sync before next peer starts generating blocks
                    sync_blocks(self.nodes)

            # Shut them down, and clean up cache directories:
            self.stop_nodes()
            self.nodes = []
            self.disable_mocktime()
            # Strip node-specific state so the cache is reusable under any ports.
            for i in range(MAX_NODES):
                os.remove(log_filename(self.options.cachedir, i, "debug.log"))
                os.remove(log_filename(self.options.cachedir, i, "db.log"))
                os.remove(log_filename(self.options.cachedir, i, "peers.dat"))
                os.remove(log_filename(self.options.cachedir, i, "fee_estimates.dat"))

        # Copy the cached datadirs into this test's tmpdir.
        for i in range(self.num_nodes):
            from_dir = os.path.join(self.options.cachedir, "node" + str(i))
            to_dir = os.path.join(self.options.tmpdir, "node" + str(i))
            shutil.copytree(from_dir, to_dir)
            initialize_datadir(self.options.tmpdir, i)  # Overwrite port/rpcport in dash.conf
2017-05-07 15:13:29 +02:00
2017-09-01 18:47:13 +02:00
def _initialize_chain_clean ( self ) :
2017-05-07 15:13:29 +02:00
""" Initialize empty blockchain for use by the test.
Create an empty blockchain and num_nodes wallets .
Useful if a test case wants complete control over initialization . """
2017-09-01 18:47:13 +02:00
for i in range ( self . num_nodes ) :
initialize_datadir ( self . options . tmpdir , i )
2017-05-07 15:13:29 +02:00
2018-10-20 16:08:02 +02:00
# Collateral amount a masternode must lock (see prepare_masternode()).
MASTERNODE_COLLATERAL = 1000


class MasternodeInfo:
    """Plain record describing one masternode created by DashTestFramework."""
    def __init__(self, proTxHash, ownerAddr, votingAddr, pubKeyOperator, keyOperator, collateral_address, collateral_txid, collateral_vout):
        self.proTxHash = proTxHash
        self.ownerAddr = ownerAddr
        self.votingAddr = votingAddr
        self.pubKeyOperator = pubKeyOperator
        self.keyOperator = keyOperator
        self.collateral_address = collateral_address
        self.collateral_txid = collateral_txid
        self.collateral_vout = collateral_vout
2018-10-20 16:08:02 +02:00
class DashTestFramework(BitcoinTestFramework):
    """Base class for tests that need a network containing masternodes."""

    # NOTE: the parameter name "masterodes_count" is misspelled but kept
    # as-is for caller compatibility.
    def set_dash_test_params(self, num_nodes, masterodes_count, extra_args, fast_dip3_enforcement=False):
        """Record test parameters: total node count, masternode count and extra dashd args."""
        self.mn_count = masterodes_count
        self.num_nodes = num_nodes
        self.mninfo = []
        self.setup_clean_chain = True
        self.is_network_split = False
        # additional args
        # BUGFIX: copy the list; the previous `self.extra_args = extra_args`
        # followed by `+=` mutated the caller's list in place, leaking spork
        # and dip3 args back into the test script's own list.
        self.extra_args = list(extra_args)
        self.extra_args += ["-sporkkey=cP4EKFyJsHT39LDqgdcB43Y3YXjNyjb5Fuas1GQSeAtjnZWmZEQK"]
        self.fast_dip3_enforcement = fast_dip3_enforcement
        if fast_dip3_enforcement:
            self.extra_args += ["-dip3params=30:50"]
2018-10-20 16:08:02 +02:00
def create_simple_node ( self ) :
idx = len ( self . nodes )
2019-09-24 00:57:30 +02:00
self . add_nodes ( 1 , extra_args = [ self . extra_args ] )
self . start_node ( idx )
2018-10-20 16:08:02 +02:00
for i in range ( 0 , idx ) :
connect_nodes ( self . nodes [ i ] , idx )
def prepare_masternodes ( self ) :
2019-10-03 12:45:18 +02:00
self . log . info ( " Preparing %d masternodes " % self . mn_count )
2018-10-20 16:08:02 +02:00
for idx in range ( 0 , self . mn_count ) :
2019-04-04 08:08:44 +02:00
self . prepare_masternode ( idx )
    def prepare_masternode(self, idx):
        """Fund and register masternode `idx` via node 0, appending a MasternodeInfo to self.mninfo."""
        bls = self.nodes[0].bls('generate')
        address = self.nodes[0].getnewaddress()
        # Send the collateral to ourselves and find the output that carries it.
        txid = self.nodes[0].sendtoaddress(address, MASTERNODE_COLLATERAL)
        txraw = self.nodes[0].getrawtransaction(txid, True)
        collateral_vout = 0
        for vout_idx in range(0, len(txraw["vout"])):
            vout = txraw["vout"][vout_idx]
            if vout["value"] == MASTERNODE_COLLATERAL:
                collateral_vout = vout_idx
        # Lock the collateral so it is not accidentally spent by the wallet.
        self.nodes[0].lockunspent(False, [{'txid': txid, 'vout': collateral_vout}])

        # send to same address to reserve some funds for fees
        self.nodes[0].sendtoaddress(address, 0.001)
        ownerAddr = self.nodes[0].getnewaddress()
        votingAddr = self.nodes[0].getnewaddress()
        rewardsAddr = self.nodes[0].getnewaddress()
        port = p2p_port(len(self.nodes) + idx)
        # Alternate between the two ProTx registration flavors: even indices
        # unlock the collateral and use 'register_fund', odd indices keep it
        # locked and reference it via 'register'.
        if (idx % 2) == 0:
            self.nodes[0].lockunspent(True, [{'txid': txid, 'vout': collateral_vout}])
            proTxHash = self.nodes[0].protx('register_fund', address, '127.0.0.1:%d' % port, ownerAddr, bls['public'], votingAddr, 0, rewardsAddr, address)
        else:
            self.nodes[0].generate(1)
            proTxHash = self.nodes[0].protx('register', txid, collateral_vout, '127.0.0.1:%d' % port, ownerAddr, bls['public'], votingAddr, 0, rewardsAddr, address)
        self.nodes[0].generate(1)

        self.mninfo.append(MasternodeInfo(proTxHash, ownerAddr, votingAddr, bls['public'], bls['secret'], address, txid, collateral_vout))
        self.sync_all()

        self.log.info("Prepared masternode %d: collateral_txid=%s, collateral_vout=%d, protxHash=%s" % (idx, txid, collateral_vout, proTxHash))
2019-04-04 08:08:44 +02:00
    # NOTE(review): method name is misspelled ("mastermode") but kept for
    # caller compatibility.
    def remove_mastermode(self, idx):
        """Spend masternode idx's collateral (which deregisters it) and drop it from self.mninfo."""
        mn = self.mninfo[idx]
        rawtx = self.nodes[0].createrawtransaction([{"txid": mn.collateral_txid, "vout": mn.collateral_vout}], {self.nodes[0].getnewaddress(): 999.9999})
        rawtx = self.nodes[0].signrawtransaction(rawtx)
        self.nodes[0].sendrawtransaction(rawtx["hex"])
        self.nodes[0].generate(1)
        self.sync_all()
        self.mninfo.remove(mn)

        self.log.info("Removed masternode %d", idx)
2019-01-23 17:36:51 +01:00
def prepare_datadirs ( self ) :
# stop faucet node so that we can copy the datadir
2019-07-04 16:48:01 +02:00
self . stop_node ( 0 )
2019-01-23 17:36:51 +01:00
start_idx = len ( self . nodes )
for idx in range ( 0 , self . mn_count ) :
copy_datadir ( 0 , idx + start_idx , self . options . tmpdir )
# restart faucet node
2019-09-24 00:57:30 +02:00
self . start_node ( 0 )
2019-01-23 17:36:51 +01:00
2018-12-31 08:12:36 +01:00
def start_masternodes ( self ) :
2019-10-03 12:45:18 +02:00
self . log . info ( " Starting %d masternodes " , self . mn_count )
2018-12-31 08:12:36 +01:00
start_idx = len ( self . nodes )
2019-01-23 17:36:51 +01:00
2019-09-24 00:57:30 +02:00
self . add_nodes ( self . mn_count )
2019-01-23 17:36:51 +01:00
executor = ThreadPoolExecutor ( max_workers = 20 )
def do_start ( idx ) :
2018-12-31 08:12:36 +01:00
args = [ ' -masternode=1 ' ,
' -masternodeblsprivkey= %s ' % self . mninfo [ idx ] . keyOperator ] + self . extra_args
2019-09-24 00:57:30 +02:00
self . start_node ( idx + start_idx , extra_args = args )
2019-03-11 09:38:58 +01:00
self . mninfo [ idx ] . nodeIdx = idx + start_idx
2019-09-24 00:57:30 +02:00
self . mninfo [ idx ] . node = self . nodes [ idx + start_idx ]
wait_to_sync ( self . mninfo [ idx ] . node , True )
2019-01-23 17:36:51 +01:00
def do_connect ( idx ) :
2018-10-20 16:08:02 +02:00
for i in range ( 0 , idx + 1 ) :
2018-12-31 08:12:36 +01:00
connect_nodes ( self . nodes [ idx + start_idx ] , i )
2019-01-23 17:36:51 +01:00
jobs = [ ]
# start up nodes in parallel
for idx in range ( 0 , self . mn_count ) :
jobs . append ( executor . submit ( do_start , idx ) )
# wait for all nodes to start up
for job in jobs :
job . result ( )
jobs . clear ( )
# connect nodes in parallel
for idx in range ( 0 , self . mn_count ) :
jobs . append ( executor . submit ( do_connect , idx ) )
# wait for all nodes to connect
for job in jobs :
job . result ( )
jobs . clear ( )
2018-12-31 08:12:36 +01:00
sync_masternodes ( self . nodes , True )
2018-10-20 16:08:02 +02:00
2019-01-23 17:36:51 +01:00
executor . shutdown ( )
2018-10-20 16:08:02 +02:00
    def setup_network(self):
        """Build the Dash test topology: controller node, simple nodes, then masternodes."""
        self.log.info("Creating and starting controller node")
        self.add_nodes(1, extra_args=[self.extra_args])
        self.start_node(0)
        # Mine until node 0 can fund all masternode collaterals (+1 for fees).
        required_balance = MASTERNODE_COLLATERAL * self.mn_count + 1
        self.log.info("Generating %d coins" % required_balance)
        while self.nodes[0].getbalance() < required_balance:
            self.bump_mocktime(1)
            set_node_times(self.nodes, self.mocktime)
            self.nodes[0].generate(1)
        # Everything that is neither the controller nor a masternode.
        num_simple_nodes = self.num_nodes - self.mn_count - 1
        self.log.info("Creating and starting %s simple nodes", num_simple_nodes)
        for i in range(0, num_simple_nodes):
            self.create_simple_node()

        sync_masternodes(self.nodes, True)

        self.log.info("Activating DIP3")
        if not self.fast_dip3_enforcement:
            # Without -dip3params, DIP3 needs height 500 on regtest.
            while self.nodes[0].getblockcount() < 500:
                self.nodes[0].generate(10)
        self.sync_all()

        # create masternodes
        self.prepare_masternodes()
        self.prepare_datadirs()
        self.start_masternodes()

        self.bump_mocktime(1)
        set_node_times(self.nodes, self.mocktime)
        self.nodes[0].generate(1)
        # sync nodes
        self.sync_all()

        self.bump_mocktime(1)
        set_node_times(self.nodes, self.mocktime)

        # All masternodes must report ENABLED before the test proper starts.
        mn_info = self.nodes[0].masternodelist("status")
        assert (len(mn_info) == self.mn_count)
        for status in mn_info.values():
            assert (status == 'ENABLED')
2019-03-18 14:50:44 +01:00
def create_raw_tx(self, node_from, node_to, amount, min_inputs, max_inputs):
    """Create, sign and decode a raw transaction sending `amount` from
    `node_from` to `node_to`, using between `min_inputs` and `max_inputs`
    UTXOs of `node_from`.

    Returns the result of signrawtransaction merged over the result of
    decoderawtransaction, so callers get both 'hex' and decoded fields
    such as 'txid' in one dict.
    """
    assert min_inputs <= max_inputs

    def to_input(utxo):
        # Minimal input reference as expected by createrawtransaction.
        # (Extracted helper; the original built this dict in three places
        # and shadowed the builtin name `input`.)
        return {"txid": utxo['txid'], 'vout': utxo['vout']}

    # fill inputs
    inputs = []
    balances = node_from.listunspent()
    in_amount = 0.0
    last_amount = 0.0
    for tx in balances:
        if len(inputs) < min_inputs:
            # Always take at least min_inputs UTXOs, regardless of value.
            in_amount += float(tx['amount'])
            inputs.append(to_input(tx))
        elif in_amount > amount:
            # Minimum satisfied and enough value collected: stop.
            break
        elif len(inputs) < max_inputs:
            # Still short of the target amount; keep adding inputs.
            in_amount += float(tx['amount'])
            inputs.append(to_input(tx))
        else:
            # At the input cap but still short of the target: swap the
            # last-added UTXO for the current one, hoping for more value.
            in_amount -= last_amount
            in_amount += float(tx['amount'])
            inputs[-1] = to_input(tx)
        last_amount = float(tx['amount'])

    assert len(inputs) >= min_inputs
    assert len(inputs) <= max_inputs
    assert in_amount >= amount

    # fill outputs
    receiver_address = node_to.getnewaddress()
    change_address = node_from.getnewaddress()
    fee = 0.001
    outputs = {}
    outputs[receiver_address] = satoshi_round(amount)
    outputs[change_address] = satoshi_round(in_amount - amount - fee)
    rawtx = node_from.createrawtransaction(inputs, outputs)

    ret = node_from.signrawtransaction(rawtx)
    decoded = node_from.decoderawtransaction(ret['hex'])
    # Merge so that sign results (e.g. 'hex', 'complete') win on key clashes.
    ret = {**decoded, **ret}
    return ret
def wait_for_instantlock(self, txid, node):
    """Block until `node` reports `txid` as InstantSend-locked.

    Polls getrawtransaction every 0.5s for up to 10s; wait_until raises
    on timeout.
    """
    def check_instantlock():
        try:
            return node.getrawtransaction(txid, True)["instantlock"]
        # Was a bare `except:`, which also swallows KeyboardInterrupt and
        # SystemExit; narrowed to Exception. A failure here just means the
        # tx is not yet known to the node, so keep polling.
        except Exception:
            return False
    wait_until(check_instantlock, timeout=10, sleep=0.5)
def wait_for_chainlocked_block(self, node, block_hash, timeout=15):
    """Block until `node` sees `block_hash` confirmed on the active chain
    and covered by a ChainLock. Raises via wait_until on timeout."""
    def check_chainlocked_block():
        try:
            block = node.getblock(block_hash)
            # confirmations > 0 ensures the block is on the active chain.
            return block["confirmations"] > 0 and block["chainlock"]
        # Was a bare `except:`, which also swallows KeyboardInterrupt and
        # SystemExit; narrowed to Exception. Failure means the node does
        # not know the block yet, so keep polling.
        except Exception:
            return False
    wait_until(check_chainlocked_block, timeout=timeout, sleep=0.1)
def wait_for_chainlocked_block_all_nodes(self, block_hash, timeout=15):
    """Wait until every node reports `block_hash` as chainlocked."""
    for n in self.nodes:
        self.wait_for_chainlocked_block(n, block_hash, timeout)
def wait_for_best_chainlock(self, node, block_hash, timeout=15):
    """Wait until `node`'s best chainlock points at `block_hash`."""
    def best_chainlock_matches():
        return node.getbestchainlock()["blockhash"] == block_hash
    wait_until(best_chainlock_matches, timeout=timeout, sleep=0.1)
def wait_for_sporks_same(self, timeout=30):
    """Wait until every node reports the same spork state as node 0."""
    def sporks_match():
        reference = self.nodes[0].spork('show')
        for node in self.nodes[1:]:
            if node.spork('show') != reference:
                return False
        return True
    wait_until(sporks_match, timeout=timeout, sleep=0.5)
def wait_for_quorum_phase(self, phase, check_received_messages, check_received_messages_count, timeout=30):
    """Wait until every masternode's llmq_5_60 DKG session is in `phase`.

    When `check_received_messages` names a dkgstatus session field,
    additionally require each masternode to have received at least
    `check_received_messages_count` of those messages.
    """
    def all_mns_in_phase():
        for mn in self.mninfo:
            session = mn.node.quorum("dkgstatus")["session"]
            if "llmq_5_60" not in session:
                return False
            status = session["llmq_5_60"]
            if "phase" not in status or status["phase"] != phase:
                return False
            if check_received_messages is not None:
                if status[check_received_messages] < check_received_messages_count:
                    return False
        # Every masternode passed all checks.
        return True
    wait_until(all_mns_in_phase, timeout=timeout, sleep=0.1)
def wait_for_quorum_commitment(self, timeout=15):
    """Wait until every node has a minable llmq_5_60 final commitment."""
    def commitment_ready_everywhere():
        for node in self.nodes:
            status = node.quorum("dkgstatus")
            if "minableCommitments" not in status:
                return False
            if "llmq_5_60" not in status["minableCommitments"]:
                return False
        return True
    wait_until(commitment_ready_everywhere, timeout=timeout, sleep=0.1)
def mine_quorum(self, expected_contributions=5, expected_complaints=0, expected_justifications=0, expected_commitments=5):
    """Drive one full LLMQ DKG (llmq_5_60) on regtest and return the new
    quorum's hash.

    Walks the chain through all six DKG phases, verifying at each phase
    that the masternodes received the expected number of DKG messages,
    then mines the final commitment and waits for it to appear in the
    quorum list.
    """
    self.log.info("Mining quorum: expected_contributions=%d, expected_complaints=%d, expected_justifications=%d, "
                  "expected_commitments=%d" % (expected_contributions, expected_complaints,
                                               expected_justifications, expected_commitments))

    def bump_and_mine(num_blocks):
        # Advance mocktime so freshly mined blocks get new timestamps,
        # mine `num_blocks` on the controller node, then sync the chain.
        # (Extracted helper: this exact sequence was repeated for each
        # of the five message-exchange phases.)
        self.bump_mocktime(1)
        set_node_times(self.nodes, self.mocktime)
        self.nodes[0].generate(num_blocks)
        sync_blocks(self.nodes)

    quorums = self.nodes[0].quorum("list")
    # move forward to next DKG
    # NOTE(review): skip_count is always in 1..24; if the tip is already
    # on a DKG boundary this mines a full extra cycle — presumably
    # intentional, confirm if changing.
    skip_count = 24 - (self.nodes[0].getblockcount() % 24)
    if skip_count != 0:
        self.bump_mocktime(1)
        set_node_times(self.nodes, self.mocktime)
        self.nodes[0].generate(skip_count)
    sync_blocks(self.nodes)

    self.log.info("Waiting for phase 1 (init)")
    self.wait_for_quorum_phase(1, None, 0)
    # Give nodes some time to connect to neighbors
    time.sleep(2)
    bump_and_mine(2)

    self.log.info("Waiting for phase 2 (contribute)")
    self.wait_for_quorum_phase(2, "receivedContributions", expected_contributions)
    bump_and_mine(2)

    self.log.info("Waiting for phase 3 (complain)")
    self.wait_for_quorum_phase(3, "receivedComplaints", expected_complaints)
    bump_and_mine(2)

    self.log.info("Waiting for phase 4 (justify)")
    self.wait_for_quorum_phase(4, "receivedJustifications", expected_justifications)
    bump_and_mine(2)

    self.log.info("Waiting for phase 5 (commit)")
    self.wait_for_quorum_phase(5, "receivedPrematureCommitments", expected_commitments)
    bump_and_mine(2)

    self.log.info("Waiting for phase 6 (mining)")
    self.wait_for_quorum_phase(6, None, 0)
    self.log.info("Waiting final commitment")
    self.wait_for_quorum_commitment()
    self.log.info("Mining final commitment")
    self.bump_mocktime(1)
    set_node_times(self.nodes, self.mocktime)
    self.nodes[0].generate(1)
    # Keep mining one block at a time until the quorum list changes,
    # i.e. until the final commitment made it into a block.
    while quorums == self.nodes[0].quorum("list"):
        time.sleep(2)
        self.bump_mocktime(1)
        set_node_times(self.nodes, self.mocktime)
        self.nodes[0].generate(1)
    sync_blocks(self.nodes)

    new_quorum = self.nodes[0].quorum("list", 1)["llmq_5_60"][0]
    quorum_info = self.nodes[0].quorum("info", 100, new_quorum)

    # Mine 8 (SIGN_HEIGHT_OFFSET) more blocks to make sure that the new quorum gets eligable for signing sessions
    self.nodes[0].generate(8)
    sync_blocks(self.nodes)

    self.log.info("New quorum: height=%d, quorumHash=%s, minedBlock=%s" % (quorum_info["height"], new_quorum, quorum_info["minedBlock"]))

    return new_quorum
class ComparisonTestFramework(BitcoinTestFramework):
    """Test framework for doing p2p comparison testing

    Sets up some dashd binaries:
    - 1 binary: test binary
    - 2 binaries: 1 test binary, 1 ref binary
    - n>2 binaries: 1 test binary, n-1 ref binaries"""

    def set_test_params(self):
        # Two nodes (one test binary, one reference binary), clean chain.
        self.num_nodes = 2
        self.setup_clean_chain = True

    def add_options(self, parser):
        # Both options default to the BITCOIND env var, falling back to "dashd".
        for flag, dest, helptext in (
                ("--testbinary", "testbinary", "dashd binary to test"),
                ("--refbinary", "refbinary", "dashd binary to use for reference nodes (if any)")):
            parser.add_option(flag, dest=dest,
                              default=os.getenv("BITCOIND", "dashd"),
                              help=helptext)

    def setup_network(self):
        # Subclasses may pre-set self.extra_args; otherwise whitelist localhost
        # on every node so the comparison peers are never banned.
        default_args = [['-whitelist=127.0.0.1']] * self.num_nodes
        extra_args = getattr(self, "extra_args", default_args)
        # Node 0 runs the binary under test; all others run the reference.
        binaries = [self.options.testbinary] + [self.options.refbinary] * (self.num_nodes - 1)
        self.add_nodes(self.num_nodes, extra_args, binary=binaries)
        self.start_nodes()
class SkipTest(Exception):
    """This exception is raised to skip a test."""
    def __init__(self, message):
        # Forward the message to Exception so that str(e) and e.args carry
        # the skip reason (the original stored it only on self.message,
        # leaving str(e) empty).
        super().__init__(message)
        self.message = message