#!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Copyright (c) 2014-2019 The Dash Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Base class for RPC testing."""

from collections import deque
from concurrent.futures import ThreadPoolExecutor
from enum import Enum
import logging
import optparse
import os
import shutil
import subprocess
import sys
import tempfile
import time
import traceback  # used when dumping debug logs on failure

from .util import (
    PortSeed,
    GENESISTIME,
    MAX_NODES,
    assert_equal,
    bitcoind_processes,
    check_json_precision,
    connect_nodes_bi,
    connect_nodes,
    copy_datadir,
    disable_mocktime,
    disconnect_nodes,
    enable_coverage,
    get_mocktime,
    get_rpc_proxy,
    initialize_datadir,
    log_filename,
    p2p_port,
    rpc_url,
    set_cache_mocktime,
    set_genesis_mocktime,
    set_mocktime,
    set_node_times,
    satoshi_round,
    _start_node,
    _start_nodes,
    _stop_node,
    _stop_nodes,
    sync_blocks,
    sync_mempools,
    sync_masternodes,
    wait_for_bitcoind_start,
    wait_to_sync,
)
from .authproxy import JSONRPCException


class TestStatus(Enum):
    PASSED = 1
    FAILED = 2
    SKIPPED = 3

TEST_EXIT_PASSED = 0
TEST_EXIT_FAILED = 1
TEST_EXIT_SKIPPED = 77


class BitcoinTestFramework(object):
    """Base class for a bitcoin test script.

    Individual bitcoin test scripts should subclass this class and override the following methods:

    - __init__()
    - add_options()
    - setup_chain()
    - setup_network()
    - run_test()

    The main() method should not be overridden.

    This class also contains various public and private helper methods."""
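
    # A minimal sketch of a subclass (MyTest and its body are hypothetical,
    # shown only to illustrate the override points listed above):
    #
    #     class MyTest(BitcoinTestFramework):
    #         def __init__(self):
    #             super().__init__()
    #             self.num_nodes = 2
    #
    #         def run_test(self):
    #             self.nodes[0].generate(1)
    #             self.sync_all()
    #
    #     if __name__ == '__main__':
    #         MyTest().main()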

    # Methods to override in subclass test scripts.

    def __init__(self):
        self.num_nodes = 4
        self.setup_clean_chain = False
        self.nodes = None

    def add_options(self, parser):
        pass

    def setup_chain(self):
        self.log.info("Initializing test directory " + self.options.tmpdir)
        if self.setup_clean_chain:
            self._initialize_chain_clean(self.options.tmpdir, self.num_nodes)
            set_genesis_mocktime()
        else:
            self._initialize_chain(self.options.tmpdir, self.num_nodes, self.options.cachedir)
            set_cache_mocktime()

    def setup_network(self):
        self.setup_nodes()

        # Connect the nodes as a "chain". This allows us
        # to split the network between nodes 1 and 2 to get
        # two halves that can work on competing chains.
        for i in range(self.num_nodes - 1):
            connect_nodes_bi(self.nodes, i, i + 1)
        self.sync_all()

    def setup_nodes(self, stderr=None):
        extra_args = None
        if hasattr(self, "extra_args"):
            extra_args = self.extra_args
        self.nodes = _start_nodes(self.num_nodes, self.options.tmpdir, extra_args, stderr=stderr)

    def run_test(self):
        raise NotImplementedError

    # Main function. This should not be overridden by the subclass test scripts.

    def main(self):
        parser = optparse.OptionParser(usage="%prog [options]")
        parser.add_option("--nocleanup", dest="nocleanup", default=False, action="store_true",
                          help="Leave dashds and test.* datadir on exit or error")
        parser.add_option("--noshutdown", dest="noshutdown", default=False, action="store_true",
                          help="Don't stop dashds after the test execution")
        parser.add_option("--srcdir", dest="srcdir", default=os.path.normpath(os.path.dirname(os.path.realpath(__file__)) + "/../../../src"),
                          help="Source directory containing dashd/dash-cli (default: %default)")
        parser.add_option("--cachedir", dest="cachedir", default=os.path.normpath(os.path.dirname(os.path.realpath(__file__)) + "/../../cache"),
                          help="Directory for caching pregenerated datadirs")
        parser.add_option("--tmpdir", dest="tmpdir", help="Root directory for datadirs")
        parser.add_option("-l", "--loglevel", dest="loglevel", default="INFO",
                          help="log events at this level and higher to the console. Can be set to DEBUG, INFO, WARNING, ERROR or CRITICAL. Passing --loglevel DEBUG will output all logs to console. Note that logs at all levels are always written to the test_framework.log file in the temporary test directory.")
        parser.add_option("--tracerpc", dest="trace_rpc", default=False, action="store_true",
                          help="Print out all RPC calls as they are made")
        parser.add_option("--portseed", dest="port_seed", default=os.getpid(), type='int',
                          help="The seed to use for assigning port numbers (default: current process id)")
        parser.add_option("--coveragedir", dest="coveragedir",
                          help="Write tested RPC commands into this directory")
        parser.add_option("--configfile", dest="configfile",
                          help="Location of the test framework config file")
        self.add_options(parser)
        (self.options, self.args) = parser.parse_args()

        if self.options.coveragedir:
            enable_coverage(self.options.coveragedir)

        PortSeed.n = self.options.port_seed

        os.environ['PATH'] = self.options.srcdir + ":" + self.options.srcdir + "/qt:" + os.environ['PATH']

        check_json_precision()

        # Set up temp directory and start logging
        if self.options.tmpdir:
            os.makedirs(self.options.tmpdir, exist_ok=False)
        else:
            self.options.tmpdir = tempfile.mkdtemp(prefix="test")
        self._start_logging()

        success = TestStatus.FAILED

        try:
            self.setup_chain()
            self.setup_network()
            self.run_test()
            success = TestStatus.PASSED
        except JSONRPCException as e:
            self.log.exception("JSONRPC error")
        except SkipTest as e:
            self.log.warning("Test Skipped: %s" % e.message)
            success = TestStatus.SKIPPED
        except AssertionError as e:
            self.log.exception("Assertion failed")
        except KeyError as e:
            self.log.exception("Key error")
        except Exception as e:
            self.log.exception("Unexpected exception caught during testing")
        except KeyboardInterrupt as e:
            self.log.warning("Exiting after keyboard interrupt")

        if not self.options.noshutdown:
            self.log.info("Stopping nodes")
            try:
                if self.nodes:
                    self.stop_nodes()
            except BaseException as e:
                # Mark the test as failed if shutdown itself fails.
                success = TestStatus.FAILED
                self.log.exception("Unexpected exception caught during shutdown")
        else:
            self.log.info("Note: dashds were not stopped and may still be running")

        if not self.options.nocleanup and not self.options.noshutdown and success != TestStatus.FAILED:
            self.log.info("Cleaning up")
            shutil.rmtree(self.options.tmpdir)
        else:
            self.log.warning("Not cleaning up dir %s" % self.options.tmpdir)
            if os.getenv("PYTHON_DEBUG", ""):
                # Dump the end of the debug logs, to aid in debugging rare
                # travis failures.
                import glob
                filenames = [self.options.tmpdir + "/test_framework.log"]
                filenames += glob.glob(self.options.tmpdir + "/node*/regtest/debug.log")
                MAX_LINES_TO_PRINT = 1000
                for fn in filenames:
                    try:
                        with open(fn, 'r') as f:
                            print("From", fn, ":")
                            print("".join(deque(f, MAX_LINES_TO_PRINT)))
                    except OSError:
                        print("Opening file %s failed." % fn)
                        traceback.print_exc()

        if success == TestStatus.PASSED:
            self.log.info("Tests successful")
            sys.exit(TEST_EXIT_PASSED)
        elif success == TestStatus.SKIPPED:
            self.log.info("Test skipped")
            sys.exit(TEST_EXIT_SKIPPED)
        else:
            self.log.error("Test failed. Test logging available at %s/test_framework.log", self.options.tmpdir)
            logging.shutdown()
            sys.exit(TEST_EXIT_FAILED)

    # Public helper methods. These can be accessed by the subclass test scripts.

    def start_node(self, i, dirname, extra_args=None, rpchost=None, timewait=None, binary=None, stderr=None):
        return _start_node(i, dirname, extra_args, rpchost, timewait, binary, stderr)

    def start_nodes(self, num_nodes, dirname, extra_args=None, rpchost=None, timewait=None, binary=None, stderr=None):
        return _start_nodes(num_nodes, dirname, extra_args, rpchost, timewait, binary, stderr)

    def stop_node(self, num_node):
        _stop_node(self.nodes[num_node], num_node)

    def stop_nodes(self):
        _stop_nodes(self.nodes)

    def split_network(self):
        """
        Split the network of four nodes into nodes 0/1 and 2/3.
        """
        disconnect_nodes(self.nodes[1], 2)
        disconnect_nodes(self.nodes[2], 1)
        self.sync_all([self.nodes[:2], self.nodes[2:]])

    def join_network(self):
        """
        Join the (previously split) network halves together.
        """
        connect_nodes_bi(self.nodes, 1, 2)
        self.sync_all()

    def sync_all(self, node_groups=None):
        if not node_groups:
            node_groups = [self.nodes]
        for group in node_groups:
            sync_blocks(group)
            sync_mempools(group)
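
    # Example (a sketch): fork the network, let the halves build competing
    # chains, then rejoin them so they converge on the chain with most work:
    #
    #     self.split_network()
    #     self.nodes[0].generate(1)
    #     self.nodes[2].generate(2)
    #     self.join_network()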

    # Private helper methods. These should not be accessed by the subclass test scripts.

    def _start_logging(self):
        # Add logger and logging handlers
        self.log = logging.getLogger('TestFramework')
        self.log.setLevel(logging.DEBUG)
        # Create file handler to log all messages
        fh = logging.FileHandler(self.options.tmpdir + '/test_framework.log')
        fh.setLevel(logging.DEBUG)
        # Create console handler to log messages to stdout. By default this logs at level INFO and higher, but can be configured with --loglevel.
        ch = logging.StreamHandler(sys.stdout)
        # User can provide log level as a number or string (eg DEBUG). loglevel was caught as a string, so try to convert it to an int
        ll = int(self.options.loglevel) if self.options.loglevel.isdigit() else self.options.loglevel.upper()
        ch.setLevel(ll)
        # Format logs the same as bitcoind's debug.log with microsecond precision (so log files can be concatenated and sorted)
        formatter = logging.Formatter(fmt='%(asctime)s.%(msecs)03d000 %(name)s (%(levelname)s): %(message)s', datefmt='%Y-%m-%d %H:%M:%S')
        formatter.converter = time.gmtime
        fh.setFormatter(formatter)
        ch.setFormatter(formatter)
        # add the handlers to the logger
        self.log.addHandler(fh)
        self.log.addHandler(ch)

        if self.options.trace_rpc:
            rpc_logger = logging.getLogger("BitcoinRPC")
            rpc_logger.setLevel(logging.DEBUG)
            rpc_handler = logging.StreamHandler(sys.stdout)
            rpc_handler.setLevel(logging.DEBUG)
            rpc_logger.addHandler(rpc_handler)

    def _initialize_chain(self, test_dir, num_nodes, cachedir, extra_args=None, stderr=None):
        """Initialize a pre-mined blockchain for use by the test.

        Create a cache of a 200-block-long chain (with wallet) for MAX_NODES.
        Afterward, create num_nodes copies from the cache."""

        assert num_nodes <= MAX_NODES
        create_cache = False
        for i in range(MAX_NODES):
            if not os.path.isdir(os.path.join(cachedir, 'node' + str(i))):
                create_cache = True
                break

        if create_cache:
            self.log.debug("Creating data directories from cached datadir")

            # find and delete old cache directories if any exist
            for i in range(MAX_NODES):
                if os.path.isdir(os.path.join(cachedir, "node" + str(i))):
                    shutil.rmtree(os.path.join(cachedir, "node" + str(i)))

            # Create cache directories, run dashds:
            set_genesis_mocktime()
            for i in range(MAX_NODES):
                datadir = initialize_datadir(cachedir, i)
                args = [os.getenv("DASHD", "dashd"), "-server", "-keypool=1", "-datadir=" + datadir, "-discover=0", "-mocktime=" + str(GENESISTIME)]
                if i > 0:
                    args.append("-connect=127.0.0.1:" + str(p2p_port(0)))
                if extra_args is not None:
                    args.extend(extra_args)
                bitcoind_processes[i] = subprocess.Popen(args, stderr=stderr)
                self.log.debug("initialize_chain: dashd started, waiting for RPC to come up")
                wait_for_bitcoind_start(bitcoind_processes[i], rpc_url(i), i)
                self.log.debug("initialize_chain: RPC successfully started")

            self.nodes = []
            for i in range(MAX_NODES):
                try:
                    self.nodes.append(get_rpc_proxy(rpc_url(i), i))
                except:
                    self.log.exception("Error connecting to node %d" % i)
                    sys.exit(1)

            # Create a 200-block-long chain; each of the 4 first nodes
            # gets 25 mature blocks and 25 immature.
            # Note: To preserve compatibility with older versions of
            # initialize_chain, only 4 nodes will generate coins.
            #
            # Blocks are created with timestamps 156 seconds apart, starting
            # from the genesis mocktime (GENESISTIME).
            block_time = GENESISTIME
            for i in range(2):
                for peer in range(4):
                    for j in range(25):
                        set_node_times(self.nodes, block_time)
                        self.nodes[peer].generate(1)
                        block_time += 156
                    # Must sync before next peer starts generating blocks
                    sync_blocks(self.nodes)

            # Shut them down, and clean up cache directories:
            self.stop_nodes()
            self.nodes = []
            disable_mocktime()
            for i in range(MAX_NODES):
                os.remove(log_filename(cachedir, i, "debug.log"))
                os.remove(log_filename(cachedir, i, "db.log"))
                os.remove(log_filename(cachedir, i, "peers.dat"))
                os.remove(log_filename(cachedir, i, "fee_estimates.dat"))

        for i in range(num_nodes):
            from_dir = os.path.join(cachedir, "node" + str(i))
            to_dir = os.path.join(test_dir, "node" + str(i))
            shutil.copytree(from_dir, to_dir)
            initialize_datadir(test_dir, i)  # Overwrite port/rpcport in dash.conf

    def _initialize_chain_clean(self, test_dir, num_nodes):
        """Initialize empty blockchain for use by the test.

        Create an empty blockchain and num_nodes wallets.
        Useful if a test case wants complete control over initialization."""
        for i in range(num_nodes):
            initialize_datadir(test_dir, i)


MASTERNODE_COLLATERAL = 1000


class MasternodeInfo:
    def __init__(self, proTxHash, ownerAddr, votingAddr, pubKeyOperator, keyOperator, collateral_address, collateral_txid, collateral_vout):
        self.proTxHash = proTxHash
        self.ownerAddr = ownerAddr
        self.votingAddr = votingAddr
        self.pubKeyOperator = pubKeyOperator
        self.keyOperator = keyOperator
        self.collateral_address = collateral_address
        self.collateral_txid = collateral_txid
        self.collateral_vout = collateral_vout


class DashTestFramework(BitcoinTestFramework):
    def __init__(self, num_nodes, masternodes_count, extra_args, fast_dip3_enforcement=False):
        super().__init__()
        self.mn_count = masternodes_count
        self.num_nodes = num_nodes
        self.mninfo = []
        self.setup_clean_chain = True
        self.is_network_split = False
        # additional args
        self.extra_args = extra_args
        self.extra_args += ["-sporkkey=cP4EKFyJsHT39LDqgdcB43Y3YXjNyjb5Fuas1GQSeAtjnZWmZEQK"]

        self.fast_dip3_enforcement = fast_dip3_enforcement
        if fast_dip3_enforcement:
            self.extra_args += ["-dip3params=30:50"]
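
    # A minimal sketch of a subclass (MyDashTest is hypothetical): 6 nodes in
    # total, 5 of them masternodes, no extra args, with DIP3 enforced early:
    #
    #     class MyDashTest(DashTestFramework):
    #         def __init__(self):
    #             super().__init__(6, 5, [], fast_dip3_enforcement=True)
    #
    #         def run_test(self):
    #             self.mine_quorum()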

    def create_simple_node(self):
        idx = len(self.nodes)
        args = self.extra_args
        self.nodes.append(self.start_node(idx, self.options.tmpdir, args))
        for i in range(0, idx):
            connect_nodes(self.nodes[i], idx)

    def prepare_masternodes(self):
        for idx in range(0, self.mn_count):
            self.prepare_masternode(idx)

    def prepare_masternode(self, idx):
        bls = self.nodes[0].bls('generate')
        address = self.nodes[0].getnewaddress()
        txid = self.nodes[0].sendtoaddress(address, MASTERNODE_COLLATERAL)

        txraw = self.nodes[0].getrawtransaction(txid, True)
        collateral_vout = 0
        for vout_idx in range(0, len(txraw["vout"])):
            vout = txraw["vout"][vout_idx]
            if vout["value"] == MASTERNODE_COLLATERAL:
                collateral_vout = vout_idx
        self.nodes[0].lockunspent(False, [{'txid': txid, 'vout': collateral_vout}])

        # send to same address to reserve some funds for fees
        self.nodes[0].sendtoaddress(address, 0.001)

        ownerAddr = self.nodes[0].getnewaddress()
        votingAddr = self.nodes[0].getnewaddress()
        rewardsAddr = self.nodes[0].getnewaddress()

        port = p2p_port(len(self.nodes) + idx)
        if (idx % 2) == 0:
            self.nodes[0].lockunspent(True, [{'txid': txid, 'vout': collateral_vout}])
            proTxHash = self.nodes[0].protx('register_fund', address, '127.0.0.1:%d' % port, ownerAddr, bls['public'], votingAddr, 0, rewardsAddr, address)
        else:
            self.nodes[0].generate(1)
            proTxHash = self.nodes[0].protx('register', txid, collateral_vout, '127.0.0.1:%d' % port, ownerAddr, bls['public'], votingAddr, 0, rewardsAddr, address)

        self.nodes[0].generate(1)
        self.mninfo.append(MasternodeInfo(proTxHash, ownerAddr, votingAddr, bls['public'], bls['secret'], address, txid, collateral_vout))

        self.sync_all()

    def remove_masternode(self, idx):
        mn = self.mninfo[idx]
        rawtx = self.nodes[0].createrawtransaction([{"txid": mn.collateral_txid, "vout": mn.collateral_vout}], {self.nodes[0].getnewaddress(): 999.9999})
        rawtx = self.nodes[0].signrawtransaction(rawtx)
        self.nodes[0].sendrawtransaction(rawtx["hex"])
        self.nodes[0].generate(1)
        self.sync_all()
        self.mninfo.remove(mn)

    def prepare_datadirs(self):
        # stop faucet node so that we can copy the datadir
        self.stop_node(0)

        start_idx = len(self.nodes)
        for idx in range(0, self.mn_count):
            copy_datadir(0, idx + start_idx, self.options.tmpdir)

        # restart faucet node
        self.nodes[0] = self.start_node(0, self.options.tmpdir, self.extra_args)

    def start_masternodes(self):
        start_idx = len(self.nodes)

        # Reserve slots for the masternode nodes first; they are filled in
        # by do_start below.
        for idx in range(0, self.mn_count):
            self.nodes.append(None)
        executor = ThreadPoolExecutor(max_workers=20)

        def do_start(idx):
            args = ['-masternode=1',
                    '-masternodeblsprivkey=%s' % self.mninfo[idx].keyOperator] + self.extra_args
            node = self.start_node(idx + start_idx, self.options.tmpdir, args)
            self.mninfo[idx].nodeIdx = idx + start_idx
            self.mninfo[idx].node = node
            self.nodes[idx + start_idx] = node
            wait_to_sync(node, True)

        def do_connect(idx):
            for i in range(0, idx + 1):
                connect_nodes(self.nodes[idx + start_idx], i)

        jobs = []

        # start up nodes in parallel
        for idx in range(0, self.mn_count):
            jobs.append(executor.submit(do_start, idx))

        # wait for all nodes to start up
        for job in jobs:
            job.result()
        jobs.clear()

        # connect nodes in parallel
        for idx in range(0, self.mn_count):
            jobs.append(executor.submit(do_connect, idx))

        # wait for all nodes to connect
        for job in jobs:
            job.result()
        jobs.clear()

        sync_masternodes(self.nodes, True)

        executor.shutdown()

    def setup_network(self):
        self.nodes = []
        # create faucet node for collateral and transactions
        self.nodes.append(self.start_node(0, self.options.tmpdir, self.extra_args))
        required_balance = MASTERNODE_COLLATERAL * self.mn_count + 1
        while self.nodes[0].getbalance() < required_balance:
            set_mocktime(get_mocktime() + 1)
            set_node_times(self.nodes, get_mocktime())
            self.nodes[0].generate(1)
        # create connected simple nodes
        for i in range(0, self.num_nodes - self.mn_count - 1):
            self.create_simple_node()
        sync_masternodes(self.nodes, True)

        # activate DIP3
        if not self.fast_dip3_enforcement:
            while self.nodes[0].getblockcount() < 500:
                self.nodes[0].generate(10)
        self.sync_all()

        # create masternodes
        self.prepare_masternodes()
        self.prepare_datadirs()
        self.start_masternodes()

        set_mocktime(get_mocktime() + 1)
        set_node_times(self.nodes, get_mocktime())
        self.nodes[0].generate(1)
        # sync nodes
        self.sync_all()

        set_mocktime(get_mocktime() + 1)
        set_node_times(self.nodes, get_mocktime())

        mn_info = self.nodes[0].masternodelist("status")
        assert len(mn_info) == self.mn_count
        for status in mn_info.values():
            assert status == 'ENABLED'

    def get_autois_bip9_status(self, node):
        info = node.getblockchaininfo()
        # we reuse the dip3 deployment
        return info['bip9_softforks']['dip0003']['status']

    def activate_autois_bip9(self, node):
        # Sync the nodes periodically: if we sync them too often, activation
        # takes too much time; if we sync them too rarely, nodes fail to
        # update their state and the BIP9 status is not updated. So nodes
        # are synced once every sync_period blocks.
        counter = 0
        sync_period = 10

        while self.get_autois_bip9_status(node) == 'defined':
            set_mocktime(get_mocktime() + 1)
            set_node_times(self.nodes, get_mocktime())
            node.generate(1)
            counter += 1
            if counter % sync_period == 0:
                # sync nodes
                self.sync_all()

        while self.get_autois_bip9_status(node) == 'started':
            set_mocktime(get_mocktime() + 1)
            set_node_times(self.nodes, get_mocktime())
            node.generate(1)
            counter += 1
            if counter % sync_period == 0:
                # sync nodes
                self.sync_all()

        while self.get_autois_bip9_status(node) == 'locked_in':
            set_mocktime(get_mocktime() + 1)
            set_node_times(self.nodes, get_mocktime())
            node.generate(1)
            counter += 1
            if counter % sync_period == 0:
                # sync nodes
                self.sync_all()

        # sync nodes
        self.sync_all()

        assert self.get_autois_bip9_status(node) == 'active'

    def get_autois_spork_state(self, node):
        info = node.spork('active')
        return info['SPORK_16_INSTANTSEND_AUTOLOCKS']

    def set_autois_spork_state(self, node, state):
        # Increment mocktime as otherwise nodes will not update sporks
        set_mocktime(get_mocktime() + 1)
        set_node_times(self.nodes, get_mocktime())
        if state:
            value = 0
        else:
            value = 4070908800
        node.spork('SPORK_16_INSTANTSEND_AUTOLOCKS', value)
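
    # Example (a sketch): enable the autolocks spork from the faucet node and
    # wait until all nodes agree on the spork state:
    #
    #     self.set_autois_spork_state(self.nodes[0], True)
    #     self.wait_for_sporks_same()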

    def create_raw_tx(self, node_from, node_to, amount, min_inputs, max_inputs):
        assert min_inputs <= max_inputs
        # fill inputs
        inputs = []
        balances = node_from.listunspent()
        in_amount = 0.0
        last_amount = 0.0
        for tx in balances:
            if len(inputs) < min_inputs:
                input = {}
                input['txid'] = tx['txid']
                input['vout'] = tx['vout']
                in_amount += float(tx['amount'])
                inputs.append(input)
            elif in_amount > amount:
                break
            elif len(inputs) < max_inputs:
                input = {}
                input['txid'] = tx['txid']
                input['vout'] = tx['vout']
                in_amount += float(tx['amount'])
                inputs.append(input)
            else:
                # replace the last input with the current (larger) one to
                # reach the target amount without exceeding max_inputs
                input = {}
                input['txid'] = tx['txid']
                input['vout'] = tx['vout']
                in_amount -= last_amount
                in_amount += float(tx['amount'])
                inputs[-1] = input
            last_amount = float(tx['amount'])

        assert len(inputs) >= min_inputs
        assert len(inputs) <= max_inputs
        assert in_amount >= amount
        # fill outputs
        receiver_address = node_to.getnewaddress()
        change_address = node_from.getnewaddress()
        fee = 0.001
        outputs = {}
        outputs[receiver_address] = satoshi_round(amount)
        outputs[change_address] = satoshi_round(in_amount - amount - fee)
        rawtx = node_from.createrawtransaction(inputs, outputs)
        ret = node_from.signrawtransaction(rawtx)
        decoded = node_from.decoderawtransaction(ret['hex'])
        ret = {**decoded, **ret}
        return ret
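
    # Example (a sketch mirroring send_simple_tx below): build and broadcast a
    # transaction paying 1.0 with between 1 and 4 inputs:
    #
    #     tx = self.create_raw_tx(self.nodes[0], self.nodes[1], 1.0, 1, 4)
    #     txid = self.nodes[0].sendrawtransaction(tx['hex'])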

    # sends a regular instantsend transaction with a high fee
    def send_regular_instantsend(self, sender, receiver, check_fee=True):
        receiver_addr = receiver.getnewaddress()
        txid = sender.instantsendtoaddress(receiver_addr, 1.0)

        if check_fee:
            MIN_FEE = satoshi_round(-0.0001)
            fee = sender.gettransaction(txid)['fee']
            expected_fee = MIN_FEE * len(sender.getrawtransaction(txid, True)['vin'])
            assert_equal(fee, expected_fee)

        return self.wait_for_instantlock(txid, sender)

    # sends a simple tx; it should become locked if autolocks are allowed
    def send_simple_tx(self, sender, receiver):
        raw_tx = self.create_raw_tx(sender, receiver, 1.0, 1, 4)
        txid = self.nodes[0].sendrawtransaction(raw_tx['hex'])
        self.sync_all()
        return self.wait_for_instantlock(txid, sender)

    # sends a complex tx; it should never become locked by the old instantsend logic
    def send_complex_tx(self, sender, receiver):
        raw_tx = self.create_raw_tx(sender, receiver, 1.0, 5, 100)
        txid = sender.sendrawtransaction(raw_tx['hex'])
        self.sync_all()
        return self.wait_for_instantlock(txid, sender)

    def wait_for_instantlock(self, txid, node):
        # wait for instantsend locks
        start = time.time()
        locked = False
        while True:
            try:
                is_tx = node.getrawtransaction(txid, True)
                if is_tx['instantlock']:
                    locked = True
                    break
            except:
                # TX not received yet?
                pass
            if time.time() > start + 10:
                break
            time.sleep(0.5)
        return locked

    def wait_for_sporks_same(self, timeout=30):
        st = time.time()
        while time.time() < st + timeout:
            if self.check_sporks_same():
                return
            time.sleep(0.5)
        raise AssertionError("wait_for_sporks_same timed out")

    def check_sporks_same(self):
        sporks = self.nodes[0].spork('show')
        for node in self.nodes[1:]:
            sporks2 = node.spork('show')
            if sporks != sporks2:
                return False
        return True

    def wait_for_quorum_phase(self, phase, check_received_messages, check_received_messages_count, timeout=30):
        t = time.time()
        while time.time() - t < timeout:
            all_ok = True
            for mn in self.mninfo:
                s = mn.node.quorum("dkgstatus")["session"]
                if "llmq_5_60" not in s:
                    all_ok = False
                    break
                s = s["llmq_5_60"]
                if "phase" not in s:
                    all_ok = False
                    break
                if s["phase"] != phase:
                    all_ok = False
                    break
                if check_received_messages is not None:
                    if s[check_received_messages] < check_received_messages_count:
                        all_ok = False
                        break
            if all_ok:
                return
            time.sleep(0.1)
        raise AssertionError("wait_for_quorum_phase timed out")

    def wait_for_quorum_commitment(self, timeout=15):
        t = time.time()
        while time.time() - t < timeout:
            all_ok = True
            for node in self.nodes:
                s = node.quorum("dkgstatus")
                if "minableCommitments" not in s:
                    all_ok = False
                    break
                s = s["minableCommitments"]
                if "llmq_5_60" not in s:
                    all_ok = False
                    break
            if all_ok:
                return
            time.sleep(0.1)
        raise AssertionError("wait_for_quorum_commitment timed out")

    def mine_quorum(self, expected_contributions=5, expected_complaints=0, expected_justifications=0, expected_commitments=5):
        quorums = self.nodes[0].quorum("list")

        # move forward to next DKG
        skip_count = 24 - (self.nodes[0].getblockcount() % 24)
        if skip_count != 0:
            set_mocktime(get_mocktime() + 1)
            set_node_times(self.nodes, get_mocktime())
            self.nodes[0].generate(skip_count)
        sync_blocks(self.nodes)

        # Make sure all reached phase 1 (init)
        self.wait_for_quorum_phase(1, None, 0)
        # Give nodes some time to connect to neighbors
        time.sleep(2)
        set_mocktime(get_mocktime() + 1)
        set_node_times(self.nodes, get_mocktime())
        self.nodes[0].generate(2)
        sync_blocks(self.nodes)

        # Make sure all reached phase 2 (contribute) and received all contributions
        self.wait_for_quorum_phase(2, "receivedContributions", expected_contributions)
        set_mocktime(get_mocktime() + 1)
        set_node_times(self.nodes, get_mocktime())
        self.nodes[0].generate(2)
        sync_blocks(self.nodes)

        # Make sure all reached phase 3 (complain) and received all complaints
        self.wait_for_quorum_phase(3, "receivedComplaints", expected_complaints)
        set_mocktime(get_mocktime() + 1)
        set_node_times(self.nodes, get_mocktime())
        self.nodes[0].generate(2)
        sync_blocks(self.nodes)

        # Make sure all reached phase 4 (justify)
        self.wait_for_quorum_phase(4, "receivedJustifications", expected_justifications)
        set_mocktime(get_mocktime() + 1)
        set_node_times(self.nodes, get_mocktime())
        self.nodes[0].generate(2)
        sync_blocks(self.nodes)

        # Make sure all reached phase 5 (commit)
        self.wait_for_quorum_phase(5, "receivedPrematureCommitments", expected_commitments)
        set_mocktime(get_mocktime() + 1)
        set_node_times(self.nodes, get_mocktime())
        self.nodes[0].generate(2)
        sync_blocks(self.nodes)

        # Make sure all reached phase 6 (mining)
        self.wait_for_quorum_phase(6, None, 0)

        # Wait for final commitment
        self.wait_for_quorum_commitment()

        # mine the final commitment
        set_mocktime(get_mocktime() + 1)
        set_node_times(self.nodes, get_mocktime())
        self.nodes[0].generate(1)
        while quorums == self.nodes[0].quorum("list"):
            time.sleep(2)
            set_mocktime(get_mocktime() + 1)
            set_node_times(self.nodes, get_mocktime())
            self.nodes[0].generate(1)
        sync_blocks(self.nodes)

        new_quorum = self.nodes[0].quorum("list", 1)["llmq_5_60"][0]

        # Mine 8 (SIGN_HEIGHT_OFFSET) more blocks to make sure that the new quorum gets eligible for signing sessions
        self.nodes[0].generate(8)

        sync_blocks(self.nodes)

        return new_quorum
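
    # Example (a sketch): mine a quorum and check it is now the newest one
    # known to the faucet node:
    #
    #     quorum_hash = self.mine_quorum()
    #     assert quorum_hash == self.nodes[0].quorum("list", 1)["llmq_5_60"][0]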


class SkipTest(Exception):
    """This exception is raised to skip a test"""
    def __init__(self, message):
        self.message = message
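
# Example (a sketch): run_test() can bail out early by raising SkipTest,
# which main() turns into the TEST_EXIT_SKIPPED exit code:
#
#     raise SkipTest("this test requires a feature that is not compiled in")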


# Test framework for doing p2p comparison testing, which sets up some bitcoind
# binaries:
# 1 binary: test binary
# 2 binaries: 1 test binary, 1 ref binary
# n>2 binaries: 1 test binary, n-1 ref binaries
class ComparisonTestFramework(BitcoinTestFramework):
    def __init__(self):
        super().__init__()
        self.num_nodes = 2
        self.setup_clean_chain = True

    def add_options(self, parser):
        parser.add_option("--testbinary", dest="testbinary",
                          default=os.getenv("BITCOIND", "dashd"),
                          help="dashd binary to test")
        parser.add_option("--refbinary", dest="refbinary",
                          default=os.getenv("BITCOIND", "dashd"),
                          help="dashd binary to use for reference nodes (if any)")

    def setup_network(self):
        extra_args = [['-whitelist=127.0.0.1']] * self.num_nodes
        if hasattr(self, "extra_args"):
            extra_args = self.extra_args
        self.nodes = self.start_nodes(
            self.num_nodes, self.options.tmpdir, extra_args,
            binary=[self.options.testbinary] +
            [self.options.refbinary] * (self.num_nodes - 1))