#!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Base class for RPC testing."""

import logging
import optparse
import os
import sys
import shutil
import tempfile
import traceback

from concurrent.futures import ThreadPoolExecutor
from time import time, sleep

from .util import (
    assert_equal,
    initialize_chain,
    start_node,
    start_nodes,
    connect_nodes_bi,
    connect_nodes,
    sync_blocks,
    sync_mempools,
    sync_masternodes,
    stop_nodes,
    stop_node,
    enable_coverage,
    check_json_precision,
    initialize_chain_clean,
    PortSeed,
    set_cache_mocktime,
    set_genesis_mocktime,
    get_mocktime,
    set_mocktime,
    set_node_times,
    p2p_port,
    satoshi_round,
    wait_to_sync,
    copy_datadir)
from .authproxy import JSONRPCException


class BitcoinTestFramework(object):

    def __init__(self):
        self.num_nodes = 4
        self.setup_clean_chain = False
        self.nodes = None

    def run_test(self):
        raise NotImplementedError

    def add_options(self, parser):
        pass

    def setup_chain(self):
        self.log.info("Initializing test directory " + self.options.tmpdir)
        if self.setup_clean_chain:
            initialize_chain_clean(self.options.tmpdir, self.num_nodes)
            set_genesis_mocktime()
        else:
            initialize_chain(self.options.tmpdir, self.num_nodes, self.options.cachedir)
            set_cache_mocktime()

    def stop_node(self, num_node):
        stop_node(self.nodes[num_node], num_node)

    def setup_nodes(self):
        return start_nodes(self.num_nodes, self.options.tmpdir)

    def setup_network(self, split=False):
        self.nodes = self.setup_nodes()

        # Connect the nodes as a "chain". This allows us
        # to split the network between nodes 1 and 2 to get
        # two halves that can work on competing chains.
        # When rejoining the network halves, connect the nodes from the
        # join point outward. This ensures that chains are properly
        # reorganised.
        if not split:
            connect_nodes_bi(self.nodes, 1, 2)
            sync_blocks(self.nodes[1:3])
            sync_mempools(self.nodes[1:3])

        connect_nodes_bi(self.nodes, 0, 1)
        connect_nodes_bi(self.nodes, 2, 3)
        self.is_network_split = split
        self.sync_all()

    def split_network(self):
        """
        Split the network of four nodes into nodes 0/1 and 2/3.
        """
        assert not self.is_network_split
        stop_nodes(self.nodes)
        self.setup_network(True)

    def sync_all(self):
        if self.is_network_split:
            sync_blocks(self.nodes[:2])
            sync_blocks(self.nodes[2:])
            sync_mempools(self.nodes[:2])
            sync_mempools(self.nodes[2:])
        else:
            sync_blocks(self.nodes)
            sync_mempools(self.nodes)

    def join_network(self):
        """
        Join the (previously split) network halves together.
        """
        assert self.is_network_split
        stop_nodes(self.nodes)
        self.setup_network(False)
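
    # Illustrative sketch (hypothetical test body, not part of this file):
    # a reorg test can split the network, mine competing chains on each half,
    # then rejoin and sync so the shorter chain reorganises onto the longer:
    #
    #     self.split_network()
    #     self.nodes[0].generate(1)   # half 0/1 mines one block
    #     self.nodes[2].generate(2)   # half 2/3 mines a longer chain
    #     self.join_network()
    #     self.sync_all()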

    def main(self):
        parser = optparse.OptionParser(usage="%prog [options]")
        parser.add_option("--nocleanup", dest="nocleanup", default=False, action="store_true",
                          help="Leave dashds and test.* datadir on exit or error")
        parser.add_option("--noshutdown", dest="noshutdown", default=False, action="store_true",
                          help="Don't stop dashds after the test execution")
        parser.add_option("--srcdir", dest="srcdir", default=os.path.normpath(os.path.dirname(os.path.realpath(__file__)) + "/../../../src"),
                          help="Source directory containing dashd/dash-cli (default: %default)")
        parser.add_option("--cachedir", dest="cachedir", default=os.path.normpath(os.path.dirname(os.path.realpath(__file__)) + "/../../cache"),
                          help="Directory for caching pregenerated datadirs")
        parser.add_option("--tmpdir", dest="tmpdir", default=tempfile.mkdtemp(prefix="test"),
                          help="Root directory for datadirs")
        parser.add_option("-l", "--loglevel", dest="loglevel", default="INFO",
                          help="log events at this level and higher to the console. Can be set to DEBUG, INFO, WARNING, ERROR or CRITICAL. Passing --loglevel DEBUG will output all logs to console. Note that logs at all levels are always written to the test_framework.log file in the temporary test directory.")
        parser.add_option("--tracerpc", dest="trace_rpc", default=False, action="store_true",
                          help="Print out all RPC calls as they are made")
        parser.add_option("--portseed", dest="port_seed", default=os.getpid(), type='int',
                          help="The seed to use for assigning port numbers (default: current process id)")
        parser.add_option("--coveragedir", dest="coveragedir",
                          help="Write tested RPC commands into this directory")
        self.add_options(parser)
        (self.options, self.args) = parser.parse_args()

        # backup root dir variable for removal at cleanup
        self.options.root, self.options.tmpdir = self.options.tmpdir, self.options.tmpdir + '/' + str(self.options.port_seed)

        if self.options.coveragedir:
            enable_coverage(self.options.coveragedir)

        PortSeed.n = self.options.port_seed

        os.environ['PATH'] = self.options.srcdir + ":" + self.options.srcdir + "/qt:" + os.environ['PATH']

        check_json_precision()

        # Set up temp directory and start logging
        os.makedirs(self.options.tmpdir, exist_ok=False)
        self._start_logging()

        success = False

        try:
            self.setup_chain()
            self.setup_network()
            self.run_test()
            success = True
        except JSONRPCException as e:
            self.log.exception("JSONRPC error")
        except AssertionError as e:
            self.log.exception("Assertion failed")
        except KeyError as e:
            self.log.exception("Key error")
        except Exception as e:
            self.log.exception("Unexpected exception caught during testing")
        except KeyboardInterrupt as e:
            self.log.warning("Exiting after keyboard interrupt")

        if not self.options.noshutdown:
            self.log.info("Stopping nodes")
            try:
                stop_nodes(self.nodes)
            except BaseException as e:
                success = False
                self.log.exception("Unexpected exception caught during shutdown")
        else:
            self.log.info("Note: dashds were not stopped and may still be running")

        if not self.options.nocleanup and not self.options.noshutdown and success:
            self.log.info("Cleaning up")
            shutil.rmtree(self.options.tmpdir)
            if not os.listdir(self.options.root):
                os.rmdir(self.options.root)
        else:
            self.log.warning("Not cleaning up dir %s" % self.options.tmpdir)
            if os.getenv("PYTHON_DEBUG", ""):
                # Dump the end of the debug logs, to aid in debugging rare
                # travis failures.
                import glob
                from collections import deque
                filenames = glob.glob(self.options.tmpdir + "/node*/regtest/debug.log")
                MAX_LINES_TO_PRINT = 1000
                for f in filenames:
                    print("From", f, ":")
                    print("".join(deque(open(f), MAX_LINES_TO_PRINT)))

        if success:
            self.log.info("Tests successful")
            sys.exit(0)
        else:
            self.log.error("Test failed. Test logging available at %s/test_framework.log", self.options.tmpdir)
            logging.shutdown()
            sys.exit(1)

    def _start_logging(self):
        # Add logger and logging handlers
        self.log = logging.getLogger('TestFramework')
        self.log.setLevel(logging.DEBUG)
        # Create file handler to log all messages
        fh = logging.FileHandler(self.options.tmpdir + '/test_framework.log')
        fh.setLevel(logging.DEBUG)
        # Create console handler to log messages to stdout. By default this
        # logs at INFO level and above, but it can be configured with --loglevel.
        ch = logging.StreamHandler(sys.stdout)
        # The user can provide a log level as a number or string (e.g. DEBUG).
        # loglevel is parsed as a string, so try to convert it to an int first.
        ll = int(self.options.loglevel) if self.options.loglevel.isdigit() else self.options.loglevel.upper()
        ch.setLevel(ll)
        # Format logs the same as bitcoind's debug.log with microsecond
        # precision (so log files can be concatenated and sorted)
        formatter = logging.Formatter(fmt='%(asctime)s.%(msecs)03d000 %(name)s (%(levelname)s): %(message)s', datefmt='%Y-%m-%d %H:%M:%S')
        fh.setFormatter(formatter)
        ch.setFormatter(formatter)
        # add the handlers to the logger
        self.log.addHandler(fh)
        self.log.addHandler(ch)

        if self.options.trace_rpc:
            rpc_logger = logging.getLogger("BitcoinRPC")
            rpc_logger.setLevel(logging.DEBUG)
            rpc_handler = logging.StreamHandler(sys.stdout)
            rpc_handler.setLevel(logging.DEBUG)
            rpc_logger.addHandler(rpc_handler)
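

# Minimal usage sketch (illustrative only; ExampleTest is hypothetical and not
# part of this file). Concrete tests subclass BitcoinTestFramework, override
# run_test(), and call main() from their entry point:
#
#     class ExampleTest(BitcoinTestFramework):
#         def __init__(self):
#             super().__init__()
#             self.num_nodes = 4
#             self.setup_clean_chain = True
#
#         def run_test(self):
#             self.nodes[0].generate(10)
#             self.sync_all()
#
#     if __name__ == '__main__':
#         ExampleTest().main()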


MASTERNODE_COLLATERAL = 1000


class MasternodeInfo:
    def __init__(self, proTxHash, ownerAddr, votingAddr, pubKeyOperator, keyOperator, collateral_address, collateral_txid, collateral_vout):
        self.proTxHash = proTxHash
        self.ownerAddr = ownerAddr
        self.votingAddr = votingAddr
        self.pubKeyOperator = pubKeyOperator
        self.keyOperator = keyOperator
        self.collateral_address = collateral_address
        self.collateral_txid = collateral_txid
        self.collateral_vout = collateral_vout


class DashTestFramework(BitcoinTestFramework):
    def __init__(self, num_nodes, masternodes_count, extra_args, fast_dip3_enforcement=False):
        super().__init__()
        self.mn_count = masternodes_count
        self.num_nodes = num_nodes
        self.mninfo = []
        self.setup_clean_chain = True
        self.is_network_split = False
        # additional args
        self.extra_args = extra_args
        self.extra_args += ["-sporkkey=cP4EKFyJsHT39LDqgdcB43Y3YXjNyjb5Fuas1GQSeAtjnZWmZEQK"]

        self.fast_dip3_enforcement = fast_dip3_enforcement
        if fast_dip3_enforcement:
            self.extra_args += ["-bip9params=dip0003:0:999999999999:10:5", "-dip3enforcementheight=50"]

    def create_simple_node(self):
        idx = len(self.nodes)
        args = self.extra_args
        self.nodes.append(start_node(idx, self.options.tmpdir, args))
        for i in range(0, idx):
            connect_nodes(self.nodes[i], idx)

    def prepare_masternodes(self):
        for idx in range(0, self.mn_count):
            self.prepare_masternode(idx)

    def prepare_masternode(self, idx):
        bls = self.nodes[0].bls('generate')
        address = self.nodes[0].getnewaddress()
        txid = self.nodes[0].sendtoaddress(address, MASTERNODE_COLLATERAL)
        txraw = self.nodes[0].getrawtransaction(txid, True)
        collateral_vout = 0
        for vout_idx in range(0, len(txraw["vout"])):
            vout = txraw["vout"][vout_idx]
            if vout["value"] == MASTERNODE_COLLATERAL:
                collateral_vout = vout_idx
        self.nodes[0].lockunspent(False, [{'txid': txid, 'vout': collateral_vout}])

        # send to same address to reserve some funds for fees
        self.nodes[0].sendtoaddress(address, 0.001)

        ownerAddr = self.nodes[0].getnewaddress()
        votingAddr = self.nodes[0].getnewaddress()
        rewardsAddr = self.nodes[0].getnewaddress()
        port = p2p_port(len(self.nodes) + idx)
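
        # Descriptive note (added for clarity): registration alternates
        # between the two ProRegTx flows. "protx register_fund" creates and
        # funds the collateral output inside the registration transaction
        # itself (so the UTXO locked above is unlocked again first), while
        # "protx register" references the pre-funded external collateral.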
        if (idx % 2) == 0:
            self.nodes[0].lockunspent(True, [{'txid': txid, 'vout': collateral_vout}])
            proTxHash = self.nodes[0].protx('register_fund', address, '127.0.0.1:%d' % port, ownerAddr, bls['public'], votingAddr, 0, rewardsAddr, address)
        else:
            self.nodes[0].generate(1)
            proTxHash = self.nodes[0].protx('register', txid, collateral_vout, '127.0.0.1:%d' % port, ownerAddr, bls['public'], votingAddr, 0, rewardsAddr, address)

        self.nodes[0].generate(1)
        self.mninfo.append(MasternodeInfo(proTxHash, ownerAddr, votingAddr, bls['public'], bls['secret'], address, txid, collateral_vout))

        self.sync_all()

    def remove_masternode(self, idx):
        mn = self.mninfo[idx]
        rawtx = self.nodes[0].createrawtransaction([{"txid": mn.collateral_txid, "vout": mn.collateral_vout}], {self.nodes[0].getnewaddress(): 999.9999})
        rawtx = self.nodes[0].signrawtransaction(rawtx)
        self.nodes[0].sendrawtransaction(rawtx["hex"])
        self.nodes[0].generate(1)
        self.sync_all()
        self.mninfo.remove(mn)

    def prepare_datadirs(self):
        # stop faucet node so that we can copy the datadir
        stop_node(self.nodes[0], 0)

        start_idx = len(self.nodes)
        for idx in range(0, self.mn_count):
            copy_datadir(0, idx + start_idx, self.options.tmpdir)

        # restart faucet node
        self.nodes[0] = start_node(0, self.options.tmpdir, self.extra_args)

    def start_masternodes(self):
        start_idx = len(self.nodes)

        for idx in range(0, self.mn_count):
            self.nodes.append(None)

        executor = ThreadPoolExecutor(max_workers=20)

        def do_start(idx):
            args = ['-masternode=1',
                    '-masternodeblsprivkey=%s' % self.mninfo[idx].keyOperator] + self.extra_args
            node = start_node(idx + start_idx, self.options.tmpdir, args)
            self.mninfo[idx].nodeIdx = idx + start_idx
            self.mninfo[idx].node = node
            self.nodes[idx + start_idx] = node
            wait_to_sync(node, True)

        def do_connect(idx):
            for i in range(0, idx + 1):
                connect_nodes(self.nodes[idx + start_idx], i)

        jobs = []

        # start up nodes in parallel
        for idx in range(0, self.mn_count):
            jobs.append(executor.submit(do_start, idx))

        # wait for all nodes to start up
        for job in jobs:
            job.result()
        jobs.clear()

        # connect nodes in parallel
        for idx in range(0, self.mn_count):
            jobs.append(executor.submit(do_connect, idx))

        # wait for all nodes to connect
        for job in jobs:
            job.result()
        jobs.clear()

        sync_masternodes(self.nodes, True)

        executor.shutdown()

    def setup_network(self):
        self.nodes = []
        # create faucet node for collateral and transactions
        self.nodes.append(start_node(0, self.options.tmpdir, self.extra_args))
        required_balance = MASTERNODE_COLLATERAL * self.mn_count + 1
        while self.nodes[0].getbalance() < required_balance:
            set_mocktime(get_mocktime() + 1)
            set_node_times(self.nodes, get_mocktime())
            self.nodes[0].generate(1)
        # create connected simple nodes
        for i in range(0, self.num_nodes - self.mn_count - 1):
            self.create_simple_node()
        sync_masternodes(self.nodes, True)

        # activate DIP3
        if not self.fast_dip3_enforcement:
            while self.nodes[0].getblockcount() < 500:
                self.nodes[0].generate(10)
        self.sync_all()

        # create masternodes
        self.prepare_masternodes()
        self.prepare_datadirs()
        self.start_masternodes()

        set_mocktime(get_mocktime() + 1)
        set_node_times(self.nodes, get_mocktime())
        self.nodes[0].generate(1)
        # sync nodes
        self.sync_all()
        set_mocktime(get_mocktime() + 1)
        set_node_times(self.nodes, get_mocktime())

        mn_info = self.nodes[0].masternodelist("status")
        assert len(mn_info) == self.mn_count
        for status in mn_info.values():
            assert status == 'ENABLED'

    def get_autois_bip9_status(self, node):
        info = node.getblockchaininfo()
        # we reuse the dip3 deployment
        return info['bip9_softforks']['dip0003']['status']
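
    # A BIP9 deployment advances defined -> started -> locked_in -> active,
    # one state change per retargeting window; the helper below mines blocks
    # (bumping mocktime so blocks are accepted) until each state has passed.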
    def activate_autois_bip9(self, node):
        # Sync nodes periodically:
        # if we sync them too often, activation takes too long;
        # if we sync them too rarely, nodes fail to update their state and
        # the bip9 status is not updated.
        # So, in this code nodes are synced once per 10 blocks.
        counter = 0
        sync_period = 10

        while self.get_autois_bip9_status(node) == 'defined':
            set_mocktime(get_mocktime() + 1)
            set_node_times(self.nodes, get_mocktime())
            node.generate(1)
            counter += 1
            if counter % sync_period == 0:
                # sync nodes
                self.sync_all()

        while self.get_autois_bip9_status(node) == 'started':
            set_mocktime(get_mocktime() + 1)
            set_node_times(self.nodes, get_mocktime())
            node.generate(1)
            counter += 1
            if counter % sync_period == 0:
                # sync nodes
                self.sync_all()

        while self.get_autois_bip9_status(node) == 'locked_in':
            set_mocktime(get_mocktime() + 1)
            set_node_times(self.nodes, get_mocktime())
            node.generate(1)
            counter += 1
            if counter % sync_period == 0:
                # sync nodes
                self.sync_all()

        # sync nodes
        self.sync_all()

        assert self.get_autois_bip9_status(node) == 'active'

    def get_autois_spork_state(self, node):
        info = node.spork('active')
        return info['SPORK_16_INSTANTSEND_AUTOLOCKS']

    def set_autois_spork_state(self, node, state):
        # Increment mocktime as otherwise nodes will not update sporks
        set_mocktime(get_mocktime() + 1)
        set_node_times(self.nodes, get_mocktime())
        if state:
            value = 0
        else:
            value = 4070908800
        node.spork('SPORK_16_INSTANTSEND_AUTOLOCKS', value)
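
    # Spork values are timestamps: a spork is active once its value lies in
    # the past, so 0 enables it immediately while 4070908800 (a timestamp in
    # the year 2099) effectively disables it.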

    def create_raw_tx(self, node_from, node_to, amount, min_inputs, max_inputs):
        assert min_inputs <= max_inputs

        # fill inputs
        inputs = []
        balances = node_from.listunspent()
        in_amount = 0.0
        last_amount = 0.0
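        # Greedy selection: take min_inputs unconditionally, stop once the
        # running total exceeds the target, otherwise keep adding inputs up
        # to max_inputs; once the cap is reached, replace the last input with
        # the next UTXO in the hope of covering the amount.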
        for tx in balances:
            if len(inputs) < min_inputs:
                input = {}
                input["txid"] = tx['txid']
                input['vout'] = tx['vout']
                in_amount += float(tx['amount'])
                inputs.append(input)
                last_amount = float(tx['amount'])
            elif in_amount > amount:
                break
            elif len(inputs) < max_inputs:
                input = {}
                input["txid"] = tx['txid']
                input['vout'] = tx['vout']
                in_amount += float(tx['amount'])
                inputs.append(input)
                last_amount = float(tx['amount'])
            else:
                # max_inputs reached but the amount is still not covered:
                # swap the last input for the current one
                input = {}
                input["txid"] = tx['txid']
                input['vout'] = tx['vout']
                in_amount -= last_amount
                in_amount += float(tx['amount'])
                inputs[-1] = input
                last_amount = float(tx['amount'])

        assert len(inputs) >= min_inputs
        assert len(inputs) <= max_inputs
        assert in_amount >= amount

        # fill outputs
        receiver_address = node_to.getnewaddress()
        change_address = node_from.getnewaddress()
        fee = 0.001
        outputs = {}
        outputs[receiver_address] = satoshi_round(amount)
        outputs[change_address] = satoshi_round(in_amount - amount - fee)
        rawtx = node_from.createrawtransaction(inputs, outputs)
        return node_from.signrawtransaction(rawtx)

    # sends regular instantsend with high fee
    def send_regular_instantsend(self, sender, receiver, check_fee=True):
        receiver_addr = receiver.getnewaddress()
        txid = sender.instantsendtoaddress(receiver_addr, 1.0)
        if check_fee:
            MIN_FEE = satoshi_round(-0.0001)
            fee = sender.gettransaction(txid)['fee']
            expected_fee = MIN_FEE * len(sender.getrawtransaction(txid, True)['vin'])
            assert_equal(fee, expected_fee)
        return self.wait_for_instantlock(txid, sender)

    # sends simple tx, it should become locked if autolocks are allowed
    def send_simple_tx(self, sender, receiver):
        raw_tx = self.create_raw_tx(sender, receiver, 1.0, 1, 4)
        txid = self.nodes[0].sendrawtransaction(raw_tx['hex'])
        self.sync_all()
        return self.wait_for_instantlock(txid, sender)

    # sends complex tx, it should never become locked for old instantsend
    def send_complex_tx(self, sender, receiver):
        raw_tx = self.create_raw_tx(sender, receiver, 1.0, 5, 100)
        txid = sender.sendrawtransaction(raw_tx['hex'])
        self.sync_all()
        return self.wait_for_instantlock(txid, sender)

    def wait_for_instantlock(self, txid, node):
        # wait for instantsend locks
        start = time()
        locked = False
        while True:
            try:
                is_tx = node.getrawtransaction(txid, True)
                if is_tx['instantlock']:
                    locked = True
                    break
            except:
                # TX not received yet?
                pass
            if time() > start + 10:
                break
            sleep(0.5)
        return locked

    def wait_for_sporks_same(self, timeout=30):
        st = time()
        while time() < st + timeout:
            if self.check_sporks_same():
                return
            sleep(0.5)
        raise AssertionError("wait_for_sporks_same timed out")

    def check_sporks_same(self):
        sporks = self.nodes[0].spork('show')
        for node in self.nodes[1:]:
            sporks2 = node.spork('show')
            if sporks != sporks2:
                return False
        return True

    def wait_for_quorum_phase(self, phase, check_received_messages, check_received_messages_count, timeout=30):
        t = time()
        while time() - t < timeout:
            all_ok = True
            for mn in self.mninfo:
                s = mn.node.quorum("dkgstatus")["session"]
                if "llmq_5_60" not in s:
                    all_ok = False
                    break
                s = s["llmq_5_60"]
                if "phase" not in s:
                    all_ok = False
                    break
                if s["phase"] != phase:
                    all_ok = False
                    break
                if check_received_messages is not None:
                    if s[check_received_messages] < check_received_messages_count:
                        all_ok = False
                        break
            if all_ok:
                return
            sleep(0.1)
        raise AssertionError("wait_for_quorum_phase timed out")

    def wait_for_quorum_commitment(self, timeout=15):
        t = time()
        while time() - t < timeout:
            all_ok = True
            for node in self.nodes:
                s = node.quorum("dkgstatus")
                if "minableCommitments" not in s:
                    all_ok = False
                    break
                s = s["minableCommitments"]
                if "llmq_5_60" not in s:
                    all_ok = False
                    break
            if all_ok:
                return
            sleep(0.1)
        raise AssertionError("wait_for_quorum_commitment timed out")

    def mine_quorum(self, expected_valid_count=5):
        quorums = self.nodes[0].quorum("list")

        # move forward to next DKG
        skip_count = 24 - (self.nodes[0].getblockcount() % 24)
        if skip_count != 0:
            set_mocktime(get_mocktime() + 1)
            set_node_times(self.nodes, get_mocktime())
            self.nodes[0].generate(skip_count)
        sync_blocks(self.nodes)

        # Make sure all reached phase 1 (init)
        self.wait_for_quorum_phase(1, None, 0)
        # Give nodes some time to connect to neighbors
        sleep(2)
        set_mocktime(get_mocktime() + 1)
        set_node_times(self.nodes, get_mocktime())
        self.nodes[0].generate(2)
        sync_blocks(self.nodes)

        # Make sure all reached phase 2 (contribute) and received all contributions
        self.wait_for_quorum_phase(2, "receivedContributions", expected_valid_count)
        set_mocktime(get_mocktime() + 1)
        set_node_times(self.nodes, get_mocktime())
        self.nodes[0].generate(2)
        sync_blocks(self.nodes)

        # Make sure all reached phase 3 (complain) and received all complaints
        self.wait_for_quorum_phase(3, "receivedComplaints" if expected_valid_count != 5 else None, expected_valid_count)
        set_mocktime(get_mocktime() + 1)
        set_node_times(self.nodes, get_mocktime())
        self.nodes[0].generate(2)
        sync_blocks(self.nodes)

        # Make sure all reached phase 4 (justify)
        self.wait_for_quorum_phase(4, None, 0)
        set_mocktime(get_mocktime() + 1)
        set_node_times(self.nodes, get_mocktime())
        self.nodes[0].generate(2)
        sync_blocks(self.nodes)

        # Make sure all reached phase 5 (commit)
        self.wait_for_quorum_phase(5, "receivedPrematureCommitments", expected_valid_count)
        set_mocktime(get_mocktime() + 1)
        set_node_times(self.nodes, get_mocktime())
        self.nodes[0].generate(2)
        sync_blocks(self.nodes)

        # Make sure all reached phase 6 (mining)
        self.wait_for_quorum_phase(6, None, 0)

        # Wait for final commitment
        self.wait_for_quorum_commitment()

        # mine the final commitment
        set_mocktime(get_mocktime() + 1)
        set_node_times(self.nodes, get_mocktime())
        self.nodes[0].generate(1)
        while quorums == self.nodes[0].quorum("list"):
            sleep(2)
            set_mocktime(get_mocktime() + 1)
            set_node_times(self.nodes, get_mocktime())
            self.nodes[0].generate(1)
        sync_blocks(self.nodes)

        # Mine 8 (SIGN_HEIGHT_OFFSET) more blocks to make sure that the new
        # quorum becomes eligible for signing sessions
        self.nodes[0].generate(8)
        sync_blocks(self.nodes)
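

# Illustrative sketch (hypothetical; not part of this file): Dash-specific
# tests typically pick a node/masternode layout in __init__ and drive the
# helpers from run_test(), e.g.:
#
#     class ExampleDashTest(DashTestFramework):
#         def __init__(self):
#             # 6 nodes total: 1 faucet/controller + 5 masternodes
#             super().__init__(6, 5, [], fast_dip3_enforcement=True)
#
#         def run_test(self):
#             self.mine_quorum()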


# Test framework for doing p2p comparison testing, which sets up some dashd
# binaries:
# 1 binary: test binary
# 2 binaries: 1 test binary, 1 ref binary
# n>2 binaries: 1 test binary, n-1 ref binaries
class ComparisonTestFramework(BitcoinTestFramework):

    def __init__(self):
        super().__init__()
        self.num_nodes = 2
        self.setup_clean_chain = True

    def add_options(self, parser):
        parser.add_option("--testbinary", dest="testbinary",
                          default=os.getenv("BITCOIND", "dashd"),
                          help="dashd binary to test")
        parser.add_option("--refbinary", dest="refbinary",
                          default=os.getenv("BITCOIND", "dashd"),
                          help="dashd binary to use for reference nodes (if any)")

    def setup_network(self):
        self.nodes = start_nodes(
            self.num_nodes, self.options.tmpdir,
            extra_args=[['-whitelist=127.0.0.1']] * self.num_nodes,
            binary=[self.options.testbinary] +
                   [self.options.refbinary] * (self.num_nodes - 1))