// Copyright (c) 2018-2019 The Dash Core developers
// Distributed under the MIT/X11 software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.

#include "quorums_dkgsessionhandler.h"
#include "quorums_blockprocessor.h"
#include "quorums_debug.h"
#include "quorums_init.h"
#include "quorums_utils.h"

#include "activemasternode.h"
#include "chainparams.h"
#include "init.h"
#include "net_processing.h"
#include "validation.h"
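
// CDKGSessionHandler drives a single LLMQ type's DKG sessions: it buffers incoming DKG messages
// (contributions, complaints, justifications, premature commitments) per peer, tracks the current
// DKG phase derived from the chain height, and runs the per-round phase state machine in a
// dedicated background thread.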

namespace llmq
{

CDKGPendingMessages::CDKGPendingMessages(size_t _maxMessagesPerNode) :
    maxMessagesPerNode(_maxMessagesPerNode)
{
}

void CDKGPendingMessages::PushPendingMessage(NodeId from, CDataStream& vRecv)
{
    // this will also consume the data, even if we bail out early
    auto pm = std::make_shared<CDataStream>(std::move(vRecv));

    {
        LOCK(cs);

        if (messagesPerNode[from] >= maxMessagesPerNode) {
            // TODO ban?
            LogPrint("net", "CDKGPendingMessages::%s -- too many messages, peer=%d\n", __func__, from);
            return;
        }
        messagesPerNode[from]++;
    }

    // deduplicate by the hash of the raw serialized message
    CHashWriter hw(SER_GETHASH, 0);
    hw.write(pm->data(), pm->size());
    uint256 hash = hw.GetHash();

    LOCK2(cs_main, cs);

    if (!seenMessages.emplace(hash).second) {
        LogPrint("net", "CDKGPendingMessages::%s -- already seen %s, peer=%d\n", __func__, hash.ToString(), from);
        return;
    }

    g_connman->RemoveAskFor(hash);

    pendingMessages.emplace_back(std::make_pair(from, std::move(pm)));
}

std::list<CDKGPendingMessages::BinaryMessage> CDKGPendingMessages::PopPendingMessages(size_t maxCount)
{
    LOCK(cs);

    std::list<BinaryMessage> ret;
    while (!pendingMessages.empty() && ret.size() < maxCount) {
        ret.emplace_back(std::move(pendingMessages.front()));
        pendingMessages.pop_front();
    }

    return std::move(ret);
}

bool CDKGPendingMessages::HasSeen(const uint256& hash) const
{
    LOCK(cs);
    return seenMessages.count(hash) != 0;
}

void CDKGPendingMessages::Clear()
{
    LOCK(cs);
    pendingMessages.clear();
    messagesPerNode.clear();
    seenMessages.clear();
}

//////
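// CDKGSessionHandler: typically one instance per LLMQ type (created by CDKGSessionManager). It
// owns the pending-message queues above and a worker thread that advances the DKG phase machine.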

CDKGSessionHandler::CDKGSessionHandler(const Consensus::LLMQParams& _params, ctpl::thread_pool& _messageHandlerPool, CBLSWorker& _blsWorker, CDKGSessionManager& _dkgManager) :
    params(_params),
    messageHandlerPool(_messageHandlerPool),
    blsWorker(_blsWorker),
    dkgManager(_dkgManager),
    curSession(std::make_shared<CDKGSession>(_params, _blsWorker, _dkgManager)),
    pendingContributions((size_t)_params.size * 2), // we allow size*2 messages as we need to make sure we see bad behavior (double messages)
    pendingComplaints((size_t)_params.size * 2),
    pendingJustifications((size_t)_params.size * 2),
    pendingPrematureCommitments((size_t)_params.size * 2)
{
    phaseHandlerThread = std::thread([this] {
        RenameThread(strprintf("quorum-phase-%d", (uint8_t)params.type).c_str());
        PhaseHandlerThread();
    });
}

CDKGSessionHandler::~CDKGSessionHandler()
{
    stopRequested = true;
    if (phaseHandlerThread.joinable()) {
        phaseHandlerThread.join();
    }
}
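
// The current DKG phase is derived purely from chain height: within each dkgInterval-block
// window, every dkgPhaseBlocks blocks advance the phase by one step, starting at
// QuorumPhase_Initialized. For example, assuming dkgInterval=24 and dkgPhaseBlocks=2, blocks
// 0-1 of the window are Initialized, 2-3 Contribute, 4-5 Complain, 6-7 Justify, 8-9 Commit and
// 10-11 Finalize; from block 12 on the handler stays in Idle until the next window begins.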
void CDKGSessionHandler::UpdatedBlockTip(const CBlockIndex* pindexNew, const CBlockIndex* pindexFork, bool fInitialDownload)
{
    LOCK(cs);

    int quorumStageInt = pindexNew->nHeight % params.dkgInterval;
    const CBlockIndex* pindexQuorum = pindexNew->GetAncestor(pindexNew->nHeight - quorumStageInt);

    quorumHeight = pindexQuorum->nHeight;
    quorumHash = pindexQuorum->GetBlockHash();

    bool fNewPhase = (quorumStageInt % params.dkgPhaseBlocks) == 0;
    int phaseInt = quorumStageInt / params.dkgPhaseBlocks + 1;
    if (fNewPhase && phaseInt >= QuorumPhase_Initialized && phaseInt <= QuorumPhase_Idle) {
        phase = static_cast<QuorumPhase>(phaseInt);
    }
}

void CDKGSessionHandler::ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStream& vRecv, CConnman& connman)
{
    // We don't handle messages in the calling thread as deserialization/processing of these would block everything
    if (strCommand == NetMsgType::QCONTRIB) {
        pendingContributions.PushPendingMessage(pfrom->id, vRecv);
    } else if (strCommand == NetMsgType::QCOMPLAINT) {
        pendingComplaints.PushPendingMessage(pfrom->id, vRecv);
    } else if (strCommand == NetMsgType::QJUSTIFICATION) {
        pendingJustifications.PushPendingMessage(pfrom->id, vRecv);
    } else if (strCommand == NetMsgType::QPCOMMITMENT) {
        pendingPrematureCommitments.PushPendingMessage(pfrom->id, vRecv);
    }
}

bool CDKGSessionHandler::InitNewQuorum(int newQuorumHeight, const uint256& newQuorumHash)
{
    //AssertLockHeld(cs_main);

    const auto& consensus = Params().GetConsensus();

    curSession = std::make_shared<CDKGSession>(params, blsWorker, dkgManager);

    if (!deterministicMNManager->IsDIP3Enforced(newQuorumHeight)) {
        return false;
    }

    auto mns = CLLMQUtils::GetAllQuorumMembers(params.type, newQuorumHash);

    if (!curSession->Init(newQuorumHeight, newQuorumHash, mns, activeMasternodeInfo.proTxHash)) {
        LogPrintf("CDKGSessionManager::%s -- quorum initialization failed\n", __func__);
        return false;
    }

    return true;
}

std::pair<QuorumPhase, uint256> CDKGSessionHandler::GetPhaseAndQuorumHash() const
{
    LOCK(cs);
    return std::make_pair(phase, quorumHash);
}

class AbortPhaseException : public std::exception {
};
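
// WaitForNextPhase polls the (phase, quorum hash) pair until the expected next phase is reached.
// It throws AbortPhaseException if shutdown was requested, the quorum hash moved away from the
// expected one, or the phase jumped somewhere other than curPhase/nextPhase. Between polls it
// calls runWhileWaiting(); only when that returns false (nothing left to do) does it sleep.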
void CDKGSessionHandler::WaitForNextPhase(QuorumPhase curPhase,
                                          QuorumPhase nextPhase,
                                          const uint256& expectedQuorumHash,
                                          const WhileWaitFunc& runWhileWaiting)
{
    while (true) {
        if (stopRequested || ShutdownRequested()) {
            throw AbortPhaseException();
        }
        auto p = GetPhaseAndQuorumHash();
        if (!expectedQuorumHash.IsNull() && p.second != expectedQuorumHash) {
            throw AbortPhaseException();
        }
        if (p.first == nextPhase) {
            break;
        }
        if (curPhase != QuorumPhase_None && p.first != curPhase) {
            throw AbortPhaseException();
        }
        if (!runWhileWaiting()) {
            MilliSleep(100);
        }
    }

    if (nextPhase == QuorumPhase_Initialized) {
        quorumDKGDebugManager->ResetLocalSessionStatus(params.type);
    } else {
        quorumDKGDebugManager->UpdateLocalSessionStatus(params.type, [&](CDKGDebugSessionStatus& status) {
            bool changed = status.phase != (uint8_t)nextPhase;
            status.phase = (uint8_t)nextPhase;
            return changed;
        });
    }
}

void CDKGSessionHandler::WaitForNewQuorum(const uint256& oldQuorumHash)
{
    while (true) {
        if (stopRequested || ShutdownRequested()) {
            throw AbortPhaseException();
        }
        auto p = GetPhaseAndQuorumHash();
        if (p.second != oldQuorumHash) {
            return;
        }
        MilliSleep(100);
    }
}

// Sleep some time to not fully overload the whole network
void CDKGSessionHandler::SleepBeforePhase(QuorumPhase curPhase,
                                          const uint256& expectedQuorumHash,
                                          double randomSleepFactor,
                                          const WhileWaitFunc& runWhileWaiting)
{
    // expected time for a full phase
    double phaseTime = params.dkgPhaseBlocks * Params().GetConsensus().nPowTargetSpacing * 1000;
    // expected time per member
    phaseTime = phaseTime / params.size;
    // Don't expect perfect block times and thus reduce the phase time to be on the secure side (caller chooses factor)
    phaseTime *= randomSleepFactor;

    if (Params().MineBlocksOnDemand()) {
        // on regtest, blocks can be mined on demand without any significant time passing between these. We shouldn't
        // wait before phases in this case
        phaseTime = 0;
    }

    int64_t sleepTime = (int64_t)(phaseTime * curSession->GetMyMemberIndex());
    int64_t endTime = GetTimeMillis() + sleepTime;
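    // Example, assuming a 2.5 minute block target, dkgPhaseBlocks=2, size=50 and
    // randomSleepFactor=0.05: phaseTime = 2 * 150000 ms / 50 * 0.05 = 300 ms, so the member at
    // index 10 starts its phase work roughly 3 seconds after the phase began, spreading the load.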

    while (GetTimeMillis() < endTime) {
        if (stopRequested || ShutdownRequested()) {
            throw AbortPhaseException();
        }
        auto p = GetPhaseAndQuorumHash();
        if (p.first != curPhase || p.second != expectedQuorumHash) {
            throw AbortPhaseException();
        }
        if (!runWhileWaiting()) {
            MilliSleep(100);
        }
    }
}

void CDKGSessionHandler::HandlePhase(QuorumPhase curPhase,
                                     QuorumPhase nextPhase,
                                     const uint256& expectedQuorumHash,
                                     double randomSleepFactor,
                                     const StartPhaseFunc& startPhaseFunc,
                                     const WhileWaitFunc& runWhileWaiting)
{
    SleepBeforePhase(curPhase, expectedQuorumHash, randomSleepFactor, runWhileWaiting);
    startPhaseFunc();
    WaitForNextPhase(curPhase, nextPhase, expectedQuorumHash, runWhileWaiting);
}
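
// Batch signature verification strategy: aggregate all signatures and verify them against the
// corresponding public keys and sign hashes in one shot. If that succeeds, no per-message work is
// needed. If it fails (or the same sign hash appears twice), fall back to verifying each message
// individually so the misbehaving senders can be identified and reported.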
// returns a set of NodeIds which sent invalid messages
template<typename Message>
std::set<NodeId> BatchVerifyMessageSigs(CDKGSession& session, const std::vector<std::pair<NodeId, std::shared_ptr<Message>>>& messages)
{
    if (messages.empty()) {
        return {};
    }

    std::set<NodeId> ret;
    bool revertToSingleVerification = false;

    CBLSSignature aggSig;
    std::vector<CBLSPublicKey> pubKeys;
    std::vector<uint256> messageHashes;
    std::set<uint256> messageHashesSet;
    pubKeys.reserve(messages.size());
    messageHashes.reserve(messages.size());
    bool first = true;
    for (const auto& p : messages) {
        const auto& msg = *p.second;

        auto member = session.GetMember(msg.proTxHash);
        if (!member) {
            // should not happen as it was verified before
            ret.emplace(p.first);
            continue;
        }

        if (first) {
            aggSig = msg.sig;
        } else {
            aggSig.AggregateInsecure(msg.sig);
        }
        first = false;

        auto msgHash = msg.GetSignHash();
        if (!messageHashesSet.emplace(msgHash).second) {
            // can only happen in 2 cases:
            // 1. Someone sent us the same message twice but with differing signature, meaning that at least one of them
            //    must be invalid. In this case, we'd have to revert to single message verification nevertheless
            // 2. Someone managed to find a way to create two different binary representations of a message that deserializes
            //    to the same object representation. This would be some form of malleability. However, this shouldn't be
            //    possible as only deterministic/unique BLS signatures and very simple data types are involved
            revertToSingleVerification = true;
            break;
        }

        pubKeys.emplace_back(member->dmn->pdmnState->pubKeyOperator);
        messageHashes.emplace_back(msgHash);
    }

    if (!revertToSingleVerification) {
        bool valid = aggSig.VerifyInsecureAggregated(pubKeys, messageHashes);
        if (valid) {
            // all good
            return ret;
        }

        // are all messages from the same node?
        NodeId firstNodeId = -1;
        first = true;
        bool nodeIdsAllSame = true;
        for (auto it = messages.begin(); it != messages.end(); ++it) {
            if (first) {
                firstNodeId = it->first;
                first = false;
            } else if (it->first != firstNodeId) {
                nodeIdsAllSame = false;
                break;
            }
        }
        // if yes, take a short path and return a set with only him
        if (nodeIdsAllSame) {
            ret.emplace(firstNodeId);
            return ret;
        }
        // different nodes, let's figure out who are the bad ones
    }

    for (const auto& p : messages) {
        if (ret.count(p.first)) {
            continue;
        }

        const auto& msg = *p.second;
        auto member = session.GetMember(msg.proTxHash);
        if (!member) {
            // no known member for this proTxHash; treat the sender as invalid (it may not have
            // been flagged above if the aggregation pass bailed out early)
            ret.emplace(p.first);
            continue;
        }
        bool valid = msg.sig.VerifyInsecure(member->dmn->pdmnState->pubKeyOperator, msg.GetSignHash());
        if (!valid) {
            ret.emplace(p.first);
        }
    }

    return ret;
}
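
// ProcessPendingMessageBatch pops up to maxCount queued raw messages, deserializes them,
// pre-verifies each one via the session (punishing peers whose messages fail to deserialize or
// pre-verify), batch-verifies the BLS signatures, and finally hands the surviving messages to the
// session. It returns true if it had any messages to work on, so callers can use it as a
// "while waiting" hook.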
template<typename Message>
bool ProcessPendingMessageBatch(CDKGSession& session, CDKGPendingMessages& pendingMessages, size_t maxCount)
{
    auto msgs = pendingMessages.PopAndDeserializeMessages<Message>(maxCount);
    if (msgs.empty()) {
        return false;
    }

    std::vector<uint256> hashes;
    std::vector<std::pair<NodeId, std::shared_ptr<Message>>> preverifiedMessages;
    hashes.reserve(msgs.size());
    preverifiedMessages.reserve(msgs.size());

    for (const auto& p : msgs) {
        if (!p.second) {
            LogPrint("net", "%s -- failed to deserialize message, peer=%d\n", __func__, p.first);
            {
                LOCK(cs_main);
                Misbehaving(p.first, 100);
            }
            continue;
        }
        const auto& msg = *p.second;

        auto hash = ::SerializeHash(msg);
        {
            LOCK(cs_main);
            g_connman->RemoveAskFor(hash);
        }

        bool ban = false;
        if (!session.PreVerifyMessage(hash, msg, ban)) {
            if (ban) {
                LogPrint("net", "%s -- banning node due to failed preverification, peer=%d\n", __func__, p.first);
                {
                    LOCK(cs_main);
                    Misbehaving(p.first, 100);
                }
            }
            LogPrint("net", "%s -- skipping message due to failed preverification, peer=%d\n", __func__, p.first);
            continue;
        }
        hashes.emplace_back(hash);
        preverifiedMessages.emplace_back(p);
    }

    if (preverifiedMessages.empty()) {
        return true;
    }

    auto badNodes = BatchVerifyMessageSigs(session, preverifiedMessages);
    if (!badNodes.empty()) {
        LOCK(cs_main);
        for (auto nodeId : badNodes) {
            LogPrint("net", "%s -- failed to verify signature, peer=%d\n", __func__, nodeId);
            Misbehaving(nodeId, 100);
        }
    }

    for (size_t i = 0; i < preverifiedMessages.size(); i++) {
        NodeId nodeId = preverifiedMessages[i].first;
        if (badNodes.count(nodeId)) {
            continue;
        }
        const auto& msg = *preverifiedMessages[i].second;

        bool ban = false;
        session.ReceiveMessage(hashes[i], msg, ban);
        if (ban) {
            LogPrint("net", "%s -- banning node after ReceiveMessage failed, peer=%d\n", __func__, nodeId);
            LOCK(cs_main);
            Misbehaving(nodeId, 100);
            badNodes.emplace(nodeId);
        }
    }

    for (const auto& p : preverifiedMessages) {
        NodeId nodeId = p.first;
        if (badNodes.count(nodeId)) {
            continue;
        }

        session.AddParticipatingNode(nodeId);
    }

    return true;
}
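
// HandleDKGRound runs one full DKG round: wait for the Initialized phase, set up a fresh session
// for the new quorum hash, establish masternode quorum connections (or watch connections with
// -watchquorums), then walk through the Contribute, Complain, Justify and Commit phases, and
// finally hand the resulting final commitments to the block processor for mining.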
void CDKGSessionHandler::HandleDKGRound()
{
    uint256 curQuorumHash;
    int curQuorumHeight;

    WaitForNextPhase(QuorumPhase_None, QuorumPhase_Initialized, uint256(), []{return false;});

    {
        LOCK(cs);
        pendingContributions.Clear();
        pendingComplaints.Clear();
        pendingJustifications.Clear();
        pendingPrematureCommitments.Clear();
        curQuorumHash = quorumHash;
        curQuorumHeight = quorumHeight;
    }

    if (!InitNewQuorum(curQuorumHeight, curQuorumHash)) {
        // should actually never happen
        WaitForNewQuorum(curQuorumHash);
        throw AbortPhaseException();
    }

    quorumDKGDebugManager->UpdateLocalSessionStatus(params.type, [&](CDKGDebugSessionStatus& status) {
        bool changed = status.phase != (uint8_t)QuorumPhase_Initialized;
        status.phase = (uint8_t)QuorumPhase_Initialized;
        return changed;
    });

    if (curSession->AreWeMember() || GetBoolArg("-watchquorums", DEFAULT_WATCH_QUORUMS)) {
        std::set<CService> connections;
        if (curSession->AreWeMember()) {
            connections = CLLMQUtils::GetQuorumConnections(params.type, curQuorumHash, curSession->myProTxHash);
        } else {
            auto cindexes = CLLMQUtils::CalcDeterministicWatchConnections(params.type, curQuorumHash, curSession->members.size(), 1);
            for (auto idx : cindexes) {
                connections.emplace(curSession->members[idx]->dmn->pdmnState->addr);
            }
        }
        if (!connections.empty()) {
            std::string debugMsg = strprintf("CDKGSessionManager::%s -- adding masternodes quorum connections for quorum %s:\n", __func__, curSession->quorumHash.ToString());
            for (const auto& c : connections) {
                debugMsg += strprintf("  %s\n", c.ToString(false));
            }
            LogPrintf(debugMsg);
            g_connman->AddMasternodeQuorumNodes(params.type, curQuorumHash, connections);

            auto participatingNodesTmp = g_connman->GetMasternodeQuorumAddresses(params.type, curQuorumHash);
            LOCK(curSession->invCs);
            curSession->participatingNodes = std::move(participatingNodesTmp);
        }
    }

    WaitForNextPhase(QuorumPhase_Initialized, QuorumPhase_Contribute, curQuorumHash, []{return false;});

    // Contribute
    auto fContributeStart = [this]() {
        curSession->Contribute(pendingContributions);
    };
    auto fContributeWait = [this] {
        return ProcessPendingMessageBatch<CDKGContribution>(*curSession, pendingContributions, 8);
    };
    HandlePhase(QuorumPhase_Contribute, QuorumPhase_Complain, curQuorumHash, 0.05, fContributeStart, fContributeWait);

    // Complain
    auto fComplainStart = [this]() {
        curSession->VerifyAndComplain(pendingComplaints);
    };
    auto fComplainWait = [this] {
        return ProcessPendingMessageBatch<CDKGComplaint>(*curSession, pendingComplaints, 8);
    };
    HandlePhase(QuorumPhase_Complain, QuorumPhase_Justify, curQuorumHash, 0.05, fComplainStart, fComplainWait);

    // Justify
    auto fJustifyStart = [this]() {
        curSession->VerifyAndJustify(pendingJustifications);
    };
    auto fJustifyWait = [this] {
        return ProcessPendingMessageBatch<CDKGJustification>(*curSession, pendingJustifications, 8);
    };
    HandlePhase(QuorumPhase_Justify, QuorumPhase_Commit, curQuorumHash, 0.05, fJustifyStart, fJustifyWait);

    // Commit
    auto fCommitStart = [this]() {
        curSession->VerifyAndCommit(pendingPrematureCommitments);
    };
    auto fCommitWait = [this] {
        return ProcessPendingMessageBatch<CDKGPrematureCommitment>(*curSession, pendingPrematureCommitments, 8);
    };
    HandlePhase(QuorumPhase_Commit, QuorumPhase_Finalize, curQuorumHash, 0.1, fCommitStart, fCommitWait);

    auto finalCommitments = curSession->FinalizeCommitments();
    for (const auto& fqc : finalCommitments) {
        quorumBlockProcessor->AddMinableCommitment(fqc);
    }
}
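
// PhaseHandlerThread loops DKG rounds until shutdown; an aborted round is recorded in the debug
// status and the handler then simply begins waiting for the next round.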
void CDKGSessionHandler::PhaseHandlerThread()
{
    while (!stopRequested && !ShutdownRequested()) {
        try {
            HandleDKGRound();
        } catch (AbortPhaseException& e) {
            quorumDKGDebugManager->UpdateLocalSessionStatus(params.type, [&](CDKGDebugSessionStatus& status) {
                status.aborted = true;
                return true;
            });
            LogPrintf("CDKGSessionHandler::%s -- aborted current DKG session\n", __func__);
        }
    }
}

} // namespace llmq