From 790c9e784bc0fb82b4947e8819747afe8b22ab2b Mon Sep 17 00:00:00 2001 From: MarcoFalke Date: Mon, 22 Apr 2019 08:09:55 -0400 Subject: [PATCH 01/10] Merge #15826: Pure python EC b67978529a Add comments to Python ECDSA implementation (John Newbery) 8c7b9324ca Pure python EC (Pieter Wuille) Pull request description: This removes the dependency on OpenSSL for the interaction tests, by providing a pure-Python toy implementation of secp256k1. ACKs for commit b67978: jnewbery: utACK b67978529ad02fc2665f2362418dc53db2e25e17 Tree-SHA512: 181445eb08b316c46937b80dc10aa50d103ab1fdddaf834896c0ea22204889f7b13fd33cbcbd00ddba15f7e4686fe0d9f8e8bb4c0ad0e9587490c90be83966dc --- test/functional/feature_assumevalid.py | 8 +- test/functional/feature_block.py | 12 +- test/functional/test_framework/key.py | 526 ++++++++++++++++--------- 3 files changed, 358 insertions(+), 188 deletions(-) diff --git a/test/functional/feature_assumevalid.py b/test/functional/feature_assumevalid.py index 0936b71ab8..111a408239 100755 --- a/test/functional/feature_assumevalid.py +++ b/test/functional/feature_assumevalid.py @@ -32,7 +32,7 @@ Start three nodes: import time from test_framework.blocktools import (create_block, create_coinbase) -from test_framework.key import CECKey +from test_framework.key import ECKey from test_framework.messages import ( CBlockHeader, COutPoint, @@ -105,9 +105,9 @@ class AssumeValidTest(BitcoinTestFramework): self.blocks = [] # Get a pubkey for the coinbase TXO - coinbase_key = CECKey() - coinbase_key.set_secretbytes(b"horsebattery") - coinbase_pubkey = coinbase_key.get_pubkey() + coinbase_key = ECKey() + coinbase_key.generate() + coinbase_pubkey = coinbase_key.get_pubkey().get_bytes() # Create the first block with a coinbase output to our key height = 1 diff --git a/test/functional/feature_block.py b/test/functional/feature_block.py index 3122db3d9d..e6d1cf44b9 100755 --- a/test/functional/feature_block.py +++ b/test/functional/feature_block.py @@ -7,7 +7,7 @@ import copy import struct from test_framework.blocktools import create_block, create_coinbase, create_tx_with_script, get_legacy_sigopcount_block -from test_framework.key import CECKey +from test_framework.key import ECKey from test_framework.messages import ( CBlock, COIN, @@ -85,9 +85,9 @@ class FullBlockTest(BitcoinTestFramework): self.bootstrap_p2p() # Add one p2p connection to the node self.block_heights = {} - self.coinbase_key = CECKey() - self.coinbase_key.set_secretbytes(b"horsebattery") - self.coinbase_pubkey = self.coinbase_key.get_pubkey() + self.coinbase_key = ECKey() + self.coinbase_key.generate() + self.coinbase_pubkey = self.coinbase_key.get_pubkey().get_bytes() self.tip = None self.blocks = {} self.genesis_hash = int(self.nodes[0].getbestblockhash(), 16) @@ -481,7 +481,7 @@ class FullBlockTest(BitcoinTestFramework): tx.vin.append(CTxIn(COutPoint(b39.vtx[i].sha256, 0), b'')) # Note: must pass the redeem_script (not p2sh_script) to the signature hash function (sighash, err) = SignatureHash(redeem_script, tx, 1, SIGHASH_ALL) - sig = self.coinbase_key.sign(sighash) + bytes(bytearray([SIGHASH_ALL])) + sig = self.coinbase_key.sign_ecdsa(sighash) + bytes(bytearray([SIGHASH_ALL])) scriptSig = CScript([sig, redeem_script]) tx.vin[1].scriptSig = scriptSig @@ -1225,7 +1225,7 @@ class FullBlockTest(BitcoinTestFramework): tx.vin[0].scriptSig = CScript() return (sighash, err) = SignatureHash(spend_tx.vout[0].scriptPubKey, tx, 0, SIGHASH_ALL) - tx.vin[0].scriptSig = CScript([self.coinbase_key.sign(sighash) + bytes(bytearray([SIGHASH_ALL]))]) 
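+            # sign_ecdsa() returns a DER-encoded ECDSA signature; appending the
+            # SIGHASH_ALL byte produces the signature that is pushed in the scriptSig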
+ tx.vin[0].scriptSig = CScript([self.coinbase_key.sign_ecdsa(sighash) + bytes(bytearray([SIGHASH_ALL]))]) def create_and_sign_transaction(self, spend_tx, value, script=CScript([OP_TRUE])): tx = self.create_tx(spend_tx, 0, value, script) diff --git a/test/functional/test_framework/key.py b/test/functional/test_framework/key.py index ff66f74bb6..912c0ca978 100644 --- a/test/functional/test_framework/key.py +++ b/test/functional/test_framework/key.py @@ -1,216 +1,386 @@ -# Copyright (c) 2011 Sam Rushing -"""ECC secp256k1 OpenSSL wrapper. +# Copyright (c) 2019 Pieter Wuille +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. +"""Test-only secp256k1 elliptic curve implementation -WARNING: This module does not mlock() secrets; your private keys may end up on -disk in swap! Use with caution! +WARNING: This code is slow, uses bad randomness, does not properly protect +keys, and is trivially vulnerable to side channel attacks. Do not use for +anything but tests.""" +import random -This file is modified from python-bitcoinlib. -""" +def modinv(a, n): + """Compute the modular inverse of a modulo n -import ctypes -import ctypes.util -import hashlib + See https://en.wikipedia.org/wiki/Extended_Euclidean_algorithm#Modular_integers. + """ + t1, t2 = 0, 1 + r1, r2 = n, a + while r2 != 0: + q = r1 // r2 + t1, t2 = t2, t1 - q * t2 + r1, r2 = r2, r1 - q * r2 + if r1 > 1: + return None + if t1 < 0: + t1 += n + return t1 -ssl = ctypes.cdll.LoadLibrary(ctypes.util.find_library ('ssl') or 'libeay32') +def jacobi_symbol(n, k): + """Compute the Jacobi symbol of n modulo k -ssl.BN_new.restype = ctypes.c_void_p -ssl.BN_new.argtypes = [] + See http://en.wikipedia.org/wiki/Jacobi_symbol -ssl.BN_bin2bn.restype = ctypes.c_void_p -ssl.BN_bin2bn.argtypes = [ctypes.c_char_p, ctypes.c_int, ctypes.c_void_p] + For our application k is always prime, so this is the same as the Legendre symbol.""" + assert k > 0 and k & 1, "jacobi symbol is only defined for positive odd k" + n %= k + t = 0 + while n != 0: + while n & 1 == 0: + n >>= 1 + r = k & 7 + t ^= (r == 3 or r == 5) + n, k = k, n + t ^= (n & k & 3 == 3) + n = n % k + if k == 1: + return -1 if t else 1 + return 0 -ssl.BN_CTX_free.restype = None -ssl.BN_CTX_free.argtypes = [ctypes.c_void_p] +def modsqrt(a, p): + """Compute the square root of a modulo p when p % 4 = 3. -ssl.BN_CTX_new.restype = ctypes.c_void_p -ssl.BN_CTX_new.argtypes = [] + The Tonelli-Shanks algorithm can be used. See https://en.wikipedia.org/wiki/Tonelli-Shanks_algorithm -ssl.ECDH_compute_key.restype = ctypes.c_int -ssl.ECDH_compute_key.argtypes = [ctypes.c_void_p, ctypes.c_int, ctypes.c_void_p, ctypes.c_void_p] + Limiting this function to only work for p % 4 = 3 means we don't need to + iterate through the loop. The highest n such that p - 1 = 2^n Q with Q odd + is n = 1. Therefore Q = (p-1)/2 and sqrt = a^((Q+1)/2) = a^((p+1)/4) -ssl.ECDSA_sign.restype = ctypes.c_int -ssl.ECDSA_sign.argtypes = [ctypes.c_int, ctypes.c_void_p, ctypes.c_int, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p] + secp256k1's is defined over field of size 2**256 - 2**32 - 977, which is 3 mod 4. 
+ """ + if p % 4 != 3: + raise NotImplementedError("modsqrt only implemented for p % 4 = 3") + sqrt = pow(a, (p + 1)//4, p) + if pow(sqrt, 2, p) == a % p: + return sqrt + return None -ssl.ECDSA_verify.restype = ctypes.c_int -ssl.ECDSA_verify.argtypes = [ctypes.c_int, ctypes.c_void_p, ctypes.c_int, ctypes.c_void_p, ctypes.c_int, ctypes.c_void_p] +class EllipticCurve: + def __init__(self, p, a, b): + """Initialize elliptic curve y^2 = x^3 + a*x + b over GF(p).""" + self.p = p + self.a = a % p + self.b = b % p -ssl.EC_KEY_free.restype = None -ssl.EC_KEY_free.argtypes = [ctypes.c_void_p] + def affine(self, p1): + """Convert a Jacobian point tuple p1 to affine form, or None if at infinity. -ssl.EC_KEY_new_by_curve_name.restype = ctypes.c_void_p -ssl.EC_KEY_new_by_curve_name.argtypes = [ctypes.c_int] + An affine point is represented as the Jacobian (x, y, 1)""" + x1, y1, z1 = p1 + if z1 == 0: + return None + inv = modinv(z1, self.p) + inv_2 = (inv**2) % self.p + inv_3 = (inv_2 * inv) % self.p + return ((inv_2 * x1) % self.p, (inv_3 * y1) % self.p, 1) -ssl.EC_KEY_get0_group.restype = ctypes.c_void_p -ssl.EC_KEY_get0_group.argtypes = [ctypes.c_void_p] + def negate(self, p1): + """Negate a Jacobian point tuple p1.""" + x1, y1, z1 = p1 + return (x1, (self.p - y1) % self.p, z1) -ssl.EC_KEY_get0_public_key.restype = ctypes.c_void_p -ssl.EC_KEY_get0_public_key.argtypes = [ctypes.c_void_p] + def on_curve(self, p1): + """Determine whether a Jacobian tuple p is on the curve (and not infinity)""" + x1, y1, z1 = p1 + z2 = pow(z1, 2, self.p) + z4 = pow(z2, 2, self.p) + return z1 != 0 and (pow(x1, 3, self.p) + self.a * x1 * z4 + self.b * z2 * z4 - pow(y1, 2, self.p)) % self.p == 0 -ssl.EC_KEY_set_private_key.restype = ctypes.c_int -ssl.EC_KEY_set_private_key.argtypes = [ctypes.c_void_p, ctypes.c_void_p] + def is_x_coord(self, x): + """Test whether x is a valid X coordinate on the curve.""" + x_3 = pow(x, 3, self.p) + return jacobi_symbol(x_3 + self.a * x + self.b, self.p) != -1 -ssl.EC_KEY_set_conv_form.restype = None -ssl.EC_KEY_set_conv_form.argtypes = [ctypes.c_void_p, ctypes.c_int] + def lift_x(self, x): + """Given an X coordinate on the curve, return a corresponding affine point.""" + x_3 = pow(x, 3, self.p) + v = x_3 + self.a * x + self.b + y = modsqrt(v, self.p) + if y is None: + return None + return (x, y, 1) -ssl.EC_KEY_set_public_key.restype = ctypes.c_int -ssl.EC_KEY_set_public_key.argtypes = [ctypes.c_void_p, ctypes.c_void_p] + def double(self, p1): + """Double a Jacobian tuple p1 -ssl.i2o_ECPublicKey.restype = ctypes.c_void_p -ssl.i2o_ECPublicKey.argtypes = [ctypes.c_void_p, ctypes.c_void_p] + See https://en.wikibooks.org/wiki/Cryptography/Prime_Curve/Jacobian_Coordinates - Point Doubling""" + x1, y1, z1 = p1 + if z1 == 0: + return (0, 1, 0) + y1_2 = (y1**2) % self.p + y1_4 = (y1_2**2) % self.p + x1_2 = (x1**2) % self.p + s = (4*x1*y1_2) % self.p + m = 3*x1_2 + if self.a: + m += self.a * pow(z1, 4, self.p) + m = m % self.p + x2 = (m**2 - 2*s) % self.p + y2 = (m*(s - x2) - 8*y1_4) % self.p + z2 = (2*y1*z1) % self.p + return (x2, y2, z2) -ssl.EC_POINT_new.restype = ctypes.c_void_p -ssl.EC_POINT_new.argtypes = [ctypes.c_void_p] + def add_mixed(self, p1, p2): + """Add a Jacobian tuple p1 and an affine tuple p2 -ssl.EC_POINT_free.restype = None -ssl.EC_POINT_free.argtypes = [ctypes.c_void_p] + See https://en.wikibooks.org/wiki/Cryptography/Prime_Curve/Jacobian_Coordinates - Point Addition (with affine point)""" + x1, y1, z1 = p1 + x2, y2, z2 = p2 + assert(z2 == 1) + # Adding to the point at infinity 
is a no-op + if z1 == 0: + return p2 + z1_2 = (z1**2) % self.p + z1_3 = (z1_2 * z1) % self.p + u2 = (x2 * z1_2) % self.p + s2 = (y2 * z1_3) % self.p + if x1 == u2: + if (y1 != s2): + # p1 and p2 are inverses. Return the point at infinity. + return (0, 1, 0) + # p1 == p2. The formulas below fail when the two points are equal. + return self.double(p1) + h = u2 - x1 + r = s2 - y1 + h_2 = (h**2) % self.p + h_3 = (h_2 * h) % self.p + u1_h_2 = (x1 * h_2) % self.p + x3 = (r**2 - h_3 - 2*u1_h_2) % self.p + y3 = (r*(u1_h_2 - x3) - y1*h_3) % self.p + z3 = (h*z1) % self.p + return (x3, y3, z3) -ssl.EC_POINT_mul.restype = ctypes.c_int -ssl.EC_POINT_mul.argtypes = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p] + def add(self, p1, p2): + """Add two Jacobian tuples p1 and p2 -# this specifies the curve used with ECDSA. -NID_secp256k1 = 714 # from openssl/obj_mac.h + See https://en.wikibooks.org/wiki/Cryptography/Prime_Curve/Jacobian_Coordinates - Point Addition""" + x1, y1, z1 = p1 + x2, y2, z2 = p2 + # Adding the point at infinity is a no-op + if z1 == 0: + return p2 + if z2 == 0: + return p1 + # Adding an Affine to a Jacobian is more efficient since we save field multiplications and squarings when z = 1 + if z1 == 1: + return self.add_mixed(p2, p1) + if z2 == 1: + return self.add_mixed(p1, p2) + z1_2 = (z1**2) % self.p + z1_3 = (z1_2 * z1) % self.p + z2_2 = (z2**2) % self.p + z2_3 = (z2_2 * z2) % self.p + u1 = (x1 * z2_2) % self.p + u2 = (x2 * z1_2) % self.p + s1 = (y1 * z2_3) % self.p + s2 = (y2 * z1_3) % self.p + if u1 == u2: + if (s1 != s2): + # p1 and p2 are inverses. Return the point at infinity. + return (0, 1, 0) + # p1 == p2. The formulas below fail when the two points are equal. + return self.double(p1) + h = u2 - u1 + r = s2 - s1 + h_2 = (h**2) % self.p + h_3 = (h_2 * h) % self.p + u1_h_2 = (u1 * h_2) % self.p + x3 = (r**2 - h_3 - 2*u1_h_2) % self.p + y3 = (r*(u1_h_2 - x3) - s1*h_3) % self.p + z3 = (h*z1*z2) % self.p + return (x3, y3, z3) + def mul(self, ps): + """Compute a (multi) point multiplication + + ps is a list of (Jacobian tuple, scalar) pairs. + """ + r = (0, 1, 0) + for i in range(255, -1, -1): + r = self.double(r) + for (p, n) in ps: + if ((n >> i) & 1): + r = self.add(r, p) + return r + +SECP256K1 = EllipticCurve(2**256 - 2**32 - 977, 0, 7) +SECP256K1_G = (0x79BE667EF9DCBBAC55A06295CE870B07029BFCDB2DCE28D959F2815B16F81798, 0x483ADA7726A3C4655DA4FBFC0E1108A8FD17B448A68554199C47D08FFB10D4B8, 1) SECP256K1_ORDER = 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141 SECP256K1_ORDER_HALF = SECP256K1_ORDER // 2 -# Thx to Sam Devlin for the ctypes magic 64-bit fix. 
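(As a quick illustration of how the new primitives fit together, here is a minimal sketch that is not part of the diff and assumes only the EllipticCurve methods and SECP256K1* constants defined above: deriving and serializing a public point reduces to a single multi-multiplication.)

    secret = 0xC0FFEE                              # any value in [1, SECP256K1_ORDER)
    pub = SECP256K1.mul([(SECP256K1_G, secret)])   # Jacobian point secret*G
    x, y, _ = SECP256K1.affine(pub)                # normalize so that z == 1
    assert SECP256K1.on_curve((x, y, 1))
    compressed = bytes([0x02 + (y & 1)]) + x.to_bytes(32, 'big')  # 33-byte encoding

This mirrors what ECKey.get_pubkey() and ECPubKey.get_bytes() further down in this file do.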
-def _check_result(val, func, args): - if val == 0: - raise ValueError - else: - return ctypes.c_void_p (val) - -ssl.EC_KEY_new_by_curve_name.restype = ctypes.c_void_p -ssl.EC_KEY_new_by_curve_name.errcheck = _check_result - -class CECKey(): - """Wrapper around OpenSSL's EC_KEY""" +class ECPubKey(): + """A secp256k1 public key""" def __init__(self): - self.k = ssl.EC_KEY_new_by_curve_name(NID_secp256k1) + """Construct an uninitialized public key""" + self.valid = False - def __del__(self): - if ssl: - ssl.EC_KEY_free(self.k) - self.k = None - - def set_secretbytes(self, secret): - priv_key = ssl.BN_bin2bn(secret, 32, ssl.BN_new()) - group = ssl.EC_KEY_get0_group(self.k) - pub_key = ssl.EC_POINT_new(group) - ctx = ssl.BN_CTX_new() - if not ssl.EC_POINT_mul(group, pub_key, priv_key, None, None, ctx): - raise ValueError("Could not derive public key from the supplied secret.") - ssl.EC_POINT_mul(group, pub_key, priv_key, None, None, ctx) - ssl.EC_KEY_set_private_key(self.k, priv_key) - ssl.EC_KEY_set_public_key(self.k, pub_key) - ssl.EC_POINT_free(pub_key) - ssl.BN_CTX_free(ctx) - return self.k - - def set_privkey(self, key): - self.mb = ctypes.create_string_buffer(key) - return ssl.d2i_ECPrivateKey(ctypes.byref(self.k), ctypes.byref(ctypes.pointer(self.mb)), len(key)) - - def set_pubkey(self, key): - self.mb = ctypes.create_string_buffer(key) - return ssl.o2i_ECPublicKey(ctypes.byref(self.k), ctypes.byref(ctypes.pointer(self.mb)), len(key)) - - def get_privkey(self): - size = ssl.i2d_ECPrivateKey(self.k, 0) - mb_pri = ctypes.create_string_buffer(size) - ssl.i2d_ECPrivateKey(self.k, ctypes.byref(ctypes.pointer(mb_pri))) - return mb_pri.raw - - def get_pubkey(self): - size = ssl.i2o_ECPublicKey(self.k, 0) - mb = ctypes.create_string_buffer(size) - ssl.i2o_ECPublicKey(self.k, ctypes.byref(ctypes.pointer(mb))) - return mb.raw - - def get_raw_ecdh_key(self, other_pubkey): - ecdh_keybuffer = ctypes.create_string_buffer(32) - r = ssl.ECDH_compute_key(ctypes.pointer(ecdh_keybuffer), 32, - ssl.EC_KEY_get0_public_key(other_pubkey.k), - self.k, 0) - if r != 32: - raise Exception('CKey.get_ecdh_key(): ECDH_compute_key() failed') - return ecdh_keybuffer.raw - - def get_ecdh_key(self, other_pubkey, kdf=lambda k: hashlib.sha256(k).digest()): - # FIXME: be warned it's not clear what the kdf should be as a default - r = self.get_raw_ecdh_key(other_pubkey) - return kdf(r) - - def sign(self, hash, low_s = True): - # FIXME: need unit tests for below cases - if not isinstance(hash, bytes): - raise TypeError('Hash must be bytes instance; got %r' % hash.__class__) - if len(hash) != 32: - raise ValueError('Hash must be exactly 32 bytes long') - - sig_size0 = ctypes.c_uint32() - sig_size0.value = ssl.ECDSA_size(self.k) - mb_sig = ctypes.create_string_buffer(sig_size0.value) - result = ssl.ECDSA_sign(0, hash, len(hash), mb_sig, ctypes.byref(sig_size0), self.k) - assert 1 == result - assert mb_sig.raw[0] == 0x30 - assert mb_sig.raw[1] == sig_size0.value - 2 - total_size = mb_sig.raw[1] - assert mb_sig.raw[2] == 2 - r_size = mb_sig.raw[3] - assert mb_sig.raw[4 + r_size] == 2 - s_size = mb_sig.raw[5 + r_size] - s_value = int.from_bytes(mb_sig.raw[6+r_size:6+r_size+s_size], byteorder='big') - if (not low_s) or s_value <= SECP256K1_ORDER_HALF: - return mb_sig.raw[:sig_size0.value] + def set(self, data): + """Construct a public key from a serialization in compressed or uncompressed format""" + if (len(data) == 65 and data[0] == 0x04): + p = (int.from_bytes(data[1:33], 'big'), int.from_bytes(data[33:65], 'big'), 1) + 
self.valid = SECP256K1.on_curve(p) + if self.valid: + self.p = p + self.compressed = False + elif (len(data) == 33 and (data[0] == 0x02 or data[0] == 0x03)): + x = int.from_bytes(data[1:33], 'big') + if SECP256K1.is_x_coord(x): + p = SECP256K1.lift_x(x) + # if the oddness of the y co-ord isn't correct, find the other + # valid y + if (p[1] & 1) != (data[0] & 1): + p = SECP256K1.negate(p) + self.p = p + self.valid = True + self.compressed = True + else: + self.valid = False else: - low_s_value = SECP256K1_ORDER - s_value - low_s_bytes = (low_s_value).to_bytes(33, byteorder='big') - while len(low_s_bytes) > 1 and low_s_bytes[0] == 0 and low_s_bytes[1] < 0x80: - low_s_bytes = low_s_bytes[1:] - new_s_size = len(low_s_bytes) - new_total_size_byte = (total_size + new_s_size - s_size).to_bytes(1,byteorder='big') - new_s_size_byte = (new_s_size).to_bytes(1,byteorder='big') - return b'\x30' + new_total_size_byte + mb_sig.raw[2:5+r_size] + new_s_size_byte + low_s_bytes - - def verify(self, hash, sig): - """Verify a DER signature""" - return ssl.ECDSA_verify(0, hash, len(hash), sig, len(sig), self.k) == 1 - - -class CPubKey(bytes): - """An encapsulated public key - - Attributes: - - is_valid - Corresponds to CPubKey.IsValid() - is_fullyvalid - Corresponds to CPubKey.IsFullyValid() - is_compressed - Corresponds to CPubKey.IsCompressed() - """ - - def __new__(cls, buf, _cec_key=None): - self = super(CPubKey, cls).__new__(cls, buf) - if _cec_key is None: - _cec_key = CECKey() - self._cec_key = _cec_key - self.is_fullyvalid = _cec_key.set_pubkey(self) != 0 - return self - - @property - def is_valid(self): - return len(self) > 0 + self.valid = False @property def is_compressed(self): - return len(self) == 33 + return self.compressed - def verify(self, hash, sig): - return self._cec_key.verify(hash, sig) + @property + def is_valid(self): + return self.valid - def __str__(self): - return repr(self) + def get_bytes(self): + assert(self.valid) + p = SECP256K1.affine(self.p) + if p is None: + return None + if self.compressed: + return bytes([0x02 + (p[1] & 1)]) + p[0].to_bytes(32, 'big') + else: + return bytes([0x04]) + p[0].to_bytes(32, 'big') + p[1].to_bytes(32, 'big') - def __repr__(self): - return '%s(%s)' % (self.__class__.__name__, super(CPubKey, self).__repr__()) + def verify_ecdsa(self, sig, msg, low_s=True): + """Verify a strictly DER-encoded ECDSA signature against this pubkey. + See https://en.wikipedia.org/wiki/Elliptic_Curve_Digital_Signature_Algorithm for the + ECDSA verifier algorithm""" + assert(self.valid) + + # Extract r and s from the DER formatted signature. Return false for + # any DER encoding errors. 
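+        # Expected layout: 0x30 <total len> 0x02 <len(R)> <R> 0x02 <len(S)> <S>,
+        # where R and S are positive big-endian integers with minimal padding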
+ if (sig[1] + 2 != len(sig)): + return False + if (len(sig) < 4): + return False + if (sig[0] != 0x30): + return False + if (sig[2] != 0x02): + return False + rlen = sig[3] + if (len(sig) < 6 + rlen): + return False + if rlen < 1 or rlen > 33: + return False + if sig[4] >= 0x80: + return False + if (rlen > 1 and (sig[4] == 0) and not (sig[5] & 0x80)): + return False + r = int.from_bytes(sig[4:4+rlen], 'big') + if (sig[4+rlen] != 0x02): + return False + slen = sig[5+rlen] + if slen < 1 or slen > 33: + return False + if (len(sig) != 6 + rlen + slen): + return False + if sig[6+rlen] >= 0x80: + return False + if (slen > 1 and (sig[6+rlen] == 0) and not (sig[7+rlen] & 0x80)): + return False + s = int.from_bytes(sig[6+rlen:6+rlen+slen], 'big') + + # Verify that r and s are within the group order + if r < 1 or s < 1 or r >= SECP256K1_ORDER or s >= SECP256K1_ORDER: + return False + if low_s and s >= SECP256K1_ORDER_HALF: + return False + z = int.from_bytes(msg, 'big') + + # Run verifier algorithm on r, s + w = modinv(s, SECP256K1_ORDER) + u1 = z*w % SECP256K1_ORDER + u2 = r*w % SECP256K1_ORDER + R = SECP256K1.affine(SECP256K1.mul([(SECP256K1_G, u1), (self.p, u2)])) + if R is None or R[0] != r: + return False + return True + +class ECKey(): + """A secp256k1 private key""" + + def __init__(self): + self.valid = False + + def set(self, secret, compressed): + """Construct a private key object with given 32-byte secret and compressed flag.""" + assert(len(secret) == 32) + secret = int.from_bytes(secret, 'big') + self.valid = (secret > 0 and secret < SECP256K1_ORDER) + if self.valid: + self.secret = secret + self.compressed = compressed + + def generate(self, compressed=True): + """Generate a random private key (compressed or uncompressed).""" + self.set(random.randrange(1, SECP256K1_ORDER).to_bytes(32, 'big'), compressed) + + def get_bytes(self): + """Retrieve the 32-byte representation of this key.""" + assert(self.valid) + return self.secret.to_bytes(32, 'big') + + @property + def is_valid(self): + return self.valid + + @property + def is_compressed(self): + return self.compressed + + def get_pubkey(self): + """Compute an ECPubKey object for this secret key.""" + assert(self.valid) + ret = ECPubKey() + p = SECP256K1.mul([(SECP256K1_G, self.secret)]) + ret.p = p + ret.valid = True + ret.compressed = self.compressed + return ret + + def sign_ecdsa(self, msg, low_s=True): + """Construct a DER-encoded ECDSA signature with this key. + + See https://en.wikipedia.org/wiki/Elliptic_Curve_Digital_Signature_Algorithm for the + ECDSA signer algorithm.""" + assert(self.valid) + z = int.from_bytes(msg, 'big') + # Note: no RFC6979, but a simple random nonce (some tests rely on distinct transactions for the same operation) + k = random.randrange(1, SECP256K1_ORDER) + R = SECP256K1.affine(SECP256K1.mul([(SECP256K1_G, k)])) + r = R[0] % SECP256K1_ORDER + s = (modinv(k, SECP256K1_ORDER) * (z + self.secret * r)) % SECP256K1_ORDER + if low_s and s > SECP256K1_ORDER_HALF: + s = SECP256K1_ORDER - s + # Represent in DER format. The byte representations of r and s have + # length rounded up (255 bits becomes 32 bytes and 256 bits becomes 33 + # bytes). 
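+        # Rounding the length up by a whole byte keeps the top bit clear, so r and s
+        # are always encoded as positive DER integers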
+ rb = r.to_bytes((r.bit_length() + 8) // 8, 'big') + sb = s.to_bytes((s.bit_length() + 8) // 8, 'big') + return b'\x30' + bytes([4 + len(rb) + len(sb), 2, len(rb)]) + rb + bytes([2, len(sb)]) + sb From b41e46416f1aca9fdcf9f7d08b726a05d5409a34 Mon Sep 17 00:00:00 2001 From: MarcoFalke Date: Thu, 7 Mar 2019 11:33:59 -0500 Subject: [PATCH 02/10] partial Merge #15530: doc: Move wallet lock annotations to header faebd2ef40 doc: Move wallet lock annotations to header (MarcoFalke) Pull request description: We put the annotations in a central place (the header) as opposed to spreading them over the cpp files, where they easily get outdated. Tree-SHA512: 18d8c7329efd3471713de18fe8d63d67c50fcb9fa99bc372294d829aa7668ea33e10d44e9e50121a04d8cc3302d5fd7759224f7935451a4693c4498a555257e6 --- src/wallet/wallet.cpp | 45 +++++++++++++++++++++---------------------- src/wallet/wallet.h | 6 +++--- 2 files changed, 25 insertions(+), 26 deletions(-) diff --git a/src/wallet/wallet.cpp b/src/wallet/wallet.cpp index 87f187ea2a..7f108bfbf9 100644 --- a/src/wallet/wallet.cpp +++ b/src/wallet/wallet.cpp @@ -238,7 +238,7 @@ const CWalletTx* CWallet::GetWalletTx(const uint256& hash) const CPubKey CWallet::GenerateNewKey(WalletBatch &batch, uint32_t nAccountIndex, bool fInternal) { assert(!IsWalletFlagSet(WALLET_FLAG_DISABLE_PRIVATE_KEYS)); - AssertLockHeld(cs_wallet); // mapKeyMetadata + AssertLockHeld(cs_wallet); bool fCompressed = CanSupportFeature(FEATURE_COMPRPUBKEY); // default to compressed public keys if we want 0.6.0 wallets CKey secret; @@ -418,9 +418,9 @@ bool CWallet::AddHDPubKey(WalletBatch &batch, const CExtPubKey &extPubKey, bool return batch.WriteHDPubKey(hdPubKey, mapKeyMetadata[extPubKey.pubkey.GetID()]); } -bool CWallet::AddKeyPubKeyWithDB(WalletBatch &batch, const CKey& secret, const CPubKey &pubkey) +bool CWallet::AddKeyPubKeyWithDB(WalletBatch& batch, const CKey& secret, const CPubKey& pubkey) { - AssertLockHeld(cs_wallet); // mapKeyMetadata + AssertLockHeld(cs_wallet); // Make sure we aren't adding private keys to private key disabled wallets assert(!IsWalletFlagSet(WALLET_FLAG_DISABLE_PRIVATE_KEYS)); @@ -482,16 +482,16 @@ bool CWallet::AddCryptedKey(const CPubKey &vchPubKey, } } -void CWallet::LoadKeyMetadata(const CKeyID& keyID, const CKeyMetadata &meta) +void CWallet::LoadKeyMetadata(const CKeyID& keyID, const CKeyMetadata& meta) { - AssertLockHeld(cs_wallet); // mapKeyMetadata + AssertLockHeld(cs_wallet); UpdateTimeFirstKey(meta.nCreateTime); mapKeyMetadata[keyID] = meta; } -void CWallet::LoadScriptMetadata(const CScriptID& script_id, const CKeyMetadata &meta) +void CWallet::LoadScriptMetadata(const CScriptID& script_id, const CKeyMetadata& meta) { - AssertLockHeld(cs_wallet); // m_script_metadata + AssertLockHeld(cs_wallet); UpdateTimeFirstKey(meta.nCreateTime); m_script_metadata[script_id] = meta; } @@ -704,7 +704,7 @@ void CWallet::ChainStateFlushed(const CBlockLocator& loc) void CWallet::SetMinVersion(enum WalletFeature nVersion, WalletBatch* batch_in, bool fExplicit) { - LOCK(cs_wallet); // nWalletVersion + LOCK(cs_wallet); if (nWalletVersion >= nVersion) return; @@ -728,7 +728,7 @@ void CWallet::SetMinVersion(enum WalletFeature nVersion, WalletBatch* batch_in, bool CWallet::SetMaxVersion(int nVersion) { - LOCK(cs_wallet); // nWalletVersion, nWalletMaxVersion + LOCK(cs_wallet); // cannot downgrade below current version if (nWalletVersion > nVersion) return false; @@ -1040,9 +1040,9 @@ DBErrors CWallet::ReorderTransactions() return DBErrors::LOAD_OK; } -int64_t 
CWallet::IncOrderPosNext(WalletBatch *batch) +int64_t CWallet::IncOrderPosNext(WalletBatch* batch) { - AssertLockHeld(cs_wallet); // nOrderPosNext + AssertLockHeld(cs_wallet); int64_t nRet = nOrderPosNext++; if (batch) { batch->WriteOrderPosNext(nOrderPosNext); @@ -4025,8 +4025,8 @@ void CWallet::AutoLockMasternodeCollaterals() DBErrors CWallet::ZapSelectTx(std::vector& vHashIn, std::vector& vHashOut) { - AssertLockHeld(cs_wallet); // mapWallet - DBErrors nZapSelectTxRet = WalletBatch(*database,"cr+").ZapSelectTx(vHashIn, vHashOut); + AssertLockHeld(cs_wallet); + DBErrors nZapSelectTxRet = WalletBatch(*database, "cr+").ZapSelectTx(vHashIn, vHashOut); for (uint256 hash : vHashOut) { const auto& it = mapWallet.find(hash); wtxOrdered.erase(it->second.m_it_wtxOrdered); @@ -4053,7 +4053,6 @@ DBErrors CWallet::ZapSelectTx(std::vector& vHashIn, std::vector& vWtx) @@ -4169,7 +4168,7 @@ bool CWallet::NewKeyPool() size_t CWallet::KeypoolCountExternalKeys() { - AssertLockHeld(cs_wallet); // setExternalKeyPool + AssertLockHeld(cs_wallet); return setExternalKeyPool.size(); } @@ -4438,7 +4437,7 @@ std::map CWallet::GetAddressBalances(interfaces::Chain: std::set< std::set > CWallet::GetAddressGroupings() { - AssertLockHeld(cs_wallet); // mapWallet + AssertLockHeld(cs_wallet); std::set< std::set > groupings; std::set grouping; @@ -4613,7 +4612,7 @@ void CWallet::GetScriptForMining(std::shared_ptr &script) void CWallet::LockCoin(const COutPoint& output) { - AssertLockHeld(cs_wallet); // setLockedCoins + AssertLockHeld(cs_wallet); setLockedCoins.insert(output); std::map::iterator it = mapWallet.find(output.hash); if (it != mapWallet.end()) it->second.MarkDirty(); // recalculate all credits for this tx @@ -4624,7 +4623,7 @@ void CWallet::LockCoin(const COutPoint& output) void CWallet::UnlockCoin(const COutPoint& output) { - AssertLockHeld(cs_wallet); // setLockedCoins + AssertLockHeld(cs_wallet); setLockedCoins.erase(output); std::map::iterator it = mapWallet.find(output.hash); if (it != mapWallet.end()) it->second.MarkDirty(); // recalculate all credits for this tx @@ -4635,13 +4634,13 @@ void CWallet::UnlockCoin(const COutPoint& output) void CWallet::UnlockAllCoins() { - AssertLockHeld(cs_wallet); // setLockedCoins + AssertLockHeld(cs_wallet); setLockedCoins.clear(); } bool CWallet::IsLockedCoin(uint256 hash, unsigned int n) const { - AssertLockHeld(cs_wallet); // setLockedCoins + AssertLockHeld(cs_wallet); COutPoint outpt(hash, n); return (setLockedCoins.count(outpt) > 0); @@ -4649,7 +4648,7 @@ bool CWallet::IsLockedCoin(uint256 hash, unsigned int n) const void CWallet::ListLockedCoins(std::vector& vOutpts) const { - AssertLockHeld(cs_wallet); // setLockedCoins + AssertLockHeld(cs_wallet); for (std::set::iterator it = setLockedCoins.begin(); it != setLockedCoins.end(); it++) { COutPoint outpt = (*it); @@ -4675,9 +4674,9 @@ void CWallet::ListProTxCoins(std::vector& vOutpts) const /** @} */ // end of Actions -void CWallet::GetKeyBirthTimes(interfaces::Chain::Lock& locked_chain, std::map &mapKeyBirth) const { +void CWallet::GetKeyBirthTimes(interfaces::Chain::Lock& locked_chain, std::map& mapKeyBirth) const { AssertLockHeld(::cs_main); // LookupBlockIndex - AssertLockHeld(cs_wallet); // mapKeyMetadata + AssertLockHeld(cs_wallet); mapKeyBirth.clear(); // get birth times for keys with metadata diff --git a/src/wallet/wallet.h b/src/wallet/wallet.h index 7d9cfd7fff..690a5964a6 100644 --- a/src/wallet/wallet.h +++ b/src/wallet/wallet.h @@ -670,7 +670,7 @@ private: WalletBatch *encrypted_batch 
GUARDED_BY(cs_wallet) = nullptr; //! the current wallet version: clients below this version are not able to load the wallet - int nWalletVersion = FEATURE_BASE; + int nWalletVersion GUARDED_BY(cs_wallet){FEATURE_BASE}; //! the maximum wallet format version: memory-only variable that specifies to what version this wallet may be upgraded int nWalletMaxVersion GUARDED_BY(cs_wallet) = FEATURE_BASE; @@ -725,7 +725,7 @@ private: void SyncTransaction(const CTransactionRef& tx, const uint256& block_hash, int posInBlock = 0, bool update_tx = true) EXCLUSIVE_LOCKS_REQUIRED(cs_wallet); /* HD derive new child key (on internal or external chain) */ - void DeriveNewChildKey(WalletBatch &batch, const CKeyMetadata& metadata, CKey& secretRet, uint32_t nAccountIndex, bool fInternal /*= false*/) EXCLUSIVE_LOCKS_REQUIRED(cs_wallet); + void DeriveNewChildKey(WalletBatch& batch, const CKeyMetadata& metadata, CKey& secretRet, uint32_t nAccountIndex, bool fInternal /*= false*/) EXCLUSIVE_LOCKS_REQUIRED(cs_wallet); std::set setInternalKeyPool GUARDED_BY(cs_wallet); std::set setExternalKeyPool GUARDED_BY(cs_wallet); @@ -1152,7 +1152,7 @@ public: unsigned int GetKeyPoolSize() EXCLUSIVE_LOCKS_REQUIRED(cs_wallet) { - AssertLockHeld(cs_wallet); // set{Ex,In}ternalKeyPool + AssertLockHeld(cs_wallet); return setInternalKeyPool.size() + setExternalKeyPool.size(); } From b9d4dbb12f9e445865eeb5b4c7d3b6c52073600c Mon Sep 17 00:00:00 2001 From: MarcoFalke Date: Wed, 19 Jun 2019 15:51:41 -0400 Subject: [PATCH 03/10] Merge #16243: doc: Remove travis badge from readme e91f0a7af2 doc: Remove travis badge from readme (MarcoFalke) Pull request description: The readme(s) are shipped in the released source-code archive, in which case the travis badge is useless since it doesn't link to the travis result of the correct commit/tag/branch. GitHub embeds the correct links for each tag or commit that ci ran on, so we don't need this link in the readme. ACKs for commit e91f0a: hebasto: ACK e91f0a7af2aec7d924f00da25c69d8f46e0dd33d Tree-SHA512: 860435a58b38a9bd0bc62a1e74b3a63c138c9a2f09008a090d5ecc7fd86fa908d2e5eda41d16606507a238d9488fa5323405364a9556b670684a2e4838aead2d --- test/README.md | 3 +-- test/functional/test_runner.py | 9 +++++---- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/test/README.md b/test/README.md index 8b9ea0677c..ceec8df242 100644 --- a/test/README.md +++ b/test/README.md @@ -13,8 +13,7 @@ dash-tx. - [lint](/test/lint/) which perform various static analysis checks. The util tests are run as part of `make check` target. The functional -tests and lint scripts are run by the travis continuous build process whenever a pull -request is opened. All sets of tests can also be run locally. +tests and lint scripts can be run as explained in the sections below. # Running tests locally diff --git a/test/functional/test_runner.py b/test/functional/test_runner.py index 417a5aef84..fe94adfdb8 100755 --- a/test/functional/test_runner.py +++ b/test/functional/test_runner.py @@ -67,7 +67,7 @@ TEST_EXIT_PASSED = 0 TEST_EXIT_SKIPPED = 77 BASE_SCRIPTS = [ - # Scripts that are run by the travis build process. + # Scripts that are run by default. # Longest test should go first, to favor running tests in parallel 'feature_dip3_deterministicmns.py', # NOTE: needs dash_hash to pass 'feature_block_reward_reallocation.py', @@ -226,7 +226,7 @@ BASE_SCRIPTS = [ ] EXTENDED_SCRIPTS = [ - # These tests are not run by the travis build process. + # These tests are not run by default. 
# Longest test should go first, to favor running tests in parallel 'feature_pruning.py', # NOTE: Prune mode is incompatible with -txindex, should work with governance validation disabled though. 'feature_dbcrash.py', @@ -538,7 +538,8 @@ class TestHandler: for job in self.jobs: (name, start_time, proc, testdir, log_out, log_err) = job if int(time.time() - start_time) > self.timeout_duration: - # In travis, timeout individual tests (to stop tests hanging and not providing useful output). + # Timeout individual tests if timeout is specified (to stop + # tests hanging and not providing useful output). proc.send_signal(signal.SIGINT) if proc.poll() is not None: log_out.seek(0), log_err.seek(0) @@ -626,7 +627,7 @@ def check_script_list(*, src_dir, fail_on_warn): if len(missed_tests) != 0: print("%sWARNING!%s The following scripts are not being run: %s. Check the test lists in test_runner.py." % (BOLD[1], BOLD[0], str(missed_tests))) if fail_on_warn: - # On travis this warning is an error to prevent merging incomplete commits into master + # On CI this warning is an error to prevent merging incomplete commits into master sys.exit(1) From 22ef848f6e18529bfedec1f2607c180c5e481334 Mon Sep 17 00:00:00 2001 From: "Wladimir J. van der Laan" Date: Mon, 15 Jul 2019 21:21:10 +0200 Subject: [PATCH 04/10] Merge #15824: docs: Improve netbase comments c7f6ce74d3a5cf2a0c5bac20eab1efd997175a72 docs: Improve netbase comments (Carl Dong) Pull request description: Second in a series of PRs documenting the net stack. Contributed with sincere thanks to sipa, laanwj, and gmaxwell for providing much of the history, context, and rationale. ACKs for top commit: laanwj: ACK c7f6ce74d3a5cf2a0c5bac20eab1efd997175a72 Tree-SHA512: ad83054d3b8d0c8c3fb55be011bcf294176e7509513bf61326866afd53e8159644e0d59bb3a2f404717f525cbf736096d4c1990e61cfd89845d51fa6b5394b7c --- src/netbase.cpp | 218 ++++++++++++++++++++++++++++++++++++++++++++---- 1 file changed, 200 insertions(+), 18 deletions(-) diff --git a/src/netbase.cpp b/src/netbase.cpp index 680c3ac2a9..394c588065 100644 --- a/src/netbase.cpp +++ b/src/netbase.cpp @@ -70,6 +70,12 @@ bool static LookupIntern(const char *pszName, std::vector& vIP, unsign { CNetAddr addr; + // From our perspective, onion addresses are not hostnames but rather + // direct encodings of CNetAddr much like IPv4 dotted-decimal notation + // or IPv6 colon-separated hextet notation. Since we can't use + // getaddrinfo to decode them and it wouldn't make sense to resolve + // them, we return a network address representing it instead. See + // CNetAddr::SetSpecial(const std::string&) for more details. if (addr.SetSpecial(std::string(pszName))) { vIP.push_back(addr); return true; @@ -79,12 +85,20 @@ bool static LookupIntern(const char *pszName, std::vector& vIP, unsign struct addrinfo aiHint; memset(&aiHint, 0, sizeof(struct addrinfo)); + // We want a TCP port, which is a streaming socket type aiHint.ai_socktype = SOCK_STREAM; aiHint.ai_protocol = IPPROTO_TCP; + // We don't care which address family (IPv4 or IPv6) is returned aiHint.ai_family = AF_UNSPEC; #ifdef WIN32 aiHint.ai_flags = fAllowLookup ? 0 : AI_NUMERICHOST; #else + // If we allow lookups of hostnames, use the AI_ADDRCONFIG flag to only + // return addresses whose family we have an address configured for. + // + // If we don't allow lookups, then use the AI_NUMERICHOST flag for + // getaddrinfo to only decode numerical network addresses and suppress + // hostname lookups. aiHint.ai_flags = fAllowLookup ? 
AI_ADDRCONFIG : AI_NUMERICHOST; #endif struct addrinfo *aiRes = nullptr; @@ -92,6 +106,8 @@ bool static LookupIntern(const char *pszName, std::vector& vIP, unsign if (nErr) return false; + // Traverse the linked list starting with aiTrav, add all non-internal + // IPv4,v6 addresses to vIP while respecting nMaxSolutions. struct addrinfo *aiTrav = aiRes; while (aiTrav != nullptr && (nMaxSolutions == 0 || vIP.size() < nMaxSolutions)) { @@ -121,6 +137,21 @@ bool static LookupIntern(const char *pszName, std::vector& vIP, unsign return (vIP.size() > 0); } +/** + * Resolve a host string to its corresponding network addresses. + * + * @param pszName The string representing a host. Could be a name or a numerical + * IP address (IPv6 addresses in their bracketed form are + * allowed). + * @param[out] vIP The resulting network addresses to which the specified host + * string resolved. + * + * @returns Whether or not the specified host string successfully resolved to + * any resulting network addresses. + * + * @see Lookup(const char *, std::vector&, int, bool, unsigned int) + * for additional parameter descriptions. + */ bool LookupHost(const char *pszName, std::vector& vIP, unsigned int nMaxSolutions, bool fAllowLookup) { std::string strHost(pszName); @@ -133,6 +164,12 @@ bool LookupHost(const char *pszName, std::vector& vIP, unsigned int nM return LookupIntern(strHost.c_str(), vIP, nMaxSolutions, fAllowLookup); } + /** + * Resolve a host string to its first corresponding network address. + * + * @see LookupHost(const char *, std::vector&, unsigned int, bool) for + * additional parameter descriptions. + */ bool LookupHost(const char *pszName, CNetAddr& addr, bool fAllowLookup) { std::vector vIP; @@ -143,6 +180,26 @@ bool LookupHost(const char *pszName, CNetAddr& addr, bool fAllowLookup) return true; } +/** + * Resolve a service string to its corresponding service. + * + * @param pszName The string representing a service. Could be a name or a + * numerical IP address (IPv6 addresses should be in their + * disambiguated bracketed form), optionally followed by a port + * number. (e.g. example.com:8333 or + * [2001:db8:85a3:8d3:1319:8a2e:370:7348]:420) + * @param[out] vAddr The resulting services to which the specified service string + * resolved. + * @param portDefault The default port for resulting services if not specified + * by the service string. + * @param fAllowLookup Whether or not hostname lookups are permitted. If yes, + * external queries may be performed. + * @param nMaxSolutions The maximum number of results we want, specifying 0 + * means "as many solutions as we get." + * + * @returns Whether or not the service string successfully resolved to any + * resulting services. + */ bool Lookup(const char *pszName, std::vector& vAddr, int portDefault, bool fAllowLookup, unsigned int nMaxSolutions) { if (pszName[0] == 0) @@ -161,6 +218,12 @@ bool Lookup(const char *pszName, std::vector& vAddr, int portDefault, return true; } +/** + * Resolve a service string to its first corresponding service. + * + * @see Lookup(const char *, std::vector&, int, bool, unsigned int) + * for additional parameter descriptions. + */ bool Lookup(const char *pszName, CService& addr, int portDefault, bool fAllowLookup) { std::vector vService; @@ -171,6 +234,16 @@ bool Lookup(const char *pszName, CService& addr, int portDefault, bool fAllowLoo return true; } +/** + * Resolve a service string with a numeric IP to its first corresponding + * service. 
+ * + * @returns The resulting CService if the resolution was successful, [::]:0 + * otherwise. + * + * @see Lookup(const char *, CService&, int, bool) for additional parameter + * descriptions. + */ CService LookupNumeric(const char *pszName, int portDefault) { CService addr; @@ -240,22 +313,29 @@ enum class IntrRecvError { }; /** - * Read bytes from socket. This will either read the full number of bytes requested - * or return False on error or timeout. - * This function can be interrupted by calling InterruptSocks5() + * Try to read a specified number of bytes from a socket. Please read the "see + * also" section for more detail. * - * @param data Buffer to receive into - * @param len Length of data to receive - * @param timeout Timeout in milliseconds for receive operation + * @param data The buffer where the read bytes should be stored. + * @param len The number of bytes to read into the specified buffer. + * @param timeout The total timeout in milliseconds for this read. + * @param hSocket The socket (has to be in non-blocking mode) from which to read + * bytes. * - * @note This function requires that hSocket is in non-blocking mode. + * @returns An IntrRecvError indicating the resulting status of this read. + * IntrRecvError::OK only if all of the specified number of bytes were + * read. + * + * @see This function can be interrupted by calling InterruptSocks5(bool). + * Sockets can be made non-blocking with SetSocketNonBlocking(const + * SOCKET&, bool). */ static IntrRecvError InterruptibleRecv(uint8_t* data, size_t len, int timeout, const SOCKET& hSocket) { int64_t curTime = GetTimeMillis(); int64_t endTime = curTime + timeout; - // Maximum time to wait in one select call. It will take up until this time (in millis) - // to break off in case of an interruption. + // Maximum time to wait for I/O readiness. It will take up until this time + // (in millis) to break off in case of an interruption. const int64_t maxWait = 1000; while (len > 0 && curTime < endTime) { ssize_t ret = recv(hSocket, (char*)data, len, 0); // Optimistically try the recv first @@ -270,6 +350,8 @@ static IntrRecvError InterruptibleRecv(uint8_t* data, size_t len, int timeout, c if (!IsSelectableSocket(hSocket)) { return IntrRecvError::NetworkError; } + // Only wait at most maxWait milliseconds at a time, unless + // we're approaching the end of the specified total timeout int timeout_ms = std::min(endTime - curTime, maxWait); #ifdef USE_POLL struct pollfd pollfd = {}; @@ -329,7 +411,24 @@ static std::string Socks5ErrorString(uint8_t err) } } -/** Connect using SOCKS5 (as described in RFC1928) */ +/** + * Connect to a specified destination service through an already connected + * SOCKS5 proxy. + * + * @param strDest The destination fully-qualified domain name. + * @param port The destination port. + * @param auth The credentials with which to authenticate with the specified + * SOCKS5 proxy. + * @param hSocket The SOCKS5 proxy socket. + * + * @returns Whether or not the operation succeeded. + * + * @note The specified SOCKS5 proxy socket must already be connected to the + * SOCKS5 proxy. 
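+ *
+ * @note The greeting sent below is the RFC1928 "version identifier/method
+ *       selection message": 0x05 (SOCKS version), a method count, then one
+ *       byte per offered authentication method (NOAUTH and, when credentials
+ *       are supplied, USER_PASS).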
+ * + * @see RFC1928: SOCKS Protocol + * Version 5 + */ static bool Socks5(const std::string& strDest, int port, const ProxyCredentials *auth, const SOCKET& hSocket) { IntrRecvError recvr; @@ -337,15 +436,15 @@ static bool Socks5(const std::string& strDest, int port, const ProxyCredentials if (strDest.size() > 255) { return error("Hostname too long"); } - // Accepted authentication methods + // Construct the version identifier/method selection message std::vector vSocks5Init; - vSocks5Init.push_back(SOCKSVersion::SOCKS5); + vSocks5Init.push_back(SOCKSVersion::SOCKS5); // We want the SOCK5 protocol if (auth) { - vSocks5Init.push_back(0x02); // Number of methods + vSocks5Init.push_back(0x02); // 2 method identifiers follow... vSocks5Init.push_back(SOCKS5Method::NOAUTH); vSocks5Init.push_back(SOCKS5Method::USER_PASS); } else { - vSocks5Init.push_back(0x01); // Number of methods + vSocks5Init.push_back(0x01); // 1 method identifier follows... vSocks5Init.push_back(SOCKS5Method::NOAUTH); } ssize_t ret = send(hSocket, (const char*)vSocks5Init.data(), vSocks5Init.size(), MSG_NOSIGNAL); @@ -449,8 +548,16 @@ static bool Socks5(const std::string& strDest, int port, const ProxyCredentials return true; } +/** + * Try to create a socket file descriptor with specific properties in the + * communications domain (address family) of the specified service. + * + * For details on the desired properties, see the inline comments in the source + * code. + */ SOCKET CreateSocket(const CService &addrConnect) { + // Create a sockaddr from the specified service. struct sockaddr_storage sockaddr; socklen_t len = sizeof(sockaddr); if (!addrConnect.GetSockAddr((struct sockaddr*)&sockaddr, &len)) { @@ -458,10 +565,13 @@ SOCKET CreateSocket(const CService &addrConnect) return INVALID_SOCKET; } + // Create a TCP socket in the address family of the specified service. SOCKET hSocket = socket(((struct sockaddr*)&sockaddr)->sa_family, SOCK_STREAM, IPPROTO_TCP); if (hSocket == INVALID_SOCKET) return INVALID_SOCKET; + // Ensure that waiting for I/O on this socket won't result in undefined + // behavior. if (!IsSelectableSocket(hSocket)) { CloseSocket(hSocket); LogPrintf("Cannot create connection: non-selectable socket created (fd >= FD_SETSIZE ?)\n"); @@ -470,17 +580,18 @@ SOCKET CreateSocket(const CService &addrConnect) #ifdef SO_NOSIGPIPE int set = 1; - // Different way of disabling SIGPIPE on BSD + // Set the no-sigpipe option on the socket for BSD systems, other UNIXes + // should use the MSG_NOSIGNAL flag for every send. setsockopt(hSocket, SOL_SOCKET, SO_NOSIGPIPE, (void*)&set, sizeof(int)); #endif - //Disable Nagle's algorithm + // Set the no-delay option (disable Nagle's algorithm) on the TCP socket. SetSocketNoDelay(hSocket); - // Set to non-blocking + // Set the non-blocking option on the socket. if (!SetSocketNonBlocking(hSocket, true)) { CloseSocket(hSocket); - LogPrintf("ConnectSocketDirectly: Setting socket to non-blocking failed, error %s\n", NetworkErrorString(WSAGetLastError())); + LogPrintf("CreateSocket: Setting socket to non-blocking failed, error %s\n", NetworkErrorString(WSAGetLastError())); } return hSocket; } @@ -495,8 +606,21 @@ static void LogConnectFailure(bool manual_connection, const char* fmt, const Arg } } +/** + * Try to connect to the specified service on the specified socket. + * + * @param addrConnect The service to which to connect. + * @param hSocket The socket on which to connect. + * @param nTimeout Wait this many milliseconds for the connection to be + * established. 
+ * @param manual_connection Whether or not the connection was manually requested + * (e.g. thru the addnode RPC) + * + * @returns Whether or not a connection was successfully made. + */ bool ConnectSocketDirectly(const CService &addrConnect, const SOCKET& hSocket, int nTimeout, bool manual_connection) { + // Create a sockaddr from the specified service. struct sockaddr_storage sockaddr; socklen_t len = sizeof(sockaddr); if (hSocket == INVALID_SOCKET) { @@ -507,12 +631,17 @@ bool ConnectSocketDirectly(const CService &addrConnect, const SOCKET& hSocket, i LogPrintf("Cannot connect to %s: unsupported network\n", addrConnect.ToString()); return false; } + + // Connect to the addrConnect service on the hSocket socket. if (connect(hSocket, (struct sockaddr*)&sockaddr, len) == SOCKET_ERROR) { int nErr = WSAGetLastError(); // WSAEINVAL is here because some legacy version of winsock uses it if (nErr == WSAEINPROGRESS || nErr == WSAEWOULDBLOCK || nErr == WSAEINVAL) { + // Connection didn't actually fail, but is being established + // asynchronously. Thus, use async I/O api (select/poll) + // synchronously to check for successful connection with a timeout. #ifdef USE_POLL struct pollfd pollfd = {}; pollfd.fd = hSocket; @@ -525,6 +654,10 @@ bool ConnectSocketDirectly(const CService &addrConnect, const SOCKET& hSocket, i FD_SET(hSocket, &fdset); int nRet = select(hSocket + 1, nullptr, &fdset, nullptr, &timeout); #endif + // Upon successful completion, both select and poll return the total + // number of file descriptors that have been selected. A value of 0 + // indicates that the call timed out and no file descriptors have + // been selected. if (nRet == 0) { LogPrint(BCLog::NET, "connection to %s timeout\n", addrConnect.ToString()); @@ -535,6 +668,11 @@ bool ConnectSocketDirectly(const CService &addrConnect, const SOCKET& hSocket, i LogPrintf("select() for %s failed: %s\n", addrConnect.ToString(), NetworkErrorString(WSAGetLastError())); return false; } + + // Even if the select/poll was successful, the connect might not + // have been successful. The reason for this failure is hidden away + // in the SO_ERROR for the socket in modern systems. We read it into + // nRet here. socklen_t nRetSize = sizeof(nRet); if (getsockopt(hSocket, SOL_SOCKET, SO_ERROR, (sockopt_arg_type)&nRet, &nRetSize) == SOCKET_ERROR) { @@ -578,6 +716,22 @@ bool GetProxy(enum Network net, proxyType &proxyInfoOut) { return true; } +/** + * Set the name proxy to use for all connections to nodes specified by a + * hostname. After setting this proxy, connecting to a node sepcified by a + * hostname won't result in a local lookup of said hostname, rather, connect to + * the node by asking the name proxy for a proxy connection to the hostname, + * effectively delegating the hostname lookup to the specified proxy. + * + * This delegation increases privacy for those who set the name proxy as they no + * longer leak their external hostname queries to their DNS servers. + * + * @returns Whether or not the operation succeeded. + * + * @note SOCKS5's support for UDP-over-SOCKS5 has been considered, but no SOCK5 + * server in common use (most notably Tor) actually implements UDP + * support, and a DNS resolver is beyond the scope of this project. + */ bool SetNameProxy(const proxyType &addrProxy) { if (!addrProxy.IsValid()) return false; @@ -608,6 +762,21 @@ bool IsProxy(const CNetAddr &addr) { return false; } +/** + * Connect to a specified destination service through a SOCKS5 proxy by first + * connecting to the SOCKS5 proxy. 
+ * + * @param proxy The SOCKS5 proxy. + * @param strDest The destination service to which to connect. + * @param port The destination port. + * @param hSocket The socket on which to connect to the SOCKS5 proxy. + * @param nTimeout Wait this many milliseconds for the connection to the SOCKS5 + * proxy to be established. + * @param outProxyConnectionFailed[out] Whether or not the connection to the + * SOCKS5 proxy failed. + * + * @returns Whether or not the operation succeeded. + */ bool ConnectThroughProxy(const proxyType &proxy, const std::string& strDest, int port, const SOCKET& hSocket, int nTimeout, bool *outProxyConnectionFailed) { // first connect to proxy server @@ -632,6 +801,17 @@ bool ConnectThroughProxy(const proxyType &proxy, const std::string& strDest, int return true; } +/** + * Parse and resolve a specified subnet string into the appropriate internal + * representation. + * + * @param pszName A string representation of a subnet of the form `network + * address [ "/", ( CIDR-style suffix | netmask ) ]`(e.g. + * `2001:db8::/32`, `192.0.2.0/255.255.255.0`, or `8.8.8.8`). + * @param ret The resulting internal representation of a subnet. + * + * @returns Whether the operation succeeded or not. + */ bool LookupSubNet(const char* pszName, CSubNet& ret) { std::string strSubnet(pszName); @@ -639,6 +819,8 @@ bool LookupSubNet(const char* pszName, CSubNet& ret) std::vector vIP; std::string strAddress = strSubnet.substr(0, slash); + // TODO: Use LookupHost(const char *, CNetAddr&, bool) instead to just get + // one CNetAddr. if (LookupHost(strAddress.c_str(), vIP, 1, false)) { CNetAddr network = vIP[0]; From 4bf93d833ed94240e1c26c1548466837c22e8f73 Mon Sep 17 00:00:00 2001 From: fanquake Date: Fri, 16 Aug 2019 10:51:16 +0800 Subject: [PATCH 05/10] Merge #16587: doc: Improve versionbits.h documentation 6576a8765f67716aa6b87a2f0296fbac5956bec0 doc: Improve versionbits.h documentation (Antoine Riard) Pull request description: While reviewing burying of BIP 9 deployments, seen that versionbits.h wasn't that much documented. This is an attempt to improve it. It can be useful, given after burying this code isn't going to be used anymore and isn't straightforward at first sight. 
ACKs for top commit: jnewbery: ACK 6576a8765f67716aa6b87a2f0296fbac5956bec0 ajtowns: ACK 6576a8765f67716aa6b87a2f0296fbac5956bec0 fanquake: ACK 6576a8765f67716aa6b87a2f0296fbac5956bec0 Tree-SHA512: 906463e0b22b988f89d77f798bf94d294f70467d29975088b87384764fb5d0dd1350be67562cc264656f61f1eada2cba20f99c0d797d1d7f90203c269e34c714 --- src/versionbits.cpp | 1 - src/versionbits.h | 28 ++++++++++++++++++++++------ 2 files changed, 22 insertions(+), 7 deletions(-) diff --git a/src/versionbits.cpp b/src/versionbits.cpp index 41259839f6..ab78b02b64 100644 --- a/src/versionbits.cpp +++ b/src/versionbits.cpp @@ -98,7 +98,6 @@ ThresholdState AbstractThresholdConditionChecker::GetStateFor(const CBlockIndex* return state; } -// return the numerical statistics of blocks signalling the specified BIP9 condition in this current period BIP9Stats AbstractThresholdConditionChecker::GetStateStatisticsFor(const CBlockIndex* pindex, const Consensus::Params& params, ThresholdConditionCache& cache) const { BIP9Stats stats = {}; diff --git a/src/versionbits.h b/src/versionbits.h index f8e547a3fc..cba2bae777 100644 --- a/src/versionbits.h +++ b/src/versionbits.h @@ -17,12 +17,17 @@ static const int32_t VERSIONBITS_TOP_MASK = 0xE0000000UL; /** Total bits available for versionbits */ static const int32_t VERSIONBITS_NUM_BITS = 29; +/** BIP 9 defines a finite-state-machine to deploy a softfork in multiple stages. + * State transitions happen during retarget period if conditions are met + * In case of reorg, transitions can go backward. Without transition, state is + * inherited between periods. All blocks of a period share the same state. + */ enum class ThresholdState { - DEFINED, - STARTED, - LOCKED_IN, - ACTIVE, - FAILED, + DEFINED, // First state that each softfork starts out as. The genesis block is by definition in this state for each deployment. + STARTED, // For blocks past the starttime. + LOCKED_IN, // For one retarget period after the first retarget period with STARTED blocks of which at least threshold have the associated bit set in nVersion. + ACTIVE, // For all blocks after the LOCKED_IN retarget period (final state) + FAILED, // For all blocks once the first retarget period after the timeout time is hit, if LOCKED_IN wasn't already reached (final state) }; // A map that gives the state for blocks whose height is a multiple of Period(). @@ -30,11 +35,17 @@ enum class ThresholdState { // will either be nullptr or a block with (height + 1) % Period() == 0. typedef std::map ThresholdConditionCache; +/** Display status of an in-progress BIP9 softfork */ struct BIP9Stats { + /** Length of blocks of the BIP9 signalling period */ int period; + /** Number of blocks with the version bit set required to activate the softfork */ int threshold; + /** Number of blocks elapsed since the beginning of the current period */ int elapsed; + /** Number of blocks with the version bit set since the beginning of the current period */ int count; + /** False if there are not enough blocks left in this period to pass activation threshold */ bool possible; }; @@ -50,12 +61,17 @@ protected: virtual int Threshold(const Consensus::Params& params, int nAttempt) const =0; public: + /** Returns the numerical statistics of an in-progress BIP9 softfork in the current period */ BIP9Stats GetStateStatisticsFor(const CBlockIndex* pindex, const Consensus::Params& params, ThresholdConditionCache& cache) const; - // Note that the functions below take a pindexPrev as input: they compute information for block B based on its parent. 
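(A rough Python sketch of the per-period transitions that the new ThresholdState comments above describe; this is illustration only and is not part of the diff. The real GetStateFor in versionbits.cpp walks block headers and compares median-time-past against the deployment's start and timeout times.)

    def next_state(state, prev_mtp, count, threshold, start_time, timeout):
        # One transition per retarget period; ACTIVE and FAILED are final,
        # and the state is simply inherited when no condition applies.
        if state == 'DEFINED':
            if prev_mtp >= timeout:
                return 'FAILED'
            if prev_mtp >= start_time:
                return 'STARTED'
        elif state == 'STARTED':
            if prev_mtp >= timeout:
                return 'FAILED'
            if count >= threshold:
                return 'LOCKED_IN'
        elif state == 'LOCKED_IN':
            return 'ACTIVE'
        return state

Note that STARTED checks the timeout before the signalling count, so a deployment whose timeout is reached in the same period in which it would otherwise have locked in still ends up FAILED.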
+ /** Returns the state for pindex A based on parent pindexPrev B. Applies any state transition if conditions are present. + * Caches state from first block of period. */ ThresholdState GetStateFor(const CBlockIndex* pindexPrev, const Consensus::Params& params, ThresholdConditionCache& cache) const; + /** Returns the height since when the ThresholdState has started for pindex A based on parent pindexPrev B, all blocks of a period share the same */ int GetStateSinceHeightFor(const CBlockIndex* pindexPrev, const Consensus::Params& params, ThresholdConditionCache& cache) const; }; +/** BIP 9 allows multiple softforks to be deployed in parallel. We cache per-period state for every one of them + * keyed by the bit position used to signal support. */ struct VersionBitsCache { ThresholdConditionCache caches[Consensus::MAX_VERSION_BITS_DEPLOYMENTS]; From 52a577891e449cbf49b46e27e5a82fc779a1252c Mon Sep 17 00:00:00 2001 From: MarcoFalke Date: Mon, 19 Aug 2019 09:25:26 -0400 Subject: [PATCH 06/10] Merge #16555: doc: mention whitelist is inbound, and applies to blocksonly 20ea9ef6ce9228a5258b99eeeeb40e6dfae2299f [doc] mention whitelist is inbound, and applies to blocksonly (Sjors Provoost) Pull request description: * `-whitelist` only impacts inbound nodes (see #9923). This is obvious in the context of allowing those nodes to connect to you, but there are additional whitelist features where this is less obvious, such as mempool relay behavior. * `whitelistrelay` (on by default) explains that `-blocksonly` makes an exception for transactions from whitelisted nodes, but it wasn't documented (nor obvious imo) the other way around. See also https://github.com/bitcoin/bitcoin/pull/15984#issuecomment-490645552 Top commit has no ACKs. Tree-SHA512: 03e363a5da5d81ad147d1c7e38bf11114df8bb89bdd66fb551520b25f810efa886ec6e649d3b435c4935e0ae4f39bb718bc7bb5778b9de6aa0b71e970a431af8 --- src/init.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/init.cpp b/src/init.cpp index 38b20c485f..fa1fb08071 100644 --- a/src/init.cpp +++ b/src/init.cpp @@ -703,8 +703,8 @@ void SetupServerArgs() gArgs.AddArg("-datacarriersize", strprintf("Maximum size of data in data carrier transactions we relay and mine (default: %u)", MAX_OP_RETURN_RELAY), ArgsManager::ALLOW_ANY, OptionsCategory::NODE_RELAY); gArgs.AddArg("-minrelaytxfee=", strprintf("Fees (in %s/kB) smaller than this are considered zero fee for relaying, mining and transaction creation (default: %s)", CURRENCY_UNIT, FormatMoney(DEFAULT_MIN_RELAY_TX_FEE)), ArgsManager::ALLOW_ANY, OptionsCategory::NODE_RELAY); - gArgs.AddArg("-whitelistforcerelay", strprintf("Force relay of transactions from whitelisted peers even if the transactions were already in the mempool or violate local relay policy (default: %d)", DEFAULT_WHITELISTFORCERELAY), ArgsManager::ALLOW_ANY, OptionsCategory::NODE_RELAY); - gArgs.AddArg("-whitelistrelay", strprintf("Accept relayed transactions received from whitelisted peers even when not relaying transactions (default: %d)", DEFAULT_WHITELISTRELAY), ArgsManager::ALLOW_ANY, OptionsCategory::NODE_RELAY); + gArgs.AddArg("-whitelistforcerelay", strprintf("Force relay of transactions from whitelisted inbound peers even if the transactions were already in the mempool or violate local relay policy (default: %d)", DEFAULT_WHITELISTFORCERELAY), ArgsManager::ALLOW_ANY, OptionsCategory::NODE_RELAY); + gArgs.AddArg("-whitelistrelay", strprintf("Accept relayed transactions received from whitelisted inbound peers even when not relaying 
transactions (default: %d)", DEFAULT_WHITELISTRELAY), ArgsManager::ALLOW_ANY, OptionsCategory::NODE_RELAY); gArgs.AddArg("-blockmaxsize=", strprintf("Set maximum block size in bytes (default: %d)", DEFAULT_BLOCK_MAX_SIZE), ArgsManager::ALLOW_ANY, OptionsCategory::BLOCK_CREATION); gArgs.AddArg("-blockmintxfee=", strprintf("Set lowest fee rate (in %s/kB) for transactions to be included in block creation. (default: %s)", CURRENCY_UNIT, FormatMoney(DEFAULT_BLOCK_MIN_TX_FEE)), ArgsManager::ALLOW_ANY, OptionsCategory::BLOCK_CREATION); From 2c7254f13374f715d93841a0a185932d745ce8df Mon Sep 17 00:00:00 2001 From: MarcoFalke Date: Mon, 26 Aug 2019 09:21:56 -0400 Subject: [PATCH 07/10] Merge #16629: doc: Add documentation for the new whitelist permissions 66ad75472f47e219ff980f7aba249795403dd7e0 [Doc] Add documentation for the new whitelist permissions (nicolas.dorier) Pull request description: Documenting the new feature https://github.com/bitcoin/bitcoin/pull/16248 . Ping Sjors . ACKs for top commit: Sjors: Indeed, re-ACK 66ad75472 Tree-SHA512: e6860bb6fae921287da7920a8db534e6a1a23871dd78dd6da030f00adf23e204cd23b194d67361bf34d4ef5a7815fc3fd7c81a3f2f35e4cfbe6ee2f2e6daec25 --- src/init.cpp | 18 +++++++++++++----- 1 file changed, 13 insertions(+), 5 deletions(-) diff --git a/src/init.cpp b/src/init.cpp index fa1fb08071..1235f0213b 100644 --- a/src/init.cpp +++ b/src/init.cpp @@ -560,9 +560,17 @@ void SetupServerArgs() #else hidden_args.emplace_back("-upnp"); #endif - gArgs.AddArg("-whitebind=", "Bind to given address and whitelist peers connecting to it. Use [host]:port notation for IPv6", ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION); - gArgs.AddArg("-whitelist=", "Whitelist peers connecting from the given IP address (e.g. 1.2.3.4) or CIDR notated network (e.g. 1.2.3.0/24). Can be specified multiple times." - " Whitelisted peers cannot be DoS banned", ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION); + gArgs.AddArg("-whitebind=<[permissions@]addr>", "Bind to given address and whitelist peers connecting to it. " + "Use [host]:port notation for IPv6. Allowed permissions are bloomfilter (allow requesting BIP37 filtered blocks and transactions), " + "noban (do not ban for misbehavior), " + "forcerelay (relay even non-standard transactions), " + "relay (relay even in -blocksonly mode), " + "and mempool (allow requesting BIP35 mempool contents). " + "Specify multiple permissions separated by commas (default: noban,mempool,relay). Can be specified multiple times.", ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION); + + gArgs.AddArg("-whitelist=<[permissions@]IP address or network>", "Whitelist peers connecting from the given IP address (e.g. 1.2.3.4) or " + "CIDR notated network(e.g. 1.2.3.0/24). Uses same permissions as " + "-whitebind. Can be specified multiple times." 
, ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION); g_wallet_init_interface.AddWalletOptions(); @@ -703,8 +711,8 @@ void SetupServerArgs() gArgs.AddArg("-datacarriersize", strprintf("Maximum size of data in data carrier transactions we relay and mine (default: %u)", MAX_OP_RETURN_RELAY), ArgsManager::ALLOW_ANY, OptionsCategory::NODE_RELAY); gArgs.AddArg("-minrelaytxfee=<amt>", strprintf("Fees (in %s/kB) smaller than this are considered zero fee for relaying, mining and transaction creation (default: %s)", CURRENCY_UNIT, FormatMoney(DEFAULT_MIN_RELAY_TX_FEE)), ArgsManager::ALLOW_ANY, OptionsCategory::NODE_RELAY); - gArgs.AddArg("-whitelistforcerelay", strprintf("Force relay of transactions from whitelisted inbound peers even if the transactions were already in the mempool or violate local relay policy (default: %d)", DEFAULT_WHITELISTFORCERELAY), ArgsManager::ALLOW_ANY, OptionsCategory::NODE_RELAY); - gArgs.AddArg("-whitelistrelay", strprintf("Accept relayed transactions received from whitelisted inbound peers even when not relaying transactions (default: %d)", DEFAULT_WHITELISTRELAY), ArgsManager::ALLOW_ANY, OptionsCategory::NODE_RELAY); + gArgs.AddArg("-whitelistforcerelay", strprintf("Add 'forcerelay' permission to whitelisted inbound peers with default permissions. This will relay transactions even if the transactions were already in the mempool or violate local relay policy. (default: %d)", DEFAULT_WHITELISTFORCERELAY), ArgsManager::ALLOW_ANY, OptionsCategory::NODE_RELAY); + gArgs.AddArg("-whitelistrelay", strprintf("Add 'relay' permission to whitelisted inbound peers with default permissions. This will accept relayed transactions even when not relaying transactions (default: %d)", DEFAULT_WHITELISTRELAY), ArgsManager::ALLOW_ANY, OptionsCategory::NODE_RELAY); gArgs.AddArg("-blockmaxsize=<n>", strprintf("Set maximum block size in bytes (default: %d)", DEFAULT_BLOCK_MAX_SIZE), ArgsManager::ALLOW_ANY, OptionsCategory::BLOCK_CREATION); gArgs.AddArg("-blockmintxfee=<amt>", strprintf("Set lowest fee rate (in %s/kB) for transactions to be included in block creation. (default: %s)", CURRENCY_UNIT, FormatMoney(DEFAULT_BLOCK_MIN_TX_FEE)), ArgsManager::ALLOW_ANY, OptionsCategory::BLOCK_CREATION); From 47714ab3b71507d2a29859fb1e3e013a06d8aaab Mon Sep 17 00:00:00 2001 From: "Wladimir J. van der Laan" Date: Mon, 16 Sep 2019 13:14:46 +0200 Subject: [PATCH 08/10] Merge #16847: doc: add comments clarifying how local services are advertised 82e53f37e1bfa6e34eac16b33329d70c3c0127da doc: add comments clarifying how local services are advertised (James O'Beirne) Pull request description: Recent questions have come up regarding dynamic service registration (see https://github.com/bitcoin/bitcoin/pull/16442#discussion_r308702676 and the assumeutxo project ~~which needs to dynamically flip NODE_NETWORK~~). While investigating how dynamic service registration might work, I was confused about how we convey local services to peers. This adds some documentation that hopefully clarifies this process.
ACKs for top commit: laanwj: ACK 82e53f37e1bfa6e34eac16b33329d70c3c0127da darosior: ACK 82e53f37e1bfa6e34eac16b33329d70c3c0127da Tree-SHA512: a30c1020387d7a75b5b3cdde45f8b7f2ae46293da97e6227b2ee17e290b93deb5b16c0bbc2b1676972300e5c3c2ad74eb8b3910d6b93e028dac1ae2700468ef9 --- src/net.h | 37 +++++++++++++++++++++++++++++++++++-- src/net_processing.cpp | 3 +++ 2 files changed, 38 insertions(+), 2 deletions(-) diff --git a/src/net.h b/src/net.h index 6b00e12aa3..7c2e819865 100644 --- a/src/net.h +++ b/src/net.h @@ -428,6 +428,12 @@ public: bool DisconnectNode(const CNetAddr& addr); bool DisconnectNode(NodeId id); + //! Used to convey which local services we are offering peers during node + //! connection. + //! + //! The data returned by this is used in CNode construction, + //! which is used to advertise which services we are offering + //! that peer during `net_processing.cpp:PushNodeVersion()`. ServiceFlags GetLocalServices() const; //!set the max outbound target in bytes @@ -584,7 +590,18 @@ private: std::atomic<NodeId> nLastNodeId{0}; unsigned int nPrevNodeCount{0}; - /** Services this instance offers */ + /** + * Services this instance offers. + * + * This data is replicated in each CNode instance we create during peer + * connection (in ConnectNode()) under a member also called + * nLocalServices. + * + * This data is not marked const, but after being set it should not + * change. See the note in CNode::nLocalServices documentation. + * + * \sa CNode::nLocalServices + */ ServiceFlags nLocalServices; std::unique_ptr<CSemaphore> semOutbound; @@ -1066,8 +1083,24 @@ public: private: const NodeId id; const uint64_t nLocalHostNonce; - // Services offered to this peer + + //! Services offered to this peer. + //! + //! This is supplied by the parent CConnman during peer connection + //! (CConnman::ConnectNode()) from its attribute of the same name. + //! + //! This is const because there is no protocol defined for renegotiating + //! services initially offered to a peer. The set of local services we + //! offer should not change after initialization. + //! + //! An interesting example of this is NODE_NETWORK and initial block + //! download: a node which starts up from scratch doesn't have any blocks + //! to serve, but still advertises NODE_NETWORK because it will eventually + //! fulfill this role after IBD completes. P2P code is written in such a + //! way that it can gracefully handle peers who don't make good on their + //! service advertisements. const ServiceFlags nLocalServices; + const int nMyStartingHeight; int nSendVersion {0}; NetPermissionFlags m_permissionFlags{ PF_NONE }; diff --git a/src/net_processing.cpp b/src/net_processing.cpp index 92e68b39c4..bc867412cf 100644 --- a/src/net_processing.cpp +++ b/src/net_processing.cpp @@ -462,6 +462,9 @@ static void PushNodeVersion(CNode *pnode, CConnman* connman, int64_t nTime) { const auto& params = Params(); + // Note that pnode->GetLocalServices() is a reflection of the local + // services we were offering when the CNode object was created for this + // peer.
ServiceFlags nLocalNodeServices = pnode->GetLocalServices(); uint64_t nonce = pnode->GetLocalNonce(); int nNodeStartingHeight = pnode->GetMyStartingHeight(); From 3c2eef8e822c166decd33042dd4068557d48738b Mon Sep 17 00:00:00 2001 From: fanquake Date: Fri, 13 Sep 2019 10:43:26 +0800 Subject: [PATCH 09/10] Merge #16857: doc: Elaborate need to re-login on Debian-based after usermod for Tor group 706340150f3ae26fce4659f8fa0a5d57149d2fb3 Elaborate on the need to re-login on Debian-based systems to use tor following usermod (clashicly) Pull request description: Starting bitcoind with `-onlynet=onion` immediately after adding bitcoind user to debian-tor group will yield the following notice on debug.log: "tor: Authentication cookie /run/tor/control.authcookie could not be opened (check permissions)" Elaborate on the need to re-login to ensure debian-tor group has been applied to bitcoind user after: sudo usermod -a -G debian-tor username Verification can be done via `groups` command in shell. Otherwise operator may not be aware at first launch they are not running a tor enabled node. ACKs for top commit: fanquake: ACK 706340150f3ae26fce4659f8fa0a5d57149d2fb3 - Thanks for following up. Tree-SHA512: 3473966fb43b4f1c86bd8841dd6ea8c2798256c2ca926b10bd08cd655b954a9e77f0278c4fe63160e69cc75e240a3580af00ea46bf960fde2788aa88f03510b2 --- doc/tor.md | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/doc/tor.md b/doc/tor.md index 8c3cbb24fd..17cb85ace8 100644 --- a/doc/tor.md +++ b/doc/tor.md @@ -139,7 +139,10 @@ preconfigured and the creation of a hidden service is automatic. If permission p are seen with `-debug=tor` they can be resolved by adding both the user running Tor and the user running dashd to the same group and setting permissions appropriately. On Debian-based systems the user running dashd can be added to the debian-tor group, -which has the appropriate permissions. +which has the appropriate permissions. Before starting dashd you will need to re-login +to allow debian-tor group to be applied. Otherwise you will see the following notice: "tor: +Authentication cookie /run/tor/control.authcookie could not be opened (check permissions)" +on debug.log. An alternative authentication method is the use of the `-torpassword=password` option. The `password` is the clear text form that From e4557b006713a6780f58c7bcd85574eabd9ec7d4 Mon Sep 17 00:00:00 2001 From: MarcoFalke Date: Mon, 10 Dec 2018 14:02:15 -0500 Subject: [PATCH 10/10] Merge #14877: rpc: Document default values for optional arguments fa0c24c96e rpc: Document default values for optional arguments (MarcoFalke) Pull request description: Tree-SHA512: e1f5ea67d7ac67526ae87bffaeb308a9ad68632e161fe0148cd431a340bb7a30def18f1dbc7e98c6c1c269ac8942fd5d5334c85c48e4fb1cead70a42536b6eef --- src/rpc/mining.cpp | 2 +- src/rpc/rawtransaction.cpp | 4 ++-- src/rpc/util.h | 2 +- src/wallet/rpcwallet.cpp | 4 +--- 4 files changed, 5 insertions(+), 7 deletions(-) diff --git a/src/rpc/mining.cpp b/src/rpc/mining.cpp index 6a16749e5d..07d89bbc37 100644 --- a/src/rpc/mining.cpp +++ b/src/rpc/mining.cpp @@ -907,7 +907,7 @@ static UniValue estimaterawfee(const JSONRPCRequest& request) {"conf_target", RPCArg::Type::NUM, RPCArg::Optional::NO, "Confirmation target in blocks (1 - 1008)"}, {"threshold", RPCArg::Type::NUM, /* default */ "0.95", "The proportion of transactions in a given feerate range that must have been\n" " confirmed within conf_target in order to consider those feerates as high enough and proceed to check\n" - " lower buckets. 
Default: 0.95"}, + " lower buckets."}, }, RPCResult{ "{\n" diff --git a/src/rpc/rawtransaction.cpp b/src/rpc/rawtransaction.cpp index 2291cf0a82..4f9915624c 100644 --- a/src/rpc/rawtransaction.cpp +++ b/src/rpc/rawtransaction.cpp @@ -924,7 +924,7 @@ static UniValue signrawtransactionwithkey(const JSONRPCRequest& request) {"txid", RPCArg::Type::STR_HEX, RPCArg::Optional::NO, "The transaction id"}, {"vout", RPCArg::Type::NUM, RPCArg::Optional::NO, "The output number"}, {"scriptPubKey", RPCArg::Type::STR_HEX, RPCArg::Optional::NO, "script key"}, - {"redeemScript", RPCArg::Type::STR_HEX, /* default_val */ "", "(required for P2SH or P2WSH) redeem script"}, + {"redeemScript", RPCArg::Type::STR_HEX, RPCArg::Optional::OMITTED, "(required for P2SH or P2WSH) redeem script"}, {"amount", RPCArg::Type::AMOUNT, RPCArg::Optional::NO, "The amount spent"}, }, }, @@ -1484,7 +1484,7 @@ UniValue createpsbt(const JSONRPCRequest& request) { {"txid", RPCArg::Type::STR_HEX, RPCArg::Optional::NO, "The transaction id"}, {"vout", RPCArg::Type::NUM, RPCArg::Optional::NO, "The output number"}, - {"sequence", RPCArg::Type::NUM, /* default */ "", "The sequence number"}, + {"sequence", RPCArg::Type::NUM, /* default */ "depends on the value of 'locktime' argument", "The sequence number"}, }, }, }, diff --git a/src/rpc/util.h b/src/rpc/util.h index e5e58e9042..fc0af5066c 100644 --- a/src/rpc/util.h +++ b/src/rpc/util.h @@ -115,7 +115,7 @@ struct RPCArg { /** * Return the type string of the argument. - * Set oneline to allow it to be overrided by a custom oneline type string (m_oneline_description). + * Set oneline to allow it to be overridden by a custom oneline type string (m_oneline_description). */ std::string ToString(bool oneline) const; /** diff --git a/src/wallet/rpcwallet.cpp b/src/wallet/rpcwallet.cpp index 4ac2cb0cb4..ed7380d8ed 100644 --- a/src/wallet/rpcwallet.cpp +++ b/src/wallet/rpcwallet.cpp @@ -1601,7 +1601,7 @@ static UniValue listsinceblock(const JSONRPCRequest& request) "If \"blockhash\" is no longer a part of the main chain, transactions from the fork point onward are included.\n" "Additionally, if include_removed is set, transactions affecting the wallet which were removed are returned in the \"removed\" array.\n", { - {"blockhash", RPCArg::Type::STR, RPCArg::Optional::OMITTED_NAMED_ARG, "If set, the block hash to list transactions since"}, + {"blockhash", RPCArg::Type::STR, RPCArg::Optional::OMITTED_NAMED_ARG, "If set, the block hash to list transactions since, otherwise list all transactions."}, {"target_confirmations", RPCArg::Type::NUM, /* default */ "1", "Return the nth block hash from the main chain. e.g. 1 would mean the best block hash. 
Note: this is not used as a filter, but only affects [lastblock] in the return value"}, {"include_watchonly", RPCArg::Type::BOOL, /* default */ "false", "Include transactions to watch-only addresses (see 'importaddress')"}, {"include_removed", RPCArg::Type::BOOL, /* default */ "true", "Show transactions that were removed due to a reorg in the \"removed\" array\n" @@ -3403,7 +3403,6 @@ static UniValue fundrawtransaction(const JSONRPCRequest& request) {"feeRate", RPCArg::Type::AMOUNT, /* default */ "not set: makes wallet determine the fee", "Set a specific fee rate in " + CURRENCY_UNIT + "/kB"}, {"subtractFeeFromOutputs", RPCArg::Type::ARR, /* default */ "empty array", "A json array of integers.\n" " The fee will be equally deducted from the amount of each specified output.\n" - " The outputs are specified by their zero-based index, before any change output is added.\n" " Those recipients will receive less dash than you enter in their corresponding amount field.\n" " If no outputs are specified here, the sender pays the fee.", { @@ -4125,7 +4124,6 @@ UniValue walletcreatefundedpsbt(const JSONRPCRequest& request) {"feeRate", RPCArg::Type::AMOUNT, /* default */ "not set: makes wallet determine the fee", "Set a specific fee rate in " + CURRENCY_UNIT + "/kB"}, {"subtractFeeFromOutputs", RPCArg::Type::ARR, /* default */ "empty array", "A json array of integers.\n" " The fee will be equally deducted from the amount of each specified output.\n" - " The outputs are specified by their zero-based index, before any change output is added.\n" " Those recipients will receive less Dash than you enter in their corresponding amount field.\n" " If no outputs are specified here, the sender pays the fee.", {