scripted-diff: change all tabs to spaces in linearize-hashes.py and linearize-data.py
It appears bitcoin already did this long ago (seemingly forever), but for some reason we still have tabs in these files. This was causing the linter to fail: a backport touched lines in these files, and the linter then complained about added lines beginning with tabs. To fix this, replace all tabs with spaces in these two files.

-BEGIN VERIFY SCRIPT-
sed -i 's/\t/ /g' contrib/linearize/linearize-*.py
-END VERIFY SCRIPT-

Signed-off-by: pasta <pasta@dashboost.org>
parent 38f90834cc
commit e6a1c6b5bb
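The failure described in the message comes from a whitespace lint pass over touched lines. As a minimal stand-in for that check, assuming a hypothetical standalone script (this is not the project's actual linter):

#!/usr/bin/env python3
# Hypothetical tab check: flag any line that begins with a tab, the same
# condition the linter in the commit message complains about.
import glob
import sys

exit_code = 0
for path in glob.glob('contrib/linearize/linearize-*.py'):
    with open(path, encoding='utf8') as f:
        for lineno, line in enumerate(f, start=1):
            if line.startswith('\t'):
                print('%s:%d: line begins with a tab' % (path, lineno))
                exit_code = 1
sys.exit(exit_code)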
contrib/linearize/linearize-data.py
@@ -23,302 +23,302 @@ settings = {}

##### Switch endian-ness #####
def hex_switchEndian(s):
    """ Switches the endianness of a hex string (in pairs of hex chars) """
    pairList = [s[i:i+2].encode() for i in range(0, len(s), 2)]
    return b''.join(pairList[::-1]).decode()

def uint32(x):
    return x & 0xffffffff

def bytereverse(x):
    return uint32(( ((x) << 24) | (((x) << 8) & 0x00ff0000) |
            (((x) >> 8) & 0x0000ff00) | ((x) >> 24) ))

def bufreverse(in_buf):
    out_words = []
    for i in range(0, len(in_buf), 4):
        word = struct.unpack('@I', in_buf[i:i+4])[0]
        out_words.append(struct.pack('@I', bytereverse(word)))
    return b''.join(out_words)

def wordreverse(in_buf):
    out_words = []
    for i in range(0, len(in_buf), 4):
        out_words.append(in_buf[i:i+4])
    out_words.reverse()
    return b''.join(out_words)

def calc_hdr_hash(blk_hdr):
    #hash1 = hashlib.sha256()
    #hash1.update(blk_hdr)
    #hash1_o = hash1.digest()

    #hash2 = hashlib.sha256()
    #hash2.update(hash1_o)
    #hash2_o = hash2.digest()

    #return hash2_o
    pow_hash = dash_hash.getPoWHash(blk_hdr)
    return pow_hash

def calc_hash_str(blk_hdr):
    hash = calc_hdr_hash(blk_hdr)
    hash = bufreverse(hash)
    hash = wordreverse(hash)
    hash_str = hexlify(hash).decode('utf-8')
    return hash_str

def get_blk_dt(blk_hdr):
    members = struct.unpack("<I", blk_hdr[68:68+4])
    nTime = members[0]
    dt = datetime.datetime.fromtimestamp(nTime)
    dt_ym = datetime.datetime(dt.year, dt.month, 1)
    return (dt_ym, nTime)

# When getting the list of block hashes, undo any byte reversals.
def get_block_hashes(settings):
    blkindex = []
    f = open(settings['hashlist'], "r", encoding="utf8")
    for line in f:
        line = line.rstrip()
        if settings['rev_hash_bytes'] == 'true':
            line = hex_switchEndian(line)
        blkindex.append(line)

    print("Read " + str(len(blkindex)) + " hashes")

    return blkindex

# The block map shouldn't give or receive byte-reversed hashes.
def mkblockmap(blkindex):
    blkmap = {}
    for height,hash in enumerate(blkindex):
        blkmap[hash] = height
    return blkmap

# Block header and extent on disk
BlockExtent = namedtuple('BlockExtent', ['fn', 'offset', 'inhdr', 'blkhdr', 'size'])

class BlockDataCopier:
    def __init__(self, settings, blkindex, blkmap):
        self.settings = settings
        self.blkindex = blkindex
        self.blkmap = blkmap

        self.inFn = 0
        self.inF = None
        self.outFn = 0
        self.outsz = 0
        self.outF = None
        self.outFname = None
        self.blkCountIn = 0
        self.blkCountOut = 0

        self.lastDate = datetime.datetime(2000, 1, 1)
        self.highTS = 1408893517 - 315360000
        self.timestampSplit = False
        self.fileOutput = True
        self.setFileTime = False
        self.maxOutSz = settings['max_out_sz']
        if 'output' in settings:
            self.fileOutput = False
        if settings['file_timestamp'] != 0:
            self.setFileTime = True
        if settings['split_timestamp'] != 0:
            self.timestampSplit = True
        # Extents and cache for out-of-order blocks
        self.blockExtents = {}
        self.outOfOrderData = {}
        self.outOfOrderSize = 0 # running total size for items in outOfOrderData

    def writeBlock(self, inhdr, blk_hdr, rawblock):
        blockSizeOnDisk = len(inhdr) + len(blk_hdr) + len(rawblock)
        if not self.fileOutput and ((self.outsz + blockSizeOnDisk) > self.maxOutSz):
            self.outF.close()
            if self.setFileTime:
                os.utime(self.outFname, (int(time.time()), self.highTS))
            self.outF = None
            self.outFname = None
            self.outFn = self.outFn + 1
            self.outsz = 0

        (blkDate, blkTS) = get_blk_dt(blk_hdr)
        if self.timestampSplit and (blkDate > self.lastDate):
            print("New month " + blkDate.strftime("%Y-%m") + " @ " + self.hash_str)
            self.lastDate = blkDate
            if self.outF:
                self.outF.close()
                if self.setFileTime:
                    os.utime(self.outFname, (int(time.time()), self.highTS))
                self.outF = None
                self.outFname = None
                self.outFn = self.outFn + 1
                self.outsz = 0

        if not self.outF:
            if self.fileOutput:
                self.outFname = self.settings['output_file']
            else:
                self.outFname = os.path.join(self.settings['output'], "blk%05d.dat" % self.outFn)
            print("Output file " + self.outFname)
            self.outF = open(self.outFname, "wb")

        self.outF.write(inhdr)
        self.outF.write(blk_hdr)
        self.outF.write(rawblock)
        self.outsz = self.outsz + len(inhdr) + len(blk_hdr) + len(rawblock)

        self.blkCountOut = self.blkCountOut + 1
        if blkTS > self.highTS:
            self.highTS = blkTS

        if (self.blkCountOut % 1000) == 0:
            print('%i blocks scanned, %i blocks written (of %i, %.1f%% complete)' %
                    (self.blkCountIn, self.blkCountOut, len(self.blkindex), 100.0 * self.blkCountOut / len(self.blkindex)))

    def inFileName(self, fn):
        return os.path.join(self.settings['input'], "blk%05d.dat" % fn)

    def fetchBlock(self, extent):
        '''Fetch block contents from disk given extents'''
        with open(self.inFileName(extent.fn), "rb") as f:
            f.seek(extent.offset)
            return f.read(extent.size)

    def copyOneBlock(self):
        '''Find the next block to be written in the input, and copy it to the output.'''
        extent = self.blockExtents.pop(self.blkCountOut)
        if self.blkCountOut in self.outOfOrderData:
            # If the data is cached, use it from memory and remove from the cache
            rawblock = self.outOfOrderData.pop(self.blkCountOut)
            self.outOfOrderSize -= len(rawblock)
        else: # Otherwise look up data on disk
            rawblock = self.fetchBlock(extent)

        self.writeBlock(extent.inhdr, extent.blkhdr, rawblock)

    def run(self):
        while self.blkCountOut < len(self.blkindex):
            if not self.inF:
                fname = self.inFileName(self.inFn)
                print("Input file " + fname)
                try:
                    self.inF = open(fname, "rb")
                except IOError:
                    print("Premature end of block data")
                    return

            inhdr = self.inF.read(8)
            if (not inhdr or (inhdr[0] == "\0")):
                self.inF.close()
                self.inF = None
                self.inFn = self.inFn + 1
                continue

            inMagic = inhdr[:4]
            if (inMagic != self.settings['netmagic']):
                print("Invalid magic: " + hexlify(inMagic).decode('utf-8'))
                return
            inLenLE = inhdr[4:]
            su = struct.unpack("<I", inLenLE)
            inLen = su[0] - 80 # length without header
            blk_hdr = self.inF.read(80)
            inExtent = BlockExtent(self.inFn, self.inF.tell(), inhdr, blk_hdr, inLen)

            self.hash_str = calc_hash_str(blk_hdr)
            if not self.hash_str in blkmap:
                # Because blocks can be written to files out-of-order as of 0.10, the script
                # may encounter blocks it doesn't know about. Treat as debug output.
                if settings['debug_output'] == 'true':
                    print("Skipping unknown block " + self.hash_str)
                self.inF.seek(inLen, os.SEEK_CUR)
                continue

            blkHeight = self.blkmap[self.hash_str]
            self.blkCountIn += 1

            if self.blkCountOut == blkHeight:
                # If in-order block, just copy
                rawblock = self.inF.read(inLen)
                self.writeBlock(inhdr, blk_hdr, rawblock)

                # See if we can catch up to prior out-of-order blocks
                while self.blkCountOut in self.blockExtents:
                    self.copyOneBlock()

            else: # If out-of-order, skip over block data for now
                self.blockExtents[blkHeight] = inExtent
                if self.outOfOrderSize < self.settings['out_of_order_cache_sz']:
                    # If there is space in the cache, read the data
                    # Reading the data in file sequence instead of seeking and fetching it later is preferred,
                    # but we don't want to fill up memory
                    self.outOfOrderData[blkHeight] = self.inF.read(inLen)
                    self.outOfOrderSize += inLen
                else: # If no space in cache, seek forward
                    self.inF.seek(inLen, os.SEEK_CUR)

        print("Done (%i blocks written)" % (self.blkCountOut))

if __name__ == '__main__':
    if len(sys.argv) != 2:
        print("Usage: linearize-data.py CONFIG-FILE")
        sys.exit(1)

    f = open(sys.argv[1], encoding="utf8")
    for line in f:
        # skip comment lines
        m = re.search('^\s*#', line)
        if m:
            continue

        # parse key=value lines
        m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
        if m is None:
            continue
        settings[m.group(1)] = m.group(2)
    f.close()

    # Force hash byte format setting to be lowercase to make comparisons easier.
    # Also place upfront in case any settings need to know about it.
    if 'rev_hash_bytes' not in settings:
        settings['rev_hash_bytes'] = 'false'
    settings['rev_hash_bytes'] = settings['rev_hash_bytes'].lower()

    if 'netmagic' not in settings:
        settings['netmagic'] = 'bf0c6bbd'
    if 'genesis' not in settings:
        settings['genesis'] = '00000ffd590b1485b3caadc19b22e6379c733355108f107a430458cdf3407ab6'
    if 'input' not in settings:
        settings['input'] = 'input'
    if 'hashlist' not in settings:
        settings['hashlist'] = 'hashlist.txt'
    if 'file_timestamp' not in settings:
        settings['file_timestamp'] = 0
    if 'split_timestamp' not in settings:
        settings['split_timestamp'] = 0
    if 'max_out_sz' not in settings:
        settings['max_out_sz'] = 1000 * 1000 * 1000
    if 'out_of_order_cache_sz' not in settings:
        settings['out_of_order_cache_sz'] = 100 * 1000 * 1000
    if 'debug_output' not in settings:
        settings['debug_output'] = 'false'

    settings['max_out_sz'] = int(settings['max_out_sz'])
    settings['split_timestamp'] = int(settings['split_timestamp'])
    settings['file_timestamp'] = int(settings['file_timestamp'])
    settings['netmagic'] = unhexlify(settings['netmagic'].encode('utf-8'))
    settings['out_of_order_cache_sz'] = int(settings['out_of_order_cache_sz'])
    settings['debug_output'] = settings['debug_output'].lower()

    if 'output_file' not in settings and 'output' not in settings:
        print("Missing output file / directory")
        sys.exit(1)

    blkindex = get_block_hashes(settings)
    blkmap = mkblockmap(blkindex)

    # Block hash map won't be byte-reversed. Neither should the genesis hash.
    if not settings['genesis'] in blkmap:
        print("Genesis block not found in hashlist")
    else:
        BlockDataCopier(settings, blkindex, blkmap).run()
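The __main__ block above reads its settings from a plain key=value config file (the regex parser skips # comment lines). A hypothetical example, using placeholder paths and the fallback values visible in the code:

# Example linearize-data.py config (illustrative; paths are placeholders)
netmagic=bf0c6bbd
genesis=00000ffd590b1485b3caadc19b22e6379c733355108f107a430458cdf3407ab6
input=/home/example/.dashcore/blocks
output_file=/home/example/bootstrap.dat
hashlist=hashlist.txt
max_out_sz=1000000000
out_of_order_cache_sz=100000000
rev_hash_bytes=false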
contrib/linearize/linearize-hashes.py
@@ -23,135 +23,135 @@ settings = {}

##### Switch endian-ness #####
def hex_switchEndian(s):
    """ Switches the endianness of a hex string (in pairs of hex chars) """
    pairList = [s[i:i+2].encode() for i in range(0, len(s), 2)]
    return b''.join(pairList[::-1]).decode()

class BitcoinRPC:
    def __init__(self, host, port, username, password):
        authpair = "%s:%s" % (username, password)
        authpair = authpair.encode('utf-8')
        self.authhdr = b"Basic " + base64.b64encode(authpair)
        self.conn = httplib.HTTPConnection(host, port=port, timeout=30)

    def execute(self, obj):
        try:
            self.conn.request('POST', '/', json.dumps(obj),
                { 'Authorization' : self.authhdr,
                  'Content-type' : 'application/json' })
        except ConnectionRefusedError:
            print('RPC connection refused. Check RPC settings and the server status.',
                file=sys.stderr)
            return None

        resp = self.conn.getresponse()
        if resp is None:
            print("JSON-RPC: no response", file=sys.stderr)
            return None

        body = resp.read().decode('utf-8')
        resp_obj = json.loads(body)
        return resp_obj

    @staticmethod
    def build_request(idx, method, params):
        obj = { 'version' : '1.1',
                'method' : method,
                'id' : idx }
        if params is None:
            obj['params'] = []
        else:
            obj['params'] = params
        return obj

    @staticmethod
    def response_is_error(resp_obj):
        return 'error' in resp_obj and resp_obj['error'] is not None

def get_block_hashes(settings, max_blocks_per_call=10000):
    rpc = BitcoinRPC(settings['host'], settings['port'],
                     settings['rpcuser'], settings['rpcpassword'])

    height = settings['min_height']
    while height < settings['max_height']+1:
        num_blocks = min(settings['max_height']+1-height, max_blocks_per_call)
        batch = []
        for x in range(num_blocks):
            batch.append(rpc.build_request(x, 'getblockhash', [height + x]))

        reply = rpc.execute(batch)
        if reply is None:
            print('Cannot continue. Program will halt.')
            return None

        for x,resp_obj in enumerate(reply):
            if rpc.response_is_error(resp_obj):
                print('JSON-RPC: error at height', height+x, ': ', resp_obj['error'], file=sys.stderr)
                sys.exit(1)
            assert(resp_obj['id'] == x) # assume replies are in-sequence
            if settings['rev_hash_bytes'] == 'true':
                resp_obj['result'] = hex_switchEndian(resp_obj['result'])
            print(resp_obj['result'])

        height += num_blocks

def get_rpc_cookie():
    # Open the cookie file
    with open(os.path.join(os.path.expanduser(settings['datadir']), '.cookie'), 'r', encoding="ascii") as f:
        combined = f.readline()
        combined_split = combined.split(":")
        settings['rpcuser'] = combined_split[0]
        settings['rpcpassword'] = combined_split[1]

if __name__ == '__main__':
    if len(sys.argv) != 2:
        print("Usage: linearize-hashes.py CONFIG-FILE")
        sys.exit(1)

    f = open(sys.argv[1], encoding="utf8")
    for line in f:
        # skip comment lines
        m = re.search('^\s*#', line)
        if m:
            continue

        # parse key=value lines
        m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
        if m is None:
            continue
        settings[m.group(1)] = m.group(2)
    f.close()

    if 'host' not in settings:
        settings['host'] = '127.0.0.1'
    if 'port' not in settings:
        settings['port'] = 9998
    if 'min_height' not in settings:
        settings['min_height'] = 0
    if 'max_height' not in settings:
        settings['max_height'] = 313000
    if 'rev_hash_bytes' not in settings:
        settings['rev_hash_bytes'] = 'false'

    use_userpass = True
    use_datadir = False
    if 'rpcuser' not in settings or 'rpcpassword' not in settings:
        use_userpass = False
    if 'datadir' in settings and not use_userpass:
        use_datadir = True
    if not use_userpass and not use_datadir:
        print("Missing datadir or username and/or password in cfg file", file=sys.stderr)
        sys.exit(1)

    settings['port'] = int(settings['port'])
    settings['min_height'] = int(settings['min_height'])
    settings['max_height'] = int(settings['max_height'])

    # Force hash byte format setting to be lowercase to make comparisons easier.
    settings['rev_hash_bytes'] = settings['rev_hash_bytes'].lower()

    # Get the rpc user and pass from the cookie if the datadir is set
    if use_datadir:
        get_rpc_cookie()

    get_block_hashes(settings)
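linearize-hashes.py prints one block hash per line on stdout, so its output is typically redirected into the hashlist file that linearize-data.py consumes, e.g. python3 linearize-hashes.py linearize.cfg > hashlist.txt. A hypothetical config with placeholder credentials (per the checks above, either rpcuser/rpcpassword or datadir for cookie auth must be set):

# Example linearize-hashes.py config (illustrative; credentials are placeholders)
host=127.0.0.1
port=9998
rpcuser=exampleuser
rpcpassword=examplepassword
#datadir=~/.dashcore
min_height=0
max_height=313000
rev_hash_bytes=false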