// Copyright (c) 2009-2010 Satoshi Nakamoto
// Copyright (c) 2009-2015 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.

#ifndef BITCOIN_SERIALIZE_H
#define BITCOIN_SERIALIZE_H

#include "compat/endian.h"

#include <algorithm>
#include <assert.h>
#include <ios>
#include <limits>
#include <list>
#include <map>
#include <memory>
#include <set>
#include <stdint.h>
#include <string>
#include <string.h>
#include <tuple> // for the std::tuple serializers declared below
#include <unordered_map>
#include <unordered_set>
#include <utility>
#include <vector>

#include "prevector.h"

static const unsigned int MAX_SIZE = 0x02000000;

/**
 * Dummy data type to identify deserializing constructors.
 *
 * By convention, a constructor of a type T with signature
 *
 *   template <typename Stream> T::T(deserialize_type, Stream& s)
 *
 * is a deserializing constructor, which builds the type by
 * deserializing it from s. If T contains const fields, this
 * is likely the only way to do so.
 */
struct deserialize_type {};
constexpr deserialize_type deserialize {};
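
// Illustrative sketch (not part of the original header; the class and field
// names are hypothetical): a type whose const field is initialized straight
// from the stream via the deserializing-constructor convention above, using
// ser_readdata32 (defined below).
//
//   class CImmutableHeight
//   {
//   public:
//       const uint32_t nHeight;
//
//       template <typename Stream>
//       CImmutableHeight(deserialize_type, Stream& s) : nHeight(ser_readdata32(s)) {}
//   };
//
//   // Usage: CImmutableHeight obj(deserialize, stream);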

/**
 * Used to bypass the rule against non-const reference to temporary
 * where it makes sense with wrappers such as CFlatData or CTxDB
 */
template<typename T>
inline T& REF(const T& val)
{
    return const_cast<T&>(val);
}

/**
 * Used to acquire a non-const pointer "this" to generate bodies
 * of const serialization operations from a template
 */
template<typename T>
inline T* NCONST_PTR(const T* val)
{
    return const_cast<T*>(val);
}

/*
 * Lowest-level serialization and conversion.
 * @note Sizes of these types are verified in the tests
 */
template<typename Stream> inline void ser_writedata8(Stream &s, uint8_t obj)
{
    s.write((char*)&obj, 1);
}
template<typename Stream> inline void ser_writedata16(Stream &s, uint16_t obj)
{
    obj = htole16(obj);
    s.write((char*)&obj, 2);
}
template<typename Stream> inline void ser_writedata32(Stream &s, uint32_t obj)
{
    obj = htole32(obj);
    s.write((char*)&obj, 4);
}
template<typename Stream> inline void ser_writedata32be(Stream &s, uint32_t obj)
{
    obj = htobe32(obj);
    s.write((char*)&obj, 4);
}
template<typename Stream> inline void ser_writedata64(Stream &s, uint64_t obj)
{
    obj = htole64(obj);
    s.write((char*)&obj, 8);
}
template<typename Stream> inline uint8_t ser_readdata8(Stream &s)
{
    uint8_t obj;
    s.read((char*)&obj, 1);
    return obj;
}
template<typename Stream> inline uint16_t ser_readdata16(Stream &s)
{
    uint16_t obj;
    s.read((char*)&obj, 2);
    return le16toh(obj);
}
template<typename Stream> inline uint32_t ser_readdata32(Stream &s)
{
    uint32_t obj;
    s.read((char*)&obj, 4);
    return le32toh(obj);
}
template<typename Stream> inline uint32_t ser_readdata32be(Stream &s)
{
    uint32_t obj;
    s.read((char*)&obj, 4);
    return be32toh(obj);
}
template<typename Stream> inline uint64_t ser_readdata64(Stream &s)
{
    uint64_t obj;
    s.read((char*)&obj, 8);
    return le64toh(obj);
}
inline uint64_t ser_double_to_uint64(double x)
{
    union { double x; uint64_t y; } tmp;
    tmp.x = x;
    return tmp.y;
}
inline uint32_t ser_float_to_uint32(float x)
{
    union { float x; uint32_t y; } tmp;
    tmp.x = x;
    return tmp.y;
}
inline double ser_uint64_to_double(uint64_t y)
{
    union { double x; uint64_t y; } tmp;
    tmp.y = y;
    return tmp.x;
}
inline float ser_uint32_to_float(uint32_t y)
{
    union { float x; uint32_t y; } tmp;
    tmp.y = y;
    return tmp.x;
}
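
// For example (informative): 1.0f has IEEE-754 bit pattern 0x3F800000, so
// Serialize(s, 1.0f) below writes the bytes 00 00 80 3F (little-endian), and
// ser_uint32_to_float(0x3F800000) returns exactly 1.0f on the same platform.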

/////////////////////////////////////////////////////////////////
//
// Templates for serializing to anything that looks like a stream,
// i.e. anything that supports .read(char*, size_t) and .write(char*, size_t)
//

class CSizeComputer;

enum
{
    // primary actions
    SER_NETWORK = (1 << 0),
    SER_DISK = (1 << 1),
    SER_GETHASH = (1 << 2),
};

#define READWRITE(obj) (::SerReadWrite(s, (obj), ser_action))
#define READWRITEMANY(...) (::SerReadWriteMany(s, ser_action, __VA_ARGS__))

/**
 * Implement the Serialize and Unserialize member functions for serializable
 * objects. These are actually wrappers over the "SerializationOp" template,
 * which implements the body of each class' serialization code. Adding
 * "ADD_SERIALIZE_METHODS" in the body of the class causes these wrappers to be
 * added as members.
 */
#define ADD_SERIALIZE_METHODS \
    template<typename Stream> \
    void Serialize(Stream& s) const { \
        NCONST_PTR(this)->SerializationOp(s, CSerActionSerialize()); \
    } \
    template<typename Stream> \
    void Unserialize(Stream& s) { \
        SerializationOp(s, CSerActionUnserialize()); \
    }
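
// Illustrative usage (a sketch, not part of this header; the class and field
// names are hypothetical): a class opts in by providing SerializationOp and
// expanding ADD_SERIALIZE_METHODS. The same SerializationOp body drives both
// reading and writing.
//
//   class CExamplePoint
//   {
//   public:
//       int32_t nX;
//       int32_t nY;
//
//       ADD_SERIALIZE_METHODS;
//
//       template <typename Stream, typename Operation>
//       inline void SerializationOp(Stream& s, Operation ser_action)
//       {
//           READWRITE(nX);
//           READWRITE(nY);
//       }
//   };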

template<typename Stream> inline void Serialize(Stream& s, char a    ) { ser_writedata8(s, a); } // TODO Get rid of bare char
template<typename Stream> inline void Serialize(Stream& s, int8_t a  ) { ser_writedata8(s, a); }
template<typename Stream> inline void Serialize(Stream& s, uint8_t a ) { ser_writedata8(s, a); }
template<typename Stream> inline void Serialize(Stream& s, int16_t a ) { ser_writedata16(s, a); }
template<typename Stream> inline void Serialize(Stream& s, uint16_t a) { ser_writedata16(s, a); }
template<typename Stream> inline void Serialize(Stream& s, int32_t a ) { ser_writedata32(s, a); }
template<typename Stream> inline void Serialize(Stream& s, uint32_t a) { ser_writedata32(s, a); }
template<typename Stream> inline void Serialize(Stream& s, int64_t a ) { ser_writedata64(s, a); }
template<typename Stream> inline void Serialize(Stream& s, uint64_t a) { ser_writedata64(s, a); }
template<typename Stream> inline void Serialize(Stream& s, float a   ) { ser_writedata32(s, ser_float_to_uint32(a)); }
template<typename Stream> inline void Serialize(Stream& s, double a  ) { ser_writedata64(s, ser_double_to_uint64(a)); }

template<typename Stream> inline void Unserialize(Stream& s, char& a    ) { a = ser_readdata8(s); } // TODO Get rid of bare char
template<typename Stream> inline void Unserialize(Stream& s, int8_t& a  ) { a = ser_readdata8(s); }
template<typename Stream> inline void Unserialize(Stream& s, uint8_t& a ) { a = ser_readdata8(s); }
template<typename Stream> inline void Unserialize(Stream& s, int16_t& a ) { a = ser_readdata16(s); }
template<typename Stream> inline void Unserialize(Stream& s, uint16_t& a) { a = ser_readdata16(s); }
template<typename Stream> inline void Unserialize(Stream& s, int32_t& a ) { a = ser_readdata32(s); }
template<typename Stream> inline void Unserialize(Stream& s, uint32_t& a) { a = ser_readdata32(s); }
template<typename Stream> inline void Unserialize(Stream& s, int64_t& a ) { a = ser_readdata64(s); }
template<typename Stream> inline void Unserialize(Stream& s, uint64_t& a) { a = ser_readdata64(s); }
template<typename Stream> inline void Unserialize(Stream& s, float& a   ) { a = ser_uint32_to_float(ser_readdata32(s)); }
template<typename Stream> inline void Unserialize(Stream& s, double& a  ) { a = ser_uint64_to_double(ser_readdata64(s)); }

template<typename Stream> inline void Serialize(Stream& s, bool a)    { char f=a; ser_writedata8(s, f); }
template<typename Stream> inline void Unserialize(Stream& s, bool& a) { char f=ser_readdata8(s); a=f; }

template <typename T> size_t GetSerializeSize(const T& t, int nType, int nVersion = 0);
template <typename S, typename T> size_t GetSerializeSize(const S& s, const T& t);

/**
 * Compact Size
 * size <  253        -- 1 byte
 * size <= USHRT_MAX  -- 3 bytes  (253 + 2 bytes)
 * size <= UINT_MAX   -- 5 bytes  (254 + 4 bytes)
 * size >  UINT_MAX   -- 9 bytes  (255 + 8 bytes)
 */
inline unsigned int GetSizeOfCompactSize(uint64_t nSize)
{
    if (nSize < 253)                                               return sizeof(unsigned char);
    else if (nSize <= std::numeric_limits<unsigned short>::max()) return sizeof(unsigned char) + sizeof(unsigned short);
    else if (nSize <= std::numeric_limits<unsigned int>::max())   return sizeof(unsigned char) + sizeof(unsigned int);
    else                                                           return sizeof(unsigned char) + sizeof(uint64_t);
}

inline void WriteCompactSize(CSizeComputer& os, uint64_t nSize);

template<typename Stream>
void WriteCompactSize(Stream& os, uint64_t nSize)
{
    if (nSize < 253)
    {
        ser_writedata8(os, nSize);
    }
    else if (nSize <= std::numeric_limits<unsigned short>::max())
    {
        ser_writedata8(os, 253);
        ser_writedata16(os, nSize);
    }
    else if (nSize <= std::numeric_limits<unsigned int>::max())
    {
        ser_writedata8(os, 254);
        ser_writedata32(os, nSize);
    }
    else
    {
        ser_writedata8(os, 255);
        ser_writedata64(os, nSize);
    }
    return;
}

/**
 * Decode a CompactSize-encoded length. Non-canonical encodings (a value that
 * would have fit in a shorter form) are rejected, so each length has exactly
 * one valid serialization; sizes above MAX_SIZE are rejected as well.
 */
template<typename Stream>
uint64_t ReadCompactSize(Stream& is)
{
    uint8_t chSize = ser_readdata8(is);
    uint64_t nSizeRet = 0;
    if (chSize < 253)
    {
        nSizeRet = chSize;
    }
    else if (chSize == 253)
    {
        nSizeRet = ser_readdata16(is);
        if (nSizeRet < 253)
            throw std::ios_base::failure("non-canonical ReadCompactSize()");
    }
    else if (chSize == 254)
    {
        nSizeRet = ser_readdata32(is);
        if (nSizeRet < 0x10000u)
            throw std::ios_base::failure("non-canonical ReadCompactSize()");
    }
    else
    {
        nSizeRet = ser_readdata64(is);
        if (nSizeRet < 0x100000000ULL)
            throw std::ios_base::failure("non-canonical ReadCompactSize()");
    }
    if (nSizeRet > (uint64_t)MAX_SIZE)
        throw std::ios_base::failure("ReadCompactSize(): size too large");
    return nSizeRet;
}
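
// Worked example (informative):
//   WriteCompactSize(s, 3)     writes 03
//   WriteCompactSize(s, 515)   writes FD 03 02       (marker 253 + uint16 LE)
//   WriteCompactSize(s, 70000) writes FE 70 11 01 00 (marker 254 + uint32 LE)
// ReadCompactSize() rejects FD 03 00 (the value 3 padded to three bytes) as
// non-canonical, and any decoded size above MAX_SIZE as too large.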

/**
 * Variable-length integers: bytes are a MSB base-128 encoding of the number.
 * The high bit in each byte signifies whether another digit follows. To make
 * sure the encoding is one-to-one, one is subtracted from all but the last digit.
 * Thus, the byte sequence a[] with length len, where all but the last byte
 * has bit 128 set, encodes the number:
 *
 *   (a[len-1] & 0x7F) + sum(i=1..len-1, 128^i*((a[len-i-1] & 0x7F)+1))
 *
 * Properties:
 * * Very small (0-127: 1 byte, 128-16511: 2 bytes, 16512-2113663: 3 bytes)
 * * Every integer has exactly one encoding
 * * Encoding does not depend on size of original integer type
 * * No redundancy: every (infinite) byte sequence corresponds to a list
 *   of encoded integers.
 *
 * 0:         [0x00]        256:   [0x81 0x00]
 * 1:         [0x01]        16383: [0xFE 0x7F]
 * 127:       [0x7F]        16384: [0xFF 0x00]
 * 128:       [0x80 0x00]   16511: [0xFF 0x7F]
 * 255:       [0x80 0x7F]   65535: [0x82 0xFE 0x7F]
 * 2^32:      [0x8E 0xFE 0xFE 0xFF 0x00]
 */

template<typename I>
inline unsigned int GetSizeOfVarInt(I n)
{
    int nRet = 0;
    while(true) {
        nRet++;
        if (n <= 0x7F)
            break;
        n = (n >> 7) - 1;
    }
    return nRet;
}

template<typename I>
inline void WriteVarInt(CSizeComputer& os, I n);

template<typename Stream, typename I>
void WriteVarInt(Stream& os, I n)
{
    unsigned char tmp[(sizeof(n)*8+6)/7];
    int len=0;
    while(true) {
        tmp[len] = (n & 0x7F) | (len ? 0x80 : 0x00);
        if (n <= 0x7F)
            break;
        n = (n >> 7) - 1;
        len++;
    }
    do {
        ser_writedata8(os, tmp[len]);
    } while(len--);
}

template<typename Stream, typename I>
I ReadVarInt(Stream& is)
{
    I n = 0;
    while(true) {
        unsigned char chData = ser_readdata8(is);
        if (n > (std::numeric_limits<I>::max() >> 7)) {
            throw std::ios_base::failure("ReadVarInt(): size too large");
        }
        n = (n << 7) | (chData & 0x7F);
        if (chData & 0x80) {
            if (n == std::numeric_limits<I>::max()) {
                throw std::ios_base::failure("ReadVarInt(): size too large");
            }
            n++;
        } else {
            return n;
        }
    }
}
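
// Worked example (informative): 256 encodes as [0x81 0x00]. ReadVarInt() sees
// 0x81: n = 0x01, continuation bit set, so n becomes 2; then 0x00:
// n = (2 << 7) | 0x00 = 256, no continuation bit, so 256 is returned. The
// "+1 before continuing" step is what makes every value's encoding unique.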

#define FLATDATA(obj) REF(CFlatData((char*)&(obj), (char*)&(obj) + sizeof(obj)))
#define FIXEDBITSET(obj, size) REF(CFixedBitSet(REF(obj), (size)))
#define DYNBITSET(obj) REF(CDynamicBitSet(REF(obj)))
#define FIXEDVARINTSBITSET(obj, size) REF(CFixedVarIntsBitSet(REF(obj), (size)))
#define AUTOBITSET(obj, size) REF(CAutoBitSet(REF(obj), (size)))
#define VARINT(obj) REF(WrapVarInt(REF(obj)))
#define COMPACTSIZE(obj) REF(CCompactSize(REF(obj)))
#define LIMITED_STRING(obj,n) REF(LimitedString< n >(REF(obj)))
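
// Illustrative usage (a sketch; the field names are hypothetical): inside a
// class' SerializationOp these wrappers change the wire encoding of a field
// without changing its C++ type.
//
//   template <typename Stream, typename Operation>
//   inline void SerializationOp(Stream& s, Operation ser_action)
//   {
//       READWRITE(VARINT(nCount));               // uint64_t encoded as a VarInt
//       READWRITE(LIMITED_STRING(strLabel, 64)); // string rejected if longer than 64 bytes
//       READWRITE(FLATDATA(pchMessageStart));    // POD array written as raw bytes
//   }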

/**
 * Wrapper for serializing arrays and POD.
 */
class CFlatData
{
protected:
    char* pbegin;
    char* pend;
public:
    CFlatData(void* pbeginIn, void* pendIn) : pbegin((char*)pbeginIn), pend((char*)pendIn) { }
    template <class T, class TAl>
    explicit CFlatData(std::vector<T,TAl> &v)
    {
        pbegin = (char*)v.data();
        pend = (char*)(v.data() + v.size());
    }
    template <unsigned int N, typename T, typename S, typename D>
    explicit CFlatData(prevector<N, T, S, D> &v)
    {
        pbegin = (char*)v.data();
        pend = (char*)(v.data() + v.size());
    }
    char* begin() { return pbegin; }
    const char* begin() const { return pbegin; }
    char* end() { return pend; }
    const char* end() const { return pend; }

    template<typename Stream>
    void Serialize(Stream& s) const
    {
        s.write(pbegin, pend - pbegin);
    }

    template<typename Stream>
    void Unserialize(Stream& s)
    {
        s.read(pbegin, pend - pbegin);
    }
};

/**
 * Serializes a fixed-size std::vector<bool> as a packed array of
 * (size + 7) / 8 bytes, with bit p stored at bit (p % 8) of byte p / 8.
 * Unused padding bits in the last byte must be zero, otherwise
 * deserialization fails.
 */
class CFixedBitSet
{
protected:
    std::vector<bool>& vec;
    size_t size;

public:
    CFixedBitSet(std::vector<bool>& vecIn, size_t sizeIn) : vec(vecIn), size(sizeIn) {}

    template<typename Stream>
    void Serialize(Stream& s) const
    {
        std::vector<unsigned char> vBytes((size + 7) / 8);
        size_t ms = std::min(size, vec.size());
        for (size_t p = 0; p < ms; p++)
            vBytes[p / 8] |= vec[p] << (p % 8);
        s.write((char*)vBytes.data(), vBytes.size());
    }

    template<typename Stream>
    void Unserialize(Stream& s)
    {
        vec.resize(size);

        std::vector<unsigned char> vBytes((size + 7) / 8);
        s.read((char*)vBytes.data(), vBytes.size());
        for (size_t p = 0; p < size; p++)
            vec[p] = (vBytes[p / 8] & (1 << (p % 8))) != 0;
        if (vBytes.size() * 8 != size) {
            size_t rem = vBytes.size() * 8 - size;
            uint8_t m = ~(uint8_t)(0xff >> rem);
            if (vBytes[vBytes.size() - 1] & m) {
                throw std::ios_base::failure("Out-of-range bits set");
            }
        }
    }
};

/**
 * Like CFixedBitSet, but the bitset's size is serialized first as a compact
 * size, so the reader does not need to know it in advance.
 */
class CDynamicBitSet
{
protected:
    std::vector<bool>& vec;

public:
    explicit CDynamicBitSet(std::vector<bool>& vecIn) : vec(vecIn) {}

    template<typename Stream>
    void Serialize(Stream& s) const
    {
        WriteCompactSize(s, vec.size());
        CFixedBitSet(REF(vec), vec.size()).Serialize(s);
    }

    template<typename Stream>
    void Unserialize(Stream& s)
    {
        vec.resize(ReadCompactSize(s));
        CFixedBitSet(vec, vec.size()).Unserialize(s);
    }
};

/**
 * Stores a fixed size bitset as a series of VarInts. Each VarInt is an offset from the last entry and the sum of the
 * last entry and the offset gives an index into the bitset for a set bit. The series of VarInts ends with a 0.
 */
class CFixedVarIntsBitSet
{
protected:
    std::vector<bool>& vec;
    size_t size;

public:
    CFixedVarIntsBitSet(std::vector<bool>& vecIn, size_t sizeIn) : vec(vecIn), size(sizeIn) {}

    template<typename Stream>
    void Serialize(Stream& s) const
    {
        int32_t last = -1;
        for (int32_t i = 0; i < (int32_t)vec.size(); i++) {
            if (vec[i]) {
                WriteVarInt<Stream, uint32_t>(s, (uint32_t)(i - last));
                last = i;
            }
        }
        WriteVarInt(s, 0); // stopper
    }

    template<typename Stream>
    void Unserialize(Stream& s)
    {
        vec.assign(size, false);

        int32_t last = -1;
        while(true) {
            uint32_t offset = ReadVarInt<Stream, uint32_t>(s);
            if (offset == 0) {
                break;
            }
            int32_t idx = last + offset;
            if (idx >= size) {
                throw std::ios_base::failure("out of bounds index");
            }
            if (last != -1 && idx <= last) {
                throw std::ios_base::failure("offset overflow");
            }
            vec[idx] = true;
            last = idx;
        }
    }
};
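
// Worked example (informative): for size = 8 with bits 2, 3 and 6 set, the
// offsets from the previous set index (starting from -1) are 3, 1 and 3, so
// the stream is VarInt(3) VarInt(1) VarInt(3) VarInt(0), the final 0 being
// the stopper. Sparse bitsets therefore tend to be smaller in this form than
// as a packed CFixedBitSet.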

/**
 * Serializes either as a CFixedBitSet or CFixedVarIntsBitSet, depending on which would give a smaller size
 */
class CAutoBitSet
{
protected:
    std::vector<bool>& vec;
    size_t size;

public:
    explicit CAutoBitSet(std::vector<bool>& vecIn, size_t sizeIn) : vec(vecIn), size(sizeIn) {}

    template<typename Stream>
    void Serialize(Stream& s) const
    {
        assert(vec.size() == size);

        size_t size1 = ::GetSerializeSize(s, CFixedBitSet(vec, size));
        size_t size2 = ::GetSerializeSize(s, CFixedVarIntsBitSet(vec, size));

        if (size1 < size2) {
            ser_writedata8(s, 0);
            s << FIXEDBITSET(vec, vec.size());
        } else {
            ser_writedata8(s, 1);
            s << FIXEDVARINTSBITSET(vec, vec.size());
        }
    }

    template<typename Stream>
    void Unserialize(Stream& s)
    {
        uint8_t isVarInts = ser_readdata8(s);
        if (isVarInts != 0 && isVarInts != 1) {
            throw std::ios_base::failure("invalid value for isVarInts byte");
        }

        if (!isVarInts) {
            s >> FIXEDBITSET(vec, size);
        } else {
            s >> FIXEDVARINTSBITSET(vec, size);
        }
    }
};
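
// Format note (informative): CAutoBitSet prefixes the payload with a single
// discriminator byte -- 0 selects the packed CFixedBitSet form, 1 the
// CFixedVarIntsBitSet form -- so the smaller of the two encodings costs only
// one extra byte on the wire.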

template<typename I>
class CVarInt
{
protected:
    I &n;
public:
    CVarInt(I& nIn) : n(nIn) { }

    template<typename Stream>
    void Serialize(Stream &s) const {
        WriteVarInt<Stream,I>(s, n);
    }

    template<typename Stream>
    void Unserialize(Stream& s) {
        n = ReadVarInt<Stream,I>(s);
    }
};

class CCompactSize
{
protected:
    uint64_t &n;
public:
    CCompactSize(uint64_t& nIn) : n(nIn) { }

    unsigned int GetSerializeSize() const {
        return GetSizeOfCompactSize(n);
    }

    template<typename Stream>
    void Serialize(Stream &s) const {
        WriteCompactSize<Stream>(s, n);
    }

    template<typename Stream>
    void Unserialize(Stream& s) {
        n = ReadCompactSize<Stream>(s);
    }
};

template<size_t Limit>
class LimitedString
{
protected:
    std::string& string;
public:
    LimitedString(std::string& _string) : string(_string) {}

    template<typename Stream>
    void Unserialize(Stream& s)
    {
        size_t size = ReadCompactSize(s);
        if (size > Limit) {
            throw std::ios_base::failure("String length limit exceeded");
        }
        string.resize(size);
        if (size != 0)
            s.read((char*)string.data(), size);
    }

    template<typename Stream>
    void Serialize(Stream& s) const
    {
        WriteCompactSize(s, string.size());
        if (!string.empty())
            s.write((char*)string.data(), string.size());
    }
};

template<typename I>
CVarInt<I> WrapVarInt(I& n) { return CVarInt<I>(n); }

/**
 * Forward declarations
 */

/**
 *  string
 */
template<typename Stream, typename C> void Serialize(Stream& os, const std::basic_string<C>& str);
template<typename Stream, typename C> void Unserialize(Stream& is, std::basic_string<C>& str);

/**
 * prevector
 * prevectors of unsigned char are a special case and are intended to be serialized as a single opaque blob.
 */
template<typename Stream, unsigned int N, typename T> void Serialize_impl(Stream& os, const prevector<N, T>& v, const unsigned char&);
template<typename Stream, unsigned int N, typename T, typename V> void Serialize_impl(Stream& os, const prevector<N, T>& v, const V&);
template<typename Stream, unsigned int N, typename T> inline void Serialize(Stream& os, const prevector<N, T>& v);
template<typename Stream, unsigned int N, typename T> void Unserialize_impl(Stream& is, prevector<N, T>& v, const unsigned char&);
template<typename Stream, unsigned int N, typename T, typename V> void Unserialize_impl(Stream& is, prevector<N, T>& v, const V&);
template<typename Stream, unsigned int N, typename T> inline void Unserialize(Stream& is, prevector<N, T>& v);

/**
 * vector
 * vectors of unsigned char are a special case and are intended to be serialized as a single opaque blob.
 */
template<typename Stream, typename T, typename A> void Serialize_impl(Stream& os, const std::vector<T, A>& v, const unsigned char&);
template<typename Stream, typename T, typename A, typename V> void Serialize_impl(Stream& os, const std::vector<T, A>& v, const V&);
template<typename Stream, typename T, typename A> inline void Serialize(Stream& os, const std::vector<T, A>& v);
template<typename Stream, typename T, typename A> void Unserialize_impl(Stream& is, std::vector<T, A>& v, const unsigned char&);
template<typename Stream, typename T, typename A, typename V> void Unserialize_impl(Stream& is, std::vector<T, A>& v, const V&);
template<typename Stream, typename T, typename A> inline void Unserialize(Stream& is, std::vector<T, A>& v);

/**
 * pair
 */
template<typename Stream, typename K, typename T> void Serialize(Stream& os, const std::pair<K, T>& item);
template<typename Stream, typename K, typename T> void Unserialize(Stream& is, std::pair<K, T>& item);

/**
 * tuple
 */
template<typename Stream, typename... Elements> void Serialize(Stream& os, const std::tuple<Elements...>& item);
template<typename Stream, typename... Elements> void Unserialize(Stream& is, std::tuple<Elements...>& item);

/**
 * map
 */
template<typename Stream, typename K, typename T, typename Pred, typename A> void Serialize(Stream& os, const std::map<K, T, Pred, A>& m);
template<typename Stream, typename K, typename T, typename Pred, typename A> void Unserialize(Stream& is, std::map<K, T, Pred, A>& m);
template<typename Stream, typename K, typename T, typename Hash, typename Pred, typename A> void Serialize(Stream& os, const std::unordered_map<K, T, Hash, Pred, A>& m);
template<typename Stream, typename K, typename T, typename Hash, typename Pred, typename A> void Unserialize(Stream& is, std::unordered_map<K, T, Hash, Pred, A>& m);

/**
 * set
 */
template<typename Stream, typename K, typename Pred, typename A> void Serialize(Stream& os, const std::set<K, Pred, A>& m);
template<typename Stream, typename K, typename Pred, typename A> void Unserialize(Stream& is, std::set<K, Pred, A>& m);
template<typename Stream, typename K, typename Hash, typename Pred, typename A> void Serialize(Stream& os, const std::unordered_set<K, Hash, Pred, A>& m);
template<typename Stream, typename K, typename Hash, typename Pred, typename A> void Unserialize(Stream& is, std::unordered_set<K, Hash, Pred, A>& m);

/**
 * shared_ptr
 */
template<typename Stream, typename T> void Serialize(Stream& os, const std::shared_ptr<T>& p);
template<typename Stream, typename T> void Unserialize(Stream& os, std::shared_ptr<T>& p);

/**
 * unique_ptr
 */
template<typename Stream, typename T> void Serialize(Stream& os, const std::unique_ptr<const T>& p);
template<typename Stream, typename T> void Unserialize(Stream& os, std::unique_ptr<const T>& p);

/**
 * If none of the specialized versions above matched and T is a class, default to calling member function.
 */
template<typename Stream, typename T, typename std::enable_if<std::is_class<T>::value>::type* = nullptr>
inline void Serialize(Stream& os, const T& a)
{
    a.Serialize(os);
}

template<typename Stream, typename T, typename std::enable_if<std::is_class<T>::value>::type* = nullptr>
inline void Unserialize(Stream& is, T& a)
{
    a.Unserialize(is);
}

/**
 * If none of the specialized versions above matched and T is an enum, default to calling
 * Serialize/Unserialize with the underlying type. This is only allowed when a specialization of
 * is_serializable_enum<Enum> is found which derives from std::true_type. This is to ensure that
 * enums are not serialized with the wrong type by accident.
 */

template<typename T> struct is_serializable_enum;
template<typename T> struct is_serializable_enum : std::false_type {};

template<typename Stream, typename T, typename std::enable_if<std::is_enum<T>::value>::type* = nullptr>
inline void Serialize(Stream& s, T a )
{
    // If you ever get into this situation, it usually means you forgot to declare is_serializable_enum for the desired enum type
    static_assert(is_serializable_enum<T>::value, "Missing declaration of is_serializable_enum");

    typedef typename std::underlying_type<T>::type T2;
    T2 b = (T2)a;
    Serialize(s, b);
}

template<typename Stream, typename T, typename std::enable_if<std::is_enum<T>::value>::type* = nullptr>
inline void Unserialize(Stream& s, T& a )
{
    // If you ever get into this situation, it usually means you forgot to declare is_serializable_enum for the desired enum type
    static_assert(is_serializable_enum<T>::value, "Missing declaration of is_serializable_enum");

    typedef typename std::underlying_type<T>::type T2;
    T2 b;
    Unserialize(s, b);
    a = (T)b;
}
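
// Illustrative opt-in (a sketch; ExampleMode is hypothetical): an enum becomes
// serializable by specializing is_serializable_enum to derive from
// std::true_type, after which it is encoded as its underlying integer type.
//
//   enum class ExampleMode : uint8_t { Off = 0, On = 1 };
//   template<> struct is_serializable_enum<ExampleMode> : std::true_type {};
//
//   // Serialize(s, ExampleMode::On) now writes the single byte 0x01.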

/**
 * string
 */
template<typename Stream, typename C>
void Serialize(Stream& os, const std::basic_string<C>& str)
{
    WriteCompactSize(os, str.size());
    if (!str.empty())
        os.write((char*)str.data(), str.size() * sizeof(C));
}

template<typename Stream, typename C>
void Unserialize(Stream& is, std::basic_string<C>& str)
{
    unsigned int nSize = ReadCompactSize(is);
    str.resize(nSize);
    if (nSize != 0)
        is.read((char*)str.data(), nSize * sizeof(C));
}
|
|
|
|
|
|
|
|
|
|
|
|
|
2015-10-29 07:11:24 +01:00
|
|
|
/**
|
|
|
|
* prevector
|
|
|
|
*/
|
|
|
|
template<typename Stream, unsigned int N, typename T>
|
2016-11-09 12:32:57 +01:00
|
|
|
void Serialize_impl(Stream& os, const prevector<N, T>& v, const unsigned char&)
|
2015-10-29 07:11:24 +01:00
|
|
|
{
|
|
|
|
WriteCompactSize(os, v.size());
|
|
|
|
if (!v.empty())
|
2017-07-13 01:23:59 +02:00
|
|
|
os.write((char*)v.data(), v.size() * sizeof(T));
|
2015-10-29 07:11:24 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
template<typename Stream, unsigned int N, typename T, typename V>
|
2016-11-09 12:32:57 +01:00
|
|
|
void Serialize_impl(Stream& os, const prevector<N, T>& v, const V&)
|
2015-10-29 07:11:24 +01:00
|
|
|
{
|
|
|
|
WriteCompactSize(os, v.size());
|
|
|
|
for (typename prevector<N, T>::const_iterator vi = v.begin(); vi != v.end(); ++vi)
|
2016-11-09 12:32:57 +01:00
|
|
|
::Serialize(os, (*vi));
|
2015-10-29 07:11:24 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
template<typename Stream, unsigned int N, typename T>
|
2016-11-09 12:32:57 +01:00
|
|
|
inline void Serialize(Stream& os, const prevector<N, T>& v)
|
2015-10-29 07:11:24 +01:00
|
|
|
{
|
2016-11-09 12:32:57 +01:00
|
|
|
Serialize_impl(os, v, T());
|
2015-10-29 07:11:24 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
template<typename Stream, unsigned int N, typename T>
|
2016-11-09 12:32:57 +01:00
|
|
|
void Unserialize_impl(Stream& is, prevector<N, T>& v, const unsigned char&)
|
2015-10-29 07:11:24 +01:00
|
|
|
{
|
|
|
|
// Limit size per read so bogus size value won't cause out of memory
|
|
|
|
v.clear();
|
|
|
|
unsigned int nSize = ReadCompactSize(is);
|
|
|
|
unsigned int i = 0;
|
|
|
|
while (i < nSize)
|
|
|
|
{
|
|
|
|
unsigned int blk = std::min(nSize - i, (unsigned int)(1 + 4999999 / sizeof(T)));
|
Merge #12324: speed up Unserialize_impl for prevector
86b47fa741408b061ab0bda784b8678bfd7dfa88 speed up Unserialize_impl for prevector (Akio Nakamura)
Pull request description:
The unserializer for prevector uses `resize()` for reserve the area, but it's prefer to use `reserve()` because `resize()` have overhead to call its constructor many times.
However, `reserve()` does not change the value of `_size` (a private member of prevector).
This PR make the logic of read from stream to callback function, and prevector handles initilizing new values with that call-back and ajust the value of `_size`.
The changes are as follows:
1. prevector.h
Add a public member function named 'append'.
This function has 2 params, number of elemenst to append and call-back function that initilizing new appended values.
2. serialize.h
In the following two function:
- `Unserialize_impl(Stream& is, prevector<N, T>& v, const unsigned char&)`
- `Unserialize_impl(Stream& is, prevector<N, T>& v, const V&)`
Make a callback function from each original logic of reading values from stream, and call prevector's `append()`.
3. test/prevector_tests.cpp
Add a test for `append()`.
## A benchmark result is following:
[Machine]
MacBook Pro (macOS 10.13.3/i7 2.2GHz/mem 16GB/SSD)
[result]
DeserializeAndCheckBlockTest => 22% faster
DeserializeBlockTest => 29% faster
[before PR]
# Benchmark, evals, iterations, total, min, max, median
DeserializeAndCheckBlockTest, 60, 160, 94.4901, 0.0094644, 0.0104715, 0.0098339
DeserializeBlockTest, 60, 130, 65.0964, 0.00800362, 0.00895134, 0.00824187
[After PR]
# Benchmark, evals, iterations, total, min, max, median
DeserializeAndCheckBlockTest, 60, 160, 77.1597, 0.00767013, 0.00858959, 0.00805757
DeserializeBlockTest, 60, 130, 49.9443, 0.00613926, 0.00691187, 0.00635527
ACKs for top commit:
laanwj:
utACK 86b47fa741408b061ab0bda784b8678bfd7dfa88
Tree-SHA512: 62ea121ccd45a306fefc67485a1b03a853435af762607dae2426a87b15a3033d802c8556e1923727ddd1023a1837d0e5f6720c2c77b38196907e750e15fbb902
2019-06-18 16:52:11 +02:00
|
|
|
v.resize_uninitialized(i + blk);
|
2015-10-29 07:11:24 +01:00
|
|
|
is.read((char*)&v[i], blk * sizeof(T));
|
|
|
|
i += blk;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
template<typename Stream, unsigned int N, typename T, typename V>
|
2016-11-09 12:32:57 +01:00
|
|
|
void Unserialize_impl(Stream& is, prevector<N, T>& v, const V&)
|
2015-10-29 07:11:24 +01:00
|
|
|
{
|
|
|
|
v.clear();
|
|
|
|
unsigned int nSize = ReadCompactSize(is);
|
|
|
|
unsigned int i = 0;
|
|
|
|
unsigned int nMid = 0;
|
|
|
|
while (nMid < nSize)
|
|
|
|
{
|
|
|
|
nMid += 5000000 / sizeof(T);
|
|
|
|
if (nMid > nSize)
|
|
|
|
nMid = nSize;
|
Merge #12324: speed up Unserialize_impl for prevector
86b47fa741408b061ab0bda784b8678bfd7dfa88 speed up Unserialize_impl for prevector (Akio Nakamura)
Pull request description:
The unserializer for prevector uses `resize()` for reserve the area, but it's prefer to use `reserve()` because `resize()` have overhead to call its constructor many times.
However, `reserve()` does not change the value of `_size` (a private member of prevector).
This PR make the logic of read from stream to callback function, and prevector handles initilizing new values with that call-back and ajust the value of `_size`.
The changes are as follows:
1. prevector.h
Add a public member function named 'append'.
This function has 2 params, number of elemenst to append and call-back function that initilizing new appended values.
2. serialize.h
In the following two function:
- `Unserialize_impl(Stream& is, prevector<N, T>& v, const unsigned char&)`
- `Unserialize_impl(Stream& is, prevector<N, T>& v, const V&)`
Make a callback function from each original logic of reading values from stream, and call prevector's `append()`.
3. test/prevector_tests.cpp
Add a test for `append()`.
## A benchmark result is following:
[Machine]
MacBook Pro (macOS 10.13.3/i7 2.2GHz/mem 16GB/SSD)
[result]
DeserializeAndCheckBlockTest => 22% faster
DeserializeBlockTest => 29% faster
[before PR]
# Benchmark, evals, iterations, total, min, max, median
DeserializeAndCheckBlockTest, 60, 160, 94.4901, 0.0094644, 0.0104715, 0.0098339
DeserializeBlockTest, 60, 130, 65.0964, 0.00800362, 0.00895134, 0.00824187
[After PR]
# Benchmark, evals, iterations, total, min, max, median
DeserializeAndCheckBlockTest, 60, 160, 77.1597, 0.00767013, 0.00858959, 0.00805757
DeserializeBlockTest, 60, 130, 49.9443, 0.00613926, 0.00691187, 0.00635527
ACKs for top commit:
laanwj:
utACK 86b47fa741408b061ab0bda784b8678bfd7dfa88
Tree-SHA512: 62ea121ccd45a306fefc67485a1b03a853435af762607dae2426a87b15a3033d802c8556e1923727ddd1023a1837d0e5f6720c2c77b38196907e750e15fbb902
2019-06-18 16:52:11 +02:00
|
|
|
v.resize_uninitialized(nMid);
|
|
|
|
for (; i < nMid; ++i)
|
2016-11-09 12:32:57 +01:00
|
|
|
Unserialize(is, v[i]);
|
2015-10-29 07:11:24 +01:00
|
|
|
}
|
|
|
|
}
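The `append()` flow described in the merge commit message above is easiest to see as a sketch. This is an illustration only, assuming a prevector-like container exposing an `append(count, callback)` member as the PR description proposes; the callback signature here is hypothetical, not the shipped API.
```
#include <algorithm>

// Sketch: Container stands in for prevector<N, T>. append() is assumed to
// bump _size and hand the uninitialized tail to the callback, so elements
// are filled in place with no per-element default construction.
template <typename Stream, typename T, typename Container>
void UnserializeChunkedSketch(Stream& is, Container& v, unsigned int nSize)
{
    unsigned int i = 0;
    while (i < nSize) {
        // Cap each chunk near 5 MB so a bogus size can't force a huge allocation.
        unsigned int blk = std::min(nSize - i, (unsigned int)(1 + 4999999 / sizeof(T)));
        v.append(blk, [&is](T* dst, unsigned int n) {
            is.read((char*)dst, n * sizeof(T));
        });
        i += blk;
    }
}
```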
|
|
|
|
|
|
|
|
template<typename Stream, unsigned int N, typename T>
|
2016-11-09 12:32:57 +01:00
|
|
|
inline void Unserialize(Stream& is, prevector<N, T>& v)
|
2015-10-29 07:11:24 +01:00
|
|
|
{
|
2016-11-09 12:32:57 +01:00
|
|
|
Unserialize_impl(is, v, T());
|
2015-10-29 07:11:24 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
|
2014-10-31 04:34:30 +01:00
|
|
|
/**
|
|
|
|
* vector
|
|
|
|
*/
|
2010-08-29 18:58:15 +02:00
|
|
|
template<typename Stream, typename T, typename A>
|
2016-11-09 12:32:57 +01:00
|
|
|
void Serialize_impl(Stream& os, const std::vector<T, A>& v, const unsigned char&)
|
2010-08-29 18:58:15 +02:00
|
|
|
{
|
|
|
|
WriteCompactSize(os, v.size());
|
|
|
|
if (!v.empty())
|
2017-07-13 01:23:59 +02:00
|
|
|
os.write((char*)v.data(), v.size() * sizeof(T));
|
2010-08-29 18:58:15 +02:00
|
|
|
}
|
|
|
|
|
2014-10-08 20:27:07 +02:00
|
|
|
template<typename Stream, typename T, typename A, typename V>
|
2016-11-09 12:32:57 +01:00
|
|
|
void Serialize_impl(Stream& os, const std::vector<T, A>& v, const V&)
|
2010-08-29 18:58:15 +02:00
|
|
|
{
|
|
|
|
WriteCompactSize(os, v.size());
|
|
|
|
for (typename std::vector<T, A>::const_iterator vi = v.begin(); vi != v.end(); ++vi)
|
2016-11-09 12:32:57 +01:00
|
|
|
::Serialize(os, (*vi));
|
2010-08-29 18:58:15 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
template<typename Stream, typename T, typename A>
|
2016-11-09 12:32:57 +01:00
|
|
|
inline void Serialize(Stream& os, const std::vector<T, A>& v)
|
2010-08-29 18:58:15 +02:00
|
|
|
{
|
2016-11-09 12:32:57 +01:00
|
|
|
Serialize_impl(os, v, T());
|
2010-08-29 18:58:15 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
template<typename Stream, typename T, typename A>
|
2016-11-09 12:32:57 +01:00
|
|
|
void Unserialize_impl(Stream& is, std::vector<T, A>& v, const unsigned char&)
|
2010-08-29 18:58:15 +02:00
|
|
|
{
|
|
|
|
// Limit size per read so a bogus size value won't cause an out-of-memory condition
|
|
|
|
v.clear();
|
|
|
|
unsigned int nSize = ReadCompactSize(is);
|
|
|
|
unsigned int i = 0;
|
|
|
|
while (i < nSize)
|
|
|
|
{
|
2011-05-15 09:11:04 +02:00
|
|
|
unsigned int blk = std::min(nSize - i, (unsigned int)(1 + 4999999 / sizeof(T)));
|
2010-08-29 18:58:15 +02:00
|
|
|
v.resize(i + blk);
|
|
|
|
is.read((char*)&v[i], blk * sizeof(T));
|
|
|
|
i += blk;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2014-10-08 20:27:07 +02:00
|
|
|
template<typename Stream, typename T, typename A, typename V>
|
2016-11-09 12:32:57 +01:00
|
|
|
void Unserialize_impl(Stream& is, std::vector<T, A>& v, const V&)
|
2010-08-29 18:58:15 +02:00
|
|
|
{
|
|
|
|
v.clear();
|
|
|
|
unsigned int nSize = ReadCompactSize(is);
|
|
|
|
unsigned int i = 0;
|
|
|
|
unsigned int nMid = 0;
|
|
|
|
while (nMid < nSize)
|
|
|
|
{
|
|
|
|
nMid += 5000000 / sizeof(T);
|
|
|
|
if (nMid > nSize)
|
|
|
|
nMid = nSize;
|
|
|
|
v.resize(nMid);
|
|
|
|
for (; i < nMid; i++)
|
2016-11-09 12:32:57 +01:00
|
|
|
Unserialize(is, v[i]);
|
2010-08-29 18:58:15 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
template<typename Stream, typename T, typename A>
|
2016-11-09 12:32:57 +01:00
|
|
|
inline void Unserialize(Stream& is, std::vector<T, A>& v)
|
2010-08-29 18:58:15 +02:00
|
|
|
{
|
2016-11-09 12:32:57 +01:00
|
|
|
Unserialize_impl(is, v, T());
|
2010-08-29 18:58:15 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
|
2014-10-31 04:34:30 +01:00
|
|
|
/**
|
|
|
|
* pair
|
|
|
|
*/
|
2010-08-29 18:58:15 +02:00
|
|
|
template<typename Stream, typename K, typename T>
|
2016-11-09 12:32:57 +01:00
|
|
|
void Serialize(Stream& os, const std::pair<K, T>& item)
|
2010-08-29 18:58:15 +02:00
|
|
|
{
|
2016-11-09 12:32:57 +01:00
|
|
|
Serialize(os, item.first);
|
|
|
|
Serialize(os, item.second);
|
2010-08-29 18:58:15 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
template<typename Stream, typename K, typename T>
|
2016-11-09 12:32:57 +01:00
|
|
|
void Unserialize(Stream& is, std::pair<K, T>& item)
|
2010-08-29 18:58:15 +02:00
|
|
|
{
|
2016-11-09 12:32:57 +01:00
|
|
|
Unserialize(is, item.first);
|
|
|
|
Unserialize(is, item.second);
|
2010-08-29 18:58:15 +02:00
|
|
|
}
|
|
|
|
|
2018-09-14 17:53:49 +02:00
|
|
|
/**
|
|
|
|
* tuple
|
|
|
|
*/
|
2019-03-22 11:52:37 +01:00
|
|
|
template<typename Stream, int index, typename... Ts>
|
|
|
|
struct SerializeTuple {
|
2018-09-14 17:53:49 +02:00
|
|
|
void operator() (Stream& s, std::tuple<Ts...>& t) {
|
2019-03-22 11:52:37 +01:00
|
|
|
SerializeTuple<Stream, index - 1, Ts...>{}(s, t);
|
|
|
|
s << std::get<index>(t);
|
2018-09-14 17:53:49 +02:00
|
|
|
}
|
|
|
|
};
|
|
|
|
|
2019-03-22 11:52:37 +01:00
|
|
|
template<typename Stream, typename... Ts>
|
|
|
|
struct SerializeTuple<Stream, 0, Ts...> {
|
2018-09-14 17:53:49 +02:00
|
|
|
void operator() (Stream& s, std::tuple<Ts...>& t) {
|
2019-03-22 11:52:37 +01:00
|
|
|
s << std::get<0>(t);
|
2018-09-14 17:53:49 +02:00
|
|
|
}
|
|
|
|
};
|
|
|
|
|
2019-03-22 11:52:37 +01:00
|
|
|
template<typename Stream, int index, typename... Ts>
|
|
|
|
struct DeserializeTuple {
|
|
|
|
void operator() (Stream& s, std::tuple<Ts...>& t) {
|
|
|
|
DeserializeTuple<Stream, index - 1, Ts...>{}(s, t);
|
|
|
|
s >> std::get<index>(t);
|
|
|
|
}
|
|
|
|
};
|
|
|
|
|
|
|
|
template<typename Stream, typename... Ts>
|
|
|
|
struct DeserializeTuple<Stream, 0, Ts...> {
|
|
|
|
void operator() (Stream& s, std::tuple<Ts...>& t) {
|
|
|
|
s >> std::get<0>(t);
|
|
|
|
}
|
|
|
|
};
|
|
|
|
|
|
|
|
|
2018-09-14 17:53:49 +02:00
|
|
|
template<typename Stream, typename... Elements>
|
|
|
|
void Serialize(Stream& os, const std::tuple<Elements...>& item)
|
|
|
|
{
|
|
|
|
const auto size = std::tuple_size<std::tuple<Elements...>>::value;
|
2019-03-22 11:52:37 +01:00
|
|
|
SerializeTuple<Stream, size - 1, Elements...>{}(os, const_cast<std::tuple<Elements...>&>(item));
|
2018-09-14 17:53:49 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
template<typename Stream, typename... Elements>
|
|
|
|
void Unserialize(Stream& is, std::tuple<Elements...>& item)
|
|
|
|
{
|
|
|
|
const auto size = std::tuple_size<std::tuple<Elements...>>::value;
|
2019-03-22 11:52:37 +01:00
|
|
|
DeserializeTuple<Stream, size - 1, Elements...>{}(is, item);
|
2018-09-14 17:53:49 +02:00
|
|
|
}
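The recursion above bottoms out at index 0 before any element is written, so tuple elements are serialized in ascending index order. A small sketch; `CountingStream` is a stand-in defined only for this illustration and relies on the basic-type `Serialize` overloads from earlier in this header.
```
#include <cstdint>
#include <tuple>

// Stand-in stream: counts bytes and forwards operator<< to Serialize,
// which is all the tuple helpers above require of a stream.
struct CountingStream {
    size_t n = 0;
    void write(const char*, size_t len) { n += len; }
    template <typename T>
    CountingStream& operator<<(const T& obj) { Serialize(*this, obj); return *this; }
};

// CountingStream cs;
// std::tuple<int32_t, int64_t> t{1, 2};
// Serialize(cs, t);  // writes get<0> first, then get<1>; cs.n == 4 + 8
```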
|
2010-08-29 18:58:15 +02:00
|
|
|
|
|
|
|
|
2014-10-31 04:34:30 +01:00
|
|
|
/**
|
|
|
|
* map
|
|
|
|
*/
|
2019-04-11 14:42:14 +02:00
|
|
|
template<typename Stream, typename Map>
|
|
|
|
void SerializeMap(Stream& os, const Map& m)
|
2010-08-29 18:58:15 +02:00
|
|
|
{
|
|
|
|
WriteCompactSize(os, m.size());
|
2019-04-11 14:42:14 +02:00
|
|
|
for (auto mi = m.begin(); mi != m.end(); ++mi)
|
2016-11-09 12:32:57 +01:00
|
|
|
Serialize(os, (*mi));
|
2010-08-29 18:58:15 +02:00
|
|
|
}
|
|
|
|
|
2019-04-11 14:42:14 +02:00
|
|
|
template<typename Stream, typename Map>
|
|
|
|
void UnserializeMap(Stream& is, Map& m)
|
2010-08-29 18:58:15 +02:00
|
|
|
{
|
|
|
|
m.clear();
|
|
|
|
unsigned int nSize = ReadCompactSize(is);
|
2019-04-11 14:42:14 +02:00
|
|
|
auto mi = m.begin();
|
2010-08-29 18:58:15 +02:00
|
|
|
for (unsigned int i = 0; i < nSize; i++)
|
|
|
|
{
|
2019-04-11 14:42:14 +02:00
|
|
|
std::pair<typename std::remove_const<typename Map::key_type>::type, typename std::remove_const<typename Map::mapped_type>::type> item;
|
2016-11-09 12:32:57 +01:00
|
|
|
Unserialize(is, item);
|
2010-08-29 18:58:15 +02:00
|
|
|
mi = m.insert(mi, item);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-04-11 14:42:14 +02:00
|
|
|
template<typename Stream, typename K, typename T, typename Pred, typename A>
|
|
|
|
void Serialize(Stream& os, const std::map<K, T, Pred, A>& m)
|
|
|
|
{
|
|
|
|
SerializeMap(os, m);
|
|
|
|
}
|
|
|
|
|
|
|
|
template<typename Stream, typename K, typename T, typename Pred, typename A>
|
|
|
|
void Unserialize(Stream& is, std::map<K, T, Pred, A>& m)
|
|
|
|
{
|
|
|
|
UnserializeMap(is, m);
|
|
|
|
}
|
|
|
|
|
|
|
|
template<typename Stream, typename K, typename T, typename Hash, typename Pred, typename A>
|
|
|
|
void Serialize(Stream& os, const std::unordered_map<K, T, Hash, Pred, A>& m)
|
|
|
|
{
|
|
|
|
SerializeMap(os, m);
|
|
|
|
}
|
2010-08-29 18:58:15 +02:00
|
|
|
|
2019-04-11 14:42:14 +02:00
|
|
|
template<typename Stream, typename K, typename T, typename Hash, typename Pred, typename A>
|
|
|
|
void Unserialize(Stream& is, std::unordered_map<K, T, Hash, Pred, A>& m)
|
|
|
|
{
|
|
|
|
UnserializeMap(is, m);
|
|
|
|
}
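Round-tripping a map through these helpers looks like the sketch below. `MemStream` is a minimal in-memory stream defined only for this illustration (no bounds checking); real code would use a full stream class such as CDataStream.
```
#include <cstdint>
#include <cstring>
#include <map>
#include <vector>

// Minimal in-memory stream for the sketch; not safe for untrusted input.
struct MemStream {
    std::vector<char> buf;
    size_t pos = 0;
    void write(const char* p, size_t n) { buf.insert(buf.end(), p, p + n); }
    void read(char* p, size_t n) { std::memcpy(p, buf.data() + pos, n); pos += n; }
};

// MemStream ms;
// std::map<uint32_t, uint32_t> in{{1, 10}, {2, 20}}, out;
// SerializeMap(ms, in);     // compact size, then each (key, value) pair
// UnserializeMap(ms, out);  // hinted insert stays O(n) when keys arrive sorted
```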
|
2010-08-29 18:58:15 +02:00
|
|
|
|
2014-10-31 04:34:30 +01:00
|
|
|
/**
|
|
|
|
* set
|
|
|
|
*/
|
2019-04-11 14:42:14 +02:00
|
|
|
|
|
|
|
template<typename Stream, typename Set>
|
|
|
|
void SerializeSet(Stream& os, const Set& m)
|
2010-08-29 18:58:15 +02:00
|
|
|
{
|
|
|
|
WriteCompactSize(os, m.size());
|
2019-04-11 14:42:14 +02:00
|
|
|
for (auto it = m.begin(); it != m.end(); ++it)
|
2016-11-09 12:32:57 +01:00
|
|
|
Serialize(os, (*it));
|
2010-08-29 18:58:15 +02:00
|
|
|
}
|
|
|
|
|
2019-04-11 14:42:14 +02:00
|
|
|
template<typename Stream, typename Set>
|
|
|
|
void UnserializeSet(Stream& is, Set& m)
|
2010-08-29 18:58:15 +02:00
|
|
|
{
|
|
|
|
m.clear();
|
|
|
|
unsigned int nSize = ReadCompactSize(is);
|
2019-04-11 14:42:14 +02:00
|
|
|
auto it = m.begin();
|
2010-08-29 18:58:15 +02:00
|
|
|
for (unsigned int i = 0; i < nSize; i++)
|
|
|
|
{
|
2019-04-11 14:42:14 +02:00
|
|
|
typename std::remove_const<typename Set::key_type>::type key;
|
2016-11-09 12:32:57 +01:00
|
|
|
Unserialize(is, key);
|
2010-08-29 18:58:15 +02:00
|
|
|
it = m.insert(it, key);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-04-11 14:42:14 +02:00
|
|
|
template<typename Stream, typename K, typename Pred, typename A>
|
|
|
|
void Serialize(Stream& os, const std::set<K, Pred, A>& m)
|
|
|
|
{
|
|
|
|
SerializeSet(os, m);
|
|
|
|
}
|
|
|
|
|
|
|
|
template<typename Stream, typename K, typename Pred, typename A>
|
|
|
|
void Unserialize(Stream& is, std::set<K, Pred, A>& m)
|
|
|
|
{
|
|
|
|
UnserializeSet(is, m);
|
|
|
|
}
|
|
|
|
|
|
|
|
template<typename Stream, typename K, typename Hash, typename Pred, typename A>
|
|
|
|
void Serialize(Stream& os, const std::unordered_set<K, Hash, Pred, A>& m)
|
|
|
|
{
|
|
|
|
SerializeSet(os, m);
|
|
|
|
}
|
|
|
|
|
|
|
|
template<typename Stream, typename K, typename Hash, typename Pred, typename A>
|
|
|
|
void Unserialize(Stream& is, std::unordered_set<K, Hash, Pred, A>& m)
|
|
|
|
{
|
|
|
|
UnserializeSet(is, m);
|
|
|
|
}
|
|
|
|
|
2016-11-13 18:52:34 +01:00
|
|
|
/**
|
|
|
|
* list
|
|
|
|
*/
|
|
|
|
template<typename Stream, typename T, typename A>
|
2016-11-09 12:32:57 +01:00
|
|
|
void Serialize(Stream& os, const std::list<T, A>& l)
|
2016-11-13 18:52:34 +01:00
|
|
|
{
|
|
|
|
WriteCompactSize(os, l.size());
|
|
|
|
for (typename std::list<T, A>::const_iterator it = l.begin(); it != l.end(); ++it)
|
2016-11-09 12:32:57 +01:00
|
|
|
Serialize(os, (*it));
|
2016-11-13 18:52:34 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
template<typename Stream, typename T, typename A>
|
2016-11-09 12:32:57 +01:00
|
|
|
void Unserialize(Stream& is, std::list<T, A>& l)
|
2016-11-13 18:52:34 +01:00
|
|
|
{
|
|
|
|
l.clear();
|
|
|
|
unsigned int nSize = ReadCompactSize(is);
|
|
|
|
for (unsigned int i = 0; i < nSize; i++)
|
|
|
|
{
|
|
|
|
T val;
|
2016-11-09 12:32:57 +01:00
|
|
|
Unserialize(is, val);
|
2016-11-13 18:52:34 +01:00
|
|
|
l.push_back(val);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2010-08-29 18:58:15 +02:00
|
|
|
|
|
|
|
|
2016-11-21 10:51:32 +01:00
|
|
|
/**
|
|
|
|
* unique_ptr
|
|
|
|
*/
|
|
|
|
template<typename Stream, typename T> void
|
|
|
|
Serialize(Stream& os, const std::unique_ptr<const T>& p)
|
|
|
|
{
|
|
|
|
Serialize(os, *p);
|
|
|
|
}
|
|
|
|
|
|
|
|
template<typename Stream, typename T>
|
|
|
|
void Unserialize(Stream& is, std::unique_ptr<const T>& p)
|
|
|
|
{
|
|
|
|
p.reset(new T(deserialize, is));
|
|
|
|
}
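Note that the overload above constructs the pointee directly from the stream, so `T` must provide a constructor taking the `deserialize` tag (declared earlier in this header) and a stream. A hypothetical pointee type:
```
#include <cstdint>

// Hypothetical example: the tag constructor unserializes members straight
// from the stream, with no separate default construction step.
class CBlobSketch
{
public:
    uint32_t n{0};
    CBlobSketch() = default;
    template <typename Stream>
    CBlobSketch(deserialize_type, Stream& s) { Unserialize(s, n); }
};

// std::unique_ptr<const CBlobSketch> p;
// Unserialize(is, p);  // ends up calling new CBlobSketch(deserialize, is)
```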
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
/**
|
|
|
|
* shared_ptr
|
|
|
|
*/
|
|
|
|
template<typename Stream, typename T> void
|
2019-01-03 21:08:34 +01:00
|
|
|
Serialize(Stream& os, const std::shared_ptr<T>& p)
|
2016-11-21 10:51:32 +01:00
|
|
|
{
|
|
|
|
Serialize(os, *p);
|
|
|
|
}
|
|
|
|
|
|
|
|
template<typename Stream, typename T>
|
2019-01-03 21:08:34 +01:00
|
|
|
void Unserialize(Stream& is, std::shared_ptr<T>& p)
|
2016-11-21 10:51:32 +01:00
|
|
|
{
|
2019-01-03 21:08:34 +01:00
|
|
|
p = std::make_shared<T>(deserialize, is);
|
2016-11-21 10:51:32 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
|
2014-10-31 04:34:30 +01:00
|
|
|
/**
|
|
|
|
* Support for ADD_SERIALIZE_METHODS and READWRITE macro
|
|
|
|
*/
|
2014-08-21 00:49:32 +02:00
|
|
|
struct CSerActionSerialize
|
2010-08-29 18:58:15 +02:00
|
|
|
{
|
2016-11-09 12:32:57 +01:00
|
|
|
constexpr bool ForRead() const { return false; }
|
2014-08-21 00:49:32 +02:00
|
|
|
};
|
|
|
|
struct CSerActionUnserialize
|
|
|
|
{
|
2016-11-09 12:32:57 +01:00
|
|
|
constexpr bool ForRead() const { return true; }
|
2014-08-21 00:49:32 +02:00
|
|
|
};
|
2010-08-29 18:58:15 +02:00
|
|
|
|
|
|
|
template<typename Stream, typename T>
|
2016-11-09 12:32:57 +01:00
|
|
|
inline void SerReadWrite(Stream& s, const T& obj, CSerActionSerialize ser_action)
|
2010-08-29 18:58:15 +02:00
|
|
|
{
|
2016-11-09 12:32:57 +01:00
|
|
|
::Serialize(s, obj);
|
2010-08-29 18:58:15 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
template<typename Stream, typename T>
|
2016-11-09 12:32:57 +01:00
|
|
|
inline void SerReadWrite(Stream& s, T& obj, CSerActionUnserialize ser_action)
|
2010-08-29 18:58:15 +02:00
|
|
|
{
|
2016-11-09 12:32:57 +01:00
|
|
|
::Unserialize(s, obj);
|
2010-08-29 18:58:15 +02:00
|
|
|
}
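These two overloads are what make the ADD_SERIALIZE_METHODS pattern work: the empty tag types CSerActionSerialize and CSerActionUnserialize select the direction at compile time, so a single SerializationOp body serves both reads and writes. A simplified sketch (the real macros also take care of const-ness):
```
#include <cstdint>

// Hypothetical object whose one SerializationOp drives both directions.
class CExampleObj
{
public:
    uint32_t nValue{0};

    template <typename Stream, typename Operation>
    void SerializationOp(Stream& s, Operation ser_action)
    {
        // The static type of ser_action picks the SerReadWrite overload above;
        // ser_action.ForRead() remains available for runtime branches.
        SerReadWrite(s, nValue, ser_action);
    }
};
```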
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
2012-01-03 09:03:07 +01:00
|
|
|
|
|
|
|
|
2016-11-09 12:32:57 +01:00
|
|
|
/* ::GetSerializeSize implementations
|
|
|
|
*
|
|
|
|
* Computing the serialized size of objects is done through a special stream
|
|
|
|
* object of type CSizeComputer, which only records the number of bytes written
|
|
|
|
* to it.
|
|
|
|
*
|
|
|
|
* If your Serialize or SerializationOp method has non-trivial overhead for
|
|
|
|
* serialization, it may be worthwhile to implement a specialized version for
|
|
|
|
* CSizeComputer, which uses the s.seek() method to record bytes that would
|
|
|
|
* be written instead.
|
|
|
|
*/
|
2014-07-10 20:16:58 +02:00
|
|
|
class CSizeComputer
|
|
|
|
{
|
|
|
|
protected:
|
|
|
|
size_t nSize;
|
|
|
|
|
2016-11-09 12:32:57 +01:00
|
|
|
const int nType;
|
|
|
|
const int nVersion;
|
2014-07-10 20:16:58 +02:00
|
|
|
public:
|
|
|
|
CSizeComputer(int nTypeIn, int nVersionIn) : nSize(0), nType(nTypeIn), nVersion(nVersionIn) {}
|
|
|
|
|
2016-11-09 12:32:57 +01:00
|
|
|
void write(const char *psz, size_t _nSize)
|
|
|
|
{
|
|
|
|
this->nSize += _nSize;
|
|
|
|
}
|
|
|
|
|
|
|
|
/** Pretend _nSize bytes are written, without specifying them. */
|
|
|
|
void seek(size_t _nSize)
|
2014-07-10 20:16:58 +02:00
|
|
|
{
|
2016-11-09 12:32:57 +01:00
|
|
|
this->nSize += _nSize;
|
2014-07-10 20:16:58 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
template<typename T>
|
|
|
|
CSizeComputer& operator<<(const T& obj)
|
|
|
|
{
|
2016-11-09 12:32:57 +01:00
|
|
|
::Serialize(*this, obj);
|
2014-07-10 20:16:58 +02:00
|
|
|
return (*this);
|
|
|
|
}
|
|
|
|
|
|
|
|
size_t size() const {
|
|
|
|
return nSize;
|
|
|
|
}
|
2016-11-09 12:32:57 +01:00
|
|
|
|
|
|
|
int GetVersion() const { return nVersion; }
|
|
|
|
int GetType() const { return nType; }
|
2014-07-10 20:16:58 +02:00
|
|
|
};
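As the comment block above explains, size computation is just serialization into a byte counter. A usage sketch, assuming the usual SER_NETWORK and PROTOCOL_VERSION constants from elsewhere in the codebase:
```
#include <cstdint>
#include <string>

// Sketch: no buffer is allocated; write() only bumps the running total.
size_t ExampleSerializedSize()
{
    CSizeComputer sc(SER_NETWORK, PROTOCOL_VERSION);
    sc << uint64_t{0} << std::string("abc");
    return sc.size(); // 8 + (1 + 3) = 12 bytes
}
```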
|
|
|
|
|
2017-07-27 16:28:05 +02:00
|
|
|
template<typename Stream>
|
2016-11-09 12:32:57 +01:00
|
|
|
void SerializeMany(Stream& s)
|
2017-07-27 16:28:05 +02:00
|
|
|
{
|
|
|
|
}
|
|
|
|
|
|
|
|
template<typename Stream, typename Arg>
|
2016-11-09 12:32:57 +01:00
|
|
|
void SerializeMany(Stream& s, Arg&& arg)
|
2017-07-27 16:28:05 +02:00
|
|
|
{
|
2016-11-09 12:32:57 +01:00
|
|
|
::Serialize(s, std::forward<Arg>(arg));
|
2017-07-27 16:28:05 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
template<typename Stream, typename Arg, typename... Args>
|
2016-11-09 12:32:57 +01:00
|
|
|
void SerializeMany(Stream& s, Arg&& arg, Args&&... args)
|
2017-07-27 16:28:05 +02:00
|
|
|
{
|
2016-11-09 12:32:57 +01:00
|
|
|
::Serialize(s, std::forward<Arg>(arg));
|
|
|
|
::SerializeMany(s, std::forward<Args>(args)...);
|
2017-07-27 16:28:05 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
template<typename Stream>
|
2016-11-09 12:32:57 +01:00
|
|
|
inline void UnserializeMany(Stream& s)
|
2017-07-27 16:28:05 +02:00
|
|
|
{
|
|
|
|
}
|
|
|
|
|
|
|
|
template<typename Stream, typename Arg>
|
2016-11-09 12:32:57 +01:00
|
|
|
inline void UnserializeMany(Stream& s, Arg& arg)
|
2017-07-27 16:28:05 +02:00
|
|
|
{
|
2016-11-09 12:32:57 +01:00
|
|
|
::Unserialize(s, arg);
|
2017-07-27 16:28:05 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
template<typename Stream, typename Arg, typename... Args>
|
2016-11-09 12:32:57 +01:00
|
|
|
inline void UnserializeMany(Stream& s, Arg& arg, Args&... args)
|
2017-07-27 16:28:05 +02:00
|
|
|
{
|
2016-11-09 12:32:57 +01:00
|
|
|
::Unserialize(s, arg);
|
|
|
|
::UnserializeMany(s, args...);
|
2017-07-27 16:28:05 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
template<typename Stream, typename... Args>
|
2016-11-09 12:32:57 +01:00
|
|
|
inline void SerReadWriteMany(Stream& s, CSerActionSerialize ser_action, Args&&... args)
|
2017-07-27 16:28:05 +02:00
|
|
|
{
|
2016-11-09 12:32:57 +01:00
|
|
|
::SerializeMany(s, std::forward<Args>(args)...);
|
2017-07-27 16:28:05 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
template<typename Stream, typename... Args>
|
2016-11-09 12:32:57 +01:00
|
|
|
inline void SerReadWriteMany(Stream& s, CSerActionUnserialize ser_action, Args&... args)
|
|
|
|
{
|
|
|
|
::UnserializeMany(s, args...);
|
|
|
|
}
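SerReadWriteMany is the variadic counterpart of SerReadWrite: the recursion above peels one argument per call, so members are processed strictly in the order listed. This is roughly what a READWRITE(a, b, c)-style call boils down to (a sketch, not the literal macro expansion):
```
#include <cstdint>
#include <vector>

// Hypothetical three-field serialization op built on the variadic helper.
template <typename Stream, typename Operation>
void SerializationOpSketch(Stream& s, Operation ser_action,
                           uint32_t& a, uint64_t& b, std::vector<unsigned char>& c)
{
    // Equivalent to three single-field SerReadWrite calls, in this order.
    SerReadWriteMany(s, ser_action, a, b, c);
}
```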
|
|
|
|
|
|
|
|
template<typename I>
|
|
|
|
inline void WriteVarInt(CSizeComputer &s, I n)
|
|
|
|
{
|
|
|
|
s.seek(GetSizeOfVarInt<I>(n));
|
|
|
|
}
|
|
|
|
|
|
|
|
inline void WriteCompactSize(CSizeComputer &s, uint64_t nSize)
|
|
|
|
{
|
|
|
|
s.seek(GetSizeOfCompactSize(nSize));
|
|
|
|
}
|
|
|
|
|
|
|
|
template <typename T>
|
2019-01-16 16:24:49 +01:00
|
|
|
size_t GetSerializeSize(const T& t, int nType, int nVersion)
|
2016-11-09 12:32:57 +01:00
|
|
|
{
|
|
|
|
return (CSizeComputer(nType, nVersion) << t).size();
|
|
|
|
}
|
|
|
|
|
|
|
|
template <typename S, typename T>
|
|
|
|
size_t GetSerializeSize(const S& s, const T& t)
|
2017-07-27 16:28:05 +02:00
|
|
|
{
|
2016-11-09 12:32:57 +01:00
|
|
|
return (CSizeComputer(s.GetType(), s.GetVersion()) << t).size();
|
2017-07-27 16:28:05 +02:00
|
|
|
}
|
|
|
|
|
2014-08-28 22:21:03 +02:00
|
|
|
#endif // BITCOIN_SERIALIZE_H
|