2013-04-13 07:13:08 +02:00
|
|
|
#include "serialize.h"
|
2012-06-15 14:19:11 +02:00
|
|
|
|
2013-04-13 07:13:08 +02:00
|
|
|
#include <stdint.h>
|
2012-06-15 14:19:11 +02:00
|
|
|
|
2013-04-13 07:13:08 +02:00
|
|
|
#include <boost/test/unit_test.hpp>
|
2012-06-15 14:19:11 +02:00
|
|
|
|
|
|
|
using namespace std;
|
|
|
|
|
|
|
|
BOOST_AUTO_TEST_SUITE(serialize_tests)
|
|
|
|
|
|
|
|
// Round-trip values through the VARINT serializer: at every step the
// stream length must agree with GetSerializeSize, and decoding must
// reproduce the exact values that were encoded.
BOOST_AUTO_TEST_CASE(varints)
{
    // encode
    CDataStream ss(SER_DISK, 0);
    CDataStream::size_type expected_size = 0;

    // Small signed values, densely sampled.
    for (int n = 0; n < 100000; n++) {
        ss << VARINT(n);
        expected_size += ::GetSerializeSize(VARINT(n), 0, 0);
        BOOST_CHECK(expected_size == ss.size());
    }

    // Large 64-bit values, sampled with a large prime stride.
    for (uint64_t n = 0; n < 100000000000ULL; n += 999999937) {
        ss << VARINT(n);
        expected_size += ::GetSerializeSize(VARINT(n), 0, 0);
        BOOST_CHECK(expected_size == ss.size());
    }

    // decode
    for (int n = 0; n < 100000; n++) {
        int decoded = -1; // poison value; must be overwritten by the read
        ss >> VARINT(decoded);
        BOOST_CHECK_MESSAGE(n == decoded, "decoded:" << decoded << " expected:" << n);
    }

    for (uint64_t n = 0; n < 100000000000ULL; n += 999999937) {
        uint64_t decoded = -1; // poison value; must be overwritten by the read
        ss >> VARINT(decoded);
        BOOST_CHECK_MESSAGE(n == decoded, "decoded:" << decoded << " expected:" << n);
    }
}
|
|
|
|
|
|
|
|
// Round-trip every power of two (and its predecessor) up to MAX_SIZE
// through WriteCompactSize/ReadCompactSize, covering all the encoding
// width boundaries.
BOOST_AUTO_TEST_CASE(compactsize)
{
    CDataStream ss(SER_DISK, 0);
    vector<char>::size_type i, readback;

    // Write pass: i-1 and i straddle each width transition.
    for (i = 1; i <= MAX_SIZE; i *= 2) {
        WriteCompactSize(ss, i-1);
        WriteCompactSize(ss, i);
    }

    // Read pass: values must come back in the same order, unchanged.
    for (i = 1; i <= MAX_SIZE; i *= 2) {
        readback = ReadCompactSize(ss);
        BOOST_CHECK_MESSAGE((i-1) == readback, "decoded:" << readback << " expected:" << (i-1));
        readback = ReadCompactSize(ss);
        BOOST_CHECK_MESSAGE(i == readback, "decoded:" << readback << " expected:" << i);
    }
}
|
|
|
|
|
|
|
|
// Predicate for BOOST_CHECK_EXCEPTION: returns true when the caught
// ios_base::failure carries the exact message thrown by a non-canonical
// ReadCompactSize(), either bare or with the iostream_category suffix
// that some standard libraries append to failure::what().
static bool isCanonicalException(const std::ios_base::failure& ex)
{
    const std::string expected("non-canonical ReadCompactSize()");
    const std::string actual(ex.what());
    if (actual == expected)
        return true;
    // OSX Apple LLVM version 5.0 (OSX 10.9)
    return actual == expected + ": unspecified iostream_category error";
}
|
|
|
|
|
2013-12-15 15:25:41 +01:00
|
|
|
|
Reject non-canonically-encoded sizes
The length of vectors, maps, sets, etc are serialized using
Write/ReadCompactSize -- which, unfortunately, do not use a
unique encoding.
So deserializing and then re-serializing a transaction (for example)
can give you different bits than you started with. That doesn't
cause any problems that we are aware of, but it is exactly the type
of subtle mismatch that can lead to exploits.
With this pull, reading a non-canonical CompactSize throws an
exception, which means nodes will ignore 'tx' or 'block' or
other messages that are not properly encoded.
Please check my logic... but this change is safe with respect to
causing a network split. Old clients that receive
non-canonically-encoded transactions or blocks deserialize
them into CTransaction/CBlock structures in memory, and then
re-serialize them before relaying them to peers.
And please check my logic with respect to causing a blockchain
split: there are no CompactSize fields in the block header, so
the block hash is always canonical. The merkle root in the block
header is computed on a vector<CTransaction>, so
any non-canonical encoding of the transactions in 'tx' or 'block'
messages is erased as they are read into memory by old clients,
and does not affect the block hash. And, as noted above, old
clients re-serialize (with canonical encoding) 'tx' and 'block'
messages before relaying to peers.
2013-08-07 04:21:34 +02:00
|
|
|
// Feed deliberately over-long (non-canonical) CompactSize encodings to
// ReadCompactSize and verify each one is rejected with the expected
// exception; a minimal canonical encoding must still be accepted.
BOOST_AUTO_TEST_CASE(noncanonical)
{
    CDataStream ss(SER_DISK, 0);
    vector<char>::size_type n;

    // 0 padded out to a 3-byte encoding must be rejected:
    ss.write("\xfd\x00\x00", 3);
    BOOST_CHECK_EXCEPTION(ReadCompactSize(ss), std::ios_base::failure, isCanonicalException);

    // 0xfc fits in one byte, so a 3-byte encoding must be rejected:
    ss.write("\xfd\xfc\x00", 3);
    BOOST_CHECK_EXCEPTION(ReadCompactSize(ss), std::ios_base::failure, isCanonicalException);

    // 0xfd is the smallest value that legitimately needs 3 bytes:
    ss.write("\xfd\xfd\x00", 3);
    n = ReadCompactSize(ss);
    BOOST_CHECK(n == 0xfd);

    // 0 padded out to a 5-byte encoding must be rejected:
    ss.write("\xfe\x00\x00\x00\x00", 5);
    BOOST_CHECK_EXCEPTION(ReadCompactSize(ss), std::ios_base::failure, isCanonicalException);

    // 0xffff fits in 3 bytes, so a 5-byte encoding must be rejected:
    ss.write("\xfe\xff\xff\x00\x00", 5);
    BOOST_CHECK_EXCEPTION(ReadCompactSize(ss), std::ios_base::failure, isCanonicalException);

    // 0 padded out to a 9-byte encoding must be rejected:
    ss.write("\xff\x00\x00\x00\x00\x00\x00\x00\x00", 9);
    BOOST_CHECK_EXCEPTION(ReadCompactSize(ss), std::ios_base::failure, isCanonicalException);

    // 0x01ffffff fits in 5 bytes, so a 9-byte encoding must be rejected:
    ss.write("\xff\xff\xff\xff\x01\x00\x00\x00\x00", 9);
    BOOST_CHECK_EXCEPTION(ReadCompactSize(ss), std::ios_base::failure, isCanonicalException);
}
|
|
|
|
|
2013-10-29 02:16:27 +01:00
|
|
|
// Exercise CDataStream's vector-like mutation API: insert and erase at
// the beginning, middle, and end, then verify GetAndClear empties it.
BOOST_AUTO_TEST_CASE(insert_delete)
{
    CDataStream ss(SER_DISK, 0);
    BOOST_CHECK_EQUAL(ss.size(), 0);

    // Seed with four known bytes.
    ss.write("\x00\x01\x02\xff", 4);
    BOOST_CHECK_EQUAL(ss.size(), 4);

    char marker = (char)11;

    // Insert at the front: marker leads, old bytes shift right.
    ss.insert(ss.begin(), marker);
    BOOST_CHECK_EQUAL(ss.size(), 5);
    BOOST_CHECK_EQUAL(ss[0], marker);
    BOOST_CHECK_EQUAL(ss[1], 0);

    // Insert at the back: marker trails, previous last byte intact.
    ss.insert(ss.end(), marker);
    BOOST_CHECK_EQUAL(ss.size(), 6);
    BOOST_CHECK_EQUAL(ss[4], (char)0xff);
    BOOST_CHECK_EQUAL(ss[5], marker);

    // Insert in the middle.
    ss.insert(ss.begin()+2, marker);
    BOOST_CHECK_EQUAL(ss.size(), 7);
    BOOST_CHECK_EQUAL(ss[2], marker);

    // Erase from the front.
    ss.erase(ss.begin());
    BOOST_CHECK_EQUAL(ss.size(), 6);
    BOOST_CHECK_EQUAL(ss[0], 0);

    // Erase from the back.
    ss.erase(ss.begin()+ss.size()-1);
    BOOST_CHECK_EQUAL(ss.size(), 5);
    BOOST_CHECK_EQUAL(ss[4], (char)0xff);

    // Erase from the middle; the original four bytes remain, in order.
    ss.erase(ss.begin()+1);
    BOOST_CHECK_EQUAL(ss.size(), 4);
    BOOST_CHECK_EQUAL(ss[0], 0);
    BOOST_CHECK_EQUAL(ss[1], 1);
    BOOST_CHECK_EQUAL(ss[2], 2);
    BOOST_CHECK_EQUAL(ss[3], (char)0xff);

    // GetAndClear must hand the contents off and leave the stream empty.
    CSerializeData d;
    ss.GetAndClear(d);
    BOOST_CHECK_EQUAL(ss.size(), 0);
}
|
|
|
|
|
2012-06-15 14:19:11 +02:00
|
|
|
BOOST_AUTO_TEST_SUITE_END()
|