dash/src/memusage.h
// Copyright (c) 2015-2020 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#ifndef BITCOIN_MEMUSAGE_H
#define BITCOIN_MEMUSAGE_H

#include <indirectmap.h>
#include <prevector.h>
#include <support/allocators/pool.h>

#include <stdlib.h>

#include <cassert>
#include <map>
#include <memory>
#include <set>
#include <vector>
#include <unordered_map>
#include <unordered_set>

namespace memusage
{
/** Compute the total memory used by allocating alloc bytes. */
static size_t MallocUsage(size_t alloc);

/** Dynamic memory usage for built-in types is zero. */
static inline size_t DynamicUsage(const int8_t& v) { return 0; }
static inline size_t DynamicUsage(const uint8_t& v) { return 0; }
static inline size_t DynamicUsage(const int16_t& v) { return 0; }
static inline size_t DynamicUsage(const uint16_t& v) { return 0; }
static inline size_t DynamicUsage(const int32_t& v) { return 0; }
static inline size_t DynamicUsage(const uint32_t& v) { return 0; }
static inline size_t DynamicUsage(const int64_t& v) { return 0; }
static inline size_t DynamicUsage(const uint64_t& v) { return 0; }
static inline size_t DynamicUsage(const float& v) { return 0; }
static inline size_t DynamicUsage(const double& v) { return 0; }
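// Raw pointers are not followed; the memory they point to is not treated as owned here.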
template<typename X> static inline size_t DynamicUsage(X * const &v) { return 0; }
template<typename X> static inline size_t DynamicUsage(const X * const &v) { return 0; }
/** Compute the memory used for dynamically allocated but owned data structures.
 *  For generic data types, this is *not* recursive. DynamicUsage(vector<vector<int> >)
 *  will compute the memory used for the vector<int>'s, but not for the ints inside.
 *  This is for efficiency reasons, as these functions are intended to be fast. If
 *  application data structures require more accurate inner accounting, they should
 *  iterate themselves, or use more efficient caching + updating on modification.
 */
static inline size_t MallocUsage(size_t alloc)
{
    // Measured on libc6 2.19 on Linux.
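    // On 64-bit systems this charges (alloc + 16) rounded up to a multiple of 16;
    // on 32-bit systems, (alloc + 8) rounded up to a multiple of 8. Both approximate
    // the allocator's per-allocation bookkeeping plus alignment padding.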
    if (alloc == 0) {
        return 0;
    } else if (sizeof(void*) == 8) {
        return ((alloc + 31) >> 4) << 4;
    } else if (sizeof(void*) == 4) {
        return ((alloc + 15) >> 3) << 3;
    } else {
        assert(0);
    }
}
// STL data structures
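
// Rough model of a red-black tree node (color, parent/left/right pointers, stored value),
// approximating the per-element node that std::set and std::map allocate.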
template<typename X>
struct stl_tree_node
{
private:
    int color;
    void* parent;
    void* left;
    void* right;
    X x;
};
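
// Rough model of a std::shared_ptr control block: a vtable pointer plus the use and weak counts.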
struct stl_shared_counter
{
    /* Various platforms use different sized counters here.
     * Conservatively assume that they won't be larger than size_t. */
    void* class_type;
    size_t use_count;
    size_t weak_count;
};
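
// A vector's heap buffer is charged for capacity() elements, whether or not they are in use.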
template<typename X>
static inline size_t DynamicUsage(const std::vector<X>& v)
{
    return MallocUsage(v.capacity() * sizeof(X));
}
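
// prevector: allocated_memory() is zero while the elements still fit in the in-object
// buffer, so only a prevector that has spilled to the heap contributes dynamic usage.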
template<unsigned int N, typename X, typename S, typename D>
static inline size_t DynamicUsage(const prevector<N, X, S, D>& v)
{
    return MallocUsage(v.allocated_memory());
}
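
// Each element of a std::set or std::map lives in its own heap-allocated tree node.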
template<typename X, typename Y>
static inline size_t DynamicUsage(const std::set<X, Y>& s)
{
    return MallocUsage(sizeof(stl_tree_node<X>)) * s.size();
}
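
// IncrementalDynamicUsage returns the marginal memory cost of adding one more element.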
template<typename X, typename Y>
static inline size_t IncrementalDynamicUsage(const std::set<X, Y>& s)
{
    return MallocUsage(sizeof(stl_tree_node<X>));
}

template<typename X, typename Y, typename Z>
static inline size_t DynamicUsage(const std::map<X, Y, Z>& m)
{
    return MallocUsage(sizeof(stl_tree_node<std::pair<const X, Y> >)) * m.size();
}

template<typename X, typename Y, typename Z>
static inline size_t IncrementalDynamicUsage(const std::map<X, Y, Z>& m)
{
    return MallocUsage(sizeof(stl_tree_node<std::pair<const X, Y> >));
}

// indirectmap's underlying map uses a pointer as its key
template<typename X, typename Y>
static inline size_t DynamicUsage(const indirectmap<X, Y>& m)
{
    return MallocUsage(sizeof(stl_tree_node<std::pair<const X*, Y> >)) * m.size();
}

template<typename X, typename Y>
static inline size_t IncrementalDynamicUsage(const indirectmap<X, Y>& m)
{
    return MallocUsage(sizeof(stl_tree_node<std::pair<const X*, Y> >));
}

template<typename X>
static inline size_t DynamicUsage(const std::unique_ptr<X>& p)
{
    return p ? MallocUsage(sizeof(X)) : 0;
}

template<typename X>
static inline size_t DynamicUsage(const std::shared_ptr<X>& p)
{
    // A shared_ptr can either use a single contiguous memory block for both
    // the counter and the storage (when using std::make_shared), or separate
    // allocations. We can't observe the difference, however, so assume the worst.
    return p ? MallocUsage(sizeof(X)) + MallocUsage(sizeof(stl_shared_counter)) : 0;
}
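
// Rough model of a hash table node: the stored value plus one pointer for the bucket chain.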
template<typename X>
struct unordered_node : private X
{
private:
    void* ptr;
};

template<typename X, typename Y>
static inline size_t DynamicUsage(const std::unordered_set<X, Y>& s)
{
    return MallocUsage(sizeof(unordered_node<X>)) * s.size() + MallocUsage(sizeof(void*) * s.bucket_count());
}

template<typename X, typename Y, typename Z>
static inline size_t DynamicUsage(const std::unordered_map<X, Y, Z>& m)
{
    return MallocUsage(sizeof(unordered_node<std::pair<const X, Y> >)) * m.size() + MallocUsage(sizeof(void*) * m.bucket_count());
}
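
// Overload for unordered maps that use a PoolAllocator: memory is charged per allocated
// pool chunk (plus the list nodes that track the chunks) rather than per element.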
template <class Key, class T, class Hash, class Pred, std::size_t MAX_BLOCK_SIZE_BYTES, std::size_t ALIGN_BYTES>
static inline size_t DynamicUsage(const std::unordered_map<Key, T, Hash, Pred,
    PoolAllocator<std::pair<const Key, T>, MAX_BLOCK_SIZE_BYTES, ALIGN_BYTES>>& m)
{
    auto* pool_resource = m.get_allocator().resource();

    // The allocated chunks are stored in a std::list. Size per node should
    // therefore be 3 pointers: next, previous, and a pointer to the chunk.
    size_t estimated_list_node_size = MallocUsage(sizeof(void*) * 3);
    size_t usage_resource = estimated_list_node_size * pool_resource->NumAllocatedChunks();
    size_t usage_chunks = MallocUsage(pool_resource->ChunkSizeBytes()) * pool_resource->NumAllocatedChunks();
    return usage_resource + usage_chunks + MallocUsage(sizeof(void*) * m.bucket_count());
}
} // namespace memusage

#endif // BITCOIN_MEMUSAGE_H