Avoid linking into free list on block alloc
Also use increasing batch size
greg7mdp committed Dec 16, 2024
1 parent d127509 commit 54950d3
Showing 2 changed files with 44 additions and 35 deletions.
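Both files move to the same two-tier scheme: a freshly obtained block is consumed by bumping a pointer through [_block_start, _block_end), and the free list is only ever fed by deallocation, so a new block no longer has to be threaded into the free list node by node. Below is a minimal sketch of the pattern, assuming ordinary pointers and malloc in place of bip::offset_ptr and the boost::interprocess segment manager; bump_freelist_allocator is a hypothetical name for illustration only.

// Simplified illustration of this commit's allocation scheme; not the actual
// chainbase allocator, which lives inside a shared-memory segment.
#include <cassert>
#include <cstddef>
#include <cstdlib>

template <typename T>
class bump_freelist_allocator {
   struct list_item { list_item* _next; };
   static_assert(sizeof(T) >= sizeof(list_item), "Too small for free list");

public:
   T* allocate() {
      if (_block_start == _block_end && _freelist == nullptr)
         get_some(_allocation_batch_size);
      if (_block_start < _block_end) {           // fast path: bump into the current block
         T* result = reinterpret_cast<T*>(_block_start);
         _block_start += sizeof(T);
         return result;
      }
      assert(_freelist != nullptr);              // otherwise reuse a previously freed node
      list_item* item = _freelist;
      _freelist = item->_next;
      return reinterpret_cast<T*>(item);
   }

   void deallocate(T* p) {                       // only freed nodes ever enter the free list
      auto* item = reinterpret_cast<list_item*>(p);
      item->_next = _freelist;
      _freelist = item;
   }

private:
   void get_some(std::size_t num_to_alloc) {
      // Blocks are intentionally never returned in this sketch; the real
      // allocator likewise keeps them inside the segment.
      _block_start = static_cast<char*>(std::malloc(sizeof(T) * num_to_alloc));
      _block_end   = _block_start + sizeof(T) * num_to_alloc;
      if (_allocation_batch_size < 512)          // cap mirrors max_allocation_batch_size
         _allocation_batch_size *= 2;            // next refill will be twice as large
   }

   char*       _block_start = nullptr;
   char*       _block_end   = nullptr;
   list_item*  _freelist    = nullptr;
   std::size_t _allocation_batch_size = 4;
};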
include/chainbase/chainbase_node_allocator.hpp — 39 changes: 22 additions & 17 deletions
@@ -26,9 +26,15 @@ namespace chainbase {

       pointer allocate(std::size_t num) {
          if (num == 1) {
-            if (_freelist == nullptr) {
-               get_some(allocation_batch_size);
+            if (_block_start == _block_end && _freelist == nullptr) {
+               get_some(_allocation_batch_size);
             }
+            if (_block_start < _block_end) {
+               pointer result = pointer{static_cast<T*>(static_cast<void*>(_block_start.get()))};
+               _block_start += sizeof(T);
+               return result;
+            }
+            assert(_freelist != nullptr);
             list_item* result = &*_freelist;
             _freelist = _freelist->_next;
             result->~list_item();
@@ -49,41 +55,40 @@
       }

       void preallocate(std::size_t num) {
-         if (num >= 2 * allocation_batch_size)
+         if (num >= 2 * _allocation_batch_size)
             get_some(((num - _freelist_size) + 7) & ~7);
       }

       bool operator==(const chainbase_node_allocator& other) const { return this == &other; }
       bool operator!=(const chainbase_node_allocator& other) const { return this != &other; }
       segment_manager* get_segment_manager() const { return _manager.get(); }
-      size_t freelist_memory_usage() const { return _freelist_size * sizeof(T); }
+      size_t freelist_memory_usage() const { return _freelist_size * sizeof(T) + (_block_end - _block_start); }

    private:
       template<typename T2, typename S2>
       friend class chainbase_node_allocator;

-      void get_some(size_t allocation_batch_size) {
+      void get_some(size_t num_to_alloc) {
          static_assert(sizeof(T) >= sizeof(list_item), "Too small for free list");
          static_assert(sizeof(T) % alignof(list_item) == 0, "Bad alignment for free list");

-         char* result = (char*)_manager->allocate(sizeof(T) * allocation_batch_size);
-         _freelist_size += allocation_batch_size;
-         auto old_freelist = _freelist;
-         _freelist = bip::offset_ptr<list_item>{(list_item*)result};
-         for(unsigned i = 0; i < allocation_batch_size-1; ++i) {
-            char* next = result + sizeof(T);
-            new(result) list_item{bip::offset_ptr<list_item>{(list_item*)next}};
-            result = next;
-         }
-         new(result) list_item{old_freelist};
+         _block_start = static_cast<char*>(_manager->allocate(sizeof(T) * num_to_alloc));
+         _block_end = _block_start + sizeof(T) * num_to_alloc;
+
+         if (_allocation_batch_size < max_allocation_batch_size)
+            _allocation_batch_size *= 2;
       }

       struct list_item { bip::offset_ptr<list_item> _next; };

-      static constexpr size_t allocation_batch_size = 512;
+      static constexpr size_t max_allocation_batch_size = 512;

+      bip::offset_ptr<char>      _block_start;
+      bip::offset_ptr<char>      _block_end;
-      bip::offset_ptr<list_item> _freelist{};
       bip::offset_ptr<ss_allocator_t> _ss_alloc;
       bip::offset_ptr<segment_manager> _manager;
+      bip::offset_ptr<list_item> _freelist{};
+      size_t _allocation_batch_size = 4;
       size_t _freelist_size = 0;
    };
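The other half of the change is the growing batch size: instead of always carving out 512 nodes (and previously paying to link all 512 into the free list up front), get_some now starts with a 4-node block and doubles on each refill until it reaches the 512-node cap, so sparsely used node types stay cheap. A small standalone illustration of the resulting refill sizes:

#include <cstddef>
#include <cstdio>

int main() {
   std::size_t batch = 4;                       // _allocation_batch_size starts at 4
   for (int refill = 1; refill <= 10; ++refill) {
      std::printf("refill %2d: %3zu nodes\n", refill, batch);
      if (batch < 512)                          // max_allocation_batch_size
         batch *= 2;
   }
   // prints 4, 8, 16, 32, 64, 128, 256, 512, 512, 512
}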
include/chainbase/small_size_allocator.hpp — 40 changes: 22 additions & 18 deletions
@@ -33,14 +33,20 @@ class allocator {
    using pointer = backing_allocator::pointer;

    allocator(backing_allocator back_alloc, std::size_t sz)
-      : _back_alloc(back_alloc)
-      , _sz(sz) {}
+      : _sz(sz)
+      , _back_alloc(back_alloc) {}

    pointer allocate() {
       std::lock_guard g(_m);
-      if (_freelist == nullptr) {
+      if (_block_start == _block_end && _freelist == nullptr) {
          get_some();
       }
+      if (_block_start < _block_end) {
+         pointer result = pointer{_block_start.get()};
+         _block_start += _sz;
+         return result;
+      }
+      assert(_freelist != nullptr);
       list_item* result = &*_freelist;
       _freelist = _freelist->_next;
       result->~list_item();
@@ -56,7 +62,7 @@ class allocator {

    size_t freelist_memory_usage() const {
       std::lock_guard g(_m);
-      return _freelist_size * _sz;
+      return _freelist_size * _sz + (_block_end - _block_start);
    }

    size_t num_blocks_allocated() const {
@@ -66,29 +72,27 @@ class allocator {

 private:
    struct list_item { bip::offset_ptr<list_item> _next; };
-   static constexpr size_t allocation_batch_size = 512;
+   static constexpr size_t max_allocation_batch_size = 512;

    void get_some() {
       assert(_sz >= sizeof(list_item));
       assert(_sz % alignof(list_item) == 0);

-      char* result = (char*)&*_back_alloc.allocate(_sz * allocation_batch_size);
-      _freelist_size += allocation_batch_size;
+      _block_start = _back_alloc.allocate(_sz * _allocation_batch_size);
+      _block_end = _block_start + _sz * _allocation_batch_size;
       ++_num_blocks_allocated;
-      _freelist = bip::offset_ptr<list_item>{(list_item*)result};
-      for (unsigned i = 0; i < allocation_batch_size - 1; ++i) {
-         char* next = result + _sz;
-         new (result) list_item{bip::offset_ptr<list_item>{(list_item*)next}};
-         result = next;
-      }
-      new (result) list_item{nullptr};
+      if (_allocation_batch_size < max_allocation_batch_size)
+         _allocation_batch_size *= 2;
    }

-   backing_allocator          _back_alloc;
    std::size_t                _sz;
    bip::offset_ptr<list_item> _freelist;
+   bip::offset_ptr<char>      _block_start;
+   bip::offset_ptr<char>      _block_end;
+   backing_allocator          _back_alloc;
+   size_t                     _allocation_batch_size = 4;
-   size_t                     _freelist_size = 0;
-   size_t                     _num_blocks_allocated = 0; // number of blocks allocated from boost segment allocator
+   size_t                     _freelist_size = 0;
+   size_t                     _num_blocks_allocated = 0; // number of blocks allocated from boost segment allocator
    mutable std::mutex         _m;
 };
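Note that freelist_memory_usage now reports the freed nodes plus the unconsumed tail of the current block: with _sz == 32, ten nodes on the free list, and five bump slots remaining, it returns 10 * 32 + 5 * 32 = 480 bytes. The bump region is counted because, like the free list, it is memory already taken from the backing allocator but not yet handed out.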

@@ -104,7 +108,7 @@
 // - Any requested size greater than `num_allocators * size_increment` will be routed
 //   to the backing_allocator
 // ---------------------------------------------------------------------------------------
-template <class backing_allocator, size_t num_allocators = 64, size_t size_increment = 8>
+template <class backing_allocator, size_t num_allocators = 128, size_t size_increment = 8>
    requires ((size_increment & (size_increment - 1)) == 0) // power of two
 class small_size_allocator {
 public:
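With num_allocators raised from 64 to 128 and size_increment still 8, the per-size allocators now cover requests up to 128 * 8 = 1024 bytes instead of 64 * 8 = 512; larger requests continue to go straight to the backing_allocator.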
