// Copyright (c) 2022, Nikolaos D. Bougalis <nikb@bougalis.net>

#pragma once

#include <xrpl/basics/ByteUtilities.h>
#include <xrpl/beast/type_name.h>
#include <xrpl/beast/utility/instrumentation.h>

#include <boost/align.hpp>
#include <boost/container/static_vector.hpp>
#include <boost/predef.h>

#include <algorithm>
#include <atomic>
#include <cstdint>
#include <cstring>
#include <mutex>
#include <stdexcept>
#include <vector>

#if BOOST_OS_LINUX
#include <sys/mman.h>
#endif

namespace xrpl {
26template <typename Type>
28{
29 static_assert(
30 sizeof(Type) >= sizeof(std::uint8_t*),
31 "SlabAllocator: the requested object must be larger than a pointer.");
32
33 static_assert(alignof(Type) == 8 || alignof(Type) == 4);
34
36 struct SlabBlock
37 {
38 // A mutex to protect the freelist for this block:
40
41 // A linked list of appropriately sized free buffers:
42 std::uint8_t* l_ = nullptr;
43
44 // The next memory block
46
47 // The underlying memory block:
48 std::uint8_t const* const p_ = nullptr;
49
50 // The extent of the underlying memory block:
52
54 : next_(next), p_(data), size_(size)
55 {
56 // We don't need to grab the mutex here, since we're the only
57 // ones with access at this moment.
58
59 while (data + item <= p_ + size_)
60 {
61 // Use memcpy to avoid unaligned UB
62 // (will optimize to equivalent code)
63 std::memcpy(data, &l_, sizeof(std::uint8_t*));
64 l_ = data;
65 data += item;
66 }
67 }
68
70 {
71 // Calling this destructor will release the allocated memory but
72 // will not properly destroy any objects that are constructed in
73 // the block itself.
74 }
75
76 SlabBlock(SlabBlock const& other) = delete;
78 operator=(SlabBlock const& other) = delete;
79
80 SlabBlock(SlabBlock&& other) = delete;
82 operator=(SlabBlock&& other) = delete;
83
85 bool
86 own(std::uint8_t const* p) const noexcept
87 {
88 return (p >= p_) && (p < p_ + size_);
89 }
90
92 allocate() noexcept
93 {
94 std::uint8_t* ret = nullptr; // NOLINT(misc-const-correctness)
95
96 {
97 std::lock_guard const l(m_);
98
99 ret = l_;
100
101 if (ret)
102 {
103 // Use memcpy to avoid unaligned UB
104 // (will optimize to equivalent code)
105 std::memcpy(&l_, ret, sizeof(std::uint8_t*));
106 }
107 }
108
109 return ret;
110 }
111
121 void
123 {
124 XRPL_ASSERT(own(ptr), "xrpl::SlabAllocator::SlabBlock::deallocate : own input");
125
126 std::lock_guard const l(m_);
127
128 // Use memcpy to avoid unaligned UB
129 // (will optimize to equivalent code)
130 std::memcpy(ptr, &l_, sizeof(std::uint8_t*));
131 l_ = ptr;
132 }
133 };
134
135private:
136 // A linked list of slabs
138
139 // The alignment requirements of the item we're allocating:
141
142 // The size of an item, including the extra bytes requested and
143 // any padding needed for alignment purposes:
145
146 // The size of each individual slab:
148
149public:
158 constexpr explicit SlabAllocator(
159 std::size_t extra,
160 std::size_t alloc = 0,
161 std::size_t align = 0)
162 : itemAlignment_(align ? align : alignof(Type))
163 , itemSize_(boost::alignment::align_up(sizeof(Type) + extra, itemAlignment_))
164 , slabSize_(alloc)
165 {
166 XRPL_ASSERT(
167 (itemAlignment_ & (itemAlignment_ - 1)) == 0,
168 "xrpl::SlabAllocator::SlabAllocator : valid alignment");
169 }
170
171 SlabAllocator(SlabAllocator const& other) = delete;
173 operator=(SlabAllocator const& other) = delete;
174
175 SlabAllocator(SlabAllocator&& other) = delete;
177 operator=(SlabAllocator&& other) = delete;
178
180 {
181 // FIXME: We can't destroy the memory blocks we've allocated, because
182 // we can't be sure that they are not being used. Cleaning the
183 // shutdown process up could make this possible.
184 }
185
187 constexpr std::size_t
188 size() const noexcept
189 {
190 return itemSize_;
191 }
192
199 allocate() noexcept
200 {
201 auto slab = slabs_.load();
202
203 while (slab != nullptr)
204 {
205 if (auto ret = slab->allocate())
206 return ret;
207
208 slab = slab->next_;
209 }
210
211 // No slab can satisfy our request, so we attempt to allocate a new
212 // one here:
213 std::size_t const size = slabSize_;
214
215 // We want to allocate the memory at a 2 MiB boundary, to make it
216 // possible to use hugepage mappings on Linux:
217 auto buf = boost::alignment::aligned_alloc(megabytes(std::size_t(2)), size);
218 if (!buf) [[unlikely]]
219 return nullptr;
220
221#if BOOST_OS_LINUX
222 // When allocating large blocks, attempt to leverage Linux's
223 // transparent hugepage support. It is unclear and difficult
224 // to accurately determine if doing this impacts performance
225 // enough to justify using platform-specific tricks.
226 if (size >= megabytes(std::size_t(4)))
227 madvise(buf, size, MADV_HUGEPAGE);
228#endif
229
230 // We need to carve out a bit of memory for the slab header
231 // and then align the rest appropriately:
232 auto slabData =
233 reinterpret_cast<void*>(reinterpret_cast<std::uint8_t*>(buf) + sizeof(SlabBlock));
234 auto slabSize = size - sizeof(SlabBlock);
235
236 // This operation is essentially guaranteed not to fail but
237 // let's be careful anyways.
238 if (!boost::alignment::align(itemAlignment_, itemSize_, slabData, slabSize))
239 {
240 boost::alignment::aligned_free(buf);
241 return nullptr;
242 }
243
244 slab = new (buf) SlabBlock(
245 slabs_.load(), reinterpret_cast<std::uint8_t*>(slabData), slabSize, itemSize_);
246
247 // Link the new slab
248 while (!slabs_.compare_exchange_weak(
250 {
251 ; // Nothing to do
252 }
253
254 return slab->allocate();
255 }
256
264 bool
266 {
267 XRPL_ASSERT(
268 ptr,
269 "xrpl::SlabAllocator::SlabAllocator::deallocate : non-null "
270 "input");
271
272 for (auto slab = slabs_.load(); slab != nullptr; slab = slab->next_)
273 {
274 if (slab->own(ptr))
275 {
276 slab->deallocate(ptr);
277 return true;
278 }
279 }
280
281 return false;
282 }
283};
286template <typename Type>
288{
289private:
290 // The list of allocators that belong to this set
291 boost::container::static_vector<SlabAllocator<Type>, 64> allocators_;
292
294
295public:
297 {
298 friend class SlabAllocatorSet;
299
300 private:
304
305 public:
306 constexpr SlabConfig(
307 std::size_t extra_,
308 std::size_t alloc_ = 0,
309 std::size_t align_ = alignof(Type))
310 : extra(extra_), alloc(alloc_), align(align_)
311 {
312 }
313 };
314
316 {
317 // Ensure that the specified allocators are sorted from smallest to
318 // largest by size:
319 std::sort(std::begin(cfg), std::end(cfg), [](SlabConfig const& a, SlabConfig const& b) {
320 return a.extra < b.extra;
321 });
322
323 // We should never have two slabs of the same size
325 std::begin(cfg), std::end(cfg), [](SlabConfig const& a, SlabConfig const& b) {
326 return a.extra == b.extra;
327 }) != cfg.end())
328 {
329 throw std::runtime_error(
330 "SlabAllocatorSet<" + beast::type_name<Type>() + ">: duplicate slab size");
331 }
332
333 for (auto const& c : cfg)
334 {
335 auto& a = allocators_.emplace_back(c.extra, c.alloc, c.align);
336
337 if (a.size() > maxSize_)
338 maxSize_ = a.size();
339 }
340 }
341
342 SlabAllocatorSet(SlabAllocatorSet const& other) = delete;
344 operator=(SlabAllocatorSet const& other) = delete;
345
348 operator=(SlabAllocatorSet&& other) = delete;
349
351 {
352 }
353
363 allocate(std::size_t extra) noexcept
364 {
365 if (auto const size = sizeof(Type) + extra; size <= maxSize_)
366 {
367 for (auto& a : allocators_)
368 {
369 if (a.size() >= size)
370 return a.allocate();
371 }
372 }
373
374 return nullptr;
375 }
376
384 bool
386 {
387 for (auto& a : allocators_)
388 {
389 if (a.deallocate(ptr))
390 return true;
391 }
392
393 return false;
394 }
395};

} // namespace xrpl