rippled
SlabAllocator.h
// Copyright (c) 2022, Nikolaos D. Bougalis <nikb@bougalis.net>

#pragma once

#include <xrpl/basics/ByteUtilities.h>
#include <xrpl/beast/type_name.h>
#include <xrpl/beast/utility/instrumentation.h>

#include <boost/align.hpp>
#include <boost/container/static_vector.hpp>
#include <boost/predef.h>

#include <algorithm>
#include <atomic>
#include <cstdint>
#include <cstring>
#include <mutex>
#include <stdexcept>
#include <vector>

#if BOOST_OS_LINUX
#include <sys/mman.h>
#endif

namespace xrpl {

template <typename Type>
class SlabAllocator
{
    static_assert(
        sizeof(Type) >= sizeof(std::uint8_t*),
        "SlabAllocator: the requested object must be larger than a pointer.");

    static_assert(alignof(Type) == 8 || alignof(Type) == 4);

    /** A block of memory that is owned by a slab allocator. */
    struct SlabBlock
    {
        // A mutex to protect the freelist for this block:
        std::mutex m_;

        // A linked list of appropriately sized free buffers:
        std::uint8_t* l_ = nullptr;

        // The next memory block:
        SlabBlock* next_;

        // The underlying memory block:
        std::uint8_t const* const p_ = nullptr;

        // The extent of the underlying memory block:
        std::size_t const size_;

        SlabBlock(
            SlabBlock* next,
            std::uint8_t* data,
            std::size_t size,
            std::size_t item)
            : next_(next), p_(data), size_(size)
        {
            // We don't need to grab the mutex here, since we're the only
            // ones with access at this moment.

            while (data + item <= p_ + size_)
            {
                // Use memcpy to avoid unaligned UB
                // (will optimize to equivalent code)
                std::memcpy(data, &l_, sizeof(std::uint8_t*));
                l_ = data;
                data += item;
            }
        }
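
        // The layout the constructor above produces: l_ ends up pointing at
        // the last chunk carved from the block, each chunk's first
        // sizeof(std::uint8_t*) bytes hold the address of the chunk carved
        // before it, and the first chunk holds nullptr, terminating the
        // freelist.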

        ~SlabBlock()
        {
            // Calling this destructor will release the allocated memory but
            // will not properly destroy any objects that are constructed in
            // the block itself.
        }

        SlabBlock(SlabBlock const& other) = delete;
        SlabBlock&
        operator=(SlabBlock const& other) = delete;

        SlabBlock(SlabBlock&& other) = delete;
        SlabBlock&
        operator=(SlabBlock&& other) = delete;

        /** Determines whether the given pointer belongs to this allocator. */
        bool
        own(std::uint8_t const* p) const noexcept
        {
            return (p >= p_) && (p < p_ + size_);
        }

        std::uint8_t*
        allocate() noexcept
        {
            std::uint8_t* ret;

            {
                std::lock_guard l(m_);

                ret = l_;

                if (ret)
                {
                    // Use memcpy to avoid unaligned UB
                    // (will optimize to equivalent code)
                    std::memcpy(&l_, ret, sizeof(std::uint8_t*));
                }
            }

            return ret;
        }

        /** Return an item to this allocator's freelist.

            @param ptr The pointer to the chunk of memory being returned.
         */
        void
        deallocate(std::uint8_t* ptr) noexcept
        {
            XRPL_ASSERT(
                own(ptr),
                "xrpl::SlabAllocator::SlabBlock::deallocate : own input");

            std::lock_guard l(m_);

            // Use memcpy to avoid unaligned UB
            // (will optimize to equivalent code)
            std::memcpy(ptr, &l_, sizeof(std::uint8_t*));
            l_ = ptr;
        }
    };

private:
    // A linked list of slabs
    std::atomic<SlabBlock*> slabs_ = nullptr;

    // The alignment requirements of the item we're allocating:
    std::size_t const itemAlignment_;

    // The size of an item, including the extra bytes requested and
    // any padding needed for alignment purposes:
    std::size_t const itemSize_;

    // The size of each individual slab:
    std::size_t const slabSize_;

public:
    /** Constructs a slab allocator able to allocate objects of a fixed size.

        @param extra The number of extra bytes per item, beyond sizeof(Type).
        @param alloc The size of each slab, in bytes.
        @param align The alignment of returned items; 0 means alignof(Type).
     */
    constexpr explicit SlabAllocator(
        std::size_t extra,
        std::size_t alloc = 0,
        std::size_t align = 0)
        : itemAlignment_(align ? align : alignof(Type))
        , itemSize_(
              boost::alignment::align_up(sizeof(Type) + extra, itemAlignment_))
        , slabSize_(alloc)
    {
        XRPL_ASSERT(
            (itemAlignment_ & (itemAlignment_ - 1)) == 0,
            "xrpl::SlabAllocator::SlabAllocator : valid alignment");
    }
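
    // Illustrative sizing (hypothetical numbers): for a 44-byte Type with
    // extra == 0 and the default alignment of 8, itemSize_ is
    // align_up(44, 8) == 48, so each item in a slab occupies 48 bytes.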

    SlabAllocator(SlabAllocator const& other) = delete;
    SlabAllocator&
    operator=(SlabAllocator const& other) = delete;

    SlabAllocator(SlabAllocator&& other) = delete;
    SlabAllocator&
    operator=(SlabAllocator&& other) = delete;

    ~SlabAllocator()
    {
        // FIXME: We can't destroy the memory blocks we've allocated, because
        // we can't be sure that they are not being used. Cleaning the
        // shutdown process up could make this possible.
    }

    /** Returns the size of the memory block this allocator returns. */
    constexpr std::size_t
    size() const noexcept
    {
        return itemSize_;
    }

    /** Returns a suitably aligned pointer, if one is available.

        @return a pointer to a block of memory from the allocator, or
                nullptr if the allocator can't satisfy this request.
     */
    std::uint8_t*
    allocate() noexcept
    {
        auto slab = slabs_.load();

        while (slab != nullptr)
        {
            if (auto ret = slab->allocate())
                return ret;

            slab = slab->next_;
        }

        // No slab can satisfy our request, so we attempt to allocate a new
        // one here:
        std::size_t size = slabSize_;

        // We want to allocate the memory at a 2 MiB boundary, to make it
        // possible to use hugepage mappings on Linux:
        auto buf =
            boost::alignment::aligned_alloc(megabytes(std::size_t(2)), size);

        // clang-format off
        if (!buf) [[unlikely]]
            return nullptr;
        // clang-format on

#if BOOST_OS_LINUX
        // When allocating large blocks, attempt to leverage Linux's
        // transparent hugepage support. It is unclear and difficult
        // to accurately determine if doing this impacts performance
        // enough to justify using platform-specific tricks.
        if (size >= megabytes(std::size_t(4)))
            madvise(buf, size, MADV_HUGEPAGE);
#endif

        // We need to carve out a bit of memory for the slab header
        // and then align the rest appropriately:
        auto slabData = reinterpret_cast<void*>(
            reinterpret_cast<std::uint8_t*>(buf) + sizeof(SlabBlock));
        auto slabSize = size - sizeof(SlabBlock);

        // This operation is essentially guaranteed not to fail but
        // let's be careful anyways.
        if (!boost::alignment::align(
                itemAlignment_, itemSize_, slabData, slabSize))
        {
            boost::alignment::aligned_free(buf);
            return nullptr;
        }

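        // Construct the slab header in place at the front of the buffer and
        // publish it by pushing it onto the lock-free slabs_ list: the
        // release ordering on a successful exchange makes the freshly
        // initialized block visible to threads that subsequently load
        // slabs_, while a failed exchange refreshes slab->next_ with the
        // current head so the loop simply retries.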
        slab = new (buf) SlabBlock(
            slabs_.load(),
            reinterpret_cast<std::uint8_t*>(slabData),
            slabSize,
            itemSize_);

        // Link the new slab
        while (!slabs_.compare_exchange_weak(
            slab->next_,
            slab,
            std::memory_order_release,
            std::memory_order_relaxed))
        {
            ;  // Nothing to do
        }

        return slab->allocate();
    }

    /** Returns the memory block to the allocator.

        @param ptr A pointer to a memory block.

        @return true if this memory block belonged to the allocator and has
                been released; false otherwise.
     */
    bool
    deallocate(std::uint8_t* ptr) noexcept
    {
        XRPL_ASSERT(
            ptr,
            "xrpl::SlabAllocator::SlabAllocator::deallocate : non-null "
            "input");

        for (auto slab = slabs_.load(); slab != nullptr; slab = slab->next_)
        {
            if (slab->own(ptr))
            {
                slab->deallocate(ptr);
                return true;
            }
        }

        return false;
    }
};
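
// A minimal usage sketch (illustrative only; MyObject and the sizes are
// hypothetical, not part of this header):
//
//     SlabAllocator<MyObject> alloc(0, megabytes(std::size_t(1)));
//
//     if (std::uint8_t* p = alloc.allocate())
//     {
//         auto obj = new (p) MyObject();  // construct in place
//         // ... use obj ...
//         obj->~MyObject();               // destroy before releasing
//         alloc.deallocate(p);            // return the chunk to its slab
//     }
//     // A null result means the allocator could not satisfy the request and
//     // the caller must fall back to another allocation strategy.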

/** A collection of slab allocators of various sizes for a given type. */
template <typename Type>
class SlabAllocatorSet
{
private:
    // The list of allocators that belong to this set
    boost::container::static_vector<SlabAllocator<Type>, 64> allocators_;

    std::size_t maxSize_ = 0;

public:
    class SlabConfig
    {
        friend class SlabAllocatorSet;

    private:
        std::size_t extra;
        std::size_t alloc;
        std::size_t align;

    public:
        constexpr SlabConfig(
            std::size_t extra_,
            std::size_t alloc_ = 0,
            std::size_t align_ = alignof(Type))
            : extra(extra_), alloc(alloc_), align(align_)
        {
        }
    };

    constexpr SlabAllocatorSet(std::vector<SlabConfig> cfg)
    {
        // Ensure that the specified allocators are sorted from smallest to
        // largest by size:
        std::sort(
            std::begin(cfg),
            std::end(cfg),
            [](SlabConfig const& a, SlabConfig const& b) {
                return a.extra < b.extra;
            });

        // We should never have two slabs of the same size
        if (std::adjacent_find(
                std::begin(cfg),
                std::end(cfg),
                [](SlabConfig const& a, SlabConfig const& b) {
                    return a.extra == b.extra;
                }) != cfg.end())
        {
            throw std::runtime_error(
                "SlabAllocatorSet<" + beast::type_name<Type>() +
                ">: duplicate slab size");
        }

        for (auto const& c : cfg)
        {
            auto& a = allocators_.emplace_back(c.extra, c.alloc, c.align);

            if (a.size() > maxSize_)
                maxSize_ = a.size();
        }
    }

    SlabAllocatorSet(SlabAllocatorSet const& other) = delete;
    SlabAllocatorSet&
    operator=(SlabAllocatorSet const& other) = delete;

    SlabAllocatorSet(SlabAllocatorSet&& other) = delete;
    SlabAllocatorSet&
    operator=(SlabAllocatorSet&& other) = delete;

    ~SlabAllocatorSet()
    {
    }

    /** Returns a suitably aligned pointer, if one is available.

        @param extra The number of extra bytes, above and beyond the size of
                     the object, that should be returned by the allocator.

        @return a pointer to a block of memory from the allocator, or
                nullptr if the allocator can't satisfy this request.
     */
    std::uint8_t*
    allocate(std::size_t extra) noexcept
    {
        if (auto const size = sizeof(Type) + extra; size <= maxSize_)
        {
            for (auto& a : allocators_)
            {
                if (a.size() >= size)
                    return a.allocate();
            }
        }

        return nullptr;
    }

    /** Returns the memory block to the allocator.

        @param ptr A pointer to a memory block.

        @return true if this memory block belonged to one of the allocators
                in this set and has been released; false otherwise.
     */
    bool
    deallocate(std::uint8_t* ptr) noexcept
    {
        for (auto& a : allocators_)
        {
            if (a.deallocate(ptr))
                return true;
        }

        return false;
    }
};

} // namespace xrpl
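
// A minimal usage sketch for the allocator set (illustrative only; MyObject
// and the configuration values are hypothetical):
//
//     xrpl::SlabAllocatorSet<MyObject> set({
//         {0, megabytes(std::size_t(1))},   // objects with no extra bytes
//         {64, megabytes(std::size_t(2))},  // objects with up to 64 extra bytes
//     });
//
//     if (std::uint8_t* p = set.allocate(16))  // needs 16 extra bytes
//     {
//         auto obj = new (p) MyObject();
//         // ... use obj ...
//         obj->~MyObject();
//         set.deallocate(p);
//     }
//     // The constructor throws if two configurations specify the same
//     // `extra` value; allocate() returns nullptr when no configured
//     // allocator can satisfy the request.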