SlabAllocator.h
// Copyright (c) 2022, Nikolaos D. Bougalis <nikb@bougalis.net>

#ifndef XRPL_BASICS_SLABALLOCATOR_H_INCLUDED
#define XRPL_BASICS_SLABALLOCATOR_H_INCLUDED

#include <xrpl/basics/ByteUtilities.h>
#include <xrpl/beast/type_name.h>
#include <xrpl/beast/utility/instrumentation.h>

#include <boost/align.hpp>
#include <boost/container/static_vector.hpp>
#include <boost/predef.h>

#include <algorithm>
#include <atomic>
#include <cstdint>
#include <cstring>
#include <mutex>
#include <stdexcept>
#include <vector>

#if BOOST_OS_LINUX
#include <sys/mman.h>
#endif

namespace ripple {

template <typename Type>
class SlabAllocator
{
    static_assert(
        sizeof(Type) >= sizeof(std::uint8_t*),
        "SlabAllocator: the requested object must be larger than a pointer.");

    static_assert(alignof(Type) == 8 || alignof(Type) == 4);

    /** A block of memory that is owned by a slab allocator. */
    struct SlabBlock
    {
        // A mutex to protect the freelist for this block:
        std::mutex m_;

        // A linked list of appropriately sized free buffers:
        std::uint8_t* l_ = nullptr;

        // The next memory block
        SlabBlock* next_;

        // The underlying memory block:
        std::uint8_t const* const p_ = nullptr;

        // The extent of the underlying memory block:
        std::size_t const size_;

        SlabBlock(
            SlabBlock* next,
            std::uint8_t* data,
            std::size_t size,
            std::size_t item)
            : next_(next), p_(data), size_(size)
        {
            // We don't need to grab the mutex here, since we're the only
            // ones with access at this moment.

            while (data + item <= p_ + size_)
            {
                // Use memcpy to avoid unaligned UB
                // (will optimize to equivalent code)
                std::memcpy(data, &l_, sizeof(std::uint8_t*));
                l_ = data;
                data += item;
            }
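            // Each free chunk now begins with the address of the next free
            // chunk (nullptr marks the end of the list), forming an
            // intrusive freelist threaded through the block's own storage.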
        }

        ~SlabBlock()
        {
            // Calling this destructor will release the allocated memory but
            // will not properly destroy any objects that are constructed in
            // the block itself.
        }

        SlabBlock(SlabBlock const& other) = delete;
        SlabBlock&
        operator=(SlabBlock const& other) = delete;

        SlabBlock(SlabBlock&& other) = delete;
        SlabBlock&
        operator=(SlabBlock&& other) = delete;

        /** Determines whether the given pointer belongs to this allocator. */
        bool
        own(std::uint8_t const* p) const noexcept
        {
            return (p >= p_) && (p < p_ + size_);
        }

        std::uint8_t*
        allocate() noexcept
        {
            std::uint8_t* ret;

            {
                std::lock_guard l(m_);

                ret = l_;

                if (ret)
                {
                    // Use memcpy to avoid unaligned UB
                    // (will optimize to equivalent code)
                    std::memcpy(&l_, ret, sizeof(std::uint8_t*));
                }
            }

            return ret;
        }

        /** Return an item to this allocator's freelist.

            @param ptr The pointer to the chunk of memory being deallocated.

            @note This is a dangerous, private interface; the item being
                  returned should belong to this allocator. Debug builds
                  will check and assert if this is not the case. Release
                  builds will not.
         */
        void
        deallocate(std::uint8_t* ptr) noexcept
        {
            XRPL_ASSERT(
                own(ptr),
                "ripple::SlabAllocator::SlabBlock::deallocate : own input");

            std::lock_guard l(m_);

            // Use memcpy to avoid unaligned UB
            // (will optimize to equivalent code)
            std::memcpy(ptr, &l_, sizeof(std::uint8_t*));
            l_ = ptr;
        }
    };

private:
    // A linked list of slabs
    std::atomic<SlabBlock*> slabs_ = nullptr;

    // The alignment requirements of the item we're allocating:
    std::size_t const itemAlignment_;

    // The size of an item, including the extra bytes requested and
    // any padding needed for alignment purposes:
    std::size_t const itemSize_;

    // The size of each individual slab:
    std::size_t const slabSize_;

public:
    /** Constructs a slab allocator able to allocate objects of a fixed size.

        @param extra The number of extra bytes to allocate for each item,
                     beyond sizeof(Type).
        @param alloc The size, in bytes, of each memory slab.
        @param align The alignment of returned pointers; if zero, the
                     natural alignment of Type is used.
     */
    constexpr explicit SlabAllocator(
        std::size_t extra,
        std::size_t alloc = 0,
        std::size_t align = 0)
        : itemAlignment_(align ? align : alignof(Type))
        , itemSize_(
              boost::alignment::align_up(sizeof(Type) + extra, itemAlignment_))
        , slabSize_(alloc)
    {
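        // A nonzero power of two has exactly one bit set, so the test
        // (x & (x - 1)) == 0 below verifies that the alignment is a
        // power of two: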
        XRPL_ASSERT(
            (itemAlignment_ & (itemAlignment_ - 1)) == 0,
            "ripple::SlabAllocator::SlabAllocator : valid alignment");
    }

    SlabAllocator(SlabAllocator const& other) = delete;
    SlabAllocator&
    operator=(SlabAllocator const& other) = delete;

    SlabAllocator(SlabAllocator&& other) = delete;
    SlabAllocator&
    operator=(SlabAllocator&& other) = delete;

    ~SlabAllocator()
    {
        // FIXME: We can't destroy the memory blocks we've allocated, because
        //        we can't be sure that they are not being used. Cleaning the
        //        shutdown process up could make this possible.
    }

    /** Returns the size of the memory block this allocator returns. */
    constexpr std::size_t
    size() const noexcept
    {
        return itemSize_;
    }

    /** Returns a suitably aligned pointer, if one is available.

        @return a pointer to a block of memory from the allocator, or
                nullptr if the allocator can't satisfy this request.
     */
    std::uint8_t*
    allocate() noexcept
    {
        auto slab = slabs_.load();

        while (slab != nullptr)
        {
            if (auto ret = slab->allocate())
                return ret;

            slab = slab->next_;
        }

        // No slab can satisfy our request, so we attempt to allocate a new
        // one here:
        std::size_t size = slabSize_;

        // We want to allocate the memory at a 2 MiB boundary, to make it
        // possible to use hugepage mappings on Linux:
        auto buf =
            boost::alignment::aligned_alloc(megabytes(std::size_t(2)), size);

        // clang-format off
        if (!buf) [[unlikely]]
            return nullptr;
        // clang-format on

#if BOOST_OS_LINUX
        // When allocating large blocks, attempt to leverage Linux's
        // transparent hugepage support. It is unclear and difficult
        // to accurately determine if doing this impacts performance
        // enough to justify using platform-specific tricks.
        if (size >= megabytes(std::size_t(4)))
            madvise(buf, size, MADV_HUGEPAGE);
#endif

        // We need to carve out a bit of memory for the slab header
        // and then align the rest appropriately:
        auto slabData = reinterpret_cast<void*>(
            reinterpret_cast<std::uint8_t*>(buf) + sizeof(SlabBlock));
        auto slabSize = size - sizeof(SlabBlock);

        // This operation is essentially guaranteed not to fail, but
        // let's be careful anyway.
        if (!boost::alignment::align(
                itemAlignment_, itemSize_, slabData, slabSize))
        {
            boost::alignment::aligned_free(buf);
            return nullptr;
        }

        slab = new (buf) SlabBlock(
            slabs_.load(),
            reinterpret_cast<std::uint8_t*>(slabData),
            slabSize,
            itemSize_);

        // Link the new slab
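        // On failure, compare_exchange_weak reloads the current list head
        // into slab->next_, so the retry loop needs no explicit body: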
        while (!slabs_.compare_exchange_weak(
            slab->next_,
            slab,
            std::memory_order_release,
            std::memory_order_relaxed))
        {
            ;  // Nothing to do
        }

        return slab->allocate();
    }

    /** Returns the memory block to the allocator.

        @param ptr A pointer to a memory block.

        @return true if this memory block belonged to the allocator and has
                     been released; false otherwise.
     */
    bool
    deallocate(std::uint8_t* ptr) noexcept
    {
        XRPL_ASSERT(
            ptr,
            "ripple::SlabAllocator::SlabAllocator::deallocate : non-null "
            "input");

        for (auto slab = slabs_.load(); slab != nullptr; slab = slab->next_)
        {
            if (slab->own(ptr))
            {
                slab->deallocate(ptr);
                return true;
            }
        }

        return false;
    }
};
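
// Illustrative sketch (an editorial addition, not part of the original
// header): using a SlabAllocator<T> directly. The allocator only hands out
// raw, suitably aligned storage; the caller is responsible for constructing
// and destroying objects in it. "Thing" is a hypothetical type satisfying
// the static_asserts above, and the 1 MiB slab size is arbitrary:
//
//     SlabAllocator<Thing> alloc(0, megabytes(std::size_t(1)));
//
//     if (std::uint8_t* buf = alloc.allocate())
//     {
//         auto* t = new (buf) Thing();  // construct in-place
//         // ... use *t ...
//         t->~Thing();                  // destroy explicitly
//         alloc.deallocate(buf);        // return the buffer to its slab
//     }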

/** A collection of slab allocators of various sizes for a given type. */
template <typename Type>
class SlabAllocatorSet
{
private:
    // The list of allocators that belong to this set
    boost::container::static_vector<SlabAllocator<Type>, 64> allocators_;

    std::size_t maxSize_ = 0;

public:
    class SlabConfig
    {
        friend class SlabAllocatorSet;

    private:
        std::size_t extra;
        std::size_t alloc;
        std::size_t align;

    public:
        constexpr SlabConfig(
            std::size_t extra_,
            std::size_t alloc_ = 0,
            std::size_t align_ = alignof(Type))
            : extra(extra_), alloc(alloc_), align(align_)
        {
        }
    };

    constexpr SlabAllocatorSet(std::vector<SlabConfig> cfg)
    {
        // Ensure that the specified allocators are sorted from smallest to
        // largest by size:
        std::sort(
            std::begin(cfg),
            std::end(cfg),
            [](SlabConfig const& a, SlabConfig const& b) {
                return a.extra < b.extra;
            });

        // We should never have two slabs of the same size
        if (std::adjacent_find(
                std::begin(cfg),
                std::end(cfg),
                [](SlabConfig const& a, SlabConfig const& b) {
                    return a.extra == b.extra;
                }) != cfg.end())
        {
            throw std::runtime_error(
                "SlabAllocatorSet<" + beast::type_name<Type>() +
                ">: duplicate slab size");
        }

        for (auto const& c : cfg)
        {
            auto& a = allocators_.emplace_back(c.extra, c.alloc, c.align);

            if (a.size() > maxSize_)
                maxSize_ = a.size();
        }
    }

    SlabAllocatorSet(SlabAllocatorSet const& other) = delete;
    SlabAllocatorSet&
    operator=(SlabAllocatorSet const& other) = delete;

    SlabAllocatorSet(SlabAllocatorSet&& other) = delete;
    SlabAllocatorSet&
    operator=(SlabAllocatorSet&& other) = delete;

    ~SlabAllocatorSet()
    {
    }

    /** Returns a suitably aligned pointer, if one is available.

        @param extra The number of extra bytes, above and beyond the size of
                     the object, that should be returned by the allocator.

        @return a pointer to a block of memory, or nullptr if the allocator
                can't satisfy this request.
     */
    std::uint8_t*
    allocate(std::size_t extra) noexcept
    {
        if (auto const size = sizeof(Type) + extra; size <= maxSize_)
        {
            for (auto& a : allocators_)
            {
                if (a.size() >= size)
                    return a.allocate();
            }
        }

        return nullptr;
    }

    /** Returns the memory block to the allocator.

        @param ptr A pointer to a memory block.

        @return true if this memory block belonged to one of the allocators
                     in this set and has been released; false otherwise.
     */
    bool
    deallocate(std::uint8_t* ptr) noexcept
    {
        for (auto& a : allocators_)
        {
            if (a.deallocate(ptr))
                return true;
        }

        return false;
    }
};

}  // namespace ripple

#endif  // XRPL_BASICS_SLABALLOCATOR_H_INCLUDED
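
Usage example (an illustrative sketch, not part of the header): a
SlabAllocatorSet<Thing> is configured with several "extra byte" sizes, and
each request is served by the smallest allocator whose item size fits. The
type Thing, the extra sizes, and the 1 MiB slab size are all hypothetical
choices made for this example.

    #include <xrpl/basics/SlabAllocator.h>

    #include <cstdint>
    #include <new>

    struct Thing
    {
        std::uint64_t a;
        std::uint64_t b;
    };

    int main()
    {
        using namespace ripple;

        // Three allocators, able to hand out sizeof(Thing) plus 0, 32 or
        // 64 extra bytes, each carving items out of 1 MiB slabs:
        SlabAllocatorSet<Thing> set(
            {{0, megabytes(std::size_t(1))},
             {32, megabytes(std::size_t(1))},
             {64, megabytes(std::size_t(1))}});

        // Request room for a Thing plus 16 extra bytes; the smallest
        // allocator that fits (the "+32" one) serves the request:
        if (std::uint8_t* buf = set.allocate(16))
        {
            auto* t = new (buf) Thing();  // placement-construct the object

            t->~Thing();          // the set never runs destructors for us
            set.deallocate(buf);  // push the buffer back onto its freelist
        }
    }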