rippled
IntrusiveRefCounts.h
#pragma once

#include <xrpl/beast/utility/instrumentation.h>

#include <atomic>
#include <cstdint>

namespace xrpl {

/** Action to perform when releasing a strong pointer. */
enum class ReleaseStrongRefAction { noop, partialDestroy, destroy };

/** Action to perform when releasing a weak pointer. */
enum class ReleaseWeakRefAction { noop, destroy };

/** Implement the strong count, weak count, and bit flags for an intrusive
    pointer.
*/
struct IntrusiveRefCounts
{
    virtual ~IntrusiveRefCounts() noexcept;

    // This must be `noexcept` or the make_SharedIntrusive function could leak
    // memory.
    void
    addStrongRef() const noexcept;

    void
    addWeakRef() const noexcept;

    ReleaseStrongRefAction
    releaseStrongRef() const;

    // Same as:
    // {
    //   addWeakRef();
    //   return releaseStrongRef();
    // }
    // done as one atomic operation
    ReleaseStrongRefAction
    addWeakReleaseStrongRef() const;

    ReleaseWeakRefAction
    releaseWeakRef() const;

    // Returns true if able to check out a strong ref, false otherwise.
    bool
    checkoutStrongRefFromWeak() const noexcept;

    bool
    expired() const noexcept;

    std::size_t
    use_count() const noexcept;

    // This function MUST be called after a partial destructor finishes
    // running. Calling this function may cause other threads to delete the
    // object pointed to by `o`, so `o` should never be used after calling
    // this function. The parameter will be set to `nullptr` after calling
    // this function to emphasize that it should not be used.
    // Note: This is intentionally NOT called at the end of
    // `partialDestructor`. The reason is that if new classes are written to
    // support this smart pointer class, they would each need to write their
    // own `partialDestructor` function and remember to call
    // `partialDestructorFinished` at the end; putting this call inside the
    // smart pointer class itself is expected to be less error prone.
    // Note: The "two-star" programming is intentional. It emphasizes that `o`
    // may be deleted and the unergonomic API is meant to signal the special
    // nature of this function call to callers.
    // Note: This is a template to support incompletely defined classes.
    // (An illustrative call sequence appears after releaseStrongRef() below.)
    template <class T>
    friend void
    partialDestructorFinished(T** o);

private:
    // TODO: We may need to use a uint64_t for both counts. This will reduce
    // the memory savings. We need to audit the code to make sure 16 bit
    // counts are enough for strong pointers and 14 bit counts are enough for
    // weak pointers. Use type aliases to make it easy to switch types.
    using CountType = std::uint16_t;
    static constexpr size_t StrongCountNumBits = sizeof(CountType) * 8;
    static constexpr size_t WeakCountNumBits = StrongCountNumBits - 2;
    using FieldType = std::uint32_t;
    static constexpr size_t FieldTypeBits = sizeof(FieldType) * 8;
    static constexpr FieldType one = 1;

    /** refCounts consists of four fields that are treated atomically:

        1. the strong count,
        2. the weak count,
        3. the partial destroy started bit, and
        4. the partial destroy finished bit.
    */
    mutable std::atomic<FieldType> refCounts{strongDelta};

    /** Amount to change the strong count when adding or releasing a
        reference.
    */
    static constexpr FieldType strongDelta = 1;

    /** Amount to change the weak count when adding or releasing a
        reference.
    */
    static constexpr FieldType weakDelta = (one << StrongCountNumBits);

    /** Flag that is set when the partialDestroy function has started running
        (or is about to start running).
    */
    static constexpr FieldType partialDestroyStartedMask = (one << (FieldTypeBits - 1));

    /** Flag that is set when the partialDestroy function has finished
        running.
    */
    static constexpr FieldType partialDestroyFinishedMask = (one << (FieldTypeBits - 2));

    /** Mask that will zero out all the count bits and leave the tag bits
        unchanged.
    */
    static constexpr FieldType tagMask = partialDestroyStartedMask | partialDestroyFinishedMask;

    /** Mask that will zero out the tag bits and leave the count bits
        unchanged.
    */
    static constexpr FieldType valueMask = ~tagMask;

    /** Mask that will zero out everything except the strong count. */
    static constexpr FieldType strongMask = ((one << StrongCountNumBits) - 1) & valueMask;

    /** Mask that will zero out everything except the weak count. */
    static constexpr FieldType weakMask = (((one << WeakCountNumBits) - 1) << StrongCountNumBits) & valueMask;

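    // Worked example of the packed layout (illustrative sketch only), using
    // the CountType = std::uint16_t and FieldType = std::uint32_t aliases
    // above:
    //
    //   bits  0..15  strong count   (strongMask = 0x0000FFFF)
    //   bits 16..29  weak count     (weakMask   = 0x3FFF0000)
    //   bits 30..31  partial-destroy finished / started flags (tagMask)
    //
    // For instance, a packed value of 0x00020003 unpacks to strong == 3 and
    // weak == 2, with neither flag set.
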
    /** Unpack the count and tag fields from the packed atomic integer form.
    */
    struct RefCountPair
    {
        CountType strong;
        CountType weak;

        /** The partialDestroyStartedBit is set to on when the partial destroy
            function is started.
        */
        FieldType partialDestroyStartedBit{0};

        /** The partialDestroyFinishedBit is set to on when the partial
            destroy function has finished.
        */
        FieldType partialDestroyFinishedBit{0};

        RefCountPair(FieldType v) noexcept;
        RefCountPair(CountType s, CountType w) noexcept;

        /** Convert back to the packed integer form. */
        FieldType
        combinedValue() const noexcept;

        static constexpr CountType maxStrongValue = static_cast<CountType>((one << StrongCountNumBits) - 1);
        static constexpr CountType maxWeakValue = static_cast<CountType>((one << WeakCountNumBits) - 1);

        /** Put an extra margin to detect when running up against limits. */
        static constexpr CountType checkStrongMaxValue = maxStrongValue - 32;
        static constexpr CountType checkWeakMaxValue = maxWeakValue - 32;
    };
};
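
// Illustrative sketch (not part of this header): a class opts in to these
// intrusive smart pointers by inheriting from IntrusiveRefCounts and, if it
// supports weak pointers, providing a `partialDestructor` that tears down
// whatever the full destructor must not touch again. The class `Ledger` and
// its member are hypothetical; `partialDestructorFinished` is called by the
// smart pointer machinery afterwards, not by the class itself.
//
//     struct Ledger : IntrusiveRefCounts
//     {
//         void
//         partialDestructor()
//         {
//             // Release expensive state early; the full destructor may run
//             // later, once the last weak reference is gone.
//             caches_.clear();
//         }
//
//         std::vector<std::uint8_t> caches_;  // hypothetical member
//     };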

inline void
IntrusiveRefCounts::addStrongRef() const noexcept
{
    refCounts.fetch_add(strongDelta, std::memory_order_acq_rel);
}

inline void
IntrusiveRefCounts::addWeakRef() const noexcept
{
    refCounts.fetch_add(weakDelta, std::memory_order_acq_rel);
}

inline ReleaseStrongRefAction
IntrusiveRefCounts::releaseStrongRef() const
{
    // Subtract `strongDelta` from refCounts. If this releases the last strong
    // ref, set the `partialDestroyStarted` bit. It is important that the ref
    // count and the `partialDestroyStartedBit` are changed atomically (hence
    // the loop and `compare_exchange` op). If this didn't need to be done
    // atomically, the loop could be replaced with a `fetch_sub` and a
    // conditional `fetch_or`. This loop will almost always run once.

    using enum ReleaseStrongRefAction;
    auto prevIntVal = refCounts.load(std::memory_order_acquire);
    while (1)
    {
        RefCountPair const prevVal{prevIntVal};
        XRPL_ASSERT(
            (prevVal.strong >= strongDelta),
            "xrpl::IntrusiveRefCounts::releaseStrongRef : previous ref "
            "higher than new");
        auto nextIntVal = prevIntVal - strongDelta;
        ReleaseStrongRefAction action = noop;
        if (prevVal.strong == 1)
        {
            if (prevVal.weak == 0)
            {
                action = destroy;
            }
            else
            {
                nextIntVal |= partialDestroyStartedMask;
                action = partialDestroy;
            }
        }

        if (refCounts.compare_exchange_weak(prevIntVal, nextIntVal, std::memory_order_acq_rel))
        {
            // Can't be in partial destroy because only decrementing the strong
            // count to zero can start a partial destroy, and that can't happen
            // twice.
            XRPL_ASSERT(
                (action == noop) || !(prevIntVal & partialDestroyStartedMask),
                "xrpl::IntrusiveRefCounts::releaseStrongRef : not in partial "
                "destroy");
            return action;
        }
    }
}
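
// Illustrative sketch (not part of this header): the owning smart pointer is
// expected to dispatch on the returned action roughly as follows. `obj` is a
// hypothetical `T*` whose type inherits from IntrusiveRefCounts.
//
//     switch (obj->releaseStrongRef())
//     {
//         case ReleaseStrongRefAction::noop:
//             break;
//         case ReleaseStrongRefAction::partialDestroy:
//             obj->partialDestructor();
//             partialDestructorFinished(&obj);
//             // `obj` is now nullptr and must not be used.
//             break;
//         case ReleaseStrongRefAction::destroy:
//             delete obj;
//             break;
//     }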

inline ReleaseStrongRefAction
IntrusiveRefCounts::addWeakReleaseStrongRef() const
{
    using enum ReleaseStrongRefAction;

    static_assert(weakDelta > strongDelta);
    auto constexpr delta = weakDelta - strongDelta;
    auto prevIntVal = refCounts.load(std::memory_order_acquire);
    // This loop will almost always run once. The loop is needed to atomically
    // change the counts and flags (the count could be atomically changed, but
    // the flags depend on the current value of the counts).
    //
    // Note: If this becomes a perf bottleneck, the `partialDestroyStartedMask`
    // may be able to be set non-atomically. But it is easier to reason about
    // the code if the flag is set atomically.
    while (1)
    {
        RefCountPair const prevVal{prevIntVal};
        // Converted the last strong pointer to a weak pointer.
        //
        // Can't be in partial destroy because only decrementing the
        // strong count to zero can start a partial destroy, and that
        // can't happen twice.
        XRPL_ASSERT(
            (!prevVal.partialDestroyStartedBit),
            "xrpl::IntrusiveRefCounts::addWeakReleaseStrongRef : not in "
            "partial destroy");

        auto nextIntVal = prevIntVal + delta;
        ReleaseStrongRefAction action = noop;
        if (prevVal.strong == 1)
        {
            if (prevVal.weak == 0)
            {
                action = noop;
            }
            else
            {
                nextIntVal |= partialDestroyStartedMask;
                action = partialDestroy;
            }
        }
        if (refCounts.compare_exchange_weak(prevIntVal, nextIntVal, std::memory_order_acq_rel))
        {
            XRPL_ASSERT(
                (!(prevIntVal & partialDestroyStartedMask)),
                "xrpl::IntrusiveRefCounts::addWeakReleaseStrongRef : not "
                "started partial destroy");
            return action;
        }
    }
}

inline ReleaseWeakRefAction
IntrusiveRefCounts::releaseWeakRef() const
{
    auto prevIntVal = refCounts.fetch_sub(weakDelta, std::memory_order_acq_rel);
    RefCountPair prev = prevIntVal;
    if (prev.weak == 1 && prev.strong == 0)
    {
        if (!prev.partialDestroyStartedBit)
        {
            // This case should only be hit if the partialDestroyStartedBit is
            // set non-atomically (and even then very rarely). The code is kept
            // in case we need to set the flag non-atomically for perf reasons.
            refCounts.wait(prevIntVal, std::memory_order_acquire);
            prevIntVal = refCounts.load(std::memory_order_acquire);
            prev = RefCountPair{prevIntVal};
        }
        if (!prev.partialDestroyFinishedBit)
        {
            // partial destroy MUST finish before running a full destroy (when
            // using weak pointers)
            refCounts.wait(prevIntVal - weakDelta, std::memory_order_acquire);
        }
        return ReleaseWeakRefAction::destroy;
    }
    return ReleaseWeakRefAction::noop;
}

inline bool
IntrusiveRefCounts::checkoutStrongRefFromWeak() const noexcept
{
    auto curValue = RefCountPair{1, 1}.combinedValue();
    auto desiredValue = RefCountPair{2, 1}.combinedValue();

    while (!refCounts.compare_exchange_weak(curValue, desiredValue, std::memory_order_acq_rel))
    {
        RefCountPair const prev{curValue};
        if (!prev.strong)
            return false;

        desiredValue = curValue + strongDelta;
    }
    return true;
}
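
// Illustrative sketch (not part of this header): a weak intrusive pointer's
// `lock()`-style operation would use checkoutStrongRefFromWeak roughly as
// follows; `WeakPtr` and `StrongPtr` are hypothetical wrapper types.
//
//     StrongPtr WeakPtr::lock() const
//     {
//         if (ptr_ && ptr_->checkoutStrongRefFromWeak())
//             return StrongPtr::adopt(ptr_);  // strong count already bumped
//         return StrongPtr{};                 // object already expired
//     }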

inline bool
IntrusiveRefCounts::expired() const noexcept
{
    RefCountPair const val = refCounts.load(std::memory_order_acquire);
    return val.strong == 0;
}

inline std::size_t
IntrusiveRefCounts::use_count() const noexcept
{
    RefCountPair const val = refCounts.load(std::memory_order_acquire);
    return val.strong;
}

inline IntrusiveRefCounts::~IntrusiveRefCounts() noexcept
{
#ifndef NDEBUG
    // The counts must be zero when the destructor runs; only the tag bits
    // (either both set or both clear) may remain.
    auto v = refCounts.load(std::memory_order_acquire);
    XRPL_ASSERT((!(v & valueMask)), "xrpl::IntrusiveRefCounts::~IntrusiveRefCounts : count must be zero");
    auto t = v & tagMask;
    XRPL_ASSERT((!t || t == tagMask), "xrpl::IntrusiveRefCounts::~IntrusiveRefCounts : valid tag");
#endif
}

//------------------------------------------------------------------------------

inline IntrusiveRefCounts::RefCountPair::RefCountPair(FieldType v) noexcept
    : strong{static_cast<CountType>(v & strongMask)}
    , weak{static_cast<CountType>((v & weakMask) >> StrongCountNumBits)}
    , partialDestroyStartedBit{v & partialDestroyStartedMask}
    , partialDestroyFinishedBit{v & partialDestroyFinishedMask}
{
    XRPL_ASSERT(
        (strong < checkStrongMaxValue && weak < checkWeakMaxValue),
        "xrpl::IntrusiveRefCounts::RefCountPair(FieldType) : inputs inside "
        "range");
}

inline IntrusiveRefCounts::RefCountPair::RefCountPair(
    CountType s,
    CountType w) noexcept
    : strong{s}, weak{w}
{
    XRPL_ASSERT(
        (strong < checkStrongMaxValue && weak < checkWeakMaxValue),
        "xrpl::IntrusiveRefCounts::RefCountPair(CountType, CountType) : "
        "inputs inside range");
}

inline IntrusiveRefCounts::FieldType
IntrusiveRefCounts::RefCountPair::combinedValue() const noexcept
{
    XRPL_ASSERT(
        (strong < checkStrongMaxValue && weak < checkWeakMaxValue),
        "xrpl::IntrusiveRefCounts::RefCountPair::combinedValue : inputs "
        "inside range");
    return (static_cast<FieldType>(weak) << StrongCountNumBits) |
        static_cast<FieldType>(strong) | partialDestroyStartedBit |
        partialDestroyFinishedBit;
}

template <class T>
inline void
partialDestructorFinished(T** o)
{
    T& self = **o;
    IntrusiveRefCounts::RefCountPair p =
        self.refCounts.fetch_or(IntrusiveRefCounts::partialDestroyFinishedMask);
    XRPL_ASSERT(
        (!p.partialDestroyFinishedBit && p.partialDestroyStartedBit && !p.strong),
        "xrpl::partialDestructorFinished : not a weak ref");
    if (!p.weak)
    {
        // There was a weak count before the partial destructor ran (or we would
        // have run the full destructor) and now there isn't a weak count. Some
        // thread is waiting to run the destructor.
        self.refCounts.notify_one();
    }
    // Set the pointer to null to emphasize that the object shouldn't be used
    // after calling this function as it may be destroyed in another thread.
    *o = nullptr;
}
//------------------------------------------------------------------------------

} // namespace xrpl