IntrusiveRefCounts.h
//------------------------------------------------------------------------------
/*
    This file is part of rippled: https://github.com/ripple/rippled
    Copyright (c) 2023 Ripple Labs Inc.

    Permission to use, copy, modify, and/or distribute this software for any
    purpose with or without fee is hereby granted, provided that the above
    copyright notice and this permission notice appear in all copies.

    THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
    WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
    MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
    ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================

#ifndef RIPPLE_BASICS_INTRUSIVEREFCOUNTS_H_INCLUDED
#define RIPPLE_BASICS_INTRUSIVEREFCOUNTS_H_INCLUDED

#include <xrpl/beast/utility/instrumentation.h>

#include <atomic>
#include <cstdint>

namespace ripple {

/** Action to perform when releasing a strong pointer. */
enum class ReleaseStrongRefAction { noop, partialDestroy, destroy };

/** Action to perform when releasing a weak pointer. */
enum class ReleaseWeakRefAction { noop, destroy };

/** Implement the strong count, weak count, and bit flags for an intrusive
    pointer.
*/
struct IntrusiveRefCounts
{
    virtual ~IntrusiveRefCounts() noexcept;

    // This must be `noexcept` or the make_SharedIntrusive function could leak
    // memory.
    void
    addStrongRef() const noexcept;

    void
    addWeakRef() const noexcept;

    ReleaseStrongRefAction
    releaseStrongRef() const;

    // Same as:
    // {
    //   addWeakRef();
    //   return releaseStrongRef();
    // }
    // done as one atomic operation
    ReleaseStrongRefAction
    addWeakReleaseStrongRef() const;

    ReleaseWeakRefAction
    releaseWeakRef() const;

    // Returns true if able to checkout a strong ref. False otherwise.
    bool
    checkoutStrongRefFromWeak() const noexcept;

    bool
    expired() const noexcept;

    std::size_t
    use_count() const noexcept;

    // This function MUST be called after a partial destructor finishes running.
    // Calling this function may cause other threads to delete the object
    // pointed to by `o`, so `o` should never be used after calling this
    // function. The parameter will be set to `nullptr` after calling this
    // function to emphasize that it should not be used.
    // Note: This is intentionally NOT called at the end of `partialDestructor`.
    // The reason is that new classes written to support this smart pointer
    // class need to write their own `partialDestructor` function, and each of
    // them would have to remember to call `partialDestructorFinished` at the
    // end. Putting this call inside the smart pointer class itself is expected
    // to be less error prone.
    // Note: The "two-star" programming is intentional. It emphasizes that `o`
    // may be deleted and the unergonomic API is meant to signal the special
    // nature of this function call to callers.
    // Note: This is a template to support incompletely defined classes.
    template <class T>
    friend void
    partialDestructorFinished(T** o);
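
    // Illustrative sketch (not part of this header's API; the actual call site
    // lives in the intrusive smart pointer implementation) of how the release
    // path is expected to drive this function:
    //
    //     if (p->releaseStrongRef() == ReleaseStrongRefAction::partialDestroy)
    //     {
    //         p->partialDestructor();
    //         partialDestructorFinished(&p);
    //         // `p` is nullptr here; the object may already have been
    //         // destroyed by another thread.
    //     }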
private:
    // TODO: We may need to use a uint64_t for both counts. This will reduce the
    // memory savings. We need to audit the code to make sure 16 bit counts are
    // enough for strong pointers and 14 bit counts are enough for weak
    // pointers. Use type aliases to make it easy to switch types.
    using CountType = std::uint16_t;
    static constexpr size_t StrongCountNumBits = sizeof(CountType) * 8;
    static constexpr size_t WeakCountNumBits = StrongCountNumBits - 2;
    using FieldType = std::uint32_t;
    static constexpr size_t FieldTypeBits = sizeof(FieldType) * 8;
    static constexpr FieldType one = 1;

    /** refCounts consists of four fields that are treated atomically: the
        strong count, the weak count, the partialDestroyStartedBit, and the
        partialDestroyFinishedBit.
    */
    mutable std::atomic<FieldType> refCounts{strongDelta};

    /** Amount to change the strong count when adding or releasing a reference. */
    static constexpr FieldType strongDelta = 1;

    /** Amount to change the weak count when adding or releasing a reference. */
    static constexpr FieldType weakDelta = (one << StrongCountNumBits);

    /** Flag that is set when the partialDestroy function has started running
        (or is about to start running).
    */
    static constexpr FieldType partialDestroyStartedMask =
        (one << (FieldTypeBits - 1));

    /** Flag that is set when the partialDestroy function has finished running. */
    static constexpr FieldType partialDestroyFinishedMask =
        (one << (FieldTypeBits - 2));

    /** Mask that will zero out all the count bits and leave the tag bits
        unchanged.
    */
    static constexpr FieldType tagMask =
        partialDestroyStartedMask | partialDestroyFinishedMask;

    /** Mask that will zero out the tag bits and leave the count bits
        unchanged.
    */
    static constexpr FieldType valueMask = ~tagMask;

    /** Mask that will zero out everything except the strong count. */
    static constexpr FieldType strongMask =
        ((one << StrongCountNumBits) - 1) & valueMask;

    /** Mask that will zero out everything except the weak count. */
    static constexpr FieldType weakMask =
        (((one << WeakCountNumBits) - 1) << StrongCountNumBits) & valueMask;
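
    // Layout note (follows from the aliases and masks above): with a 16-bit
    // CountType and a 32-bit FieldType, bit 31 is partialDestroyStartedMask,
    // bit 30 is partialDestroyFinishedMask, bits 16..29 hold the weak count,
    // and bits 0..15 hold the strong count.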

    /** Unpack the count and tag fields from the packed atomic integer form. */
    struct RefCountPair
    {
        CountType strong;
        CountType weak;

        /** The partialDestroyStartedBit is set to on when the partial destroy
            function is started. It is stored as a FieldType (zero or
            partialDestroyStartedMask) so it can be ORed directly back into the
            combined value.
        */
        FieldType partialDestroyStartedBit{0};

        /** The partialDestroyFinishedBit is set to on when the partial destroy
            function has finished.
        */
        FieldType partialDestroyFinishedBit{0};

        RefCountPair(FieldType v) noexcept;
        RefCountPair(CountType s, CountType w) noexcept;

        /** Convert back to the packed integer form. */
        FieldType
        combinedValue() const noexcept;

        static constexpr CountType maxStrongValue =
            static_cast<CountType>((one << StrongCountNumBits) - 1);
        static constexpr CountType maxWeakValue =
            static_cast<CountType>((one << WeakCountNumBits) - 1);

        /** Put an extra margin to detect when running up against limits. */
        static constexpr CountType checkStrongMaxValue = maxStrongValue - 32;
        static constexpr CountType checkWeakMaxValue = maxWeakValue - 32;
    };
};

inline void
IntrusiveRefCounts::addStrongRef() const noexcept
{
    refCounts.fetch_add(strongDelta, std::memory_order_acq_rel);
}

inline void
IntrusiveRefCounts::addWeakRef() const noexcept
{
    refCounts.fetch_add(weakDelta, std::memory_order_acq_rel);
}

inline ReleaseStrongRefAction
IntrusiveRefCounts::releaseStrongRef() const
{
    // Subtract `strongDelta` from refCounts. If this releases the last strong
    // ref, set the `partialDestroyStarted` bit. It is important that the ref
    // count and the `partialDestroyStartedBit` are changed atomically (hence
    // the loop and `compare_exchange` op). If this didn't need to be done
    // atomically, the loop could be replaced with a `fetch_sub` and a
    // conditional `fetch_or`. This loop will almost always run once.

    using enum ReleaseStrongRefAction;
    auto prevIntVal = refCounts.load(std::memory_order_acquire);
    while (1)
    {
        RefCountPair const prevVal{prevIntVal};
        XRPL_ASSERT(
            (prevVal.strong >= strongDelta),
            "ripple::IntrusiveRefCounts::releaseStrongRef : previous ref "
            "higher than new");
        auto nextIntVal = prevIntVal - strongDelta;
        ReleaseStrongRefAction action = noop;
        if (prevVal.strong == 1)
        {
            if (prevVal.weak == 0)
            {
                action = destroy;
            }
            else
            {
                nextIntVal |= partialDestroyStartedMask;
                action = partialDestroy;
            }
        }

        if (refCounts.compare_exchange_weak(
                prevIntVal, nextIntVal, std::memory_order_acq_rel))
        {
            // Can't be in partial destroy because only decrementing the strong
            // count to zero can start a partial destroy, and that can't happen
            // twice.
            XRPL_ASSERT(
                (action == noop) || !(prevIntVal & partialDestroyStartedMask),
                "ripple::IntrusiveRefCounts::releaseStrongRef : not in partial "
                "destroy");
            return action;
        }
    }
}
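
// For example (a trace of releaseStrongRef above, not additional behavior):
// releasing the last strong ref while one weak ref is outstanding moves the
// counts from {strong = 1, weak = 1} to {strong = 0, weak = 1}, sets
// partialDestroyStartedMask in the same compare_exchange, and returns
// partialDestroy; with no weak refs outstanding it returns destroy instead.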

inline ReleaseStrongRefAction
IntrusiveRefCounts::addWeakReleaseStrongRef() const
{
    using enum ReleaseStrongRefAction;

    static_assert(weakDelta > strongDelta);
    auto constexpr delta = weakDelta - strongDelta;
    auto prevIntVal = refCounts.load(std::memory_order_acquire);
    // This loop will almost always run once. The loop is needed to atomically
    // change the counts and flags (the count could be atomically changed, but
    // the flags depend on the current value of the counts).
    //
    // Note: If this becomes a perf bottleneck, the `partialDestroyStartedMask`
    // may be able to be set non-atomically. But it is easier to reason about
    // the code if the flag is set atomically.
    while (1)
    {
        RefCountPair const prevVal{prevIntVal};
        // Converted the last strong pointer to a weak pointer.
        //
        // Can't be in partial destroy because only decrementing the
        // strong count to zero can start a partial destroy, and that
        // can't happen twice.
        XRPL_ASSERT(
            (!prevVal.partialDestroyStartedBit),
            "ripple::IntrusiveRefCounts::addWeakReleaseStrongRef : not in "
            "partial destroy");

        auto nextIntVal = prevIntVal + delta;
        ReleaseStrongRefAction action = noop;
        if (prevVal.strong == 1)
        {
            if (prevVal.weak == 0)
            {
                action = noop;
            }
            else
            {
                nextIntVal |= partialDestroyStartedMask;
                action = partialDestroy;
            }
        }
        if (refCounts.compare_exchange_weak(
                prevIntVal, nextIntVal, std::memory_order_acq_rel))
        {
            XRPL_ASSERT(
                (!(prevIntVal & partialDestroyStartedMask)),
                "ripple::IntrusiveRefCounts::addWeakReleaseStrongRef : not "
                "started partial destroy");
            return action;
        }
    }
}
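
// For example (a trace of addWeakReleaseStrongRef above): converting the only
// remaining strong ref while other weak refs exist takes {strong = 1, weak = 2}
// to {strong = 0, weak = 3}, sets partialDestroyStartedMask, and returns
// partialDestroy; if this was the only reference of any kind, {strong = 1,
// weak = 0} becomes {strong = 0, weak = 1} and the function returns noop.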

inline ReleaseWeakRefAction
IntrusiveRefCounts::releaseWeakRef() const
{
    auto prevIntVal = refCounts.fetch_sub(weakDelta, std::memory_order_acq_rel);
    RefCountPair prev = prevIntVal;
    if (prev.weak == 1 && prev.strong == 0)
    {
        if (!prev.partialDestroyStartedBit)
        {
            // This case should only be hit if the partialDestroyStartedBit is
            // set non-atomically (and even then very rarely). The code is kept
            // in case we need to set the flag non-atomically for perf reasons.
            refCounts.wait(prevIntVal, std::memory_order_acquire);
            prevIntVal = refCounts.load(std::memory_order_acquire);
            prev = RefCountPair{prevIntVal};
        }
        if (!prev.partialDestroyFinishedBit)
        {
            // partial destroy MUST finish before running a full destroy (when
            // using weak pointers)
            refCounts.wait(prevIntVal - weakDelta, std::memory_order_acquire);
        }
        return ReleaseWeakRefAction::destroy;
    }
    return ReleaseWeakRefAction::noop;
}

inline bool
IntrusiveRefCounts::checkoutStrongRefFromWeak() const noexcept
{
    auto curValue = RefCountPair{1, 1}.combinedValue();
    auto desiredValue = RefCountPair{2, 1}.combinedValue();

    while (!refCounts.compare_exchange_weak(
        curValue, desiredValue, std::memory_order_acq_rel))
    {
        RefCountPair const prev{curValue};
        if (!prev.strong)
            return false;

        desiredValue = curValue + strongDelta;
    }
    return true;
}
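
// For example (a trace of checkoutStrongRefFromWeak above): the first
// compare_exchange optimistically assumes the counts are {strong = 1, weak = 1}.
// If they are actually {strong = 3, weak = 2}, the exchange fails and reloads
// curValue; since the strong count is non-zero the loop retries with
// desiredValue = curValue + strongDelta and checks the ref out as
// {strong = 4, weak = 2}. If the strong count has already reached zero, the
// checkout fails and the function returns false.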

inline bool
IntrusiveRefCounts::expired() const noexcept
{
    RefCountPair const val = refCounts.load(std::memory_order_relaxed);
    return val.strong == 0;
}

inline std::size_t
IntrusiveRefCounts::use_count() const noexcept
{
    RefCountPair const val = refCounts.load(std::memory_order_relaxed);
    return val.strong;
}

inline IntrusiveRefCounts::~IntrusiveRefCounts() noexcept
{
#ifndef NDEBUG
    auto v = refCounts.load(std::memory_order_acquire);
    XRPL_ASSERT(
        (!(v & valueMask)),
        "ripple::IntrusiveRefCounts::~IntrusiveRefCounts : count must be zero");
    auto t = v & tagMask;
    XRPL_ASSERT(
        (!t || t == tagMask),
        "ripple::IntrusiveRefCounts::~IntrusiveRefCounts : valid tag");
#endif
}

//------------------------------------------------------------------------------

inline IntrusiveRefCounts::RefCountPair::RefCountPair(
    IntrusiveRefCounts::FieldType v) noexcept
    : strong{static_cast<CountType>(v & strongMask)}
    , weak{static_cast<CountType>((v & weakMask) >> StrongCountNumBits)}
    , partialDestroyStartedBit{v & partialDestroyStartedMask}
    , partialDestroyFinishedBit{v & partialDestroyFinishedMask}
{
    XRPL_ASSERT(
        (strong < checkStrongMaxValue && weak < checkWeakMaxValue),
        "ripple::IntrusiveRefCounts::RefCountPair(FieldType) : inputs inside "
        "range");
}

inline IntrusiveRefCounts::RefCountPair::RefCountPair(
    IntrusiveRefCounts::CountType s,
    IntrusiveRefCounts::CountType w) noexcept
    : strong{s}, weak{w}
{
    XRPL_ASSERT(
        (strong < checkStrongMaxValue && weak < checkWeakMaxValue),
        "ripple::IntrusiveRefCounts::RefCountPair(CountType, CountType) : "
        "inputs inside range");
}

inline IntrusiveRefCounts::FieldType
IntrusiveRefCounts::RefCountPair::combinedValue() const noexcept
{
    XRPL_ASSERT(
        (strong < checkStrongMaxValue && weak < checkWeakMaxValue),
        "ripple::IntrusiveRefCounts::RefCountPair::combinedValue : inputs "
        "inside range");
    return (static_cast<IntrusiveRefCounts::FieldType>(weak)
            << IntrusiveRefCounts::StrongCountNumBits) |
        static_cast<IntrusiveRefCounts::FieldType>(strong) |
        partialDestroyStartedBit | partialDestroyFinishedBit;
}
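
// For example (pure arithmetic, assuming the 16-bit/32-bit aliases above):
// RefCountPair{2, 1}.combinedValue() == 0x00010002 -- the weak count of 1 in
// the upper half, the strong count of 2 in the lower half, and no tag bits.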

template <class T>
inline void
partialDestructorFinished(T** o)
{
    T& self = **o;
    IntrusiveRefCounts::RefCountPair p =
        self.refCounts.fetch_or(IntrusiveRefCounts::partialDestroyFinishedMask);
    XRPL_ASSERT(
        (!p.partialDestroyFinishedBit && p.partialDestroyStartedBit &&
         !p.strong),
        "ripple::partialDestructorFinished : not a weak ref");
    if (!p.weak)
    {
        // There was a weak count before the partial destructor ran (or we would
        // have run the full destructor) and now there isn't a weak count. Some
        // thread is waiting to run the destructor.
        self.refCounts.notify_one();
    }
    // Set the pointer to null to emphasize that the object shouldn't be used
    // after calling this function as it may be destroyed in another thread.
    *o = nullptr;
}
//------------------------------------------------------------------------------

}  // namespace ripple
#endif