rippled
Loading...
Searching...
No Matches
IntrusiveShared_test.cpp
1#include <test/unit_test/SuiteJournal.h>
2
3#include <xrpl/basics/IntrusivePointer.ipp>
4#include <xrpl/basics/IntrusiveRefCounts.h>
5#include <xrpl/beast/unit_test.h>
6#include <xrpl/beast/utility/Journal.h>
7
8#include <array>
9#include <atomic>
10#include <barrier>
11#include <chrono>
12#include <condition_variable>
13#include <latch>
14#include <optional>
15#include <random>
16#include <string>
17#include <thread>
18#include <variant>
19
20namespace xrpl {
21namespace tests {
22
// A reusable rendezvous point for a fixed number of threads.
// (A hand-rolled condition-variable barrier is used instead of
// std::barrier, which was found to perform very poorly here.)
struct Barrier
{
    std::mutex mtx;
    std::condition_variable cv;
    int count;
    int const initial;
    // Incremented each time the barrier trips. Waiters wait for the
    // generation to advance rather than for `count == initial`; the
    // latter races when the barrier is reused, because a fast thread
    // can re-enter and decrement `count` again before slow waiters
    // observe the reset value, leaving them blocked forever.
    std::uint64_t generation{0};

    Barrier(int n) : count(n), initial(n)
    {
    }

    // Block until `initial` threads have arrived, then release them
    // all. Safe to call repeatedly: the barrier re-arms itself.
    void
    arrive_and_wait()
    {
        std::unique_lock<std::mutex> lock{mtx};
        if (--count == 0)
        {
            count = initial;
            ++generation;
            cv.notify_all();
        }
        else
        {
            auto const gen = generation;
            cv.wait(lock, [&] { return gen != generation; });
        }
    }
};
71
72namespace {
// Lifecycle states recorded for each TIBase instance. The tests assert
// on these to verify when the partial destructor and the destructor
// run, and that neither runs twice or out of order.
enum class TrackedState : std::uint8_t {
    uninitialized,            // tracking slot not yet used
    alive,                    // constructed, not yet destroyed
    partiallyDeletedStarted,  // partialDestructor() has begun
    partiallyDeleted,         // partialDestructor() has completed
    deletedStarted,           // ~TIBase() has begun
    deleted                   // ~TIBase() has completed
};
81
82class TIBase : public IntrusiveRefCounts
83{
84public:
85 static constexpr std::size_t maxStates = 128;
86 static std::array<std::atomic<TrackedState>, maxStates> state;
87 static std::atomic<int> nextId;
88 static TrackedState
89 getState(int id)
90 {
91 assert(id < state.size());
92 return state[id].load(std::memory_order_acquire);
93 }
94 static void
95 resetStates(bool resetCallback)
96 {
97 for (int i = 0; i < maxStates; ++i)
98 {
99 state[i].store(TrackedState::uninitialized, std::memory_order_release);
100 }
102 if (resetCallback)
103 TIBase::tracingCallback_ = [](TrackedState, std::optional<TrackedState>) {};
104 }
105
106 struct ResetStatesGuard
107 {
108 bool resetCallback_{false};
109
110 ResetStatesGuard(bool resetCallback) : resetCallback_{resetCallback}
111 {
112 TIBase::resetStates(resetCallback_);
113 }
114 ~ResetStatesGuard()
115 {
116 TIBase::resetStates(resetCallback_);
117 }
118 };
119
120 TIBase() : id_{checkoutID()}
121 {
122 assert(state.size() > id_);
123 state[id_].store(TrackedState::alive, std::memory_order_relaxed);
124 }
125 ~TIBase()
126 {
127 using enum TrackedState;
128
129 assert(state.size() > id_);
130 tracingCallback_(state[id_].load(std::memory_order_relaxed), deletedStarted);
131
132 assert(state.size() > id_);
133 // Use relaxed memory order to try to avoid atomic operations from
134 // adding additional memory synchronizations that may hide threading
135 // errors in the underlying shared pointer class.
136 state[id_].store(deletedStarted, std::memory_order_relaxed);
137
138 tracingCallback_(deletedStarted, deleted);
139
140 assert(state.size() > id_);
141 state[id_].store(TrackedState::deleted, std::memory_order_relaxed);
142
143 tracingCallback_(TrackedState::deleted, std::nullopt);
144 }
145
146 void
147 partialDestructor() const
148 {
149 using enum TrackedState;
150
151 assert(state.size() > id_);
152 tracingCallback_(state[id_].load(std::memory_order_relaxed), partiallyDeletedStarted);
153
154 assert(state.size() > id_);
155 state[id_].store(partiallyDeletedStarted, std::memory_order_relaxed);
156
157 tracingCallback_(partiallyDeletedStarted, partiallyDeleted);
158
159 assert(state.size() > id_);
160 state[id_].store(partiallyDeleted, std::memory_order_relaxed);
161
162 tracingCallback_(partiallyDeleted, std::nullopt);
163 }
164
165 static std::function<void(TrackedState, std::optional<TrackedState>)> tracingCallback_;
166
167 int id_;
168
169private:
170 static int
171 checkoutID()
172 {
173 return nextId.fetch_add(1, std::memory_order_acq_rel);
174 }
175};
176
// Out-of-line storage for TIBase's static tracking members.
std::array<std::atomic<TrackedState>, TIBase::maxStates> TIBase::state;
std::atomic<int> TIBase::nextId{0};

// Default tracing callback is a no-op; individual tests install their own.
std::function<void(TrackedState, std::optional<TrackedState>)> TIBase::tracingCallback_ =
    [](TrackedState, std::optional<TrackedState>) {};
182
183} // namespace
184
186{
187public:
188 void
190 {
191 testcase("Basics");
192
193 {
194 TIBase::ResetStatesGuard const rsg{true};
195
196 TIBase const b;
197 BEAST_EXPECT(b.use_count() == 1);
198 b.addWeakRef();
199 BEAST_EXPECT(b.use_count() == 1);
200 auto s = b.releaseStrongRef();
202 BEAST_EXPECT(b.use_count() == 0);
203 TIBase const* pb = &b;
205 BEAST_EXPECT(!pb);
206 auto w = b.releaseWeakRef();
207 BEAST_EXPECT(w == ReleaseWeakRefAction::destroy);
208 }
209
212 {
213 TIBase::ResetStatesGuard const rsg{true};
214
215 using enum TrackedState;
216 auto b = make_SharedIntrusive<TIBase>();
217 auto id = b->id_;
218 BEAST_EXPECT(TIBase::getState(id) == alive);
219 BEAST_EXPECT(b->use_count() == 1);
220 for (int i = 0; i < 10; ++i)
221 {
222 strong.push_back(b);
223 }
224 b.reset();
225 BEAST_EXPECT(TIBase::getState(id) == alive);
226 strong.resize(strong.size() - 1);
227 BEAST_EXPECT(TIBase::getState(id) == alive);
228 strong.clear();
229 BEAST_EXPECT(TIBase::getState(id) == deleted);
230
231 b = make_SharedIntrusive<TIBase>();
232 id = b->id_;
233 BEAST_EXPECT(TIBase::getState(id) == alive);
234 BEAST_EXPECT(b->use_count() == 1);
235 for (int i = 0; i < 10; ++i)
236 {
237 weak.push_back(b);
238 BEAST_EXPECT(b->use_count() == 1);
239 }
240 BEAST_EXPECT(TIBase::getState(id) == alive);
241 weak.resize(weak.size() - 1);
242 BEAST_EXPECT(TIBase::getState(id) == alive);
243 b.reset();
244 BEAST_EXPECT(TIBase::getState(id) == partiallyDeleted);
245 while (!weak.empty())
246 {
247 weak.resize(weak.size() - 1);
248 if (!weak.empty())
249 BEAST_EXPECT(TIBase::getState(id) == partiallyDeleted);
250 }
251 BEAST_EXPECT(TIBase::getState(id) == deleted);
252 }
253 {
254 TIBase::ResetStatesGuard const rsg{true};
255
256 using enum TrackedState;
257 auto b = make_SharedIntrusive<TIBase>();
258 auto id = b->id_;
259 BEAST_EXPECT(TIBase::getState(id) == alive);
261 BEAST_EXPECT(TIBase::getState(id) == alive);
262 auto s = w.lock();
263 BEAST_EXPECT(s && s->use_count() == 2);
264 b.reset();
265 BEAST_EXPECT(TIBase::getState(id) == alive);
266 BEAST_EXPECT(s && s->use_count() == 1);
267 s.reset();
268 BEAST_EXPECT(TIBase::getState(id) == partiallyDeleted);
269 BEAST_EXPECT(w.expired());
270 s = w.lock();
271 // Cannot convert a weak pointer to a strong pointer if object is
272 // already partially deleted
273 BEAST_EXPECT(!s);
274 w.reset();
275 BEAST_EXPECT(TIBase::getState(id) == deleted);
276 }
277 {
278 TIBase::ResetStatesGuard const rsg{true};
279
280 using enum TrackedState;
281 using swu = SharedWeakUnion<TIBase>;
282 swu b = make_SharedIntrusive<TIBase>();
283 BEAST_EXPECT(b.isStrong() && b.use_count() == 1);
284 auto id = b.get()->id_;
285 BEAST_EXPECT(TIBase::getState(id) == alive);
286 swu w = b;
287 BEAST_EXPECT(TIBase::getState(id) == alive);
288 BEAST_EXPECT(w.isStrong() && b.use_count() == 2);
289 w.convertToWeak();
290 BEAST_EXPECT(w.isWeak() && b.use_count() == 1);
291 swu s = w;
292 BEAST_EXPECT(s.isWeak() && b.use_count() == 1);
293 s.convertToStrong();
294 BEAST_EXPECT(s.isStrong() && b.use_count() == 2);
295 b.reset();
296 BEAST_EXPECT(TIBase::getState(id) == alive);
297 BEAST_EXPECT(s.use_count() == 1);
298 BEAST_EXPECT(!w.expired());
299 s.reset();
300 BEAST_EXPECT(TIBase::getState(id) == partiallyDeleted);
301 BEAST_EXPECT(w.expired());
302 w.convertToStrong();
303 // Cannot convert a weak pointer to a strong pointer if object is
304 // already partially deleted
305 BEAST_EXPECT(w.isWeak());
306 w.reset();
307 BEAST_EXPECT(TIBase::getState(id) == deleted);
308 }
309 {
310 // Testing SharedWeakUnion assignment operator
311
312 TIBase::ResetStatesGuard const rsg{true};
313
314 auto strong1 = make_SharedIntrusive<TIBase>();
315 auto strong2 = make_SharedIntrusive<TIBase>();
316
317 auto id1 = strong1->id_;
318 auto id2 = strong2->id_;
319
320 BEAST_EXPECT(id1 != id2);
321
322 SharedWeakUnion<TIBase> union1 = strong1;
323 SharedWeakUnion<TIBase> union2 = strong2;
324
325 BEAST_EXPECT(union1.isStrong());
326 BEAST_EXPECT(union2.isStrong());
327 BEAST_EXPECT(union1.get() == strong1.get());
328 BEAST_EXPECT(union2.get() == strong2.get());
329
330 // 1) Normal assignment: explicitly calls SharedWeakUnion assignment
331 union1 = union2;
332 BEAST_EXPECT(union1.isStrong());
333 BEAST_EXPECT(union2.isStrong());
334 BEAST_EXPECT(union1.get() == union2.get());
335 BEAST_EXPECT(TIBase::getState(id1) == TrackedState::alive);
336 BEAST_EXPECT(TIBase::getState(id2) == TrackedState::alive);
337
338 // 2) Test self-assignment
339 BEAST_EXPECT(union1.isStrong());
340 BEAST_EXPECT(TIBase::getState(id1) == TrackedState::alive);
341 int const initialRefCount = strong1->use_count();
342#pragma clang diagnostic push
343#pragma clang diagnostic ignored "-Wself-assign-overloaded"
344 union1 = union1; // Self-assignment
345#pragma clang diagnostic pop
346 BEAST_EXPECT(union1.isStrong());
347 BEAST_EXPECT(TIBase::getState(id1) == TrackedState::alive);
348 BEAST_EXPECT(strong1->use_count() == initialRefCount);
349
350 // 3) Test assignment from null union pointer
351 union1 = SharedWeakUnion<TIBase>();
352 BEAST_EXPECT(union1.get() == nullptr);
353
354 // 4) Test assignment to expired union pointer
355 strong2.reset();
356 union2.reset();
357 union1 = union2;
358 BEAST_EXPECT(union1.get() == nullptr);
359 BEAST_EXPECT(TIBase::getState(id2) == TrackedState::deleted);
360 }
361 }
362
363 void
365 {
366 testcase("Partial Delete");
367
368 // This test creates two threads. One with a strong pointer and one
369 // with a weak pointer. The strong pointer is reset while the weak
370 // pointer still holds a reference, triggering a partial delete.
371 // While the partial delete function runs (a sleep is inserted) the
372 // weak pointer is reset. The destructor should wait to run until
373 // after the partial delete function has completed running.
374
375 using enum TrackedState;
376
377 TIBase::ResetStatesGuard const rsg{true};
378
379 auto strong = make_SharedIntrusive<TIBase>();
380 WeakIntrusive<TIBase> weak{strong};
381 bool destructorRan = false;
382 bool partialDeleteRan = false;
383 std::latch partialDeleteStartedSyncPoint{2};
384 strong->tracingCallback_ = [&](TrackedState cur, std::optional<TrackedState> next) {
385 using enum TrackedState;
386 if (next == deletedStarted)
387 {
388 // strong goes out of scope while weak is still in scope
389 // This checks that partialDelete has run to completion
390 // before the destructor is called. A sleep is inserted
391 // inside the partial delete to make sure the destructor is
392 // given an opportunity to run during partial delete.
393 BEAST_EXPECT(cur == partiallyDeleted);
394 }
395 if (next == partiallyDeletedStarted)
396 {
397 partialDeleteStartedSyncPoint.arrive_and_wait();
398 using namespace std::chrono_literals;
399 // Sleep and let the weak pointer go out of scope,
400 // potentially triggering a destructor while partial delete
401 // is running. The test is to make sure that doesn't happen.
403 }
404 if (next == partiallyDeleted)
405 {
406 BEAST_EXPECT(!partialDeleteRan && !destructorRan);
407 partialDeleteRan = true;
408 }
409 if (next == deleted)
410 {
411 BEAST_EXPECT(!destructorRan);
412 destructorRan = true;
413 }
414 };
415 std::thread t1{[&] {
416 partialDeleteStartedSyncPoint.arrive_and_wait();
417 weak.reset(); // Trigger a full delete as soon as the partial
418 // delete starts
419 }};
420 std::thread t2{[&] {
421 strong.reset(); // Trigger a partial delete
422 }};
423 t1.join();
424 t2.join();
425
426 BEAST_EXPECT(destructorRan && partialDeleteRan);
427 }
428
429 void
431 {
432 testcase("Destructor");
433
434 // This test creates two threads. One with a strong pointer and one
435 // with a weak pointer. The weak pointer is reset while the strong
436 // pointer still holds a reference. Then the strong pointer is
437 // reset. Only the destructor should run. The partial destructor
438 // should not be called. Since the weak reset runs to completion
439 // before the strong pointer is reset, threading doesn't add much to
440 // this test, but there is no harm in keeping it.
441
442 using enum TrackedState;
443
444 TIBase::ResetStatesGuard const rsg{true};
445
446 auto strong = make_SharedIntrusive<TIBase>();
447 WeakIntrusive<TIBase> weak{strong};
448 bool destructorRan = false;
449 bool partialDeleteRan = false;
450 std::latch weakResetSyncPoint{2};
451 strong->tracingCallback_ = [&](TrackedState cur, std::optional<TrackedState> next) {
452 using enum TrackedState;
453 if (next == partiallyDeleted)
454 {
455 BEAST_EXPECT(!partialDeleteRan && !destructorRan);
456 partialDeleteRan = true;
457 }
458 if (next == deleted)
459 {
460 BEAST_EXPECT(!destructorRan);
461 destructorRan = true;
462 }
463 };
464 std::thread t1{[&] {
465 weak.reset();
466 weakResetSyncPoint.arrive_and_wait();
467 }};
468 std::thread t2{[&] {
469 weakResetSyncPoint.arrive_and_wait();
470 strong.reset(); // Trigger a partial delete
471 }};
472 t1.join();
473 t2.join();
474
475 BEAST_EXPECT(destructorRan && !partialDeleteRan);
476 }
477
478 void
480 {
481 testcase("Multithreaded Clear Mixed Variant");
482
483 // This test creates and destroys many strong and weak pointers in a
484 // loop. There is a random mix of strong and weak pointers stored in
485 // a vector (held as a variant). Both threads clear all the pointers
486 // and check that the invariants hold.
487
488 using enum TrackedState;
489 TIBase::ResetStatesGuard const rsg{true};
490
491 std::atomic<int> destructionState{0};
492 // returns destructorRan and partialDestructorRan (in that order)
493 auto getDestructorState = [&]() -> std::pair<bool, bool> {
494 int const s = destructionState.load(std::memory_order_relaxed);
495 return {(s & 1) != 0, (s & 2) != 0};
496 };
497 auto setDestructorRan = [&]() -> void {
498 destructionState.fetch_or(1, std::memory_order_acq_rel);
499 };
500 auto setPartialDeleteRan = [&]() -> void {
501 destructionState.fetch_or(2, std::memory_order_acq_rel);
502 };
503 auto tracingCallback = [&](TrackedState cur, std::optional<TrackedState> next) {
504 using enum TrackedState;
505 auto [destructorRan, partialDeleteRan] = getDestructorState();
506 if (next == partiallyDeleted)
507 {
508 BEAST_EXPECT(!partialDeleteRan && !destructorRan);
509 setPartialDeleteRan();
510 }
511 if (next == deleted)
512 {
513 BEAST_EXPECT(!destructorRan);
514 setDestructorRan();
515 }
516 };
517 auto createVecOfPointers = [&](auto const& toClone, std::default_random_engine& eng)
520 std::uniform_int_distribution<> toCreateDist(4, 64);
521 std::uniform_int_distribution<> isStrongDist(0, 1);
522 auto numToCreate = toCreateDist(eng);
523 result.reserve(numToCreate);
524 for (int i = 0; i < numToCreate; ++i)
525 {
526 if (isStrongDist(eng))
527 {
528 result.push_back(SharedIntrusive<TIBase>(toClone));
529 }
530 else
531 {
532 result.push_back(WeakIntrusive<TIBase>(toClone));
533 }
534 }
535 return result;
536 };
537 constexpr int loopIters = 2 * 1024;
538 constexpr int numThreads = 16;
540 Barrier loopStartSyncPoint{numThreads};
541 Barrier postCreateToCloneSyncPoint{numThreads};
542 Barrier postCreateVecOfPointersSyncPoint{numThreads};
543 auto engines = [&]() -> std::vector<std::default_random_engine> {
546 result.reserve(numThreads);
547 for (int i = 0; i < numThreads; ++i)
548 result.emplace_back(rd());
549 return result;
550 }();
551
552 // cloneAndDestroy clones the strong pointer into a vector of mixed
553 // strong and weak pointers and destroys them all at once.
554 // threadId==0 is special.
555 auto cloneAndDestroy = [&](int threadId) {
556 for (int i = 0; i < loopIters; ++i)
557 {
558 // ------ Sync Point ------
559 loopStartSyncPoint.arrive_and_wait();
560
561 // only thread 0 should reset the state
563 if (threadId == 0)
564 {
565 // Thread 0 is the genesis thread. It creates the strong
566 // pointers to be cloned by the other threads. This
567 // thread will also check that the destructor ran and
568 // clear the temporary variables.
569
570 rsg.emplace(false);
571 auto [destructorRan, partialDeleteRan] = getDestructorState();
572 BEAST_EXPECT(!i || destructorRan);
573 destructionState.store(0, std::memory_order_release);
574
575 toClone.clear();
576 toClone.resize(numThreads);
577 auto strong = make_SharedIntrusive<TIBase>();
578 strong->tracingCallback_ = tracingCallback;
579 std::fill(toClone.begin(), toClone.end(), strong);
580 }
581
582 // ------ Sync Point ------
583 postCreateToCloneSyncPoint.arrive_and_wait();
584
585 auto v = createVecOfPointers(toClone[threadId], engines[threadId]);
586 toClone[threadId].reset();
587
588 // ------ Sync Point ------
589 postCreateVecOfPointersSyncPoint.arrive_and_wait();
590
591 v.clear();
592 }
593 };
595 threads.reserve(numThreads);
596 for (int i = 0; i < numThreads; ++i)
597 {
598 threads.emplace_back(cloneAndDestroy, i);
599 }
600 for (int i = 0; i < numThreads; ++i)
601 {
602 threads[i].join();
603 }
604 }
605
606 void
608 {
609 testcase("Multithreaded Clear Mixed Union");
610
611 // This test creates and destroys many SharedWeak pointers in a
612 // loop. All the pointers start as strong and a loop randomly
613 // convert them between strong and weak pointers. Both threads clear
614 // all the pointers and check that the invariants hold.
615 //
616 // Note: This test also differs from the test above in that the pointers
617 // randomly change from strong to weak and from weak to strong in a
618 // loop. This can't be done in the variant test above because variant is
619 // not thread safe while the SharedWeakUnion is thread safe.
620
621 using enum TrackedState;
622
623 TIBase::ResetStatesGuard const rsg{true};
624
625 std::atomic<int> destructionState{0};
626 // returns destructorRan and partialDestructorRan (in that order)
627 auto getDestructorState = [&]() -> std::pair<bool, bool> {
628 int const s = destructionState.load(std::memory_order_relaxed);
629 return {(s & 1) != 0, (s & 2) != 0};
630 };
631 auto setDestructorRan = [&]() -> void {
632 destructionState.fetch_or(1, std::memory_order_acq_rel);
633 };
634 auto setPartialDeleteRan = [&]() -> void {
635 destructionState.fetch_or(2, std::memory_order_acq_rel);
636 };
637 auto tracingCallback = [&](TrackedState cur, std::optional<TrackedState> next) {
638 using enum TrackedState;
639 auto [destructorRan, partialDeleteRan] = getDestructorState();
640 if (next == partiallyDeleted)
641 {
642 BEAST_EXPECT(!partialDeleteRan && !destructorRan);
643 setPartialDeleteRan();
644 }
645 if (next == deleted)
646 {
647 BEAST_EXPECT(!destructorRan);
648 setDestructorRan();
649 }
650 };
651 auto createVecOfPointers =
652 [&](auto const& toClone,
655 std::uniform_int_distribution<> toCreateDist(4, 64);
656 auto numToCreate = toCreateDist(eng);
657 result.reserve(numToCreate);
658 for (int i = 0; i < numToCreate; ++i)
659 result.push_back(SharedIntrusive<TIBase>(toClone));
660 return result;
661 };
662 constexpr int loopIters = 2 * 1024;
663 constexpr int flipPointersLoopIters = 256;
664 constexpr int numThreads = 16;
666 Barrier loopStartSyncPoint{numThreads};
667 Barrier postCreateToCloneSyncPoint{numThreads};
668 Barrier postCreateVecOfPointersSyncPoint{numThreads};
669 Barrier postFlipPointersLoopSyncPoint{numThreads};
670 auto engines = [&]() -> std::vector<std::default_random_engine> {
673 result.reserve(numThreads);
674 for (int i = 0; i < numThreads; ++i)
675 result.emplace_back(rd());
676 return result;
677 }();
678
679 // cloneAndDestroy clones the strong pointer into a vector of
680 // mixed strong and weak pointers, runs a loop that randomly
681 // changes strong pointers to weak pointers, and destroys them
682 // all at once.
683 auto cloneAndDestroy = [&](int threadId) {
684 for (int i = 0; i < loopIters; ++i)
685 {
686 // ------ Sync Point ------
687 loopStartSyncPoint.arrive_and_wait();
688
689 // only thread 0 should reset the state
691 if (threadId == 0)
692 {
693 // threadId 0 is the genesis thread. It creates the
694 // strong point to be cloned by the other threads. This
695 // thread will also check that the destructor ran and
696 // clear the temporary variables.
697 rsg.emplace(false);
698 auto [destructorRan, partialDeleteRan] = getDestructorState();
699 BEAST_EXPECT(!i || destructorRan);
700 destructionState.store(0, std::memory_order_release);
701
702 toClone.clear();
703 toClone.resize(numThreads);
704 auto strong = make_SharedIntrusive<TIBase>();
705 strong->tracingCallback_ = tracingCallback;
706 std::fill(toClone.begin(), toClone.end(), strong);
707 }
708
709 // ------ Sync Point ------
710 postCreateToCloneSyncPoint.arrive_and_wait();
711
712 auto v = createVecOfPointers(toClone[threadId], engines[threadId]);
713 toClone[threadId].reset();
714
715 // ------ Sync Point ------
716 postCreateVecOfPointersSyncPoint.arrive_and_wait();
717
718 std::uniform_int_distribution<> isStrongDist(0, 1);
719 for (int f = 0; f < flipPointersLoopIters; ++f)
720 {
721 for (auto& p : v)
722 {
723 if (isStrongDist(engines[threadId]))
724 {
725 p.convertToStrong();
726 }
727 else
728 {
729 p.convertToWeak();
730 }
731 }
732 }
733
734 // ------ Sync Point ------
735 postFlipPointersLoopSyncPoint.arrive_and_wait();
736
737 v.clear();
738 }
739 };
741 threads.reserve(numThreads);
742 for (int i = 0; i < numThreads; ++i)
743 {
744 threads.emplace_back(cloneAndDestroy, i);
745 }
746 for (int i = 0; i < numThreads; ++i)
747 {
748 threads[i].join();
749 }
750 }
751
752 void
754 {
755 testcase("Multithreaded Locking Weak");
756
757 // This test creates a single shared atomic pointer that multiple thread
758 // create weak pointers from. The threads then lock the weak pointers.
759 // Both threads clear all the pointers and check that the invariants
760 // hold.
761
762 using enum TrackedState;
763
764 TIBase::ResetStatesGuard const rsg{true};
765
766 std::atomic<int> destructionState{0};
767 // returns destructorRan and partialDestructorRan (in that order)
768 auto getDestructorState = [&]() -> std::pair<bool, bool> {
769 int const s = destructionState.load(std::memory_order_relaxed);
770 return {(s & 1) != 0, (s & 2) != 0};
771 };
772 auto setDestructorRan = [&]() -> void {
773 destructionState.fetch_or(1, std::memory_order_acq_rel);
774 };
775 auto setPartialDeleteRan = [&]() -> void {
776 destructionState.fetch_or(2, std::memory_order_acq_rel);
777 };
778 auto tracingCallback = [&](TrackedState cur, std::optional<TrackedState> next) {
779 using enum TrackedState;
780 auto [destructorRan, partialDeleteRan] = getDestructorState();
781 if (next == partiallyDeleted)
782 {
783 BEAST_EXPECT(!partialDeleteRan && !destructorRan);
784 setPartialDeleteRan();
785 }
786 if (next == deleted)
787 {
788 BEAST_EXPECT(!destructorRan);
789 setDestructorRan();
790 }
791 };
792
793 constexpr int loopIters = 2 * 1024;
794 constexpr int lockWeakLoopIters = 256;
795 constexpr int numThreads = 16;
797 Barrier loopStartSyncPoint{numThreads};
798 Barrier postCreateToLockSyncPoint{numThreads};
799 Barrier postLockWeakLoopSyncPoint{numThreads};
800
801 // lockAndDestroy creates weak pointers from the strong pointer
802 // and runs a loop that locks the weak pointer. At the end of the loop
803 // all the pointers are destroyed all at once.
804 auto lockAndDestroy = [&](int threadId) {
805 for (int i = 0; i < loopIters; ++i)
806 {
807 // ------ Sync Point ------
808 loopStartSyncPoint.arrive_and_wait();
809
810 // only thread 0 should reset the state
812 if (threadId == 0)
813 {
814 // threadId 0 is the genesis thread. It creates the
815 // strong point to be locked by the other threads. This
816 // thread will also check that the destructor ran and
817 // clear the temporary variables.
818 rsg.emplace(false);
819 auto [destructorRan, partialDeleteRan] = getDestructorState();
820 BEAST_EXPECT(!i || destructorRan);
821 destructionState.store(0, std::memory_order_release);
822
823 toLock.clear();
824 toLock.resize(numThreads);
825 auto strong = make_SharedIntrusive<TIBase>();
826 strong->tracingCallback_ = tracingCallback;
827 std::fill(toLock.begin(), toLock.end(), strong);
828 }
829
830 // ------ Sync Point ------
831 postCreateToLockSyncPoint.arrive_and_wait();
832
833 // Multiple threads all create a weak pointer from the same
834 // strong pointer
835 WeakIntrusive const weak{toLock[threadId]};
836 for (int wi = 0; wi < lockWeakLoopIters; ++wi)
837 {
838 BEAST_EXPECT(!weak.expired());
839 auto strong = weak.lock();
840 BEAST_EXPECT(strong);
841 }
842
843 // ------ Sync Point ------
844 postLockWeakLoopSyncPoint.arrive_and_wait();
845
846 toLock[threadId].reset();
847 }
848 };
850 threads.reserve(numThreads);
851 for (int i = 0; i < numThreads; ++i)
852 {
853 threads.emplace_back(lockAndDestroy, i);
854 }
855 for (int i = 0; i < numThreads; ++i)
856 {
857 threads[i].join();
858 }
859 }
860
861 void
871}; // namespace tests
872
873BEAST_DEFINE_TESTSUITE(IntrusiveShared, basics, xrpl);
874} // namespace tests
875} // namespace xrpl
T begin(T... args)
A testsuite class.
Definition suite.h:51
testcase_t testcase
Memberspace for declaring test cases.
Definition suite.h:150
A shared intrusive pointer class that supports weak pointers.
A combination of a strong and a weak intrusive pointer stored in the space of a single pointer.
void reset()
Set the pointer to null, decrement the appropriate ref count, and run the appropriate release action.
T * get() const
If this is a strong pointer, return the raw pointer.
bool isStrong() const
Return true is this represents a strong pointer.
A weak intrusive pointer class for the SharedIntrusive pointer class.
void run() override
Runs the suite.
T clear(T... args)
T emplace_back(T... args)
T emplace(T... args)
T empty(T... args)
T end(T... args)
T fetch_add(T... args)
T fill(T... args)
T is_same_v
T join(T... args)
Use hash_* containers for keys that do not need a cryptographically secure hashing algorithm.
Definition algorithm.h:5
void partialDestructorFinished(T **o)
T push_back(T... args)
T reserve(T... args)
T resize(T... args)
T size(T... args)
T sleep_for(T... args)
T store(T... args)
Experimentally, we discovered that using std::barrier performs extremely poorly (~1 hour vs ~1 minute...
std::condition_variable cv