IntrusiveShared_test.cpp
#include <test/unit_test/SuiteJournal.h>

#include <xrpl/basics/IntrusivePointer.ipp>
#include <xrpl/basics/IntrusiveRefCounts.h>
#include <xrpl/beast/unit_test.h>
#include <xrpl/beast/utility/Journal.h>

#include <algorithm>
#include <array>
#include <atomic>
#include <barrier>
#include <cassert>
#include <chrono>
#include <condition_variable>
#include <functional>
#include <latch>
#include <mutex>
#include <optional>
#include <random>
#include <string>
#include <thread>
#include <utility>
#include <variant>
#include <vector>

namespace ripple {
namespace tests {

/** A mutex-based barrier.

    Experimentally, we discovered that using std::barrier performs extremely
    poorly (~1 hour vs ~1 minute for these tests), so this simple
    mutex/condition-variable barrier is used instead.
*/
struct Barrier
{
    std::mutex mtx;
    std::condition_variable cv;
    int count;
    int const initial;

    Barrier(int n) : count(n), initial(n)
    {
    }

    void
    arrive_and_wait()
    {
        std::unique_lock<std::mutex> lock(mtx);
        if (--count == 0)
        {
            count = initial;
            cv.notify_all();
        }
        else
        {
            cv.wait(lock, [&] { return count == initial; });
        }
    }
};
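
// Minimal usage sketch of Barrier (illustrative only): no thread returns
// from arrive_and_wait() until all `initial` threads have arrived.
//
//     Barrier sync{2};
//     std::thread t{[&] { sync.arrive_and_wait(); }};
//     sync.arrive_and_wait();
//     t.join();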

namespace {
enum class TrackedState : std::uint8_t {
    uninitialized,
    alive,
    partiallyDeletedStarted,
    partiallyDeleted,
    deletedStarted,
    deleted
};
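
// Expected lifecycle (informal sketch, inferred from the tests below): an
// object starts alive; if weak references outlive the last strong reference
// it passes through partiallyDeletedStarted/partiallyDeleted before
// deletedStarted/deleted; otherwise it goes straight from alive to
// deletedStarted to deleted.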

class TIBase : public IntrusiveRefCounts
{
public:
    static constexpr std::size_t maxStates = 128;
    static std::array<std::atomic<TrackedState>, maxStates> state;
    static std::atomic<int> nextId;
    static TrackedState
    getState(int id)
    {
        assert(id < state.size());
        return state[id].load(std::memory_order_acquire);
    }
    static void
    resetStates(bool resetCallback)
    {
        for (int i = 0; i < maxStates; ++i)
        {
            state[i].store(
                TrackedState::uninitialized, std::memory_order_release);
        }

        if (resetCallback)
            TIBase::tracingCallback_ =
                [](TrackedState, std::optional<TrackedState>) {};
    }

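    // RAII helper: resets the per-object state table (and, optionally, the
    // tracing callback) on both construction and destruction, so each test
    // block starts and finishes with a clean slate.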
    struct ResetStatesGuard
    {
        bool resetCallback_{false};

        ResetStatesGuard(bool resetCallback) : resetCallback_{resetCallback}
        {
            TIBase::resetStates(resetCallback_);
        }
        ~ResetStatesGuard()
        {
            TIBase::resetStates(resetCallback_);
        }
    };

    TIBase() : id_{checkoutID()}
    {
        assert(state.size() > id_);
        state[id_].store(TrackedState::alive, std::memory_order_relaxed);
    }
    ~TIBase()
    {
        using enum TrackedState;

        assert(state.size() > id_);
        tracingCallback_(
            state[id_].load(std::memory_order_relaxed), deletedStarted);

        assert(state.size() > id_);
        // Use relaxed memory order to keep these atomic operations from
        // adding memory synchronization that might hide threading errors
        // in the underlying shared pointer class.
        state[id_].store(deletedStarted, std::memory_order_relaxed);

        tracingCallback_(deletedStarted, deleted);

        assert(state.size() > id_);
        state[id_].store(TrackedState::deleted, std::memory_order_relaxed);

        tracingCallback_(TrackedState::deleted, std::nullopt);
    }

    void
    partialDestructor()
    {
        using enum TrackedState;

        assert(state.size() > id_);
        tracingCallback_(
            state[id_].load(std::memory_order_relaxed),
            partiallyDeletedStarted);

        assert(state.size() > id_);
        state[id_].store(partiallyDeletedStarted, std::memory_order_relaxed);

        tracingCallback_(partiallyDeletedStarted, partiallyDeleted);

        assert(state.size() > id_);
        state[id_].store(partiallyDeleted, std::memory_order_relaxed);

        tracingCallback_(partiallyDeleted, std::nullopt);
    }

    static std::function<void(TrackedState, std::optional<TrackedState>)>
        tracingCallback_;

    int id_;

private:
    static int
    checkoutID()
    {
        return nextId.fetch_add(1, std::memory_order_acq_rel);
    }
};
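
// TIBase models an intrusively ref-counted object: partialDestructor() is
// expected to run when the last strong reference is released while weak
// references remain, and ~TIBase() once the last weak reference goes away.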

std::array<std::atomic<TrackedState>, TIBase::maxStates> TIBase::state;
std::atomic<int> TIBase::nextId{0};

std::function<void(TrackedState, std::optional<TrackedState>)>
    TIBase::tracingCallback_ =
        [](TrackedState, std::optional<TrackedState>) {};

} // namespace

class IntrusiveShared_test : public beast::unit_test::suite
{
public:
    void
    testBasics()
    {
        testcase("Basics");

        {
            TIBase::ResetStatesGuard rsg{true};

            TIBase b;
            BEAST_EXPECT(b.use_count() == 1);
            b.addWeakRef();
            BEAST_EXPECT(b.use_count() == 1);
            auto s = b.releaseStrongRef();
            BEAST_EXPECT(s == ReleaseStrongRefAction::partialDestroy);
            BEAST_EXPECT(b.use_count() == 0);
            TIBase* pb = &b;
            partialDestructorFinished(&pb);
            BEAST_EXPECT(!pb);
            auto w = b.releaseWeakRef();
            BEAST_EXPECT(w == ReleaseWeakRefAction::destroy);
        }

        std::vector<SharedIntrusive<TIBase>> strong;
        std::vector<WeakIntrusive<TIBase>> weak;
        {
            TIBase::ResetStatesGuard rsg{true};

            using enum TrackedState;
            auto b = make_SharedIntrusive<TIBase>();
            auto id = b->id_;
            BEAST_EXPECT(TIBase::getState(id) == alive);
            BEAST_EXPECT(b->use_count() == 1);
            for (int i = 0; i < 10; ++i)
            {
                strong.push_back(b);
            }
            b.reset();
            BEAST_EXPECT(TIBase::getState(id) == alive);
            strong.resize(strong.size() - 1);
            BEAST_EXPECT(TIBase::getState(id) == alive);
            strong.clear();
            BEAST_EXPECT(TIBase::getState(id) == deleted);

            b = make_SharedIntrusive<TIBase>();
            id = b->id_;
            BEAST_EXPECT(TIBase::getState(id) == alive);
            BEAST_EXPECT(b->use_count() == 1);
            for (int i = 0; i < 10; ++i)
            {
                weak.push_back(b);
                BEAST_EXPECT(b->use_count() == 1);
            }
            BEAST_EXPECT(TIBase::getState(id) == alive);
            weak.resize(weak.size() - 1);
            BEAST_EXPECT(TIBase::getState(id) == alive);
            b.reset();
            BEAST_EXPECT(TIBase::getState(id) == partiallyDeleted);
            while (!weak.empty())
            {
                weak.resize(weak.size() - 1);
                if (weak.size())
                    BEAST_EXPECT(TIBase::getState(id) == partiallyDeleted);
            }
            BEAST_EXPECT(TIBase::getState(id) == deleted);
        }
        {
            TIBase::ResetStatesGuard rsg{true};

            using enum TrackedState;
            auto b = make_SharedIntrusive<TIBase>();
            auto id = b->id_;
            BEAST_EXPECT(TIBase::getState(id) == alive);
            WeakIntrusive<TIBase> w{b};
            BEAST_EXPECT(TIBase::getState(id) == alive);
            auto s = w.lock();
            BEAST_EXPECT(s && s->use_count() == 2);
            b.reset();
            BEAST_EXPECT(TIBase::getState(id) == alive);
            BEAST_EXPECT(s && s->use_count() == 1);
            s.reset();
            BEAST_EXPECT(TIBase::getState(id) == partiallyDeleted);
            BEAST_EXPECT(w.expired());
            s = w.lock();
            // Cannot convert a weak pointer to a strong pointer if the
            // object is already partially deleted
            BEAST_EXPECT(!s);
            w.reset();
            BEAST_EXPECT(TIBase::getState(id) == deleted);
        }
        {
            TIBase::ResetStatesGuard rsg{true};

            using enum TrackedState;
            using swu = SharedWeakUnion<TIBase>;
            swu b = make_SharedIntrusive<TIBase>();
            BEAST_EXPECT(b.isStrong() && b.use_count() == 1);
            auto id = b.get()->id_;
            BEAST_EXPECT(TIBase::getState(id) == alive);
            swu w = b;
            BEAST_EXPECT(TIBase::getState(id) == alive);
            BEAST_EXPECT(w.isStrong() && b.use_count() == 2);
            w.convertToWeak();
            BEAST_EXPECT(w.isWeak() && b.use_count() == 1);
            swu s = w;
            BEAST_EXPECT(s.isWeak() && b.use_count() == 1);
            s.convertToStrong();
            BEAST_EXPECT(s.isStrong() && b.use_count() == 2);
            b.reset();
            BEAST_EXPECT(TIBase::getState(id) == alive);
            BEAST_EXPECT(s.use_count() == 1);
            BEAST_EXPECT(!w.expired());
            s.reset();
            BEAST_EXPECT(TIBase::getState(id) == partiallyDeleted);
            BEAST_EXPECT(w.expired());
            w.convertToStrong();
            // Cannot convert a weak pointer to a strong pointer if the
            // object is already partially deleted
            BEAST_EXPECT(w.isWeak());
            w.reset();
            BEAST_EXPECT(TIBase::getState(id) == deleted);
        }
        {
            // Testing SharedWeakUnion assignment operator

            TIBase::ResetStatesGuard rsg{true};

            auto strong1 = make_SharedIntrusive<TIBase>();
            auto strong2 = make_SharedIntrusive<TIBase>();

            auto id1 = strong1->id_;
            auto id2 = strong2->id_;

            BEAST_EXPECT(id1 != id2);

            SharedWeakUnion<TIBase> union1 = strong1;
            SharedWeakUnion<TIBase> union2 = strong2;

            BEAST_EXPECT(union1.isStrong());
            BEAST_EXPECT(union2.isStrong());
            BEAST_EXPECT(union1.get() == strong1.get());
            BEAST_EXPECT(union2.get() == strong2.get());

            // 1) Normal assignment: explicitly calls SharedWeakUnion
            // assignment
            union1 = union2;
            BEAST_EXPECT(union1.isStrong());
            BEAST_EXPECT(union2.isStrong());
            BEAST_EXPECT(union1.get() == union2.get());
            BEAST_EXPECT(TIBase::getState(id1) == TrackedState::alive);
            BEAST_EXPECT(TIBase::getState(id2) == TrackedState::alive);

            // 2) Test self-assignment
            BEAST_EXPECT(union1.isStrong());
            BEAST_EXPECT(TIBase::getState(id1) == TrackedState::alive);
            int initialRefCount = strong1->use_count();
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wself-assign-overloaded"
            union1 = union1;  // Self-assignment
#pragma clang diagnostic pop
            BEAST_EXPECT(union1.isStrong());
            BEAST_EXPECT(TIBase::getState(id1) == TrackedState::alive);
            BEAST_EXPECT(strong1->use_count() == initialRefCount);

            // 3) Test assignment from a null union pointer
            union1 = SharedWeakUnion<TIBase>();
            BEAST_EXPECT(union1.get() == nullptr);

            // 4) Test assignment from a reset union pointer
            strong2.reset();
            union2.reset();
            union1 = union2;
            BEAST_EXPECT(union1.get() == nullptr);
            BEAST_EXPECT(TIBase::getState(id2) == TrackedState::deleted);
        }
    }

    void
    testPartialDelete()
    {
        testcase("Partial Delete");

        // This test creates two threads: one holding a strong pointer and
        // one holding a weak pointer. The strong pointer is reset while the
        // weak pointer still holds a reference, triggering a partial
        // delete. While the partial delete function runs (a sleep is
        // inserted), the weak pointer is reset. The destructor must wait to
        // run until after the partial delete function has completed.

        using enum TrackedState;

        TIBase::ResetStatesGuard rsg{true};

        auto strong = make_SharedIntrusive<TIBase>();
        WeakIntrusive<TIBase> weak{strong};
        bool destructorRan = false;
        bool partialDeleteRan = false;
        std::latch partialDeleteStartedSyncPoint{2};
        strong->tracingCallback_ = [&](TrackedState cur,
                                       std::optional<TrackedState> next) {
            using enum TrackedState;
            if (next == deletedStarted)
            {
                // strong goes out of scope while weak is still in scope.
                // This checks that partialDelete has run to completion
                // before the destructor is called. A sleep is inserted
                // inside the partial delete to make sure the destructor is
                // given an opportunity to run during the partial delete.
                BEAST_EXPECT(cur == partiallyDeleted);
            }
            if (next == partiallyDeletedStarted)
            {
                partialDeleteStartedSyncPoint.arrive_and_wait();
                using namespace std::chrono_literals;
                // Sleep and let the weak pointer go out of scope,
                // potentially triggering a destructor while partial delete
                // is running. The test is to make sure that doesn't happen.
                std::this_thread::sleep_for(800ms);
            }
            if (next == partiallyDeleted)
            {
                BEAST_EXPECT(!partialDeleteRan && !destructorRan);
                partialDeleteRan = true;
            }
            if (next == deleted)
            {
                BEAST_EXPECT(!destructorRan);
                destructorRan = true;
            }
        };
        std::thread t1{[&] {
            partialDeleteStartedSyncPoint.arrive_and_wait();
            weak.reset();  // Trigger a full delete as soon as the partial
                           // delete starts
        }};
        std::thread t2{[&] {
            strong.reset();  // Trigger a partial delete
        }};
        t1.join();
        t2.join();

        BEAST_EXPECT(destructorRan && partialDeleteRan);
    }

    void
    testDestructor()
    {
        testcase("Destructor");

        // This test creates two threads: one holding a strong pointer and
        // one holding a weak pointer. The weak pointer is reset while the
        // strong pointer still holds a reference; then the strong pointer
        // is reset. Only the destructor should run; the partial destructor
        // must not be called. Since the weak reset runs to completion
        // before the strong pointer is reset, threading doesn't add much to
        // this test, but there is no harm in keeping it.

        using enum TrackedState;

        TIBase::ResetStatesGuard rsg{true};

        auto strong = make_SharedIntrusive<TIBase>();
        WeakIntrusive<TIBase> weak{strong};
        bool destructorRan = false;
        bool partialDeleteRan = false;
        std::latch weakResetSyncPoint{2};
        strong->tracingCallback_ = [&](TrackedState cur,
                                       std::optional<TrackedState> next) {
            using enum TrackedState;
            if (next == partiallyDeleted)
            {
                BEAST_EXPECT(!partialDeleteRan && !destructorRan);
                partialDeleteRan = true;
            }
            if (next == deleted)
            {
                BEAST_EXPECT(!destructorRan);
                destructorRan = true;
            }
        };
        std::thread t1{[&] {
            weak.reset();
            weakResetSyncPoint.arrive_and_wait();
        }};
        std::thread t2{[&] {
            weakResetSyncPoint.arrive_and_wait();
            strong.reset();  // Trigger the destructor; no weak references
                             // remain, so no partial delete occurs
        }};
        t1.join();
        t2.join();

        BEAST_EXPECT(destructorRan && !partialDeleteRan);
    }

    void
    testMultithreadedClearMixedVariant()
    {
        testcase("Multithreaded Clear Mixed Variant");

        // This test creates and destroys many strong and weak pointers in
        // a loop. A random mix of strong and weak pointers is stored in a
        // vector (each element held as a variant). All threads clear their
        // pointers and check that the invariants hold.

        using enum TrackedState;
        TIBase::ResetStatesGuard rsg{true};

        std::atomic<int> destructionState{0};
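        // The two destruction flags are packed into one atomic int: bit 0
        // records that the destructor ran, bit 1 that the partial
        // destructor ran.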
        // returns destructorRan and partialDestructorRan (in that order)
        auto getDestructorState = [&]() -> std::pair<bool, bool> {
            int s = destructionState.load(std::memory_order_relaxed);
            return {(s & 1) != 0, (s & 2) != 0};
        };
        auto setDestructorRan = [&]() -> void {
            destructionState.fetch_or(1, std::memory_order_acq_rel);
        };
        auto setPartialDeleteRan = [&]() -> void {
            destructionState.fetch_or(2, std::memory_order_acq_rel);
        };
        auto tracingCallback = [&](TrackedState cur,
                                   std::optional<TrackedState> next) {
            using enum TrackedState;
            auto [destructorRan, partialDeleteRan] = getDestructorState();
            if (next == partiallyDeleted)
            {
                BEAST_EXPECT(!partialDeleteRan && !destructorRan);
                setPartialDeleteRan();
            }
            if (next == deleted)
            {
                BEAST_EXPECT(!destructorRan);
                setDestructorRan();
            }
        };
        auto createVecOfPointers = [&](auto const& toClone,
                                       std::default_random_engine& eng)
            -> std::vector<
                std::variant<SharedIntrusive<TIBase>, WeakIntrusive<TIBase>>> {
            std::vector<
                std::variant<SharedIntrusive<TIBase>, WeakIntrusive<TIBase>>>
                result;
            std::uniform_int_distribution<> toCreateDist(4, 64);
            std::uniform_int_distribution<> isStrongDist(0, 1);
            auto numToCreate = toCreateDist(eng);
            result.reserve(numToCreate);
            for (int i = 0; i < numToCreate; ++i)
            {
                if (isStrongDist(eng))
                {
                    result.push_back(SharedIntrusive<TIBase>(toClone));
                }
                else
                {
                    result.push_back(WeakIntrusive<TIBase>(toClone));
                }
            }
            return result;
        };
        constexpr int loopIters = 2 * 1024;
        constexpr int numThreads = 16;
        std::vector<SharedIntrusive<TIBase>> toClone;
        Barrier loopStartSyncPoint{numThreads};
        Barrier postCreateToCloneSyncPoint{numThreads};
        Barrier postCreateVecOfPointersSyncPoint{numThreads};
        auto engines = [&]() -> std::vector<std::default_random_engine> {
            std::random_device rd;
            std::vector<std::default_random_engine> result;
            result.reserve(numThreads);
            for (int i = 0; i < numThreads; ++i)
                result.emplace_back(rd());
            return result;
        }();

        // cloneAndDestroy clones the strong pointer into a vector of mixed
        // strong and weak pointers and destroys them all at once.
        // threadId == 0 is special.
        auto cloneAndDestroy = [&](int threadId) {
            for (int i = 0; i < loopIters; ++i)
            {
                // ------ Sync Point ------
                loopStartSyncPoint.arrive_and_wait();

                // only thread 0 should reset the state
                std::optional<TIBase::ResetStatesGuard> rsg;
                if (threadId == 0)
                {
                    // Thread 0 is the genesis thread. It creates the strong
                    // pointers to be cloned by the other threads. This
                    // thread also checks that the destructor ran and clears
                    // the temporary variables.

                    rsg.emplace(false);
                    auto [destructorRan, partialDeleteRan] =
                        getDestructorState();
                    BEAST_EXPECT(!i || destructorRan);
                    destructionState.store(0, std::memory_order_release);

                    toClone.clear();
                    toClone.resize(numThreads);
                    auto strong = make_SharedIntrusive<TIBase>();
                    strong->tracingCallback_ = tracingCallback;
                    std::fill(toClone.begin(), toClone.end(), strong);
                }

                // ------ Sync Point ------
                postCreateToCloneSyncPoint.arrive_and_wait();

                auto v =
                    createVecOfPointers(toClone[threadId], engines[threadId]);
                toClone[threadId].reset();

                // ------ Sync Point ------
                postCreateVecOfPointersSyncPoint.arrive_and_wait();

                v.clear();
            }
        };
        std::vector<std::thread> threads;
        for (int i = 0; i < numThreads; ++i)
        {
            threads.emplace_back(cloneAndDestroy, i);
        }
        for (int i = 0; i < numThreads; ++i)
        {
            threads[i].join();
        }
    }

    void
    testMultithreadedClearMixedUnion()
    {
        testcase("Multithreaded Clear Mixed Union");

        // This test creates and destroys many SharedWeakUnion pointers in
        // a loop. All the pointers start as strong, and a loop randomly
        // converts them between strong and weak. All threads clear their
        // pointers and check that the invariants hold.
        //
        // Note: This test differs from the variant test above in that the
        // pointers randomly flip between strong and weak in a loop. That
        // can't be done in the variant test because std::variant is not
        // thread-safe, while SharedWeakUnion is.
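        //
        // (SharedWeakUnion stores either a strong or a weak intrusive
        // pointer in the space of a single pointer, which is what lets
        // each thread flip its own copies while other threads release
        // theirs.)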

        using enum TrackedState;

        TIBase::ResetStatesGuard rsg{true};

        std::atomic<int> destructionState{0};
        // returns destructorRan and partialDestructorRan (in that order)
        auto getDestructorState = [&]() -> std::pair<bool, bool> {
            int s = destructionState.load(std::memory_order_relaxed);
            return {(s & 1) != 0, (s & 2) != 0};
        };
        auto setDestructorRan = [&]() -> void {
            destructionState.fetch_or(1, std::memory_order_acq_rel);
        };
        auto setPartialDeleteRan = [&]() -> void {
            destructionState.fetch_or(2, std::memory_order_acq_rel);
        };
        auto tracingCallback = [&](TrackedState cur,
                                   std::optional<TrackedState> next) {
            using enum TrackedState;
            auto [destructorRan, partialDeleteRan] = getDestructorState();
            if (next == partiallyDeleted)
            {
                BEAST_EXPECT(!partialDeleteRan && !destructorRan);
                setPartialDeleteRan();
            }
            if (next == deleted)
            {
                BEAST_EXPECT(!destructorRan);
                setDestructorRan();
            }
        };
        auto createVecOfPointers = [&](auto const& toClone,
                                       std::default_random_engine& eng)
            -> std::vector<SharedWeakUnion<TIBase>> {
            std::vector<SharedWeakUnion<TIBase>> result;
            std::uniform_int_distribution<> toCreateDist(4, 64);
            auto numToCreate = toCreateDist(eng);
            result.reserve(numToCreate);
            for (int i = 0; i < numToCreate; ++i)
                result.push_back(SharedIntrusive<TIBase>(toClone));
            return result;
        };
        constexpr int loopIters = 2 * 1024;
        constexpr int flipPointersLoopIters = 256;
        constexpr int numThreads = 16;
        std::vector<SharedIntrusive<TIBase>> toClone;
        Barrier loopStartSyncPoint{numThreads};
        Barrier postCreateToCloneSyncPoint{numThreads};
        Barrier postCreateVecOfPointersSyncPoint{numThreads};
        Barrier postFlipPointersLoopSyncPoint{numThreads};
        auto engines = [&]() -> std::vector<std::default_random_engine> {
            std::random_device rd;
            std::vector<std::default_random_engine> result;
            result.reserve(numThreads);
            for (int i = 0; i < numThreads; ++i)
                result.emplace_back(rd());
            return result;
        }();

        // cloneAndDestroy clones the strong pointer into a vector of
        // mixed strong and weak pointers, runs a loop that randomly
        // flips the pointers between strong and weak, and destroys them
        // all at once.
        auto cloneAndDestroy = [&](int threadId) {
            for (int i = 0; i < loopIters; ++i)
            {
                // ------ Sync Point ------
                loopStartSyncPoint.arrive_and_wait();

                // only thread 0 should reset the state
                std::optional<TIBase::ResetStatesGuard> rsg;
                if (threadId == 0)
                {
                    // Thread 0 is the genesis thread. It creates the
                    // strong pointer to be cloned by the other threads.
                    // This thread also checks that the destructor ran and
                    // clears the temporary variables.
                    rsg.emplace(false);
                    auto [destructorRan, partialDeleteRan] =
                        getDestructorState();
                    BEAST_EXPECT(!i || destructorRan);
                    destructionState.store(0, std::memory_order_release);

                    toClone.clear();
                    toClone.resize(numThreads);
                    auto strong = make_SharedIntrusive<TIBase>();
                    strong->tracingCallback_ = tracingCallback;
                    std::fill(toClone.begin(), toClone.end(), strong);
                }

                // ------ Sync Point ------
                postCreateToCloneSyncPoint.arrive_and_wait();

                auto v =
                    createVecOfPointers(toClone[threadId], engines[threadId]);
                toClone[threadId].reset();

                // ------ Sync Point ------
                postCreateVecOfPointersSyncPoint.arrive_and_wait();

                std::uniform_int_distribution<> isStrongDist(0, 1);
                for (int f = 0; f < flipPointersLoopIters; ++f)
                {
                    for (auto& p : v)
                    {
                        if (isStrongDist(engines[threadId]))
                        {
                            p.convertToStrong();
                        }
                        else
                        {
                            p.convertToWeak();
                        }
                    }
                }

                // ------ Sync Point ------
                postFlipPointersLoopSyncPoint.arrive_and_wait();

                v.clear();
            }
        };
        std::vector<std::thread> threads;
        for (int i = 0; i < numThreads; ++i)
        {
            threads.emplace_back(cloneAndDestroy, i);
        }
        for (int i = 0; i < numThreads; ++i)
        {
            threads[i].join();
        }
    }

    void
    testMultithreadedLockingWeak()
    {
        testcase("Multithreaded Locking Weak");

        // This test creates a single shared pointer from which multiple
        // threads create weak pointers. The threads then repeatedly lock
        // the weak pointers. All threads clear their pointers and check
        // that the invariants hold.

        using enum TrackedState;

        TIBase::ResetStatesGuard rsg{true};

        std::atomic<int> destructionState{0};
        // returns destructorRan and partialDestructorRan (in that order)
        auto getDestructorState = [&]() -> std::pair<bool, bool> {
            int s = destructionState.load(std::memory_order_relaxed);
            return {(s & 1) != 0, (s & 2) != 0};
        };
        auto setDestructorRan = [&]() -> void {
            destructionState.fetch_or(1, std::memory_order_acq_rel);
        };
        auto setPartialDeleteRan = [&]() -> void {
            destructionState.fetch_or(2, std::memory_order_acq_rel);
        };
        auto tracingCallback = [&](TrackedState cur,
                                   std::optional<TrackedState> next) {
            using enum TrackedState;
            auto [destructorRan, partialDeleteRan] = getDestructorState();
            if (next == partiallyDeleted)
            {
                BEAST_EXPECT(!partialDeleteRan && !destructorRan);
                setPartialDeleteRan();
            }
            if (next == deleted)
            {
                BEAST_EXPECT(!destructorRan);
                setDestructorRan();
            }
        };

        constexpr int loopIters = 2 * 1024;
        constexpr int lockWeakLoopIters = 256;
        constexpr int numThreads = 16;
        std::vector<SharedIntrusive<TIBase>> toLock;
        Barrier loopStartSyncPoint{numThreads};
        Barrier postCreateToLockSyncPoint{numThreads};
        Barrier postLockWeakLoopSyncPoint{numThreads};

        // lockAndDestroy creates weak pointers from the strong pointer
        // and runs a loop that locks the weak pointer. At the end of the
        // loop all the pointers are destroyed at once.
        auto lockAndDestroy = [&](int threadId) {
            for (int i = 0; i < loopIters; ++i)
            {
                // ------ Sync Point ------
                loopStartSyncPoint.arrive_and_wait();

                // only thread 0 should reset the state
                std::optional<TIBase::ResetStatesGuard> rsg;
                if (threadId == 0)
                {
                    // Thread 0 is the genesis thread. It creates the
                    // strong pointer to be locked by the other threads.
                    // This thread also checks that the destructor ran and
                    // clears the temporary variables.
                    rsg.emplace(false);
                    auto [destructorRan, partialDeleteRan] =
                        getDestructorState();
                    BEAST_EXPECT(!i || destructorRan);
                    destructionState.store(0, std::memory_order_release);

                    toLock.clear();
                    toLock.resize(numThreads);
                    auto strong = make_SharedIntrusive<TIBase>();
                    strong->tracingCallback_ = tracingCallback;
                    std::fill(toLock.begin(), toLock.end(), strong);
                }

                // ------ Sync Point ------
                postCreateToLockSyncPoint.arrive_and_wait();

                // Multiple threads all create a weak pointer from the same
                // strong pointer
                WeakIntrusive weak{toLock[threadId]};
                for (int wi = 0; wi < lockWeakLoopIters; ++wi)
                {
                    BEAST_EXPECT(!weak.expired());
                    auto strong = weak.lock();
                    BEAST_EXPECT(strong);
                }

                // ------ Sync Point ------
                postLockWeakLoopSyncPoint.arrive_and_wait();

                toLock[threadId].reset();
            }
        };
        std::vector<std::thread> threads;
        for (int i = 0; i < numThreads; ++i)
        {
            threads.emplace_back(lockAndDestroy, i);
        }
        for (int i = 0; i < numThreads; ++i)
        {
            threads[i].join();
        }
    }

    void
    run() override
    {
        testBasics();
        testPartialDelete();
        testDestructor();
        testMultithreadedClearMixedVariant();
        testMultithreadedClearMixedUnion();
        testMultithreadedLockingWeak();
    }
};

BEAST_DEFINE_TESTSUITE(IntrusiveShared, basics, ripple);

} // namespace tests
} // namespace ripple