rippled
spinlock.h
// Copyright (c) 2022, Nikolaos D. Bougalis <nikb@bougalis.net>

#ifndef XRPL_BASICS_SPINLOCK_H_INCLUDED
#define XRPL_BASICS_SPINLOCK_H_INCLUDED

#include <xrpl/beast/utility/instrumentation.h>

#include <atomic>
#include <limits>
#include <type_traits>

#ifndef __aarch64__
#include <immintrin.h>
#endif

namespace ripple {

namespace detail {
/** Inform the processor that we are in a tight spin-wait loop. */
inline void
spin_pause() noexcept
{
#ifdef __aarch64__
    asm volatile("yield");
#else
    _mm_pause();
#endif
}

} // namespace detail

/** Classes to handle arrays of spinlocks packed into a single atomic integer:
    each instance locks and unlocks one bit of the shared atomic, so a single
    64-bit atomic can back up to 64 independent spinlocks.
 */
template <class T>
class packed_spinlock
{
    // clang-format off
    static_assert(std::is_unsigned_v<T>);
    static_assert(std::atomic<T>::is_always_lock_free);
    static_assert(
        std::is_same_v<decltype(std::declval<std::atomic<T>&>().fetch_or(0)), T> &&
        std::is_same_v<decltype(std::declval<std::atomic<T>&>().fetch_and(0)), T>,
        "std::atomic<T>::fetch_or(T) and std::atomic<T>::fetch_and(T) are required by packed_spinlock");
    // clang-format on

private:
    std::atomic<T>& bits_;
    T const mask_;

public:
    packed_spinlock(packed_spinlock const&) = delete;
    packed_spinlock&
    operator=(packed_spinlock const&) = delete;

    /** A single spinlock packed inside the specified atomic. */
    packed_spinlock(std::atomic<T>& lock, int index)
        : bits_(lock), mask_(static_cast<T>(1) << index)
    {
        XRPL_ASSERT(
            index >= 0 && (mask_ != 0),
            "ripple::packed_spinlock::packed_spinlock : valid index and mask");
    }

    [[nodiscard]] bool
    try_lock()
    {
        return (bits_.fetch_or(mask_, std::memory_order_acquire) & mask_) == 0;
    }

    void
    lock()
    {
        while (!try_lock())
        {
            // The use of relaxed memory ordering here is intentional and
            // serves to help reduce cache coherency traffic during times
            // of contention by avoiding writes that would definitely not
            // result in the lock being acquired.
            while ((bits_.load(std::memory_order_relaxed) & mask_) != 0)
                detail::spin_pause();
        }
    }

    void
    unlock()
    {
        bits_.fetch_and(static_cast<T>(~mask_), std::memory_order_release);
    }
};

/** A spinlock implemented on top of an atomic integer. */
template <class T>
class spinlock
{
    static_assert(std::is_unsigned_v<T>);
    static_assert(std::atomic<T>::is_always_lock_free);

private:
    std::atomic<T>& lock_;

public:
    spinlock(spinlock const&) = delete;
    spinlock&
    operator=(spinlock const&) = delete;

    /** Grabs the given atomic integer and uses it as a spinlock. */
    spinlock(std::atomic<T>& lock) : lock_(lock)
    {
    }

    [[nodiscard]] bool
    try_lock()
    {
        T expected = 0;

        return lock_.compare_exchange_weak(
            expected,
            std::numeric_limits<T>::max(),
            std::memory_order_acquire,
            std::memory_order_relaxed);
    }

    void
    lock()
    {
        while (!try_lock())
        {
            // The use of relaxed memory ordering here is intentional and
            // serves to help reduce cache coherency traffic during times
            // of contention by avoiding writes that would definitely not
            // result in the lock being acquired.
            while (lock_.load(std::memory_order_relaxed) != 0)
                detail::spin_pause();
        }
    }

    void
    unlock()
    {
        lock_.store(0, std::memory_order_release);
    }
};

} // namespace ripple

#endif
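
Both classes expose lock(), try_lock(), and unlock(), so they satisfy the standard Lockable requirements and compose with std::lock_guard. The sketch below is not part of the header: the include path (inferred from the header guard), the variable names, and the 64-slot layout are illustrative assumptions, not rippled code.

#include <xrpl/basics/spinlock.h>  // assumed include path, per the header guard

#include <atomic>
#include <cstdint>
#include <mutex>

// Hypothetical example: 64 independent spinlocks packed into a single
// atomic word, one bit per slot.
std::atomic<std::uint64_t> slotLocks{0};
std::uint64_t slots[64] = {};

void
touchSlot(int i)
{
    // Locks only bit `i`; the other 63 slots remain independently lockable.
    ripple::packed_spinlock<std::uint64_t> sl(slotLocks, i);
    std::lock_guard guard(sl);
    ++slots[i];
}

// Hypothetical example: a conventional spinlock over a whole atomic word.
std::atomic<std::uint32_t> flag{0};
int sharedValue = 0;

void
touchValue()
{
    ripple::spinlock<std::uint32_t> sl(flag);
    std::lock_guard guard(sl);
    ++sharedValue;
}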