rippled
Loading...
Searching...
No Matches
spinlock.h
1// Copyright (c) 2022, Nikolaos D. Bougalis <nikb@bougalis.net>
2
3#pragma once
4
5#include <xrpl/beast/utility/instrumentation.h>
6
7#include <atomic>
8#include <limits>
9#include <type_traits>
10
11#ifndef __aarch64__
12#include <immintrin.h>
13#endif
14
15namespace xrpl {
16
17namespace detail {
/** Inform the processor that we are in a tight spin-wait loop.

    Issuing the architecture-appropriate "pause" hint reduces power
    consumption and, on hyper-threaded cores, yields pipeline resources
    to the sibling thread while we busy-wait.
 */
inline void
spin_pause() noexcept
{
#if defined(__aarch64__)
    // AArch64 spin-loop hint instruction.
    asm volatile("yield");
#else
    // x86 PAUSE instruction via the SSE2 intrinsic.
    _mm_pause();
#endif
}
37
38} // namespace detail
39
73template <class T>
75{
76 // clang-format off
77 static_assert(std::is_unsigned_v<T>);
79 static_assert(
80 std::is_same_v<decltype(std::declval<std::atomic<T>&>().fetch_or(0)), T> &&
82 "std::atomic<T>::fetch_and(T) and std::atomic<T>::fetch_and(T) are required by packed_spinlock");
83 // clang-format on
84
85private:
87 T const mask_;
88
89public:
92 operator=(packed_spinlock const&) = delete;
93
103 : bits_(lock), mask_(static_cast<T>(1) << index)
104 {
105 XRPL_ASSERT(
106 index >= 0 && (mask_ != 0),
107 "xrpl::packed_spinlock::packed_spinlock : valid index and mask");
108 }
109
110 [[nodiscard]] bool
112 {
113 return (bits_.fetch_or(mask_, std::memory_order_acquire) & mask_) == 0;
114 }
115
116 void
118 {
119 while (!try_lock())
120 {
121 // The use of relaxed memory ordering here is intentional and
122 // serves to help reduce cache coherency traffic during times
123 // of contention by avoiding writes that would definitely not
124 // result in the lock being acquired.
125 while ((bits_.load(std::memory_order_relaxed) & mask_) != 0)
127 }
128 }
129
130 void
132 {
134 }
135};
136
149template <class T>
151{
152 static_assert(std::is_unsigned_v<T>);
154
155private:
157
158public:
159 spinlock(spinlock const&) = delete;
160 spinlock&
161 operator=(spinlock const&) = delete;
162
173
174 [[nodiscard]] bool
176 {
177 T expected = 0;
178
179 return lock_.compare_exchange_weak(
180 expected,
184 }
185
186 void
188 {
189 while (!try_lock())
190 {
191 // The use of relaxed memory ordering here is intentional and
192 // serves to help reduce cache coherency traffic during times
193 // of contention by avoiding writes that would definitely not
194 // result in the lock being acquired.
195 while (lock_.load(std::memory_order_relaxed) != 0)
197 }
198 }
199
200 void
202 {
204 }
205};
208} // namespace xrpl
Classes to handle arrays of spinlocks packed into a single atomic integer:
Definition spinlock.h:75
packed_spinlock(std::atomic< T > &lock, int index)
A single spinlock packed inside the specified atomic.
Definition spinlock.h:102
packed_spinlock & operator=(packed_spinlock const &)=delete
std::atomic< T > & bits_
Definition spinlock.h:86
packed_spinlock(packed_spinlock const &)=delete
A spinlock implemented on top of an atomic integer.
Definition spinlock.h:151
bool try_lock()
Definition spinlock.h:175
spinlock(spinlock const &)=delete
spinlock(std::atomic< T > &lock)
Wraps the given atomic integer, using it as the lock word.
Definition spinlock.h:170
std::atomic< T > & lock_
Definition spinlock.h:156
void unlock()
Definition spinlock.h:201
spinlock & operator=(spinlock const &)=delete
T declval(T... args)
T fetch_and(T... args)
T is_same_v
void spin_pause() noexcept
Inform the processor that we are in a tight spin-wait loop.
Definition spinlock.h:29
Use hash_* containers for keys that do not need a cryptographically secure hashing algorithm.
Definition algorithm.h:5