rippled
spinlock.h
// Copyright (c) 2022, Nikolaos D. Bougalis <nikb@bougalis.net>

#pragma once

#include <xrpl/beast/utility/instrumentation.h>

#include <atomic>
#include <limits>
#include <type_traits>
#include <utility>  // std::declval, used in the static_asserts below

#ifndef __aarch64__
#include <immintrin.h>
#endif

namespace xrpl {

namespace detail {

/** Inform the processor that we are in a tight spin-wait loop. */
inline void
spin_pause() noexcept
{
#ifdef __aarch64__
    asm volatile("yield");
#else
    _mm_pause();
#endif
}

} // namespace detail

/** Handles arrays of spinlocks packed into a single atomic integer:
    each packed_spinlock instance locks and unlocks one bit of the shared
    atomic passed to its constructor.
 */
template <class T>
class packed_spinlock
{
    // clang-format off
    static_assert(std::is_unsigned_v<T>);
    static_assert(
        std::is_same_v<decltype(std::declval<std::atomic<T>&>().fetch_or(0)), T> &&
            std::is_same_v<decltype(std::declval<std::atomic<T>&>().fetch_and(0)), T>,
        "std::atomic<T>::fetch_or(T) and std::atomic<T>::fetch_and(T) are required by packed_spinlock");
    // clang-format on

private:
    std::atomic<T>& bits_;
    T const mask_;

public:
    packed_spinlock(packed_spinlock const&) = delete;
    packed_spinlock&
    operator=(packed_spinlock const&) = delete;

    /** A single spinlock packed inside the specified atomic.

        @param lock  The atomic integer inside which the spinlock is packed.
        @param index The index of the bit this spinlock uses.
     */
    packed_spinlock(std::atomic<T>& lock, int index)
        : bits_(lock), mask_(static_cast<T>(1) << index)
    {
        XRPL_ASSERT(
            index >= 0 && (mask_ != 0),
            "xrpl::packed_spinlock::packed_spinlock : valid index and mask");
    }

    [[nodiscard]] bool
    try_lock()
    {
        // Setting our bit succeeds only if it was previously clear.
        return (bits_.fetch_or(mask_, std::memory_order_acquire) & mask_) == 0;
    }

    void
    lock()
    {
        while (!try_lock())
        {
            // The use of relaxed memory ordering here is intentional and
            // serves to help reduce cache coherency traffic during times
            // of contention by avoiding writes that would definitely not
            // result in the lock being acquired.
            while ((bits_.load(std::memory_order_relaxed) & mask_) != 0)
                detail::spin_pause();
        }
    }

    void
    unlock()
    {
        // Clear only our bit, leaving the other packed locks untouched.
        bits_.fetch_and(static_cast<T>(~mask_), std::memory_order_release);
    }
};
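
// Illustrative usage sketch (not part of the original header; the names below
// are hypothetical): a single std::atomic<std::uint64_t> can back up to 64
// independent packed_spinlock instances, one per bit, e.g. one lock per
// bucket of a small table:
//
//     std::atomic<std::uint64_t> bucket_locks{0};
//     std::array<int, 64> buckets{};
//
//     void bump(int i)
//     {
//         xrpl::packed_spinlock<std::uint64_t> sl(bucket_locks, i);  // bit i only
//         sl.lock();
//         ++buckets[i];
//         sl.unlock();
//     }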

/** A spinlock implemented on top of an atomic integer. */
template <class T>
class spinlock
{
    static_assert(std::is_unsigned_v<T>);

private:
    std::atomic<T>& lock_;

public:
    spinlock(spinlock const&) = delete;
    spinlock&
    operator=(spinlock const&) = delete;

    /** Grabs the given atomic integer to spin against. */
    spinlock(std::atomic<T>& lock) : lock_(lock)
    {
    }

    [[nodiscard]] bool
    try_lock()
    {
        T expected = 0;

        return lock_.compare_exchange_weak(
            expected,
            std::numeric_limits<T>::max(),
            std::memory_order_acquire,
            std::memory_order_relaxed);
    }

    void
    lock()
    {
        while (!try_lock())
        {
            // The use of relaxed memory ordering here is intentional and
            // serves to help reduce cache coherency traffic during times
            // of contention by avoiding writes that would definitely not
            // result in the lock being acquired.
            while (lock_.load(std::memory_order_relaxed) != 0)
                detail::spin_pause();
        }
    }

    void
    unlock()
    {
        // Setting the value back to zero releases the lock.
        lock_.store(0, std::memory_order_release);
    }
};

} // namespace xrpl
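
Both classes provide lock(), try_lock(), and unlock(), i.e. the standard
Lockable interface, so they compose with the usual RAII wrappers. The snippet
below is a minimal usage sketch rather than part of the header; the counter
name and the choice of std::uint64_t for T are assumptions, and the include
path for spinlock.h depends on the project layout.

#include <atomic>
#include <cstdint>
#include <mutex>
// ... plus the spinlock.h header shown above.

std::atomic<std::uint64_t> counter_lock{0};
std::uint64_t counter = 0;

void
bump_counter()
{
    xrpl::spinlock lk(counter_lock);  // spins on the whole word
    std::lock_guard guard(lk);        // lock() now, unlock() on scope exit
    ++counter;
}

The same pattern works for packed_spinlock, which differs only in taking a bit
index alongside the shared atomic.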