rippled
spinlock.h
1 /*
2  This file is part of rippled: https://github.com/ripple/rippled
3  Copyright 2022, Nikolaos D. Bougalis <nikb@bougalis.net>
4 
5  Permission to use, copy, modify, and/or distribute this software for any
6  purpose with or without fee is hereby granted, provided that the above
7  copyright notice and this permission notice appear in all copies.
8 
9  THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10  WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11  MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12  ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13  WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14  ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15  OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16 */
17 
18 #ifndef RIPPLE_BASICS_SPINLOCK_H_INCLUDED
19 #define RIPPLE_BASICS_SPINLOCK_H_INCLUDED
20 
21 #include <atomic>
22 #include <cassert>
23 #include <limits>
24 #include <type_traits>
25 
26 #ifndef __aarch64__
27 #include <immintrin.h>
28 #endif
29 
30 namespace ripple {
31 
32 namespace detail {
/** Inform the processor that we are in a tight spin-wait loop.

    Spinning in a tight loop consumes execution resources and, on
    hyper-threaded processors, can starve the sibling hardware thread.
    This emits the architecture's spin-wait hint (AArch64 `yield`,
    x86 `pause` via `_mm_pause`) so the core can throttle back while
    we wait for the lock word to change.
 */
inline void
spin_pause() noexcept
{
#ifdef __aarch64__
    asm volatile("yield");
#else
    _mm_pause();
#endif
}
52 
53 } // namespace detail
54 
88 template <class T>
90 {
91  // clang-format off
92  static_assert(std::is_unsigned_v<T>);
94  static_assert(
95  std::is_same_v<decltype(std::declval<std::atomic<T>&>().fetch_or(0)), T> &&
96  std::is_same_v<decltype(std::declval<std::atomic<T>&>().fetch_and(0)), T>,
97  "std::atomic<T>::fetch_and(T) and std::atomic<T>::fetch_and(T) are required by packed_spinlock");
98  // clang-format on
99 
100 private:
102  T const mask_;
103 
104 public:
105  packed_spinlock(packed_spinlock const&) = delete;
107  operator=(packed_spinlock const&) = delete;
108 
118  : bits_(lock), mask_(static_cast<T>(1) << index)
119  {
120  assert(index >= 0 && (mask_ != 0));
121  }
122 
123  [[nodiscard]] bool
125  {
126  return (bits_.fetch_or(mask_, std::memory_order_acquire) & mask_) == 0;
127  }
128 
129  void
131  {
132  while (!try_lock())
133  {
134  // The use of relaxed memory ordering here is intentional and
135  // serves to help reduce cache coherency traffic during times
136  // of contention by avoiding writes that would definitely not
137  // result in the lock being acquired.
138  while ((bits_.load(std::memory_order_relaxed) & mask_) != 0)
140  }
141  }
142 
143  void
145  {
146  bits_.fetch_and(~mask_, std::memory_order_release);
147  }
148 };
149 
162 template <class T>
163 class spinlock
164 {
165  static_assert(std::is_unsigned_v<T>);
167 
168 private:
170 
171 public:
172  spinlock(spinlock const&) = delete;
173  spinlock&
174  operator=(spinlock const&) = delete;
175 
184  {
185  }
186 
187  [[nodiscard]] bool
189  {
190  T expected = 0;
191 
192  return lock_.compare_exchange_weak(
193  expected,
195  std::memory_order_acquire,
196  std::memory_order_relaxed);
197  }
198 
199  void
201  {
202  while (!try_lock())
203  {
204  // The use of relaxed memory ordering here is intentional and
205  // serves to help reduce cache coherency traffic during times
206  // of contention by avoiding writes that would definitely not
207  // result in the lock being acquired.
208  while (lock_.load(std::memory_order_relaxed) != 0)
210  }
211  }
212 
213  void
215  {
216  lock_.store(0, std::memory_order_release);
217  }
218 };
221 } // namespace ripple
222 
223 #endif
std::is_same_v
T is_same_v
ripple::packed_spinlock::packed_spinlock
packed_spinlock(packed_spinlock const &)=delete
ripple::packed_spinlock::operator=
packed_spinlock & operator=(packed_spinlock const &)=delete
ripple::packed_spinlock::mask_
const T mask_
Definition: spinlock.h:102
ripple::packed_spinlock
Classes to handle arrays of spinlocks packed into a single atomic integer:
Definition: spinlock.h:89
ripple::packed_spinlock::bits_
std::atomic< T > & bits_
Definition: spinlock.h:92
ripple::packed_spinlock::try_lock
bool try_lock()
Definition: spinlock.h:124
ripple::packed_spinlock::unlock
void unlock()
Definition: spinlock.h:144
atomic
ripple::spinlock::operator=
spinlock & operator=(spinlock const &)=delete
ripple::spinlock::unlock
void unlock()
Definition: spinlock.h:214
ripple
Use hash_* containers for keys that do not need a cryptographically secure hashing algorithm.
Definition: RCLCensorshipDetector.h:29
ripple::spinlock::spinlock
spinlock(std::atomic< T > &lock)
Grabs the.
Definition: spinlock.h:183
ripple::detail::spin_pause
void spin_pause() noexcept
Inform the processor that we are in a tight spin-wait loop.
Definition: spinlock.h:44
limits
cassert
ripple::spinlock
A spinlock implemented on top of an atomic integer.
Definition: spinlock.h:163
ripple::spinlock::lock_
std::atomic< T > & lock_
Definition: spinlock.h:165
ripple::spinlock::lock
void lock()
Definition: spinlock.h:200
ripple::spinlock::try_lock
bool try_lock()
Definition: spinlock.h:188
ripple::packed_spinlock::lock
void lock()
Definition: spinlock.h:130
std::numeric_limits
ripple::packed_spinlock::packed_spinlock
packed_spinlock(std::atomic< T > &lock, int index)
A single spinlock packed inside the specified atomic.
Definition: spinlock.h:117
type_traits
std::declval
T declval(T... args)
ripple::spinlock::spinlock
spinlock(spinlock const &)=delete