NexusForce 1.0.0
A modern C++ library providing extended functionality, web components, and utility libraries
载入中...
搜索中...
未找到
atomic_wait.hpp
浏览该文件的文档.
1#ifndef NEFORCE_CORE_ASYNC_ATOMIC_WAIT_HPP__
2#define NEFORCE_CORE_ASYNC_ATOMIC_WAIT_HPP__
3
10
14NEFORCE_BEGIN_NAMESPACE__
15
21
27
35 bool operator()() const noexcept { return false; }
36};
37
52template <typename Pred, typename Spin = default_spin_policy>
53bool atomic_spin(Pred& pred, Spin spin = Spin{}) noexcept {
54 constexpr auto atomic_spin_count = 16;
55 constexpr auto atomic_spin_count_relax = 12;
56 for (auto idx = 0; idx < atomic_spin_count; ++idx) {
57 if (pred()) {
58 return true;
59 }
60 if (idx < atomic_spin_count_relax) {
61 this_thread::relax();
62 } else {
63 this_thread::yield();
64 }
65 }
66
67 while (spin()) {
68 if (pred()) {
69 return true;
70 }
71 }
72 return false;
73}
74
76NEFORCE_BEGIN_INNER__
77
/// Shared state for a pool of address-based waiters (futex-style wait/notify).
///
/// `wait` counts the threads currently registered as waiters; `value` is a
/// proxy wait word used when the watched object cannot itself serve as a
/// platform wait word. Each member sits on its own 64-byte-aligned slot so
/// notifier and waiter traffic do not false-share a cache line.
struct waiter_pool_base {
  // Presumed cache-line size separating the two hot words — TODO confirm
  // against the target platforms.
  static constexpr auto align_inner = 64;

  // Number of registered waiters (see waiter_enter_wait / waiter_leave_wait).
  alignas(align_inner) platform_wait_t wait = 0;
  // Proxy wait word; incremented by waiter_notify when notified directly.
  alignas(align_inner) platform_wait_t value = 0;

  waiter_pool_base() = default;

  /// Registers the calling thread as a waiter by incrementing `wait`.
  void waiter_enter_wait() noexcept {
#ifdef NEFORCE_PLATFORM_WINDOWS
    ::_InterlockedIncrement(&wait);
#else
    __atomic_fetch_add(&wait, 1, __ATOMIC_SEQ_CST);
#endif
  }

  /// Deregisters the calling thread by decrementing `wait`.
  // NOTE(review): the decrement uses __ATOMIC_RELEASE while the increment is
  // SEQ_CST — matches the usual waiter-count protocol, but worth confirming
  // against the intended memory model.
  void waiter_leave_wait() noexcept {
#ifdef NEFORCE_PLATFORM_WINDOWS
    ::_InterlockedDecrement(&wait);
#else
    __atomic_fetch_sub(&wait, 1, __ATOMIC_RELEASE);
#endif
  }

  /// Returns true while at least one waiter is registered.
  bool waiter_waiting() const noexcept {
#ifdef NEFORCE_PLATFORM_WINDOWS
    // InterlockedExchangeAdd of 0 is used as a sequentially consistent read.
    platform_wait_t res = ::_InterlockedExchangeAdd(const_cast<volatile platform_wait_t*>(&wait), 0);
#else
    platform_wait_t res;
    __atomic_load(&wait, &res, __ATOMIC_SEQ_CST);
#endif
    return res != 0;
  }

  /// Wakes threads waiting on `addr`.
  ///
  /// When `addr` is this pool's proxy word, it is incremented first so a
  /// futex wait on it observes a changed value, and `all` is forced to true
  /// (unrelated objects may share the proxy, so a single wake could hit the
  /// wrong waiter). The wake call is skipped when no waiter is registered,
  /// unless `bare` forces it.
  ///
  /// \param addr wait word to notify
  /// \param all  wake all waiters (true) or one (false)
  /// \param bare issue the notification even if no waiter is registered
  void waiter_notify(platform_wait_t* addr, bool all, const bool bare) const noexcept {
    if (addr == &value) {
#ifdef NEFORCE_PLATFORM_WINDOWS
      ::_InterlockedIncrement(addr);
#else
      __atomic_fetch_add(addr, 1, __ATOMIC_SEQ_CST);
#endif
      all = true;
    }
    if (bare || waiter_waiting()) {
      futex_notify(addr, all);
    }
  }

  /// Maps an arbitrary address onto one of 16 statically allocated pools.
  /// The shift by 2 drops the low bits so nearby word-aligned objects spread
  /// over buckets; distinct addresses may still share a pool (collision is
  /// benign — it only causes extra wakeups).
  static waiter_pool_base& waiter_for(const void* addr) noexcept {
    constexpr uintptr_t pool_size = 16;
    static waiter_pool_base waiter[pool_size];
    const auto key = (reinterpret_cast<uintptr_t>(addr) >> 2) % pool_size;
    return waiter[key];
  }
};
167
/// Concrete waiter pool whose blocking primitive is the platform futex.
struct waiter_pool : waiter_pool_base {
  /// Futex-waits on `addr` with expected value `old` (blocks indefinitely
  /// until notified). Callers re-check their predicate in a loop, so
  /// spurious wakeups are tolerated.
  NEFORCE_ALWAYS_INLINE void waiter_do_wait(platform_wait_t* addr, const platform_wait_t old) const noexcept {
    _NEFORCE futex_wait(addr, old);
  }
};
184
185
193template <typename T>
194struct waiter_base {
195private:
205 template <typename U>
206 static constexpr bool platform_wait_valid_v =
207 is_scalar_v<U> && sizeof(U) == sizeof(platform_wait_t) && alignof(U*) >= alignof(platform_wait_t);
208
209 template <typename U, enable_if_t<platform_wait_valid_v<U>, int> = 0>
210 NEFORCE_ALWAYS_INLINE static void waiter_do_spin_v_impl(platform_wait_t*, const U& old, platform_wait_t& value) {
211 _NEFORCE memory_copy(&value, &old, sizeof(value));
212 }
213 template <typename U, enable_if_t<!platform_wait_valid_v<U>, int> = 0>
214 NEFORCE_ALWAYS_INLINE static void waiter_do_spin_v_impl(platform_wait_t* addr, const U&, platform_wait_t& value) {
215#ifdef NEFORCE_PLATFORM_WINDOWS
216 value = ::_InterlockedExchangeAdd(addr, 0);
217#else
218 __atomic_load(addr, &value, __ATOMIC_ACQUIRE);
219#endif
220 }
221
222public:
223 using waiter_type = T;
224
225 waiter_type& waiter_;
226 platform_wait_t* addr_;
227
231 template <typename U>
232 static enable_if_t<platform_wait_valid_v<U>, platform_wait_t*> waiter_wait_addr(const U* addr, platform_wait_t*) {
233 return reinterpret_cast<platform_wait_t*>(const_cast<U*>(addr));
234 }
235
239 template <typename U>
240 static enable_if_t<!platform_wait_valid_v<U>, platform_wait_t*> waiter_wait_addr(const U*, platform_wait_t* wait) {
241 return wait;
242 }
243
247 static waiter_type& waiter_for(const void* addr) noexcept {
248 static_assert(sizeof(waiter_type) == sizeof(waiter_pool_base),
249 "waiter_for should be same size with waiter_pool_base");
250 auto& res = waiter_pool_base::waiter_for(addr);
251 return reinterpret_cast<waiter_type&>(res);
252 }
253
259 template <typename U>
260 explicit waiter_base(const U* addr) noexcept :
261 waiter_(waiter_base::waiter_for(addr)),
262 addr_(waiter_base::waiter_wait_addr(addr, &waiter_.value)) {}
263
269 void waiter_notify(bool all, bool bare = false) noexcept { waiter_.waiter_notify(addr_, all, bare); }
270
274 template <typename U, typename Func, typename Spin = default_spin_policy>
275 static bool waiter_do_spin_v(platform_wait_t* addr, const U& old, Func f, platform_wait_t& value,
276 Spin spin = Spin{}) {
277 auto const pred = [=] { return _NEFORCE memory_compare<U>(old, f()) != 0; };
278 waiter_base::waiter_do_spin_v_impl(addr, old, value);
279 return _NEFORCE atomic_spin(pred, spin);
280 }
281
285 template <typename U, typename Func, typename Spin = default_spin_policy>
286 bool waiter_do_spin_v(const U& old, Func f, platform_wait_t& value, Spin spin = Spin{}) {
287 return waiter_base::waiter_do_spin_v(addr_, old, f, value, spin);
288 }
289
293 template <typename Pred, typename Spin = default_spin_policy>
294 static bool waiter_do_spin(const platform_wait_t* addr, Pred pred, platform_wait_t& value, Spin spin = Spin{}) {
295#ifdef NEFORCE_PLATFORM_WINDOWS
296 value = ::_InterlockedExchangeAdd(const_cast<volatile LONG*>(addr), 0);
297#else
298 __atomic_load(addr, &value, __ATOMIC_ACQUIRE);
299#endif
300 return _NEFORCE atomic_spin(pred, spin);
301 }
302
306 template <typename Pred, typename Spin = default_spin_policy>
307 bool waiter_do_spin(Pred pred, platform_wait_t& value, Spin spin = Spin{}) {
308 return waiter_base::waiter_do_spin(addr_, pred, value, spin);
309 }
310};
311
312
/// RAII waiter bound to one pooled wait word.
///
/// EntersWait (true_type / false_type) selects at compile time whether this
/// waiter registers itself in the pool's waiter count for its lifetime
/// (enters_wait) or performs no bookkeeping (bare_wait).
template <typename EntersWait>
struct waiter : waiter_base<waiter_pool> {
public:
  using base_type = waiter_base<waiter_pool>;

private:
  // Registering flavour: bump the pool's waiter count on entry.
  template <bool Wait = EntersWait::value, enable_if_t<Wait, int> = 0>
  NEFORCE_ALWAYS_INLINE void enter() const noexcept {
    waiter_.waiter_enter_wait();
  }
  // Bare flavour: no bookkeeping.
  template <bool Wait = EntersWait::value, enable_if_t<!Wait, int> = 0>
  NEFORCE_ALWAYS_INLINE void enter() const noexcept {}

  // Registering flavour: drop the pool's waiter count on exit.
  template <bool Wait = EntersWait::value, enable_if_t<Wait, int> = 0>
  NEFORCE_ALWAYS_INLINE void leave() const noexcept {
    waiter_.waiter_leave_wait();
  }
  // Bare flavour: no bookkeeping.
  template <bool Wait = EntersWait::value, enable_if_t<!Wait, int> = 0>
  NEFORCE_ALWAYS_INLINE void leave() const noexcept {}

public:
  /// Binds to the pool slot for `addr` and, for the registering flavour,
  /// records the calling thread as a waiter.
  template <typename T>
  explicit waiter(const T* addr) noexcept :
      base_type(addr) {
    enter();
  }

  /// Deregisters the waiter (registering flavour only).
  ~waiter() { leave(); }

  /// Blocks until the value read by `f` no longer compares equal to `old`.
  /// Spins first; if the spin phase gives up, futex-waits on the snapshot
  /// taken during the spin and re-checks, looping until a change is seen.
  /// \param old value the caller last observed
  /// \param f   callable re-reading the current value
  template <typename T, typename Func>
  void waiter_do_wait_v(T old, Func f) {
    do {
      platform_wait_t value;
      if (base_type::waiter_do_spin_v(old, f, value)) {
        return;
      }
      waiter_.waiter_do_wait(base_type::addr_, value);
    } while (_NEFORCE memory_compare<T>(old, f()) == 0);
  }

  /// Blocks until `pred()` returns true, spinning first and futex-waiting
  /// between re-evaluations (spurious wakeups are absorbed by the loop).
  template <typename Pred>
  void waiter_do_wait(Pred pred) noexcept {
    do {
      platform_wait_t value;
      if (base_type::waiter_do_spin(pred, value)) {
        return;
      }
      waiter_.waiter_do_wait(base_type::addr_, value);
    } while (!pred());
  }
};
393
395using enters_wait = waiter<true_type>;
396
398using bare_wait = waiter<false_type>;
399
400NEFORCE_END_INNER__
402
413template <typename T, typename Func>
414void atomic_wait_address_v(const T* addr, T old, Func f) noexcept {
415 inner::enters_wait waiter(addr);
416 waiter.waiter_do_wait_v(old, f);
417}
418
428template <typename T, typename Pred>
429void atomic_wait_address(const T* addr, Pred pred) noexcept {
430 inner::enters_wait waiter(addr);
431 waiter.waiter_do_wait(pred);
432}
433
443template <typename T>
444void atomic_notify_address(const T* addr, const bool all) noexcept {
445 inner::bare_wait waiter(addr);
446 waiter.waiter_notify(all);
447}
448 // AtomicOperations
450 // AsyncComponents
452
453NEFORCE_END_NAMESPACE__
454#endif // NEFORCE_CORE_ASYNC_ATOMIC_WAIT_HPP__
快速用户空间互斥锁
void atomic_wait_address(const T *addr, Pred pred) noexcept
基于谓词的原子等待
bool atomic_spin(Pred &pred, Spin spin=Spin{}) noexcept
原子自旋等待
void atomic_wait_address_v(const T *addr, T old, Func f) noexcept
基于值的原子等待
void atomic_notify_address(const T *addr, const bool all) noexcept
原子通知
NEFORCE_INLINE17 constexpr bool is_scalar_v
is_scalar的便捷变量模板
void NEFORCE_API futex_notify(void *addr, bool all) noexcept
通知等待的线程
void NEFORCE_API futex_wait(void *addr, platform_wait_t value) noexcept
无限期等待FUTEX
int platform_wait_t
平台等待类型别名
@ wait
等待操作
NEFORCE_CONSTEXPR14 void * memory_copy(void *NEFORCE_RESTRICT dest, const void *NEFORCE_RESTRICT src, size_t count) noexcept
从源内存复制到目标内存
NEFORCE_PURE_FUNCTION NEFORCE_CONSTEXPR14 int memory_compare(const void *lhs, const void *rhs, size_t count) noexcept
比较两个内存区域的内容
uint64_t uintptr_t
可容纳指针的无符号整数类型
typename enable_if< Test, T >::type enable_if_t
enable_if的便捷别名
内存操作函数
默认自旋策略
当前线程操作