NexusForce 1.0.0
A Modern C++ Library with extended functionality, web components, and utility libraries
载入中...
搜索中...
未找到
atomic_base.hpp
浏览该文件的文档.
1#ifndef NEFORCE_CORE_ASYNC_ATOMIC_BASE_HPP__
2#define NEFORCE_CORE_ASYNC_ATOMIC_BASE_HPP__
3
10
14#ifdef NEFORCE_COMPILER_MSVC
15# include <intrin.h>
16#endif
17NEFORCE_BEGIN_NAMESPACE__
18
24
30
37NEFORCE_ALWAYS_INLINE_INLINE void atomic_thread_fence(const memory_order mo) noexcept {
38#ifdef NEFORCE_COMPILER_MSVC
39 if (mo == memory_order_relaxed) {
40 return;
41 }
42# if defined(NEFORCE_ARCH_X86)
43 ::_ReadWriteBarrier();
44 if (mo == memory_order_seq_cst) {
45 volatile long guard;
46 ::_InterlockedIncrement(&guard);
47 ::_ReadWriteBarrier();
48 }
49# elif defined(NEFORCE_ARCH_ARM)
50 if (mo == memory_order_acquire || mo == memory_order_consume) {
51 ::_Memory_load_acquire_barrier();
52 } else {
53 ::_ReadWriteBarrier();
54 }
55# else
56 assert(false);
57# endif
58#else
59 __atomic_thread_fence(static_cast<int32_t>(mo));
60#endif
61}
62
69NEFORCE_ALWAYS_INLINE_INLINE void atomic_signal_fence(const memory_order mo) noexcept {
70#ifdef NEFORCE_COMPILER_MSVC
71 if (mo != memory_order_relaxed) {
72 ::_ReadWriteBarrier();
73 }
74#else
75 __atomic_signal_fence(static_cast<int32_t>(mo));
76#endif
77}
78
79
81NEFORCE_BEGIN_INNER__
82
83#ifdef NEFORCE_COMPILER_MSVC
84
// Maps an atomic exchange onto the MSVC Interlocked intrinsic of matching
// width. The primary template is intentionally left undefined so that an
// unsupported size fails at compile time. Each call returns the value that
// was held in *target before the exchange.
template <size_t Size>
struct interlocked_exchange_impl;

template <>
struct interlocked_exchange_impl<1> {
  template <typename T>
  static T call(volatile T* target, T value) {
    return static_cast<T>(
        ::_InterlockedExchange8(reinterpret_cast<volatile char*>(target), static_cast<char>(value)));
  }
};
template <>
struct interlocked_exchange_impl<2> {
  template <typename T>
  static T call(volatile T* target, T value) {
    return static_cast<T>(
        ::_InterlockedExchange16(reinterpret_cast<volatile short*>(target), static_cast<short>(value)));
  }
};
template <>
struct interlocked_exchange_impl<4> {
  template <typename T>
  static T call(volatile T* target, T value) {
    return static_cast<T>(
        ::_InterlockedExchange(reinterpret_cast<volatile long*>(target), static_cast<long>(value)));
  }
};
template <>
struct interlocked_exchange_impl<8> {
  template <typename T>
  static T call(volatile T* target, T value) {
    return static_cast<T>(
        ::_InterlockedExchange64(reinterpret_cast<volatile long long*>(target), static_cast<long long>(value)));
  }
};
120
// Maps a compare-exchange onto the MSVC _InterlockedCompareExchange*
// intrinsic of matching width. Mirrors the __atomic builtin contract:
// returns true if the swap happened; on failure the value actually observed
// in *target is written back through `expected`.
template <size_t Size>
struct interlocked_compare_exchange_impl;

template <>
struct interlocked_compare_exchange_impl<1> {
  template <typename T>
  static bool call(volatile T* target, T* expected, T desired) {
    // The intrinsic returns the value *target held before the call.
    const char old =
        ::_InterlockedCompareExchange8(reinterpret_cast<volatile char*>(target),
                                       *reinterpret_cast<char*>(&desired), *reinterpret_cast<char*>(expected));
    if (old == *reinterpret_cast<char*>(expected)) {
      return true;  // observed == expected, so the exchange succeeded
    }
    *reinterpret_cast<char*>(expected) = old;  // report observed value
    return false;
  }
};
template <>
struct interlocked_compare_exchange_impl<2> {
  template <typename T>
  static bool call(volatile T* target, T* expected, T desired) {
    const short old = ::_InterlockedCompareExchange16(reinterpret_cast<volatile short*>(target),
                                                      *reinterpret_cast<short*>(&desired),
                                                      *reinterpret_cast<short*>(expected));
    if (old == *reinterpret_cast<short*>(expected)) {
      return true;
    }
    *reinterpret_cast<short*>(expected) = old;
    return false;
  }
};
template <>
struct interlocked_compare_exchange_impl<4> {
  template <typename T>
  static bool call(volatile T* target, T* expected, T desired) {
    const long old =
        ::_InterlockedCompareExchange(reinterpret_cast<volatile long*>(target),
                                      *reinterpret_cast<long*>(&desired), *reinterpret_cast<long*>(expected));
    if (old == *reinterpret_cast<long*>(expected)) {
      return true;
    }
    *reinterpret_cast<long*>(expected) = old;
    return false;
  }
};
template <>
struct interlocked_compare_exchange_impl<8> {
  template <typename T>
  static bool call(volatile T* target, T* expected, T desired) {
    const long long old = ::_InterlockedCompareExchange64(reinterpret_cast<volatile long long*>(target),
                                                          *reinterpret_cast<long long*>(&desired),
                                                          *reinterpret_cast<long long*>(expected));
    if (old == *reinterpret_cast<long long*>(expected)) {
      return true;
    }
    *reinterpret_cast<long long*>(expected) = old;
    return false;
  }
};
180template <>
181struct interlocked_compare_exchange_impl<16> {
182 template <typename T>
183 static bool call(volatile T* target, T* expected, T desired) {
184 alignas(16) long long exp_arr[2];
185 alignas(16) long long des_arr[2];
186 memory_copy(exp_arr, expected, 16);
187 _NEFORCE memory_copy(des_arr, &desired, 16);
188
189 const bool result = ::_InterlockedCompareExchange128(reinterpret_cast<volatile long long*>(target), des_arr[1],
190 des_arr[0], exp_arr) != 0;
191
192 if (!result) {
193 _NEFORCE memory_copy(expected, exp_arr, 16);
194 }
195 return result;
196 }
197};
198
// Maps fetch-add onto the MSVC _InterlockedExchangeAdd* intrinsic of
// matching width; returns the value held before the addition. Primary
// template undefined so unsupported sizes fail at compile time.
template <size_t Size>
struct interlocked_fetch_add_impl;

template <>
struct interlocked_fetch_add_impl<1> {
  template <typename T>
  static T call(volatile T* target, T value) {
    return static_cast<T>(
        ::_InterlockedExchangeAdd8(reinterpret_cast<volatile char*>(target), static_cast<char>(value)));
  }
};
template <>
struct interlocked_fetch_add_impl<2> {
  template <typename T>
  static T call(volatile T* target, T value) {
    return static_cast<T>(
        ::_InterlockedExchangeAdd16(reinterpret_cast<volatile short*>(target), static_cast<short>(value)));
  }
};
template <>
struct interlocked_fetch_add_impl<4> {
  template <typename T>
  static T call(volatile T* target, T value) {
    return static_cast<T>(
        ::_InterlockedExchangeAdd(reinterpret_cast<volatile long*>(target), static_cast<long>(value)));
  }
};
template <>
struct interlocked_fetch_add_impl<8> {
  template <typename T>
  static T call(volatile T* target, T value) {
    return static_cast<T>(::_InterlockedExchangeAdd64(reinterpret_cast<volatile long long*>(target),
                                                      static_cast<long long>(value)));
  }
};
234
// Maps fetch-and (bitwise AND) onto the MSVC _InterlockedAnd* intrinsic of
// matching width; returns the value held before the operation.
template <size_t Size>
struct interlocked_fetch_and_impl;

template <>
struct interlocked_fetch_and_impl<1> {
  template <typename T>
  static T call(volatile T* target, T value) {
    return static_cast<T>(::_InterlockedAnd8(reinterpret_cast<volatile char*>(target), static_cast<char>(value)));
  }
};
template <>
struct interlocked_fetch_and_impl<2> {
  template <typename T>
  static T call(volatile T* target, T value) {
    return static_cast<T>(
        ::_InterlockedAnd16(reinterpret_cast<volatile short*>(target), static_cast<short>(value)));
  }
};
template <>
struct interlocked_fetch_and_impl<4> {
  template <typename T>
  static T call(volatile T* target, T value) {
    return static_cast<T>(::_InterlockedAnd(reinterpret_cast<volatile long*>(target), static_cast<long>(value)));
  }
};
template <>
struct interlocked_fetch_and_impl<8> {
  template <typename T>
  static T call(volatile T* target, T value) {
    return static_cast<T>(
        ::_InterlockedAnd64(reinterpret_cast<volatile long long*>(target), static_cast<long long>(value)));
  }
};
268
// Maps fetch-or (bitwise OR) onto the MSVC _InterlockedOr* intrinsic of
// matching width; returns the value held before the operation.
template <size_t Size>
struct interlocked_fetch_or_impl;

template <>
struct interlocked_fetch_or_impl<1> {
  template <typename T>
  static T call(volatile T* target, T value) {
    return static_cast<T>(::_InterlockedOr8(reinterpret_cast<volatile char*>(target), static_cast<char>(value)));
  }
};
template <>
struct interlocked_fetch_or_impl<2> {
  template <typename T>
  static T call(volatile T* target, T value) {
    return static_cast<T>(::_InterlockedOr16(reinterpret_cast<volatile short*>(target), static_cast<short>(value)));
  }
};
template <>
struct interlocked_fetch_or_impl<4> {
  template <typename T>
  static T call(volatile T* target, T value) {
    return static_cast<T>(::_InterlockedOr(reinterpret_cast<volatile long*>(target), static_cast<long>(value)));
  }
};
template <>
struct interlocked_fetch_or_impl<8> {
  template <typename T>
  static T call(volatile T* target, T value) {
    return static_cast<T>(
        ::_InterlockedOr64(reinterpret_cast<volatile long long*>(target), static_cast<long long>(value)));
  }
};
301
// Maps fetch-xor (bitwise XOR) onto the MSVC _InterlockedXor* intrinsic of
// matching width; returns the value held before the operation.
template <size_t Size>
struct interlocked_fetch_xor_impl;

template <>
struct interlocked_fetch_xor_impl<1> {
  template <typename T>
  static T call(volatile T* target, T value) {
    return static_cast<T>(::_InterlockedXor8(reinterpret_cast<volatile char*>(target), static_cast<char>(value)));
  }
};
template <>
struct interlocked_fetch_xor_impl<2> {
  template <typename T>
  static T call(volatile T* target, T value) {
    return static_cast<T>(
        ::_InterlockedXor16(reinterpret_cast<volatile short*>(target), static_cast<short>(value)));
  }
};
template <>
struct interlocked_fetch_xor_impl<4> {
  template <typename T>
  static T call(volatile T* target, T value) {
    return static_cast<T>(::_InterlockedXor(reinterpret_cast<volatile long*>(target), static_cast<long>(value)));
  }
};
template <>
struct interlocked_fetch_xor_impl<8> {
  template <typename T>
  static T call(volatile T* target, T value) {
    return static_cast<T>(
        ::_InterlockedXor64(reinterpret_cast<volatile long long*>(target), static_cast<long long>(value)));
  }
};
335
336#endif
337
338#ifdef NEFORCE_COMPILER_MSVC
// Compile-time lock-freedom table for MSVC builds: the Interlocked
// intrinsics cover 1-, 2-, 4- and 8-byte objects on every supported
// target, and 16-byte objects only on x86-64 (CMPXCHG16B). Every other
// size reports not-always-lock-free.
template <size_t Size>
struct atomic_is_always_lock_free_impl {
# ifdef NEFORCE_ARCH_X86_64
  static constexpr bool value = Size == 1 || Size == 2 || Size == 4 || Size == 8 || Size == 16;
# else
  static constexpr bool value = Size == 1 || Size == 2 || Size == 4 || Size == 8;
# endif
};
367#endif
368
369NEFORCE_END_INNER__
371
372
377template <typename T>
379
380
// Atomically stores `value` into *ptr with memory order `mo`.
// Integral types only (enforced below).
template <typename T>
NEFORCE_ALWAYS_INLINE_INLINE void atomic_store(volatile T* ptr, remove_volatile_t<T> value,
                                               const memory_order mo) noexcept {
  static_assert(is_integral_v<T>, "T must be integral type");
#ifdef NEFORCE_COMPILER_GNUC
  __atomic_store_n(ptr, value, static_cast<int32_t>(mo));
#else
  // MSVC: implement the store as an exchange; the Interlocked intrinsic is
  // a full hardware barrier on x86, so only compiler reordering remains to
  // be suppressed for release/seq_cst.
  inner::interlocked_exchange_impl<sizeof(T)>::call(ptr, value);
  if (mo == memory_order_seq_cst || mo == memory_order_release) {
    ::_ReadWriteBarrier();  // compiler-only fence
  }
#endif
}
402
// Atomically loads and returns the value at *ptr with memory order `mo`.
// Integral types only.
template <typename T>
NEFORCE_ALWAYS_INLINE_INLINE remove_volatile_t<T> atomic_load(const volatile T* ptr, const memory_order mo) noexcept {
  static_assert(is_integral_v<T>, "T must be integral type");
#ifdef NEFORCE_COMPILER_GNUC
  return __atomic_load_n(ptr, static_cast<int32_t>(mo));
#else
  // MSVC: a plain volatile read. NOTE(review): this assumes an aligned
  // load of sizeof(T) is indivisible on the target — true for widths up
  // to the machine word, but an 8-byte T on 32-bit x86 may tear; confirm.
  remove_volatile_t<T> result = *ptr;
  if (mo == memory_order_seq_cst || mo == memory_order_acquire) {
    ::_ReadWriteBarrier();  // compiler-only fence
  }
  return result;
#endif
}
424
// Atomically replaces *ptr with `value` and returns the previous value,
// using memory order `mo`. Integral types only.
template <typename T>
NEFORCE_ALWAYS_INLINE_INLINE remove_volatile_t<T> atomic_exchange(volatile T* ptr, remove_volatile_t<T> value,
                                                                  const memory_order mo) noexcept {
  static_assert(is_integral_v<T>, "T must be integral type");
#ifdef NEFORCE_COMPILER_GNUC
  return __atomic_exchange_n(ptr, value, static_cast<int32_t>(mo));
#else
  remove_volatile_t<T> old = inner::interlocked_exchange_impl<sizeof(T)>::call(ptr, value);
  if (mo == memory_order_seq_cst) {
    ::_ReadWriteBarrier();  // compiler-only fence
  }
  return old;
#endif
}
448
// Weak compare-and-exchange: attempts to replace *ptr with the desired
// value if it currently equals *expected; may fail spuriously (LL/SC
// targets). On failure the observed value is written back to *expected.
//
// NOTE(review): this block was truncated by documentation extraction — the
// signature is missing the `desired` and `success` parameters, the x86
// branch is missing the `if (success == ...)` header matching the stray
// `}` below, and the declaration of `loaded` is absent. Verify against the
// full header before editing.
template <typename T>
NEFORCE_ALWAYS_INLINE_INLINE bool atomic_cmpexch_weak(volatile T* ptr, remove_volatile_t<T>* expected,
                                                      const memory_order failure) noexcept {
  static_assert(is_integral_v<T>, "T must be integral type");
#ifdef NEFORCE_COMPILER_GNUC
  // GCC/Clang builtin; `true` selects the weak (spuriously-failing) CAS.
  return __atomic_compare_exchange_n(ptr, expected, desired, true, static_cast<int32_t>(success),
                                     static_cast<int32_t>(failure));
#else
# if defined(NEFORCE_ARCH_X86)
  // x86 CAS is never spurious, so weak == strong here.
  const bool result = inner::interlocked_compare_exchange_impl<sizeof(T)>::call(ptr, expected, desired);
  ::_ReadWriteBarrier();
  }
  return result;
# else
  // LL/SC emulation: load-linked, compare, store-conditional.
  remove_volatile_t<T> old_val = *expected;
  bool success_flag;
# if defined(NEFORCE_ARCH_ARM)
  NEFORCE_IF_CONSTEXPR(sizeof(T) == 1) {
    asm volatile("ldrexb %[loaded], [%[ptr]]\n\t"
                 "cmp %[loaded], %[old_val]\n\t"
                 "bne 1f\n\t"
                 "strexb %w[success], %w[desired], [%[ptr]]\n\t"
                 "1:"
                 : [loaded] "=&r"(loaded), [success] "=&r"(success_flag)
                 : [ptr] "r"(ptr), [old_val] "r"(old_val), [desired] "r"(desired)
                 : "cc", "memory");
  }
  else NEFORCE_IF_CONSTEXPR(sizeof(T) == 2) {
    asm volatile("ldrexh %[loaded], [%[ptr]]\n\t"
                 "cmp %[loaded], %[old_val]\n\t"
                 "bne 1f\n\t"
                 "strexh %w[success], %w[desired], [%[ptr]]\n\t"
                 "1:"
                 : [loaded] "=&r"(loaded), [success] "=&r"(success_flag)
                 : [ptr] "r"(ptr), [old_val] "r"(old_val), [desired] "r"(desired)
                 : "cc", "memory");
  }
  else NEFORCE_IF_CONSTEXPR(sizeof(T) == 4) {
    asm volatile("ldrex %[loaded], [%[ptr]]\n\t"
                 "cmp %[loaded], %[old_val]\n\t"
                 "bne 1f\n\t"
                 "strex %w[success], %w[desired], [%[ptr]]\n\t"
                 "1:"
                 : [loaded] "=&r"(loaded), [success] "=&r"(success_flag)
                 : [ptr] "r"(ptr), [old_val] "r"(old_val), [desired] "r"(desired)
                 : "cc", "memory");
  }
  else NEFORCE_IF_CONSTEXPR(sizeof(T) == 8) {
    // 64-bit CAS on 32-bit ARM: split into lo/hi register pairs for
    // ldrexd/strexd, which operate on an even/odd register pair.
    uint32_t loaded_lo, loaded_hi;
    uint32_t old_lo = static_cast<uint32_t>(old_val);
    uint32_t old_hi = static_cast<uint32_t>(static_cast<uint64_t>(old_val) >> 32);
    uint32_t des_lo = static_cast<uint32_t>(static_cast<uint64_t>(desired));
    uint32_t des_hi = static_cast<uint32_t>(static_cast<uint64_t>(desired) >> 32);
    uint32_t tmp_success = 0;
    asm volatile(
        "ldrexd %[lo], %[hi], [%[ptr]]\n\t"
        "cmp %[lo], %[old_lo]\n\t"
        "cmpeq %[hi], %[old_hi]\n\t"
        "bne 1f\n\t"
        "strexd %[success], %[des_lo], %[des_hi], [%[ptr]]\n\t"
        "1:"
        : [lo] "=&r"(loaded_lo), [hi] "=&r"(loaded_hi), [success] "=&r"(tmp_success)
        : [ptr] "r"(ptr), [old_lo] "r"(old_lo), [old_hi] "r"(old_hi), [des_lo] "r"(des_lo), [des_hi] "r"(des_hi)
        : "cc", "memory");
    loaded = static_cast<T>(static_cast<uint64_t>(loaded_lo) | (static_cast<uint64_t>(loaded_hi) << 32));
    // strexd writes 0 on success, so invert here.
    success_flag = (tmp_success == 0);
    if (loaded != old_val) {
      *expected = loaded;
      return false;
    }
    // NOTE(review): this branch returns success_flag directly while the
    // shared tail below returns `success_flag == 0` — the two conventions
    // look inconsistent; verify against the full header.
    return success_flag;
  }
# elif defined(NEFORCE_ARCH_RISCV)
  NEFORCE_IF_CONSTEXPR(sizeof(T) == 4) {
    asm volatile("lr.w %[loaded], (%[ptr])\n\t"
                 "bne %[loaded], %[old_val], 1f\n\t"
                 "sc.w %[success], %[desired], (%[ptr])\n\t"
                 "1:"
                 : [loaded] "=&r"(loaded), [success] "=&r"(success_flag)
                 : [ptr] "r"(ptr), [old_val] "r"(old_val), [desired] "r"(desired)
                 : "memory");
  }
  else NEFORCE_IF_CONSTEXPR(sizeof(T) == 8) {
    asm volatile("lr.d %[loaded], (%[ptr])\n\t"
                 "bne %[loaded], %[old_val], 1f\n\t"
                 "sc.d %[success], %[desired], (%[ptr])\n\t"
                 "1:"
                 : [loaded] "=&r"(loaded), [success] "=&r"(success_flag)
                 : [ptr] "r"(ptr), [old_val] "r"(old_val), [desired] "r"(desired)
                 : "memory");
  }
# elif defined(NEFORCE_ARCH_LOONGARCH)
  NEFORCE_IF_CONSTEXPR(sizeof(T) == 4) {
    uint32_t sc_result;
    uint32_t des_copy = static_cast<uint32_t>(desired);
    asm volatile("ll.w %[loaded], %[ptr]\n\t"
                 "bne %[loaded], %[old_val], 1f\n\t"
                 "sc.w %[des_copy], %[ptr]\n\t"
                 "1:\n\t"
                 "move %[success], %[des_copy]\n\t"
                 : [loaded] "=&r"(loaded), [des_copy] "+r"(des_copy), [success] "=r"(sc_result)
                 : [ptr] "m"(*ptr), [old_val] "r"(old_val)
                 : "memory");
    success_flag = (sc_result == 0);
  }
  else NEFORCE_IF_CONSTEXPR(sizeof(T) == 8) {
    asm volatile("ll.d %[loaded], %[ptr]\n\t"
                 "bne %[loaded], %[old_val], 1f\n\t"
                 "sc.d %[desired], %[ptr]\n\t"
                 "move %[success], %[desired]\n\t"
                 "1:"
                 : [loaded] "=&r"(loaded), [success] "=&r"(success_flag)
                 : [ptr] "m"(*ptr), [old_val] "r"(old_val), [desired] "r"(desired)
                 : "memory");
  }
# endif
  // Shared tail: a value mismatch is a genuine failure; otherwise report
  // the store-conditional result (0 is conventionally success for SC).
  if (loaded != old_val) {
    *expected = loaded;
    return false;
  }
  return success_flag == 0;
# endif
#endif
}
589
// Strong compare-and-exchange: like the weak form but never fails
// spuriously — it only returns false when *ptr genuinely differed from
// *expected.
//
// NOTE(review): the signature was truncated by doc extraction and is
// missing the `desired` and `success` parameters referenced in the body;
// verify against the full header.
template <typename T>
NEFORCE_ALWAYS_INLINE_INLINE bool atomic_cmpexch_strong(volatile T* ptr, remove_volatile_t<T>* expected,
                                                        const memory_order failure) noexcept {
  static_assert(is_integral_v<T>, "T must be integral type");
#ifdef NEFORCE_COMPILER_GNUC
  // `false` selects the strong (non-spurious) builtin CAS.
  return __atomic_compare_exchange_n(ptr, expected, desired, false, static_cast<int32_t>(success),
                                     static_cast<int32_t>(failure));
#else
# if defined(NEFORCE_ARCH_X86)
  // x86 CAS never fails spuriously, so the weak form is already strong.
  return _NEFORCE atomic_cmpexch_weak(ptr, expected, desired, success, failure);
# else
  // Retry the weak CAS until it either succeeds or reports a genuine
  // value mismatch (detected via *expected changing).
  remove_volatile_t<T> old_val = *expected;
  while (true) {
    if (_NEFORCE atomic_cmpexch_weak(ptr, expected, desired, success, failure)) {
      return true;
    }
    if (*expected != old_val) {
      return false;
    }
  }
# endif
#endif
}
627
// Atomically adds `value` to *ptr and returns the value held before the
// addition. Integral types only.
template <typename T>
NEFORCE_ALWAYS_INLINE_INLINE remove_volatile_t<T> atomic_fetch_add(volatile T* ptr, atomic_diff_t<T> value,
                                                                   const memory_order mo) noexcept {
  static_assert(is_integral_v<T>, "T must be integral type");
#ifdef NEFORCE_COMPILER_GNUC
  return __atomic_fetch_add(ptr, value, static_cast<int32_t>(mo));
#else
  remove_volatile_t<T> old = inner::interlocked_fetch_add_impl<sizeof(T)>::call(ptr, value);
  if (mo == memory_order_seq_cst) {
    ::_ReadWriteBarrier();  // compiler-only fence
  }
  return old;
#endif
}
651
// Atomically subtracts `value` from *ptr and returns the value held before
// the subtraction. Integral types only.
template <typename T>
NEFORCE_ALWAYS_INLINE_INLINE remove_volatile_t<T> atomic_fetch_sub(volatile T* ptr, atomic_diff_t<T> value,
                                                                   const memory_order mo) noexcept {
  static_assert(is_integral_v<T>, "T must be integral type");
#ifdef NEFORCE_COMPILER_GNUC
  return __atomic_fetch_sub(ptr, value, static_cast<int32_t>(mo));
#else
  // MSVC has no ExchangeSub intrinsic: add the negation instead (two's
  // complement wraparound makes this exact even for unsigned diff types).
  return _NEFORCE atomic_fetch_add(ptr, static_cast<atomic_diff_t<T>>(-value), mo);
#endif
}
671
// Atomically ANDs `value` into *ptr and returns the value held before the
// operation. Integral types only.
template <typename T>
NEFORCE_ALWAYS_INLINE_INLINE remove_volatile_t<T> atomic_fetch_and(volatile T* ptr, remove_volatile_t<T> value,
                                                                   const memory_order mo) noexcept {
  static_assert(is_integral_v<T>, "T must be integral type");
#ifdef NEFORCE_COMPILER_GNUC
  return __atomic_fetch_and(ptr, value, static_cast<int32_t>(mo));
#else
  remove_volatile_t<T> old = inner::interlocked_fetch_and_impl<sizeof(T)>::call(ptr, value);
  if (mo == memory_order_seq_cst) {
    ::_ReadWriteBarrier();  // compiler-only fence
  }
  return old;
#endif
}
695
// Atomically ORs `value` into *ptr and returns the value held before the
// operation. Integral types only.
template <typename T>
NEFORCE_ALWAYS_INLINE_INLINE remove_volatile_t<T> atomic_fetch_or(volatile T* ptr, remove_volatile_t<T> value,
                                                                  const memory_order mo) noexcept {
  static_assert(is_integral_v<T>, "T must be integral type");
#ifdef NEFORCE_COMPILER_GNUC
  return __atomic_fetch_or(ptr, value, static_cast<int32_t>(mo));
#else
  remove_volatile_t<T> old = inner::interlocked_fetch_or_impl<sizeof(T)>::call(ptr, value);
  if (mo == memory_order_seq_cst) {
    ::_ReadWriteBarrier();  // compiler-only fence
  }
  return old;
#endif
}
719
// Atomically XORs `value` into *ptr and returns the value held before the
// operation. Integral types only.
template <typename T>
NEFORCE_ALWAYS_INLINE_INLINE remove_volatile_t<T> atomic_fetch_xor(volatile T* ptr, remove_volatile_t<T> value,
                                                                   const memory_order mo) noexcept {
  static_assert(is_integral_v<T>, "T must be integral type");
#ifdef NEFORCE_COMPILER_GNUC
  return __atomic_fetch_xor(ptr, value, static_cast<int32_t>(mo));
#else
  remove_volatile_t<T> old = inner::interlocked_fetch_xor_impl<sizeof(T)>::call(ptr, value);
  if (mo == memory_order_seq_cst) {
    ::_ReadWriteBarrier();  // compiler-only fence
  }
  return old;
#endif
}
743
// Atomically adds `value` to *ptr and returns the NEW value (after the
// addition). Integral types only.
template <typename T>
NEFORCE_ALWAYS_INLINE_INLINE remove_volatile_t<T> atomic_add_fetch(volatile T* ptr, atomic_diff_t<T> value,
                                                                   memory_order mo) noexcept {
  static_assert(is_integral_v<T>, "T must be integral type");
#ifdef NEFORCE_COMPILER_GNUC
  return __atomic_add_fetch(ptr, value, static_cast<int32_t>(mo));
#else
  // Recompute the post-add value locally from the fetched old value.
  return _NEFORCE atomic_fetch_add(ptr, value, mo) + value;
#endif
}
763
// Atomically subtracts `value` from *ptr and returns the NEW value (after
// the subtraction). Integral types only.
template <typename T>
NEFORCE_ALWAYS_INLINE_INLINE remove_volatile_t<T> atomic_sub_fetch(volatile T* ptr, atomic_diff_t<T> value,
                                                                   memory_order mo) noexcept {
  static_assert(is_integral_v<T>, "T must be integral type");
#ifdef NEFORCE_COMPILER_GNUC
  return __atomic_sub_fetch(ptr, value, static_cast<int32_t>(mo));
#else
  // Recompute the post-subtract value locally from the fetched old value.
  return _NEFORCE atomic_fetch_sub(ptr, value, mo) - value;
#endif
}
783
// Atomically ANDs `value` into *ptr and returns the NEW value. Integral
// types only.
template <typename T>
NEFORCE_ALWAYS_INLINE_INLINE remove_volatile_t<T> atomic_and_fetch(volatile T* ptr, remove_volatile_t<T> value,
                                                                   memory_order mo) noexcept {
  static_assert(is_integral_v<T>, "T must be integral type");
#ifdef NEFORCE_COMPILER_GNUC
  return __atomic_and_fetch(ptr, value, static_cast<int32_t>(mo));
#else
  // Recompute the post-AND value locally from the fetched old value.
  return _NEFORCE atomic_fetch_and(ptr, value, mo) & value;
#endif
}
803
// Atomically ORs `value` into *ptr and returns the NEW value. Integral
// types only.
template <typename T>
NEFORCE_ALWAYS_INLINE_INLINE remove_volatile_t<T> atomic_or_fetch(volatile T* ptr, remove_volatile_t<T> value,
                                                                  memory_order mo) noexcept {
  static_assert(is_integral_v<T>, "T must be integral type");
#ifdef NEFORCE_COMPILER_GNUC
  return __atomic_or_fetch(ptr, value, static_cast<int32_t>(mo));
#else
  // Recompute the post-OR value locally from the fetched old value.
  return _NEFORCE atomic_fetch_or(ptr, value, mo) | value;
#endif
}
823
// Atomically XORs `value` into *ptr and returns the NEW value. Integral
// types only.
template <typename T>
NEFORCE_ALWAYS_INLINE_INLINE remove_volatile_t<T> atomic_xor_fetch(volatile T* ptr, remove_volatile_t<T> value,
                                                                   memory_order mo) noexcept {
  static_assert(is_integral_v<T>, "T must be integral type");
#ifdef NEFORCE_COMPILER_GNUC
  return __atomic_xor_fetch(ptr, value, static_cast<int32_t>(mo));
#else
  // Recompute the post-XOR value locally from the fetched old value.
  return _NEFORCE atomic_fetch_xor(ptr, value, mo) ^ value;
#endif
}
843
844
// Weak compare-and-exchange for arbitrary (not necessarily integral) T;
// compares/updates through pointers rather than by value (the __atomic_*
// "generic" form). May fail spuriously on LL/SC targets.
//
// NOTE(review): truncated by doc extraction — the signature is missing the
// `desired` and `success` parameters, the x86 branch is missing its
// `if (...) {` header (stray `}` below), and `loaded` is never declared in
// the visible text. Verify against the full header.
template <typename T>
NEFORCE_ALWAYS_INLINE_INLINE bool atomic_cmpexch_weak_any(volatile T* ptr, remove_volatile_t<T>* expected,
                                                          const memory_order failure) noexcept {
#ifdef NEFORCE_COMPILER_GNUC
  // Generic builtin: operates through pointers; `true` = weak CAS.
  return __atomic_compare_exchange(ptr, expected, desired, true, static_cast<int32_t>(success),
                                   static_cast<int32_t>(failure));
#else
# if defined(NEFORCE_ARCH_X86)
  const bool result = inner::interlocked_compare_exchange_impl<sizeof(T)>::call(ptr, expected, *desired);
  ::_ReadWriteBarrier();
  }
  return result;
# else
  // LL/SC emulation, same pattern as atomic_cmpexch_weak.
  remove_volatile_t<T> old_val = *expected;
  bool success_flag;
# if defined(NEFORCE_ARCH_ARM)
  NEFORCE_IF_CONSTEXPR(sizeof(T) == 1) {
    asm volatile("ldrexb %[loaded], [%[ptr]]\n\t"
                 "cmp %[loaded], %[old_val]\n\t"
                 "bne 1f\n\t"
                 "strexb %w[success], %w[desired], [%[ptr]]\n\t"
                 "1:"
                 : [loaded] "=&r"(loaded), [success] "=&r"(success_flag)
                 : [ptr] "r"(ptr), [old_val] "r"(old_val), [desired] "r"(desired)
                 : "cc", "memory");
  }
  else NEFORCE_IF_CONSTEXPR(sizeof(T) == 2) {
    asm volatile("ldrexh %[loaded], [%[ptr]]\n\t"
                 "cmp %[loaded], %[old_val]\n\t"
                 "bne 1f\n\t"
                 "strexh %w[success], %w[desired], [%[ptr]]\n\t"
                 "1:"
                 : [loaded] "=&r"(loaded), [success] "=&r"(success_flag)
                 : [ptr] "r"(ptr), [old_val] "r"(old_val), [desired] "r"(desired)
                 : "cc", "memory");
  }
  else NEFORCE_IF_CONSTEXPR(sizeof(T) == 4) {
    asm volatile("ldrex %[loaded], [%[ptr]]\n\t"
                 "cmp %[loaded], %[old_val]\n\t"
                 "bne 1f\n\t"
                 "strex %w[success], %w[desired], [%[ptr]]\n\t"
                 "1:"
                 : [loaded] "=&r"(loaded), [success] "=&r"(success_flag)
                 : [ptr] "r"(ptr), [old_val] "r"(old_val), [desired] "r"(desired)
                 : "cc", "memory");
  }
  else NEFORCE_IF_CONSTEXPR(sizeof(T) == 8) {
    // NOTE(review): unlike atomic_cmpexch_weak, this passes a single
    // 64-bit operand to ldrexd/strexd instead of a lo/hi register pair —
    // confirm this assembles/behaves correctly on 32-bit ARM.
    asm volatile("ldrexd %[loaded], [%[ptr]]\n\t"
                 "cmp %[loaded], %[old_val]\n\t"
                 "bne 1f\n\t"
                 "strexd %w[success], %[desired], [%[ptr]]\n\t"
                 "1:"
                 : [loaded] "=&r"(loaded), [success] "=&r"(success_flag)
                 : [ptr] "r"(ptr), [old_val] "r"(old_val), [desired] "r"(desired)
                 : "cc", "memory");
  }
# elif defined(NEFORCE_ARCH_RISCV)
  NEFORCE_IF_CONSTEXPR(sizeof(T) == 4) {
    asm volatile("lr.w %[loaded], (%[ptr])\n\t"
                 "bne %[loaded], %[old_val], 1f\n\t"
                 "sc.w %[success], %[desired], (%[ptr])\n\t"
                 "1:"
                 : [loaded] "=&r"(loaded), [success] "=&r"(success_flag)
                 : [ptr] "r"(ptr), [old_val] "r"(old_val), [desired] "r"(desired)
                 : "memory");
  }
  else NEFORCE_IF_CONSTEXPR(sizeof(T) == 8) {
    asm volatile("lr.d %[loaded], (%[ptr])\n\t"
                 "bne %[loaded], %[old_val], 1f\n\t"
                 "sc.d %[success], %[desired], (%[ptr])\n\t"
                 "1:"
                 : [loaded] "=&r"(loaded), [success] "=&r"(success_flag)
                 : [ptr] "r"(ptr), [old_val] "r"(old_val), [desired] "r"(desired)
                 : "memory");
  }
# elif defined(NEFORCE_ARCH_LOONGARCH)
  NEFORCE_IF_CONSTEXPR(sizeof(T) == 4) {
    asm volatile("ll.w %[loaded], %[ptr]\n\t"
                 "bne %[loaded], %[old_val], 1f\n\t"
                 "sc.w %[desired], %[ptr]\n\t"
                 "move %[success], %[desired]\n\t"
                 "1:"
                 : [loaded] "=&r"(loaded), [success] "=&r"(success_flag)
                 : [ptr] "m"(*ptr), [old_val] "r"(old_val), [desired] "r"(desired)
                 : "memory");
  }
  else NEFORCE_IF_CONSTEXPR(sizeof(T) == 8) {
    asm volatile("ll.d %[loaded], %[ptr]\n\t"
                 "bne %[loaded], %[old_val], 1f\n\t"
                 "sc.d %[desired], %[ptr]\n\t"
                 "move %[success], %[desired]\n\t"
                 "1:"
                 : [loaded] "=&r"(loaded), [success] "=&r"(success_flag)
                 : [ptr] "m"(*ptr), [old_val] "r"(old_val), [desired] "r"(desired)
                 : "memory");
  }
# endif
  // Shared tail: genuine mismatch reports the observed value; otherwise
  // the store-conditional result decides (0 conventionally == success).
  if (loaded != old_val) {
    *expected = loaded;
    return false;
  }
  return success_flag == 0;
# endif
#endif
}
965
// Strong compare-and-exchange for arbitrary T: never fails spuriously.
//
// NOTE(review): the signature was truncated by doc extraction and is
// missing the `desired` and `success` parameters referenced in the body;
// verify against the full header.
template <typename T>
NEFORCE_ALWAYS_INLINE_INLINE bool atomic_cmpexch_strong_any(volatile T* ptr, remove_volatile_t<T>* expected,
                                                            const memory_order failure) noexcept {
#ifdef NEFORCE_COMPILER_GNUC
  // `false` selects the strong (non-spurious) generic builtin CAS.
  return __atomic_compare_exchange(ptr, expected, desired, false, static_cast<int32_t>(success),
                                   static_cast<int32_t>(failure));
#else
# if defined(NEFORCE_ARCH_X86)
  // x86 CAS never fails spuriously, so one attempt suffices.
  const bool result = inner::interlocked_compare_exchange_impl<sizeof(T)>::call(ptr, expected, *desired);
  ::_ReadWriteBarrier();
  }
  return result;
# else
  // Retry the weak form until success or a genuine value mismatch,
  // detected via a bytewise comparison (T may not have operator!=).
  remove_volatile_t<T> old_val = *expected;
  while (true) {
    if (_NEFORCE atomic_cmpexch_weak_any(ptr, expected, desired, success, failure)) {
      return true;
    }
    if (_NEFORCE memory_compare<remove_volatile_t<T>>(old_val, *expected) != 0) {
      return false;
    }
  }
# endif
#endif
}
1005
// Atomically stores `value` into *ptr for arbitrary (not necessarily
// integral) T with memory order `mo`.
template <typename T>
NEFORCE_ALWAYS_INLINE_INLINE void atomic_store_any(T* ptr, remove_volatile_t<T> value, const memory_order mo) noexcept {
#ifdef NEFORCE_COMPILER_GNUC
  __atomic_store(ptr, _NEFORCE addressof(value), static_cast<int32_t>(mo));
#else
  // MSVC: emulate a plain store as a CAS loop — keep proposing `value`
  // until the CAS observes whatever is currently in *ptr.
  remove_volatile_t<T> expected = *ptr;
  while (!_NEFORCE atomic_cmpexch_weak_any(ptr, &expected, &value, mo, memory_order_relaxed)) {
    // Retry
  }
#endif
}
1025
// Atomically loads and returns the value at *ptr for arbitrary T with
// memory order `mo`.
template <typename T>
NEFORCE_ALWAYS_INLINE_INLINE remove_volatile_t<T> atomic_load_any(const T* ptr, memory_order mo) noexcept {
#ifdef NEFORCE_COMPILER_GNUC
  // Generic builtin loads into caller-provided storage.
  alignas(T) byte_t buffer[sizeof(T)];
  T* dest = reinterpret_cast<remove_volatile_t<T>*>(buffer);
  __atomic_load(ptr, dest, static_cast<int32_t>(mo));
  return *dest;
#else
  // NOTE(review): on MSVC this is a plain copy — it is not indivisible for
  // T wider than the machine word; confirm callers restrict widths or
  // accept torn reads here.
  remove_volatile_t<T> result;
  _NEFORCE memory_copy<remove_volatile_t<T>>(&result, ptr);
  if (mo == memory_order_seq_cst || mo == memory_order_acquire) {
    ::_ReadWriteBarrier();  // compiler-only fence
  }
  return result;
#endif
}
1050
// Atomically replaces *ptr with `desired` for arbitrary T and returns the
// previous value.
//
// NOTE(review): doc extraction dropped the declaration of `old` in the
// MSVC branch (original line 1069); verify against the full header.
template <typename T>
NEFORCE_ALWAYS_INLINE_INLINE remove_volatile_t<T> atomic_exchange_any(T* ptr, remove_volatile_t<T> desired,
                                                                      memory_order mo) noexcept {
#ifdef NEFORCE_COMPILER_GNUC
  // Generic builtin writes the previous value into caller storage.
  alignas(T) byte_t buffer[sizeof(T)];
  T* dest = reinterpret_cast<remove_volatile_t<T>*>(buffer);
  __atomic_exchange(ptr, _NEFORCE addressof(desired), dest, static_cast<int32_t>(mo));
  return *dest;
#else
  // CAS loop: retry until `desired` is installed; `old` accumulates the
  // last observed value, which is returned.
  while (!_NEFORCE atomic_cmpexch_weak_any(ptr, &old, &desired, mo, memory_order_relaxed)) {
    // Retry
  }
  return old;
#endif
}
1076
// NOTE(review): doc extraction dropped this function's signature and the
// declaration/initialization of `old_value` (original lines 1086-1087).
// From the CAS loop this appears to be the fetch-add for arbitrary T,
// returning the value held BEFORE the addition — verify against the full
// header.
template <typename T>
  remove_volatile_t<T> new_value;
  do {
    // Recompute the proposed value from the latest observed old value.
    new_value = old_value + value;
  } while (!_NEFORCE atomic_cmpexch_weak_any(ptr, &old_value, &new_value, mo, memory_order_relaxed));
  return old_value;
}
1094
// NOTE(review): signature and `old_value` declaration dropped by doc
// extraction (original lines 1104-1105). From the CAS loop this appears to
// be the fetch-subtract for arbitrary T, returning the value held BEFORE
// the subtraction — verify against the full header.
template <typename T>
  remove_volatile_t<T> new_value;
  do {
    // Recompute the proposed value from the latest observed old value.
    new_value = old_value - value;
  } while (!_NEFORCE atomic_cmpexch_weak_any(ptr, &old_value, &new_value, mo, memory_order_relaxed));
  return old_value;
}
1112
// NOTE(review): signature and `old_value` declaration dropped by doc
// extraction (original lines 1122-1123). From the CAS loop this appears to
// be the add-and-fetch for arbitrary T, returning the NEW value (it
// returns new_value, unlike the fetch_add fragment above) — verify
// against the full header.
template <typename T>
  remove_volatile_t<T> new_value;
  do {
    // Recompute the proposed value from the latest observed old value.
    new_value = old_value + value;
  } while (!_NEFORCE atomic_cmpexch_weak_any(ptr, &old_value, &new_value, mo, memory_order_relaxed));
  return new_value;
}
1130
// NOTE(review): signature and `old_value` declaration dropped by doc
// extraction (original lines 1140-1141). From the CAS loop this appears to
// be the subtract-and-fetch for arbitrary T, returning the NEW value —
// verify against the full header.
template <typename T>
  remove_volatile_t<T> new_value;
  do {
    // Recompute the proposed value from the latest observed old value.
    new_value = old_value - value;
  } while (!_NEFORCE atomic_cmpexch_weak_any(ptr, &old_value, &new_value, mo, memory_order_relaxed));
  return new_value;
}
1148
1149
// Reports whether atomic operations on an object of the given Size and
// Align never fall back to a lock on this target.
template <size_t Size, size_t Align>
NEFORCE_CONSTEXPR17 bool is_always_lock_free() noexcept {
#ifdef NEFORCE_COMPILER_GNUC
  // libstdc++-style trick: pass a fake pointer whose low bits encode the
  // alignment so the builtin answers for alignment Align instead of
  // treating the alignment as unknown.
  return __atomic_is_lock_free(Size, reinterpret_cast<void*>(-Align));
#else
  return inner::atomic_is_always_lock_free_impl<Size>::value;
#endif
}
1164
1165
1172struct atomic_flag {
1177#ifdef NEFORCE_COMPILER_MSVC
1178 long;
1179#else
1180 bool;
1181#endif
1182
1184
1185 atomic_flag() noexcept = default;
1186 atomic_flag(const atomic_flag&) = delete;
1187 atomic_flag& operator=(const atomic_flag&) = delete;
1188 atomic_flag& operator=(const atomic_flag&) volatile = delete;
1189 atomic_flag(atomic_flag&&) noexcept = default;
1190 atomic_flag& operator=(atomic_flag&&) noexcept = default;
1191 ~atomic_flag() noexcept = default;
1192
1197 constexpr atomic_flag(const value_type flag) noexcept :
1198 flag_(static_cast<value_type>(flag ? 1 : 0)) {}
1199
1205 NEFORCE_ALWAYS_INLINE bool test_and_set(const memory_order mo = memory_order_seq_cst) noexcept {
1206#ifdef NEFORCE_COMPILER_GNUC
1207 return __atomic_test_and_set(&flag_, static_cast<int32_t>(mo));
1208#else
1209 const long old_val = ::_InterlockedExchange(&flag_, 1);
1210 if (mo == memory_order_seq_cst) {
1211 ::_ReadWriteBarrier();
1212 }
1213 return old_val != 0;
1214#endif
1215 }
1216
1220 NEFORCE_ALWAYS_INLINE_INLINE bool test_and_set(const memory_order mo = memory_order_seq_cst) volatile noexcept {
1221#ifdef NEFORCE_COMPILER_GNUC
1222 return __atomic_test_and_set(&flag_, static_cast<int32_t>(mo));
1223#else
1224 const long old_val = ::_InterlockedExchange(&flag_, 1);
1225 if (mo == memory_order_seq_cst) {
1226 ::_ReadWriteBarrier();
1227 }
1228 return old_val != 0;
1229#endif
1230 }
1231
1237 NEFORCE_ALWAYS_INLINE bool test(const memory_order mo = memory_order_seq_cst) const noexcept {
1238#ifdef NEFORCE_COMPILER_GNUC
1239 value_type value;
1240 __atomic_load(&flag_, &value, static_cast<int32_t>(mo));
1241 return value != static_cast<value_type>(0);
1242#else
1243 const long as_bytes = flag_;
1244 if (mo != memory_order_relaxed) {
1245 ::_ReadWriteBarrier();
1246 }
1247 return as_bytes != 0;
1248#endif
1249 }
1250
 // volatile overload of test; identical implementation to the non-volatile
 // overload above.
 1254 NEFORCE_ALWAYS_INLINE_INLINE bool test(const memory_order mo = memory_order_seq_cst) const volatile noexcept {
 1255#ifdef NEFORCE_COMPILER_GNUC
 1256 value_type value;
 1257 __atomic_load(&flag_, &value, static_cast<int32_t>(mo));
 1258 return value != static_cast<value_type>(0);
 1259#else
 1260 const long as_bytes = flag_;
 1261 if (mo != memory_order_relaxed) {
 1262 ::_ReadWriteBarrier();
 1263 }
 1264 return as_bytes != 0;
 1265#endif
 1266 }
1267
 // Blocks until the flag's observed value differs from `old`; the predicate
 // passed to atomic_wait_address_v re-reads the flag with the caller's
 // memory order.
 1273 NEFORCE_ALWAYS_INLINE void wait(const bool old, const memory_order mo = memory_order_seq_cst) const noexcept {
 1274 const value_type value = old ? 1 : 0;
 1275 _NEFORCE atomic_wait_address_v(const_cast<const value_type*>(&flag_), value,
 1276 [this, mo] { return this->test(mo); });
 1277 }
1278
 // volatile overload of wait; identical implementation to the non-volatile
 // overload above.
 1282 NEFORCE_ALWAYS_INLINE_INLINE void wait(const bool old, const memory_order mo = memory_order_seq_cst) const
 1283 volatile noexcept {
 1284 const value_type value = old ? 1 : 0;
 1285 _NEFORCE atomic_wait_address_v(const_cast<const value_type*>(&flag_), value,
 1286 [this, mo] { return this->test(mo); });
 1287 }
1288
 // Wakes one waiter blocked in wait() on this flag's address.
 1292 NEFORCE_ALWAYS_INLINE void notify_one() noexcept { _NEFORCE atomic_notify_address(&flag_, false); }
 1293
 // Wakes all waiters blocked in wait() on this flag's address.
 1297 NEFORCE_ALWAYS_INLINE void notify_all() noexcept { _NEFORCE atomic_notify_address(&flag_, true); }
1298
 // Atomically resets the flag to 0 with the given ordering.
 // NOTE(review): original file lines 1304-1307 are elided in this listing
 // (presumably a memory-order validity assert) — verify against the header.
 1303 NEFORCE_ALWAYS_INLINE void clear(const memory_order mo = memory_order_seq_cst) noexcept {
 1308
 1309#ifdef NEFORCE_COMPILER_GNUC
 1310 __atomic_clear(&flag_, static_cast<int32_t>(mo));
 1311#else
 1312 _NEFORCE atomic_store(&flag_, static_cast<value_type>(0), mo);
 1313#endif
 1314 }
 1315
 // volatile overload of clear; identical implementation (lines 1320-1323
 // likewise elided in this listing).
 1319 NEFORCE_ALWAYS_INLINE_INLINE void clear(const memory_order mo = memory_order_seq_cst) volatile noexcept {
 1324
 1325#ifdef NEFORCE_COMPILER_GNUC
 1326 __atomic_clear(&flag_, static_cast<int32_t>(mo));
 1327#else
 1328 _NEFORCE atomic_store(&flag_, static_cast<value_type>(0), mo);
 1329#endif
 1330 }
 1331};
1332
1333
// Primary atomic_base for integral-like T: a std::atomic-style wrapper
// offering load/store/exchange/compare-exchange, fetch-ops, wait/notify,
// and the usual operator sugar. All operators default to seq_cst ordering.
// NOTE(review): this listing was extracted from generated documentation;
// a few member signatures (e.g. at original lines 1384, 1446, 1462, 1478,
// 1494, 1510) are elided below — the bodies that follow each gap belong to
// operator= / operator+= / operator-= / operator&= / operator|= /
// operator^= respectively.
1340template <typename T>
1341struct atomic_base {
1342 using value_type = T;
1344
1345 static_assert(is_integral_like_v<T>, "T must be an integral-like type");
1346
1347private:
 // Storage is over-aligned to at least sizeof(T) so the value never
 // straddles the boundary hardware atomics require.
1348 static constexpr size_t align_inner = sizeof(T) > alignof(T) ? sizeof(T) : alignof(T);
1349
1350 alignas(align_inner) value_type value_;
1351
1352public:
1353 atomic_base() noexcept = default;
1354 ~atomic_base() noexcept = default;
1355 atomic_base(const atomic_base&) = delete;
1356 atomic_base& operator=(const atomic_base&) = delete;
1357 atomic_base& operator=(const atomic_base&) volatile = delete;
1358 atomic_base(atomic_base&&) noexcept = default;
1359 atomic_base& operator=(atomic_base&&) noexcept = default;
1360
 // Non-atomic initializing constructor (matches std::atomic semantics).
1365 constexpr atomic_base(value_type value) noexcept :
1366 value_(value) {}
1367
 // Implicit conversion performs a seq_cst load.
1372 operator value_type() const noexcept { return load(); }
1373
1377 operator value_type() const volatile noexcept { return load(); }
1378
 // (operator= — signature elided in this listing) stores and returns value.
1385 atomic_base::store(value);
1386 return value;
1387 }
1388
1392 value_type operator=(value_type value) volatile noexcept {
1393 atomic_base::store(value);
1394 return value;
1395 }
1396
 // Post-increment/decrement return the OLD value (fetch_add/fetch_sub).
1401 value_type operator++(int) noexcept { return fetch_add(1); }
1402
1406 value_type operator++(int) volatile noexcept { return fetch_add(1); }
1407
1412 value_type operator--(int) noexcept { return fetch_sub(1); }
1413
1417 value_type operator--(int) volatile noexcept { return fetch_sub(1); }
1418
 // Pre-increment/decrement return the NEW value (add_fetch/sub_fetch).
1423 value_type operator++() noexcept { return _NEFORCE atomic_add_fetch(&value_, 1, memory_order_seq_cst); }
1424
1428 value_type operator++() volatile noexcept { return _NEFORCE atomic_add_fetch(&value_, 1, memory_order_seq_cst); }
1429
1434 value_type operator--() noexcept { return _NEFORCE atomic_sub_fetch(&value_, 1, memory_order_seq_cst); }
1435
1439 value_type operator--() volatile noexcept { return _NEFORCE atomic_sub_fetch(&value_, 1, memory_order_seq_cst); }
1440
 // (operator+= — signature elided in this listing) returns the new value.
1447 return _NEFORCE atomic_add_fetch(&value_, value, memory_order_seq_cst);
1448 }
1449
1453 value_type operator+=(value_type value) volatile noexcept {
1454 return _NEFORCE atomic_add_fetch(&value_, value, memory_order_seq_cst);
1455 }
1456
 // (operator-= — signature elided in this listing) returns the new value.
1463 return _NEFORCE atomic_sub_fetch(&value_, value, memory_order_seq_cst);
1464 }
1465
1469 value_type operator-=(value_type value) volatile noexcept {
1470 return _NEFORCE atomic_sub_fetch(&value_, value, memory_order_seq_cst);
1471 }
1472
 // (operator&= — signature elided in this listing) returns the new value.
1479 return _NEFORCE atomic_and_fetch(&value_, value, memory_order_seq_cst);
1480 }
1481
1485 value_type operator&=(value_type value) volatile noexcept {
1486 return _NEFORCE atomic_and_fetch(&value_, value, memory_order_seq_cst);
1487 }
1488
 // (operator|= — signature elided in this listing) returns the new value.
1495 return _NEFORCE atomic_or_fetch(&value_, value, memory_order_seq_cst);
1496 }
1497
1501 value_type operator|=(value_type value) volatile noexcept {
1502 return _NEFORCE atomic_or_fetch(&value_, value, memory_order_seq_cst);
1503 }
1504
 // (operator^= — signature elided in this listing) returns the new value.
1511 return _NEFORCE atomic_xor_fetch(&value_, value, memory_order_seq_cst);
1512 }
1513
1517 value_type operator^=(value_type value) volatile noexcept {
1518 return _NEFORCE atomic_xor_fetch(&value_, value, memory_order_seq_cst);
1519 }
1520
 // Lock-freedom is a compile-time property of size/alignment here.
1525 bool is_lock_free() const noexcept { return _NEFORCE is_always_lock_free<sizeof(T), align_inner>(); }
1526
1530 bool is_lock_free() const volatile noexcept { return _NEFORCE is_always_lock_free<sizeof(T), align_inner>(); }
1531
1537 NEFORCE_ALWAYS_INLINE void store(value_type value, const memory_order mo = memory_order_seq_cst) noexcept {
1542 _NEFORCE atomic_store(&value_, value, mo);
1543 }
1544
1548 NEFORCE_ALWAYS_INLINE void store(value_type value, const memory_order mo = memory_order_seq_cst) volatile noexcept {
1553 _NEFORCE atomic_store(&value_, value, mo);
1554 }
1555
1561 NEFORCE_ALWAYS_INLINE value_type load(const memory_order mo = memory_order_seq_cst) const noexcept {
1565 return _NEFORCE atomic_load(&value_, mo);
1566 }
1567
1571 NEFORCE_ALWAYS_INLINE value_type load(const memory_order mo = memory_order_seq_cst) const volatile noexcept {
1575 return _NEFORCE atomic_load(&value_, mo);
1576 }
1577
 // Atomically replaces the value and returns the previous one.
1584 NEFORCE_ALWAYS_INLINE value_type exchange(value_type value, const memory_order mo = memory_order_seq_cst) noexcept {
1585 return _NEFORCE atomic_exchange(&value_, value, mo);
1586 }
1587
1591 NEFORCE_ALWAYS_INLINE value_type exchange(value_type value,
1592 const memory_order mo = memory_order_seq_cst) volatile noexcept {
1593 return _NEFORCE atomic_exchange(&value_, value, mo);
1594 }
1595
 // Weak CAS may fail spuriously; on failure `expected` is updated with the
 // observed value (both passed by address to the _any backend).
1604 NEFORCE_ALWAYS_INLINE bool compare_exchange_weak(value_type& expected, value_type desired,
1605 const memory_order success, const memory_order failure) noexcept {
1606 return _NEFORCE atomic_cmpexch_weak_any(&value_, &expected, &desired, success, failure);
1607 }
1608
1612 NEFORCE_ALWAYS_INLINE bool compare_exchange_weak(value_type& expected, value_type desired,
1613 const memory_order success,
1614 const memory_order failure) volatile noexcept {
1615 return _NEFORCE atomic_cmpexch_weak_any(&value_, &expected, &desired, success, failure);
1616 }
1617
 // Single-order convenience overloads derive the failure order with
 // cmpexch_failure_order (strips release/acq_rel as the standard requires).
1625 NEFORCE_ALWAYS_INLINE bool compare_exchange_weak(value_type& expected, value_type desired,
1626 const memory_order mo = memory_order_seq_cst) noexcept {
1627 return this->compare_exchange_weak(expected, desired, mo, cmpexch_failure_order(mo));
1628 }
1629
1633 NEFORCE_ALWAYS_INLINE bool compare_exchange_weak(value_type& expected, value_type desired,
1634 const memory_order mo = memory_order_seq_cst) volatile noexcept {
1635 return this->compare_exchange_weak(expected, desired, mo, cmpexch_failure_order(mo));
1636 }
1637
1646 NEFORCE_ALWAYS_INLINE bool compare_exchange_strong(value_type& expected, value_type desired,
1647 const memory_order success,
1648 const memory_order failure) noexcept {
1649 return _NEFORCE atomic_cmpexch_strong_any(&value_, &expected, &desired, success, failure);
1650 }
1651
1655 NEFORCE_ALWAYS_INLINE bool compare_exchange_strong(value_type& expected, value_type desired,
1656 const memory_order success,
1657 const memory_order failure) volatile noexcept {
1658 return _NEFORCE atomic_cmpexch_strong_any(&value_, &expected, &desired, success, failure);
1659 }
1660
1668 NEFORCE_ALWAYS_INLINE bool compare_exchange_strong(value_type& expected, value_type desired,
1669 const memory_order mo = memory_order_seq_cst) noexcept {
1670 return this->compare_exchange_strong(expected, desired, mo, cmpexch_failure_order(mo));
1671 }
1672
1676 NEFORCE_ALWAYS_INLINE bool compare_exchange_strong(value_type& expected, value_type desired,
1677 const memory_order mo = memory_order_seq_cst) volatile noexcept {
1678 return this->compare_exchange_strong(expected, desired, mo, cmpexch_failure_order(mo));
1679 }
1680
 // Blocks until the stored value differs from `old`.
1686 NEFORCE_ALWAYS_INLINE void wait(value_type old, const memory_order mo = memory_order_seq_cst) const noexcept {
1687 _NEFORCE atomic_wait_address_v(&value_, old, [mo, this] { return this->load(mo); });
1688 }
1689
1693 NEFORCE_ALWAYS_INLINE void notify_one() noexcept { _NEFORCE atomic_notify_address(&value_, false); }
1694
1698 NEFORCE_ALWAYS_INLINE void notify_all() noexcept { _NEFORCE atomic_notify_address(&value_, true); }
1699
 // fetch_* operations return the PREVIOUS value.
1706 NEFORCE_ALWAYS_INLINE value_type fetch_add(value_type value,
1707 const memory_order mo = memory_order_seq_cst) noexcept {
1708 return _NEFORCE atomic_fetch_add(&value_, value, mo);
1709 }
1710
1714 NEFORCE_ALWAYS_INLINE value_type fetch_add(value_type value,
1715 const memory_order mo = memory_order_seq_cst) volatile noexcept {
1716 return _NEFORCE atomic_fetch_add(&value_, value, mo);
1717 }
1718
1725 NEFORCE_ALWAYS_INLINE value_type fetch_sub(value_type value,
1726 const memory_order mo = memory_order_seq_cst) noexcept {
1727 return _NEFORCE atomic_fetch_sub(&value_, value, mo);
1728 }
1729
1733 NEFORCE_ALWAYS_INLINE value_type fetch_sub(value_type value,
1734 const memory_order mo = memory_order_seq_cst) volatile noexcept {
1735 return _NEFORCE atomic_fetch_sub(&value_, value, mo);
1736 }
1737
1744 NEFORCE_ALWAYS_INLINE value_type fetch_and(value_type value,
1745 const memory_order mo = memory_order_seq_cst) noexcept {
1746 return _NEFORCE atomic_fetch_and(&value_, value, mo);
1747 }
1748
1752 NEFORCE_ALWAYS_INLINE value_type fetch_and(value_type value,
1753 const memory_order mo = memory_order_seq_cst) volatile noexcept {
1754 return _NEFORCE atomic_fetch_and(&value_, value, mo);
1755 }
1756
1763 NEFORCE_ALWAYS_INLINE value_type fetch_or(value_type value, const memory_order mo = memory_order_seq_cst) noexcept {
1764 return _NEFORCE atomic_fetch_or(&value_, value, mo);
1765 }
1766
1770 NEFORCE_ALWAYS_INLINE value_type fetch_or(value_type value,
1771 const memory_order mo = memory_order_seq_cst) volatile noexcept {
1772 return _NEFORCE atomic_fetch_or(&value_, value, mo);
1773 }
1774
1781 NEFORCE_ALWAYS_INLINE value_type fetch_xor(value_type value,
1782 const memory_order mo = memory_order_seq_cst) noexcept {
1783 return _NEFORCE atomic_fetch_xor(&value_, value, mo);
1784 }
1785
1789 NEFORCE_ALWAYS_INLINE value_type fetch_xor(value_type value,
1790 const memory_order mo = memory_order_seq_cst) volatile noexcept {
1791 return _NEFORCE atomic_fetch_xor(&value_, value, mo);
1792 }
1793};
1794
// Partial specialization for pointers: pointer arithmetic (fetch_add /
// fetch_sub and the operator sugar) scales the supplied element count by
// sizeof(T) and performs the arithmetic on a uintptr_t view of the pointer.
1801template <typename T>
1802struct atomic_base<T*> {
1803 using value_type = T*;
1805
1806private:
1807 value_type ptr_ = nullptr;
1808
 // Converts an element count into a byte offset.
 // NOTE(review): not referenced by any member visible in this listing —
 // fetch_add/fetch_sub compute the byte offset inline; possibly dead code.
1809 NEFORCE_ALWAYS_INLINE_INLINE static constexpr difference_type real_type_sizes(const difference_type dest) noexcept {
1810 return dest * sizeof(T);
1811 }
1812
1813public:
1814 atomic_base() noexcept = default;
1815 atomic_base(const atomic_base&) = delete;
1816 atomic_base& operator=(const atomic_base&) = delete;
1817 atomic_base& operator=(const atomic_base&) volatile = delete;
1818 atomic_base(atomic_base&&) noexcept = default;
1819 atomic_base& operator=(atomic_base&&) noexcept = default;
1820 ~atomic_base() noexcept = default;
1821
 // Non-atomic initializing constructor.
1826 constexpr atomic_base(const value_type ptr) noexcept :
1827 ptr_(ptr) {}
1828
 // Implicit conversion performs a seq_cst load.
1833 operator value_type() const noexcept { return load(); }
1834
1838 operator value_type() const volatile noexcept { return load(); }
1839
1845 value_type operator=(const value_type ptr) noexcept {
1846 atomic_base::store(ptr);
1847 return ptr;
1848 }
1849
1853 value_type operator=(const value_type ptr) volatile noexcept {
1854 atomic_base::store(ptr);
1855 return ptr;
1856 }
1857
 // Post-inc/dec return the OLD pointer; pre-inc/dec return the NEW one.
1862 value_type operator++(int) noexcept { return fetch_add(1); }
1863
1867 value_type operator++(int) volatile noexcept { return fetch_add(1); }
1868
1873 value_type operator--(int) noexcept { return fetch_sub(1); }
1874
1878 value_type operator--(int) volatile noexcept { return fetch_sub(1); }
1879
1884 value_type operator++() noexcept { return fetch_add(1) + 1; }
1885
1889 value_type operator++() volatile noexcept { return fetch_add(1) + 1; }
1890
1895 value_type operator--() noexcept { return fetch_sub(1) - 1; }
1896
1900 value_type operator--() volatile noexcept { return fetch_sub(1) - 1; }
1901
1907 value_type operator+=(const ptrdiff_t dest) noexcept { return fetch_add(dest) + dest; }
1908
1912 value_type operator+=(const ptrdiff_t dest) volatile noexcept { return fetch_add(dest) + dest; }
1913
1919 value_type operator-=(const ptrdiff_t dest) noexcept { return fetch_sub(dest) - dest; }
1920
1924 value_type operator-=(const ptrdiff_t dest) volatile noexcept { return fetch_sub(dest) - dest; }
1925
 // (bodies at original lines 1931/1938 elided in this listing).
1930 bool is_lock_free() const noexcept {
1932 }
1933
1937 bool is_lock_free() const volatile noexcept {
1939 }
1940
 // Atomic store.
 // NOTE(review): the MSVC path implements every store as a full
 // _InterlockedExchange followed by a compiler-only fence, regardless of
 // the requested order — stronger than needed for relaxed stores.
1946 NEFORCE_ALWAYS_INLINE void store(const value_type ptr, const memory_order mo = memory_order_seq_cst) noexcept {
1951
1952#ifdef NEFORCE_COMPILER_GNUC
1953 __atomic_store_n(&ptr_, ptr, static_cast<int32_t>(mo));
1954#else
1955 ::_InterlockedExchangePointer(reinterpret_cast<void* volatile*>(&ptr_), ptr);
1956 if (mo == memory_order_seq_cst || mo == memory_order_release) {
1957 ::_ReadWriteBarrier();
1958 }
1959#endif
1960 }
1961
1965 NEFORCE_ALWAYS_INLINE_INLINE void store(const value_type ptr,
1966 const memory_order mo = memory_order_seq_cst) volatile noexcept {
1971
1972#ifdef NEFORCE_COMPILER_GNUC
1973 __atomic_store_n(&ptr_, ptr, static_cast<int32_t>(mo));
1974#else
1975 ::_InterlockedExchangePointer(reinterpret_cast<void* volatile*>(&ptr_), ptr);
1976 if (mo == memory_order_seq_cst || mo == memory_order_release) {
1977 ::_ReadWriteBarrier();
1978 }
1979#endif
1980 }
1981
 // Atomic load; MSVC path reads through a volatile glvalue then fences.
1987 NEFORCE_ALWAYS_INLINE value_type load(const memory_order mo = memory_order_seq_cst) const noexcept {
1991#ifdef NEFORCE_COMPILER_GNUC
1992 return __atomic_load_n(&ptr_, static_cast<int32_t>(mo));
1993#else
1994 const value_type result = *reinterpret_cast<value_type const volatile*>(&ptr_);
1995 if (mo == memory_order_seq_cst || mo == memory_order_acquire) {
1996 ::_ReadWriteBarrier();
1997 }
1998 return result;
1999#endif
2000 }
2001
2005 NEFORCE_ALWAYS_INLINE_INLINE value_type load(const memory_order mo = memory_order_seq_cst) const volatile noexcept {
2009#ifdef NEFORCE_COMPILER_GNUC
2010 return __atomic_load_n(&ptr_, static_cast<int32_t>(mo));
2011#else
2012 const value_type result = *reinterpret_cast<value_type const volatile*>(&ptr_);
2013 if (mo == memory_order_seq_cst || mo == memory_order_acquire) {
2014 ::_ReadWriteBarrier();
2015 }
2016 return result;
2017#endif
2018 }
2019
 // Atomically replaces the stored pointer, returning the previous one.
2026 NEFORCE_ALWAYS_INLINE value_type exchange(const value_type ptr,
2027 const memory_order mo = memory_order_seq_cst) noexcept {
2028#ifdef NEFORCE_COMPILER_GNUC
2029 return __atomic_exchange_n(&ptr_, ptr, static_cast<int32_t>(mo));
2030#else
2031 const value_type old =
2032 static_cast<value_type>(::_InterlockedExchangePointer(reinterpret_cast<void* volatile*>(&ptr_), ptr));
2033 if (mo == memory_order_seq_cst) {
2034 ::_ReadWriteBarrier();
2035 }
2036 return old;
2037#endif
2038 }
2039
2043 NEFORCE_ALWAYS_INLINE_INLINE value_type exchange(const value_type ptr,
2044 const memory_order mo = memory_order_seq_cst) volatile noexcept {
2045#ifdef NEFORCE_COMPILER_GNUC
2046 return __atomic_exchange_n(&ptr_, ptr, static_cast<int32_t>(mo));
2047#else
2048 const value_type old =
2049 static_cast<value_type>(::_InterlockedExchangePointer(reinterpret_cast<void* volatile*>(&ptr_), ptr));
2050 if (mo == memory_order_seq_cst) {
2051 ::_ReadWriteBarrier();
2052 }
2053 return old;
2054#endif
2055 }
2056
 // Weak CAS; on failure `expected` receives the observed pointer.
2065 NEFORCE_ALWAYS_INLINE bool compare_exchange_weak(value_type& expected, value_type desired,
2066 const memory_order success, const memory_order failure) noexcept {
2068 return _NEFORCE atomic_cmpexch_weak_any(_NEFORCE addressof(ptr_), _NEFORCE addressof(expected),
2069 _NEFORCE addressof(desired), success, failure);
2070 }
2071
2075 NEFORCE_ALWAYS_INLINE_INLINE bool compare_exchange_weak(value_type& expected, value_type desired,
2076 const memory_order success,
2077 const memory_order failure) volatile noexcept {
2079 return _NEFORCE atomic_cmpexch_weak_any(_NEFORCE addressof(ptr_), _NEFORCE addressof(expected),
2080 _NEFORCE addressof(desired), success, failure);
2081 }
2082
2090 NEFORCE_ALWAYS_INLINE bool compare_exchange_weak(value_type& expected, value_type desired,
2091 const memory_order mo = memory_order_seq_cst) noexcept {
2092 return atomic_base::compare_exchange_weak(expected, desired, mo, cmpexch_failure_order(mo));
2093 }
2094
2098 NEFORCE_ALWAYS_INLINE bool compare_exchange_weak(value_type& expected, value_type desired,
2099 const memory_order mo = memory_order_seq_cst) volatile noexcept {
2100 return atomic_base::compare_exchange_weak(expected, desired, mo, cmpexch_failure_order(mo));
2101 }
2102
2111 NEFORCE_ALWAYS_INLINE bool compare_exchange_strong(value_type& expected, value_type desired,
2112 const memory_order success,
2113 const memory_order failure) noexcept {
2115 return _NEFORCE atomic_cmpexch_strong_any(_NEFORCE addressof(ptr_), _NEFORCE addressof(expected),
2116 _NEFORCE addressof(desired), success, failure);
2117 }
2118
2122 NEFORCE_ALWAYS_INLINE_INLINE bool compare_exchange_strong(value_type& expected, value_type desired,
2123 const memory_order success,
2124 const memory_order failure) volatile noexcept {
2126 return _NEFORCE atomic_cmpexch_strong_any(_NEFORCE addressof(ptr_), _NEFORCE addressof(expected),
2127 _NEFORCE addressof(desired), success, failure);
2128 }
2129
2137 NEFORCE_ALWAYS_INLINE bool compare_exchange_strong(value_type& expected, value_type desired,
2138 const memory_order mo = memory_order_seq_cst) noexcept {
2139 return atomic_base::compare_exchange_strong(expected, desired, mo, cmpexch_failure_order(mo));
2140 }
2141
2145 NEFORCE_ALWAYS_INLINE bool compare_exchange_strong(value_type& expected, value_type desired,
2146 const memory_order mo = memory_order_seq_cst) volatile noexcept {
2147 return atomic_base::compare_exchange_strong(expected, desired, mo, cmpexch_failure_order(mo));
2148 }
2149
 // Blocks until the stored pointer differs from `old`.
2155 NEFORCE_ALWAYS_INLINE void wait(value_type old, const memory_order mo = memory_order_seq_cst) const noexcept {
2156 _NEFORCE atomic_wait_address_v(&ptr_, old, [mo, this] { return this->load(mo); });
2157 }
2158
2162 NEFORCE_ALWAYS_INLINE void notify_one() noexcept { _NEFORCE atomic_notify_address(&ptr_, false); }
2163
2167 NEFORCE_ALWAYS_INLINE void notify_all() noexcept { _NEFORCE atomic_notify_address(&ptr_, true); }
2168
 // fetch_add/fetch_sub take an ELEMENT count, scale it to bytes, and do
 // the arithmetic on a uintptr_t view of the pointer; they return the
 // PREVIOUS pointer value.
2175 NEFORCE_ALWAYS_INLINE value_type fetch_add(const ptrdiff_t dest,
2176 const memory_order mo = memory_order_seq_cst) noexcept {
2177 const uintptr_t byte_offset = static_cast<uintptr_t>(dest * static_cast<ptrdiff_t>(sizeof(T)));
2178 uintptr_t old_val =
2179 _NEFORCE atomic_fetch_add_any(reinterpret_cast<uintptr_t*>(_NEFORCE addressof(ptr_)), byte_offset, mo);
2180 return reinterpret_cast<value_type>(old_val);
2181 }
2182
2186 NEFORCE_ALWAYS_INLINE_INLINE value_type fetch_add(const ptrdiff_t dest,
2187 const memory_order mo = memory_order_seq_cst) volatile noexcept {
2188 const uintptr_t byte_offset = static_cast<uintptr_t>(dest * static_cast<ptrdiff_t>(sizeof(T)));
2189 uintptr_t old_val =
2190 _NEFORCE atomic_fetch_add_any(reinterpret_cast<uintptr_t*>(_NEFORCE addressof(ptr_)), byte_offset, mo);
2191 return reinterpret_cast<value_type>(old_val);
2192 }
2193
2200 NEFORCE_ALWAYS_INLINE value_type fetch_sub(const ptrdiff_t dest,
2201 const memory_order mo = memory_order_seq_cst) noexcept {
2202 const uintptr_t byte_offset = static_cast<uintptr_t>(dest * static_cast<ptrdiff_t>(sizeof(T)));
2203 uintptr_t old_val =
2204 _NEFORCE atomic_fetch_sub_any(reinterpret_cast<uintptr_t*>(_NEFORCE addressof(ptr_)), byte_offset, mo);
2205 return reinterpret_cast<value_type>(old_val);
2206 }
2207
2211 NEFORCE_ALWAYS_INLINE_INLINE value_type fetch_sub(const ptrdiff_t dest,
2212 const memory_order mo = memory_order_seq_cst) volatile noexcept {
2213 const uintptr_t byte_offset = static_cast<uintptr_t>(dest * static_cast<ptrdiff_t>(sizeof(T)));
2214 uintptr_t old_val =
2215 _NEFORCE atomic_fetch_sub_any(reinterpret_cast<uintptr_t*>(_NEFORCE addressof(ptr_)), byte_offset, mo);
2216 return reinterpret_cast<value_type>(old_val);
2217 }
2218};
2219
2220
// Atomic wrapper for floating-point types. Arithmetic support is limited to
// fetch_add/fetch_sub and +=/-= (no bitwise ops), all routed through the
// *_any backends which handle non-integral payloads.
2228template <typename Float>
2229struct atomic_float_base {
 // NOTE(review): the message mentions atomic_ref_base — looks like a
 // copy-paste from that template; should read atomic_float_base.
2230 static_assert(is_floating_point_v<Float>, "atomic_ref_base need floating point T");
2231
2232 using value_type = Float;
2234
2235private:
2236 alignas(alignof(Float)) Float float_ = static_cast<Float>(0);
2237
2238public:
2239 atomic_float_base() = default;
2240 atomic_float_base(const atomic_float_base&) = delete;
2241 atomic_float_base& operator=(const atomic_float_base&) = delete;
2242 atomic_float_base& operator=(const atomic_float_base&) volatile = delete;
2243 atomic_float_base(atomic_float_base&&) noexcept = default;
2244 atomic_float_base& operator=(atomic_float_base&&) noexcept = default;
2245
 // Non-atomic initializing constructor.
2250 constexpr atomic_float_base(Float value) noexcept(is_nothrow_copy_constructible_v<Float>) :
2251 float_(value) {}
2252
2256 Float operator=(Float value) noexcept {
2257 this->store(value);
2258 return value;
2259 }
2260
2264 Float operator=(Float value) volatile noexcept {
2265 this->store(value);
2266 return value;
2267 }
2268
2272 bool is_lock_free() const noexcept { return _NEFORCE is_always_lock_free<sizeof(Float), alignof(Float)>(); }
2273
 // (body at original line 2278 elided in this listing).
2277 bool is_lock_free() const volatile noexcept {
2279 }
2280
2284 void store(Float value, const memory_order mo = memory_order_seq_cst) noexcept {
2285 _NEFORCE atomic_store_any(&float_, value, mo);
2286 }
2287
2291 void store(Float value, const memory_order mo = memory_order_seq_cst) volatile noexcept {
2292 _NEFORCE atomic_store_any(&float_, value, mo);
2293 }
2294
2298 Float load(const memory_order mo = memory_order_seq_cst) const noexcept {
2299 return _NEFORCE atomic_load_any(&float_, mo);
2300 }
2301
2305 Float load(const memory_order mo = memory_order_seq_cst) const volatile noexcept {
2306 return _NEFORCE atomic_load_any(&float_, mo);
2307 }
2308
 // Implicit conversion performs a seq_cst load.
2312 operator Float() const noexcept { return this->load(); }
2313
2317 operator Float() const volatile noexcept { return this->load(); }
2318
2322 Float exchange(Float desire, const memory_order mo = memory_order_seq_cst) noexcept {
2323 return _NEFORCE atomic_exchange_any(&float_, desire, mo);
2324 }
2325
2329 Float exchange(Float desire, const memory_order mo = memory_order_seq_cst) volatile noexcept {
2330 return _NEFORCE atomic_exchange_any(&float_, desire, mo);
2331 }
2332
 // CAS family. Note: comparison is bitwise, so NaN payloads / -0.0 vs +0.0
 // behave as in std::atomic<float>.
 // NOTE(review): `expected`/`desire` are forwarded by value here, whereas
 // atomic_base passes their addresses to the same *_any backends — confirm
 // the backend has a matching reference overload.
2336 bool compare_exchange_weak(Float& expected, Float desire, const memory_order success,
2337 const memory_order failure) noexcept {
2338 return _NEFORCE atomic_cmpexch_weak_any(&float_, expected, desire, success, failure);
2339 }
2340
2344 bool compare_exchange_weak(Float& expected, Float desire, const memory_order success,
2345 const memory_order failure) volatile noexcept {
2346 return _NEFORCE atomic_cmpexch_weak_any(&float_, expected, desire, success, failure);
2347 }
2348
2352 bool compare_exchange_strong(Float& expected, Float desire, const memory_order success,
2353 const memory_order failure) noexcept {
2354 return _NEFORCE atomic_cmpexch_strong_any(&float_, expected, desire, success, failure);
2355 }
2356
2360 bool compare_exchange_strong(Float& expected, Float desire, const memory_order success,
2361 const memory_order failure) volatile noexcept {
2362 return _NEFORCE atomic_cmpexch_strong_any(&float_, expected, desire, success, failure);
2363 }
2364
2368 bool compare_exchange_weak(Float& expected, Float desire, const memory_order mo = memory_order_seq_cst) noexcept {
2369 return _NEFORCE atomic_cmpexch_weak(&float_, expected, desire, mo, cmpexch_failure_order(mo));
2370 }
2371
2375 bool compare_exchange_weak(Float& expected, Float desire,
2376 const memory_order mo = memory_order_seq_cst) volatile noexcept {
2377 return _NEFORCE atomic_cmpexch_weak(&float_, expected, desire, mo, cmpexch_failure_order(mo));
2378 }
2379
2383 bool compare_exchange_strong(Float& expected, Float desire, const memory_order mo = memory_order_seq_cst) noexcept {
2384 return _NEFORCE atomic_cmpexch_strong(&float_, expected, desire, mo, cmpexch_failure_order(mo));
2385 }
2386
2390 bool compare_exchange_strong(Float& expected, Float desire,
2391 const memory_order mo = memory_order_seq_cst) volatile noexcept {
2392 return _NEFORCE atomic_cmpexch_strong(&float_, expected, desire, mo, cmpexch_failure_order(mo));
2393 }
2394
 // Blocks until the stored value differs from `old`.
2400 NEFORCE_ALWAYS_INLINE void wait(Float old, const memory_order mo = memory_order_seq_cst) const noexcept {
2401 _NEFORCE atomic_wait_address_v(&float_, old, [mo, this] { return this->load(mo); });
2402 }
2403
2407 NEFORCE_ALWAYS_INLINE void notify_one() noexcept { _NEFORCE atomic_notify_address(&float_, false); }
2408
2412 NEFORCE_ALWAYS_INLINE void notify_all() noexcept { _NEFORCE atomic_notify_address(&float_, true); }
2413
 // (fetch_add signature at original line 2420 elided) returns the OLD value.
2421 return _NEFORCE atomic_fetch_add_any(&float_, value, mo);
2422 }
2423
2427 value_type fetch_add(value_type value, const memory_order mo = memory_order_seq_cst) volatile noexcept {
2428 return _NEFORCE atomic_fetch_add_any(&float_, value, mo);
2429 }
2430
 // (fetch_sub signature at original line 2437 elided) returns the OLD value.
2438 return _NEFORCE atomic_fetch_sub_any(&float_, value, mo);
2439 }
2440
2444 value_type fetch_sub(value_type value, const memory_order mo = memory_order_seq_cst) volatile noexcept {
2445 return _NEFORCE atomic_fetch_sub_any(&float_, value, mo);
2446 }
2447
 // (operator+= signature at original line 2453 elided) returns the NEW value.
2454 return _NEFORCE atomic_add_fetch_any(&float_, value, memory_order_seq_cst);
2455 }
2456
2460 value_type operator+=(value_type value) volatile noexcept {
2461 return _NEFORCE atomic_add_fetch_any(&float_, value, memory_order_seq_cst);
2462 }
2463
 // (operator-= signature at original line 2469 elided) returns the NEW value.
2470 return _NEFORCE atomic_sub_fetch_any(&float_, value, memory_order_seq_cst);
2471 }
2472
2476 value_type operator-=(value_type value) volatile noexcept {
2477 return _NEFORCE atomic_sub_fetch_any(&float_, value, memory_order_seq_cst);
2478 }
2479};
2480
2481
// Primary template of atomic_ref_base, dispatched on whether T is integral
// or floating-point. (The forward-declaration line 2492 is elided in this
// listing.)
2491template <typename T, bool IsIntegral = is_integral_v<T>, bool IsFloatingPoint = is_floating_point_v<T>>
2493
2494
// Generic specialization (neither integral nor floating-point): wraps a
// non-owning pointer to an externally owned trivially-copyable object and
// routes every operation through the *_any backends.
2499template <typename T>
2500struct atomic_ref_base<T, false, false> {
2501 static_assert(is_trivially_copyable_v<T>, "atomic_ref_base need trivially copyable T");
2502
2503private:
 // 0 unless sizeof(T) is a power of two no larger than 16, in which case
 // the natural lock-free alignment is sizeof(T) itself.
2504 static constexpr int align_inner = (sizeof(T) & (sizeof(T) - 1)) || sizeof(T) > 16 ? 0 : sizeof(T);
2505
 // Non-owning; the referenced object must outlive this atomic_ref_base.
2506 T* ptr_;
2507
2508public:
2509 using value_type = T;
2510
 // Alignment the referenced object must satisfy for atomic access.
2512 static constexpr size_t required_alignment = align_inner > alignof(T) ? align_inner : alignof(T);
2513
2518 explicit atomic_ref_base(T& value) :
2519 ptr_(_NEFORCE addressof(value)) {
2520 NEFORCE_CONSTEXPR_ASSERT((static_cast<uintptr_t>(ptr_) % required_alignment) == 0);
2521 }
2522
 // Copying the ref copies the pointer (both refs then view the same object);
 // assignment through operator= below stores into the referenced object.
2523 atomic_ref_base(const atomic_ref_base&) noexcept = default;
2524 atomic_ref_base& operator=(const atomic_ref_base&) = delete;
2525
 // Stores @p value into the referenced object and returns it.
2531 T operator=(T value) noexcept {
2532 this->store(value);
2533 return value;
2534 }
2535
 // Implicit conversion performs a seq_cst load of the referenced object.
2540 operator T() const noexcept { return this->load(); }
2541
2546 bool is_lock_free() const noexcept { return _NEFORCE is_always_lock_free<sizeof(T), required_alignment>(); }
2547
2553 void store(T value, const memory_order mo = memory_order_seq_cst) noexcept {
2554 _NEFORCE atomic_store_any(ptr_, value, mo);
2555 }
2556
2562 T load(const memory_order mo = memory_order_seq_cst) const noexcept { return _NEFORCE atomic_load_any(ptr_, mo); }
2563
 // Atomically replaces the referenced value, returning the previous one.
2570 T exchange(T desire, const memory_order mo = memory_order_seq_cst) noexcept {
2571 return _NEFORCE atomic_exchange_any(ptr_, desire, mo);
2572 }
2573
 // Weak CAS; on failure `expected` receives the observed value.
2582 bool compare_exchange_weak(T& expected, T desire, const memory_order success, const memory_order failure) noexcept {
2583 return _NEFORCE atomic_cmpexch_weak_any(ptr_, expected, desire, success, failure);
2584 }
2585
2594 bool compare_exchange_strong(T& expected, T desire, const memory_order success,
2595 const memory_order failure) noexcept {
2596 return _NEFORCE atomic_cmpexch_strong_any(ptr_, expected, desire, success, failure);
2597 }
2598
 // Single-order convenience overloads derive the failure order via
 // cmpexch_failure_order.
2606 bool compare_exchange_weak(T& expected, T desire, const memory_order mo = memory_order_seq_cst) noexcept {
2607 return _NEFORCE atomic_cmpexch_weak_any(ptr_, expected, desire, mo, cmpexch_failure_order(mo));
2608 }
2609
2617 bool compare_exchange_strong(T& expected, T desire, const memory_order mo = memory_order_seq_cst) noexcept {
2618 return _NEFORCE atomic_cmpexch_strong_any(ptr_, expected, desire, mo, cmpexch_failure_order(mo));
2619 }
2620
 // Blocks until the referenced value differs from `old`.
2626 NEFORCE_ALWAYS_INLINE void wait(T old, const memory_order mo = memory_order_seq_cst) const noexcept {
2627 _NEFORCE atomic_wait_address_v(ptr_, old, [this, mo] { return this->load(mo); });
2628 }
2629
2633 NEFORCE_ALWAYS_INLINE void notify_one() noexcept { _NEFORCE atomic_notify_address(ptr_, false); }
2634
2638 NEFORCE_ALWAYS_INLINE void notify_all() noexcept { _NEFORCE atomic_notify_address(ptr_, true); }
2639};
2640
// Integral specialization: same non-owning reference semantics but with the
// full fetch-op and operator sugar of an integral atomic.
2645template <typename T>
2646struct atomic_ref_base<T, true, false> {
2647 static_assert(is_integral_like_v<T>, "atomic_ref need integral-like T");
2648
2649private:
 // Non-owning; the referenced object must outlive this atomic_ref_base.
2650 T* ptr_;
2651
2652public:
2653 using value_type = T;
2655
 // Alignment the referenced object must satisfy for atomic access.
2657 static constexpr size_t required_alignment = sizeof(T) > alignof(T) ? sizeof(T) : alignof(T);
2658
2659 atomic_ref_base() = delete;
2660 atomic_ref_base(const atomic_ref_base&) noexcept = default;
2661 atomic_ref_base& operator=(const atomic_ref_base&) = delete;
2662
2667 explicit atomic_ref_base(T& value) :
2668 ptr_(&value) {
2669 NEFORCE_CONSTEXPR_ASSERT((static_cast<uintptr_t>(ptr_) % required_alignment) == 0);
2670 }
2671
 // Stores @p value into the referenced object and returns it.
2677 T operator=(T value) noexcept {
2678 this->store(value);
2679 return value;
2680 }
2681
 // Implicit conversion performs a seq_cst load of the referenced object.
2686 NEFORCE_NODISCARD operator T() const noexcept { return this->load(); }
2687
 // (body at original line 2693 elided in this listing).
2692 NEFORCE_NODISCARD bool is_lock_free() const noexcept {
2694 }
2695
2701 void store(T value, const memory_order mo = memory_order_seq_cst) noexcept {
2702 _NEFORCE atomic_store(ptr_, value, mo);
2703 }
2704
2710 T load(const memory_order mo = memory_order_seq_cst) const noexcept { return _NEFORCE atomic_load(ptr_, mo); }
2711
 // Atomically replaces the referenced value, returning the previous one.
2718 T exchange(T desire, const memory_order mo = memory_order_seq_cst) noexcept {
2719 return _NEFORCE atomic_exchange(ptr_, desire, mo);
2720 }
2721
 // Weak CAS; on failure `expected` receives the observed value.
2730 bool compare_exchange_weak(T& expected, T desire, const memory_order success, const memory_order failure) noexcept {
2731 return _NEFORCE atomic_cmpexch_weak(ptr_, expected, desire, success, failure);
2732 }
2733
2742 bool compare_exchange_strong(T& expected, T desire, const memory_order success,
2743 const memory_order failure) noexcept {
2744 return _NEFORCE atomic_cmpexch_strong(ptr_, expected, desire, success, failure);
2745 }
2746
 // Single-order convenience overloads derive the failure order via
 // cmpexch_failure_order.
2754 bool compare_exchange_weak(T& expected, T desire, const memory_order mo = memory_order_seq_cst) noexcept {
2755 return _NEFORCE atomic_cmpexch_weak(ptr_, expected, desire, mo, cmpexch_failure_order(mo));
2756 }
2757
2765 bool compare_exchange_strong(T& expected, T desire, const memory_order mo = memory_order_seq_cst) noexcept {
2766 return _NEFORCE atomic_cmpexch_strong(ptr_, expected, desire, mo, cmpexch_failure_order(mo));
2767 }
2768
 // Blocks until the referenced value differs from `old`.
2774 NEFORCE_ALWAYS_INLINE void wait(T old, const memory_order mo = memory_order_seq_cst) const noexcept {
2775 _NEFORCE atomic_wait_address_v(ptr_, old, [this, mo] { return this->load(mo); });
2776 }
2777
2781 NEFORCE_ALWAYS_INLINE void notify_one() noexcept { _NEFORCE atomic_notify_address(ptr_, false); }
2782
2786 NEFORCE_ALWAYS_INLINE void notify_all() noexcept { _NEFORCE atomic_notify_address(ptr_, true); }
2787
 // fetch_* signatures (original lines 2794/2804/2814/2824/2834) are elided
 // in this listing; each returns the PREVIOUS value.
2795 return _NEFORCE atomic_fetch_add(ptr_, value, mo);
2796 }
2797
2805 return _NEFORCE atomic_fetch_sub(ptr_, value, mo);
2806 }
2807
2815 return _NEFORCE atomic_fetch_and(ptr_, value, mo);
2816 }
2817
2825 return _NEFORCE atomic_fetch_or(ptr_, value, mo);
2826 }
2827
2835 return _NEFORCE atomic_fetch_xor(ptr_, value, mo);
2836 }
2837
 // Post-inc/dec return the OLD value; pre-inc/dec and op= return the NEW one.
2842 NEFORCE_ALWAYS_INLINE value_type operator++(int) noexcept { return fetch_add(1); }
2843
2848 NEFORCE_ALWAYS_INLINE value_type operator--(int) noexcept { return fetch_sub(1); }
2849
2854 value_type operator++() noexcept { return _NEFORCE atomic_add_fetch(ptr_, value_type(1)); }
2855
2860 value_type operator--() noexcept { return _NEFORCE atomic_sub_fetch(ptr_, value_type(1)); }
2861
2867 value_type operator+=(value_type value) noexcept { return _NEFORCE atomic_add_fetch(ptr_, value); }
2868
2874 value_type operator-=(value_type value) noexcept { return _NEFORCE atomic_sub_fetch(ptr_, value); }
2875
2881 value_type operator&=(value_type value) noexcept { return _NEFORCE atomic_and_fetch(ptr_, value); }
2882
2888 value_type operator|=(value_type value) noexcept { return _NEFORCE atomic_or_fetch(ptr_, value); }
2889
2895 value_type operator^=(value_type value) noexcept { return _NEFORCE atomic_xor_fetch(ptr_, value); }
2896};
2897
// ---- atomic_ref_base specialization for floating-point types ----
// Dispatches to the *_any family of atomic helpers, which handle payloads
// that are not native integer widths.
2902template <typename Float>
2903struct atomic_ref_base<Float, false, true> {
2904 static_assert(is_floating_point_v<Float>, "atomic_ref_base need floating point T");
2905
2906private:
// Address of the referenced object; set once in the constructor.
2907 Float* ptr_;
2908
2909public:
2910 using value_type = Float;
// NOTE(review): doxygen lines 2911/2913 were lost in extraction; per the
// member index this class also declares `value_type difference_type` —
// confirm against the original header.
2912
// Alignment the referenced object must satisfy (checked in the ctor).
2914 static constexpr size_t required_alignment = alignof(Float);
2915
// Not default-constructible; copyable (copies the reference, not the
// referenced value); not assignable as a reference.
2916 atomic_ref_base() = delete;
2917 atomic_ref_base(const atomic_ref_base&) noexcept = default;
2918 atomic_ref_base& operator=(const atomic_ref_base&) = delete;
2919
2924 explicit atomic_ref_base(Float& value) :
2925 ptr_(&value) {
2926 NEFORCE_CONSTEXPR_ASSERT((static_cast<uintptr_t>(ptr_) % required_alignment) == 0);
2927 }
2928
// Atomically store `value` (seq_cst) and return it, mirroring plain assignment.
2934 Float operator=(Float value) noexcept {
2935 this->store(value);
2936 return value;
2937 }
2938
// Implicit conversion: atomic load with seq_cst ordering.
2943 operator Float() const noexcept { return this->load(); }
2944
// NOTE(review): the definition that belongs here (per the member index:
// `bool is_lock_free() const noexcept`) was lost in extraction — confirm
// against the original header.
2950
// Atomic store with the given memory order.
2956 void store(Float value, const memory_order mo = memory_order_seq_cst) noexcept {
2957 _NEFORCE atomic_store_any(ptr_, value, mo);
2958 }
2959
// Atomic load with the given memory order.
2965 Float load(const memory_order mo = memory_order_seq_cst) const noexcept {
2966 return _NEFORCE atomic_load_any(ptr_, mo);
2967 }
2968
// Atomically replace the stored value; returns the previous value.
2975 Float exchange(Float desire, const memory_order mo = memory_order_seq_cst) noexcept {
2976 return _NEFORCE atomic_exchange_any(ptr_, desire, mo);
2977 }
2978
// Weak CAS with separate success/failure orders; may fail spuriously.
2987 bool compare_exchange_weak(Float& expected, Float desire, const memory_order success,
2988 const memory_order failure) noexcept {
2989 return _NEFORCE atomic_cmpexch_weak_any(ptr_, expected, desire, success, failure);
2990 }
2991
// Strong CAS with separate success/failure orders.
3000 bool compare_exchange_strong(Float& expected, Float desire, const memory_order success,
3001 const memory_order failure) noexcept {
3002 return _NEFORCE atomic_cmpexch_strong_any(ptr_, expected, desire, success, failure);
3003 }
3004
// Convenience weak CAS: failure order derived via cmpexch_failure_order(mo).
3012 bool compare_exchange_weak(Float& expected, Float desire, const memory_order mo = memory_order_seq_cst) noexcept {
3013 return _NEFORCE atomic_cmpexch_weak_any(ptr_, expected, desire, mo, cmpexch_failure_order(mo));
3014 }
3015
// Convenience strong CAS: failure order derived via cmpexch_failure_order(mo).
3023 bool compare_exchange_strong(Float& expected, Float desire, const memory_order mo = memory_order_seq_cst) noexcept {
3024 return _NEFORCE atomic_cmpexch_strong_any(ptr_, expected, desire, mo, cmpexch_failure_order(mo));
3025 }
3026
// Block until the stored value is observed to differ from `old`.
3032 NEFORCE_ALWAYS_INLINE void wait(Float old, const memory_order mo = memory_order_seq_cst) const noexcept {
3033 _NEFORCE atomic_wait_address_v(ptr_, old, [this, mo] { return this->load(mo); });
3034 }
3035
// Wake one thread blocked in wait().
3039 NEFORCE_ALWAYS_INLINE void notify_one() noexcept { _NEFORCE atomic_notify_address(ptr_, false); }
3040
// Wake all threads blocked in wait().
3044 NEFORCE_ALWAYS_INLINE void notify_all() noexcept { _NEFORCE atomic_notify_address(ptr_, true); }
3045
// NOTE(review): the signature lines of the four members below (doxygen
// 3052, 3062, 3071, 3080) were lost in extraction; per the member index
// they are fetch_add / fetch_sub (value_type, memory_order = seq_cst,
// returning the OLD value) and operator+= / operator-= (value_type,
// returning the NEW value, hard-wired to seq_cst) — confirm against the
// original header.
3053 return _NEFORCE atomic_fetch_add_any(ptr_, value, mo);
3054 }
3055
3063 return _NEFORCE atomic_fetch_sub_any(ptr_, value, mo);
3064 }
3065
3072 return _NEFORCE atomic_add_fetch_any(ptr_, value, memory_order_seq_cst);
3073 }
3074
3081 return _NEFORCE atomic_sub_fetch_any(ptr_, value, memory_order_seq_cst);
3082 }
3083};
3084
3085
3086#ifdef NEFORCE_COMPILER_CLANG
3087# pragma clang diagnostic push
3088# pragma clang diagnostic ignored "-Watomic-alignment"
3089#endif
3090
3091template <typename T>
3092struct atomic_ref_base<T*, false, false> {
3093public:
3094 using value_type = T*;
3095 using difference_type = ptrdiff_t;
3096
3097private:
3098 T** ptr_;
3099
3100 static constexpr difference_type real_type_sizes(const difference_type dest) noexcept {
3101 static_assert(is_object_v<T>, "atomic_ref_base need object T");
3102 return dest * sizeof(T);
3103 }
3104
3105public:
3107 static constexpr size_t required_alignment = sizeof(T*) == 8 ? 8 : alignof(T*);
3108
3109 atomic_ref_base() = delete;
3110 atomic_ref_base(const atomic_ref_base&) noexcept = default;
3111 atomic_ref_base& operator=(const atomic_ref_base&) = delete;
3112
3117 explicit atomic_ref_base(T*& value) :
3118 ptr_(_NEFORCE addressof(value)) {
3119 NEFORCE_CONSTEXPR_ASSERT((static_cast<uintptr_t>(ptr_) % required_alignment) == 0);
3120 }
3121
3127 T* operator=(T* value) noexcept {
3128 this->store(value);
3129 return value;
3130 }
3131
3136 operator T*() const noexcept { return this->load(); }
3137
3142 bool is_lock_free() const noexcept { return _NEFORCE is_always_lock_free<sizeof(T*), required_alignment>(); }
3143
3149 void store(T* value, const memory_order mo = memory_order_seq_cst) noexcept {
3150 _NEFORCE atomic_store_any(ptr_, value, mo);
3151 }
3152
3158 T* load(const memory_order mo = memory_order_seq_cst) const noexcept { return _NEFORCE atomic_load_any(ptr_, mo); }
3159
3166 T* exchange(T* desire, const memory_order mo = memory_order_seq_cst) noexcept {
3167 return _NEFORCE atomic_exchange_any(ptr_, desire, mo);
3168 }
3169
3178 bool compare_exchange_weak(T*& expected, T* desire, const memory_order success,
3179 const memory_order failure) noexcept {
3180 return _NEFORCE atomic_cmpexch_weak_any(ptr_, expected, desire, success, failure);
3181 }
3182
3191 bool compare_exchange_strong(T*& expected, T* desire, const memory_order success,
3192 const memory_order failure) noexcept {
3193 return _NEFORCE atomic_cmpexch_strong_any(ptr_, expected, desire, success, failure);
3194 }
3195
3203 bool compare_exchange_weak(T*& expected, T* desire, const memory_order mo = memory_order_seq_cst) noexcept {
3204 return _NEFORCE atomic_cmpexch_weak_any(ptr_, expected, desire, mo, cmpexch_failure_order(mo));
3205 }
3206
3214 bool compare_exchange_strong(T*& expected, T* desire, const memory_order mo = memory_order_seq_cst) noexcept {
3215 return _NEFORCE atomic_cmpexch_strong_any(ptr_, expected, desire, mo, cmpexch_failure_order(mo));
3216 }
3217
3223 NEFORCE_ALWAYS_INLINE void wait(T* old, const memory_order mo = memory_order_seq_cst) const noexcept {
3224 _NEFORCE atomic_wait_address_v(ptr_, old, [this, mo] { return this->load(mo); });
3225 }
3226
3230 NEFORCE_ALWAYS_INLINE void notify_one() noexcept { _NEFORCE atomic_notify_address(ptr_, false); }
3231
3235 NEFORCE_ALWAYS_INLINE void notify_all() noexcept { _NEFORCE atomic_notify_address(ptr_, true); }
3236
3243 NEFORCE_ALWAYS_INLINE value_type fetch_add(const difference_type dest,
3244 const memory_order mo = memory_order_seq_cst) noexcept {
3245 const uintptr_t byte_offset = static_cast<uintptr_t>(dest * static_cast<difference_type>(sizeof(T)));
3246 uintptr_t old_val = _NEFORCE atomic_fetch_add_any(reinterpret_cast<uintptr_t*>(ptr_), byte_offset, mo);
3247 return reinterpret_cast<value_type>(old_val);
3248 }
3249
3256 NEFORCE_ALWAYS_INLINE value_type fetch_sub(const difference_type dest,
3257 const memory_order mo = memory_order_seq_cst) noexcept {
3258 const uintptr_t byte_offset = static_cast<uintptr_t>(dest * static_cast<difference_type>(sizeof(T)));
3259 uintptr_t old_val = _NEFORCE atomic_fetch_sub_any(reinterpret_cast<uintptr_t*>(ptr_), byte_offset, mo);
3260 return reinterpret_cast<value_type>(old_val);
3261 }
3262
3267 value_type operator++(int) noexcept { return fetch_add(1); }
3268
3273 value_type operator--(int) noexcept { return fetch_sub(1); }
3274
3279 value_type operator++() noexcept { return fetch_add(1) + 1; }
3280
3285 value_type operator--() noexcept { return fetch_sub(1) - 1; }
3286
3292 value_type operator+=(const difference_type dest) noexcept { return fetch_add(dest) + dest; }
3293
3299 value_type operator-=(const difference_type dest) noexcept { return fetch_sub(dest) - dest; }
3300};
3301
3302#ifdef NEFORCE_COMPILER_CLANG
3303# pragma clang diagnostic pop
3304#endif
3305 // AtomicOperations
3307 // AsyncComponents
3309
3310NEFORCE_END_NAMESPACE__
3311#endif // NEFORCE_CORE_ASYNC_ATOMIC_BASE_HPP__
原子等待/通知机制
调试断点和断言工具
NEFORCE_NODISCARD constexpr T * addressof(T &x) noexcept
获取对象的地址
NEFORCE_ALWAYS_INLINE_INLINE remove_volatile_t< T > atomic_fetch_sub(volatile T *ptr, atomic_diff_t< T > value, const memory_order mo) noexcept
原子获取并减去操作
T atomic_fetch_add_any(T *ptr, remove_volatile_t< T > value, memory_order mo) noexcept
通用原子获取并添加操作
NEFORCE_ALWAYS_INLINE_INLINE void atomic_store_any(T *ptr, remove_volatile_t< T > value, const memory_order mo) noexcept
通用原子存储操作
NEFORCE_ALWAYS_INLINE_INLINE remove_volatile_t< T > atomic_fetch_add(volatile T *ptr, atomic_diff_t< T > value, const memory_order mo) noexcept
原子获取并添加操作
NEFORCE_CONSTEXPR17 bool is_always_lock_free() noexcept
检查是否支持无锁操作
NEFORCE_ALWAYS_INLINE_INLINE remove_volatile_t< T > atomic_load_any(const T *ptr, memory_order mo) noexcept
通用原子加载操作
T atomic_fetch_sub_any(T *ptr, remove_volatile_t< T > value, memory_order mo) noexcept
通用原子获取并减去操作
T atomic_add_fetch_any(T *ptr, remove_volatile_t< T > value, memory_order mo) noexcept
通用原子添加并获取操作
NEFORCE_ALWAYS_INLINE_INLINE bool atomic_cmpexch_strong(volatile T *ptr, remove_volatile_t< T > *expected, remove_volatile_t< T > desired, const memory_order success, const memory_order failure) noexcept
强比较交换操作
void atomic_wait_address_v(const T *addr, T old, Func f) noexcept
基于值的原子等待
NEFORCE_ALWAYS_INLINE_INLINE remove_volatile_t< T > atomic_exchange(volatile T *ptr, remove_volatile_t< T > value, const memory_order mo) noexcept
原子交换操作
NEFORCE_ALWAYS_INLINE_INLINE remove_volatile_t< T > atomic_add_fetch(volatile T *ptr, atomic_diff_t< T > value, memory_order mo) noexcept
原子添加并获取操作
void atomic_notify_address(const T *addr, const bool all) noexcept
原子通知
T atomic_sub_fetch_any(T *ptr, remove_volatile_t< T > value, memory_order mo) noexcept
通用原子减去并获取操作
NEFORCE_ALWAYS_INLINE_INLINE remove_volatile_t< T > atomic_fetch_xor(volatile T *ptr, remove_volatile_t< T > value, const memory_order mo) noexcept
原子获取并异或操作
NEFORCE_ALWAYS_INLINE_INLINE void atomic_signal_fence(const memory_order mo) noexcept
信号内存屏障
NEFORCE_ALWAYS_INLINE_INLINE remove_volatile_t< T > atomic_xor_fetch(volatile T *ptr, remove_volatile_t< T > value, memory_order mo) noexcept
原子异或并获取操作
NEFORCE_ALWAYS_INLINE_INLINE remove_volatile_t< T > atomic_sub_fetch(volatile T *ptr, atomic_diff_t< T > value, memory_order mo) noexcept
原子减去并获取操作
NEFORCE_ALWAYS_INLINE_INLINE bool atomic_cmpexch_weak_any(volatile T *ptr, remove_volatile_t< T > *expected, remove_volatile_t< T > *desired, const memory_order success, const memory_order failure) noexcept
通用弱比较交换操作
NEFORCE_ALWAYS_INLINE_INLINE remove_volatile_t< T > atomic_fetch_or(volatile T *ptr, remove_volatile_t< T > value, const memory_order mo) noexcept
原子获取并或操作
NEFORCE_ALWAYS_INLINE_INLINE void atomic_store(volatile T *ptr, remove_volatile_t< T > value, const memory_order mo) noexcept
原子存储操作
NEFORCE_ALWAYS_INLINE_INLINE bool atomic_cmpexch_weak(volatile T *ptr, remove_volatile_t< T > *expected, remove_volatile_t< T > desired, const memory_order success, const memory_order failure) noexcept
弱比较交换操作
conditional_t< is_pointer_v< T >, ptrdiff_t, remove_volatile_t< T > > atomic_diff_t
原子操作的差值类型
NEFORCE_ALWAYS_INLINE_INLINE void atomic_thread_fence(const memory_order mo) noexcept
线程内存屏障
NEFORCE_ALWAYS_INLINE_INLINE remove_volatile_t< T > atomic_fetch_and(volatile T *ptr, remove_volatile_t< T > value, const memory_order mo) noexcept
原子获取并与操作
NEFORCE_ALWAYS_INLINE_INLINE remove_volatile_t< T > atomic_or_fetch(volatile T *ptr, remove_volatile_t< T > value, memory_order mo) noexcept
原子或并获取操作
NEFORCE_ALWAYS_INLINE_INLINE remove_volatile_t< T > atomic_load(const volatile T *ptr, const memory_order mo) noexcept
原子加载操作
NEFORCE_ALWAYS_INLINE_INLINE remove_volatile_t< T > atomic_exchange_any(T *ptr, remove_volatile_t< T > desired, memory_order mo) noexcept
通用原子交换操作
NEFORCE_ALWAYS_INLINE_INLINE remove_volatile_t< T > atomic_and_fetch(volatile T *ptr, remove_volatile_t< T > value, memory_order mo) noexcept
原子与并获取操作
NEFORCE_ALWAYS_INLINE_INLINE bool atomic_cmpexch_strong_any(volatile T *ptr, remove_volatile_t< T > *expected, remove_volatile_t< T > *desired, const memory_order success, const memory_order failure) noexcept
通用强比较交换操作
NEFORCE_INLINE17 constexpr bool is_floating_point_v
is_floating_point的便捷变量模板
NEFORCE_INLINE17 constexpr bool is_integral_v
is_integral的便捷变量模板
NEFORCE_INLINE17 constexpr bool is_integral_like_v
is_integral_like的便捷变量模板
NEFORCE_INLINE17 constexpr bool is_object_v
is_object的便捷变量模板
unsigned char byte_t
字节类型,定义为无符号字符
unsigned int uint32_t
32位无符号整数类型
unsigned long long uint64_t
64位无符号整数类型
int int32_t
32位有符号整数类型
#define NEFORCE_CONSTEXPR_ASSERT(COND)
编译时常量断言
@ wait
等待操作
NEFORCE_CONSTEXPR14 void * memory_copy(void *NEFORCE_RESTRICT dest, const void *NEFORCE_RESTRICT src, size_t count) noexcept
从源内存复制到目标内存
NEFORCE_PURE_FUNCTION NEFORCE_CONSTEXPR14 int memory_compare(const void *lhs, const void *rhs, size_t count) noexcept
比较两个内存区域的内容
constexpr bool is_valid_cmpexch_failure_order(const memory_order mo) noexcept
检查比较交换失败内存顺序是否有效
NEFORCE_INLINE17 constexpr auto memory_order_acq_rel
获取-释放内存顺序常量
NEFORCE_INLINE17 constexpr auto memory_order_consume
数据依赖内存顺序常量
NEFORCE_INLINE17 constexpr auto memory_order_release
释放内存顺序常量
NEFORCE_INLINE17 constexpr auto memory_order_seq_cst
顺序一致性内存顺序常量
NEFORCE_INLINE17 constexpr auto memory_order_relaxed
宽松内存顺序常量
NEFORCE_INLINE17 constexpr auto memory_order_acquire
获取内存顺序常量
memory_order
内存顺序
constexpr memory_order cmpexch_failure_order(const memory_order mo) noexcept
获取原子比较交换操作失败时的内存顺序
@ memory_order_mask
内存顺序掩码
uint64_t uintptr_t
可容纳指针的无符号整数类型
int64_t ptrdiff_t
指针差类型
typename remove_volatile< T >::type remove_volatile_t
remove_volatile的便捷别名
NEFORCE_CONSTEXPR14 T exchange(T &val, U &&new_val) noexcept(is_nothrow_move_constructible_v< T > &&is_nothrow_assignable_v< T &, U >)
将新值赋给对象并返回旧值
NEFORCE_INLINE17 constexpr bool is_trivially_copyable_v
is_trivially_copyable的便捷变量模板
NEFORCE_INLINE17 constexpr bool is_nothrow_copy_constructible_v
is_nothrow_copy_constructible的便捷变量模板
typename conditional< Test, T1, T2 >::type conditional_t
conditional的便捷别名
原子内存序定义
NEFORCE_ALWAYS_INLINE_INLINE value_type fetch_add(const ptrdiff_t dest, const memory_order mo=memory_order_seq_cst) volatile noexcept
volatile版本的原子获取并添加指针偏移
NEFORCE_ALWAYS_INLINE void notify_one() noexcept
通知一个等待线程
value_type operator--(int) noexcept
后置递减运算符
value_type operator++(int) volatile noexcept
volatile版本的后置递增运算符
NEFORCE_ALWAYS_INLINE bool compare_exchange_weak(value_type &expected, value_type desired, const memory_order mo=memory_order_seq_cst) volatile noexcept
volatile版本的简化版弱比较交换指针操作
value_type operator--(int) volatile noexcept
volatile版本的后置递减运算符
NEFORCE_ALWAYS_INLINE void store(const value_type ptr, const memory_order mo=memory_order_seq_cst) noexcept
原子存储指针操作
NEFORCE_ALWAYS_INLINE bool compare_exchange_strong(value_type &expected, value_type desired, const memory_order mo=memory_order_seq_cst) volatile noexcept
volatile版本的简化版强比较交换指针操作
NEFORCE_ALWAYS_INLINE bool compare_exchange_weak(value_type &expected, value_type desired, const memory_order success, const memory_order failure) noexcept
弱比较交换指针操作
value_type operator+=(const ptrdiff_t dest) noexcept
指针加法赋值运算符
value_type operator--() noexcept
前置递减运算符
NEFORCE_ALWAYS_INLINE value_type exchange(const value_type ptr, const memory_order mo=memory_order_seq_cst) noexcept
原子交换指针操作
bool is_lock_free() const volatile noexcept
volatile版本的检查是否支持无锁操作
NEFORCE_ALWAYS_INLINE_INLINE value_type load(const memory_order mo=memory_order_seq_cst) const volatile noexcept
volatile版本的原子加载指针操作
NEFORCE_ALWAYS_INLINE value_type fetch_add(const ptrdiff_t dest, const memory_order mo=memory_order_seq_cst) noexcept
原子获取并添加指针偏移
value_type operator=(const value_type ptr) noexcept
赋值运算符
value_type operator++() volatile noexcept
volatile版本的前置递增运算符
NEFORCE_ALWAYS_INLINE_INLINE void store(const value_type ptr, const memory_order mo=memory_order_seq_cst) volatile noexcept
volatile版本的原子存储指针操作
value_type operator-=(const ptrdiff_t dest) volatile noexcept
volatile版本的指针减法赋值运算符
NEFORCE_ALWAYS_INLINE bool compare_exchange_weak(value_type &expected, value_type desired, const memory_order mo=memory_order_seq_cst) noexcept
简化版弱比较交换指针操作
NEFORCE_ALWAYS_INLINE value_type load(const memory_order mo=memory_order_seq_cst) const noexcept
原子加载指针操作
bool is_lock_free() const noexcept
检查是否支持无锁操作
NEFORCE_ALWAYS_INLINE bool compare_exchange_strong(value_type &expected, value_type desired, const memory_order mo=memory_order_seq_cst) noexcept
简化版强比较交换指针操作
value_type operator--() volatile noexcept
volatile版本的前置递减运算符
value_type operator++() noexcept
前置递增运算符
NEFORCE_ALWAYS_INLINE_INLINE value_type exchange(const value_type ptr, const memory_order mo=memory_order_seq_cst) volatile noexcept
volatile版本的原子交换指针操作
value_type operator=(const value_type ptr) volatile noexcept
volatile版本的赋值运算符
NEFORCE_ALWAYS_INLINE void notify_all() noexcept
通知所有等待线程
NEFORCE_ALWAYS_INLINE_INLINE bool compare_exchange_strong(value_type &expected, value_type desired, const memory_order success, const memory_order failure) volatile noexcept
volatile版本的强比较交换指针操作
NEFORCE_ALWAYS_INLINE_INLINE value_type fetch_sub(const ptrdiff_t dest, const memory_order mo=memory_order_seq_cst) volatile noexcept
volatile版本的原子获取并减去指针偏移
value_type operator+=(const ptrdiff_t dest) volatile noexcept
volatile版本的指针加法赋值运算符
NEFORCE_ALWAYS_INLINE_INLINE bool compare_exchange_weak(value_type &expected, value_type desired, const memory_order success, const memory_order failure) volatile noexcept
volatile版本的弱比较交换指针操作
ptrdiff_t difference_type
差值类型
value_type operator-=(const ptrdiff_t dest) noexcept
指针减法赋值运算符
NEFORCE_ALWAYS_INLINE value_type fetch_sub(const ptrdiff_t dest, const memory_order mo=memory_order_seq_cst) noexcept
原子获取并减去指针偏移
value_type operator++(int) noexcept
后置递增运算符
NEFORCE_ALWAYS_INLINE void wait(value_type old, const memory_order mo=memory_order_seq_cst) const noexcept
等待指针改变
NEFORCE_ALWAYS_INLINE bool compare_exchange_strong(value_type &expected, value_type desired, const memory_order success, const memory_order failure) noexcept
强比较交换指针操作
原子类型基础模板类
value_type operator--(int) noexcept
后置递减运算符
NEFORCE_ALWAYS_INLINE value_type fetch_add(value_type value, const memory_order mo=memory_order_seq_cst) noexcept
原子获取并添加操作
NEFORCE_ALWAYS_INLINE value_type fetch_add(value_type value, const memory_order mo=memory_order_seq_cst) volatile noexcept
volatile版本的原子获取并添加操作
NEFORCE_ALWAYS_INLINE value_type fetch_sub(value_type value, const memory_order mo=memory_order_seq_cst) volatile noexcept
volatile版本的原子获取并减去操作
NEFORCE_ALWAYS_INLINE bool compare_exchange_weak(value_type &expected, value_type desired, const memory_order mo=memory_order_seq_cst) noexcept
简化版弱比较交换操作
value_type operator|=(value_type value) volatile noexcept
volatile版本的位或赋值运算符
NEFORCE_ALWAYS_INLINE value_type fetch_and(value_type value, const memory_order mo=memory_order_seq_cst) noexcept
原子获取并与操作
bool is_lock_free() const noexcept
检查是否支持无锁操作
NEFORCE_ALWAYS_INLINE value_type load(const memory_order mo=memory_order_seq_cst) const volatile noexcept
volatile版本的原子加载操作
value_type operator--() noexcept
前置递减运算符
NEFORCE_ALWAYS_INLINE void store(value_type value, const memory_order mo=memory_order_seq_cst) noexcept
原子存储操作
bool is_lock_free() const volatile noexcept
volatile版本的检查是否支持无锁操作
NEFORCE_ALWAYS_INLINE bool compare_exchange_strong(value_type &expected, value_type desired, const memory_order success, const memory_order failure) noexcept
强比较交换操作
value_type operator--() volatile noexcept
volatile版本的前置递减运算符
value_type operator-=(value_type value) volatile noexcept
volatile版本的减法赋值运算符
NEFORCE_ALWAYS_INLINE bool compare_exchange_strong(value_type &expected, value_type desired, const memory_order mo=memory_order_seq_cst) volatile noexcept
volatile版本的简化版强比较交换操作
NEFORCE_ALWAYS_INLINE bool compare_exchange_weak(value_type &expected, value_type desired, const memory_order success, const memory_order failure) volatile noexcept
volatile版本的弱比较交换操作
value_type operator-=(value_type value) noexcept
减法赋值运算符
value_type operator=(value_type value) volatile noexcept
volatile版本的赋值运算符
NEFORCE_ALWAYS_INLINE bool compare_exchange_weak(value_type &expected, value_type desired, const memory_order success, const memory_order failure) noexcept
弱比较交换操作
value_type operator^=(value_type value) noexcept
位异或赋值运算符
value_type operator++(int) volatile noexcept
volatile版本的后置递增运算符
value_type operator++(int) noexcept
后置递增运算符
NEFORCE_ALWAYS_INLINE void wait(value_type old, const memory_order mo=memory_order_seq_cst) const noexcept
等待值改变
NEFORCE_ALWAYS_INLINE bool compare_exchange_strong(value_type &expected, value_type desired, const memory_order success, const memory_order failure) volatile noexcept
volatile版本的强比较交换操作
value_type operator+=(value_type value) noexcept
加法赋值运算符
NEFORCE_ALWAYS_INLINE value_type fetch_xor(value_type value, const memory_order mo=memory_order_seq_cst) noexcept
原子获取并异或操作
NEFORCE_ALWAYS_INLINE void store(value_type value, const memory_order mo=memory_order_seq_cst) volatile noexcept
volatile版本的原子存储操作
NEFORCE_ALWAYS_INLINE void notify_one() noexcept
通知一个等待线程
value_type operator--(int) volatile noexcept
volatile版本的后置递减运算符
value_type operator=(value_type value) noexcept
赋值运算符
NEFORCE_ALWAYS_INLINE bool compare_exchange_weak(value_type &expected, value_type desired, const memory_order mo=memory_order_seq_cst) volatile noexcept
volatile版本的简化版弱比较交换操作
NEFORCE_ALWAYS_INLINE value_type fetch_and(value_type value, const memory_order mo=memory_order_seq_cst) volatile noexcept
volatile版本的原子获取并与操作
value_type operator++() noexcept
前置递增运算符
value_type operator+=(value_type value) volatile noexcept
volatile版本的加法赋值运算符
NEFORCE_ALWAYS_INLINE value_type exchange(value_type value, const memory_order mo=memory_order_seq_cst) volatile noexcept
volatile版本的原子交换操作
NEFORCE_ALWAYS_INLINE value_type exchange(value_type value, const memory_order mo=memory_order_seq_cst) noexcept
原子交换操作
value_type operator^=(value_type value) volatile noexcept
volatile版本的位异或赋值运算符
value_type operator|=(value_type value) noexcept
位或赋值运算符
NEFORCE_ALWAYS_INLINE value_type fetch_or(value_type value, const memory_order mo=memory_order_seq_cst) noexcept
原子获取并或操作
value_type operator++() volatile noexcept
volatile版本的前置递增运算符
NEFORCE_ALWAYS_INLINE value_type fetch_xor(value_type value, const memory_order mo=memory_order_seq_cst) volatile noexcept
volatile版本的原子获取并异或操作
value_type operator&=(value_type value) noexcept
位与赋值运算符
value_type operator&=(value_type value) volatile noexcept
volatile版本的位与赋值运算符
NEFORCE_ALWAYS_INLINE void notify_all() noexcept
通知所有等待线程
NEFORCE_ALWAYS_INLINE value_type fetch_sub(value_type value, const memory_order mo=memory_order_seq_cst) noexcept
原子获取并减去操作
NEFORCE_ALWAYS_INLINE bool compare_exchange_strong(value_type &expected, value_type desired, const memory_order mo=memory_order_seq_cst) noexcept
简化版强比较交换操作
NEFORCE_ALWAYS_INLINE value_type load(const memory_order mo=memory_order_seq_cst) const noexcept
原子加载操作
NEFORCE_ALWAYS_INLINE value_type fetch_or(value_type value, const memory_order mo=memory_order_seq_cst) volatile noexcept
volatile版本的原子获取并或操作
bool value_type
原子标志类型
NEFORCE_ALWAYS_INLINE void notify_one() noexcept
通知一个等待线程
NEFORCE_ALWAYS_INLINE_INLINE bool test(const memory_order mo=memory_order_seq_cst) const volatile noexcept
volatile版本的测试标志值
NEFORCE_ALWAYS_INLINE_INLINE void wait(const bool old, const memory_order mo=memory_order_seq_cst) const volatile noexcept
volatile版本的等待标志值改变
NEFORCE_ALWAYS_INLINE void clear(const memory_order mo=memory_order_seq_cst) noexcept
清除标志
value_type flag_
原子标志值
NEFORCE_ALWAYS_INLINE bool test(const memory_order mo=memory_order_seq_cst) const noexcept
测试标志值
NEFORCE_ALWAYS_INLINE_INLINE bool test_and_set(const memory_order mo=memory_order_seq_cst) volatile noexcept
volatile版本的测试并设置标志
NEFORCE_ALWAYS_INLINE bool test_and_set(const memory_order mo=memory_order_seq_cst) noexcept
测试并设置标志
NEFORCE_ALWAYS_INLINE_INLINE void clear(const memory_order mo=memory_order_seq_cst) volatile noexcept
volatile版本的清除标志
NEFORCE_ALWAYS_INLINE void wait(const bool old, const memory_order mo=memory_order_seq_cst) const noexcept
等待标志值改变
NEFORCE_ALWAYS_INLINE void notify_all() noexcept
通知所有等待线程
value_type fetch_add(value_type value, const memory_order mo=memory_order_seq_cst) noexcept
原子获取并添加操作
value_type fetch_sub(value_type value, const memory_order mo=memory_order_seq_cst) noexcept
原子获取并减去操作
bool is_lock_free() const noexcept
检查是否支持无锁操作
bool compare_exchange_weak(Float &expected, Float desire, const memory_order success, const memory_order failure) volatile noexcept
volatile版本的弱比较交换操作
bool compare_exchange_strong(Float &expected, Float desire, const memory_order mo=memory_order_seq_cst) noexcept
简化版强比较交换操作
value_type operator-=(value_type value) volatile noexcept
volatile版本的减法赋值运算符
NEFORCE_ALWAYS_INLINE void notify_all() noexcept
通知所有等待线程
Float load(const memory_order mo=memory_order_seq_cst) const noexcept
原子加载操作
Float operator=(Float value) noexcept
赋值运算符
value_type difference_type
差值类型
value_type operator+=(value_type value) volatile noexcept
volatile版本的加法赋值运算符
void store(Float value, const memory_order mo=memory_order_seq_cst) volatile noexcept
volatile版本的原子存储操作
NEFORCE_ALWAYS_INLINE void wait(Float old, const memory_order mo=memory_order_seq_cst) const noexcept
等待值改变
bool is_lock_free() const volatile noexcept
volatile版本的检查是否支持无锁操作
value_type fetch_sub(value_type value, const memory_order mo=memory_order_seq_cst) volatile noexcept
volatile版本的原子获取并减去操作
Float operator=(Float value) volatile noexcept
volatile版本的赋值运算符
bool compare_exchange_strong(Float &expected, Float desire, const memory_order success, const memory_order failure) volatile noexcept
volatile版本的强比较交换操作
value_type fetch_add(value_type value, const memory_order mo=memory_order_seq_cst) volatile noexcept
volatile版本的原子获取并添加操作
Float exchange(Float desire, const memory_order mo=memory_order_seq_cst) noexcept
原子交换操作
value_type operator-=(value_type value) noexcept
减法赋值运算符
bool compare_exchange_strong(Float &expected, Float desire, const memory_order mo=memory_order_seq_cst) volatile noexcept
volatile版本的简化版强比较交换操作
void store(Float value, const memory_order mo=memory_order_seq_cst) noexcept
原子存储操作
bool compare_exchange_weak(Float &expected, Float desire, const memory_order mo=memory_order_seq_cst) volatile noexcept
volatile版本的简化版弱比较交换操作
value_type operator+=(value_type value) noexcept
加法赋值运算符
Float exchange(Float desire, const memory_order mo=memory_order_seq_cst) volatile noexcept
volatile版本的原子交换操作
bool compare_exchange_weak(Float &expected, Float desire, const memory_order success, const memory_order failure) noexcept
弱比较交换操作
NEFORCE_ALWAYS_INLINE void notify_one() noexcept
通知一个等待线程
bool compare_exchange_strong(Float &expected, Float desire, const memory_order success, const memory_order failure) noexcept
强比较交换操作
Float load(const memory_order mo=memory_order_seq_cst) const volatile noexcept
volatile版本的原子加载操作
bool compare_exchange_weak(Float &expected, Float desire, const memory_order mo=memory_order_seq_cst) noexcept
简化版弱比较交换操作
Float exchange(Float desire, const memory_order mo=memory_order_seq_cst) noexcept
原子交换操作
void store(Float value, const memory_order mo=memory_order_seq_cst) noexcept
原子存储操作
Float load(const memory_order mo=memory_order_seq_cst) const noexcept
原子加载操作
NEFORCE_ALWAYS_INLINE void notify_all() noexcept
通知所有等待线程
bool compare_exchange_strong(Float &expected, Float desire, const memory_order success, const memory_order failure) noexcept
强比较交换操作
value_type operator+=(value_type value) noexcept
加法赋值运算符
bool is_lock_free() const noexcept
检查是否支持无锁操作
value_type operator-=(value_type value) noexcept
减法赋值运算符
value_type fetch_add(value_type value, const memory_order mo=memory_order_seq_cst) noexcept
原子获取并添加操作
NEFORCE_ALWAYS_INLINE void notify_one() noexcept
通知一个等待线程
static constexpr size_t required_alignment
对齐需求
NEFORCE_ALWAYS_INLINE void wait(Float old, const memory_order mo=memory_order_seq_cst) const noexcept
等待值改变
bool compare_exchange_strong(Float &expected, Float desire, const memory_order mo=memory_order_seq_cst) noexcept
简化版强比较交换操作
bool compare_exchange_weak(Float &expected, Float desire, const memory_order mo=memory_order_seq_cst) noexcept
简化版弱比较交换操作
value_type fetch_sub(value_type value, const memory_order mo=memory_order_seq_cst) noexcept
原子获取并减去操作
Float operator=(Float value) noexcept
赋值运算符
bool compare_exchange_weak(Float &expected, Float desire, const memory_order success, const memory_order failure) noexcept
弱比较交换操作
bool compare_exchange_weak(T &expected, T desire, const memory_order success, const memory_order failure) noexcept
弱比较交换操作
bool compare_exchange_strong(T &expected, T desire, const memory_order mo=memory_order_seq_cst) noexcept
简化版强比较交换操作
bool compare_exchange_strong(T &expected, T desire, const memory_order success, const memory_order failure) noexcept
强比较交换操作
T load(const memory_order mo=memory_order_seq_cst) const noexcept
原子加载操作
NEFORCE_ALWAYS_INLINE void wait(T old, const memory_order mo=memory_order_seq_cst) const noexcept
等待值改变
bool is_lock_free() const noexcept
检查是否支持无锁操作
bool compare_exchange_weak(T &expected, T desire, const memory_order mo=memory_order_seq_cst) noexcept
简化版弱比较交换操作
void store(T value, const memory_order mo=memory_order_seq_cst) noexcept
原子存储操作
T operator=(T value) noexcept
赋值运算符
NEFORCE_ALWAYS_INLINE void notify_all() noexcept
通知所有等待线程
static constexpr size_t required_alignment
对齐需求
T exchange(T desire, const memory_order mo=memory_order_seq_cst) noexcept
原子交换操作
NEFORCE_ALWAYS_INLINE void notify_one() noexcept
通知一个等待线程
NEFORCE_ALWAYS_INLINE value_type operator++(int) noexcept
后置递增运算符
bool compare_exchange_weak(T &expected, T desire, const memory_order success, const memory_order failure) noexcept
弱比较交换操作
value_type fetch_xor(value_type value, const memory_order mo=memory_order_seq_cst) noexcept
原子获取并异或操作
NEFORCE_ALWAYS_INLINE void notify_all() noexcept
通知所有等待线程
static constexpr size_t required_alignment
对齐需求
bool compare_exchange_weak(T &expected, T desire, const memory_order mo=memory_order_seq_cst) noexcept
简化版弱比较交换操作
value_type operator++() noexcept
前置递增运算符
value_type fetch_and(value_type value, const memory_order mo=memory_order_seq_cst) noexcept
原子获取并与操作
T operator=(T value) noexcept
赋值运算符
T exchange(T desire, const memory_order mo=memory_order_seq_cst) noexcept
原子交换操作
NEFORCE_NODISCARD bool is_lock_free() const noexcept
检查是否支持无锁操作
value_type operator^=(value_type value) noexcept
位异或赋值运算符
value_type fetch_add(value_type value, const memory_order mo=memory_order_seq_cst) noexcept
原子获取并添加操作
NEFORCE_ALWAYS_INLINE void notify_one() noexcept
通知一个等待线程
void store(T value, const memory_order mo=memory_order_seq_cst) noexcept
原子存储操作
bool compare_exchange_strong(T &expected, T desire, const memory_order success, const memory_order failure) noexcept
强比较交换操作
value_type fetch_sub(value_type value, const memory_order mo=memory_order_seq_cst) noexcept
原子获取并减去操作
value_type operator&=(value_type value) noexcept
位与赋值运算符
T load(const memory_order mo=memory_order_seq_cst) const noexcept
原子加载操作
NEFORCE_ALWAYS_INLINE void wait(T old, const memory_order mo=memory_order_seq_cst) const noexcept
等待值改变
value_type operator-=(value_type value) noexcept
减法赋值运算符
value_type fetch_or(value_type value, const memory_order mo=memory_order_seq_cst) noexcept
原子获取并或操作
value_type operator+=(value_type value) noexcept
加法赋值运算符
value_type operator|=(value_type value) noexcept
位或赋值运算符
value_type operator--() noexcept
前置递减运算符
NEFORCE_ALWAYS_INLINE value_type operator--(int) noexcept
后置递减运算符
bool compare_exchange_strong(T &expected, T desire, const memory_order mo=memory_order_seq_cst) noexcept
简化版强比较交换操作
原子引用基础模板类