MSTL 1.4.0
A Modern C++ Library with extended functionality, web components, and utility libraries
Loading...
Searching...
No matches
atomic_base.hpp
Browse the documentation for this file.
1#ifndef MSTL_CORE_ASYNC_ATOMIC_BASE_HPP__
2#define MSTL_CORE_ASYNC_ATOMIC_BASE_HPP__
3
10
11#include "atomic_wait.hpp"
12#ifdef MSTL_PLATFORM_WINDOWS__
13#include <intrin0.h>
14#endif
16
22
37
/// C++17-style named constants mirroring `std::memory_order_*` for the
/// project's own `memory_order` enumeration, so call sites can write
/// `memory_order_seq_cst` instead of `memory_order::seq_cst`.
MSTL_INLINE17 constexpr auto memory_order_relaxed = memory_order::relaxed;
MSTL_INLINE17 constexpr auto memory_order_consume = memory_order::consume;
MSTL_INLINE17 constexpr auto memory_order_acquire = memory_order::acquire;
MSTL_INLINE17 constexpr auto memory_order_release = memory_order::release;
MSTL_INLINE17 constexpr auto memory_order_acq_rel = memory_order::acq_rel;
MSTL_INLINE17 constexpr auto memory_order_seq_cst = memory_order::seq_cst;
50
51
64
74 return static_cast<memory_order>(static_cast<int64_t>(mo) | static_cast<int64_t>(mod));
75}
76
86 return static_cast<memory_order>(static_cast<int64_t>(mo) & static_cast<int64_t>(mod));
87}
88
89
102constexpr memory_order cmpexch_failure_order(const memory_order mo) noexcept {
103 constexpr auto mask = memory_order_modifier::memory_order_mask;
104 const memory_order base_mo = mo & mask;
105 const memory_order failure_order =
108 : base_mo;
109 constexpr auto modifier_mask = memory_order_modifier::memory_order_modifier_mask;
110 const auto modifiers = static_cast<memory_order_modifier>(mo & modifier_mask);
111 return failure_order | modifiers;
112}
113
124
125
/// @brief Hardware memory fence, analogous to std::atomic_thread_fence.
/// @param mo ordering strength; relaxed is a no-op.
MSTL_ALWAYS_INLINE_INLINE void atomic_thread_fence(const memory_order mo) noexcept {
#ifdef MSTL_COMPILER_MSVC__
    // relaxed imposes no ordering at all.
    if (mo == memory_order_relaxed) return;
#if defined(MSTL_ARCH_X86__)
    // Compiler-only reordering barrier; x86 hardware already orders
    // everything except store-load.
    ::_ReadWriteBarrier();
    if (mo == memory_order_seq_cst) {
        // A locked RMW on a dummy stack slot acts as a full hardware fence on
        // x86 (the same technique MSVC's own STL uses); the value of `guard`
        // is deliberately irrelevant and left uninitialized.
        volatile long guard;
        ::_InterlockedIncrement(&guard);
        ::_ReadWriteBarrier();
    }
#elif defined(MSTL_ARCH_ARM__)
    if (mo == memory_order_acquire || mo == memory_order_consume) {
        ::_Memory_load_acquire_barrier();
    } else {
        // NOTE(review): this is a compiler-only barrier; whether a hardware
        // barrier is also required here on ARM should be confirmed.
        ::_ReadWriteBarrier();
    }
#else
    // Unsupported MSVC target architecture.
    assert(false);
#endif
#else
    // GCC/Clang: delegate directly to the builtin.
    __atomic_thread_fence(static_cast<int32_t>(mo));
#endif
}
155
162MSTL_ALWAYS_INLINE_INLINE void atomic_signal_fence(const memory_order mo) noexcept {
163#ifdef MSTL_COMPILER_MSVC__
164 if (mo != memory_order_relaxed) ::_ReadWriteBarrier();
165#else
166 __atomic_signal_fence(static_cast<int32_t>(mo));
167#endif
168}
169 // MemoryOrder
171
174
175#ifdef MSTL_COMPILER_MSVC__
176
183template <size_t Size>
184struct interlocked_exchange_impl;
185
186template <>
187struct interlocked_exchange_impl<1> {
188 template <typename T>
189 static T call(volatile T* target, T value) {
190 return static_cast<T>(::_InterlockedExchange8(
191 reinterpret_cast<volatile char*>(target),
192 static_cast<char>(value)));
193 }
194};
195
196template <>
197struct interlocked_exchange_impl<2> {
198 template <typename T>
199 static T call(volatile T* target, T value) {
200 return static_cast<T>(::_InterlockedExchange16(
201 reinterpret_cast<volatile short*>(target),
202 static_cast<short>(value)));
203 }
204};
205
206template <>
207struct interlocked_exchange_impl<4> {
208 template <typename T>
209 static T call(volatile T* target, T value) {
210 return static_cast<T>(::_InterlockedExchange(
211 reinterpret_cast<volatile long*>(target),
212 static_cast<long>(value)));
213 }
214};
215
216template <>
217struct interlocked_exchange_impl<8> {
218 template <typename T>
219 static T call(volatile T* target, T value) {
220 return static_cast<T>(::_interlockedexchange64(
221 reinterpret_cast<volatile long long*>(target),
222 static_cast<long long>(value)));
223 }
224};
225
226
/// Size-dispatched wrappers over the MSVC `_InterlockedCompareExchange*`
/// intrinsics. `call` atomically compares `*target` with `*expected`; on
/// match it stores `desired` and returns true, otherwise it writes the
/// observed value back into `*expected` and returns false (the standard
/// compare-exchange contract).
template <size_t Size>
struct interlocked_compare_exchange_impl;

template <>
struct interlocked_compare_exchange_impl<1> {
    template <typename T>
    static bool call(volatile T* target, T* expected, T desired) {
        // The intrinsic returns the value observed before the operation.
        const char old = ::_InterlockedCompareExchange8(
            reinterpret_cast<volatile char*>(target),
            *reinterpret_cast<char*>(&desired),
            *reinterpret_cast<char*>(expected));
        if (old == *reinterpret_cast<char*>(expected)) return true;
        // Failure: report what we actually saw, per cmpexch semantics.
        *reinterpret_cast<char*>(expected) = old;
        return false;
    }
};

template <>
struct interlocked_compare_exchange_impl<2> {
    template <typename T>
    static bool call(volatile T* target, T* expected, T desired) {
        const short old = ::_InterlockedCompareExchange16(
            reinterpret_cast<volatile short*>(target),
            *reinterpret_cast<short*>(&desired),
            *reinterpret_cast<short*>(expected));
        if (old == *reinterpret_cast<short*>(expected)) return true;
        *reinterpret_cast<short*>(expected) = old;
        return false;
    }
};

template <>
struct interlocked_compare_exchange_impl<4> {
    template <typename T>
    static bool call(volatile T* target, T* expected, T desired) {
        const long old = ::_InterlockedCompareExchange(
            reinterpret_cast<volatile long*>(target),
            *reinterpret_cast<long*>(&desired),
            *reinterpret_cast<long*>(expected));
        if (old == *reinterpret_cast<long*>(expected)) return true;
        *reinterpret_cast<long*>(expected) = old;
        return false;
    }
};

template <>
struct interlocked_compare_exchange_impl<8> {
    template <typename T>
    static bool call(volatile T* target, T* expected, T desired) {
        const long long old = ::_InterlockedCompareExchange64(
            reinterpret_cast<volatile long long*>(target),
            *reinterpret_cast<long long*>(&desired),
            *reinterpret_cast<long long*>(expected));
        if (old == *reinterpret_cast<long long*>(expected)) return true;
        *reinterpret_cast<long long*>(expected) = old;
        return false;
    }
};

template <>
struct interlocked_compare_exchange_impl<16> {
    template <typename T>
    static bool call(volatile T* target, T* expected, T desired) {
        // _InterlockedCompareExchange128 takes high/low 64-bit halves and a
        // comparand buffer it overwrites with the observed value.
        alignas(16) long long exp_arr[2];
        alignas(16) long long des_arr[2];
        _MSTL memory_copy(exp_arr, expected, 16);
        _MSTL memory_copy(des_arr, &desired, 16);

        // Little-endian layout: [0] is the low half, [1] the high half.
        const bool result = ::_InterlockedCompareExchange128(
            reinterpret_cast<volatile long long*>(target),
            des_arr[1], des_arr[0], exp_arr) != 0;

        if (!result) {
            // exp_arr now holds the observed value; propagate it out.
            _MSTL memory_copy(expected, exp_arr, 16);
        }
        return result;
    }
};
309
310
315template <size_t Size>
316struct interlocked_fetch_add_impl;
317
318template <>
319struct interlocked_fetch_add_impl<1> {
320 template <typename T>
321 static T call(volatile T* target, T value) {
322 return static_cast<T>(::_InterlockedExchangeAdd8(
323 reinterpret_cast<volatile char*>(target),
324 static_cast<char>(value)));
325 }
326};
327
328template <>
329struct interlocked_fetch_add_impl<2> {
330 template <typename T>
331 static T call(volatile T* target, T value) {
332 return static_cast<T>(::_InterlockedExchangeAdd16(
333 reinterpret_cast<volatile short*>(target),
334 static_cast<short>(value)));
335 }
336};
337
338template <>
339struct interlocked_fetch_add_impl<4> {
340 template <typename T>
341 static T call(volatile T* target, T value) {
342 return static_cast<T>(::_InterlockedExchangeAdd(
343 reinterpret_cast<volatile long*>(target),
344 static_cast<long>(value)));
345 }
346};
347
348template <>
349struct interlocked_fetch_add_impl<8> {
350 template <typename T>
351 static T call(volatile T* target, T value) {
352 return static_cast<T>(::_interlockedexchangeadd64(
353 reinterpret_cast<volatile long long*>(target),
354 static_cast<long long>(value)));
355 }
356};
357
358
363template <size_t Size>
364struct interlocked_fetch_and_impl;
365
366template <>
367struct interlocked_fetch_and_impl<1> {
368 template <typename T>
369 static T call(volatile T* target, T value) {
370 return static_cast<T>(::_InterlockedAnd8(
371 reinterpret_cast<volatile char*>(target),
372 static_cast<char>(value)));
373 }
374};
375
376template <>
377struct interlocked_fetch_and_impl<2> {
378 template <typename T>
379 static T call(volatile T* target, T value) {
380 return static_cast<T>(::_InterlockedAnd16(
381 reinterpret_cast<volatile short*>(target),
382 static_cast<short>(value)));
383 }
384};
385
386template <>
387struct interlocked_fetch_and_impl<4> {
388 template <typename T>
389 static T call(volatile T* target, T value) {
390 return static_cast<T>(::_InterlockedAnd(
391 reinterpret_cast<volatile long*>(target),
392 static_cast<long>(value)));
393 }
394};
395
396template <>
397struct interlocked_fetch_and_impl<8> {
398 template <typename T>
399 static T call(volatile T* target, T value) {
400 return static_cast<T>(::_interlockedand64(
401 reinterpret_cast<volatile long long*>(target),
402 static_cast<long long>(value)));
403 }
404};
405
406
411template <size_t Size>
412struct interlocked_fetch_or_impl;
413
414template <>
415struct interlocked_fetch_or_impl<1> {
416 template <typename T>
417 static T call(volatile T* target, T value) {
418 return static_cast<T>(::_InterlockedOr8(
419 reinterpret_cast<volatile char*>(target), static_cast<char>(value)));
420 }
421};
422template <>
423struct interlocked_fetch_or_impl<2> {
424 template <typename T>
425 static T call(volatile T* target, T value) {
426 return static_cast<T>(::_InterlockedOr16(
427 reinterpret_cast<volatile short*>(target), static_cast<short>(value)));
428 }
429};
430template <>
431struct interlocked_fetch_or_impl<4> {
432 template <typename T>
433 static T call(volatile T* target, T value) {
434 return static_cast<T>(::_InterlockedOr(
435 reinterpret_cast<volatile long*>(target), static_cast<long>(value)));
436 }
437};
438template <>
439struct interlocked_fetch_or_impl<8> {
440 template <typename T>
441 static T call(volatile T* target, T value) {
442 return static_cast<T>(::_interlockedor64(
443 reinterpret_cast<volatile long long*>(target),
444 static_cast<long long>(value)));
445 }
446};
447
448
453template <size_t Size>
454struct interlocked_fetch_xor_impl;
455
456template <>
457struct interlocked_fetch_xor_impl<1> {
458 template <typename T>
459 static T call(volatile T* target, T value) {
460 return static_cast<T>(::_InterlockedXor8(
461 reinterpret_cast<volatile char*>(target), static_cast<char>(value)));
462 }
463};
464template <>
465struct interlocked_fetch_xor_impl<2> {
466 template <typename T>
467 static T call(volatile T* target, T value) {
468 return static_cast<T>(::_InterlockedXor16(
469 reinterpret_cast<volatile short*>(target), static_cast<short>(value)));
470 }
471};
472template <>
473struct interlocked_fetch_xor_impl<4> {
474 template <typename T>
475 static T call(volatile T* target, T value) {
476 return static_cast<T>(::_InterlockedXor(
477 reinterpret_cast<volatile long*>(target), static_cast<long>(value)));
478 }
479};
480template <>
481struct interlocked_fetch_xor_impl<8> {
482 template <typename T>
483 static T call(volatile T* target, T value) {
484 return static_cast<T>(::_interlockedxor64(
485 reinterpret_cast<volatile long long*>(target),
486 static_cast<long long>(value)));
487 }
488};
489
490#endif
491
492#ifdef MSTL_COMPILER_GNUC__
493
/// Compile-time lock-freedom query for GCC/Clang, backed by the
/// `__atomic_always_lock_free` builtin (nullptr = object of typical
/// alignment for its size).
template <size_t Size>
struct atomic_is_always_lock_free_impl {
    static constexpr bool value = __atomic_always_lock_free(Size, nullptr);
};
504
505#else
506
/// Fallback lock-freedom table for compilers without the GNU builtin:
/// exactly the native power-of-two sizes 1/2/4/8 are treated as lock-free,
/// everything else as not lock-free.
template <size_t Size>
struct atomic_is_always_lock_free_impl {
    static constexpr bool value =
        Size == 1 || Size == 2 || Size == 4 || Size == 8;
};
527
528#endif
529
532
538
543template <typename T>
545
546
/// @brief Atomically store `value` into `*ptr` (integral types only).
/// @param mo ordering for the store.
/// @note On MSVC every store is implemented as an interlocked exchange,
///       which over-satisfies all orders on x86; the trailing barrier only
///       prevents compiler reordering.
template <typename T>
MSTL_ALWAYS_INLINE_INLINE void
atomic_store(volatile T* ptr, remove_volatile_t<T> value, const memory_order mo) noexcept {
    static_assert(is_integral_v<T>, "T must be integral type");
#ifdef MSTL_COMPILER_GNUC__
    __atomic_store_n(ptr, value, static_cast<int32_t>(mo));
#else
    _INNER interlocked_exchange_impl<sizeof(T)>::call(ptr, value);
    if (mo == memory_order_seq_cst || mo == memory_order_release) {
        ::_ReadWriteBarrier();
    }
#endif
}
568
/// @brief Atomically load and return `*ptr` (integral types only).
/// @param mo ordering for the load.
/// @note The MSVC path is a plain volatile read plus a compiler-only barrier;
///       this relies on x86's strong load ordering. NOTE(review): on weaker
///       MSVC targets (ARM) this may need a hardware barrier — confirm.
template <typename T>
MSTL_ALWAYS_INLINE_INLINE remove_volatile_t<T>
atomic_load(const volatile T* ptr, const memory_order mo) noexcept {
    static_assert(is_integral_v<T>, "T must be integral type");
#ifdef MSTL_COMPILER_GNUC__
    return __atomic_load_n(ptr, static_cast<int32_t>(mo));
#else
    remove_volatile_t<T> result = *ptr;
    if (mo == memory_order_seq_cst || mo == memory_order_acquire) {
        ::_ReadWriteBarrier();
    }
    return result;
#endif
}
591
601template <typename T>
602MSTL_ALWAYS_INLINE_INLINE remove_volatile_t<T>
603atomic_exchange(volatile T* ptr, remove_volatile_t<T> value, const memory_order mo) noexcept {
604 static_assert(is_integral_v<T>, "T must be integral type");
605#ifdef MSTL_COMPILER_GNUC__
606 return __atomic_exchange_n(ptr, value, static_cast<int32_t>(mo));
607#else
608 remove_volatile_t<T> old = _INNER interlocked_exchange_impl<sizeof(T)>::call(ptr, value);
609 if (mo == memory_order_seq_cst) {
610 ::_ReadWriteBarrier();
611 }
612 return old;
613#endif
614}
615
628template <typename T>
629MSTL_ALWAYS_INLINE_INLINE bool
631 volatile T* ptr, remove_volatile_t<T>* expected,
632 remove_volatile_t<T> desired, const memory_order success, const memory_order failure) noexcept {
633 static_assert(is_integral_v<T>, "T must be integral type");
634 MSTL_CONSTEXPR_ASSERT(is_valid_cmpexch_failure_order(failure));
635#ifdef MSTL_COMPILER_GNUC__
636 return __atomic_compare_exchange_n(
637 ptr, expected, desired, true,
638 static_cast<int32_t>(success), static_cast<int32_t>(failure));
639#else
640#if defined(MSTL_ARCH_X86__)
641 const bool result = _INNER interlocked_compare_exchange_impl<sizeof(T)>::call(ptr, expected, desired);
643 ::_ReadWriteBarrier();
644 }
645 return result;
646#else
647 remove_volatile_t<T> old_val = *expected;
649 bool success_flag;
650#if defined(MSTL_ARCH_ARM__)
651 MSTL_IF_CONSTEXPR (sizeof(T) == 1) {
652 asm volatile(
653 "ldrexb %[loaded], [%[ptr]]\n\t"
654 "cmp %[loaded], %[old_val]\n\t"
655 "bne 1f\n\t"
656 "strexb %w[success], %w[desired], [%[ptr]]\n\t"
657 "1:"
658 : [loaded] "=&r" (loaded), [success] "=&r" (success_flag)
659 : [ptr] "r" (ptr), [old_val] "r" (old_val), [desired] "r" (desired)
660 : "cc", "memory");
661 } else MSTL_IF_CONSTEXPR (sizeof(T) == 2) {
662 asm volatile(
663 "ldrexh %[loaded], [%[ptr]]\n\t"
664 "cmp %[loaded], %[old_val]\n\t"
665 "bne 1f\n\t"
666 "strexh %w[success], %w[desired], [%[ptr]]\n\t"
667 "1:"
668 : [loaded] "=&r" (loaded), [success] "=&r" (success_flag)
669 : [ptr] "r" (ptr), [old_val] "r" (old_val), [desired] "r" (desired)
670 : "cc", "memory");
671 } else MSTL_IF_CONSTEXPR (sizeof(T) == 4) {
672 asm volatile(
673 "ldrex %[loaded], [%[ptr]]\n\t"
674 "cmp %[loaded], %[old_val]\n\t"
675 "bne 1f\n\t"
676 "strex %w[success], %w[desired], [%[ptr]]\n\t"
677 "1:"
678 : [loaded] "=&r" (loaded), [success] "=&r" (success_flag)
679 : [ptr] "r" (ptr), [old_val] "r" (old_val), [desired] "r" (desired)
680 : "cc", "memory");
681 } else MSTL_IF_CONSTEXPR (sizeof(T) == 8) {
682 asm volatile(
683 "ldrexd %[loaded], [%[ptr]]\n\t"
684 "cmp %[loaded], %[old_val]\n\t"
685 "bne 1f\n\t"
686 "strexd %w[success], %[desired], [%[ptr]]\n\t"
687 "1:"
688 : [loaded] "=&r" (loaded), [success] "=&r" (success_flag)
689 : [ptr] "r" (ptr), [old_val] "r" (old_val), [desired] "r" (desired)
690 : "cc", "memory");
691 }
692#elif defined(MSTL_ARCH_RISCV__)
693 MSTL_IF_CONSTEXPR (sizeof(T) == 4) {
694 asm volatile(
695 "lr.w %[loaded], (%[ptr])\n\t"
696 "bne %[loaded], %[old_val], 1f\n\t"
697 "sc.w %[success], %[desired], (%[ptr])\n\t"
698 "1:"
699 : [loaded] "=&r" (loaded), [success] "=&r" (success_flag)
700 : [ptr] "r" (ptr), [old_val] "r" (old_val), [desired] "r" (desired)
701 : "memory");
702 } else MSTL_IF_CONSTEXPR (sizeof(T) == 8) {
703 asm volatile(
704 "lr.d %[loaded], (%[ptr])\n\t"
705 "bne %[loaded], %[old_val], 1f\n\t"
706 "sc.d %[success], %[desired], (%[ptr])\n\t"
707 "1:"
708 : [loaded] "=&r" (loaded), [success] "=&r" (success_flag)
709 : [ptr] "r" (ptr), [old_val] "r" (old_val), [desired] "r" (desired)
710 : "memory");
711 }
712#elif defined(MSTL_ARCH_LOONGARCH__)
713 MSTL_IF_CONSTEXPR (sizeof(T) == 4) {
714 asm volatile(
715 "ll.w %[loaded], %[ptr]\n\t"
716 "bne %[loaded], %[old_val], 1f\n\t"
717 "sc.w %[desired], %[ptr]\n\t"
718 "move %[success], %[desired]\n\t"
719 "1:"
720 : [loaded] "=&r" (loaded), [success] "=&r" (success_flag)
721 : [ptr] "m" (*ptr), [old_val] "r" (old_val), [desired] "r" (desired)
722 : "memory");
723 } else MSTL_IF_CONSTEXPR (sizeof(T) == 8) {
724 asm volatile(
725 "ll.d %[loaded], %[ptr]\n\t"
726 "bne %[loaded], %[old_val], 1f\n\t"
727 "sc.d %[desired], %[ptr]\n\t"
728 "move %[success], %[desired]\n\t"
729 "1:"
730 : [loaded] "=&r" (loaded), [success] "=&r" (success_flag)
731 : [ptr] "m" (*ptr), [old_val] "r" (old_val), [desired] "r" (desired)
732 : "memory");
733 }
734#endif
735 if (loaded != old_val) {
736 *expected = loaded;
737 return false;
738 }
739 return success_flag == 0;
740#endif
741#endif
742}
743
756template <typename T>
757MSTL_ALWAYS_INLINE_INLINE bool
759 volatile T* ptr, remove_volatile_t<T>* expected,
760 remove_volatile_t<T> desired, const memory_order success, const memory_order failure) noexcept {
761 static_assert(is_integral_v<T>, "T must be integral type");
762 MSTL_CONSTEXPR_ASSERT(is_valid_cmpexch_failure_order(failure));
763#ifdef MSTL_COMPILER_GNUC__
764 return __atomic_compare_exchange_n(
765 ptr, expected, desired, false,
766 static_cast<int32_t>(success), static_cast<int32_t>(failure));
767#else
768#if defined(MSTL_ARCH_X86__)
769 return _MSTL atomic_cmpexch_weak(ptr, expected, desired, success, failure);
770#else
771 remove_volatile_t<T> old_val = *expected;
772 while (true) {
773 if (_INNER cmpexch_weak(ptr, expected, desired, success, failure)) {
774 return true;
775 }
776 if (*expected != old_val) {
777 return false;
778 }
779 }
780#endif
781#endif
782}
783
793template <typename T>
794MSTL_ALWAYS_INLINE_INLINE remove_volatile_t<T>
795atomic_fetch_add(volatile T* ptr, atomic_diff_t<T> value, const memory_order mo) noexcept {
796 static_assert(is_integral_v<T>, "T must be integral type");
797#ifdef MSTL_COMPILER_GNUC__
798 return __atomic_fetch_add(ptr, value, static_cast<int32_t>(mo));
799#else
800 remove_volatile_t<T> old = _INNER interlocked_fetch_add_impl<sizeof(T)>::call(ptr, value);
801 if (mo == memory_order_seq_cst) {
802 ::_ReadWriteBarrier();
803 }
804 return old;
805#endif
806}
807
817template <typename T>
818MSTL_ALWAYS_INLINE_INLINE remove_volatile_t<T>
819atomic_fetch_sub(volatile T* ptr, atomic_diff_t<T> value, const memory_order mo) noexcept {
820 static_assert(is_integral_v<T>, "T must be integral type");
821#ifdef MSTL_COMPILER_GNUC__
822 return __atomic_fetch_sub(ptr, value, static_cast<int32_t>(mo));
823#else
824 return _MSTL atomic_fetch_add(ptr, static_cast<atomic_diff_t<T>>(-value), mo);
825#endif
826}
827
837template <typename T>
838MSTL_ALWAYS_INLINE_INLINE remove_volatile_t<T>
839atomic_fetch_and(volatile T* ptr, remove_volatile_t<T> value, const memory_order mo) noexcept {
840 static_assert(is_integral_v<T>, "T must be integral type");
841#ifdef MSTL_COMPILER_GNUC__
842 return __atomic_fetch_and(ptr, value, static_cast<int32_t>(mo));
843#else
844 remove_volatile_t<T> old = _INNER interlocked_fetch_and_impl<sizeof(T)>::call(ptr, value);
845 if (mo == memory_order_seq_cst) {
846 ::_ReadWriteBarrier();
847 }
848 return old;
849#endif
850}
851
861template <typename T>
862MSTL_ALWAYS_INLINE_INLINE remove_volatile_t<T>
863atomic_fetch_or(volatile T* ptr, remove_volatile_t<T> value, const memory_order mo) noexcept {
864 static_assert(is_integral_v<T>, "T must be integral type");
865#ifdef MSTL_COMPILER_GNUC__
866 return __atomic_fetch_or(ptr, value, static_cast<int32_t>(mo));
867#else
868 remove_volatile_t<T> old = _INNER interlocked_fetch_or_impl<sizeof(T)>::call(ptr, value);
869 if (mo == memory_order_seq_cst) {
870 ::_ReadWriteBarrier();
871 }
872 return old;
873#endif
874}
875
885template <typename T>
886MSTL_ALWAYS_INLINE_INLINE remove_volatile_t<T>
887atomic_fetch_xor(volatile T* ptr, remove_volatile_t<T> value, const memory_order mo) noexcept {
888 static_assert(is_integral_v<T>, "T must be integral type");
889#ifdef MSTL_COMPILER_GNUC__
890 return __atomic_fetch_xor(ptr, value, static_cast<int32_t>(mo));
891#else
892 remove_volatile_t<T> old = _INNER interlocked_fetch_xor_impl<sizeof(T)>::call(ptr, value);
893 if (mo == memory_order_seq_cst) {
894 ::_ReadWriteBarrier();
895 }
896 return old;
897#endif
898}
899
909template <typename T>
910MSTL_ALWAYS_INLINE_INLINE remove_volatile_t<T>
911atomic_add_fetch(volatile T* ptr, atomic_diff_t<T> value, memory_order mo) noexcept {
912 static_assert(is_integral_v<T>, "T must be integral type");
913#ifdef MSTL_COMPILER_GNUC__
914 return __atomic_add_fetch(ptr, value, static_cast<int32_t>(mo));
915#else
916 return _MSTL atomic_fetch_add(ptr, value, mo) + value;
917#endif
918}
919
929template <typename T>
930MSTL_ALWAYS_INLINE_INLINE remove_volatile_t<T>
931atomic_sub_fetch(volatile T* ptr, atomic_diff_t<T> value, memory_order mo) noexcept {
932 static_assert(is_integral_v<T>, "T must be integral type");
933#ifdef MSTL_COMPILER_GNUC__
934 return __atomic_sub_fetch(ptr, value, static_cast<int32_t>(mo));
935#else
936 return _MSTL atomic_fetch_sub(ptr, value, mo) - value;
937#endif
938}
939
949template <typename T>
950MSTL_ALWAYS_INLINE_INLINE remove_volatile_t<T>
951atomic_and_fetch(volatile T* ptr, remove_volatile_t<T> value, memory_order mo) noexcept {
952 static_assert(is_integral_v<T>, "T must be integral type");
953#ifdef MSTL_COMPILER_GNUC__
954 return __atomic_and_fetch(ptr, value, static_cast<int32_t>(mo));
955#else
956 return _MSTL atomic_fetch_and(ptr, value, mo) & value;
957#endif
958}
959
969template <typename T>
970MSTL_ALWAYS_INLINE_INLINE remove_volatile_t<T>
971atomic_or_fetch(volatile T* ptr, remove_volatile_t<T> value, memory_order mo) noexcept {
972 static_assert(is_integral_v<T>, "T must be integral type");
973#ifdef MSTL_COMPILER_GNUC__
974 return __atomic_or_fetch(ptr, value, static_cast<int32_t>(mo));
975#else
976 return _MSTL atomic_fetch_or(ptr, value, mo) | value;
977#endif
978}
979
989template <typename T>
990MSTL_ALWAYS_INLINE_INLINE remove_volatile_t<T>
991atomic_xor_fetch(volatile T* ptr, remove_volatile_t<T> value, memory_order mo) noexcept {
992 static_assert(is_integral_v<T>, "T must be integral type");
993#ifdef MSTL_COMPILER_GNUC__
994 return __atomic_xor_fetch(ptr, value, static_cast<int32_t>(mo));
995#else
996 return _MSTL atomic_fetch_xor(ptr, value, mo) ^ value;
997#endif
998}
999
1000
1012template <typename T>
1013MSTL_ALWAYS_INLINE_INLINE bool
1015 volatile T* ptr, remove_volatile_t<T>* expected,
1016 remove_volatile_t<T>* desired, const memory_order success, const memory_order failure) noexcept {
1017 MSTL_CONSTEXPR_ASSERT(is_valid_cmpexch_failure_order(failure));
1018#ifdef MSTL_COMPILER_GNUC__
1019 return __atomic_compare_exchange(
1020 ptr, expected, desired, true,
1021 static_cast<int32_t>(success), static_cast<int32_t>(failure));
1022#else
1023#if defined(MSTL_ARCH_X86__)
1024 const bool result = _INNER interlocked_compare_exchange_impl<sizeof(T)>::call(ptr, expected, *desired);
1025 if (success == memory_order_seq_cst || failure == memory_order_seq_cst) {
1026 ::_ReadWriteBarrier();
1027 }
1028 return result;
1029#else
1030 remove_volatile_t<T> old_val = *expected;
1031 remove_volatile_t<T> loaded;
1032 bool success_flag;
1033#if defined(MSTL_ARCH_ARM__)
1034 MSTL_IF_CONSTEXPR (sizeof(T) == 1) {
1035 asm volatile(
1036 "ldrexb %[loaded], [%[ptr]]\n\t"
1037 "cmp %[loaded], %[old_val]\n\t"
1038 "bne 1f\n\t"
1039 "strexb %w[success], %w[desired], [%[ptr]]\n\t"
1040 "1:"
1041 : [loaded] "=&r" (loaded), [success] "=&r" (success_flag)
1042 : [ptr] "r" (ptr), [old_val] "r" (old_val), [desired] "r" (desired)
1043 : "cc", "memory");
1044 } else MSTL_IF_CONSTEXPR (sizeof(T) == 2) {
1045 asm volatile(
1046 "ldrexh %[loaded], [%[ptr]]\n\t"
1047 "cmp %[loaded], %[old_val]\n\t"
1048 "bne 1f\n\t"
1049 "strexh %w[success], %w[desired], [%[ptr]]\n\t"
1050 "1:"
1051 : [loaded] "=&r" (loaded), [success] "=&r" (success_flag)
1052 : [ptr] "r" (ptr), [old_val] "r" (old_val), [desired] "r" (desired)
1053 : "cc", "memory");
1054 } else MSTL_IF_CONSTEXPR (sizeof(T) == 4) {
1055 asm volatile(
1056 "ldrex %[loaded], [%[ptr]]\n\t"
1057 "cmp %[loaded], %[old_val]\n\t"
1058 "bne 1f\n\t"
1059 "strex %w[success], %w[desired], [%[ptr]]\n\t"
1060 "1:"
1061 : [loaded] "=&r" (loaded), [success] "=&r" (success_flag)
1062 : [ptr] "r" (ptr), [old_val] "r" (old_val), [desired] "r" (desired)
1063 : "cc", "memory");
1064 } else MSTL_IF_CONSTEXPR (sizeof(T) == 8) {
1065 asm volatile(
1066 "ldrexd %[loaded], [%[ptr]]\n\t"
1067 "cmp %[loaded], %[old_val]\n\t"
1068 "bne 1f\n\t"
1069 "strexd %w[success], %[desired], [%[ptr]]\n\t"
1070 "1:"
1071 : [loaded] "=&r" (loaded), [success] "=&r" (success_flag)
1072 : [ptr] "r" (ptr), [old_val] "r" (old_val), [desired] "r" (desired)
1073 : "cc", "memory");
1074 }
1075#elif defined(MSTL_ARCH_RISCV__)
1076 MSTL_IF_CONSTEXPR (sizeof(T) == 4) {
1077 asm volatile(
1078 "lr.w %[loaded], (%[ptr])\n\t"
1079 "bne %[loaded], %[old_val], 1f\n\t"
1080 "sc.w %[success], %[desired], (%[ptr])\n\t"
1081 "1:"
1082 : [loaded] "=&r" (loaded), [success] "=&r" (success_flag)
1083 : [ptr] "r" (ptr), [old_val] "r" (old_val), [desired] "r" (desired)
1084 : "memory");
1085 } else MSTL_IF_CONSTEXPR (sizeof(T) == 8) {
1086 asm volatile(
1087 "lr.d %[loaded], (%[ptr])\n\t"
1088 "bne %[loaded], %[old_val], 1f\n\t"
1089 "sc.d %[success], %[desired], (%[ptr])\n\t"
1090 "1:"
1091 : [loaded] "=&r" (loaded), [success] "=&r" (success_flag)
1092 : [ptr] "r" (ptr), [old_val] "r" (old_val), [desired] "r" (desired)
1093 : "memory");
1094 }
1095#elif defined(MSTL_ARCH_LOONGARCH__)
1096 MSTL_IF_CONSTEXPR (sizeof(T) == 4) {
1097 asm volatile(
1098 "ll.w %[loaded], %[ptr]\n\t"
1099 "bne %[loaded], %[old_val], 1f\n\t"
1100 "sc.w %[desired], %[ptr]\n\t"
1101 "move %[success], %[desired]\n\t"
1102 "1:"
1103 : [loaded] "=&r" (loaded), [success] "=&r" (success_flag)
1104 : [ptr] "m" (*ptr), [old_val] "r" (old_val), [desired] "r" (desired)
1105 : "memory");
1106 } else MSTL_IF_CONSTEXPR (sizeof(T) == 8) {
1107 asm volatile(
1108 "ll.d %[loaded], %[ptr]\n\t"
1109 "bne %[loaded], %[old_val], 1f\n\t"
1110 "sc.d %[desired], %[ptr]\n\t"
1111 "move %[success], %[desired]\n\t"
1112 "1:"
1113 : [loaded] "=&r" (loaded), [success] "=&r" (success_flag)
1114 : [ptr] "m" (*ptr), [old_val] "r" (old_val), [desired] "r" (desired)
1115 : "memory");
1116 }
1117#endif
1118 if (loaded != old_val) {
1119 *expected = loaded;
1120 return false;
1121 }
1122 return success_flag == 0;
1123#endif
1124#endif
1125}
1126
1138template <typename T>
1139MSTL_ALWAYS_INLINE_INLINE bool
1141 volatile T* ptr, remove_volatile_t<T>* expected,
1142 remove_volatile_t<T>* desired, const memory_order success, const memory_order failure) noexcept {
1143 MSTL_CONSTEXPR_ASSERT(is_valid_cmpexch_failure_order(failure));
1144#ifdef MSTL_COMPILER_GNUC__
1145 return __atomic_compare_exchange(
1146 ptr, expected, desired, false,
1147 static_cast<int32_t>(success), static_cast<int32_t>(failure));
1148#else
1149#if defined(MSTL_ARCH_X86__)
1150 const bool result = _INNER interlocked_compare_exchange_impl<sizeof(T)>::call(ptr, expected, *desired);
1151 if (success == memory_order_seq_cst || failure == memory_order_seq_cst) {
1152 ::_ReadWriteBarrier();
1153 }
1154 return result;
1155#else
1156 remove_volatile_t<T> old_val = *expected;
1157 while (true) {
1158 if (_INNER cmpexch_weak(ptr, expected, desired, success, failure)) {
1159 return true;
1160 }
1161 if (*expected != old_val) {
1162 return false;
1163 }
1164 }
1165#endif
1166#endif
1167}
1168
1177template <typename T>
1178MSTL_ALWAYS_INLINE_INLINE void
1179atomic_store_any(T* ptr, remove_volatile_t<T> value, const memory_order mo) noexcept {
1180#ifdef MSTL_COMPILER_GNUC__
1181 __atomic_store(ptr, _MSTL addressof(value), static_cast<int32_t>(mo));
1182#else
1183 remove_volatile_t<T> expected = *ptr;
1184 while (!_MSTL atomic_cmpexch_weak_any(ptr, &expected, &value, mo, memory_order_relaxed)) {
1185 // Retry
1186 }
1187#endif
1188}
1189
1198template <typename T>
1199MSTL_ALWAYS_INLINE_INLINE remove_volatile_t<T>
1200atomic_load_any(const T* ptr, memory_order mo) noexcept {
1201#ifdef MSTL_COMPILER_GNUC__
1202 alignas(T) byte_t buffer[sizeof(T)];
1203 T* dest = reinterpret_cast<remove_volatile_t<T>*>(buffer);
1204 __atomic_load(ptr, dest, static_cast<int32_t>(mo));
1205 return *dest;
1206#else
1207 remove_volatile_t<T> result;
1209 if (mo == memory_order_seq_cst || mo == memory_order_acquire) {
1210 ::_ReadWriteBarrier();
1211 }
1212 return result;
1213#endif
1214}
1215
1225template <typename T>
1226MSTL_ALWAYS_INLINE_INLINE remove_volatile_t<T>
1228#ifdef MSTL_COMPILER_GNUC__
1229 alignas(T) byte_t buffer[sizeof(T)];
1230 T* dest = reinterpret_cast<remove_volatile_t<T>*>(buffer);
1231 __atomic_exchange(ptr, _MSTL addressof(desired), dest, static_cast<int32_t>(mo));
1232 return *dest;
1233#else
1235 while (!_MSTL atomic_cmpexch_weak_any(ptr, &old, &desired, mo, memory_order_relaxed)) {
1236 // Retry
1237 }
1238 return old;
1239#endif
1240}
1241
1250template <typename T>
1253 remove_volatile_t<T> new_value = old_value + value;
1254 while (!_MSTL atomic_cmpexch_weak_any(ptr, &old_value, &new_value, mo, memory_order_relaxed)) {
1255 new_value = old_value + value;
1256 }
1257 return old_value;
1258}
1259
1268template <typename T>
1271 remove_volatile_t<T> new_value = old_value - value;
1272 while (!_MSTL atomic_cmpexch_weak_any(ptr, &old_value, &new_value, mo, memory_order_relaxed)) {
1273 new_value = old_value - value;
1274 }
1275 return old_value;
1276}
1277
1285template <typename T>
1288 remove_volatile_t<T> new_value = old_value + value;
1289 while (!_MSTL atomic_cmpexch_weak_any(ptr, &old_value, &new_value, memory_order_seq_cst, memory_order_relaxed)) {
1290 new_value = old_value + value;
1291 }
1292 return new_value;
1293}
1294
1302template <typename T>
1305 remove_volatile_t<T> new_value = old_value - value;
1306 while (!_MSTL atomic_cmpexch_weak_any(ptr, &old_value, &new_value, memory_order_seq_cst, memory_order_relaxed)) {
1307 new_value = old_value - value;
1308 }
1309 return new_value;
1310}
1311
1312
1319template <size_t Size, size_t Align>
1320constexpr bool is_always_lock_free() noexcept {
1321#ifdef MSTL_COMPILER_GNUC__
1322 return __atomic_is_lock_free(Size, reinterpret_cast<void *>(-Align));
1323#else
1324 return _INNER atomic_is_always_lock_free_impl<Size>::value;
1325#endif
1326}
1327
1328
1335struct atomic_flag {
1340#ifdef MSTL_PLATFORM_WINDOWS__
1341 long;
1342#else
1343 bool;
1344#endif
1345
1347
1348 atomic_flag() noexcept = default;
1349 atomic_flag(const atomic_flag&) = delete;
1350 atomic_flag& operator =(const atomic_flag&) = delete;
1351 atomic_flag& operator =(const atomic_flag&) volatile = delete;
1352 atomic_flag(atomic_flag&&) noexcept = default;
1353 atomic_flag& operator =(atomic_flag&&) noexcept = default;
1354 ~atomic_flag() noexcept = default;
1355
1360 constexpr atomic_flag(const value_type flag) noexcept
1361 : flag_(static_cast<value_type>(flag ? 1 : 0)) {}
1362
1368 MSTL_ALWAYS_INLINE bool
1370#ifdef MSTL_COMPILER_GNUC__
1371 return __atomic_test_and_set(&flag_, static_cast<int32_t>(mo));
1372#else
1373 const long old_val = ::_InterlockedExchange(&flag_, 1);
1374 if (mo == memory_order_seq_cst) ::_ReadWriteBarrier();
1375 return old_val != 0;
1376#endif
1377 }
1378
1382 MSTL_ALWAYS_INLINE_INLINE bool
1383 test_and_set(const memory_order mo = memory_order_seq_cst) volatile noexcept {
1384#ifdef MSTL_COMPILER_GNUC__
1385 return __atomic_test_and_set(&flag_, static_cast<int32_t>(mo));
1386#else
1387 const long old_val = ::_InterlockedExchange(&flag_, 1);
1388 if (mo == memory_order_seq_cst) ::_ReadWriteBarrier();
1389 return old_val != 0;
1390#endif
1391 }
1392
1398 MSTL_ALWAYS_INLINE bool
1399 test(const memory_order mo = memory_order_seq_cst) const noexcept {
1400#ifdef MSTL_COMPILER_GNUC__
1401 value_type value;
1402 __atomic_load(&flag_, &value, static_cast<int32_t>(mo));
1403 return value == sizeof(value_type);
1404#else
1405 const long as_bytes = flag_;
1406 if (mo != memory_order_relaxed) ::_ReadWriteBarrier();
1407 return as_bytes != 0;
1408#endif
1409 }
1410
1414 MSTL_ALWAYS_INLINE_INLINE bool
1415 test(const memory_order mo = memory_order_seq_cst) const volatile noexcept {
1416#ifdef MSTL_COMPILER_GNUC__
1417 value_type value;
1418 __atomic_load(&flag_, &value, static_cast<int32_t>(mo));
1419 return value == sizeof(value_type);
1420#else
1421 const long as_bytes = flag_;
1422 if (mo != memory_order_relaxed) ::_ReadWriteBarrier();
1423 return as_bytes != 0;
1424#endif
1425 }
1426
1432 MSTL_ALWAYS_INLINE void
1433 wait(const bool old, const memory_order mo = memory_order_seq_cst) const noexcept {
1434 const value_type value = old ? 1 : 0;
1436 const_cast<const value_type*>(&flag_), value,
1437 [this, mo] { return this->test(mo); }
1438 );
1439 }
1440
1444 MSTL_ALWAYS_INLINE_INLINE void
1445 wait(const bool old, const memory_order mo = memory_order_seq_cst) const volatile noexcept {
1446 const value_type value = old ? 1 : 0;
1448 const_cast<const value_type*>(&flag_), value,
1449 [this, mo] { return this->test(mo); }
1450 );
1451 }
1452
1456 MSTL_ALWAYS_INLINE void notify_one() noexcept {
1458 }
1459
1463 MSTL_ALWAYS_INLINE void notify_all() noexcept {
1465 }
1466
1471 MSTL_ALWAYS_INLINE void
1474 MSTL_CONSTEXPR_ASSERT(rmo != memory_order_consume);
1475 MSTL_CONSTEXPR_ASSERT(rmo != memory_order_acquire);
1476 MSTL_CONSTEXPR_ASSERT(rmo != memory_order_acq_rel);
1477
1478#ifdef MSTL_COMPILER_GNUC__
1479 __atomic_clear(&flag_, static_cast<int32_t>(mo));
1480#else
1481 _MSTL atomic_store(&flag_, static_cast<value_type>(0), mo);
1482#endif
1483 }
1484
1488 MSTL_ALWAYS_INLINE_INLINE void
1489 clear(const memory_order mo = memory_order_seq_cst) volatile noexcept {
1491 MSTL_CONSTEXPR_ASSERT(rmo != memory_order_consume);
1492 MSTL_CONSTEXPR_ASSERT(rmo != memory_order_acquire);
1493 MSTL_CONSTEXPR_ASSERT(rmo != memory_order_acq_rel);
1494
1495#ifdef MSTL_COMPILER_GNUC__
1496 __atomic_clear(&flag_, static_cast<int32_t>(mo));
1497#else
1498 _MSTL atomic_store(&flag_, static_cast<value_type>(0), mo);
1499#endif
1500 }
1501};
1502
1503
1510template <typename T>
1511struct atomic_base {
1512 using value_type = T;
1514
1515 static_assert(is_integral_like_v<T>, "T must be an integral-like type");
1516
1517private:
1518 static constexpr size_t align_inner = sizeof(T) > alignof(T) ? sizeof(T) : alignof(T);
1519
1520 alignas(align_inner) value_type value_;
1521
1522public:
1525
1526 atomic_base() noexcept = default;
1527 ~atomic_base() noexcept = default;
1528 atomic_base(const atomic_base&) = delete;
1529 atomic_base& operator =(const atomic_base&) = delete;
1530 atomic_base& operator =(const atomic_base&) volatile = delete;
1531 atomic_base(atomic_base&&) noexcept = default;
1532 atomic_base& operator =(atomic_base&&) noexcept = default;
1533
1538 constexpr atomic_base(value_type value) noexcept
1539 : value_ (value) {}
1540
1545 operator value_type() const noexcept {
1546 return load();
1547 }
1548
1552 operator value_type() const volatile noexcept {
1553 return load();
1554 }
1555
1561 value_type operator =(value_type value) noexcept {
1562 atomic_base::store(value);
1563 return value;
1564 }
1565
1569 value_type operator =(value_type value) volatile noexcept {
1570 atomic_base::store(value);
1571 return value;
1572 }
1573
1578 value_type operator ++(int) noexcept {
1579 return fetch_add(1);
1580 }
1581
1585 value_type operator ++(int) volatile noexcept {
1586 return fetch_add(1);
1587 }
1588
1593 value_type operator --(int) noexcept {
1594 return fetch_sub(1);
1595 }
1596
1600 value_type operator --(int) volatile noexcept {
1601 return fetch_sub(1);
1602 }
1603
1609 return _MSTL atomic_add_fetch(&value_, 1, memory_order_seq_cst);
1610 }
1611
1615 value_type operator ++() volatile noexcept {
1616 return _MSTL atomic_add_fetch(&value_, 1, memory_order_seq_cst);
1617 }
1618
1624 return _MSTL atomic_sub_fetch(&value_, 1, memory_order_seq_cst);
1625 }
1626
1630 value_type operator --() volatile noexcept {
1631 return _MSTL atomic_sub_fetch(&value_, 1, memory_order_seq_cst);
1632 }
1633
1640 return _MSTL atomic_add_fetch(&value_, value, memory_order_seq_cst);
1641 }
1642
1646 value_type operator +=(value_type value) volatile noexcept {
1647 return _MSTL atomic_add_fetch(&value_, value, memory_order_seq_cst);
1648 }
1649
1656 return _MSTL atomic_sub_fetch(&value_, value, memory_order_seq_cst);
1657 }
1658
1662 value_type operator -=(value_type value) volatile noexcept {
1663 return _MSTL atomic_sub_fetch(&value_, value, memory_order_seq_cst);
1664 }
1665
1672 return _MSTL atomic_and_fetch(&value_, value, memory_order_seq_cst);
1673 }
1674
1678 value_type operator &=(value_type value) volatile noexcept {
1679 return _MSTL atomic_and_fetch(&value_, value, memory_order_seq_cst);
1680 }
1681
1688 return _MSTL atomic_or_fetch(&value_, value, memory_order_seq_cst);
1689 }
1690
1694 value_type operator |=(value_type value) volatile noexcept {
1695 return _MSTL atomic_or_fetch(&value_, value, memory_order_seq_cst);
1696 }
1697
1704 return _MSTL atomic_xor_fetch(&value_, value, memory_order_seq_cst);
1705 }
1706
1710 value_type operator ^=(value_type value) volatile noexcept {
1711 return _MSTL atomic_xor_fetch(&value_, value, memory_order_seq_cst);
1712 }
1713
1718 bool is_lock_free() const noexcept {
1719 return is_always_lock_free;
1720 }
1721
1725 bool is_lock_free() const volatile noexcept {
1726 return is_always_lock_free;
1727 }
1728
1734 MSTL_ALWAYS_INLINE void
1737 MSTL_CONSTEXPR_ASSERT(rmo != memory_order_acquire);
1738 MSTL_CONSTEXPR_ASSERT(rmo != memory_order_acq_rel);
1739 MSTL_CONSTEXPR_ASSERT(rmo != memory_order_consume);
1740 _MSTL atomic_store(&value_, value, mo);
1741 }
1742
1746 MSTL_ALWAYS_INLINE void
1747 store(value_type value, const memory_order mo = memory_order_seq_cst) volatile noexcept {
1749 MSTL_CONSTEXPR_ASSERT(rmo != memory_order_acquire);
1750 MSTL_CONSTEXPR_ASSERT(rmo != memory_order_acq_rel);
1751 MSTL_CONSTEXPR_ASSERT(rmo != memory_order_consume);
1752 _MSTL atomic_store(&value_, value, mo);
1753 }
1754
1760 MSTL_ALWAYS_INLINE value_type
1761 load(const memory_order mo = memory_order_seq_cst) const noexcept {
1763 MSTL_CONSTEXPR_ASSERT(rmo != memory_order_release);
1764 MSTL_CONSTEXPR_ASSERT(rmo != memory_order_acq_rel);
1765 return _MSTL atomic_load(&value_, mo);
1766 }
1767
1771 MSTL_ALWAYS_INLINE value_type
1772 load(const memory_order mo = memory_order_seq_cst) const volatile noexcept {
1774 MSTL_CONSTEXPR_ASSERT(rmo != memory_order_release);
1775 MSTL_CONSTEXPR_ASSERT(rmo != memory_order_acq_rel);
1776 return _MSTL atomic_load(&value_, mo);
1777 }
1778
1785 MSTL_ALWAYS_INLINE value_type
1787 return _MSTL atomic_exchange(&value_, value, mo);
1788 }
1789
1793 MSTL_ALWAYS_INLINE value_type
1794 exchange(value_type value, const memory_order mo = memory_order_seq_cst) volatile noexcept {
1795 return _MSTL atomic_exchange(&value_, value, mo);
1796 }
1797
1806 MSTL_ALWAYS_INLINE bool
1808 value_type& expected, value_type desired,
1809 const memory_order success, const memory_order failure) noexcept {
1810 return _MSTL atomic_cmpexch_weak_any(&value_, &expected, &desired, success, failure);
1811 }
1812
1816 MSTL_ALWAYS_INLINE bool
1818 value_type& expected, value_type desired,
1819 const memory_order success, const memory_order failure) volatile noexcept {
1820 return _MSTL atomic_cmpexch_weak_any(&value_, &expected, &desired, success, failure);
1821 }
1822
1830 MSTL_ALWAYS_INLINE bool
1832 value_type& expected, value_type desired,
1833 const memory_order mo = memory_order_seq_cst) noexcept {
1834 return this->compare_exchange_weak(expected, desired, mo, cmpexch_failure_order(mo));
1835 }
1836
1840 MSTL_ALWAYS_INLINE bool
1842 value_type& expected, value_type desired,
1843 const memory_order mo = memory_order_seq_cst) volatile noexcept {
1844 return this->compare_exchange_weak(expected, desired, mo, cmpexch_failure_order(mo));
1845 }
1846
1855 MSTL_ALWAYS_INLINE bool
1857 value_type& expected, value_type desired,
1858 const memory_order success, const memory_order failure) noexcept {
1859 return _MSTL atomic_cmpexch_strong_any(&value_, &expected, &desired, success, failure);
1860 }
1861
1865 MSTL_ALWAYS_INLINE bool
1867 value_type& expected, value_type desired,
1868 const memory_order success, const memory_order failure) volatile noexcept {
1869 return _MSTL atomic_cmpexch_strong_any(&value_, &expected, &desired, success, failure);
1870 }
1871
1879 MSTL_ALWAYS_INLINE bool
1881 const memory_order mo = memory_order_seq_cst) noexcept {
1882 return this->compare_exchange_strong(expected, desired, mo, cmpexch_failure_order(mo));
1883 }
1884
1888 MSTL_ALWAYS_INLINE bool
1890 const memory_order mo = memory_order_seq_cst) volatile noexcept {
1891 return this->compare_exchange_strong(expected, desired, mo, cmpexch_failure_order(mo));
1892 }
1893
1899 MSTL_ALWAYS_INLINE void
1900 wait(value_type old, const memory_order mo = memory_order_seq_cst) const noexcept {
1901 _MSTL atomic_wait_address_v(&value_, old, [mo, this] {
1902 return this->load(mo);
1903 });
1904 }
1905
1909 MSTL_ALWAYS_INLINE void notify_one() noexcept {
1910 _MSTL atomic_notify_address(&value_, false);
1911 }
1912
1916 MSTL_ALWAYS_INLINE void notify_all() noexcept {
1917 _MSTL atomic_notify_address(&value_, true);
1918 }
1919
1926 MSTL_ALWAYS_INLINE value_type
1928 return _MSTL atomic_fetch_add(&value_, value, mo);
1929 }
1930
1934 MSTL_ALWAYS_INLINE value_type
1935 fetch_add(value_type value, const memory_order mo = memory_order_seq_cst) volatile noexcept {
1936 return _MSTL atomic_fetch_add(&value_, value, mo);
1937 }
1938
1945 MSTL_ALWAYS_INLINE value_type
1947 return _MSTL atomic_fetch_sub(&value_, value, mo);
1948 }
1949
1953 MSTL_ALWAYS_INLINE value_type
1954 fetch_sub(value_type value, const memory_order mo = memory_order_seq_cst) volatile noexcept {
1955 return _MSTL atomic_fetch_sub(&value_, value, mo);
1956 }
1957
1964 MSTL_ALWAYS_INLINE value_type
1966 return _MSTL atomic_fetch_and(&value_, value, mo);
1967 }
1968
1972 MSTL_ALWAYS_INLINE value_type
1973 fetch_and(value_type value, const memory_order mo = memory_order_seq_cst) volatile noexcept {
1974 return _MSTL atomic_fetch_and(&value_, value, mo);
1975 }
1976
1983 MSTL_ALWAYS_INLINE value_type
1985 return _MSTL atomic_fetch_or(&value_, value, mo);
1986 }
1987
1991 MSTL_ALWAYS_INLINE value_type
1992 fetch_or(value_type value, const memory_order mo = memory_order_seq_cst) volatile noexcept {
1993 return _MSTL atomic_fetch_or(&value_, value, mo);
1994 }
1995
2002 MSTL_ALWAYS_INLINE value_type
2004 return _MSTL atomic_fetch_xor(&value_, value, mo);
2005 }
2006
2010 MSTL_ALWAYS_INLINE value_type
2011 fetch_xor(value_type value, const memory_order mo = memory_order_seq_cst) volatile noexcept {
2012 return _MSTL atomic_fetch_xor(&value_, value, mo);
2013 }
2014};
2015
2022template <typename T>
2023struct atomic_base<T*> {
2024 using value_type = T*;
2026
2027private:
2028 value_type ptr_ = nullptr;
2029
2035 MSTL_ALWAYS_INLINE_INLINE static constexpr difference_type
2036 real_type_sizes(const difference_type dest) noexcept {
2037 return dest * sizeof(T);
2038 }
2039
2040public:
2043
2044 atomic_base() noexcept = default;
2045 atomic_base(const atomic_base&) = delete;
2046 atomic_base& operator =(const atomic_base&) = delete;
2047 atomic_base& operator =(const atomic_base&) volatile = delete;
2048 atomic_base(atomic_base&&) noexcept = default;
2049 atomic_base& operator =(atomic_base&&) noexcept = default;
2050 ~atomic_base() noexcept = default;
2051
2056 constexpr atomic_base(const value_type ptr) noexcept
2057 : ptr_(ptr) {}
2058
2063 operator value_type() const noexcept {
2064 return load();
2065 }
2066
2070 operator value_type() const volatile noexcept {
2071 return load();
2072 }
2073
2079 value_type operator =(const value_type ptr) noexcept {
2080 atomic_base::store(ptr);
2081 return ptr;
2082 }
2083
2087 value_type operator =(const value_type ptr) volatile noexcept {
2088 atomic_base::store(ptr);
2089 return ptr;
2090 }
2091
2096 value_type operator ++(int) noexcept {
2097 return fetch_add(1);
2098 }
2099
2103 value_type operator ++(int) volatile noexcept {
2104 return fetch_add(1);
2105 }
2106
2111 value_type operator --(int) noexcept {
2112 return fetch_sub(1);
2113 }
2114
2118 value_type operator --(int) volatile noexcept {
2119 return fetch_sub(1);
2120 }
2121
2127#ifdef MSTL_COMPILER_GNUC__
2128 return __atomic_add_fetch(&ptr_, real_type_sizes(1), static_cast<int32_t>(memory_order_seq_cst));
2129#else
2130 const char* old_ptr = reinterpret_cast<char*>(
2131 ::_interlockedexchangeadd64(
2132 reinterpret_cast<volatile long long*>(&ptr_),
2133 static_cast<long long>(sizeof(T))));
2134 return reinterpret_cast<value_type>(old_ptr + sizeof(T));
2135#endif
2136 }
2137
2141 value_type operator ++() volatile noexcept {
2142#ifdef MSTL_COMPILER_GNUC__
2143 return __atomic_add_fetch(&ptr_, real_type_sizes(1), static_cast<int32_t>(memory_order_seq_cst));
2144#else
2145 const char* old_ptr = reinterpret_cast<char*>(
2146 ::_interlockedexchangeadd64(
2147 reinterpret_cast<volatile long long*>(&ptr_),
2148 static_cast<long long>(sizeof(T))));
2149 return reinterpret_cast<value_type>(old_ptr + sizeof(T));
2150#endif
2151 }
2152
2158#ifdef MSTL_COMPILER_GNUC__
2159 return __atomic_sub_fetch(&ptr_, real_type_sizes(1), static_cast<int32_t>(memory_order_seq_cst));
2160#else
2161 const char* old_ptr = reinterpret_cast<char*>(
2162 ::_interlockedexchangeadd64(
2163 reinterpret_cast<volatile long long*>(&ptr_),
2164 -static_cast<ptrdiff_t>(sizeof(T))));
2165 return reinterpret_cast<value_type>(old_ptr - sizeof(T));
2166#endif
2167 }
2168
2172 value_type operator --() volatile noexcept {
2173#ifdef MSTL_COMPILER_GNUC__
2174 return __atomic_sub_fetch(&ptr_, real_type_sizes(1), static_cast<int32_t>(memory_order_seq_cst));
2175#else
2176 const char* old_ptr = reinterpret_cast<char*>(
2177 ::_interlockedexchangeadd64(
2178 reinterpret_cast<volatile long long*>(&ptr_),
2179 -static_cast<ptrdiff_t>(sizeof(T))));
2180 return reinterpret_cast<value_type>(old_ptr - sizeof(T));
2181#endif
2182 }
2183
2189 value_type operator +=(const ptrdiff_t dest) noexcept {
2190#ifdef MSTL_COMPILER_GNUC__
2191 return __atomic_add_fetch(&ptr_, real_type_sizes(dest), static_cast<int32_t>(memory_order_seq_cst));
2192#else
2193 const char* old_ptr = reinterpret_cast<char*>(
2194 ::_interlockedexchangeadd64(
2195 reinterpret_cast<volatile long long*>(&ptr_),
2196 static_cast<long long>(dest * sizeof(T))));
2197 return reinterpret_cast<value_type>(old_ptr + dest * sizeof(T));
2198#endif
2199 }
2200
2204 value_type operator +=(const ptrdiff_t dest) volatile noexcept {
2205#ifdef MSTL_COMPILER_GNUC__
2206 return __atomic_add_fetch(&ptr_, real_type_sizes(dest), static_cast<int32_t>(memory_order_seq_cst));
2207#else
2208 const char* old_ptr = reinterpret_cast<char*>(
2209 ::_interlockedexchangeadd64(
2210 reinterpret_cast<volatile long long*>(&ptr_),
2211 static_cast<long long>(dest * sizeof(T))));
2212 return reinterpret_cast<value_type>(old_ptr + dest * sizeof(T));
2213#endif
2214 }
2215
2221 value_type operator -=(const ptrdiff_t dest) noexcept {
2222#ifdef MSTL_COMPILER_GNUC__
2223 return __atomic_sub_fetch(&ptr_, real_type_sizes(dest), static_cast<int32_t>(memory_order_seq_cst));
2224#else
2225 const char* old_ptr = reinterpret_cast<char*>(
2226 ::_interlockedexchangeadd64(
2227 reinterpret_cast<volatile long long*>(&ptr_),
2228 -dest * static_cast<ptrdiff_t>(sizeof(T))));
2229 return reinterpret_cast<value_type>(old_ptr - dest * sizeof(T));
2230#endif
2231 }
2232
2236 value_type operator -=(const ptrdiff_t dest) volatile noexcept {
2237#ifdef MSTL_COMPILER_GNUC__
2238 return __atomic_sub_fetch(&ptr_, real_type_sizes(dest), static_cast<int32_t>(memory_order_seq_cst));
2239#else
2240 const char* old_ptr = reinterpret_cast<char*>(
2241 ::_interlockedexchangeadd64(
2242 reinterpret_cast<volatile long long*>(&ptr_),
2243 -dest * static_cast<ptrdiff_t>(sizeof(T))));
2244 return reinterpret_cast<value_type>(old_ptr - dest * sizeof(T));
2245#endif
2246 }
2247
2252 bool is_lock_free() const noexcept {
2253 return is_always_lock_free;
2254 }
2255
2259 bool is_lock_free() const volatile noexcept {
2260 return is_always_lock_free;
2261 }
2262
2268 MSTL_ALWAYS_INLINE void
2269 store(const value_type ptr, const memory_order mo = memory_order_seq_cst) noexcept {
2271 MSTL_CONSTEXPR_ASSERT(rmo != memory_order_acquire);
2272 MSTL_CONSTEXPR_ASSERT(rmo != memory_order_acq_rel);
2273 MSTL_CONSTEXPR_ASSERT(rmo != memory_order_consume);
2274
2275#ifdef MSTL_COMPILER_GNUC__
2276 __atomic_store_n(&ptr_, ptr, static_cast<int32_t>(mo));
2277#else
2278 ::_InterlockedExchangePointer(
2279 reinterpret_cast<void* volatile*>(&ptr_), ptr);
2280 if (mo == memory_order_seq_cst || mo == memory_order_release) {
2281 ::_ReadWriteBarrier();
2282 }
2283#endif
2284 }
2285
2289 MSTL_ALWAYS_INLINE_INLINE void
2290 store(const value_type ptr, const memory_order mo = memory_order_seq_cst) volatile noexcept {
2292 MSTL_CONSTEXPR_ASSERT(rmo != memory_order_acquire);
2293 MSTL_CONSTEXPR_ASSERT(rmo != memory_order_acq_rel);
2294 MSTL_CONSTEXPR_ASSERT(rmo != memory_order_consume);
2295
2296#ifdef MSTL_COMPILER_GNUC__
2297 __atomic_store_n(&ptr_, ptr, static_cast<int32_t>(mo));
2298#else
2299 ::_InterlockedExchangePointer(
2300 reinterpret_cast<void* volatile*>(&ptr_), ptr);
2301 if (mo == memory_order_seq_cst || mo == memory_order_release) {
2302 ::_ReadWriteBarrier();
2303 }
2304#endif
2305 }
2306
2312 MSTL_ALWAYS_INLINE value_type
2313 load(const memory_order mo = memory_order_seq_cst) const noexcept {
2315 MSTL_CONSTEXPR_ASSERT(rmo != memory_order_release);
2316 MSTL_CONSTEXPR_ASSERT(rmo != memory_order_acq_rel);
2317#ifdef MSTL_COMPILER_GNUC__
2318 return __atomic_load_n(&ptr_, static_cast<int32_t>(mo));
2319#else
2320 const value_type result = *reinterpret_cast<value_type const volatile*>(&ptr_);
2321 if (mo == memory_order_seq_cst || mo == memory_order_acquire) {
2322 ::_ReadWriteBarrier();
2323 }
2324 return result;
2325#endif
2326 }
2327
2331 MSTL_ALWAYS_INLINE_INLINE value_type
2332 load(const memory_order mo = memory_order_seq_cst) const volatile noexcept {
2334 MSTL_CONSTEXPR_ASSERT(rmo != memory_order_release);
2335 MSTL_CONSTEXPR_ASSERT(rmo != memory_order_acq_rel);
2336#ifdef MSTL_COMPILER_GNUC__
2337 return __atomic_load_n(&ptr_, static_cast<int32_t>(mo));
2338#else
2339 const value_type result = *reinterpret_cast<value_type const volatile*>(&ptr_);
2340 if (mo == memory_order_seq_cst || mo == memory_order_acquire) {
2341 ::_ReadWriteBarrier();
2342 }
2343 return result;
2344#endif
2345 }
2346
2353 MSTL_ALWAYS_INLINE value_type
2354 exchange(const value_type ptr, const memory_order mo = memory_order_seq_cst) noexcept {
2355#ifdef MSTL_COMPILER_GNUC__
2356 return __atomic_exchange_n(&ptr_, ptr, static_cast<int32_t>(mo));
2357#else
2358 const value_type old = static_cast<value_type>(
2359 ::_InterlockedExchangePointer(
2360 reinterpret_cast<void* volatile*>(&ptr_), ptr));
2361 if (mo == memory_order_seq_cst) {
2362 ::_ReadWriteBarrier();
2363 }
2364 return old;
2365#endif
2366 }
2367
2371 MSTL_ALWAYS_INLINE_INLINE value_type
2372 exchange(const value_type ptr, const memory_order mo = memory_order_seq_cst) volatile noexcept {
2373#ifdef MSTL_COMPILER_GNUC__
2374 return __atomic_exchange_n(&ptr_, ptr, static_cast<int32_t>(mo));
2375#else
2376 const value_type old = static_cast<value_type>(
2377 ::_InterlockedExchangePointer(
2378 reinterpret_cast<void* volatile*>(&ptr_), ptr));
2379 if (mo == memory_order_seq_cst) {
2380 ::_ReadWriteBarrier();
2381 }
2382 return old;
2383#endif
2384 }
2385
2394 MSTL_ALWAYS_INLINE bool
2396 value_type& expected, value_type desired,
2397 const memory_order success, const memory_order failure) noexcept {
2398 MSTL_CONSTEXPR_ASSERT(is_valid_cmpexch_failure_order(failure));
2400 _MSTL addressof(ptr_), _MSTL addressof(expected),
2401 _MSTL addressof(desired), success, failure);
2402 }
2403
2407 MSTL_ALWAYS_INLINE_INLINE bool
2409 value_type& expected, value_type desired,
2410 const memory_order success, const memory_order failure) volatile noexcept {
2411 MSTL_CONSTEXPR_ASSERT(is_valid_cmpexch_failure_order(failure));
2413 _MSTL addressof(ptr_), _MSTL addressof(expected),
2414 _MSTL addressof(desired), success, failure);
2415 }
2416
2424 MSTL_ALWAYS_INLINE bool
2426 value_type& expected, value_type desired,
2427 const memory_order mo = memory_order_seq_cst) noexcept {
2428 return atomic_base::compare_exchange_weak(expected, desired, mo, cmpexch_failure_order(mo));
2429 }
2430
2434 MSTL_ALWAYS_INLINE bool
2436 value_type& expected, value_type desired,
2437 const memory_order mo = memory_order_seq_cst) volatile noexcept {
2438 return atomic_base::compare_exchange_weak(expected, desired, mo, cmpexch_failure_order(mo));
2439 }
2440
2449 MSTL_ALWAYS_INLINE bool
2451 value_type& expected, value_type desired,
2452 const memory_order success, const memory_order failure) noexcept {
2453 MSTL_CONSTEXPR_ASSERT(is_valid_cmpexch_failure_order(failure));
2455 _MSTL addressof(ptr_), _MSTL addressof(expected),
2456 _MSTL addressof(desired), success, failure);
2457 }
2458
2462 MSTL_ALWAYS_INLINE_INLINE bool
2464 value_type& expected, value_type desired,
2465 const memory_order success, const memory_order failure) volatile noexcept {
2467 _MSTL addressof(ptr_), _MSTL addressof(expected),
2468 _MSTL addressof(desired), success, failure);
2469 }
2470
2478 MSTL_ALWAYS_INLINE bool
2480 const memory_order mo = memory_order_seq_cst) noexcept {
2481 return atomic_base::compare_exchange_strong(expected, desired, mo, cmpexch_failure_order(mo));
2482 }
2483
2487 MSTL_ALWAYS_INLINE bool
2489 const memory_order mo = memory_order_seq_cst) volatile noexcept {
2490 return atomic_base::compare_exchange_strong(expected, desired, mo, cmpexch_failure_order(mo));
2491 }
2492
2498 MSTL_ALWAYS_INLINE void
2499 wait(value_type old, const memory_order mo = memory_order_seq_cst) const noexcept {
2500 _MSTL atomic_wait_address_v(&ptr_, old, [mo, this] {
2501 return this->load(mo);
2502 });
2503 }
2504
2508 MSTL_ALWAYS_INLINE void notify_one() noexcept {
2509 _MSTL atomic_notify_address(&ptr_, false);
2510 }
2511
2515 MSTL_ALWAYS_INLINE void notify_all() noexcept {
2516 _MSTL atomic_notify_address(&ptr_, true);
2517 }
2518
2525 MSTL_ALWAYS_INLINE value_type
2526 fetch_add(const ptrdiff_t dest, const memory_order mo = memory_order_seq_cst) noexcept {
2527 return _MSTL atomic_fetch_add(&ptr_, dest * sizeof(T), mo);
2528 }
2529
2533 MSTL_ALWAYS_INLINE_INLINE value_type
2534 fetch_add(const ptrdiff_t dest, const memory_order mo = memory_order_seq_cst) volatile noexcept {
2535 return _MSTL atomic_fetch_add(&ptr_, dest * sizeof(T), mo);
2536 }
2537
2544 MSTL_ALWAYS_INLINE value_type
2545 fetch_sub(const ptrdiff_t dest, const memory_order mo = memory_order_seq_cst) noexcept {
2546#ifdef MSTL_COMPILER_GNUC__
2547 return __atomic_fetch_sub(&ptr_, real_type_sizes(dest), static_cast<int32_t>(mo));
2548#else
2549 const char* old_ptr = reinterpret_cast<char*>(
2550 ::_interlockedexchangeadd64(
2551 reinterpret_cast<volatile long long*>(&ptr_),
2552 -dest * static_cast<ptrdiff_t>(sizeof(T))));
2553 if (mo == memory_order_seq_cst) {
2554 ::_ReadWriteBarrier();
2555 }
2556 return reinterpret_cast<value_type>(old_ptr);
2557#endif
2558 }
2559
2563 MSTL_ALWAYS_INLINE_INLINE value_type
2564 fetch_sub(const ptrdiff_t dest, const memory_order mo = memory_order_seq_cst) volatile noexcept {
2565#ifdef MSTL_COMPILER_GNUC__
2566 return __atomic_fetch_sub(&ptr_, real_type_sizes(dest), static_cast<int32_t>(mo));
2567#else
2568 const char* old_ptr = reinterpret_cast<char*>(
2569 ::_interlockedexchangeadd64(
2570 reinterpret_cast<volatile long long*>(&ptr_),
2571 -dest * static_cast<ptrdiff_t>(sizeof(T))));
2572 if (mo == memory_order_seq_cst) {
2573 ::_ReadWriteBarrier();
2574 }
2575 return reinterpret_cast<value_type>(old_ptr);
2576#endif
2577 }
2578};
2579
2580
2588template <typename Float>
2589struct atomic_float_base {
2590 static_assert(is_floating_point_v<Float>, "atomic_ref_base need floating point T");
2591
2592 using value_type = Float;
2594
2595private:
2596 alignas(alignof(Float)) Float float_ = _MSTL initialize<Float>();
2597
2598public:
2601
2602 atomic_float_base() = default;
2603 atomic_float_base(const atomic_float_base&) = delete;
2604 atomic_float_base& operator =(const atomic_float_base&) = delete;
2605 atomic_float_base& operator =(const atomic_float_base&) volatile = delete;
2606 atomic_float_base(atomic_float_base&&) noexcept = default;
2607 atomic_float_base& operator =(atomic_float_base&&) noexcept = default;
2608
2613 constexpr atomic_float_base(Float value)
2614 noexcept(is_nothrow_copy_constructible_v<Float>)
2615 : float_(value) {}
2616
2620 Float operator =(Float value) noexcept {
2621 this->store(value);
2622 return value;
2623 }
2624
2628 Float operator =(Float value) volatile noexcept {
2629 this->store(value);
2630 return value;
2631 }
2632
2636 bool is_lock_free() const noexcept {
2637 return is_always_lock_free;
2638 }
2639
2643 bool is_lock_free() const volatile noexcept {
2644 return is_always_lock_free;
2645 }
2646
2650 void store(Float value, const memory_order mo = memory_order_seq_cst) noexcept {
2651 _MSTL atomic_store_any(&float_, value, mo);
2652 }
2653
2657 void store(Float value, const memory_order mo = memory_order_seq_cst) volatile noexcept {
2658 _MSTL atomic_store_any(&float_, value, mo);
2659 }
2660
2664 Float load(const memory_order mo = memory_order_seq_cst) const noexcept {
2665 return _MSTL atomic_load_any(&float_, mo);
2666 }
2667
2671 Float load(const memory_order mo = memory_order_seq_cst) const volatile noexcept {
2672 return _MSTL atomic_load_any(&float_, mo);
2673 }
2674
2678 operator Float() const noexcept {
2679 return this->load();
2680 }
2681
2685 operator Float() const volatile noexcept {
2686 return this->load();
2687 }
2688
2692 Float exchange(Float desire, const memory_order mo = memory_order_seq_cst) noexcept {
2693 return _MSTL atomic_exchange_any(&float_, desire, mo);
2694 }
2695
2699 Float exchange(Float desire, const memory_order mo = memory_order_seq_cst) volatile noexcept {
2700 return _MSTL atomic_exchange_any(&float_, desire, mo);
2701 }
2702
2706 bool compare_exchange_weak(Float& expected, Float desire,
2707 const memory_order success, const memory_order failure) noexcept {
2708 return _MSTL atomic_cmpexch_weak(&float_, expected, desire, success, failure);
2709 }
2710
2714 bool compare_exchange_weak(Float& expected, Float desire,
2715 const memory_order success, const memory_order failure) volatile noexcept {
2716 return _MSTL atomic_cmpexch_weak(&float_, expected, desire, success, failure);
2717 }
2718
2722 bool compare_exchange_strong(Float& expected, Float desire,
2723 const memory_order success, const memory_order failure) noexcept {
2724 return _MSTL atomic_cmpexch_strong(&float_, expected, desire, success, failure);
2725 }
2726
2730 bool compare_exchange_strong(Float& expected, Float desire,
2731 const memory_order success, const memory_order failure) volatile noexcept {
2732 return _MSTL atomic_cmpexch_strong(&float_, expected, desire, success, failure);
2733 }
2734
2738 bool compare_exchange_weak(Float& expected, Float desire,
2739 const memory_order mo = memory_order_seq_cst) noexcept {
2740 return _MSTL atomic_cmpexch_weak(expected, desire, mo, cmpexch_failure_order(mo));
2741 }
2742
2746 bool compare_exchange_weak(Float& expected, Float desire,
2747 const memory_order mo = memory_order_seq_cst) volatile noexcept {
2748 return _MSTL atomic_cmpexch_weak(expected, desire, mo, cmpexch_failure_order(mo));
2749 }
2750
2754 bool compare_exchange_strong(Float& expected, Float desire,
2755 const memory_order mo = memory_order_seq_cst) noexcept {
2756 return _MSTL atomic_cmpexch_strong(expected, desire, mo, cmpexch_failure_order(mo));
2757 }
2758
2762 bool compare_exchange_strong(Float& expected, Float desire,
2763 const memory_order mo = memory_order_seq_cst) volatile noexcept {
2764 return _MSTL atomic_cmpexch_strong(expected, desire, mo, cmpexch_failure_order(mo));
2765 }
2766
2772 MSTL_ALWAYS_INLINE void
2773 wait(Float old, const memory_order mo = memory_order_seq_cst) const noexcept {
2774 _MSTL atomic_wait_address_v(&float_, old, [mo, this] {
2775 return this->load(mo);
2776 });
2777 }
2778
2782 MSTL_ALWAYS_INLINE void notify_one() noexcept {
2783 _MSTL atomic_notify_address(&float_, false);
2784 }
2785
2789 MSTL_ALWAYS_INLINE void notify_all() noexcept {
2790 _MSTL atomic_notify_address(&float_, true);
2791 }
2792
2800 return _MSTL atomic_fetch_add_any(&float_, value, mo);
2801 }
2802
2806 value_type fetch_add(value_type value, const memory_order mo = memory_order_seq_cst) volatile noexcept {
2807 return _MSTL atomic_fetch_add_any(&float_, value, mo);
2808 }
2809
2817 return _MSTL atomic_fetch_sub_any(&float_, value, mo);
2818 }
2819
2823 value_type fetch_sub(value_type value, const memory_order mo = memory_order_seq_cst) volatile noexcept {
2824 return _MSTL atomic_fetch_sub_any(&float_, value, mo);
2825 }
2826
2833 return _MSTL atomic_add_fetch_any(&float_, value);
2834 }
2835
2839 value_type operator +=(value_type value) volatile noexcept {
2840 return _MSTL atomic_add_fetch_any(&float_, value);
2841 }
2842
2849 return _MSTL atomic_sub_fetch_any(&float_, value);
2850 }
2851
2855 value_type operator -=(value_type value) volatile noexcept {
2856 return _MSTL atomic_sub_fetch_any(&float_, value);
2857 }
2858};
2859
2860
2870template <typename T, bool IsIntegral = is_integral_v<T>, bool IsFloatingPoint = is_floating_point_v<T>>
2872
2873
2878template <typename T>
2879struct atomic_ref_base<T, false, false> {
2880 static_assert(is_trivially_copyable_v<T>, "atomic_ref_base need trivially copyable T");
2881
2882private:
2883 static constexpr int align_inner = (sizeof(T) & (sizeof(T) - 1)) || sizeof(T) > 16 ? 0 : sizeof(T);
2884
2885 T* ptr_;
2886
2887public:
2888 using value_type = T;
2889
2891 static constexpr size_t required_alignment = align_inner > alignof(T) ? align_inner : alignof(T);
2894
2899 explicit atomic_ref_base(T& value) : ptr_(_MSTL addressof(value)) {
2900 MSTL_CONSTEXPR_ASSERT((static_cast<uintptr_t>(ptr_) % required_alignment) == 0);
2901 }
2902
2903 atomic_ref_base(const atomic_ref_base&) noexcept = default;
2904 atomic_ref_base& operator =(const atomic_ref_base&) = delete;
2905
2911 T operator =(T value) noexcept {
2912 this->store(value);
2913 return value;
2914 }
2915
2920 operator T() const noexcept {
2921 return this->load();
2922 }
2923
2928 bool is_lock_free() const noexcept {
2929 return is_always_lock_free;
2930 }
2931
2937 void store(T value, const memory_order mo = memory_order_seq_cst) noexcept {
2938 _MSTL atomic_store_any(ptr_, value, mo);
2939 }
2940
2946 T load(const memory_order mo = memory_order_seq_cst) const noexcept {
2947 return _MSTL atomic_load_any(ptr_, mo);
2948 }
2949
2956 T exchange(T desire, const memory_order mo = memory_order_seq_cst) noexcept {
2957 return _MSTL atomic_exchange_any(ptr_, desire, mo);
2958 }
2959
2968 bool compare_exchange_weak(T& expected, T desire,
2969 const memory_order success, const memory_order failure) noexcept {
2970 return _MSTL atomic_cmpexch_weak(ptr_, expected, desire, success, failure);
2971 }
2972
2981 bool compare_exchange_strong(T& expected, T desire,
2982 const memory_order success, const memory_order failure) noexcept {
2983 return _MSTL atomic_cmpexch_strong(ptr_, expected, desire, success, failure);
2984 }
2985
2993 bool compare_exchange_weak(T& expected, T desire,
2994 const memory_order mo = memory_order_seq_cst) noexcept {
2995 return _MSTL atomic_cmpexch_weak(expected, desire, mo, cmpexch_failure_order(mo));
2996 }
2997
3005 bool compare_exchange_strong(T& expected, T desire,
3006 const memory_order mo = memory_order_seq_cst) noexcept {
3007 return _MSTL atomic_cmpexch_strong(expected, desire, mo, cmpexch_failure_order(mo));
3008 }
3009
3015 MSTL_ALWAYS_INLINE void
3016 wait(T old, const memory_order mo = memory_order_seq_cst) const noexcept {
3017 _MSTL atomic_wait_address_v(ptr_, old, [this, mo] {
3018 return this->load(mo);
3019 });
3020 }
3021
3025 MSTL_ALWAYS_INLINE void notify_one() noexcept {
3026 _MSTL atomic_notify_address(ptr_, false);
3027 }
3028
3032 MSTL_ALWAYS_INLINE void notify_all() noexcept {
3033 _MSTL atomic_notify_address(ptr_, true);
3034 }
3035};
3036
3041template <typename T>
3042struct atomic_ref_base<T, true, false> {
3043 static_assert(is_integral_like_v<T>, "atomic_ref need integral-like T");
3044
3045private:
3046
3047 T* ptr_;
3048
3049public:
3050 using value_type = T;
3052
3054 static constexpr size_t required_alignment = sizeof(T) > alignof(T) ? sizeof(T) : alignof(T);
3057
3058 atomic_ref_base() = delete;
3059 atomic_ref_base(const atomic_ref_base&) noexcept = default;
3060 atomic_ref_base& operator =(const atomic_ref_base&) = delete;
3061
3066 explicit atomic_ref_base(T& value) : ptr_(&value) {
3067 MSTL_CONSTEXPR_ASSERT((static_cast<uintptr_t>(ptr_) % required_alignment) == 0);
3068 }
3069
3075 T operator =(T value) noexcept {
3076 this->store(value);
3077 return value;
3078 }
3079
3084 MSTL_NODISCARD operator T() const noexcept {
3085 return this->load();
3086 }
3087
3092 MSTL_NODISCARD bool is_lock_free() const noexcept {
3093 return is_always_lock_free;
3094 }
3095
3101 void store(T value, const memory_order mo = memory_order_seq_cst) noexcept {
3102 _MSTL atomic_store(ptr_, value, mo);
3103 }
3104
3110 T load(const memory_order mo = memory_order_seq_cst) const noexcept {
3111 return _MSTL atomic_load(ptr_, mo);
3112 }
3113
3120 T exchange(T desire, const memory_order mo = memory_order_seq_cst) noexcept {
3121 return _MSTL atomic_exchange(ptr_, desire, mo);
3122 }
3123
3132 bool compare_exchange_weak(T& expected, T desire,
3133 const memory_order success, const memory_order failure) noexcept {
3134 return _MSTL atomic_cmpexch_weak(ptr_, expected, desire, success, failure);
3135 }
3136
3145 bool compare_exchange_strong(T& expected, T desire,
3146 const memory_order success, const memory_order failure) noexcept {
3147 return _MSTL atomic_cmpexch_strong(ptr_, expected, desire, success, failure);
3148 }
3149
3157 bool compare_exchange_weak(T& expected, T desire,
3158 const memory_order mo = memory_order_seq_cst) noexcept {
3159 return _MSTL atomic_cmpexch_weak(expected, desire, mo, cmpexch_failure_order(mo));
3160 }
3161
3169 bool compare_exchange_strong(T& expected, T desire,
3170 const memory_order mo = memory_order_seq_cst) noexcept {
3171 return _MSTL atomic_cmpexch_strong(expected, desire, mo, cmpexch_failure_order(mo));
3172 }
3173
3179 MSTL_ALWAYS_INLINE void
3180 wait(T old, const memory_order mo = memory_order_seq_cst) const noexcept {
3181 _MSTL atomic_wait_address_v(ptr_, old, [this, mo] {
3182 return this->load(mo);
3183 });
3184 }
3185
3189 MSTL_ALWAYS_INLINE void notify_one() noexcept {
3190 _MSTL atomic_notify_address(ptr_, false);
3191 }
3192
3196 MSTL_ALWAYS_INLINE void notify_all() noexcept {
3197 _MSTL atomic_notify_address(ptr_, true);
3198 }
3199
3207 return _MSTL atomic_fetch_add(ptr_, value, mo);
3208 }
3209
3217 return _MSTL atomic_fetch_sub(ptr_, value, mo);
3218 }
3219
3227 return _MSTL atomic_fetch_and(ptr_, value, mo);
3228 }
3229
3237 return _MSTL atomic_fetch_or(ptr_, value, mo);
3238 }
3239
3247 return _MSTL atomic_fetch_xor(ptr_, value, mo);
3248 }
3249
3254 MSTL_ALWAYS_INLINE value_type operator ++(int) noexcept {
3255 return fetch_add(1);
3256 }
3257
3262 MSTL_ALWAYS_INLINE value_type operator --(int) noexcept {
3263 return fetch_sub(1);
3264 }
3265
3270 value_type operator ++() noexcept {
3271 return _MSTL atomic_add_fetch(ptr_, value_type(1));
3272 }
3273
3278 value_type operator --() noexcept {
3279 return _MSTL atomic_sub_fetch(ptr_, value_type(1));
3280 }
3281
3287 value_type operator +=(value_type value) noexcept {
3288 return _MSTL atomic_add_fetch(ptr_, value);
3289 }
3290
3296 value_type operator -=(value_type value) noexcept {
3297 return _MSTL atomic_sub_fetch(ptr_, value);
3298 }
3299
3305 value_type operator &=(value_type value) noexcept {
3306 return _MSTL atomic_and_fetch(ptr_, value);
3307 }
3308
3314 value_type operator |=(value_type value) noexcept {
3315 return _MSTL atomic_or_fetch(ptr_, value);
3316 }
3317
3323 value_type operator ^=(value_type value) noexcept {
3324 return _MSTL atomic_xor_fetch(ptr_, value);
3325 }
3326};
3327
3332template <typename Float>
3333struct atomic_ref_base<Float, false, true> {
3334 static_assert(is_floating_point_v<Float>, "atomic_ref_base need floating point T");
3335
3336private:
3337 Float* ptr_;
3338
3339public:
3340 using value_type = Float;
3342
3344 static constexpr size_t required_alignment = alignof(Float);
3347
3348 atomic_ref_base() = delete;
3349 atomic_ref_base(const atomic_ref_base&) noexcept = default;
3350 atomic_ref_base& operator =(const atomic_ref_base&) = delete;
3351
3356 explicit atomic_ref_base(Float& value) : ptr_(&value) {
3357 MSTL_CONSTEXPR_ASSERT((static_cast<uintptr_t>(ptr_) % required_alignment) == 0);
3358 }
3359
3365 Float operator =(Float value) noexcept {
3366 this->store(value);
3367 return value;
3368 }
3369
3374 operator Float() const noexcept {
3375 return this->load();
3376 }
3377
3382 bool is_lock_free() const noexcept {
3383 return is_always_lock_free;
3384 }
3385
3391 void store(Float value, const memory_order mo = memory_order_seq_cst) noexcept {
3392 _MSTL atomic_store_any(ptr_, value, mo);
3393 }
3394
3400 Float load(const memory_order mo = memory_order_seq_cst) const noexcept {
3401 return _MSTL atomic_load_any(ptr_, mo);
3402 }
3403
3410 Float exchange(Float desire, const memory_order mo = memory_order_seq_cst) noexcept {
3411 return _MSTL atomic_exchange_any(ptr_, desire, mo);
3412 }
3413
3422 bool compare_exchange_weak(Float& expected, Float desire,
3423 const memory_order success, const memory_order failure) noexcept {
3424 return _MSTL atomic_cmpexch_weak(ptr_, expected, desire, success, failure);
3425 }
3426
3435 bool compare_exchange_strong(Float& expected, Float desire,
3436 const memory_order success, const memory_order failure) noexcept {
3437 return _MSTL atomic_cmpexch_strong(ptr_, expected, desire, success, failure);
3438 }
3439
3447 bool compare_exchange_weak(Float& expected, Float desire,
3448 const memory_order mo = memory_order_seq_cst) noexcept {
3449 return _MSTL atomic_cmpexch_weak(expected, desire, mo, cmpexch_failure_order(mo));
3450 }
3451
3459 bool compare_exchange_strong(Float& expected, Float desire,
3460 const memory_order mo = memory_order_seq_cst) noexcept {
3461 return _MSTL atomic_cmpexch_strong(expected, desire, mo, cmpexch_failure_order(mo));
3462 }
3463
3469 MSTL_ALWAYS_INLINE void
3470 wait(Float old, const memory_order mo = memory_order_seq_cst) const noexcept {
3471 _MSTL atomic_wait_address_v(ptr_, old, [this, mo] {
3472 return this->load(mo);
3473 });
3474 }
3475
3479 MSTL_ALWAYS_INLINE void notify_one() noexcept {
3480 _MSTL atomic_notify_address(ptr_, false);
3481 }
3482
3486 MSTL_ALWAYS_INLINE void notify_all() noexcept {
3487 _MSTL atomic_notify_address(ptr_, true);
3488 }
3489
3497 return _MSTL atomic_fetch_add_any(ptr_, value, mo);
3498 }
3499
3507 return _MSTL atomic_fetch_sub_any(ptr_, value, mo);
3508 }
3509
3515 value_type operator +=(value_type value) const noexcept {
3516 return _MSTL atomic_add_fetch_any(ptr_, value);
3517 }
3518
3524 value_type operator -=(value_type value) const noexcept {
3525 return _MSTL atomic_sub_fetch_any(ptr_, value);
3526 }
3527};
3528
3529
3530#ifdef MSTL_COMPILER_CLANG__
3531#pragma clang diagnostic push
3532#pragma clang diagnostic ignored "-Watomic-alignment"
3533#endif
3534
3535template <typename T>
3536struct atomic_ref_base<T*, false, false> {
3537public:
3538 using value_type = T*;
3539 using difference_type = ptrdiff_t;
3540
3541private:
3542 T** ptr_;
3543
3549 static constexpr difference_type real_type_sizes(const difference_type dest) noexcept {
3550 static_assert(is_object_v<T>, "atomic_ref_base need object T");
3551 return dest * sizeof(T);
3552 }
3553
3554public:
3556 static constexpr size_t required_alignment = sizeof(T*) == 8 ? 8 : alignof(T*);
3559
3560 atomic_ref_base() = delete;
3561 atomic_ref_base(const atomic_ref_base&) noexcept = default;
3562 atomic_ref_base& operator =(const atomic_ref_base&) = delete;
3563
3568 explicit atomic_ref_base(T*& value) : ptr_(_MSTL addressof(value)) {
3569 MSTL_CONSTEXPR_ASSERT((static_cast<uintptr_t>(ptr_) % required_alignment) == 0);
3570 }
3571
3577 T* operator =(T* value) noexcept {
3578 this->store(value);
3579 return value;
3580 }
3581
3586 operator T*() const noexcept {
3587 return this->load();
3588 }
3589
3594 bool is_lock_free() const noexcept {
3595 return is_always_lock_free;
3596 }
3597
3603 void store(T* value, const memory_order mo = memory_order_seq_cst) noexcept {
3604 _MSTL atomic_store_any(ptr_, value, mo);
3605 }
3606
3612 T* load(const memory_order mo = memory_order_seq_cst) const noexcept {
3613 return _MSTL atomic_load_any(ptr_, mo);
3614 }
3615
3622 T* exchange(T* desire, const memory_order mo = memory_order_seq_cst) noexcept {
3623 return _MSTL atomic_exchange_any(ptr_, desire, mo);
3624 }
3625
3634 bool compare_exchange_weak(T*& expected, T* desire,
3635 const memory_order success, const memory_order failure) noexcept {
3636 return _MSTL atomic_cmpexch_weak(ptr_, expected, desire, success, failure);
3637 }
3638
3647 bool compare_exchange_strong(T*& expected, T* desire,
3648 const memory_order success, const memory_order failure) noexcept {
3649 return _MSTL atomic_cmpexch_strong(ptr_, expected, desire, success, failure);
3650 }
3651
3659 bool compare_exchange_weak(T*& expected, T* desire,
3660 const memory_order mo = memory_order_seq_cst) noexcept {
3661 return _MSTL atomic_cmpexch_weak(expected, desire, mo, cmpexch_failure_order(mo));
3662 }
3663
3671 bool compare_exchange_strong(T*& expected, T* desire,
3672 const memory_order mo = memory_order_seq_cst) noexcept {
3673 return _MSTL atomic_cmpexch_strong(expected, desire, mo, cmpexch_failure_order(mo));
3674 }
3675
3681 MSTL_ALWAYS_INLINE void
3682 wait(T* old, const memory_order mo = memory_order_seq_cst) const noexcept {
3683 _MSTL atomic_wait_address_v(ptr_, old, [this, mo] {
3684 return this->load(mo);
3685 });
3686 }
3687
3691 MSTL_ALWAYS_INLINE void notify_one() noexcept {
3692 _MSTL atomic_notify_address(ptr_, false);
3693 }
3694
3698 MSTL_ALWAYS_INLINE void notify_all() noexcept {
3699 _MSTL atomic_notify_address(ptr_, true);
3700 }
3701
3708 MSTL_ALWAYS_INLINE value_type
3709 fetch_add(const difference_type dest, const memory_order mo = memory_order_seq_cst) noexcept {
3710 return _MSTL atomic_fetch_add_any(ptr_, real_type_sizes(dest), mo);
3711 }
3712
3719 MSTL_ALWAYS_INLINE value_type
3720 fetch_sub(const difference_type dest, const memory_order mo = memory_order_seq_cst) noexcept {
3721 return _MSTL atomic_fetch_sub_any(ptr_, real_type_sizes(dest), mo);
3722 }
3723
3728 value_type operator ++(int) noexcept {
3729 return fetch_add(1);
3730 }
3731
3736 value_type operator --(int) noexcept {
3737 return fetch_sub(1);
3738 }
3739
3744 value_type operator ++() noexcept {
3745 return _MSTL atomic_add_fetch_any(ptr_, real_type_sizes(1));
3746 }
3747
3752 value_type operator --() noexcept {
3753 return _MSTL atomic_sub_fetch_any(ptr_, real_type_sizes(1));
3754 }
3755
3761 value_type operator +=(const difference_type dest) noexcept {
3762 return _MSTL atomic_add_fetch_any(ptr_, real_type_sizes(dest));
3763 }
3764
3770 value_type operator -=(const difference_type dest) noexcept {
3771 return _MSTL atomic_sub_fetch_any(ptr_, real_type_sizes(dest));
3772 }
3773};
3774
3775#ifdef MSTL_COMPILER_CLANG__
3776#pragma clang diagnostic pop
3777#endif
3778 // AtomicOperations
3780
3782#endif // MSTL_CORE_ASYNC_ATOMIC_BASE_HPP__
MSTL原子等待/通知机制
MSTL_NODISCARD constexpr T * addressof(T &x) noexcept
获取对象的地址
MSTL_ALWAYS_INLINE_INLINE remove_volatile_t< T > atomic_add_fetch(volatile T *ptr, atomic_diff_t< T > value, memory_order mo) noexcept
原子添加并获取操作
MSTL_ALWAYS_INLINE_INLINE void atomic_store(volatile T *ptr, remove_volatile_t< T > value, const memory_order mo) noexcept
原子存储操作
MSTL_ALWAYS_INLINE_INLINE remove_volatile_t< T > atomic_exchange(volatile T *ptr, remove_volatile_t< T > value, const memory_order mo) noexcept
原子交换操作
T atomic_fetch_add_any(T *ptr, remove_volatile_t< T > value, memory_order mo) noexcept
通用原子获取并添加操作
MSTL_ALWAYS_INLINE_INLINE remove_volatile_t< T > atomic_or_fetch(volatile T *ptr, remove_volatile_t< T > value, memory_order mo) noexcept
原子或并获取操作
MSTL_ALWAYS_INLINE_INLINE remove_volatile_t< T > atomic_load(const volatile T *ptr, const memory_order mo) noexcept
原子加载操作
MSTL_ALWAYS_INLINE_INLINE remove_volatile_t< T > atomic_fetch_or(volatile T *ptr, remove_volatile_t< T > value, const memory_order mo) noexcept
原子获取并或操作
MSTL_ALWAYS_INLINE_INLINE remove_volatile_t< T > atomic_fetch_sub(volatile T *ptr, atomic_diff_t< T > value, const memory_order mo) noexcept
原子获取并减去操作
MSTL_ALWAYS_INLINE_INLINE remove_volatile_t< T > atomic_fetch_and(volatile T *ptr, remove_volatile_t< T > value, const memory_order mo) noexcept
原子获取并与操作
T atomic_fetch_sub_any(T *ptr, remove_volatile_t< T > value, memory_order mo) noexcept
通用原子获取并减去操作
constexpr bool is_always_lock_free() noexcept
检查是否支持无锁操作
MSTL_ALWAYS_INLINE_INLINE remove_volatile_t< T > atomic_exchange_any(T *ptr, remove_volatile_t< T > desired, memory_order mo) noexcept
通用原子交换操作
MSTL_ALWAYS_INLINE_INLINE remove_volatile_t< T > atomic_sub_fetch(volatile T *ptr, atomic_diff_t< T > value, memory_order mo) noexcept
原子减去并获取操作
void atomic_wait_address_v(const T *addr, T old, Func f) noexcept
基于值的原子等待
MSTL_ALWAYS_INLINE_INLINE bool atomic_cmpexch_strong(volatile T *ptr, remove_volatile_t< T > *expected, remove_volatile_t< T > desired, const memory_order success, const memory_order failure) noexcept
强比较交换操作
void atomic_notify_address(const T *addr, const bool all) noexcept
原子通知
MSTL_ALWAYS_INLINE_INLINE remove_volatile_t< T > atomic_load_any(const T *ptr, memory_order mo) noexcept
通用原子加载操作
MSTL_ALWAYS_INLINE_INLINE remove_volatile_t< T > atomic_fetch_add(volatile T *ptr, atomic_diff_t< T > value, const memory_order mo) noexcept
原子获取并添加操作
T atomic_sub_fetch_any(T *ptr, remove_volatile_t< T > value) noexcept
通用原子减去并获取操作
MSTL_ALWAYS_INLINE_INLINE bool atomic_cmpexch_weak(volatile T *ptr, remove_volatile_t< T > *expected, remove_volatile_t< T > desired, const memory_order success, const memory_order failure) noexcept
弱比较交换操作
MSTL_ALWAYS_INLINE_INLINE remove_volatile_t< T > atomic_xor_fetch(volatile T *ptr, remove_volatile_t< T > value, memory_order mo) noexcept
原子异或并获取操作
MSTL_ALWAYS_INLINE_INLINE remove_volatile_t< T > atomic_fetch_xor(volatile T *ptr, remove_volatile_t< T > value, const memory_order mo) noexcept
原子获取并异或操作
conditional_t< is_pointer_v< T >, ptrdiff_t, remove_volatile_t< T > > atomic_diff_t
原子操作的差值类型
MSTL_ALWAYS_INLINE_INLINE remove_volatile_t< T > atomic_and_fetch(volatile T *ptr, remove_volatile_t< T > value, memory_order mo) noexcept
原子与并获取操作
MSTL_ALWAYS_INLINE_INLINE void atomic_store_any(T *ptr, remove_volatile_t< T > value, const memory_order mo) noexcept
通用原子存储操作
MSTL_ALWAYS_INLINE_INLINE bool atomic_cmpexch_weak_any(volatile T *ptr, remove_volatile_t< T > *expected, remove_volatile_t< T > *desired, const memory_order success, const memory_order failure) noexcept
通用弱比较交换操作
MSTL_ALWAYS_INLINE_INLINE bool atomic_cmpexch_strong_any(volatile T *ptr, remove_volatile_t< T > *expected, remove_volatile_t< T > *desired, const memory_order success, const memory_order failure) noexcept
通用强比较交换操作
T atomic_add_fetch_any(T *ptr, remove_volatile_t< T > value) noexcept
通用原子添加并获取操作
unsigned char byte_t
字节类型,定义为无符号字符
long long int64_t
64位有符号整数类型
int int32_t
32位有符号整数类型
@ wait
等待操作
MSTL_CONSTEXPR14 void * memory_copy(void *MSTL_RESTRICT dest, const void *MSTL_RESTRICT src, size_t count) noexcept
从源内存复制到目标内存
MSTL_INLINE17 constexpr auto memory_order_release
释放内存顺序常量
constexpr bool is_valid_cmpexch_failure_order(const memory_order mo) noexcept
检查比较交换失败内存顺序是否有效
MSTL_INLINE17 constexpr auto memory_order_acq_rel
获取-释放内存顺序常量
MSTL_ALWAYS_INLINE_INLINE void atomic_thread_fence(const memory_order mo) noexcept
线程内存屏障
constexpr memory_order operator|(memory_order mo, memory_order_modifier mod) noexcept
内存顺序与修饰符的或操作符
MSTL_ALWAYS_INLINE_INLINE void atomic_signal_fence(const memory_order mo) noexcept
信号内存屏障
MSTL_INLINE17 constexpr auto memory_order_relaxed
宽松内存顺序常量
constexpr memory_order operator&(memory_order mo, memory_order_modifier mod) noexcept
内存顺序与修饰符的与操作符
MSTL_INLINE17 constexpr auto memory_order_acquire
获取内存顺序常量
memory_order
内存顺序
MSTL_INLINE17 constexpr auto memory_order_consume
数据依赖内存顺序常量
MSTL_INLINE17 constexpr auto memory_order_seq_cst
顺序一致性内存顺序常量
memory_order_modifier
内存顺序修饰符枚举
constexpr memory_order cmpexch_failure_order(const memory_order mo) noexcept
获取原子比较交换操作失败时的内存顺序
@ release
释放操作,确保前面的读写不会被重排到后面
@ seq_cst
顺序一致性,最严格的内存顺序
@ relaxed
最宽松的内存顺序,只保证原子性
@ acquire
获取操作,确保后续读写不会被重排到前面
@ consume
数据依赖顺序,用于依赖读取的场景
@ acq_rel
获取-释放组合操作
@ memory_order_mask
内存顺序掩码
@ memory_order_modifier_mask
修饰符掩码
@ memory_order_hle_acquire
HLE获取修饰符
@ memory_order_hle_release
HLE释放修饰符
#define _MSTL
全局命名空间MSTL前缀
#define MSTL_END_INNER__
结束inner命名空间
#define _INNER
inner命名空间前缀
#define MSTL_END_NAMESPACE__
结束全局命名空间MSTL
#define MSTL_BEGIN_NAMESPACE__
开始全局命名空间MSTL
#define MSTL_BEGIN_INNER__
开始inner命名空间
uint64_t uintptr_t
可容纳指针的无符号整数类型
int64_t ptrdiff_t
指针差类型
typename remove_volatile< T >::type remove_volatile_t
remove_volatile的便捷别名
MSTL_CONSTEXPR14 T exchange(T &val, U &&new_val) noexcept(conjunction< is_nothrow_move_constructible< T >, is_nothrow_assignable< T &, U > >::value)
将新值赋给对象并返回旧值
constexpr T initialize() noexcept(is_nothrow_default_constructible< T >::value)
返回类型T的默认初始化值
typename conditional< Test, T1, T2 >::type conditional_t
conditional的便捷别名
MSTL_ALWAYS_INLINE bool compare_exchange_weak(value_type &expected, value_type desired, const memory_order success, const memory_order failure) noexcept
弱比较交换指针操作
MSTL_ALWAYS_INLINE_INLINE bool compare_exchange_strong(value_type &expected, value_type desired, const memory_order success, const memory_order failure) volatile noexcept
volatile版本的强比较交换指针操作
MSTL_ALWAYS_INLINE_INLINE bool compare_exchange_weak(value_type &expected, value_type desired, const memory_order success, const memory_order failure) volatile noexcept
volatile版本的弱比较交换指针操作
MSTL_ALWAYS_INLINE value_type fetch_add(const ptrdiff_t dest, const memory_order mo=memory_order_seq_cst) noexcept
原子获取并添加指针偏移
MSTL_ALWAYS_INLINE void notify_one() noexcept
通知一个等待线程
MSTL_ALWAYS_INLINE value_type exchange(const value_type ptr, const memory_order mo=memory_order_seq_cst) noexcept
原子交换指针操作
MSTL_ALWAYS_INLINE void store(const value_type ptr, const memory_order mo=memory_order_seq_cst) noexcept
原子存储指针操作
MSTL_ALWAYS_INLINE_INLINE value_type fetch_add(const ptrdiff_t dest, const memory_order mo=memory_order_seq_cst) volatile noexcept
volatile版本的原子获取并添加指针偏移
MSTL_ALWAYS_INLINE_INLINE void store(const value_type ptr, const memory_order mo=memory_order_seq_cst) volatile noexcept
volatile版本的原子存储指针操作
bool is_lock_free() const volatile noexcept
volatile版本的检查是否支持无锁操作
MSTL_ALWAYS_INLINE value_type fetch_sub(const ptrdiff_t dest, const memory_order mo=memory_order_seq_cst) noexcept
原子获取并减去指针偏移
MSTL_ALWAYS_INLINE bool compare_exchange_weak(value_type &expected, value_type desired, const memory_order mo=memory_order_seq_cst) volatile noexcept
volatile版本的简化版弱比较交换指针操作
MSTL_ALWAYS_INLINE bool compare_exchange_weak(value_type &expected, value_type desired, const memory_order mo=memory_order_seq_cst) noexcept
简化版弱比较交换指针操作
MSTL_ALWAYS_INLINE value_type load(const memory_order mo=memory_order_seq_cst) const noexcept
原子加载指针操作
MSTL_ALWAYS_INLINE_INLINE value_type exchange(const value_type ptr, const memory_order mo=memory_order_seq_cst) volatile noexcept
volatile版本的原子交换指针操作
MSTL_ALWAYS_INLINE_INLINE value_type fetch_sub(const ptrdiff_t dest, const memory_order mo=memory_order_seq_cst) volatile noexcept
volatile版本的原子获取并减去指针偏移
MSTL_ALWAYS_INLINE bool compare_exchange_strong(value_type &expected, value_type desired, const memory_order success, const memory_order failure) noexcept
强比较交换指针操作
MSTL_ALWAYS_INLINE void notify_all() noexcept
通知所有等待线程
static constexpr bool is_always_lock_free
是否总是无锁
MSTL_ALWAYS_INLINE bool compare_exchange_strong(value_type &expected, value_type desired, const memory_order mo=memory_order_seq_cst) noexcept
简化版强比较交换指针操作
bool is_lock_free() const noexcept
检查是否支持无锁操作
MSTL_ALWAYS_INLINE_INLINE value_type load(const memory_order mo=memory_order_seq_cst) const volatile noexcept
volatile版本的原子加载指针操作
ptrdiff_t difference_type
差值类型
MSTL_ALWAYS_INLINE void wait(value_type old, const memory_order mo=memory_order_seq_cst) const noexcept
等待指针改变
MSTL_ALWAYS_INLINE bool compare_exchange_strong(value_type &expected, value_type desired, const memory_order mo=memory_order_seq_cst) volatile noexcept
volatile版本的简化版强比较交换指针操作
MSTL_ALWAYS_INLINE value_type fetch_or(value_type value, const memory_order mo=memory_order_seq_cst) noexcept
原子获取并或操作
MSTL_ALWAYS_INLINE value_type fetch_add(value_type value, const memory_order mo=memory_order_seq_cst) volatile noexcept
volatile版本的原子获取并添加操作
MSTL_ALWAYS_INLINE bool compare_exchange_weak(value_type &expected, value_type desired, const memory_order success, const memory_order failure) volatile noexcept
volatile版本的弱比较交换操作
MSTL_ALWAYS_INLINE bool compare_exchange_strong(value_type &expected, value_type desired, const memory_order mo=memory_order_seq_cst) volatile noexcept
volatile版本的简化版强比较交换操作
MSTL_ALWAYS_INLINE value_type fetch_sub(value_type value, const memory_order mo=memory_order_seq_cst) volatile noexcept
volatile版本的原子获取并减去操作
bool is_lock_free() const noexcept
检查是否支持无锁操作
MSTL_ALWAYS_INLINE void notify_one() noexcept
通知一个等待线程
value_type operator--() noexcept
前置递减运算符
MSTL_ALWAYS_INLINE value_type fetch_and(value_type value, const memory_order mo=memory_order_seq_cst) volatile noexcept
volatile版本的原子获取并与操作
bool is_lock_free() const volatile noexcept
volatile版本的检查是否支持无锁操作
MSTL_ALWAYS_INLINE value_type load(const memory_order mo=memory_order_seq_cst) const volatile noexcept
volatile版本的原子加载操作
value_type operator-=(value_type value) noexcept
减法赋值运算符
MSTL_ALWAYS_INLINE bool compare_exchange_weak(value_type &expected, value_type desired, const memory_order mo=memory_order_seq_cst) noexcept
简化版弱比较交换操作
MSTL_ALWAYS_INLINE bool compare_exchange_strong(value_type &expected, value_type desired, const memory_order success, const memory_order failure) noexcept
强比较交换操作
value_type operator^=(value_type value) noexcept
位异或赋值运算符
MSTL_ALWAYS_INLINE value_type fetch_and(value_type value, const memory_order mo=memory_order_seq_cst) noexcept
原子获取并与操作
value_type operator+=(value_type value) noexcept
加法赋值运算符
MSTL_ALWAYS_INLINE value_type fetch_add(value_type value, const memory_order mo=memory_order_seq_cst) noexcept
原子获取并添加操作
MSTL_ALWAYS_INLINE void store(value_type value, const memory_order mo=memory_order_seq_cst) noexcept
原子存储操作
MSTL_ALWAYS_INLINE value_type fetch_xor(value_type value, const memory_order mo=memory_order_seq_cst) volatile noexcept
volatile版本的原子获取并异或操作
MSTL_ALWAYS_INLINE bool compare_exchange_weak(value_type &expected, value_type desired, const memory_order success, const memory_order failure) noexcept
弱比较交换操作
MSTL_ALWAYS_INLINE bool compare_exchange_strong(value_type &expected, value_type desired, const memory_order success, const memory_order failure) volatile noexcept
volatile版本的强比较交换操作
MSTL_ALWAYS_INLINE value_type fetch_or(value_type value, const memory_order mo=memory_order_seq_cst) volatile noexcept
volatile版本的原子获取并或操作
MSTL_ALWAYS_INLINE void wait(value_type old, const memory_order mo=memory_order_seq_cst) const noexcept
等待值改变
MSTL_ALWAYS_INLINE void notify_all() noexcept
通知所有等待线程
value_type operator++() noexcept
前置递增运算符
MSTL_ALWAYS_INLINE value_type fetch_sub(value_type value, const memory_order mo=memory_order_seq_cst) noexcept
原子获取并减去操作
MSTL_ALWAYS_INLINE value_type exchange(value_type value, const memory_order mo=memory_order_seq_cst) volatile noexcept
volatile版本的原子交换操作
MSTL_ALWAYS_INLINE value_type load(const memory_order mo=memory_order_seq_cst) const noexcept
原子加载操作
MSTL_ALWAYS_INLINE value_type fetch_xor(value_type value, const memory_order mo=memory_order_seq_cst) noexcept
原子获取并异或操作
static constexpr bool is_always_lock_free
MSTL_ALWAYS_INLINE void store(value_type value, const memory_order mo=memory_order_seq_cst) volatile noexcept
volatile版本的原子存储操作
MSTL_ALWAYS_INLINE bool compare_exchange_weak(value_type &expected, value_type desired, const memory_order mo=memory_order_seq_cst) volatile noexcept
volatile版本的简化版弱比较交换操作
value_type operator|=(value_type value) noexcept
位或赋值运算符
MSTL_ALWAYS_INLINE bool compare_exchange_strong(value_type &expected, value_type desired, const memory_order mo=memory_order_seq_cst) noexcept
简化版强比较交换操作
value_type operator&=(value_type value) noexcept
位与赋值运算符
MSTL_ALWAYS_INLINE value_type exchange(value_type value, const memory_order mo=memory_order_seq_cst) noexcept
原子交换操作
MSTL_ALWAYS_INLINE_INLINE bool test_and_set(const memory_order mo=memory_order_seq_cst) volatile noexcept
volatile版本的测试并设置标志
bool value_type
原子标志类型
MSTL_ALWAYS_INLINE_INLINE bool test(const memory_order mo=memory_order_seq_cst) const volatile noexcept
volatile版本的测试标志值
MSTL_ALWAYS_INLINE_INLINE void clear(const memory_order mo=memory_order_seq_cst) volatile noexcept
volatile版本的清除标志
MSTL_ALWAYS_INLINE void wait(const bool old, const memory_order mo=memory_order_seq_cst) const noexcept
等待标志值改变
value_type flag_
原子标志值
MSTL_ALWAYS_INLINE void clear(const memory_order mo=memory_order_seq_cst) noexcept
清除标志
MSTL_ALWAYS_INLINE void notify_one() noexcept
通知一个等待线程
MSTL_ALWAYS_INLINE bool test_and_set(const memory_order mo=memory_order_seq_cst) noexcept
测试并设置标志
MSTL_ALWAYS_INLINE bool test(const memory_order mo=memory_order_seq_cst) const noexcept
测试标志值
MSTL_ALWAYS_INLINE_INLINE void wait(const bool old, const memory_order mo=memory_order_seq_cst) const volatile noexcept
volatile版本的等待标志值改变
MSTL_ALWAYS_INLINE void notify_all() noexcept
通知所有等待线程
value_type fetch_add(value_type value, const memory_order mo=memory_order_seq_cst) noexcept
原子获取并添加操作
static constexpr bool is_always_lock_free
是否总是无锁
value_type fetch_sub(value_type value, const memory_order mo=memory_order_seq_cst) noexcept
原子获取并减去操作
bool is_lock_free() const noexcept
检查是否支持无锁操作
bool compare_exchange_weak(Float &expected, Float desire, const memory_order success, const memory_order failure) volatile noexcept
volatile版本的弱比较交换操作
bool compare_exchange_strong(Float &expected, Float desire, const memory_order mo=memory_order_seq_cst) noexcept
简化版强比较交换操作
Float load(const memory_order mo=memory_order_seq_cst) const noexcept
原子加载操作
value_type difference_type
差值类型
void store(Float value, const memory_order mo=memory_order_seq_cst) volatile noexcept
volatile版本的原子存储操作
bool is_lock_free() const volatile noexcept
volatile版本的检查是否支持无锁操作
value_type fetch_sub(value_type value, const memory_order mo=memory_order_seq_cst) volatile noexcept
volatile版本的原子获取并减去操作
bool compare_exchange_strong(Float &expected, Float desire, const memory_order success, const memory_order failure) volatile noexcept
volatile版本的强比较交换操作
value_type fetch_add(value_type value, const memory_order mo=memory_order_seq_cst) volatile noexcept
volatile版本的原子获取并添加操作
Float exchange(Float desire, const memory_order mo=memory_order_seq_cst) noexcept
原子交换操作
value_type operator-=(value_type value) noexcept
减法赋值运算符
MSTL_ALWAYS_INLINE void wait(Float old, const memory_order mo=memory_order_seq_cst) const noexcept
等待值改变
MSTL_ALWAYS_INLINE void notify_one() noexcept
通知一个等待线程
bool compare_exchange_strong(Float &expected, Float desire, const memory_order mo=memory_order_seq_cst) volatile noexcept
volatile版本的简化版强比较交换操作
void store(Float value, const memory_order mo=memory_order_seq_cst) noexcept
原子存储操作
bool compare_exchange_weak(Float &expected, Float desire, const memory_order mo=memory_order_seq_cst) volatile noexcept
volatile版本的简化版弱比较交换操作
value_type operator+=(value_type value) noexcept
加法赋值运算符
Float exchange(Float desire, const memory_order mo=memory_order_seq_cst) volatile noexcept
volatile版本的原子交换操作
bool compare_exchange_weak(Float &expected, Float desire, const memory_order success, const memory_order failure) noexcept
弱比较交换操作
bool compare_exchange_strong(Float &expected, Float desire, const memory_order success, const memory_order failure) noexcept
强比较交换操作
MSTL_ALWAYS_INLINE void notify_all() noexcept
通知所有等待线程
Float load(const memory_order mo=memory_order_seq_cst) const volatile noexcept
volatile版本的原子加载操作
bool compare_exchange_weak(Float &expected, Float desire, const memory_order mo=memory_order_seq_cst) noexcept
简化版弱比较交换操作
Float exchange(Float desire, const memory_order mo=memory_order_seq_cst) noexcept
原子交换操作
void store(Float value, const memory_order mo=memory_order_seq_cst) noexcept
原子存储操作
Float load(const memory_order mo=memory_order_seq_cst) const noexcept
原子加载操作
MSTL_ALWAYS_INLINE void notify_one() noexcept
通知一个等待线程
MSTL_ALWAYS_INLINE void notify_all() noexcept
通知所有等待线程
bool compare_exchange_strong(Float &expected, Float desire, const memory_order success, const memory_order failure) noexcept
强比较交换操作
bool is_lock_free() const noexcept
检查是否支持无锁操作
value_type fetch_add(value_type value, const memory_order mo=memory_order_seq_cst) noexcept
原子获取并添加操作
static constexpr size_t required_alignment
对齐需求
MSTL_ALWAYS_INLINE void wait(Float old, const memory_order mo=memory_order_seq_cst) const noexcept
等待值改变
static constexpr bool is_always_lock_free
是否总是无锁
bool compare_exchange_strong(Float &expected, Float desire, const memory_order mo=memory_order_seq_cst) noexcept
简化版强比较交换操作
bool compare_exchange_weak(Float &expected, Float desire, const memory_order mo=memory_order_seq_cst) noexcept
简化版弱比较交换操作
value_type fetch_sub(value_type value, const memory_order mo=memory_order_seq_cst) noexcept
原子获取并减去操作
bool compare_exchange_weak(Float &expected, Float desire, const memory_order success, const memory_order failure) noexcept
弱比较交换操作
MSTL_ALWAYS_INLINE void notify_one() noexcept
通知一个等待线程
bool compare_exchange_weak(T &expected, T desire, const memory_order success, const memory_order failure) noexcept
弱比较交换操作
static constexpr bool is_always_lock_free
是否总是无锁
bool compare_exchange_strong(T &expected, T desire, const memory_order mo=memory_order_seq_cst) noexcept
简化版强比较交换操作
bool compare_exchange_strong(T &expected, T desire, const memory_order success, const memory_order failure) noexcept
强比较交换操作
T load(const memory_order mo=memory_order_seq_cst) const noexcept
原子加载操作
MSTL_ALWAYS_INLINE void wait(T old, const memory_order mo=memory_order_seq_cst) const noexcept
等待值改变
bool is_lock_free() const noexcept
检查是否支持无锁操作
bool compare_exchange_weak(T &expected, T desire, const memory_order mo=memory_order_seq_cst) noexcept
简化版弱比较交换操作
void store(T value, const memory_order mo=memory_order_seq_cst) noexcept
原子存储操作
MSTL_ALWAYS_INLINE void notify_all() noexcept
通知所有等待线程
static constexpr size_t required_alignment
对齐需求
T exchange(T desire, const memory_order mo=memory_order_seq_cst) noexcept
原子交换操作
MSTL_NODISCARD bool is_lock_free() const noexcept
检查是否支持无锁操作
bool compare_exchange_weak(T &expected, T desire, const memory_order success, const memory_order failure) noexcept
弱比较交换操作
value_type fetch_xor(value_type value, const memory_order mo=memory_order_seq_cst) noexcept
原子获取并异或操作
static constexpr bool is_always_lock_free
是否总是无锁
static constexpr size_t required_alignment
对齐需求
bool compare_exchange_weak(T &expected, T desire, const memory_order mo=memory_order_seq_cst) noexcept
简化版弱比较交换操作
MSTL_ALWAYS_INLINE void notify_one() noexcept
通知一个等待线程
value_type fetch_and(value_type value, const memory_order mo=memory_order_seq_cst) noexcept
原子获取并与操作
T exchange(T desire, const memory_order mo=memory_order_seq_cst) noexcept
原子交换操作
value_type fetch_add(value_type value, const memory_order mo=memory_order_seq_cst) noexcept
原子获取并添加操作
void store(T value, const memory_order mo=memory_order_seq_cst) noexcept
原子存储操作
bool compare_exchange_strong(T &expected, T desire, const memory_order success, const memory_order failure) noexcept
强比较交换操作
MSTL_ALWAYS_INLINE void wait(T old, const memory_order mo=memory_order_seq_cst) const noexcept
等待值改变
value_type fetch_sub(value_type value, const memory_order mo=memory_order_seq_cst) noexcept
原子获取并减去操作
T load(const memory_order mo=memory_order_seq_cst) const noexcept
原子加载操作
value_type fetch_or(value_type value, const memory_order mo=memory_order_seq_cst) noexcept
原子获取并或操作
MSTL_ALWAYS_INLINE void notify_all() noexcept
通知所有等待线程
bool compare_exchange_strong(T &expected, T desire, const memory_order mo=memory_order_seq_cst) noexcept
简化版强比较交换操作
原子引用基础模板类