MSTL 1.4.0
A Modern C++ Library with extended functionality, web components, and utility libraries
载入中...
搜索中...
未找到
thread_pool.hpp
1#ifndef MSTL_CORE_ASYNC_THREAD_POOL_HPP__
2#define MSTL_CORE_ASYNC_THREAD_POOL_HPP__
3#include "../container/array.hpp"
4#include "../container/priority_queue.hpp"
5#include "../container/unordered_map.hpp"
7#include "../time/datetime.hpp"
8#include "../system/sysinfo.hpp"
9#include "packaged_task.hpp"
10#include "timer.hpp"
12
// Upper bound on tasks waiting in the shared priority queue: effectively
// unbounded (max of int32_t).
MSTL_INLINE17 constexpr size_t THREAD_POOL_TASK_MAX_THRESHHOLD = numeric_traits<int32_t>::max();
// Seconds a cached-mode worker may sit idle before it becomes eligible for reclaim.
MSTL_INLINE17 constexpr size_t THREAD_POOL_MAX_IDLE_SECONDS = 60;
// Capacity of each worker's local ring buffer. Must remain a power of two:
// local_queue::mask_ (= SIZE - 1) relies on it for index wrapping.
MSTL_INLINE17 constexpr size_t THREAD_POOL_LOCAL_QUEUE_SIZE = 256;
// Default ceiling on worker threads = number of processors reported by sysinfo.
// NOTE(review): `static const` in a header creates one copy per translation unit;
// MSTL_INLINE17 (as used above) would give single-definition semantics — confirm intent.
static const size_t THREAD_POOL_THREAD_MAX_THRESHHOLD = sysinfo::instance().get_system_info().processor_numbers;
17
18
// Pool growth policy: MODE_FIXED keeps the initial thread count for the pool's
// lifetime; MODE_CACHED spawns extra workers on demand, bounded by the thread
// threshold (see thread_pool::set_thread_threshhold).
enum class THREAD_POOL_MODE : uint8_t {
    MODE_FIXED, MODE_CACHED
};
22
// Work-stealing batch policy applied by local_queue when one worker steals
// from another. Given a fixed underlying type (uint8_t) for consistency with
// THREAD_POOL_MODE; enumerator values are unchanged, so existing casts of the
// enumerators remain valid.
enum class STEAL_STRATEGY : uint8_t {
    HALF,         // steal half of the victim's pending tasks
    FIXED_BATCH,  // steal a fixed number of tasks (configurable batch size)
    SINGLE,       // steal exactly one task
    ADAPTIVE      // batch size chosen from runtime conditions
};
29
30
32
// A worker thread that is constructed first and started explicitly later via
// start(), so the pool can register bookkeeping (id, context slots) before the
// thread function begins running. Definitions live in the corresponding .cpp.
class MSTL_API manual_thread {
public:
    using id_type = uint32_t;

private:
    // Thread entry point; receives this thread's pool-assigned id.
    using thread_func = _MSTL function<void(id_type)>;

    thread_func func_;
    id_type thread_id_;  // assigned at construction; stable for the thread's lifetime

public:
    explicit manual_thread(thread_func&& func) noexcept;
    ~manual_thread() = default;

    // Pool-assigned identifier (not the OS thread id).
    MSTL_NODISCARD id_type id() const noexcept { return thread_id_; }
    // Launch the underlying thread, invoking func_(thread_id_).
    void start();
};
50
52
53
54struct task_group {
55 task_group() = default;
56 ~task_group() = default;
57
58 _MSTL atomic<size_t> running_count{0};
59
60 void increment() noexcept {
61 running_count.fetch_add(1, _MSTL memory_order_relaxed);
62 }
63
64 void decrement() noexcept {
65 if (running_count.fetch_sub(1, _MSTL memory_order_release) == 1) {
66 running_count.notify_all();
67 }
68 }
69
70 void wait() const noexcept {
71 size_t count = running_count.load(_MSTL memory_order_acquire);
72 while (count != 0) {
73 running_count.wait(count);
74 count = running_count.load(_MSTL memory_order_acquire);
75 }
76 }
77};
78
79
// Per-worker single-producer ring buffer of tasks with support for being
// stolen from by other workers. The owner pushes/pops at tail_; thieves
// advance the "steal" half of the packed head_ word. Capacity is fixed at
// THREAD_POOL_LOCAL_QUEUE_SIZE (a power of two — mask_ depends on that).
class MSTL_API local_queue {
private:
    // Global steal policy shared by all local queues (set via set_steal_strategy).
    static STEAL_STRATEGY steal_strategy_;
    static uint32_t fixed_batch_size_;

    _MSTL array<_MSTL function<void()>, THREAD_POOL_LOCAL_QUEUE_SIZE> tasks_{};
    // head_ packs two 32-bit counters: high word = steal index (advanced by
    // thieves), low word = local head (advanced by the owner's try_pop).
    _MSTL atomic<uint64_t> head_{0};
    _MSTL atomic<uint32_t> tail_{0};

private:
    // Index mask; valid only while the capacity is a power of two.
    constexpr static size_t mask_ = THREAD_POOL_LOCAL_QUEUE_SIZE - 1;

    // Combine (steal, local_head) into the single packed head word.
    MSTL_NODISCARD static uint64_t pack(const uint32_t steal, const uint32_t local_head) noexcept {
        return static_cast<uint64_t>(steal) << 32 | static_cast<uint64_t>(local_head);
    }

    // Split the packed head word back into {steal, local_head}.
    MSTL_NODISCARD static pair<uint32_t, uint32_t> unpack(const uint64_t head) noexcept {
        return {static_cast<uint32_t>(head >> 32), static_cast<uint32_t>(head)};
    }

    uint32_t be_stolen_by_impl(local_queue& dst, uint32_t dst_tail);

public:
    local_queue() = default;
    ~local_queue() = default;
    // Non-copyable (atomics + ownership semantics); movable for container storage.
    local_queue(const local_queue&) = delete;
    local_queue& operator =(const local_queue&) = delete;
    local_queue(local_queue&& other) noexcept;
    local_queue& operator =(local_queue&& other) noexcept;

    MSTL_NODISCARD size_t capacity() const noexcept { return tasks_.size(); }
    MSTL_NODISCARD bool empty() const noexcept { return size() == 0u; }

    // Free slots remaining, measured from the thieves' steal index: a slot is
    // only reusable once stolen/popped past. tail - steal relies on unsigned
    // wraparound of the 32-bit counters.
    MSTL_NODISCARD size_t remain_size() const noexcept {
        const auto tail = tail_.load(_MSTL memory_order_acquire);
        const auto head = head_.load(_MSTL memory_order_acquire);
        const auto steal = unpack(head).first;
        const size_t used = static_cast<size_t>(tail - steal);
        const size_t remain = capacity() - used;
        return remain;
    }
    // Tasks visible to the owner (tail - local_head); excludes slots already
    // claimed by the owner, but a racing thief may make this stale immediately.
    MSTL_NODISCARD size_t size() const noexcept {
        const auto tail = tail_.load(_MSTL memory_order_acquire);
        const auto head = head_.load(_MSTL memory_order_acquire);
        const auto local_head = unpack(head).second;
        return static_cast<size_t>(tail - local_head);
    }

    // Configure the global stealing policy. batch_size applies to FIXED_BATCH.
    // NOTE(review): not synchronized — presumably set once before workers start; confirm.
    static void set_steal_strategy(const STEAL_STRATEGY strategy, const uint32_t batch_size = 4) {
        steal_strategy_ = strategy;
        fixed_batch_size_ = batch_size;
    }

    // Owner-only push. No full check here: the caller is expected to consult
    // remain_size() first (see thread_pool::submit_task); overrunning would
    // overwrite unconsumed slots.
    void push_back(_MSTL function<void()> task) {
        const uint32_t tail = tail_.load(_MSTL memory_order_relaxed);
        tasks_[tail & mask_] = _MSTL move(task);
        tail_.store(tail + 1, _MSTL memory_order_release);
    }

    // Owner-only pop from the local head; empty optional when nothing is available.
    _MSTL optional<_MSTL function<void()>> try_pop();

    // Called by a thief: move a batch of tasks (per steal_strategy_) into
    // dst_queue and return one task for the thief to run immediately.
    _MSTL optional<_MSTL function<void()>> be_stolen_by(local_queue& dst_queue);
};
143
144
// Per-worker-thread state: the thread's local task queue plus bookkeeping used
// by the stealing and idle-reclaim logic. Accessed through the thread-local
// pointer returned by get_worker_context().
struct MSTL_API worker_context {
    using id_type = _INNER manual_thread::id_type;

    local_queue queue{};
    id_type id{0};
    // Set while this worker is actively stealing from another queue.
    _MSTL atomic<bool> is_stealing{false};
    // Consecutive iterations without obtaining a task; presumably feeds the
    // cached-mode idle-timeout reclaim — confirm in thread_function.
    size_t consecutive_idle_count = 0;

    worker_context() = default;
    // Non-copyable (owns a local_queue with atomics); movable for map storage.
    worker_context(const worker_context&) = delete;
    worker_context& operator =(const worker_context&) = delete;
    worker_context(worker_context&& other) noexcept;
    worker_context& operator =(worker_context&& other) noexcept;
};
159
160
// Lifecycle of a submitted task, stored atomically in task_info::status.
// PENDING -> RUNNING -> (COMPLETED | FAILED); FAILED is set by the exception
// handlers in the submit wrappers and preserved by the guard's CAS on exit.
enum class TASK_STATUS {
    PENDING,
    RUNNING,
    COMPLETED,
    FAILED
};
167
168MSTL_CONSTEXPR20 string to_string(const TASK_STATUS status) {
169 switch (status) {
170 case TASK_STATUS::PENDING: return "PENDING";
171 case TASK_STATUS::RUNNING: return "RUNNING";
172 case TASK_STATUS::COMPLETED: return "COMPLETED";
173 case TASK_STATUS::FAILED: return "FAILED";
174 default: MSTL_UNREACHABLE;
175 }
176}
177
178
// Metadata and timing record for one submitted task, shared between the
// submitter (via submit_result) and the executing worker (via the context
// guard inside the packaged task).
struct task_info {
    using priority_type = uint32_t;

    const uint64_t id;  // unique, from thread_pool::generate_task_id
    atomic<TASK_STATUS> status{TASK_STATUS::PENDING};
    timestamp submit_time{timestamp::now()};
    timestamp start_time{0};   // 0 until the task begins executing
    timestamp finish_time{0};  // 0 until the task finishes
    _INNER manual_thread::id_type worker_thread_id{0};  // 0 = not run on a pool worker
    string error{};            // populated only when status becomes FAILED
    priority_type priority;

    explicit task_info(const uint64_t task_id, const priority_type priority)
        : id(task_id), priority(priority) {}

    // True once the task reached a terminal state (COMPLETED or FAILED).
    MSTL_NODISCARD bool is_finished() const noexcept {
        const auto s = status.load(_MSTL memory_order_acquire);
        return s == TASK_STATUS::COMPLETED || s == TASK_STATUS::FAILED;
    }

    // Execution duration, or -1 when the task has not both started and finished.
    MSTL_NODISCARD int64_t exec_time() const noexcept {
        if (start_time.value() == 0 || finish_time.value() == 0) {
            return -1;
        }
        return finish_time - start_time;
    }
};
206
// Shared handle to a task's metadata record.
using task_info_ptr = _MSTL shared_ptr<task_info>;


// What a submit_* call hands back: the future for the task's result plus the
// shared metadata record for status/timing queries.
template <typename T>
struct submit_result {
    _MSTL future<T> future;
    task_info_ptr task_info;

    // True when the submission produced both a usable future and a metadata record.
    MSTL_NODISCARD explicit operator bool() const noexcept {
        return future.valid() && task_info;
    }
};
219
220
// Work-stealing thread pool with a shared priority queue, per-worker local
// queues, optional cached (on-demand) thread growth, and a timer scheduler
// for delayed/periodic submissions. Template submit_* bodies follow below;
// the rest is defined in the corresponding .cpp.
class MSTL_API thread_pool {
public:
    // Cancellation flag shared between the pool and submit_every callers.
    struct periodic_task_state {
        atomic_bool cancelled{false};
    };

    // Snapshot of pool counters; to_string() renders it for logging.
    struct MSTL_API pool_statistics : istringify<pool_statistics> {
        size_t total_threads;
        size_t idle_threads;
        size_t busy_threads;
        size_t queue_size;
        size_t total_submitted;
        size_t total_stolen;
        size_t total_completed;

        MSTL_NODISCARD string to_string() const;
    };

    using id_type = _INNER manual_thread::id_type;
    using periodic_token = shared_ptr<periodic_task_state>;
    using priority_type = task_info::priority_type;

private:
    using Task = _MSTL function<void()>;

    // Entry of the shared priority queue; ordered by priority only (max-heap),
    // so equal-priority tasks have no FIFO guarantee.
    struct priority_task {
        Task task;
        priority_type priority;
        task_info_ptr task_info;

        priority_task(Task t, const priority_type p, task_info_ptr info) noexcept
            : task(_MSTL move(t)), priority(p), task_info(_MSTL move(info)) {}

        bool operator <(const priority_task& other) const noexcept {
            return priority < other.priority;
        }
    };

    // Worker threads and their contexts, keyed by pool thread id.
    _MSTL unordered_map<id_type, _MSTL unique_ptr<_INNER manual_thread>> threads_map_;
    _MSTL unordered_map<id_type, worker_context> worker_contexts_;
    // Id-indexed atomic pointers into worker_contexts_, for lock-free victim
    // selection during stealing.
    _MSTL vector<_MSTL atomic<worker_context*>> worker_contexts_ptr_;
    _MSTL mutex worker_contexts_mtx_;

    // Drives submit_after / submit_every expirations.
    _MSTL timer_scheduler<steady_clock> timer_{};

    id_type init_thread_size_{0};
    size_t thread_threshhold_{THREAD_POOL_THREAD_MAX_THRESHHOLD};

    // Shared queue state, guarded by task_queue_mtx_ with the two condvars.
    _MSTL priority_queue<priority_task> task_queue_{};
    _MSTL atomic_uint task_size_{0};
    _MSTL atomic_uint idle_thread_size_{0};
    size_t task_threshhold_{THREAD_POOL_TASK_MAX_THRESHHOLD};

    _MSTL mutex task_queue_mtx_{};
    _MSTL condition_variable not_full_{};
    _MSTL condition_variable not_empty_{};
    _MSTL condition_variable exit_cond_{};  // presumably signalled during stop(); confirm in .cpp

    _MSTL atomic<THREAD_POOL_MODE> pool_mode_{THREAD_POOL_MODE::MODE_FIXED};
    _MSTL atomic_bool is_running_{false};

    // Monotonic statistics counters (reported via statistics()).
    _MSTL atomic_size_t total_submitted_tasks_{0};
    _MSTL atomic_size_t total_completed_tasks_{0};
    _MSTL atomic_size_t total_stolen_tasks_{0};

    _MSTL atomic_size_t steal_worker_count_{0};
    _MSTL atomic_uint64_t next_task_id_{0};

private:
    // Monotonically increasing task id; relaxed is fine for a pure counter.
    uint64_t generate_task_id() {
        return next_task_id_.fetch_add(1, _MSTL memory_order_relaxed);
    }

    // Worker main loop (defined in .cpp).
    void thread_function(id_type thread_id);
    // Attempt to steal work for ctx from another worker's local queue.
    _MSTL optional<Task> try_steal_task(worker_context& ctx);

    // Builds a statistics snapshot without taking locks; caller synchronizes.
    pool_statistics statistics_unsafe() const;

public:
    thread_pool();
    ~thread_pool();

    thread_pool(const thread_pool&) = delete;
    thread_pool& operator =(const thread_pool&) = delete;
    thread_pool(thread_pool&&) = delete;
    thread_pool& operator =(thread_pool&&) = delete;

    // Configuration setters; return false when the change is not permitted
    // (presumably while running — confirm in .cpp).
    bool set_mode(THREAD_POOL_MODE mode) noexcept;
    bool set_steal_mode(STEAL_STRATEGY strategy, uint32_t steal_batch = 4) noexcept;
    bool set_task_threshhold(size_t threshhold) noexcept;
    bool set_thread_threshhold(size_t threshhold) noexcept;

    MSTL_NODISCARD static size_t max_thread_size() noexcept { return THREAD_POOL_THREAD_MAX_THRESHHOLD; }
    MSTL_NODISCARD bool running() const noexcept { return is_running_; }
    MSTL_NODISCARD THREAD_POOL_MODE mode() const noexcept { return pool_mode_; }
    MSTL_NODISCARD pool_statistics statistics() const;

    // Spin up the initial workers; stop() drains and joins, returning final stats.
    bool start(size_t init_thread_size = 3);
    pool_statistics stop();

    // Submit func(args...) with an explicit priority (higher runs earlier in
    // the shared queue). Priority-0 tasks may go to the caller's local queue.
    template <typename Func, typename... Args, enable_if_t<is_invocable_v<Func, Args...>, int> = 0>
    submit_result<invoke_result_t<Func, Args...>> submit_task(priority_type priority, Func&& func, Args&&... args);

    // Convenience overload: priority 0.
    template <typename Func, typename... Args, enable_if_t<is_invocable_v<Func, Args...>, int> = 0>
    submit_result<invoke_result_t<Func, Args...>> submit_task(Func&& func, Args&&... args) {
        return this->submit_task(0, _MSTL forward<Func>(func), _MSTL forward<Args>(args)...);
    }

    // Run once after delay_ms milliseconds, with the given priority.
    template <typename Func, typename... Args, enable_if_t<is_invocable_v<Func, Args...>, int> = 0>
    submit_result<invoke_result_t<Func, Args...>> submit_after(int64_t delay_ms, priority_type priority, Func&& func, Args&&... args);

    // Convenience overload: priority 0.
    template <typename Func, typename... Args, enable_if_t<is_invocable_v<Func, Args...>, int> = 0>
    submit_result<invoke_result_t<Func, Args...>> submit_after(int64_t delay_ms, Func&& func, Args&&... args) {
        return this->submit_after(delay_ms, 0, _MSTL forward<Func>(func), _MSTL forward<Args>(args)...);
    }

    // Run repeatedly every interval_ms milliseconds until the returned token
    // is cancelled (see cancel_periodic_task).
    template <typename Func, typename... Args, enable_if_t<is_invocable_v<Func, Args...>, int> = 0>
    periodic_token submit_every(int64_t interval_ms, priority_type priority, Func&& func, Args&&... args);

    // Convenience overload: priority 0.
    template <typename Func, typename... Args, enable_if_t<is_invocable_v<Func, Args...>, int> = 0>
    periodic_token submit_every(int64_t interval_ms, Func&& func, Args&&... args) {
        return this->submit_every(interval_ms, 0, _MSTL forward<Func>(func), _MSTL forward<Args>(args)...);
    }

    // Request that a periodic task stop rescheduling itself.
    static void cancel_periodic_task(const periodic_token& token) {
        if (token) token->cancelled.store(true);
    }

    // Block on each future and collect the results into a tuple.
    template <typename... Types>
    static tuple<future_result_t<Types>...> wait(future<Types>&&... futures) {
        return _MSTL make_tuple(_MSTL get(futures)...);
    }
};
354
// Thread-local accessors for the current worker's context and the task group
// the current task belongs to (used by submit_task to propagate group
// membership to nested submissions). On MSVC only the accessor functions are
// exported — presumably to avoid exporting thread_local variables across DLL
// boundaries (confirm); elsewhere the thread_local objects are extern and the
// accessors inline.
#ifdef MSTL_COMPILER_MSVC__
MSTL_ALWAYS_INLINE_INLINE MSTL_API worker_context*& get_worker_context() noexcept;
MSTL_ALWAYS_INLINE_INLINE MSTL_API _MSTL shared_ptr<task_group>& get_current_task_group() noexcept;
#else
extern thread_local worker_context* t_worker_ctx;
extern thread_local _MSTL shared_ptr<task_group> t_current_task_group;
MSTL_ALWAYS_INLINE_INLINE worker_context*& get_worker_context() noexcept { return t_worker_ctx; }
MSTL_ALWAYS_INLINE_INLINE _MSTL shared_ptr<task_group>& get_current_task_group() noexcept { return t_current_task_group; }
#endif
364
365
// Core submission path. Wraps func(args...) in a packaged_task whose guard
// maintains task_info status/timing and task-group membership, then routes the
// job: priority > 0 goes to the shared priority queue; priority 0 prefers the
// calling worker's local queue (if called from a worker with free slots),
// falling back to the shared queue. In MODE_CACHED, may spawn an extra worker.
// On a full queue (1s timeout) the task is marked FAILED and a dummy future is
// returned instead of blocking indefinitely.
template <typename Func, typename... Args, enable_if_t<is_invocable_v<Func, Args...>, int>>
submit_result<invoke_result_t<Func, Args...>>
thread_pool::submit_task(const priority_type priority, Func&& func, Args&&... args) {
    using Result = decltype(func(_MSTL forward<Args>(args)...));

    auto info = _MSTL make_shared<task_info>(generate_task_id(), priority);

    // If the submitter is itself running inside a task group, the new task
    // joins that group so task_group::wait covers nested submissions.
    const auto current_group = get_current_task_group();
    if (current_group) {
        current_group->increment();
    }

    auto task = _MSTL make_shared<_MSTL packaged_task<Result()>>(
        [func = _MSTL forward<Func>(func),
        args = _MSTL make_tuple(_MSTL forward<Args>(args)...),
        group = current_group,
        info]() mutable -> Result {
            // RAII guard: RUNNING + timing on entry; COMPLETED (unless already
            // FAILED), group restore/decrement on exit.
            struct context_guard {
                task_info_ptr info;
                _MSTL shared_ptr<task_group> group_inner;
                _MSTL shared_ptr<task_group> prev_group_inner;

                explicit context_guard(task_info_ptr i, _MSTL shared_ptr<task_group> g)
                    : info(move(i)), group_inner(move(g)) {
                    info->status.store(TASK_STATUS::RUNNING, _MSTL memory_order_release);
                    info->start_time = timestamp::now();
                    info->worker_thread_id = get_worker_context() ? get_worker_context()->id : 0;

                    // Make this task's group current for the duration of the
                    // task; restored in the destructor.
                    prev_group_inner = get_current_task_group();
                    get_current_task_group() = group_inner;
                }

                ~context_guard() noexcept {
                    try {
                        info->finish_time = timestamp::now();
                        // CAS so a FAILED status set by the catch blocks below
                        // is not overwritten with COMPLETED.
                        auto expected = TASK_STATUS::RUNNING;
                        info->status.compare_exchange_strong(expected,
                            TASK_STATUS::COMPLETED, _MSTL memory_order_release);

                        get_current_task_group() = prev_group_inner;
                        if (group_inner) group_inner->decrement();
                    } catch (...) {
                        /* ignore */
                    }
                }
            };

            context_guard guard(info, group);
            try {
                return _MSTL apply(func, args);
            } catch (const exception& e) {
                info->status.store(TASK_STATUS::FAILED, _MSTL memory_order_release);
                info->error = e.what();
                throw;  // rethrow so the packaged_task stores the exception in the future
            } catch (...) {
                info->status.store(TASK_STATUS::FAILED, _MSTL memory_order_release);
                info->error = "Unknown exception";
                throw;
            }
        }
    );

    _MSTL future<Result> res = task->get_future();
    Task job([task] { (*task)(); });

    if (priority > 0) {
        // Prioritized tasks always go through the shared queue so the
        // priority ordering applies.
        _MSTL smart_lock<_MSTL mutex> lock(task_queue_mtx_);

        if (!not_full_.wait_for(lock, seconds(1), [&]()->bool {
            return task_queue_.size() < task_threshhold_;
        })) {
            // Queue stayed full for 1s: fail the submission. The dummy task
            // gives the caller a ready future (default-constructed Result).
            // NOTE(review): `Result()` requires Result to be void or
            // default-constructible — confirm that is an accepted constraint.
            info->status.store(TASK_STATUS::FAILED, _MSTL memory_order_release);
            info->error = "Task queue is full";

            auto dummy_task = _MSTL make_shared<_MSTL packaged_task<Result()>>(
                []() -> Result { return Result(); });
            (*dummy_task)();
            return submit_result<Result>{dummy_task->get_future(), info};
        }

        task_queue_.emplace(_MSTL move(job), priority, info);
        ++task_size_;
        ++total_submitted_tasks_;
        not_empty_.notify_one();

    } else {
        // Priority 0: fast path — push onto the submitting worker's own local
        // queue when called from inside the pool and there is room.
        auto* ctx = get_worker_context();

        if (ctx != nullptr && ctx->queue.remain_size() > 0) {
            ctx->queue.push_back(move(job));
            ++total_submitted_tasks_;
        } else {
            // External submitter (or local queue full): shared queue fallback,
            // same full-queue handling as the prioritized path.
            _MSTL smart_lock<_MSTL mutex> lock(task_queue_mtx_);
            if (!not_full_.wait_for(lock, seconds(1), [&]()->bool {
                return task_queue_.size() < task_threshhold_;
            })) {
                info->status.store(TASK_STATUS::FAILED, _MSTL memory_order_release);
                info->error = "Task queue is full";

                auto dummy_task = _MSTL make_shared<_MSTL packaged_task<Result()>>(
                    []() -> Result { return Result(); });
                (*dummy_task)();
                return submit_result<Result>{dummy_task->get_future(), info};
            }

            task_queue_.emplace(_MSTL move(job), 0, info);
            ++task_size_;
            ++total_submitted_tasks_;
            not_empty_.notify_one();
        }
    }

    // Cached mode: grow the pool when backlog exceeds idle workers and the
    // thread ceiling has not been reached.
    // NOTE(review): threads_map_ / worker_contexts_ptr_ are touched here
    // without holding worker_contexts_mtx_ — verify external synchronization.
    if (pool_mode_.load() == THREAD_POOL_MODE::MODE_CACHED
        && task_size_.load() > idle_thread_size_
        && threads_map_.size() < thread_threshhold_) {

        // NOTE(review): a line appears to be missing from this copy of the
        // file here — presumably the construction of `ptr`, a manual_thread
        // whose callback is the lambda below. Verify against the original.
        [this](const id_type id) {
            thread_function(id);
        });
        id_type thread_id = ptr->id();

        if (thread_id >= worker_contexts_ptr_.size()) {
            // NOTE(review): reserve() does not change size(), and when size()
            // is 0 `size() - 1` underflows — verify the intended loop bounds.
            worker_contexts_ptr_.reserve(thread_id + 1);
            for (size_t i = worker_contexts_ptr_.size() - 1; i <= thread_id; i++) {
                // NOTE(review): the declaration of `tmp` (an atomic
                // worker_context pointer) is missing from this copy.
                tmp.store(nullptr, memory_order_relaxed);
                worker_contexts_ptr_.emplace_back(move(tmp));
            }
        }

        threads_map_.emplace(thread_id, _MSTL move(ptr));
        threads_map_[thread_id]->start();
        ++idle_thread_size_;
    }

    return submit_result<Result>{_MSTL move(res), info};
}
504
505template <typename Func, typename... Args, enable_if_t<is_invocable_v<Func, Args...>, int>>
506submit_result<invoke_result_t<Func, Args...>>
507thread_pool::submit_after(const int64_t delay_ms, const priority_type priority, Func&& func, Args&&... args) {
508 using Result = decltype(func(args...));
509
510 auto info = _MSTL make_shared<task_info>(generate_task_id(), priority);
511
512 auto task = _MSTL make_shared<_MSTL packaged_task<Result()>>(
513 [func = _MSTL forward<Func>(func),
514 tup = _MSTL make_tuple(_MSTL forward<Args>(args)...),
515 info]() mutable {
516 struct context_guard {
517 task_info_ptr info;
518
519 explicit context_guard(task_info_ptr i) : info(move(i)) {
520 info->status.store(TASK_STATUS::RUNNING, _MSTL memory_order_release);
521 info->start_time = timestamp::now();
522 info->worker_thread_id = get_worker_context() ? get_worker_context()->id : 0;
523 }
524
525 ~context_guard() noexcept {
526 info->finish_time = timestamp::now();
527 auto expected = TASK_STATUS::RUNNING;
528 info->status.compare_exchange_strong(expected,
529 TASK_STATUS::COMPLETED, _MSTL memory_order_release);
530 }
531 };
532
533 context_guard guard(info);
534
535 try {
536 return _MSTL apply(func, tup);
537 } catch (const _MSTL exception& e) {
538 info->status.store(TASK_STATUS::FAILED, _MSTL memory_order_release);
539 info->error = e.what();
540 throw;
541 }
542 });
543
544 _MSTL future<Result> res = task->get_future();
545
546 auto expire_time = steady_clock::now() + milliseconds(delay_ms);
547 timer_.add_task(expire_time, [this, task = _MSTL move(task), priority]() mutable {
548 this->submit_task(priority, [task]() {
549 (*task)();
550 });
551 });
552
553 return submit_result<Result>{_MSTL move(res), info};
554}
555
// Schedule func(args...) to run repeatedly every interval_ms milliseconds at
// the given priority, starting after one interval. Returns a token whose
// cancelled flag stops further rescheduling (see cancel_periodic_task).
// NOTE(review): a line appears to be missing from this copy of the file —
// presumably the creation of `state` (a shared periodic_task_state) right at
// the top of the body; verify against the original source.
template <typename Func, typename... Args, enable_if_t<is_invocable_v<Func, Args...>, int>>
thread_pool::periodic_token thread_pool::submit_every(int64_t interval_ms, const priority_type priority, Func &&func, Args &&...args) {
    auto task = _MSTL make_shared<_MSTL function<void()>>(
        [func = _MSTL forward<Func>(func), tup = _MSTL make_tuple(_MSTL forward<Args>(args)...)]() mutable {
            _MSTL apply(func, tup);
        }
    );
    // Self-rescheduling handler: submits the task, then re-arms the timer.
    // NOTE(review): *handler_ptr captures handler_ptr by value, forming a
    // shared_ptr cycle that is never broken even after cancellation — the
    // handler (and captured task) leak; consider a weak_ptr re-arm.
    auto handler_ptr = _MSTL make_shared<Task>();
    *handler_ptr = [this, state, task, interval_ms, priority, handler_ptr]() {
        if (state->cancelled.load()) return;

        this->submit_task(priority, [task]() {
            (*task)();
        });

        // Re-check after submission so a cancel issued during submit takes
        // effect before re-arming.
        if (state->cancelled.load()) return;
        auto next_time = steady_clock::now() + milliseconds(interval_ms);
        timer_.add_task(next_time, [handler_ptr]() { (*handler_ptr)(); });
    };

    // First firing after one full interval (no immediate run).
    auto first_time = steady_clock::now() + milliseconds(interval_ms);
    timer_.add_task(first_time, [handler_ptr]() { (*handler_ptr)(); });
    return state;
}
581
583#endif // MSTL_CORE_ASYNC_THREAD_POOL_HPP__
函数包装器主模板声明
独占future类模板
锁管理器模板
static MSTL_NODISCARD constexpr T max() noexcept
获取类型的最大值
共享智能指针类模板
MSTL_NODISCARD constexpr T && forward(remove_reference_t< T > &x) noexcept
完美转发左值
MSTL_ALWAYS_INLINE enable_if_t< is_void_v< T >, future_result_t< T > > get(future< T > &f)
通用future结果获取函数
atomic< bool > atomic_bool
布尔原子类型
atomic< uint64_t > atomic_uint64_t
64位无符号整数原子类型
atomic< size_t > atomic_size_t
大小类型原子类型
atomic< unsigned int > atomic_uint
无符号整型原子类型
unsigned char uint8_t
8位无符号整数类型
unsigned int uint32_t
32位无符号整数类型
long long int64_t
64位有符号整数类型
unsigned long long uint64_t
64位无符号整数类型
constexpr iter_difference_t< Iterator > count(Iterator first, Iterator last, const T &value)
统计范围内等于指定值的元素数量
duration< int64_t, milli > milliseconds
毫秒持续时间
duration< int64_t > seconds
秒持续时间
@ wait
等待操作
typename _INNER __invoke_result_aux< F, Args... >::type invoke_result_t
invoke_result的便捷别名
MSTL_INLINE17 constexpr auto memory_order_release
释放内存顺序常量
MSTL_INLINE17 constexpr auto memory_order_relaxed
宽松内存顺序常量
MSTL_INLINE17 constexpr auto memory_order_acquire
获取内存顺序常量
lock< Mutex, true > smart_lock
智能锁管理器的便捷类型别名
#define _MSTL
全局命名空间MSTL前缀
#define MSTL_END_INNER__
结束inner命名空间
#define _INNER
inner命名空间前缀
#define MSTL_END_NAMESPACE__
结束全局命名空间MSTL
#define MSTL_BEGIN_NAMESPACE__
开始全局命名空间MSTL
#define MSTL_BEGIN_INNER__
开始inner命名空间
MSTL_NODISCARD constexpr bool operator<(const normal_iterator< LeftIter > &lhs, const normal_iterator< RightIter > &rhs) noexcept
小于比较运算符
enable_if_t<!is_unbounded_array_v< T > &&is_constructible_v< T, Args... >, shared_ptr< T > > make_shared(Args &&... args)
融合分配创建共享指针
constexpr Iterator2 move(Iterator1 first, Iterator1 last, Iterator2 result)
移动范围元素
MSTL_ALWAYS_INLINE_INLINE thread::id id() noexcept
获取当前线程标识符
bool MSTL_API priority(int priority) noexcept
设置线程优先级
MSTL_NODISCARD constexpr tuple< unwrap_ref_decay_t< Types >... > make_tuple(Types &&... args)
从参数创建元组
constexpr auto apply(Func &&f, Tuple &&t) noexcept(_INNER __apply_unpack_tuple< _MSTL is_nothrow_invocable, Func, Tuple >::value) -> decltype(auto)
将元组元素解包作为参数调用函数
MSTL_NODISCARD MSTL_ALWAYS_INLINE constexpr bool empty(const Container &cont) noexcept(noexcept(cont.empty()))
检查容器是否为空
MSTL_NODISCARD MSTL_ALWAYS_INLINE constexpr decltype(auto) size(const Container &cont) noexcept(noexcept(cont.size()))
获取容器的大小
typename enable_if< Test, T >::type enable_if_t
enable_if的便捷别名
MSTL_CONSTEXPR20 unique_ptr< T > make_unique(Args &&... args)
创建unique_ptr
MSTL可选值类型
MSTL 异步任务包装器
通用原子类型模板
T load(const memory_order mo=memory_order_seq_cst) const noexcept
原子加载操作
void store(T value, const memory_order mo=memory_order_seq_cst) noexcept
原子存储操作
异常基类
MSTL_NODISCARD const char * what() const noexcept
获取错误信息
static time_point now() noexcept
获取当前时间点