BluFedora Job System v1.0.0
This is a C++ job system library for use in game engines.
job_queue.hpp
/******************************************************************************/
/******************************************************************************/
#ifndef JOB_QUEUE_HPP
#define JOB_QUEUE_HPP

#include "job_api.hpp"    // PauseProcessor
#include "job_assert.hpp" // JobAssert

#include <algorithm> // copy_n
#include <atomic>    // atomic<T>
#include <cstddef>   // size_t
#include <iterator>  // make_move_iterator
#include <mutex>     // mutex
#include <new>       // hardware_destructive_interference_size, operator new
#include <utility>   // move

namespace Job
{
  static constexpr std::size_t k_FalseSharingPadSize = std::hardware_destructive_interference_size;

#define Job_CacheAlign alignas(k_FalseSharingPadSize)

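  // For example, with a 64-byte destructive interference size (a typical
  // value; the actual number is implementation-defined), the padding pattern
  // used by the queues below keeps each thread's hot index on its own cache
  // line:
  //
  //   std::atomic<std::size_t> index;                           // bytes 0..7
  //   unsigned char pad[k_FalseSharingPadSize - sizeof(index)]; // bytes 8..63
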
  template<typename T>
  class LockedQueue
  {
   public:
    using size_type = std::size_t;

   private:
    std::mutex m_Lock;
    T*         m_Data;
    size_type  m_Capacity;
    size_type  m_CapacityMask;
    size_type  m_WriteIndex;
    size_type  m_Size;

   public:
    void Initialize(T* const memory_backing, const size_type capacity) noexcept
    {
      m_Data         = memory_backing;
      m_Capacity     = capacity;
      m_CapacityMask = capacity - 1;
      m_WriteIndex   = 0u;
      m_Size         = 0u;

      JobAssert((m_Capacity & m_CapacityMask) == 0, "Capacity must be a power of 2.");
    }

    bool Push(const T& value)
    {
      const std::lock_guard<std::mutex> guard(m_Lock);
      (void)guard;

      if (m_Size == m_Capacity)
      {
        return false;
      }

      *elementAt(m_WriteIndex++) = value;
      ++m_Size;

      return true;
    }

    bool Pop(T* const out_value)
    {
      JobAssert(out_value != nullptr, "`out_value` cannot be a nullptr.");

      const std::lock_guard<std::mutex> guard(m_Lock);
      (void)guard;

      if (m_Size == 0u)
      {
        return false;
      }

      *out_value = *elementAt(m_WriteIndex - m_Size);
      --m_Size;

      return true;
    }

   private:
    // Power-of-2 capacity lets `raw_index % m_Capacity` reduce to a cheap bitwise AND.
    size_type mask(const size_type raw_index) const noexcept
    {
      return raw_index & m_CapacityMask;
    }

    T* elementAt(const size_type raw_index) const noexcept
    {
      return m_Data + mask(raw_index);
    }
  };
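
  // Usage sketch (illustrative, not part of the original header; `storage`
  // and `queue` are hypothetical names): a single-threaded round trip
  // through the mutex-based queue above.
  //
  //   static int storage[64];
  //
  //   Job::LockedQueue<int> queue;
  //   queue.Initialize(storage, 64); // Capacity must be a power of 2.
  //
  //   queue.Push(42);
  //
  //   int value;
  //   if (queue.Pop(&value)) { /* value == 42 */ }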

  // [https://www.youtube.com/watch?v=K3P_Lmq6pw0]
  //
  // Single Producer, Single Consumer Lockfree Queue
  //
  template<typename T>
  class Job_CacheAlign SPSCQueue
  {
   public:
    using size_type        = std::size_t;
    using atomic_size_type = std::atomic<size_type>;

   private:
    // Writer Thread

    atomic_size_type m_ProducerIndex;
    unsigned char    m_Padding0[k_FalseSharingPadSize - sizeof(m_ProducerIndex)];
    size_type        m_CachedConsumerIndex;
    unsigned char    m_Padding1[k_FalseSharingPadSize - sizeof(m_CachedConsumerIndex)];

    // Reader Thread

    atomic_size_type m_ConsumerIndex;
    unsigned char    m_Padding2[k_FalseSharingPadSize - sizeof(m_ConsumerIndex)];
    size_type        m_CachedProducerIndex;
    unsigned char    m_Padding3[k_FalseSharingPadSize - sizeof(m_CachedProducerIndex)];

    // Shared 'Immutable' State

    T*            m_Data;
    size_type     m_Capacity;
    size_type     m_CapacityMask;
    unsigned char m_Padding4[k_FalseSharingPadSize - sizeof(m_Data) - sizeof(m_Capacity) - sizeof(m_CapacityMask)];

    static_assert(atomic_size_type::is_always_lock_free, "Expected to be lockfree.");

   public:
    SPSCQueue()  = default;
    ~SPSCQueue() = default;

    // NOTE(SR): Not thread safe.
    void Initialize(T* const memory_backing, const size_type capacity) noexcept
    {
      m_ProducerIndex.store(0, std::memory_order_relaxed);
      m_CachedConsumerIndex = 0u;
      m_ConsumerIndex.store(0, std::memory_order_relaxed);
      m_CachedProducerIndex = 0u;
      m_Data                = memory_backing;
      m_Capacity            = capacity;
      m_CapacityMask        = capacity - 1;

      JobAssert((m_Capacity & m_CapacityMask) == 0, "Capacity must be a power of 2.");
    }

    bool Push(const T& value)
    {
      return PushLazy([&value](T* const destination) { ::new (destination) T(value); });
    }

    bool Pop(T* const out_value)
    {
      JobAssert(out_value != nullptr, "`out_value` cannot be a nullptr.");

      return PopLazy([out_value](T&& value) { *out_value = std::move(value); });
    }

    // Memory passed into `callback` is uninitialized; it must be placement-new'ed into.
    template<typename CallbackFn>
    bool PushLazy(CallbackFn&& callback)
    {
      const size_type write_index = m_ProducerIndex.load(std::memory_order_relaxed);

      if (IsFull(write_index, m_CachedConsumerIndex))
      {
        m_CachedConsumerIndex = m_ConsumerIndex.load(std::memory_order_acquire);
        if (IsFull(write_index, m_CachedConsumerIndex))
        {
          return false;
        }
      }

      callback(ElementAt(write_index));
      m_ProducerIndex.store(write_index + 1, std::memory_order_release);

      return true;
    }

    template<typename CallbackFn>
    bool PopLazy(CallbackFn&& callback)
    {
      const size_type read_index = m_ConsumerIndex.load(std::memory_order_relaxed);

      if (IsEmpty(m_CachedProducerIndex, read_index))
      {
        m_CachedProducerIndex = m_ProducerIndex.load(std::memory_order_acquire);
        if (IsEmpty(m_CachedProducerIndex, read_index))
        {
          return false;
        }
      }

      T* const element = ElementAt(read_index);
      callback(std::move(*element));
      element->~T();
      m_ConsumerIndex.fetch_add(1, std::memory_order_release);

      return true;
    }

   private:
    bool IsFull(const size_type head, const size_type tail) const noexcept
    {
      // Mask both sides so the check stays correct once the monotonically
      // increasing indices grow past the capacity.
      return ((head + 1) & m_CapacityMask) == (tail & m_CapacityMask);
    }

    static bool IsEmpty(const size_type head, const size_type tail) noexcept
    {
      return head == tail;
    }

    T* ElementAt(const size_type index) const noexcept
    {
      return m_Data + (index & m_CapacityMask);
    }
  };
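
  // Usage sketch (illustrative, not part of the original header; `slots` and
  // `spsc` are hypothetical names): exactly one producer thread and exactly
  // one consumer thread may touch the queue. PushLazy constructs in place;
  // PopLazy moves the element out and the queue destroys the slot.
  //
  //   static int                 slots[1024];
  //   static Job::SPSCQueue<int> spsc;
  //   spsc.Initialize(slots, 1024);
  //
  //   // Producer thread: construct directly into the ring's storage.
  //   spsc.PushLazy([](int* const dst) { ::new (dst) int(7); });
  //
  //   // Consumer thread: move the element out.
  //   int out;
  //   spsc.PopLazy([&out](int&& v) { out = v; });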

  enum class SPMCDequeStatus
  {
    SUCCESS,     //!< Returned from Push, Pop and Steal.
    FAILED_RACE, //!< Returned from Pop and Steal.
    FAILED_SIZE, //!< Returned from Push, Pop and Steal.
  };

  // [Dynamic Circular Work-Stealing Deque](https://www.dre.vanderbilt.edu/~schmidt/PDF/work-stealing-dequeue.pdf)
  // [Correct and Efficient Work-Stealing for Weak Memory Models](https://fzn.fr/readings/ppopp13.pdf)
  template<typename T>
  class Job_CacheAlign SPMCDeque
  {
   private:
    using AtomicT = std::atomic<T>;

    static_assert(AtomicT::is_always_lock_free, "T should be a small pointer-like type, expected to be lock-free when atomic.");

   public:
    using size_type        = std::int64_t; // NOTE(SR): Must be signed for Pop to work correctly on empty queue.
    using atomic_size_type = std::atomic<size_type>;

   private:
    atomic_size_type m_ProducerIndex;
    atomic_size_type m_ConsumerIndex;
    unsigned char    m_Padding0[k_FalseSharingPadSize - sizeof(m_ProducerIndex) - sizeof(m_ConsumerIndex)];

    // Shared 'Immutable' State

    AtomicT*  m_Data;
    size_type m_Capacity;
    size_type m_CapacityMask;

   public:
    SPMCDeque()  = default;
    ~SPMCDeque() = default;

    // NOTE(SR): Not thread safe.
    void Initialize(AtomicT* const memory_backing, const size_type capacity) noexcept
    {
      m_ProducerIndex = 0;
      m_ConsumerIndex = 0;
      m_Data          = memory_backing;
      m_Capacity      = capacity;
      m_CapacityMask  = capacity - 1;

      JobAssert((m_Capacity & m_CapacityMask) == 0, "Capacity must be a power of 2.");
    }

    // NOTE(SR): Must be called by the owning thread.

    SPMCDequeStatus Push(const T& value)
    {
      const size_type write_index = m_ProducerIndex.load(std::memory_order_relaxed);
      const size_type read_index  = m_ConsumerIndex.load(std::memory_order_acquire);
      const size_type size        = write_index - read_index;

      if (size > m_CapacityMask)
      {
        return SPMCDequeStatus::FAILED_SIZE;
      }

      ElementAt(write_index)->store(value, std::memory_order_relaxed);

      m_ProducerIndex.store(write_index + 1, std::memory_order_release);

      return SPMCDequeStatus::SUCCESS;
    }

    SPMCDequeStatus Pop(T* const out_value)
    {
      const size_type producer_index = m_ProducerIndex.load(std::memory_order_relaxed) - 1;

      // Reserve the slot at the producer end.
      m_ProducerIndex.store(producer_index, std::memory_order_relaxed);

      // The above store needs to happen before the next read
      // for a consistent view of the buffer.
      //
      // `m_ProducerIndex` can only be written to by this thread,
      // so first reserve a slot, then read what the other threads have to say.
      //
      std::atomic_thread_fence(std::memory_order_seq_cst);

      size_type consumer_index = m_ConsumerIndex.load(std::memory_order_relaxed);

      if (consumer_index <= producer_index)
      {
        if (consumer_index == producer_index) // Only one item in queue
        {
          const bool successful_pop = m_ConsumerIndex.compare_exchange_strong(consumer_index, consumer_index + 1, std::memory_order_seq_cst, std::memory_order_relaxed);

          if (successful_pop)
          {
            *out_value = ElementAt(producer_index)->load(std::memory_order_relaxed);
          }

          // Win or lose the race, the deque is now empty; restore the producer index.
          m_ProducerIndex.store(producer_index + 1, std::memory_order_relaxed);
          return successful_pop ? SPMCDequeStatus::SUCCESS : SPMCDequeStatus::FAILED_RACE;
        }

        // More than one item: the reserved slot cannot be reached by stealers.
        *out_value = ElementAt(producer_index)->load(std::memory_order_relaxed);
        return SPMCDequeStatus::SUCCESS;
      }

      // Empty queue, so restore to canonical empty.
      m_ProducerIndex.store(producer_index + 1, std::memory_order_seq_cst);
      return SPMCDequeStatus::FAILED_SIZE;
    }

    // NOTE(SR): Must be called by non-owning threads.

    SPMCDequeStatus Steal(T* const out_value)
    {
      size_type read_index = m_ConsumerIndex.load(std::memory_order_acquire);

      // Must fully read `m_ConsumerIndex` before we read the producer-owned `m_ProducerIndex`.
      std::atomic_thread_fence(std::memory_order_seq_cst);

      const size_type write_index = m_ProducerIndex.load(std::memory_order_acquire);

      if (read_index < write_index)
      {
        // Must load the result before the CAS, since a Push can overwrite
        // this slot concurrently right after the CAS.
        T result = ElementAt(read_index)->load(std::memory_order_relaxed);

        // Need strong memory ordering to read the element before the CAS.
        if (m_ConsumerIndex.compare_exchange_strong(read_index, read_index + 1, std::memory_order_seq_cst, std::memory_order_relaxed))
        {
          *out_value = std::move(result);
          return SPMCDequeStatus::SUCCESS;
        }

        return SPMCDequeStatus::FAILED_RACE;
      }

      return SPMCDequeStatus::FAILED_SIZE;
    }

   private:
    AtomicT* ElementAt(const size_type index) const noexcept
    {
      return m_Data + (index & m_CapacityMask);
    }
  };
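
  // Usage sketch (illustrative, not part of the original header; `Task`,
  // `slots`, `deque`, and `task` are hypothetical names): the owning worker
  // pushes and pops at the bottom while any other worker steals from the top.
  //
  //   struct Task { /* ... */ };
  //
  //   static std::atomic<Task*>    slots[256];
  //   static Job::SPMCDeque<Task*> deque;
  //   deque.Initialize(slots, 256);
  //
  //   // Owning thread only:
  //   Task task0;
  //   deque.Push(&task0);
  //
  //   Task* task;
  //   if (deque.Pop(&task) == Job::SPMCDequeStatus::SUCCESS) { /* run task */ }
  //
  //   // Any other thread:
  //   if (deque.Steal(&task) == Job::SPMCDequeStatus::SUCCESS) { /* run task */ }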

  // https://www.youtube.com/watch?v=_qaKkHuHYE0&ab_channel=CppCon
  class Job_CacheAlign MPMCQueue
  {
   public:
    using size_type        = std::size_t;
    using atomic_size_type = std::atomic<size_type>;
    using value_type       = unsigned char; // byte

   private:
    struct IndexRange
    {
      size_type start;
      size_type end;
    };

   private:
    atomic_size_type m_ProducerPending;
    atomic_size_type m_ProducerCommited;
    unsigned char    m_Padding0[k_FalseSharingPadSize - sizeof(atomic_size_type) * 2];
    atomic_size_type m_ConsumerPending;
    atomic_size_type m_ConsumerCommited;
    unsigned char    m_Padding1[k_FalseSharingPadSize - sizeof(atomic_size_type) * 2];
    value_type*      m_Queue;
    size_type        m_Capacity;
    unsigned char    m_Padding2[k_FalseSharingPadSize - sizeof(m_Queue) - sizeof(m_Capacity)];

   public:
    MPMCQueue()  = default;
    ~MPMCQueue() = default;

    // NOTE(SR): Not thread safe.
    void Initialize(value_type* const memory_backing, const size_type capacity) noexcept
    {
      m_ProducerPending.store(0, std::memory_order_relaxed);
      m_ProducerCommited.store(0, std::memory_order_relaxed);
      m_ConsumerPending.store(0, std::memory_order_relaxed);
      m_ConsumerCommited.store(0, std::memory_order_relaxed);
      m_Queue    = memory_backing;
      m_Capacity = capacity;
    }

    bool PushExact(const value_type* elements, const size_type num_elements)
    {
      return PushImpl<true>(elements, num_elements) != 0u;
    }

    size_type PushUpTo(const value_type* elements, const size_type num_elements)
    {
      return PushImpl<false>(elements, num_elements);
    }

    bool PopExact(value_type* out_elements, const size_type num_elements)
    {
      return PopImpl<true>(out_elements, num_elements) != 0u;
    }

    size_type PopUpTo(value_type* out_elements, const size_type num_elements)
    {
      return PopImpl<false>(out_elements, num_elements);
    }

   private:
    template<bool allOrNothing>
    size_type PushImpl(const value_type* elements, const size_type num_elements)
    {
      IndexRange range;
      if (RequestWriteRange<allOrNothing>(&range, num_elements))
      {
        const size_type written_elements = WriteElements(elements, range);
        Commit(&m_ProducerCommited, range);
        return written_elements;
      }

      return 0u;
    }

    template<bool allOrNothing>
    size_type PopImpl(value_type* out_elements, const size_type num_elements)
    {
      IndexRange range;
      if (RequestPopRange<allOrNothing>(&range, num_elements))
      {
        const size_type read_elements = ReadElements(out_elements, range);
        Commit(&m_ConsumerCommited, range);
        return read_elements;
      }

      return 0u;
    }

    template<bool allOrNothing>
    bool RequestWriteRange(IndexRange* out_range, const size_type num_items)
    {
      size_type old_head, new_head;

      old_head = m_ProducerPending.load(std::memory_order_relaxed);
      do
      {
        const size_type tail = m_ConsumerCommited.load(std::memory_order_acquire);

        size_type capacity_left = Distance(old_head, tail);
        if constexpr (allOrNothing)
        {
          if (capacity_left < num_items)
          {
            capacity_left = 0;
          }
        }

        if (capacity_left == 0)
        {
          return false;
        }

        const size_type num_element_to_write = capacity_left < num_items ? capacity_left : num_items;

        new_head = old_head + num_element_to_write;

      } while (!m_ProducerPending.compare_exchange_weak(old_head, new_head, std::memory_order_relaxed, std::memory_order_relaxed));

      *out_range = {old_head, new_head};
      return true;
    }

    template<bool allOrNothing>
    bool RequestPopRange(IndexRange* out_range, const size_type num_items)
    {
      size_type old_tail, new_tail;

      old_tail = m_ConsumerPending.load(std::memory_order_relaxed);
      do
      {
        const size_type head     = m_ProducerCommited.load(std::memory_order_acquire);
        const size_type distance = Distance(head, old_tail);

        size_type capacity_left = (m_Capacity - distance);
        if constexpr (allOrNothing)
        {
          if (capacity_left < num_items)
          {
            capacity_left = 0;
          }
        }

        if (!capacity_left)
        {
          return false;
        }

        const size_type num_element_to_read = capacity_left < num_items ? capacity_left : num_items;

        new_tail = old_tail + num_element_to_read;

      } while (!m_ConsumerPending.compare_exchange_weak(old_tail, new_tail, std::memory_order_relaxed, std::memory_order_relaxed));

      *out_range = {old_tail, new_tail};
      return true;
    }

    size_type WriteElements(const value_type* const elements, const IndexRange range)
    {
      const size_type real_start             = range.start % m_Capacity;
      const size_type write_size             = Distance(real_start, range.end % m_Capacity);
      const size_type capacity_before_split  = m_Capacity - real_start;
      const size_type num_items_before_split = write_size < capacity_before_split ? write_size : capacity_before_split;
      const size_type num_items_after_split  = write_size - num_items_before_split;

      std::copy_n(elements + 0u, num_items_before_split, m_Queue + real_start);
      std::copy_n(elements + num_items_before_split, num_items_after_split, m_Queue + 0u);

      return write_size;
    }

    size_type ReadElements(value_type* const out_elements, const IndexRange range) const
    {
      const size_type real_start             = range.start % m_Capacity;
      const size_type read_size              = Distance(real_start, range.end % m_Capacity);
      const size_type capacity_before_split  = m_Capacity - real_start;
      const size_type num_items_before_split = read_size < capacity_before_split ? read_size : capacity_before_split;
      const size_type num_items_after_split  = read_size - num_items_before_split;

      std::copy_n(std::make_move_iterator(m_Queue + real_start), num_items_before_split, out_elements + 0u);
      std::copy_n(std::make_move_iterator(m_Queue + 0u), num_items_after_split, out_elements + num_items_before_split);

      return read_size;
    }

    void Commit(atomic_size_type* commit, const IndexRange range) const
    {
      // Ranges must be published in reservation order: the CAS can only
      // succeed once every earlier range has been committed.
      // `compare_exchange_weak` wants an lvalue, so `start_copy` is re-seeded
      // with `range.start` on every attempt.
      size_type start_copy;
      while (!commit->compare_exchange_weak(
        start_copy = range.start,
        range.end,
        std::memory_order_release,
        std::memory_order_relaxed))
      {
        PauseProcessor();
      }
    }

    size_type Distance(const size_type a, const size_type b) const
    {
      return (b > a) ? (b - a) : m_Capacity - a + b;
    }
  };
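
  // Usage sketch (illustrative, not part of the original header; `buffer` and
  // `mpmc` are hypothetical names): any number of threads may push byte runs
  // while any number of threads pop them. Unlike the other queues, the
  // capacity is not required to be a power of 2 since indexing uses modulo.
  //
  //   static unsigned char  buffer[4096];
  //   static Job::MPMCQueue mpmc;
  //   mpmc.Initialize(buffer, sizeof(buffer));
  //
  //   const unsigned char message[4] = {1, 2, 3, 4};
  //   mpmc.PushExact(message, sizeof(message)); // All 4 bytes or nothing.
  //
  //   unsigned char out[4];
  //   if (mpmc.PopExact(out, sizeof(out))) { /* got exactly 4 bytes */ }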

#undef Job_CacheAlign
} // namespace Job

#endif // JOB_QUEUE_HPP

/******************************************************************************/
/*
  MIT License

  Copyright (c) 2024-2025 Shareef Abdoul-Raheem

  Permission is hereby granted, free of charge, to any person obtaining a copy
  of this software and associated documentation files (the "Software"), to deal
  in the Software without restriction, including without limitation the rights
  to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
  copies of the Software, and to permit persons to whom the Software is
  furnished to do so, subject to the following conditions:

  The above copyright notice and this permission notice shall be included in all
  copies or substantial portions of the Software.

  THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
  AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
  OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  SOFTWARE.
*/
/******************************************************************************/