BluFedora Job System v1.0.0
This is a C++ job system library for use in game engines.
job_queue.hpp
/******************************************************************************/
/*!
 * @file   job_queue.hpp
 * @author Shareef Abdoul-Raheem (https://blufedora.github.io/)
 * @brief
 *   Concurrent queue implementations used by the job system.
 *
 * @copyright Copyright (c) 2024-2025 Shareef Abdoul-Raheem
 */
/******************************************************************************/
#ifndef JOB_QUEUE_HPP
#define JOB_QUEUE_HPP

#include "job_api.hpp"    // PauseProcessor
#include "job_assert.hpp" // JobAssert

#include <algorithm> // copy_n
#include <atomic>    // atomic<T>
#include <cstddef>   // size_t
#include <cstdint>   // int64_t
#include <iterator>  // make_move_iterator
#include <mutex>     // mutex
#include <new>       // hardware_destructive_interference_size, operator new
#include <utility>   // move

namespace Job
{
  static constexpr std::size_t k_FalseSharingPadSize = std::hardware_destructive_interference_size;

#define Job_CacheAlign alignas(k_FalseSharingPadSize)

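  // Illustrative note (not from the original header): `Job_CacheAlign` exists
  // to keep state owned by different threads on separate cache lines, e.g.:
  //
  //   struct Job_CacheAlign WorkerLocalState
  //   {
  //     std::atomic<std::size_t> counter;
  //   };
  //
  // `WorkerLocalState` is a hypothetical type. The queues below additionally
  // pad between members by hand with `k_FalseSharingPadSize` sized byte arrays.
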
  template<typename T>
  class LockedQueue
  {
   public:
    using size_type = std::size_t;

   private:
    std::mutex m_Lock;
    T*         m_Data;
    size_type  m_Capacity;
    size_type  m_CapacityMask;
    size_type  m_WriteIndex;
    size_type  m_Size;

   public:
    void Initialize(T* const memory_backing, const size_type capacity) noexcept
    {
      m_Data         = memory_backing;
      m_Capacity     = capacity;
      m_CapacityMask = capacity - 1;
      m_WriteIndex   = 0u;
      m_Size         = 0u;

      JobAssert((m_Capacity & m_CapacityMask) == 0, "Capacity must be a power of 2.");
    }
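
    // Illustrative note (not from the original header): with capacity 8 the
    // mask is 7 (0b111), so raw index 9 maps to slot (9 & 7) == 1. Masking
    // replaces a modulo, which is why capacity must be a power of two.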

    bool Push(const T& value)
    {
      const std::lock_guard<std::mutex> guard(m_Lock);
      (void)guard;

      if (m_Size == m_Capacity)
      {
        return false;
      }

      *elementAt(m_WriteIndex++) = value;
      ++m_Size;

      return true;
    }

    bool Pop(T* const out_value)
    {
      JobAssert(out_value != nullptr, "`out_value` cannot be a nullptr.");

      const std::lock_guard<std::mutex> guard(m_Lock);
      (void)guard;

      if (m_Size == 0u)
      {
        return false;
      }

      *out_value = *elementAt(m_WriteIndex - m_Size);
      --m_Size;

      return true;
    }

   private:
    size_type mask(const size_type raw_index) const noexcept
    {
      return raw_index & m_CapacityMask;
    }

    T* elementAt(const size_type raw_index) const noexcept
    {
      return m_Data + mask(raw_index);
    }
  };
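
  // Usage sketch (illustrative, not part of the original header). Storage is
  // caller-owned and its length must be a power of two:
  //
  //   Job::LockedQueue<int> queue;
  //   static int            storage[8];
  //
  //   queue.Initialize(storage, 8);
  //   queue.Push(42);
  //
  //   int value;
  //   if (queue.Pop(&value))
  //   {
  //     // `value` is now 42; Pop returns false once the queue is empty.
  //   }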

  // [https://www.youtube.com/watch?v=K3P_Lmq6pw0]
  //
  // Single Producer, Single Consumer Lockfree Queue
  //
  template<typename T>
  class Job_CacheAlign SPSCQueue
  {
   public:
    using size_type        = std::size_t;
    using atomic_size_type = std::atomic<size_type>;

   private:
    // Writer Thread

    atomic_size_type m_ProducerIndex;
    unsigned char    m_Padding0[k_FalseSharingPadSize - sizeof(m_ProducerIndex)];
    size_type        m_CachedConsumerIndex;
    unsigned char    m_Padding1[k_FalseSharingPadSize - sizeof(m_CachedConsumerIndex)];

    // Reader Thread

    atomic_size_type m_ConsumerIndex;
    unsigned char    m_Padding2[k_FalseSharingPadSize - sizeof(m_ConsumerIndex)];
    size_type        m_CachedProducerIndex;
    unsigned char    m_Padding3[k_FalseSharingPadSize - sizeof(m_CachedProducerIndex)];

    // Shared 'Immutable' State

    T*            m_Data;
    size_type     m_Capacity;
    size_type     m_CapacityMask;
    unsigned char m_Padding4[k_FalseSharingPadSize - sizeof(m_Data) - sizeof(m_Capacity) - sizeof(m_CapacityMask)];

    static_assert(atomic_size_type::is_always_lock_free, "Expected to be lockfree.");

   public:
    SPSCQueue()  = default;
    ~SPSCQueue() = default;

    void Initialize(T* const memory_backing, const size_type capacity) noexcept
    {
      m_ProducerIndex.store(0, std::memory_order_relaxed);
      m_CachedConsumerIndex = 0u;
      m_ConsumerIndex.store(0, std::memory_order_relaxed);
      m_CachedProducerIndex = 0u;
      m_Data         = memory_backing;
      m_Capacity     = capacity;
      m_CapacityMask = capacity - 1;

      JobAssert((m_Capacity & m_CapacityMask) == 0, "Capacity must be a power of 2.");
    }

    bool Push(const T& value)
    {
      return PushLazy([&value](T* const destination) { ::new (destination) T(value); });
    }

    bool Pop(T* const out_value)
    {
      JobAssert(out_value != nullptr, "`out_value` cannot be a nullptr.");

      return PopLazy([out_value](T&& value) { *out_value = std::move(value); });
    }

    // Memory passed into `callback` is uninitialized and must be placement-new'ed into.
    template<typename CallbackFn>
    bool PushLazy(CallbackFn&& callback)
    {
      const size_type write_index = m_ProducerIndex.load(std::memory_order_relaxed);

      if (IsFull(write_index, m_CachedConsumerIndex))
      {
        m_CachedConsumerIndex = m_ConsumerIndex.load(std::memory_order_acquire);
        if (IsFull(write_index, m_CachedConsumerIndex))
        {
          return false;
        }
      }

      callback(ElementAt(write_index));
      m_ProducerIndex.store(write_index + 1, std::memory_order_release);

      return true;
    }

    template<typename CallbackFn>
    bool PopLazy(CallbackFn&& callback)
    {
      const size_type read_index = m_ConsumerIndex.load(std::memory_order_relaxed);

      if (IsEmpty(m_CachedProducerIndex, read_index))
      {
        m_CachedProducerIndex = m_ProducerIndex.load(std::memory_order_acquire);
        if (IsEmpty(m_CachedProducerIndex, read_index))
        {
          return false;
        }
      }

      T* const element = ElementAt(read_index);
      callback(std::move(*element));
      element->~T();
      m_ConsumerIndex.store(read_index + 1, std::memory_order_release);

      return true;
    }

   private:
    // One slot is intentionally left unused so a full queue can be told apart
    // from an empty one.
    bool IsFull(const size_type head, const size_type tail) const noexcept
    {
      return ((head + 1) & m_CapacityMask) == tail;
    }

    static bool IsEmpty(const size_type head, const size_type tail) noexcept
    {
      return head == tail;
    }

    T* ElementAt(const size_type index) const noexcept
    {
      return m_Data + (index & m_CapacityMask);
    }
  };
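
  // Usage sketch (illustrative, not part of the original header). Exactly one
  // thread may call Push/PushLazy while exactly one other thread calls
  // Pop/PopLazy; the lazy variants construct and consume the element in place:
  //
  //   Job::SPSCQueue<int> queue;
  //   static int          storage[64];
  //   queue.Initialize(storage, 64);
  //
  //   // Producer thread:
  //   queue.PushLazy([](int* const slot) { ::new (slot) int(42); });
  //
  //   // Consumer thread:
  //   int value;
  //   if (queue.Pop(&value))
  //   {
  //     // `value` is 42 once the producer's store is visible.
  //   }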

  enum class SPMCDequeStatus
  {
    SUCCESS,     //!< Returned from Push, Pop and Steal.
    FAILED_RACE, //!< Returned from Pop and Steal.
    FAILED_SIZE, //!< Returned from Push, Pop and Steal.
  };

  // [Dynamic Circular Work-Stealing Deque](https://www.dre.vanderbilt.edu/~schmidt/PDF/work-stealing-dequeue.pdf)
  // [Correct and Efficient Work-Stealing for Weak Memory Models](https://fzn.fr/readings/ppopp13.pdf)
  template<typename T>
  class Job_CacheAlign SPMCDeque
  {
   private:
    using AtomicT = std::atomic<T>;

    static_assert(AtomicT::is_always_lock_free, "T should be a small pointer-like type, expected to be lock-free when atomic.");

   public:
    using size_type        = std::int64_t; // NOTE(SR): Must be signed for Pop to work correctly on an empty queue.
    using atomic_size_type = std::atomic<size_type>;

   private:
    atomic_size_type m_ProducerIndex;
    atomic_size_type m_ConsumerIndex;
    unsigned char    m_Padding0[k_FalseSharingPadSize - sizeof(m_ProducerIndex) - sizeof(m_ConsumerIndex)];

    // Shared 'Immutable' State

    AtomicT*  m_Data;
    size_type m_Capacity;
    size_type m_CapacityMask;

   public:
    SPMCDeque()  = default;
    ~SPMCDeque() = default;

    void Initialize(AtomicT* const memory_backing, const size_type capacity) noexcept
    {
      m_ProducerIndex = 0;
      m_ConsumerIndex = 0;
      m_Data          = memory_backing;
      m_Capacity      = capacity;
      m_CapacityMask  = capacity - 1;

      JobAssert((m_Capacity & m_CapacityMask) == 0, "Capacity must be a power of 2.");
    }

    // NOTE(SR): Must be called by the owning thread.

    SPMCDequeStatus Push(const T& value)
    {
      const size_type write_index = m_ProducerIndex.load(std::memory_order_relaxed);
      const size_type read_index  = m_ConsumerIndex.load(std::memory_order_acquire);
      const size_type size        = write_index - read_index;

      if (size > m_CapacityMask)
      {
        return SPMCDequeStatus::FAILED_SIZE;
      }

      ElementAt(write_index)->store(value, std::memory_order_relaxed);

      m_ProducerIndex.store(write_index + 1, std::memory_order_release);

      return SPMCDequeStatus::SUCCESS;
    }

    SPMCDequeStatus Pop(T* const out_value)
    {
      const size_type producer_index = m_ProducerIndex.load(std::memory_order_relaxed) - 1;

      // Reserve the slot at the producer end.
      m_ProducerIndex.store(producer_index, std::memory_order_relaxed);

      // The above store needs to happen before the next read
      // to have a consistent view of the buffer.
      //
      // `m_ProducerIndex` can only be written to by this thread,
      // so first reserve a slot, then read what the other threads have to say.
      //
      std::atomic_thread_fence(std::memory_order_seq_cst);

      size_type consumer_index = m_ConsumerIndex.load(std::memory_order_relaxed);

      if (consumer_index <= producer_index)
      {
        if (consumer_index == producer_index) // Only one item in the queue.
        {
          const bool successful_pop = m_ConsumerIndex.compare_exchange_strong(consumer_index, consumer_index + 1, std::memory_order_seq_cst, std::memory_order_relaxed);

          if (successful_pop)
          {
            *out_value = ElementAt(producer_index)->load(std::memory_order_relaxed);
          }

          // Restore to canonical empty; the last element was won or lost in the CAS race.
          m_ProducerIndex.store(producer_index + 1, std::memory_order_relaxed);
          return successful_pop ? SPMCDequeStatus::SUCCESS : SPMCDequeStatus::FAILED_RACE;
        }

        *out_value = ElementAt(producer_index)->load(std::memory_order_relaxed);
        return SPMCDequeStatus::SUCCESS;
      }

      // Empty queue, so restore to canonical empty.
      m_ProducerIndex.store(producer_index + 1, std::memory_order_seq_cst);
      return SPMCDequeStatus::FAILED_SIZE;
    }

    // NOTE(SR): Must be called by a non-owning thread.

    SPMCDequeStatus Steal(T* const out_value)
    {
      size_type read_index = m_ConsumerIndex.load(std::memory_order_acquire);

      // Must fully read `m_ConsumerIndex` before we read the producer-owned `m_ProducerIndex`.
      std::atomic_thread_fence(std::memory_order_seq_cst);

      const size_type write_index = m_ProducerIndex.load(std::memory_order_acquire);

      if (read_index < write_index)
      {
        // Must load the result before the CAS, since a push can happen concurrently right after the CAS.
        T result = ElementAt(read_index)->load(std::memory_order_relaxed);

        // Need strong memory ordering to read the element before the CAS.
        if (m_ConsumerIndex.compare_exchange_strong(read_index, read_index + 1, std::memory_order_seq_cst, std::memory_order_relaxed))
        {
          *out_value = std::move(result);
          return SPMCDequeStatus::SUCCESS;
        }

        return SPMCDequeStatus::FAILED_RACE;
      }

      return SPMCDequeStatus::FAILED_SIZE;
    }

   private:
    AtomicT* ElementAt(const size_type index) const noexcept
    {
      return m_Data + (index & m_CapacityMask);
    }
  };
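
  // Usage sketch (illustrative, not part of the original header; `Task` is a
  // hypothetical job type). The owning worker pushes and pops at one end while
  // any other worker may steal from the opposite end:
  //
  //   struct Task;
  //   Job::SPMCDeque<Task*>     deque;
  //   static std::atomic<Task*> storage[256];
  //   deque.Initialize(storage, 256);
  //
  //   // Owning thread:
  //   deque.Push(my_task);
  //   Task* task;
  //   if (deque.Pop(&task) == Job::SPMCDequeStatus::SUCCESS) { /* run task */ }
  //
  //   // Stealing threads; FAILED_RACE means another thief won, so retrying is fine:
  //   if (deque.Steal(&task) == Job::SPMCDequeStatus::SUCCESS) { /* run task */ }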

  // https://www.youtube.com/watch?v=_qaKkHuHYE0&ab_channel=CppCon
  class Job_CacheAlign MPMCQueue
  {
   public:
    using size_type        = std::size_t;
    using atomic_size_type = std::atomic<size_type>;
    using value_type       = unsigned char; // byte

   private:
    struct IndexRange
    {
      size_type start;
      size_type end;
    };

   private:
    atomic_size_type m_ProducerPending;
    atomic_size_type m_ProducerCommited;
    unsigned char    m_Padding0[k_FalseSharingPadSize - sizeof(atomic_size_type) * 2];
    atomic_size_type m_ConsumerPending;
    atomic_size_type m_ConsumerCommited;
    unsigned char    m_Padding1[k_FalseSharingPadSize - sizeof(atomic_size_type) * 2];
    value_type*      m_Queue;
    size_type        m_Capacity;
    unsigned char    m_Padding2[k_FalseSharingPadSize - sizeof(m_Queue) - sizeof(m_Capacity)];

   public:
    MPMCQueue()  = default;
    ~MPMCQueue() = default;

    void Initialize(value_type* const memory_backing, const size_type capacity) noexcept
    {
      m_ProducerPending.store(0, std::memory_order_relaxed);
      m_ProducerCommited.store(0, std::memory_order_relaxed);
      m_ConsumerPending.store(0, std::memory_order_relaxed);
      m_ConsumerCommited.store(0, std::memory_order_relaxed);
      m_Queue    = memory_backing;
      m_Capacity = capacity;
    }

    //

    bool PushExact(const value_type* elements, const size_type num_elements)
    {
      return PushImpl<true>(elements, num_elements) != 0u;
    }

    size_type PushUpTo(const value_type* elements, const size_type num_elements)
    {
      return PushImpl<false>(elements, num_elements);
    }

    bool PopExact(value_type* out_elements, const size_type num_elements)
    {
      return PopImpl<true>(out_elements, num_elements) != 0u;
    }

    size_type PopUpTo(value_type* out_elements, const size_type num_elements)
    {
      return PopImpl<false>(out_elements, num_elements);
    }

   private:
    template<bool allOrNothing>
    size_type PushImpl(const value_type* elements, const size_type num_elements)
    {
      IndexRange range;
      if (RequestWriteRange<allOrNothing>(&range, num_elements))
      {
        const size_type written_elements = WriteElements(elements, range);
        Commit(&m_ProducerCommited, range);
        return written_elements;
      }

      return 0u;
    }

    template<bool allOrNothing>
    size_type PopImpl(value_type* out_elements, const size_type num_elements)
    {
      IndexRange range;
      if (RequestPopRange<allOrNothing>(&range, num_elements))
      {
        const size_type read_elements = ReadElements(out_elements, range);
        Commit(&m_ConsumerCommited, range);
        return read_elements;
      }

      return 0u;
    }

    template<bool allOrNothing>
    bool RequestWriteRange(IndexRange* out_range, const size_type num_items)
    {
      size_type old_head, new_head;

      old_head = m_ProducerPending.load(std::memory_order_relaxed);
      do
      {
        const size_type tail = m_ConsumerCommited.load(std::memory_order_acquire);

        size_type capacity_left = Distance(old_head, tail);
        if constexpr (allOrNothing)
        {
          if (capacity_left < num_items)
          {
            capacity_left = 0;
          }
        }

        if (capacity_left == 0)
        {
          return false;
        }

        const size_type num_element_to_write = capacity_left < num_items ? capacity_left : num_items;

        new_head = old_head + num_element_to_write;

      } while (!m_ProducerPending.compare_exchange_weak(old_head, new_head, std::memory_order_relaxed, std::memory_order_relaxed));

      *out_range = {old_head, new_head};
      return true;
    }

    template<bool allOrNothing>
    bool RequestPopRange(IndexRange* out_range, const size_type num_items)
    {
      size_type old_tail, new_tail;

      old_tail = m_ConsumerPending.load(std::memory_order_relaxed);
      do
      {
        const size_type head     = m_ProducerCommited.load(std::memory_order_acquire);
        const size_type distance = Distance(head, old_tail);

        size_type capacity_left = (m_Capacity - distance);
        if constexpr (allOrNothing)
        {
          if (capacity_left < num_items)
          {
            capacity_left = 0;
          }
        }

        if (!capacity_left)
        {
          return false;
        }

        const size_type num_element_to_read = capacity_left < num_items ? capacity_left : num_items;

        new_tail = old_tail + num_element_to_read;

      } while (!m_ConsumerPending.compare_exchange_weak(old_tail, new_tail, std::memory_order_relaxed, std::memory_order_relaxed));

      *out_range = {old_tail, new_tail};
      return true;
    }

    size_type WriteElements(const value_type* const elements, const IndexRange range)
    {
      const size_type real_start             = range.start % m_Capacity;
      const size_type write_size             = Distance(real_start, range.end % m_Capacity);
      const size_type capacity_before_split  = m_Capacity - real_start;
      const size_type num_items_before_split = write_size < capacity_before_split ? write_size : capacity_before_split;
      const size_type num_items_after_split  = write_size - num_items_before_split;

      std::copy_n(elements + 0u, num_items_before_split, m_Queue + real_start);
      std::copy_n(elements + num_items_before_split, num_items_after_split, m_Queue + 0u);

      return write_size;
    }

    size_type ReadElements(value_type* const out_elements, const IndexRange range) const
    {
      const size_type real_start             = range.start % m_Capacity;
      const size_type read_size              = Distance(real_start, range.end % m_Capacity);
      const size_type capacity_before_split  = m_Capacity - real_start;
      const size_type num_items_before_split = read_size < capacity_before_split ? read_size : capacity_before_split;
      const size_type num_items_after_split  = read_size - num_items_before_split;

      std::copy_n(std::make_move_iterator(m_Queue + real_start), num_items_before_split, out_elements + 0u);
      std::copy_n(std::make_move_iterator(m_Queue + 0u), num_items_after_split, out_elements + num_items_before_split);

      return read_size;
    }

    void Commit(atomic_size_type* commit, const IndexRange range) const
    {
      // Wait until all earlier reservations have committed up to `range.start`,
      // then publish this range's end index.
      size_type start_copy;
      while (!commit->compare_exchange_weak(
        start_copy = range.start,
        range.end,
        std::memory_order_release,
        std::memory_order_relaxed))
      {
        PauseProcessor();
      }
    }

    size_type Distance(const size_type a, const size_type b) const
    {
      return (b > a) ? (b - a) : m_Capacity - a + b;
    }
  };
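
  // Usage sketch (illustrative, not part of the original header). Any number
  // of threads may push or pop byte spans concurrently; the *Exact variants
  // transfer all `num_elements` bytes or none, while the *UpTo variants may
  // transfer a shorter run and return the number of bytes moved:
  //
  //   Job::MPMCQueue       queue;
  //   static unsigned char storage[1024];
  //   queue.Initialize(storage, 1024);
  //
  //   const unsigned char message[4] = {1, 2, 3, 4};
  //   if (queue.PushExact(message, sizeof(message)))
  //   {
  //     unsigned char received[4];
  //     const std::size_t count = queue.PopUpTo(received, sizeof(received));
  //     // 0 <= count <= 4; a racing consumer may have taken some bytes first.
  //   }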

#undef Job_CacheAlign
}  // namespace Job

#endif // JOB_QUEUE_HPP

/******************************************************************************/
/*
  MIT License

  Copyright (c) 2024-2025 Shareef Abdoul-Raheem

  Permission is hereby granted, free of charge, to any person obtaining a copy
  of this software and associated documentation files (the "Software"), to deal
  in the Software without restriction, including without limitation the rights
  to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
  copies of the Software, and to permit persons to whom the Software is
  furnished to do so, subject to the following conditions:

  The above copyright notice and this permission notice shall be included in all
  copies or substantial portions of the Software.

  THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
  AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
  OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  SOFTWARE.
*/
/******************************************************************************/