diff --git a/lib/pls/include/pls/internal/data_structures/work_stealing_deque.h b/lib/pls/include/pls/internal/data_structures/work_stealing_deque.h
index 9c542e9..4c89b6b 100644
--- a/lib/pls/include/pls/internal/data_structures/work_stealing_deque.h
+++ b/lib/pls/include/pls/internal/data_structures/work_stealing_deque.h
@@ -21,10 +21,15 @@ using offset_t = stamped_integer::member_t;
 
 // Single Item in the deque
 class work_stealing_deque_item {
+  // TODO: In our opinion these atomics are a pure formality to make the thread sanitizer happy,
+  //  as the race occurs in 'pop_head', where ALL CASES reading a corrupt/old value are cases
+  //  where the next CAS fails anyway, thus making these corrupted values have no influence on
+  //  the overall program execution.
+  //  ==> If we find performance problems in this queue, try removing the atomics again.
   // Pointer to the actual data
-  pointer_t data_;
+  std::atomic<pointer_t> data_;
   // Index (relative to stack base) to the next and previous element
-  offset_t next_item_;
+  std::atomic<offset_t> next_item_;
   offset_t previous_item_;
 
  public:
@@ -32,7 +37,7 @@ class work_stealing_deque_item {
 
   template<typename Item>
   Item *data() {
-    return reinterpret_cast<Item *>(data_);
+    return reinterpret_cast<Item *>(data_.load());
   }
 
   template<typename Item>
@@ -40,7 +45,7 @@
     data_ = reinterpret_cast<pointer_t>(data);
   }
 
-  offset_t next_item() const { return next_item_; }
+  offset_t next_item() const { return next_item_.load(); }
   void set_next_item(offset_t next_item) { next_item_ = next_item; }
 
   offset_t previous_item() const { return previous_item_; }
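
For context on the TODO in the first hunk, below is a minimal, hypothetical sketch of the pattern it describes; the names (stamped_head, sketch_item, pop_head_sketch) are illustrative stand-ins and not the real work_stealing_deque interface. The point it tries to show: a popping thread may read a stale data_/next_item_ value, but such a stale read only happens on a head slot another thread has already claimed, so the subsequent compare-and-swap on the stamped head fails and the stale values are discarded.

// Hedged sketch only: illustrates why a stale read followed by a failing CAS is harmless.
// The layout below is a hypothetical stand-in, not the actual deque implementation.
#include <atomic>
#include <cstdint>

struct stamped_head {
  std::uint32_t stamp;   // ABA counter, bumped on every successful pop
  std::uint32_t offset;  // index of the current head item
};

struct sketch_item {
  std::atomic<void *> data_;              // analogous to work_stealing_deque_item::data_
  std::atomic<std::uint32_t> next_item_;  // analogous to next_item_
};

sketch_item items[64];                    // hypothetical backing storage
std::atomic<stamped_head> head{{0, 0}};

void *pop_head_sketch() {
  stamped_head expected = head.load();
  sketch_item &current = items[expected.offset % 64];

  // These loads can race with a concurrent push/steal that reuses the slot.
  // If that happens, 'expected' is already outdated, the CAS below fails,
  // and the possibly-stale values never influence the result.
  void *data = current.data_.load();
  std::uint32_t next = current.next_item_.load();

  stamped_head desired{expected.stamp + 1, next};
  if (head.compare_exchange_strong(expected, desired)) {
    return data;    // we owned the head, so the values we read were consistent
  }
  return nullptr;   // lost the race; discard whatever we read
}

In this sketch the atomics on data_/next_item_ only silence the tool's data-race report; the stamped CAS on the head is what actually guards correctness, which is the argument the TODO makes for possibly removing them again if they hurt performance.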