#include "pls/internal/scheduling/lock_free/task.h" #include "pls/internal/scheduling/scheduler.h" namespace pls::internal::scheduling::lock_free { task *task::find_task(unsigned id, unsigned depth) { return thread_state::get().get_scheduler().thread_state_for(id).get_task_manager().get_task(depth); } void task::prepare_for_push(unsigned int pushing_thread_id) { data_structures::stamped_integer target_next; target_next.stamp_ = pushing_thread_id + 1; target_next.value_ = 0; resource_stack_next_.store(target_next, std::memory_order_relaxed); } bool task::push_task_chain(task *spare_task_chain, unsigned pushing_thread_id) { PLS_ASSERT(this->thread_id_ != spare_task_chain->thread_id_, "Makes no sense to push task onto itself, as it is not clean by definition."); PLS_ASSERT(this->depth_ == spare_task_chain->depth_, "Must only push tasks with correct depth."); data_structures::stamped_integer current_root = this->resource_stack_root_.load(std::memory_order_relaxed); data_structures::stamped_integer target_root; data_structures::stamped_integer expected_next_field; data_structures::stamped_integer target_next_field; expected_next_field.stamp_ = pushing_thread_id + 1; expected_next_field.value_ = 0; do { // current_root implicitly re-loaded by CAS in loop target_root.stamp_ = current_root.stamp_ + 1; target_root.value_ = spare_task_chain->thread_id_ + 1; // Prepare the target_next_field as required if (current_root.value_ == 0) { // Empty, simply push in with no successor. target_next_field.stamp_ = pushing_thread_id + 1; target_next_field.value_ = 0; } else { // Already an entry. Find it's corresponding task and set it as our successor. auto *current_root_task = find_task(current_root.value_ - 1, this->depth_); target_next_field.stamp_ = pushing_thread_id + 1; target_next_field.value_ = current_root_task->thread_id_ + 1; } if (!spare_task_chain->resource_stack_next_.compare_exchange_strong(expected_next_field, target_next_field, std::memory_order_relaxed)) { return false; } else { expected_next_field = target_next_field; } } while (!this->resource_stack_root_.compare_exchange_strong(current_root, target_root, std::memory_order_acq_rel)); return true; } void task::reset_task_chain(task *expected_content) { data_structures::stamped_integer current_root = this->resource_stack_root_.load(std::memory_order_relaxed); PLS_ASSERT(current_root.value_ == expected_content->thread_id_ + 1, "Must only reset the task chain if we exactly know its state! (current_root.value_)"); data_structures::stamped_integer target_root; target_root.stamp_ = current_root.stamp_ + 1; this->resource_stack_root_.store(target_root, std::memory_order_relaxed); } task *task::pop_task_chain() { data_structures::stamped_integer current_root = this->resource_stack_root_.load(std::memory_order_relaxed); data_structures::stamped_integer target_root; task *output_task; do { // current_root implicitly re-loaded by CAS in loop if (current_root.value_ == 0) { // Empty... return nullptr; } else { // Found something, try to pop it auto *current_root_task = find_task(current_root.value_ - 1, this->depth_); auto next_stack_cas = current_root_task->resource_stack_next_.load(std::memory_order_relaxed); target_root.stamp_ = current_root.stamp_ + 1; target_root.value_ = next_stack_cas.value_; output_task = current_root_task; } } while (!this->resource_stack_root_.compare_exchange_strong(current_root, target_root, std::memory_order_acq_rel)); output_task->resource_stack_next_.store({0, 0}); return output_task; } }