#include "pls/internal/scheduling/scheduler.h" #include "context_switcher/context_switcher.h" #include "pls/internal/scheduling/task_manager.h" #include "pls/internal/scheduling/thread_state.h" #include "pls/internal/base/thread.h" #include "pls/internal/base/error_handling.h" namespace pls { namespace internal { namespace scheduling { scheduler::scheduler(scheduler_memory &memory, const unsigned int num_threads, bool reuse_thread) : num_threads_{num_threads}, reuse_thread_{reuse_thread}, memory_{memory}, sync_barrier_{num_threads + 1 - reuse_thread}, terminated_{false} { if (num_threads_ > memory.max_threads()) { PLS_ERROR("Tried to create scheduler with more OS threads than pre-allocated memory."); } for (unsigned int i = 0; i < num_threads_; i++) { // Placement new is required, as the memory of `memory_` is not required to be initialized. memory.thread_state_for(i).set_scheduler(this); memory.thread_state_for(i).set_id(i); memory.thread_state_for(i).get_task_manager().set_thread_id(i); if (reuse_thread && i == 0) { continue; // Skip over first/main thread when re-using the users thread, as this one will replace the first one. } memory.thread_for(i) = base::thread(&scheduler::work_thread_main_loop, &memory_.thread_state_for(i)); } } scheduler::~scheduler() { terminate(); } void scheduler::work_thread_main_loop() { auto &scheduler = thread_state::get().get_scheduler(); while (true) { // Wait to be triggered scheduler.sync_barrier_.wait(); // Check for shutdown if (scheduler.terminated_) { return; } scheduler.work_thread_work_section(); // Sync back with main thread scheduler.sync_barrier_.wait(); } } void scheduler::work_thread_work_section() { auto &my_state = thread_state::get(); auto &my_task_manager = my_state.get_task_manager(); auto const num_threads = my_state.get_scheduler().num_threads(); if (my_state.get_id() == 0) { // Main Thread, kick off by executing the user's main code block. main_thread_starter_function_->run(); } while (!work_section_done_) { PLS_ASSERT(my_task_manager.check_task_chain(), "Must start stealing with a clean task chain."); // TODO: move steal routine into separate function const size_t target = my_state.get_rand() % num_threads; if (target == my_state.get_id()) { continue; } auto &target_state = my_state.get_scheduler().thread_state_for(target); task *traded_task = target_state.get_task_manager().steal_task(my_task_manager); if (traded_task != nullptr) { // The stealing procedure correctly changed our chain and active task. // Now we need to perform the 'post steal' actions (manage resources and execute the stolen task). PLS_ASSERT(my_task_manager.check_task_chain_forward(&my_task_manager.get_active_task()), "We are sole owner of this chain, it has to be valid!"); // Move the traded in resource of this active task over to the stack of resources. auto *stolen_task = &my_task_manager.get_active_task(); // Push the traded in resource on the resource stack to clear the traded_field for later steals/spawns. my_task_manager.push_resource_on_task(stolen_task, traded_task); auto optional_exchanged_task = external_trading_deque::get_trade_object(stolen_task); if (optional_exchanged_task) { // All good, we pushed the task over to the stack, nothing more to do PLS_ASSERT(*optional_exchanged_task == traded_task, "We are currently executing this, no one else can put another task in this field!"); } else { // The last other active thread took it as its spare resource... 
        // ...remove our traded object from the stack again (the stack must be empty now and no one may access it anymore).
        auto current_root = stolen_task->resource_stack_root_.load();
        current_root.stamp++;
        current_root.value = 0;
        stolen_task->resource_stack_root_.store(current_root);
      }

      // Execute the stolen task by jumping to its continuation.
      PLS_ASSERT(stolen_task->continuation_.valid(),
                 "A task that we can steal must have a valid continuation for us to start working.");
      stolen_task->is_synchronized_ = false;
      context_switcher::switch_context(std::move(stolen_task->continuation_));
      // Execution continues on this line once the stolen work is finished.
    }
  }
}

void scheduler::terminate() {
  if (terminated_) {
    return;
  }

  terminated_ = true;
  sync_barrier_.wait();

  for (unsigned int i = 0; i < num_threads_; i++) {
    if (reuse_thread_ && i == 0) {
      continue;
    }

    memory_.thread_for(i).join();
  }
}

thread_state &scheduler::thread_state_for(size_t id) {
  return memory_.thread_state_for(id);
}

void scheduler::sync() {
  thread_state::get().get_task_manager().sync();
}

} // namespace scheduling
} // namespace internal
} // namespace pls
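
// Usage sketch (illustrative only, not part of this translation unit): how a scheduler built from
// this file might be driven. The concrete scheduler_memory implementation and the entry point that
// sets main_thread_starter_function_ / work_section_done_ and releases the sync barrier live in the
// headers and are assumed here; `some_scheduler_memory` and `perform_work` below are placeholder
// names, not verified API.
//
//   #include "pls/internal/scheduling/scheduler.h"
//
//   int main() {
//     constexpr unsigned int NUM_THREADS = 8;
//     some_scheduler_memory memory;                              // hypothetical scheduler_memory subclass
//     pls::internal::scheduling::scheduler scheduler{memory, NUM_THREADS};
//
//     scheduler.perform_work([] {                                // assumed entry point, declared elsewhere
//       // Spawn/sync tasks here; idle workers participate by stealing
//       // inside work_thread_work_section() until the section is done.
//     });
//   }                                                            // ~scheduler() calls terminate() and joins the workers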