diff --git a/lib/pls/include/pls/internal/scheduling/scheduler.h b/lib/pls/include/pls/internal/scheduling/scheduler.h
index 6c345bc..ec0e4fb 100644
--- a/lib/pls/include/pls/internal/scheduling/scheduler.h
+++ b/lib/pls/include/pls/internal/scheduling/scheduler.h
@@ -5,6 +5,7 @@
 #include
 #include
 #include
+#include <memory>
 
 #include "pls/internal/helpers/profiler.h"
 
@@ -38,7 +39,17 @@ class scheduler {
    *
    * @param num_threads The number of worker threads to be created.
    */
-  explicit scheduler(unsigned int num_threads, size_t computation_depth, size_t stack_size, bool reuse_thread = true);
+  explicit scheduler(unsigned int num_threads,
+                     size_t computation_depth,
+                     size_t stack_size,
+                     bool reuse_thread = true);
+
+  template<typename ALLOC>
+  explicit scheduler(unsigned int num_threads,
+                     size_t computation_depth,
+                     size_t stack_size,
+                     bool reuse_thread,
+                     ALLOC &&stack_allocator);
 
   /**
    * The scheduler is implicitly terminated as soon as it leaves the scope.
@@ -105,8 +116,7 @@ class scheduler {
 
   bool terminated_;
 
-  // TODO: remove this into a public wrapper class with templating
-  base::mmap_stack_allocator stack_allocator_{};
+  std::shared_ptr<base::stack_allocator> stack_allocator_;
 };
 
 }
diff --git a/lib/pls/include/pls/internal/scheduling/scheduler_impl.h b/lib/pls/include/pls/internal/scheduling/scheduler_impl.h
index 158c0c7..3c3a73b 100644
--- a/lib/pls/include/pls/internal/scheduling/scheduler_impl.h
+++ b/lib/pls/include/pls/internal/scheduling/scheduler_impl.h
@@ -14,6 +14,46 @@ namespace pls::internal::scheduling {
 
+template<typename ALLOC>
+scheduler::scheduler(unsigned int num_threads,
+                     size_t computation_depth,
+                     size_t stack_size,
+                     bool reuse_thread,
+                     ALLOC &&stack_allocator) :
+    num_threads_{num_threads},
+    reuse_thread_{reuse_thread},
+    sync_barrier_{num_threads + 1 - reuse_thread},
+    worker_threads_{},
+    thread_states_{},
+    main_thread_starter_function_{nullptr},
+    work_section_done_{false},
+    terminated_{false},
+    stack_allocator_{std::make_shared<ALLOC>(std::forward<ALLOC>(stack_allocator))} {
+
+  worker_threads_.reserve(num_threads);
+  task_managers_.reserve(num_threads);
+  thread_states_.reserve(num_threads);
+  for (unsigned int i = 0; i < num_threads_; i++) {
+    auto &this_task_manager =
+        task_managers_.emplace_back(std::make_unique<task_manager>(i,
+                                                                    computation_depth,
+                                                                    stack_size,
+                                                                    stack_allocator_));
+    auto &this_thread_state = thread_states_.emplace_back(std::make_unique<thread_state>(*this, i, *this_task_manager));
+
+    if (reuse_thread && i == 0) {
+      worker_threads_.emplace_back();
+      continue; // Skip over first/main thread when re-using the user's thread, as this one will replace the first one.
+    }
+
+    auto *this_thread_state_pointer = this_thread_state.get();
+    worker_threads_.emplace_back([this_thread_state_pointer] {
+      thread_state::set(this_thread_state_pointer);
+      work_thread_main_loop();
+    });
+  }
+}
+
 class scheduler::init_function {
  public:
   virtual void run() = 0;
diff --git a/lib/pls/include/pls/internal/scheduling/task_manager.h b/lib/pls/include/pls/internal/scheduling/task_manager.h
index f182968..55eec55 100644
--- a/lib/pls/include/pls/internal/scheduling/task_manager.h
+++ b/lib/pls/include/pls/internal/scheduling/task_manager.h
@@ -26,7 +26,7 @@ class task_manager {
   explicit task_manager(unsigned thread_id,
                         size_t num_tasks,
                         size_t stack_size,
-                        stack_allocator &stack_allocator);
+                        std::shared_ptr<stack_allocator> stack_allocator);
   ~task_manager();
 
   void push_resource_on_task(task *target_task, task *spare_task_chain);
@@ -51,14 +51,29 @@ class task_manager {
 
   bool try_clean_return(context_switcher::continuation &result_cont);
 
+  /**
+   * Helper to check if a task chain is correctly chained forward from the given starting task.
+   *
+   * @param start_task The start of the chain to be checked.
+   * @return true if the chain is clean/consistent.
+   */
   bool check_task_chain_forward(task *start_task);
+  /**
+   * Helper to check if a task chain is correctly chained backward from the given starting task.
+   *
+   * @param start_task The end of the chain to be checked.
+   * @return true if the chain is clean/consistent.
+   */
   bool check_task_chain_backward(task *start_task);
+  /**
+   * Check the task chain maintained by this task manager.
+   *
+   * @return true if the chain is in a clean/consistent state.
+   */
   bool check_task_chain();
 
  private:
-  size_t num_tasks_;
-
-  stack_allocator &stack_allocator_;
+  std::shared_ptr<stack_allocator> stack_allocator_;
 
   std::vector<std::unique_ptr<task>> tasks_;
   task *active_task_;
diff --git a/lib/pls/src/internal/scheduling/scheduler.cpp b/lib/pls/src/internal/scheduling/scheduler.cpp
index 318de19..06ba412 100644
--- a/lib/pls/src/internal/scheduling/scheduler.cpp
+++ b/lib/pls/src/internal/scheduling/scheduler.cpp
@@ -7,41 +7,19 @@ namespace pls::internal::scheduling {
 
-scheduler::scheduler(unsigned int num_threads, size_t computation_depth, size_t stack_size, bool reuse_thread) :
-    num_threads_{num_threads},
-    reuse_thread_{reuse_thread},
-    sync_barrier_{num_threads + 1 - reuse_thread},
-    worker_threads_{},
-    thread_states_{},
-    main_thread_starter_function_{nullptr},
-    work_section_done_{false},
-    terminated_{false} {
-
-  worker_threads_.reserve(num_threads);
-  task_managers_.reserve(num_threads);
-  thread_states_.reserve(num_threads);
-  for (unsigned int i = 0; i < num_threads_; i++) {
-    auto &this_task_manager =
-        task_managers_.emplace_back(std::make_unique<task_manager>(i, computation_depth, stack_size, stack_allocator_));
-    auto &this_thread_state = thread_states_.emplace_back(std::make_unique<thread_state>(*this, i, *this_task_manager));
-
-    if (reuse_thread && i == 0) {
-      worker_threads_.emplace_back();
-      continue; // Skip over first/main thread when re-using the user's thread, as this one will replace the first one.
-    }
+scheduler::scheduler(unsigned int num_threads,
+                     size_t computation_depth,
+                     size_t stack_size,
+                     bool reuse_thread) : scheduler(num_threads,
+                                                    computation_depth,
+                                                    stack_size,
+                                                    reuse_thread,
+                                                    base::mmap_stack_allocator{}) {}
 
-    auto *this_thread_state_pointer = this_thread_state.get();
-    worker_threads_.emplace_back([this_thread_state_pointer] {
-      thread_state::set(this_thread_state_pointer);
-      work_thread_main_loop();
-    });
-  }
-}
 
 scheduler::~scheduler() {
   terminate();
 }
 
-
 void scheduler::work_thread_main_loop() {
   auto &scheduler = thread_state::get().get_scheduler();
   while (true) {
diff --git a/lib/pls/src/internal/scheduling/task_manager.cpp b/lib/pls/src/internal/scheduling/task_manager.cpp
index 245b41f..b6c40c5 100644
--- a/lib/pls/src/internal/scheduling/task_manager.cpp
+++ b/lib/pls/src/internal/scheduling/task_manager.cpp
@@ -9,14 +9,13 @@ namespace pls::internal::scheduling {
 
 task_manager::task_manager(unsigned thread_id,
                            size_t num_tasks,
                            size_t stack_size,
-                           stack_allocator &stack_allocator) : num_tasks_{num_tasks},
-                                                               stack_allocator_{stack_allocator},
-                                                               tasks_{},
-                                                               deque_{thread_id, num_tasks_} {
+                           std::shared_ptr<stack_allocator> stack_allocator) : stack_allocator_{stack_allocator},
+                                                                               tasks_{},
+                                                                               deque_{thread_id, num_tasks} {
   tasks_.reserve(num_tasks);
   for (size_t i = 0; i < num_tasks - 1; i++) {
-    char *stack_memory = stack_allocator.allocate_stack(stack_size);
+    char *stack_memory = stack_allocator->allocate_stack(stack_size);
     tasks_.emplace_back(std::make_unique<task>(stack_memory, stack_size, i, thread_id));
 
     if (i > 0) {
@@ -29,7 +28,7 @@ task_manager::task_manager(unsigned thread_id,
 
 task_manager::~task_manager() {
   for (auto &task : tasks_) {
-    stack_allocator_.free_stack(task->stack_size_, task->stack_memory_);
+    stack_allocator_->free_stack(task->stack_size_, task->stack_memory_);
   }
 }
diff --git a/test/scheduling_tests.cpp b/test/scheduling_tests.cpp
index c192d6f..3f03cc9 100644
--- a/test/scheduling_tests.cpp
+++ b/test/scheduling_tests.cpp
@@ -10,7 +10,6 @@
 
 using namespace pls::internal::scheduling;
 
-constexpr int MAX_NUM_THREADS = 8;
 constexpr int MAX_NUM_TASKS = 32;
 constexpr int MAX_STACK_SIZE = 1024 * 8;
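
Usage sketch (reviewer note, not part of the patch): the new templated constructor takes any allocator by forwarding reference, wraps it in a std::shared_ptr, and hands it to every task_manager. The example below assumes that base::stack_allocator resolves to pls::internal::base::stack_allocator and is the virtual interface behind that shared_ptr, with the allocate_stack/free_stack signatures implied by the task_manager changes above; malloc_stack_allocator is a hypothetical example type, not part of the library.

// Hedged sketch: names outside the diff (the exact namespace of base::stack_allocator,
// its virtual methods) are assumptions; the scheduler constructor arguments match the
// declaration added to scheduler.h in this patch.
#include <cstddef>
#include <cstdlib>

#include "pls/internal/scheduling/scheduler.h"

namespace base = pls::internal::base;

// Hypothetical allocator that hands out plain heap memory instead of mmap-backed stacks.
class malloc_stack_allocator : public base::stack_allocator {
 public:
  char *allocate_stack(size_t size) override {
    return static_cast<char *>(std::malloc(size));
  }
  void free_stack(size_t size, char *memory) override {
    (void) size; // The size is not needed when freeing heap memory.
    std::free(memory);
  }
};

int main() {
  // 8 worker threads, a computation depth of 32 tasks, 8 KiB stacks, re-using the calling thread.
  // The allocator is forwarded into a std::shared_ptr and shared by all task_managers.
  pls::internal::scheduling::scheduler scheduler{8, 32, 1024 * 8, true, malloc_stack_allocator{}};
  return 0;
}

The delegating constructor in scheduler.cpp keeps the old four-argument signature working by passing base::mmap_stack_allocator{} to this same template, so existing callers are unaffected.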