diff --git a/lib/pls/include/pls/internal/scheduling/scheduler_memory.h b/lib/pls/include/pls/internal/scheduling/scheduler_memory.h
index 988ef1d..57a53dc 100644
--- a/lib/pls/include/pls/internal/scheduling/scheduler_memory.h
+++ b/lib/pls/include/pls/internal/scheduling/scheduler_memory.h
@@ -14,11 +14,36 @@ void worker_routine();
 using scheduler_thread = base::thread;
 
 class scheduler_memory {
+ private:
+  size_t max_threads_;
+  thread_state **thread_states_;
+  scheduler_thread **threads_;
+  data_structures::aligned_stack **task_stacks_;
+
+ protected:
+  void init(size_t max_threads,
+            thread_state **thread_states,
+            scheduler_thread **threads,
+            data_structures::aligned_stack **task_stacks) {
+    max_threads_ = max_threads;
+    thread_states_ = thread_states;
+    threads_ = threads;
+    task_stacks_ = task_stacks;
+  }
+
  public:
-  virtual size_t max_threads() const = 0;
-  virtual thread_state *thread_state_for(size_t id) = 0;
-  virtual scheduler_thread *thread_for(size_t id) = 0;
-  virtual data_structures::aligned_stack *task_stack_for(size_t id) = 0;
+  size_t max_threads() const {
+    return max_threads_;
+  }
+  thread_state *thread_state_for(size_t id) const {
+    return thread_states_[id];
+  }
+  scheduler_thread *thread_for(size_t id) const {
+    return threads_[id];
+  }
+  data_structures::aligned_stack *task_stack_for(size_t id) const {
+    return task_stacks_[id];
+  }
 };
 
 template<size_t MAX_THREADS, size_t TASK_STACK_SIZE>
@@ -31,23 +56,30 @@ class static_scheduler_memory : public scheduler_memory {
   using aligned_thread_stack = base::alignment::aligned_wrapper<std::array<char, TASK_STACK_SIZE>>;
   using aligned_aligned_stack = base::alignment::aligned_wrapper<data_structures::aligned_stack>;
 
+  // Actual Memory
   std::array<aligned_thread, MAX_THREADS> threads_;
   std::array<aligned_thread_state, MAX_THREADS> thread_states_;
   std::array<aligned_thread_stack, MAX_THREADS> task_stacks_memory_;
   std::array<aligned_aligned_stack, MAX_THREADS> task_stacks_;
 
+  // References for parent
+  std::array<scheduler_thread *, MAX_THREADS> thread_refs_;
+  std::array<thread_state *, MAX_THREADS> thread_state_refs_;
+  std::array<data_structures::aligned_stack *, MAX_THREADS> task_stack_refs_;
+
  public:
-  static_scheduler_memory() {
+  static_scheduler_memory() : scheduler_memory() {
     for (size_t i = 0; i < MAX_THREADS; i++) {
       new((void *) task_stacks_[i].pointer()) data_structures::aligned_stack(task_stacks_memory_[i].pointer()->data(),
                                                                              TASK_STACK_SIZE);
+
+      thread_refs_[i] = threads_[i].pointer();
+      thread_state_refs_[i] = thread_states_[i].pointer();
+      task_stack_refs_[i] = task_stacks_[i].pointer();
     }
-  }
 
-  size_t max_threads() const override { return MAX_THREADS; }
-  thread_state *thread_state_for(size_t id) override { return thread_states_[id].pointer(); }
-  scheduler_thread *thread_for(size_t id) override { return threads_[id].pointer(); }
-  data_structures::aligned_stack *task_stack_for(size_t id) override { return task_stacks_[id].pointer(); }
+    init(MAX_THREADS, thread_state_refs_.data(), thread_refs_.data(), task_stack_refs_.data());
+  }
 };
 
 class malloc_scheduler_memory : public scheduler_memory {
@@ -60,18 +92,20 @@ class malloc_scheduler_memory : public scheduler_memory {
 
   const size_t num_threads_;
 
+  // Actual Memory
   aligned_thread *threads_;
   aligned_thread_state *thread_states_;
   char **task_stacks_memory_;
   aligned_aligned_stack *task_stacks_;
+
+  // References for parent
+  scheduler_thread **thread_refs_;
+  thread_state **thread_state_refs_;
+  data_structures::aligned_stack **task_stack_refs_;
+
  public:
   explicit malloc_scheduler_memory(size_t num_threads, size_t memory_per_stack = 2 << 16);
   ~malloc_scheduler_memory();
-
-  size_t max_threads() const override { return num_threads_; }
-  thread_state *thread_state_for(size_t id) override { return thread_states_[id].pointer(); }
-  scheduler_thread *thread_for(size_t id) override { return threads_[id].pointer(); }
-  data_structures::aligned_stack *task_stack_for(size_t id) override { return task_stacks_[id].pointer(); }
 };
 
 }
diff --git a/lib/pls/src/internal/scheduling/scheduler_memory.cpp b/lib/pls/src/internal/scheduling/scheduler_memory.cpp
index 7d46744..e59abd3 100644
--- a/lib/pls/src/internal/scheduling/scheduler_memory.cpp
+++ b/lib/pls/src/internal/scheduling/scheduler_memory.cpp
@@ -10,14 +10,25 @@ malloc_scheduler_memory::malloc_scheduler_memory(const size_t num_threads, const
       reinterpret_cast<aligned_thread *>(base::alignment::allocate_aligned(num_threads * sizeof(aligned_thread)));
   thread_states_ = reinterpret_cast<aligned_thread_state *>(base::alignment::allocate_aligned(
       num_threads * sizeof(aligned_thread_state)));
-
   task_stacks_ = reinterpret_cast<aligned_aligned_stack *>(base::alignment::allocate_aligned(
       num_threads * sizeof(aligned_aligned_stack)));
   task_stacks_memory_ = reinterpret_cast<char **>(base::alignment::allocate_aligned(num_threads * sizeof(char *)));
+
+  thread_refs_ = static_cast<scheduler_thread **>(malloc(num_threads * sizeof(scheduler_thread *)));
+  thread_state_refs_ = static_cast<thread_state **>(malloc(num_threads * sizeof(thread_state *)));
+  task_stack_refs_ =
+      static_cast<data_structures::aligned_stack **>(malloc(num_threads * sizeof(data_structures::aligned_stack *)));
+
   for (size_t i = 0; i < num_threads_; i++) {
     task_stacks_memory_[i] = reinterpret_cast<char *>(base::alignment::allocate_aligned(memory_per_stack));
     new((void *) task_stacks_[i].pointer()) data_structures::aligned_stack(task_stacks_memory_[i], memory_per_stack);
+
+    thread_refs_[i] = threads_[i].pointer();
+    thread_state_refs_[i] = thread_states_[i].pointer();
+    task_stack_refs_[i] = task_stacks_[i].pointer();
   }
+
+  init(num_threads, thread_state_refs_, thread_refs_, task_stack_refs_);
 }
 
 malloc_scheduler_memory::~malloc_scheduler_memory() {
@@ -29,6 +40,10 @@ malloc_scheduler_memory::~malloc_scheduler_memory() {
   }
   free(task_stacks_);
   free(task_stacks_memory_);
+
+  free(thread_refs_);
+  free(thread_state_refs_);
+  free(task_stack_refs_);
 }
 
 }
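
For context, a minimal usage sketch of the refactored interface, not part of the commit above: the pure virtual accessors are replaced by plain member functions that index the pointer arrays registered through init(), presumably to avoid virtual dispatch on these frequently-read accessors. The pls::internal::scheduling namespace and the include root lib/pls/include are assumptions inferred from the file paths; the helper touch_all_slots is hypothetical.

// Hedged sketch, assuming the namespace and include path noted above.
#include <cstddef>

#include "pls/internal/scheduling/scheduler_memory.h"

namespace scheduling = pls::internal::scheduling;

// Reads every per-thread slot through the de-virtualized accessors, which
// now simply index the pointer arrays handed to scheduler_memory::init().
void touch_all_slots(scheduling::scheduler_memory &memory) {
  for (size_t i = 0; i < memory.max_threads(); i++) {
    auto *state = memory.thread_state_for(i);  // thread_state *
    auto *stack = memory.task_stack_for(i);    // data_structures::aligned_stack *
    (void) state;
    (void) stack;
  }
}

int main() {
  // Four workers, each with the default task stack of 2 << 16 bytes.
  scheduling::malloc_scheduler_memory memory{4};
  touch_all_slots(memory);
  return 0;
}

Both static_scheduler_memory and malloc_scheduler_memory keep owning the actual storage; they only publish per-thread pointer arrays to the base class, so the accessors stay branch-free array lookups in either case.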