From 3ad3a3d4c2c795e3c3b7231e7c264e76b4e9e612 Mon Sep 17 00:00:00 2001
From: FritzFlorian
Date: Tue, 30 Jun 2020 15:07:10 +0200
Subject: [PATCH] Minor adjustment to coordinating thread spawns (catch seems
 to have problems with the other implementation).

---
 ci_scripts/run_tests.sh                                   |  2 ++
 lib/pls/include/pls/internal/scheduling/scheduler_impl.h  | 34 ++++++++++++++--------------------
 2 files changed, 16 insertions(+), 20 deletions(-)

diff --git a/ci_scripts/run_tests.sh b/ci_scripts/run_tests.sh
index 5876b92..06f82d9 100755
--- a/ci_scripts/run_tests.sh
+++ b/ci_scripts/run_tests.sh
@@ -6,4 +6,6 @@ cmake .. -DCMAKE_BUILD_TYPE=RELEASE -DTHREAD_SANITIZER=OFF
 make
 
 # run the actual tests
+echo "Running tests..."
 ./bin/tests
+echo "...tests passed!"
diff --git a/lib/pls/include/pls/internal/scheduling/scheduler_impl.h b/lib/pls/include/pls/internal/scheduling/scheduler_impl.h
index 28ddf35..eeaa771 100644
--- a/lib/pls/include/pls/internal/scheduling/scheduler_impl.h
+++ b/lib/pls/include/pls/internal/scheduling/scheduler_impl.h
@@ -40,6 +40,7 @@ scheduler::scheduler(unsigned int num_threads,
   worker_threads_.reserve(num_threads);
   task_managers_.reserve(num_threads);
   thread_states_.reserve(num_threads);
+  std::atomic<unsigned> num_spawned{0};
 
   for (unsigned int i = 0; i < num_threads_; i++) {
     auto &this_task_manager = task_managers_.emplace_back(std::make_unique<task_manager>(i,
@@ -54,29 +55,22 @@ scheduler::scheduler(unsigned int num_threads,
 
     if (reuse_thread && i == 0) {
       worker_threads_.emplace_back();
+      num_spawned++;
       continue; // Skip over first/main thread when re-using the users thread, as this one will replace the first one.
     }
 
     auto *this_thread_state_pointer = this_thread_state.get();
-    worker_threads_.emplace_back([this_thread_state_pointer] {
+
+    worker_threads_.emplace_back([this_thread_state_pointer, &num_spawned] {
       thread_state::set(this_thread_state_pointer);
+      num_spawned++;
       work_thread_main_loop();
     });
   }
 
-  // Make sure all threads are created and touched their stacks.
-  // Executing a work section ensures one wakeup/sleep cycle of all workers
-  // and explicitly forcing one task per worker forces them to initialize their stacks.
-  std::atomic<unsigned> num_spawned;
-  this->perform_work([&]() {
-    for (unsigned i = 0; i < num_threads; i++) {
-      spawn([&]() {
-        num_spawned++;
-        while (num_spawned < num_threads) std::this_thread::yield();
-      });
-    }
-    sync();
-  });
+  while (num_spawned < num_threads) {
+    std::this_thread::yield();
+  }
 }
 
 class scheduler::init_function {
@@ -346,12 +340,12 @@ void scheduler::spawn_and_sync_internal(Function &&lambda) {
 
   // execute the lambda itself, which could lead to a different thread returning.
 #if PLS_PROFILING_ENABLED
-  spawning_state.get_scheduler().profiler_.task_finish_stack_measure(syncing_state.get_thread_id(),
-                                                                     last_task->stack_memory_,
-                                                                     last_task->stack_size_,
-                                                                     last_task->profiling_node_);
-  syncing_state.get_scheduler().profiler_.task_stop_running(spawning_state.get_thread_id(),
-                                                            last_task->profiling_node_);
+  spawning_state.get_scheduler().profiler_.task_finish_stack_measure(spawning_state.get_thread_id(),
+                                                                     last_task->stack_memory_,
+                                                                     last_task->stack_size_,
+                                                                     last_task->profiling_node_);
+  spawning_state.get_scheduler().profiler_.task_stop_running(spawning_state.get_thread_id(),
+                                                             last_task->profiling_node_);
   auto *next_dag_node = spawning_state.get_scheduler().profiler_.task_sync(spawning_state.get_thread_id(),
                                                                            last_task->profiling_node_);
   last_task->profiling_node_ = next_dag_node;
-- 
libgit2 0.26.0
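
Reviewer note on the new startup coordination: the patch drops the old
perform_work/spawn/sync handshake (which forced one dummy task per worker to
touch its stack) in favor of a plain atomic counter that every worker
increments once it is set up, while the constructor yields until all workers
have checked in. A minimal, self-contained sketch of that pattern follows;
the names here (NUM_THREADS, the printf bodies) are illustrative only and not
part of the PLS API.

#include <atomic>
#include <cstdio>
#include <thread>
#include <vector>

int main() {
  constexpr unsigned NUM_THREADS = 4;
  std::atomic<unsigned> num_spawned{0};

  std::vector<std::thread> workers;
  workers.reserve(NUM_THREADS);
  for (unsigned i = 0; i < NUM_THREADS; i++) {
    workers.emplace_back([i, &num_spawned] {
      // ...per-thread setup goes here (the patch sets the thread_state)...
      num_spawned++;  // signal that this worker is fully initialized
      std::printf("worker %u ready\n", i);
    });
  }

  // Coordinating thread: yield until every worker has signaled readiness.
  while (num_spawned < NUM_THREADS) {
    std::this_thread::yield();
  }
  std::printf("all %u workers spawned\n", NUM_THREADS);

  for (auto &w : workers) w.join();
  return 0;
}

The trade-off hinted at in the subject line: the counter-based handshake no
longer forces a full wakeup/sleep cycle per worker (the stated purpose of the
removed block), but it avoids scheduling dummy tasks during construction,
which apparently interacted badly with the catch test runner.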