Commit 3ad3a3d4 by FritzFlorian

Minor adjustment to coordinating thread spawns (catch seems to have problems with the other implementation).
parent 82ccdc91
Pipeline #1552 passed with stages in 4 minutes 37 seconds
@@ -6,4 +6,6 @@ cmake .. -DCMAKE_BUILD_TYPE=RELEASE -DTHREAD_SANITIZER=OFF
 make
 # run the actual tests
+echo "Running tests..."
 ./bin/tests
+echo "...tests passed!"
@@ -40,6 +40,7 @@ scheduler::scheduler(unsigned int num_threads,
   worker_threads_.reserve(num_threads);
   task_managers_.reserve(num_threads);
   thread_states_.reserve(num_threads);
+  std::atomic<unsigned> num_spawned{0};
   for (unsigned int i = 0; i < num_threads_; i++) {
     auto &this_task_manager =
         task_managers_.emplace_back(std::make_unique<task_manager>(i,
@@ -54,29 +55,22 @@ scheduler::scheduler(unsigned int num_threads,
     if (reuse_thread && i == 0) {
       worker_threads_.emplace_back();
+      num_spawned++;
       continue; // Skip over first/main thread when re-using the users thread, as this one will replace the first one.
     }
     auto *this_thread_state_pointer = this_thread_state.get();
-    worker_threads_.emplace_back([this_thread_state_pointer] {
+    worker_threads_.emplace_back([this_thread_state_pointer, &num_spawned] {
       thread_state::set(this_thread_state_pointer);
+      num_spawned++;
       work_thread_main_loop();
     });
   }
-  // Make sure all threads are created and touched their stacks.
-  // Executing a work section ensures one wakeup/sleep cycle of all workers
-  // and explicitly forcing one task per worker forces them to initialize their stacks.
-  std::atomic<unsigned> num_spawned;
-  this->perform_work([&]() {
-    for (unsigned i = 0; i < num_threads; i++) {
-      spawn([&]() {
-        num_spawned++;
-        while (num_spawned < num_threads) std::this_thread::yield();
-      });
-    }
-    sync();
-  });
+  while (num_spawned < num_threads) {
+    std::this_thread::yield();
+  }
 }
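For context, the new startup coordination boils down to a simple pattern: each worker increments a shared atomic counter once it has set up its thread state, and the constructing thread spin-yields until the counter reaches the expected thread count. Below is a minimal, self-contained sketch of that pattern, assuming plain std::thread workers instead of the scheduler's own worker_threads_/thread_state types; it is an illustration of the handshake, not the scheduler's actual implementation.

#include <atomic>
#include <iostream>
#include <thread>
#include <vector>

int main() {
  const unsigned num_threads = 4;
  std::atomic<unsigned> num_spawned{0};
  std::vector<std::thread> workers;
  workers.reserve(num_threads);

  for (unsigned i = 0; i < num_threads; i++) {
    workers.emplace_back([&num_spawned] {
      // ... per-thread setup would go here (e.g. installing thread-local state) ...
      num_spawned++;  // signal that this worker has finished its setup
      // ... the worker main loop would follow ...
    });
  }

  // The constructing thread waits until every worker has checked in,
  // mirroring the while/yield loop added in the commit above.
  while (num_spawned < num_threads) {
    std::this_thread::yield();
  }
  std::cout << "all " << num_spawned << " workers spawned" << std::endl;

  for (auto &worker : workers) {
    worker.join();
  }
  return 0;
}

Compared to the removed approach (running a full work section and forcing one task per worker), this waits only for thread creation, which is what the commit message refers to as the adjustment that avoids problems under Catch.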
class scheduler::init_function { class scheduler::init_function {
@@ -346,12 +340,12 @@ void scheduler::spawn_and_sync_internal(Function &&lambda) {
   // execute the lambda itself, which could lead to a different thread returning.
 #if PLS_PROFILING_ENABLED
-  spawning_state.get_scheduler().profiler_.task_finish_stack_measure(syncing_state.get_thread_id(),
+  spawning_state.get_scheduler().profiler_.task_finish_stack_measure(spawning_state.get_thread_id(),
                                                                      last_task->stack_memory_,
                                                                      last_task->stack_size_,
                                                                      last_task->profiling_node_);
-  syncing_state.get_scheduler().profiler_.task_stop_running(spawning_state.get_thread_id(),
+  spawning_state.get_scheduler().profiler_.task_stop_running(spawning_state.get_thread_id(),
                                                             last_task->profiling_node_);
   auto *next_dag_node =
       spawning_state.get_scheduler().profiler_.task_sync(spawning_state.get_thread_id(), last_task->profiling_node_);
   last_task->profiling_node_ = next_dag_node;
...