From 3687a591895332697bf2610bdcb11a39d158e3b4 Mon Sep 17 00:00:00 2001 From: FritzFlorian Date: Fri, 24 Apr 2020 14:20:42 +0200 Subject: [PATCH] Fix: race-condition on traded in task-chains. --- lib/pls/include/pls/internal/scheduling/scheduler_impl.h | 1 + lib/pls/src/internal/scheduling/lock_free/task_manager.cpp | 4 ++-- lib/pls/src/internal/scheduling/scheduler.cpp | 7 +++---- test/scheduling_tests.cpp | 32 +++++++++++++++++++++++++++++++- 4 files changed, 37 insertions(+), 7 deletions(-) diff --git a/lib/pls/include/pls/internal/scheduling/scheduler_impl.h b/lib/pls/include/pls/internal/scheduling/scheduler_impl.h index bdcc241..e70aacd 100644 --- a/lib/pls/include/pls/internal/scheduling/scheduler_impl.h +++ b/lib/pls/include/pls/internal/scheduling/scheduler_impl.h @@ -66,6 +66,7 @@ class scheduler::init_function_impl : public init_function { void run() override { base_task *root_task = thread_state::get().get_active_task(); root_task->run_as_task([root_task, this](::context_switcher::continuation cont) { + root_task->is_synchronized_ = true; thread_state::get().main_continuation() = std::move(cont); function_(); thread_state::get().get_scheduler().work_section_done_.store(true); diff --git a/lib/pls/src/internal/scheduling/lock_free/task_manager.cpp b/lib/pls/src/internal/scheduling/lock_free/task_manager.cpp index 2e47d08..4844477 100644 --- a/lib/pls/src/internal/scheduling/lock_free/task_manager.cpp +++ b/lib/pls/src/internal/scheduling/lock_free/task_manager.cpp @@ -54,6 +54,7 @@ std::tuple<base_task *, base_task *> task_manager::steal_task(thread_state &stea // get a suitable task to trade in // TODO: opt. add debug marker to traded in tasks that we do not accidentally use them. 
task *traded_task = static_cast<task *>(&scheduler::task_chain_at(stolen_task->depth_, stealing_state)); + base_task *chain_after_stolen_task = traded_task->next_; // perform the actual pop operation auto pop_result_task = deque_.pop_top(traded_task, peek); @@ -77,7 +78,7 @@ std::tuple<base_task *, base_task *> task_manager::steal_task(thread_state &stea stolen_task->reset_task_chain(); } - return std::pair{stolen_task, traded_task}; + return std::pair{stolen_task, chain_after_stolen_task}; } else { return std::pair{nullptr, nullptr}; } @@ -103,5 +104,4 @@ base_task *task_manager::pop_clean_task_chain(base_task *base_task) { return clean_chain; } - } diff --git a/lib/pls/src/internal/scheduling/scheduler.cpp b/lib/pls/src/internal/scheduling/scheduler.cpp index 8869965..34c75da 100644 --- a/lib/pls/src/internal/scheduling/scheduler.cpp +++ b/lib/pls/src/internal/scheduling/scheduler.cpp @@ -56,12 +56,11 @@ void scheduler::work_thread_work_section() { } while (target == my_state.get_thread_id()); thread_state &target_state = my_state.get_scheduler().thread_state_for(target); - auto[stolen_task, traded_task] = target_state.get_task_manager().steal_task(my_state); + auto[stolen_task, chain_after_stolen_task] = target_state.get_task_manager().steal_task(my_state); if (stolen_task) { // Keep task chain consistent. We want to appear as if we are working on a branch upwards of the stolen task. 
- base_task *next_own_task = traded_task->next_; - stolen_task->next_ = next_own_task; - next_own_task->prev_ = stolen_task; + stolen_task->next_ = chain_after_stolen_task; + chain_after_stolen_task->prev_ = stolen_task; my_state.set_active_task(stolen_task); PLS_ASSERT(check_task_chain_forward(*my_state.get_active_task()), diff --git a/test/scheduling_tests.cpp b/test/scheduling_tests.cpp index db3803d..2bcf7b2 100644 --- a/test/scheduling_tests.cpp +++ b/test/scheduling_tests.cpp @@ -4,6 +4,7 @@ #include "pls/pls.h" using namespace pls::internal::scheduling; +using namespace pls::internal::scheduling::lock_free; constexpr int MAX_NUM_TASKS = 32; constexpr int MAX_STACK_SIZE = 1024 * 8; @@ -32,7 +33,7 @@ TEST_CASE("scheduler correctly initializes", "[internal/scheduling/scheduler]") } } -TEST_CASE("tasks distributed over workers (do not block)", "[internal/scheduling/scheduler]") { +TEST_CASE("tasks distributed over workers (do not block)", "[internal/scheduling/scheduler.h]") { scheduler scheduler{3, MAX_NUM_TASKS, MAX_STACK_SIZE}; std::atomic<int> num_run{0}; @@ -54,3 +55,32 @@ }); REQUIRE(num_run == 3); } + +unsigned fib_serial(unsigned n) { + if (n <= 1) { + return n; + } + + return fib_serial(n - 1) + fib_serial(n - 2); +} + +unsigned fib_pls(unsigned n) { + if (n <= 1) { + return n; + } + + unsigned a, b; + pls::invoke( + [&a, n] { a = fib_pls(n - 1); }, + [&b, n] { b = fib_pls(n - 2); } + ); + return a + b; +} + +TEST_CASE("simple fib", "[internal/scheduling/scheduler]") { + scheduler scheduler{3, MAX_NUM_TASKS, MAX_STACK_SIZE}; + + scheduler.perform_work([&] { + REQUIRE(fib_serial(28) == fib_pls(28)); + }); +} -- libgit2 0.26.0