From 0a8b9ebbd5551a7fefc88f494d290b9541716605 Mon Sep 17 00:00:00 2001
From: FritzFlorian
Date: Tue, 28 May 2019 16:51:12 +0200
Subject: [PATCH] Remove backoff mechanisms for now.

---
 lib/pls/include/pls/internal/base/backoff.h        |  4 ++--
 lib/pls/include/pls/internal/base/system_details.h |  8 +-------
 lib/pls/src/internal/scheduling/abstract_task.cpp  | 11 ++++++-----
 3 files changed, 9 insertions(+), 14 deletions(-)

diff --git a/lib/pls/include/pls/internal/base/backoff.h b/lib/pls/include/pls/internal/base/backoff.h
index 9e7cbf5..45ca814 100644
--- a/lib/pls/include/pls/internal/base/backoff.h
+++ b/lib/pls/include/pls/internal/base/backoff.h
@@ -14,7 +14,7 @@ namespace internal {
 namespace base {
 
 class backoff {
-  const unsigned long INITIAL_SPIN_ITERS = 2u << 4u;
+  const unsigned long INITIAL_SPIN_ITERS = 2u << 1u;
   const unsigned long MAX_SPIN_ITERS = 2u << 8u;
   const unsigned long MAX_ITERS = 2u << 10u;
   const unsigned long YELD_ITERS = 2u << 10u;
@@ -36,7 +36,7 @@ class backoff {
 
     if (current_ >= YELD_ITERS) {
       PROFILE_LOCK("Yield")
-      this_thread::yield();
+      this_thread::sleep(5);
     }
 
     current_ = std::min(current_ * 2, MAX_ITERS);
diff --git a/lib/pls/include/pls/internal/base/system_details.h b/lib/pls/include/pls/internal/base/system_details.h
index 0f0d5e7..17b51d7 100644
--- a/lib/pls/include/pls/internal/base/system_details.h
+++ b/lib/pls/include/pls/internal/base/system_details.h
@@ -52,15 +52,9 @@ constexpr pointer_t CACHE_LINE_SIZE = 64;
  *
  * Choose the implementation appropriate for your compiler-cpu combination.
  */
-#if (COMPILER == MVCC)
-inline void relax_cpu() {
-  _mm_pause();
-}
-#elif (COMPILER == GCC || COMPILER == LLVM)
 inline void relax_cpu() {
-  asm("pause");
+  asm volatile("pause":: : "memory");
 }
-#endif
 
 }
 }
diff --git a/lib/pls/src/internal/scheduling/abstract_task.cpp b/lib/pls/src/internal/scheduling/abstract_task.cpp
index aeccb1d..85e2c36 100644
--- a/lib/pls/src/internal/scheduling/abstract_task.cpp
+++ b/lib/pls/src/internal/scheduling/abstract_task.cpp
@@ -10,7 +10,7 @@ namespace internal {
 namespace scheduling {
 
 bool abstract_task::steal_work() {
-  thread_local static base::backoff backoff{};
+//  thread_local static base::backoff backoff{};
   PROFILE_STEALING("abstract_task::steal_work")
 
   const auto my_state = base::this_thread::state();
@@ -22,7 +22,7 @@ bool abstract_task::steal_work() {
   for (size_t i = 0; i < max_tries; i++) {
     size_t target = (offset + i) % my_scheduler->num_threads();
     if (target == my_id) {
-      target = (target + 1) % my_scheduler->num_threads();
+      continue;
     }
 
     auto target_state = my_scheduler->thread_state_for(target);
@@ -47,7 +47,7 @@ bool abstract_task::steal_work() {
       if (internal_stealing(current_task)) {
         // internal steal was a success, hand it back to the internal scheduler
         target_state->lock_.reader_unlock();
-        backoff.reset();
+//        backoff.reset();
         return true;
       }
 
@@ -65,7 +65,7 @@ bool abstract_task::steal_work() {
       auto lock = &target_state->lock_;
       if (current_task->split_task(lock)) {
         // top level steal was a success (we did a top level task steal)
-        backoff.reset();
+//        backoff.reset();
         return false;
       }
 
@@ -76,7 +76,8 @@ bool abstract_task::steal_work() {
   }
 
   // internal steal was no success
-  backoff.do_backoff();
+//  backoff.do_backoff();
+//  base::this_thread::sleep(5);
   return false;
 }
--
libgit2 0.26.0
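
Note (not part of the patch): the sketch below restates, in plain C++, the exponential-backoff pattern that this change disables in steal_work(). Only the constant values and the calls visible in the hunks above come from the patch; the class name backoff_sketch, the std::this_thread calls, and the portable fallback inside relax_cpu() are illustrative stand-ins for the project's own backoff class and this_thread wrapper, not the real PLS implementation.

#include <algorithm>
#include <chrono>
#include <thread>

// Stand-in for the project's relax_cpu(): emit a CPU pause hint in spin loops.
// The patched system_details.h now uses asm volatile("pause" ::: "memory")
// unconditionally; a guarded fallback is added here so the sketch compiles
// on non-x86 targets as well.
inline void relax_cpu() {
#if defined(__x86_64__) || defined(__i386__)
  asm volatile("pause" ::: "memory");
#else
  std::this_thread::yield();
#endif
}

// Sketch of the backoff helper that steal_work() no longer uses: spin for a
// growing number of iterations after each failed steal, and once the count
// passes YELD_ITERS, give up the time slice (the patch experiments with a
// fixed sleep instead of a yield at that point).
class backoff_sketch {
  static constexpr unsigned long INITIAL_SPIN_ITERS = 2u << 1u;
  static constexpr unsigned long MAX_ITERS = 2u << 10u;
  static constexpr unsigned long YELD_ITERS = 2u << 10u;

  unsigned long current_ = INITIAL_SPIN_ITERS;

 public:
  void do_backoff() {
    // Busy-wait for the current number of iterations.
    for (unsigned long i = 0; i < current_; i++) {
      relax_cpu();
    }
    // Past the yield threshold, stop burning CPU and sleep briefly.
    if (current_ >= YELD_ITERS) {
      std::this_thread::sleep_for(std::chrono::milliseconds(5));
    }
    // Exponentially increase the wait, capped at MAX_ITERS.
    current_ = std::min(current_ * 2, MAX_ITERS);
  }

  // Called after a successful steal so the next failure starts cheap again.
  void reset() { current_ = INITIAL_SPIN_ITERS; }
};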