Commit 0a8b9ebb by FritzFlorian

Remove backoff mechanisms for now.

parent a78ff0d1
Pipeline #1231 passed with stages in 3 minutes 50 seconds
@@ -14,7 +14,7 @@ namespace internal {
 namespace base {
 class backoff {
-  const unsigned long INITIAL_SPIN_ITERS = 2u << 4u;
+  const unsigned long INITIAL_SPIN_ITERS = 2u << 1u;
   const unsigned long MAX_SPIN_ITERS = 2u << 8u;
   const unsigned long MAX_ITERS = 2u << 10u;
   const unsigned long YELD_ITERS = 2u << 10u;
@@ -36,7 +36,7 @@ class backoff {
     if (current_ >= YELD_ITERS) {
       PROFILE_LOCK("Yield")
-      this_thread::yield();
+      this_thread::sleep(5);
     }
     current_ = std::min(current_ * 2, MAX_ITERS);
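This hunk lowers INITIAL_SPIN_ITERS from 2u << 4u (32) to 2u << 1u (4) and, once the counter passes YELD_ITERS, replaces the yield with a fixed sleep. Below is a minimal sketch of the exponential-backoff pattern such a class follows; member names, thresholds, and the pause instruction are assumptions for illustration, not the repository's actual implementation.

// Minimal sketch of an exponential backoff helper along these lines
// (names and thresholds are assumptions, not this repository's code).
#include <algorithm>
#include <chrono>
#include <thread>

class backoff_sketch {
  const unsigned long INITIAL_SPIN_ITERS = 2u << 1u;  // 4 busy-wait iterations to start with
  const unsigned long MAX_ITERS = 2u << 10u;          // 2048, upper bound for the counter
  const unsigned long YIELD_ITERS = 2u << 10u;        // threshold where pure spinning stops

  unsigned long current_ = INITIAL_SPIN_ITERS;

 public:
  void do_backoff() {
    // Busy-wait for the current number of iterations.
    for (unsigned long i = 0; i < current_; i++) {
      asm volatile("pause" ::: "memory");  // x86 spin-loop hint (GCC/Clang syntax)
    }
    // Once the counter is large enough, give up the time slice instead of burning CPU.
    if (current_ >= YIELD_ITERS) {
      std::this_thread::sleep_for(std::chrono::microseconds(5));
    }
    // Exponential growth, capped at MAX_ITERS.
    current_ = std::min(current_ * 2, MAX_ITERS);
  }

  void reset() { current_ = INITIAL_SPIN_ITERS; }
};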
@@ -52,15 +52,9 @@ constexpr pointer_t CACHE_LINE_SIZE = 64;
  *
  * Choose the implementation appropriate for your compiler-cpu combination.
  */
-#if (COMPILER == MVCC)
-inline void relax_cpu() {
-  _mm_pause();
-}
-#elif (COMPILER == GCC || COMPILER == LLVM)
 inline void relax_cpu() {
-  asm("pause");
+  asm volatile("pause":: : "memory");
 }
-#endif
 }
 }
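This hunk drops the MSVC branch (_mm_pause()) and keeps a single GCC/LLVM implementation, adding volatile and a "memory" clobber so the compiler can neither remove the pause nor cache loads across it. The sketch below shows how such a relax_cpu() helper is typically used in a spin-wait loop; the atomic flag and loop are illustrative, not code from this repository.

// Hedged sketch of a relax_cpu() helper used in a spin-wait loop (GCC/Clang, x86).
#include <atomic>

inline void relax_cpu() {
  // volatile keeps the instruction in place; the "memory" clobber stops the
  // compiler from hoisting or caching memory accesses across the pause.
  asm volatile("pause" ::: "memory");
}

void spin_until_set(std::atomic<bool> &flag) {
  while (!flag.load(std::memory_order_acquire)) {
    relax_cpu();  // reduces power draw and frees resources for the sibling hyper-thread
  }
}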
@@ -10,7 +10,7 @@ namespace internal {
 namespace scheduling {
 bool abstract_task::steal_work() {
-  thread_local static base::backoff backoff{};
+//  thread_local static base::backoff backoff{};
   PROFILE_STEALING("abstract_task::steal_work")
   const auto my_state = base::this_thread::state<thread_state>();
@@ -22,7 +22,7 @@ bool abstract_task::steal_work() {
   for (size_t i = 0; i < max_tries; i++) {
     size_t target = (offset + i) % my_scheduler->num_threads();
     if (target == my_id) {
-      target = (target + 1) % my_scheduler->num_threads();
+      continue;
     }
     auto target_state = my_scheduler->thread_state_for(target);
@@ -47,7 +47,7 @@ bool abstract_task::steal_work() {
     if (internal_stealing(current_task)) {
       // internal steal was a success, hand it back to the internal scheduler
       target_state->lock_.reader_unlock();
-      backoff.reset();
+//      backoff.reset();
       return true;
     }
@@ -65,7 +65,7 @@ bool abstract_task::steal_work() {
     auto lock = &target_state->lock_;
     if (current_task->split_task(lock)) {
       // top level steal was a success (we did a top level task steal)
-      backoff.reset();
+//      backoff.reset();
       return false;
     }
@@ -76,7 +76,8 @@ bool abstract_task::steal_work() {
   }
   // internal steal was no success
-  backoff.do_backoff();
+//  backoff.do_backoff();
+//  base::this_thread::sleep(5);
   return false;
 }
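In steal_work() the victim loop now skips the stealing thread's own index with continue instead of redirecting to the next index, and every backoff call is commented out, so a failed stealing round simply returns false. The sketch below illustrates that victim-selection pattern; the function name, parameters, and try_steal callback are assumptions for illustration, not the scheduler's actual API.

// Illustrative sketch of the victim-selection loop after this commit.
#include <cstddef>
#include <functional>

// try_steal stands in for the scheduler's actual stealing call on a target thread.
bool steal_round(std::size_t my_id, std::size_t num_threads, std::size_t offset,
                 const std::function<bool(std::size_t)> &try_steal) {
  for (std::size_t i = 0; i < num_threads; i++) {
    std::size_t target = (offset + i) % num_threads;
    if (target == my_id) {
      continue;  // skip ourselves instead of redirecting to a neighbouring index
    }
    if (try_steal(target)) {
      return true;  // steal succeeded; with backoff removed there is no reset() bookkeeping
    }
  }
  return false;  // whole round failed; the caller returns without backing off
}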