Commit d16ad3eb by FritzFlorian

Add basic exponential backoff to our code.

parent aad1db1f
Pipeline #1158 failed with stages in 4 minutes 8 seconds
CMakeLists.txt
@@ -15,6 +15,7 @@ add_library(pls STATIC
         include/pls/internal/base/system_details.h
         include/pls/internal/base/error_handling.h
         include/pls/internal/base/alignment.h src/internal/base/alignment.cpp
+        include/pls/internal/base/backoff.h
         include/pls/internal/data_structures/aligned_stack.h src/internal/data_structures/aligned_stack.cpp
         include/pls/internal/data_structures/aligned_stack_impl.h
...
new file: include/pls/internal/base/backoff.h

#ifndef PLS_BACKOFF_H_
#define PLS_BACKOFF_H_

#include "pls/internal/base/system_details.h"
#include "pls/internal/helpers/profiler.h"
#include "pls/internal/base/thread.h"

#include <algorithm> // for std::min (was <math.h>, which is not needed here)
#include <random>

namespace pls {
namespace internal {
namespace base {

// Exponential backoff: wait a random number of spin iterations below a
// cap that doubles after every contended attempt.
class backoff {
  static constexpr unsigned long INITIAL_SPIN_ITERS = 2u << 2u; // 8
  static constexpr unsigned long MAX_SPIN_ITERS = 2u << 6u;     // 128

  unsigned long current_ = INITIAL_SPIN_ITERS;
  std::minstd_rand random_;

  static void spin(unsigned long iterations) {
    for (volatile unsigned long i = 0; i < iterations; i++)
      system_details::relax_cpu(); // Spin
  }

 public:
  backoff() : current_{INITIAL_SPIN_ITERS}, random_{std::random_device{}()} {}

  void do_backoff() {
    PROFILE_LOCK("Backoff")
    // Randomize the wait to de-synchronize competing threads...
    spin(random_() % std::min(current_, MAX_SPIN_ITERS));
    // ...then double the cap for the next contended attempt.
    current_ = current_ * 2;
    if (current_ > MAX_SPIN_ITERS) {
      current_ = MAX_SPIN_ITERS;
    }
  }

  void reset() {
    current_ = INITIAL_SPIN_ITERS;
  }
};

}
}
}

#endif //PLS_BACKOFF_H_
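
For orientation, a minimal usage sketch of the new class (the guarded operation is a placeholder, not part of this commit): the spin cap starts at 8 iterations and doubles per contended attempt up to 128, with each wait randomized so competing threads do not retry in lockstep.

    // Hypothetical caller, illustration only.
    pls::internal::base::backoff backoff_strategy;
    while (!try_acquire_resource()) {   // placeholder for any contended operation
      backoff_strategy.do_backoff();    // waits rand() % cap iterations; cap: 8 -> 16 -> ... -> 128
    }
    backoff_strategy.reset();           // the next contention episode starts small again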
spin_lock.h
@@ -10,7 +10,7 @@ namespace internal {
 namespace base {

 // Default Spin-Lock implementation for this project.
-using spin_lock = tas_spin_lock;
+using spin_lock = ttas_spin_lock;

 }
 }
...
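
Because call sites go through the spin_lock alias, this one-line change switches the whole project from the plain test-and-set lock to the test-and-test-and-set lock with backoff. A hedged usage sketch (assuming the usual unlock() member, which the truncated headers do not show):

    #include "pls/internal/base/spin_lock.h"

    pls::internal::base::spin_lock lock;   // now a ttas_spin_lock
    lock.lock();
    // ... critical section ...
    lock.unlock();                         // assumed member, not shown in this diff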
include/pls/internal/base/tas_spin_lock.h
@@ -21,11 +21,10 @@ namespace base {
  */
 class tas_spin_lock {
   std::atomic_flag flag_;
-  unsigned int yield_at_tries_;

 public:
-  tas_spin_lock() : flag_{ATOMIC_FLAG_INIT}, yield_at_tries_{1024} {};
-  tas_spin_lock(const tas_spin_lock &other) : flag_{ATOMIC_FLAG_INIT}, yield_at_tries_{other.yield_at_tries_} {}
+  tas_spin_lock() : flag_{ATOMIC_FLAG_INIT} {};
+  tas_spin_lock(const tas_spin_lock &/*other*/) : flag_{ATOMIC_FLAG_INIT} {}

   void lock();
   bool try_lock(unsigned int num_tries = 1);
...
include/pls/internal/base/ttas_spin_lock.h
@@ -6,6 +6,7 @@
 #include <iostream>
 #include "pls/internal/base/thread.h"
+#include "pls/internal/base/backoff.h"

 namespace pls {
 namespace internal {
@@ -18,11 +19,11 @@ namespace base {
  */
 class ttas_spin_lock {
   std::atomic<int> flag_;
-  const unsigned int yield_at_tries_;
+  backoff backoff_;

 public:
-  ttas_spin_lock() : flag_{0}, yield_at_tries_{1024} {};
-  ttas_spin_lock(const ttas_spin_lock &other) : flag_{0}, yield_at_tries_{other.yield_at_tries_} {}
+  ttas_spin_lock() : flag_{0}, backoff_{} {};
+  ttas_spin_lock(const ttas_spin_lock &/*other*/) : flag_{0}, backoff_{} {}

   void lock();
   bool try_lock(unsigned int num_tries = 1);
...
swmr_spin_lock.cpp
@@ -7,14 +7,14 @@ namespace base {
 bool swmr_spin_lock::reader_try_lock() {
   PROFILE_LOCK("Try Acquire Read Lock")
-  if (write_request_.load(std::memory_order_relaxed) == 1) {
+  if (write_request_.load(std::memory_order_acquire) == 1) {
     return false;
   }
   // We think we can enter the region
   readers_.fetch_add(1, std::memory_order_acquire);
-  if (write_request_.load() == 1) {
+  if (write_request_.load(std::memory_order_acquire) == 1) {
     // Whoops, the writer acquired the lock, so we back off again
-    readers_--;
+    readers_.fetch_add(-1, std::memory_order_release);
     return false;
   }
@@ -29,17 +29,10 @@ void swmr_spin_lock::reader_unlock() {
 void swmr_spin_lock::writer_lock() {
   PROFILE_LOCK("Acquire Write Lock")
   // Tell the readers that we would like to write
-  int expected;
-  while (true) {
-    expected = 0;
-    if (write_request_.compare_exchange_weak(expected, 1, std::memory_order_acquire)) {
-      break;
-    }
-    system_details::relax_cpu(); // Spin until WE set the write lock flag
-  }
+  write_request_.store(1, std::memory_order_release); // store must be release; acquire is not a valid store order

   // Wait for all of them to exit the critical section
-  while (readers_.load() > 0)
+  while (readers_.load(std::memory_order_acquire) > 0)
     system_details::relax_cpu(); // Spin, not expensive as it is only a load
 }
...
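
The reader/writer handshake above is the standard optimistic one: a reader bumps readers_ first, then re-checks write_request_ and backs out if the single writer arrived in between; the writer announces itself once (valid because there is only one writer) and waits for the reader count to drain. A self-contained sketch of that protocol, using default (seq_cst) ordering to sidestep the memory-order subtleties — illustration only, not the project's code:

    #include <atomic>

    // Single-writer/multiple-reader protocol sketch.
    struct swmr_sketch {
      std::atomic<int> write_request{0};
      std::atomic<int> readers{0};

      bool reader_try_lock() {
        if (write_request.load() == 1) return false;
        readers.fetch_add(1);            // optimistic entry
        if (write_request.load() == 1) { // re-check: did the writer arrive meanwhile?
          readers.fetch_sub(1);          // back out again
          return false;
        }
        return true;
      }
      void reader_unlock() { readers.fetch_sub(1); }

      void writer_lock() {               // only ever called by the single writer
        write_request.store(1);
        while (readers.load() > 0) { /* spin */ }
      }
      void writer_unlock() { write_request.store(0); }
    };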
#include "pls/internal/helpers/profiler.h" #include "pls/internal/helpers/profiler.h"
#include "pls/internal/base/tas_spin_lock.h" #include "pls/internal/base/tas_spin_lock.h"
#include "pls/internal/base/backoff.h"
namespace pls { namespace pls {
namespace internal { namespace internal {
...@@ -7,30 +8,31 @@ namespace base { ...@@ -7,30 +8,31 @@ namespace base {
void tas_spin_lock::lock() { void tas_spin_lock::lock() {
PROFILE_LOCK("Acquire Lock") PROFILE_LOCK("Acquire Lock")
int tries = 0; backoff backoff_strategy;
while (true) {
tries++;
if (tries % yield_at_tries_ == 0) {
this_thread::yield();
}
while (true) {
if (flag_.test_and_set(std::memory_order_acquire) == 0) { if (flag_.test_and_set(std::memory_order_acquire) == 0) {
return; return;
} }
backoff_strategy.do_backoff();
} }
} }
bool tas_spin_lock::try_lock(unsigned int num_tries) { bool tas_spin_lock::try_lock(unsigned int num_tries) {
PROFILE_LOCK("Try Acquire Lock") PROFILE_LOCK("Try Acquire Lock")
backoff backoff_strategy;
while (true) { while (true) {
if (flag_.test_and_set(std::memory_order_acquire) == 0) {
return true;
}
num_tries--; num_tries--;
if (num_tries <= 0) { if (num_tries <= 0) {
return false; return false;
} }
if (flag_.test_and_set(std::memory_order_acquire) == 0) { backoff_strategy.do_backoff();
return true;
}
} }
} }
......
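
Note the reordering in try_lock: the test_and_set now happens before the counter is decremented, so try_lock(1) makes exactly one acquisition attempt instead of giving up without trying. Hedged usage sketch (unlock() is assumed; it is outside this diff):

    pls::internal::base::tas_spin_lock lock;
    if (lock.try_lock()) {      // num_tries = 1: exactly one test_and_set, no backoff
      // ... critical section ...
      lock.unlock();            // assumed member, not shown in this diff
    }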
#include "pls/internal/helpers/profiler.h" #include "pls/internal/helpers/profiler.h"
#include "pls/internal/base/ttas_spin_lock.h" #include "pls/internal/base/ttas_spin_lock.h"
#include "pls/internal/base/backoff.h"
namespace pls { namespace pls {
namespace internal { namespace internal {
...@@ -7,40 +8,44 @@ namespace base { ...@@ -7,40 +8,44 @@ namespace base {
void ttas_spin_lock::lock() { void ttas_spin_lock::lock() {
PROFILE_LOCK("Acquire Lock") PROFILE_LOCK("Acquire Lock")
int tries = 0;
int expected = 0; int expected = 0;
backoff_.reset();
while (true) { while (true) {
while (flag_.load(std::memory_order_relaxed) == 1) { while (flag_.load(std::memory_order_relaxed) == 1)
tries++; system_details::relax_cpu(); // Spin
if (tries % yield_at_tries_ == 0) {
this_thread::yield();
}
}
expected = 0; expected = 0;
if (flag_.compare_exchange_weak(expected, 1, std::memory_order_acquire)) { if (flag_.compare_exchange_weak(expected, 1, std::memory_order_acquire)) {
return; return;
} }
backoff_.do_backoff();
} }
} }
bool ttas_spin_lock::try_lock(unsigned int num_tries) { bool ttas_spin_lock::try_lock(unsigned int num_tries) {
PROFILE_LOCK("Try Acquire Lock") PROFILE_LOCK("Try Acquire Lock")
int expected = 0; int expected = 0;
backoff_.reset();
while (true) { while (true) {
while (flag_.load(std::memory_order_relaxed) == 1) { while (flag_.load() == 1) {
num_tries--; num_tries--;
if (num_tries <= 0) { if (num_tries <= 0) {
return false; return false;
} }
system_details::relax_cpu();
} }
expected = 0; expected = 0;
if (flag_.compare_exchange_weak(expected, 1, std::memory_order_acquire)) { if (flag_.compare_exchange_weak(expected, 1, std::memory_order_acquire)) {
return true; return true;
} }
num_tries--;
if (num_tries <= 0) {
return false;
}
backoff_.do_backoff();
} }
} }
......
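
For context, the shape of the test-and-test-and-set pattern used above: spin on a plain load, which keeps hitting the locally cached line, and only attempt the expensive compare-exchange once the lock looks free, backing off after each lost race. Compressed sketch, illustration only:

    std::atomic<int> flag{0};
    pls::internal::base::backoff backoff_strategy;
    while (true) {
      while (flag.load(std::memory_order_relaxed) == 1)
        ;                                  // read-only spin: no cache-line invalidations
      int expected = 0;
      if (flag.compare_exchange_weak(expected, 1, std::memory_order_acquire))
        break;                             // acquired; a failed CAS means we lost a race
      backoff_strategy.do_backoff();       // reduce contention before the next attempt
    }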
abstract_task.cpp
@@ -15,11 +15,11 @@ bool abstract_task::steal_work() {
   const size_t my_id = my_state->id_;
   const size_t offset = my_state->random_() % my_scheduler->num_threads();
-  const size_t max_tries = my_scheduler->num_threads(); // TODO: Tune this value
+  const size_t max_tries = my_scheduler->num_threads() - 1; // TODO: Tune this value

   for (size_t i = 0; i < max_tries; i++) {
     size_t target = (offset + i) % my_scheduler->num_threads();
     if (target == my_id) {
-      continue;
+      target = (target + 1) % my_scheduler->num_threads();
     }

     auto target_state = my_scheduler->thread_state_for(target);
...
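
The new victim selection visits num_threads() - 1 candidates and remaps a self-hit to the next thread instead of burning that iteration on a continue. Worked sketch with hypothetical values (4 threads, my_id = 0, offset = 2): the loop tries threads 2, 3, then 1, covering every other thread without a wasted pass.

    #include <cstddef>
    #include <cstdio>

    int main() {
      const std::size_t num_threads = 4, my_id = 0, offset = 2;
      const std::size_t max_tries = num_threads - 1;
      for (std::size_t i = 0; i < max_tries; i++) {
        std::size_t target = (offset + i) % num_threads;
        if (target == my_id)
          target = (target + 1) % num_threads;   // skip ourselves, take the next thread
        std::printf("try steal from thread %zu\n", target); // prints 2, 3, 1
      }
      return 0;
    }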
fork_join_task.cpp
@@ -54,6 +54,7 @@ void fork_join_sub_task::wait_for_all() {
   if (local_task != nullptr) {
     local_task->execute();
   } else {
+    while (ref_count_ > 0) {
     // Try to steal work.
     // External steal will be executed implicitly if successful
     PROFILE_STEALING("steal work")
@@ -64,6 +65,7 @@ void fork_join_sub_task::wait_for_all() {
       }
     }
   }
+  }

   tbb_task_->my_stack_->reset_state(stack_state_);
 }
...
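
The added while (ref_count_ > 0) turns a single steal attempt into a loop: the worker keeps stealing (and thereby executing) outside work until every child sub-task has signalled completion. Simplified shape of the join after this change — a sketch with placeholder helpers, not the project's full code:

    // get_local_sub_task() and steal_work() stand in for the actual calls.
    while (ref_count_ > 0) {
      if (fork_join_sub_task *local_task = get_local_sub_task()) {
        local_task->execute();          // prefer our own queued children
      } else {
        steal_work();                   // executes an external task on success
      }
    }
    tbb_task_->my_stack_->reset_state(stack_state_);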