Commit 67553581 by FritzFlorian

Fix locking bug in high-level stealing.

The lock was not released when a split_task was executed. We did a dirty fix by allowing the method to unlock the lock itself, but this should be cleaned up in the future.
parent cbdc5e4f
Pipeline #1116 passed in 3 minutes 16 seconds
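At its core, the change threads the victim's queue lock through `split_task`: the caller takes the lock, and on a successful steal the callee releases it just before the stolen work runs, while on failure the lock stays held and the caller releases it. A minimal, self-contained sketch of that hand-off contract follows; `std::mutex` stands in for `base::spin_lock`, and everything except the `split_task(lock)` shape is hypothetical scaffolding:

```cpp
#include <iostream>
#include <mutex>

struct abstract_task {
  // Contract after this commit: the caller holds *lock on entry.
  // On success the callee unlocks it before running the stolen work;
  // on failure it leaves the lock held and the caller unlocks.
  virtual bool split_task(std::mutex* lock) = 0;
  virtual ~abstract_task() = default;
};

struct example_task : abstract_task {
  bool has_work = true;
  bool split_task(std::mutex* lock) override {
    if (!has_work) {
      return false;          // failure: the lock stays held by the caller
    }
    has_work = false;
    lock->unlock();          // success: release before executing the steal
    std::cout << "running stolen task without holding the victim's lock\n";
    return true;
  }
};

int main() {
  std::mutex victim_lock;
  example_task task;

  victim_lock.lock();
  if (!task.split_task(&victim_lock)) {
    victim_lock.unlock();    // failure path: the caller unlocks
  }

  victim_lock.lock();        // second round exercises the failure path
  if (!task.split_task(&victim_lock)) {
    victim_lock.unlock();
  }
}
```

The asymmetry (callee unlocks on success, caller unlocks on failure) is exactly what the commit message calls a dirty fix: the lock's ownership is no longer visible in any single scope.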
@@ -28,7 +28,7 @@ namespace pls {
 int depth() { return depth_; }
 protected:
 virtual bool internal_stealing(abstract_task* other_task) = 0;
-virtual bool split_task() = 0;
+virtual bool split_task(base::spin_lock* lock) = 0;
 bool steal_work();
 };
......
@@ -40,7 +40,7 @@ namespace pls {
 return false;
 }
-bool split_task() override {
+bool split_task(base::spin_lock* /*lock*/) override {
 return false;
 }
 };
@@ -64,7 +64,7 @@ namespace pls {
 return false;
 }
-bool split_task() override {
+bool split_task(base::spin_lock* /*lock*/) override {
 return false;
 }
 };
......
@@ -57,7 +57,7 @@ namespace pls {
 return false;
 }
-bool split_task() override;
+bool split_task(base::spin_lock* lock) override;
 };
 template<typename Function>
@@ -83,16 +83,19 @@ namespace pls {
 return false;
 }
-bool split_task() override {
+bool split_task(base::spin_lock* /*lock*/) override {
 return false;
 }
 };
 template<typename Function>
-bool run_on_n_threads_task<Function>::split_task() {
+bool run_on_n_threads_task<Function>::split_task(base::spin_lock* lock) {
 if (get_counter() <= 0) {
 return false;
 }
+// In success case, unlock.
+// TODO: this locking is complicated and error prone.
+lock->unlock();
 auto scheduler = base::this_thread::state<thread_state>()->scheduler_;
 auto task = run_on_n_threads_task_worker<Function>{function_, this};
......
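Note the ordering in the success path of `run_on_n_threads_task<Function>::split_task` above: the lock is released before the worker task is created and executed. Presumably this is the bug the commit message describes; previously `steal_work` held the victim's spin lock (via `std::lock_guard`) for the whole call, so a stolen task that re-entered the scheduler and tried to take that same non-reentrant lock could spin forever. Handing the lock into `split_task` lets it be dropped at the last point where the failure path still needs it.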
@@ -60,6 +60,7 @@ namespace pls {
 base::aligned_stack* my_stack_;
 // Double-Ended Queue management
+base::spin_lock lock_;
 tbb_sub_task* top_;
 tbb_sub_task* bottom_;
@@ -70,7 +71,7 @@ namespace pls {
 tbb_sub_task* get_stolen_sub_task();
 bool internal_stealing(abstract_task* other_task) override;
-bool split_task() override;
+bool split_task(base::spin_lock* /*lock*/) override;
 public:
 explicit tbb_task(tbb_sub_task* root_task):
......
@@ -13,7 +13,9 @@ namespace pls {
 for (size_t i = 1; i < my_scheduler->num_threads(); i++) {
 size_t target = (my_id + i) % my_scheduler->num_threads();
 auto target_state = my_scheduler->thread_state_for(target);
-std::lock_guard<base::spin_lock> lock{target_state->lock_};
+// TODO: Cleaner locking using std::lock_guard
+target_state->lock_.lock();
 // Dig down to our level
 abstract_task* current_task = target_state->root_task_;
@@ -27,6 +29,7 @@ namespace pls {
 current_task->depth_ == depth_) {
 if (internal_stealing(current_task)) {
 // internal steal was a success, hand it back to the internal scheduler
+target_state->lock_.unlock();
 return true;
 }
@@ -39,13 +42,14 @@ namespace pls {
 // Execute 'top level task steal' if possible
 // (only try deeper tasks to keep depth restricted stealing)
 while (current_task != nullptr) {
-if (current_task->split_task()) {
+if (current_task->split_task(&target_state->lock_)) {
 // internal steal was no success (we did a top level task steal)
 return false;
 }
 current_task = current_task->child_task_;
 }
+target_state->lock_.unlock();
 }
 // internal steal was no success
......
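The two TODO comments ("cleaner locking", "this locking is complicated and error prone") point at the obvious follow-up: expressing this hand-off with an RAII wrapper rather than raw `lock()`/`unlock()` calls, so every early return in `steal_work` unlocks automatically. A sketch of one possible shape using the standard `std::unique_lock`; the function names and bodies here are illustrative, not the project's actual code:

```cpp
#include <mutex>

// Stand-in for base::spin_lock; any type meeting the BasicLockable
// requirements (lock()/unlock()) works with std::unique_lock.
using lock_t = std::mutex;

// Hypothetical callee: on success it releases via the RAII handle.
bool split_task_sketch(std::unique_lock<lock_t>& lock, bool have_work) {
  if (!have_work) {
    return false;            // lock still owned; the caller's guard cleans up
  }
  lock.unlock();             // success: release before running stolen work
  // ... execute the stolen task here, without holding the lock ...
  return true;
}

bool steal_work_sketch(lock_t& victim_lock, bool have_work) {
  std::unique_lock<lock_t> lock{victim_lock};
  // ... dig down to our level, try internal stealing, ...
  if (split_task_sketch(lock, have_work)) {
    return false;            // top level steal; nothing left to unlock
  }
  return true;
}  // every remaining exit path unlocks automatically here

int main() {
  lock_t victim_lock;
  steal_work_sketch(victim_lock, true);   // success path
  steal_work_sketch(victim_lock, false);  // failure path
}
```

With this shape none of the three manual `unlock()` calls in the hunks above would be needed; a successful `split_task` simply calls `unlock()` on the guard it received.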
@@ -11,7 +11,7 @@ namespace pls {
 below_{nullptr},
 above_{nullptr} {}
-tbb_sub_task::tbb_sub_task(const tbb_sub_task& other) {
+tbb_sub_task::tbb_sub_task(const tbb_sub_task& /*other*/) {
 // Do Nothing, will be inited after this anyways
 }
@@ -34,6 +34,8 @@ namespace pls {
 sub_task->stack_state_ = tbb_task_->my_stack_->save_state();
 // Put sub_task into stealing queue
+{
+std::lock_guard<base::spin_lock> lock{tbb_task_->lock_};
 if (tbb_task_->bottom_ != nullptr) {
 tbb_task_->bottom_->below_ = sub_task;
 } else {
@@ -43,6 +45,7 @@ namespace pls {
 sub_task->below_ = nullptr;
 tbb_task_->bottom_ = sub_task;
 }
+}
 void tbb_sub_task::wait_for_all() {
 while (ref_count_ > 0) {
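The extra braces introduced around the queue insertion bound the `std::lock_guard`'s lifetime: the guard releases `tbb_task_->lock_` at the new closing brace, so the lock is presumably already free by the time `wait_for_all` busy-waits on `ref_count_`. The idiom in isolation (a generic sketch, not project code):

```cpp
#include <mutex>

std::mutex queue_lock;     // stands in for tbb_task_->lock_
int queue_size = 0;        // stands in for the sub-task deque

void spawn_then_wait() {
  {
    std::lock_guard<std::mutex> lock{queue_lock};  // held only in this block
    ++queue_size;          // critical section: mutate the queue
  }                        // guard destroyed: lock released here
  while (queue_size > 0) { // e.g. wait_for_all's spin on ref_count_
    --queue_size;          // runs without holding the lock
  }
}

int main() { spawn_then_wait(); }
```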
@@ -61,12 +64,14 @@ namespace pls {
 }
 tbb_sub_task* tbb_task::get_local_sub_task() {
+// Remove from bottom of queue
+std::lock_guard<base::spin_lock> lock{lock_};
 if (bottom_ == nullptr) {
 return nullptr;
 }
-// Remove from bottom of queue
-tbb_sub_task* result = bottom_;
+tbb_sub_task *result = bottom_;
 bottom_ = bottom_->above_;
 if (bottom_ == nullptr) {
 top_ = nullptr;
@@ -78,11 +83,14 @@ namespace pls {
 }
 tbb_sub_task* tbb_task::get_stolen_sub_task() {
 // Remove from top of queue
+std::lock_guard<base::spin_lock> lock{lock_};
 if (top_ == nullptr) {
 return nullptr;
 }
-tbb_sub_task* result = top_;
+tbb_sub_task *result = top_;
 top_ = top_->below_;
 if (top_ == nullptr) {
 bottom_ = nullptr;
@@ -110,11 +118,14 @@ namespace pls {
 }
 }
-bool tbb_task::split_task() {
+bool tbb_task::split_task(base::spin_lock* lock) {
 tbb_sub_task* stolen_sub_task = get_stolen_sub_task();
 if (stolen_sub_task == nullptr) {
 return false;
 }
+// In success case, unlock.
+// TODO: this locking is complicated and error prone.
+lock->unlock();
 tbb_task task{stolen_sub_task};
 scheduler::execute_task(task, depth());
......
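For reference, the diff only assumes that `base::spin_lock` exposes `lock()` and `unlock()`, i.e. that it is BasicLockable, which is also what lets the deque code wrap it in `std::lock_guard`. A minimal test-and-set implementation matching that interface could look like this; it is a generic sketch, not the project's actual class:

```cpp
#include <atomic>
#include <mutex>

namespace base {
// Minimal busy-waiting lock with the lock()/unlock() interface the diff
// relies on; BasicLockable, so it also works with std::lock_guard.
class spin_lock {
  std::atomic_flag flag_ = ATOMIC_FLAG_INIT;
 public:
  void lock() {
    while (flag_.test_and_set(std::memory_order_acquire)) {
      // busy-wait until the current holder clears the flag
    }
  }
  void unlock() { flag_.clear(std::memory_order_release); }
};
}  // namespace base

int main() {
  base::spin_lock lock;
  std::lock_guard<base::spin_lock> guard{lock};  // RAII acquire/release
}
```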