// scheduler.cpp -- implementation for pls/internal/scheduling/scheduler.h
#include "pls/internal/scheduling/scheduler.h"

#include "pls/internal/scheduling/thread_state.h"

#include "pls/internal/base/thread.h"
#include "pls/internal/base/error_handling.h"
#include "pls/internal/helpers/profiler.h"

namespace pls {
namespace internal {
namespace scheduling {

scheduler::scheduler(scheduler_memory &memory, const unsigned int num_threads, bool reuse_thread) :
13
    num_threads_{num_threads},
14
    reuse_thread_{reuse_thread},
15
    memory_{memory},
16
    sync_barrier_{num_threads + 1 - reuse_thread},
17
    terminated_{false} {
18
  if (num_threads_ > memory.max_threads()) {
19 20 21 22 23
    PLS_ERROR("Tried to create scheduler with more OS threads than pre-allocated memory.");
  }

  for (unsigned int i = 0; i < num_threads_; i++) {
    // Placement new is required, as the memory of `memory_` is not required to be initialized.
24 25
    memory.thread_state_for(i).scheduler_ = this;
    memory.thread_state_for(i).id_ = i;
26 27 28 29

    if (reuse_thread && i == 0) {
      continue; // Skip over first/main thread when re-using the users thread, as this one will replace the first one.
    }
30
    memory.thread_for(i) = base::thread(&scheduler::work_thread_main_loop, &memory_.thread_state_for(i));
31 32 33 34 35 36 37
  }
}

scheduler::~scheduler() {
  // Shut down and join all worker threads before the scheduler (and the
  // thread states in memory_ they point into) is destroyed. terminate()
  // is a no-op if the user already called it explicitly.
  terminate();
}

38 39
void scheduler::work_thread_main_loop() {
  auto &scheduler = thread_state::get().get_scheduler();
40
  while (true) {
41
    // Wait to be triggered
42
    scheduler.sync_barrier_.wait();
43 44

    // Check for shutdown
45
    if (scheduler.terminated_) {
46 47 48
      return;
    }

49
    scheduler.work_thread_work_section();
50

51
    // Sync back with main thread
52 53 54 55 56 57
    scheduler.sync_barrier_.wait();
  }
}

void scheduler::work_thread_work_section() {
  auto &my_state = thread_state::get();
58
  my_state.reset();
59 60 61 62
  auto &my_cont_manager = my_state.get_cont_manager();

  auto const num_threads = my_state.get_scheduler().num_threads();
  auto const my_id = my_state.get_id();
63 64

  if (my_state.get_id() == 0) {
65
    // Main Thread, kick off by executing the user's main code block.
66
    main_thread_starter_function_->run();
67
  }
68 69

  do {
70 71 72 73 74 75 76 77
    // Work off pending continuations we need to execute locally
    while (my_cont_manager.falling_through()) {
      my_cont_manager.execute_fall_through_code();
    }

    // Steal Routine (will be continuously executed when there are no more fall through's).
    // TODO: move into separate function
    const size_t offset = my_state.random_() % num_threads;
78
    const size_t max_tries = num_threads;
79 80 81 82
    for (size_t i = 0; i < max_tries; i++) {
      size_t target = (offset + i) % num_threads;
      auto &target_state = my_state.get_scheduler().thread_state_for(target);

83
      PLS_ASSERT(my_cont_manager.is_clean(), "Only steal with clean chain!");
84
      PROFILE_STEALING("steal")
85
      auto *stolen_task = target_state.get_task_manager().steal_remote_task(my_cont_manager);
86
      PROFILE_END_BLOCK;
87 88 89 90
      if (stolen_task != nullptr) {
        my_state.parent_cont_ = stolen_task->get_cont();
        my_state.right_spawn_ = true;
        stolen_task->execute();
91 92 93 94
        if (my_cont_manager.falling_through()) {
          break;
        } else {
          my_cont_manager.fall_through_and_notify_cont(stolen_task->get_cont(), true);
95
          break;
96
        }
97 98
      }
    }
99 100 101
//    if (!my_cont_manager.falling_through()) {
//      base::this_thread::sleep(5);
//    }
102
  } while (!work_section_done_);
103

104
  PLS_ASSERT(my_cont_manager.is_clean(), "Only finish work section with clean chain!");
105 106
}

107
void scheduler::terminate() {
108 109 110 111 112 113 114
  if (terminated_) {
    return;
  }

  terminated_ = true;
  sync_barrier_.wait();

115 116 117
  for (unsigned int i = 0; i < num_threads_; i++) {
    if (reuse_thread_ && i == 0) {
      continue;
118
    }
119
    memory_.thread_for(i).join();
120 121 122
  }
}

123
thread_state &scheduler::thread_state_for(size_t id) { return memory_.thread_state_for(id); }

}  // namespace scheduling
}  // namespace internal
}  // namespace pls