#include <catch.hpp>

#include <atomic>
#include <thread>
#include <mutex>

#include "pls/internal/scheduling/scheduler.h"
#include "pls/internal/scheduling/cont.h"
#include "pls/internal/scheduling/cont_manager.h"
#include "pls/internal/scheduling/scheduler_memory.h"
#include "pls/internal/scheduling/parallel_result.h"

using namespace pls::internal::scheduling;

// TODO: Introduce actual tests once multiple threads work...
TEST_CASE("continuation stealing", "[internal/scheduling/cont_manager.h]") {
  const int NUM_THREADS = 2;
  const int NUM_TASKS = 8;
  const int MAX_TASK_STACK_SIZE = 8;
  const int NUM_CONTS = 8;
  const int MAX_CONT_SIZE = 256;

  static_scheduler_memory<NUM_THREADS,
                          NUM_TASKS,
                          MAX_TASK_STACK_SIZE,
                          NUM_CONTS,
                          MAX_CONT_SIZE> static_scheduler_memory;

  scheduler scheduler{static_scheduler_memory, NUM_THREADS};

  // Coordinate progress so the two tasks run in the order described below.
  std::atomic<int> progress{0};

  // Intended order:
  // 0) The main thread starts working on the first (left) task and spins on progress.
  // 1) The second thread steals the right task, sets progress to 1 and thereby
  //    releases the left task.

  scheduler.perform_work([&]() {
    return scheduler::par([&]() {
      while (progress.load() != 1);  // Spin until the right task has been stolen and finished.
      return parallel_result<int>{0};
    }, [&]() {
      progress.store(1);  // Signal the spinning left task that the steal happened.
      return parallel_result<int>{0};
    }).then([&](int, int) {
      // The continuation receives both branch results; they are unused here.
      return parallel_result<int>{0};
    });
  });
}
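
// Sketch of a follow-up test for the TODO above: check that the results of both
// branches actually reach the continuation. This is an assumption-based sketch that
// reuses only the API already exercised in the test above (scheduler::par, .then,
// parallel_result, perform_work); whether values propagate into the continuation
// arguments this way must be verified against the real cont_manager implementation.
TEST_CASE("continuation joins results", "[internal/scheduling/cont_manager.h]") {
  const int NUM_THREADS = 2;
  const int NUM_TASKS = 8;
  const int MAX_TASK_STACK_SIZE = 8;
  const int NUM_CONTS = 8;
  const int MAX_CONT_SIZE = 256;

  static_scheduler_memory<NUM_THREADS,
                          NUM_TASKS,
                          MAX_TASK_STACK_SIZE,
                          NUM_CONTS,
                          MAX_CONT_SIZE> static_scheduler_memory;

  scheduler scheduler{static_scheduler_memory, NUM_THREADS};

  // Capture the joined result outside the scheduler, as Catch assertions are not
  // guaranteed to be safe on worker threads.
  std::atomic<int> sum{0};

  scheduler.perform_work([&]() {
    return scheduler::par([&]() {
      return parallel_result<int>{1};
    }, [&]() {
      return parallel_result<int>{2};
    }).then([&](int left, int right) {
      sum.store(left + right);
      return parallel_result<int>{0};
    });
  });

  REQUIRE(sum.load() == 3);
}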