// main.cpp
#include "pls/internal/scheduling/scheduler.h"
#include "pls/internal/scheduling/parallel_result.h"
#include "pls/internal/scheduling/scheduler_memory.h"
#include "pls/algorithms/for_each.h"
#include <string>

using namespace pls::internal::scheduling;

#include "benchmark_runner.h"
#include "benchmark_base/matrix.h"

using namespace comparison_benchmarks::base;

// Benchmark matrix that adds a PLS-parallel multiply: pls::algorithm::for_each_range
// dispatches one multiply_column(i, a, b) call per column index i.
template<typename T, int SIZE>
class pls_matrix : public matrix::matrix<T, SIZE> {
 public:
  pls_matrix() : matrix::matrix<T, SIZE>() {}

  parallel_result<int> pls_multiply(const matrix::matrix<T, SIZE> &a, const matrix::matrix<T, SIZE> &b) {
    return pls::algorithm::for_each_range(0, SIZE, [this, &a, &b](int i) {
      this->multiply_column(i, a, b);
    });
  }
};

// Static limits for the scheduler memory: worker threads, simultaneously live tasks and
// continuations, and (presumably) the per-continuation buffer size in bytes.
constexpr size_t MAX_NUM_THREADS = 8;
constexpr size_t MAX_NUM_TASKS = 32;
constexpr size_t MAX_NUM_CONTS = 32;
constexpr size_t MAX_CONT_SIZE = 512;

int main(int argc, char **argv) {
  int num_threads;
  std::string directory;
  benchmark_runner::read_args(argc, argv, num_threads, directory);

  // Results go to <directory>/PLS_v2/<num_threads>.csv.
  std::string test_name = std::to_string(num_threads) + ".csv";
  std::string full_directory = directory + "/PLS_v2/";
  benchmark_runner runner{full_directory, test_name};

  pls_matrix<double, matrix::MATRIX_SIZE> a;
  pls_matrix<double, matrix::MATRIX_SIZE> b;
  pls_matrix<double, matrix::MATRIX_SIZE> result;

  // Statically sized scheduler memory, dimensioned by the limits defined above.
  static_scheduler_memory<MAX_NUM_THREADS,
                          MAX_NUM_TASKS,
                          MAX_NUM_CONTS,
                          MAX_CONT_SIZE> static_scheduler_memory;

  // The scheduler runs on the worker thread count requested via the command line.
  scheduler scheduler{static_scheduler_memory, (unsigned int) num_threads};

  // Untimed warm-up runs of the same scheduler::par / .then pattern used for the measured iterations below.
  for (int i = 0; i < matrix::WARMUP_ITERATIONS; i++) {
    scheduler.perform_work([&]() {
      return scheduler::par([&]() {
        return result.pls_multiply(a, b);
      }, []() {
        return parallel_result<int>{0};
      }).then([&](int, int) {
        return parallel_result<int>{0};
      });
    });
  }

  // Measured runs: start_iteration() before spawning the parallel multiply, end_iteration() in the continuation once it has finished.
  for (int i = 0; i < matrix::NUM_ITERATIONS; i++) {
    scheduler.perform_work([&]() {
      runner.start_iteration();

      return scheduler::par([&]() {
        return result.pls_multiply(a, b);
      }, []() {
        return parallel_result<int>{0};
      }).then([&](int, int) {
        runner.end_iteration();
        return parallel_result<int>{0};
      });
    });
  }
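  // Hand the recorded iteration times to the benchmark runner for output.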
  runner.commit_results(true);

}