#include "pls/internal/scheduling/scheduler.h"
#include "pls/internal/scheduling/parallel_result.h"
#include "pls/internal/scheduling/scheduler_memory.h"
#include "pls/algorithms/for_each.h"
using namespace pls::internal::scheduling;

#include "benchmark_runner.h"
#include "benchmark_base/matrix.h"

using namespace comparison_benchmarks::base;

// Matrix that multiplies its columns in parallel via the PLS v2
// continuation-style scheduler (for_each_range over the column index).
//
// NOTE(review): the template argument lists in this file had been stripped
// (likely by an HTML/markup pass eating everything between '<' and '>');
// they are restored here from the class body's use of T and SIZE.
template<typename T, int SIZE>
class pls_matrix : public matrix::matrix<T, SIZE> {
 public:
  pls_matrix() : matrix::matrix<T, SIZE>() {}

  // Computes this = a * b, one task per output column.
  // Returns the scheduler's parallel_result so the caller can chain .then().
  parallel_result<int> pls_multiply(const matrix::matrix<T, SIZE> &a,
                                    const matrix::matrix<T, SIZE> &b) {
    return pls::algorithm::for_each_range(0, SIZE, [this, &a, &b](int i) {
      this->multiply_column(i, a, b);
    });
  }
};

// Static sizing of the scheduler's per-thread task/continuation storage.
constexpr size_t MAX_NUM_THREADS = 8;
constexpr size_t MAX_NUM_TASKS = 32;
constexpr size_t MAX_NUM_CONTS = 32;
constexpr size_t MAX_CONT_SIZE = 512;

int main(int argc, char **argv) {
  int num_threads;
  string directory;
  benchmark_runner::read_args(argc, argv, num_threads, directory);

  string test_name = to_string(num_threads) + ".csv";
  string full_directory = directory + "/PLS_v2/";
  benchmark_runner runner{full_directory, test_name};

  // Element type and dimension were lost with the stripped template
  // arguments; double + the benchmark suite's matrix size is assumed —
  // TODO(review): confirm against the sibling benchmark implementations.
  pls_matrix<double, matrix::MATRIX_SIZE> a;
  pls_matrix<double, matrix::MATRIX_SIZE> b;
  pls_matrix<double, matrix::MATRIX_SIZE> result;

  static_scheduler_memory<MAX_NUM_THREADS, MAX_NUM_TASKS, MAX_NUM_CONTS,
                          MAX_CONT_SIZE> static_scheduler_memory;
  scheduler scheduler{static_scheduler_memory, (unsigned int) num_threads};

  // Warmup: run the multiplication without timing it.
  for (int i = 0; i < matrix::WARMUP_ITERATIONS; i++) {
    scheduler.perform_work([&]() {
      return scheduler::par([&]() {
        return result.pls_multiply(a, b);
      }, []() {
        return parallel_result<int>{0};
      }).then([&](int, int) {
        return parallel_result<int>{0};
      });
    });
  }

  // Measured iterations: the runner brackets each multiplication.
  for (int i = 0; i < matrix::NUM_ITERATIONS; i++) {
    scheduler.perform_work([&]() {
      runner.start_iteration();
      return scheduler::par([&]() {
        return result.pls_multiply(a, b);
      }, []() {
        return parallel_result<int>{0};
      }).then([&](int, int) {
        runner.end_iteration();
        return parallel_result<int>{0};
      });
    });
  }
  runner.commit_results(true);
}