#include "pls/internal/scheduling/scheduler.h" #include "pls/internal/scheduling/parallel_result.h" #include "pls/internal/scheduling/scheduler_memory.h" #include "pls/internal/helpers/profiler.h" using namespace pls::internal::scheduling; #include #include #include #include "benchmark_runner.h" #include "benchmark_base/fft.h" using namespace comparison_benchmarks::base; parallel_result conquer(fft::complex_vector::iterator data, int n) { if (n < 2) { return parallel_result{0}; } fft::divide(data, n); if (n <= fft::RECURSIVE_CUTOFF) { fft::conquer(data, n / 2); fft::conquer(data + n / 2, n / 2); fft::combine(data, n); return parallel_result{0}; } else { return scheduler::par([=]() { return conquer(data, n / 2); }, [=]() { return conquer(data + n / 2, n / 2); }).then([=](int, int) { fft::combine(data, n); return parallel_result{0}; }); } } constexpr int MAX_NUM_THREADS = 8; constexpr int MAX_NUM_TASKS = 64; constexpr int MAX_NUM_CONTS = 64; constexpr int MAX_CONT_SIZE = 256; int main(int argc, char **argv) { int num_threads; string directory; benchmark_runner::read_args(argc, argv, num_threads, directory); string test_name = to_string(num_threads) + ".csv"; string full_directory = directory + "/PLS_v2/"; benchmark_runner runner{full_directory, test_name}; fft::complex_vector data = fft::generate_input(); static_scheduler_memory static_scheduler_memory; scheduler scheduler{static_scheduler_memory, (unsigned int) num_threads}; for (int i = 0; i < fft::NUM_WARMUP_ITERATIONS; i++) { scheduler.perform_work([&]() { return scheduler::par([&]() { return conquer(data.begin(), fft::SIZE); }, []() { return parallel_result{0}; }).then([&](short, short) { return parallel_result{0}; }); }); } for (int i = 0; i < fft::NUM_ITERATIONS; i++) { scheduler.perform_work([&]() { runner.start_iteration(); return scheduler::par([&]() { return conquer(data.begin(), fft::SIZE); }, []() { return parallel_result{0}; }).then([&](short, short) { runner.end_iteration(); return parallel_result{0}; }); }); } runner.commit_results(true); return 0; }