#include "pls/pls.h"
using namespace pls;

#include "benchmark_runner.h"

#include "benchmark_base/matrix_div_conquer.h"
using namespace comparison_benchmarks::base;

#include <cstddef>
#include <memory>
#include <vector>

// Recursive, task-parallel divide-and-conquer matrix multiplication
// (classic 8-way block decomposition: each quadrant of the result is the
// sum of two sub-products, computed into per-strain scratch buffers and
// then combined element-wise).
//
// tmp_arrays    pre-allocated scratch, indexed [depth][strain_index][0..7];
//               each entry holds (size/2)^2 doubles for that depth.
// local_indices strain-local resource used to pick a scratch slot that no
//               concurrently running sub-task shares.
// size          edge length of the (square) matrices at this level.
// depth         current recursion depth (selects the scratch tier).
// result, a, b  blocked views; result = a * b on return.
void multiply_div_conquer(const std::vector<std::vector<std::vector<std::unique_ptr<double[]>>>> &tmp_arrays,
                          pls::strain_local_resource &local_indices,
                          size_t size,
                          size_t depth,
                          matrix_div_conquer::blocked_matrix_view &result,
                          matrix_div_conquer::blocked_matrix_view &a,
                          matrix_div_conquer::blocked_matrix_view &b) {
  // Recursion cut-off: small blocks are multiplied directly.
  if (size <= 8) {
    multiply_naive(size, result, a, b);
    return;
  }

  // Temporary storage required for the intermediate results.
  // The strain-local index guarantees that concurrently executing tasks at
  // the same depth never hand out the same scratch buffers.
  auto strain_local_index = local_indices.get_item(depth);
  auto const &scratch = tmp_arrays[depth][strain_local_index.get_strain_index()];
  std::unique_ptr<double[]> const &data_1_1_a = scratch[0];
  std::unique_ptr<double[]> const &data_1_1_b = scratch[1];
  std::unique_ptr<double[]> const &data_1_2_a = scratch[2];
  std::unique_ptr<double[]> const &data_1_2_b = scratch[3];
  std::unique_ptr<double[]> const &data_2_1_a = scratch[4];
  std::unique_ptr<double[]> const &data_2_1_b = scratch[5];
  std::unique_ptr<double[]> const &data_2_2_a = scratch[6];
  std::unique_ptr<double[]> const &data_2_2_b = scratch[7];

  // Handles to sub-matrices used.
  matrix_div_conquer::blocked_matrix_view result_1_1 = result.quadrant_1_1();
  matrix_div_conquer::blocked_matrix_view result_1_2 = result.quadrant_1_2();
  matrix_div_conquer::blocked_matrix_view result_2_1 = result.quadrant_2_1();
  matrix_div_conquer::blocked_matrix_view result_2_2 = result.quadrant_2_2();

  // Each result quadrant is the sum of two sub-products ("_a" and "_b"),
  // computed into the per-strain scratch memory picked above.
  matrix_div_conquer::blocked_matrix_view result_1_1_a{data_1_1_a.get(), size / 2};
  matrix_div_conquer::blocked_matrix_view result_1_1_b{data_1_1_b.get(), size / 2};
  matrix_div_conquer::blocked_matrix_view result_1_2_a{data_1_2_a.get(), size / 2};
  matrix_div_conquer::blocked_matrix_view result_1_2_b{data_1_2_b.get(), size / 2};
  matrix_div_conquer::blocked_matrix_view result_2_1_a{data_2_1_a.get(), size / 2};
  matrix_div_conquer::blocked_matrix_view result_2_1_b{data_2_1_b.get(), size / 2};
  matrix_div_conquer::blocked_matrix_view result_2_2_a{data_2_2_a.get(), size / 2};
  matrix_div_conquer::blocked_matrix_view result_2_2_b{data_2_2_b.get(), size / 2};

  matrix_div_conquer::blocked_matrix_view a_1_1 = a.quadrant_1_1();
  matrix_div_conquer::blocked_matrix_view a_1_2 = a.quadrant_1_2();
  matrix_div_conquer::blocked_matrix_view a_2_1 = a.quadrant_2_1();
  matrix_div_conquer::blocked_matrix_view a_2_2 = a.quadrant_2_2();
  matrix_div_conquer::blocked_matrix_view b_1_1 = b.quadrant_1_1();
  matrix_div_conquer::blocked_matrix_view b_1_2 = b.quadrant_1_2();
  matrix_div_conquer::blocked_matrix_view b_2_1 = b.quadrant_2_1();
  matrix_div_conquer::blocked_matrix_view b_2_2 = b.quadrant_2_2();

  // Divide work into the 8 independent sub-multiplications.
  // Captures by reference are safe: pls::sync() below joins all spawned
  // tasks before the captured locals go out of scope.
  pls::spawn([&]() {
    multiply_div_conquer(tmp_arrays, local_indices, size / 2, depth + 1, result_1_1_a, a_1_1, b_1_1);
  });
  pls::spawn([&]() {
    multiply_div_conquer(tmp_arrays, local_indices, size / 2, depth + 1, result_1_1_b, a_1_2, b_2_1);
  });
  pls::spawn([&]() {
    multiply_div_conquer(tmp_arrays, local_indices, size / 2, depth + 1, result_1_2_a, a_1_1, b_1_2);
  });
  pls::spawn([&]() {
    multiply_div_conquer(tmp_arrays, local_indices, size / 2, depth + 1, result_1_2_b, a_1_2, b_2_2);
  });
  pls::spawn([&]() {
    multiply_div_conquer(tmp_arrays, local_indices, size / 2, depth + 1, result_2_1_a, a_2_1, b_1_1);
  });
  pls::spawn([&]() {
    multiply_div_conquer(tmp_arrays, local_indices, size / 2, depth + 1, result_2_1_b, a_2_2, b_2_1);
  });
  pls::spawn([&]() {
    multiply_div_conquer(tmp_arrays, local_indices, size / 2, depth + 1, result_2_2_a, a_2_1, b_1_2);
  });
  pls::spawn([&]() {
    multiply_div_conquer(tmp_arrays, local_indices, size / 2, depth + 1, result_2_2_b, a_2_2, b_2_2);
  });
  pls::sync();

  // Combine results. The exact blocked layout is not important here, as all
  // views at one level share the same element order, so just sum element-wise.
  for (size_t i = 0; i < (size / 2) * (size / 2); i++) {
    result_1_1.get_data()[i] = result_1_1_a.get_data()[i] + result_1_1_b.get_data()[i];
    result_1_2.get_data()[i] = result_1_2_a.get_data()[i] + result_1_2_b.get_data()[i];
    result_2_1.get_data()[i] = result_2_1_a.get_data()[i] + result_2_1_b.get_data()[i];
    result_2_2.get_data()[i] = result_2_2_a.get_data()[i] + result_2_2_b.get_data()[i];
  }
}

// Scheduler limits for this benchmark binary.
constexpr int MAX_NUM_TASKS = 32;
constexpr int MAX_STACK_SIZE = 4096 * 2;

int main(int argc, char **argv) {
  const size_t size = matrix_div_conquer::MATRIX_SIZE;

  int num_threads;
  string directory;
  benchmark_runner::read_args(argc, argv, num_threads, directory);

  string test_name = to_string(num_threads) + ".csv";
  string full_directory = directory + "/PLS_v3/";
  benchmark_runner runner{full_directory, test_name};

  // Only run on one version to avoid copy.
  std::unique_ptr<double[]> result_data = std::make_unique<double[]>(size * size);
  std::unique_ptr<double[]> a_data = std::make_unique<double[]>(size * size);
  std::unique_ptr<double[]> b_data = std::make_unique<double[]>(size * size);
  matrix_div_conquer::blocked_matrix_view a{a_data.get(), size};
  matrix_div_conquer::blocked_matrix_view b{b_data.get(), size};
  matrix_div_conquer::blocked_matrix_view result{result_data.get(), size};

  // Fill data arrays as needed.
  a.fill_default_data();
  b.fill_default_data();
  matrix_div_conquer::fill_block_lookup(size);

  // Strain-local scratch: [depth][strain][8 buffers], each (size/2)^2 doubles
  // at that depth. Allocated up front so the timed region does no allocation.
  std::vector<std::vector<std::vector<std::unique_ptr<double[]>>>> div_conquer_temp_arrays;
  size_t max_depth = 0;
  size_t remaining_size = size;
  while (remaining_size > 1) {
    auto &depth_buffers = div_conquer_temp_arrays.emplace_back();
    // FIX: allocate one buffer set per worker thread (previously hard-coded
    // to 8). strain_local_resource below hands out up to num_threads strain
    // indices, so fewer sets would be read out of bounds for num_threads > 8.
    for (int thread_id = 0; thread_id < num_threads; thread_id++) {
      auto &depth_thread_buffers = depth_buffers.emplace_back();
      for (int i = 0; i < 8; i++) {
        depth_thread_buffers.emplace_back(
            std::make_unique<double[]>((remaining_size / 2) * (remaining_size / 2)));
      }
    }
    max_depth++;
    remaining_size = remaining_size / 2;
  }
  pls::strain_local_resource local_indices{(unsigned) num_threads, (unsigned) max_depth};

  scheduler scheduler{(unsigned) num_threads, MAX_NUM_TASKS, MAX_STACK_SIZE};
  runner.run_iterations(1, [&]() {
    scheduler.perform_work([&]() {
      multiply_div_conquer(div_conquer_temp_arrays, local_indices, size, 0, result, a, b);
    });
  }, 0);
  runner.commit_results(true);
  scheduler.terminate();
}