// (removed non-source page residue: "211 líneas", "6.7 KiB", "C++")

/**
* \file
* \brief PDS HW2 tests
*
* To run these tests, execute:
* make tests
* mpirun -np <N> ./out/tests
*
* Note:
* Yes each process runs the entire test suite!!
*
* \author
* Christos Choutouridis AEM:8997
* <cchoutou@ece.auth.gr>
*/
#include <gtest/gtest.h>
#include <mpi.h>

#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <limits>
#include <random>
#include <vector>

#include "distsort.hpp"
/*
* Global fixtures
*/
// MPI handler for the test session
MPI_t<> ts_mpi;
// Mersenne seeded from hw if possible. range: [type_min, type_max]
std::random_device rd;
std::mt19937 gen(rd());
class TMPIdistSort : public ::testing::Test {
protected:
static void SetUpTestSuite() {
int argc = 0;
char** argv = nullptr;
ts_mpi.init(&argc, &argv);
}
static void TearDownTestSuite() {
ts_mpi.finalize();
}
};
/*
 * MPI: SysTest (acceptance)
 * Each process executes distBubbletonic for uint8_t [16]
 */
TEST_F(TMPIdistSort, distBubbletonic_test1) {
// Create and fill vector
using tsValue_t = uint8_t; // Test parameters
size_t ts_buffer_size = 16;
ShadowedVec_t<tsValue_t> ts_Data;
std::uniform_int_distribution<tsValue_t > dis(
std::numeric_limits<tsValue_t>::min(),
std::numeric_limits<tsValue_t>::max()
);
ts_Data.resize(ts_buffer_size);
std::generate(ts_Data.begin(), ts_Data.end(), [&]() { return dis(gen); });
// Execute function under test in all processes
distBubbletonic(ts_Data, ts_mpi.size(), ts_mpi.rank());
// Local min and max
auto local_min = *std::min_element(ts_Data.begin(), ts_Data.end());
auto local_max = *std::max_element(ts_Data.begin(), ts_Data.end());
// Gather min/max to rank 0
std::vector<tsValue_t> global_mins(ts_mpi.size());
std::vector<tsValue_t> global_maxes(ts_mpi.size());
MPI_Datatype datatype = MPI_TypeMapper<tsValue_t>::getType();
MPI_Gather(&local_min, 1, datatype, global_mins.data(), 1, datatype, 0, MPI_COMM_WORLD);
MPI_Gather(&local_max, 1, datatype, global_maxes.data(), 1, datatype, 0, MPI_COMM_WORLD);
// Check results
EXPECT_EQ(std::is_sorted(ts_Data.begin(), ts_Data.end()), true);
if (ts_mpi.rank() == 0) {
for (size_t i = 1; i < global_mins.size(); ++i) {
EXPECT_LE(global_maxes[i - 1], global_mins[i]);
}
}
}
/*
 * MPI: SysTest (acceptance)
 * Each process executes distBubbletonic for uint32_t [1 << 16]
 */
TEST_F(TMPIdistSort, distBubbletonic_test2) {
   // Test parameters: 2^16 uniformly random uint32_t values per process
   using tsValue_t = uint32_t;
   const size_t buffer_size = 1 << 16;

   // Build and randomize the local buffer over the full range of the type
   ShadowedVec_t<tsValue_t> ts_Data;
   ts_Data.resize(buffer_size);
   std::uniform_int_distribution<tsValue_t> dist(
      std::numeric_limits<tsValue_t>::min(),
      std::numeric_limits<tsValue_t>::max());
   for (auto& item : ts_Data)
      item = dist(gen);

   // Run the distributed sort under test on every process
   distBubbletonic(ts_Data, ts_mpi.size(), ts_mpi.rank());

   // Extract local extremes in a single pass
   auto extremes = std::minmax_element(ts_Data.begin(), ts_Data.end());
   tsValue_t local_min = *extremes.first;
   tsValue_t local_max = *extremes.second;

   // Collect every process' extremes on rank 0
   MPI_Datatype datatype = MPI_TypeMapper<tsValue_t>::getType();
   std::vector<tsValue_t> all_mins(ts_mpi.size());
   std::vector<tsValue_t> all_maxes(ts_mpi.size());
   MPI_Gather(&local_min, 1, datatype, all_mins.data(), 1, datatype, 0, MPI_COMM_WORLD);
   MPI_Gather(&local_max, 1, datatype, all_maxes.data(), 1, datatype, 0, MPI_COMM_WORLD);

   // Each local buffer must be sorted ...
   EXPECT_TRUE(std::is_sorted(ts_Data.begin(), ts_Data.end()));
   // ... and on rank 0 the per-process ranges must be non-overlapping,
   // i.e. the data is globally sorted across ranks.
   if (ts_mpi.rank() == 0) {
      for (size_t i = 1; i < all_mins.size(); ++i)
         EXPECT_LE(all_maxes[i - 1], all_mins[i]);
   }
}
/*
 * MPI: SysTest (acceptance)
 * Each process executes distBitonic for uint8_t [16]
 */
TEST_F(TMPIdistSort, distBitonic_test1) {
   // Test parameters: 16 uniformly random uint8_t values per process
   using tsValue_t = uint8_t;
   size_t ts_buffer_size = 16;

   // Create and fill vector.
   // NOTE: std::uniform_int_distribution<uint8_t> is undefined behavior
   // ([rand.req.genl]: IntType must not be a character type), so draw from
   // an unsigned int distribution over the uint8_t range and narrow.
   ShadowedVec_t<tsValue_t> ts_Data;
   std::uniform_int_distribution<unsigned int> dis(
      std::numeric_limits<tsValue_t>::min(),
      std::numeric_limits<tsValue_t>::max()
   );
   ts_Data.resize(ts_buffer_size);
   std::generate(ts_Data.begin(), ts_Data.end(),
                 [&]() { return static_cast<tsValue_t>(dis(gen)); });

   // Execute function under test in all processes
   distBitonic(ts_Data, ts_mpi.size(), ts_mpi.rank());

   // Local min and max of the (now sorted) buffer
   auto local_min = *std::min_element(ts_Data.begin(), ts_Data.end());
   auto local_max = *std::max_element(ts_Data.begin(), ts_Data.end());

   // Gather every process' min/max to rank 0
   std::vector<tsValue_t> global_mins(ts_mpi.size());
   std::vector<tsValue_t> global_maxes(ts_mpi.size());
   MPI_Datatype datatype = MPI_TypeMapper<tsValue_t>::getType();
   MPI_Gather(&local_min, 1, datatype, global_mins.data(), 1, datatype, 0, MPI_COMM_WORLD);
   MPI_Gather(&local_max, 1, datatype, global_maxes.data(), 1, datatype, 0, MPI_COMM_WORLD);

   // Check: each local buffer is sorted, and (on rank 0) consecutive
   // processes' value ranges do not overlap, i.e. the data is globally sorted.
   EXPECT_TRUE(std::is_sorted(ts_Data.begin(), ts_Data.end()));
   if (ts_mpi.rank() == 0) {
      for (size_t i = 1; i < global_mins.size(); ++i) {
         EXPECT_LE(global_maxes[i - 1], global_mins[i]);
      }
   }
}
/*
 * MPI: SysTest (acceptance)
 * Each process executes distBitonic for uint32_t [1 << 16]
 */
TEST_F(TMPIdistSort, distBitonic_test2) {
   // Test parameters: 2^16 uniformly random uint32_t values per process
   using tsValue_t = uint32_t;
   const size_t buffer_size = 1 << 16;

   // Build and randomize the local buffer over the full range of the type
   ShadowedVec_t<tsValue_t> ts_Data;
   ts_Data.resize(buffer_size);
   std::uniform_int_distribution<tsValue_t> dist(
      std::numeric_limits<tsValue_t>::min(),
      std::numeric_limits<tsValue_t>::max());
   for (auto& item : ts_Data)
      item = dist(gen);

   // Run the distributed sort under test on every process
   distBitonic(ts_Data, ts_mpi.size(), ts_mpi.rank());

   // Extract local extremes in a single pass
   auto extremes = std::minmax_element(ts_Data.begin(), ts_Data.end());
   tsValue_t local_min = *extremes.first;
   tsValue_t local_max = *extremes.second;

   // Collect every process' extremes on rank 0
   MPI_Datatype datatype = MPI_TypeMapper<tsValue_t>::getType();
   std::vector<tsValue_t> all_mins(ts_mpi.size());
   std::vector<tsValue_t> all_maxes(ts_mpi.size());
   MPI_Gather(&local_min, 1, datatype, all_mins.data(), 1, datatype, 0, MPI_COMM_WORLD);
   MPI_Gather(&local_max, 1, datatype, all_maxes.data(), 1, datatype, 0, MPI_COMM_WORLD);

   // Each local buffer must be sorted ...
   EXPECT_TRUE(std::is_sorted(ts_Data.begin(), ts_Data.end()));
   // ... and on rank 0 the per-process ranges must be non-overlapping,
   // i.e. the data is globally sorted across ranks.
   if (ts_mpi.rank() == 0) {
      for (size_t i = 1; i < all_mins.size(); ++i)
         EXPECT_LE(all_maxes[i - 1], all_mins[i]);
   }
}