AUTH's THMMY "Parallel and distributed systems" course assignments.

tests_MPI.cpp 9.8 KiB

/**
 * \file
 * \brief PDS HW2 tests
 *
 * To run these tests execute:
 *    export OMP_NUM_THREADS=4   # optional, to see the parallelization speed-up
 *    make tests
 *    mpirun -np <N> ./out/tests
 *
 * Note:
 *    Yes, each process runs the entire test suite!
 *
 * \author
 *    Christos Choutouridis AEM:8997
 *    <cchoutou@ece.auth.gr>
 */
#include <gtest/gtest.h>
#include <mpi.h>

#include <algorithm>
#include <cstdint>
#include <limits>
#include <random>
#include <vector>

#include "distsort.hpp"
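
/*
 * The tests below use MPI_TypeMapper<T>::getType() (provided by distsort.hpp) to
 * select the MPI datatype matching each C++ value type. As a rough sketch of the
 * assumed interface (the actual project definition may differ), such a mapper
 * boils down to one trait specialization per supported type, e.g.:
 *
 *   template <typename T> struct MPI_TypeMapper;
 *   template <> struct MPI_TypeMapper<uint8_t>  { static MPI_Datatype getType() { return MPI_UINT8_T;  } };
 *   template <> struct MPI_TypeMapper<uint32_t> { static MPI_Datatype getType() { return MPI_UINT32_T; } };
 */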
/*
 * Global fixtures
 */

// MPI handler for the test session
MPI_t<> ts_mpi;

// Mersenne Twister generator, seeded from hardware if possible. Range: [type_min, type_max]
std::random_device rd;
std::mt19937 gen(rd());

class TMPIdistSort : public ::testing::Test {
protected:
    static void SetUpTestSuite() {
        int argc = 0;
        char** argv = nullptr;
        ts_mpi.init(&argc, &argv);
    }

    static void TearDownTestSuite() {
        ts_mpi.finalize();
    }
};
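
/*
 * Acceptance criterion used by every test below: the local buffer must be sorted,
 * and, after gathering the per-rank extremes on rank 0, the maximum of rank i-1
 * must not exceed the minimum of rank i. A hypothetical helper capturing that
 * pattern is sketched here for documentation purposes only; the tests themselves
 * keep the checks inline. It assumes MPI_TypeMapper and the container API from
 * distsort.hpp.
 */
template <typename Cont>
void expectGloballySorted(const Cont& local, size_t procs, size_t rank) {
    using value_t = typename Cont::value_type;

    // 1. Each process must hold locally sorted data
    EXPECT_TRUE(std::is_sorted(local.begin(), local.end()));

    // 2. Gather per-rank min/max on rank 0
    value_t local_min = *std::min_element(local.begin(), local.end());
    value_t local_max = *std::max_element(local.begin(), local.end());
    std::vector<value_t> mins(procs), maxes(procs);
    MPI_Datatype type = MPI_TypeMapper<value_t>::getType();
    MPI_Gather(&local_min, 1, type, mins.data(),  1, type, 0, MPI_COMM_WORLD);
    MPI_Gather(&local_max, 1, type, maxes.data(), 1, type, 0, MPI_COMM_WORLD);

    // 3. Consecutive ranks must hold non-overlapping, increasing ranges
    if (rank == 0)
        for (size_t i = 1; i < mins.size(); ++i)
            EXPECT_LE(maxes[i - 1], mins[i]);
}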
/*
 * MPI: SysTest (acceptance)
 * Each process executes distBubbletonic for uint8_t [16]
 */
TEST_F(TMPIdistSort, distBubbletonic_test1) {
    // Create and fill vector
    using tsValue_t = uint8_t;          // Test parameters
    size_t ts_buffer_size = 16;

    ShadowedVec_t<tsValue_t> ts_Data;
    // Note: uniform_int_distribution is not defined for 8-bit types by the standard,
    //       so draw wider values and narrow them to tsValue_t.
    std::uniform_int_distribution<unsigned int> dis(
        std::numeric_limits<tsValue_t>::min(),
        std::numeric_limits<tsValue_t>::max()
    );
    ts_Data.resize(ts_buffer_size);
    std::generate(ts_Data.begin(), ts_Data.end(), [&]() { return static_cast<tsValue_t>(dis(gen)); });

    // Execute function under test in all processes
    distBubbletonic(ts_Data, ts_mpi.size(), ts_mpi.rank());

    // Local min and max
    auto local_min = *std::min_element(ts_Data.begin(), ts_Data.end());
    auto local_max = *std::max_element(ts_Data.begin(), ts_Data.end());

    // Gather min/max to rank 0
    std::vector<tsValue_t> global_mins(ts_mpi.size());
    std::vector<tsValue_t> global_maxes(ts_mpi.size());
    MPI_Datatype datatype = MPI_TypeMapper<tsValue_t>::getType();

    MPI_Gather(&local_min, 1, datatype, global_mins.data(), 1, datatype, 0, MPI_COMM_WORLD);
    MPI_Gather(&local_max, 1, datatype, global_maxes.data(), 1, datatype, 0, MPI_COMM_WORLD);

    // Check results
    EXPECT_TRUE(std::is_sorted(ts_Data.begin(), ts_Data.end()));
    if (ts_mpi.rank() == 0) {
        for (size_t i = 1; i < global_mins.size(); ++i) {
            EXPECT_LE(global_maxes[i - 1], global_mins[i]);
        }
    }
}
/*
 * MPI: SysTest (acceptance)
 * Each process executes distBubbletonic for uint32_t [1 << 16]
 */
TEST_F(TMPIdistSort, distBubbletonic_test2) {
    // Create and fill vector
    using tsValue_t = uint32_t;         // Test parameters
    size_t ts_buffer_size = 1 << 16;

    ShadowedVec_t<tsValue_t> ts_Data;
    std::uniform_int_distribution<tsValue_t> dis(
        std::numeric_limits<tsValue_t>::min(),
        std::numeric_limits<tsValue_t>::max()
    );
    ts_Data.resize(ts_buffer_size);
    std::generate(ts_Data.begin(), ts_Data.end(), [&]() { return dis(gen); });

    // Execute function under test in all processes
    distBubbletonic(ts_Data, ts_mpi.size(), ts_mpi.rank());

    // Local min and max
    auto local_min = *std::min_element(ts_Data.begin(), ts_Data.end());
    auto local_max = *std::max_element(ts_Data.begin(), ts_Data.end());

    // Gather min/max to rank 0
    std::vector<tsValue_t> global_mins(ts_mpi.size());
    std::vector<tsValue_t> global_maxes(ts_mpi.size());
    MPI_Datatype datatype = MPI_TypeMapper<tsValue_t>::getType();

    MPI_Gather(&local_min, 1, datatype, global_mins.data(), 1, datatype, 0, MPI_COMM_WORLD);
    MPI_Gather(&local_max, 1, datatype, global_maxes.data(), 1, datatype, 0, MPI_COMM_WORLD);

    // Check results
    EXPECT_TRUE(std::is_sorted(ts_Data.begin(), ts_Data.end()));
    if (ts_mpi.rank() == 0) {
        for (size_t i = 1; i < global_mins.size(); ++i) {
            EXPECT_LE(global_maxes[i - 1], global_mins[i]);
        }
    }
}
/*
 * MPI: SysTest (acceptance)
 * Each process executes distBubbletonic for uint32_t [1 << 16] with pipelining
 */
TEST_F(TMPIdistSort, distBubbletonic_test3) {
    // Create and fill vector
    using tsValue_t = uint32_t;         // Test parameters
    size_t ts_buffer_size = 1 << 16;

    ShadowedVec_t<tsValue_t> ts_Data;
    std::uniform_int_distribution<tsValue_t> dis(
        std::numeric_limits<tsValue_t>::min(),
        std::numeric_limits<tsValue_t>::max()
    );
    ts_Data.resize(ts_buffer_size);
    std::generate(ts_Data.begin(), ts_Data.end(), [&]() { return dis(gen); });

    // Set pipeline depth
    config.pipeline = 8;

    // Execute function under test in all processes
    distBubbletonic(ts_Data, ts_mpi.size(), ts_mpi.rank());

    // Local min and max
    auto local_min = *std::min_element(ts_Data.begin(), ts_Data.end());
    auto local_max = *std::max_element(ts_Data.begin(), ts_Data.end());

    // Gather min/max to rank 0
    std::vector<tsValue_t> global_mins(ts_mpi.size());
    std::vector<tsValue_t> global_maxes(ts_mpi.size());
    MPI_Datatype datatype = MPI_TypeMapper<tsValue_t>::getType();

    MPI_Gather(&local_min, 1, datatype, global_mins.data(), 1, datatype, 0, MPI_COMM_WORLD);
    MPI_Gather(&local_max, 1, datatype, global_maxes.data(), 1, datatype, 0, MPI_COMM_WORLD);

    // Check results
    EXPECT_TRUE(std::is_sorted(ts_Data.begin(), ts_Data.end()));
    if (ts_mpi.rank() == 0) {
        for (size_t i = 1; i < global_mins.size(); ++i) {
            EXPECT_LE(global_maxes[i - 1], global_mins[i]);
        }
    }
}
/*
 * MPI: SysTest (acceptance)
 * Each process executes distBitonic for uint8_t [16]
 */
TEST_F(TMPIdistSort, distBitonic_test1) {
    // Create and fill vector
    using tsValue_t = uint8_t;          // Test parameters
    size_t ts_buffer_size = 16;

    ShadowedVec_t<tsValue_t> ts_Data;
    // Note: uniform_int_distribution is not defined for 8-bit types by the standard,
    //       so draw wider values and narrow them to tsValue_t.
    std::uniform_int_distribution<unsigned int> dis(
        std::numeric_limits<tsValue_t>::min(),
        std::numeric_limits<tsValue_t>::max()
    );
    ts_Data.resize(ts_buffer_size);
    std::generate(ts_Data.begin(), ts_Data.end(), [&]() { return static_cast<tsValue_t>(dis(gen)); });

    // Execute function under test in all processes
    distBitonic(ts_Data, ts_mpi.size(), ts_mpi.rank());

    // Local min and max
    auto local_min = *std::min_element(ts_Data.begin(), ts_Data.end());
    auto local_max = *std::max_element(ts_Data.begin(), ts_Data.end());

    // Gather min/max to rank 0
    std::vector<tsValue_t> global_mins(ts_mpi.size());
    std::vector<tsValue_t> global_maxes(ts_mpi.size());
    MPI_Datatype datatype = MPI_TypeMapper<tsValue_t>::getType();

    MPI_Gather(&local_min, 1, datatype, global_mins.data(), 1, datatype, 0, MPI_COMM_WORLD);
    MPI_Gather(&local_max, 1, datatype, global_maxes.data(), 1, datatype, 0, MPI_COMM_WORLD);

    // Check results
    EXPECT_TRUE(std::is_sorted(ts_Data.begin(), ts_Data.end()));
    if (ts_mpi.rank() == 0) {
        for (size_t i = 1; i < global_mins.size(); ++i) {
            EXPECT_LE(global_maxes[i - 1], global_mins[i]);
        }
    }
}
/*
 * MPI: SysTest (acceptance)
 * Each process executes distBitonic for uint32_t [1 << 16]
 */
TEST_F(TMPIdistSort, distBitonic_test2) {
    // Create and fill vector
    using tsValue_t = uint32_t;         // Test parameters
    size_t ts_buffer_size = 1 << 16;

    ShadowedVec_t<tsValue_t> ts_Data;
    std::uniform_int_distribution<tsValue_t> dis(
        std::numeric_limits<tsValue_t>::min(),
        std::numeric_limits<tsValue_t>::max()
    );
    ts_Data.resize(ts_buffer_size);
    std::generate(ts_Data.begin(), ts_Data.end(), [&]() { return dis(gen); });

    // Execute function under test in all processes
    distBitonic(ts_Data, ts_mpi.size(), ts_mpi.rank());

    // Local min and max
    auto local_min = *std::min_element(ts_Data.begin(), ts_Data.end());
    auto local_max = *std::max_element(ts_Data.begin(), ts_Data.end());

    // Gather min/max to rank 0
    std::vector<tsValue_t> global_mins(ts_mpi.size());
    std::vector<tsValue_t> global_maxes(ts_mpi.size());
    MPI_Datatype datatype = MPI_TypeMapper<tsValue_t>::getType();

    MPI_Gather(&local_min, 1, datatype, global_mins.data(), 1, datatype, 0, MPI_COMM_WORLD);
    MPI_Gather(&local_max, 1, datatype, global_maxes.data(), 1, datatype, 0, MPI_COMM_WORLD);

    // Check results
    EXPECT_TRUE(std::is_sorted(ts_Data.begin(), ts_Data.end()));
    if (ts_mpi.rank() == 0) {
        for (size_t i = 1; i < global_mins.size(); ++i) {
            EXPECT_LE(global_maxes[i - 1], global_mins[i]);
        }
    }
}
/*
 * MPI: SysTest (acceptance)
 * Each process executes distBitonic for uint32_t [1 << 16] with pipelining
 */
TEST_F(TMPIdistSort, distBitonic_test3) {
    // Create and fill vector
    using tsValue_t = uint32_t;         // Test parameters
    size_t ts_buffer_size = 1 << 16;

    ShadowedVec_t<tsValue_t> ts_Data;
    std::uniform_int_distribution<tsValue_t> dis(
        std::numeric_limits<tsValue_t>::min(),
        std::numeric_limits<tsValue_t>::max()
    );
    ts_Data.resize(ts_buffer_size);
    std::generate(ts_Data.begin(), ts_Data.end(), [&]() { return dis(gen); });

    // Set pipeline depth
    config.pipeline = 8;

    // Execute function under test in all processes
    distBitonic(ts_Data, ts_mpi.size(), ts_mpi.rank());

    // Local min and max
    auto local_min = *std::min_element(ts_Data.begin(), ts_Data.end());
    auto local_max = *std::max_element(ts_Data.begin(), ts_Data.end());

    // Gather min/max to rank 0
    std::vector<tsValue_t> global_mins(ts_mpi.size());
    std::vector<tsValue_t> global_maxes(ts_mpi.size());
    MPI_Datatype datatype = MPI_TypeMapper<tsValue_t>::getType();

    MPI_Gather(&local_min, 1, datatype, global_mins.data(), 1, datatype, 0, MPI_COMM_WORLD);
    MPI_Gather(&local_max, 1, datatype, global_maxes.data(), 1, datatype, 0, MPI_COMM_WORLD);

    // Check results
    EXPECT_TRUE(std::is_sorted(ts_Data.begin(), ts_Data.end()));
    if (ts_mpi.rank() == 0) {
        for (size_t i = 1; i < global_mins.size(); ++i) {
            EXPECT_LE(global_maxes[i - 1], global_mins[i]);
        }
    }
}