Browse Source

HW2: A validator added and small changes

tags/v2.0
parent
commit
36de8d94fc
7 changed files with 131 additions and 66 deletions
  1. +8
    -6
      homework_2/Makefile
  2. +8
    -4
      homework_2/include/config.h
  3. +26
    -24
      homework_2/include/distsort.hpp
  4. +15
    -16
      homework_2/include/utils.hpp
  5. +1
    -1
      homework_2/julia/bitonic_v05.jl
  6. +59
    -5
      homework_2/src/main.cpp
  7. +14
    -10
      homework_2/test/tests_MPI.cpp

+ 8
- 6
homework_2/Makefile View File

@@ -29,9 +29,9 @@ SRC_DIR_LIST := src test test/gtest


# Include directories list(space separated). Makefile-relative path. # Include directories list(space separated). Makefile-relative path.
INC_DIR_LIST := include \ INC_DIR_LIST := include \
test \
test/gtest/ \
/usr/lib/x86_64-linux-gnu/openmpi/include/ \
test \
test/gtest/ \
/usr/lib/x86_64-linux-gnu/openmpi/include/ \
src src


# Exclude files list(space separated). Filenames only. # Exclude files list(space separated). Filenames only.
@@ -52,7 +52,7 @@ REL_CXXFLAGS := -Wall -Wextra -O3 -std=c++17


# Pre-defines # Pre-defines
# PRE_DEFS := MYCAB=1729 SUPER_MODE # PRE_DEFS := MYCAB=1729 SUPER_MODE
PRE_DEFS :=
PRE_DEFS :=


# ============== Linker settings ============== # ============== Linker settings ==============
# Linker flags (example: -pthread -lm) # Linker flags (example: -pthread -lm)
@@ -82,7 +82,7 @@ DOCKER :=
# compiler and compiler flags. # compiler and compiler flags.
CSIZE := size CSIZE := size
CFLAGS := $(DEB_CFLAGS) CFLAGS := $(DEB_CFLAGS)
CXXFLAGS := $(DEB_CXXFLAGS)
CXXFLAGS := $(DEB_CXXFLAGS)
CXX := g++ #mpic++ CXX := g++ #mpic++
CC := gcc #mpicc CC := gcc #mpicc


@@ -224,11 +224,13 @@ tests: $(BUILD_DIR)/$(TARGET)
@mkdir -p out @mkdir -p out
cp $(BUILD_DIR)/$(TARGET) out/$(TARGET) cp $(BUILD_DIR)/$(TARGET) out/$(TARGET)


measurements:
hpc-build:
make clean make clean
make distbubbletonic make distbubbletonic
make clean make clean
make distbitonic make distbitonic
make clean
make tests




all: debug distbubbletonic distbitonic all: debug distbubbletonic distbitonic


+ 8
- 4
homework_2/include/config.h View File

@@ -24,6 +24,9 @@
#define CODE_VERSION BITONIC #define CODE_VERSION BITONIC
#endif #endif


// Default Data size (in case -q <N> is not present)
#define DEFAULT_DATA_SIZE (1 << 16)

/*! /*!
* Value type selection * Value type selection
* *
@@ -42,10 +45,11 @@ using distValue_t = uint32_t;
* Session option for each invocation of the executable * Session option for each invocation of the executable
*/ */
struct session_t { struct session_t {
size_t arraySize{0};
bool ndebug{false};
bool timing{false};
bool verbose{false}; //!< Flag to enable verbose output to stdout
size_t arraySize{DEFAULT_DATA_SIZE}; //!< Array size for the session (defaults to DEFAULT_DATA_SIZE when -q is absent)
bool validation{false}; //!< Request a full validation at the end, performed by process rank 0
bool ndebug{false}; //!< Skips debug trap on DEBUG builds
bool timing{false}; //!< Enable timing measurements and prints
bool verbose{false}; //!< Flag to enable verbose output to stdout
}; };


extern session_t session; extern session_t session;


+ 26
- 24
homework_2/include/distsort.hpp View File

@@ -177,15 +177,15 @@ void fullSort(RangeT& data, bool ascending) noexcept {
* @note * @note
* This is the core functionality. Use the elbowSort() function instead * This is the core functionality. Use the elbowSort() function instead
* *
* @tparam ShadowedT A Shadowed buffer type with random access iterator.
* @tparam CompT A Comparison type for binary operation comparisons
* @tparam ShadowedDataT A Shadowed buffer type with random access iterator.
* @tparam CompT A Comparison type for binary operation comparisons
* *
* @param data [ShadowedT] The data to sort
* @param ascending [bool] Flag to indicate the sorting order
* @param comp [CompT] The binary operator object
* @param data [ShadowedDataT] The data to sort
* @param ascending [bool] Flag to indicate the sorting order
* @param comp [CompT] The binary operator object
*/ */
template<typename ShadowedT, typename CompT>
void elbowSortCore(ShadowedT& data, bool ascending, CompT comp) noexcept {
template<typename ShadowedDataT, typename CompT>
void elbowSortCore(ShadowedDataT& data, bool ascending, CompT comp) noexcept {
auto& active = data.getActive(); // Get the source vector (the data to sort) auto& active = data.getActive(); // Get the source vector (the data to sort)
auto& shadow = data.getShadow(); // Get the target vector (the sorted data) auto& shadow = data.getShadow(); // Get the target vector (the sorted data)


@@ -215,13 +215,13 @@ void elbowSortCore(ShadowedT& data, bool ascending, CompT comp) noexcept {
/*! /*!
* Sort a shadowed buffer using the "elbow sort" algorithm. * Sort a shadowed buffer using the "elbow sort" algorithm.
* *
* @tparam ShadowedT A Shadowed buffer type with random access iterator.
* @tparam ShadowedDataT A Shadowed buffer type with random access iterator.
* *
* @param data [ShadowedT] The data to sort
* @param ascending [bool] Flag to indicate the sorting order
* @param data [ShadowedDataT] The data to sort
* @param ascending [bool] Flag to indicate the sorting order
*/ */
template<typename ShadowedT>
void elbowSort(ShadowedT& data, bool ascending) noexcept {
template<typename ShadowedDataT>
void elbowSort(ShadowedDataT& data, bool ascending) noexcept {
if (ascending) if (ascending)
elbowSortCore(data, ascending, std::less<>()); elbowSortCore(data, ascending, std::less<>());
else else
@@ -261,13 +261,14 @@ void minmax(RangeT& local, const RangeT& remote, bool keepSmall) noexcept {
* @note * @note
* Each MPI process should run an instance of this function. * Each MPI process should run an instance of this function.
* *
* @tparam ShadowedT A Shadowed buffer type with random access iterator.
* @tparam ShadowedDataT A Shadowed buffer type with random access iterator.
* *
* @param data [ShadowedT] The local to MPI process data to sort
* @param Processes [mpi_id_t] The total number of MPI processes
* @param data [ShadowedDataT] The local to MPI process data to sort
* @param Processes [mpi_id_t] The total number of MPI processes
* @param rank [mpi_id_t] The current process id
*/ */
template<typename ShadowedT>
void distBubbletonic(ShadowedT& data, mpi_id_t Processes, mpi_id_t rank) {
template<typename ShadowedDataT>
void distBubbletonic(ShadowedDataT& data, mpi_id_t Processes, mpi_id_t rank) {
// Initially sort to create a half part of a bitonic sequence // Initially sort to create a half part of a bitonic sequence
fullSort(data, ascending<SortMode::Bubbletonic>(rank, 0)); fullSort(data, ascending<SortMode::Bubbletonic>(rank, 0));


@@ -279,7 +280,7 @@ void distBubbletonic(ShadowedT& data, mpi_id_t Processes, mpi_id_t rank) {
if ( isActive(rank, Processes) && if ( isActive(rank, Processes) &&
isActive(part, Processes) ) { isActive(part, Processes) ) {
// Exchange with partner, keep min-or-max and sort - O(N)
mpi.exchange(part, data.getActive(), data.getShadow(), step);
mpi.exchange(data.getActive(), data.getShadow(), part, step);
minmax(data.getActive(), data.getShadow(), ks); minmax(data.getActive(), data.getShadow(), ks);
elbowSort(data, ascending<SortMode::Bubbletonic>(rank, Processes)); elbowSort(data, ascending<SortMode::Bubbletonic>(rank, Processes));
} }
@@ -298,13 +299,14 @@ void distBubbletonic(ShadowedT& data, mpi_id_t Processes, mpi_id_t rank) {
* @note * @note
* Each MPI process should run an instance of this function. * Each MPI process should run an instance of this function.
* *
* @tparam ShadowedT A Shadowed buffer type with random access iterator.
* @tparam ShadowedDataT A Shadowed buffer type with random access iterator.
* *
* @param data [ShadowedT] The local to MPI process data to sort
* @param Processes [mpi_id_t] The total number of MPI processes
* @param data [ShadowedDataT] The local to MPI process data to sort
* @param Processes [mpi_id_t] The total number of MPI processes
* @param rank [mpi_id_t] The current process id
*/ */
template<typename ShadowedT>
void distBitonic(ShadowedT& data, mpi_id_t Processes, mpi_id_t rank) {
template<typename ShadowedDataT>
void distBitonic(ShadowedDataT& data, mpi_id_t Processes, mpi_id_t rank) {
// Initially sort to create a half part of a bitonic sequence // Initially sort to create a half part of a bitonic sequence
fullSort(data, ascending<SortMode::Bitonic>(rank, 0)); fullSort(data, ascending<SortMode::Bitonic>(rank, 0));


@@ -317,7 +319,7 @@ void distBitonic(ShadowedT& data, mpi_id_t Processes, mpi_id_t rank) {
auto part = partner<SortMode::Bitonic>(rank, step); auto part = partner<SortMode::Bitonic>(rank, step);
auto ks = keepSmall<SortMode::Bitonic>(rank, part, depth); auto ks = keepSmall<SortMode::Bitonic>(rank, part, depth);
// Exchange with partner, keep min-or-max
mpi.exchange(part, data.getActive(), data.getShadow(), (depth << 8) | step);
mpi.exchange(data.getActive(), data.getShadow(), part, (depth << 8) | step);
minmax(data.getActive(), data.getShadow(), ks); minmax(data.getActive(), data.getShadow(), ks);
} }
// sort - O(N) // sort - O(N)


+ 15
- 16
homework_2/include/utils.hpp View File

@@ -49,9 +49,9 @@ struct MPI_t {
* Initializes the MPI environment, must be called from each process * Initializes the MPI environment, must be called from each process
* *
* @param argc [int*] POINTER to main's argc argument * @param argc [int*] POINTER to main's argc argument
* @param argv [car***] POINTER to main's argv argument
* @param argv [char***] POINTER to main's argv argument
*/ */
void init(int *argc, char ***argv) {
void init(int* argc, char*** argv) {
// Initialize the MPI environment // Initialize the MPI environment
int err; int err;
if ((err = MPI_Init(argc, argv)) != MPI_SUCCESS) if ((err = MPI_Init(argc, argv)) != MPI_SUCCESS)
@@ -84,13 +84,13 @@ struct MPI_t {
* *
* @tparam T The inner value type used in buffer * @tparam T The inner value type used in buffer
* *
* @param partner [mpi_id_t] The partner for the exchange
* @param send_data [std::vector<T>] Reference to local data to send * @param send_data [std::vector<T>] Reference to local data to send
* @param recv_data [std::vector<T>] Reference to buffer to receive data from partner * @param recv_data [std::vector<T>] Reference to buffer to receive data from partner
* @param partner [mpi_id_t] The partner for the exchange
* @param tag [int] The tag to use for the MPI communication * @param tag [int] The tag to use for the MPI communication
*/ */
template<typename T> template<typename T>
void exchange(ID_t partner, const std::vector<T>& send_data, std::vector<T>& recv_data, int tag) {
void exchange(const std::vector<T>& send_data, std::vector<T>& recv_data, ID_t partner, int tag) {
using namespace std::string_literals; using namespace std::string_literals;


MPI_Datatype datatype = MPI_TypeMapper<T>::getType(); MPI_Datatype datatype = MPI_TypeMapper<T>::getType();
@@ -110,6 +110,11 @@ struct MPI_t {
[[nodiscard]] ID_t size() const noexcept { return size_; } [[nodiscard]] ID_t size() const noexcept { return size_; }
[[nodiscard]] const std::string& name() const noexcept { return name_; } [[nodiscard]] const std::string& name() const noexcept { return name_; }


// Mutators
ID_t rank(ID_t rank) noexcept { return rank_ = rank; }
ID_t size(ID_t size) noexcept { return size_ = size; }
std::string& name(const std::string& name) noexcept { return name_ = name; }

/*! /*!
* Finalizes the MPI * Finalizes the MPI
*/ */
@@ -130,7 +135,7 @@ struct MPI_t {
// Local functionality // Local functionality
private: private:
/*! /*!
* Throw exception helper. It bundles the prefix with the MPI error string retrieved by
* Throw exception helper. It bundles the prefix msg with the MPI error string retrieved by
* MPI API. * MPI API.
* *
* @param err The MPI error code * @param err The MPI error code
@@ -143,11 +148,7 @@ private:
throw std::runtime_error(prefixMsg + std::string (err_msg) + '\n'); throw std::runtime_error(prefixMsg + std::string (err_msg) + '\n');
} }


#if !defined TESTING
private: private:
#else
public:
#endif
ID_t rank_{}; //!< MPI rank of the process ID_t rank_{}; //!< MPI rank of the process
ID_t size_{}; //!< MPI total size of the execution ID_t size_{}; //!< MPI total size of the execution
std::string name_{}; //!< The name of the local machine std::string name_{}; //!< The name of the local machine
@@ -172,10 +173,10 @@ using mpi_id_t = MPI_t<>::ID_t;
template <typename Value_t> template <typename Value_t>
struct ShadowedVec_t { struct ShadowedVec_t {
// STL requirements // STL requirements
using value_type = Value_t;
using iterator = typename std::vector<Value_t>::iterator;
using value_type = Value_t;
using iterator = typename std::vector<Value_t>::iterator;
using const_iterator = typename std::vector<Value_t>::const_iterator; using const_iterator = typename std::vector<Value_t>::const_iterator;
using size_type = typename std::vector<Value_t>::size_type;
using size_type = typename std::vector<Value_t>::size_type;


// Default constructor // Default constructor
ShadowedVec_t() = default; ShadowedVec_t() = default;
@@ -214,17 +215,15 @@ struct ShadowedVec_t {
} }


// Type accessors // Type accessors
const std::vector<Value_t>& getNorth() const { return North; }
const std::vector<Value_t>& getSouth() const { return South; }
std::vector<Value_t>& getActive() { return (active == north) ? North : South; } std::vector<Value_t>& getActive() { return (active == north) ? North : South; }
std::vector<Value_t>& getShadow() { return (active == north) ? South : North; } std::vector<Value_t>& getShadow() { return (active == north) ? South : North; }
const std::vector<Value_t>& getActive() const { return (active == north) ? North : South; } const std::vector<Value_t>& getActive() const { return (active == north) ? North : South; }
const std::vector<Value_t>& getShadow() const { return (active == north) ? South : North; } const std::vector<Value_t>& getShadow() const { return (active == north) ? South : North; }


// Switching vectors
// Swap vectors
void switch_active() { active = (active == north) ? south : north; } void switch_active() { active = (active == north) ? south : north; }


// Dispatch to active vector functionality
// Dispatch vector functionality to active vector
Value_t& operator[](size_type index) { return getActive()[index]; } Value_t& operator[](size_type index) { return getActive()[index]; }
const Value_t& operator[](size_type index) const { return getActive()[index]; } const Value_t& operator[](size_type index) const { return getActive()[index]; }




+ 1
- 1
homework_2/julia/bitonic_v05.jl View File

@@ -50,7 +50,7 @@ function distbubletonic!(p, data)
for i in 1:p for i in 1:p
sort!(view(data, i, :), rev = !ascending[i]) sort!(view(data, i, :), rev = !ascending[i])
end end
for step in 0:p-2
for step in 0:p-1
direction = [true for x = 1:p] direction = [true for x = 1:p]
partnerid = partner.(pid, step) partnerid = partner.(pid, step)
activeids = active.(partnerid, p) activeids = active.(partnerid, p)


+ 59
- 5
homework_2/src/main.cpp View File

@@ -43,6 +43,9 @@ bool get_options(int argc, char* argv[]){
status = false; status = false;
} }
} }
else if (arg == "--validation") {
session.validation = true;
}
else if (arg == "--ndebug") { else if (arg == "--ndebug") {
session.ndebug = true; session.ndebug = true;
} }
@@ -54,14 +57,16 @@ bool get_options(int argc, char* argv[]){
} }
else if (arg == "-h" || arg == "--help") { else if (arg == "-h" || arg == "--help") {
std::cout << "distbitonic/distbubbletonic - A distributed bitonic sort\n\n"; std::cout << "distbitonic/distbubbletonic - A distributed bitonic sort\n\n";
std::cout << "distbitonic -q <> [--ndebug] [-v]\n";
std::cout << "distbitonic -q <N> [--validation] [--ndebug] [-v]\n";
std::cout << "distbitonic -h\n"; std::cout << "distbitonic -h\n";
std::cout << "distbubbletonic -q <> [--ndebug] [-v]\n";
std::cout << "distbubbletonic -q <N> [--validation] [--ndebug] [-v]\n";
std::cout << "distbubbletonic -h\n"; std::cout << "distbubbletonic -h\n";
std::cout << '\n'; std::cout << '\n';
std::cout << "Options:\n\n"; std::cout << "Options:\n\n";
std::cout << " -q | --array-size <size>\n";
std::cout << " Selects the array size according to size = 2^q\n\n";
std::cout << " -q | --array-size <N>\n";
std::cout << " Selects the array size according to size = 2^N\n\n";
std::cout << " --validation\n";
std::cout << " Request a full validation at the end, performed by process rank 0\n\n";
std::cout << " --ndebug\n"; std::cout << " --ndebug\n";
std::cout << " Skip debug breakpoint when on debug build.\n\n"; std::cout << " Skip debug breakpoint when on debug build.\n\n";
std::cout << " -t | --timing\n"; std::cout << " -t | --timing\n";
@@ -87,6 +92,48 @@ bool get_options(int argc, char* argv[]){
return status; return status;
} }


/*!
 * A simple validator for the entire distributed process
 *
 * Each rank contributes its local min, max and an is-sorted flag; rank 0
 * gathers them and checks that every local chunk is sorted and that the
 * chunk boundaries are in non-decreasing order across consecutive ranks.
 *
 * @note
 * This is a collective call: every rank must invoke it. Only rank 0
 * produces a meaningful verdict; all other ranks return true.
 * Assumes data is non-empty (front()/back() would be UB otherwise).
 *
 * @tparam ShadowedDataT A Shadowed buffer type with random access iterator.
 *
 * @param data      [ShadowedDataT] The local to MPI process data
 * @param Processes [mpi_id_t] The total number of MPI processes
 * @param rank      [mpi_id_t] The current process id
 *
 * @return [bool] True if all chunks are sorted and in total ascending order
 */
template<typename ShadowedDataT>
bool validator(ShadowedDataT& data, mpi_id_t Processes, mpi_id_t rank) {
    using value_t = typename ShadowedDataT::value_type;
    bool ret = true; // Have faith!

    // Local results
    value_t lmin = data.front();
    value_t lmax = data.back();
    value_t lsort = static_cast<value_t>(std::is_sorted(data.begin(), data.end()));

    // Gather min/max/sort to rank 0
    std::vector<value_t> mins(Processes);
    std::vector<value_t> maxes(Processes);
    std::vector<value_t> sorts(Processes);

    MPI_Datatype datatype = MPI_TypeMapper<value_t>::getType();
    MPI_Gather(&lmin, 1, datatype, mins.data(), 1, datatype, 0, MPI_COMM_WORLD);
    MPI_Gather(&lmax, 1, datatype, maxes.data(), 1, datatype, 0, MPI_COMM_WORLD);
    MPI_Gather(&lsort, 1, datatype, sorts.data(), 1, datatype, 0, MPI_COMM_WORLD);

    // Check all results
    if (rank == 0) {
        // Fix: rank 0's own chunk must also be sorted (sorts[0] was previously skipped)
        if (sorts[0] == 0)
            ret = false;
        for (mpi_id_t r = 1; r < Processes; ++r) {
            if (sorts[r] == 0)         // every chunk locally sorted
                ret = false;
            if (maxes[r - 1] > mins[r]) // boundaries non-decreasing across ranks
                ret = false;
        }
    }
    return ret;
}


#if !defined TESTING #if !defined TESTING
int main(int argc, char* argv[]) try { int main(int argc, char* argv[]) try {
@@ -146,7 +193,14 @@ int main(int argc, char* argv[]) try {
std::string timeMsg = "rank " + std::to_string(mpi.rank()); std::string timeMsg = "rank " + std::to_string(mpi.rank());
timer.print_dt(timeMsg.c_str()); timer.print_dt(timeMsg.c_str());


std::cout << "[Data]: Rank " << mpi.rank() << ": [" << +Data.front() << " .. " << +Data.back() << "]" << std::endl;
if (session.validation) {
// If requested, we have the chance to fail!
if (mpi.rank() == 0)
std::cout << "Results validation ...";
bool val = validator(Data, mpi.size(), mpi.rank());
if (mpi.rank() == 0)
std::cout << ((val) ? "\x1B[32m [PASS] \x1B[0m\n" : " \x1B[31m [FAIL] \x1B[0m\n");
}
mpi.finalize(); mpi.finalize();
return 0; return 0;
} }


+ 14
- 10
homework_2/test/tests_MPI.cpp View File

@@ -2,6 +2,10 @@
* \file * \file
* \brief PDS HW2 tests * \brief PDS HW2 tests
* *
* To run these tests execute:
* make tests
* mpirun -np <N> ./out/tests
*
* \author * \author
* Christos Choutouridis AEM:8997 * Christos Choutouridis AEM:8997
* <cchoutou@ece.auth.gr> * <cchoutou@ece.auth.gr>
@@ -28,8 +32,8 @@ protected:
int rank, size; int rank, size;
MPI_Comm_rank(MPI_COMM_WORLD, &rank); MPI_Comm_rank(MPI_COMM_WORLD, &rank);
MPI_Comm_size(MPI_COMM_WORLD, &size); MPI_Comm_size(MPI_COMM_WORLD, &size);
ts_mpi.rank_ = rank;
ts_mpi.size_ = size;
ts_mpi.rank(rank);
ts_mpi.size(size);
} }


static void TearDownTestSuite() { static void TearDownTestSuite() {
@@ -39,8 +43,8 @@ protected:




/* /*
* To run thiese test execute:
* mpirun -np <N> ./bit/tests
* MPI: SysTest (acceptance)
* Each process executes distBubbletonic for uint8_t [16]
*/ */
TEST_F(TMPIdistSort, distBubbletonic_test1) { TEST_F(TMPIdistSort, distBubbletonic_test1) {
// Create and fill vector // Create and fill vector
@@ -80,8 +84,8 @@ TEST_F(TMPIdistSort, distBubbletonic_test1) {
} }


/* /*
* To run thiese test execute:
* mpirun -np <N> ./bit/tests
* MPI: SysTest (acceptance)
* Each process executes distBubbletonic for uint32_t [1 << 16]
*/ */
TEST_F(TMPIdistSort, distBubbletonic_test2) { TEST_F(TMPIdistSort, distBubbletonic_test2) {
// Create and fill vector // Create and fill vector
@@ -122,8 +126,8 @@ TEST_F(TMPIdistSort, distBubbletonic_test2) {




/* /*
* To run thiese test execute:
* mpirun -np <N> ./bit/tests
* MPI: SysTest (acceptance)
* Each process executes distBitonic for uint8_t [16]
*/ */
TEST_F(TMPIdistSort, distBitonic_test1) { TEST_F(TMPIdistSort, distBitonic_test1) {
// Create and fill vector // Create and fill vector
@@ -163,8 +167,8 @@ TEST_F(TMPIdistSort, distBitonic_test1) {
} }


/* /*
* To run thiese test execute:
* mpirun -np <N> ./bit/tests
* MPI: SysTest (acceptance)
* Each process executes distBitonic for uint32_t [1 << 16]
*/ */
TEST_F(TMPIdistSort, distBitonic_test2) { TEST_F(TMPIdistSort, distBitonic_test2) {
// Create and fill vector // Create and fill vector


Loading…
Cancel
Save