HW2: Add a validator and small changes

This commit is contained in:
Christos Choutouridis 2024-12-31 12:54:58 +02:00
parent 9dd3eb737f
commit 36de8d94fc
7 changed files with 131 additions and 66 deletions

View File

@ -224,11 +224,13 @@ tests: $(BUILD_DIR)/$(TARGET)
@mkdir -p out
cp $(BUILD_DIR)/$(TARGET) out/$(TARGET)
measurements:
hpc-build:
make clean
make distbubbletonic
make clean
make distbitonic
make clean
make tests
all: debug distbubbletonic distbitonic

View File

@ -24,6 +24,9 @@
#define CODE_VERSION BITONIC
#endif
// Default Data size (in case -q <N> is not present)
#define DEFAULT_DATA_SIZE (1 << 16)
/*!
* Value type selection
*
@ -42,9 +45,10 @@ using distValue_t = uint32_t;
* Session option for each invocation of the executable
*/
struct session_t {
size_t arraySize{0};
bool ndebug{false};
bool timing{false};
size_t arraySize{DEFAULT_DATA_SIZE}; //!< Array size of the data to sort (DEFAULT_DATA_SIZE when -q is absent)
bool validation{false}; //!< Request a full validation at the end, performed by process rank 0
bool ndebug{false}; //!< Skips debug trap on DEBUG builds
bool timing{false}; //!< Enable timing measurements and prints
bool verbose{false}; //!< Flag to enable verbose output to stdout
};

View File

@ -177,15 +177,15 @@ void fullSort(RangeT& data, bool ascending) noexcept {
* @note
* This is the core functionality. Use the elbowSort() function instead
*
* @tparam ShadowedT A Shadowed buffer type with random access iterator.
* @tparam ShadowedDataT A Shadowed buffer type with random access iterator.
* @tparam CompT A Comparison type for binary operation comparisons
*
* @param data [ShadowedT] The data to sort
* @param data [ShadowedDataT] The data to sort
* @param ascending [bool] Flag to indicate the sorting order
* @param comp [CompT] The binary operator object
*/
template<typename ShadowedT, typename CompT>
void elbowSortCore(ShadowedT& data, bool ascending, CompT comp) noexcept {
template<typename ShadowedDataT, typename CompT>
void elbowSortCore(ShadowedDataT& data, bool ascending, CompT comp) noexcept {
auto& active = data.getActive(); // Get the source vector (the data to sort)
auto& shadow = data.getShadow(); // Get the target vector (the sorted data)
@ -215,13 +215,13 @@ void elbowSortCore(ShadowedT& data, bool ascending, CompT comp) noexcept {
/*!
* Sort a shadowed buffer using the "elbow sort" algorithm.
*
* @tparam ShadowedT A Shadowed buffer type with random access iterator.
* @tparam ShadowedDataT A Shadowed buffer type with random access iterator.
*
* @param data [ShadowedT] The data to sort
* @param data [ShadowedDataT] The data to sort
* @param ascending [bool] Flag to indicate the sorting order
*/
template<typename ShadowedT>
void elbowSort(ShadowedT& data, bool ascending) noexcept {
template<typename ShadowedDataT>
void elbowSort(ShadowedDataT& data, bool ascending) noexcept {
if (ascending)
elbowSortCore(data, ascending, std::less<>());
else
elbowSortCore(data, ascending, std::greater<>());
}
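For readers unfamiliar with the term, below is a minimal, self-contained sketch of the elbow-merge idea on a plain std::vector. The name elbowMerge and the copy-out interface are illustrative only; the real elbowSortCore() above works in place on the shadowed buffer with the supplied comparator.

#include <algorithm>
#include <cstddef>
#include <vector>

// Sketch: merge a cyclically bitonic sequence into ascending order in O(N).
// The minimum element is the "elbow"; walking away from it in both cyclic
// directions gives two ascending runs, merged like the merge step of merge sort.
template <typename T>
std::vector<T> elbowMerge(const std::vector<T>& in) {
    const std::size_t n = in.size();
    std::vector<T> out;
    if (n == 0) return out;
    out.reserve(n);
    std::size_t elbow = std::min_element(in.begin(), in.end()) - in.begin();
    std::size_t left  = (elbow + n - 1) % n;   // walks backwards (cyclically)
    std::size_t right = (elbow + 1) % n;       // walks forwards (cyclically)
    out.push_back(in[elbow]);
    for (std::size_t k = 1; k < n; ++k) {
        if (in[right] <= in[left]) {           // take the smaller front element
            out.push_back(in[right]);
            right = (right + 1) % n;
        } else {
            out.push_back(in[left]);
            left = (left + n - 1) % n;
        }
    }
    return out;
}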
@ -261,13 +261,14 @@ void minmax(RangeT& local, const RangeT& remote, bool keepSmall) noexcept {
* @note
* Each MPI process should run an instance of this function.
*
* @tparam ShadowedT A Shadowed buffer type with random access iterator.
* @tparam ShadowedDataT A Shadowed buffer type with random access iterator.
*
* @param data [ShadowedT] The local to MPI process data to sort
* @param data [ShadowedDataT] The MPI process's local data to sort
* @param Processes [mpi_id_t] The total number of MPI processes
* @param rank [mpi_id_t] The current process id
*/
template<typename ShadowedT>
void distBubbletonic(ShadowedT& data, mpi_id_t Processes, mpi_id_t rank) {
template<typename ShadowedDataT>
void distBubbletonic(ShadowedDataT& data, mpi_id_t Processes, mpi_id_t rank) {
// Initially sort to create a half part of a bitonic sequence
fullSort(data, ascending<SortMode::Bubbletonic>(rank, 0));
@ -279,7 +280,7 @@ void distBubbletonic(ShadowedT& data, mpi_id_t Processes, mpi_id_t rank) {
if ( isActive(rank, Processes) &&
isActive(part, Processes) ) {
// Exchange with partner, keep min-or-max and sort - O(N)
mpi.exchange(part, data.getActive(), data.getShadow(), step);
mpi.exchange(data.getActive(), data.getShadow(), part, step);
minmax(data.getActive(), data.getShadow(), ks);
elbowSort(data, ascending<SortMode::Bubbletonic>(rank, Processes));
}
@ -298,13 +299,14 @@ void distBubbletonic(ShadowedT& data, mpi_id_t Processes, mpi_id_t rank) {
* @note
* Each MPI process should run an instance of this function.
*
* @tparam ShadowedT A Shadowed buffer type with random access iterator.
* @tparam ShadowedDataT A Shadowed buffer type with random access iterator.
*
* @param data [ShadowedT] The local to MPI process data to sort
* @param data [ShadowedDataT] The MPI process's local data to sort
* @param Processes [mpi_id_t] The total number of MPI processes
* @param rank [mpi_id_t] The current process id
*/
template<typename ShadowedT>
void distBitonic(ShadowedT& data, mpi_id_t Processes, mpi_id_t rank) {
template<typename ShadowedDataT>
void distBitonic(ShadowedDataT& data, mpi_id_t Processes, mpi_id_t rank) {
// Initially sort to create a half part of a bitonic sequence
fullSort(data, ascending<SortMode::Bitonic>(rank, 0));
@ -317,7 +319,7 @@ void distBitonic(ShadowedT& data, mpi_id_t Processes, mpi_id_t rank) {
auto part = partner<SortMode::Bitonic>(rank, step);
auto ks = keepSmall<SortMode::Bitonic>(rank, part, depth);
// Exchange with partner, keep min-or-max
mpi.exchange(part, data.getActive(), data.getShadow(), (depth << 8) | step);
mpi.exchange(data.getActive(), data.getShadow(), part, (depth << 8) | step);
minmax(data.getActive(), data.getShadow(), ks);
}
// sort - O(N)
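For reference, the element-wise keep-min-or-keep-max step that follows each exchange can be pictured as below. This is a sketch with an assumed name (minmax_sketch); the real minmax() whose signature appears in the hunk header above has the same overall shape.

#include <algorithm>

// Sketch: after an exchange the partner's data sits in the shadow buffer;
// keep the element-wise min (or max) of local vs remote in 'local'.
template <typename RangeT>
void minmax_sketch(RangeT& local, const RangeT& remote, bool keepSmall) noexcept {
    auto lit = local.begin();
    auto rit = remote.begin();
    for (; lit != local.end() && rit != remote.end(); ++lit, ++rit)
        *lit = keepSmall ? std::min(*lit, *rit) : std::max(*lit, *rit);
}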

View File

@ -49,7 +49,7 @@ struct MPI_t {
* Initializes the MPI environment; must be called from each process
*
* @param argc [int*] POINTER to main's argc argument
* @param argv [car***] POINTER to main's argv argument
* @param argv [char***] POINTER to main's argv argument
*/
void init(int* argc, char*** argv) {
// Initialize the MPI environment
@ -84,13 +84,13 @@ struct MPI_t {
*
* @tparam T The inner value type used in the buffer
*
* @param partner [mpi_id_t] The partner for the exchange
* @param send_data [std::vector<T>] Reference to local data to send
* @param recv_data [std::vector<T>] Reference to buffer to receive data from partner
* @param partner [mpi_id_t] The partner for the exchange
* @param tag [int] The tag to use for the MPI communication
*/
template<typename T>
void exchange(ID_t partner, const std::vector<T>& send_data, std::vector<T>& recv_data, int tag) {
void exchange(const std::vector<T>& send_data, std::vector<T>& recv_data, ID_t partner, int tag) {
using namespace std::string_literals;
MPI_Datatype datatype = MPI_TypeMapper<T>::getType();
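The rest of exchange()'s body is not shown in this hunk. A likely shape for such a symmetric pairwise exchange, sketched with MPI_Sendrecv, is given below; the error handling and the assumption of equal-sized buffers are guesses, while ID_t and MPI_TypeMapper are reused from this header.

#include <mpi.h>
#include <stdexcept>
#include <string>
#include <vector>

// Sketch only: send the local buffer to 'partner' and receive the partner's
// equally sized buffer in a single combined call, using the mapped MPI datatype.
template <typename T>
void exchange_sketch(const std::vector<T>& send_data, std::vector<T>& recv_data,
                     ID_t partner, int tag) {
    MPI_Datatype datatype = MPI_TypeMapper<T>::getType();
    MPI_Status status;
    int err = MPI_Sendrecv(
        send_data.data(), static_cast<int>(send_data.size()), datatype, partner, tag,
        recv_data.data(), static_cast<int>(recv_data.size()), datatype, partner, tag,
        MPI_COMM_WORLD, &status);
    if (err != MPI_SUCCESS)
        throw std::runtime_error("MPI_Sendrecv() failed with error code: " + std::to_string(err));
}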
@ -110,6 +110,11 @@ struct MPI_t {
[[nodiscard]] ID_t size() const noexcept { return size_; }
[[nodiscard]] const std::string& name() const noexcept { return name_; }
// Mutators
ID_t rank(ID_t rank) noexcept { return rank_ = rank; }
ID_t size(ID_t size) noexcept { return size_ = size; }
std::string& name(const std::string& name) noexcept { return name_ = name; }
/*!
* Finalizes the MPI
*/
@ -130,7 +135,7 @@ struct MPI_t {
// Local functionality
private:
/*!
* Throw exception helper. It bundles the prefix with the MPI error string retrieved by
* Throw exception helper. It bundles the prefix msg with the MPI error string retrieved by
* MPI API.
*
* @param err The MPI error code
@ -143,11 +148,7 @@ private:
throw std::runtime_error(prefixMsg + std::string (err_msg) + '\n');
}
#if !defined TESTING
private:
#else
public:
#endif
ID_t rank_{}; //!< MPI rank of the process
ID_t size_{}; //!< MPI total size of the execution
std::string name_{}; //!< The name of the local machine
@ -214,17 +215,15 @@ struct ShadowedVec_t {
}
// Type accessors
const std::vector<Value_t>& getNorth() const { return North; }
const std::vector<Value_t>& getSouth() const { return South; }
std::vector<Value_t>& getActive() { return (active == north) ? North : South; }
std::vector<Value_t>& getShadow() { return (active == north) ? South : North; }
const std::vector<Value_t>& getActive() const { return (active == north) ? North : South; }
const std::vector<Value_t>& getShadow() const { return (active == north) ? South : North; }
// Switching vectors
// Swap vectors
void switch_active() { active = (active == north) ? south : north; }
// Dispatch to active vector functionality
// Dispatch vector functionality to active vector
Value_t& operator[](size_type index) { return getActive()[index]; }
const Value_t& operator[](size_type index) const { return getActive()[index]; }
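To make the active/shadow pattern concrete, here is a small stand-alone toy. ToyShadowed is a made-up stand-in; the real ShadowedVec_t above additionally dispatches front()/back(), begin()/end() and the other vector functionality to the active vector.

#include <cstdint>
#include <iostream>
#include <vector>

// A toy stand-in for the double-buffered container: two vectors, one "active".
struct ToyShadowed {
    std::vector<uint32_t> North, South;
    enum class Side { north, south } active = Side::north;

    std::vector<uint32_t>& getActive() { return (active == Side::north) ? North : South; }
    std::vector<uint32_t>& getShadow() { return (active == Side::north) ? South : North; }
    void switch_active()               { active = (active == Side::north) ? Side::south : Side::north; }
};

int main() {
    ToyShadowed data;
    data.getActive() = {4, 2, 3, 1};   // local values live in the active buffer
    data.getShadow() = {9, 0, 7, 5};   // a partner's values would be received here
    data.switch_active();              // the received buffer becomes the working one
    std::cout << data.getActive()[1] << '\n';   // prints 0
}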

View File

@ -50,7 +50,7 @@ function distbubletonic!(p, data)
for i in 1:p
sort!(view(data, i, :), rev = !ascending[i])
end
for step in 0:p-2
for step in 0:p-1
direction = [true for x = 1:p]
partnerid = partner.(pid, step)
activeids = active.(partnerid, p)

View File

@ -43,6 +43,9 @@ bool get_options(int argc, char* argv[]){
status = false;
}
}
else if (arg == "--validation") {
session.validation = true;
}
else if (arg == "--ndebug") {
session.ndebug = true;
}
@ -54,14 +57,16 @@ bool get_options(int argc, char* argv[]){
}
else if (arg == "-h" || arg == "--help") {
std::cout << "distbitonic/distbubbletonic - A distributed bitonic sort\n\n";
std::cout << "distbitonic -q <> [--ndebug] [-v]\n";
std::cout << "distbitonic -q <N> [--validation] [--ndebug] [-v]\n";
std::cout << "distbitonic -h\n";
std::cout << "distbubbletonic -q <> [--ndebug] [-v]\n";
std::cout << "distbubbletonic -q <N> [--validation] [--ndebug] [-v]\n";
std::cout << "distbubbletonic -h\n";
std::cout << '\n';
std::cout << "Options:\n\n";
std::cout << " -q | --array-size <size>\n";
std::cout << " Selects the array size according to size = 2^q\n\n";
std::cout << " -q | --array-size <N>\n";
std::cout << " Selects the array size according to size = 2^N\n\n";
std::cout << " --validation\n";
std::cout << " Request a full validation at the end, performed by process rank 0\n\n";
std::cout << " --ndebug\n";
std::cout << " Skip debug breakpoint when on debug build.\n\n";
std::cout << " -t | --timing\n";
@ -87,6 +92,48 @@ bool get_options(int argc, char* argv[]){
return status;
}
/*!
* A simple validator for the entire distributed process
*
* @tparam ShadowedDataT A Shadowed buffer type with random access iterator.
*
* @param data [ShadowedDataT] The MPI process's local data to validate
* @param Processes [mpi_id_t] The total number of MPI processes
* @param rank [mpi_id_t] The current process id
*
* @return [bool] True if every rank's local data is sorted and the ranks are in overall ascending order (meaningful only on rank 0)
*/
template<typename ShadowedDataT>
bool validator(ShadowedDataT& data, mpi_id_t Processes, mpi_id_t rank) {
using value_t = typename ShadowedDataT::value_type;
bool ret = true; // Have faith!
// Local results
value_t lmin = data.front();
value_t lmax = data.back();
value_t lsort = static_cast<value_t>(std::is_sorted(data.begin(), data.end()));
// Gather min/max/sort to rank 0
std::vector<value_t> mins(Processes);
std::vector<value_t> maxes(Processes);
std::vector<value_t> sorts(Processes);
MPI_Datatype datatype = MPI_TypeMapper<value_t>::getType();
MPI_Gather(&lmin, 1, datatype, mins.data(), 1, datatype, 0, MPI_COMM_WORLD);
MPI_Gather(&lmax, 1, datatype, maxes.data(), 1, datatype, 0, MPI_COMM_WORLD);
MPI_Gather(&lsort, 1, datatype, sorts.data(), 1, datatype, 0, MPI_COMM_WORLD);
// Check all results
if (rank == 0) {
if (sorts[0] == 0)      // rank 0 also checks its own local order
ret = false;
for (mpi_id_t r = 1; r < Processes; ++r) {
if (sorts[r] == 0)
ret = false;
if (maxes[r - 1] > mins[r])
ret = false;
}
}
return ret;
}
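As a concrete, MPI-free illustration of the rank-0 check: each rank contributes its local (min, max, is-sorted) triple, and rank 0 only has to verify that every rank is locally sorted and that consecutive ranges do not overlap out of order. The numbers below are made up.

#include <cstddef>
#include <cstdint>
#include <iostream>
#include <vector>

int main() {
    // Pretend these were gathered from 4 ranks: per-rank min, max and sorted flag
    std::vector<uint32_t> mins  {0, 11, 21, 35};
    std::vector<uint32_t> maxes {10, 20, 34, 99};
    std::vector<uint32_t> sorts {1, 1, 1, 1};

    bool ok = (sorts[0] != 0);
    for (std::size_t r = 1; r < mins.size(); ++r) {
        if (sorts[r] == 0)          ok = false;  // each rank must be locally sorted
        if (maxes[r - 1] > mins[r]) ok = false;  // rank boundaries must be in order
    }
    std::cout << (ok ? "PASS" : "FAIL") << '\n'; // prints PASS for this data
}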
#if !defined TESTING
int main(int argc, char* argv[]) try {
@ -146,7 +193,14 @@ int main(int argc, char* argv[]) try {
std::string timeMsg = "rank " + std::to_string(mpi.rank());
timer.print_dt(timeMsg.c_str());
std::cout << "[Data]: Rank " << mpi.rank() << ": [" << +Data.front() << " .. " << +Data.back() << "]" << std::endl;
if (session.validation) {
// If requested, we have the chance to fail!
if (mpi.rank() == 0)
std::cout << "Results validation ...";
bool val = validator(Data, mpi.size(), mpi.rank());
if (mpi.rank() == 0)
std::cout << ((val) ? "\x1B[32m [PASS] \x1B[0m\n" : "\x1B[31m [FAIL] \x1B[0m\n");
}
mpi.finalize();
return 0;
}

View File

@ -2,6 +2,10 @@
* \file
* \brief PDS HW2 tests
*
* To run these tests, execute:
* make tests
* mpirun -np <N> ./out/tests
*
* \author
* Christos Choutouridis AEM:8997
* <cchoutou@ece.auth.gr>
@ -28,8 +32,8 @@ protected:
int rank, size;
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
MPI_Comm_size(MPI_COMM_WORLD, &size);
ts_mpi.rank_ = rank;
ts_mpi.size_ = size;
ts_mpi.rank(rank);
ts_mpi.size(size);
}
static void TearDownTestSuite() {
@ -39,8 +43,8 @@ protected:
/*
* To run these tests, execute:
* mpirun -np <N> ./bit/tests
* MPI: SysTest (acceptance)
* Each process executes distBubbletonic for uint8_t [16]
*/
TEST_F(TMPIdistSort, distBubbletonic_test1) {
// Create and fill vector
@ -80,8 +84,8 @@ TEST_F(TMPIdistSort, distBubbletonic_test1) {
}
/*
* To run these tests, execute:
* mpirun -np <N> ./bit/tests
* MPI: SysTest (acceptance)
* Each process executes distBubbletonic for uint32_t [1 << 16]
*/
TEST_F(TMPIdistSort, distBubbletonic_test2) {
// Create and fill vector
@ -122,8 +126,8 @@ TEST_F(TMPIdistSort, distBubbletonic_test2) {
/*
* To run these tests, execute:
* mpirun -np <N> ./bit/tests
* MPI: SysTest (acceptance)
* Each process executes distBitonic for uint8_t [16]
*/
TEST_F(TMPIdistSort, distBitonic_test1) {
// Create and fill vector
@ -163,8 +167,8 @@ TEST_F(TMPIdistSort, distBitonic_test1) {
}
/*
* To run these tests, execute:
* mpirun -np <N> ./bit/tests
* MPI: SysTest (acceptance)
* Each process executes distBitonic for uint32_t [1 << 16]
*/
TEST_F(TMPIdistSort, distBitonic_test2) {
// Create and fill vector