|
123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343 |
/*!
 * \file   v4.cpp
 * \brief  v4 part of the exercise.
 *
 * \author
 *    Christos Choutouridis AEM:8997
 *    <cchoutou@ece.auth.gr>
 */
#include <v4.h>

#include <cstddef>     // size_t, used for sign-safe thread-count comparisons
-
- namespace v4 {
-
- #if defined CILK
-
- /*!
- * Utility function to get/set the number of threads.
- *
- * The number of threads are controlled via environment variable \c CILK_NWORKERS
- *
- * \return The number of threads used.
- * \note
- * The user can reduce the number with the command option \c --max_threads.
- * If so the requested number will be used even if the environment has more threads available.
- */
- int nworkers() {
- if (session.max_threads)
- return (session.max_threads < __cilkrts_get_nworkers()) ?
- session.max_threads : __cilkrts_get_nworkers();
- else
- return __cilkrts_get_nworkers();
- }
-
/*!
 * Calculate and return a vertex-wise count vector.
 *
 *                1
 *    vector  =  ---  *  (A .* (A*B)) * ones_N
 *                2
 *
 * We squeezed all that to one function for performance. The row*column multiplication
 * uses the inner CSC structure of sparse matrix and follows only non-zero members
 * with a time complexity of \f$ O(nnz1 + nnz2) \f$
 *
 * \param A The first matrix to use.
 * \param B The second matrix to use (they can be the same).
 * \return The count vector. RVO is used here.
 * \note
 *    We use two methods of calculation based on \c --make_symmetric or \c --triangular_only
 *    - A full matrix calculation
 *    - A lower triangular matrix
 * \warning
 *    The later(--triangular_only) produce correct results ONLY if we are after the total count.
 */
std::vector<value_t> mmacc_v(matrix& A, matrix& B) {
   std::vector<value_t> c(A.size());

   // Rows are independent of each other, so the Cilk runtime may steal
   // whole iterations of the outer loop freely.
   cilk_for (int i=0 ; i<A.size() ; ++i) {
      // Follow only the non-zero columns of row i (inner CSC structure).
      for (auto j = A.getRow(i); j.index() != j.end() ; ++j){
         c[i] += A.getRow(i)*B.getCol(j.index());
      }
      // In full-symmetric mode every triangle is counted twice per vertex.
      if (session.makeSymmetric) c[i] /= 2;
   }
   return c;
}
-
- /*!
- * A sum utility to use as spawn function for parallelized sum.
- * \return The sum of \c v from \c begin to \c end.
- */
- void do_sum (value_t& out_sum, std::vector<value_t>& v, index_t begin, index_t end) {
- for (auto i =begin ; i != end ; ++i)
- out_sum += v[i];
- }
-
/*!
 * A parallelized version of sum. Just because ;)
 *
 * The vector is split into nworkers() contiguous slices; each slice is
 * summed by a spawned do_sum() into its own slot of \c sum_v (so the
 * workers never write to the same location), and the partial sums are
 * reduced serially after cilk_sync.
 *
 * \param v The vector to sum.
 * \return The total sum of vector \c v
 */
value_t sum (std::vector<value_t>& v) {
   int n = nworkers();
   std::vector<value_t> sum_v(n, 0); // result of each do_sum invocation.

   // We spawn workers in a more statically way.
   // Slice i covers [i*size/n, (i+1)*size/n); the boundaries tile the
   // whole vector even when size is not a multiple of n.
   for (index_t i =0 ; i < n ; ++i) {
      cilk_spawn do_sum(sum_v[i], v, i*v.size()/n, (i+1)*v.size()/n);
   }
   cilk_sync; // wait for every spawned do_sum before reducing

   // sum the sums (a sum to rule them all)
   value_t s =0; for (auto& it : sum_v) s += it;
   return s;
}
-
- #elif defined OMP
-
/*!
 * Utility function to get/set the number of threads.
 *
 * The number of threads are controlled via environment variable \c OMP_NUM_THREADS
 *
 * \return The number of threads used.
 * \note
 *    The user can reduce the number with the command option \c --max_threads.
 *    If so the requested number will be used even if the environment has more threads available.
 * \note
 *    This function has side effects: it calls omp_set_dynamic() and possibly
 *    omp_set_num_threads(), so it changes OpenMP runtime state on every call.
 */
int nworkers() {
   if (session.max_threads && session.max_threads < (size_t)omp_get_max_threads()) {
      // Pin the team size: disable dynamic adjustment so the runtime
      // honors exactly the number requested via --max_threads.
      omp_set_dynamic(0);
      omp_set_num_threads(session.max_threads);
      return session.max_threads;
   }
   else {
      // No (effective) user limit: allow the runtime to adjust the team size.
      omp_set_dynamic(1);
      return omp_get_max_threads();
   }
}
-
/*!
 * Calculate and return a vertex-wise count vector.
 *
 *                1
 *    vector  =  ---  *  (A .* (A*B)) * ones_N
 *                2
 *
 * We squeezed all that to one function for performance. The row*column multiplication
 * uses the inner CSC structure of sparse matrix and follows only non-zero members
 * with a time complexity of \f$ O(nnz1 + nnz2) \f$
 *
 * \param A The first matrix to use.
 * \param B The second matrix to use (they can be the same).
 * \return The count vector. RVO is used here.
 * \note
 *    We use two methods of calculation based on \c --make_symmetric or \c --triangular_only
 *    - A full matrix calculation
 *    - A lower triangular matrix
 * \warning
 *    The later(--triangular_only) produce correct results ONLY if we are after the total count.
 */
std::vector<value_t> mmacc_v(matrix& A, matrix& B) {
   std::vector<value_t> c(A.size());

   // OMP schedule selection: --dynamic requests dynamic scheduling,
   // otherwise static; schedule(runtime) on the pragma picks it up.
   if (session.dynamic) omp_set_schedule (omp_sched_dynamic, 0);
   else omp_set_schedule (omp_sched_static, 0);
   #pragma omp parallel for shared(c) schedule(runtime)
   for (int i=0 ; i<A.size() ; ++i) {
      // Follow only the non-zero columns of row i (inner CSC structure).
      for (auto j = A.getRow(i); j.index() != j.end() ; ++j) {
         c[i] += A.getRow(i)*B.getCol(j.index());
      }
      // In full-symmetric mode every triangle is counted twice per vertex.
      if (session.makeSymmetric) c[i] /= 2;
   }
   return c;
}
-
- /*!
- * A parallelized version of sum. Just because ;)
- * \return The total sum of vector \c v
- */
- value_t sum (std::vector<value_t>& v) {
- value_t s =0;
-
- #pragma omp parallel for reduction(+:s)
- for (auto i =0u ; i<v.size() ; ++i)
- s += v[i];
- return s;
- }
-
- #elif defined THREADS
-
- /*!
- * Utility function to get/set the number of threads.
- *
- * The number of threads are inherited by the environment via std::thread::hardware_concurrency()
- *
- * \return The number of threads used.
- * \note
- * The user can reduce the number with the command option \c --max_threads.
- * If so the requested number will be used even if the environment has more threads available.
- */
- int nworkers() {
- if (session.max_threads)
- return (session.max_threads < std::thread::hardware_concurrency()) ?
- session.max_threads : std::thread::hardware_concurrency();
- else
- return std::thread::hardware_concurrency();
- }
-
/*!
 * A spawn function to calculate and return a vertex-wise count vector.
 *
 *                             1
 *    vector(begin..end)  =  ---  *  (A .* (A*B)) * ones_N
 *                             2
 *
 * We squeezed all that to one function for performance. The row*column multiplication
 * uses the inner CSC structure of sparse matrix and follows only non-zero members
 * with a time complexity of \f$ O(nnz1 + nnz2) \f$
 *
 * \param out   Reference to output vector; entries selected by iton[begin..end) are written in place.
 * \param A     The first matrix to use.
 * \param B     The second matrix to use (they can be the same).
 * \param iton  vector containing the range with the columns to use (it can be shuffled).
 * \param begin First index (into \c iton) this worker handles.
 * \param end   One-past-last index (into \c iton) this worker handles.
 * \return The count vector. RVO is used here.
 * \note
 *    We use two methods of calculation based on \c --make_symmetric or \c --triangular_only
 *    - A full matrix calculation
 *    - A lower triangular matrix
 * \note
 *    NOTE(review): returning std::vector by value here appears to copy the whole
 *    vector once per worker, and the value is discarded by the std::thread caller
 *    in mmacc_v() -- a void return would likely avoid that copy. Left unchanged
 *    to keep the interface stable; confirm before changing.
 * \warning
 *    The later(--triangular_only) produce correct results ONLY if we are after the total count.
 */
std::vector<value_t> mmacc_v_rng(
      std::vector<value_t>& out, matrix& A, matrix& B, std::vector<index_t>& iton, index_t begin, index_t end) {
   for (index_t i=begin ; i<end ; ++i) {
      // Indirection through iton allows a shuffled (dynamic-like) row order.
      index_t ii = iton[i];
      for (auto j = A.getRow(ii); j.index() != j.end() ; ++j){
         out[ii] += A.getRow(ii)*B.getCol(j.index());
      }
      if (session.makeSymmetric) out[ii] /= 2;
   }
   return out;
}
-
- /*!
- * Calculate and return a vertex-wise count vector.
- *
- * \param A The first matrix to use.
- * \param B The second matrix to use (they can be the same).
- * \return The count vector. RVO is used here.
- */
- std::vector<value_t> mmacc_v(matrix& A, matrix& B) {
- std::vector<std::thread> workers;
- std::vector<value_t> c(A.size());
- int n = nworkers();
-
- std::vector<index_t> iton(A.size()); // Create a 0 .. N range for outer loop
- std::iota(iton.begin(), iton.end(), 0);
- if (session.dynamic) // in case of dynamic scheduling, shuffle the range
- std::shuffle(iton.begin(), iton.end(), std::mt19937{std::random_device{}()});
-
- for (index_t i=0 ; i<n ; ++i) // dispatch the workers and hold them in a vector
- workers.push_back (
- std::thread (mmacc_v_rng, std::ref(c), std::ref(A), std::ref(B), std::ref(iton), i*A.size()/n, (i+1)*A.size()/n)
- );
-
- // a for to join them all...
- std::for_each(workers.begin(), workers.end(), [](std::thread& t){
- t.join();
- });
-
- return c;
- }
-
- /*!
- * A sum utility to use as spawn function for parallelized sum.
- * \return The sum of \c v from \c begin to \c end.
- */
- void do_sum (value_t& out_sum, std::vector<value_t>& v, index_t begin, index_t end) {
- for (auto i =begin ; i != end ; ++i)
- out_sum += v[i];
- }
-
- /*!
- * A parallelized version of sum. Just because ;)
- * \return The total sum of vector \c v
- */
- value_t sum (std::vector<value_t>& v) {
- int n = nworkers();
- std::vector<value_t> sum_v(n, 0); // result of each do_sum invocation.
- std::vector<std::thread> workers;
-
- // We spawn workers in a more statically way.
- for (index_t i =0 ; i < n ; ++i)
- workers.push_back (std::thread (do_sum, std::ref(sum_v[i]), std::ref(v), i*v.size()/n, (i+1)*v.size()/n));
-
- std::for_each(workers.begin(), workers.end(), [](std::thread& t){
- t.join();
- });
-
- // sum the sums (a sum to rule them all)
- value_t s =0; for (auto& it : sum_v) s += it;
- return s;
- }
-
- #else
-
//! Return the number of workers.
//! \note
//!    Serial build: there is always exactly one worker. The function exists
//!    only so every build flavor exposes the same interface.
int nworkers() {
   return 1;
}
-
- /*!
- * Calculate and return a vertex-wise count vector.
- *
- * 1
- * vector = --- * (A.* (A*B))*ones_N
- * 2
- *
- * We squeezed all that to one function for performance. The row*column multiplication
- * uses the inner CSC structure of sparse matrix and follows only non-zero members
- * with a time complexity of \$ O(nnz1 + nnz2) \$
- *
- * \param A The first matrix to use.
- * \param B The second matrix to use (they can be the same).
- * \return The count vector. RVO is used here.
- * \note
- * We use two methods of calculation based on \c --make_symmetric or \c --triangular_only
- * - A full matrix calculation
- * - A lower triangular matrix
- * \warning
- * The later(--triangular_only) produce correct results ONLY if we are after the total count.
- */
- std::vector<value_t> mmacc_v(matrix& A, matrix& B) {
- std::vector<value_t> c(A.size());
- for (int i=0 ; i<A.size() ; ++i) {
- for (auto j = A.getRow(i); j.index() != j.end() ; ++j){
- c[i] += A.getRow(i)*B.getCol(j.index());
- }
- if (session.makeSymmetric) c[i] /= 2;
- }
- return c;
- }
-
- /*!
- * Summation functionality.
- * \return The total sum of vector \c v
- */
- value_t sum (std::vector<value_t>& v) {
- value_t s =0;
- for (auto& it : v)
- s += it;
- return s;
- }
-
- #endif
-
//! Polymorphic interface function for count vector
//! Computes the vertex-wise triangle-count vector of \c A by feeding the
//! same matrix as both operands of the active mmacc_v() implementation.
std::vector<value_t> triang_v(matrix& A) {
   return mmacc_v(A, A);
}
-
- //! Polymorphic interface function for sum results
- value_t triang_count (std::vector<value_t>& c) {
- return (session.makeSymmetric) ? sum(c)/3 : sum(c);
- }
-
- } // namespace v4
|