Assignments for the "Parallel and distributed systems" course of AUTH's THMMY.
#
# ---------------------------------------------
# Bitonic v0.5 functionality
#
function exchange(localid, remoteid)
    if verbose
        println("Exchange local data from $localid with partner $remoteid")
    end
    nothing # We have all data here ;)
end
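# `exchange` is a no-op in this single-process simulation, since the whole
# (p, N/p) array is local. A hypothetical MPI.jl counterpart (sketch only:
# `exchange_mpi!`, `recvbuf`, and the keyword `Sendrecv!` signature of
# MPI.jl >= 0.20 are assumptions, not part of this script) could look like:
#
#   using MPI
#   function exchange_mpi!(localrow, recvbuf, remoteid, comm)
#       # Swap this node's row with the partner's row in one combined call
#       MPI.Sendrecv!(localrow, recvbuf, comm; dest = remoteid, source = remoteid)
#   end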
function minmax!(data, localid, remoteid, keepsmall)
    # Keep the element-wise minima (or maxima) on the local row;
    # the partner row gets the complementary half
    temp = copy(data[localid+1, :])
    if keepsmall
        view(data, localid+1, :) .= min.(temp, data[remoteid+1, :])
        view(data, remoteid+1, :) .= max.(temp, data[remoteid+1, :])
    else
        view(data, localid+1, :) .= max.(temp, data[remoteid+1, :])
        view(data, remoteid+1, :) .= min.(temp, data[remoteid+1, :])
    end
end
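# A quick, self-contained sanity check of the min-max split (the demo matrix
# is hypothetical): node 0's row keeps the element-wise minima, node 1's the maxima.
let demo = [4 1 3 2; 2 3 1 4]
    minmax!(demo, 0, 1, true)
    @assert demo == [2 1 1 2; 4 3 3 4]
end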
function sort_network!(data, n, depth)
    nodes = 0:n-1
    for step = depth-1:-1:0
        # Partner differs from the node id in bit `step` only;
        # the merge direction is ascending iff bit `depth` of the id is clear
        partnerid = nodes .⊻ (1 << step)
        direction = (nodes .& (1 << depth)) .== 0
        keepsmall = ((nodes .< partnerid) .& direction) .| ((nodes .> partnerid) .& .!direction)
        if verbose
            println("depth: $depth | step: $step | partner: $partnerid | keepsmall: $keepsmall")
        end
        # Exchange with partner and keep small or large (runs on all MPI nodes)
        for i in 0:n-1
            if i < partnerid[i+1]
                exchange(i, partnerid[i+1])
                minmax!(data, i, partnerid[i+1], keepsmall[i+1])
            end
        end
    end
end
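# Partner pattern illustration (n = 4, depth = 2; a hand-worked trace):
#   step 1: partnerid = id ⊻ 2 -> pairs (0,2) and (1,3)
#   step 0: partnerid = id ⊻ 1 -> pairs (0,1) and (2,3)
# Each node pairs with the id that differs from its own in bit `step` only.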
  40. """
  41. distbitonic!(p, data)
  42. distributed bitonic sort v1 using elbow merge locally except for the first step
  43. p: The number of processes
  44. data: (p, N/p) array
  45. """
function distbitonic!(p, data)
    q = Int(log2(p)) # the order of the number of processes (p = 2^q)
    pid = 0:p-1
    ascending = mod.(pid, 2) .== 0
    if verbose
        println("ascending: $ascending")
    end
    # Local full sort here (runs on all MPI nodes)
    for i in 1:p
        sort!(view(data, i, :), rev = !ascending[i])
    end
    for depth = 1:q
        sort_network!(data, p, depth)
        ascending = (pid .& (1 << depth)) .== 0
        if verbose
            println("ascending: $ascending")
        end
        # Local elbow merge here (runs on all MPI nodes); a full sort! stands in for it
        for i in 1:p
            sort!(view(data, i, :), rev = !ascending[i])
        end
    end
    nothing
end
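# Direction pattern across depths for p = 8 (a hand-worked example,
# a = ascending, d = descending):
#   depth 1: bit 1 clear -> a a d d a a d d
#   depth 2: bit 2 clear -> a a a a d d d d
#   depth 3: bit 3 clear -> all ascending, yielding the fully sorted result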
#
# Homework setup
# ---------------------------------------------
#
p::Int8 = 3 # The order (power of 2) of the number of "processors"
q::Int8 = 8 # The data size order (power of 2) of each "processor"
verbose = false;
# Run Script
# ---------------------------------------------
P::Int = 2^p
Q::Int = 2^q
N::Int = 2^(q+p)
println("Distributed bitonic (v1) test")
println("p: $p -> Number of processors: $P")
println("q: $q -> Data length for each node: $Q, Total: $(P*Q)")
println("Create an $P x $Q array")
Data = rand(Int8, P, Q)
println("Sort array with $P (MPI) nodes")
@time distbitonic!(P, Data)
# Test
if issorted(vec(permutedims(Data)))
    println("Test: Passed")
else
    println("Test: Failed")
end
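# Note: `vec(permutedims(Data))` flattens the array row by row (node 0's data
# first), matching the distributed order; Julia arrays are column-major, so a
# plain `vec(Data)` would interleave the nodes' elements instead.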