mult.jl

import Libdl
using StarPU
using LinearAlgebra

# should be the same as in the Makefile
const STRIDE = 72
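
# matrix_mult computes one block product m3 = m1 * m2. As used below,
# width() and height() give the number of columns and rows of a matrix
# handle inside a codelet.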
@target STARPU_CPU+STARPU_CUDA
@codelet function matrix_mult(m1 :: Matrix{Float32}, m2 :: Matrix{Float32}, m3 :: Matrix{Float32}) :: Nothing

    width_m2 :: Int32 = width(m2)
    height_m1 :: Int32 = height(m1)
    width_m1 :: Int32 = width(m1)

    # Naive version
    @parallel for j in (1 : width_m2)
        @parallel for i in (1 : height_m1)
            sum :: Float32 = 0.

            for k in (1 : width_m1)
                sum = sum + m1[i, k] * m2[k, j]
            end

            m3[i, j] = sum
        end
    end
    # ##### Tiled and unrolled version
    # for l in (1 : width_m2)
    #     for m in (1 : height_m1)
    #         m3[m, l] = 0
    #     end
    # end
    # @parallel for i in (1 : STRIDE : height_m1)
    #     for k in (1 : STRIDE : width_m1)
    #         for j in (1 : STRIDE : width_m2)
    #             for kk in (k : 4 : k+STRIDE-1)
    #                 for jj in (j : 2 : j+STRIDE-1)
    #                     alpha00 :: Float32 = m2[kk, jj]
    #                     alpha01 :: Float32 = m2[kk, jj+1]
    #                     alpha10 :: Float32 = m2[kk+1, jj]
    #                     alpha11 :: Float32 = m2[kk+1, jj+1]
    #                     alpha20 :: Float32 = m2[kk+2, jj]
    #                     alpha21 :: Float32 = m2[kk+2, jj+1]
    #                     alpha30 :: Float32 = m2[kk+3, jj]
    #                     alpha31 :: Float32 = m2[kk+3, jj+1]
    #                     for ii in (i : 1 : i+STRIDE-1)
    #                         m3[ii, jj]   = m3[ii, jj]   + m1[ii, kk] * alpha00 + m1[ii, kk+1] * alpha10 + m1[ii, kk+2] * alpha20 + m1[ii, kk+3] * alpha30
    #                         m3[ii, jj+1] = m3[ii, jj+1] + m1[ii, kk] * alpha01 + m1[ii, kk+1] * alpha11 + m1[ii, kk+2] * alpha21 + m1[ii, kk+3] * alpha31
    #                     end
    #                 end
    #             end
    #         end
    #     end
    # end

    return
end

starpu_init()
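
# multiply_with_starpu partitions B into nslicesx vertical blocks, A into
# nslicesy horizontal blocks and C into an nslicesx x nslicesy grid, then
# submits one matrix_mult task per block of C. The full multiplication is
# repeated 10 times and the best elapsed time (in nanoseconds) is returned.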
function multiply_with_starpu(A :: Matrix{Float32}, B :: Matrix{Float32}, C :: Matrix{Float32}, nslicesx, nslicesy)
    scale = 3
    tmin = 0

    vert = StarpuDataFilter(STARPU_MATRIX_FILTER_VERTICAL_BLOCK, nslicesx)
    horiz = StarpuDataFilter(STARPU_MATRIX_FILTER_BLOCK, nslicesy)

    @starpu_block let
        hA, hB, hC = starpu_data_register(A, B, C)
        starpu_data_partition(hB, vert)
        starpu_data_partition(hA, horiz)
        starpu_data_map_filters(hC, vert, horiz)

        tmin = 0

        perfmodel = StarpuPerfmodel(
            perf_type = STARPU_HISTORY_BASED,
            symbol = "history_perf"
        )

        cl = StarpuCodelet(
            cpu_func = CPU_CODELETS["matrix_mult"],
            # cuda_func = CUDA_CODELETS["matrix_mult"],
            # opencl_func = "ocl_matrix_mult",
            modes = [STARPU_R, STARPU_R, STARPU_W],
            perfmodel = perfmodel
        )
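
        # Best of 10 runs: each iteration submits all block tasks, waits for
        # them with @starpu_sync_tasks, and keeps the minimum elapsed time.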
        for i in (1 : 10)
            t = time_ns()

            @starpu_sync_tasks begin
                for taskx in (1 : nslicesx)
                    for tasky in (1 : nslicesy)
                        handles = [hA[tasky], hB[taskx], hC[taskx, tasky]]
                        task = StarpuTask(cl = cl, handles = handles)
                        starpu_task_submit(task)

                        # @starpu_async_cl matrix_mult(hA[tasky], hB[taskx], hC[taskx, tasky])
                    end
                end
            end

            t = time_ns() - t
            if (tmin == 0 || tmin > t)
                tmin = t
            end
        end
    end

    return tmin
end
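
# Element-wise relative comparison, useful for checking C against a reference
# product; it is not called by the benchmark below.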
function approximately_equals(
    A :: Matrix{Cfloat},
    B :: Matrix{Cfloat},
    eps = 1e-2
)
    (height, width) = size(A)

    for j in (1 : width)
        for i in (1 : height)
            if (abs(A[i,j] - B[i,j]) > eps * max(abs(B[i,j]), abs(A[i,j])))
                println("A[$i,$j] : $(A[i,j]), B[$i,$j] : $(B[i,j])")
                return false
            end
        end
    end

    return true
end
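
# For each matrix dimension, compute_times runs the StarPU multiplication and
# prints two numbers: the combined size of A, B and C in MB, and the achieved
# GFlop/s ((2*dim - 1)*dim*dim floating-point operations divided by the best
# time in nanoseconds).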
function compute_times(io, start_dim, step_dim, stop_dim, nslicesx, nslicesy)
    for dim in (start_dim : step_dim : stop_dim)
        A = Array(rand(Cfloat, dim, dim))
        B = Array(rand(Cfloat, dim, dim))
        C = zeros(Float32, dim, dim)

        mt = multiply_with_starpu(A, B, C, nslicesx, nslicesy)

        flops = (2*dim-1)*dim*dim/mt
        size = dim*dim*4*3/1024/1024

        println(io, "$size $flops")
        println("$size $flops")
    end
end

io = open(ARGS[1], "w")
compute_times(io, 16*STRIDE, 4*STRIDE, 4096, 2, 2)
close(io)
starpu_shutdown()
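
# A minimal invocation sketch (the output file name below is only an example;
# ARGS[1] can be any writable path, and the environment from the accompanying
# Makefile may be needed for StarPU.jl and the generated codelets to load):
#
#     julia mult.jl mult.dat
#
# Each output line contains the combined size of A, B and C in MB followed by
# the measured GFlop/s for that dimension.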