# mult.jl — StarPU.jl blocked matrix-multiplication benchmark
import Libdl
using StarPU
using LinearAlgebra

# Tile edge length for the blocked multiplication in matrix_mult.
# Should be the same as in the makefile.
const STRIDE = 72
# Generate this codelet for both CPU and CUDA backends.
@target STARPU_CPU+STARPU_CUDA
# Codelet computing m3 = m1 * m2 on one partitioned block.
# m1 is read, m2 is read, m3 is written (zeroed, then accumulated into).
# NOTE(review): the tiled loops step by STRIDE and index up to
# i+STRIDE-1 etc., so every matrix dimension is assumed to be a
# multiple of STRIDE — confirm callers only submit such sizes.
@codelet function matrix_mult(m1 :: Matrix{Float32}, m2 :: Matrix{Float32}, m3 :: Matrix{Float32}) :: Float32
    width_m2 :: Int32 = width(m2)
    height_m1 :: Int32 = height(m1)
    width_m1 :: Int32 = width(m1)

    # Naive version
    #@parallel for j in (1 : width_m2)
    #   @parallel for i in (1 : height_m1)
    #
    #      sum :: Float32 = 0.
    #      for k in (1 : width_m1)
    #         sum = sum + m1[i, k] * m2[k, j]
    #      end
    #      m3[i, j] = sum
    #   end
    #end

    ##### Tiled and unrolled version

    # Clear the output block first: the tiled loops below accumulate
    # partial products into m3 across k-tiles.
    for l in (1 : width_m2)
        for m in (1 : height_m1)
            m3[m,l] = 0
        end
    end
    # STRIDE x STRIDE tiles; the inner kernel is unrolled 4x over k
    # and 2x over j, holding a 4x2 patch of m2 in scalar registers
    # while streaming down a column strip of m1 (column-major friendly).
    @parallel for i in (1 : STRIDE : height_m1)
        for k in (1 : STRIDE : width_m1 )
            for j in (1 : STRIDE : width_m2 )
                for kk in (k : 4 : k+STRIDE-1)
                    for jj in (j : 2 : j+STRIDE-1)
                        # Cache the 4x2 block of m2 used by this unrolled step.
                        alpha00 :: Float32 =m2[kk,jj]
                        alpha01 :: Float32 =m2[kk,jj+1]
                        alpha10 :: Float32 =m2[kk+1,jj]
                        alpha11 :: Float32 =m2[kk+1,jj+1]
                        alpha20 :: Float32 =m2[kk+2,jj]
                        alpha21 :: Float32 =m2[kk+2,jj+1]
                        alpha30 :: Float32 =m2[kk+3,jj]
                        alpha31 :: Float32 =m2[kk+3,jj+1]
                        for ii in (i : 1 : i+STRIDE-1)
                            m3[ii, jj] = m3[ii, jj] + m1[ii, kk] * alpha00 + m1[ii, kk+1] * alpha10 + m1[ii, kk+2] * alpha20 + m1[ii,kk+3]*alpha30
                            m3[ii, jj+1] = m3[ii, jj+1] + m1[ii, kk] * alpha01 + m1[ii, kk+1] * alpha11 + m1[ii, kk+2]*alpha21 + m1[ii,kk+3]*alpha31
                        end
                    end
                end
            end
        end
    end
    # Codelet return value is unused by the benchmark.
    return 0. :: Float32
end
# Initialize the StarPU runtime before any task submission.
@debugprint "starpu_init"
starpu_init()
  54. function multiply_with_starpu(A :: Matrix{Float32}, B :: Matrix{Float32}, C :: Matrix{Float32}, nslicesx, nslicesy)
  55. scale= 3
  56. tmin=0
  57. vert = StarpuDataFilter(STARPU_MATRIX_FILTER_VERTICAL_BLOCK, nslicesx)
  58. horiz = StarpuDataFilter(STARPU_MATRIX_FILTER_BLOCK, nslicesy)
  59. @starpu_block let
  60. hA,hB,hC = starpu_data_register(A, B, C)
  61. starpu_data_partition(hB, vert)
  62. starpu_data_partition(hA, horiz)
  63. starpu_data_map_filters(hC, vert, horiz)
  64. tmin=0
  65. perfmodel = StarpuPerfmodel(
  66. perf_type = STARPU_HISTORY_BASED,
  67. symbol = "history_perf"
  68. )
  69. cl = StarpuCodelet(
  70. cpu_func = CPU_CODELETS["matrix_mult"],
  71. #cuda_func = "matrix_mult",
  72. #opencl_func="ocl_matrix_mult",
  73. modes = [STARPU_R, STARPU_R, STARPU_W],
  74. perfmodel = perfmodel
  75. )
  76. for i in (1 : 10 )
  77. t=time_ns()
  78. @starpu_sync_tasks begin
  79. for taskx in (1 : nslicesx)
  80. for tasky in (1 : nslicesy)
  81. handles = [hA[tasky], hB[taskx], hC[taskx, tasky]]
  82. task = StarpuTask(cl = cl, handles = handles)
  83. starpu_task_submit(task)
  84. #@starpu_async_cl matrix_mult(hA[tasky], hB[taskx], hC[taskx, tasky])
  85. end
  86. end
  87. end
  88. t=time_ns()-t
  89. if (tmin==0 || tmin>t)
  90. tmin=t
  91. end
  92. end
  93. end
  94. return tmin
  95. end
  96. function approximately_equals(
  97. A :: Matrix{Cfloat},
  98. B :: Matrix{Cfloat},
  99. eps = 1e-2
  100. )
  101. (height, width) = size(A)
  102. for j in (1 : width)
  103. for i in (1 : height)
  104. if (abs(A[i,j] - B[i,j]) > eps * max(abs(B[i,j]), abs(A[i,j])))
  105. println("A[$i,$j] : $(A[i,j]), B[$i,$j] : $(B[i,j])")
  106. return false
  107. end
  108. end
  109. end
  110. return true
  111. end
  112. function compute_times(io,start_dim, step_dim, stop_dim, nslicesx, nslicesy)
  113. for dim in (start_dim : step_dim : stop_dim)
  114. A = Array(rand(Cfloat, dim, dim))
  115. B = Array(rand(Cfloat, dim, dim))
  116. C = zeros(Float32, dim, dim)
  117. mt = multiply_with_starpu(A, B, C, nslicesx, nslicesy)
  118. flops = (2*dim-1)*dim*dim/mt
  119. size=dim*dim*4*3/1024/1024
  120. println(io,"$size $flops")
  121. println("$size $flops")
  122. end
  123. end
# Driver: ARGS[1] names the output file for the timing table.
# Sizes run from 16*STRIDE up to 4096 in steps of 4*STRIDE, on a
# 2 x 2 grid of data slices.
io=open(ARGS[1],"w")
compute_times(io,16*STRIDE,4*STRIDE,4096,2,2)
close(io)
# Shut the StarPU runtime down once all tasks have completed.
@debugprint "starpu_shutdown"
starpu_shutdown()