# mult.jl — StarPU.jl blocked matrix-multiplication example
  1. # StarPU --- Runtime system for heterogeneous multicore architectures.
  2. #
  3. # Copyright (C) 2020 Université de Bordeaux, CNRS (LaBRI UMR 5800), Inria
  4. #
  5. # StarPU is free software; you can redistribute it and/or modify
  6. # it under the terms of the GNU Lesser General Public License as published by
  7. # the Free Software Foundation; either version 2.1 of the License, or (at
  8. # your option) any later version.
  9. #
  10. # StarPU is distributed in the hope that it will be useful, but
  11. # WITHOUT ANY WARRANTY; without even the implied warranty of
  12. # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
  13. #
  14. # See the GNU Lesser General Public License in COPYING.LGPL for more details.
  15. #
  16. import Libdl
  17. using StarPU
  18. using LinearAlgebra
@target STARPU_CPU+STARPU_CUDA
# Naive O(n^3) matrix-product codelet: m3 = m1 * m2.
# The body is transpiled by StarPU.jl's @codelet macro into CPU and CUDA
# kernels; @parallel marks the loops the backend may run in parallel, and
# width()/height() are the DSL accessors for the matrix extents.
# NOTE(review): `stride` is unused by the naive version — it only matters for
# the tiled/unrolled variant kept commented out below.
@codelet function matrix_mult(m1 :: Matrix{Float32}, m2 :: Matrix{Float32}, m3 :: Matrix{Float32}, stride ::Int32) :: Nothing
    width_m2 :: Int32 = width(m2)
    height_m1 :: Int32 = height(m1)
    width_m1 :: Int32 = width(m1)
    # Naive version: one independent dot product per (i, j) output cell.
    @parallel for j in (1 : width_m2)
        @parallel for i in (1 : height_m1)
            sum :: Float32 = 0.
            for k in (1 : width_m1)
                sum = sum + m1[i, k] * m2[k, j]
            end
            m3[i, j] = sum
        end
    end
    # ##### Tiled and unrolled version
    # for l in (1 : width_m2)
    # for m in (1 : height_m1)
    # m3[m,l] = 0
    # end
    # end
    # @parallel for i in (1 : STRIDE : height_m1)
    # for k in (1 : STRIDE : width_m1 )
    # for j in (1 : STRIDE : width_m2 )
    # for kk in (k : 4 : k+STRIDE-1)
    # for jj in (j : 2 : j+STRIDE-1)
    # alpha00 :: Float32 =m2[kk,jj]
    # alpha01 :: Float32 =m2[kk,jj+1]
    # alpha10 :: Float32 =m2[kk+1,jj]
    # alpha11 :: Float32 =m2[kk+1,jj+1]
    # alpha20 :: Float32 =m2[kk+2,jj]
    # alpha21 :: Float32 =m2[kk+2,jj+1]
    # alpha30 :: Float32 =m2[kk+3,jj]
    # alpha31 :: Float32 =m2[kk+3,jj+1]
    # for ii in (i : 1 : i+STRIDE-1)
    # m3[ii, jj] = m3[ii, jj] + m1[ii, kk] * alpha00 + m1[ii, kk+1] * alpha10 + m1[ii, kk+2] * alpha20 + m1[ii,kk+3]*alpha30
    # m3[ii, jj+1] = m3[ii, jj+1] + m1[ii, kk] * alpha01 + m1[ii, kk+1] * alpha11 + m1[ii, kk+2]*alpha21 + m1[ii,kk+3]*alpha31
    # end
    # end
    # end
    # end
    # end
    # end
    return
end
# Initialize the StarPU runtime before any codelet or task is created.
starpu_init()
  65. function multiply_with_starpu(A :: Matrix{Float32}, B :: Matrix{Float32}, C :: Matrix{Float32}, nslicesx, nslicesy, stride)
  66. scale= 3
  67. tmin=0
  68. vert = starpu_data_filter(STARPU_MATRIX_FILTER_VERTICAL_BLOCK, nslicesx)
  69. horiz = starpu_data_filter(STARPU_MATRIX_FILTER_BLOCK, nslicesy)
  70. @starpu_block let
  71. hA,hB,hC = starpu_data_register(A, B, C)
  72. starpu_data_partition(hB, vert)
  73. starpu_data_partition(hA, horiz)
  74. starpu_data_map_filters(hC, vert, horiz)
  75. tmin=0
  76. perfmodel = starpu_perfmodel(
  77. perf_type = starpu_perfmodel_type(STARPU_HISTORY_BASED),
  78. symbol = "history_perf"
  79. )
  80. cl = starpu_codelet(
  81. cpu_func = "matrix_mult",
  82. cuda_func = "matrix_mult",
  83. #opencl_func="matrix_mult",
  84. modes = [STARPU_R, STARPU_R, STARPU_W],
  85. perfmodel = perfmodel
  86. )
  87. for i in (1 : 10 )
  88. t=time_ns()
  89. @starpu_sync_tasks begin
  90. for taskx in (1 : nslicesx)
  91. for tasky in (1 : nslicesy)
  92. handles = [hA[tasky], hB[taskx], hC[taskx, tasky]]
  93. task = starpu_task(cl = cl, handles = handles, cl_arg=(Int32(stride),))
  94. starpu_task_submit(task)
  95. #@starpu_async_cl matrix_mult(hA[tasky], hB[taskx], hC[taskx, tasky])
  96. end
  97. end
  98. end
  99. t=time_ns()-t
  100. if (tmin==0 || tmin>t)
  101. tmin=t
  102. end
  103. end
  104. end
  105. return tmin
  106. end
  107. function approximately_equals(
  108. A :: Matrix{Cfloat},
  109. B :: Matrix{Cfloat},
  110. eps = 1e-2
  111. )
  112. (height, width) = size(A)
  113. for j in (1 : width)
  114. for i in (1 : height)
  115. if (abs(A[i,j] - B[i,j]) > eps * max(abs(B[i,j]), abs(A[i,j])))
  116. println("A[$i,$j] : $(A[i,j]), B[$i,$j] : $(B[i,j])")
  117. return false
  118. end
  119. end
  120. end
  121. return true
  122. end
  123. function compute_times(io,start_dim, step_dim, stop_dim, nslicesx, nslicesy, stride)
  124. for dim in (start_dim : step_dim : stop_dim)
  125. A = Array(rand(Cfloat, dim, dim))
  126. B = Array(rand(Cfloat, dim, dim))
  127. C = zeros(Float32, dim, dim)
  128. mt = multiply_with_starpu(A, B, C, nslicesx, nslicesy, stride)
  129. flops = (2*dim-1)*dim*dim/mt
  130. size=dim*dim*4*3/1024/1024
  131. println(io,"$size $flops")
  132. println("$size $flops")
  133. end
  134. end
  135. if size(ARGS, 1) < 2
  136. stride=4
  137. filename="x.dat"
  138. else
  139. stride=parse(Int, ARGS[1])
  140. filename=ARGS[2]
  141. end
  142. io=open(filename,"w")
  143. compute_times(io,16*stride,4*stride,128*stride,2,2,stride)
  144. close(io)
  145. starpu_shutdown()