dw_block_spmv.c

/* StarPU --- Runtime system for heterogeneous multicore architectures.
 *
 * Copyright (C) 2012-2013 Inria
 * Copyright (C) 2008-2015,2017 Université de Bordeaux
 * Copyright (C) 2010 Mehdi Juhoor
 * Copyright (C) 2010-2017 CNRS
 *
 * StarPU is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation; either version 2.1 of the License, or (at
 * your option) any later version.
 *
 * StarPU is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * See the GNU Lesser General Public License in COPYING.LGPL for more details.
 */
/*
 * This computes an SPMV on a BCSR sparse matrix. It simply splits the matrix
 * into its blocks, thus turning the problem into mere matrix-vector products
 * (GEMV) which can be run in parallel.
 */
#include "dw_block_spmv.h"
#include "matrix_market/mm_to_bcsr.h"

#ifdef STARPU_HAVE_HELGRIND_H
#include <valgrind/helgrind.h>
#endif
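/* Fall back to no-op definitions when the Helgrind client annotations are not available. */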
#ifndef ANNOTATE_HAPPENS_BEFORE
#define ANNOTATE_HAPPENS_BEFORE(obj) ((void)0)
#endif
#ifndef ANNOTATE_HAPPENS_AFTER
#define ANNOTATE_HAPPENS_AFTER(obj) ((void)0)
#endif
#define FPRINTF(ofile, fmt, ...) do { if (!getenv("STARPU_SSILENT")) {fprintf(ofile, fmt, ## __VA_ARGS__); }} while(0)

static double start;
static double end;

static sem_t sem;
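/* dimensions of one dense block of the BCSR matrix, possibly overridden by the
 * optional "tile size" command-line argument */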
static unsigned c = 256;
static unsigned r = 256;
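/* number of tasks whose callback has not run yet; the last one posts the semaphore */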
static int remainingtasks = -1;

static starpu_data_handle_t sparse_matrix;
static starpu_data_handle_t vector_in, vector_out;

static uint32_t size;
static char *inputfile;
static bcsr_t *bcsr_matrix;

static float *vector_in_ptr;
static float *vector_out_ptr;
void create_data(void)
{
	/* read the input file */
	bcsr_matrix = mm_file_to_bcsr(inputfile, c, r);

	/* declare the corresponding block CSR to the runtime */
	starpu_bcsr_data_register(&sparse_matrix, STARPU_MAIN_RAM, bcsr_matrix->nnz_blocks, bcsr_matrix->nrows_blocks,
				  (uintptr_t)bcsr_matrix->val, bcsr_matrix->colind, bcsr_matrix->rowptr,
				  0, bcsr_matrix->r, bcsr_matrix->c, sizeof(float));

	size = c*r*starpu_bcsr_get_nnz(sparse_matrix);
	/* printf("size = %d \n ", size); */
	/* initialize the two vectors */
	vector_in_ptr = malloc(size*sizeof(float));
	assert(vector_in_ptr);

	vector_out_ptr = malloc(size*sizeof(float));
	assert(vector_out_ptr);

	/* fill those */
	unsigned ind;
	for (ind = 0; ind < size; ind++)
	{
		vector_in_ptr[ind] = 2.0f;
		vector_out_ptr[ind] = 0.0f;
	}

	starpu_vector_data_register(&vector_in, STARPU_MAIN_RAM, (uintptr_t)vector_in_ptr, size, sizeof(float));
	starpu_vector_data_register(&vector_out, STARPU_MAIN_RAM, (uintptr_t)vector_out_ptr, size, sizeof(float));
}
void unregister_data(void)
{
	starpu_data_unpartition(sparse_matrix, STARPU_MAIN_RAM);
	starpu_data_unregister(sparse_matrix);

	starpu_data_unpartition(vector_in, STARPU_MAIN_RAM);
	starpu_data_unregister(vector_in);

	starpu_data_unpartition(vector_out, STARPU_MAIN_RAM);
	starpu_data_unregister(vector_out);
}
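/*
 * Per-task callback: atomically decrement the shared counter of remaining
 * tasks; the task that brings it to zero records the end time and posts the
 * semaphore that main() is blocked on.
 */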
void init_problem_callback(void *arg)
{
	unsigned *remaining = arg;

	unsigned val = STARPU_ATOMIC_ADD(remaining, -1);
	ANNOTATE_HAPPENS_BEFORE(&remaining);

	/* if (val < 10)
		printf("callback %d remaining \n", val); */
	if ( val == 0 )
	{
		ANNOTATE_HAPPENS_AFTER(&remaining);
		printf("DONE ...\n");
		end = starpu_timing_now();

		sem_post(&sem);
	}
}
unsigned get_bcsr_nchildren(struct starpu_data_filter *f, starpu_data_handle_t handle)
{
	return (unsigned)starpu_bcsr_get_nnz(handle);
}

struct starpu_data_interface_ops *get_bcsr_child_ops(struct starpu_data_filter *f, unsigned child)
{
	return &starpu_interface_matrix_ops;
}
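/*
 * Apply the partitioning filters: the BCSR matrix is split into one dense
 * matrix handle per non-zero block, the input vector into chunks of c
 * elements and the output vector into chunks of r elements, so that each
 * task only accesses the block and the two vector slices it actually needs.
 */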
void call_filters(void)
{
	struct starpu_data_filter bcsr_f;
	struct starpu_data_filter vector_in_f, vector_out_f;

	bcsr_f.filter_func = starpu_bcsr_filter_canonical_block;
	bcsr_f.get_nchildren = get_bcsr_nchildren;
	/* the children use a matrix interface ! */
	bcsr_f.get_child_ops = get_bcsr_child_ops;

	vector_in_f.filter_func = starpu_vector_filter_block;
	vector_in_f.nchildren = size/c;
	vector_in_f.get_nchildren = NULL;
	vector_in_f.get_child_ops = NULL;

	vector_out_f.filter_func = starpu_vector_filter_block;
	vector_out_f.nchildren = size/r;
	vector_out_f.get_nchildren = NULL;
	vector_out_f.get_child_ops = NULL;

	starpu_data_partition(sparse_matrix, &bcsr_f);
	starpu_data_partition(vector_in, &vector_in_f);
	starpu_data_partition(vector_out, &vector_out_f);
}
#define NSPMV 32
unsigned totaltasks;
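/*
 * The block SpMV codelet takes three buffers: one dense block of the matrix
 * (read), the corresponding slice of the input vector (read), and the slice
 * of the output vector it updates (read-write).
 */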
struct starpu_codelet cl =
{
	.cpu_funcs = { cpu_block_spmv},
	.cpu_funcs_name = { "cpu_block_spmv" },
#ifdef STARPU_USE_CUDA
	.cuda_funcs = {cublas_block_spmv},
#endif
	.cuda_flags = {STARPU_CUDA_ASYNC},
	.nbuffers = 3,
	.modes = {STARPU_R, STARPU_R, STARPU_RW}
};
void launch_spmv_codelets(void)
{
	struct starpu_task *task_tab;
	uint8_t *is_entry_tab;

	/* we call one codelet per block */
	unsigned nblocks = starpu_bcsr_get_nnz(sparse_matrix);
	unsigned nrows = starpu_bcsr_get_nrow(sparse_matrix);

	remainingtasks = NSPMV*nblocks;
	totaltasks = remainingtasks;

	unsigned taskid = 0;

	task_tab = calloc(totaltasks, sizeof(struct starpu_task));
	STARPU_ASSERT(task_tab);

	is_entry_tab = calloc(totaltasks, sizeof(uint8_t));
	STARPU_ASSERT(is_entry_tab);

	printf("there will be %d codelets\n", remainingtasks);

	uint32_t *rowptr = starpu_bcsr_get_local_rowptr(sparse_matrix);
	uint32_t *colind = starpu_bcsr_get_local_colind(sparse_matrix);
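	/*
	 * Create NSPMV * nblocks tasks: for every repetition and every non-zero
	 * block, one task multiplies that block with the matching slice of the
	 * input vector and updates the matching slice of the output vector. Each
	 * task is tagged with its task id; tasks that are not "entry" tasks
	 * declare a tag dependency on the previous task, forming chains.
	 */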
	start = starpu_timing_now();

	unsigned loop;
	for (loop = 0; loop < NSPMV; loop++)
	{
		unsigned row;
		unsigned part = 0;

		for (row = 0; row < nrows; row++)
		{
			unsigned index;

			if (rowptr[row] == rowptr[row+1])
			{
				continue;
			}

			for (index = rowptr[row]; index < rowptr[row+1]; index++, part++)
			{
				struct starpu_task *task = &task_tab[taskid];
				starpu_task_init(task);

				task->use_tag = 1;
				task->tag_id = taskid;

				task->callback_func = init_problem_callback;
				task->callback_arg = &remainingtasks;

				task->cl = &cl;
				task->cl_arg = NULL;

				unsigned i = colind[index];
				unsigned j = row;
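				/* the block in block-row j and block-column i reads the i-th chunk
				 * of the input vector and updates the j-th chunk of the output vector */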
				task->handles[0] = starpu_data_get_sub_data(sparse_matrix, 1, part);
				task->handles[1] = starpu_data_get_sub_data(vector_in, 1, i);
				task->handles[2] = starpu_data_get_sub_data(vector_out, 1, j);
				/* all tasks in the same row are dependent so that we don't wait too much for data:
				 * we need to wait on the previous task if we are not the first task of a row */
				if (index != rowptr[row & ~0x3])
				{
					/* this is not the first task in the row */
					starpu_tag_declare_deps((starpu_tag_t)taskid, 1, (starpu_tag_t)(taskid-1));

					is_entry_tab[taskid] = 0;
				}
				else
				{
					/* this is an entry task */
					is_entry_tab[taskid] = 1;
				}

				taskid++;
			}
		}
	}
	printf("start submitting tasks !\n");

	/* submit ALL tasks now */
	unsigned nchains = 0;
	unsigned task;
	for (task = 0; task < totaltasks; task++)
	{
		int ret;
		if (is_entry_tab[task])
		{
			nchains++;
		}

		ret = starpu_task_submit(&task_tab[task]);
		STARPU_CHECK_RETURN_VALUE(ret, "starpu_task_submit");
	}
	printf("end of task submission (there were %u chains for %u tasks : ratio %u tasks per chain) !\n", nchains, totaltasks, totaltasks/nchains);
	free(is_entry_tab);
}
void init_problem(void)
{
	/* create the sparse input matrix */
	create_data();

	/* create a new codelet that will perform a SpMV on it */
	call_filters();
}

void print_results(void)
{
	unsigned row;

	for (row = 0; row < STARPU_MIN(size, 16); row++)
	{
		printf("%2.2f\t%2.2f\n", vector_in_ptr[row], vector_out_ptr[row]);
	}
}
int main(int argc, char *argv[])
{
	int ret;

	if (argc < 2)
	{
		FPRINTF(stderr, "usage : %s filename [tile size]\n", argv[0]);
		exit(-1);
	}

	if (argc == 3)
	{
		/* third argument is the tile size */
		char *argptr;
		r = strtol(argv[2], &argptr, 10);
		c = r;
	}

	inputfile = argv[1];

	/* start the runtime */
	ret = starpu_init(NULL);
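	/* exit with code 77, the conventional "skipped test" status, when no worker device is available */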
	if (ret == -ENODEV)
		return 77;
	STARPU_CHECK_RETURN_VALUE(ret, "starpu_init");

	starpu_cublas_init();

	sem_init(&sem, 0, 0U);

	init_problem();
	launch_spmv_codelets();

	sem_wait(&sem);
	sem_destroy(&sem);

	unregister_data();
	print_results();
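	/* each block product performs c*r multiply-add pairs, i.e. 2*c*r floating-point operations */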
	double totalflop = 2.0*c*r*totaltasks;

	double timing = end - start;
	FPRINTF(stderr, "Computation took (in ms)\n");
	FPRINTF(stdout, "%2.2f\n", timing/1000);
	FPRINTF(stderr, "Flop %e\n", totalflop);
	FPRINTF(stderr, "GFlops : %2.2f\n", totalflop/timing/1000);

	return 0;
}