/* dw_block_spmv.c */
  1. /* StarPU --- Runtime system for heterogeneous multicore architectures.
  2. *
  3. * Copyright (C) 2009-2012, 2014-2015 Université de Bordeaux
  4. * Copyright (C) 2010 Mehdi Juhoor <mjuhoor@gmail.com>
  5. * Copyright (C) 2010, 2011, 2012, 2013, 2014, 2016 CNRS
  6. *
  7. * StarPU is free software; you can redistribute it and/or modify
  8. * it under the terms of the GNU Lesser General Public License as published by
  9. * the Free Software Foundation; either version 2.1 of the License, or (at
  10. * your option) any later version.
  11. *
  12. * StarPU is distributed in the hope that it will be useful, but
  13. * WITHOUT ANY WARRANTY; without even the implied warranty of
  14. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
  15. *
  16. * See the GNU Lesser General Public License in COPYING.LGPL for more details.
  17. */
  18. /*
  19. * This computes an SPMV on a BCSR sparse matrix. It simply splits the matrix
  20. * into its blocks, thus turning the problem into mere matrix-vector products
  21. * (GEMV) which can be run in parallel.
  22. */
  23. #include "dw_block_spmv.h"
  24. #include "matrix_market/mm_to_bcsr.h"
  25. #ifdef STARPU_HAVE_HELGRIND_H
  26. #include <valgrind/helgrind.h>
  27. #endif
  28. #ifndef ANNOTATE_HAPPENS_BEFORE
  29. #define ANNOTATE_HAPPENS_BEFORE(obj) ((void)0)
  30. #endif
  31. #ifndef ANNOTATE_HAPPENS_AFTER
  32. #define ANNOTATE_HAPPENS_AFTER(obj) ((void)0)
  33. #endif
  34. #define FPRINTF(ofile, fmt, ...) do { if (!getenv("STARPU_SSILENT")) {fprintf(ofile, fmt, ## __VA_ARGS__); }} while(0)
/* Wall-clock markers (µs, from starpu_timing_now()) taken around the task graph. */
static double start;
static double end;
/* Posted by the callback of the last completing task to wake up main(). */
static sem_t sem;
/* Block dimensions of the BCSR matrix; both overridden by argv[2] when given. */
static unsigned c = 256;
static unsigned r = 256;
/* Number of tasks still in flight; decremented atomically in init_problem_callback(). */
static int remainingtasks = -1;
/* StarPU handles for the registered matrix and the two vectors. */
static starpu_data_handle_t sparse_matrix;
static starpu_data_handle_t vector_in, vector_out;
/* Number of scalar elements allocated for each vector (set in create_data()). */
static uint32_t size;
/* Path of the Matrix Market input file (argv[1]). */
static char *inputfile;
/* Host-side BCSR matrix produced by mm_file_to_bcsr(). */
static bcsr_t *bcsr_matrix;
/* Host buffers backing vector_in / vector_out. */
static float *vector_in_ptr;
static float *vector_out_ptr;
/* Read the Matrix Market file into a c x r block-CSR matrix and register it,
 * along with a constant input vector (2.0f) and a zeroed output vector, with
 * the runtime. */
void create_data(void)
{
	/* read the input file */
	bcsr_matrix = mm_file_to_bcsr(inputfile, c, r);

	/* declare the corresponding block CSR to the runtime */
	starpu_bcsr_data_register(&sparse_matrix, STARPU_MAIN_RAM, bcsr_matrix->nnz_blocks, bcsr_matrix->nrows_blocks,
	                          (uintptr_t)bcsr_matrix->val, bcsr_matrix->colind, bcsr_matrix->rowptr,
	                          0, bcsr_matrix->r, bcsr_matrix->c, sizeof(float));

	/* NOTE(review): c*r*nnz is the number of scalars stored in the blocks,
	 * which over-sizes both vectors (only ncols*c / nrows*r entries are
	 * actually read/written) — presumably intentional slack; confirm. */
	size = c*r*starpu_bcsr_get_nnz(sparse_matrix);
	/* printf("size = %d \n ", size); */

	/* initiate the 2 vectors */
	vector_in_ptr = malloc(size*sizeof(float));
	assert(vector_in_ptr);
	vector_out_ptr = malloc(size*sizeof(float));
	assert(vector_out_ptr);

	/* fill those */
	unsigned ind;
	for (ind = 0; ind < size; ind++)
	{
		vector_in_ptr[ind] = 2.0f;
		vector_out_ptr[ind] = 0.0f;
	}

	starpu_vector_data_register(&vector_in, STARPU_MAIN_RAM, (uintptr_t)vector_in_ptr, size, sizeof(float));
	starpu_vector_data_register(&vector_out, STARPU_MAIN_RAM, (uintptr_t)vector_out_ptr, size, sizeof(float));
}
  73. void unregister_data(void)
  74. {
  75. starpu_data_unpartition(sparse_matrix, STARPU_MAIN_RAM);
  76. starpu_data_unregister(sparse_matrix);
  77. starpu_data_unpartition(vector_in, STARPU_MAIN_RAM);
  78. starpu_data_unregister(vector_in);
  79. starpu_data_unpartition(vector_out, STARPU_MAIN_RAM);
  80. starpu_data_unregister(vector_out);
  81. }
  82. void init_problem_callback(void *arg)
  83. {
  84. unsigned *remaining = arg;
  85. unsigned val = STARPU_ATOMIC_ADD(remaining, -1);
  86. ANNOTATE_HAPPENS_BEFORE(&remaining);
  87. /* if (val < 10)
  88. printf("callback %d remaining \n", val); */
  89. if ( val == 0 )
  90. {
  91. ANNOTATE_HAPPENS_AFTER(&remaining);
  92. printf("DONE ...\n");
  93. end = starpu_timing_now();
  94. sem_post(&sem);
  95. }
  96. }
  97. unsigned get_bcsr_nchildren(STARPU_ATTRIBUTE_UNUSED struct starpu_data_filter *f, starpu_data_handle_t handle)
  98. {
  99. return (unsigned)starpu_bcsr_get_nnz(handle);
  100. }
  101. struct starpu_data_interface_ops *get_bcsr_child_ops(STARPU_ATTRIBUTE_UNUSED struct starpu_data_filter *f, STARPU_ATTRIBUTE_UNUSED unsigned child)
  102. {
  103. return &starpu_interface_matrix_ops;
  104. }
  105. void call_filters(void)
  106. {
  107. struct starpu_data_filter bcsr_f;
  108. struct starpu_data_filter vector_in_f, vector_out_f;
  109. bcsr_f.filter_func = starpu_bcsr_filter_canonical_block;
  110. bcsr_f.get_nchildren = get_bcsr_nchildren;
  111. /* the children use a matrix interface ! */
  112. bcsr_f.get_child_ops = get_bcsr_child_ops;
  113. vector_in_f.filter_func = starpu_vector_filter_block;
  114. vector_in_f.nchildren = size/c;
  115. vector_in_f.get_nchildren = NULL;
  116. vector_in_f.get_child_ops = NULL;
  117. vector_out_f.filter_func = starpu_vector_filter_block;
  118. vector_out_f.nchildren = size/r;
  119. vector_out_f.get_nchildren = NULL;
  120. vector_out_f.get_child_ops = NULL;
  121. starpu_data_partition(sparse_matrix, &bcsr_f);
  122. starpu_data_partition(vector_in, &vector_in_f);
  123. starpu_data_partition(vector_out, &vector_out_f);
  124. }
/* Number of times the full SpMV is repeated (each repetition resubmits one
 * task per non-empty block). */
#define NSPMV 32
/* Total number of submitted tasks: NSPMV * number of non-empty blocks. */
unsigned totaltasks;

/* Codelet computing one block GEMV: handles are (matrix block R, input
 * sub-vector R, output sub-vector RW). */
struct starpu_codelet cl =
{
	.cpu_funcs = { cpu_block_spmv},
	.cpu_funcs_name = { "cpu_block_spmv" },
#ifdef STARPU_USE_CUDA
	.cuda_funcs = {cublas_block_spmv},
#endif
	.cuda_flags = {STARPU_CUDA_ASYNC},
	.nbuffers = 3,
	.modes = {STARPU_R, STARPU_R, STARPU_RW}
};
/* Build and submit the whole task graph: NSPMV sweeps over the matrix, one
 * task per non-empty block.  Tasks accumulating into the same output
 * sub-vector are serialized through tag dependencies, forming "chains". */
void launch_spmv_codelets(void)
{
	struct starpu_task *task_tab;
	/* is_entry_tab[t] == 1 iff task t starts a dependency chain. */
	uint8_t *is_entry_tab;
	int ret;

	/* we call one codelet per block */
	unsigned nblocks = starpu_bcsr_get_nnz(sparse_matrix);
	unsigned nrows = starpu_bcsr_get_nrow(sparse_matrix);

	remainingtasks = NSPMV*nblocks;
	totaltasks = remainingtasks;

	unsigned taskid = 0;

	/* NOTE(review): task_tab is never freed — the tasks are still in flight
	 * when this function returns, so it cannot be freed here; it leaks for
	 * the lifetime of the program. */
	task_tab = calloc(totaltasks, sizeof(struct starpu_task));
	STARPU_ASSERT(task_tab);

	is_entry_tab = calloc(totaltasks, sizeof(uint8_t));
	STARPU_ASSERT(is_entry_tab);

	printf("there will be %d codelets\n", remainingtasks);

	uint32_t *rowptr = starpu_bcsr_get_local_rowptr(sparse_matrix);
	uint32_t *colind = starpu_bcsr_get_local_colind(sparse_matrix);

	start = starpu_timing_now();

	unsigned loop;
	for (loop = 0; loop < NSPMV; loop++)
	{
		unsigned row;
		unsigned part = 0;	/* index of the current matrix block child */

		for (row = 0; row < nrows; row++)
		{
			unsigned index;

			/* skip empty block rows */
			if (rowptr[row] == rowptr[row+1])
			{
				continue;
			}

			for (index = rowptr[row]; index < rowptr[row+1]; index++, part++)
			{
				struct starpu_task *task = &task_tab[taskid];
				starpu_task_init(task);

				/* each task is identified by its own tag so
				 * dependencies can be declared below */
				task->use_tag = 1;
				task->tag_id = taskid;

				task->callback_func = init_problem_callback;
				task->callback_arg = &remainingtasks;
				task->cl = &cl;
				task->cl_arg = NULL;

				unsigned i = colind[index];	/* block column -> input chunk */
				unsigned j = row;		/* block row    -> output chunk */

				task->handles[0] = starpu_data_get_sub_data(sparse_matrix, 1, part);
				task->handles[1] = starpu_data_get_sub_data(vector_in, 1, i);
				task->handles[2] = starpu_data_get_sub_data(vector_out, 1, j);

				/* all tasks in the same row are dependant so that we don't wait too much for data
				 * we need to wait on the previous task if we are not the first task of a row */
				/* NOTE(review): `row & ~0x3` rounds the row down to a
				 * multiple of 4, so the comparison apparently chains
				 * groups of 4 consecutive rows into a single chain
				 * rather than one chain per row — confirm this is the
				 * intended grouping. */
				if (index != rowptr[row & ~0x3])
				{
					/* this is not the first task in the row */
					starpu_tag_declare_deps((starpu_tag_t)taskid, 1, (starpu_tag_t)(taskid-1));

					is_entry_tab[taskid] = 0;
				}
				else
				{
					/* this is an entry task */
					is_entry_tab[taskid] = 1;
				}

				taskid++;
			}
		}
	}

	printf("start submitting tasks !\n");

	/* submit ALL tasks now */
	unsigned nchains = 0;
	unsigned task;
	for (task = 0; task < totaltasks; task++)
	{
		if (is_entry_tab[task])
		{
			nchains++;
		}

		ret = starpu_task_submit(&task_tab[task]);
		STARPU_CHECK_RETURN_VALUE(ret, "starpu_task_submit");
	}

	/* NOTE(review): divides by nchains — zero if the matrix has no
	 * non-empty block (totaltasks == 0). */
	printf("end of task submission (there was %u chains for %u tasks : ratio %u tasks per chain) !\n", nchains, totaltasks, totaltasks/nchains);

	free(is_entry_tab);
}
  217. void init_problem(void)
  218. {
  219. /* create the sparse input matrix */
  220. create_data();
  221. /* create a new codelet that will perform a SpMV on it */
  222. call_filters();
  223. }
  224. void print_results(void)
  225. {
  226. unsigned row;
  227. for (row = 0; row < STARPU_MIN(size, 16); row++)
  228. {
  229. printf("%2.2f\t%2.2f\n", vector_in_ptr[row], vector_out_ptr[row]);
  230. }
  231. }
  232. int main(STARPU_ATTRIBUTE_UNUSED int argc,
  233. STARPU_ATTRIBUTE_UNUSED char **argv)
  234. {
  235. int ret;
  236. if (argc < 2)
  237. {
  238. FPRINTF(stderr, "usage : %s filename [tile size]\n", argv[0]);
  239. exit(-1);
  240. }
  241. if (argc == 3)
  242. {
  243. /* third argument is the tile size */
  244. char *argptr;
  245. r = strtol(argv[2], &argptr, 10);
  246. c = r;
  247. }
  248. inputfile = argv[1];
  249. /* start the runtime */
  250. ret = starpu_init(NULL);
  251. if (ret == -ENODEV)
  252. return 77;
  253. STARPU_CHECK_RETURN_VALUE(ret, "starpu_init");
  254. starpu_cublas_init();
  255. sem_init(&sem, 0, 0U);
  256. init_problem();
  257. launch_spmv_codelets();
  258. sem_wait(&sem);
  259. sem_destroy(&sem);
  260. unregister_data();
  261. print_results();
  262. double totalflop = 2.0*c*r*totaltasks;
  263. double timing = end - start;
  264. FPRINTF(stderr, "Computation took (in ms)\n");
  265. FPRINTF(stdout, "%2.2f\n", timing/1000);
  266. FPRINTF(stderr, "Flop %e\n", totalflop);
  267. FPRINTF(stderr, "GFlops : %2.2f\n", totalflop/timing/1000);
  268. return 0;
  269. }