/* dw_block_spmv.c */
  1. /* StarPU --- Runtime system for heterogeneous multicore architectures.
  2. *
  3. * Copyright (C) 2009-2012, 2014-2015 Université de Bordeaux
  4. * Copyright (C) 2010 Mehdi Juhoor <mjuhoor@gmail.com>
  5. * Copyright (C) 2010, 2011, 2012, 2013, 2014, 2016 CNRS
  6. *
  7. * StarPU is free software; you can redistribute it and/or modify
  8. * it under the terms of the GNU Lesser General Public License as published by
  9. * the Free Software Foundation; either version 2.1 of the License, or (at
  10. * your option) any later version.
  11. *
  12. * StarPU is distributed in the hope that it will be useful, but
  13. * WITHOUT ANY WARRANTY; without even the implied warranty of
  14. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
  15. *
  16. * See the GNU Lesser General Public License in COPYING.LGPL for more details.
  17. */
  18. /*
  19. * This computes an SPMV on a BCSR sparse matrix. It simply splits the matrix
  20. * into its blocks, thus turning the problem into mere matrix-vector products
  21. * (GEMV) which can be run in parallel.
  22. */
  23. #include "dw_block_spmv.h"
  24. #include "matrix_market/mm_to_bcsr.h"
  25. #ifdef STARPU_HAVE_HELGRIND_H
  26. #include <valgrind/helgrind.h>
  27. #endif
  28. #ifndef ANNOTATE_HAPPENS_BEFORE
  29. #define ANNOTATE_HAPPENS_BEFORE(obj) ((void)0)
  30. #endif
  31. #ifndef ANNOTATE_HAPPENS_AFTER
  32. #define ANNOTATE_HAPPENS_AFTER(obj) ((void)0)
  33. #endif
  34. #define FPRINTF(ofile, fmt, ...) do { if (!getenv("STARPU_SSILENT")) {fprintf(ofile, fmt, ## __VA_ARGS__); }} while(0)
/* Wall-clock bounds of the computation, in StarPU timing units (µs):
 * 'start' is set at task submission, 'end' by the last completion callback. */
static double start;
static double end;
/* Posted by the callback of the last finishing task to wake up main(). */
static sem_t sem;
/* Block (tile) dimensions of the BCSR matrix; optionally overridden by
 * argv[2] in main(). */
static unsigned c = 256;
static unsigned r = 256;
/* Count of tasks still to complete; decremented atomically by the callback. */
static int remainingtasks = -1;
/* StarPU handles for the sparse matrix and the two dense vectors. */
static starpu_data_handle_t sparse_matrix;
static starpu_data_handle_t vector_in, vector_out;
/* Total number of scalar elements covered by the non-zero blocks
 * (c * r * nnz_blocks); also used as the length of both vectors. */
static uint32_t size;
/* Path of the Matrix Market input file (argv[1]). */
static char *inputfile;
/* Host-side BCSR representation parsed from the input file. */
static bcsr_t *bcsr_matrix;
/* Host buffers backing the registered input/output vectors. */
static float *vector_in_ptr;
static float *vector_out_ptr;
  48. void create_data(void)
  49. {
  50. /* read the input file */
  51. bcsr_matrix = mm_file_to_bcsr(inputfile, c, r);
  52. /* declare the corresponding block CSR to the runtime */
  53. starpu_bcsr_data_register(&sparse_matrix, STARPU_MAIN_RAM, bcsr_matrix->nnz_blocks, bcsr_matrix->nrows_blocks,
  54. (uintptr_t)bcsr_matrix->val, bcsr_matrix->colind, bcsr_matrix->rowptr,
  55. 0, bcsr_matrix->r, bcsr_matrix->c, sizeof(float));
  56. size = c*r*starpu_bcsr_get_nnz(sparse_matrix);
  57. /* printf("size = %d \n ", size); */
  58. /* initiate the 2 vectors */
  59. vector_in_ptr = malloc(size*sizeof(float));
  60. assert(vector_in_ptr);
  61. vector_out_ptr = malloc(size*sizeof(float));
  62. assert(vector_out_ptr);
  63. /* fill those */
  64. unsigned ind;
  65. for (ind = 0; ind < size; ind++)
  66. {
  67. vector_in_ptr[ind] = 2.0f;
  68. vector_out_ptr[ind] = 0.0f;
  69. }
  70. starpu_vector_data_register(&vector_in, STARPU_MAIN_RAM, (uintptr_t)vector_in_ptr, size, sizeof(float));
  71. starpu_vector_data_register(&vector_out, STARPU_MAIN_RAM, (uintptr_t)vector_out_ptr, size, sizeof(float));
  72. }
  73. void unregister_data(void)
  74. {
  75. starpu_data_unpartition(sparse_matrix, STARPU_MAIN_RAM);
  76. starpu_data_unregister(sparse_matrix);
  77. starpu_data_unpartition(vector_in, STARPU_MAIN_RAM);
  78. starpu_data_unregister(vector_in);
  79. starpu_data_unpartition(vector_out, STARPU_MAIN_RAM);
  80. starpu_data_unregister(vector_out);
  81. }
  82. void init_problem_callback(void *arg)
  83. {
  84. unsigned *remaining = arg;
  85. unsigned val = STARPU_ATOMIC_ADD(remaining, -1);
  86. ANNOTATE_HAPPENS_BEFORE(&remaining);
  87. /* if (val < 10)
  88. printf("callback %d remaining \n", val); */
  89. if ( val == 0 )
  90. {
  91. ANNOTATE_HAPPENS_AFTER(&remaining);
  92. printf("DONE ...\n");
  93. end = starpu_timing_now();
  94. sem_post(&sem);
  95. }
  96. }
/* Filter callback: the canonical block filter yields one child handle per
 * non-zero block of the BCSR matrix. */
unsigned get_bcsr_nchildren(STARPU_ATTRIBUTE_UNUSED struct starpu_data_filter *f, starpu_data_handle_t handle)
{
	return (unsigned)starpu_bcsr_get_nnz(handle);
}
/* Filter callback: every child of the BCSR matrix is a dense block, so it
 * uses the matrix data interface. */
struct starpu_data_interface_ops *get_bcsr_child_ops(STARPU_ATTRIBUTE_UNUSED struct starpu_data_filter *f, STARPU_ATTRIBUTE_UNUSED unsigned child)
{
	return &starpu_interface_matrix_ops;
}
  105. void call_filters(void)
  106. {
  107. struct starpu_data_filter bcsr_f;
  108. struct starpu_data_filter vector_in_f, vector_out_f;
  109. bcsr_f.filter_func = starpu_bcsr_filter_canonical_block;
  110. bcsr_f.get_nchildren = get_bcsr_nchildren;
  111. /* the children use a matrix interface ! */
  112. bcsr_f.get_child_ops = get_bcsr_child_ops;
  113. vector_in_f.filter_func = starpu_vector_filter_block;
  114. vector_in_f.nchildren = size/c;
  115. vector_in_f.get_nchildren = NULL;
  116. vector_in_f.get_child_ops = NULL;
  117. vector_out_f.filter_func = starpu_vector_filter_block;
  118. vector_out_f.nchildren = size/r;
  119. vector_out_f.get_nchildren = NULL;
  120. vector_out_f.get_child_ops = NULL;
  121. starpu_data_partition(sparse_matrix, &bcsr_f);
  122. starpu_data_partition(vector_in, &vector_in_f);
  123. starpu_data_partition(vector_out, &vector_out_f);
  124. }
/* Number of times the full sparse matrix-vector product is repeated. */
#define NSPMV 32
/* Total number of tasks submitted (NSPMV * number of non-zero blocks). */
unsigned totaltasks;

/* Codelet performing one block GEMV: reads a matrix block and an input
 * sub-vector, accumulates into an output sub-vector (hence STARPU_RW). */
struct starpu_codelet cl =
{
	.cpu_funcs = { cpu_block_spmv},
	.cpu_funcs_name = { "cpu_block_spmv" },
#ifdef STARPU_USE_CUDA
	.cuda_funcs = {cublas_block_spmv},
#endif
	.nbuffers = 3,
	.modes = {STARPU_R, STARPU_R, STARPU_RW}
};
/* Build and submit NSPMV repetitions of the blocked SpMV, one task per
 * non-zero block.  Tasks accumulating into the same output sub-vector are
 * serialised into chains through StarPU tags. */
void launch_spmv_codelets(void)
{
	struct starpu_task *task_tab;
	/* is_entry_tab[t] == 1 iff task t starts a dependency chain */
	uint8_t *is_entry_tab;
	int ret;

	/* we call one codelet per block */
	unsigned nblocks = starpu_bcsr_get_nnz(sparse_matrix);
	unsigned nrows = starpu_bcsr_get_nrow(sparse_matrix);

	remainingtasks = NSPMV*nblocks;
	totaltasks = remainingtasks;

	unsigned taskid = 0;

	/* allocate every task structure up front; tag_id == array index */
	task_tab = calloc(totaltasks, sizeof(struct starpu_task));
	STARPU_ASSERT(task_tab);

	is_entry_tab = calloc(totaltasks, sizeof(uint8_t));
	STARPU_ASSERT(is_entry_tab);

	printf("there will be %d codelets\n", remainingtasks);

	uint32_t *rowptr = starpu_bcsr_get_local_rowptr(sparse_matrix);
	uint32_t *colind = starpu_bcsr_get_local_colind(sparse_matrix);

	start = starpu_timing_now();

	unsigned loop;
	for (loop = 0; loop < NSPMV; loop++)
	{
		unsigned row;
		/* 'part' walks the block children of the partitioned matrix in
		 * storage order across the whole repetition */
		unsigned part = 0;

		for (row = 0; row < nrows; row++)
		{
			unsigned index;

			/* empty block row: nothing to compute */
			if (rowptr[row] == rowptr[row+1])
			{
				continue;
			}

			for (index = rowptr[row]; index < rowptr[row+1]; index++, part++)
			{
				struct starpu_task *task = &task_tab[taskid];
				starpu_task_init(task);

				/* tags give tasks an identity for explicit dependency declaration */
				task->use_tag = 1;
				task->tag_id = taskid;

				task->callback_func = init_problem_callback;
				task->callback_arg = &remainingtasks;
				task->cl = &cl;
				task->cl_arg = NULL;

				/* block column selects the input slice, block row the output slice */
				unsigned i = colind[index];
				unsigned j = row;

				task->handles[0] = starpu_data_get_sub_data(sparse_matrix, 1, part);
				task->handles[1] = starpu_data_get_sub_data(vector_in, 1, i);
				task->handles[2] = starpu_data_get_sub_data(vector_out, 1, j);

				/* all tasks in the same row are dependant so that we don't wait too much for data
				 * we need to wait on the previous task if we are not the first task of a row */
				/* NOTE(review): 'row & ~0x3' groups rows four at a time, so a
				 * chain actually spans up to four consecutive block rows, not
				 * a single row as the comment above claims.  Confirm whether
				 * the masking is an intentional chain-lengthening tweak or a
				 * typo for rowptr[row]. */
				if (index != rowptr[row & ~0x3])
				{
					/* this is not the first task in the row */
					starpu_tag_declare_deps((starpu_tag_t)taskid, 1, (starpu_tag_t)(taskid-1));

					is_entry_tab[taskid] = 0;
				}
				else
				{
					/* this is an entry task */
					is_entry_tab[taskid] = 1;
				}

				taskid++;
			}
		}
	}

	printf("start submitting tasks !\n");

	/* submit ALL tasks now */
	unsigned nchains = 0;
	unsigned task;
	for (task = 0; task < totaltasks; task++)
	{
		if (is_entry_tab[task])
		{
			nchains++;
		}

		ret = starpu_task_submit(&task_tab[task]);
		STARPU_CHECK_RETURN_VALUE(ret, "starpu_task_submit");
	}

	/* NOTE(review): assumes at least one chain exists (non-empty matrix);
	 * with nchains == 0 this division would trap. */
	printf("end of task submission (there was %u chains for %u tasks : ratio %u tasks per chain) !\n", nchains, totaltasks, totaltasks/nchains);
}
/* Set up the whole problem: read and register the data, then split every
 * handle into per-task pieces with filters. */
void init_problem(void)
{
	/* create the sparse input matrix and the two dense vectors */
	create_data();

	/* partition them into sub-data, one piece per future task operand */
	call_filters();
}
  222. void print_results(void)
  223. {
  224. unsigned row;
  225. for (row = 0; row < STARPU_MIN(size, 16); row++)
  226. {
  227. printf("%2.2f\t%2.2f\n", vector_in_ptr[row], vector_out_ptr[row]);
  228. }
  229. }
  230. int main(STARPU_ATTRIBUTE_UNUSED int argc,
  231. STARPU_ATTRIBUTE_UNUSED char **argv)
  232. {
  233. int ret;
  234. if (argc < 2)
  235. {
  236. FPRINTF(stderr, "usage : %s filename [tile size]\n", argv[0]);
  237. exit(-1);
  238. }
  239. if (argc == 3)
  240. {
  241. /* third argument is the tile size */
  242. char *argptr;
  243. r = strtol(argv[2], &argptr, 10);
  244. c = r;
  245. }
  246. inputfile = argv[1];
  247. /* start the runtime */
  248. ret = starpu_init(NULL);
  249. if (ret == -ENODEV)
  250. return 77;
  251. STARPU_CHECK_RETURN_VALUE(ret, "starpu_init");
  252. sem_init(&sem, 0, 0U);
  253. init_problem();
  254. launch_spmv_codelets();
  255. sem_wait(&sem);
  256. sem_destroy(&sem);
  257. unregister_data();
  258. print_results();
  259. double totalflop = 2.0*c*r*totaltasks;
  260. double timing = end - start;
  261. FPRINTF(stderr, "Computation took (in ms)\n");
  262. FPRINTF(stdout, "%2.2f\n", timing/1000);
  263. FPRINTF(stderr, "Flop %e\n", totalflop);
  264. FPRINTF(stderr, "GFlops : %2.2f\n", totalflop/timing/1000);
  265. return 0;
  266. }