fmultiple_manual.c

/* StarPU --- Runtime system for heterogeneous multicore architectures.
 *
 * Copyright (C) 2017 CNRS
 * Copyright (C) 2017 Inria
 * Copyright (C) 2015,2018 Université de Bordeaux
 *
 * StarPU is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation; either version 2.1 of the License, or (at
 * your option) any later version.
 *
 * StarPU is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * See the GNU Lesser General Public License in COPYING.LGPL for more details.
 */
/*
 * This exemplifies how to access the same matrix through different partitioned
 * views, handling the coherency between the views by hand.
 * We first run a kernel on the whole matrix to fill it, then run a kernel on
 * each vertical slice to check the values and multiply them by two, then run a
 * kernel on each horizontal slice to do the same.
 */
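/*
 * The three sets of handles declared below (whole matrix, vertical slices,
 * horizontal slices) all alias the same user buffer. The coherency between
 * them is handled by hand: an empty "switch" task is submitted which reads
 * the currently valid view and writes the view to be used next, and the
 * handles of the previous view are then invalidated so that they cannot be
 * accessed by mistake.
 */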
#include <starpu.h>

#define NX 6
#define NY 6
#define PARTS 2

#define FPRINTF(ofile, fmt, ...) do { if (!getenv("STARPU_SSILENT")) {fprintf(ofile, fmt, ## __VA_ARGS__); }} while(0)

void matrix_fill(void *buffers[], void *cl_arg)
{
	unsigned i, j;
	(void)cl_arg;

	/* length of the matrix */
	unsigned nx = STARPU_MATRIX_GET_NX(buffers[0]);
	unsigned ny = STARPU_MATRIX_GET_NY(buffers[0]);
	unsigned ld = STARPU_MATRIX_GET_LD(buffers[0]);
	int *val = (int *)STARPU_MATRIX_GET_PTR(buffers[0]);

	for(j=0; j<ny ; j++)
	{
		for(i=0; i<nx ; i++)
			val[(j*ld)+i] = i+100*j;
	}
}
struct starpu_codelet cl_fill =
{
	.cpu_funcs = {matrix_fill},
	.cpu_funcs_name = {"matrix_fill"},
	.nbuffers = 1,
	.modes = {STARPU_W},
	.name = "matrix_fill"
};
void fmultiple_check_scale(void *buffers[], void *cl_arg)
{
	int start, factor;
	unsigned i, j;

	/* length of the matrix */
	unsigned nx = STARPU_MATRIX_GET_NX(buffers[0]);
	unsigned ny = STARPU_MATRIX_GET_NY(buffers[0]);
	unsigned ld = STARPU_MATRIX_GET_LD(buffers[0]);
	int *val = (int *)STARPU_MATRIX_GET_PTR(buffers[0]);

	starpu_codelet_unpack_args(cl_arg, &start, &factor);

	for(j=0; j<ny ; j++)
	{
		for(i=0; i<nx ; i++)
		{
			STARPU_ASSERT(val[(j*ld)+i] == start + factor*((int)(i+100*j)));
			val[(j*ld)+i] *= 2;
		}
	}
}
#ifdef STARPU_USE_CUDA
extern void fmultiple_check_scale_cuda(void *buffers[], void *cl_arg);
#endif
struct starpu_codelet cl_check_scale =
{
#ifdef STARPU_USE_CUDA
	.cuda_funcs = {fmultiple_check_scale_cuda},
	.cuda_flags = {STARPU_CUDA_ASYNC},
#else
	/* Only enable it on CPUs if we don't have a CUDA device, to force remote execution on the CUDA device */
	.cpu_funcs = {fmultiple_check_scale},
	.cpu_funcs_name = {"fmultiple_check_scale"},
#endif
	.nbuffers = 1,
	.modes = {STARPU_RW},
	.name = "fmultiple_check_scale"
};
void empty(void *buffers[], void *cl_arg)
{
	/* This does not need to do anything: it is simply used to switch the
	 * coherency between the two views, by running on the home node of the
	 * data and thus gathering all the data pieces there. */
	(void)buffers;
	(void)cl_arg;

	/* This check is just for the testsuite */
	int node = starpu_task_get_current_data_node(0);
	unsigned i;
	unsigned nbuffers = STARPU_TASK_GET_NBUFFERS(starpu_task_get_current());
	STARPU_ASSERT(node >= 0);
	for (i = 1; i < nbuffers; i++)
		STARPU_ASSERT(starpu_task_get_current_data_node(i) == node);
}
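/* The switch codelet uses STARPU_VARIABLE_NBUFFERS because it is submitted
 * with 1 + PARTS data handles: the whole-matrix handle plus the handles of
 * one set of slices. */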
struct starpu_codelet cl_switch =
{
	.cpu_funcs = {empty},
	.nbuffers = STARPU_VARIABLE_NBUFFERS,
	.name = "switch"
};
int main(void)
{
	unsigned j, n=1;
	int matrix[NX][NY];
	int ret, i;

	/* We haven't taken care otherwise */
	STARPU_ASSERT((NX%PARTS) == 0);
	STARPU_ASSERT((NY%PARTS) == 0);

	starpu_data_handle_t handle;
	starpu_data_handle_t vert_handle[PARTS];
	starpu_data_handle_t horiz_handle[PARTS];

	ret = starpu_init(NULL);
	if (ret == -ENODEV)
		return 77;
	STARPU_CHECK_RETURN_VALUE(ret, "starpu_init");

	/* Force the switch tasks to execute on the home node, here STARPU_MAIN_RAM */
	cl_switch.specific_nodes = 1;
	for (i = 0; i < STARPU_NMAXBUFS; i++)
		cl_switch.nodes[i] = STARPU_MAIN_RAM;

	/* Declare the whole matrix to StarPU */
	starpu_matrix_data_register(&handle, STARPU_MAIN_RAM, (uintptr_t)matrix, NX, NX, NY, sizeof(matrix[0][0]));

	/* Also declare the vertical slices to StarPU */
	for (i = 0; i < PARTS; i++)
	{
		starpu_matrix_data_register(&vert_handle[i], STARPU_MAIN_RAM, (uintptr_t)&matrix[0][i*(NX/PARTS)], NX, NX/PARTS, NY, sizeof(matrix[0][0]));
		/* But make it invalid for now, we'll access the data through the whole matrix first */
		starpu_data_invalidate(vert_handle[i]);
	}

	/* And the horizontal slices to StarPU */
	for (i = 0; i < PARTS; i++)
	{
		starpu_matrix_data_register(&horiz_handle[i], STARPU_MAIN_RAM, (uintptr_t)&matrix[i*(NY/PARTS)][0], NX, NX, NY/PARTS, sizeof(matrix[0][0]));
		starpu_data_invalidate(horiz_handle[i]);
	}
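
	/* At this point only the whole-matrix handle holds a valid copy of the
	 * data; the slice handles were invalidated right after registration. */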
	/* Fill the matrix */
	ret = starpu_task_insert(&cl_fill, STARPU_W, handle, 0);
	if (ret == -ENODEV) goto enodev;
	STARPU_CHECK_RETURN_VALUE(ret, "starpu_task_submit");

	/* Now switch to vertical view of the matrix */
	struct starpu_data_descr vert_descr[PARTS];
	for (i = 0; i < PARTS; i++)
	{
		vert_descr[i].handle = vert_handle[i];
		vert_descr[i].mode = STARPU_W;
	}
	ret = starpu_task_insert(&cl_switch, STARPU_RW, handle, STARPU_DATA_MODE_ARRAY, vert_descr, PARTS, 0);
	if (ret == -ENODEV) goto enodev;
	STARPU_CHECK_RETURN_VALUE(ret, "starpu_task_submit");
	/* And make sure we don't accidentally access the matrix through the whole-matrix handle */
	starpu_data_invalidate_submit(handle);
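
	/* The fill kernel wrote i+100*j at global coordinates (i,j). In vertical
	 * slice number s, local column i corresponds to global column
	 * s*(NX/PARTS)+i, so the expected value is s*(NX/PARTS) + (i+100*j),
	 * hence start = s*(NX/PARTS) and factor = 1 below. */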
	/* Check the values of the vertical slices */
	for (i = 0; i < PARTS; i++)
	{
		int factor = 1;
		int start = i*(NX/PARTS);
		ret = starpu_task_insert(&cl_check_scale,
				STARPU_RW, vert_handle[i],
				STARPU_VALUE, &start, sizeof(start),
				STARPU_VALUE, &factor, sizeof(factor),
				0);
		if (ret == -ENODEV) goto enodev;
		STARPU_CHECK_RETURN_VALUE(ret, "starpu_task_submit");
	}
	/* Now switch back to the whole-matrix view */
	for (i = 0; i < PARTS; i++)
		vert_descr[i].mode = STARPU_RW;
	ret = starpu_task_insert(&cl_switch, STARPU_DATA_MODE_ARRAY, vert_descr, PARTS, STARPU_W, handle, 0);
	if (ret == -ENODEV) goto enodev;
	STARPU_CHECK_RETURN_VALUE(ret, "starpu_task_submit");
	/* And make sure we don't accidentally access the matrix through the vertical slices */
	for (i = 0; i < PARTS; i++)
		starpu_data_invalidate_submit(vert_handle[i]);

	/* And switch to horizontal view of the matrix */
	struct starpu_data_descr horiz_descr[PARTS];
	for (i = 0; i < PARTS; i++)
	{
		horiz_descr[i].handle = horiz_handle[i];
		horiz_descr[i].mode = STARPU_W;
	}
	ret = starpu_task_insert(&cl_switch, STARPU_RW, handle, STARPU_DATA_MODE_ARRAY, horiz_descr, PARTS, 0);
	if (ret == -ENODEV) goto enodev;
	STARPU_CHECK_RETURN_VALUE(ret, "starpu_task_submit");
	/* And make sure we don't accidentally access the matrix through the whole-matrix handle */
	starpu_data_invalidate_submit(handle);
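
	/* The vertical pass multiplied every element by two. In horizontal slice
	 * number s, local row j corresponds to global row s*(NY/PARTS)+j, so the
	 * expected value is 2*((i+100*j) + 100*s*(NY/PARTS)), hence factor = 2
	 * and start = factor*100*s*(NY/PARTS) below. */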
	/* Check the values of the horizontal slices */
	for (i = 0; i < PARTS; i++)
	{
		int factor = 2;
		int start = factor*100*i*(NY/PARTS);
		ret = starpu_task_insert(&cl_check_scale,
				STARPU_RW, horiz_handle[i],
				STARPU_VALUE, &start, sizeof(start),
				STARPU_VALUE, &factor, sizeof(factor),
				0);
		if (ret == -ENODEV) goto enodev;
		STARPU_CHECK_RETURN_VALUE(ret, "starpu_task_submit");
	}
	/*
	 * Unregister the data from StarPU and shut down. It does not really
	 * matter which view is active at unregistration here, since all views
	 * cover the whole matrix, so it will be completely updated in main
	 * memory anyway.
	 */
	for (i = 0; i < PARTS; i++)
	{
		starpu_data_unregister(vert_handle[i]);
		starpu_data_unregister(horiz_handle[i]);
	}
	starpu_data_unregister(handle);
	starpu_shutdown();

	return ret;

enodev:
	starpu_shutdown();
	/* No worker was able to run the tasks: return 77 to mark the test as skipped */
	return 77;
}
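
/*
 * One possible way to build this example against an installed StarPU (the
 * pkg-config module name depends on the StarPU release; starpu-1.3 is assumed
 * here):
 *
 *   gcc fmultiple_manual.c $(pkg-config --cflags --libs starpu-1.3) -o fmultiple_manual
 *
 * The program returns 0 on success and 77 (the automake "skipped" code) when
 * no worker can run the tasks.
 */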