/* fmultiple_manual.c */
/* StarPU --- Runtime system for heterogeneous multicore architectures.
 *
 * Copyright (C) 2015-2021 Université de Bordeaux, CNRS (LaBRI UMR 5800), Inria
 *
 * StarPU is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation; either version 2.1 of the License, or (at
 * your option) any later version.
 *
 * StarPU is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * See the GNU Lesser General Public License in COPYING.LGPL for more details.
 */
/*
 * This exemplifies how to access the same matrix with different partitioned
 * views, doing the coherency by hand.
 * We first run a kernel on the whole matrix to fill it, then run a kernel on
 * each vertical slice to check the value and multiply it by two, then run a
 * kernel on each horizontal slice to do the same.
 */
#include <starpu.h>
/* Matrix dimensions and the number of slices per view. */
#define NX 6
#define NY 6
#define PARTS 2
/* Print helper that stays quiet when STARPU_SSILENT is set (for the testsuite). */
#define FPRINTF(ofile, fmt, ...) do { if (!getenv("STARPU_SSILENT")) {fprintf(ofile, fmt, ## __VA_ARGS__); }} while(0)
  28. void matrix_fill(void *buffers[], void *cl_arg)
  29. {
  30. unsigned i, j;
  31. (void)cl_arg;
  32. /* length of the matrix */
  33. unsigned nx = STARPU_MATRIX_GET_NX(buffers[0]);
  34. unsigned ny = STARPU_MATRIX_GET_NY(buffers[0]);
  35. unsigned ld = STARPU_MATRIX_GET_LD(buffers[0]);
  36. int *val = (int *)STARPU_MATRIX_GET_PTR(buffers[0]);
  37. for(j=0; j<ny ; j++)
  38. {
  39. for(i=0; i<nx ; i++)
  40. val[(j*ld)+i] = i+100*j;
  41. }
  42. }
/* Codelet filling a matrix view: one buffer, write-only, CPU implementation. */
struct starpu_codelet cl_fill =
{
	.cpu_funcs = {matrix_fill},
	.cpu_funcs_name = {"matrix_fill"},
	.nbuffers = 1,
	.modes = {STARPU_W},
	.name = "matrix_fill"
};
  51. void fmultiple_check_scale(void *buffers[], void *cl_arg)
  52. {
  53. int start, factor;
  54. unsigned i, j;
  55. /* length of the matrix */
  56. unsigned nx = STARPU_MATRIX_GET_NX(buffers[0]);
  57. unsigned ny = STARPU_MATRIX_GET_NY(buffers[0]);
  58. unsigned ld = STARPU_MATRIX_GET_LD(buffers[0]);
  59. int *val = (int *)STARPU_MATRIX_GET_PTR(buffers[0]);
  60. starpu_codelet_unpack_args(cl_arg, &start, &factor);
  61. for(j=0; j<ny ; j++)
  62. {
  63. for(i=0; i<nx ; i++)
  64. {
  65. STARPU_ASSERT(val[(j*ld)+i] == start + factor*((int)(i+100*j)));
  66. val[(j*ld)+i] *= 2;
  67. }
  68. }
  69. }
#ifdef STARPU_USE_CUDA
/* CUDA variant of the check+scale kernel, defined in a separate .cu file. */
extern void fmultiple_check_scale_cuda(void *buffers[], void *cl_arg);
#endif
/* Codelet checking and doubling a matrix view: one buffer, read-write. */
struct starpu_codelet cl_check_scale =
{
#ifdef STARPU_USE_CUDA
	.cuda_funcs = {fmultiple_check_scale_cuda},
	.cuda_flags = {STARPU_CUDA_ASYNC},
#else
	/* Only enable it on CPUs if we don't have a CUDA device, to force remote execution on the CUDA device */
	.cpu_funcs = {fmultiple_check_scale},
	.cpu_funcs_name = {"fmultiple_check_scale"},
#endif
	.nbuffers = 1,
	.modes = {STARPU_RW},
	.name = "fmultiple_check_scale"
};
  87. void empty(void *buffers[], void *cl_arg)
  88. {
  89. /* This doesn't need to do anything, it's simply used to make coherency
  90. * between the two views, by simply running on the home node of the
  91. * data, thus getting back all data pieces there. */
  92. (void)buffers;
  93. (void)cl_arg;
  94. /* This check is just for testsuite */
  95. int node = starpu_task_get_current_data_node(0);
  96. unsigned i;
  97. unsigned nbuffers = STARPU_TASK_GET_NBUFFERS(starpu_task_get_current());
  98. STARPU_ASSERT(node >= 0);
  99. for (i = 1; i < nbuffers; i++)
  100. STARPU_ASSERT(starpu_task_get_current_data_node(i) == node);
  101. }
/* Codelet used to switch between views; takes a variable number of buffers. */
struct starpu_codelet cl_switch =
{
#if 1
	/* Check for the values */
	.cpu_funcs = {empty},
#else
	/* For production code: we do not need to actually execute anything */
	.where = STARPU_NOWHERE,
#endif
	.nbuffers = STARPU_VARIABLE_NBUFFERS,
	.name = "switch",
};
  114. int main(void)
  115. {
  116. unsigned n=1;
  117. int matrix[NX][NY];
  118. int ret, i;
  119. /* We haven't taken care otherwise */
  120. STARPU_ASSERT((NX%PARTS) == 0);
  121. STARPU_ASSERT((NY%PARTS) == 0);
  122. starpu_data_handle_t handle;
  123. starpu_data_handle_t vert_handle[PARTS];
  124. starpu_data_handle_t horiz_handle[PARTS];
  125. ret = starpu_init(NULL);
  126. if (ret == -ENODEV)
  127. return 77;
  128. STARPU_CHECK_RETURN_VALUE(ret, "starpu_init");
  129. /* force to execute task on the home_node, here it is STARPU_MAIN_RAM */
  130. cl_switch.specific_nodes = 1;
  131. for (i = 0; i < STARPU_NMAXBUFS; i++)
  132. cl_switch.nodes[i] = STARPU_MAIN_RAM;
  133. /* Declare the whole matrix to StarPU */
  134. starpu_matrix_data_register(&handle, STARPU_MAIN_RAM, (uintptr_t)matrix, NX, NX, NY, sizeof(matrix[0][0]));
  135. /* Also declare the vertical slices to StarPU */
  136. for (i = 0; i < PARTS; i++)
  137. {
  138. starpu_matrix_data_register(&vert_handle[i], STARPU_MAIN_RAM, (uintptr_t)&matrix[0][i*(NX/PARTS)], NX, NX/PARTS, NY, sizeof(matrix[0][0]));
  139. /* But make it invalid for now, we'll access data through the whole matrix first */
  140. starpu_data_invalidate(vert_handle[i]);
  141. }
  142. /* And the horizontal slices to StarPU */
  143. for (i = 0; i < PARTS; i++)
  144. {
  145. starpu_matrix_data_register(&horiz_handle[i], STARPU_MAIN_RAM, (uintptr_t)&matrix[i*(NY/PARTS)][0], NX, NX, NY/PARTS, sizeof(matrix[0][0]));
  146. starpu_data_invalidate(horiz_handle[i]);
  147. }
  148. /* Fill the matrix */
  149. ret = starpu_task_insert(&cl_fill, STARPU_W, handle, 0);
  150. if (ret == -ENODEV) goto enodev;
  151. STARPU_CHECK_RETURN_VALUE(ret, "starpu_task_submit");
  152. /* Now switch to vertical view of the matrix */
  153. struct starpu_data_descr vert_descr[PARTS];
  154. for (i = 0; i < PARTS; i++)
  155. {
  156. vert_descr[i].handle = vert_handle[i];
  157. vert_descr[i].mode = STARPU_W;
  158. }
  159. ret = starpu_task_insert(&cl_switch, STARPU_RW, handle, STARPU_DATA_MODE_ARRAY, vert_descr, PARTS, 0);
  160. if (ret == -ENODEV) goto enodev;
  161. STARPU_CHECK_RETURN_VALUE(ret, "starpu_task_submit");
  162. /* And make sure we don't accidentally access the matrix through the whole-matrix handle */
  163. starpu_data_invalidate_submit(handle);
  164. /* Check the values of the vertical slices */
  165. for (i = 0; i < PARTS; i++)
  166. {
  167. int factor = 1;
  168. int start = i*(NX/PARTS);
  169. ret = starpu_task_insert(&cl_check_scale,
  170. STARPU_RW, vert_handle[i],
  171. STARPU_VALUE, &start, sizeof(start),
  172. STARPU_VALUE, &factor, sizeof(factor),
  173. 0);
  174. if (ret == -ENODEV) goto enodev;
  175. STARPU_CHECK_RETURN_VALUE(ret, "starpu_task_submit");
  176. }
  177. /* Now switch back to total view of the matrix */
  178. for (i = 0; i < PARTS; i++)
  179. vert_descr[i].mode = STARPU_RW;
  180. ret = starpu_task_insert(&cl_switch, STARPU_DATA_MODE_ARRAY, vert_descr, PARTS, STARPU_W, handle, 0);
  181. if (ret == -ENODEV) goto enodev;
  182. STARPU_CHECK_RETURN_VALUE(ret, "starpu_task_submit");
  183. /* And make sure we don't accidentally access the matrix through the vertical slices */
  184. for (i = 0; i < PARTS; i++)
  185. starpu_data_invalidate_submit(vert_handle[i]);
  186. /* And switch to horizontal view of the matrix */
  187. struct starpu_data_descr horiz_descr[PARTS];
  188. for (i = 0; i < PARTS; i++)
  189. {
  190. horiz_descr[i].handle = horiz_handle[i];
  191. horiz_descr[i].mode = STARPU_W;
  192. }
  193. ret = starpu_task_insert(&cl_switch, STARPU_RW, handle, STARPU_DATA_MODE_ARRAY, horiz_descr, PARTS, 0);
  194. if (ret == -ENODEV) goto enodev;
  195. STARPU_CHECK_RETURN_VALUE(ret, "starpu_task_submit");
  196. /* And make sure we don't accidentally access the matrix through the whole-matrix handle */
  197. starpu_data_invalidate_submit(handle);
  198. /* Check the values of the horizontal slices */
  199. for (i = 0; i < PARTS; i++)
  200. {
  201. int factor = 2;
  202. int start = factor*100*i*(NY/PARTS);
  203. ret = starpu_task_insert(&cl_check_scale,
  204. STARPU_RW, horiz_handle[i],
  205. STARPU_VALUE, &start, sizeof(start),
  206. STARPU_VALUE, &factor, sizeof(factor),
  207. 0);
  208. if (ret == -ENODEV) goto enodev;
  209. STARPU_CHECK_RETURN_VALUE(ret, "starpu_task_submit");
  210. }
  211. /*
  212. * Unregister data from StarPU and shutdown It does not really matter
  213. * which view is active at unregistration here, since all views cover
  214. * the whole matrix, so it will be completely updated in the main memory.
  215. */
  216. for (i = 0; i < PARTS; i++)
  217. {
  218. starpu_data_unregister(vert_handle[i]);
  219. starpu_data_unregister(horiz_handle[i]);
  220. }
  221. starpu_data_unregister(handle);
  222. starpu_shutdown();
  223. return ret;
  224. enodev:
  225. starpu_shutdown();
  226. return 77;
  227. }