/* StarPU --- Runtime system for heterogeneous multicore architectures.
 *
 * Copyright (C) 2015-2020 Université de Bordeaux, CNRS (LaBRI UMR 5800), Inria
 *
 * StarPU is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation; either version 2.1 of the License, or (at
 * your option) any later version.
 *
 * StarPU is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * See the GNU Lesser General Public License in COPYING.LGPL for more details.
 */
/*
 * This exemplifies how to access the same matrix with different partitioned
 * views, doing the coherency by hand.
 * We first run a kernel on the whole matrix to fill it, then run a kernel on
 * each vertical slice to check the value and multiply it by two, then run a
 * kernel on each horizontal slice to do the same.
 */
  23. #include <starpu.h>
  24. #define NX 6
  25. #define NY 6
  26. #define PARTS 2
  27. #define FPRINTF(ofile, fmt, ...) do { if (!getenv("STARPU_SSILENT")) {fprintf(ofile, fmt, ## __VA_ARGS__); }} while(0)
  28. void matrix_fill(void *buffers[], void *cl_arg)
  29. {
  30. unsigned i, j;
  31. (void)cl_arg;
  32. /* length of the matrix */
  33. unsigned nx = STARPU_MATRIX_GET_NX(buffers[0]);
  34. unsigned ny = STARPU_MATRIX_GET_NY(buffers[0]);
  35. unsigned ld = STARPU_MATRIX_GET_LD(buffers[0]);
  36. int *val = (int *)STARPU_MATRIX_GET_PTR(buffers[0]);
  37. for(j=0; j<ny ; j++)
  38. {
  39. for(i=0; i<nx ; i++)
  40. val[(j*ld)+i] = i+100*j;
  41. }
  42. }
  43. struct starpu_codelet cl_fill =
  44. {
  45. .cpu_funcs = {matrix_fill},
  46. .cpu_funcs_name = {"matrix_fill"},
  47. .nbuffers = 1,
  48. .modes = {STARPU_W},
  49. .name = "matrix_fill"
  50. };
  51. void fmultiple_check_scale(void *buffers[], void *cl_arg)
  52. {
  53. int start, factor;
  54. unsigned i, j;
  55. /* length of the matrix */
  56. unsigned nx = STARPU_MATRIX_GET_NX(buffers[0]);
  57. unsigned ny = STARPU_MATRIX_GET_NY(buffers[0]);
  58. unsigned ld = STARPU_MATRIX_GET_LD(buffers[0]);
  59. int *val = (int *)STARPU_MATRIX_GET_PTR(buffers[0]);
  60. starpu_codelet_unpack_args(cl_arg, &start, &factor);
  61. for(j=0; j<ny ; j++)
  62. {
  63. for(i=0; i<nx ; i++)
  64. {
  65. STARPU_ASSERT(val[(j*ld)+i] == start + factor*((int)(i+100*j)));
  66. val[(j*ld)+i] *= 2;
  67. }
  68. }
  69. }
  70. #ifdef STARPU_USE_CUDA
  71. extern void fmultiple_check_scale_cuda(void *buffers[], void *cl_arg);
  72. #endif
  73. struct starpu_codelet cl_check_scale =
  74. {
  75. #ifdef STARPU_USE_CUDA
  76. .cuda_funcs = {fmultiple_check_scale_cuda},
  77. .cuda_flags = {STARPU_CUDA_ASYNC},
  78. #else
  79. /* Only enable it on CPUs if we don't have a CUDA device, to force remote execution on the CUDA device */
  80. .cpu_funcs = {fmultiple_check_scale},
  81. .cpu_funcs_name = {"fmultiple_check_scale"},
  82. #endif
  83. .nbuffers = 1,
  84. .modes = {STARPU_RW},
  85. .name = "fmultiple_check_scale"
  86. };
  87. void empty(void *buffers[], void *cl_arg)
  88. {
  89. /* This doesn't need to do anything, it's simply used to make coherency
  90. * between the two views, by simply running on the home node of the
  91. * data, thus getting back all data pieces there. */
  92. (void)buffers;
  93. (void)cl_arg;
  94. /* This check is just for testsuite */
  95. int node = starpu_task_get_current_data_node(0);
  96. unsigned i;
  97. unsigned nbuffers = STARPU_TASK_GET_NBUFFERS(starpu_task_get_current());
  98. STARPU_ASSERT(node >= 0);
  99. for (i = 1; i < nbuffers; i++)
  100. STARPU_ASSERT(starpu_task_get_current_data_node(i) == node);
  101. }
  102. struct starpu_codelet cl_switch =
  103. {
  104. .cpu_funcs = {empty},
  105. .nbuffers = STARPU_VARIABLE_NBUFFERS,
  106. .name = "switch"
  107. };
  108. int main(void)
  109. {
  110. unsigned j, n=1;
  111. int matrix[NX][NY];
  112. int ret, i;
  113. /* We haven't taken care otherwise */
  114. STARPU_ASSERT((NX%PARTS) == 0);
  115. STARPU_ASSERT((NY%PARTS) == 0);
  116. starpu_data_handle_t handle;
  117. starpu_data_handle_t vert_handle[PARTS];
  118. starpu_data_handle_t horiz_handle[PARTS];
  119. ret = starpu_init(NULL);
  120. if (ret == -ENODEV)
  121. return 77;
  122. STARPU_CHECK_RETURN_VALUE(ret, "starpu_init");
  123. /* force to execute task on the home_node, here it is STARPU_MAIN_RAM */
  124. cl_switch.specific_nodes = 1;
  125. for (i = 0; i < STARPU_NMAXBUFS; i++)
  126. cl_switch.nodes[i] = STARPU_MAIN_RAM;
  127. /* Declare the whole matrix to StarPU */
  128. starpu_matrix_data_register(&handle, STARPU_MAIN_RAM, (uintptr_t)matrix, NX, NX, NY, sizeof(matrix[0][0]));
  129. /* Also declare the vertical slices to StarPU */
  130. for (i = 0; i < PARTS; i++)
  131. {
  132. starpu_matrix_data_register(&vert_handle[i], STARPU_MAIN_RAM, (uintptr_t)&matrix[0][i*(NX/PARTS)], NX, NX/PARTS, NY, sizeof(matrix[0][0]));
  133. /* But make it invalid for now, we'll access data through the whole matrix first */
  134. starpu_data_invalidate(vert_handle[i]);
  135. }
  136. /* And the horizontal slices to StarPU */
  137. for (i = 0; i < PARTS; i++)
  138. {
  139. starpu_matrix_data_register(&horiz_handle[i], STARPU_MAIN_RAM, (uintptr_t)&matrix[i*(NY/PARTS)][0], NX, NX, NY/PARTS, sizeof(matrix[0][0]));
  140. starpu_data_invalidate(horiz_handle[i]);
  141. }
  142. /* Fill the matrix */
  143. ret = starpu_task_insert(&cl_fill, STARPU_W, handle, 0);
  144. if (ret == -ENODEV) goto enodev;
  145. STARPU_CHECK_RETURN_VALUE(ret, "starpu_task_submit");
  146. /* Now switch to vertical view of the matrix */
  147. struct starpu_data_descr vert_descr[PARTS];
  148. for (i = 0; i < PARTS; i++)
  149. {
  150. vert_descr[i].handle = vert_handle[i];
  151. vert_descr[i].mode = STARPU_W;
  152. }
  153. ret = starpu_task_insert(&cl_switch, STARPU_RW, handle, STARPU_DATA_MODE_ARRAY, vert_descr, PARTS, 0);
  154. if (ret == -ENODEV) goto enodev;
  155. STARPU_CHECK_RETURN_VALUE(ret, "starpu_task_submit");
  156. /* And make sure we don't accidentally access the matrix through the whole-matrix handle */
  157. starpu_data_invalidate_submit(handle);
  158. /* Check the values of the vertical slices */
  159. for (i = 0; i < PARTS; i++)
  160. {
  161. int factor = 1;
  162. int start = i*(NX/PARTS);
  163. ret = starpu_task_insert(&cl_check_scale,
  164. STARPU_RW, vert_handle[i],
  165. STARPU_VALUE, &start, sizeof(start),
  166. STARPU_VALUE, &factor, sizeof(factor),
  167. 0);
  168. if (ret == -ENODEV) goto enodev;
  169. STARPU_CHECK_RETURN_VALUE(ret, "starpu_task_submit");
  170. }
  171. /* Now switch back to total view of the matrix */
  172. for (i = 0; i < PARTS; i++)
  173. vert_descr[i].mode = STARPU_RW;
  174. ret = starpu_task_insert(&cl_switch, STARPU_DATA_MODE_ARRAY, vert_descr, PARTS, STARPU_W, handle, 0);
  175. if (ret == -ENODEV) goto enodev;
  176. STARPU_CHECK_RETURN_VALUE(ret, "starpu_task_submit");
  177. /* And make sure we don't accidentally access the matrix through the vertical slices */
  178. for (i = 0; i < PARTS; i++)
  179. starpu_data_invalidate_submit(vert_handle[i]);
  180. /* And switch to horizontal view of the matrix */
  181. struct starpu_data_descr horiz_descr[PARTS];
  182. for (i = 0; i < PARTS; i++)
  183. {
  184. horiz_descr[i].handle = horiz_handle[i];
  185. horiz_descr[i].mode = STARPU_W;
  186. }
  187. ret = starpu_task_insert(&cl_switch, STARPU_RW, handle, STARPU_DATA_MODE_ARRAY, horiz_descr, PARTS, 0);
  188. if (ret == -ENODEV) goto enodev;
  189. STARPU_CHECK_RETURN_VALUE(ret, "starpu_task_submit");
  190. /* And make sure we don't accidentally access the matrix through the whole-matrix handle */
  191. starpu_data_invalidate_submit(handle);
  192. /* Check the values of the horizontal slices */
  193. for (i = 0; i < PARTS; i++)
  194. {
  195. int factor = 2;
  196. int start = factor*100*i*(NY/PARTS);
  197. ret = starpu_task_insert(&cl_check_scale,
  198. STARPU_RW, horiz_handle[i],
  199. STARPU_VALUE, &start, sizeof(start),
  200. STARPU_VALUE, &factor, sizeof(factor),
  201. 0);
  202. if (ret == -ENODEV) goto enodev;
  203. STARPU_CHECK_RETURN_VALUE(ret, "starpu_task_submit");
  204. }
  205. /*
  206. * Unregister data from StarPU and shutdown It does not really matter
  207. * which view is active at unregistration here, since all views cover
  208. * the whole matrix, so it will be completely updated in the main memory.
  209. */
  210. for (i = 0; i < PARTS; i++)
  211. {
  212. starpu_data_unregister(vert_handle[i]);
  213. starpu_data_unregister(horiz_handle[i]);
  214. }
  215. starpu_data_unregister(handle);
  216. starpu_shutdown();
  217. return ret;
  218. enodev:
  219. starpu_shutdown();
  220. return 77;
  221. }