fmultiple_submit.c

/* StarPU --- Runtime system for heterogeneous multicore architectures.
 *
 * Copyright (C) 2017, 2018 CNRS
 * Copyright (C) 2015,2017 Université de Bordeaux
 *
 * StarPU is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation; either version 2.1 of the License, or (at
 * your option) any later version.
 *
 * StarPU is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * See the GNU Lesser General Public License in COPYING.LGPL for more details.
 */
/*
 * This exemplifies how to access the same matrix through different partitioned
 * views, with coherency maintained through partition planning.
 * We first run a kernel on the whole matrix to fill it, then run a kernel on
 * each vertical slice to check the values and multiply them by two, then run a
 * kernel on each horizontal slice to do the same.
 */
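/*
 * Expected values: the fill kernel writes i+100*j into each cell; every
 * check-and-scale pass verifies start + factor*(i+100*j) and then doubles
 * the cell, so the final whole-matrix check uses factor 4.
 */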
#include <starpu.h>

#define NX 6
#define NY 6
#define PARTS 2

#define FPRINTF(ofile, fmt, ...) do { if (!getenv("STARPU_SSILENT")) {fprintf(ofile, fmt, ## __VA_ARGS__); }} while(0)
void matrix_fill(void *buffers[], void *cl_arg)
{
	unsigned i, j;
	(void)cl_arg;

	/* length of the matrix */
	unsigned nx = STARPU_MATRIX_GET_NX(buffers[0]);
	unsigned ny = STARPU_MATRIX_GET_NY(buffers[0]);
	unsigned ld = STARPU_MATRIX_GET_LD(buffers[0]);
	int *val = (int *)STARPU_MATRIX_GET_PTR(buffers[0]);

	for(j=0; j<ny ; j++)
	{
		for(i=0; i<nx ; i++)
			val[(j*ld)+i] = i+100*j;
	}
}
struct starpu_codelet cl_fill =
{
	.cpu_funcs = {matrix_fill},
	.cpu_funcs_name = {"matrix_fill"},
	.nbuffers = 1,
	.modes = {STARPU_W},
	.name = "matrix_fill"
};
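/* Check that each value still matches start + factor*(i+100*j), then double
 * it. start and factor are passed through cl_arg and unpacked below. */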
void fmultiple_check_scale(void *buffers[], void *cl_arg)
{
	int start, factor;
	unsigned i, j;

	/* length of the matrix */
	unsigned nx = STARPU_MATRIX_GET_NX(buffers[0]);
	unsigned ny = STARPU_MATRIX_GET_NY(buffers[0]);
	unsigned ld = STARPU_MATRIX_GET_LD(buffers[0]);
	int *val = (int *)STARPU_MATRIX_GET_PTR(buffers[0]);

	starpu_codelet_unpack_args(cl_arg, &start, &factor);

	for(j=0; j<ny ; j++)
	{
		for(i=0; i<nx ; i++)
		{
			STARPU_ASSERT(val[(j*ld)+i] == start + factor*((int)(i+100*j)));
			val[(j*ld)+i] *= 2;
		}
	}
}
#ifdef STARPU_USE_CUDA
extern void fmultiple_check_scale_cuda(void *buffers[], void *cl_arg);
#endif
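/* STARPU_CUDA_ASYNC indicates that the CUDA implementation submits its work
 * asynchronously on the stream provided by StarPU instead of synchronizing,
 * letting StarPU overlap it with other work. */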
struct starpu_codelet cl_check_scale =
{
#ifdef STARPU_USE_CUDA
	.cuda_funcs = {fmultiple_check_scale_cuda},
	.cuda_flags = {STARPU_CUDA_ASYNC},
#endif
	.cpu_funcs = {fmultiple_check_scale},
	.cpu_funcs_name = {"fmultiple_check_scale"},
	.nbuffers = 1,
	.modes = {STARPU_RW},
	.name = "fmultiple_check_scale"
};
int main(void)
{
	unsigned j, n=1;
	int matrix[NX][NY];
	int ret, i;

	/* We haven't taken care otherwise */
	STARPU_ASSERT((NX%PARTS) == 0);
	STARPU_ASSERT((NY%PARTS) == 0);

	starpu_data_handle_t handle;
	starpu_data_handle_t vert_handle[PARTS];
	starpu_data_handle_t horiz_handle[PARTS];

	ret = starpu_init(NULL);
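	/* starpu_init() returns -ENODEV when no worker is available; exit code
	 * 77 is the conventional "test skipped" status. */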
	if (ret == -ENODEV)
		return 77;
	STARPU_CHECK_RETURN_VALUE(ret, "starpu_init");

	/* Disable the CPU implementation of the codelet if we have a CUDA device, to force remote execution on the CUDA device */
	if (starpu_cuda_worker_get_count())
	{
		cl_check_scale.cpu_funcs[0] = NULL;
		cl_check_scale.cpu_funcs_name[0] = NULL;
	}
	/* Declare the whole matrix to StarPU */
	starpu_matrix_data_register(&handle, STARPU_MAIN_RAM, (uintptr_t)matrix, NX, NX, NY, sizeof(matrix[0][0]));

	/* Partition the matrix in PARTS vertical slices */
	struct starpu_data_filter f_vert =
	{
		.filter_func = starpu_matrix_filter_block,
		.nchildren = PARTS
	};
	starpu_data_partition_plan(handle, &f_vert, vert_handle);

	/* Partition the matrix in PARTS horizontal slices */
	struct starpu_data_filter f_horiz =
	{
		.filter_func = starpu_matrix_filter_vertical_block,
		.nchildren = PARTS
	};
	starpu_data_partition_plan(handle, &f_horiz, horiz_handle);
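	/* Note: starpu_data_partition_plan() only creates the child handles for
	 * these views; no data is actually moved until
	 * starpu_data_partition_submit() switches the main handle to one of the
	 * planned views. */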
	/* Fill the matrix */
	ret = starpu_task_insert(&cl_fill, STARPU_W, handle, 0);
	if (ret == -ENODEV) goto enodev;
	STARPU_CHECK_RETURN_VALUE(ret, "starpu_task_insert");
	/* Now switch to vertical view of the matrix */
	starpu_data_partition_submit(handle, PARTS, vert_handle);

	/* Check the values of the vertical slices */
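	/* Vertical slice i covers x indices starting at i*(NX/PARTS), so in its
	 * local coordinates each value is i*(NX/PARTS) + (i+100*j): hence
	 * start = i*(NX/PARTS) and factor = 1. */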
	for (i = 0; i < PARTS; i++)
	{
		int factor = 1;
		int start = i*(NX/PARTS);
		ret = starpu_task_insert(&cl_check_scale,
					 STARPU_RW, vert_handle[i],
					 STARPU_VALUE, &start, sizeof(start),
					 STARPU_VALUE, &factor, sizeof(factor),
					 0);
		if (ret == -ENODEV) goto enodev;
		STARPU_CHECK_RETURN_VALUE(ret, "starpu_task_insert");
	}
	/* Now switch back to total view of the matrix */
	starpu_data_unpartition_submit(handle, PARTS, vert_handle, -1);
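	/* The last argument is the memory node on which to gather the data;
	 * -1 lets StarPU choose it. */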
	/* And switch to horizontal view of the matrix */
	starpu_data_partition_submit(handle, PARTS, horiz_handle);

	/* Check the values of the horizontal slices */
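	/* The vertical pass doubled every value, so horizontal slice i (y offset
	 * i*(NY/PARTS)) holds 2*100*i*(NY/PARTS) + 2*(i+100*j) in local
	 * coordinates: hence factor = 2 and start = factor*100*i*(NY/PARTS). */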
	for (i = 0; i < PARTS; i++)
	{
		int factor = 2;
		int start = factor*100*i*(NY/PARTS);
		ret = starpu_task_insert(&cl_check_scale,
					 STARPU_RW, horiz_handle[i],
					 STARPU_VALUE, &start, sizeof(start),
					 STARPU_VALUE, &factor, sizeof(factor),
					 0);
		if (ret == -ENODEV) goto enodev;
		STARPU_CHECK_RETURN_VALUE(ret, "starpu_task_insert");
	}
	/* Now switch back to total view of the matrix */
	starpu_data_unpartition_submit(handle, PARTS, horiz_handle, -1);

	/* And check and scale the values of the whole matrix */
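	/* Both passes doubled every value, so the whole matrix now holds
	 * 4*(i+100*j): start = 0 and factor = 4. */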
	int factor = 4;
	int start = 0;
	ret = starpu_task_insert(&cl_check_scale,
				 STARPU_RW, handle,
				 STARPU_VALUE, &start, sizeof(start),
				 STARPU_VALUE, &factor, sizeof(factor),
				 0);
	if (ret == -ENODEV) goto enodev;
	STARPU_CHECK_RETURN_VALUE(ret, "starpu_task_insert");
	/*
	 * Unregister data from StarPU and shutdown.
	 */
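	/* Planned partitions must be cleaned up before the main handle can be
	 * unregistered. */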
	starpu_data_partition_clean(handle, PARTS, vert_handle);
	starpu_data_partition_clean(handle, PARTS, horiz_handle);
	starpu_data_unregister(handle);
	starpu_shutdown();

	return ret;

enodev:
	starpu_shutdown();
	return 77;
}