fmultiple_submit.c

/* StarPU --- Runtime system for heterogeneous multicore architectures.
 *
 * Copyright (C) 2015-2020 Université de Bordeaux, CNRS (LaBRI UMR 5800), Inria
 *
 * StarPU is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation; either version 2.1 of the License, or (at
 * your option) any later version.
 *
 * StarPU is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * See the GNU Lesser General Public License in COPYING.LGPL for more details.
 */
/*
 * This exemplifies how to access the same matrix through different partitioned
 * views, with coherency maintained through partition planning.
 * We first run a kernel on the whole matrix to fill it, then run a kernel on
 * each vertical slice to check the values and multiply them by two, then run a
 * kernel on each horizontal slice to do the same.
 */
#include <starpu.h>

#define NX 6
#define NY 6
#define PARTS 2

#define FPRINTF(ofile, fmt, ...) do { if (!getenv("STARPU_SSILENT")) { fprintf(ofile, fmt, ## __VA_ARGS__); } } while(0)
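
/* Fill kernel: write the value i + 100*j into element (i,j) of the matrix,
 * so that the later kernels can check which element they are looking at. */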
void matrix_fill(void *buffers[], void *cl_arg)
{
	unsigned i, j;
	(void)cl_arg;

	/* length of the matrix */
	unsigned nx = STARPU_MATRIX_GET_NX(buffers[0]);
	unsigned ny = STARPU_MATRIX_GET_NY(buffers[0]);
	unsigned ld = STARPU_MATRIX_GET_LD(buffers[0]);
	int *val = (int *)STARPU_MATRIX_GET_PTR(buffers[0]);

	for(j=0; j<ny ; j++)
	{
		for(i=0; i<nx ; i++)
			val[(j*ld)+i] = i+100*j;
	}
}
struct starpu_codelet cl_fill =
{
	.cpu_funcs = {matrix_fill},
	.cpu_funcs_name = {"matrix_fill"},
	.nbuffers = 1,
	.modes = {STARPU_W},
	.name = "matrix_fill"
};
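
/* Check-and-scale kernel: the expected base value and current scaling factor
 * are passed through cl_arg; every element is checked against
 * start + factor*(i + 100*j) and then multiplied by two. */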
void fmultiple_check_scale(void *buffers[], void *cl_arg)
{
	int start, factor;
	unsigned i, j;

	/* length of the matrix */
	unsigned nx = STARPU_MATRIX_GET_NX(buffers[0]);
	unsigned ny = STARPU_MATRIX_GET_NY(buffers[0]);
	unsigned ld = STARPU_MATRIX_GET_LD(buffers[0]);
	int *val = (int *)STARPU_MATRIX_GET_PTR(buffers[0]);

	starpu_codelet_unpack_args(cl_arg, &start, &factor);

	for(j=0; j<ny ; j++)
	{
		for(i=0; i<nx ; i++)
		{
			STARPU_ASSERT(val[(j*ld)+i] == start + factor*((int)(i+100*j)));
			val[(j*ld)+i] *= 2;
		}
	}
}
#ifdef STARPU_USE_CUDA
extern void fmultiple_check_scale_cuda(void *buffers[], void *cl_arg);
#endif

struct starpu_codelet cl_check_scale =
{
#ifdef STARPU_USE_CUDA
	.cuda_funcs = {fmultiple_check_scale_cuda},
	.cuda_flags = {STARPU_CUDA_ASYNC},
#endif
	.cpu_funcs = {fmultiple_check_scale},
	.cpu_funcs_name = {"fmultiple_check_scale"},
	.nbuffers = 1,
	.modes = {STARPU_RW},
	.name = "fmultiple_check_scale"
};
int main(void)
{
	unsigned j, n=1;
	int matrix[NX][NY];
	int ret, i;

	/* We haven't taken care otherwise */
	STARPU_ASSERT((NX%PARTS) == 0);
	STARPU_ASSERT((NY%PARTS) == 0);

	starpu_data_handle_t handle;
	starpu_data_handle_t vert_handle[PARTS];
	starpu_data_handle_t horiz_handle[PARTS];

	ret = starpu_init(NULL);
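	/* starpu_init() returns -ENODEV when no worker could be started; exit
	 * code 77 conventionally tells the test harness (e.g. Automake) that the
	 * test was skipped rather than failed. */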
	if (ret == -ENODEV)
		return 77;
	STARPU_CHECK_RETURN_VALUE(ret, "starpu_init");

	/* Disable the codelet on CPUs if we have a CUDA device, to force remote execution on the CUDA device */
	if (starpu_cuda_worker_get_count())
	{
		cl_check_scale.cpu_funcs[0] = NULL;
		cl_check_scale.cpu_funcs_name[0] = NULL;
	}

	/* Declare the whole matrix to StarPU */
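	/* The arguments following the data pointer are the leading dimension,
	 * nx, ny, and the size of one element. */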
	starpu_matrix_data_register(&handle, STARPU_MAIN_RAM, (uintptr_t)matrix, NX, NX, NY, sizeof(matrix[0][0]));

	/* Partition the matrix in PARTS vertical slices */
	struct starpu_data_filter f_vert =
	{
		.filter_func = starpu_matrix_filter_block,
		.nchildren = PARTS
	};
	starpu_data_partition_plan(handle, &f_vert, vert_handle);

	/* Partition the matrix in PARTS horizontal slices */
	struct starpu_data_filter f_horiz =
	{
		.filter_func = starpu_matrix_filter_vertical_block,
		.nchildren = PARTS
	};
	starpu_data_partition_plan(handle, &f_horiz, horiz_handle);
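
	/* The plans above only declare the sub-handles: no data is moved yet.
	 * The starpu_data_partition_submit()/starpu_data_unpartition_submit()
	 * calls below submit the actual switches between views, and StarPU keeps
	 * the views coherent with the tasks that access them. */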

	/* Fill the matrix */
	ret = starpu_task_insert(&cl_fill, STARPU_W, handle, 0);
	if (ret == -ENODEV) goto enodev;
	STARPU_CHECK_RETURN_VALUE(ret, "starpu_task_insert");

	/* Now switch to the vertical view of the matrix */
	starpu_data_partition_submit(handle, PARTS, vert_handle);

	/* Check the values of the vertical slices */
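	/* The fill kernel wrote i + 100*j, so vertical slice i starts at
	 * i*(NX/PARTS) along x; factor is 1 since nothing has been scaled yet. */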
	for (i = 0; i < PARTS; i++)
	{
		int factor = 1;
		int start = i*(NX/PARTS);
		ret = starpu_task_insert(&cl_check_scale,
					 STARPU_RW, vert_handle[i],
					 STARPU_VALUE, &start, sizeof(start),
					 STARPU_VALUE, &factor, sizeof(factor),
					 0);
		if (ret == -ENODEV) goto enodev;
		STARPU_CHECK_RETURN_VALUE(ret, "starpu_task_insert");
	}

	/* Now switch back to the total view of the matrix */
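	/* The last argument of starpu_data_unpartition_submit() is the memory
	 * node on which the pieces are gathered; -1 lets StarPU choose. */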
	starpu_data_unpartition_submit(handle, PARTS, vert_handle, -1);

	/* And switch to the horizontal view of the matrix */
	starpu_data_partition_submit(handle, PARTS, horiz_handle);

	/* Check the values of the horizontal slices */
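	/* The vertical pass doubled every element, hence factor = 2; horizontal
	 * slice i starts at row i*(NY/PARTS), whose fill value was
	 * 100*i*(NY/PARTS), so the expected offset is factor*100*i*(NY/PARTS). */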
	for (i = 0; i < PARTS; i++)
	{
		int factor = 2;
		int start = factor*100*i*(NY/PARTS);
		ret = starpu_task_insert(&cl_check_scale,
					 STARPU_RW, horiz_handle[i],
					 STARPU_VALUE, &start, sizeof(start),
					 STARPU_VALUE, &factor, sizeof(factor),
					 0);
		if (ret == -ENODEV) goto enodev;
		STARPU_CHECK_RETURN_VALUE(ret, "starpu_task_insert");
	}

	/* Now switch back to the total view of the matrix */
	starpu_data_unpartition_submit(handle, PARTS, horiz_handle, -1);

	/* And check and scale the values of the whole matrix */
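	/* Both passes doubled every element, so the matrix now holds
	 * 4*(i + 100*j): factor is 4 and there is no offset. */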
	int factor = 4;
	int start = 0;
	ret = starpu_task_insert(&cl_check_scale,
				 STARPU_RW, handle,
				 STARPU_VALUE, &start, sizeof(start),
				 STARPU_VALUE, &factor, sizeof(factor),
				 0);
	if (ret == -ENODEV) goto enodev;
	STARPU_CHECK_RETURN_VALUE(ret, "starpu_task_insert");

	/*
	 * Unregister data from StarPU and shutdown.
	 */
	starpu_data_partition_clean(handle, PARTS, vert_handle);
	starpu_data_partition_clean(handle, PARTS, horiz_handle);
	starpu_data_unregister(handle);
	starpu_shutdown();

	return ret;

enodev:
	starpu_shutdown();
	return 77;
}