insert_task_compute.c 6.2 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248
  1. /* StarPU --- Runtime system for heterogeneous multicore architectures.
  2. *
  3. * Copyright (C) 2013, 2014, 2015, 2016, 2017 CNRS
  4. *
  5. * StarPU is free software; you can redistribute it and/or modify
  6. * it under the terms of the GNU Lesser General Public License as published by
  7. * the Free Software Foundation; either version 2.1 of the License, or (at
  8. * your option) any later version.
  9. *
  10. * StarPU is distributed in the hope that it will be useful, but
  11. * WITHOUT ANY WARRANTY; without even the implied warranty of
  12. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
  13. *
  14. * See the GNU Lesser General Public License in COPYING.LGPL for more details.
  15. */
  16. #include <starpu_mpi.h>
  17. #include "helper.h"
  18. void func_cpu(void *descr[], void *_args)
  19. {
  20. int rank;
  21. int *x = (int *)STARPU_VARIABLE_GET_PTR(descr[0]);
  22. int *y = (int *)STARPU_VARIABLE_GET_PTR(descr[1]);
  23. starpu_codelet_unpack_args(_args, &rank);
  24. FPRINTF(stdout, "[%d] VALUES: %d %d\n", rank, *x, *y);
  25. *x = *x * *y;
  26. }
  27. /* Dummy cost function for simgrid */
  28. static double cost_function(struct starpu_task *task STARPU_ATTRIBUTE_UNUSED, unsigned nimpl STARPU_ATTRIBUTE_UNUSED)
  29. {
  30. return 0.000001;
  31. }
/* Performance model backed by the constant cost function above; STARPU_COMMON
 * means the same cost function is used for every architecture. Required so
 * the test can run under simgrid, which needs a model for every codelet. */
static struct starpu_perfmodel dumb_model =
{
	.type = STARPU_COMMON,
	.cost_function = cost_function
};
/* Codelet under test: two variable buffers, the first read-write (receives
 * the product) and the second read-only. CPU-only implementation. */
struct starpu_codelet mycodelet =
{
	.cpu_funcs = {func_cpu},
	.nbuffers = 2,
	.modes = {STARPU_RW, STARPU_R},
	.model = &dumb_model
};
/* Runs one configuration of the compute test.
 *
 * rank        - this process's MPI rank
 * node        - rank forced to execute the task (STARPU_EXECUTE_ON_NODE)
 * before      - initial values for x[0..1] on ranks 0 and 1
 * after       - expected values of x[0..1] after the task, per rank
 * task_insert - 0: use starpu_mpi_task_build + submit + post_build,
 *               1: use starpu_mpi_task_insert
 * data_array  - how the two handles are passed: 0 = explicit mode/handle
 *               pairs, 1 = STARPU_DATA_ARRAY (modes from the codelet),
 *               2 = STARPU_DATA_MODE_ARRAY (modes from descrs[])
 *
 * Returns -ENODEV when no CPU worker exists (test skipped), 0 on success,
 * non-zero on a wrong result. Initializes and shuts down StarPU/StarPU-MPI
 * on every call so each configuration starts from a clean runtime.
 */
int test(int rank, int node, int *before, int *after, int task_insert, int data_array)
{
	int ok, ret, i, x[2];
	starpu_data_handle_t data_handles[2];
	struct starpu_data_descr descrs[2];

	ret = starpu_init(NULL);
	STARPU_CHECK_RETURN_VALUE(ret, "starpu_init");
	ret = starpu_mpi_init(NULL, NULL, 0);
	STARPU_CHECK_RETURN_VALUE(ret, "starpu_mpi_init");

	if (starpu_cpu_worker_get_count() <= 0)
	{
		// If there is no cpu to execute the codelet, mpi will block trying to do the post-execution communication
		ret = -ENODEV;
		FPRINTF_MPI(stderr, "No CPU is available\n");
		goto nodata;
	}

	FPRINTF_MPI(stderr, "Testing with node=%d - task_insert=%d - data_array=%d - \n", node, task_insert, data_array);

	for(i=0 ; i<2 ; i++)
	{
		if (rank <= 1)
		{
			/* Ranks 0 and 1 own real data; their local copies are the
			 * ones checked against 'after' at the end. */
			x[i] = before[rank*2+i];
			FPRINTF_MPI(stderr, "before computation x[%d] = %d\n", i, x[i]);
		}
		else
			x[i] = rank*2+i;
		/* Handle i is owned by rank i: the owner registers its real
		 * buffer, every other rank registers a placeholder (-1 home
		 * node) that StarPU-MPI fills by communication as needed. */
		if (rank == i)
			starpu_variable_data_register(&data_handles[i], 0, (uintptr_t)&x[i], sizeof(int));
		else
			starpu_variable_data_register(&data_handles[i], -1, (uintptr_t)NULL, sizeof(int));
		/* Tag i, owner rank i. */
		starpu_mpi_data_register(data_handles[i], i, i);
		descrs[i].handle = data_handles[i];
	}
	/* Access modes for the STARPU_DATA_MODE_ARRAY variant (must match
	 * mycodelet.modes). */
	descrs[0].mode = STARPU_RW;
	descrs[1].mode = STARPU_R;

	switch(task_insert)
	{
		case 0:
		{
			/* Manual path: build the task, submit it locally (build
			 * returns NULL on ranks that do not execute it), then run
			 * post_build so every rank performs the data exchanges. */
			struct starpu_task *task = NULL;
			switch(data_array)
			{
				case 0:
				{
					task = starpu_mpi_task_build(MPI_COMM_WORLD, &mycodelet,
								     STARPU_RW, data_handles[0], STARPU_R, data_handles[1],
								     STARPU_VALUE, &rank, sizeof(rank),
								     STARPU_EXECUTE_ON_NODE, node, 0);
					break;
				}
				case 1:
				{
					task = starpu_mpi_task_build(MPI_COMM_WORLD, &mycodelet,
								     STARPU_DATA_ARRAY, data_handles, 2,
								     STARPU_VALUE, &rank, sizeof(rank),
								     STARPU_EXECUTE_ON_NODE, node, 0);
					break;
				}
				case 2:
				{
					task = starpu_mpi_task_build(MPI_COMM_WORLD, &mycodelet,
								     STARPU_DATA_MODE_ARRAY, descrs, 2,
								     STARPU_VALUE, &rank, sizeof(rank),
								     STARPU_EXECUTE_ON_NODE, node, 0);
					break;
				}
			}

			if (task)
			{
				ret = starpu_task_submit(task);
				if (ret == -ENODEV) goto enodev;
				STARPU_CHECK_RETURN_VALUE(ret, "starpu_task_submit");
			}

			/* post_build must mirror the argument list given to
			 * task_build (minus STARPU_VALUE) and runs on all ranks. */
			switch(data_array)
			{
				case 0:
				{
					starpu_mpi_task_post_build(MPI_COMM_WORLD, &mycodelet,
								   STARPU_RW, data_handles[0], STARPU_R, data_handles[1],
								   STARPU_EXECUTE_ON_NODE, node, 0);
					break;
				}
				case 1:
				{
					starpu_mpi_task_post_build(MPI_COMM_WORLD, &mycodelet,
								   STARPU_DATA_ARRAY, data_handles, 2,
								   STARPU_EXECUTE_ON_NODE, node, 0);
					break;
				}
				case 2:
				{
					starpu_mpi_task_post_build(MPI_COMM_WORLD, &mycodelet,
								   STARPU_DATA_MODE_ARRAY, descrs, 2,
								   STARPU_EXECUTE_ON_NODE, node, 0);
					break;
				}
			}
			break;
		}
		case 1:
		{
			/* One-call path: task_insert handles build, submit and
			 * communications in a single collective call. */
			switch(data_array)
			{
				case 0:
				{
					ret = starpu_mpi_task_insert(MPI_COMM_WORLD, &mycodelet,
								     STARPU_RW, data_handles[0], STARPU_R, data_handles[1],
								     STARPU_VALUE, &rank, sizeof(rank),
								     STARPU_EXECUTE_ON_NODE, node, 0);
					break;
				}
				case 1:
				{
					ret = starpu_mpi_task_insert(MPI_COMM_WORLD, &mycodelet,
								     STARPU_DATA_ARRAY, data_handles, 2,
								     STARPU_VALUE, &rank, sizeof(rank),
								     STARPU_EXECUTE_ON_NODE, node, 0);
					break;
				}
				case 2:
				{
					ret = starpu_mpi_task_insert(MPI_COMM_WORLD, &mycodelet,
								     STARPU_DATA_MODE_ARRAY, descrs, 2,
								     STARPU_VALUE, &rank, sizeof(rank),
								     STARPU_EXECUTE_ON_NODE, node, 0);
					break;
				}
			}
			STARPU_CHECK_RETURN_VALUE(ret, "starpu_mpi_task_insert");
			break;
		}
	}
	starpu_task_wait_for_all();

enodev:
	/* Unregistering flushes the (possibly updated) data back to the
	 * owner's x[i] buffer before we inspect it. */
	for(i=0; i<2; i++)
	{
		starpu_data_unregister(data_handles[i]);
	}

	ok = 1;
#ifndef STARPU_SIMGRID
	/* Under simgrid the kernel does not really run, so the values are
	 * not checked. Only ranks 0 and 1 hold meaningful data. */
	if (rank <= 1)
	{
		for(i=0; i<2; i++)
		{
			ok = ok && (x[i] == after[rank*2+i]);
			FPRINTF_MPI(stderr, "after computation x[%d] = %d, should be %d\n", i, x[i], after[rank*2+i]);
		}
		FPRINTF_MPI(stderr, "result is %s\n", ok?"CORRECT":"NOT CORRECT");
	}
#endif

nodata:
	/* Keep ranks in lockstep before tearing the runtime down. */
	MPI_Barrier(MPI_COMM_WORLD);
	starpu_mpi_shutdown();
	starpu_shutdown();

	return ret == -ENODEV ? ret : !ok;
}
  200. int main(int argc, char **argv)
  201. {
  202. int rank;
  203. int global_ret, ret;
  204. int before[4] = {10, 20, 11, 22};
  205. int after_node[2][4] = {{220, 20, 11, 22}, {220, 20, 11, 22}};
  206. int node, insert_task, data_array;
  207. MPI_INIT_THREAD(&argc, &argv, MPI_THREAD_SERIALIZED);
  208. starpu_mpi_comm_rank(MPI_COMM_WORLD, &rank);
  209. global_ret = 0;
  210. for(node=0 ; node<=1 ; node++)
  211. {
  212. for(insert_task=0 ; insert_task<=1 ; insert_task++)
  213. {
  214. for(data_array=0 ; data_array<=2 ; data_array++)
  215. {
  216. ret = test(rank, node, before, after_node[node], insert_task, data_array);
  217. if (ret == -ENODEV || ret) global_ret = ret;
  218. }
  219. }
  220. }
  221. MPI_Finalize();
  222. return global_ret==-ENODEV?STARPU_TEST_SKIPPED:global_ret;
  223. }