/* insert_task_compute.c — StarPU-MPI test for task insertion/build variants. */
  1. /* StarPU --- Runtime system for heterogeneous multicore architectures.
  2. *
  3. * Copyright (C) 2013, 2014, 2015, 2016, 2017 CNRS
  4. *
  5. * StarPU is free software; you can redistribute it and/or modify
  6. * it under the terms of the GNU Lesser General Public License as published by
  7. * the Free Software Foundation; either version 2.1 of the License, or (at
  8. * your option) any later version.
  9. *
  10. * StarPU is distributed in the hope that it will be useful, but
  11. * WITHOUT ANY WARRANTY; without even the implied warranty of
  12. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
  13. *
  14. * See the GNU Lesser General Public License in COPYING.LGPL for more details.
  15. */
  16. #include <starpu_mpi.h>
  17. #include "helper.h"
  18. void func_cpu(void *descr[], void *_args)
  19. {
  20. int rank;
  21. int *x = (int *)STARPU_VARIABLE_GET_PTR(descr[0]);
  22. int *y = (int *)STARPU_VARIABLE_GET_PTR(descr[1]);
  23. starpu_codelet_unpack_args(_args, &rank);
  24. FPRINTF(stdout, "[%d] VALUES: %d %d\n", rank, *x, *y);
  25. *x = *x * *y;
  26. }
  27. /* Dummy cost function for simgrid */
  28. static double cost_function(struct starpu_task *task STARPU_ATTRIBUTE_UNUSED, unsigned nimpl STARPU_ATTRIBUTE_UNUSED)
  29. {
  30. return 0.000001;
  31. }
/* Performance model backed by the constant cost_function above;
 * STARPU_COMMON means one cost function shared by all architectures. */
static struct starpu_perfmodel dumb_model =
{
	.type = STARPU_COMMON,
	.cost_function = cost_function
};
/* Codelet running func_cpu on two variable handles:
 * buffer 0 is read-write (receives the product), buffer 1 is read-only. */
struct starpu_codelet mycodelet =
{
	.cpu_funcs = {func_cpu},
	.nbuffers = 2,
	.modes = {STARPU_RW, STARPU_R},
	.model = &dumb_model
};
/* Run one scenario of the test.
 *
 * rank        - this process's MPI rank
 * node        - rank forced to execute the task (STARPU_EXECUTE_ON_NODE)
 * before      - expected initial values of x[] on ranks 0 and 1
 * after       - expected final values of x[] on ranks 0 and 1
 * task_insert - 0: starpu_mpi_task_build + submit + post_build,
 *               1: starpu_mpi_task_insert
 * data_array  - 0: explicit handle/mode pairs, 1: STARPU_DATA_ARRAY,
 *               2: STARPU_DATA_MODE_ARRAY
 *
 * Returns -ENODEV when no CPU worker is available (test skipped),
 * otherwise 0 on success and nonzero on a wrong result. */
int test(int rank, int node, int *before, int *after, int task_insert, int data_array)
{
	int ok, ret, i, x[2];
	starpu_data_handle_t data_handles[2];
	struct starpu_data_descr descrs[2];
	int barrier_ret;

	ret = starpu_init(NULL);
	STARPU_CHECK_RETURN_VALUE(ret, "starpu_init");
	ret = starpu_mpi_init(NULL, NULL, 0);
	STARPU_CHECK_RETURN_VALUE(ret, "starpu_mpi_init");

	if (starpu_cpu_worker_get_count() <= 0)
	{
		// If there is no cpu to execute the codelet, mpi will block trying to do the post-execution communication
		ret = -ENODEV;
		FPRINTF_MPI(stderr, "No CPU is available\n");
		goto nodata;
	}

	FPRINTF_MPI(stderr, "Testing with node=%d - task_insert=%d - data_array=%d - \n", node, task_insert, data_array);

	/* Register one variable per rank 0/1: data i lives on rank i (home node),
	 * other ranks register a placeholder (-1) so StarPU-MPI can cache it. */
	for(i=0 ; i<2 ; i++)
	{
		if (rank <= 1)
		{
			x[i] = before[rank*2+i];
			FPRINTF_MPI(stderr, "before computation x[%d] = %d\n", i, x[i]);
		}
		else
			x[i] = rank*2+i;
		if (rank == i)
			starpu_variable_data_register(&data_handles[i], 0, (uintptr_t)&x[i], sizeof(int));
		else
			starpu_variable_data_register(&data_handles[i], -1, (uintptr_t)NULL, sizeof(int));
		/* Tag i, owner rank i */
		starpu_mpi_data_register(data_handles[i], i, i);
		descrs[i].handle = data_handles[i];
	}
	/* Modes must mirror mycodelet.modes for the DATA_MODE_ARRAY variant */
	descrs[0].mode = STARPU_RW;
	descrs[1].mode = STARPU_R;

	switch(task_insert)
	{
	case 0:
	{
		/* Explicit build/submit/post_build sequence: build may return NULL
		 * on ranks that do not execute the task; post_build must still be
		 * called on every rank to perform the data exchanges. */
		struct starpu_task *task = NULL;
		switch(data_array)
		{
		case 0:
		{
			task = starpu_mpi_task_build(MPI_COMM_WORLD, &mycodelet,
						     STARPU_RW, data_handles[0], STARPU_R, data_handles[1],
						     STARPU_VALUE, &rank, sizeof(rank),
						     STARPU_EXECUTE_ON_NODE, node, 0);
			break;
		}
		case 1:
		{
			task = starpu_mpi_task_build(MPI_COMM_WORLD, &mycodelet,
						     STARPU_DATA_ARRAY, data_handles, 2,
						     STARPU_VALUE, &rank, sizeof(rank),
						     STARPU_EXECUTE_ON_NODE, node, 0);
			break;
		}
		case 2:
		{
			task = starpu_mpi_task_build(MPI_COMM_WORLD, &mycodelet,
						     STARPU_DATA_MODE_ARRAY, descrs, 2,
						     STARPU_VALUE, &rank, sizeof(rank),
						     STARPU_EXECUTE_ON_NODE, node, 0);
			break;
		}
		}
		if (task)
		{
			ret = starpu_task_submit(task);
			if (ret == -ENODEV)
				goto enodev;
			STARPU_CHECK_RETURN_VALUE(ret, "starpu_task_submit");
		}
		/* Same data arguments as the build call above */
		switch(data_array)
		{
		case 0:
		{
			starpu_mpi_task_post_build(MPI_COMM_WORLD, &mycodelet,
						   STARPU_RW, data_handles[0], STARPU_R, data_handles[1],
						   STARPU_EXECUTE_ON_NODE, node, 0);
			break;
		}
		case 1:
		{
			starpu_mpi_task_post_build(MPI_COMM_WORLD, &mycodelet,
						   STARPU_DATA_ARRAY, data_handles, 2,
						   STARPU_EXECUTE_ON_NODE, node, 0);
			break;
		}
		case 2:
		{
			starpu_mpi_task_post_build(MPI_COMM_WORLD, &mycodelet,
						   STARPU_DATA_MODE_ARRAY, descrs, 2,
						   STARPU_EXECUTE_ON_NODE, node, 0);
			break;
		}
		}
		break;
	}
	case 1:
	{
		/* One-shot starpu_mpi_task_insert, one call per data-passing style */
		switch(data_array)
		{
		case 0:
		{
			ret = starpu_mpi_task_insert(MPI_COMM_WORLD, &mycodelet,
						     STARPU_RW, data_handles[0], STARPU_R, data_handles[1],
						     STARPU_VALUE, &rank, sizeof(rank),
						     STARPU_EXECUTE_ON_NODE, node, 0);
			break;
		}
		case 1:
		{
			ret = starpu_mpi_task_insert(MPI_COMM_WORLD, &mycodelet,
						     STARPU_DATA_ARRAY, data_handles, 2,
						     STARPU_VALUE, &rank, sizeof(rank),
						     STARPU_EXECUTE_ON_NODE, node, 0);
			break;
		}
		case 2:
		{
			ret = starpu_mpi_task_insert(MPI_COMM_WORLD, &mycodelet,
						     STARPU_DATA_MODE_ARRAY, descrs, 2,
						     STARPU_VALUE, &rank, sizeof(rank),
						     STARPU_EXECUTE_ON_NODE, node, 0);
			break;
		}
		}
		STARPU_CHECK_RETURN_VALUE(ret, "starpu_mpi_task_insert");
		break;
	}
	}

	starpu_task_wait_for_all();
enodev:
	/* Unregistering flushes results back to the registered x[] buffers */
	for(i=0; i<2; i++)
	{
		starpu_data_unregister(data_handles[i]);
	}

	ok = 1;
#ifndef STARPU_SIMGRID
	/* Only ranks 0 and 1 own data; check their x[] against expectations */
	if (rank <= 1)
	{
		for(i=0; i<2; i++)
		{
			ok = ok && (x[i] == after[rank*2+i]);
			FPRINTF_MPI(stderr, "after computation x[%d] = %d, should be %d\n", i, x[i], after[rank*2+i]);
		}
		FPRINTF_MPI(stderr, "result is %s\n", ok?"CORRECT":"NOT CORRECT");
	}
#endif
nodata:
	/* Keep ranks in lockstep before tearing StarPU down */
	barrier_ret = MPI_Barrier(MPI_COMM_WORLD);
	STARPU_ASSERT(barrier_ret == MPI_SUCCESS);

	starpu_mpi_shutdown();
	starpu_shutdown();

	return ret == -ENODEV ? ret : !ok;
}
  203. int main(int argc, char **argv)
  204. {
  205. int rank;
  206. int global_ret, ret;
  207. int before[4] = {10, 20, 11, 22};
  208. int after_node[2][4] = {{220, 20, 11, 22}, {220, 20, 11, 22}};
  209. int node, insert_task, data_array;
  210. MPI_INIT_THREAD_real(&argc, &argv, MPI_THREAD_SERIALIZED);
  211. starpu_mpi_comm_rank(MPI_COMM_WORLD, &rank);
  212. global_ret = 0;
  213. for(node=0 ; node<=1 ; node++)
  214. {
  215. for(insert_task=0 ; insert_task<=1 ; insert_task++)
  216. {
  217. for(data_array=0 ; data_array<=2 ; data_array++)
  218. {
  219. ret = test(rank, node, before, after_node[node], insert_task, data_array);
  220. if (ret == -ENODEV || ret)
  221. global_ret = ret;
  222. }
  223. }
  224. }
  225. MPI_Finalize();
  226. return global_ret==-ENODEV?STARPU_TEST_SKIPPED:global_ret;
  227. }