/* insert_task_compute.c */
  1. /* StarPU --- Runtime system for heterogeneous multicore architectures.
  2. *
  3. * Copyright (C) 2013, 2014, 2015, 2016, 2017 CNRS
  4. *
  5. * StarPU is free software; you can redistribute it and/or modify
  6. * it under the terms of the GNU Lesser General Public License as published by
  7. * the Free Software Foundation; either version 2.1 of the License, or (at
  8. * your option) any later version.
  9. *
  10. * StarPU is distributed in the hope that it will be useful, but
  11. * WITHOUT ANY WARRANTY; without even the implied warranty of
  12. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
  13. *
  14. * See the GNU Lesser General Public License in COPYING.LGPL for more details.
  15. */
  16. #include <starpu_mpi.h>
  17. #include "helper.h"
  18. void func_cpu(void *descr[], void *_args)
  19. {
  20. int rank;
  21. int *x = (int *)STARPU_VARIABLE_GET_PTR(descr[0]);
  22. int *y = (int *)STARPU_VARIABLE_GET_PTR(descr[1]);
  23. starpu_codelet_unpack_args(_args, &rank);
  24. FPRINTF(stdout, "[%d] VALUES: %d %d\n", rank, *x, *y);
  25. *x = *x * *y;
  26. }
  27. /* Dummy cost function for simgrid */
  28. static double cost_function(struct starpu_task *task STARPU_ATTRIBUTE_UNUSED, unsigned nimpl STARPU_ATTRIBUTE_UNUSED)
  29. {
  30. return 0.000001;
  31. }
/* Performance model backed by the dummy cost function, required so that the
 * codelet can be scheduled under simgrid. */
static struct starpu_perfmodel dumb_model =
{
	.type = STARPU_COMMON,
	.cost_function = cost_function
};
/* Codelet with two buffers: buffer 0 is read-write (the product is stored
 * back into it), buffer 1 is read-only. */
struct starpu_codelet mycodelet =
{
	.cpu_funcs = {func_cpu},
	.nbuffers = 2,
	.modes = {STARPU_RW, STARPU_R},
	.model = &dumb_model
};
/* Runs one configuration of the insert-task test and checks the result.
 *
 * rank        - MPI rank of the calling process
 * node        - rank the task is forced onto via STARPU_EXECUTE_ON_NODE
 * before      - initial values of x[] for ranks 0 and 1 (2 ints per rank)
 * after       - expected values of x[] once the task has executed
 * task_insert - 0: explicit starpu_mpi_task_build / submit / post_build,
 *               1: single starpu_mpi_task_insert call
 * data_array  - 0: handles passed one by one with their modes,
 *               1: STARPU_DATA_ARRAY (modes taken from the codelet),
 *               2: STARPU_DATA_MODE_ARRAY (modes from descrs[])
 *
 * Returns -ENODEV when no CPU worker is available, otherwise 0 on success
 * and non-zero when the computed values do not match 'after'.
 */
int test(int rank, int node, int *before, int *after, int task_insert, int data_array)
{
	int ok, ret, i, x[2];
	starpu_data_handle_t data_handles[2];
	struct starpu_data_descr descrs[2];
	int barrier_ret;

	ret = starpu_init(NULL);
	STARPU_CHECK_RETURN_VALUE(ret, "starpu_init");
	ret = starpu_mpi_init(NULL, NULL, 0);
	STARPU_CHECK_RETURN_VALUE(ret, "starpu_mpi_init");

	if (starpu_cpu_worker_get_count() <= 0)
	{
		// If there is no cpu to execute the codelet, mpi will block trying to do the post-execution communication
		ret = -ENODEV;
		FPRINTF_MPI(stderr, "No CPU is available\n");
		goto nodata;
	}

	FPRINTF_MPI(stderr, "Testing with node=%d - task_insert=%d - data_array=%d - \n", node, task_insert, data_array);

	/* Register one variable owned by rank 0 and one owned by rank 1: the
	 * owner registers the actual memory (home node 0), every other rank
	 * registers a placeholder (home node -1). */
	for(i=0 ; i<2 ; i++)
	{
		if (rank <= 1)
		{
			x[i] = before[rank*2+i];
			FPRINTF_MPI(stderr, "before computation x[%d] = %d\n", i, x[i]);
		}
		else
			x[i] = rank*2+i;
		if (rank == i)
			starpu_variable_data_register(&data_handles[i], 0, (uintptr_t)&x[i], sizeof(int));
		else
			starpu_variable_data_register(&data_handles[i], -1, (uintptr_t)NULL, sizeof(int));
		/* MPI tag i, owned by rank i */
		starpu_mpi_data_register(data_handles[i], i, i);
		descrs[i].handle = data_handles[i];
	}
	/* Modes used by the STARPU_DATA_MODE_ARRAY variant; they mirror the
	 * codelet's .modes declaration. */
	descrs[0].mode = STARPU_RW;
	descrs[1].mode = STARPU_R;

	switch(task_insert)
	{
		case 0:
		{
			/* Explicit build / submit / post_build sequence.  The
			 * three cases only differ in how the data handles are
			 * passed to starpu_mpi_task_build(). */
			struct starpu_task *task = NULL;
			switch(data_array)
			{
				case 0:
				{
					task = starpu_mpi_task_build(MPI_COMM_WORLD, &mycodelet,
						STARPU_RW, data_handles[0], STARPU_R, data_handles[1],
						STARPU_VALUE, &rank, sizeof(rank),
						STARPU_EXECUTE_ON_NODE, node, 0);
					break;
				}
				case 1:
				{
					task = starpu_mpi_task_build(MPI_COMM_WORLD, &mycodelet,
						STARPU_DATA_ARRAY, data_handles, 2,
						STARPU_VALUE, &rank, sizeof(rank),
						STARPU_EXECUTE_ON_NODE, node, 0);
					break;
				}
				case 2:
				{
					task = starpu_mpi_task_build(MPI_COMM_WORLD, &mycodelet,
						STARPU_DATA_MODE_ARRAY, descrs, 2,
						STARPU_VALUE, &rank, sizeof(rank),
						STARPU_EXECUTE_ON_NODE, node, 0);
					break;
				}
			}
			/* task is NULL on the ranks which do not execute it */
			if (task)
			{
				ret = starpu_task_submit(task);
				if (ret == -ENODEV) goto enodev;
				STARPU_CHECK_RETURN_VALUE(ret, "starpu_task_submit");
			}
			/* post_build performs the data exchanges matching the
			 * build call above; the argument list must mirror it. */
			switch(data_array)
			{
				case 0:
				{
					starpu_mpi_task_post_build(MPI_COMM_WORLD, &mycodelet,
						STARPU_RW, data_handles[0], STARPU_R, data_handles[1],
						STARPU_EXECUTE_ON_NODE, node, 0);
					break;
				}
				case 1:
				{
					starpu_mpi_task_post_build(MPI_COMM_WORLD, &mycodelet,
						STARPU_DATA_ARRAY, data_handles, 2,
						STARPU_EXECUTE_ON_NODE, node, 0);
					break;
				}
				case 2:
				{
					starpu_mpi_task_post_build(MPI_COMM_WORLD, &mycodelet,
						STARPU_DATA_MODE_ARRAY, descrs, 2,
						STARPU_EXECUTE_ON_NODE, node, 0);
					break;
				}
			}
			break;
		}
		case 1:
		{
			/* One-shot starpu_mpi_task_insert() variant. */
			switch(data_array)
			{
				case 0:
				{
					ret = starpu_mpi_task_insert(MPI_COMM_WORLD, &mycodelet,
						STARPU_RW, data_handles[0], STARPU_R, data_handles[1],
						STARPU_VALUE, &rank, sizeof(rank),
						STARPU_EXECUTE_ON_NODE, node, 0);
					break;
				}
				case 1:
				{
					ret = starpu_mpi_task_insert(MPI_COMM_WORLD, &mycodelet,
						STARPU_DATA_ARRAY, data_handles, 2,
						STARPU_VALUE, &rank, sizeof(rank),
						STARPU_EXECUTE_ON_NODE, node, 0);
					break;
				}
				case 2:
				{
					ret = starpu_mpi_task_insert(MPI_COMM_WORLD, &mycodelet,
						STARPU_DATA_MODE_ARRAY, descrs, 2,
						STARPU_VALUE, &rank, sizeof(rank),
						STARPU_EXECUTE_ON_NODE, node, 0);
					break;
				}
			}
			STARPU_CHECK_RETURN_VALUE(ret, "starpu_mpi_task_insert");
			break;
		}
	}
	starpu_task_wait_for_all();
enodev:
	/* Unregistering flushes the data back to the owner's memory (x[]). */
	for(i=0; i<2; i++)
	{
		starpu_data_unregister(data_handles[i]);
	}
	ok = 1;
#ifndef STARPU_SIMGRID
	/* Only ranks 0 and 1 own a variable, so only they can compare the
	 * written-back values against the expected results. */
	if (rank <= 1)
	{
		for(i=0; i<2; i++)
		{
			ok = ok && (x[i] == after[rank*2+i]);
			FPRINTF_MPI(stderr, "after computation x[%d] = %d, should be %d\n", i, x[i], after[rank*2+i]);
		}
		FPRINTF_MPI(stderr, "result is %s\n", ok?"CORRECT":"NOT CORRECT");
	}
#endif
nodata:
	/* Keep all ranks in lockstep before shutting StarPU down. */
	barrier_ret = MPI_Barrier(MPI_COMM_WORLD);
	STARPU_ASSERT(barrier_ret == MPI_SUCCESS);
	starpu_mpi_shutdown();
	starpu_shutdown();
	return ret == -ENODEV ? ret : !ok;
}
  202. int main(int argc, char **argv)
  203. {
  204. int rank;
  205. int global_ret, ret;
  206. int before[4] = {10, 20, 11, 22};
  207. int after_node[2][4] = {{220, 20, 11, 22}, {220, 20, 11, 22}};
  208. int node, insert_task, data_array;
  209. MPI_INIT_THREAD_real(&argc, &argv, MPI_THREAD_SERIALIZED);
  210. starpu_mpi_comm_rank(MPI_COMM_WORLD, &rank);
  211. global_ret = 0;
  212. for(node=0 ; node<=1 ; node++)
  213. {
  214. for(insert_task=0 ; insert_task<=1 ; insert_task++)
  215. {
  216. for(data_array=0 ; data_array<=2 ; data_array++)
  217. {
  218. ret = test(rank, node, before, after_node[node], insert_task, data_array);
  219. if (ret == -ENODEV || ret) global_ret = ret;
  220. }
  221. }
  222. }
  223. MPI_Finalize();
  224. return global_ret==-ENODEV?STARPU_TEST_SKIPPED:global_ret;
  225. }