/* early_request.c */
  1. /* StarPU --- Runtime system for heterogeneous multicore architectures.
  2. *
  3. * Copyright (C) 2015, 2016, 2017 CNRS
  4. * Copyright (C) 2015 INRIA
  5. *
  6. * StarPU is free software; you can redistribute it and/or modify
  7. * it under the terms of the GNU Lesser General Public License as published by
  8. * the Free Software Foundation; either version 2.1 of the License, or (at
  9. * your option) any later version.
  10. *
  11. * StarPU is distributed in the hope that it will be useful, but
  12. * WITHOUT ANY WARRANTY; without even the implied warranty of
  13. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
  14. *
  15. * See the GNU Lesser General Public License in COPYING.LGPL for more details.
  16. */
  17. #include <starpu.h>
  18. #include <starpu_mpi.h>
  19. #include "helper.h"
  20. #define NUM_EL 5
  21. #define NUM_LOOPS 10
/*
 * This testcase, written by J-M Couteyen, checks that several early
 * requests for a given source and tag can be posted to StarPU by the
 * application before the data arrive.
 *
 * In this test case, multiple processes (called "domains") exchange
 * information between multiple "elements" multiple times, with
 * different sizes (in order to catch errors more easily).
 * The communications are independent between elements (each one
 * has its own tag), but must occur in the submitted order for any
 * single element taken independently.
 */
/* Per-element state: one element exchanges a fixed-size vector with one
 * foreign domain, using its own MPI tag. */
struct element
{
	int tag;             /* MPI tag for this element's messages; set to the vector size in init_element */
	int foreign_domain;  /* rank of the peer domain this element exchanges data with */
	int array_send[100]; /* backing storage registered behind the 'send' handle */
	int array_recv[100]; /* backing storage registered behind the 'recv' handle */
	starpu_data_handle_t ensure_submitted_order_send; /* void handle; RW access serializes this element's sends */
	starpu_data_handle_t ensure_submitted_order_recv; /* void handle; RW access serializes this element's receives */
	starpu_data_handle_t send;
	starpu_data_handle_t recv;
};
/* functions/codelets to fill the buffers */
  46. void fill_tmp_buffer(void *buffers[], void *cl_arg)
  47. {
  48. (void)cl_arg;
  49. int *tmp = (int *) STARPU_VECTOR_GET_PTR(buffers[0]);
  50. int nx = STARPU_VECTOR_GET_NX(buffers[0]);
  51. int i;
  52. for (i=0; i<nx; i++)
  53. tmp[i]=nx+i;
  54. }
/* Codelet wrapping fill_tmp_buffer: a single CPU implementation writing one
 * vector buffer. */
static struct starpu_codelet fill_tmp_buffer_cl =
{
	.where = STARPU_CPU,
	.cpu_funcs = {fill_tmp_buffer, NULL},
	.nbuffers = 1,
	.modes = {STARPU_W},
#ifdef STARPU_SIMGRID
	/* NOTE(review): SimGrid builds appear to require a performance model;
	 * starpu_perfmodel_nop presumably models negligible work — confirm. */
	.model = &starpu_perfmodel_nop,
#endif
	.name = "fill_tmp_buffer"
};
  66. void read_ghost(void *buffers[], void *cl_arg)
  67. {
  68. (void)cl_arg;
  69. int *tmp = (int *) STARPU_VECTOR_GET_PTR(buffers[0]);
  70. int nx=STARPU_VECTOR_GET_NX(buffers[0]);
  71. int i;
  72. for(i=0; i<nx;i++)
  73. {
  74. assert(tmp[i]==nx+i);
  75. }
  76. }
/* Codelet wrapping read_ghost: a single CPU implementation reading one
 * vector buffer. */
static struct starpu_codelet read_ghost_value_cl =
{
	.where = STARPU_CPU,
	.cpu_funcs = {read_ghost, NULL},
	.nbuffers = 1,
	.modes = {STARPU_R},
#ifdef STARPU_SIMGRID
	/* NOTE(review): SimGrid builds appear to require a performance model;
	 * starpu_perfmodel_nop presumably models negligible work — confirm. */
	.model = &starpu_perfmodel_nop,
#endif
	.name = "read_ghost_value"
};
  88. /*codelet to ensure submitted order for a given element*/
/* Intentionally empty kernel body.  NOTE(review): not referenced by any
 * codelet visible in this file (submitted_order uses submitted_order_fun);
 * confirm it is still needed. */
void noop(void *buffers[], void *cl_arg)
{
	(void)buffers;
	(void)cl_arg;
}
/* Deliberately empty kernel: the submitted_order codelet exists only for its
 * data dependencies, not for any computation. */
void submitted_order_fun(void *buffers[], void *cl_arg)
{
	(void)buffers;
	(void)cl_arg;
}
/* Ordering codelet: buffer 0 is a per-element void handle accessed RW, so
 * successive tasks on the same element are chained in submission order;
 * buffer 1 is the temporary communication vector (W). */
static struct starpu_codelet submitted_order =
{
	.where = STARPU_CPU,
	.cpu_funcs = {submitted_order_fun, NULL},
	.nbuffers = 2,
	.modes = {STARPU_RW, STARPU_W},
#ifdef STARPU_SIMGRID
	/* NOTE(review): SimGrid builds appear to require a performance model;
	 * starpu_perfmodel_nop presumably models negligible work — confirm. */
	.model = &starpu_perfmodel_nop,
#endif
	.name = "submitted_order_enforcer"
};
  110. void init_element(struct element *el, int size, int foreign_domain)
  111. {
  112. el->tag=size;
  113. el->foreign_domain=foreign_domain;
  114. int mpi_rank;
  115. starpu_mpi_comm_rank(MPI_COMM_WORLD, &mpi_rank);
  116. starpu_vector_data_register(&el->recv, 0, (uintptr_t)el->array_recv, size, sizeof(int));
  117. starpu_vector_data_register(&el->send, 0, (uintptr_t)el->array_send, size, sizeof(int));
  118. starpu_void_data_register(&el->ensure_submitted_order_send);
  119. starpu_void_data_register(&el->ensure_submitted_order_recv);
  120. }
  121. void free_element(struct element *el)
  122. {
  123. starpu_data_unregister(el->recv);
  124. starpu_data_unregister(el->send);
  125. starpu_data_unregister(el->ensure_submitted_order_send);
  126. starpu_data_unregister(el->ensure_submitted_order_recv);
  127. }
/* Submit one communication round for a single element: fill a temporary send
 * buffer, send it to the element's peer, receive the peer's buffer, and check
 * its content.  The RW access on the per-element void handles forces the
 * isend/irecv of successive rounds to be posted in submission order for this
 * element, while different elements stay independent — this is what lets the
 * test accumulate several early requests for the same source and tag. */
void insert_work_for_one_element(struct element *el)
{
	starpu_data_handle_t tmp_recv;
	starpu_data_handle_t tmp_send;
	/* Temporary StarPU-managed vectors (home node -1): length equals the
	 * element's tag, so every element communicates with a different size. */
	starpu_vector_data_register(&tmp_recv, -1, 0, el->tag, sizeof(int));
	starpu_vector_data_register(&tmp_send, -1, 0, el->tag, sizeof(int));
	//Emulate the work to fill the send buffer
	starpu_insert_task(&fill_tmp_buffer_cl,
	STARPU_W,tmp_send,
	0);
	//Send operation
	starpu_insert_task(&submitted_order,
	STARPU_RW,el->ensure_submitted_order_send,
	STARPU_W,tmp_send,
	0);
	starpu_mpi_isend_detached(tmp_send,el->foreign_domain,el->tag, MPI_COMM_WORLD, NULL, NULL);
	//Recv operation for current element
	starpu_insert_task(&submitted_order,
	STARPU_RW,el->ensure_submitted_order_recv,
	STARPU_W,tmp_recv,
	0);
	starpu_mpi_irecv_detached(tmp_recv,el->foreign_domain,el->tag, MPI_COMM_WORLD, NULL, NULL);
	//Emulate the "reading" of the recv value.
	starpu_insert_task(&read_ghost_value_cl,
	STARPU_R,tmp_recv,
	0);
	/* Deferred unregister: the handles are freed once all tasks and
	 * detached communications using them have completed. */
	starpu_data_unregister_submit(tmp_send);
	starpu_data_unregister_submit(tmp_recv);
}
  157. /*main program*/
  158. int main(int argc, char * argv[])
  159. {
  160. /* Init */
  161. int ret;
  162. int mpi_rank, mpi_size;
  163. int mpi_init;
  164. MPI_INIT_THREAD(&argc, &argv, MPI_THREAD_SERIALIZED, &mpi_init);
  165. ret = starpu_init(NULL);
  166. STARPU_CHECK_RETURN_VALUE(ret, "starpu_init");
  167. ret = starpu_mpi_init(&argc, &argv, mpi_init);
  168. STARPU_CHECK_RETURN_VALUE(ret, "starpu_mpi_init");
  169. starpu_mpi_comm_rank(MPI_COMM_WORLD, &mpi_rank);
  170. starpu_mpi_comm_size(MPI_COMM_WORLD, &mpi_size);
  171. if (starpu_cpu_worker_get_count() == 0)
  172. {
  173. if (mpi_rank == 0)
  174. FPRINTF(stderr, "We need at least 1 CPU worker.\n");
  175. starpu_mpi_shutdown();
  176. starpu_shutdown();
  177. if (!mpi_init)
  178. MPI_Finalize();
  179. return STARPU_TEST_SKIPPED;
  180. }
  181. /*element initialization : domains are connected as a ring for this test*/
  182. int num_elements=NUM_EL;
  183. struct element * el_left=malloc(num_elements*sizeof(el_left[0]));
  184. struct element * el_right=malloc(num_elements*sizeof(el_right[0]));
  185. int i;
  186. for(i=0;i<num_elements;i++)
  187. {
  188. init_element(el_left+i,i+1,((mpi_rank-1)+mpi_size)%mpi_size);
  189. init_element(el_right+i,i+1,(mpi_rank+1)%mpi_size);
  190. }
  191. /* Communication loop */
  192. for (i=0; i<NUM_LOOPS; i++) //number of "computations loops"
  193. {
  194. int e;
  195. for (e=0;e<num_elements;e++) //Do something for each elements
  196. {
  197. insert_work_for_one_element(el_right+e);
  198. insert_work_for_one_element(el_left+e);
  199. }
  200. }
  201. /* End */
  202. starpu_task_wait_for_all();
  203. for(i=0;i<num_elements;i++)
  204. {
  205. free_element(el_left+i);
  206. free_element(el_right+i);
  207. }
  208. free(el_left);
  209. free(el_right);
  210. starpu_mpi_shutdown();
  211. starpu_shutdown();
  212. if (!mpi_init)
  213. MPI_Finalize();
  214. FPRINTF(stderr, "No assert until end\n");
  215. return 0;
  216. }