early_request.c 6.3 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245
  1. /* StarPU --- Runtime system for heterogeneous multicore architectures.
  2. *
  3. * Copyright (C) 2015-2020 Université de Bordeaux, CNRS (LaBRI UMR 5800), Inria
  4. *
  5. * StarPU is free software; you can redistribute it and/or modify
  6. * it under the terms of the GNU Lesser General Public License as published by
  7. * the Free Software Foundation; either version 2.1 of the License, or (at
  8. * your option) any later version.
  9. *
  10. * StarPU is distributed in the hope that it will be useful, but
  11. * WITHOUT ANY WARRANTY; without even the implied warranty of
  12. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
  13. *
  14. * See the GNU Lesser General Public License in COPYING.LGPL for more details.
  15. */
  16. #include <starpu.h>
  17. #include <starpu_mpi.h>
  18. #include "helper.h"
  19. #define NUM_EL 5
  20. #define NUM_LOOPS 10
/*
 * This testcase written by J-M Couteyen allows to test that several
 * early requests for a given source and tag can be posted to StarPU
 * by the application before the data arrive.
 *
 * In this test case, multiple processes (called "domains") exchange
 * information between multiple "elements" multiple times, with
 * different sizes (in order to catch errors more easily).
 * The communications are independent between the elements (each one
 * has its own tag), but must occur in the submitted order for any
 * element taken independently.
 */
/* One communication endpoint: each domain owns NUM_EL elements per
 * neighbour, and each element exchanges a vector of `tag` ints with the
 * matching element on the foreign domain. */
struct element
{
	int tag;            /* MPI tag used for this element; also the vector length (set to `size` in init_element) */
	int foreign_domain; /* rank of the peer domain this element communicates with */
	int array_send[100]; /* backing storage for the registered send handle (tag <= 100 assumed) */
	int array_recv[100]; /* backing storage for the registered recv handle */
	starpu_data_handle_t ensure_submitted_order_send; /* void handle chaining send-side tasks in submission order */
	starpu_data_handle_t ensure_submitted_order_recv; /* void handle chaining recv-side tasks in submission order */
	starpu_data_handle_t send; /* vector handle over array_send */
	starpu_data_handle_t recv; /* vector handle over array_recv */
};
/* Functions/codelets to fill the buffers */
  45. void fill_tmp_buffer(void *buffers[], void *cl_arg)
  46. {
  47. (void)cl_arg;
  48. int *tmp = (int *) STARPU_VECTOR_GET_PTR(buffers[0]);
  49. int nx = STARPU_VECTOR_GET_NX(buffers[0]);
  50. int i;
  51. for (i=0; i<nx; i++)
  52. tmp[i]=nx+i;
  53. }
/* Codelet wrapping fill_tmp_buffer: writes one vector buffer (STARPU_W). */
static struct starpu_codelet fill_tmp_buffer_cl =
{
	.where = STARPU_CPU,
	.cpu_funcs = {fill_tmp_buffer, NULL},
	.nbuffers = 1,
	.modes = {STARPU_W},
#ifdef STARPU_SIMGRID
	/* SimGrid needs a performance model to simulate task execution */
	.model = &starpu_perfmodel_nop,
#endif
	.name = "fill_tmp_buffer"
};
  65. void read_ghost(void *buffers[], void *cl_arg)
  66. {
  67. (void)cl_arg;
  68. int *tmp = (int *) STARPU_VECTOR_GET_PTR(buffers[0]);
  69. int nx=STARPU_VECTOR_GET_NX(buffers[0]);
  70. int i;
  71. for(i=0; i<nx;i++)
  72. {
  73. assert(tmp[i]==nx+i);
  74. }
  75. }
/* Codelet wrapping read_ghost: reads one vector buffer (STARPU_R). */
static struct starpu_codelet read_ghost_value_cl =
{
	.where = STARPU_CPU,
	.cpu_funcs = {read_ghost, NULL},
	.nbuffers = 1,
	.modes = {STARPU_R},
#ifdef STARPU_SIMGRID
	/* SimGrid needs a performance model to simulate task execution */
	.model = &starpu_perfmodel_nop,
#endif
	.name = "read_ghost_value"
};
  87. /*codelet to ensure submitted order for a given element*/
  88. void noop(void *buffers[], void *cl_arg)
  89. {
  90. (void)buffers;
  91. (void)cl_arg;
  92. }
  93. void submitted_order_fun(void *buffers[], void *cl_arg)
  94. {
  95. (void)buffers;
  96. (void)cl_arg;
  97. }
/* Ordering codelet: buffer 0 (STARPU_RW) is an element's void handle, so
 * successive tasks on the same element serialize in submission order;
 * buffer 1 (STARPU_W) is the temporary comm buffer, making the following
 * isend/irecv depend on this task. */
static struct starpu_codelet submitted_order =
{
	.where = STARPU_CPU,
	.cpu_funcs = {submitted_order_fun, NULL},
	.nbuffers = 2,
	.modes = {STARPU_RW, STARPU_W},
#ifdef STARPU_SIMGRID
	/* SimGrid needs a performance model to simulate task execution */
	.model = &starpu_perfmodel_nop,
#endif
	.name = "submitted_order_enforcer"
};
  109. void init_element(struct element *el, int size, int foreign_domain)
  110. {
  111. el->tag=size;
  112. el->foreign_domain=foreign_domain;
  113. int mpi_rank;
  114. starpu_mpi_comm_rank(MPI_COMM_WORLD, &mpi_rank);
  115. starpu_vector_data_register(&el->recv, 0, (uintptr_t)el->array_recv, size, sizeof(int));
  116. starpu_vector_data_register(&el->send, 0, (uintptr_t)el->array_send, size, sizeof(int));
  117. starpu_void_data_register(&el->ensure_submitted_order_send);
  118. starpu_void_data_register(&el->ensure_submitted_order_recv);
  119. }
/* Release the four handles registered by init_element.  Called after
 * starpu_task_wait_for_all(), so no task still uses them. */
void free_element(struct element *el)
{
	starpu_data_unregister(el->recv);
	starpu_data_unregister(el->send);
	starpu_data_unregister(el->ensure_submitted_order_send);
	starpu_data_unregister(el->ensure_submitted_order_recv);
}
/* Submit one fill/send/recv/check round for an element.
 *
 * Every loop iteration re-registers fresh temporary handles (home node -1:
 * StarPU allocates the buffer itself), so many irecv requests for the same
 * (source, tag) pair end up posted before the matching data arrive — this
 * is exactly the "early request" situation the test exercises.  The
 * submitted_order tasks chain each isend/irecv after the previous round's
 * one through the element's void handles, guaranteeing the per-element
 * submission order while leaving different elements independent. */
void insert_work_for_one_element(struct element *el)
{
	starpu_data_handle_t tmp_recv;
	starpu_data_handle_t tmp_send;
	starpu_vector_data_register(&tmp_recv, -1, 0, el->tag, sizeof(int));
	starpu_vector_data_register(&tmp_send, -1, 0, el->tag, sizeof(int));
	//Emulate the work to fill the send buffer
	starpu_insert_task(&fill_tmp_buffer_cl,
			   STARPU_W,tmp_send,
			   0);
	//Send operation
	starpu_insert_task(&submitted_order,
			   STARPU_RW,el->ensure_submitted_order_send,
			   STARPU_W,tmp_send,
			   0);
	starpu_mpi_isend_detached(tmp_send,el->foreign_domain,el->tag, MPI_COMM_WORLD, NULL, NULL);
	//Recv operation for current element
	starpu_insert_task(&submitted_order,
			   STARPU_RW,el->ensure_submitted_order_recv,
			   STARPU_W,tmp_recv,
			   0);
	starpu_mpi_irecv_detached(tmp_recv,el->foreign_domain,el->tag, MPI_COMM_WORLD, NULL, NULL);
	//Emulate the "reading" of the recv value.
	starpu_insert_task(&read_ghost_value_cl,
			   STARPU_R,tmp_recv,
			   0);
	/* Unregister asynchronously: the handles are freed once the tasks
	 * and detached communications above have completed. */
	starpu_data_unregister_submit(tmp_send);
	starpu_data_unregister_submit(tmp_recv);
}
  156. /*main program*/
  157. int main(int argc, char * argv[])
  158. {
  159. /* Init */
  160. int ret;
  161. int mpi_rank, mpi_size;
  162. int mpi_init;
  163. MPI_INIT_THREAD(&argc, &argv, MPI_THREAD_SERIALIZED, &mpi_init);
  164. ret = starpu_mpi_init_conf(&argc, &argv, mpi_init, MPI_COMM_WORLD, NULL);
  165. STARPU_CHECK_RETURN_VALUE(ret, "starpu_mpi_init_conf");
  166. starpu_mpi_comm_rank(MPI_COMM_WORLD, &mpi_rank);
  167. starpu_mpi_comm_size(MPI_COMM_WORLD, &mpi_size);
  168. if (starpu_cpu_worker_get_count() == 0)
  169. {
  170. if (mpi_rank == 0)
  171. FPRINTF(stderr, "We need at least 1 CPU worker.\n");
  172. starpu_mpi_shutdown();
  173. if (!mpi_init)
  174. MPI_Finalize();
  175. return STARPU_TEST_SKIPPED;
  176. }
  177. /*element initialization : domains are connected as a ring for this test*/
  178. int num_elements=NUM_EL;
  179. struct element * el_left=malloc(num_elements*sizeof(el_left[0]));
  180. struct element * el_right=malloc(num_elements*sizeof(el_right[0]));
  181. int i;
  182. for(i=0;i<num_elements;i++)
  183. {
  184. init_element(el_left+i,i+1,((mpi_rank-1)+mpi_size)%mpi_size);
  185. init_element(el_right+i,i+1,(mpi_rank+1)%mpi_size);
  186. }
  187. /* Communication loop */
  188. for (i=0; i<NUM_LOOPS; i++) //number of "computations loops"
  189. {
  190. int e;
  191. for (e=0;e<num_elements;e++) //Do something for each elements
  192. {
  193. insert_work_for_one_element(el_right+e);
  194. insert_work_for_one_element(el_left+e);
  195. }
  196. }
  197. /* End */
  198. starpu_task_wait_for_all();
  199. for(i=0;i<num_elements;i++)
  200. {
  201. free_element(el_left+i);
  202. free_element(el_right+i);
  203. }
  204. free(el_left);
  205. free(el_right);
  206. starpu_mpi_shutdown();
  207. if (!mpi_init)
  208. MPI_Finalize();
  209. FPRINTF(stderr, "No assert until end\n");
  210. return 0;
  211. }