mpi_earlyrecv.c

/* StarPU --- Runtime system for heterogeneous multicore architectures.
 *
 * Copyright (C) 2009, 2010  Université de Bordeaux 1
 * Copyright (C) 2010, 2011, 2012, 2013  Centre National de la Recherche Scientifique
 *
 * StarPU is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation; either version 2.1 of the License, or (at
 * your option) any later version.
 *
 * StarPU is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * See the GNU Lesser General Public License in COPYING.LGPL for more details.
 */
#include <starpu_mpi.h>
#include "helper.h"
#include <unistd.h>
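
/* This test pairs each even rank with the following odd rank. Each pair
 * exchanges three registered variables, mixing blocking and non-blocking
 * StarPU-MPI calls, and then polls until all pending requests complete. */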
int main(int argc, char **argv)
{
	int ret, rank, size, i, nb_requests;
	starpu_data_handle_t tab_handle[3];
	starpu_mpi_req request[3];

	MPI_Init(NULL, NULL);
	MPI_Comm_rank(MPI_COMM_WORLD, &rank);
	MPI_Comm_size(MPI_COMM_WORLD, &size);

	if (size%2 != 0)
	{
		if (rank == 0)
			FPRINTF(stderr, "We need an even number of processes.\n");

		MPI_Finalize();
		return STARPU_TEST_SKIPPED;
	}

	ret = starpu_init(NULL);
	STARPU_CHECK_RETURN_VALUE(ret, "starpu_init");
	ret = starpu_mpi_init(NULL, NULL, 0);
	STARPU_CHECK_RETURN_VALUE(ret, "starpu_mpi_init");

	for(i=0 ; i<3 ; i++)
	{
		starpu_variable_data_register(&tab_handle[i], STARPU_MAIN_RAM, (uintptr_t)&rank, sizeof(int));
		starpu_data_set_tag(tab_handle[i], i);
		request[i] = NULL;
	}
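
	/* Odd ranks send tags 0 and 1 and receive tag 2 synchronously in
	 * between; even ranks post the two matching non-blocking receives
	 * and send tag 2. */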
	int other_rank = rank%2 == 0 ? rank+1 : rank-1;
	FPRINTF_MPI("rank %d exchanging with rank %d\n", rank, other_rank);

	if (rank%2)
	{
		starpu_mpi_isend(tab_handle[0], &request[0], other_rank, 0, MPI_COMM_WORLD);
		starpu_mpi_recv(tab_handle[2], other_rank, 2, MPI_COMM_WORLD, NULL);
		starpu_mpi_isend(tab_handle[1], &request[1], other_rank, 1, MPI_COMM_WORLD);
		nb_requests = 2;
	}
	else
	{
		starpu_mpi_irecv(tab_handle[0], &request[0], other_rank, 0, MPI_COMM_WORLD);
		starpu_mpi_irecv(tab_handle[1], &request[1], other_rank, 1, MPI_COMM_WORLD);
		starpu_mpi_isend(tab_handle[2], &request[2], other_rank, 2, MPI_COMM_WORLD);
		nb_requests = 3;
	}
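
	/* Poll the outstanding requests with starpu_mpi_test() until every
	 * one of them has completed; the termination condition relies on a
	 * completed request being reset to NULL. */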
	int finished=0;
	while (!finished)
	{
		for(i=0 ; i<nb_requests ; i++)
		{
			if (request[i])
			{
				int flag;
				MPI_Status status;
				starpu_mpi_test(&request[i], &flag, &status);
				if (flag)
					FPRINTF_MPI("request[%d] = %d %p\n", i, flag, request[i]);
			}
		}
		finished = request[0] == NULL;
		for(i=1 ; i<nb_requests ; i++) finished = finished && request[i] == NULL;
	}
	for(i=0 ; i<3 ; i++)
		starpu_data_unregister(tab_handle[i]);

	starpu_mpi_shutdown();
	starpu_shutdown();
	MPI_Finalize();

	return 0;
}