mpi_earlyrecv2.c
  1. /* StarPU --- Runtime system for heterogeneous multicore architectures.
  2. *
  3. * Copyright (C) 2009, 2010 Université de Bordeaux 1
  4. * Copyright (C) 2010, 2011, 2012, 2013 Centre National de la Recherche Scientifique
  5. *
  6. * StarPU is free software; you can redistribute it and/or modify
  7. * it under the terms of the GNU Lesser General Public License as published by
  8. * the Free Software Foundation; either version 2.1 of the License, or (at
  9. * your option) any later version.
  10. *
  11. * StarPU is distributed in the hope that it will be useful, but
  12. * WITHOUT ANY WARRANTY; without even the implied warranty of
  13. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
  14. *
  15. * See the GNU Lesser General Public License in COPYING.LGPL for more details.
  16. */
#include <starpu_mpi.h>
#include <string.h>
#include <unistd.h>
#include "helper.h"

//#define NB 1000
#define NB 10
  22. int main(int argc, char **argv)
  23. {
  24. int ret, rank, size, i;
  25. starpu_data_handle_t tab_handle[NB];
  26. MPI_Init(NULL, NULL);
  27. MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  28. MPI_Comm_size(MPI_COMM_WORLD, &size);
  29. if (size%2 != 0)
  30. {
  31. if (rank == 0)
  32. FPRINTF(stderr, "We need a even number of processes.\n");
  33. MPI_Finalize();
  34. return STARPU_TEST_SKIPPED;
  35. }
  36. ret = starpu_init(NULL);
  37. STARPU_CHECK_RETURN_VALUE(ret, "starpu_init");
  38. ret = starpu_mpi_init(NULL, NULL, 0);
  39. STARPU_CHECK_RETURN_VALUE(ret, "starpu_mpi_init");
  40. for(i=0 ; i<NB ; i++)
  41. {
  42. starpu_variable_data_register(&tab_handle[i], 0, (uintptr_t)&rank, sizeof(int));
  43. starpu_data_set_tag(tab_handle[i], i);
  44. }
  45. int other_rank = rank%2 == 0 ? rank+1 : rank-1;
  46. if (rank%2)
  47. {
  48. starpu_mpi_send(tab_handle[0], other_rank, 0, MPI_COMM_WORLD);
  49. starpu_mpi_send(tab_handle[NB-1], other_rank, NB-1, MPI_COMM_WORLD);
  50. for(i=1 ; i<NB-1 ; i++)
  51. {
  52. starpu_mpi_send(tab_handle[i], other_rank, i, MPI_COMM_WORLD);
  53. }
  54. }
  55. else
  56. {
  57. starpu_mpi_req req[NB];
  58. memset(req, 0, NB*sizeof(starpu_mpi_req));
  59. starpu_mpi_irecv(tab_handle[0], &req[0], other_rank, 0, MPI_COMM_WORLD);
  60. STARPU_ASSERT(req[0] != NULL);
  61. // We sleep to make sure that the data for the tag 9 will be received before the recv is posted
  62. usleep(2000000);
  63. for(i=1 ; i<NB ; i++)
  64. {
  65. starpu_mpi_irecv(tab_handle[i], &req[i], other_rank, i, MPI_COMM_WORLD);
  66. STARPU_ASSERT(req[i] != NULL);
  67. }
  68. for(i=0 ; i<NB ; i++)
  69. {
  70. starpu_mpi_wait(&req[i], NULL);
  71. }
  72. }
  73. for(i=0 ; i<NB ; i++)
  74. starpu_data_unregister(tab_handle[i]);
  75. starpu_mpi_shutdown();
  76. starpu_shutdown();
  77. MPI_Finalize();
  78. return 0;
  79. }