ring_async.c

/*
 * StarPU
 * Copyright (C) INRIA 2008-2009 (see AUTHORS file)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation; either version 2.1 of the License, or (at
 * your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * See the GNU Lesser General Public License in COPYING.LGPL for more details.
 */

#include <starpu_mpi.h>

#define NITER 2048
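
/* The token that travels around the MPI ring, and the StarPU data handle
 * through which both the tasks and the MPI transfers access it. */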
unsigned token = 42;
starpu_data_handle token_handle;

#ifdef USE_CUDA
extern void increment_cuda(void *descr[], __attribute__ ((unused)) void *_args);
#endif
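
/* CPU implementation of the increment codelet: it fetches the address of the
 * registered vector from the buffer descriptor and increments the token. */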
void increment_core(void *descr[], __attribute__ ((unused)) void *_args)
{
	unsigned *tokenptr = (unsigned *)GET_VECTOR_PTR(descr[0]);
	(*tokenptr)++;
}
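
/* Codelet that may run on a CPU core or, when built with USE_CUDA, on a CUDA
 * device; it operates on a single data buffer. */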
static starpu_codelet increment_cl = {
	.where = CORE|CUDA,
#ifdef USE_CUDA
	.cuda_func = increment_cuda,
#endif
	.core_func = increment_core,
	.nbuffers = 1
};
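
/* Submit a synchronous StarPU task that increments the token in place
 * (the handle is accessed in read-write mode). */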
void increment_token(void)
{
	struct starpu_task *task = starpu_task_create();

	task->cl = &increment_cl;
	task->buffers[0].handle = token_handle;
	task->buffers[0].mode = STARPU_RW;
	task->synchronous = 1;

	starpu_submit_task(task);
}
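
/* Each process receives the token from its predecessor, increments it through
 * a StarPU task, and forwards it to its successor; after NITER trips around
 * the ring, the last rank checks the final value. */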
int main(int argc, char **argv)
{
	MPI_Init(NULL, NULL);

	int rank, size;
	MPI_Comm_rank(MPI_COMM_WORLD, &rank);
	MPI_Comm_size(MPI_COMM_WORLD, &size);

	if (size < 2)
	{
		if (rank == 0)
			fprintf(stderr, "We need at least 2 processes.\n");

		MPI_Finalize();
		return 0;
	}

	starpu_init(NULL);
	starpu_mpi_initialize();
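
	/* Register the token with StarPU as a vector of a single unsigned
	 * element located in main memory (memory node 0). */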
	starpu_register_vector_data(&token_handle, 0, (uintptr_t)&token, 1, sizeof(unsigned));

	unsigned nloops = NITER;
	unsigned loop;

	unsigned last_loop = nloops - 1;
	unsigned last_rank = size - 1;
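
	/* Each (iteration, rank) pair gets its own MPI tag, so successive
	 * messages around the ring cannot be confused with one another. */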
	for (loop = 0; loop < nloops; loop++)
	{
		int tag = loop*size + rank;
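
		/* Every process, except rank 0 on the first iteration, waits for
		 * the token coming from its predecessor in the ring. */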
		if (!((loop == 0) && (rank == 0)))
		{
			token = 0;

			MPI_Status status;
			starpu_mpi_req req;

			starpu_mpi_irecv(token_handle, &req, (rank+size-1)%size, tag, MPI_COMM_WORLD);
			starpu_mpi_wait(&req, &status);
		}
		else {
			token = 0;
			fprintf(stdout, "Start with token value %u\n", token);
		}

		increment_token();
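
		/* Pass the token on to the successor, except on the very last step,
		 * where the final value is read back on the host instead. */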
		if (!((loop == last_loop) && (rank == last_rank)))
		{
			starpu_mpi_req req;
			MPI_Status status;

			starpu_mpi_isend(token_handle, &req, (rank+1)%size, tag+1, MPI_COMM_WORLD);
			starpu_mpi_wait(&req, &status);
		}
		else {
			starpu_sync_data_with_mem(token_handle, STARPU_R);
			fprintf(stdout, "Finished : token value %u\n", token);
			starpu_release_data_from_mem(token_handle);
		}
	}

	starpu_mpi_shutdown();
	starpu_shutdown();

	MPI_Finalize();
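
	/* The token was incremented once per process and per iteration, so the
	 * last rank should end up with nloops*size. */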
	if (rank == last_rank)
	{
		STARPU_ASSERT(token == nloops*size);
	}

	return 0;
}