insert_task_sent_cache.c

/* StarPU --- Runtime system for heterogeneous multicore architectures.
 *
 * Copyright (C) 2011-2020 Université de Bordeaux, CNRS (LaBRI UMR 5800), Inria
 *
 * StarPU is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation; either version 2.1 of the License, or (at
 * your option) any later version.
 *
 * StarPU is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * See the GNU Lesser General Public License in COPYING.LGPL for more details.
 */
#include <starpu.h>
#include <starpu_mpi.h>
#include <math.h>
#include "helper.h"

#if !defined(STARPU_HAVE_SETENV)
#warning setenv is not defined. Skipping test
int main(void)
{
	return STARPU_TEST_SKIPPED;
}
#else
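
/* The kernel itself does nothing: the test only measures the MPI transfers
 * triggered by the task insertions below. */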
void func_cpu(void *descr[], void *_args)
{
	(void)descr;
	(void)_args;
}

struct starpu_codelet mycodelet =
{
	.cpu_funcs = {func_cpu},
	.nbuffers = 2,
	.modes = {STARPU_RW, STARPU_R},
	.model = &starpu_perfmodel_nop,
};

#define N 1000

/* Returns the MPI rank that owns the data with the given index */
int my_distrib(int x)
{
	return x;
}

void test_cache(int rank, char *enabled, size_t *comm_amount)
{
	int i;
	int ret;
	unsigned *v[2];
	starpu_data_handle_t data_handles[2];

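	/* The cache setting is read when StarPU-MPI initialises, so STARPU_MPI_CACHE
	 * has to be set before calling starpu_mpi_init_conf(). */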
	setenv("STARPU_MPI_CACHE", enabled, 1);

	ret = starpu_mpi_init_conf(NULL, NULL, 0, MPI_COMM_WORLD, NULL);
	STARPU_CHECK_RETURN_VALUE(ret, "starpu_mpi_init_conf");

	for(i = 0; i < 2; i++)
	{
		int j;
		v[i] = malloc(N * sizeof(unsigned));
		for(j = 0; j < N; j++)
		{
			v[i][j] = 12;
		}
	}

	for(i = 0; i < 2; i++)
	{
		int mpi_rank = my_distrib(i);
		if (mpi_rank == rank)
		{
			//FPRINTF(stderr, "[%d] Owning data[%d]\n", rank, i);
			starpu_vector_data_register(&data_handles[i], STARPU_MAIN_RAM, (uintptr_t)v[i], N, sizeof(unsigned));
		}
		else
		{
			/* I don't own this index, but will need it for my computations */
			//FPRINTF(stderr, "[%d] Neighbour of data[%d]\n", rank, i);
			starpu_vector_data_register(&data_handles[i], -1, (uintptr_t)NULL, N, sizeof(unsigned));
		}
		starpu_mpi_data_register(data_handles[i], i, mpi_rank);
	}

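	/* Each task runs on the owner of its STARPU_RW handle, here rank 0, so rank 1
	 * has to send data_handles[1] to it: once per insertion when the cache is
	 * disabled, only once in total when it is enabled. */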
	for(i = 0; i < 5; i++)
	{
		ret = starpu_mpi_task_insert(MPI_COMM_WORLD, &mycodelet, STARPU_RW, data_handles[0], STARPU_R, data_handles[1], 0);
		STARPU_CHECK_RETURN_VALUE(ret, "starpu_mpi_task_insert");
	}

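	/* Same pattern with the roles reversed: the tasks now run on rank 1 and
	 * rank 0 sends data_handles[0] to it. */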
	for(i = 0; i < 5; i++)
	{
		ret = starpu_mpi_task_insert(MPI_COMM_WORLD, &mycodelet, STARPU_RW, data_handles[1], STARPU_R, data_handles[0], 0);
		STARPU_CHECK_RETURN_VALUE(ret, "starpu_mpi_task_insert");
	}

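	/* Flushing evicts the copy of data_handles[0] cached on rank 1, so the next
	 * loop triggers one more transfer even when the cache is enabled. */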
	for(i = 0; i < 5; i++)
	{
		starpu_mpi_cache_flush(MPI_COMM_WORLD, data_handles[0]);
	}

	for(i = 0; i < 5; i++)
	{
		ret = starpu_mpi_task_insert(MPI_COMM_WORLD, &mycodelet, STARPU_RW, data_handles[1], STARPU_R, data_handles[0], 0);
		STARPU_CHECK_RETURN_VALUE(ret, "starpu_mpi_task_insert");
	}

	starpu_task_wait_for_all();

	for(i = 0; i < 2; i++)
	{
		starpu_data_unregister(data_handles[i]);
		free(v[i]);
	}

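	/* Retrieve the number of bytes this rank sent to each other rank; the
	 * counters are only filled in because STARPU_COMM_STATS is set to 1 in
	 * main() before StarPU-MPI is initialised. */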
	starpu_mpi_comm_amounts_retrieve(comm_amount);
	starpu_mpi_shutdown();
}

int main(int argc, char **argv)
{
	int rank, size;
	int result = 0;
	size_t *comm_amount_with_cache;
	size_t *comm_amount_without_cache;

	MPI_INIT_THREAD_real(&argc, &argv, MPI_THREAD_SERIALIZED);
	starpu_mpi_comm_rank(MPI_COMM_WORLD, &rank);
	starpu_mpi_comm_size(MPI_COMM_WORLD, &size);

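	/* The test compares the traffic between ranks 0 and 1, so it needs at least
	 * two MPI processes to be meaningful (and to keep the dst index used below
	 * within the bounds of the statistics arrays). */
	if (size < 2)
	{
		fprintf(stderr, "This test needs at least 2 MPI processes.\n");
		MPI_Finalize();
		return STARPU_TEST_SKIPPED;
	}
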
	setenv("STARPU_COMM_STATS", "1", 1);

	comm_amount_with_cache = malloc(size * sizeof(size_t));
	comm_amount_without_cache = malloc(size * sizeof(size_t));

	test_cache(rank, "0", comm_amount_without_cache);
	test_cache(rank, "1", comm_amount_with_cache);

	if (rank == 0 || rank == 1)
	{
		int dst = (rank == 0) ? 1 : 0;
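		/* Without the cache every one of the 5 insertions in a loop that reads a
		 * remote handle triggers a transfer; with the cache only the first one
		 * does (plus one extra after the flush), so the uncached traffic must be
		 * exactly 5 times the cached traffic. */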
		result = (comm_amount_without_cache[dst] == comm_amount_with_cache[dst] * 5);
		FPRINTF_MPI(stderr, "Communication cache mechanism is %sworking\n", result ? "" : "NOT ");
	}
	else
	{
		result = 1;
	}

	free(comm_amount_without_cache);
	free(comm_amount_with_cache);

	MPI_Finalize();
	return !result;
}
#endif