insert_task_sent_cache.c

/* StarPU --- Runtime system for heterogeneous multicore architectures.
 *
 * Copyright (C) 2011, 2012, 2013, 2014, 2015, 2016, 2017 CNRS
 *
 * StarPU is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation; either version 2.1 of the License, or (at
 * your option) any later version.
 *
 * StarPU is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * See the GNU Lesser General Public License in COPYING.LGPL for more details.
 */
#include <starpu.h>
#include <starpu_mpi.h>
#include <math.h>
#include "helper.h"

#if !defined(STARPU_HAVE_SETENV)
#warning setenv is not defined. Skipping test
int main(void)
{
        return STARPU_TEST_SKIPPED;
}
#else
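
/* The kernel does nothing: this test only measures the MPI traffic generated
 * by starpu_mpi_task_insert(), not computation. */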
void func_cpu(void *descr[], void *_args)
{
        (void)descr;
        (void)_args;
}

struct starpu_codelet mycodelet =
{
        .cpu_funcs = {func_cpu},
        .nbuffers = 2,
        .modes = {STARPU_RW, STARPU_R},
        .model = &starpu_perfmodel_nop,
};

#define N 1000

/* Returns the MPI rank that owns data index x */
int my_distrib(int x)
{
        return x;
}
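
/* Runs a small task graph with STARPU_MPI_CACHE set to 'enabled' ("0" or "1")
 * and returns, in comm_amount, the amount of data this node sent to each
 * other node, as reported by starpu_mpi_comm_amounts_retrieve(). */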
void test_cache(int rank, char *enabled, size_t *comm_amount)
{
        int i;
        int ret;
        unsigned *v[2];
        starpu_data_handle_t data_handles[2];

        setenv("STARPU_MPI_CACHE", enabled, 1);

        ret = starpu_init(NULL);
        STARPU_CHECK_RETURN_VALUE(ret, "starpu_init");
        ret = starpu_mpi_init(NULL, NULL, 0);
        STARPU_CHECK_RETURN_VALUE(ret, "starpu_mpi_init");
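
        /* Allocate and initialize the two local vectors */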
        for(i = 0; i < 2; i++)
        {
                int j;
                v[i] = malloc(N * sizeof(unsigned));
                for(j = 0; j < N; j++)
                {
                        v[i][j] = 12;
                }
        }
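
        /* Register each vector and declare its MPI owner: data i is owned by
         * rank my_distrib(i) and identified by MPI tag i */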
        for(i = 0; i < 2; i++)
        {
                int mpi_rank = my_distrib(i);
                if (mpi_rank == rank)
                {
                        //FPRINTF(stderr, "[%d] Owning data[%d]\n", rank, i);
                        starpu_vector_data_register(&data_handles[i], STARPU_MAIN_RAM, (uintptr_t)v[i], N, sizeof(unsigned));
                }
                else
                {
                        /* I don't own this index, but will need it for my computations */
                        //FPRINTF(stderr, "[%d] Neighbour of data[%d]\n", rank, i);
                        starpu_vector_data_register(&data_handles[i], -1, (uintptr_t)NULL, N, sizeof(unsigned));
                }
                starpu_mpi_data_register(data_handles[i], i, mpi_rank);
        }
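
        /* 5 tasks writing data[0] and reading data[1]: each task runs on the
         * owner of data[0], so data[1] has to be sent to that node.  With the
         * cache enabled it should be sent only once; without it, once per task. */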
        for(i = 0; i < 5; i++)
        {
                ret = starpu_mpi_task_insert(MPI_COMM_WORLD, &mycodelet, STARPU_RW, data_handles[0], STARPU_R, data_handles[1], 0);
                STARPU_CHECK_RETURN_VALUE(ret, "starpu_mpi_task_insert");
        }
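
        /* Same pattern in the other direction: now data[0] is the one sent to
         * the owner of data[1] */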
        for(i = 0; i < 5; i++)
        {
                ret = starpu_mpi_task_insert(MPI_COMM_WORLD, &mycodelet, STARPU_RW, data_handles[1], STARPU_R, data_handles[0], 0);
                STARPU_CHECK_RETURN_VALUE(ret, "starpu_mpi_task_insert");
        }
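
        /* Flush the cached copies of data[0] held by non-owner nodes, so the
         * next reader has to fetch it again even with the cache enabled
         * (repeating the flush is harmless) */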
        for(i = 0; i < 5; i++)
        {
                starpu_mpi_cache_flush(MPI_COMM_WORLD, data_handles[0]);
        }
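
        /* After the flush, data[0] must be transferred once more even with the
         * cache enabled */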
        for(i = 0; i < 5; i++)
        {
                ret = starpu_mpi_task_insert(MPI_COMM_WORLD, &mycodelet, STARPU_RW, data_handles[1], STARPU_R, data_handles[0], 0);
                STARPU_CHECK_RETURN_VALUE(ret, "starpu_mpi_task_insert");
        }
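
        /* Wait for all tasks, then collect how much data this node sent to each
         * other node (STARPU_COMM_STATS=1, set in main(), enables the counters) */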
        starpu_task_wait_for_all();

        for(i = 0; i < 2; i++)
        {
                starpu_data_unregister(data_handles[i]);
                free(v[i]);
        }

        starpu_mpi_comm_amounts_retrieve(comm_amount);
        starpu_mpi_shutdown();
        starpu_shutdown();
}

int main(int argc, char **argv)
{
        int rank, size;
        int result = 0;
        size_t *comm_amount_with_cache;
        size_t *comm_amount_without_cache;

        MPI_INIT_THREAD_real(&argc, &argv, MPI_THREAD_SERIALIZED);
        starpu_mpi_comm_rank(MPI_COMM_WORLD, &rank);
        starpu_mpi_comm_size(MPI_COMM_WORLD, &size);

        setenv("STARPU_COMM_STATS", "1", 1);

        comm_amount_with_cache = malloc(size * sizeof(size_t));
        comm_amount_without_cache = malloc(size * sizeof(size_t));

        test_cache(rank, "0", comm_amount_without_cache);
        test_cache(rank, "1", comm_amount_with_cache);
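
        /* 15 tasks are inserted in each run; between ranks 0 and 1 the uncached
         * run should transfer data for every insertion, while the cached run
         * transfers it only 3 times (once per direction plus once after the
         * flush), hence the expected factor of 5 per destination */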
        if (rank == 0 || rank == 1)
        {
                int dst = (rank == 0) ? 1 : 0;
                result = (comm_amount_without_cache[dst] == comm_amount_with_cache[dst] * 5);
                FPRINTF_MPI(stderr, "Communication cache mechanism is %sworking\n", result ? "" : "NOT ");
        }
        else
        {
                result = 1;
        }

        free(comm_amount_without_cache);
        free(comm_amount_with_cache);

        MPI_Finalize();
        return !result;
}
#endif