block_interface.c

/* StarPU --- Runtime system for heterogeneous multicore architectures.
 *
 * Copyright (C) 2009, 2010 Université de Bordeaux 1
 * Copyright (C) 2010 Centre National de la Recherche Scientifique
 *
 * StarPU is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation; either version 2.1 of the License, or (at
 * your option) any later version.
 *
 * StarPU is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * See the GNU Lesser General Public License in COPYING.LGPL for more details.
 */
#include <starpu_mpi.h>
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>

#define NITER 2048
#define BIGSIZE 128
#define SIZE 64

int main(int argc, char **argv)
{
	MPI_Init(NULL, NULL);

	int rank, size;
	MPI_Comm_rank(MPI_COMM_WORLD, &rank);
	MPI_Comm_size(MPI_COMM_WORLD, &size);

	if (size < 2)
	{
		if (rank == 0)
			fprintf(stderr, "We need at least 2 processes.\n");

		MPI_Finalize();
		return 0;
	}

	/* We only use 2 nodes for that test */
	if (rank >= 2)
	{
		MPI_Finalize();
		return 0;
	}

	starpu_init(NULL);
	starpu_mpi_initialize();

	/* Node 0 will allocate a big block and only register an inner part of
	 * it as the block data, Node 1 will allocate a block of small size and
	 * register it directly. Node 0 and 1 will then exchange the content of
	 * their blocks. */
	float *block;
	starpu_data_handle block_handle;

	if (rank == 0)
	{
		block = calloc(BIGSIZE*BIGSIZE*BIGSIZE, sizeof(float));
		assert(block);

		/* fill the inner block */
		unsigned i, j, k;
		for (k = 0; k < SIZE; k++)
		for (j = 0; j < SIZE; j++)
		for (i = 0; i < SIZE; i++)
		{
			block[i + j*BIGSIZE + k*BIGSIZE*BIGSIZE] = 1.0f;
		}

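		/* Register only the inner SIZE^3 sub-block: the leading
		 * dimensions passed here are those of the enclosing BIGSIZE^3
		 * allocation (BIGSIZE elements between consecutive rows,
		 * BIGSIZE*BIGSIZE between consecutive planes). */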
		starpu_block_data_register(&block_handle, 0,
				(uintptr_t)block, BIGSIZE, BIGSIZE*BIGSIZE,
				SIZE, SIZE, SIZE, sizeof(float));
	}
	else /* rank == 1 */
	{
		block = calloc(SIZE*SIZE*SIZE, sizeof(float));
		assert(block);

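		/* Here the block is stored contiguously, so the leading
		 * dimensions simply match the block dimensions. */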
		starpu_block_data_register(&block_handle, 0,
				(uintptr_t)block, SIZE, SIZE*SIZE,
				SIZE, SIZE, SIZE, sizeof(float));
	}

	if (rank == 0)
	{
		starpu_mpi_send(block_handle, 1, 0x42, MPI_COMM_WORLD);

		MPI_Status status;
		starpu_mpi_recv(block_handle, 1, 0x1337, MPI_COMM_WORLD, &status);

		/* check the content of the block */
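		/* the read-mode acquisition blocks until the received data is
		 * available to the application in main memory */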
		starpu_data_acquire(block_handle, STARPU_R);
		unsigned i, j, k;
		for (k = 0; k < SIZE; k++)
		for (j = 0; j < SIZE; j++)
		for (i = 0; i < SIZE; i++)
		{
			assert(block[i + j*BIGSIZE + k*BIGSIZE*BIGSIZE] == 33.0f);
		}
		starpu_data_release(block_handle);
	}
	else /* rank == 1 */
	{
		MPI_Status status;
		starpu_mpi_recv(block_handle, 0, 0x42, MPI_COMM_WORLD, &status);

		/* check the content of the block and modify it */
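		/* read-write acquisition: the buffer is checked and then
		 * overwritten before being sent back to node 0 */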
		starpu_data_acquire(block_handle, STARPU_RW);
		unsigned i, j, k;
		for (k = 0; k < SIZE; k++)
		for (j = 0; j < SIZE; j++)
		for (i = 0; i < SIZE; i++)
		{
			assert(block[i + j*SIZE + k*SIZE*SIZE] == 1.0f);
			block[i + j*SIZE + k*SIZE*SIZE] = 33.0f;
		}
		starpu_data_release(block_handle);

		starpu_mpi_send(block_handle, 0, 0x1337, MPI_COMM_WORLD);
	}

	fprintf(stdout, "Rank %d is done\n", rank);
	fflush(stdout);

	starpu_mpi_shutdown();
	starpu_shutdown();

	MPI_Finalize();

	return 0;
}