mpi_decomposition_matrix.c

/* StarPU --- Runtime system for heterogeneous multicore architectures.
 *
 * Copyright (C) 2009-2020  Université de Bordeaux, CNRS (LaBRI UMR 5800), Inria
 * Copyright (C) 2010       Mehdi Juhoor
 *
 * StarPU is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation; either version 2.1 of the License, or (at
 * your option) any later version.
 *
 * StarPU is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * See the GNU Lesser General Public License in COPYING.LGPL for more details.
 */

#include "mpi_cholesky.h"

/* Returns the rank of the MPI node that owns the data block at index (y, x) */
int my_distrib(int y, int x, int nb_nodes)
{
    (void)nb_nodes;
    //return (x+y) % nb_nodes;
    return (x % dblockx) + (y % dblocky) * dblockx;
}
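
/* A minimal sketch, not part of the original file: dump the owner map that
 * my_distrib() produces. Assuming a dblockx x dblocky grid of nodes as set up
 * by the mpi_cholesky example, e.g. dblockx = dblocky = 2, the map tiles a
 * 2x2 rank pattern over the block indices, i.e. a 2D block-cyclic
 * distribution:
 *
 *   0 1 0 1 ...
 *   2 3 2 3 ...
 *   0 1 0 1 ...
 */
#if 0
static void print_distrib_map(unsigned nb, int nodes)
{
    unsigned y, x;
    for (y = 0; y < nb; y++)
    {
        for (x = 0; x < nb; x++)
            printf("%d ", my_distrib(y, x, nodes));
        printf("\n");
    }
}
#endif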

void matrix_display(float ***bmat, int rank, int nodes)
{
    int n;

    if (!display)
        return;
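
    /* Added note: every node executes the same nodes+1 barriers below; rank r
     * prints between the (r+1)-th and the (r+2)-th of them, so the nodes'
     * outputs appear serialized in rank order. */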
    starpu_mpi_barrier(MPI_COMM_WORLD);
    for (n = 0; n < rank; n++)
        starpu_mpi_barrier(MPI_COMM_WORLD);

    unsigned y;
    printf("[%d] Input :\n", rank);
    for (y = 0; y < nblocks; y++)
    {
        unsigned x;
        for (x = 0; x <= y; x++)
        {
            if (my_distrib(y, x, nodes) == rank)
            {
                unsigned j;
                printf("Block %u,%u :\n", x, y);
                for (j = 0; j < BLOCKSIZE; j++)
                {
                    unsigned i;
                    for (i = 0; i < BLOCKSIZE; i++)
                    {
                        if (x < y || i <= j)
                        {
                            printf("%2.2f\t", bmat[y][x][j + i*BLOCKSIZE]);
                        }
                        else
                        {
                            printf(".\t");
                        }
                    }
                    printf("\n");
                }
            }
        }
    }

    starpu_mpi_barrier(MPI_COMM_WORLD);
    for (n = rank+1; n < nodes; n++)
        starpu_mpi_barrier(MPI_COMM_WORLD);
}

/* Note: bmat is indexed by bmat[m][n][mm + nn*BLOCKSIZE],
 * i.e. the content of the tiles is column-major, but the array of tiles is
 * row-major to keep the m,n notation everywhere */
void matrix_init(float ****bmat, int rank, int nodes, int alloc_everywhere)
{
    unsigned nn, mm, m, n;

    *bmat = malloc(nblocks * sizeof(float **));
    for (m = 0; m < nblocks; m++)
    {
        (*bmat)[m] = malloc(nblocks * sizeof(float *));
        for (n = 0; n < nblocks; n++)
        {
            int mpi_rank = my_distrib(m, n, nodes);
            if (alloc_everywhere || (mpi_rank == rank))
            {
                starpu_malloc((void **)&(*bmat)[m][n], BLOCKSIZE*BLOCKSIZE*sizeof(float));
                if (mpi_rank == rank)
                    for (nn = 0; nn < BLOCKSIZE; nn++)
                    {
                        for (mm = 0; mm < BLOCKSIZE; mm++)
                        {
#ifndef STARPU_SIMGRID
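                            /* Added note: entry (r, c) of the global matrix is
                             * 1/(1 + r + c), plus `size` on the diagonal, which
                             * makes it diagonally dominant and hence symmetric
                             * positive definite, as Cholesky requires. */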
                            (*bmat)[m][n][mm + nn*BLOCKSIZE] = (1.0f/(1.0f+(nn+(m*BLOCKSIZE)+mm+(n*BLOCKSIZE)))) + ((nn+(m*BLOCKSIZE) == mm+(n*BLOCKSIZE))?1.0f*size:0.0f);
                            //mat[mm +nn*size] = ((nn == mm)?1.0f*size:0.0f);
#endif
                        }
                    }
            }
        }
    }
}
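
/* A minimal sketch, not part of the original file: with the layout described
 * above, and assuming i is the row and j the column in the same m,n numbering,
 * global element (i, j) can be read from a tile allocated on this node as
 * follows. */
static inline float matrix_get(float ***bmat, unsigned i, unsigned j)
{
    return bmat[i / BLOCKSIZE][j / BLOCKSIZE][(i % BLOCKSIZE) + (j % BLOCKSIZE) * BLOCKSIZE];
}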

void matrix_free(float ****bmat, int rank, int nodes, int alloc_everywhere)
{
    unsigned m, n;

    for (m = 0; m < nblocks; m++)
    {
        for (n = 0; n < nblocks; n++)
        {
            int mpi_rank = my_distrib(m, n, nodes);
            if (alloc_everywhere || (mpi_rank == rank))
            {
                starpu_free((void *)(*bmat)[m][n]);
            }
        }
        free((*bmat)[m]);
    }
    free(*bmat);
}
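
#if 0
/* A minimal usage sketch, not part of the original file: the lifecycle the
 * helpers above are meant for, as driven by the mpi_cholesky example. The
 * real driver differs; this only illustrates the call order. */
int matrix_lifecycle_example(int rank, int nodes)
{
    float ***bmat;

    matrix_init(&bmat, rank, nodes, /* alloc_everywhere = */ 0);
    matrix_display(bmat, rank, nodes);
    /* ... register tiles with starpu_matrix_data_register() and run the
     * distributed Cholesky factorization ... */
    matrix_free(&bmat, rank, nodes, 0);
    return 0;
}
#endif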