/* pxlu_implicit.c */
  1. /* StarPU --- Runtime system for heterogeneous multicore architectures.
  2. *
  3. * Copyright (C) 2010-2011, 2013-2015 Université de Bordeaux
  4. * Copyright (C) 2010, 2012, 2013 CNRS
  5. *
  6. * StarPU is free software; you can redistribute it and/or modify
  7. * it under the terms of the GNU Lesser General Public License as published by
  8. * the Free Software Foundation; either version 2.1 of the License, or (at
  9. * your option) any later version.
  10. *
  11. * StarPU is distributed in the hope that it will be useful, but
  12. * WITHOUT ANY WARRANTY; without even the implied warranty of
  13. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
  14. *
  15. * See the GNU Lesser General Public License in COPYING.LGPL for more details.
  16. */
  17. #include "pxlu.h"
  18. #include "pxlu_kernels.h"
  19. #include <sys/time.h>
  20. //#define VERBOSE_INIT 1
  21. //#define DEBUG 1
/* File-scope state, filled in by STARPU_PLU(plu_main)() before any task
 * is submitted. */
static unsigned no_prio = 0;	/* non-zero: disable task priority hints */
static unsigned nblocks = 0;	/* number of blocks per matrix dimension */
static int rank = -1;		/* MPI rank of this process */
static int world_size = -1;	/* number of MPI processes */

/* Coordinates (i, j, k) identifying a task in the LU DAG.
 * NOTE(review): not referenced in this file — presumably kept for task
 * callbacks defined elsewhere; confirm before removing. */
struct callback_arg {
	unsigned i, j, k;
};
  29. /*
  30. * Task 11 (diagonal factorization)
  31. */
  32. static void create_task_11(unsigned k)
  33. {
  34. starpu_mpi_task_insert(MPI_COMM_WORLD,
  35. &STARPU_PLU(cl11),
  36. STARPU_VALUE, &k, sizeof(k),
  37. STARPU_VALUE, &k, sizeof(k),
  38. STARPU_VALUE, &k, sizeof(k),
  39. STARPU_RW, STARPU_PLU(get_block_handle)(k, k),
  40. STARPU_PRIORITY, !no_prio ?
  41. STARPU_MAX_PRIO : STARPU_MIN_PRIO,
  42. 0);
  43. }
/*
 * Task 12 (Update lower left (TRSM))
 *
 * Submit the triangular solve of step k that updates off-diagonal block
 * (k, j), j > k, in place, reading the freshly factorized pivot block
 * (k, k).
 */
static void create_task_12(unsigned k, unsigned j)
{
#warning temporary fix
	starpu_mpi_task_insert(MPI_COMM_WORLD,
	/* NOTE(review): the cl21 codelet is deliberately submitted here in
	 * place of cl12 (see the #warning above) — the 12/21 naming is in
	 * flux in this example; do not "fix" without checking the kernels. */
	//&STARPU_PLU(cl12),
			&STARPU_PLU(cl21),
			/* Two copies of j then k: the coordinate triple
			 * expected by the codelet for this task. */
			STARPU_VALUE, &j, sizeof(j),
			STARPU_VALUE, &j, sizeof(j),
			STARPU_VALUE, &k, sizeof(k),
			STARPU_R, STARPU_PLU(get_block_handle)(k, k),
			STARPU_RW, STARPU_PLU(get_block_handle)(k, j),
			/* Prioritize the solve feeding the next pivot
			 * (j == k+1); it is on the critical path. */
			STARPU_PRIORITY, !no_prio && (j == k+1) ?
			STARPU_MAX_PRIO : STARPU_MIN_PRIO,
			0);
}
/*
 * Task 21 (Update upper right (TRSM))
 *
 * Submit the triangular solve of step k that updates off-diagonal block
 * (i, k), i > k, in place, reading the freshly factorized pivot block
 * (k, k).
 */
static void create_task_21(unsigned k, unsigned i)
{
#warning temporary fix
	starpu_mpi_task_insert(MPI_COMM_WORLD,
	/* NOTE(review): the cl12 codelet is deliberately submitted here in
	 * place of cl21, mirroring the swap in create_task_12 (see the
	 * #warning above); do not "fix" without checking the kernels. */
	//&STARPU_PLU(cl21),
			&STARPU_PLU(cl12),
			/* Two copies of i then k: the coordinate triple
			 * expected by the codelet for this task. */
			STARPU_VALUE, &i, sizeof(i),
			STARPU_VALUE, &i, sizeof(i),
			STARPU_VALUE, &k, sizeof(k),
			STARPU_R, STARPU_PLU(get_block_handle)(k, k),
			STARPU_RW, STARPU_PLU(get_block_handle)(i, k),
			/* Prioritize the solve feeding the next pivot
			 * (i == k+1); it is on the critical path. */
			STARPU_PRIORITY, !no_prio && (i == k+1) ?
			STARPU_MAX_PRIO : STARPU_MIN_PRIO,
			0);
}
  80. /*
  81. * Task 22 (GEMM)
  82. */
  83. static void create_task_22(unsigned k, unsigned i, unsigned j)
  84. {
  85. starpu_mpi_task_insert(MPI_COMM_WORLD,
  86. &STARPU_PLU(cl22),
  87. STARPU_VALUE, &i, sizeof(i),
  88. STARPU_VALUE, &j, sizeof(j),
  89. STARPU_VALUE, &k, sizeof(k),
  90. STARPU_R, STARPU_PLU(get_block_handle)(k, j),
  91. STARPU_R, STARPU_PLU(get_block_handle)(i, k),
  92. STARPU_RW, STARPU_PLU(get_block_handle)(i, j),
  93. STARPU_PRIORITY, !no_prio && (i == k + 1) && (j == k +1) ?
  94. STARPU_MAX_PRIO : STARPU_MIN_PRIO,
  95. 0);
  96. }
/*
 * code to bootstrap the factorization
 *
 * Submit the whole right-looking block-LU DAG (steps 0..nblocks-1) through
 * the implicit-dependency starpu_mpi_task_insert() interface, wait for
 * completion, and return the elapsed wall-clock time as measured by
 * starpu_timing_now() (microseconds, per the /1000 -> ms conversion in the
 * commented-out print below — confirm against StarPU docs).
 */
double STARPU_PLU(plu_main)(unsigned _nblocks, int _rank, int _world_size)
{
	double start;
	double end;

	/* Publish the run parameters into the file-scope globals used by
	 * the create_task_* helpers. */
	nblocks = _nblocks;
	rank = _rank;
	world_size = _world_size;

	/* create all the DAG nodes */
	unsigned i,j,k;

	/* Synchronize all ranks so the timing window starts together. */
	starpu_mpi_barrier(MPI_COMM_WORLD);
	start = starpu_timing_now();

	for (k = 0; k < nblocks; k++)
	{
		/* Factorize the pivot block (k, k). */
		create_task_11(k);

		/* Triangular solves of the step-k row and column panels
		 * against the pivot block. */
		for (i = k+1; i<nblocks; i++)
		{
			create_task_12(k, i);
			create_task_21(k, i);
		}

		/* The pivot block is not read after step k: flush the MPI
		 * communication cache for it and, on the owning rank, tell
		 * StarPU it will not be used again.  Note this is done
		 * right after the solves are submitted — ordering relative
		 * to the task submissions matters. */
		starpu_mpi_cache_flush(MPI_COMM_WORLD, STARPU_PLU(get_block_handle)(k,k));
		if (get_block_rank(k, k) == _rank)
			starpu_data_wont_use(STARPU_PLU(get_block_handle)(k,k));

		/* GEMM update of every block of the trailing submatrix. */
		for (i = k+1; i<nblocks; i++)
		{
			for (j = k+1; j<nblocks; j++)
			{
				create_task_22(k, i, j);
			}
		}

		/* Same flush / wont-use treatment for the step-k row and
		 * column panel blocks, now that all their readers (the
		 * GEMMs above) have been submitted. */
		for (i = k+1; i<nblocks; i++)
		{
			starpu_mpi_cache_flush(MPI_COMM_WORLD, STARPU_PLU(get_block_handle)(k,i));
			if (get_block_rank(k, i) == _rank)
				starpu_data_wont_use(STARPU_PLU(get_block_handle)(k,i));
			starpu_mpi_cache_flush(MPI_COMM_WORLD, STARPU_PLU(get_block_handle)(i,k));
			if (get_block_rank(i, k) == _rank)
				starpu_data_wont_use(STARPU_PLU(get_block_handle)(i,k));
		}
	}

	/* Drain the local task queue, then resynchronize all ranks before
	 * stopping the clock. */
	starpu_task_wait_for_all();

	starpu_mpi_barrier(MPI_COMM_WORLD);

	end = starpu_timing_now();

	double timing = end - start;
	// fprintf(stderr, "RANK %d -> took %f ms\n", rank, timing/1000);

	return timing;
}