plu_solve.c

/* StarPU --- Runtime system for heterogeneous multicore architectures.
 *
 * Copyright (C) 2010-2020  Université de Bordeaux, CNRS (LaBRI UMR 5800), Inria
 *
 * StarPU is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation; either version 2.1 of the License, or (at
 * your option) any later version.
 *
 * StarPU is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * See the GNU Lesser General Public License in COPYING.LGPL for more details.
 */

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include <starpu.h>
#include <math.h>

#include "pxlu.h"	/* TYPE, MPI_TYPE, STARPU_PLU() name mangling and block accessors */

/*
 *	Various useful functions
 */

static double frobenius_norm(TYPE *v, unsigned n)
{
	double sum2 = 0.0;

	/* compute sqrt(Sum(|x|^2)) */
	unsigned i, j;
	for (j = 0; j < n; j++)
	for (i = 0; i < n; i++)
	{
		double a = fabsl((double)v[i+n*j]);
		sum2 += a*a;
	}

	return sqrt(sum2);
}

void STARPU_PLU(display_data_content)(TYPE *data, unsigned blocksize)
{
	if (!STARPU_PLU(display_flag)())
		return;

	fprintf(stderr, "DISPLAY BLOCK\n");

	unsigned i, j;
	for (j = 0; j < blocksize; j++)
	{
		for (i = 0; i < blocksize; i++)
		{
			fprintf(stderr, "%f ", data[j+i*blocksize]);
		}
		fprintf(stderr, "\n");
	}

	fprintf(stderr, "****\n");
}

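/* Copy the upper-triangular factor U out of a diagonal block holding an
 * in-place LU factorization: the strictly upper part is copied and the
 * diagonal of U is set to 1 (the diagonal stored in the block belongs to L). */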
void STARPU_PLU(extract_upper)(unsigned block_size, TYPE *inblock, TYPE *outblock)
{
	unsigned li, lj;

	for (lj = 0; lj < block_size; lj++)
	{
		/* Upper block diag is 1 */
		outblock[lj*(block_size + 1)] = (TYPE)1.0;

		for (li = lj + 1; li < block_size; li++)
		{
			outblock[lj + li*block_size] = inblock[lj + li*block_size];
		}
	}
}

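/* Copy the lower-triangular factor L (diagonal included) out of a diagonal
 * block holding an in-place LU factorization. */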
void STARPU_PLU(extract_lower)(unsigned block_size, TYPE *inblock, TYPE *outblock)
{
	unsigned li, lj;

	for (lj = 0; lj < block_size; lj++)
	{
		for (li = 0; li <= lj; li++)
		{
			outblock[lj + li*block_size] = inblock[lj + li*block_size];
		}
	}
}

/*
 *	Compute Ax = y
 */

static void STARPU_PLU(compute_ax_block)(unsigned block_size, TYPE *block_data, TYPE *sub_x, TYPE *sub_y)
{
	fprintf(stderr, "block data %p sub x %p sub y %p\n", block_data, sub_x, sub_y);

	CPU_GEMV("N", block_size, block_size, 1.0, block_data, block_size, sub_x, 1, 1.0, sub_y, 1);
}

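/* Multiply sub_x by the U part of a diagonal block: U is first extracted
 * into a scratch buffer so that the generic block GEMV above can be reused. */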
static void STARPU_PLU(compute_ax_block_upper)(unsigned size, unsigned nblocks,
				TYPE *block_data, TYPE *sub_x, TYPE *sub_y)
{
	unsigned block_size = size/nblocks;

	/* Take a copy of the upper part of the diagonal block */
	TYPE *upper_block_copy = calloc(block_size*block_size, sizeof(TYPE));
	STARPU_PLU(extract_upper)(block_size, block_data, upper_block_copy);

	STARPU_PLU(compute_ax_block)(block_size, upper_block_copy, sub_x, sub_y);

	free(upper_block_copy);
}

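/* Same as above, but for the L part (diagonal included) of a diagonal block. */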
static void STARPU_PLU(compute_ax_block_lower)(unsigned size, unsigned nblocks,
				TYPE *block_data, TYPE *sub_x, TYPE *sub_y)
{
	unsigned block_size = size/nblocks;

	/* Take a copy of the lower part of the diagonal block */
	TYPE *lower_block_copy = calloc(block_size*block_size, sizeof(TYPE));
	STARPU_PLU(extract_lower)(block_size, block_data, lower_block_copy);

	STARPU_PLU(compute_ax_block)(block_size, lower_block_copy, sub_x, sub_y);

	free(lower_block_copy);
}

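/* Compute y = LUx in two distributed passes: each process first applies its
 * blocks of U to x, the partial results are summed into x on rank 0 and
 * broadcast, then the same scheme is applied with the blocks of L to get y. */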
void STARPU_PLU(compute_lux)(unsigned size, TYPE *x, TYPE *y, unsigned nblocks, int rank)
{
	/* Create a temporary buffer in which every MPI process computes
	 * Ui x = yi, where Ui contains the blocks of U assigned to process i
	 * and 0 everywhere else. Ux is then the sum of all the yi. */
	TYPE *yi = calloc(size, sizeof(TYPE));

	fprintf(stderr, "Compute LU\n");

	unsigned block_size = size/nblocks;

	/* Compute Ui x = yi */
	unsigned long i, j;
	for (j = 0; j < nblocks; j++)
	{
		if (get_block_rank(j, j) == rank)
		{
			/* Diagonal block: only its upper (U) part contributes */
			TYPE *block_data = STARPU_PLU(get_block)(j, j);
			TYPE *sub_x = &x[j*block_size];
			TYPE *sub_yi = &yi[j*block_size];

			STARPU_PLU(compute_ax_block_upper)(size, nblocks, block_data, sub_x, sub_yi);
		}

		for (i = j + 1; i < nblocks; i++)
		{
			if (get_block_rank(i, j) == rank)
			{
				/* That block belongs to the current MPI process */
				TYPE *block_data = STARPU_PLU(get_block)(i, j);
				TYPE *sub_x = &x[i*block_size];
				TYPE *sub_yi = &yi[j*block_size];

				STARPU_PLU(compute_ax_block)(block_size, block_data, sub_x, sub_yi);
			}
		}
	}

	/* Sum the yi into x: x now holds Ux on rank 0 */
	MPI_Reduce(yi, x, size, MPI_TYPE, MPI_SUM, 0, MPI_COMM_WORLD);

	memset(yi, 0, size*sizeof(TYPE));

//	unsigned ind;
//	if (rank == 0)
//	{
//		fprintf(stderr, "INTERMEDIATE\n");
//		for (ind = 0; ind < STARPU_MIN(10, size); ind++)
//		{
//			fprintf(stderr, "x[%d] = %f\n", ind, (float)x[ind]);
//		}
//		fprintf(stderr, "****\n");
//	}

	/* Everyone needs x */
	int bcst_ret;
	bcst_ret = MPI_Bcast(x, size, MPI_TYPE, 0, MPI_COMM_WORLD);
	STARPU_ASSERT(bcst_ret == MPI_SUCCESS);

	/* Compute Li x = yi (with x = Ux) */
	for (j = 0; j < nblocks; j++)
	{
		if (j > 0)
		{
			for (i = 0; i < j; i++)
			{
				if (get_block_rank(i, j) == rank)
				{
					/* That block belongs to the current MPI process */
					TYPE *block_data = STARPU_PLU(get_block)(i, j);
					TYPE *sub_x = &x[i*block_size];
					TYPE *sub_yi = &yi[j*block_size];

					STARPU_PLU(compute_ax_block)(block_size, block_data, sub_x, sub_yi);
				}
			}
		}

		if (get_block_rank(j, j) == rank)
		{
			/* Diagonal block: only its lower (L) part contributes */
			TYPE *block_data = STARPU_PLU(get_block)(j, j);
			TYPE *sub_x = &x[j*block_size];
			TYPE *sub_yi = &yi[j*block_size];

			STARPU_PLU(compute_ax_block_lower)(size, nblocks, block_data, sub_x, sub_yi);
		}
	}

	/* Sum the yi into y on rank 0 */
	MPI_Reduce(yi, y, size, MPI_TYPE, MPI_SUM, 0, MPI_COMM_WORLD);

	free(yi);
}

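/*
 * How these checks are typically combined by the caller (a sketch, not code
 * from this file; the driver, the input vector x and the result buffers are
 * assumed to be set up elsewhere):
 *
 *	STARPU_PLU(compute_ax)(size, x, y_ref, nblocks, rank);  // y_ref = Ax while the blocks still hold A
 *	// ... run the distributed LU factorization on the blocks ...
 *	STARPU_PLU(compute_lux)(size, x, y_lu, nblocks, rank);  // y_lu = LUx (x itself is overwritten with Ux)
 *
 * On rank 0, y_ref and y_lu should then agree up to rounding error.
 */
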
/*
 *	Allocate a contiguous matrix on node 0 and fill it with the whole
 *	content of the matrix distributed across all nodes.
 */
TYPE *STARPU_PLU(reconstruct_matrix)(unsigned size, unsigned nblocks)
{
//	fprintf(stderr, "RECONSTRUCT MATRIX size %d nblocks %d\n", size, nblocks);

	TYPE *bigmatrix = calloc(size*size, sizeof(TYPE));

	unsigned block_size = size/nblocks;

	int rank;
	starpu_mpi_comm_rank(MPI_COMM_WORLD, &rank);

	unsigned bi, bj;
	for (bj = 0; bj < nblocks; bj++)
	for (bi = 0; bi < nblocks; bi++)
	{
		TYPE *block = NULL;

		int block_rank = get_block_rank(bi, bj);

		if (block_rank == 0)
		{
			block = STARPU_PLU(get_block)(bi, bj);
		}
		else
		{
			MPI_Status status;

			if (rank == 0)
			{
				block = calloc(block_size*block_size, sizeof(TYPE));

				int ret = MPI_Recv(block, block_size*block_size, MPI_TYPE, block_rank, 0, MPI_COMM_WORLD, &status);
				STARPU_ASSERT(ret == MPI_SUCCESS);
			}
			else if (rank == block_rank)
			{
				block = STARPU_PLU(get_block)(bi, bj);

				int ret = MPI_Send(block, block_size*block_size, MPI_TYPE, 0, 0, MPI_COMM_WORLD);
				STARPU_ASSERT(ret == MPI_SUCCESS);
			}
		}

		if (rank == 0)
		{
			unsigned i, j;
			for (j = 0; j < block_size; j++)
			for (i = 0; i < block_size; i++)
			{
				bigmatrix[(j + bj*block_size)+(i + bi*block_size)*size] = block[j+i*block_size];
			}

			if (block_rank != 0)
				free(block);
		}
	}

	return bigmatrix;
}

/* x and y must be valid (at least) on rank 0 */
void STARPU_PLU(compute_ax)(unsigned size, TYPE *x, TYPE *y, unsigned nblocks, int rank)
{
	unsigned block_size = size/nblocks;

	/* Send x to everyone */
	int bcst_ret;
	bcst_ret = MPI_Bcast(x, size, MPI_TYPE, 0, MPI_COMM_WORLD);
	STARPU_ASSERT(bcst_ret == MPI_SUCCESS);

	/* Create a temporary buffer in which every MPI process computes
	 * Ai x = yi, where Ai contains the blocks of A assigned to process i
	 * and 0 everywhere else. y is then the sum of all the yi. */
	TYPE *yi = calloc(size, sizeof(TYPE));

	/* Compute Ai x = yi */
	unsigned long i, j;
	for (j = 0; j < nblocks; j++)
	{
		for (i = 0; i < nblocks; i++)
		{
			if (get_block_rank(i, j) == rank)
			{
				/* That block belongs to the current MPI process */
				TYPE *block_data = STARPU_PLU(get_block)(i, j);
				TYPE *sub_x = &x[i*block_size];
				TYPE *sub_yi = &yi[j*block_size];

				STARPU_PLU(compute_ax_block)(block_size, block_data, sub_x, sub_yi);
			}
		}
	}

	/* Compute y as the sum of all the yi */
	MPI_Reduce(yi, y, size, MPI_TYPE, MPI_SUM, 0, MPI_COMM_WORLD);

	fprintf(stderr, "RANK %d - FOO 1 y[0] %f\n", rank, y[0]);

	free(yi);
}

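/* Gather the factored matrix on rank 0, split it back into L (diagonal
 * included) and U (unit diagonal), recompute L*U with TRMM and measure how
 * far it is from the saved input matrix Asaved. */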
void STARPU_PLU(compute_lu_matrix)(unsigned size, unsigned nblocks, TYPE *Asaved)
{
	TYPE *all_r = STARPU_PLU(reconstruct_matrix)(size, nblocks);

	unsigned display = STARPU_PLU(display_flag)();

	int rank;
	starpu_mpi_comm_rank(MPI_COMM_WORLD, &rank);

	if (rank == 0)
	{
		TYPE *L = malloc((size_t)size*size*sizeof(TYPE));
		TYPE *U = malloc((size_t)size*size*sizeof(TYPE));

		memset(L, 0, size*size*sizeof(TYPE));
		memset(U, 0, size*size*sizeof(TYPE));

		/* Split the factored matrix into L (diagonal included) and U (unit diagonal) */
		unsigned i, j;
		for (j = 0; j < size; j++)
		{
			for (i = 0; i < j; i++)
			{
				L[j+i*size] = all_r[j+i*size];
			}

			/* diag i = j */
			L[j+j*size] = all_r[j+j*size];
			U[j+j*size] = 1.0;

			for (i = j+1; i < size; i++)
			{
				U[j+i*size] = all_r[j+i*size];
			}
		}

		STARPU_PLU(display_data_content)(L, size);
		STARPU_PLU(display_data_content)(U, size);

		/* compute the product L*U, stored in place in L */
		CPU_TRMM("R", "U", "N", "U", size, size, 1.0f, U, size, L, size);

		if (display)
			fprintf(stderr, "\nLU\n");

		STARPU_PLU(display_data_content)(L, size);

		/* compute "LU - A" in L */
		CPU_AXPY(size*size, -1.0, Asaved, 1, L, 1);

		TYPE err = CPU_ASUM(size*size, L, 1);
		int max = CPU_IAMAX(size*size, L, 1);

		if (display)
			fprintf(stderr, "DISPLAY ERROR\n");

		STARPU_PLU(display_data_content)(L, size);

		fprintf(stderr, "(A - LU) Avg error : %e\n", err/(size*size));
		fprintf(stderr, "(A - LU) Max error : %e\n", L[max]);

		double residual = frobenius_norm(L, size);
		double matnorm = frobenius_norm(Asaved, size);

		fprintf(stderr, "||A-LU|| / (||A||*N) : %e\n", residual/(matnorm*size));

		free(L);
		free(U);
	}

	free(all_r);
}