plu_solve.c

/* StarPU --- Runtime system for heterogeneous multicore architectures.
 *
 * Copyright (C) 2010, 2014 Université de Bordeaux
 * Copyright (C) 2010, 2016, 2017 CNRS
 *
 * StarPU is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation; either version 2.1 of the License, or (at
 * your option) any later version.
 *
 * StarPU is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * See the GNU Lesser General Public License in COPYING.LGPL for more details.
 */

#include <starpu.h>
#include <math.h>
#include <stdio.h>  /* fprintf */
#include <stdlib.h> /* malloc, calloc, free */
#include <string.h> /* memset */
#include "pxlu.h"

/*
 * Various useful functions
 */

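/* Compute the Frobenius norm sqrt(sum |v[i,j]|^2) of an n x n matrix. */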
static double frobenius_norm(TYPE *v, unsigned n)
{
	double sum2 = 0.0;

	/* compute sqrt(Sum(|x|^2)) */
	unsigned i, j;
	for (j = 0; j < n; j++)
	for (i = 0; i < n; i++)
	{
		double a = fabsl((double)v[i+n*j]);
		sum2 += a*a;
	}

	return sqrt(sum2);
}

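/* Dump the content of a blocksize x blocksize block on stderr, provided the
 * display flag is set. */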
void STARPU_PLU(display_data_content)(TYPE *data, unsigned blocksize)
{
	if (!STARPU_PLU(display_flag)())
		return;

	fprintf(stderr, "DISPLAY BLOCK\n");

	unsigned i, j;
	for (j = 0; j < blocksize; j++)
	{
		for (i = 0; i < blocksize; i++)
		{
			fprintf(stderr, "%f ", data[j+i*blocksize]);
		}
		fprintf(stderr, "\n");
	}

	fprintf(stderr, "****\n");
}

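/* Extract the upper (U) factor of an in-place LU block: copy the strictly
 * upper part of inblock into outblock and set a unit diagonal. */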
void STARPU_PLU(extract_upper)(unsigned block_size, TYPE *inblock, TYPE *outblock)
{
	unsigned li, lj;

	for (lj = 0; lj < block_size; lj++)
	{
		/* Upper block diag is 1 */
		outblock[lj*(block_size + 1)] = (TYPE)1.0;

		for (li = lj + 1; li < block_size; li++)
		{
			outblock[lj + li*block_size] = inblock[lj + li*block_size];
		}
	}
}

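/* Extract the lower (L) factor of an in-place LU block: copy the lower part
 * of inblock, diagonal included, into outblock. */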
void STARPU_PLU(extract_lower)(unsigned block_size, TYPE *inblock, TYPE *outblock)
{
	unsigned li, lj;

	for (lj = 0; lj < block_size; lj++)
	{
		for (li = 0; li <= lj; li++)
		{
			outblock[lj + li*block_size] = inblock[lj + li*block_size];
		}
	}
}

/*
 * Compute Ax = y
 */

/* Accumulate sub_y += block_data * sub_x for a single block, using GEMV with
 * alpha = beta = 1. */
static void STARPU_PLU(compute_ax_block)(unsigned block_size, TYPE *block_data, TYPE *sub_x, TYPE *sub_y)
{
	fprintf(stderr, "block data %p sub x %p sub y %p\n", block_data, sub_x, sub_y);
	CPU_GEMV("N", block_size, block_size, 1.0, block_data, block_size, sub_x, 1, 1.0, sub_y, 1);
}

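/* Same as compute_ax_block, but only the upper (U) part of the diagonal
 * block is taken into account, through a temporary copy. */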
static void STARPU_PLU(compute_ax_block_upper)(unsigned size, unsigned nblocks,
				TYPE *block_data, TYPE *sub_x, TYPE *sub_y)
{
	unsigned block_size = size/nblocks;

	/* Take a copy of the upper part of the diagonal block */
	TYPE *upper_block_copy = calloc(block_size*block_size, sizeof(TYPE));
	STARPU_PLU(extract_upper)(block_size, block_data, upper_block_copy);

	STARPU_PLU(compute_ax_block)(block_size, upper_block_copy, sub_x, sub_y);

	free(upper_block_copy);
}

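/* Same as compute_ax_block, but only the lower (L) part of the diagonal
 * block is taken into account, through a temporary copy. */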
static void STARPU_PLU(compute_ax_block_lower)(unsigned size, unsigned nblocks,
				TYPE *block_data, TYPE *sub_x, TYPE *sub_y)
{
	unsigned block_size = size/nblocks;

	/* Take a copy of the lower part of the diagonal block */
	TYPE *lower_block_copy = calloc(block_size*block_size, sizeof(TYPE));
	STARPU_PLU(extract_lower)(block_size, block_data, lower_block_copy);

	STARPU_PLU(compute_ax_block)(block_size, lower_block_copy, sub_x, sub_y);

	free(lower_block_copy);
}

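/* Compute y = L*U*x in a distributed fashion: each process applies the U
 * blocks it owns (then the L blocks), and the partial results are summed on
 * node 0. */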
void STARPU_PLU(compute_lux)(unsigned size, TYPE *x, TYPE *y, unsigned nblocks, int rank)
{
	/* Create temporary buffers where all MPI processes are going to
	 * compute Ui x = yi where Ui is the matrix containing the blocks of U
	 * assigned to process i, and 0 everywhere else. We then have y as the
	 * sum of all yi. */
	TYPE *yi = calloc(size, sizeof(TYPE));

	fprintf(stderr, "Compute LU\n");

	unsigned block_size = size/nblocks;

	/* Compute UiX = Yi */
	unsigned long i, j;
	for (j = 0; j < nblocks; j++)
	{
		if (get_block_rank(j, j) == rank)
		{
			TYPE *block_data = STARPU_PLU(get_block)(j, j);
			TYPE *sub_x = &x[j*block_size];
			TYPE *sub_yi = &yi[j*block_size];

			STARPU_PLU(compute_ax_block_upper)(size, nblocks, block_data, sub_x, sub_yi);
		}

		for (i = j + 1; i < nblocks; i++)
		{
			if (get_block_rank(i, j) == rank)
			{
				/* That block belongs to the current MPI process */
				TYPE *block_data = STARPU_PLU(get_block)(i, j);
				TYPE *sub_x = &x[i*block_size];
				TYPE *sub_yi = &yi[j*block_size];

				STARPU_PLU(compute_ax_block)(block_size, block_data, sub_x, sub_yi);
			}
		}
	}
	/* Reduce the sum of all yi into x on node 0 */
	MPI_Reduce(yi, x, size, MPI_TYPE, MPI_SUM, 0, MPI_COMM_WORLD);
	memset(yi, 0, size*sizeof(TYPE));

//	unsigned ind;
//	if (rank == 0)
//	{
//		fprintf(stderr, "INTERMEDIATE\n");
//		for (ind = 0; ind < STARPU_MIN(10, size); ind++)
//		{
//			fprintf(stderr, "x[%d] = %f\n", ind, (float)x[ind]);
//		}
//		fprintf(stderr, "****\n");
//	}

	/* Everyone needs x */
	int bcst_ret;
	bcst_ret = MPI_Bcast(x, size, MPI_TYPE, 0, MPI_COMM_WORLD);
	STARPU_ASSERT(bcst_ret == MPI_SUCCESS);
	/* Compute LiX = Yi (with X = UX) */
	for (j = 0; j < nblocks; j++)
	{
		if (j > 0)
		for (i = 0; i < j; i++)
		{
			if (get_block_rank(i, j) == rank)
			{
				/* That block belongs to the current MPI process */
				TYPE *block_data = STARPU_PLU(get_block)(i, j);
				TYPE *sub_x = &x[i*block_size];
				TYPE *sub_yi = &yi[j*block_size];

				STARPU_PLU(compute_ax_block)(block_size, block_data, sub_x, sub_yi);
			}
		}

		if (get_block_rank(j, j) == rank)
		{
			TYPE *block_data = STARPU_PLU(get_block)(j, j);
			TYPE *sub_x = &x[j*block_size];
			TYPE *sub_yi = &yi[j*block_size];

			STARPU_PLU(compute_ax_block_lower)(size, nblocks, block_data, sub_x, sub_yi);
		}
	}

	/* Reduce the sum of all yi into y on node 0 */
	MPI_Reduce(yi, y, size, MPI_TYPE, MPI_SUM, 0, MPI_COMM_WORLD);

	free(yi);
}

/*
 * Allocate a contiguous matrix on node 0 and fill it with the whole
 * content of the matrix distributed across all nodes.
 */
TYPE *STARPU_PLU(reconstruct_matrix)(unsigned size, unsigned nblocks)
{
//	fprintf(stderr, "RECONSTRUCT MATRIX size %d nblocks %d\n", size, nblocks);

	TYPE *bigmatrix = calloc(size*size, sizeof(TYPE));

	unsigned block_size = size/nblocks;

	int rank;
	starpu_mpi_comm_rank(MPI_COMM_WORLD, &rank);

	unsigned bi, bj;
	for (bj = 0; bj < nblocks; bj++)
	for (bi = 0; bi < nblocks; bi++)
	{
		TYPE *block = NULL;

		int block_rank = get_block_rank(bi, bj);

		if (block_rank == 0)
		{
			block = STARPU_PLU(get_block)(bi, bj);
		}
		else
		{
			MPI_Status status;

			if (rank == 0)
			{
				block = calloc(block_size*block_size, sizeof(TYPE));

				int ret = MPI_Recv(block, block_size*block_size, MPI_TYPE, block_rank, 0, MPI_COMM_WORLD, &status);
				STARPU_ASSERT(ret == MPI_SUCCESS);
			}
			else if (rank == block_rank)
			{
				block = STARPU_PLU(get_block)(bi, bj);

				int ret = MPI_Send(block, block_size*block_size, MPI_TYPE, 0, 0, MPI_COMM_WORLD);
				STARPU_ASSERT(ret == MPI_SUCCESS);
			}
		}

		if (rank == 0)
		{
			unsigned j, i;
			for (j = 0; j < block_size; j++)
			for (i = 0; i < block_size; i++)
			{
				bigmatrix[(j + bj*block_size)+(i+bi*block_size)*size] = block[j+i*block_size];
			}

			if (get_block_rank(bi, bj) != 0)
				free(block);
		}
	}

	return bigmatrix;
}

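/* Compute y = A*x in a distributed fashion: each process applies the blocks
 * of A it owns and the partial results are summed on node 0. */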
/* x and y must be valid (at least) on node 0 */
void STARPU_PLU(compute_ax)(unsigned size, TYPE *x, TYPE *y, unsigned nblocks, int rank)
{
	unsigned block_size = size/nblocks;

	/* Send x to everyone */
	int bcst_ret;
	bcst_ret = MPI_Bcast(x, size, MPI_TYPE, 0, MPI_COMM_WORLD);
	STARPU_ASSERT(bcst_ret == MPI_SUCCESS);

	/* Create temporary buffers where all MPI processes are going to
	 * compute Ai x = yi where Ai is the matrix containing the blocks of A
	 * assigned to process i, and 0 everywhere else. We then have y as the
	 * sum of all yi. */
	TYPE *yi = calloc(size, sizeof(TYPE));

	/* Compute Ai x = yi */
	unsigned long i, j;
	for (j = 0; j < nblocks; j++)
	{
		for (i = 0; i < nblocks; i++)
		{
			if (get_block_rank(i, j) == rank)
			{
				/* That block belongs to the current MPI process */
				TYPE *block_data = STARPU_PLU(get_block)(i, j);
				TYPE *sub_x = &x[i*block_size];
				TYPE *sub_yi = &yi[j*block_size];

				STARPU_PLU(compute_ax_block)(block_size, block_data, sub_x, sub_yi);
			}
		}
	}

	/* Compute the sum of all yi into y on node 0 */
	MPI_Reduce(yi, y, size, MPI_TYPE, MPI_SUM, 0, MPI_COMM_WORLD);

	fprintf(stderr, "RANK %d - FOO 1 y[0] %f\n", rank, y[0]);

	free(yi);
}

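/* Gather the distributed LU factors on node 0, recompute L*U there and
 * compare it against the original matrix Asaved to evaluate the error of
 * the factorization. */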
void STARPU_PLU(compute_lu_matrix)(unsigned size, unsigned nblocks, TYPE *Asaved)
{
	TYPE *all_r = STARPU_PLU(reconstruct_matrix)(size, nblocks);

	unsigned display = STARPU_PLU(display_flag)();

	int rank;
	starpu_mpi_comm_rank(MPI_COMM_WORLD, &rank);

	if (rank == 0)
	{
		TYPE *L = malloc((size_t)size*size*sizeof(TYPE));
		TYPE *U = malloc((size_t)size*size*sizeof(TYPE));

		memset(L, 0, (size_t)size*size*sizeof(TYPE));
		memset(U, 0, (size_t)size*size*sizeof(TYPE));

		/* L keeps the lower part and the diagonal, U gets a unit diagonal
		 * and the upper part */
		unsigned i, j;
		for (j = 0; j < size; j++)
		{
			for (i = 0; i < j; i++)
			{
				L[j+i*size] = all_r[j+i*size];
			}

			/* diag i = j */
			L[j+j*size] = all_r[j+j*size];
			U[j+j*size] = 1.0;

			for (i = j+1; i < size; i++)
			{
				U[j+i*size] = all_r[j+i*size];
			}
		}

		STARPU_PLU(display_data_content)(L, size);
		STARPU_PLU(display_data_content)(U, size);

		/* compute L*U, stored in place of L */
		CPU_TRMM("R", "U", "N", "U", size, size, 1.0f, U, size, L, size);

		if (display)
			fprintf(stderr, "\nLU\n");

		STARPU_PLU(display_data_content)(L, size);

		/* compute "LU - A" in L */
		CPU_AXPY(size*size, -1.0, Asaved, 1, L, 1);

		TYPE err = CPU_ASUM(size*size, L, 1);
		int max = CPU_IAMAX(size*size, L, 1);

		if (display)
			fprintf(stderr, "DISPLAY ERROR\n");

		STARPU_PLU(display_data_content)(L, size);

		fprintf(stderr, "(A - LU) Avg error : %e\n", err/(size*size));
		fprintf(stderr, "(A - LU) Max error : %e\n", L[max]);

		double residual = frobenius_norm(L, size);
		double matnorm = frobenius_norm(Asaved, size);

		fprintf(stderr, "||A-LU|| / (||A||*N) : %e\n", residual/(matnorm*size));

		free(L);
		free(U);
	}

	free(all_r);
}