/* plu_solve.c */

/*
 * StarPU
 * Copyright (C) INRIA 2008-2010 (see AUTHORS file)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation; either version 2.1 of the License, or (at
 * your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * See the GNU Lesser General Public License in COPYING.LGPL for more details.
 */
#include <starpu.h>
#include <math.h>
#include "pxlu.h"

/*
 * Various useful functions
 */
static double frobenius_norm(TYPE *v, unsigned n)
{
	double sum2 = 0.0;

	/* compute sqrt(Sum(|x|^2)) */
	unsigned i, j;
	for (j = 0; j < n; j++)
	for (i = 0; i < n; i++)
	{
		double a = fabs((double)v[i+n*j]);
		sum2 += a*a;
	}

	return sqrt(sum2);
}
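
/*
 * The norm above is what compute_lu_matrix() at the bottom of this file
 * uses to normalize the residual: ||A - LU|| / (||A|| * N).
 */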
void STARPU_PLU(display_data_content)(TYPE *data, unsigned blocksize)
{
	fprintf(stderr, "DISPLAY BLOCK\n");

	unsigned i, j;
	for (j = 0; j < blocksize; j++)
	{
		for (i = 0; i < blocksize; i++)
		{
			fprintf(stderr, "%f ", data[j+i*blocksize]);
		}
		fprintf(stderr, "\n");
	}

	fprintf(stderr, "****\n");
}
void STARPU_PLU(extract_upper)(unsigned block_size, TYPE *inblock, TYPE *outblock)
{
	unsigned li, lj;
	for (lj = 0; lj < block_size; lj++)
	{
		/* The diagonal of the upper block is 1 */
		outblock[lj*(block_size + 1)] = (TYPE)1.0;

		for (li = lj + 1; li < block_size; li++)
		{
			outblock[lj + li*block_size] = inblock[lj + li*block_size];
		}
	}
}

void STARPU_PLU(extract_lower)(unsigned block_size, TYPE *inblock, TYPE *outblock)
{
	unsigned li, lj;
	for (lj = 0; lj < block_size; lj++)
	{
		for (li = 0; li <= lj; li++)
		{
			outblock[lj + li*block_size] = inblock[lj + li*block_size];
		}
	}
}
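
/*
 * Illustration (assuming the column-major layout used throughout this
 * file, index = row + col*block_size): for a 3x3 block
 *
 *         [ a d g ]                 [ 1 d g ]                 [ a 0 0 ]
 *     A = [ b e h ]  extract_upper: [ 0 1 h ]  extract_lower: [ b e 0 ]
 *         [ c f i ]                 [ 0 0 1 ]                 [ c f i ]
 *
 * i.e. U carries a unit diagonal while L keeps the factored diagonal.
 * Both routines only write the triangle they copy, so outblock must be
 * zero-initialized beforehand (the callers below use calloc).
 */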

/*
 * Compute Ax = y
 */
static void STARPU_PLU(compute_ax_block)(unsigned block_size, TYPE *block_data, TYPE *sub_x, TYPE *sub_y)
{
	fprintf(stderr, "block data %p sub x %p sub y %p\n", block_data, sub_x, sub_y);

	/* sub_y := block_data * sub_x + sub_y (alpha = beta = 1) */
	CPU_GEMV("N", block_size, block_size, 1.0, block_data, block_size, sub_x, 1, 1.0, sub_y, 1);
}
static void STARPU_PLU(compute_ax_block_upper)(unsigned size, unsigned nblocks,
				TYPE *block_data, TYPE *sub_x, TYPE *sub_y)
{
	unsigned block_size = size/nblocks;

	/* Take a copy of the upper part of the diagonal block */
	TYPE *upper_block_copy = calloc(block_size*block_size, sizeof(TYPE));
	STARPU_PLU(extract_upper)(block_size, block_data, upper_block_copy);

	STARPU_PLU(compute_ax_block)(block_size, upper_block_copy, sub_x, sub_y);

	free(upper_block_copy);
}
static void STARPU_PLU(compute_ax_block_lower)(unsigned size, unsigned nblocks,
				TYPE *block_data, TYPE *sub_x, TYPE *sub_y)
{
	unsigned block_size = size/nblocks;

	/* Take a copy of the lower part of the diagonal block */
	TYPE *lower_block_copy = calloc(block_size*block_size, sizeof(TYPE));
	STARPU_PLU(extract_lower)(block_size, block_data, lower_block_copy);

	STARPU_PLU(compute_ax_block)(block_size, lower_block_copy, sub_x, sub_y);

	free(lower_block_copy);
}
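
/*
 * Note that compute_ax_block() accumulates into sub_y (GEMV is called
 * with beta = 1), so the destination vector must start out zeroed; the
 * callers below rely on calloc'd buffers for that.
 */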
void STARPU_PLU(compute_lux)(unsigned size, TYPE *x, TYPE *y, unsigned nblocks, int rank)
{
	/* Create temporary buffers where all MPI processes are going to
	 * compute Ui x = yi, where Ui is the matrix containing the blocks of U
	 * assigned to process i, and 0 everywhere else. We then have y as the
	 * sum of all yi. */
	TYPE *yi = calloc(size, sizeof(TYPE));

	fprintf(stderr, "Compute LU\n");

	unsigned block_size = size/nblocks;

	/* Compute Ui x = yi */
	unsigned long i, j;
	for (j = 0; j < nblocks; j++)
	{
		if (get_block_rank(j, j) == rank)
		{
			/* Upper part of the diagonal block */
			TYPE *block_data = STARPU_PLU(get_block)(j, j);
			TYPE *sub_x = &x[j*block_size];
			TYPE *sub_yi = &yi[j*block_size];

			STARPU_PLU(compute_ax_block_upper)(size, nblocks, block_data, sub_x, sub_yi);
		}

		for (i = j + 1; i < nblocks; i++)
		{
			if (get_block_rank(i, j) == rank)
			{
				/* That block belongs to the current MPI process */
				TYPE *block_data = STARPU_PLU(get_block)(i, j);
				TYPE *sub_x = &x[i*block_size];
				TYPE *sub_yi = &yi[j*block_size];

				STARPU_PLU(compute_ax_block)(block_size, block_data, sub_x, sub_yi);
			}
		}
	}

	/* Gather the sum of all yi into x */
	MPI_Reduce(yi, x, size, MPI_TYPE, MPI_SUM, 0, MPI_COMM_WORLD);

	memset(yi, 0, size*sizeof(TYPE));

//	unsigned ind;
//	if (rank == 0)
//	{
//		fprintf(stderr, "INTERMEDIATE\n");
//		for (ind = 0; ind < STARPU_MIN(10, size); ind++)
//		{
//			fprintf(stderr, "x[%u] = %f\n", ind, (float)x[ind]);
//		}
//		fprintf(stderr, "****\n");
//	}

	/* Everyone needs x */
	int bcst_ret;
	bcst_ret = MPI_Bcast(x, size, MPI_TYPE, 0, MPI_COMM_WORLD);
	STARPU_ASSERT(bcst_ret == MPI_SUCCESS);

	/* Compute Li x = yi (with x = Ux) */
	for (j = 0; j < nblocks; j++)
	{
		for (i = 0; i < j; i++)
		{
			if (get_block_rank(i, j) == rank)
			{
				/* That block belongs to the current MPI process */
				TYPE *block_data = STARPU_PLU(get_block)(i, j);
				TYPE *sub_x = &x[i*block_size];
				TYPE *sub_yi = &yi[j*block_size];

				STARPU_PLU(compute_ax_block)(block_size, block_data, sub_x, sub_yi);
			}
		}

		if (get_block_rank(j, j) == rank)
		{
			/* Lower part of the diagonal block */
			TYPE *block_data = STARPU_PLU(get_block)(j, j);
			TYPE *sub_x = &x[j*block_size];
			TYPE *sub_yi = &yi[j*block_size];

			STARPU_PLU(compute_ax_block_lower)(size, nblocks, block_data, sub_x, sub_yi);
		}
	}

	/* Gather the sum of all yi into y */
	MPI_Reduce(yi, y, size, MPI_TYPE, MPI_SUM, 0, MPI_COMM_WORLD);

	free(yi);
}
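
/*
 * Usage sketch (illustrative, not from the original driver): a caller can
 * check the factorization by feeding the same x to compute_lux() and to
 * compute_ax() below, then comparing the two results on rank 0:
 *
 *     TYPE *x = calloc(size, sizeof(TYPE));
 *     TYPE *y = calloc(size, sizeof(TYPE));
 *     // fill x identically on every rank: compute_lux() reads it
 *     // everywhere before any broadcast takes place
 *     STARPU_PLU(compute_lux)(size, x, y, nblocks, rank);
 *     // on rank 0, y now holds L*U*x; note that x has been overwritten
 *     // with the intermediate product U*x
 */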

/*
 * Allocate a contiguous matrix on node 0 and fill it with the whole
 * content of the matrix distributed across all nodes.
 */
TYPE *STARPU_PLU(reconstruct_matrix)(unsigned size, unsigned nblocks)
{
//	fprintf(stderr, "RECONSTRUCT MATRIX size %d nblocks %d\n", size, nblocks);

	TYPE *bigmatrix = calloc(size*size, sizeof(TYPE));

	unsigned block_size = size/nblocks;

	int rank;
	MPI_Comm_rank(MPI_COMM_WORLD, &rank);

	unsigned bi, bj;
	for (bj = 0; bj < nblocks; bj++)
	for (bi = 0; bi < nblocks; bi++)
	{
		TYPE *block = NULL;

		int block_rank = get_block_rank(bi, bj);

		if (block_rank == 0)
		{
			block = STARPU_PLU(get_block)(bi, bj);
		}
		else {
			MPI_Status status;

			if (rank == 0)
			{
				block = calloc(block_size*block_size, sizeof(TYPE));

				int ret = MPI_Recv(block, block_size*block_size, MPI_TYPE, block_rank, 0, MPI_COMM_WORLD, &status);
				STARPU_ASSERT(ret == MPI_SUCCESS);
			}
			else if (rank == block_rank) {
				/* Only the owner of the block sends it to node 0 */
				block = STARPU_PLU(get_block)(bi, bj);
				int ret = MPI_Send(block, block_size*block_size, MPI_TYPE, 0, 0, MPI_COMM_WORLD);
				STARPU_ASSERT(ret == MPI_SUCCESS);
			}
		}

		if (rank == 0)
		{
			unsigned i, j;
			for (j = 0; j < block_size; j++)
			for (i = 0; i < block_size; i++)
			{
				bigmatrix[(j + bj*block_size)+(i+bi*block_size)*size] =
					block[j+i*block_size];
			}

			if (block_rank != 0)
				free(block);
		}
	}

	return bigmatrix;
}
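
/*
 * Every rank allocates the full size x size buffer, but only rank 0 fills
 * it; the caller is responsible for free()ing the returned pointer on all
 * ranks, as compute_lu_matrix() below does.
 */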

/* x and y must be valid (at least) on node 0 */
void STARPU_PLU(compute_ax)(unsigned size, TYPE *x, TYPE *y, unsigned nblocks, int rank)
{
	unsigned block_size = size/nblocks;

	/* Send x to everyone */
	int bcst_ret;
	bcst_ret = MPI_Bcast(x, size, MPI_TYPE, 0, MPI_COMM_WORLD);
	STARPU_ASSERT(bcst_ret == MPI_SUCCESS);

	/* Create temporary buffers where all MPI processes are going to
	 * compute Ai x = yi, where Ai is the matrix containing the blocks of A
	 * assigned to process i, and 0 everywhere else. We then have y as the
	 * sum of all yi. */
	TYPE *yi = calloc(size, sizeof(TYPE));

	/* Compute Ai x = yi */
	unsigned long i, j;
	for (j = 0; j < nblocks; j++)
	{
		for (i = 0; i < nblocks; i++)
		{
			if (get_block_rank(i, j) == rank)
			{
				/* That block belongs to the current MPI process */
				TYPE *block_data = STARPU_PLU(get_block)(i, j);
				TYPE *sub_x = &x[i*block_size];
				TYPE *sub_yi = &yi[j*block_size];

				STARPU_PLU(compute_ax_block)(block_size, block_data, sub_x, sub_yi);
			}
		}
	}

	/* Compute the sum of all yi into y */
	MPI_Reduce(yi, y, size, MPI_TYPE, MPI_SUM, 0, MPI_COMM_WORLD);

	fprintf(stderr, "RANK %d - FOO 1 y[0] %f\n", rank, y[0]);

	free(yi);
}
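
/*
 * Since MPI_Reduce() only deposits the sum on rank 0, y is meaningful
 * there alone; any comparison against compute_lux()'s output must
 * therefore happen on rank 0.
 */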
void STARPU_PLU(compute_lu_matrix)(unsigned size, unsigned nblocks, TYPE *Asaved)
{
	TYPE *all_r = STARPU_PLU(reconstruct_matrix)(size, nblocks);

	int rank;
	MPI_Comm_rank(MPI_COMM_WORLD, &rank);

	if (rank == 0)
	{
		TYPE *L = malloc((size_t)size*size*sizeof(TYPE));
		TYPE *U = malloc((size_t)size*size*sizeof(TYPE));

		memset(L, 0, size*size*sizeof(TYPE));
		memset(U, 0, size*size*sizeof(TYPE));

		/* only keep the lower part */
		unsigned i, j;
		for (j = 0; j < size; j++)
		{
			for (i = 0; i < j; i++)
			{
				L[j+i*size] = all_r[j+i*size];
			}

			/* diag i = j */
			L[j+j*size] = all_r[j+j*size];
			U[j+j*size] = 1.0;

			for (i = j+1; i < size; i++)
			{
				U[j+i*size] = all_r[j+i*size];
			}
		}

		STARPU_PLU(display_data_content)(L, size);
		STARPU_PLU(display_data_content)(U, size);

		/* now A_err = L, compute L*U */
		CPU_TRMM("R", "U", "N", "U", size, size, 1.0f, U, size, L, size);

		fprintf(stderr, "\nLU\n");
		STARPU_PLU(display_data_content)(L, size);

		/* compute "LU - A" in L */
		CPU_AXPY(size*size, -1.0, Asaved, 1, L, 1);

		TYPE err = CPU_ASUM(size*size, L, 1);
		int max = CPU_IAMAX(size*size, L, 1);

		fprintf(stderr, "DISPLAY ERROR\n");
		STARPU_PLU(display_data_content)(L, size);

		fprintf(stderr, "(A - LU) Avg error : %e\n", err/(size*size));
		fprintf(stderr, "(A - LU) Max error : %e\n", L[max]);

		double residual = frobenius_norm(L, size);
		double matnorm = frobenius_norm(Asaved, size);

		fprintf(stderr, "||A-LU|| / (||A||*N) : %e\n", residual/(matnorm*size));

		free(L);
		free(U);
	}

	free(all_r);
}