implicit-stencil-blocks.c

/* StarPU --- Runtime system for heterogeneous multicore architectures.
 *
 * Copyright (C) 2010-2020 Université de Bordeaux, CNRS (LaBRI UMR 5800), Inria
 *
 * StarPU is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation; either version 2.1 of the License, or (at
 * your option) any later version.
 *
 * StarPU is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * See the GNU Lesser General Public License in COPYING.LGPL for more details.
 */

#include "implicit-stencil.h"
#include <math.h>

/* Manage block and tag allocation */
static struct block_description *blocks;
static unsigned sizex, sizey, sizez;
static unsigned nbz;
static unsigned *block_sizes_z;

/*
 * Tags for the various codelet completions
 */

/*
 * Common tag format:
 *   bits  0-31: block index z
 *   bits 32-35: direction, encoded as (dir+1)/2 (0 for -1, 1 for +1)
 *   bits 36-..: tag type
 */
static starpu_tag_t tag_common(int z, int dir, int type)
{
	return (((((starpu_tag_t)type) << 4) | ((dir+1)/2)) << 32) | (starpu_tag_t)z;
}
/* Completion of last update tasks */
starpu_tag_t TAG_FINISH(int z)
{
	z = (z + nbz) % nbz;

	starpu_tag_t tag = tag_common(z, 0, 1);
	return tag;
}

/* Completion of the save codelet for MPI send/recv */
starpu_tag_t TAG_START(int z, int dir)
{
	z = (z + nbz) % nbz;

	starpu_tag_t tag = tag_common(z, dir, 2);
	return tag;
}
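
/*
 * For illustration only (values derived from tag_common() above, not used by
 * the code): TAG_FINISH(0) is tag_common(0, 0, 1) = (0x10 << 32) | 0 =
 * 0x1000000000, and TAG_START(2, +1) is tag_common(2, 1, 2) =
 * (0x21 << 32) | 2 = 0x2100000002.
 */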
/*
 * Common MPI tag format:
 *   bits 12-..: block index z
 *   bits  8-11: 0 for a layer, 1 for a boundary
 *   bits  4-7 : direction, encoded as (1+dir)/2
 *   bits  0-3 : buffer index
 */
static int mpi_tag_common(int z, int dir, int layer_or_boundary, int buffer)
{
	return (z<<12) | (layer_or_boundary << 8) | (((1+dir)/2)<<4) | buffer;
}
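
/*
 * For illustration only (derived from the encoding above, not used by the
 * code): mpi_tag_common(3, +1, 1, 0) = (3<<12) | (1<<8) | (1<<4) | 0 = 0x3110.
 */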
int MPI_TAG_LAYERS(int z, int buffer)
{
	z = (z + nbz) % nbz;

	/* Layers have no direction; the layer_or_boundary field is 0 */
	int tag = mpi_tag_common(z, 0, 0, buffer);
	return tag;
}

int MPI_TAG_BOUNDARIES(int z, int dir, int buffer)
{
	z = (z + nbz) % nbz;

	int tag = mpi_tag_common(z, dir, 1, buffer);
	return tag;
}
/*
 * Block descriptors
 */

/* Compute the size of the different blocks */
static void compute_block_sizes(void)
{
	block_sizes_z = (unsigned *) malloc(nbz*sizeof(unsigned));
	STARPU_ASSERT(block_sizes_z);

	/* The last block may be smaller than the others */
	unsigned default_block_size = (sizez+nbz-1)/nbz;
	unsigned remaining = sizez;

	unsigned b;
	for (b = 0; b < nbz; b++)
	{
		block_sizes_z[b] = MIN(default_block_size, remaining);
		remaining -= block_sizes_z[b];
	}

	STARPU_ASSERT(remaining == 0);
}
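
/*
 * For illustration: with sizez = 10 and nbz = 3, default_block_size is
 * (10+3-1)/3 = 4, so the blocks get sizes 4, 4 and 2.
 */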
unsigned get_block_size(int bz)
{
	return block_sizes_z[bz];
}

struct block_description *get_block_description(int z)
{
	z = (z + nbz) % nbz;

	STARPU_ASSERT(&blocks[z]);

	return &blocks[z];
}

int get_block_mpi_node(int z)
{
	z = (z + nbz) % nbz;

	return blocks[z].mpi_node;
}
void create_blocks_array(unsigned _sizex, unsigned _sizey, unsigned _sizez, unsigned _nbz)
{
	/* Store the parameters */
	nbz = _nbz;
	sizex = _sizex;
	sizey = _sizey;
	sizez = _sizez;

	/* Create a grid of block descriptors */
	blocks = (struct block_description *) calloc(nbz, sizeof(struct block_description));
	STARPU_ASSERT(blocks);

	/* What is the size of the different blocks? */
	compute_block_sizes();

	unsigned bz;
	for (bz = 0; bz < nbz; bz++)
	{
		struct block_description *block = get_block_description(bz);

		/* Which block is it? */
		block->bz = bz;

		/* For simplicity, store pointers to the neighbouring blocks.
		 * The modulo arithmetic wraps around in z, so the blocks form
		 * a ring. */
		block->boundary_blocks[B] = get_block_description((bz-1+nbz)%nbz);
		block->boundary_blocks[T] = get_block_description((bz+1)%nbz);
	}
}
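
/*
 * A minimal sketch of how these helpers are typically driven. The exact call
 * sequence lives in the main program (implicit-stencil.c); the order below is
 * an assumption based on the data dependencies between these functions:
 *
 *	create_blocks_array(sizex, sizey, sizez, nbz);
 *	assign_blocks_to_mpi_nodes(world_size);
 *	assign_blocks_to_workers(rank);
 *	allocate_memory_on_node(rank);
 *	... computation ...
 *	free_memory_on_node(rank);
 *	free_blocks_array();
 */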
void free_blocks_array()
{
	free(blocks);
	free(block_sizes_z);
}
/*
 * Initialization of the blocks
 */

void assign_blocks_to_workers(int rank)
{
	unsigned bz;

	/* NB: perhaps we could count a GPU as multiple workers */

	/* How many workers are there? */
	/*unsigned nworkers = starpu_worker_get_count();*/

	/* How many blocks are on that MPI node? */
//	unsigned nblocks = 0;
//	for (bz = 0; bz < nbz; bz++)
//	{
//		struct block_description *block = get_block_description(bz);
//
//		if (block->mpi_node == rank)
//			nblocks++;
//	}

	/* How many blocks per worker? */
	/*unsigned nblocks_per_worker = (nblocks + nworkers - 1)/nworkers;*/

	/* We now attribute up to nblocks_per_worker blocks per worker */
	unsigned attributed = 0;
	for (bz = 0; bz < nbz; bz++)
	{
		struct block_description *block = get_block_description(bz);

		if (block->mpi_node == rank)
		{
			unsigned workerid;

			/* Manage initial block distribution between CPU and GPU */
#if 0
#if 1
			/* GPUs then CPUs */
			if (attributed < 3*18)
				workerid = attributed / 18;
			else
				workerid = 3 + (attributed - 3*18) / 2;
#else
			/* GPUs interleaved with CPUs */
			if ((attributed % 20) <= 1)
				workerid = 3 + attributed / 20;
			else if (attributed < 60)
				workerid = attributed / 20;
			else
				workerid = (attributed - 60)/2 + 6;
#endif
#else
			/* Only GPUs */
			workerid = (attributed / 21) % 3;
#endif
			/*= attributed/nblocks_per_worker;*/

			block->preferred_worker = workerid;

			attributed++;
		}
	}
}
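
/*
 * For illustration: with the "Only GPUs" branch above, blocks attributed
 * 0-20 prefer worker 0, 21-41 worker 1, 42-62 worker 2, and the assignment
 * then wraps back to worker 0.
 */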
void assign_blocks_to_mpi_nodes(int world_size)
{
	unsigned nzblocks_per_process = (nbz + world_size - 1) / world_size;

	unsigned bz;
	for (bz = 0; bz < nbz; bz++)
	{
		struct block_description *block = get_block_description(bz);

		block->mpi_node = bz / nzblocks_per_process;
	}
}
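
/*
 * For illustration: with nbz = 10 and world_size = 4, nzblocks_per_process is
 * (10+4-1)/4 = 3, so blocks 0-2 go to rank 0, 3-5 to rank 1, 6-8 to rank 2,
 * and block 9 to rank 3.
 */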
static size_t allocated = 0;

static void allocate_block_on_node(starpu_data_handle_t *handleptr, unsigned bz, TYPE **ptr, unsigned nx, unsigned ny, unsigned nz)
{
	int ret;
	size_t block_size = nx*ny*nz*sizeof(TYPE);

	/* Allocate memory */
#if 1
	ret = starpu_malloc_flags((void **)ptr, block_size, STARPU_MALLOC_PINNED|STARPU_MALLOC_SIMULATION_FOLDED);
	STARPU_ASSERT(ret == 0);
#else
	*ptr = malloc(block_size);
	STARPU_ASSERT(*ptr);
#endif

	allocated += block_size;

//#ifndef STARPU_SIMGRID
//	/* Fill the blocks with 0 */
//	memset(*ptr, 0, block_size);
//#endif

	/* Register it to StarPU: the buffer is dense, so the row stride (ldy)
	 * is nx and the plane stride (ldz) is nx*ny */
	starpu_block_data_register(handleptr, STARPU_MAIN_RAM, (uintptr_t)*ptr, nx, nx*ny, nx, ny, nz, sizeof(TYPE));
	starpu_data_set_coordinates(*handleptr, 1, bz);
}
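
/*
 * For illustration (a sketch, not part of the driver code): once a handle has
 * been registered this way, the CPU-side pointer can be recovered with
 *	TYPE *p = (TYPE *) starpu_block_get_local_ptr(handle);
 * which is what free_block_on_node() below does before unregistering.
 */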
static void free_block_on_node(starpu_data_handle_t handleptr, unsigned nx, unsigned ny, unsigned nz)
{
	void *ptr = (void *) starpu_block_get_local_ptr(handleptr);
	size_t block_size = nx*ny*nz*sizeof(TYPE);
	starpu_data_unregister(handleptr);
	starpu_free_flags(ptr, block_size, STARPU_MALLOC_PINNED|STARPU_MALLOC_SIMULATION_FOLDED);
}

void display_memory_consumption(int rank, double time)
{
	FPRINTF(stderr, "%lu B of memory were allocated on node %d in %f ms\n", (unsigned long)allocated, rank, time/1000);
}
void allocate_memory_on_node(int rank)
{
	unsigned bz;

	/* Correctly allocate and declare all data handles to StarPU. */
	for (bz = 0; bz < nbz; bz++)
	{
		struct block_description *block = get_block_description(bz);
		int node = block->mpi_node;
		unsigned size_bz = block_sizes_z[bz];

		if (node == rank)
		{
			/* Main blocks */
			allocate_block_on_node(&block->layers_handle[0], bz, &block->layers[0],
					       (sizex + 2*K), (sizey + 2*K), (size_bz + 2*K));
			allocate_block_on_node(&block->layers_handle[1], bz, &block->layers[1],
					       (sizex + 2*K), (sizey + 2*K), (size_bz + 2*K));

			/* Boundary blocks : Top */
			allocate_block_on_node(&block->boundaries_handle[T][0], bz, &block->boundaries[T][0],
					       (sizex + 2*K), (sizey + 2*K), K);
			allocate_block_on_node(&block->boundaries_handle[T][1], bz, &block->boundaries[T][1],
					       (sizex + 2*K), (sizey + 2*K), K);

			/* Boundary blocks : Bottom */
			allocate_block_on_node(&block->boundaries_handle[B][0], bz, &block->boundaries[B][0],
					       (sizex + 2*K), (sizey + 2*K), K);
			allocate_block_on_node(&block->boundaries_handle[B][1], bz, &block->boundaries[B][1],
					       (sizex + 2*K), (sizey + 2*K), K);
		}
		/* Register placeholder blocks with StarPU (home node -1, no local
		 * buffer); StarPU-MPI will fetch them from neighbour nodes if they
		 * are needed for the local computation */
		else
		{
			/* Main blocks */
			starpu_block_data_register(&block->layers_handle[0], -1, (uintptr_t) NULL, (sizex + 2*K), (sizex + 2*K)*(sizey + 2*K), (sizex + 2*K), (sizey + 2*K), (size_bz + 2*K), sizeof(TYPE));
			starpu_block_data_register(&block->layers_handle[1], -1, (uintptr_t) NULL, (sizex + 2*K), (sizex + 2*K)*(sizey + 2*K), (sizex + 2*K), (sizey + 2*K), (size_bz + 2*K), sizeof(TYPE));

			/* Boundary blocks : Top */
			starpu_block_data_register(&block->boundaries_handle[T][0], -1, (uintptr_t) NULL, (sizex + 2*K), (sizex + 2*K)*(sizey + 2*K), (sizex + 2*K), (sizey + 2*K), K, sizeof(TYPE));
			starpu_block_data_register(&block->boundaries_handle[T][1], -1, (uintptr_t) NULL, (sizex + 2*K), (sizex + 2*K)*(sizey + 2*K), (sizex + 2*K), (sizey + 2*K), K, sizeof(TYPE));

			/* Boundary blocks : Bottom */
			starpu_block_data_register(&block->boundaries_handle[B][0], -1, (uintptr_t) NULL, (sizex + 2*K), (sizex + 2*K)*(sizey + 2*K), (sizex + 2*K), (sizey + 2*K), K, sizeof(TYPE));
			starpu_block_data_register(&block->boundaries_handle[B][1], -1, (uintptr_t) NULL, (sizex + 2*K), (sizex + 2*K)*(sizey + 2*K), (sizex + 2*K), (sizey + 2*K), K, sizeof(TYPE));
		}

#if defined(STARPU_USE_MPI) && !defined(STARPU_USE_MPI_MASTER_SLAVE)
		/* Register all data to StarPU-MPI, even the ones that are not
		 * allocated on the local node. */

		/* Main blocks */
		starpu_mpi_data_register(block->layers_handle[0], MPI_TAG_LAYERS(bz, 0), node);
		starpu_mpi_data_register(block->layers_handle[1], MPI_TAG_LAYERS(bz, 1), node);

		/* Boundary blocks : Top */
		starpu_mpi_data_register(block->boundaries_handle[T][0], MPI_TAG_BOUNDARIES(bz, T, 0), node);
		starpu_mpi_data_register(block->boundaries_handle[T][1], MPI_TAG_BOUNDARIES(bz, T, 1), node);

		/* Boundary blocks : Bottom */
		starpu_mpi_data_register(block->boundaries_handle[B][0], MPI_TAG_BOUNDARIES(bz, B, 0), node);
		starpu_mpi_data_register(block->boundaries_handle[B][1], MPI_TAG_BOUNDARIES(bz, B, 1), node);
#endif
	}

	/* Initialize all the data in parallel */
	for (bz = 0; bz < nbz; bz++)
	{
		struct block_description *block = get_block_description(bz);
		int node = block->mpi_node;

		if (node == rank)
		{
			/* Set all the data to 0 */
			create_task_memset(sizex, sizey, bz);

			/* Initialize the first layer with some random data */
			create_task_initlayer(sizex, sizey, bz);
		}
	}

	/* The initialization tasks are asynchronous: wait until they all complete */
	starpu_task_wait_for_all();
}
void free_memory_on_node(int rank)
{
	unsigned bz;
	for (bz = 0; bz < nbz; bz++)
	{
		struct block_description *block = get_block_description(bz);
		int node = block->mpi_node;
		unsigned size_bz = block_sizes_z[bz];

		/* Main blocks */
		if (node == rank)
		{
			/* The sizes passed here must match the ones used at
			 * allocation time, i.e. (size_bz + 2*K) planes for the
			 * main layers */
			free_block_on_node(block->layers_handle[0], (sizex + 2*K), (sizey + 2*K), (size_bz + 2*K));
			free_block_on_node(block->layers_handle[1], (sizex + 2*K), (sizey + 2*K), (size_bz + 2*K));
		}
		else
		{
			starpu_data_unregister(block->layers_handle[0]);
			starpu_data_unregister(block->layers_handle[1]);
		}

		/* Boundary blocks : Top */
		if (node == rank)
		{
			free_block_on_node(block->boundaries_handle[T][0], (sizex + 2*K), (sizey + 2*K), K);
			free_block_on_node(block->boundaries_handle[T][1], (sizex + 2*K), (sizey + 2*K), K);
		}
		else
		{
			starpu_data_unregister(block->boundaries_handle[T][0]);
			starpu_data_unregister(block->boundaries_handle[T][1]);
		}

		/* Boundary blocks : Bottom */
		if (node == rank)
		{
			free_block_on_node(block->boundaries_handle[B][0], (sizex + 2*K), (sizey + 2*K), K);
			free_block_on_node(block->boundaries_handle[B][1], (sizex + 2*K), (sizey + 2*K), K);
		}
		else
		{
			starpu_data_unregister(block->boundaries_handle[B][0]);
			starpu_data_unregister(block->boundaries_handle[B][1]);
		}
	}
}
/* Check how many cells are alive */
void check(int rank)
{
	unsigned bz;
	for (bz = 0; bz < nbz; bz++)
	{
		struct block_description *block = get_block_description(bz);
		int node = block->mpi_node;

		/* Main blocks */
		if (node == rank)
		{
			unsigned size_bz = block_sizes_z[bz];
#ifdef LIFE
			/* Sum the interior cells of layer 0, skipping the
			 * K-wide ghost borders; without LIFE this function
			 * does nothing */
			unsigned x, y, z;
			unsigned sum = 0;
			for (x = 0; x < sizex; x++)
				for (y = 0; y < sizey; y++)
					for (z = 0; z < size_bz; z++)
						sum += block->layers[0][(K+x)+(K+y)*(sizex + 2*K)+(K+z)*(sizex+2*K)*(sizey+2*K)];
			printf("block %u got %u/%u alive\n", bz, sum, sizex*sizey*size_bz);
#endif
		}
	}
}