implicit-stencil-blocks.c

/* StarPU --- Runtime system for heterogeneous multicore architectures.
 *
 * Copyright (C) 2010, 2013-2017 Université de Bordeaux
 *
 * StarPU is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation; either version 2.1 of the License, or (at
 * your option) any later version.
 *
 * StarPU is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * See the GNU Lesser General Public License in COPYING.LGPL for more details.
 */
#include "implicit-stencil.h"
#include <math.h>

/* Manage block and tag allocation */

static struct block_description *blocks;
static unsigned sizex, sizey, sizez;
static unsigned nbz;
static unsigned *block_sizes_z;
/*
 * Tags for the various codelet completions
 */

/*
 * Common tag format: the block index z in the low 32 bits, the direction
 * bit ((dir+1)/2) in bits 32-35, and the completion type in the bits
 * above that.
 */
static starpu_tag_t tag_common(int z, int dir, int type)
{
    return (((((starpu_tag_t)type) << 4) | ((dir+1)/2)) << 32) | (starpu_tag_t)z;
}
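
/* For instance, tag_common(5, +1, 2) yields
 * (((2 << 4) | 1) << 32) | 5 = 0x2100000005. */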

/* Completion of last update tasks */
starpu_tag_t TAG_FINISH(int z)
{
    z = (z + nbz) % nbz;

    starpu_tag_t tag = tag_common(z, 0, 1);
    return tag;
}

/* Completion of the save codelet for MPI send/recv */
starpu_tag_t TAG_START(int z, int dir)
{
    z = (z + nbz) % nbz;

    starpu_tag_t tag = tag_common(z, dir, 2);
    return tag;
}

/*
 * Common MPI tag format: the buffer index in bits 0-3, the direction bit
 * ((1+dir)/2) in bits 4-7, a layer(0)/boundary(1) flag in bits 8-11, and
 * the block index z in the bits above that.
 */
static int mpi_tag_common(int z, int dir, int layer_or_boundary, int buffer)
{
    return (z<<12) | (layer_or_boundary << 8) | (((1+dir)/2)<<4) | buffer;
}

int MPI_TAG_LAYERS(int z, int buffer)
{
    z = (z + nbz) % nbz;

    /* Layers have no direction; the layer/boundary flag is 0 */
    int tag = mpi_tag_common(z, 0, 0, buffer);
    return tag;
}

int MPI_TAG_BOUNDARIES(int z, int dir, int buffer)
{
    z = (z + nbz) % nbz;

    int tag = mpi_tag_common(z, dir, 1, buffer);
    return tag;
}
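
/* For instance, with nbz > 3, MPI_TAG_BOUNDARIES(3, +1, 1) yields
 * (3<<12) | (1<<8) | (1<<4) | 1 = 0x3111. */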

/*
 * Block descriptors
 */

/* Compute the size of the different blocks */
static void compute_block_sizes(void)
{
    block_sizes_z = (unsigned *) malloc(nbz*sizeof(unsigned));
    STARPU_ASSERT(block_sizes_z);

    /* All blocks get the same ceiling size; only the last chunk may be
     * smaller */
    unsigned default_block_size = (sizez+nbz-1)/nbz;
    unsigned remaining = sizez;

    unsigned b;
    for (b = 0; b < nbz; b++)
    {
        block_sizes_z[b] = MIN(default_block_size, remaining);
        remaining -= block_sizes_z[b];
    }

    STARPU_ASSERT(remaining == 0);
}
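
/* For instance, sizez = 100 and nbz = 8 give default_block_size = 13,
 * so the blocks get sizes { 13, 13, 13, 13, 13, 13, 13, 9 }. */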

unsigned get_block_size(int bz)
{
    return block_sizes_z[bz];
}

struct block_description *get_block_description(int z)
{
    z = (z + nbz) % nbz;

    /* Assert the array itself: &blocks[z] is never NULL */
    STARPU_ASSERT(blocks);
    return &blocks[z];
}

int get_block_mpi_node(int z)
{
    z = (z + nbz) % nbz;

    return blocks[z].mpi_node;
}

void create_blocks_array(unsigned _sizex, unsigned _sizey, unsigned _sizez, unsigned _nbz)
{
    /* Store the parameters */
    nbz = _nbz;
    sizex = _sizex;
    sizey = _sizey;
    sizez = _sizez;

    /* Create a grid of block descriptors */
    blocks = (struct block_description *) calloc(nbz, sizeof(struct block_description));
    STARPU_ASSERT(blocks);

    /* What is the size of the different blocks? */
    compute_block_sizes();

    unsigned bz;
    for (bz = 0; bz < nbz; bz++)
    {
        struct block_description *block = get_block_description(bz);

        /* Which block is it? */
        block->bz = bz;

        /* For simplicity, we store pointers to the neighbouring blocks
         * (the z dimension wraps around) */
        block->boundary_blocks[B] = get_block_description((bz-1+nbz)%nbz);
        block->boundary_blocks[T] = get_block_description((bz+1)%nbz);
    }
}
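
/* A typical setup sequence from the driver (presumably implicit-stencil.c,
 * which is not part of this file) would be:
 *
 *     create_blocks_array(sizex, sizey, sizez, nbz);
 *     assign_blocks_to_mpi_nodes(world_size);
 *     assign_blocks_to_workers(rank);
 *     allocate_memory_on_node(rank);
 */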

void free_blocks_array()
{
    free(blocks);
    free(block_sizes_z);
}

/*
 * Initialization of the blocks
 */
void assign_blocks_to_workers(int rank)
{
    unsigned bz;

    /* NB: perhaps we could count a GPU as multiple workers */

    /* How many workers are there? */
    /*unsigned nworkers = starpu_worker_get_count();*/

    /* How many blocks are on that MPI node? */
    unsigned nblocks = 0;
    for (bz = 0; bz < nbz; bz++)
    {
        struct block_description *block = get_block_description(bz);

        if (block->mpi_node == rank)
            nblocks++;
    }

    /* How many blocks per worker? */
    /*unsigned nblocks_per_worker = (nblocks + nworkers - 1)/nworkers;*/

    /* We now attribute up to nblocks_per_worker blocks per worker */
    unsigned attributed = 0;
    for (bz = 0; bz < nbz; bz++)
    {
        struct block_description *block = get_block_description(bz);

        if (block->mpi_node == rank)
        {
            unsigned workerid;

            /* Manage the initial block distribution between CPUs and GPUs */
#if 0
#if 1
            /* GPUs first, then CPUs */
            if (attributed < 3*18)
                workerid = attributed / 18;
            else
                workerid = 3 + (attributed - 3*18) / 2;
#else
            /* GPUs interleaved with CPUs */
            if ((attributed % 20) <= 1)
                workerid = 3 + attributed / 20;
            else if (attributed < 60)
                workerid = attributed / 20;
            else
                workerid = (attributed - 60)/2 + 6;
#endif
#else
            /* GPUs only */
            workerid = (attributed / 21) % 3;
#endif
            /*= attributed/nblocks_per_worker;*/

            block->preferred_worker = workerid;

            attributed++;
        }
    }
}

void assign_blocks_to_mpi_nodes(int world_size)
{
    unsigned nzblocks_per_process = (nbz + world_size - 1) / world_size;

    unsigned bz;
    for (bz = 0; bz < nbz; bz++)
    {
        struct block_description *block = get_block_description(bz);

        block->mpi_node = bz / nzblocks_per_process;
    }
}
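
/* For instance, nbz = 10 and world_size = 4 give nzblocks_per_process = 3:
 * ranks 0, 1 and 2 each own three consecutive blocks, and rank 3 owns only
 * block 9. */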

static size_t allocated = 0;

static void allocate_block_on_node(starpu_data_handle_t *handleptr, unsigned bz, TYPE **ptr, unsigned nx, unsigned ny, unsigned nz)
{
    int ret;
    size_t block_size = nx*ny*nz*sizeof(TYPE);

    /* Allocate memory */
#if 1
    ret = starpu_malloc_flags((void **)ptr, block_size, STARPU_MALLOC_PINNED|STARPU_MALLOC_SIMULATION_FOLDED);
    STARPU_ASSERT(ret == 0);
#else
    *ptr = malloc(block_size);
    STARPU_ASSERT(*ptr);
#endif

    allocated += block_size;

//#ifndef STARPU_SIMGRID
//    /* Fill the blocks with 0 */
//    memset(*ptr, 0, block_size);
//#endif

    /* Register it with StarPU */
    starpu_block_data_register(handleptr, STARPU_MAIN_RAM, (uintptr_t)*ptr, nx, nx*ny, nx, ny, nz, sizeof(TYPE));
    starpu_data_set_coordinates(*handleptr, 1, bz);
}
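
/* In the starpu_block_data_register() call above, the two arguments after
 * the pointer are the leading dimensions ldy = nx and ldz = nx*ny (in
 * number of elements), followed by the nx/ny/nz extents and the element
 * size, i.e. the block is registered as a dense, contiguous 3D array. */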

static void free_block_on_node(starpu_data_handle_t handleptr, unsigned nx, unsigned ny, unsigned nz)
{
    void *ptr = (void *) starpu_block_get_local_ptr(handleptr);
    size_t block_size = nx*ny*nz*sizeof(TYPE);
    starpu_data_unregister(handleptr);
    starpu_free_flags(ptr, block_size, STARPU_MALLOC_PINNED|STARPU_MALLOC_SIMULATION_FOLDED);
}

void display_memory_consumption(int rank, double time)
{
    FPRINTF(stderr, "%lu B of memory were allocated on node %d in %f ms\n", (unsigned long)allocated, rank, time/1000);
}

void allocate_memory_on_node(int rank)
{
    unsigned bz;

    /* Allocate and register all the data handles with StarPU. */
    for (bz = 0; bz < nbz; bz++)
    {
        struct block_description *block = get_block_description(bz);
        int node = block->mpi_node;
        unsigned size_bz = block_sizes_z[bz];

        if (node == rank)
        {
            /* Main blocks */
            allocate_block_on_node(&block->layers_handle[0], bz, &block->layers[0],
                                   (sizex + 2*K), (sizey + 2*K), (size_bz + 2*K));
            allocate_block_on_node(&block->layers_handle[1], bz, &block->layers[1],
                                   (sizex + 2*K), (sizey + 2*K), (size_bz + 2*K));

            /* Boundary blocks: top */
            allocate_block_on_node(&block->boundaries_handle[T][0], bz, &block->boundaries[T][0],
                                   (sizex + 2*K), (sizey + 2*K), K);
            allocate_block_on_node(&block->boundaries_handle[T][1], bz, &block->boundaries[T][1],
                                   (sizex + 2*K), (sizey + 2*K), K);

            /* Boundary blocks: bottom */
            allocate_block_on_node(&block->boundaries_handle[B][0], bz, &block->boundaries[B][0],
                                   (sizex + 2*K), (sizey + 2*K), K);
            allocate_block_on_node(&block->boundaries_handle[B][1], bz, &block->boundaries[B][1],
                                   (sizex + 2*K), (sizey + 2*K), K);
        }
        else
        {
            /* Register empty handles with StarPU; StarPU-MPI will fetch
             * their contents from the neighbouring nodes when they are
             * needed for a local computation */
            /* Main blocks */
            starpu_block_data_register(&block->layers_handle[0], -1, (uintptr_t) NULL, (sizex + 2*K), (sizex + 2*K)*(sizey + 2*K), (sizex + 2*K), (sizey + 2*K), (size_bz + 2*K), sizeof(TYPE));
            starpu_block_data_register(&block->layers_handle[1], -1, (uintptr_t) NULL, (sizex + 2*K), (sizex + 2*K)*(sizey + 2*K), (sizex + 2*K), (sizey + 2*K), (size_bz + 2*K), sizeof(TYPE));

            /* Boundary blocks: top */
            starpu_block_data_register(&block->boundaries_handle[T][0], -1, (uintptr_t) NULL, (sizex + 2*K), (sizex + 2*K)*(sizey + 2*K), (sizex + 2*K), (sizey + 2*K), K, sizeof(TYPE));
            starpu_block_data_register(&block->boundaries_handle[T][1], -1, (uintptr_t) NULL, (sizex + 2*K), (sizex + 2*K)*(sizey + 2*K), (sizex + 2*K), (sizey + 2*K), K, sizeof(TYPE));

            /* Boundary blocks: bottom */
            starpu_block_data_register(&block->boundaries_handle[B][0], -1, (uintptr_t) NULL, (sizex + 2*K), (sizex + 2*K)*(sizey + 2*K), (sizex + 2*K), (sizey + 2*K), K, sizeof(TYPE));
            starpu_block_data_register(&block->boundaries_handle[B][1], -1, (uintptr_t) NULL, (sizex + 2*K), (sizex + 2*K)*(sizey + 2*K), (sizex + 2*K), (sizey + 2*K), K, sizeof(TYPE));
        }

#if defined(STARPU_USE_MPI) && !defined(STARPU_USE_MPI_MASTER_SLAVE)
        /* Register all the data with StarPU-MPI, even the pieces that are
         * not allocated on the local node. */
        /* Main blocks */
        starpu_mpi_data_register(block->layers_handle[0], MPI_TAG_LAYERS(bz, 0), node);
        starpu_mpi_data_register(block->layers_handle[1], MPI_TAG_LAYERS(bz, 1), node);

        /* Boundary blocks: top */
        starpu_mpi_data_register(block->boundaries_handle[T][0], MPI_TAG_BOUNDARIES(bz, T, 0), node);
        starpu_mpi_data_register(block->boundaries_handle[T][1], MPI_TAG_BOUNDARIES(bz, T, 1), node);

        /* Boundary blocks: bottom */
        starpu_mpi_data_register(block->boundaries_handle[B][0], MPI_TAG_BOUNDARIES(bz, B, 0), node);
        starpu_mpi_data_register(block->boundaries_handle[B][1], MPI_TAG_BOUNDARIES(bz, B, 1), node);
#endif
    }

    /* Initialize all the data in parallel */
    for (bz = 0; bz < nbz; bz++)
    {
        struct block_description *block = get_block_description(bz);
        int node = block->mpi_node;

        if (node == rank)
        {
            /* Set all the data to 0 */
            create_task_memset(sizex, sizey, bz);

            /* Initialize the first layer with some random data */
            create_task_initlayer(sizex, sizey, bz);
        }
    }
    starpu_task_wait_for_all();
}
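
/* Note on the two registration paths above: handles owned by another rank
 * are registered with home node -1, so no buffer is allocated locally;
 * starpu_mpi_data_register() then attaches the MPI tag and the owner rank,
 * letting StarPU-MPI transfer the data transparently whenever a local task
 * depends on it. */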

void free_memory_on_node(int rank)
{
    unsigned bz;
    for (bz = 0; bz < nbz; bz++)
    {
        struct block_description *block = get_block_description(bz);
        int node = block->mpi_node;
        unsigned size_bz = block_sizes_z[bz];

        /* Main blocks: free them with the same nz they were allocated with */
        if (node == rank)
        {
            free_block_on_node(block->layers_handle[0], (sizex + 2*K), (sizey + 2*K), (size_bz + 2*K));
            free_block_on_node(block->layers_handle[1], (sizex + 2*K), (sizey + 2*K), (size_bz + 2*K));
        }
        else
        {
            starpu_data_unregister(block->layers_handle[0]);
            starpu_data_unregister(block->layers_handle[1]);
        }

        /* Boundary blocks: top */
        if (node == rank)
        {
            free_block_on_node(block->boundaries_handle[T][0], (sizex + 2*K), (sizey + 2*K), K);
            free_block_on_node(block->boundaries_handle[T][1], (sizex + 2*K), (sizey + 2*K), K);
        }
        else
        {
            starpu_data_unregister(block->boundaries_handle[T][0]);
            starpu_data_unregister(block->boundaries_handle[T][1]);
        }

        /* Boundary blocks: bottom */
        if (node == rank)
        {
            free_block_on_node(block->boundaries_handle[B][0], (sizex + 2*K), (sizey + 2*K), K);
            free_block_on_node(block->boundaries_handle[B][1], (sizex + 2*K), (sizey + 2*K), K);
        }
        else
        {
            starpu_data_unregister(block->boundaries_handle[B][0]);
            starpu_data_unregister(block->boundaries_handle[B][1]);
        }
    }
}

/* Check how many cells are alive */
void check(int rank)
{
    unsigned bz;
    for (bz = 0; bz < nbz; bz++)
    {
        struct block_description *block = get_block_description(bz);
        int node = block->mpi_node;

        /* Main blocks */
        if (node == rank)
        {
            unsigned size_bz = block_sizes_z[bz];
#ifdef LIFE
            unsigned x, y, z;
            unsigned sum = 0;
            for (x = 0; x < sizex; x++)
                for (y = 0; y < sizey; y++)
                    for (z = 0; z < size_bz; z++)
                        sum += block->layers[0][(K+x)+(K+y)*(sizex + 2*K)+(K+z)*(sizex+2*K)*(sizey+2*K)];
            printf("block %u got %u/%u alive\n", bz, sum, sizex*sizey*size_bz);
#endif
        }
    }
}