block_interface.c
/* StarPU --- Runtime system for heterogeneous multicore architectures.
 *
 * Copyright (C) 2009-2011  Université de Bordeaux 1
 * Copyright (C) 2010, 2011  Centre National de la Recherche Scientifique
 *
 * StarPU is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation; either version 2.1 of the License, or (at
 * your option) any later version.
 *
 * StarPU is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * See the GNU Lesser General Public License in COPYING.LGPL for more details.
 */
#include <starpu.h>
#include <common/config.h>
#include <datawizard/coherency.h>
#include <datawizard/copy_driver.h>
#include <datawizard/filters.h>
#include <starpu_hash.h>
#include <starpu_cuda.h>
#include <starpu_opencl.h>
#include <drivers/opencl/driver_opencl.h>
static int copy_ram_to_ram(void *src_interface, unsigned src_node STARPU_ATTRIBUTE_UNUSED, void *dst_interface, unsigned dst_node STARPU_ATTRIBUTE_UNUSED);
#ifdef STARPU_USE_CUDA
static int copy_ram_to_cuda(void *src_interface, unsigned src_node STARPU_ATTRIBUTE_UNUSED, void *dst_interface, unsigned dst_node STARPU_ATTRIBUTE_UNUSED);
static int copy_cuda_to_ram(void *src_interface, unsigned src_node STARPU_ATTRIBUTE_UNUSED, void *dst_interface, unsigned dst_node STARPU_ATTRIBUTE_UNUSED);
static int copy_ram_to_cuda_async(void *src_interface, unsigned src_node STARPU_ATTRIBUTE_UNUSED, void *dst_interface, unsigned dst_node STARPU_ATTRIBUTE_UNUSED, cudaStream_t stream);
static int copy_cuda_to_ram_async(void *src_interface, unsigned src_node STARPU_ATTRIBUTE_UNUSED, void *dst_interface, unsigned dst_node STARPU_ATTRIBUTE_UNUSED, cudaStream_t stream);
static int copy_cuda_to_cuda(void *src_interface, unsigned src_node STARPU_ATTRIBUTE_UNUSED, void *dst_interface, unsigned dst_node STARPU_ATTRIBUTE_UNUSED);
#endif
#ifdef STARPU_USE_OPENCL
static int copy_ram_to_opencl(void *src_interface, unsigned src_node STARPU_ATTRIBUTE_UNUSED, void *dst_interface, unsigned dst_node STARPU_ATTRIBUTE_UNUSED);
static int copy_opencl_to_ram(void *src_interface, unsigned src_node STARPU_ATTRIBUTE_UNUSED, void *dst_interface, unsigned dst_node STARPU_ATTRIBUTE_UNUSED);
static int copy_ram_to_opencl_async(void *src_interface, unsigned src_node STARPU_ATTRIBUTE_UNUSED, void *dst_interface, unsigned dst_node STARPU_ATTRIBUTE_UNUSED, void *_event);
static int copy_opencl_to_ram_async(void *src_interface, unsigned src_node STARPU_ATTRIBUTE_UNUSED, void *dst_interface, unsigned dst_node STARPU_ATTRIBUTE_UNUSED, void *_event);
#endif
static const struct starpu_data_copy_methods block_copy_data_methods_s =
{
	.ram_to_ram = copy_ram_to_ram,
	.ram_to_spu = NULL,
#ifdef STARPU_USE_CUDA
	.ram_to_cuda = copy_ram_to_cuda,
	.cuda_to_ram = copy_cuda_to_ram,
	.ram_to_cuda_async = copy_ram_to_cuda_async,
	.cuda_to_ram_async = copy_cuda_to_ram_async,
	.cuda_to_cuda = copy_cuda_to_cuda,
#endif
#ifdef STARPU_USE_OPENCL
	.ram_to_opencl = copy_ram_to_opencl,
	.opencl_to_ram = copy_opencl_to_ram,
	.ram_to_opencl_async = copy_ram_to_opencl_async,
	.opencl_to_ram_async = copy_opencl_to_ram_async,
#endif
	.cuda_to_spu = NULL,
	.spu_to_ram = NULL,
	.spu_to_cuda = NULL,
	.spu_to_spu = NULL
};
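/* Illustrative sketch (added for exposition, not StarPU's actual dispatch
 * code): when a block has to move between two memory nodes, the copy driver
 * consults a table like the one above and calls the method matching the
 * (source kind, destination kind) pair, preferring an *_async variant when
 * one is provided. Roughly:
 *
 *	const struct starpu_data_copy_methods *m = &block_copy_data_methods_s;
 *	if (src_kind == STARPU_CPU_RAM && dst_kind == STARPU_CUDA_RAM && m->ram_to_cuda_async)
 *		ret = m->ram_to_cuda_async(src_iface, src_node, dst_iface, dst_node, stream);
 *
 * The names src_kind, src_iface and stream are placeholders; the real dispatch
 * is implemented by the copy driver (see datawizard/copy_driver.h above).
 */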
static void register_block_handle(starpu_data_handle_t handle, uint32_t home_node, void *data_interface);
static void *block_handle_to_pointer(starpu_data_handle_t data_handle, uint32_t node);
static ssize_t allocate_block_buffer_on_node(void *data_interface_, uint32_t dst_node);
static void free_block_buffer_on_node(void *data_interface, uint32_t node);
static size_t block_interface_get_size(starpu_data_handle_t handle);
static uint32_t footprint_block_interface_crc32(starpu_data_handle_t handle);
static int block_compare(void *data_interface_a, void *data_interface_b);
static void display_block_interface(starpu_data_handle_t handle, FILE *f);
#ifdef STARPU_USE_GORDON
static int convert_block_to_gordon(void *data_interface, uint64_t *ptr, gordon_strideSize_t *ss);
#endif
static struct starpu_data_interface_ops interface_block_ops =
{
	.register_data_handle = register_block_handle,
	.allocate_data_on_node = allocate_block_buffer_on_node,
	.handle_to_pointer = block_handle_to_pointer,
	.free_data_on_node = free_block_buffer_on_node,
	.copy_methods = &block_copy_data_methods_s,
	.get_size = block_interface_get_size,
	.footprint = footprint_block_interface_crc32,
	.compare = block_compare,
#ifdef STARPU_USE_GORDON
	.convert_to_gordon = convert_block_to_gordon,
#endif
	.interfaceid = STARPU_BLOCK_INTERFACE_ID,
	.interface_size = sizeof(struct starpu_block_interface),
	.display = display_block_interface
};
#ifdef STARPU_USE_GORDON
int convert_block_to_gordon(void *data_interface, uint64_t *ptr, gordon_strideSize_t *ss)
{
	/* TODO */
	STARPU_ABORT();
	return 0;
}
#endif
static void *block_handle_to_pointer(starpu_data_handle_t handle, uint32_t node)
{
	STARPU_ASSERT(starpu_data_test_if_allocated_on_node(handle, node));
	struct starpu_block_interface *block_interface = (struct starpu_block_interface *)
		starpu_data_get_interface_on_node(handle, node);
	return (void*) block_interface->ptr;
}
static void register_block_handle(starpu_data_handle_t handle, uint32_t home_node, void *data_interface)
{
	struct starpu_block_interface *block_interface = (struct starpu_block_interface *) data_interface;
	unsigned node;
	for (node = 0; node < STARPU_MAXNODES; node++)
	{
		struct starpu_block_interface *local_interface = (struct starpu_block_interface *)
			starpu_data_get_interface_on_node(handle, node);
		if (node == home_node)
		{
			local_interface->ptr = block_interface->ptr;
			local_interface->dev_handle = block_interface->dev_handle;
			local_interface->offset = block_interface->offset;
			local_interface->ldy = block_interface->ldy;
			local_interface->ldz = block_interface->ldz;
		}
		else
		{
			local_interface->ptr = 0;
			local_interface->dev_handle = 0;
			local_interface->offset = 0;
			local_interface->ldy = 0;
			local_interface->ldz = 0;
		}
		local_interface->nx = block_interface->nx;
		local_interface->ny = block_interface->ny;
		local_interface->nz = block_interface->nz;
		local_interface->elemsize = block_interface->elemsize;
	}
}
/* declare a new piece of data with the block interface */
void starpu_block_data_register(starpu_data_handle_t *handleptr, uint32_t home_node,
				uintptr_t ptr, uint32_t ldy, uint32_t ldz, uint32_t nx,
				uint32_t ny, uint32_t nz, size_t elemsize)
{
	struct starpu_block_interface block_interface =
	{
		.ptr = ptr,
		.dev_handle = ptr,
		.offset = 0,
		.ldy = ldy,
		.ldz = ldz,
		.nx = nx,
		.ny = ny,
		.nz = nz,
		.elemsize = elemsize
	};
	starpu_data_register(handleptr, home_node, &block_interface, &interface_block_ops);
}
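/* Usage sketch (illustrative, not part of the original file): registering a
 * dense NX x NY x NZ block of floats that lives in main memory (memory node
 * 0). For an unpadded buffer the leading dimensions are ldy = NX and
 * ldz = NX*NY; NX, NY and NZ stand for the application's own dimensions.
 *
 *	float *buffer = malloc(NX*NY*NZ*sizeof(float));
 *	starpu_data_handle_t block_handle;
 *	starpu_block_data_register(&block_handle, 0, (uintptr_t)buffer,
 *				   NX, NX*NY, NX, NY, NZ, sizeof(float));
 */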
static uint32_t footprint_block_interface_crc32(starpu_data_handle_t handle)
{
	uint32_t hash;
	hash = starpu_crc32_be(starpu_block_get_nx(handle), 0);
	hash = starpu_crc32_be(starpu_block_get_ny(handle), hash);
	hash = starpu_crc32_be(starpu_block_get_nz(handle), hash);
	return hash;
}
static int block_compare(void *data_interface_a, void *data_interface_b)
{
	struct starpu_block_interface *block_a = (struct starpu_block_interface *) data_interface_a;
	struct starpu_block_interface *block_b = (struct starpu_block_interface *) data_interface_b;
	/* Two blocks are considered compatible if they have the same size */
	return ((block_a->nx == block_b->nx)
			&& (block_a->ny == block_b->ny)
			&& (block_a->nz == block_b->nz)
			&& (block_a->elemsize == block_b->elemsize));
}
static void display_block_interface(starpu_data_handle_t handle, FILE *f)
{
	struct starpu_block_interface *block_interface;
	block_interface = (struct starpu_block_interface *) starpu_data_get_interface_on_node(handle, 0);
	fprintf(f, "%u\t%u\t%u\t", block_interface->nx, block_interface->ny, block_interface->nz);
}
static size_t block_interface_get_size(starpu_data_handle_t handle)
{
	size_t size;
	struct starpu_block_interface *block_interface;
	block_interface = (struct starpu_block_interface *) starpu_data_get_interface_on_node(handle, 0);
	size = block_interface->nx*block_interface->ny*block_interface->nz*block_interface->elemsize;
	return size;
}
/* offer an access to the data parameters */
uint32_t starpu_block_get_nx(starpu_data_handle_t handle)
{
	struct starpu_block_interface *block_interface = (struct starpu_block_interface *)
		starpu_data_get_interface_on_node(handle, 0);
	return block_interface->nx;
}
uint32_t starpu_block_get_ny(starpu_data_handle_t handle)
{
	struct starpu_block_interface *block_interface = (struct starpu_block_interface *)
		starpu_data_get_interface_on_node(handle, 0);
	return block_interface->ny;
}
uint32_t starpu_block_get_nz(starpu_data_handle_t handle)
{
	struct starpu_block_interface *block_interface = (struct starpu_block_interface *)
		starpu_data_get_interface_on_node(handle, 0);
	return block_interface->nz;
}
uint32_t starpu_block_get_local_ldy(starpu_data_handle_t handle)
{
	unsigned node;
	node = _starpu_get_local_memory_node();
	STARPU_ASSERT(starpu_data_test_if_allocated_on_node(handle, node));
	struct starpu_block_interface *block_interface = (struct starpu_block_interface *)
		starpu_data_get_interface_on_node(handle, node);
	return block_interface->ldy;
}
uint32_t starpu_block_get_local_ldz(starpu_data_handle_t handle)
{
	unsigned node;
	node = _starpu_get_local_memory_node();
	STARPU_ASSERT(starpu_data_test_if_allocated_on_node(handle, node));
	struct starpu_block_interface *block_interface = (struct starpu_block_interface *)
		starpu_data_get_interface_on_node(handle, node);
	return block_interface->ldz;
}
uintptr_t starpu_block_get_local_ptr(starpu_data_handle_t handle)
{
	unsigned node;
	node = _starpu_get_local_memory_node();
	STARPU_ASSERT(starpu_data_test_if_allocated_on_node(handle, node));
	struct starpu_block_interface *block_interface = (struct starpu_block_interface *)
		starpu_data_get_interface_on_node(handle, node);
	return block_interface->ptr;
}
size_t starpu_block_get_elemsize(starpu_data_handle_t handle)
{
	struct starpu_block_interface *block_interface = (struct starpu_block_interface *)
		starpu_data_get_interface_on_node(handle, 0);
	return block_interface->elemsize;
}
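/* Usage sketch (illustrative, not part of the original file; it assumes the
 * standard starpu_data_acquire()/starpu_data_release() calls from the public
 * StarPU API): once the block is allocated locally, the accessors above expose
 * its geometry, and element (x, y, z) lives at ptr + (x + y*ldy + z*ldz)*elemsize.
 *
 *	starpu_data_acquire(block_handle, STARPU_R);
 *	uint32_t ldy = starpu_block_get_local_ldy(block_handle);
 *	uint32_t ldz = starpu_block_get_local_ldz(block_handle);
 *	float *data = (float *) starpu_block_get_local_ptr(block_handle);
 *	float v = data[0 + 1*ldy + 2*ldz];	// element (0, 1, 2), assuming float elements
 *	starpu_data_release(block_handle);
 */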
/* memory allocation/deallocation primitives for the BLOCK interface */
/* returns the size of the allocated area */
static ssize_t allocate_block_buffer_on_node(void *data_interface_, uint32_t dst_node)
{
	uintptr_t addr = 0, handle = 0;
	unsigned fail = 0;
	ssize_t allocated_memory;
#ifdef STARPU_USE_CUDA
	cudaError_t status;
#endif
	struct starpu_block_interface *dst_block = (struct starpu_block_interface *) data_interface_;
	uint32_t nx = dst_block->nx;
	uint32_t ny = dst_block->ny;
	uint32_t nz = dst_block->nz;
	size_t elemsize = dst_block->elemsize;
	enum starpu_node_kind kind = starpu_node_get_kind(dst_node);
	switch(kind)
	{
		case STARPU_CPU_RAM:
			handle = addr = (uintptr_t)malloc(nx*ny*nz*elemsize);
			if (!addr)
				fail = 1;
			break;
#ifdef STARPU_USE_CUDA
		case STARPU_CUDA_RAM:
			status = cudaMalloc((void **)&addr, nx*ny*nz*elemsize);
			//_STARPU_DEBUG("cudaMalloc -> addr %p\n", addr);
			if (!addr || status != cudaSuccess)
			{
				if (STARPU_UNLIKELY(status != cudaErrorMemoryAllocation))
					STARPU_CUDA_REPORT_ERROR(status);
				fail = 1;
			}
			handle = addr;
			break;
#endif
#ifdef STARPU_USE_OPENCL
		case STARPU_OPENCL_RAM:
		{
			int ret;
			cl_mem mem;
			ret = starpu_opencl_allocate_memory(&mem, nx*ny*nz*elemsize, CL_MEM_READ_WRITE);
			handle = (uintptr_t)mem;
			if (ret)
			{
				fail = 1;
			}
			break;
		}
#endif
		default:
			STARPU_ASSERT(0);
	}
	if (!fail)
	{
		/* allocation succeeded */
		allocated_memory = nx*ny*nz*elemsize;
		/* update the data properly in consequence */
		dst_block->ptr = addr;
		dst_block->dev_handle = handle;
		dst_block->offset = 0;
		dst_block->ldy = nx;
		dst_block->ldz = nx*ny;
	}
	else
	{
		/* allocation failed */
		allocated_memory = -ENOMEM;
	}
	return allocated_memory;
}
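/* Note (added for exposition): a buffer allocated by the function above is
 * always compact, i.e. ldy == nx and ldz == nx*ny, so element (x, y, z) sits at
 *
 *	addr + (x + y*dst_block->ldy + z*dst_block->ldz) * elemsize
 *
 * A user-registered home buffer, on the other hand, may carry padding and thus
 * larger leading dimensions, which is why the copy methods below distinguish
 * the fully contiguous cases from the general pitched case.
 */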
static void free_block_buffer_on_node(void *data_interface, uint32_t node)
{
	struct starpu_block_interface *block_interface = (struct starpu_block_interface *) data_interface;
#ifdef STARPU_USE_CUDA
	cudaError_t status;
#endif
	enum starpu_node_kind kind = starpu_node_get_kind(node);
	switch(kind)
	{
		case STARPU_CPU_RAM:
			free((void*)block_interface->ptr);
			break;
#ifdef STARPU_USE_CUDA
		case STARPU_CUDA_RAM:
			status = cudaFree((void*)block_interface->ptr);
			if (STARPU_UNLIKELY(status))
				STARPU_CUDA_REPORT_ERROR(status);
			break;
#endif
#ifdef STARPU_USE_OPENCL
		case STARPU_OPENCL_RAM:
			clReleaseMemObject((cl_mem)block_interface->dev_handle);
			break;
#endif
		default:
			STARPU_ASSERT(0);
	}
}
#ifdef STARPU_USE_CUDA
static int copy_cuda_common(void *src_interface, unsigned src_node STARPU_ATTRIBUTE_UNUSED, void *dst_interface, unsigned dst_node STARPU_ATTRIBUTE_UNUSED, enum cudaMemcpyKind kind)
{
	struct starpu_block_interface *src_block = src_interface;
	struct starpu_block_interface *dst_block = dst_interface;
	uint32_t nx = src_block->nx;
	uint32_t ny = src_block->ny;
	uint32_t nz = src_block->nz;
	size_t elemsize = src_block->elemsize;
	cudaError_t cures;
	if ((nx == src_block->ldy) && (src_block->ldy == dst_block->ldy))
	{
		/* Is that a single contiguous buffer ? */
		if (((nx*ny) == src_block->ldz) && (src_block->ldz == dst_block->ldz))
		{
			cures = cudaMemcpy((char *)dst_block->ptr, (char *)src_block->ptr,
					nx*ny*nz*elemsize, kind);
			if (STARPU_UNLIKELY(cures))
				STARPU_CUDA_REPORT_ERROR(cures);
		}
		else
		{
			/* Are all planes contiguous? */
			cures = cudaMemcpy2D((char *)dst_block->ptr, dst_block->ldz*elemsize,
					(char *)src_block->ptr, src_block->ldz*elemsize,
					nx*ny*elemsize, nz, kind);
			if (STARPU_UNLIKELY(cures))
				STARPU_CUDA_REPORT_ERROR(cures);
		}
	}
	else
	{
		/* Default case: we transfer all lines one by one: ny*nz transfers */
		unsigned layer;
		for (layer = 0; layer < src_block->nz; layer++)
		{
			uint8_t *src_ptr = ((uint8_t *)src_block->ptr) + layer*src_block->ldz*src_block->elemsize;
			uint8_t *dst_ptr = ((uint8_t *)dst_block->ptr) + layer*dst_block->ldz*dst_block->elemsize;
			cures = cudaMemcpy2D((char *)dst_ptr, dst_block->ldy*elemsize,
					(char *)src_ptr, src_block->ldy*elemsize,
					nx*elemsize, ny, kind);
			if (STARPU_UNLIKELY(cures))
				STARPU_CUDA_REPORT_ERROR(cures);
		}
	}
	_STARPU_TRACE_DATA_COPY(src_node, dst_node, src_block->nx*src_block->ny*src_block->nz*src_block->elemsize);
	return 0;
}
static int copy_cuda_async_common(void *src_interface, unsigned src_node STARPU_ATTRIBUTE_UNUSED, void *dst_interface, unsigned dst_node STARPU_ATTRIBUTE_UNUSED, cudaStream_t stream, enum cudaMemcpyKind kind)
{
	struct starpu_block_interface *src_block = src_interface;
	struct starpu_block_interface *dst_block = dst_interface;
	uint32_t nx = src_block->nx;
	uint32_t ny = src_block->ny;
	uint32_t nz = src_block->nz;
	size_t elemsize = src_block->elemsize;
	cudaError_t cures;
	int ret;
	/* We may have a contiguous buffer for the entire block, or contiguous
	 * planes within the block; that way we can avoid many small transfers */
	if ((nx == src_block->ldy) && (src_block->ldy == dst_block->ldy))
	{
		/* Is that a single contiguous buffer ? */
		if (((nx*ny) == src_block->ldz) && (src_block->ldz == dst_block->ldz))
		{
			_STARPU_TRACE_START_DRIVER_COPY_ASYNC(src_node, dst_node);
			cures = cudaMemcpyAsync((char *)dst_block->ptr, (char *)src_block->ptr,
					nx*ny*nz*elemsize, kind, stream);
			_STARPU_TRACE_END_DRIVER_COPY_ASYNC(src_node, dst_node);
			if (STARPU_UNLIKELY(cures))
			{
				cures = cudaMemcpy((char *)dst_block->ptr, (char *)src_block->ptr,
						nx*ny*nz*elemsize, kind);
				if (STARPU_UNLIKELY(cures))
					STARPU_CUDA_REPORT_ERROR(cures);
				ret = 0;
			}
			else
			{
				ret = -EAGAIN;
			}
		}
		else
		{
			/* Are all planes contiguous? */
			_STARPU_TRACE_START_DRIVER_COPY_ASYNC(src_node, dst_node);
			cures = cudaMemcpy2DAsync((char *)dst_block->ptr, dst_block->ldz*elemsize,
					(char *)src_block->ptr, src_block->ldz*elemsize,
					nx*ny*elemsize, nz, kind, stream);
			_STARPU_TRACE_END_DRIVER_COPY_ASYNC(src_node, dst_node);
			if (STARPU_UNLIKELY(cures))
			{
				cures = cudaMemcpy2D((char *)dst_block->ptr, dst_block->ldz*elemsize,
						(char *)src_block->ptr, src_block->ldz*elemsize,
						nx*ny*elemsize, nz, kind);
				if (STARPU_UNLIKELY(cures))
					STARPU_CUDA_REPORT_ERROR(cures);
				ret = 0;
			}
			else
			{
				ret = -EAGAIN;
			}
		}
	}
	else
	{
		/* Default case: we transfer all lines one by one: ny*nz transfers */
		unsigned layer;
		for (layer = 0; layer < src_block->nz; layer++)
		{
			uint8_t *src_ptr = ((uint8_t *)src_block->ptr) + layer*src_block->ldz*src_block->elemsize;
			uint8_t *dst_ptr = ((uint8_t *)dst_block->ptr) + layer*dst_block->ldz*dst_block->elemsize;
			_STARPU_TRACE_START_DRIVER_COPY_ASYNC(src_node, dst_node);
			cures = cudaMemcpy2DAsync((char *)dst_ptr, dst_block->ldy*elemsize,
					(char *)src_ptr, src_block->ldy*elemsize,
					nx*elemsize, ny, kind, stream);
			_STARPU_TRACE_END_DRIVER_COPY_ASYNC(src_node, dst_node);
			if (STARPU_UNLIKELY(cures))
			{
				/* I don't know how to do that "better" */
				goto no_async_default;
			}
		}
		ret = -EAGAIN;
	}
	_STARPU_TRACE_DATA_COPY(src_node, dst_node, src_block->nx*src_block->ny*src_block->nz*src_block->elemsize);
	return ret;
no_async_default:
	{
		unsigned layer;
		for (layer = 0; layer < src_block->nz; layer++)
		{
			uint8_t *src_ptr = ((uint8_t *)src_block->ptr) + layer*src_block->ldz*src_block->elemsize;
			uint8_t *dst_ptr = ((uint8_t *)dst_block->ptr) + layer*dst_block->ldz*dst_block->elemsize;
			cures = cudaMemcpy2D((char *)dst_ptr, dst_block->ldy*elemsize,
					(char *)src_ptr, src_block->ldy*elemsize,
					nx*elemsize, ny, kind);
			if (STARPU_UNLIKELY(cures))
				STARPU_CUDA_REPORT_ERROR(cures);
		}
		_STARPU_TRACE_DATA_COPY(src_node, dst_node, src_block->nx*src_block->ny*src_block->nz*src_block->elemsize);
		return 0;
	}
}
static int copy_cuda_to_ram(void *src_interface, unsigned src_node, void *dst_interface, unsigned dst_node)
{
	return copy_cuda_common(src_interface, src_node, dst_interface, dst_node, cudaMemcpyDeviceToHost);
}
static int copy_ram_to_cuda(void *src_interface, unsigned src_node STARPU_ATTRIBUTE_UNUSED, void *dst_interface, unsigned dst_node STARPU_ATTRIBUTE_UNUSED)
{
	return copy_cuda_common(src_interface, src_node, dst_interface, dst_node, cudaMemcpyHostToDevice);
}
static int copy_cuda_to_cuda(void *src_interface, unsigned src_node STARPU_ATTRIBUTE_UNUSED, void *dst_interface, unsigned dst_node STARPU_ATTRIBUTE_UNUSED)
{
	return copy_cuda_common(src_interface, src_node, dst_interface, dst_node, cudaMemcpyDeviceToDevice);
}
static int copy_cuda_to_ram_async(void *src_interface, unsigned src_node STARPU_ATTRIBUTE_UNUSED, void *dst_interface, unsigned dst_node STARPU_ATTRIBUTE_UNUSED, cudaStream_t stream)
{
	return copy_cuda_async_common(src_interface, src_node, dst_interface, dst_node, stream, cudaMemcpyDeviceToHost);
}
static int copy_ram_to_cuda_async(void *src_interface, unsigned src_node STARPU_ATTRIBUTE_UNUSED, void *dst_interface, unsigned dst_node STARPU_ATTRIBUTE_UNUSED, cudaStream_t stream)
{
	return copy_cuda_async_common(src_interface, src_node, dst_interface, dst_node, stream, cudaMemcpyHostToDevice);
}
#endif // STARPU_USE_CUDA
#ifdef STARPU_USE_OPENCL
static int copy_ram_to_opencl_async(void *src_interface, unsigned src_node STARPU_ATTRIBUTE_UNUSED, void *dst_interface, unsigned dst_node STARPU_ATTRIBUTE_UNUSED, void *_event)
{
	struct starpu_block_interface *src_block = src_interface;
	struct starpu_block_interface *dst_block = dst_interface;
	int err, ret = 0;
	uint32_t nx = src_block->nx;
	uint32_t ny = src_block->ny;
	/* We may have a contiguous buffer for the entire block, or contiguous
	 * planes within the block; that way we can avoid many small transfers */
	if ((nx == src_block->ldy) && (src_block->ldy == dst_block->ldy))
	{
		/* Is that a single contiguous buffer ? */
		if (((nx*ny) == src_block->ldz) && (src_block->ldz == dst_block->ldz))
		{
			err = starpu_opencl_copy_ram_to_opencl_async_sync((void*)src_block->ptr, src_node, (cl_mem)dst_block->dev_handle, dst_node,
					src_block->nx*src_block->ny*src_block->nz*src_block->elemsize,
					dst_block->offset, (cl_event*)_event, &ret);
			if (STARPU_UNLIKELY(err))
				STARPU_OPENCL_REPORT_ERROR(err);
		}
		else
		{
			/* Are all planes contiguous? */
			/* XXX non contiguous buffers are not properly supported yet. (TODO) */
			STARPU_ASSERT(0);
		}
	}
	else
	{
		/* Default case: we transfer all lines one by one: ny*nz transfers */
		unsigned layer;
		for (layer = 0; layer < src_block->nz; layer++)
		{
			unsigned j;
			for(j=0 ; j<src_block->ny ; j++)
			{
				void *ptr = (void *)(src_block->ptr + (layer*src_block->ldz*src_block->elemsize) + (j*src_block->ldy*src_block->elemsize));
				err = starpu_opencl_copy_ram_to_opencl(ptr, src_node, (cl_mem)dst_block->dev_handle, dst_node,
						src_block->nx*src_block->elemsize,
						layer*dst_block->ldz*dst_block->elemsize + j*dst_block->ldy*dst_block->elemsize
						+ dst_block->offset, NULL);
				if (STARPU_UNLIKELY(err))
					STARPU_OPENCL_REPORT_ERROR(err);
			}
//			int *foo = (int *)(src_block->ptr+(layer*src_block->ldz*src_block->elemsize));
//			fprintf(stderr, "layer %d --> value %d\n", layer, foo[1]);
//			const size_t buffer_origin[3] = {layer*src_block->ldz*src_block->elemsize, 0, 0};
//			//const size_t buffer_origin[3] = {0, 0, 0};
//			const size_t host_origin[3] = {layer*dst_block->ldz*dst_block->elemsize+dst_block->offset, 0, 0};
//			size_t region[3] = {src_block->nx*src_block->elemsize,src_block->ny, 1};
//			size_t buffer_row_pitch=region[0];
//			size_t buffer_slice_pitch=region[1] * buffer_row_pitch;
//			size_t host_row_pitch=region[0];
//			size_t host_slice_pitch=region[1] * host_row_pitch;
//
//			_starpu_opencl_copy_rect_ram_to_opencl((void *)src_block->ptr, src_node, (cl_mem)dst_block->dev_handle, dst_node,
//					buffer_origin, host_origin, region,
//					buffer_row_pitch, buffer_slice_pitch,
//					host_row_pitch, host_slice_pitch, NULL);
		}
	}
	_STARPU_TRACE_DATA_COPY(src_node, dst_node, src_block->nx*src_block->ny*src_block->nz*src_block->elemsize);
	return ret;
}
static int copy_opencl_to_ram_async(void *src_interface, unsigned src_node STARPU_ATTRIBUTE_UNUSED, void *dst_interface, unsigned dst_node STARPU_ATTRIBUTE_UNUSED, void *_event)
{
	struct starpu_block_interface *src_block = src_interface;
	struct starpu_block_interface *dst_block = dst_interface;
	int err, ret = 0;
	/* We may have a contiguous buffer for the entire block, or contiguous
	 * planes within the block; that way we can avoid many small transfers */
	if ((src_block->nx == src_block->ldy) && (src_block->ldy == dst_block->ldy))
	{
		/* Is that a single contiguous buffer ? */
		if (((src_block->nx*src_block->ny) == src_block->ldz) && (src_block->ldz == dst_block->ldz))
		{
			err = starpu_opencl_copy_opencl_to_ram_async_sync((cl_mem)src_block->dev_handle, src_node, (void*)dst_block->ptr, dst_node,
					src_block->nx*src_block->ny*src_block->nz*src_block->elemsize,
					src_block->offset, (cl_event*)_event, &ret);
			if (STARPU_UNLIKELY(err))
				STARPU_OPENCL_REPORT_ERROR(err);
		}
		else
		{
			/* Are all planes contiguous? */
			/* XXX non contiguous buffers are not properly supported yet. (TODO) */
			STARPU_ASSERT(0);
		}
	}
	else
	{
		/* Default case: we transfer all lines one by one: ny*nz transfers */
		/* XXX non contiguous buffers are not properly supported yet. (TODO) */
		unsigned layer;
		for (layer = 0; layer < src_block->nz; layer++)
		{
			unsigned j;
			for(j=0 ; j<src_block->ny ; j++)
			{
				void *ptr = (void *)(dst_block->ptr + (layer*dst_block->ldz*dst_block->elemsize) + (j*dst_block->ldy*dst_block->elemsize));
				err = starpu_opencl_copy_opencl_to_ram((cl_mem)src_block->dev_handle, src_node, ptr, dst_node,
						src_block->nx*src_block->elemsize,
						layer*src_block->ldz*src_block->elemsize + j*src_block->ldy*src_block->elemsize +
						src_block->offset, NULL);
				if (STARPU_UNLIKELY(err))
					STARPU_OPENCL_REPORT_ERROR(err);
			}
//			const size_t buffer_origin[3] = {src_block->offset, 0, 0};
//			const size_t host_origin[3] = {layer*src_block->ldz*src_block->elemsize, 0, 0};
//			size_t region[3] = {src_block->nx*src_block->elemsize,src_block->ny, 1};
//			size_t buffer_row_pitch=region[0];
//			size_t buffer_slice_pitch=region[1] * buffer_row_pitch;
//			size_t host_row_pitch=region[0];
//			size_t host_slice_pitch=region[1] * host_row_pitch;
//
//			_starpu_opencl_copy_rect_opencl_to_ram((cl_mem)src_block->dev_handle, src_node, (void *)dst_block->ptr, dst_node,
//					buffer_origin, host_origin, region,
//					buffer_row_pitch, buffer_slice_pitch,
//					host_row_pitch, host_slice_pitch, NULL);
		}
	}
	_STARPU_TRACE_DATA_COPY(src_node, dst_node, src_block->nx*src_block->ny*src_block->nz*src_block->elemsize);
	return ret;
}
static int copy_ram_to_opencl(void *src_interface, unsigned src_node STARPU_ATTRIBUTE_UNUSED, void *dst_interface, unsigned dst_node STARPU_ATTRIBUTE_UNUSED)
{
	return copy_ram_to_opencl_async(src_interface, src_node, dst_interface, dst_node, NULL);
}
static int copy_opencl_to_ram(void *src_interface, unsigned src_node STARPU_ATTRIBUTE_UNUSED, void *dst_interface, unsigned dst_node STARPU_ATTRIBUTE_UNUSED)
{
	return copy_opencl_to_ram_async(src_interface, src_node, dst_interface, dst_node, NULL);
}
#endif
/* as not all platforms easily have a BLAS lib installed ... */
static int copy_ram_to_ram(void *src_interface, unsigned src_node STARPU_ATTRIBUTE_UNUSED, void *dst_interface, unsigned dst_node STARPU_ATTRIBUTE_UNUSED)
{
	struct starpu_block_interface *src_block = (struct starpu_block_interface *) src_interface;
	struct starpu_block_interface *dst_block = (struct starpu_block_interface *) dst_interface;
	uint32_t nx = dst_block->nx;
	uint32_t ny = dst_block->ny;
	uint32_t nz = dst_block->nz;
	size_t elemsize = dst_block->elemsize;
	uint32_t ldy_src = src_block->ldy;
	uint32_t ldz_src = src_block->ldz;
	uint32_t ldy_dst = dst_block->ldy;
	uint32_t ldz_dst = dst_block->ldz;
	uintptr_t ptr_src = src_block->ptr;
	uintptr_t ptr_dst = dst_block->ptr;
	unsigned y, z;
	for (z = 0; z < nz; z++)
	{
		for (y = 0; y < ny; y++)
		{
			uint32_t src_offset = (y*ldy_src + z*ldz_src)*elemsize;
			uint32_t dst_offset = (y*ldy_dst + z*ldz_dst)*elemsize;
			memcpy((void *)(ptr_dst + dst_offset),
					(void *)(ptr_src + src_offset), nx*elemsize);
		}
	}
	_STARPU_TRACE_DATA_COPY(src_node, dst_node, nx*ny*nz*elemsize);
	return 0;
}