/* block_interface.c */
/* StarPU --- Runtime system for heterogeneous multicore architectures.
 *
 * Copyright (C) 2009, 2010 Université de Bordeaux 1
 * Copyright (C) 2010 Centre National de la Recherche Scientifique
 *
 * StarPU is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation; either version 2.1 of the License, or (at
 * your option) any later version.
 *
 * StarPU is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * See the GNU Lesser General Public License in COPYING.LGPL for more details.
 */
#include <starpu.h>
#include <common/config.h>
#include <datawizard/coherency.h>
#include <datawizard/copy_driver.h>
#include <datawizard/filters.h>
#include <common/hash.h>
#include <starpu_cuda.h>
#include <starpu_opencl.h>
#include <drivers/opencl/driver_opencl.h>

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <assert.h>
  26. static int copy_ram_to_ram(void *src_interface, unsigned src_node __attribute__((unused)), void *dst_interface, unsigned dst_node __attribute__((unused)));
  27. #ifdef STARPU_USE_CUDA
  28. static int copy_ram_to_cuda(void *src_interface, unsigned src_node __attribute__((unused)), void *dst_interface, unsigned dst_node __attribute__((unused)));
  29. static int copy_cuda_to_ram(void *src_interface, unsigned src_node __attribute__((unused)), void *dst_interface, unsigned dst_node __attribute__((unused)));
  30. static int copy_ram_to_cuda_async(void *src_interface, unsigned src_node __attribute__((unused)), void *dst_interface, unsigned dst_node __attribute__((unused)), cudaStream_t stream);
  31. static int copy_cuda_to_ram_async(void *src_interface, unsigned src_node __attribute__((unused)), void *dst_interface, unsigned dst_node __attribute__((unused)), cudaStream_t stream);
  32. static int copy_cuda_to_cuda(void *src_interface, unsigned src_node __attribute__((unused)), void *dst_interface, unsigned dst_node __attribute__((unused)));
  33. #endif
  34. #ifdef STARPU_USE_OPENCL
  35. static int copy_ram_to_opencl(void *src_interface, unsigned src_node __attribute__((unused)), void *dst_interface, unsigned dst_node __attribute__((unused)));
  36. static int copy_opencl_to_ram(void *src_interface, unsigned src_node __attribute__((unused)), void *dst_interface, unsigned dst_node __attribute__((unused)));
  37. static int copy_ram_to_opencl_async(void *src_interface, unsigned src_node __attribute__((unused)), void *dst_interface, unsigned dst_node __attribute__((unused)), void *_event);
  38. static int copy_opencl_to_ram_async(void *src_interface, unsigned src_node __attribute__((unused)), void *dst_interface, unsigned dst_node __attribute__((unused)), void *_event);
  39. #endif
  40. static const struct starpu_data_copy_methods block_copy_data_methods_s = {
  41. .ram_to_ram = copy_ram_to_ram,
  42. .ram_to_spu = NULL,
  43. #ifdef STARPU_USE_CUDA
  44. .ram_to_cuda = copy_ram_to_cuda,
  45. .cuda_to_ram = copy_cuda_to_ram,
  46. .ram_to_cuda_async = copy_ram_to_cuda_async,
  47. .cuda_to_ram_async = copy_cuda_to_ram_async,
  48. .cuda_to_cuda = copy_cuda_to_cuda,
  49. #endif
  50. #ifdef STARPU_USE_OPENCL
  51. .ram_to_opencl = copy_ram_to_opencl,
  52. .opencl_to_ram = copy_opencl_to_ram,
  53. .ram_to_opencl_async = copy_ram_to_opencl_async,
  54. .opencl_to_ram_async = copy_opencl_to_ram_async,
  55. #endif
  56. .cuda_to_spu = NULL,
  57. .spu_to_ram = NULL,
  58. .spu_to_cuda = NULL,
  59. .spu_to_spu = NULL
  60. };
  61. static void register_block_handle(starpu_data_handle handle, uint32_t home_node, void *interface);
  62. static ssize_t allocate_block_buffer_on_node(void *interface_, uint32_t dst_node);
  63. static void free_block_buffer_on_node(void *interface, uint32_t node);
  64. static size_t block_interface_get_size(starpu_data_handle handle);
  65. static uint32_t footprint_block_interface_crc32(starpu_data_handle handle);
  66. static int block_compare(void *interface_a, void *interface_b);
  67. static void display_block_interface(starpu_data_handle handle, FILE *f);
  68. #ifdef STARPU_USE_GORDON
  69. static int convert_block_to_gordon(void *interface, uint64_t *ptr, gordon_strideSize_t *ss);
  70. #endif
  71. static struct starpu_data_interface_ops_t interface_block_ops = {
  72. .register_data_handle = register_block_handle,
  73. .allocate_data_on_node = allocate_block_buffer_on_node,
  74. .free_data_on_node = free_block_buffer_on_node,
  75. .copy_methods = &block_copy_data_methods_s,
  76. .get_size = block_interface_get_size,
  77. .footprint = footprint_block_interface_crc32,
  78. .compare = block_compare,
  79. #ifdef STARPU_USE_GORDON
  80. .convert_to_gordon = convert_block_to_gordon,
  81. #endif
  82. .interfaceid = STARPU_BLOCK_INTERFACE_ID,
  83. .interface_size = sizeof(starpu_block_interface_t),
  84. .display = display_block_interface
  85. };
  86. #ifdef STARPU_USE_GORDON
  87. int convert_block_to_gordon(void *interface, uint64_t *ptr, gordon_strideSize_t *ss)
  88. {
  89. /* TODO */
  90. STARPU_ABORT();
  91. return 0;
  92. }
  93. #endif
  94. static void register_block_handle(starpu_data_handle handle, uint32_t home_node, void *interface)
  95. {
  96. starpu_block_interface_t *block_interface = interface;
  97. unsigned node;
  98. for (node = 0; node < STARPU_MAXNODES; node++)
  99. {
  100. starpu_block_interface_t *local_interface =
  101. starpu_data_get_interface_on_node(handle, node);
  102. if (node == home_node) {
  103. local_interface->ptr = block_interface->ptr;
  104. local_interface->dev_handle = block_interface->dev_handle;
  105. local_interface->offset = block_interface->offset;
  106. local_interface->ldy = block_interface->ldy;
  107. local_interface->ldz = block_interface->ldz;
  108. }
  109. else {
  110. local_interface->ptr = 0;
  111. local_interface->dev_handle = 0;
  112. local_interface->offset = 0;
  113. local_interface->ldy = 0;
  114. local_interface->ldz = 0;
  115. }
  116. local_interface->nx = block_interface->nx;
  117. local_interface->ny = block_interface->ny;
  118. local_interface->nz = block_interface->nz;
  119. local_interface->elemsize = block_interface->elemsize;
  120. }
  121. }
  122. /* declare a new data with the BLAS interface */
  123. void starpu_block_data_register(starpu_data_handle *handleptr, uint32_t home_node,
  124. uintptr_t ptr, uint32_t ldy, uint32_t ldz, uint32_t nx,
  125. uint32_t ny, uint32_t nz, size_t elemsize)
  126. {
  127. starpu_block_interface_t interface = {
  128. .ptr = ptr,
  129. .dev_handle = ptr,
  130. .offset = 0,
  131. .ldy = ldy,
  132. .ldz = ldz,
  133. .nx = nx,
  134. .ny = ny,
  135. .nz = nz,
  136. .elemsize = elemsize
  137. };
  138. starpu_data_register(handleptr, home_node, &interface, &interface_block_ops);
  139. }
  140. static uint32_t footprint_block_interface_crc32(starpu_data_handle handle)
  141. {
  142. uint32_t hash;
  143. hash = _starpu_crc32_be(starpu_block_get_nx(handle), 0);
  144. hash = _starpu_crc32_be(starpu_block_get_ny(handle), hash);
  145. hash = _starpu_crc32_be(starpu_block_get_nz(handle), hash);
  146. return hash;
  147. }
  148. static int block_compare(void *interface_a, void *interface_b)
  149. {
  150. starpu_block_interface_t *block_a = interface_a;
  151. starpu_block_interface_t *block_b = interface_b;
  152. /* Two matricess are considered compatible if they have the same size */
  153. return ((block_a->nx == block_b->nx)
  154. && (block_a->ny == block_b->ny)
  155. && (block_a->nz == block_b->nz)
  156. && (block_a->elemsize == block_b->elemsize));
  157. }
  158. static void display_block_interface(starpu_data_handle handle, FILE *f)
  159. {
  160. starpu_block_interface_t *interface;
  161. interface = starpu_data_get_interface_on_node(handle, 0);
  162. fprintf(f, "%u\t%u\t%u\t", interface->nx, interface->ny, interface->nz);
  163. }
  164. static size_t block_interface_get_size(starpu_data_handle handle)
  165. {
  166. size_t size;
  167. starpu_block_interface_t *interface;
  168. interface = starpu_data_get_interface_on_node(handle, 0);
  169. size = interface->nx*interface->ny*interface->nz*interface->elemsize;
  170. return size;
  171. }
  172. /* offer an access to the data parameters */
  173. uint32_t starpu_block_get_nx(starpu_data_handle handle)
  174. {
  175. starpu_block_interface_t *interface =
  176. starpu_data_get_interface_on_node(handle, 0);
  177. return interface->nx;
  178. }
  179. uint32_t starpu_block_get_ny(starpu_data_handle handle)
  180. {
  181. starpu_block_interface_t *interface =
  182. starpu_data_get_interface_on_node(handle, 0);
  183. return interface->ny;
  184. }
  185. uint32_t starpu_block_get_nz(starpu_data_handle handle)
  186. {
  187. starpu_block_interface_t *interface =
  188. starpu_data_get_interface_on_node(handle, 0);
  189. return interface->nz;
  190. }
  191. uint32_t starpu_block_get_local_ldy(starpu_data_handle handle)
  192. {
  193. unsigned node;
  194. node = _starpu_get_local_memory_node();
  195. STARPU_ASSERT(starpu_data_test_if_allocated_on_node(handle, node));
  196. starpu_block_interface_t *interface =
  197. starpu_data_get_interface_on_node(handle, node);
  198. return interface->ldy;
  199. }
  200. uint32_t starpu_block_get_local_ldz(starpu_data_handle handle)
  201. {
  202. unsigned node;
  203. node = _starpu_get_local_memory_node();
  204. STARPU_ASSERT(starpu_data_test_if_allocated_on_node(handle, node));
  205. starpu_block_interface_t *interface =
  206. starpu_data_get_interface_on_node(handle, node);
  207. return interface->ldz;
  208. }
  209. uintptr_t starpu_block_get_local_ptr(starpu_data_handle handle)
  210. {
  211. unsigned node;
  212. node = _starpu_get_local_memory_node();
  213. STARPU_ASSERT(starpu_data_test_if_allocated_on_node(handle, node));
  214. starpu_block_interface_t *interface =
  215. starpu_data_get_interface_on_node(handle, node);
  216. return interface->ptr;
  217. }
  218. size_t starpu_block_get_elemsize(starpu_data_handle handle)
  219. {
  220. starpu_block_interface_t *interface =
  221. starpu_data_get_interface_on_node(handle, 0);
  222. return interface->elemsize;
  223. }
  224. /* memory allocation/deallocation primitives for the BLOCK interface */
  225. /* returns the size of the allocated area */
  226. static ssize_t allocate_block_buffer_on_node(void *interface_, uint32_t dst_node)
  227. {
  228. uintptr_t addr = 0;
  229. unsigned fail = 0;
  230. ssize_t allocated_memory;
  231. #ifdef STARPU_USE_CUDA
  232. cudaError_t status;
  233. #endif
  234. starpu_block_interface_t *dst_block = interface_;
  235. uint32_t nx = dst_block->nx;
  236. uint32_t ny = dst_block->ny;
  237. uint32_t nz = dst_block->nz;
  238. size_t elemsize = dst_block->elemsize;
  239. starpu_node_kind kind = _starpu_get_node_kind(dst_node);
  240. switch(kind) {
  241. case STARPU_CPU_RAM:
  242. addr = (uintptr_t)malloc(nx*ny*nz*elemsize);
  243. if (!addr)
  244. fail = 1;
  245. break;
  246. #ifdef STARPU_USE_CUDA
  247. case STARPU_CUDA_RAM:
  248. status = cudaMalloc((void **)&addr, nx*ny*nz*elemsize);
  249. //_STARPU_DEBUG("cudaMalloc -> addr %p\n", addr);
  250. if (!addr || status != cudaSuccess)
  251. {
  252. if (STARPU_UNLIKELY(status != cudaErrorMemoryAllocation))
  253. STARPU_CUDA_REPORT_ERROR(status);
  254. fail = 1;
  255. }
  256. break;
  257. #endif
  258. #ifdef STARPU_USE_OPENCL
  259. case STARPU_OPENCL_RAM:
  260. {
  261. int ret;
  262. void *ptr;
  263. ret = _starpu_opencl_allocate_memory(&ptr, nx*ny*nz*elemsize, CL_MEM_READ_WRITE);
  264. addr = (uintptr_t)ptr;
  265. if (ret) {
  266. fail = 1;
  267. }
  268. break;
  269. }
  270. #endif
  271. default:
  272. assert(0);
  273. }
  274. if (!fail) {
  275. /* allocation succeeded */
  276. allocated_memory = nx*ny*nz*elemsize;
  277. /* update the data properly in consequence */
  278. dst_block->ptr = addr;
  279. dst_block->dev_handle = addr;
  280. dst_block->offset = 0;
  281. dst_block->ldy = nx;
  282. dst_block->ldz = nx*ny;
  283. } else {
  284. /* allocation failed */
  285. allocated_memory = -ENOMEM;
  286. }
  287. return allocated_memory;
  288. }
  289. static void free_block_buffer_on_node(void *interface, uint32_t node)
  290. {
  291. starpu_block_interface_t *block_interface = interface;
  292. #ifdef STARPU_USE_CUDA
  293. cudaError_t status;
  294. #endif
  295. starpu_node_kind kind = _starpu_get_node_kind(node);
  296. switch(kind) {
  297. case STARPU_CPU_RAM:
  298. free((void*)block_interface->ptr);
  299. break;
  300. #ifdef STARPU_USE_CUDA
  301. case STARPU_CUDA_RAM:
  302. status = cudaFree((void*)block_interface->ptr);
  303. if (STARPU_UNLIKELY(status))
  304. STARPU_CUDA_REPORT_ERROR(status);
  305. break;
  306. #endif
  307. #ifdef STARPU_USE_OPENCL
  308. case STARPU_OPENCL_RAM:
  309. clReleaseMemObject((void *)block_interface->ptr);
  310. break;
  311. #endif
  312. default:
  313. assert(0);
  314. }
  315. }
  316. #ifdef STARPU_USE_CUDA
  317. static int copy_cuda_common(void *src_interface, unsigned src_node __attribute__((unused)), void *dst_interface, unsigned dst_node __attribute__((unused)), enum cudaMemcpyKind kind)
  318. {
  319. starpu_block_interface_t *src_block = src_interface;
  320. starpu_block_interface_t *dst_block = dst_interface;
  321. uint32_t nx = src_block->nx;
  322. uint32_t ny = src_block->ny;
  323. uint32_t nz = src_block->nz;
  324. size_t elemsize = src_block->elemsize;
  325. cudaError_t cures;
  326. if ((nx == src_block->ldy) && (src_block->ldy == dst_block->ldy))
  327. {
  328. /* Is that a single contiguous buffer ? */
  329. if (((nx*ny) == src_block->ldz) && (src_block->ldz == dst_block->ldz))
  330. {
  331. cures = cudaMemcpy((char *)dst_block->ptr, (char *)src_block->ptr,
  332. nx*ny*nz*elemsize, kind);
  333. if (STARPU_UNLIKELY(cures))
  334. STARPU_CUDA_REPORT_ERROR(cures);
  335. }
  336. else {
  337. /* Are all plans contiguous */
  338. cures = cudaMemcpy2D((char *)dst_block->ptr, dst_block->ldz*elemsize,
  339. (char *)src_block->ptr, src_block->ldz*elemsize,
  340. nx*ny*elemsize, nz, kind);
  341. if (STARPU_UNLIKELY(cures))
  342. STARPU_CUDA_REPORT_ERROR(cures);
  343. }
  344. }
  345. else {
  346. /* Default case: we transfer all lines one by one: ny*nz transfers */
  347. unsigned layer;
  348. for (layer = 0; layer < src_block->nz; layer++)
  349. {
  350. uint8_t *src_ptr = ((uint8_t *)src_block->ptr) + layer*src_block->ldz*src_block->elemsize;
  351. uint8_t *dst_ptr = ((uint8_t *)dst_block->ptr) + layer*dst_block->ldz*dst_block->elemsize;
  352. cures = cudaMemcpy2D((char *)dst_ptr, dst_block->ldy*elemsize,
  353. (char *)src_ptr, src_block->ldy*elemsize,
  354. nx*elemsize, ny, kind);
  355. if (STARPU_UNLIKELY(cures))
  356. STARPU_CUDA_REPORT_ERROR(cures);
  357. }
  358. }
  359. STARPU_TRACE_DATA_COPY(src_node, dst_node, src_block->nx*src_block->ny*src_block->elemsize*src_block->elemsize);
  360. return 0;
  361. }
  362. static int copy_cuda_async_common(void *src_interface, unsigned src_node __attribute__((unused)), void *dst_interface, unsigned dst_node __attribute__((unused)), cudaStream_t stream, enum cudaMemcpyKind kind)
  363. {
  364. starpu_block_interface_t *src_block = src_interface;
  365. starpu_block_interface_t *dst_block = dst_interface;
  366. uint32_t nx = src_block->nx;
  367. uint32_t ny = src_block->ny;
  368. uint32_t nz = src_block->nz;
  369. size_t elemsize = src_block->elemsize;
  370. cudaError_t cures;
  371. int ret;
  372. /* We may have a contiguous buffer for the entire block, or contiguous
  373. * plans within the block, we can avoid many small transfers that way */
  374. if ((nx == src_block->ldy) && (src_block->ldy == dst_block->ldy))
  375. {
  376. /* Is that a single contiguous buffer ? */
  377. if (((nx*ny) == src_block->ldz) && (src_block->ldz == dst_block->ldz))
  378. {
  379. cures = cudaMemcpyAsync((char *)dst_block->ptr, (char *)src_block->ptr,
  380. nx*ny*nz*elemsize, kind, stream);
  381. if (STARPU_UNLIKELY(cures))
  382. {
  383. cures = cudaMemcpy((char *)dst_block->ptr, (char *)src_block->ptr,
  384. nx*ny*nz*elemsize, kind);
  385. if (STARPU_UNLIKELY(cures))
  386. STARPU_CUDA_REPORT_ERROR(cures);
  387. ret = 0;
  388. }
  389. else {
  390. ret = -EAGAIN;
  391. }
  392. }
  393. else {
  394. /* Are all plans contiguous */
  395. cures = cudaMemcpy2DAsync((char *)dst_block->ptr, dst_block->ldz*elemsize,
  396. (char *)src_block->ptr, src_block->ldz*elemsize,
  397. nx*ny*elemsize, nz, kind, stream);
  398. if (STARPU_UNLIKELY(cures))
  399. {
  400. cures = cudaMemcpy2D((char *)dst_block->ptr, dst_block->ldz*elemsize,
  401. (char *)src_block->ptr, src_block->ldz*elemsize,
  402. nx*ny*elemsize, nz, kind);
  403. if (STARPU_UNLIKELY(cures))
  404. STARPU_CUDA_REPORT_ERROR(cures);
  405. ret = 0;
  406. }
  407. else {
  408. ret = -EAGAIN;
  409. }
  410. }
  411. }
  412. else {
  413. /* Default case: we transfer all lines one by one: ny*nz transfers */
  414. unsigned layer;
  415. for (layer = 0; layer < src_block->nz; layer++)
  416. {
  417. uint8_t *src_ptr = ((uint8_t *)src_block->ptr) + layer*src_block->ldz*src_block->elemsize;
  418. uint8_t *dst_ptr = ((uint8_t *)dst_block->ptr) + layer*dst_block->ldz*dst_block->elemsize;
  419. cures = cudaMemcpy2DAsync((char *)dst_ptr, dst_block->ldy*elemsize,
  420. (char *)src_ptr, src_block->ldy*elemsize,
  421. nx*elemsize, ny, kind, stream);
  422. if (STARPU_UNLIKELY(cures))
  423. {
  424. /* I don't know how to do that "better" */
  425. goto no_async_default;
  426. }
  427. }
  428. ret = -EAGAIN;
  429. }
  430. STARPU_TRACE_DATA_COPY(src_node, dst_node, src_block->nx*src_block->ny*src_block->nz*src_block->elemsize);
  431. return ret;
  432. no_async_default:
  433. {
  434. unsigned layer;
  435. for (layer = 0; layer < src_block->nz; layer++)
  436. {
  437. uint8_t *src_ptr = ((uint8_t *)src_block->ptr) + layer*src_block->ldz*src_block->elemsize;
  438. uint8_t *dst_ptr = ((uint8_t *)dst_block->ptr) + layer*dst_block->ldz*dst_block->elemsize;
  439. cures = cudaMemcpy2D((char *)dst_ptr, dst_block->ldy*elemsize,
  440. (char *)src_ptr, src_block->ldy*elemsize,
  441. nx*elemsize, ny, kind);
  442. if (STARPU_UNLIKELY(cures))
  443. STARPU_CUDA_REPORT_ERROR(cures);
  444. }
  445. STARPU_TRACE_DATA_COPY(src_node, dst_node, src_block->nx*src_block->ny*src_block->nz*src_block->elemsize);
  446. return 0;
  447. }
  448. }
  449. static int copy_cuda_to_ram(void *src_interface, unsigned src_node, void *dst_interface, unsigned dst_node)
  450. {
  451. return copy_cuda_common(src_interface, src_node, dst_interface, dst_node, cudaMemcpyDeviceToHost);
  452. }
  453. static int copy_ram_to_cuda(void *src_interface, unsigned src_node __attribute__((unused)), void *dst_interface, unsigned dst_node __attribute__((unused)))
  454. {
  455. return copy_cuda_common(src_interface, src_node, dst_interface, dst_node, cudaMemcpyHostToDevice);
  456. }
  457. static int copy_cuda_to_cuda(void *src_interface, unsigned src_node __attribute__((unused)), void *dst_interface, unsigned dst_node __attribute__((unused)))
  458. {
  459. return copy_cuda_common(src_interface, src_node, dst_interface, dst_node, cudaMemcpyDeviceToDevice);
  460. }
  461. static int copy_cuda_to_ram_async(void *src_interface, unsigned src_node __attribute__((unused)), void *dst_interface, unsigned dst_node __attribute__((unused)), cudaStream_t stream)
  462. {
  463. return copy_cuda_async_common(src_interface, src_node, dst_interface, dst_node, stream, cudaMemcpyDeviceToHost);
  464. }
  465. static int copy_ram_to_cuda_async(void *src_interface, unsigned src_node __attribute__((unused)), void *dst_interface, unsigned dst_node __attribute__((unused)), cudaStream_t stream)
  466. {
  467. return copy_cuda_async_common(src_interface, src_node, dst_interface, dst_node, stream, cudaMemcpyHostToDevice);
  468. }
  469. #endif // STARPU_USE_CUDA
  470. #ifdef STARPU_USE_OPENCL
  471. static int copy_ram_to_opencl_async(void *src_interface, unsigned src_node __attribute__((unused)), void *dst_interface, unsigned dst_node __attribute__((unused)), void *_event)
  472. {
  473. starpu_block_interface_t *src_block = src_interface;
  474. starpu_block_interface_t *dst_block = dst_interface;
  475. int err,ret;
  476. uint32_t nx = src_block->nx;
  477. uint32_t ny = src_block->ny;
  478. /* We may have a contiguous buffer for the entire block, or contiguous
  479. * plans within the block, we can avoid many small transfers that way */
  480. if ((nx == src_block->ldy) && (src_block->ldy == dst_block->ldy))
  481. {
  482. /* Is that a single contiguous buffer ? */
  483. if (((nx*ny) == src_block->ldz) && (src_block->ldz == dst_block->ldz))
  484. {
  485. err = _starpu_opencl_copy_ram_to_opencl_async_sync((void*)src_block->ptr, (cl_mem)dst_block->dev_handle,
  486. src_block->nx*src_block->ny*src_block->nz*src_block->elemsize,
  487. dst_block->offset, (cl_event*)_event, &ret);
  488. if (STARPU_UNLIKELY(err))
  489. STARPU_OPENCL_REPORT_ERROR(err);
  490. }
  491. else {
  492. /* Are all plans contiguous */
  493. /* XXX non contiguous buffers are not properly supported yet. (TODO) */
  494. STARPU_ASSERT(0);
  495. }
  496. }
  497. else {
  498. /* Default case: we transfer all lines one by one: ny*nz transfers */
  499. unsigned layer;
  500. for (layer = 0; layer < src_block->nz; layer++)
  501. {
  502. unsigned j;
  503. for(j=0 ; j<src_block->ny ; j++) {
  504. void *ptr = (void*)src_block->ptr+(layer*src_block->ldz*src_block->elemsize)+(j*src_block->ldy*src_block->elemsize);
  505. err = _starpu_opencl_copy_ram_to_opencl(ptr, (cl_mem)dst_block->dev_handle,
  506. src_block->nx*src_block->elemsize,
  507. layer*dst_block->ldz*dst_block->elemsize + j*dst_block->ldy*dst_block->elemsize
  508. + dst_block->offset, NULL);
  509. if (STARPU_UNLIKELY(err))
  510. STARPU_OPENCL_REPORT_ERROR(err);
  511. }
  512. // int *foo = (int *)(src_block->ptr+(layer*src_block->ldz*src_block->elemsize));
  513. // fprintf(stderr, "layer %d --> value %d\n", layer, foo[1]);
  514. // const size_t buffer_origin[3] = {layer*src_block->ldz*src_block->elemsize, 0, 0};
  515. // //const size_t buffer_origin[3] = {0, 0, 0};
  516. // const size_t host_origin[3] = {layer*dst_block->ldz*dst_block->elemsize+dst_block->offset, 0, 0};
  517. // size_t region[3] = {src_block->nx*src_block->elemsize,src_block->ny, 1};
  518. // size_t buffer_row_pitch=region[0];
  519. // size_t buffer_slice_pitch=region[1] * buffer_row_pitch;
  520. // size_t host_row_pitch=region[0];
  521. // size_t host_slice_pitch=region[1] * host_row_pitch;
  522. //
  523. // _starpu_opencl_copy_rect_ram_to_opencl((void *)src_block->ptr, (cl_mem)dst_block->dev_handle,
  524. // buffer_origin, host_origin, region,
  525. // buffer_row_pitch, buffer_slice_pitch,
  526. // host_row_pitch, host_slice_pitch, NULL);
  527. }
  528. }
  529. STARPU_TRACE_DATA_COPY(src_node, dst_node, src_block->nx*src_block->ny*src_block->nz*src_block->elemsize);
  530. return ret;
  531. }
  532. static int copy_opencl_to_ram_async(void *src_interface, unsigned src_node __attribute__((unused)), void *dst_interface, unsigned dst_node __attribute__((unused)), void *_event)
  533. {
  534. starpu_block_interface_t *src_block = src_interface;
  535. starpu_block_interface_t *dst_block = dst_interface;
  536. int err, ret;
  537. /* We may have a contiguous buffer for the entire block, or contiguous
  538. * plans within the block, we can avoid many small transfers that way */
  539. if ((src_block->nx == src_block->ldy) && (src_block->ldy == dst_block->ldy))
  540. {
  541. /* Is that a single contiguous buffer ? */
  542. if (((src_block->nx*src_block->ny) == src_block->ldz) && (src_block->ldz == dst_block->ldz))
  543. {
  544. err = _starpu_opencl_copy_opencl_to_ram_async_sync((cl_mem)src_block->dev_handle, (void*)dst_block->ptr,
  545. src_block->nx*src_block->ny*src_block->nz*src_block->elemsize,
  546. src_block->offset, (cl_event*)_event, &ret);
  547. if (STARPU_UNLIKELY(err))
  548. STARPU_OPENCL_REPORT_ERROR(err);
  549. }
  550. else {
  551. /* Are all plans contiguous */
  552. /* XXX non contiguous buffers are not properly supported yet. (TODO) */
  553. STARPU_ASSERT(0);
  554. }
  555. }
  556. else {
  557. /* Default case: we transfer all lines one by one: ny*nz transfers */
  558. /* XXX non contiguous buffers are not properly supported yet. (TODO) */
  559. unsigned layer;
  560. for (layer = 0; layer < src_block->nz; layer++)
  561. {
  562. unsigned j;
  563. for(j=0 ; j<src_block->ny ; j++) {
  564. void *ptr = (void *)dst_block->ptr+(layer*dst_block->ldz*dst_block->elemsize)+(j*dst_block->ldy*dst_block->elemsize);
  565. err = _starpu_opencl_copy_opencl_to_ram((void*)src_block->dev_handle, ptr,
  566. src_block->nx*src_block->elemsize,
  567. layer*src_block->ldz*src_block->elemsize+j*src_block->ldy*src_block->elemsize+
  568. src_block->offset, NULL);
  569. }
  570. // const size_t buffer_origin[3] = {src_block->offset, 0, 0};
  571. // const size_t host_origin[3] = {layer*src_block->ldz*src_block->elemsize, 0, 0};
  572. // size_t region[3] = {src_block->nx*src_block->elemsize,src_block->ny, 1};
  573. // size_t buffer_row_pitch=region[0];
  574. // size_t buffer_slice_pitch=region[1] * buffer_row_pitch;
  575. // size_t host_row_pitch=region[0];
  576. // size_t host_slice_pitch=region[1] * host_row_pitch;
  577. //
  578. // _starpu_opencl_copy_rect_opencl_to_ram((cl_mem)src_block->dev_handle, (void *)dst_block->ptr,
  579. // buffer_origin, host_origin, region,
  580. // buffer_row_pitch, buffer_slice_pitch,
  581. // host_row_pitch, host_slice_pitch, NULL);
  582. }
  583. }
  584. STARPU_TRACE_DATA_COPY(src_node, dst_node, src_block->nx*src_block->ny*src_block->nz*src_block->elemsize);
  585. return ret;
  586. }
  587. static int copy_ram_to_opencl(void *src_interface, unsigned src_node __attribute__((unused)), void *dst_interface, unsigned dst_node __attribute__((unused)))
  588. {
  589. return copy_ram_to_opencl_async(src_interface, src_node, dst_interface, dst_node, NULL);
  590. }
  591. static int copy_opencl_to_ram(void *src_interface, unsigned src_node __attribute__((unused)), void *dst_interface, unsigned dst_node __attribute__((unused)))
  592. {
  593. return copy_opencl_to_ram_async(src_interface, src_node, dst_interface, dst_node, NULL);
  594. }
  595. #endif
  596. /* as not all platform easily have a BLAS lib installed ... */
  597. static int copy_ram_to_ram(void *src_interface, unsigned src_node __attribute__((unused)), void *dst_interface, unsigned dst_node __attribute__((unused)))
  598. {
  599. starpu_block_interface_t *src_block = src_interface;
  600. starpu_block_interface_t *dst_block = dst_interface;
  601. uint32_t nx = dst_block->nx;
  602. uint32_t ny = dst_block->ny;
  603. uint32_t nz = dst_block->nz;
  604. size_t elemsize = dst_block->elemsize;
  605. uint32_t ldy_src = src_block->ldy;
  606. uint32_t ldz_src = src_block->ldz;
  607. uint32_t ldy_dst = dst_block->ldy;
  608. uint32_t ldz_dst = dst_block->ldz;
  609. uintptr_t ptr_src = src_block->ptr;
  610. uintptr_t ptr_dst = dst_block->ptr;
  611. unsigned y, z;
  612. for (z = 0; z < nz; z++)
  613. for (y = 0; y < ny; y++)
  614. {
  615. uint32_t src_offset = (y*ldy_src + y*z*ldz_src)*elemsize;
  616. uint32_t dst_offset = (y*ldy_dst + y*z*ldz_dst)*elemsize;
  617. memcpy((void *)(ptr_dst + dst_offset),
  618. (void *)(ptr_src + src_offset), nx*elemsize);
  619. }
  620. STARPU_TRACE_DATA_COPY(src_node, dst_node, nx*ny*nz*elemsize);
  621. return 0;
  622. }