block_interface.c

/*
 * StarPU
 * Copyright (C) INRIA 2008-2009 (see AUTHORS file)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation; either version 2.1 of the License, or (at
 * your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * See the GNU Lesser General Public License in COPYING.LGPL for more details.
 */

#include <starpu.h>
#include <errno.h>
#include <common/config.h>
#include <datawizard/coherency.h>
#include <datawizard/copy-driver.h>
#include <datawizard/hierarchy.h>
#include <common/hash.h>
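
/* A block is a 3D array of nx*ny*nz elements of size elemsize. The element
 * at coordinates (x, y, z) is stored at address
 *	ptr + (x + y*ldy + z*ldz)*elemsize
 * where ldy is the number of elements between two consecutive rows, and ldz
 * the number of elements between two consecutive planes; a compact buffer
 * therefore has ldy == nx and ldz == nx*ny. */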

static int dummy_copy_ram_to_ram(starpu_data_handle handle, uint32_t src_node, uint32_t dst_node);
#ifdef USE_CUDA
static int copy_ram_to_cuda(starpu_data_handle handle, uint32_t src_node, uint32_t dst_node);
static int copy_cuda_to_ram(starpu_data_handle handle, uint32_t src_node, uint32_t dst_node);
static int copy_ram_to_cuda_async(starpu_data_handle handle, uint32_t src_node, uint32_t dst_node, cudaStream_t *stream);
static int copy_cuda_to_ram_async(starpu_data_handle handle, uint32_t src_node, uint32_t dst_node, cudaStream_t *stream);
#endif

static const struct copy_data_methods_s block_copy_data_methods_s = {
	.ram_to_ram = dummy_copy_ram_to_ram,
	.ram_to_spu = NULL,
#ifdef USE_CUDA
	.ram_to_cuda = copy_ram_to_cuda,
	.cuda_to_ram = copy_cuda_to_ram,
	.ram_to_cuda_async = copy_ram_to_cuda_async,
	.cuda_to_ram_async = copy_cuda_to_ram_async,
#endif
	.cuda_to_cuda = NULL,
	.cuda_to_spu = NULL,
	.spu_to_ram = NULL,
	.spu_to_cuda = NULL,
	.spu_to_spu = NULL
};
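
/* Methods left NULL are simply not implemented for the block interface. */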

static void register_block_handle(starpu_data_handle handle, uint32_t home_node, void *interface);
static size_t allocate_block_buffer_on_node(starpu_data_handle handle, uint32_t dst_node);
static void liberate_block_buffer_on_node(void *interface, uint32_t node);
static size_t block_interface_get_size(starpu_data_handle handle);
static uint32_t footprint_block_interface_crc32(starpu_data_handle handle);
static void display_block_interface(starpu_data_handle handle, FILE *f);
#ifdef USE_GORDON
static int convert_block_to_gordon(void *interface, uint64_t *ptr, gordon_strideSize_t *ss);
#endif

struct starpu_data_interface_ops_t interface_block_ops = {
	.register_data_handle = register_block_handle,
	.allocate_data_on_node = allocate_block_buffer_on_node,
	.liberate_data_on_node = liberate_block_buffer_on_node,
	.copy_methods = &block_copy_data_methods_s,
	.get_size = block_interface_get_size,
	.footprint = footprint_block_interface_crc32,
#ifdef USE_GORDON
	.convert_to_gordon = convert_block_to_gordon,
#endif
	.interfaceid = STARPU_BLOCK_INTERFACE_ID,
	.interface_size = sizeof(starpu_block_interface_t),
	.display = display_block_interface
};

#ifdef USE_GORDON
static int convert_block_to_gordon(void *interface, uint64_t *ptr, gordon_strideSize_t *ss)
{
	/* TODO */
	STARPU_ABORT();

	return 0;
}
#endif

static void register_block_handle(starpu_data_handle handle, uint32_t home_node, void *interface)
{
	starpu_block_interface_t *block_interface = interface;

	unsigned node;
	for (node = 0; node < STARPU_MAXNODES; node++)
	{
		starpu_block_interface_t *local_interface =
			starpu_data_get_interface_on_node(handle, node);

		if (node == home_node) {
			local_interface->ptr = block_interface->ptr;
			local_interface->ldy = block_interface->ldy;
			local_interface->ldz = block_interface->ldz;
		}
		else {
			local_interface->ptr = 0;
			local_interface->ldy = 0;
			local_interface->ldz = 0;
		}

		local_interface->nx = block_interface->nx;
		local_interface->ny = block_interface->ny;
		local_interface->nz = block_interface->nz;
		local_interface->elemsize = block_interface->elemsize;
	}
}

/* declare a new piece of data with the BLOCK interface */
void starpu_register_block_data(starpu_data_handle *handleptr, uint32_t home_node,
			uintptr_t ptr, uint32_t ldy, uint32_t ldz, uint32_t nx,
			uint32_t ny, uint32_t nz, size_t elemsize)
{
	starpu_block_interface_t interface = {
		.ptr = ptr,
		.ldy = ldy,
		.ldz = ldz,
		.nx = nx,
		.ny = ny,
		.nz = nz,
		.elemsize = elemsize
	};

	register_data_handle(handleptr, home_node, &interface, &interface_block_ops);
}
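
/* Illustrative example (not part of this file): registering a compact
 * 64x64x64 block of floats that already lives in main memory (node 0).
 * Since the buffer is compact, ldy = nx and ldz = nx*ny:
 *
 *	float *buf = malloc(64*64*64*sizeof(float));
 *	starpu_data_handle handle;
 *	starpu_register_block_data(&handle, 0, (uintptr_t)buf,
 *			64, 64*64, 64, 64, 64, sizeof(float));
 */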

static uint32_t footprint_block_interface_crc32(starpu_data_handle handle)
{
	uint32_t hash;

	hash = crc32_be(starpu_get_block_nx(handle), 0);
	hash = crc32_be(starpu_get_block_ny(handle), hash);
	hash = crc32_be(starpu_get_block_nz(handle), hash);

	return hash;
}

static void display_block_interface(starpu_data_handle handle, FILE *f)
{
	starpu_block_interface_t *interface;
	interface = starpu_data_get_interface_on_node(handle, 0);

	fprintf(f, "%u\t%u\t%u\t", interface->nx, interface->ny, interface->nz);
}

static size_t block_interface_get_size(starpu_data_handle handle)
{
	size_t size;
	starpu_block_interface_t *interface;
	interface = starpu_data_get_interface_on_node(handle, 0);

	size = interface->nx*interface->ny*interface->nz*interface->elemsize;

	return size;
}
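
/* For instance, a 32x32x32 block of doubles occupies
 * 32*32*32*8 bytes = 256 KiB. */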

/* offer access to the data parameters */
uint32_t starpu_get_block_nx(starpu_data_handle handle)
{
	starpu_block_interface_t *interface =
		starpu_data_get_interface_on_node(handle, 0);

	return interface->nx;
}

uint32_t starpu_get_block_ny(starpu_data_handle handle)
{
	starpu_block_interface_t *interface =
		starpu_data_get_interface_on_node(handle, 0);

	return interface->ny;
}

uint32_t starpu_get_block_nz(starpu_data_handle handle)
{
	starpu_block_interface_t *interface =
		starpu_data_get_interface_on_node(handle, 0);

	return interface->nz;
}

uint32_t starpu_get_block_local_ldy(starpu_data_handle handle)
{
	unsigned node;
	node = get_local_memory_node();

	STARPU_ASSERT(starpu_test_if_data_is_allocated_on_node(handle, node));

	starpu_block_interface_t *interface =
		starpu_data_get_interface_on_node(handle, node);

	return interface->ldy;
}

uint32_t starpu_get_block_local_ldz(starpu_data_handle handle)
{
	unsigned node;
	node = get_local_memory_node();

	STARPU_ASSERT(starpu_test_if_data_is_allocated_on_node(handle, node));

	starpu_block_interface_t *interface =
		starpu_data_get_interface_on_node(handle, node);

	return interface->ldz;
}

uintptr_t starpu_get_block_local_ptr(starpu_data_handle handle)
{
	unsigned node;
	node = get_local_memory_node();

	STARPU_ASSERT(starpu_test_if_data_is_allocated_on_node(handle, node));

	starpu_block_interface_t *interface =
		starpu_data_get_interface_on_node(handle, node);

	return interface->ptr;
}

size_t starpu_get_block_elemsize(starpu_data_handle handle)
{
	starpu_block_interface_t *interface =
		starpu_data_get_interface_on_node(handle, 0);

	return interface->elemsize;
}

/* memory allocation/deallocation primitives for the BLOCK interface */

/* returns the size of the allocated area, or 0 if the allocation failed */
static size_t allocate_block_buffer_on_node(starpu_data_handle handle, uint32_t dst_node)
{
	uintptr_t addr = 0;
	unsigned fail = 0;
	size_t allocated_memory;

#ifdef USE_CUDA
	cudaError_t status;
#endif
	starpu_block_interface_t *dst_block =
		starpu_data_get_interface_on_node(handle, dst_node);

	uint32_t nx = dst_block->nx;
	uint32_t ny = dst_block->ny;
	uint32_t nz = dst_block->nz;
	size_t elemsize = dst_block->elemsize;

	node_kind kind = get_node_kind(dst_node);

	switch(kind) {
		case RAM:
			addr = (uintptr_t)malloc(nx*ny*nz*elemsize);
			if (!addr)
				fail = 1;
			break;
#ifdef USE_CUDA
		case CUDA_RAM:
			status = cudaMalloc((void **)&addr, nx*ny*nz*elemsize);
			if (!addr || status != cudaSuccess)
			{
				if (STARPU_UNLIKELY(status != cudaErrorMemoryAllocation))
					CUDA_REPORT_ERROR(status);

				fail = 1;
			}
			break;
#endif
		default:
			assert(0);
	}

	if (!fail) {
		/* allocation succeeded: the new buffer is compact, so set the
		 * leading dimensions accordingly */
		allocated_memory = nx*ny*nz*elemsize;

		dst_block->ptr = addr;
		dst_block->ldy = nx;
		dst_block->ldz = nx*ny;
	} else {
		/* allocation failed */
		allocated_memory = 0;
	}

	return allocated_memory;
}

static void liberate_block_buffer_on_node(void *interface, uint32_t node)
{
	starpu_block_interface_t *block_interface = interface;

#ifdef USE_CUDA
	cudaError_t status;
#endif

	node_kind kind = get_node_kind(node);
	switch(kind) {
		case RAM:
			free((void*)block_interface->ptr);
			break;
#ifdef USE_CUDA
		case CUDA_RAM:
			status = cudaFree((void*)block_interface->ptr);
			if (STARPU_UNLIKELY(status))
				CUDA_REPORT_ERROR(status);
			break;
#endif
		default:
			assert(0);
	}
}

#ifdef USE_CUDA
static int copy_cuda_to_ram(starpu_data_handle handle, uint32_t src_node, uint32_t dst_node)
{
	cudaError_t cures;
	starpu_block_interface_t *src_block;
	starpu_block_interface_t *dst_block;

	src_block = starpu_data_get_interface_on_node(handle, src_node);
	dst_block = starpu_data_get_interface_on_node(handle, dst_node);

	uint32_t nx = src_block->nx;
	uint32_t ny = src_block->ny;
	uint32_t nz = src_block->nz;
	size_t elemsize = src_block->elemsize;

	if ((nx == src_block->ldy) && (src_block->ldy == dst_block->ldy)
		&& ((nx*ny) == src_block->ldz) && (src_block->ldz == dst_block->ldz))
	{
		/* the entire block is contiguous on both sides: a single copy
		 * is enough */
		cures = cudaMemcpy((char *)dst_block->ptr, (char *)src_block->ptr,
			nx*ny*nz*elemsize, cudaMemcpyDeviceToHost);
		if (STARPU_UNLIKELY(cures))
			CUDA_REPORT_ERROR(cures);
	}
	else {
		/* transfer the block plane by plane */
		unsigned layer;
		for (layer = 0; layer < nz; layer++)
		{
			uint8_t *src_ptr = ((uint8_t *)src_block->ptr)
				+ layer*src_block->ldz*src_block->elemsize;
			uint8_t *dst_ptr = ((uint8_t *)dst_block->ptr)
				+ layer*dst_block->ldz*dst_block->elemsize;

			cures = cudaMemcpy2D((char *)dst_ptr, dst_block->ldy*elemsize,
				(char *)src_ptr, src_block->ldy*elemsize,
				nx*elemsize, ny, cudaMemcpyDeviceToHost);
			if (STARPU_UNLIKELY(cures))
				CUDA_REPORT_ERROR(cures);
		}
	}

	cudaThreadSynchronize();

	TRACE_DATA_COPY(src_node, dst_node, nx*ny*nz*elemsize);

	return 0;
}
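
/* The _async copy methods return EAGAIN when the transfer was successfully
 * submitted on the stream (completion is signalled later), and 0 when the
 * copy had to be performed synchronously instead. */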

static int copy_cuda_to_ram_async(starpu_data_handle handle, uint32_t src_node, uint32_t dst_node, cudaStream_t *stream)
{
	starpu_block_interface_t *src_block;
	starpu_block_interface_t *dst_block;

	src_block = starpu_data_get_interface_on_node(handle, src_node);
	dst_block = starpu_data_get_interface_on_node(handle, dst_node);

	uint32_t nx = src_block->nx;
	uint32_t ny = src_block->ny;
	uint32_t nz = src_block->nz;
	size_t elemsize = src_block->elemsize;

	cudaError_t cures;
	int ret;

	/* We may have a contiguous buffer for the entire block, or contiguous
	 * planes within the block: in both cases we can avoid many small
	 * transfers. */
	if ((nx == src_block->ldy) && (src_block->ldy == dst_block->ldy))
	{
		/* Is this a single contiguous buffer? */
		if (((nx*ny) == src_block->ldz) && (src_block->ldz == dst_block->ldz))
		{
			cures = cudaMemcpyAsync((char *)dst_block->ptr, (char *)src_block->ptr,
					nx*ny*nz*elemsize, cudaMemcpyDeviceToHost, *stream);
			if (STARPU_UNLIKELY(cures))
			{
				cures = cudaMemcpy((char *)dst_block->ptr, (char *)src_block->ptr,
					nx*ny*nz*elemsize, cudaMemcpyDeviceToHost);
				if (STARPU_UNLIKELY(cures))
					CUDA_REPORT_ERROR(cures);
				cudaThreadSynchronize();
				ret = 0;
			}
			else {
				ret = EAGAIN;
			}
		}
		else {
			/* All planes are contiguous: one 2D copy moves them all */
			cures = cudaMemcpy2DAsync((char *)dst_block->ptr, dst_block->ldz*elemsize,
					(char *)src_block->ptr, src_block->ldz*elemsize,
					nx*ny*elemsize, nz, cudaMemcpyDeviceToHost, *stream);
			if (STARPU_UNLIKELY(cures))
			{
				cures = cudaMemcpy2D((char *)dst_block->ptr, dst_block->ldz*elemsize,
						(char *)src_block->ptr, src_block->ldz*elemsize,
						nx*ny*elemsize, nz, cudaMemcpyDeviceToHost);
				if (STARPU_UNLIKELY(cures))
					CUDA_REPORT_ERROR(cures);
				cudaThreadSynchronize();
				ret = 0;
			}
			else {
				ret = EAGAIN;
			}
		}
	}
	else {
		/* Default case: one 2D copy per plane, i.e. nz transfers of
		 * ny lines each */
		unsigned layer;
		for (layer = 0; layer < nz; layer++)
		{
			uint8_t *src_ptr = ((uint8_t *)src_block->ptr)
				+ layer*src_block->ldz*src_block->elemsize;
			uint8_t *dst_ptr = ((uint8_t *)dst_block->ptr)
				+ layer*dst_block->ldz*dst_block->elemsize;

			cures = cudaMemcpy2DAsync((char *)dst_ptr, dst_block->ldy*elemsize,
					(char *)src_ptr, src_block->ldy*elemsize,
					nx*elemsize, ny, cudaMemcpyDeviceToHost, *stream);
			if (STARPU_UNLIKELY(cures))
			{
				/* the asynchronous copy failed: fall back to a
				 * synchronous transfer of the whole block */
				goto no_async_default;
			}
		}
		ret = EAGAIN;
	}

	TRACE_DATA_COPY(src_node, dst_node, nx*ny*nz*elemsize);
	return ret;

no_async_default:
	{
	unsigned layer;
	for (layer = 0; layer < nz; layer++)
	{
		uint8_t *src_ptr = ((uint8_t *)src_block->ptr)
			+ layer*src_block->ldz*src_block->elemsize;
		uint8_t *dst_ptr = ((uint8_t *)dst_block->ptr)
			+ layer*dst_block->ldz*dst_block->elemsize;

		cures = cudaMemcpy2D((char *)dst_ptr, dst_block->ldy*elemsize,
				(char *)src_ptr, src_block->ldy*elemsize,
				nx*elemsize, ny, cudaMemcpyDeviceToHost);
		if (STARPU_UNLIKELY(cures))
			CUDA_REPORT_ERROR(cures);
	}
	cudaThreadSynchronize();
	TRACE_DATA_COPY(src_node, dst_node, nx*ny*nz*elemsize);
	return 0;
	}
}

static int copy_ram_to_cuda_async(starpu_data_handle handle, uint32_t src_node, uint32_t dst_node, cudaStream_t *stream)
{
	starpu_block_interface_t *src_block;
	starpu_block_interface_t *dst_block;

	src_block = starpu_data_get_interface_on_node(handle, src_node);
	dst_block = starpu_data_get_interface_on_node(handle, dst_node);

	uint32_t nx = src_block->nx;
	uint32_t ny = src_block->ny;
	uint32_t nz = src_block->nz;
	size_t elemsize = src_block->elemsize;

	cudaError_t cures;
	int ret;

	/* We may have a contiguous buffer for the entire block, or contiguous
	 * planes within the block: in both cases we can avoid many small
	 * transfers. */
	if ((nx == src_block->ldy) && (src_block->ldy == dst_block->ldy))
	{
		/* Is this a single contiguous buffer? */
		if (((nx*ny) == src_block->ldz) && (src_block->ldz == dst_block->ldz))
		{
			cures = cudaMemcpyAsync((char *)dst_block->ptr, (char *)src_block->ptr,
					nx*ny*nz*elemsize, cudaMemcpyHostToDevice, *stream);
			if (STARPU_UNLIKELY(cures))
			{
				cures = cudaMemcpy((char *)dst_block->ptr, (char *)src_block->ptr,
					nx*ny*nz*elemsize, cudaMemcpyHostToDevice);
				if (STARPU_UNLIKELY(cures))
					CUDA_REPORT_ERROR(cures);
				cudaThreadSynchronize();
				ret = 0;
			}
			else {
				ret = EAGAIN;
			}
		}
		else {
			/* All planes are contiguous: one 2D copy moves them all */
			cures = cudaMemcpy2DAsync((char *)dst_block->ptr, dst_block->ldz*elemsize,
					(char *)src_block->ptr, src_block->ldz*elemsize,
					nx*ny*elemsize, nz, cudaMemcpyHostToDevice, *stream);
			if (STARPU_UNLIKELY(cures))
			{
				cures = cudaMemcpy2D((char *)dst_block->ptr, dst_block->ldz*elemsize,
						(char *)src_block->ptr, src_block->ldz*elemsize,
						nx*ny*elemsize, nz, cudaMemcpyHostToDevice);
				if (STARPU_UNLIKELY(cures))
					CUDA_REPORT_ERROR(cures);
				cudaThreadSynchronize();
				ret = 0;
			}
			else {
				ret = EAGAIN;
			}
		}
	}
	else {
		/* Default case: one 2D copy per plane, i.e. nz transfers of
		 * ny lines each */
		unsigned layer;
		for (layer = 0; layer < nz; layer++)
		{
			uint8_t *src_ptr = ((uint8_t *)src_block->ptr)
				+ layer*src_block->ldz*src_block->elemsize;
			uint8_t *dst_ptr = ((uint8_t *)dst_block->ptr)
				+ layer*dst_block->ldz*dst_block->elemsize;

			cures = cudaMemcpy2DAsync((char *)dst_ptr, dst_block->ldy*elemsize,
					(char *)src_ptr, src_block->ldy*elemsize,
					nx*elemsize, ny, cudaMemcpyHostToDevice, *stream);
			if (STARPU_UNLIKELY(cures))
			{
				/* the asynchronous copy failed: fall back to a
				 * synchronous transfer of the whole block */
				goto no_async_default;
			}
		}
		ret = EAGAIN;
	}

	TRACE_DATA_COPY(src_node, dst_node, nx*ny*nz*elemsize);
	return ret;

no_async_default:
	{
	unsigned layer;
	for (layer = 0; layer < nz; layer++)
	{
		uint8_t *src_ptr = ((uint8_t *)src_block->ptr)
			+ layer*src_block->ldz*src_block->elemsize;
		uint8_t *dst_ptr = ((uint8_t *)dst_block->ptr)
			+ layer*dst_block->ldz*dst_block->elemsize;

		cures = cudaMemcpy2D((char *)dst_ptr, dst_block->ldy*elemsize,
				(char *)src_ptr, src_block->ldy*elemsize,
				nx*elemsize, ny, cudaMemcpyHostToDevice);
		if (STARPU_UNLIKELY(cures))
			CUDA_REPORT_ERROR(cures);
	}
	cudaThreadSynchronize();
	TRACE_DATA_COPY(src_node, dst_node, nx*ny*nz*elemsize);
	return 0;
	}
}

static int copy_ram_to_cuda(starpu_data_handle handle, uint32_t src_node, uint32_t dst_node)
{
	cudaError_t cures;
	starpu_block_interface_t *src_block;
	starpu_block_interface_t *dst_block;

	src_block = starpu_data_get_interface_on_node(handle, src_node);
	dst_block = starpu_data_get_interface_on_node(handle, dst_node);

	uint32_t nx = src_block->nx;
	uint32_t ny = src_block->ny;
	uint32_t nz = src_block->nz;
	size_t elemsize = src_block->elemsize;

	if ((nx == src_block->ldy) && (src_block->ldy == dst_block->ldy)
		&& ((nx*ny) == src_block->ldz) && (src_block->ldz == dst_block->ldz))
	{
		/* the entire block is contiguous on both sides: a single copy
		 * is enough */
		cures = cudaMemcpy((char *)dst_block->ptr, (char *)src_block->ptr,
			nx*ny*nz*elemsize, cudaMemcpyHostToDevice);
		if (STARPU_UNLIKELY(cures))
			CUDA_REPORT_ERROR(cures);
	}
	else {
		/* transfer the block plane by plane */
		unsigned layer;
		for (layer = 0; layer < nz; layer++)
		{
			uint8_t *src_ptr = ((uint8_t *)src_block->ptr)
				+ layer*src_block->ldz*src_block->elemsize;
			uint8_t *dst_ptr = ((uint8_t *)dst_block->ptr)
				+ layer*dst_block->ldz*dst_block->elemsize;

			cures = cudaMemcpy2D((char *)dst_ptr, dst_block->ldy*elemsize,
				(char *)src_ptr, src_block->ldy*elemsize,
				nx*elemsize, ny, cudaMemcpyHostToDevice);
			if (STARPU_UNLIKELY(cures))
				CUDA_REPORT_ERROR(cures);
		}
	}

	cudaThreadSynchronize();

	TRACE_DATA_COPY(src_node, dst_node, nx*ny*nz*elemsize);

	return 0;
}
#endif // USE_CUDA

/* since not every platform has a BLAS library installed, provide a plain
 * memcpy-based RAM-to-RAM copy */
static int dummy_copy_ram_to_ram(starpu_data_handle handle, uint32_t src_node, uint32_t dst_node)
{
	starpu_block_interface_t *src_block;
	starpu_block_interface_t *dst_block;

	src_block = starpu_data_get_interface_on_node(handle, src_node);
	dst_block = starpu_data_get_interface_on_node(handle, dst_node);

	uint32_t nx = dst_block->nx;
	uint32_t ny = dst_block->ny;
	uint32_t nz = dst_block->nz;
	size_t elemsize = dst_block->elemsize;

	uint32_t ldy_src = src_block->ldy;
	uint32_t ldz_src = src_block->ldz;
	uint32_t ldy_dst = dst_block->ldy;
	uint32_t ldz_dst = dst_block->ldz;

	uintptr_t ptr_src = src_block->ptr;
	uintptr_t ptr_dst = dst_block->ptr;

	unsigned y, z;
	for (z = 0; z < nz; z++)
	for (y = 0; y < ny; y++)
	{
		/* the row (y, z) starts (y*ldy + z*ldz) elements into the buffer */
		size_t src_offset = (y*ldy_src + z*ldz_src)*elemsize;
		size_t dst_offset = (y*ldy_dst + z*ldz_dst)*elemsize;

		memcpy((void *)(ptr_dst + dst_offset),
			(void *)(ptr_src + src_offset), nx*elemsize);
	}

	TRACE_DATA_COPY(src_node, dst_node, nx*ny*nz*elemsize);

	return 0;
}