tensor_interface.c

/* StarPU --- Runtime system for heterogeneous multicore architectures.
 *
 * Copyright (C) 2009-2019 Université de Bordeaux
 * Copyright (C) 2011,2012,2017 Inria
 * Copyright (C) 2010-2017,2019 CNRS
 *
 * StarPU is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation; either version 2.1 of the License, or (at
 * your option) any later version.
 *
 * StarPU is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * See the GNU Lesser General Public License in COPYING.LGPL for more details.
 */

#include <starpu.h>

#ifdef STARPU_USE_CUDA
static int copy_ram_to_cuda(void *src_interface, unsigned src_node STARPU_ATTRIBUTE_UNUSED, void *dst_interface, unsigned dst_node STARPU_ATTRIBUTE_UNUSED);
static int copy_cuda_to_ram(void *src_interface, unsigned src_node STARPU_ATTRIBUTE_UNUSED, void *dst_interface, unsigned dst_node STARPU_ATTRIBUTE_UNUSED);
static int copy_ram_to_cuda_async(void *src_interface, unsigned src_node STARPU_ATTRIBUTE_UNUSED, void *dst_interface, unsigned dst_node STARPU_ATTRIBUTE_UNUSED, cudaStream_t stream);
static int copy_cuda_to_ram_async(void *src_interface, unsigned src_node STARPU_ATTRIBUTE_UNUSED, void *dst_interface, unsigned dst_node STARPU_ATTRIBUTE_UNUSED, cudaStream_t stream);
static int copy_cuda_to_cuda(void *src_interface, unsigned src_node STARPU_ATTRIBUTE_UNUSED, void *dst_interface, unsigned dst_node STARPU_ATTRIBUTE_UNUSED);
#endif
#ifdef STARPU_USE_OPENCL
static int copy_ram_to_opencl(void *src_interface, unsigned src_node STARPU_ATTRIBUTE_UNUSED, void *dst_interface, unsigned dst_node STARPU_ATTRIBUTE_UNUSED);
static int copy_opencl_to_ram(void *src_interface, unsigned src_node STARPU_ATTRIBUTE_UNUSED, void *dst_interface, unsigned dst_node STARPU_ATTRIBUTE_UNUSED);
static int copy_opencl_to_opencl(void *src_interface, unsigned src_node STARPU_ATTRIBUTE_UNUSED, void *dst_interface, unsigned dst_node STARPU_ATTRIBUTE_UNUSED);
static int copy_ram_to_opencl_async(void *src_interface, unsigned src_node STARPU_ATTRIBUTE_UNUSED, void *dst_interface, unsigned dst_node STARPU_ATTRIBUTE_UNUSED, cl_event *event);
static int copy_opencl_to_ram_async(void *src_interface, unsigned src_node STARPU_ATTRIBUTE_UNUSED, void *dst_interface, unsigned dst_node STARPU_ATTRIBUTE_UNUSED, cl_event *event);
static int copy_opencl_to_opencl_async(void *src_interface, unsigned src_node STARPU_ATTRIBUTE_UNUSED, void *dst_interface, unsigned dst_node STARPU_ATTRIBUTE_UNUSED, cl_event *event);
#endif
static int copy_any_to_any(void *src_interface, unsigned src_node, void *dst_interface, unsigned dst_node, void *async_data);

static const struct starpu_data_copy_methods tensor_copy_data_methods_s =
{
#ifdef STARPU_USE_CUDA
	.ram_to_cuda = copy_ram_to_cuda,
	.cuda_to_ram = copy_cuda_to_ram,
	.ram_to_cuda_async = copy_ram_to_cuda_async,
	.cuda_to_ram_async = copy_cuda_to_ram_async,
	.cuda_to_cuda = copy_cuda_to_cuda,
#endif
#ifdef STARPU_USE_OPENCL
	.ram_to_opencl = copy_ram_to_opencl,
	.opencl_to_ram = copy_opencl_to_ram,
	.opencl_to_opencl = copy_opencl_to_opencl,
	.ram_to_opencl_async = copy_ram_to_opencl_async,
	.opencl_to_ram_async = copy_opencl_to_ram_async,
	.opencl_to_opencl_async = copy_opencl_to_opencl_async,
#endif
	.any_to_any = copy_any_to_any,
};

static void register_tensor_handle(starpu_data_handle_t handle, unsigned home_node, void *data_interface);
static void *tensor_to_pointer(void *data_interface, unsigned node);
static int tensor_pointer_is_inside(void *data_interface, unsigned node, void *ptr);
static starpu_ssize_t allocate_tensor_buffer_on_node(void *data_interface_, unsigned dst_node);
static void free_tensor_buffer_on_node(void *data_interface, unsigned node);
static size_t tensor_interface_get_size(starpu_data_handle_t handle);
static uint32_t footprint_tensor_interface_crc32(starpu_data_handle_t handle);
static int tensor_compare(void *data_interface_a, void *data_interface_b);
static void display_tensor_interface(starpu_data_handle_t handle, FILE *f);
static int pack_tensor_handle(starpu_data_handle_t handle, unsigned node, void **ptr, starpu_ssize_t *count);
static int unpack_tensor_handle(starpu_data_handle_t handle, unsigned node, void *ptr, size_t count);
static starpu_ssize_t describe(void *data_interface, char *buf, size_t size);

struct starpu_data_interface_ops starpu_interface_tensor_ops =
{
	.register_data_handle = register_tensor_handle,
	.allocate_data_on_node = allocate_tensor_buffer_on_node,
	.to_pointer = tensor_to_pointer,
	.pointer_is_inside = tensor_pointer_is_inside,
	.free_data_on_node = free_tensor_buffer_on_node,
	.copy_methods = &tensor_copy_data_methods_s,
	.get_size = tensor_interface_get_size,
	.footprint = footprint_tensor_interface_crc32,
	.compare = tensor_compare,
	.interfaceid = STARPU_TENSOR_INTERFACE_ID,
	.interface_size = sizeof(struct starpu_tensor_interface),
	.display = display_tensor_interface,
	.pack_data = pack_tensor_handle,
	.unpack_data = unpack_tensor_handle,
	.describe = describe,
	.name = "STARPU_TENSOR_INTERFACE"
};

static void *tensor_to_pointer(void *data_interface, unsigned node)
{
	(void) node;
	struct starpu_tensor_interface *tensor_interface = data_interface;

	return (void*) tensor_interface->ptr;
}
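
/* Element (x, y, z, t) of a tensor lives at
 *	ptr + (x + y*ldy + z*ldz + t*ldt) * elemsize,
 * so the last valid byte sits at offset
 *	((nt-1)*ldt + (nz-1)*ldz + (ny-1)*ldy + nx) * elemsize - 1.
 * The inclusion test below checks a candidate pointer against that range. */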
static int tensor_pointer_is_inside(void *data_interface, unsigned node, void *ptr)
{
	(void) node;
	struct starpu_tensor_interface *tensor_interface = data_interface;
	uint32_t ldy = tensor_interface->ldy;
	uint32_t ldz = tensor_interface->ldz;
	uint32_t ldt = tensor_interface->ldt;
	uint32_t nx = tensor_interface->nx;
	uint32_t ny = tensor_interface->ny;
	uint32_t nz = tensor_interface->nz;
	uint32_t nt = tensor_interface->nt;
	size_t elemsize = tensor_interface->elemsize;

	return (char*) ptr >= (char*) tensor_interface->ptr &&
		(char*) ptr < (char*) tensor_interface->ptr + (nt-1)*ldt*elemsize + (nz-1)*ldz*elemsize + (ny-1)*ldy*elemsize + nx*elemsize;
}

static void register_tensor_handle(starpu_data_handle_t handle, unsigned home_node, void *data_interface)
{
	struct starpu_tensor_interface *tensor_interface = (struct starpu_tensor_interface *) data_interface;

	unsigned node;
	for (node = 0; node < STARPU_MAXNODES; node++)
	{
		struct starpu_tensor_interface *local_interface = (struct starpu_tensor_interface *)
			starpu_data_get_interface_on_node(handle, node);

		if (node == home_node)
		{
			local_interface->ptr = tensor_interface->ptr;
			local_interface->dev_handle = tensor_interface->dev_handle;
			local_interface->offset = tensor_interface->offset;
			local_interface->ldy = tensor_interface->ldy;
			local_interface->ldz = tensor_interface->ldz;
			local_interface->ldt = tensor_interface->ldt;
		}
		else
		{
			local_interface->ptr = 0;
			local_interface->dev_handle = 0;
			local_interface->offset = 0;
			local_interface->ldy = 0;
			local_interface->ldz = 0;
			local_interface->ldt = 0;
		}

		local_interface->id = tensor_interface->id;
		local_interface->nx = tensor_interface->nx;
		local_interface->ny = tensor_interface->ny;
		local_interface->nz = tensor_interface->nz;
		local_interface->nt = tensor_interface->nt;
		local_interface->elemsize = tensor_interface->elemsize;
	}
}

/* declare a new data with the tensor interface */
void starpu_tensor_data_register(starpu_data_handle_t *handleptr, int home_node,
				 uintptr_t ptr, uint32_t ldy, uint32_t ldz, uint32_t ldt, uint32_t nx,
				 uint32_t ny, uint32_t nz, uint32_t nt, size_t elemsize)
{
	struct starpu_tensor_interface tensor_interface =
	{
		.id = STARPU_TENSOR_INTERFACE_ID,
		.ptr = ptr,
		.dev_handle = ptr,
		.offset = 0,
		.ldy = ldy,
		.ldz = ldz,
		.ldt = ldt,
		.nx = nx,
		.ny = ny,
		.nz = nz,
		.nt = nt,
		.elemsize = elemsize
	};

#ifndef STARPU_SIMGRID
	if (home_node >= 0 && starpu_node_get_kind(home_node) == STARPU_CPU_RAM)
	{
		STARPU_ASSERT_ACCESSIBLE(ptr);
		STARPU_ASSERT_ACCESSIBLE(ptr + (nt-1)*ldt*elemsize + (nz-1)*ldz*elemsize + (ny-1)*ldy*elemsize + nx*elemsize - 1);
	}
#endif

	starpu_data_register(handleptr, home_node, &tensor_interface, &starpu_interface_tensor_ops);
}
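
/* Illustrative sketch (not part of the StarPU sources): registering a
 * contiguous nx x ny x nz x nt tensor of floats living in main memory.
 * For a contiguous layout the leading dimensions are simply ldy = nx,
 * ldz = nx*ny and ldt = nx*ny*nz; the variable names are made up for
 * the example.
 *
 *	float *buf;
 *	starpu_data_handle_t handle;
 *	starpu_malloc((void **)&buf, nx * ny * nz * nt * sizeof(float));
 *	starpu_tensor_data_register(&handle, STARPU_MAIN_RAM, (uintptr_t)buf,
 *				    nx, nx*ny, nx*ny*nz,
 *				    nx, ny, nz, nt, sizeof(float));
 *	...
 *	starpu_data_unregister(handle);
 *	starpu_free(buf);
 */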

void starpu_tensor_ptr_register(starpu_data_handle_t handle, unsigned node,
				uintptr_t ptr, uintptr_t dev_handle, size_t offset, uint32_t ldy, uint32_t ldz, uint32_t ldt)
{
	struct starpu_tensor_interface *tensor_interface = starpu_data_get_interface_on_node(handle, node);
	starpu_data_ptr_register(handle, node);
	tensor_interface->ptr = ptr;
	tensor_interface->dev_handle = dev_handle;
	tensor_interface->offset = offset;
	tensor_interface->ldy = ldy;
	tensor_interface->ldz = ldz;
	tensor_interface->ldt = ldt;
}

static uint32_t footprint_tensor_interface_crc32(starpu_data_handle_t handle)
{
	uint32_t hash;

	hash = starpu_hash_crc32c_be(starpu_tensor_get_nx(handle), 0);
	hash = starpu_hash_crc32c_be(starpu_tensor_get_ny(handle), hash);
	hash = starpu_hash_crc32c_be(starpu_tensor_get_nz(handle), hash);
	hash = starpu_hash_crc32c_be(starpu_tensor_get_nt(handle), hash);

	return hash;
}

static int tensor_compare(void *data_interface_a, void *data_interface_b)
{
	struct starpu_tensor_interface *tensor_a = (struct starpu_tensor_interface *) data_interface_a;
	struct starpu_tensor_interface *tensor_b = (struct starpu_tensor_interface *) data_interface_b;

	/* Two tensors are considered compatible if they have the same size */
	return (tensor_a->nx == tensor_b->nx)
		&& (tensor_a->ny == tensor_b->ny)
		&& (tensor_a->nz == tensor_b->nz)
		&& (tensor_a->nt == tensor_b->nt)
		&& (tensor_a->elemsize == tensor_b->elemsize);
}

static void display_tensor_interface(starpu_data_handle_t handle, FILE *f)
{
	struct starpu_tensor_interface *tensor_interface;
	tensor_interface = (struct starpu_tensor_interface *) starpu_data_get_interface_on_node(handle, STARPU_MAIN_RAM);

	fprintf(f, "%u\t%u\t%u\t%u\t", tensor_interface->nx, tensor_interface->ny, tensor_interface->nz, tensor_interface->nt);
}
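
/* pack_tensor_handle() serializes a possibly strided tensor into a freshly
 * allocated contiguous buffer, copying it line by line (nt*nz*ny memcpy()
 * calls of nx*elemsize bytes each); unpack_tensor_handle() further below
 * performs the reverse scatter and releases the buffer. */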
static int pack_tensor_handle(starpu_data_handle_t handle, unsigned node, void **ptr, starpu_ssize_t *count)
{
	STARPU_ASSERT(starpu_data_test_if_allocated_on_node(handle, node));

	struct starpu_tensor_interface *tensor_interface = (struct starpu_tensor_interface *)
		starpu_data_get_interface_on_node(handle, node);

	*count = tensor_interface->nx*tensor_interface->ny*tensor_interface->nz*tensor_interface->nt*tensor_interface->elemsize;

	if (ptr != NULL)
	{
		uint32_t t, z, y;
		char *block = (void *)tensor_interface->ptr;

		*ptr = (void *)starpu_malloc_on_node_flags(node, *count, 0);

		char *cur = *ptr;
		char *block_t = block;
		for(t=0 ; t<tensor_interface->nt ; t++)
		{
			char *block_z = block_t;
			for(z=0 ; z<tensor_interface->nz ; z++)
			{
				char *block_y = block_z;
				for(y=0 ; y<tensor_interface->ny ; y++)
				{
					memcpy(cur, block_y, tensor_interface->nx*tensor_interface->elemsize);
					cur += tensor_interface->nx*tensor_interface->elemsize;
					block_y += tensor_interface->ldy * tensor_interface->elemsize;
				}
				block_z += tensor_interface->ldz * tensor_interface->elemsize;
			}
			block_t += tensor_interface->ldt * tensor_interface->elemsize;
		}
	}

	return 0;
}

static int unpack_tensor_handle(starpu_data_handle_t handle, unsigned node, void *ptr, size_t count)
{
	STARPU_ASSERT(starpu_data_test_if_allocated_on_node(handle, node));

	struct starpu_tensor_interface *tensor_interface = (struct starpu_tensor_interface *)
		starpu_data_get_interface_on_node(handle, node);

	STARPU_ASSERT(count == tensor_interface->elemsize * tensor_interface->nx * tensor_interface->ny * tensor_interface->nz * tensor_interface->nt);

	uint32_t t, z, y;
	char *cur = ptr;
	char *block = (void *)tensor_interface->ptr;
	char *block_t = block;

	for(t=0 ; t<tensor_interface->nt ; t++)
	{
		char *block_z = block_t;
		for(z=0 ; z<tensor_interface->nz ; z++)
		{
			char *block_y = block_z;
			for(y=0 ; y<tensor_interface->ny ; y++)
			{
				memcpy(block_y, cur, tensor_interface->nx*tensor_interface->elemsize);
				cur += tensor_interface->nx*tensor_interface->elemsize;
				block_y += tensor_interface->ldy * tensor_interface->elemsize;
			}
			block_z += tensor_interface->ldz * tensor_interface->elemsize;
		}
		block_t += tensor_interface->ldt * tensor_interface->elemsize;
	}

	starpu_free_on_node_flags(node, (uintptr_t)ptr, count, 0);

	return 0;
}

static size_t tensor_interface_get_size(starpu_data_handle_t handle)
{
	size_t size;
	struct starpu_tensor_interface *tensor_interface;

	tensor_interface = (struct starpu_tensor_interface *) starpu_data_get_interface_on_node(handle, STARPU_MAIN_RAM);

#ifdef STARPU_DEBUG
	STARPU_ASSERT_MSG(tensor_interface->id == STARPU_TENSOR_INTERFACE_ID, "Error. The given data is not a tensor.");
#endif

	size = tensor_interface->nx*tensor_interface->ny*tensor_interface->nz*tensor_interface->nt*tensor_interface->elemsize;

	return size;
}

/* offer an access to the data parameters */
uint32_t starpu_tensor_get_nx(starpu_data_handle_t handle)
{
	struct starpu_tensor_interface *tensor_interface = (struct starpu_tensor_interface *)
		starpu_data_get_interface_on_node(handle, STARPU_MAIN_RAM);

#ifdef STARPU_DEBUG
	STARPU_ASSERT_MSG(tensor_interface->id == STARPU_TENSOR_INTERFACE_ID, "Error. The given data is not a tensor.");
#endif

	return tensor_interface->nx;
}

uint32_t starpu_tensor_get_ny(starpu_data_handle_t handle)
{
	struct starpu_tensor_interface *tensor_interface = (struct starpu_tensor_interface *)
		starpu_data_get_interface_on_node(handle, STARPU_MAIN_RAM);

#ifdef STARPU_DEBUG
	STARPU_ASSERT_MSG(tensor_interface->id == STARPU_TENSOR_INTERFACE_ID, "Error. The given data is not a tensor.");
#endif

	return tensor_interface->ny;
}

uint32_t starpu_tensor_get_nz(starpu_data_handle_t handle)
{
	struct starpu_tensor_interface *tensor_interface = (struct starpu_tensor_interface *)
		starpu_data_get_interface_on_node(handle, STARPU_MAIN_RAM);

#ifdef STARPU_DEBUG
	STARPU_ASSERT_MSG(tensor_interface->id == STARPU_TENSOR_INTERFACE_ID, "Error. The given data is not a tensor.");
#endif

	return tensor_interface->nz;
}

uint32_t starpu_tensor_get_nt(starpu_data_handle_t handle)
{
	struct starpu_tensor_interface *tensor_interface = (struct starpu_tensor_interface *)
		starpu_data_get_interface_on_node(handle, STARPU_MAIN_RAM);

#ifdef STARPU_DEBUG
	STARPU_ASSERT_MSG(tensor_interface->id == STARPU_TENSOR_INTERFACE_ID, "Error. The given data is not a tensor.");
#endif

	return tensor_interface->nt;
}

uint32_t starpu_tensor_get_local_ldy(starpu_data_handle_t handle)
{
	unsigned node;
	node = starpu_worker_get_local_memory_node();

	STARPU_ASSERT(starpu_data_test_if_allocated_on_node(handle, node));

	struct starpu_tensor_interface *tensor_interface = (struct starpu_tensor_interface *)
		starpu_data_get_interface_on_node(handle, node);

#ifdef STARPU_DEBUG
	STARPU_ASSERT_MSG(tensor_interface->id == STARPU_TENSOR_INTERFACE_ID, "Error. The given data is not a tensor.");
#endif

	return tensor_interface->ldy;
}

uint32_t starpu_tensor_get_local_ldz(starpu_data_handle_t handle)
{
	unsigned node;
	node = starpu_worker_get_local_memory_node();

	STARPU_ASSERT(starpu_data_test_if_allocated_on_node(handle, node));

	struct starpu_tensor_interface *tensor_interface = (struct starpu_tensor_interface *)
		starpu_data_get_interface_on_node(handle, node);

#ifdef STARPU_DEBUG
	STARPU_ASSERT_MSG(tensor_interface->id == STARPU_TENSOR_INTERFACE_ID, "Error. The given data is not a tensor.");
#endif

	return tensor_interface->ldz;
}

uint32_t starpu_tensor_get_local_ldt(starpu_data_handle_t handle)
{
	unsigned node;
	node = starpu_worker_get_local_memory_node();

	STARPU_ASSERT(starpu_data_test_if_allocated_on_node(handle, node));

	struct starpu_tensor_interface *tensor_interface = (struct starpu_tensor_interface *)
		starpu_data_get_interface_on_node(handle, node);

#ifdef STARPU_DEBUG
	STARPU_ASSERT_MSG(tensor_interface->id == STARPU_TENSOR_INTERFACE_ID, "Error. The given data is not a tensor.");
#endif

	return tensor_interface->ldt;
}

uintptr_t starpu_tensor_get_local_ptr(starpu_data_handle_t handle)
{
	unsigned node;
	node = starpu_worker_get_local_memory_node();

	STARPU_ASSERT(starpu_data_test_if_allocated_on_node(handle, node));

	struct starpu_tensor_interface *tensor_interface = (struct starpu_tensor_interface *)
		starpu_data_get_interface_on_node(handle, node);

#ifdef STARPU_DEBUG
	STARPU_ASSERT_MSG(tensor_interface->id == STARPU_TENSOR_INTERFACE_ID, "Error. The given data is not a tensor.");
#endif

	return tensor_interface->ptr;
}

size_t starpu_tensor_get_elemsize(starpu_data_handle_t handle)
{
	struct starpu_tensor_interface *tensor_interface = (struct starpu_tensor_interface *)
		starpu_data_get_interface_on_node(handle, STARPU_MAIN_RAM);

#ifdef STARPU_DEBUG
	STARPU_ASSERT_MSG(tensor_interface->id == STARPU_TENSOR_INTERFACE_ID, "Error. The given data is not a tensor.");
#endif

	return tensor_interface->elemsize;
}
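
/* Illustrative sketch (not part of the StarPU sources): a CPU codelet
 * implementation would typically go through the accessors above, or the
 * equivalent STARPU_TENSOR_GET_* macros, rather than poking into the
 * interface structure directly; the scaling function below is made up
 * for the example.
 *
 *	void scal_cpu_func(void *buffers[], void *cl_arg)
 *	{
 *		float *v = (float *)STARPU_TENSOR_GET_PTR(buffers[0]);
 *		uint32_t x, y, z, t;
 *		for (t = 0; t < STARPU_TENSOR_GET_NT(buffers[0]); t++)
 *		for (z = 0; z < STARPU_TENSOR_GET_NZ(buffers[0]); z++)
 *		for (y = 0; y < STARPU_TENSOR_GET_NY(buffers[0]); y++)
 *		for (x = 0; x < STARPU_TENSOR_GET_NX(buffers[0]); x++)
 *			v[x + y*STARPU_TENSOR_GET_LDY(buffers[0])
 *			    + z*STARPU_TENSOR_GET_LDZ(buffers[0])
 *			    + t*STARPU_TENSOR_GET_LDT(buffers[0])] *= 2.0f;
 *	}
 */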

/* memory allocation/deallocation primitives for the tensor interface */

/* returns the size of the allocated area */
static starpu_ssize_t allocate_tensor_buffer_on_node(void *data_interface_, unsigned dst_node)
{
	uintptr_t addr = 0, handle;

	struct starpu_tensor_interface *dst_block = (struct starpu_tensor_interface *) data_interface_;

	uint32_t nx = dst_block->nx;
	uint32_t ny = dst_block->ny;
	uint32_t nz = dst_block->nz;
	uint32_t nt = dst_block->nt;
	size_t elemsize = dst_block->elemsize;

	starpu_ssize_t allocated_memory;

	handle = starpu_malloc_on_node(dst_node, nx*ny*nz*nt*elemsize);
	if (!handle)
		return -ENOMEM;

	if (starpu_node_get_kind(dst_node) != STARPU_OPENCL_RAM)
		addr = handle;

	allocated_memory = nx*ny*nz*nt*elemsize;

	/* update the data accordingly: a freshly allocated buffer is contiguous */
	dst_block->ptr = addr;
	dst_block->dev_handle = handle;
	dst_block->offset = 0;
	dst_block->ldy = nx;
	dst_block->ldz = nx*ny;
	dst_block->ldt = nx*ny*nz;

	return allocated_memory;
}

static void free_tensor_buffer_on_node(void *data_interface, unsigned node)
{
	struct starpu_tensor_interface *tensor_interface = (struct starpu_tensor_interface *) data_interface;
	uint32_t nx = tensor_interface->nx;
	uint32_t ny = tensor_interface->ny;
	uint32_t nz = tensor_interface->nz;
	uint32_t nt = tensor_interface->nt;
	size_t elemsize = tensor_interface->elemsize;

	starpu_free_on_node(node, tensor_interface->dev_handle, nx*ny*nz*nt*elemsize);
}

#ifdef STARPU_USE_CUDA
static int copy_cuda_common(void *src_interface, unsigned src_node STARPU_ATTRIBUTE_UNUSED, void *dst_interface, unsigned dst_node STARPU_ATTRIBUTE_UNUSED, enum cudaMemcpyKind kind)
{
	struct starpu_tensor_interface *src_block = src_interface;
	struct starpu_tensor_interface *dst_block = dst_interface;

	uint32_t nx = src_block->nx;
	uint32_t ny = src_block->ny;
	uint32_t nz = src_block->nz;
	uint32_t nt = src_block->nt;
	size_t elemsize = src_block->elemsize;

	cudaError_t cures;

	if (src_block->ldy == dst_block->ldy && src_block->ldz == dst_block->ldz && src_block->ldt == dst_block->ldt
			&& nx*ny*nz == src_block->ldt)
	{
		/* Same lds on both sides, and contiguous, simple */
		starpu_cuda_copy_async_sync((void *)src_block->ptr, src_node, (void *)dst_block->ptr, dst_node, nx*ny*nz*nt*elemsize, NULL, kind);
	}
	else
	{
		/* TODO: use cudaMemcpy2D for whole 3D blocks etc. when they are contiguous */
		/* Default case: we transfer the 2D planes one by one: nt*nz transfers */
		/* TODO: use cudaMemcpy3D now that it works (except on cuda 4.2) */
		unsigned t;
		for (t = 0; t < src_block->nt; t++)
		{
			unsigned z;
			for (z = 0; z < src_block->nz; z++)
			{
				uint8_t *src_ptr = ((uint8_t *)src_block->ptr) + t*src_block->ldt*src_block->elemsize + z*src_block->ldz*src_block->elemsize;
				uint8_t *dst_ptr = ((uint8_t *)dst_block->ptr) + t*dst_block->ldt*dst_block->elemsize + z*dst_block->ldz*dst_block->elemsize;
				cures = cudaMemcpy2D((char *)dst_ptr, dst_block->ldy*elemsize,
						     (char *)src_ptr, src_block->ldy*elemsize,
						     nx*elemsize, ny, kind);
				if (!cures)
					cures = cudaThreadSynchronize();
				if (STARPU_UNLIKELY(cures))
					STARPU_CUDA_REPORT_ERROR(cures);
			}
		}
	}

	starpu_interface_data_copy(src_node, dst_node, src_block->nx*src_block->ny*src_block->nz*src_block->nt*src_block->elemsize);

	return 0;
}

static int copy_cuda_async_common(void *src_interface, unsigned src_node STARPU_ATTRIBUTE_UNUSED, void *dst_interface, unsigned dst_node STARPU_ATTRIBUTE_UNUSED, cudaStream_t stream, enum cudaMemcpyKind kind)
{
	struct starpu_tensor_interface *src_block = src_interface;
	struct starpu_tensor_interface *dst_block = dst_interface;

	uint32_t nx = src_block->nx;
	uint32_t ny = src_block->ny;
	uint32_t nz = src_block->nz;
	uint32_t nt = src_block->nt;
	size_t elemsize = src_block->elemsize;

	cudaError_t cures;
	int ret;

	if (src_block->ldy == dst_block->ldy && src_block->ldz == dst_block->ldz && src_block->ldt == dst_block->ldt
			&& nx*ny*nz == src_block->ldt)
	{
		/* Same lds on both sides, and contiguous, simple */
		ret = starpu_cuda_copy_async_sync((void *)src_block->ptr, src_node, (void *)dst_block->ptr, dst_node, nx*ny*nz*nt*elemsize, stream, kind);
	}
	else
	{
		/* TODO: use cudaMemcpy2D for whole 3D blocks etc. when they are contiguous */
		/* Default case: we transfer the 2D planes one by one: nt*nz transfers */
		/* TODO: use cudaMemcpy3D now that it works (except on cuda 4.2) */
		unsigned t;
		for (t = 0; t < src_block->nt; t++)
		{
			unsigned z;
			for (z = 0; z < src_block->nz; z++)
			{
				uint8_t *src_ptr = ((uint8_t *)src_block->ptr) + t*src_block->ldt*src_block->elemsize + z*src_block->ldz*src_block->elemsize;
				uint8_t *dst_ptr = ((uint8_t *)dst_block->ptr) + t*dst_block->ldt*dst_block->elemsize + z*dst_block->ldz*dst_block->elemsize;
				double start;

				starpu_interface_start_driver_copy_async(src_node, dst_node, &start);
				cures = cudaMemcpy2DAsync((char *)dst_ptr, dst_block->ldy*elemsize,
							  (char *)src_ptr, src_block->ldy*elemsize,
							  nx*elemsize, ny, kind, stream);
				starpu_interface_end_driver_copy_async(src_node, dst_node, start);
				if (STARPU_UNLIKELY(cures))
				{
					/* The asynchronous copy failed, fall back to synchronous copies */
					goto no_async_default;
				}
			}
		}
		ret = -EAGAIN;
	}

	starpu_interface_data_copy(src_node, dst_node, src_block->nx*src_block->ny*src_block->nz*src_block->nt*src_block->elemsize);

	return ret;

no_async_default:
	{
		unsigned t;
		for (t = 0; t < src_block->nt; t++)
		{
			unsigned z;
			for (z = 0; z < src_block->nz; z++)
			{
				uint8_t *src_ptr = ((uint8_t *)src_block->ptr) + t*src_block->ldt*src_block->elemsize + z*src_block->ldz*src_block->elemsize;
				uint8_t *dst_ptr = ((uint8_t *)dst_block->ptr) + t*dst_block->ldt*dst_block->elemsize + z*dst_block->ldz*dst_block->elemsize;
				cures = cudaMemcpy2D((char *)dst_ptr, dst_block->ldy*elemsize,
						     (char *)src_ptr, src_block->ldy*elemsize,
						     nx*elemsize, ny, kind);
				if (!cures)
					cures = cudaThreadSynchronize();
				if (STARPU_UNLIKELY(cures))
					STARPU_CUDA_REPORT_ERROR(cures);
			}
		}

		starpu_interface_data_copy(src_node, dst_node, src_block->nx*src_block->ny*src_block->nz*src_block->nt*src_block->elemsize);
		return 0;
	}
}

static int copy_cuda_to_ram(void *src_interface, unsigned src_node, void *dst_interface, unsigned dst_node)
{
	return copy_cuda_common(src_interface, src_node, dst_interface, dst_node, cudaMemcpyDeviceToHost);
}

static int copy_ram_to_cuda(void *src_interface, unsigned src_node STARPU_ATTRIBUTE_UNUSED, void *dst_interface, unsigned dst_node STARPU_ATTRIBUTE_UNUSED)
{
	return copy_cuda_common(src_interface, src_node, dst_interface, dst_node, cudaMemcpyHostToDevice);
}

static int copy_cuda_to_cuda(void *src_interface, unsigned src_node STARPU_ATTRIBUTE_UNUSED, void *dst_interface, unsigned dst_node STARPU_ATTRIBUTE_UNUSED)
{
	return copy_cuda_common(src_interface, src_node, dst_interface, dst_node, cudaMemcpyDeviceToDevice);
}

static int copy_cuda_to_ram_async(void *src_interface, unsigned src_node STARPU_ATTRIBUTE_UNUSED, void *dst_interface, unsigned dst_node STARPU_ATTRIBUTE_UNUSED, cudaStream_t stream)
{
	return copy_cuda_async_common(src_interface, src_node, dst_interface, dst_node, stream, cudaMemcpyDeviceToHost);
}

static int copy_ram_to_cuda_async(void *src_interface, unsigned src_node STARPU_ATTRIBUTE_UNUSED, void *dst_interface, unsigned dst_node STARPU_ATTRIBUTE_UNUSED, cudaStream_t stream)
{
	return copy_cuda_async_common(src_interface, src_node, dst_interface, dst_node, stream, cudaMemcpyHostToDevice);
}
#endif // STARPU_USE_CUDA

#ifdef STARPU_USE_OPENCL
static int copy_opencl_common(void *src_interface, unsigned src_node, void *dst_interface, unsigned dst_node, cl_event *event)
{
	struct starpu_tensor_interface *src_block = src_interface;
	struct starpu_tensor_interface *dst_block = dst_interface;
	int ret = 0;

	uint32_t nx = src_block->nx;
	uint32_t ny = src_block->ny;
	uint32_t nz = src_block->nz;

	/* We may have a contiguous buffer for the entire block, or contiguous
	 * planes within the block; either way we can avoid many small transfers */
	if (src_block->ldy == dst_block->ldy && src_block->ldz == dst_block->ldz && src_block->ldt == dst_block->ldt
			&& nx*ny*nz == src_block->ldt)
	{
		ret = starpu_opencl_copy_async_sync(src_block->dev_handle, src_block->offset, src_node,
						    dst_block->dev_handle, dst_block->offset, dst_node,
						    src_block->nx*src_block->ny*src_block->nz*src_block->nt*src_block->elemsize,
						    event);
	}
	else
	{
		/* Default case: we transfer all lines one by one: nt*nz*ny transfers */
		/* TODO: rect support */
		unsigned t;
		for (t = 0; t < src_block->nt; t++)
		{
			unsigned z;
			for (z = 0; z < src_block->nz; z++)
			{
				unsigned j;
				for(j=0 ; j<src_block->ny ; j++)
				{
					ret = starpu_opencl_copy_async_sync(src_block->dev_handle,
									    src_block->offset + t*src_block->ldt*src_block->elemsize + z*src_block->ldz*src_block->elemsize + j*src_block->ldy*src_block->elemsize,
									    src_node,
									    dst_block->dev_handle,
									    dst_block->offset + t*dst_block->ldt*dst_block->elemsize + z*dst_block->ldz*dst_block->elemsize + j*dst_block->ldy*dst_block->elemsize,
									    dst_node,
									    src_block->nx*src_block->elemsize,
									    event);
				}
			}
		}
	}

	starpu_interface_data_copy(src_node, dst_node, src_block->nx*src_block->ny*src_block->nz*src_block->nt*src_block->elemsize);

	return ret;
}

static int copy_ram_to_opencl_async(void *src_interface, unsigned src_node, void *dst_interface, unsigned dst_node, cl_event *event)
{
	return copy_opencl_common(src_interface, src_node, dst_interface, dst_node, event);
}

static int copy_opencl_to_ram_async(void *src_interface, unsigned src_node, void *dst_interface, unsigned dst_node, cl_event *event)
{
	return copy_opencl_common(src_interface, src_node, dst_interface, dst_node, event);
}

static int copy_opencl_to_opencl_async(void *src_interface, unsigned src_node, void *dst_interface, unsigned dst_node, cl_event *event)
{
	return copy_opencl_common(src_interface, src_node, dst_interface, dst_node, event);
}

static int copy_ram_to_opencl(void *src_interface, unsigned src_node STARPU_ATTRIBUTE_UNUSED, void *dst_interface, unsigned dst_node STARPU_ATTRIBUTE_UNUSED)
{
	return copy_ram_to_opencl_async(src_interface, src_node, dst_interface, dst_node, NULL);
}

static int copy_opencl_to_ram(void *src_interface, unsigned src_node STARPU_ATTRIBUTE_UNUSED, void *dst_interface, unsigned dst_node STARPU_ATTRIBUTE_UNUSED)
{
	return copy_opencl_to_ram_async(src_interface, src_node, dst_interface, dst_node, NULL);
}

static int copy_opencl_to_opencl(void *src_interface, unsigned src_node STARPU_ATTRIBUTE_UNUSED, void *dst_interface, unsigned dst_node STARPU_ATTRIBUTE_UNUSED)
{
	return copy_opencl_to_opencl_async(src_interface, src_node, dst_interface, dst_node, NULL);
}
#endif
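
/* Generic fallback used between any pair of memory nodes: depending on how
 * the tensor is partitioned, it issues one transfer for the whole contiguous
 * buffer, one transfer per contiguous (x,y) plane, or one transfer per line
 * in the fully strided case. */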
static int copy_any_to_any(void *src_interface, unsigned src_node, void *dst_interface, unsigned dst_node, void *async_data)
{
	struct starpu_tensor_interface *src_block = (struct starpu_tensor_interface *) src_interface;
	struct starpu_tensor_interface *dst_block = (struct starpu_tensor_interface *) dst_interface;
	int ret = 0;

	uint32_t nx = dst_block->nx;
	uint32_t ny = dst_block->ny;
	uint32_t nz = dst_block->nz;
	uint32_t nt = dst_block->nt;
	size_t elemsize = dst_block->elemsize;

	uint32_t ldy_src = src_block->ldy;
	uint32_t ldz_src = src_block->ldz;
	uint32_t ldt_src = src_block->ldt;
	uint32_t ldy_dst = dst_block->ldy;
	uint32_t ldz_dst = dst_block->ldz;
	uint32_t ldt_dst = dst_block->ldt;

	if (ldy_src == nx && ldy_dst == nx && ldz_src == nx*ny && ldz_dst == nx*ny && ldt_src == nx*ny*nz && ldt_dst == nx*ny*nz)
	{
		/* Optimise non-partitioned and z-partitioned case */
		if (starpu_interface_copy(src_block->dev_handle, src_block->offset, src_node,
					  dst_block->dev_handle, dst_block->offset, dst_node,
					  nx*ny*nz*nt*elemsize, async_data))
			ret = -EAGAIN;
	}
	else
	{
		unsigned t;
		for (t = 0; t < nt; t++)
		{
			unsigned z;
			for (z = 0; z < nz; z++)
			{
				if (ldy_src == nx && ldy_dst == nx)
				{
					/* Optimise y-partitioned case */
					size_t src_offset = t*ldt_src*elemsize + z*ldz_src*elemsize;
					size_t dst_offset = t*ldt_dst*elemsize + z*ldz_dst*elemsize;

					if (starpu_interface_copy(src_block->dev_handle, src_block->offset + src_offset, src_node,
								  dst_block->dev_handle, dst_block->offset + dst_offset, dst_node,
								  nx*ny*elemsize, async_data))
						ret = -EAGAIN;
				}
				else
				{
					unsigned y;
					for (y = 0; y < ny; y++)
					{
						/* Worst case: x-partitioned, transfer line by line */
						size_t src_offset = (y*ldy_src + z*ldz_src + t*ldt_src)*elemsize;
						size_t dst_offset = (y*ldy_dst + z*ldz_dst + t*ldt_dst)*elemsize;

						if (starpu_interface_copy(src_block->dev_handle, src_block->offset + src_offset, src_node,
									  dst_block->dev_handle, dst_block->offset + dst_offset, dst_node,
									  nx*elemsize, async_data))
							ret = -EAGAIN;
					}
				}
			}
		}
	}

	starpu_interface_data_copy(src_node, dst_node, nx*ny*nz*nt*elemsize);

	return ret;
}

static starpu_ssize_t describe(void *data_interface, char *buf, size_t size)
{
	struct starpu_tensor_interface *block = (struct starpu_tensor_interface *) data_interface;
	return snprintf(buf, size, "T%ux%ux%ux%ux%u",
			(unsigned) block->nx,
			(unsigned) block->ny,
			(unsigned) block->nz,
			(unsigned) block->nt,
			(unsigned) block->elemsize);
}