/* copy_driver.c */

/* StarPU --- Runtime system for heterogeneous multicore architectures.
 *
 * Copyright (C) 2010-2014  Université de Bordeaux
 * Copyright (C) 2010, 2011, 2013  Centre National de la Recherche Scientifique
 *
 * StarPU is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation; either version 2.1 of the License, or (at
 * your option) any later version.
 *
 * StarPU is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * See the GNU Lesser General Public License in COPYING.LGPL for more details.
 */
#include <starpu.h>
#include <common/config.h>
#include <common/utils.h>
#include <core/sched_policy.h>
#include <datawizard/datastats.h>
#include <drivers/disk/driver_disk.h>
#include <common/fxt.h>
#include "copy_driver.h"
#include "memalloc.h"
#include <starpu_opencl.h>
#include <starpu_cuda.h>
#include <profiling/profiling.h>
#include <core/disk.h>

#ifdef STARPU_SIMGRID
#include <core/simgrid.h>
#include <msg/msg.h>
#endif
void _starpu_wake_all_blocked_workers_on_node(unsigned nodeid)
{
	/* wake up all workers on that memory node */
	unsigned cond_id;

	struct _starpu_memory_node_descr * const descr = _starpu_memory_node_get_description();

	STARPU_PTHREAD_RWLOCK_RDLOCK(&descr->conditions_rwlock);

	unsigned nconds = descr->condition_count[nodeid];
	for (cond_id = 0; cond_id < nconds; cond_id++)
	{
		struct _starpu_cond_and_mutex *condition;
		condition = &descr->conditions_attached_to_node[nodeid][cond_id];

		/* wake anybody waiting on that condition */
		STARPU_PTHREAD_MUTEX_LOCK(condition->mutex);
		STARPU_PTHREAD_COND_BROADCAST(condition->cond);
		STARPU_PTHREAD_MUTEX_UNLOCK(condition->mutex);
	}

	STARPU_PTHREAD_RWLOCK_UNLOCK(&descr->conditions_rwlock);
}
void starpu_wake_all_blocked_workers(void)
{
	/* workers may be blocked on the various queues' conditions */
	unsigned cond_id;

	struct _starpu_memory_node_descr * const descr = _starpu_memory_node_get_description();

	STARPU_PTHREAD_RWLOCK_RDLOCK(&descr->conditions_rwlock);

	unsigned nconds = descr->total_condition_count;
	for (cond_id = 0; cond_id < nconds; cond_id++)
	{
		struct _starpu_cond_and_mutex *condition;
		condition = &descr->conditions_all[cond_id];

		/* wake anybody waiting on that condition */
		STARPU_PTHREAD_MUTEX_LOCK(condition->mutex);
		STARPU_PTHREAD_COND_BROADCAST(condition->cond);
		STARPU_PTHREAD_MUTEX_UNLOCK(condition->mutex);
	}

	STARPU_PTHREAD_RWLOCK_UNLOCK(&descr->conditions_rwlock);
}
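
/* Illustrative sketch (not part of this file): the broadcasts above pair with
 * workers that registered a (cond, mutex) pair for their memory node and then
 * blocked on it. Assuming hypothetical names sched_cond, sched_mutex and
 * worker_has_work(), the worker side of the protocol looks like:
 *
 *	STARPU_PTHREAD_MUTEX_LOCK(&sched_mutex);
 *	while (!worker_has_work())
 *		STARPU_PTHREAD_COND_WAIT(&sched_cond, &sched_mutex);
 *	STARPU_PTHREAD_MUTEX_UNLOCK(&sched_mutex);
 *
 * A broadcast (rather than a signal) is needed because several workers may
 * wait on the same condition, and each of them re-checks its own predicate
 * after waking up.
 */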
#ifdef STARPU_USE_FXT
/* we need to identify each communication so that we can match the beginning
 * and the end of a communication in the trace, so we use a unique identifier
 * per communication */
static unsigned communication_cnt = 0;
#endif
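
/* Sketch of how that identifier is used (see _starpu_driver_copy_data_1_to_1
 * below): each transfer atomically draws a fresh id and passes it to both the
 * start and end trace macros, so a trace analyzer can pair the two events:
 *
 *	unsigned com_id = STARPU_ATOMIC_ADD(&communication_cnt, 1);
 *	_STARPU_TRACE_START_DRIVER_COPY(src_node, dst_node, size, com_id);
 *	... transfer ...
 *	_STARPU_TRACE_END_DRIVER_COPY(src_node, dst_node, size, com_id);
 */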
static int copy_data_1_to_1_generic(starpu_data_handle_t handle,
				    struct _starpu_data_replicate *src_replicate,
				    struct _starpu_data_replicate *dst_replicate,
				    struct _starpu_data_request *req)
{
	unsigned src_node = src_replicate->memory_node;
	unsigned dst_node = dst_replicate->memory_node;

	STARPU_ASSERT(src_replicate->refcnt);
	STARPU_ASSERT(dst_replicate->refcnt);

	STARPU_ASSERT(src_replicate->allocated);
	STARPU_ASSERT(dst_replicate->allocated);

	_starpu_comm_amounts_inc(src_node, dst_node, handle->ops->get_size(handle));

#ifdef STARPU_SIMGRID
	return _starpu_simgrid_transfer(handle->ops->get_size(handle), src_node, dst_node, req);
#else /* !SIMGRID */
	int ret = 0;

	const struct starpu_data_copy_methods *copy_methods = handle->ops->copy_methods;

	enum starpu_node_kind src_kind = starpu_node_get_kind(src_node);
	enum starpu_node_kind dst_kind = starpu_node_get_kind(dst_node);

#ifdef STARPU_USE_CUDA
	cudaError_t cures;
	cudaStream_t stream;
#endif

	void *src_interface = src_replicate->data_interface;
	void *dst_interface = dst_replicate->data_interface;

#if defined(STARPU_USE_CUDA) && defined(HAVE_CUDA_MEMCPY_PEER)
	if ((src_kind == STARPU_CUDA_RAM) || (dst_kind == STARPU_CUDA_RAM))
	{
		unsigned devid;
		if ((src_kind == STARPU_CUDA_RAM) && (dst_kind == STARPU_CUDA_RAM))
		{
			/* GPU-GPU transfer, issue it from the device we are supposed to drive */
			int worker = starpu_worker_get_id();
			devid = starpu_worker_get_devid(worker);
		}
		else
		{
			unsigned node = (dst_kind == STARPU_CUDA_RAM) ? dst_node : src_node;
			devid = _starpu_memory_node_get_devid(node);
		}
		starpu_cuda_set_device(devid);
	}
#endif
	switch (_STARPU_MEMORY_NODE_TUPLE(src_kind,dst_kind))
	{
	case _STARPU_MEMORY_NODE_TUPLE(STARPU_CPU_RAM,STARPU_CPU_RAM):
		/* STARPU_CPU_RAM -> STARPU_CPU_RAM */
		if (copy_methods->ram_to_ram)
			copy_methods->ram_to_ram(src_interface, src_node, dst_interface, dst_node);
		else
			copy_methods->any_to_any(src_interface, src_node, dst_interface, dst_node, req ? &req->async_channel : NULL);
		break;
#ifdef STARPU_USE_CUDA
	case _STARPU_MEMORY_NODE_TUPLE(STARPU_CUDA_RAM,STARPU_CPU_RAM):
		/* STARPU_CUDA_RAM -> STARPU_CPU_RAM */
		/* only the proper CUDA driver thread can initiate this directly! */
#if !defined(HAVE_CUDA_MEMCPY_PEER)
		STARPU_ASSERT(_starpu_memory_node_get_local_key() == src_node);
#endif
		if (!req || starpu_asynchronous_copy_disabled() || starpu_asynchronous_cuda_copy_disabled() ||
		    !(copy_methods->cuda_to_ram_async || copy_methods->any_to_any))
		{
			/* this is not associated with a request, so it is synchronous */
			STARPU_ASSERT(copy_methods->cuda_to_ram || copy_methods->any_to_any);
			if (copy_methods->cuda_to_ram)
				copy_methods->cuda_to_ram(src_interface, src_node, dst_interface, dst_node);
			else
				copy_methods->any_to_any(src_interface, src_node, dst_interface, dst_node, NULL);
		}
		else
		{
			req->async_channel.type = STARPU_CUDA_RAM;
			cures = cudaEventCreateWithFlags(&req->async_channel.event.cuda_event, cudaEventDisableTiming);
			if (STARPU_UNLIKELY(cures != cudaSuccess))
				STARPU_CUDA_REPORT_ERROR(cures);

			stream = starpu_cuda_get_local_out_transfer_stream();
			if (copy_methods->cuda_to_ram_async)
				ret = copy_methods->cuda_to_ram_async(src_interface, src_node, dst_interface, dst_node, stream);
			else
			{
				STARPU_ASSERT(copy_methods->any_to_any);
				ret = copy_methods->any_to_any(src_interface, src_node, dst_interface, dst_node, &req->async_channel);
			}

			cures = cudaEventRecord(req->async_channel.event.cuda_event, stream);
			if (STARPU_UNLIKELY(cures != cudaSuccess))
				STARPU_CUDA_REPORT_ERROR(cures);
		}
		break;
	case _STARPU_MEMORY_NODE_TUPLE(STARPU_CPU_RAM,STARPU_CUDA_RAM):
		/* STARPU_CPU_RAM -> STARPU_CUDA_RAM */
		/* only the proper CUDA driver thread can initiate this! */
#if !defined(HAVE_CUDA_MEMCPY_PEER)
		STARPU_ASSERT(_starpu_memory_node_get_local_key() == dst_node);
#endif
		if (!req || starpu_asynchronous_copy_disabled() || starpu_asynchronous_cuda_copy_disabled() ||
		    !(copy_methods->ram_to_cuda_async || copy_methods->any_to_any))
		{
			/* this is not associated with a request, so it is synchronous */
			STARPU_ASSERT(copy_methods->ram_to_cuda || copy_methods->any_to_any);
			if (copy_methods->ram_to_cuda)
				copy_methods->ram_to_cuda(src_interface, src_node, dst_interface, dst_node);
			else
				copy_methods->any_to_any(src_interface, src_node, dst_interface, dst_node, NULL);
		}
		else
		{
			req->async_channel.type = STARPU_CUDA_RAM;
			cures = cudaEventCreateWithFlags(&req->async_channel.event.cuda_event, cudaEventDisableTiming);
			if (STARPU_UNLIKELY(cures != cudaSuccess))
				STARPU_CUDA_REPORT_ERROR(cures);

			stream = starpu_cuda_get_local_in_transfer_stream();
			if (copy_methods->ram_to_cuda_async)
				ret = copy_methods->ram_to_cuda_async(src_interface, src_node, dst_interface, dst_node, stream);
			else
			{
				STARPU_ASSERT(copy_methods->any_to_any);
				ret = copy_methods->any_to_any(src_interface, src_node, dst_interface, dst_node, &req->async_channel);
			}

			cures = cudaEventRecord(req->async_channel.event.cuda_event, stream);
			if (STARPU_UNLIKELY(cures != cudaSuccess))
				STARPU_CUDA_REPORT_ERROR(cures);
		}
		break;
	case _STARPU_MEMORY_NODE_TUPLE(STARPU_CUDA_RAM,STARPU_CUDA_RAM):
		/* CUDA -> CUDA transfer */
		if (!req || starpu_asynchronous_copy_disabled() || starpu_asynchronous_cuda_copy_disabled() ||
		    !(copy_methods->cuda_to_cuda_async || copy_methods->any_to_any))
		{
			STARPU_ASSERT(copy_methods->cuda_to_cuda || copy_methods->any_to_any);
			/* this is not associated with a request, so it is synchronous */
			if (copy_methods->cuda_to_cuda)
				copy_methods->cuda_to_cuda(src_interface, src_node, dst_interface, dst_node);
			else
				copy_methods->any_to_any(src_interface, src_node, dst_interface, dst_node, NULL);
		}
		else
		{
			req->async_channel.type = STARPU_CUDA_RAM;
			cures = cudaEventCreateWithFlags(&req->async_channel.event.cuda_event, cudaEventDisableTiming);
			if (STARPU_UNLIKELY(cures != cudaSuccess))
				STARPU_CUDA_REPORT_ERROR(cures);

			stream = starpu_cuda_get_peer_transfer_stream(src_node, dst_node);
			if (copy_methods->cuda_to_cuda_async)
				ret = copy_methods->cuda_to_cuda_async(src_interface, src_node, dst_interface, dst_node, stream);
			else
			{
				STARPU_ASSERT(copy_methods->any_to_any);
				ret = copy_methods->any_to_any(src_interface, src_node, dst_interface, dst_node, &req->async_channel);
			}

			cures = cudaEventRecord(req->async_channel.event.cuda_event, stream);
			if (STARPU_UNLIKELY(cures != cudaSuccess))
				STARPU_CUDA_REPORT_ERROR(cures);
		}
		break;
#endif
#ifdef STARPU_USE_OPENCL
	case _STARPU_MEMORY_NODE_TUPLE(STARPU_OPENCL_RAM,STARPU_CPU_RAM):
		/* OpenCL -> RAM */
		STARPU_ASSERT(_starpu_memory_node_get_local_key() == src_node);
		if (!req || starpu_asynchronous_copy_disabled() || starpu_asynchronous_opencl_copy_disabled() ||
		    !(copy_methods->opencl_to_ram_async || copy_methods->any_to_any))
		{
			STARPU_ASSERT(copy_methods->opencl_to_ram || copy_methods->any_to_any);
			/* this is not associated with a request, so it is synchronous */
			if (copy_methods->opencl_to_ram)
				copy_methods->opencl_to_ram(src_interface, src_node, dst_interface, dst_node);
			else
				copy_methods->any_to_any(src_interface, src_node, dst_interface, dst_node, NULL);
		}
		else
		{
			req->async_channel.type = STARPU_OPENCL_RAM;
			if (copy_methods->opencl_to_ram_async)
				ret = copy_methods->opencl_to_ram_async(src_interface, src_node, dst_interface, dst_node, &(req->async_channel.event.opencl_event));
			else
			{
				STARPU_ASSERT(copy_methods->any_to_any);
				ret = copy_methods->any_to_any(src_interface, src_node, dst_interface, dst_node, &req->async_channel);
			}
		}
		break;
	case _STARPU_MEMORY_NODE_TUPLE(STARPU_CPU_RAM,STARPU_OPENCL_RAM):
		/* STARPU_CPU_RAM -> STARPU_OPENCL_RAM */
		STARPU_ASSERT(_starpu_memory_node_get_local_key() == dst_node);
		if (!req || starpu_asynchronous_copy_disabled() || starpu_asynchronous_opencl_copy_disabled() ||
		    !(copy_methods->ram_to_opencl_async || copy_methods->any_to_any))
		{
			STARPU_ASSERT(copy_methods->ram_to_opencl || copy_methods->any_to_any);
			/* this is not associated with a request, so it is synchronous */
			if (copy_methods->ram_to_opencl)
				copy_methods->ram_to_opencl(src_interface, src_node, dst_interface, dst_node);
			else
				copy_methods->any_to_any(src_interface, src_node, dst_interface, dst_node, NULL);
		}
		else
		{
			req->async_channel.type = STARPU_OPENCL_RAM;
			if (copy_methods->ram_to_opencl_async)
				ret = copy_methods->ram_to_opencl_async(src_interface, src_node, dst_interface, dst_node, &(req->async_channel.event.opencl_event));
			else
			{
				STARPU_ASSERT(copy_methods->any_to_any);
				ret = copy_methods->any_to_any(src_interface, src_node, dst_interface, dst_node, &req->async_channel);
			}
		}
		break;
	case _STARPU_MEMORY_NODE_TUPLE(STARPU_OPENCL_RAM,STARPU_OPENCL_RAM):
		/* STARPU_OPENCL_RAM -> STARPU_OPENCL_RAM */
		STARPU_ASSERT(_starpu_memory_node_get_local_key() == dst_node || _starpu_memory_node_get_local_key() == src_node);
		if (!req || starpu_asynchronous_copy_disabled() || starpu_asynchronous_opencl_copy_disabled() ||
		    !(copy_methods->opencl_to_opencl_async || copy_methods->any_to_any))
		{
			STARPU_ASSERT(copy_methods->opencl_to_opencl || copy_methods->any_to_any);
			/* this is not associated with a request, so it is synchronous */
			if (copy_methods->opencl_to_opencl)
				copy_methods->opencl_to_opencl(src_interface, src_node, dst_interface, dst_node);
			else
				copy_methods->any_to_any(src_interface, src_node, dst_interface, dst_node, NULL);
		}
		else
		{
			req->async_channel.type = STARPU_OPENCL_RAM;
			if (copy_methods->opencl_to_opencl_async)
				ret = copy_methods->opencl_to_opencl_async(src_interface, src_node, dst_interface, dst_node, &(req->async_channel.event.opencl_event));
			else
			{
				STARPU_ASSERT(copy_methods->any_to_any);
				ret = copy_methods->any_to_any(src_interface, src_node, dst_interface, dst_node, &req->async_channel);
			}
		}
		break;
#endif
#ifdef STARPU_USE_MIC
	case _STARPU_MEMORY_NODE_TUPLE(STARPU_CPU_RAM,STARPU_MIC_RAM):
		/* RAM -> MIC */
		if (!req || starpu_asynchronous_copy_disabled() || starpu_asynchronous_mic_copy_disabled() ||
		    !(copy_methods->ram_to_mic_async || copy_methods->any_to_any))
		{
			/* this is not associated with a request, so it is synchronous */
			STARPU_ASSERT(copy_methods->ram_to_mic || copy_methods->any_to_any);
			if (copy_methods->ram_to_mic)
				copy_methods->ram_to_mic(src_interface, src_node, dst_interface, dst_node);
			else
				copy_methods->any_to_any(src_interface, src_node, dst_interface, dst_node, NULL);
		}
		else
		{
			req->async_channel.type = STARPU_MIC_RAM;
			if (copy_methods->ram_to_mic_async)
				ret = copy_methods->ram_to_mic_async(src_interface, src_node, dst_interface, dst_node);
			else
			{
				STARPU_ASSERT(copy_methods->any_to_any);
				ret = copy_methods->any_to_any(src_interface, src_node, dst_interface, dst_node, &req->async_channel);
			}
			_starpu_mic_init_event(&(req->async_channel.event.mic_event), dst_node);
		}
		break;
	case _STARPU_MEMORY_NODE_TUPLE(STARPU_MIC_RAM,STARPU_CPU_RAM):
		/* MIC -> RAM */
		if (!req || starpu_asynchronous_copy_disabled() || starpu_asynchronous_mic_copy_disabled() ||
		    !(copy_methods->mic_to_ram_async || copy_methods->any_to_any))
		{
			/* this is not associated with a request, so it is synchronous */
			STARPU_ASSERT(copy_methods->mic_to_ram || copy_methods->any_to_any);
			if (copy_methods->mic_to_ram)
				copy_methods->mic_to_ram(src_interface, src_node, dst_interface, dst_node);
			else
				copy_methods->any_to_any(src_interface, src_node, dst_interface, dst_node, NULL);
		}
		else
		{
			req->async_channel.type = STARPU_MIC_RAM;
			if (copy_methods->mic_to_ram_async)
				ret = copy_methods->mic_to_ram_async(src_interface, src_node, dst_interface, dst_node);
			else
			{
				STARPU_ASSERT(copy_methods->any_to_any);
				ret = copy_methods->any_to_any(src_interface, src_node, dst_interface, dst_node, &req->async_channel);
			}
			_starpu_mic_init_event(&(req->async_channel.event.mic_event), src_node);
		}
		break;
#endif
#ifdef STARPU_USE_SCC
	/* SCC RAM associated with the master process is considered as
	 * the main memory node. */
	case _STARPU_MEMORY_NODE_TUPLE(STARPU_CPU_RAM,STARPU_SCC_RAM):
		/* master private SCC RAM -> slave private SCC RAM */
		if (copy_methods->scc_src_to_sink)
			copy_methods->scc_src_to_sink(src_interface, src_node, dst_interface, dst_node);
		else
			copy_methods->any_to_any(src_interface, src_node, dst_interface, dst_node, NULL);
		break;
	case _STARPU_MEMORY_NODE_TUPLE(STARPU_SCC_RAM,STARPU_CPU_RAM):
		/* slave private SCC RAM -> master private SCC RAM */
		if (copy_methods->scc_sink_to_src)
			copy_methods->scc_sink_to_src(src_interface, src_node, dst_interface, dst_node);
		else
			copy_methods->any_to_any(src_interface, src_node, dst_interface, dst_node, NULL);
		break;
	case _STARPU_MEMORY_NODE_TUPLE(STARPU_SCC_RAM,STARPU_SCC_RAM):
		/* slave private SCC RAM -> slave private SCC RAM */
		if (copy_methods->scc_sink_to_sink)
			copy_methods->scc_sink_to_sink(src_interface, src_node, dst_interface, dst_node);
		else
			copy_methods->any_to_any(src_interface, src_node, dst_interface, dst_node, NULL);
		break;
#endif
	case _STARPU_MEMORY_NODE_TUPLE(STARPU_CPU_RAM,STARPU_DISK_RAM):
		if (copy_methods->any_to_any)
			ret = copy_methods->any_to_any(src_interface, src_node, dst_interface, dst_node, req && !starpu_asynchronous_copy_disabled() ? &req->async_channel : NULL);
		else
		{
			void *obj = starpu_data_handle_to_pointer(handle, dst_node);
			void *ptr = NULL;
			starpu_ssize_t size = 0;
			handle->ops->pack_data(handle, src_node, &ptr, &size);
			ret = _starpu_disk_full_write(src_node, dst_node, obj, ptr, size, &req->async_channel);
			if (ret == 0)
				/* write is already finished, ptr was allocated in pack_data */
				free(ptr);

			/* For now, asynchronous is not supported */
			STARPU_ASSERT(ret == 0);
		}
		break;
	case _STARPU_MEMORY_NODE_TUPLE(STARPU_DISK_RAM,STARPU_CPU_RAM):
		if (copy_methods->any_to_any)
			ret = copy_methods->any_to_any(src_interface, src_node, dst_interface, dst_node, req && !starpu_asynchronous_copy_disabled() ? &req->async_channel : NULL);
		else
		{
			void *obj = starpu_data_handle_to_pointer(handle, src_node);
			void *ptr = NULL;
			size_t size = 0;
			ret = _starpu_disk_full_read(src_node, dst_node, obj, &ptr, &size, &req->async_channel);
			if (ret == 0)
			{
				/* read is already finished, we can already unpack */
				handle->ops->unpack_data(handle, dst_node, ptr, size);
				/* ptr was allocated in full_read */
				free(ptr);
			}

			/* For now, asynchronous is not supported */
			STARPU_ASSERT(ret == 0);
		}
		break;
	case _STARPU_MEMORY_NODE_TUPLE(STARPU_DISK_RAM,STARPU_DISK_RAM):
		ret = copy_methods->any_to_any(src_interface, src_node, dst_interface, dst_node, req ? &req->async_channel : NULL);
		break;
	default:
		STARPU_ABORT();
		break;
	}

	return ret;
#endif /* !SIMGRID */
}
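
/* Illustrative sketch (not part of this file): the dispatch above means a data
 * interface does not have to provide the full matrix of per-architecture copy
 * methods. A minimal interface can implement only any_to_any() and forward to
 * starpu_interface_copy() (defined below); copy_data_1_to_1_generic() then
 * falls back to it for every node pair. struct my_interface, its ptr/size
 * fields and my_any_to_any are hypothetical names used only for this sketch:
 *
 *	struct my_interface
 *	{
 *		uintptr_t ptr;	// per-node address of a flat buffer
 *		size_t size;	// buffer length in bytes
 *	};
 *
 *	static int my_any_to_any(void *src_interface, unsigned src_node,
 *				 void *dst_interface, unsigned dst_node,
 *				 void *async_data)
 *	{
 *		struct my_interface *src = src_interface;
 *		struct my_interface *dst = dst_interface;
 *		// returns 0 when done, -EAGAIN when the copy is pending
 *		return starpu_interface_copy(src->ptr, 0, src_node,
 *					     dst->ptr, 0, dst_node,
 *					     src->size, async_data);
 *	}
 *
 *	static const struct starpu_data_copy_methods my_copy_methods =
 *	{
 *		.any_to_any = my_any_to_any,
 *	};
 */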
int STARPU_ATTRIBUTE_WARN_UNUSED_RESULT _starpu_driver_copy_data_1_to_1(starpu_data_handle_t handle,
									struct _starpu_data_replicate *src_replicate,
									struct _starpu_data_replicate *dst_replicate,
									unsigned donotread,
									struct _starpu_data_request *req,
									unsigned may_alloc)
{
	if (!donotread)
	{
		STARPU_ASSERT(src_replicate->allocated);
		STARPU_ASSERT(src_replicate->refcnt);
	}

	int ret_alloc, ret_copy;
	unsigned STARPU_ATTRIBUTE_UNUSED com_id = 0;

	unsigned src_node = src_replicate->memory_node;
	unsigned dst_node = dst_replicate->memory_node;

	/* first make sure the destination has an allocated buffer */
	if (!dst_replicate->allocated)
	{
		if (!may_alloc)
			return -ENOMEM;

		ret_alloc = _starpu_allocate_memory_on_node(handle, dst_replicate, req ? req->prefetch : 0);
		if (ret_alloc)
			return -ENOMEM;
	}

	STARPU_ASSERT(dst_replicate->allocated);
	STARPU_ASSERT(dst_replicate->refcnt);

	/* if there is no need to actually read the data,
	 * we do not perform any transfer */
	if (!donotread)
	{
		size_t size = _starpu_data_get_size(handle);
		_starpu_bus_update_profiling_info((int)src_node, (int)dst_node, size);

#ifdef STARPU_USE_FXT
		com_id = STARPU_ATOMIC_ADD(&communication_cnt, 1);
		if (req)
			req->com_id = com_id;
#endif

		dst_replicate->initialized = 1;

		_STARPU_TRACE_START_DRIVER_COPY(src_node, dst_node, size, com_id);
		ret_copy = copy_data_1_to_1_generic(handle, src_replicate, dst_replicate, req);
		if (!req)
			/* Synchronous, this is already finished */
			_STARPU_TRACE_END_DRIVER_COPY(src_node, dst_node, size, com_id);

		return ret_copy;
	}

	return 0;
}
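
/* Illustrative sketch of the caller-side contract (hypothetical caller, not
 * part of this file): -ENOMEM means the destination buffer could not be
 * allocated and the copy may be retried once memory has been reclaimed,
 * 0 means the copy is complete, and -EAGAIN (propagated from an asynchronous
 * copy method) means the transfer is pending on req->async_channel:
 *
 *	int ret = _starpu_driver_copy_data_1_to_1(handle, src, dst, 0, req, 1);
 *	if (ret == -ENOMEM)
 *		... reclaim memory on dst_node and retry later ...
 *	else if (req && ret == -EAGAIN)
 *		... test or wait on req->async_channel (see the helpers
 *		    at the end of this file) ...
 */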
/* This can be used by interfaces to easily transfer a piece of data without
 * caring about the particular CUDA/OpenCL methods. */

/* This should either return 0 if the transfer is complete, or -EAGAIN if the
 * transfer is still pending, and will have to be waited for by
 * _starpu_driver_test_request_completion/_starpu_driver_wait_request_completion
 */
int starpu_interface_copy(uintptr_t src, size_t src_offset, unsigned src_node,
			  uintptr_t dst, size_t dst_offset, unsigned dst_node,
			  size_t size, void *async_data)
{
	struct _starpu_async_channel *async_channel = async_data;

	enum starpu_node_kind src_kind = starpu_node_get_kind(src_node);
	enum starpu_node_kind dst_kind = starpu_node_get_kind(dst_node);

	switch (_STARPU_MEMORY_NODE_TUPLE(src_kind,dst_kind))
	{
	case _STARPU_MEMORY_NODE_TUPLE(STARPU_CPU_RAM,STARPU_CPU_RAM):
		memcpy((void *) (dst + dst_offset), (void *) (src + src_offset), size);
		return 0;
#ifdef STARPU_USE_CUDA
	case _STARPU_MEMORY_NODE_TUPLE(STARPU_CUDA_RAM,STARPU_CPU_RAM):
		return starpu_cuda_copy_async_sync(
				(void*) (src + src_offset), src_node,
				(void*) (dst + dst_offset), dst_node,
				size,
				async_channel ? starpu_cuda_get_local_out_transfer_stream() : NULL,
				cudaMemcpyDeviceToHost);
	case _STARPU_MEMORY_NODE_TUPLE(STARPU_CPU_RAM,STARPU_CUDA_RAM):
		return starpu_cuda_copy_async_sync(
				(void*) (src + src_offset), src_node,
				(void*) (dst + dst_offset), dst_node,
				size,
				async_channel ? starpu_cuda_get_local_in_transfer_stream() : NULL,
				cudaMemcpyHostToDevice);
	case _STARPU_MEMORY_NODE_TUPLE(STARPU_CUDA_RAM,STARPU_CUDA_RAM):
		return starpu_cuda_copy_async_sync(
				(void*) (src + src_offset), src_node,
				(void*) (dst + dst_offset), dst_node,
				size,
				async_channel ? starpu_cuda_get_peer_transfer_stream(src_node, dst_node) : NULL,
				cudaMemcpyDeviceToDevice);
#endif
#ifdef STARPU_USE_OPENCL
	case _STARPU_MEMORY_NODE_TUPLE(STARPU_OPENCL_RAM,STARPU_CPU_RAM):
	case _STARPU_MEMORY_NODE_TUPLE(STARPU_CPU_RAM,STARPU_OPENCL_RAM):
	case _STARPU_MEMORY_NODE_TUPLE(STARPU_OPENCL_RAM,STARPU_OPENCL_RAM):
		return starpu_opencl_copy_async_sync(
				src, src_offset, src_node,
				dst, dst_offset, dst_node,
				size,
				&async_channel->event.opencl_event);
#endif
#ifdef STARPU_USE_MIC
	case _STARPU_MEMORY_NODE_TUPLE(STARPU_MIC_RAM,STARPU_CPU_RAM):
		if (async_data)
			return _starpu_mic_copy_mic_to_ram_async(
					(void*) (src + src_offset), src_node,
					(void*) (dst + dst_offset), dst_node,
					size);
		else
			return _starpu_mic_copy_mic_to_ram(
					(void*) (src + src_offset), src_node,
					(void*) (dst + dst_offset), dst_node,
					size);
	case _STARPU_MEMORY_NODE_TUPLE(STARPU_CPU_RAM,STARPU_MIC_RAM):
		if (async_data)
			return _starpu_mic_copy_ram_to_mic_async(
					(void*) (src + src_offset), src_node,
					(void*) (dst + dst_offset), dst_node,
					size);
		else
			return _starpu_mic_copy_ram_to_mic(
					(void*) (src + src_offset), src_node,
					(void*) (dst + dst_offset), dst_node,
					size);
#endif
#ifdef STARPU_USE_SCC
	case _STARPU_MEMORY_NODE_TUPLE(STARPU_SCC_RAM,STARPU_CPU_RAM):
		return _starpu_scc_copy_sink_to_src(
				(void*) (src + src_offset), src_node,
				(void*) (dst + dst_offset), dst_node,
				size);
	case _STARPU_MEMORY_NODE_TUPLE(STARPU_CPU_RAM,STARPU_SCC_RAM):
		return _starpu_scc_copy_src_to_sink(
				(void*) (src + src_offset), src_node,
				(void*) (dst + dst_offset), dst_node,
				size);
	case _STARPU_MEMORY_NODE_TUPLE(STARPU_SCC_RAM,STARPU_SCC_RAM):
		return _starpu_scc_copy_sink_to_sink(
				(void*) (src + src_offset), src_node,
				(void*) (dst + dst_offset), dst_node,
				size);
#endif
	case _STARPU_MEMORY_NODE_TUPLE(STARPU_CPU_RAM, STARPU_DISK_RAM):
		return _starpu_disk_copy_src_to_disk(
				(void*) (src + src_offset), src_node,
				(void*) dst, dst_offset, dst_node,
				size, async_channel);
	case _STARPU_MEMORY_NODE_TUPLE(STARPU_DISK_RAM, STARPU_CPU_RAM):
		return _starpu_disk_copy_disk_to_src(
				(void*) src, src_offset, src_node,
				(void*) (dst + dst_offset), dst_node,
				size, async_channel);
	case _STARPU_MEMORY_NODE_TUPLE(STARPU_DISK_RAM, STARPU_DISK_RAM):
		return _starpu_disk_copy_disk_to_disk(
				(void*) src, src_offset, src_node,
				(void*) dst, dst_offset, dst_node,
				size, async_channel);
	default:
		STARPU_ABORT();
		return -1;
	}
	return 0;
}
void _starpu_driver_wait_request_completion(struct _starpu_async_channel *async_channel)
{
#ifdef STARPU_SIMGRID
	STARPU_PTHREAD_MUTEX_LOCK(&async_channel->event.mutex);
	while (!async_channel->event.finished)
		STARPU_PTHREAD_COND_WAIT(&async_channel->event.cond, &async_channel->event.mutex);
	STARPU_PTHREAD_MUTEX_UNLOCK(&async_channel->event.mutex);
#else /* !SIMGRID */
	enum starpu_node_kind kind = async_channel->type;
#ifdef STARPU_USE_CUDA
	cudaEvent_t event;
	cudaError_t cures;
#endif

	switch (kind)
	{
#ifdef STARPU_USE_CUDA
	case STARPU_CUDA_RAM:
		event = (*async_channel).event.cuda_event;

		cures = cudaEventSynchronize(event);
		if (STARPU_UNLIKELY(cures))
			STARPU_CUDA_REPORT_ERROR(cures);

		cures = cudaEventDestroy(event);
		if (STARPU_UNLIKELY(cures))
			STARPU_CUDA_REPORT_ERROR(cures);
		break;
#endif
#ifdef STARPU_USE_OPENCL
	case STARPU_OPENCL_RAM:
	{
		cl_int err;
		if ((*async_channel).event.opencl_event == NULL)
			STARPU_ABORT();

		err = clWaitForEvents(1, &((*async_channel).event.opencl_event));
		if (STARPU_UNLIKELY(err != CL_SUCCESS))
			STARPU_OPENCL_REPORT_ERROR(err);

		err = clReleaseEvent((*async_channel).event.opencl_event);
		if (STARPU_UNLIKELY(err != CL_SUCCESS))
			STARPU_OPENCL_REPORT_ERROR(err);
		break;
	}
#endif
#ifdef STARPU_USE_MIC
	case STARPU_MIC_RAM:
		_starpu_mic_wait_request_completion(&(async_channel->event.mic_event));
		break;
#endif
	case STARPU_DISK_RAM:
		starpu_disk_wait_request(async_channel);
		break;
	case STARPU_CPU_RAM:
	default:
		STARPU_ABORT();
	}
#endif /* !SIMGRID */
}
unsigned _starpu_driver_test_request_completion(struct _starpu_async_channel *async_channel)
{
#ifdef STARPU_SIMGRID
	unsigned ret;
	STARPU_PTHREAD_MUTEX_LOCK(&async_channel->event.mutex);
	ret = async_channel->event.finished;
	STARPU_PTHREAD_MUTEX_UNLOCK(&async_channel->event.mutex);
	return ret;
#else /* !SIMGRID */
	enum starpu_node_kind kind = async_channel->type;
	unsigned success = 0;
#ifdef STARPU_USE_CUDA
	cudaEvent_t event;
	cudaError_t cures;
#endif

	switch (kind)
	{
#ifdef STARPU_USE_CUDA
	case STARPU_CUDA_RAM:
		event = (*async_channel).event.cuda_event;

		cures = cudaEventQuery(event);
		success = (cures == cudaSuccess);

		if (success)
			cudaEventDestroy(event);
		else if (cures != cudaErrorNotReady)
			STARPU_CUDA_REPORT_ERROR(cures);
		break;
#endif
#ifdef STARPU_USE_OPENCL
	case STARPU_OPENCL_RAM:
	{
		cl_int event_status;
		cl_event opencl_event = (*async_channel).event.opencl_event;
		if (opencl_event == NULL)
			STARPU_ABORT();

		cl_int err = clGetEventInfo(opencl_event, CL_EVENT_COMMAND_EXECUTION_STATUS, sizeof(event_status), &event_status, NULL);
		if (STARPU_UNLIKELY(err != CL_SUCCESS))
			STARPU_OPENCL_REPORT_ERROR(err);
		if (event_status < 0)
			STARPU_OPENCL_REPORT_ERROR(event_status);
		success = (event_status == CL_COMPLETE);
		break;
	}
#endif
#ifdef STARPU_USE_MIC
	case STARPU_MIC_RAM:
		success = _starpu_mic_request_is_complete(&(async_channel->event.mic_event));
		break;
#endif
	case STARPU_DISK_RAM:
		success = starpu_disk_test_request(async_channel);
		break;
	case STARPU_CPU_RAM:
	default:
		STARPU_ABORT();
	}

	return success;
#endif /* !SIMGRID */
}
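
/* Illustrative usage sketch (hypothetical caller, not part of this file):
 * when a copy method returned -EAGAIN, a driver either polls the channel with
 * the test helper, or blocks on the wait helper, before releasing the request:
 *
 *	if (ret == -EAGAIN)
 *	{
 *		while (!_starpu_driver_test_request_completion(&req->async_channel))
 *			... make progress on other pending requests ...
 *		// or, when there is nothing else to overlap with:
 *		// _starpu_driver_wait_request_completion(&req->async_channel);
 *	}
 *
 * Both helpers also release the underlying CUDA/OpenCL event once the
 * transfer has completed, as their implementations above show.
 */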