copy_driver.c

/* StarPU --- Runtime system for heterogeneous multicore architectures.
 *
 * Copyright (C) 2010-2015 Université de Bordeaux
 * Copyright (C) 2010, 2011, 2013 CNRS
 *
 * StarPU is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation; either version 2.1 of the License, or (at
 * your option) any later version.
 *
 * StarPU is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * See the GNU Lesser General Public License in COPYING.LGPL for more details.
 */

#include <starpu.h>
#include <common/config.h>
#include <common/utils.h>
#include <core/sched_policy.h>
#include <datawizard/datastats.h>
#include <datawizard/memory_nodes.h>
#include <drivers/disk/driver_disk.h>
#include <common/fxt.h>
#include "copy_driver.h"
#include "memalloc.h"
#include <starpu_opencl.h>
#include <starpu_cuda.h>
#include <profiling/profiling.h>
#include <core/disk.h>
#ifdef STARPU_SIMGRID
#include <core/simgrid.h>
#endif
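
/* Wake all workers currently blocked on a condition attached to the given
 * memory node, e.g. so that they notice a transfer completion there. */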
void _starpu_wake_all_blocked_workers_on_node(unsigned nodeid)
{
	/* wake up all workers on that memory node */
	unsigned cond_id;

	struct _starpu_memory_node_descr * const descr = _starpu_memory_node_get_description();

	STARPU_PTHREAD_RWLOCK_RDLOCK(&descr->conditions_rwlock);

	unsigned nconds = descr->condition_count[nodeid];
	for (cond_id = 0; cond_id < nconds; cond_id++)
	{
		struct _starpu_cond_and_mutex *condition;
		condition = &descr->conditions_attached_to_node[nodeid][cond_id];

		/* wake anybody waiting on that condition */
		STARPU_PTHREAD_MUTEX_LOCK(condition->mutex);
		STARPU_PTHREAD_COND_BROADCAST(condition->cond);
		STARPU_PTHREAD_MUTEX_UNLOCK(condition->mutex);
	}

	STARPU_PTHREAD_RWLOCK_UNLOCK(&descr->conditions_rwlock);

#ifdef STARPU_SIMGRID
	starpu_pthread_queue_broadcast(&_starpu_simgrid_transfer_queue[nodeid]);
#endif
}
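
/* Same as above, but wake the workers waiting on any condition registered in
 * the system, whatever memory node it is attached to. */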
void starpu_wake_all_blocked_workers(void)
{
	/* workers may be blocked on the various queues' conditions */
	unsigned cond_id;

	struct _starpu_memory_node_descr * const descr = _starpu_memory_node_get_description();

	STARPU_PTHREAD_RWLOCK_RDLOCK(&descr->conditions_rwlock);

	unsigned nconds = descr->total_condition_count;
	for (cond_id = 0; cond_id < nconds; cond_id++)
	{
		struct _starpu_cond_and_mutex *condition;
		condition = &descr->conditions_all[cond_id];

		/* wake anybody waiting on that condition */
		STARPU_PTHREAD_MUTEX_LOCK(condition->mutex);
		STARPU_PTHREAD_COND_BROADCAST(condition->cond);
		STARPU_PTHREAD_MUTEX_UNLOCK(condition->mutex);
	}

	STARPU_PTHREAD_RWLOCK_UNLOCK(&descr->conditions_rwlock);

#ifdef STARPU_SIMGRID
	unsigned workerid, nodeid;
	for (workerid = 0; workerid < starpu_worker_get_count(); workerid++)
		starpu_pthread_queue_broadcast(&_starpu_simgrid_task_queue[workerid]);
	for (nodeid = 0; nodeid < starpu_memory_nodes_get_count(); nodeid++)
		starpu_pthread_queue_broadcast(&_starpu_simgrid_transfer_queue[nodeid]);
#endif
}

#ifdef STARPU_USE_FXT
/* we need to identify each communication so that we can match the beginning
 * and the end of a communication in the trace, so we use a unique identifier
 * per communication */
static unsigned communication_cnt = 0;
#endif
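
/* Actually perform the transfer between two replicates of the same handle,
 * dispatching on the (source kind, destination kind) pair of memory nodes.
 * Each case prefers the dedicated copy method of the data interface and falls
 * back to the generic any_to_any method. When the copy is driven by a request,
 * an asynchronous transfer may be started, in which case the copy method
 * returns -EAGAIN and completion is tracked through req->async_channel. */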
static int copy_data_1_to_1_generic(starpu_data_handle_t handle,
				    struct _starpu_data_replicate *src_replicate,
				    struct _starpu_data_replicate *dst_replicate,
				    struct _starpu_data_request *req)
{
	unsigned src_node = src_replicate->memory_node;
	unsigned dst_node = dst_replicate->memory_node;

	STARPU_ASSERT(src_replicate->refcnt);
	STARPU_ASSERT(dst_replicate->refcnt);

	STARPU_ASSERT(src_replicate->allocated);
	STARPU_ASSERT(dst_replicate->allocated);

	_starpu_comm_amounts_inc(src_node, dst_node, handle->ops->get_size(handle));

#ifdef STARPU_SIMGRID
	return _starpu_simgrid_transfer(handle->ops->get_size(handle), src_node, dst_node, req);
#else /* !SIMGRID */
	int ret = 0;

	const struct starpu_data_copy_methods *copy_methods = handle->ops->copy_methods;

	enum starpu_node_kind src_kind = starpu_node_get_kind(src_node);
	enum starpu_node_kind dst_kind = starpu_node_get_kind(dst_node);

#ifdef STARPU_USE_CUDA
	cudaError_t cures;
	cudaStream_t stream;
#endif

	void *src_interface = src_replicate->data_interface;
	void *dst_interface = dst_replicate->data_interface;

#if defined(STARPU_USE_CUDA) && defined(HAVE_CUDA_MEMCPY_PEER) && !defined(STARPU_SIMGRID)
	if ((src_kind == STARPU_CUDA_RAM) || (dst_kind == STARPU_CUDA_RAM))
	{
		unsigned devid;
		if ((src_kind == STARPU_CUDA_RAM) && (dst_kind == STARPU_CUDA_RAM))
		{
			/* GPU-GPU transfer, issue it from the device we are supposed to drive */
			int worker = starpu_worker_get_id();
			devid = starpu_worker_get_devid(worker);
		}
		else
		{
			unsigned node = (dst_kind == STARPU_CUDA_RAM)?dst_node:src_node;
			devid = _starpu_memory_node_get_devid(node);
		}
		starpu_cuda_set_device(devid);
	}
#endif
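
	/* Dispatch on the (source kind, destination kind) pair of memory nodes */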
	switch (_STARPU_MEMORY_NODE_TUPLE(src_kind,dst_kind))
	{
	case _STARPU_MEMORY_NODE_TUPLE(STARPU_CPU_RAM,STARPU_CPU_RAM):
		/* STARPU_CPU_RAM -> STARPU_CPU_RAM */
		if (copy_methods->ram_to_ram)
			copy_methods->ram_to_ram(src_interface, src_node, dst_interface, dst_node);
		else
			copy_methods->any_to_any(src_interface, src_node, dst_interface, dst_node, req ? &req->async_channel : NULL);
		break;
#ifdef STARPU_USE_CUDA
	case _STARPU_MEMORY_NODE_TUPLE(STARPU_CUDA_RAM,STARPU_CPU_RAM):
		/* only the proper CUBLAS thread can initiate this directly! */
#if !defined(HAVE_CUDA_MEMCPY_PEER)
		STARPU_ASSERT(_starpu_memory_node_get_local_key() == src_node);
#endif
		if (!req || starpu_asynchronous_copy_disabled() || starpu_asynchronous_cuda_copy_disabled() ||
				!(copy_methods->cuda_to_ram_async || copy_methods->any_to_any))
		{
			/* this is not associated with a request, so it is synchronous */
			STARPU_ASSERT(copy_methods->cuda_to_ram || copy_methods->any_to_any);
			if (copy_methods->cuda_to_ram)
				copy_methods->cuda_to_ram(src_interface, src_node, dst_interface, dst_node);
			else
				copy_methods->any_to_any(src_interface, src_node, dst_interface, dst_node, NULL);
		}
		else
		{
			req->async_channel.type = STARPU_CUDA_RAM;
			cures = cudaEventCreateWithFlags(&req->async_channel.event.cuda_event, cudaEventDisableTiming);
			if (STARPU_UNLIKELY(cures != cudaSuccess)) STARPU_CUDA_REPORT_ERROR(cures);

			stream = starpu_cuda_get_local_out_transfer_stream();
			if (copy_methods->cuda_to_ram_async)
				ret = copy_methods->cuda_to_ram_async(src_interface, src_node, dst_interface, dst_node, stream);
			else
			{
				STARPU_ASSERT(copy_methods->any_to_any);
				ret = copy_methods->any_to_any(src_interface, src_node, dst_interface, dst_node, &req->async_channel);
			}

			cures = cudaEventRecord(req->async_channel.event.cuda_event, stream);
			if (STARPU_UNLIKELY(cures != cudaSuccess)) STARPU_CUDA_REPORT_ERROR(cures);
		}
		break;
	case _STARPU_MEMORY_NODE_TUPLE(STARPU_CPU_RAM,STARPU_CUDA_RAM):
		/* STARPU_CPU_RAM -> STARPU_CUDA_RAM */
		/* only the proper CUBLAS thread can initiate this! */
#if !defined(HAVE_CUDA_MEMCPY_PEER)
		STARPU_ASSERT(_starpu_memory_node_get_local_key() == dst_node);
#endif
		if (!req || starpu_asynchronous_copy_disabled() || starpu_asynchronous_cuda_copy_disabled() ||
				!(copy_methods->ram_to_cuda_async || copy_methods->any_to_any))
		{
			/* this is not associated with a request, so it is synchronous */
			STARPU_ASSERT(copy_methods->ram_to_cuda || copy_methods->any_to_any);
			if (copy_methods->ram_to_cuda)
				copy_methods->ram_to_cuda(src_interface, src_node, dst_interface, dst_node);
			else
				copy_methods->any_to_any(src_interface, src_node, dst_interface, dst_node, NULL);
		}
		else
		{
			req->async_channel.type = STARPU_CUDA_RAM;
			cures = cudaEventCreateWithFlags(&req->async_channel.event.cuda_event, cudaEventDisableTiming);
			if (STARPU_UNLIKELY(cures != cudaSuccess))
				STARPU_CUDA_REPORT_ERROR(cures);

			stream = starpu_cuda_get_local_in_transfer_stream();
			if (copy_methods->ram_to_cuda_async)
				ret = copy_methods->ram_to_cuda_async(src_interface, src_node, dst_interface, dst_node, stream);
			else
			{
				STARPU_ASSERT(copy_methods->any_to_any);
				ret = copy_methods->any_to_any(src_interface, src_node, dst_interface, dst_node, &req->async_channel);
			}

			cures = cudaEventRecord(req->async_channel.event.cuda_event, stream);
			if (STARPU_UNLIKELY(cures != cudaSuccess))
				STARPU_CUDA_REPORT_ERROR(cures);
		}
		break;
	case _STARPU_MEMORY_NODE_TUPLE(STARPU_CUDA_RAM,STARPU_CUDA_RAM):
		/* CUDA - CUDA transfer */
		if (!req || starpu_asynchronous_copy_disabled() || starpu_asynchronous_cuda_copy_disabled() ||
				!(copy_methods->cuda_to_cuda_async || copy_methods->any_to_any))
		{
			STARPU_ASSERT(copy_methods->cuda_to_cuda || copy_methods->any_to_any);
			/* this is not associated with a request, so it is synchronous */
			if (copy_methods->cuda_to_cuda)
				copy_methods->cuda_to_cuda(src_interface, src_node, dst_interface, dst_node);
			else
				copy_methods->any_to_any(src_interface, src_node, dst_interface, dst_node, NULL);
		}
		else
		{
			req->async_channel.type = STARPU_CUDA_RAM;
			cures = cudaEventCreateWithFlags(&req->async_channel.event.cuda_event, cudaEventDisableTiming);
			if (STARPU_UNLIKELY(cures != cudaSuccess)) STARPU_CUDA_REPORT_ERROR(cures);

			stream = starpu_cuda_get_peer_transfer_stream(src_node, dst_node);
			if (copy_methods->cuda_to_cuda_async)
				ret = copy_methods->cuda_to_cuda_async(src_interface, src_node, dst_interface, dst_node, stream);
			else
			{
				STARPU_ASSERT(copy_methods->any_to_any);
				ret = copy_methods->any_to_any(src_interface, src_node, dst_interface, dst_node, &req->async_channel);
			}

			cures = cudaEventRecord(req->async_channel.event.cuda_event, stream);
			if (STARPU_UNLIKELY(cures != cudaSuccess)) STARPU_CUDA_REPORT_ERROR(cures);
		}
		break;
#endif
#ifdef STARPU_USE_OPENCL
	case _STARPU_MEMORY_NODE_TUPLE(STARPU_OPENCL_RAM,STARPU_CPU_RAM):
		/* OpenCL -> RAM */
		STARPU_ASSERT(_starpu_memory_node_get_local_key() == src_node);
		if (!req || starpu_asynchronous_copy_disabled() || starpu_asynchronous_opencl_copy_disabled() ||
				!(copy_methods->opencl_to_ram_async || copy_methods->any_to_any))
		{
			STARPU_ASSERT(copy_methods->opencl_to_ram || copy_methods->any_to_any);
			/* this is not associated with a request, so it is synchronous */
			if (copy_methods->opencl_to_ram)
				copy_methods->opencl_to_ram(src_interface, src_node, dst_interface, dst_node);
			else
				copy_methods->any_to_any(src_interface, src_node, dst_interface, dst_node, NULL);
		}
		else
		{
			req->async_channel.type = STARPU_OPENCL_RAM;
			if (copy_methods->opencl_to_ram_async)
				ret = copy_methods->opencl_to_ram_async(src_interface, src_node, dst_interface, dst_node, &(req->async_channel.event.opencl_event));
			else
			{
				STARPU_ASSERT(copy_methods->any_to_any);
				ret = copy_methods->any_to_any(src_interface, src_node, dst_interface, dst_node, &req->async_channel);
			}
		}
		break;
	case _STARPU_MEMORY_NODE_TUPLE(STARPU_CPU_RAM,STARPU_OPENCL_RAM):
		/* STARPU_CPU_RAM -> STARPU_OPENCL_RAM */
		STARPU_ASSERT(_starpu_memory_node_get_local_key() == dst_node);
		if (!req || starpu_asynchronous_copy_disabled() || starpu_asynchronous_opencl_copy_disabled() ||
				!(copy_methods->ram_to_opencl_async || copy_methods->any_to_any))
		{
			STARPU_ASSERT(copy_methods->ram_to_opencl || copy_methods->any_to_any);
			/* this is not associated with a request, so it is synchronous */
			if (copy_methods->ram_to_opencl)
				copy_methods->ram_to_opencl(src_interface, src_node, dst_interface, dst_node);
			else
				copy_methods->any_to_any(src_interface, src_node, dst_interface, dst_node, NULL);
		}
		else
		{
			req->async_channel.type = STARPU_OPENCL_RAM;
			if (copy_methods->ram_to_opencl_async)
				ret = copy_methods->ram_to_opencl_async(src_interface, src_node, dst_interface, dst_node, &(req->async_channel.event.opencl_event));
			else
			{
				STARPU_ASSERT(copy_methods->any_to_any);
				ret = copy_methods->any_to_any(src_interface, src_node, dst_interface, dst_node, &req->async_channel);
			}
		}
		break;
	case _STARPU_MEMORY_NODE_TUPLE(STARPU_OPENCL_RAM,STARPU_OPENCL_RAM):
		/* STARPU_OPENCL_RAM -> STARPU_OPENCL_RAM */
		STARPU_ASSERT(_starpu_memory_node_get_local_key() == dst_node || _starpu_memory_node_get_local_key() == src_node);
		if (!req || starpu_asynchronous_copy_disabled() || starpu_asynchronous_opencl_copy_disabled() ||
				!(copy_methods->opencl_to_opencl_async || copy_methods->any_to_any))
		{
			STARPU_ASSERT(copy_methods->opencl_to_opencl || copy_methods->any_to_any);
			/* this is not associated with a request, so it is synchronous */
			if (copy_methods->opencl_to_opencl)
				copy_methods->opencl_to_opencl(src_interface, src_node, dst_interface, dst_node);
			else
				copy_methods->any_to_any(src_interface, src_node, dst_interface, dst_node, NULL);
		}
		else
		{
			req->async_channel.type = STARPU_OPENCL_RAM;
			if (copy_methods->opencl_to_opencl_async)
				ret = copy_methods->opencl_to_opencl_async(src_interface, src_node, dst_interface, dst_node, &(req->async_channel.event.opencl_event));
			else
			{
				STARPU_ASSERT(copy_methods->any_to_any);
				ret = copy_methods->any_to_any(src_interface, src_node, dst_interface, dst_node, &req->async_channel);
			}
		}
		break;
#endif
#ifdef STARPU_USE_MIC
	case _STARPU_MEMORY_NODE_TUPLE(STARPU_CPU_RAM,STARPU_MIC_RAM):
		/* RAM -> MIC */
		if (!req || starpu_asynchronous_copy_disabled() || starpu_asynchronous_mic_copy_disabled() ||
				!(copy_methods->ram_to_mic_async || copy_methods->any_to_any))
		{
			/* this is not associated with a request, so it is synchronous */
			STARPU_ASSERT(copy_methods->ram_to_mic || copy_methods->any_to_any);
			if (copy_methods->ram_to_mic)
				copy_methods->ram_to_mic(src_interface, src_node, dst_interface, dst_node);
			else
				copy_methods->any_to_any(src_interface, src_node, dst_interface, dst_node, NULL);
		}
		else
		{
			req->async_channel.type = STARPU_MIC_RAM;
			if (copy_methods->ram_to_mic_async)
				ret = copy_methods->ram_to_mic_async(src_interface, src_node, dst_interface, dst_node);
			else
			{
				STARPU_ASSERT(copy_methods->any_to_any);
				ret = copy_methods->any_to_any(src_interface, src_node, dst_interface, dst_node, &req->async_channel);
			}
			_starpu_mic_init_event(&(req->async_channel.event.mic_event), dst_node);
		}
		break;
	case _STARPU_MEMORY_NODE_TUPLE(STARPU_MIC_RAM,STARPU_CPU_RAM):
		/* MIC -> RAM */
		if (!req || starpu_asynchronous_copy_disabled() || starpu_asynchronous_mic_copy_disabled() ||
				!(copy_methods->mic_to_ram_async || copy_methods->any_to_any))
		{
			/* this is not associated with a request, so it is synchronous */
			STARPU_ASSERT(copy_methods->mic_to_ram || copy_methods->any_to_any);
			if (copy_methods->mic_to_ram)
				copy_methods->mic_to_ram(src_interface, src_node, dst_interface, dst_node);
			else
				copy_methods->any_to_any(src_interface, src_node, dst_interface, dst_node, NULL);
		}
		else
		{
			req->async_channel.type = STARPU_MIC_RAM;
			if (copy_methods->mic_to_ram_async)
				ret = copy_methods->mic_to_ram_async(src_interface, src_node, dst_interface, dst_node);
			else
			{
				STARPU_ASSERT(copy_methods->any_to_any);
				ret = copy_methods->any_to_any(src_interface, src_node, dst_interface, dst_node, &req->async_channel);
			}
			_starpu_mic_init_event(&(req->async_channel.event.mic_event), src_node);
		}
		break;
	/* TODO: MIC -> MIC */
#endif
#ifdef STARPU_USE_SCC
	/* SCC RAM associated with the master process is considered
	 * the main memory node. */
	case _STARPU_MEMORY_NODE_TUPLE(STARPU_CPU_RAM,STARPU_SCC_RAM):
		/* master private SCC RAM -> slave private SCC RAM */
		if (copy_methods->scc_src_to_sink)
			copy_methods->scc_src_to_sink(src_interface, src_node, dst_interface, dst_node);
		else
			copy_methods->any_to_any(src_interface, src_node, dst_interface, dst_node, NULL);
		break;
	case _STARPU_MEMORY_NODE_TUPLE(STARPU_SCC_RAM,STARPU_CPU_RAM):
		/* slave private SCC RAM -> master private SCC RAM */
		if (copy_methods->scc_sink_to_src)
			copy_methods->scc_sink_to_src(src_interface, src_node, dst_interface, dst_node);
		else
			copy_methods->any_to_any(src_interface, src_node, dst_interface, dst_node, NULL);
		break;
	case _STARPU_MEMORY_NODE_TUPLE(STARPU_SCC_RAM,STARPU_SCC_RAM):
		/* slave private SCC RAM -> slave private SCC RAM */
		if (copy_methods->scc_sink_to_sink)
			copy_methods->scc_sink_to_sink(src_interface, src_node, dst_interface, dst_node);
		else
			copy_methods->any_to_any(src_interface, src_node, dst_interface, dst_node, NULL);
		break;
#endif
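
	/* Transfers involving a disk node go through the disk driver: when the
	 * data interface provides no any_to_any method, the handle is packed
	 * before a full disk write, or unpacked after a full disk read. */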
	case _STARPU_MEMORY_NODE_TUPLE(STARPU_CPU_RAM,STARPU_DISK_RAM):
		if (copy_methods->any_to_any)
			ret = copy_methods->any_to_any(src_interface, src_node, dst_interface, dst_node, req && !starpu_asynchronous_copy_disabled() ? &req->async_channel : NULL);
		else
		{
			void *obj = starpu_data_handle_to_pointer(handle, dst_node);
			void *ptr = NULL;
			starpu_ssize_t size = 0;
			handle->ops->pack_data(handle, src_node, &ptr, &size);
			ret = _starpu_disk_full_write(src_node, dst_node, obj, ptr, size, req && !starpu_asynchronous_copy_disabled() ? &req->async_channel : NULL);
			if (ret == 0)
				/* write is already finished, ptr was allocated in pack_data */
				free(ptr);

			/* For now, asynchronous is not supported */
			STARPU_ASSERT(ret == 0);
		}
		break;
	case _STARPU_MEMORY_NODE_TUPLE(STARPU_DISK_RAM,STARPU_CPU_RAM):
		if (copy_methods->any_to_any)
			ret = copy_methods->any_to_any(src_interface, src_node, dst_interface, dst_node, req && !starpu_asynchronous_copy_disabled() ? &req->async_channel : NULL);
		else
		{
			void *obj = starpu_data_handle_to_pointer(handle, src_node);
			void *ptr = NULL;
			size_t size = 0;
			ret = _starpu_disk_full_read(src_node, dst_node, obj, &ptr, &size, req && !starpu_asynchronous_copy_disabled() ? &req->async_channel : NULL);
			if (ret == 0)
			{
				/* read is already finished, we can already unpack */
				handle->ops->unpack_data(handle, dst_node, ptr, size);
				/* ptr is allocated in full_read */
				free(ptr);
			}

			/* For now, asynchronous is not supported */
			STARPU_ASSERT(ret == 0);
		}
		break;
	case _STARPU_MEMORY_NODE_TUPLE(STARPU_DISK_RAM,STARPU_DISK_RAM):
		ret = copy_methods->any_to_any(src_interface, src_node, dst_interface, dst_node, req ? &req->async_channel : NULL);
		break;
	default:
		STARPU_ABORT();
		break;
	}

	return ret;
#endif /* !SIMGRID */
}
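
/* Copy a piece of data from one replicate to another: first make sure the
 * destination replicate has a buffer allocated (returning -ENOMEM when
 * allocation is not allowed or fails), then start the transfer unless
 * donotread is set. The return value of the underlying copy is passed on, so
 * a still-pending asynchronous transfer shows up as -EAGAIN. */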
int STARPU_ATTRIBUTE_WARN_UNUSED_RESULT _starpu_driver_copy_data_1_to_1(starpu_data_handle_t handle,
									struct _starpu_data_replicate *src_replicate,
									struct _starpu_data_replicate *dst_replicate,
									unsigned donotread,
									struct _starpu_data_request *req,
									unsigned may_alloc,
									unsigned prefetch)
{
	if (!donotread)
	{
		STARPU_ASSERT(src_replicate->allocated);
		STARPU_ASSERT(src_replicate->refcnt);
	}

	int ret_alloc, ret_copy;
	unsigned STARPU_ATTRIBUTE_UNUSED com_id = 0;

	unsigned src_node = src_replicate->memory_node;
	unsigned dst_node = dst_replicate->memory_node;

	/* first make sure the destination has an allocated buffer */
	if (!dst_replicate->allocated)
	{
		if (!may_alloc || _starpu_is_reclaiming(dst_node))
			/* We're not supposed to allocate there at the moment */
			return -ENOMEM;

		ret_alloc = _starpu_allocate_memory_on_node(handle, dst_replicate, req ? req->prefetch : 0);
		if (ret_alloc)
			return -ENOMEM;
	}

	STARPU_ASSERT(dst_replicate->allocated);
	STARPU_ASSERT(dst_replicate->refcnt);

	/* if there is no need to actually read the data,
	 * we do not perform any transfer */
	if (!donotread)
	{
		size_t size = _starpu_data_get_size(handle);
		_starpu_bus_update_profiling_info((int)src_node, (int)dst_node, size);

#ifdef STARPU_USE_FXT
		com_id = STARPU_ATOMIC_ADD(&communication_cnt, 1);

		if (req)
			req->com_id = com_id;
#endif

		dst_replicate->initialized = 1;

		_STARPU_TRACE_START_DRIVER_COPY(src_node, dst_node, size, com_id, prefetch);
		ret_copy = copy_data_1_to_1_generic(handle, src_replicate, dst_replicate, req);
		if (!req)
			/* Synchronous, this is already finished */
			_STARPU_TRACE_END_DRIVER_COPY(src_node, dst_node, size, com_id, prefetch);

		return ret_copy;
	}

	return 0;
}

/* This can be used by interfaces to easily transfer a piece of data without
 * caring about the particular transfer methods. It either returns 0 if the
 * transfer is complete, or -EAGAIN if the transfer is still pending and will
 * have to be waited for with _starpu_driver_test_request_completion /
 * _starpu_driver_wait_request_completion. */
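
/* A minimal sketch of how a data interface can implement its any_to_any copy
 * method on top of starpu_interface_copy. "struct my_interface" and its
 * ptr/size fields are hypothetical, not part of this file:
 *
 *	static int my_copy_any_to_any(void *src_interface, unsigned src_node,
 *				      void *dst_interface, unsigned dst_node,
 *				      void *async_data)
 *	{
 *		struct my_interface *src = src_interface;
 *		struct my_interface *dst = dst_interface;
 *
 *		// 0 (complete) or -EAGAIN (pending) is propagated to the caller
 *		return starpu_interface_copy(src->ptr, 0, src_node,
 *					     dst->ptr, 0, dst_node,
 *					     src->size, async_data);
 *	}
 */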
int starpu_interface_copy(uintptr_t src, size_t src_offset, unsigned src_node, uintptr_t dst, size_t dst_offset, unsigned dst_node, size_t size, void *async_data)
{
	struct _starpu_async_channel *async_channel = async_data;

	enum starpu_node_kind src_kind = starpu_node_get_kind(src_node);
	enum starpu_node_kind dst_kind = starpu_node_get_kind(dst_node);

	switch (_STARPU_MEMORY_NODE_TUPLE(src_kind,dst_kind))
	{
	case _STARPU_MEMORY_NODE_TUPLE(STARPU_CPU_RAM,STARPU_CPU_RAM):
		memcpy((void *) (dst + dst_offset), (void *) (src + src_offset), size);
		return 0;

#ifdef STARPU_USE_CUDA
	case _STARPU_MEMORY_NODE_TUPLE(STARPU_CUDA_RAM,STARPU_CPU_RAM):
		return starpu_cuda_copy_async_sync(
				(void*) (src + src_offset), src_node,
				(void*) (dst + dst_offset), dst_node,
				size,
				async_channel?starpu_cuda_get_local_out_transfer_stream():NULL,
				cudaMemcpyDeviceToHost);
	case _STARPU_MEMORY_NODE_TUPLE(STARPU_CPU_RAM,STARPU_CUDA_RAM):
		return starpu_cuda_copy_async_sync(
				(void*) (src + src_offset), src_node,
				(void*) (dst + dst_offset), dst_node,
				size,
				async_channel?starpu_cuda_get_local_in_transfer_stream():NULL,
				cudaMemcpyHostToDevice);
	case _STARPU_MEMORY_NODE_TUPLE(STARPU_CUDA_RAM,STARPU_CUDA_RAM):
		return starpu_cuda_copy_async_sync(
				(void*) (src + src_offset), src_node,
				(void*) (dst + dst_offset), dst_node,
				size,
				async_channel?starpu_cuda_get_peer_transfer_stream(src_node, dst_node):NULL,
				cudaMemcpyDeviceToDevice);
#endif
#ifdef STARPU_USE_OPENCL
	case _STARPU_MEMORY_NODE_TUPLE(STARPU_OPENCL_RAM,STARPU_CPU_RAM):
	case _STARPU_MEMORY_NODE_TUPLE(STARPU_CPU_RAM,STARPU_OPENCL_RAM):
	case _STARPU_MEMORY_NODE_TUPLE(STARPU_OPENCL_RAM,STARPU_OPENCL_RAM):
		return starpu_opencl_copy_async_sync(
				src, src_offset, src_node,
				dst, dst_offset, dst_node,
				size,
				&async_channel->event.opencl_event);
#endif
#ifdef STARPU_USE_MIC
	case _STARPU_MEMORY_NODE_TUPLE(STARPU_MIC_RAM,STARPU_CPU_RAM):
		if (async_data)
			return _starpu_mic_copy_mic_to_ram_async(
					(void*) (src + src_offset), src_node,
					(void*) (dst + dst_offset), dst_node,
					size);
		else
			return _starpu_mic_copy_mic_to_ram(
					(void*) (src + src_offset), src_node,
					(void*) (dst + dst_offset), dst_node,
					size);
	case _STARPU_MEMORY_NODE_TUPLE(STARPU_CPU_RAM,STARPU_MIC_RAM):
		if (async_data)
			return _starpu_mic_copy_ram_to_mic_async(
					(void*) (src + src_offset), src_node,
					(void*) (dst + dst_offset), dst_node,
					size);
		else
			return _starpu_mic_copy_ram_to_mic(
					(void*) (src + src_offset), src_node,
					(void*) (dst + dst_offset), dst_node,
					size);
	/* TODO: MIC->MIC */
#endif
#ifdef STARPU_USE_SCC
	case _STARPU_MEMORY_NODE_TUPLE(STARPU_SCC_RAM,STARPU_CPU_RAM):
		return _starpu_scc_copy_sink_to_src(
				(void*) (src + src_offset), src_node,
				(void*) (dst + dst_offset), dst_node,
				size);
	case _STARPU_MEMORY_NODE_TUPLE(STARPU_CPU_RAM,STARPU_SCC_RAM):
		return _starpu_scc_copy_src_to_sink(
				(void*) (src + src_offset), src_node,
				(void*) (dst + dst_offset), dst_node,
				size);
	case _STARPU_MEMORY_NODE_TUPLE(STARPU_SCC_RAM,STARPU_SCC_RAM):
		return _starpu_scc_copy_sink_to_sink(
				(void*) (src + src_offset), src_node,
				(void*) (dst + dst_offset), dst_node,
				size);
#endif
	case _STARPU_MEMORY_NODE_TUPLE(STARPU_CPU_RAM, STARPU_DISK_RAM):
		return _starpu_disk_copy_src_to_disk(
				(void*) (src + src_offset), src_node,
				(void*) dst, dst_offset, dst_node,
				size, async_channel);
	case _STARPU_MEMORY_NODE_TUPLE(STARPU_DISK_RAM, STARPU_CPU_RAM):
		return _starpu_disk_copy_disk_to_src(
				(void*) src, src_offset, src_node,
				(void*) (dst + dst_offset), dst_node,
				size, async_channel);
	case _STARPU_MEMORY_NODE_TUPLE(STARPU_DISK_RAM, STARPU_DISK_RAM):
		return _starpu_disk_copy_disk_to_disk(
				(void*) src, src_offset, src_node,
				(void*) dst, dst_offset, dst_node,
				size, async_channel);
	default:
		STARPU_ABORT();
		return -1;
	}

	return 0;
}
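
/* Block until the asynchronous transfer tracked by async_channel has
 * completed, then release the corresponding driver event. */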
void _starpu_driver_wait_request_completion(struct _starpu_async_channel *async_channel)
{
#ifdef STARPU_SIMGRID
	STARPU_PTHREAD_MUTEX_LOCK(&async_channel->event.mutex);
	while (!async_channel->event.finished)
		STARPU_PTHREAD_COND_WAIT(&async_channel->event.cond, &async_channel->event.mutex);
	STARPU_PTHREAD_MUTEX_UNLOCK(&async_channel->event.mutex);
#else /* !SIMGRID */
	enum starpu_node_kind kind = async_channel->type;
#ifdef STARPU_USE_CUDA
	cudaEvent_t event;
	cudaError_t cures;
#endif

	switch (kind)
	{
#ifdef STARPU_USE_CUDA
	case STARPU_CUDA_RAM:
		event = (*async_channel).event.cuda_event;

		cures = cudaEventSynchronize(event);
		if (STARPU_UNLIKELY(cures))
			STARPU_CUDA_REPORT_ERROR(cures);

		cures = cudaEventDestroy(event);
		if (STARPU_UNLIKELY(cures))
			STARPU_CUDA_REPORT_ERROR(cures);
		break;
#endif
#ifdef STARPU_USE_OPENCL
	case STARPU_OPENCL_RAM:
	{
		cl_int err;
		if ((*async_channel).event.opencl_event == NULL)
			STARPU_ABORT();

		err = clWaitForEvents(1, &((*async_channel).event.opencl_event));
		if (STARPU_UNLIKELY(err != CL_SUCCESS))
			STARPU_OPENCL_REPORT_ERROR(err);

		err = clReleaseEvent((*async_channel).event.opencl_event);
		if (STARPU_UNLIKELY(err != CL_SUCCESS))
			STARPU_OPENCL_REPORT_ERROR(err);
		break;
	}
#endif
#ifdef STARPU_USE_MIC
	case STARPU_MIC_RAM:
		_starpu_mic_wait_request_completion(&(async_channel->event.mic_event));
		break;
#endif
	case STARPU_DISK_RAM:
		starpu_disk_wait_request(async_channel);
		break;
	case STARPU_CPU_RAM:
	default:
		STARPU_ABORT();
	}
#endif /* !SIMGRID */
}
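
/* Non-blocking counterpart of _starpu_driver_wait_request_completion: return
 * non-zero if the transfer tracked by async_channel has completed, 0
 * otherwise. The CUDA event is released as soon as completion is observed. */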
unsigned _starpu_driver_test_request_completion(struct _starpu_async_channel *async_channel)
{
#ifdef STARPU_SIMGRID
	unsigned ret;
	STARPU_PTHREAD_MUTEX_LOCK(&async_channel->event.mutex);
	ret = async_channel->event.finished;
	STARPU_PTHREAD_MUTEX_UNLOCK(&async_channel->event.mutex);
	return ret;
#else /* !SIMGRID */
	enum starpu_node_kind kind = async_channel->type;
	unsigned success = 0;
#ifdef STARPU_USE_CUDA
	cudaEvent_t event;
#endif

	switch (kind)
	{
#ifdef STARPU_USE_CUDA
	case STARPU_CUDA_RAM:
	{
		event = (*async_channel).event.cuda_event;
		cudaError_t cures = cudaEventQuery(event);

		success = (cures == cudaSuccess);
		if (success)
			cudaEventDestroy(event);
		else if (cures != cudaErrorNotReady)
			STARPU_CUDA_REPORT_ERROR(cures);
		break;
	}
#endif
#ifdef STARPU_USE_OPENCL
	case STARPU_OPENCL_RAM:
	{
		cl_int event_status;
		cl_event opencl_event = (*async_channel).event.opencl_event;
		if (opencl_event == NULL) STARPU_ABORT();

		cl_int err = clGetEventInfo(opencl_event, CL_EVENT_COMMAND_EXECUTION_STATUS, sizeof(event_status), &event_status, NULL);
		if (STARPU_UNLIKELY(err != CL_SUCCESS))
			STARPU_OPENCL_REPORT_ERROR(err);
		if (event_status < 0)
			STARPU_OPENCL_REPORT_ERROR(event_status);
		success = (event_status == CL_COMPLETE);
		break;
	}
#endif
#ifdef STARPU_USE_MIC
	case STARPU_MIC_RAM:
		success = _starpu_mic_request_is_complete(&(async_channel->event.mic_event));
		break;
#endif
	case STARPU_DISK_RAM:
		success = starpu_disk_test_request(async_channel);
		break;
	case STARPU_CPU_RAM:
	default:
		STARPU_ABORT();
	}

	return success;
#endif /* !SIMGRID */
}