copy_driver.c

/* StarPU --- Runtime system for heterogeneous multicore architectures.
 *
 * Copyright (C) 2010-2016  Université de Bordeaux
 * Copyright (C) 2010, 2011, 2013  CNRS
 *
 * StarPU is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation; either version 2.1 of the License, or (at
 * your option) any later version.
 *
 * StarPU is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * See the GNU Lesser General Public License in COPYING.LGPL for more details.
 */

#include <starpu.h>
#include <common/config.h>
#include <common/utils.h>
#include <core/sched_policy.h>
#include <datawizard/datastats.h>
#include <datawizard/memory_nodes.h>
#include <drivers/disk/driver_disk.h>
#include <common/fxt.h>
#include "copy_driver.h"
#include "memalloc.h"
#include <starpu_opencl.h>
#include <starpu_cuda.h>
#include <profiling/profiling.h>
#include <core/disk.h>
#ifdef STARPU_SIMGRID
#include <core/simgrid.h>
#endif

void _starpu_wake_all_blocked_workers_on_node(unsigned nodeid)
{
	/* wake up all workers on that memory node */
	unsigned cond_id;

	struct _starpu_memory_node_descr * const descr = _starpu_memory_node_get_description();

	STARPU_PTHREAD_RWLOCK_RDLOCK(&descr->conditions_rwlock);

	unsigned nconds = descr->condition_count[nodeid];
	for (cond_id = 0; cond_id < nconds; cond_id++)
	{
		struct _starpu_cond_and_mutex *condition;
		condition = &descr->conditions_attached_to_node[nodeid][cond_id];

		/* wake anybody waiting on that condition */
		STARPU_PTHREAD_MUTEX_LOCK_SCHED(condition->mutex);
		STARPU_PTHREAD_COND_BROADCAST(condition->cond);
		STARPU_PTHREAD_MUTEX_UNLOCK_SCHED(condition->mutex);
	}

	STARPU_PTHREAD_RWLOCK_UNLOCK(&descr->conditions_rwlock);

#ifdef STARPU_SIMGRID
	starpu_pthread_queue_broadcast(&_starpu_simgrid_transfer_queue[nodeid]);
#endif
}

void starpu_wake_all_blocked_workers(void)
{
	/* workers may be blocked on the various queues' conditions */
	unsigned cond_id;

	struct _starpu_memory_node_descr * const descr = _starpu_memory_node_get_description();

	STARPU_PTHREAD_RWLOCK_RDLOCK(&descr->conditions_rwlock);

	unsigned nconds = descr->total_condition_count;
	for (cond_id = 0; cond_id < nconds; cond_id++)
	{
		struct _starpu_cond_and_mutex *condition;
		condition = &descr->conditions_all[cond_id];

		/* wake anybody waiting on that condition */
		STARPU_PTHREAD_MUTEX_LOCK_SCHED(condition->mutex);
		STARPU_PTHREAD_COND_BROADCAST(condition->cond);
		STARPU_PTHREAD_MUTEX_UNLOCK_SCHED(condition->mutex);
	}

	STARPU_PTHREAD_RWLOCK_UNLOCK(&descr->conditions_rwlock);

#ifdef STARPU_SIMGRID
	unsigned workerid, nodeid;
	for (workerid = 0; workerid < starpu_worker_get_count(); workerid++)
		starpu_pthread_queue_broadcast(&_starpu_simgrid_task_queue[workerid]);
	for (nodeid = 0; nodeid < starpu_memory_nodes_get_count(); nodeid++)
		starpu_pthread_queue_broadcast(&_starpu_simgrid_transfer_queue[nodeid]);
#endif
}

#ifdef STARPU_USE_FXT
/* We need to identify each communication so that we can match the beginning
 * and the end of a communication in the trace, so we use a unique identifier
 * per communication. */
static unsigned long communication_cnt = 0;
#endif
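
/* Illustrative sketch (not part of the original code): each transfer draws a
 * fresh identifier with an atomic increment, and the same com_id is passed to
 * the matching start/end trace events, e.g.:
 *
 *   unsigned long com_id = STARPU_ATOMIC_ADDL(&communication_cnt, 1);
 *   _STARPU_TRACE_START_DRIVER_COPY(src_node, dst_node, size, com_id, prefetch, handle);
 *   ...transfer...
 *   _STARPU_TRACE_END_DRIVER_COPY(src_node, dst_node, size, com_id, prefetch);
 *
 * This is exactly the pattern used by _starpu_driver_copy_data_1_to_1 below.
 */
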
static int copy_data_1_to_1_generic(starpu_data_handle_t handle,
				    struct _starpu_data_replicate *src_replicate,
				    struct _starpu_data_replicate *dst_replicate,
				    struct _starpu_data_request *req)
{
	unsigned src_node = src_replicate->memory_node;
	unsigned dst_node = dst_replicate->memory_node;

	STARPU_ASSERT(src_replicate->refcnt);
	STARPU_ASSERT(dst_replicate->refcnt);

	STARPU_ASSERT(src_replicate->allocated);
	STARPU_ASSERT(dst_replicate->allocated);

#ifdef STARPU_SIMGRID
	return _starpu_simgrid_transfer(handle->ops->get_size(handle), src_node, dst_node, req);
#else /* !SIMGRID */
	int ret = 0;

	const struct starpu_data_copy_methods *copy_methods = handle->ops->copy_methods;

	enum starpu_node_kind src_kind = starpu_node_get_kind(src_node);
	enum starpu_node_kind dst_kind = starpu_node_get_kind(dst_node);

#ifdef STARPU_USE_CUDA
	cudaError_t cures;
	cudaStream_t stream;
#endif

	void *src_interface = src_replicate->data_interface;
	void *dst_interface = dst_replicate->data_interface;

#if defined(STARPU_USE_CUDA) && defined(HAVE_CUDA_MEMCPY_PEER) && !defined(STARPU_SIMGRID)
	if ((src_kind == STARPU_CUDA_RAM) || (dst_kind == STARPU_CUDA_RAM))
	{
		unsigned devid;
		if ((src_kind == STARPU_CUDA_RAM) && (dst_kind == STARPU_CUDA_RAM))
		{
			/* GPU-GPU transfer, issue it from the device we are supposed to drive */
			int worker = starpu_worker_get_id_check();
			devid = starpu_worker_get_devid(worker);
		}
		else
		{
			unsigned node = (dst_kind == STARPU_CUDA_RAM)?dst_node:src_node;
			devid = _starpu_memory_node_get_devid(node);
		}
		starpu_cuda_set_device(devid);
	}
#endif

	switch (_STARPU_MEMORY_NODE_TUPLE(src_kind,dst_kind))
	{
	case _STARPU_MEMORY_NODE_TUPLE(STARPU_CPU_RAM,STARPU_CPU_RAM):
		/* STARPU_CPU_RAM -> STARPU_CPU_RAM */
		if (copy_methods->ram_to_ram)
			copy_methods->ram_to_ram(src_interface, src_node, dst_interface, dst_node);
		else
			copy_methods->any_to_any(src_interface, src_node, dst_interface, dst_node, req ? &req->async_channel : NULL);
		break;
#ifdef STARPU_USE_CUDA
	case _STARPU_MEMORY_NODE_TUPLE(STARPU_CUDA_RAM,STARPU_CPU_RAM):
		/* STARPU_CUDA_RAM -> STARPU_CPU_RAM */
		/* only the proper CUBLAS thread can initiate this directly! */
#if !defined(HAVE_CUDA_MEMCPY_PEER)
		STARPU_ASSERT(_starpu_memory_node_get_local_key() == src_node);
#endif
		if (!req || starpu_asynchronous_copy_disabled() || starpu_asynchronous_cuda_copy_disabled() ||
		    !(copy_methods->cuda_to_ram_async || copy_methods->any_to_any))
		{
			/* this is not associated with a request, so it's synchronous */
			STARPU_ASSERT(copy_methods->cuda_to_ram || copy_methods->any_to_any);
			if (copy_methods->cuda_to_ram)
				copy_methods->cuda_to_ram(src_interface, src_node, dst_interface, dst_node);
			else
				copy_methods->any_to_any(src_interface, src_node, dst_interface, dst_node, NULL);
		}
		else
		{
			req->async_channel.type = STARPU_CUDA_RAM;
			cures = cudaEventCreateWithFlags(&req->async_channel.event.cuda_event, cudaEventDisableTiming);
			if (STARPU_UNLIKELY(cures != cudaSuccess)) STARPU_CUDA_REPORT_ERROR(cures);

			stream = starpu_cuda_get_local_out_transfer_stream();
			if (copy_methods->cuda_to_ram_async)
				ret = copy_methods->cuda_to_ram_async(src_interface, src_node, dst_interface, dst_node, stream);
			else
			{
				STARPU_ASSERT(copy_methods->any_to_any);
				ret = copy_methods->any_to_any(src_interface, src_node, dst_interface, dst_node, &req->async_channel);
			}

			cures = cudaEventRecord(req->async_channel.event.cuda_event, stream);
			if (STARPU_UNLIKELY(cures != cudaSuccess)) STARPU_CUDA_REPORT_ERROR(cures);
		}
		break;
	case _STARPU_MEMORY_NODE_TUPLE(STARPU_CPU_RAM,STARPU_CUDA_RAM):
		/* STARPU_CPU_RAM -> STARPU_CUDA_RAM */
		/* only the proper CUBLAS thread can initiate this! */
#if !defined(HAVE_CUDA_MEMCPY_PEER)
		STARPU_ASSERT(_starpu_memory_node_get_local_key() == dst_node);
#endif
		if (!req || starpu_asynchronous_copy_disabled() || starpu_asynchronous_cuda_copy_disabled() ||
		    !(copy_methods->ram_to_cuda_async || copy_methods->any_to_any))
		{
			/* this is not associated with a request, so it's synchronous */
			STARPU_ASSERT(copy_methods->ram_to_cuda || copy_methods->any_to_any);
			if (copy_methods->ram_to_cuda)
				copy_methods->ram_to_cuda(src_interface, src_node, dst_interface, dst_node);
			else
				copy_methods->any_to_any(src_interface, src_node, dst_interface, dst_node, NULL);
		}
		else
		{
			req->async_channel.type = STARPU_CUDA_RAM;
			cures = cudaEventCreateWithFlags(&req->async_channel.event.cuda_event, cudaEventDisableTiming);
			if (STARPU_UNLIKELY(cures != cudaSuccess))
				STARPU_CUDA_REPORT_ERROR(cures);

			stream = starpu_cuda_get_local_in_transfer_stream();
			if (copy_methods->ram_to_cuda_async)
				ret = copy_methods->ram_to_cuda_async(src_interface, src_node, dst_interface, dst_node, stream);
			else
			{
				STARPU_ASSERT(copy_methods->any_to_any);
				ret = copy_methods->any_to_any(src_interface, src_node, dst_interface, dst_node, &req->async_channel);
			}

			cures = cudaEventRecord(req->async_channel.event.cuda_event, stream);
			if (STARPU_UNLIKELY(cures != cudaSuccess))
				STARPU_CUDA_REPORT_ERROR(cures);
		}
		break;
	case _STARPU_MEMORY_NODE_TUPLE(STARPU_CUDA_RAM,STARPU_CUDA_RAM):
		/* CUDA - CUDA transfer */
		if (!req || starpu_asynchronous_copy_disabled() || starpu_asynchronous_cuda_copy_disabled() ||
		    !(copy_methods->cuda_to_cuda_async || copy_methods->any_to_any))
		{
			STARPU_ASSERT(copy_methods->cuda_to_cuda || copy_methods->any_to_any);
			/* this is not associated with a request, so it's synchronous */
			if (copy_methods->cuda_to_cuda)
				copy_methods->cuda_to_cuda(src_interface, src_node, dst_interface, dst_node);
			else
				copy_methods->any_to_any(src_interface, src_node, dst_interface, dst_node, NULL);
		}
		else
		{
			req->async_channel.type = STARPU_CUDA_RAM;
			cures = cudaEventCreateWithFlags(&req->async_channel.event.cuda_event, cudaEventDisableTiming);
			if (STARPU_UNLIKELY(cures != cudaSuccess)) STARPU_CUDA_REPORT_ERROR(cures);

			stream = starpu_cuda_get_peer_transfer_stream(src_node, dst_node);
			if (copy_methods->cuda_to_cuda_async)
				ret = copy_methods->cuda_to_cuda_async(src_interface, src_node, dst_interface, dst_node, stream);
			else
			{
				STARPU_ASSERT(copy_methods->any_to_any);
				ret = copy_methods->any_to_any(src_interface, src_node, dst_interface, dst_node, &req->async_channel);
			}

			cures = cudaEventRecord(req->async_channel.event.cuda_event, stream);
			if (STARPU_UNLIKELY(cures != cudaSuccess)) STARPU_CUDA_REPORT_ERROR(cures);
		}
		break;
#endif
#ifdef STARPU_USE_OPENCL
	case _STARPU_MEMORY_NODE_TUPLE(STARPU_OPENCL_RAM,STARPU_CPU_RAM):
		/* OpenCL -> RAM */
		STARPU_ASSERT(_starpu_memory_node_get_local_key() == src_node);
		if (!req || starpu_asynchronous_copy_disabled() || starpu_asynchronous_opencl_copy_disabled() ||
		    !(copy_methods->opencl_to_ram_async || copy_methods->any_to_any))
		{
			STARPU_ASSERT(copy_methods->opencl_to_ram || copy_methods->any_to_any);
			/* this is not associated with a request, so it's synchronous */
			if (copy_methods->opencl_to_ram)
				copy_methods->opencl_to_ram(src_interface, src_node, dst_interface, dst_node);
			else
				copy_methods->any_to_any(src_interface, src_node, dst_interface, dst_node, NULL);
		}
		else
		{
			req->async_channel.type = STARPU_OPENCL_RAM;
			if (copy_methods->opencl_to_ram_async)
				ret = copy_methods->opencl_to_ram_async(src_interface, src_node, dst_interface, dst_node, &(req->async_channel.event.opencl_event));
			else
			{
				STARPU_ASSERT(copy_methods->any_to_any);
				ret = copy_methods->any_to_any(src_interface, src_node, dst_interface, dst_node, &req->async_channel);
			}
		}
		break;
	case _STARPU_MEMORY_NODE_TUPLE(STARPU_CPU_RAM,STARPU_OPENCL_RAM):
		/* STARPU_CPU_RAM -> STARPU_OPENCL_RAM */
		STARPU_ASSERT(_starpu_memory_node_get_local_key() == dst_node);
		if (!req || starpu_asynchronous_copy_disabled() || starpu_asynchronous_opencl_copy_disabled() ||
		    !(copy_methods->ram_to_opencl_async || copy_methods->any_to_any))
		{
			STARPU_ASSERT(copy_methods->ram_to_opencl || copy_methods->any_to_any);
			/* this is not associated with a request, so it's synchronous */
			if (copy_methods->ram_to_opencl)
				copy_methods->ram_to_opencl(src_interface, src_node, dst_interface, dst_node);
			else
				copy_methods->any_to_any(src_interface, src_node, dst_interface, dst_node, NULL);
		}
		else
		{
			req->async_channel.type = STARPU_OPENCL_RAM;
			if (copy_methods->ram_to_opencl_async)
				ret = copy_methods->ram_to_opencl_async(src_interface, src_node, dst_interface, dst_node, &(req->async_channel.event.opencl_event));
			else
			{
				STARPU_ASSERT(copy_methods->any_to_any);
				ret = copy_methods->any_to_any(src_interface, src_node, dst_interface, dst_node, &req->async_channel);
			}
		}
		break;
	case _STARPU_MEMORY_NODE_TUPLE(STARPU_OPENCL_RAM,STARPU_OPENCL_RAM):
		/* STARPU_OPENCL_RAM -> STARPU_OPENCL_RAM */
		STARPU_ASSERT(_starpu_memory_node_get_local_key() == dst_node || _starpu_memory_node_get_local_key() == src_node);
		if (!req || starpu_asynchronous_copy_disabled() || starpu_asynchronous_opencl_copy_disabled() ||
		    !(copy_methods->opencl_to_opencl_async || copy_methods->any_to_any))
		{
			STARPU_ASSERT(copy_methods->opencl_to_opencl || copy_methods->any_to_any);
			/* this is not associated with a request, so it's synchronous */
			if (copy_methods->opencl_to_opencl)
				copy_methods->opencl_to_opencl(src_interface, src_node, dst_interface, dst_node);
			else
				copy_methods->any_to_any(src_interface, src_node, dst_interface, dst_node, NULL);
		}
		else
		{
			req->async_channel.type = STARPU_OPENCL_RAM;
			if (copy_methods->opencl_to_opencl_async)
				ret = copy_methods->opencl_to_opencl_async(src_interface, src_node, dst_interface, dst_node, &(req->async_channel.event.opencl_event));
			else
			{
				STARPU_ASSERT(copy_methods->any_to_any);
				ret = copy_methods->any_to_any(src_interface, src_node, dst_interface, dst_node, &req->async_channel);
			}
		}
		break;
#endif
#ifdef STARPU_USE_MIC
	case _STARPU_MEMORY_NODE_TUPLE(STARPU_CPU_RAM,STARPU_MIC_RAM):
		/* RAM -> MIC */
		if (!req || starpu_asynchronous_copy_disabled() || starpu_asynchronous_mic_copy_disabled() ||
		    !(copy_methods->ram_to_mic_async || copy_methods->any_to_any))
		{
			/* this is not associated with a request, so it's synchronous */
			STARPU_ASSERT(copy_methods->ram_to_mic || copy_methods->any_to_any);
			if (copy_methods->ram_to_mic)
				copy_methods->ram_to_mic(src_interface, src_node, dst_interface, dst_node);
			else
				copy_methods->any_to_any(src_interface, src_node, dst_interface, dst_node, NULL);
		}
		else
		{
			req->async_channel.type = STARPU_MIC_RAM;
			if (copy_methods->ram_to_mic_async)
				ret = copy_methods->ram_to_mic_async(src_interface, src_node, dst_interface, dst_node);
			else
			{
				STARPU_ASSERT(copy_methods->any_to_any);
				ret = copy_methods->any_to_any(src_interface, src_node, dst_interface, dst_node, &req->async_channel);
			}
			_starpu_mic_init_event(&(req->async_channel.event.mic_event), dst_node);
		}
		break;
	case _STARPU_MEMORY_NODE_TUPLE(STARPU_MIC_RAM,STARPU_CPU_RAM):
		/* MIC -> RAM */
		if (!req || starpu_asynchronous_copy_disabled() || starpu_asynchronous_mic_copy_disabled() ||
		    !(copy_methods->mic_to_ram_async || copy_methods->any_to_any))
		{
			/* this is not associated with a request, so it's synchronous */
			STARPU_ASSERT(copy_methods->mic_to_ram || copy_methods->any_to_any);
			if (copy_methods->mic_to_ram)
				copy_methods->mic_to_ram(src_interface, src_node, dst_interface, dst_node);
			else
				copy_methods->any_to_any(src_interface, src_node, dst_interface, dst_node, NULL);
		}
		else
		{
			req->async_channel.type = STARPU_MIC_RAM;
			if (copy_methods->mic_to_ram_async)
				ret = copy_methods->mic_to_ram_async(src_interface, src_node, dst_interface, dst_node);
			else
			{
				STARPU_ASSERT(copy_methods->any_to_any);
				ret = copy_methods->any_to_any(src_interface, src_node, dst_interface, dst_node, &req->async_channel);
			}
			_starpu_mic_init_event(&(req->async_channel.event.mic_event), src_node);
		}
		break;
	/* TODO: MIC -> MIC */
#endif
#ifdef STARPU_USE_SCC
	/* SCC RAM associated with the master process is considered
	 * the main memory node. */
	case _STARPU_MEMORY_NODE_TUPLE(STARPU_CPU_RAM,STARPU_SCC_RAM):
		/* master private SCC RAM -> slave private SCC RAM */
		if (copy_methods->scc_src_to_sink)
			copy_methods->scc_src_to_sink(src_interface, src_node, dst_interface, dst_node);
		else
			copy_methods->any_to_any(src_interface, src_node, dst_interface, dst_node, NULL);
		break;
	case _STARPU_MEMORY_NODE_TUPLE(STARPU_SCC_RAM,STARPU_CPU_RAM):
		/* slave private SCC RAM -> master private SCC RAM */
		if (copy_methods->scc_sink_to_src)
			copy_methods->scc_sink_to_src(src_interface, src_node, dst_interface, dst_node);
		else
			copy_methods->any_to_any(src_interface, src_node, dst_interface, dst_node, NULL);
		break;
	case _STARPU_MEMORY_NODE_TUPLE(STARPU_SCC_RAM,STARPU_SCC_RAM):
		/* slave private SCC RAM -> slave private SCC RAM */
		if (copy_methods->scc_sink_to_sink)
			copy_methods->scc_sink_to_sink(src_interface, src_node, dst_interface, dst_node);
		else
			copy_methods->any_to_any(src_interface, src_node, dst_interface, dst_node, NULL);
		break;
#endif
	case _STARPU_MEMORY_NODE_TUPLE(STARPU_CPU_RAM,STARPU_DISK_RAM):
		if (copy_methods->any_to_any)
			ret = copy_methods->any_to_any(src_interface, src_node, dst_interface, dst_node, req && !starpu_asynchronous_copy_disabled() ? &req->async_channel : NULL);
		else
		{
			void *obj = starpu_data_handle_to_pointer(handle, dst_node);
			void *ptr = NULL;
			starpu_ssize_t size = 0;

			handle->ops->pack_data(handle, src_node, &ptr, &size);
			ret = _starpu_disk_full_write(src_node, dst_node, obj, ptr, size, req && !starpu_asynchronous_copy_disabled() ? &req->async_channel : NULL);
			if (ret == 0)
				/* write is already finished, ptr was allocated in pack_data */
				free(ptr);

			/* For now, asynchronous is not supported */
			STARPU_ASSERT(ret == 0);
		}
		break;
	case _STARPU_MEMORY_NODE_TUPLE(STARPU_DISK_RAM,STARPU_CPU_RAM):
		if (copy_methods->any_to_any)
			ret = copy_methods->any_to_any(src_interface, src_node, dst_interface, dst_node, req && !starpu_asynchronous_copy_disabled() ? &req->async_channel : NULL);
		else
		{
			void *obj = starpu_data_handle_to_pointer(handle, src_node);
			void *ptr = NULL;
			size_t size = 0;

			ret = _starpu_disk_full_read(src_node, dst_node, obj, &ptr, &size, req && !starpu_asynchronous_copy_disabled() ? &req->async_channel : NULL);
			if (ret == 0)
			{
				/* read is already finished, we can already unpack */
				handle->ops->unpack_data(handle, dst_node, ptr, size);
				/* ptr is allocated in full_read */
				free(ptr);
			}

			/* For now, asynchronous is not supported */
			STARPU_ASSERT(ret == 0);
		}
		break;
	case _STARPU_MEMORY_NODE_TUPLE(STARPU_DISK_RAM,STARPU_DISK_RAM):
		ret = copy_methods->any_to_any(src_interface, src_node, dst_interface, dst_node, req ? &req->async_channel : NULL);
		break;
	default:
		STARPU_ABORT();
		break;
	}

	return ret;
#endif /* !SIMGRID */
}
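
/* Illustrative sketch (an assumption, not the definition used by StarPU; the
 * real macro is defined elsewhere in the tree): _STARPU_MEMORY_NODE_TUPLE only
 * needs to pack two node kinds into one integral constant so that a
 * (src_kind, dst_kind) pair can drive a single switch, along the lines of:
 *
 *   #define _STARPU_MEMORY_NODE_TUPLE(kind1,kind2) ((kind1) | ((kind2) << 4))
 *
 * Any encoding works as long as it is injective over the starpu_node_kind
 * values used as case labels above.
 */
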
int STARPU_ATTRIBUTE_WARN_UNUSED_RESULT _starpu_driver_copy_data_1_to_1(starpu_data_handle_t handle,
									struct _starpu_data_replicate *src_replicate,
									struct _starpu_data_replicate *dst_replicate,
									unsigned donotread,
									struct _starpu_data_request *req,
									unsigned may_alloc,
									unsigned prefetch STARPU_ATTRIBUTE_UNUSED)
{
	if (!donotread)
	{
		STARPU_ASSERT(src_replicate->allocated);
		STARPU_ASSERT(src_replicate->refcnt);
	}

	int ret_alloc, ret_copy;
	unsigned long STARPU_ATTRIBUTE_UNUSED com_id = 0;

	unsigned src_node = src_replicate->memory_node;
	unsigned dst_node = dst_replicate->memory_node;

	/* first make sure the destination has an allocated buffer */
	if (!dst_replicate->allocated)
	{
		if (!may_alloc || _starpu_is_reclaiming(dst_node))
			/* We're not supposed to allocate there at the moment */
			return -ENOMEM;

		ret_alloc = _starpu_allocate_memory_on_node(handle, dst_replicate, req ? req->prefetch : 0);
		if (ret_alloc)
			return -ENOMEM;
	}

	STARPU_ASSERT(dst_replicate->allocated);
	STARPU_ASSERT(dst_replicate->refcnt);

	/* if there is no need to actually read the data,
	 * we do not perform any transfer */
	if (!donotread)
	{
		size_t size = _starpu_data_get_size(handle);
		_starpu_bus_update_profiling_info((int)src_node, (int)dst_node, size);

#ifdef STARPU_USE_FXT
		com_id = STARPU_ATOMIC_ADDL(&communication_cnt, 1);
		if (req)
			req->com_id = com_id;
#endif

		dst_replicate->initialized = 1;

		_STARPU_TRACE_START_DRIVER_COPY(src_node, dst_node, size, com_id, prefetch, handle);
		ret_copy = copy_data_1_to_1_generic(handle, src_replicate, dst_replicate, req);
		if (!req)
			/* Synchronous, this is already finished */
			_STARPU_TRACE_END_DRIVER_COPY(src_node, dst_node, size, com_id, prefetch);

		return ret_copy;
	}

	return 0;
}
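
/* Hypothetical caller sketch (illustration only, not code from this file):
 * a -ENOMEM return tells the caller that the destination replicate could not
 * be allocated; callers typically trigger memory reclaiming on dst_node and
 * retry the request later. With donotread=0, may_alloc=1, prefetch=0:
 *
 *   int ret = _starpu_driver_copy_data_1_to_1(handle, src_replicate, dst_replicate,
 *                                             0, req, 1, 0);
 *   if (ret == -ENOMEM)
 *   {
 *           ...free or evict buffers on dst_node...
 *           ...re-submit the request afterwards...
 *   }
 */
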
/* This can be used by interfaces to easily transfer a piece of data without
 * caring about the particular transfer methods. */

/* This should either return 0 if the transfer is complete, or -EAGAIN if the
 * transfer is still pending, and will have to be waited for by
 * _starpu_driver_test_request_completion/_starpu_driver_wait_request_completion
 */
int starpu_interface_copy(uintptr_t src, size_t src_offset, unsigned src_node, uintptr_t dst, size_t dst_offset, unsigned dst_node, size_t size, void *async_data)
{
	struct _starpu_async_channel *async_channel = async_data;

	enum starpu_node_kind src_kind = starpu_node_get_kind(src_node);
	enum starpu_node_kind dst_kind = starpu_node_get_kind(dst_node);

	switch (_STARPU_MEMORY_NODE_TUPLE(src_kind,dst_kind))
	{
	case _STARPU_MEMORY_NODE_TUPLE(STARPU_CPU_RAM,STARPU_CPU_RAM):
		memcpy((void *) (dst + dst_offset), (void *) (src + src_offset), size);
		return 0;
#ifdef STARPU_USE_CUDA
	case _STARPU_MEMORY_NODE_TUPLE(STARPU_CUDA_RAM,STARPU_CPU_RAM):
		return starpu_cuda_copy_async_sync(
				(void*) (src + src_offset), src_node,
				(void*) (dst + dst_offset), dst_node,
				size,
				async_channel?starpu_cuda_get_local_out_transfer_stream():NULL,
				cudaMemcpyDeviceToHost);
	case _STARPU_MEMORY_NODE_TUPLE(STARPU_CPU_RAM,STARPU_CUDA_RAM):
		return starpu_cuda_copy_async_sync(
				(void*) (src + src_offset), src_node,
				(void*) (dst + dst_offset), dst_node,
				size,
				async_channel?starpu_cuda_get_local_in_transfer_stream():NULL,
				cudaMemcpyHostToDevice);
	case _STARPU_MEMORY_NODE_TUPLE(STARPU_CUDA_RAM,STARPU_CUDA_RAM):
		return starpu_cuda_copy_async_sync(
				(void*) (src + src_offset), src_node,
				(void*) (dst + dst_offset), dst_node,
				size,
				async_channel?starpu_cuda_get_peer_transfer_stream(src_node, dst_node):NULL,
				cudaMemcpyDeviceToDevice);
#endif
#ifdef STARPU_USE_OPENCL
	case _STARPU_MEMORY_NODE_TUPLE(STARPU_OPENCL_RAM,STARPU_CPU_RAM):
	case _STARPU_MEMORY_NODE_TUPLE(STARPU_CPU_RAM,STARPU_OPENCL_RAM):
	case _STARPU_MEMORY_NODE_TUPLE(STARPU_OPENCL_RAM,STARPU_OPENCL_RAM):
		return starpu_opencl_copy_async_sync(
				src, src_offset, src_node,
				dst, dst_offset, dst_node,
				size,
				&async_channel->event.opencl_event);
#endif
#ifdef STARPU_USE_MIC
	case _STARPU_MEMORY_NODE_TUPLE(STARPU_MIC_RAM,STARPU_CPU_RAM):
		if (async_data)
			return _starpu_mic_copy_mic_to_ram_async(
					(void*) (src + src_offset), src_node,
					(void*) (dst + dst_offset), dst_node,
					size);
		else
			return _starpu_mic_copy_mic_to_ram(
					(void*) (src + src_offset), src_node,
					(void*) (dst + dst_offset), dst_node,
					size);
	case _STARPU_MEMORY_NODE_TUPLE(STARPU_CPU_RAM,STARPU_MIC_RAM):
		if (async_data)
			return _starpu_mic_copy_ram_to_mic_async(
					(void*) (src + src_offset), src_node,
					(void*) (dst + dst_offset), dst_node,
					size);
		else
			return _starpu_mic_copy_ram_to_mic(
					(void*) (src + src_offset), src_node,
					(void*) (dst + dst_offset), dst_node,
					size);
	/* TODO: MIC->MIC */
#endif
#ifdef STARPU_USE_SCC
	case _STARPU_MEMORY_NODE_TUPLE(STARPU_SCC_RAM,STARPU_CPU_RAM):
		return _starpu_scc_copy_sink_to_src(
				(void*) (src + src_offset), src_node,
				(void*) (dst + dst_offset), dst_node,
				size);
	case _STARPU_MEMORY_NODE_TUPLE(STARPU_CPU_RAM,STARPU_SCC_RAM):
		return _starpu_scc_copy_src_to_sink(
				(void*) (src + src_offset), src_node,
				(void*) (dst + dst_offset), dst_node,
				size);
	case _STARPU_MEMORY_NODE_TUPLE(STARPU_SCC_RAM,STARPU_SCC_RAM):
		return _starpu_scc_copy_sink_to_sink(
				(void*) (src + src_offset), src_node,
				(void*) (dst + dst_offset), dst_node,
				size);
#endif
	case _STARPU_MEMORY_NODE_TUPLE(STARPU_CPU_RAM, STARPU_DISK_RAM):
		return _starpu_disk_copy_src_to_disk(
				(void*) (src + src_offset), src_node,
				(void*) dst, dst_offset, dst_node,
				size, async_channel);
	case _STARPU_MEMORY_NODE_TUPLE(STARPU_DISK_RAM, STARPU_CPU_RAM):
		return _starpu_disk_copy_disk_to_src(
				(void*) src, src_offset, src_node,
				(void*) (dst + dst_offset), dst_node,
				size, async_channel);
	case _STARPU_MEMORY_NODE_TUPLE(STARPU_DISK_RAM, STARPU_DISK_RAM):
		return _starpu_disk_copy_disk_to_disk(
				(void*) src, src_offset, src_node,
				(void*) dst, dst_offset, dst_node,
				size, async_channel);
	default:
		STARPU_ABORT();
		return -1;
	}

	return 0;
}
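
/* A minimal sketch (illustration only, not code from this file) of how a
 * vector-like data interface could implement its any_to_any copy method on
 * top of starpu_interface_copy; the field names (dev_handle, offset, nx,
 * elemsize) are modeled on StarPU's vector interface but are assumptions here:
 *
 *   static int copy_any_to_any(void *src_interface, unsigned src_node,
 *                              void *dst_interface, unsigned dst_node,
 *                              void *async_data)
 *   {
 *           struct starpu_vector_interface *src_vector = src_interface;
 *           struct starpu_vector_interface *dst_vector = dst_interface;
 *           int ret = 0;
 *           if (starpu_interface_copy(src_vector->dev_handle, src_vector->offset, src_node,
 *                                     dst_vector->dev_handle, dst_vector->offset, dst_node,
 *                                     src_vector->nx * src_vector->elemsize, async_data))
 *                   ret = -EAGAIN;
 *           return ret;
 *   }
 *
 * A 0 return means the copy already completed; -EAGAIN means it is still in
 * flight on the async channel passed through async_data.
 */
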
void _starpu_driver_wait_request_completion(struct _starpu_async_channel *async_channel)
{
#ifdef STARPU_SIMGRID
	STARPU_PTHREAD_MUTEX_LOCK(&async_channel->event.mutex);
	while (!async_channel->event.finished)
		STARPU_PTHREAD_COND_WAIT(&async_channel->event.cond, &async_channel->event.mutex);
	STARPU_PTHREAD_MUTEX_UNLOCK(&async_channel->event.mutex);
#else /* !SIMGRID */
	enum starpu_node_kind kind = async_channel->type;
#ifdef STARPU_USE_CUDA
	cudaEvent_t event;
	cudaError_t cures;
#endif

	switch (kind)
	{
#ifdef STARPU_USE_CUDA
	case STARPU_CUDA_RAM:
		event = (*async_channel).event.cuda_event;

		cures = cudaEventSynchronize(event);
		if (STARPU_UNLIKELY(cures))
			STARPU_CUDA_REPORT_ERROR(cures);

		cures = cudaEventDestroy(event);
		if (STARPU_UNLIKELY(cures))
			STARPU_CUDA_REPORT_ERROR(cures);
		break;
#endif
#ifdef STARPU_USE_OPENCL
	case STARPU_OPENCL_RAM:
	{
		cl_int err;
		if ((*async_channel).event.opencl_event == NULL)
			STARPU_ABORT();

		err = clWaitForEvents(1, &((*async_channel).event.opencl_event));
		if (STARPU_UNLIKELY(err != CL_SUCCESS))
			STARPU_OPENCL_REPORT_ERROR(err);

		err = clReleaseEvent((*async_channel).event.opencl_event);
		if (STARPU_UNLIKELY(err != CL_SUCCESS))
			STARPU_OPENCL_REPORT_ERROR(err);
		break;
	}
#endif
#ifdef STARPU_USE_MIC
	case STARPU_MIC_RAM:
		_starpu_mic_wait_request_completion(&(async_channel->event.mic_event));
		break;
#endif
	case STARPU_DISK_RAM:
		starpu_disk_wait_request(async_channel);
		break;
	case STARPU_CPU_RAM:
	default:
		STARPU_ABORT();
	}
#endif /* !SIMGRID */
}

unsigned _starpu_driver_test_request_completion(struct _starpu_async_channel *async_channel)
{
#ifdef STARPU_SIMGRID
	unsigned ret;
	STARPU_PTHREAD_MUTEX_LOCK(&async_channel->event.mutex);
	ret = async_channel->event.finished;
	STARPU_PTHREAD_MUTEX_UNLOCK(&async_channel->event.mutex);
	return ret;
#else /* !SIMGRID */
	enum starpu_node_kind kind = async_channel->type;
	unsigned success = 0;
#ifdef STARPU_USE_CUDA
	cudaEvent_t event;
#endif

	switch (kind)
	{
#ifdef STARPU_USE_CUDA
	case STARPU_CUDA_RAM:
		event = (*async_channel).event.cuda_event;
		cudaError_t cures = cudaEventQuery(event);

		success = (cures == cudaSuccess);
		if (success)
			cudaEventDestroy(event);
		else if (cures != cudaErrorNotReady)
			STARPU_CUDA_REPORT_ERROR(cures);
		break;
#endif
#ifdef STARPU_USE_OPENCL
	case STARPU_OPENCL_RAM:
	{
		cl_int event_status;
		cl_event opencl_event = (*async_channel).event.opencl_event;

		if (opencl_event == NULL) STARPU_ABORT();
		cl_int err = clGetEventInfo(opencl_event, CL_EVENT_COMMAND_EXECUTION_STATUS, sizeof(event_status), &event_status, NULL);
		if (STARPU_UNLIKELY(err != CL_SUCCESS))
			STARPU_OPENCL_REPORT_ERROR(err);
		if (event_status < 0)
			STARPU_OPENCL_REPORT_ERROR(event_status);
		if (event_status == CL_COMPLETE)
		{
			err = clReleaseEvent(opencl_event);
			if (STARPU_UNLIKELY(err != CL_SUCCESS)) STARPU_OPENCL_REPORT_ERROR(err);
		}
		success = (event_status == CL_COMPLETE);
		break;
	}
#endif
#ifdef STARPU_USE_MIC
	case STARPU_MIC_RAM:
		success = _starpu_mic_request_is_complete(&(async_channel->event.mic_event));
		break;
#endif
	case STARPU_DISK_RAM:
		success = starpu_disk_test_request(async_channel);
		break;
	case STARPU_CPU_RAM:
	default:
		STARPU_ABORT();
	}

	return success;
#endif /* !SIMGRID */
}
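
/* Usage sketch (illustration only, not code from this file): code that owns
 * an asynchronous request can either poll or block on its channel:
 *
 *   if (_starpu_driver_test_request_completion(&req->async_channel))
 *           ...the transfer is done, proceed...
 *   else
 *           _starpu_driver_wait_request_completion(&req->async_channel);
 *
 * Note that on completion both paths destroy/release the underlying driver
 * event (CUDA event, OpenCL event, ...), so each request should be tested or
 * waited for by a single owner.
 */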