/* StarPU --- Runtime system for heterogeneous multicore architectures.
 *
 * Copyright (C) 2010-2013  Université de Bordeaux 1
 * Copyright (C) 2010, 2011  Centre National de la Recherche Scientifique
 *
 * StarPU is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation; either version 2.1 of the License, or (at
 * your option) any later version.
 *
 * StarPU is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * See the GNU Lesser General Public License in COPYING.LGPL for more details.
 */
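
/* Data transfer driver. This file implements the generic 1-to-1 copy of a
 * data replicate between memory nodes (CPU RAM, CUDA, OpenCL), the wait and
 * test operations on the asynchronous channels used by those copies, and
 * the helpers that wake workers blocked on a memory node's condition
 * variables. */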

#include <starpu.h>
#include <common/config.h>
#include <common/utils.h>
#include <core/sched_policy.h>
#include <datawizard/datastats.h>
#include <common/fxt.h>
#include "copy_driver.h"
#include "memalloc.h"
#include <starpu_opencl.h>
#include <starpu_cuda.h>
#include <profiling/profiling.h>
#ifdef STARPU_SIMGRID
#include <core/simgrid.h>
#include <msg/msg.h>
#endif
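
/* Idle workers block on condition variables that are registered with the
 * memory nodes they can serve. The two functions below broadcast on those
 * conditions, either for a single memory node or for all of them, typically
 * when a data transfer completes and blocked workers may be able to
 * proceed. */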
void _starpu_wake_all_blocked_workers_on_node(unsigned nodeid)
{
	/* wake up all workers on that memory node */
	unsigned cond_id;

	struct _starpu_memory_node_descr * const descr = _starpu_memory_node_get_description();

	_STARPU_PTHREAD_RWLOCK_RDLOCK(&descr->conditions_rwlock);

	unsigned nconds = descr->condition_count[nodeid];
	for (cond_id = 0; cond_id < nconds; cond_id++)
	{
		struct _starpu_cond_and_mutex *condition;
		condition = &descr->conditions_attached_to_node[nodeid][cond_id];

		/* wake anybody waiting on that condition */
		_STARPU_PTHREAD_MUTEX_LOCK(condition->mutex);
		_STARPU_PTHREAD_COND_BROADCAST(condition->cond);
		_STARPU_PTHREAD_MUTEX_UNLOCK(condition->mutex);
	}

	_STARPU_PTHREAD_RWLOCK_UNLOCK(&descr->conditions_rwlock);
}

void starpu_wake_all_blocked_workers(void)
{
	/* workers may be blocked on the various queues' conditions */
	unsigned cond_id;

	struct _starpu_memory_node_descr * const descr = _starpu_memory_node_get_description();

	_STARPU_PTHREAD_RWLOCK_RDLOCK(&descr->conditions_rwlock);

	unsigned nconds = descr->total_condition_count;
	for (cond_id = 0; cond_id < nconds; cond_id++)
	{
		struct _starpu_cond_and_mutex *condition;
		condition = &descr->conditions_all[cond_id];

		/* wake anybody waiting on that condition */
		_STARPU_PTHREAD_MUTEX_LOCK(condition->mutex);
		_STARPU_PTHREAD_COND_BROADCAST(condition->cond);
		_STARPU_PTHREAD_MUTEX_UNLOCK(condition->mutex);
	}

	_STARPU_PTHREAD_RWLOCK_UNLOCK(&descr->conditions_rwlock);
}

#ifdef STARPU_USE_FXT
/* we need to identify each communication so that we can match the beginning
 * and the end of a communication in the trace, so we use a unique identifier
 * per communication */
static unsigned communication_cnt = 0;
#endif
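
/* Perform the actual transfer between two replicates of a handle. The copy
 * is dispatched on the (source kind, destination kind) pair of memory
 * nodes: each branch prefers the interface's dedicated asynchronous copy
 * method when a request is given and asynchronous copies are not disabled,
 * and otherwise falls back to the synchronous method or to the generic
 * any_to_any method. In SimGrid mode the transfer is merely simulated. */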
static int copy_data_1_to_1_generic(starpu_data_handle_t handle,
				    struct _starpu_data_replicate *src_replicate,
				    struct _starpu_data_replicate *dst_replicate,
				    struct _starpu_data_request *req STARPU_ATTRIBUTE_UNUSED)
{
	unsigned src_node = src_replicate->memory_node;
	unsigned dst_node = dst_replicate->memory_node;

	STARPU_ASSERT(src_replicate->refcnt);
	STARPU_ASSERT(dst_replicate->refcnt);

	STARPU_ASSERT(src_replicate->allocated);
	STARPU_ASSERT(dst_replicate->allocated);

	_starpu_comm_amounts_inc(src_node, dst_node, handle->ops->get_size(handle));

#ifdef STARPU_SIMGRID
	return _starpu_simgrid_transfer(handle->ops->get_size(handle), src_node, dst_node, req);
#else /* !SIMGRID */
	int ret = 0;

	const struct starpu_data_copy_methods *copy_methods = handle->ops->copy_methods;

	enum starpu_node_kind src_kind = starpu_node_get_kind(src_node);
	enum starpu_node_kind dst_kind = starpu_node_get_kind(dst_node);

#ifdef STARPU_USE_CUDA
	cudaError_t cures;
	cudaStream_t stream;
#endif

	void *src_interface = src_replicate->data_interface;
	void *dst_interface = dst_replicate->data_interface;

#if defined(STARPU_USE_CUDA) && defined(HAVE_CUDA_MEMCPY_PEER)
	if ((src_kind == STARPU_CUDA_RAM) || (dst_kind == STARPU_CUDA_RAM))
	{
		int node = (dst_kind == STARPU_CUDA_RAM)?dst_node:src_node;
		starpu_cuda_set_device(_starpu_memory_node_get_devid(node));
	}
#endif
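
	/* With cudaMemcpyPeer support, a transfer involving a CUDA node can
	 * be initiated from either device, so one of the devices involved was
	 * simply selected above. Without it, the assertions in the CUDA cases
	 * below check that the transfer is initiated from the worker attached
	 * to the proper memory node. */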
	switch (_STARPU_MEMORY_NODE_TUPLE(src_kind,dst_kind))
	{
	case _STARPU_MEMORY_NODE_TUPLE(STARPU_CPU_RAM,STARPU_CPU_RAM):
		/* STARPU_CPU_RAM -> STARPU_CPU_RAM */
		if (copy_methods->ram_to_ram)
			copy_methods->ram_to_ram(src_interface, src_node, dst_interface, dst_node);
		else
			copy_methods->any_to_any(src_interface, src_node, dst_interface, dst_node, req ? &req->async_channel : NULL);
		break;
#ifdef STARPU_USE_CUDA
	case _STARPU_MEMORY_NODE_TUPLE(STARPU_CUDA_RAM,STARPU_CPU_RAM):
		/* STARPU_CUDA_RAM -> STARPU_CPU_RAM */
		/* only the proper CUDA worker thread can initiate this directly! */
#if !defined(HAVE_CUDA_MEMCPY_PEER)
		STARPU_ASSERT(_starpu_memory_node_get_local_key() == src_node);
#endif
		if (!req || starpu_asynchronous_copy_disabled() || starpu_asynchronous_cuda_copy_disabled() ||
				!(copy_methods->cuda_to_ram_async || copy_methods->any_to_any))
		{
			/* this is not associated to a request so it's synchronous */
			STARPU_ASSERT(copy_methods->cuda_to_ram || copy_methods->any_to_any);
			if (copy_methods->cuda_to_ram)
				copy_methods->cuda_to_ram(src_interface, src_node, dst_interface, dst_node);
			else
				copy_methods->any_to_any(src_interface, src_node, dst_interface, dst_node, NULL);
		}
		else
		{
			req->async_channel.type = STARPU_CUDA_RAM;
			cures = cudaEventCreate(&req->async_channel.event.cuda_event);
			if (STARPU_UNLIKELY(cures != cudaSuccess)) STARPU_CUDA_REPORT_ERROR(cures);

			stream = starpu_cuda_get_local_out_transfer_stream();
			if (copy_methods->cuda_to_ram_async)
				ret = copy_methods->cuda_to_ram_async(src_interface, src_node, dst_interface, dst_node, stream);
			else
			{
				STARPU_ASSERT(copy_methods->any_to_any);
				ret = copy_methods->any_to_any(src_interface, src_node, dst_interface, dst_node, &req->async_channel);
			}

			cures = cudaEventRecord(req->async_channel.event.cuda_event, stream);
			if (STARPU_UNLIKELY(cures != cudaSuccess)) STARPU_CUDA_REPORT_ERROR(cures);
		}
		break;
	case _STARPU_MEMORY_NODE_TUPLE(STARPU_CPU_RAM,STARPU_CUDA_RAM):
		/* STARPU_CPU_RAM -> STARPU_CUDA_RAM */
		/* only the proper CUDA worker thread can initiate this! */
#if !defined(HAVE_CUDA_MEMCPY_PEER)
		STARPU_ASSERT(_starpu_memory_node_get_local_key() == dst_node);
#endif
		if (!req || starpu_asynchronous_copy_disabled() || starpu_asynchronous_cuda_copy_disabled() ||
				!(copy_methods->ram_to_cuda_async || copy_methods->any_to_any))
		{
			/* this is not associated to a request so it's synchronous */
			STARPU_ASSERT(copy_methods->ram_to_cuda || copy_methods->any_to_any);
			if (copy_methods->ram_to_cuda)
				copy_methods->ram_to_cuda(src_interface, src_node, dst_interface, dst_node);
			else
				copy_methods->any_to_any(src_interface, src_node, dst_interface, dst_node, NULL);
		}
		else
		{
			req->async_channel.type = STARPU_CUDA_RAM;
			cures = cudaEventCreate(&req->async_channel.event.cuda_event);
			if (STARPU_UNLIKELY(cures != cudaSuccess))
				STARPU_CUDA_REPORT_ERROR(cures);

			stream = starpu_cuda_get_local_in_transfer_stream();
			if (copy_methods->ram_to_cuda_async)
				ret = copy_methods->ram_to_cuda_async(src_interface, src_node, dst_interface, dst_node, stream);
			else
			{
				STARPU_ASSERT(copy_methods->any_to_any);
				ret = copy_methods->any_to_any(src_interface, src_node, dst_interface, dst_node, &req->async_channel);
			}

			cures = cudaEventRecord(req->async_channel.event.cuda_event, stream);
			if (STARPU_UNLIKELY(cures != cudaSuccess))
				STARPU_CUDA_REPORT_ERROR(cures);
		}
		break;
	case _STARPU_MEMORY_NODE_TUPLE(STARPU_CUDA_RAM,STARPU_CUDA_RAM):
		/* CUDA - CUDA transfer */
		if (!req || starpu_asynchronous_copy_disabled() || starpu_asynchronous_cuda_copy_disabled() ||
				!(copy_methods->cuda_to_cuda_async || copy_methods->any_to_any))
		{
			STARPU_ASSERT(copy_methods->cuda_to_cuda || copy_methods->any_to_any);
			/* this is not associated to a request so it's synchronous */
			if (copy_methods->cuda_to_cuda)
				copy_methods->cuda_to_cuda(src_interface, src_node, dst_interface, dst_node);
			else
				copy_methods->any_to_any(src_interface, src_node, dst_interface, dst_node, NULL);
		}
		else
		{
			req->async_channel.type = STARPU_CUDA_RAM;
			cures = cudaEventCreate(&req->async_channel.event.cuda_event);
			if (STARPU_UNLIKELY(cures != cudaSuccess)) STARPU_CUDA_REPORT_ERROR(cures);

			stream = starpu_cuda_get_local_peer_transfer_stream();
			if (copy_methods->cuda_to_cuda_async)
				ret = copy_methods->cuda_to_cuda_async(src_interface, src_node, dst_interface, dst_node, stream);
			else
			{
				STARPU_ASSERT(copy_methods->any_to_any);
				ret = copy_methods->any_to_any(src_interface, src_node, dst_interface, dst_node, &req->async_channel);
			}

			cures = cudaEventRecord(req->async_channel.event.cuda_event, stream);
			if (STARPU_UNLIKELY(cures != cudaSuccess)) STARPU_CUDA_REPORT_ERROR(cures);
		}
		break;
#endif
#ifdef STARPU_USE_OPENCL
	case _STARPU_MEMORY_NODE_TUPLE(STARPU_OPENCL_RAM,STARPU_CPU_RAM):
		/* STARPU_OPENCL_RAM -> STARPU_CPU_RAM */
		STARPU_ASSERT(_starpu_memory_node_get_local_key() == src_node);
		if (!req || starpu_asynchronous_copy_disabled() || starpu_asynchronous_opencl_copy_disabled() ||
				!(copy_methods->opencl_to_ram_async || copy_methods->any_to_any))
		{
			STARPU_ASSERT(copy_methods->opencl_to_ram || copy_methods->any_to_any);
			/* this is not associated to a request so it's synchronous */
			if (copy_methods->opencl_to_ram)
				copy_methods->opencl_to_ram(src_interface, src_node, dst_interface, dst_node);
			else
				copy_methods->any_to_any(src_interface, src_node, dst_interface, dst_node, NULL);
		}
		else
		{
			req->async_channel.type = STARPU_OPENCL_RAM;
			if (copy_methods->opencl_to_ram_async)
				ret = copy_methods->opencl_to_ram_async(src_interface, src_node, dst_interface, dst_node, &(req->async_channel.event.opencl_event));
			else
			{
				STARPU_ASSERT(copy_methods->any_to_any);
				ret = copy_methods->any_to_any(src_interface, src_node, dst_interface, dst_node, &req->async_channel);
			}
		}
		break;
	case _STARPU_MEMORY_NODE_TUPLE(STARPU_CPU_RAM,STARPU_OPENCL_RAM):
		/* STARPU_CPU_RAM -> STARPU_OPENCL_RAM */
		STARPU_ASSERT(_starpu_memory_node_get_local_key() == dst_node);
		if (!req || starpu_asynchronous_copy_disabled() || starpu_asynchronous_opencl_copy_disabled() ||
				!(copy_methods->ram_to_opencl_async || copy_methods->any_to_any))
		{
			STARPU_ASSERT(copy_methods->ram_to_opencl || copy_methods->any_to_any);
			/* this is not associated to a request so it's synchronous */
			if (copy_methods->ram_to_opencl)
				copy_methods->ram_to_opencl(src_interface, src_node, dst_interface, dst_node);
			else
				copy_methods->any_to_any(src_interface, src_node, dst_interface, dst_node, NULL);
		}
		else
		{
			req->async_channel.type = STARPU_OPENCL_RAM;
			if (copy_methods->ram_to_opencl_async)
				ret = copy_methods->ram_to_opencl_async(src_interface, src_node, dst_interface, dst_node, &(req->async_channel.event.opencl_event));
			else
			{
				STARPU_ASSERT(copy_methods->any_to_any);
				ret = copy_methods->any_to_any(src_interface, src_node, dst_interface, dst_node, &req->async_channel);
			}
		}
		break;
	case _STARPU_MEMORY_NODE_TUPLE(STARPU_OPENCL_RAM,STARPU_OPENCL_RAM):
		/* STARPU_OPENCL_RAM -> STARPU_OPENCL_RAM */
		STARPU_ASSERT(_starpu_memory_node_get_local_key() == dst_node || _starpu_memory_node_get_local_key() == src_node);
		if (!req || starpu_asynchronous_copy_disabled() || starpu_asynchronous_opencl_copy_disabled() ||
				!(copy_methods->opencl_to_opencl_async || copy_methods->any_to_any))
		{
			STARPU_ASSERT(copy_methods->opencl_to_opencl || copy_methods->any_to_any);
			/* this is not associated to a request so it's synchronous */
			if (copy_methods->opencl_to_opencl)
				copy_methods->opencl_to_opencl(src_interface, src_node, dst_interface, dst_node);
			else
				copy_methods->any_to_any(src_interface, src_node, dst_interface, dst_node, NULL);
		}
		else
		{
			req->async_channel.type = STARPU_OPENCL_RAM;
			if (copy_methods->opencl_to_opencl_async)
				ret = copy_methods->opencl_to_opencl_async(src_interface, src_node, dst_interface, dst_node, &(req->async_channel.event.opencl_event));
			else
			{
				STARPU_ASSERT(copy_methods->any_to_any);
				ret = copy_methods->any_to_any(src_interface, src_node, dst_interface, dst_node, &req->async_channel);
			}
		}
		break;
#endif
	default:
		STARPU_ABORT();
		break;
	}

	return ret;
#endif /* !SIMGRID */
}
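
/* Copy a piece of data from one replicate to another, making sure the
 * destination buffer is allocated first. Returns -ENOMEM when the
 * destination is not allocated and cannot be (either because may_alloc
 * forbids it or because the allocation fails); when donotread is set, only
 * the allocation is performed and no transfer takes place. */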
int __attribute__((warn_unused_result)) _starpu_driver_copy_data_1_to_1(starpu_data_handle_t handle,
									struct _starpu_data_replicate *src_replicate,
									struct _starpu_data_replicate *dst_replicate,
									unsigned donotread,
									struct _starpu_data_request *req,
									unsigned may_alloc)
{
	if (!donotread)
	{
		STARPU_ASSERT(src_replicate->allocated);
		STARPU_ASSERT(src_replicate->refcnt);
	}

	int ret_alloc, ret_copy;
	unsigned STARPU_ATTRIBUTE_UNUSED com_id = 0;

	unsigned src_node = src_replicate->memory_node;
	unsigned dst_node = dst_replicate->memory_node;

	/* first make sure the destination has an allocated buffer */
	if (!dst_replicate->allocated)
	{
		if (!may_alloc)
			return -ENOMEM;

		ret_alloc = _starpu_allocate_memory_on_node(handle, dst_replicate, req->prefetch);
		if (ret_alloc)
			return -ENOMEM;
	}

	STARPU_ASSERT(dst_replicate->allocated);
	STARPU_ASSERT(dst_replicate->refcnt);

	/* if there is no need to actually read the data,
	 * we do not perform any transfer */
	if (!donotread)
	{
		size_t size = _starpu_data_get_size(handle);
		_starpu_bus_update_profiling_info((int)src_node, (int)dst_node, size);

#ifdef STARPU_USE_FXT
		com_id = STARPU_ATOMIC_ADD(&communication_cnt, 1);

		if (req)
			req->com_id = com_id;
#endif

		_STARPU_TRACE_START_DRIVER_COPY(src_node, dst_node, size, com_id);
		ret_copy = copy_data_1_to_1_generic(handle, src_replicate, dst_replicate, req);

		return ret_copy;
	}

	return 0;
}

/* This can be used by interfaces to easily transfer a piece of data without
 * caring about the particular CUDA/OpenCL methods. */
int starpu_interface_copy(uintptr_t src, size_t src_offset, unsigned src_node, uintptr_t dst, size_t dst_offset, unsigned dst_node, size_t size, void *async_data)
{
	struct _starpu_async_channel *async_channel = async_data;

	enum starpu_node_kind src_kind = starpu_node_get_kind(src_node);
	enum starpu_node_kind dst_kind = starpu_node_get_kind(dst_node);

	switch (_STARPU_MEMORY_NODE_TUPLE(src_kind,dst_kind))
	{
	case _STARPU_MEMORY_NODE_TUPLE(STARPU_CPU_RAM,STARPU_CPU_RAM):
		memcpy((void *) dst + dst_offset, (void *) src + src_offset, size);
		return 0;
#ifdef STARPU_USE_CUDA
	case _STARPU_MEMORY_NODE_TUPLE(STARPU_CUDA_RAM,STARPU_CPU_RAM):
		return starpu_cuda_copy_async_sync(
				(void*) src + src_offset, src_node,
				(void*) dst + dst_offset, dst_node,
				size,
				async_channel?starpu_cuda_get_local_out_transfer_stream():NULL,
				cudaMemcpyDeviceToHost);
	case _STARPU_MEMORY_NODE_TUPLE(STARPU_CPU_RAM,STARPU_CUDA_RAM):
		return starpu_cuda_copy_async_sync(
				(void*) src + src_offset, src_node,
				(void*) dst + dst_offset, dst_node,
				size,
				async_channel?starpu_cuda_get_local_in_transfer_stream():NULL,
				cudaMemcpyHostToDevice);
	case _STARPU_MEMORY_NODE_TUPLE(STARPU_CUDA_RAM,STARPU_CUDA_RAM):
		return starpu_cuda_copy_async_sync(
				(void*) src + src_offset, src_node,
				(void*) dst + dst_offset, dst_node,
				size,
				async_channel?starpu_cuda_get_local_peer_transfer_stream():NULL,
				cudaMemcpyDeviceToDevice);
#endif
#ifdef STARPU_USE_OPENCL
	case _STARPU_MEMORY_NODE_TUPLE(STARPU_OPENCL_RAM,STARPU_CPU_RAM):
	case _STARPU_MEMORY_NODE_TUPLE(STARPU_CPU_RAM,STARPU_OPENCL_RAM):
	case _STARPU_MEMORY_NODE_TUPLE(STARPU_OPENCL_RAM,STARPU_OPENCL_RAM):
		return starpu_opencl_copy_async_sync(
				src, src_offset, src_node,
				dst, dst_offset, dst_node,
				size,
				&async_channel->event.opencl_event);
#endif
	default:
		STARPU_ABORT();
		return -1;
	}

	return 0;
}
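
/* For instance, a data interface whose content is a single contiguous
 * buffer could implement its any_to_any copy method with one call to
 * starpu_interface_copy. This is a minimal sketch: the foo_interface
 * structure and its ptr/size fields are hypothetical, not part of StarPU.
 *
 *	static int foo_any_to_any(void *src_if, unsigned src_node,
 *				  void *dst_if, unsigned dst_node,
 *				  void *async_data)
 *	{
 *		struct foo_interface *src = src_if;
 *		struct foo_interface *dst = dst_if;
 *		return starpu_interface_copy(src->ptr, 0, src_node,
 *					     dst->ptr, 0, dst_node,
 *					     src->size, async_data);
 *	}
 */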

void _starpu_driver_wait_request_completion(struct _starpu_async_channel *async_channel)
{
#ifdef STARPU_SIMGRID
	_STARPU_PTHREAD_MUTEX_LOCK(&async_channel->event.mutex);
	while (!async_channel->event.finished)
		_STARPU_PTHREAD_COND_WAIT(&async_channel->event.cond, &async_channel->event.mutex);
	_STARPU_PTHREAD_MUTEX_UNLOCK(&async_channel->event.mutex);
#else /* !SIMGRID */
	enum starpu_node_kind kind = async_channel->type;
#ifdef STARPU_USE_CUDA
	cudaEvent_t event;
	cudaError_t cures;
#endif

	switch (kind)
	{
#ifdef STARPU_USE_CUDA
	case STARPU_CUDA_RAM:
		event = async_channel->event.cuda_event;

		cures = cudaEventSynchronize(event);
		if (STARPU_UNLIKELY(cures))
			STARPU_CUDA_REPORT_ERROR(cures);

		cures = cudaEventDestroy(event);
		if (STARPU_UNLIKELY(cures))
			STARPU_CUDA_REPORT_ERROR(cures);

		break;
#endif
#ifdef STARPU_USE_OPENCL
	case STARPU_OPENCL_RAM:
	{
		cl_int err;
		if (async_channel->event.opencl_event == NULL)
			STARPU_ABORT();

		err = clWaitForEvents(1, &async_channel->event.opencl_event);
		if (STARPU_UNLIKELY(err != CL_SUCCESS))
			STARPU_OPENCL_REPORT_ERROR(err);

		err = clReleaseEvent(async_channel->event.opencl_event);
		if (STARPU_UNLIKELY(err != CL_SUCCESS))
			STARPU_OPENCL_REPORT_ERROR(err);

		break;
	}
#endif
	case STARPU_CPU_RAM:
	default:
		STARPU_ABORT();
	}
#endif /* !SIMGRID */
}
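
/* Test, without blocking, whether the transfer associated with an
 * asynchronous channel has completed: poll the CUDA or OpenCL event and
 * return a nonzero value once it has. The CUDA event is destroyed as soon
 * as it is found complete. */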
unsigned _starpu_driver_test_request_completion(struct _starpu_async_channel *async_channel)
{
#ifdef STARPU_SIMGRID
	unsigned ret;
	_STARPU_PTHREAD_MUTEX_LOCK(&async_channel->event.mutex);
	ret = async_channel->event.finished;
	_STARPU_PTHREAD_MUTEX_UNLOCK(&async_channel->event.mutex);
	return ret;
#else /* !SIMGRID */
	enum starpu_node_kind kind = async_channel->type;
	unsigned success = 0;
#ifdef STARPU_USE_CUDA
	cudaEvent_t event;
#endif

	switch (kind)
	{
#ifdef STARPU_USE_CUDA
	case STARPU_CUDA_RAM:
	{
		event = async_channel->event.cuda_event;
		cudaError_t cures = cudaEventQuery(event);

		success = (cures == cudaSuccess);
		if (success)
			cudaEventDestroy(event);
		else if (cures != cudaErrorNotReady)
			STARPU_CUDA_REPORT_ERROR(cures);
		break;
	}
#endif
#ifdef STARPU_USE_OPENCL
	case STARPU_OPENCL_RAM:
	{
		cl_int event_status;
		cl_event opencl_event = async_channel->event.opencl_event;

		if (opencl_event == NULL)
			STARPU_ABORT();

		cl_int err = clGetEventInfo(opencl_event, CL_EVENT_COMMAND_EXECUTION_STATUS, sizeof(event_status), &event_status, NULL);
		if (STARPU_UNLIKELY(err != CL_SUCCESS))
			STARPU_OPENCL_REPORT_ERROR(err);
		if (event_status < 0)
			STARPU_OPENCL_REPORT_ERROR(event_status);

		success = (event_status == CL_COMPLETE);
		break;
	}
#endif
	case STARPU_CPU_RAM:
	default:
		STARPU_ABORT();
	}

	return success;
#endif /* !SIMGRID */
}