driver_cuda.c

/* StarPU --- Runtime system for heterogeneous multicore architectures.
 *
 * Copyright (C) 2009-2013  Université de Bordeaux 1
 * Copyright (C) 2010  Mehdi Juhoor <mjuhoor@gmail.com>
 * Copyright (C) 2010, 2011, 2012, 2013  Centre National de la Recherche Scientifique
 * Copyright (C) 2011  Télécom-SudParis
 *
 * StarPU is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation; either version 2.1 of the License, or (at
 * your option) any later version.
 *
 * StarPU is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * See the GNU Lesser General Public License in COPYING.LGPL for more details.
 */

#include <starpu.h>
#include <starpu_cuda.h>
#include <starpu_profiling.h>
#include <common/utils.h>
#include <common/config.h>
#include <core/debug.h>
#include <drivers/driver_common/driver_common.h>
#include "driver_cuda.h"
#include <core/sched_policy.h>
#ifdef HAVE_CUDA_GL_INTEROP_H
#include <cuda_gl_interop.h>
#endif
#include <datawizard/memory_manager.h>
#include <datawizard/malloc.h>
#ifdef STARPU_SIMGRID
#include <core/simgrid.h>
#endif

/* the number of CUDA devices */
static int ncudagpus;

static size_t global_mem[STARPU_NMAXWORKERS];
#ifdef STARPU_USE_CUDA
static cudaStream_t streams[STARPU_NMAXWORKERS];
static cudaStream_t out_transfer_streams[STARPU_NMAXWORKERS];
static cudaStream_t in_transfer_streams[STARPU_NMAXWORKERS];
static cudaStream_t peer_transfer_streams[STARPU_NMAXWORKERS];
static struct cudaDeviceProp props[STARPU_MAXCUDADEVS];
#endif /* STARPU_USE_CUDA */
void
_starpu_cuda_discover_devices (struct _starpu_machine_config *config)
{
	/* Discover the number of CUDA devices. Fill the result in CONFIG. */
#ifdef STARPU_SIMGRID
	config->topology.nhwcudagpus = _starpu_simgrid_get_nbhosts("CUDA");
#else
	int cnt;
	cudaError_t cures;

	cures = cudaGetDeviceCount (&cnt);
	if (STARPU_UNLIKELY(cures != cudaSuccess))
		cnt = 0;
	config->topology.nhwcudagpus = cnt;
#endif
}
/* In case we want to cap the amount of memory available on the GPUs by
 * means of the STARPU_LIMIT_CUDA_MEM environment variable, we decrease the
 * value of global_mem[devid], which is the value returned by
 * _starpu_cuda_get_global_mem_size() to indicate how much memory can
 * be allocated on the device.
 */
static void _starpu_cuda_limit_gpu_mem_if_needed(unsigned devid)
{
	starpu_ssize_t limit;
	size_t STARPU_ATTRIBUTE_UNUSED totalGlobalMem = 0;
	size_t STARPU_ATTRIBUTE_UNUSED to_waste = 0;
	char name[30];

#ifdef STARPU_USE_CUDA
	global_mem[devid] = props[devid].totalGlobalMem;
#endif

	limit = starpu_get_env_number("STARPU_LIMIT_CUDA_MEM");
	if (limit == -1)
	{
		sprintf(name, "STARPU_LIMIT_CUDA_%u_MEM", devid);
		limit = starpu_get_env_number(name);
	}
	if (limit == -1)
	{
		return;
	}

	global_mem[devid] = limit * 1024*1024;

#ifdef STARPU_USE_CUDA
	/* Find the size of the memory on the device */
	totalGlobalMem = props[devid].totalGlobalMem;

	/* How much memory to waste ? */
	to_waste = totalGlobalMem - global_mem[devid];

	props[devid].totalGlobalMem -= to_waste;
#endif /* STARPU_USE_CUDA */

	_STARPU_DEBUG("CUDA device %u: Wasting %ld MB / Limit %ld MB / Total %ld MB / Remains %ld MB\n",
	              devid, (long) to_waste/(1024*1024), (long) limit, (long) totalGlobalMem/(1024*1024),
	              (long) (totalGlobalMem - to_waste)/(1024*1024));
}
#ifdef STARPU_USE_CUDA
cudaStream_t starpu_cuda_get_local_in_transfer_stream(void)
{
	int worker = starpu_worker_get_id();

	return in_transfer_streams[worker];
}

cudaStream_t starpu_cuda_get_local_out_transfer_stream(void)
{
	int worker = starpu_worker_get_id();

	return out_transfer_streams[worker];
}

cudaStream_t starpu_cuda_get_local_peer_transfer_stream(void)
{
	int worker = starpu_worker_get_id();

	return peer_transfer_streams[worker];
}

cudaStream_t starpu_cuda_get_local_stream(void)
{
	int worker = starpu_worker_get_id();

	return streams[worker];
}

const struct cudaDeviceProp *starpu_cuda_get_device_properties(unsigned workerid)
{
	struct _starpu_machine_config *config = _starpu_get_machine_config();
	unsigned devid = config->workers[workerid].devid;
	return &props[devid];
}
#endif /* STARPU_USE_CUDA */
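
/* Make DEVID the current CUDA device of the calling thread, going through
 * cudaGLSetGLDevice() instead of cudaSetDevice() when OpenGL
 * interoperability was requested for that device. */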
void starpu_cuda_set_device(unsigned devid STARPU_ATTRIBUTE_UNUSED)
{
#ifdef STARPU_SIMGRID
	STARPU_ABORT();
#else
	cudaError_t cures;
	struct starpu_conf *conf = _starpu_get_machine_config()->conf;
#if !defined(HAVE_CUDA_MEMCPY_PEER) && defined(HAVE_CUDA_GL_INTEROP_H)
	unsigned i;
#endif

#ifdef HAVE_CUDA_MEMCPY_PEER
	if (conf->n_cuda_opengl_interoperability)
	{
		fprintf(stderr, "OpenGL interoperability was requested, but StarPU was built with multithread GPU control support, please reconfigure with --disable-cuda-memcpy-peer, but that will disable the memcpy-peer optimizations\n");
		STARPU_ABORT();
	}
#elif !defined(HAVE_CUDA_GL_INTEROP_H)
	if (conf->n_cuda_opengl_interoperability)
	{
		fprintf(stderr, "OpenGL interoperability was requested, but cuda_gl_interop.h could not be compiled, please make sure that OpenGL headers were available before the ./configure run.\n");
		STARPU_ABORT();
	}
#else
	for (i = 0; i < conf->n_cuda_opengl_interoperability; i++)
		if (conf->cuda_opengl_interoperability[i] == devid)
		{
			cures = cudaGLSetGLDevice(devid);
			goto done;
		}
#endif

	cures = cudaSetDevice(devid);
#if !defined(HAVE_CUDA_MEMCPY_PEER) && defined(HAVE_CUDA_GL_INTEROP_H)
done:
#endif
	if (STARPU_UNLIKELY(cures))
		STARPU_CUDA_REPORT_ERROR(cures);
#endif
}
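
/* Per-worker CUDA context management (not compiled in SimGrid simulation
 * mode): init_context() selects the device, optionally enables GPU-GPU
 * peer access, and creates the kernel and transfer streams of the worker;
 * deinit_context() destroys them again. */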
#ifndef STARPU_SIMGRID
static void init_context(unsigned devid)
{
	cudaError_t cures;
	int workerid;

	/* TODO: cudaSetDeviceFlag(cudaDeviceMapHost) */

	starpu_cuda_set_device(devid);

#ifdef HAVE_CUDA_MEMCPY_PEER
	if (starpu_get_env_number("STARPU_ENABLE_CUDA_GPU_GPU_DIRECT") > 0)
	{
		int nworkers = starpu_worker_get_count();
		for (workerid = 0; workerid < nworkers; workerid++)
		{
			struct _starpu_worker *worker = _starpu_get_worker_struct(workerid);
			if (worker->arch == STARPU_CUDA_WORKER && worker->devid != devid)
			{
				int can;
				cures = cudaDeviceCanAccessPeer(&can, devid, worker->devid);
				if (!cures && can)
				{
					cures = cudaDeviceEnablePeerAccess(worker->devid, 0);
					if (!cures)
						_STARPU_DEBUG("Enabled GPU-Direct %d -> %d\n", worker->devid, devid);
				}
			}
		}
	}
#endif

	/* force CUDA to initialize the context for real */
	cures = cudaFree(0);
	if (STARPU_UNLIKELY(cures))
	{
		if (cures == cudaErrorDevicesUnavailable)
		{
			fprintf(stderr, "All CUDA-capable devices are busy or unavailable\n");
			exit(77);
		}
		STARPU_CUDA_REPORT_ERROR(cures);
	}

	cures = cudaGetDeviceProperties(&props[devid], devid);
	if (STARPU_UNLIKELY(cures))
		STARPU_CUDA_REPORT_ERROR(cures);

#ifdef HAVE_CUDA_MEMCPY_PEER
	if (props[devid].computeMode == cudaComputeModeExclusive)
	{
		fprintf(stderr, "CUDA is in EXCLUSIVE-THREAD mode, but StarPU was built with multithread GPU control support, please either ask your administrator to use EXCLUSIVE-PROCESS mode (which should really be fine), or reconfigure with --disable-cuda-memcpy-peer, but that will disable the memcpy-peer optimizations\n");
		STARPU_ABORT();
	}
#endif

	workerid = starpu_worker_get_id();

	cures = cudaStreamCreate(&streams[workerid]);
	if (STARPU_UNLIKELY(cures))
		STARPU_CUDA_REPORT_ERROR(cures);

	cures = cudaStreamCreate(&in_transfer_streams[workerid]);
	if (STARPU_UNLIKELY(cures))
		STARPU_CUDA_REPORT_ERROR(cures);

	cures = cudaStreamCreate(&out_transfer_streams[workerid]);
	if (STARPU_UNLIKELY(cures))
		STARPU_CUDA_REPORT_ERROR(cures);

	cures = cudaStreamCreate(&peer_transfer_streams[workerid]);
	if (STARPU_UNLIKELY(cures))
		STARPU_CUDA_REPORT_ERROR(cures);
}

static void deinit_context(int workerid)
{
	cudaError_t cures;

	cudaStreamDestroy(streams[workerid]);
	cudaStreamDestroy(in_transfer_streams[workerid]);
	cudaStreamDestroy(out_transfer_streams[workerid]);
	cudaStreamDestroy(peer_transfer_streams[workerid]);

	/* cleanup the runtime API internal stuffs (which CUBLAS is using) */
	cures = cudaThreadExit();
	if (cures)
		STARPU_CUDA_REPORT_ERROR(cures);
}
#endif /* !SIMGRID */
static size_t _starpu_cuda_get_global_mem_size(unsigned devid)
{
	return global_mem[devid];
}

/* Return the number of devices usable in the system.
 * The value returned cannot be greater than MAXCUDADEVS */
unsigned _starpu_get_cuda_device_count(void)
{
#ifdef STARPU_SIMGRID
	return _starpu_simgrid_get_nbhosts("CUDA");
#else
	int cnt;
	cudaError_t cures;

	cures = cudaGetDeviceCount(&cnt);
	if (STARPU_UNLIKELY(cures))
		return 0;

	if (cnt > STARPU_MAXCUDADEVS)
	{
		fprintf(stderr, "# Warning: %d CUDA devices available. Only %d enabled. Use configure option --enable-maxcudadev=xxx to update the maximum value of supported CUDA devices.\n", cnt, STARPU_MAXCUDADEVS);
		cnt = STARPU_MAXCUDADEVS;
	}
	return (unsigned)cnt;
#endif
}

void _starpu_init_cuda(void)
{
	ncudagpus = _starpu_get_cuda_device_count();
	STARPU_ASSERT(ncudagpus <= STARPU_MAXCUDADEVS);
}
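
/* Fetch the input data of job J, run its CUDA implementation on worker
 * ARGS, then push the output data. Return -EAGAIN when the input data
 * could not be fetched (e.g. not enough device memory), so that the task
 * can be put back and retried later. */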
static int execute_job_on_cuda(struct _starpu_job *j, struct _starpu_worker *args)
{
	int ret;
	uint32_t mask = 0;

	STARPU_ASSERT(j);
	struct starpu_task *task = j->task;

	struct timespec codelet_start, codelet_end;

	int profiling = starpu_profiling_status_get();

	STARPU_ASSERT(task);
	struct starpu_codelet *cl = task->cl;
	STARPU_ASSERT(cl);

	ret = _starpu_fetch_task_input(j, mask);
	if (ret != 0)
	{
		/* there was not enough memory, so the input of
		 * the codelet cannot be fetched ... put the
		 * codelet back, and try it later */
		return -EAGAIN;
	}

	_starpu_driver_start_job(args, j, &codelet_start, 0, profiling);

#if defined(HAVE_CUDA_MEMCPY_PEER) && !defined(STARPU_SIMGRID)
	/* We make sure we do manipulate the proper device */
	starpu_cuda_set_device(args->devid);
#endif

	starpu_cuda_func_t func = _starpu_task_get_cuda_nth_implementation(cl, j->nimpl);
	STARPU_ASSERT(func);
#ifdef STARPU_SIMGRID
	_starpu_simgrid_execute_job(j, &args->perf_arch, NAN);
#else
	func(_STARPU_TASK_GET_INTERFACES(task), task->cl_arg);
#endif

	_starpu_driver_end_job(args, j, &args->perf_arch, &codelet_end, 0, profiling);

	_starpu_driver_update_job_feedback(j, args, &args->perf_arch, &codelet_start, &codelet_end, profiling);

	_starpu_push_task_output(j, mask);

	return 0;
}
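
/* Find the _starpu_worker structure that matches the CUDA device
 * designated by D, or return NULL when no such worker exists. */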
static struct _starpu_worker*
_starpu_get_worker_from_driver(struct starpu_driver *d)
{
	unsigned nworkers = starpu_worker_get_count();
	unsigned workerid;
	for (workerid = 0; workerid < nworkers; workerid++)
	{
		if (starpu_worker_get_type(workerid) == d->type)
		{
			struct _starpu_worker *worker;
			worker = _starpu_get_worker_struct(workerid);
			if (worker->devid == d->id.cuda_id)
				return worker;
		}
	}
	return NULL;
}
/* XXX Should this be merged with _starpu_init_cuda ? */
int _starpu_cuda_driver_init(struct starpu_driver *d)
{
	struct _starpu_worker* args = _starpu_get_worker_from_driver(d);
	STARPU_ASSERT(args);

	unsigned devid = args->devid;

	_starpu_worker_start(args, _STARPU_FUT_CUDA_KEY);

#ifndef STARPU_SIMGRID
	init_context(devid);
#endif

	_starpu_cuda_limit_gpu_mem_if_needed(devid);
	_starpu_memory_manager_set_global_memory_size(args->memory_node, _starpu_cuda_get_global_mem_size(devid));
	_starpu_malloc_init(args->memory_node);

	/* one more time to avoid hacks from third party lib :) */
	_starpu_bind_thread_on_cpu(args->config, args->bindid);

	args->status = STATUS_UNKNOWN;

	float size = (float) global_mem[devid] / (1<<30);
#ifdef STARPU_SIMGRID
	const char *devname = "Simgrid";
#else
	/* get the device's name */
	char devname[128];
	strncpy(devname, props[devid].name, 128);
#endif

#if defined(STARPU_HAVE_BUSID) && !defined(STARPU_SIMGRID)
#if defined(STARPU_HAVE_DOMAINID) && !defined(STARPU_SIMGRID)
	if (props[devid].pciDomainID)
		snprintf(args->name, sizeof(args->name), "CUDA %u (%s %.1f GiB %04x:%02x:%02x.0)", devid, devname, size, props[devid].pciDomainID, props[devid].pciBusID, props[devid].pciDeviceID);
	else
#endif
		snprintf(args->name, sizeof(args->name), "CUDA %u (%s %.1f GiB %02x:%02x.0)", devid, devname, size, props[devid].pciBusID, props[devid].pciDeviceID);
#else
	snprintf(args->name, sizeof(args->name), "CUDA %u (%s %.1f GiB)", devid, devname, size);
#endif
	snprintf(args->short_name, sizeof(args->short_name), "CUDA %u", devid);

	_STARPU_DEBUG("cuda (%s) dev id %u thread is ready to run on CPU %d !\n", devname, devid, args->bindid);

	_STARPU_TRACE_WORKER_INIT_END;

	/* tell the main thread that this one is ready */
	STARPU_PTHREAD_MUTEX_LOCK(&args->mutex);
	args->worker_is_initialized = 1;
	STARPU_PTHREAD_COND_SIGNAL(&args->ready_cond);
	STARPU_PTHREAD_MUTEX_UNLOCK(&args->mutex);

	return 0;
}
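
/* One scheduling iteration of a CUDA worker: make the data transfers of
 * its memory node progress, pick a task, and execute it when it has a
 * CUDA (or CUBLAS) implementation, otherwise push it back to the
 * scheduler. */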
int _starpu_cuda_driver_run_once(struct starpu_driver *d)
{
	struct _starpu_worker* args = _starpu_get_worker_from_driver(d);
	STARPU_ASSERT(args);

	unsigned memnode = args->memory_node;
	int workerid = args->workerid;

	_STARPU_TRACE_START_PROGRESS(memnode);
	_starpu_datawizard_progress(memnode, 1);
	_STARPU_TRACE_END_PROGRESS(memnode);

	struct starpu_task *task;
	struct _starpu_job *j = NULL;

	task = _starpu_get_worker_task(args, workerid, memnode);

	if (!task)
		return 0;

	j = _starpu_get_job_associated_to_task(task);

	/* can CUDA do that task ? */
	if (!_STARPU_CUDA_MAY_PERFORM(j))
	{
		/* this is neither a CUDA nor a CUBLAS task */
		_starpu_push_task_to_workers(task);
		return 0;
	}

	_starpu_set_current_task(task);
	args->current_task = j->task;

	int res = execute_job_on_cuda(j, args);

	_starpu_set_current_task(NULL);
	args->current_task = NULL;

	if (res)
	{
		switch (res)
		{
			case -EAGAIN:
				_STARPU_DISP("ouch, CUDA could not actually run task %p, putting it back...\n", task);
				_starpu_push_task_to_workers(task);
				STARPU_ABORT();
			default:
				STARPU_ABORT();
		}
	}

	_starpu_handle_job_termination(j);

	return 0;
}
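
/* Tear a CUDA worker down: flush the pending data requests of its memory
 * node, release the buffers StarPU allocated there, and destroy the
 * worker's CUDA context. */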
int _starpu_cuda_driver_deinit(struct starpu_driver *d)
{
	struct _starpu_worker* args = _starpu_get_worker_from_driver(d);
	STARPU_ASSERT(args);

	unsigned memnode = args->memory_node;

	_STARPU_TRACE_WORKER_DEINIT_START;

	_starpu_handle_all_pending_node_data_requests(memnode);

	/* In case there remains some memory that was automatically
	 * allocated by StarPU, we release it now. Note that data
	 * coherency is not maintained anymore at that point ! */
	_starpu_free_all_automatically_allocated_buffers(memnode);

	_starpu_malloc_shutdown(memnode);

#ifndef STARPU_SIMGRID
	deinit_context(args->workerid);
#endif

	_STARPU_TRACE_WORKER_DEINIT_END(_STARPU_FUT_CUDA_KEY);

	return 0;
}
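
/* Main function of the CUDA worker thread: initialize the driver, loop on
 * _starpu_cuda_driver_run_once() while the machine is running, then
 * deinitialize. */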
void *_starpu_cuda_worker(void *arg)
{
	struct _starpu_worker* args = arg;
	struct starpu_driver d =
	{
		.type = STARPU_CUDA_WORKER,
		.id.cuda_id = args->devid
	};

	_starpu_cuda_driver_init(&d);
	while (_starpu_machine_is_running())
		_starpu_cuda_driver_run_once(&d);
	_starpu_cuda_driver_deinit(&d);

	return NULL;
}
#ifdef STARPU_USE_CUDA
void starpu_cublas_report_error(const char *func, const char *file, int line, cublasStatus status)
{
	char *errormsg;
	switch (status)
	{
		case CUBLAS_STATUS_SUCCESS:
			errormsg = "success";
			break;
		case CUBLAS_STATUS_NOT_INITIALIZED:
			errormsg = "not initialized";
			break;
		case CUBLAS_STATUS_ALLOC_FAILED:
			errormsg = "alloc failed";
			break;
		case CUBLAS_STATUS_INVALID_VALUE:
			errormsg = "invalid value";
			break;
		case CUBLAS_STATUS_ARCH_MISMATCH:
			errormsg = "arch mismatch";
			break;
		case CUBLAS_STATUS_EXECUTION_FAILED:
			errormsg = "execution failed";
			break;
		case CUBLAS_STATUS_INTERNAL_ERROR:
			errormsg = "internal error";
			break;
		default:
			errormsg = "unknown error";
			break;
	}
	fprintf(stderr, "oops in %s (%s:%d)... %d: %s \n", func, file, line, status, errormsg);
	STARPU_ABORT();
}

void starpu_cuda_report_error(const char *func, const char *file, int line, cudaError_t status)
{
	const char *errormsg = cudaGetErrorString(status);
	fprintf(stderr, "oops in %s (%s:%d)... %d: %s \n", func, file, line, status, errormsg);
	STARPU_ABORT();
}
#endif /* STARPU_USE_CUDA */
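
/* Copy SSIZE bytes from SRC_PTR on SRC_NODE to DST_PTR on DST_NODE.
 * When STREAM is non-NULL, try an asynchronous copy first and return
 * -EAGAIN if it was successfully submitted; otherwise (no stream given,
 * or the asynchronous submission failed) perform a synchronous copy and
 * return 0. Device-to-device copies between different memory nodes go
 * through cudaMemcpyPeer(Async) when available. */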
#ifdef STARPU_USE_CUDA
int
starpu_cuda_copy_async_sync(void *src_ptr, unsigned src_node,
			    void *dst_ptr, unsigned dst_node,
			    size_t ssize, cudaStream_t stream,
			    enum cudaMemcpyKind kind)
{
#ifdef HAVE_CUDA_MEMCPY_PEER
	int peer_copy = 0;
	int src_dev = -1, dst_dev = -1;
#endif
	cudaError_t cures = 0;

	if (kind == cudaMemcpyDeviceToDevice && src_node != dst_node)
	{
#ifdef HAVE_CUDA_MEMCPY_PEER
		peer_copy = 1;
		src_dev = _starpu_memory_node_get_devid(src_node);
		dst_dev = _starpu_memory_node_get_devid(dst_node);
#else
		STARPU_ABORT();
#endif
	}

	if (stream)
	{
		_STARPU_TRACE_START_DRIVER_COPY_ASYNC(src_node, dst_node);
#ifdef HAVE_CUDA_MEMCPY_PEER
		if (peer_copy)
		{
			cures = cudaMemcpyPeerAsync((char *) dst_ptr, dst_dev,
						    (char *) src_ptr, src_dev,
						    ssize, stream);
		}
		else
#endif
		{
			cures = cudaMemcpyAsync((char *)dst_ptr, (char *)src_ptr, ssize, kind, stream);
		}
		_STARPU_TRACE_END_DRIVER_COPY_ASYNC(src_node, dst_node);
	}

	/* Test if the asynchronous copy has failed or if the caller only asked for a synchronous copy */
	if (stream == NULL || cures)
	{
		/* do it in a synchronous fashion */
#ifdef HAVE_CUDA_MEMCPY_PEER
		if (peer_copy)
		{
			cures = cudaMemcpyPeer((char *) dst_ptr, dst_dev,
					       (char *) src_ptr, src_dev,
					       ssize);
		}
		else
#endif
		{
			cures = cudaMemcpy((char *)dst_ptr, (char *)src_ptr, ssize, kind);
		}

		if (STARPU_UNLIKELY(cures))
			STARPU_CUDA_REPORT_ERROR(cures);

		return 0;
	}

	return -EAGAIN;
}
#endif /* STARPU_USE_CUDA */
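
/* Entry point used when the application drives the CUDA worker itself
 * through the starpu_driver interface instead of letting StarPU spawn the
 * worker thread. */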
int _starpu_run_cuda(struct starpu_driver *d)
{
	STARPU_ASSERT(d && d->type == STARPU_CUDA_WORKER);

	int workerid = starpu_worker_get_by_devid(STARPU_CUDA_WORKER, d->id.cuda_id);

	_STARPU_DEBUG("Running cuda %u from the application\n", d->id.cuda_id);

	struct _starpu_worker *workerarg = _starpu_get_worker_struct(workerid);

	workerarg->set = NULL;
	workerarg->worker_is_initialized = 0;

	/* Let's go ! */
	_starpu_cuda_worker(workerarg);

	/* XXX: Should we wait for the driver to be ready, as it is done when
	 * launching it the usual way ? Cf. the end of _starpu_launch_drivers()
	 */

	return 0;
}