driver_cuda.c

/* StarPU --- Runtime system for heterogeneous multicore architectures.
 *
 * Copyright (C) 2011-2012,2014,2016-2017 Inria
 * Copyright (C) 2008-2018 Université de Bordeaux
 * Copyright (C) 2010 Mehdi Juhoor
 * Copyright (C) 2010-2017 CNRS
 * Copyright (C) 2011 Télécom-SudParis
 * Copyright (C) 2016 Uppsala University
 *
 * StarPU is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation; either version 2.1 of the License, or (at
 * your option) any later version.
 *
 * StarPU is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * See the GNU Lesser General Public License in COPYING.LGPL for more details.
 */

#include <starpu.h>
#include <starpu_cuda.h>
#include <starpu_profiling.h>
#include <common/utils.h>
#include <common/config.h>
#include <core/debug.h>
#include <drivers/driver_common/driver_common.h>
#include "driver_cuda.h"
#include <core/sched_policy.h>
#ifdef HAVE_CUDA_GL_INTEROP_H
#include <cuda_gl_interop.h>
#endif
#ifdef HAVE_LIBNVIDIA_ML
#include <nvml.h>
#endif
#include <datawizard/memory_manager.h>
#include <datawizard/memory_nodes.h>
#include <datawizard/malloc.h>
#include <core/task.h>
#ifdef STARPU_SIMGRID
#include <core/simgrid.h>
#endif

#ifdef STARPU_USE_CUDA
#if CUDART_VERSION >= 5000
/* Avoid letting our streams spuriously synchronize with the NULL stream */
#define starpu_cudaStreamCreate(stream) cudaStreamCreateWithFlags(stream, cudaStreamNonBlocking)
#else
#define starpu_cudaStreamCreate(stream) cudaStreamCreate(stream)
#endif
#endif

/* the number of CUDA devices */
static int ncudagpus = -1;

static size_t global_mem[STARPU_MAXCUDADEVS];
#ifdef HAVE_LIBNVIDIA_ML
static nvmlDevice_t nvmlDev[STARPU_MAXCUDADEVS];
#endif
int _starpu_cuda_bus_ids[STARPU_MAXCUDADEVS+STARPU_MAXNUMANODES][STARPU_MAXCUDADEVS+STARPU_MAXNUMANODES];
#ifdef STARPU_USE_CUDA
static cudaStream_t streams[STARPU_NMAXWORKERS];
static char used_stream[STARPU_NMAXWORKERS];
static cudaStream_t out_transfer_streams[STARPU_MAXCUDADEVS];
static cudaStream_t in_transfer_streams[STARPU_MAXCUDADEVS];
/* Note: streams are not thread-safe, so we define them for each CUDA worker
 * emitting a GPU-GPU transfer */
static cudaStream_t in_peer_transfer_streams[STARPU_MAXCUDADEVS][STARPU_MAXCUDADEVS];
static struct cudaDeviceProp props[STARPU_MAXCUDADEVS];
#ifndef STARPU_SIMGRID
static cudaEvent_t task_events[STARPU_NMAXWORKERS][STARPU_MAX_PIPELINE];
#endif
#endif /* STARPU_USE_CUDA */
#ifdef STARPU_SIMGRID
static unsigned task_finished[STARPU_NMAXWORKERS][STARPU_MAX_PIPELINE];
#endif /* STARPU_SIMGRID */

static enum initialization cuda_device_init[STARPU_MAXCUDADEVS];
static int cuda_device_users[STARPU_MAXCUDADEVS];
static starpu_pthread_mutex_t cuda_device_init_mutex[STARPU_MAXCUDADEVS];
static starpu_pthread_cond_t cuda_device_init_cond[STARPU_MAXCUDADEVS];
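
/* Per-device initialization handshake: cuda_device_init[] goes
 * UNINITIALIZED -> CHANGING -> INITIALIZED under cuda_device_init_mutex[];
 * the first worker of a device performs the actual initialization while the
 * other workers of that device wait on cuda_device_init_cond[].
 * cuda_device_users[] counts the workers using the device so that the last
 * one to leave can deinitialize it (see init_device_context() and
 * _starpu_cuda_driver_deinit() below). */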
void _starpu_cuda_init(void)
{
	unsigned i;
	for (i = 0; i < STARPU_MAXCUDADEVS; i++)
	{
		STARPU_PTHREAD_MUTEX_INIT(&cuda_device_init_mutex[i], NULL);
		STARPU_PTHREAD_COND_INIT(&cuda_device_init_cond[i], NULL);
	}
}

static size_t _starpu_cuda_get_global_mem_size(unsigned devid)
{
	return global_mem[devid];
}

void
_starpu_cuda_discover_devices (struct _starpu_machine_config *config)
{
	/* Discover the number of CUDA devices. Fill the result in CONFIG. */
#ifdef STARPU_SIMGRID
	config->topology.nhwcudagpus = _starpu_simgrid_get_nbhosts("CUDA");
#else
	int cnt;
	cudaError_t cures;

	cures = cudaGetDeviceCount (&cnt);
	if (STARPU_UNLIKELY(cures != cudaSuccess))
		cnt = 0;
	config->topology.nhwcudagpus = cnt;
#ifdef HAVE_LIBNVIDIA_ML
	nvmlInit();
#endif
#endif
}

/* In case we want to cap the amount of memory available on the GPUs by means
 * of the STARPU_LIMIT_CUDA_MEM environment variable, we decrease the value of
 * global_mem[devid], which is the value returned by
 * _starpu_cuda_get_global_mem_size(), to indicate how much memory can
 * be allocated on the device
 */
static void _starpu_cuda_limit_gpu_mem_if_needed(unsigned devid)
{
	starpu_ssize_t limit;
	size_t STARPU_ATTRIBUTE_UNUSED totalGlobalMem = 0;
	size_t STARPU_ATTRIBUTE_UNUSED to_waste = 0;

#ifdef STARPU_SIMGRID
	totalGlobalMem = _starpu_simgrid_get_memsize("CUDA", devid);
#elif defined(STARPU_USE_CUDA)
	/* Find the size of the memory on the device */
	totalGlobalMem = props[devid].totalGlobalMem;
#endif

	limit = starpu_get_env_number("STARPU_LIMIT_CUDA_MEM");
	if (limit == -1)
	{
		char name[30];
		snprintf(name, sizeof(name), "STARPU_LIMIT_CUDA_%u_MEM", devid);
		limit = starpu_get_env_number(name);
	}
#if defined(STARPU_USE_CUDA) || defined(STARPU_SIMGRID)
	if (limit == -1)
	{
		/* Use 90% of the available memory by default. */
		limit = totalGlobalMem / (1024*1024) * 0.9;
	}
#endif

	global_mem[devid] = limit * 1024*1024;

#ifdef STARPU_USE_CUDA
	/* How much memory do we have to waste? */
	to_waste = totalGlobalMem - global_mem[devid];
	props[devid].totalGlobalMem -= to_waste;
#endif /* STARPU_USE_CUDA */

	_STARPU_DEBUG("CUDA device %u: Wasting %ld MB / Limit %ld MB / Total %ld MB / Remains %ld MB\n",
	              devid, (long) to_waste/(1024*1024), (long) limit, (long) totalGlobalMem/(1024*1024),
	              (long) (totalGlobalMem - to_waste)/(1024*1024));
}

#ifdef STARPU_USE_CUDA
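/* Each device gets dedicated transfer streams (one for incoming transfers,
 * one for outgoing transfers, and one per peer device for GPU-GPU transfers),
 * so that data movement can overlap with kernel execution on the per-worker
 * computation streams. The getters below simply look the streams up by
 * worker, device, or memory node. */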
cudaStream_t starpu_cuda_get_local_in_transfer_stream()
{
	int worker = starpu_worker_get_id_check();
	int devid = starpu_worker_get_devid(worker);
	cudaStream_t stream;

	stream = in_transfer_streams[devid];
	STARPU_ASSERT(stream);
	return stream;
}

cudaStream_t starpu_cuda_get_in_transfer_stream(unsigned dst_node)
{
	int dst_devid = _starpu_memory_node_get_devid(dst_node);
	cudaStream_t stream;

	stream = in_transfer_streams[dst_devid];
	STARPU_ASSERT(stream);
	return stream;
}

cudaStream_t starpu_cuda_get_local_out_transfer_stream()
{
	int worker = starpu_worker_get_id_check();
	int devid = starpu_worker_get_devid(worker);
	cudaStream_t stream;

	stream = out_transfer_streams[devid];
	STARPU_ASSERT(stream);
	return stream;
}

cudaStream_t starpu_cuda_get_out_transfer_stream(unsigned src_node)
{
	int src_devid = _starpu_memory_node_get_devid(src_node);
	cudaStream_t stream;

	stream = out_transfer_streams[src_devid];
	STARPU_ASSERT(stream);
	return stream;
}

cudaStream_t starpu_cuda_get_peer_transfer_stream(unsigned src_node, unsigned dst_node)
{
	int src_devid = _starpu_memory_node_get_devid(src_node);
	int dst_devid = _starpu_memory_node_get_devid(dst_node);
	cudaStream_t stream;

	stream = in_peer_transfer_streams[src_devid][dst_devid];
	STARPU_ASSERT(stream);
	return stream;
}
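
/* Kernels should be submitted on the stream returned below so that StarPU can
 * overlap transfers and computation; used_stream[] records that the worker
 * did so (see the warning in execute_job_on_cuda()). A typical asynchronous
 * codelet would look like the following sketch, where `my_kernel' is a
 * hypothetical kernel and the codelet sets the STARPU_CUDA_ASYNC flag:
 *
 *   void my_codelet(void *buffers[], void *cl_arg)
 *   {
 *           my_kernel<<<grid, block, 0, starpu_cuda_get_local_stream()>>>(...);
 *   }
 */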
cudaStream_t starpu_cuda_get_local_stream(void)
{
	int worker = starpu_worker_get_id_check();

	used_stream[worker] = 1;
	return streams[worker];
}

const struct cudaDeviceProp *starpu_cuda_get_device_properties(unsigned workerid)
{
	struct _starpu_machine_config *config = _starpu_get_machine_config();
	unsigned devid = config->workers[workerid].devid;
	return &props[devid];
}
#endif /* STARPU_USE_CUDA */

void starpu_cuda_set_device(unsigned devid STARPU_ATTRIBUTE_UNUSED)
{
#ifdef STARPU_SIMGRID
	STARPU_ABORT();
#else
	cudaError_t cures;
	struct starpu_conf *conf = &_starpu_get_machine_config()->conf;
#if !defined(STARPU_HAVE_CUDA_MEMCPY_PEER) && defined(HAVE_CUDA_GL_INTEROP_H)
	unsigned i;
#endif

#ifdef STARPU_HAVE_CUDA_MEMCPY_PEER
	if (conf->n_cuda_opengl_interoperability)
	{
		_STARPU_MSG("OpenGL interoperability was requested, but StarPU was built with multithread GPU control support, please reconfigure with --disable-cuda-memcpy-peer (this will however disable the memcpy-peer optimizations)\n");
		STARPU_ABORT();
	}
#elif !defined(HAVE_CUDA_GL_INTEROP_H)
	if (conf->n_cuda_opengl_interoperability)
	{
		_STARPU_MSG("OpenGL interoperability was requested, but cuda_gl_interop.h could not be compiled, please make sure that OpenGL headers were available before the ./configure run.\n");
		STARPU_ABORT();
	}
#else
	for (i = 0; i < conf->n_cuda_opengl_interoperability; i++)
	{
		if (conf->cuda_opengl_interoperability[i] == devid)
		{
			cures = cudaGLSetGLDevice(devid);
			goto done;
		}
	}
#endif

	cures = cudaSetDevice(devid);

#if !defined(STARPU_HAVE_CUDA_MEMCPY_PEER) && defined(HAVE_CUDA_GL_INTEROP_H)
done:
#endif
	if (STARPU_UNLIKELY(cures
#ifdef STARPU_OPENMP
		/* When StarPU is used as OpenMP runtime support,
		 * starpu_omp_shutdown() will usually be called from a
		 * destructor, in which case cudaThreadExit() reports a
		 * cudaErrorCudartUnloading here. There should not
		 * be any remaining tasks running at this point so
		 * we can probably ignore it without much consequence. */
		&& cures != cudaErrorCudartUnloading
#endif /* STARPU_OPENMP */
	    ))
		STARPU_CUDA_REPORT_ERROR(cures);
#endif
}
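
/* Initialize the CUDA context of a device: enable GPU-GPU peer access when
 * available, force context creation with a dummy cudaFree(0), fetch the
 * device properties, create the transfer streams, and register the amount of
 * memory StarPU may use on the device. Only the first worker of the device
 * performs this; the other workers wait through the handshake described
 * above. */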
static void init_device_context(unsigned devid, unsigned memnode)
{
#ifndef STARPU_SIMGRID
	cudaError_t cures;

	/* TODO: cudaSetDeviceFlags(cudaDeviceMapHost) */
	starpu_cuda_set_device(devid);
#endif /* !STARPU_SIMGRID */

	STARPU_PTHREAD_MUTEX_LOCK(&cuda_device_init_mutex[devid]);
	cuda_device_users[devid]++;
	if (cuda_device_init[devid] == UNINITIALIZED)
		/* Nobody started initialization yet, do it */
		cuda_device_init[devid] = CHANGING;
	else
	{
		/* Somebody else is doing initialization, wait for it */
		while (cuda_device_init[devid] != INITIALIZED)
			STARPU_PTHREAD_COND_WAIT(&cuda_device_init_cond[devid], &cuda_device_init_mutex[devid]);
		STARPU_PTHREAD_MUTEX_UNLOCK(&cuda_device_init_mutex[devid]);
		return;
	}
	STARPU_PTHREAD_MUTEX_UNLOCK(&cuda_device_init_mutex[devid]);

#ifndef STARPU_SIMGRID
#ifdef STARPU_HAVE_CUDA_MEMCPY_PEER
	if (starpu_get_env_number("STARPU_ENABLE_CUDA_GPU_GPU_DIRECT") != 0)
	{
		int nworkers = starpu_worker_get_count();
		int workerid;
		for (workerid = 0; workerid < nworkers; workerid++)
		{
			struct _starpu_worker *worker = _starpu_get_worker_struct(workerid);
			if (worker->arch == STARPU_CUDA_WORKER && worker->devid != devid)
			{
				int can;
				cures = cudaDeviceCanAccessPeer(&can, devid, worker->devid);
				if (!cures && can)
				{
					cures = cudaDeviceEnablePeerAccess(worker->devid, 0);
					if (!cures)
					{
						_STARPU_DEBUG("Enabled GPU-Direct %d -> %d\n", worker->devid, devid);
						/* direct copies are made from the destination, see link_supports_direct_transfers */
						starpu_bus_set_direct(_starpu_cuda_bus_ids[worker->devid+STARPU_MAXNUMANODES][devid+STARPU_MAXNUMANODES], 1);
					}
				}
			}
		}
	}
#endif

	/* force CUDA to initialize the context for real */
	cures = cudaFree(0);
	if (STARPU_UNLIKELY(cures))
	{
		if (cures == cudaErrorDevicesUnavailable)
		{
			_STARPU_MSG("All CUDA-capable devices are busy or unavailable\n");
			exit(77);
		}
		STARPU_CUDA_REPORT_ERROR(cures);
	}

	cures = cudaGetDeviceProperties(&props[devid], devid);
	if (STARPU_UNLIKELY(cures))
		STARPU_CUDA_REPORT_ERROR(cures);
#ifdef STARPU_HAVE_CUDA_MEMCPY_PEER
	if (props[devid].computeMode == cudaComputeModeExclusive)
	{
		_STARPU_MSG("CUDA is in EXCLUSIVE-THREAD mode, but StarPU was built with multithread GPU control support, please either ask your administrator to use EXCLUSIVE-PROCESS mode (which should really be fine), or reconfigure with --disable-cuda-memcpy-peer (this will however disable the memcpy-peer optimizations)\n");
		STARPU_ABORT();
	}
#endif

	cures = starpu_cudaStreamCreate(&in_transfer_streams[devid]);
	if (STARPU_UNLIKELY(cures))
		STARPU_CUDA_REPORT_ERROR(cures);
	cures = starpu_cudaStreamCreate(&out_transfer_streams[devid]);
	if (STARPU_UNLIKELY(cures))
		STARPU_CUDA_REPORT_ERROR(cures);

	int i;
	for (i = 0; i < ncudagpus; i++)
	{
		cures = starpu_cudaStreamCreate(&in_peer_transfer_streams[i][devid]);
		if (STARPU_UNLIKELY(cures))
			STARPU_CUDA_REPORT_ERROR(cures);
	}
#endif /* !STARPU_SIMGRID */

	STARPU_PTHREAD_MUTEX_LOCK(&cuda_device_init_mutex[devid]);
	cuda_device_init[devid] = INITIALIZED;
	STARPU_PTHREAD_COND_BROADCAST(&cuda_device_init_cond[devid]);
	STARPU_PTHREAD_MUTEX_UNLOCK(&cuda_device_init_mutex[devid]);

	_starpu_cuda_limit_gpu_mem_if_needed(devid);
	_starpu_memory_manager_set_global_memory_size(memnode, _starpu_cuda_get_global_mem_size(devid));
}
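
/* Per-worker setup: one computation stream per worker, plus one CUDA event
 * per pipeline slot so that the driver loop can poll for the termination of
 * asynchronously submitted tasks. */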
static void init_worker_context(unsigned workerid, unsigned devid STARPU_ATTRIBUTE_UNUSED)
{
	int j;
#ifdef STARPU_SIMGRID
	for (j = 0; j < STARPU_MAX_PIPELINE; j++)
		task_finished[workerid][j] = 0;
#else /* !STARPU_SIMGRID */
	cudaError_t cures;

	starpu_cuda_set_device(devid);
	for (j = 0; j < STARPU_MAX_PIPELINE; j++)
	{
		cures = cudaEventCreateWithFlags(&task_events[workerid][j], cudaEventDisableTiming);
		if (STARPU_UNLIKELY(cures))
			STARPU_CUDA_REPORT_ERROR(cures);
	}

	cures = starpu_cudaStreamCreate(&streams[workerid]);
	if (STARPU_UNLIKELY(cures))
		STARPU_CUDA_REPORT_ERROR(cures);
#endif /* !STARPU_SIMGRID */
}

#ifndef STARPU_SIMGRID
static void deinit_device_context(unsigned devid)
{
	int i;

	starpu_cuda_set_device(devid);

	cudaStreamDestroy(in_transfer_streams[devid]);
	cudaStreamDestroy(out_transfer_streams[devid]);
	for (i = 0; i < ncudagpus; i++)
	{
		cudaStreamDestroy(in_peer_transfer_streams[i][devid]);
	}
}
#endif /* !STARPU_SIMGRID */

static void deinit_worker_context(unsigned workerid, unsigned devid STARPU_ATTRIBUTE_UNUSED)
{
	unsigned j;
#ifdef STARPU_SIMGRID
	for (j = 0; j < STARPU_MAX_PIPELINE; j++)
		task_finished[workerid][j] = 0;
#else /* STARPU_SIMGRID */
	starpu_cuda_set_device(devid);
	for (j = 0; j < STARPU_MAX_PIPELINE; j++)
		cudaEventDestroy(task_events[workerid][j]);
	cudaStreamDestroy(streams[workerid]);
#endif /* STARPU_SIMGRID */
}

/* Return the number of devices usable in the system.
 * The value returned cannot be greater than STARPU_MAXCUDADEVS */
unsigned _starpu_get_cuda_device_count(void)
{
	int cnt;
#ifdef STARPU_SIMGRID
	cnt = _starpu_simgrid_get_nbhosts("CUDA");
#else
	cudaError_t cures;

	cures = cudaGetDeviceCount(&cnt);
	if (STARPU_UNLIKELY(cures))
		return 0;
#endif

	if (cnt > STARPU_MAXCUDADEVS)
	{
		_STARPU_MSG("# Warning: %d CUDA devices available. Only %d enabled. Use configure option --enable-maxcudadev=xxx to update the maximum value of supported CUDA devices.\n", cnt, STARPU_MAXCUDADEVS);
		cnt = STARPU_MAXCUDADEVS;
	}
	return (unsigned)cnt;
}

/* This is run from initialize to determine the number of CUDA devices */
void _starpu_init_cuda(void)
{
	if (ncudagpus < 0)
	{
		ncudagpus = _starpu_get_cuda_device_count();
		STARPU_ASSERT(ncudagpus <= STARPU_MAXCUDADEVS);
	}
}
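
/* Start the execution of a task: record its start time when the pipeline is
 * otherwise empty, select the proper device, and call the codelet's CUDA
 * implementation. With STARPU_CUDA_ASYNC the call merely queues the kernel on
 * the worker's stream; completion is detected later through task_events[]. */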
static int start_job_on_cuda(struct _starpu_job *j, struct _starpu_worker *worker, unsigned char pipeline_idx STARPU_ATTRIBUTE_UNUSED)
{
	STARPU_ASSERT(j);
	struct starpu_task *task = j->task;

	int profiling = starpu_profiling_status_get();

	STARPU_ASSERT(task);
	struct starpu_codelet *cl = task->cl;
	STARPU_ASSERT(cl);

	_starpu_set_local_worker_key(worker);
	_starpu_set_current_task(task);

	if (worker->ntasks == 1)
	{
		/* We are alone in the pipeline, the kernel will start now, record it */
		_starpu_driver_start_job(worker, j, &worker->perf_arch, 0, profiling);
	}

#if defined(STARPU_HAVE_CUDA_MEMCPY_PEER) && !defined(STARPU_SIMGRID)
	/* Make sure we manipulate the proper device */
	starpu_cuda_set_device(worker->devid);
#endif

	starpu_cuda_func_t func = _starpu_task_get_cuda_nth_implementation(cl, j->nimpl);
	STARPU_ASSERT_MSG(func, "when STARPU_CUDA is defined in 'where', cuda_func or cuda_funcs has to be defined");

	if (_starpu_get_disable_kernels() <= 0)
	{
		_STARPU_TRACE_START_EXECUTING();
#ifdef STARPU_SIMGRID
		int async = task->cl->cuda_flags[j->nimpl] & STARPU_CUDA_ASYNC;
		unsigned workerid = worker->workerid;
		if (cl->flags & STARPU_CODELET_SIMGRID_EXECUTE && !async)
			func(_STARPU_TASK_GET_INTERFACES(task), task->cl_arg);
		else if (cl->flags & STARPU_CODELET_SIMGRID_EXECUTE_AND_INJECT && !async)
		{
			_SIMGRID_TIMER_BEGIN(1);
			func(_STARPU_TASK_GET_INTERFACES(task), task->cl_arg);
			_SIMGRID_TIMER_END;
		}
		else
			_starpu_simgrid_submit_job(workerid, j, &worker->perf_arch, NAN,
				async ? &task_finished[workerid][pipeline_idx] : NULL);
#else
#ifdef HAVE_LIBNVIDIA_ML
		unsigned long long energy_start = 0;
		nvmlReturn_t nvmlRet = -1;
		if (profiling)
		{
			nvmlRet = nvmlDeviceGetTotalEnergyConsumption(nvmlDev[worker->devid], &energy_start);
			if (nvmlRet == NVML_SUCCESS)
				task->profiling_info->energy_consumed = energy_start / 1000.;
		}
#endif
		func(_STARPU_TASK_GET_INTERFACES(task), task->cl_arg);
#endif
		_STARPU_TRACE_END_EXECUTING();
	}

	return 0;
}

static void finish_job_on_cuda(struct _starpu_job *j, struct _starpu_worker *worker)
{
	int profiling = starpu_profiling_status_get();

#ifdef HAVE_LIBNVIDIA_ML
	if (profiling && j->task->profiling_info->energy_consumed)
	{
		unsigned long long energy_end;
		nvmlReturn_t nvmlRet = -1;
		nvmlRet = nvmlDeviceGetTotalEnergyConsumption(nvmlDev[worker->devid], &energy_end);
#ifdef STARPU_DEVEL
#warning TODO: measure idle consumption to subtract it
#endif
		if (nvmlRet == NVML_SUCCESS)
			j->task->profiling_info->energy_consumed =
				(energy_end / 1000. - j->task->profiling_info->energy_consumed);
	}
#endif
	_starpu_set_current_task(NULL);
	if (worker->pipeline_length)
		worker->current_tasks[worker->first_task] = NULL;
	else
		worker->current_task = NULL;
	worker->first_task = (worker->first_task + 1) % STARPU_MAX_PIPELINE;
	worker->ntasks--;

	_starpu_driver_end_job(worker, j, &worker->perf_arch, 0, profiling);

	struct _starpu_sched_ctx *sched_ctx = _starpu_sched_ctx_get_sched_ctx_for_worker_and_job(worker, j);
	if (!sched_ctx)
		sched_ctx = _starpu_get_sched_ctx_struct(j->task->sched_ctx);

	if (!sched_ctx->sched_policy)
		_starpu_driver_update_job_feedback(j, worker, &sched_ctx->perf_arch, profiling);
	else
		_starpu_driver_update_job_feedback(j, worker, &worker->perf_arch, profiling);

	_starpu_push_task_output(j);

	_starpu_handle_job_termination(j);
}

/* Execute a job, up to completion for synchronous jobs */
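/* Three cases: an asynchronous task with pipelining enabled just records an
 * event on the local stream and returns; an asynchronous task with
 * STARPU_CUDA_PIPELINE=0 is synchronized immediately; a synchronous task must
 * already have waited for its kernels on the local stream, which is asserted
 * below. */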
static void execute_job_on_cuda(struct starpu_task *task, struct _starpu_worker *worker)
{
	int workerid = worker->workerid;
	int res;

	struct _starpu_job *j = _starpu_get_job_associated_to_task(task);

	unsigned char pipeline_idx = (worker->first_task + worker->ntasks - 1)%STARPU_MAX_PIPELINE;

	res = start_job_on_cuda(j, worker, pipeline_idx);

	if (res)
	{
		switch (res)
		{
			case -EAGAIN:
				_STARPU_DISP("ouch, CUDA could not actually run task %p, putting it back...\n", task);
				_starpu_push_task_to_workers(task);
				STARPU_ABORT();
			default:
				STARPU_ABORT();
		}
	}

#ifndef STARPU_SIMGRID
	if (!used_stream[workerid])
	{
		used_stream[workerid] = 1;
		_STARPU_DISP("Warning: starpu_cuda_get_local_stream() was not used to submit the kernel to CUDA on worker %d. CUDA will thus introduce a lot of useless synchronizations, which will prevent proper overlapping of data transfers and kernel execution. See the CUDA-specific part of the 'Check List When Performance Are Not There' chapter of the StarPU handbook\n", workerid);
	}
#endif

	if (task->cl->cuda_flags[j->nimpl] & STARPU_CUDA_ASYNC)
	{
		if (worker->pipeline_length == 0)
		{
#ifdef STARPU_SIMGRID
			_starpu_simgrid_wait_tasks(workerid);
#else
			/* Forced synchronous execution */
			cudaStreamSynchronize(starpu_cuda_get_local_stream());
#endif
			finish_job_on_cuda(j, worker);
		}
		else
		{
#ifndef STARPU_SIMGRID
			/* Record an event to synchronize with the task termination later */
			cudaError_t cures = cudaEventRecord(task_events[workerid][pipeline_idx], starpu_cuda_get_local_stream());
			if (STARPU_UNLIKELY(cures))
				STARPU_CUDA_REPORT_ERROR(cures);
#endif
#ifdef STARPU_USE_FXT
			int k;
			for (k = 0; k < (int) worker->set->nworkers; k++)
				if (worker->set->workers[k].ntasks == worker->set->workers[k].pipeline_length)
					break;
			if (k == (int) worker->set->nworkers)
				/* Everybody busy */
				_STARPU_TRACE_START_EXECUTING();
#endif
		}
	}
	else
	/* Synchronous execution */
	{
#if !defined(STARPU_SIMGRID)
		STARPU_ASSERT_MSG(cudaStreamQuery(starpu_cuda_get_local_stream()) == cudaSuccess, "Unless the STARPU_CUDA_ASYNC flag is used, CUDA codelets have to wait for the termination of their kernels on the starpu_cuda_get_local_stream() stream");
#endif
		finish_job_on_cuda(j, worker);
	}
}

/* This is run from the driver to initialize the driver CUDA context */
int _starpu_cuda_driver_init(struct _starpu_worker_set *worker_set)
{
	struct _starpu_worker *worker0 = &worker_set->workers[0];
	int lastdevid = -1;
	unsigned i;

	_starpu_driver_start(worker0, _STARPU_FUT_CUDA_KEY, 0);
	_starpu_set_local_worker_set_key(worker_set);

#ifdef STARPU_USE_FXT
	for (i = 1; i < worker_set->nworkers; i++)
		_starpu_worker_start(&worker_set->workers[i], _STARPU_FUT_CUDA_KEY, 0);
#endif

	for (i = 0; i < worker_set->nworkers; i++)
	{
		struct _starpu_worker *worker = &worker_set->workers[i];
		unsigned devid = worker->devid;
		unsigned memnode = worker->memory_node;
		if ((int) devid == lastdevid)
		{
#ifdef STARPU_SIMGRID
			STARPU_ASSERT_MSG(0, "Simgrid mode does not support concurrent kernel execution yet\n");
#endif /* !STARPU_SIMGRID */
			/* Already initialized */
			continue;
		}
		lastdevid = devid;
		init_device_context(devid, memnode);

#ifndef STARPU_SIMGRID
		if (worker->config->topology.nworkerpercuda > 1 && props[devid].concurrentKernels == 0)
			_STARPU_DISP("Warning: STARPU_NWORKER_PER_CUDA is %u, but CUDA device %u does not support concurrent kernel execution!\n", worker_set->nworkers, devid);
#endif /* !STARPU_SIMGRID */
	}

	/* one more time to avoid hacks from third party lib :) */
	_starpu_bind_thread_on_cpu(worker0->bindid, worker0->workerid);

	for (i = 0; i < worker_set->nworkers; i++)
	{
		struct _starpu_worker *worker = &worker_set->workers[i];
		unsigned devid = worker->devid;
		unsigned workerid = worker->workerid;
		unsigned subdev = i % _starpu_get_machine_config()->topology.nworkerpercuda;

		float size = (float) global_mem[devid] / (1<<30);
#ifdef STARPU_SIMGRID
		const char *devname = "Simgrid";
#else
		/* get the device's name */
		char devname[128];
		strncpy(devname, props[devid].name, 127);
		devname[127] = 0;
#endif

#if defined(STARPU_HAVE_BUSID) && !defined(STARPU_SIMGRID)
#if defined(STARPU_HAVE_DOMAINID) && !defined(STARPU_SIMGRID)
#ifdef HAVE_LIBNVIDIA_ML
		char busid[13];
		snprintf(busid, sizeof(busid), "%04x:%02x:%02x.0", props[devid].pciDomainID, props[devid].pciBusID, props[devid].pciDeviceID);
		nvmlDeviceGetHandleByPciBusId(busid, &nvmlDev[devid]);
#endif
		if (props[devid].pciDomainID)
			snprintf(worker->name, sizeof(worker->name), "CUDA %u.%u (%s %.1f GiB %04x:%02x:%02x.0)", devid, subdev, devname, size, props[devid].pciDomainID, props[devid].pciBusID, props[devid].pciDeviceID);
		else
#endif
			snprintf(worker->name, sizeof(worker->name), "CUDA %u.%u (%s %.1f GiB %02x:%02x.0)", devid, subdev, devname, size, props[devid].pciBusID, props[devid].pciDeviceID);
#else
		snprintf(worker->name, sizeof(worker->name), "CUDA %u.%u (%s %.1f GiB)", devid, subdev, devname, size);
#endif
		snprintf(worker->short_name, sizeof(worker->short_name), "CUDA %u.%u", devid, subdev);
		_STARPU_DEBUG("cuda (%s) dev id %u worker %u thread is ready to run on CPU %d!\n", devname, devid, subdev, worker->bindid);

		worker->pipeline_length = starpu_get_env_number_default("STARPU_CUDA_PIPELINE", 2);
		if (worker->pipeline_length > STARPU_MAX_PIPELINE)
		{
			_STARPU_DISP("Warning: STARPU_CUDA_PIPELINE is %u, but STARPU_MAX_PIPELINE is only %u\n", worker->pipeline_length, STARPU_MAX_PIPELINE);
			worker->pipeline_length = STARPU_MAX_PIPELINE;
		}
#if !defined(STARPU_SIMGRID) && !defined(STARPU_NON_BLOCKING_DRIVERS)
		if (worker->pipeline_length >= 1)
		{
			/* We need non-blocking drivers to poll for CUDA task
			 * termination */
			_STARPU_DISP("Warning: reducing STARPU_CUDA_PIPELINE to 0 because blocking drivers are enabled (and simgrid is not enabled)\n");
			worker->pipeline_length = 0;
		}
#endif
		init_worker_context(workerid, worker->devid);

		_STARPU_TRACE_WORKER_INIT_END(workerid);
	}
	{
		char thread_name[16];
		snprintf(thread_name, sizeof(thread_name), "CUDA %u", worker0->devid);
		starpu_pthread_setname(thread_name);
	}

	/* tell the main thread that this one is ready */
	STARPU_PTHREAD_MUTEX_LOCK(&worker0->mutex);
	worker0->status = STATUS_UNKNOWN;
	worker0->worker_is_initialized = 1;
	STARPU_PTHREAD_COND_SIGNAL(&worker0->ready_cond);
	STARPU_PTHREAD_MUTEX_UNLOCK(&worker0->mutex);

	/* tell the main thread that this one is ready */
	STARPU_PTHREAD_MUTEX_LOCK(&worker_set->mutex);
	worker_set->set_is_initialized = 1;
	STARPU_PTHREAD_COND_SIGNAL(&worker_set->ready_cond);
	STARPU_PTHREAD_MUTEX_UNLOCK(&worker_set->mutex);

	return 0;
}
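
/* One iteration of the CUDA driver loop: (1) finish input transfers that have
 * completed and launch the corresponding tasks, (2) poll the CUDA events of
 * queued asynchronous tasks and terminate the finished ones, (3) make the
 * data-management engine progress, and (4) pull new tasks from the scheduler
 * and start fetching their input data asynchronously. */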
int _starpu_cuda_driver_run_once(struct _starpu_worker_set *worker_set)
{
	struct _starpu_worker *worker0 = &worker_set->workers[0];
	struct starpu_task *tasks[worker_set->nworkers], *task;
	struct _starpu_job *j;
	int i, res;

	int idle_tasks, idle_transfers;

#ifdef STARPU_SIMGRID
	starpu_pthread_wait_reset(&worker0->wait);
#endif

	_starpu_set_local_worker_key(worker0);

	/* First poll for completed jobs */
	idle_tasks = 0;
	idle_transfers = 0;
	for (i = 0; i < (int) worker_set->nworkers; i++)
	{
		struct _starpu_worker *worker = &worker_set->workers[i];
		int workerid = worker->workerid;
		unsigned memnode = worker->memory_node;

		if (!worker->ntasks)
			idle_tasks++;
		if (!worker->task_transferring)
			idle_transfers++;

		if (!worker->ntasks && !worker->task_transferring)
		{
			/* Nothing to test at all */
			continue;
		}

		/* First test for transfers pending for the next task */
		task = worker->task_transferring;
		if (task && worker->nb_buffers_transferred == worker->nb_buffers_totransfer)
		{
			_STARPU_TRACE_END_PROGRESS(memnode);
			j = _starpu_get_job_associated_to_task(task);

			_starpu_set_local_worker_key(worker);
			_starpu_fetch_task_input_tail(task, j, worker);
			_starpu_set_worker_status(worker, STATUS_UNKNOWN);
			/* Reset it */
			worker->task_transferring = NULL;

			if (worker->ntasks > 1 && !(task->cl->cuda_flags[j->nimpl] & STARPU_CUDA_ASYNC))
			{
				/* We have to execute a non-asynchronous task but we
				 * still have tasks in the pipeline... Record it to
				 * prevent more tasks from coming, and do it later */
				worker->pipeline_stuck = 1;
			}
			else
			{
				execute_job_on_cuda(task, worker);
			}
			_STARPU_TRACE_START_PROGRESS(memnode);
		}

		/* Then test for termination of queued tasks */
		if (!worker->ntasks)
			/* No queued task */
			continue;

		if (worker->pipeline_length)
			task = worker->current_tasks[worker->first_task];
		else
			task = worker->current_task;
		if (task == worker->task_transferring)
			/* Next task is still pending transfer */
			continue;

		/* On-going asynchronous task, check for its termination first */
#ifdef STARPU_SIMGRID
		if (task_finished[workerid][worker->first_task])
#else /* !STARPU_SIMGRID */
		cudaError_t cures = cudaEventQuery(task_events[workerid][worker->first_task]);

		if (cures != cudaSuccess)
		{
			STARPU_ASSERT_MSG(cures == cudaErrorNotReady, "CUDA error on task %p, codelet %p (%s): %s (%d)", task, task->cl, _starpu_codelet_get_model_name(task->cl), cudaGetErrorString(cures), cures);
		}
		else
#endif /* !STARPU_SIMGRID */
		{
			_STARPU_TRACE_END_PROGRESS(memnode);
			/* Asynchronous task completed! */
			_starpu_set_local_worker_key(worker);
			finish_job_on_cuda(_starpu_get_job_associated_to_task(task), worker);
			/* See next task if any */
			if (worker->ntasks && worker->current_tasks[worker->first_task] != worker->task_transferring)
			{
				task = worker->current_tasks[worker->first_task];
				j = _starpu_get_job_associated_to_task(task);
				if (task->cl->cuda_flags[j->nimpl] & STARPU_CUDA_ASYNC)
				{
					/* An asynchronous task, it was already
					 * queued, it's now running, record its start time. */
					_starpu_driver_start_job(worker, j, &worker->perf_arch, 0, starpu_profiling_status_get());
				}
				else
				{
					/* A synchronous task, we have finished
					 * flushing the pipeline, we can now at
					 * last execute it. */
					_STARPU_TRACE_EVENT("sync_task");
					execute_job_on_cuda(task, worker);
					_STARPU_TRACE_EVENT("end_sync_task");
					worker->pipeline_stuck = 0;
				}
			}
#ifdef STARPU_USE_FXT
			int k;
			for (k = 0; k < (int) worker_set->nworkers; k++)
				if (worker_set->workers[k].ntasks)
					break;
			if (k == (int) worker_set->nworkers)
				/* Everybody busy */
				_STARPU_TRACE_END_EXECUTING()
#endif
			_STARPU_TRACE_START_PROGRESS(memnode);
		}

		if (!worker->pipeline_length || worker->ntasks < worker->pipeline_length)
			idle_tasks++;
	}

#if defined(STARPU_NON_BLOCKING_DRIVERS) && !defined(STARPU_SIMGRID)
	if (!idle_tasks)
	{
		/* No task ready yet, no better thing to do than waiting */
		__starpu_datawizard_progress(1, !idle_transfers);
		return 0;
	}
#endif

	/* Something was done, make some progress */
	res = !idle_tasks || !idle_transfers;
	res |= __starpu_datawizard_progress(1, 1);

	/* And pull tasks */
	res |= _starpu_get_multi_worker_task(worker_set->workers, tasks, worker_set->nworkers, worker0->memory_node);

#ifdef STARPU_SIMGRID
	if (!res)
		starpu_pthread_wait_wait(&worker0->wait);
#else
	if (!res)
		return 0;
#endif

	for (i = 0; i < (int) worker_set->nworkers; i++)
	{
		struct _starpu_worker *worker = &worker_set->workers[i];
		unsigned memnode STARPU_ATTRIBUTE_UNUSED = worker->memory_node;

		task = tasks[i];
		if (!task)
			continue;

		j = _starpu_get_job_associated_to_task(task);

		/* can CUDA do that task ? */
		if (!_STARPU_CUDA_MAY_PERFORM(j))
		{
			/* this is neither a CUDA nor a cuBLAS task */
			_starpu_worker_refuse_task(worker, task);
#if 0
			if (worker->pipeline_length)
			{
				int j;
				for (j = 0; j < worker->ntasks; j++)
				{
					const int j_mod = (j+worker->first_task)%STARPU_MAX_PIPELINE;
					if (task == worker->current_tasks[j_mod])
					{
						worker->current_tasks[j_mod] = NULL;
						if (j == 0)
						{
							worker->first_task = (worker->first_task + 1) % STARPU_MAX_PIPELINE;
							_starpu_set_current_task(NULL);
						}
						break;
					}
				}
				STARPU_ASSERT(j<worker->ntasks);
			}
			else
			{
				worker->current_task = NULL;
				_starpu_set_current_task(NULL);
			}
			worker->ntasks--;
			int res = _starpu_push_task_to_workers(task);
			STARPU_ASSERT_MSG(res == 0, "_starpu_push_task_to_workers() unexpectedly returned = %d\n", res);
#endif
			continue;
		}

		/* Fetch data asynchronously */
		_STARPU_TRACE_END_PROGRESS(memnode);
		_starpu_set_local_worker_key(worker);
		res = _starpu_fetch_task_input(task, j, 1);
		STARPU_ASSERT(res == 0);
		_STARPU_TRACE_START_PROGRESS(memnode);
	}

	return 0;
}

int _starpu_cuda_driver_deinit(struct _starpu_worker_set *worker_set)
{
	int lastdevid = -1;
	unsigned i;
	_STARPU_TRACE_WORKER_DEINIT_START;

	for (i = 0; i < worker_set->nworkers; i++)
	{
		struct _starpu_worker *worker = &worker_set->workers[i];
		unsigned devid = worker->devid;
		unsigned memnode = worker->memory_node;
		unsigned usersleft;

		if ((int) devid == lastdevid)
			/* Already initialized */
			continue;
		lastdevid = devid;

		STARPU_PTHREAD_MUTEX_LOCK(&cuda_device_init_mutex[devid]);
		usersleft = --cuda_device_users[devid];
		STARPU_PTHREAD_MUTEX_UNLOCK(&cuda_device_init_mutex[devid]);

		if (!usersleft)
		{
			/* I'm last, deinitialize device */
			_starpu_handle_all_pending_node_data_requests(memnode);

			/* In case there remains some memory that was automatically
			 * allocated by StarPU, we release it now. Note that data
			 * coherency is not maintained anymore at that point ! */
			_starpu_free_all_automatically_allocated_buffers(memnode);

			_starpu_malloc_shutdown(memnode);

#ifndef STARPU_SIMGRID
			deinit_device_context(devid);
#endif /* !STARPU_SIMGRID */
		}
		STARPU_PTHREAD_MUTEX_LOCK(&cuda_device_init_mutex[devid]);
		cuda_device_init[devid] = UNINITIALIZED;
		STARPU_PTHREAD_MUTEX_UNLOCK(&cuda_device_init_mutex[devid]);
	}

	for (i = 0; i < worker_set->nworkers; i++)
	{
		struct _starpu_worker *worker = &worker_set->workers[i];
		unsigned workerid = worker->workerid;

		deinit_worker_context(workerid, worker->devid);
	}

	worker_set->workers[0].worker_is_initialized = 0;
	_STARPU_TRACE_WORKER_DEINIT_END(_STARPU_FUT_CUDA_KEY);

	return 0;
}

void *_starpu_cuda_worker(void *_arg)
{
	struct _starpu_worker_set *worker_set = _arg;
	unsigned i;

	_starpu_cuda_driver_init(worker_set);
	for (i = 0; i < worker_set->nworkers; i++)
		_STARPU_TRACE_START_PROGRESS(worker_set->workers[i].memory_node);
	while (_starpu_machine_is_running())
	{
		_starpu_may_pause();
		_starpu_cuda_driver_run_once(worker_set);
	}
	for (i = 0; i < worker_set->nworkers; i++)
		_STARPU_TRACE_END_PROGRESS(worker_set->workers[i].memory_node);
	_starpu_cuda_driver_deinit(worker_set);

	return NULL;
}

#ifdef STARPU_USE_CUDA
void starpu_cublas_report_error(const char *func, const char *file, int line, int status)
{
	char *errormsg;
	switch (status)
	{
		case CUBLAS_STATUS_SUCCESS:
			errormsg = "success";
			break;
		case CUBLAS_STATUS_NOT_INITIALIZED:
			errormsg = "not initialized";
			break;
		case CUBLAS_STATUS_ALLOC_FAILED:
			errormsg = "alloc failed";
			break;
		case CUBLAS_STATUS_INVALID_VALUE:
			errormsg = "invalid value";
			break;
		case CUBLAS_STATUS_ARCH_MISMATCH:
			errormsg = "arch mismatch";
			break;
		case CUBLAS_STATUS_EXECUTION_FAILED:
			errormsg = "execution failed";
			break;
		case CUBLAS_STATUS_INTERNAL_ERROR:
			errormsg = "internal error";
			break;
		default:
			errormsg = "unknown error";
			break;
	}
	_STARPU_MSG("oops in %s (%s:%d)... %d: %s \n", func, file, line, status, errormsg);
	STARPU_ABORT();
}

void starpu_cuda_report_error(const char *func, const char *file, int line, cudaError_t status)
{
	const char *errormsg = cudaGetErrorString(status);
	_STARPU_ERROR("oops in %s (%s:%d)... %d: %s \n", func, file, line, status, errormsg);
}
#endif /* STARPU_USE_CUDA */

#ifdef STARPU_USE_CUDA
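/* Copy SSIZE bytes from SRC_PTR to DST_PTR, preferably asynchronously: when a
 * stream is given and the asynchronous copy succeeds, return -EAGAIN to tell
 * the caller that the transfer is still in flight on that stream; otherwise
 * fall back to a synchronous copy and return 0. Cross-device copies use
 * cudaMemcpyPeer(Async) when StarPU was built with memcpy-peer support. */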
int
starpu_cuda_copy_async_sync(void *src_ptr, unsigned src_node,
			    void *dst_ptr, unsigned dst_node,
			    size_t ssize, cudaStream_t stream,
			    enum cudaMemcpyKind kind)
{
#ifdef STARPU_HAVE_CUDA_MEMCPY_PEER
	int peer_copy = 0;
	int src_dev = -1, dst_dev = -1;
#endif
	cudaError_t cures = 0;

	if (kind == cudaMemcpyDeviceToDevice && src_node != dst_node)
	{
#ifdef STARPU_HAVE_CUDA_MEMCPY_PEER
		peer_copy = 1;
		src_dev = _starpu_memory_node_get_devid(src_node);
		dst_dev = _starpu_memory_node_get_devid(dst_node);
#else
		STARPU_ABORT();
#endif
	}

	if (stream)
	{
		double start;
		starpu_interface_start_driver_copy_async(src_node, dst_node, &start);
#ifdef STARPU_HAVE_CUDA_MEMCPY_PEER
		if (peer_copy)
		{
			cures = cudaMemcpyPeerAsync((char *) dst_ptr, dst_dev,
						    (char *) src_ptr, src_dev,
						    ssize, stream);
		}
		else
#endif
		{
			cures = cudaMemcpyAsync((char *)dst_ptr, (char *)src_ptr, ssize, kind, stream);
		}
		starpu_interface_end_driver_copy_async(src_node, dst_node, start);
	}

	/* Test if the asynchronous copy has failed or if the caller only asked for a synchronous copy */
	if (stream == NULL || cures)
	{
		/* do it in a synchronous fashion */
#ifdef STARPU_HAVE_CUDA_MEMCPY_PEER
		if (peer_copy)
		{
			cures = cudaMemcpyPeer((char *) dst_ptr, dst_dev,
					       (char *) src_ptr, src_dev,
					       ssize);
		}
		else
#endif
		{
			cures = cudaMemcpy((char *)dst_ptr, (char *)src_ptr, ssize, kind);
		}
		if (STARPU_UNLIKELY(cures))
			STARPU_CUDA_REPORT_ERROR(cures);
		return 0;
	}

	return -EAGAIN;
}
#endif /* STARPU_USE_CUDA */

int _starpu_run_cuda(struct _starpu_worker_set *workerarg)
{
	/* Let's go ! */
	_starpu_cuda_worker(workerarg);

	return 0;
}

int _starpu_cuda_driver_init_from_worker(struct _starpu_worker *worker)
{
	return _starpu_cuda_driver_init(worker->set);
}

int _starpu_cuda_run_from_worker(struct _starpu_worker *worker)
{
	return _starpu_run_cuda(worker->set);
}

int _starpu_cuda_driver_run_once_from_worker(struct _starpu_worker *worker)
{
	return _starpu_cuda_driver_run_once(worker->set);
}

int _starpu_cuda_driver_deinit_from_worker(struct _starpu_worker *worker)
{
	return _starpu_cuda_driver_deinit(worker->set);
}
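
/* Hook the CUDA driver into StarPU's generic driver interface, used e.g. when
 * the application drives workers itself with starpu_driver_run(). */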
struct _starpu_driver_ops _starpu_driver_cuda_ops =
{
	.init = _starpu_cuda_driver_init_from_worker,
	.run = _starpu_cuda_run_from_worker,
	.run_once = _starpu_cuda_driver_run_once_from_worker,
	.deinit = _starpu_cuda_driver_deinit_from_worker
};