driver_cuda.c

/* StarPU --- Runtime system for heterogeneous multicore architectures.
 *
 * Copyright (C) 2008-2021 Université de Bordeaux, CNRS (LaBRI UMR 5800), Inria
 * Copyright (C) 2010 Mehdi Juhoor
 * Copyright (C) 2011 Télécom-SudParis
 * Copyright (C) 2013 Thibaut Lambert
 * Copyright (C) 2016 Uppsala University
 *
 * StarPU is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation; either version 2.1 of the License, or (at
 * your option) any later version.
 *
 * StarPU is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * See the GNU Lesser General Public License in COPYING.LGPL for more details.
 */

#include <starpu.h>
#include <starpu_cuda.h>
#include <starpu_profiling.h>
#include <common/utils.h>
#include <common/config.h>
#include <core/debug.h>
#include <drivers/cpu/driver_cpu.h>
#include <drivers/driver_common/driver_common.h>
#include "driver_cuda.h"
#include <core/sched_policy.h>
#ifdef HAVE_CUDA_GL_INTEROP_H
#include <cuda_gl_interop.h>
#endif
#ifdef HAVE_LIBNVIDIA_ML
#include <nvml.h>
#endif
#include <datawizard/memory_manager.h>
#include <datawizard/memory_nodes.h>
#include <datawizard/malloc.h>
#include <core/task.h>
#include <common/knobs.h>
#ifdef STARPU_SIMGRID
#include <core/simgrid.h>
#endif

#ifdef STARPU_USE_CUDA
#if CUDART_VERSION >= 5000
/* Avoid letting our streams spuriously synchronize with the NULL stream */
#define starpu_cudaStreamCreate(stream) cudaStreamCreateWithFlags(stream, cudaStreamNonBlocking)
#else
#define starpu_cudaStreamCreate(stream) cudaStreamCreate(stream)
#endif
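
/* Usage sketch (illustrative, not taken from this file): every stream the
 * driver creates goes through this wrapper, e.g.
 *	cudaStream_t s;
 *	cudaError_t err = starpu_cudaStreamCreate(&s);
 *	if (STARPU_UNLIKELY(err)) STARPU_CUDA_REPORT_ERROR(err);
 * so that, from CUDA 5.0 on, streams never implicitly synchronize with the
 * default (NULL) stream. */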

/* At least CUDA 4.2 still didn't have working memcpy3D */
#if CUDART_VERSION < 5000
#define BUGGED_MEMCPY3D
#endif
#endif

/* the number of CUDA devices */
static int ncudagpus = -1;

static size_t global_mem[STARPU_MAXCUDADEVS];
#ifdef HAVE_LIBNVIDIA_ML
static nvmlDevice_t nvmlDev[STARPU_MAXCUDADEVS];
#endif
int _starpu_cuda_bus_ids[STARPU_MAXCUDADEVS+STARPU_MAXNUMANODES][STARPU_MAXCUDADEVS+STARPU_MAXNUMANODES];
#ifdef STARPU_USE_CUDA
static cudaStream_t streams[STARPU_NMAXWORKERS];
static char used_stream[STARPU_NMAXWORKERS];
static cudaStream_t out_transfer_streams[STARPU_MAXCUDADEVS];
static cudaStream_t in_transfer_streams[STARPU_MAXCUDADEVS];
/* Note: streams are not thread-safe, so we define them for each CUDA worker
 * emitting a GPU-GPU transfer */
static cudaStream_t in_peer_transfer_streams[STARPU_MAXCUDADEVS][STARPU_MAXCUDADEVS];
static struct cudaDeviceProp props[STARPU_MAXCUDADEVS];
#ifndef STARPU_SIMGRID
static cudaEvent_t task_events[STARPU_NMAXWORKERS][STARPU_MAX_PIPELINE];
#endif
#endif /* STARPU_USE_CUDA */

#ifdef STARPU_SIMGRID
static unsigned task_finished[STARPU_NMAXWORKERS][STARPU_MAX_PIPELINE];
static starpu_pthread_mutex_t cuda_alloc_mutex = STARPU_PTHREAD_MUTEX_INITIALIZER;
#endif /* STARPU_SIMGRID */

static enum initialization cuda_device_init[STARPU_MAXCUDADEVS];
static int cuda_device_users[STARPU_MAXCUDADEVS];
static starpu_pthread_mutex_t cuda_device_init_mutex[STARPU_MAXCUDADEVS];
static starpu_pthread_cond_t cuda_device_init_cond[STARPU_MAXCUDADEVS];

void _starpu_cuda_init(void)
{
	unsigned i;
	for (i = 0; i < STARPU_MAXCUDADEVS; i++)
	{
		STARPU_PTHREAD_MUTEX_INIT(&cuda_device_init_mutex[i], NULL);
		STARPU_PTHREAD_COND_INIT(&cuda_device_init_cond[i], NULL);
	}
}

static size_t _starpu_cuda_get_global_mem_size(unsigned devid)
{
	return global_mem[devid];
}

void
_starpu_cuda_discover_devices(struct _starpu_machine_config *config)
{
	/* Discover the number of CUDA devices. Fill the result in CONFIG. */
#ifdef STARPU_SIMGRID
	config->topology.nhwdevices[STARPU_CUDA_WORKER] = _starpu_simgrid_get_nbhosts("CUDA");
#else
	int cnt;
	cudaError_t cures;
	cures = cudaGetDeviceCount(&cnt);
	if (STARPU_UNLIKELY(cures != cudaSuccess))
		cnt = 0;
	config->topology.nhwdevices[STARPU_CUDA_WORKER] = cnt;
#ifdef HAVE_LIBNVIDIA_ML
	nvmlInit();
#endif
#endif
}

/* In case we want to cap the amount of memory available on the GPUs by
 * means of the STARPU_LIMIT_CUDA_MEM environment variable, we decrease the
 * value of global_mem[devid], which is the value returned by
 * _starpu_cuda_get_global_mem_size(), to indicate how much memory can
 * be allocated on the device.
 */
static void _starpu_cuda_limit_gpu_mem_if_needed(unsigned devid)
{
	starpu_ssize_t limit;
	size_t STARPU_ATTRIBUTE_UNUSED totalGlobalMem = 0;
	size_t STARPU_ATTRIBUTE_UNUSED to_waste = 0;

#ifdef STARPU_SIMGRID
	totalGlobalMem = _starpu_simgrid_get_memsize("CUDA", devid);
#elif defined(STARPU_USE_CUDA)
	/* Find the size of the memory on the device */
	totalGlobalMem = props[devid].totalGlobalMem;
#endif

	limit = starpu_get_env_number("STARPU_LIMIT_CUDA_MEM");
	if (limit == -1)
	{
		char name[30];
		snprintf(name, sizeof(name), "STARPU_LIMIT_CUDA_%u_MEM", devid);
		limit = starpu_get_env_number(name);
	}
#if defined(STARPU_USE_CUDA) || defined(STARPU_SIMGRID)
	if (limit == -1)
	{
		/* Use 90% of the available memory by default. */
		limit = totalGlobalMem / (1024*1024) * 0.9;
	}
#endif

	global_mem[devid] = limit * 1024*1024;

#ifdef STARPU_USE_CUDA
	/* How much memory to waste ? */
	to_waste = totalGlobalMem - global_mem[devid];
	props[devid].totalGlobalMem -= to_waste;
#endif /* STARPU_USE_CUDA */

	_STARPU_DEBUG("CUDA device %u: Wasting %ld MB / Limit %ld MB / Total %ld MB / Remains %ld MB\n",
	              devid, (long) to_waste/(1024*1024), (long) limit, (long) totalGlobalMem/(1024*1024),
	              (long) (totalGlobalMem - to_waste)/(1024*1024));
}
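
/* Usage sketch (assumed shell syntax; values are in MiB, matching the
 * limit * 1024*1024 conversion above): cap all CUDA devices, or device 0 only:
 *	STARPU_LIMIT_CUDA_MEM=1024 ./app
 *	STARPU_LIMIT_CUDA_0_MEM=1024 ./app
 */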

#ifdef STARPU_USE_CUDA
cudaStream_t starpu_cuda_get_local_in_transfer_stream()
{
	int worker = starpu_worker_get_id_check();
	int devid = starpu_worker_get_devid(worker);
	cudaStream_t stream;

	stream = in_transfer_streams[devid];
	STARPU_ASSERT(stream);
	return stream;
}

cudaStream_t starpu_cuda_get_in_transfer_stream(unsigned dst_node)
{
	int dst_devid = starpu_memory_node_get_devid(dst_node);
	cudaStream_t stream;

	stream = in_transfer_streams[dst_devid];
	STARPU_ASSERT(stream);
	return stream;
}

cudaStream_t starpu_cuda_get_local_out_transfer_stream()
{
	int worker = starpu_worker_get_id_check();
	int devid = starpu_worker_get_devid(worker);
	cudaStream_t stream;

	stream = out_transfer_streams[devid];
	STARPU_ASSERT(stream);
	return stream;
}

cudaStream_t starpu_cuda_get_out_transfer_stream(unsigned src_node)
{
	int src_devid = starpu_memory_node_get_devid(src_node);
	cudaStream_t stream;

	stream = out_transfer_streams[src_devid];
	STARPU_ASSERT(stream);
	return stream;
}

cudaStream_t starpu_cuda_get_peer_transfer_stream(unsigned src_node, unsigned dst_node)
{
	int src_devid = starpu_memory_node_get_devid(src_node);
	int dst_devid = starpu_memory_node_get_devid(dst_node);
	cudaStream_t stream;

	stream = in_peer_transfer_streams[src_devid][dst_devid];
	STARPU_ASSERT(stream);
	return stream;
}

cudaStream_t starpu_cuda_get_local_stream(void)
{
	int worker = starpu_worker_get_id_check();

	used_stream[worker] = 1;
	return streams[worker];
}
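
/* Sketch of the call pattern expected inside a CUDA codelet (my_kernel is a
 * hypothetical kernel): launch on the per-worker stream so kernels can
 * overlap with data transfers, then synchronize, unless the codelet is
 * declared STARPU_CUDA_ASYNC:
 *	my_kernel<<<grid, block, 0, starpu_cuda_get_local_stream()>>>(...);
 *	cudaStreamSynchronize(starpu_cuda_get_local_stream());
 */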

const struct cudaDeviceProp *starpu_cuda_get_device_properties(unsigned workerid)
{
	struct _starpu_machine_config *config = _starpu_get_machine_config();
	unsigned devid = config->workers[workerid].devid;
	return &props[devid];
}
#endif /* STARPU_USE_CUDA */

void starpu_cuda_set_device(unsigned devid STARPU_ATTRIBUTE_UNUSED)
{
#ifdef STARPU_SIMGRID
	STARPU_ABORT();
#else
	cudaError_t cures;
	struct starpu_conf *conf = &_starpu_get_machine_config()->conf;
#if !defined(STARPU_HAVE_CUDA_MEMCPY_PEER) && defined(HAVE_CUDA_GL_INTEROP_H)
	unsigned i;
#endif

#ifdef STARPU_HAVE_CUDA_MEMCPY_PEER
	if (conf->n_cuda_opengl_interoperability)
	{
		_STARPU_MSG("OpenGL interoperability was requested, but StarPU was built with multithread GPU control support, please reconfigure with --disable-cuda-memcpy-peer but that will disable the memcpy-peer optimizations\n");
		STARPU_ABORT();
	}
#elif !defined(HAVE_CUDA_GL_INTEROP_H)
	if (conf->n_cuda_opengl_interoperability)
	{
		_STARPU_MSG("OpenGL interoperability was requested, but cuda_gl_interop.h could not be compiled, please make sure that OpenGL headers were available before ./configure run.");
		STARPU_ABORT();
	}
#else
	for (i = 0; i < conf->n_cuda_opengl_interoperability; i++)
	{
		if (conf->cuda_opengl_interoperability[i] == devid)
		{
			cures = cudaGLSetGLDevice(devid);
			goto done;
		}
	}
#endif

	cures = cudaSetDevice(devid);
#if !defined(STARPU_HAVE_CUDA_MEMCPY_PEER) && defined(HAVE_CUDA_GL_INTEROP_H)
done:
#endif

#ifdef STARPU_OPENMP
	/* When StarPU is used as an OpenMP runtime support,
	 * starpu_omp_shutdown() will usually be called from a
	 * destructor, in which case cudaThreadExit() reports a
	 * cudaErrorCudartUnloading here. There should not
	 * be any remaining tasks running at this point, so
	 * we can probably ignore it without much consequence. */
	if (STARPU_UNLIKELY(cures && cures != cudaErrorCudartUnloading))
		STARPU_CUDA_REPORT_ERROR(cures);
#else
	if (STARPU_UNLIKELY(cures))
		STARPU_CUDA_REPORT_ERROR(cures);
#endif /* STARPU_OPENMP */
#endif
}
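
/* Initialize the per-device context: select the device, enable GPU-GPU peer
 * access where available, force context creation, and create the transfer
 * streams. Only the first worker of a device does the work; the others wait
 * on cuda_device_init_cond. */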
static void init_device_context(unsigned devid, unsigned memnode)
{
#ifndef STARPU_SIMGRID
	cudaError_t cures;

	/* TODO: cudaSetDeviceFlag(cudaDeviceMapHost) */
	starpu_cuda_set_device(devid);
#endif /* !STARPU_SIMGRID */

	STARPU_PTHREAD_MUTEX_LOCK(&cuda_device_init_mutex[devid]);
	cuda_device_users[devid]++;
	if (cuda_device_init[devid] == UNINITIALIZED)
		/* Nobody started initialization yet, do it */
		cuda_device_init[devid] = CHANGING;
	else
	{
		/* Somebody else is doing initialization, wait for it */
		while (cuda_device_init[devid] != INITIALIZED)
			STARPU_PTHREAD_COND_WAIT(&cuda_device_init_cond[devid], &cuda_device_init_mutex[devid]);
		STARPU_PTHREAD_MUTEX_UNLOCK(&cuda_device_init_mutex[devid]);
		return;
	}
	STARPU_PTHREAD_MUTEX_UNLOCK(&cuda_device_init_mutex[devid]);

#ifndef STARPU_SIMGRID
#ifdef STARPU_HAVE_CUDA_MEMCPY_PEER
	if (starpu_get_env_number("STARPU_ENABLE_CUDA_GPU_GPU_DIRECT") != 0)
	{
		int nworkers = starpu_worker_get_count();
		int workerid;
		for (workerid = 0; workerid < nworkers; workerid++)
		{
			struct _starpu_worker *worker = _starpu_get_worker_struct(workerid);
			if (worker->arch == STARPU_CUDA_WORKER && worker->devid != devid)
			{
				int can;
				cures = cudaDeviceCanAccessPeer(&can, devid, worker->devid);
				(void) cudaGetLastError();
				if (!cures && can)
				{
					cures = cudaDeviceEnablePeerAccess(worker->devid, 0);
					(void) cudaGetLastError();
					if (!cures)
					{
						_STARPU_DEBUG("Enabled GPU-Direct %d -> %d\n", worker->devid, devid);
						/* direct copies are made from the destination, see link_supports_direct_transfers */
						starpu_bus_set_direct(_starpu_cuda_bus_ids[worker->devid+STARPU_MAXNUMANODES][devid+STARPU_MAXNUMANODES], 1);
					}
				}
			}
		}
	}
#endif

	/* force CUDA to initialize the context for real */
	cures = cudaFree(0);
	if (STARPU_UNLIKELY(cures))
	{
		if (cures == cudaErrorDevicesUnavailable)
		{
			_STARPU_MSG("All CUDA-capable devices are busy or unavailable\n");
			exit(77);
		}
		STARPU_CUDA_REPORT_ERROR(cures);
	}

	cures = cudaGetDeviceProperties(&props[devid], devid);
	if (STARPU_UNLIKELY(cures))
		STARPU_CUDA_REPORT_ERROR(cures);
#ifdef STARPU_HAVE_CUDA_MEMCPY_PEER
	if (props[devid].computeMode == cudaComputeModeExclusive)
	{
		_STARPU_MSG("CUDA is in EXCLUSIVE-THREAD mode, but StarPU was built with multithread GPU control support, please either ask your administrator to use EXCLUSIVE-PROCESS mode (which should really be fine), or reconfigure with --disable-cuda-memcpy-peer but that will disable the memcpy-peer optimizations\n");
		STARPU_ABORT();
	}
#endif

	cures = starpu_cudaStreamCreate(&in_transfer_streams[devid]);
	if (STARPU_UNLIKELY(cures))
		STARPU_CUDA_REPORT_ERROR(cures);
	cures = starpu_cudaStreamCreate(&out_transfer_streams[devid]);
	if (STARPU_UNLIKELY(cures))
		STARPU_CUDA_REPORT_ERROR(cures);

	int i;
	for (i = 0; i < ncudagpus; i++)
	{
		cures = starpu_cudaStreamCreate(&in_peer_transfer_streams[i][devid]);
		if (STARPU_UNLIKELY(cures))
			STARPU_CUDA_REPORT_ERROR(cures);
	}
#endif /* !STARPU_SIMGRID */

	STARPU_PTHREAD_MUTEX_LOCK(&cuda_device_init_mutex[devid]);
	cuda_device_init[devid] = INITIALIZED;
	STARPU_PTHREAD_COND_BROADCAST(&cuda_device_init_cond[devid]);
	STARPU_PTHREAD_MUTEX_UNLOCK(&cuda_device_init_mutex[devid]);

	_starpu_cuda_limit_gpu_mem_if_needed(devid);
	_starpu_memory_manager_set_global_memory_size(memnode, _starpu_cuda_get_global_mem_size(devid));
}
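
/* Create the per-worker kernel stream and the pipeline events used to detect
 * asynchronous task termination. */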
static void init_worker_context(unsigned workerid, unsigned devid STARPU_ATTRIBUTE_UNUSED)
{
	int j;
#ifdef STARPU_SIMGRID
	for (j = 0; j < STARPU_MAX_PIPELINE; j++)
		task_finished[workerid][j] = 0;
#else /* !STARPU_SIMGRID */
	cudaError_t cures;

	starpu_cuda_set_device(devid);
	for (j = 0; j < STARPU_MAX_PIPELINE; j++)
	{
		cures = cudaEventCreateWithFlags(&task_events[workerid][j], cudaEventDisableTiming);
		if (STARPU_UNLIKELY(cures))
			STARPU_CUDA_REPORT_ERROR(cures);
	}

	cures = starpu_cudaStreamCreate(&streams[workerid]);
	if (STARPU_UNLIKELY(cures))
		STARPU_CUDA_REPORT_ERROR(cures);
#endif /* !STARPU_SIMGRID */
}

#ifndef STARPU_SIMGRID
static void deinit_device_context(unsigned devid)
{
	int i;

	starpu_cuda_set_device(devid);
	cudaStreamDestroy(in_transfer_streams[devid]);
	cudaStreamDestroy(out_transfer_streams[devid]);
	for (i = 0; i < ncudagpus; i++)
	{
		cudaStreamDestroy(in_peer_transfer_streams[i][devid]);
	}
}
#endif /* !STARPU_SIMGRID */

static void deinit_worker_context(unsigned workerid, unsigned devid STARPU_ATTRIBUTE_UNUSED)
{
	unsigned j;
#ifdef STARPU_SIMGRID
	for (j = 0; j < STARPU_MAX_PIPELINE; j++)
		task_finished[workerid][j] = 0;
#else /* STARPU_SIMGRID */
	starpu_cuda_set_device(devid);
	for (j = 0; j < STARPU_MAX_PIPELINE; j++)
		cudaEventDestroy(task_events[workerid][j]);
	cudaStreamDestroy(streams[workerid]);
#endif /* STARPU_SIMGRID */
}

/* Return the number of devices usable in the system.
 * The value returned cannot be greater than MAXCUDADEVS */
unsigned _starpu_get_cuda_device_count(void)
{
	int cnt;
#ifdef STARPU_SIMGRID
	cnt = _starpu_simgrid_get_nbhosts("CUDA");
#else
	cudaError_t cures;
	cures = cudaGetDeviceCount(&cnt);
	if (STARPU_UNLIKELY(cures))
		return 0;
#endif

	if (cnt > STARPU_MAXCUDADEVS)
	{
		_STARPU_MSG("# Warning: %d CUDA devices available. Only %d enabled. Use configure option --enable-maxcudadev=xxx to update the maximum value of supported CUDA devices.\n", cnt, STARPU_MAXCUDADEVS);
		cnt = STARPU_MAXCUDADEVS;
	}
	return (unsigned)cnt;
}

/* This is run from initialize to determine the number of CUDA devices */
void _starpu_init_cuda(void)
{
	if (ncudagpus < 0)
	{
		ncudagpus = _starpu_get_cuda_device_count();
		STARPU_ASSERT(ncudagpus <= STARPU_MAXCUDADEVS);
	}
}

static int start_job_on_cuda(struct _starpu_job *j, struct _starpu_worker *worker, unsigned char pipeline_idx STARPU_ATTRIBUTE_UNUSED)
{
	STARPU_ASSERT(j);
	struct starpu_task *task = j->task;

	int profiling = starpu_profiling_status_get();

	STARPU_ASSERT(task);
	struct starpu_codelet *cl = task->cl;
	STARPU_ASSERT(cl);

	_starpu_set_local_worker_key(worker);
	_starpu_set_current_task(task);

	if (worker->ntasks == 1)
	{
		/* We are alone in the pipeline, the kernel will start now, record it */
		_starpu_driver_start_job(worker, j, &worker->perf_arch, 0, profiling);
	}

#if defined(STARPU_HAVE_CUDA_MEMCPY_PEER) && !defined(STARPU_SIMGRID)
	/* We make sure we do manipulate the proper device */
	starpu_cuda_set_device(worker->devid);
#endif

	starpu_cuda_func_t func = _starpu_task_get_cuda_nth_implementation(cl, j->nimpl);
	STARPU_ASSERT_MSG(func, "when STARPU_CUDA is defined in 'where', cuda_func or cuda_funcs has to be defined");

	if (_starpu_get_disable_kernels() <= 0)
	{
		_STARPU_TRACE_START_EXECUTING();
#ifdef STARPU_SIMGRID
		int async = task->cl->cuda_flags[j->nimpl] & STARPU_CUDA_ASYNC;
		unsigned workerid = worker->workerid;
		if (cl->flags & STARPU_CODELET_SIMGRID_EXECUTE && !async)
			func(_STARPU_TASK_GET_INTERFACES(task), task->cl_arg);
		else if (cl->flags & STARPU_CODELET_SIMGRID_EXECUTE_AND_INJECT && !async)
		{
			_SIMGRID_TIMER_BEGIN(1);
			func(_STARPU_TASK_GET_INTERFACES(task), task->cl_arg);
			_SIMGRID_TIMER_END;
		}
		else
		{
			struct _starpu_sched_ctx *sched_ctx = _starpu_sched_ctx_get_sched_ctx_for_worker_and_job(worker, j);
			_starpu_simgrid_submit_job(workerid, sched_ctx->id, j, &worker->perf_arch, NAN, NAN,
						   async ? &task_finished[workerid][pipeline_idx] : NULL);
		}
#else
#ifdef HAVE_NVMLDEVICEGETTOTALENERGYCONSUMPTION
		unsigned long long energy_start = 0;
		nvmlReturn_t nvmlRet = -1;
		if (profiling && task->profiling_info)
		{
			nvmlRet = nvmlDeviceGetTotalEnergyConsumption(nvmlDev[worker->devid], &energy_start);
			if (nvmlRet == NVML_SUCCESS)
				task->profiling_info->energy_consumed = energy_start / 1000.;
		}
#endif
		func(_STARPU_TASK_GET_INTERFACES(task), task->cl_arg);
#endif
		_STARPU_TRACE_END_EXECUTING();
	}

	return 0;
}
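
/* Bookkeeping at task termination: finalize the energy measurement, pop the
 * task from the pipeline, record feedback, and release the task's data. */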
static void finish_job_on_cuda(struct _starpu_job *j, struct _starpu_worker *worker)
{
	int profiling = starpu_profiling_status_get();

#ifdef HAVE_NVMLDEVICEGETTOTALENERGYCONSUMPTION
	if (profiling && j->task->profiling_info && j->task->profiling_info->energy_consumed)
	{
		unsigned long long energy_end;
		nvmlReturn_t nvmlRet;
		nvmlRet = nvmlDeviceGetTotalEnergyConsumption(nvmlDev[worker->devid], &energy_end);
#ifdef STARPU_DEVEL
#warning TODO: measure idle consumption to subtract it
#endif
		if (nvmlRet == NVML_SUCCESS)
			j->task->profiling_info->energy_consumed =
				(energy_end / 1000. - j->task->profiling_info->energy_consumed);
	}
#endif
	_starpu_set_current_task(NULL);
	if (worker->pipeline_length)
		worker->current_tasks[worker->first_task] = NULL;
	else
		worker->current_task = NULL;
	worker->first_task = (worker->first_task + 1) % STARPU_MAX_PIPELINE;
	worker->ntasks--;

	_starpu_driver_end_job(worker, j, &worker->perf_arch, 0, profiling);

	struct _starpu_sched_ctx *sched_ctx = _starpu_sched_ctx_get_sched_ctx_for_worker_and_job(worker, j);
	if (!sched_ctx)
		sched_ctx = _starpu_get_sched_ctx_struct(j->task->sched_ctx);
	if (!sched_ctx->sched_policy)
		_starpu_driver_update_job_feedback(j, worker, &sched_ctx->perf_arch, profiling);
	else
		_starpu_driver_update_job_feedback(j, worker, &worker->perf_arch, profiling);

	_starpu_push_task_output(j);

	_starpu_handle_job_termination(j);
}

/* Execute a job, up to completion for synchronous jobs */
static void execute_job_on_cuda(struct starpu_task *task, struct _starpu_worker *worker)
{
	int workerid = worker->workerid;
	int res;

	struct _starpu_job *j = _starpu_get_job_associated_to_task(task);

	unsigned char pipeline_idx = (worker->first_task + worker->ntasks - 1) % STARPU_MAX_PIPELINE;

	res = start_job_on_cuda(j, worker, pipeline_idx);

	if (res)
	{
		switch (res)
		{
		case -EAGAIN:
			_STARPU_DISP("ouch, CUDA could not actually run task %p, putting it back...\n", task);
			_starpu_push_task_to_workers(task);
			STARPU_ABORT();
		default:
			STARPU_ABORT();
		}
	}

#ifndef STARPU_SIMGRID
	if (!used_stream[workerid])
	{
		used_stream[workerid] = 1;
		_STARPU_DISP("Warning: starpu_cuda_get_local_stream() was not used to submit a kernel to CUDA on worker %d. CUDA will thus introduce a lot of useless synchronizations, which will prevent proper overlapping of data transfers and kernel execution. See the CUDA-specific part of the 'Check List When Performance Are Not There' section of the StarPU handbook\n", workerid);
	}
#endif

	if (task->cl->cuda_flags[j->nimpl] & STARPU_CUDA_ASYNC)
	{
		if (worker->pipeline_length == 0)
		{
#ifdef STARPU_SIMGRID
			_starpu_simgrid_wait_tasks(workerid);
#else
			/* Forced synchronous execution */
			cudaStreamSynchronize(starpu_cuda_get_local_stream());
#endif
			finish_job_on_cuda(j, worker);
		}
		else
		{
#ifndef STARPU_SIMGRID
			/* Record event to synchronize with task termination later */
			cudaError_t cures = cudaEventRecord(task_events[workerid][pipeline_idx], starpu_cuda_get_local_stream());
			if (STARPU_UNLIKELY(cures))
				STARPU_CUDA_REPORT_ERROR(cures);
#endif
#ifdef STARPU_USE_FXT
			int k;
			for (k = 0; k < (int) worker->set->nworkers; k++)
				if (worker->set->workers[k].ntasks == worker->set->workers[k].pipeline_length)
					break;
			if (k == (int) worker->set->nworkers)
				/* Everybody busy */
				_STARPU_TRACE_START_EXECUTING();
#endif
		}
	}
	else
	/* Synchronous execution */
	{
#if !defined(STARPU_SIMGRID)
		STARPU_ASSERT_MSG(cudaStreamQuery(starpu_cuda_get_local_stream()) == cudaSuccess, "Unless when using the STARPU_CUDA_ASYNC flag, CUDA codelets have to wait for termination of their kernels on the starpu_cuda_get_local_stream() stream");
#endif
		finish_job_on_cuda(j, worker);
	}
}

/* This is run from the driver to initialize the driver CUDA context */
int _starpu_cuda_driver_init(struct _starpu_worker_set *worker_set)
{
	struct _starpu_worker *worker0 = &worker_set->workers[0];
	int lastdevid = -1;
	unsigned i;

	_starpu_driver_start(worker0, STARPU_CUDA_WORKER, 0);
	_starpu_set_local_worker_set_key(worker_set);

#ifdef STARPU_USE_FXT
	for (i = 1; i < worker_set->nworkers; i++)
		_starpu_worker_start(&worker_set->workers[i], STARPU_CUDA_WORKER, 0);
#endif

	for (i = 0; i < worker_set->nworkers; i++)
	{
		struct _starpu_worker *worker = &worker_set->workers[i];
		unsigned devid = worker->devid;
		unsigned memnode = worker->memory_node;
		if ((int) devid == lastdevid)
		{
#ifdef STARPU_SIMGRID
			STARPU_ASSERT_MSG(0, "Simgrid mode does not support concurrent kernel execution yet\n");
#endif /* !STARPU_SIMGRID */
			/* Already initialized */
			continue;
		}
		lastdevid = devid;
		init_device_context(devid, memnode);

#ifndef STARPU_SIMGRID
		if (worker->config->topology.nworker[STARPU_CUDA_WORKER][devid] > 1 && props[devid].concurrentKernels == 0)
			_STARPU_DISP("Warning: STARPU_NWORKER_PER_CUDA is %u, but CUDA device %u does not support concurrent kernel execution!\n", worker_set->nworkers, devid);
#endif /* !STARPU_SIMGRID */
	}

	/* one more time to avoid hacks from third party lib :) */
	_starpu_bind_thread_on_cpu(worker0->bindid, worker0->workerid, NULL);

	for (i = 0; i < worker_set->nworkers; i++)
	{
		struct _starpu_worker *worker = &worker_set->workers[i];
		unsigned devid = worker->devid;
		unsigned workerid = worker->workerid;
		unsigned subdev = i % _starpu_get_machine_config()->topology.nworker[STARPU_CUDA_WORKER][devid];

		float size = (float) global_mem[devid] / (1<<30);
#ifdef STARPU_SIMGRID
		const char *devname = "Simgrid";
#else
		/* get the device's name */
		char devname[64];
		strncpy(devname, props[devid].name, 63);
		devname[63] = 0;
#endif

#if defined(STARPU_HAVE_BUSID) && !defined(STARPU_SIMGRID)
#if defined(STARPU_HAVE_DOMAINID) && !defined(STARPU_SIMGRID)
#ifdef HAVE_LIBNVIDIA_ML
		char busid[13];
		snprintf(busid, sizeof(busid), "%04x:%02x:%02x.0", props[devid].pciDomainID, props[devid].pciBusID, props[devid].pciDeviceID);
		nvmlDeviceGetHandleByPciBusId(busid, &nvmlDev[devid]);
#endif
		if (props[devid].pciDomainID)
			snprintf(worker->name, sizeof(worker->name), "CUDA %u.%u (%s %.1f GiB %04x:%02x:%02x.0)", devid, subdev, devname, size, props[devid].pciDomainID, props[devid].pciBusID, props[devid].pciDeviceID);
		else
#endif
			snprintf(worker->name, sizeof(worker->name), "CUDA %u.%u (%s %.1f GiB %02x:%02x.0)", devid, subdev, devname, size, props[devid].pciBusID, props[devid].pciDeviceID);
#else
		snprintf(worker->name, sizeof(worker->name), "CUDA %u.%u (%s %.1f GiB)", devid, subdev, devname, size);
#endif
		snprintf(worker->short_name, sizeof(worker->short_name), "CUDA %u.%u", devid, subdev);
		_STARPU_DEBUG("cuda (%s) dev id %u worker %u thread is ready to run on CPU %d !\n", devname, devid, subdev, worker->bindid);

		worker->pipeline_length = starpu_get_env_number_default("STARPU_CUDA_PIPELINE", 2);
		if (worker->pipeline_length > STARPU_MAX_PIPELINE)
		{
			_STARPU_DISP("Warning: STARPU_CUDA_PIPELINE is %u, but STARPU_MAX_PIPELINE is only %u", worker->pipeline_length, STARPU_MAX_PIPELINE);
			worker->pipeline_length = STARPU_MAX_PIPELINE;
		}
#if !defined(STARPU_SIMGRID) && !defined(STARPU_NON_BLOCKING_DRIVERS)
		if (worker->pipeline_length >= 1)
		{
			/* We need non-blocking drivers, to poll for CUDA task
			 * termination */
			_STARPU_DISP("Warning: reducing STARPU_CUDA_PIPELINE to 0 because blocking drivers are enabled (and simgrid is not enabled)\n");
			worker->pipeline_length = 0;
		}
#endif
		init_worker_context(workerid, worker->devid);

		_STARPU_TRACE_WORKER_INIT_END(workerid);
	}

	{
		char thread_name[16];
		snprintf(thread_name, sizeof(thread_name), "CUDA %u", worker0->devid);
		starpu_pthread_setname(thread_name);
	}

	/* tell the main thread that this one is ready */
	STARPU_PTHREAD_MUTEX_LOCK(&worker0->mutex);
	worker0->status = STATUS_UNKNOWN;
	worker0->worker_is_initialized = 1;
	STARPU_PTHREAD_COND_SIGNAL(&worker0->ready_cond);
	STARPU_PTHREAD_MUTEX_UNLOCK(&worker0->mutex);

	/* tell the main thread that this one is ready */
	STARPU_PTHREAD_MUTEX_LOCK(&worker_set->mutex);
	worker_set->set_is_initialized = 1;
	STARPU_PTHREAD_COND_SIGNAL(&worker_set->ready_cond);
	STARPU_PTHREAD_MUTEX_UNLOCK(&worker_set->mutex);

	return 0;
}
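
/* One iteration of the CUDA driver loop: check pending input transfers,
 * poll queued tasks for kernel termination, make datawizard progress, and
 * fetch input for any newly scheduled tasks. */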
int _starpu_cuda_driver_run_once(struct _starpu_worker_set *worker_set)
{
	struct _starpu_worker *worker0 = &worker_set->workers[0];
	struct starpu_task *tasks[worker_set->nworkers], *task;
	struct _starpu_job *j;
	int i, res;

	int idle_tasks, idle_transfers;

#ifdef STARPU_SIMGRID
	starpu_pthread_wait_reset(&worker0->wait);
#endif

	_starpu_set_local_worker_key(worker0);

	/* First poll for completed jobs */
	idle_tasks = 0;
	idle_transfers = 0;
	for (i = 0; i < (int) worker_set->nworkers; i++)
	{
		struct _starpu_worker *worker = &worker_set->workers[i];
		int workerid = worker->workerid;
		unsigned memnode = worker->memory_node;

		if (!worker->ntasks)
			idle_tasks++;
		if (!worker->task_transferring)
			idle_transfers++;

		if (!worker->ntasks && !worker->task_transferring)
		{
			/* Even nothing to test */
			continue;
		}

		/* First test for transfers pending for next task */
		task = worker->task_transferring;
		if (task && worker->nb_buffers_transferred == worker->nb_buffers_totransfer)
		{
			STARPU_RMB();
			_STARPU_TRACE_END_PROGRESS(memnode);
			j = _starpu_get_job_associated_to_task(task);

			_starpu_set_local_worker_key(worker);
			_starpu_fetch_task_input_tail(task, j, worker);
			_starpu_set_worker_status(worker, STATUS_UNKNOWN);
			/* Reset it */
			worker->task_transferring = NULL;

			if (worker->ntasks > 1 && !(task->cl->cuda_flags[j->nimpl] & STARPU_CUDA_ASYNC))
			{
				/* We have to execute a non-asynchronous task but we
				 * still have tasks in the pipeline... Record it to
				 * prevent more tasks from coming, and do it later */
				worker->pipeline_stuck = 1;
			}
			else
			{
				execute_job_on_cuda(task, worker);
			}
			_STARPU_TRACE_START_PROGRESS(memnode);
		}

		/* Then test for termination of queued tasks */
		if (!worker->ntasks)
			/* No queued task */
			continue;

		if (worker->pipeline_length)
			task = worker->current_tasks[worker->first_task];
		else
			task = worker->current_task;
		if (task == worker->task_transferring)
			/* Next task is still pending transfer */
			continue;

		/* On-going asynchronous task, check for its termination first */
#ifdef STARPU_SIMGRID
		if (task_finished[workerid][worker->first_task])
#else /* !STARPU_SIMGRID */
		cudaError_t cures = cudaEventQuery(task_events[workerid][worker->first_task]);
		if (cures != cudaSuccess)
		{
			STARPU_ASSERT_MSG(cures == cudaErrorNotReady, "CUDA error on task %p, codelet %p (%s): %s (%d)", task, task->cl, _starpu_codelet_get_model_name(task->cl), cudaGetErrorString(cures), cures);
		}
		else
#endif /* !STARPU_SIMGRID */
		{
			_STARPU_TRACE_END_PROGRESS(memnode);
			/* Asynchronous task completed! */
			_starpu_set_local_worker_key(worker);
			finish_job_on_cuda(_starpu_get_job_associated_to_task(task), worker);
			/* See next task if any */
			if (worker->ntasks)
			{
				if (worker->current_tasks[worker->first_task] != worker->task_transferring)
				{
					task = worker->current_tasks[worker->first_task];
					j = _starpu_get_job_associated_to_task(task);
					if (task->cl->cuda_flags[j->nimpl] & STARPU_CUDA_ASYNC)
					{
						/* An asynchronous task, it was already
						 * queued, it's now running, record its start time. */
						_starpu_driver_start_job(worker, j, &worker->perf_arch, 0, starpu_profiling_status_get());
					}
					else
					{
						/* A synchronous task, we have finished
						 * flushing the pipeline, we can now at
						 * last execute it. */
						_STARPU_TRACE_EVENT("sync_task");
						execute_job_on_cuda(task, worker);
						_STARPU_TRACE_EVENT("end_sync_task");
						worker->pipeline_stuck = 0;
					}
				}
				else
					/* Data for next task didn't have time to finish transferring :/ */
					_STARPU_TRACE_WORKER_START_FETCH_INPUT(NULL, workerid);
			}
#ifdef STARPU_USE_FXT
			int k;
			for (k = 0; k < (int) worker_set->nworkers; k++)
				if (worker_set->workers[k].ntasks)
					break;
			if (k == (int) worker_set->nworkers)
				/* Everybody busy */
				_STARPU_TRACE_END_EXECUTING()
#endif
			_STARPU_TRACE_START_PROGRESS(memnode);
		}

		if (!worker->pipeline_length || worker->ntasks < worker->pipeline_length)
			idle_tasks++;
	}

#if defined(STARPU_NON_BLOCKING_DRIVERS) && !defined(STARPU_SIMGRID)
	if (!idle_tasks)
	{
		/* No task ready yet, no better thing to do than waiting */
		__starpu_datawizard_progress(1, !idle_transfers);
		return 0;
	}
#endif

	/* Something done, make some progress */
	res = !idle_tasks || !idle_transfers;
	res |= __starpu_datawizard_progress(1, 1);

	/* And pull tasks */
	res |= _starpu_get_multi_worker_task(worker_set->workers, tasks, worker_set->nworkers, worker0->memory_node);

#ifdef STARPU_SIMGRID
	if (!res)
		starpu_pthread_wait_wait(&worker0->wait);
#endif

	for (i = 0; i < (int) worker_set->nworkers; i++)
	{
		struct _starpu_worker *worker = &worker_set->workers[i];
		unsigned memnode STARPU_ATTRIBUTE_UNUSED = worker->memory_node;

		task = tasks[i];
		if (!task)
			continue;

		j = _starpu_get_job_associated_to_task(task);

		/* can CUDA do that task ? */
		if (!_STARPU_MAY_PERFORM(j, CUDA))
		{
			/* this is neither a cuda nor a cublas task */
			_starpu_worker_refuse_task(worker, task);
			continue;
		}

		/* Fetch data asynchronously */
		_STARPU_TRACE_END_PROGRESS(memnode);
		_starpu_set_local_worker_key(worker);
		res = _starpu_fetch_task_input(task, j, 1);
		STARPU_ASSERT(res == 0);
		_STARPU_TRACE_START_PROGRESS(memnode);
	}

	return 0;
}

int _starpu_cuda_driver_deinit(struct _starpu_worker_set *worker_set)
{
	int lastdevid = -1;
	unsigned i;
	_STARPU_TRACE_WORKER_DEINIT_START;

	for (i = 0; i < worker_set->nworkers; i++)
	{
		struct _starpu_worker *worker = &worker_set->workers[i];
		unsigned devid = worker->devid;
		unsigned memnode = worker->memory_node;
		unsigned usersleft;
		if ((int) devid == lastdevid)
			/* Already initialized */
			continue;
		lastdevid = devid;

		STARPU_PTHREAD_MUTEX_LOCK(&cuda_device_init_mutex[devid]);
		usersleft = --cuda_device_users[devid];
		STARPU_PTHREAD_MUTEX_UNLOCK(&cuda_device_init_mutex[devid]);

		if (!usersleft)
		{
			/* I'm last, deinitialize device */
			_starpu_handle_all_pending_node_data_requests(memnode);

			/* In case there remains some memory that was automatically
			 * allocated by StarPU, we release it now. Note that data
			 * coherency is not maintained anymore at that point ! */
			_starpu_free_all_automatically_allocated_buffers(memnode);

			_starpu_malloc_shutdown(memnode);

#ifndef STARPU_SIMGRID
			deinit_device_context(devid);
#endif /* !STARPU_SIMGRID */
		}
		STARPU_PTHREAD_MUTEX_LOCK(&cuda_device_init_mutex[devid]);
		cuda_device_init[devid] = UNINITIALIZED;
		STARPU_PTHREAD_MUTEX_UNLOCK(&cuda_device_init_mutex[devid]);
	}

	for (i = 0; i < worker_set->nworkers; i++)
	{
		struct _starpu_worker *worker = &worker_set->workers[i];
		unsigned workerid = worker->workerid;
		deinit_worker_context(workerid, worker->devid);
	}

	worker_set->workers[0].worker_is_initialized = 0;
	_STARPU_TRACE_WORKER_DEINIT_END(STARPU_CUDA_WORKER);

	return 0;
}
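
/* Entry point of the CUDA worker thread: initialize the driver, then loop on
 * _starpu_cuda_driver_run_once() until StarPU shuts down. */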
void *_starpu_cuda_worker(void *_arg)
{
	struct _starpu_worker_set *worker_set = _arg;
	unsigned i;

	_starpu_cuda_driver_init(worker_set);
	for (i = 0; i < worker_set->nworkers; i++)
		_STARPU_TRACE_START_PROGRESS(worker_set->workers[i].memory_node);
	while (_starpu_machine_is_running())
	{
		_starpu_may_pause();
		_starpu_cuda_driver_run_once(worker_set);
	}
	for (i = 0; i < worker_set->nworkers; i++)
		_STARPU_TRACE_END_PROGRESS(worker_set->workers[i].memory_node);
	_starpu_cuda_driver_deinit(worker_set);

	return NULL;
}

#ifdef STARPU_USE_CUDA
void starpu_cublas_report_error(const char *func, const char *file, int line, int status)
{
	char *errormsg;
	switch (status)
	{
	case CUBLAS_STATUS_SUCCESS:
		errormsg = "success";
		break;
	case CUBLAS_STATUS_NOT_INITIALIZED:
		errormsg = "not initialized";
		break;
	case CUBLAS_STATUS_ALLOC_FAILED:
		errormsg = "alloc failed";
		break;
	case CUBLAS_STATUS_INVALID_VALUE:
		errormsg = "invalid value";
		break;
	case CUBLAS_STATUS_ARCH_MISMATCH:
		errormsg = "arch mismatch";
		break;
	case CUBLAS_STATUS_EXECUTION_FAILED:
		errormsg = "execution failed";
		break;
	case CUBLAS_STATUS_INTERNAL_ERROR:
		errormsg = "internal error";
		break;
	default:
		errormsg = "unknown error";
		break;
	}
	_STARPU_MSG("oops in %s (%s:%d)... %d: %s \n", func, file, line, status, errormsg);
	STARPU_ABORT();
}

void starpu_cuda_report_error(const char *func, const char *file, int line, cudaError_t status)
{
	const char *errormsg = cudaGetErrorString(status);
	_STARPU_ERROR("oops in %s (%s:%d)... %d: %s \n", func, file, line, status, errormsg);
}
#endif /* STARPU_USE_CUDA */

#ifdef STARPU_USE_CUDA
int
starpu_cuda_copy_async_sync(void *src_ptr, unsigned src_node,
			    void *dst_ptr, unsigned dst_node,
			    size_t ssize, cudaStream_t stream,
			    enum cudaMemcpyKind kind)
{
#ifdef STARPU_HAVE_CUDA_MEMCPY_PEER
	int peer_copy = 0;
	int src_dev = -1, dst_dev = -1;
#endif
	cudaError_t cures = 0;

	if (kind == cudaMemcpyDeviceToDevice && src_node != dst_node)
	{
#ifdef STARPU_HAVE_CUDA_MEMCPY_PEER
		peer_copy = 1;
		src_dev = starpu_memory_node_get_devid(src_node);
		dst_dev = starpu_memory_node_get_devid(dst_node);
#else
		STARPU_ABORT();
#endif
	}

	if (stream)
	{
		double start;
		starpu_interface_start_driver_copy_async(src_node, dst_node, &start);
#ifdef STARPU_HAVE_CUDA_MEMCPY_PEER
		if (peer_copy)
		{
			cures = cudaMemcpyPeerAsync((char *) dst_ptr, dst_dev,
						    (char *) src_ptr, src_dev,
						    ssize, stream);
		}
		else
#endif
		{
			cures = cudaMemcpyAsync((char *)dst_ptr, (char *)src_ptr, ssize, kind, stream);
		}
		(void) cudaGetLastError();
		starpu_interface_end_driver_copy_async(src_node, dst_node, start);
	}

	/* Test if the asynchronous copy has failed or if the caller only asked for a synchronous copy */
	if (stream == NULL || cures)
	{
		/* do it in a synchronous fashion */
#ifdef STARPU_HAVE_CUDA_MEMCPY_PEER
		if (peer_copy)
		{
			cures = cudaMemcpyPeer((char *) dst_ptr, dst_dev,
					       (char *) src_ptr, src_dev,
					       ssize);
		}
		else
#endif
		{
			cures = cudaMemcpy((char *)dst_ptr, (char *)src_ptr, ssize, kind);
		}
		(void) cudaGetLastError();

		if (!cures)
			cures = cudaDeviceSynchronize();
		if (STARPU_UNLIKELY(cures))
			STARPU_CUDA_REPORT_ERROR(cures);

		return 0;
	}

	return -EAGAIN;
}
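
/* Usage sketch: with a non-NULL stream the copy is attempted asynchronously,
 * and -EAGAIN means it was successfully queued (completion must then be
 * waited for on the stream, e.g. through an event); a NULL stream, or an
 * asynchronous failure, falls back to a synchronous copy and returns 0:
 *	if (starpu_cuda_copy_async_sync(src, src_node, dst, dst_node, size,
 *					stream, cudaMemcpyDeviceToHost) == -EAGAIN)
 *		cudaStreamSynchronize(stream);
 */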

int
starpu_cuda_copy2d_async_sync(void *src_ptr, unsigned src_node,
			      void *dst_ptr, unsigned dst_node,
			      size_t blocksize,
			      size_t numblocks, size_t ld_src, size_t ld_dst,
			      cudaStream_t stream, enum cudaMemcpyKind kind)
{
#ifdef STARPU_HAVE_CUDA_MEMCPY_PEER
	int peer_copy = 0;
	int src_dev = -1, dst_dev = -1;
#endif
	cudaError_t cures = 0;

	if (kind == cudaMemcpyDeviceToDevice && src_node != dst_node)
	{
#ifdef STARPU_HAVE_CUDA_MEMCPY_PEER
# ifdef BUGGED_MEMCPY3D
		STARPU_ABORT_MSG("CUDA memcpy 3D peer buggy, but core triggered one?!");
# endif
		peer_copy = 1;
		src_dev = starpu_memory_node_get_devid(src_node);
		dst_dev = starpu_memory_node_get_devid(dst_node);
#else
		STARPU_ABORT_MSG("CUDA memcpy 3D peer not available, but core triggered one ?!");
#endif
	}

#ifdef STARPU_HAVE_CUDA_MEMCPY_PEER
	if (peer_copy)
	{
		struct cudaMemcpy3DPeerParms p;
		memset(&p, 0, sizeof(p));

		p.srcDevice = src_dev;
		p.dstDevice = dst_dev;
		p.srcPtr = make_cudaPitchedPtr((char *)src_ptr, ld_src, blocksize, numblocks);
		p.dstPtr = make_cudaPitchedPtr((char *)dst_ptr, ld_dst, blocksize, numblocks);
		p.extent = make_cudaExtent(blocksize, numblocks, 1);

		if (stream)
		{
			double start;
			starpu_interface_start_driver_copy_async(src_node, dst_node, &start);
			cures = cudaMemcpy3DPeerAsync(&p, stream);
			(void) cudaGetLastError();
		}

		/* Test if the asynchronous copy has failed or if the caller only asked for a synchronous copy */
		if (stream == NULL || cures)
		{
			cures = cudaMemcpy3DPeer(&p);
			(void) cudaGetLastError();

			if (!cures)
				cures = cudaDeviceSynchronize();
			if (STARPU_UNLIKELY(cures))
				STARPU_CUDA_REPORT_ERROR(cures);

			return 0;
		}
	}
	else
#endif
	{
		if (stream)
		{
			double start;
			starpu_interface_start_driver_copy_async(src_node, dst_node, &start);
			cures = cudaMemcpy2DAsync((char *)dst_ptr, ld_dst, (char *)src_ptr, ld_src,
						  blocksize, numblocks, kind, stream);
			starpu_interface_end_driver_copy_async(src_node, dst_node, start);
		}

		/* Test if the asynchronous copy has failed or if the caller only asked for a synchronous copy */
		if (stream == NULL || cures)
		{
			cures = cudaMemcpy2D((char *)dst_ptr, ld_dst, (char *)src_ptr, ld_src,
					     blocksize, numblocks, kind);
			if (!cures)
				cures = cudaDeviceSynchronize();
			if (STARPU_UNLIKELY(cures))
				STARPU_CUDA_REPORT_ERROR(cures);

			return 0;
		}
	}

	return -EAGAIN;
}

#if 0
/* CUDA doesn't seem to be providing a way to set ld2?? */
int
starpu_cuda_copy3d_async_sync(void *src_ptr, unsigned src_node,
			      void *dst_ptr, unsigned dst_node,
			      size_t blocksize,
			      size_t numblocks_1, size_t ld1_src, size_t ld1_dst,
			      size_t numblocks_2, size_t ld2_src, size_t ld2_dst,
			      cudaStream_t stream, enum cudaMemcpyKind kind)
{
#ifdef STARPU_HAVE_CUDA_MEMCPY_PEER
	int peer_copy = 0;
	int src_dev = -1, dst_dev = -1;
#endif
	cudaError_t cures = 0;

	if (kind == cudaMemcpyDeviceToDevice && src_node != dst_node)
	{
#ifdef STARPU_HAVE_CUDA_MEMCPY_PEER
		peer_copy = 1;
		src_dev = starpu_memory_node_get_devid(src_node);
		dst_dev = starpu_memory_node_get_devid(dst_node);
#else
		STARPU_ABORT_MSG("CUDA memcpy 3D peer not available, but core triggered one ?!");
#endif
	}

#ifdef STARPU_HAVE_CUDA_MEMCPY_PEER
	if (peer_copy)
	{
		struct cudaMemcpy3DPeerParms p;
		memset(&p, 0, sizeof(p));

		p.srcDevice = src_dev;
		p.dstDevice = dst_dev;
		p.srcPtr = make_cudaPitchedPtr((char *)src_ptr, ld1_src, blocksize, numblocks_1);
		p.dstPtr = make_cudaPitchedPtr((char *)dst_ptr, ld1_dst, blocksize, numblocks_1);
		// FIXME: how to pass ld2_src / ld2_dst ??
		p.extent = make_cudaExtent(blocksize, numblocks_1, numblocks_2);

		if (stream)
		{
			double start;
			starpu_interface_start_driver_copy_async(src_node, dst_node, &start);
			cures = cudaMemcpy3DPeerAsync(&p, stream);
		}

		/* Test if the asynchronous copy has failed or if the caller only asked for a synchronous copy */
		if (stream == NULL || cures)
		{
			cures = cudaMemcpy3DPeer(&p);
			(void) cudaGetLastError();

			if (!cures)
				cures = cudaDeviceSynchronize();
			if (STARPU_UNLIKELY(cures))
				STARPU_CUDA_REPORT_ERROR(cures);

			return 0;
		}
	}
	else
#endif
	{
		struct cudaMemcpy3DParms p;
		memset(&p, 0, sizeof(p));
		p.srcPtr = make_cudaPitchedPtr((char *)src_ptr, ld1_src, blocksize, numblocks_1);
		p.dstPtr = make_cudaPitchedPtr((char *)dst_ptr, ld1_dst, blocksize, numblocks_1);
		// FIXME: how to pass ld2_src / ld2_dst ??
		p.extent = make_cudaExtent(blocksize, numblocks_1, numblocks_2);
		p.kind = kind;

		if (stream)
		{
			double start;
			starpu_interface_start_driver_copy_async(src_node, dst_node, &start);
			cures = cudaMemcpy3DAsync(&p, stream);
			starpu_interface_end_driver_copy_async(src_node, dst_node, start);
		}

		/* Test if the asynchronous copy has failed or if the caller only asked for a synchronous copy */
		if (stream == NULL || cures)
		{
			cures = cudaMemcpy3D(&p);
			if (!cures)
				cures = cudaDeviceSynchronize();
			if (STARPU_UNLIKELY(cures))
				STARPU_CUDA_REPORT_ERROR(cures);

			return 0;
		}
	}

	return -EAGAIN;
}
#endif
#endif /* STARPU_USE_CUDA */

int _starpu_run_cuda(struct _starpu_worker_set *workerarg)
{
	/* Let's go ! */
	_starpu_cuda_worker(workerarg);

	return 0;
}

int _starpu_cuda_driver_init_from_worker(struct _starpu_worker *worker)
{
	return _starpu_cuda_driver_init(worker->set);
}

int _starpu_cuda_run_from_worker(struct _starpu_worker *worker)
{
	return _starpu_run_cuda(worker->set);
}

int _starpu_cuda_driver_run_once_from_worker(struct _starpu_worker *worker)
{
	return _starpu_cuda_driver_run_once(worker->set);
}

int _starpu_cuda_driver_deinit_from_worker(struct _starpu_worker *worker)
{
	return _starpu_cuda_driver_deinit(worker->set);
}

#ifdef STARPU_USE_CUDA
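
/* Non-blocking check of the CUDA event attached to an async channel; the
 * event is destroyed once the transfer is seen as completed. */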
unsigned _starpu_cuda_test_request_completion(struct _starpu_async_channel *async_channel)
{
	cudaEvent_t event;
	cudaError_t cures;
	unsigned success;

	event = (*async_channel).event.cuda_event;
	cures = cudaEventQuery(event);

	success = (cures == cudaSuccess);
	if (success)
		cudaEventDestroy(event);
	else if (cures != cudaErrorNotReady)
		STARPU_CUDA_REPORT_ERROR(cures);

	return success;
}

void _starpu_cuda_wait_request_completion(struct _starpu_async_channel *async_channel)
{
	cudaEvent_t event;
	cudaError_t cures;

	event = (*async_channel).event.cuda_event;

	cures = cudaEventSynchronize(event);
	if (STARPU_UNLIKELY(cures))
		STARPU_CUDA_REPORT_ERROR(cures);

	cures = cudaEventDestroy(event);
	if (STARPU_UNLIKELY(cures))
		STARPU_CUDA_REPORT_ERROR(cures);
}
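
/* Copy a data interface between two CUDA memory nodes, going through the
 * peer transfer stream and recording a CUDA event on the request when the
 * copy can be asynchronous. */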
int _starpu_cuda_copy_interface_from_cuda_to_cuda(starpu_data_handle_t handle, void *src_interface, unsigned src_node, void *dst_interface, unsigned dst_node, struct _starpu_data_request *req)
{
	int src_kind = starpu_node_get_kind(src_node);
	int dst_kind = starpu_node_get_kind(dst_node);
	STARPU_ASSERT(src_kind == STARPU_CUDA_RAM && dst_kind == STARPU_CUDA_RAM);

	int ret = 1;
	cudaError_t cures;
	cudaStream_t stream;
	const struct starpu_data_copy_methods *copy_methods = handle->ops->copy_methods;

	/* CUDA - CUDA transfer */
	if (!req || starpu_asynchronous_copy_disabled() || starpu_asynchronous_cuda_copy_disabled() || !(copy_methods->cuda_to_cuda_async || copy_methods->any_to_any))
	{
		STARPU_ASSERT(copy_methods->cuda_to_cuda || copy_methods->any_to_any);
		/* this is not associated to a request so it's synchronous */
		if (copy_methods->cuda_to_cuda)
			copy_methods->cuda_to_cuda(src_interface, src_node, dst_interface, dst_node);
		else
			copy_methods->any_to_any(src_interface, src_node, dst_interface, dst_node, NULL);
	}
	else
	{
		req->async_channel.node_ops = &_starpu_driver_cuda_node_ops;
		cures = cudaEventCreateWithFlags(&req->async_channel.event.cuda_event, cudaEventDisableTiming);
		if (STARPU_UNLIKELY(cures != cudaSuccess))
			STARPU_CUDA_REPORT_ERROR(cures);

		stream = starpu_cuda_get_peer_transfer_stream(src_node, dst_node);
		if (copy_methods->cuda_to_cuda_async)
			ret = copy_methods->cuda_to_cuda_async(src_interface, src_node, dst_interface, dst_node, stream);
		else
		{
			STARPU_ASSERT(copy_methods->any_to_any);
			ret = copy_methods->any_to_any(src_interface, src_node, dst_interface, dst_node, &req->async_channel);
		}

		cures = cudaEventRecord(req->async_channel.event.cuda_event, stream);
		if (STARPU_UNLIKELY(cures != cudaSuccess))
			STARPU_CUDA_REPORT_ERROR(cures);
	}

	return ret;
}
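/* The function above takes the synchronous path when no request is attached
 * (or asynchronous CUDA copies are disabled, or the interface provides no
 * asynchronous method); otherwise it records a CUDA event on the peer
 * transfer stream so the generic layer can test/wait for completion.  For
 * illustration, a data interface could provide its cuda_to_cuda_async method
 * along these lines (a sketch for a hypothetical contiguous "my_vector"
 * interface; starpu_cuda_copy_async_sync() returns -EAGAIN when the copy was
 * submitted asynchronously on the stream and 0 when it was done
 * synchronously): */
#if 0
static int my_vector_copy_cuda_to_cuda_async(void *src_interface, unsigned src_node,
					     void *dst_interface, unsigned dst_node, cudaStream_t stream)
{
	struct my_vector_interface *src = src_interface;	/* hypothetical interface type */
	struct my_vector_interface *dst = dst_interface;

	return starpu_cuda_copy_async_sync((void *) src->ptr, src_node,
					   (void *) dst->ptr, dst_node,
					   src->nx * src->elemsize,
					   stream, cudaMemcpyDeviceToDevice);
}
#endif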
int _starpu_cuda_copy_interface_from_cuda_to_cpu(starpu_data_handle_t handle, void *src_interface, unsigned src_node, void *dst_interface, unsigned dst_node, struct _starpu_data_request *req)
{
	int src_kind = starpu_node_get_kind(src_node);
	int dst_kind = starpu_node_get_kind(dst_node);
	STARPU_ASSERT(src_kind == STARPU_CUDA_RAM && dst_kind == STARPU_CPU_RAM);

	int ret = 1;
	cudaError_t cures;
	cudaStream_t stream;
	const struct starpu_data_copy_methods *copy_methods = handle->ops->copy_methods;

	/* only the proper CUDA worker thread can initiate this directly ! */
#if !defined(STARPU_HAVE_CUDA_MEMCPY_PEER)
	STARPU_ASSERT(starpu_worker_get_local_memory_node() == src_node);
#endif
	if (!req || starpu_asynchronous_copy_disabled() || starpu_asynchronous_cuda_copy_disabled() || !(copy_methods->cuda_to_ram_async || copy_methods->any_to_any))
	{
		/* this is not associated to a request so it's synchronous */
		STARPU_ASSERT(copy_methods->cuda_to_ram || copy_methods->any_to_any);
		if (copy_methods->cuda_to_ram)
			copy_methods->cuda_to_ram(src_interface, src_node, dst_interface, dst_node);
		else
			copy_methods->any_to_any(src_interface, src_node, dst_interface, dst_node, NULL);
	}
	else
	{
		req->async_channel.node_ops = &_starpu_driver_cuda_node_ops;
		cures = cudaEventCreateWithFlags(&req->async_channel.event.cuda_event, cudaEventDisableTiming);
		if (STARPU_UNLIKELY(cures != cudaSuccess))
			STARPU_CUDA_REPORT_ERROR(cures);

		stream = starpu_cuda_get_out_transfer_stream(src_node);
		if (copy_methods->cuda_to_ram_async)
			ret = copy_methods->cuda_to_ram_async(src_interface, src_node, dst_interface, dst_node, stream);
		else
		{
			STARPU_ASSERT(copy_methods->any_to_any);
			ret = copy_methods->any_to_any(src_interface, src_node, dst_interface, dst_node, &req->async_channel);
		}

		cures = cudaEventRecord(req->async_channel.event.cuda_event, stream);
		if (STARPU_UNLIKELY(cures != cudaSuccess))
			STARPU_CUDA_REPORT_ERROR(cures);
	}

	return ret;
}
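/* Aside: asynchronous device-to-host copies only truly overlap with
 * computation when the host buffer is page-locked; otherwise CUDA degrades to
 * a staged copy.  StarPU normally takes care of pinning the buffers of
 * registered data itself; purely as an illustration, pinning an existing host
 * allocation by hand looks like this (sketch, not StarPU code): */
#if 0
static void pin_host_buffer_example(void *ptr, size_t size)
{
	/* Page-lock an existing host allocation so cudaMemcpyAsync() on it
	 * can proceed asynchronously. */
	cudaError_t cures = cudaHostRegister(ptr, size, cudaHostRegisterPortable);
	if (cures != cudaSuccess)
		STARPU_CUDA_REPORT_ERROR(cures);
}
#endif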
int _starpu_cuda_copy_interface_from_cpu_to_cuda(starpu_data_handle_t handle, void *src_interface, unsigned src_node, void *dst_interface, unsigned dst_node, struct _starpu_data_request *req)
{
	int src_kind = starpu_node_get_kind(src_node);
	int dst_kind = starpu_node_get_kind(dst_node);
	STARPU_ASSERT(src_kind == STARPU_CPU_RAM && dst_kind == STARPU_CUDA_RAM);

	int ret = 1;
	cudaError_t cures;
	cudaStream_t stream;
	const struct starpu_data_copy_methods *copy_methods = handle->ops->copy_methods;

	/* STARPU_CPU_RAM -> STARPU_CUDA_RAM */
	/* only the proper CUDA worker thread can initiate this ! */
#if !defined(STARPU_HAVE_CUDA_MEMCPY_PEER)
	STARPU_ASSERT(starpu_worker_get_local_memory_node() == dst_node);
#endif
	if (!req || starpu_asynchronous_copy_disabled() || starpu_asynchronous_cuda_copy_disabled() ||
	    !(copy_methods->ram_to_cuda_async || copy_methods->any_to_any))
	{
		/* this is not associated to a request so it's synchronous */
		STARPU_ASSERT(copy_methods->ram_to_cuda || copy_methods->any_to_any);
		if (copy_methods->ram_to_cuda)
			copy_methods->ram_to_cuda(src_interface, src_node, dst_interface, dst_node);
		else
			copy_methods->any_to_any(src_interface, src_node, dst_interface, dst_node, NULL);
	}
	else
	{
		req->async_channel.node_ops = &_starpu_driver_cuda_node_ops;
		cures = cudaEventCreateWithFlags(&req->async_channel.event.cuda_event, cudaEventDisableTiming);
		if (STARPU_UNLIKELY(cures != cudaSuccess))
			STARPU_CUDA_REPORT_ERROR(cures);

		stream = starpu_cuda_get_in_transfer_stream(dst_node);
		if (copy_methods->ram_to_cuda_async)
			ret = copy_methods->ram_to_cuda_async(src_interface, src_node, dst_interface, dst_node, stream);
		else
		{
			STARPU_ASSERT(copy_methods->any_to_any);
			ret = copy_methods->any_to_any(src_interface, src_node, dst_interface, dst_node, &req->async_channel);
		}

		cures = cudaEventRecord(req->async_channel.event.cuda_event, stream);
		if (STARPU_UNLIKELY(cures != cudaSuccess))
			STARPU_CUDA_REPORT_ERROR(cures);
	}

	return ret;
}
int _starpu_cuda_copy_data_from_cuda_to_cpu(uintptr_t src, size_t src_offset, unsigned src_node, uintptr_t dst, size_t dst_offset, unsigned dst_node, size_t size, struct _starpu_async_channel *async_channel)
{
	int src_kind = starpu_node_get_kind(src_node);
	int dst_kind = starpu_node_get_kind(dst_node);
	STARPU_ASSERT(src_kind == STARPU_CUDA_RAM && dst_kind == STARPU_CPU_RAM);

	return starpu_cuda_copy_async_sync((void*) (src + src_offset), src_node,
					   (void*) (dst + dst_offset), dst_node,
					   size,
					   async_channel ? starpu_cuda_get_out_transfer_stream(src_node) : NULL,
					   cudaMemcpyDeviceToHost);
}

int _starpu_cuda_copy_data_from_cuda_to_cuda(uintptr_t src, size_t src_offset, unsigned src_node, uintptr_t dst, size_t dst_offset, unsigned dst_node, size_t size, struct _starpu_async_channel *async_channel)
{
	int src_kind = starpu_node_get_kind(src_node);
	int dst_kind = starpu_node_get_kind(dst_node);
	STARPU_ASSERT(src_kind == STARPU_CUDA_RAM && dst_kind == STARPU_CUDA_RAM);

	return starpu_cuda_copy_async_sync((void*) (src + src_offset), src_node,
					   (void*) (dst + dst_offset), dst_node,
					   size,
					   async_channel ? starpu_cuda_get_peer_transfer_stream(src_node, dst_node) : NULL,
					   cudaMemcpyDeviceToDevice);
}

int _starpu_cuda_copy_data_from_cpu_to_cuda(uintptr_t src, size_t src_offset, unsigned src_node, uintptr_t dst, size_t dst_offset, unsigned dst_node, size_t size, struct _starpu_async_channel *async_channel)
{
	int src_kind = starpu_node_get_kind(src_node);
	int dst_kind = starpu_node_get_kind(dst_node);
	STARPU_ASSERT(src_kind == STARPU_CPU_RAM && dst_kind == STARPU_CUDA_RAM);

	return starpu_cuda_copy_async_sync((void*) (src + src_offset), src_node,
					   (void*) (dst + dst_offset), dst_node,
					   size,
					   async_channel ? starpu_cuda_get_in_transfer_stream(dst_node) : NULL,
					   cudaMemcpyHostToDevice);
}
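/* All three helpers above funnel into the public starpu_cuda_copy_async_sync()
 * helper: when a transfer stream is available the copy is submitted
 * asynchronously on it, otherwise a plain blocking memcpy is performed.  In
 * essence (a simplified sketch of that behaviour, not the actual
 * implementation): */
#if 0
static int copy_async_sync_sketch(void *src, void *dst, size_t size,
				  cudaStream_t stream, enum cudaMemcpyKind kind)
{
	if (stream)
	{
		if (cudaMemcpyAsync(dst, src, size, kind, stream) == cudaSuccess)
			return -EAGAIN;	/* submitted; completion signalled via the recorded event */
	}
	/* No stream (or the asynchronous submission failed): block. */
	cudaError_t cures = cudaMemcpy(dst, src, size, kind);
	if (cures != cudaSuccess)
		STARPU_CUDA_REPORT_ERROR(cures);
	return 0;
}
#endif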
int _starpu_cuda_copy2d_data_from_cuda_to_cpu(uintptr_t src, size_t src_offset, unsigned src_node,
					      uintptr_t dst, size_t dst_offset, unsigned dst_node,
					      size_t blocksize, size_t numblocks, size_t ld_src, size_t ld_dst,
					      struct _starpu_async_channel *async_channel)
{
	int src_kind = starpu_node_get_kind(src_node);
	int dst_kind = starpu_node_get_kind(dst_node);
	STARPU_ASSERT(src_kind == STARPU_CUDA_RAM && dst_kind == STARPU_CPU_RAM);

	return starpu_cuda_copy2d_async_sync((void*) (src + src_offset), src_node,
					     (void*) (dst + dst_offset), dst_node,
					     blocksize, numblocks, ld_src, ld_dst,
					     async_channel ? starpu_cuda_get_out_transfer_stream(src_node) : NULL,
					     cudaMemcpyDeviceToHost);
}

int _starpu_cuda_copy2d_data_from_cuda_to_cuda(uintptr_t src, size_t src_offset, unsigned src_node,
					       uintptr_t dst, size_t dst_offset, unsigned dst_node,
					       size_t blocksize, size_t numblocks, size_t ld_src, size_t ld_dst,
					       struct _starpu_async_channel *async_channel)
{
	int src_kind = starpu_node_get_kind(src_node);
	int dst_kind = starpu_node_get_kind(dst_node);
	STARPU_ASSERT(src_kind == STARPU_CUDA_RAM && dst_kind == STARPU_CUDA_RAM);

	return starpu_cuda_copy2d_async_sync((void*) (src + src_offset), src_node,
					     (void*) (dst + dst_offset), dst_node,
					     blocksize, numblocks, ld_src, ld_dst,
					     async_channel ? starpu_cuda_get_peer_transfer_stream(src_node, dst_node) : NULL,
					     cudaMemcpyDeviceToDevice);
}

int _starpu_cuda_copy2d_data_from_cpu_to_cuda(uintptr_t src, size_t src_offset, unsigned src_node,
					      uintptr_t dst, size_t dst_offset, unsigned dst_node,
					      size_t blocksize, size_t numblocks, size_t ld_src, size_t ld_dst,
					      struct _starpu_async_channel *async_channel)
{
	int src_kind = starpu_node_get_kind(src_node);
	int dst_kind = starpu_node_get_kind(dst_node);
	STARPU_ASSERT(src_kind == STARPU_CPU_RAM && dst_kind == STARPU_CUDA_RAM);

	return starpu_cuda_copy2d_async_sync((void*) (src + src_offset), src_node,
					     (void*) (dst + dst_offset), dst_node,
					     blocksize, numblocks, ld_src, ld_dst,
					     async_channel ? starpu_cuda_get_in_transfer_stream(dst_node) : NULL,
					     cudaMemcpyHostToDevice);
}
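/* The 2D variants copy numblocks blocks of blocksize bytes each, with strides
 * ld_src/ld_dst (in bytes) between consecutive blocks on either side.  This
 * maps directly onto CUDA's pitched 2D copies; roughly (illustrative mapping
 * only, not the actual starpu_cuda_copy2d_async_sync implementation): */
#if 0
static cudaError_t copy2d_sketch(void *dst, size_t ld_dst, const void *src, size_t ld_src,
				 size_t blocksize, size_t numblocks,
				 enum cudaMemcpyKind kind, cudaStream_t stream)
{
	/* width = bytes per block, height = number of blocks,
	 * pitches = distance in bytes between the starts of two blocks */
	return cudaMemcpy2DAsync(dst, ld_dst, src, ld_src,
				 blocksize, numblocks, kind, stream);
}
#endif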
#endif /* STARPU_USE_CUDA */
int _starpu_cuda_is_direct_access_supported(unsigned node, unsigned handling_node)
{
	/* GPUs do not always allow direct remote access: only when CUDA 4
	 * peer memcpy support is available do we let two CUDA devices
	 * communicate directly. */
#ifdef STARPU_SIMGRID
	(void) node;
	if (starpu_node_get_kind(handling_node) == STARPU_CUDA_RAM)
	{
		starpu_sg_host_t host = _starpu_simgrid_get_memnode_host(handling_node);
# ifdef STARPU_HAVE_SIMGRID_ACTOR_H
		const char *cuda_memcpy_peer = sg_host_get_property_value(host, "memcpy_peer");
# else
		const char *cuda_memcpy_peer = MSG_host_get_property_value(host, "memcpy_peer");
# endif
		return cuda_memcpy_peer && atoll(cuda_memcpy_peer);
	}
	else
		return 0;
#elif defined(STARPU_HAVE_CUDA_MEMCPY_PEER)
	(void) node;
	enum starpu_node_kind kind = starpu_node_get_kind(handling_node);
	return kind == STARPU_CUDA_RAM;
#else /* !STARPU_HAVE_CUDA_MEMCPY_PEER */
	/* Direct GPU-GPU transfers are not allowed in general */
	(void) node;
	(void) handling_node;
	return 0;
#endif /* STARPU_HAVE_CUDA_MEMCPY_PEER */
}
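/* Even with STARPU_HAVE_CUDA_MEMCPY_PEER, whether two particular devices can
 * reach each other directly depends on the hardware topology.  A sketch of
 * the check/enable dance CUDA requires at runtime (illustrative only; error
 * handling omitted): */
#if 0
static int enable_peer_access_example(int dev, int peer_dev)
{
	int can = 0;
	cudaDeviceCanAccessPeer(&can, dev, peer_dev);
	if (!can)
		return 0;
	cudaSetDevice(dev);
	/* Idempotent apart from cudaErrorPeerAccessAlreadyEnabled. */
	cudaDeviceEnablePeerAccess(peer_dev, 0);
	return 1;
}
#endif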
uintptr_t _starpu_cuda_malloc_on_node(unsigned dst_node, size_t size, int flags)
{
	uintptr_t addr = 0;
	(void) flags;

#if defined(STARPU_USE_CUDA) || defined(STARPU_SIMGRID)
#ifdef STARPU_SIMGRID
	static uintptr_t last[STARPU_MAXNODES];
#ifdef STARPU_DEVEL
#warning TODO: record used memory, using a simgrid property to know the available memory
#endif
	/* Sleep for the allocation */
	STARPU_PTHREAD_MUTEX_LOCK(&cuda_alloc_mutex);
	if (_starpu_simgrid_cuda_malloc_cost())
		starpu_sleep(0.000175);
	if (!last[dst_node])
		last[dst_node] = 1<<10;
	addr = last[dst_node];
	last[dst_node] += size;
	STARPU_ASSERT(last[dst_node] >= addr);
	STARPU_PTHREAD_MUTEX_UNLOCK(&cuda_alloc_mutex);
#else
	unsigned devid = starpu_memory_node_get_devid(dst_node);
#if defined(STARPU_HAVE_CUDA_MEMCPY_PEER)
	starpu_cuda_set_device(devid);
#else
	struct _starpu_worker *worker = _starpu_get_local_worker_key();
	if (!worker || worker->arch != STARPU_CUDA_WORKER || worker->devid != devid)
		STARPU_ASSERT_MSG(0, "CUDA peer access is not available with this version of CUDA");
#endif
	/* Check if there is free memory; refuse the allocation if it would
	 * leave less than a same-sized margin free. */
	size_t cuda_mem_free, cuda_mem_total;
	cudaError_t status;
	status = cudaMemGetInfo(&cuda_mem_free, &cuda_mem_total);
	if (status == cudaSuccess && cuda_mem_free < (size*2))
	{
		addr = 0;
	}
	else
	{
		status = cudaMalloc((void **)&addr, size);
		if (!addr || (status != cudaSuccess))
		{
			if (STARPU_UNLIKELY(status != cudaErrorMemoryAllocation))
				STARPU_CUDA_REPORT_ERROR(status);
			addr = 0;
		}
	}
#endif
#endif
	return addr;
}
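/* A zero return value means "allocation failed", which the memory manager
 * takes as a cue to evict data from the node and retry.  Caller-side pattern,
 * schematically (illustrative only; try_to_evict_something is a hypothetical
 * stand-in for the actual reclaiming machinery): */
#if 0
static uintptr_t alloc_with_retry_example(unsigned node, size_t size)
{
	uintptr_t addr = _starpu_cuda_malloc_on_node(node, size, 0);
	while (!addr && try_to_evict_something(node))
		addr = _starpu_cuda_malloc_on_node(node, size, 0);
	return addr;	/* still 0 if the node is really full */
}
#endif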
void _starpu_cuda_free_on_node(unsigned dst_node, uintptr_t addr, size_t size, int flags)
{
	(void) dst_node;
	(void) addr;
	(void) size;
	(void) flags;

#if defined(STARPU_USE_CUDA) || defined(STARPU_SIMGRID)
#ifdef STARPU_SIMGRID
	STARPU_PTHREAD_MUTEX_LOCK(&cuda_alloc_mutex);
	/* Sleep for the free */
	if (_starpu_simgrid_cuda_malloc_cost())
		starpu_sleep(0.000750);
	STARPU_PTHREAD_MUTEX_UNLOCK(&cuda_alloc_mutex);
	/* CUDA also synchronizes roughly everything on cudaFree */
	_starpu_simgrid_sync_gpus();
#else
	cudaError_t err;
	unsigned devid = starpu_memory_node_get_devid(dst_node);
#if defined(STARPU_HAVE_CUDA_MEMCPY_PEER)
	starpu_cuda_set_device(devid);
#else
	struct _starpu_worker *worker = _starpu_get_local_worker_key();
	if (!worker || worker->arch != STARPU_CUDA_WORKER || worker->devid != devid)
		STARPU_ASSERT_MSG(0, "CUDA peer access is not available with this version of CUDA");
#endif /* STARPU_HAVE_CUDA_MEMCPY_PEER */
	err = cudaFree((void*)addr);
#ifdef STARPU_OPENMP
	/* When StarPU is used as OpenMP Runtime support,
	 * starpu_omp_shutdown() will usually be called from a
	 * destructor, in which case cudaThreadExit() reports a
	 * cudaErrorCudartUnloading here.  There should not
	 * be any remaining tasks running at this point, so
	 * we can probably ignore it without much consequence. */
	if (STARPU_UNLIKELY(err != cudaSuccess && err != cudaErrorCudartUnloading))
		STARPU_CUDA_REPORT_ERROR(err);
#else
	if (STARPU_UNLIKELY(err != cudaSuccess))
		STARPU_CUDA_REPORT_ERROR(err);
#endif /* STARPU_OPENMP */
#endif /* STARPU_SIMGRID */
#endif
}
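/* Note that outside simulation cudaFree() implicitly synchronizes with the
 * device (which the SimGrid branch above models with _starpu_simgrid_sync_gpus),
 * so frees are comparatively expensive; this is one reason device allocations
 * are worth caching and reusing rather than freeing eagerly. */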
struct _starpu_driver_ops _starpu_driver_cuda_ops =
{
	.init = _starpu_cuda_driver_init_from_worker,
	.run = _starpu_cuda_run_from_worker,
	.run_once = _starpu_cuda_driver_run_once_from_worker,
	.deinit = _starpu_cuda_driver_deinit_from_worker
};
#ifdef STARPU_SIMGRID
struct _starpu_node_ops _starpu_driver_cuda_node_ops =
{
	.copy_interface_to[STARPU_CPU_RAM] = NULL,
	.copy_interface_to[STARPU_CUDA_RAM] = NULL,

	.copy_data_to[STARPU_CPU_RAM] = NULL,
	.copy_data_to[STARPU_CUDA_RAM] = NULL,

	.copy2d_data_to[STARPU_CPU_RAM] = NULL,
	.copy2d_data_to[STARPU_CUDA_RAM] = NULL,

	.copy3d_data_to[STARPU_CPU_RAM] = NULL,
	.copy3d_data_to[STARPU_CUDA_RAM] = NULL,

	.wait_request_completion = NULL,
	.test_request_completion = NULL,

	.is_direct_access_supported = _starpu_cuda_is_direct_access_supported,
	.malloc_on_node = _starpu_cuda_malloc_on_node,
	.free_on_node = _starpu_cuda_free_on_node,
	.name = "cuda driver"
};
#else
struct _starpu_node_ops _starpu_driver_cuda_node_ops =
{
	.copy_interface_to[STARPU_CPU_RAM] = _starpu_cuda_copy_interface_from_cuda_to_cpu,
	.copy_interface_to[STARPU_CUDA_RAM] = _starpu_cuda_copy_interface_from_cuda_to_cuda,

	.copy_data_to[STARPU_CPU_RAM] = _starpu_cuda_copy_data_from_cuda_to_cpu,
	.copy_data_to[STARPU_CUDA_RAM] = _starpu_cuda_copy_data_from_cuda_to_cuda,

	.copy2d_data_to[STARPU_CPU_RAM] = _starpu_cuda_copy2d_data_from_cuda_to_cpu,
	.copy2d_data_to[STARPU_CUDA_RAM] = _starpu_cuda_copy2d_data_from_cuda_to_cuda,

#if 0
	/* 3D copies are not hooked up yet */
	.copy3d_data_to[STARPU_CPU_RAM] = _starpu_cuda_copy3d_data_from_cuda_to_cpu,
	.copy3d_data_to[STARPU_CUDA_RAM] = _starpu_cuda_copy3d_data_from_cuda_to_cuda,
#else
	.copy3d_data_to[STARPU_CPU_RAM] = NULL,
	.copy3d_data_to[STARPU_CUDA_RAM] = NULL,
#endif

	.wait_request_completion = _starpu_cuda_wait_request_completion,
	.test_request_completion = _starpu_cuda_test_request_completion,

	.is_direct_access_supported = _starpu_cuda_is_direct_access_supported,
	.malloc_on_node = _starpu_cuda_malloc_on_node,
	.free_on_node = _starpu_cuda_free_on_node,
	.name = "cuda driver"
};
#endif
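/* The node_ops tables above are indexed by the *destination* node kind: the
 * memory manager picks the handling node's ops and dispatches on where the
 * data is going.  Schematically (a sketch of the dispatch, not the actual
 * generic code): */
#if 0
static int dispatch_copy_sketch(struct _starpu_node_ops *ops,
				uintptr_t src, unsigned src_node,
				uintptr_t dst, unsigned dst_node,
				size_t size, struct _starpu_async_channel *channel)
{
	enum starpu_node_kind dst_kind = starpu_node_get_kind(dst_node);
	STARPU_ASSERT(ops->copy_data_to[dst_kind]);
	return ops->copy_data_to[dst_kind](src, 0, src_node, dst, 0, dst_node, size, channel);
}
#endif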