driver_cuda.c

/*
 * StarPU
 * Copyright (C) Université Bordeaux 1, CNRS 2008-2010 (see AUTHORS file)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation; either version 2.1 of the License, or (at
 * your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * See the GNU Lesser General Public License in COPYING.LGPL for more details.
 */
#include <starpu.h>
#include <starpu_cuda.h>
#include <starpu_profiling.h>
#include <common/utils.h>
#include <common/config.h>
#include <core/debug.h>
#include <drivers/driver_common/driver_common.h>
#include "driver_cuda.h"
#include <core/sched_policy.h>
#include <profiling/profiling.h>
/* the number of CUDA devices */
static int ncudagpus;
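
/* One stream per worker: created in init_context() and exposed to codelets
 * through starpu_cuda_get_local_stream(). */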
static cudaStream_t streams[STARPU_NMAXWORKERS];

/* In case we want to cap the amount of memory available on the GPUs by means
 * of the STARPU_LIMIT_GPU_MEM environment variable, we allocate a big buffer
 * when the driver is launched. */
static char *wasted_memory[STARPU_NMAXWORKERS];
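
/* If STARPU_LIMIT_GPU_MEM is set (in MB), allocate a dummy buffer on the
 * device so that only the requested amount of memory remains available. */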
static void limit_gpu_mem_if_needed(int devid)
{
	cudaError_t cures;

	int limit = starpu_get_env_number("STARPU_LIMIT_GPU_MEM");
	if (limit == -1)
	{
		wasted_memory[devid] = NULL;
		return;
	}

	/* Find the size of the memory on the device */
	struct cudaDeviceProp prop;
	cures = cudaGetDeviceProperties(&prop, devid);
	if (STARPU_UNLIKELY(cures))
		STARPU_CUDA_REPORT_ERROR(cures);

	size_t totalGlobalMem = prop.totalGlobalMem;

	/* How much memory should we waste? */
	size_t to_waste = totalGlobalMem - (size_t)limit*1024*1024;

	_STARPU_DEBUG("CUDA device %d: Wasting %ld MB / Limit %ld MB / Total %ld MB / Remains %ld MB\n",
			devid, (size_t)to_waste/(1024*1024), (size_t)limit, (size_t)totalGlobalMem/(1024*1024),
			(size_t)(totalGlobalMem - to_waste)/(1024*1024));

	/* Allocate a large buffer to waste memory and constrain the amount of available memory. */
	cures = cudaMalloc((void **)&wasted_memory[devid], to_waste);
	if (STARPU_UNLIKELY(cures))
		STARPU_CUDA_REPORT_ERROR(cures);
}
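
/* For instance, running an application with STARPU_LIMIT_GPU_MEM=256 in the
 * environment leaves roughly 256 MB usable on each CUDA device; the remainder
 * is held in wasted_memory[] until unlimit_gpu_mem_if_needed() releases it at
 * driver shutdown. */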
static void unlimit_gpu_mem_if_needed(int devid)
{
	cudaError_t cures;

	if (wasted_memory[devid])
	{
		cures = cudaFree(wasted_memory[devid]);
		if (STARPU_UNLIKELY(cures))
			STARPU_CUDA_REPORT_ERROR(cures);

		wasted_memory[devid] = NULL;
	}
}
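
/* Return a pointer to the CUDA stream associated with the current worker
 * (as identified by starpu_worker_get_id()). */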
cudaStream_t *starpu_cuda_get_local_stream(void)
{
	int worker = starpu_worker_get_id();

	return &streams[worker];
}
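
/* A codelet's CUDA implementation can launch its kernels on this stream
 * rather than on the default stream, e.g. (sketch only; the kernel name and
 * launch configuration are placeholders):
 *
 *	my_kernel<<<grid, block, 0, *starpu_cuda_get_local_stream()>>>(...);
 *	cudaStreamSynchronize(*starpu_cuda_get_local_stream());
 */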
static void init_context(int devid)
{
	cudaError_t cures;

	cures = cudaSetDevice(devid);
	if (STARPU_UNLIKELY(cures))
		STARPU_CUDA_REPORT_ERROR(cures);

	/* force CUDA to initialize the context for real */
	cudaFree(0);

	limit_gpu_mem_if_needed(devid);

	cures = cudaStreamCreate(starpu_cuda_get_local_stream());
	if (STARPU_UNLIKELY(cures))
		STARPU_CUDA_REPORT_ERROR(cures);
}
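
/* Destroy the worker's stream, release the memory-limiting buffer and tear
 * down the CUDA runtime context of this thread. */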
static void deinit_context(int workerid, int devid)
{
	cudaError_t cures;

	cudaStreamDestroy(streams[workerid]);

	unlimit_gpu_mem_if_needed(devid);

	/* clean up the runtime API's internal state (which CUBLAS also uses) */
	cures = cudaThreadExit();
	if (cures)
		STARPU_CUDA_REPORT_ERROR(cures);
}
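
/* Number of CUDA devices visible to the process, or 0 if the CUDA runtime
 * cannot be queried. */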
unsigned _starpu_get_cuda_device_count(void)
{
	int cnt;

	cudaError_t cures;
	cures = cudaGetDeviceCount(&cnt);
	if (STARPU_UNLIKELY(cures))
		return 0;

	return (unsigned)cnt;
}
void _starpu_init_cuda(void)
{
	ncudagpus = _starpu_get_cuda_device_count();
	assert(ncudagpus <= STARPU_MAXCUDADEVS);
}
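
/* Fetch the task's input data onto the device, run its CUDA implementation,
 * push the outputs back and record profiling/performance-model feedback.
 * Returns -EAGAIN when the input data could not be fetched, so that the job
 * can be rescheduled later. */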
static int execute_job_on_cuda(starpu_job_t j, struct starpu_worker_s *args)
{
	int ret;
	uint32_t mask = 0;

	STARPU_ASSERT(j);
	struct starpu_task *task = j->task;

	struct timespec codelet_start, codelet_end;

	unsigned calibrate_model = 0;
	int workerid = args->workerid;

	STARPU_ASSERT(task);
	struct starpu_codelet_t *cl = task->cl;
	STARPU_ASSERT(cl);

	if (cl->model && cl->model->benchmarking)
		calibrate_model = 1;

	ret = _starpu_fetch_task_input(task, mask);
	if (ret != 0) {
		/* there was not enough memory, so the input of
		 * the codelet cannot be fetched ... put the
		 * codelet back, and try it later */
		return -EAGAIN;
	}

	STARPU_TRACE_START_CODELET_BODY(j);

	struct starpu_task_profiling_info *profiling_info;
	profiling_info = task->profiling_info;

	if (profiling_info || calibrate_model)
	{
		starpu_clock_gettime(&codelet_start);
		_starpu_worker_register_executing_start_date(workerid, &codelet_start);
	}

	args->status = STATUS_EXECUTING;
	task->status = STARPU_TASK_RUNNING;

	cl_func func = cl->cuda_func;
	STARPU_ASSERT(func);
	func(task->interface, task->cl_arg);

	cl->per_worker_stats[workerid]++;

	if (profiling_info || calibrate_model)
		starpu_clock_gettime(&codelet_end);

	STARPU_TRACE_END_CODELET_BODY(j);

	args->status = STATUS_UNKNOWN;

	_starpu_push_task_output(task, mask);

	_starpu_driver_update_job_feedback(j, args, profiling_info, calibrate_model,
					&codelet_start, &codelet_end);

	return 0;
}
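
/* Body of the thread driving a CUDA device: bind to the right CPU, set up the
 * CUDA context, then keep popping and executing tasks until StarPU shuts
 * down. */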
void *_starpu_cuda_worker(void *arg)
{
	struct starpu_worker_s *args = arg;

	int devid = args->devid;
	int workerid = args->workerid;
	unsigned memnode = args->memory_node;

#ifdef STARPU_USE_FXT
	_starpu_fxt_register_thread(args->bindid);
#endif
	STARPU_TRACE_WORKER_INIT_START(STARPU_FUT_CUDA_KEY, devid, memnode);

	_starpu_bind_thread_on_cpu(args->config, args->bindid);

	_starpu_set_local_memory_node_key(&memnode);

	_starpu_set_local_worker_key(args);

	init_context(devid);

	/* one more time to avoid hacks from third party lib :) */
	_starpu_bind_thread_on_cpu(args->config, args->bindid);

	args->status = STATUS_UNKNOWN;

	/* get the device's name */
	char devname[128];
	struct cudaDeviceProp prop;
	cudaGetDeviceProperties(&prop, devid);
	strncpy(devname, prop.name, 128);
	devname[127] = 0;
	snprintf(args->name, 32, "CUDA %d (%s)", args->devid, devname);

	_STARPU_DEBUG("cuda (%s) dev id %d thread is ready to run on CPU %d!\n", devname, devid, args->bindid);

	STARPU_TRACE_WORKER_INIT_END

	/* tell the main thread that this one is ready */
	PTHREAD_MUTEX_LOCK(&args->mutex);
	args->worker_is_initialized = 1;
	PTHREAD_COND_SIGNAL(&args->ready_cond);
	PTHREAD_MUTEX_UNLOCK(&args->mutex);

	struct starpu_job_s *j;
	int res;

	while (_starpu_machine_is_running())
	{
		STARPU_TRACE_START_PROGRESS(memnode);
		_starpu_datawizard_progress(memnode, 1);
		STARPU_TRACE_END_PROGRESS(memnode);

		_starpu_execute_registered_progression_hooks();

		PTHREAD_MUTEX_LOCK(args->sched_mutex);

		/* perhaps there is some local task to be executed first */
		j = _starpu_pop_local_task(args);

		/* otherwise ask the scheduler for a task */
		if (!j)
		{
			struct starpu_task *task = _starpu_pop_task();
			if (task)
				j = _starpu_get_job_associated_to_task(task);
		}

		if (j == NULL)
		{
			if (_starpu_worker_can_block(memnode))
				_starpu_block_worker(workerid, args->sched_cond, args->sched_mutex);

			PTHREAD_MUTEX_UNLOCK(args->sched_mutex);

			continue;
		}

		PTHREAD_MUTEX_UNLOCK(args->sched_mutex);

		/* can CUDA execute that task? */
		if (!STARPU_CUDA_MAY_PERFORM(j))
		{
			/* this is neither a CUDA nor a CUBLAS task */
			_starpu_push_task(j, 0);
			continue;
		}

		_starpu_set_current_task(j->task);

		res = execute_job_on_cuda(j, args);

		_starpu_set_current_task(NULL);

		if (res) {
			switch (res) {
				case -EAGAIN:
					_STARPU_DISP("ouch, put the codelet %p back ... \n", j);
					_starpu_push_task(j, 0);
					STARPU_ABORT();
					continue;
				default:
					assert(0);
			}
		}

		_starpu_handle_job_termination(j, 0);
	}

	STARPU_TRACE_WORKER_DEINIT_START

	/* In case there remains some memory that was automatically
	 * allocated by StarPU, we release it now. Note that data
	 * coherency is not maintained anymore at that point! */
	_starpu_free_all_automatically_allocated_buffers(memnode);

	deinit_context(args->workerid, args->devid);

	STARPU_TRACE_WORKER_DEINIT_END(STARPU_FUT_CUDA_KEY);

	pthread_exit(NULL);

	return NULL;
}