driver_cpu.c

/* StarPU --- Runtime system for heterogeneous multicore architectures.
 *
 * Copyright (C) 2010-2013  Université de Bordeaux 1
 * Copyright (C) 2010  Mehdi Juhoor <mjuhoor@gmail.com>
 * Copyright (C) 2010-2013  Centre National de la Recherche Scientifique
 * Copyright (C) 2011  Télécom-SudParis
 *
 * StarPU is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation; either version 2.1 of the License, or (at
 * your option) any later version.
 *
 * StarPU is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * See the GNU Lesser General Public License in COPYING.LGPL for more details.
 */
#include <common/config.h>
#include <math.h>
#include <starpu.h>
#include <starpu_scheduler.h> /* XXX For starpu_machine_topology */
#include <starpu_profiling.h>
#include <drivers/driver_common/driver_common.h>
#include <common/utils.h>
#include <core/debug.h>
#include "driver_cpu.h"
#include <core/sched_policy.h>
#include <datawizard/memory_manager.h>

#ifdef STARPU_HAVE_HWLOC
#include <hwloc.h>
#ifndef HWLOC_API_VERSION
#define HWLOC_OBJ_PU HWLOC_OBJ_PROC
#endif
#endif

#ifdef STARPU_HAVE_WINDOWS
#include <windows.h>
#endif

#ifdef STARPU_SIMGRID
#include <core/simgrid.h>
#endif
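
/* CPU discovery: depending on the build configuration, the number of
 * hardware CPUs is obtained from SimGrid, from hwloc, from sysconf(3) or
 * from the Windows API, and falls back to a single CPU otherwise. */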
#ifdef STARPU_SIMGRID
void
_starpu_cpu_discover_devices(struct _starpu_machine_config *config)
{
	config->topology.nhwcpus = _starpu_simgrid_get_nbhosts("CPU");
}
#elif defined(STARPU_HAVE_HWLOC)
void
_starpu_cpu_discover_devices(struct _starpu_machine_config *config)
{
	/* Discover the CPUs relying on the hwloc interface and fill CONFIG
	 * accordingly. */
	struct starpu_machine_topology *topology = &config->topology;

	config->cpu_depth = hwloc_get_type_depth(topology->hwtopology,
						 HWLOC_OBJ_CORE);

	/* Would be very odd */
	STARPU_ASSERT(config->cpu_depth != HWLOC_TYPE_DEPTH_MULTIPLE);

	if (config->cpu_depth == HWLOC_TYPE_DEPTH_UNKNOWN)
	{
		/* unknown, use logical processors as a fallback */
		_STARPU_DISP("Warning: OS did not report CPU cores. Assuming there is only one thread per core.\n");
		config->cpu_depth = hwloc_get_type_depth(topology->hwtopology,
							 HWLOC_OBJ_PU);
	}

	topology->nhwcpus = hwloc_get_nbobjs_by_depth(topology->hwtopology,
						      config->cpu_depth);
}
#elif defined(HAVE_SYSCONF)
void
_starpu_cpu_discover_devices(struct _starpu_machine_config *config)
{
	/* Discover the CPUs relying on the sysconf(3) function and fill
	 * CONFIG accordingly. */
	config->topology.nhwcpus = sysconf(_SC_NPROCESSORS_ONLN);
}
#elif defined(__MINGW32__) || defined(__CYGWIN__)
void
_starpu_cpu_discover_devices(struct _starpu_machine_config *config)
{
	/* Discover the CPUs on Cygwin and MinGW systems. */
	SYSTEM_INFO sysinfo;
	GetSystemInfo(&sysinfo);
	config->topology.nhwcpus = sysinfo.dwNumberOfProcessors;
}
#else
#warning no way to know number of cores, assuming 1
void
_starpu_cpu_discover_devices(struct _starpu_machine_config *config)
{
	config->topology.nhwcpus = 1;
}
#endif

/* Actually launch the job on a cpu worker.
 * Handle binding CPUs on cores.
 * In the case of a combined worker WORKER_TASK != J->TASK */
static int execute_job_on_cpu(struct _starpu_job *j, struct starpu_task *worker_task, struct _starpu_worker *cpu_args, int rank, enum starpu_perf_archtype perf_arch)
{
	int ret;
	int is_parallel_task = (j->task_size > 1);
	int profiling = starpu_profiling_status_get();
	struct timespec codelet_start, codelet_end;

	struct starpu_task *task = j->task;
	struct starpu_codelet *cl = task->cl;

	STARPU_ASSERT(cl);

	if (rank == 0)
	{
		ret = _starpu_fetch_task_input(j, 0);
		if (ret != 0)
		{
			/* there was not enough memory, so the codelet cannot be executed right now ... */
			/* push the codelet back and try another one ... */
			return -EAGAIN;
		}
	}

	if (is_parallel_task)
	{
		_STARPU_PTHREAD_BARRIER_WAIT(&j->before_work_barrier);

		/* In the case of a combined worker, the scheduler needs to know
		 * when each actual worker begins the execution */
		_starpu_sched_pre_exec_hook(worker_task);
	}

	/* Give profiling variable */
	_starpu_driver_start_job(cpu_args, j, &codelet_start, rank, profiling);

	/* In case this is a Fork-join parallel task, the worker does not
	 * execute the kernel at all. */
	if ((rank == 0) || (cl->type != STARPU_FORKJOIN))
	{
		_starpu_cl_func_t func = _starpu_task_get_cpu_nth_implementation(cl, j->nimpl);

		if (is_parallel_task && cl->type == STARPU_FORKJOIN)
			/* bind to parallel worker */
			_starpu_bind_thread_on_cpus(cpu_args->config, _starpu_get_combined_worker_struct(j->combined_workerid));
		STARPU_ASSERT(func);
#ifdef STARPU_SIMGRID
		_starpu_simgrid_execute_job(j, perf_arch, NAN);
#else
		func(_STARPU_TASK_GET_INTERFACES(task), task->cl_arg);
#endif
		if (is_parallel_task && cl->type == STARPU_FORKJOIN)
			/* rebind to single CPU */
			_starpu_bind_thread_on_cpu(cpu_args->config, cpu_args->bindid);
	}

	_starpu_driver_end_job(cpu_args, j, perf_arch, &codelet_end, rank, profiling);

	if (is_parallel_task)
		_STARPU_PTHREAD_BARRIER_WAIT(&j->after_work_barrier);

	if (rank == 0)
	{
		_starpu_driver_update_job_feedback(j, cpu_args,
				perf_arch, &codelet_start, &codelet_end, profiling);
		_starpu_push_task_output(j, 0);
	}

	return 0;
}
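
/* Map a public starpu_driver descriptor onto the internal _starpu_worker
 * structure, or return NULL if no CPU worker has that device id. */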
static struct _starpu_worker*
_starpu_get_worker_from_driver(struct starpu_driver *d)
{
	int n = starpu_worker_get_by_devid(STARPU_CPU_WORKER, d->id.cpu_id);
	if (n == -1)
		return NULL;

	return _starpu_get_worker_struct(n);
}
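
/* Determine how much main memory the CPU workers may use on this node:
 * the RAM size reported by hwloc (or 0 without hwloc), possibly capped by
 * the STARPU_LIMIT_CPU_MEM environment variable (expressed in MiB). */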
static size_t _starpu_cpu_get_global_mem_size(int devid, struct _starpu_machine_config *config)
{
	size_t global_mem;
	starpu_ssize_t limit;

	limit = starpu_get_env_number("STARPU_LIMIT_CPU_MEM");
#ifdef STARPU_DEVEL
#  warning TODO: take into account NUMA node and check STARPU_LIMIT_CPU_numanode_MEM
#endif

#if defined(STARPU_HAVE_HWLOC)
	int depth_node;
	struct starpu_machine_topology *topology = &config->topology;
	depth_node = hwloc_get_type_depth(topology->hwtopology, HWLOC_OBJ_NODE);

	if (depth_node == HWLOC_TYPE_DEPTH_UNKNOWN)
		global_mem = hwloc_get_root_obj(topology->hwtopology)->memory.total_memory;
	else
		global_mem = hwloc_get_obj_by_depth(topology->hwtopology, depth_node, devid)->memory.local_memory;

#else /* STARPU_HAVE_HWLOC */
#ifdef STARPU_DEVEL
#  warning use sysinfo when available to get global size
#endif
	global_mem = 0;
#endif

	if (limit == -1)
		// No limit is defined, we return the global memory size
		return global_mem;
	else if (limit*1024*1024 > global_mem)
		// The requested limit is higher than what is available, we return the global memory size
		return global_mem;
	else
		// We limit the memory
		return limit*1024*1024;
}
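
/* Initialize a CPU worker: register the size of its memory node, set its
 * name, and signal the main thread that the worker is ready. */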
int _starpu_cpu_driver_init(struct starpu_driver *d)
{
	struct _starpu_worker *cpu_worker;
	cpu_worker = _starpu_get_worker_from_driver(d);
	STARPU_ASSERT(cpu_worker);

	int devid = cpu_worker->devid;

	_starpu_worker_init(cpu_worker, _STARPU_FUT_CPU_KEY);

	/* FIXME: when we have NUMA support, properly turn node number into NUMA node number */
	_starpu_memory_manager_set_global_memory_size(cpu_worker->memory_node, _starpu_cpu_get_global_mem_size(cpu_worker->memory_node, cpu_worker->config));

	snprintf(cpu_worker->name, sizeof(cpu_worker->name), "CPU %d", devid);
	snprintf(cpu_worker->short_name, sizeof(cpu_worker->short_name), "CPU %d", devid);

	cpu_worker->status = STATUS_UNKNOWN;

	_STARPU_TRACE_WORKER_INIT_END;

	/* tell the main thread that we are ready */
	_STARPU_PTHREAD_MUTEX_LOCK(&cpu_worker->mutex);
	cpu_worker->worker_is_initialized = 1;
	_STARPU_PTHREAD_COND_SIGNAL(&cpu_worker->ready_cond);
	_STARPU_PTHREAD_MUTEX_UNLOCK(&cpu_worker->mutex);
	return 0;
}
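
/* Execute one iteration of the CPU driver loop: make the data requests of
 * this memory node progress, fetch a task from the scheduler, execute it
 * (possibly as one member of a combined worker), and push it back to the
 * scheduler if it could not run. */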
int _starpu_cpu_driver_run_once(struct starpu_driver *d STARPU_ATTRIBUTE_UNUSED)
{
	struct _starpu_worker *cpu_worker;
	cpu_worker = _starpu_get_local_worker_key();
	STARPU_ASSERT(cpu_worker);

	unsigned memnode = cpu_worker->memory_node;
	int workerid = cpu_worker->workerid;

	_STARPU_TRACE_START_PROGRESS(memnode);
	_starpu_datawizard_progress(memnode, 1);
	_STARPU_TRACE_END_PROGRESS(memnode);

	struct _starpu_job *j;
	struct starpu_task *task;
	int res;

	task = _starpu_get_worker_task(cpu_worker, workerid, memnode);

	if (!task)
		return 0;

	j = _starpu_get_job_associated_to_task(task);

	/* can a cpu perform that task ? */
	if (!_STARPU_CPU_MAY_PERFORM(j))
	{
		/* put it back at the end of the queue ... XXX */
		_starpu_push_task_to_workers(task);
		return 0;
	}

	int rank = 0;
	int is_parallel_task = (j->task_size > 1);

	enum starpu_perf_archtype perf_arch;

	/* Get the rank in case it is a parallel task */
	if (is_parallel_task)
	{
		_STARPU_PTHREAD_MUTEX_LOCK(&j->sync_mutex);
		rank = j->active_task_alias_count++;
		_STARPU_PTHREAD_MUTEX_UNLOCK(&j->sync_mutex);

		struct _starpu_combined_worker *combined_worker;
		combined_worker = _starpu_get_combined_worker_struct(j->combined_workerid);

		cpu_worker->combined_workerid = j->combined_workerid;
		cpu_worker->worker_size = combined_worker->worker_size;
		cpu_worker->current_rank = rank;
		perf_arch = combined_worker->perf_arch;
	}
	else
	{
		cpu_worker->combined_workerid = cpu_worker->workerid;
		cpu_worker->worker_size = 1;
		cpu_worker->current_rank = 0;
		perf_arch = cpu_worker->perf_arch;
	}

	_starpu_set_current_task(j->task);
	cpu_worker->current_task = j->task;

	res = execute_job_on_cpu(j, task, cpu_worker, rank, perf_arch);

	_starpu_set_current_task(NULL);
	cpu_worker->current_task = NULL;

	if (res)
	{
		switch (res)
		{
		case -EAGAIN:
			_starpu_push_task_to_workers(task);
			return 0;
		default:
			STARPU_ABORT();
		}
	}

	/* In the case of combined workers, we need to inform the scheduler
	 * that each worker's execution is over.
	 * Then we free the workers' task alias */
	if (is_parallel_task)
	{
		_starpu_sched_post_exec_hook(task);
		free(task);
	}

	if (rank == 0)
		_starpu_handle_job_termination(j);
	return 0;
}
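
/* Shut a CPU worker down: flush the pending data requests of its memory
 * node and release the buffers StarPU allocated there. */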
int _starpu_cpu_driver_deinit(struct starpu_driver *d STARPU_ATTRIBUTE_UNUSED)
{
	_STARPU_TRACE_WORKER_DEINIT_START;

	struct _starpu_worker *cpu_worker;
	cpu_worker = _starpu_get_local_worker_key();
	STARPU_ASSERT(cpu_worker);

	unsigned memnode = cpu_worker->memory_node;
	_starpu_handle_all_pending_node_data_requests(memnode);

	/* In case there remains some memory that was automatically
	 * allocated by StarPU, we release it now. Note that data
	 * coherency is not maintained anymore at that point ! */
	_starpu_free_all_automatically_allocated_buffers(memnode);

	_STARPU_TRACE_WORKER_DEINIT_END(_STARPU_FUT_CPU_KEY);

	return 0;
}
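
/* Entry point of a CPU worker thread: initialize the driver, run one
 * iteration of the driver loop for as long as the machine is running,
 * then deinitialize the driver. */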
void *
_starpu_cpu_worker(void *arg)
{
	struct _starpu_worker *args = arg;

	struct starpu_driver d =
	{
		.type      = STARPU_CPU_WORKER,
		.id.cpu_id = args->devid
	};

	_starpu_cpu_driver_init(&d);
	while (_starpu_machine_is_running())
		_starpu_cpu_driver_run_once(&d);
	_starpu_cpu_driver_deinit(&d);

	return NULL;
}
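
/* _starpu_run_cpu() below backs the public starpu_driver_run() call, which
 * lets an application run a CPU driver from a thread of its own instead of
 * having StarPU spawn that thread.  A minimal sketch of the application
 * side, assuming a build that provides starpu_driver_run() and a
 * starpu_conf field for reserving drivers (field names may differ between
 * StarPU versions), could be:
 *
 *	struct starpu_driver d = { .type = STARPU_CPU_WORKER, .id.cpu_id = 0 };
 *
 *	// at initialization time, tell StarPU not to launch this driver itself
 *	conf.not_launched_drivers = &d;
 *	conf.n_not_launched_drivers = 1;
 *
 *	// later, from an application thread:
 *	starpu_driver_run(&d);	// returns once StarPU is being shut down
 */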
int _starpu_run_cpu(struct starpu_driver *d)
{
	STARPU_ASSERT(d && d->type == STARPU_CPU_WORKER);

	struct _starpu_worker *worker = _starpu_get_worker_from_driver(d);
	STARPU_ASSERT(worker);

	worker->set = NULL;
	worker->worker_is_initialized = 0;
	_starpu_cpu_worker(worker);

	return 0;
}