profiling.c

/* StarPU --- Runtime system for heterogeneous multicore architectures.
 *
 * Copyright (C) 2010-2013, 2016 Université de Bordeaux
 * Copyright (C) 2010, 2011, 2012, 2013, 2016 CNRS
 *
 * StarPU is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation; either version 2.1 of the License, or (at
 * your option) any later version.
 *
 * StarPU is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * See the GNU Lesser General Public License in COPYING.LGPL for more details.
 */

#include <starpu.h>
#include <starpu_profiling.h>
#include <profiling/profiling.h>
#include <core/workers.h>
#include <common/config.h>
#include <common/utils.h>
#include <common/timing.h>
#include <common/fxt.h>
#include <errno.h>

static struct starpu_profiling_worker_info worker_info[STARPU_NMAXWORKERS];
/* TODO: rather use rwlock */
static starpu_pthread_mutex_t worker_info_mutex[STARPU_NMAXWORKERS];

/* In case the worker is still sleeping when the user requests profiling info,
 * we need to account for the time elapsed while sleeping. */
static unsigned worker_registered_sleeping_start[STARPU_NMAXWORKERS];
static struct timespec sleeping_start_date[STARPU_NMAXWORKERS];

static unsigned worker_registered_executing_start[STARPU_NMAXWORKERS];
static struct timespec executing_start_date[STARPU_NMAXWORKERS];

/* Store the busid of the different (src, dst) pairs. busid_matrix[src][dst]
 * contains the busid of (src, dst) or -1 if the bus was not registered. */
struct node_pair
{
	int src;
	int dst;
	struct starpu_profiling_bus_info *bus_info;
};

static int busid_matrix[STARPU_MAXNODES][STARPU_MAXNODES];
static struct starpu_profiling_bus_info bus_profiling_info[STARPU_MAXNODES][STARPU_MAXNODES];
static struct node_pair busid_to_node_pair[STARPU_MAXNODES*STARPU_MAXNODES];
static char bus_direct[STARPU_MAXNODES*STARPU_MAXNODES];
static int bus_ngpus[STARPU_MAXNODES*STARPU_MAXNODES];

static unsigned busid_cnt = 0;

static void _starpu_bus_reset_profiling_info(struct starpu_profiling_bus_info *bus_info);

/* Clear all the profiling info related to the worker. */
static void _starpu_worker_reset_profiling_info_with_lock(int workerid);

/*
 * Global control of profiling
 */

/* Disabled by default, unless simulating */
int _starpu_profiling =
#ifdef STARPU_SIMGRID
	1
#else
	0
#endif
	;

void starpu_profiling_init()
{
	_starpu_profiling_init();
}

static void _starpu_profiling_reset_counters()
{
	int worker;
	for (worker = 0; worker < STARPU_NMAXWORKERS; worker++)
	{
		_starpu_worker_reset_profiling_info_with_lock(worker);
	}

	int busid;
	int bus_cnt = starpu_bus_get_count();
	for (busid = 0; busid < bus_cnt; busid++)
	{
		struct starpu_profiling_bus_info *bus_info;
		bus_info = busid_to_node_pair[busid].bus_info;
		_starpu_bus_reset_profiling_info(bus_info);
	}
}
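
/* Toggle profiling globally. Every per-worker mutex is taken so that the
 * status change cannot race with concurrent counter updates; when profiling
 * is being enabled, the worker and bus counters are reset. The previous
 * status is returned to the caller. */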
int starpu_profiling_status_set(int status)
{
	int worker;
	for (worker = 0; worker < STARPU_NMAXWORKERS; worker++)
	{
		STARPU_PTHREAD_MUTEX_LOCK(&worker_info_mutex[worker]);
	}

	ANNOTATE_HAPPENS_AFTER(&_starpu_profiling);
	int prev_value = _starpu_profiling;
	_starpu_profiling = status;
	ANNOTATE_HAPPENS_BEFORE(&_starpu_profiling);

	_STARPU_TRACE_SET_PROFILING(status);

	/* If we enable profiling, we reset the counters. */
	if (status == STARPU_PROFILING_ENABLE)
	{
		_starpu_profiling_reset_counters();
	}

	for (worker = 0; worker < STARPU_NMAXWORKERS; worker++)
	{
		STARPU_PTHREAD_MUTEX_UNLOCK(&worker_info_mutex[worker]);
	}

	return prev_value;
}

void _starpu_profiling_init(void)
{
	int worker;
	for (worker = 0; worker < STARPU_NMAXWORKERS; worker++)
	{
		STARPU_PTHREAD_MUTEX_INIT(&worker_info_mutex[worker], NULL);
	}
}

void _starpu_profiling_start(void)
{
	const char *env;
	if ((env = starpu_getenv("STARPU_PROFILING")) && atoi(env))
	{
		starpu_profiling_status_set(STARPU_PROFILING_ENABLE);
	}
}

void _starpu_profiling_terminate(void)
{
	int worker;
	for (worker = 0; worker < STARPU_NMAXWORKERS; worker++)
	{
		STARPU_PTHREAD_MUTEX_DESTROY(&worker_info_mutex[worker]);
	}
}

/*
 * Task profiling
 */

struct starpu_profiling_task_info *_starpu_allocate_profiling_info_if_needed(struct starpu_task *task)
{
	struct starpu_profiling_task_info *info = NULL;

	/* If we are benchmarking, we need room for the energy */
	if (starpu_profiling_status_get() || (task->cl && task->cl->energy_model && (task->cl->energy_model->benchmarking || _starpu_get_calibrate_flag())))
	{
		_STARPU_CALLOC(info, 1, sizeof(struct starpu_profiling_task_info));
	}

	return info;
}

/*
 * Worker profiling
 */

static void _starpu_worker_reset_profiling_info_with_lock(int workerid)
{
	_starpu_clock_gettime(&worker_info[workerid].start_time);

	/* This is computed in a lazy fashion when the application queries
	 * profiling info. */
	starpu_timespec_clear(&worker_info[workerid].total_time);

	starpu_timespec_clear(&worker_info[workerid].executing_time);
	starpu_timespec_clear(&worker_info[workerid].sleeping_time);

	worker_info[workerid].executed_tasks = 0;
	worker_info[workerid].used_cycles = 0;
	worker_info[workerid].stall_cycles = 0;
	worker_info[workerid].energy_consumed = 0;
	worker_info[workerid].flops = 0;

	/* We detect if the worker is already sleeping or doing some
	 * computation */
	enum _starpu_worker_status status = _starpu_worker_get_status(workerid);

	if (status == STATUS_SLEEPING)
	{
		worker_registered_sleeping_start[workerid] = 1;
		_starpu_clock_gettime(&sleeping_start_date[workerid]);
	}
	else
	{
		worker_registered_sleeping_start[workerid] = 0;
	}

	if (status == STATUS_EXECUTING)
	{
		worker_registered_executing_start[workerid] = 1;
		_starpu_clock_gettime(&executing_start_date[workerid]);
	}
	else
	{
		worker_registered_executing_start[workerid] = 0;
	}
}

void _starpu_worker_restart_sleeping(int workerid)
{
	if (starpu_profiling_status_get())
	{
		struct timespec sleep_start_time;
		_starpu_clock_gettime(&sleep_start_time);

		STARPU_PTHREAD_MUTEX_LOCK(&worker_info_mutex[workerid]);
		if (worker_registered_sleeping_start[workerid] == 0)
		{
			worker_registered_sleeping_start[workerid] = 1;
			memcpy(&sleeping_start_date[workerid], &sleep_start_time, sizeof(struct timespec));
		}
		STARPU_PTHREAD_MUTEX_UNLOCK(&worker_info_mutex[workerid]);
	}
}

void _starpu_worker_stop_sleeping(int workerid)
{
	if (starpu_profiling_status_get())
	{
		struct timespec *sleeping_start, sleep_end_time;

		_starpu_clock_gettime(&sleep_end_time);

		STARPU_PTHREAD_MUTEX_LOCK(&worker_info_mutex[workerid]);
		STARPU_ASSERT(worker_registered_sleeping_start[workerid] == 1);
		sleeping_start = &sleeping_start_date[workerid];

		/* Profiling might have been enabled while the worker was
		 * already blocked, so we don't measure (end - start) but
		 * (end - max(start, worker_start)), where worker_start is the
		 * date of the previous profiling info reset on this worker. */
		struct timespec *worker_start = &worker_info[workerid].start_time;
		if (starpu_timespec_cmp(sleeping_start, worker_start, <))
		{
			/* sleeping_start < worker_start */
			sleeping_start = worker_start;
		}

		struct timespec sleeping_time;
		starpu_timespec_sub(&sleep_end_time, sleeping_start, &sleeping_time);

		starpu_timespec_accumulate(&worker_info[workerid].sleeping_time, &sleeping_time);

		worker_registered_sleeping_start[workerid] = 0;

		STARPU_PTHREAD_MUTEX_UNLOCK(&worker_info_mutex[workerid]);
	}
}

void _starpu_worker_register_executing_start_date(int workerid, struct timespec *executing_start)
{
	if (starpu_profiling_status_get())
	{
		STARPU_PTHREAD_MUTEX_LOCK(&worker_info_mutex[workerid]);
		worker_registered_executing_start[workerid] = 1;
		memcpy(&executing_start_date[workerid], executing_start, sizeof(struct timespec));
		STARPU_PTHREAD_MUTEX_UNLOCK(&worker_info_mutex[workerid]);
	}
}

void _starpu_worker_register_executing_end(int workerid)
{
	if (starpu_profiling_status_get())
	{
		STARPU_PTHREAD_MUTEX_LOCK(&worker_info_mutex[workerid]);
		worker_registered_executing_start[workerid] = 0;
		STARPU_PTHREAD_MUTEX_UNLOCK(&worker_info_mutex[workerid]);
	}
}

void _starpu_worker_update_profiling_info_executing(int workerid, struct timespec *executing_time, int executed_tasks, uint64_t used_cycles, uint64_t stall_cycles, double energy_consumed, double flops)
{
	if (starpu_profiling_status_get())
	{
		STARPU_PTHREAD_MUTEX_LOCK(&worker_info_mutex[workerid]);

		if (executing_time)
			starpu_timespec_accumulate(&worker_info[workerid].executing_time, executing_time);

		worker_info[workerid].used_cycles += used_cycles;
		worker_info[workerid].stall_cycles += stall_cycles;
		worker_info[workerid].energy_consumed += energy_consumed;
		worker_info[workerid].executed_tasks += executed_tasks;
		worker_info[workerid].flops += flops;

		STARPU_PTHREAD_MUTEX_UNLOCK(&worker_info_mutex[workerid]);
	}
	else /* Not thread safe, shouldn't be too much of a problem */
		worker_info[workerid].executed_tasks += executed_tasks;
}
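
/* Snapshot the profiling counters of a worker into *info (when info is not
 * NULL) and reset them. Total, executing and sleeping times are computed
 * lazily here, taking into account any sleep or execution phase that is
 * still in progress. */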
int starpu_profiling_worker_get_info(int workerid, struct starpu_profiling_worker_info *info)
{
	if (!starpu_profiling_status_get())
	{
		/* Not thread safe, shouldn't be too much of a problem */
		if (info)
			info->executed_tasks = worker_info[workerid].executed_tasks;
	}

	STARPU_PTHREAD_MUTEX_LOCK(&worker_info_mutex[workerid]);

	if (info)
	{
		/* The total time is computed in a lazy fashion */
		struct timespec now;
		_starpu_clock_gettime(&now);

		/* In case some worker is currently sleeping, we take into
		 * account the time spent since it registered. */
		if (worker_registered_sleeping_start[workerid])
		{
			struct timespec sleeping_time;
			starpu_timespec_sub(&now, &sleeping_start_date[workerid], &sleeping_time);
			starpu_timespec_accumulate(&worker_info[workerid].sleeping_time, &sleeping_time);
		}

		if (worker_registered_executing_start[workerid])
		{
			struct timespec executing_time;
			starpu_timespec_sub(&now, &executing_start_date[workerid], &executing_time);
			starpu_timespec_accumulate(&worker_info[workerid].executing_time, &executing_time);
		}

		/* total_time = now - start_time */
		starpu_timespec_sub(&now, &worker_info[workerid].start_time,
					&worker_info[workerid].total_time);

		memcpy(info, &worker_info[workerid], sizeof(struct starpu_profiling_worker_info));
	}

	_starpu_worker_reset_profiling_info_with_lock(workerid);

	STARPU_PTHREAD_MUTEX_UNLOCK(&worker_info_mutex[workerid]);

	return 0;
}

/* When did the task reach the scheduler? */
void _starpu_profiling_set_task_push_start_time(struct starpu_task *task)
{
	if (!starpu_profiling_status_get())
		return;

	struct starpu_profiling_task_info *profiling_info;
	profiling_info = task->profiling_info;

	if (profiling_info)
		_starpu_clock_gettime(&profiling_info->push_start_time);
}

void _starpu_profiling_set_task_push_end_time(struct starpu_task *task)
{
	if (!starpu_profiling_status_get())
		return;

	struct starpu_profiling_task_info *profiling_info;
	profiling_info = task->profiling_info;

	if (profiling_info)
		_starpu_clock_gettime(&profiling_info->push_end_time);
}

/*
 * Bus profiling
 */

void _starpu_initialize_busid_matrix(void)
{
	int i, j;
	for (j = 0; j < STARPU_MAXNODES; j++)
		for (i = 0; i < STARPU_MAXNODES; i++)
			busid_matrix[i][j] = -1;

	busid_cnt = 0;
}

static void _starpu_bus_reset_profiling_info(struct starpu_profiling_bus_info *bus_info)
{
	_starpu_clock_gettime(&bus_info->start_time);
	bus_info->transferred_bytes = 0;
	bus_info->transfer_count = 0;
}

int _starpu_register_bus(int src_node, int dst_node)
{
	if (starpu_bus_get_id(src_node, dst_node) != -1)
		return -EBUSY;

	int busid = STARPU_ATOMIC_ADD(&busid_cnt, 1) - 1;

	busid_matrix[src_node][dst_node] = busid;

	busid_to_node_pair[busid].src = src_node;
	busid_to_node_pair[busid].dst = dst_node;
	busid_to_node_pair[busid].bus_info = &bus_profiling_info[src_node][dst_node];

	_starpu_bus_reset_profiling_info(&bus_profiling_info[src_node][dst_node]);

	return busid;
}

int starpu_bus_get_count(void)
{
	return busid_cnt;
}

int starpu_bus_get_id(int src, int dst)
{
	return busid_matrix[src][dst];
}

int starpu_bus_get_src(int busid)
{
	return busid_to_node_pair[busid].src;
}

int starpu_bus_get_dst(int busid)
{
	return busid_to_node_pair[busid].dst;
}

void starpu_bus_set_direct(int busid, int direct)
{
	bus_direct[busid] = direct;
}

int starpu_bus_get_direct(int busid)
{
	return bus_direct[busid];
}

void starpu_bus_set_ngpus(int busid, int ngpus)
{
	bus_ngpus[busid] = ngpus;
}

int starpu_bus_get_ngpus(int busid)
{
	struct _starpu_machine_topology *topology = &_starpu_get_machine_config()->topology;
	int ngpus = bus_ngpus[busid];
	if (!ngpus)
		/* Unknown number of GPUs, assume it's shared by all GPUs */
		ngpus = topology->ncudagpus + topology->nopenclgpus;
	return ngpus;
}

int starpu_bus_get_profiling_info(int busid, struct starpu_profiling_bus_info *bus_info)
{
	int src_node = starpu_bus_get_src(busid);
	int dst_node = starpu_bus_get_dst(busid);

	/* XXX: this whole function should be protected by a mutex */
	if (bus_info)
	{
		struct timespec now;
		_starpu_clock_gettime(&now);

		/* total_time = now - start_time */
		starpu_timespec_sub(&now, &bus_profiling_info[src_node][dst_node].start_time,
					&bus_profiling_info[src_node][dst_node].total_time);

		memcpy(bus_info, &bus_profiling_info[src_node][dst_node], sizeof(struct starpu_profiling_bus_info));
	}

	_starpu_bus_reset_profiling_info(&bus_profiling_info[src_node][dst_node]);

	return 0;
}

void _starpu_bus_update_profiling_info(int src_node, int dst_node, size_t size)
{
	bus_profiling_info[src_node][dst_node].transferred_bytes += size;
	bus_profiling_info[src_node][dst_node].transfer_count++;
//	fprintf(stderr, "PROFILE %d -> %d : %d (cnt %d)\n", src_node, dst_node, size, bus_profiling_info[src_node][dst_node].transfer_count);
}
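
/* starpu_profiling.h may define starpu_profiling_status_get() as a macro for
 * internal builds; undefine it here so that this translation unit also
 * provides a genuine function with external linkage. */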
#undef starpu_profiling_status_get
int starpu_profiling_status_get(void)
{
	int ret;
	ANNOTATE_HAPPENS_AFTER(&_starpu_profiling);
	ret = _starpu_profiling;
	ANNOTATE_HAPPENS_BEFORE(&_starpu_profiling);
	return ret;
}
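
/*
 * Illustrative usage sketch (not part of the original file): a minimal,
 * non-authoritative example of how an application could drive the profiling
 * interface implemented above -- enable profiling, run some work, then read
 * back the per-worker and per-bus counters. It assumes an application-provided
 * submit_some_tasks() helper and is guarded out of compilation.
 */
#if 0
#include <stdio.h>
#include <starpu.h>
#include <starpu_profiling.h>

extern void submit_some_tasks(void);	/* hypothetical application helper */

int main(void)
{
	if (starpu_init(NULL) != 0)
		return 1;

	/* Enable profiling; this also resets all worker and bus counters. */
	starpu_profiling_status_set(STARPU_PROFILING_ENABLE);

	submit_some_tasks();
	starpu_task_wait_for_all();

	/* Per-worker counters: querying them also resets them. */
	unsigned nworkers = starpu_worker_get_count();
	unsigned w;
	for (w = 0; w < nworkers; w++)
	{
		struct starpu_profiling_worker_info winfo;
		starpu_profiling_worker_get_info((int) w, &winfo);
		fprintf(stdout, "worker %u: %d task(s), %.2f us executing, %.2f us sleeping\n",
			w, winfo.executed_tasks,
			starpu_timing_timespec_to_us(&winfo.executing_time),
			starpu_timing_timespec_to_us(&winfo.sleeping_time));
	}

	/* Per-bus counters: one entry per registered (src, dst) pair. */
	int nbuses = starpu_bus_get_count();
	int b;
	for (b = 0; b < nbuses; b++)
	{
		struct starpu_profiling_bus_info binfo;
		starpu_bus_get_profiling_info(b, &binfo);
		fprintf(stdout, "bus %d (%d -> %d): %llu bytes in %d transfer(s)\n",
			b, starpu_bus_get_src(b), starpu_bus_get_dst(b),
			(unsigned long long) binfo.transferred_bytes, binfo.transfer_count);
	}

	starpu_shutdown();
	return 0;
}
#endif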