/* StarPU --- Runtime system for heterogeneous multicore architectures.
 *
 * Copyright (C) 2010-2017,2019 Université de Bordeaux
 * Copyright (C) 2010-2013,2015-2017 CNRS
 * Copyright (C) 2016 Inria
 *
 * StarPU is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation; either version 2.1 of the License, or (at
 * your option) any later version.
 *
 * StarPU is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * See the GNU Lesser General Public License in COPYING.LGPL for more details.
 */

#include <starpu.h>
#include <starpu_profiling.h>
#include <profiling/profiling.h>
#include <core/workers.h>
#include <common/config.h>
#include <common/utils.h>
#include <common/timing.h>
#include <common/fxt.h>
#include <errno.h>

#ifdef STARPU_PAPI
#include <papi.h>
#endif

static struct starpu_profiling_worker_info worker_info[STARPU_NMAXWORKERS];
/* TODO: use a rwlock instead */
static starpu_pthread_mutex_t worker_info_mutex[STARPU_NMAXWORKERS];

/* In case the worker is still sleeping when the user requests profiling info,
 * we need to account for the time elapsed while sleeping. */
static unsigned worker_registered_sleeping_start[STARPU_NMAXWORKERS];
static struct timespec sleeping_start_date[STARPU_NMAXWORKERS];

static unsigned worker_registered_executing_start[STARPU_NMAXWORKERS];
static struct timespec executing_start_date[STARPU_NMAXWORKERS];

#ifdef STARPU_PAPI
static int papi_events[PAPI_MAX_HWCTRS];
static int papi_nevents = 0;
#endif

/* Store the busid of the different (src, dst) pairs. busid_matrix[src][dst]
 * contains the busid of (src, dst), or -1 if the bus was not registered. */
struct node_pair
{
	int src;
	int dst;
	struct starpu_profiling_bus_info *bus_info;
};

static int busid_matrix[STARPU_MAXNODES][STARPU_MAXNODES];
static struct starpu_profiling_bus_info bus_profiling_info[STARPU_MAXNODES][STARPU_MAXNODES];
static struct node_pair busid_to_node_pair[STARPU_MAXNODES*STARPU_MAXNODES];
static char bus_direct[STARPU_MAXNODES*STARPU_MAXNODES];
static int bus_ngpus[STARPU_MAXNODES*STARPU_MAXNODES];
static unsigned busid_cnt = 0;

static void _starpu_bus_reset_profiling_info(struct starpu_profiling_bus_info *bus_info);

/* Clear all the profiling info related to the worker. */
static void _starpu_worker_reset_profiling_info_with_lock(int workerid);

/*
 * Global control of profiling
 */

/* Disabled by default, unless simulating */
int _starpu_profiling =
#ifdef STARPU_SIMGRID
	1
#else
	0
#endif
	;

void starpu_profiling_init()
{
	_starpu_profiling_init();
}

static void _starpu_profiling_reset_counters()
{
	int worker;
	for (worker = 0; worker < STARPU_NMAXWORKERS; worker++)
	{
		_starpu_worker_reset_profiling_info_with_lock(worker);
	}

	int busid;
	int bus_cnt = starpu_bus_get_count();
	for (busid = 0; busid < bus_cnt; busid++)
	{
		struct starpu_profiling_bus_info *bus_info;
		bus_info = busid_to_node_pair[busid].bus_info;
		_starpu_bus_reset_profiling_info(bus_info);
	}
}
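
/* Enable or disable profiling and return the previous status. All the
 * per-worker mutexes are taken so that the switch does not happen in the
 * middle of a profiling update, and the counters are reset whenever
 * profiling gets (re-)enabled. */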
int starpu_profiling_status_set(int status)
{
	int worker;
	for (worker = 0; worker < STARPU_NMAXWORKERS; worker++)
	{
		STARPU_PTHREAD_MUTEX_LOCK(&worker_info_mutex[worker]);
	}

	ANNOTATE_HAPPENS_AFTER(&_starpu_profiling);
	int prev_value = _starpu_profiling;
	_starpu_profiling = status;
	ANNOTATE_HAPPENS_BEFORE(&_starpu_profiling);

	_STARPU_TRACE_SET_PROFILING(status);

	/* If we enable profiling, we reset the counters. */
	if (status == STARPU_PROFILING_ENABLE)
	{
		_starpu_profiling_reset_counters();
	}

	for (worker = 0; worker < STARPU_NMAXWORKERS; worker++)
	{
		STARPU_PTHREAD_MUTEX_UNLOCK(&worker_info_mutex[worker]);
	}

	return prev_value;
}
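
/* Called at StarPU initialization: set up the per-worker mutexes and, when
 * PAPI support is compiled in, initialize the PAPI library and parse the
 * STARPU_PROF_PAPI_EVENTS environment variable to build the list of hardware
 * counters to monitor. */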
void _starpu_profiling_init(void)
{
	int worker;
	for (worker = 0; worker < STARPU_NMAXWORKERS; worker++)
	{
		STARPU_PTHREAD_MUTEX_INIT(&worker_info_mutex[worker], NULL);
	}

#ifdef STARPU_PAPI
	int retval = PAPI_library_init(PAPI_VER_CURRENT);
	if (retval != PAPI_VER_CURRENT)
	{
		_STARPU_MSG("Failed to init PAPI, error: %s.\n", PAPI_strerror(retval));
	}

	retval = PAPI_thread_init(pthread_self);
	if (retval != PAPI_OK)
	{
		_STARPU_MSG("Failed to init PAPI thread, error: %s.\n", PAPI_strerror(retval));
	}

	char *conf_papi_events;
	char *papi_event_name;
	conf_papi_events = starpu_getenv("STARPU_PROF_PAPI_EVENTS");
	if (conf_papi_events != NULL)
	{
		/* The variable is a space-separated list of PAPI event names.
		 * Stop at PAPI_MAX_HWCTRS events so we never overflow the
		 * papi_events array. */
		while (papi_nevents < PAPI_MAX_HWCTRS && (papi_event_name = strtok_r(conf_papi_events, " ", &conf_papi_events)))
		{
			_STARPU_DEBUG("Loading PAPI Event: %s\n", papi_event_name);
			retval = PAPI_event_name_to_code((char *)papi_event_name, &papi_events[papi_nevents]);
			if (retval != PAPI_OK)
				_STARPU_MSG("Failed to codify PAPI event [%s], error: %s.\n", papi_event_name, PAPI_strerror(retval));
			else
				papi_nevents++;
		}
	}
#endif
}

#ifdef STARPU_PAPI
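/* Per-task PAPI counters: _starpu_profiling_papi_task_start_counters()
 * creates and starts an event set for the task, and
 * _starpu_profiling_papi_task_stop_counters() stops it, records the values
 * in the trace and releases the event set. */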
void _starpu_profiling_papi_task_start_counters(struct starpu_task *task)
{
	if (!starpu_profiling_status_get())
		return;

	struct starpu_profiling_task_info *profiling_info;
	profiling_info = task->profiling_info;

	if (profiling_info)
	{
		profiling_info->papi_event_set = PAPI_NULL;
		PAPI_create_eventset(&profiling_info->papi_event_set);
		for (int i = 0; i < papi_nevents; i++)
		{
			PAPI_add_event(profiling_info->papi_event_set, papi_events[i]);
			profiling_info->papi_values[i] = 0;
		}
		PAPI_reset(profiling_info->papi_event_set);
		PAPI_start(profiling_info->papi_event_set);
	}
}

void _starpu_profiling_papi_task_stop_counters(struct starpu_task *task)
{
	if (!starpu_profiling_status_get())
		return;

	struct starpu_profiling_task_info *profiling_info;
	profiling_info = task->profiling_info;

	if (profiling_info)
	{
		PAPI_stop(profiling_info->papi_event_set, profiling_info->papi_values);
		for (int i = 0; i < papi_nevents; i++)
		{
			_STARPU_TRACE_PAPI_TASK_EVENT(papi_events[i], task, profiling_info->papi_values[i]);
		}
		PAPI_cleanup_eventset(profiling_info->papi_event_set);
		PAPI_destroy_eventset(&profiling_info->papi_event_set);
	}
}
#endif
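
/* Honour the STARPU_PROFILING environment variable: when it is set to a
 * non-zero value, profiling is enabled as soon as StarPU starts. */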
void _starpu_profiling_start(void)
{
	const char *env;
	if ((env = starpu_getenv("STARPU_PROFILING")) && atoi(env))
	{
		starpu_profiling_status_set(STARPU_PROFILING_ENABLE);
	}
}

void _starpu_profiling_terminate(void)
{
	int worker;
	for (worker = 0; worker < STARPU_NMAXWORKERS; worker++)
	{
		STARPU_PTHREAD_MUTEX_DESTROY(&worker_info_mutex[worker]);
	}
}

/*
 * Task profiling
 */
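
/* Only allocate the per-task profiling structure when it will actually be
 * used: either profiling is enabled, or the codelet has an energy model that
 * is being benchmarked/calibrated and thus needs somewhere to store the
 * measured energy. */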
struct starpu_profiling_task_info *_starpu_allocate_profiling_info_if_needed(struct starpu_task *task)
{
	struct starpu_profiling_task_info *info = NULL;

	/* If we are benchmarking, we need room for the energy */
	if (starpu_profiling_status_get() || (task->cl && task->cl->energy_model && (task->cl->energy_model->benchmarking || _starpu_get_calibrate_flag())))
	{
		_STARPU_CALLOC(info, 1, sizeof(struct starpu_profiling_task_info));
	}

	return info;
}

/*
 * Worker profiling
 */

static void _starpu_worker_reset_profiling_info_with_lock(int workerid)
{
	_starpu_clock_gettime(&worker_info[workerid].start_time);

	/* This is computed in a lazy fashion when the application queries
	 * profiling info. */
	starpu_timespec_clear(&worker_info[workerid].total_time);

	starpu_timespec_clear(&worker_info[workerid].executing_time);
	starpu_timespec_clear(&worker_info[workerid].sleeping_time);

	worker_info[workerid].executed_tasks = 0;
	worker_info[workerid].used_cycles = 0;
	worker_info[workerid].stall_cycles = 0;
	worker_info[workerid].energy_consumed = 0;
	worker_info[workerid].flops = 0;

	/* We detect if the worker is already sleeping or doing some
	 * computation */
	enum _starpu_worker_status status = _starpu_worker_get_status(workerid);
	if (status == STATUS_SLEEPING || status == STATUS_SLEEPING_SCHEDULING)
	{
		worker_registered_sleeping_start[workerid] = 1;
		_starpu_clock_gettime(&sleeping_start_date[workerid]);
	}
	else
	{
		worker_registered_sleeping_start[workerid] = 0;
	}

	if (status == STATUS_EXECUTING)
	{
		worker_registered_executing_start[workerid] = 1;
		_starpu_clock_gettime(&executing_start_date[workerid]);
	}
	else
	{
		worker_registered_executing_start[workerid] = 0;
	}
}
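
/* The two functions below bracket the periods during which a worker is idle:
 * _starpu_worker_restart_sleeping() records when the worker starts waiting
 * and _starpu_worker_stop_sleeping() accumulates the elapsed time into the
 * worker's sleeping_time counter. */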
void _starpu_worker_restart_sleeping(int workerid)
{
	if (starpu_profiling_status_get())
	{
		struct timespec sleep_start_time;
		_starpu_clock_gettime(&sleep_start_time);

		STARPU_PTHREAD_MUTEX_LOCK(&worker_info_mutex[workerid]);
		if (worker_registered_sleeping_start[workerid] == 0)
		{
			worker_registered_sleeping_start[workerid] = 1;
			memcpy(&sleeping_start_date[workerid], &sleep_start_time, sizeof(struct timespec));
		}
		STARPU_PTHREAD_MUTEX_UNLOCK(&worker_info_mutex[workerid]);
	}
}

void _starpu_worker_stop_sleeping(int workerid)
{
	if (starpu_profiling_status_get())
	{
		struct timespec *sleeping_start, sleep_end_time;
		_starpu_clock_gettime(&sleep_end_time);

		STARPU_PTHREAD_MUTEX_LOCK(&worker_info_mutex[workerid]);
		STARPU_ASSERT(worker_registered_sleeping_start[workerid] == 1);
		sleeping_start = &sleeping_start_date[workerid];

		/* Profiling may have been enabled while the worker was already
		 * blocked, so we do not measure (end - start) but
		 * (end - max(start, worker_start)), where worker_start is the
		 * date of the last profiling info reset on this worker. */
		struct timespec *worker_start = &worker_info[workerid].start_time;
		if (starpu_timespec_cmp(sleeping_start, worker_start, <))
		{
			/* sleeping_start < worker_start */
			sleeping_start = worker_start;
		}

		struct timespec sleeping_time;
		starpu_timespec_sub(&sleep_end_time, sleeping_start, &sleeping_time);

		starpu_timespec_accumulate(&worker_info[workerid].sleeping_time, &sleeping_time);

		worker_registered_sleeping_start[workerid] = 0;

		STARPU_PTHREAD_MUTEX_UNLOCK(&worker_info_mutex[workerid]);
	}
}
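
/* Likewise, record when a worker starts and stops executing a task. The
 * recorded start date lets starpu_profiling_worker_get_info() account for an
 * execution that is still in progress when profiling info is queried. */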
void _starpu_worker_register_executing_start_date(int workerid, struct timespec *executing_start)
{
	if (starpu_profiling_status_get())
	{
		STARPU_PTHREAD_MUTEX_LOCK(&worker_info_mutex[workerid]);
		worker_registered_executing_start[workerid] = 1;
		memcpy(&executing_start_date[workerid], executing_start, sizeof(struct timespec));
		STARPU_PTHREAD_MUTEX_UNLOCK(&worker_info_mutex[workerid]);
	}
}

void _starpu_worker_register_executing_end(int workerid)
{
	if (starpu_profiling_status_get())
	{
		STARPU_PTHREAD_MUTEX_LOCK(&worker_info_mutex[workerid]);
		worker_registered_executing_start[workerid] = 0;
		STARPU_PTHREAD_MUTEX_UNLOCK(&worker_info_mutex[workerid]);
	}
}

void _starpu_worker_update_profiling_info_executing(int workerid, struct timespec *executing_time, int executed_tasks, uint64_t used_cycles, uint64_t stall_cycles, double energy_consumed, double flops)
{
	if (starpu_profiling_status_get())
	{
		STARPU_PTHREAD_MUTEX_LOCK(&worker_info_mutex[workerid]);

		if (executing_time)
			starpu_timespec_accumulate(&worker_info[workerid].executing_time, executing_time);

		worker_info[workerid].used_cycles += used_cycles;
		worker_info[workerid].stall_cycles += stall_cycles;
		worker_info[workerid].energy_consumed += energy_consumed;
		worker_info[workerid].executed_tasks += executed_tasks;
		worker_info[workerid].flops += flops;

		STARPU_PTHREAD_MUTEX_UNLOCK(&worker_info_mutex[workerid]);
	}
	else /* Not thread safe, but that should not be too much of a problem */
		worker_info[workerid].executed_tasks += executed_tasks;
}
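
/* Snapshot the profiling counters of a worker into *info and reset them.
 * The total, executing and sleeping times are completed lazily here, using
 * the dates registered above for any sleep or execution still in progress. */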
int starpu_profiling_worker_get_info(int workerid, struct starpu_profiling_worker_info *info)
{
	if (!starpu_profiling_status_get())
	{
		/* Not thread safe, but that should not be too much of a problem */
		if (info)
			info->executed_tasks = worker_info[workerid].executed_tasks;
	}

	STARPU_PTHREAD_MUTEX_LOCK(&_starpu_get_worker_struct(workerid)->sched_mutex);
	STARPU_PTHREAD_MUTEX_LOCK(&worker_info_mutex[workerid]);

	if (info)
	{
		/* The total time is computed in a lazy fashion */
		struct timespec now;
		_starpu_clock_gettime(&now);

		/* In case some worker is currently sleeping, we take into
		 * account the time spent since it registered. */
		if (worker_registered_sleeping_start[workerid])
		{
			struct timespec sleeping_time;
			starpu_timespec_sub(&now, &sleeping_start_date[workerid], &sleeping_time);
			starpu_timespec_accumulate(&worker_info[workerid].sleeping_time, &sleeping_time);
		}

		if (worker_registered_executing_start[workerid])
		{
			struct timespec executing_time;
			starpu_timespec_sub(&now, &executing_start_date[workerid], &executing_time);
			starpu_timespec_accumulate(&worker_info[workerid].executing_time, &executing_time);
		}

		/* total_time = now - start_time */
		starpu_timespec_sub(&now, &worker_info[workerid].start_time,
					&worker_info[workerid].total_time);

		memcpy(info, &worker_info[workerid], sizeof(struct starpu_profiling_worker_info));
	}

	_starpu_worker_reset_profiling_info_with_lock(workerid);

	STARPU_PTHREAD_MUTEX_UNLOCK(&worker_info_mutex[workerid]);
	STARPU_PTHREAD_MUTEX_UNLOCK(&_starpu_get_worker_struct(workerid)->sched_mutex);

	return 0;
}

/* When did the task reach the scheduler? */
void _starpu_profiling_set_task_push_start_time(struct starpu_task *task)
{
	if (!starpu_profiling_status_get())
		return;

	struct starpu_profiling_task_info *profiling_info;
	profiling_info = task->profiling_info;

	if (profiling_info)
		_starpu_clock_gettime(&profiling_info->push_start_time);
}

void _starpu_profiling_set_task_push_end_time(struct starpu_task *task)
{
	if (!starpu_profiling_status_get())
		return;

	struct starpu_profiling_task_info *profiling_info;
	profiling_info = task->profiling_info;

	if (profiling_info)
		_starpu_clock_gettime(&profiling_info->push_end_time);
}

/*
 * Bus profiling
 */

void _starpu_initialize_busid_matrix(void)
{
	int i, j;
	for (j = 0; j < STARPU_MAXNODES; j++)
		for (i = 0; i < STARPU_MAXNODES; i++)
			busid_matrix[i][j] = -1;

	busid_cnt = 0;
}

static void _starpu_bus_reset_profiling_info(struct starpu_profiling_bus_info *bus_info)
{
	_starpu_clock_gettime(&bus_info->start_time);
	bus_info->transferred_bytes = 0;
	bus_info->transfer_count = 0;
}
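
/* Register the (src_node, dst_node) bus and return its busid, or -EBUSY if
 * this pair was already registered. The busid also indexes the
 * busid_to_node_pair table used to map a busid back to its end points. */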
int _starpu_register_bus(int src_node, int dst_node)
{
	if (starpu_bus_get_id(src_node, dst_node) != -1)
		return -EBUSY;

	int busid = STARPU_ATOMIC_ADD(&busid_cnt, 1) - 1;

	busid_matrix[src_node][dst_node] = busid;

	busid_to_node_pair[busid].src = src_node;
	busid_to_node_pair[busid].dst = dst_node;
	busid_to_node_pair[busid].bus_info = &bus_profiling_info[src_node][dst_node];

	_starpu_bus_reset_profiling_info(&bus_profiling_info[src_node][dst_node]);

	return busid;
}

int starpu_bus_get_count(void)
{
	return busid_cnt;
}

int starpu_bus_get_id(int src, int dst)
{
	return busid_matrix[src][dst];
}

int starpu_bus_get_src(int busid)
{
	return busid_to_node_pair[busid].src;
}

int starpu_bus_get_dst(int busid)
{
	return busid_to_node_pair[busid].dst;
}

void starpu_bus_set_direct(int busid, int direct)
{
	bus_direct[busid] = direct;
}

int starpu_bus_get_direct(int busid)
{
	return bus_direct[busid];
}

void starpu_bus_set_ngpus(int busid, int ngpus)
{
	bus_ngpus[busid] = ngpus;
}

int starpu_bus_get_ngpus(int busid)
{
	struct _starpu_machine_topology *topology = &_starpu_get_machine_config()->topology;
	int ngpus = bus_ngpus[busid];
	if (!ngpus)
		/* Unknown number of GPUs, assume it's shared by all GPUs */
		ngpus = topology->ncudagpus + topology->nopenclgpus;
	return ngpus;
}
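
/* Copy the profiling counters of the bus into *bus_info (completing
 * total_time lazily) and then reset them, so each call returns the activity
 * observed since the previous call. */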
int starpu_bus_get_profiling_info(int busid, struct starpu_profiling_bus_info *bus_info)
{
	int src_node = starpu_bus_get_src(busid);
	int dst_node = starpu_bus_get_dst(busid);

	/* XXX: protect this whole method with a mutex */
	if (bus_info)
	{
		struct timespec now;
		_starpu_clock_gettime(&now);

		/* total_time = now - start_time */
		starpu_timespec_sub(&now, &bus_profiling_info[src_node][dst_node].start_time,
					&bus_profiling_info[src_node][dst_node].total_time);

		memcpy(bus_info, &bus_profiling_info[src_node][dst_node], sizeof(struct starpu_profiling_bus_info));
	}

	_starpu_bus_reset_profiling_info(&bus_profiling_info[src_node][dst_node]);

	return 0;
}

void _starpu_bus_update_profiling_info(int src_node, int dst_node, size_t size)
{
	bus_profiling_info[src_node][dst_node].transferred_bytes += size;
	bus_profiling_info[src_node][dst_node].transfer_count++;
//	fprintf(stderr, "PROFILE %d -> %d : %d (cnt %d)\n", src_node, dst_node, size, bus_profiling_info[src_node][dst_node].transfer_count);
}
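
/* starpu_profiling_status_get() is normally resolved through the fast path
 * provided in the headers (presumably a macro or inline reading
 * _starpu_profiling directly); the #undef below makes sure an out-of-line
 * function symbol is emitted here as well. */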
#undef starpu_profiling_status_get
int starpu_profiling_status_get(void)
{
	int ret;
	ANNOTATE_HAPPENS_AFTER(&_starpu_profiling);
	ret = _starpu_profiling;
	ANNOTATE_HAPPENS_BEFORE(&_starpu_profiling);
	return ret;
}