profiling.c

/* StarPU --- Runtime system for heterogeneous multicore architectures.
 *
 * Copyright (C) 2010-2021  Université de Bordeaux, CNRS (LaBRI UMR 5800), Inria
 * Copyright (C) 2020       Federal University of Rio Grande do Sul (UFRGS)
 *
 * StarPU is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation; either version 2.1 of the License, or (at
 * your option) any later version.
 *
 * StarPU is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * See the GNU Lesser General Public License in COPYING.LGPL for more details.
 */

#include <starpu.h>
#include <starpu_profiling.h>
#include <profiling/profiling.h>
#include <core/workers.h>
#include <common/config.h>
#include <common/utils.h>
#include <common/timing.h>
#include <common/fxt.h>
#include <errno.h>

#ifdef STARPU_PAPI
#include <papi.h>
#endif

/* TODO: move to worker structure */
static struct starpu_profiling_worker_info worker_info[STARPU_NMAXWORKERS];
/* TODO: rather use rwlock */
static starpu_pthread_mutex_t worker_info_mutex[STARPU_NMAXWORKERS];

/* In case the worker is still sleeping when the user requests profiling info,
 * we need to account for the time elapsed while sleeping. */
static unsigned worker_registered_sleeping_start[STARPU_NMAXWORKERS];
static struct timespec sleeping_start_date[STARPU_NMAXWORKERS];

static unsigned worker_registered_executing_start[STARPU_NMAXWORKERS];
static struct timespec executing_start_date[STARPU_NMAXWORKERS];

#ifdef STARPU_PAPI
static starpu_pthread_mutex_t papi_mutex = STARPU_PTHREAD_MUTEX_INITIALIZER;
static int papi_events[PAPI_MAX_HWCTRS];
static int papi_nevents = 0;
static int warned_component_unavailable = 0;
#endif

/* Store the busid of the different (src, dst) pairs. busid_matrix[src][dst]
 * contains the busid of (src, dst) or -1 if the bus was not registered. */
struct node_pair
{
	int src;
	int dst;
	struct starpu_profiling_bus_info *bus_info;
};

static int busid_matrix[STARPU_MAXNODES][STARPU_MAXNODES];
static struct starpu_profiling_bus_info bus_profiling_info[STARPU_MAXNODES][STARPU_MAXNODES];
static struct node_pair busid_to_node_pair[STARPU_MAXNODES*STARPU_MAXNODES];
static char bus_direct[STARPU_MAXNODES*STARPU_MAXNODES];
static int bus_ngpus[STARPU_MAXNODES*STARPU_MAXNODES];
static unsigned busid_cnt = 0;

static void _starpu_bus_reset_profiling_info(struct starpu_profiling_bus_info *bus_info);

/* Clear all the profiling info related to the worker. */
static void _starpu_worker_reset_profiling_info_with_lock(int workerid);

/*
 * Global control of profiling
 */

/* Disabled by default, unless simulating */
int _starpu_profiling =
#ifdef STARPU_SIMGRID
	1
#else
	0
#endif
	;
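
/* Public entry point: simply forwards to the internal initialization routine. */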
void starpu_profiling_init()
{
	_starpu_profiling_init();
}
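
/* Reset the profiling counters of every worker and of every registered bus. */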
static void _starpu_profiling_reset_counters()
{
	int worker;
	for (worker = 0; worker < STARPU_NMAXWORKERS; worker++)
	{
		_starpu_worker_reset_profiling_info_with_lock(worker);
	}

	int busid;
	int bus_cnt = starpu_bus_get_count();
	for (busid = 0; busid < bus_cnt; busid++)
	{
		struct starpu_profiling_bus_info *bus_info;
		bus_info = busid_to_node_pair[busid].bus_info;
		_starpu_bus_reset_profiling_info(bus_info);
	}
}
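
/* Enable or disable profiling and return the previous status. All the worker
 * sched mutexes and info mutexes are held while the status is switched, so
 * that workers observe a consistent value; the counters are reset whenever
 * profiling gets enabled. */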
int starpu_profiling_status_set(int status)
{
	unsigned worker;
	for (worker = 0; worker < starpu_worker_get_count(); worker++)
	{
		struct _starpu_worker *worker_struct = _starpu_get_worker_struct(worker);
		STARPU_PTHREAD_MUTEX_LOCK(&worker_struct->sched_mutex);
		STARPU_PTHREAD_MUTEX_LOCK(&worker_info_mutex[worker]);
	}

	ANNOTATE_HAPPENS_AFTER(&_starpu_profiling);
	int prev_value = _starpu_profiling;
	_starpu_profiling = status;
	ANNOTATE_HAPPENS_BEFORE(&_starpu_profiling);

	_STARPU_TRACE_SET_PROFILING(status);

	/* If we enable profiling, we reset the counters. */
	if (status == STARPU_PROFILING_ENABLE)
	{
		_starpu_profiling_reset_counters();
	}

	for (worker = 0; worker < starpu_worker_get_count(); worker++)
	{
		struct _starpu_worker *worker_struct = _starpu_get_worker_struct(worker);
		STARPU_PTHREAD_MUTEX_UNLOCK(&worker_info_mutex[worker]);
		STARPU_PTHREAD_MUTEX_UNLOCK(&worker_struct->sched_mutex);
	}

	return prev_value;
}
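
/* Internal initialization: create the per-worker mutexes and, when PAPI
 * support is compiled in, initialize the PAPI library and parse the list of
 * events requested through the STARPU_PROF_PAPI_EVENTS environment variable. */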
void _starpu_profiling_init(void)
{
	int worker;
	for (worker = 0; worker < STARPU_NMAXWORKERS; worker++)
	{
		STARPU_PTHREAD_MUTEX_INIT(&worker_info_mutex[worker], NULL);
	}

#ifdef STARPU_PAPI
	STARPU_PTHREAD_MUTEX_LOCK(&papi_mutex);

	int retval = PAPI_library_init(PAPI_VER_CURRENT);
	if (retval != PAPI_VER_CURRENT)
	{
		_STARPU_MSG("Failed to initialize PAPI, error: %s.\n", PAPI_strerror(retval));
	}

	retval = PAPI_thread_init(pthread_self);
	if (retval != PAPI_OK)
	{
		_STARPU_MSG("Failed to initialize PAPI thread support, error: %s.\n", PAPI_strerror(retval));
	}

	char *conf_papi_events;
	char *papi_event_name;
	conf_papi_events = starpu_getenv("STARPU_PROF_PAPI_EVENTS");
	papi_nevents = 0;
	if (conf_papi_events != NULL)
	{
		while ((papi_event_name = strtok_r(conf_papi_events, " ,", &conf_papi_events)))
		{
			if (papi_nevents == PAPI_MAX_HWCTRS)
			{
				_STARPU_MSG("Too many requested PAPI counters, ignoring %s\n", papi_event_name);
				continue;
			}

			_STARPU_DEBUG("Loading PAPI event: %s\n", papi_event_name);
			retval = PAPI_event_name_to_code((char*)papi_event_name, &papi_events[papi_nevents]);
			if (retval != PAPI_OK)
				_STARPU_MSG("Failed to codify PAPI event [%s], error: %s.\n", papi_event_name, PAPI_strerror(retval));
			else
				papi_nevents++;
		}
	}
	STARPU_PTHREAD_MUTEX_UNLOCK(&papi_mutex);
#endif
}

#ifdef STARPU_PAPI
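/* Create and start a per-task PAPI event set containing the events that were
 * requested through STARPU_PROF_PAPI_EVENTS. */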
void _starpu_profiling_papi_task_start_counters(struct starpu_task *task)
{
	if (!starpu_profiling_status_get())
		return;

	struct starpu_profiling_task_info *profiling_info;
	profiling_info = task->profiling_info;

	if (profiling_info && papi_nevents)
	{
		int i;
		profiling_info->papi_event_set = PAPI_NULL;
		STARPU_PTHREAD_MUTEX_LOCK(&papi_mutex);
		PAPI_create_eventset(&profiling_info->papi_event_set);
		for (i = 0; i < papi_nevents; i++)
		{
			int ret = PAPI_add_event(profiling_info->papi_event_set, papi_events[i]);
#ifdef PAPI_ECMP_DISABLED
			if (ret == PAPI_ECMP_DISABLED && !warned_component_unavailable)
			{
				_STARPU_MSG("Error while registering PAPI event: Component containing event is disabled. Try running `papi_component_avail` to get more information.\n");
				warned_component_unavailable = 1;
			}
#endif
			profiling_info->papi_values[i] = 0;
		}
		PAPI_reset(profiling_info->papi_event_set);
		PAPI_start(profiling_info->papi_event_set);
		STARPU_PTHREAD_MUTEX_UNLOCK(&papi_mutex);
	}
}
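
/* Stop the task's PAPI event set, emit the counter values to the trace, and
 * release the event set. */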
void _starpu_profiling_papi_task_stop_counters(struct starpu_task *task)
{
	if (!starpu_profiling_status_get())
		return;

	struct starpu_profiling_task_info *profiling_info;
	profiling_info = task->profiling_info;

	if (profiling_info && papi_nevents)
	{
		int i;
		STARPU_PTHREAD_MUTEX_LOCK(&papi_mutex);
		PAPI_stop(profiling_info->papi_event_set, profiling_info->papi_values);

		for (i = 0; i < papi_nevents; i++)
		{
			_STARPU_TRACE_PAPI_TASK_EVENT(papi_events[i], task, profiling_info->papi_values[i]);
		}

		PAPI_cleanup_eventset(profiling_info->papi_event_set);
		PAPI_destroy_eventset(&profiling_info->papi_event_set);
		STARPU_PTHREAD_MUTEX_UNLOCK(&papi_mutex);
	}
}
#endif
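
/* Enable profiling at startup when the STARPU_PROFILING environment variable
 * is set to a non-zero value. */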
void _starpu_profiling_start(void)
{
	const char *env;
	if ((env = starpu_getenv("STARPU_PROFILING")) && atoi(env))
	{
		starpu_profiling_status_set(STARPU_PROFILING_ENABLE);
	}
}
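
/* Release the per-worker mutexes and shut PAPI down. */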
void _starpu_profiling_terminate(void)
{
	int worker;
	for (worker = 0; worker < STARPU_NMAXWORKERS; worker++)
	{
		STARPU_PTHREAD_MUTEX_DESTROY(&worker_info_mutex[worker]);
	}

#ifdef STARPU_PAPI
	/* free the resources used by PAPI */
	STARPU_PTHREAD_MUTEX_LOCK(&papi_mutex);
	PAPI_shutdown();
	STARPU_PTHREAD_MUTEX_UNLOCK(&papi_mutex);
#endif
}

/*
 * Task profiling
 */

struct starpu_profiling_task_info *_starpu_allocate_profiling_info_if_needed(struct starpu_task *task)
{
	struct starpu_profiling_task_info *info = NULL;

	/* If we are benchmarking, we need room for the energy */
	if (starpu_profiling_status_get() || (task->cl && task->cl->energy_model && (task->cl->energy_model->benchmarking || _starpu_get_calibrate_flag())))
	{
		_STARPU_CALLOC(info, 1, sizeof(struct starpu_profiling_task_info));
	}

	return info;
}

/*
 * Worker profiling
 */
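
/* Reset a worker's counters and record whether it is currently sleeping or
 * executing. The caller is expected to hold worker_info_mutex[workerid]
 * (hence the _with_lock suffix). */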
static void _starpu_worker_reset_profiling_info_with_lock(int workerid)
{
	_starpu_clock_gettime(&worker_info[workerid].start_time);

	/* This is computed in a lazy fashion when the application queries
	 * profiling info. */
	starpu_timespec_clear(&worker_info[workerid].total_time);

	starpu_timespec_clear(&worker_info[workerid].executing_time);
	starpu_timespec_clear(&worker_info[workerid].sleeping_time);

	worker_info[workerid].executed_tasks = 0;
	worker_info[workerid].used_cycles = 0;
	worker_info[workerid].stall_cycles = 0;
	worker_info[workerid].energy_consumed = 0;
	worker_info[workerid].flops = 0;

	/* We detect if the worker is already sleeping or doing some
	 * computation */
	enum _starpu_worker_status status = _starpu_worker_get_status(workerid);

	if (status == STATUS_SLEEPING || status == STATUS_SLEEPING_SCHEDULING)
	{
		worker_registered_sleeping_start[workerid] = 1;
		_starpu_clock_gettime(&sleeping_start_date[workerid]);
	}
	else
	{
		worker_registered_sleeping_start[workerid] = 0;
	}

	if (status == STATUS_EXECUTING)
	{
		worker_registered_executing_start[workerid] = 1;
		_starpu_clock_gettime(&executing_start_date[workerid]);
	}
	else
	{
		worker_registered_executing_start[workerid] = 0;
	}
}
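
/* Record the date at which the worker starts sleeping, unless a sleeping
 * period is already being accounted for. */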
void _starpu_worker_restart_sleeping(int workerid)
{
	if (starpu_profiling_status_get())
	{
		struct timespec sleep_start_time;
		_starpu_clock_gettime(&sleep_start_time);

		STARPU_PTHREAD_MUTEX_LOCK(&worker_info_mutex[workerid]);
		if (worker_registered_sleeping_start[workerid] == 0)
		{
			worker_registered_sleeping_start[workerid] = 1;
			memcpy(&sleeping_start_date[workerid], &sleep_start_time, sizeof(struct timespec));
		}
		STARPU_PTHREAD_MUTEX_UNLOCK(&worker_info_mutex[workerid]);
	}
}
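
/* Accumulate the sleeping period that just ended into the worker's
 * sleeping_time counter. */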
void _starpu_worker_stop_sleeping(int workerid)
{
	if (starpu_profiling_status_get())
	{
		struct timespec *sleeping_start, sleep_end_time;
		_starpu_clock_gettime(&sleep_end_time);

		STARPU_PTHREAD_MUTEX_LOCK(&worker_info_mutex[workerid]);
		if (worker_registered_sleeping_start[workerid] == 1)
		{
			sleeping_start = &sleeping_start_date[workerid];

			/* Perhaps profiling was enabled while the worker was
			 * already blocked, so we don't measure (end - start), but
			 * (end - max(start,worker_start)) where worker_start is the
			 * date of the previous profiling info reset on the worker */
			struct timespec *worker_start = &worker_info[workerid].start_time;
			if (starpu_timespec_cmp(sleeping_start, worker_start, <))
			{
				/* sleeping_start < worker_start */
				sleeping_start = worker_start;
			}

			struct timespec sleeping_time;
			starpu_timespec_sub(&sleep_end_time, sleeping_start, &sleeping_time);

			starpu_timespec_accumulate(&worker_info[workerid].sleeping_time, &sleeping_time);

			worker_registered_sleeping_start[workerid] = 0;
		}
		STARPU_PTHREAD_MUTEX_UNLOCK(&worker_info_mutex[workerid]);
	}
}

void _starpu_worker_register_executing_start_date(int workerid, struct timespec *executing_start)
{
	if (starpu_profiling_status_get())
	{
		STARPU_PTHREAD_MUTEX_LOCK(&worker_info_mutex[workerid]);
		worker_registered_executing_start[workerid] = 1;
		memcpy(&executing_start_date[workerid], executing_start, sizeof(struct timespec));
		STARPU_PTHREAD_MUTEX_UNLOCK(&worker_info_mutex[workerid]);
	}
}

void _starpu_worker_register_executing_end(int workerid)
{
	if (starpu_profiling_status_get())
	{
		STARPU_PTHREAD_MUTEX_LOCK(&worker_info_mutex[workerid]);
		worker_registered_executing_start[workerid] = 0;
		STARPU_PTHREAD_MUTEX_UNLOCK(&worker_info_mutex[workerid]);
	}
}
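
/* Accumulate per-task execution statistics (execution time, cycles, energy,
 * flops and number of executed tasks) into the worker's counters. */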
void _starpu_worker_update_profiling_info_executing(int workerid, struct timespec *executing_time, int executed_tasks, uint64_t used_cycles, uint64_t stall_cycles, double energy_consumed, double flops)
{
	if (starpu_profiling_status_get())
	{
		STARPU_PTHREAD_MUTEX_LOCK(&worker_info_mutex[workerid]);

		if (executing_time)
			starpu_timespec_accumulate(&worker_info[workerid].executing_time, executing_time);

		worker_info[workerid].used_cycles += used_cycles;
		worker_info[workerid].stall_cycles += stall_cycles;
		worker_info[workerid].energy_consumed += energy_consumed;
		worker_info[workerid].executed_tasks += executed_tasks;
		worker_info[workerid].flops += flops;

		STARPU_PTHREAD_MUTEX_UNLOCK(&worker_info_mutex[workerid]);
	}
	else /* Not thread safe, shouldn't be too much of a problem */
		worker_info[workerid].executed_tasks += executed_tasks;
}
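
/* Return a snapshot of the worker's profiling info in *info and reset the
 * counters. total_time, as well as any sleeping or executing period still in
 * progress, is computed lazily at this point. */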
int starpu_profiling_worker_get_info(int workerid, struct starpu_profiling_worker_info *info)
{
	if (!starpu_profiling_status_get())
	{
		/* Not thread safe, shouldn't be too much of a problem */
		info->executed_tasks = worker_info[workerid].executed_tasks;
	}

	STARPU_PTHREAD_MUTEX_LOCK(&_starpu_get_worker_struct(workerid)->sched_mutex);
	STARPU_PTHREAD_MUTEX_LOCK(&worker_info_mutex[workerid]);

	if (info)
	{
		/* The total time is computed in a lazy fashion */
		struct timespec now;
		_starpu_clock_gettime(&now);

		/* In case some worker is currently sleeping, we take into
		 * account the time spent since it registered. */
		if (worker_registered_sleeping_start[workerid])
		{
			struct timespec sleeping_time;
			starpu_timespec_sub(&now, &sleeping_start_date[workerid], &sleeping_time);
			starpu_timespec_accumulate(&worker_info[workerid].sleeping_time, &sleeping_time);
		}

		if (worker_registered_executing_start[workerid])
		{
			struct timespec executing_time;
			starpu_timespec_sub(&now, &executing_start_date[workerid], &executing_time);
			starpu_timespec_accumulate(&worker_info[workerid].executing_time, &executing_time);
		}

		/* total_time = now - start_time */
		starpu_timespec_sub(&now, &worker_info[workerid].start_time,
				    &worker_info[workerid].total_time);

		memcpy(info, &worker_info[workerid], sizeof(struct starpu_profiling_worker_info));
	}

	_starpu_worker_reset_profiling_info_with_lock(workerid);

	STARPU_PTHREAD_MUTEX_UNLOCK(&worker_info_mutex[workerid]);
	STARPU_PTHREAD_MUTEX_UNLOCK(&_starpu_get_worker_struct(workerid)->sched_mutex);

	return 0;
}

/* When did the task reach the scheduler? */
void _starpu_profiling_set_task_push_start_time(struct starpu_task *task)
{
	if (!starpu_profiling_status_get())
		return;

	struct starpu_profiling_task_info *profiling_info;
	profiling_info = task->profiling_info;

	if (profiling_info)
		_starpu_clock_gettime(&profiling_info->push_start_time);
}

void _starpu_profiling_set_task_push_end_time(struct starpu_task *task)
{
	if (!starpu_profiling_status_get())
		return;

	struct starpu_profiling_task_info *profiling_info;
	profiling_info = task->profiling_info;

	if (profiling_info)
		_starpu_clock_gettime(&profiling_info->push_end_time);
}

/*
 * Bus profiling
 */

void _starpu_initialize_busid_matrix(void)
{
	int i, j;
	for (j = 0; j < STARPU_MAXNODES; j++)
		for (i = 0; i < STARPU_MAXNODES; i++)
			busid_matrix[i][j] = -1;

	busid_cnt = 0;
}

static void _starpu_bus_reset_profiling_info(struct starpu_profiling_bus_info *bus_info)
{
	_starpu_clock_gettime(&bus_info->start_time);
	bus_info->transferred_bytes = 0;
	bus_info->transfer_count = 0;
}
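
/* Register the (src_node, dst_node) bus if it is not known yet and return its
 * busid, or -EBUSY if it was already registered. */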
int _starpu_register_bus(int src_node, int dst_node)
{
	if (starpu_bus_get_id(src_node, dst_node) != -1)
		return -EBUSY;

	int busid = STARPU_ATOMIC_ADD(&busid_cnt, 1) - 1;

	busid_matrix[src_node][dst_node] = busid;

	busid_to_node_pair[busid].src = src_node;
	busid_to_node_pair[busid].dst = dst_node;
	busid_to_node_pair[busid].bus_info = &bus_profiling_info[src_node][dst_node];

	_starpu_bus_reset_profiling_info(&bus_profiling_info[src_node][dst_node]);

	return busid;
}

int starpu_bus_get_count(void)
{
	return busid_cnt;
}

int starpu_bus_get_id(int src, int dst)
{
	return busid_matrix[src][dst];
}

int starpu_bus_get_src(int busid)
{
	return busid_to_node_pair[busid].src;
}

int starpu_bus_get_dst(int busid)
{
	return busid_to_node_pair[busid].dst;
}

void starpu_bus_set_direct(int busid, int direct)
{
	bus_direct[busid] = direct;
}

int starpu_bus_get_direct(int busid)
{
	return bus_direct[busid];
}

void starpu_bus_set_ngpus(int busid, int ngpus)
{
	bus_ngpus[busid] = ngpus;
}
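
/* Return the number of GPUs using this bus; when it was not set explicitly,
 * assume the bus is shared by all CUDA and OpenCL devices. */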
int starpu_bus_get_ngpus(int busid)
{
	struct _starpu_machine_topology *topology = &_starpu_get_machine_config()->topology;
	int ngpus = bus_ngpus[busid];
	if (!ngpus)
		/* Unknown number of GPUs, assume it's shared by all GPUs */
		ngpus = topology->ndevices[STARPU_CUDA_WORKER] + topology->ndevices[STARPU_OPENCL_WORKER];
	return ngpus;
}
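
/* Return a snapshot of the bus profiling info in *bus_info and reset the
 * counters, in the same way starpu_profiling_worker_get_info() does for
 * workers. */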
int starpu_bus_get_profiling_info(int busid, struct starpu_profiling_bus_info *bus_info)
{
	int src_node = starpu_bus_get_src(busid);
	int dst_node = starpu_bus_get_dst(busid);

	/* XXX protect all this method with a mutex */
	if (bus_info)
	{
		struct timespec now;
		_starpu_clock_gettime(&now);

		/* total_time = now - start_time */
		starpu_timespec_sub(&now, &bus_profiling_info[src_node][dst_node].start_time,
				    &bus_profiling_info[src_node][dst_node].total_time);

		memcpy(bus_info, &bus_profiling_info[src_node][dst_node], sizeof(struct starpu_profiling_bus_info));
	}

	_starpu_bus_reset_profiling_info(&bus_profiling_info[src_node][dst_node]);

	return 0;
}

void _starpu_bus_update_profiling_info(int src_node, int dst_node, size_t size)
{
	bus_profiling_info[src_node][dst_node].transferred_bytes += size;
	bus_profiling_info[src_node][dst_node].transfer_count++;
	// fprintf(stderr, "PROFILE %d -> %d : %d (cnt %d)\n", src_node, dst_node, size, bus_profiling_info[src_node][dst_node].transfer_count);
}
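
/* starpu_profiling_status_get() may also be provided as a macro by the
 * headers; #undef it first so that a real out-of-line function definition is
 * emitted here as well. */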
#undef starpu_profiling_status_get
int starpu_profiling_status_get(void)
{
	int ret;
	ANNOTATE_HAPPENS_AFTER(&_starpu_profiling);
	ret = _starpu_profiling;
	ANNOTATE_HAPPENS_BEFORE(&_starpu_profiling);
	return ret;
}