perfmodel_bus.c

/* StarPU --- Runtime system for heterogeneous multicore architectures.
 *
 * Copyright (C) 2009-2012  Université de Bordeaux 1
 * Copyright (C) 2010, 2011, 2012  Centre National de la Recherche Scientifique
 *
 * StarPU is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation; either version 2.1 of the License, or (at
 * your option) any later version.
 *
 * StarPU is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * See the GNU Lesser General Public License in COPYING.LGPL for more details.
 */

#ifdef STARPU_USE_CUDA
#ifndef _GNU_SOURCE
#define _GNU_SOURCE
#endif
#include <sched.h>
#endif

#include <unistd.h>
#include <sys/time.h>
#include <stdio.h>	/* fopen, fprintf, fscanf */
#include <stdlib.h>
#include <string.h>	/* memset, strncmp, strncat */
#include <math.h>
#include <starpu.h>
#include <starpu_cuda.h>
#include <starpu_opencl.h>
#include <common/config.h>
#include <core/workers.h>
#include <core/perfmodel/perfmodel.h>

#ifdef STARPU_USE_OPENCL
#include <starpu_opencl.h>
#endif

#ifdef STARPU_HAVE_WINDOWS
#include <windows.h>
#endif

#define SIZE	(32*1024*1024*sizeof(char))
#define NITER	128

#define MAXCPUS	32

static void starpu_force_bus_sampling(void);

/* timing is in µs per byte (i.e. slowness, inverse of bandwidth) */
struct dev_timing
{
	int cpu_id;
	double timing_htod;
	double timing_dtoh;
};
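
/* Worked example with hypothetical numbers: if copying a 32 MiB buffer
 * NITER (128) times takes 4194304 µs in total, the recorded slowness is
 * 4194304 / 128 / (32*1024*1024) ≈ 0.00098 µs per byte, i.e. a bandwidth
 * of about 1 GB/s (bandwidth = 1/slowness). */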
static double bandwidth_matrix[STARPU_MAXNODES][STARPU_MAXNODES];
static double latency_matrix[STARPU_MAXNODES][STARPU_MAXNODES];
static unsigned was_benchmarked = 0;
static unsigned ncpus = 0;
static int ncuda = 0;
static int nopencl = 0;

/* Benchmarking the performance of the bus */

#ifdef STARPU_USE_CUDA
static int cuda_affinity_matrix[STARPU_MAXCUDADEVS][MAXCPUS];
static double cudadev_timing_htod[STARPU_MAXNODES] = {0.0};
static double cudadev_timing_dtoh[STARPU_MAXNODES] = {0.0};
#ifdef HAVE_CUDA_MEMCPY_PEER
static double cudadev_timing_dtod[STARPU_MAXNODES][STARPU_MAXNODES] = {{0.0}};
#endif
static struct dev_timing cudadev_timing_per_cpu[STARPU_MAXNODES*MAXCPUS];
#endif

#ifdef STARPU_USE_OPENCL
static int opencl_affinity_matrix[STARPU_MAXOPENCLDEVS][MAXCPUS];
static double opencldev_timing_htod[STARPU_MAXNODES] = {0.0};
static double opencldev_timing_dtoh[STARPU_MAXNODES] = {0.0};
static struct dev_timing opencldev_timing_per_cpu[STARPU_MAXNODES*MAXCPUS];
#endif

#if defined(STARPU_USE_CUDA) || defined(STARPU_USE_OPENCL)

#ifdef STARPU_HAVE_HWLOC
static hwloc_topology_t hwtopology;
#endif
#ifdef STARPU_USE_CUDA
static void measure_bandwidth_between_host_and_dev_on_cpu_with_cuda(int dev, int cpu, struct dev_timing *dev_timing_per_cpu)
{
	struct _starpu_machine_config *config = _starpu_get_machine_config();
	_starpu_bind_thread_on_cpu(config, cpu);

	size_t size = SIZE;

	/* Initialize CUDA context on the device */
	cudaSetDevice(dev);

	/* hack to prevent third party libs from rebinding the thread */
	_starpu_bind_thread_on_cpu(config, cpu);

	/* hack to force the initialization */
	cudaFree(0);

	/* hack to prevent third party libs from rebinding the thread */
	_starpu_bind_thread_on_cpu(config, cpu);

	/* Get the maximum size which can be allocated on the device */
	struct cudaDeviceProp prop;
	cudaError_t cures;
	cures = cudaGetDeviceProperties(&prop, dev);
	if (STARPU_UNLIKELY(cures)) STARPU_CUDA_REPORT_ERROR(cures);
	if (size > prop.totalGlobalMem/4) size = prop.totalGlobalMem/4;

	/* Allocate a buffer on the device */
	unsigned char *d_buffer;
	cudaMalloc((void **)&d_buffer, size);
	STARPU_ASSERT(d_buffer);

	/* hack to prevent third party libs from rebinding the thread */
	_starpu_bind_thread_on_cpu(config, cpu);

	/* Allocate a buffer on the host */
	unsigned char *h_buffer;
	cures = cudaHostAlloc((void **)&h_buffer, size, 0);
	STARPU_ASSERT(cures == cudaSuccess);

	/* hack to prevent third party libs from rebinding the thread */
	_starpu_bind_thread_on_cpu(config, cpu);

	/* Fill them */
	memset(h_buffer, 0, size);
	cudaMemset(d_buffer, 0, size);

	/* hack to prevent third party libs from rebinding the thread */
	_starpu_bind_thread_on_cpu(config, cpu);

	unsigned iter;
	double timing;
	struct timeval start;
	struct timeval end;

	/* Measure upload bandwidth */
	gettimeofday(&start, NULL);
	for (iter = 0; iter < NITER; iter++)
	{
		cudaMemcpy(d_buffer, h_buffer, size, cudaMemcpyHostToDevice);
		cudaThreadSynchronize();
	}
	gettimeofday(&end, NULL);
	timing = (double)((end.tv_sec - start.tv_sec)*1000000 + (end.tv_usec - start.tv_usec));

	dev_timing_per_cpu[(dev+1)*MAXCPUS+cpu].timing_htod = timing/NITER/size;

	/* Measure download bandwidth */
	gettimeofday(&start, NULL);
	for (iter = 0; iter < NITER; iter++)
	{
		cudaMemcpy(h_buffer, d_buffer, size, cudaMemcpyDeviceToHost);
		cudaThreadSynchronize();
	}
	gettimeofday(&end, NULL);
	timing = (double)((end.tv_sec - start.tv_sec)*1000000 + (end.tv_usec - start.tv_usec));

	dev_timing_per_cpu[(dev+1)*MAXCPUS+cpu].timing_dtoh = timing/NITER/size;

	/* Free buffers */
	cudaFreeHost(h_buffer);
	cudaFree(d_buffer);

	cudaThreadExit();
}
#ifdef HAVE_CUDA_MEMCPY_PEER
static void measure_bandwidth_between_dev_and_dev_cuda(int src, int dst)
{
	size_t size = SIZE;
	int can;

	/* Get the maximum size which can be allocated on the devices */
	struct cudaDeviceProp prop;
	cudaError_t cures;
	cures = cudaGetDeviceProperties(&prop, src);
	if (STARPU_UNLIKELY(cures)) STARPU_CUDA_REPORT_ERROR(cures);
	if (size > prop.totalGlobalMem/4) size = prop.totalGlobalMem/4;
	cures = cudaGetDeviceProperties(&prop, dst);
	if (STARPU_UNLIKELY(cures)) STARPU_CUDA_REPORT_ERROR(cures);
	if (size > prop.totalGlobalMem/4) size = prop.totalGlobalMem/4;

	/* Initialize CUDA context on the source */
	cudaSetDevice(src);
	cures = cudaDeviceCanAccessPeer(&can, src, dst);
	if (!cures && can)
	{
		cures = cudaDeviceEnablePeerAccess(dst, 0);
		if (!cures)
			_STARPU_DISP("GPU-Direct %d -> %d\n", dst, src);
	}

	/* Allocate a buffer on the source device */
	unsigned char *s_buffer;
	cudaMalloc((void **)&s_buffer, size);
	STARPU_ASSERT(s_buffer);
	cudaMemset(s_buffer, 0, size);

	/* Initialize CUDA context on the destination */
	cudaSetDevice(dst);
	cures = cudaDeviceCanAccessPeer(&can, dst, src);
	if (!cures && can)
	{
		cures = cudaDeviceEnablePeerAccess(src, 0);
		if (!cures)
			_STARPU_DISP("GPU-Direct %d -> %d\n", src, dst);
	}

	/* Allocate a buffer on the destination device */
	unsigned char *d_buffer;
	cudaMalloc((void **)&d_buffer, size);
	STARPU_ASSERT(d_buffer);
	cudaMemset(d_buffer, 0, size);

	unsigned iter;
	double timing;
	struct timeval start;
	struct timeval end;

	/* Measure the device-to-device bandwidth */
	gettimeofday(&start, NULL);
	for (iter = 0; iter < NITER; iter++)
	{
		cudaMemcpyPeer(d_buffer, dst, s_buffer, src, size);
		cudaThreadSynchronize();
	}
	gettimeofday(&end, NULL);
	timing = (double)((end.tv_sec - start.tv_sec)*1000000 + (end.tv_usec - start.tv_usec));

	cudadev_timing_dtod[src+1][dst+1] = timing/NITER/size;

	/* Free buffers */
	cudaFree(d_buffer);
	cudaSetDevice(src);
	cudaFree(s_buffer);

	cudaThreadExit();
}
#endif
#endif
#ifdef STARPU_USE_OPENCL
static void measure_bandwidth_between_host_and_dev_on_cpu_with_opencl(int dev, int cpu, struct dev_timing *dev_timing_per_cpu)
{
	cl_context context;
	cl_command_queue queue;
	cl_int err = 0;
	size_t size = SIZE;
	int not_initialized;

	struct _starpu_machine_config *config = _starpu_get_machine_config();
	_starpu_bind_thread_on_cpu(config, cpu);

	/* Is the context already initialized? */
	starpu_opencl_get_context(dev, &context);
	not_initialized = (context == NULL);
	if (not_initialized == 1)
		_starpu_opencl_init_context(dev);

	/* Get context and queue */
	starpu_opencl_get_context(dev, &context);
	starpu_opencl_get_queue(dev, &queue);

	/* Get the maximum size which can be allocated on the device */
	cl_device_id device;
	cl_ulong maxMemAllocSize;
	starpu_opencl_get_device(dev, &device);
	err = clGetDeviceInfo(device, CL_DEVICE_MAX_MEM_ALLOC_SIZE, sizeof(maxMemAllocSize), &maxMemAllocSize, NULL);
	if (err != CL_SUCCESS) STARPU_OPENCL_REPORT_ERROR(err);
	if (size > (size_t)maxMemAllocSize/4) size = maxMemAllocSize/4;

	if (_starpu_opencl_get_device_type(dev) == CL_DEVICE_TYPE_CPU)
	{
		/* Let's not use too much RAM when running OpenCL on a CPU: it
		 * would make the OS swap like crazy. */
		size /= 2;
	}

	/* hack to prevent third party libs from rebinding the thread */
	_starpu_bind_thread_on_cpu(config, cpu);

	/* Allocate a buffer on the device */
	cl_mem d_buffer;
	d_buffer = clCreateBuffer(context, CL_MEM_READ_WRITE, size, NULL, &err);
	if (err != CL_SUCCESS) STARPU_OPENCL_REPORT_ERROR(err);

	/* hack to prevent third party libs from rebinding the thread */
	_starpu_bind_thread_on_cpu(config, cpu);

	/* Allocate a buffer on the host */
	unsigned char *h_buffer;
	h_buffer = (unsigned char *)malloc(size);
	STARPU_ASSERT(h_buffer);

	/* hack to prevent third party libs from rebinding the thread */
	_starpu_bind_thread_on_cpu(config, cpu);

	/* Fill them */
	memset(h_buffer, 0, size);
	err = clEnqueueWriteBuffer(queue, d_buffer, CL_TRUE, 0, size, h_buffer, 0, NULL, NULL);
	if (err != CL_SUCCESS) STARPU_OPENCL_REPORT_ERROR(err);

	/* hack to prevent third party libs from rebinding the thread */
	_starpu_bind_thread_on_cpu(config, cpu);

	unsigned iter;
	double timing;
	struct timeval start;
	struct timeval end;

	/* Measure upload bandwidth */
	gettimeofday(&start, NULL);
	for (iter = 0; iter < NITER; iter++)
	{
		err = clEnqueueWriteBuffer(queue, d_buffer, CL_TRUE, 0, size, h_buffer, 0, NULL, NULL);
		if (err != CL_SUCCESS) STARPU_OPENCL_REPORT_ERROR(err);
	}
	gettimeofday(&end, NULL);
	timing = (double)((end.tv_sec - start.tv_sec)*1000000 + (end.tv_usec - start.tv_usec));

	dev_timing_per_cpu[(dev+1)*MAXCPUS+cpu].timing_htod = timing/NITER/size;

	/* Measure download bandwidth */
	gettimeofday(&start, NULL);
	for (iter = 0; iter < NITER; iter++)
	{
		err = clEnqueueReadBuffer(queue, d_buffer, CL_TRUE, 0, size, h_buffer, 0, NULL, NULL);
		if (err != CL_SUCCESS) STARPU_OPENCL_REPORT_ERROR(err);
	}
	gettimeofday(&end, NULL);
	timing = (double)((end.tv_sec - start.tv_sec)*1000000 + (end.tv_usec - start.tv_usec));

	dev_timing_per_cpu[(dev+1)*MAXCPUS+cpu].timing_dtoh = timing/NITER/size;

	/* Free buffers */
	clReleaseMemObject(d_buffer);
	free(h_buffer);

	/* Uninitialize the OpenCL context on the device */
	if (not_initialized == 1)
		_starpu_opencl_deinit_context(dev);
}
#endif
/* NB: we want to sort the CPUs by DECREASING bandwidth, i.e. increasing slowness */
static int compar_dev_timing(const void *left_dev_timing, const void *right_dev_timing)
{
	const struct dev_timing *left = (const struct dev_timing *)left_dev_timing;
	const struct dev_timing *right = (const struct dev_timing *)right_dev_timing;

	double left_dtoh = left->timing_dtoh;
	double left_htod = left->timing_htod;
	double right_dtoh = right->timing_dtoh;
	double right_htod = right->timing_htod;

	double timing_sum2_left = left_dtoh*left_dtoh + left_htod*left_htod;
	double timing_sum2_right = right_dtoh*right_dtoh + right_htod*right_htod;

	/* Return a proper three-way comparison so that qsort() gets a
	 * consistent ordering: the smallest combined slowness (i.e. the
	 * highest bandwidth) comes first. */
	if (timing_sum2_left < timing_sum2_right)
		return -1;
	if (timing_sum2_left > timing_sum2_right)
		return 1;
	return 0;
}
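
/* For instance (hypothetical slownesses, in µs per byte), a CPU measured at
 * (dtoh 0.001, htod 0.001) has a sum of squares of 2e-6 and is therefore
 * ranked before a CPU measured at (dtoh 0.002, htod 0.002), whose sum is
 * 8e-6: after qsort(), the CPU with the highest combined bandwidth to the
 * device comes first. */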
#ifdef STARPU_HAVE_HWLOC
static int find_numa_node(hwloc_obj_t obj)
{
	STARPU_ASSERT(obj);
	hwloc_obj_t current = obj;

	while (current->type != HWLOC_OBJ_NODE)
	{
		current = current->parent;

		/* If we don't find a "node" obj before the root, this means
		 * hwloc does not know whether there are numa nodes or not, so
		 * we should not use a per-node sampling in that case. */
		STARPU_ASSERT(current);
	}

	STARPU_ASSERT(current->type == HWLOC_OBJ_NODE);

	return current->logical_index;
}
#endif
static void measure_bandwidth_between_cpus_and_dev(int dev, struct dev_timing *dev_timing_per_cpu, char *type)
{
	/* Either we have hwloc and we measure the bandwidth between each GPU
	 * and each NUMA node, or we don't have such NUMA information and we
	 * measure the bandwidth for each pair of (CPU, GPU), which is slower.
	 */
#ifdef STARPU_HAVE_HWLOC
	int cpu_depth = hwloc_get_type_depth(hwtopology, HWLOC_OBJ_CORE);
	int nnuma_nodes = hwloc_get_nbobjs_by_type(hwtopology, HWLOC_OBJ_NODE);

	/* If no NUMA node was found, we assume that we have a single memory
	 * bank. */
	const unsigned no_node_obj_was_found = (nnuma_nodes == 0);

	unsigned *is_available_per_numa_node = NULL;
	double *dev_timing_htod_per_numa_node = NULL;
	double *dev_timing_dtoh_per_numa_node = NULL;

	if (!no_node_obj_was_found)
	{
		is_available_per_numa_node = (unsigned *)malloc(nnuma_nodes * sizeof(unsigned));
		STARPU_ASSERT(is_available_per_numa_node);

		dev_timing_htod_per_numa_node = (double *)malloc(nnuma_nodes * sizeof(double));
		STARPU_ASSERT(dev_timing_htod_per_numa_node);

		dev_timing_dtoh_per_numa_node = (double *)malloc(nnuma_nodes * sizeof(double));
		STARPU_ASSERT(dev_timing_dtoh_per_numa_node);

		memset(is_available_per_numa_node, 0, nnuma_nodes*sizeof(unsigned));
	}
#endif

	unsigned cpu;
	for (cpu = 0; cpu < ncpus; cpu++)
	{
		dev_timing_per_cpu[(dev+1)*MAXCPUS+cpu].cpu_id = cpu;

#ifdef STARPU_HAVE_HWLOC
		int numa_id = 0;

		if (!no_node_obj_was_found)
		{
			hwloc_obj_t obj = hwloc_get_obj_by_depth(hwtopology, cpu_depth, cpu);

			numa_id = find_numa_node(obj);

			if (is_available_per_numa_node[numa_id])
			{
				/* We reuse the previous numbers for that NUMA node */
				dev_timing_per_cpu[(dev+1)*MAXCPUS+cpu].timing_htod =
					dev_timing_htod_per_numa_node[numa_id];
				dev_timing_per_cpu[(dev+1)*MAXCPUS+cpu].timing_dtoh =
					dev_timing_dtoh_per_numa_node[numa_id];
				continue;
			}
		}
#endif

#ifdef STARPU_USE_CUDA
		if (strncmp(type, "CUDA", 4) == 0)
			measure_bandwidth_between_host_and_dev_on_cpu_with_cuda(dev, cpu, dev_timing_per_cpu);
#endif
#ifdef STARPU_USE_OPENCL
		if (strncmp(type, "OpenCL", 6) == 0)
			measure_bandwidth_between_host_and_dev_on_cpu_with_opencl(dev, cpu, dev_timing_per_cpu);
#endif

#ifdef STARPU_HAVE_HWLOC
		if (!no_node_obj_was_found && !is_available_per_numa_node[numa_id])
		{
			/* Save the results for that NUMA node */
			dev_timing_htod_per_numa_node[numa_id] =
				dev_timing_per_cpu[(dev+1)*MAXCPUS+cpu].timing_htod;
			dev_timing_dtoh_per_numa_node[numa_id] =
				dev_timing_per_cpu[(dev+1)*MAXCPUS+cpu].timing_dtoh;
			is_available_per_numa_node[numa_id] = 1;
		}
#endif
	}

#ifdef STARPU_HAVE_HWLOC
	if (!no_node_obj_was_found)
	{
		free(is_available_per_numa_node);
		free(dev_timing_htod_per_numa_node);
		free(dev_timing_dtoh_per_numa_node);
	}
#endif /* STARPU_HAVE_HWLOC */
}
static void measure_bandwidth_between_host_and_dev(int dev, double *dev_timing_htod, double *dev_timing_dtoh,
						   struct dev_timing *dev_timing_per_cpu, char *type)
{
	measure_bandwidth_between_cpus_and_dev(dev, dev_timing_per_cpu, type);

	/* sort the results */
	qsort(&(dev_timing_per_cpu[(dev+1)*MAXCPUS]), ncpus,
	      sizeof(struct dev_timing),
	      compar_dev_timing);

#ifdef STARPU_DEVEL
# warning save timing_dtoh and timing_htod data to display them when calling starpu_machine_display ? (Brice would like that)
#endif

#ifdef STARPU_VERBOSE
	unsigned cpu;
	for (cpu = 0; cpu < ncpus; cpu++)
	{
		unsigned current_cpu = dev_timing_per_cpu[(dev+1)*MAXCPUS+cpu].cpu_id;
		double bandwidth_dtoh = dev_timing_per_cpu[(dev+1)*MAXCPUS+cpu].timing_dtoh;
		double bandwidth_htod = dev_timing_per_cpu[(dev+1)*MAXCPUS+cpu].timing_htod;

		double bandwidth_sum2 = bandwidth_dtoh*bandwidth_dtoh + bandwidth_htod*bandwidth_htod;

		_STARPU_DISP("(%10s) BANDWIDTH GPU %d CPU %u - htod %f - dtoh %f - %f\n", type, dev, current_cpu, bandwidth_htod, bandwidth_dtoh, sqrt(bandwidth_sum2));
	}

	unsigned best_cpu = dev_timing_per_cpu[(dev+1)*MAXCPUS+0].cpu_id;

	_STARPU_DISP("(%10s) BANDWIDTH GPU %d BEST CPU %u\n", type, dev, best_cpu);
#endif

	/* The results are sorted in a decreasing order, so that the best
	 * measurement is currently the first entry. */
	dev_timing_dtoh[dev+1] = dev_timing_per_cpu[(dev+1)*MAXCPUS+0].timing_dtoh;
	dev_timing_htod[dev+1] = dev_timing_per_cpu[(dev+1)*MAXCPUS+0].timing_htod;
}
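
/* The per-GPU cpu_id order established above is what later ends up in the
 * affinity file (see write_bus_affinity_file_content()): the first CPU
 * listed for a device is the preferred one to drive it. */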
#endif /* defined(STARPU_USE_CUDA) || defined(STARPU_USE_OPENCL) */

static void benchmark_all_gpu_devices(void)
{
#if defined(STARPU_USE_CUDA) || defined(STARPU_USE_OPENCL)
	int i;
#ifdef HAVE_CUDA_MEMCPY_PEER
	int j;
#endif

	_STARPU_DEBUG("Benchmarking the speed of the bus\n");

#ifdef STARPU_HAVE_HWLOC
	hwloc_topology_init(&hwtopology);
	hwloc_topology_load(hwtopology);
#endif

#ifdef STARPU_HAVE_HWLOC
	hwloc_cpuset_t former_cpuset = hwloc_bitmap_alloc();
	hwloc_get_cpubind(hwtopology, former_cpuset, HWLOC_CPUBIND_THREAD);
#elif __linux__
	/* Save the current cpu binding */
	cpu_set_t former_process_affinity;
	int ret;
	ret = sched_getaffinity(0, sizeof(former_process_affinity), &former_process_affinity);
	if (ret)
	{
		perror("sched_getaffinity");
		STARPU_ABORT();
	}
#else
#warning Missing binding support, StarPU will not be able to properly benchmark NUMA topology
#endif

	struct _starpu_machine_config *config = _starpu_get_machine_config();
	ncpus = _starpu_topology_get_nhwcpu(config);

#ifdef STARPU_USE_CUDA
	ncuda = _starpu_get_cuda_device_count();
	for (i = 0; i < ncuda; i++)
	{
		_STARPU_DISP("CUDA %d...\n", i);
		/* measure bandwidth between Host and Device i */
		measure_bandwidth_between_host_and_dev(i, cudadev_timing_htod, cudadev_timing_dtoh, cudadev_timing_per_cpu, "CUDA");
	}
#ifdef HAVE_CUDA_MEMCPY_PEER
	for (i = 0; i < ncuda; i++)
		for (j = 0; j < ncuda; j++)
			if (i != j)
			{
				_STARPU_DISP("CUDA %d -> %d...\n", i, j);
				/* measure bandwidth between Device i and Device j */
				measure_bandwidth_between_dev_and_dev_cuda(i, j);
			}
#endif
#endif
#ifdef STARPU_USE_OPENCL
	nopencl = _starpu_opencl_get_device_count();
	for (i = 0; i < nopencl; i++)
	{
		_STARPU_DISP("OpenCL %d...\n", i);
		/* measure bandwidth between Host and Device i */
		measure_bandwidth_between_host_and_dev(i, opencldev_timing_htod, opencldev_timing_dtoh, opencldev_timing_per_cpu, "OpenCL");
	}
#endif

#ifdef STARPU_HAVE_HWLOC
	hwloc_set_cpubind(hwtopology, former_cpuset, HWLOC_CPUBIND_THREAD);
#elif __linux__
	/* Restore the former affinity */
	ret = sched_setaffinity(0, sizeof(former_process_affinity), &former_process_affinity);
	if (ret)
	{
		perror("sched_setaffinity");
		STARPU_ABORT();
	}
#endif

#ifdef STARPU_HAVE_HWLOC
	hwloc_topology_destroy(hwtopology);
#endif

	_STARPU_DEBUG("Benchmarking the speed of the bus is done.\n");
#endif /* defined(STARPU_USE_CUDA) || defined(STARPU_USE_OPENCL) */

	was_benchmarked = 1;
}
static void get_bus_path(const char *type, char *path, size_t maxlen)
{
	_starpu_get_perf_model_dir_bus(path, maxlen);

	char hostname[32];
	char *forced_hostname = getenv("STARPU_HOSTNAME");
	if (forced_hostname && forced_hostname[0])
		snprintf(hostname, sizeof(hostname), "%s", forced_hostname);
	else
		gethostname(hostname, sizeof(hostname));
	strncat(path, hostname, maxlen);
	strncat(path, ".", maxlen);
	strncat(path, type, maxlen);
}
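
/* The resulting path is "<bus sampling dir><hostname>.<type>"; assuming the
 * default sampling directory, this typically gives something like
 * $HOME/.starpu/sampling/bus/myhost.bandwidth (the exact prefix is whatever
 * _starpu_get_perf_model_dir_bus() fills in). */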
/*
 *	Affinity
 */

static void get_affinity_path(char *path, size_t maxlen)
{
	get_bus_path("affinity", path, maxlen);
}

static void load_bus_affinity_file_content(void)
{
#if defined(STARPU_USE_CUDA) || defined(STARPU_USE_OPENCL)
	FILE *f;
	char path[256];
	get_affinity_path(path, 256);

	f = fopen(path, "r");
	STARPU_ASSERT(f);

	struct _starpu_machine_config *config = _starpu_get_machine_config();
	ncpus = _starpu_topology_get_nhwcpu(config);
	int gpu;

#ifdef STARPU_USE_CUDA
	ncuda = _starpu_get_cuda_device_count();
	for (gpu = 0; gpu < ncuda; gpu++)
	{
		int ret;
		int dummy;

		_starpu_drop_comments(f);
		ret = fscanf(f, "%d\t", &dummy);
		STARPU_ASSERT(ret == 1);
		STARPU_ASSERT(dummy == gpu);

		unsigned cpu;
		for (cpu = 0; cpu < ncpus; cpu++)
		{
			ret = fscanf(f, "%d\t", &cuda_affinity_matrix[gpu][cpu]);
			STARPU_ASSERT(ret == 1);
		}

		ret = fscanf(f, "\n");
		STARPU_ASSERT(ret == 0);
	}
#endif /* STARPU_USE_CUDA */

#ifdef STARPU_USE_OPENCL
	nopencl = _starpu_opencl_get_device_count();
	for (gpu = 0; gpu < nopencl; gpu++)
	{
		int ret;
		int dummy;

		_starpu_drop_comments(f);
		ret = fscanf(f, "%d\t", &dummy);
		STARPU_ASSERT(ret == 1);
		STARPU_ASSERT(dummy == gpu);

		unsigned cpu;
		for (cpu = 0; cpu < ncpus; cpu++)
		{
			ret = fscanf(f, "%d\t", &opencl_affinity_matrix[gpu][cpu]);
			STARPU_ASSERT(ret == 1);
		}

		ret = fscanf(f, "\n");
		STARPU_ASSERT(ret == 0);
	}
#endif /* STARPU_USE_OPENCL */

	fclose(f);
#endif /* STARPU_USE_CUDA || STARPU_USE_OPENCL */
}
static void write_bus_affinity_file_content(void)
{
	STARPU_ASSERT(was_benchmarked);

#if defined(STARPU_USE_CUDA) || defined(STARPU_USE_OPENCL)
	FILE *f;
	char path[256];
	get_affinity_path(path, 256);

	f = fopen(path, "w+");
	if (!f)
	{
		perror("fopen write_bus_affinity_file_content");
		_STARPU_DISP("path '%s'\n", path);
		fflush(stderr);
		STARPU_ABORT();
	}

	unsigned cpu;
	int gpu;

	fprintf(f, "# GPU\t");
	for (cpu = 0; cpu < ncpus; cpu++)
		fprintf(f, "CPU%u\t", cpu);
	fprintf(f, "\n");

#ifdef STARPU_USE_CUDA
	for (gpu = 0; gpu < ncuda; gpu++)
	{
		fprintf(f, "%d\t", gpu);

		for (cpu = 0; cpu < ncpus; cpu++)
		{
			fprintf(f, "%d\t", cudadev_timing_per_cpu[(gpu+1)*MAXCPUS+cpu].cpu_id);
		}

		fprintf(f, "\n");
	}
#endif
#ifdef STARPU_USE_OPENCL
	for (gpu = 0; gpu < nopencl; gpu++)
	{
		fprintf(f, "%d\t", gpu);

		for (cpu = 0; cpu < ncpus; cpu++)
		{
			fprintf(f, "%d\t", opencldev_timing_per_cpu[(gpu+1)*MAXCPUS+cpu].cpu_id);
		}

		fprintf(f, "\n");
	}
#endif

	fclose(f);
#endif
}

static void generate_bus_affinity_file(void)
{
	if (!was_benchmarked)
		benchmark_all_gpu_devices();

	write_bus_affinity_file_content();
}

static void load_bus_affinity_file(void)
{
	int res;

	char path[256];
	get_affinity_path(path, 256);

	res = access(path, F_OK);
	if (res)
	{
		/* File does not exist yet */
		generate_bus_affinity_file();
	}

	load_bus_affinity_file_content();
}

#ifdef STARPU_USE_CUDA
int *_starpu_get_cuda_affinity_vector(unsigned gpuid)
{
	return cuda_affinity_matrix[gpuid];
}
#endif /* STARPU_USE_CUDA */

#ifdef STARPU_USE_OPENCL
int *_starpu_get_opencl_affinity_vector(unsigned gpuid)
{
	return opencl_affinity_matrix[gpuid];
}
#endif /* STARPU_USE_OPENCL */

void starpu_bus_print_affinity(FILE *f)
{
	unsigned cpu;
	int gpu;

	fprintf(f, "# GPU\tCPU in preference order (logical index)\n");

#ifdef STARPU_USE_CUDA
	fprintf(f, "# CUDA\n");
	for (gpu = 0; gpu < ncuda; gpu++)
	{
		fprintf(f, "%d\t", gpu);
		for (cpu = 0; cpu < ncpus; cpu++)
		{
			fprintf(f, "%d\t", cuda_affinity_matrix[gpu][cpu]);
		}
		fprintf(f, "\n");
	}
#endif
#ifdef STARPU_USE_OPENCL
	fprintf(f, "# OpenCL\n");
	for (gpu = 0; gpu < nopencl; gpu++)
	{
		fprintf(f, "%d\t", gpu);
		for (cpu = 0; cpu < ncpus; cpu++)
		{
			fprintf(f, "%d\t", opencl_affinity_matrix[gpu][cpu]);
		}
		fprintf(f, "\n");
	}
#endif
}

/*
 *	Latency
 */

static void get_latency_path(char *path, size_t maxlen)
{
	get_bus_path("latency", path, maxlen);
}
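
/* On disk, the latency file is a STARPU_MAXNODES x STARPU_MAXNODES matrix of
 * tab-separated values in µs, one line per source node (see
 * write_bus_latency_file_content() below); entries involving nodes beyond
 * the detected devices are stored as NAN. */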
static int load_bus_latency_file_content(void)
{
	int n;
	unsigned src, dst;
	FILE *f;

	char path[256];
	get_latency_path(path, 256);

	f = fopen(path, "r");
	STARPU_ASSERT(f);

	for (src = 0; src < STARPU_MAXNODES; src++)
	{
		_starpu_drop_comments(f);
		for (dst = 0; dst < STARPU_MAXNODES; dst++)
		{
			double latency;

			n = fscanf(f, "%lf", &latency);
			if (n != 1)
			{
				fclose(f);
				return 0;
			}
			n = getc(f);
			if (n != '\t')
			{
				fclose(f);
				return 0;
			}

			latency_matrix[src][dst] = latency;
		}

		n = getc(f);
		if (n != '\n')
		{
			fclose(f);
			return 0;
		}
	}

	fclose(f);
	return 1;
}

static void write_bus_latency_file_content(void)
{
	int src, dst, maxnode;
	FILE *f;

	STARPU_ASSERT(was_benchmarked);

	char path[256];
	get_latency_path(path, 256);

	f = fopen(path, "w+");
	if (!f)
	{
		perror("fopen write_bus_latency_file_content");
		_STARPU_DISP("path '%s'\n", path);
		fflush(stderr);
		STARPU_ABORT();
	}

	fprintf(f, "# ");
	for (dst = 0; dst < STARPU_MAXNODES; dst++)
		fprintf(f, "to %d\t\t", dst);
	fprintf(f, "\n");

	maxnode = ncuda;
#ifdef STARPU_USE_OPENCL
	maxnode += nopencl;
#endif
	for (src = 0; src < STARPU_MAXNODES; src++)
	{
		for (dst = 0; dst < STARPU_MAXNODES; dst++)
		{
			double latency;

			if ((src > maxnode) || (dst > maxnode))
			{
				/* convention */
				latency = NAN;
			}
			else if (src == dst)
			{
				latency = 0.0;
			}
			else
			{
				/* Latencies are not actually benchmarked: use
				 * default estimates in µs, 2000 between two
				 * GPUs and 500 between the host and a GPU. */
				latency = ((src && dst)?2000.0:500.0);
			}

			fprintf(f, "%f\t", latency);
		}
		fprintf(f, "\n");
	}

	fclose(f);
}
static void generate_bus_latency_file(void)
{
	if (!was_benchmarked)
		benchmark_all_gpu_devices();

	write_bus_latency_file_content();
}

static void load_bus_latency_file(void)
{
	int res;

	char path[256];
	get_latency_path(path, 256);

	res = access(path, F_OK);
	if (res || !load_bus_latency_file_content())
	{
		/* File does not exist yet or is bogus */
		generate_bus_latency_file();
	}
}

/*
 *	Bandwidth
 */

static void get_bandwidth_path(char *path, size_t maxlen)
{
	get_bus_path("bandwidth", path, maxlen);
}
static int load_bus_bandwidth_file_content(void)
{
	int n;
	unsigned src, dst;
	FILE *f;

	char path[256];
	get_bandwidth_path(path, 256);

	f = fopen(path, "r");
	if (!f)
	{
		perror("fopen load_bus_bandwidth_file_content");
		_STARPU_DISP("path '%s'\n", path);
		fflush(stderr);
		STARPU_ABORT();
	}

	for (src = 0; src < STARPU_MAXNODES; src++)
	{
		_starpu_drop_comments(f);
		for (dst = 0; dst < STARPU_MAXNODES; dst++)
		{
			double bandwidth;

			n = fscanf(f, "%lf", &bandwidth);
			if (n != 1)
			{
				fprintf(stderr, "didn't get a number\n");
				fclose(f);
				return 0;
			}
			n = getc(f);
			if (n != '\t')
			{
				fclose(f);
				return 0;
			}

			bandwidth_matrix[src][dst] = bandwidth;
		}

		n = getc(f);
		if (n != '\n')
		{
			fclose(f);
			return 0;
		}
	}

	fclose(f);
	return 1;
}

static void write_bus_bandwidth_file_content(void)
{
	int src, dst, maxnode;
	FILE *f;

	STARPU_ASSERT(was_benchmarked);

	char path[256];
	get_bandwidth_path(path, 256);

	f = fopen(path, "w+");
	STARPU_ASSERT(f);

	fprintf(f, "# ");
	for (dst = 0; dst < STARPU_MAXNODES; dst++)
		fprintf(f, "to %d\t\t", dst);
	fprintf(f, "\n");

	maxnode = ncuda;
#ifdef STARPU_USE_OPENCL
	maxnode += nopencl;
#endif
	for (src = 0; src < STARPU_MAXNODES; src++)
	{
		for (dst = 0; dst < STARPU_MAXNODES; dst++)
		{
			double bandwidth;

			if ((src > maxnode) || (dst > maxnode))
			{
				bandwidth = NAN;
			}
#if defined(STARPU_USE_CUDA) || defined(STARPU_USE_OPENCL)
			else if (src != dst)
			{
				double slowness = 0.0;
				/* Total bandwidth is the harmonic mean of bandwidths */
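				/* Concretely, when the transfer has to bounce
				 * through the host, the slownesses (µs per byte)
				 * add up:
				 *   slowness(src->dst) = slowness_dtoh(src) + slowness_htod(dst)
				 * so the resulting bandwidth is
				 *   1/slowness = 1 / (1/B_dtoh + 1/B_htod). */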
#ifdef STARPU_USE_CUDA
#ifdef HAVE_CUDA_MEMCPY_PEER
				if (src && src <= ncuda && dst && dst <= ncuda)
					/* Direct GPU-GPU transfer */
					slowness = cudadev_timing_dtod[src][dst];
				else
#endif
				{
					if (src && src <= ncuda)
						slowness += cudadev_timing_dtoh[src];
					if (dst && dst <= ncuda)
						slowness += cudadev_timing_htod[dst];
				}
#endif
#ifdef STARPU_USE_OPENCL
				if (src > ncuda)
					slowness += opencldev_timing_dtoh[src-ncuda];
				if (dst > ncuda)
					slowness += opencldev_timing_htod[dst-ncuda];
#endif
				bandwidth = 1.0/slowness;
			}
#endif
			else
			{
				/* convention */
				bandwidth = 0.0;
			}

			fprintf(f, "%f\t", bandwidth);
		}
		fprintf(f, "\n");
	}

	fclose(f);
}
void starpu_bus_print_bandwidth(FILE *f)
{
	int src, dst, maxnode;

	maxnode = ncuda;
#ifdef STARPU_USE_OPENCL
	maxnode += nopencl;
#endif

	fprintf(f, "from\t");
	fprintf(f, "to RAM\t\t");
	for (dst = 0; dst < ncuda; dst++)
		fprintf(f, "to CUDA %d\t", dst);
	for (dst = 0; dst < nopencl; dst++)
		fprintf(f, "to OpenCL %d\t", dst);
	fprintf(f, "\n");

	for (src = 0; src <= maxnode; src++)
	{
		if (!src)
			fprintf(f, "RAM\t");
		else if (src <= ncuda)
			fprintf(f, "CUDA %d\t", src-1);
		else
			fprintf(f, "OpenCL%d\t", src-ncuda-1);

		for (dst = 0; dst <= maxnode; dst++)
			fprintf(f, "%f\t", bandwidth_matrix[src][dst]);

		fprintf(f, "\n");
	}
}

static void generate_bus_bandwidth_file(void)
{
	if (!was_benchmarked)
		benchmark_all_gpu_devices();

	write_bus_bandwidth_file_content();
}

static void load_bus_bandwidth_file(void)
{
	int res;

	char path[256];
	get_bandwidth_path(path, 256);

	res = access(path, F_OK);
	if (res || !load_bus_bandwidth_file_content())
	{
		/* File does not exist yet or is bogus */
		generate_bus_bandwidth_file();
	}
}

/*
 *	Config
 */

static void get_config_path(char *path, size_t maxlen)
{
	get_bus_path("config", path, maxlen);
}

static void check_bus_config_file()
{
	int res;
	char path[256];
	struct _starpu_machine_config *config = _starpu_get_machine_config();

	get_config_path(path, 256);
	res = access(path, F_OK);
	if (res || config->conf->bus_calibrate > 0)
	{
		if (res)
			_STARPU_DISP("No performance model for the bus, calibrating...\n");
		starpu_force_bus_sampling();
		if (res)
			_STARPU_DISP("... done\n");
	}
	else
	{
		FILE *f;
		int ret, read_cuda = -1, read_opencl = -1;
		unsigned read_cpus = -1;

		// Loading configuration from file
		f = fopen(path, "r");
		STARPU_ASSERT(f);
		_starpu_drop_comments(f);
		ret = fscanf(f, "%u\t", &read_cpus);
		STARPU_ASSERT(ret == 1);
		_starpu_drop_comments(f);
		ret = fscanf(f, "%d\t", &read_cuda);
		STARPU_ASSERT(ret == 1);
		_starpu_drop_comments(f);
		ret = fscanf(f, "%d\t", &read_opencl);
		STARPU_ASSERT(ret == 1);
		_starpu_drop_comments(f);
		fclose(f);

		// Loading current configuration
		ncpus = _starpu_topology_get_nhwcpu(config);
#ifdef STARPU_USE_CUDA
		ncuda = _starpu_get_cuda_device_count();
#endif
#ifdef STARPU_USE_OPENCL
		nopencl = _starpu_opencl_get_device_count();
#endif

		// Checking if both configurations match
		if (read_cpus != ncpus)
		{
			fprintf(stderr, "Current configuration does not match the bus performance model (CPUS: (stored) %u != (current) %u), recalibrating...", read_cpus, ncpus);
			starpu_force_bus_sampling();
			fprintf(stderr, "done\n");
		}
		else if (read_cuda != ncuda)
		{
			fprintf(stderr, "Current configuration does not match the bus performance model (CUDA: (stored) %d != (current) %d), recalibrating...", read_cuda, ncuda);
			starpu_force_bus_sampling();
			fprintf(stderr, "done\n");
		}
		else if (read_opencl != nopencl)
		{
			fprintf(stderr, "Current configuration does not match the bus performance model (OpenCL: (stored) %d != (current) %d), recalibrating...", read_opencl, nopencl);
			starpu_force_bus_sampling();
			fprintf(stderr, "done\n");
		}
	}
}

static void write_bus_config_file_content(void)
{
	FILE *f;
	char path[256];

	STARPU_ASSERT(was_benchmarked);

	get_config_path(path, 256);

	f = fopen(path, "w+");
	STARPU_ASSERT(f);

	fprintf(f, "# Current configuration\n");
	fprintf(f, "%u # Number of CPUs\n", ncpus);
	fprintf(f, "%d # Number of CUDA devices\n", ncuda);
	fprintf(f, "%d # Number of OpenCL devices\n", nopencl);

	fclose(f);
}
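
/* A generated config file therefore looks like the following (the numbers
 * are of course machine-dependent):
 *
 *	# Current configuration
 *	8 # Number of CPUs
 *	2 # Number of CUDA devices
 *	1 # Number of OpenCL devices
 */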
static void generate_bus_config_file()
{
	if (!was_benchmarked)
		benchmark_all_gpu_devices();

	write_bus_config_file_content();
}

/*
 *	Generic
 */

static void starpu_force_bus_sampling(void)
{
	_starpu_create_sampling_directory_if_needed();

	generate_bus_affinity_file();
	generate_bus_latency_file();
	generate_bus_bandwidth_file();
	generate_bus_config_file();
}

void _starpu_load_bus_performance_files(void)
{
	_starpu_create_sampling_directory_if_needed();

	check_bus_config_file();
	load_bus_affinity_file();
	load_bus_latency_file();
	load_bus_bandwidth_file();
}
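
/* The prediction below combines the two sampled matrices as
 *   time = latency + (size / bandwidth) * 2 * (ncudagpus + nopenclgpus),
 * the last factor apparently being a rough allowance for several GPUs
 * contending for the bus. Example with made-up numbers: 32 MiB at
 * 1024 bytes/µs with a 500 µs latency and 2 CUDA GPUs gives
 * 500 + (33554432/1024)*2*2 = 131572 µs. */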
/* (in µs) */
double _starpu_predict_transfer_time(unsigned src_node, unsigned dst_node, size_t size)
{
	double bandwidth = bandwidth_matrix[src_node][dst_node];
	double latency = latency_matrix[src_node][dst_node];
	struct starpu_machine_topology *topology = &_starpu_get_machine_config()->topology;

	return latency + (size/bandwidth)*2*(topology->ncudagpus+topology->nopenclgpus);
}