perfmodel_bus.c

/* StarPU --- Runtime system for heterogeneous multicore architectures.
 *
 * Copyright (C) 2009, 2010-2011  Université de Bordeaux 1
 * Copyright (C) 2010  Centre National de la Recherche Scientifique
 *
 * StarPU is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation; either version 2.1 of the License, or (at
 * your option) any later version.
 *
 * StarPU is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * See the GNU Lesser General Public License in COPYING.LGPL for more details.
 */
/* _GNU_SOURCE and <sched.h> are needed for the sched_{get,set}affinity
 * calls used while benchmarking the devices below, for both the CUDA and
 * the OpenCL paths */
#if defined(STARPU_USE_CUDA) || defined(STARPU_USE_OPENCL)
#ifndef _GNU_SOURCE
#define _GNU_SOURCE
#endif
#include <sched.h>
#endif

#include <unistd.h>
#include <sys/time.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>
#include <math.h>
#include <starpu.h>
#include <common/config.h>
#include <core/workers.h>
#include <core/perfmodel/perfmodel.h>
#ifdef STARPU_USE_OPENCL
#include <starpu_opencl.h>
#endif
#ifdef STARPU_HAVE_HWLOC
#include <hwloc.h>
#endif
#ifdef STARPU_HAVE_WINDOWS
#include <windows.h>
#endif

#define SIZE	(32*1024*1024*sizeof(char))
#define NITER	128
#define MAXCPUS	32
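
/* The bus benchmark transfers a 32 MiB buffer NITER (128) times in each
 * direction and keeps the average time per transfer, in microseconds.
 * MAXCPUS bounds the number of CPUs for which per-CPU timings are kept. */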

struct dev_timing {
	int cpu_id;
	double timing_htod;
	double timing_dtoh;
};

static double bandwidth_matrix[STARPU_MAXNODES][STARPU_MAXNODES] = {{-1.0}};
static double latency_matrix[STARPU_MAXNODES][STARPU_MAXNODES] = {{-1.0}};
static unsigned was_benchmarked = 0;
static unsigned ncpus = 0;
static int ncuda = 0;
static int nopencl = 0;

/* Benchmarking the performance of the bus */
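
/* Per-(device, CPU) timings are stored in a flat array indexed by
 * (dev+1)*MAXCPUS + cpu: index 0 is left for the host (RAM) node, so
 * device i corresponds to memory node i+1 in the arrays below. */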

#ifdef STARPU_USE_CUDA
static int cuda_affinity_matrix[STARPU_MAXCUDADEVS][MAXCPUS];
static double cudadev_timing_htod[STARPU_MAXNODES] = {0.0};
static double cudadev_timing_dtoh[STARPU_MAXNODES] = {0.0};
static struct dev_timing cudadev_timing_per_cpu[STARPU_MAXNODES*MAXCPUS];
#endif

#ifdef STARPU_USE_OPENCL
static int opencl_affinity_matrix[STARPU_MAXOPENCLDEVS][MAXCPUS];
static double opencldev_timing_htod[STARPU_MAXNODES] = {0.0};
static double opencldev_timing_dtoh[STARPU_MAXNODES] = {0.0};
static struct dev_timing opencldev_timing_per_cpu[STARPU_MAXNODES*MAXCPUS];
static size_t opencl_size = SIZE;
#endif

#if defined(STARPU_USE_CUDA) || defined(STARPU_USE_OPENCL)

#ifdef STARPU_HAVE_HWLOC
static hwloc_topology_t hwtopology;
#endif

#ifdef STARPU_USE_CUDA
static void measure_bandwidth_between_host_and_dev_on_cpu_with_cuda(int dev, int cpu, struct dev_timing *dev_timing_per_cpu)
{
	struct starpu_machine_config_s *config = _starpu_get_machine_config();
	_starpu_bind_thread_on_cpu(config, cpu);

	/* Initialize the CUDA context on the device */
	cudaSetDevice(dev);

	/* hack to prevent third-party libs from rebinding the thread */
	_starpu_bind_thread_on_cpu(config, cpu);

	/* hack to force the initialization */
	cudaFree(0);

	/* hack to prevent third-party libs from rebinding the thread */
	_starpu_bind_thread_on_cpu(config, cpu);

	/* Allocate a buffer on the device */
	unsigned char *d_buffer;
	cudaMalloc((void **)&d_buffer, SIZE);
	assert(d_buffer);

	/* hack to prevent third-party libs from rebinding the thread */
	_starpu_bind_thread_on_cpu(config, cpu);

	/* Allocate a buffer on the host */
	unsigned char *h_buffer;
	cudaHostAlloc((void **)&h_buffer, SIZE, 0);
	assert(h_buffer);

	/* hack to prevent third-party libs from rebinding the thread */
	_starpu_bind_thread_on_cpu(config, cpu);

	/* Fill them */
	memset(h_buffer, 0, SIZE);
	cudaMemset(d_buffer, 0, SIZE);

	/* hack to prevent third-party libs from rebinding the thread */
	_starpu_bind_thread_on_cpu(config, cpu);

	unsigned iter;
	double timing;
	struct timeval start;
	struct timeval end;

	/* Measure upload bandwidth */
	gettimeofday(&start, NULL);
	for (iter = 0; iter < NITER; iter++)
	{
		cudaMemcpy(d_buffer, h_buffer, SIZE, cudaMemcpyHostToDevice);
		cudaThreadSynchronize();
	}
	gettimeofday(&end, NULL);
	timing = (double)((end.tv_sec - start.tv_sec)*1000000 + (end.tv_usec - start.tv_usec));
	dev_timing_per_cpu[(dev+1)*MAXCPUS+cpu].timing_htod = timing/NITER;

	/* Measure download bandwidth */
	gettimeofday(&start, NULL);
	for (iter = 0; iter < NITER; iter++)
	{
		cudaMemcpy(h_buffer, d_buffer, SIZE, cudaMemcpyDeviceToHost);
		cudaThreadSynchronize();
	}
	gettimeofday(&end, NULL);
	timing = (double)((end.tv_sec - start.tv_sec)*1000000 + (end.tv_usec - start.tv_usec));
	dev_timing_per_cpu[(dev+1)*MAXCPUS+cpu].timing_dtoh = timing/NITER;

	/* Free buffers */
	cudaFreeHost(h_buffer);
	cudaFree(d_buffer);

	cudaThreadExit();
}
#endif
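
/* The stored value is thus the average time for one 32 MiB transfer, in
 * microseconds.  As an illustrative (made-up) number: if one upload takes
 * 11000 us, the resulting bandwidth estimate is
 * 32*1024*1024 bytes / 11000 us ~= 3050 bytes/us, i.e. about 3 GB/s. */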

#ifdef STARPU_USE_OPENCL
static void measure_bandwidth_between_host_and_dev_on_cpu_with_opencl(int dev, int cpu, struct dev_timing *dev_timing_per_cpu)
{
	cl_context context;
	cl_command_queue queue;
	cl_int err = 0;

	struct starpu_machine_config_s *config = _starpu_get_machine_config();
	_starpu_bind_thread_on_cpu(config, cpu);

	/* Initialize the OpenCL context on the device */
	_starpu_opencl_init_context(dev);
	starpu_opencl_get_context(dev, &context);
	starpu_opencl_get_queue(dev, &queue);

	/* Get the maximum size which can be allocated on the device */
	cl_device_id device;
	cl_ulong maxMemAllocSize;
	starpu_opencl_get_device(dev, &device);
	err = clGetDeviceInfo(device, CL_DEVICE_MAX_MEM_ALLOC_SIZE, sizeof(maxMemAllocSize), &maxMemAllocSize, NULL);
	if (err != CL_SUCCESS) STARPU_OPENCL_REPORT_ERROR(err);
	if (opencl_size > (size_t)maxMemAllocSize) opencl_size = maxMemAllocSize;

	/* hack to prevent third-party libs from rebinding the thread */
	_starpu_bind_thread_on_cpu(config, cpu);

	/* Allocate a buffer on the device */
	cl_mem d_buffer;
	d_buffer = clCreateBuffer(context, CL_MEM_READ_WRITE, opencl_size, NULL, &err);
	if (err != CL_SUCCESS) STARPU_OPENCL_REPORT_ERROR(err);

	/* hack to prevent third-party libs from rebinding the thread */
	_starpu_bind_thread_on_cpu(config, cpu);

	/* Allocate a buffer on the host */
	unsigned char *h_buffer;
	h_buffer = malloc(opencl_size);
	assert(h_buffer);

	/* hack to prevent third-party libs from rebinding the thread */
	_starpu_bind_thread_on_cpu(config, cpu);

	/* Fill them */
	memset(h_buffer, 0, opencl_size);
	err = clEnqueueWriteBuffer(queue, d_buffer, CL_TRUE, 0, opencl_size, h_buffer, 0, NULL, NULL);
	if (err != CL_SUCCESS) STARPU_OPENCL_REPORT_ERROR(err);

	/* hack to prevent third-party libs from rebinding the thread */
	_starpu_bind_thread_on_cpu(config, cpu);

	unsigned iter;
	double timing;
	struct timeval start;
	struct timeval end;

	/* Measure upload bandwidth */
	gettimeofday(&start, NULL);
	for (iter = 0; iter < NITER; iter++)
	{
		err = clEnqueueWriteBuffer(queue, d_buffer, CL_TRUE, 0, opencl_size, h_buffer, 0, NULL, NULL);
		if (err != CL_SUCCESS) STARPU_OPENCL_REPORT_ERROR(err);
	}
	gettimeofday(&end, NULL);
	timing = (double)((end.tv_sec - start.tv_sec)*1000000 + (end.tv_usec - start.tv_usec));
	dev_timing_per_cpu[(dev+1)*MAXCPUS+cpu].timing_htod = timing/NITER;

	/* Measure download bandwidth */
	gettimeofday(&start, NULL);
	for (iter = 0; iter < NITER; iter++)
	{
		err = clEnqueueReadBuffer(queue, d_buffer, CL_TRUE, 0, opencl_size, h_buffer, 0, NULL, NULL);
		if (err != CL_SUCCESS) STARPU_OPENCL_REPORT_ERROR(err);
	}
	gettimeofday(&end, NULL);
	timing = (double)((end.tv_sec - start.tv_sec)*1000000 + (end.tv_usec - start.tv_usec));
	dev_timing_per_cpu[(dev+1)*MAXCPUS+cpu].timing_dtoh = timing/NITER;

	/* Free buffers */
	clReleaseMemObject(d_buffer);
	free(h_buffer);

	/* Deinitialize the OpenCL context on the device */
	_starpu_opencl_deinit_context(dev);
}
#endif
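
/* Note that opencl_size may have been shrunk to the device's
 * CL_DEVICE_MAX_MEM_ALLOC_SIZE above, so the OpenCL timings can be taken
 * over a smaller buffer than the CUDA ones; the bandwidth computation in
 * write_bus_bandwidth_file_content() divides by opencl_size accordingly. */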

/* NB: we want to sort the timings by INCREASING order, i.e. by decreasing
 * bandwidth, so that the fastest CPU for a device ends up first. */
static int compar_dev_timing(const void *left_dev_timing, const void *right_dev_timing)
{
	const struct dev_timing *left = left_dev_timing;
	const struct dev_timing *right = right_dev_timing;

	double left_dtoh = left->timing_dtoh;
	double left_htod = left->timing_htod;
	double right_dtoh = right->timing_dtoh;
	double right_htod = right->timing_htod;

	/* These are transfer times, not bandwidths: the smaller, the better. */
	double timing_sum2_left = left_dtoh*left_dtoh + left_htod*left_htod;
	double timing_sum2_right = right_dtoh*right_dtoh + right_htod*right_htod;

	/* qsort expects a proper three-way comparison (negative, zero or
	 * positive), not a boolean. */
	if (timing_sum2_left < timing_sum2_right)
		return -1;
	if (timing_sum2_left > timing_sum2_right)
		return 1;
	return 0;
}

#ifdef STARPU_HAVE_HWLOC
static int find_numa_node(hwloc_obj_t obj)
{
	STARPU_ASSERT(obj);
	hwloc_obj_t current = obj;

	/* Walk up the topology tree until we reach a NUMA node object (note
	 * that this must compare the object's type, not its depth). */
	while (current->type != HWLOC_OBJ_NODE)
	{
		current = current->parent;

		/* If we don't find a "node" obj before the root, this means
		 * hwloc does not know whether there are numa nodes or not, so
		 * we should not use a per-node sampling in that case. */
		STARPU_ASSERT(current);
	}

	STARPU_ASSERT(current->type == HWLOC_OBJ_NODE);

	return current->logical_index;
}
#endif

static void measure_bandwidth_between_cpus_and_dev(int dev, struct dev_timing *dev_timing_per_cpu, char type)
{
	/* Either we have hwloc and we measure the bandwidth between each GPU
	 * and each NUMA node, or we don't have such NUMA information and we
	 * measure the bandwidth for each pair of (CPU, GPU), which is slower.
	 * */
#ifdef STARPU_HAVE_HWLOC
	int cpu_depth = hwloc_get_type_depth(hwtopology, HWLOC_OBJ_CORE);
	int nnuma_nodes = hwloc_get_nbobjs_by_type(hwtopology, HWLOC_OBJ_NODE);

	/* If no NUMA node was found, we assume that we have a single memory
	 * bank. */
	const unsigned no_node_obj_was_found = (nnuma_nodes == 0);

	unsigned is_available_per_numa_node[nnuma_nodes];
	double dev_timing_htod_per_numa_node[nnuma_nodes];
	double dev_timing_dtoh_per_numa_node[nnuma_nodes];

	memset(is_available_per_numa_node, 0, nnuma_nodes*sizeof(unsigned));
#endif

	unsigned cpu;
	for (cpu = 0; cpu < ncpus; cpu++)
	{
		dev_timing_per_cpu[(dev+1)*MAXCPUS+cpu].cpu_id = cpu;

#ifdef STARPU_HAVE_HWLOC
		int numa_id = 0;

		if (!no_node_obj_was_found)
		{
			hwloc_obj_t obj = hwloc_get_obj_by_depth(hwtopology, cpu_depth, cpu);

			numa_id = find_numa_node(obj);

			if (is_available_per_numa_node[numa_id])
			{
				/* We reuse the previous numbers for that NUMA node */
				dev_timing_per_cpu[(dev+1)*MAXCPUS+cpu].timing_htod =
					dev_timing_htod_per_numa_node[numa_id];
				dev_timing_per_cpu[(dev+1)*MAXCPUS+cpu].timing_dtoh =
					dev_timing_dtoh_per_numa_node[numa_id];
				continue;
			}
		}
#endif

#ifdef STARPU_USE_CUDA
		if (type == 'C')
			measure_bandwidth_between_host_and_dev_on_cpu_with_cuda(dev, cpu, dev_timing_per_cpu);
#endif
#ifdef STARPU_USE_OPENCL
		if (type == 'O')
			measure_bandwidth_between_host_and_dev_on_cpu_with_opencl(dev, cpu, dev_timing_per_cpu);
#endif

#ifdef STARPU_HAVE_HWLOC
		if (!no_node_obj_was_found && !is_available_per_numa_node[numa_id])
		{
			/* Save the results for that NUMA node */
			dev_timing_htod_per_numa_node[numa_id] =
				dev_timing_per_cpu[(dev+1)*MAXCPUS+cpu].timing_htod;
			dev_timing_dtoh_per_numa_node[numa_id] =
				dev_timing_per_cpu[(dev+1)*MAXCPUS+cpu].timing_dtoh;

			is_available_per_numa_node[numa_id] = 1;
		}
#endif
	}
}
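
/* With hwloc, the loop above performs one actual measurement per NUMA node
 * instead of one per CPU: the first CPU of each node pays the benchmark
 * cost, and the timings are then replicated to the other CPUs of that
 * node. */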

static void measure_bandwidth_between_host_and_dev(int dev, double *dev_timing_htod, double *dev_timing_dtoh,
						   struct dev_timing *dev_timing_per_cpu, char type)
{
	measure_bandwidth_between_cpus_and_dev(dev, dev_timing_per_cpu, type);

	/* sort the results */
	qsort(&(dev_timing_per_cpu[(dev+1)*MAXCPUS]), ncpus,
	      sizeof(struct dev_timing),
	      compar_dev_timing);

#ifdef STARPU_VERBOSE
	unsigned cpu;
	for (cpu = 0; cpu < ncpus; cpu++)
	{
		unsigned current_cpu = dev_timing_per_cpu[(dev+1)*MAXCPUS+cpu].cpu_id;
		double timing_dtoh = dev_timing_per_cpu[(dev+1)*MAXCPUS+cpu].timing_dtoh;
		double timing_htod = dev_timing_per_cpu[(dev+1)*MAXCPUS+cpu].timing_htod;

		double timing_sum2 = timing_dtoh*timing_dtoh + timing_htod*timing_htod;

		_STARPU_DISP("TIMING GPU %d CPU %u - htod %lf - dtoh %lf - %lf\n", dev, current_cpu, timing_htod, timing_dtoh, sqrt(timing_sum2));
	}

	unsigned best_cpu = dev_timing_per_cpu[(dev+1)*MAXCPUS+0].cpu_id;

	_STARPU_DISP("BANDWIDTH GPU %d BEST CPU %u\n", dev, best_cpu);
#endif

	/* The results are sorted by increasing transfer time, so the best
	 * measurement is the first entry. */
	dev_timing_dtoh[dev+1] = dev_timing_per_cpu[(dev+1)*MAXCPUS+0].timing_dtoh;
	dev_timing_htod[dev+1] = dev_timing_per_cpu[(dev+1)*MAXCPUS+0].timing_htod;
}
#endif /* defined(STARPU_USE_CUDA) || defined(STARPU_USE_OPENCL) */
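
/* The sorted per-CPU slice is reused when writing the affinity file below:
 * for each GPU it records the CPU ids ordered from the most to the least
 * efficient at driving transfers with that device. */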

static void benchmark_all_gpu_devices(void)
{
#if defined(STARPU_USE_CUDA) || defined(STARPU_USE_OPENCL)
	int i, ret;

	_STARPU_DEBUG("Benchmarking the speed of the bus\n");

#ifdef STARPU_HAVE_HWLOC
	hwloc_topology_init(&hwtopology);
	hwloc_topology_load(hwtopology);
#endif

	/* TODO: use hwloc */
#ifdef __linux__
	/* Save the current cpu binding */
	cpu_set_t former_process_affinity;
	ret = sched_getaffinity(0, sizeof(former_process_affinity), &former_process_affinity);
	if (ret)
	{
		perror("sched_getaffinity");
		STARPU_ABORT();
	}
#else
#warning Missing binding support, StarPU will not be able to properly benchmark NUMA topology
#endif

	struct starpu_machine_config_s *config = _starpu_get_machine_config();
	ncpus = _starpu_topology_get_nhwcpu(config);

#ifdef STARPU_USE_CUDA
	cudaGetDeviceCount(&ncuda);
	for (i = 0; i < ncuda; i++)
	{
		/* measure bandwidth between Host and Device i */
		measure_bandwidth_between_host_and_dev(i, cudadev_timing_htod, cudadev_timing_dtoh, cudadev_timing_per_cpu, 'C');
	}
#endif
#ifdef STARPU_USE_OPENCL
	nopencl = _starpu_opencl_get_device_count();
	for (i = 0; i < nopencl; i++)
	{
		/* measure bandwidth between Host and Device i */
		measure_bandwidth_between_host_and_dev(i, opencldev_timing_htod, opencldev_timing_dtoh, opencldev_timing_per_cpu, 'O');
	}
#endif

	/* FIXME: use hwloc */
#ifdef __linux__
	/* Restore the former affinity */
	ret = sched_setaffinity(0, sizeof(former_process_affinity), &former_process_affinity);
	if (ret)
	{
		perror("sched_setaffinity");
		STARPU_ABORT();
	}
#endif

#ifdef STARPU_HAVE_HWLOC
	hwloc_topology_destroy(hwtopology);
#endif

	_STARPU_DEBUG("Benchmarking the speed of the bus is done.\n");
#endif /* defined(STARPU_USE_CUDA) || defined(STARPU_USE_OPENCL) */

	was_benchmarked = 1;
}

static void get_bus_path(const char *type, char *path, size_t maxlen)
{
	_starpu_get_perf_model_dir_bus(path, maxlen);
	/* strncat takes the remaining space, not the total buffer size */
	strncat(path, type, maxlen - strlen(path) - 1);

	char hostname[32];
	gethostname(hostname, sizeof(hostname) - 1);
	/* gethostname does not null-terminate on truncation */
	hostname[sizeof(hostname) - 1] = '\0';
	strncat(path, ".", maxlen - strlen(path) - 1);
	strncat(path, hostname, maxlen - strlen(path) - 1);
}
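
/* The resulting path is "<bus sampling dir>/<type>.<hostname>", e.g. an
 * affinity file named "affinity.mymachine" (illustrative hostname), so that
 * sampling files from different hosts sharing a home directory do not
 * clash. */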

/*
 *	Affinity
 */

static void get_affinity_path(char *path, size_t maxlen)
{
	get_bus_path("affinity", path, maxlen);
}

static void load_bus_affinity_file_content(void)
{
	FILE *f;

	char path[256];
	get_affinity_path(path, 256);

	f = fopen(path, "r");
	STARPU_ASSERT(f);

#if defined(STARPU_USE_CUDA) || defined(STARPU_USE_OPENCL)
	struct starpu_machine_config_s *config = _starpu_get_machine_config();
	ncpus = _starpu_topology_get_nhwcpu(config);
	int gpu;

#ifdef STARPU_USE_CUDA
	cudaGetDeviceCount(&ncuda);
	for (gpu = 0; gpu < ncuda; gpu++)
	{
		int ret;
		int dummy;

		_starpu_drop_comments(f);
		ret = fscanf(f, "%d\t", &dummy);
		STARPU_ASSERT(ret == 1);
		STARPU_ASSERT(dummy == gpu);

		unsigned cpu;
		for (cpu = 0; cpu < ncpus; cpu++)
		{
			ret = fscanf(f, "%d\t", &cuda_affinity_matrix[gpu][cpu]);
			STARPU_ASSERT(ret == 1);
		}

		ret = fscanf(f, "\n");
		STARPU_ASSERT(ret == 0);
	}
#endif
#ifdef STARPU_USE_OPENCL
	nopencl = _starpu_opencl_get_device_count();
	for (gpu = 0; gpu < nopencl; gpu++)
	{
		int ret;
		int dummy;

		_starpu_drop_comments(f);
		ret = fscanf(f, "%d\t", &dummy);
		STARPU_ASSERT(ret == 1);
		STARPU_ASSERT(dummy == gpu);

		unsigned cpu;
		for (cpu = 0; cpu < ncpus; cpu++)
		{
			ret = fscanf(f, "%d\t", &opencl_affinity_matrix[gpu][cpu]);
			STARPU_ASSERT(ret == 1);
		}

		ret = fscanf(f, "\n");
		STARPU_ASSERT(ret == 0);
	}
#endif
#endif

	fclose(f);
}

static void write_bus_affinity_file_content(void)
{
	FILE *f;

	STARPU_ASSERT(was_benchmarked);

	char path[256];
	get_affinity_path(path, 256);

	f = fopen(path, "w+");
	if (!f)
	{
		perror("fopen write_bus_affinity_file_content");
		_STARPU_DISP("path '%s'\n", path);
		fflush(stderr);
		STARPU_ABORT();
	}

#if defined(STARPU_USE_CUDA) || defined(STARPU_USE_OPENCL)
	unsigned cpu;
	int gpu;

	fprintf(f, "# GPU\t");
	for (cpu = 0; cpu < ncpus; cpu++)
		fprintf(f, "CPU%u\t", cpu);
	fprintf(f, "\n");

#ifdef STARPU_USE_CUDA
	for (gpu = 0; gpu < ncuda; gpu++)
	{
		fprintf(f, "%d\t", gpu);

		for (cpu = 0; cpu < ncpus; cpu++)
		{
			fprintf(f, "%d\t", cudadev_timing_per_cpu[(gpu+1)*MAXCPUS+cpu].cpu_id);
		}

		fprintf(f, "\n");
	}
#endif
#ifdef STARPU_USE_OPENCL
	for (gpu = 0; gpu < nopencl; gpu++)
	{
		fprintf(f, "%d\t", gpu);

		for (cpu = 0; cpu < ncpus; cpu++)
		{
			fprintf(f, "%d\t", opencldev_timing_per_cpu[(gpu+1)*MAXCPUS+cpu].cpu_id);
		}

		fprintf(f, "\n");
	}
#endif
#endif
	fclose(f);
}
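
/* The affinity file lists, for each GPU, the CPU ids sorted from the best
 * to the worst candidate, e.g. (illustrative contents for a 4-CPU machine
 * with two GPUs):
 *
 *   # GPU	CPU0	CPU1	CPU2	CPU3
 *   0	2	3	0	1
 *   1	0	1	2	3
 */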

static void generate_bus_affinity_file(void)
{
	if (!was_benchmarked)
		benchmark_all_gpu_devices();

	write_bus_affinity_file_content();
}

static void load_bus_affinity_file(void)
{
	int res;

	char path[256];
	get_affinity_path(path, 256);

	res = access(path, F_OK);
	if (res)
	{
		/* File does not exist yet */
		generate_bus_affinity_file();
	}

	load_bus_affinity_file_content();
}

#ifdef STARPU_USE_CUDA
int *_starpu_get_cuda_affinity_vector(unsigned gpuid)
{
	return cuda_affinity_matrix[gpuid];
}
#endif /* STARPU_USE_CUDA */

#ifdef STARPU_USE_OPENCL
int *_starpu_get_opencl_affinity_vector(unsigned gpuid)
{
	return opencl_affinity_matrix[gpuid];
}
#endif /* STARPU_USE_OPENCL */

/*
 *	Latency
 */

static void get_latency_path(char *path, size_t maxlen)
{
	get_bus_path("latency", path, maxlen);
}

static void load_bus_latency_file_content(void)
{
	int n;
	unsigned src, dst;
	FILE *f;

	char path[256];
	get_latency_path(path, 256);

	f = fopen(path, "r");
	STARPU_ASSERT(f);

	for (src = 0; src < STARPU_MAXNODES; src++)
	{
		_starpu_drop_comments(f);
		for (dst = 0; dst < STARPU_MAXNODES; dst++)
		{
			double latency;

			n = fscanf(f, "%lf\t", &latency);
			STARPU_ASSERT(n == 1);

			latency_matrix[src][dst] = latency;
		}

		n = fscanf(f, "\n");
		STARPU_ASSERT(n == 0);
	}

	fclose(f);
}

static void write_bus_latency_file_content(void)
{
	int src, dst, maxnode;
	FILE *f;

	STARPU_ASSERT(was_benchmarked);

	char path[256];
	get_latency_path(path, 256);

	f = fopen(path, "w+");
	if (!f)
	{
		perror("fopen write_bus_latency_file_content");
		_STARPU_DISP("path '%s'\n", path);
		fflush(stderr);
		STARPU_ABORT();
	}

	fprintf(f, "# ");
	for (dst = 0; dst < STARPU_MAXNODES; dst++)
		fprintf(f, "to %d\t\t", dst);
	fprintf(f, "\n");

	maxnode = ncuda;
#ifdef STARPU_USE_OPENCL
	maxnode += nopencl;
#endif
	for (src = 0; src < STARPU_MAXNODES; src++)
	{
		for (dst = 0; dst < STARPU_MAXNODES; dst++)
		{
			double latency;

			if ((src > maxnode) || (dst > maxnode))
			{
				/* convention */
				latency = -1.0;
			}
			else if (src == dst)
			{
				latency = 0.0;
			}
			else
			{
				/* Latencies are not benchmarked: use rough
				 * hard-coded estimates instead. */
				latency = ((src && dst)?2000.0:500.0);
			}

			fprintf(f, "%lf\t", latency);
		}
		fprintf(f, "\n");
	}

	fclose(f);
}
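
/* To summarize the conventions used above: -1.0 marks node pairs beyond the
 * detected devices, 0.0 is a transfer within the same node, 500 is the
 * assumed host/device latency and 2000 the assumed device/device latency
 * (presumably in microseconds, matching the units used by
 * _starpu_predict_transfer_time() below). */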

static void generate_bus_latency_file(void)
{
	if (!was_benchmarked)
		benchmark_all_gpu_devices();

	write_bus_latency_file_content();
}

static void load_bus_latency_file(void)
{
	int res;

	char path[256];
	get_latency_path(path, 256);

	res = access(path, F_OK);
	if (res)
	{
		/* File does not exist yet */
		generate_bus_latency_file();
	}

	load_bus_latency_file_content();
}

/*
 *	Bandwidth
 */

static void get_bandwidth_path(char *path, size_t maxlen)
{
	get_bus_path("bandwidth", path, maxlen);
}

static void load_bus_bandwidth_file_content(void)
{
	int n;
	unsigned src, dst;
	FILE *f;

	char path[256];
	get_bandwidth_path(path, 256);

	f = fopen(path, "r");
	if (!f)
	{
		perror("fopen load_bus_bandwidth_file_content");
		_STARPU_DISP("path '%s'\n", path);
		fflush(stderr);
		STARPU_ABORT();
	}

	for (src = 0; src < STARPU_MAXNODES; src++)
	{
		_starpu_drop_comments(f);
		for (dst = 0; dst < STARPU_MAXNODES; dst++)
		{
			double bandwidth;

			n = fscanf(f, "%lf\t", &bandwidth);
			STARPU_ASSERT(n == 1);

			bandwidth_matrix[src][dst] = bandwidth;
		}

		n = fscanf(f, "\n");
		STARPU_ASSERT(n == 0);
	}

	fclose(f);
}

static void write_bus_bandwidth_file_content(void)
{
	int src, dst, maxnode;
	FILE *f;

	STARPU_ASSERT(was_benchmarked);

	char path[256];
	get_bandwidth_path(path, 256);

	f = fopen(path, "w+");
	STARPU_ASSERT(f);

	fprintf(f, "# ");
	for (dst = 0; dst < STARPU_MAXNODES; dst++)
		fprintf(f, "to %d\t\t", dst);
	fprintf(f, "\n");

	maxnode = ncuda;
#ifdef STARPU_USE_OPENCL
	maxnode += nopencl;
#endif
	for (src = 0; src < STARPU_MAXNODES; src++)
	{
		for (dst = 0; dst < STARPU_MAXNODES; dst++)
		{
			double bandwidth;

			if ((src > maxnode) || (dst > maxnode))
			{
				/* convention */
				bandwidth = -1.0;
			}
#if defined(STARPU_USE_CUDA) || defined(STARPU_USE_OPENCL)
			else if (src != dst)
			{
				double time_src_to_ram = 0.0, time_ram_to_dst = 0.0;
				double timing;

				/* Bandwidth = SIZE / (time src -> ram + time ram -> dst) */
#ifdef STARPU_USE_CUDA
				time_src_to_ram = (src==0)?0.0:cudadev_timing_dtoh[src];
				time_ram_to_dst = (dst==0)?0.0:cudadev_timing_htod[dst];
				timing = time_src_to_ram + time_ram_to_dst;
				bandwidth = 1.0*SIZE/timing;
#endif
#ifdef STARPU_USE_OPENCL
				if (src > ncuda)
					time_src_to_ram = (src==0)?0.0:opencldev_timing_dtoh[src-ncuda];
				if (dst > ncuda)
					time_ram_to_dst = (dst==0)?0.0:opencldev_timing_htod[dst-ncuda];
				timing = time_src_to_ram + time_ram_to_dst;
				bandwidth = 1.0*opencl_size/timing;
#endif
			}
#endif
			else
			{
				/* convention */
				bandwidth = 0.0;
			}

			fprintf(f, "%lf\t", bandwidth);
		}

		fprintf(f, "\n");
	}

	fclose(f);
}
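
/* Device-to-device bandwidth is thus modeled as a store-and-forward copy
 * through RAM: the transfer time is the measured device-to-host time of the
 * source plus the host-to-device time of the destination, and the bandwidth
 * entries are expressed in bytes per microsecond. */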

static void generate_bus_bandwidth_file(void)
{
	if (!was_benchmarked)
		benchmark_all_gpu_devices();

	write_bus_bandwidth_file_content();
}

static void load_bus_bandwidth_file(void)
{
	int res;

	char path[256];
	get_bandwidth_path(path, 256);

	res = access(path, F_OK);
	if (res)
	{
		/* File does not exist yet */
		generate_bus_bandwidth_file();
	}

	load_bus_bandwidth_file_content();
}

/*
 *	Config
 */

static void get_config_path(char *path, size_t maxlen)
{
	get_bus_path("config", path, maxlen);
}

static void check_bus_config_file(void)
{
	int res;
	char path[256];

	get_config_path(path, 256);
	res = access(path, F_OK);
	if (res)
	{
		fprintf(stderr, "No performance model for the bus, calibrating...");
		starpu_force_bus_sampling();
		fprintf(stderr, "done\n");
	}
	else
	{
		FILE *f;
		int ret, read_cuda, read_opencl;
		unsigned read_cpus;
		struct starpu_machine_config_s *config = _starpu_get_machine_config();

		/* Load the stored configuration from the file */
		f = fopen(path, "r");
		STARPU_ASSERT(f);

		_starpu_drop_comments(f);
		ret = fscanf(f, "%u\t", &read_cpus);
		STARPU_ASSERT(ret == 1);
		_starpu_drop_comments(f);
		ret = fscanf(f, "%d\t", &read_cuda);
		STARPU_ASSERT(ret == 1);
		_starpu_drop_comments(f);
		ret = fscanf(f, "%d\t", &read_opencl);
		STARPU_ASSERT(ret == 1);
		_starpu_drop_comments(f);
		fclose(f);

		/* Detect the current configuration */
		ncpus = _starpu_topology_get_nhwcpu(config);
#ifdef STARPU_USE_CUDA
		cudaGetDeviceCount(&ncuda);
#endif
#ifdef STARPU_USE_OPENCL
		nopencl = _starpu_opencl_get_device_count();
#endif

		/* Check whether both configurations match */
		if (read_cpus != ncpus)
		{
			fprintf(stderr, "Current configuration does not match the performance model (CPUS: (stored) %u != (current) %u), recalibrating...", read_cpus, ncpus);
			starpu_force_bus_sampling();
			fprintf(stderr, "done\n");
		}
		else if (read_cuda != ncuda)
		{
			fprintf(stderr, "Current configuration does not match the performance model (CUDA: (stored) %d != (current) %d), recalibrating...", read_cuda, ncuda);
			starpu_force_bus_sampling();
			fprintf(stderr, "done\n");
		}
		else if (read_opencl != nopencl)
		{
			fprintf(stderr, "Current configuration does not match the performance model (OpenCL: (stored) %d != (current) %d), recalibrating...", read_opencl, nopencl);
			starpu_force_bus_sampling();
			fprintf(stderr, "done\n");
		}
	}
}

static void write_bus_config_file_content(void)
{
	FILE *f;
	char path[256];

	STARPU_ASSERT(was_benchmarked);

	get_config_path(path, 256);

	f = fopen(path, "w+");
	STARPU_ASSERT(f);

	fprintf(f, "# Current configuration\n");
	fprintf(f, "%u # Number of CPUs\n", ncpus);
	fprintf(f, "%d # Number of CUDA devices\n", ncuda);
	fprintf(f, "%d # Number of OpenCL devices\n", nopencl);

	fclose(f);
}

static void generate_bus_config_file(void)
{
	if (!was_benchmarked)
		benchmark_all_gpu_devices();

	write_bus_config_file_content();
}

/*
 *	Generic
 */

void starpu_force_bus_sampling(void)
{
	_starpu_create_sampling_directory_if_needed();

	generate_bus_affinity_file();
	generate_bus_latency_file();
	generate_bus_bandwidth_file();
	generate_bus_config_file();
}

void _starpu_load_bus_performance_files(void)
{
	_starpu_create_sampling_directory_if_needed();

	check_bus_config_file();

	load_bus_affinity_file();
	load_bus_latency_file();
	load_bus_bandwidth_file();
}
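
/* Transfer time prediction: latency plus size over bandwidth.  The
 * size/bandwidth term is multiplied by twice the number of GPUs in the
 * machine, presumably as a crude contention factor, since all devices may
 * be fighting for the same bus.  With bandwidth in bytes per microsecond
 * and latency in microseconds, the result is in microseconds as well. */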
double _starpu_predict_transfer_time(unsigned src_node, unsigned dst_node, size_t size)
{
	double bandwidth = bandwidth_matrix[src_node][dst_node];
	double latency = latency_matrix[src_node][dst_node];
	struct starpu_machine_topology_s *topology = &_starpu_get_machine_config()->topology;

	return latency + (size/bandwidth)*2*(topology->ncudagpus+topology->nopenclgpus);
}