perfmodel_bus.c

/* StarPU --- Runtime system for heterogeneous multicore architectures.
 *
 * Copyright (C) 2009-2012 Université de Bordeaux 1
 * Copyright (C) 2010, 2011, 2012 Centre National de la Recherche Scientifique
 *
 * StarPU is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation; either version 2.1 of the License, or (at
 * your option) any later version.
 *
 * StarPU is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * See the GNU Lesser General Public License in COPYING.LGPL for more details.
 */

#ifdef STARPU_USE_CUDA
#ifndef _GNU_SOURCE
#define _GNU_SOURCE
#endif
#include <sched.h>
#endif

#include <unistd.h>
#include <sys/time.h>
#include <stdlib.h>
#include <math.h>

#include <starpu.h>
#include <starpu_cuda.h>
#include <starpu_opencl.h>
#include <common/config.h>
#include <core/workers.h>
#include <core/perfmodel/perfmodel.h>

#ifdef STARPU_USE_OPENCL
#include <starpu_opencl.h>
#endif

#ifdef STARPU_HAVE_WINDOWS
#include <windows.h>
#endif

#define SIZE	(32*1024*1024*sizeof(char))
#define NITER	128
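/* Each benchmark below copies a SIZE-byte buffer (32 MiB, possibly reduced to
 * fit the device memory) NITER (128) times and averages the elapsed time. As
 * a rough, illustrative order of magnitude only: a 6 GB/s PCIe link needs
 * about 0.7 s of pure copy time per direction for the full 128 x 32 MiB. */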
static void starpu_force_bus_sampling(void);

/* timing is in µs per byte (i.e. slowness, inverse of bandwidth) */
struct dev_timing
{
	int cpu_id;
	double timing_htod;
	double timing_dtoh;
};
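/* Illustrative conversion (not a measured value): a link sustaining 5 GB/s,
 * i.e. 5000 bytes/µs, has a slowness of 1/5000 = 0.0002 µs per byte, so a
 * timing_htod or timing_dtoh around 0.0002 denotes a fast link; larger
 * timings mean slower links. */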
static double bandwidth_matrix[STARPU_MAXNODES][STARPU_MAXNODES];
static double latency_matrix[STARPU_MAXNODES][STARPU_MAXNODES];
static unsigned was_benchmarked = 0;
static unsigned ncpus = 0;
static int ncuda = 0;
static int nopencl = 0;

/* Benchmarking the performance of the bus */

#ifdef STARPU_USE_CUDA
static int cuda_affinity_matrix[STARPU_MAXCUDADEVS][STARPU_MAXCPUS];
static double cudadev_timing_htod[STARPU_MAXNODES] = {0.0};
static double cudadev_timing_dtoh[STARPU_MAXNODES] = {0.0};
#ifdef HAVE_CUDA_MEMCPY_PEER
static double cudadev_timing_dtod[STARPU_MAXNODES][STARPU_MAXNODES] = {{0.0}};
#endif
static struct dev_timing cudadev_timing_per_cpu[STARPU_MAXNODES*STARPU_MAXCPUS];
#endif

#ifdef STARPU_USE_OPENCL
static int opencl_affinity_matrix[STARPU_MAXOPENCLDEVS][STARPU_MAXCPUS];
static double opencldev_timing_htod[STARPU_MAXNODES] = {0.0};
static double opencldev_timing_dtoh[STARPU_MAXNODES] = {0.0};
static struct dev_timing opencldev_timing_per_cpu[STARPU_MAXNODES*STARPU_MAXCPUS];
#endif

#ifdef STARPU_HAVE_HWLOC
static hwloc_topology_t hwtopology;
#endif

#if defined(STARPU_USE_CUDA) || defined(STARPU_USE_OPENCL)
#ifdef STARPU_USE_CUDA
static void measure_bandwidth_between_host_and_dev_on_cpu_with_cuda(int dev, int cpu, struct dev_timing *dev_timing_per_cpu)
{
	struct _starpu_machine_config *config = _starpu_get_machine_config();
	_starpu_bind_thread_on_cpu(config, cpu);

	size_t size = SIZE;

	/* Initialize CUDA context on the device */
	cudaSetDevice(dev);

	/* hack to keep third-party libs from rebinding the thread */
	_starpu_bind_thread_on_cpu(config, cpu);

	/* hack to force the initialization */
	cudaFree(0);

	/* hack to keep third-party libs from rebinding the thread */
	_starpu_bind_thread_on_cpu(config, cpu);

	/* Get the maximum size which can be allocated on the device */
	struct cudaDeviceProp prop;
	cudaError_t cures;
	cures = cudaGetDeviceProperties(&prop, dev);
	if (STARPU_UNLIKELY(cures)) STARPU_CUDA_REPORT_ERROR(cures);
	if (size > prop.totalGlobalMem/4) size = prop.totalGlobalMem/4;

	/* Allocate a buffer on the device */
	unsigned char *d_buffer;
	cudaMalloc((void **)&d_buffer, size);
	STARPU_ASSERT(d_buffer);

	/* hack to keep third-party libs from rebinding the thread */
	_starpu_bind_thread_on_cpu(config, cpu);

	/* Allocate a buffer on the host */
	unsigned char *h_buffer;
	cures = cudaHostAlloc((void **)&h_buffer, size, 0);
	STARPU_ASSERT(cures == cudaSuccess);

	/* hack to keep third-party libs from rebinding the thread */
	_starpu_bind_thread_on_cpu(config, cpu);

	/* Fill them */
	memset(h_buffer, 0, size);
	cudaMemset(d_buffer, 0, size);

	/* hack to keep third-party libs from rebinding the thread */
	_starpu_bind_thread_on_cpu(config, cpu);

	unsigned iter;
	double timing;
	struct timeval start;
	struct timeval end;

	/* Measure upload bandwidth */
	gettimeofday(&start, NULL);
	for (iter = 0; iter < NITER; iter++)
	{
		cudaMemcpy(d_buffer, h_buffer, size, cudaMemcpyHostToDevice);
		cudaThreadSynchronize();
	}
	gettimeofday(&end, NULL);
	timing = (double)((end.tv_sec - start.tv_sec)*1000000 + (end.tv_usec - start.tv_usec));
	dev_timing_per_cpu[(dev+1)*STARPU_MAXCPUS+cpu].timing_htod = timing/NITER/size;

	/* Measure download bandwidth */
	gettimeofday(&start, NULL);
	for (iter = 0; iter < NITER; iter++)
	{
		cudaMemcpy(h_buffer, d_buffer, size, cudaMemcpyDeviceToHost);
		cudaThreadSynchronize();
	}
	gettimeofday(&end, NULL);
	timing = (double)((end.tv_sec - start.tv_sec)*1000000 + (end.tv_usec - start.tv_usec));
	dev_timing_per_cpu[(dev+1)*STARPU_MAXCPUS+cpu].timing_dtoh = timing/NITER/size;

	/* Free buffers */
	cudaFreeHost(h_buffer);
	cudaFree(d_buffer);

	cudaThreadExit();
}
#ifdef HAVE_CUDA_MEMCPY_PEER
static void measure_bandwidth_between_dev_and_dev_cuda(int src, int dst)
{
	size_t size = SIZE;
	int can;

	/* Get the maximum size which can be allocated on both devices */
	struct cudaDeviceProp prop;
	cudaError_t cures;
	cures = cudaGetDeviceProperties(&prop, src);
	if (STARPU_UNLIKELY(cures)) STARPU_CUDA_REPORT_ERROR(cures);
	if (size > prop.totalGlobalMem/4) size = prop.totalGlobalMem/4;
	cures = cudaGetDeviceProperties(&prop, dst);
	if (STARPU_UNLIKELY(cures)) STARPU_CUDA_REPORT_ERROR(cures);
	if (size > prop.totalGlobalMem/4) size = prop.totalGlobalMem/4;

	/* Initialize CUDA context on the source */
	cudaSetDevice(src);

	/* Enable peer access unless the user explicitly disabled it */
	if (starpu_get_env_number("STARPU_DISABLE_CUDA_GPU_GPU_DIRECT") <= 0)
	{
		cures = cudaDeviceCanAccessPeer(&can, src, dst);
		if (!cures && can)
		{
			cures = cudaDeviceEnablePeerAccess(dst, 0);
			if (!cures)
				_STARPU_DISP("GPU-Direct %d -> %d\n", dst, src);
		}
	}

	/* Allocate a buffer on the source device */
	unsigned char *s_buffer;
	cudaMalloc((void **)&s_buffer, size);
	STARPU_ASSERT(s_buffer);
	cudaMemset(s_buffer, 0, size);

	/* Initialize CUDA context on the destination */
	cudaSetDevice(dst);

	/* Enable peer access unless the user explicitly disabled it */
	if (starpu_get_env_number("STARPU_DISABLE_CUDA_GPU_GPU_DIRECT") <= 0)
	{
		cures = cudaDeviceCanAccessPeer(&can, dst, src);
		if (!cures && can)
		{
			cures = cudaDeviceEnablePeerAccess(src, 0);
			if (!cures)
				_STARPU_DISP("GPU-Direct %d -> %d\n", src, dst);
		}
	}

	/* Allocate a buffer on the destination device */
	unsigned char *d_buffer;
	cudaMalloc((void **)&d_buffer, size);
	STARPU_ASSERT(d_buffer);
	cudaMemset(d_buffer, 0, size);

	unsigned iter;
	double timing;
	struct timeval start;
	struct timeval end;

	/* Measure device-to-device bandwidth */
	gettimeofday(&start, NULL);
	for (iter = 0; iter < NITER; iter++)
	{
		cudaMemcpyPeer(d_buffer, dst, s_buffer, src, size);
		cudaThreadSynchronize();
	}
	gettimeofday(&end, NULL);
	timing = (double)((end.tv_sec - start.tv_sec)*1000000 + (end.tv_usec - start.tv_usec));
	cudadev_timing_dtod[src+1][dst+1] = timing/NITER/size;

	/* Free buffers */
	cudaFree(d_buffer);
	cudaSetDevice(src);
	cudaFree(s_buffer);

	cudaThreadExit();
}
#endif
#endif
#ifdef STARPU_USE_OPENCL
static void measure_bandwidth_between_host_and_dev_on_cpu_with_opencl(int dev, int cpu, struct dev_timing *dev_timing_per_cpu)
{
	cl_context context;
	cl_command_queue queue;
	cl_int err = 0;
	size_t size = SIZE;
	int not_initialized;

	struct _starpu_machine_config *config = _starpu_get_machine_config();
	_starpu_bind_thread_on_cpu(config, cpu);

	/* Is the context already initialized? */
	starpu_opencl_get_context(dev, &context);
	not_initialized = (context == NULL);
	if (not_initialized == 1)
		_starpu_opencl_init_context(dev);

	/* Get context and queue */
	starpu_opencl_get_context(dev, &context);
	starpu_opencl_get_queue(dev, &queue);

	/* Get the maximum size which can be allocated on the device */
	cl_device_id device;
	cl_ulong maxMemAllocSize;
	starpu_opencl_get_device(dev, &device);
	err = clGetDeviceInfo(device, CL_DEVICE_MAX_MEM_ALLOC_SIZE, sizeof(maxMemAllocSize), &maxMemAllocSize, NULL);
	if (err != CL_SUCCESS) STARPU_OPENCL_REPORT_ERROR(err);
	if (size > (size_t)maxMemAllocSize/4) size = maxMemAllocSize/4;

	if (_starpu_opencl_get_device_type(dev) == CL_DEVICE_TYPE_CPU)
	{
		/* Let's not use too much RAM when running OpenCL on a CPU: it
		 * would make the OS swap like crazy. */
		size /= 2;
	}

	/* hack to keep third-party libs from rebinding the thread */
	_starpu_bind_thread_on_cpu(config, cpu);

	/* Allocate a buffer on the device */
	cl_mem d_buffer;
	d_buffer = clCreateBuffer(context, CL_MEM_READ_WRITE, size, NULL, &err);
	if (err != CL_SUCCESS) STARPU_OPENCL_REPORT_ERROR(err);

	/* hack to keep third-party libs from rebinding the thread */
	_starpu_bind_thread_on_cpu(config, cpu);

	/* Allocate a buffer on the host */
	unsigned char *h_buffer;
	h_buffer = (unsigned char *)malloc(size);
	STARPU_ASSERT(h_buffer);

	/* hack to keep third-party libs from rebinding the thread */
	_starpu_bind_thread_on_cpu(config, cpu);

	/* Fill them */
	memset(h_buffer, 0, size);
	err = clEnqueueWriteBuffer(queue, d_buffer, CL_TRUE, 0, size, h_buffer, 0, NULL, NULL);
	if (err != CL_SUCCESS) STARPU_OPENCL_REPORT_ERROR(err);

	/* hack to keep third-party libs from rebinding the thread */
	_starpu_bind_thread_on_cpu(config, cpu);

	unsigned iter;
	double timing;
	struct timeval start;
	struct timeval end;

	/* Measure upload bandwidth */
	gettimeofday(&start, NULL);
	for (iter = 0; iter < NITER; iter++)
	{
		err = clEnqueueWriteBuffer(queue, d_buffer, CL_TRUE, 0, size, h_buffer, 0, NULL, NULL);
		if (err != CL_SUCCESS) STARPU_OPENCL_REPORT_ERROR(err);
	}
	gettimeofday(&end, NULL);
	timing = (double)((end.tv_sec - start.tv_sec)*1000000 + (end.tv_usec - start.tv_usec));
	dev_timing_per_cpu[(dev+1)*STARPU_MAXCPUS+cpu].timing_htod = timing/NITER/size;

	/* Measure download bandwidth */
	gettimeofday(&start, NULL);
	for (iter = 0; iter < NITER; iter++)
	{
		err = clEnqueueReadBuffer(queue, d_buffer, CL_TRUE, 0, size, h_buffer, 0, NULL, NULL);
		if (err != CL_SUCCESS) STARPU_OPENCL_REPORT_ERROR(err);
	}
	gettimeofday(&end, NULL);
	timing = (double)((end.tv_sec - start.tv_sec)*1000000 + (end.tv_usec - start.tv_usec));
	dev_timing_per_cpu[(dev+1)*STARPU_MAXCPUS+cpu].timing_dtoh = timing/NITER/size;

	/* Free buffers */
	clReleaseMemObject(d_buffer);
	free(h_buffer);

	/* Deinitialize the OpenCL context on the device if we initialized it */
	if (not_initialized == 1)
		_starpu_opencl_deinit_context(dev);
}
#endif
/* NB: we want to sort the devices by DECREASING bandwidth, i.e. by increasing
 * timing (slowness). */
static int compar_dev_timing(const void *left_dev_timing, const void *right_dev_timing)
{
	const struct dev_timing *left = (const struct dev_timing *)left_dev_timing;
	const struct dev_timing *right = (const struct dev_timing *)right_dev_timing;

	double left_dtoh = left->timing_dtoh;
	double left_htod = left->timing_htod;
	double right_dtoh = right->timing_dtoh;
	double right_htod = right->timing_htod;

	double timing_sum2_left = left_dtoh*left_dtoh + left_htod*left_htod;
	double timing_sum2_right = right_dtoh*right_dtoh + right_htod*right_htod;

	/* Return a proper three-way comparison so that qsort sees a consistent
	 * ordering: fastest (smallest timing) first. */
	if (timing_sum2_left < timing_sum2_right)
		return -1;
	if (timing_sum2_left > timing_sum2_right)
		return 1;
	return 0;
}
#ifdef STARPU_HAVE_HWLOC
static int find_numa_node(hwloc_obj_t obj)
{
	STARPU_ASSERT(obj);
	hwloc_obj_t current = obj;

	while (current->type != HWLOC_OBJ_NODE)
	{
		current = current->parent;

		/* If we don't find a "node" obj before the root, this means
		 * hwloc does not know whether there are numa nodes or not, so
		 * we should not use a per-node sampling in that case. */
		STARPU_ASSERT(current);
	}

	STARPU_ASSERT(current->type == HWLOC_OBJ_NODE);

	return current->logical_index;
}
#endif
static void measure_bandwidth_between_cpus_and_dev(int dev, struct dev_timing *dev_timing_per_cpu, char *type)
{
	/* Either we have hwloc and we measure the bandwidth between each GPU
	 * and each NUMA node, or we don't have such NUMA information and we
	 * measure the bandwidth for each pair of (CPU, GPU), which is slower.
	 */
#ifdef STARPU_HAVE_HWLOC
	int cpu_depth = hwloc_get_type_depth(hwtopology, HWLOC_OBJ_CORE);
	int nnuma_nodes = hwloc_get_nbobjs_by_type(hwtopology, HWLOC_OBJ_NODE);

	/* If no NUMA node was found, we assume that we have a single memory
	 * bank. */
	const unsigned no_node_obj_was_found = (nnuma_nodes == 0);

	unsigned *is_available_per_numa_node = NULL;
	double *dev_timing_htod_per_numa_node = NULL;
	double *dev_timing_dtoh_per_numa_node = NULL;

	if (!no_node_obj_was_found)
	{
		is_available_per_numa_node = (unsigned *)malloc(nnuma_nodes * sizeof(unsigned));
		STARPU_ASSERT(is_available_per_numa_node);

		dev_timing_htod_per_numa_node = (double *)malloc(nnuma_nodes * sizeof(double));
		STARPU_ASSERT(dev_timing_htod_per_numa_node);

		dev_timing_dtoh_per_numa_node = (double *)malloc(nnuma_nodes * sizeof(double));
		STARPU_ASSERT(dev_timing_dtoh_per_numa_node);

		memset(is_available_per_numa_node, 0, nnuma_nodes*sizeof(unsigned));
	}
#endif

	unsigned cpu;
	for (cpu = 0; cpu < ncpus; cpu++)
	{
		dev_timing_per_cpu[(dev+1)*STARPU_MAXCPUS+cpu].cpu_id = cpu;

#ifdef STARPU_HAVE_HWLOC
		int numa_id = 0;

		if (!no_node_obj_was_found)
		{
			hwloc_obj_t obj = hwloc_get_obj_by_depth(hwtopology, cpu_depth, cpu);

			numa_id = find_numa_node(obj);

			if (is_available_per_numa_node[numa_id])
			{
				/* We reuse the previous numbers for that NUMA node */
				dev_timing_per_cpu[(dev+1)*STARPU_MAXCPUS+cpu].timing_htod =
					dev_timing_htod_per_numa_node[numa_id];
				dev_timing_per_cpu[(dev+1)*STARPU_MAXCPUS+cpu].timing_dtoh =
					dev_timing_dtoh_per_numa_node[numa_id];
				continue;
			}
		}
#endif

#ifdef STARPU_USE_CUDA
		if (strncmp(type, "CUDA", 4) == 0)
			measure_bandwidth_between_host_and_dev_on_cpu_with_cuda(dev, cpu, dev_timing_per_cpu);
#endif
#ifdef STARPU_USE_OPENCL
		if (strncmp(type, "OpenCL", 6) == 0)
			measure_bandwidth_between_host_and_dev_on_cpu_with_opencl(dev, cpu, dev_timing_per_cpu);
#endif

#ifdef STARPU_HAVE_HWLOC
		if (!no_node_obj_was_found && !is_available_per_numa_node[numa_id])
		{
			/* Save the results for that NUMA node */
			dev_timing_htod_per_numa_node[numa_id] =
				dev_timing_per_cpu[(dev+1)*STARPU_MAXCPUS+cpu].timing_htod;
			dev_timing_dtoh_per_numa_node[numa_id] =
				dev_timing_per_cpu[(dev+1)*STARPU_MAXCPUS+cpu].timing_dtoh;
			is_available_per_numa_node[numa_id] = 1;
		}
#endif
	}

#ifdef STARPU_HAVE_HWLOC
	if (!no_node_obj_was_found)
	{
		free(is_available_per_numa_node);
		free(dev_timing_htod_per_numa_node);
		free(dev_timing_dtoh_per_numa_node);
	}
#endif /* STARPU_HAVE_HWLOC */
}
static void measure_bandwidth_between_host_and_dev(int dev, double *dev_timing_htod, double *dev_timing_dtoh,
						    struct dev_timing *dev_timing_per_cpu, char *type)
{
	measure_bandwidth_between_cpus_and_dev(dev, dev_timing_per_cpu, type);

	/* sort the results */
	qsort(&(dev_timing_per_cpu[(dev+1)*STARPU_MAXCPUS]), ncpus,
	      sizeof(struct dev_timing),
	      compar_dev_timing);

#ifdef STARPU_VERBOSE
	unsigned cpu;
	for (cpu = 0; cpu < ncpus; cpu++)
	{
		unsigned current_cpu = dev_timing_per_cpu[(dev+1)*STARPU_MAXCPUS+cpu].cpu_id;
		double bandwidth_dtoh = dev_timing_per_cpu[(dev+1)*STARPU_MAXCPUS+cpu].timing_dtoh;
		double bandwidth_htod = dev_timing_per_cpu[(dev+1)*STARPU_MAXCPUS+cpu].timing_htod;

		double bandwidth_sum2 = bandwidth_dtoh*bandwidth_dtoh + bandwidth_htod*bandwidth_htod;

		_STARPU_DISP("(%10s) BANDWIDTH GPU %d CPU %u - htod %f - dtoh %f - %f\n", type, dev, current_cpu, bandwidth_htod, bandwidth_dtoh, sqrt(bandwidth_sum2));
	}

	unsigned best_cpu = dev_timing_per_cpu[(dev+1)*STARPU_MAXCPUS+0].cpu_id;

	_STARPU_DISP("(%10s) BANDWIDTH GPU %d BEST CPU %u\n", type, dev, best_cpu);
#endif

	/* The results are sorted by decreasing bandwidth (increasing slowness),
	 * so the best measurement is the first entry. */
	dev_timing_dtoh[dev+1] = dev_timing_per_cpu[(dev+1)*STARPU_MAXCPUS+0].timing_dtoh;
	dev_timing_htod[dev+1] = dev_timing_per_cpu[(dev+1)*STARPU_MAXCPUS+0].timing_htod;
}
#endif /* defined(STARPU_USE_CUDA) || defined(STARPU_USE_OPENCL) */
static void benchmark_all_gpu_devices(void)
{
	int i;
#ifdef HAVE_CUDA_MEMCPY_PEER
	int j;
#endif

	_STARPU_DEBUG("Benchmarking the speed of the bus\n");

#ifdef STARPU_HAVE_HWLOC
	hwloc_topology_init(&hwtopology);
	hwloc_topology_load(hwtopology);
#endif

#ifdef STARPU_HAVE_HWLOC
	hwloc_bitmap_t former_cpuset = hwloc_bitmap_alloc();
	hwloc_get_cpubind(hwtopology, former_cpuset, HWLOC_CPUBIND_THREAD);
#elif __linux__
	/* Save the current cpu binding */
	cpu_set_t former_process_affinity;
	int ret;
	ret = sched_getaffinity(0, sizeof(former_process_affinity), &former_process_affinity);
	if (ret)
	{
		perror("sched_getaffinity");
		STARPU_ABORT();
	}
#else
#warning Missing binding support, StarPU will not be able to properly benchmark NUMA topology
#endif

	struct _starpu_machine_config *config = _starpu_get_machine_config();
	ncpus = _starpu_topology_get_nhwcpu(config);

#ifdef STARPU_USE_CUDA
	ncuda = _starpu_get_cuda_device_count();
	for (i = 0; i < ncuda; i++)
	{
		_STARPU_DISP("CUDA %d...\n", i);
		/* measure bandwidth between Host and Device i */
		measure_bandwidth_between_host_and_dev(i, cudadev_timing_htod, cudadev_timing_dtoh, cudadev_timing_per_cpu, "CUDA");
	}
#ifdef HAVE_CUDA_MEMCPY_PEER
	for (i = 0; i < ncuda; i++)
		for (j = 0; j < ncuda; j++)
			if (i != j)
			{
				_STARPU_DISP("CUDA %d -> %d...\n", i, j);
				/* measure bandwidth between Device i and Device j */
				measure_bandwidth_between_dev_and_dev_cuda(i, j);
			}
#endif
#endif
#ifdef STARPU_USE_OPENCL
	nopencl = _starpu_opencl_get_device_count();
	for (i = 0; i < nopencl; i++)
	{
		_STARPU_DISP("OpenCL %d...\n", i);
		/* measure bandwidth between Host and Device i */
		measure_bandwidth_between_host_and_dev(i, opencldev_timing_htod, opencldev_timing_dtoh, opencldev_timing_per_cpu, "OpenCL");
	}
#endif

#ifdef STARPU_HAVE_HWLOC
	hwloc_set_cpubind(hwtopology, former_cpuset, HWLOC_CPUBIND_THREAD);
	hwloc_bitmap_free(former_cpuset);
#elif __linux__
	/* Restore the former affinity */
	ret = sched_setaffinity(0, sizeof(former_process_affinity), &former_process_affinity);
	if (ret)
	{
		perror("sched_setaffinity");
		STARPU_ABORT();
	}
#endif

#ifdef STARPU_HAVE_HWLOC
	hwloc_topology_destroy(hwtopology);
#endif

	_STARPU_DEBUG("Benchmarking the speed of the bus is done.\n");

	was_benchmarked = 1;
}
static void get_bus_path(const char *type, char *path, size_t maxlen)
{
	_starpu_get_perf_model_dir_bus(path, maxlen);

	char hostname[32];
	char *forced_hostname = getenv("STARPU_HOSTNAME");
	if (forced_hostname && forced_hostname[0])
		snprintf(hostname, sizeof(hostname), "%s", forced_hostname);
	else
		gethostname(hostname, sizeof(hostname));

	strncat(path, hostname, maxlen - strlen(path) - 1);
	strncat(path, ".", maxlen - strlen(path) - 1);
	strncat(path, type, maxlen - strlen(path) - 1);
}
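/* The resulting path is of the form <bus sampling dir>/<hostname>.<type>,
 * e.g. something like ".../sampling/bus/myhost.affinity"; the actual prefix
 * is whatever _starpu_get_perf_model_dir_bus() returns, so the directory
 * shown here is only an assumed example. */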
/*
 *	Affinity
 */

static void get_affinity_path(char *path, size_t maxlen)
{
	get_bus_path("affinity", path, maxlen);
}

static void load_bus_affinity_file_content(void)
{
#if defined(STARPU_USE_CUDA) || defined(STARPU_USE_OPENCL)
	FILE *f;

	char path[256];
	get_affinity_path(path, 256);

	f = fopen(path, "r");
	STARPU_ASSERT(f);

	struct _starpu_machine_config *config = _starpu_get_machine_config();
	ncpus = _starpu_topology_get_nhwcpu(config);
	int gpu;

#ifdef STARPU_USE_CUDA
	ncuda = _starpu_get_cuda_device_count();
	for (gpu = 0; gpu < ncuda; gpu++)
	{
		int ret;
		int dummy;

		_starpu_drop_comments(f);
		ret = fscanf(f, "%d\t", &dummy);
		STARPU_ASSERT(ret == 1);
		STARPU_ASSERT(dummy == gpu);

		unsigned cpu;
		for (cpu = 0; cpu < ncpus; cpu++)
		{
			ret = fscanf(f, "%d\t", &cuda_affinity_matrix[gpu][cpu]);
			STARPU_ASSERT(ret == 1);
		}

		ret = fscanf(f, "\n");
		STARPU_ASSERT(ret == 0);
	}
#endif /* STARPU_USE_CUDA */

#ifdef STARPU_USE_OPENCL
	nopencl = _starpu_opencl_get_device_count();
	for (gpu = 0; gpu < nopencl; gpu++)
	{
		int ret;
		int dummy;

		_starpu_drop_comments(f);
		ret = fscanf(f, "%d\t", &dummy);
		STARPU_ASSERT(ret == 1);
		STARPU_ASSERT(dummy == gpu);

		unsigned cpu;
		for (cpu = 0; cpu < ncpus; cpu++)
		{
			ret = fscanf(f, "%d\t", &opencl_affinity_matrix[gpu][cpu]);
			STARPU_ASSERT(ret == 1);
		}

		ret = fscanf(f, "\n");
		STARPU_ASSERT(ret == 0);
	}
#endif /* STARPU_USE_OPENCL */

	fclose(f);
#endif /* STARPU_USE_CUDA || STARPU_USE_OPENCL */
}
static void write_bus_affinity_file_content(void)
{
	STARPU_ASSERT(was_benchmarked);

#if defined(STARPU_USE_CUDA) || defined(STARPU_USE_OPENCL)
	FILE *f;
	char path[256];
	get_affinity_path(path, 256);

	f = fopen(path, "w+");
	if (!f)
	{
		perror("fopen write_bus_affinity_file_content");
		_STARPU_DISP("path '%s'\n", path);
		fflush(stderr);
		STARPU_ABORT();
	}

	unsigned cpu;
	int gpu;

	fprintf(f, "# GPU\t");
	for (cpu = 0; cpu < ncpus; cpu++)
		fprintf(f, "CPU%u\t", cpu);
	fprintf(f, "\n");

#ifdef STARPU_USE_CUDA
	for (gpu = 0; gpu < ncuda; gpu++)
	{
		fprintf(f, "%d\t", gpu);
		for (cpu = 0; cpu < ncpus; cpu++)
		{
			fprintf(f, "%d\t", cudadev_timing_per_cpu[(gpu+1)*STARPU_MAXCPUS+cpu].cpu_id);
		}
		fprintf(f, "\n");
	}
#endif
#ifdef STARPU_USE_OPENCL
	for (gpu = 0; gpu < nopencl; gpu++)
	{
		fprintf(f, "%d\t", gpu);
		for (cpu = 0; cpu < ncpus; cpu++)
		{
			fprintf(f, "%d\t", opencldev_timing_per_cpu[(gpu+1)*STARPU_MAXCPUS+cpu].cpu_id);
		}
		fprintf(f, "\n");
	}
#endif

	fclose(f);
#endif
}
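/* The affinity file written above therefore looks like the following
 * (made-up example for 2 GPUs and 4 CPUs; each row lists the CPU logical
 * indexes for that GPU, best first):
 *
 * # GPU	CPU0	CPU1	CPU2	CPU3
 * 0	0	1	2	3
 * 1	2	3	0	1
 */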
static void generate_bus_affinity_file(void)
{
	if (!was_benchmarked)
		benchmark_all_gpu_devices();

	write_bus_affinity_file_content();
}

static void load_bus_affinity_file(void)
{
	int res;

	char path[256];
	get_affinity_path(path, 256);

	res = access(path, F_OK);
	if (res)
	{
		/* File does not exist yet */
		generate_bus_affinity_file();
	}

	load_bus_affinity_file_content();
}
#ifdef STARPU_USE_CUDA
int *_starpu_get_cuda_affinity_vector(unsigned gpuid)
{
	return cuda_affinity_matrix[gpuid];
}
#endif /* STARPU_USE_CUDA */

#ifdef STARPU_USE_OPENCL
int *_starpu_get_opencl_affinity_vector(unsigned gpuid)
{
	return opencl_affinity_matrix[gpuid];
}
#endif /* STARPU_USE_OPENCL */

void starpu_bus_print_affinity(FILE *f)
{
#if defined(STARPU_USE_CUDA) || defined(STARPU_USE_OPENCL)
	unsigned cpu;
	int gpu;
#endif

	fprintf(f, "# GPU\tCPU in preference order (logical index)\n");

#ifdef STARPU_USE_CUDA
	fprintf(f, "# CUDA\n");
	for (gpu = 0 ; gpu < ncuda ; gpu++)
	{
		fprintf(f, "%d\t", gpu);
		for (cpu = 0; cpu < ncpus; cpu++)
		{
			fprintf(f, "%d\t", cuda_affinity_matrix[gpu][cpu]);
		}
		fprintf(f, "\n");
	}
#endif
#ifdef STARPU_USE_OPENCL
	fprintf(f, "# OpenCL\n");
	for (gpu = 0 ; gpu < nopencl ; gpu++)
	{
		fprintf(f, "%d\t", gpu);
		for (cpu = 0; cpu < ncpus; cpu++)
		{
			fprintf(f, "%d\t", opencl_affinity_matrix[gpu][cpu]);
		}
		fprintf(f, "\n");
	}
#endif
}
/*
 *	Latency
 */

static void get_latency_path(char *path, size_t maxlen)
{
	get_bus_path("latency", path, maxlen);
}

static int load_bus_latency_file_content(void)
{
	int n;
	unsigned src, dst;
	FILE *f;

	char path[256];
	get_latency_path(path, 256);

	f = fopen(path, "r");
	STARPU_ASSERT(f);

	for (src = 0; src < STARPU_MAXNODES; src++)
	{
		_starpu_drop_comments(f);
		for (dst = 0; dst < STARPU_MAXNODES; dst++)
		{
			double latency;

			n = fscanf(f, "%lf", &latency);
			if (n != 1)
			{
				fclose(f);
				return 0;
			}
			n = getc(f);
			if (n != '\t')
			{
				fclose(f);
				return 0;
			}

			latency_matrix[src][dst] = latency;
		}

		n = getc(f);
		if (n != '\n')
		{
			fclose(f);
			return 0;
		}
	}

	fclose(f);
	return 1;
}
static void write_bus_latency_file_content(void)
{
	int src, dst, maxnode;
	FILE *f;

	STARPU_ASSERT(was_benchmarked);

	char path[256];
	get_latency_path(path, 256);

	f = fopen(path, "w+");
	if (!f)
	{
		perror("fopen write_bus_latency_file_content");
		_STARPU_DISP("path '%s'\n", path);
		fflush(stderr);
		STARPU_ABORT();
	}

	fprintf(f, "# ");
	for (dst = 0; dst < STARPU_MAXNODES; dst++)
		fprintf(f, "to %d\t\t", dst);
	fprintf(f, "\n");

	maxnode = ncuda;
#ifdef STARPU_USE_OPENCL
	maxnode += nopencl;
#endif
	for (src = 0; src < STARPU_MAXNODES; src++)
	{
		for (dst = 0; dst < STARPU_MAXNODES; dst++)
		{
			double latency;

			if ((src > maxnode) || (dst > maxnode))
			{
				/* convention */
				latency = NAN;
			}
			else if (src == dst)
			{
				latency = 0.0;
			}
			else
			{
				/* µs */
				latency = ((src && dst)?2000.0:500.0);
			}

			fprintf(f, "%f\t", latency);
		}
		fprintf(f, "\n");
	}

	fclose(f);
}
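/* In other words, latencies are not measured but set by convention: 0 µs from
 * a node to itself, 500 µs between RAM (node 0) and a GPU, 2000 µs between
 * two GPUs, and NAN for node numbers beyond the devices present. With a
 * single CUDA device, for instance, the meaningful 2x2 corner of the matrix
 * is simply 0/500 on the first row and 500/0 on the second. */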
static void generate_bus_latency_file(void)
{
	if (!was_benchmarked)
		benchmark_all_gpu_devices();

	write_bus_latency_file_content();
}

static void load_bus_latency_file(void)
{
	int res;

	char path[256];
	get_latency_path(path, 256);

	res = access(path, F_OK);
	if (res || !load_bus_latency_file_content())
	{
		/* File does not exist yet or is bogus */
		generate_bus_latency_file();
	}
}

/*
 *	Bandwidth
 */

static void get_bandwidth_path(char *path, size_t maxlen)
{
	get_bus_path("bandwidth", path, maxlen);
}
static int load_bus_bandwidth_file_content(void)
{
	int n;
	unsigned src, dst;
	FILE *f;

	char path[256];
	get_bandwidth_path(path, 256);

	f = fopen(path, "r");
	if (!f)
	{
		perror("fopen load_bus_bandwidth_file_content");
		_STARPU_DISP("path '%s'\n", path);
		fflush(stderr);
		STARPU_ABORT();
	}

	for (src = 0; src < STARPU_MAXNODES; src++)
	{
		_starpu_drop_comments(f);
		for (dst = 0; dst < STARPU_MAXNODES; dst++)
		{
			double bandwidth;

			n = fscanf(f, "%lf", &bandwidth);
			if (n != 1)
			{
				fprintf(stderr, "Error while reading sampling file <%s>. Expected a number\n", path);
				fclose(f);
				return 0;
			}
			n = getc(f);
			if (n != '\t')
			{
				fclose(f);
				return 0;
			}

			bandwidth_matrix[src][dst] = bandwidth;
		}

		n = getc(f);
		if (n != '\n')
		{
			fclose(f);
			return 0;
		}
	}

	fclose(f);
	return 1;
}
static void write_bus_bandwidth_file_content(void)
{
	int src, dst, maxnode;
	FILE *f;

	STARPU_ASSERT(was_benchmarked);

	char path[256];
	get_bandwidth_path(path, 256);

	f = fopen(path, "w+");
	STARPU_ASSERT(f);

	fprintf(f, "# ");
	for (dst = 0; dst < STARPU_MAXNODES; dst++)
		fprintf(f, "to %d\t\t", dst);
	fprintf(f, "\n");

	maxnode = ncuda;
#ifdef STARPU_USE_OPENCL
	maxnode += nopencl;
#endif
	for (src = 0; src < STARPU_MAXNODES; src++)
	{
		for (dst = 0; dst < STARPU_MAXNODES; dst++)
		{
			double bandwidth;

			if ((src > maxnode) || (dst > maxnode))
			{
				bandwidth = NAN;
			}
#if defined(STARPU_USE_CUDA) || defined(STARPU_USE_OPENCL)
			else if (src != dst)
			{
				double slowness = 0.0;
				/* The total slowness is the sum of the
				 * slownesses of each hop (the transfer goes
				 * through the host unless a direct GPU-GPU
				 * path is available), so the resulting
				 * bandwidth is the inverse of that sum. */
#ifdef STARPU_USE_CUDA
#ifdef HAVE_CUDA_MEMCPY_PEER
				if (src && src <= ncuda && dst && dst <= ncuda)
					/* Direct GPU-GPU transfer */
					slowness = cudadev_timing_dtod[src][dst];
				else
#endif
				{
					if (src && src <= ncuda)
						slowness += cudadev_timing_dtoh[src];
					if (dst && dst <= ncuda)
						slowness += cudadev_timing_htod[dst];
				}
#endif
#ifdef STARPU_USE_OPENCL
				if (src > ncuda)
					slowness += opencldev_timing_dtoh[src-ncuda];
				if (dst > ncuda)
					slowness += opencldev_timing_htod[dst-ncuda];
#endif
				bandwidth = 1.0/slowness;
			}
#endif
			else
			{
				/* convention */
				bandwidth = 0.0;
			}

			fprintf(f, "%f\t", bandwidth);
		}
		fprintf(f, "\n");
	}

	fclose(f);
}
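/* Illustrative example (made-up numbers): if device 1 reads back to RAM at
 * 0.0004 µs/byte and RAM uploads to device 2 at 0.0002 µs/byte, a transfer
 * from node 1 to node 2 that has to go through the host gets a slowness of
 * 0.0006 µs/byte, i.e. a bandwidth of about 1/0.0006 ≈ 1667 bytes/µs
 * (≈ 1.6 GB/s), which is what ends up in the bandwidth file. */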
void starpu_bus_print_bandwidth(FILE *f)
{
	int src, dst, maxnode;

	maxnode = ncuda;
#ifdef STARPU_USE_OPENCL
	maxnode += nopencl;
#endif

	fprintf(f, "from/to\t");
	fprintf(f, "RAM\t");
	for (dst = 0; dst < ncuda; dst++)
		fprintf(f, "CUDA %d\t", dst);
	for (dst = 0; dst < nopencl; dst++)
		fprintf(f, "OpenCL%d\t", dst);
	fprintf(f, "\n");

	for (src = 0; src <= maxnode; src++)
	{
		if (!src)
			fprintf(f, "RAM\t");
		else if (src <= ncuda)
			fprintf(f, "CUDA %d\t", src-1);
		else
			fprintf(f, "OpenCL%d\t", src-ncuda-1);

		for (dst = 0; dst <= maxnode; dst++)
			fprintf(f, "%.0f\t", bandwidth_matrix[src][dst]);

		fprintf(f, "\n");
	}

#if defined(STARPU_USE_CUDA) || defined(STARPU_USE_OPENCL)
	if (ncuda != 0 || nopencl != 0)
		fprintf(f, "\nGPU\tCPU in preference order (logical index), host-to-device, device-to-host\n");
	for (src = 1; src <= maxnode; src++)
	{
		struct dev_timing *timing;
		struct _starpu_machine_config *config = _starpu_get_machine_config();
		int ncpus = _starpu_topology_get_nhwcpu(config);
		int cpu;

#ifdef STARPU_USE_CUDA
		if (src <= ncuda)
		{
			fprintf(f, "CUDA %d\t", src-1);
			for (cpu = 0; cpu < ncpus; cpu++)
			{
				timing = &cudadev_timing_per_cpu[src*STARPU_MAXCPUS+cpu];
				if (timing->timing_htod)
					fprintf(f, "%2d %.0f %.0f\t", timing->cpu_id, 1/timing->timing_htod, 1/timing->timing_dtoh);
				else
					fprintf(f, "%2d\t", cuda_affinity_matrix[src-1][cpu]);
			}
		}
#ifdef STARPU_USE_OPENCL
		else
#endif
#endif
#ifdef STARPU_USE_OPENCL
		{
			fprintf(f, "OpenCL%d\t", src-ncuda-1);
			for (cpu = 0; cpu < ncpus; cpu++)
			{
				timing = &opencldev_timing_per_cpu[(src-ncuda)*STARPU_MAXCPUS+cpu];
				if (timing->timing_htod)
					fprintf(f, "%2d %.0f %.0f\t", timing->cpu_id, 1/timing->timing_htod, 1/timing->timing_dtoh);
				else
					fprintf(f, "%2d\t", opencl_affinity_matrix[src-ncuda-1][cpu]);
			}
		}
#endif
		fprintf(f, "\n");
	}
#endif
}
static void generate_bus_bandwidth_file(void)
{
	if (!was_benchmarked)
		benchmark_all_gpu_devices();

	write_bus_bandwidth_file_content();
}

static void load_bus_bandwidth_file(void)
{
	int res;

	char path[256];
	get_bandwidth_path(path, 256);

	res = access(path, F_OK);
	if (res || !load_bus_bandwidth_file_content())
	{
		/* File does not exist yet or is bogus */
		generate_bus_bandwidth_file();
	}
}
/*
 *	Config
 */

static void get_config_path(char *path, size_t maxlen)
{
	get_bus_path("config", path, maxlen);
}

static void check_bus_config_file(void)
{
	int res;
	char path[256];
	struct _starpu_machine_config *config = _starpu_get_machine_config();

	get_config_path(path, 256);
	res = access(path, F_OK);
	if (res || config->conf->bus_calibrate > 0)
	{
		if (res)
			_STARPU_DISP("No performance model for the bus, calibrating...\n");
		starpu_force_bus_sampling();
		if (res)
			_STARPU_DISP("... done\n");
	}
	else
	{
		FILE *f;
		int ret, read_cuda = -1, read_opencl = -1;
		unsigned read_cpus = -1;

		// Loading configuration from file
		f = fopen(path, "r");
		STARPU_ASSERT(f);
		_starpu_drop_comments(f);
		ret = fscanf(f, "%u\t", &read_cpus);
		STARPU_ASSERT(ret == 1);
		_starpu_drop_comments(f);
		ret = fscanf(f, "%d\t", &read_cuda);
		STARPU_ASSERT(ret == 1);
		_starpu_drop_comments(f);
		ret = fscanf(f, "%d\t", &read_opencl);
		STARPU_ASSERT(ret == 1);
		_starpu_drop_comments(f);
		fclose(f);

		// Loading current configuration
		ncpus = _starpu_topology_get_nhwcpu(config);
#ifdef STARPU_USE_CUDA
		ncuda = _starpu_get_cuda_device_count();
#endif
#ifdef STARPU_USE_OPENCL
		nopencl = _starpu_opencl_get_device_count();
#endif

		// Checking if both configurations match
		if (read_cpus != ncpus)
		{
			fprintf(stderr, "Current configuration does not match the bus performance model (CPUS: (stored) %u != (current) %u), recalibrating...", read_cpus, ncpus);
			starpu_force_bus_sampling();
			fprintf(stderr, "done\n");
		}
		else if (read_cuda != ncuda)
		{
			fprintf(stderr, "Current configuration does not match the bus performance model (CUDA: (stored) %d != (current) %d), recalibrating...", read_cuda, ncuda);
			starpu_force_bus_sampling();
			fprintf(stderr, "done\n");
		}
		else if (read_opencl != nopencl)
		{
			fprintf(stderr, "Current configuration does not match the bus performance model (OpenCL: (stored) %d != (current) %d), recalibrating...", read_opencl, nopencl);
			starpu_force_bus_sampling();
			fprintf(stderr, "done\n");
		}
	}
}
static void write_bus_config_file_content(void)
{
	FILE *f;
	char path[256];

	STARPU_ASSERT(was_benchmarked);
	get_config_path(path, 256);

	f = fopen(path, "w+");
	STARPU_ASSERT(f);

	fprintf(f, "# Current configuration\n");
	fprintf(f, "%u # Number of CPUs\n", ncpus);
	fprintf(f, "%d # Number of CUDA devices\n", ncuda);
	fprintf(f, "%d # Number of OpenCL devices\n", nopencl);

	fclose(f);
}

static void generate_bus_config_file(void)
{
	if (!was_benchmarked)
		benchmark_all_gpu_devices();

	write_bus_config_file_content();
}
/*
 *	Generic
 */

static void starpu_force_bus_sampling(void)
{
	_STARPU_DEBUG("Force bus sampling ...\n");
	_starpu_create_sampling_directory_if_needed();

	generate_bus_affinity_file();
	generate_bus_latency_file();
	generate_bus_bandwidth_file();
	generate_bus_config_file();
}

void _starpu_load_bus_performance_files(void)
{
	_starpu_create_sampling_directory_if_needed();

	check_bus_config_file();
	load_bus_affinity_file();
	load_bus_latency_file();
	load_bus_bandwidth_file();
}
/* Return the expected duration of a transfer, in µs. The size/bandwidth term
 * is scaled by twice the number of GPUs as a crude contention factor, since
 * all devices may compete for the same bus. */
double _starpu_predict_transfer_time(unsigned src_node, unsigned dst_node, size_t size)
{
	double bandwidth = bandwidth_matrix[src_node][dst_node];
	double latency = latency_matrix[src_node][dst_node];
	struct starpu_machine_topology *topology = &_starpu_get_machine_config()->topology;

	return latency + (size/bandwidth)*2*(topology->ncudagpus+topology->nopenclgpus);
}
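/* Worked example with made-up numbers: transferring 4 MiB (4194304 bytes) at
 * a stored bandwidth of 2000 bytes/µs with a 500 µs latency, on a machine
 * with a single CUDA device, gives 500 + (4194304/2000)*2*1 ≈ 4694 µs, i.e.
 * roughly 4.7 ms predicted for the transfer. */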