perfmodel_bus.c

/* StarPU --- Runtime system for heterogeneous multicore architectures.
 *
 * Copyright (C) 2009-2012  Université de Bordeaux 1
 * Copyright (C) 2010, 2011, 2012  Centre National de la Recherche Scientifique
 *
 * StarPU is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation; either version 2.1 of the License, or (at
 * your option) any later version.
 *
 * StarPU is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * See the GNU Lesser General Public License in COPYING.LGPL for more details.
 */
#ifdef STARPU_USE_CUDA
#ifndef _GNU_SOURCE
#define _GNU_SOURCE
#endif
#include <sched.h>
#endif

#include <unistd.h>
#include <stdio.h>
#include <string.h>
#include <sys/time.h>
#include <stdlib.h>
#include <math.h>
#include <starpu.h>
#include <starpu_cuda.h>
#include <starpu_opencl.h>
#include <common/config.h>
#include <core/workers.h>
#include <core/perfmodel/perfmodel.h>

#ifdef STARPU_HAVE_WINDOWS
#include <windows.h>
#endif
#define SIZE	(32*1024*1024*sizeof(char))
#define NITER	128

#define MAXCPUS	32

/* timing is in µs per byte (i.e. slowness, inverse of bandwidth) */
struct dev_timing
{
	int cpu_id;
	double timing_htod;
	double timing_dtoh;
};
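/* For instance, a slowness of 0.001 µs per byte is 1000 bytes/µs, i.e.
 * roughly 1 GB/s. */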
static double bandwidth_matrix[STARPU_MAXNODES][STARPU_MAXNODES] = {{NAN}};
static double latency_matrix[STARPU_MAXNODES][STARPU_MAXNODES] = {{NAN}};
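/* Memory nodes are numbered as follows: node 0 is the host RAM, nodes 1 to
 * ncuda are the CUDA devices, and nodes ncuda+1 to ncuda+nopencl are the
 * OpenCL devices.  This is why the per-device timing arrays below are indexed
 * with dev+1.  NAN marks pairs for which no measurement is available. */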
static unsigned was_benchmarked = 0;
static unsigned ncpus = 0;
static int ncuda = 0;
static int nopencl = 0;

/* Benchmarking the performance of the bus */

#ifdef STARPU_USE_CUDA
static int cuda_affinity_matrix[STARPU_MAXCUDADEVS][MAXCPUS];
static double cudadev_timing_htod[STARPU_MAXNODES] = {0.0};
static double cudadev_timing_dtoh[STARPU_MAXNODES] = {0.0};
#ifdef HAVE_CUDA_MEMCPY_PEER
static double cudadev_timing_dtod[STARPU_MAXNODES][STARPU_MAXNODES] = {{0.0}};
#endif
static struct dev_timing cudadev_timing_per_cpu[STARPU_MAXNODES*MAXCPUS];
#endif

#ifdef STARPU_USE_OPENCL
static int opencl_affinity_matrix[STARPU_MAXOPENCLDEVS][MAXCPUS];
static double opencldev_timing_htod[STARPU_MAXNODES] = {0.0};
static double opencldev_timing_dtoh[STARPU_MAXNODES] = {0.0};
static struct dev_timing opencldev_timing_per_cpu[STARPU_MAXNODES*MAXCPUS];
#endif

#if defined(STARPU_USE_CUDA) || defined(STARPU_USE_OPENCL)

#ifdef STARPU_HAVE_HWLOC
static hwloc_topology_t hwtopology;
#endif
#ifdef STARPU_USE_CUDA
static void measure_bandwidth_between_host_and_dev_on_cpu_with_cuda(int dev, int cpu, struct dev_timing *dev_timing_per_cpu)
{
	struct _starpu_machine_config *config = _starpu_get_machine_config();
	_starpu_bind_thread_on_cpu(config, cpu);

	size_t size = SIZE;

	/* Initialize CUDA context on the device */
	starpu_cuda_set_device(dev);

	/* hack to prevent third-party libs from rebinding the thread */
	_starpu_bind_thread_on_cpu(config, cpu);

	/* hack to force the initialization */
	cudaFree(0);

	/* hack to prevent third-party libs from rebinding the thread */
	_starpu_bind_thread_on_cpu(config, cpu);

	/* Get the maximum size which can be allocated on the device */
	struct cudaDeviceProp prop;
	cudaError_t cures;
	cures = cudaGetDeviceProperties(&prop, dev);
	if (STARPU_UNLIKELY(cures)) STARPU_CUDA_REPORT_ERROR(cures);
	if (size > prop.totalGlobalMem/4) size = prop.totalGlobalMem/4;

	/* Allocate a buffer on the device */
	unsigned char *d_buffer;
	cudaMalloc((void **)&d_buffer, size);
	STARPU_ASSERT(d_buffer);

	/* hack to prevent third-party libs from rebinding the thread */
	_starpu_bind_thread_on_cpu(config, cpu);

	/* Allocate a buffer on the host */
	unsigned char *h_buffer;
	cures = cudaHostAlloc((void **)&h_buffer, size, 0);
	STARPU_ASSERT(cures == cudaSuccess);

	/* hack to prevent third-party libs from rebinding the thread */
	_starpu_bind_thread_on_cpu(config, cpu);

	/* Fill them */
	memset(h_buffer, 0, size);
	cudaMemset(d_buffer, 0, size);

	/* hack to prevent third-party libs from rebinding the thread */
	_starpu_bind_thread_on_cpu(config, cpu);

	unsigned iter;
	double timing;
	struct timeval start;
	struct timeval end;

	/* Measure upload bandwidth */
	gettimeofday(&start, NULL);
	for (iter = 0; iter < NITER; iter++)
	{
		cudaMemcpy(d_buffer, h_buffer, size, cudaMemcpyHostToDevice);
		cudaThreadSynchronize();
	}
	gettimeofday(&end, NULL);

	timing = (double)((end.tv_sec - start.tv_sec)*1000000 + (end.tv_usec - start.tv_usec));
	dev_timing_per_cpu[(dev+1)*MAXCPUS+cpu].timing_htod = timing/NITER/size;

	/* Measure download bandwidth */
	gettimeofday(&start, NULL);
	for (iter = 0; iter < NITER; iter++)
	{
		cudaMemcpy(h_buffer, d_buffer, size, cudaMemcpyDeviceToHost);
		cudaThreadSynchronize();
	}
	gettimeofday(&end, NULL);

	timing = (double)((end.tv_sec - start.tv_sec)*1000000 + (end.tv_usec - start.tv_usec));
	dev_timing_per_cpu[(dev+1)*MAXCPUS+cpu].timing_dtoh = timing/NITER/size;

	/* Free buffers */
	cudaFreeHost(h_buffer);
	cudaFree(d_buffer);

	cudaThreadExit();
}
#ifdef HAVE_CUDA_MEMCPY_PEER
static void measure_bandwidth_between_dev_and_dev_cuda(int src, int dst)
{
	size_t size = SIZE;

	/* Get the maximum size which can be allocated on both devices */
	struct cudaDeviceProp prop;
	cudaError_t cures;
	cures = cudaGetDeviceProperties(&prop, src);
	if (STARPU_UNLIKELY(cures)) STARPU_CUDA_REPORT_ERROR(cures);
	if (size > prop.totalGlobalMem/4) size = prop.totalGlobalMem/4;
	cures = cudaGetDeviceProperties(&prop, dst);
	if (STARPU_UNLIKELY(cures)) STARPU_CUDA_REPORT_ERROR(cures);
	if (size > prop.totalGlobalMem/4) size = prop.totalGlobalMem/4;

	/* Initialize CUDA context on the source */
	starpu_cuda_set_device(src);

	/* Allocate a buffer on the device */
	unsigned char *s_buffer;
	cudaMalloc((void **)&s_buffer, size);
	STARPU_ASSERT(s_buffer);
	cudaMemset(s_buffer, 0, size);

	/* Initialize CUDA context on the destination */
	starpu_cuda_set_device(dst);

	/* Allocate a buffer on the device */
	unsigned char *d_buffer;
	cudaMalloc((void **)&d_buffer, size);
	STARPU_ASSERT(d_buffer);
	cudaMemset(d_buffer, 0, size);

	unsigned iter;
	double timing;
	struct timeval start;
	struct timeval end;

	/* Measure the device-to-device bandwidth */
	gettimeofday(&start, NULL);
	for (iter = 0; iter < NITER; iter++)
	{
		cudaMemcpyPeer(d_buffer, dst, s_buffer, src, size);
		cudaThreadSynchronize();
	}
	gettimeofday(&end, NULL);

	timing = (double)((end.tv_sec - start.tv_sec)*1000000 + (end.tv_usec - start.tv_usec));
	cudadev_timing_dtod[src+1][dst+1] = timing/NITER/size;

	/* Free buffers */
	cudaFree(d_buffer);
	starpu_cuda_set_device(src);
	cudaFree(s_buffer);

	cudaThreadExit();
}
#endif /* HAVE_CUDA_MEMCPY_PEER */
#endif /* STARPU_USE_CUDA */
#ifdef STARPU_USE_OPENCL
static void measure_bandwidth_between_host_and_dev_on_cpu_with_opencl(int dev, int cpu, struct dev_timing *dev_timing_per_cpu)
{
	cl_context context;
	cl_command_queue queue;
	cl_int err = 0;
	size_t size = SIZE;

	struct _starpu_machine_config *config = _starpu_get_machine_config();
	_starpu_bind_thread_on_cpu(config, cpu);

	/* Initialize OpenCL context on the device */
	_starpu_opencl_init_context(dev);
	starpu_opencl_get_context(dev, &context);
	starpu_opencl_get_queue(dev, &queue);

	/* Get the maximum size which can be allocated on the device */
	cl_device_id device;
	cl_ulong maxMemAllocSize;
	starpu_opencl_get_device(dev, &device);
	err = clGetDeviceInfo(device, CL_DEVICE_MAX_MEM_ALLOC_SIZE, sizeof(maxMemAllocSize), &maxMemAllocSize, NULL);
	if (err != CL_SUCCESS) STARPU_OPENCL_REPORT_ERROR(err);
	if (size > (size_t)maxMemAllocSize/4) size = maxMemAllocSize/4;

	if (_starpu_opencl_get_device_type(dev) == CL_DEVICE_TYPE_CPU)
	{
		/* Let's not use too much RAM when running OpenCL on a CPU: it
		 * would make the OS swap like crazy. */
		size /= 2;
	}

	/* hack to prevent third-party libs from rebinding the thread */
	_starpu_bind_thread_on_cpu(config, cpu);

	/* Allocate a buffer on the device */
	cl_mem d_buffer;
	d_buffer = clCreateBuffer(context, CL_MEM_READ_WRITE, size, NULL, &err);
	if (err != CL_SUCCESS) STARPU_OPENCL_REPORT_ERROR(err);

	/* hack to prevent third-party libs from rebinding the thread */
	_starpu_bind_thread_on_cpu(config, cpu);

	/* Allocate a buffer on the host */
	unsigned char *h_buffer;
	h_buffer = (unsigned char *)malloc(size);
	STARPU_ASSERT(h_buffer);

	/* hack to prevent third-party libs from rebinding the thread */
	_starpu_bind_thread_on_cpu(config, cpu);

	/* Fill them */
	memset(h_buffer, 0, size);
	err = clEnqueueWriteBuffer(queue, d_buffer, CL_TRUE, 0, size, h_buffer, 0, NULL, NULL);
	if (err != CL_SUCCESS) STARPU_OPENCL_REPORT_ERROR(err);

	/* hack to prevent third-party libs from rebinding the thread */
	_starpu_bind_thread_on_cpu(config, cpu);

	unsigned iter;
	double timing;
	struct timeval start;
	struct timeval end;

	/* Measure upload bandwidth */
	gettimeofday(&start, NULL);
	for (iter = 0; iter < NITER; iter++)
	{
		err = clEnqueueWriteBuffer(queue, d_buffer, CL_TRUE, 0, size, h_buffer, 0, NULL, NULL);
		if (err != CL_SUCCESS) STARPU_OPENCL_REPORT_ERROR(err);
	}
	gettimeofday(&end, NULL);

	timing = (double)((end.tv_sec - start.tv_sec)*1000000 + (end.tv_usec - start.tv_usec));
	dev_timing_per_cpu[(dev+1)*MAXCPUS+cpu].timing_htod = timing/NITER/size;

	/* Measure download bandwidth */
	gettimeofday(&start, NULL);
	for (iter = 0; iter < NITER; iter++)
	{
		err = clEnqueueReadBuffer(queue, d_buffer, CL_TRUE, 0, size, h_buffer, 0, NULL, NULL);
		if (err != CL_SUCCESS) STARPU_OPENCL_REPORT_ERROR(err);
	}
	gettimeofday(&end, NULL);

	timing = (double)((end.tv_sec - start.tv_sec)*1000000 + (end.tv_usec - start.tv_usec));
	dev_timing_per_cpu[(dev+1)*MAXCPUS+cpu].timing_dtoh = timing/NITER/size;

	/* Free buffers */
	clReleaseMemObject(d_buffer);
	free(h_buffer);

	/* Uninitialize the OpenCL context on the device */
	_starpu_opencl_deinit_context(dev);
}
#endif /* STARPU_USE_OPENCL */
/* NB: we want to sort the CPUs by DECREASING bandwidth, i.e. by increasing
 * slowness */
static int compar_dev_timing(const void *left_dev_timing, const void *right_dev_timing)
{
	const struct dev_timing *left = (const struct dev_timing *)left_dev_timing;
	const struct dev_timing *right = (const struct dev_timing *)right_dev_timing;

	double left_dtoh = left->timing_dtoh;
	double left_htod = left->timing_htod;
	double right_dtoh = right->timing_dtoh;
	double right_htod = right->timing_htod;

	double timing_sum2_left = left_dtoh*left_dtoh + left_htod*left_htod;
	double timing_sum2_right = right_dtoh*right_dtoh + right_htod*right_htod;

	/* Proper three-way comparison: the smallest slowness (i.e. the
	 * highest bandwidth) comes first */
	return (timing_sum2_left > timing_sum2_right) - (timing_sum2_left < timing_sum2_right);
}
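/* Note that the sort key is the squared Euclidean norm of the (dtoh, htod)
 * slowness pair, so a CPU is preferred when both directions are fast.  For
 * instance, (0.001, 0.001) sorts before (0.001, 0.002). */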
#ifdef STARPU_HAVE_HWLOC
static int find_numa_node(hwloc_obj_t obj)
{
	STARPU_ASSERT(obj);
	hwloc_obj_t current = obj;
	while (current->type != HWLOC_OBJ_NODE)
	{
		current = current->parent;

		/* If we don't find a "node" obj before the root, this means
		 * hwloc does not know whether there are numa nodes or not, so
		 * we should not use a per-node sampling in that case. */
		STARPU_ASSERT(current);
	}

	STARPU_ASSERT(current->type == HWLOC_OBJ_NODE);
	return current->logical_index;
}
#endif
static void measure_bandwidth_between_cpus_and_dev(int dev, struct dev_timing *dev_timing_per_cpu, char *type)
{
	/* Either we have hwloc and we measure the bandwidth between each GPU
	 * and each NUMA node, or we don't have such NUMA information and we
	 * measure the bandwidth for each pair of (CPU, GPU), which is slower.
	 */
#ifdef STARPU_HAVE_HWLOC
	int cpu_depth = hwloc_get_type_depth(hwtopology, HWLOC_OBJ_CORE);
	int nnuma_nodes = hwloc_get_nbobjs_by_type(hwtopology, HWLOC_OBJ_NODE);

	/* If no NUMA node was found, we assume that we have a single memory
	 * bank. */
	const unsigned no_node_obj_was_found = (nnuma_nodes == 0);

	unsigned *is_available_per_numa_node = NULL;
	double *dev_timing_htod_per_numa_node = NULL;
	double *dev_timing_dtoh_per_numa_node = NULL;

	if (!no_node_obj_was_found)
	{
		is_available_per_numa_node = (unsigned *)malloc(nnuma_nodes * sizeof(unsigned));
		STARPU_ASSERT(is_available_per_numa_node);

		dev_timing_htod_per_numa_node = (double *)malloc(nnuma_nodes * sizeof(double));
		STARPU_ASSERT(dev_timing_htod_per_numa_node);

		dev_timing_dtoh_per_numa_node = (double *)malloc(nnuma_nodes * sizeof(double));
		STARPU_ASSERT(dev_timing_dtoh_per_numa_node);

		memset(is_available_per_numa_node, 0, nnuma_nodes*sizeof(unsigned));
	}
#endif

	unsigned cpu;
	for (cpu = 0; cpu < ncpus; cpu++)
	{
		dev_timing_per_cpu[(dev+1)*MAXCPUS+cpu].cpu_id = cpu;

#ifdef STARPU_HAVE_HWLOC
		int numa_id = 0;

		if (!no_node_obj_was_found)
		{
			hwloc_obj_t obj = hwloc_get_obj_by_depth(hwtopology, cpu_depth, cpu);

			numa_id = find_numa_node(obj);

			if (is_available_per_numa_node[numa_id])
			{
				/* We reuse the previous numbers for that NUMA node */
				dev_timing_per_cpu[(dev+1)*MAXCPUS+cpu].timing_htod =
					dev_timing_htod_per_numa_node[numa_id];
				dev_timing_per_cpu[(dev+1)*MAXCPUS+cpu].timing_dtoh =
					dev_timing_dtoh_per_numa_node[numa_id];
				continue;
			}
		}
#endif

#ifdef STARPU_USE_CUDA
		if (strncmp(type, "CUDA", 4) == 0)
			measure_bandwidth_between_host_and_dev_on_cpu_with_cuda(dev, cpu, dev_timing_per_cpu);
#endif
#ifdef STARPU_USE_OPENCL
		if (strncmp(type, "OpenCL", 6) == 0)
			measure_bandwidth_between_host_and_dev_on_cpu_with_opencl(dev, cpu, dev_timing_per_cpu);
#endif

#ifdef STARPU_HAVE_HWLOC
		if (!no_node_obj_was_found && !is_available_per_numa_node[numa_id])
		{
			/* Save the results for that NUMA node */
			dev_timing_htod_per_numa_node[numa_id] =
				dev_timing_per_cpu[(dev+1)*MAXCPUS+cpu].timing_htod;
			dev_timing_dtoh_per_numa_node[numa_id] =
				dev_timing_per_cpu[(dev+1)*MAXCPUS+cpu].timing_dtoh;

			is_available_per_numa_node[numa_id] = 1;
		}
#endif
	}

#ifdef STARPU_HAVE_HWLOC
	if (!no_node_obj_was_found)
	{
		free(is_available_per_numa_node);
		free(dev_timing_htod_per_numa_node);
		free(dev_timing_dtoh_per_numa_node);
	}
#endif /* STARPU_HAVE_HWLOC */
}
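/* With the hwloc path above, the number of actual benchmark runs drops from
 * one per CPU core to one per NUMA node: on, say, a two-socket machine with
 * 12 cores, each GPU is measured twice instead of 12 times. */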
static void measure_bandwidth_between_host_and_dev(int dev, double *dev_timing_htod, double *dev_timing_dtoh,
						   struct dev_timing *dev_timing_per_cpu, char *type)
{
	measure_bandwidth_between_cpus_and_dev(dev, dev_timing_per_cpu, type);

	/* sort the results */
	qsort(&(dev_timing_per_cpu[(dev+1)*MAXCPUS]), ncpus,
	      sizeof(struct dev_timing),
	      compar_dev_timing);

#ifdef STARPU_VERBOSE
	unsigned cpu;
	for (cpu = 0; cpu < ncpus; cpu++)
	{
		unsigned current_cpu = dev_timing_per_cpu[(dev+1)*MAXCPUS+cpu].cpu_id;
		double timing_dtoh = dev_timing_per_cpu[(dev+1)*MAXCPUS+cpu].timing_dtoh;
		double timing_htod = dev_timing_per_cpu[(dev+1)*MAXCPUS+cpu].timing_htod;

		double timing_sum2 = timing_dtoh*timing_dtoh + timing_htod*timing_htod;

		_STARPU_DISP("(%10s) BANDWIDTH GPU %d CPU %u - htod %f - dtoh %f - %f\n", type, dev, current_cpu, timing_htod, timing_dtoh, sqrt(timing_sum2));
	}

	unsigned best_cpu = dev_timing_per_cpu[(dev+1)*MAXCPUS+0].cpu_id;

	_STARPU_DISP("(%10s) BANDWIDTH GPU %d BEST CPU %u\n", type, dev, best_cpu);
#endif

	/* The results are sorted by decreasing bandwidth (i.e. increasing
	 * slowness), so the best measurement is the first entry. */
	dev_timing_dtoh[dev+1] = dev_timing_per_cpu[(dev+1)*MAXCPUS+0].timing_dtoh;
	dev_timing_htod[dev+1] = dev_timing_per_cpu[(dev+1)*MAXCPUS+0].timing_htod;
}

#endif /* defined(STARPU_USE_CUDA) || defined(STARPU_USE_OPENCL) */
static void benchmark_all_gpu_devices(void)
{
#if defined(STARPU_USE_CUDA) || defined(STARPU_USE_OPENCL)
	int i;
#ifdef HAVE_CUDA_MEMCPY_PEER
	int j;
#endif

	_STARPU_DEBUG("Benchmarking the speed of the bus\n");

#ifdef STARPU_HAVE_HWLOC
	hwloc_topology_init(&hwtopology);
	hwloc_topology_load(hwtopology);
#endif

#ifdef STARPU_HAVE_HWLOC
	/* Save the current cpu binding */
	hwloc_cpuset_t former_cpuset = hwloc_bitmap_alloc();
	hwloc_get_cpubind(hwtopology, former_cpuset, HWLOC_CPUBIND_THREAD);
#elif __linux__
	/* Save the current cpu binding */
	cpu_set_t former_process_affinity;
	int ret;
	ret = sched_getaffinity(0, sizeof(former_process_affinity), &former_process_affinity);
	if (ret)
	{
		perror("sched_getaffinity");
		STARPU_ABORT();
	}
#else
#warning Missing binding support, StarPU will not be able to properly benchmark NUMA topology
#endif

	struct _starpu_machine_config *config = _starpu_get_machine_config();
	ncpus = _starpu_topology_get_nhwcpu(config);

#ifdef STARPU_USE_CUDA
	ncuda = _starpu_get_cuda_device_count();
	for (i = 0; i < ncuda; i++)
	{
		_STARPU_DISP("CUDA %d...\n", i);
		/* measure bandwidth between Host and Device i */
		measure_bandwidth_between_host_and_dev(i, cudadev_timing_htod, cudadev_timing_dtoh, cudadev_timing_per_cpu, "CUDA");
	}
#ifdef HAVE_CUDA_MEMCPY_PEER
	for (i = 0; i < ncuda; i++)
		for (j = 0; j < ncuda; j++)
			if (i != j)
			{
				_STARPU_DISP("CUDA %d -> %d...\n", i, j);
				/* measure bandwidth between Device i and Device j */
				measure_bandwidth_between_dev_and_dev_cuda(i, j);
			}
#endif
#endif

#ifdef STARPU_USE_OPENCL
	nopencl = _starpu_opencl_get_device_count();
	for (i = 0; i < nopencl; i++)
	{
		_STARPU_DISP("OpenCL %d...\n", i);
		/* measure bandwidth between Host and Device i */
		measure_bandwidth_between_host_and_dev(i, opencldev_timing_htod, opencldev_timing_dtoh, opencldev_timing_per_cpu, "OpenCL");
	}
#endif

#ifdef STARPU_HAVE_HWLOC
	/* Restore the former affinity */
	hwloc_set_cpubind(hwtopology, former_cpuset, HWLOC_CPUBIND_THREAD);
	hwloc_bitmap_free(former_cpuset);
#elif __linux__
	/* Restore the former affinity */
	ret = sched_setaffinity(0, sizeof(former_process_affinity), &former_process_affinity);
	if (ret)
	{
		perror("sched_setaffinity");
		STARPU_ABORT();
	}
#endif

#ifdef STARPU_HAVE_HWLOC
	hwloc_topology_destroy(hwtopology);
#endif

	_STARPU_DEBUG("Benchmarking the speed of the bus is done.\n");
#endif /* defined(STARPU_USE_CUDA) || defined(STARPU_USE_OPENCL) */

	was_benchmarked = 1;
}
static void get_bus_path(const char *type, char *path, size_t maxlen)
{
	_starpu_get_perf_model_dir_bus(path, maxlen);
	strncat(path, type, maxlen - strlen(path) - 1);

	char hostname[32];
	char *forced_hostname = getenv("STARPU_HOSTNAME");
	if (forced_hostname && forced_hostname[0])
		snprintf(hostname, sizeof(hostname), "%s", forced_hostname);
	else
		gethostname(hostname, sizeof(hostname));
	strncat(path, ".", maxlen - strlen(path) - 1);
	strncat(path, hostname, maxlen - strlen(path) - 1);
}
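/* For illustration (hypothetical host name): with type "bandwidth" on a host
 * called "mymachine", this produces the bus sampling directory returned by
 * _starpu_get_perf_model_dir_bus() followed by "bandwidth.mymachine".
 * Setting the STARPU_HOSTNAME environment variable overrides the host name
 * part, e.g. when several machines share the same sampling directory. */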
/*
 *	Affinity
 */

static void get_affinity_path(char *path, size_t maxlen)
{
	get_bus_path("affinity", path, maxlen);
}

static void load_bus_affinity_file_content(void)
{
#if defined(STARPU_USE_CUDA) || defined(STARPU_USE_OPENCL)
	FILE *f;

	char path[256];
	get_affinity_path(path, 256);

	f = fopen(path, "r");
	STARPU_ASSERT(f);

	struct _starpu_machine_config *config = _starpu_get_machine_config();
	ncpus = _starpu_topology_get_nhwcpu(config);
	int gpu;

#ifdef STARPU_USE_CUDA
	ncuda = _starpu_get_cuda_device_count();
	for (gpu = 0; gpu < ncuda; gpu++)
	{
		int ret;
		int dummy;

		_starpu_drop_comments(f);
		ret = fscanf(f, "%d\t", &dummy);
		STARPU_ASSERT(ret == 1);
		STARPU_ASSERT(dummy == gpu);

		unsigned cpu;
		for (cpu = 0; cpu < ncpus; cpu++)
		{
			ret = fscanf(f, "%d\t", &cuda_affinity_matrix[gpu][cpu]);
			STARPU_ASSERT(ret == 1);
		}

		ret = fscanf(f, "\n");
		STARPU_ASSERT(ret == 0);
	}
#endif /* STARPU_USE_CUDA */

#ifdef STARPU_USE_OPENCL
	nopencl = _starpu_opencl_get_device_count();
	for (gpu = 0; gpu < nopencl; gpu++)
	{
		int ret;
		int dummy;

		_starpu_drop_comments(f);
		ret = fscanf(f, "%d\t", &dummy);
		STARPU_ASSERT(ret == 1);
		STARPU_ASSERT(dummy == gpu);

		unsigned cpu;
		for (cpu = 0; cpu < ncpus; cpu++)
		{
			ret = fscanf(f, "%d\t", &opencl_affinity_matrix[gpu][cpu]);
			STARPU_ASSERT(ret == 1);
		}

		ret = fscanf(f, "\n");
		STARPU_ASSERT(ret == 0);
	}
#endif /* STARPU_USE_OPENCL */

	fclose(f);
#endif /* STARPU_USE_CUDA || STARPU_USE_OPENCL */
}
static void write_bus_affinity_file_content(void)
{
	STARPU_ASSERT(was_benchmarked);

#if defined(STARPU_USE_CUDA) || defined(STARPU_USE_OPENCL)
	FILE *f;
	char path[256];
	get_affinity_path(path, 256);

	f = fopen(path, "w+");
	if (!f)
	{
		perror("fopen write_bus_affinity_file_content");
		_STARPU_DISP("path '%s'\n", path);
		fflush(stderr);
		STARPU_ABORT();
	}

	unsigned cpu;
	int gpu;

	fprintf(f, "# GPU\t");
	for (cpu = 0; cpu < ncpus; cpu++)
		fprintf(f, "CPU%u\t", cpu);
	fprintf(f, "\n");

#ifdef STARPU_USE_CUDA
	for (gpu = 0; gpu < ncuda; gpu++)
	{
		fprintf(f, "%d\t", gpu);

		for (cpu = 0; cpu < ncpus; cpu++)
		{
			fprintf(f, "%d\t", cudadev_timing_per_cpu[(gpu+1)*MAXCPUS+cpu].cpu_id);
		}

		fprintf(f, "\n");
	}
#endif
#ifdef STARPU_USE_OPENCL
	for (gpu = 0; gpu < nopencl; gpu++)
	{
		fprintf(f, "%d\t", gpu);

		for (cpu = 0; cpu < ncpus; cpu++)
		{
			fprintf(f, "%d\t", opencldev_timing_per_cpu[(gpu+1)*MAXCPUS+cpu].cpu_id);
		}

		fprintf(f, "\n");
	}
#endif

	fclose(f);
#endif
}
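/* The generated affinity file looks like this for a hypothetical machine
 * with one GPU and four CPU cores:
 *
 *	# GPU	CPU0	CPU1	CPU2	CPU3
 *	0	2	3	0	1
 *
 * Each row lists, for one GPU, the CPU ids sorted from highest to lowest
 * host<->device bandwidth. */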
static void generate_bus_affinity_file(void)
{
	if (!was_benchmarked)
		benchmark_all_gpu_devices();

	write_bus_affinity_file_content();
}

static void load_bus_affinity_file(void)
{
	int res;

	char path[256];
	get_affinity_path(path, 256);

	res = access(path, F_OK);
	if (res)
	{
		/* File does not exist yet */
		generate_bus_affinity_file();
	}

	load_bus_affinity_file_content();
}

#ifdef STARPU_USE_CUDA
int *_starpu_get_cuda_affinity_vector(unsigned gpuid)
{
	return cuda_affinity_matrix[gpuid];
}
#endif /* STARPU_USE_CUDA */

#ifdef STARPU_USE_OPENCL
int *_starpu_get_opencl_affinity_vector(unsigned gpuid)
{
	return opencl_affinity_matrix[gpuid];
}
#endif /* STARPU_USE_OPENCL */

/*
 *	Latency
 */
static void get_latency_path(char *path, size_t maxlen)
{
	get_bus_path("latency", path, maxlen);
}

static int load_bus_latency_file_content(void)
{
	int n;
	unsigned src, dst;
	FILE *f;

	char path[256];
	get_latency_path(path, 256);

	f = fopen(path, "r");
	STARPU_ASSERT(f);

	for (src = 0; src < STARPU_MAXNODES; src++)
	{
		_starpu_drop_comments(f);
		for (dst = 0; dst < STARPU_MAXNODES; dst++)
		{
			double latency;

			n = fscanf(f, "%lf", &latency);
			if (n != 1)
			{
				fclose(f);
				return 0;
			}
			n = getc(f);
			if (n != '\t')
			{
				fclose(f);
				return 0;
			}

			latency_matrix[src][dst] = latency;
		}

		n = getc(f);
		if (n != '\n')
		{
			fclose(f);
			return 0;
		}
	}

	fclose(f);
	return 1;
}
static void write_bus_latency_file_content(void)
{
	int src, dst, maxnode;
	FILE *f;

	STARPU_ASSERT(was_benchmarked);

	char path[256];
	get_latency_path(path, 256);

	f = fopen(path, "w+");
	if (!f)
	{
		perror("fopen write_bus_latency_file_content");
		_STARPU_DISP("path '%s'\n", path);
		fflush(stderr);
		STARPU_ABORT();
	}

	fprintf(f, "# ");
	for (dst = 0; dst < STARPU_MAXNODES; dst++)
		fprintf(f, "to %d\t\t", dst);
	fprintf(f, "\n");

	maxnode = ncuda;
#ifdef STARPU_USE_OPENCL
	maxnode += nopencl;
#endif
	for (src = 0; src < STARPU_MAXNODES; src++)
	{
		for (dst = 0; dst < STARPU_MAXNODES; dst++)
		{
			double latency;

			if ((src > maxnode) || (dst > maxnode))
			{
				/* convention */
				latency = NAN;
			}
			else if (src == dst)
			{
				latency = 0.0;
			}
			else
			{
				/* Latencies are not measured; use rough
				 * estimates (in µs): 2000 between two GPUs
				 * (going through RAM), 500 between RAM and a
				 * GPU. */
				latency = ((src && dst)?2000.0:500.0);
			}

			fprintf(f, "%f\t", latency);
		}
		fprintf(f, "\n");
	}

	fclose(f);
}
static void generate_bus_latency_file(void)
{
	if (!was_benchmarked)
		benchmark_all_gpu_devices();

	write_bus_latency_file_content();
}

static void load_bus_latency_file(void)
{
	int res;

	char path[256];
	get_latency_path(path, 256);

	res = access(path, F_OK);
	if (res || !load_bus_latency_file_content())
	{
		/* File does not exist yet or is bogus */
		generate_bus_latency_file();
	}
}

/*
 *	Bandwidth
 */
static void get_bandwidth_path(char *path, size_t maxlen)
{
	get_bus_path("bandwidth", path, maxlen);
}

static int load_bus_bandwidth_file_content(void)
{
	int n;
	unsigned src, dst;
	FILE *f;

	char path[256];
	get_bandwidth_path(path, 256);

	f = fopen(path, "r");
	if (!f)
	{
		perror("fopen load_bus_bandwidth_file_content");
		_STARPU_DISP("path '%s'\n", path);
		fflush(stderr);
		STARPU_ABORT();
	}

	for (src = 0; src < STARPU_MAXNODES; src++)
	{
		_starpu_drop_comments(f);
		for (dst = 0; dst < STARPU_MAXNODES; dst++)
		{
			double bandwidth;

			n = fscanf(f, "%lf", &bandwidth);
			if (n != 1)
			{
				fprintf(stderr, "didn't get a number\n");
				fclose(f);
				return 0;
			}
			n = getc(f);
			if (n != '\t')
			{
				fclose(f);
				return 0;
			}

			bandwidth_matrix[src][dst] = bandwidth;
		}

		n = getc(f);
		if (n != '\n')
		{
			fclose(f);
			return 0;
		}
	}

	fclose(f);
	return 1;
}
static void write_bus_bandwidth_file_content(void)
{
	int src, dst, maxnode;
	FILE *f;

	STARPU_ASSERT(was_benchmarked);

	char path[256];
	get_bandwidth_path(path, 256);

	f = fopen(path, "w+");
	STARPU_ASSERT(f);

	fprintf(f, "# ");
	for (dst = 0; dst < STARPU_MAXNODES; dst++)
		fprintf(f, "to %d\t\t", dst);
	fprintf(f, "\n");

	maxnode = ncuda;
#ifdef STARPU_USE_OPENCL
	maxnode += nopencl;
#endif
	for (src = 0; src < STARPU_MAXNODES; src++)
	{
		for (dst = 0; dst < STARPU_MAXNODES; dst++)
		{
			double bandwidth;

			if ((src > maxnode) || (dst > maxnode))
			{
				bandwidth = NAN;
			}
#if defined(STARPU_USE_CUDA) || defined(STARPU_USE_OPENCL)
			else if (src != dst)
			{
				double slowness = 0.0;
				/* Total bandwidth is the harmonic mean of bandwidths */
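				/* Concretely (figures assumed for
				 * illustration): staging through RAM with
				 * 5 GB/s on each side adds two slownesses of
				 * 0.0002 µs/byte, giving 0.0004 µs/byte,
				 * i.e. 2500 bytes/µs (2.5 GB/s). */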
#ifdef STARPU_USE_CUDA
#ifdef HAVE_CUDA_MEMCPY_PEER
				if (src && src <= ncuda && dst && dst <= ncuda)
					/* Direct GPU-GPU transfer */
					slowness = cudadev_timing_dtod[src][dst];
				else
#endif
				{
					if (src && src <= ncuda)
						slowness += cudadev_timing_dtoh[src];
					if (dst && dst <= ncuda)
						slowness += cudadev_timing_htod[dst];
				}
#endif
#ifdef STARPU_USE_OPENCL
				if (src > ncuda)
					slowness += opencldev_timing_dtoh[src-ncuda];
				if (dst > ncuda)
					slowness += opencldev_timing_htod[dst-ncuda];
#endif
				bandwidth = 1.0/slowness;
			}
#endif
			else
			{
				/* convention */
				bandwidth = 0.0;
			}

			fprintf(f, "%f\t", bandwidth);
		}

		fprintf(f, "\n");
	}

	fclose(f);
}
void starpu_bus_print_bandwidth(FILE *f)
{
	int src, dst, maxnode;

	maxnode = ncuda;
#ifdef STARPU_USE_OPENCL
	maxnode += nopencl;
#endif

	fprintf(f, "from\t");
	fprintf(f, "to RAM\t\t");
	for (dst = 0; dst < ncuda; dst++)
		fprintf(f, "to CUDA %d\t", dst);
	for (dst = 0; dst < nopencl; dst++)
		fprintf(f, "to OpenCL %d\t", dst);
	fprintf(f, "\n");

	for (src = 0; src <= maxnode; src++)
	{
		if (!src)
			fprintf(f, "RAM\t");
		else if (src <= ncuda)
			fprintf(f, "CUDA %d\t", src-1);
		else
			fprintf(f, "OpenCL%d\t", src-ncuda-1);

		for (dst = 0; dst <= maxnode; dst++)
			fprintf(f, "%f\t", bandwidth_matrix[src][dst]);

		fprintf(f, "\n");
	}
}
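/* Hypothetical output for a machine with a single CUDA device (bandwidths
 * are in bytes/µs, i.e. MB/s):
 *
 *	from	to RAM		to CUDA 0
 *	RAM	0.000000	5012.337891
 *	CUDA 0	5231.113281	0.000000
 */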
static void generate_bus_bandwidth_file(void)
{
	if (!was_benchmarked)
		benchmark_all_gpu_devices();

	write_bus_bandwidth_file_content();
}

static void load_bus_bandwidth_file(void)
{
	int res;

	char path[256];
	get_bandwidth_path(path, 256);

	res = access(path, F_OK);
	if (res || !load_bus_bandwidth_file_content())
	{
		/* File does not exist yet or is bogus */
		generate_bus_bandwidth_file();
	}
}

/*
 *	Config
 */

static void get_config_path(char *path, size_t maxlen)
{
	get_bus_path("config", path, maxlen);
}
static void check_bus_config_file(void)
{
	int res;
	char path[256];

	get_config_path(path, 256);
	res = access(path, F_OK);
	if (res)
	{
		_STARPU_DISP("No performance model for the bus, calibrating...\n");
		starpu_force_bus_sampling();
		_STARPU_DISP("... done\n");
	}
	else
	{
		FILE *f;
		int ret, read_cuda = -1, read_opencl = -1;
		unsigned read_cpus = -1;
		struct _starpu_machine_config *config = _starpu_get_machine_config();

		/* Load the configuration stored in the file */
		f = fopen(path, "r");
		STARPU_ASSERT(f);
		_starpu_drop_comments(f);
		ret = fscanf(f, "%u\t", &read_cpus);
		STARPU_ASSERT(ret == 1);
		_starpu_drop_comments(f);
		ret = fscanf(f, "%d\t", &read_cuda);
		STARPU_ASSERT(ret == 1);
		_starpu_drop_comments(f);
		ret = fscanf(f, "%d\t", &read_opencl);
		STARPU_ASSERT(ret == 1);
		_starpu_drop_comments(f);
		fclose(f);

		/* Detect the current configuration */
		ncpus = _starpu_topology_get_nhwcpu(config);
#ifdef STARPU_USE_CUDA
		ncuda = _starpu_get_cuda_device_count();
#endif
#ifdef STARPU_USE_OPENCL
		nopencl = _starpu_opencl_get_device_count();
#endif

		/* Check whether both configurations match */
		if (read_cpus != ncpus)
		{
			fprintf(stderr, "Current configuration does not match the bus performance model (CPUS: (stored) %u != (current) %u), recalibrating...", read_cpus, ncpus);
			starpu_force_bus_sampling();
			fprintf(stderr, "done\n");
		}
		else if (read_cuda != ncuda)
		{
			fprintf(stderr, "Current configuration does not match the bus performance model (CUDA: (stored) %d != (current) %d), recalibrating...", read_cuda, ncuda);
			starpu_force_bus_sampling();
			fprintf(stderr, "done\n");
		}
		else if (read_opencl != nopencl)
		{
			fprintf(stderr, "Current configuration does not match the bus performance model (OpenCL: (stored) %d != (current) %d), recalibrating...", read_opencl, nopencl);
			starpu_force_bus_sampling();
			fprintf(stderr, "done\n");
		}
	}
}
static void write_bus_config_file_content(void)
{
	FILE *f;
	char path[256];

	STARPU_ASSERT(was_benchmarked);
	get_config_path(path, 256);

	f = fopen(path, "w+");
	STARPU_ASSERT(f);

	fprintf(f, "# Current configuration\n");
	fprintf(f, "%u # Number of CPUs\n", ncpus);
	fprintf(f, "%d # Number of CUDA devices\n", ncuda);
	fprintf(f, "%d # Number of OpenCL devices\n", nopencl);

	fclose(f);
}
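/* On a hypothetical machine with 8 CPU cores, 2 CUDA devices and 1 OpenCL
 * device, the generated config file reads:
 *
 *	# Current configuration
 *	8 # Number of CPUs
 *	2 # Number of CUDA devices
 *	1 # Number of OpenCL devices
 */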
static void generate_bus_config_file(void)
{
	if (!was_benchmarked)
		benchmark_all_gpu_devices();

	write_bus_config_file_content();
}

/*
 *	Generic
 */

void starpu_force_bus_sampling(void)
{
	_starpu_create_sampling_directory_if_needed();

	generate_bus_affinity_file();
	generate_bus_latency_file();
	generate_bus_bandwidth_file();
	generate_bus_config_file();
}

void _starpu_load_bus_performance_files(void)
{
	_starpu_create_sampling_directory_if_needed();

	check_bus_config_file();
	load_bus_affinity_file();
	load_bus_latency_file();
	load_bus_bandwidth_file();
}
/* Estimate the transfer time between two memory nodes (in µs) */
double _starpu_predict_transfer_time(unsigned src_node, unsigned dst_node, size_t size)
{
	double bandwidth = bandwidth_matrix[src_node][dst_node];
	double latency = latency_matrix[src_node][dst_node];
	struct starpu_machine_topology *topology = &_starpu_get_machine_config()->topology;

	return latency + (size/bandwidth)*2*(topology->ncudagpus+topology->nopenclgpus);
}
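/* The bandwidth term above is scaled by 2*(ncudagpus+nopenclgpus), presumably
 * a pessimistic allowance for all accelerators sharing the bus at once.  A
 * worked example with assumed figures: with a latency of 500 µs, a bandwidth
 * of 5000 bytes/µs and one CUDA device, transferring 32 MB costs about
 * 500 + (33554432/5000)*2*1 ≈ 13922 µs. */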