/* perfmodel_bus.c — StarPU bus performance-model sampling.
 * (Extraction artifact removed: the original paste carried the viewer's
 * filename/size banner and a run of concatenated line numbers here.) */
  1. /* StarPU --- Runtime system for heterogeneous multicore architectures.
  2. *
  3. * Copyright (C) 2009-2012 Université de Bordeaux 1
  4. * Copyright (C) 2010, 2011, 2012 Centre National de la Recherche Scientifique
  5. *
  6. * StarPU is free software; you can redistribute it and/or modify
  7. * it under the terms of the GNU Lesser General Public License as published by
  8. * the Free Software Foundation; either version 2.1 of the License, or (at
  9. * your option) any later version.
  10. *
  11. * StarPU is distributed in the hope that it will be useful, but
  12. * WITHOUT ANY WARRANTY; without even the implied warranty of
  13. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
  14. *
  15. * See the GNU Lesser General Public License in COPYING.LGPL for more details.
  16. */
  17. #ifdef STARPU_USE_CUDA
  18. #ifndef _GNU_SOURCE
  19. #define _GNU_SOURCE
  20. #endif
  21. #include <sched.h>
  22. #endif
  23. #include <unistd.h>
  24. #include <sys/time.h>
  25. #include <stdlib.h>
  26. #include <math.h>
  27. #include <starpu.h>
  28. #include <starpu_cuda.h>
  29. #include <starpu_opencl.h>
  30. #include <common/config.h>
  31. #include <core/workers.h>
  32. #include <core/perfmodel/perfmodel.h>
  33. #ifdef STARPU_USE_OPENCL
  34. #include <starpu_opencl.h>
  35. #endif
  36. #ifdef STARPU_HAVE_WINDOWS
  37. #include <windows.h>
  38. #endif
  39. #define SIZE (32*1024*1024*sizeof(char))
  40. #define NITER 128
  41. static void starpu_force_bus_sampling(void);
  42. /* timing is in µs per byte (i.e. slowness, inverse of bandwidth) */
/* One bandwidth measurement taken from a given CPU towards a given device.
 * Timings are in µs per byte (i.e. slowness, the inverse of bandwidth). */
struct dev_timing
{
	int cpu_id;         /* logical index of the CPU the measurement was taken from */
	double timing_htod; /* host-to-device slowness (µs/byte) */
	double timing_dtoh; /* device-to-host slowness (µs/byte) */
};
  49. static double bandwidth_matrix[STARPU_MAXNODES][STARPU_MAXNODES];
  50. static double latency_matrix[STARPU_MAXNODES][STARPU_MAXNODES];
  51. static unsigned was_benchmarked = 0;
  52. static unsigned ncpus = 0;
  53. static int ncuda = 0;
  54. static int nopencl = 0;
  55. /* Benchmarking the performance of the bus */
  56. #ifdef STARPU_USE_CUDA
  57. static int cuda_affinity_matrix[STARPU_MAXCUDADEVS][STARPU_MAXCPUS];
  58. static double cudadev_timing_htod[STARPU_MAXNODES] = {0.0};
  59. static double cudadev_timing_dtoh[STARPU_MAXNODES] = {0.0};
  60. #ifdef HAVE_CUDA_MEMCPY_PEER
  61. static double cudadev_timing_dtod[STARPU_MAXNODES][STARPU_MAXNODES] = {{0.0}};
  62. #endif
  63. static struct dev_timing cudadev_timing_per_cpu[STARPU_MAXNODES*STARPU_MAXCPUS];
  64. #endif
  65. #ifdef STARPU_USE_OPENCL
  66. static int opencl_affinity_matrix[STARPU_MAXOPENCLDEVS][STARPU_MAXCPUS];
  67. static double opencldev_timing_htod[STARPU_MAXNODES] = {0.0};
  68. static double opencldev_timing_dtoh[STARPU_MAXNODES] = {0.0};
  69. static struct dev_timing opencldev_timing_per_cpu[STARPU_MAXNODES*STARPU_MAXCPUS];
  70. #endif
  71. #ifdef STARPU_HAVE_HWLOC
  72. static hwloc_topology_t hwtopology;
  73. #endif
  74. #if defined(STARPU_USE_CUDA) || defined(STARPU_USE_OPENCL)
  75. #ifdef STARPU_USE_CUDA
/* Benchmark host<->device transfers for CUDA device 'dev' when the driving
 * thread runs on CPU 'cpu'.  Results (µs per byte) are stored into
 * dev_timing_per_cpu at slot (dev+1)*STARPU_MAXCPUS+cpu — slot 0 is
 * reserved for RAM, hence the dev+1 offset used throughout this file. */
static void measure_bandwidth_between_host_and_dev_on_cpu_with_cuda(int dev, int cpu, struct dev_timing *dev_timing_per_cpu)
{
	struct _starpu_machine_config *config = _starpu_get_machine_config();
	_starpu_bind_thread_on_cpu(config, cpu);
	size_t size = SIZE;
	/* Initialize CUDA context on the device */
	/* We do not need to enable OpenGL interoperability at this point,
	 * since we cleanly shutdown CUDA before returning. */
	cudaSetDevice(dev);
	/* hack to avoid third party libs to rebind threads */
	_starpu_bind_thread_on_cpu(config, cpu);
	/* hack to force the initialization */
	cudaFree(0);
	/* hack to avoid third party libs to rebind threads */
	_starpu_bind_thread_on_cpu(config, cpu);
	/* Get the maximum size which can be allocated on the device */
	struct cudaDeviceProp prop;
	cudaError_t cures;
	cures = cudaGetDeviceProperties(&prop, dev);
	if (STARPU_UNLIKELY(cures)) STARPU_CUDA_REPORT_ERROR(cures);
	/* Cap the benchmark buffer to a quarter of the device memory */
	if (size > prop.totalGlobalMem/4) size = prop.totalGlobalMem/4;
	/* Allocate a buffer on the device */
	unsigned char *d_buffer;
	cudaMalloc((void **)&d_buffer, size);
	STARPU_ASSERT(d_buffer);
	/* hack to avoid third party libs to rebind threads */
	_starpu_bind_thread_on_cpu(config, cpu);
	/* Allocate a (pinned) buffer on the host */
	unsigned char *h_buffer;
	cures = cudaHostAlloc((void **)&h_buffer, size, 0);
	STARPU_ASSERT(cures == cudaSuccess);
	/* hack to avoid third party libs to rebind threads */
	_starpu_bind_thread_on_cpu(config, cpu);
	/* Fill them */
	memset(h_buffer, 0, size);
	cudaMemset(d_buffer, 0, size);
	/* hack to avoid third party libs to rebind threads */
	_starpu_bind_thread_on_cpu(config, cpu);
	unsigned iter;
	double timing;
	struct timeval start;
	struct timeval end;
	/* Measure upload (host-to-device) bandwidth */
	gettimeofday(&start, NULL);
	for (iter = 0; iter < NITER; iter++)
	{
		cudaMemcpy(d_buffer, h_buffer, size, cudaMemcpyHostToDevice);
		cudaThreadSynchronize();
	}
	gettimeofday(&end, NULL);
	timing = (double)((end.tv_sec - start.tv_sec)*1000000 + (end.tv_usec - start.tv_usec));
	/* Average µs per byte over NITER transfers */
	dev_timing_per_cpu[(dev+1)*STARPU_MAXCPUS+cpu].timing_htod = timing/NITER/size;
	/* Measure download (device-to-host) bandwidth */
	gettimeofday(&start, NULL);
	for (iter = 0; iter < NITER; iter++)
	{
		cudaMemcpy(h_buffer, d_buffer, size, cudaMemcpyDeviceToHost);
		cudaThreadSynchronize();
	}
	gettimeofday(&end, NULL);
	timing = (double)((end.tv_sec - start.tv_sec)*1000000 + (end.tv_usec - start.tv_usec));
	dev_timing_per_cpu[(dev+1)*STARPU_MAXCPUS+cpu].timing_dtoh = timing/NITER/size;
	/* Free buffers and tear the CUDA context down cleanly */
	cudaFreeHost(h_buffer);
	cudaFree(d_buffer);
	cudaThreadExit();
}
  143. #ifdef HAVE_CUDA_MEMCPY_PEER
/* Benchmark device-to-device transfers between CUDA devices 'src' and
 * 'dst', trying to enable peer (GPU-Direct) access in both directions
 * first.  The result (µs per byte) goes into
 * cudadev_timing_dtod[src+1][dst+1] (node 0 is RAM, hence the +1). */
static void measure_bandwidth_between_dev_and_dev_cuda(int src, int dst)
{
	size_t size = SIZE;
	int can;
	/* Get the maximum size which can be allocated on the device:
	 * cap the buffer to a quarter of the memory of BOTH devices */
	struct cudaDeviceProp prop;
	cudaError_t cures;
	cures = cudaGetDeviceProperties(&prop, src);
	if (STARPU_UNLIKELY(cures)) STARPU_CUDA_REPORT_ERROR(cures);
	if (size > prop.totalGlobalMem/4) size = prop.totalGlobalMem/4;
	cures = cudaGetDeviceProperties(&prop, dst);
	if (STARPU_UNLIKELY(cures)) STARPU_CUDA_REPORT_ERROR(cures);
	if (size > prop.totalGlobalMem/4) size = prop.totalGlobalMem/4;
	/* Initialize CUDA context on the source */
	/* We do not need to enable OpenGL interoperability at this point,
	 * since we cleanly shutdown CUDA before returning. */
	cudaSetDevice(src);
	/* Try to enable direct GPU-GPU access unless disabled by the user */
	if (starpu_get_env_number("STARPU_DISABLE_CUDA_GPU_GPU_DIRECT") <= 0) {
		cures = cudaDeviceCanAccessPeer(&can, src, dst);
		if (!cures && can) {
			cures = cudaDeviceEnablePeerAccess(dst, 0);
			if (!cures)
				/* NOTE(review): this branch enables src's access to
				 * dst but prints "dst -> src", while the symmetric
				 * branch below prints "src -> dst" — confirm which
				 * argument order is intended. */
				_STARPU_DISP("GPU-Direct %d -> %d\n", dst, src);
		}
	}
	/* Allocate a buffer on the device */
	unsigned char *s_buffer;
	cudaMalloc((void **)&s_buffer, size);
	STARPU_ASSERT(s_buffer);
	cudaMemset(s_buffer, 0, size);
	/* Initialize CUDA context on the destination */
	/* We do not need to enable OpenGL interoperability at this point,
	 * since we cleanly shutdown CUDA before returning. */
	cudaSetDevice(dst);
	if (starpu_get_env_number("STARPU_DISABLE_CUDA_GPU_GPU_DIRECT") <= 0) {
		cures = cudaDeviceCanAccessPeer(&can, dst, src);
		if (!cures && can) {
			cures = cudaDeviceEnablePeerAccess(src, 0);
			if (!cures)
				_STARPU_DISP("GPU-Direct %d -> %d\n", src, dst);
		}
	}
	/* Allocate a buffer on the device */
	unsigned char *d_buffer;
	cudaMalloc((void **)&d_buffer, size);
	STARPU_ASSERT(d_buffer);
	cudaMemset(d_buffer, 0, size);
	unsigned iter;
	double timing;
	struct timeval start;
	struct timeval end;
	/* Measure src -> dst bandwidth */
	gettimeofday(&start, NULL);
	for (iter = 0; iter < NITER; iter++)
	{
		cudaMemcpyPeer(d_buffer, dst, s_buffer, src, size);
		cudaThreadSynchronize();
	}
	gettimeofday(&end, NULL);
	timing = (double)((end.tv_sec - start.tv_sec)*1000000 + (end.tv_usec - start.tv_usec));
	/* Average µs per byte over NITER transfers */
	cudadev_timing_dtod[src+1][dst+1] = timing/NITER/size;
	/* Free buffers (d_buffer on dst, s_buffer on src) */
	cudaFree(d_buffer);
	cudaSetDevice(src);
	cudaFree(s_buffer);
	cudaThreadExit();
}
  211. #endif
  212. #endif
  213. #ifdef STARPU_USE_OPENCL
/* Benchmark host<->device transfers for OpenCL device 'dev' from CPU
 * 'cpu'.  Results (µs per byte) are stored into dev_timing_per_cpu at
 * slot (dev+1)*STARPU_MAXCPUS+cpu.  If the OpenCL context for the
 * device was not initialized yet, it is created here and destroyed
 * again before returning. */
static void measure_bandwidth_between_host_and_dev_on_cpu_with_opencl(int dev, int cpu, struct dev_timing *dev_timing_per_cpu)
{
	cl_context context;
	cl_command_queue queue;
	cl_int err=0;
	size_t size = SIZE;
	int not_initialized;
	struct _starpu_machine_config *config = _starpu_get_machine_config();
	_starpu_bind_thread_on_cpu(config, cpu);
	/* Is the context already initialised ? */
	starpu_opencl_get_context(dev, &context);
	not_initialized = (context == NULL);
	if (not_initialized == 1)
		_starpu_opencl_init_context(dev);
	/* Get context and queue */
	starpu_opencl_get_context(dev, &context);
	starpu_opencl_get_queue(dev, &queue);
	/* Get the maximum size which can be allocated on the device */
	cl_device_id device;
	cl_ulong maxMemAllocSize;
	starpu_opencl_get_device(dev, &device);
	err = clGetDeviceInfo(device, CL_DEVICE_MAX_MEM_ALLOC_SIZE, sizeof(maxMemAllocSize), &maxMemAllocSize, NULL);
	if (err != CL_SUCCESS) STARPU_OPENCL_REPORT_ERROR(err);
	if (size > (size_t)maxMemAllocSize/4) size = maxMemAllocSize/4;
	if (_starpu_opencl_get_device_type(dev) == CL_DEVICE_TYPE_CPU)
	{
		/* Let's not use too much RAM when running OpenCL on a CPU: it
		 * would make the OS swap like crazy. */
		size /= 2;
	}
	/* hack to avoid third party libs to rebind threads */
	_starpu_bind_thread_on_cpu(config, cpu);
	/* Allocate a buffer on the device */
	cl_mem d_buffer;
	d_buffer = clCreateBuffer(context, CL_MEM_READ_WRITE, size, NULL, &err);
	if (err != CL_SUCCESS) STARPU_OPENCL_REPORT_ERROR(err);
	/* hack to avoid third party libs to rebind threads */
	_starpu_bind_thread_on_cpu(config, cpu);
	/* Allocate a buffer on the host */
	unsigned char *h_buffer;
	h_buffer = (unsigned char *)malloc(size);
	STARPU_ASSERT(h_buffer);
	/* hack to avoid third party libs to rebind threads */
	_starpu_bind_thread_on_cpu(config, cpu);
	/* Fill them; a first blocking write also warms the path up */
	memset(h_buffer, 0, size);
	err = clEnqueueWriteBuffer(queue, d_buffer, CL_TRUE, 0, size, h_buffer, 0, NULL, NULL);
	if (err != CL_SUCCESS) STARPU_OPENCL_REPORT_ERROR(err);
	/* hack to avoid third party libs to rebind threads */
	_starpu_bind_thread_on_cpu(config, cpu);
	unsigned iter;
	double timing;
	struct timeval start;
	struct timeval end;
	/* Measure upload (host-to-device) bandwidth; CL_TRUE makes each
	 * enqueue blocking, so no explicit finish is needed */
	gettimeofday(&start, NULL);
	for (iter = 0; iter < NITER; iter++)
	{
		err = clEnqueueWriteBuffer(queue, d_buffer, CL_TRUE, 0, size, h_buffer, 0, NULL, NULL);
		if (err != CL_SUCCESS) STARPU_OPENCL_REPORT_ERROR(err);
	}
	gettimeofday(&end, NULL);
	timing = (double)((end.tv_sec - start.tv_sec)*1000000 + (end.tv_usec - start.tv_usec));
	/* Average µs per byte over NITER transfers */
	dev_timing_per_cpu[(dev+1)*STARPU_MAXCPUS+cpu].timing_htod = timing/NITER/size;
	/* Measure download (device-to-host) bandwidth */
	gettimeofday(&start, NULL);
	for (iter = 0; iter < NITER; iter++)
	{
		err = clEnqueueReadBuffer(queue, d_buffer, CL_TRUE, 0, size, h_buffer, 0, NULL, NULL);
		if (err != CL_SUCCESS) STARPU_OPENCL_REPORT_ERROR(err);
	}
	gettimeofday(&end, NULL);
	timing = (double)((end.tv_sec - start.tv_sec)*1000000 + (end.tv_usec - start.tv_usec));
	dev_timing_per_cpu[(dev+1)*STARPU_MAXCPUS+cpu].timing_dtoh = timing/NITER/size;
	/* Free buffers */
	clReleaseMemObject(d_buffer);
	free(h_buffer);
	/* Uninitiliaze OpenCL context on the device if we created it here */
	if (not_initialized == 1)
		_starpu_opencl_deinit_context(dev);
}
  295. #endif
  296. /* NB: we want to sort the bandwidth by DECREASING order */
  297. static int compar_dev_timing(const void *left_dev_timing, const void *right_dev_timing)
  298. {
  299. const struct dev_timing *left = (const struct dev_timing *)left_dev_timing;
  300. const struct dev_timing *right = (const struct dev_timing *)right_dev_timing;
  301. double left_dtoh = left->timing_dtoh;
  302. double left_htod = left->timing_htod;
  303. double right_dtoh = right->timing_dtoh;
  304. double right_htod = right->timing_htod;
  305. double timing_sum2_left = left_dtoh*left_dtoh + left_htod*left_htod;
  306. double timing_sum2_right = right_dtoh*right_dtoh + right_htod*right_htod;
  307. /* it's for a decreasing sorting */
  308. return (timing_sum2_left > timing_sum2_right);
  309. }
  310. #ifdef STARPU_HAVE_HWLOC
  311. static int find_numa_node(hwloc_obj_t obj)
  312. {
  313. STARPU_ASSERT(obj);
  314. hwloc_obj_t current = obj;
  315. while (current->depth != HWLOC_OBJ_NODE)
  316. {
  317. current = current->parent;
  318. /* If we don't find a "node" obj before the root, this means
  319. * hwloc does not know whether there are numa nodes or not, so
  320. * we should not use a per-node sampling in that case. */
  321. STARPU_ASSERT(current);
  322. }
  323. STARPU_ASSERT(current->depth == HWLOC_OBJ_NODE);
  324. return current->logical_index;
  325. }
  326. #endif
/* Measure host<->device bandwidth for device 'dev' (of kind 'type',
 * "CUDA" or "OpenCL") from every CPU, filling dev_timing_per_cpu.
 * With hwloc, measurements are done once per NUMA node and reused for
 * every CPU of that node; without NUMA information, every (CPU, GPU)
 * pair is measured individually, which is slower. */
static void measure_bandwidth_between_cpus_and_dev(int dev, struct dev_timing *dev_timing_per_cpu, char *type)
{
	/* Either we have hwloc and we measure the bandwith between each GPU
	 * and each NUMA node, or we don't have such NUMA information and we
	 * measure the bandwith for each pair of (CPU, GPU), which is slower.
	 * */
#ifdef STARPU_HAVE_HWLOC
	int cpu_depth = hwloc_get_type_depth(hwtopology, HWLOC_OBJ_CORE);
	/* NOTE(review): hwloc_get_nbobjs_by_depth() expects a depth, but is
	 * given HWLOC_OBJ_NODE (an object type enumerator); this presumably
	 * should be hwloc_get_nbobjs_by_type() — confirm against the hwloc
	 * API before relying on nnuma_nodes. */
	int nnuma_nodes = hwloc_get_nbobjs_by_depth(hwtopology, HWLOC_OBJ_NODE);
	/* If no NUMA node was found, we assume that we have a single memory
	 * bank. */
	const unsigned no_node_obj_was_found = (nnuma_nodes == 0);
	/* Per-NUMA-node caches of the measurements, allocated only when NUMA
	 * information is available */
	unsigned *is_available_per_numa_node = NULL;
	double *dev_timing_htod_per_numa_node = NULL;
	double *dev_timing_dtoh_per_numa_node = NULL;
	if (!no_node_obj_was_found)
	{
		is_available_per_numa_node = (unsigned *)malloc(nnuma_nodes * sizeof(unsigned));
		STARPU_ASSERT(is_available_per_numa_node);
		dev_timing_htod_per_numa_node = (double *)malloc(nnuma_nodes * sizeof(double));
		STARPU_ASSERT(dev_timing_htod_per_numa_node);
		dev_timing_dtoh_per_numa_node = (double *)malloc(nnuma_nodes * sizeof(double));
		STARPU_ASSERT(dev_timing_dtoh_per_numa_node);
		memset(is_available_per_numa_node, 0, nnuma_nodes*sizeof(unsigned));
	}
#endif
	unsigned cpu;
	for (cpu = 0; cpu < ncpus; cpu++)
	{
		dev_timing_per_cpu[(dev+1)*STARPU_MAXCPUS+cpu].cpu_id = cpu;
#ifdef STARPU_HAVE_HWLOC
		int numa_id = 0;
		if (!no_node_obj_was_found)
		{
			hwloc_obj_t obj = hwloc_get_obj_by_depth(hwtopology, cpu_depth, cpu);
			numa_id = find_numa_node(obj);
			if (is_available_per_numa_node[numa_id])
			{
				/* We reuse the previous numbers for that NUMA node */
				dev_timing_per_cpu[(dev+1)*STARPU_MAXCPUS+cpu].timing_htod =
					dev_timing_htod_per_numa_node[numa_id];
				dev_timing_per_cpu[(dev+1)*STARPU_MAXCPUS+cpu].timing_dtoh =
					dev_timing_dtoh_per_numa_node[numa_id];
				continue;
			}
		}
#endif
#ifdef STARPU_USE_CUDA
		if (strncmp(type, "CUDA", 4) == 0)
			measure_bandwidth_between_host_and_dev_on_cpu_with_cuda(dev, cpu, dev_timing_per_cpu);
#endif
#ifdef STARPU_USE_OPENCL
		if (strncmp(type, "OpenCL", 6) == 0)
			measure_bandwidth_between_host_and_dev_on_cpu_with_opencl(dev, cpu, dev_timing_per_cpu);
#endif
#ifdef STARPU_HAVE_HWLOC
		if (!no_node_obj_was_found && !is_available_per_numa_node[numa_id])
		{
			/* Save the results for that NUMA node */
			dev_timing_htod_per_numa_node[numa_id] =
				dev_timing_per_cpu[(dev+1)*STARPU_MAXCPUS+cpu].timing_htod;
			dev_timing_dtoh_per_numa_node[numa_id] =
				dev_timing_per_cpu[(dev+1)*STARPU_MAXCPUS+cpu].timing_dtoh;
			is_available_per_numa_node[numa_id] = 1;
		}
#endif
	}
#ifdef STARPU_HAVE_HWLOC
	if (!no_node_obj_was_found)
	{
		free(is_available_per_numa_node);
		free(dev_timing_htod_per_numa_node);
		free(dev_timing_dtoh_per_numa_node);
	}
#endif /* STARPU_HAVE_HWLOC */
}
  403. static void measure_bandwidth_between_host_and_dev(int dev, double *dev_timing_htod, double *dev_timing_dtoh,
  404. struct dev_timing *dev_timing_per_cpu, char *type)
  405. {
  406. measure_bandwidth_between_cpus_and_dev(dev, dev_timing_per_cpu, type);
  407. /* sort the results */
  408. qsort(&(dev_timing_per_cpu[(dev+1)*STARPU_MAXCPUS]), ncpus,
  409. sizeof(struct dev_timing),
  410. compar_dev_timing);
  411. #ifdef STARPU_VERBOSE
  412. unsigned cpu;
  413. for (cpu = 0; cpu < ncpus; cpu++)
  414. {
  415. unsigned current_cpu = dev_timing_per_cpu[(dev+1)*STARPU_MAXCPUS+cpu].cpu_id;
  416. double bandwidth_dtoh = dev_timing_per_cpu[(dev+1)*STARPU_MAXCPUS+cpu].timing_dtoh;
  417. double bandwidth_htod = dev_timing_per_cpu[(dev+1)*STARPU_MAXCPUS+cpu].timing_htod;
  418. double bandwidth_sum2 = bandwidth_dtoh*bandwidth_dtoh + bandwidth_htod*bandwidth_htod;
  419. _STARPU_DISP("(%10s) BANDWIDTH GPU %d CPU %u - htod %f - dtoh %f - %f\n", type, dev, current_cpu, bandwidth_htod, bandwidth_dtoh, sqrt(bandwidth_sum2));
  420. }
  421. unsigned best_cpu = dev_timing_per_cpu[(dev+1)*STARPU_MAXCPUS+0].cpu_id;
  422. _STARPU_DISP("(%10s) BANDWIDTH GPU %d BEST CPU %u\n", type, dev, best_cpu);
  423. #endif
  424. /* The results are sorted in a decreasing order, so that the best
  425. * measurement is currently the first entry. */
  426. dev_timing_dtoh[dev+1] = dev_timing_per_cpu[(dev+1)*STARPU_MAXCPUS+0].timing_dtoh;
  427. dev_timing_htod[dev+1] = dev_timing_per_cpu[(dev+1)*STARPU_MAXCPUS+0].timing_htod;
  428. }
  429. #endif /* defined(STARPU_USE_CUDA) || defined(STARPU_USE_OPENCL) */
/* Run the full bus benchmark: measure host<->device bandwidth for every
 * CUDA and OpenCL device (and CUDA device<->device when memcpy-peer is
 * available), then set was_benchmarked.  The calling thread's CPU
 * binding is saved first and restored afterwards, since the individual
 * measurements rebind the thread to each CPU in turn. */
static void benchmark_all_gpu_devices(void)
{
#if defined(STARPU_USE_CUDA) || defined(STARPU_USE_OPENCL)
	int i;
#endif
#ifdef HAVE_CUDA_MEMCPY_PEER
	int j;
#endif
	_STARPU_DEBUG("Benchmarking the speed of the bus\n");
#ifdef STARPU_HAVE_HWLOC
	hwloc_topology_init(&hwtopology);
	hwloc_topology_load(hwtopology);
#endif
#ifdef STARPU_HAVE_HWLOC
	/* Save the current thread binding so it can be restored below */
	hwloc_bitmap_t former_cpuset = hwloc_bitmap_alloc();
	hwloc_get_cpubind(hwtopology, former_cpuset, HWLOC_CPUBIND_THREAD);
#elif __linux__
	/* Save the current cpu binding */
	cpu_set_t former_process_affinity;
	int ret;
	ret = sched_getaffinity(0, sizeof(former_process_affinity), &former_process_affinity);
	if (ret)
	{
		perror("sched_getaffinity");
		STARPU_ABORT();
	}
#else
#warning Missing binding support, StarPU will not be able to properly benchmark NUMA topology
#endif
	struct _starpu_machine_config *config = _starpu_get_machine_config();
	ncpus = _starpu_topology_get_nhwcpu(config);
#ifdef STARPU_USE_CUDA
	ncuda = _starpu_get_cuda_device_count();
	for (i = 0; i < ncuda; i++)
	{
		_STARPU_DISP("CUDA %d...\n", i);
		/* measure bandwidth between Host and Device i */
		measure_bandwidth_between_host_and_dev(i, cudadev_timing_htod, cudadev_timing_dtoh, cudadev_timing_per_cpu, "CUDA");
	}
#ifdef HAVE_CUDA_MEMCPY_PEER
	/* Measure every ordered pair of distinct CUDA devices */
	for (i = 0; i < ncuda; i++)
		for (j = 0; j < ncuda; j++)
			if (i != j)
			{
				_STARPU_DISP("CUDA %d -> %d...\n", i, j);
				/* measure bandwidth between Device i and Device j */
				measure_bandwidth_between_dev_and_dev_cuda(i, j);
			}
#endif
#endif
#ifdef STARPU_USE_OPENCL
	nopencl = _starpu_opencl_get_device_count();
	for (i = 0; i < nopencl; i++)
	{
		_STARPU_DISP("OpenCL %d...\n", i);
		/* measure bandwith between Host and Device i */
		measure_bandwidth_between_host_and_dev(i, opencldev_timing_htod, opencldev_timing_dtoh, opencldev_timing_per_cpu, "OpenCL");
	}
#endif
#ifdef STARPU_HAVE_HWLOC
	/* Restore the former thread binding */
	hwloc_set_cpubind(hwtopology, former_cpuset, HWLOC_CPUBIND_THREAD);
#elif __linux__
	/* Restore the former affinity */
	ret = sched_setaffinity(0, sizeof(former_process_affinity), &former_process_affinity);
	if (ret)
	{
		perror("sched_setaffinity");
		STARPU_ABORT();
	}
#endif
#ifdef STARPU_HAVE_HWLOC
	hwloc_topology_destroy(hwtopology);
#endif
	_STARPU_DEBUG("Benchmarking the speed of the bus is done.\n");
	was_benchmarked = 1;
}
  506. static void get_bus_path(const char *type, char *path, size_t maxlen)
  507. {
  508. _starpu_get_perf_model_dir_bus(path, maxlen);
  509. char hostname[32];
  510. char *forced_hostname = getenv("STARPU_HOSTNAME");
  511. if (forced_hostname && forced_hostname[0])
  512. snprintf(hostname, sizeof(hostname), "%s", forced_hostname);
  513. else
  514. gethostname(hostname, sizeof(hostname));
  515. strncat(path, hostname, maxlen);
  516. strncat(path, ".", maxlen);
  517. strncat(path, type, maxlen);
  518. }
  519. /*
  520. * Affinity
  521. */
/* Build the path of the per-host bus "affinity" sampling file. */
static void get_affinity_path(char *path, size_t maxlen)
{
	get_bus_path("affinity", path, maxlen);
}
/* Parse the affinity sampling file into cuda_affinity_matrix /
 * opencl_affinity_matrix.  The file must exist (the caller generates it
 * first if needed); format errors abort via STARPU_ASSERT. */
static void load_bus_affinity_file_content(void)
{
#if defined(STARPU_USE_CUDA) || defined(STARPU_USE_OPENCL)
	FILE *f;
	char path[256];
	get_affinity_path(path, 256);
	f = fopen(path, "r");
	STARPU_ASSERT(f);
	struct _starpu_machine_config *config = _starpu_get_machine_config();
	ncpus = _starpu_topology_get_nhwcpu(config);
	int gpu;
#ifdef STARPU_USE_CUDA
	ncuda = _starpu_get_cuda_device_count();
	for (gpu = 0; gpu < ncuda; gpu++)
	{
		int ret;
		int dummy;
		_starpu_drop_comments(f);
		/* Each line starts with the GPU index, which must match */
		ret = fscanf(f, "%d\t", &dummy);
		STARPU_ASSERT(ret == 1);
		STARPU_ASSERT(dummy == gpu);
		unsigned cpu;
		for (cpu = 0; cpu < ncpus; cpu++)
		{
			ret = fscanf(f, "%d\t", &cuda_affinity_matrix[gpu][cpu]);
			STARPU_ASSERT(ret == 1);
		}
		ret = fscanf(f, "\n");
		STARPU_ASSERT(ret == 0);
	}
#endif /* STARPU_USE_CUDA */
#ifdef STARPU_USE_OPENCL
	nopencl = _starpu_opencl_get_device_count();
	for (gpu = 0; gpu < nopencl; gpu++)
	{
		int ret;
		int dummy;
		_starpu_drop_comments(f);
		ret = fscanf(f, "%d\t", &dummy);
		STARPU_ASSERT(ret == 1);
		STARPU_ASSERT(dummy == gpu);
		unsigned cpu;
		for (cpu = 0; cpu < ncpus; cpu++)
		{
			ret = fscanf(f, "%d\t", &opencl_affinity_matrix[gpu][cpu]);
			STARPU_ASSERT(ret == 1);
		}
		ret = fscanf(f, "\n");
		STARPU_ASSERT(ret == 0);
	}
#endif /* STARPU_USE_OPENCL */
	fclose(f);
#endif /* STARPU_USE_CUDA || STARPU_USE_OPENCL */
}
/* Dump the benchmarked CPU preference order of every GPU into the
 * affinity sampling file (one line per GPU, CPUs sorted best-first by
 * the earlier qsort).  Requires benchmark_all_gpu_devices() to have run. */
static void write_bus_affinity_file_content(void)
{
	STARPU_ASSERT(was_benchmarked);
#if defined(STARPU_USE_CUDA) || defined(STARPU_USE_OPENCL)
	FILE *f;
	char path[256];
	get_affinity_path(path, 256);
	f = fopen(path, "w+");
	if (!f)
	{
		perror("fopen write_buf_affinity_file_content");
		_STARPU_DISP("path '%s'\n", path);
		fflush(stderr);
		STARPU_ABORT();
	}
	unsigned cpu;
	int gpu;
	/* Header line */
	fprintf(f, "# GPU\t");
	for (cpu = 0; cpu < ncpus; cpu++)
		fprintf(f, "CPU%u\t", cpu);
	fprintf(f, "\n");
#ifdef STARPU_USE_CUDA
	for (gpu = 0; gpu < ncuda; gpu++)
	{
		fprintf(f, "%d\t", gpu);
		for (cpu = 0; cpu < ncpus; cpu++)
		{
			fprintf(f, "%d\t", cudadev_timing_per_cpu[(gpu+1)*STARPU_MAXCPUS+cpu].cpu_id);
		}
		fprintf(f, "\n");
	}
#endif
#ifdef STARPU_USE_OPENCL
	for (gpu = 0; gpu < nopencl; gpu++)
	{
		fprintf(f, "%d\t", gpu);
		for (cpu = 0; cpu < ncpus; cpu++)
		{
			fprintf(f, "%d\t", opencldev_timing_per_cpu[(gpu+1)*STARPU_MAXCPUS+cpu].cpu_id);
		}
		fprintf(f, "\n");
	}
#endif
	fclose(f);
#endif
}
  626. static void generate_bus_affinity_file(void)
  627. {
  628. if (!was_benchmarked)
  629. benchmark_all_gpu_devices();
  630. write_bus_affinity_file_content();
  631. }
  632. static void load_bus_affinity_file(void)
  633. {
  634. int res;
  635. char path[256];
  636. get_affinity_path(path, 256);
  637. res = access(path, F_OK);
  638. if (res)
  639. {
  640. /* File does not exist yet */
  641. generate_bus_affinity_file();
  642. }
  643. load_bus_affinity_file_content();
  644. }
  645. #ifdef STARPU_USE_CUDA
  646. int *_starpu_get_cuda_affinity_vector(unsigned gpuid)
  647. {
  648. return cuda_affinity_matrix[gpuid];
  649. }
  650. #endif /* STARPU_USE_CUDA */
  651. #ifdef STARPU_USE_OPENCL
  652. int *_starpu_get_opencl_affinity_vector(unsigned gpuid)
  653. {
  654. return opencl_affinity_matrix[gpuid];
  655. }
  656. #endif /* STARPU_USE_OPENCL */
/* Print the loaded GPU->CPU affinity matrices (one line per GPU, CPUs
 * in preference order) to stream 'f'. */
void starpu_bus_print_affinity(FILE *f)
{
#if defined(STARPU_USE_CUDA) || defined(STARPU_USE_OPENCL)
	unsigned cpu;
	int gpu;
#endif
	fprintf(f, "# GPU\tCPU in preference order (logical index)\n");
#ifdef STARPU_USE_CUDA
	fprintf(f, "# CUDA\n");
	for(gpu = 0 ; gpu<ncuda ; gpu++)
	{
		fprintf(f, "%d\t", gpu);
		for (cpu = 0; cpu < ncpus; cpu++)
		{
			fprintf(f, "%d\t", cuda_affinity_matrix[gpu][cpu]);
		}
		fprintf(f, "\n");
	}
#endif
#ifdef STARPU_USE_OPENCL
	fprintf(f, "# OpenCL\n");
	for(gpu = 0 ; gpu<nopencl ; gpu++)
	{
		fprintf(f, "%d\t", gpu);
		for (cpu = 0; cpu < ncpus; cpu++)
		{
			fprintf(f, "%d\t", opencl_affinity_matrix[gpu][cpu]);
		}
		fprintf(f, "\n");
	}
#endif
}
  689. /*
  690. * Latency
  691. */
/* Build the path of the per-host bus "latency" sampling file. */
static void get_latency_path(char *path, size_t maxlen)
{
	get_bus_path("latency", path, maxlen);
}
  696. static int load_bus_latency_file_content(void)
  697. {
  698. int n;
  699. unsigned src, dst;
  700. FILE *f;
  701. char path[256];
  702. get_latency_path(path, 256);
  703. f = fopen(path, "r");
  704. STARPU_ASSERT(f);
  705. for (src = 0; src < STARPU_MAXNODES; src++)
  706. {
  707. _starpu_drop_comments(f);
  708. for (dst = 0; dst < STARPU_MAXNODES; dst++)
  709. {
  710. double latency;
  711. n = fscanf(f, "%lf", &latency);
  712. if (n != 1)
  713. {
  714. fclose(f);
  715. return 0;
  716. }
  717. n = getc(f);
  718. if (n != '\t')
  719. {
  720. fclose(f);
  721. return 0;
  722. }
  723. latency_matrix[src][dst] = latency;
  724. }
  725. n = getc(f);
  726. if (n != '\n')
  727. {
  728. fclose(f);
  729. return 0;
  730. }
  731. }
  732. fclose(f);
  733. return 1;
  734. }
  735. static void write_bus_latency_file_content(void)
  736. {
  737. int src, dst, maxnode;
  738. FILE *f;
  739. STARPU_ASSERT(was_benchmarked);
  740. char path[256];
  741. get_latency_path(path, 256);
  742. f = fopen(path, "w+");
  743. if (!f)
  744. {
  745. perror("fopen write_bus_latency_file_content");
  746. _STARPU_DISP("path '%s'\n", path);
  747. fflush(stderr);
  748. STARPU_ABORT();
  749. }
  750. fprintf(f, "# ");
  751. for (dst = 0; dst < STARPU_MAXNODES; dst++)
  752. fprintf(f, "to %d\t\t", dst);
  753. fprintf(f, "\n");
  754. maxnode = ncuda;
  755. #ifdef STARPU_USE_OPENCL
  756. maxnode += nopencl;
  757. #endif
  758. for (src = 0; src < STARPU_MAXNODES; src++)
  759. {
  760. for (dst = 0; dst < STARPU_MAXNODES; dst++)
  761. {
  762. double latency;
  763. if ((src > maxnode) || (dst > maxnode))
  764. {
  765. /* convention */
  766. latency = NAN;
  767. }
  768. else if (src == dst)
  769. {
  770. latency = 0.0;
  771. }
  772. else
  773. {
  774. /* µs */
  775. latency = ((src && dst)?2000.0:500.0);
  776. }
  777. fprintf(f, "%f\t", latency);
  778. }
  779. fprintf(f, "\n");
  780. }
  781. fclose(f);
  782. }
  783. static void generate_bus_latency_file(void)
  784. {
  785. if (!was_benchmarked)
  786. benchmark_all_gpu_devices();
  787. write_bus_latency_file_content();
  788. }
  789. static void load_bus_latency_file(void)
  790. {
  791. int res;
  792. char path[256];
  793. get_latency_path(path, 256);
  794. res = access(path, F_OK);
  795. if (res || !load_bus_latency_file_content())
  796. {
  797. /* File does not exist yet or is bogus */
  798. generate_bus_latency_file();
  799. }
  800. }
  801. /*
  802. * Bandwidth
  803. */
  804. static void get_bandwidth_path(char *path, size_t maxlen)
  805. {
  806. get_bus_path("bandwidth", path, maxlen);
  807. }
  808. static int load_bus_bandwidth_file_content(void)
  809. {
  810. int n;
  811. unsigned src, dst;
  812. FILE *f;
  813. char path[256];
  814. get_bandwidth_path(path, 256);
  815. f = fopen(path, "r");
  816. if (!f)
  817. {
  818. perror("fopen load_bus_bandwidth_file_content");
  819. _STARPU_DISP("path '%s'\n", path);
  820. fflush(stderr);
  821. STARPU_ABORT();
  822. }
  823. for (src = 0; src < STARPU_MAXNODES; src++)
  824. {
  825. _starpu_drop_comments(f);
  826. for (dst = 0; dst < STARPU_MAXNODES; dst++)
  827. {
  828. double bandwidth;
  829. n = fscanf(f, "%lf", &bandwidth);
  830. if (n != 1)
  831. {
  832. _STARPU_DISP("Error while reading sampling file <%s>. Expected a number\n", path);
  833. fclose(f);
  834. return 0;
  835. }
  836. n = getc(f);
  837. if (n != '\t')
  838. {
  839. fclose(f);
  840. return 0;
  841. }
  842. bandwidth_matrix[src][dst] = bandwidth;
  843. }
  844. n = getc(f);
  845. if (n != '\n')
  846. {
  847. fclose(f);
  848. return 0;
  849. }
  850. }
  851. fclose(f);
  852. return 1;
  853. }
/* Write the STARPU_MAXNODES x STARPU_MAXNODES bandwidth matrix to the
 * sampling file.  Node 0 is RAM, nodes 1..ncuda are CUDA devices and the
 * following ones OpenCL devices (same layout as the latency file). */
static void write_bus_bandwidth_file_content(void)
{
int src, dst, maxnode;
FILE *f;
STARPU_ASSERT(was_benchmarked);
char path[256];
get_bandwidth_path(path, 256);
f = fopen(path, "w+");
STARPU_ASSERT(f);
/* Header row naming each destination column. */
fprintf(f, "# ");
for (dst = 0; dst < STARPU_MAXNODES; dst++)
fprintf(f, "to %d\t\t", dst);
fprintf(f, "\n");
/* Highest node index actually backed by a device. */
maxnode = ncuda;
#ifdef STARPU_USE_OPENCL
maxnode += nopencl;
#endif
for (src = 0; src < STARPU_MAXNODES; src++)
{
for (dst = 0; dst < STARPU_MAXNODES; dst++)
{
double bandwidth;
if ((src > maxnode) || (dst > maxnode))
{
/* Unused node slots: NAN by convention. */
bandwidth = NAN;
}
#if defined(STARPU_USE_CUDA) || defined(STARPU_USE_OPENCL)
else if (src != dst)
{
double slowness = 0.0;
/* Total bandwidth is the harmonic mean of bandwidths */
#ifdef STARPU_USE_CUDA
#ifdef HAVE_CUDA_MEMCPY_PEER
if (src && src <= ncuda && dst && dst <= ncuda)
/* Direct GPU-GPU transfert */
slowness = cudadev_timing_dtod[src][dst];
else
#endif
{
/* No peer copy: go through RAM, so the device-to-host and
 * host-to-device times add up. */
if (src && src <= ncuda)
slowness += cudadev_timing_dtoh[src];
if (dst && dst <= ncuda)
slowness += cudadev_timing_htod[dst];
}
#endif
#ifdef STARPU_USE_OPENCL
/* Nodes above ncuda are OpenCL devices (indices shifted by ncuda). */
if (src > ncuda)
slowness += opencldev_timing_dtoh[src-ncuda];
if (dst > ncuda)
slowness += opencldev_timing_htod[dst-ncuda];
#endif
/* slowness accumulates per-unit transfer time, so its inverse is
 * the combined bandwidth (harmonic-mean combination, see above). */
bandwidth = 1.0/slowness;
}
#endif
else
{
/* convention */
bandwidth = 0.0;
}
fprintf(f, "%f\t", bandwidth);
}
fprintf(f, "\n");
}
fclose(f);
}
  919. void starpu_bus_print_bandwidth(FILE *f)
  920. {
  921. int src, dst, maxnode;
  922. maxnode = ncuda;
  923. #ifdef STARPU_USE_OPENCL
  924. maxnode += nopencl;
  925. #endif
  926. fprintf(f, "from/to\t");
  927. fprintf(f, "RAM\t");
  928. for (dst = 0; dst < ncuda; dst++)
  929. fprintf(f, "CUDA %d\t", dst);
  930. for (dst = 0; dst < nopencl; dst++)
  931. fprintf(f, "OpenCL%d\t", dst);
  932. fprintf(f, "\n");
  933. for (src = 0; src <= maxnode; src++)
  934. {
  935. if (!src)
  936. fprintf(f, "RAM\t");
  937. else if (src <= ncuda)
  938. fprintf(f, "CUDA %d\t", src-1);
  939. else
  940. fprintf(f, "OpenCL%d\t", src-ncuda-1);
  941. for (dst = 0; dst <= maxnode; dst++)
  942. fprintf(f, "%.0f\t", bandwidth_matrix[src][dst]);
  943. fprintf(f, "\n");
  944. }
  945. #if defined(STARPU_USE_CUDA) || defined(STARPU_USE_OPENCL)
  946. if (ncuda != 0 || nopencl != 0)
  947. fprintf(f, "\nGPU\tCPU in preference order (logical index), host-to-device, device-to-host\n");
  948. for (src = 1; src <= maxnode; src++)
  949. {
  950. struct dev_timing *timing;
  951. struct _starpu_machine_config *config = _starpu_get_machine_config();
  952. int ncpus = _starpu_topology_get_nhwcpu(config);
  953. int cpu;
  954. #ifdef STARPU_USE_CUDA
  955. if (src <= ncuda)
  956. {
  957. fprintf(f, "CUDA %d\t", src-1);
  958. for (cpu = 0; cpu < ncpus; cpu++)
  959. {
  960. timing = &cudadev_timing_per_cpu[src*STARPU_MAXCPUS+cpu];
  961. if (timing->timing_htod)
  962. fprintf(f, "%2d %.0f %.0f\t", timing->cpu_id, 1/timing->timing_htod, 1/timing->timing_dtoh);
  963. else
  964. fprintf(f, "%2d\t", cuda_affinity_matrix[src-1][cpu]);
  965. }
  966. }
  967. #ifdef STARPU_USE_OPENCL
  968. else
  969. #endif
  970. #endif
  971. #ifdef STARPU_USE_OPENCL
  972. {
  973. fprintf(f, "OpenCL%d\t", src-ncuda-1);
  974. for (cpu = 0; cpu < ncpus; cpu++)
  975. {
  976. timing = &opencldev_timing_per_cpu[(src-ncuda)*STARPU_MAXCPUS+cpu];
  977. if (timing->timing_htod)
  978. fprintf(f, "%2d %.0f %.0f\t", timing->cpu_id, 1/timing->timing_htod, 1/timing->timing_dtoh);
  979. else
  980. fprintf(f, "%2d\t", opencl_affinity_matrix[src-1][cpu]);
  981. }
  982. }
  983. #endif
  984. fprintf(f, "\n");
  985. }
  986. #endif
  987. }
  988. static void generate_bus_bandwidth_file(void)
  989. {
  990. if (!was_benchmarked)
  991. benchmark_all_gpu_devices();
  992. write_bus_bandwidth_file_content();
  993. }
  994. static void load_bus_bandwidth_file(void)
  995. {
  996. int res;
  997. char path[256];
  998. get_bandwidth_path(path, 256);
  999. res = access(path, F_OK);
  1000. if (res || !load_bus_bandwidth_file_content())
  1001. {
  1002. /* File does not exist yet or is bogus */
  1003. generate_bus_bandwidth_file();
  1004. }
  1005. }
  1006. /*
  1007. * Config
  1008. */
  1009. static void get_config_path(char *path, size_t maxlen)
  1010. {
  1011. get_bus_path("config", path, maxlen);
  1012. }
/* Verify that the stored bus configuration (CPU/CUDA/OpenCL counts) matches
 * the current machine; trigger a full bus recalibration when the config file
 * is missing, calibration was explicitly requested, or any count differs.
 * Side effect: refreshes the file-scope ncpus/ncuda/nopencl counters. */
static void check_bus_config_file(void)
{
int res;
char path[256];
struct _starpu_machine_config *config = _starpu_get_machine_config();
get_config_path(path, 256);
res = access(path, F_OK);
if (res || config->conf->bus_calibrate > 0)
{
if (res)
_STARPU_DISP("No performance model for the bus, calibrating...\n");
starpu_force_bus_sampling();
if (res)
_STARPU_DISP("... done\n");
}
else
{
FILE *f;
int ret, read_cuda = -1, read_opencl = -1;
/* NOTE(review): -1 assigned to an unsigned wraps to UINT_MAX; it only
 * serves as a "never matches" sentinel if fscanf were to fail. */
unsigned read_cpus = -1;
// Loading configuration from file
f = fopen(path, "r");
STARPU_ASSERT(f);
/* The file layout matches write_bus_config_file_content: one count per
 * line, each preceded/followed by comment lines that are skipped. */
_starpu_drop_comments(f);
ret = fscanf(f, "%u\t", &read_cpus);
STARPU_ASSERT(ret == 1);
_starpu_drop_comments(f);
ret = fscanf(f, "%d\t", &read_cuda);
STARPU_ASSERT(ret == 1);
_starpu_drop_comments(f);
ret = fscanf(f, "%d\t", &read_opencl);
STARPU_ASSERT(ret == 1);
_starpu_drop_comments(f);
fclose(f);
// Loading current configuration
ncpus = _starpu_topology_get_nhwcpu(config);
#ifdef STARPU_USE_CUDA
ncuda = _starpu_get_cuda_device_count();
#endif
#ifdef STARPU_USE_OPENCL
nopencl = _starpu_opencl_get_device_count();
#endif
// Checking if both configurations match
if (read_cpus != ncpus)
{
_STARPU_DISP("Current configuration does not match the bus performance model (CPUS: (stored) %u != (current) %u), recalibrating...\n", read_cpus, ncpus);
starpu_force_bus_sampling();
_STARPU_DISP("... done\n");
}
else if (read_cuda != ncuda)
{
_STARPU_DISP("Current configuration does not match the bus performance model (CUDA: (stored) %d != (current) %d), recalibrating...\n", read_cuda, ncuda);
starpu_force_bus_sampling();
_STARPU_DISP("... done\n");
}
else if (read_opencl != nopencl)
{
_STARPU_DISP("Current configuration does not match the bus performance model (OpenCL: (stored) %d != (current) %d), recalibrating...\n", read_opencl, nopencl);
starpu_force_bus_sampling();
_STARPU_DISP("... done\n");
}
}
}
  1076. static void write_bus_config_file_content(void)
  1077. {
  1078. FILE *f;
  1079. char path[256];
  1080. STARPU_ASSERT(was_benchmarked);
  1081. get_config_path(path, 256);
  1082. f = fopen(path, "w+");
  1083. STARPU_ASSERT(f);
  1084. fprintf(f, "# Current configuration\n");
  1085. fprintf(f, "%u # Number of CPUs\n", ncpus);
  1086. fprintf(f, "%d # Number of CUDA devices\n", ncuda);
  1087. fprintf(f, "%d # Number of OpenCL devices\n", nopencl);
  1088. fclose(f);
  1089. }
  1090. static void generate_bus_config_file(void)
  1091. {
  1092. if (!was_benchmarked)
  1093. benchmark_all_gpu_devices();
  1094. write_bus_config_file_content();
  1095. }
  1096. /*
  1097. * Generic
  1098. */
  1099. static void starpu_force_bus_sampling(void)
  1100. {
  1101. _STARPU_DEBUG("Force bus sampling ...\n");
  1102. _starpu_create_sampling_directory_if_needed();
  1103. generate_bus_affinity_file();
  1104. generate_bus_latency_file();
  1105. generate_bus_bandwidth_file();
  1106. generate_bus_config_file();
  1107. }
  1108. void _starpu_load_bus_performance_files(void)
  1109. {
  1110. _starpu_create_sampling_directory_if_needed();
  1111. check_bus_config_file();
  1112. load_bus_affinity_file();
  1113. load_bus_latency_file();
  1114. load_bus_bandwidth_file();
  1115. }
/* Estimate the time (in µs) to transfer size bytes from src_node to
 * dst_node, from the sampled latency and bandwidth matrices.  The
 * size/bandwidth term is scaled by 2*(number of GPUs) — presumably a
 * contention penalty for the shared bus; TODO(review) confirm intent.
 * NOTE(review): a zero bandwidth entry (the src==dst convention) would
 * yield +inf here — callers presumably never ask for same-node transfers. */
double _starpu_predict_transfer_time(unsigned src_node, unsigned dst_node, size_t size)
{
double bandwidth = bandwidth_matrix[src_node][dst_node];
double latency = latency_matrix[src_node][dst_node];
struct starpu_machine_topology *topology = &_starpu_get_machine_config()->topology;
return latency + (size/bandwidth)*2*(topology->ncudagpus+topology->nopenclgpus);
}