/* perfmodel_bus.c */
  1. /* StarPU --- Runtime system for heterogeneous multicore architectures.
  2. *
  3. * Copyright (C) 2009-2012 Université de Bordeaux 1
  4. * Copyright (C) 2010, 2011, 2012 Centre National de la Recherche Scientifique
  5. *
  6. * StarPU is free software; you can redistribute it and/or modify
  7. * it under the terms of the GNU Lesser General Public License as published by
  8. * the Free Software Foundation; either version 2.1 of the License, or (at
  9. * your option) any later version.
  10. *
  11. * StarPU is distributed in the hope that it will be useful, but
  12. * WITHOUT ANY WARRANTY; without even the implied warranty of
  13. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
  14. *
  15. * See the GNU Lesser General Public License in COPYING.LGPL for more details.
  16. */
  17. #ifdef STARPU_USE_CUDA
  18. #ifndef _GNU_SOURCE
  19. #define _GNU_SOURCE
  20. #endif
  21. #include <sched.h>
  22. #endif
  23. #include <unistd.h>
  24. #include <sys/time.h>
  25. #include <stdlib.h>
  26. #include <math.h>
  27. #include <starpu.h>
  28. #include <starpu_cuda.h>
  29. #include <starpu_opencl.h>
  30. #include <common/config.h>
  31. #include <core/workers.h>
  32. #include <core/perfmodel/perfmodel.h>
  33. #ifdef STARPU_USE_OPENCL
  34. #include <starpu_opencl.h>
  35. #endif
  36. #ifdef STARPU_HAVE_WINDOWS
  37. #include <windows.h>
  38. #endif
/* Size in bytes of the buffer transferred for each bandwidth measurement */
#define SIZE	(32*1024*1024*sizeof(char))
/* Number of transfers averaged per measurement */
#define NITER	128

static void starpu_force_bus_sampling(void);

/* timing is in µs per byte (i.e. slowness, inverse of bandwidth) */
struct dev_timing
{
	int cpu_id;		/* index of the CPU the measurement was run from */
	double timing_htod;	/* host-to-device slowness (µs/byte) */
	double timing_dtoh;	/* device-to-host slowness (µs/byte) */
};

/* Node-to-node matrices filled from the sampling files; indexed by memory
 * node numbers (STARPU_MAXNODES of them). */
static double bandwidth_matrix[STARPU_MAXNODES][STARPU_MAXNODES];
static double latency_matrix[STARPU_MAXNODES][STARPU_MAXNODES];
/* Set once benchmark_all_gpu_devices() has completed */
static unsigned was_benchmarked = 0;
static unsigned ncpus = 0;
static int ncuda = 0;
static int nopencl = 0;

/* Benchmarking the performance of the bus */
#ifdef STARPU_USE_CUDA
/* For each CUDA device, the CPUs in preference order (written/read by the
 * affinity file code below) */
static int cuda_affinity_matrix[STARPU_MAXCUDADEVS][STARPU_MAXCPUS];
/* Best measured slowness per device; entry 0 is by convention the RAM node,
 * so device dev is stored at index dev+1 */
static double cudadev_timing_htod[STARPU_MAXNODES] = {0.0};
static double cudadev_timing_dtoh[STARPU_MAXNODES] = {0.0};
#ifdef HAVE_CUDA_MEMCPY_PEER
/* Device-to-device slowness, same +1 index convention on both axes */
static double cudadev_timing_dtod[STARPU_MAXNODES][STARPU_MAXNODES] = {{0.0}};
#endif
/* Per-(device, CPU) measurements; entry (dev+1)*STARPU_MAXCPUS+cpu */
static struct dev_timing cudadev_timing_per_cpu[STARPU_MAXNODES*STARPU_MAXCPUS];
#endif
#ifdef STARPU_USE_OPENCL
static int opencl_affinity_matrix[STARPU_MAXOPENCLDEVS][STARPU_MAXCPUS];
static double opencldev_timing_htod[STARPU_MAXNODES] = {0.0};
static double opencldev_timing_dtoh[STARPU_MAXNODES] = {0.0};
static struct dev_timing opencldev_timing_per_cpu[STARPU_MAXNODES*STARPU_MAXCPUS];
#endif
#ifdef STARPU_HAVE_HWLOC
/* Topology used while benchmarking; loaded and destroyed by
 * benchmark_all_gpu_devices() */
static hwloc_topology_t hwtopology;
#endif
  74. #if defined(STARPU_USE_CUDA) || defined(STARPU_USE_OPENCL)
  75. #ifdef STARPU_USE_CUDA
/* Measure host<->device transfer slowness for CUDA device 'dev' with the
 * calling thread bound to CPU 'cpu'.  Results (µs per byte) are stored in
 * dev_timing_per_cpu at entry (dev+1)*STARPU_MAXCPUS+cpu (slot 0 is the RAM
 * node, so device indices are shifted by one).
 * The repeated _starpu_bind_thread_on_cpu() calls are deliberate: CUDA calls
 * may rebind the thread, so we re-pin after each of them. */
static void measure_bandwidth_between_host_and_dev_on_cpu_with_cuda(int dev, int cpu, struct dev_timing *dev_timing_per_cpu)
{
	struct _starpu_machine_config *config = _starpu_get_machine_config();
	_starpu_bind_thread_on_cpu(config, cpu);

	size_t size = SIZE;

	/* Initialize CUDA context on the device */
	cudaSetDevice(dev);

	/* hack to avoid third party libs to rebind threads */
	_starpu_bind_thread_on_cpu(config, cpu);

	/* hack to force the initialization */
	cudaFree(0);

	/* hack to avoid third party libs to rebind threads */
	_starpu_bind_thread_on_cpu(config, cpu);

	/* Get the maximum size which can be allocated on the device:
	 * never use more than a quarter of the device memory */
	struct cudaDeviceProp prop;
	cudaError_t cures;
	cures = cudaGetDeviceProperties(&prop, dev);
	if (STARPU_UNLIKELY(cures)) STARPU_CUDA_REPORT_ERROR(cures);
	if (size > prop.totalGlobalMem/4) size = prop.totalGlobalMem/4;

	/* Allocate a buffer on the device */
	unsigned char *d_buffer;
	cudaMalloc((void **)&d_buffer, size);
	STARPU_ASSERT(d_buffer);

	/* hack to avoid third party libs to rebind threads */
	_starpu_bind_thread_on_cpu(config, cpu);

	/* Allocate a (pinned) buffer on the host */
	unsigned char *h_buffer;
	cures = cudaHostAlloc((void **)&h_buffer, size, 0);
	STARPU_ASSERT(cures == cudaSuccess);

	/* hack to avoid third party libs to rebind threads */
	_starpu_bind_thread_on_cpu(config, cpu);

	/* Fill them */
	memset(h_buffer, 0, size);
	cudaMemset(d_buffer, 0, size);

	/* hack to avoid third party libs to rebind threads */
	_starpu_bind_thread_on_cpu(config, cpu);

	unsigned iter;
	double timing;
	struct timeval start;
	struct timeval end;

	/* Measure upload bandwidth: NITER synchronous copies, averaged */
	gettimeofday(&start, NULL);
	for (iter = 0; iter < NITER; iter++)
	{
		cudaMemcpy(d_buffer, h_buffer, size, cudaMemcpyHostToDevice);
		cudaThreadSynchronize();
	}
	gettimeofday(&end, NULL);
	timing = (double)((end.tv_sec - start.tv_sec)*1000000 + (end.tv_usec - start.tv_usec));
	/* µs per byte */
	dev_timing_per_cpu[(dev+1)*STARPU_MAXCPUS+cpu].timing_htod = timing/NITER/size;

	/* Measure download bandwidth */
	gettimeofday(&start, NULL);
	for (iter = 0; iter < NITER; iter++)
	{
		cudaMemcpy(h_buffer, d_buffer, size, cudaMemcpyDeviceToHost);
		cudaThreadSynchronize();
	}
	gettimeofday(&end, NULL);
	timing = (double)((end.tv_sec - start.tv_sec)*1000000 + (end.tv_usec - start.tv_usec));
	dev_timing_per_cpu[(dev+1)*STARPU_MAXCPUS+cpu].timing_dtoh = timing/NITER/size;

	/* Free buffers and tear the context down */
	cudaFreeHost(h_buffer);
	cudaFree(d_buffer);
	cudaThreadExit();
}
  141. #ifdef HAVE_CUDA_MEMCPY_PEER
/* Measure device-to-device transfer slowness between CUDA devices 'src' and
 * 'dst', enabling GPU-Direct peer access in both directions when available
 * (unless disabled via STARPU_DISABLE_CUDA_GPU_GPU_DIRECT).  The result
 * (µs per byte) goes to cudadev_timing_dtod[src+1][dst+1] (+1: slot 0 is
 * the RAM node). */
static void measure_bandwidth_between_dev_and_dev_cuda(int src, int dst)
{
	size_t size = SIZE;
	int can;

	/* Get the maximum size which can be allocated on the devices: cap
	 * the buffer to a quarter of the smaller device memory */
	struct cudaDeviceProp prop;
	cudaError_t cures;
	cures = cudaGetDeviceProperties(&prop, src);
	if (STARPU_UNLIKELY(cures)) STARPU_CUDA_REPORT_ERROR(cures);
	if (size > prop.totalGlobalMem/4) size = prop.totalGlobalMem/4;
	cures = cudaGetDeviceProperties(&prop, dst);
	if (STARPU_UNLIKELY(cures)) STARPU_CUDA_REPORT_ERROR(cures);
	if (size > prop.totalGlobalMem/4) size = prop.totalGlobalMem/4;

	/* Initialize CUDA context on the source */
	cudaSetDevice(src);

	if (starpu_get_env_number("STARPU_DISABLE_CUDA_GPU_GPU_DIRECT") <= 0) {
		cures = cudaDeviceCanAccessPeer(&can, src, dst);
		if (!cures && can) {
			cures = cudaDeviceEnablePeerAccess(dst, 0);
			if (!cures)
				/* NOTE(review): message prints dst -> src while access
				 * src->dst was enabled; presumably intentional (src
				 * accessing dst serves dst->src reads) — confirm */
				_STARPU_DISP("GPU-Direct %d -> %d\n", dst, src);
		}
	}

	/* Allocate a buffer on the source device */
	unsigned char *s_buffer;
	cudaMalloc((void **)&s_buffer, size);
	STARPU_ASSERT(s_buffer);
	cudaMemset(s_buffer, 0, size);

	/* Initialize CUDA context on the destination */
	cudaSetDevice(dst);

	if (starpu_get_env_number("STARPU_DISABLE_CUDA_GPU_GPU_DIRECT") <= 0) {
		cures = cudaDeviceCanAccessPeer(&can, dst, src);
		if (!cures && can) {
			cures = cudaDeviceEnablePeerAccess(src, 0);
			if (!cures)
				_STARPU_DISP("GPU-Direct %d -> %d\n", src, dst);
		}
	}

	/* Allocate a buffer on the destination device */
	unsigned char *d_buffer;
	cudaMalloc((void **)&d_buffer, size);
	STARPU_ASSERT(d_buffer);
	cudaMemset(d_buffer, 0, size);

	unsigned iter;
	double timing;
	struct timeval start;
	struct timeval end;

	/* Measure src -> dst bandwidth: NITER synchronous peer copies */
	gettimeofday(&start, NULL);
	for (iter = 0; iter < NITER; iter++)
	{
		cudaMemcpyPeer(d_buffer, dst, s_buffer, src, size);
		cudaThreadSynchronize();
	}
	gettimeofday(&end, NULL);
	timing = (double)((end.tv_sec - start.tv_sec)*1000000 + (end.tv_usec - start.tv_usec));
	/* µs per byte */
	cudadev_timing_dtod[src+1][dst+1] = timing/NITER/size;

	/* Free buffers (switch back to src to free its buffer) */
	cudaFree(d_buffer);
	starpu_cuda_set_device(src);
	cudaFree(s_buffer);

	cudaThreadExit();
}
  205. #endif
  206. #endif
  207. #ifdef STARPU_USE_OPENCL
/* Measure host<->device transfer slowness for OpenCL device 'dev' with the
 * calling thread bound to CPU 'cpu'.  Results (µs per byte) are stored in
 * dev_timing_per_cpu at entry (dev+1)*STARPU_MAXCPUS+cpu (slot 0 is the RAM
 * node).  An OpenCL context is created on demand and torn down again if it
 * was not already initialized.  The repeated _starpu_bind_thread_on_cpu()
 * calls re-pin the thread in case the OpenCL runtime rebinds it. */
static void measure_bandwidth_between_host_and_dev_on_cpu_with_opencl(int dev, int cpu, struct dev_timing *dev_timing_per_cpu)
{
	cl_context context;
	cl_command_queue queue;
	cl_int err=0;
	size_t size = SIZE;
	int not_initialized;

	struct _starpu_machine_config *config = _starpu_get_machine_config();
	_starpu_bind_thread_on_cpu(config, cpu);

	/* Is the context already initialised ? */
	starpu_opencl_get_context(dev, &context);
	not_initialized = (context == NULL);
	if (not_initialized == 1)
		_starpu_opencl_init_context(dev);

	/* Get context and queue */
	starpu_opencl_get_context(dev, &context);
	starpu_opencl_get_queue(dev, &queue);

	/* Get the maximum size which can be allocated on the device: never
	 * use more than a quarter of the device allocation limit */
	cl_device_id device;
	cl_ulong maxMemAllocSize;
	starpu_opencl_get_device(dev, &device);
	err = clGetDeviceInfo(device, CL_DEVICE_MAX_MEM_ALLOC_SIZE, sizeof(maxMemAllocSize), &maxMemAllocSize, NULL);
	if (err != CL_SUCCESS) STARPU_OPENCL_REPORT_ERROR(err);
	if (size > (size_t)maxMemAllocSize/4) size = maxMemAllocSize/4;

	if (_starpu_opencl_get_device_type(dev) == CL_DEVICE_TYPE_CPU)
	{
		/* Let's not use too much RAM when running OpenCL on a CPU: it
		 * would make the OS swap like crazy. */
		size /= 2;
	}

	/* hack to avoid third party libs to rebind threads */
	_starpu_bind_thread_on_cpu(config, cpu);

	/* Allocate a buffer on the device */
	cl_mem d_buffer;
	d_buffer = clCreateBuffer(context, CL_MEM_READ_WRITE, size, NULL, &err);
	if (err != CL_SUCCESS) STARPU_OPENCL_REPORT_ERROR(err);

	/* hack to avoid third party libs to rebind threads */
	_starpu_bind_thread_on_cpu(config, cpu);

	/* Allocate a buffer on the host */
	unsigned char *h_buffer;
	h_buffer = (unsigned char *)malloc(size);
	STARPU_ASSERT(h_buffer);

	/* hack to avoid third party libs to rebind threads */
	_starpu_bind_thread_on_cpu(config, cpu);

	/* Fill them */
	memset(h_buffer, 0, size);
	err = clEnqueueWriteBuffer(queue, d_buffer, CL_TRUE, 0, size, h_buffer, 0, NULL, NULL);
	if (err != CL_SUCCESS) STARPU_OPENCL_REPORT_ERROR(err);

	/* hack to avoid third party libs to rebind threads */
	_starpu_bind_thread_on_cpu(config, cpu);

	unsigned iter;
	double timing;
	struct timeval start;
	struct timeval end;

	/* Measure upload bandwidth: NITER blocking writes, averaged */
	gettimeofday(&start, NULL);
	for (iter = 0; iter < NITER; iter++)
	{
		err = clEnqueueWriteBuffer(queue, d_buffer, CL_TRUE, 0, size, h_buffer, 0, NULL, NULL);
		if (err != CL_SUCCESS) STARPU_OPENCL_REPORT_ERROR(err);
	}
	gettimeofday(&end, NULL);
	timing = (double)((end.tv_sec - start.tv_sec)*1000000 + (end.tv_usec - start.tv_usec));
	/* µs per byte */
	dev_timing_per_cpu[(dev+1)*STARPU_MAXCPUS+cpu].timing_htod = timing/NITER/size;

	/* Measure download bandwidth */
	gettimeofday(&start, NULL);
	for (iter = 0; iter < NITER; iter++)
	{
		err = clEnqueueReadBuffer(queue, d_buffer, CL_TRUE, 0, size, h_buffer, 0, NULL, NULL);
		if (err != CL_SUCCESS) STARPU_OPENCL_REPORT_ERROR(err);
	}
	gettimeofday(&end, NULL);
	timing = (double)((end.tv_sec - start.tv_sec)*1000000 + (end.tv_usec - start.tv_usec));
	dev_timing_per_cpu[(dev+1)*STARPU_MAXCPUS+cpu].timing_dtoh = timing/NITER/size;

	/* Free buffers */
	clReleaseMemObject(d_buffer);
	free(h_buffer);

	/* Uninitiliaze OpenCL context on the device */
	if (not_initialized == 1)
		_starpu_opencl_deinit_context(dev);
}
  289. #endif
  290. /* NB: we want to sort the bandwidth by DECREASING order */
  291. static int compar_dev_timing(const void *left_dev_timing, const void *right_dev_timing)
  292. {
  293. const struct dev_timing *left = (const struct dev_timing *)left_dev_timing;
  294. const struct dev_timing *right = (const struct dev_timing *)right_dev_timing;
  295. double left_dtoh = left->timing_dtoh;
  296. double left_htod = left->timing_htod;
  297. double right_dtoh = right->timing_dtoh;
  298. double right_htod = right->timing_htod;
  299. double timing_sum2_left = left_dtoh*left_dtoh + left_htod*left_htod;
  300. double timing_sum2_right = right_dtoh*right_dtoh + right_htod*right_htod;
  301. /* it's for a decreasing sorting */
  302. return (timing_sum2_left > timing_sum2_right);
  303. }
  304. #ifdef STARPU_HAVE_HWLOC
  305. static int find_numa_node(hwloc_obj_t obj)
  306. {
  307. STARPU_ASSERT(obj);
  308. hwloc_obj_t current = obj;
  309. while (current->depth != HWLOC_OBJ_NODE)
  310. {
  311. current = current->parent;
  312. /* If we don't find a "node" obj before the root, this means
  313. * hwloc does not know whether there are numa nodes or not, so
  314. * we should not use a per-node sampling in that case. */
  315. STARPU_ASSERT(current);
  316. }
  317. STARPU_ASSERT(current->depth == HWLOC_OBJ_NODE);
  318. return current->logical_index;
  319. }
  320. #endif
/* Measure host<->device slowness between device 'dev' of kind 'type'
 * ("CUDA" or "OpenCL") and each CPU, filling dev_timing_per_cpu at
 * entries (dev+1)*STARPU_MAXCPUS+cpu.  With hwloc, measurements are
 * cached per NUMA node: CPUs on an already-measured node reuse that
 * node's numbers instead of re-running the benchmark. */
static void measure_bandwidth_between_cpus_and_dev(int dev, struct dev_timing *dev_timing_per_cpu, char *type)
{
	/* Either we have hwloc and we measure the bandwith between each GPU
	 * and each NUMA node, or we don't have such NUMA information and we
	 * measure the bandwith for each pair of (CPU, GPU), which is slower.
	 * */
#ifdef STARPU_HAVE_HWLOC
	int cpu_depth = hwloc_get_type_depth(hwtopology, HWLOC_OBJ_CORE);
	/* NOTE(review): hwloc_get_nbobjs_by_depth() takes a depth, but is
	 * given the HWLOC_OBJ_NODE type enum here — verify against the hwloc
	 * version in use (hwloc_get_nbobjs_by_type() may be intended) */
	int nnuma_nodes = hwloc_get_nbobjs_by_depth(hwtopology, HWLOC_OBJ_NODE);

	/* If no NUMA node was found, we assume that we have a single memory
	 * bank. */
	const unsigned no_node_obj_was_found = (nnuma_nodes == 0);

	/* Per-NUMA-node caches of the measurements, allocated only when NUMA
	 * information is available */
	unsigned *is_available_per_numa_node = NULL;
	double *dev_timing_htod_per_numa_node = NULL;
	double *dev_timing_dtoh_per_numa_node = NULL;

	if (!no_node_obj_was_found)
	{
		is_available_per_numa_node = (unsigned *)malloc(nnuma_nodes * sizeof(unsigned));
		STARPU_ASSERT(is_available_per_numa_node);

		dev_timing_htod_per_numa_node = (double *)malloc(nnuma_nodes * sizeof(double));
		STARPU_ASSERT(dev_timing_htod_per_numa_node);

		dev_timing_dtoh_per_numa_node = (double *)malloc(nnuma_nodes * sizeof(double));
		STARPU_ASSERT(dev_timing_dtoh_per_numa_node);

		memset(is_available_per_numa_node, 0, nnuma_nodes*sizeof(unsigned));
	}
#endif

	unsigned cpu;
	for (cpu = 0; cpu < ncpus; cpu++)
	{
		dev_timing_per_cpu[(dev+1)*STARPU_MAXCPUS+cpu].cpu_id = cpu;

#ifdef STARPU_HAVE_HWLOC
		int numa_id = 0;

		if (!no_node_obj_was_found)
		{
			hwloc_obj_t obj = hwloc_get_obj_by_depth(hwtopology, cpu_depth, cpu);

			numa_id = find_numa_node(obj);

			if (is_available_per_numa_node[numa_id])
			{
				/* We reuse the previous numbers for that NUMA node */
				dev_timing_per_cpu[(dev+1)*STARPU_MAXCPUS+cpu].timing_htod =
					dev_timing_htod_per_numa_node[numa_id];

				dev_timing_per_cpu[(dev+1)*STARPU_MAXCPUS+cpu].timing_dtoh =
					dev_timing_dtoh_per_numa_node[numa_id];

				continue;
			}
		}
#endif

#ifdef STARPU_USE_CUDA
		if (strncmp(type, "CUDA", 4) == 0)
			measure_bandwidth_between_host_and_dev_on_cpu_with_cuda(dev, cpu, dev_timing_per_cpu);
#endif
#ifdef STARPU_USE_OPENCL
		if (strncmp(type, "OpenCL", 6) == 0)
			measure_bandwidth_between_host_and_dev_on_cpu_with_opencl(dev, cpu, dev_timing_per_cpu);
#endif

#ifdef STARPU_HAVE_HWLOC
		if (!no_node_obj_was_found && !is_available_per_numa_node[numa_id])
		{
			/* Save the results for that NUMA node */
			dev_timing_htod_per_numa_node[numa_id] =
				dev_timing_per_cpu[(dev+1)*STARPU_MAXCPUS+cpu].timing_htod;

			dev_timing_dtoh_per_numa_node[numa_id] =
				dev_timing_per_cpu[(dev+1)*STARPU_MAXCPUS+cpu].timing_dtoh;

			is_available_per_numa_node[numa_id] = 1;
		}
#endif
	}

#ifdef STARPU_HAVE_HWLOC
	if (!no_node_obj_was_found)
	{
		free(is_available_per_numa_node);
		free(dev_timing_htod_per_numa_node);
		free(dev_timing_dtoh_per_numa_node);
	}
#endif /* STARPU_HAVE_HWLOC */
}
/* Measure the bandwidth between the host and device 'dev' of kind 'type'
 * from every CPU, sort the per-CPU results by decreasing bandwidth, and
 * record the best timings into dev_timing_htod/dev_timing_dtoh at index
 * dev+1 (slot 0 is the RAM node). */
static void measure_bandwidth_between_host_and_dev(int dev, double *dev_timing_htod, double *dev_timing_dtoh,
						   struct dev_timing *dev_timing_per_cpu, char *type)
{
	measure_bandwidth_between_cpus_and_dev(dev, dev_timing_per_cpu, type);

	/* sort the results: the ncpus entries of this device's slice */
	qsort(&(dev_timing_per_cpu[(dev+1)*STARPU_MAXCPUS]), ncpus,
	      sizeof(struct dev_timing),
	      compar_dev_timing);

#ifdef STARPU_VERBOSE
	/* Report all measurements and the winning CPU */
	unsigned cpu;
	for (cpu = 0; cpu < ncpus; cpu++)
	{
		unsigned current_cpu = dev_timing_per_cpu[(dev+1)*STARPU_MAXCPUS+cpu].cpu_id;
		double bandwidth_dtoh = dev_timing_per_cpu[(dev+1)*STARPU_MAXCPUS+cpu].timing_dtoh;
		double bandwidth_htod = dev_timing_per_cpu[(dev+1)*STARPU_MAXCPUS+cpu].timing_htod;

		double bandwidth_sum2 = bandwidth_dtoh*bandwidth_dtoh + bandwidth_htod*bandwidth_htod;

		_STARPU_DISP("(%10s) BANDWIDTH GPU %d CPU %u - htod %f - dtoh %f - %f\n", type, dev, current_cpu, bandwidth_htod, bandwidth_dtoh, sqrt(bandwidth_sum2));
	}

	unsigned best_cpu = dev_timing_per_cpu[(dev+1)*STARPU_MAXCPUS+0].cpu_id;

	_STARPU_DISP("(%10s) BANDWIDTH GPU %d BEST CPU %u\n", type, dev, best_cpu);
#endif

	/* The results are sorted in a decreasing order, so that the best
	 * measurement is currently the first entry. */
	dev_timing_dtoh[dev+1] = dev_timing_per_cpu[(dev+1)*STARPU_MAXCPUS+0].timing_dtoh;
	dev_timing_htod[dev+1] = dev_timing_per_cpu[(dev+1)*STARPU_MAXCPUS+0].timing_htod;
}
  423. #endif /* defined(STARPU_USE_CUDA) || defined(STARPU_USE_OPENCL) */
/* Benchmark every CUDA/OpenCL device against the host (and, with
 * HAVE_CUDA_MEMCPY_PEER, every CUDA device pair).  The caller's CPU
 * affinity is saved first and restored afterwards, because the
 * measurements bind the thread to each CPU in turn.  Sets
 * was_benchmarked = 1 on completion. */
static void benchmark_all_gpu_devices(void)
{
#if defined(STARPU_USE_CUDA) || defined(STARPU_USE_OPENCL)
	int i;
#endif
#ifdef HAVE_CUDA_MEMCPY_PEER
	int j;
#endif

	_STARPU_DEBUG("Benchmarking the speed of the bus\n");

#ifdef STARPU_HAVE_HWLOC
	hwloc_topology_init(&hwtopology);
	hwloc_topology_load(hwtopology);
#endif

#ifdef STARPU_HAVE_HWLOC
	/* Save the current thread binding to restore it at the end */
	hwloc_bitmap_t former_cpuset = hwloc_bitmap_alloc();
	hwloc_get_cpubind(hwtopology, former_cpuset, HWLOC_CPUBIND_THREAD);
#elif __linux__
	/* Save the current cpu binding */
	cpu_set_t former_process_affinity;
	int ret;
	ret = sched_getaffinity(0, sizeof(former_process_affinity), &former_process_affinity);
	if (ret)
	{
		perror("sched_getaffinity");
		STARPU_ABORT();
	}
#else
#warning Missing binding support, StarPU will not be able to properly benchmark NUMA topology
#endif

	struct _starpu_machine_config *config = _starpu_get_machine_config();
	ncpus = _starpu_topology_get_nhwcpu(config);

#ifdef STARPU_USE_CUDA
	ncuda = _starpu_get_cuda_device_count();
	for (i = 0; i < ncuda; i++)
	{
		_STARPU_DISP("CUDA %d...\n", i);
		/* measure bandwidth between Host and Device i */
		measure_bandwidth_between_host_and_dev(i, cudadev_timing_htod, cudadev_timing_dtoh, cudadev_timing_per_cpu, "CUDA");
	}
#ifdef HAVE_CUDA_MEMCPY_PEER
	/* Device pairs, both directions */
	for (i = 0; i < ncuda; i++)
		for (j = 0; j < ncuda; j++)
			if (i != j)
			{
				_STARPU_DISP("CUDA %d -> %d...\n", i, j);
				/* measure bandwidth between Device i and Device j */
				measure_bandwidth_between_dev_and_dev_cuda(i, j);
			}
#endif
#endif
#ifdef STARPU_USE_OPENCL
	nopencl = _starpu_opencl_get_device_count();
	for (i = 0; i < nopencl; i++)
	{
		_STARPU_DISP("OpenCL %d...\n", i);
		/* measure bandwith between Host and Device i */
		measure_bandwidth_between_host_and_dev(i, opencldev_timing_htod, opencldev_timing_dtoh, opencldev_timing_per_cpu, "OpenCL");
	}
#endif

#ifdef STARPU_HAVE_HWLOC
	hwloc_set_cpubind(hwtopology, former_cpuset, HWLOC_CPUBIND_THREAD);
#elif __linux__
	/* Restore the former affinity */
	ret = sched_setaffinity(0, sizeof(former_process_affinity), &former_process_affinity);
	if (ret)
	{
		perror("sched_setaffinity");
		STARPU_ABORT();
	}
#endif

#ifdef STARPU_HAVE_HWLOC
	hwloc_topology_destroy(hwtopology);
#endif

	_STARPU_DEBUG("Benchmarking the speed of the bus is done.\n");

	was_benchmarked = 1;
}
  500. static void get_bus_path(const char *type, char *path, size_t maxlen)
  501. {
  502. _starpu_get_perf_model_dir_bus(path, maxlen);
  503. char hostname[32];
  504. char *forced_hostname = getenv("STARPU_HOSTNAME");
  505. if (forced_hostname && forced_hostname[0])
  506. snprintf(hostname, sizeof(hostname), "%s", forced_hostname);
  507. else
  508. gethostname(hostname, sizeof(hostname));
  509. strncat(path, hostname, maxlen);
  510. strncat(path, ".", maxlen);
  511. strncat(path, type, maxlen);
  512. }
  513. /*
  514. * Affinity
  515. */
/* Build the path of the per-host bus "affinity" sampling file into 'path'
 * (capacity 'maxlen'). */
static void get_affinity_path(char *path, size_t maxlen)
{
	get_bus_path("affinity", path, maxlen);
}
/* Parse the affinity sampling file and fill cuda_affinity_matrix /
 * opencl_affinity_matrix with, for each device, the CPU indices in
 * preference order.  The file is assumed to exist and to match the
 * current device/CPU counts (asserted, not recovered from). */
static void load_bus_affinity_file_content(void)
{
#if defined(STARPU_USE_CUDA) || defined(STARPU_USE_OPENCL)
	FILE *f;

	char path[256];
	get_affinity_path(path, 256);

	f = fopen(path, "r");
	STARPU_ASSERT(f);

	struct _starpu_machine_config *config = _starpu_get_machine_config();
	ncpus = _starpu_topology_get_nhwcpu(config);
	int gpu;

#ifdef STARPU_USE_CUDA
	ncuda = _starpu_get_cuda_device_count();
	for (gpu = 0; gpu < ncuda; gpu++)
	{
		int ret;

		int dummy;

		_starpu_drop_comments(f);
		/* Each row starts with the device number itself */
		ret = fscanf(f, "%d\t", &dummy);
		STARPU_ASSERT(ret == 1);
		STARPU_ASSERT(dummy == gpu);

		unsigned cpu;
		for (cpu = 0; cpu < ncpus; cpu++)
		{
			ret = fscanf(f, "%d\t", &cuda_affinity_matrix[gpu][cpu]);
			STARPU_ASSERT(ret == 1);
		}

		ret = fscanf(f, "\n");
		STARPU_ASSERT(ret == 0);
	}
#endif /* STARPU_USE_CUDA */
#ifdef STARPU_USE_OPENCL
	nopencl = _starpu_opencl_get_device_count();
	for (gpu = 0; gpu < nopencl; gpu++)
	{
		int ret;

		int dummy;

		_starpu_drop_comments(f);
		ret = fscanf(f, "%d\t", &dummy);
		STARPU_ASSERT(ret == 1);
		STARPU_ASSERT(dummy == gpu);

		unsigned cpu;
		for (cpu = 0; cpu < ncpus; cpu++)
		{
			ret = fscanf(f, "%d\t", &opencl_affinity_matrix[gpu][cpu]);
			STARPU_ASSERT(ret == 1);
		}

		ret = fscanf(f, "\n");
		STARPU_ASSERT(ret == 0);
	}
#endif /* STARPU_USE_OPENCL */

	fclose(f);
#endif /* STARPU_USE_CUDA || STARPU_USE_OPENCL */
}
  574. static void write_bus_affinity_file_content(void)
  575. {
  576. STARPU_ASSERT(was_benchmarked);
  577. #if defined(STARPU_USE_CUDA) || defined(STARPU_USE_OPENCL)
  578. FILE *f;
  579. char path[256];
  580. get_affinity_path(path, 256);
  581. f = fopen(path, "w+");
  582. if (!f)
  583. {
  584. perror("fopen write_buf_affinity_file_content");
  585. _STARPU_DISP("path '%s'\n", path);
  586. fflush(stderr);
  587. STARPU_ABORT();
  588. }
  589. unsigned cpu;
  590. int gpu;
  591. fprintf(f, "# GPU\t");
  592. for (cpu = 0; cpu < ncpus; cpu++)
  593. fprintf(f, "CPU%u\t", cpu);
  594. fprintf(f, "\n");
  595. #ifdef STARPU_USE_CUDA
  596. for (gpu = 0; gpu < ncuda; gpu++)
  597. {
  598. fprintf(f, "%d\t", gpu);
  599. for (cpu = 0; cpu < ncpus; cpu++)
  600. {
  601. fprintf(f, "%d\t", cudadev_timing_per_cpu[(gpu+1)*STARPU_MAXCPUS+cpu].cpu_id);
  602. }
  603. fprintf(f, "\n");
  604. }
  605. #endif
  606. #ifdef STARPU_USE_OPENCL
  607. for (gpu = 0; gpu < nopencl; gpu++)
  608. {
  609. fprintf(f, "%d\t", gpu);
  610. for (cpu = 0; cpu < ncpus; cpu++)
  611. {
  612. fprintf(f, "%d\t", opencldev_timing_per_cpu[(gpu+1)*STARPU_MAXCPUS+cpu].cpu_id);
  613. }
  614. fprintf(f, "\n");
  615. }
  616. #endif
  617. fclose(f);
  618. #endif
  619. }
/* Run the bus benchmark if it has not been run yet, then write the
 * affinity sampling file from the measurements. */
static void generate_bus_affinity_file(void)
{
	if (!was_benchmarked)
		benchmark_all_gpu_devices();

	write_bus_affinity_file_content();
}
  626. static void load_bus_affinity_file(void)
  627. {
  628. int res;
  629. char path[256];
  630. get_affinity_path(path, 256);
  631. res = access(path, F_OK);
  632. if (res)
  633. {
  634. /* File does not exist yet */
  635. generate_bus_affinity_file();
  636. }
  637. load_bus_affinity_file_content();
  638. }
  639. #ifdef STARPU_USE_CUDA
/* Return the CPU-preference vector (CPU indices sorted by decreasing
 * bandwidth) for CUDA device 'gpuid'.  The returned pointer aliases the
 * static matrix: callers must not free it. */
int *_starpu_get_cuda_affinity_vector(unsigned gpuid)
{
	return cuda_affinity_matrix[gpuid];
}
  644. #endif /* STARPU_USE_CUDA */
  645. #ifdef STARPU_USE_OPENCL
/* Return the CPU-preference vector (CPU indices sorted by decreasing
 * bandwidth) for OpenCL device 'gpuid'.  The returned pointer aliases the
 * static matrix: callers must not free it. */
int *_starpu_get_opencl_affinity_vector(unsigned gpuid)
{
	return opencl_affinity_matrix[gpuid];
}
  650. #endif /* STARPU_USE_OPENCL */
  651. void starpu_bus_print_affinity(FILE *f)
  652. {
  653. #if defined(STARPU_USE_CUDA) || defined(STARPU_USE_OPENCL)
  654. unsigned cpu;
  655. int gpu;
  656. #endif
  657. fprintf(f, "# GPU\tCPU in preference order (logical index)\n");
  658. #ifdef STARPU_USE_CUDA
  659. fprintf(f, "# CUDA\n");
  660. for(gpu = 0 ; gpu<ncuda ; gpu++)
  661. {
  662. fprintf(f, "%d\t", gpu);
  663. for (cpu = 0; cpu < ncpus; cpu++)
  664. {
  665. fprintf(f, "%d\t", cuda_affinity_matrix[gpu][cpu]);
  666. }
  667. fprintf(f, "\n");
  668. }
  669. #endif
  670. #ifdef STARPU_USE_OPENCL
  671. fprintf(f, "# OpenCL\n");
  672. for(gpu = 0 ; gpu<nopencl ; gpu++)
  673. {
  674. fprintf(f, "%d\t", gpu);
  675. for (cpu = 0; cpu < ncpus; cpu++)
  676. {
  677. fprintf(f, "%d\t", opencl_affinity_matrix[gpu][cpu]);
  678. }
  679. fprintf(f, "\n");
  680. }
  681. #endif
  682. }
  683. /*
  684. * Latency
  685. */
/* Build the path of the per-host bus "latency" sampling file into 'path'
 * (capacity 'maxlen'). */
static void get_latency_path(char *path, size_t maxlen)
{
	get_bus_path("latency", path, maxlen);
}
  690. static int load_bus_latency_file_content(void)
  691. {
  692. int n;
  693. unsigned src, dst;
  694. FILE *f;
  695. char path[256];
  696. get_latency_path(path, 256);
  697. f = fopen(path, "r");
  698. STARPU_ASSERT(f);
  699. for (src = 0; src < STARPU_MAXNODES; src++)
  700. {
  701. _starpu_drop_comments(f);
  702. for (dst = 0; dst < STARPU_MAXNODES; dst++)
  703. {
  704. double latency;
  705. n = fscanf(f, "%lf", &latency);
  706. if (n != 1)
  707. {
  708. fclose(f);
  709. return 0;
  710. }
  711. n = getc(f);
  712. if (n != '\t')
  713. {
  714. fclose(f);
  715. return 0;
  716. }
  717. latency_matrix[src][dst] = latency;
  718. }
  719. n = getc(f);
  720. if (n != '\n')
  721. {
  722. fclose(f);
  723. return 0;
  724. }
  725. }
  726. fclose(f);
  727. return 1;
  728. }
  729. static void write_bus_latency_file_content(void)
  730. {
  731. int src, dst, maxnode;
  732. FILE *f;
  733. STARPU_ASSERT(was_benchmarked);
  734. char path[256];
  735. get_latency_path(path, 256);
  736. f = fopen(path, "w+");
  737. if (!f)
  738. {
  739. perror("fopen write_bus_latency_file_content");
  740. _STARPU_DISP("path '%s'\n", path);
  741. fflush(stderr);
  742. STARPU_ABORT();
  743. }
  744. fprintf(f, "# ");
  745. for (dst = 0; dst < STARPU_MAXNODES; dst++)
  746. fprintf(f, "to %d\t\t", dst);
  747. fprintf(f, "\n");
  748. maxnode = ncuda;
  749. #ifdef STARPU_USE_OPENCL
  750. maxnode += nopencl;
  751. #endif
  752. for (src = 0; src < STARPU_MAXNODES; src++)
  753. {
  754. for (dst = 0; dst < STARPU_MAXNODES; dst++)
  755. {
  756. double latency;
  757. if ((src > maxnode) || (dst > maxnode))
  758. {
  759. /* convention */
  760. latency = NAN;
  761. }
  762. else if (src == dst)
  763. {
  764. latency = 0.0;
  765. }
  766. else
  767. {
  768. /* µs */
  769. latency = ((src && dst)?2000.0:500.0);
  770. }
  771. fprintf(f, "%f\t", latency);
  772. }
  773. fprintf(f, "\n");
  774. }
  775. fclose(f);
  776. }
  777. static void generate_bus_latency_file(void)
  778. {
  779. if (!was_benchmarked)
  780. benchmark_all_gpu_devices();
  781. write_bus_latency_file_content();
  782. }
  783. static void load_bus_latency_file(void)
  784. {
  785. int res;
  786. char path[256];
  787. get_latency_path(path, 256);
  788. res = access(path, F_OK);
  789. if (res || !load_bus_latency_file_content())
  790. {
  791. /* File does not exist yet or is bogus */
  792. generate_bus_latency_file();
  793. }
  794. }
  795. /*
  796. * Bandwidth
  797. */
  798. static void get_bandwidth_path(char *path, size_t maxlen)
  799. {
  800. get_bus_path("bandwidth", path, maxlen);
  801. }
  802. static int load_bus_bandwidth_file_content(void)
  803. {
  804. int n;
  805. unsigned src, dst;
  806. FILE *f;
  807. char path[256];
  808. get_bandwidth_path(path, 256);
  809. f = fopen(path, "r");
  810. if (!f)
  811. {
  812. perror("fopen load_bus_bandwidth_file_content");
  813. _STARPU_DISP("path '%s'\n", path);
  814. fflush(stderr);
  815. STARPU_ABORT();
  816. }
  817. for (src = 0; src < STARPU_MAXNODES; src++)
  818. {
  819. _starpu_drop_comments(f);
  820. for (dst = 0; dst < STARPU_MAXNODES; dst++)
  821. {
  822. double bandwidth;
  823. n = fscanf(f, "%lf", &bandwidth);
  824. if (n != 1)
  825. {
  826. fprintf(stderr,"Error while reading sampling file <%s>. Expected a number\n", path);
  827. fclose(f);
  828. return 0;
  829. }
  830. n = getc(f);
  831. if (n != '\t')
  832. {
  833. fclose(f);
  834. return 0;
  835. }
  836. bandwidth_matrix[src][dst] = bandwidth;
  837. }
  838. n = getc(f);
  839. if (n != '\n')
  840. {
  841. fclose(f);
  842. return 0;
  843. }
  844. }
  845. fclose(f);
  846. return 1;
  847. }
/* Write the STARPU_MAXNODES x STARPU_MAXNODES bandwidth matrix to the
 * sampling file.  Entries are derived from the benchmarked per-direction
 * transfer slownesses; NAN marks node numbers beyond the detected devices
 * and 0.0 marks the diagonal (by convention). */
static void write_bus_bandwidth_file_content(void)
{
	int src, dst, maxnode;
	FILE *f;

	STARPU_ASSERT(was_benchmarked);

	char path[256];
	get_bandwidth_path(path, 256);

	f = fopen(path, "w+");
	STARPU_ASSERT(f);

	/* Header line naming the destination columns. */
	fprintf(f, "# ");
	for (dst = 0; dst < STARPU_MAXNODES; dst++)
		fprintf(f, "to %d\t\t", dst);
	fprintf(f, "\n");

	/* Highest valid node number: node 0 is RAM, then the GPUs. */
	maxnode = ncuda;
#ifdef STARPU_USE_OPENCL
	maxnode += nopencl;
#endif
	for (src = 0; src < STARPU_MAXNODES; src++)
	{
		for (dst = 0; dst < STARPU_MAXNODES; dst++)
		{
			double bandwidth;

			if ((src > maxnode) || (dst > maxnode))
			{
				/* No such device pair on this machine. */
				bandwidth = NAN;
			}
#if defined(STARPU_USE_CUDA) || defined(STARPU_USE_OPENCL)
			else if (src != dst)
			{
				/* Slowness is time per byte; summing the two legs
				 * (device-to-host + host-to-device) and inverting
				 * gives the harmonic-mean bandwidth of the route. */
				double slowness = 0.0;
				/* Total bandwidth is the harmonic mean of bandwidths */
#ifdef STARPU_USE_CUDA
#ifdef HAVE_CUDA_MEMCPY_PEER
				if (src && src <= ncuda && dst && dst <= ncuda)
					/* Direct GPU-GPU transfer (peer copy). */
					slowness = cudadev_timing_dtod[src][dst];
				else
#endif
				{
					/* Otherwise the copy goes through RAM. */
					if (src && src <= ncuda)
						slowness += cudadev_timing_dtoh[src];
					if (dst && dst <= ncuda)
						slowness += cudadev_timing_htod[dst];
				}
#endif
#ifdef STARPU_USE_OPENCL
				/* OpenCL nodes are numbered after the CUDA ones. */
				if (src > ncuda)
					slowness += opencldev_timing_dtoh[src-ncuda];
				if (dst > ncuda)
					slowness += opencldev_timing_htod[dst-ncuda];
#endif
				bandwidth = 1.0/slowness;
			}
#endif
			else
			{
				/* convention */
				bandwidth = 0.0;
			}

			fprintf(f, "%f\t", bandwidth);
		}
		fprintf(f, "\n");
	}

	fclose(f);
}
  913. void starpu_bus_print_bandwidth(FILE *f)
  914. {
  915. int src, dst, maxnode;
  916. maxnode = ncuda;
  917. #ifdef STARPU_USE_OPENCL
  918. maxnode += nopencl;
  919. #endif
  920. fprintf(f, "from/to\t");
  921. fprintf(f, "RAM\t");
  922. for (dst = 0; dst < ncuda; dst++)
  923. fprintf(f, "CUDA %d\t", dst);
  924. for (dst = 0; dst < nopencl; dst++)
  925. fprintf(f, "OpenCL%d\t", dst);
  926. fprintf(f, "\n");
  927. for (src = 0; src <= maxnode; src++)
  928. {
  929. if (!src)
  930. fprintf(f, "RAM\t");
  931. else if (src <= ncuda)
  932. fprintf(f, "CUDA %d\t", src-1);
  933. else
  934. fprintf(f, "OpenCL%d\t", src-ncuda-1);
  935. for (dst = 0; dst <= maxnode; dst++)
  936. fprintf(f, "%.0f\t", bandwidth_matrix[src][dst]);
  937. fprintf(f, "\n");
  938. }
  939. #if defined(STARPU_USE_CUDA) || defined(STARPU_USE_OPENCL)
  940. if (ncuda != 0 || nopencl != 0)
  941. fprintf(f, "\nGPU\tCPU in preference order (logical index), host-to-device, device-to-host\n");
  942. for (src = 1; src <= maxnode; src++)
  943. {
  944. struct dev_timing *timing;
  945. struct _starpu_machine_config *config = _starpu_get_machine_config();
  946. int ncpus = _starpu_topology_get_nhwcpu(config);
  947. int cpu;
  948. #ifdef STARPU_USE_CUDA
  949. if (src <= ncuda)
  950. {
  951. fprintf(f, "CUDA %d\t", src-1);
  952. for (cpu = 0; cpu < ncpus; cpu++)
  953. {
  954. timing = &cudadev_timing_per_cpu[src*STARPU_MAXCPUS+cpu];
  955. if (timing->timing_htod)
  956. fprintf(f, "%2d %.0f %.0f\t", timing->cpu_id, 1/timing->timing_htod, 1/timing->timing_dtoh);
  957. else
  958. fprintf(f, "%2d\t", cuda_affinity_matrix[src-1][cpu]);
  959. }
  960. }
  961. #ifdef STARPU_USE_OPENCL
  962. else
  963. #endif
  964. #endif
  965. #ifdef STARPU_USE_OPENCL
  966. {
  967. fprintf(f, "OpenCL%d\t", src-ncuda-1);
  968. for (cpu = 0; cpu < ncpus; cpu++)
  969. {
  970. timing = &opencldev_timing_per_cpu[(src-ncuda)*STARPU_MAXCPUS+cpu];
  971. if (timing->timing_htod)
  972. fprintf(f, "%2d %.0f %.0f\t", timing->cpu_id, 1/timing->timing_htod, 1/timing->timing_dtoh);
  973. else
  974. fprintf(f, "%2d\t", opencl_affinity_matrix[src-1][cpu]);
  975. }
  976. }
  977. #endif
  978. fprintf(f, "\n");
  979. }
  980. #endif
  981. }
  982. static void generate_bus_bandwidth_file(void)
  983. {
  984. if (!was_benchmarked)
  985. benchmark_all_gpu_devices();
  986. write_bus_bandwidth_file_content();
  987. }
  988. static void load_bus_bandwidth_file(void)
  989. {
  990. int res;
  991. char path[256];
  992. get_bandwidth_path(path, 256);
  993. res = access(path, F_OK);
  994. if (res || !load_bus_bandwidth_file_content())
  995. {
  996. /* File does not exist yet or is bogus */
  997. generate_bus_bandwidth_file();
  998. }
  999. }
  1000. /*
  1001. * Config
  1002. */
  1003. static void get_config_path(char *path, size_t maxlen)
  1004. {
  1005. get_bus_path("config", path, maxlen);
  1006. }
/* Make sure the stored bus configuration (CPU/CUDA/OpenCL device counts)
 * matches the current machine.  Recalibrates the bus model when the config
 * file is missing, calibration is explicitly requested, or any count
 * differs from the stored one. */
static void check_bus_config_file(void)
{
	int res;
	char path[256];
	struct _starpu_machine_config *config = _starpu_get_machine_config();

	get_config_path(path, 256);
	res = access(path, F_OK);
	if (res || config->conf->bus_calibrate > 0)
	{
		/* No config file yet, or the user forced recalibration. */
		if (res)
			_STARPU_DISP("No performance model for the bus, calibrating...\n");
		starpu_force_bus_sampling();
		if (res)
			_STARPU_DISP("... done\n");
	}
	else
	{
		FILE *f;
		int ret, read_cuda = -1, read_opencl = -1;
		/* -1 wraps to UINT_MAX: sentinel that can never match ncpus. */
		unsigned read_cpus = -1;

		// Loading configuration from file
		f = fopen(path, "r");
		STARPU_ASSERT(f);
		_starpu_drop_comments(f);
		ret = fscanf(f, "%u\t", &read_cpus);
		STARPU_ASSERT(ret == 1);
		_starpu_drop_comments(f);
		ret = fscanf(f, "%d\t", &read_cuda);
		STARPU_ASSERT(ret == 1);
		_starpu_drop_comments(f);
		ret = fscanf(f, "%d\t", &read_opencl);
		STARPU_ASSERT(ret == 1);
		_starpu_drop_comments(f);
		fclose(f);

		// Loading current configuration
		ncpus = _starpu_topology_get_nhwcpu(config);
#ifdef STARPU_USE_CUDA
		ncuda = _starpu_get_cuda_device_count();
#endif
#ifdef STARPU_USE_OPENCL
		nopencl = _starpu_opencl_get_device_count();
#endif

		// Checking if both configurations match
		if (read_cpus != ncpus)
		{
			fprintf(stderr, "Current configuration does not match the bus performance model (CPUS: (stored) %u != (current) %u), recalibrating...", read_cpus, ncpus);
			starpu_force_bus_sampling();
			fprintf(stderr, "done\n");
		}
		else if (read_cuda != ncuda)
		{
			fprintf(stderr, "Current configuration does not match the bus performance model (CUDA: (stored) %d != (current) %d), recalibrating...", read_cuda, ncuda);
			starpu_force_bus_sampling();
			fprintf(stderr, "done\n");
		}
		else if (read_opencl != nopencl)
		{
			fprintf(stderr, "Current configuration does not match the bus performance model (OpenCL: (stored) %d != (current) %d), recalibrating...", read_opencl, nopencl);
			starpu_force_bus_sampling();
			fprintf(stderr, "done\n");
		}
	}
}
  1070. static void write_bus_config_file_content(void)
  1071. {
  1072. FILE *f;
  1073. char path[256];
  1074. STARPU_ASSERT(was_benchmarked);
  1075. get_config_path(path, 256);
  1076. f = fopen(path, "w+");
  1077. STARPU_ASSERT(f);
  1078. fprintf(f, "# Current configuration\n");
  1079. fprintf(f, "%u # Number of CPUs\n", ncpus);
  1080. fprintf(f, "%d # Number of CUDA devices\n", ncuda);
  1081. fprintf(f, "%d # Number of OpenCL devices\n", nopencl);
  1082. fclose(f);
  1083. }
  1084. static void generate_bus_config_file(void)
  1085. {
  1086. if (!was_benchmarked)
  1087. benchmark_all_gpu_devices();
  1088. write_bus_config_file_content();
  1089. }
  1090. /*
  1091. * Generic
  1092. */
  1093. static void starpu_force_bus_sampling(void)
  1094. {
  1095. _STARPU_DEBUG("Force bus sampling ...\n");
  1096. _starpu_create_sampling_directory_if_needed();
  1097. generate_bus_affinity_file();
  1098. generate_bus_latency_file();
  1099. generate_bus_bandwidth_file();
  1100. generate_bus_config_file();
  1101. }
  1102. void _starpu_load_bus_performance_files(void)
  1103. {
  1104. _starpu_create_sampling_directory_if_needed();
  1105. check_bus_config_file();
  1106. load_bus_affinity_file();
  1107. load_bus_latency_file();
  1108. load_bus_bandwidth_file();
  1109. }
  1110. /* (in µs) */
  1111. double _starpu_predict_transfer_time(unsigned src_node, unsigned dst_node, size_t size)
  1112. {
  1113. double bandwidth = bandwidth_matrix[src_node][dst_node];
  1114. double latency = latency_matrix[src_node][dst_node];
  1115. struct starpu_machine_topology *topology = &_starpu_get_machine_config()->topology;
  1116. return latency + (size/bandwidth)*2*(topology->ncudagpus+topology->nopenclgpus);
  1117. }