/* StarPU --- Runtime system for heterogeneous multicore architectures.
 *
 * Copyright (C) 2009-2020 Université de Bordeaux, CNRS (LaBRI UMR 5800), Inria
 *
 * StarPU is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation; either version 2.1 of the License, or (at
 * your option) any later version.
 *
 * StarPU is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * See the GNU Lesser General Public License in COPYING.LGPL for more details.
 */
#include <stdio.h>
#include <string.h> /* memcpy(), memset() */
#include <unistd.h>
#include <starpu.h>
#include "../helper.h"
/*
 * Measure the memory bandwidth available to kernels depending on the number of
 * kernels and number of idle workers.
 */
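/*
 * The table printed on stdout has one row per number of busy workers (nw):
 * "a comp." is the aggregate bandwidth when only the busy workers exist and
 * are bound compactly, "a scat." the same with interleaved (scattered)
 * binding, "+sched" adds idle workers left to the scheduler, and "+nop" and
 * "+sync" add idle workers running the nop/sync waiting codelets below.  The
 * two "eff%" columns give "+sched" as a percentage of "a comp." and of "+nop"
 * respectively.  Each worker measures bytes per microsecond and the printout
 * divides by 1000, so the figures are roughly GB/s.
 */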
#ifdef STARPU_QUICK_CHECK
static size_t size = 1024;
static unsigned cpustep = 4;
#else
/* Must be bigger than available cache size per core, 64MiB should be enough */
static size_t size = 64UL << 20;
static unsigned cpustep = 1;
#endif
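/*
 * Note that each CPU worker allocates a 2*size buffer (source and destination
 * halves), so a default run touches roughly 128MiB per core.
 */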
static unsigned noalone = 0;
static unsigned iter = 30;
static unsigned total_ncpus;
static starpu_pthread_barrier_t barrier_begin, barrier_end;
static float *result;
static void **buffers; /* Indexed by logical core number */

/* Keep the busy-polled 'finished' flag on its own cache line, so that the
 * waiting codelets below do not cause false sharing with neighbouring data. */
static char padding1[STARPU_CACHELINE_SIZE];
static volatile char finished;
static char padding2[STARPU_CACHELINE_SIZE];

static unsigned interleave(unsigned i);
/* Initialize the buffer locally: each worker allocates and touches its own
 * buffer, so that the pages are first-touched by that worker. */
void initialize_buffer(void *foo)
{
	unsigned id = starpu_worker_get_id();
#ifdef STARPU_HAVE_POSIX_MEMALIGN
	int ret = posix_memalign(&buffers[id], getpagesize(), 2*size);
	STARPU_ASSERT(ret == 0);
#else
	buffers[id] = malloc(2*size);
#endif
	memset(buffers[id], 0, 2*size);
}
/* Actual transfer codelet: repeatedly copy the first half of the worker's
 * buffer into the second half and time it. */
void bw_func(void *descr[], void *arg)
{
	int id = (uintptr_t) arg;
	void *src = buffers[id];
	void *dst = (void*) ((uintptr_t)src + size);
	unsigned i;
	double start, stop;

	STARPU_PTHREAD_BARRIER_WAIT(&barrier_begin);
	start = starpu_timing_now();
	for (i = 0; i < iter; i++)
	{
		memcpy(dst, src, size);
		STARPU_SYNCHRONIZE();
	}
	stop = starpu_timing_now();
	STARPU_PTHREAD_BARRIER_WAIT(&barrier_end);
	finished = 1;

	/* Bytes per microsecond, i.e. MB/s */
	result[id] = (size*iter) / (stop - start);
}
static struct starpu_codelet bw_codelet =
{
	.cpu_funcs = {bw_func},
	.model = NULL,
	.nbuffers = 0,
};
/* Codelet that waits for completion while doing lots of cpu yields (nop). */
void nop_func(void *descr[], void *arg)
{
	STARPU_PTHREAD_BARRIER_WAIT(&barrier_begin);
	while (!finished)
	{
		unsigned i;
		for (i = 0; i < 1000000; i++)
			STARPU_UYIELD();
		STARPU_SYNCHRONIZE();
	}
}
static struct starpu_codelet nop_codelet =
{
	.cpu_funcs = {nop_func},
	.model = NULL,
	.nbuffers = 0,
};
/* Codelet that waits for completion while aggressively reading the finished variable. */
void sync_func(void *descr[], void *arg)
{
	STARPU_PTHREAD_BARRIER_WAIT(&barrier_begin);
	while (!finished)
	{
		STARPU_VALGRIND_YIELD();
		STARPU_SYNCHRONIZE();
	}
}
static struct starpu_codelet sync_codelet =
{
	.cpu_funcs = {sync_func},
	.model = NULL,
	.nbuffers = 0,
};
static void usage(char **argv)
{
	fprintf(stderr, "Usage: %s [-n niter] [-s size (MB)] [-c cpustep] [-a]\n", argv[0]);
	fprintf(stderr, "\t-n niter\tNumber of iterations\n");
	fprintf(stderr, "\t-s size\tBuffer size in MB\n");
	fprintf(stderr, "\t-c cpustep\tCpu number increment\n");
	fprintf(stderr, "\t-a Do not run the alone test\n");
	exit(EXIT_FAILURE);
}
static void parse_args(int argc, char **argv)
{
	int c;
	while ((c = getopt(argc, argv, "n:s:c:ah")) != -1)
		switch(c)
		{
		case 'n':
			iter = atoi(optarg);
			break;
		case 's':
			size = (long)atoi(optarg) << 20;
			break;
		case 'c':
			cpustep = atoi(optarg);
			break;
		case 'a':
			noalone = 1;
			break;
		case 'h':
			usage(argv);
			break;
		}
}
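/*
 * For example (the exact binary name depends on the build),
 * "./bandwidth -n 30 -s 64 -c 2" runs 30 copy iterations over 64MB buffers
 * and increases the number of busy CPUs by 2 between measurements; -a skips
 * the "alone" reference runs.
 */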
/* Scatter consecutive task indices two cores apart: with e.g. 8 CPUs,
 * i = 0..7 maps to cores 0,2,4,6,1,3,5,7. */
static unsigned interleave(unsigned i)
{
	/* TODO: rather distribute over hierarchy */
	if (total_ncpus > 1)
		return (i % (total_ncpus/2))*2 + i / (total_ncpus/2);
	else
		return 0;
}
/* How the non-busy CPUs behave during the measurement */
enum sleep_type {
	PAUSE, /* idle CPUs are not given to StarPU at all */
	NOP,   /* idle workers run nop_func, yielding between checks */
	SYNC,  /* idle workers run sync_func, busy-reading 'finished' */
	SCHED, /* idle workers are left to the scheduler, with no task submitted to them */
};
/* Run one measurement: start 'ncpus' CPU workers, submit 'nbusy' bandwidth
 * tasks (bound interleaved if 'intl'), let the remaining workers behave
 * according to 'sleep', and return the aggregate measured bandwidth. */
static float bench(int *argc, char ***argv, unsigned nbusy, unsigned ncpus, int intl, enum sleep_type sleep)
{
	int ret;
	unsigned i;
	struct starpu_conf conf;
	float bw;

	starpu_conf_init(&conf);
	conf.precedence_over_environment_variables = 1;
	conf.ncuda = 0;
	conf.nopencl = 0;
	conf.nmic = 0;
	conf.nmpi_ms = 0;
	conf.ncpus = ncpus;

	if (intl && sleep == PAUSE)
	{
		conf.use_explicit_workers_bindid = 1;
		for (i = 0; i < ncpus; i++)
			conf.workers_bindid[i] = interleave(i);
	}

	ret = starpu_initialize(&conf, argc, argv);
	if (ret == -ENODEV) return STARPU_TEST_SKIPPED;
	STARPU_CHECK_RETURN_VALUE(ret, "starpu_init");

	if (sleep == PAUSE || sleep == SCHED)
		/* In these cases we don't have a task on each cpu */
		STARPU_PTHREAD_BARRIER_INIT(&barrier_begin, NULL, nbusy);
	else
		STARPU_PTHREAD_BARRIER_INIT(&barrier_begin, NULL, ncpus);
	STARPU_PTHREAD_BARRIER_INIT(&barrier_end, NULL, nbusy);

	finished = 0;
	for (i = 0; i < ncpus; i++)
		result[i] = NAN;
	/* One bandwidth task per busy worker */
	for (i = 0; i < nbusy; i++)
	{
		struct starpu_task *task = starpu_task_create();
		task->cl = &bw_codelet;
		if (intl)
			task->cl_arg = (void*) (uintptr_t) interleave(i);
		else
			task->cl_arg = (void*) (uintptr_t) i;
		task->execute_on_a_specific_worker = 1;
		if (intl && sleep != PAUSE) /* In the pause case we interleaved above */
			task->workerid = interleave(i);
		else
			task->workerid = i;
		ret = starpu_task_submit(task);
		STARPU_CHECK_RETURN_VALUE(ret, "starpu_task_submit");
	}

	if (sleep != PAUSE && sleep != SCHED)
	{
		/* Add waiting tasks */
		for ( ; i < ncpus; i++)
		{
			struct starpu_task *task = starpu_task_create();
			switch (sleep)
			{
			case NOP:
				task->cl = &nop_codelet;
				break;
			case SYNC:
				task->cl = &sync_codelet;
				break;
			default:
				STARPU_ASSERT(0);
			}
			task->execute_on_a_specific_worker = 1;
			task->workerid = interleave(i);
			ret = starpu_task_submit(task);
			STARPU_CHECK_RETURN_VALUE(ret, "starpu_task_submit");
		}
	}
	starpu_task_wait_for_all();
	starpu_shutdown();

	/* Sum the bandwidth measured by each busy worker */
	for (bw = 0., i = 0; i < nbusy; i++)
	{
		if (intl)
			bw += result[interleave(i)];
		else
			bw += result[i];
	}
	return bw;
}
int main(int argc, char **argv)
{
	int ret;
	unsigned n;
	struct starpu_conf conf;
	float alone, alone_int, alone_int_nop, alone_int_sync, sched, sched_int;

	parse_args(argc, argv);

	/* Start StarPU once just to count the CPU workers and let each of them
	 * allocate and initialize its buffer locally. */
	starpu_conf_init(&conf);
	conf.precedence_over_environment_variables = 1;
	conf.ncuda = 0;
	conf.nopencl = 0;
	conf.nmic = 0;
	conf.nmpi_ms = 0;
	ret = starpu_initialize(&conf, &argc, &argv);
	if (ret == -ENODEV) return STARPU_TEST_SKIPPED;
	STARPU_CHECK_RETURN_VALUE(ret, "starpu_init");
	total_ncpus = starpu_cpu_worker_get_count();
	buffers = malloc(total_ncpus * sizeof(*buffers));
	starpu_execute_on_each_worker_ex(initialize_buffer, NULL, STARPU_CPU, "init_buffer");
	starpu_shutdown();

	if (total_ncpus == 0)
		return STARPU_TEST_SKIPPED;

	result = malloc(total_ncpus * sizeof(result[0]));

	printf("# nw\ta comp.\t+sched\teff%%\ta scat.\t+nop\t+sync\t+sched\teff%% vs nop\n");
	for (n = cpustep; n <= total_ncpus; n += cpustep)
	{
		if (noalone)
		{
			alone = 0.;
			alone_int = 0.;
			alone_int_nop = 0.;
			alone_int_sync = 0.;
		}
		else
		{
			/* Reference runs: n busy workers, compact or interleaved
			 * binding, with idle workers absent or spinning. */
			alone = bench(&argc, &argv, n, n, 0, PAUSE);
			alone_int = bench(&argc, &argv, n, n, 1, PAUSE);
			alone_int_nop = bench(&argc, &argv, n, total_ncpus, 1, NOP);
			alone_int_sync = bench(&argc, &argv, n, total_ncpus, 1, SYNC);
		}
		/* Runs with the remaining workers left to the scheduler */
		sched = bench(&argc, &argv, n, total_ncpus, 0, SCHED);
		sched_int = bench(&argc, &argv, n, total_ncpus, 1, SCHED);

		printf("%u\t%.2f\t%.2f\t%.2f\t%.2f\t%.2f\t%.2f\t%.2f\t%.2f\n",
			n,
			alone/1000,
			sched/1000, sched*100/alone,
			alone_int/1000,
			alone_int_nop/1000,
			alone_int_sync/1000,
			sched_int/1000, sched_int*100/alone_int_nop);
		fflush(stdout);
	}
	free(result);
	for (n = 0; n < total_ncpus; n++)
		free(buffers[n]);
	free(buffers);

	return EXIT_SUCCESS;
}