bandwidth.c

/* StarPU --- Runtime system for heterogeneous multicore architectures.
 *
 * Copyright (C) 2009-2020 Université de Bordeaux, CNRS (LaBRI UMR 5800), Inria
 *
 * StarPU is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation; either version 2.1 of the License, or (at
 * your option) any later version.
 *
 * StarPU is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * See the GNU Lesser General Public License in COPYING.LGPL for more details.
 */
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <unistd.h>
#include <starpu.h>
#include "../helper.h"

/*
 * Measure the memory bandwidth available to kernels depending on the number of
 * kernels and number of idle workers.
 */
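
/*
 * Example invocation (option letters as parsed in parse_args() below; the
 * numbers are only illustrative):
 *   ./bandwidth -n 10 -s 128 -c 2
 * runs 10 copy iterations per measurement on 128 MiB buffers, adding 2 CPU
 * workers per measurement step; -a skips the "alone" reference runs.
 */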

#ifdef STARPU_QUICK_CHECK
static size_t size = 1024;
static unsigned cpustep = 4;
#else
/* Must be bigger than available cache size per core, 64MiB should be enough */
static size_t size = 64UL << 20;
static unsigned cpustep = 1;
#endif

static unsigned noalone = 0;
static unsigned iter = 30;
static unsigned total_ncpus;
static starpu_pthread_barrier_t barrier_begin, barrier_end;
static float *result;
static void **buffers;	/* Indexed by logical core number */
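
/* The nop and sync waiting tasks poll this flag while the copies run, so it is
 * padded on both sides to sit on its own cache line and avoid false sharing
 * with the surrounding data. */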
static char padding1[STARPU_CACHELINE_SIZE];
static volatile char finished;
static char padding2[STARPU_CACHELINE_SIZE];

static unsigned interleave(unsigned i);

/* Initialize the buffer locally */
void initialize_buffer(void *foo)
{
	unsigned id = starpu_worker_get_id();
#ifdef STARPU_HAVE_POSIX_MEMALIGN
	int ret = posix_memalign(&buffers[id], getpagesize(), 2*size);
	STARPU_ASSERT(ret == 0);
#else
	buffers[id] = malloc(2*size);
#endif
	memset(buffers[id], 0, 2*size);
}

/* Actual transfer codelet */
void bw_func(void *descr[], void *arg)
{
	int id = (uintptr_t) arg;
	void *src = buffers[id];
	void *dst = (void*) ((uintptr_t)src + size);
	unsigned i;
	double start, stop;

	STARPU_PTHREAD_BARRIER_WAIT(&barrier_begin);
	start = starpu_timing_now();
	for (i = 0; i < iter; i++)
	{
		memcpy(dst, src, size);
		STARPU_SYNCHRONIZE();
	}
	stop = starpu_timing_now();
	STARPU_PTHREAD_BARRIER_WAIT(&barrier_end);

	finished = 1;
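	/* starpu_timing_now() is in microseconds, so this is bytes per
	 * microsecond, i.e. MB/s; main() divides by 1000 before printing,
	 * giving GB/s. */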
	result[id] = (size*iter) / (stop - start);
}

static struct starpu_codelet bw_codelet =
{
	.cpu_funcs = {bw_func},
	.model = NULL,
	.nbuffers = 0,
};

/* Codelet that waits for completion while doing lots of cpu yields (nop). */
void nop_func(void *descr[], void *arg)
{
	STARPU_PTHREAD_BARRIER_WAIT(&barrier_begin);
	while (!finished)
	{
		unsigned i;
		for (i = 0; i < 1000000; i++)
			STARPU_UYIELD();
		STARPU_SYNCHRONIZE();
	}
}

static struct starpu_codelet nop_codelet =
{
	.cpu_funcs = {nop_func},
	.model = NULL,
	.nbuffers = 0,
};

/* Codelet that waits for completion while aggressively reading the finished variable. */
void sync_func(void *descr[], void *arg)
{
	STARPU_PTHREAD_BARRIER_WAIT(&barrier_begin);
	while (!finished)
	{
		STARPU_VALGRIND_YIELD();
		STARPU_SYNCHRONIZE();
	}
}

static struct starpu_codelet sync_codelet =
{
	.cpu_funcs = {sync_func},
	.model = NULL,
	.nbuffers = 0,
};

static void usage(char **argv)
{
	fprintf(stderr, "Usage: %s [-n niter] [-s size (MB)] [-c cpustep] [-a]\n", argv[0]);
	fprintf(stderr, "\t-n niter\tNumber of iterations\n");
	fprintf(stderr, "\t-s size\tBuffer size in MB\n");
	fprintf(stderr, "\t-c cpustep\tCpu number increment\n");
	fprintf(stderr, "\t-a Do not run the alone test\n");
	exit(EXIT_FAILURE);
}

static void parse_args(int argc, char **argv)
{
	int c;
	while ((c = getopt(argc, argv, "n:s:c:ah")) != -1)
		switch(c)
		{
			case 'n':
				iter = atoi(optarg);
				break;
			case 's':
				size = (long)atoi(optarg) << 20;
				break;
			case 'c':
				cpustep = atoi(optarg);
				break;
			case 'a':
				noalone = 1;
				break;
			case 'h':
				usage(argv);
				break;
		}
}

static unsigned interleave(unsigned i)
{
	/* TODO: rather distribute over hierarchy */
	if (total_ncpus > 1)
		return (i % (total_ncpus/2))*2 + i / (total_ncpus/2);
	else
		return 0;
}
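
/*
 * For example (illustrative), with total_ncpus == 8, interleave() maps
 * i = 0..7 to 0,2,4,6,1,3,5,7: the first tasks are spread over every other
 * logical core instead of being packed on the first ones.
 */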

enum sleep_type
{
	PAUSE,
	NOP,
	SYNC,
	SCHED,
};
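
/*
 * Run one measurement: submit nbusy copy tasks on an ncpus-worker StarPU
 * instance and return the sum of the per-worker bandwidths measured by
 * bw_func(). The sleep parameter chooses what the remaining workers do:
 * with PAUSE the caller passes ncpus == nbusy so there are no extra workers,
 * with SCHED the extra workers are left idle to the scheduler, with NOP they
 * run a yielding busy loop, and with SYNC they busy-poll the finished flag.
 */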
static float bench(int *argc, char ***argv, unsigned nbusy, unsigned ncpus, int intl, enum sleep_type sleep)
{
	int ret;
	unsigned i;
	struct starpu_conf conf;
	float bw;

	starpu_conf_init(&conf);
	conf.precedence_over_environment_variables = 1;
	starpu_conf_noworker(&conf);
	conf.ncpus = ncpus;

	if (intl && sleep == PAUSE)
	{
		conf.use_explicit_workers_bindid = 1;
		for (i = 0; i < ncpus; i++)
			conf.workers_bindid[i] = interleave(i);
	}

	ret = starpu_initialize(&conf, argc, argv);
	if (ret == -ENODEV) return STARPU_TEST_SKIPPED;
	STARPU_CHECK_RETURN_VALUE(ret, "starpu_init");

	if (sleep == PAUSE || sleep == SCHED)
		/* In these cases we don't have a task on each cpu */
		STARPU_PTHREAD_BARRIER_INIT(&barrier_begin, NULL, nbusy);
	else
		STARPU_PTHREAD_BARRIER_INIT(&barrier_begin, NULL, ncpus);
	STARPU_PTHREAD_BARRIER_INIT(&barrier_end, NULL, nbusy);
	finished = 0;

	for (i = 0; i < ncpus; i++)
		result[i] = NAN;

	for (i = 0; i < nbusy; i++)
	{
		struct starpu_task *task = starpu_task_create();
		task->cl = &bw_codelet;
		if (intl)
			task->cl_arg = (void*) (uintptr_t) interleave(i);
		else
			task->cl_arg = (void*) (uintptr_t) i;
		task->execute_on_a_specific_worker = 1;
		if (intl && sleep != PAUSE) /* In the pause case we interleaved above */
			task->workerid = interleave(i);
		else
			task->workerid = i;
		ret = starpu_task_submit(task);
		STARPU_CHECK_RETURN_VALUE(ret, "starpu_task_submit");
	}

	if (sleep != PAUSE && sleep != SCHED)
	{
		/* Add waiting tasks */
		for ( ; i < ncpus; i++)
		{
			struct starpu_task *task = starpu_task_create();
			switch (sleep)
			{
				case NOP:
					task->cl = &nop_codelet;
					break;
				case SYNC:
					task->cl = &sync_codelet;
					break;
				default:
					STARPU_ASSERT(0);
			}
			task->execute_on_a_specific_worker = 1;
			task->workerid = interleave(i);
			ret = starpu_task_submit(task);
			STARPU_CHECK_RETURN_VALUE(ret, "starpu_task_submit");
		}
	}

	starpu_task_wait_for_all();
	starpu_shutdown();

	for (bw = 0., i = 0; i < nbusy; i++)
	{
		if (intl)
			bw += result[interleave(i)];
		else
			bw += result[i];
	}
	return bw;
}

int main(int argc, char **argv)
{
	int ret;
	unsigned n;
	struct starpu_conf conf;
	float alone, alone_int, alone_int_nop, alone_int_sync, sched, sched_int;

	parse_args(argc, argv);

	starpu_conf_init(&conf);
	conf.precedence_over_environment_variables = 1;
	starpu_conf_noworker(&conf);
	conf.ncpus = -1;
	ret = starpu_initialize(&conf, &argc, &argv);
	if (ret == -ENODEV) return STARPU_TEST_SKIPPED;
	STARPU_CHECK_RETURN_VALUE(ret, "starpu_init");

	total_ncpus = starpu_cpu_worker_get_count();

	buffers = malloc(total_ncpus * sizeof(*buffers));
	starpu_execute_on_each_worker_ex(initialize_buffer, NULL, STARPU_CPU, "init_buffer");
	starpu_shutdown();

	if (total_ncpus == 0)
		return STARPU_TEST_SKIPPED;

	result = malloc(total_ncpus * sizeof(result[0]));
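
	/* Columns (bandwidths in GB/s): number of busy workers; busy workers
	 * packed on the first cores, alone and then with the scheduler running
	 * on the remaining cores, plus the efficiency of the latter vs the
	 * former; busy workers scattered with interleave(), alone and then
	 * with nop, sync and scheduler activity on the idle cores, plus the
	 * efficiency of the last column vs the nop case. */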
	printf("# nw\ta comp.\t+sched\teff%%\ta scat.\t+nop\t+sync\t+sched\teff%% vs nop\n");
	for (n = cpustep; n <= total_ncpus; n += cpustep)
	{
		if (noalone)
		{
			alone = 0.;
			alone_int = 0.;
			alone_int_nop = 0.;
			alone_int_sync = 0.;
		}
		else
		{
			alone = bench(&argc, &argv, n, n, 0, PAUSE);
			alone_int = bench(&argc, &argv, n, n, 1, PAUSE);
			alone_int_nop = bench(&argc, &argv, n, total_ncpus, 1, NOP);
			alone_int_sync = bench(&argc, &argv, n, total_ncpus, 1, SYNC);
		}
		sched = bench(&argc, &argv, n, total_ncpus, 0, SCHED);
		sched_int = bench(&argc, &argv, n, total_ncpus, 1, SCHED);

		printf("%u\t%.2f\t%.2f\t%.2f\t%.2f\t%.2f\t%.2f\t%.2f\t%.2f\n",
			n,
			alone/1000,
			sched/1000, sched*100/alone,
			alone_int/1000,
			alone_int_nop/1000,
			alone_int_sync/1000,
			sched_int/1000, sched_int*100/alone_int_nop);
		fflush(stdout);
	}

	free(result);
	for (n = 0; n < total_ncpus; n++)
		free(buffers[n]);
	free(buffers);
	return EXIT_SUCCESS;
}