/* bandwidth.c */
  1. /* StarPU --- Runtime system for heterogeneous multicore architectures.
  2. *
  3. * Copyright (C) 2009-2021 Université de Bordeaux, CNRS (LaBRI UMR 5800), Inria
  4. *
  5. * StarPU is free software; you can redistribute it and/or modify
  6. * it under the terms of the GNU Lesser General Public License as published by
  7. * the Free Software Foundation; either version 2.1 of the License, or (at
  8. * your option) any later version.
  9. *
  10. * StarPU is distributed in the hope that it will be useful, but
  11. * WITHOUT ANY WARRANTY; without even the implied warranty of
  12. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
  13. *
  14. * See the GNU Lesser General Public License in COPYING.LGPL for more details.
  15. */
  16. #include <stdio.h>
  17. #include <unistd.h>
  18. #include <starpu.h>
  19. #include "../helper.h"
  20. /*
  21. * Measure the memory bandwidth available to kernels depending on the number of
  22. * kernels and number of idle workers.
  23. */
#if defined(STARPU_QUICK_CHECK) || defined(STARPU_SANITIZE_LEAK) || defined(STARPU_SANITIZE_ADDRESS)
/* Tiny configuration so quick/sanitized runs stay fast */
static size_t size = 1024;
static unsigned cpustep = 8;
#else
/* Must be bigger than available cache size per core, 64MiB should be enough */
static size_t size = 64UL << 20;
static unsigned cpustep = 1;
#endif
static unsigned noalone = 0;	/* -a: skip the "alone" reference measurements */
static unsigned iter = 30;	/* -n: number of memcpy iterations per measuring task */
static unsigned total_ncpus;	/* Number of cpu workers detected at startup */
static starpu_pthread_barrier_t barrier_begin, barrier_end;	/* Delimit the timed region across workers */
static float *result;	/* Per-worker measured bandwidth, indexed by worker id */
static void **buffers; /* Indexed by logical core number */
/* NOTE(review): padding1/padding2 presumably isolate the polled "finished"
 * flag on its own cache line to avoid false sharing with the other globals —
 * confirm against the original commit. */
static char padding1[STARPU_CACHELINE_SIZE];
static volatile char finished;
static char padding2[STARPU_CACHELINE_SIZE];
static unsigned interleave(unsigned i);
  42. /* Initialize the buffer locally */
  43. void initialize_buffer(void *foo)
  44. {
  45. (void) foo;
  46. unsigned id = starpu_worker_get_id();
  47. #ifdef STARPU_HAVE_POSIX_MEMALIGN
  48. int ret = posix_memalign(&buffers[id], getpagesize(), 2*size);
  49. STARPU_ASSERT(ret == 0);
  50. #else
  51. buffers[id] = malloc(2*size);
  52. #endif
  53. memset(buffers[id], 0, 2*size);
  54. }
  55. /* Actual transfer codelet */
  56. void bw_func(void *descr[], void *arg)
  57. {
  58. (void)descr;
  59. int id = (uintptr_t) arg;
  60. void *src = buffers[id];
  61. void *dst = (void*) ((uintptr_t)src + size);
  62. unsigned i;
  63. double start, stop;
  64. STARPU_PTHREAD_BARRIER_WAIT(&barrier_begin);
  65. start = starpu_timing_now();
  66. for (i = 0; i < iter; i++)
  67. {
  68. memcpy(dst, src, size);
  69. STARPU_SYNCHRONIZE();
  70. }
  71. stop = starpu_timing_now();
  72. STARPU_PTHREAD_BARRIER_WAIT(&barrier_end);
  73. finished = 1;
  74. result[id] = (size*iter) / (stop - start);
  75. }
/* Codelet for the bandwidth measurement: no data handles (the buffer index
 * is passed through cl_arg) and no performance model. */
static struct starpu_codelet bw_codelet =
{
	.cpu_funcs = {bw_func},
	.model = NULL,
	.nbuffers = 0,
};
  82. /* Codelet that waits for completion while doing lots of cpu yields (nop). */
  83. void nop_func(void *descr[], void *arg)
  84. {
  85. (void)descr;
  86. (void)arg;
  87. STARPU_PTHREAD_BARRIER_WAIT(&barrier_begin);
  88. while (!finished)
  89. {
  90. unsigned i;
  91. for (i = 0; i < 1000000; i++)
  92. STARPU_UYIELD();
  93. STARPU_SYNCHRONIZE();
  94. }
  95. }
/* Codelet run on the idle cpus in the NOP scenario: no data, no model. */
static struct starpu_codelet nop_codelet =
{
	.cpu_funcs = {nop_func},
	.model = NULL,
	.nbuffers = 0,
};
  102. /* Codelet that waits for completion while aggressively reading the finished variable. */
  103. void sync_func(void *descr[], void *arg)
  104. {
  105. (void)descr;
  106. (void)arg;
  107. STARPU_PTHREAD_BARRIER_WAIT(&barrier_begin);
  108. while (!finished)
  109. {
  110. STARPU_VALGRIND_YIELD();
  111. STARPU_SYNCHRONIZE();
  112. }
  113. }
/* Codelet run on the idle cpus in the SYNC scenario: no data, no model. */
static struct starpu_codelet sync_codelet =
{
	.cpu_funcs = {sync_func},
	.model = NULL,
	.nbuffers = 0,
};
  120. static void usage(char **argv)
  121. {
  122. fprintf(stderr, "Usage: %s [-n niter] [-s size (MB)] [-c cpustep] [-a]\n", argv[0]);
  123. fprintf(stderr, "\t-n niter\tNumber of iterations\n");
  124. fprintf(stderr, "\t-s size\tBuffer size in MB\n");
  125. fprintf(stderr, "\t-c cpustep\tCpu number increment\n");
  126. fprintf(stderr, "\t-a Do not run the alone test\n");
  127. exit(EXIT_FAILURE);
  128. }
  129. static void parse_args(int argc, char **argv)
  130. {
  131. int c;
  132. while ((c = getopt(argc, argv, "n:s:c:ah")) != -1)
  133. switch(c)
  134. {
  135. case 'n':
  136. iter = atoi(optarg);
  137. break;
  138. case 's':
  139. size = (long)atoi(optarg) << 20;
  140. break;
  141. case 'c':
  142. cpustep = atoi(optarg);
  143. break;
  144. case 'a':
  145. noalone = 1;
  146. break;
  147. case 'h':
  148. usage(argv);
  149. break;
  150. }
  151. }
  152. static unsigned interleave(unsigned i)
  153. {
  154. /* TODO: rather distribute over hierarchy */
  155. if (total_ncpus > 1)
  156. return (i % (total_ncpus/2))*2 + i / (total_ncpus/2);
  157. else
  158. return 0;
  159. }
/* What the cpus that are not running a memcpy task do during a measurement */
enum sleep_type
{
	PAUSE,	/* No filler tasks; bench() may even be started with only nbusy cpus */
	NOP,	/* Idle cpus run nop_func, spinning on cpu yields */
	SYNC,	/* Idle cpus run sync_func, aggressively re-reading "finished" */
	SCHED,	/* Idle cpus get no task at all and are left to the scheduler */
};
  167. static float bench(int *argc, char ***argv, unsigned nbusy, unsigned ncpus, int intl, enum sleep_type sleep)
  168. {
  169. int ret;
  170. unsigned i;
  171. struct starpu_conf conf;
  172. float bw;
  173. starpu_conf_init(&conf);
  174. conf.precedence_over_environment_variables = 1;
  175. starpu_conf_noworker(&conf);
  176. conf.ncpus = ncpus;
  177. if (intl && sleep == PAUSE)
  178. {
  179. conf.use_explicit_workers_bindid = 1;
  180. for (i = 0; i < ncpus; i++)
  181. conf.workers_bindid[i] = interleave(i);
  182. }
  183. ret = starpu_initialize(&conf, argc, argv);
  184. if (ret == -ENODEV) return STARPU_TEST_SKIPPED;
  185. STARPU_CHECK_RETURN_VALUE(ret, "starpu_init");
  186. if (sleep == PAUSE || sleep == SCHED)
  187. /* In these cases we don't have a task on each cpu */
  188. STARPU_PTHREAD_BARRIER_INIT(&barrier_begin, NULL, nbusy);
  189. else
  190. STARPU_PTHREAD_BARRIER_INIT(&barrier_begin, NULL, ncpus);
  191. STARPU_PTHREAD_BARRIER_INIT(&barrier_end, NULL, nbusy);
  192. finished = 0;
  193. for (i = 0; i < ncpus; i++)
  194. result[i] = NAN;
  195. for (i = 0; i < nbusy; i++)
  196. {
  197. struct starpu_task *task = starpu_task_create();
  198. task->cl = &bw_codelet;
  199. if (intl)
  200. task->cl_arg = (void*) (uintptr_t) interleave(i);
  201. else
  202. task->cl_arg = (void*) (uintptr_t) i;
  203. task->execute_on_a_specific_worker = 1;
  204. if (intl && sleep != PAUSE) /* In the pause case we interleaved above */
  205. task->workerid = interleave(i);
  206. else
  207. task->workerid = i;
  208. ret = starpu_task_submit(task);
  209. STARPU_CHECK_RETURN_VALUE(ret, "starpu_task_submit");
  210. }
  211. if (sleep != PAUSE && sleep != SCHED)
  212. {
  213. /* Add waiting tasks */
  214. for ( ; i < ncpus; i++)
  215. {
  216. struct starpu_task *task = starpu_task_create();
  217. switch (sleep)
  218. {
  219. case NOP:
  220. task->cl = &nop_codelet;
  221. break;
  222. case SYNC:
  223. task->cl = &sync_codelet;
  224. break;
  225. default:
  226. STARPU_ASSERT(0);
  227. }
  228. task->execute_on_a_specific_worker = 1;
  229. task->workerid = interleave(i);
  230. ret = starpu_task_submit(task);
  231. STARPU_CHECK_RETURN_VALUE(ret, "starpu_task_submit");
  232. }
  233. }
  234. starpu_task_wait_for_all();
  235. starpu_shutdown();
  236. for (bw = 0., i = 0; i < nbusy; i++)
  237. {
  238. if (intl)
  239. bw += result[interleave(i)];
  240. else
  241. bw += result[i];
  242. }
  243. return bw;
  244. }
  245. int main(int argc, char **argv)
  246. {
  247. int ret;
  248. unsigned n;
  249. struct starpu_conf conf;
  250. float alone, alone_int, alone_int_nop, alone_int_sync, sched, sched_int;
  251. parse_args(argc, argv);
  252. starpu_conf_init(&conf);
  253. conf.precedence_over_environment_variables = 1;
  254. starpu_conf_noworker(&conf);
  255. conf.ncpus = -1;
  256. ret = starpu_initialize(&conf, &argc, &argv);
  257. if (ret == -ENODEV) return STARPU_TEST_SKIPPED;
  258. STARPU_CHECK_RETURN_VALUE(ret, "starpu_init");
  259. total_ncpus = starpu_cpu_worker_get_count();
  260. buffers = malloc(total_ncpus * sizeof(*buffers));
  261. starpu_execute_on_each_worker_ex(initialize_buffer, NULL, STARPU_CPU, "initialize_buffer");
  262. starpu_shutdown();
  263. if (total_ncpus == 0)
  264. return STARPU_TEST_SKIPPED;
  265. result = malloc(total_ncpus * sizeof(result[0]));
  266. printf("# nw\ta comp.\t+sched\teff%%\ta scat.\t+nop\t+sync\t+sched\teff%% vs nop\n");
  267. for (n = cpustep; n <= total_ncpus; n += cpustep)
  268. {
  269. if (noalone)
  270. {
  271. alone = 0.;
  272. alone_int = 0.;
  273. alone_int_nop = 0.;
  274. alone_int_sync = 0.;
  275. }
  276. else
  277. {
  278. alone = bench(&argc, &argv, n, n, 0, PAUSE);
  279. alone_int = bench(&argc, &argv, n, n, 1, PAUSE);
  280. alone_int_nop = bench(&argc, &argv, n, total_ncpus, 1, NOP);
  281. alone_int_sync = bench(&argc, &argv, n, total_ncpus, 1, SYNC);
  282. }
  283. sched = bench(&argc, &argv, n, total_ncpus, 0, SCHED);
  284. sched_int = bench(&argc, &argv, n, total_ncpus, 1, SCHED);
  285. printf("%u\t%.2f\t%.2f\t%.2f\t%.2f\t%.2f\t%.2f\t%.2f\t%.2f\n",
  286. n,
  287. alone/1000,
  288. sched/1000, sched*100/alone,
  289. alone_int/1000,
  290. alone_int_nop/1000,
  291. alone_int_sync/1000,
  292. sched_int/1000, sched_int*100/alone_int_nop);
  293. fflush(stdout);
  294. }
  295. free(result);
  296. for (n = 0; n < total_ncpus; n++)
  297. free(buffers[n]);
  298. free(buffers);
  299. return EXIT_SUCCESS;
  300. }