bandwidth.c

/* StarPU --- Runtime system for heterogeneous multicore architectures.
 *
 * Copyright (C) 2009-2021 Université de Bordeaux, CNRS (LaBRI UMR 5800), Inria
 *
 * StarPU is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation; either version 2.1 of the License, or (at
 * your option) any later version.
 *
 * StarPU is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * See the GNU Lesser General Public License in COPYING.LGPL for more details.
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <starpu.h>
#include "../helper.h"

/*
 * Measure the memory bandwidth available to kernels depending on the number of
 * kernels and number of idle workers.
 */
#if defined(STARPU_QUICK_CHECK) || defined(STARPU_SANITIZE_LEAK) || defined(STARPU_SANITIZE_ADDRESS)
static size_t size = 1024;
#else
/* Must be bigger than available cache size per core, 64MiB should be enough */
static size_t size = 64UL << 20;
#endif

static unsigned cpustep = 0;
static unsigned noalone = 0;
static unsigned iter = 30;

static unsigned total_ncpus;

static starpu_pthread_barrier_t barrier_begin, barrier_end;

static float *result;
static void **buffers; /* Indexed by logical core number */
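
/* The cache-line-sized padding below keeps the 'finished' flag on its own
 * cache line: waiting workers poll it while the busy workers stream memory,
 * so false sharing with the surrounding globals would perturb the measurement. */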
static char padding1[STARPU_CACHELINE_SIZE];
static volatile char finished;
static char padding2[STARPU_CACHELINE_SIZE];

static unsigned interleave(unsigned i);
/* Initialize the buffer locally */
void initialize_buffer(void *foo)
{
	(void) foo;
	unsigned id = starpu_worker_get_id();
#ifdef STARPU_HAVE_POSIX_MEMALIGN
	int ret = posix_memalign(&buffers[id], getpagesize(), 2*size);
	STARPU_ASSERT(ret == 0);
#else
	buffers[id] = malloc(2*size);
#endif
	memset(buffers[id], 0, 2*size);
}
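
/* Each per-worker buffer is 2*size bytes: bw_func copies the first half into
 * the second half, so both source and destination live in memory that was
 * initialized by the worker that uses them. */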
/* Actual transfer codelet */
void bw_func(void *descr[], void *arg)
{
	(void)descr;
	int id = (uintptr_t) arg;
	void *src = buffers[id];
	void *dst = (void*) ((uintptr_t)src + size);
	unsigned i;
	double start, stop;

	STARPU_PTHREAD_BARRIER_WAIT(&barrier_begin);
	start = starpu_timing_now();
	for (i = 0; i < iter; i++)
	{
		memcpy(dst, src, size);
		STARPU_SYNCHRONIZE();
	}
	stop = starpu_timing_now();
	STARPU_PTHREAD_BARRIER_WAIT(&barrier_end);

	finished = 1;
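	/* starpu_timing_now() is in microseconds, so this is bytes per µs,
	 * i.e. roughly MB/s; main() divides by 1000 to print GB/s. */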
	result[id] = (size*iter) / (stop - start);
}

static struct starpu_codelet bw_codelet =
{
	.cpu_funcs = {bw_func},
	.model = NULL,
	.nbuffers = 0,
};

/* Codelet that waits for completion while doing lots of cpu yields (nop). */
void nop_func(void *descr[], void *arg)
{
	(void)descr;
	(void)arg;
	STARPU_PTHREAD_BARRIER_WAIT(&barrier_begin);
	while (!finished)
	{
		unsigned i;
		for (i = 0; i < 1000000; i++)
			STARPU_UYIELD();
		STARPU_SYNCHRONIZE();
	}
}

static struct starpu_codelet nop_codelet =
{
	.cpu_funcs = {nop_func},
	.model = NULL,
	.nbuffers = 0,
};

/* Codelet that waits for completion while aggressively reading the finished variable. */
void sync_func(void *descr[], void *arg)
{
	(void)descr;
	(void)arg;
	STARPU_PTHREAD_BARRIER_WAIT(&barrier_begin);
	while (!finished)
	{
		STARPU_VALGRIND_YIELD();
		STARPU_SYNCHRONIZE();
	}
}

static struct starpu_codelet sync_codelet =
{
	.cpu_funcs = {sync_func},
	.model = NULL,
	.nbuffers = 0,
};

static void usage(char **argv)
{
	fprintf(stderr, "Usage: %s [-n niter] [-s size (MB)] [-c cpustep] [-a]\n", argv[0]);
	fprintf(stderr, "\t-n niter\tNumber of iterations\n");
	fprintf(stderr, "\t-s size\tBuffer size in MB\n");
	fprintf(stderr, "\t-c cpustep\tCpu number increment\n");
	fprintf(stderr, "\t-a Do not run the alone test\n");
	exit(EXIT_FAILURE);
}

static void parse_args(int argc, char **argv)
{
	int c;
	while ((c = getopt(argc, argv, "n:s:c:ah")) != -1)
		switch (c)
		{
		case 'n':
			iter = atoi(optarg);
			break;
		case 's':
			size = (long)atoi(optarg) << 20;
			break;
		case 'c':
			cpustep = atoi(optarg);
			break;
		case 'a':
			noalone = 1;
			break;
		case 'h':
			usage(argv);
			break;
		}
}
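
/* With total_ncpus == 8 this maps indices 0..7 to cores 0,2,4,6,1,3,5,7:
 * the first half of the indices land on even cores, the second half on odd
 * ones, which crudely scatters the busy workers across the machine. */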
static unsigned interleave(unsigned i)
{
	/* TODO: rather distribute over hierarchy */
	if (total_ncpus > 1)
		return (i % (total_ncpus/2))*2 + i / (total_ncpus/2);
	else
		return 0;
}

enum sleep_type
{
	PAUSE,
	NOP,
	SYNC,
	SCHED,
};
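
/* What the cores that are not measuring bandwidth do during a run:
 * - PAUSE: they get no worker at all (only nbusy == ncpus workers are started);
 * - NOP:   they run nop_codelet, i.e. loop on cpu yields;
 * - SYNC:  they run sync_codelet, i.e. spin on the 'finished' flag;
 * - SCHED: all total_ncpus workers are started but only nbusy tasks are
 *          submitted, so the remaining workers are left to the scheduler with
 *          nothing to run.
 * bench() starts 'ncpus' workers, submits 'nbusy' copying tasks, and binds or
 * places them with interleave() when 'intl' is set. */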
static float bench(int *argc, char ***argv, unsigned nbusy, unsigned ncpus, int intl, enum sleep_type sleep)
{
	int ret;
	unsigned i;
	struct starpu_conf conf;
	float bw;

	starpu_conf_init(&conf);
	conf.precedence_over_environment_variables = 1;
	starpu_conf_noworker(&conf);
	conf.ncpus = ncpus;

	if (intl && sleep == PAUSE)
	{
		conf.use_explicit_workers_bindid = 1;
		for (i = 0; i < ncpus; i++)
			conf.workers_bindid[i] = interleave(i);
	}

	ret = starpu_initialize(&conf, argc, argv);
	if (ret == -ENODEV) return STARPU_TEST_SKIPPED;
	STARPU_CHECK_RETURN_VALUE(ret, "starpu_init");

	if (sleep == PAUSE || sleep == SCHED)
		/* In these cases we don't have a task on each cpu */
		STARPU_PTHREAD_BARRIER_INIT(&barrier_begin, NULL, nbusy);
	else
		STARPU_PTHREAD_BARRIER_INIT(&barrier_begin, NULL, ncpus);
	STARPU_PTHREAD_BARRIER_INIT(&barrier_end, NULL, nbusy);

	finished = 0;
	for (i = 0; i < ncpus; i++)
		result[i] = NAN;

	for (i = 0; i < nbusy; i++)
	{
		struct starpu_task *task = starpu_task_create();
		task->cl = &bw_codelet;
		if (intl)
			task->cl_arg = (void*) (uintptr_t) interleave(i);
		else
			task->cl_arg = (void*) (uintptr_t) i;
		task->execute_on_a_specific_worker = 1;
		if (intl && sleep != PAUSE) /* In the pause case we interleaved above */
			task->workerid = interleave(i);
		else
			task->workerid = i;
		ret = starpu_task_submit(task);
		STARPU_CHECK_RETURN_VALUE(ret, "starpu_task_submit");
	}

	if (sleep != PAUSE && sleep != SCHED)
	{
		/* Add waiting tasks */
		for ( ; i < ncpus; i++)
		{
			struct starpu_task *task = starpu_task_create();
			switch (sleep)
			{
			case NOP:
				task->cl = &nop_codelet;
				break;
			case SYNC:
				task->cl = &sync_codelet;
				break;
			default:
				STARPU_ASSERT(0);
			}
			task->execute_on_a_specific_worker = 1;
			task->workerid = interleave(i);
			ret = starpu_task_submit(task);
			STARPU_CHECK_RETURN_VALUE(ret, "starpu_task_submit");
		}
	}

	starpu_task_wait_for_all();
	starpu_shutdown();
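
	/* Sum the per-worker figures of the nbusy measuring workers to get the
	 * aggregated bandwidth for this configuration. */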
	for (bw = 0., i = 0; i < nbusy; i++)
	{
		if (intl)
			bw += result[interleave(i)];
		else
			bw += result[i];
	}
	return bw;
}

int main(int argc, char **argv)
{
	int ret;
	unsigned n;
	struct starpu_conf conf;
	float alone, alone_int, alone_int_nop, alone_int_sync, sched, sched_int;

	parse_args(argc, argv);

	starpu_conf_init(&conf);
	conf.precedence_over_environment_variables = 1;
	starpu_conf_noworker(&conf);
	conf.ncpus = -1;

	ret = starpu_initialize(&conf, &argc, &argv);
	if (ret == -ENODEV) return STARPU_TEST_SKIPPED;
	STARPU_CHECK_RETURN_VALUE(ret, "starpu_init");

	total_ncpus = starpu_cpu_worker_get_count();

	buffers = malloc(total_ncpus * sizeof(*buffers));
	starpu_execute_on_each_worker_ex(initialize_buffer, NULL, STARPU_CPU, "initialize_buffer");
	starpu_shutdown();

	if (total_ncpus == 0)
		return STARPU_TEST_SKIPPED;

	result = malloc(total_ncpus * sizeof(result[0]));

	if (cpustep == 0)
	{
#if defined(STARPU_QUICK_CHECK) || defined(STARPU_SANITIZE_LEAK) || defined(STARPU_SANITIZE_ADDRESS)
		cpustep = total_ncpus / 2;
#elif defined(STARPU_LONG_CHECK)
		cpustep = 1;
#else
		cpustep = total_ncpus / 8;
#endif
		if (cpustep == 0)
			cpustep = 1;
	}
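
	/* Output columns: number of busy workers; aggregated bandwidth (GB/s)
	 * with the busy workers packed and the other cores off ("a comp."), the
	 * same busy count under the scheduler with all cores up ("+sched") and
	 * its efficiency vs. the compact alone case ("eff%"); then the
	 * interleaved ("a scat.") alone case, the scattered case with nop resp.
	 * sync tasks on the idle cores, the scattered scheduler case, and its
	 * efficiency vs. the nop case. */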
	printf("# nw\ta comp.\t+sched\teff%%\ta scat.\t+nop\t+sync\t+sched\teff%% vs nop\n");
	for (n = cpustep; n <= total_ncpus; n += cpustep)
	{
		if (noalone)
		{
			alone = 0.;
			alone_int = 0.;
			alone_int_nop = 0.;
			alone_int_sync = 0.;
		}
		else
		{
			alone = bench(&argc, &argv, n, n, 0, PAUSE);
			alone_int = bench(&argc, &argv, n, n, 1, PAUSE);
			alone_int_nop = bench(&argc, &argv, n, total_ncpus, 1, NOP);
			alone_int_sync = bench(&argc, &argv, n, total_ncpus, 1, SYNC);
		}
		sched = bench(&argc, &argv, n, total_ncpus, 0, SCHED);
		sched_int = bench(&argc, &argv, n, total_ncpus, 1, SCHED);

		printf("%u\t%.2f\t%.2f\t%.2f\t%.2f\t%.2f\t%.2f\t%.2f\t%.2f\n",
				n,
				alone/1000,
				sched/1000, sched*100/alone,
				alone_int/1000,
				alone_int_nop/1000,
				alone_int_sync/1000,
				sched_int/1000, sched_int*100/alone_int_nop);
		fflush(stdout);
	}

	free(result);
	for (n = 0; n < total_ncpus; n++)
		free(buffers[n]);
	free(buffers);
	return EXIT_SUCCESS;
}