regression_based_memset.c

/* StarPU --- Runtime system for heterogeneous multicore architectures.
 *
 * Copyright (C) 2011-2020  Université de Bordeaux, CNRS (LaBRI UMR 5800), Inria
 * Copyright (C) 2011       Télécom-SudParis
 *
 * StarPU is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation; either version 2.1 of the License, or (at
 * your option) any later version.
 *
 * StarPU is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * See the GNU Lesser General Public License in COPYING.LGPL for more details.
 */

#include <starpu.h>
#include <starpu_scheduler.h>
#include "../helper.h"

#define ERROR_RETURN(retval) { fprintf(stderr, "Error %d %s:line %d: \n", retval, __FILE__, __LINE__); return(retval); }

/*
 * Benchmark memset with a linear and non-linear regression
 */

#define STARTlin 1024
#define START 1024
#ifdef STARPU_QUICK_CHECK
#define END 1048576
#define NENERGY 3
#else
#define END 16777216
#define NENERGY 100
#endif
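
/* The device kernels below fill a vector with the value 42.  The CUDA and
 * OpenCL versions are submitted asynchronously; the STARPU_CUDA_ASYNC and
 * STARPU_OPENCL_ASYNC flags set in the codelets let StarPU handle their
 * completion. */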
#ifdef STARPU_USE_CUDA
static void memset_cuda(void *descr[], void *arg)
{
    (void)arg;
    STARPU_SKIP_IF_VALGRIND;

    unsigned *ptr = (unsigned *)STARPU_VECTOR_GET_PTR(descr[0]);
    unsigned n = STARPU_VECTOR_GET_NX(descr[0]);

    cudaMemsetAsync(ptr, 42, n * sizeof(*ptr), starpu_cuda_get_local_stream());
}
#endif

#ifdef STARPU_USE_OPENCL
extern void memset_opencl(void *buffers[], void *args);
#endif
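
/* Two CPU implementations of the same kernel: memset0_cpu writes the buffer
 * with a plain loop, while memset_cpu first sleeps a little and then calls
 * memset(), so the two implementations have different performance and can be
 * distinguished by the scheduler and the calibration. */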
void memset0_cpu(void *descr[], void *arg)
{
    (void)arg;
    STARPU_SKIP_IF_VALGRIND;

    unsigned *ptr = (unsigned *)STARPU_VECTOR_GET_PTR(descr[0]);
    unsigned n = STARPU_VECTOR_GET_NX(descr[0]);
    unsigned i;

    for (i = 0; i < n; i++)
        ptr[i] = 42;
}

void memset_cpu(void *descr[], void *arg)
{
    (void)arg;
    STARPU_SKIP_IF_VALGRIND;

    unsigned *ptr = (unsigned *)STARPU_VECTOR_GET_PTR(descr[0]);
    unsigned n = STARPU_VECTOR_GET_NX(descr[0]);

    starpu_usleep(10);
    memset(ptr, 42, n * sizeof(*ptr));
}
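
/* Performance and energy models: one pair calibrated with StarPU's linear
 * regression and one pair with its non-linear regression (the fitted forms
 * are typically a*size^b and a*size^b + c; see the StarPU documentation on
 * performance model types). */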
static struct starpu_perfmodel model =
{
    .type = STARPU_REGRESSION_BASED,
    .symbol = "memset_regression_based"
};

static struct starpu_perfmodel nl_model =
{
    .type = STARPU_NL_REGRESSION_BASED,
    .symbol = "non_linear_memset_regression_based"
};

static struct starpu_perfmodel energy_model =
{
    .type = STARPU_REGRESSION_BASED,
    .symbol = "memset_regression_based_energy"
};

static struct starpu_perfmodel nl_energy_model =
{
    .type = STARPU_NL_REGRESSION_BASED,
    .symbol = "non_linear_memset_regression_based_energy"
};
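
/* The two codelets are identical except for the performance and energy models
 * they are attached to.  Each provides a CUDA, an OpenCL and two CPU
 * implementations of the write-only memset kernel. */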
static struct starpu_codelet memset_cl =
{
#ifdef STARPU_USE_CUDA
    .cuda_funcs = {memset_cuda},
    .cuda_flags = {STARPU_CUDA_ASYNC},
#endif
#ifdef STARPU_USE_OPENCL
    .opencl_funcs = {memset_opencl},
    .opencl_flags = {STARPU_OPENCL_ASYNC},
#endif
    .cpu_funcs = {memset0_cpu, memset_cpu},
    .cpu_funcs_name = {"memset0_cpu", "memset_cpu"},
    .model = &model,
    .energy_model = &energy_model,
    .nbuffers = 1,
    .modes = {STARPU_W}
};

static struct starpu_codelet nl_memset_cl =
{
#ifdef STARPU_USE_CUDA
    .cuda_funcs = {memset_cuda},
    .cuda_flags = {STARPU_CUDA_ASYNC},
#endif
#ifdef STARPU_USE_OPENCL
    .opencl_funcs = {memset_opencl},
    .opencl_flags = {STARPU_OPENCL_ASYNC},
#endif
    .cpu_funcs = {memset0_cpu, memset_cpu},
    .cpu_funcs_name = {"memset0_cpu", "memset_cpu"},
    .model = &nl_model,
    .energy_model = &nl_energy_model,
    .nbuffers = 1,
    .modes = {STARPU_W}
};
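
/* Submit nloops asynchronous memset tasks on a temporary vector of nelems
 * integers; the measured execution times feed the calibration of the
 * codelet's regression model. */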
static void test_memset(int nelems, struct starpu_codelet *codelet)
{
    int nloops = 100;
    int loop;
    starpu_data_handle_t handle;

    starpu_vector_data_register(&handle, -1, (uintptr_t)NULL, nelems, sizeof(int));

    for (loop = 0; loop < nloops; loop++)
    {
        struct starpu_task *task = starpu_task_create();

        task->cl = codelet;
        task->handles[0] = handle;

        int ret = starpu_task_submit(task);
        if (ret == -ENODEV)
            exit(STARPU_TEST_SKIPPED);
        STARPU_CHECK_RETURN_VALUE(ret, "starpu_task_submit");
    }

    starpu_data_unregister(handle);
}
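
/* Submit the batch of tasks used for one energy measurement.  When workerid
 * is -1 the tasks may run on any worker of the given archtype, so the batch
 * is scaled by the number of such workers; each task gets its own handle so
 * they can run concurrently.  Returns the number of submitted tasks. */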
static int test_memset_energy(int nelems, int workerid, int where, enum starpu_worker_archtype archtype, int impl, struct starpu_codelet *codelet)
{
    int nloops;
    int loop;

    nloops = NENERGY;
    if (workerid == -1)
        nloops *= starpu_worker_get_count_by_type(archtype);

    starpu_data_handle_t handle[nloops];

    for (loop = 0; loop < nloops; loop++)
    {
        struct starpu_task *task = starpu_task_create();

        starpu_vector_data_register(&handle[loop], -1, (uintptr_t)NULL, nelems, sizeof(int));

        task->cl = codelet;
        task->where = where;
        task->handles[0] = handle[loop];
        task->flops = nelems;
        if (workerid != -1)
        {
            task->execute_on_a_specific_worker = 1;
            task->workerid = workerid;
        }

        int ret = starpu_task_submit(task);
        if (ret == -ENODEV)
            exit(STARPU_TEST_SKIPPED);
        STARPU_CHECK_RETURN_VALUE(ret, "starpu_task_submit");
    }

    for (loop = 0; loop < nloops; loop++)
    {
        starpu_data_unregister(handle[loop]);
    }

    return nloops;
}
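
/* For each power-of-two size, measure the energy consumed by a batch of
 * memset tasks between starpu_energy_start() and starpu_energy_stop(), and
 * feed the result into the codelet's energy model. */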
static int bench_energy(int workerid, int where, enum starpu_worker_archtype archtype, int impl, struct starpu_codelet *codelet)
{
    int size;
    int retval;
    int ntasks;

    for (size = STARTlin; size < END; size *= 2)
    {
        starpu_data_handle_t handle;
        starpu_vector_data_register(&handle, -1, (uintptr_t)NULL, size, sizeof(int));

        if ((retval = starpu_energy_start(workerid, archtype)) != 0)
        {
            starpu_data_unregister(handle);
            _STARPU_DISP("Energy measurement not supported for archtype %d\n", archtype);
            return -1;
        }

        /* Use a linear regression */
        ntasks = test_memset_energy(size, workerid, where, archtype, impl, codelet);

        struct starpu_task *task = starpu_task_create();
        task->cl = codelet;
        task->handles[0] = handle;
        task->synchronous = 1;
        task->destroy = 0;
        task->flops = size;

        retval = starpu_energy_stop(codelet->energy_model, task, impl, ntasks, workerid, archtype);

        starpu_task_destroy(task);
        starpu_data_unregister(handle);

        if (retval != 0)
            ERROR_RETURN(retval);
    }
    return 0;
}
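
/* Print, for every worker and every implementation, the execution time that
 * the calibrated performance model predicts for the given task. */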
static void show_task_perfs(int size, struct starpu_task *task)
{
    unsigned workerid;
    for (workerid = 0; workerid < starpu_worker_get_count(); workerid++)
    {
        char name[32];
        starpu_worker_get_name(workerid, name, sizeof(name));

        unsigned nimpl;
        for (nimpl = 0; nimpl < STARPU_MAXIMPLEMENTATIONS; nimpl++)
        {
            FPRINTF(stdout, "Expected time for %d on %s (impl %u):\t%f\n",
                    size, name, nimpl,
                    starpu_task_expected_length(task, starpu_worker_get_perf_archtype(workerid, task->sched_ctx), nimpl));
        }
    }
}

#ifdef STARPU_USE_OPENCL
struct starpu_opencl_program opencl_program;
#endif
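
/* The test runs in two phases: first calibrate the timing models with the
 * dmda scheduler (calibrate = 2 restarts calibration from scratch), then
 * restart StarPU with the eager scheduler to benchmark the energy models,
 * implementation by implementation. */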
int main(int argc, char **argv)
{
    struct starpu_conf conf;
    starpu_data_handle_t handle;
    int ret;
    unsigned i;

    starpu_conf_init(&conf);
    conf.sched_policy_name = "dmda";
    conf.calibrate = 2;

    ret = starpu_initialize(&conf, &argc, &argv);
    if (ret == -ENODEV) return STARPU_TEST_SKIPPED;
    STARPU_CHECK_RETURN_VALUE(ret, "starpu_init");

#ifdef STARPU_USE_OPENCL
    ret = starpu_opencl_load_opencl_from_file("tests/perfmodels/opencl_memset_kernel.cl",
                                              &opencl_program, NULL);
    STARPU_CHECK_RETURN_VALUE(ret, "starpu_opencl_load_opencl_from_file");
#endif

    int size;
    for (size = STARTlin; size < END; size *= 2)
    {
        /* Use a linear regression */
        test_memset(size, &memset_cl);
    }

    for (size = START; size < END; size *= 2)
    {
        /* Use a non-linear regression */
        test_memset(size, &nl_memset_cl);
    }

    ret = starpu_task_wait_for_all();
    STARPU_CHECK_RETURN_VALUE(ret, "starpu_task_wait_for_all");

    /* Now create a dummy task just to estimate its duration according to the regression */
    size = 12345;
    starpu_vector_data_register(&handle, -1, (uintptr_t)NULL, size, sizeof(int));

    struct starpu_task *task = starpu_task_create();
    task->cl = &memset_cl;
    task->handles[0] = handle;
    task->destroy = 0;

    show_task_perfs(size, task);

    task->cl = &nl_memset_cl;
    show_task_perfs(size, task);

    starpu_task_destroy(task);
    starpu_data_unregister(handle);
#ifdef STARPU_USE_OPENCL
    ret = starpu_opencl_unload_opencl(&opencl_program);
    STARPU_CHECK_RETURN_VALUE(ret, "starpu_opencl_unload_opencl");
#endif
    starpu_shutdown();

    starpu_conf_init(&conf);
    /* Use a scheduler which doesn't choose the implementation */
    conf.sched_policy_name = "eager";
    conf.calibrate = 1;

    ret = starpu_initialize(&conf, &argc, &argv);
    if (ret == -ENODEV) return STARPU_TEST_SKIPPED;
    STARPU_CHECK_RETURN_VALUE(ret, "starpu_init");

#ifdef STARPU_USE_OPENCL
    ret = starpu_opencl_load_opencl_from_file("tests/perfmodels/opencl_memset_kernel.cl",
                                              &opencl_program, NULL);
    STARPU_CHECK_RETURN_VALUE(ret, "starpu_opencl_load_opencl_from_file");
#endif
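
    /* Benchmark the energy of each CPU implementation separately.  Since the
     * eager scheduler does not choose implementations on its own, one
     * cpu_funcs entry is disabled at a time to force a given implementation. */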
    if (starpu_cpu_worker_get_count() > 0)
    {
        memset_cl.cpu_funcs[1] = NULL;
        bench_energy(-1, STARPU_CPU, STARPU_CPU_WORKER, 0, &memset_cl);
        memset_cl.cpu_funcs[1] = memset_cpu;
        memset_cl.cpu_funcs[0] = NULL;
        bench_energy(-1, STARPU_CPU, STARPU_CPU_WORKER, 1, &memset_cl);

        nl_memset_cl.cpu_funcs[1] = NULL;
        bench_energy(-1, STARPU_CPU, STARPU_CPU_WORKER, 0, &nl_memset_cl);
        nl_memset_cl.cpu_funcs[1] = memset_cpu;
        nl_memset_cl.cpu_funcs[0] = NULL;
        bench_energy(-1, STARPU_CPU, STARPU_CPU_WORKER, 1, &nl_memset_cl);
    }
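
    /* Benchmark the energy models on each CUDA worker individually. */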
    for (i = 0; i < starpu_cuda_worker_get_count(); i++)
    {
        int workerid = starpu_worker_get_by_type(STARPU_CUDA_WORKER, i);
        bench_energy(workerid, STARPU_CUDA, STARPU_CUDA_WORKER, 0, &memset_cl);
        bench_energy(workerid, STARPU_CUDA, STARPU_CUDA_WORKER, 0, &nl_memset_cl);
    }

#ifdef STARPU_USE_OPENCL
    ret = starpu_opencl_unload_opencl(&opencl_program);
    STARPU_CHECK_RETURN_VALUE(ret, "starpu_opencl_unload_opencl");
#endif

    starpu_shutdown();

    return EXIT_SUCCESS;
}