regression_based_gpu.c

/* StarPU --- Runtime system for heterogeneous multicore architectures.
 *
 * Copyright (C) 2011-2021 Université de Bordeaux, CNRS (LaBRI UMR 5800), Inria
 * Copyright (C) 2011 Télécom-SudParis
 *
 * StarPU is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation; either version 2.1 of the License, or (at
 * your option) any later version.
 *
 * StarPU is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * See the GNU Lesser General Public License in COPYING.LGPL for more details.
 */
#include <starpu.h>
#include <starpu_scheduler.h>
#include "../helper.h"
/*
 * A multi-implementation benchmark using the dmda scheduler.
 * It exercises OpenCL workers and computes the estimated execution time for
 * each type of worker (CPU, OpenCL or CUDA).
 * dmda chooses OpenCL workers for a large size (the size passed to
 * compare_performance, size=1234567) and CPU workers for a small size
 * (size=1234).
 */
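/* Calibration sizes: the linear model is fed sizes from STARTlin up to END,
 * the non-linear model from 1.5*START up to END; END is reduced for quick
 * checks. */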
#define STARTlin 131072
#define START 1024
#ifdef STARPU_QUICK_CHECK
#define END 1048576
#else
#define END 16777216
#endif
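/* CUDA implementation: asynchronous memset of the whole vector, submitted on
 * the worker's local CUDA stream. */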
#ifdef STARPU_USE_CUDA
static void memset_cuda(void *descr[], void *arg)
{
        (void)arg;
        STARPU_SKIP_IF_VALGRIND;

        unsigned *ptr = (unsigned *)STARPU_VECTOR_GET_PTR(descr[0]);
        unsigned n = STARPU_VECTOR_GET_NX(descr[0]);

        cudaMemsetAsync(ptr, 42, n * sizeof(*ptr), starpu_cuda_get_local_stream());
}
#endif
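/* Host-side OpenCL implementations (provided by a separate compilation unit
 * of the test suite); the kernel source itself is loaded from
 * opencl_memset_kernel.cl in main(). */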
#ifdef STARPU_USE_OPENCL
extern void memset0_opencl(void *buffers[], void *args);
extern void memset_opencl(void *buffers[], void *args);
#endif
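/* First CPU implementation: a dummy loop whose cost grows linearly with the
 * vector size n. */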
void memset0_cpu(void *descr[], void *arg)
{
        (void)arg;
        STARPU_SKIP_IF_VALGRIND;

        unsigned *ptr = (unsigned *)STARPU_VECTOR_GET_PTR(descr[0]);
        unsigned n = STARPU_VECTOR_GET_NX(descr[0]);
        unsigned i;

        //starpu_usleep(100);
        for (i = 0; i < n; i++)
                ptr[0] += i;
}
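/* Second CPU implementation: a plain memset of the vector. */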
void memset_cpu(void *descr[], void *arg)
{
        (void)arg;
        STARPU_SKIP_IF_VALGRIND;

        unsigned *ptr = (unsigned *)STARPU_VECTOR_GET_PTR(descr[0]);
        unsigned n = STARPU_VECTOR_GET_NX(descr[0]);

        //starpu_usleep(10);
        memset(ptr, 42, n * sizeof(*ptr));
}
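/* Two performance models calibrated from execution history: a linear
 * regression and a non-linear regression on the input size. */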
static struct starpu_perfmodel model =
{
        .type = STARPU_REGRESSION_BASED,
        .symbol = "memset_regression_based"
};

static struct starpu_perfmodel nl_model =
{
        .type = STARPU_NL_REGRESSION_BASED,
        .symbol = "non_linear_memset_regression_based"
};
static struct starpu_codelet memset_cl =
{
#ifdef STARPU_USE_CUDA
        .cuda_funcs = {memset_cuda},
        .cuda_flags = {STARPU_CUDA_ASYNC},
#endif
#ifdef STARPU_USE_OPENCL
        .opencl_funcs = {memset0_opencl, memset_opencl},
        .opencl_flags = {STARPU_OPENCL_ASYNC},
#endif
        .cpu_funcs = {memset0_cpu, memset_cpu},
        .cpu_funcs_name = {"memset0_cpu", "memset_cpu"},
        .model = &model,
        .nbuffers = 1,
        .modes = {STARPU_SCRATCH}
};
static struct starpu_codelet nl_memset_cl =
{
#ifdef STARPU_USE_CUDA
        .cuda_funcs = {memset_cuda},
        .cuda_flags = {STARPU_CUDA_ASYNC},
#endif
#ifdef STARPU_USE_OPENCL
        .opencl_funcs = {memset0_opencl, memset_opencl},
        .opencl_flags = {STARPU_OPENCL_ASYNC},
#endif
        .cpu_funcs = {memset0_cpu, memset_cpu},
        .cpu_funcs_name = {"memset0_cpu", "memset_cpu"},
        .model = &nl_model,
        .nbuffers = 1,
        .modes = {STARPU_SCRATCH}
};
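/* Submit nloops tasks of the given size with the given codelet so that its
 * performance model gets calibrated for that size. */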
static void test_memset(int nelems, struct starpu_codelet *codelet)
{
        int nloops = 100;
        int loop;
        starpu_data_handle_t handle;

        starpu_vector_data_register(&handle, -1, (uintptr_t)NULL, nelems, sizeof(int));

        for (loop = 0; loop < nloops; loop++)
        {
                struct starpu_task *task = starpu_task_create();
                task->cl = codelet;
                task->handles[0] = handle;

                int ret = starpu_task_submit(task);
                if (ret == -ENODEV)
                        exit(STARPU_TEST_SKIPPED);
                STARPU_CHECK_RETURN_VALUE(ret, "starpu_task_submit");
        }

        starpu_do_schedule();
        starpu_data_unregister(handle);
}
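/* Run niter synchronous tasks of the given size, accumulate their measured
 * execution times per worker type from the profiling information, and compare
 * the average with the duration predicted by the performance model of
 * compar_task for every worker and implementation. */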
static void compare_performance(int size, struct starpu_codelet *codelet, struct starpu_task *compar_task)
{
        unsigned i;
        unsigned niter = 100;
        starpu_data_handle_t handle;

        starpu_vector_data_register(&handle, -1, (uintptr_t)NULL, size, sizeof(int));

        struct starpu_task *tasks[niter];

        for (i = 0; i < niter; i++)
        {
                struct starpu_task *task = starpu_task_create();
                task->cl = codelet;
                task->handles[0] = handle;
                task->synchronous = 1;

                /* We will destroy the task structure by hand so that we can
                 * query the profiling info before the task is destroyed. */
                task->destroy = 0;

                tasks[i] = task;

                int ret = starpu_task_submit(task);
                if (STARPU_UNLIKELY(ret == -ENODEV))
                {
                        FPRINTF(stderr, "No worker may execute this task\n");
                        exit(STARPU_TEST_SKIPPED);
                }
        }
        starpu_data_unregister(handle);
        starpu_task_wait_for_all();

        double length_cpu_sum = 0.0;
        double length_gpu_sum = 0.0;
        enum starpu_worker_archtype archi;

        for (i = 0; i < niter; i++)
        {
                struct starpu_task *task = tasks[i];
                struct starpu_profiling_task_info *info = task->profiling_info;

                //archi = starpu_worker_get_type(0);
                archi = starpu_worker_get_type(info->workerid);
                switch (archi)
                {
                case STARPU_CPU_WORKER:
                        FPRINTF(stdout, "cpu\n");
                        /* How long was the task execution? */
                        length_cpu_sum += starpu_timing_timespec_delay_us(&info->start_time, &info->end_time);
                        break;
                case STARPU_OPENCL_WORKER:
                        FPRINTF(stdout, "opencl\n");
                        /* How long was the task execution? */
                        length_gpu_sum += starpu_timing_timespec_delay_us(&info->start_time, &info->end_time);
                        break;
                case STARPU_CUDA_WORKER:
                        FPRINTF(stdout, "cuda\n");
                        /* How long was the task execution? */
                        length_gpu_sum += starpu_timing_timespec_delay_us(&info->start_time, &info->end_time);
                        break;
                default:
                        FPRINTF(stdout, "unsupported!\n");
                        break;
                }

                /* We don't need the task structure anymore */
                starpu_task_destroy(task);
        }
        unsigned worker;

        /* Display the occupancy of all workers during the test */
        unsigned ncpus = starpu_cpu_worker_get_count();
        unsigned ngpus = starpu_opencl_worker_get_count() + starpu_cuda_worker_get_count();
        //unsigned ncpu = starpu_worker_get_count_by_type(STARPU_CPU_WORKER);
        FPRINTF(stderr, "ncpus %u \n", ncpus);
        FPRINTF(stderr, "ngpus %u \n", ngpus);

        for (worker = 0; worker < starpu_worker_get_count(); worker++)
        {
                struct starpu_profiling_worker_info worker_info;
                int ret = starpu_profiling_worker_get_info(worker, &worker_info);
                STARPU_ASSERT(!ret);

                char workername[128];
                starpu_worker_get_name(worker, workername, sizeof(workername));

                unsigned nimpl;
                FPRINTF(stdout, "\nWorker %s:\n\n", workername);

                for (nimpl = 0; nimpl < STARPU_MAXIMPLEMENTATIONS; nimpl++)
                {
                        switch (starpu_worker_get_type(worker))
                        {
                        case STARPU_CPU_WORKER:
                                FPRINTF(stdout, "Expected time for %d on %s (impl %u): %f, Measured time: %f \n",
                                        size, workername, nimpl, starpu_task_expected_length(compar_task, starpu_worker_get_perf_archtype(worker, compar_task->sched_ctx), nimpl), length_cpu_sum / niter);
                                break;
                        case STARPU_OPENCL_WORKER:
                                FPRINTF(stdout, "Expected time for %d on %s (impl %u): %f, Measured time: %f \n",
                                        size, workername, nimpl, starpu_task_expected_length(compar_task, starpu_worker_get_perf_archtype(worker, compar_task->sched_ctx), nimpl), length_gpu_sum / niter);
                                break;
                        case STARPU_CUDA_WORKER:
                                FPRINTF(stdout, "Expected time for %d on %s (impl %u): %f, Measured time: %f \n",
                                        size, workername, nimpl, starpu_task_expected_length(compar_task, starpu_worker_get_perf_archtype(worker, compar_task->sched_ctx), nimpl), length_gpu_sum / niter);
                                break;
                        default:
                                FPRINTF(stdout, "unsupported!\n");
                                break;
                        }
                }
        }
}
#ifdef STARPU_USE_OPENCL
struct starpu_opencl_program opencl_program;
#endif
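/* Two phases: a calibration run with conf.calibrate = 2 (which makes StarPU
 * recalibrate the performance models), then a restart with conf.calibrate = 0
 * so that the predicted times can be compared against fresh measurements. */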
int main(int argc, char **argv)
{
        /* Enable profiling */
        starpu_profiling_status_set(STARPU_PROFILING_ENABLE);

        struct starpu_conf conf;
        starpu_data_handle_t handle;
        int ret;

        starpu_conf_init(&conf);
        conf.sched_policy_name = "dmda";
        conf.calibrate = 2;

        ret = starpu_initialize(&conf, &argc, &argv);
        if (ret == -ENODEV) return STARPU_TEST_SKIPPED;
        STARPU_CHECK_RETURN_VALUE(ret, "starpu_init");

#ifdef STARPU_USE_OPENCL
        ret = starpu_opencl_load_opencl_from_file("tests/perfmodels/opencl_memset_kernel.cl",
                                                  &opencl_program, NULL);
        STARPU_CHECK_RETURN_VALUE(ret, "starpu_opencl_load_opencl_from_file");
#endif
        int size;

        for (size = STARTlin; size < END; size *= 2)
        {
                /* Use a linear regression */
                test_memset(size, &memset_cl);
        }

        for (size = START*1.5; size < END; size *= 2)
        {
                /* Use a non-linear regression */
                test_memset(size, &nl_memset_cl);
        }
        ret = starpu_task_wait_for_all();
        STARPU_CHECK_RETURN_VALUE(ret, "starpu_task_wait_for_all");

#ifdef STARPU_USE_OPENCL
        ret = starpu_opencl_unload_opencl(&opencl_program);
        STARPU_CHECK_RETURN_VALUE(ret, "starpu_opencl_unload_opencl");
#endif

        starpu_shutdown();
        /* Test Phase */
        starpu_conf_init(&conf);
        conf.sched_policy_name = "dmda";
        conf.calibrate = 0;

        ret = starpu_initialize(&conf, &argc, &argv);
        if (ret == -ENODEV) return STARPU_TEST_SKIPPED;
        STARPU_CHECK_RETURN_VALUE(ret, "starpu_init");

#ifdef STARPU_USE_OPENCL
        ret = starpu_opencl_load_opencl_from_file("tests/perfmodels/opencl_memset_kernel.cl",
                                                  &opencl_program, NULL);
        STARPU_CHECK_RETURN_VALUE(ret, "starpu_opencl_load_opencl_from_file");
#endif
        /* Now create a dummy task just to estimate its duration according to the regression */
        size = 1234567;
        starpu_vector_data_register(&handle, -1, (uintptr_t)NULL, size, sizeof(int));

        struct starpu_task *task = starpu_task_create();
        task->handles[0] = handle;
        task->destroy = 0;

        //FPRINTF(stdout, "\n ////linear regression results////\n");
        //task->cl = &memset_cl;
        //compare_performance(size, &memset_cl, task);

        FPRINTF(stdout, "\n ////non linear regression results////\n");
        task->cl = &nl_memset_cl;
        compare_performance(size, &nl_memset_cl, task);

        starpu_task_destroy(task);
        starpu_data_unregister(handle);
#ifdef STARPU_USE_OPENCL
        ret = starpu_opencl_unload_opencl(&opencl_program);
        STARPU_CHECK_RETURN_VALUE(ret, "starpu_opencl_unload_opencl");
#endif

        starpu_shutdown();

        return EXIT_SUCCESS;
}