/* regression_based_04.c */
/* StarPU --- Runtime system for heterogeneous multicore architectures.
 *
 * Copyright (C) 2011,2012,2014 Inria
 * Copyright (C) 2011-2016,2019 Université de Bordeaux
 * Copyright (C) 2011-2017 CNRS
 * Copyright (C) 2011 Télécom-SudParis
 *
 * StarPU is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation; either version 2.1 of the License, or (at
 * your option) any later version.
 *
 * StarPU is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * See the GNU Lesser General Public License in COPYING.LGPL for more details.
 */
  19. #include <starpu.h>
  20. #include <starpu_scheduler.h>
  21. #include "../helper.h"
  22. /*
  23. * A multi-implementation benchmark with dmda scheduler
  24. * we aim to test OPENCL workers and calculate the estimated time for each type of worker (CPU or OPENCL or CUDA)
  25. * dmda choose OPENCL workers for lage size (variable size of compare_performance) size=1234567
  26. * dmda choose CPU workers for small size (size=1234)
  27. */
  28. #define STARTlin (512*1024)
  29. #define START 1024
  30. #ifdef STARPU_QUICK_CHECK
  31. #define END 1048576
  32. #else
  33. #define END 16777216
  34. #endif
  35. #ifdef STARPU_USE_CUDA
  36. static void memset_cuda(void *descr[], void *arg)
  37. {
  38. (void)arg;
  39. STARPU_SKIP_IF_VALGRIND;
  40. int *ptr = (int *)STARPU_VECTOR_GET_PTR(descr[0]);
  41. unsigned n = STARPU_VECTOR_GET_NX(descr[0]);
  42. cudaMemsetAsync(ptr, 42, n * sizeof(*ptr), starpu_cuda_get_local_stream());
  43. }
  44. #endif
  45. int ret;
  46. extern void memset0_opencl(void *buffers[], void *args);
  47. extern void memset_opencl(void *buffers[], void *args);
  48. void memset0_cpu(void *descr[], void *arg)
  49. {
  50. (void)arg;
  51. STARPU_SKIP_IF_VALGRIND;
  52. int *ptr = (int *)STARPU_VECTOR_GET_PTR(descr[0]);
  53. unsigned n = STARPU_VECTOR_GET_NX(descr[0]);
  54. //starpu_usleep(100);
  55. unsigned i;
  56. for (i = 0; i < n; i++)
  57. ptr[0] += i;
  58. }
  59. void memset_cpu(void *descr[], void *arg)
  60. {
  61. (void)arg;
  62. STARPU_SKIP_IF_VALGRIND;
  63. int *ptr = (int *)STARPU_VECTOR_GET_PTR(descr[0]);
  64. unsigned n = STARPU_VECTOR_GET_NX(descr[0]);
  65. //starpu_usleep(10);
  66. memset(ptr, 42, n * sizeof(*ptr));
  67. }
  68. static struct starpu_perfmodel model =
  69. {
  70. .type = STARPU_REGRESSION_BASED,
  71. .symbol = "memset_regression_based"
  72. };
  73. static struct starpu_perfmodel nl_model =
  74. {
  75. .type = STARPU_NL_REGRESSION_BASED,
  76. .symbol = "non_linear_memset_regression_based"
  77. };
  78. static struct starpu_codelet memset_cl =
  79. {
  80. #ifdef STARPU_USE_CUDA
  81. .cuda_funcs = {memset_cuda},
  82. .cuda_flags = {STARPU_CUDA_ASYNC},
  83. #endif
  84. .opencl_funcs = {memset0_opencl, memset_opencl},
  85. .opencl_flags = {STARPU_OPENCL_ASYNC},
  86. .cpu_funcs = {memset0_cpu, memset_cpu},
  87. .cpu_funcs_name = {"memset0_cpu", "memset_cpu"},
  88. .model = &model,
  89. .nbuffers = 1,
  90. .modes = {STARPU_W}
  91. };
  92. static struct starpu_codelet nl_memset_cl =
  93. {
  94. #ifdef STARPU_USE_CUDA
  95. .cuda_funcs = {memset_cuda},
  96. .cuda_flags = {STARPU_CUDA_ASYNC},
  97. #endif
  98. .opencl_funcs = {memset0_opencl, memset_opencl},
  99. .opencl_flags = {STARPU_OPENCL_ASYNC},
  100. .cpu_funcs = {memset0_cpu, memset_cpu},
  101. .cpu_funcs_name = {"memset0_cpu", "memset_cpu"},
  102. .model = &nl_model,
  103. .nbuffers = 1,
  104. .modes = {STARPU_W}
  105. };
  106. static void test_memset(int nelems, struct starpu_codelet *codelet)
  107. {
  108. int nloops = 100;
  109. int loop;
  110. starpu_data_handle_t handle;
  111. void *dummy_buffer = malloc(nelems*sizeof(int));
  112. STARPU_ASSERT(dummy_buffer != NULL);
  113. starpu_vector_data_register(&handle, STARPU_MAIN_RAM, (uintptr_t)dummy_buffer, nelems, sizeof(int));
  114. for (loop = 0; loop < nloops; loop++)
  115. {
  116. struct starpu_task *task = starpu_task_create();
  117. task->cl = codelet;
  118. task->handles[0] = handle;
  119. int ret = starpu_task_submit(task);
  120. if (ret == -ENODEV)
  121. exit(STARPU_TEST_SKIPPED);
  122. STARPU_CHECK_RETURN_VALUE(ret, "starpu_task_submit");
  123. }
  124. starpu_data_unregister(handle);
  125. free(dummy_buffer);
  126. }
  127. static void compare_performance(int size, struct starpu_codelet *codelet, struct starpu_task *task)
  128. {
  129. unsigned i;
  130. int niter = 100;
  131. starpu_data_handle_t handle;
  132. void *dummy_buffer = malloc(size*sizeof(int));
  133. STARPU_ASSERT(dummy_buffer != NULL);
  134. starpu_vector_data_register(&handle, STARPU_MAIN_RAM, (uintptr_t)dummy_buffer, size, sizeof(int));
  135. struct starpu_task **tasks = (struct starpu_task **) malloc(niter*sizeof(struct starpu_task *));
  136. assert(tasks);
  137. for (i = 0; i < niter; i++)
  138. {
  139. //fabriquer la tache
  140. struct starpu_task *task = starpu_task_create();
  141. task->cl = codelet;
  142. task->handles[0] = handle;
  143. task->synchronous = 1;
  144. /* We will destroy the task structure by hand so that we can
  145. * query the profiling info before the task is destroyed. */
  146. task->destroy = 0;
  147. tasks[i] = task;
  148. //soumettre la tache
  149. ret = starpu_task_submit(task);
  150. if (STARPU_UNLIKELY(ret == -ENODEV))
  151. {
  152. FPRINTF(stderr, "No worker may execute this task\n");
  153. exit(0);
  154. }
  155. }
  156. starpu_data_unregister(handle);
  157. free(dummy_buffer);
  158. starpu_task_wait_for_all();
  159. double length_cpu_sum = 0.0;
  160. double length_gpu_sum = 0.0;
  161. enum starpu_worker_archtype archi;
  162. for (i = 0; i < niter; i++)
  163. {
  164. struct starpu_task *task = tasks[i];
  165. struct starpu_profiling_task_info *info = task->profiling_info;
  166. //archi=starpu_worker_get_type(0);
  167. archi=starpu_worker_get_type(info->workerid);
  168. switch (archi)
  169. {
  170. case STARPU_CPU_WORKER:
  171. FPRINTF(stdout, "cpuuu\n");
  172. /* How long was the task execution ? */
  173. length_cpu_sum += starpu_timing_timespec_delay_us(&info->start_time, &info->end_time);
  174. break;
  175. case STARPU_OPENCL_WORKER:
  176. FPRINTF(stdout, "openclllllll\n");
  177. /* How long was the task execution ? */
  178. length_gpu_sum += starpu_timing_timespec_delay_us(&info->start_time, &info->end_time);
  179. break;
  180. case STARPU_CUDA_WORKER:
  181. FPRINTF(stdout, "cudaaaaaa\n");
  182. /* How long was the task execution ? */
  183. length_gpu_sum += starpu_timing_timespec_delay_us(&info->start_time, &info->end_time);
  184. break;
  185. }
  186. /* We don't need the task structure anymore */
  187. starpu_task_destroy(task);
  188. }
  189. unsigned worker;
  190. /* Display the occupancy of all workers during the test */
  191. unsigned ncpus = starpu_cpu_worker_get_count();
  192. unsigned ngpus = starpu_opencl_worker_get_count()+starpu_cuda_worker_get_count();
  193. //unsigned ncpu= starpu_worker_get_count_by_type(STARPU_CPU_WORKER);
  194. FPRINTF(stderr, "ncpus %u \n", ncpus);
  195. FPRINTF(stderr, "ngpus %u \n", ngpus);
  196. for (worker= 0; worker< starpu_worker_get_count(); worker++)
  197. {
  198. struct starpu_profiling_worker_info worker_info;
  199. ret = starpu_profiling_worker_get_info(worker, &worker_info);
  200. STARPU_ASSERT(!ret);
  201. char workername[128];
  202. starpu_worker_get_name(worker, workername, sizeof(workername));
  203. unsigned nimpl;
  204. FPRINTF(stdout, "\n Worker :%s ::::::::::\n\n", workername);
  205. for (nimpl = 0; nimpl < STARPU_MAXIMPLEMENTATIONS; nimpl++)
  206. {
  207. switch (starpu_worker_get_type(worker))
  208. {
  209. case STARPU_CPU_WORKER:
  210. FPRINTF(stdout, "Expected time for %d on %s (impl %u): %f, Measured time: %f \n",
  211. size, workername, nimpl,starpu_task_expected_length(task, starpu_worker_get_perf_archtype(worker, task->sched_ctx), nimpl), ((length_cpu_sum)/niter));
  212. break;
  213. case STARPU_OPENCL_WORKER:
  214. FPRINTF(stdout, "Expectedd time for %d on %s (impl %u): %f, Measuredd time: %f \n",
  215. size, workername, nimpl,starpu_task_expected_length(task, starpu_worker_get_perf_archtype(worker, task->sched_ctx), nimpl), ((length_gpu_sum)/niter));
  216. break;
  217. case STARPU_CUDA_WORKER:
  218. FPRINTF(stdout, "Expectedd time for %d on %s (impl %u): %f, Measuredd time: %f \n",
  219. size, workername, nimpl,starpu_task_expected_length(task, starpu_worker_get_perf_archtype(worker, task->sched_ctx), nimpl), ((length_gpu_sum)/niter));
  220. break;
  221. }
  222. }
  223. }
  224. }
  225. struct starpu_opencl_program opencl_program;
  226. int main(int argc, char **argv)
  227. {
  228. /* Enable profiling */
  229. starpu_profiling_status_set(1);
  230. struct starpu_conf conf;
  231. starpu_data_handle_t handle;
  232. int ret;
  233. starpu_conf_init(&conf);
  234. conf.sched_policy_name = "dmda";
  235. conf.calibrate = 2;
  236. ret = starpu_initialize(&conf, &argc, &argv);
  237. if (ret == -ENODEV) return STARPU_TEST_SKIPPED;
  238. STARPU_CHECK_RETURN_VALUE(ret, "starpu_init");
  239. ret = starpu_opencl_load_opencl_from_file("/home/makni/makni/starpu.git/tests/perfmodels/opencl_memset_kernel_01.cl",
  240. &opencl_program, NULL);
  241. STARPU_CHECK_RETURN_VALUE(ret, "starpu_opencl_load_opencl_from_file");
  242. int size;
  243. for (size = STARTlin; size < END; size *= 2)
  244. {
  245. /* Use a linear regression */
  246. test_memset(size, &memset_cl);
  247. }
  248. for (size = START*1.5; size < END; size *= 2)
  249. {
  250. /* Use a non-linear regression */
  251. test_memset(size, &nl_memset_cl);
  252. }
  253. ret = starpu_task_wait_for_all();
  254. STARPU_CHECK_RETURN_VALUE(ret, "starpu_task_wait_for_all");
  255. starpu_shutdown();
  256. /* Test Phase */
  257. starpu_conf_init(&conf);
  258. conf.sched_policy_name = "dmda";
  259. conf.calibrate = 0;
  260. ret = starpu_initialize(&conf, &argc, &argv);
  261. if (ret == -ENODEV) return STARPU_TEST_SKIPPED;
  262. STARPU_CHECK_RETURN_VALUE(ret, "starpu_init");
  263. ret = starpu_opencl_load_opencl_from_file("/home/makni/makni/starpu.git/tests/perfmodels/opencl_memset_kernel_01.cl",
  264. &opencl_program, NULL);
  265. STARPU_CHECK_RETURN_VALUE(ret, "starpu_opencl_load_opencl_from_file");
  266. /* Now create a dummy task just to estimate its duration according to the regression */
  267. size = 1234567;
  268. void *dummy_buffer = malloc(size*sizeof(int));
  269. STARPU_ASSERT(dummy_buffer != NULL);
  270. starpu_vector_data_register(&handle, STARPU_MAIN_RAM, (uintptr_t)dummy_buffer, size, sizeof(int));
  271. struct starpu_task *task = starpu_task_create();
  272. task->cl = &memset_cl;
  273. task->handles[0] = handle;
  274. task->destroy = 0;
  275. //FPRINTF(stdout, "\n ////linear regression results////\n");
  276. //compare_performance(size, &memset_cl,task);
  277. task->cl = &nl_memset_cl;
  278. FPRINTF(stdout, "\n ////non linear regression results////\n");
  279. compare_performance(size, &nl_memset_cl,task);
  280. starpu_task_destroy(task);
  281. starpu_data_unregister(handle);
  282. free(dummy_buffer);
  283. ret = starpu_opencl_unload_opencl(&opencl_program);
  284. STARPU_CHECK_RETURN_VALUE(ret, "starpu_opencl_unload_opencl");
  285. starpu_shutdown();
  286. return EXIT_SUCCESS;
  287. }