/* simple_cpu_gpu_sched.c */
/* StarPU --- Runtime system for heterogeneous multicore architectures.
 *
 * Copyright (C) 2012 Inria
 *
 * StarPU is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation; either version 2.1 of the License, or (at
 * your option) any later version.
 *
 * StarPU is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * See the GNU Lesser General Public License in COPYING.LGPL for more details.
 */
  16. #include <starpu.h>
  17. #include <starpu_scheduler.h>
  18. #include "../helper.h"
/*
 * Schedulers that are aware of the expected task length provided by the
 * perfmodels must make sure that:
 * - cpu_task is scheduled on a CPU.
 * - gpu_task is scheduled on a GPU.
 *
 * Applies to: dmda and to what other schedulers?
 */
  27. static void
  28. dummy(void *buffers[], void *args)
  29. {
  30. (void) buffers;
  31. (void) args;
  32. }
/*
 * Fake cost functions.
 */
  36. static double
  37. cpu_task_cpu(struct starpu_task *task,
  38. struct starpu_perfmodel_arch* arch,
  39. unsigned nimpl)
  40. {
  41. (void) task;
  42. (void) arch;
  43. (void) nimpl;
  44. return 1.0;
  45. }
  46. static double
  47. cpu_task_gpu(struct starpu_task *task,
  48. struct starpu_perfmodel_arch* arch,
  49. unsigned nimpl)
  50. {
  51. (void) task;
  52. (void) arch;
  53. (void) nimpl;
  54. return 1000.0;
  55. }
  56. static double
  57. gpu_task_cpu(struct starpu_task *task,
  58. struct starpu_perfmodel_arch* arch,
  59. unsigned nimpl)
  60. {
  61. (void) task;
  62. (void) arch;
  63. (void) nimpl;
  64. return 1000.0;
  65. }
  66. static double
  67. gpu_task_gpu(struct starpu_task *task,
  68. struct starpu_perfmodel_arch* arch,
  69. unsigned nimpl)
  70. {
  71. (void) task;
  72. (void) arch;
  73. (void) nimpl;
  74. return 1.0;
  75. }
/* Perfmodel for the CPU-friendly task; the per-arch cost functions are
   installed at runtime by init_perfmodels(). */
static struct starpu_perfmodel model_cpu_task =
{
	.type = STARPU_PER_ARCH,
	.symbol = "model_cpu_task"
};
/* Perfmodel for the GPU-friendly task; the per-arch cost functions are
   installed at runtime by init_perfmodels(). */
static struct starpu_perfmodel model_gpu_task =
{
	.type = STARPU_PER_ARCH,
	.symbol = "model_gpu_task"
};
  86. static void
  87. init_perfmodels(void)
  88. {
  89. unsigned devid, ncore;
  90. starpu_initialize_model(&model_cpu_task);
  91. starpu_initialize_model(&model_gpu_task);
  92. if(model_cpu_task.per_arch[STARPU_CPU_WORKER] != NULL)
  93. {
  94. for(devid=0; model_cpu_task.per_arch[STARPU_CPU_WORKER][devid] != NULL; devid++)
  95. {
  96. for(ncore=0; model_cpu_task.per_arch[STARPU_CPU_WORKER][devid][ncore] != NULL; ncore++)
  97. {
  98. model_cpu_task.per_arch[STARPU_CPU_WORKER][devid][ncore][0].cost_function = cpu_task_cpu;
  99. model_gpu_task.per_arch[STARPU_CPU_WORKER][devid][ncore][0].cost_function = gpu_task_cpu;
  100. }
  101. }
  102. }
  103. if(model_cpu_task.per_arch[STARPU_CUDA_WORKER] != NULL)
  104. {
  105. for(devid=0; model_cpu_task.per_arch[STARPU_CUDA_WORKER][devid] != NULL; devid++)
  106. {
  107. for(ncore=0; model_cpu_task.per_arch[STARPU_CUDA_WORKER][devid][ncore] != NULL; ncore++)
  108. {
  109. model_cpu_task.per_arch[STARPU_CUDA_WORKER][devid][ncore][0].cost_function = cpu_task_gpu;
  110. model_gpu_task.per_arch[STARPU_CUDA_WORKER][devid][ncore][0].cost_function = gpu_task_gpu;
  111. }
  112. }
  113. }
  114. if(model_cpu_task.per_arch[STARPU_OPENCL_WORKER] != NULL)
  115. {
  116. for(devid=0; model_cpu_task.per_arch[STARPU_OPENCL_WORKER][devid] != NULL; devid++)
  117. {
  118. for(ncore=0; model_cpu_task.per_arch[STARPU_OPENCL_WORKER][devid][ncore] != NULL; ncore++)
  119. {
  120. model_cpu_task.per_arch[STARPU_OPENCL_WORKER][devid][ncore][0].cost_function = cpu_task_gpu;
  121. model_gpu_task.per_arch[STARPU_OPENCL_WORKER][devid][ncore][0].cost_function = gpu_task_gpu;
  122. }
  123. }
  124. }
  125. }
/*
 * Dummy codelets.
 */
/* Codelet that should end up on a CPU: runnable on every worker type,
   takes no data, and its perfmodel makes CPUs cheap and GPUs expensive. */
static struct starpu_codelet cpu_cl =
{
	.cpu_funcs = { dummy, NULL },
	.cuda_funcs = { dummy, NULL },
	.opencl_funcs = { dummy, NULL },
	.nbuffers = 0,
	.model = &model_cpu_task
};
/* Codelet that should end up on a GPU: runnable on every worker type,
   takes no data, and its perfmodel makes GPUs cheap and CPUs expensive. */
static struct starpu_codelet gpu_cl =
{
	.cpu_funcs = { dummy, NULL },
	.cuda_funcs = { dummy, NULL },
	.opencl_funcs = { dummy, NULL },
	.nbuffers = 0,
	.model = &model_gpu_task
};
  145. static int
  146. run(struct starpu_sched_policy *policy)
  147. {
  148. struct starpu_conf conf;
  149. starpu_conf_init(&conf);
  150. conf.sched_policy = policy;
  151. int ret = starpu_init(&conf);
  152. if (ret == -ENODEV)
  153. exit(STARPU_TEST_SKIPPED);
  154. /* At least 1 CPU and 1 GPU are needed. */
  155. if (starpu_cpu_worker_get_count() == 0) {
  156. starpu_shutdown();
  157. exit(STARPU_TEST_SKIPPED);
  158. }
  159. if (starpu_cuda_worker_get_count() == 0 &&
  160. starpu_opencl_worker_get_count() == 0) {
  161. starpu_shutdown();
  162. exit(STARPU_TEST_SKIPPED);
  163. }
  164. starpu_profiling_status_set(1);
  165. init_perfmodels();
  166. struct starpu_task *cpu_task = starpu_task_create();
  167. cpu_task->cl = &cpu_cl;
  168. cpu_task->destroy = 0;
  169. struct starpu_task *gpu_task = starpu_task_create();
  170. gpu_task->cl = &gpu_cl;
  171. gpu_task->destroy = 0;
  172. ret = starpu_task_submit(cpu_task);
  173. STARPU_CHECK_RETURN_VALUE(ret, "starpu_task_submit");
  174. ret = starpu_task_submit(gpu_task);
  175. STARPU_CHECK_RETURN_VALUE(ret, "starpu_task_submit");
  176. starpu_task_wait_for_all();
  177. enum starpu_worker_archtype cpu_task_worker, gpu_task_worker;
  178. cpu_task_worker = starpu_worker_get_type(cpu_task->profiling_info->workerid);
  179. gpu_task_worker = starpu_worker_get_type(gpu_task->profiling_info->workerid);
  180. if (cpu_task_worker != STARPU_CPU_WORKER ||
  181. (gpu_task_worker != STARPU_CUDA_WORKER &&
  182. gpu_task_worker != STARPU_OPENCL_WORKER))
  183. ret = 1;
  184. else
  185. ret = 0;
  186. starpu_task_destroy(cpu_task);
  187. starpu_task_destroy(gpu_task);
  188. starpu_shutdown();
  189. return ret;
  190. }
/*
extern struct starpu_sched_policy _starpu_sched_ws_policy;
extern struct starpu_sched_policy _starpu_sched_prio_policy;
extern struct starpu_sched_policy _starpu_sched_random_policy;
extern struct starpu_sched_policy _starpu_sched_dm_policy;
extern struct starpu_sched_policy _starpu_sched_dmda_ready_policy;
extern struct starpu_sched_policy _starpu_sched_dmda_sorted_policy;
extern struct starpu_sched_policy _starpu_sched_eager_policy;
extern struct starpu_sched_policy _starpu_sched_parallel_heft_policy;
extern struct starpu_sched_policy _starpu_sched_peager_policy;
*/
extern struct starpu_sched_policy _starpu_sched_dmda_policy;

/* Policies the test is run against; only perfmodel-aware schedulers
   can be expected to pass.
   XXX: what policies are we interested in ? */
static struct starpu_sched_policy *policies[] =
{
	//&_starpu_sched_ws_policy,
	//&_starpu_sched_prio_policy,
	//&_starpu_sched_dm_policy,
	&_starpu_sched_dmda_policy,
	//&_starpu_sched_dmda_ready_policy,
	//&_starpu_sched_dmda_sorted_policy,
	//&_starpu_sched_random_policy,
	//&_starpu_sched_eager_policy,
	//&_starpu_sched_parallel_heft_policy,
	//&_starpu_sched_peager_policy
};
  217. int
  218. main(void)
  219. {
  220. #ifndef STARPU_HAVE_SETENV
  221. /* XXX: is this macro used by all the schedulers we are interested in ? */
  222. #warning "setenv() is not available, skipping this test"
  223. return STARPU_TEST_SKIPPED;
  224. #else
  225. setenv("STARPU_SCHED_BETA", "0", 1);
  226. int i;
  227. int n_policies = sizeof(policies)/sizeof(policies[0]);
  228. for (i = 0; i < n_policies; ++i)
  229. {
  230. struct starpu_sched_policy *policy = policies[i];
  231. FPRINTF(stdout, "Running with policy %s.\n",
  232. policy->policy_name);
  233. int ret;
  234. ret = run(policy);
  235. if (ret == 1)
  236. return EXIT_FAILURE;
  237. }
  238. return EXIT_SUCCESS;
  239. #endif
  240. }