simple_cpu_gpu_sched.c

/* StarPU --- Runtime system for heterogeneous multicore architectures.
 *
 * Copyright (C) 2012 Inria
 *
 * StarPU is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation; either version 2.1 of the License, or (at
 * your option) any later version.
 *
 * StarPU is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * See the GNU Lesser General Public License in COPYING.LGPL for more details.
 */
#include <stdlib.h>	/* malloc(), free(), exit(), setenv() */
#include <string.h>	/* memset() */
#include <starpu.h>
#include <starpu_scheduler.h>
#include "../helper.h"

/*
 * Schedulers that are aware of the expected task length provided by the
 * perfmodels must make sure that:
 * - cpu_task is scheduled on a CPU.
 * - gpu_task is scheduled on a GPU.
 *
 * Applies to: dmda, and to which other schedulers?
 */
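
/*
 * Empty kernel: this test only checks where the tasks run, not what they
 * compute, so the actual implementations do nothing.
 */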
void dummy(void *buffers[], void *args)
{
	(void) buffers;
	(void) args;
}

/*
 * Fake cost functions.
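 *
 * cpu_task is predicted to be cheap on a CPU (1.0) and expensive on a GPU
 * (1000.0); gpu_task is the opposite. A perfmodel-aware scheduler should
 * therefore place each task on the device where its predicted cost is lowest.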
 */
static double
cpu_task_cpu(struct starpu_task *task,
	     struct starpu_perfmodel_arch* arch,
	     unsigned nimpl)
{
	(void) task;
	(void) arch;
	(void) nimpl;
	return 1.0;
}

static double
cpu_task_gpu(struct starpu_task *task,
	     struct starpu_perfmodel_arch* arch,
	     unsigned nimpl)
{
	(void) task;
	(void) arch;
	(void) nimpl;
	return 1000.0;
}

static double
gpu_task_cpu(struct starpu_task *task,
	     struct starpu_perfmodel_arch* arch,
	     unsigned nimpl)
{
	(void) task;
	(void) arch;
	(void) nimpl;
	return 1000.0;
}

static double
gpu_task_gpu(struct starpu_task *task,
	     struct starpu_perfmodel_arch* arch,
	     unsigned nimpl)
{
	(void) task;
	(void) arch;
	(void) nimpl;
	return 1.0;
}

static struct starpu_perfmodel model_cpu_task =
{
	.type = STARPU_PER_ARCH,
	.symbol = "model_cpu_task"
};

static struct starpu_perfmodel model_gpu_task =
{
	.type = STARPU_PER_ARCH,
	.symbol = "model_gpu_task"
};
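
/*
 * Register one arch combination per GPU worker of the given type and attach
 * the fake cost functions to it: cheap for gpu_task, expensive for cpu_task.
 */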
static void
init_perfmodels_gpu(int gpu_type)
{
	int nb_worker_gpu = starpu_worker_get_count_by_type(gpu_type);
	int *worker_gpu_ids = malloc(nb_worker_gpu * sizeof(int));
	int worker_gpu;

	starpu_worker_get_ids_by_type(gpu_type, worker_gpu_ids, nb_worker_gpu);
	for (worker_gpu = 0; worker_gpu < nb_worker_gpu; worker_gpu++)
	{
		struct starpu_perfmodel_arch arch_gpu;
		arch_gpu.ndevices = 1;
		arch_gpu.devices = (struct starpu_perfmodel_device*)malloc(sizeof(struct starpu_perfmodel_device));
		arch_gpu.devices[0].type = gpu_type;
		arch_gpu.devices[0].devid = starpu_worker_get_devid(worker_gpu_ids[worker_gpu]);
		arch_gpu.devices[0].ncores = 1;
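
		/* Look up this single-device combination; register it (and its
		 * cost functions) only if StarPU does not know it yet. */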
		int comb_gpu = starpu_get_arch_comb(arch_gpu.ndevices, arch_gpu.devices);
		if (comb_gpu == -1)
		{
			comb_gpu = starpu_add_arch_comb(arch_gpu.ndevices, arch_gpu.devices);

			model_cpu_task.per_arch[comb_gpu] = (struct starpu_perfmodel_per_arch*)malloc(sizeof(struct starpu_perfmodel_per_arch));
			memset(&model_cpu_task.per_arch[comb_gpu][0], 0, sizeof(struct starpu_perfmodel_per_arch));
			model_cpu_task.nimpls[comb_gpu] = 1;
			model_cpu_task.per_arch[comb_gpu][0].cost_function = cpu_task_gpu;

			model_gpu_task.per_arch[comb_gpu] = (struct starpu_perfmodel_per_arch*)malloc(sizeof(struct starpu_perfmodel_per_arch));
			memset(&model_gpu_task.per_arch[comb_gpu][0], 0, sizeof(struct starpu_perfmodel_per_arch));
			model_gpu_task.nimpls[comb_gpu] = 1;
			model_gpu_task.per_arch[comb_gpu][0].cost_function = gpu_task_gpu;
		}
	}
	/* worker_gpu_ids is only used locally, so it can be released here. */
	free(worker_gpu_ids);
}
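
/*
 * Set up both performance models: register the CPU combination explicitly,
 * then let init_perfmodels_gpu() handle every CUDA and OpenCL worker.
 */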
static void
init_perfmodels(void)
{
	unsigned devid, ncore; /* only referenced by the commented-out code below */
	(void) devid;
	(void) ncore;

	starpu_perfmodel_init(NULL, &model_cpu_task);
	starpu_perfmodel_init(NULL, &model_gpu_task);

	struct starpu_perfmodel_arch arch_cpu;
	arch_cpu.ndevices = 1;
	arch_cpu.devices = (struct starpu_perfmodel_device*)malloc(sizeof(struct starpu_perfmodel_device));
	arch_cpu.devices[0].type = STARPU_CPU_WORKER;
	arch_cpu.devices[0].devid = 0;
	arch_cpu.devices[0].ncores = 1;

	int comb_cpu = starpu_get_arch_comb(arch_cpu.ndevices, arch_cpu.devices);
	if (comb_cpu == -1)
		comb_cpu = starpu_add_arch_comb(arch_cpu.ndevices, arch_cpu.devices);

	model_cpu_task.per_arch[comb_cpu] = (struct starpu_perfmodel_per_arch*)malloc(sizeof(struct starpu_perfmodel_per_arch));
	memset(&model_cpu_task.per_arch[comb_cpu][0], 0, sizeof(struct starpu_perfmodel_per_arch));
	model_cpu_task.nimpls[comb_cpu] = 1;
	model_cpu_task.per_arch[comb_cpu][0].cost_function = cpu_task_cpu;

	model_gpu_task.per_arch[comb_cpu] = (struct starpu_perfmodel_per_arch*)malloc(sizeof(struct starpu_perfmodel_per_arch));
	memset(&model_gpu_task.per_arch[comb_cpu][0], 0, sizeof(struct starpu_perfmodel_per_arch));
	model_gpu_task.nimpls[comb_cpu] = 1;
	model_gpu_task.per_arch[comb_cpu][0].cost_function = gpu_task_cpu;

	/* We need to set the cost functions for each combination with a CUDA or an OpenCL worker. */
	init_perfmodels_gpu(STARPU_CUDA_WORKER);
	init_perfmodels_gpu(STARPU_OPENCL_WORKER);

	/* if(model_cpu_task.per_arch[STARPU_CPU_WORKER] != NULL) */
	/* { */
	/*	for(devid=0; model_cpu_task.per_arch[STARPU_CPU_WORKER][devid] != NULL; devid++) */
	/*	{ */
	/*		for(ncore=0; model_cpu_task.per_arch[STARPU_CPU_WORKER][devid][ncore] != NULL; ncore++) */
	/*		{ */
	/*			model_cpu_task.per_arch[STARPU_CPU_WORKER][devid][ncore][0].cost_function = cpu_task_cpu; */
	/*			model_gpu_task.per_arch[STARPU_CPU_WORKER][devid][ncore][0].cost_function = gpu_task_cpu; */
	/*		} */
	/*	} */
	/* } */

	/* if(model_cpu_task.per_arch[STARPU_CUDA_WORKER] != NULL) */
	/* { */
	/*	for(devid=0; model_cpu_task.per_arch[STARPU_CUDA_WORKER][devid] != NULL; devid++) */
	/*	{ */
	/*		for(ncore=0; model_cpu_task.per_arch[STARPU_CUDA_WORKER][devid][ncore] != NULL; ncore++) */
	/*		{ */
	/*			model_cpu_task.per_arch[STARPU_CUDA_WORKER][devid][ncore][0].cost_function = cpu_task_gpu; */
	/*			model_gpu_task.per_arch[STARPU_CUDA_WORKER][devid][ncore][0].cost_function = gpu_task_gpu; */
	/*		} */
	/*	} */
	/* } */

	/* if(model_cpu_task.per_arch[STARPU_OPENCL_WORKER] != NULL) */
	/* { */
	/*	for(devid=0; model_cpu_task.per_arch[STARPU_OPENCL_WORKER][devid] != NULL; devid++) */
	/*	{ */
	/*		for(ncore=0; model_cpu_task.per_arch[STARPU_OPENCL_WORKER][devid][ncore] != NULL; ncore++) */
	/*		{ */
	/*			model_cpu_task.per_arch[STARPU_OPENCL_WORKER][devid][ncore][0].cost_function = cpu_task_gpu; */
	/*			model_gpu_task.per_arch[STARPU_OPENCL_WORKER][devid][ncore][0].cost_function = gpu_task_gpu; */
	/*		} */
	/*	} */
	/* } */
}

/*
 * Dummy codelets.
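 *
 * Both codelets provide CPU, CUDA and OpenCL implementations, so every worker
 * could run them; only the attached performance models differ.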
 */
static struct starpu_codelet cpu_cl =
{
	.cpu_funcs = { dummy, NULL },
	.cuda_funcs = { dummy, NULL },
	.opencl_funcs = { dummy, NULL },
	.nbuffers = 0,
	.model = &model_cpu_task
};

static struct starpu_codelet gpu_cl =
{
	.cpu_funcs = { dummy, NULL },
	.cuda_funcs = { dummy, NULL },
	.opencl_funcs = { dummy, NULL },
	.nbuffers = 0,
	.model = &model_gpu_task
};
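
/*
 * Initialize StarPU with the given scheduling policy, submit one task of each
 * kind, and check through the profiling information that cpu_task ran on a
 * CPU worker and gpu_task on a CUDA or OpenCL worker.
 */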
static int
run(struct starpu_sched_policy *policy)
{
	struct starpu_conf conf;
	starpu_conf_init(&conf);
	conf.sched_policy = policy;

	int ret = starpu_init(&conf);
	if (ret == -ENODEV)
		exit(STARPU_TEST_SKIPPED);

	/* At least 1 CPU and 1 GPU are needed. */
	if (starpu_cpu_worker_get_count() == 0)
	{
		starpu_shutdown();
		exit(STARPU_TEST_SKIPPED);
	}
	if (starpu_cuda_worker_get_count() == 0 && starpu_opencl_worker_get_count() == 0)
	{
		starpu_shutdown();
		exit(STARPU_TEST_SKIPPED);
	}
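
	/* Profiling is what later tells us which worker executed each task. */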
	starpu_profiling_status_set(1);
	init_perfmodels();

	struct starpu_task *cpu_task = starpu_task_create();
	cpu_task->cl = &cpu_cl;
	cpu_task->destroy = 0;

	struct starpu_task *gpu_task = starpu_task_create();
	gpu_task->cl = &gpu_cl;
	gpu_task->destroy = 0;

	ret = starpu_task_submit(cpu_task);
	STARPU_CHECK_RETURN_VALUE(ret, "starpu_task_submit");
	ret = starpu_task_submit(gpu_task);
	STARPU_CHECK_RETURN_VALUE(ret, "starpu_task_submit");
	starpu_task_wait_for_all();
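
	/* destroy was set to 0 above, so the tasks (and their profiling_info)
	 * are still valid after starpu_task_wait_for_all(). */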
	enum starpu_worker_archtype cpu_task_worker, gpu_task_worker;
	cpu_task_worker = starpu_worker_get_type(cpu_task->profiling_info->workerid);
	gpu_task_worker = starpu_worker_get_type(gpu_task->profiling_info->workerid);
	if (cpu_task_worker != STARPU_CPU_WORKER ||
	    (gpu_task_worker != STARPU_CUDA_WORKER && gpu_task_worker != STARPU_OPENCL_WORKER))
	{
		FPRINTF(stderr, "Task did not execute on expected worker\n");
		ret = 1;
	}
	else
		ret = 0;

	starpu_task_destroy(cpu_task);
	starpu_task_destroy(gpu_task);
	starpu_shutdown();
	return ret;
}

/*
extern struct starpu_sched_policy _starpu_sched_ws_policy;
extern struct starpu_sched_policy _starpu_sched_prio_policy;
extern struct starpu_sched_policy _starpu_sched_random_policy;
extern struct starpu_sched_policy _starpu_sched_dm_policy;
extern struct starpu_sched_policy _starpu_sched_dmda_ready_policy;
extern struct starpu_sched_policy _starpu_sched_dmda_sorted_policy;
extern struct starpu_sched_policy _starpu_sched_eager_policy;
extern struct starpu_sched_policy _starpu_sched_parallel_heft_policy;
extern struct starpu_sched_policy _starpu_sched_peager_policy;
*/
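
/* These policy structures are internal to libstarpu and not exposed in the
 * public headers, hence the explicit extern declarations. */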
extern struct starpu_sched_policy _starpu_sched_dmda_policy;

/* XXX: what policies are we interested in? */
static struct starpu_sched_policy *policies[] =
{
	//&_starpu_sched_ws_policy,
	//&_starpu_sched_prio_policy,
	//&_starpu_sched_dm_policy,
	&_starpu_sched_dmda_policy,
	//&_starpu_sched_dmda_ready_policy,
	//&_starpu_sched_dmda_sorted_policy,
	//&_starpu_sched_random_policy,
	//&_starpu_sched_eager_policy,
	//&_starpu_sched_parallel_heft_policy,
	//&_starpu_sched_peager_policy
};

int
main(void)
{
#ifndef STARPU_HAVE_SETENV
/* XXX: is this macro used by all the schedulers we are interested in? */
#warning "setenv() is not available, skipping this test"
	return STARPU_TEST_SKIPPED;
#else
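	/* STARPU_SCHED_BETA weights the data-transfer term in the dm*
	 * schedulers' cost estimate; setting it to 0 lets the computation
	 * cost models alone drive the scheduling decision. */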
	setenv("STARPU_SCHED_BETA", "0", 1);

	int i;
	int n_policies = sizeof(policies) / sizeof(policies[0]);
	for (i = 0; i < n_policies; ++i)
	{
		struct starpu_sched_policy *policy = policies[i];
		FPRINTF(stdout, "Running with policy %s.\n", policy->policy_name);

		int ret = run(policy);
		if (ret == 1)
			return EXIT_FAILURE;
	}
	return EXIT_SUCCESS;
#endif
}