simple_cpu_gpu_sched.c 5.8 KB

/* StarPU --- Runtime system for heterogeneous multicore architectures.
 *
 * Copyright (C) 2012 Inria
 *
 * StarPU is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation; either version 2.1 of the License, or (at
 * your option) any later version.
 *
 * StarPU is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * See the GNU Lesser General Public License in COPYING.LGPL for more details.
 */
#include <starpu.h>
#include <starpu_profiling.h>
#include "../helper.h"
/*
 * Schedulers that are aware of the expected task length provided by the
 * perfmodels must make sure that:
 * - cpu_task is scheduled on a CPU.
 * - gpu_task is scheduled on a GPU.
 *
 * Applies to: heft, XXX: and to what other schedulers?
 */
static void
dummy(void *buffers[], void *args)
{
	(void) buffers;
	(void) args;
}
/*
 * Fake cost functions.
 */
double cpu_task_cpu(struct starpu_task *task,
		    enum starpu_perf_archtype arch,
		    unsigned nimpl)
{
	(void) task;
	(void) arch;
	(void) nimpl;
	return 1.0;
}

double cpu_task_gpu(struct starpu_task *task,
		    enum starpu_perf_archtype arch,
		    unsigned nimpl)
{
	(void) task;
	(void) arch;
	(void) nimpl;
	return 1000.0;
}

double gpu_task_cpu(struct starpu_task *task,
		    enum starpu_perf_archtype arch,
		    unsigned nimpl)
{
	(void) task;
	(void) arch;
	(void) nimpl;
	return 1000.0;
}

double gpu_task_gpu(struct starpu_task *task,
		    enum starpu_perf_archtype arch,
		    unsigned nimpl)
{
	(void) task;
	(void) arch;
	(void) nimpl;
	return 1.0;
}
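
/*
 * With these costs, a perfmodel-aware scheduler should see cpu_task as
 * 1000 times cheaper on a CPU than on a GPU, and gpu_task as 1000 times
 * cheaper on a GPU than on a CPU, and therefore map each task to its
 * "fast" architecture.
 */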
struct starpu_perfmodel model_cpu_task =
{
	.type = STARPU_PER_ARCH
};

struct starpu_perfmodel model_gpu_task =
{
	.type = STARPU_PER_ARCH
};
static void
init_perfmodels(void)
{
	int i;

	/* CPU architecture slots. */
	for (i = STARPU_CPU_DEFAULT; i < STARPU_CUDA_DEFAULT; i++)
	{
		model_cpu_task.per_arch[i][0].cost_function = cpu_task_cpu;
		model_gpu_task.per_arch[i][0].cost_function = gpu_task_cpu;
	}

	/* CUDA and OpenCL architecture slots. */
	for (i = STARPU_CUDA_DEFAULT; i < STARPU_GORDON_DEFAULT; i++)
	{
		model_cpu_task.per_arch[i][0].cost_function = cpu_task_gpu;
		model_gpu_task.per_arch[i][0].cost_function = gpu_task_gpu;
	}
}
/*
 * Dummy codelets.
 */
struct starpu_codelet cpu_cl =
{
	.cpu_funcs = { dummy, NULL },
	.cuda_funcs = { dummy, NULL },
	.opencl_funcs = { dummy, NULL },
	.nbuffers = 0,
	.model = &model_cpu_task
};

struct starpu_codelet gpu_cl =
{
	.cpu_funcs = { dummy, NULL },
	.cuda_funcs = { dummy, NULL },
	.opencl_funcs = { dummy, NULL },
	.nbuffers = 0,
	.model = &model_gpu_task
};
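
/*
 * run() initializes StarPU with the given scheduling policy, submits one
 * cpu_task and one gpu_task, waits for their completion, and uses the
 * profiling information to check which kind of worker actually executed
 * each task. It returns 0 when both tasks ran on the expected
 * architecture, and 1 otherwise.
 */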
static int
run(struct starpu_sched_policy *policy)
{
	struct starpu_conf conf;
	starpu_conf_init(&conf);
	conf.sched_policy = policy;

	int ret = starpu_init(&conf);
	if (ret == -ENODEV)
		exit(STARPU_TEST_SKIPPED);
	STARPU_CHECK_RETURN_VALUE(ret, "starpu_init");

	/* At least 1 CPU and 1 GPU are needed. */
	if (starpu_cpu_worker_get_count() == 0)
		exit(STARPU_TEST_SKIPPED);
	if (starpu_cuda_worker_get_count() == 0 &&
	    starpu_opencl_worker_get_count() == 0)
		exit(STARPU_TEST_SKIPPED);

	starpu_profiling_status_set(1);
	init_perfmodels();

	struct starpu_task *cpu_task = starpu_task_create();
	cpu_task->cl = &cpu_cl;
	cpu_task->destroy = 0;

	struct starpu_task *gpu_task = starpu_task_create();
	gpu_task->cl = &gpu_cl;
	gpu_task->destroy = 0;

	ret = starpu_task_submit(cpu_task);
	STARPU_CHECK_RETURN_VALUE(ret, "starpu_task_submit");
	ret = starpu_task_submit(gpu_task);
	STARPU_CHECK_RETURN_VALUE(ret, "starpu_task_submit");

	starpu_task_wait_for_all();

	enum starpu_archtype cpu_task_worker, gpu_task_worker;
	cpu_task_worker = starpu_worker_get_type(cpu_task->profiling_info->workerid);
	gpu_task_worker = starpu_worker_get_type(gpu_task->profiling_info->workerid);
	if (cpu_task_worker != STARPU_CPU_WORKER ||
	    (gpu_task_worker != STARPU_CUDA_WORKER &&
	     gpu_task_worker != STARPU_OPENCL_WORKER))
		ret = 1;
	else
		ret = 0;

	starpu_task_destroy(cpu_task);
	starpu_task_destroy(gpu_task);
	starpu_shutdown();

	return ret;
}
/*
extern struct starpu_sched_policy _starpu_sched_ws_policy;
extern struct starpu_sched_policy _starpu_sched_prio_policy;
extern struct starpu_sched_policy _starpu_sched_random_policy;
extern struct starpu_sched_policy _starpu_sched_dm_policy;
extern struct starpu_sched_policy _starpu_sched_dmda_policy;
extern struct starpu_sched_policy _starpu_sched_dmda_ready_policy;
extern struct starpu_sched_policy _starpu_sched_dmda_sorted_policy;
extern struct starpu_sched_policy _starpu_sched_eager_policy;
extern struct starpu_sched_policy _starpu_sched_parallel_heft_policy;
extern struct starpu_sched_policy _starpu_sched_pgreedy_policy;
*/
extern struct starpu_sched_policy _starpu_sched_heft_policy;

/* XXX: what policies are we interested in ? */
static struct starpu_sched_policy *policies[] =
{
	//&_starpu_sched_ws_policy,
	//&_starpu_sched_prio_policy,
	//&_starpu_sched_dm_policy,
	//&_starpu_sched_dmda_policy,
	&_starpu_sched_heft_policy,
	//&_starpu_sched_dmda_ready_policy,
	//&_starpu_sched_dmda_sorted_policy,
	//&_starpu_sched_random_policy,
	//&_starpu_sched_eager_policy,
	//&_starpu_sched_parallel_heft_policy,
	//&_starpu_sched_pgreedy_policy
};
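
/*
 * Note: STARPU_SCHED_BETA weights the data-transfer term in the heft/dmda
 * cost estimate; it is forced to 0 below so that scheduling decisions only
 * depend on the fake computation cost models defined above.
 */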
int
main(void)
{
#ifndef STARPU_HAVE_SETENV
/* XXX: is this macro used by all the schedulers we are interested in ? */
#warning "setenv() is not available, skipping this test"
	return STARPU_TEST_SKIPPED;
#else
	setenv("STARPU_SCHED_BETA", "0", 1);

	int i;
	int n_policies = sizeof(policies)/sizeof(policies[0]);
	for (i = 0; i < n_policies; ++i)
	{
		struct starpu_sched_policy *policy = policies[i];
		FPRINTF(stdout, "Running with policy %s.\n",
			policy->policy_name);
		int ret;
		ret = run(policy);
		if (ret == 1)
			return EXIT_FAILURE;
	}

	return EXIT_SUCCESS;
#endif
}