/* sched_ctx.c — StarPU scheduling contexts example */
  1. /* StarPU --- Runtime system for heterogeneous multicore architectures.
  2. *
  3. * Copyright (C) 2010-2020 Université de Bordeaux, CNRS (LaBRI UMR 5800), Inria
  4. *
  5. * StarPU is free software; you can redistribute it and/or modify
  6. * it under the terms of the GNU Lesser General Public License as published by
  7. * the Free Software Foundation; either version 2.1 of the License, or (at
  8. * your option) any later version.
  9. *
  10. * StarPU is distributed in the hope that it will be useful, but
  11. * WITHOUT ANY WARRANTY; without even the implied warranty of
  12. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
  13. *
  14. * See the GNU Lesser General Public License in COPYING.LGPL for more details.
  15. */
  16. #include <starpu.h>
  17. #include <stdlib.h>
  18. #ifdef STARPU_HAVE_VALGRIND_H
  19. #include <valgrind/valgrind.h>
  20. #endif
  21. #ifdef STARPU_QUICK_CHECK
  22. #define NTASKS 64
  23. #else
  24. #define NTASKS 1000
  25. #endif
/* Global task counters, shared by all workers.  They are only ever
 * updated through STARPU_ATOMIC_ADD (see the codelet functions below),
 * and read once from main() after starpu_task_wait_for_all(), so no
 * further synchronization is needed. */
int tasks_executed = 0;      /* total number of tasks that ran, all contexts */
int ctx1_tasks_executed = 0; /* tasks that ran inside scheduling context 1 */
int ctx2_tasks_executed = 0; /* tasks that ran inside scheduling context 2 */
int cpu_tasks_executed = 0;  /* tasks that ran on a CPU worker */
int gpu_tasks_executed = 0;  /* tasks that ran on a CUDA worker */
  31. static void sched_ctx_cpu_func(void *descr[], void *arg)
  32. {
  33. (void)descr;
  34. (void)arg;
  35. (void)STARPU_ATOMIC_ADD(&tasks_executed,1);
  36. (void)STARPU_ATOMIC_ADD(&ctx1_tasks_executed,1);
  37. (void)STARPU_ATOMIC_ADD(&cpu_tasks_executed,1);
  38. }
  39. static void sched_ctx2_cpu_func(void *descr[], void *arg)
  40. {
  41. (void)descr;
  42. (void)arg;
  43. (void)STARPU_ATOMIC_ADD(&tasks_executed,1);
  44. (void)STARPU_ATOMIC_ADD(&ctx2_tasks_executed,1);
  45. (void)STARPU_ATOMIC_ADD(&cpu_tasks_executed,1);
  46. }
  47. static void sched_ctx2_cuda_func(void *descr[], void *arg)
  48. {
  49. (void)descr;
  50. (void)arg;
  51. (void)STARPU_ATOMIC_ADD(&tasks_executed,1);
  52. (void)STARPU_ATOMIC_ADD(&ctx2_tasks_executed,1);
  53. (void)STARPU_ATOMIC_ADD(&gpu_tasks_executed,1);
  54. }
  55. static struct starpu_codelet sched_ctx_codelet1 =
  56. {
  57. .cpu_funcs = {sched_ctx_cpu_func},
  58. .model = NULL,
  59. .nbuffers = 0,
  60. .name = "sched_ctx"
  61. };
  62. static struct starpu_codelet sched_ctx_codelet2 =
  63. {
  64. .cpu_funcs = {sched_ctx2_cpu_func},
  65. .cuda_funcs = {sched_ctx2_cuda_func},
  66. .model = NULL,
  67. .nbuffers = 0,
  68. .name = "sched_ctx"
  69. };
  70. int main(void)
  71. {
  72. int ntasks = NTASKS;
  73. int ret;
  74. unsigned ncuda = 0;
  75. int nprocs1 = 0;
  76. int nprocs2 = 0;
  77. int procs1[STARPU_NMAXWORKERS], procs2[STARPU_NMAXWORKERS];
  78. char *sched = getenv("STARPU_SCHED");
  79. ret = starpu_init(NULL);
  80. if (ret == -ENODEV)
  81. return 77;
  82. STARPU_CHECK_RETURN_VALUE(ret, "starpu_init");
  83. #ifdef STARPU_HAVE_VALGRIND_H
  84. if (RUNNING_ON_VALGRIND)
  85. ntasks = 8;
  86. #endif
  87. #ifdef STARPU_USE_CPU
  88. nprocs1 = starpu_cpu_worker_get_count();
  89. starpu_worker_get_ids_by_type(STARPU_CPU_WORKER, procs1, nprocs1);
  90. #endif
  91. // if there is no cpu, skip
  92. if (nprocs1 == 0) goto enodev;
  93. #ifdef STARPU_USE_CUDA
  94. ncuda = nprocs2 = starpu_cuda_worker_get_count();
  95. starpu_worker_get_ids_by_type(STARPU_CUDA_WORKER, procs2, nprocs2);
  96. #endif
  97. if (nprocs2 == 0)
  98. {
  99. nprocs2 = 1;
  100. procs2[0] = procs1[0];
  101. }
  102. /*create contexts however you want*/
  103. unsigned sched_ctx1 = starpu_sched_ctx_create(procs1, nprocs1, "ctx1", STARPU_SCHED_CTX_POLICY_NAME, sched?sched:"eager", 0);
  104. unsigned sched_ctx2 = starpu_sched_ctx_create(procs2, nprocs2, "ctx2", STARPU_SCHED_CTX_POLICY_NAME, sched?sched:"eager", 0);
  105. /*indicate what to do with the resources when context 2 finishes (it depends on your application)*/
  106. starpu_sched_ctx_set_inheritor(sched_ctx2, sched_ctx1);
  107. starpu_sched_ctx_display_workers(sched_ctx2, stderr);
  108. int i;
  109. for (i = 0; i < ntasks/2; i++)
  110. {
  111. struct starpu_task *task = starpu_task_create();
  112. task->cl = &sched_ctx_codelet1;
  113. task->cl_arg = NULL;
  114. /*submit tasks to context*/
  115. ret = starpu_task_submit_to_ctx(task,sched_ctx1);
  116. STARPU_CHECK_RETURN_VALUE(ret, "starpu_task_submit");
  117. }
  118. /* tell starpu when you finished submitting tasks to this context
  119. in order to allow moving resources from this context to the inheritor one
  120. when its corresponding tasks finished executing */
  121. starpu_sched_ctx_finished_submit(sched_ctx1);
  122. for (i = 0; i < ntasks/2; i++)
  123. {
  124. struct starpu_task *task = starpu_task_create();
  125. task->cl = &sched_ctx_codelet2;
  126. task->cl_arg = NULL;
  127. ret = starpu_task_submit_to_ctx(task,sched_ctx2);
  128. STARPU_CHECK_RETURN_VALUE(ret, "starpu_task_submit");
  129. }
  130. starpu_sched_ctx_finished_submit(sched_ctx2);
  131. /* wait for all tasks at the end*/
  132. starpu_task_wait_for_all();
  133. starpu_sched_ctx_add_workers(procs1, nprocs1, sched_ctx2);
  134. starpu_sched_ctx_delete(sched_ctx1);
  135. starpu_sched_ctx_delete(sched_ctx2);
  136. printf("tasks executed %d out of %d\n", tasks_executed, ntasks);
  137. printf("tasks executed on ctx1: %d\n", ctx1_tasks_executed);
  138. printf("tasks executed on ctx2: %d\n", ctx2_tasks_executed);
  139. printf("tasks executed on CPU: %d\n", cpu_tasks_executed);
  140. printf("tasks executed on GPU: %d\n", gpu_tasks_executed);
  141. enodev:
  142. starpu_shutdown();
  143. return nprocs1 == 0 ? 77 : 0;
  144. }