/* sched_ctx.c — StarPU scheduling-contexts example. */
/* StarPU --- Runtime system for heterogeneous multicore architectures.
 *
 * Copyright (C) 2012-2014,2017,2018 Inria
 * Copyright (C) 2010-2018 CNRS
 * Copyright (C) 2010-2014 Université de Bordeaux
 *
 * StarPU is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation; either version 2.1 of the License, or (at
 * your option) any later version.
 *
 * StarPU is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * See the GNU Lesser General Public License in COPYING.LGPL for more details.
 */
#include <starpu.h>
#include <stdio.h>
#include <stdlib.h>

#ifdef STARPU_QUICK_CHECK
#define NTASKS 64
#else
#define NTASKS 1000
#endif
/* Global counters, bumped with STARPU_ATOMIC_ADD from the codelet
 * implementations (which may run concurrently on several workers). */
int tasks_executed = 0;      /* total tasks run, across both contexts */
int ctx1_tasks_executed = 0; /* tasks run through context 1 */
int ctx2_tasks_executed = 0; /* tasks run through context 2 */
int cpu_tasks_executed = 0;  /* tasks whose CPU implementation ran */
int gpu_tasks_executed = 0;  /* tasks whose CUDA implementation ran */
  30. static void sched_ctx_cpu_func(void *descr[], void *arg)
  31. {
  32. (void)descr;
  33. (void)arg;
  34. (void)STARPU_ATOMIC_ADD(&tasks_executed,1);
  35. (void)STARPU_ATOMIC_ADD(&ctx1_tasks_executed,1);
  36. (void)STARPU_ATOMIC_ADD(&cpu_tasks_executed,1);
  37. }
  38. static void sched_ctx2_cpu_func(void *descr[], void *arg)
  39. {
  40. (void)descr;
  41. (void)arg;
  42. (void)STARPU_ATOMIC_ADD(&tasks_executed,1);
  43. (void)STARPU_ATOMIC_ADD(&ctx2_tasks_executed,1);
  44. (void)STARPU_ATOMIC_ADD(&cpu_tasks_executed,1);
  45. }
  46. static void sched_ctx2_cuda_func(void *descr[], void *arg)
  47. {
  48. (void)descr;
  49. (void)arg;
  50. (void)STARPU_ATOMIC_ADD(&tasks_executed,1);
  51. (void)STARPU_ATOMIC_ADD(&ctx2_tasks_executed,1);
  52. (void)STARPU_ATOMIC_ADD(&gpu_tasks_executed,1);
  53. }
  54. static struct starpu_codelet sched_ctx_codelet1 =
  55. {
  56. .cpu_funcs = {sched_ctx_cpu_func},
  57. .model = NULL,
  58. .nbuffers = 0,
  59. .name = "sched_ctx"
  60. };
  61. static struct starpu_codelet sched_ctx_codelet2 =
  62. {
  63. .cpu_funcs = {sched_ctx2_cpu_func},
  64. .cuda_funcs = {sched_ctx2_cuda_func},
  65. .model = NULL,
  66. .nbuffers = 0,
  67. .name = "sched_ctx"
  68. };
  69. int main(void)
  70. {
  71. int ntasks = NTASKS;
  72. int ret;
  73. unsigned ncuda = 0;
  74. int nprocs1 = 0;
  75. int nprocs2 = 0;
  76. int procs1[STARPU_NMAXWORKERS], procs2[STARPU_NMAXWORKERS];
  77. char *sched = getenv("STARPU_SCHED");
  78. ret = starpu_init(NULL);
  79. if (ret == -ENODEV)
  80. return 77;
  81. STARPU_CHECK_RETURN_VALUE(ret, "starpu_init");
  82. #ifdef STARPU_USE_CPU
  83. nprocs1 = starpu_cpu_worker_get_count();
  84. starpu_worker_get_ids_by_type(STARPU_CPU_WORKER, procs1, nprocs1);
  85. #endif
  86. // if there is no cpu, skip
  87. if (nprocs1 == 0) goto enodev;
  88. #ifdef STARPU_USE_CUDA
  89. ncuda = nprocs2 = starpu_cuda_worker_get_count();
  90. starpu_worker_get_ids_by_type(STARPU_CUDA_WORKER, procs2, nprocs2);
  91. #endif
  92. if (nprocs2 == 0)
  93. {
  94. nprocs2 = 1;
  95. procs2[0] = procs1[0];
  96. }
  97. /*create contexts however you want*/
  98. unsigned sched_ctx1 = starpu_sched_ctx_create(procs1, nprocs1, "ctx1", STARPU_SCHED_CTX_POLICY_NAME, sched?sched:"eager", 0);
  99. unsigned sched_ctx2 = starpu_sched_ctx_create(procs2, nprocs2, "ctx2", STARPU_SCHED_CTX_POLICY_NAME, sched?sched:"eager", 0);
  100. /*indicate what to do with the resources when context 2 finishes (it depends on your application)*/
  101. starpu_sched_ctx_set_inheritor(sched_ctx2, sched_ctx1);
  102. starpu_sched_ctx_display_workers(sched_ctx2, stderr);
  103. int i;
  104. for (i = 0; i < ntasks/2; i++)
  105. {
  106. struct starpu_task *task = starpu_task_create();
  107. task->cl = &sched_ctx_codelet1;
  108. task->cl_arg = NULL;
  109. /*submit tasks to context*/
  110. ret = starpu_task_submit_to_ctx(task,sched_ctx1);
  111. STARPU_CHECK_RETURN_VALUE(ret, "starpu_task_submit");
  112. }
  113. /* tell starpu when you finished submitting tasks to this context
  114. in order to allow moving resources from this context to the inheritor one
  115. when its corresponding tasks finished executing */
  116. starpu_sched_ctx_finished_submit(sched_ctx1);
  117. for (i = 0; i < ntasks/2; i++)
  118. {
  119. struct starpu_task *task = starpu_task_create();
  120. task->cl = &sched_ctx_codelet2;
  121. task->cl_arg = NULL;
  122. ret = starpu_task_submit_to_ctx(task,sched_ctx2);
  123. STARPU_CHECK_RETURN_VALUE(ret, "starpu_task_submit");
  124. }
  125. starpu_sched_ctx_finished_submit(sched_ctx2);
  126. /* wait for all tasks at the end*/
  127. starpu_task_wait_for_all();
  128. starpu_sched_ctx_add_workers(procs1, nprocs1, sched_ctx2);
  129. starpu_sched_ctx_delete(sched_ctx1);
  130. starpu_sched_ctx_delete(sched_ctx2);
  131. printf("tasks executed %d out of %d\n", tasks_executed, ntasks);
  132. printf("tasks executed on ctx1: %d\n", ctx1_tasks_executed);
  133. printf("tasks executed on ctx2: %d\n", ctx2_tasks_executed);
  134. printf("tasks executed on CPU: %d\n", cpu_tasks_executed);
  135. printf("tasks executed on GPU: %d\n", gpu_tasks_executed);
  136. enodev:
  137. starpu_shutdown();
  138. return nprocs1 == 0 ? 77 : 0;
  139. }