/* data_locality.c */
  1. #include <starpu.h>
  2. #include "../helper.h"
  3. #define NTASKS 8
  4. /*
  5. * It is very inefficient to keep moving data between memory nodes. This
  6. * test makes sure the scheduler will take account of the data locality
  7. * when scheduling tasks.
  8. *
  9. * Applies to : dmda, heft, pheft.
  10. */
  11. static void
  12. dummy(void *buffers[], void *args)
  13. {
  14. (void) buffers;
  15. (void) args;
  16. }
  17. /*
  18. * Dummy cost function, used to make sure the scheduler does schedule the
  19. * task, instead of getting rid of it as soon as possible because it doesn't
  20. * know its expected length.
  21. */
  22. static double
  23. cost_function(struct starpu_task *task, unsigned nimpl)
  24. {
  25. (void) task;
  26. (void) nimpl;
  27. return 0.0;
  28. }
/* Trivial performance model: every implementation reports the same cost
 * (see cost_function above), so scheduling decisions are driven by data
 * transfer considerations only. */
static struct starpu_perfmodel model =
{
	.type = STARPU_COMMON,
	.cost_function = cost_function
};

/* Codelet shared by every task in the test: one buffer accessed in RW mode.
 * NOTE: .where is intentionally not set here — run() assigns it (GPU-only
 * first, then GPU+CPU) before submitting tasks. */
static struct starpu_codelet cl =
{
	.cpu_funcs = { dummy, NULL },
	.cuda_funcs = { dummy, NULL },
	.opencl_funcs = { dummy, NULL },
	.modes = { STARPU_RW },
	.model = &model,
	.nbuffers = 1
};
/* The single variable all tasks access through one shared RW handle:
 * forcing every task onto the same handle is what makes data locality
 * observable in the scheduling decisions. */
static int var = 42;
static starpu_data_handle_t rw_handle;

/* Register `var` with StarPU on memory node 0 (main memory). */
static void
init_data(void)
{
	starpu_variable_data_register(&rw_handle, 0, (uintptr_t) &var,
				      sizeof(var));
}
/* Release the handle registered by init_data(). Callers invoke this before
 * starpu_shutdown(). */
static void
free_data(void)
{
	starpu_data_unregister(rw_handle);
}
  56. static int
  57. run(struct starpu_sched_policy *policy)
  58. {
  59. int ret;
  60. struct starpu_conf conf;
  61. starpu_conf_init(&conf);
  62. conf.sched_policy = policy;
  63. ret = starpu_init(&conf);
  64. if (ret == -ENODEV)
  65. goto enodev;
  66. if (starpu_cpu_worker_get_count() == 0 ||
  67. (starpu_cuda_worker_get_count() == 0 &&
  68. starpu_opencl_worker_get_count() == 0))
  69. goto enodev;
  70. starpu_profiling_status_set(1);
  71. init_data();
  72. /* Send the handle to a GPU. */
  73. cl.where = STARPU_CUDA | STARPU_OPENCL;
  74. struct starpu_task *tasks[NTASKS];
  75. tasks[0] = starpu_task_create();
  76. tasks[0]->cl = &cl;
  77. tasks[0]->synchronous = 1;
  78. tasks[0]->handles[0] = rw_handle;
  79. tasks[0]->destroy = 0;
  80. ret = starpu_task_submit(tasks[0]);
  81. if (ret == -ENODEV)
  82. goto enodev;
  83. STARPU_CHECK_RETURN_VALUE(ret, "starpu_task_submit");
  84. /* Now, run multiple tasks using this handle. */
  85. cl.where |= STARPU_CPU;
  86. int i;
  87. for (i = 1; i < NTASKS; i++)
  88. {
  89. tasks[i] = starpu_task_create();
  90. tasks[i]->cl = &cl;
  91. tasks[i]->handles[0] = rw_handle;
  92. tasks[i]->destroy = 0;
  93. ret = starpu_task_submit(tasks[i]);
  94. if (ret == -ENODEV)
  95. goto enodev;
  96. STARPU_CHECK_RETURN_VALUE(ret, "starpu_task_submit");
  97. }
  98. starpu_task_wait_for_all();
  99. /* All tasks should have been executed on the same GPU. */
  100. ret = 0;
  101. unsigned workerid = tasks[0]->profiling_info->workerid;
  102. for (i = 0; i < NTASKS; i++)
  103. {
  104. if (tasks[i]->profiling_info->workerid != workerid)
  105. {
  106. ret = 1;
  107. break;
  108. }
  109. starpu_task_destroy(tasks[i]);
  110. }
  111. /* Clean everything up. */
  112. for (; i < NTASKS; i++)
  113. starpu_task_destroy(tasks[i]);
  114. free_data();
  115. starpu_shutdown();
  116. return ret;
  117. enodev:
  118. starpu_shutdown();
  119. return -ENODEV;
  120. }
/* XXX: Does this test apply to other schedulers ? */
//extern struct starpu_sched_policy _starpu_sched_ws_policy;
//extern struct starpu_sched_policy _starpu_sched_prio_policy;
//extern struct starpu_sched_policy _starpu_sched_random_policy;
//extern struct starpu_sched_policy _starpu_sched_dm_policy;
extern struct starpu_sched_policy _starpu_sched_dmda_policy;
//extern struct starpu_sched_policy _starpu_sched_dmda_ready_policy;
//extern struct starpu_sched_policy _starpu_sched_dmda_sorted_policy;
//extern struct starpu_sched_policy _starpu_sched_eager_policy;
extern struct starpu_sched_policy _starpu_sched_parallel_heft_policy;
//extern struct starpu_sched_policy _starpu_sched_pgreedy_policy;
extern struct starpu_sched_policy _starpu_sched_heft_policy;

/* Schedulers under test: per the file header, only dmda, heft and pheft
 * are expected to take data locality into account. The commented-out
 * entries are kept as candidates pending the XXX question above. */
static struct starpu_sched_policy *policies[] =
{
	//&_starpu_sched_ws_policy,
	//&_starpu_sched_prio_policy,
	//&_starpu_sched_dm_policy,
	&_starpu_sched_dmda_policy,
	&_starpu_sched_heft_policy,
	//&_starpu_sched_dmda_ready_policy,
	//&_starpu_sched_dmda_sorted_policy,
	//&_starpu_sched_random_policy,
	//&_starpu_sched_eager_policy,
	&_starpu_sched_parallel_heft_policy,
	//&_starpu_sched_pgreedy_policy
};
  147. int
  148. main(void)
  149. {
  150. int i;
  151. int n_policies = sizeof(policies)/sizeof(policies[0]);
  152. for (i = 0; i < n_policies; ++i)
  153. {
  154. struct starpu_sched_policy *policy = policies[i];
  155. FPRINTF(stdout, "Running with policy %s.\n",
  156. policy->policy_name);
  157. int ret = run(policy);
  158. if (ret == -ENODEV)
  159. return STARPU_TEST_SKIPPED;
  160. if (ret == 1)
  161. return EXIT_FAILURE;
  162. }
  163. return EXIT_SUCCESS;
  164. }