data_locality.c

#include <starpu.h>
#include <starpu_profiling.h>
#include "../helper.h"

#define NTASKS 8

/*
 * It is very inefficient to keep moving data between memory nodes. This
 * test makes sure the scheduler takes data locality into account when
 * scheduling tasks.
 *
 * Applies to: dmda, heft, pheft.
 */

static void
dummy(void *buffers[], void *args)
{
	(void) buffers;
	(void) args;
}

/*
 * Dummy cost function, used to make sure the scheduler does schedule the
 * task, instead of getting rid of it as soon as possible because it doesn't
 * know its expected length.
 */
static double
cost_function(struct starpu_task *task, unsigned nimpl)
{
	(void) task;
	(void) nimpl;
	return 0.0;
}

static struct starpu_perfmodel model =
{
	.type = STARPU_COMMON,
	.cost_function = cost_function
};

static struct starpu_codelet cl =
{
	.cpu_funcs = { dummy, NULL },
	.cuda_funcs = { dummy, NULL },
	.opencl_funcs = { dummy, NULL },
	.modes = { STARPU_RW },
	.model = &model,
	.nbuffers = 1
};
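
/* A single variable, shared in read-write mode by every task of the test. */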
static int var = 42;
static starpu_data_handle_t rw_handle;

static void
init_data(void)
{
	starpu_variable_data_register(&rw_handle, 0, (uintptr_t) &var,
				      sizeof(var));
}

static void
free_data(void)
{
	starpu_data_unregister(rw_handle);
}
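
/*
 * Run the scenario once with the given scheduling policy. Returns 0 when
 * every task ran on the same worker, 1 otherwise, and -ENODEV when StarPU
 * cannot provide the workers this test needs.
 */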
static int
run(struct starpu_sched_policy *policy)
{
	int ret;
	struct starpu_conf conf;

	starpu_conf_init(&conf);
	conf.sched_policy = policy;

	ret = starpu_init(&conf);
	if (ret == -ENODEV)
		goto enodev;
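
	/*
	 * The first task below is restricted to GPUs and the following ones
	 * may also run on CPUs, so the test needs at least one CPU worker
	 * plus at least one CUDA or OpenCL worker.
	 */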
	if (starpu_cpu_worker_get_count() == 0 ||
	    (starpu_cuda_worker_get_count() == 0 &&
	     starpu_opencl_worker_get_count() == 0))
		goto enodev;
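
	/* Profiling must be enabled so that profiling_info->workerid can be
	 * read back once the tasks have run. */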
	starpu_profiling_status_set(1);
	init_data();

	/* Send the handle to a GPU. */
	cl.where = STARPU_CUDA | STARPU_OPENCL;

	struct starpu_task *tasks[NTASKS];
	tasks[0] = starpu_task_create();
	tasks[0]->cl = &cl;
	tasks[0]->synchronous = 1;
	tasks[0]->handles[0] = rw_handle;
	tasks[0]->destroy = 0;

	ret = starpu_task_submit(tasks[0]);
	if (ret == -ENODEV)
		goto enodev;
	STARPU_CHECK_RETURN_VALUE(ret, "starpu_task_submit");

	/* Now, run multiple tasks using this handle. */
	cl.where |= STARPU_CPU;
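
	/*
	 * CPU workers are now eligible as well. A locality-aware scheduler
	 * should nevertheless keep the following tasks on the GPU that
	 * already holds the data instead of bouncing it between memory
	 * nodes.
	 */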
	int i;
	for (i = 1; i < NTASKS; i++)
	{
		tasks[i] = starpu_task_create();
		tasks[i]->cl = &cl;
		tasks[i]->handles[0] = rw_handle;
		tasks[i]->destroy = 0;

		ret = starpu_task_submit(tasks[i]);
		if (ret == -ENODEV)
			goto enodev;
		STARPU_CHECK_RETURN_VALUE(ret, "starpu_task_submit");
	}

	starpu_task_wait_for_all();

	/* All tasks should have been executed on the same GPU. */
	ret = 0;
	unsigned workerid = tasks[0]->profiling_info->workerid;
	for (i = 0; i < NTASKS; i++)
	{
		if (tasks[i]->profiling_info->workerid != workerid)
		{
			ret = 1;
			break;
		}
		starpu_task_destroy(tasks[i]);
	}

	/* Clean everything up. */
	for (; i < NTASKS; i++)
		starpu_task_destroy(tasks[i]);

	free_data();
	starpu_shutdown();

	return ret;
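
	/*
	 * There is no worker configuration this test can use: report
	 * -ENODEV so that main() skips the test.
	 */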
enodev:
	starpu_shutdown();
	return -ENODEV;
}

/* XXX: Does this test apply to other schedulers? */
//extern struct starpu_sched_policy _starpu_sched_ws_policy;
//extern struct starpu_sched_policy _starpu_sched_prio_policy;
//extern struct starpu_sched_policy _starpu_sched_random_policy;
//extern struct starpu_sched_policy _starpu_sched_dm_policy;
extern struct starpu_sched_policy _starpu_sched_dmda_policy;
//extern struct starpu_sched_policy _starpu_sched_dmda_ready_policy;
//extern struct starpu_sched_policy _starpu_sched_dmda_sorted_policy;
//extern struct starpu_sched_policy _starpu_sched_eager_policy;
extern struct starpu_sched_policy _starpu_sched_parallel_heft_policy;
//extern struct starpu_sched_policy _starpu_sched_pgreedy_policy;
extern struct starpu_sched_policy _starpu_sched_heft_policy;

static struct starpu_sched_policy *policies[] =
{
	//&_starpu_sched_ws_policy,
	//&_starpu_sched_prio_policy,
	//&_starpu_sched_dm_policy,
	&_starpu_sched_dmda_policy,
	&_starpu_sched_heft_policy,
	//&_starpu_sched_dmda_ready_policy,
	//&_starpu_sched_dmda_sorted_policy,
	//&_starpu_sched_random_policy,
	//&_starpu_sched_eager_policy,
	&_starpu_sched_parallel_heft_policy,
	//&_starpu_sched_pgreedy_policy
};

int
main(void)
{
	int i;
	int n_policies = sizeof(policies)/sizeof(policies[0]);

	for (i = 0; i < n_policies; ++i)
	{
		struct starpu_sched_policy *policy = policies[i];
		FPRINTF(stdout, "Running with policy %s.\n",
			policy->policy_name);

		int ret = run(policy);
		if (ret == -ENODEV)
			return STARPU_TEST_SKIPPED;
		if (ret == 1)
			return EXIT_FAILURE;
	}

	return EXIT_SUCCESS;
}