data_locality.c

#include <starpu.h>
#include "../helper.h"

#define NTASKS 8

/*
 * It is very inefficient to keep moving data between memory nodes. This
 * test makes sure the scheduler takes data locality into account when
 * scheduling tasks.
 *
 * Applies to: dmda, heft, pheft.
 */

static void
dummy(void *buffers[], void *args)
{
	(void) buffers;
	(void) args;
}
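
/*
 * The same no-op function is reused below for the CPU, CUDA and OpenCL
 * implementations: it never touches its buffer, so only the data transfers
 * decided by the scheduler matter for this test.
 */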

/*
 * Dummy cost function, used to make sure the scheduler does schedule the
 * task, instead of getting rid of it as soon as possible because it doesn't
 * know its expected length.
 */
static double
cost_function(struct starpu_task *task, unsigned nimpl)
{
	(void) task;
	(void) nimpl;
	return 0.0;
}

static struct starpu_perfmodel model =
{
	.type = STARPU_COMMON,
	.cost_function = cost_function
};

static struct starpu_codelet cl =
{
	.cpu_funcs = { dummy, NULL },
	.cuda_funcs = { dummy, NULL },
	.opencl_funcs = { dummy, NULL },
	.modes = { STARPU_RW },
	.model = &model,
	.nbuffers = 1
};
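
/*
 * Every task in this test uses this codelet on the same read-write handle
 * and the kernels cost nothing, so where a locality-aware scheduler places
 * the tasks should be driven by data locality alone.
 */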

static int var = 42;
static starpu_data_handle_t rw_handle;

static void
init_data(void)
{
	starpu_variable_data_register(&rw_handle, 0, (uintptr_t) &var,
				      sizeof(var));
}

static void
free_data(void)
{
	starpu_data_unregister(rw_handle);
}
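
/*
 * The variable is registered on memory node 0 (main RAM), so the only valid
 * copy of the data initially lives in main memory.
 */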

static int
run(struct starpu_sched_policy *policy)
{
	int ret;
	struct starpu_conf conf;
	starpu_conf_init(&conf);
	conf.sched_policy = policy;

	ret = starpu_init(&conf);
	if (ret == -ENODEV)
		goto enodev;

	if (starpu_cpu_worker_get_count() == 0 ||
	    (starpu_cuda_worker_get_count() == 0 &&
	     starpu_opencl_worker_get_count() == 0))
		goto enodev;

	starpu_profiling_status_set(1);
	init_data();

	/* Send the handle to a GPU. */
	cl.where = STARPU_CUDA | STARPU_OPENCL;
	struct starpu_task *tasks[NTASKS];
	tasks[0] = starpu_task_create();
	tasks[0]->cl = &cl;
	tasks[0]->synchronous = 1;
	tasks[0]->handles[0] = rw_handle;
	tasks[0]->destroy = 0;
	ret = starpu_task_submit(tasks[0]);
	if (ret == -ENODEV)
		goto enodev;
	STARPU_CHECK_RETURN_VALUE(ret, "starpu_task_submit");
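
	/* The first task is synchronous and accesses the handle in RW mode,
	 * so once it completes the only valid copy of the data should sit on
	 * the GPU that executed it. */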

	/* Now, run multiple tasks using this handle. */
	cl.where |= STARPU_CPU;
	int i;
	for (i = 1; i < NTASKS; i++)
	{
		tasks[i] = starpu_task_create();
		tasks[i]->cl = &cl;
		tasks[i]->handles[0] = rw_handle;
		tasks[i]->destroy = 0;
		ret = starpu_task_submit(tasks[i]);
		if (ret == -ENODEV)
			goto enodev;
		STARPU_CHECK_RETURN_VALUE(ret, "starpu_task_submit");
	}
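
	/* CPU workers are now eligible too, but a locality-aware policy
	 * (dmda, heft, pheft) should account for the transfer cost and keep
	 * scheduling these tasks on the GPU that already holds the data. */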
	starpu_task_wait_for_all();

	/* All tasks should have been executed on the same GPU. */
	ret = 0;
	int workerid = tasks[0]->profiling_info->workerid;
	for (i = 0; i < NTASKS; i++)
	{
		if (tasks[i]->profiling_info->workerid != workerid)
		{
			FPRINTF(stderr, "Error for task %d. Worker id %d different from expected worker id %d\n",
				i, tasks[i]->profiling_info->workerid, workerid);
			ret = 1;
			break;
		}
		starpu_task_destroy(tasks[i]);
	}

	/* Clean everything up. */
	for (; i < NTASKS; i++)
		starpu_task_destroy(tasks[i]);
	free_data();
	starpu_shutdown();
	return ret;

enodev:
	starpu_shutdown();
	return -ENODEV;
}

/* XXX: Does this test apply to other schedulers? */
//extern struct starpu_sched_policy _starpu_sched_ws_policy;
//extern struct starpu_sched_policy _starpu_sched_prio_policy;
//extern struct starpu_sched_policy _starpu_sched_random_policy;
//extern struct starpu_sched_policy _starpu_sched_dm_policy;
extern struct starpu_sched_policy _starpu_sched_dmda_policy;
//extern struct starpu_sched_policy _starpu_sched_dmda_ready_policy;
//extern struct starpu_sched_policy _starpu_sched_dmda_sorted_policy;
//extern struct starpu_sched_policy _starpu_sched_eager_policy;
extern struct starpu_sched_policy _starpu_sched_parallel_heft_policy;
//extern struct starpu_sched_policy _starpu_sched_pgreedy_policy;
extern struct starpu_sched_policy _starpu_sched_heft_policy;

static struct starpu_sched_policy *policies[] =
{
	//&_starpu_sched_ws_policy,
	//&_starpu_sched_prio_policy,
	//&_starpu_sched_dm_policy,
	&_starpu_sched_dmda_policy,
	&_starpu_sched_heft_policy,
	//&_starpu_sched_dmda_ready_policy,
	//&_starpu_sched_dmda_sorted_policy,
	//&_starpu_sched_random_policy,
	//&_starpu_sched_eager_policy,
	&_starpu_sched_parallel_heft_policy,
	//&_starpu_sched_pgreedy_policy
};

int
main(void)
{
	int i;
	int n_policies = sizeof(policies)/sizeof(policies[0]);
	for (i = 0; i < n_policies; ++i)
	{
		struct starpu_sched_policy *policy = policies[i];
		FPRINTF(stdout, "Running with policy %s.\n",
			policy->policy_name);
		int ret = run(policy);
		if (ret == -ENODEV)
			return STARPU_TEST_SKIPPED;
		if (ret == 1)
			return EXIT_FAILURE;
	}
	return EXIT_SUCCESS;
}