deque_modeling_policy_data_aware.c

/* StarPU --- Runtime system for heterogeneous multicore architectures.
 *
 * Copyright (C) 2010-2012 Université de Bordeaux 1
 * Copyright (C) 2010, 2011, 2012 Centre National de la Recherche Scientifique
 * Copyright (C) 2011 Télécom-SudParis
 *
 * StarPU is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation; either version 2.1 of the License, or (at
 * your option) any later version.
 *
 * StarPU is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * See the GNU Lesser General Public License in COPYING.LGPL for more details.
 */

/* Distributed queues using performance modeling to assign tasks */

#include <limits.h>
#include <core/perfmodel/perfmodel.h>
#include <core/task_bundle.h>
#include <core/workers.h>
#include <sched_policies/fifo_queues.h>
#include <starpu_parameters.h>

#ifndef DBL_MIN
#define DBL_MIN __DBL_MIN__
#endif

#ifndef DBL_MAX
#define DBL_MAX __DBL_MAX__
#endif

static unsigned nworkers;
static struct _starpu_fifo_taskq *queue_array[STARPU_NMAXWORKERS];

static _starpu_pthread_cond_t sched_cond[STARPU_NMAXWORKERS];
static _starpu_pthread_mutex_t sched_mutex[STARPU_NMAXWORKERS];

static double alpha = _STARPU_DEFAULT_ALPHA;
static double beta = _STARPU_DEFAULT_BETA;
static double _gamma = _STARPU_DEFAULT_GAMMA;
static double idle_power = 0.0;

#ifdef STARPU_VERBOSE
static long int total_task_cnt = 0;
static long int ready_task_cnt = 0;
#endif
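
/* Count how many of the task's data buffers are not yet valid (i.e. would
 * still require a transfer) on the given memory node. */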
static int count_non_ready_buffers(struct starpu_task *task, uint32_t node)
{
	int cnt = 0;

	unsigned nbuffers = task->cl->nbuffers;
	unsigned index;

	for (index = 0; index < nbuffers; index++)
	{
		starpu_data_handle_t handle;

		handle = task->handles[index];

		int is_valid;
		starpu_data_query_status(handle, node, NULL, &is_valid, NULL);

		if (!is_valid)
			cnt++;
	}

	return cnt;
}
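
/* Pop the "most ready" task for the given memory node: starting from the
 * back of the queue (the next task in FIFO order), scan toward the front,
 * among tasks whose priority does not exceed that of the back task, for the
 * one with the fewest non-ready buffers, stopping early when a fully ready
 * task is found. */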
static struct starpu_task *_starpu_fifo_pop_first_ready_task(struct _starpu_fifo_taskq *fifo_queue, unsigned node)
{
	struct starpu_task *task = NULL, *current;

	if (fifo_queue->ntasks == 0)
		return NULL;

	if (fifo_queue->ntasks > 0)
	{
		fifo_queue->ntasks--;

		task = starpu_task_list_back(&fifo_queue->taskq);

		int first_task_priority = task->priority;

		current = task;

		int non_ready_best = INT_MAX;

		while (current)
		{
			int priority = current->priority;

			if (priority <= first_task_priority)
			{
				int non_ready = count_non_ready_buffers(current, node);
				if (non_ready < non_ready_best)
				{
					non_ready_best = non_ready;
					task = current;

					if (non_ready == 0)
						break;
				}
			}

			current = current->prev;
		}

		starpu_task_list_erase(&fifo_queue->taskq, task);

		_STARPU_TRACE_JOB_POP(task, 0);
	}

	return task;
}
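
/* Pop hook for the "ready" variants (dmdas/dmdar): take the most-ready task
 * and update the worker's expected start/length/end accordingly. */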
static struct starpu_task *dmda_pop_ready_task(void)
{
	struct starpu_task *task;

	int workerid = starpu_worker_get_id();
	struct _starpu_fifo_taskq *fifo = queue_array[workerid];

	unsigned node = starpu_worker_get_memory_node(workerid);

	task = _starpu_fifo_pop_first_ready_task(fifo, node);
	if (task)
	{
		double model = task->predicted;

		fifo->exp_len -= model;
		fifo->exp_start = starpu_timing_now() + model;
		fifo->exp_end = fifo->exp_start + fifo->exp_len;

#ifdef STARPU_VERBOSE
		if (task->cl)
		{
			int non_ready = count_non_ready_buffers(task, starpu_worker_get_memory_node(workerid));
			if (non_ready == 0)
				ready_task_cnt++;
		}

		total_task_cnt++;
#endif
	}

	return task;
}
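
/* Plain FIFO pop hook (dm/dmda): take the next local task in queue order and
 * update the worker's expected start/length/end accordingly. */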
static struct starpu_task *dmda_pop_task(void)
{
	struct starpu_task *task;

	int workerid = starpu_worker_get_id();
	struct _starpu_fifo_taskq *fifo = queue_array[workerid];

	task = _starpu_fifo_pop_local_task(fifo);
	if (task)
	{
		double model = task->predicted;

		fifo->exp_len -= model;
		fifo->exp_start = starpu_timing_now() + model;
		fifo->exp_end = fifo->exp_start + fifo->exp_len;

#ifdef STARPU_VERBOSE
		if (task->cl)
		{
			int non_ready = count_non_ready_buffers(task, starpu_worker_get_memory_node(workerid));
			if (non_ready == 0)
				ready_task_cnt++;
		}

		total_task_cnt++;
#endif
	}

	return task;
}
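
/* Pop the worker's whole local queue at once and update the expected
 * start/length/end for every popped task. */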
static struct starpu_task *dmda_pop_every_task(void)
{
	struct starpu_task *new_list, *task;

	int workerid = starpu_worker_get_id();
	struct _starpu_fifo_taskq *fifo = queue_array[workerid];

	new_list = _starpu_fifo_pop_every_task(fifo, &sched_mutex[workerid], workerid);

	/* Walk the popped list with a separate cursor, so that the head of
	 * the list (and not NULL) is what gets returned to the caller. */
	for (task = new_list; task; task = task->next)
	{
		double model = task->predicted;

		fifo->exp_len -= model;
		fifo->exp_start = starpu_timing_now() + model;
		fifo->exp_end = fifo->exp_start + fifo->exp_len;
	}

	return new_list;
}
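
/* Enqueue the task on the chosen worker's queue, account for its predicted
 * length in that worker's expected end, and trigger a prefetch of its input
 * data on the worker's memory node when prefetching is enabled. */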
static int push_task_on_best_worker(struct starpu_task *task, int best_workerid, double predicted, int prio)
{
	/* make sure someone could execute that task! */
	STARPU_ASSERT(best_workerid != -1);

	struct _starpu_fifo_taskq *fifo;
	fifo = queue_array[best_workerid];

	fifo->exp_end += predicted;
	fifo->exp_len += predicted;

	task->predicted = predicted;
	/* TODO predicted_transfer */

	unsigned memory_node = starpu_worker_get_memory_node(best_workerid);

	if (starpu_get_prefetch_flag())
		starpu_prefetch_task_input_on_node(task, memory_node);

	if (prio)
		return _starpu_fifo_push_sorted_task(queue_array[best_workerid],
			&sched_mutex[best_workerid], &sched_cond[best_workerid], task);
	else
		return _starpu_fifo_push_task(queue_array[best_workerid],
			&sched_mutex[best_workerid], &sched_cond[best_workerid], task);
}

/* TODO: factorize with dmda!! */
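/* Push a task for the "dm" strategy: for each (worker, implementation) pair
 * that can run the task, predict when the worker would complete it
 * (exp_end = exp_start + exp_len + predicted length) and pick the pair with
 * the earliest completion; while predictions are missing (calibration), fall
 * back to a greedy distribution based on the number of queued tasks. */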
static int _dm_push_task(struct starpu_task *task, unsigned prio)
{
	/* find the queue */
	struct _starpu_fifo_taskq *fifo;
	unsigned worker;
	int best = -1;

	double best_exp_end = 0.0;
	double model_best = 0.0;

	int ntasks_best = -1;
	double ntasks_best_end = 0.0;
	int calibrating = 0;

	/* A priori, we know all estimations */
	int unknown = 0;

	unsigned best_impl = 0;
	unsigned nimpl;

	for (worker = 0; worker < nworkers; worker++)
	{
		for (nimpl = 0; nimpl < STARPU_MAXIMPLEMENTATIONS; nimpl++)
		{
			double exp_end;

			fifo = queue_array[worker];

			/* Sometimes workers didn't take the tasks as early as we expected */
			fifo->exp_start = STARPU_MAX(fifo->exp_start, starpu_timing_now());
			fifo->exp_end = fifo->exp_start + fifo->exp_len;

			if (!starpu_worker_can_execute_task(worker, task, nimpl))
			{
				/* no one on that queue may execute this task */
				continue;
			}

			enum starpu_perf_archtype perf_arch = starpu_worker_get_perf_archtype(worker);
			double local_length = starpu_task_expected_length(task, perf_arch, nimpl);
			double ntasks_end = fifo->ntasks / starpu_worker_get_relative_speedup(perf_arch);

			//_STARPU_DEBUG("Scheduler dm: task length (%lf) worker (%u) kernel (%u)\n", local_length, worker, nimpl);

			if (ntasks_best == -1
			    || (!calibrating && ntasks_end < ntasks_best_end) /* Not calibrating, take the better worker */
			    || (!calibrating && isnan(local_length)) /* Not calibrating but this worker is being calibrated */
			    || (calibrating && isnan(local_length) && ntasks_end < ntasks_best_end) /* Calibrating, compete this worker with other non-calibrated */
			   )
			{
				ntasks_best_end = ntasks_end;
				ntasks_best = worker;
				best_impl = nimpl;
			}

			if (isnan(local_length))
				/* we are calibrating, we want to speed up calibration time
				 * so we privilege non-calibrated tasks (but still
				 * greedily distribute them to avoid dumb schedules) */
				calibrating = 1;

			if (isnan(local_length) || _STARPU_IS_ZERO(local_length))
				/* there is no prediction available for that task
				 * with that arch yet, so switch to a greedy strategy */
				unknown = 1;

			if (unknown)
				continue;

			exp_end = fifo->exp_start + fifo->exp_len + local_length;

			if (best == -1 || exp_end < best_exp_end)
			{
				/* a better solution was found */
				best_exp_end = exp_end;
				best = worker;
				model_best = local_length;
				best_impl = nimpl;
			}
		}
	}

	if (unknown)
	{
		best = ntasks_best;
		model_best = 0.0;
	}

	//_STARPU_DEBUG("Scheduler dm: kernel (%u)\n", best_impl);

	_starpu_get_job_associated_to_task(task)->nimpl = best_impl;

	/* we should now have the best worker in variable "best" */
	return push_task_on_best_worker(task, best, model_best, prio);
}
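
/* For every (worker, implementation) pair that can run the task, fill in the
 * predicted task length, data transfer penalty, power consumption and
 * expected completion time. Also report the earliest and latest expected
 * completion over all pairs and, when some prediction is missing, a forced
 * (worker, implementation) choice used to bootstrap calibration. */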
static void compute_all_performance_predictions(struct starpu_task *task,
						double local_task_length[STARPU_NMAXWORKERS][STARPU_MAXIMPLEMENTATIONS],
						double exp_end[STARPU_NMAXWORKERS][STARPU_MAXIMPLEMENTATIONS],
						double *max_exp_endp,
						double *best_exp_endp,
						double local_data_penalty[STARPU_NMAXWORKERS][STARPU_MAXIMPLEMENTATIONS],
						double local_power[STARPU_NMAXWORKERS][STARPU_MAXIMPLEMENTATIONS],
						int *forced_worker, int *forced_impl)
{
	int calibrating = 0;
	double max_exp_end = DBL_MIN;
	double best_exp_end = DBL_MAX;
	int ntasks_best = -1;
	int nimpl_best = 0;
	double ntasks_best_end = 0.0;

	/* A priori, we know all estimations */
	int unknown = 0;

	unsigned worker;
	unsigned nimpl;

	starpu_task_bundle_t bundle = task->bundle;

	for (worker = 0; worker < nworkers; worker++)
	{
		struct _starpu_fifo_taskq *fifo = queue_array[worker];

		for (nimpl = 0; nimpl < STARPU_MAXIMPLEMENTATIONS; nimpl++)
		{
			if (!starpu_worker_can_execute_task(worker, task, nimpl))
			{
				/* no one on that queue may execute this task */
				continue;
			}

			/* Sometimes workers didn't take the tasks as early as we expected */
			fifo->exp_start = STARPU_MAX(fifo->exp_start, starpu_timing_now());
			exp_end[worker][nimpl] = fifo->exp_start + fifo->exp_len;
			if (exp_end[worker][nimpl] > max_exp_end)
				max_exp_end = exp_end[worker][nimpl];

			enum starpu_perf_archtype perf_arch = starpu_worker_get_perf_archtype(worker);
			unsigned memory_node = starpu_worker_get_memory_node(worker);

			//_STARPU_DEBUG("Scheduler dmda: task length (%lf) worker (%u) kernel (%u)\n", local_task_length[worker][nimpl], worker, nimpl);

			if (bundle)
			{
				STARPU_ABORT(); /* Not implemented yet. */
			}
			else
			{
				local_task_length[worker][nimpl] = starpu_task_expected_length(task, perf_arch, nimpl);
				local_data_penalty[worker][nimpl] = starpu_task_expected_data_transfer_time(memory_node, task);
				local_power[worker][nimpl] = starpu_task_expected_power(task, perf_arch, nimpl);
			}

			double ntasks_end = fifo->ntasks / starpu_worker_get_relative_speedup(perf_arch);

			if (ntasks_best == -1
			    || (!calibrating && ntasks_end < ntasks_best_end) /* Not calibrating, take the better worker */
			    || (!calibrating && isnan(local_task_length[worker][nimpl])) /* Not calibrating but this worker is being calibrated */
			    || (calibrating && isnan(local_task_length[worker][nimpl]) && ntasks_end < ntasks_best_end) /* Calibrating, compete this worker with other non-calibrated */
			   )
			{
				ntasks_best_end = ntasks_end;
				ntasks_best = worker;
				nimpl_best = nimpl;
			}

			if (isnan(local_task_length[worker][nimpl]))
				/* we are calibrating, we want to speed up calibration time
				 * so we privilege non-calibrated tasks (but still
				 * greedily distribute them to avoid dumb schedules) */
				calibrating = 1;

			if (isnan(local_task_length[worker][nimpl])
			    || _STARPU_IS_ZERO(local_task_length[worker][nimpl]))
				/* there is no prediction available for that task
				 * with that arch (yet or at all), so switch to a greedy strategy */
				unknown = 1;

			if (unknown)
				continue;

			exp_end[worker][nimpl] = fifo->exp_start + fifo->exp_len + local_task_length[worker][nimpl];

			if (exp_end[worker][nimpl] < best_exp_end)
			{
				/* a better solution was found */
				best_exp_end = exp_end[worker][nimpl];
				nimpl_best = nimpl;
			}

			if (isnan(local_power[worker][nimpl]))
				local_power[worker][nimpl] = 0.;
		}
	}

	*forced_worker = unknown ? ntasks_best : -1;
	*forced_impl = unknown ? nimpl_best : -1;

	*best_exp_endp = best_exp_end;
	*max_exp_endp = max_exp_end;
}
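
/* Push a task for the "dmda" strategies: compute all predictions, then
 * minimize a fitness value per (worker, implementation) pair:
 *
 *   fitness = alpha * (exp_end - best_exp_end)   (termination time)
 *           + beta  * data_penalty               (data transfer time)
 *           + gamma * power                      (energy consumption)
 *
 * plus an idle-power term when the placement would extend the overall
 * makespan. When no prediction is available, force the calibration choice
 * returned by compute_all_performance_predictions(). */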
static int _dmda_push_task(struct starpu_task *task, unsigned prio)
{
	/* find the queue */
	unsigned worker;
	int best = -1;
	int selected_impl = 0;
	double model_best = 0.0;

	/* this flag is set if the corresponding worker is selected because
	   there is no performance prediction available yet */
	int forced_best = -1;
	int forced_impl = -1;

	double local_task_length[nworkers][STARPU_MAXIMPLEMENTATIONS];
	double local_data_penalty[nworkers][STARPU_MAXIMPLEMENTATIONS];
	double local_power[nworkers][STARPU_MAXIMPLEMENTATIONS];
	double exp_end[nworkers][STARPU_MAXIMPLEMENTATIONS];
	double max_exp_end = 0.0;
	double best_exp_end;

	double fitness[nworkers][STARPU_MAXIMPLEMENTATIONS];

	compute_all_performance_predictions(task,
					    local_task_length,
					    exp_end,
					    &max_exp_end,
					    &best_exp_end,
					    local_data_penalty,
					    local_power,
					    &forced_best,
					    &forced_impl);

	double best_fitness = -1;

	unsigned nimpl;
	if (forced_best == -1)
	{
		for (worker = 0; worker < nworkers; worker++)
		{
			for (nimpl = 0; nimpl < STARPU_MAXIMPLEMENTATIONS; nimpl++)
			{
				if (!starpu_worker_can_execute_task(worker, task, nimpl))
				{
					/* no one on that queue may execute this task */
					continue;
				}

				fitness[worker][nimpl] = alpha * (exp_end[worker][nimpl] - best_exp_end)
					+ beta * (local_data_penalty[worker][nimpl])
					+ _gamma * (local_power[worker][nimpl]);

				if (exp_end[worker][nimpl] > max_exp_end)
				{
					/* This placement will make the computation
					 * longer, take into account the idle
					 * consumption of other cpus */
					fitness[worker][nimpl] += _gamma * idle_power * (exp_end[worker][nimpl] - max_exp_end) / 1000000.0;
				}

				if (best == -1 || fitness[worker][nimpl] < best_fitness)
				{
					/* we found a better solution */
					best_fitness = fitness[worker][nimpl];
					best = worker;
					selected_impl = nimpl;

					//_STARPU_DEBUG("best fitness (worker %d) %e = alpha*(%e) + beta(%e) + gamma(%e)\n", worker, best_fitness, exp_end[worker][nimpl] - best_exp_end, local_data_penalty[worker][nimpl], local_power[worker][nimpl]);
				}
			}
		}
	}

	STARPU_ASSERT(forced_best != -1 || best != -1);

	if (forced_best != -1)
	{
		/* there is no prediction available for that task with that
		 * arch, so we want to speed up calibration time and force
		 * this measurement */
		best = forced_best;
		model_best = 0.0;
		//penality_best = 0.0;
	}
	else
	{
		model_best = local_task_length[best][selected_impl];
		//penality_best = local_data_penalty[best][selected_impl];
	}

	//_STARPU_DEBUG("Scheduler dmda: kernel (%u)\n", selected_impl);

	_starpu_get_job_associated_to_task(task)->nimpl = selected_impl;

	/* we should now have the best worker in variable "best" */
	return push_task_on_best_worker(task, best, model_best, prio);
}

static int dmda_push_sorted_task(struct starpu_task *task)
{
	return _dmda_push_task(task, 1);
}

static int dm_push_task(struct starpu_task *task)
{
	return _dm_push_task(task, 0);
}

static int dmda_push_task(struct starpu_task *task)
{
	return _dmda_push_task(task, 0);
}
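
/* Read the scheduling knobs from the environment: STARPU_SCHED_ALPHA,
 * STARPU_SCHED_BETA, STARPU_SCHED_GAMMA and STARPU_IDLE_POWER override the
 * default alpha/beta/gamma/idle_power weights; then create one FIFO and one
 * condition/mutex pair per worker. */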
static void initialize_dmda_policy(struct starpu_machine_topology *topology,
				   __attribute__ ((unused)) struct starpu_sched_policy *_policy)
{
	nworkers = topology->nworkers;

	const char *strval_alpha = getenv("STARPU_SCHED_ALPHA");
	if (strval_alpha)
		alpha = atof(strval_alpha);

	const char *strval_beta = getenv("STARPU_SCHED_BETA");
	if (strval_beta)
		beta = atof(strval_beta);

	const char *strval_gamma = getenv("STARPU_SCHED_GAMMA");
	if (strval_gamma)
		_gamma = atof(strval_gamma);

	const char *strval_idle_power = getenv("STARPU_IDLE_POWER");
	if (strval_idle_power)
		idle_power = atof(strval_idle_power);

	unsigned workerid;
	for (workerid = 0; workerid < nworkers; workerid++)
	{
		queue_array[workerid] = _starpu_create_fifo();

		_STARPU_PTHREAD_MUTEX_INIT(&sched_mutex[workerid], NULL);
		_STARPU_PTHREAD_COND_INIT(&sched_cond[workerid], NULL);

		starpu_worker_set_sched_condition(workerid, &sched_cond[workerid], &sched_mutex[workerid]);
	}
}

static void initialize_dmda_sorted_policy(struct starpu_machine_topology *topology,
					  struct starpu_sched_policy *_policy)
{
	initialize_dmda_policy(topology, _policy);

	/* The application may use any integer */
	starpu_sched_set_min_priority(INT_MIN);
	starpu_sched_set_max_priority(INT_MAX);
}

static void deinitialize_dmda_policy(struct starpu_machine_topology *topology,
				     __attribute__ ((unused)) struct starpu_sched_policy *_policy)
{
	unsigned workerid;
	for (workerid = 0; workerid < topology->nworkers; workerid++)
		_starpu_destroy_fifo(queue_array[workerid]);

#ifdef STARPU_VERBOSE
	/* total_task_cnt and ready_task_cnt are only maintained in verbose builds */
	_STARPU_DEBUG("total_task_cnt %ld ready_task_cnt %ld -> %f\n", total_task_cnt, ready_task_cnt, (100.0f*ready_task_cnt)/total_task_cnt);
#endif
}
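
/* The policies below are exported through the usual StarPU policy table.
 * A sketch of how an application would select one of them (assuming the
 * standard starpu_conf API, or the STARPU_SCHED environment variable):
 *
 *	struct starpu_conf conf;
 *	starpu_conf_init(&conf);
 *	conf.sched_policy_name = "dmda";
 *	starpu_init(&conf);
 *
 * or, at run time:  STARPU_SCHED=dmda ./application */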
/* TODO: use post_exec_hook to fix the expected start */
struct starpu_sched_policy _starpu_sched_dm_policy =
{
	.init_sched = initialize_dmda_policy,
	.deinit_sched = deinitialize_dmda_policy,
	.push_task = dm_push_task,
	.pop_task = dmda_pop_task,
	.pre_exec_hook = NULL,
	.post_exec_hook = NULL,
	.pop_every_task = dmda_pop_every_task,
	.policy_name = "dm",
	.policy_description = "performance model"
};

struct starpu_sched_policy _starpu_sched_dmda_policy =
{
	.init_sched = initialize_dmda_policy,
	.deinit_sched = deinitialize_dmda_policy,
	.push_task = dmda_push_task,
	.pop_task = dmda_pop_task,
	.pre_exec_hook = NULL,
	.post_exec_hook = NULL,
	.pop_every_task = dmda_pop_every_task,
	.policy_name = "dmda",
	.policy_description = "data-aware performance model"
};

struct starpu_sched_policy _starpu_sched_dmda_sorted_policy =
{
	.init_sched = initialize_dmda_sorted_policy,
	.deinit_sched = deinitialize_dmda_policy,
	.push_task = dmda_push_sorted_task,
	.pop_task = dmda_pop_ready_task,
	.pre_exec_hook = NULL,
	.post_exec_hook = NULL,
	.pop_every_task = dmda_pop_every_task,
	.policy_name = "dmdas",
	.policy_description = "data-aware performance model (sorted)"
};

struct starpu_sched_policy _starpu_sched_dmda_ready_policy =
{
	.init_sched = initialize_dmda_policy,
	.deinit_sched = deinitialize_dmda_policy,
	.push_task = dmda_push_task,
	.pop_task = dmda_pop_ready_task,
	.pre_exec_hook = NULL,
	.post_exec_hook = NULL,
	.pop_every_task = dmda_pop_every_task,
	.policy_name = "dmdar",
	.policy_description = "data-aware performance model (ready)"
};