deque_modeling_policy_data_aware.c

/* StarPU --- Runtime system for heterogeneous multicore architectures.
 *
 * Copyright (C) 2010, 2011  Université de Bordeaux 1
 * Copyright (C) 2010, 2011  Centre National de la Recherche Scientifique
 * Copyright (C) 2011  Télécom-SudParis
 *
 * StarPU is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation; either version 2.1 of the License, or (at
 * your option) any later version.
 *
 * StarPU is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * See the GNU Lesser General Public License in COPYING.LGPL for more details.
 */

/* Distributed queues using performance modeling to assign tasks */
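/* This file implements a family of list scheduling policies built on one
 * task FIFO per worker:
 *   "dm"    - pick the worker minimizing the expected termination time of
 *             the task, based on performance models;
 *   "dmda"  - like dm, but the fitness also accounts for expected data
 *             transfer time and power consumption;
 *   "dmdas" - dmda with per-worker queues kept sorted by task priority;
 *   "dmdar" - dmda where a worker pops the queued task whose data is the
 *             most ready on its memory node. */
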
#include <limits.h>
#include <core/workers.h>
#include <sched_policies/fifo_queues.h>
#include <core/perfmodel/perfmodel.h>
#include <starpu_parameters.h>

static unsigned nworkers;

static struct _starpu_fifo_taskq *queue_array[STARPU_NMAXWORKERS];

static pthread_cond_t sched_cond[STARPU_NMAXWORKERS];
static pthread_mutex_t sched_mutex[STARPU_NMAXWORKERS];

static double alpha = _STARPU_DEFAULT_ALPHA;
static double beta = _STARPU_DEFAULT_BETA;
static double _gamma = _STARPU_DEFAULT_GAMMA;
static double idle_power = 0.0;

#ifdef STARPU_VERBOSE
static long int total_task_cnt = 0;
static long int ready_task_cnt = 0;
#endif

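/* Every worker owns one FIFO in queue_array, protected by the matching
 * sched_mutex/sched_cond pair. The queue's expected timeline is tracked in
 * the fifo itself: exp_start (expected date at which the worker starts
 * draining its queue), exp_len (expected duration of all queued tasks) and
 * exp_end = exp_start + exp_len. Pushes below grow exp_len/exp_end, pops
 * shrink them. */
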
static int count_non_ready_buffers(struct starpu_task *task, uint32_t node)
{
	int cnt = 0;

	struct starpu_buffer_descr *descrs = task->buffers;
	unsigned nbuffers = task->cl->nbuffers;

	unsigned index;
	for (index = 0; index < nbuffers; index++)
	{
		struct starpu_buffer_descr *descr;
		starpu_data_handle_t handle;

		descr = &descrs[index];
		handle = descr->handle;

		int is_valid;
		starpu_data_query_status(handle, node, NULL, &is_valid, NULL);

		if (!is_valid)
			cnt++;
	}

	return cnt;
}

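/* count_non_ready_buffers() tells how many of a task's data handles do not
 * yet have a valid replicate on memory node 'node'. Illustrative use,
 * assuming a task accessing two handles of which one is already resident:
 *
 *	int missing = count_non_ready_buffers(task, node);  // -> 1
 *
 * The "ready" variants below use this count to favour tasks whose inputs
 * are already local, hiding transfer latency. */
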
static struct starpu_task *_starpu_fifo_pop_first_ready_task(struct _starpu_fifo_taskq *fifo_queue, unsigned node)
{
	struct starpu_task *task = NULL, *current;

	if (fifo_queue->ntasks == 0)
		return NULL;

	if (fifo_queue->ntasks > 0)
	{
		fifo_queue->ntasks--;

		task = starpu_task_list_back(&fifo_queue->taskq);

		int first_task_priority = task->priority;

		current = task;

		int non_ready_best = INT_MAX;

		while (current)
		{
			int priority = current->priority;

			if (priority <= first_task_priority)
			{
				int non_ready = count_non_ready_buffers(current, node);
				if (non_ready < non_ready_best)
				{
					non_ready_best = non_ready;
					task = current;

					if (non_ready == 0)
						break;
				}
			}

			current = current->prev;
		}

		starpu_task_list_erase(&fifo_queue->taskq, task);

		_STARPU_TRACE_JOB_POP(task, 0);
	}

	return task;
}

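/* Note on the scan above: the queue is walked backwards from the task that
 * plain FIFO order would return; only tasks whose priority does not exceed
 * that of this first candidate are considered, and among those the one
 * with the fewest non-resident buffers is taken. The walk stops early as
 * soon as a fully-ready task (zero missing buffers) is found. */
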
static struct starpu_task *dmda_pop_ready_task(void)
{
	struct starpu_task *task;

	int workerid = starpu_worker_get_id();
	struct _starpu_fifo_taskq *fifo = queue_array[workerid];

	unsigned node = starpu_worker_get_memory_node(workerid);

	task = _starpu_fifo_pop_first_ready_task(fifo, node);
	if (task)
	{
		double model = task->predicted;

		fifo->exp_len -= model;
		fifo->exp_start = starpu_timing_now() + model;
		fifo->exp_end = fifo->exp_start + fifo->exp_len;

#ifdef STARPU_VERBOSE
		if (task->cl)
		{
			int non_ready = count_non_ready_buffers(task, starpu_worker_get_memory_node(workerid));
			if (non_ready == 0)
				ready_task_cnt++;
		}

		total_task_cnt++;
#endif
	}

	return task;
}

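/* Bookkeeping on the pop side: once a task with predicted length 'model'
 * leaves the queue, the remaining expected work shrinks by model and the
 * worker is expected to be busy with the popped task until now + model.
 * E.g. with exp_len = 30us and model = 10us, the queue is re-estimated as
 * exp_start = now + 10us and exp_len = 20us. */
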
static struct starpu_task *dmda_pop_task(void)
{
	struct starpu_task *task;

	int workerid = starpu_worker_get_id();
	struct _starpu_fifo_taskq *fifo = queue_array[workerid];

	task = _starpu_fifo_pop_task(fifo, workerid);
	if (task)
	{
		double model = task->predicted;

		fifo->exp_len -= model;
		fifo->exp_start = starpu_timing_now() + model;
		fifo->exp_end = fifo->exp_start + fifo->exp_len;

#ifdef STARPU_VERBOSE
		if (task->cl)
		{
			int non_ready = count_non_ready_buffers(task, starpu_worker_get_memory_node(workerid));
			if (non_ready == 0)
				ready_task_cnt++;
		}

		total_task_cnt++;
#endif
	}

	return task;
}

static struct starpu_task *dmda_pop_every_task(void)
{
	struct starpu_task *new_list, *current;

	int workerid = starpu_worker_get_id();

	struct _starpu_fifo_taskq *fifo = queue_array[workerid];

	new_list = _starpu_fifo_pop_every_task(fifo, &sched_mutex[workerid], workerid);

	/* Update the expected timeline for every popped task; iterate with a
	 * separate cursor so that the head of the list can still be
	 * returned (iterating with new_list itself would always return
	 * NULL). */
	for (current = new_list; current; current = current->next)
	{
		double model = current->predicted;

		fifo->exp_len -= model;
		fifo->exp_start = starpu_timing_now() + model;
		fifo->exp_end = fifo->exp_start + fifo->exp_len;
	}

	return new_list;
}

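/* dmdas keeps each local queue ordered by priority; the helper below
 * inserts a task before the first queued task of greater or equal
 * priority, so the list grows in ascending priority from head to tail and
 * pops (taken from the back) serve the highest priority first. */
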
static int _starpu_fifo_push_sorted_task(struct _starpu_fifo_taskq *fifo_queue, pthread_mutex_t *sched_mutex, pthread_cond_t *sched_cond, struct starpu_task *task)
{
	struct starpu_task_list *list = &fifo_queue->taskq;

	_STARPU_PTHREAD_MUTEX_LOCK(sched_mutex);

	_STARPU_TRACE_JOB_PUSH(task, 0);

	if (list->head == NULL)
	{
		list->head = task;
		list->tail = task;
		task->prev = NULL;
		task->next = NULL;
	}
	else
	{
		struct starpu_task *current = list->head;
		struct starpu_task *prev = NULL;

		while (current)
		{
			if (current->priority >= task->priority)
				break;

			prev = current;
			current = current->next;
		}

		if (prev == NULL)
		{
			/* Insert at the front of the list */
			list->head->prev = task;
			task->prev = NULL;
			task->next = list->head;
			list->head = task;
		}
		else
		{
			if (current)
			{
				/* Insert between prev and current */
				task->prev = prev;
				prev->next = task;
				task->next = current;
				current->prev = task;
			}
			else
			{
				/* Insert at the tail of the list */
				list->tail->next = task;
				task->next = NULL;
				task->prev = list->tail;
				list->tail = task;
			}
		}
	}

	fifo_queue->ntasks++;
	fifo_queue->nprocessed++;

	_STARPU_PTHREAD_COND_SIGNAL(sched_cond);
	_STARPU_PTHREAD_MUTEX_UNLOCK(sched_mutex);

	return 0;
}

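/* For instance, pushing tasks with priorities 5, 1, 3 (in that order)
 * yields the list head -> 1 -> 3 -> 5 <- tail, and popping from the back
 * returns the priority-5 task first. Among equal priorities the newest
 * task lands closer to the head, so equal-priority tasks keep FIFO order
 * on the pop side. */
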
static int push_task_on_best_worker(struct starpu_task *task, int best_workerid, double predicted, int prio)
{
	/* make sure someone could execute that task! */
	STARPU_ASSERT(best_workerid != -1);

	struct _starpu_fifo_taskq *fifo;
	fifo = queue_array[best_workerid];

	fifo->exp_end += predicted;
	fifo->exp_len += predicted;

	task->predicted = predicted;
	/* TODO predicted_transfer */

	unsigned memory_node = starpu_worker_get_memory_node(best_workerid);

	if (starpu_get_prefetch_flag())
		starpu_prefetch_task_input_on_node(task, memory_node);

	if (prio)
		return _starpu_fifo_push_sorted_task(queue_array[best_workerid],
			&sched_mutex[best_workerid], &sched_cond[best_workerid], task);
	else
		return _starpu_fifo_push_task(queue_array[best_workerid],
			&sched_mutex[best_workerid], &sched_cond[best_workerid], task);
}

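/* Once the target worker is known, its input data can be prefetched onto
 * that worker's memory node (when prefetching is enabled), so that the
 * transfers overlap with the tasks already queued in front of it. */
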
/* TODO: factorize with dmda!! */
static int _dm_push_task(struct starpu_task *task, unsigned prio)
{
	/* find the queue */
	struct _starpu_fifo_taskq *fifo;
	unsigned worker;
	int best = -1;

	double best_exp_end = 0.0;
	double model_best = 0.0;

	int ntasks_best = -1;
	double ntasks_best_end = 0.0;
	int calibrating = 0;

	/* A priori, we know all estimations */
	int unknown = 0;

	unsigned best_impl = 0;
	unsigned nimpl;

	for (worker = 0; worker < nworkers; worker++)
	{
		for (nimpl = 0; nimpl < STARPU_MAXIMPLEMENTATIONS; nimpl++)
		{
			double exp_end;

			fifo = queue_array[worker];

			/* Sometimes workers didn't take the tasks as early as we expected */
			fifo->exp_start = STARPU_MAX(fifo->exp_start, starpu_timing_now());
			fifo->exp_end = fifo->exp_start + fifo->exp_len;

			if (!starpu_worker_can_execute_task(worker, task, nimpl))
			{
				/* no one on that queue may execute this task */
				continue;
			}

			enum starpu_perf_archtype perf_arch = starpu_worker_get_perf_archtype(worker);
			double local_length = starpu_task_expected_length(task, perf_arch, nimpl);
			double ntasks_end = fifo->ntasks / starpu_worker_get_relative_speedup(perf_arch);

			//_STARPU_DEBUG("Scheduler dm: task length (%lf) worker (%u) kernel (%u) \n", local_length, worker, nimpl);

			if (ntasks_best == -1
			    || (!calibrating && ntasks_end < ntasks_best_end) /* Not calibrating, take better task */
			    || (!calibrating && local_length == -1.0) /* Not calibrating but this worker is being calibrated */
			    || (calibrating && local_length == -1.0 && ntasks_end < ntasks_best_end) /* Calibrating, compete this worker with other non-calibrated */
			   )
			{
				ntasks_best_end = ntasks_end;
				ntasks_best = worker;
				best_impl = nimpl;
			}

			if (local_length == -1.0)
				/* we are calibrating, we want to speed-up calibration time
				 * so we privilege non-calibrated tasks (but still
				 * greedily distribute them to avoid dumb schedules) */
				calibrating = 1;

			if (local_length <= 0.0)
				/* there is no prediction available for that task
				 * with that arch yet, so switch to a greedy strategy */
				unknown = 1;

			if (unknown)
				continue;

			exp_end = fifo->exp_start + fifo->exp_len + local_length;

			if (best == -1 || exp_end < best_exp_end)
			{
				/* a better solution was found */
				best_exp_end = exp_end;
				best = worker;
				model_best = local_length;
				best_impl = nimpl;
			}
		}
	}

	if (unknown)
	{
		best = ntasks_best;
		model_best = 0.0;
	}

	//_STARPU_DEBUG("Scheduler dm: kernel (%u)\n", best_impl);

	_starpu_get_job_associated_to_task(task)->nimpl = best_impl;

	/* we should now have the best worker in variable "best" */
	return push_task_on_best_worker(task, best, model_best, prio);
}

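/* Summary of the "dm" choice: among all (worker, implementation) pairs able
 * to run the task, pick the one minimizing the expected termination date
 * exp_start + exp_len + predicted_length. For example, if worker 0 has
 * 50us of queued work and would run the task in 20us (done at now+70us),
 * while worker 1 has 10us queued and needs 40us (done at now+50us),
 * worker 1 wins despite being slower on this kernel. */
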
static int _dmda_push_task(struct starpu_task *task, unsigned prio)
{
	/* find the queue */
	struct _starpu_fifo_taskq *fifo;
	unsigned worker;
	int best = -1;

	/* this flag is set if the corresponding worker is selected because
	   there is no performance prediction available yet */
	int forced_best = -1;

	double local_task_length[nworkers][STARPU_MAXIMPLEMENTATIONS];
	double local_data_penalty[nworkers][STARPU_MAXIMPLEMENTATIONS];
	double local_power[nworkers][STARPU_MAXIMPLEMENTATIONS];
	double exp_end[nworkers][STARPU_MAXIMPLEMENTATIONS];
	double max_exp_end = 0.0;

	double fitness[nworkers][STARPU_MAXIMPLEMENTATIONS];

	double best_exp_end = 10e240;
	double model_best = 0.0;
	//double penality_best = 0.0;

	int ntasks_best = -1;
	double ntasks_best_end = 0.0;
	int calibrating = 0;

	/* A priori, we know all estimations */
	int unknown = 0;

	unsigned best_impl = 0;
	unsigned nimpl;

	for (worker = 0; worker < nworkers; worker++)
	{
		for (nimpl = 0; nimpl < STARPU_MAXIMPLEMENTATIONS; nimpl++)
		{
			fifo = queue_array[worker];

			/* Sometimes workers didn't take the tasks as early as we expected */
			fifo->exp_start = STARPU_MAX(fifo->exp_start, starpu_timing_now());
			fifo->exp_end = fifo->exp_start + fifo->exp_len;
			if (fifo->exp_end > max_exp_end)
				max_exp_end = fifo->exp_end;

			if (!starpu_worker_can_execute_task(worker, task, nimpl))
			{
				/* no one on that queue may execute this task */
				continue;
			}

			enum starpu_perf_archtype perf_arch = starpu_worker_get_perf_archtype(worker);
			local_task_length[worker][nimpl] = starpu_task_expected_length(task, perf_arch, nimpl);

			//_STARPU_DEBUG("Scheduler dmda: task length (%lf) worker (%u) kernel (%u) \n", local_task_length[worker][nimpl], worker, nimpl);

			unsigned memory_node = starpu_worker_get_memory_node(worker);
			local_data_penalty[worker][nimpl] = starpu_task_expected_data_transfer_time(memory_node, task);

			double ntasks_end = fifo->ntasks / starpu_worker_get_relative_speedup(perf_arch);

			if (ntasks_best == -1
			    || (!calibrating && ntasks_end < ntasks_best_end) /* Not calibrating, take better task */
			    || (!calibrating && local_task_length[worker][nimpl] == -1.0) /* Not calibrating but this worker is being calibrated */
			    || (calibrating && local_task_length[worker][nimpl] == -1.0 && ntasks_end < ntasks_best_end) /* Calibrating, compete this worker with other non-calibrated */
			   )
			{
				ntasks_best_end = ntasks_end;
				ntasks_best = worker;
				best_impl = nimpl;
			}

			if (local_task_length[worker][nimpl] == -1.0)
				/* we are calibrating, we want to speed-up calibration time
				 * so we privilege non-calibrated tasks (but still
				 * greedily distribute them to avoid dumb schedules) */
				calibrating = 1;

			if (local_task_length[worker][nimpl] <= 0.0)
				/* there is no prediction available for that task
				 * with that arch yet, so switch to a greedy strategy */
				unknown = 1;

			if (unknown)
				continue;

			exp_end[worker][nimpl] = fifo->exp_start + fifo->exp_len + local_task_length[worker][nimpl];

			if (exp_end[worker][nimpl] < best_exp_end)
			{
				/* a better solution was found */
				best_exp_end = exp_end[worker][nimpl];
				best_impl = nimpl;
			}

			local_power[worker][nimpl] = starpu_task_expected_power(task, perf_arch, nimpl);
			if (local_power[worker][nimpl] == -1.0)
				local_power[worker][nimpl] = 0.;
		}
	}

	if (unknown)
		forced_best = ntasks_best;

	double best_fitness = -1;

	if (forced_best == -1)
	{
		for (worker = 0; worker < nworkers; worker++)
		{
			for (nimpl = 0; nimpl < STARPU_MAXIMPLEMENTATIONS; nimpl++)
			{
				if (!starpu_worker_can_execute_task(worker, task, nimpl))
				{
					/* no one on that queue may execute this task */
					continue;
				}

				fitness[worker][nimpl] = alpha*(exp_end[worker][nimpl] - best_exp_end)
						+ beta*(local_data_penalty[worker][nimpl])
						+ _gamma*(local_power[worker][nimpl]);

				if (exp_end[worker][nimpl] > max_exp_end)
				{
					/* This placement will make the computation
					 * longer, take into account the idle
					 * consumption of other cpus */
					fitness[worker][nimpl] += _gamma * idle_power * (exp_end[worker][nimpl] - max_exp_end) / 1000000.0;
				}

				if (best == -1 || fitness[worker][nimpl] < best_fitness)
				{
					/* we found a better solution */
					best_fitness = fitness[worker][nimpl];
					best = worker;
					best_impl = nimpl;

					//_STARPU_DEBUG("best fitness (worker %d) %e = alpha*(%e) + beta(%e) + gamma(%e)\n", worker, best_fitness, exp_end[worker][nimpl] - best_exp_end, local_data_penalty[worker][nimpl], local_power[worker][nimpl]);
				}
			}
		}
	}

	STARPU_ASSERT(forced_best != -1 || best != -1);

	if (forced_best != -1)
	{
		/* there is no prediction available for that task
		 * with that arch, and we want to speed-up calibration time,
		 * so we force this measurement */
		best = forced_best;
		model_best = 0.0;
		//penality_best = 0.0;
	}
	else
	{
		/* At this point nimpl has run past the end of its loop, so
		 * index with the recorded best_impl instead. */
		model_best = local_task_length[best][best_impl];
		//penality_best = local_data_penalty[best][best_impl];
	}

	//_STARPU_DEBUG("Scheduler dmda: kernel (%u)\n", best_impl);

	_starpu_get_job_associated_to_task(task)->nimpl = best_impl;

	/* we should now have the best worker in variable "best" */
	return push_task_on_best_worker(task, best, model_best, prio);
}

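/* The dmda fitness of a (worker, implementation) pair is
 *
 *	fitness = alpha * (exp_end - best_exp_end)  // time lost vs. the best finish date
 *	        + beta  * data_penalty              // expected data transfer time
 *	        + gamma * power                     // expected energy consumption
 *
 * plus an idle-power term when the placement extends the overall makespan.
 * Worked example with alpha = beta = 1 and gamma = 0: a GPU that would
 * finish earliest (first term 0) but needs 300us of transfers scores 300,
 * while a CPU finishing 100us later with the data already local scores
 * 100, so the CPU is chosen. */
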
static int dmda_push_sorted_task(struct starpu_task *task)
{
	return _dmda_push_task(task, 1);
}

static int dm_push_task(struct starpu_task *task)
{
	return _dm_push_task(task, 0);
}

static int dmda_push_task(struct starpu_task *task)
{
	return _dmda_push_task(task, 0);
}

static void initialize_dmda_policy(struct starpu_machine_topology *topology,
		__attribute__ ((unused)) struct starpu_sched_policy *_policy)
{
	nworkers = topology->nworkers;

	const char *strval_alpha = getenv("STARPU_SCHED_ALPHA");
	if (strval_alpha)
		alpha = atof(strval_alpha);

	const char *strval_beta = getenv("STARPU_SCHED_BETA");
	if (strval_beta)
		beta = atof(strval_beta);

	const char *strval_gamma = getenv("STARPU_SCHED_GAMMA");
	if (strval_gamma)
		_gamma = atof(strval_gamma);

	const char *strval_idle_power = getenv("STARPU_IDLE_POWER");
	if (strval_idle_power)
		idle_power = atof(strval_idle_power);

	unsigned workerid;
	for (workerid = 0; workerid < nworkers; workerid++)
	{
		queue_array[workerid] = _starpu_create_fifo();

		_STARPU_PTHREAD_MUTEX_INIT(&sched_mutex[workerid], NULL);
		_STARPU_PTHREAD_COND_INIT(&sched_cond[workerid], NULL);

		starpu_worker_set_sched_condition(workerid, &sched_cond[workerid], &sched_mutex[workerid]);
	}
}

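/* These coefficients make the trade-offs tunable without recompiling; for
 * instance, to weight data transfers twice as much under the dmda policy,
 * an application would typically be launched as
 *
 *	STARPU_SCHED=dmda STARPU_SCHED_BETA=2.0 ./my_app
 *
 * (STARPU_SCHED selects the scheduling policy by name; the other
 * variables are read above.) */
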
static void initialize_dmda_sorted_policy(struct starpu_machine_topology *topology,
		struct starpu_sched_policy *_policy)
{
	initialize_dmda_policy(topology, _policy);

	/* The application may use any integer */
	starpu_sched_set_min_priority(INT_MIN);
	starpu_sched_set_max_priority(INT_MAX);
}

static void deinitialize_dmda_policy(struct starpu_machine_topology *topology,
		__attribute__ ((unused)) struct starpu_sched_policy *_policy)
{
	unsigned workerid;
	for (workerid = 0; workerid < topology->nworkers; workerid++)
		_starpu_destroy_fifo(queue_array[workerid]);

	_STARPU_DEBUG("total_task_cnt %ld ready_task_cnt %ld -> %f\n", total_task_cnt, ready_task_cnt, (100.0f*ready_task_cnt)/total_task_cnt);
}

/* TODO: use post_exec_hook to fix the expected start */

struct starpu_sched_policy _starpu_sched_dm_policy =
{
	.init_sched = initialize_dmda_policy,
	.deinit_sched = deinitialize_dmda_policy,
	.push_task = dm_push_task,
	.pop_task = dmda_pop_task,
	.post_exec_hook = NULL,
	.pop_every_task = dmda_pop_every_task,
	.policy_name = "dm",
	.policy_description = "performance model"
};

struct starpu_sched_policy _starpu_sched_dmda_policy =
{
	.init_sched = initialize_dmda_policy,
	.deinit_sched = deinitialize_dmda_policy,
	.push_task = dmda_push_task,
	.pop_task = dmda_pop_task,
	.post_exec_hook = NULL,
	.pop_every_task = dmda_pop_every_task,
	.policy_name = "dmda",
	.policy_description = "data-aware performance model"
};

struct starpu_sched_policy _starpu_sched_dmda_sorted_policy =
{
	.init_sched = initialize_dmda_sorted_policy,
	.deinit_sched = deinitialize_dmda_policy,
	.push_task = dmda_push_sorted_task,
	.pop_task = dmda_pop_ready_task,
	.post_exec_hook = NULL,
	.pop_every_task = dmda_pop_every_task,
	.policy_name = "dmdas",
	.policy_description = "data-aware performance model (sorted)"
};

struct starpu_sched_policy _starpu_sched_dmda_ready_policy =
{
	.init_sched = initialize_dmda_policy,
	.deinit_sched = deinitialize_dmda_policy,
	.push_task = dmda_push_task,
	.pop_task = dmda_pop_ready_task,
	.post_exec_hook = NULL,
	.pop_every_task = dmda_pop_every_task,
	.policy_name = "dmdar",
	.policy_description = "data-aware performance model (ready)"
};
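
/* Illustrative sketch (not part of this file): an application would select
 * one of these policies by name through the public API, e.g.
 *
 *	struct starpu_conf conf;
 *	starpu_conf_init(&conf);
 *	conf.sched_policy_name = "dmda";  // or "dm", "dmdas", "dmdar"
 *	starpu_init(&conf);
 *
 * or simply by setting STARPU_SCHED=dmda in the environment. */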