deque_modeling_policy_data_aware.c

/* StarPU --- Runtime system for heterogeneous multicore architectures.
 *
 * Copyright (C) 2010, 2011  Université de Bordeaux 1
 * Copyright (C) 2010, 2011  Centre National de la Recherche Scientifique
 * Copyright (C) 2011  Télécom-SudParis
 *
 * StarPU is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation; either version 2.1 of the License, or (at
 * your option) any later version.
 *
 * StarPU is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * See the GNU Lesser General Public License in COPYING.LGPL for more details.
 */

/* Distributed queues using performance modeling to assign tasks */
#include <limits.h>

#include <core/workers.h>
#include <sched_policies/fifo_queues.h>
#include <core/perfmodel/perfmodel.h>
#include <starpu_parameters.h>

static unsigned nworkers;

static struct _starpu_fifo_taskq *queue_array[STARPU_NMAXWORKERS];

static pthread_cond_t sched_cond[STARPU_NMAXWORKERS];
static pthread_mutex_t sched_mutex[STARPU_NMAXWORKERS];

static double alpha = _STARPU_DEFAULT_ALPHA;
static double beta = _STARPU_DEFAULT_BETA;
static double _gamma = _STARPU_DEFAULT_GAMMA;
static double idle_power = 0.0;

#ifdef STARPU_VERBOSE
static long int total_task_cnt = 0;
static long int ready_task_cnt = 0;
#endif
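
/* Count how many of the task's data buffers are not yet valid (i.e. would
 * still have to be transferred) on the given memory node. */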
static int count_non_ready_buffers(struct starpu_task *task, uint32_t node)
{
	int cnt = 0;

	struct starpu_buffer_descr *descrs = task->buffers;
	unsigned nbuffers = task->cl->nbuffers;

	unsigned index;
	for (index = 0; index < nbuffers; index++)
	{
		struct starpu_buffer_descr *descr;
		starpu_data_handle_t handle;

		descr = &descrs[index];
		handle = descr->handle;

		int is_valid;
		starpu_data_query_status(handle, node, NULL, &is_valid, NULL);

		if (!is_valid)
			cnt++;
	}

	return cnt;
}
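
/* Starting from the back of the queue, pick, among the tasks whose priority
 * does not exceed that of the task at the back, the one with the fewest
 * non-ready buffers on the given node; a task whose data is already fully
 * available wins immediately. */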
static struct starpu_task *_starpu_fifo_pop_first_ready_task(struct _starpu_fifo_taskq *fifo_queue, unsigned node)
{
	struct starpu_task *task = NULL, *current;

	if (fifo_queue->ntasks == 0)
		return NULL;

	if (fifo_queue->ntasks > 0)
	{
		fifo_queue->ntasks--;

		task = starpu_task_list_back(&fifo_queue->taskq);

		int first_task_priority = task->priority;

		current = task;

		int non_ready_best = INT_MAX;

		while (current)
		{
			int priority = current->priority;

			if (priority <= first_task_priority)
			{
				int non_ready = count_non_ready_buffers(current, node);
				if (non_ready < non_ready_best)
				{
					non_ready_best = non_ready;
					task = current;

					if (non_ready == 0)
						break;
				}
			}

			current = current->prev;
		}

		starpu_task_list_erase(&fifo_queue->taskq, task);

		_STARPU_TRACE_JOB_POP(task, 0);
	}

	return task;
}
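
/* Pop variant used by the data-aware "ready" policies (dmdar/dmdas): take
 * the most-ready task for this worker's memory node and update the queue's
 * expected start/length/end. */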
static struct starpu_task *dmda_pop_ready_task(void)
{
	struct starpu_task *task;

	int workerid = starpu_worker_get_id();
	struct _starpu_fifo_taskq *fifo = queue_array[workerid];

	unsigned node = starpu_worker_get_memory_node(workerid);

	task = _starpu_fifo_pop_first_ready_task(fifo, node);
	if (task)
	{
		double model = task->predicted;

		fifo->exp_len -= model;
		fifo->exp_start = starpu_timing_now() + model;
		fifo->exp_end = fifo->exp_start + fifo->exp_len;

#ifdef STARPU_VERBOSE
		if (task->cl)
		{
			int non_ready = count_non_ready_buffers(task, starpu_worker_get_memory_node(workerid));
			if (non_ready == 0)
				ready_task_cnt++;
		}

		total_task_cnt++;
#endif
	}

	return task;
}
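
/* Plain FIFO pop used by the dm/dmda policies, with the same bookkeeping of
 * the queue's expected start/length/end. */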
static struct starpu_task *dmda_pop_task(void)
{
	struct starpu_task *task;

	int workerid = starpu_worker_get_id();
	struct _starpu_fifo_taskq *fifo = queue_array[workerid];

	task = _starpu_fifo_pop_task(fifo, workerid);
	if (task)
	{
		double model = task->predicted;

		fifo->exp_len -= model;
		fifo->exp_start = starpu_timing_now() + model;
		fifo->exp_end = fifo->exp_start + fifo->exp_len;

#ifdef STARPU_VERBOSE
		if (task->cl)
		{
			int non_ready = count_non_ready_buffers(task, starpu_worker_get_memory_node(workerid));
			if (non_ready == 0)
				ready_task_cnt++;
		}

		total_task_cnt++;
#endif
	}

	return task;
}
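
/* Pop every task currently queued on this worker at once, updating the
 * expected start/length/end for each task in the returned list. */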
static struct starpu_task *dmda_pop_every_task(void)
{
	struct starpu_task *new_list, *task;

	int workerid = starpu_worker_get_id();

	struct _starpu_fifo_taskq *fifo = queue_array[workerid];

	new_list = _starpu_fifo_pop_every_task(fifo, &sched_mutex[workerid], workerid);

	/* Walk the list with a separate cursor so that the head of the list
	 * is preserved and can actually be returned to the caller. */
	for (task = new_list; task; task = task->next)
	{
		double model = task->predicted;

		fifo->exp_len -= model;
		fifo->exp_start = starpu_timing_now() + model;
		fifo->exp_end = fifo->exp_start + fifo->exp_len;
	}

	return new_list;
}
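
/* Insert the task so that the queue stays sorted by priority, non-decreasing
 * from head to tail: the task is placed before the first task whose priority
 * is greater than or equal to its own. */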
static
int _starpu_fifo_push_sorted_task(struct _starpu_fifo_taskq *fifo_queue, pthread_mutex_t *sched_mutex, pthread_cond_t *sched_cond, struct starpu_task *task)
{
	struct starpu_task_list *list = &fifo_queue->taskq;

	_STARPU_PTHREAD_MUTEX_LOCK(sched_mutex);

	_STARPU_TRACE_JOB_PUSH(task, 0);

	if (list->head == NULL)
	{
		list->head = task;
		list->tail = task;
		task->prev = NULL;
		task->next = NULL;
	}
	else
	{
		struct starpu_task *current = list->head;
		struct starpu_task *prev = NULL;

		while (current)
		{
			if (current->priority >= task->priority)
				break;

			prev = current;
			current = current->next;
		}

		if (prev == NULL)
		{
			/* Insert at the front of the list */
			list->head->prev = task;
			task->prev = NULL;
			task->next = list->head;
			list->head = task;
		}
		else
		{
			if (current)
			{
				/* Insert between prev and current */
				task->prev = prev;
				prev->next = task;
				task->next = current;
				current->prev = task;
			}
			else
			{
				/* Insert at the tail of the list */
				list->tail->next = task;
				task->next = NULL;
				task->prev = list->tail;
				list->tail = task;
			}
		}
	}

	fifo_queue->ntasks++;
	fifo_queue->nprocessed++;

	_STARPU_PTHREAD_COND_SIGNAL(sched_cond);
	_STARPU_PTHREAD_MUTEX_UNLOCK(sched_mutex);

	return 0;
}
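
/* Enqueue the task on the selected worker's queue, after extending that
 * queue's expected end/length and, if prefetching is enabled, triggering a
 * prefetch of the task's input data on the worker's memory node. */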
static int push_task_on_best_worker(struct starpu_task *task, int best_workerid, double predicted, int prio)
{
	/* make sure someone could execute that task! */
	STARPU_ASSERT(best_workerid != -1);

	struct _starpu_fifo_taskq *fifo;
	fifo = queue_array[best_workerid];

	fifo->exp_end += predicted;
	fifo->exp_len += predicted;

	task->predicted = predicted;
	/* TODO predicted_transfer */

	unsigned memory_node = starpu_worker_get_memory_node(best_workerid);

	if (starpu_get_prefetch_flag())
		starpu_prefetch_task_input_on_node(task, memory_node);

	if (prio)
		return _starpu_fifo_push_sorted_task(queue_array[best_workerid],
			&sched_mutex[best_workerid], &sched_cond[best_workerid], task);
	else
		return _starpu_fifo_push_task(queue_array[best_workerid],
			&sched_mutex[best_workerid], &sched_cond[best_workerid], task);
}
/* TODO: factorize with dmda!! */
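
/* "dm" strategy: for each (worker, implementation) pair that can execute
 * the task, estimate when that worker would complete it (expected end of
 * its queue plus the task's predicted length) and pick the earliest
 * completion. While performance models are still being calibrated, fall
 * back to a greedy distribution based on the number of queued tasks,
 * weighted by the workers' relative speedups. */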
static int _dm_push_task(struct starpu_task *task, unsigned prio)
{
	/* find the queue */
	struct _starpu_fifo_taskq *fifo;
	unsigned worker;
	int best = -1;

	double best_exp_end = 0.0;
	double model_best = 0.0;

	int ntasks_best = -1;
	double ntasks_best_end = 0.0;
	int calibrating = 0;

	/* A priori, we know all estimations */
	int unknown = 0;

	unsigned best_impl = 0;
	unsigned nimpl;

	for (worker = 0; worker < nworkers; worker++)
	{
		for (nimpl = 0; nimpl < STARPU_MAXIMPLEMENTATIONS; nimpl++)
		{
			double exp_end;

			fifo = queue_array[worker];

			/* Sometimes workers didn't take the tasks as early as we expected */
			fifo->exp_start = STARPU_MAX(fifo->exp_start, starpu_timing_now());
			fifo->exp_end = fifo->exp_start + fifo->exp_len;

			if (!starpu_worker_can_execute_task(worker, task, nimpl))
			{
				/* no one on that queue may execute this task */
				continue;
			}

			enum starpu_perf_archtype perf_arch = starpu_worker_get_perf_archtype(worker);
			double local_length = starpu_task_expected_length(task, perf_arch, nimpl);
			double ntasks_end = fifo->ntasks / starpu_worker_get_relative_speedup(perf_arch);

			//_STARPU_DEBUG("Scheduler dm: task length (%lf) worker (%u) kernel (%u)\n", local_length, worker, nimpl);

			if (ntasks_best == -1
			    || (!calibrating && ntasks_end < ntasks_best_end) /* Not calibrating, take better task */
			    || (!calibrating && local_length == -1.0) /* Not calibrating but this worker is being calibrated */
			    || (calibrating && local_length == -1.0 && ntasks_end < ntasks_best_end) /* Calibrating, compete this worker with other non-calibrated */
			   )
			{
				ntasks_best_end = ntasks_end;
				ntasks_best = worker;
				best_impl = nimpl;
			}

			if (local_length == -1.0)
				/* we are calibrating, we want to speed-up calibration time
				 * so we privilege non-calibrated tasks (but still
				 * greedily distribute them to avoid dumb schedules) */
				calibrating = 1;

			if (local_length <= 0.0)
				/* there is no prediction available for that task
				 * with that arch yet, so switch to a greedy strategy */
				unknown = 1;

			if (unknown)
				continue;

			exp_end = fifo->exp_start + fifo->exp_len + local_length;

			if (best == -1 || exp_end < best_exp_end)
			{
				/* a better solution was found */
				best_exp_end = exp_end;
				best = worker;
				model_best = local_length;
				best_impl = nimpl;
			}
		}
	}

	if (unknown)
	{
		best = ntasks_best;
		model_best = 0.0;
	}

	//_STARPU_DEBUG("Scheduler dm: kernel (%u)\n", best_impl);

	_starpu_get_job_associated_to_task(task)->nimpl = best_impl;

	/* we should now have the best worker in variable "best" */
	return push_task_on_best_worker(task, best, model_best, prio);
}
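
/* "dmda" strategy: like dm, but the choice is made on a fitness value that
 * also accounts for data transfers and power consumption:
 *
 *   fitness = alpha * (exp_end - best_exp_end)
 *           + beta  * data_penalty
 *           + gamma * power
 *
 * where data_penalty is the expected time to transfer the task's input data
 * to the worker's memory node, and power the task's expected consumption on
 * that arch. If the chosen placement lengthens the overall makespan, the
 * idle consumption of the other processing units is charged as well. */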
static int _dmda_push_task(struct starpu_task *task, unsigned prio)
{
	/* find the queue */
	struct _starpu_fifo_taskq *fifo;
	unsigned worker;
	int best = -1;

	/* this flag is set if the corresponding worker is selected because
	   there is no performance prediction available yet */
	int forced_best = -1;

	double local_task_length[nworkers][STARPU_MAXIMPLEMENTATIONS];
	double local_data_penalty[nworkers][STARPU_MAXIMPLEMENTATIONS];
	double local_power[nworkers][STARPU_MAXIMPLEMENTATIONS];
	double exp_end[nworkers][STARPU_MAXIMPLEMENTATIONS];
	double max_exp_end = 0.0;

	double fitness[nworkers][STARPU_MAXIMPLEMENTATIONS];
	double best_exp_end = 10e240;
	double model_best = 0.0;
	//double penality_best = 0.0;

	int ntasks_best = -1;
	double ntasks_best_end = 0.0;
	int calibrating = 0;

	/* A priori, we know all estimations */
	int unknown = 0;

	unsigned best_impl = 0;
	unsigned nimpl;

	for (worker = 0; worker < nworkers; worker++)
	{
		for (nimpl = 0; nimpl < STARPU_MAXIMPLEMENTATIONS; nimpl++)
		{
			fifo = queue_array[worker];

			/* Sometimes workers didn't take the tasks as early as we expected */
			fifo->exp_start = STARPU_MAX(fifo->exp_start, starpu_timing_now());
			fifo->exp_end = fifo->exp_start + fifo->exp_len;
			if (fifo->exp_end > max_exp_end)
				max_exp_end = fifo->exp_end;

			if (!starpu_worker_can_execute_task(worker, task, nimpl))
			{
				/* no one on that queue may execute this task */
				continue;
			}

			enum starpu_perf_archtype perf_arch = starpu_worker_get_perf_archtype(worker);
			local_task_length[worker][nimpl] = starpu_task_expected_length(task, perf_arch, nimpl);

			//_STARPU_DEBUG("Scheduler dmda: task length (%lf) worker (%u) kernel (%u)\n", local_task_length[worker][nimpl], worker, nimpl);

			unsigned memory_node = starpu_worker_get_memory_node(worker);
			local_data_penalty[worker][nimpl] = starpu_task_expected_data_transfer_time(memory_node, task);

			double ntasks_end = fifo->ntasks / starpu_worker_get_relative_speedup(perf_arch);

			if (ntasks_best == -1
			    || (!calibrating && ntasks_end < ntasks_best_end) /* Not calibrating, take better task */
			    || (!calibrating && local_task_length[worker][nimpl] == -1.0) /* Not calibrating but this worker is being calibrated */
			    || (calibrating && local_task_length[worker][nimpl] == -1.0 && ntasks_end < ntasks_best_end) /* Calibrating, compete this worker with other non-calibrated */
			   )
			{
				ntasks_best_end = ntasks_end;
				ntasks_best = worker;
				best_impl = nimpl;
			}

			if (local_task_length[worker][nimpl] == -1.0)
				/* we are calibrating, we want to speed-up calibration time
				 * so we privilege non-calibrated tasks (but still
				 * greedily distribute them to avoid dumb schedules) */
				calibrating = 1;

			if (local_task_length[worker][nimpl] <= 0.0)
				/* there is no prediction available for that task
				 * with that arch yet, so switch to a greedy strategy */
				unknown = 1;

			if (unknown)
				continue;

			exp_end[worker][nimpl] = fifo->exp_start + fifo->exp_len + local_task_length[worker][nimpl];

			if (exp_end[worker][nimpl] < best_exp_end)
			{
				/* a better solution was found */
				best_exp_end = exp_end[worker][nimpl];
				best_impl = nimpl;
			}

			local_power[worker][nimpl] = starpu_task_expected_power(task, perf_arch, nimpl);
			if (local_power[worker][nimpl] == -1.0)
				local_power[worker][nimpl] = 0.;
		}
	}

	if (unknown)
		forced_best = ntasks_best;

	double best_fitness = -1;

	if (forced_best == -1)
	{
		for (worker = 0; worker < nworkers; worker++)
			for (nimpl = 0; nimpl < STARPU_MAXIMPLEMENTATIONS; nimpl++)
			{
				if (!starpu_worker_can_execute_task(worker, task, nimpl))
				{
					/* no one on that queue may execute this task */
					continue;
				}

				fitness[worker][nimpl] = alpha*(exp_end[worker][nimpl] - best_exp_end)
					+ beta*(local_data_penalty[worker][nimpl])
					+ _gamma*(local_power[worker][nimpl]);

				if (exp_end[worker][nimpl] > max_exp_end)
				{
					/* This placement will make the computation
					 * longer, take into account the idle
					 * consumption of other cpus */
					fitness[worker][nimpl] += _gamma * idle_power * (exp_end[worker][nimpl] - max_exp_end) / 1000000.0;
				}

				if (best == -1 || fitness[worker][nimpl] < best_fitness)
				{
					/* we found a better solution */
					best_fitness = fitness[worker][nimpl];
					best = worker;
					best_impl = nimpl;

					//_STARPU_DEBUG("best fitness (worker %d) %e = alpha*(%e) + beta(%e) + gamma(%e)\n", worker, best_fitness, exp_end[worker][nimpl] - best_exp_end, local_data_penalty[worker][nimpl], local_power[worker][nimpl]);
				}
			}
	}

	STARPU_ASSERT(forced_best != -1 || best != -1);

	if (forced_best != -1)
	{
		/* there is no prediction available for that task
		 * with that arch we want to speed-up calibration time
		 * so we force this measurement */
		best = forced_best;
		model_best = 0.0;
		//penality_best = 0.0;
	}
	else
	{
		/* use the implementation selected above, not the loop index
		 * (which has run past the last implementation by now) */
		model_best = local_task_length[best][best_impl];
		//penality_best = local_data_penalty[best][best_impl];
	}

	//_STARPU_DEBUG("Scheduler dmda: kernel (%u)\n", best_impl);

	_starpu_get_job_associated_to_task(task)->nimpl = best_impl;

	/* we should now have the best worker in variable "best" */
	return push_task_on_best_worker(task, best, model_best, prio);
}
static int dmda_push_sorted_task(struct starpu_task *task)
{
	return _dmda_push_task(task, 1);
}

static int dm_push_task(struct starpu_task *task)
{
	return _dm_push_task(task, 0);
}

static int dmda_push_task(struct starpu_task *task)
{
	return _dmda_push_task(task, 0);
}
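
/* The alpha, beta and gamma weights and the idle power can be overridden at
 * run time through the STARPU_SCHED_ALPHA, STARPU_SCHED_BETA,
 * STARPU_SCHED_GAMMA and STARPU_IDLE_POWER environment variables. */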
static void initialize_dmda_policy(struct starpu_machine_topology *topology,
	__attribute__ ((unused)) struct starpu_sched_policy *_policy)
{
	nworkers = topology->nworkers;

	const char *strval_alpha = getenv("STARPU_SCHED_ALPHA");
	if (strval_alpha)
		alpha = atof(strval_alpha);

	const char *strval_beta = getenv("STARPU_SCHED_BETA");
	if (strval_beta)
		beta = atof(strval_beta);

	const char *strval_gamma = getenv("STARPU_SCHED_GAMMA");
	if (strval_gamma)
		_gamma = atof(strval_gamma);

	const char *strval_idle_power = getenv("STARPU_IDLE_POWER");
	if (strval_idle_power)
		idle_power = atof(strval_idle_power);

	unsigned workerid;
	for (workerid = 0; workerid < nworkers; workerid++)
	{
		queue_array[workerid] = _starpu_create_fifo();

		_STARPU_PTHREAD_MUTEX_INIT(&sched_mutex[workerid], NULL);
		_STARPU_PTHREAD_COND_INIT(&sched_cond[workerid], NULL);

		starpu_worker_set_sched_condition(workerid, &sched_cond[workerid], &sched_mutex[workerid]);
	}
}
static void initialize_dmda_sorted_policy(struct starpu_machine_topology *topology,
	struct starpu_sched_policy *_policy)
{
	initialize_dmda_policy(topology, _policy);

	/* The application may use any integer */
	starpu_sched_set_min_priority(INT_MIN);
	starpu_sched_set_max_priority(INT_MAX);
}
static void deinitialize_dmda_policy(struct starpu_machine_topology *topology,
	__attribute__ ((unused)) struct starpu_sched_policy *_policy)
{
	unsigned workerid;
	for (workerid = 0; workerid < topology->nworkers; workerid++)
		_starpu_destroy_fifo(queue_array[workerid]);

	_STARPU_DEBUG("total_task_cnt %ld ready_task_cnt %ld -> %f\n", total_task_cnt, ready_task_cnt, (100.0f*ready_task_cnt)/total_task_cnt);
}
/* TODO: use post_exec_hook to fix the expected start */
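
/* The four exported variants: "dm" (performance model only), "dmda" (also
 * data-aware), "dmdas" (data-aware, priority-sorted queues) and "dmdar"
 * (data-aware, pops the most-ready task first). */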
struct starpu_sched_policy _starpu_sched_dm_policy =
{
	.init_sched = initialize_dmda_policy,
	.deinit_sched = deinitialize_dmda_policy,
	.push_task = dm_push_task,
	.pop_task = dmda_pop_task,
	.post_exec_hook = NULL,
	.pop_every_task = dmda_pop_every_task,
	.policy_name = "dm",
	.policy_description = "performance model"
};

struct starpu_sched_policy _starpu_sched_dmda_policy =
{
	.init_sched = initialize_dmda_policy,
	.deinit_sched = deinitialize_dmda_policy,
	.push_task = dmda_push_task,
	.pop_task = dmda_pop_task,
	.post_exec_hook = NULL,
	.pop_every_task = dmda_pop_every_task,
	.policy_name = "dmda",
	.policy_description = "data-aware performance model"
};

struct starpu_sched_policy _starpu_sched_dmda_sorted_policy =
{
	.init_sched = initialize_dmda_sorted_policy,
	.deinit_sched = deinitialize_dmda_policy,
	.push_task = dmda_push_sorted_task,
	.pop_task = dmda_pop_ready_task,
	.post_exec_hook = NULL,
	.pop_every_task = dmda_pop_every_task,
	.policy_name = "dmdas",
	.policy_description = "data-aware performance model (sorted)"
};

struct starpu_sched_policy _starpu_sched_dmda_ready_policy =
{
	.init_sched = initialize_dmda_policy,
	.deinit_sched = deinitialize_dmda_policy,
	.push_task = dmda_push_task,
	.pop_task = dmda_pop_ready_task,
	.post_exec_hook = NULL,
	.pop_every_task = dmda_pop_every_task,
	.policy_name = "dmdar",
	.policy_description = "data-aware performance model (ready)"
};