/* StarPU --- Runtime system for heterogeneous multicore architectures.
 *
 * Copyright (C) 2015, 2017 INRIA
 * Copyright (C) 2016, 2017 CNRS
 * Copyright (C) 2016 Uppsala University
 *
 * StarPU is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation; either version 2.1 of the License, or (at
 * your option) any later version.
 *
 * StarPU is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * See the GNU Lesser General Public License in COPYING.LGPL for more details.
 */
/* Distributed queues using performance modeling to assign tasks */

#include <starpu_config.h>
#include <starpu_scheduler.h>
#include <schedulers/starpu_heteroprio.h>
#include <common/fxt.h>
#include <core/task.h>
#include <core/workers.h>
#include <core/debug.h>
#include <sched_policies/fifo_queues.h>
#include <limits.h>

#ifndef DBL_MIN
#define DBL_MIN __DBL_MIN__
#endif

#ifndef DBL_MAX
#define DBL_MAX __DBL_MAX__
#endif
/* A bucket corresponds to a priority level.
 * When a task is pushed with a priority X, it will be stored
 * into the bucket X.
 * All the tasks stored in the fifo should be computable by the archs
 * in valid_archs.
 * For example, if valid_archs = (STARPU_CPU|STARPU_CUDA)
 * then task->where should be at least (STARPU_CPU|STARPU_CUDA).
 */
struct _heteroprio_bucket
{
	/* The tasks of the current bucket */
	struct _starpu_fifo_taskq* tasks_queue;
	/* The archs that are valid for the current bucket */
	unsigned valid_archs;
	/* The slow factor for each arch */
	float slow_factors_per_index[STARPU_NB_TYPES];
	/* The base arch for the slow factor (the fastest arch for the tasks in the bucket) */
	unsigned factor_base_arch_index;
};
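/* Note on slow factors (see the prefetch loop in pop_task_heteroprio_policy
 * below): a worker whose arch is not factor_base_arch_index only picks tasks
 * from a bucket when the number of pending tasks per worker of the fastest
 * arch is at least its own slow factor, so slower archs only help once the
 * fastest arch is saturated. */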
/* Init a bucket */
static void _heteroprio_bucket_init(struct _heteroprio_bucket* bucket)
{
	memset(bucket, 0, sizeof(*bucket));
	bucket->tasks_queue = _starpu_create_fifo();
}

/* Release a bucket */
static void _heteroprio_bucket_release(struct _heteroprio_bucket* bucket)
{
	STARPU_ASSERT(_starpu_fifo_empty(bucket->tasks_queue) != 0);
	_starpu_destroy_fifo(bucket->tasks_queue);
}
/* A worker is mainly composed of a fifo for the tasks
 * and some direct access to worker properties.
 * The fifo is now implemented with a StarPU fifo (_starpu_fifo_taskq):
 * tasks are pushed to and popped from that queue directly.
 */
/* ANDRA_MODIF: can use starpu fifo + starpu sched_mutex */
struct _heteroprio_worker_wrapper
{
	unsigned arch_type;
	unsigned arch_index;
	struct _starpu_fifo_taskq *tasks_queue;
};
struct _starpu_heteroprio_data
{
	starpu_pthread_mutex_t policy_mutex;
	struct starpu_bitmap *waiters;
	/* The buckets to store the tasks */
	struct _heteroprio_bucket buckets[STARPU_HETEROPRIO_MAX_PRIO];
	/* The number of buckets for each arch */
	unsigned nb_prio_per_arch_index[STARPU_NB_TYPES];
	/* The mapping to the corresponding buckets */
	unsigned prio_mapping_per_arch_index[STARPU_NB_TYPES][STARPU_HETEROPRIO_MAX_PRIO];
	/* The number of available tasks for a given arch (not prefetched) */
	unsigned nb_remaining_tasks_per_arch_index[STARPU_NB_TYPES];
	/* The total number of tasks in the buckets (not prefetched) */
	unsigned total_tasks_in_buckets;
	/* The total number of prefetched tasks for a given arch */
	unsigned nb_prefetched_tasks_per_arch_index[STARPU_NB_TYPES];
	/* The information for all the workers */
	struct _heteroprio_worker_wrapper workers_heteroprio[STARPU_NMAXWORKERS];
	/* The number of workers for a given arch */
	unsigned nb_workers_per_arch_index[STARPU_NB_TYPES];
};
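/* Overall flow (as implemented below): push_task stores each task in the
 * bucket matching its priority and updates the per-arch counters; pop_task
 * moves up to STARPU_HETEROPRIO_MAX_PREFETCH tasks from the buckets into the
 * calling worker's own queue, then pops from that queue, stealing from a
 * same-arch worker when its own queue is empty. */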
/** Tell how many priorities there are for a given arch */
void starpu_heteroprio_set_nb_prios(unsigned sched_ctx_id, enum starpu_heteroprio_types arch, unsigned max_prio)
{
	struct _starpu_heteroprio_data *hp = (struct _starpu_heteroprio_data*)starpu_sched_ctx_get_policy_data(sched_ctx_id);
	STARPU_ASSERT(max_prio < STARPU_HETEROPRIO_MAX_PRIO);
	hp->nb_prio_per_arch_index[arch] = max_prio;
}
/** Set the mapping for a given arch prio=>bucket */
inline void starpu_heteroprio_set_mapping(unsigned sched_ctx_id, enum starpu_heteroprio_types arch, unsigned source_prio, unsigned dest_bucket_id)
{
	STARPU_ASSERT(dest_bucket_id < STARPU_HETEROPRIO_MAX_PRIO);
	struct _starpu_heteroprio_data *hp = (struct _starpu_heteroprio_data*)starpu_sched_ctx_get_policy_data(sched_ctx_id);
	hp->prio_mapping_per_arch_index[arch][source_prio] = dest_bucket_id;
	hp->buckets[dest_bucket_id].valid_archs |= starpu_heteroprio_types_to_arch[arch];
	_STARPU_DEBUG("Adding arch %d to bucket %d\n", arch, dest_bucket_id);
}
/** Tell which arch is the fastest for the tasks of a bucket (optional) */
inline void starpu_heteroprio_set_faster_arch(unsigned sched_ctx_id, enum starpu_heteroprio_types arch, unsigned bucket_id)
{
	STARPU_ASSERT(bucket_id < STARPU_HETEROPRIO_MAX_PRIO);
	struct _starpu_heteroprio_data *hp = (struct _starpu_heteroprio_data*)starpu_sched_ctx_get_policy_data(sched_ctx_id);
	hp->buckets[bucket_id].factor_base_arch_index = arch;
	hp->buckets[bucket_id].slow_factors_per_index[arch] = 0;
}
/** Tell how slow an arch is for the tasks of a bucket (optional) */
inline void starpu_heteroprio_set_arch_slow_factor(unsigned sched_ctx_id, enum starpu_heteroprio_types arch, unsigned bucket_id, float slow_factor)
{
	STARPU_ASSERT(bucket_id < STARPU_HETEROPRIO_MAX_PRIO);
	struct _starpu_heteroprio_data *hp = (struct _starpu_heteroprio_data*)starpu_sched_ctx_get_policy_data(sched_ctx_id);
	hp->buckets[bucket_id].slow_factors_per_index[arch] = slow_factor;
}
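/* Illustrative sketch (not part of this file): a user-provided init callback
 * could configure two priority levels where CUDA is the fastest arch for
 * bucket 0 and CPUs are considered 4x slower on it. The callback name is
 * hypothetical; it is the function retrieved below through
 * starpu_sched_ctx_get_sched_policy_init().
 *
 *   static void my_init_sched(unsigned sched_ctx_id)
 *   {
 *       starpu_heteroprio_set_nb_prios(sched_ctx_id, STARPU_CPU_IDX, 2);
 *       starpu_heteroprio_set_nb_prios(sched_ctx_id, STARPU_CUDA_IDX, 2);
 *       unsigned prio;
 *       for(prio = 0; prio < 2; prio++)
 *       {
 *           starpu_heteroprio_set_mapping(sched_ctx_id, STARPU_CPU_IDX, prio, prio);
 *           starpu_heteroprio_set_mapping(sched_ctx_id, STARPU_CUDA_IDX, prio, prio);
 *       }
 *       starpu_heteroprio_set_faster_arch(sched_ctx_id, STARPU_CUDA_IDX, 0);
 *       starpu_heteroprio_set_arch_slow_factor(sched_ctx_id, STARPU_CPU_IDX, 0, 4.0f);
 *   }
 */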
/** If the user does not provide an init callback we create a direct mapping: one bucket per priority, shared by all architectures */
static inline void default_init_sched(unsigned sched_ctx_id)
{
	int min_prio = starpu_sched_ctx_get_min_priority(sched_ctx_id);
	int max_prio = starpu_sched_ctx_get_max_priority(sched_ctx_id);
	STARPU_ASSERT(min_prio >= 0);
	STARPU_ASSERT(max_prio >= 0);
	// By default each type of device uses one bucket per priority and no slow factor
#ifdef STARPU_USE_CPU
	starpu_heteroprio_set_nb_prios(sched_ctx_id, STARPU_CPU_IDX, max_prio-min_prio+1);
#endif
#ifdef STARPU_USE_CUDA
	starpu_heteroprio_set_nb_prios(sched_ctx_id, STARPU_CUDA_IDX, max_prio-min_prio+1);
#endif
#ifdef STARPU_USE_OPENCL
	starpu_heteroprio_set_nb_prios(sched_ctx_id, STARPU_OPENCL_IDX, max_prio-min_prio+1);
#endif
#ifdef STARPU_USE_MIC
	starpu_heteroprio_set_nb_prios(sched_ctx_id, STARPU_MIC_IDX, max_prio-min_prio+1);
#endif
#ifdef STARPU_USE_MPI_MASTER_SLAVE
	starpu_heteroprio_set_nb_prios(sched_ctx_id, STARPU_MPI_MS_IDX, max_prio-min_prio+1);
#endif
#ifdef STARPU_USE_SCC
	starpu_heteroprio_set_nb_prios(sched_ctx_id, STARPU_SCC_IDX, max_prio-min_prio+1);
#endif
	// Direct mapping
	int prio;
	for(prio=min_prio ; prio<=max_prio ; prio++)
	{
#ifdef STARPU_USE_CPU
		starpu_heteroprio_set_mapping(sched_ctx_id, STARPU_CPU_IDX, prio, prio);
#endif
#ifdef STARPU_USE_CUDA
		starpu_heteroprio_set_mapping(sched_ctx_id, STARPU_CUDA_IDX, prio, prio);
#endif
#ifdef STARPU_USE_OPENCL
		starpu_heteroprio_set_mapping(sched_ctx_id, STARPU_OPENCL_IDX, prio, prio);
#endif
#ifdef STARPU_USE_MIC
		starpu_heteroprio_set_mapping(sched_ctx_id, STARPU_MIC_IDX, prio, prio);
#endif
#ifdef STARPU_USE_MPI_MASTER_SLAVE
		starpu_heteroprio_set_mapping(sched_ctx_id, STARPU_MPI_MS_IDX, prio, prio);
#endif
#ifdef STARPU_USE_SCC
		starpu_heteroprio_set_mapping(sched_ctx_id, STARPU_SCC_IDX, prio, prio);
#endif
	}
}
static void initialize_heteroprio_policy(unsigned sched_ctx_id)
{
	/* Alloc the scheduler data */
	struct _starpu_heteroprio_data *hp;
	_STARPU_MALLOC(hp, sizeof(struct _starpu_heteroprio_data));
	memset(hp, 0, sizeof(*hp));
	hp->waiters = starpu_bitmap_create();
	starpu_sched_ctx_set_policy_data(sched_ctx_id, (void*)hp);
	STARPU_PTHREAD_MUTEX_INIT(&hp->policy_mutex, NULL);
	unsigned idx_prio;
	for(idx_prio = 0; idx_prio < STARPU_HETEROPRIO_MAX_PRIO; ++idx_prio)
		_heteroprio_bucket_init(&hp->buckets[idx_prio]);
	void (*init_sched)(unsigned) = starpu_sched_ctx_get_sched_policy_init(sched_ctx_id);
	if(init_sched)
		init_sched(sched_ctx_id);
	else
		default_init_sched(sched_ctx_id);
	/* Ensure that the information has been correctly filled */
	unsigned check_all_archs[STARPU_HETEROPRIO_MAX_PRIO];
	memset(check_all_archs, 0, sizeof(unsigned)*STARPU_HETEROPRIO_MAX_PRIO);
	unsigned arch_index;
	for(arch_index = 0; arch_index < STARPU_NB_TYPES; ++arch_index)
	{
		STARPU_ASSERT(hp->nb_prio_per_arch_index[arch_index] <= STARPU_HETEROPRIO_MAX_PRIO);
		unsigned check_archs[STARPU_HETEROPRIO_MAX_PRIO];
		memset(check_archs, 0, sizeof(unsigned)*STARPU_HETEROPRIO_MAX_PRIO);
		for(idx_prio = 0; idx_prio < hp->nb_prio_per_arch_index[arch_index]; ++idx_prio)
		{
			const unsigned mapped_prio = hp->prio_mapping_per_arch_index[arch_index][idx_prio];
			STARPU_ASSERT(mapped_prio < STARPU_HETEROPRIO_MAX_PRIO);
			STARPU_ASSERT(hp->buckets[mapped_prio].slow_factors_per_index[arch_index] >= 0.0);
			STARPU_ASSERT(hp->buckets[mapped_prio].valid_archs & starpu_heteroprio_types_to_arch[arch_index]);
			check_archs[mapped_prio] = 1;
			check_all_archs[mapped_prio] += 1;
		}
		for(idx_prio = 0; idx_prio < STARPU_HETEROPRIO_MAX_PRIO; ++idx_prio)
		{
			/* Ensure that the current arch uses this bucket, or that another arch can use it */
			STARPU_ASSERT(check_archs[idx_prio] == 1 || hp->buckets[idx_prio].valid_archs == 0
			              || (hp->buckets[idx_prio].valid_archs & ~starpu_heteroprio_types_to_arch[arch_index]) != 0);
		}
	}
	/* Ensure that if valid_archs = (STARPU_CPU|STARPU_CUDA), for example, then check_all_archs[] == 2 */
	for(idx_prio = 0; idx_prio < STARPU_HETEROPRIO_MAX_PRIO; ++idx_prio)
	{
		unsigned nb_arch_on_bucket = 0;
		for(arch_index = 0; arch_index < STARPU_NB_TYPES; ++arch_index)
		{
			if(hp->buckets[idx_prio].valid_archs & starpu_heteroprio_types_to_arch[arch_index])
			{
				nb_arch_on_bucket += 1;
			}
		}
		STARPU_ASSERT_MSG(check_all_archs[idx_prio] == nb_arch_on_bucket, "check_all_archs[idx_prio(%u)] = %u != nb_arch_on_bucket = %u\n", idx_prio, check_all_archs[idx_prio], nb_arch_on_bucket);
	}
}
static void deinitialize_heteroprio_policy(unsigned sched_ctx_id)
{
	struct _starpu_heteroprio_data *hp = (struct _starpu_heteroprio_data*)starpu_sched_ctx_get_policy_data(sched_ctx_id);
	/* Ensure there are no more tasks */
	STARPU_ASSERT(hp->total_tasks_in_buckets == 0);
	unsigned arch_index;
	for(arch_index = 0; arch_index < STARPU_NB_TYPES; ++arch_index)
	{
		STARPU_ASSERT(hp->nb_remaining_tasks_per_arch_index[arch_index] == 0);
		STARPU_ASSERT(hp->nb_prefetched_tasks_per_arch_index[arch_index] == 0);
	}
	unsigned idx_prio;
	for(idx_prio = 0; idx_prio < STARPU_HETEROPRIO_MAX_PRIO; ++idx_prio)
	{
		_heteroprio_bucket_release(&hp->buckets[idx_prio]);
	}
	starpu_bitmap_destroy(hp->waiters);
	STARPU_PTHREAD_MUTEX_DESTROY(&hp->policy_mutex);
	free(hp);
}
static void add_workers_heteroprio_policy(unsigned sched_ctx_id, int *workerids, unsigned nworkers)
{
	struct _starpu_heteroprio_data *hp = (struct _starpu_heteroprio_data*)starpu_sched_ctx_get_policy_data(sched_ctx_id);
	unsigned i;
	for (i = 0; i < nworkers; i++)
	{
		int workerid = workerids[i];
		/* if the worker has already belonged to this context
		   the queue and the synchronization variables have already been initialized,
		   so only reset the wrapper when creating its queue (otherwise the existing queue would leak) */
		if(hp->workers_heteroprio[workerid].tasks_queue == NULL)
		{
			memset(&hp->workers_heteroprio[workerid], 0, sizeof(hp->workers_heteroprio[workerid]));
			hp->workers_heteroprio[workerid].tasks_queue = _starpu_create_fifo();
			switch(starpu_worker_get_type(workerid))
			{
			case STARPU_CPU_WORKER:
				hp->workers_heteroprio[workerid].arch_type = STARPU_CPU;
				hp->workers_heteroprio[workerid].arch_index = STARPU_CPU_IDX;
				break;
			case STARPU_CUDA_WORKER:
				hp->workers_heteroprio[workerid].arch_type = STARPU_CUDA;
				hp->workers_heteroprio[workerid].arch_index = STARPU_CUDA_IDX;
				break;
			case STARPU_OPENCL_WORKER:
				hp->workers_heteroprio[workerid].arch_type = STARPU_OPENCL;
				hp->workers_heteroprio[workerid].arch_index = STARPU_OPENCL_IDX;
				break;
			case STARPU_MIC_WORKER:
				hp->workers_heteroprio[workerid].arch_type = STARPU_MIC;
				hp->workers_heteroprio[workerid].arch_index = STARPU_MIC_IDX;
				break;
			case STARPU_SCC_WORKER:
				hp->workers_heteroprio[workerid].arch_type = STARPU_SCC;
				hp->workers_heteroprio[workerid].arch_index = STARPU_SCC_IDX;
				break;
			case STARPU_MPI_MS_WORKER:
				hp->workers_heteroprio[workerid].arch_type = STARPU_MPI_MS;
				hp->workers_heteroprio[workerid].arch_index = STARPU_MPI_MS_IDX;
				break;
			default:
				STARPU_ASSERT(0);
			}
		}
		hp->nb_workers_per_arch_index[hp->workers_heteroprio[workerid].arch_index]++;
	}
}
static void remove_workers_heteroprio_policy(unsigned sched_ctx_id, int *workerids, unsigned nworkers)
{
	struct _starpu_heteroprio_data *hp = (struct _starpu_heteroprio_data*)starpu_sched_ctx_get_policy_data(sched_ctx_id);
	unsigned i;
	for (i = 0; i < nworkers; i++)
	{
		int workerid = workerids[i];
		if(hp->workers_heteroprio[workerid].tasks_queue != NULL)
		{
			_starpu_destroy_fifo(hp->workers_heteroprio[workerid].tasks_queue);
			hp->workers_heteroprio[workerid].tasks_queue = NULL;
		}
	}
}
/* Push a new task (simply store it and update counters) */
static int push_task_heteroprio_policy(struct starpu_task *task)
{
	unsigned sched_ctx_id = task->sched_ctx;
	struct _starpu_heteroprio_data *hp = (struct _starpu_heteroprio_data*)starpu_sched_ctx_get_policy_data(sched_ctx_id);
	/* Only one worker at a time uses heteroprio */
	_starpu_worker_relax_on();
	STARPU_PTHREAD_MUTEX_LOCK(&hp->policy_mutex);
	_starpu_worker_relax_off();
	/* Retrieve the correct bucket */
	STARPU_ASSERT(task->priority < STARPU_HETEROPRIO_MAX_PRIO);
	struct _heteroprio_bucket* bucket = &hp->buckets[task->priority];
	/* Ensure that any worker that checks this list can compute the task:
	   bucket->valid_archs must be a subset of task->where */
	STARPU_ASSERT_MSG(bucket->valid_archs, "The bucket %d does not have any archs\n", task->priority);
	STARPU_ASSERT(((bucket->valid_archs ^ task->where) & bucket->valid_archs) == 0);
	/* save the task */
	_starpu_fifo_push_back_task(bucket->tasks_queue,task);
	/* Inc counters */
	unsigned arch_index;
	for(arch_index = 0; arch_index < STARPU_NB_TYPES; ++arch_index)
	{
		/* We test the archs of the bucket and not task->where, since valid_archs is the more restrictive set */
		if(bucket->valid_archs & starpu_heteroprio_types_to_arch[arch_index])
			hp->nb_remaining_tasks_per_arch_index[arch_index] += 1;
	}
	hp->total_tasks_in_buckets += 1;
	starpu_push_task_end(task);
	/* if some workers blocked because no tasks were available,
	   wake those waiting for a task */
	struct starpu_worker_collection *workers = starpu_sched_ctx_get_worker_collection(sched_ctx_id);
	struct starpu_sched_ctx_iterator it;
#ifndef STARPU_NON_BLOCKING_DRIVERS
	char dowake[STARPU_NMAXWORKERS] = { 0 };
#endif
	workers->init_iterator(workers, &it);
	while(workers->has_next(workers, &it))
	{
		unsigned worker = workers->get_next(workers, &it);
#ifdef STARPU_NON_BLOCKING_DRIVERS
		if (!starpu_bitmap_get(hp->waiters, worker))
			/* This worker is not waiting for a task */
			continue;
#endif
		if (starpu_worker_can_execute_task_first_impl(worker, task, NULL))
		{
			/* It can execute this one, tell it! */
#ifdef STARPU_NON_BLOCKING_DRIVERS
			starpu_bitmap_unset(hp->waiters, worker);
			/* We really woke at least somebody, no need to wake somebody else */
			break;
#else
			dowake[worker] = 1;
#endif
		}
	}
	/* Release the policy mutex */
	STARPU_PTHREAD_MUTEX_UNLOCK(&hp->policy_mutex);
#ifndef STARPU_NON_BLOCKING_DRIVERS
	/* Now that we have a list of potential workers, try to wake one */
	workers->init_iterator(workers, &it);
	while(workers->has_next(workers, &it))
	{
		unsigned worker = workers->get_next(workers, &it);
		if (dowake[worker])
			if (_starpu_wake_worker_relax(worker))
				break; // wake up a single worker
	}
#endif
	return 0;
}
static struct starpu_task *pop_task_heteroprio_policy(unsigned sched_ctx_id)
{
	const unsigned workerid = starpu_worker_get_id_check();
	struct _starpu_heteroprio_data *hp = (struct _starpu_heteroprio_data*)starpu_sched_ctx_get_policy_data(sched_ctx_id);
	struct _heteroprio_worker_wrapper* worker = &hp->workers_heteroprio[workerid];
#ifdef STARPU_NON_BLOCKING_DRIVERS
	/* If there are no tasks in the buckets (globally or for this arch), none in the
	   worker's own queue and none prefetched for this arch, just return NULL */
	if (!STARPU_RUNNING_ON_VALGRIND
	    && (hp->total_tasks_in_buckets == 0 || hp->nb_remaining_tasks_per_arch_index[worker->arch_index] == 0)
	    && worker->tasks_queue->ntasks == 0 && hp->nb_prefetched_tasks_per_arch_index[worker->arch_index] == 0)
	{
		return NULL;
	}
	if (!STARPU_RUNNING_ON_VALGRIND && starpu_bitmap_get(hp->waiters, workerid))
	{
		/* Nobody woke us, avoid bothering the mutex */
		return NULL;
	}
#endif
	_starpu_worker_relax_on();
	STARPU_PTHREAD_MUTEX_LOCK(&hp->policy_mutex);
	_starpu_worker_relax_off();
	/* keep track of the newly added tasks to perform a real prefetch on the node */
	unsigned nb_added_tasks = 0;
	/* Check whether some tasks are available for the current worker arch */
	if( hp->nb_remaining_tasks_per_arch_index[worker->arch_index] != 0 )
	{
		/* Ideally we would like to fill the prefetch array */
		unsigned nb_tasks_to_prefetch = (STARPU_HETEROPRIO_MAX_PREFETCH-worker->tasks_queue->ntasks);
		/* But there may be fewer tasks than that! */
		if(nb_tasks_to_prefetch > hp->nb_remaining_tasks_per_arch_index[worker->arch_index])
		{
			nb_tasks_to_prefetch = hp->nb_remaining_tasks_per_arch_index[worker->arch_index];
		}
		/* And in case there are fewer tasks than workers we take the minimum */
		if(hp->nb_remaining_tasks_per_arch_index[worker->arch_index] < starpu_sched_ctx_get_nworkers(sched_ctx_id))
		{
			if(worker->tasks_queue->ntasks == 0)
				nb_tasks_to_prefetch = 1;
			else
				nb_tasks_to_prefetch = 0;
		}
		unsigned idx_prio, arch_index;
		/* We iterate until we have found all the tasks we need */
		for(idx_prio = 0; nb_tasks_to_prefetch && idx_prio < hp->nb_prio_per_arch_index[worker->arch_index]; ++idx_prio)
		{
			/* Retrieve the bucket using the mapping */
			struct _heteroprio_bucket* bucket = &hp->buckets[hp->prio_mapping_per_arch_index[worker->arch_index][idx_prio]];
			/* Ensure we can compute tasks from this bucket */
			STARPU_ASSERT(bucket->valid_archs & worker->arch_type);
			/* Take nb_tasks_to_prefetch tasks if possible */
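			/* A task is taken only if no fastest arch was declared for the
			   bucket (factor_base_arch_index == 0), if this worker belongs
			   to the fastest arch, or if the number of pending tasks per
			   fastest-arch worker is at least this arch's slow factor, so
			   slower archs only help once the fastest arch is saturated */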
			while(!_starpu_fifo_empty(bucket->tasks_queue) && nb_tasks_to_prefetch &&
			      (bucket->factor_base_arch_index == 0 ||
			       worker->arch_index == bucket->factor_base_arch_index ||
			       (((float)bucket->tasks_queue->ntasks)/((float)hp->nb_workers_per_arch_index[bucket->factor_base_arch_index])) >= bucket->slow_factors_per_index[worker->arch_index]))
			{
				struct starpu_task* task = _starpu_fifo_pop_local_task(bucket->tasks_queue);
				STARPU_ASSERT(starpu_worker_can_execute_task(workerid, task, 0));
				/* Save the task */
				STARPU_AYU_ADDTOTASKQUEUE(starpu_task_get_job_id(task), workerid);
				_starpu_fifo_push_task(worker->tasks_queue, task);
				/* Update general counters */
				hp->nb_prefetched_tasks_per_arch_index[worker->arch_index] += 1;
				hp->total_tasks_in_buckets -= 1;
				for(arch_index = 0; arch_index < STARPU_NB_TYPES; ++arch_index)
				{
					/* We test the archs of the bucket and not task->where, since valid_archs is the more restrictive set */
					if(bucket->valid_archs & starpu_heteroprio_types_to_arch[arch_index])
					{
						hp->nb_remaining_tasks_per_arch_index[arch_index] -= 1;
					}
				}
				/* Decrease the number of tasks to find */
				nb_tasks_to_prefetch -= 1;
				nb_added_tasks += 1;
				// TODO starpu_prefetch_task_input_on_node(task, workerid);
			}
		}
	}
	struct starpu_task* task = NULL;
	/* The worker has some tasks in its queue */
	if(worker->tasks_queue->ntasks)
	{
		task = _starpu_fifo_pop_task(worker->tasks_queue, workerid);
		hp->nb_prefetched_tasks_per_arch_index[worker->arch_index] -= 1;
	}
	/* Otherwise look if we can steal some work */
	else if(hp->nb_prefetched_tasks_per_arch_index[worker->arch_index])
	{
		/* If STARPU_HETEROPRIO_MAX_PREFETCH==1 it should not be possible to steal work */
		STARPU_ASSERT(STARPU_HETEROPRIO_MAX_PREFETCH != 1);
		struct starpu_worker_collection *workers = starpu_sched_ctx_get_worker_collection(sched_ctx_id);
		struct starpu_sched_ctx_iterator it;
		workers->init_iterator(workers, &it);
		unsigned victim;
		unsigned current_worker;
		/* Start stealing from just after ourselves */
		while(workers->has_next(workers, &it))
		{
			current_worker = workers->get_next(workers, &it);
			if(current_worker == workerid)
				break;
		}
		/* circular loop */
		while (1)
		{
			if (!workers->has_next(workers, &it))
			{
				/* End of the list, restart from the beginning */
				workers->init_iterator(workers, &it);
			}
			while(workers->has_next(workers, &it))
			{
				victim = workers->get_next(workers, &it);
				/* When getting back to ourselves, we're done trying to find work */
				if(victim == workerid)
					goto done;
				/* If it is the same arch and there is a task to steal */
				if(hp->workers_heteroprio[victim].arch_index == worker->arch_index
				   && hp->workers_heteroprio[victim].tasks_queue->ntasks)
				{
					/* ensure the worker is not currently prefetching its data */
					_starpu_worker_lock(victim);
					if(hp->workers_heteroprio[victim].arch_index == worker->arch_index
					   && hp->workers_heteroprio[victim].tasks_queue->ntasks)
					{
						/* steal the last added task */
						task = _starpu_fifo_pop_task(hp->workers_heteroprio[victim].tasks_queue, workerid);
						/* we stole a task, update the global counter */
						hp->nb_prefetched_tasks_per_arch_index[hp->workers_heteroprio[victim].arch_index] -= 1;
						_starpu_worker_unlock(victim);
						goto done;
					}
					_starpu_worker_unlock(victim);
				}
			}
		}
done:		;
	}
	if (!task)
	{
		/* Tell pushers that we are waiting for tasks */
		starpu_bitmap_set(hp->waiters, workerid);
	}
	STARPU_PTHREAD_MUTEX_UNLOCK(&hp->policy_mutex);
	if(task)
	{
		_starpu_worker_relax_on();
		_starpu_sched_ctx_lock_write(sched_ctx_id);
		_starpu_worker_relax_off();
		unsigned child_sched_ctx = starpu_sched_ctx_worker_is_master_for_child_ctx(workerid, sched_ctx_id);
		if(child_sched_ctx != STARPU_NMAX_SCHED_CTXS)
		{
			starpu_sched_ctx_move_task_to_ctx_locked(task, child_sched_ctx, 1);
			starpu_sched_ctx_revert_task_counters_ctx_locked(sched_ctx_id, task->flops);
			_starpu_sched_ctx_unlock_write(sched_ctx_id);
			return NULL;
		}
		_starpu_sched_ctx_unlock_write(sched_ctx_id);
	}
	/* if we got a task (task), we may have some in the queue (worker->tasks_queue->ntasks) that were freshly added (nb_added_tasks) */
	if(task && worker->tasks_queue->ntasks && nb_added_tasks && starpu_get_prefetch_flag())
	{
		const unsigned memory_node = starpu_worker_get_memory_node(workerid);
		/* TODO berenger: iterate in the other sense */
		struct starpu_task *task_to_prefetch = NULL;
		for (task_to_prefetch = starpu_task_list_begin(&worker->tasks_queue->taskq);
		     (task_to_prefetch != starpu_task_list_end(&worker->tasks_queue->taskq) &&
		      nb_added_tasks && hp->nb_remaining_tasks_per_arch_index[worker->arch_index] != 0);
		     task_to_prefetch = starpu_task_list_next(task_to_prefetch))
		{
			/* prefetch from the closest task to the last one */
			starpu_prefetch_task_input_on_node(task_to_prefetch, memory_node);
			nb_added_tasks -= 1;
		}
	}
	return task;
}
struct starpu_sched_policy _starpu_sched_heteroprio_policy =
{
	.init_sched = initialize_heteroprio_policy,
	.deinit_sched = deinitialize_heteroprio_policy,
	.add_workers = add_workers_heteroprio_policy,
	.remove_workers = remove_workers_heteroprio_policy,
	.push_task = push_task_heteroprio_policy,
	.simulate_push_task = NULL,
	.push_task_notify = NULL,
	.pop_task = pop_task_heteroprio_policy,
	.pre_exec_hook = NULL,
	.post_exec_hook = NULL,
	.pop_every_task = NULL,
	.policy_name = "heteroprio",
	.policy_description = "heteroprio",
	.worker_type = STARPU_WORKER_LIST,
};
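/* Usage note (not from this file): since the policy is registered under the
 * name "heteroprio", it can typically be selected at run time with the
 * STARPU_SCHED=heteroprio environment variable, or programmatically when
 * creating a scheduling context. */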