heteroprio.c

/* StarPU --- Runtime system for heterogeneous multicore architectures.
 *
 * Copyright (C) 2015 INRIA
 * Copyright (C) 2016 CNRS
 * Copyright (C) 2016 Uppsala University
 *
 * StarPU is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation; either version 2.1 of the License, or (at
 * your option) any later version.
 *
 * StarPU is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * See the GNU Lesser General Public License in COPYING.LGPL for more details.
 */

/* Distributed queues using performance modeling to assign tasks */

#include <starpu_config.h>
#include <starpu_scheduler.h>
#include <schedulers/starpu_heteroprio.h>
#include <common/fxt.h>
#include <core/task.h>
#include <core/workers.h>
#include <core/debug.h>
#include <sched_policies/fifo_queues.h>
#include <limits.h>

#ifndef DBL_MIN
#define DBL_MIN __DBL_MIN__
#endif
#ifndef DBL_MAX
#define DBL_MAX __DBL_MAX__
#endif

/* A bucket corresponds to a pair of priorities.
 * When a task is pushed with a priority X, it will be stored
 * into the bucket X.
 * All the tasks stored in the fifo should be computable by the archs
 * in valid_archs.
 * For example, if valid_archs = (STARPU_CPU|STARPU_CUDA)
 * then task->cl->where should contain at least (STARPU_CPU|STARPU_CUDA).
 */
struct _heteroprio_bucket
{
	/* The tasks of the current bucket */
	struct _starpu_fifo_taskq* tasks_queue;
	/* The archs that can process the tasks of the current bucket */
	unsigned valid_archs;
	/* The slow factor for each arch */
	float slow_factors_per_index[STARPU_NB_TYPES];
	/* The base arch for the slow factor (the fastest arch for the tasks in the bucket) */
	unsigned factor_base_arch_index;
};
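
/* A concrete example of the rule above, as enforced by the subset check in
 * push_task_heteroprio_policy() below:
 * with valid_archs = (STARPU_CPU|STARPU_CUDA),
 *   - a task with cl->where = (STARPU_CPU|STARPU_CUDA) or
 *     (STARPU_CPU|STARPU_CUDA|STARPU_OPENCL) can go into the bucket,
 *   - a task with cl->where = STARPU_CPU alone cannot, since a CUDA worker
 *     scanning this bucket would be unable to execute it.
 * In bit terms the check is ((valid_archs ^ where) & valid_archs) == 0,
 * i.e. every bit set in valid_archs must also be set in where.
 */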

/* Init a bucket */
static void _heteroprio_bucket_init(struct _heteroprio_bucket* bucket)
{
	memset(bucket, 0, sizeof(*bucket));
	bucket->tasks_queue = _starpu_create_fifo();
}

/* Release a bucket */
static void _heteroprio_bucket_release(struct _heteroprio_bucket* bucket)
{
	STARPU_ASSERT(_starpu_fifo_empty(bucket->tasks_queue) != 0);
	_starpu_destroy_fifo(bucket->tasks_queue);
}

/* A worker is mainly composed of a fifo for the tasks
 * and some direct access to worker properties.
 * The fifo is implemented with an array:
 * to read a task, access tasks_queue[tasks_queue_index]
 * to write a task, access tasks_queue[(tasks_queue_index+tasks_queue_size)%HETEROPRIO_MAX_PREFETCH]
 */
/* ANDRA_MODIF: can use starpu fifo + starpu sched_mutex */
struct _heteroprio_worker_wrapper
{
	unsigned arch_type;
	unsigned arch_index;
	struct _starpu_fifo_taskq *tasks_queue;
};

struct _starpu_heteroprio_data
{
	starpu_pthread_mutex_t policy_mutex;
	struct starpu_bitmap *waiters;
	/* The buckets to store the tasks */
	struct _heteroprio_bucket buckets[STARPU_HETEROPRIO_MAX_PRIO];
	/* The number of buckets for each arch */
	unsigned nb_prio_per_arch_index[STARPU_NB_TYPES];
	/* The mapping to the corresponding buckets */
	unsigned prio_mapping_per_arch_index[STARPU_NB_TYPES][STARPU_HETEROPRIO_MAX_PRIO];
	/* The number of available tasks for a given arch (not prefetched) */
	unsigned nb_remaining_tasks_per_arch_index[STARPU_NB_TYPES];
	/* The total number of tasks in the buckets (not prefetched) */
	unsigned total_tasks_in_buckets;
	/* The total number of prefetched tasks for a given arch */
	unsigned nb_prefetched_tasks_per_arch_index[STARPU_NB_TYPES];
	/* The information for all the workers */
	struct _heteroprio_worker_wrapper workers_heteroprio[STARPU_NMAXWORKERS];
	/* The number of workers for a given arch */
	unsigned nb_workers_per_arch_index[STARPU_NB_TYPES];
};
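
/* How these fields relate, on the default configuration used below (illustrative):
 * prio_mapping_per_arch_index[arch][p] == p for every arch, so a task pushed with
 * priority p lands in buckets[p]; nb_remaining_tasks_per_arch_index[arch] is then
 * incremented for every arch present in buckets[p].valid_archs, and decremented
 * again when a worker of such an arch prefetches the task into its own tasks_queue
 * (which in turn increments nb_prefetched_tasks_per_arch_index for that arch). */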

/** Tell how many priorities there are for a given arch */
void starpu_heteroprio_set_nb_prios(unsigned sched_ctx_id, enum starpu_heteroprio_types arch, unsigned max_prio)
{
	struct _starpu_heteroprio_data *hp = (struct _starpu_heteroprio_data*)starpu_sched_ctx_get_policy_data(sched_ctx_id);
	STARPU_ASSERT(max_prio < STARPU_HETEROPRIO_MAX_PRIO);
	hp->nb_prio_per_arch_index[arch] = max_prio;
}

/** Set the mapping for a given arch prio=>bucket */
inline void starpu_heteroprio_set_mapping(unsigned sched_ctx_id, enum starpu_heteroprio_types arch, unsigned source_prio, unsigned dest_bucket_id)
{
	STARPU_ASSERT(dest_bucket_id < STARPU_HETEROPRIO_MAX_PRIO);
	struct _starpu_heteroprio_data *hp = (struct _starpu_heteroprio_data*)starpu_sched_ctx_get_policy_data(sched_ctx_id);
	hp->prio_mapping_per_arch_index[arch][source_prio] = dest_bucket_id;
	hp->buckets[dest_bucket_id].valid_archs |= starpu_heteroprio_types_to_arch[arch];
	_STARPU_DEBUG("Adding arch %d to bucket %d\n", arch, dest_bucket_id);
}

/** Tell which arch is the fastest for the tasks of a bucket (optional) */
inline void starpu_heteroprio_set_faster_arch(unsigned sched_ctx_id, enum starpu_heteroprio_types arch, unsigned bucket_id)
{
	STARPU_ASSERT(bucket_id < STARPU_HETEROPRIO_MAX_PRIO);
	struct _starpu_heteroprio_data *hp = (struct _starpu_heteroprio_data*)starpu_sched_ctx_get_policy_data(sched_ctx_id);
	hp->buckets[bucket_id].factor_base_arch_index = arch;
	hp->buckets[bucket_id].slow_factors_per_index[arch] = 0;
}

/** Tell how slow an arch is for the tasks of a bucket (optional) */
inline void starpu_heteroprio_set_arch_slow_factor(unsigned sched_ctx_id, enum starpu_heteroprio_types arch, unsigned bucket_id, float slow_factor)
{
	STARPU_ASSERT(bucket_id < STARPU_HETEROPRIO_MAX_PRIO);
	struct _starpu_heteroprio_data *hp = (struct _starpu_heteroprio_data*)starpu_sched_ctx_get_policy_data(sched_ctx_id);
	hp->buckets[bucket_id].slow_factors_per_index[arch] = slow_factor;
}
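
/* A minimal usage sketch of the functions above: a user-supplied init callback
 * (the one retrieved by starpu_sched_ctx_get_sched_policy_init() in
 * initialize_heteroprio_policy() below) could describe two priority levels
 * shared by CPU and CUDA workers, with CUDA declared fastest for bucket 0 and
 * CPUs considered 4x slower on it. The function name and the numeric values
 * are purely illustrative, not part of this scheduler. */
#if 0
static void example_init_sched(unsigned sched_ctx_id)
{
	/* Two priority levels for each arch */
	starpu_heteroprio_set_nb_prios(sched_ctx_id, STARPU_CPU_IDX, 2);
	starpu_heteroprio_set_nb_prios(sched_ctx_id, STARPU_CUDA_IDX, 2);
	/* Direct mapping: priority X goes to bucket X for both archs */
	starpu_heteroprio_set_mapping(sched_ctx_id, STARPU_CPU_IDX, 0, 0);
	starpu_heteroprio_set_mapping(sched_ctx_id, STARPU_CUDA_IDX, 0, 0);
	starpu_heteroprio_set_mapping(sched_ctx_id, STARPU_CPU_IDX, 1, 1);
	starpu_heteroprio_set_mapping(sched_ctx_id, STARPU_CUDA_IDX, 1, 1);
	/* CUDA is the fastest arch for bucket 0; a CPU is assumed 4x slower on it */
	starpu_heteroprio_set_faster_arch(sched_ctx_id, STARPU_CUDA_IDX, 0);
	starpu_heteroprio_set_arch_slow_factor(sched_ctx_id, STARPU_CPU_IDX, 0, 4.0f);
}
#endif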

/** If the user does not provide an init callback, map each priority to one bucket shared by all architectures */
static inline void default_init_sched(unsigned sched_ctx_id)
{
	int min_prio = starpu_sched_ctx_get_min_priority(sched_ctx_id);
	int max_prio = starpu_sched_ctx_get_max_priority(sched_ctx_id);
	// By default each type of device uses one bucket per priority and no slow factor
#ifdef STARPU_USE_CPU
	starpu_heteroprio_set_nb_prios(sched_ctx_id, STARPU_CPU_IDX, max_prio-min_prio+1);
#endif
#ifdef STARPU_USE_CUDA
	starpu_heteroprio_set_nb_prios(sched_ctx_id, STARPU_CUDA_IDX, max_prio-min_prio+1);
#endif
#ifdef STARPU_USE_OPENCL
	starpu_heteroprio_set_nb_prios(sched_ctx_id, STARPU_OPENCL_IDX, max_prio-min_prio+1);
#endif
#ifdef STARPU_USE_MIC
	starpu_heteroprio_set_nb_prios(sched_ctx_id, STARPU_MIC_IDX, max_prio-min_prio+1);
#endif
#ifdef STARPU_USE_SCC
	starpu_heteroprio_set_nb_prios(sched_ctx_id, STARPU_SCC_IDX, max_prio-min_prio+1);
#endif
	// Direct mapping
	int prio;
	for(prio=min_prio ; prio<=max_prio ; prio++)
	{
#ifdef STARPU_USE_CPU
		starpu_heteroprio_set_mapping(sched_ctx_id, STARPU_CPU_IDX, prio, prio);
#endif
#ifdef STARPU_USE_CUDA
		starpu_heteroprio_set_mapping(sched_ctx_id, STARPU_CUDA_IDX, prio, prio);
#endif
#ifdef STARPU_USE_OPENCL
		starpu_heteroprio_set_mapping(sched_ctx_id, STARPU_OPENCL_IDX, prio, prio);
#endif
#ifdef STARPU_USE_MIC
		starpu_heteroprio_set_mapping(sched_ctx_id, STARPU_MIC_IDX, prio, prio);
#endif
#ifdef STARPU_USE_SCC
		starpu_heteroprio_set_mapping(sched_ctx_id, STARPU_SCC_IDX, prio, prio);
#endif
	}
}

static void initialize_heteroprio_policy(unsigned sched_ctx_id)
{
	/* Alloc the scheduler data */
	struct _starpu_heteroprio_data *hp;
	_STARPU_MALLOC(hp, sizeof(struct _starpu_heteroprio_data));
	memset(hp, 0, sizeof(*hp));

	hp->waiters = starpu_bitmap_create();

	starpu_sched_ctx_set_policy_data(sched_ctx_id, (void*)hp);

	STARPU_PTHREAD_MUTEX_INIT(&hp->policy_mutex, NULL);

	unsigned idx_prio;
	for(idx_prio = 0; idx_prio < STARPU_HETEROPRIO_MAX_PRIO; ++idx_prio)
		_heteroprio_bucket_init(&hp->buckets[idx_prio]);

	void (*init_sched)(unsigned) = starpu_sched_ctx_get_sched_policy_init(sched_ctx_id);
	if(init_sched)
		init_sched(sched_ctx_id);
	else
		default_init_sched(sched_ctx_id);

	/* Ensure that the information has been correctly filled in */
	unsigned check_all_archs[STARPU_HETEROPRIO_MAX_PRIO];
	memset(check_all_archs, 0, sizeof(unsigned)*STARPU_HETEROPRIO_MAX_PRIO);
	unsigned arch_index;
	for(arch_index = 0; arch_index < STARPU_NB_TYPES; ++arch_index)
	{
		STARPU_ASSERT(hp->nb_prio_per_arch_index[arch_index] <= STARPU_HETEROPRIO_MAX_PRIO);

		unsigned check_archs[STARPU_HETEROPRIO_MAX_PRIO];
		memset(check_archs, 0, sizeof(unsigned)*STARPU_HETEROPRIO_MAX_PRIO);

		for(idx_prio = 0; idx_prio < hp->nb_prio_per_arch_index[arch_index]; ++idx_prio)
		{
			const unsigned mapped_prio = hp->prio_mapping_per_arch_index[arch_index][idx_prio];
			STARPU_ASSERT(mapped_prio <= STARPU_HETEROPRIO_MAX_PRIO);
			STARPU_ASSERT(hp->buckets[mapped_prio].slow_factors_per_index[arch_index] >= 0.0);
			STARPU_ASSERT(hp->buckets[mapped_prio].valid_archs & starpu_heteroprio_types_to_arch[arch_index]);
			check_archs[mapped_prio] = 1;
			check_all_archs[mapped_prio] += 1;
		}
		for(idx_prio = 0; idx_prio < STARPU_HETEROPRIO_MAX_PRIO; ++idx_prio)
		{
			/* Ensure that the current arch uses this bucket, or that someone else can use it */
			STARPU_ASSERT(check_archs[idx_prio] == 1 || hp->buckets[idx_prio].valid_archs == 0
			              || (hp->buckets[idx_prio].valid_archs & ~starpu_heteroprio_types_to_arch[arch_index]) != 0);
		}
	}
	/* Ensure that if, for example, valid_archs = (STARPU_CPU|STARPU_CUDA) then check_all_archs[] = 2 */
	for(idx_prio = 0; idx_prio < STARPU_HETEROPRIO_MAX_PRIO; ++idx_prio)
	{
		unsigned nb_arch_on_bucket = 0;
		for(arch_index = 0; arch_index < STARPU_NB_TYPES; ++arch_index)
		{
			if(hp->buckets[idx_prio].valid_archs & starpu_heteroprio_types_to_arch[arch_index])
			{
				nb_arch_on_bucket += 1;
			}
		}
		STARPU_ASSERT_MSG(check_all_archs[idx_prio] == nb_arch_on_bucket, "check_all_archs[idx_prio(%u)] = %u != nb_arch_on_bucket = %u\n", idx_prio, check_all_archs[idx_prio], nb_arch_on_bucket);
	}
}

static void deinitialize_heteroprio_policy(unsigned sched_ctx_id)
{
	struct _starpu_heteroprio_data *hp = (struct _starpu_heteroprio_data*)starpu_sched_ctx_get_policy_data(sched_ctx_id);

	/* Ensure there are no more tasks */
	STARPU_ASSERT(hp->total_tasks_in_buckets == 0);
	unsigned arch_index;
	for(arch_index = 0; arch_index < STARPU_NB_TYPES; ++arch_index)
	{
		STARPU_ASSERT(hp->nb_remaining_tasks_per_arch_index[arch_index] == 0);
		STARPU_ASSERT(hp->nb_prefetched_tasks_per_arch_index[arch_index] == 0);
	}

	unsigned idx_prio;
	for(idx_prio = 0; idx_prio < STARPU_HETEROPRIO_MAX_PRIO; ++idx_prio)
	{
		_heteroprio_bucket_release(&hp->buckets[idx_prio]);
	}

	starpu_bitmap_destroy(hp->waiters);

	STARPU_PTHREAD_MUTEX_DESTROY(&hp->policy_mutex);
	free(hp);
}

static void add_workers_heteroprio_policy(unsigned sched_ctx_id, int *workerids, unsigned nworkers)
{
	struct _starpu_heteroprio_data *hp = (struct _starpu_heteroprio_data*)starpu_sched_ctx_get_policy_data(sched_ctx_id);

	unsigned i;
	for (i = 0; i < nworkers; i++)
	{
		int workerid = workerids[i];
		memset(&hp->workers_heteroprio[workerid], 0, sizeof(hp->workers_heteroprio[workerid]));
		/* if the worker already belonged to this context,
		   the queue and the synchronization variables have already been initialized */
		if(hp->workers_heteroprio[workerid].tasks_queue == NULL)
		{
			hp->workers_heteroprio[workerid].tasks_queue = _starpu_create_fifo();
			switch(starpu_worker_get_type(workerid))
			{
			case STARPU_CPU_WORKER:
				hp->workers_heteroprio[workerid].arch_type = STARPU_CPU;
				hp->workers_heteroprio[workerid].arch_index = STARPU_CPU_IDX;
				break;
			case STARPU_CUDA_WORKER:
				hp->workers_heteroprio[workerid].arch_type = STARPU_CUDA;
				hp->workers_heteroprio[workerid].arch_index = STARPU_CUDA_IDX;
				break;
			case STARPU_OPENCL_WORKER:
				hp->workers_heteroprio[workerid].arch_type = STARPU_OPENCL;
				hp->workers_heteroprio[workerid].arch_index = STARPU_OPENCL_IDX;
				break;
			case STARPU_MIC_WORKER:
				hp->workers_heteroprio[workerid].arch_type = STARPU_MIC;
				hp->workers_heteroprio[workerid].arch_index = STARPU_MIC_IDX;
				break;
			case STARPU_SCC_WORKER:
				hp->workers_heteroprio[workerid].arch_type = STARPU_SCC;
				hp->workers_heteroprio[workerid].arch_index = STARPU_SCC_IDX;
				break;
			default:
				STARPU_ASSERT(0);
			}
		}
		hp->nb_workers_per_arch_index[hp->workers_heteroprio[workerid].arch_index]++;
	}
}

static void remove_workers_heteroprio_policy(unsigned sched_ctx_id, int *workerids, unsigned nworkers)
{
	struct _starpu_heteroprio_data *hp = (struct _starpu_heteroprio_data*)starpu_sched_ctx_get_policy_data(sched_ctx_id);

	unsigned i;
	for (i = 0; i < nworkers; i++)
	{
		int workerid = workerids[i];
		if(hp->workers_heteroprio[workerid].tasks_queue != NULL)
		{
			_starpu_destroy_fifo(hp->workers_heteroprio[workerid].tasks_queue);
			hp->workers_heteroprio[workerid].tasks_queue = NULL;
		}
	}
}

/* Push a new task (simply store it and update counters) */
static int push_task_heteroprio_policy(struct starpu_task *task)
{
	unsigned sched_ctx_id = task->sched_ctx;
	struct _starpu_heteroprio_data *hp = (struct _starpu_heteroprio_data*)starpu_sched_ctx_get_policy_data(sched_ctx_id);

	/* Only one worker at a time uses heteroprio */
	STARPU_PTHREAD_MUTEX_LOCK(&hp->policy_mutex);

	/* Retrieve the correct bucket */
	STARPU_ASSERT(task->priority < STARPU_HETEROPRIO_MAX_PRIO);
	struct _heteroprio_bucket* bucket = &hp->buckets[task->priority];
	/* Ensure that any worker that checks that list can compute the task */
	STARPU_ASSERT_MSG(bucket->valid_archs, "The bucket %d does not have any archs\n", task->priority);
	STARPU_ASSERT(((bucket->valid_archs ^ task->cl->where) & bucket->valid_archs) == 0);

	/* Save the task */
	_starpu_fifo_push_back_task(bucket->tasks_queue,task);

	/* Increment the counters */
	unsigned arch_index;
	for(arch_index = 0; arch_index < STARPU_NB_TYPES; ++arch_index)
	{
		/* We test the archs on the bucket and not on task->cl->where since the latter may be more restrictive */
		if(bucket->valid_archs & starpu_heteroprio_types_to_arch[arch_index])
			hp->nb_remaining_tasks_per_arch_index[arch_index] += 1;
	}
	hp->total_tasks_in_buckets += 1;

	starpu_push_task_end(task);

	/* If there were no tasks, the workers may be blocked */
	/* Wake people waiting for a task */
	struct starpu_worker_collection *workers = starpu_sched_ctx_get_worker_collection(sched_ctx_id);
	struct starpu_sched_ctx_iterator it;
#ifndef STARPU_NON_BLOCKING_DRIVERS
	char dowake[STARPU_NMAXWORKERS] = { 0 };
#endif
	workers->init_iterator(workers, &it);
	while(workers->has_next(workers, &it))
	{
		unsigned worker = workers->get_next(workers, &it);
#ifdef STARPU_NON_BLOCKING_DRIVERS
		if (!starpu_bitmap_get(hp->waiters, worker))
			/* This worker is not waiting for a task */
			continue;
#endif
		if (starpu_worker_can_execute_task_first_impl(worker, task, NULL))
		{
			/* It can execute this one, tell it! */
#ifdef STARPU_NON_BLOCKING_DRIVERS
			starpu_bitmap_unset(hp->waiters, worker);
			/* We really woke at least somebody, no need to wake somebody else */
			break;
#else
			dowake[worker] = 1;
#endif
		}
	}
	/* Let the task free */
	STARPU_PTHREAD_MUTEX_UNLOCK(&hp->policy_mutex);

#ifndef STARPU_NON_BLOCKING_DRIVERS
	/* Now that we have a list of potential workers, try to wake one */
	workers->init_iterator(workers, &it);
	while(workers->has_next(workers, &it))
	{
		unsigned worker = workers->get_next(workers, &it);
		if (dowake[worker])
			if (starpu_wake_worker(worker))
				break; // wake up a single worker
	}
#endif
	return 0;
}

static struct starpu_task *pop_task_heteroprio_policy(unsigned sched_ctx_id)
{
	const unsigned workerid = starpu_worker_get_id_check();
	struct _starpu_heteroprio_data *hp = (struct _starpu_heteroprio_data*)starpu_sched_ctx_get_policy_data(sched_ctx_id);
	struct _heteroprio_worker_wrapper* worker = &hp->workers_heteroprio[workerid];

#ifdef STARPU_NON_BLOCKING_DRIVERS
	/* If no task is available for this worker's arch, neither in the buckets
	   nor in its own queue nor prefetched for its arch, just return NULL */
	if (!STARPU_RUNNING_ON_VALGRIND
	    && (hp->total_tasks_in_buckets == 0 || hp->nb_remaining_tasks_per_arch_index[worker->arch_index] == 0)
	    && worker->tasks_queue->ntasks == 0 && hp->nb_prefetched_tasks_per_arch_index[worker->arch_index] == 0)
	{
		return NULL;
	}
	if (!STARPU_RUNNING_ON_VALGRIND && starpu_bitmap_get(hp->waiters, workerid))
	{
		/* Nobody woke us, avoid bothering the mutex */
		return NULL;
	}
#endif
	starpu_pthread_mutex_t *worker_sched_mutex;
	starpu_pthread_cond_t *worker_sched_cond;
	starpu_worker_get_sched_condition(workerid, &worker_sched_mutex, &worker_sched_cond);

	/* Note: release this mutex before taking the victim mutex, to avoid a deadlock */
	STARPU_PTHREAD_MUTEX_UNLOCK_SCHED(worker_sched_mutex);

	STARPU_PTHREAD_MUTEX_LOCK(&hp->policy_mutex);

	/* Keep track of the newly added tasks to perform a real prefetch on the node */
	unsigned nb_added_tasks = 0;

	/* Check that some tasks are available for the current worker arch */
	if( hp->nb_remaining_tasks_per_arch_index[worker->arch_index] != 0 )
	{
		/* Ideally we would like to fill the prefetch array */
		unsigned nb_tasks_to_prefetch = (STARPU_HETEROPRIO_MAX_PREFETCH-worker->tasks_queue->ntasks);
		/* But there may be fewer tasks than that! */
		if(nb_tasks_to_prefetch > hp->nb_remaining_tasks_per_arch_index[worker->arch_index])
		{
			nb_tasks_to_prefetch = hp->nb_remaining_tasks_per_arch_index[worker->arch_index];
		}
		/* And if there are fewer tasks than workers, take at most one */
		if(hp->nb_remaining_tasks_per_arch_index[worker->arch_index] < starpu_sched_ctx_get_nworkers(sched_ctx_id))
		{
			if(worker->tasks_queue->ntasks == 0)
				nb_tasks_to_prefetch = 1;
			else
				nb_tasks_to_prefetch = 0;
		}

		unsigned idx_prio, arch_index;
		/* We iterate until we find all the tasks we need */
		for(idx_prio = 0; nb_tasks_to_prefetch && idx_prio < hp->nb_prio_per_arch_index[worker->arch_index]; ++idx_prio)
		{
			/* Retrieve the bucket using the mapping */
			struct _heteroprio_bucket* bucket = &hp->buckets[hp->prio_mapping_per_arch_index[worker->arch_index][idx_prio]];
			/* Ensure we can compute tasks from this bucket */
			STARPU_ASSERT(bucket->valid_archs & worker->arch_type);
			/* Take nb_tasks_to_prefetch tasks if possible */
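			/* An illustration of the slow-factor guard below (values are hypothetical):
			 * if bucket->factor_base_arch_index is the CUDA arch, there are 2 CUDA
			 * workers, and slow_factors_per_index[CPU] = 4.0, then a CPU worker only
			 * takes a task from this bucket while ntasks/2 >= 4.0, i.e. while at
			 * least 8 tasks remain; otherwise the tasks are left for the faster arch. */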
			while(!_starpu_fifo_empty(bucket->tasks_queue) && nb_tasks_to_prefetch &&
			      (bucket->factor_base_arch_index == 0 ||
			       worker->arch_index == bucket->factor_base_arch_index ||
			       (((float)bucket->tasks_queue->ntasks)/((float)hp->nb_workers_per_arch_index[bucket->factor_base_arch_index])) >= bucket->slow_factors_per_index[worker->arch_index]))
			{
				struct starpu_task* task = _starpu_fifo_pop_local_task(bucket->tasks_queue);
				STARPU_ASSERT(starpu_worker_can_execute_task(workerid, task, 0));
				/* Save the task */
				STARPU_AYU_ADDTOTASKQUEUE(_starpu_get_job_associated_to_task(task)->job_id, workerid);
				_starpu_fifo_push_task(worker->tasks_queue, task);
				/* Update the general counters */
				hp->nb_prefetched_tasks_per_arch_index[worker->arch_index] += 1;
				hp->total_tasks_in_buckets -= 1;
				for(arch_index = 0; arch_index < STARPU_NB_TYPES; ++arch_index)
				{
					/* We test the archs on the bucket and not on task->cl->where since the latter may be more restrictive */
					if(bucket->valid_archs & starpu_heteroprio_types_to_arch[arch_index])
					{
						hp->nb_remaining_tasks_per_arch_index[arch_index] -= 1;
					}
				}
				/* Decrease the number of tasks to find */
				nb_tasks_to_prefetch -= 1;
				nb_added_tasks += 1;
				// TODO starpu_prefetch_task_input_on_node(task, workerid);
			}
		}
	}

	struct starpu_task* task = NULL;

	/* The worker has some tasks in its queue */
	if(worker->tasks_queue->ntasks)
	{
		task = _starpu_fifo_pop_task(worker->tasks_queue, workerid);
		hp->nb_prefetched_tasks_per_arch_index[worker->arch_index] -= 1;
	}
	/* Otherwise look whether we can steal some work */
	else if(hp->nb_prefetched_tasks_per_arch_index[worker->arch_index])
	{
		/* If STARPU_HETEROPRIO_MAX_PREFETCH==1 it should not be possible to steal work */
		STARPU_ASSERT(STARPU_HETEROPRIO_MAX_PREFETCH != 1);

		struct starpu_worker_collection *workers = starpu_sched_ctx_get_worker_collection(sched_ctx_id);
		struct starpu_sched_ctx_iterator it;
		workers->init_iterator(workers, &it);

		unsigned victim;
		unsigned current_worker;

		/* Start stealing from just after ourselves */
		while(workers->has_next(workers, &it))
		{
			current_worker = workers->get_next(workers, &it);
			if(current_worker == workerid)
				break;
		}

		/* Circular loop */
		while (1)
		{
			if (!workers->has_next(workers, &it))
			{
				/* End of the list, restart from the beginning */
				workers->init_iterator(workers, &it);
			}
			while(workers->has_next(workers, &it))
			{
				victim = workers->get_next(workers, &it);
				/* When we come back to ourselves, we're done trying to find work */
				if(victim == workerid)
					goto done;
				/* If it is the same arch and there is a task to steal */
				if(hp->workers_heteroprio[victim].arch_index == worker->arch_index
				   && hp->workers_heteroprio[victim].tasks_queue->ntasks)
				{
					starpu_pthread_mutex_t *victim_sched_mutex;
					starpu_pthread_cond_t *victim_sched_cond;
					starpu_worker_get_sched_condition(victim, &victim_sched_mutex, &victim_sched_cond);
					/* Ensure the worker is not currently prefetching its data */
					STARPU_PTHREAD_MUTEX_LOCK_SCHED(victim_sched_mutex);
					if(hp->workers_heteroprio[victim].arch_index == worker->arch_index
					   && hp->workers_heteroprio[victim].tasks_queue->ntasks)
					{
						/* Steal the last added task */
						task = _starpu_fifo_pop_task(hp->workers_heteroprio[victim].tasks_queue, workerid);
						/* We stole a task, update the global counter */
						hp->nb_prefetched_tasks_per_arch_index[hp->workers_heteroprio[victim].arch_index] -= 1;
						STARPU_PTHREAD_MUTEX_UNLOCK_SCHED(victim_sched_mutex);
						goto done;
					}
					STARPU_PTHREAD_MUTEX_UNLOCK_SCHED(victim_sched_mutex);
				}
			}
		}
done:		;
	}

	if (!task)
	{
		/* Tell pushers that we are waiting for tasks */
		starpu_bitmap_set(hp->waiters, workerid);
	}

	STARPU_PTHREAD_MUTEX_UNLOCK(&hp->policy_mutex);
	STARPU_PTHREAD_MUTEX_LOCK_SCHED(worker_sched_mutex);

	if(task)
	{
		unsigned child_sched_ctx = starpu_sched_ctx_worker_is_master_for_child_ctx(workerid, sched_ctx_id);
		if(child_sched_ctx != STARPU_NMAX_SCHED_CTXS)
		{
			starpu_sched_ctx_move_task_to_ctx(task, child_sched_ctx, 1, 1);
			starpu_sched_ctx_revert_task_counters(sched_ctx_id, task->flops);
			return NULL;
		}
	}

	/* If we have a task (task), there may be some freshly added tasks in our queue
	   (worker->tasks_queue->ntasks), counted by nb_added_tasks, whose data we should prefetch */
	if(task && worker->tasks_queue->ntasks && nb_added_tasks && starpu_get_prefetch_flag())
	{
		const unsigned memory_node = starpu_worker_get_memory_node(workerid);
		/* TODO (berenger): iterate in the other direction */
		struct starpu_task *task_to_prefetch = NULL;
		for (task_to_prefetch = starpu_task_list_begin(&worker->tasks_queue->taskq);
		     (task_to_prefetch != starpu_task_list_end(&worker->tasks_queue->taskq) &&
		      nb_added_tasks && hp->nb_remaining_tasks_per_arch_index[worker->arch_index] != 0);
		     task_to_prefetch = starpu_task_list_next(task_to_prefetch))
		{
			/* Prefetch from the task closest to the end */
			starpu_prefetch_task_input_on_node(task_to_prefetch, memory_node);
			nb_added_tasks -= 1;
		}
	}

	return task;
}

struct starpu_sched_policy _starpu_sched_heteroprio_policy =
{
	.init_sched = initialize_heteroprio_policy,
	.deinit_sched = deinitialize_heteroprio_policy,
	.add_workers = add_workers_heteroprio_policy,
	.remove_workers = remove_workers_heteroprio_policy,
	.push_task = push_task_heteroprio_policy,
	.simulate_push_task = NULL,
	.push_task_notify = NULL,
	.pop_task = pop_task_heteroprio_policy,
	.pre_exec_hook = NULL,
	.post_exec_hook = NULL,
	.pop_every_task = NULL,
	.policy_name = "heteroprio",
	.policy_description = "heteroprio",
	.worker_type = STARPU_WORKER_LIST,
};
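
/* Usage note (sketch): since the policy is registered under the name
 * "heteroprio", an application can typically select it either through the
 * STARPU_SCHED environment variable (e.g. STARPU_SCHED=heteroprio ./app) or by
 * setting conf.sched_policy_name = "heteroprio" in struct starpu_conf before
 * calling starpu_init(&conf); the exact selection mechanism depends on the
 * StarPU version. */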