/* parallel_eager.c */
  1. /* StarPU --- Runtime system for heterogeneous multicore architectures.
  2. *
  3. * Copyright (C) 2011-2016 Université de Bordeaux
  4. * Copyright (C) 2011 Télécom-SudParis
  5. * Copyright (C) 2011-2013 INRIA
  6. *
  7. * StarPU is free software; you can redistribute it and/or modify
  8. * it under the terms of the GNU Lesser General Public License as published by
  9. * the Free Software Foundation; either version 2.1 of the License, or (at
  10. * your option) any later version.
  11. *
  12. * StarPU is distributed in the hope that it will be useful, but
  13. * WITHOUT ANY WARRANTY; without even the implied warranty of
  14. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
  15. *
  16. * See the GNU Lesser General Public License in COPYING.LGPL for more details.
  17. */
  18. #include <sched_policies/fifo_queues.h>
  19. #include <core/detect_combined_workers.h>
  20. #include <starpu_scheduler.h>
  21. #include <core/workers.h>
/* Per-scheduling-context state of the parallel eager ("peager") policy. */
struct _starpu_peager_data
{
	/* Central queue that master workers pop tasks from. */
	struct _starpu_fifo_taskq *fifo;
	/* Per-worker queues: a master pushes task aliases into the local
	 * queues of its slaves when a parallel task comes. */
	struct _starpu_fifo_taskq *local_fifo[STARPU_NMAXWORKERS];
	/* master_id[w] is the id of the master worker of basic worker w
	 * (a worker is its own master when it leads its combination). */
	int master_id[STARPU_NMAXWORKERS];
	/* Serializes access to the central fifo. */
	starpu_pthread_mutex_t policy_mutex;
};
#define STARPU_NMAXCOMBINED_WORKERS 520
/* instead of STARPU_NMAXCOMBINED_WORKERS, we should use some "MAX combination .."*/
/* possible_combinations_cnt[w]: number of worker combinations that
 * contain basic worker w (including the trivial size-1 combination). */
static int possible_combinations_cnt[STARPU_NMAXWORKERS];
/* possible_combinations[w][k]: id of the k-th (possibly combined) worker
 * containing basic worker w. */
static int possible_combinations[STARPU_NMAXWORKERS][STARPU_NMAXCOMBINED_WORKERS];
/* possible_combinations_size[w][k]: number of basic workers in that
 * k-th combination. */
static int possible_combinations_size[STARPU_NMAXWORKERS][STARPU_NMAXCOMBINED_WORKERS];
/* FIXME: this policy does not work with several scheduling contexts, because
 * the combined workers are built from the workers available to the whole
 * program, not from the workers of each context. */
  37. static void peager_add_workers(unsigned sched_ctx_id, int *workerids, unsigned nworkers)
  38. {
  39. _starpu_sched_find_worker_combinations(workerids, nworkers);
  40. struct _starpu_peager_data *data = (struct _starpu_peager_data*)starpu_sched_ctx_get_policy_data(sched_ctx_id);
  41. unsigned nbasic_workers = starpu_worker_get_count();
  42. unsigned ncombined_workers= starpu_combined_worker_get_count();
  43. unsigned workerid, i;
  44. /* Find the master of each worker. We first assign the worker as its
  45. * own master, and then iterate over the different worker combinations
  46. * to find the biggest combination containing this worker. */
  47. for(i = 0; i < nworkers; i++)
  48. {
  49. workerid = workerids[i];
  50. starpu_sched_ctx_worker_shares_tasks_lists(workerid, sched_ctx_id);
  51. int cnt = possible_combinations_cnt[workerid]++;
  52. possible_combinations[workerid][cnt] = workerid;
  53. possible_combinations_size[workerid][cnt] = 1;
  54. data->master_id[workerid] = workerid;
  55. }
  56. for (i = 0; i < ncombined_workers; i++)
  57. {
  58. workerid = nbasic_workers + i;
  59. /* Note that we ASSUME that the workers are sorted by size ! */
  60. int *workers;
  61. int size;
  62. starpu_combined_worker_get_description(workerid, &size, &workers);
  63. int master = workers[0];
  64. int j;
  65. for (j = 0; j < size; j++)
  66. {
  67. if (data->master_id[workers[j]] > master)
  68. data->master_id[workers[j]] = master;
  69. int cnt = possible_combinations_cnt[workers[j]]++;
  70. possible_combinations[workers[j]][cnt] = workerid;
  71. possible_combinations_size[workers[j]][cnt] = size;
  72. }
  73. }
  74. for(i = 0; i < nworkers; i++)
  75. {
  76. workerid = workerids[i];
  77. /* slaves pick up tasks from their local queue, their master
  78. * will put tasks directly in that local list when a parallel
  79. * tasks comes. */
  80. data->local_fifo[workerid] = _starpu_create_fifo();
  81. }
  82. #if 0
  83. for(i = 0; i < nworkers; i++)
  84. {
  85. workerid = workerids[i];
  86. fprintf(stderr, "MASTER of %d = %d\n", workerid, master_id[workerid]);
  87. }
  88. #endif
  89. }
  90. static void peager_remove_workers(unsigned sched_ctx_id, int *workerids, unsigned nworkers)
  91. {
  92. struct _starpu_peager_data *data = (struct _starpu_peager_data*)starpu_sched_ctx_get_policy_data(sched_ctx_id);
  93. int workerid;
  94. unsigned i;
  95. for(i = 0; i < nworkers; i++)
  96. {
  97. workerid = workerids[i];
  98. if(!starpu_worker_is_combined_worker(workerid))
  99. _starpu_destroy_fifo(data->local_fifo[workerid]);
  100. }
  101. }
  102. static void initialize_peager_policy(unsigned sched_ctx_id)
  103. {
  104. struct _starpu_peager_data *data = (struct _starpu_peager_data*)malloc(sizeof(struct _starpu_peager_data));
  105. /* masters pick tasks from that queue */
  106. data->fifo = _starpu_create_fifo();
  107. starpu_sched_ctx_set_policy_data(sched_ctx_id, (void*)data);
  108. STARPU_PTHREAD_MUTEX_INIT(&data->policy_mutex, NULL);
  109. }
  110. static void deinitialize_peager_policy(unsigned sched_ctx_id)
  111. {
  112. /* TODO check that there is no task left in the queue */
  113. struct _starpu_peager_data *data = (struct _starpu_peager_data*)starpu_sched_ctx_get_policy_data(sched_ctx_id);
  114. /* deallocate the job queue */
  115. _starpu_destroy_fifo(data->fifo);
  116. STARPU_PTHREAD_MUTEX_DESTROY(&data->policy_mutex);
  117. free(data);
  118. }
  119. static int push_task_peager_policy(struct starpu_task *task)
  120. {
  121. unsigned sched_ctx_id = task->sched_ctx;
  122. int ret_val = -1;
  123. struct _starpu_peager_data *data = (struct _starpu_peager_data*)starpu_sched_ctx_get_policy_data(sched_ctx_id);
  124. STARPU_PTHREAD_MUTEX_LOCK(&data->policy_mutex);
  125. ret_val = _starpu_fifo_push_task(data->fifo, task);
  126. starpu_push_task_end(task);
  127. STARPU_PTHREAD_MUTEX_UNLOCK(&data->policy_mutex);
  128. #ifndef STARPU_NON_BLOCKING_DRIVERS
  129. /* if there are no tasks block */
  130. /* wake people waiting for a task */
  131. struct starpu_worker_collection *workers = starpu_sched_ctx_get_worker_collection(sched_ctx_id);
  132. struct starpu_sched_ctx_iterator it;
  133. int worker = -1;
  134. workers->init_iterator(workers, &it);
  135. while(workers->has_next(workers, &it))
  136. {
  137. worker = workers->get_next(workers, &it);
  138. int master = data->master_id[worker];
  139. /* If this is not a CPU or a MIC, then the worker simply grabs tasks from the fifo */
  140. if ((!starpu_worker_is_combined_worker(worker) &&
  141. starpu_worker_get_type(worker) != STARPU_MIC_WORKER &&
  142. starpu_worker_get_type(worker) != STARPU_CPU_WORKER)
  143. || (master == worker))
  144. starpu_wake_worker(worker);
  145. }
  146. #endif
  147. return ret_val;
  148. }
  149. static struct starpu_task *pop_task_peager_policy(unsigned sched_ctx_id)
  150. {
  151. struct _starpu_peager_data *data = (struct _starpu_peager_data*)starpu_sched_ctx_get_policy_data(sched_ctx_id);
  152. int workerid = starpu_worker_get_id();
  153. /* If this is not a CPU or a MIC, then the worker simply grabs tasks from the fifo */
  154. if (starpu_worker_get_type(workerid) != STARPU_CPU_WORKER && starpu_worker_get_type(workerid) != STARPU_MIC_WORKER)
  155. {
  156. struct starpu_task *task = NULL;
  157. STARPU_PTHREAD_MUTEX_LOCK(&data->policy_mutex);
  158. task = _starpu_fifo_pop_task(data->fifo, workerid);
  159. STARPU_PTHREAD_MUTEX_UNLOCK(&data->policy_mutex);
  160. return task;
  161. }
  162. int master = data->master_id[workerid];
  163. //_STARPU_DEBUG("workerid:%d, master:%d\n",workerid,master);
  164. if (master == workerid)
  165. {
  166. /* The worker is a master */
  167. struct starpu_task *task = NULL;
  168. STARPU_PTHREAD_MUTEX_LOCK(&data->policy_mutex);
  169. task = _starpu_fifo_pop_task(data->fifo, workerid);
  170. STARPU_PTHREAD_MUTEX_UNLOCK(&data->policy_mutex);
  171. if (!task)
  172. return NULL;
  173. /* Find the largest compatible worker combination */
  174. int best_size = -1;
  175. int best_workerid = -1;
  176. int i;
  177. for (i = 0; i < possible_combinations_cnt[master]; i++)
  178. {
  179. if (possible_combinations_size[workerid][i] > best_size)
  180. {
  181. int combined_worker = possible_combinations[workerid][i];
  182. if (starpu_combined_worker_can_execute_task(combined_worker, task, 0))
  183. {
  184. best_size = possible_combinations_size[workerid][i];
  185. best_workerid = combined_worker;
  186. }
  187. }
  188. }
  189. /* In case nobody can execute this task, we let the master
  190. * worker take it anyway, so that it can discard it afterward.
  191. * */
  192. if (best_workerid == -1)
  193. return task;
  194. /* Is this a basic worker or a combined worker ? */
  195. int nbasic_workers = (int)starpu_worker_get_count();
  196. int is_basic_worker = (best_workerid < nbasic_workers);
  197. if (is_basic_worker)
  198. {
  199. /* The master is alone */
  200. return task;
  201. }
  202. else
  203. {
  204. starpu_parallel_task_barrier_init(task, best_workerid);
  205. int worker_size = 0;
  206. int *combined_workerid;
  207. starpu_combined_worker_get_description(best_workerid, &worker_size, &combined_workerid);
  208. /* Dispatch task aliases to the different slaves */
  209. for (i = 1; i < worker_size; i++)
  210. {
  211. struct starpu_task *alias = starpu_task_dup(task);
  212. int local_worker = combined_workerid[i];
  213. alias->destroy = 1;
  214. starpu_pthread_mutex_t *sched_mutex;
  215. starpu_pthread_cond_t *sched_cond;
  216. starpu_worker_get_sched_condition(local_worker, &sched_mutex, &sched_cond);
  217. STARPU_PTHREAD_MUTEX_LOCK_SCHED(sched_mutex);
  218. _starpu_fifo_push_task(data->local_fifo[local_worker], alias);
  219. #if !defined(STARPU_NON_BLOCKING_DRIVERS) || defined(STARPU_SIMGRID)
  220. starpu_wakeup_worker_locked(local_worker, sched_cond, sched_mutex);
  221. #endif
  222. STARPU_PTHREAD_MUTEX_UNLOCK_SCHED(sched_mutex);
  223. }
  224. /* The master also manipulated an alias */
  225. struct starpu_task *master_alias = starpu_task_dup(task);
  226. master_alias->destroy = 1;
  227. return master_alias;
  228. }
  229. }
  230. else
  231. {
  232. /* The worker is a slave */
  233. return _starpu_fifo_pop_task(data->local_fifo[workerid], workerid);
  234. }
  235. }
/* Exported descriptor for the parallel eager ("peager") scheduling policy. */
struct starpu_sched_policy _starpu_sched_peager_policy =
{
	.init_sched = initialize_peager_policy,
	.deinit_sched = deinitialize_peager_policy,
	.add_workers = peager_add_workers,
	.remove_workers = peager_remove_workers,
	.push_task = push_task_peager_policy,
	.pop_task = pop_task_peager_policy,
	/* No per-task execution hooks needed by this policy. */
	.pre_exec_hook = NULL,
	.post_exec_hook = NULL,
	.pop_every_task = NULL,
	.policy_name = "peager",
	.policy_description = "parallel eager policy",
	.worker_type = STARPU_WORKER_LIST,
};