/* sched_ctx_config.c */
/* StarPU --- Runtime system for heterogeneous multicore architectures.
 *
 * Copyright (C) 2011, 2012 INRIA
 *
 * StarPU is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation; either version 2.1 of the License, or (at
 * your option) any later version.
 *
 * StarPU is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * See the GNU Lesser General Public License in COPYING.LGPL for more details.
 */

#include <sched_ctx_hypervisor_intern.h>
  17. static struct starpu_sched_ctx_hypervisor_policy_config* _create_config(void)
  18. {
  19. struct starpu_sched_ctx_hypervisor_policy_config *config = (struct starpu_sched_ctx_hypervisor_policy_config *)malloc(sizeof(struct starpu_sched_ctx_hypervisor_policy_config));
  20. config->min_nworkers = -1;
  21. config->max_nworkers = -1;
  22. config->new_workers_max_idle = -1.0;
  23. int i;
  24. for(i = 0; i < STARPU_NMAXWORKERS; i++)
  25. {
  26. config->granularity = -1;
  27. config->priority[i] = -1;
  28. config->fixed_workers[i] = -1;
  29. config->max_idle[i] = -1.0;
  30. config->empty_ctx_max_idle[i] = -1.0;
  31. config->min_working[i] = -1.0;
  32. }
  33. return config;
  34. }
  35. static void _update_config(struct starpu_sched_ctx_hypervisor_policy_config *old, struct starpu_sched_ctx_hypervisor_policy_config* new)
  36. {
  37. old->min_nworkers = new->min_nworkers != -1 ? new->min_nworkers : old->min_nworkers ;
  38. old->max_nworkers = new->max_nworkers != -1 ? new->max_nworkers : old->max_nworkers ;
  39. old->new_workers_max_idle = new->new_workers_max_idle != -1.0 ? new->new_workers_max_idle : old->new_workers_max_idle;
  40. old->granularity = new->granularity != -1 ? new->granularity : old->granularity;
  41. int i;
  42. for(i = 0; i < STARPU_NMAXWORKERS; i++)
  43. {
  44. old->priority[i] = new->priority[i] != -1 ? new->priority[i] : old->priority[i];
  45. old->fixed_workers[i] = new->fixed_workers[i] != -1 ? new->fixed_workers[i] : old->fixed_workers[i];
  46. old->max_idle[i] = new->max_idle[i] != -1.0 ? new->max_idle[i] : old->max_idle[i];
  47. old->empty_ctx_max_idle[i] = new->empty_ctx_max_idle[i] != -1.0 ? new->empty_ctx_max_idle[i] : old->empty_ctx_max_idle[i];
  48. old->min_working[i] = new->min_working[i] != -1.0 ? new->min_working[i] : old->min_working[i];
  49. }
  50. }
/* Attach a policy configuration to a scheduling context.
 * If the context already has a configuration and the new one is non-NULL,
 * the new (possibly partial) configuration is merged into the existing one;
 * otherwise the stored pointer is simply replaced (including replacement by
 * NULL, which detaches the configuration).
 * NOTE(review): in the merge branch the passed-in `config` block is not
 * freed here, and in the replace branch the previously stored block is not
 * freed either — ownership looks leaky; confirm against the callers before
 * adding free() calls (risk of double free). */
void sched_ctx_hypervisor_set_config(unsigned sched_ctx, void *config)
{
	if(hypervisor.sched_ctx_w[sched_ctx].config != NULL && config != NULL)
	{
		_update_config(hypervisor.sched_ctx_w[sched_ctx].config, config);
	}
	else
		hypervisor.sched_ctx_w[sched_ctx].config = config;

	return;
}
  61. void _add_config(unsigned sched_ctx)
  62. {
  63. struct starpu_sched_ctx_hypervisor_policy_config *config = _create_config();
  64. config->min_nworkers = 0;
  65. config->max_nworkers = STARPU_NMAXWORKERS;
  66. config->new_workers_max_idle = MAX_IDLE_TIME;
  67. int i;
  68. for(i = 0; i < STARPU_NMAXWORKERS; i++)
  69. {
  70. config->granularity = 1;
  71. config->priority[i] = 0;
  72. config->fixed_workers[i] = 0;
  73. config->max_idle[i] = MAX_IDLE_TIME;
  74. config->empty_ctx_max_idle[i] = MAX_IDLE_TIME;
  75. config->min_working[i] = MIN_WORKING_TIME;
  76. }
  77. sched_ctx_hypervisor_set_config(sched_ctx, config);
  78. }
  79. void _remove_config(unsigned sched_ctx)
  80. {
  81. sched_ctx_hypervisor_set_config(sched_ctx, NULL);
  82. }
  83. struct starpu_sched_ctx_hypervisor_policy_config* sched_ctx_hypervisor_get_config(unsigned sched_ctx)
  84. {
  85. return hypervisor.sched_ctx_w[sched_ctx].config;
  86. }
/* Parse the HYPERVISOR_* varargs protocol into a policy configuration.
 * If `later` is non-zero, the settings are written into a freshly allocated
 * configuration which is returned to the caller (to be applied at a later
 * task tag); otherwise they are written directly into the context's current
 * configuration and NULL is returned.
 * The argument list is a sequence of (tag, payload...) pairs terminated by
 * HYPERVISOR_NULL; the va_arg types and their order below ARE the protocol,
 * so they must match what callers push exactly.
 * NOTE(review): this function calls va_end() on a va_list that was
 * va_start()ed in the caller — C11 requires the matching va_end in the same
 * function as va_start; works on common ABIs but worth confirming. */
static struct starpu_sched_ctx_hypervisor_policy_config* _ioctl(unsigned sched_ctx, va_list varg_list, unsigned later)
{
	struct starpu_sched_ctx_hypervisor_policy_config *config = NULL;

	if(later)
		config = _create_config();
	else
		config = sched_ctx_hypervisor_get_config(sched_ctx);

	assert(config != NULL);

	int arg_type;
	int i;
	int *workerids;
	int nworkers;

	while ((arg_type = va_arg(varg_list, int)) != HYPERVISOR_NULL)
	{
		switch(arg_type)
		{
		/* per-worker double thresholds: (int *workerids, int nworkers, double value) */
		case HYPERVISOR_MAX_IDLE:
			workerids = va_arg(varg_list, int*);
			nworkers = va_arg(varg_list, int);
			double max_idle = va_arg(varg_list, double);
			for(i = 0; i < nworkers; i++)
				config->max_idle[workerids[i]] = max_idle;

			break;

		case HYPERVISOR_EMPTY_CTX_MAX_IDLE:
			workerids = va_arg(varg_list, int*);
			nworkers = va_arg(varg_list, int);
			double empty_ctx_max_idle = va_arg(varg_list, double);
			for(i = 0; i < nworkers; i++)
				config->empty_ctx_max_idle[workerids[i]] = empty_ctx_max_idle;

			break;

		case HYPERVISOR_MIN_WORKING:
			workerids = va_arg(varg_list, int*);
			nworkers = va_arg(varg_list, int);
			double min_working = va_arg(varg_list, double);
			for(i = 0; i < nworkers; i++)
				config->min_working[workerids[i]] = min_working;

			break;

		/* per-worker int priority: (int *workerids, int nworkers, int priority) */
		case HYPERVISOR_PRIORITY:
			workerids = va_arg(varg_list, int*);
			nworkers = va_arg(varg_list, int);
			int priority = va_arg(varg_list, int);
			for(i = 0; i < nworkers; i++)
				config->priority[workerids[i]] = priority;

			break;

		/* scalar settings: a single unsigned payload */
		case HYPERVISOR_MIN_WORKERS:
			config->min_nworkers = va_arg(varg_list, unsigned);
			break;

		case HYPERVISOR_MAX_WORKERS:
			config->max_nworkers = va_arg(varg_list, unsigned);
			break;

		case HYPERVISOR_GRANULARITY:
			config->granularity = va_arg(varg_list, unsigned);
			break;

		/* mark the listed workers as pinned to the context: (int *workerids, int nworkers) */
		case HYPERVISOR_FIXED_WORKERS:
			workerids = va_arg(varg_list, int*);
			nworkers = va_arg(varg_list, int);
			for(i = 0; i < nworkers; i++)
				config->fixed_workers[workerids[i]] = 1;

			break;

		case HYPERVISOR_NEW_WORKERS_MAX_IDLE:
			config->new_workers_max_idle = va_arg(varg_list, double);
			break;

		/* not important for the strategy; consumed here only to keep the
		   iteration over the argument list in sync (handled by the caller) */
		case HYPERVISOR_TIME_TO_APPLY:
			va_arg(varg_list, int);
			break;

		case HYPERVISOR_MIN_TASKS:
			va_arg(varg_list, int);
			break;

		}
	}

	va_end(varg_list);

	/* only a deferred ("later") configuration is handed back to the caller */
	return later ? config : NULL;
}
  161. void sched_ctx_hypervisor_ioctl(unsigned sched_ctx, ...)
  162. {
  163. va_list varg_list;
  164. va_start(varg_list, sched_ctx);
  165. int arg_type;
  166. int stop = 0;
  167. int task_tag = -1;
  168. while ((arg_type = va_arg(varg_list, int)) != HYPERVISOR_NULL)
  169. {
  170. switch(arg_type)
  171. {
  172. case HYPERVISOR_TIME_TO_APPLY:
  173. task_tag = va_arg(varg_list, int);
  174. stop = 1;
  175. break;
  176. case HYPERVISOR_MIN_TASKS:
  177. hypervisor.min_tasks = va_arg(varg_list, int);
  178. hypervisor.check_min_tasks[sched_ctx] = 1;
  179. break;
  180. }
  181. if(stop) break;
  182. }
  183. va_end(varg_list);
  184. va_start(varg_list, sched_ctx);
  185. /* if config not null => save hypervisor configuration and consider it later */
  186. struct starpu_sched_ctx_hypervisor_policy_config *config = _ioctl(sched_ctx, varg_list, (task_tag > 0));
  187. if(config != NULL)
  188. {
  189. struct configuration_entry *entry;
  190. entry = malloc(sizeof *entry);
  191. STARPU_ASSERT(entry != NULL);
  192. entry->task_tag = task_tag;
  193. entry->configuration = config;
  194. pthread_mutex_lock(&hypervisor.conf_mut[sched_ctx]);
  195. HASH_ADD_INT(hypervisor.configurations[sched_ctx], task_tag, entry);
  196. pthread_mutex_unlock(&hypervisor.conf_mut[sched_ctx]);
  197. }
  198. return;
  199. }