/* StarPU --- Runtime system for heterogeneous multicore architectures.
 *
 * Copyright (C) 2011, 2012 INRIA
 *
 * StarPU is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation; either version 2.1 of the License, or (at
 * your option) any later version.
 *
 * StarPU is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * See the GNU Lesser General Public License in COPYING.LGPL for more details.
 */

#include <sc_hypervisor_intern.h>
  17. static struct sc_hypervisor_policy_config* _create_config(void)
  18. {
  19. struct sc_hypervisor_policy_config *config = (struct sc_hypervisor_policy_config *)malloc(sizeof(struct sc_hypervisor_policy_config));
  20. config->min_nworkers = -1;
  21. config->max_nworkers = -1;
  22. config->new_workers_max_idle = -1.0;
  23. config->ispeed_ctx_sample = 0.0;
  24. int i;
  25. for(i = 0; i < STARPU_NMAXWORKERS; i++)
  26. {
  27. config->granularity = -1;
  28. config->priority[i] = -1;
  29. config->fixed_workers[i] = -1;
  30. config->max_idle[i] = -1.0;
  31. config->min_working[i] = -1.0;
  32. config->ispeed_w_sample[i] = 0.0;
  33. }
  34. return config;
  35. }
  36. static void _update_config(struct sc_hypervisor_policy_config *old, struct sc_hypervisor_policy_config* new)
  37. {
  38. old->min_nworkers = new->min_nworkers != -1 ? new->min_nworkers : old->min_nworkers ;
  39. old->max_nworkers = new->max_nworkers != -1 ? new->max_nworkers : old->max_nworkers ;
  40. old->new_workers_max_idle = new->new_workers_max_idle != -1.0 ? new->new_workers_max_idle : old->new_workers_max_idle;
  41. old->granularity = new->granularity != -1 ? new->granularity : old->granularity;
  42. int i;
  43. for(i = 0; i < STARPU_NMAXWORKERS; i++)
  44. {
  45. old->priority[i] = new->priority[i] != -1 ? new->priority[i] : old->priority[i];
  46. old->fixed_workers[i] = new->fixed_workers[i] != -1 ? new->fixed_workers[i] : old->fixed_workers[i];
  47. old->max_idle[i] = new->max_idle[i] != -1.0 ? new->max_idle[i] : old->max_idle[i];
  48. old->min_working[i] = new->min_working[i] != -1.0 ? new->min_working[i] : old->min_working[i];
  49. }
  50. }
  51. void sc_hypervisor_set_config(unsigned sched_ctx, void *config)
  52. {
  53. if(hypervisor.sched_ctx_w[sched_ctx].config != NULL && config != NULL)
  54. {
  55. _update_config(hypervisor.sched_ctx_w[sched_ctx].config, config);
  56. }
  57. else
  58. {
  59. hypervisor.sched_ctx_w[sched_ctx].config = config;
  60. }
  61. return;
  62. }
  63. void _add_config(unsigned sched_ctx)
  64. {
  65. struct sc_hypervisor_policy_config *config = _create_config();
  66. config->min_nworkers = 0;
  67. config->max_nworkers = STARPU_NMAXWORKERS;
  68. config->new_workers_max_idle = MAX_IDLE_TIME;
  69. int i;
  70. for(i = 0; i < STARPU_NMAXWORKERS; i++)
  71. {
  72. config->granularity = 1;
  73. config->priority[i] = 0;
  74. config->fixed_workers[i] = 0;
  75. config->max_idle[i] = MAX_IDLE_TIME;
  76. config->min_working[i] = MIN_WORKING_TIME;
  77. }
  78. sc_hypervisor_set_config(sched_ctx, config);
  79. }
  80. void _remove_config(unsigned sched_ctx)
  81. {
  82. sc_hypervisor_set_config(sched_ctx, NULL);
  83. }
  84. struct sc_hypervisor_policy_config* sc_hypervisor_get_config(unsigned sched_ctx)
  85. {
  86. return hypervisor.sched_ctx_w[sched_ctx].config;
  87. }
/* Consume the (tag, payload...) vararg pairs of sc_hypervisor_ctl()
 * and apply them to a configuration.
 *
 * If 'later' is non-zero, a fresh configuration is allocated, filled in
 * and RETURNED so the caller can stash it and apply it when the
 * corresponding task tag is reached; ownership passes to the caller.
 * Otherwise the values are written directly into the context's current
 * configuration and NULL is returned.
 *
 * NOTE(review): the payload layout after each tag (workerids array,
 * nworkers count, then the value) must match the callers of
 * sc_hypervisor_ctl() exactly; there is no 'default' case, so an
 * unknown tag would silently desynchronize the va_list. */
static struct sc_hypervisor_policy_config* _ctl(unsigned sched_ctx, va_list varg_list, unsigned later)
{
	struct sc_hypervisor_policy_config *config = NULL;

	if(later)
		config = _create_config();
	else
		config = sc_hypervisor_get_config(sched_ctx);
	assert(config != NULL);

	int arg_type;
	int i;
	int *workerids;
	int nworkers;

	while ((arg_type = va_arg(varg_list, int)) != SC_HYPERVISOR_NULL)
	{
		switch(arg_type)
		{
		case SC_HYPERVISOR_MAX_IDLE:
			/* payload: int* workerids, int nworkers, double value */
			workerids = va_arg(varg_list, int*);
			nworkers = va_arg(varg_list, int);
			double max_idle = va_arg(varg_list, double);
			for(i = 0; i < nworkers; i++)
				config->max_idle[workerids[i]] = max_idle;
			break;

		case SC_HYPERVISOR_MIN_WORKING:
			workerids = va_arg(varg_list, int*);
			nworkers = va_arg(varg_list, int);
			double min_working = va_arg(varg_list, double);
			for(i = 0; i < nworkers; i++)
				config->min_working[workerids[i]] = min_working;
			break;

		case SC_HYPERVISOR_PRIORITY:
			workerids = va_arg(varg_list, int*);
			nworkers = va_arg(varg_list, int);
			int priority = va_arg(varg_list, int);
			for(i = 0; i < nworkers; i++)
				config->priority[workerids[i]] = priority;
			break;

		case SC_HYPERVISOR_MIN_WORKERS:
			config->min_nworkers = va_arg(varg_list, unsigned);
			break;

		case SC_HYPERVISOR_MAX_WORKERS:
			config->max_nworkers = va_arg(varg_list, unsigned);
			break;

		case SC_HYPERVISOR_GRANULARITY:
			config->granularity = va_arg(varg_list, unsigned);
			break;

		case SC_HYPERVISOR_FIXED_WORKERS:
			/* no explicit value: the listed workers are simply pinned */
			workerids = va_arg(varg_list, int*);
			nworkers = va_arg(varg_list, int);
			for(i = 0; i < nworkers; i++)
				config->fixed_workers[workerids[i]] = 1;
			break;

		case SC_HYPERVISOR_NEW_WORKERS_MAX_IDLE:
			config->new_workers_max_idle = va_arg(varg_list, double);
			break;

		case SC_HYPERVISOR_ISPEED_W_SAMPLE:
			workerids = va_arg(varg_list, int*);
			nworkers = va_arg(varg_list, int);
			double sample = va_arg(varg_list, double);
			for(i = 0; i < nworkers; i++)
				config->ispeed_w_sample[workerids[i]] = sample;
			break;

		case SC_HYPERVISOR_ISPEED_CTX_SAMPLE:
			config->ispeed_ctx_sample = va_arg(varg_list, double);
			break;

		/* not relevant for the strategy: these were already handled by
		 * sc_hypervisor_ctl(); their payload is consumed here only to
		 * keep the va_list iteration aligned */
		case SC_HYPERVISOR_TIME_TO_APPLY:
			va_arg(varg_list, int);
			break;

		case SC_HYPERVISOR_MIN_TASKS:
			va_arg(varg_list, int);
			break;
		}
	}

	/* NOTE(review): va_end() on a va_list started in the caller —
	 * sc_hypervisor_ctl() relies on this and must not call va_end()
	 * itself for this pass */
	va_end(varg_list);

	return later ? config : NULL;
}
/* Public entry point for configuring the hypervisor for sched_ctx.
 * Arguments are (tag, payload...) pairs terminated by SC_HYPERVISOR_NULL.
 *
 * First pass: scan the args only for SC_HYPERVISOR_TIME_TO_APPLY (a task
 * tag at which the configuration should be applied later) and
 * SC_HYPERVISOR_MIN_TASKS.
 * Second pass (_ctl): parse the full argument list; if a task tag was
 * found, the resulting configuration is stored in the per-context hash
 * table, keyed by task_tag, to be applied when that tag is reached.
 *
 * NOTE(review): the first scan reads every argument as an int, so if a
 * tag with pointer/double payloads (e.g. SC_HYPERVISOR_MAX_IDLE)
 * precedes SC_HYPERVISOR_TIME_TO_APPLY in the list, the scan misreads
 * those payloads as tags — TODO confirm callers always pass
 * TIME_TO_APPLY / MIN_TASKS before any such tag. */
void sc_hypervisor_ctl(unsigned sched_ctx, ...)
{
	va_list varg_list;
	va_start(varg_list, sched_ctx);

	int arg_type;
	int stop = 0;
	int task_tag = -1;  /* -1 / 0 means "apply immediately" */

	while ((arg_type = va_arg(varg_list, int)) != SC_HYPERVISOR_NULL)
	{
		switch(arg_type)
		{
		case SC_HYPERVISOR_TIME_TO_APPLY:
			task_tag = va_arg(varg_list, int);
			stop = 1;  /* tag found: no need to scan further */
			break;

		case SC_HYPERVISOR_MIN_TASKS:
			hypervisor.min_tasks = va_arg(varg_list, int);
			hypervisor.check_min_tasks[sched_ctx] = 1;
			break;
		}
		if(stop) break;
	}
	va_end(varg_list);

	/* restart the traversal for the full parse; _ctl() calls va_end()
	 * on this second pass itself */
	va_start(varg_list, sched_ctx);

	/* if config not null => save hypervisor configuration and consider it later */
	struct sc_hypervisor_policy_config *config = _ctl(sched_ctx, varg_list, (task_tag > 0));
	if(config != NULL)
	{
		struct configuration_entry *entry;
		entry = malloc(sizeof *entry);
		STARPU_ASSERT(entry != NULL);
		entry->task_tag = task_tag;
		entry->configuration = config;

		/* the hash table is shared with the hypervisor worker threads */
		starpu_pthread_mutex_lock(&hypervisor.conf_mut[sched_ctx]);
		HASH_ADD_INT(hypervisor.configurations[sched_ctx], task_tag, entry);
		starpu_pthread_mutex_unlock(&hypervisor.conf_mut[sched_ctx]);
	}

	return;
}