/* sched_ctx_config.c -- scheduling-context hypervisor configuration management */
  1. /* StarPU --- Runtime system for heterogeneous multicore architectures.
  2. *
  3. * Copyright (C) 2011, 2012 INRIA
  4. *
  5. * StarPU is free software; you can redistribute it and/or modify
  6. * it under the terms of the GNU Lesser General Public License as published by
  7. * the Free Software Foundation; either version 2.1 of the License, or (at
  8. * your option) any later version.
  9. *
  10. * StarPU is distributed in the hope that it will be useful, but
  11. * WITHOUT ANY WARRANTY; without even the implied warranty of
  12. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
  13. *
  14. * See the GNU Lesser General Public License in COPYING.LGPL for more details.
  15. */
  16. #include <sched_ctx_hypervisor_intern.h>
  17. static struct sched_ctx_hypervisor_policy_config* _create_config(void)
  18. {
  19. struct sched_ctx_hypervisor_policy_config *config = (struct sched_ctx_hypervisor_policy_config *)malloc(sizeof(struct sched_ctx_hypervisor_policy_config));
  20. config->min_nworkers = -1;
  21. config->max_nworkers = -1;
  22. config->new_workers_max_idle = -1.0;
  23. config->ispeed_ctx_sample = 0.0;
  24. int i;
  25. for(i = 0; i < STARPU_NMAXWORKERS; i++)
  26. {
  27. config->granularity = -1;
  28. config->priority[i] = -1;
  29. config->fixed_workers[i] = -1;
  30. config->max_idle[i] = -1.0;
  31. config->empty_ctx_max_idle[i] = -1.0;
  32. config->min_working[i] = -1.0;
  33. config->ispeed_w_sample[i] = 0.0;
  34. }
  35. return config;
  36. }
  37. static void _update_config(struct sched_ctx_hypervisor_policy_config *old, struct sched_ctx_hypervisor_policy_config* new)
  38. {
  39. old->min_nworkers = new->min_nworkers != -1 ? new->min_nworkers : old->min_nworkers ;
  40. old->max_nworkers = new->max_nworkers != -1 ? new->max_nworkers : old->max_nworkers ;
  41. old->new_workers_max_idle = new->new_workers_max_idle != -1.0 ? new->new_workers_max_idle : old->new_workers_max_idle;
  42. old->granularity = new->granularity != -1 ? new->granularity : old->granularity;
  43. int i;
  44. for(i = 0; i < STARPU_NMAXWORKERS; i++)
  45. {
  46. old->priority[i] = new->priority[i] != -1 ? new->priority[i] : old->priority[i];
  47. old->fixed_workers[i] = new->fixed_workers[i] != -1 ? new->fixed_workers[i] : old->fixed_workers[i];
  48. old->max_idle[i] = new->max_idle[i] != -1.0 ? new->max_idle[i] : old->max_idle[i];
  49. old->empty_ctx_max_idle[i] = new->empty_ctx_max_idle[i] != -1.0 ? new->empty_ctx_max_idle[i] : old->empty_ctx_max_idle[i];
  50. old->min_working[i] = new->min_working[i] != -1.0 ? new->min_working[i] : old->min_working[i];
  51. }
  52. }
  53. void sched_ctx_hypervisor_set_config(unsigned sched_ctx, void *config)
  54. {
  55. if(hypervisor.sched_ctx_w[sched_ctx].config != NULL && config != NULL)
  56. {
  57. _update_config(hypervisor.sched_ctx_w[sched_ctx].config, config);
  58. }
  59. else
  60. {
  61. hypervisor.sched_ctx_w[sched_ctx].config = config;
  62. }
  63. return;
  64. }
  65. void _add_config(unsigned sched_ctx)
  66. {
  67. struct sched_ctx_hypervisor_policy_config *config = _create_config();
  68. config->min_nworkers = 0;
  69. config->max_nworkers = STARPU_NMAXWORKERS;
  70. config->new_workers_max_idle = MAX_IDLE_TIME;
  71. int i;
  72. for(i = 0; i < STARPU_NMAXWORKERS; i++)
  73. {
  74. config->granularity = 1;
  75. config->priority[i] = 0;
  76. config->fixed_workers[i] = 0;
  77. config->max_idle[i] = MAX_IDLE_TIME;
  78. config->empty_ctx_max_idle[i] = MAX_IDLE_TIME;
  79. config->min_working[i] = MIN_WORKING_TIME;
  80. }
  81. sched_ctx_hypervisor_set_config(sched_ctx, config);
  82. }
/* Detach the configuration of `sched_ctx` by storing NULL through
 * sched_ctx_hypervisor_set_config(). */
void _remove_config(unsigned sched_ctx)
{
	sched_ctx_hypervisor_set_config(sched_ctx, NULL);
}
/* Return the configuration currently attached to `sched_ctx`
 * (NULL if none); the hypervisor retains ownership of the pointer. */
struct sched_ctx_hypervisor_policy_config* sched_ctx_hypervisor_get_config(unsigned sched_ctx)
{
	return hypervisor.sched_ctx_w[sched_ctx].config;
}
/* Parse the HYPERVISOR_* option list in `varg_list` and apply each option
 * to a policy configuration.
 * - later == 0: options are applied in place to the configuration currently
 *   attached to `sched_ctx`; returns NULL.
 * - later != 0: a fresh sentinel-initialised configuration is filled in and
 *   returned, so the caller can store it and apply it once the tagged task
 *   is reached (see sched_ctx_hypervisor_ioctl()).
 * NOTE(review): va_end() is called here on the caller's va_list, so the
 * caller must not va_end() it again after this function returns.  Unknown
 * option codes fall through the switch silently, leaving any payload to be
 * misread as the next option code -- callers must pass only known codes. */
static struct sched_ctx_hypervisor_policy_config* _ioctl(unsigned sched_ctx, va_list varg_list, unsigned later)
{
	struct sched_ctx_hypervisor_policy_config *config = NULL;

	if(later)
		config = _create_config();
	else
		config = sched_ctx_hypervisor_get_config(sched_ctx);
	assert(config != NULL);

	int arg_type;
	int i;
	int *workerids;
	int nworkers;

	while ((arg_type = va_arg(varg_list, int)) != HYPERVISOR_NULL)
	{
		switch(arg_type)
		{
		case HYPERVISOR_MAX_IDLE:
			/* payload: (int *workerids, int nworkers, double max_idle) */
			workerids = va_arg(varg_list, int*);
			nworkers = va_arg(varg_list, int);
			double max_idle = va_arg(varg_list, double);
			for(i = 0; i < nworkers; i++)
				config->max_idle[workerids[i]] = max_idle;
			break;
		case HYPERVISOR_EMPTY_CTX_MAX_IDLE:
			/* payload: (int *workerids, int nworkers, double max_idle) */
			workerids = va_arg(varg_list, int*);
			nworkers = va_arg(varg_list, int);
			double empty_ctx_max_idle = va_arg(varg_list, double);
			for(i = 0; i < nworkers; i++)
				config->empty_ctx_max_idle[workerids[i]] = empty_ctx_max_idle;
			break;
		case HYPERVISOR_MIN_WORKING:
			/* payload: (int *workerids, int nworkers, double min_working) */
			workerids = va_arg(varg_list, int*);
			nworkers = va_arg(varg_list, int);
			double min_working = va_arg(varg_list, double);
			for(i = 0; i < nworkers; i++)
				config->min_working[workerids[i]] = min_working;
			break;
		case HYPERVISOR_PRIORITY:
			/* payload: (int *workerids, int nworkers, int priority) */
			workerids = va_arg(varg_list, int*);
			nworkers = va_arg(varg_list, int);
			int priority = va_arg(varg_list, int);
			for(i = 0; i < nworkers; i++)
				config->priority[workerids[i]] = priority;
			break;
		case HYPERVISOR_MIN_WORKERS:
			config->min_nworkers = va_arg(varg_list, unsigned);
			break;
		case HYPERVISOR_MAX_WORKERS:
			config->max_nworkers = va_arg(varg_list, unsigned);
			break;
		case HYPERVISOR_GRANULARITY:
			config->granularity = va_arg(varg_list, unsigned);
			break;
		case HYPERVISOR_FIXED_WORKERS:
			/* payload: (int *workerids, int nworkers); listed workers are pinned */
			workerids = va_arg(varg_list, int*);
			nworkers = va_arg(varg_list, int);
			for(i = 0; i < nworkers; i++)
				config->fixed_workers[workerids[i]] = 1;
			break;
		case HYPERVISOR_NEW_WORKERS_MAX_IDLE:
			config->new_workers_max_idle = va_arg(varg_list, double);
			break;
		case HYPERVISOR_ISPEED_W_SAMPLE:
			/* payload: (int *workerids, int nworkers, double sample) */
			workerids = va_arg(varg_list, int*);
			nworkers = va_arg(varg_list, int);
			double sample = va_arg(varg_list, double);
			for(i = 0; i < nworkers; i++)
				config->ispeed_w_sample[workerids[i]] = sample;
			break;
		case HYPERVISOR_ISPEED_CTX_SAMPLE:
			config->ispeed_ctx_sample = va_arg(varg_list, double);
			break;
		/* not important for the strategy; consumed here only to keep va_arg
		   aligned while iterating over the remaining options */
		case HYPERVISOR_TIME_TO_APPLY:
			va_arg(varg_list, int);
			break;
		case HYPERVISOR_MIN_TASKS:
			va_arg(varg_list, int);
			break;
		}
	}

	va_end(varg_list);

	return later ? config : NULL;
}
  175. void sched_ctx_hypervisor_ioctl(unsigned sched_ctx, ...)
  176. {
  177. va_list varg_list;
  178. va_start(varg_list, sched_ctx);
  179. int arg_type;
  180. int stop = 0;
  181. int task_tag = -1;
  182. while ((arg_type = va_arg(varg_list, int)) != HYPERVISOR_NULL)
  183. {
  184. switch(arg_type)
  185. {
  186. case HYPERVISOR_TIME_TO_APPLY:
  187. task_tag = va_arg(varg_list, int);
  188. stop = 1;
  189. break;
  190. case HYPERVISOR_MIN_TASKS:
  191. hypervisor.min_tasks = va_arg(varg_list, int);
  192. hypervisor.check_min_tasks[sched_ctx] = 1;
  193. break;
  194. }
  195. if(stop) break;
  196. }
  197. va_end(varg_list);
  198. va_start(varg_list, sched_ctx);
  199. /* if config not null => save hypervisor configuration and consider it later */
  200. struct sched_ctx_hypervisor_policy_config *config = _ioctl(sched_ctx, varg_list, (task_tag > 0));
  201. if(config != NULL)
  202. {
  203. struct configuration_entry *entry;
  204. entry = malloc(sizeof *entry);
  205. STARPU_ASSERT(entry != NULL);
  206. entry->task_tag = task_tag;
  207. entry->configuration = config;
  208. pthread_mutex_lock(&hypervisor.conf_mut[sched_ctx]);
  209. HASH_ADD_INT(hypervisor.configurations[sched_ctx], task_tag, entry);
  210. pthread_mutex_unlock(&hypervisor.conf_mut[sched_ctx]);
  211. }
  212. return;
  213. }