  1. /* StarPU --- Runtime system for heterogeneous multicore architectures.
  2. *
  3. * Copyright (C) 2009-2016 Université de Bordeaux
  4. * Copyright (C) 2010, 2011, 2012, 2013, 2014, 2015 CNRS
  5. *
  6. * StarPU is free software; you can redistribute it and/or modify
  7. * it under the terms of the GNU Lesser General Public License as published by
  8. * the Free Software Foundation; either version 2.1 of the License, or (at
  9. * your option) any later version.
  10. *
  11. * StarPU is distributed in the hope that it will be useful, but
  12. * WITHOUT ANY WARRANTY; without even the implied warranty of
  13. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
  14. *
  15. * See the GNU Lesser General Public License in COPYING.LGPL for more details.
  16. */
  17. #include <common/config.h>
  18. #include <core/sched_policy.h>
  19. #include <datawizard/datastats.h>
  20. #include <datawizard/memory_manager.h>
  21. #include <datawizard/memory_nodes.h>
  22. #include <datawizard/malloc.h>
  23. #include <common/fxt.h>
  24. #include "copy_driver.h"
  25. #include "memalloc.h"
/* Central description of every registered memory node (kind, device id,
 * per-node worker count, attached condition variables, ...).  Filled in
 * incrementally by _starpu_memory_node_register(). */
static struct _starpu_memory_node_descr descr;
/* Per-thread key; presumably holds the memory node associated with the
 * calling worker thread (set elsewhere) -- TODO confirm against callers. */
starpu_pthread_key_t _starpu_memory_node_key STARPU_ATTRIBUTE_INTERNAL;
  28. void _starpu_memory_nodes_init(void)
  29. {
  30. /* there is no node yet, subsequent nodes will be
  31. * added using _starpu_memory_node_register */
  32. descr.nnodes = 0;
  33. STARPU_PTHREAD_KEY_CREATE(&_starpu_memory_node_key, NULL);
  34. unsigned i;
  35. for (i = 0; i < STARPU_MAXNODES; i++)
  36. {
  37. descr.nodes[i] = STARPU_UNUSED;
  38. descr.nworkers[i] = 0;
  39. }
  40. _starpu_init_mem_chunk_lists();
  41. _starpu_init_data_request_lists();
  42. _starpu_memory_manager_init();
  43. STARPU_PTHREAD_RWLOCK_INIT(&descr.conditions_rwlock, NULL);
  44. descr.total_condition_count = 0;
  45. }
/* Tear down what _starpu_memory_nodes_init() set up: the data-request and
 * memory-chunk lists (in reverse order of their initialization), then the
 * conditions rwlock and the per-thread key. */
void _starpu_memory_nodes_deinit(void)
{
	_starpu_deinit_data_request_lists();
	_starpu_deinit_mem_chunk_lists();
	STARPU_PTHREAD_RWLOCK_DESTROY(&descr.conditions_rwlock);
	STARPU_PTHREAD_KEY_DELETE(_starpu_memory_node_key);
}
/* Account one more worker as driven by memory node 'node'.  Plain
 * (non-atomic) increment; presumably only called during sequential
 * topology setup -- TODO confirm. */
void _starpu_memory_node_add_nworkers(unsigned node)
{
	descr.nworkers[node]++;
}
/* Return the number of workers recorded against memory node 'node'
 * by _starpu_memory_node_add_nworkers(). */
unsigned _starpu_memory_node_get_nworkers(unsigned node)
{
	return descr.nworkers[node];
}
/* Expose the (single, file-static) memory node description structure. */
struct _starpu_memory_node_descr *_starpu_memory_node_get_description(void)
{
	return &descr;
}
/* Return the kind (CPU RAM, CUDA, OpenCL, ...) of memory node 'node'.
 * No bounds check: 'node' must be a valid registered node id
 * (STARPU_UNUSED is returned for never-registered slots). */
enum starpu_node_kind starpu_node_get_kind(unsigned node)
{
	return descr.nodes[node];
}
/* Return the device id that was recorded for memory node 'node'
 * at registration time. */
int _starpu_memory_node_get_devid(unsigned node)
{
	return descr.devid[node];
}
/* Return the number of memory nodes registered so far. */
unsigned starpu_memory_nodes_get_count(void)
{
	return descr.nnodes;
}
  77. void _starpu_memory_node_get_name(unsigned node, char *name, int size)
  78. {
  79. const char *prefix;
  80. switch (descr.nodes[node])
  81. {
  82. case STARPU_CPU_RAM:
  83. prefix = "RAM";
  84. break;
  85. case STARPU_CUDA_RAM:
  86. prefix = "CUDA";
  87. break;
  88. case STARPU_OPENCL_RAM:
  89. prefix = "OpenCL";
  90. break;
  91. case STARPU_DISK_RAM:
  92. prefix = "Disk";
  93. break;
  94. case STARPU_MIC_RAM:
  95. prefix = "MIC";
  96. break;
  97. case STARPU_SCC_RAM:
  98. prefix = "SCC_RAM";
  99. break;
  100. case STARPU_SCC_SHM:
  101. prefix = "SCC_shared";
  102. break;
  103. case STARPU_UNUSED:
  104. default:
  105. prefix = "unknown";
  106. STARPU_ASSERT(0);
  107. }
  108. snprintf(name, size, "%s %u", prefix, descr.devid[node]);
  109. }
/* Reserve a fresh memory node slot for a device of the given 'kind' and
 * 'devid', initialize its bookkeeping, and return its node id.  The slot
 * is claimed with an atomic increment, so concurrent registrations get
 * distinct ids. */
unsigned _starpu_memory_node_register(enum starpu_node_kind kind, int devid)
{
	unsigned node;
	/* ATOMIC_ADD returns the new value ... */
	node = STARPU_ATOMIC_ADD(&descr.nnodes, 1) - 1;
	STARPU_ASSERT_MSG(node < STARPU_MAXNODES,"Too many nodes (%u) for maximum %u. Use configure option --enable-maxnodes=xxx to update the maximum number of nodes.", node, STARPU_MAXNODES);
	descr.nodes[node] = kind;
	_STARPU_TRACE_NEW_MEM_NODE(node);
	descr.devid[node] = devid;
	/* for now, there is no condition associated to that newly created node */
	descr.condition_count[node] = 0;
	/* Set up the per-node allocation state. */
	_starpu_malloc_init(node);
	return node;
}
#ifdef STARPU_SIMGRID
/* Record the simgrid host simulating memory node 'node'. */
void _starpu_simgrid_memory_node_set_host(unsigned node, msg_host_t host)
{
	descr.host[node] = host;
}

/* Return the simgrid host previously recorded for 'node' by
 * _starpu_simgrid_memory_node_set_host(). */
msg_host_t _starpu_simgrid_memory_node_get_host(unsigned node)
{
	return descr.host[node];
}
#endif
/* TODO move in a more appropriate file !! */
/* Register a condition variable associated to worker which is associated to a
 * memory node itself.  The (cond, mutex) pair is appended both to the
 * per-node list and to the global list, unless already present; a pair
 * already on the node list is assumed to also be on the global one.
 * Idempotent; the whole update is done under the conditions rwlock. */
void _starpu_memory_node_register_condition(starpu_pthread_cond_t *cond, starpu_pthread_mutex_t *mutex, unsigned nodeid)
{
	unsigned cond_id;
	unsigned nconds_total, nconds;

	STARPU_PTHREAD_RWLOCK_WRLOCK(&descr.conditions_rwlock);

	/* we only insert the queue if it's not already in the list */
	nconds = descr.condition_count[nodeid];
	for (cond_id = 0; cond_id < nconds; cond_id++)
	{
		if (descr.conditions_attached_to_node[nodeid][cond_id].cond == cond)
		{
			/* A given cond must always come with the same mutex. */
			STARPU_ASSERT(descr.conditions_attached_to_node[nodeid][cond_id].mutex == mutex);
			/* the condition is already in the list */
			STARPU_PTHREAD_RWLOCK_UNLOCK(&descr.conditions_rwlock);
			return;
		}
	}

	/* it was not found locally: cond_id == nconds here, i.e. the first
	 * free slot of the per-node array. */
	descr.conditions_attached_to_node[nodeid][cond_id].cond = cond;
	descr.conditions_attached_to_node[nodeid][cond_id].mutex = mutex;
	descr.condition_count[nodeid]++;

	/* do we have to add it in the global list as well ? */
	nconds_total = descr.total_condition_count;
	for (cond_id = 0; cond_id < nconds_total; cond_id++)
	{
		if (descr.conditions_all[cond_id].cond == cond)
		{
			/* the queue is already in the global list */
			STARPU_PTHREAD_RWLOCK_UNLOCK(&descr.conditions_rwlock);
			return;
		}
	}

	/* it was not in the global list either */
	descr.conditions_all[nconds_total].cond = cond;
	descr.conditions_all[nconds_total].mutex = mutex;
	descr.total_condition_count++;

	STARPU_PTHREAD_RWLOCK_UNLOCK(&descr.conditions_rwlock);
}
  175. unsigned starpu_worker_get_memory_node(unsigned workerid)
  176. {
  177. struct _starpu_machine_config *config = _starpu_get_machine_config();
  178. /* This workerid may either be a basic worker or a combined worker */
  179. unsigned nworkers = config->topology.nworkers;
  180. if (workerid < config->topology.nworkers)
  181. return config->workers[workerid].memory_node;
  182. /* We have a combined worker */
  183. unsigned ncombinedworkers = config->topology.ncombinedworkers;
  184. STARPU_ASSERT_MSG(workerid < ncombinedworkers + nworkers, "Bad workerid %u, maximum %u", workerid, ncombinedworkers + nworkers);
  185. return config->combined_workers[workerid - nworkers].memory_node;
  186. }