simgrid.c

/* StarPU --- Runtime system for heterogeneous multicore architectures.
 *
 * Copyright (C) 2012-2014 Université de Bordeaux 1
 *
 * StarPU is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation; either version 2.1 of the License, or (at
 * your option) any later version.
 *
 * StarPU is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * See the GNU Lesser General Public License in COPYING.LGPL for more details.
 */
#include <starpu.h>
#include <datawizard/memory_nodes.h>
#include <unistd.h>
#include <core/perfmodel/perfmodel.h>
#include <core/workers.h>
#include <core/simgrid.h>

#ifdef STARPU_SIMGRID
#include <msg/msg.h>

#pragma weak starpu_main
extern int starpu_main(int argc, char *argv[]);
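
/* Note: when STARPU_SIMGRID is enabled, starpu.h presumably redefines the
 * application's main() to starpu_main() (hence the #undef main below), so
 * that the main() defined in this file takes over, sets up the simulation,
 * and runs starpu_main() inside a SimGrid MSG process.  The weak declaration
 * lets us detect at runtime whether the application was compiled that way. */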

struct main_args
{
	int argc;
	char **argv;
};
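
/* Body of the MSG process running the application: retrieve the saved
 * argc/argv and call the application's (renamed) main function. */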
int do_starpu_main(int argc STARPU_ATTRIBUTE_UNUSED, char *argv[] STARPU_ATTRIBUTE_UNUSED)
{
	struct main_args *args = MSG_process_get_data(MSG_process_self());
	return starpu_main(args->argc, args->argv);
}
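
/* Count the hosts of the simulated platform whose name starts with the given
 * prefix. */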
int _starpu_simgrid_get_nbhosts(const char *prefix)
{
	int ret;
	xbt_dynar_t hosts = MSG_hosts_as_dynar();
	unsigned i, nb = xbt_dynar_length(hosts);
	unsigned len = strlen(prefix);

	ret = 0;
	for (i = 0; i < nb; i++)
	{
		const char *name;
		name = MSG_host_get_name(xbt_dynar_get_as(hosts, i, msg_host_t));
		if (!strncmp(name, prefix, len))
			ret++;
	}
	xbt_dynar_free(&hosts);
	return ret;
}

#ifdef STARPU_DEVEL
#warning TODO: use another way to start main, when simgrid provides it, and then include the application-provided configuration for platform numbers
#endif

#undef main
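
/* Replacement main(): initialize MSG, load the XML platform description,
 * attach a MAX_TSD-slot data array to each simulated host, and launch the
 * application's main as an MSG process on the first host. */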
int main(int argc, char **argv)
{
	xbt_dynar_t hosts;
	int i;
	char path[256];

	if (!starpu_main)
	{
		_STARPU_ERROR("The main file of this application needs to be compiled with starpu.h included, to properly define starpu_main\n");
		exit(EXIT_FAILURE);
	}

	MSG_init(&argc, argv);
#if SIMGRID_VERSION_MAJOR < 3 || (SIMGRID_VERSION_MAJOR == 3 && SIMGRID_VERSION_MINOR < 9)
	/* Versions earlier than 3.9 didn't support our communication tasks */
	MSG_config("workstation/model", "ptask_L07");
#endif

	/* Load XML platform */
	_starpu_simgrid_get_platform_path(path, sizeof(path));
	MSG_create_environment(path);

	hosts = MSG_hosts_as_dynar();
	int nb = xbt_dynar_length(hosts);
	for (i = 0; i < nb; i++)
		MSG_host_set_data(xbt_dynar_get_as(hosts, i, msg_host_t), calloc(MAX_TSD, sizeof(void*)));

	struct main_args args = { .argc = argc, .argv = argv };
	MSG_process_create("main", &do_starpu_main, &args, xbt_dynar_get_as(hosts, 0, msg_host_t));
	xbt_dynar_free(&hosts);

	MSG_main();
	return 0;
}

/* Task execution submitted by StarPU */
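/* The job is turned into a single MSG computation task whose amount of work
 * is the (measured or predicted) execution time (converted from µs to
 * seconds) multiplied by the host speed, so that MSG_task_execute() lasts
 * exactly that long in simulated time. */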
void _starpu_simgrid_execute_job(struct _starpu_job *j, struct starpu_perfmodel_arch* perf_arch, double length)
{
	struct starpu_task *task = j->task;
	msg_task_t simgrid_task;

	if (j->internal)
		/* Internal jobs are not useful to include in the simulation
		 * (and probably don't have a perfmodel anyway) */
		return;

	if (isnan(length))
	{
		length = starpu_task_expected_length(task, perf_arch, j->nimpl);
		STARPU_ASSERT_MSG(!_STARPU_IS_ZERO(length) && !isnan(length),
				"Codelet %s does not have a perfmodel, or is not calibrated enough, please re-run in non-simgrid mode until it is calibrated",
				_starpu_job_get_model_name(j));
	}

	simgrid_task = MSG_task_create(_starpu_job_get_model_name(j),
			length/1000000.0*MSG_get_host_speed(MSG_host_self()),
			0, NULL);
	MSG_task_execute(simgrid_task);
}
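
/* Data transfers are modelled as SimGrid parallel tasks involving the source
 * and destination hosts.  To reproduce the serialization imposed by the
 * driver queues observed on real machines, each new transfer is checked
 * against the pending ones (see transfers_are_sequential below) and made to
 * wait for those it would share a queue with. */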
/* Note: simgrid is not parallel, so there is no need to hold locks for the
 * management of transfers. */
LIST_TYPE(transfer,
	msg_task_t task;
	int src_node;
	int dst_node;
	int run_node;

	/* signalling of transfer completion */
	unsigned *finished;
	starpu_pthread_mutex_t *mutex;
	starpu_pthread_cond_t *cond;

	/* transfers which wait for this transfer */
	struct transfer **wake;
	unsigned nwake;

	/* Number of transfers that this transfer waits for */
	unsigned nwait;
)
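
/* Transfers that have been submitted but have not completed yet */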
struct transfer_list *pending;

/* Tell whether two transfers must be handled sequentially */
static int transfers_are_sequential(struct transfer *new_transfer, struct transfer *old_transfer)
{
	int new_is_cuda STARPU_ATTRIBUTE_UNUSED, old_is_cuda STARPU_ATTRIBUTE_UNUSED;
	int new_is_opencl STARPU_ATTRIBUTE_UNUSED, old_is_opencl STARPU_ATTRIBUTE_UNUSED;
	int new_is_gpu_gpu, old_is_gpu_gpu;

	new_is_cuda  = starpu_node_get_kind(new_transfer->src_node) == STARPU_CUDA_RAM;
	new_is_cuda |= starpu_node_get_kind(new_transfer->dst_node) == STARPU_CUDA_RAM;
	old_is_cuda  = starpu_node_get_kind(old_transfer->src_node) == STARPU_CUDA_RAM;
	old_is_cuda |= starpu_node_get_kind(old_transfer->dst_node) == STARPU_CUDA_RAM;
	new_is_opencl  = starpu_node_get_kind(new_transfer->src_node) == STARPU_OPENCL_RAM;
	new_is_opencl |= starpu_node_get_kind(new_transfer->dst_node) == STARPU_OPENCL_RAM;
	old_is_opencl  = starpu_node_get_kind(old_transfer->src_node) == STARPU_OPENCL_RAM;
	old_is_opencl |= starpu_node_get_kind(old_transfer->dst_node) == STARPU_OPENCL_RAM;

	new_is_gpu_gpu = new_transfer->src_node && new_transfer->dst_node;
	old_is_gpu_gpu = old_transfer->src_node && old_transfer->dst_node;

	/* We ignore CUDA-OpenCL transfers, they cannot happen */
	STARPU_ASSERT(!((new_is_cuda && old_is_opencl) || (old_is_cuda && new_is_opencl)));

	/* The following constraints have been observed with CUDA alone */

	/* Same source and destination: sequential */
	if (new_transfer->src_node == old_transfer->src_node && new_transfer->dst_node == old_transfer->dst_node)
		return 1;

	/* Crossed GPU-GPU: sequential */
	if (new_is_gpu_gpu
			&& new_transfer->src_node == old_transfer->dst_node
			&& old_transfer->src_node == new_transfer->dst_node)
		return 1;

	/* GPU-GPU transfers are sequential with any RAM->GPU transfer */
	if (new_is_gpu_gpu
			&& old_transfer->dst_node == new_transfer->src_node
			&& old_transfer->dst_node == new_transfer->dst_node)
		return 1;
	if (old_is_gpu_gpu
			&& new_transfer->dst_node == old_transfer->src_node
			&& new_transfer->dst_node == old_transfer->dst_node)
		return 1;

	/* These constraints come from StarPU itself */

	/* StarPU uses one stream per direction; RAM->GPU and GPU->RAM are
	 * already covered by the "same source and destination" case above */

	/* StarPU uses one stream per running GPU for GPU-GPU transfers */
	if (new_is_gpu_gpu && old_is_gpu_gpu && new_transfer->run_node == old_transfer->run_node)
		return 1;

	return 0;
}

/* Actually execute the transfer, and then start the transfers waiting for this one. */
static int transfer_execute(int argc STARPU_ATTRIBUTE_UNUSED, char *argv[] STARPU_ATTRIBUTE_UNUSED)
{
	struct transfer *transfer = MSG_process_get_data(MSG_process_self());
	unsigned i;

	_STARPU_DEBUG("transfer %p started\n", transfer);
	MSG_task_execute(transfer->task);
	MSG_task_destroy(transfer->task);
	_STARPU_DEBUG("transfer %p finished\n", transfer);

	STARPU_PTHREAD_MUTEX_LOCK(transfer->mutex);
	*transfer->finished = 1;
	STARPU_PTHREAD_COND_BROADCAST(transfer->cond);
	STARPU_PTHREAD_MUTEX_UNLOCK(transfer->mutex);

	/* The workers which started this request may be sleeping out of tasks, wake them */
	_starpu_wake_all_blocked_workers_on_node(transfer->run_node);

	/* Wake the transfers waiting for our termination */
	/* Note: due to possible preemption inside process_create, the array
	 * may grow while we iterate over it */
	for (i = 0; i < transfer->nwake; i++)
	{
		struct transfer *wake = transfer->wake[i];
		STARPU_ASSERT(wake->nwait > 0);
		wake->nwait--;
		if (!wake->nwait)
		{
			_STARPU_DEBUG("triggering transfer %p\n", wake);
			MSG_process_create("transfer task", transfer_execute, wake, MSG_get_host_by_name("MAIN"));
		}
	}

	free(transfer->wake);
	transfer_list_erase(pending, transfer);
	transfer_delete(transfer);
	return 0;
}

/* Look for sequentialization constraints between this transfer and the
 * pending transfers, then submit it */
static void transfer_submit(struct transfer *transfer)
{
	struct transfer *old;

	if (!pending)
		pending = transfer_list_new();

	for (old = transfer_list_begin(pending);
	     old != transfer_list_end(pending);
	     old = transfer_list_next(old))
	{
		if (transfers_are_sequential(transfer, old))
		{
			_STARPU_DEBUG("transfer %p(%d->%d) waits for %p(%d->%d)\n",
					transfer, transfer->src_node, transfer->dst_node,
					old, old->src_node, old->dst_node);
			/* Make the new transfer wait for the old one */
			transfer->nwait++;
			/* Make the old transfer wake the new one */
			old->wake = realloc(old->wake, (old->nwake + 1) * sizeof(*old->wake));
			old->wake[old->nwake] = transfer;
			old->nwake++;
		}
	}

	transfer_list_push_front(pending, transfer);

	if (!transfer->nwait)
	{
		_STARPU_DEBUG("transfer %p waits for nobody, starting\n", transfer);
		MSG_process_create("transfer task", transfer_execute, transfer, MSG_get_host_by_name("MAIN"));
	}
}

/* Data transfer issued by StarPU */
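/* The copy is modelled as a two-host parallel task that carries only
 * communication (the "size" bytes from the source host to the destination
 * host).  When a data request is given, the transfer completes asynchronously
 * and -EAGAIN is returned immediately; otherwise we block on a local
 * condition variable until the simulated copy has finished. */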
int _starpu_simgrid_transfer(size_t size, unsigned src_node, unsigned dst_node, struct _starpu_data_request *req)
{
	msg_task_t task;
	msg_host_t *hosts = calloc(2, sizeof(*hosts));
	double *computation = calloc(2, sizeof(*computation));
	double *communication = calloc(4, sizeof(*communication));
	starpu_pthread_mutex_t mutex;
	starpu_pthread_cond_t cond;
	unsigned finished;

	hosts[0] = _starpu_simgrid_memory_node_get_host(src_node);
	hosts[1] = _starpu_simgrid_memory_node_get_host(dst_node);
	STARPU_ASSERT(hosts[0] != hosts[1]);
	communication[1] = size;

	task = MSG_parallel_task_create("copy", 2, hosts, computation, communication, NULL);

	struct transfer *transfer = transfer_new();

	_STARPU_DEBUG("creating transfer %p for %lu bytes\n", transfer, (unsigned long) size);

	transfer->task = task;
	transfer->src_node = src_node;
	transfer->dst_node = dst_node;
	transfer->run_node = _starpu_memory_node_get_local_key();

	if (req)
	{
		transfer->finished = &req->async_channel.event.finished;
		transfer->mutex = &req->async_channel.event.mutex;
		transfer->cond = &req->async_channel.event.cond;
	}
	else
	{
		transfer->finished = &finished;
		transfer->mutex = &mutex;
		transfer->cond = &cond;
	}

	*transfer->finished = 0;
	STARPU_PTHREAD_MUTEX_INIT(transfer->mutex, NULL);
	STARPU_PTHREAD_COND_INIT(transfer->cond, NULL);
	transfer->wake = NULL;
	transfer->nwake = 0;
	transfer->nwait = 0;

	if (req)
		_STARPU_TRACE_START_DRIVER_COPY_ASYNC(src_node, dst_node);

	/* Sleep 10µs to account for the GPU transfer queueing overhead */
	MSG_process_sleep(0.000010);
	transfer_submit(transfer);
	/* Note: from here on, the transfer may already have been freed */

	if (req)
	{
		_STARPU_TRACE_END_DRIVER_COPY_ASYNC(src_node, dst_node);
		_STARPU_TRACE_DATA_COPY(src_node, dst_node, size);
		return -EAGAIN;
	}
	else
	{
		/* this transfer is not associated with a request, so it is synchronous */
		STARPU_PTHREAD_MUTEX_LOCK(&mutex);
		while (!finished)
			STARPU_PTHREAD_COND_WAIT(&cond, &mutex);
		STARPU_PTHREAD_MUTEX_UNLOCK(&mutex);
		return 0;
	}
}
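
/* Body of the MSG process used to simulate a StarPU thread: copy the argument
 * structure, free it, and run the requested function. */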
int
_starpu_simgrid_thread_start(int argc STARPU_ATTRIBUTE_UNUSED, char *argv[] STARPU_ATTRIBUTE_UNUSED)
{
	struct _starpu_pthread_args *_args = MSG_process_get_data(MSG_process_self());
	struct _starpu_pthread_args args = *_args;
	free(_args);
	args.f(args.arg);
	return 0;
}
#endif