/* mpi_like_async.c */
  1. /* StarPU --- Runtime system for heterogeneous multicore architectures.
  2. *
  3. * Copyright (C) 2010 Université de Bordeaux 1
  4. * Copyright (C) 2010, 2011 Centre National de la Recherche Scientifique
  5. *
  6. * StarPU is free software; you can redistribute it and/or modify
  7. * it under the terms of the GNU Lesser General Public License as published by
  8. * the Free Software Foundation; either version 2.1 of the License, or (at
  9. * your option) any later version.
  10. *
  11. * StarPU is distributed in the hope that it will be useful, but
  12. * WITHOUT ANY WARRANTY; without even the implied warranty of
  13. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
  14. *
  15. * See the GNU Lesser General Public License in COPYING.LGPL for more details.
  16. */
  17. #include <starpu.h>
  18. #include <pthread.h>
  19. #include "../common/helper.h"
  20. #define NTHREADS 16
  21. #define NITER 128
  22. //#define DEBUG_MESSAGES 1
  23. //static pthread_cond_t cond;
  24. //static pthread_mutex_t mutex;
/* Per-thread state for the ring exchange: each thread owns a value, a StarPU
 * handle wrapping that value, and a one-slot mailbox (recv_buf/recv_flag,
 * protected by recv_mutex) into which its predecessor deposits messages. */
struct thread_data {
	unsigned index;                /* position of this thread in the ring (0..NTHREADS-1) */
	unsigned val;                  /* the value passed around the ring and incremented */
	starpu_data_handle handle;     /* StarPU handle registered on &val */
	pthread_t thread;
	pthread_mutex_t recv_mutex;    /* protects recv_flag and recv_buf */
	unsigned recv_flag; // set when a message is received
	unsigned recv_buf;             /* payload deposited by the predecessor */
	struct thread_data *neighbour; /* successor in the ring: problem_data[(index+1)%NTHREADS] */
};
/* A polled request: the progress thread repeatedly calls test_func(test_arg)
 * until it returns non-zero, then frees the request. */
struct data_req {
	int (*test_func)(void *);  /* returns non-zero once the request has completed */
	void *test_arg;
	struct data_req *next;     /* singly-linked list link */
};
/* Shared queue of pending requests serviced by the progress thread. */
static pthread_mutex_t data_req_mutex;    /* protects data_req_list and progress_thread_running */
static pthread_cond_t data_req_cond;      /* signalled when the list or the running flag changes */
struct data_req *data_req_list;           /* head of the request list (LIFO push, FIFO re-queue) */
unsigned progress_thread_running;         /* set by the progress thread once it is up */
static struct thread_data problem_data[NTHREADS];
  45. /* We implement some ring transfer, every thread will try to receive a piece of
  46. * data from its neighbour and increment it before transmitting it to its
  47. * successor. */
#ifdef STARPU_USE_CUDA
/* CUDA implementation of the unsigned-increment kernel (defined in a separate .cu file). */
void cuda_codelet_unsigned_inc(void *descr[], __attribute__ ((unused)) void *cl_arg);
#endif
  51. static void increment_handle_cpu_kernel(void *descr[], void *cl_arg __attribute__((unused)))
  52. {
  53. unsigned *val = (unsigned *)STARPU_VARIABLE_GET_PTR(descr[0]);
  54. *val += 1;
  55. // FPRINTF(stderr, "VAL %d (&val = %p)\n", *val, val);
  56. }
/* Codelet incrementing a single STARPU_RW variable; may run on CPU or CUDA. */
static starpu_codelet increment_handle_cl = {
	.where = STARPU_CPU|STARPU_CUDA,
	.cpu_func = increment_handle_cpu_kernel,
#ifdef STARPU_USE_CUDA
	.cuda_func = cuda_codelet_unsigned_inc,
#endif
	.nbuffers = 1
};
  65. static void increment_handle_async(struct thread_data *thread_data)
  66. {
  67. struct starpu_task *task = starpu_task_create();
  68. task->cl = &increment_handle_cl;
  69. task->buffers[0].handle = thread_data->handle;
  70. task->buffers[0].mode = STARPU_RW;
  71. task->detach = 1;
  72. task->destroy = 1;
  73. int ret = starpu_task_submit(task);
  74. STARPU_ASSERT(!ret);
  75. }
  76. static int test_recv_handle_async(void *arg)
  77. {
  78. // FPRINTF(stderr, "test_recv_handle_async\n");
  79. int ret;
  80. struct thread_data *thread_data = (struct thread_data *) arg;
  81. PTHREAD_MUTEX_LOCK(&thread_data->recv_mutex);
  82. ret = (thread_data->recv_flag == 1);
  83. if (ret)
  84. {
  85. thread_data->recv_flag = 0;
  86. thread_data->val = thread_data->recv_buf;
  87. }
  88. PTHREAD_MUTEX_UNLOCK(&thread_data->recv_mutex);
  89. if (ret)
  90. {
  91. #ifdef DEBUG_MESSAGES
  92. FPRINTF(stderr, "Thread %d received value %d from thread %d\n",
  93. thread_data->index, thread_data->val, (thread_data->index - 1)%NTHREADS);
  94. #endif
  95. starpu_data_release(thread_data->handle);
  96. }
  97. return ret;
  98. }
  99. static void recv_handle_async(void *_thread_data)
  100. {
  101. struct thread_data *thread_data = (struct thread_data *) _thread_data;
  102. struct data_req *req = (struct data_req *) malloc(sizeof(struct data_req));
  103. req->test_func = test_recv_handle_async;
  104. req->test_arg = thread_data;
  105. req->next = NULL;
  106. PTHREAD_MUTEX_LOCK(&data_req_mutex);
  107. req->next = data_req_list;
  108. data_req_list = req;
  109. PTHREAD_COND_SIGNAL(&data_req_cond);
  110. PTHREAD_MUTEX_UNLOCK(&data_req_mutex);
  111. }
  112. static int test_send_handle_async(void *arg)
  113. {
  114. int ret;
  115. struct thread_data *thread_data = (struct thread_data *) arg;
  116. struct thread_data *neighbour_data = thread_data->neighbour;
  117. PTHREAD_MUTEX_LOCK(&neighbour_data->recv_mutex);
  118. ret = (neighbour_data->recv_flag == 0);
  119. PTHREAD_MUTEX_UNLOCK(&neighbour_data->recv_mutex);
  120. if (ret)
  121. {
  122. #ifdef DEBUG_MESSAGES
  123. FPRINTF(stderr, "Thread %d sends value %d to thread %d\n", thread_data->index, thread_data->val, neighbour_data->index);
  124. #endif
  125. starpu_data_release(thread_data->handle);
  126. }
  127. return ret;
  128. }
  129. static void send_handle_async(void *_thread_data)
  130. {
  131. struct thread_data *thread_data = (struct thread_data *) _thread_data;
  132. struct thread_data *neighbour_data = thread_data->neighbour;
  133. // FPRINTF(stderr, "send_handle_async\n");
  134. /* send the message */
  135. PTHREAD_MUTEX_LOCK(&neighbour_data->recv_mutex);
  136. neighbour_data->recv_buf = thread_data->val;
  137. neighbour_data->recv_flag = 1;
  138. PTHREAD_MUTEX_UNLOCK(&neighbour_data->recv_mutex);
  139. struct data_req *req = (struct data_req *) malloc(sizeof(struct data_req));
  140. req->test_func = test_send_handle_async;
  141. req->test_arg = thread_data;
  142. req->next = NULL;
  143. PTHREAD_MUTEX_LOCK(&data_req_mutex);
  144. req->next = data_req_list;
  145. data_req_list = req;
  146. PTHREAD_COND_SIGNAL(&data_req_cond);
  147. PTHREAD_MUTEX_UNLOCK(&data_req_mutex);
  148. }
/* Progress-thread main loop: service the shared request list.  The head
 * request is detached, its test function is run with the lock dropped, and
 * an unfinished request is re-queued at the tail so the others get a turn.
 * Terminates when main() clears progress_thread_running and signals the
 * condition. */
static void *progress_func(void *arg)
{
	PTHREAD_MUTEX_LOCK(&data_req_mutex);

	/* tell main() that the progress thread is up */
	progress_thread_running = 1;
	PTHREAD_COND_SIGNAL(&data_req_cond);

	while (progress_thread_running) {
		struct data_req *req;

		if (data_req_list == NULL)
			/* nothing to poll: sleep until a request is queued or
			 * we are told to stop (mutex released while waiting) */
			PTHREAD_COND_WAIT(&data_req_cond, &data_req_mutex);

		req = data_req_list;

		if (req)
		{
			/* detach the head request and run its test outside the lock */
			data_req_list = req->next;
			req->next = NULL;

			PTHREAD_MUTEX_UNLOCK(&data_req_mutex);

			int ret = req->test_func(req->test_arg);

			if (ret)
			{
				/* request completed: discard it */
				free(req);
				PTHREAD_MUTEX_LOCK(&data_req_mutex);
			}
			else {
				/* ret = 0 : the request is not finished, we put it back at the end of the list */
				PTHREAD_MUTEX_LOCK(&data_req_mutex);

				struct data_req *req_aux = data_req_list;

				if (!req_aux)
				{
					/* The list is empty */
					data_req_list = req;
				}
				else {
					/* walk to the tail and append */
					while (req_aux)
					{
						if (req_aux->next == NULL)
						{
							req_aux->next = req;
							break;
						}

						req_aux = req_aux->next;
					}
				}
			}
		}
	}

	PTHREAD_MUTEX_UNLOCK(&data_req_mutex);

	return NULL;
}
  196. static void *thread_func(void *arg)
  197. {
  198. unsigned iter;
  199. struct thread_data *thread_data = (struct thread_data *) arg;
  200. unsigned index = thread_data->index;
  201. int ret;
  202. starpu_variable_data_register(&thread_data->handle, 0, (uintptr_t)&thread_data->val, sizeof(unsigned));
  203. for (iter = 0; iter < NITER; iter++)
  204. {
  205. /* The first thread initiates the first transfer */
  206. if (!((index == 0) && (iter == 0)))
  207. {
  208. starpu_data_acquire_cb(
  209. thread_data->handle, STARPU_W,
  210. recv_handle_async, thread_data
  211. );
  212. }
  213. increment_handle_async(thread_data);
  214. if (!((index == (NTHREADS - 1)) && (iter == (NITER - 1))))
  215. {
  216. starpu_data_acquire_cb(
  217. thread_data->handle, STARPU_R,
  218. send_handle_async, thread_data
  219. );
  220. }
  221. }
  222. ret = starpu_task_wait_for_all();
  223. STARPU_CHECK_RETURN_VALUE(ret, "starpu_task_wait_for_all");
  224. return NULL;
  225. }
  226. int main(int argc, char **argv)
  227. {
  228. int ret;
  229. void *retval;
  230. ret = starpu_init(NULL);
  231. STARPU_CHECK_RETURN_VALUE(ret, "starpu_init");
  232. /* Create a thread to perform blocking calls */
  233. pthread_t progress_thread;
  234. PTHREAD_MUTEX_INIT(&data_req_mutex, NULL);
  235. PTHREAD_COND_INIT(&data_req_cond, NULL);
  236. data_req_list = NULL;
  237. progress_thread_running = 0;
  238. unsigned t;
  239. for (t = 0; t < NTHREADS; t++)
  240. {
  241. problem_data[t].index = t;
  242. problem_data[t].val = 0;
  243. PTHREAD_MUTEX_INIT(&problem_data[t].recv_mutex, NULL);
  244. problem_data[t].recv_flag = 0;
  245. problem_data[t].neighbour = &problem_data[(t+1)%NTHREADS];
  246. }
  247. pthread_create(&progress_thread, NULL, progress_func, NULL);
  248. PTHREAD_MUTEX_LOCK(&data_req_mutex);
  249. while (!progress_thread_running)
  250. PTHREAD_COND_WAIT(&data_req_cond, &data_req_mutex);
  251. PTHREAD_MUTEX_UNLOCK(&data_req_mutex);
  252. for (t = 0; t < NTHREADS; t++)
  253. {
  254. ret = pthread_create(&problem_data[t].thread, NULL, thread_func, &problem_data[t]);
  255. STARPU_ASSERT(!ret);
  256. }
  257. for (t = 0; t < NTHREADS; t++)
  258. {
  259. ret = pthread_join(problem_data[t].thread, &retval);
  260. STARPU_ASSERT(!ret);
  261. STARPU_ASSERT(retval == NULL);
  262. }
  263. PTHREAD_MUTEX_LOCK(&data_req_mutex);
  264. progress_thread_running = 0;
  265. PTHREAD_COND_SIGNAL(&data_req_cond);
  266. PTHREAD_MUTEX_UNLOCK(&data_req_mutex);
  267. ret = pthread_join(progress_thread, &retval);
  268. STARPU_ASSERT(retval == NULL);
  269. /* We check that the value in the "last" thread is valid */
  270. starpu_data_handle last_handle = problem_data[NTHREADS - 1].handle;
  271. starpu_data_acquire(last_handle, STARPU_R);
  272. if (problem_data[NTHREADS - 1].val != (NTHREADS * NITER))
  273. {
  274. FPRINTF(stderr, "Final value : %u should be %d\n", problem_data[NTHREADS - 1].val, (NTHREADS * NITER));
  275. STARPU_ABORT();
  276. }
  277. starpu_data_release(last_handle);
  278. starpu_shutdown();
  279. return 0;
  280. }