pipeline.c

/* StarPU --- Runtime system for heterogeneous multicore architectures.
 *
 * Copyright (C) 2012, 2013, 2014 CNRS
 * Copyright (C) 2012, 2014 Université de Bordeaux
 *
 * StarPU is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation; either version 2.1 of the License, or (at
 * your option) any later version.
 *
 * StarPU is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * See the GNU Lesser General Public License in COPYING.LGPL for more details.
 */
/*
 * This example shows how to submit a pipeline to StarPU with limited buffer
 * use, while avoiding submitting all the tasks at once.
 *
 * This is a dumb example pipeline, depicted here:
 *
 * x--\
 *     >==axpy-->sum
 * y--/
 *
 * x and y produce vectors full of x and y values, axpy adds them, and sum
 * sums it up. We thus have 3 temporary buffers.
 */
#include <starpu.h>
#include <stdint.h>
#include <semaphore.h>
#include <common/blas.h>
#ifdef STARPU_USE_CUDA
#include <cublas.h>
#endif
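
/* Printing helper that stays silent when the STARPU_SSILENT environment
 * variable is set. */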
#define FPRINTF(ofile, fmt, ...) do { if (!getenv("STARPU_SSILENT")) {fprintf(ofile, fmt, ## __VA_ARGS__); }} while(0)

/* Vector size */
#ifdef STARPU_QUICK_CHECK
#define N 16
#else
#define N 1048576
#endif

/* Number of iteration buffers, and thus overlapped pipeline iterations */
#define K 16

/* Number of concurrently submitted pipeline iterations */
#define C 64

/* Number of iterations */
#define L 256
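
/*
 * Throttling scheme: iteration l reuses buffer slot l%K, and the callback of
 * its final "sum" task posts sems[l%C]; iteration l+C waits on that semaphore
 * before being submitted. At most C iterations are thus in flight at any
 * time, while only K sets of buffers are ever registered.
 */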

/* X / Y codelets */
void pipeline_cpu_x(void *descr[], void *args)
{
	float x;
	float *val = (float *) STARPU_VECTOR_GET_PTR(descr[0]);
	int n = STARPU_VECTOR_GET_NX(descr[0]);
	int i;
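	/* Retrieve the scalar that was packed with STARPU_VALUE at submission time. */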
	starpu_codelet_unpack_args(args, &x);
	for (i = 0; i < n ; i++)
		val[i] = x;
}

static struct starpu_perfmodel pipeline_model_x =
{
	.type = STARPU_HISTORY_BASED,
	.symbol = "pipeline_model_x"
};

static struct starpu_codelet pipeline_codelet_x =
{
	.cpu_funcs = {pipeline_cpu_x},
	.nbuffers = 1,
	.modes = {STARPU_W},
	.model = &pipeline_model_x
};

/* axpy codelets */
void pipeline_cpu_axpy(void *descr[], void *arg)
{
	float *x = (float *) STARPU_VECTOR_GET_PTR(descr[0]);
	float *y = (float *) STARPU_VECTOR_GET_PTR(descr[1]);
	int n = STARPU_VECTOR_GET_NX(descr[0]);
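	/* y <- 1*x + y, via the BLAS SAXPY wrapper from common/blas.h */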
	STARPU_SAXPY(n, 1., x, 1, y, 1);
}

#ifdef STARPU_USE_CUDA
void pipeline_cublas_axpy(void *descr[], void *arg)
{
	float *x = (float *) STARPU_VECTOR_GET_PTR(descr[0]);
	float *y = (float *) STARPU_VECTOR_GET_PTR(descr[1]);
	int n = STARPU_VECTOR_GET_NX(descr[0]);
	cublasSaxpy(n, 1., x, 1, y, 1);
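	/* starpu_cublas_init() made cuBLAS work on StarPU's per-worker stream,
	 * so the call above is asynchronous: wait for it before returning. */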
	cudaStreamSynchronize(starpu_cuda_get_local_stream());
}
#endif

static struct starpu_perfmodel pipeline_model_axpy =
{
	.type = STARPU_HISTORY_BASED,
	.symbol = "pipeline_model_axpy"
};

static struct starpu_codelet pipeline_codelet_axpy =
{
	.cpu_funcs = {pipeline_cpu_axpy},
#ifdef STARPU_USE_CUDA
	.cuda_funcs = {pipeline_cublas_axpy},
#endif
	.nbuffers = 2,
	.modes = {STARPU_R, STARPU_RW},
	.model = &pipeline_model_axpy
};

/* sum codelet */
void pipeline_cpu_sum(void *descr[], void *_args)
{
	float *x = (float *) STARPU_VECTOR_GET_PTR(descr[0]);
	int n = STARPU_VECTOR_GET_NX(descr[0]);
	float y;
	y = STARPU_SASUM(n, x, 1);
	FPRINTF(stderr, "CPU finished with %f\n", y);
}

#ifdef STARPU_USE_CUDA
void pipeline_cublas_sum(void *descr[], void *arg)
{
	float *x = (float *) STARPU_VECTOR_GET_PTR(descr[0]);
	int n = STARPU_VECTOR_GET_NX(descr[0]);
	float y;
	y = cublasSasum(n, x, 1);
	cudaStreamSynchronize(starpu_cuda_get_local_stream());
	FPRINTF(stderr, "CUBLAS finished with %f\n", y);
}
#endif

static struct starpu_perfmodel pipeline_model_sum =
{
	.type = STARPU_HISTORY_BASED,
	.symbol = "pipeline_model_sum"
};

static struct starpu_codelet pipeline_codelet_sum =
{
	.cpu_funcs = {pipeline_cpu_sum},
#ifdef STARPU_USE_CUDA
	.cuda_funcs = {pipeline_cublas_sum},
#endif
	.nbuffers = 1,
	.modes = {STARPU_R},
	.model = &pipeline_model_sum
};

int main(void)
{
	int ret = 0;
	int k, l, c;
	starpu_data_handle_t buffersX[K], buffersY[K], buffersP[K];
	sem_t sems[C];

	ret = starpu_init(NULL);
	if (ret == -ENODEV)
		exit(77);
	STARPU_CHECK_RETURN_VALUE(ret, "starpu_init");
	starpu_cublas_init();

	/* Initialize the K sets of temporary buffers. No need to allocate them
	 * ourselves, since the X and Y kernels fill in the initial values. */
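	/* Registering with a home node of -1 and a NULL pointer lets StarPU
	 * allocate each buffer lazily on whichever memory node first needs it. */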
	for (k = 0; k < K; k++)
	{
		starpu_vector_data_register(&buffersX[k], -1, 0, N, sizeof(float));
		starpu_vector_data_register(&buffersY[k], -1, 0, N, sizeof(float));
		starpu_vector_data_register(&buffersP[k], -1, 0, N, sizeof(float));
	}

	/* Initialize the semaphores used to wait for the iterations submitted
	 * C steps earlier */
	for (c = 0; c < C; c++)
		sem_init(&sems[c], 0, 0);

	/* Submit the L pipeline iterations */
	for (l = 0; l < L; l++)
	{
		float x = l;
		float y = 2*l;

		/* First wait for the iteration submitted C steps earlier to finish */
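		/* sems[l%C] is posted by the callback of the "sum" task of iteration l-C. */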
		if (l >= C)
			sem_wait(&sems[l%C]);

		/* Now submit the next iteration */
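		/* Each submission also carries a STARPU_TAG_ONLY identifier, which
		 * names the task in traces and debugging output without introducing
		 * tag dependencies. */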
		ret = starpu_task_insert(&pipeline_codelet_x,
				STARPU_W, buffersX[l%K],
				STARPU_VALUE, &x, sizeof(x),
				STARPU_TAG_ONLY, (starpu_tag_t) (100*l),
				0);
		if (ret == -ENODEV) goto enodev;
		STARPU_CHECK_RETURN_VALUE(ret, "starpu_task_insert x");

		ret = starpu_task_insert(&pipeline_codelet_x,
				STARPU_W, buffersY[l%K],
				STARPU_VALUE, &y, sizeof(y),
				STARPU_TAG_ONLY, (starpu_tag_t) (100*l+1),
				0);
		if (ret == -ENODEV) goto enodev;
		STARPU_CHECK_RETURN_VALUE(ret, "starpu_task_insert y");

		ret = starpu_task_insert(&pipeline_codelet_axpy,
				STARPU_R, buffersX[l%K],
				STARPU_RW, buffersY[l%K],
				STARPU_TAG_ONLY, (starpu_tag_t) l,
				0);
		if (ret == -ENODEV) goto enodev;
		STARPU_CHECK_RETURN_VALUE(ret, "starpu_task_insert axpy");
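
		/* When the sum task completes, its callback posts sems[l%C],
		 * unblocking the submission of iteration l+C. */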
		ret = starpu_task_insert(&pipeline_codelet_sum,
				STARPU_R, buffersY[l%K],
				STARPU_CALLBACK_WITH_ARG, (void (*)(void*))sem_post, &sems[l%C],
				STARPU_TAG_ONLY, (starpu_tag_t) l,
				0);
		if (ret == -ENODEV) goto enodev;
		STARPU_CHECK_RETURN_VALUE(ret, "starpu_task_insert sum");
	}

	starpu_task_wait_for_all();

enodev:
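	/* Unregistering a handle waits for the tasks still using it to complete. */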
	for (k = 0; k < K; k++)
	{
		starpu_data_unregister(buffersX[k]);
		starpu_data_unregister(buffersY[k]);
		starpu_data_unregister(buffersP[k]);
	}
	starpu_shutdown();

	return (ret == -ENODEV ? 77 : 0);
}