pipeline.c

/* StarPU --- Runtime system for heterogeneous multicore architectures.
 *
 * Copyright (C) 2010, 2011, 2012 Centre National de la Recherche Scientifique
 * Copyright (C) 2010-2012 Université de Bordeaux 1
 *
 * StarPU is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation; either version 2.1 of the License, or (at
 * your option) any later version.
 *
 * StarPU is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * See the GNU Lesser General Public License in COPYING.LGPL for more details.
 */

/*
 * This example shows how to submit a pipeline to StarPU with limited buffer
 * use, while avoiding submitting all the tasks at once.
 *
 * This is a dumb example pipeline, depicted here:
 *
 * x--\
 *     >==axpy-->sum
 * y--/
 *
 * x and y produce vectors full of x and y values, axpy adds x to y, and sum
 * sums up the result. We thus have 3 temporary buffers per pipeline iteration.
 */
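
/*
 * Concretely, for pipeline iteration l below, x is filled with l and y with
 * 2*l; axpy then turns each element of y into l + 2*l = 3*l, so the sum
 * kernel reports N * 3 * l.
 */
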
#include <starpu.h>
#include <stdint.h>
#include <semaphore.h>
#include <common/blas.h>

#ifdef STARPU_USE_CUDA
#include <cublas.h>
#endif

#define FPRINTF(ofile, fmt, args...) do { if (!getenv("STARPU_SSILENT")) {fprintf(ofile, fmt, ##args); }} while(0)

/* Vector size */
#ifdef STARPU_SLOW_MACHINE
#define N 16
#else
#define N 1048576
#endif

/* Number of iteration buffers, and thus overlapped pipeline iterations */
#define K 16

/* Number of concurrently submitted pipeline iterations */
#define C 64

/* Number of iterations */
#define L 256
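
/*
 * The submission loop below reuses the K buffer sets round-robin (index l%K),
 * and StarPU's implicit data dependencies on the reused handles order the
 * iterations that share a set. On top of that, the C semaphores keep at most
 * C iterations submitted but not yet completed.
 */
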
/* X / Y codelets */
void pipeline_cpu_x(void *descr[], void *args)
{
	float x;
	float *val = (float *) STARPU_VECTOR_GET_PTR(descr[0]);
	int n = STARPU_VECTOR_GET_NX(descr[0]);
	int i;

	starpu_codelet_unpack_args(args, &x);
	for (i = 0; i < n ; i++)
		val[i] = x;
}

static struct starpu_perfmodel pipeline_model_x =
{
	.type = STARPU_HISTORY_BASED,
	.symbol = "pipeline_model_x"
};

static struct starpu_codelet pipeline_codelet_x =
{
	.where = STARPU_CPU,
	.cpu_funcs = {pipeline_cpu_x, NULL},
	.nbuffers = 1,
	.modes = {STARPU_W},
	.model = &pipeline_model_x
};
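
/*
 * Note: this single write-only codelet produces both the x and the y vectors
 * in main(); with STARPU_W the previous buffer contents are never transferred,
 * only overwritten.
 */
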
/* axpy codelets */
void pipeline_cpu_axpy(void *descr[], void *arg)
{
	float *x = (float *) STARPU_VECTOR_GET_PTR(descr[0]);
	float *y = (float *) STARPU_VECTOR_GET_PTR(descr[1]);
	int n = STARPU_VECTOR_GET_NX(descr[0]);

	SAXPY(n, 1., x, 1, y, 1);
}

#ifdef STARPU_USE_CUDA
void pipeline_cublas_axpy(void *descr[], void *arg)
{
	float *x = (float *) STARPU_VECTOR_GET_PTR(descr[0]);
	float *y = (float *) STARPU_VECTOR_GET_PTR(descr[1]);
	int n = STARPU_VECTOR_GET_NX(descr[0]);

	cublasSaxpy(n, 1., x, 1, y, 1);
}
#endif
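
/*
 * Both implementations compute y := 1*x + y in place; listing a CPU and a
 * CUDA function in the codelet below lets the scheduler pick either one at
 * runtime.
 */
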
static struct starpu_perfmodel pipeline_model_axpy =
{
	.type = STARPU_HISTORY_BASED,
	.symbol = "pipeline_model_axpy"
};

static struct starpu_codelet pipeline_codelet_axpy =
{
	.where = STARPU_CPU | STARPU_CUDA,
	.cpu_funcs = {pipeline_cpu_axpy, NULL},
	.cuda_funcs = {pipeline_cublas_axpy, NULL},
	.nbuffers = 2,
	.modes = {STARPU_R, STARPU_RW},
	.model = &pipeline_model_axpy
};

/* sum codelet */
void pipeline_cpu_sum(void *descr[], void *_args)
{
	float *x = (float *) STARPU_VECTOR_GET_PTR(descr[0]);
	int n = STARPU_VECTOR_GET_NX(descr[0]);
	float y;

	y = SASUM(n, x, 1);
	FPRINTF(stderr, "CPU finished with %f\n", y);
}

#ifdef STARPU_USE_CUDA
void pipeline_cublas_sum(void *descr[], void *arg)
{
	float *x = (float *) STARPU_VECTOR_GET_PTR(descr[0]);
	int n = STARPU_VECTOR_GET_NX(descr[0]);
	float y;

	y = cublasSasum(n, x, 1);
	FPRINTF(stderr, "CUBLAS finished with %f\n", y);
}
#endif

static struct starpu_perfmodel pipeline_model_sum =
{
	.type = STARPU_HISTORY_BASED,
	.symbol = "pipeline_model_sum"
};

static struct starpu_codelet pipeline_codelet_sum =
{
	.where = STARPU_CPU | STARPU_CUDA,
	.cpu_funcs = {pipeline_cpu_sum, NULL},
	.cuda_funcs = {pipeline_cublas_sum, NULL},
	.nbuffers = 1,
	.modes = {STARPU_R},
	.model = &pipeline_model_sum
};

int main(void)
{
	int ret;
	int k, l, c;
	starpu_data_handle_t buffersX[K], buffersY[K], buffersP[K];
	sem_t sems[C];

	ret = starpu_init(NULL);
	if (ret == -ENODEV)
		exit(77);
	STARPU_CHECK_RETURN_VALUE(ret, "starpu_init");

	starpu_helper_cublas_init();

	/* Initialize the K sets of temporary buffers. No need to allocate them
	 * ourselves, since the X and Y kernels will fill in the initial values. */
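	/* Registering with home node -1 and a NULL pointer tells StarPU that the
	 * data has no initial value: StarPU allocates the buffers on whatever
	 * memory node ends up using them, with no initial transfer. */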
	for (k = 0; k < K; k++) {
		starpu_vector_data_register(&buffersX[k], -1, 0, N, sizeof(float));
		starpu_vector_data_register(&buffersY[k], -1, 0, N, sizeof(float));
		starpu_vector_data_register(&buffersP[k], -1, 0, N, sizeof(float));
	}

	/* Initialize the semaphores used to wait for the C previously submitted
	 * concurrent iterations */
	for (c = 0; c < C; c++)
		sem_init(&sems[c], 0, 0);
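	/* Each semaphore slot l%C is posted by the callback of iteration l's sum
	 * task, which allows iteration l+C to be submitted below. */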
	/* Submit the L pipeline iterations */
	for (l = 0; l < L; l++) {
		int ret;
		float x = l;
		float y = 2*l;

		/* First wait for the iteration submitted C steps earlier to
		 * finish, so that at most C iterations are in flight */
		if (l >= C)
			sem_wait(&sems[l%C]);

		/* Now submit the next stage */
		ret = starpu_insert_task(&pipeline_codelet_x,
				STARPU_W, buffersX[l%K],
				STARPU_VALUE, &x, sizeof(x),
				0);
		STARPU_CHECK_RETURN_VALUE(ret, "starpu_insert_task x");

		ret = starpu_insert_task(&pipeline_codelet_x,
				STARPU_W, buffersY[l%K],
				STARPU_VALUE, &y, sizeof(y),
				0);
		STARPU_CHECK_RETURN_VALUE(ret, "starpu_insert_task y");

		ret = starpu_insert_task(&pipeline_codelet_axpy,
				STARPU_R, buffersX[l%K],
				STARPU_RW, buffersY[l%K],
				0);
		STARPU_CHECK_RETURN_VALUE(ret, "starpu_insert_task axpy");

		ret = starpu_insert_task(&pipeline_codelet_sum,
				STARPU_R, buffersY[l%K],
				STARPU_CALLBACK_WITH_ARG, (void (*)(void*))sem_post, &sems[l%C],
				0);
		STARPU_CHECK_RETURN_VALUE(ret, "starpu_insert_task sum");
	}

	starpu_task_wait_for_all();

	/* Release the temporary buffers and shut everything down */
	for (k = 0; k < K; k++) {
		starpu_data_unregister(buffersX[k]);
		starpu_data_unregister(buffersY[k]);
		starpu_data_unregister(buffersP[k]);
	}
	starpu_helper_cublas_shutdown();
	starpu_shutdown();

	return 0;
}