increment_redux_v2.c

/* StarPU --- Runtime system for heterogeneous multicore architectures.
 *
 * Copyright (C) 2011-2014  Université de Bordeaux 1
 *
 * StarPU is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation; either version 2.1 of the License, or (at
 * your option) any later version.
 *
 * StarPU is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * See the GNU Lesser General Public License in COPYING.LGPL for more details.
 */

#include <config.h>
#include <starpu.h>
#include "../helper.h"

/* Counter incremented by every task, and its StarPU data handle */
static unsigned var = 0;
static starpu_data_handle_t handle;

/*
 * Reduction methods
 */
#ifdef STARPU_USE_CUDA
static void redux_cuda_kernel(void *descr[], void *arg)
{
	STARPU_SKIP_IF_VALGRIND;

	unsigned *dst = (unsigned *)STARPU_VARIABLE_GET_PTR(descr[0]);
	unsigned *src = (unsigned *)STARPU_VARIABLE_GET_PTR(descr[1]);

	unsigned host_dst, host_src;

	/* This is a dummy technique of course */
	cudaMemcpyAsync(&host_src, src, sizeof(unsigned), cudaMemcpyDeviceToHost, starpu_cuda_get_local_stream());
	cudaMemcpyAsync(&host_dst, dst, sizeof(unsigned), cudaMemcpyDeviceToHost, starpu_cuda_get_local_stream());
	cudaStreamSynchronize(starpu_cuda_get_local_stream());

	host_dst += host_src;

	cudaMemcpyAsync(dst, &host_dst, sizeof(unsigned), cudaMemcpyHostToDevice, starpu_cuda_get_local_stream());
}

static void neutral_cuda_kernel(void *descr[], void *arg)
{
	STARPU_SKIP_IF_VALGRIND;

	unsigned *dst = (unsigned *)STARPU_VARIABLE_GET_PTR(descr[0]);

	/* This is a dummy technique of course */
	unsigned host_dst = 0;
	cudaMemcpyAsync(dst, &host_dst, sizeof(unsigned), cudaMemcpyHostToDevice, starpu_cuda_get_local_stream());
}
#endif

#ifdef STARPU_USE_OPENCL
static void redux_opencl_kernel(void *descr[], void *arg)
{
	STARPU_SKIP_IF_VALGRIND;

	unsigned h_dst, h_src;

	cl_mem d_dst = (cl_mem)STARPU_VARIABLE_GET_PTR(descr[0]);
	cl_mem d_src = (cl_mem)STARPU_VARIABLE_GET_PTR(descr[1]);

	cl_command_queue queue;
	starpu_opencl_get_current_queue(&queue);

	/* This is a dummy technique of course */
	clEnqueueReadBuffer(queue, d_dst, CL_TRUE, 0, sizeof(unsigned), (void *)&h_dst, 0, NULL, NULL);
	clEnqueueReadBuffer(queue, d_src, CL_TRUE, 0, sizeof(unsigned), (void *)&h_src, 0, NULL, NULL);

	h_dst += h_src;

	clEnqueueWriteBuffer(queue, d_dst, CL_TRUE, 0, sizeof(unsigned), (void *)&h_dst, 0, NULL, NULL);
	clFinish(queue);
}

static void neutral_opencl_kernel(void *descr[], void *arg)
{
	STARPU_SKIP_IF_VALGRIND;

	unsigned h_dst = 0;

	cl_mem d_dst = (cl_mem)STARPU_VARIABLE_GET_PTR(descr[0]);

	cl_command_queue queue;
	starpu_opencl_get_current_queue(&queue);

	clEnqueueWriteBuffer(queue, d_dst, CL_TRUE, 0, sizeof(unsigned), (void *)&h_dst, 0, NULL, NULL);
	clFinish(queue);
}
#endif

void redux_cpu_kernel(void *descr[], void *arg)
{
	STARPU_SKIP_IF_VALGRIND;

	unsigned *dst = (unsigned *)STARPU_VARIABLE_GET_PTR(descr[0]);
	unsigned *src = (unsigned *)STARPU_VARIABLE_GET_PTR(descr[1]);
	*dst = *dst + *src;
}

void neutral_cpu_kernel(void *descr[], void *arg)
{
	STARPU_SKIP_IF_VALGRIND;

	unsigned *dst = (unsigned *)STARPU_VARIABLE_GET_PTR(descr[0]);
	*dst = 0;
}
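
/* redux_cl combines a per-worker contribution into the destination value,
 * and neutral_cl initializes a per-worker copy to the neutral element (0)
 * before it accumulates any STARPU_REDUX contribution. */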
static struct starpu_codelet redux_cl =
{
#ifdef STARPU_USE_CUDA
	.cuda_funcs = {redux_cuda_kernel, NULL},
	.cuda_flags = {STARPU_CUDA_ASYNC},
#endif
#ifdef STARPU_USE_OPENCL
	.opencl_funcs = {redux_opencl_kernel, NULL},
#endif
	.cpu_funcs = {redux_cpu_kernel, NULL},
	.cpu_funcs_name = {"redux_cpu_kernel", NULL},
	.modes = {STARPU_RW, STARPU_R},
	.nbuffers = 2
};

static struct starpu_codelet neutral_cl =
{
#ifdef STARPU_USE_CUDA
	.cuda_funcs = {neutral_cuda_kernel, NULL},
	.cuda_flags = {STARPU_CUDA_ASYNC},
#endif
#ifdef STARPU_USE_OPENCL
	.opencl_funcs = {neutral_opencl_kernel, NULL},
#endif
	.cpu_funcs = {neutral_cpu_kernel, NULL},
	.cpu_funcs_name = {"neutral_cpu_kernel", NULL},
	.modes = {STARPU_W},
	.nbuffers = 1
};

/*
 * Increment codelet
 */
#ifdef STARPU_USE_OPENCL
/* dummy OpenCL implementation */
static void increment_opencl_kernel(void *descr[], void *cl_arg STARPU_ATTRIBUTE_UNUSED)
{
	STARPU_SKIP_IF_VALGRIND;

	cl_mem d_token = (cl_mem)STARPU_VARIABLE_GET_PTR(descr[0]);
	unsigned h_token;

	cl_command_queue queue;
	starpu_opencl_get_current_queue(&queue);

	clEnqueueReadBuffer(queue, d_token, CL_TRUE, 0, sizeof(unsigned), (void *)&h_token, 0, NULL, NULL);
	h_token++;
	clEnqueueWriteBuffer(queue, d_token, CL_TRUE, 0, sizeof(unsigned), (void *)&h_token, 0, NULL, NULL);
	clFinish(queue);
}
#endif

#ifdef STARPU_USE_CUDA
static void increment_cuda_kernel(void *descr[], void *arg)
{
	STARPU_SKIP_IF_VALGRIND;

	unsigned *tokenptr = (unsigned *)STARPU_VARIABLE_GET_PTR(descr[0]);
	unsigned host_token;

	/* This is a dummy technique of course */
	cudaMemcpyAsync(&host_token, tokenptr, sizeof(unsigned), cudaMemcpyDeviceToHost, starpu_cuda_get_local_stream());
	cudaStreamSynchronize(starpu_cuda_get_local_stream());

	host_token++;

	cudaMemcpyAsync(tokenptr, &host_token, sizeof(unsigned), cudaMemcpyHostToDevice, starpu_cuda_get_local_stream());
}
#endif

void increment_cpu_kernel(void *descr[], void *arg)
{
	STARPU_SKIP_IF_VALGRIND;

	unsigned *tokenptr = (unsigned *)STARPU_VARIABLE_GET_PTR(descr[0]);
	*tokenptr = *tokenptr + 1;
}
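
/* Increment codelet accessing the variable in the usual STARPU_RW mode */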
static struct starpu_codelet increment_cl =
{
#ifdef STARPU_USE_CUDA
	.cuda_funcs = {increment_cuda_kernel, NULL},
	.cuda_flags = {STARPU_CUDA_ASYNC},
#endif
#ifdef STARPU_USE_OPENCL
	.opencl_funcs = {increment_opencl_kernel, NULL},
#endif
	.cpu_funcs = {increment_cpu_kernel, NULL},
	.cpu_funcs_name = {"increment_cpu_kernel", NULL},
	.nbuffers = 1,
	.modes = {STARPU_RW}
};
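
/* Same increment kernels, but the variable is accessed in STARPU_REDUX mode:
 * each worker increments a private copy initialized with neutral_cl, and
 * StarPU combines the copies with redux_cl when the data is next accessed
 * in a non-REDUX mode. */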
struct starpu_codelet increment_cl_redux =
{
#ifdef STARPU_USE_CUDA
	.cuda_funcs = {increment_cuda_kernel, NULL},
	.cuda_flags = {STARPU_CUDA_ASYNC},
#endif
#ifdef STARPU_USE_OPENCL
	.opencl_funcs = {increment_opencl_kernel, NULL},
#endif
	.cpu_funcs = {increment_cpu_kernel, NULL},
	.cpu_funcs_name = {"increment_cpu_kernel", NULL},
	.nbuffers = 1,
	.modes = {STARPU_REDUX}
};
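
/* Submit nloops batches of ntasks increments, mixing plain STARPU_RW accesses
 * (every tenth task) with STARPU_REDUX accesses, and check after each batch
 * that the variable holds ntasks * (loop + 1). */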
int main(int argc, char **argv)
{
	int ret;

	ret = starpu_initialize(NULL, &argc, &argv);
	if (ret == -ENODEV) return STARPU_TEST_SKIPPED;
	STARPU_CHECK_RETURN_VALUE(ret, "starpu_init");

	/* Register the counter and attach the reduction methods to its handle */
	starpu_variable_data_register(&handle, STARPU_MAIN_RAM, (uintptr_t)&var, sizeof(unsigned));
	starpu_data_set_reduction_methods(handle, &redux_cl, &neutral_cl);

#ifdef STARPU_QUICK_CHECK
	unsigned ntasks = 32;
	unsigned nloops = 4;
#else
	unsigned ntasks = 1024;
	unsigned nloops = 16;
#endif

	unsigned loop;
	unsigned t;

	for (loop = 0; loop < nloops; loop++)
	{
		for (t = 0; t < ntasks; t++)
		{
			struct starpu_task *task = starpu_task_create();

			/* Every tenth task uses the plain RW codelet, the
			 * others contribute through the REDUX codelet */
			if (t % 10 == 0)
			{
				task->cl = &increment_cl;
			}
			else
			{
				task->cl = &increment_cl_redux;
			}

			task->handles[0] = handle;

			ret = starpu_task_submit(task);
			if (ret == -ENODEV) goto enodev;
			STARPU_CHECK_RETURN_VALUE(ret, "starpu_task_submit");
		}

		/* Acquiring the handle in STARPU_R mode forces the pending
		 * reduction contributions to be combined back into 'var' */
		ret = starpu_data_acquire(handle, STARPU_R);
		STARPU_CHECK_RETURN_VALUE(ret, "starpu_data_acquire");

		if (var != ntasks * (loop + 1))
		{
			_STARPU_DEBUG("%u != %u\n", var, ntasks * (loop + 1));
			starpu_data_release(handle);
			starpu_data_unregister(handle);
			goto err;
		}

		starpu_data_release(handle);
	}

	starpu_data_unregister(handle);
	if (var != ntasks * nloops)
	{
		_STARPU_DEBUG("%u != %u\n", var, ntasks * nloops);
		goto err;
	}

	starpu_shutdown();

	return EXIT_SUCCESS;

enodev:
	starpu_data_unregister(handle);
	fprintf(stderr, "WARNING: No one can execute this task\n");
	/* Yes, we do not perform the computation, but we did detect that no one
	 * could perform the kernel, so this is not an error from StarPU */
	starpu_shutdown();
	return STARPU_TEST_SKIPPED;

err:
	starpu_shutdown();
	STARPU_RETURN(EXIT_FAILURE);
}