increment_redux.c

/* StarPU --- Runtime system for heterogeneous multicore architectures.
 *
 * Copyright (C) 2010, 2012-2016 Université de Bordeaux
 * Copyright (C) 2010, 2011, 2012, 2013 CNRS
 *
 * StarPU is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation; either version 2.1 of the License, or (at
 * your option) any later version.
 *
 * StarPU is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * See the GNU Lesser General Public License in COPYING.LGPL for more details.
 */

#include <config.h>
#include <starpu.h>
#include "../helper.h"

/*
 * Check that STARPU_REDUX works with a mere incrementation
 */
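/*
 * Test outline: a single unsigned counter is registered as a StarPU
 * variable, given reduction methods, and then incremented by many tasks
 * that access it in STARPU_REDUX mode. After each batch of submissions,
 * the handle is acquired in STARPU_R mode and the accumulated value is
 * checked against the number of tasks submitted so far.
 */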
static unsigned var = 0;
static starpu_data_handle_t handle;

/*
 * Reduction methods
 */
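/*
 * StarPU combines per-worker contributions with the "redux" codelet
 * (dst += src) and initializes each fresh per-worker copy with the
 * "neutral" codelet (dst = 0, the neutral element of addition). CPU,
 * CUDA and OpenCL variants of both are defined below.
 */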
#ifdef STARPU_USE_CUDA
static void redux_cuda_kernel(void *descr[], void *arg)
{
	STARPU_SKIP_IF_VALGRIND;

	unsigned *dst = (unsigned *)STARPU_VARIABLE_GET_PTR(descr[0]);
	unsigned *src = (unsigned *)STARPU_VARIABLE_GET_PTR(descr[1]);

	unsigned host_dst, host_src;

	/* This is a dummy technique of course */
	cudaMemcpyAsync(&host_src, src, sizeof(unsigned), cudaMemcpyDeviceToHost, starpu_cuda_get_local_stream());
	cudaMemcpyAsync(&host_dst, dst, sizeof(unsigned), cudaMemcpyDeviceToHost, starpu_cuda_get_local_stream());
	cudaStreamSynchronize(starpu_cuda_get_local_stream());

	host_dst += host_src;

	cudaMemcpyAsync(dst, &host_dst, sizeof(unsigned), cudaMemcpyHostToDevice, starpu_cuda_get_local_stream());
}

static void neutral_cuda_kernel(void *descr[], void *arg)
{
	STARPU_SKIP_IF_VALGRIND;

	unsigned *dst = (unsigned *)STARPU_VARIABLE_GET_PTR(descr[0]);

	/* This is a dummy technique of course */
	unsigned host_dst = 0;
	cudaMemcpyAsync(dst, &host_dst, sizeof(unsigned), cudaMemcpyHostToDevice, starpu_cuda_get_local_stream());
}
#endif
#ifdef STARPU_USE_OPENCL
static void redux_opencl_kernel(void *descr[], void *arg)
{
	STARPU_SKIP_IF_VALGRIND;

	unsigned h_dst, h_src;

	cl_mem d_dst = (cl_mem)STARPU_VARIABLE_GET_PTR(descr[0]);
	cl_mem d_src = (cl_mem)STARPU_VARIABLE_GET_PTR(descr[1]);

	cl_command_queue queue;
	starpu_opencl_get_current_queue(&queue);

	/* This is a dummy technique of course */
	clEnqueueReadBuffer(queue, d_dst, CL_TRUE, 0, sizeof(unsigned), (void *)&h_dst, 0, NULL, NULL);
	clEnqueueReadBuffer(queue, d_src, CL_TRUE, 0, sizeof(unsigned), (void *)&h_src, 0, NULL, NULL);

	h_dst += h_src;

	clEnqueueWriteBuffer(queue, d_dst, CL_TRUE, 0, sizeof(unsigned), (void *)&h_dst, 0, NULL, NULL);
}

static void neutral_opencl_kernel(void *descr[], void *arg)
{
	STARPU_SKIP_IF_VALGRIND;

	unsigned h_dst = 0;
	cl_mem d_dst = (cl_mem)STARPU_VARIABLE_GET_PTR(descr[0]);

	cl_command_queue queue;
	starpu_opencl_get_current_queue(&queue);

	clEnqueueWriteBuffer(queue, d_dst, CL_TRUE, 0, sizeof(unsigned), (void *)&h_dst, 0, NULL, NULL);
}
#endif
void redux_cpu_kernel(void *descr[], void *arg)
{
	STARPU_SKIP_IF_VALGRIND;

	unsigned *dst = (unsigned *)STARPU_VARIABLE_GET_PTR(descr[0]);
	unsigned *src = (unsigned *)STARPU_VARIABLE_GET_PTR(descr[1]);
	*dst = *dst + *src;
}

void neutral_cpu_kernel(void *descr[], void *arg)
{
	STARPU_SKIP_IF_VALGRIND;

	unsigned *dst = (unsigned *)STARPU_VARIABLE_GET_PTR(descr[0]);
	*dst = 0;
}
static struct starpu_codelet redux_cl =
{
#ifdef STARPU_USE_CUDA
	.cuda_funcs = {redux_cuda_kernel},
	.cuda_flags = {STARPU_CUDA_ASYNC},
#endif
#ifdef STARPU_USE_OPENCL
	.opencl_funcs = {redux_opencl_kernel},
	.opencl_flags = {STARPU_OPENCL_ASYNC},
#endif
	.cpu_funcs = {redux_cpu_kernel},
	.cpu_funcs_name = {"redux_cpu_kernel"},
	.modes = {STARPU_RW, STARPU_R},
	.nbuffers = 2
};

static struct starpu_codelet neutral_cl =
{
#ifdef STARPU_USE_CUDA
	.cuda_funcs = {neutral_cuda_kernel},
	.cuda_flags = {STARPU_CUDA_ASYNC},
#endif
#ifdef STARPU_USE_OPENCL
	.opencl_funcs = {neutral_opencl_kernel},
	.opencl_flags = {STARPU_OPENCL_ASYNC},
#endif
	.cpu_funcs = {neutral_cpu_kernel},
	.cpu_funcs_name = {"neutral_cpu_kernel"},
	.modes = {STARPU_W},
	.nbuffers = 1
};
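/* Both codelets are handed to StarPU in main() through
 * starpu_data_set_reduction_methods(), so the runtime knows how to
 * initialize and combine the per-worker copies of the variable. */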
/*
 * Increment codelet
 */
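/*
 * The increment tasks access the variable in STARPU_REDUX mode: each
 * worker accumulates into its own local copy (initialized by neutral_cl),
 * so the tasks can run concurrently without serializing on the handle.
 * The per-worker copies are folded back with redux_cl when the handle is
 * next accessed in another mode, here the starpu_data_acquire(handle,
 * STARPU_R) calls in main().
 */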
#ifdef STARPU_USE_OPENCL
/* dummy OpenCL implementation */
static void increment_opencl_kernel(void *descr[], void *cl_arg STARPU_ATTRIBUTE_UNUSED)
{
	STARPU_SKIP_IF_VALGRIND;

	cl_mem d_token = (cl_mem)STARPU_VARIABLE_GET_PTR(descr[0]);
	unsigned h_token;

	cl_command_queue queue;
	starpu_opencl_get_current_queue(&queue);

	clEnqueueReadBuffer(queue, d_token, CL_TRUE, 0, sizeof(unsigned), (void *)&h_token, 0, NULL, NULL);
	h_token++;
	clEnqueueWriteBuffer(queue, d_token, CL_TRUE, 0, sizeof(unsigned), (void *)&h_token, 0, NULL, NULL);
}
#endif
#ifdef STARPU_USE_CUDA
static void increment_cuda_kernel(void *descr[], void *arg)
{
	STARPU_SKIP_IF_VALGRIND;

	unsigned *tokenptr = (unsigned *)STARPU_VARIABLE_GET_PTR(descr[0]);
	unsigned host_token;

	/* This is a dummy technique of course */
	cudaMemcpyAsync(&host_token, tokenptr, sizeof(unsigned), cudaMemcpyDeviceToHost, starpu_cuda_get_local_stream());
	cudaStreamSynchronize(starpu_cuda_get_local_stream());

	host_token++;

	cudaMemcpyAsync(tokenptr, &host_token, sizeof(unsigned), cudaMemcpyHostToDevice, starpu_cuda_get_local_stream());
}
#endif
void increment_cpu_kernel(void *descr[], void *arg)
{
	STARPU_SKIP_IF_VALGRIND;

	unsigned *tokenptr = (unsigned *)STARPU_VARIABLE_GET_PTR(descr[0]);
	*tokenptr = *tokenptr + 1;
}

static struct starpu_codelet increment_cl =
{
#ifdef STARPU_USE_CUDA
	.cuda_funcs = {increment_cuda_kernel},
	.cuda_flags = {STARPU_CUDA_ASYNC},
#endif
#ifdef STARPU_USE_OPENCL
	.opencl_funcs = {increment_opencl_kernel},
	.opencl_flags = {STARPU_OPENCL_ASYNC},
#endif
	.cpu_funcs = {increment_cpu_kernel},
	.cpu_funcs_name = {"increment_cpu_kernel"},
	.nbuffers = 1,
	.modes = {STARPU_REDUX}
};
int main(int argc, char **argv)
{
	int ret;

	/* Not supported yet */
	if (starpu_get_env_number_default("STARPU_GLOBAL_ARBITER", 0) > 0)
		return STARPU_TEST_SKIPPED;

	ret = starpu_initialize(NULL, &argc, &argv);
	if (ret == -ENODEV) return STARPU_TEST_SKIPPED;
	STARPU_CHECK_RETURN_VALUE(ret, "starpu_init");

	starpu_variable_data_register(&handle, STARPU_MAIN_RAM, (uintptr_t)&var, sizeof(unsigned));
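	/* Register the reduction and initialization codelets with the handle;
	 * StarPU needs these methods before tasks may access the handle in
	 * STARPU_REDUX mode. */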
	starpu_data_set_reduction_methods(handle, &redux_cl, &neutral_cl);

#ifdef STARPU_QUICK_CHECK
	unsigned ntasks = 32;
	unsigned nloops = 4;
#else
	unsigned ntasks = 1024;
	unsigned nloops = 16;
#endif

	unsigned loop;
	unsigned t;

	for (loop = 0; loop < nloops; loop++)
	{
		for (t = 0; t < ntasks; t++)
		{
			struct starpu_task *task = starpu_task_create();

			task->cl = &increment_cl;
			task->handles[0] = handle;

			ret = starpu_task_submit(task);
			if (ret == -ENODEV) goto enodev;
			STARPU_CHECK_RETURN_VALUE(ret, "starpu_task_submit");
		}

		ret = starpu_data_acquire(handle, STARPU_R);
		STARPU_CHECK_RETURN_VALUE(ret, "starpu_data_acquire");

		if (var != ntasks * (loop+1))
		{
			FPRINTF(stderr, "[end of loop] Value %u != Expected value %u\n", var, ntasks * (loop+1));
			starpu_data_release(handle);
			starpu_data_unregister(handle);
			goto err;
		}

		starpu_data_release(handle);
	}
	starpu_data_unregister(handle);

	if (var != ntasks * nloops)
	{
		FPRINTF(stderr, "Value %u != Expected value %u\n", var, ntasks * nloops);
		goto err;
	}
	starpu_shutdown();

	return EXIT_SUCCESS;

enodev:
	starpu_data_unregister(handle);
	fprintf(stderr, "WARNING: No one can execute this task\n");
	/* yes, we do not perform the computation but we did detect that no one
	 * could perform the kernel, so this is not an error from StarPU */
	starpu_shutdown();
	return STARPU_TEST_SKIPPED;

err:
	starpu_shutdown();
	STARPU_RETURN(EXIT_FAILURE);
}