increment_redux_v2.c

/* StarPU --- Runtime system for heterogeneous multicore architectures.
 *
 * Copyright (C) 2011  Université de Bordeaux 1
 *
 * StarPU is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation; either version 2.1 of the License, or (at
 * your option) any later version.
 *
 * StarPU is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * See the GNU Lesser General Public License in COPYING.LGPL for more details.
 */
#include <config.h>
#include <starpu.h>
#include "../helper.h"

#ifdef STARPU_USE_CUDA
#include <starpu_cuda.h>
#endif
#ifdef STARPU_USE_OPENCL
#include <starpu_opencl.h>
#endif

static unsigned var = 0;
static starpu_data_handle_t handle;

/*
 *	Reduction methods
 */
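
/* The codelets below define the reduction operator and the neutral-element
 * initializer for the counter: the redux_*_kernel variants accumulate one
 * contribution into another (dst += src), while the neutral_*_kernel variants
 * reset a contribution to 0.  An implementation is provided for CPU, CUDA and
 * OpenCL so the reduction can be performed wherever the data happens to live. */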

#ifdef STARPU_USE_CUDA
static void redux_cuda_kernel(void *descr[], void *arg)
{
	STARPU_SKIP_IF_VALGRIND;

	unsigned *dst = (unsigned *)STARPU_VARIABLE_GET_PTR(descr[0]);
	unsigned *src = (unsigned *)STARPU_VARIABLE_GET_PTR(descr[1]);

	unsigned host_dst, host_src;

	/* This is a dummy technique of course */
	cudaMemcpy(&host_src, src, sizeof(unsigned), cudaMemcpyDeviceToHost);
	cudaMemcpy(&host_dst, dst, sizeof(unsigned), cudaMemcpyDeviceToHost);
	cudaThreadSynchronize();

	host_dst += host_src;

	cudaMemcpy(dst, &host_dst, sizeof(unsigned), cudaMemcpyHostToDevice);
	cudaThreadSynchronize();
}

static void neutral_cuda_kernel(void *descr[], void *arg)
{
	STARPU_SKIP_IF_VALGRIND;

	unsigned *dst = (unsigned *)STARPU_VARIABLE_GET_PTR(descr[0]);

	/* This is a dummy technique of course */
	unsigned host_dst = 0;
	cudaMemcpy(dst, &host_dst, sizeof(unsigned), cudaMemcpyHostToDevice);
	cudaThreadSynchronize();
}
#endif

#ifdef STARPU_USE_OPENCL
static void redux_opencl_kernel(void *descr[], void *arg)
{
	STARPU_SKIP_IF_VALGRIND;

	unsigned h_dst, h_src;

	cl_mem d_dst = (cl_mem)STARPU_VARIABLE_GET_PTR(descr[0]);
	cl_mem d_src = (cl_mem)STARPU_VARIABLE_GET_PTR(descr[1]);

	cl_command_queue queue;
	starpu_opencl_get_current_queue(&queue);

	/* This is a dummy technique of course */
	clEnqueueReadBuffer(queue, d_dst, CL_TRUE, 0, sizeof(unsigned), (void *)&h_dst, 0, NULL, NULL);
	clEnqueueReadBuffer(queue, d_src, CL_TRUE, 0, sizeof(unsigned), (void *)&h_src, 0, NULL, NULL);

	h_dst += h_src;

	clEnqueueWriteBuffer(queue, d_dst, CL_TRUE, 0, sizeof(unsigned), (void *)&h_dst, 0, NULL, NULL);
	clFinish(queue);
}

static void neutral_opencl_kernel(void *descr[], void *arg)
{
	STARPU_SKIP_IF_VALGRIND;

	unsigned h_dst = 0;
	cl_mem d_dst = (cl_mem)STARPU_VARIABLE_GET_PTR(descr[0]);

	cl_command_queue queue;
	starpu_opencl_get_current_queue(&queue);

	clEnqueueWriteBuffer(queue, d_dst, CL_TRUE, 0, sizeof(unsigned), (void *)&h_dst, 0, NULL, NULL);
	clFinish(queue);
}
#endif

static void redux_cpu_kernel(void *descr[], void *arg)
{
	STARPU_SKIP_IF_VALGRIND;

	unsigned *dst = (unsigned *)STARPU_VARIABLE_GET_PTR(descr[0]);
	unsigned *src = (unsigned *)STARPU_VARIABLE_GET_PTR(descr[1]);
	*dst = *dst + *src;
}

static void neutral_cpu_kernel(void *descr[], void *arg)
{
	STARPU_SKIP_IF_VALGRIND;

	unsigned *dst = (unsigned *)STARPU_VARIABLE_GET_PTR(descr[0]);
	*dst = 0;
}

static struct starpu_codelet redux_cl =
{
	.where = STARPU_CPU|STARPU_CUDA|STARPU_OPENCL,
#ifdef STARPU_USE_CUDA
	.cuda_funcs = {redux_cuda_kernel, NULL},
#endif
#ifdef STARPU_USE_OPENCL
	.opencl_funcs = {redux_opencl_kernel, NULL},
#endif
	.cpu_funcs = {redux_cpu_kernel, NULL},
	.nbuffers = 2
};

static struct starpu_codelet neutral_cl =
{
	.where = STARPU_CPU|STARPU_CUDA|STARPU_OPENCL,
#ifdef STARPU_USE_CUDA
	.cuda_funcs = {neutral_cuda_kernel, NULL},
#endif
#ifdef STARPU_USE_OPENCL
	.opencl_funcs = {neutral_opencl_kernel, NULL},
#endif
	.cpu_funcs = {neutral_cpu_kernel, NULL},
	.nbuffers = 1
};
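
/* redux_cl and neutral_cl are attached to the data handle in main() through
 * starpu_data_set_reduction_methods(), which tells StarPU how to initialize
 * and combine the per-worker contributions created for STARPU_REDUX accesses. */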

/*
 *	Increment codelet
 */
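
/* The increment kernels below are wrapped in two codelets that differ only by
 * their access mode: increment_cl touches the variable in STARPU_RW mode,
 * while increment_cl_redux touches it in STARPU_REDUX mode, so that each
 * worker accumulates into a private contribution that StarPU later reduces
 * with the methods above. */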

#ifdef STARPU_USE_OPENCL
/* dummy OpenCL implementation */
static void increment_opencl_kernel(void *descr[], void *cl_arg __attribute__((unused)))
{
	STARPU_SKIP_IF_VALGRIND;

	cl_mem d_token = (cl_mem)STARPU_VARIABLE_GET_PTR(descr[0]);
	unsigned h_token;

	cl_command_queue queue;
	starpu_opencl_get_current_queue(&queue);

	clEnqueueReadBuffer(queue, d_token, CL_TRUE, 0, sizeof(unsigned), (void *)&h_token, 0, NULL, NULL);
	h_token++;
	clEnqueueWriteBuffer(queue, d_token, CL_TRUE, 0, sizeof(unsigned), (void *)&h_token, 0, NULL, NULL);
	clFinish(queue);
}
#endif

#ifdef STARPU_USE_CUDA
static void increment_cuda_kernel(void *descr[], void *arg)
{
	STARPU_SKIP_IF_VALGRIND;

	unsigned *tokenptr = (unsigned *)STARPU_VARIABLE_GET_PTR(descr[0]);
	unsigned host_token;

	/* This is a dummy technique of course */
	cudaMemcpy(&host_token, tokenptr, sizeof(unsigned), cudaMemcpyDeviceToHost);
	cudaThreadSynchronize();

	host_token++;

	cudaMemcpy(tokenptr, &host_token, sizeof(unsigned), cudaMemcpyHostToDevice);
	cudaThreadSynchronize();
}
#endif

static void increment_cpu_kernel(void *descr[], void *arg)
{
	STARPU_SKIP_IF_VALGRIND;

	unsigned *tokenptr = (unsigned *)STARPU_VARIABLE_GET_PTR(descr[0]);
	*tokenptr = *tokenptr + 1;
}

static struct starpu_codelet increment_cl =
{
	.where = STARPU_CPU|STARPU_CUDA|STARPU_OPENCL,
#ifdef STARPU_USE_CUDA
	.cuda_funcs = {increment_cuda_kernel, NULL},
#endif
#ifdef STARPU_USE_OPENCL
	.opencl_funcs = {increment_opencl_kernel, NULL},
#endif
	.cpu_funcs = {increment_cpu_kernel, NULL},
	.nbuffers = 1,
	.modes = {STARPU_RW}
};

static struct starpu_codelet increment_cl_redux =
{
	.where = STARPU_CPU|STARPU_CUDA|STARPU_OPENCL,
#ifdef STARPU_USE_CUDA
	.cuda_funcs = {increment_cuda_kernel, NULL},
#endif
#ifdef STARPU_USE_OPENCL
	.opencl_funcs = {increment_opencl_kernel, NULL},
#endif
	.cpu_funcs = {increment_cpu_kernel, NULL},
	.nbuffers = 1,
	.modes = {STARPU_REDUX}
};
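
/* The test submits nloops batches of ntasks increments.  Most tasks use the
 * STARPU_REDUX codelet; every tenth task uses the plain STARPU_RW codelet,
 * which makes StarPU reduce the pending per-worker contributions back into
 * the variable before that task runs.  After each batch the variable is
 * acquired in STARPU_R mode and checked against the expected count. */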

int main(int argc, char **argv)
{
	int ret;

	ret = starpu_init(NULL);
	if (ret == -ENODEV) return STARPU_TEST_SKIPPED;
	STARPU_CHECK_RETURN_VALUE(ret, "starpu_init");

	starpu_variable_data_register(&handle, 0, (uintptr_t)&var, sizeof(unsigned));
	starpu_data_set_reduction_methods(handle, &redux_cl, &neutral_cl);

	unsigned ntasks = 1024;
	unsigned nloops = 16;

	unsigned loop;
	unsigned t;

	for (loop = 0; loop < nloops; loop++)
	{
		for (t = 0; t < ntasks; t++)
		{
			struct starpu_task *task = starpu_task_create();

			if (t % 10 == 0)
			{
				task->cl = &increment_cl;
			}
			else
			{
				task->cl = &increment_cl_redux;
			}

			task->handles[0] = handle;

			int ret = starpu_task_submit(task);
			if (ret == -ENODEV) goto enodev;
			STARPU_CHECK_RETURN_VALUE(ret, "starpu_task_submit");
		}

		ret = starpu_data_acquire(handle, STARPU_R);
		STARPU_CHECK_RETURN_VALUE(ret, "starpu_data_acquire");

		if (var != ntasks*(loop + 1))
		{
			_STARPU_DEBUG("%u != %u\n", var, ntasks*(loop + 1));
			starpu_data_release(handle);
			starpu_data_unregister(handle);
			goto err;
		}

		starpu_data_release(handle);
	}

	starpu_data_unregister(handle);
	if (var != ntasks*nloops)
	{
		_STARPU_DEBUG("%u != %u\n", var, ntasks*nloops);
		goto err;
	}

	starpu_shutdown();

	return EXIT_SUCCESS;

enodev:
	starpu_data_unregister(handle);
	fprintf(stderr, "WARNING: No one can execute this task\n");
	/* yes, we do not perform the computation but we did detect that no one
	 * could perform the kernel, so this is not an error from StarPU */
	starpu_shutdown();
	return STARPU_TEST_SKIPPED;

err:
	starpu_shutdown();
	STARPU_RETURN(EXIT_FAILURE);
}