/* increment_redux_v2.c */
/* StarPU --- Runtime system for heterogeneous multicore architectures.
 *
 * Copyright (C) 2011 Université de Bordeaux 1
 *
 * StarPU is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation; either version 2.1 of the License, or (at
 * your option) any later version.
 *
 * StarPU is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * See the GNU Lesser General Public License in COPYING.LGPL for more details.
 */
  16. #include <config.h>
  17. #if STARPU_HAVE_VALGRIND_H
  18. #include <valgrind/valgrind.h>
  19. #endif
  20. #include <starpu.h>
  21. #include "../helper.h"
  22. #ifdef STARPU_USE_CUDA
  23. #include <starpu_cuda.h>
  24. #endif
  25. #ifdef STARPU_USE_OPENCL
  26. #include <starpu_opencl.h>
  27. #endif
/* Shared counter incremented by every submitted task, and the StarPU
 * data handle that covers it for the runtime. */
static unsigned var = 0;
static starpu_data_handle_t handle;
/*
 * Reduction methods
 */
  33. #ifdef STARPU_USE_CUDA
  34. static void redux_cuda_kernel(void *descr[], void *arg)
  35. {
  36. STARPU_SKIP_IF_VALGRIND;
  37. unsigned *dst = (unsigned *)STARPU_VARIABLE_GET_PTR(descr[0]);
  38. unsigned *src = (unsigned *)STARPU_VARIABLE_GET_PTR(descr[1]);
  39. unsigned host_dst, host_src;
  40. /* This is a dummy technique of course */
  41. cudaMemcpy(&host_src, src, sizeof(unsigned), cudaMemcpyDeviceToHost);
  42. cudaMemcpy(&host_dst, dst, sizeof(unsigned), cudaMemcpyDeviceToHost);
  43. cudaThreadSynchronize();
  44. host_dst += host_src;
  45. cudaMemcpy(dst, &host_dst, sizeof(unsigned), cudaMemcpyHostToDevice);
  46. cudaThreadSynchronize();
  47. }
  48. static void neutral_cuda_kernel(void *descr[], void *arg)
  49. {
  50. STARPU_SKIP_IF_VALGRIND;
  51. unsigned *dst = (unsigned *)STARPU_VARIABLE_GET_PTR(descr[0]);
  52. /* This is a dummy technique of course */
  53. unsigned host_dst = 0;
  54. cudaMemcpy(dst, &host_dst, sizeof(unsigned), cudaMemcpyHostToDevice);
  55. cudaThreadSynchronize();
  56. }
  57. #endif
  58. #ifdef STARPU_USE_OPENCL
  59. static void redux_opencl_kernel(void *descr[], void *arg)
  60. {
  61. STARPU_SKIP_IF_VALGRIND;
  62. unsigned h_dst, h_src;
  63. cl_mem d_dst = (cl_mem)STARPU_VARIABLE_GET_PTR(descr[0]);
  64. cl_mem d_src = (cl_mem)STARPU_VARIABLE_GET_PTR(descr[1]);
  65. cl_command_queue queue;
  66. starpu_opencl_get_current_queue(&queue);
  67. /* This is a dummy technique of course */
  68. clEnqueueReadBuffer(queue, d_dst, CL_TRUE, 0, sizeof(unsigned), (void *)&h_dst, 0, NULL, NULL);
  69. clEnqueueReadBuffer(queue, d_src, CL_TRUE, 0, sizeof(unsigned), (void *)&h_src, 0, NULL, NULL);
  70. h_dst += h_src;
  71. clEnqueueWriteBuffer(queue, d_dst, CL_TRUE, 0, sizeof(unsigned), (void *)&h_dst, 0, NULL, NULL);
  72. }
  73. static void neutral_opencl_kernel(void *descr[], void *arg)
  74. {
  75. STARPU_SKIP_IF_VALGRIND;
  76. unsigned h_dst = 0;
  77. cl_mem d_dst = (cl_mem)STARPU_VARIABLE_GET_PTR(descr[0]);
  78. cl_command_queue queue;
  79. starpu_opencl_get_current_queue(&queue);
  80. clEnqueueWriteBuffer(queue, d_dst, CL_TRUE, 0, sizeof(unsigned), (void *)&h_dst, 0, NULL, NULL);
  81. }
  82. #endif
  83. static void redux_cpu_kernel(void *descr[], void *arg)
  84. {
  85. STARPU_SKIP_IF_VALGRIND;
  86. unsigned *dst = (unsigned *)STARPU_VARIABLE_GET_PTR(descr[0]);
  87. unsigned *src = (unsigned *)STARPU_VARIABLE_GET_PTR(descr[1]);
  88. *dst = *dst + *src;
  89. }
  90. static void neutral_cpu_kernel(void *descr[], void *arg)
  91. {
  92. STARPU_SKIP_IF_VALGRIND;
  93. unsigned *dst = (unsigned *)STARPU_VARIABLE_GET_PTR(descr[0]);
  94. *dst = 0;
  95. }
  96. static struct starpu_codelet redux_cl =
  97. {
  98. .where = STARPU_CPU|STARPU_CUDA|STARPU_OPENCL,
  99. #ifdef STARPU_USE_CUDA
  100. .cuda_funcs = {redux_cuda_kernel, NULL},
  101. #endif
  102. #ifdef STARPU_USE_OPENCL
  103. .opencl_funcs = {redux_opencl_kernel, NULL},
  104. #endif
  105. .cpu_funcs = {redux_cpu_kernel, NULL},
  106. .nbuffers = 2
  107. };
  108. static struct starpu_codelet neutral_cl =
  109. {
  110. .where = STARPU_CPU|STARPU_CUDA,
  111. #ifdef STARPU_USE_CUDA
  112. .cuda_funcs = {neutral_cuda_kernel, NULL},
  113. #endif
  114. #ifdef STARPU_USE_OPENCL
  115. .opencl_funcs = {neutral_opencl_kernel, NULL},
  116. #endif
  117. .cpu_funcs = {neutral_cpu_kernel, NULL},
  118. .nbuffers = 1
  119. };
/*
 * Increment codelet
 */
  123. #ifdef STARPU_USE_OPENCL
  124. /* dummy OpenCL implementation */
  125. static void increment_opencl_kernel(void *descr[], void *cl_arg __attribute__((unused)))
  126. {
  127. STARPU_SKIP_IF_VALGRIND;
  128. cl_mem d_token = (cl_mem)STARPU_VARIABLE_GET_PTR(descr[0]);
  129. unsigned h_token;
  130. cl_command_queue queue;
  131. starpu_opencl_get_current_queue(&queue);
  132. clEnqueueReadBuffer(queue, d_token, CL_TRUE, 0, sizeof(unsigned), (void *)&h_token, 0, NULL, NULL);
  133. h_token++;
  134. clEnqueueWriteBuffer(queue, d_token, CL_TRUE, 0, sizeof(unsigned), (void *)&h_token, 0, NULL, NULL);
  135. }
  136. #endif
  137. #ifdef STARPU_USE_CUDA
  138. static void increment_cuda_kernel(void *descr[], void *arg)
  139. {
  140. STARPU_SKIP_IF_VALGRIND;
  141. unsigned *tokenptr = (unsigned *)STARPU_VARIABLE_GET_PTR(descr[0]);
  142. unsigned host_token;
  143. /* This is a dummy technique of course */
  144. cudaMemcpy(&host_token, tokenptr, sizeof(unsigned), cudaMemcpyDeviceToHost);
  145. cudaThreadSynchronize();
  146. host_token++;
  147. cudaMemcpy(tokenptr, &host_token, sizeof(unsigned), cudaMemcpyHostToDevice);
  148. cudaThreadSynchronize();
  149. }
  150. #endif
  151. static void increment_cpu_kernel(void *descr[], void *arg)
  152. {
  153. STARPU_SKIP_IF_VALGRIND;
  154. unsigned *tokenptr = (unsigned *)STARPU_VARIABLE_GET_PTR(descr[0]);
  155. *tokenptr = *tokenptr + 1;
  156. }
  157. static struct starpu_codelet increment_cl =
  158. {
  159. .where = STARPU_CPU|STARPU_CUDA|STARPU_OPENCL,
  160. #ifdef STARPU_USE_CUDA
  161. .cuda_funcs = {increment_cuda_kernel, NULL},
  162. #endif
  163. #ifdef STARPU_USE_OPENCL
  164. .opencl_funcs = {increment_opencl_kernel, NULL},
  165. #endif
  166. .cpu_funcs = {increment_cpu_kernel, NULL},
  167. .nbuffers = 1,
  168. .modes = {STARPU_RW}
  169. };
  170. static struct starpu_codelet increment_cl_redux =
  171. {
  172. .where = STARPU_CPU|STARPU_CUDA|STARPU_OPENCL,
  173. #ifdef STARPU_USE_CUDA
  174. .cuda_funcs = {increment_cuda_kernel, NULL},
  175. #endif
  176. #ifdef STARPU_USE_OPENCL
  177. .opencl_funcs = {increment_opencl_kernel, NULL},
  178. #endif
  179. .cpu_funcs = {increment_cpu_kernel, NULL},
  180. .nbuffers = 1,
  181. .modes = {STARPU_REDUX}
  182. };
  183. int main(int argc, char **argv)
  184. {
  185. int ret;
  186. ret = starpu_init(NULL);
  187. if (ret == -ENODEV) return STARPU_TEST_SKIPPED;
  188. STARPU_CHECK_RETURN_VALUE(ret, "starpu_init");
  189. starpu_variable_data_register(&handle, 0, (uintptr_t)&var, sizeof(unsigned));
  190. starpu_data_set_reduction_methods(handle, &redux_cl, &neutral_cl);
  191. unsigned ntasks = 1024;
  192. unsigned nloops = 16;
  193. unsigned loop;
  194. unsigned t;
  195. for (loop = 0; loop < nloops; loop++)
  196. {
  197. for (t = 0; t < ntasks; t++)
  198. {
  199. struct starpu_task *task = starpu_task_create();
  200. if (t % 10 == 0)
  201. {
  202. task->cl = &increment_cl;
  203. }
  204. else
  205. {
  206. task->cl = &increment_cl_redux;
  207. }
  208. task->handles[0] = handle;
  209. int ret = starpu_task_submit(task);
  210. if (ret == -ENODEV) goto enodev;
  211. STARPU_CHECK_RETURN_VALUE(ret, "starpu_task_submit");
  212. }
  213. ret = starpu_data_acquire(handle, STARPU_R);
  214. STARPU_CHECK_RETURN_VALUE(ret, "starpu_data_acquire");
  215. STARPU_ASSERT(var == ntasks*(loop + 1));
  216. starpu_data_release(handle);
  217. }
  218. starpu_data_unregister(handle);
  219. STARPU_ASSERT(var == ntasks*nloops);
  220. starpu_shutdown();
  221. return EXIT_SUCCESS;
  222. enodev:
  223. starpu_data_unregister(handle);
  224. fprintf(stderr, "WARNING: No one can execute this task\n");
  225. /* yes, we do not perform the computation but we did detect that no one
  226. * could perform the kernel, so this is not an error from StarPU */
  227. starpu_shutdown();
  228. return STARPU_TEST_SKIPPED;
  229. }