gpu_register.c

/* StarPU --- Runtime system for heterogeneous multicore architectures.
 *
 * Copyright (C) 2012       Inria
 * Copyright (C) 2012,2013,2015-2017  CNRS
 * Copyright (C) 2011-2016,2019  Université de Bordeaux
 *
 * StarPU is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation; either version 2.1 of the License, or (at
 * your option) any later version.
 *
 * StarPU is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * See the GNU Lesser General Public License in COPYING.LGPL for more details.
 */
#include <starpu.h>
#include "../helper.h"
#include "scal.h"
/*
 * Register a handle from a GPU buffer, and perform a partitioned operation on it.
 */
#if !(defined(STARPU_USE_OPENCL) || defined(STARPU_USE_CUDA))
int main(void)
{
	return STARPU_TEST_SKIPPED;
}
#else
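/* Submit one scal task per sub-data piece, forcing task i to run on worker
 * i % n so the partitioned pieces get spread over the available workers. */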
static int
submit_tasks(starpu_data_handle_t handle, int pieces, int n)
{
	int i, ret;

	for (i = 0; i < pieces; i++)
	{
		struct starpu_task *task = starpu_task_create();

		task->handles[0] = starpu_data_get_sub_data(handle, 1, i);
		task->cl = &scal_codelet;
		task->execute_on_a_specific_worker = 1;
		task->workerid = i%n;

		ret = starpu_task_submit(task);
		if (ret == -ENODEV)
			return -ENODEV;
		STARPU_CHECK_RETURN_VALUE(ret, "starpu_task_submit");
	}

	ret = starpu_task_wait_for_all();
	STARPU_CHECK_RETURN_VALUE(ret, "starpu_task_wait_for_all");

	return 0;
}
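/* Return the id of one worker of the requested type (the last one reported),
 * or -ENODEV if there is none. */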
static int
find_a_worker(enum starpu_worker_archtype type)
{
	int worker[STARPU_NMAXWORKERS];
	int ret = starpu_worker_get_ids_by_type(type, worker, STARPU_NMAXWORKERS);
	if (ret == 0)
		return -ENODEV;
	if (ret == -ERANGE)
		return worker[STARPU_NMAXWORKERS-1];
	return worker[ret-1];
}
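/* After the scal tasks ran, each element must have been doubled: t[i] == 2*i. */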
static int
check_result(unsigned *t, size_t size)
{
	unsigned i;
	for (i = 0; i < size; i++)
	{
		if (t[i] != i*2)
		{
			FPRINTF(stderr,"t[%u] is %u instead of %u\n", i, t[i], 2*i);
			return 1;
		}
	}
	return 0;
}
#ifdef STARPU_USE_CUDA
#ifdef STARPU_HAVE_CUDA_MEMCPY_PEER
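/* Allocate and fill a buffer directly in CUDA memory, register it as a StarPU
 * vector, partition it in place, scale each piece on a different worker, then
 * copy the data back to the host and check it. */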
static int
test_cuda(void)
{
	int ret;
	unsigned *foo_gpu;
	unsigned *foo;
	int n, i, size, pieces;
	int devid;
	int chosen;
	cudaError_t cures;
	starpu_data_handle_t handle;

	/* Find a CUDA worker */
	chosen = find_a_worker(STARPU_CUDA_WORKER);
	if (chosen == -ENODEV)
		return -ENODEV;

	n = starpu_worker_get_count();
	size = 10 * n;

	devid = starpu_worker_get_devid(chosen);
	foo_gpu = (void*) starpu_malloc_on_node(starpu_worker_get_memory_node(chosen), size * sizeof(*foo_gpu));

	foo = calloc(size, sizeof(*foo));
	for (i = 0; i < size; i++)
		foo[i] = i;

	cures = cudaMemcpy(foo_gpu, foo, size * sizeof(*foo_gpu), cudaMemcpyHostToDevice);
	if (!cures)
		cures = cudaThreadSynchronize();
	if (STARPU_UNLIKELY(cures))
		STARPU_CUDA_REPORT_ERROR(cures);

	starpu_vector_data_register(&handle, starpu_worker_get_memory_node(chosen), (uintptr_t)foo_gpu, size, sizeof(*foo_gpu));

	/* Broadcast the data to force in-place partitioning */
	for (i = 0; i < n; i++)
		starpu_data_prefetch_on_node(handle, starpu_worker_get_memory_node(i), 0);

	/* Even with just one worker, split in at least two */
	if (n == 1)
		pieces = 2;
	else
		pieces = n;

	struct starpu_data_filter f =
	{
		.filter_func = starpu_vector_filter_block,
		.nchildren = pieces,
	};

	starpu_data_partition(handle, &f);

	ret = submit_tasks(handle, pieces, n);
	if (ret == -ENODEV)
	{
		starpu_free_on_node(starpu_worker_get_memory_node(chosen), (uintptr_t) foo_gpu, size * sizeof(*foo_gpu));
		free(foo);
		return -ENODEV;
	}

	starpu_data_unpartition(handle, starpu_worker_get_memory_node(chosen));
	starpu_data_unregister(handle);
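	/* Select the device the buffer was allocated on, so that the raw
	 * cudaMemcpy below reads from the right GPU. */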
	starpu_cuda_set_device(devid);
	cures = cudaMemcpy(foo, foo_gpu, size * sizeof(*foo_gpu), cudaMemcpyDeviceToHost);
	if (!cures)
		cures = cudaThreadSynchronize();
	if (STARPU_UNLIKELY(cures))
	{
		starpu_free_on_node(starpu_worker_get_memory_node(chosen), (uintptr_t) foo_gpu, size * sizeof(*foo_gpu));
		free(foo);
		STARPU_CUDA_REPORT_ERROR(cures);
		return 1;
	}

	ret = check_result(foo, size);

	starpu_free_on_node(starpu_worker_get_memory_node(chosen), (uintptr_t) foo_gpu, size * sizeof(*foo_gpu));
	free(foo);
	return ret;
}
#endif
#endif
#ifdef STARPU_USE_OPENCL
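/* Same test for OpenCL: allocate and fill a cl_mem buffer on the device,
 * register it, partition it in place, scale each piece, then read the data
 * back with clEnqueueReadBuffer and check it. */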
static int
test_opencl(void)
{
	int i;
	int ret;
	int chosen;
	int n;
	int size;
	int pieces;
	cl_mem foo_gpu;
	starpu_data_handle_t handle;

	ret = starpu_opencl_load_opencl_from_file("tests/datawizard/scal_opencl.cl", &opencl_program, NULL);
	STARPU_CHECK_RETURN_VALUE(ret, "starpu_opencl_load_opencl_from_file");

	/* Find an OpenCL worker */
	chosen = find_a_worker(STARPU_OPENCL_WORKER);
	if (chosen == -ENODEV)
		return -ENODEV;

	n = starpu_worker_get_count();
	size = 10 * n;

	int devid;
	cl_int err;
	cl_context context;
	cl_command_queue queue;

	devid = starpu_worker_get_devid(chosen);
	starpu_opencl_get_context(devid, &context);
	starpu_opencl_get_queue(devid, &queue);

	foo_gpu = (void*) starpu_malloc_on_node(starpu_worker_get_memory_node(chosen), size * sizeof(int));

	unsigned int *foo = malloc(size*sizeof(*foo));
	for (i = 0; i < size; i++)
		foo[i] = i;

	err = clEnqueueWriteBuffer(queue,
				   foo_gpu,
				   CL_FALSE,
				   0,
				   size*sizeof(int),
				   foo,
				   0,
				   NULL,
				   NULL);
	if (STARPU_UNLIKELY(err != CL_SUCCESS))
		STARPU_OPENCL_REPORT_ERROR(err);
	clFinish(queue);

	starpu_vector_data_register(&handle,
				    starpu_worker_get_memory_node(chosen),
				    (uintptr_t)foo_gpu,
				    size,
				    sizeof(int));

	/* Broadcast the data to force in-place partitioning */
	for (i = 0; i < n; i++)
		starpu_data_prefetch_on_node(handle, starpu_worker_get_memory_node(i), 0);

	/* Even with just one worker, split in at least two */
	if (n == 1)
		pieces = 2;
	else
		pieces = n;

	struct starpu_data_filter f =
	{
		.filter_func = starpu_vector_filter_block,
		.nchildren = pieces,
	};

	starpu_data_partition(handle, &f);

	ret = submit_tasks(handle, pieces, n);
	if (ret == -ENODEV)
	{
		starpu_free_on_node(starpu_worker_get_memory_node(chosen), (uintptr_t) foo_gpu, size * sizeof(int));
		free(foo);
		return -ENODEV;
	}

	starpu_data_unpartition(handle, starpu_worker_get_memory_node(chosen));
	starpu_data_unregister(handle);
	STARPU_CHECK_RETURN_VALUE(ret, "starpu_task_wait_for_all");

	ret = starpu_opencl_unload_opencl(&opencl_program);
	STARPU_CHECK_RETURN_VALUE(ret, "starpu_opencl_unload_opencl");
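	/* Read the scaled data back from the device buffer to check it on the host */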
	err = clEnqueueReadBuffer(queue,
				  foo_gpu,
				  CL_FALSE,
				  0,
				  size*sizeof(*foo),
				  foo,
				  0,
				  NULL,
				  NULL);
	if (STARPU_UNLIKELY(err != CL_SUCCESS))
		STARPU_OPENCL_REPORT_ERROR(err);
	clFinish(queue);

	ret = check_result(foo, size);

	starpu_free_on_node(starpu_worker_get_memory_node(chosen), (uintptr_t) foo_gpu, size * sizeof(int));
	free(foo);
	return ret;
}
#endif /* !STARPU_USE_OPENCL */
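/* Run the CUDA and OpenCL variants when they are built in; the test is only
 * reported as skipped when neither of them could actually run. */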
int main(int argc, char **argv)
{
	int skipped_cuda = 1, skipped_opencl = 1;
	int ret;

	ret = starpu_initialize(NULL, &argc, &argv);
	if (ret == -ENODEV)
		return STARPU_TEST_SKIPPED;
	STARPU_CHECK_RETURN_VALUE(ret, "starpu_init");

#ifdef STARPU_USE_OPENCL
	ret = starpu_opencl_load_opencl_from_file("tests/datawizard/scal_opencl.cl", &opencl_program, NULL);
	STARPU_CHECK_RETURN_VALUE(ret, "starpu_opencl_load_opencl_from_file");
#endif

#ifdef STARPU_USE_CUDA
#ifdef STARPU_HAVE_CUDA_MEMCPY_PEER
	ret = test_cuda();
	if (ret == 1)
		goto fail;
	else if (ret == 0)
		skipped_cuda = 0;
#endif
#endif

#ifdef STARPU_USE_OPENCL
	ret = test_opencl();
	if (ret == 1)
		goto fail;
	else if (ret == 0)
		skipped_opencl = 0;
#endif

	starpu_shutdown();

	if (skipped_cuda == 1 && skipped_opencl == 1)
		return STARPU_TEST_SKIPPED;

	return EXIT_SUCCESS;

fail:
	starpu_shutdown();
	return EXIT_FAILURE;
}
#endif /* defined(STARPU_USE_OPENCL) || defined(STARPU_USE_CUDA) */