gpu_register.c

/* StarPU --- Runtime system for heterogeneous multicore architectures.
 *
 * Copyright (C) 2012 Inria
 * Copyright (C) 2012,2013,2015-2017 CNRS
 * Copyright (C) 2011-2016 Université de Bordeaux
 *
 * StarPU is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation; either version 2.1 of the License, or (at
 * your option) any later version.
 *
 * StarPU is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * See the GNU Lesser General Public License in COPYING.LGPL for more details.
 */

#include <starpu.h>
#include "../helper.h"
#include "scal.h"

/*
 * Register a handle from a GPU buffer, and perform a partitioned operation.
 */
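
/*
 * Overall flow (both back-ends follow the same pattern): allocate a buffer
 * directly on the memory node of a GPU worker, fill it from the host,
 * register that device buffer with starpu_vector_data_register(), partition
 * it in place, run one scal task per piece, then unpartition, copy the data
 * back to the host and check that every element was doubled.
 */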

#if ! (defined(STARPU_USE_OPENCL) || defined(STARPU_USE_CUDA))
int main(void)
{
	return STARPU_TEST_SKIPPED;
}
#else
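
/* Submit one scal task per sub-block of the partitioned handle, forcing a
 * round-robin placement over the n workers, then wait for completion. */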
static int
submit_tasks(starpu_data_handle_t handle, int pieces, int n)
{
	int i, ret;

	for (i = 0; i < pieces; i++)
	{
		struct starpu_task *task = starpu_task_create();

		task->handles[0] = starpu_data_get_sub_data(handle, 1, i);
		task->cl = &scal_codelet;
		task->execute_on_a_specific_worker = 1;
		task->workerid = i % n;

		ret = starpu_task_submit(task);
		if (ret == -ENODEV)
			return -ENODEV;
		STARPU_CHECK_RETURN_VALUE(ret, "starpu_task_submit");
	}

	ret = starpu_task_wait_for_all();
	STARPU_CHECK_RETURN_VALUE(ret, "starpu_task_wait_for_all");

	return 0;
}
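
/* Return the id of one worker of the requested type (the last one reported
 * by starpu_worker_get_ids_by_type()), or -ENODEV if there is none. */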
static int
find_a_worker(enum starpu_worker_archtype type)
{
	int worker[STARPU_NMAXWORKERS];
	int ret = starpu_worker_get_ids_by_type(type, worker, STARPU_NMAXWORKERS);
	if (ret == 0)
		return -ENODEV;
	if (ret == -ERANGE)
		return worker[STARPU_NMAXWORKERS-1];
	return worker[ret-1];
}
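
/* The scal kernel doubles every element, so t[i] must end up equal to 2*i. */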
static int
check_result(unsigned *t, size_t size)
{
	unsigned i;
	for (i = 0; i < size; i++)
	{
		if (t[i] != i*2)
		{
			FPRINTF(stderr, "t[%u] is %u instead of %u\n", i, t[i], 2*i);
			return 1;
		}
	}
	return 0;
}

#ifdef STARPU_USE_CUDA
#ifdef STARPU_HAVE_CUDA_MEMCPY_PEER
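/* CUDA variant: allocate the vector on the memory node of a CUDA worker,
 * initialise it from the host with cudaMemcpy(), register the device pointer
 * with StarPU, then partition the vector in place and scale it. */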
static int
test_cuda(void)
{
	int ret;
	unsigned *foo_gpu;
	unsigned *foo;
	int n, i, size, pieces;
	int devid;
	int chosen;
	cudaError_t cures;
	starpu_data_handle_t handle;

	/* Find a CUDA worker */
	chosen = find_a_worker(STARPU_CUDA_WORKER);
	if (chosen == -ENODEV)
		return -ENODEV;

	n = starpu_worker_get_count();
	size = 10 * n;

	devid = starpu_worker_get_devid(chosen);
	foo_gpu = (void*) starpu_malloc_on_node(starpu_worker_get_memory_node(chosen), size * sizeof(*foo_gpu));

	foo = calloc(size, sizeof(*foo));
	for (i = 0; i < size; i++)
		foo[i] = i;

	cures = cudaMemcpy(foo_gpu, foo, size * sizeof(*foo_gpu), cudaMemcpyHostToDevice);
	if (STARPU_UNLIKELY(cures))
		STARPU_CUDA_REPORT_ERROR(cures);

	starpu_vector_data_register(&handle, starpu_worker_get_memory_node(chosen), (uintptr_t)foo_gpu, size, sizeof(*foo_gpu));
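	/* The only valid copy of the vector currently lives on the GPU's memory
	 * node; main RAM does not hold it yet. */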

	/* Broadcast the data to force in-place partitioning */
	for (i = 0; i < n; i++)
		starpu_data_prefetch_on_node(handle, starpu_worker_get_memory_node(i), 0);

	/* Even with just one worker, split in at least two */
	if (n == 1)
		pieces = 2;
	else
		pieces = n;

	struct starpu_data_filter f =
	{
		.filter_func = starpu_vector_filter_block,
		.nchildren = pieces,
	};

	starpu_data_partition(handle, &f);

	ret = submit_tasks(handle, pieces, n);
	if (ret == -ENODEV)
	{
		starpu_free_on_node(starpu_worker_get_memory_node(chosen), (uintptr_t) foo_gpu, size * sizeof(*foo_gpu));
		free(foo);
		return -ENODEV;
	}

	starpu_data_unpartition(handle, starpu_worker_get_memory_node(chosen));
	starpu_data_unregister(handle);

	starpu_cuda_set_device(devid);
	cures = cudaMemcpy(foo, foo_gpu, size * sizeof(*foo_gpu), cudaMemcpyDeviceToHost);
	if (STARPU_UNLIKELY(cures))
	{
		starpu_free_on_node(starpu_worker_get_memory_node(chosen), (uintptr_t) foo_gpu, size * sizeof(*foo_gpu));
		free(foo);
		STARPU_CUDA_REPORT_ERROR(cures);
		return 1;
	}

	ret = check_result(foo, size);

	starpu_free_on_node(starpu_worker_get_memory_node(chosen), (uintptr_t) foo_gpu, size * sizeof(*foo_gpu));
	free(foo);
	return ret;
}
#endif
#endif

#ifdef STARPU_USE_OPENCL
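/* OpenCL variant: same scenario, with a cl_mem buffer allocated on the
 * memory node of an OpenCL worker and clEnqueueWriteBuffer() /
 * clEnqueueReadBuffer() for the host<->device transfers. */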
static int
test_opencl(void)
{
	int i;
	int ret;
	int chosen;
	int n;
	int size;
	int pieces;
	cl_mem foo_gpu;
	starpu_data_handle_t handle;

	ret = starpu_opencl_load_opencl_from_file("tests/datawizard/scal_opencl.cl", &opencl_program, NULL);
	STARPU_CHECK_RETURN_VALUE(ret, "starpu_opencl_load_opencl_from_file");

	/* Find an OpenCL worker */
	chosen = find_a_worker(STARPU_OPENCL_WORKER);
	if (chosen == -ENODEV)
		return -ENODEV;

	n = starpu_worker_get_count();
	size = 10 * n;

	int devid;
	cl_int err;
	cl_context context;
	cl_command_queue queue;

	devid = starpu_worker_get_devid(chosen);
	starpu_opencl_get_context(devid, &context);
	starpu_opencl_get_queue(devid, &queue);

	foo_gpu = (void*) starpu_malloc_on_node(starpu_worker_get_memory_node(chosen), size * sizeof(int));

	unsigned int *foo = malloc(size*sizeof(*foo));
	for (i = 0; i < size; i++)
		foo[i] = i;

	err = clEnqueueWriteBuffer(queue,
				   foo_gpu,
				   CL_FALSE,
				   0,
				   size*sizeof(int),
				   foo,
				   0,
				   NULL,
				   NULL);
	if (STARPU_UNLIKELY(err != CL_SUCCESS))
		STARPU_OPENCL_REPORT_ERROR(err);
	clFinish(queue);

	starpu_vector_data_register(&handle,
				    starpu_worker_get_memory_node(chosen),
				    (uintptr_t)foo_gpu,
				    size,
				    sizeof(int));

	/* Broadcast the data to force in-place partitioning */
	for (i = 0; i < n; i++)
		starpu_data_prefetch_on_node(handle, starpu_worker_get_memory_node(i), 0);

	/* Even with just one worker, split in at least two */
	if (n == 1)
		pieces = 2;
	else
		pieces = n;

	struct starpu_data_filter f =
	{
		.filter_func = starpu_vector_filter_block,
		.nchildren = pieces,
	};

	starpu_data_partition(handle, &f);

	ret = submit_tasks(handle, pieces, n);
	if (ret == -ENODEV)
		return -ENODEV;

	starpu_data_unpartition(handle, starpu_worker_get_memory_node(chosen));
	starpu_data_unregister(handle);
	STARPU_CHECK_RETURN_VALUE(ret, "starpu_task_wait_for_all");

	ret = starpu_opencl_unload_opencl(&opencl_program);
	STARPU_CHECK_RETURN_VALUE(ret, "starpu_opencl_unload_opencl");

	err = clEnqueueReadBuffer(queue,
				  foo_gpu,
				  CL_FALSE,
				  0,
				  size*sizeof(*foo),
				  foo,
				  0,
				  NULL,
				  NULL);
	if (STARPU_UNLIKELY(err != CL_SUCCESS))
		STARPU_OPENCL_REPORT_ERROR(err);
	clFinish(queue);

	ret = check_result(foo, size);

	starpu_free_on_node(starpu_worker_get_memory_node(chosen), (uintptr_t) foo_gpu, size * sizeof(int));
	free(foo);
	return ret;
}
#endif /* !STARPU_USE_OPENCL */
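
/* Run whichever back-end tests were compiled in; the whole test is reported
 * as skipped only when neither of them could actually run. */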
int main(int argc, char **argv)
{
	int skipped_cuda = 1, skipped_opencl = 1;
	int ret;

	ret = starpu_initialize(NULL, &argc, &argv);
	if (ret == -ENODEV)
		return STARPU_TEST_SKIPPED;
	STARPU_CHECK_RETURN_VALUE(ret, "starpu_init");

#ifdef STARPU_USE_OPENCL
	ret = starpu_opencl_load_opencl_from_file("tests/datawizard/scal_opencl.cl", &opencl_program, NULL);
	STARPU_CHECK_RETURN_VALUE(ret, "starpu_opencl_load_opencl_from_file");
#endif

#ifdef STARPU_USE_CUDA
#ifdef STARPU_HAVE_CUDA_MEMCPY_PEER
	ret = test_cuda();
	if (ret == 1)
		goto fail;
	else if (ret == 0)
		skipped_cuda = 0;
#endif
#endif

#ifdef STARPU_USE_OPENCL
	ret = test_opencl();
	if (ret == 1)
		goto fail;
	else if (ret == 0)
		skipped_opencl = 0;
#endif

	starpu_shutdown();

	if (skipped_cuda == 1 && skipped_opencl == 1)
		return STARPU_TEST_SKIPPED;

	return EXIT_SUCCESS;

fail:
	starpu_shutdown();
	return EXIT_FAILURE;
}
#endif /* defined(STARPU_USE_OPENCL) || defined(STARPU_USE_CUDA) */