gpu_register.c

/* StarPU --- Runtime system for heterogeneous multicore architectures.
 *
 * Copyright (C) 2011  Université de Bordeaux 1
 *
 * StarPU is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation; either version 2.1 of the License, or (at
 * your option) any later version.
 *
 * StarPU is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * See the GNU Lesser General Public License in COPYING.LGPL for more details.
 */

#include <starpu.h>
#include <starpu_opencl.h>
#include <cuda_runtime.h>	/* cudaSetDevice, cudaMalloc, cudaMemcpy */
#include "../helper.h"
#include "scal.h"

int main(int argc, char **argv)
{
	unsigned *foo_gpu;
	unsigned *foo;
	starpu_data_handle_t handle;
	int ret;
	int n, i, size;
	unsigned workerid;
	int chosen = -1;
	int devid;

	ret = starpu_init(NULL);
	STARPU_CHECK_RETURN_VALUE(ret, "starpu_init");
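
	/* Pick the first CUDA worker; if there is none, the test is skipped. */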
	/* TODO OpenCL, too */
	for (workerid = 0; workerid < starpu_worker_get_count(); workerid++) {
		if (starpu_worker_get_type(workerid) == STARPU_CUDA_WORKER) {
			chosen = workerid;
			break;
		}
	}

	if (chosen == -1)
		return STARPU_TEST_SKIPPED;
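
	/* scal_codelet (and, when OpenCL is enabled, opencl_program) are assumed to be declared in scal.h. */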
#ifdef STARPU_USE_OPENCL
	starpu_opencl_load_opencl_from_file("tests/datawizard/scal_opencl.cl", &opencl_program, NULL);
#endif

	n = starpu_worker_get_count();
	size = 10 * n;

	devid = starpu_worker_get_devid(chosen);
	cudaSetDevice(devid);
	cudaMalloc((void **)&foo_gpu, size * sizeof(*foo_gpu));

	foo = calloc(size, sizeof(*foo));
	for (i = 0; i < size; i++)
		foo[i] = i;

	cudaMemcpy(foo_gpu, foo, size * sizeof(*foo_gpu), cudaMemcpyHostToDevice);
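
	/* Register the pre-allocated GPU buffer with the memory node of the chosen CUDA worker. */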
	starpu_vector_data_register(&handle, starpu_worker_get_memory_node(chosen), (uintptr_t)foo_gpu, size, sizeof(*foo_gpu));

	/* Broadcast the data to force in-place partitioning */
	for (i = 0; i < n; i++)
		starpu_data_prefetch_on_node(handle, starpu_worker_get_memory_node(i), 0);
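
	/* Split the vector into one block per worker (at least two blocks). */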
	struct starpu_data_filter f =
	{
		.filter_func = starpu_block_filter_func_vector,
		.nchildren = n > 1 ? n : 2,
	};
	starpu_data_partition(handle, &f);
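
	/* Submit one scal task per sub-vector, each pinned to a specific worker. */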
	for (i = 0; i < n; i++) {
		struct starpu_task *task = starpu_task_create();
		task->handles[0] = starpu_data_get_sub_data(handle, 1, i);
		task->cl = &scal_codelet;
		task->execute_on_a_specific_worker = 1;
		task->workerid = i;

		ret = starpu_task_submit(task);
		if (ret == -ENODEV) goto enodev;
		STARPU_CHECK_RETURN_VALUE(ret, "starpu_task_submit");
	}

	ret = starpu_task_wait_for_all();
	STARPU_CHECK_RETURN_VALUE(ret, "starpu_task_wait_for_all");
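
	/* Unpartition, flush the result back to the registered GPU buffer, then copy it to the host to check it. */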
	starpu_data_unpartition(handle, 0);
	starpu_data_unregister(handle);
	cudaMemcpy(foo, foo_gpu, size * sizeof(*foo_gpu), cudaMemcpyDeviceToHost);

	starpu_shutdown();

	for (i = 0; i < size; i++) {
		if (foo[i] != i * 2) {
			fprintf(stderr, "value %d is %u instead of %d\n", i, foo[i], 2 * i);
			return EXIT_FAILURE;
		}
	}

	return EXIT_SUCCESS;

enodev:
	/* The handle is still partitioned when we jump here, so unpartition it before unregistering. */
	starpu_data_unpartition(handle, 0);
	starpu_data_unregister(handle);
	fprintf(stderr, "WARNING: No one can execute this task\n");
	/* yes, we do not perform the computation but we did detect that no one
	 * could perform the kernel, so this is not an error from StarPU */
	starpu_shutdown();
	return STARPU_TEST_SKIPPED;
}