/* incrementer_runtime.c — StarPU runtime incrementer example */
/*
 * StarPU
 * Copyright (C) INRIA 2008-2009 (see AUTHORS file)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation; either version 2.1 of the License, or (at
 * your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * See the GNU Lesser General Public License in COPYING.LGPL for more details.
 */
  16. #include <starpu.h>
  17. #include <pthread.h>
  18. #define NITER 50000
  19. extern void cuda_codelet_host(float *tab);
  20. static pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
  21. static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
  22. void callback_func(void *argcb)
  23. {
  24. unsigned cnt = STARPU_ATOMIC_ADD((unsigned *)argcb, 1);
  25. if (cnt == NITER)
  26. {
  27. pthread_mutex_lock(&mutex);
  28. pthread_cond_signal(&cond);
  29. pthread_mutex_unlock(&mutex);
  30. }
  31. }
  32. void core_codelet(starpu_data_interface_t *buffers, __attribute__ ((unused)) void *_args)
  33. {
  34. float *val = (float *)buffers[0].vector.ptr;
  35. val[0] += 1.0f; val[1] += 1.0f;
  36. }
  37. #ifdef USE_CUDA
  38. void cuda_codelet(starpu_data_interface_t *buffers, __attribute__ ((unused)) void *_args)
  39. {
  40. float *val = (float *)buffers[0].vector.ptr;
  41. cuda_codelet_host(val);
  42. }
  43. #endif
  44. int main(int argc, char **argv)
  45. {
  46. unsigned counter = 0;
  47. starpu_init(NULL);
  48. float float_array[3] __attribute__ ((aligned (16))) = { 0.0f, 0.0f, 0.0f};
  49. starpu_data_handle float_array_handle;
  50. starpu_register_vector_data(&float_array_handle, 0 /* home node */,
  51. (uintptr_t)&float_array, 3, sizeof(float));
  52. starpu_codelet cl =
  53. {
  54. /* CUBLAS stands for CUDA kernels controlled from the host */
  55. .where = CORE|CUBLAS,
  56. .core_func = core_codelet,
  57. #ifdef USE_CUDA
  58. .cublas_func = cuda_codelet,
  59. #endif
  60. .nbuffers = 1
  61. };
  62. unsigned i;
  63. for (i = 0; i < NITER; i++)
  64. {
  65. struct starpu_task *task = starpu_task_create();
  66. task->cl = &cl;
  67. task->callback_func = callback_func;
  68. task->callback_arg = &counter;
  69. task->buffers[0].handle = float_array_handle;
  70. task->buffers[0].mode = STARPU_RW;
  71. int ret = starpu_submit_task(task);
  72. if (STARPU_UNLIKELY(ret == -ENODEV))
  73. {
  74. fprintf(stderr, "No worker may execute this task\n");
  75. exit(0);
  76. }
  77. }
  78. pthread_mutex_lock(&mutex);
  79. pthread_cond_wait(&cond, &mutex);
  80. pthread_mutex_unlock(&mutex);
  81. /* update the array in RAM */
  82. starpu_sync_data_with_mem(float_array_handle);
  83. fprintf(stderr, "array -> %f, %f, %f\n", float_array[0],
  84. float_array[1], float_array[2]);
  85. if (float_array[0] != float_array[1] + float_array[2])
  86. return 1;
  87. starpu_shutdown();
  88. return 0;
  89. }