/* matrix_as_vector.c */
/* StarPU --- Runtime system for heterogeneous multicore architectures.
 *
 * Copyright (C) 2012 Centre National de la Recherche Scientifique
 *
 * StarPU is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation; either version 2.1 of the License, or (at
 * your option) any later version.
 *
 * StarPU is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * See the GNU Lesser General Public License in COPYING.LGPL for more details.
 */
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <starpu.h>
#include <cublas.h>
#include "../helper.h"

#define LOOPS 100
  21. void vector_cpu_func(void *descr[], void *cl_arg __attribute__((unused)))
  22. {
  23. STARPU_SKIP_IF_VALGRIND;
  24. float *matrix = (float *)STARPU_VECTOR_GET_PTR(descr[0]);
  25. int nx = STARPU_VECTOR_GET_NX(descr[0]);
  26. int i;
  27. float sum=0;
  28. for(i=0 ; i<nx ; i++) sum+=i;
  29. matrix[0] = sum/nx;
  30. }
  31. void vector_cuda_func(void *descr[], void *cl_arg __attribute__((unused)))
  32. {
  33. STARPU_SKIP_IF_VALGRIND;
  34. float *matrix = (float *)STARPU_VECTOR_GET_PTR(descr[0]);
  35. int nx = STARPU_VECTOR_GET_NX(descr[0]);
  36. float sum = cublasSasum(nx, matrix, 1);
  37. cudaThreadSynchronize();
  38. sum /= nx;
  39. cudaMemcpy(matrix, &sum, sizeof(matrix[0]), cudaMemcpyHostToDevice);
  40. cudaThreadSynchronize();
  41. }
  42. void matrix_cpu_func(void *descr[], void *cl_arg __attribute__((unused)))
  43. {
  44. STARPU_SKIP_IF_VALGRIND;
  45. float *matrix = (float *)STARPU_MATRIX_GET_PTR(descr[0]);
  46. int nx = STARPU_MATRIX_GET_NX(descr[0]);
  47. int ny = STARPU_MATRIX_GET_NY(descr[0]);
  48. int i;
  49. float sum=0;
  50. for(i=0 ; i<nx*ny ; i++) sum+=i;
  51. matrix[0] = sum / (nx*ny);
  52. }
  53. void matrix_cuda_func(void *descr[], void *cl_arg __attribute__((unused)))
  54. {
  55. STARPU_SKIP_IF_VALGRIND;
  56. float *matrix = (float *)STARPU_MATRIX_GET_PTR(descr[0]);
  57. int nx = STARPU_MATRIX_GET_NX(descr[0]);
  58. int ny = STARPU_MATRIX_GET_NY(descr[0]);
  59. float sum = cublasSasum(nx*ny, matrix, 1);
  60. cudaThreadSynchronize();
  61. sum /= nx*ny;
  62. cudaMemcpy(matrix, &sum, sizeof(matrix[0]), cudaMemcpyHostToDevice);
  63. cudaThreadSynchronize();
  64. }
  65. int check_size(int nx, struct starpu_codelet *vector_codelet, struct starpu_codelet *matrix_codelet, char *device_name)
  66. {
  67. float *matrix, mean;
  68. starpu_data_handle_t vector_handle, matrix_handle;
  69. int ret, i, loop;
  70. double vector_timing, matrix_timing;
  71. struct timeval start;
  72. struct timeval end;
  73. matrix = malloc(nx*sizeof(matrix[0]));
  74. gettimeofday(&start, NULL);
  75. for(loop=1 ; loop<=LOOPS ; loop++)
  76. {
  77. for(i=0 ; i<nx ; i++) matrix[i] = i;
  78. starpu_vector_data_register(&vector_handle, 0, (uintptr_t)matrix, nx, sizeof(matrix[0]));
  79. ret = starpu_insert_task(vector_codelet, STARPU_RW, vector_handle, 0);
  80. starpu_data_unregister(vector_handle);
  81. if (ret == -ENODEV) return ret;
  82. }
  83. gettimeofday(&end, NULL);
  84. vector_timing = (double)((end.tv_sec - start.tv_sec)*1000000 + (end.tv_usec - start.tv_usec));
  85. vector_timing /= LOOPS;
  86. mean = matrix[0];
  87. gettimeofday(&start, NULL);
  88. for(loop=1 ; loop<=LOOPS ; loop++)
  89. {
  90. for(i=0 ; i<nx ; i++) matrix[i] = i;
  91. starpu_matrix_data_register(&matrix_handle, 0, (uintptr_t)matrix, nx/2, nx/2, 2, sizeof(matrix[0]));
  92. ret = starpu_insert_task(matrix_codelet, STARPU_RW, matrix_handle, 0);
  93. starpu_data_unregister(matrix_handle);
  94. if (ret == -ENODEV) return ret;
  95. }
  96. gettimeofday(&end, NULL);
  97. matrix_timing = (double)((end.tv_sec - start.tv_sec)*1000000 + (end.tv_usec - start.tv_usec));
  98. matrix_timing /= LOOPS;
  99. if (mean == matrix[0])
  100. {
  101. fprintf(stderr, "%d\t%f\t%f\n", nx, vector_timing, matrix_timing);
  102. {
  103. char *output_dir = getenv("STARPU_BENCH_DIR");
  104. char *bench_id = getenv("STARPU_BENCH_ID");
  105. if (output_dir && bench_id)
  106. {
  107. char file[1024];
  108. FILE *f;
  109. sprintf(file, "%s/matrix_as_vector_%s.dat", output_dir, device_name);
  110. f = fopen(file, "a");
  111. fprintf(f, "%s\t%d\t%f\t%f\n", bench_id, nx, vector_timing, matrix_timing);
  112. fclose(f);
  113. }
  114. }
  115. return EXIT_SUCCESS;
  116. }
  117. else
  118. {
  119. FPRINTF(stderr, "Incorrect result nx=%7d --> mean=%7f != %7f\n", nx, matrix[0], mean);
  120. return EXIT_FAILURE;
  121. }
  122. }
  123. #define NX_MIN 2
  124. #define NX_MAX 1024*1024
  125. int check_size_on_device(uint32_t where, char *device_name)
  126. {
  127. int nx, ret;
  128. struct starpu_codelet vector_codelet;
  129. struct starpu_codelet matrix_codelet;
  130. fprintf(stderr, "# Device: %s\n", device_name);
  131. fprintf(stderr, "# nx vector_timing matrix_timing\n");
  132. starpu_codelet_init(&vector_codelet);
  133. vector_codelet.modes[0] = STARPU_RW;
  134. vector_codelet.nbuffers = 1;
  135. if (where == STARPU_CPU) vector_codelet.cpu_funcs[0] = vector_cpu_func;
  136. if (where == STARPU_CUDA) vector_codelet.cuda_funcs[0] = vector_cuda_func;
  137. // if (where == STARPU_OPENCL) vector_codelet.opencl_funcs[0] = vector_opencl_func;
  138. starpu_codelet_init(&matrix_codelet);
  139. matrix_codelet.modes[0] = STARPU_RW;
  140. matrix_codelet.nbuffers = 1;
  141. if (where == STARPU_CPU) matrix_codelet.cpu_funcs[0] = matrix_cpu_func;
  142. if (where == STARPU_CUDA) matrix_codelet.cuda_funcs[0] = matrix_cuda_func;
  143. // if (where == STARPU_OPENCL) matrix_codelet.opencl_funcs[0] = matrix_opencl_func;
  144. for(nx=NX_MIN ; nx<=NX_MAX ; nx*=2)
  145. {
  146. ret = check_size(nx, &vector_codelet, &matrix_codelet, device_name);
  147. if (ret != EXIT_SUCCESS) break;
  148. }
  149. return ret;
  150. };
  151. int main(int argc, char **argv)
  152. {
  153. int ret;
  154. unsigned devices;
  155. ret = starpu_init(NULL);
  156. if (ret == -ENODEV) return STARPU_TEST_SKIPPED;
  157. STARPU_CHECK_RETURN_VALUE(ret, "starpu_init");
  158. devices = starpu_cpu_worker_get_count();
  159. if (devices)
  160. {
  161. ret = check_size_on_device(STARPU_CPU, "STARPU_CPU");
  162. if (ret) goto error;
  163. }
  164. devices = starpu_cuda_worker_get_count();
  165. if (devices)
  166. {
  167. starpu_helper_cublas_init();
  168. ret = check_size_on_device(STARPU_CUDA, "STARPU_CUDA");
  169. starpu_helper_cublas_shutdown();
  170. if (ret) goto error;
  171. }
  172. devices = starpu_opencl_worker_get_count();
  173. if (devices)
  174. {
  175. ret = check_size_on_device(STARPU_OPENCL, "STARPU_OPENCL");
  176. if (ret) goto error;
  177. }
  178. error:
  179. starpu_shutdown();
  180. STARPU_RETURN(ret);
  181. }