/* axpy_partition_gpu.cu */
/* StarPU --- Runtime system for heterogeneous multicore architectures.
 *
 * Copyright (C) 2016-2020 Université de Bordeaux, CNRS (LaBRI UMR 5800), Inria
 * Copyright (C) 2016      Uppsala University
 *
 * StarPU is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation; either version 2.1 of the License, or (at
 * your option) any later version.
 *
 * StarPU is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * See the GNU Lesser General Public License in COPYING.LGPL for more details.
 */
/*
 * This creates two dumb vectors, splits them into chunks, and for each pair
 * of chunks, runs axpy on them.
 */
#include <stdio.h>

#include <starpu.h>

#include "axpy_partition_gpu.h"
// This code demonstrates how to transform a kernel so that it executes on a
// given subset of the GPU's SMs.
  26. __global__ void saxpy(int n, float a, float *x, float *y)
  27. {
  28. int i = blockIdx.x*blockDim.x + threadIdx.x;
  29. if (i<n) y[i] = a*x[i] + y[i];
  30. }
  31. // Transformed kernel
  32. __global__ void saxpy_partitioned(__P_KARGS, int n, float a, float *x, float *y)
  33. {
  34. __P_BEGIN;
  35. __P_LOOPX;
  36. int i = blockid.x*blockDim.x + threadIdx.x; // note that blockIdx is replaced.
  37. if (i<n) y[i] = a*x[i] + y[i];
  38. __P_LOOPEND;
  39. }
  40. extern "C" void cuda_axpy(void *descr[], void *_args)
  41. {
  42. float a = *((float *)_args);
  43. unsigned n = STARPU_VECTOR_GET_NX(descr[0]);
  44. float *x = (float *)STARPU_VECTOR_GET_PTR(descr[0]);
  45. float *y = (float *)STARPU_VECTOR_GET_PTR(descr[1]);
  46. int SM_mapping_start = -1;
  47. int SM_mapping_end = -1;
  48. int SM_allocation = -1;
  49. cudaStream_t stream = starpu_cuda_get_local_stream();
  50. int workerid = starpu_worker_get_id();
  51. starpu_sched_ctx_get_sms_interval(workerid, &SM_mapping_start, &SM_mapping_end);
  52. SM_allocation = SM_mapping_end - SM_mapping_start;
  53. int dimensions = 512;
  54. //partitioning setup
  55. // int SM_mapping_start = 0;
  56. // int SM_allocation = 13;
  57. __P_HOSTSETUP(saxpy_partitioned,dim3(dimensions,1,1),dimensions,0,SM_mapping_start,SM_allocation,stream);
  58. saxpy_partitioned<<<width,dimensions,0,stream>>>(__P_HKARGS,n,a,x,y);
  59. cudaError_t status = cudaGetLastError();
  60. if (status != cudaSuccess) STARPU_CUDA_REPORT_ERROR(status);
  61. }