axpy_partition_gpu.cu

/* StarPU --- Runtime system for heterogeneous multicore architectures.
 *
 * Copyright (C) 2016 Uppsala University
 *
 * StarPU is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation; either version 2.1 of the License, or (at
 * your option) any later version.
 *
 * StarPU is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * See the GNU Lesser General Public License in COPYING.LGPL for more details.
 */
/*
 * This creates two dumb vectors, splits them into chunks, and for each pair
 * of chunks, runs axpy on them.
 */
#include <starpu.h>
#include "axpy_partition_gpu.h"
#include <stdio.h>

// This code demonstrates how to transform a kernel to execute on a given set of GPU SMs.

// Original kernel
__global__ void saxpy(int n, float a, float *x, float *y)
{
	int i = blockIdx.x*blockDim.x + threadIdx.x;
	if (i < n) y[i] = a*x[i] + y[i];
}

// Transformed kernel
__global__ void saxpy_partitioned(__P_KARGS, int n, float a, float *x, float *y)
{
	__P_BEGIN;
	__P_LOOPX;
	int i = blockid.x*blockDim.x + threadIdx.x; // note that blockIdx is replaced.
	if (i < n) y[i] = a*x[i] + y[i];
	__P_LOOPEND;
}
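/*
 * The __P_KARGS, __P_BEGIN, __P_LOOPX and __P_LOOPEND macros are defined in
 * axpy_partition_gpu.h. Roughly (see the header for the exact definitions):
 * __P_KARGS adds the extra kernel parameters describing the partition (grid
 * size, number of active blocks, per-SM block assignment, SM mapping start);
 * __P_BEGIN lets each physical block find out which SM it landed on and bail
 * out if that SM is outside the interval allocated to this task; __P_LOOPX /
 * __P_LOOPEND wrap the kernel body in a loop over the logical block indices
 * (exposed as "blockid"), so the whole original grid is covered using only
 * the allocated SMs.
 */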
extern "C" void cuda_axpy(void *descr[], void *_args)
{
	float a = *((float *)_args);

	unsigned n = STARPU_VECTOR_GET_NX(descr[0]);
	float *x = (float *)STARPU_VECTOR_GET_PTR(descr[0]);
	float *y = (float *)STARPU_VECTOR_GET_PTR(descr[1]);

	int SM_mapping_start = -1;
	int SM_mapping_end = -1;
	int SM_allocation = -1;

	cudaStream_t stream = starpu_cuda_get_local_stream();

	int workerid = starpu_worker_get_id();
	starpu_sched_ctx_get_sms_interval(workerid, &SM_mapping_start, &SM_mapping_end);
	SM_allocation = SM_mapping_end - SM_mapping_start;

	int dimensions = 512;

	// partitioning setup
	// int SM_mapping_start = 0;
	// int SM_allocation = 13;
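	/*
	 * __P_HOSTSETUP (also from axpy_partition_gpu.h) prepares the launch of
	 * the partitioned kernel: roughly, it works out how many blocks can be
	 * resident on the allocated SMs, sets up the per-SM block assignment,
	 * and declares the variables used by the launch below, notably "width"
	 * (the number of physical blocks to launch) and the arguments bundled
	 * into __P_HKARGS. See the header for the exact definition.
	 */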
	__P_HOSTSETUP(saxpy_partitioned, dim3(dimensions,1,1), dimensions, 0, SM_mapping_start, SM_allocation, stream);

	saxpy_partitioned<<<width, dimensions, 0, stream>>>(__P_HKARGS, n, a, x, y);
}
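/*
 * Hedged sketch (not part of the original file): how a host-side driver
 * typically wires the cuda_axpy wrapper above into StarPU and submits a task
 * on two registered vector handles. The names axpy_cl, init_axpy_cl,
 * submit_axpy, x_handle and y_handle are illustrative assumptions, not the
 * contents of the actual driver accompanying this example.
 */
static struct starpu_codelet axpy_cl;

static void init_axpy_cl(void)
{
	starpu_codelet_init(&axpy_cl);     /* zero-initialize the codelet */
	axpy_cl.cuda_funcs[0] = cuda_axpy; /* the wrapper defined above */
	axpy_cl.nbuffers = 2;              /* descr[0] = x, descr[1] = y */
	axpy_cl.modes[0] = STARPU_R;       /* x is only read */
	axpy_cl.modes[1] = STARPU_RW;      /* y is read and written */
	axpy_cl.name = "axpy";
}

/* alpha must remain valid until the task has executed, since cl_arg is not copied here. */
static int submit_axpy(starpu_data_handle_t x_handle, starpu_data_handle_t y_handle, float *alpha)
{
	struct starpu_task *task = starpu_task_create();
	task->cl = &axpy_cl;
	task->cl_arg = alpha;              /* read back as *(float *)_args in cuda_axpy */
	task->cl_arg_size = sizeof(*alpha);
	task->handles[0] = x_handle;
	task->handles[1] = y_handle;
	return starpu_task_submit(task);
}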