filter.c

/* StarPU --- Runtime system for heterogeneous multicore architectures.
 *
 * Copyright (C) 2019 CNRS
 *
 * StarPU is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation; either version 2.1 of the License, or (at
 * your option) any later version.
 *
 * StarPU is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * See the GNU Lesser General Public License in COPYING.LGPL for more details.
 */
/*
 * This example shows how to declare and use a new data filter function.
 */
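
/*
 * Note: for a plain even split like the one implemented below, StarPU's
 * predefined filter starpu_vector_filter_block could presumably be used
 * instead of a hand-written filter function, e.g. (sketch, not used here):
 *
 *   struct starpu_data_filter f =
 *   {
 *       .filter_func = starpu_vector_filter_block,
 *       .nchildren = 2
 *   };
 *
 * A custom filter is only needed for partitioning schemes that the
 * predefined filters do not cover.
 */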
#include <starpu_mpi.h>

#define NX 20

#define FPRINTF(ofile, fmt, ...) do { if (!getenv("STARPU_SSILENT")) { fprintf(ofile, fmt, ## __VA_ARGS__); } } while(0)
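
/* Kernel run by a CPU worker: unpack the scaling factor passed through
 * cl_arg and multiply every element of the (sub-)vector by it. */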
void cpu_func(void *buffers[], void *cl_arg)
{
	unsigned i;
	int factor;
	int rank;

	starpu_mpi_comm_rank(MPI_COMM_WORLD, &rank);
	fprintf(stderr, "computing on rank %d\n", rank);

	unsigned n = STARPU_VECTOR_GET_NX(buffers[0]);
	int *val = (int *)STARPU_VECTOR_GET_PTR(buffers[0]);

	starpu_codelet_unpack_args(cl_arg, &factor);

	for (i = 0; i < n; i++)
		val[i] *= factor;
}
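
/* Codelet: one vector buffer accessed in read-write mode. The kernel is
 * also registered by name through cpu_funcs_name so that it can be found
 * when a plain function pointer is not usable (e.g. master-slave mode). */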
struct starpu_codelet cl =
{
	.cpu_funcs = {cpu_func},
	.cpu_funcs_name = {"cpu_func"},
	.nbuffers = 1,
	.modes = {STARPU_RW},
	.name = "vector_scal"
};
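
/* Custom filter: split the father vector into nchunks contiguous blocks of
 * equal size. Child number 'id' gets nx/nchunks elements, starting at offset
 * id*(nx/nchunks)*elemsize within the father's buffer. */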
void vector_filter(void *father_interface, void *child_interface, struct starpu_data_filter *f, unsigned id, unsigned nchunks)
{
	struct starpu_vector_interface *vector_father = (struct starpu_vector_interface *) father_interface;
	struct starpu_vector_interface *vector_child = (struct starpu_vector_interface *) child_interface;

	uint32_t nx = vector_father->nx;
	size_t elemsize = vector_father->elemsize;

	STARPU_ASSERT_MSG(nchunks <= nx, "%u parts for %u elements", nchunks, nx);
	STARPU_ASSERT(nchunks == 2);
	STARPU_ASSERT_MSG((nx % nchunks) == 0, "nx=%u is not a multiple of nchunks=%u\n", nx, nchunks);

	vector_child->id = vector_father->id;
	vector_child->nx = nx/nchunks;
	vector_child->elemsize = elemsize;
	vector_child->allocsize = vector_child->nx * elemsize;

	if (vector_father->dev_handle)
	{
		size_t offset = (id * (nx/nchunks)) * elemsize;
		if (vector_father->ptr)
			vector_child->ptr = vector_father->ptr + offset;
		vector_child->dev_handle = vector_father->dev_handle;
		vector_child->offset = vector_father->offset + offset;
	}
}
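
/* The main program registers the vector on rank 0, partitions it into two
 * halves, has rank 1 scale each half by a different factor through
 * starpu_mpi_task_insert, unpartitions the data and finally checks the
 * result on rank 0. */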
int main(int argc, char **argv)
{
	int i, rank, nodes;
	int vector[NX];
	int vector_check[NX];
	starpu_data_handle_t vhandle;
	starpu_data_handle_t handles[2];
	int factor[2] = {2, 3};
	int ret;

	ret = starpu_mpi_init_conf(&argc, &argv, 1, MPI_COMM_WORLD, NULL);
	STARPU_CHECK_RETURN_VALUE(ret, "starpu_mpi_init_conf");

	starpu_mpi_comm_rank(MPI_COMM_WORLD, &rank);
	starpu_mpi_comm_size(MPI_COMM_WORLD, &nodes);
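
	/* The example needs at least two MPI processes and at least one CPU
	 * worker; otherwise report the conventional "test skipped" code 77. */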
	if (nodes < 2 || (starpu_cpu_worker_get_count() == 0))
	{
		if (rank == 0)
		{
			if (nodes < 2)
				fprintf(stderr, "We need at least 2 processes.\n");
			else
				fprintf(stderr, "We need at least 1 CPU.\n");
		}
		starpu_mpi_shutdown();
		return 77;
	}
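
	/* Fill the input vector and precompute the expected output: the first
	 * half is scaled by factor[0], the second half by factor[1]. */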
	for (i = 0 ; i < NX ; i++)
	{
		vector[i] = i+1;
		if (i < NX/2)
			vector_check[i] = vector[i]*factor[0];
		else
			vector_check[i] = vector[i]*factor[1];
	}

	FPRINTF(stderr, "IN Vector: ");
	for (i = 0 ; i < NX ; i++)
		FPRINTF(stderr, "%5d ", vector[i]);
	FPRINTF(stderr, "\n");

	/* Declare data to StarPU */
	if (rank == 0)
		starpu_vector_data_register(&vhandle, STARPU_MAIN_RAM, (uintptr_t)vector, NX, sizeof(vector[0]));
	else
		starpu_vector_data_register(&vhandle, -1, (uintptr_t)NULL, NX, sizeof(vector[0]));
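
	/* Note that only rank 0 passed a real buffer: the other ranks register
	 * the handle with no home node (-1, NULL pointer), so StarPU allocates
	 * memory for it on demand when the data is transferred to them. */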

	/* Partition the vector in two sub-vectors */
	struct starpu_data_filter f =
	{
		.filter_func = vector_filter,
		.nchildren = 2
	};
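
	/* starpu_data_partition_plan only creates the two child handles;
	 * starpu_data_partition_submit then asynchronously activates them so
	 * that the tasks below can work on the two halves. */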
	starpu_data_partition_plan(vhandle, &f, handles);
	starpu_data_partition_submit(vhandle, 2, handles);

	/* Submit a task on each sub-vector */
	for (i = 0; i < 2; i++)
	{
		/* Each sub-vector gets an MPI tag (42+i) and is owned by rank 0 */
		starpu_mpi_data_register(handles[i], 42+i, 0);

		/* STARPU_EXECUTE_ON_NODE forces the task to run on rank 1, so
		 * StarPU-MPI issues the required transfers from the owner (rank 0) */
		ret = starpu_mpi_task_insert(MPI_COMM_WORLD,
					     &cl,
					     STARPU_RW, handles[i],
					     STARPU_VALUE, &factor[i], sizeof(factor[i]),
					     STARPU_EXECUTE_ON_NODE, 1,
					     0);
		STARPU_CHECK_RETURN_VALUE(ret, "starpu_mpi_task_insert");
	}
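
	/* Gather the two pieces back into the parent handle, then release the
	 * child handles created by starpu_data_partition_plan. */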
	starpu_data_unpartition_submit(vhandle, 2, handles, -1);
	starpu_data_partition_clean(vhandle, 2, handles);
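
	/* Check the result on rank 0: acquire the parent handle in read mode to
	 * get a coherent view and compare it against vector_check. */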
	int ok = 0;
	if (rank == 0)
	{
		starpu_data_acquire(vhandle, STARPU_R);
		int *v = (int *)starpu_data_get_local_ptr(vhandle);

		FPRINTF(stderr, "OUT Vector: ");
		for (i = 0 ; i < NX ; i++)
		{
			FPRINTF(stderr, "%5d ", v[i]);
			if (v[i] != vector_check[i])
			{
				FPRINTF(stderr, "%5d should be %5d\n", v[i], vector_check[i]);
				ok = 1;
			}
		}
		FPRINTF(stderr, "\n");
		starpu_data_release(vhandle);
	}

	starpu_data_unregister(vhandle);
	starpu_mpi_shutdown();

	return ok;
}