/* cl_enqueuendrangekernel.c */
  1. /* StarPU --- Runtime system for heterogeneous multicore architectures.
  2. *
  3. * Copyright (C) 2010,2011, 2016-2017 University of Bordeaux
  4. * Copyright (C) 2016, 2017 CNRS
  5. *
  6. * StarPU is free software; you can redistribute it and/or modify
  7. * it under the terms of the GNU Lesser General Public License as published by
  8. * the Free Software Foundation; either version 2.1 of the License, or (at
  9. * your option) any later version.
  10. *
  11. * StarPU is distributed in the hope that it will be useful, but
  12. * WITHOUT ANY WARRANTY; without even the implied warranty of
  13. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
  14. *
  15. * See the GNU Lesser General Public License in COPYING.LGPL for more details.
  16. */
  17. #include "socl.h"
  18. #include "event.h"
  19. void soclEnqueueNDRangeKernel_task(void *descr[], void *args) {
  20. command_ndrange_kernel cmd = (command_ndrange_kernel)args;
  21. cl_command_queue cq;
  22. int wid;
  23. cl_int err;
  24. cl_event ev = command_event_get(cmd);
  25. ev->prof_start = _socl_nanotime();
  26. gc_entity_release(ev);
  27. wid = starpu_worker_get_id_check();
  28. starpu_opencl_get_queue(wid, &cq);
  29. DEBUG_MSG("[worker %d] [kernel %d] Executing kernel...\n", wid, cmd->kernel->id);
  30. int range = starpu_worker_get_range();
  31. /* Set arguments */
  32. {
  33. unsigned int i;
  34. int buf = 0;
  35. for (i=0; i<cmd->num_args; i++) {
  36. switch (cmd->arg_types[i]) {
  37. case Null:
  38. err = clSetKernelArg(cmd->kernel->cl_kernels[range], i, cmd->arg_sizes[i], NULL);
  39. break;
  40. case Buffer: {
  41. cl_mem mem;
  42. mem = (cl_mem)STARPU_VARIABLE_GET_PTR(descr[buf]);
  43. err = clSetKernelArg(cmd->kernel->cl_kernels[range], i, cmd->arg_sizes[i], &mem);
  44. buf++;
  45. }
  46. break;
  47. case Immediate:
  48. err = clSetKernelArg(cmd->kernel->cl_kernels[range], i, cmd->arg_sizes[i], cmd->args[i]);
  49. break;
  50. }
  51. if (err != CL_SUCCESS) {
  52. DEBUG_CL("clSetKernelArg", err);
  53. DEBUG_ERROR("Aborting\n");
  54. }
  55. }
  56. }
  57. /* Calling Kernel */
  58. cl_event event;
  59. err = clEnqueueNDRangeKernel(cq, cmd->kernel->cl_kernels[range], cmd->work_dim, cmd->global_work_offset, cmd->global_work_size, cmd->local_work_size, 0, NULL, &event);
  60. if (err != CL_SUCCESS) {
  61. ERROR_MSG("Worker[%d] Unable to Enqueue kernel (error %d)\n", wid, err);
  62. DEBUG_CL("clEnqueueNDRangeKernel", err);
  63. DEBUG_MSG("Workdim %u, global_work_offset %p, global_work_size %p, local_work_size %p\n",
  64. cmd->work_dim, cmd->global_work_offset, cmd->global_work_size, cmd->local_work_size);
  65. DEBUG_MSG("Global work size: %ld %ld %ld\n", (long)cmd->global_work_size[0],
  66. (long)(cmd->work_dim > 1 ? cmd->global_work_size[1] : 1), (long)(cmd->work_dim > 2 ? cmd->global_work_size[2] : 1));
  67. if (cmd->local_work_size != NULL)
  68. DEBUG_MSG("Local work size: %ld %ld %ld\n", (long)cmd->local_work_size[0],
  69. (long)(cmd->work_dim > 1 ? cmd->local_work_size[1] : 1), (long)(cmd->work_dim > 2 ? cmd->local_work_size[2] : 1));
  70. }
  71. else {
  72. /* Waiting for kernel to terminate */
  73. clWaitForEvents(1, &event);
  74. clReleaseEvent(event);
  75. }
  76. }
  77. /**
  78. * Real kernel enqueuing command
  79. */
  80. cl_int command_ndrange_kernel_submit(command_ndrange_kernel cmd) {
  81. starpu_task task = task_create();
  82. task->cl = &cmd->codelet;
  83. task->cl->model = cmd->kernel->perfmodel;
  84. task->cl_arg = cmd;
  85. task->cl_arg_size = sizeof(cmd);
  86. /* Execute the task on a specific worker? */
  87. if (cmd->_command.event->cq->device != NULL) {
  88. task->execute_on_a_specific_worker = 1;
  89. task->workerid = cmd->_command.event->cq->device->worker_id;
  90. }
  91. struct starpu_codelet * codelet = task->cl;
  92. /* We need to detect which parameters are OpenCL's memory objects and
  93. * we retrieve their corresponding StarPU buffers */
  94. cmd->num_buffers = 0;
  95. cmd->buffers = malloc(sizeof(cl_mem) * cmd->num_args);
  96. unsigned int i;
  97. for (i=0; i<cmd->num_args; i++) {
  98. if (cmd->arg_types[i] == Buffer) {
  99. cl_mem buf = *(cl_mem*)cmd->args[i];
  100. gc_entity_store(&cmd->buffers[cmd->num_buffers], buf);
  101. task->handles[cmd->num_buffers] = buf->handle;
  102. /* Determine best StarPU buffer access mode */
  103. int mode;
  104. if (buf->mode == CL_MEM_READ_ONLY)
  105. mode = STARPU_R;
  106. else if (buf->mode == CL_MEM_WRITE_ONLY) {
  107. mode = STARPU_W;
  108. buf->scratch = 0;
  109. }
  110. else if (buf->scratch) { //RW but never accessed in RW or W mode
  111. mode = STARPU_W;
  112. buf->scratch = 0;
  113. }
  114. else {
  115. mode = STARPU_RW;
  116. buf->scratch = 0;
  117. }
  118. codelet->modes[cmd->num_buffers] = mode;
  119. cmd->num_buffers += 1;
  120. }
  121. }
  122. codelet->nbuffers = cmd->num_buffers;
  123. task_submit(task, cmd);
  124. return CL_SUCCESS;
  125. }
/**
 * Implementation of clEnqueueNDRangeKernel.
 *
 * Two execution paths:
 *  - If the kernel registered a "split function" and its split lock can
 *    be taken, the launch is delegated to that function, which divides
 *    the work into 'iter' parts.  The measured duration of each tried
 *    partition count is recorded in kernel->split_perfs; once every
 *    count in [1, split_space) has a measurement, the fastest one is
 *    reused.  A synthetic 'totalEvent' covering the whole split run is
 *    returned to the caller.
 *  - Otherwise, a regular ndrange-kernel command is created and
 *    enqueued on the command queue.
 */
CL_API_ENTRY cl_int CL_API_CALL
soclEnqueueNDRangeKernel(cl_command_queue cq,
		cl_kernel kernel,
		cl_uint work_dim,
		const size_t * global_work_offset,
		const size_t * global_work_size,
		const size_t * local_work_size,
		cl_uint num_events,
		const cl_event * events,
		cl_event * event) CL_API_SUFFIX__VERSION_1_1
{
	/* trylock: if another thread is already running this kernel through
	 * the split path, fall through to the regular path instead of
	 * blocking. */
	if (kernel->split_func != NULL && !starpu_pthread_mutex_trylock(&kernel->split_lock)) {
		cl_event beforeEvent, afterEvent, totalEvent;

		/* Event returned to the caller, spanning the whole split run. */
		totalEvent = event_create();
		gc_entity_store(&totalEvent->cq, cq);

		/* Marker command: waits for the caller-supplied dependencies and
		 * marks the start of the split run. */
		command_marker cmd = command_marker_create();
		beforeEvent = command_event_get(cmd);
		command_queue_enqueue(cq, cmd, num_events, events);

		/* Pick the partition count to try: the first count with no
		 * measurement yet, while tracking the best measured one. */
		cl_uint iter = 1;
		cl_uint split_min = CL_UINT_MAX;
		cl_uint split_min_iter = 1;
		while (iter < kernel->split_space && kernel->split_perfs[iter] != 0) {
			if (kernel->split_perfs[iter] < split_min) {
				split_min = kernel->split_perfs[iter];
				split_min_iter = iter;
			}
			iter++;
		}
		/* All counts measured: settle on the fastest one. */
		if (iter == kernel->split_space) {
			iter = split_min_iter;
		}

		cl_int ret = kernel->split_func(cq, iter, kernel->split_data, beforeEvent, &afterEvent);

		if (ret == CL_SUCCESS) {
			//FIXME: blocking call
			soclWaitForEvents(1, &afterEvent);

			/* Store perf */
			cl_ulong start,end;
			soclGetEventProfilingInfo(beforeEvent, CL_PROFILING_COMMAND_END, sizeof(cl_ulong), &start, NULL);
			soclGetEventProfilingInfo(afterEvent, CL_PROFILING_COMMAND_END, sizeof(cl_ulong), &end, NULL);
			soclReleaseEvent(afterEvent);
			kernel->split_perfs[iter] = end-start;

			/* NOTE(review): beforeEvent (obtained via command_event_get)
			 * does not appear to be released on either path — verify the
			 * reference-counting convention of command_event_get. */

			starpu_pthread_mutex_unlock(&kernel->split_lock);

			/* Complete the synthetic event and fill in its profiling
			 * timestamps from the measured split run. */
			event_complete(totalEvent);

			totalEvent->prof_start = start;
			totalEvent->prof_submit = start;
			totalEvent->prof_queued = start;
			totalEvent->prof_end = end;

			RETURN_EVENT(totalEvent,event);
		} else {
			starpu_pthread_mutex_unlock(&kernel->split_lock);
			soclReleaseEvent(totalEvent);
		}

		return ret;
	}
	else {
		/* Regular path: enqueue a ndrange-kernel command and hand its
		 * event to the caller. */
		command_ndrange_kernel cmd = command_ndrange_kernel_create(kernel, work_dim,
				global_work_offset, global_work_size, local_work_size);

		cl_event ev = command_event_get(cmd);
		command_queue_enqueue(cq, cmd, num_events, events);

		RETURN_EVENT(ev, event);
	}

	/* Reached only via the regular path (RETURN_EVENT is presumably a
	 * non-returning-store macro — confirm in socl.h). */
	return CL_SUCCESS;
}