/* cl_enqueuendrangekernel.c */
/* StarPU --- Runtime system for heterogeneous multicore architectures.
 *
 * Copyright (C) 2010,2011 University of Bordeaux
 *
 * StarPU is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation; either version 2.1 of the License, or (at
 * your option) any later version.
 *
 * StarPU is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * See the GNU Lesser General Public License in COPYING.LGPL for more details.
 */
#include "socl.h"
#include "event.h"
  18. void soclEnqueueNDRangeKernel_task(void *descr[], void *args) {
  19. command_ndrange_kernel cmd = (command_ndrange_kernel)args;
  20. cl_command_queue cq;
  21. int wid;
  22. cl_int err;
  23. cl_event ev = command_event_get(cmd);
  24. ev->prof_start = _socl_nanotime();
  25. gc_entity_release(ev);
  26. wid = starpu_worker_get_id();
  27. starpu_opencl_get_queue(wid, &cq);
  28. DEBUG_MSG("[worker %d] [kernel %d] Executing kernel...\n", wid, cmd->kernel->id);
  29. int range = starpu_worker_get_range();
  30. /* Set arguments */
  31. {
  32. unsigned int i;
  33. int buf = 0;
  34. for (i=0; i<cmd->num_args; i++) {
  35. switch (cmd->arg_types[i]) {
  36. case Null:
  37. err = clSetKernelArg(cmd->kernel->cl_kernels[range], i, cmd->arg_sizes[i], NULL);
  38. break;
  39. case Buffer: {
  40. cl_mem mem;
  41. mem = (cl_mem)STARPU_VARIABLE_GET_PTR(descr[buf]);
  42. err = clSetKernelArg(cmd->kernel->cl_kernels[range], i, cmd->arg_sizes[i], &mem);
  43. buf++;
  44. }
  45. break;
  46. case Immediate:
  47. err = clSetKernelArg(cmd->kernel->cl_kernels[range], i, cmd->arg_sizes[i], cmd->args[i]);
  48. break;
  49. }
  50. if (err != CL_SUCCESS) {
  51. DEBUG_CL("clSetKernelArg", err);
  52. DEBUG_ERROR("Aborting\n");
  53. }
  54. }
  55. }
  56. /* Calling Kernel */
  57. cl_event event;
  58. err = clEnqueueNDRangeKernel(cq, cmd->kernel->cl_kernels[range], cmd->work_dim, cmd->global_work_offset, cmd->global_work_size, cmd->local_work_size, 0, NULL, &event);
  59. if (err != CL_SUCCESS) {
  60. ERROR_MSG("Worker[%d] Unable to Enqueue kernel (error %d)\n", wid, err);
  61. DEBUG_CL("clEnqueueNDRangeKernel", err);
  62. DEBUG_MSG("Workdim %d, global_work_offset %p, global_work_size %p, local_work_size %p\n",
  63. cmd->work_dim, cmd->global_work_offset, cmd->global_work_size, cmd->local_work_size);
  64. DEBUG_MSG("Global work size: %ld %ld %ld\n", cmd->global_work_size[0],
  65. (cmd->work_dim > 1 ? cmd->global_work_size[1] : 1), (cmd->work_dim > 2 ? cmd->global_work_size[2] : 1));
  66. if (cmd->local_work_size != NULL)
  67. DEBUG_MSG("Local work size: %ld %ld %ld\n", cmd->local_work_size[0],
  68. (cmd->work_dim > 1 ? cmd->local_work_size[1] : 1), (cmd->work_dim > 2 ? cmd->local_work_size[2] : 1));
  69. }
  70. else {
  71. /* Waiting for kernel to terminate */
  72. clWaitForEvents(1, &event);
  73. clReleaseEvent(event);
  74. }
  75. }
/**
 * Real kernel enqueuing command
 */
  79. cl_int command_ndrange_kernel_submit(command_ndrange_kernel cmd) {
  80. starpu_task task = task_create();
  81. task->cl = &cmd->codelet;
  82. task->cl->model = cmd->kernel->perfmodel;
  83. task->cl_arg = cmd;
  84. task->cl_arg_size = sizeof(cmd);
  85. /* Execute the task on a specific worker? */
  86. if (cmd->_command.event->cq->device != NULL) {
  87. task->execute_on_a_specific_worker = 1;
  88. task->workerid = cmd->_command.event->cq->device->worker_id;
  89. }
  90. struct starpu_codelet * codelet = task->cl;
  91. /* We need to detect which parameters are OpenCL's memory objects and
  92. * we retrieve their corresponding StarPU buffers */
  93. cmd->num_buffers = 0;
  94. cmd->buffers = malloc(sizeof(cl_mem) * cmd->num_args);
  95. unsigned int i;
  96. for (i=0; i<cmd->num_args; i++) {
  97. if (cmd->arg_types[i] == Buffer) {
  98. cl_mem buf = *(cl_mem*)cmd->args[i];
  99. gc_entity_store(&cmd->buffers[cmd->num_buffers], buf);
  100. task->handles[cmd->num_buffers] = buf->handle;
  101. /* Determine best StarPU buffer access mode */
  102. int mode;
  103. if (buf->mode == CL_MEM_READ_ONLY)
  104. mode = STARPU_R;
  105. else if (buf->mode == CL_MEM_WRITE_ONLY) {
  106. mode = STARPU_W;
  107. buf->scratch = 0;
  108. }
  109. else if (buf->scratch) { //RW but never accessed in RW or W mode
  110. mode = STARPU_W;
  111. buf->scratch = 0;
  112. }
  113. else {
  114. mode = STARPU_RW;
  115. buf->scratch = 0;
  116. }
  117. codelet->modes[cmd->num_buffers] = mode;
  118. cmd->num_buffers += 1;
  119. }
  120. }
  121. codelet->nbuffers = cmd->num_buffers;
  122. task_submit(task, cmd);
  123. return CL_SUCCESS;
  124. }
  125. CL_API_ENTRY cl_int CL_API_CALL
  126. soclEnqueueNDRangeKernel(cl_command_queue cq,
  127. cl_kernel kernel,
  128. cl_uint work_dim,
  129. const size_t * global_work_offset,
  130. const size_t * global_work_size,
  131. const size_t * local_work_size,
  132. cl_uint num_events,
  133. const cl_event * events,
  134. cl_event * event) CL_API_SUFFIX__VERSION_1_1
  135. {
  136. if (kernel->split_func != NULL && !pthread_mutex_trylock(&kernel->split_lock)) {
  137. cl_event beforeEvent, afterEvent, totalEvent;
  138. totalEvent = event_create();
  139. gc_entity_store(&totalEvent->cq, cq);
  140. command_marker cmd = command_marker_create();
  141. beforeEvent = command_event_get(cmd);
  142. command_queue_enqueue(cq, cmd, num_events, events);
  143. cl_uint iter = 1;
  144. cl_uint split_min = CL_UINT_MAX;
  145. cl_uint split_min_iter = 1;
  146. while (kernel->split_perfs[iter] != 0 && iter < kernel->split_space) {
  147. if (kernel->split_perfs[iter] < split_min) {
  148. split_min = kernel->split_perfs[iter];
  149. split_min_iter = iter;
  150. }
  151. iter++;
  152. }
  153. if (iter == kernel->split_space) {
  154. iter = split_min_iter;
  155. }
  156. cl_int ret = kernel->split_func(cq, iter, kernel->split_data, beforeEvent, &afterEvent);
  157. if (ret == CL_SUCCESS) {
  158. //FIXME: blocking call
  159. soclWaitForEvents(1, &afterEvent);
  160. /* Store perf */
  161. cl_ulong start,end;
  162. soclGetEventProfilingInfo(beforeEvent, CL_PROFILING_COMMAND_END, sizeof(cl_ulong), &start, NULL);
  163. soclGetEventProfilingInfo(afterEvent, CL_PROFILING_COMMAND_END, sizeof(cl_ulong), &end, NULL);
  164. soclReleaseEvent(afterEvent);
  165. kernel->split_perfs[iter] = end-start;
  166. pthread_mutex_unlock(&kernel->split_lock);
  167. event_complete(totalEvent);
  168. totalEvent->prof_start = start;
  169. totalEvent->prof_submit = start;
  170. totalEvent->prof_queued = start;
  171. totalEvent->prof_end = end;
  172. RETURN_EVENT(totalEvent,event);
  173. } else {
  174. soclReleaseEvent(totalEvent);
  175. }
  176. return ret;
  177. }
  178. else {
  179. command_ndrange_kernel cmd = command_ndrange_kernel_create(kernel, work_dim,
  180. global_work_offset, global_work_size, local_work_size);
  181. cl_event ev = command_event_get(cmd);
  182. command_queue_enqueue(cq, cmd, num_events, events);
  183. RETURN_EVENT(ev, event);
  184. }
  185. return CL_SUCCESS;
  186. }