/* StarPU --- Runtime system for heterogeneous multicore architectures.
 *
 * Copyright (C) 2010-2013  Université de Bordeaux 1
 * Copyright (C) 2010, 2011  Centre National de la Recherche Scientifique
 *
 * StarPU is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation; either version 2.1 of the License, or (at
 * your option) any later version.
 *
 * StarPU is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * See the GNU Lesser General Public License in COPYING.LGPL for more details.
 */

#include <starpu.h>
#include <common/config.h>
#include <common/utils.h>
#include <core/sched_policy.h>
#include <datawizard/datastats.h>
#include <common/fxt.h>
#include "copy_driver.h"
#include "memalloc.h"
#include <starpu_opencl.h>
#include <starpu_cuda.h>
#include <profiling/profiling.h>

#ifdef STARPU_SIMGRID
#include <core/simgrid.h>
#include <msg/msg.h>
#endif
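
/* Wake every worker that is currently blocked on one of the conditions
 * attached to the given memory node (e.g. because it was waiting for a data
 * transfer or an allocation on that node to complete). */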
void _starpu_wake_all_blocked_workers_on_node(unsigned nodeid)
{
	/* wake up all workers on that memory node */
	unsigned cond_id;

	struct _starpu_memory_node_descr * const descr = _starpu_memory_node_get_description();

	_STARPU_PTHREAD_RWLOCK_RDLOCK(&descr->conditions_rwlock);

	unsigned nconds = descr->condition_count[nodeid];
	for (cond_id = 0; cond_id < nconds; cond_id++)
	{
		struct _starpu_cond_and_mutex *condition;
		condition = &descr->conditions_attached_to_node[nodeid][cond_id];

		/* wake anybody waiting on that condition */
		_STARPU_PTHREAD_MUTEX_LOCK(condition->mutex);
		_STARPU_PTHREAD_COND_BROADCAST(condition->cond);
		_STARPU_PTHREAD_MUTEX_UNLOCK(condition->mutex);
	}

	_STARPU_PTHREAD_RWLOCK_UNLOCK(&descr->conditions_rwlock);
}
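
/* Same as above, but broadcast on every condition registered with the memory
 * node descriptor, whatever the node: workers may be blocked on any of the
 * queues' conditions. */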
void starpu_wake_all_blocked_workers(void)
{
	/* workers may be blocked on the various queues' conditions */
	unsigned cond_id;

	struct _starpu_memory_node_descr * const descr = _starpu_memory_node_get_description();

	_STARPU_PTHREAD_RWLOCK_RDLOCK(&descr->conditions_rwlock);

	unsigned nconds = descr->total_condition_count;
	for (cond_id = 0; cond_id < nconds; cond_id++)
	{
		struct _starpu_cond_and_mutex *condition;
		condition = &descr->conditions_all[cond_id];

		/* wake anybody waiting on that condition */
		_STARPU_PTHREAD_MUTEX_LOCK(condition->mutex);
		_STARPU_PTHREAD_COND_BROADCAST(condition->cond);
		_STARPU_PTHREAD_MUTEX_UNLOCK(condition->mutex);
	}

	_STARPU_PTHREAD_RWLOCK_UNLOCK(&descr->conditions_rwlock);
}

#ifdef STARPU_USE_FXT
/* Each communication gets a unique identifier so that its beginning and its
 * end can be matched in the trace. */
static unsigned communication_cnt = 0;
#endif
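
/* Perform the actual transfer of a handle between two replicates whose
 * buffers are already allocated and pinned (non-zero refcnt).  The copy
 * method is selected from the (source kind, destination kind) pair.  When
 * `req' is non-NULL and asynchronous copies are enabled, the transfer is
 * started asynchronously and its completion can later be tested or waited
 * for through req->async_channel; the asynchronous copy method's return
 * value (typically -EAGAIN while the transfer is pending) is propagated to
 * the caller, while 0 means the copy completed synchronously. */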
static int copy_data_1_to_1_generic(starpu_data_handle_t handle,
				    struct _starpu_data_replicate *src_replicate,
				    struct _starpu_data_replicate *dst_replicate,
				    struct _starpu_data_request *req STARPU_ATTRIBUTE_UNUSED)
{
	unsigned src_node = src_replicate->memory_node;
	unsigned dst_node = dst_replicate->memory_node;

	STARPU_ASSERT(src_replicate->refcnt);
	STARPU_ASSERT(dst_replicate->refcnt);

	STARPU_ASSERT(src_replicate->allocated);
	STARPU_ASSERT(dst_replicate->allocated);

	_starpu_comm_amounts_inc(src_node, dst_node, handle->ops->get_size(handle));
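
	/* In simulation mode, no data are actually moved: SimGrid only
	 * simulates a transfer of the corresponding size between the two
	 * nodes. */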
#ifdef STARPU_SIMGRID
	return _starpu_simgrid_transfer(handle->ops->get_size(handle), src_node, dst_node, req);
#else /* !SIMGRID */
	int ret = 0;

	const struct starpu_data_copy_methods *copy_methods = handle->ops->copy_methods;

	enum starpu_node_kind src_kind = starpu_node_get_kind(src_node);
	enum starpu_node_kind dst_kind = starpu_node_get_kind(dst_node);

#ifdef STARPU_USE_CUDA
	cudaError_t cures;
	cudaStream_t stream;
#endif

	void *src_interface = src_replicate->data_interface;
	void *dst_interface = dst_replicate->data_interface;
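
	/* With CUDA peer-to-peer copies, either end of the transfer may be
	 * driven by the calling thread, so make sure the current CUDA device
	 * is one of the devices involved before issuing the copy. */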
#if defined(STARPU_USE_CUDA) && defined(HAVE_CUDA_MEMCPY_PEER)
	if ((src_kind == STARPU_CUDA_RAM) || (dst_kind == STARPU_CUDA_RAM))
	{
		int node = (dst_kind == STARPU_CUDA_RAM)?dst_node:src_node;
		starpu_cuda_set_device(_starpu_memory_node_get_devid(node));
	}
#endif

	switch (_STARPU_MEMORY_NODE_TUPLE(src_kind,dst_kind))
	{
	case _STARPU_MEMORY_NODE_TUPLE(STARPU_CPU_RAM,STARPU_CPU_RAM):
		/* STARPU_CPU_RAM -> STARPU_CPU_RAM */
		if (copy_methods->ram_to_ram)
			copy_methods->ram_to_ram(src_interface, src_node, dst_interface, dst_node);
		else
			copy_methods->any_to_any(src_interface, src_node, dst_interface, dst_node, req ? &req->async_channel : NULL);
		break;

#ifdef STARPU_USE_CUDA
	case _STARPU_MEMORY_NODE_TUPLE(STARPU_CUDA_RAM,STARPU_CPU_RAM):
		/* only the proper CUDA worker thread can initiate this directly! */
#if !defined(HAVE_CUDA_MEMCPY_PEER)
		STARPU_ASSERT(_starpu_memory_node_get_local_key() == src_node);
#endif
		if (!req || starpu_asynchronous_copy_disabled() || starpu_asynchronous_cuda_copy_disabled() ||
				!(copy_methods->cuda_to_ram_async || copy_methods->any_to_any))
		{
			/* this is not associated with a request, so it is synchronous */
			STARPU_ASSERT(copy_methods->cuda_to_ram || copy_methods->any_to_any);
			if (copy_methods->cuda_to_ram)
				copy_methods->cuda_to_ram(src_interface, src_node, dst_interface, dst_node);
			else
				copy_methods->any_to_any(src_interface, src_node, dst_interface, dst_node, NULL);
		}
		else
		{
			req->async_channel.type = STARPU_CUDA_RAM;
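			/* Create a CUDA event and record it on the transfer
			 * stream once the copy has been queued, so that the
			 * driver can later test or wait for its completion. */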
			cures = cudaEventCreate(&req->async_channel.event.cuda_event);
			if (STARPU_UNLIKELY(cures != cudaSuccess)) STARPU_CUDA_REPORT_ERROR(cures);

			stream = starpu_cuda_get_local_out_transfer_stream();
			if (copy_methods->cuda_to_ram_async)
				ret = copy_methods->cuda_to_ram_async(src_interface, src_node, dst_interface, dst_node, stream);
			else
			{
				STARPU_ASSERT(copy_methods->any_to_any);
				ret = copy_methods->any_to_any(src_interface, src_node, dst_interface, dst_node, &req->async_channel);
			}

			cures = cudaEventRecord(req->async_channel.event.cuda_event, stream);
			if (STARPU_UNLIKELY(cures != cudaSuccess)) STARPU_CUDA_REPORT_ERROR(cures);
		}
		break;

	case _STARPU_MEMORY_NODE_TUPLE(STARPU_CPU_RAM,STARPU_CUDA_RAM):
		/* STARPU_CPU_RAM -> STARPU_CUDA_RAM */
		/* only the proper CUDA worker thread can initiate this! */
#if !defined(HAVE_CUDA_MEMCPY_PEER)
		STARPU_ASSERT(_starpu_memory_node_get_local_key() == dst_node);
#endif
		if (!req || starpu_asynchronous_copy_disabled() || starpu_asynchronous_cuda_copy_disabled() ||
				!(copy_methods->ram_to_cuda_async || copy_methods->any_to_any))
		{
			/* this is not associated with a request, so it is synchronous */
			STARPU_ASSERT(copy_methods->ram_to_cuda || copy_methods->any_to_any);
			if (copy_methods->ram_to_cuda)
				copy_methods->ram_to_cuda(src_interface, src_node, dst_interface, dst_node);
			else
				copy_methods->any_to_any(src_interface, src_node, dst_interface, dst_node, NULL);
		}
		else
		{
			req->async_channel.type = STARPU_CUDA_RAM;
			cures = cudaEventCreate(&req->async_channel.event.cuda_event);
			if (STARPU_UNLIKELY(cures != cudaSuccess))
				STARPU_CUDA_REPORT_ERROR(cures);

			stream = starpu_cuda_get_local_in_transfer_stream();
			if (copy_methods->ram_to_cuda_async)
				ret = copy_methods->ram_to_cuda_async(src_interface, src_node, dst_interface, dst_node, stream);
			else
			{
				STARPU_ASSERT(copy_methods->any_to_any);
				ret = copy_methods->any_to_any(src_interface, src_node, dst_interface, dst_node, &req->async_channel);
			}

			cures = cudaEventRecord(req->async_channel.event.cuda_event, stream);
			if (STARPU_UNLIKELY(cures != cudaSuccess))
				STARPU_CUDA_REPORT_ERROR(cures);
		}
		break;

	case _STARPU_MEMORY_NODE_TUPLE(STARPU_CUDA_RAM,STARPU_CUDA_RAM):
		/* CUDA -> CUDA transfer */
		if (!req || starpu_asynchronous_copy_disabled() || starpu_asynchronous_cuda_copy_disabled() ||
				!(copy_methods->cuda_to_cuda_async || copy_methods->any_to_any))
		{
			STARPU_ASSERT(copy_methods->cuda_to_cuda || copy_methods->any_to_any);
			/* this is not associated with a request, so it is synchronous */
			if (copy_methods->cuda_to_cuda)
				copy_methods->cuda_to_cuda(src_interface, src_node, dst_interface, dst_node);
			else
				copy_methods->any_to_any(src_interface, src_node, dst_interface, dst_node, NULL);
		}
		else
		{
			req->async_channel.type = STARPU_CUDA_RAM;
			cures = cudaEventCreate(&req->async_channel.event.cuda_event);
			if (STARPU_UNLIKELY(cures != cudaSuccess)) STARPU_CUDA_REPORT_ERROR(cures);

			stream = starpu_cuda_get_local_peer_transfer_stream();
			if (copy_methods->cuda_to_cuda_async)
				ret = copy_methods->cuda_to_cuda_async(src_interface, src_node, dst_interface, dst_node, stream);
			else
			{
				STARPU_ASSERT(copy_methods->any_to_any);
				ret = copy_methods->any_to_any(src_interface, src_node, dst_interface, dst_node, &req->async_channel);
			}

			cures = cudaEventRecord(req->async_channel.event.cuda_event, stream);
			if (STARPU_UNLIKELY(cures != cudaSuccess)) STARPU_CUDA_REPORT_ERROR(cures);
		}
		break;
#endif

#ifdef STARPU_USE_OPENCL
	case _STARPU_MEMORY_NODE_TUPLE(STARPU_OPENCL_RAM,STARPU_CPU_RAM):
		/* STARPU_OPENCL_RAM -> STARPU_CPU_RAM */
		STARPU_ASSERT(_starpu_memory_node_get_local_key() == src_node);
		if (!req || starpu_asynchronous_copy_disabled() || starpu_asynchronous_opencl_copy_disabled() ||
				!(copy_methods->opencl_to_ram_async || copy_methods->any_to_any))
		{
			STARPU_ASSERT(copy_methods->opencl_to_ram || copy_methods->any_to_any);
			/* this is not associated with a request, so it is synchronous */
			if (copy_methods->opencl_to_ram)
				copy_methods->opencl_to_ram(src_interface, src_node, dst_interface, dst_node);
			else
				copy_methods->any_to_any(src_interface, src_node, dst_interface, dst_node, NULL);
		}
		else
		{
			req->async_channel.type = STARPU_OPENCL_RAM;
			if (copy_methods->opencl_to_ram_async)
				ret = copy_methods->opencl_to_ram_async(src_interface, src_node, dst_interface, dst_node, &(req->async_channel.event.opencl_event));
			else
			{
				STARPU_ASSERT(copy_methods->any_to_any);
				ret = copy_methods->any_to_any(src_interface, src_node, dst_interface, dst_node, &req->async_channel);
			}
		}
		break;

	case _STARPU_MEMORY_NODE_TUPLE(STARPU_CPU_RAM,STARPU_OPENCL_RAM):
		/* STARPU_CPU_RAM -> STARPU_OPENCL_RAM */
		STARPU_ASSERT(_starpu_memory_node_get_local_key() == dst_node);
		if (!req || starpu_asynchronous_copy_disabled() || starpu_asynchronous_opencl_copy_disabled() ||
				!(copy_methods->ram_to_opencl_async || copy_methods->any_to_any))
		{
			STARPU_ASSERT(copy_methods->ram_to_opencl || copy_methods->any_to_any);
			/* this is not associated with a request, so it is synchronous */
			if (copy_methods->ram_to_opencl)
				copy_methods->ram_to_opencl(src_interface, src_node, dst_interface, dst_node);
			else
				copy_methods->any_to_any(src_interface, src_node, dst_interface, dst_node, NULL);
		}
		else
		{
			req->async_channel.type = STARPU_OPENCL_RAM;
			if (copy_methods->ram_to_opencl_async)
				ret = copy_methods->ram_to_opencl_async(src_interface, src_node, dst_interface, dst_node, &(req->async_channel.event.opencl_event));
			else
			{
				STARPU_ASSERT(copy_methods->any_to_any);
				ret = copy_methods->any_to_any(src_interface, src_node, dst_interface, dst_node, &req->async_channel);
			}
		}
		break;

	case _STARPU_MEMORY_NODE_TUPLE(STARPU_OPENCL_RAM,STARPU_OPENCL_RAM):
		/* STARPU_OPENCL_RAM -> STARPU_OPENCL_RAM */
		STARPU_ASSERT(_starpu_memory_node_get_local_key() == dst_node || _starpu_memory_node_get_local_key() == src_node);
		if (!req || starpu_asynchronous_copy_disabled() || starpu_asynchronous_opencl_copy_disabled() ||
				!(copy_methods->opencl_to_opencl_async || copy_methods->any_to_any))
		{
			STARPU_ASSERT(copy_methods->opencl_to_opencl || copy_methods->any_to_any);
			/* this is not associated with a request, so it is synchronous */
			if (copy_methods->opencl_to_opencl)
				copy_methods->opencl_to_opencl(src_interface, src_node, dst_interface, dst_node);
			else
				copy_methods->any_to_any(src_interface, src_node, dst_interface, dst_node, NULL);
		}
		else
		{
			req->async_channel.type = STARPU_OPENCL_RAM;
			if (copy_methods->opencl_to_opencl_async)
				ret = copy_methods->opencl_to_opencl_async(src_interface, src_node, dst_interface, dst_node, &(req->async_channel.event.opencl_event));
			else
			{
				STARPU_ASSERT(copy_methods->any_to_any);
				ret = copy_methods->any_to_any(src_interface, src_node, dst_interface, dst_node, &req->async_channel);
			}
		}
		break;
#endif

	default:
		STARPU_ABORT();
		break;
	}

	return ret;
#endif /* !SIMGRID */
}
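
/* Make sure the destination replicate has an allocated buffer (allocating it
 * if allowed by may_alloc) and, unless donotread is set, trigger the actual
 * transfer from the source replicate.  Returns -ENOMEM when the destination
 * buffer cannot be allocated, otherwise the return value of the underlying
 * copy. */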
int __attribute__((warn_unused_result)) _starpu_driver_copy_data_1_to_1(starpu_data_handle_t handle,
									struct _starpu_data_replicate *src_replicate,
									struct _starpu_data_replicate *dst_replicate,
									unsigned donotread,
									struct _starpu_data_request *req,
									unsigned may_alloc)
{
	if (!donotread)
	{
		STARPU_ASSERT(src_replicate->allocated);
		STARPU_ASSERT(src_replicate->refcnt);
	}

	int ret_alloc, ret_copy;
	unsigned STARPU_ATTRIBUTE_UNUSED com_id = 0;

	unsigned src_node = src_replicate->memory_node;
	unsigned dst_node = dst_replicate->memory_node;

	/* first make sure the destination has an allocated buffer */
	if (!dst_replicate->allocated)
	{
		if (!may_alloc)
			return -ENOMEM;

		ret_alloc = _starpu_allocate_memory_on_node(handle, dst_replicate, req->prefetch);
		if (ret_alloc)
			return -ENOMEM;
	}

	STARPU_ASSERT(dst_replicate->allocated);
	STARPU_ASSERT(dst_replicate->refcnt);

	/* if there is no need to actually read the data,
	 * we do not perform any transfer */
	if (!donotread)
	{
		size_t size = _starpu_data_get_size(handle);
		_starpu_bus_update_profiling_info((int)src_node, (int)dst_node, size);

#ifdef STARPU_USE_FXT
		com_id = STARPU_ATOMIC_ADD(&communication_cnt, 1);

		if (req)
			req->com_id = com_id;
#endif

		_STARPU_TRACE_START_DRIVER_COPY(src_node, dst_node, size, com_id);
		ret_copy = copy_data_1_to_1_generic(handle, src_replicate, dst_replicate, req);

		return ret_copy;
	}

	return 0;
}

/* This can be used by interfaces to easily transfer a piece of data without
 * caring about the particular CUDA/OpenCL methods. */
int starpu_interface_copy(uintptr_t src, size_t src_offset, unsigned src_node, uintptr_t dst, size_t dst_offset, unsigned dst_node, size_t size, void *async_data)
{
	struct _starpu_async_channel *async_channel = async_data;

	enum starpu_node_kind src_kind = starpu_node_get_kind(src_node);
	enum starpu_node_kind dst_kind = starpu_node_get_kind(dst_node);

	switch (_STARPU_MEMORY_NODE_TUPLE(src_kind,dst_kind))
	{
	case _STARPU_MEMORY_NODE_TUPLE(STARPU_CPU_RAM,STARPU_CPU_RAM):
		memcpy((void *) dst + dst_offset, (void *) src + src_offset, size);
		return 0;

#ifdef STARPU_USE_CUDA
	case _STARPU_MEMORY_NODE_TUPLE(STARPU_CUDA_RAM,STARPU_CPU_RAM):
		return starpu_cuda_copy_async_sync(
				(void*) src + src_offset, src_node,
				(void*) dst + dst_offset, dst_node,
				size,
				async_channel?starpu_cuda_get_local_out_transfer_stream():NULL,
				cudaMemcpyDeviceToHost);

	case _STARPU_MEMORY_NODE_TUPLE(STARPU_CPU_RAM,STARPU_CUDA_RAM):
		return starpu_cuda_copy_async_sync(
				(void*) src + src_offset, src_node,
				(void*) dst + dst_offset, dst_node,
				size,
				async_channel?starpu_cuda_get_local_in_transfer_stream():NULL,
				cudaMemcpyHostToDevice);

	case _STARPU_MEMORY_NODE_TUPLE(STARPU_CUDA_RAM,STARPU_CUDA_RAM):
		return starpu_cuda_copy_async_sync(
				(void*) src + src_offset, src_node,
				(void*) dst + dst_offset, dst_node,
				size,
				async_channel?starpu_cuda_get_local_peer_transfer_stream():NULL,
				cudaMemcpyDeviceToDevice);
#endif
#ifdef STARPU_USE_OPENCL
	case _STARPU_MEMORY_NODE_TUPLE(STARPU_OPENCL_RAM,STARPU_CPU_RAM):
	case _STARPU_MEMORY_NODE_TUPLE(STARPU_CPU_RAM,STARPU_OPENCL_RAM):
	case _STARPU_MEMORY_NODE_TUPLE(STARPU_OPENCL_RAM,STARPU_OPENCL_RAM):
		return starpu_opencl_copy_async_sync(
				src, src_offset, src_node,
				dst, dst_offset, dst_node,
				size,
				&async_channel->event.opencl_event);
#endif
	default:
		STARPU_ABORT();
		return -1;
	}

	return 0;
}
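
/* A minimal sketch, kept out of the build with #if 0, of how a custom data
 * interface could implement its any_to_any copy method on top of
 * starpu_interface_copy().  The example_vector_interface layout and the
 * function name below are hypothetical and only serve as an illustration. */
#if 0
struct example_vector_interface
{
	uintptr_t ptr;		/* base address on the replicate's memory node */
	size_t nx;		/* number of elements */
	size_t elemsize;	/* size of one element */
};

static int example_copy_any_to_any(void *src_interface, unsigned src_node,
				   void *dst_interface, unsigned dst_node,
				   void *async_data)
{
	struct example_vector_interface *src = src_interface;
	struct example_vector_interface *dst = dst_interface;

	/* Delegate to the generic helper, which picks memcpy, CUDA or OpenCL
	 * depending on the kinds of the two memory nodes, and performs the
	 * copy asynchronously when async_data provides a channel. */
	return starpu_interface_copy(src->ptr, 0, src_node,
				     dst->ptr, 0, dst_node,
				     src->nx * src->elemsize, async_data);
}
#endif

/* Block until the asynchronous transfer attached to the given channel has
 * completed: synchronize on and destroy the CUDA event, or wait for and
 * release the OpenCL event, depending on the channel type. */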
void _starpu_driver_wait_request_completion(struct _starpu_async_channel *async_channel)
{
#ifdef STARPU_SIMGRID
	_STARPU_PTHREAD_MUTEX_LOCK(&async_channel->event.mutex);
	while (!async_channel->event.finished)
		_STARPU_PTHREAD_COND_WAIT(&async_channel->event.cond, &async_channel->event.mutex);
	_STARPU_PTHREAD_MUTEX_UNLOCK(&async_channel->event.mutex);
#else /* !SIMGRID */
	enum starpu_node_kind kind = async_channel->type;
#ifdef STARPU_USE_CUDA
	cudaEvent_t event;
	cudaError_t cures;
#endif

	switch (kind)
	{
#ifdef STARPU_USE_CUDA
	case STARPU_CUDA_RAM:
		event = (*async_channel).event.cuda_event;

		cures = cudaEventSynchronize(event);
		if (STARPU_UNLIKELY(cures))
			STARPU_CUDA_REPORT_ERROR(cures);

		cures = cudaEventDestroy(event);
		if (STARPU_UNLIKELY(cures))
			STARPU_CUDA_REPORT_ERROR(cures);

		break;
#endif
#ifdef STARPU_USE_OPENCL
	case STARPU_OPENCL_RAM:
	{
		cl_int err;
		if ((*async_channel).event.opencl_event == NULL)
			STARPU_ABORT();

		err = clWaitForEvents(1, &((*async_channel).event.opencl_event));
		if (STARPU_UNLIKELY(err != CL_SUCCESS))
			STARPU_OPENCL_REPORT_ERROR(err);

		err = clReleaseEvent((*async_channel).event.opencl_event);
		if (STARPU_UNLIKELY(err != CL_SUCCESS))
			STARPU_OPENCL_REPORT_ERROR(err);

		break;
	}
#endif
	case STARPU_CPU_RAM:
	default:
		STARPU_ABORT();
	}
#endif /* !SIMGRID */
}
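
/* Non-blocking counterpart of the function above: return a non-zero value if
 * the asynchronous transfer attached to the channel has completed, 0 if it
 * is still in progress.  The CUDA event is destroyed once completion has
 * been observed; the OpenCL event is only queried. */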
unsigned _starpu_driver_test_request_completion(struct _starpu_async_channel *async_channel)
{
#ifdef STARPU_SIMGRID
	unsigned ret;
	_STARPU_PTHREAD_MUTEX_LOCK(&async_channel->event.mutex);
	ret = async_channel->event.finished;
	_STARPU_PTHREAD_MUTEX_UNLOCK(&async_channel->event.mutex);
	return ret;
#else /* !SIMGRID */
	enum starpu_node_kind kind = async_channel->type;
	unsigned success = 0;
#ifdef STARPU_USE_CUDA
	cudaEvent_t event;
#endif

	switch (kind)
	{
#ifdef STARPU_USE_CUDA
	case STARPU_CUDA_RAM:
	{
		event = (*async_channel).event.cuda_event;
		cudaError_t cures = cudaEventQuery(event);

		success = (cures == cudaSuccess);
		if (success)
			cudaEventDestroy(event);
		else if (cures != cudaErrorNotReady)
			STARPU_CUDA_REPORT_ERROR(cures);
		break;
	}
#endif
#ifdef STARPU_USE_OPENCL
	case STARPU_OPENCL_RAM:
	{
		cl_int event_status;
		cl_event opencl_event = (*async_channel).event.opencl_event;

		if (opencl_event == NULL)
			STARPU_ABORT();

		cl_int err = clGetEventInfo(opencl_event, CL_EVENT_COMMAND_EXECUTION_STATUS, sizeof(event_status), &event_status, NULL);
		if (STARPU_UNLIKELY(err != CL_SUCCESS))
			STARPU_OPENCL_REPORT_ERROR(err);
		if (event_status < 0)
			STARPU_OPENCL_REPORT_ERROR(event_status);

		success = (event_status == CL_COMPLETE);
		break;
	}
#endif
	case STARPU_CPU_RAM:
	default:
		STARPU_ABORT();
	}

	return success;
#endif /* !SIMGRID */
}