/* StarPU --- Runtime system for heterogeneous multicore architectures.
 *
 * Copyright (C) 2010, 2012-2016  Université de Bordeaux
 * Copyright (C) 2012, 2013, 2016  CNRS
 *
 * StarPU is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation; either version 2.1 of the License, or (at
 * your option) any later version.
 *
 * StarPU is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * See the GNU Lesser General Public License in COPYING.LGPL for more details.
 */

#include <config.h>
#include <starpu.h>
#include "../helper.h"

/*
 * Allocate one buffer per worker, do computations with it, and
 * eventually reduce all the buffers into a single one
 */

#define INIT_VALUE	42
#define NTASKS		10000

static unsigned variable;
static starpu_data_handle_t variable_handle;

static uintptr_t per_worker[STARPU_NMAXWORKERS];
static starpu_data_handle_t per_worker_handle[STARPU_NMAXWORKERS];

/* Create per-worker handles */
static void initialize_per_worker_handle(void *arg STARPU_ATTRIBUTE_UNUSED)
{
	int workerid = starpu_worker_get_id_check();

	/* Allocate memory on the worker, and initialize it to 0 */
	switch (starpu_worker_get_type(workerid))
	{
		case STARPU_CPU_WORKER:
			per_worker[workerid] = (uintptr_t)calloc(1, sizeof(variable));
			break;
#ifdef STARPU_USE_OPENCL
		case STARPU_OPENCL_WORKER:
			{
			cl_context context;
			cl_command_queue queue;
			starpu_opencl_get_current_context(&context);
			starpu_opencl_get_current_queue(&queue);

			cl_mem ptr = clCreateBuffer(context, CL_MEM_READ_WRITE, sizeof(variable), NULL, NULL);

			/* Poor man's memset */
			unsigned zero = 0;
			clEnqueueWriteBuffer(queue, ptr, CL_FALSE, 0, sizeof(variable), (void *)&zero, 0, NULL, NULL);
			clFinish(queue);
			per_worker[workerid] = (uintptr_t)ptr;
			}
			break;
#endif
#ifdef STARPU_USE_CUDA
		case STARPU_CUDA_WORKER:
		{
			cudaError_t status;
			status = cudaMalloc((void **)&per_worker[workerid], sizeof(variable));
			if (!per_worker[workerid] || (status != cudaSuccess))
			{
				STARPU_CUDA_REPORT_ERROR(status);
			}
			cudaMemset((void *)per_worker[workerid], 0, sizeof(variable));
			break;
		}
#endif
		default:
			STARPU_ABORT();
			break;
	}

	STARPU_ASSERT(per_worker[workerid]);
}

/*
 *	Implement the reduction method
 */

void cpu_redux_func(void *descr[], void *cl_arg STARPU_ATTRIBUTE_UNUSED)
{
	unsigned *a = (unsigned *)STARPU_VARIABLE_GET_PTR(descr[0]);
	unsigned *b = (unsigned *)STARPU_VARIABLE_GET_PTR(descr[1]);

	FPRINTF(stderr, "%u = %u + %u\n", *a + *b, *a, *b);

	*a = *a + *b;
}

static struct starpu_codelet reduction_codelet =
{
	.cpu_funcs = {cpu_redux_func},
	.nbuffers = 2,
	.modes = {STARPU_RW, STARPU_R},
	.model = NULL
};
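/*
 * Each of the NTASKS tasks below increments one worker-local copy by 1;
 * the reduction codelet above then folds every per-worker copy into
 * `variable`, so the expected final value is INIT_VALUE + NTASKS.
 *
 * Note that this test performs the reduction by hand; StarPU also offers
 * automatic reductions (the STARPU_REDUX access mode combined with
 * starpu_data_set_reduction_methods()), which this test deliberately
 * avoids.
 */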
/*
 *	Use the per-worker local copy
 */

void cpu_func_incr(void *descr[], void *cl_arg STARPU_ATTRIBUTE_UNUSED)
{
	unsigned *val = (unsigned *)STARPU_VARIABLE_GET_PTR(descr[0]);
	*val = *val + 1;
}

#ifdef STARPU_USE_CUDA
/* dummy CUDA implementation */
static void cuda_func_incr(void *descr[], void *cl_arg STARPU_ATTRIBUTE_UNUSED)
{
	STARPU_SKIP_IF_VALGRIND;

	unsigned *val = (unsigned *)STARPU_VARIABLE_GET_PTR(descr[0]);

	unsigned h_val;
	cudaMemcpyAsync(&h_val, val, sizeof(unsigned), cudaMemcpyDeviceToHost, starpu_cuda_get_local_stream());
	cudaStreamSynchronize(starpu_cuda_get_local_stream());
	h_val++;
	cudaMemcpyAsync(val, &h_val, sizeof(unsigned), cudaMemcpyHostToDevice, starpu_cuda_get_local_stream());
}
#endif

#ifdef STARPU_USE_OPENCL
/* dummy OpenCL implementation */
static void opencl_func_incr(void *descr[], void *cl_arg STARPU_ATTRIBUTE_UNUSED)
{
	STARPU_SKIP_IF_VALGRIND;

	cl_mem d_val = (cl_mem)STARPU_VARIABLE_GET_PTR(descr[0]);
	unsigned h_val;

	cl_command_queue queue;
	starpu_opencl_get_current_queue(&queue);

	clEnqueueReadBuffer(queue, d_val, CL_FALSE, 0, sizeof(unsigned), (void *)&h_val, 0, NULL, NULL);
	clFinish(queue);
	h_val++;
	clEnqueueWriteBuffer(queue, d_val, CL_FALSE, 0, sizeof(unsigned), (void *)&h_val, 0, NULL, NULL);
	clFinish(queue);
}
#endif

static struct starpu_codelet use_data_on_worker_codelet =
{
	.cpu_funcs = {cpu_func_incr},
#ifdef STARPU_USE_CUDA
	.cuda_funcs = {cuda_func_incr},
	.cuda_flags = {STARPU_CUDA_ASYNC},
#endif
#ifdef STARPU_USE_OPENCL
	.opencl_funcs = {opencl_func_incr},
#endif
	.nbuffers = 1,
	.modes = {STARPU_RW},
	.model = NULL
};

int main(int argc, char **argv)
{
	unsigned worker;
	unsigned i;
	int ret;
	struct starpu_conf conf;

	starpu_conf_init(&conf);
	conf.nmic = 0;
	conf.nmpi_ms = 0;
	conf.nscc = 0;

	variable = INIT_VALUE;

	ret = starpu_initialize(&conf, &argc, &argv);
	if (ret == -ENODEV) return STARPU_TEST_SKIPPED;
	STARPU_CHECK_RETURN_VALUE(ret, "starpu_init");

	unsigned nworkers = starpu_worker_get_count();

	starpu_variable_data_register(&variable_handle, STARPU_MAIN_RAM, (uintptr_t)&variable, sizeof(unsigned));

	/* Allocate a per-worker handle on each worker (and initialize it to 0) */
	starpu_execute_on_each_worker(initialize_per_worker_handle, NULL, STARPU_CPU|STARPU_CUDA|STARPU_OPENCL);

	/* Register all per-worker handles */
	for (worker = 0; worker < nworkers; worker++)
	{
		STARPU_ASSERT(per_worker[worker]);

		unsigned memory_node = starpu_worker_get_memory_node(worker);
		starpu_variable_data_register(&per_worker_handle[worker], memory_node,
						per_worker[worker], sizeof(variable));
	}

	/* Submit NTASKS tasks to the different workers to simulate the use of a piece of data in a reduction */
	for (i = 0; i < NTASKS; i++)
	{
		struct starpu_task *task = starpu_task_create();

		task->cl = &use_data_on_worker_codelet;

		int workerid = (i % nworkers);
		task->handles[0] = per_worker_handle[workerid];

		task->execute_on_a_specific_worker = 1;
		task->workerid = (unsigned)workerid;

		ret = starpu_task_submit(task);
		if (ret == -ENODEV) goto enodev;
		STARPU_CHECK_RETURN_VALUE(ret, "starpu_task_submit");
	}

	/* Perform the reduction of all per-worker handles into variable_handle */
	for (worker = 0; worker < nworkers; worker++)
	{
		struct starpu_task *task = starpu_task_create();

		task->cl = &reduction_codelet;

		task->handles[0] = variable_handle;
		task->handles[1] = per_worker_handle[worker];

		ret = starpu_task_submit(task);
		if (ret == -ENODEV) goto enodev;
		STARPU_CHECK_RETURN_VALUE(ret, "starpu_task_submit");
	}

	starpu_data_unregister(variable_handle);
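	/*
	 * Unregistering variable_handle writes the reduced value back into
	 * `variable` in main memory, so it can still be checked after
	 * starpu_shutdown(). The per-worker copies are unregistered below with
	 * starpu_data_unregister_no_coherency() since their contents do not
	 * need to be written back.
	 */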
	/* Destroy all per-worker handles */
	for (worker = 0; worker < nworkers; worker++)
	{
		starpu_data_unregister_no_coherency(per_worker_handle[worker]);

		switch (starpu_worker_get_type(worker))
		{
		case STARPU_CPU_WORKER:
			free((void *)per_worker[worker]);
			break;
#ifdef STARPU_USE_CUDA
		case STARPU_CUDA_WORKER:
			cudaFree((void *)per_worker[worker]);
			break;
#endif /* STARPU_USE_CUDA */
#ifdef STARPU_USE_OPENCL
		case STARPU_OPENCL_WORKER:
			clReleaseMemObject((cl_mem)per_worker[worker]);
			break;
#endif /* STARPU_USE_OPENCL */
		default:
			STARPU_ABORT();
		}
	}

	starpu_shutdown();

	if (variable == INIT_VALUE + NTASKS)
		ret = EXIT_SUCCESS;
	else
	{
		FPRINTF(stderr, "%u != %u + %u\n", variable, INIT_VALUE, NTASKS);
		ret = EXIT_FAILURE;
	}

	STARPU_RETURN(ret);

enodev:
	fprintf(stderr, "WARNING: No one can execute this task\n");
	starpu_task_wait_for_all();
	/* yes, we do not perform the computation but we did detect that no one
	 * could perform the kernel, so this is not an error from StarPU */
	starpu_shutdown();
	return STARPU_TEST_SKIPPED;
}