/* StarPU --- Runtime system for heterogeneous multicore architectures.
 *
 * Copyright (C) 2015-2021 Université de Bordeaux, CNRS (LaBRI UMR 5800), Inria
 *
 * StarPU is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation; either version 2.1 of the License, or (at
 * your option) any later version.
 *
 * StarPU is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * See the GNU Lesser General Public License in COPYING.LGPL for more details.
 */

#include <starpu.h>
#include <omp.h>
#include <pthread.h>

#ifdef STARPU_QUICK_CHECK
#define NTASKS 64
#define SIZE   40
#define LOOPS  4
#else
#define NTASKS 100
#define SIZE   400
#define LOOPS  10
#endif

#define N_NESTED_CTXS 2

struct context
{
	int ncpus;
	int *cpus;
	unsigned id;
};

/* Helper for the task that will initiate everything: spawn one OpenMP thread
 * per CPU available in the scheduling context and bind each thread to its CPU. */
void parallel_task_prologue_init_once_and_for_all(void *sched_ctx_)
{
	fprintf(stderr, "%p: %s -->\n", (void *)pthread_self(), __func__);
	int sched_ctx = *(int *)sched_ctx_;
	int *cpuids = NULL;
	int ncpuids = 0;
	starpu_sched_ctx_get_available_cpuids(sched_ctx, &cpuids, &ncpuids);

#pragma omp parallel num_threads(ncpuids)
	{
		starpu_sched_ctx_bind_current_thread_to_cpuid(cpuids[omp_get_thread_num()]);
	}

	omp_set_num_threads(ncpuids);
	free(cpuids);
	fprintf(stderr, "%p: %s <--\n", (void *)pthread_self(), __func__);
}

void noop(void *buffers[], void *cl_arg)
{
	(void)buffers;
	(void)cl_arg;
}

static struct starpu_codelet init_parallel_worker_cl =
{
	.cpu_funcs = {noop},
	.nbuffers = 0,
	.name = "init_parallel_worker"
};

/* Function called to initialize the parallel "workers" of one context */
void parallel_task_init_one_context(unsigned *context_id)
{
	struct starpu_task *t;
	int ret;

	t = starpu_task_build(&init_parallel_worker_cl,
			      STARPU_SCHED_CTX, *context_id,
			      0);
	t->destroy = 1;
	t->prologue_callback_pop_func = parallel_task_prologue_init_once_and_for_all;
	if (t->prologue_callback_pop_arg_free)
		free(t->prologue_callback_pop_arg);
	t->prologue_callback_pop_arg = context_id;
	t->prologue_callback_pop_arg_free = 0;

	ret = starpu_task_submit(t);
	STARPU_CHECK_RETURN_VALUE(ret, "starpu_task_submit");
}

struct context main_context;
struct context *contexts;

void parallel_task_init()
{
	/* Context creation */
	main_context.ncpus = starpu_cpu_worker_get_count();
	main_context.cpus = (int *)malloc(main_context.ncpus * sizeof(int));
	fprintf(stderr, "ncpus : %d\n", main_context.ncpus);

	starpu_worker_get_ids_by_type(STARPU_CPU_WORKER, main_context.cpus, main_context.ncpus);

	main_context.id = starpu_sched_ctx_create(main_context.cpus,
						  main_context.ncpus, "main_ctx",
						  STARPU_SCHED_CTX_POLICY_NAME, "prio",
						  0);

	/* Initialize nested contexts: split the main context's CPU workers
	 * evenly, the last context taking any remainder. */
	contexts = malloc(sizeof(struct context) * N_NESTED_CTXS);
	int cpus_per_context = main_context.ncpus / N_NESTED_CTXS;
	int i;
	for (i = 0; i < N_NESTED_CTXS; i++)
	{
		contexts[i].ncpus = cpus_per_context;
		if (i == N_NESTED_CTXS - 1)
			contexts[i].ncpus += main_context.ncpus % N_NESTED_CTXS;
		contexts[i].cpus = main_context.cpus + i * cpus_per_context;
	}

	for (i = 0; i < N_NESTED_CTXS; i++)
		contexts[i].id = starpu_sched_ctx_create(contexts[i].cpus,
							 contexts[i].ncpus, "nested_ctx",
							 STARPU_SCHED_CTX_NESTED, main_context.id,
							 0);

	for (i = 0; i < N_NESTED_CTXS; i++)
	{
		parallel_task_init_one_context(&contexts[i].id);
	}

	starpu_task_wait_for_all();
	starpu_sched_ctx_set_context(&main_context.id);
}

/* Tear down what parallel_task_init() set up: delete the nested contexts and
 * free the CPU id arrays. */
void parallel_task_deinit()
{
	int i;
	for (i = 0; i < N_NESTED_CTXS; i++)
		starpu_sched_ctx_delete(contexts[i].id);
	free(contexts);
	free(main_context.cpus);
}
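/* NOTE: in this copy of the example, the loop body of parallel_task_deinit()
 * above was truncated (restored to mirror parallel_task_init()), and the
 * worker codelet plus the beginning of main() were missing entirely.  What
 * follows is a minimal sketch reconstructed from the surviving tail of main():
 * handle1/handle2, the task-submission loop and the cleanup path come from the
 * source, whereas the names sum_cpu/sum_cl, the codelet body and the data
 * initialisation are assumptions, not the original code. */

/* Worker codelet (assumed): an OpenMP-parallel accumulation that runs on the
 * threads bound by the prologue of the context the task was submitted to. */
static void sum_cpu(void *descr[], void *cl_arg)
{
	(void)cl_arg;
	double *v_dst = (double *)STARPU_VECTOR_GET_PTR(descr[0]);
	double *v_src = (double *)STARPU_VECTOR_GET_PTR(descr[1]);
	int size = (int)STARPU_VECTOR_GET_NX(descr[0]);
	int i, k;

	for (k = 0; k < LOOPS; k++)
	{
#pragma omp parallel for
		for (i = 0; i < size; i++)
			v_dst[i] += v_src[i];
	}
}

static struct starpu_codelet sum_cl =
{
	.cpu_funcs = {sum_cpu},
	.nbuffers = 2,
	.modes = {STARPU_RW, STARPU_R},
	.name = "sum"
};

int main(void)
{
	double array1[SIZE];
	double array2[SIZE];
	starpu_data_handle_t handle1;
	starpu_data_handle_t handle2;
	int i, ret;

	ret = starpu_init(NULL);
	if (ret == -ENODEV)
		return 77;
	STARPU_CHECK_RETURN_VALUE(ret, "starpu_init");

	/* Each nested context needs at least one CPU worker. */
	if ((int)starpu_cpu_worker_get_count() < N_NESTED_CTXS)
	{
		starpu_shutdown();
		return 77;
	}

	parallel_task_init();

	/* Data preparation (assumed initial values) */
	for (i = 0; i < SIZE; i++)
	{
		array1[i] = 0.0;
		array2[i] = (double)i;
	}

	starpu_vector_data_register(&handle1, STARPU_MAIN_RAM, (uintptr_t)array1, SIZE, sizeof(double));
	starpu_vector_data_register(&handle2, STARPU_MAIN_RAM, (uintptr_t)array2, SIZE, sizeof(double));

	/* Submit NTASKS possibly-parallel tasks, alternating between the
	 * nested contexts. */
	for (i = 0; i < NTASKS; i++)
	{
		struct starpu_task *t;
		t = starpu_task_build(&sum_cl,
				      STARPU_RW, handle1,
				      STARPU_R, handle2,
				      STARPU_SCHED_CTX, contexts[i % N_NESTED_CTXS].id,
				      0);
		if (!t)
			break;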
		t->destroy = 1;
		t->possibly_parallel = 1;

		ret = starpu_task_submit(t);
		if (ret == -ENODEV)
			goto out;
		STARPU_CHECK_RETURN_VALUE(ret, "starpu_task_submit");
	}

out:
	/* wait for all tasks at the end */
	starpu_task_wait_for_all();
	starpu_data_unregister(handle1);
	starpu_data_unregister(handle2);
	parallel_task_deinit();
	starpu_shutdown();
	return 0;
}
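/* A possible build line (a sketch, not part of the original example), assuming
 * the installed StarPU release ships a starpu-1.3 pkg-config module and the
 * file is saved under the placeholder name parallel_tasks_example.c:
 *
 *   gcc -fopenmp parallel_tasks_example.c \
 *       $(pkg-config --cflags --libs starpu-1.3) -o parallel_tasks_example
 *
 * Run it on a machine with at least N_NESTED_CTXS CPU workers; the exit code
 * 77 used in the sketch above follows the automake "skipped test" convention. */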