filters.c 28 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779
  1. /* StarPU --- Runtime system for heterogeneous multicore architectures.
  2. *
  3. * Copyright (C) 2010-2017 Université de Bordeaux
  4. * Copyright (C) 2010 Mehdi Juhoor <mjuhoor@gmail.com>
  5. * Copyright (C) 2010, 2011, 2012, 2013, 2015, 2016, 2017 CNRS
  6. * Copyright (C) 2012, 2016 Inria
  7. *
  8. * StarPU is free software; you can redistribute it and/or modify
  9. * it under the terms of the GNU Lesser General Public License as published by
  10. * the Free Software Foundation; either version 2.1 of the License, or (at
  11. * your option) any later version.
  12. *
  13. * StarPU is distributed in the hope that it will be useful, but
  14. * WITHOUT ANY WARRANTY; without even the implied warranty of
  15. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
  16. *
  17. * See the GNU Lesser General Public License in COPYING.LGPL for more details.
  18. */
  19. #include <datawizard/filters.h>
  20. #include <datawizard/footprint.h>
  21. #include <datawizard/interfaces/data_interface.h>
  22. #include <core/task.h>
  23. /*
  24. * This function applies a data filter on all the elements of a partition
  25. */
  26. static void map_filter(starpu_data_handle_t root_handle, struct starpu_data_filter *f)
  27. {
  28. /* we need to apply the data filter on all leaf of the tree */
  29. if (root_handle->nchildren == 0)
  30. {
  31. /* this is a leaf */
  32. starpu_data_partition(root_handle, f);
  33. }
  34. else
  35. {
  36. /* try to apply the data filter recursively */
  37. unsigned child;
  38. for (child = 0; child < root_handle->nchildren; child++)
  39. {
  40. starpu_data_handle_t handle_child = starpu_data_get_child(root_handle, child);
  41. map_filter(handle_child, f);
  42. }
  43. }
  44. }
  45. void starpu_data_vmap_filters(starpu_data_handle_t root_handle, unsigned nfilters, va_list pa)
  46. {
  47. unsigned i;
  48. for (i = 0; i < nfilters; i++)
  49. {
  50. struct starpu_data_filter *next_filter;
  51. next_filter = va_arg(pa, struct starpu_data_filter *);
  52. STARPU_ASSERT(next_filter);
  53. map_filter(root_handle, next_filter);
  54. }
  55. }
  56. void starpu_data_map_filters(starpu_data_handle_t root_handle, unsigned nfilters, ...)
  57. {
  58. va_list pa;
  59. va_start(pa, nfilters);
  60. starpu_data_vmap_filters(root_handle, nfilters, pa);
  61. va_end(pa);
  62. }
  63. void fstarpu_data_map_filters(starpu_data_handle_t root_handle, int nfilters, struct starpu_data_filter **filters)
  64. {
  65. int i;
  66. assert(nfilters >= 0);
  67. for (i = 0; i < nfilters; i++)
  68. {
  69. struct starpu_data_filter *next_filter = filters[i];
  70. STARPU_ASSERT(next_filter);
  71. map_filter(root_handle, next_filter);
  72. }
  73. }
  74. int starpu_data_get_nb_children(starpu_data_handle_t handle)
  75. {
  76. return handle->nchildren;
  77. }
  78. starpu_data_handle_t starpu_data_get_child(starpu_data_handle_t handle, unsigned i)
  79. {
  80. STARPU_ASSERT_MSG(handle->nchildren != 0, "Data %p has to be partitioned before accessing children", handle);
  81. STARPU_ASSERT_MSG(i < handle->nchildren, "Invalid child index %u in handle %p, maximum %u", i, handle, handle->nchildren);
  82. return &handle->children[i];
  83. }
  84. /*
  85. * example starpu_data_get_sub_data(starpu_data_handle_t root_handle, 3, 42, 0, 1);
  86. */
  87. starpu_data_handle_t starpu_data_get_sub_data(starpu_data_handle_t root_handle, unsigned depth, ... )
  88. {
  89. va_list pa;
  90. va_start(pa, depth);
  91. starpu_data_handle_t handle = starpu_data_vget_sub_data(root_handle, depth, pa);
  92. va_end(pa);
  93. return handle;
  94. }
  95. starpu_data_handle_t starpu_data_vget_sub_data(starpu_data_handle_t root_handle, unsigned depth, va_list pa )
  96. {
  97. STARPU_ASSERT(root_handle);
  98. starpu_data_handle_t current_handle = root_handle;
  99. /* the variable number of argument must correlate the depth in the tree */
  100. unsigned i;
  101. for (i = 0; i < depth; i++)
  102. {
  103. unsigned next_child;
  104. next_child = va_arg(pa, unsigned);
  105. STARPU_ASSERT_MSG(current_handle->nchildren != 0, "Data %p has to be partitioned before accessing children", current_handle);
  106. STARPU_ASSERT_MSG(next_child < current_handle->nchildren, "Bogus child number %u, data %p only has %u children", next_child, current_handle, current_handle->nchildren);
  107. current_handle = &current_handle->children[next_child];
  108. }
  109. return current_handle;
  110. }
  111. starpu_data_handle_t fstarpu_data_get_sub_data(starpu_data_handle_t root_handle, int depth, int *indices)
  112. {
  113. STARPU_ASSERT(root_handle);
  114. starpu_data_handle_t current_handle = root_handle;
  115. STARPU_ASSERT(depth >= 0);
  116. /* the variable number of argument must correlate the depth in the tree */
  117. int i;
  118. for (i = 0; i < depth; i++)
  119. {
  120. int next_child;
  121. next_child = indices[i];
  122. STARPU_ASSERT(next_child >= 0);
  123. STARPU_ASSERT_MSG(current_handle->nchildren != 0, "Data %p has to be partitioned before accessing children", current_handle);
  124. STARPU_ASSERT_MSG((unsigned) next_child < current_handle->nchildren, "Bogus child number %u, data %p only has %u children", next_child, current_handle, current_handle->nchildren);
  125. current_handle = &current_handle->children[next_child];
  126. }
  127. return current_handle;
  128. }
  129. static unsigned _starpu_data_partition_nparts(starpu_data_handle_t initial_handle, struct starpu_data_filter *f)
  130. {
  131. /* how many parts ? */
  132. if (f->get_nchildren)
  133. return f->get_nchildren(f, initial_handle);
  134. else
  135. return f->nchildren;
  136. }
/*
 * Split INITIAL_HANDLE into NPARTS pieces according to filter F.
 *
 * When INHERIT_STATE is non-zero (synchronous starpu_data_partition), the
 * children array is allocated inside INITIAL_HANDLE and each child inherits
 * the parent's per-node replicate state.  When it is zero (partition
 * planning), CHILDRENP supplies pre-allocated handles and the children start
 * out invalid everywhere.
 *
 * The whole initialization runs under the parent's header lock.
 */
static void _starpu_data_partition(starpu_data_handle_t initial_handle, starpu_data_handle_t *childrenp, unsigned nparts, struct starpu_data_filter *f, int inherit_state)
{
	unsigned i;
	unsigned node;
	/* first take care to properly lock the data header */
	_starpu_spin_lock(&initial_handle->header_lock);
	initial_handle->nplans++;
	STARPU_ASSERT_MSG(nparts > 0, "Partitioning data %p in 0 piece does not make sense", initial_handle);
	/* allocate the children */
	if (inherit_state)
	{
		_STARPU_CALLOC(initial_handle->children, nparts, sizeof(struct _starpu_data_state));
		/* this handle now has children */
		initial_handle->nchildren = nparts;
	}
	/* Look for at least one node holding a valid copy of the parent. */
	for (node = 0; node < STARPU_MAXNODES; node++)
	{
		if (initial_handle->per_node[node].state != STARPU_INVALID)
			break;
	}
	if (node == STARPU_MAXNODES)
	{
		/* This is lazy allocation, allocate it now in main RAM, so as
		 * to have somewhere to gather pieces later */
		/* FIXME: mark as unevictable! */
		int home_node = initial_handle->home_node;
		if (home_node < 0 || (starpu_node_get_kind(home_node) != STARPU_CPU_RAM))
			home_node = STARPU_MAIN_RAM;
		int ret = _starpu_allocate_memory_on_node(initial_handle, &initial_handle->per_node[home_node], 0);
#ifdef STARPU_DEVEL
#warning we should reclaim memory if allocation failed
#endif
		STARPU_ASSERT(!ret);
	}
	/* The parent's RAM pointers are dropped: the pieces will register
	 * their own pointers below. */
	for (node = 0; node < STARPU_MAXNODES; node++)
		_starpu_data_unregister_ram_pointer(initial_handle, node);
	for (i = 0; i < nparts; i++)
	{
		starpu_data_handle_t child;
		if (inherit_state)
			child = &initial_handle->children[i];
		else
			child = childrenp[i];
		STARPU_ASSERT(child);
		_STARPU_TRACE_HANDLE_DATA_REGISTER(child);
		struct starpu_data_interface_ops *ops;
		/* each child may have his own interface type */
		/* what's this child's interface ? */
		if (f->get_child_ops)
			ops = f->get_child_ops(f, i);
		else
			ops = initial_handle->ops;
		_starpu_data_handle_init(child, ops, initial_handle->mf_node);
		/* A fresh child is itself unpartitioned and unplanned. */
		child->nchildren = 0;
		child->nplans = 0;
		child->switch_cl = NULL;
		child->partitioned = 0;
		child->readonly = 0;
		child->mpi_data = initial_handle->mpi_data;
		child->root_handle = initial_handle->root_handle;
		child->father_handle = initial_handle;
		child->sibling_index = i;
		child->depth = initial_handle->depth + 1;
		child->is_not_important = initial_handle->is_not_important;
		child->wt_mask = initial_handle->wt_mask;
		child->home_node = initial_handle->home_node;
		/* initialize the chunk lock */
		_starpu_data_requester_list_init(&child->req_list);
		_starpu_data_requester_list_init(&child->reduction_req_list);
		child->reduction_tmp_handles = NULL;
		child->write_invalidation_req = NULL;
		child->refcnt = 0;
		child->unlocking_reqs = 0;
		child->busy_count = 0;
		child->busy_waiting = 0;
		STARPU_PTHREAD_MUTEX_INIT(&child->busy_mutex, NULL);
		STARPU_PTHREAD_COND_INIT(&child->busy_cond, NULL);
		child->reduction_refcnt = 0;
		_starpu_spin_init(&child->header_lock);
		child->sequential_consistency = initial_handle->sequential_consistency;
		STARPU_PTHREAD_MUTEX_INIT(&child->sequential_consistency_mutex, NULL);
		child->last_submitted_mode = STARPU_R;
		child->last_sync_task = NULL;
		/* Empty circular list of implicit-dependency accessors. */
		child->last_submitted_accessors.task = NULL;
		child->last_submitted_accessors.next = &child->last_submitted_accessors;
		child->last_submitted_accessors.prev = &child->last_submitted_accessors;
		child->post_sync_tasks = NULL;
		/* Tell helgrind that the race in _starpu_unlock_post_sync_tasks is fine */
		STARPU_HG_DISABLE_CHECKING(child->post_sync_tasks_cnt);
		child->post_sync_tasks_cnt = 0;
		/* The methods used for reduction are propagated to the
		 * children. */
		child->redux_cl = initial_handle->redux_cl;
		child->init_cl = initial_handle->init_cl;
#ifdef STARPU_USE_FXT
		child->last_submitted_ghost_sync_id_is_valid = 0;
		child->last_submitted_ghost_sync_id = 0;
		child->last_submitted_ghost_accessors_id = NULL;
#endif
		if (_starpu_global_arbiter)
			/* Just for testing purpose */
			starpu_data_assign_arbiter(child, _starpu_global_arbiter);
		else
			child->arbiter = NULL;
		_starpu_data_requester_list_init(&child->arbitered_req_list);
		/* Set up the child's per-node replicates and interfaces. */
		for (node = 0; node < STARPU_MAXNODES; node++)
		{
			struct _starpu_data_replicate *initial_replicate;
			struct _starpu_data_replicate *child_replicate;
			initial_replicate = &initial_handle->per_node[node];
			child_replicate = &child->per_node[node];
			if (inherit_state)
				child_replicate->state = initial_replicate->state;
			else
				child_replicate->state = STARPU_INVALID;
			if (inherit_state || !initial_replicate->automatically_allocated)
				child_replicate->allocated = initial_replicate->allocated;
			else
				child_replicate->allocated = 0;
			/* Do not allow memory reclaiming within the child for parent bits */
			child_replicate->automatically_allocated = 0;
			child_replicate->refcnt = 0;
			child_replicate->memory_node = node;
			child_replicate->relaxed_coherency = 0;
			if (inherit_state)
				child_replicate->initialized = initial_replicate->initialized;
			else
				child_replicate->initialized = 0;
			/* update the interface */
			void *initial_interface = starpu_data_get_interface_on_node(initial_handle, node);
			void *child_interface = starpu_data_get_interface_on_node(child, node);
			STARPU_ASSERT_MSG(!(!inherit_state && child_replicate->automatically_allocated && child_replicate->allocated), "partition planning is currently not supported when handle has some automatically allocated buffers");
			/* Let the filter fill the child's interface from the
			 * parent's for this node. */
			f->filter_func(initial_interface, child_interface, f, i, nparts);
		}
		child->per_worker = NULL;
		child->user_data = NULL;
		/* We compute the size and the footprint of the child once and
		 * store it in the handle */
		child->footprint = _starpu_compute_data_footprint(child);
		/* Register the child's RAM pointers so lookups find the piece. */
		for (node = 0; node < STARPU_MAXNODES; node++)
		{
			if (starpu_node_get_kind(node) != STARPU_CPU_RAM)
				continue;
			void *ptr = starpu_data_handle_to_pointer(child, node);
			if (ptr != NULL)
				_starpu_data_register_ram_pointer(child, ptr);
		}
	}
	/* now let the header */
	_starpu_spin_unlock(&initial_handle->header_lock);
}
  288. static
  289. void _starpu_empty_codelet_function(void *buffers[], void *args)
  290. {
  291. (void) buffers; // unused;
  292. (void) args; // unused;
  293. }
/*
 * Gather the children of ROOT_HANDLE back into the parent on GATHERING_NODE,
 * then destroy the children.
 *
 * The sequence is: recursively unpartition grand-children, convert
 * multiformat data if needed, acquire/release each child on the gathering
 * node to collapse pending work, wait for each child to become idle, free
 * the child replicates, recompute which nodes still hold a valid copy of
 * the parent, and finally tear the children down.
 */
void starpu_data_unpartition(starpu_data_handle_t root_handle, unsigned gathering_node)
{
	unsigned child;
	unsigned worker;
	unsigned nworkers = starpu_worker_get_count();
	unsigned node;
	unsigned sizes[root_handle->nchildren];
	void *ptr;
	_STARPU_TRACE_START_UNPARTITION(root_handle, gathering_node);
	_starpu_spin_lock(&root_handle->header_lock);
	STARPU_ASSERT_MSG(root_handle->nchildren != 0, "data %p is not partitioned, can not unpartition it", root_handle);
	/* first take all the children lock (in order !) */
	for (child = 0; child < root_handle->nchildren; child++)
	{
		starpu_data_handle_t child_handle = starpu_data_get_child(root_handle, child);
		/* make sure the intermediate children is unpartitionned as well */
		if (child_handle->nchildren > 0)
			starpu_data_unpartition(child_handle, gathering_node);
		/* If this is a multiformat handle, we must convert the data now */
#ifdef STARPU_DEVEL
#warning TODO: _starpu_fetch_data_on_node should be doing it
#endif
		if (_starpu_data_is_multiformat_handle(child_handle) &&
			starpu_node_get_kind(child_handle->mf_node) != STARPU_CPU_RAM)
		{
			/* Synchronous dummy task: its only effect is to force
			 * the conversion of the child data back to CPU format. */
			struct starpu_codelet cl =
			{
				.where = STARPU_CPU,
				.cpu_funcs = { _starpu_empty_codelet_function },
				.modes = { STARPU_RW },
				.nbuffers = 1
			};
			struct starpu_task *task = starpu_task_create();
			task->name = "convert_data";
			STARPU_TASK_SET_HANDLE(task, child_handle, 0);
			task->cl = &cl;
			task->synchronous = 1;
			if (_starpu_task_submit_internally(task) != 0)
				_STARPU_ERROR("Could not submit the conversion task while unpartitionning\n");
		}
		int ret;
		/* for now we pretend that the RAM is almost unlimited and that gathering
		 * data should be possible from the node that does the unpartionning ... we
		 * don't want to have the programming deal with memory shortage at that time,
		 * really */
		/* Acquire the child data on the gathering node. This will trigger collapsing any reduction */
		ret = starpu_data_acquire_on_node(child_handle, gathering_node, STARPU_RW);
		STARPU_ASSERT(ret == 0);
		starpu_data_release_on_node(child_handle, gathering_node);
		_starpu_spin_lock(&child_handle->header_lock);
		child_handle->busy_waiting = 1;
		_starpu_spin_unlock(&child_handle->header_lock);
		/* Wait for all requests to finish (notably WT requests) */
		STARPU_PTHREAD_MUTEX_LOCK(&child_handle->busy_mutex);
		while (1)
		{
			/* Here helgrind would shout that this an unprotected access,
			 * but this is actually fine: all threads who do busy_count--
			 * are supposed to call _starpu_data_check_not_busy, which will
			 * wake us up through the busy_mutex/busy_cond. */
			if (!child_handle->busy_count)
				break;
			/* This is woken by _starpu_data_check_not_busy, always called
			 * after decrementing busy_count */
			STARPU_PTHREAD_COND_WAIT(&child_handle->busy_cond, &child_handle->busy_mutex);
		}
		STARPU_PTHREAD_MUTEX_UNLOCK(&child_handle->busy_mutex);
		/* Child is now idle: keep its header lock held until the final
		 * teardown loop below. */
		_starpu_spin_lock(&child_handle->header_lock);
		sizes[child] = _starpu_data_get_size(child_handle);
		if (child_handle->unregister_hook)
		{
			child_handle->unregister_hook(child_handle);
		}
		for (node = 0; node < STARPU_MAXNODES; node++)
			_starpu_data_unregister_ram_pointer(child_handle, node);
		if (child_handle->per_worker)
		{
			for (worker = 0; worker < nworkers; worker++)
			{
				struct _starpu_data_replicate *local = &child_handle->per_worker[worker];
				STARPU_ASSERT(local->state == STARPU_INVALID);
				if (local->allocated && local->automatically_allocated)
					_starpu_request_mem_chunk_removal(child_handle, local, starpu_worker_get_memory_node(worker), sizes[child]);
			}
		}
		_starpu_memory_stats_free(child_handle);
	}
	/* Re-register the parent's RAM pointers (dropped at partition time). */
	for (node = 0; node < STARPU_MAXNODES; node++)
	{
		if (starpu_node_get_kind(node) != STARPU_CPU_RAM)
			continue;
		ptr = starpu_data_handle_to_pointer(root_handle, node);
		if (ptr != NULL)
			_starpu_data_register_ram_pointer(root_handle, ptr);
	}
	/* the gathering_node should now have a valid copy of all the children.
	 * For all nodes, if the node had all copies and none was locally
	 * allocated then the data is still valid there, else, it's invalidated
	 * for the gathering node, if we have some locally allocated data, we
	 * copy all the children (XXX this should not happen so we just do not
	 * do anything since this is transparent ?) */
	unsigned still_valid[STARPU_MAXNODES];
	/* we do 2 passes : the first pass determines wether the data is still
	 * valid or not, the second pass is needed to choose between STARPU_SHARED and
	 * STARPU_OWNER */
	unsigned nvalids = 0;
	/* still valid ? */
	for (node = 0; node < STARPU_MAXNODES; node++)
	{
		struct _starpu_data_replicate *local;
		/* until an issue is found the data is assumed to be valid */
		unsigned isvalid = 1;
		for (child = 0; child < root_handle->nchildren; child++)
		{
			starpu_data_handle_t child_handle = starpu_data_get_child(root_handle, child);
			local = &child_handle->per_node[node];
			if (local->state == STARPU_INVALID || local->automatically_allocated == 1)
			{
				/* One of the bits is missing or is not inside the parent */
				isvalid = 0;
			}
			if (local->mc && local->allocated && local->automatically_allocated)
				/* free the child data copy in a lazy fashion */
				_starpu_request_mem_chunk_removal(child_handle, local, node, sizes[child]);
		}
		local = &root_handle->per_node[node];
		if (!local->allocated)
			/* Even if we have all the bits, if we don't have the
			 * whole data, it's not valid */
			isvalid = 0;
		if (!isvalid && local->mc && local->allocated && local->automatically_allocated)
			/* free the data copy in a lazy fashion */
			_starpu_request_mem_chunk_removal(root_handle, local, node, _starpu_data_get_size(root_handle));
		/* if there was no invalid copy, the node still has a valid copy */
		still_valid[node] = isvalid;
		if (isvalid)
			nvalids++;
	}
	/* either shared or owned */
	STARPU_ASSERT(nvalids > 0);
	enum _starpu_cache_state newstate = (nvalids == 1)?STARPU_OWNER:STARPU_SHARED;
	for (node = 0; node < STARPU_MAXNODES; node++)
	{
		root_handle->per_node[node].state =
			still_valid[node]?newstate:STARPU_INVALID;
	}
	/* Release and destroy the children's header locks (taken above). */
	for (child = 0; child < root_handle->nchildren; child++)
	{
		starpu_data_handle_t child_handle = starpu_data_get_child(root_handle, child);
		_starpu_data_free_interfaces(child_handle);
		_starpu_spin_unlock(&child_handle->header_lock);
		_starpu_spin_destroy(&child_handle->header_lock);
	}
	/* Destroy the children's remaining synchronization objects. */
	for (child = 0; child < root_handle->nchildren; child++)
	{
		starpu_data_handle_t child_handle = starpu_data_get_child(root_handle, child);
		_starpu_data_clear_implicit(child_handle);
		STARPU_PTHREAD_MUTEX_DESTROY(&child_handle->busy_mutex);
		STARPU_PTHREAD_COND_DESTROY(&child_handle->busy_cond);
		STARPU_PTHREAD_MUTEX_DESTROY(&child_handle->sequential_consistency_mutex);
	}
	/* there is no child anymore */
	starpu_data_handle_t children = root_handle->children;
	root_handle->children = NULL;
	root_handle->nchildren = 0;
	root_handle->nplans--;
	/* now the parent may be used again so we release the lock */
	_starpu_spin_unlock(&root_handle->header_lock);
	free(children);
	_STARPU_TRACE_END_UNPARTITION(root_handle, gathering_node);
}
  465. void starpu_data_partition(starpu_data_handle_t initial_handle, struct starpu_data_filter *f)
  466. {
  467. unsigned nparts = _starpu_data_partition_nparts(initial_handle, f);
  468. STARPU_ASSERT_MSG(initial_handle->nchildren == 0, "there should not be mutiple filters applied on the same data %p, futher filtering has to be done on children", initial_handle);
  469. STARPU_ASSERT_MSG(initial_handle->nplans == 0, "partition planning and synchronous partitioning is not supported");
  470. initial_handle->children = NULL;
  471. /* Make sure to wait for previous tasks working on the whole data */
  472. starpu_data_acquire_on_node(initial_handle, STARPU_ACQUIRE_NO_NODE, STARPU_RW);
  473. starpu_data_release_on_node(initial_handle, STARPU_ACQUIRE_NO_NODE);
  474. _starpu_data_partition(initial_handle, NULL, nparts, f, 1);
  475. }
/*
 * Plan an asynchronous partitioning of INITIAL_HANDLE according to filter F,
 * filling CHILDRENP with the planned child handles.  The actual data movement
 * happens later, via starpu_data_partition_submit() and friends, through the
 * "switch" codelet lazily created here.
 */
void starpu_data_partition_plan(starpu_data_handle_t initial_handle, struct starpu_data_filter *f, starpu_data_handle_t *childrenp)
{
	unsigned i;
	unsigned nparts = _starpu_data_partition_nparts(initial_handle, f);
	STARPU_ASSERT_MSG(initial_handle->nchildren == 0, "partition planning and synchronous partitioning is not supported");
	STARPU_ASSERT_MSG(initial_handle->sequential_consistency, "partition planning is currently only supported for data with sequential consistency");
	struct starpu_codelet *cl = initial_handle->switch_cl;
	int home_node = initial_handle->home_node;
	if (home_node == -1)
		/* Nothing better for now */
		/* TODO: pass -1, and make _starpu_fetch_nowhere_task_input
		 * really call _starpu_fetch_data_on_node, and make that update
		 * the coherency.
		 */
		home_node = STARPU_MAIN_RAM;
	/* Allocate the child handles ourselves (inherit_state == 0 below). */
	for (i = 0; i < nparts; i++)
	{
		_STARPU_CALLOC(childrenp[i], 1, sizeof(struct _starpu_data_state));
	}
	_starpu_data_partition(initial_handle, childrenp, nparts, f, 0);
	if (!cl)
	{
		/* Create a codelet that will make the coherency on the home node */
		_STARPU_CALLOC(initial_handle->switch_cl, 1, sizeof(*initial_handle->switch_cl));
		cl = initial_handle->switch_cl;
		cl->where = STARPU_NOWHERE;
		cl->nbuffers = STARPU_VARIABLE_NBUFFERS;
		cl->name = "data_partition_switch";
		cl->specific_nodes = 1;
	}
	if (initial_handle->switch_cl_nparts < nparts)
	{
		/* First initialization, or previous initialization was with fewer parts, enlarge it */
		/* nparts+1 entries: one per child plus one for the parent. */
		_STARPU_REALLOC(cl->dyn_nodes, (nparts+1) * sizeof(*cl->dyn_nodes));
		for (i = initial_handle->switch_cl_nparts; i < nparts+1; i++)
			cl->dyn_nodes[i] = home_node;
		initial_handle->switch_cl_nparts = nparts;
	}
}
  515. void starpu_data_partition_clean(starpu_data_handle_t root_handle, unsigned nparts, starpu_data_handle_t *children)
  516. {
  517. unsigned i;
  518. for (i = 0; i < nparts; i++)
  519. starpu_data_unregister_submit(children[i]);
  520. _starpu_spin_lock(&root_handle->header_lock);
  521. root_handle->nplans--;
  522. _starpu_spin_unlock(&root_handle->header_lock);
  523. }
  524. void starpu_data_partition_submit(starpu_data_handle_t initial_handle, unsigned nparts, starpu_data_handle_t *children)
  525. {
  526. STARPU_ASSERT_MSG(initial_handle->sequential_consistency, "partition planning is currently only supported for data with sequential consistency");
  527. _starpu_spin_lock(&initial_handle->header_lock);
  528. STARPU_ASSERT_MSG(initial_handle->partitioned == 0, "One can't submit several partition plannings at the same time");
  529. STARPU_ASSERT_MSG(initial_handle->readonly == 0, "One can't submit a partition planning while a readonly partitioning is active");
  530. initial_handle->partitioned++;
  531. _starpu_spin_unlock(&initial_handle->header_lock);
  532. if (!initial_handle->initialized)
  533. /* No need for coherency, it is not initialized */
  534. return;
  535. unsigned i;
  536. struct starpu_data_descr descr[nparts];
  537. for (i = 0; i < nparts; i++)
  538. {
  539. STARPU_ASSERT_MSG(children[i]->father_handle == initial_handle, "children parameter of starpu_data_partition_submit must be the children of the parent parameter");
  540. descr[i].handle = children[i];
  541. descr[i].mode = STARPU_W;
  542. }
  543. /* TODO: assert nparts too */
  544. starpu_task_insert(initial_handle->switch_cl, STARPU_RW, initial_handle, STARPU_DATA_MODE_ARRAY, descr, nparts, 0);
  545. starpu_data_invalidate_submit(initial_handle);
  546. }
  547. void starpu_data_partition_readonly_submit(starpu_data_handle_t initial_handle, unsigned nparts, starpu_data_handle_t *children)
  548. {
  549. STARPU_ASSERT_MSG(initial_handle->sequential_consistency, "partition planning is currently only supported for data with sequential consistency");
  550. _starpu_spin_lock(&initial_handle->header_lock);
  551. STARPU_ASSERT_MSG(initial_handle->partitioned == 0 || initial_handle->readonly, "One can't submit a readonly partition planning at the same time as a readwrite partition planning");
  552. initial_handle->partitioned++;
  553. initial_handle->readonly = 1;
  554. _starpu_spin_unlock(&initial_handle->header_lock);
  555. STARPU_ASSERT_MSG(initial_handle->initialized, "It is odd to read-only-partition a data which does not have a value yet");
  556. unsigned i;
  557. struct starpu_data_descr descr[nparts];
  558. for (i = 0; i < nparts; i++)
  559. {
  560. STARPU_ASSERT_MSG(children[i]->father_handle == initial_handle, "children parameter of starpu_data_partition_submit must be the children of the parent parameter");
  561. descr[i].handle = children[i];
  562. descr[i].mode = STARPU_W;
  563. }
  564. /* TODO: assert nparts too */
  565. starpu_task_insert(initial_handle->switch_cl, STARPU_R, initial_handle, STARPU_DATA_MODE_ARRAY, descr, nparts, 0);
  566. }
  567. void starpu_data_partition_readwrite_upgrade_submit(starpu_data_handle_t initial_handle, unsigned nparts, starpu_data_handle_t *children)
  568. {
  569. STARPU_ASSERT_MSG(initial_handle->sequential_consistency, "partition planning is currently only supported for data with sequential consistency");
  570. _starpu_spin_lock(&initial_handle->header_lock);
  571. STARPU_ASSERT_MSG(initial_handle->partitioned == 1, "One can't upgrade a readonly partition planning to readwrite while other readonly partition plannings are active");
  572. STARPU_ASSERT_MSG(initial_handle->readonly == 1, "One can only upgrade a readonly partition planning");
  573. initial_handle->readonly = 0;
  574. _starpu_spin_unlock(&initial_handle->header_lock);
  575. unsigned i;
  576. struct starpu_data_descr descr[nparts];
  577. for (i = 0; i < nparts; i++)
  578. {
  579. STARPU_ASSERT_MSG(children[i]->father_handle == initial_handle, "children parameter of starpu_data_partition_submit must be the children of the parent parameter");
  580. descr[i].handle = children[i];
  581. descr[i].mode = STARPU_W;
  582. }
  583. /* TODO: assert nparts too */
  584. starpu_task_insert(initial_handle->switch_cl, STARPU_RW, initial_handle, STARPU_DATA_MODE_ARRAY, descr, nparts, 0);
  585. starpu_data_invalidate_submit(initial_handle);
  586. }
  587. void starpu_data_unpartition_submit(starpu_data_handle_t initial_handle, unsigned nparts, starpu_data_handle_t *children, int gather_node)
  588. {
  589. STARPU_ASSERT_MSG(initial_handle->sequential_consistency, "partition planning is currently only supported for data with sequential consistency");
  590. STARPU_ASSERT_MSG(gather_node == initial_handle->home_node || gather_node == -1, "gathering node different from home node is currently not supported");
  591. _starpu_spin_lock(&initial_handle->header_lock);
  592. STARPU_ASSERT_MSG(initial_handle->partitioned >= 1, "No partition planning is active for this handle");
  593. initial_handle->partitioned--;
  594. if (!initial_handle->partitioned)
  595. initial_handle->readonly = 0;
  596. _starpu_spin_unlock(&initial_handle->header_lock);
  597. unsigned i, n;
  598. struct starpu_data_descr descr[nparts];
  599. for (i = 0, n = 0; i < nparts; i++)
  600. {
  601. STARPU_ASSERT_MSG(children[i]->father_handle == initial_handle, "children parameter of starpu_data_partition_submit must be the children of the parent parameter");
  602. if (!children[i]->initialized)
  603. /* Dropped value, do not care about coherency for this one */
  604. continue;
  605. descr[n].handle = children[i];
  606. descr[n].mode = STARPU_RW;
  607. n++;
  608. }
  609. /* TODO: assert nparts too */
  610. starpu_task_insert(initial_handle->switch_cl, STARPU_W, initial_handle, STARPU_DATA_MODE_ARRAY, descr, n, 0);
  611. for (i = 0; i < nparts; i++)
  612. starpu_data_invalidate_submit(children[i]);
  613. }
  614. void starpu_data_unpartition_readonly_submit(starpu_data_handle_t initial_handle, unsigned nparts, starpu_data_handle_t *children, int gather_node)
  615. {
  616. STARPU_ASSERT_MSG(initial_handle->sequential_consistency, "partition planning is currently only supported for data with sequential consistency");
  617. STARPU_ASSERT_MSG(gather_node == initial_handle->home_node || gather_node == -1, "gathering node different from home node is currently not supported");
  618. _starpu_spin_lock(&initial_handle->header_lock);
  619. STARPU_ASSERT_MSG(initial_handle->partitioned >= 1, "No partition planning is active for this handle");
  620. initial_handle->readonly = 1;
  621. _starpu_spin_unlock(&initial_handle->header_lock);
  622. unsigned i, n;
  623. struct starpu_data_descr descr[nparts];
  624. for (i = 0, n = 0; i < nparts; i++)
  625. {
  626. STARPU_ASSERT_MSG(children[i]->father_handle == initial_handle, "children parameter of starpu_data_partition_submit must be the children of the parent parameter");
  627. if (!children[i]->initialized)
  628. /* Dropped value, do not care about coherency for this one */
  629. continue;
  630. descr[n].handle = children[i];
  631. descr[n].mode = STARPU_R;
  632. n++;
  633. }
  634. /* TODO: assert nparts too */
  635. starpu_task_insert(initial_handle->switch_cl, STARPU_W, initial_handle, STARPU_DATA_MODE_ARRAY, descr, n, 0);
  636. }
  637. /*
  638. * Given an integer N, NPARTS the number of parts it must be divided in, ID the
  639. * part currently considered, determines the CHUNK_SIZE and the OFFSET, taking
  640. * into account the size of the elements stored in the data structure ELEMSIZE
  641. * and LD, the leading dimension.
  642. */
  643. void
  644. _starpu_filter_nparts_compute_chunk_size_and_offset(unsigned n, unsigned nparts,
  645. size_t elemsize, unsigned id,
  646. unsigned ld, unsigned *chunk_size,
  647. size_t *offset)
  648. {
  649. *chunk_size = n/nparts;
  650. unsigned remainder = n % nparts;
  651. if (id < remainder)
  652. (*chunk_size)++;
  653. /*
  654. * Computing the total offset. The formula may not be really clear, but
  655. * it really just is:
  656. *
  657. * total = 0;
  658. * for (i = 0; i < id; i++)
  659. * {
  660. * total += n/nparts;
  661. * if (i < n%nparts)
  662. * total++;
  663. * }
  664. * offset = total * elemsize * ld;
  665. */
  666. if (offset != NULL)
  667. *offset = (id *(n/nparts) + STARPU_MIN(remainder, id)) * ld * elemsize;
  668. }