filters.c 17 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508
  1. /* StarPU --- Runtime system for heterogeneous multicore architectures.
  2. *
  3. * Copyright (C) 2010-2014 Université de Bordeaux 1
  4. * Copyright (C) 2010 Mehdi Juhoor <mjuhoor@gmail.com>
  5. * Copyright (C) 2010, 2011, 2012, 2013 Centre National de la Recherche Scientifique
  6. * Copyright (C) 2012 INRIA
  7. *
  8. * StarPU is free software; you can redistribute it and/or modify
  9. * it under the terms of the GNU Lesser General Public License as published by
  10. * the Free Software Foundation; either version 2.1 of the License, or (at
  11. * your option) any later version.
  12. *
  13. * StarPU is distributed in the hope that it will be useful, but
  14. * WITHOUT ANY WARRANTY; without even the implied warranty of
  15. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
  16. *
  17. * See the GNU Lesser General Public License in COPYING.LGPL for more details.
  18. */
  19. #include <datawizard/filters.h>
  20. #include <datawizard/footprint.h>
  21. #include <datawizard/interfaces/data_interface.h>
  22. #include <core/task.h>
  23. static void starpu_data_create_children(starpu_data_handle_t handle, unsigned nchildren, struct starpu_data_filter *f);
  24. /*
  25. * This function applies a data filter on all the elements of a partition
  26. */
  27. static void map_filter(starpu_data_handle_t root_handle, struct starpu_data_filter *f)
  28. {
  29. /* we need to apply the data filter on all leaf of the tree */
  30. if (root_handle->nchildren == 0)
  31. {
  32. /* this is a leaf */
  33. starpu_data_partition(root_handle, f);
  34. }
  35. else
  36. {
  37. /* try to apply the data filter recursively */
  38. unsigned child;
  39. for (child = 0; child < root_handle->nchildren; child++)
  40. {
  41. starpu_data_handle_t handle_child = starpu_data_get_child(root_handle, child);
  42. map_filter(handle_child, f);
  43. }
  44. }
  45. }
  46. void starpu_data_vmap_filters(starpu_data_handle_t root_handle, unsigned nfilters, va_list pa)
  47. {
  48. unsigned i;
  49. for (i = 0; i < nfilters; i++)
  50. {
  51. struct starpu_data_filter *next_filter;
  52. next_filter = va_arg(pa, struct starpu_data_filter *);
  53. STARPU_ASSERT(next_filter);
  54. map_filter(root_handle, next_filter);
  55. }
  56. }
  57. void starpu_data_map_filters(starpu_data_handle_t root_handle, unsigned nfilters, ...)
  58. {
  59. va_list pa;
  60. va_start(pa, nfilters);
  61. starpu_data_vmap_filters(root_handle, nfilters, pa);
  62. va_end(pa);
  63. }
  64. int starpu_data_get_nb_children(starpu_data_handle_t handle)
  65. {
  66. return handle->nchildren;
  67. }
  68. starpu_data_handle_t starpu_data_get_child(starpu_data_handle_t handle, unsigned i)
  69. {
  70. STARPU_ASSERT_MSG(handle->nchildren != 0, "Data %p has to be partitioned before accessing children", handle);
  71. STARPU_ASSERT_MSG(i < handle->nchildren, "Invalid child index %u in handle %p, maximum %u", i, handle, handle->nchildren);
  72. return &handle->children[i];
  73. }
  74. /*
  75. * example starpu_data_get_sub_data(starpu_data_handle_t root_handle, 3, 42, 0, 1);
  76. */
  77. starpu_data_handle_t starpu_data_get_sub_data(starpu_data_handle_t root_handle, unsigned depth, ... )
  78. {
  79. va_list pa;
  80. va_start(pa, depth);
  81. starpu_data_handle_t handle = starpu_data_vget_sub_data(root_handle, depth, pa);
  82. va_end(pa);
  83. return handle;
  84. }
  85. starpu_data_handle_t starpu_data_vget_sub_data(starpu_data_handle_t root_handle, unsigned depth, va_list pa )
  86. {
  87. STARPU_ASSERT(root_handle);
  88. starpu_data_handle_t current_handle = root_handle;
  89. /* the variable number of argument must correlate the depth in the tree */
  90. unsigned i;
  91. for (i = 0; i < depth; i++)
  92. {
  93. unsigned next_child;
  94. next_child = va_arg(pa, unsigned);
  95. STARPU_ASSERT_MSG(current_handle->nchildren != 0, "Data %p has to be partitioned before accessing children", current_handle);
  96. STARPU_ASSERT_MSG(next_child < current_handle->nchildren, "Bogus child number %u, data %p only has %u children", next_child, current_handle, current_handle->nchildren);
  97. current_handle = &current_handle->children[next_child];
  98. }
  99. return current_handle;
  100. }
/* Split INITIAL_HANDLE into pieces according to the filter F: allocate
 * the children handles, initialize each child from the parent's state,
 * and let F's filter_func fill in each child's per-node interface.
 * The whole operation runs under the parent's header lock. */
void starpu_data_partition(starpu_data_handle_t initial_handle, struct starpu_data_filter *f)
{
	unsigned nparts;
	unsigned i;
	unsigned node;

	/* first take care to properly lock the data header */
	_starpu_spin_lock(&initial_handle->header_lock);

	STARPU_ASSERT_MSG(initial_handle->nchildren == 0, "there should not be mutiple filters applied on the same data %p, futher filtering has to be done on children", initial_handle);

	/* how many parts ? either asked to the filter at runtime, or taken
	 * from the filter's static nchildren field */
	if (f->get_nchildren)
		nparts = f->get_nchildren(f, initial_handle);
	else
		nparts = f->nchildren;

	STARPU_ASSERT_MSG(nparts > 0, "Partitioning data %p in 0 piece does not make sense", initial_handle);

	/* allocate the children */
	starpu_data_create_children(initial_handle, nparts, f);

	unsigned nworkers = starpu_worker_get_count();

	/* look for at least one node that holds a valid copy of the data */
	for (node = 0; node < STARPU_MAXNODES; node++)
	{
		if (initial_handle->per_node[node].state != STARPU_INVALID)
			break;
	}
	if (node == STARPU_MAXNODES)
	{
		/* This is lazy allocation, allocate it now in main RAM, so as
		 * to have somewhere to gather pieces later */
		int ret = _starpu_allocate_memory_on_node(initial_handle, &initial_handle->per_node[0], 0);
#ifdef STARPU_DEVEL
#warning we should reclaim memory if allocation failed
#endif
		STARPU_ASSERT(!ret);
	}

	for (i = 0; i < nparts; i++)
	{
		starpu_data_handle_t child =
			starpu_data_get_child(initial_handle, i);

		STARPU_ASSERT(child);

		child->nchildren = 0;
		child->rank = initial_handle->rank;
		/* record the child's position in the partition tree */
		child->root_handle = initial_handle->root_handle;
		child->father_handle = initial_handle;
		child->sibling_index = i;
		child->depth = initial_handle->depth + 1;

		/* inherit the parent's data flags */
		child->is_not_important = initial_handle->is_not_important;
		child->wt_mask = initial_handle->wt_mask;
		child->home_node = initial_handle->home_node;
		child->is_readonly = initial_handle->is_readonly;

		/* initialize the chunk lock */
		child->req_list = _starpu_data_requester_list_new();
		child->reduction_req_list = _starpu_data_requester_list_new();
		child->reduction_tmp_handles = NULL;
		child->refcnt = 0;
		child->busy_count = 0;
		child->busy_waiting = 0;
		STARPU_PTHREAD_MUTEX_INIT(&child->busy_mutex, NULL);
		STARPU_PTHREAD_COND_INIT(&child->busy_cond, NULL);
		child->reduction_refcnt = 0;
		_starpu_spin_init(&child->header_lock);

		child->sequential_consistency = initial_handle->sequential_consistency;

		STARPU_PTHREAD_MUTEX_INIT(&child->sequential_consistency_mutex, NULL);
		child->last_submitted_mode = STARPU_R;
		child->last_sync_task = NULL;
		/* the circular list of submitted accessors starts empty:
		 * next/prev point back at the sentinel element itself */
		child->last_submitted_accessors.task = NULL;
		child->last_submitted_accessors.next = &child->last_submitted_accessors;
		child->last_submitted_accessors.prev = &child->last_submitted_accessors;
		child->post_sync_tasks = NULL;
		/* Tell helgrind that the race in _starpu_unlock_post_sync_tasks is fine */
		STARPU_HG_DISABLE_CHECKING(child->post_sync_tasks_cnt);
		child->post_sync_tasks_cnt = 0;

		/* The methods used for reduction are propagated to the
		 * children. */
		child->redux_cl = initial_handle->redux_cl;
		child->init_cl = initial_handle->init_cl;

#ifdef STARPU_USE_FXT
		child->last_submitted_ghost_sync_id_is_valid = 0;
		child->last_submitted_ghost_sync_id = 0;
		child->last_submitted_ghost_accessors_id = NULL;
#endif

		for (node = 0; node < STARPU_MAXNODES; node++)
		{
			struct _starpu_data_replicate *initial_replicate;
			struct _starpu_data_replicate *child_replicate;

			initial_replicate = &initial_handle->per_node[node];
			child_replicate = &child->per_node[node];

			/* the child's per-node replicate starts in the same
			 * coherency/allocation state as the parent's */
			child_replicate->state = initial_replicate->state;
			child_replicate->allocated = initial_replicate->allocated;
			child_replicate->automatically_allocated = initial_replicate->automatically_allocated;
			child_replicate->refcnt = 0;
			child_replicate->memory_node = node;
			child_replicate->relaxed_coherency = 0;
			child_replicate->initialized = initial_replicate->initialized;

			/* update the interface: let the filter compute this
			 * piece's interface from the parent's */
			void *initial_interface = starpu_data_get_interface_on_node(initial_handle, node);
			void *child_interface = starpu_data_get_interface_on_node(child, node);

			f->filter_func(initial_interface, child_interface, f, i, nparts);
		}

		unsigned worker;
		for (worker = 0; worker < nworkers; worker++)
		{
			struct _starpu_data_replicate *child_replicate;
			child_replicate = &child->per_worker[worker];

			/* per-worker replicates start invalid, unallocated and
			 * with relaxed coherency */
			child_replicate->state = STARPU_INVALID;
			child_replicate->allocated = 0;
			child_replicate->automatically_allocated = 0;
			child_replicate->refcnt = 0;
			child_replicate->memory_node = starpu_worker_get_memory_node(worker);
			child_replicate->requested = 0;

			for (node = 0; node < STARPU_MAXNODES; node++)
			{
				child_replicate->request[node] = NULL;
			}

			child_replicate->relaxed_coherency = 1;
			child_replicate->initialized = 0;

			/* duplicate the content of the interface on node 0 */
			memcpy(child_replicate->data_interface, child->per_node[0].data_interface, child->ops->interface_size);
		}

		/* We compute the size and the footprint of the child once and
		 * store it in the handle */
		child->footprint = _starpu_compute_data_footprint(child);

		void *ptr;
		ptr = starpu_data_handle_to_pointer(child, STARPU_MAIN_RAM);
		if (ptr != NULL)
			_starpu_data_register_ram_pointer(child, ptr);
	}

	/* now let the header */
	_starpu_spin_unlock(&initial_handle->header_lock);
}
  228. static
  229. void _starpu_empty_codelet_function(void *buffers[], void *args)
  230. {
  231. (void) buffers; // unused;
  232. (void) args; // unused;
  233. }
/* Gather the pieces of ROOT_HANDLE back on node GATHERING_NODE and
 * destroy the children handles, so the root data may be used (or
 * re-partitioned) again.  Children are processed under their header
 * locks, taken in child order to avoid deadlocks. */
void starpu_data_unpartition(starpu_data_handle_t root_handle, unsigned gathering_node)
{
	unsigned child;
	unsigned worker;
	unsigned nworkers = starpu_worker_get_count();
	unsigned node;
	/* VLA: per-child data sizes, needed later when releasing chunks */
	unsigned sizes[root_handle->nchildren];

	_STARPU_TRACE_START_UNPARTITION(root_handle, gathering_node);
	_starpu_spin_lock(&root_handle->header_lock);

	STARPU_ASSERT_MSG(root_handle->nchildren != 0, "data %p is not partitioned, can not unpartition it", root_handle);

	/* first take all the children lock (in order !) */
	for (child = 0; child < root_handle->nchildren; child++)
	{
		starpu_data_handle_t child_handle = starpu_data_get_child(root_handle, child);

		/* make sure the intermediate children is unpartitionned as well */
		if (child_handle->nchildren > 0)
			starpu_data_unpartition(child_handle, gathering_node);

		sizes[child] = _starpu_data_get_size(child_handle);

		/* If this is a multiformat handle, we must convert the data now */
#ifdef STARPU_DEVEL
#warning TODO: _starpu_fetch_data_on_node should be doing it
#endif
		if (_starpu_data_is_multiformat_handle(child_handle) &&
			starpu_node_get_kind(child_handle->mf_node) != STARPU_CPU_RAM)
		{
			/* synchronous no-op task: its submission forces the
			 * runtime to perform the pending format conversion */
			struct starpu_codelet cl =
			{
				.where = STARPU_CPU,
				.cpu_funcs = { _starpu_empty_codelet_function, NULL },
				.modes = { STARPU_RW },
				.nbuffers = 1
			};
			struct starpu_task *task = starpu_task_create();
			task->name = "convert_data";

			STARPU_TASK_SET_HANDLE(task, child_handle, 0);
			task->cl = &cl;
			task->synchronous = 1;
			if (_starpu_task_submit_internally(task) != 0)
				_STARPU_ERROR("Could not submit the conversion task while unpartitionning\n");
		}

		int ret;
		/* for now we pretend that the RAM is almost unlimited and that gathering
		 * data should be possible from the node that does the unpartionning ... we
		 * don't want to have the programming deal with memory shortage at that time,
		 * really */
		if (child_handle->current_mode == STARPU_REDUX)
		{
			/* Acquire the child data on the gathering node. This will trigger collapsing the reduction */
			ret = starpu_data_acquire_on_node(child_handle, gathering_node, STARPU_RW);
			_starpu_unlock_post_sync_tasks(child_handle);
		}
		else
		{
			/* Simply transfer any pending data */
			ret = _starpu_fetch_data_on_node(child_handle, &child_handle->per_node[gathering_node], STARPU_R, 0, 0, NULL, NULL);
		}
		STARPU_ASSERT(ret == 0);

		_starpu_spin_lock(&child_handle->header_lock);

		/* the child's RAM pointer must no longer resolve to this handle */
		_starpu_data_unregister_ram_pointer(child_handle);

		/* release any per-worker chunks the child may still own */
		for (worker = 0; worker < nworkers; worker++)
		{
			struct _starpu_data_replicate *local = &child_handle->per_worker[worker];
			STARPU_ASSERT(local->state == STARPU_INVALID);
			if (local->allocated && local->automatically_allocated)
				_starpu_request_mem_chunk_removal(child_handle, local, starpu_worker_get_memory_node(worker), sizes[child]);
		}

		_starpu_memory_stats_free(child_handle);
		_starpu_data_requester_list_delete(child_handle->req_list);
		_starpu_data_requester_list_delete(child_handle->reduction_req_list);
	}

	/* the gathering_node should now have a valid copy of all the children.
	 * For all nodes, if the node had all copies and none was locally
	 * allocated then the data is still valid there, else, it's invalidated
	 * for the gathering node, if we have some locally allocated data, we
	 * copy all the children (XXX this should not happen so we just do not
	 * do anything since this is transparent ?) */
	unsigned still_valid[STARPU_MAXNODES];

	/* we do 2 passes : the first pass determines wether the data is still
	 * valid or not, the second pass is needed to choose between STARPU_SHARED and
	 * STARPU_OWNER */

	unsigned nvalids = 0;

	/* still valid ? (pass 1: a node is valid only if every child copy on
	 * it is valid AND the root itself is allocated there) */
	for (node = 0; node < STARPU_MAXNODES; node++)
	{
		struct _starpu_data_replicate *local;

		/* until an issue is found the data is assumed to be valid */
		unsigned isvalid = 1;

		for (child = 0; child < root_handle->nchildren; child++)
		{
			starpu_data_handle_t child_handle = starpu_data_get_child(root_handle, child);

			local = &child_handle->per_node[node];
			if (local->state == STARPU_INVALID)
			{
				/* One of the bits is missing */
				isvalid = 0;
			}
			if (local->mc && local->allocated && local->automatically_allocated)
				/* free the child data copy in a lazy fashion */
				_starpu_request_mem_chunk_removal(child_handle, local, node, sizes[child]);
		}

		local = &root_handle->per_node[node];
		if (!local->allocated)
			/* Even if we have all the bits, if we don't have the
			 * whole data, it's not valid */
			isvalid = 0;
		if (!isvalid && local->mc && local->allocated && local->automatically_allocated)
			/* free the data copy in a lazy fashion */
			_starpu_request_mem_chunk_removal(root_handle, local, node, _starpu_data_get_size(root_handle));

		/* if there was no invalid copy, the node still has a valid copy */
		still_valid[node] = isvalid;
		if (isvalid)
			nvalids++;
	}

	/* either shared or owned (pass 2: one valid node means exclusive
	 * ownership, several mean a shared state) */
	STARPU_ASSERT(nvalids > 0);

	enum _starpu_cache_state newstate = (nvalids == 1)?STARPU_OWNER:STARPU_SHARED;

	for (node = 0; node < STARPU_MAXNODES; node++)
	{
		root_handle->per_node[node].state =
			still_valid[node]?newstate:STARPU_INVALID;
	}

	/* tear down each child: free its interfaces, release and destroy its
	 * locks and synchronization objects */
	for (child = 0; child < root_handle->nchildren; child++)
	{
		starpu_data_handle_t child_handle = starpu_data_get_child(root_handle, child);
		_starpu_data_free_interfaces(child_handle);
		_starpu_spin_unlock(&child_handle->header_lock);
		_starpu_spin_destroy(&child_handle->header_lock);
		STARPU_PTHREAD_MUTEX_DESTROY(&child_handle->busy_mutex);
		STARPU_PTHREAD_COND_DESTROY(&child_handle->busy_cond);
		STARPU_PTHREAD_MUTEX_DESTROY(&child_handle->sequential_consistency_mutex);
	}

	/* there is no child anymore */
	free(root_handle->children);
	root_handle->children = NULL;
	root_handle->nchildren = 0;

	/* now the parent may be used again so we release the lock */
	_starpu_spin_unlock(&root_handle->header_lock);

	_STARPU_TRACE_END_UNPARTITION(root_handle, gathering_node);
}
  372. /* each child may have his own interface type */
  373. static void starpu_data_create_children(starpu_data_handle_t handle, unsigned nchildren, struct starpu_data_filter *f)
  374. {
  375. handle->children = (struct _starpu_data_state *) calloc(nchildren, sizeof(struct _starpu_data_state));
  376. STARPU_ASSERT(handle->children);
  377. unsigned child;
  378. for (child = 0; child < nchildren; child++)
  379. {
  380. starpu_data_handle_t handle_child;
  381. struct starpu_data_interface_ops *ops;
  382. /* what's this child's interface ? */
  383. if (f->get_child_ops)
  384. ops = f->get_child_ops(f, child);
  385. else
  386. ops = handle->ops;
  387. handle_child = &handle->children[child];
  388. _starpu_data_handle_init(handle_child, ops, handle->mf_node);
  389. }
  390. /* this handle now has children */
  391. handle->nchildren = nchildren;
  392. }
  393. /*
  394. * Given an integer N, NPARTS the number of parts it must be divided in, ID the
  395. * part currently considered, determines the CHUNK_SIZE and the OFFSET, taking
  396. * into account the size of the elements stored in the data structure ELEMSIZE
  397. * and LD, the leading dimension.
  398. */
  399. void
  400. _starpu_filter_nparts_compute_chunk_size_and_offset(unsigned n, unsigned nparts,
  401. size_t elemsize, unsigned id,
  402. unsigned ld, unsigned *chunk_size,
  403. size_t *offset)
  404. {
  405. *chunk_size = n/nparts;
  406. unsigned remainder = n % nparts;
  407. if (id < remainder)
  408. (*chunk_size)++;
  409. /*
  410. * Computing the total offset. The formula may not be really clear, but
  411. * it really just is:
  412. *
  413. * total = 0;
  414. * for (i = 0; i < id; i++)
  415. * {
  416. * total += n/nparts;
  417. * if (i < n%nparts)
  418. * total++;
  419. * }
  420. * offset = total * elemsize * ld;
  421. */
  422. if (offset != NULL)
  423. *offset = (id *(n/nparts) + STARPU_MIN(remainder, id)) * ld * elemsize;
  424. }