memalloc.c

/*
 * StarPU
 * Copyright (C) INRIA 2008-2009 (see AUTHORS file)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation; either version 2.1 of the License, or (at
 * your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * See the GNU Lesser General Public License in COPYING.LGPL for more details.
 */
#include <errno.h>
#include <pthread.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#include "memalloc.h"
#include <datawizard/footprint.h>
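
/* Per memory node bookkeeping: mc_list[node] holds the chunks currently
 * allocated on that node, mc_list_to_free[node] holds the chunks whose
 * removal was requested, and mc_rwlock[node] protects both lists. */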
static pthread_rwlock_t mc_rwlock[STARPU_MAXNODES];
static starpu_mem_chunk_list_t mc_list[STARPU_MAXNODES];
static starpu_mem_chunk_list_t mc_list_to_free[STARPU_MAXNODES];

static size_t liberate_memory_on_node(starpu_mem_chunk_t mc, uint32_t node);
void _starpu_init_mem_chunk_lists(void)
{
	unsigned i;
	for (i = 0; i < STARPU_MAXNODES; i++)
	{
		pthread_rwlock_init(&mc_rwlock[i], NULL);
		mc_list[i] = starpu_mem_chunk_list_new();
		mc_list_to_free[i] = starpu_mem_chunk_list_new();
	}
}

void _starpu_deinit_mem_chunk_lists(void)
{
	unsigned i;
	for (i = 0; i < STARPU_MAXNODES; i++)
	{
		starpu_mem_chunk_list_delete(mc_list[i]);
		starpu_mem_chunk_list_delete(mc_list_to_free[i]);
	}
}
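
/* Recursively lock every leaf of a data subtree. On a leaf we spin on the
 * header lock, letting the datawizard make progress between attempts so
 * that pending requests can still complete while we wait. */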
static void lock_all_subtree(starpu_data_handle handle)
{
	if (handle->nchildren == 0)
	{
		/* this is a leaf */
		while (_starpu_spin_trylock(&handle->header_lock))
			_starpu_datawizard_progress(_starpu_get_local_memory_node(), 0);
	}
	else {
		/* lock all sub-subtrees children */
		unsigned child;
		for (child = 0; child < handle->nchildren; child++)
		{
			lock_all_subtree(&handle->children[child]);
		}
	}
}
static void unlock_all_subtree(starpu_data_handle handle)
{
	if (handle->nchildren == 0)
	{
		/* this is a leaf */
		_starpu_spin_unlock(&handle->header_lock);
	}
	else {
		/* unlock all sub-subtrees children
		 * Note that this is done in the reverse order of
		 * lock_all_subtree so that we avoid deadlocks */
		unsigned i;
		for (i = 0; i < handle->nchildren; i++)
		{
			unsigned child = handle->nchildren - 1 - i;
			unlock_all_subtree(&handle->children[child]);
		}
	}
}
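
/* A subtree may only be evicted from a node if no task currently references
 * any of its leaves there, i.e. every per-node reference count is zero. */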
static unsigned may_free_subtree(starpu_data_handle handle, unsigned node)
{
	/* we only free if no one refers to the leaf */
	uint32_t refcnt = _starpu_get_data_refcnt(handle, node);
	if (refcnt)
		return 0;

	if (!handle->nchildren)
		return 1;

	/* look into all sub-subtrees children */
	unsigned child;
	for (child = 0; child < handle->nchildren; child++)
	{
		unsigned res;
		res = may_free_subtree(&handle->children[child], node);
		if (!res)
			return 0;
	}

	/* no problem was found */
	return 1;
}
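
/* Release the buffer described by a memory chunk and drop the chunk from the
 * active list. Returns the number of bytes actually freed. */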
static size_t do_free_mem_chunk(starpu_mem_chunk_t mc, unsigned node)
{
	size_t size;

	/* free the actual buffer */
	size = liberate_memory_on_node(mc, node);

	/* remove the mem_chunk from the list */
	starpu_mem_chunk_list_erase(mc_list[node], mc);
	free(mc->interface);
	starpu_mem_chunk_delete(mc);

	return size;
}
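
/* Move the valid copies of a subtree away from src_node so that the buffers
 * on src_node can be reclaimed: an OWNER copy is written back to dst_node,
 * a SHARED copy is simply invalidated (the last remaining shared copy is
 * promoted to OWNER), and an INVALID copy needs no action. */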
static void transfer_subtree_to_node(starpu_data_handle handle, unsigned src_node,
					unsigned dst_node)
{
	unsigned i;
	unsigned last = 0;
	unsigned cnt;
	int ret;

	if (handle->nchildren == 0)
	{
		/* this is a leaf */
		switch (handle->per_node[src_node].state) {
		case STARPU_OWNER:
			/* the local node has the only copy */
			/* the owner is now the destination node */
			handle->per_node[src_node].state = STARPU_INVALID;
			handle->per_node[dst_node].state = STARPU_OWNER;

#warning we should use requests during memory reclaim
			/* TODO: use a request! */
			handle->per_node[src_node].refcnt++;
			handle->per_node[dst_node].refcnt++;

			ret = _starpu_driver_copy_data_1_to_1(handle, src_node, dst_node, 0, NULL, 1);
			STARPU_ASSERT(ret == 0);

			handle->per_node[src_node].refcnt--;
			handle->per_node[dst_node].refcnt--;
			break;
		case STARPU_SHARED:
			/* some other node may have the copy */
			handle->per_node[src_node].state = STARPU_INVALID;

			/* count the number of copies */
			cnt = 0;
			for (i = 0; i < STARPU_MAXNODES; i++)
			{
				if (handle->per_node[i].state == STARPU_SHARED) {
					cnt++;
					last = i;
				}
			}

			if (cnt == 1)
				handle->per_node[last].state = STARPU_OWNER;
			break;
		case STARPU_INVALID:
			/* nothing to be done */
			break;
		default:
			STARPU_ABORT();
			break;
		}
	}
	else {
		/* transfer all sub-subtrees children */
		unsigned child;
		for (child = 0; child < handle->nchildren; child++)
		{
			transfer_subtree_to_node(&handle->children[child],
						 src_node, dst_node);
		}
	}
}
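
/* Try to evict a single memory chunk from a node: lock the whole subtree,
 * check that nobody uses it, write the data back to main memory (node 0)
 * and free the buffer. Returns the number of bytes freed, or 0 if the chunk
 * is still in use. */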
static size_t try_to_free_mem_chunk(starpu_mem_chunk_t mc, unsigned node, unsigned attempts)
{
	size_t liberated = 0;

	starpu_data_handle handle;
	handle = mc->data;
	STARPU_ASSERT(handle);

	if (attempts == 0)
	{
		/* this is the first attempt to free memory,
		 * so we avoid dropping memory that is still requested */
		/* TODO */
	}

	/* try to lock all the leaves of the subtree */
	lock_all_subtree(handle);

	/* check if they are all "free" */
	if (may_free_subtree(handle, node))
	{
		STARPU_ASSERT(handle->per_node[node].refcnt == 0);

		/* in case there was nobody using that buffer, throw it
		 * away after writing it back to main memory */
		transfer_subtree_to_node(handle, node, 0);

		STARPU_ASSERT(handle->per_node[node].refcnt == 0);

		/* now the actual buffer may be freed */
		liberated = do_free_mem_chunk(mc, node);
	}

	/* unlock the leaves */
	unlock_all_subtree(handle);

	return liberated;
}
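
/* Allocation cache: instead of freeing a buffer and allocating a new one, a
 * chunk with a matching footprint can be handed over to another piece of
 * data directly, which saves a driver free/alloc round trip. */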
#ifdef STARPU_USE_ALLOCATION_CACHE
/* we assume that mc_rwlock[node] is taken */
static void reuse_mem_chunk(unsigned node, starpu_data_handle new_data, starpu_mem_chunk_t mc, unsigned is_already_in_mc_list)
{
	starpu_data_handle old_data;
	old_data = mc->data;

	/* we found an appropriate mem chunk: so we get it out
	 * of the "to free" list, and reassign it to the new
	 * piece of data */

	if (!is_already_in_mc_list)
	{
		starpu_mem_chunk_list_erase(mc_list_to_free[node], mc);
	}

	if (!mc->data_was_deleted)
	{
		old_data->per_node[node].allocated = 0;
		old_data->per_node[node].automatically_allocated = 0;
	}

	new_data->per_node[node].allocated = 1;
	new_data->per_node[node].automatically_allocated = 1;

	memcpy(&new_data->interface[node], mc->interface, old_data->interface_size);

	mc->data = new_data;
	mc->data_was_deleted = 0;
	/* mc->ops, mc->size, mc->footprint and mc->interface should be
	 * unchanged ! */

	/* reinsert the mem chunk in the list of active memory chunks */
	if (!is_already_in_mc_list)
	{
		starpu_mem_chunk_list_push_front(mc_list[node], mc);
	}
}
static unsigned try_to_reuse_mem_chunk(starpu_mem_chunk_t mc, unsigned node, starpu_data_handle new_data, unsigned is_already_in_mc_list)
{
	unsigned success = 0;

	starpu_data_handle old_data;
	old_data = mc->data;
	STARPU_ASSERT(old_data);

	/* try to lock all the leaves of the subtree */
	lock_all_subtree(old_data);

	/* check if they are all "free" */
	if (may_free_subtree(old_data, node))
	{
		success = 1;

		/* in case there was nobody using that buffer, throw it
		 * away after writing it back to main memory */
		transfer_subtree_to_node(old_data, node, 0);

		/* now replace the previous data */
		reuse_mem_chunk(node, new_data, mc, is_already_in_mc_list);
	}

	/* unlock the leaves */
	unlock_all_subtree(old_data);

	return success;
}
/* This function looks for a memory chunk that matches a given footprint in
 * the list of mem chunks that need to be freed. */
static unsigned try_to_find_reusable_mem_chunk(unsigned node, starpu_data_handle data, uint32_t footprint)
{
	pthread_rwlock_wrlock(&mc_rwlock[node]);

	/* go through all buffers for which there was a removal request */
	starpu_mem_chunk_t mc, next_mc;
	for (mc = starpu_mem_chunk_list_begin(mc_list_to_free[node]);
	     mc != starpu_mem_chunk_list_end(mc_list_to_free[node]);
	     mc = next_mc)
	{
		next_mc = starpu_mem_chunk_list_next(mc);

		if (mc->footprint == footprint)
		{
			starpu_data_handle old_data;
			old_data = mc->data;

			if (old_data->per_node[node].allocated &&
			    old_data->per_node[node].automatically_allocated)
			{
				reuse_mem_chunk(node, data, mc, 0);

				pthread_rwlock_unlock(&mc_rwlock[node]);
				return 1;
			}
		}
	}

	/* now look for some non essential data in the active list */
	for (mc = starpu_mem_chunk_list_begin(mc_list[node]);
	     mc != starpu_mem_chunk_list_end(mc_list[node]);
	     mc = next_mc)
	{
		/* there is a risk that the memory chunk is freed before the
		 * next iteration starts: so we compute the next element of
		 * the list now */
		next_mc = starpu_mem_chunk_list_next(mc);

		if (mc->data->is_not_important && (mc->footprint == footprint))
		{
			// fprintf(stderr, "found a candidate ...\n");
			if (try_to_reuse_mem_chunk(mc, node, data, 1))
			{
				pthread_rwlock_unlock(&mc_rwlock[node]);
				return 1;
			}
		}
	}

	pthread_rwlock_unlock(&mc_rwlock[node]);

	return 0;
}
#endif
/*
 * Try to free some memory on the specified node.
 * Returns the amount of memory (in bytes) that could be freed.
 */
static size_t reclaim_memory(uint32_t node, size_t toreclaim __attribute__ ((unused)), unsigned attempts)
{
	// fprintf(stderr, "reclaim memory...\n");
	int res;
	size_t liberated = 0;

	res = pthread_rwlock_wrlock(&mc_rwlock[node]);
	STARPU_ASSERT(!res);

	/* remove all buffers for which there was a removal request */
	starpu_mem_chunk_t mc, next_mc;
	for (mc = starpu_mem_chunk_list_begin(mc_list_to_free[node]);
	     mc != starpu_mem_chunk_list_end(mc_list_to_free[node]);
	     mc = next_mc)
	{
		next_mc = starpu_mem_chunk_list_next(mc);

		liberated += liberate_memory_on_node(mc, node);

		starpu_mem_chunk_list_erase(mc_list_to_free[node], mc);
		free(mc->interface);
		starpu_mem_chunk_delete(mc);
	}

	/* try to free all allocated data potentially in use .. XXX */
	for (mc = starpu_mem_chunk_list_begin(mc_list[node]);
	     mc != starpu_mem_chunk_list_end(mc_list[node]);
	     mc = next_mc)
	{
		/* there is a risk that the memory chunk is freed before the
		 * next iteration starts: so we compute the next element of
		 * the list now */
		next_mc = starpu_mem_chunk_list_next(mc);

		liberated += try_to_free_mem_chunk(mc, node, attempts);
#if 0
		if (liberated > toreclaim)
			break;
#endif
	}

	// fprintf(stderr, "got %d MB back\n", (int)liberated/(1024*1024));

	res = pthread_rwlock_unlock(&mc_rwlock[node]);
	STARPU_ASSERT(!res);

	return liberated;
}
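
/* Record a freshly allocated buffer in the per-node chunk list, keeping a
 * private copy of the data interface so the buffer can still be freed or
 * reused after the handle itself has been deleted. */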
static void register_mem_chunk(starpu_data_handle handle, uint32_t dst_node, size_t size, unsigned automatically_allocated)
{
	int res;

	starpu_mem_chunk_t mc = starpu_mem_chunk_new();

	STARPU_ASSERT(handle);
	STARPU_ASSERT(handle->ops);

	mc->data = handle;
	mc->size = size;
	mc->footprint = _starpu_compute_data_footprint(handle);
	mc->ops = handle->ops;
	mc->data_was_deleted = 0;
	mc->automatically_allocated = automatically_allocated;

	/* the interface was already filled by ops->allocate_data_on_node */
	void *src_interface = starpu_data_get_interface_on_node(handle, dst_node);

	mc->interface = malloc(handle->ops->interface_size);
	STARPU_ASSERT(mc->interface);
	memcpy(mc->interface, src_interface, handle->ops->interface_size);

	res = pthread_rwlock_wrlock(&mc_rwlock[dst_node]);
	STARPU_ASSERT(!res);

	starpu_mem_chunk_list_push_front(mc_list[dst_node], mc);

	res = pthread_rwlock_unlock(&mc_rwlock[dst_node]);
	STARPU_ASSERT(!res);
}
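
/* Request the removal of the chunk that backs a handle on a given node: the
 * chunk is marked as belonging to deleted data and moved from the active
 * list to the "to free" list, so it is released during the next reclaim. */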
void _starpu_request_mem_chunk_removal(starpu_data_handle handle, unsigned node)
{
	int res;

	res = pthread_rwlock_wrlock(&mc_rwlock[node]);
	STARPU_ASSERT(!res);

	/* iterate over the list of memory chunks and remove the entry */
	starpu_mem_chunk_t mc, next_mc;
	for (mc = starpu_mem_chunk_list_begin(mc_list[node]);
	     mc != starpu_mem_chunk_list_end(mc_list[node]);
	     mc = next_mc)
	{
		next_mc = starpu_mem_chunk_list_next(mc);

		if (mc->data == handle) {
			/* we found the data */
			mc->data_was_deleted = 1;

			/* remove it from the main list */
			starpu_mem_chunk_list_erase(mc_list[node], mc);

			/* put it in the list of buffers to be removed */
			starpu_mem_chunk_list_push_front(mc_list_to_free[node], mc);

			res = pthread_rwlock_unlock(&mc_rwlock[node]);
			STARPU_ASSERT(!res);

			return;
		}
	}

	/* there was no corresponding buffer ... */
	res = pthread_rwlock_unlock(&mc_rwlock[node]);
	STARPU_ASSERT(!res);
}
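
/* Actually release the buffer described by a chunk through the interface's
 * liberate_data_on_node method, provided the buffer was automatically
 * allocated and is no longer referenced on that node. Returns the number of
 * bytes freed. */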
static size_t liberate_memory_on_node(starpu_mem_chunk_t mc, uint32_t node)
{
	size_t liberated = 0;

	STARPU_ASSERT(mc->ops);
	STARPU_ASSERT(mc->ops->liberate_data_on_node);

	starpu_data_handle handle = mc->data;

//	while (_starpu_spin_trylock(&handle->header_lock))
//		_starpu_datawizard_progress(_starpu_get_local_memory_node());

#warning can we block here ?
//	_starpu_spin_lock(&handle->header_lock);

	if (mc->automatically_allocated && (handle->per_node[node].refcnt == 0))
	{
		STARPU_ASSERT(handle->per_node[node].allocated);

		mc->ops->liberate_data_on_node(mc->interface, node);

		if (!mc->data_was_deleted)
		{
			handle->per_node[node].allocated = 0;

			/* XXX why do we need that ? */
			handle->per_node[node].automatically_allocated = 0;
		}

		liberated = mc->size;

		STARPU_ASSERT(handle->per_node[node].refcnt == 0);
	}

//	_starpu_spin_unlock(&handle->header_lock);

	return liberated;
}
/*
 * In order to allocate a piece of data, we try to reuse existing buffers if
 * possible:
 * 1 - we try to reuse a memchunk that is explicitly unused;
 * 2 - we go through the list of memory chunks and find one that is not
 *     referenced and that has the same footprint, and reuse it;
 * 3 - we call the usual driver's alloc method;
 * 4 - we go through the list of memory chunks and release those that are
 *     not referenced (or part of them).
 */
int _starpu_allocate_memory_on_node(starpu_data_handle handle, uint32_t dst_node, unsigned may_alloc)
{
	unsigned attempts = 0;
	size_t allocated_memory;

	STARPU_ASSERT(handle);

	/* A buffer is already allocated on the node */
	if (handle->per_node[dst_node].allocated)
		return 0;

	if (!may_alloc)
		return -ENOMEM;

	_starpu_data_allocation_inc_stats(dst_node);

#ifdef STARPU_USE_ALLOCATION_CACHE
	/* perhaps we can directly reuse a buffer in the free-list */
	uint32_t footprint = _starpu_compute_data_footprint(handle);

	STARPU_TRACE_START_ALLOC_REUSE(dst_node);
	if (try_to_find_reusable_mem_chunk(dst_node, handle, footprint))
	{
		_starpu_allocation_cache_hit(dst_node);
		return 0;
	}
	STARPU_TRACE_END_ALLOC_REUSE(dst_node);
#endif

	do {
		STARPU_ASSERT(handle->ops);
		STARPU_ASSERT(handle->ops->allocate_data_on_node);

		STARPU_TRACE_START_ALLOC(dst_node);
		allocated_memory = handle->ops->allocate_data_on_node(handle, dst_node);
		STARPU_TRACE_END_ALLOC(dst_node);

		if (!allocated_memory) {
			/* XXX perhaps we should find the proper granularity
			 * not to waste our cache all the time */
			STARPU_ASSERT(handle->ops->get_size);
			size_t data_size = handle->ops->get_size(handle);

			STARPU_TRACE_START_MEMRECLAIM(dst_node);
			reclaim_memory(dst_node, 2*data_size, attempts);
			STARPU_TRACE_END_MEMRECLAIM(dst_node);
		}
	} while (!allocated_memory && attempts++ < 2);

	/* we could not handle that capacity miss at all */
	if (!allocated_memory)
		goto nomem;

	register_mem_chunk(handle, dst_node, allocated_memory, 1);

	handle->per_node[dst_node].allocated = 1;
	handle->per_node[dst_node].automatically_allocated = 1;

	return 0;
nomem:
	STARPU_ASSERT(!allocated_memory);
	return -ENOMEM;
}
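
/*
 * Minimal usage sketch, assuming a hypothetical caller (e.g. a driver fetch
 * path) with a handle `handle` and a destination node `node`:
 *
 *	int ret = _starpu_allocate_memory_on_node(handle, node, 1);
 *	if (ret == -ENOMEM) {
 *		// even after reclaiming memory, the node cannot hold the
 *		// data: the caller has to fail or postpone the request
 *	} else {
 *		STARPU_ASSERT(ret == 0);
 *		// the buffer is now allocated and registered in mc_list[node]
 *	}
 */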