/* custom_malloc.c */
  1. #include <dmmlib/dmmlib.h>
  2. #include "dmm_config.h"
  3. #ifdef HAVE_LOCKS
  4. #include "posix_lock.h"
  5. #endif /* HAVE_LOCKS */
  6. #include <dmmlib/initialize_allocator.h>
  7. #include "other.h"
  8. #include "sys_alloc.h"
  9. #include "block_header.h"
  10. #include "dmm_adaptor.h"
  11. void * custom_ahmalloc(allocator_t* allocator, heap_t* heap, size_t size) {
  12. void *ptr;
  13. int heap_id, fixed_list_id, i;
  14. maptable_node_t *current_maptable_node;
  15. void *current_block, *previous_block;
  16. #ifndef WITH_MEMORY_SPACE_AWARENESS
  17. /* Go to the system allocator if none was given */
  18. if(allocator == NULL) {
  19. allocator = &systemallocator;
  20. if(allocator->initialized != true) {
  21. initialize_allocator(allocator);
  22. }
  23. }
  24. if(heap == NULL) {
  25. heap_id = map_thread_heap();
  26. heap = &allocator->heaps[heap_id];
  27. }
  28. #endif /* WITH_MEMORY_SPACE_AWARENESS */
  29. ptr = NULL;
  30. previous_block = NULL;
  31. #ifdef HAVE_LOCKS
  32. posix_lock(heap);
  33. #endif /* HAVE_LOCKS */
  34. /* Perform exact fit to fixed lists */
  35. fixed_list_id = map_size_to_list(heap, req_padding(size));
  36. /* If a fixed list is found, do a first fit */
  37. if(fixed_list_id != -1) {
  38. current_maptable_node = heap->maptable_head;
  39. /* traverse through the maptable node list */
  40. if(fixed_list_id != 0) {
  41. for(i = 1; i < fixed_list_id; i++) {
  42. current_maptable_node = current_maptable_node->next;
  43. }
  44. }
  45. if(current_maptable_node->fixed_list_head != NULL) {
  46. ptr = current_maptable_node->fixed_list_head;
  47. current_maptable_node->fixed_list_head = get_next(ptr);
  48. }
  49. }
  50. if(ptr == NULL) {
  51. /* first fit from free list */
  52. for(current_block = heap->free_list_head; current_block != NULL;
  53. current_block = get_next(current_block)) {
  54. if(get_size(current_block) >= size) {
  55. if(current_block == heap->free_list_head) {
  56. heap->free_list_head = get_next(current_block);
  57. } else {
  58. set_next(previous_block, get_next(current_block));
  59. }
  60. ptr = current_block;
  61. break;
  62. }
  63. previous_block = current_block;
  64. }
  65. }
  66. if(ptr != NULL) {
  67. /* FIXME To be refactored - START */
  68. set_requested_size(ptr, size);
  69. mark_used(ptr);
  70. /* FIXME split to be put here */
  71. /* Update the used blocks list */
  72. set_next(ptr, heap->used_blocks_head);
  73. heap->used_blocks_head = ptr;
  74. /* Begin of Stats */
  75. heap->dmm_stats.live_objects += 1;
  76. heap->dmm_stats.num_malloc += 1;
  77. /* End of Stats */
  78. /* FIXME To be refactored - END */
  79. }
  80. if(ptr == NULL) {
  81. ptr = sys_alloc(allocator, heap, size);
  82. }
  83. /* Refresh the state of the heap allocator if a certain number of
  84. * malloc's has been served already
  85. */
  86. /* TODO Define 50 as a constant */
  87. if(heap->dmm_stats.num_malloc % 50) {
  88. malloc_state_refresh(heap);
  89. }
  90. #ifdef HAVE_LOCKS
  91. posix_unlock(heap);
  92. #endif /* HAVE_LOCKS */
  93. return ptr;
  94. }
  95. /* Currently all the memory space aware allocators are pre-initialized, so
  96. * we do not expect any custom_ahmalloc call without knowing which allocator
  97. * and heap are to be used.
  98. */
  99. #ifndef WITH_MEMORY_SPACE_AWARENESS
  100. void * custom_malloc(size_t size) {
  101. return custom_ahmalloc(NULL, NULL, size);
  102. }
  103. #endif /* WITH_MEMORY_SPACE_AWARENESS */