/* custom_malloc.c — dmmlib custom allocator: custom_ahmalloc / custom_malloc. */
  1. #include "dmmlib.h"
  2. #include <stdbool.h>
  3. #include "dmm_config.h"
  4. #ifdef HAVE_LOCKS
  5. #include "posix_lock.h"
  6. #endif /* HAVE_LOCKS */
  7. #include "dmm_init.h"
  8. #include "other.h"
  9. #include "sys_alloc.h"
  10. #include "block_header.h"
  11. #include "dmm_adaptor.h"
  12. void * custom_ahmalloc(allocator_t* allocator, heap_t* heap, size_t size) {
  13. void *ptr;
  14. int heap_id, fixed_list_id, i;
  15. maptable_node_t *current_maptable_node;
  16. void *current_block, *previous_block;
  17. // Go to the system allocator if none was given
  18. if(allocator == NULL) {
  19. allocator = &systemallocator;
  20. if(allocator->initialized != true) {
  21. dmm_init(allocator);
  22. }
  23. }
  24. if(heap == NULL) {
  25. heap_id = map_thread_heap();
  26. heap = &allocator->heaps[heap_id];
  27. }
  28. ptr = NULL;
  29. previous_block = NULL;
  30. #ifdef HAVE_LOCKS
  31. posix_lock(heap);
  32. #endif /* HAVE_LOCKS */
  33. fixed_list_id = map_size_to_list(heap, req_padding(size));
  34. // If a fixed list is found, do a first fit
  35. if(fixed_list_id != -1) {
  36. current_maptable_node = heap->maptable_head;
  37. // traverse through the maptable node list
  38. if(fixed_list_id != 0) {
  39. for(i = 1; i < fixed_list_id; i++) {
  40. current_maptable_node = current_maptable_node->next;
  41. }
  42. }
  43. if(current_maptable_node->fixed_list_head != NULL) {
  44. ptr = current_maptable_node->fixed_list_head;
  45. current_maptable_node->fixed_list_head = get_next(ptr);
  46. }
  47. }
  48. if(ptr == NULL) {
  49. // first fit from free list
  50. for(current_block = heap->free_list_head; current_block != NULL;
  51. current_block = get_next(current_block)) {
  52. if(get_size(current_block) >= size) {
  53. if(current_block == heap->free_list_head) {
  54. heap->free_list_head = get_next(ptr);
  55. } else {
  56. set_next(previous_block, get_next(current_block));
  57. }
  58. ptr = current_block;
  59. break;
  60. }
  61. previous_block = current_block;
  62. }
  63. }
  64. if(ptr == NULL) {
  65. ptr = sys_alloc(heap, size);
  66. }
  67. set_requested_size(ptr, size);
  68. set_next(ptr, heap->used_blocks_head);
  69. heap->used_blocks_head = ptr;
  70. // Begin of Stats
  71. heap->dmm_stats.live_objects += 1;
  72. heap->dmm_stats.num_malloc += 1;
  73. // End of Stats
  74. // Refresh the state of the heap allocator if a certain number of
  75. // malloc's has been served already
  76. // TODO Define 50 as a constant
  77. if(heap->dmm_stats.num_malloc % 50) {
  78. state_refresh(heap);
  79. }
  80. #ifdef HAVE_LOCKS
  81. posix_unlock(heap);
  82. #endif /* HAVE_LOCKS */
  83. return ptr;
  84. }
  85. void * custom_malloc(size_t size) {
  86. return custom_ahmalloc(NULL, NULL, size);
  87. }