Change indentation settings.

Ioannis Koutras, 13 years ago
Commit 11cbf7e7d0
16 changed files with 611 additions and 610 deletions
  1. Doxyfile          +1    -1
  2. Makefile          +3    -3
  3. block_header.c    +25   -25
  4. coalesce.c        +36   -36
  5. custom_free.c     +30   -30
  6. custom_malloc.c   +73   -73
  7. dmm_adaptor.c     +41   -42
  8. dmm_init.c        +78   -78
  9. heap.h            +2    -2
  10. larson.c         +245  -245
  11. lran2.h          +19   -19
  12. other.c          +22   -22
  13. other.h          +1    -0
  14. posix_lock.c     +5    -4
  15. sys_alloc.c      +16   -16
  16. test.c           +14   -14

+ 1 - 1
Doxyfile

@@ -654,7 +654,7 @@ EXCLUDE_SYMLINKS       = NO
 # against the file with absolute path, so to exclude all test directories
 # for example use the pattern */test/*
 
-EXCLUDE_PATTERNS       =
+				       EXCLUDE_PATTERNS       =
 
 # The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names
 # (namespaces, classes, functions, etc.) that should be excluded from the

+ 3 - 3
Makefile

@@ -1,8 +1,8 @@
 CC=gcc
 WARNINGS := -Wall -Wextra -pedantic -Wshadow -Wpointer-arith -Wcast-align \
-	          -Wwrite-strings -Wmissing-prototypes -Wmissing-declarations \
-	          -Wredundant-decls -Wnested-externs -Winline -Wno-long-long \
-	          -Wuninitialized -Wconversion -Wstrict-prototypes
+	-Wwrite-strings -Wmissing-prototypes -Wmissing-declarations \
+	-Wredundant-decls -Wnested-externs -Winline -Wno-long-long \
+	-Wuninitialized -Wconversion -Wstrict-prototypes
 CFLAGS := -g -O -std=c99 $(WARNINGS)
 
 OBJ = posix_lock.o other.o block_header.o sys_alloc.o dmm_init.o coalesce.o dmm_adaptor.o custom_malloc.o custom_free.o

+ 25 - 25
block_header.c

@@ -3,57 +3,57 @@
 block_header_t * get_header(void *ptr);
 
 block_header_t * get_header(void *ptr) {
-	return (block_header_t *) ((char *) ptr - HEADER_SIZE);
+    return (block_header_t *) ((char *) ptr - HEADER_SIZE);
 }
 
 void * get_next(void *ptr) {
-	return get_header(ptr)->next;
+    return get_header(ptr)->next;
 }
 
 size_t get_size(void *ptr) {
-	return get_header(ptr)->size;
+    return get_header(ptr)->size;
 }
 
 void set_size(void *ptr, size_t size) {
-	get_header(ptr)->size = size << 1;
+    get_header(ptr)->size = size << 1;
 }
 
 void set_requested_size(void *ptr, size_t size) {
-	get_header(ptr)->requested_size = size;
+    get_header(ptr)->requested_size = size;
 }
 
 void set_next(void *ptr, void *next_block) {
-	get_header(ptr)->next = next_block;
+    get_header(ptr)->next = next_block;
 }
 
 bool is_previous_free(void *ptr) {
-	return (bool) (get_header(ptr)->previous_size & 1);
+    return (bool) (get_header(ptr)->previous_size & 1);
 }
 
 size_t get_previous_size(void *ptr) {
-	return get_header(ptr)->previous_size >> 1;
+    return get_header(ptr)->previous_size >> 1;
 }
 
 void * get_previous(void *ptr) {
-	return (void *) ((char *) ptr - get_previous_size(ptr));
+    return (void *) ((char *) ptr - get_previous_size(ptr));
 }
 
 void remove_block(void *block, void *starting_node) {
-	void *current_node, *previous_node;
-
-	// If the block to be removed is the head of the list, then just point
-	// the next block as head.
-	if(current_node == starting_node) {
-		starting_node = get_next(block);
-	// Else traverse through the list until the memory block is found.
-	} else {
-		for(current_node = starting_node; current_node != NULL; 
-				current_node = get_next(current_node)) {
-			if(current_node == block) {
-				set_next(previous_node, get_next(block));
-			}
-			previous_node = current_node;
-		}
-	}
+    void *current_node, *previous_node;
+
+    // If the block to be removed is the head of the list, then just point
+    // the next block as head.
+    if(current_node == starting_node) {
+        starting_node = get_next(block);
+        // Else traverse through the list until the memory block is found.
+    } else {
+        for(current_node = starting_node; current_node != NULL; 
+                current_node = get_next(current_node)) {
+            if(current_node == block) {
+                set_next(previous_node, get_next(block));
+            }
+            previous_node = current_node;
+        }
+    }
 }
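
block_header.c packs a status flag into the low bit of the stored sizes: set_size() shifts the value left by one, and the low bit of previous_size tells is_previous_free() whether the block in front of this one is free, while get_previous_size() shifts it back out. As a stand-alone illustration of the unlinking that remove_block() performs, the sketch below assumes the list head is passed by reference (a double pointer), which is not the signature used in this diff, so the caller's head pointer can actually move when the head block itself is removed.

    /* Sketch only: unlink `block` from a singly linked block list.
     * get_next()/set_next() are the helpers defined above. */
    static void remove_block_sketch(void *block, void **list_head) {
        void *current, *previous = NULL;

        if (*list_head == block) {              /* block is the head: advance the head */
            *list_head = get_next(block);
            return;
        }
        for (current = *list_head; current != NULL; current = get_next(current)) {
            if (current == block) {
                set_next(previous, get_next(block));   /* bypass the block */
                return;
            }
            previous = current;
        }
    }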
 

+ 36 - 36
coalesce.c

@@ -3,46 +3,46 @@
 #include "other.h"
 
 void * coalesce(void *ptr, heap_t *heap) {
-	void *prev;
-	int fixed_list_id, i;
-	maptable_node_t *current_maptable_node;
+    void *prev;
+    int fixed_list_id, i;
+    maptable_node_t *current_maptable_node;
 
-	// If there is a negative value on max_coalesce_size, then don't do
-	// anything.
-	// FIXME To be moved in custom_free()
-	if(heap->dmm_knobs.max_coalesce_size < 0) {
-		return ptr;
-	}
+    // If there is a negative value on max_coalesce_size, then don't do
+    // anything.
+    // FIXME To be moved in custom_free()
+    if(heap->dmm_knobs.max_coalesce_size < 0) {
+        return ptr;
+    }
 
-	// Try to coalesce with the previous memory block
-	if(is_previous_free(ptr)) {
-		prev = get_previous(ptr);
+    // Try to coalesce with the previous memory block
+    if(is_previous_free(ptr)) {
+        prev = get_previous(ptr);
 
-		// Check if it is a block of a fixed list
-		fixed_list_id = map_size_to_list(heap, get_size(prev));
-		if(fixed_list_id != -1) {
-			// If it is, find the fixed list and remove the block
-			current_maptable_node = heap->maptable_head;
-			if(fixed_list_id != 0) {
-				for(i = 1; i < fixed_list_id; i++) {
-					current_maptable_node =
-						current_maptable_node->next;
-				}
-			}
-			remove_block(ptr, current_maptable_node->fixed_list_head);
-		} else {
-			// Or it is a block from the free list, so remove it
-			// from there
-			remove_block(ptr, heap->free_list_head);
-		}
+        // Check if it is a block of a fixed list
+        fixed_list_id = map_size_to_list(heap, get_size(prev));
+        if(fixed_list_id != -1) {
+            // If it is, find the fixed list and remove the block
+            current_maptable_node = heap->maptable_head;
+            if(fixed_list_id != 0) {
+                for(i = 1; i < fixed_list_id; i++) {
+                    current_maptable_node =
+                        current_maptable_node->next;
+                }
+            }
+            remove_block(ptr, current_maptable_node->fixed_list_head);
+        } else {
+            // Or it is a block from the free list, so remove it
+            // from there
+            remove_block(ptr, heap->free_list_head);
+        }
 
-		// Set the new size
-		// Note: the rest of the header variables will be set on free().
-		set_size(prev, get_size(prev) + get_size(ptr) + HEADER_SIZE);
+        // Set the new size
+        // Note: the rest of the header variables will be set on free().
+        set_size(prev, get_size(prev) + get_size(ptr) + HEADER_SIZE);
 
-		return prev;
-	} else {
-		return ptr;
-	}
+        return prev;
+    } else {
+        return ptr;
+    }
 }
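
coalesce() merges the block being freed with its left neighbour whenever that neighbour is free: the neighbour is first unlinked from whichever list holds it (a fixed list if its size maps to one, otherwise the general free list), and its size then grows to absorb the freed block plus one header. With made-up numbers, taking HEADER_SIZE as 16 bytes purely for the arithmetic:

    /* prev is 64 bytes free, the freed block is 32 bytes, HEADER_SIZE assumed 16: */
    /* set_size(prev, 64 + 32 + 16)  ->  prev becomes one 112-byte block           */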
 

+ 30 - 30
custom_free.c

@@ -6,43 +6,43 @@
 #include "block_header.h"
 
 void custom_free(heap_t* heap, void *ptr) {
-	size_t size;
-	int fixed_list_id, i;
-	maptable_node_t *current_maptable_node;
+    size_t size;
+    int fixed_list_id, i;
+    maptable_node_t *current_maptable_node;
 
-	size = get_size(ptr);
-	fixed_list_id = map_size_to_list(heap, size);
+    size = get_size(ptr);
+    fixed_list_id = map_size_to_list(heap, size);
 
 #ifdef HAVE_LOCKS	
-	posix_lock(heap);
+    posix_lock(heap);
 #endif /* HAVE_LOCKS */
 
-	if(fixed_list_id != -1) {
-		current_maptable_node = heap->maptable_head;		
-		if(fixed_list_id == 0) {
-			set_next(ptr, current_maptable_node->fixed_list_head);
-			current_maptable_node->fixed_list_head = ptr;
-		} else {
-			for(i = 1; i < fixed_list_id; i++) {
-				current_maptable_node = current_maptable_node->next;
-			}
-			set_next(ptr, current_maptable_node->fixed_list_head);
-			current_maptable_node->fixed_list_head = ptr;
-		}
-	} else { // put it in the free list
-		set_next(ptr, heap->free_list_head);
-		heap->free_list_head = ptr;
-	}
-
-	// Begin of Stats
-
-	heap->dmm_stats.live_objects -= 1;
-	heap->dmm_stats.num_free += 1;
-
-	// End of Stats
+    if(fixed_list_id != -1) {
+        current_maptable_node = heap->maptable_head;		
+        if(fixed_list_id == 0) {
+            set_next(ptr, current_maptable_node->fixed_list_head);
+            current_maptable_node->fixed_list_head = ptr;
+        } else {
+            for(i = 1; i < fixed_list_id; i++) {
+                current_maptable_node = current_maptable_node->next;
+            }
+            set_next(ptr, current_maptable_node->fixed_list_head);
+            current_maptable_node->fixed_list_head = ptr;
+        }
+    } else { // put it in the free list
+        set_next(ptr, heap->free_list_head);
+        heap->free_list_head = ptr;
+    }
+
+    // Begin of Stats
+
+    heap->dmm_stats.live_objects -= 1;
+    heap->dmm_stats.num_free += 1;
+
+    // End of Stats
 
 #ifdef HAVE_LOCKS
-	posix_unlock(heap);
+    posix_unlock(heap);
 #endif /* HAVE_LOCKS */
 }
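
custom_free() never returns memory to the operating system: the block is pushed onto the head of the matching fixed list when its size maps to one, and onto the general free list otherwise, after which the live-object and free counters are updated. The push-to-head idiom used in both branches, taken on its own, is simply:

    set_next(ptr, heap->free_list_head);   /* freed block points at the old head    */
    heap->free_list_head = ptr;            /* the freed block becomes the new head  */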
 

+ 73 - 73
custom_malloc.c

@@ -8,95 +8,95 @@
 #include "dmm_adaptor.h"
 
 void * custom_malloc(heap_t* heap, size_t size) {
-	void *ptr;
-	int fixed_list_id, i, found;
-	maptable_node_t *current_maptable_node;
-	void *current_block, *previous_block;
+    void *ptr;
+    int fixed_list_id, i, found;
+    maptable_node_t *current_maptable_node;
+    void *current_block, *previous_block;
 
-	ptr = NULL;
-	previous_block = NULL;
+    ptr = NULL;
+    previous_block = NULL;
 
 #ifdef HAVE_LOCKS
-	posix_lock(heap);
+    posix_lock(heap);
 #endif /* HAVE_LOCKS */
 
-	fixed_list_id = map_size_to_list(heap, size);
-	
-	// If a fixed list is found, do a first fit
-	if(fixed_list_id != -1) {
-		current_maptable_node = heap->maptable_head;
-		// traverse through the maptable node list
-		if(fixed_list_id != 0) {
-			for(i = 1; i < fixed_list_id; i++) {
-				current_maptable_node = current_maptable_node->next;
-			}
-		}
-		if(current_maptable_node->fixed_list_head != NULL) {
-			ptr = current_maptable_node->fixed_list_head;
-			current_maptable_node->fixed_list_head = get_next(ptr);
-			set_requested_size(ptr, size);
-			set_next(ptr, heap->used_blocks_head);
-		 	heap->used_blocks_head = ptr;
-		}
-	}
-
-	if(ptr == NULL) {
-		found = 0;
-
-		// first fit from free list
-		for(current_block = heap->free_list_head; current_block != NULL;
-				current_block = get_next(current_block)) {
-			if(get_size(current_block) >= size) {
-				ptr = current_block;
-				heap->used_blocks_head = ptr;
-				if(current_block != heap->free_list_head) {
-					set_next(previous_block, get_next(ptr));
-				} else {
-					heap->free_list_head = get_next(ptr);
-				}
-				set_requested_size(ptr, size);
-				set_next(ptr, heap->used_blocks_head);
-
-				// Begin of Stats
-
-				heap->dmm_stats.live_objects += 1;
-				heap->dmm_stats.num_malloc += 1;
-
-				// End of Stats
+    fixed_list_id = map_size_to_list(heap, size);
+
+    // If a fixed list is found, do a first fit
+    if(fixed_list_id != -1) {
+        current_maptable_node = heap->maptable_head;
+        // traverse through the maptable node list
+        if(fixed_list_id != 0) {
+            for(i = 1; i < fixed_list_id; i++) {
+                current_maptable_node = current_maptable_node->next;
+            }
+        }
+        if(current_maptable_node->fixed_list_head != NULL) {
+            ptr = current_maptable_node->fixed_list_head;
+            current_maptable_node->fixed_list_head = get_next(ptr);
+            set_requested_size(ptr, size);
+            set_next(ptr, heap->used_blocks_head);
+            heap->used_blocks_head = ptr;
+        }
+    }
+
+    if(ptr == NULL) {
+        found = 0;
+
+        // first fit from free list
+        for(current_block = heap->free_list_head; current_block != NULL;
+                current_block = get_next(current_block)) {
+            if(get_size(current_block) >= size) {
+                ptr = current_block;
+                heap->used_blocks_head = ptr;
+                if(current_block != heap->free_list_head) {
+                    set_next(previous_block, get_next(ptr));
+                } else {
+                    heap->free_list_head = get_next(ptr);
+                }
+                set_requested_size(ptr, size);
+                set_next(ptr, heap->used_blocks_head);
+
+                // Begin of Stats
+
+                heap->dmm_stats.live_objects += 1;
+                heap->dmm_stats.num_malloc += 1;
+
+                // End of Stats
 
 #ifdef HAVE_LOCKS
-				posix_unlock(heap);
+                posix_unlock(heap);
 #endif /* HAVE_LOCKS */
-				return ptr;
-			}
-			previous_block = current_block;
-		}
+                return ptr;
+            }
+            previous_block = current_block;
+        }
 
-		if(!found) {
-			ptr = sys_alloc(heap, size);
-			heap->dmm_stats.mem_allocated += req_padding(size);
-			heap->dmm_stats.mem_requested += size;
-		}
+        if(!found) {
+            ptr = sys_alloc(heap, size);
+            heap->dmm_stats.mem_allocated += req_padding(size);
+            heap->dmm_stats.mem_requested += size;
+        }
 
-	}
+    }
 
-	// Begin of Stats
+    // Begin of Stats
 
-	heap->dmm_stats.live_objects += 1;
-	heap->dmm_stats.num_malloc += 1;
+    heap->dmm_stats.live_objects += 1;
+    heap->dmm_stats.num_malloc += 1;
 
-	// End of Stats
+    // End of Stats
 
-	// Refresh the state of the heap allocator if a certain number of
-	// malloc's has been served already
-	// TODO Define 50 as a constant
-	if(heap->dmm_stats.num_malloc % 50) {
-		state_refresh(heap);
-	}
+    // Refresh the state of the heap allocator if a certain number of
+    // malloc's has been served already
+    // TODO Define 50 as a constant
+    if(heap->dmm_stats.num_malloc % 50) {
+        state_refresh(heap);
+    }
 
 #ifdef HAVE_LOCKS
-	posix_unlock(heap);
+    posix_unlock(heap);
 #endif /* HAVE_LOCKS */
-	return ptr;
+    return ptr;
 }
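
custom_malloc() tries three sources in order: an exact-size fixed list when map_size_to_list() finds one, then a first fit over the general free list, and finally fresh memory from sys_alloc(); each call also checks the running num_malloc count against the hard-coded 50 to decide whether state_refresh() should retune the knobs. A hypothetical call sequence, following the same pattern as test.c further down:

    heap_t *myheap = &myallocator->heaps[map_thread_heap()];
    void *a = custom_malloc(myheap, (size_t) 64);    /* 64 is a fixed-list size: exact-fit pop   */
    void *b = custom_malloc(myheap, (size_t) 100);   /* no 100-byte list: free list or sys_alloc */
    custom_free(myheap, a);
    custom_free(myheap, b);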
 

+ 41 - 42
dmm_adaptor.c

@@ -2,52 +2,51 @@
 #include "heap.h"
 
 void state_refresh(heap_t *heap) {
-	float fragmentation;
-	knob_state_t state;
+    float fragmentation;
+    knob_state_t state;
 
-	fragmentation = (float) heap->dmm_stats.mem_allocated / 
-		(float)	heap->dmm_stats.mem_requested - 1.0;
+    fragmentation = (float) heap->dmm_stats.mem_allocated / 
+        (float)	heap->dmm_stats.mem_requested - 1.0;
 
-	// TODO Constant for the threshold, contraints for the memory threshold
-	if(fragmentation <= 0.05) {
-		state = 0;
-		set_fragmentation_params(heap, state);
-	}
+    // TODO Constant for the threshold, contraints for the memory threshold
+    if(fragmentation <= 0.05) {
+        state = 0;
+        set_fragmentation_params(heap, state);
+    }
 }
 
 void set_fragmentation_params(heap_t *heap, knob_state_t state) {
-	switch(state) {
-		default :
-			heap->dmm_knobs.max_coalesce_size = -1;
-			heap->dmm_knobs.min_split_size = 0;
-			heap->dmm_knobs.empty_threshold = 1.5;
-			break;
-		case 1 :
-			heap->dmm_knobs.max_coalesce_size = 20;
-			heap->dmm_knobs.min_split_size = 400;
-			heap->dmm_knobs.empty_threshold = 1200;
-			break;
-		case 2 :
-			heap->dmm_knobs.max_coalesce_size = 40;
-			heap->dmm_knobs.min_split_size = 800;
-			heap->dmm_knobs.empty_threshold = 1000;
-			break;
-		case 3 :
-			heap->dmm_knobs.max_coalesce_size = 60;
-			heap->dmm_knobs.min_split_size = 1200;
-			heap->dmm_knobs.empty_threshold = 800;
-			break;
-		case 4 :
-			heap->dmm_knobs.max_coalesce_size = 80;
-			heap->dmm_knobs.min_split_size = 1600;
-			heap->dmm_knobs.empty_threshold = 600;
-			break;
-		case 5 :
-			heap->dmm_knobs.max_coalesce_size = 100;
-			heap->dmm_knobs.min_split_size = 2000;
-			heap->dmm_knobs.empty_threshold = 300;
-			break;
-	}
+    switch(state) {
+        default :
+            heap->dmm_knobs.max_coalesce_size = -1;
+            heap->dmm_knobs.min_split_size = 0;
+            heap->dmm_knobs.empty_threshold = 1.5;
+            break;
+        case 1 :
+            heap->dmm_knobs.max_coalesce_size = 20;
+            heap->dmm_knobs.min_split_size = 400;
+            heap->dmm_knobs.empty_threshold = 1200;
+            break;
+        case 2 :
+            heap->dmm_knobs.max_coalesce_size = 40;
+            heap->dmm_knobs.min_split_size = 800;
+            heap->dmm_knobs.empty_threshold = 1000;
+            break;
+        case 3 :
+            heap->dmm_knobs.max_coalesce_size = 60;
+            heap->dmm_knobs.min_split_size = 1200;
+            heap->dmm_knobs.empty_threshold = 800;
+            break;
+        case 4 :
+            heap->dmm_knobs.max_coalesce_size = 80;
+            heap->dmm_knobs.min_split_size = 1600;
+            heap->dmm_knobs.empty_threshold = 600;
+            break;
+        case 5 :
+            heap->dmm_knobs.max_coalesce_size = 100;
+            heap->dmm_knobs.min_split_size = 2000;
+            heap->dmm_knobs.empty_threshold = 300;
+            break;
+    }
 }
 
-
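
state_refresh() measures fragmentation as the relative padding overhead, mem_allocated / mem_requested - 1.0, and applies the state-0 knob settings while that ratio stays at or below the hard-coded 0.05 threshold. With illustrative numbers:

    /* mem_allocated = 1050, mem_requested = 1000                         */
    /* fragmentation = 1050.0 / 1000.0 - 1.0 = 0.05  ->  state 0 applied  */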

+ 78 - 78
dmm_init.c

@@ -1,4 +1,3 @@
-#define _BSD_SOURCE
 #include <unistd.h>
 #ifdef HAVE_LOCKS
 #include <pthread.h>
@@ -6,91 +5,92 @@
 #include "dmm_init.h"
 
 allocator_t * dmm_init(void) {
-	int i;
-	allocator_t *main_allocator;
-	heap_t *current_heap;
-	maptable_node_t *maptablenode;
+    int i;
+    allocator_t *main_allocator;
+    heap_t *current_heap;
+    maptable_node_t *maptablenode;
 
-	main_allocator = (allocator_t *) sbrk((int) sizeof(allocator_t));
+    /* FIXME Replace sbrk with a library call */
+    main_allocator = (allocator_t *) sbrk((int) sizeof(allocator_t));
 
-	for(i = 0; i < NUM_HEAPS; i++) {
-		main_allocator->heaps[i].maptable_head = NULL;
-		main_allocator->heaps[i].free_list_head = NULL;
-		main_allocator->heaps[i].used_blocks_head = NULL;
-		main_allocator->heaps[i].rov_ptr = NULL;
-		main_allocator->heaps[i].num_objects = 0;
-		main_allocator->heaps[i].dmm_stats.mem_allocated = 0;
-		main_allocator->heaps[i].dmm_stats.mem_requested = 0;
-		main_allocator->heaps[i].dmm_stats.live_objects = 0;
-		main_allocator->heaps[i].dmm_stats.num_malloc = 0;
-		main_allocator->heaps[i].dmm_stats.num_free = 0;
+    for(i = 0; i < NUM_HEAPS; i++) {
+        main_allocator->heaps[i].maptable_head = NULL;
+        main_allocator->heaps[i].free_list_head = NULL;
+        main_allocator->heaps[i].used_blocks_head = NULL;
+        main_allocator->heaps[i].rov_ptr = NULL;
+        main_allocator->heaps[i].num_objects = 0;
+        main_allocator->heaps[i].dmm_stats.mem_allocated = 0;
+        main_allocator->heaps[i].dmm_stats.mem_requested = 0;
+        main_allocator->heaps[i].dmm_stats.live_objects = 0;
+        main_allocator->heaps[i].dmm_stats.num_malloc = 0;
+        main_allocator->heaps[i].dmm_stats.num_free = 0;
 
-		// Knobs initialization
-		main_allocator->heaps[i].dmm_knobs.max_coalesce_size = -1;
-		// FIXME Create a constant for the initial value of the next
-		// variables
-		main_allocator->heaps[i].dmm_knobs.frag_threshold = 1.0;
-		main_allocator->heaps[i].dmm_knobs.mem_threshold = 17000;
+        // Knobs initialization
+        main_allocator->heaps[i].dmm_knobs.max_coalesce_size = -1;
+        // FIXME Create a constant for the initial value of the next
+        // variables
+        main_allocator->heaps[i].dmm_knobs.frag_threshold = 1.0;
+        main_allocator->heaps[i].dmm_knobs.mem_threshold = 17000;
 
 #ifdef HAVE_LOCKS
-		pthread_mutex_init(&main_allocator->heaps[i].mutex, NULL);
+        pthread_mutex_init(&main_allocator->heaps[i].mutex, NULL);
 #endif /* HAVE_LOCKS */
-	}
+    }
 
-	// Custom number of fixed lists and their initialization
-	// 2 first ones with 32, 64, 128 and 256 (4 fixed lists per heap)
-	// 2 last ones with 64 and 256 (2 fixed lists per heap)
-	// 2 * 4 + 2 * 2 = 12 maptable nodes
-	current_heap = &main_allocator->heaps[0];
-	maptablenode = (maptable_node_t *) sbrk((int)(12*(sizeof(maptable_node_t))));
+    // Custom number of fixed lists and their initialization
+    // 2 first ones with 32, 64, 128 and 256 (4 fixed lists per heap)
+    // 2 last ones with 64 and 256 (2 fixed lists per heap)
+    // 2 * 4 + 2 * 2 = 12 maptable nodes
+    current_heap = &main_allocator->heaps[0];
+    maptablenode = (maptable_node_t *) sbrk((int)(12*(sizeof(maptable_node_t))));
 
-	maptablenode->size = 32;
-	maptablenode->fixed_list_head = NULL;
-	maptablenode->next = maptablenode+1;
-	current_heap->maptable_head = maptablenode;
-	(maptablenode+1)->size = 64;
-	(maptablenode+1)->fixed_list_head = NULL;
-	(maptablenode+1)->next = maptablenode+2;
-	(maptablenode+2)->size = 128;
-	(maptablenode+2)->fixed_list_head = NULL;
-	(maptablenode+2)->next = maptablenode+3;
-	(maptablenode+3)->size = 256;
-	(maptablenode+3)->fixed_list_head = NULL;
-	(maptablenode+3)->next = NULL;
-	current_heap = &main_allocator->heaps[1];
-	maptablenode += 4;
-	maptablenode->size = 32;
-	maptablenode->fixed_list_head = NULL;
-	maptablenode->next = maptablenode+1;
-	current_heap->maptable_head = maptablenode;
-	(maptablenode+1)->size = 64;
-	(maptablenode+1)->fixed_list_head = NULL;
-	(maptablenode+1)->next = maptablenode+2;
-	(maptablenode+2)->size = 128;
-	(maptablenode+2)->fixed_list_head = NULL;
-	(maptablenode+2)->next = maptablenode+3;
-	(maptablenode+3)->size = 256;
-	(maptablenode+3)->fixed_list_head = NULL;
-	(maptablenode+3)->next = NULL;
-	current_heap = &main_allocator->heaps[2];
-	maptablenode += 4;
-	maptablenode->size = 64;
-	maptablenode->fixed_list_head = NULL;
-	maptablenode->next = maptablenode+1;
-	current_heap->maptable_head = maptablenode;
-	(maptablenode+1)->size = 256;
-	(maptablenode+1)->fixed_list_head = NULL;
-	(maptablenode+1)->next = NULL;
-	current_heap = &main_allocator->heaps[3];
-	maptablenode += 2;
-	maptablenode->size = 64;
-	maptablenode->fixed_list_head = NULL;
-	maptablenode->next = maptablenode+1;
-	current_heap->maptable_head = maptablenode;
-	(maptablenode+1)->size = 256;
-	(maptablenode+1)->fixed_list_head = NULL;
-	(maptablenode+1)->next = NULL;
+    maptablenode->size = 32;
+    maptablenode->fixed_list_head = NULL;
+    maptablenode->next = maptablenode+1;
+    current_heap->maptable_head = maptablenode;
+    (maptablenode+1)->size = 64;
+    (maptablenode+1)->fixed_list_head = NULL;
+    (maptablenode+1)->next = maptablenode+2;
+    (maptablenode+2)->size = 128;
+    (maptablenode+2)->fixed_list_head = NULL;
+    (maptablenode+2)->next = maptablenode+3;
+    (maptablenode+3)->size = 256;
+    (maptablenode+3)->fixed_list_head = NULL;
+    (maptablenode+3)->next = NULL;
+    current_heap = &main_allocator->heaps[1];
+    maptablenode += 4;
+    maptablenode->size = 32;
+    maptablenode->fixed_list_head = NULL;
+    maptablenode->next = maptablenode+1;
+    current_heap->maptable_head = maptablenode;
+    (maptablenode+1)->size = 64;
+    (maptablenode+1)->fixed_list_head = NULL;
+    (maptablenode+1)->next = maptablenode+2;
+    (maptablenode+2)->size = 128;
+    (maptablenode+2)->fixed_list_head = NULL;
+    (maptablenode+2)->next = maptablenode+3;
+    (maptablenode+3)->size = 256;
+    (maptablenode+3)->fixed_list_head = NULL;
+    (maptablenode+3)->next = NULL;
+    current_heap = &main_allocator->heaps[2];
+    maptablenode += 4;
+    maptablenode->size = 64;
+    maptablenode->fixed_list_head = NULL;
+    maptablenode->next = maptablenode+1;
+    current_heap->maptable_head = maptablenode;
+    (maptablenode+1)->size = 256;
+    (maptablenode+1)->fixed_list_head = NULL;
+    (maptablenode+1)->next = NULL;
+    current_heap = &main_allocator->heaps[3];
+    maptablenode += 2;
+    maptablenode->size = 64;
+    maptablenode->fixed_list_head = NULL;
+    maptablenode->next = maptablenode+1;
+    current_heap->maptable_head = maptablenode;
+    (maptablenode+1)->size = 256;
+    (maptablenode+1)->fixed_list_head = NULL;
+    (maptablenode+1)->next = NULL;
 
-	return main_allocator;
+    return main_allocator;
 }
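
dmm_init() reserves one maptable_node_t per fixed list (4 + 4 + 2 + 2 = 12 in total) and wires them up by hand with pointer arithmetic. The same wiring for one of the four-bin heaps can be written as a loop; this is a sketch of equivalent logic, not code taken from the project:

    /* Sketch: initialise the four bins (32/64/128/256) of one heap. */
    static const unsigned int bin_sizes[4] = { 32, 64, 128, 256 };
    int j;
    for (j = 0; j < 4; j++) {
        maptablenode[j].size = bin_sizes[j];
        maptablenode[j].fixed_list_head = NULL;
        maptablenode[j].next = (j < 3) ? &maptablenode[j + 1] : NULL;
    }
    current_heap->maptable_head = maptablenode;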
 

+ 2 - 2
heap.h

@@ -37,9 +37,9 @@ typedef struct dmmstats_s {
  */
 typedef struct dmmknobs_s {
 	int32_t max_coalesce_size; /**< maximum coalesce size; -1 if coalescing
-				      is not supported */
+				     is not supported */
 	float frag_threshold; /**< fragmentation threshold to enable
-					 coalescing or not */ 
+				coalescing or not */ 
 	uint32_t mem_threshold; /**< memory size threshold */
 	uint32_t min_split_size; // FIXME to be investigated if needed
 	float empty_threshold; // FIXME to be investigated if needed

+ 245 - 245
larson.c

@@ -25,24 +25,24 @@ typedef void * VoidFunction (void *);
 
 typedef struct thr_data {
 
-	int threadno;
-	int NumBlocks;
-	long seed;
+    int threadno;
+    int NumBlocks;
+    long seed;
 
-	int min_size;
-	int max_size;
+    int min_size;
+    int max_size;
 
-	char **array;
-	int *blksize;
-	int asize;
+    char **array;
+    int *blksize;
+    int asize;
 
-	int cAllocs;
-	int cFrees;
-	int cThreads;
-	int cBytesAlloced;
+    int cAllocs;
+    int cFrees;
+    int cThreads;
+    int cBytesAlloced;
 
-	volatile int finished;
-	struct lran2_st rgen;
+    volatile int finished;
+    struct lran2_st rgen;
 
 } thread_data;
 
@@ -55,275 +55,275 @@ char *blkp[MAX_BLOCKS];
 int blksize[MAX_BLOCKS];
 
 static void QueryPerformanceFrequency(long *x) {
-	*x = 1000000L;
+    *x = 1000000L;
 }
 
 static void QueryPerformanceCounter (long *x) {
-  struct timezone tz;
-  struct timeval tv;
-  gettimeofday(&tv, &tz);
-  *x = tv.tv_sec * 1000000L + tv.tv_usec;
+    struct timezone tz;
+    struct timeval tv;
+    gettimeofday(&tv, &tz);
+    *x = tv.tv_sec * 1000000L + tv.tv_usec;
 }
 
 static void Sleep(long x) {
-  //  printf ("sleeping for %ld seconds.\n", x/1000);
-  sleep((unsigned int) (x/1000));
+    //  printf ("sleeping for %ld seconds.\n", x/1000);
+    sleep((unsigned int) (x/1000));
 }
 
 static void _beginthread(VoidFunction x, void * z) {
-	pthread_t pt;
-	pthread_attr_t pa;
-	pthread_attr_init (&pa);
+    pthread_t pt;
+    pthread_attr_t pa;
+    pthread_attr_init (&pa);
 
-	//  printf ("creating a thread.\n");
-	pthread_create(&pt, &pa, x, z);
+    //  printf ("creating a thread.\n");
+    pthread_create(&pt, &pa, x, z);
 }
 
 static void warmup(char **blkp, int num_chunks) {
-	int cblks;
-	int victim;
-	int blk_size;
-	LPVOID tmp;
-
-	heap_t *myheap;
-	int heap_id;
-
-	heap_id = map_thread_heap();
-	myheap = &myallocator->heaps[heap_id];	
-	
-	for(cblks = 0; cblks < num_chunks; cblks++) {
-		blk_size = min_size + lran2(&rgen) % (max_size - min_size);
-		blkp[cblks] = (char *) custom_malloc(myheap, (size_t) blk_size);
-		blksize[cblks] = blk_size;
-		assert(blkp[cblks] != NULL);
-	}
-
-	/* generate a random permutation of the chunks */
-	for(cblks = num_chunks; cblks > 0 ; cblks--) {
-		victim = lran2(&rgen) % cblks;
-		tmp = blkp[victim];
-		blkp[victim]  = blkp[cblks-1];
-		blkp[cblks-1] = (char *) tmp;
-	}
-
-	for(cblks=0; cblks < 4 * num_chunks; cblks++) {
-		victim = lran2(&rgen) % num_chunks;
-		custom_free(myheap, blkp[victim]);
-
-		blk_size = min_size + lran2(&rgen) % (max_size - min_size);
-		blkp[victim] = (char *) custom_malloc(myheap, (size_t) blk_size);
-		blksize[victim] = blk_size;
-		assert(blkp[victim] != NULL);
-	}
+    int cblks;
+    int victim;
+    int blk_size;
+    LPVOID tmp;
+
+    heap_t *myheap;
+    int heap_id;
+
+    heap_id = map_thread_heap();
+    myheap = &myallocator->heaps[heap_id];	
+
+    for(cblks = 0; cblks < num_chunks; cblks++) {
+        blk_size = min_size + lran2(&rgen) % (max_size - min_size);
+        blkp[cblks] = (char *) custom_malloc(myheap, (size_t) blk_size);
+        blksize[cblks] = blk_size;
+        assert(blkp[cblks] != NULL);
+    }
+
+    /* generate a random permutation of the chunks */
+    for(cblks = num_chunks; cblks > 0 ; cblks--) {
+        victim = lran2(&rgen) % cblks;
+        tmp = blkp[victim];
+        blkp[victim]  = blkp[cblks-1];
+        blkp[cblks-1] = (char *) tmp;
+    }
+
+    for(cblks=0; cblks < 4 * num_chunks; cblks++) {
+        victim = lran2(&rgen) % num_chunks;
+        custom_free(myheap, blkp[victim]);
+
+        blk_size = min_size + lran2(&rgen) % (max_size - min_size);
+        blkp[victim] = (char *) custom_malloc(myheap, (size_t) blk_size);
+        blksize[victim] = blk_size;
+        assert(blkp[victim] != NULL);
+    }
 }
 
 static void * exercise_heap( void *pinput) {
-	thread_data  *pdea;
-	int           cblks = 0;
-	int           victim;
-	long          blk_size;
-	int           range;
+    thread_data  *pdea;
+    int           cblks = 0;
+    int           victim;
+    long          blk_size;
+    int           range;
 
-	heap_t *myheap;
-	int heap_id;
+    heap_t *myheap;
+    int heap_id;
 
-	heap_id = map_thread_heap();
-	myheap = &myallocator->heaps[heap_id];
+    heap_id = map_thread_heap();
+    myheap = &myallocator->heaps[heap_id];
 
-	if( stopflag ) return 0;
+    if( stopflag ) return 0;
 
-	pdea = (thread_data *) pinput;
-	pdea->finished = FALSE;
-	pdea->cThreads++;
-	range = pdea->max_size - pdea->min_size;
+    pdea = (thread_data *) pinput;
+    pdea->finished = FALSE;
+    pdea->cThreads++;
+    range = pdea->max_size - pdea->min_size;
 
-	/* allocate NumBlocks chunks of random size */
-	for(cblks=0; cblks < pdea->NumBlocks; cblks++) {
-		victim = lran2(&pdea->rgen)%pdea->asize;
-		custom_free(myheap, pdea->array[victim]);
-		pdea->cFrees++;
+    /* allocate NumBlocks chunks of random size */
+    for(cblks=0; cblks < pdea->NumBlocks; cblks++) {
+        victim = lran2(&pdea->rgen)%pdea->asize;
+        custom_free(myheap, pdea->array[victim]);
+        pdea->cFrees++;
 
-		blk_size = pdea->min_size+lran2(&pdea->rgen)%range;
-		pdea->array[victim] = (char *) custom_malloc(myheap, (size_t) blk_size);
+        blk_size = pdea->min_size+lran2(&pdea->rgen)%range;
+        pdea->array[victim] = (char *) custom_malloc(myheap, (size_t) blk_size);
 
-		pdea->blksize[victim] = blk_size;
-		assert(pdea->array[victim] != NULL);
+        pdea->blksize[victim] = blk_size;
+        assert(pdea->array[victim] != NULL);
 
-		pdea->cAllocs++;
+        pdea->cAllocs++;
 
-		/* Write something! */
+        /* Write something! */
 
-		volatile char * chptr = ((char *) pdea->array[victim]);
-		*chptr++ = 'a';
-		volatile char ch = *((char *) pdea->array[victim]);
-		*chptr = 'b';
+        volatile char * chptr = ((char *) pdea->array[victim]);
+        *chptr++ = 'a';
+        volatile char ch = *((char *) pdea->array[victim]);
+        *chptr = 'b';
 
 
-		if( stopflag ) break;
-	}
+        if( stopflag ) break;
+    }
 
-	//  	printf("Thread %u terminating: %d allocs, %d frees\n",
-	//		      pdea->threadno, pdea->cAllocs, pdea->cFrees) ;
-	pdea->finished = TRUE;
+    //  	printf("Thread %u terminating: %d allocs, %d frees\n",
+    //		      pdea->threadno, pdea->cAllocs, pdea->cFrees) ;
+    pdea->finished = TRUE;
 
-	if( !stopflag ) {
-		_beginthread(exercise_heap, pdea);
-	}
+    if( !stopflag ) {
+        _beginthread(exercise_heap, pdea);
+    }
 
-	return 0;
+    return 0;
 }
 
 
 static void runthreads(long sleep_cnt, int min_threads, int max_threads, int chperthread, int num_rounds) {
-	thread_data de_area[MAX_THREADS];
-	thread_data *pdea;
-	long ticks_per_sec;
-    	int prevthreads;
-	int num_threads;
-	int nperthread;
-	int sum_threads;
-	int sum_allocs;
-	int sum_frees;
-
-	int i;
-
-	long start_cnt, end_cnt;
-	_int64 ticks;
-	double duration ;
-
-	double rate_1 = 0, rate_n;
-	double reqd_space;
-	ULONG used_space;	
-
-	QueryPerformanceFrequency( &ticks_per_sec );
-	
-	pdea = &de_area[0];
-	memset(&de_area[0], 0, sizeof(thread_data));	
-
-	prevthreads = 0 ;
-	for(num_threads=min_threads; num_threads <= max_threads; num_threads++) {
-
-		warmup(&blkp[prevthreads*chperthread], (num_threads-prevthreads)*chperthread );
-
-		nperthread = chperthread ;
-		stopflag   = FALSE ;
-
-		for(i = 0; i < num_threads; i++) {
-			de_area[i].threadno    = i+1 ;
-			de_area[i].NumBlocks   = num_rounds*nperthread;
-			de_area[i].array       = &blkp[i*nperthread];
-			de_area[i].blksize     = &blksize[i*nperthread];
-			de_area[i].asize       = nperthread;
-			de_area[i].min_size    = min_size;
-			de_area[i].max_size    = max_size;
-			de_area[i].seed        = lran2(&rgen);
-			de_area[i].finished    = 0;
-			de_area[i].cAllocs     = 0;
-			de_area[i].cFrees      = 0;
-			de_area[i].cThreads    = 0;
-			de_area[i].finished    = FALSE;
-			lran2_init(&de_area[i].rgen, de_area[i].seed);
-			_beginthread(exercise_heap, &de_area[i]);
-		}
-		
-		QueryPerformanceCounter( &start_cnt );
-
-		printf ("Sleeping for %ld seconds.\n", sleep_cnt);
-		Sleep(sleep_cnt * 1000L) ;
-
-	      	stopflag = TRUE ;
-		
-		for(i = 0; i < num_threads; i++) {
-			while( !de_area[i].finished ) {
-				sched_yield();
-			}
-		}
-
-		QueryPerformanceCounter( &end_cnt );
-
-		sum_frees = sum_allocs =0  ;
-		sum_threads = 0 ;
-		for(i=0;i< num_threads; i++){
-			sum_allocs    += de_area[i].cAllocs ;
-			sum_frees     += de_area[i].cFrees ;
-			sum_threads   += de_area[i].cThreads ;
-			de_area[i].cAllocs = de_area[i].cFrees = 0;
-		}
-
-		ticks = end_cnt - start_cnt ;
-		duration = (double)ticks/ticks_per_sec ;
-
-		for(i = 0; i < num_threads; i++) {
-			if( !de_area[i].finished ) {
-				printf("Thread at %d not finished\n", i);
-			}
-		}
-
-		rate_n = sum_allocs/duration ;
-		if( rate_1 == 0){
-			rate_1 = rate_n ;
-		}
-		reqd_space = (0.5*(min_size+max_size)*num_threads*chperthread) ;
-		// used_space = CountReservedSpace() - init_space;
-		used_space = 0;
-
-		printf("%2d ", num_threads ) ;
-		printf("%6.3f", duration  ) ;
-		printf("%6.3f", rate_n/rate_1 ) ;
-		printf("%8.0f", sum_allocs/duration ) ;
-		printf(" %6.3f %.3f", (double)used_space/(1024*1024), used_space/reqd_space) ;
-		printf("\n") ;
-
-		Sleep(5000L) ; // wait 5 sec for old threads to die
-
-		prevthreads = num_threads;
-	}
+    thread_data de_area[MAX_THREADS];
+    thread_data *pdea;
+    long ticks_per_sec;
+    int prevthreads;
+    int num_threads;
+    int nperthread;
+    int sum_threads;
+    int sum_allocs;
+    int sum_frees;
+
+    int i;
+
+    long start_cnt, end_cnt;
+    _int64 ticks;
+    double duration ;
+
+    double rate_1 = 0, rate_n;
+    double reqd_space;
+    ULONG used_space;	
+
+    QueryPerformanceFrequency( &ticks_per_sec );
+
+    pdea = &de_area[0];
+    memset(&de_area[0], 0, sizeof(thread_data));	
+
+    prevthreads = 0 ;
+    for(num_threads=min_threads; num_threads <= max_threads; num_threads++) {
+
+        warmup(&blkp[prevthreads*chperthread], (num_threads-prevthreads)*chperthread );
+
+        nperthread = chperthread ;
+        stopflag   = FALSE ;
+
+        for(i = 0; i < num_threads; i++) {
+            de_area[i].threadno    = i+1 ;
+            de_area[i].NumBlocks   = num_rounds*nperthread;
+            de_area[i].array       = &blkp[i*nperthread];
+            de_area[i].blksize     = &blksize[i*nperthread];
+            de_area[i].asize       = nperthread;
+            de_area[i].min_size    = min_size;
+            de_area[i].max_size    = max_size;
+            de_area[i].seed        = lran2(&rgen);
+            de_area[i].finished    = 0;
+            de_area[i].cAllocs     = 0;
+            de_area[i].cFrees      = 0;
+            de_area[i].cThreads    = 0;
+            de_area[i].finished    = FALSE;
+            lran2_init(&de_area[i].rgen, de_area[i].seed);
+            _beginthread(exercise_heap, &de_area[i]);
+        }
+
+        QueryPerformanceCounter( &start_cnt );
+
+        printf ("Sleeping for %ld seconds.\n", sleep_cnt);
+        Sleep(sleep_cnt * 1000L) ;
+
+        stopflag = TRUE ;
+
+        for(i = 0; i < num_threads; i++) {
+            while( !de_area[i].finished ) {
+                sched_yield();
+            }
+        }
+
+        QueryPerformanceCounter( &end_cnt );
+
+        sum_frees = sum_allocs =0  ;
+        sum_threads = 0 ;
+        for(i=0;i< num_threads; i++){
+            sum_allocs    += de_area[i].cAllocs ;
+            sum_frees     += de_area[i].cFrees ;
+            sum_threads   += de_area[i].cThreads ;
+            de_area[i].cAllocs = de_area[i].cFrees = 0;
+        }
+
+        ticks = end_cnt - start_cnt ;
+        duration = (double)ticks/ticks_per_sec ;
+
+        for(i = 0; i < num_threads; i++) {
+            if( !de_area[i].finished ) {
+                printf("Thread at %d not finished\n", i);
+            }
+        }
+
+        rate_n = sum_allocs/duration ;
+        if( rate_1 == 0){
+            rate_1 = rate_n ;
+        }
+        reqd_space = (0.5*(min_size+max_size)*num_threads*chperthread) ;
+        // used_space = CountReservedSpace() - init_space;
+        used_space = 0;
+
+        printf("%2d ", num_threads ) ;
+        printf("%6.3f", duration  ) ;
+        printf("%6.3f", rate_n/rate_1 ) ;
+        printf("%8.0f", sum_allocs/duration ) ;
+        printf(" %6.3f %.3f", (double)used_space/(1024*1024), used_space/reqd_space) ;
+        printf("\n") ;
+
+        Sleep(5000L) ; // wait 5 sec for old threads to die
+
+        prevthreads = num_threads;
+    }
 
 }
 
 int main(void) {
-	long sleep_cnt;
-	int min_threads, max_threads;
-	int num_chunks = 10000;
-	int num_rounds;
-	int chperthread;
-
-	myallocator = dmm_init();
-
-	printf("Larson benchmark\n");
-	
-	printf("runtime (sec): ") ;
-	//scanf ("%ld", &sleep_cnt);
-	sleep_cnt = 10;
-	
-	printf("chunk size (min,max): ") ;
-	//scanf("%d %d", &min_size, &max_size ) ;
-	min_size = 32;
-	max_size = 256;
-	
-	printf("threads (min, max):   ") ; 
-	//scanf("%d %d", &min_threads, &max_threads) ;
-	min_threads = 1;
-	max_threads = 1;
-
-	pthread_setconcurrency(max_threads);
-	
-	printf("chunks/thread:  ");
-	//scanf("%d", &chperthread );
-	chperthread = 1;
-	
-	num_chunks = max_threads * chperthread ;
-	if( num_chunks > MAX_BLOCKS ){
-		printf("Max %d chunks - exiting\n", MAX_BLOCKS ) ;
-		return 1;
-	}
-	
-	printf("no of rounds:   ");
-	//scanf("%d", &num_rounds );
-	num_rounds = 1;
-	
-	runthreads(sleep_cnt, min_threads, max_threads, chperthread, num_rounds) ;
-	
-	return 0;
+    long sleep_cnt;
+    int min_threads, max_threads;
+    int num_chunks = 10000;
+    int num_rounds;
+    int chperthread;
+
+    myallocator = dmm_init();
+
+    printf("Larson benchmark\n");
+
+    printf("runtime (sec): ") ;
+    //scanf ("%ld", &sleep_cnt);
+    sleep_cnt = 10;
+
+    printf("chunk size (min,max): ") ;
+    //scanf("%d %d", &min_size, &max_size ) ;
+    min_size = 32;
+    max_size = 256;
+
+    printf("threads (min, max):   ") ; 
+    //scanf("%d %d", &min_threads, &max_threads) ;
+    min_threads = 1;
+    max_threads = 1;
+
+    pthread_setconcurrency(max_threads);
+
+    printf("chunks/thread:  ");
+    //scanf("%d", &chperthread );
+    chperthread = 1;
+
+    num_chunks = max_threads * chperthread ;
+    if( num_chunks > MAX_BLOCKS ){
+        printf("Max %d chunks - exiting\n", MAX_BLOCKS ) ;
+        return 1;
+    }
+
+    printf("no of rounds:   ");
+    //scanf("%d", &num_rounds );
+    num_rounds = 1;
+
+    runthreads(sleep_cnt, min_threads, max_threads, chperthread, num_rounds) ;
+
+    return 0;
 }
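
Each benchmark thread repeatedly picks a random occupied slot, frees it, and allocates a new random-size block in its place; after every timed round the driver prints one line per thread count: threads, duration in seconds, allocation rate normalised to the single-thread rate, absolute allocations per second, and two space-usage columns that are currently zero because CountReservedSpace() is commented out. As an illustration of the fourth column:

    /* 50,000 allocations completed in a 10-second round:           */
    /* sum_allocs / duration = 5000 allocations per second printed  */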
 

+ 19 - 19
lran2.h

@@ -12,34 +12,34 @@
 #define IC	  150889l /* (see e.g. `Numerical Recipes') */
 
 struct lran2_st {
-    long x, y, v[97];
+	long x, y, v[97];
 };
 
-static void
+	static void
 lran2_init(struct lran2_st* d, long seed)
 {
-  long x;
-  int j;
-
-  x = (IC - seed) % LRAN2_MAX;
-  if(x < 0) x = -x;
-  for(j=0; j<97; j++) {
-    x = (IA*x + IC) % LRAN2_MAX;
-    d->v[j] = x;
-  }
-  d->x = (IA*x + IC) % LRAN2_MAX;
-  d->y = d->x;
+	long x;
+	int j;
+
+	x = (IC - seed) % LRAN2_MAX;
+	if(x < 0) x = -x;
+	for(j=0; j<97; j++) {
+		x = (IA*x + IC) % LRAN2_MAX;
+		d->v[j] = x;
+	}
+	d->x = (IA*x + IC) % LRAN2_MAX;
+	d->y = d->x;
 }
 
-static 
+	static 
 long lran2(struct lran2_st* d)
 {
-  int j = (d->y % 97);
+	int j = (d->y % 97);
 
-  d->y = d->v[j];
-  d->x = (IA*d->x + IC) % LRAN2_MAX;
-  d->v[j] = d->x;
-  return d->y;
+	d->y = d->v[j];
+	d->x = (IA*d->x + IC) % LRAN2_MAX;
+	d->v[j] = d->x;
+	return d->y;
 }
 
 #undef IA
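
lran2 is a small shuffled linear congruential generator (its constants cite `Numerical Recipes'): lran2_init() fills a 97-entry shuffle table from the seed, and each lran2() call writes a fresh LCG value into a table slot chosen by the previous output and returns the value it displaced. Minimal usage, matching how larson.c draws block sizes and victim indices:

    struct lran2_st rng;
    lran2_init(&rng, 12345L);           /* seed the 97-entry shuffle table */
    long r = lran2(&rng) % 100;         /* pseudo-random value in [0, 99]  */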

+ 22 - 22
other.c

@@ -2,34 +2,34 @@
 #include <pthread.h>
 
 size_t req_padding(size_t size) {
-	if(size <= 32)
-		return 32;
-	if(size <= 64)
-		return 64;
-	if(size <= 128)
-		return 128;
-	if(size <= 256)
-		return 256;
-	return size;
+    if(size <= 32)
+        return 32;
+    if(size <= 64)
+        return 64;
+    if(size <= 128)
+        return 128;
+    if(size <= 256)
+        return 256;
+    return size;
 }
 
 int map_size_to_list(heap_t *heap, size_t sz) {
-	int i;
-	maptable_node_t *node;
-	i = 0;
-	node = heap->maptable_head;
-	while(node) {
-		if(node->size == sz) {
-			return i;
-		}
-		i++;
-		node = node->next;
-	}
-	return -1;
+    int i;
+    maptable_node_t *node;
+    i = 0;
+    node = heap->maptable_head;
+    while(node) {
+        if(node->size == sz) {
+            return i;
+        }
+        i++;
+        node = node->next;
+    }
+    return -1;
 }
 
 // Random assignment
 int map_thread_heap(void) {
-	return (int) (((unsigned long) pthread_self() >> 10) % NUM_HEAPS);
+    return (int) (((unsigned long) pthread_self() >> 10) % NUM_HEAPS);
 }
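
req_padding() rounds a request up to the nearest of the four bin sizes and passes anything above 256 through unchanged, while map_thread_heap() hashes the calling thread's ID into one of the NUM_HEAPS heaps. For example:

    req_padding((size_t) 100);   /* -> 128: padded up to the next bin      */
    req_padding((size_t) 300);   /* -> 300: above 256, returned unchanged  */
    map_thread_heap();           /* -> a heap index in [0, NUM_HEAPS)      */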
 

+ 1 - 0
other.h

@@ -10,3 +10,4 @@ int map_size_to_list(heap_t *heap, size_t sz);
 int map_thread_heap(void);
 
 #endif /* OTHER_H */
+

+ 5 - 4
posix_lock.c

@@ -5,18 +5,19 @@
 pthread_mutex_t sbrk_mutex = PTHREAD_MUTEX_INITIALIZER;
 
 void sbrk_lock(void) {
-	pthread_mutex_lock(&sbrk_mutex);
+    pthread_mutex_lock(&sbrk_mutex);
 }
 
 void sbrk_unlock(void) {
-	pthread_mutex_unlock(&sbrk_mutex);
+    pthread_mutex_unlock(&sbrk_mutex);
 }
 
 void posix_lock(heap_t *heap) {
-	pthread_mutex_lock(&heap->mutex);
+    pthread_mutex_lock(&heap->mutex);
 }
 
 void posix_unlock(heap_t *heap) {
-	pthread_mutex_unlock(&heap->mutex);
+    pthread_mutex_unlock(&heap->mutex);
 }
 #endif /* HAVE_LOCKS */
+

+ 16 - 16
sys_alloc.c

@@ -11,31 +11,31 @@
 #include "heap.h"
 
 void *sys_alloc(heap_t *heap, size_t size) {
-	size_t allocation_size;
-	void *ptr;
+    size_t allocation_size;
+    void *ptr;
 
 #ifdef HAVE_LOCKS
-	sbrk_lock();
+    sbrk_lock();
 #endif /* HAVE_LOCKS */
 
-	allocation_size = req_padding(size) + HEADER_SIZE;
+    allocation_size = req_padding(size) + HEADER_SIZE;
 
-	ptr = sbrk((int) allocation_size);
-	if(ptr == (void *) -1) {
-		printf("sbrk problem for size of: %zu\n", allocation_size);
-		printf( "Error on sbrk: %s\n", strerror( errno ) );
-	}
-	ptr = (void *) ((char *) ptr + HEADER_SIZE);
+    ptr = sbrk((int) allocation_size);
+    if(ptr == (void *) -1) {
+        printf("sbrk problem for size of: %zu\n", allocation_size);
+        printf( "Error on sbrk: %s\n", strerror( errno ) );
+    }
+    ptr = (void *) ((char *) ptr + HEADER_SIZE);
 
-	set_size(ptr, req_padding(size));
-	set_requested_size(ptr, size);
-	set_next(ptr, heap->used_blocks_head);	
-	heap->used_blocks_head = ptr;
+    set_size(ptr, req_padding(size));
+    set_requested_size(ptr, size);
+    set_next(ptr, heap->used_blocks_head);	
+    heap->used_blocks_head = ptr;
 
 #ifdef HAVE_LOCKS
-	sbrk_unlock();
+    sbrk_unlock();
 #endif /* HAVE_LOCKS */
 
-	return  ptr;
+    return  ptr;
 }
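
sys_alloc() grows the heap with sbrk by the padded request plus one header, then returns a pointer HEADER_SIZE bytes past the start of the new region so that get_header() can step back to the metadata. For a 100-byte request (the actual HEADER_SIZE value is not shown in this diff):

    /* allocation_size = req_padding(100) + HEADER_SIZE = 128 + HEADER_SIZE */
    /* payload pointer  = sbrk result + HEADER_SIZE; stored size = 128      */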
 

+ 14 - 14
test.c

@@ -6,19 +6,19 @@
 #include "custom_free.h"
 
 int main(void) {
-	allocator_t *myallocator;
-	heap_t *myheap;
-	int heap_id;
-	void *p1, *p2, *p3;
+    allocator_t *myallocator;
+    heap_t *myheap;
+    int heap_id;
+    void *p1, *p2, *p3;
 
-	myallocator = dmm_init();
-	heap_id = map_thread_heap();
-	printf("This thread accesses heap %d\n", heap_id);
-	myheap = &myallocator->heaps[heap_id];
-	p1 = custom_malloc(myheap, (size_t) 1024);
-	custom_free(myheap, p1);
-	p2 = custom_malloc(myheap, (size_t) 2855);
-	custom_free(myheap, p2);
-	p3 = custom_malloc(myheap, (size_t) 3018);
-	custom_free(myheap, p3);
+    myallocator = dmm_init();
+    heap_id = map_thread_heap();
+    printf("This thread accesses heap %d\n", heap_id);
+    myheap = &myallocator->heaps[heap_id];
+    p1 = custom_malloc(myheap, (size_t) 1024);
+    custom_free(myheap, p1);
+    p2 = custom_malloc(myheap, (size_t) 2855);
+    custom_free(myheap, p2);
+    p3 = custom_malloc(myheap, (size_t) 3018);
+    custom_free(myheap, p3);
 }