@@ -0,0 +1,281 @@
+/*
+ * Copyright Institute of Communication and Computer Systems (ICCS)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+/**
+ * @file src/memalign.c
+ * @author Ioannis Koutras (joko@microlab.ntua.gr)
+ * @date November 2012
+ *
+ * @brief Implementation of memory-aligned allocation calls for
+ * freelist-organized raw blocks.
+ */
+
+#include "dmmlib/memalign.h"
+
+#include <inttypes.h>
+#include <string.h>
+#include <sys/mman.h>
+#include <unistd.h>
+#include <assert.h>
+
+#include "dmm_config.h"
+#include "dmmlib/dmmlib.h"
+#include "memcpy.h"
+#include "locks.h"
+#include "default_rb.h"
+#include "freelist/block_header_funcs.h"
+
+#ifdef WITH_ALLOCATOR_STATS
+#include "statistics.h"
+#endif /* WITH_ALLOCATOR_STATS */
+
+#include "trace.h"
+
+#ifdef BITMAP_RB_ONLY
+#error The current memory-aligned allocation implementation supports only \
+freelist-organized raw blocks.
+#endif /* BITMAP_RB_ONLY */
+
+/** The function posix_memalign() allocates size bytes and places the address
+ * of the allocated memory in *memptr. The address of the allocated memory is
+ * a multiple of alignment, which must be a power of two and a multiple of
+ * sizeof(void *). If size is 0, or if no memory can be obtained, NULL is
+ * placed in *memptr.
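+ *
+ * A minimal usage sketch (illustrative only):
+ *
+ * @code
+ * void *buf = NULL;
+ *
+ * if(posix_memalign(&buf, 64, 1024) == 0 && buf != NULL) {
+ *     // buf points to 1024 bytes aligned on a 64-byte boundary
+ * }
+ * @endcode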
+ */
+int posix_memalign(void **memptr, size_t alignment, size_t size) {
+    DEFAULT_RB_T *encapsulated_rb;
+
+    *memptr = NULL;
+
+    /* Input check: the alignment must be a power of two and a multiple of
+     * sizeof(void *) */
+    if(size == 0 || alignment == 0 || (alignment & (alignment - 1)) != 0 ||
+            alignment % sizeof(void *) != 0) {
+        return 0;
+    }
+
+    raw_block_header_t *raw_block;
+
+    /* Search the available freelist-organized raw blocks for a block of at
+     * least size + alignment - 1 bytes */
+    SLIST_FOREACH(raw_block, &systemallocator.rb_head, pointers) {
+        lock_raw_block(raw_block);
+        encapsulated_rb = (DEFAULT_RB_T *)
+            ((uintptr_t) raw_block + sizeof(raw_block_header_t));
+        *memptr = dmmlib_malloc(encapsulated_rb, size + alignment - 1);
+        unlock_raw_block(raw_block);
+
+/* Re-entered via goto whenever a replacement allocation needs its alignment
+ * checked. */
+CheckAlignment:
+
+        if(*memptr != NULL) {
+
+            /* Check that a valid pointer has been returned */
+            assert(((uintptr_t) raw_block < (uintptr_t) *memptr) &&
+                    ((uintptr_t) *memptr < (uintptr_t) raw_block +
+                     raw_block->size));
+
+            if(((uintptr_t) *memptr) % alignment == 0) {
+                /* Memory address is already aligned, no need for
+                 * modifications */
+                break;
+            }
+
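+            /* For a power-of-two alignment, (- p) & (alignment - 1) is the
+             * distance from p up to the next multiple of alignment; e.g.
+             * p == 0x1003 with alignment == 8 yields a padding of 5 bytes
+             * (illustrative values). */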
+            size_t padding = (- (size_t) *memptr) & (alignment - 1);
+
+            block_header_t *block_header = get_header(*memptr);
+
+            /* Check if the currently selected memory block is the first one
+             * allocated in the raw block. If it is, then there is no previous
+             * block in the data layout, so no predecessor can grow to absorb
+             * the padding; the padding would simply be wasted space. Instead,
+             * we try to allocate another block, which at this point is
+             * guaranteed not to be the first memory block. If we get one, we
+             * free the first allocated block and repeat the alignment
+             * process. Otherwise, we move on to the next raw block. */
+
+            size_t previous_size_availability =
+                get_previous_size_availability(block_header);
+
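+            /* The size fields pack a byte count in the upper bits and the
+             * availability flag in bit 0, so a packed value of 1 means the
+             * previous size is 0, i.e. no block precedes this one. */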
+            if(previous_size_availability == 1) {
+                void *first_block = *memptr;
+
+                *memptr = dmmlib_malloc(encapsulated_rb, size + alignment - 1);
+
+                /* Free the first block only after the new allocation, so the
+                 * allocator cannot hand the very same block back to us. */
+                dmmlib_free(encapsulated_rb, first_block);
+
+                if(*memptr != NULL) {
+                    goto CheckAlignment;
+                } else {
+                    /* Abandon current raw block */
+                }
+            } else {
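+                /* The predecessor absorbs the padding: its size grows by
+                 * padding bytes while this block's header slides forward by
+                 * the same amount. Illustrative example, assuming HEADER_SIZE
+                 * is 8: a payload at 0x1008 with alignment 16 gives a padding
+                 * of 8, the header moves from 0x1000 to 0x1008 and the
+                 * payload ends up at the aligned address 0x1010. */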
+                size_t previous_size = previous_size_availability >> 1;
+                size_t previous_availability = previous_size_availability & 1;
+                block_header_t *previous_block = (block_header_t *)
+                    ((uintptr_t) block_header - previous_size);
+
+                block_header_t *next_block = get_dlnext(encapsulated_rb,
+                        block_header);
+
+                previous_size += padding;
+                previous_block->size = (previous_size << 1) |
+                    previous_availability;
+
+                block_header->previous_size = (previous_size << 1) |
+                    previous_availability;
+
+                size_t new_size = get_size(block_header) - padding;
+
+                block_header->size = (new_size << 1) | 1;
+
+                if(next_block != NULL) {
+                    next_block->previous_size = block_header->size;
+                } else { /* The aligned memory block is the border pointer */
+                    encapsulated_rb->border_ptr = (block_header_t *)
+                        ((uintptr_t) block_header + padding);
+                }
+
+                /* These regions may overlap when padding < HEADER_SIZE, so
+                 * use memmove() rather than memcpy() for the relocation. */
+                memmove((void *) ((uintptr_t) block_header + padding),
+                        (void *) block_header, HEADER_SIZE);
+
+                *memptr = (void *)((uintptr_t) *memptr + padding);
+
+                break;
+            }
+        }
+    }
+
+    if(*memptr == NULL) {
+
+        if(2 * size > SYS_ALLOC_SIZE - sizeof(raw_block_header_t) -
+                sizeof(freelist_rb_t)) { /* A big block has to be created */
+
+            *memptr = (void *)create_raw_block(size + alignment - 1 +
+                    sizeof(raw_block_header_t), BIGBLOCK);
+            if(*memptr != NULL) {
+
+#ifdef WITH_ALLOCATOR_STATS
+                update_stats(&systemallocator.dmm_stats,
+                        MALLOC,
+#ifdef REQUEST_SIZE_INFO
+                        size,
+#endif /* REQUEST_SIZE_INFO */
+                        size + sizeof(raw_block_header_t));
+#endif /* WITH_ALLOCATOR_STATS */
+
+                *memptr = (void *)((uintptr_t) *memptr +
+                        sizeof(raw_block_header_t));
+
+                if(((uintptr_t) *memptr) % alignment != 0) {
+                    size_t padding = (- (size_t) *memptr) & (alignment - 1);
+
+                    /* Relocate the raw block header right before the aligned
+                     * address; memmove() is used as the regions may overlap. */
+                    memmove((void *)((uintptr_t) *memptr
+                                - sizeof(raw_block_header_t) + padding),
+                            (void *)((uintptr_t) *memptr
+                                - sizeof(raw_block_header_t)),
+                            sizeof(raw_block_header_t));
+
+                    /* munmap() works at page granularity, so release only the
+                     * whole pages that lie before the relocated header. */
+                    size_t page_size = (size_t) sysconf(_SC_PAGESIZE);
+                    size_t lead = padding - (padding % page_size);
+
+                    if(lead != 0) {
+                        munmap((void *)((uintptr_t) *memptr
+                                    - sizeof(raw_block_header_t)),
+                                lead);
+                    }
+
+                    *memptr = (void *)((uintptr_t) *memptr + padding);
+                }
+
+#ifdef WITH_DEBUG
+                /* Register the big block only after any relocation, so the
+                 * list holds the final header address. */
+                lock_global();
+                SLIST_INSERT_HEAD(&systemallocator.bb_head,
+                        (raw_block_header_t *)((uintptr_t) *memptr
+                            - sizeof(raw_block_header_t)),
+                        pointers);
+                unlock_global();
+#endif /* WITH_DEBUG */
+            }
+        } else { /* Create a new raw block and use it */
+            lock_global();
+            raw_block = create_raw_block((size_t) SYS_ALLOC_SIZE,
+                    DEFAULT_RB_TYPE);
+            if(raw_block != NULL) {
+                lock_raw_block(raw_block);
+                SLIST_INSERT_HEAD(&systemallocator.rb_head, raw_block,
+                        pointers);
+                unlock_global();
+
+                encapsulated_rb = (DEFAULT_RB_T *)
+                    ((uintptr_t) raw_block + sizeof(raw_block_header_t));
+                *memptr = dmmlib_malloc(encapsulated_rb, size + alignment - 1);
+                unlock_raw_block(raw_block);
+
+                if(*memptr != NULL) {
+                    goto CheckAlignment;
+                }
+            } else {
+                /* Do not leave the global lock held when no raw block
+                 * could be created. */
+                unlock_global();
+            }
+        }
+    }
+
+    /* Assert that the returned address is a multiple of alignment */
+    if(*memptr != NULL) {
+        assert((uintptr_t) *memptr % alignment == 0);
+    }
+
+    TRACE_1("dmmlib - ma %p %zu %zu\n", *memptr, alignment, size);
+
+    return 0;
+}