#include "Arena.h"
#include "Arena_Windows.cpp"

// For arrays, use `Array`, which is backed by the general purpose allocator
// or use `ArenaArray` if you need to expand the size to an unknown size.

// Allocator callback routing the generic Allocator interface onto an Arena.
// `allocator_data` must point at a valid Arena. DEALLOCATE is intentionally a
// no-op: arena memory is reclaimed in bulk (reset/release), never per-pointer.
void* arena_allocator_proc (Allocator_Mode mode, s64 requested_size, s64 old_size, void* old_memory, void* allocator_data) {
    Arena* arena = (Arena*)allocator_data;
    Assert(arena != nullptr);

    switch (mode) {
        case Allocator_Mode::ALLOCATE: {
            return arena_alloc(arena, requested_size);
        } break;

        case Allocator_Mode::RESIZE: {
            // Assert(false); // DO NOT USE RESIZE WITH ARENAS! :ArenaResizing
            // #TODO: Resize should check if current_point matches the end of the old allocation?
            // and resize accordingly + pass back same pointer. Otherwise just make a new allocation and copy.
            // Current behavior: always allocate fresh and copy min(old_size, requested_size).
            void* result = arena_alloc(arena, requested_size);
            s64 size_to_copy = old_size < requested_size ? old_size : requested_size;
            if (result && size_to_copy) {
                memcpy(result, old_memory, size_to_copy);
            }
            return result;
        } break;

        case Allocator_Mode::DEALLOCATE: {
            return nullptr; // unused — arenas free in bulk, not per allocation
        } break;

        case Allocator_Mode::DETAILS: {
            // (void*) cast: a string literal is `const char*` and does not
            // convert implicitly to `void*` in conforming C++.
            if (allocator_data == nullptr) {
                return (void*)"arena_allocator_proc: data pointer is null!";
            }
            return (void*)"arena_allocator_proc";
        } break;
    }
    return nullptr;
}

// See arena_delete for decommissioning of arenas initialized with arena_init.
// Reserves address space, commits the initial pages, and resets bookkeeping.
// No-op if the arena already has a memory_base (is_valid).
void arena_init (Arena* arena, Arena_Reserve new_reserve, s32 default_commit_page_count) {
    Assert(arena != nullptr);
    s64 commit_size = default_commit_page_count * PLATFORM_MEMORY_PAGE_SIZE;
    Assert(commit_size >= 0);

    if (!is_valid(arena)) {
        arena->reserve_size = new_reserve;
        platform_init(arena, reserve_size(arena));
        arena->current_point = arena_start(arena);
        arena->first_uncommitted_page = arena->memory_base;
        if (arena_commit_first_pages(arena, commit_size)) {
            arena->initial_commit_page_count = default_commit_page_count;
            // arena_lock_pages(arena, arena->memory_base, commit_size);
        }
        arena->alignment = ARENA_DEFAULT_ALIGNMENT;
        // BUG FIX: was `flags | ~Is_Bootstrapped`, which sets every bit EXCEPT
        // leaving the bootstrap bit alone (OR with a complement sets all other
        // flags). Clearing one flag requires AND with the complement.
        arena->flags = (arena->flags & (~Arena_Flags::Is_Bootstrapped)); // unset is bootstrapped, if present.
    }
}

// Commits the leading pages of a freshly reserved arena, aligned up to the
// arena's alignment. Returns false (asserting in debug) on invalid sizes or
// when the aligned commit range would exceed the reserved address space.
bool arena_commit_first_pages (Arena* arena, s64 commit_size, s64 start_offset) {
    Assert(commit_size > 0);   // no point calling this without commit size
    Assert(start_offset >= 0); // should NOT be negative.
    if (commit_size <= 0 || start_offset < 0) { return false; }

    u8* final_address = Align(arena->memory_base + commit_size + start_offset, arena->alignment);
    s64 commit_size_final = (s64)(final_address - arena->memory_base);
    if (commit_size_final > reserve_size(arena)) {
        Assert(false); // Commit size exceeds reserve size
        return false;
    }
    if (final_address > arena->first_uncommitted_page) {
        extend_committed_pages(arena, final_address);
    }
    return true;
}

// Creates an arena that lives inside its own first committed pages: a local
// Arena struct is initialized, then copied to the start of the reservation.
// Returns nullptr (asserting in debug) if the platform reservation failed.
Arena* bootstrap_arena (Arena_Reserve new_reserve, s32 default_commit_page_count) {
    s64 commit_size = default_commit_page_count * PLATFORM_MEMORY_PAGE_SIZE;
    Assert(commit_size <= reserve_size(new_reserve));

    Arena new_arena;
    arena_init(&new_arena, new_reserve, default_commit_page_count);

    Arena* arena_ptr = (Arena*)new_arena.memory_base;
    if (arena_ptr == nullptr) {
        Assert(false); // Assert in debug mode!
        return nullptr;
    }
    memcpy(arena_ptr, &new_arena, sizeof(Arena));
    // current_point must be recomputed relative to the new home of the struct.
    arena_ptr->current_point = arena_start(arena_ptr);
    arena_set_bootstrap_flag(arena_ptr);
    return arena_ptr;
}

// Flag helpers.
void arena_clear_flags        (Arena* arena) { arena->flags = Arena_Flags::None; }
void arena_set_bootstrap_flag (Arena* arena) { arena->flags |= Arena_Flags::Is_Bootstrapped; }
bool arena_is_bootstrapped    (Arena* arena) { return (bool)(arena->flags & Arena_Flags::Is_Bootstrapped); }
// void arena_set_secure_flag (Arena* arena) { arena->flags |= Arena_Flags::Secure_Arena; }

// Rewinds the cursor but keeps all committed pages mapped (fast reuse).
void arena_reset_keeping_memory (Arena* arena) {
    if (!is_valid(arena)) return;
    arena->current_point = arena_start(arena);
}

// Rewinds the cursor AND decommits pages beyond the initial commit.
void arena_reset (Arena* arena) {
    if (!is_valid(arena)) return;
    arena->current_point = arena_start(arena);
    free_pages_down_to(arena, arena->initial_commit_page_count);
}

// Wipes the used region with `wipe_function` (e.g. a secure-zero routine)
// before resetting. No-op if the wipe function is null.
void arena_reset_overwriting_memory (Arena* arena, Memory_Wipe_Function wipe_function) {
    if (!is_valid(arena)) return;
    Assert(wipe_function != nullptr);
    if (wipe_function == nullptr) return;
    wipe_function(arena_start(arena), (u64)(arena->current_point - arena_start(arena)));
    arena_reset(arena);
}

// Bump-allocates `byte_count` bytes, lazily initializing the arena on first
// use and extending committed pages on demand. Asserts (and still bumps the
// cursor) if the reservation is exhausted — callers must not exceed reserve.
void* arena_alloc (Arena* arena, s64 byte_count) {
    Assert(arena != nullptr);
    if (!arena->memory_base) {
        arena_init(arena, arena->reserve_size); // lazy init with default page count
    }
    Assert(is_valid(arena));

    u8* result     = Align(arena->current_point, arena->alignment);
    u8* result_end = result + byte_count;
    if (result_end > arena->first_uncommitted_page) {
        if (result_end > arena_address_limit(arena)) {
            printf("[Error] Failed to allocate because Arena is full and cannot expand!\n");
            Assert(false); // Failed to allocate because arena is full and cannot expand!
        } else {
            extend_committed_pages(arena, result_end);
        }
    }
    arena->current_point = result_end;
    return result;
}

u8* arena_start (Arena* arena) {
    // This assumes every arena is bootstrapped, so there may be some
    // wastage/cache misses caused by this!
    return Align(arena->memory_base + sizeof(Arena), ARENA_DEFAULT_ALIGNMENT);
}

// One-past-the-end of the reserved address range.
u8* arena_address_limit (Arena* arena) { return (arena->memory_base + reserve_size(arena)); }

bool is_valid (Arena* arena) { return (arena != nullptr) && (arena->memory_base != nullptr); }

s64 reserve_size (Arena* arena) { return reserve_size(arena->reserve_size); }

// Maps the Arena_Reserve size class to its byte count.
s64 reserve_size (Arena_Reserve ar) {
    switch (ar) {
        case Arena_Reserve::Size_64K: return KB(64);
        case Arena_Reserve::Size_2M:  return MB(2);
        case Arena_Reserve::Size_64M: return MB(64);
        case Arena_Reserve::Size_2G:  return GB(2);
        case Arena_Reserve::Size_64G: return GB(64);
        case Arena_Reserve::Size_2T:  return TB(2);
        // NOTE(review): Size_64T is produced by next_reserve_size below but is
        // not handled here — confirm whether it needs a case. #TODO
    }
    Assert(false); // This should not happen.
    return 0;
}

// Smallest size class that can hold `size`; falls back to the largest class.
Arena_Reserve next_reserve_size (s64 size) {
    for (u8 i = 0; i < Arena_Reserve_Count; i += 1) {
        if (size <= Arena_Sizes[i]) {
            return (Arena_Reserve)i;
        }
    }
    return Arena_Reserve::Size_64T;
}

// arena_usage_bytes is kinda pointless tbh.
s64 arena_usage_bytes           (Arena* arena) { return (s64)(arena->current_point - arena_start(arena)); }
s64 arena_usage_committed_bytes (Arena* arena) { return (s64)(arena->first_uncommitted_page - arena->memory_base); }

// for arena details, I need to setup my string builder first.
Allocator allocator (Arena* arena) { return { arena_allocator_proc, arena }; }

// RAII guard: restores the arena cursor to where it was at construction.
// Committed pages are kept (cheap, suitable for scratch scopes).
struct Auto_Reset {
    Arena* arena;
    u8*    starting_point;

    Auto_Reset(Arena* arena) {
        Assert(is_valid(arena));
        this->arena          = arena;
        this->starting_point = arena->current_point;
    }

    // #TODO: Implement with ExpandableArena (probably just use the same implementation as Auto_Release?)
    // Auto_Reset(ExpandableArena* arena_ex) {
    //
    // }

    ~Auto_Reset() {
        arena->current_point = starting_point;
    }
};

// RAII guard: like Auto_Reset, but also releases extra committed pages (plain
// Arena) or delegates to arena_reset_to (ExpandableArena).
struct Auto_Release {
    bool             is_expandable;
    ExpandableArena* arena_ex;
    Arena*           arena;
    u8*              starting_point;

    Auto_Release(ExpandableArena* arena_ex) {
        this->arena_ex       = arena_ex;
        this->arena          = arena_ex->current;
        this->starting_point = arena_ex->current->current_point;
        this->is_expandable  = true;
    }

    Auto_Release(Arena* arena) {
        this->arena_ex       = nullptr;
        this->arena          = arena;
        this->starting_point = arena->current_point;
        this->is_expandable  = false;
        Assert(is_valid(arena));
    }

    ~Auto_Release() {
        if (is_expandable) {
            arena_reset_to(arena_ex, arena, starting_point);
        } else {
            arena->current_point = starting_point;
            free_pages_down_to(arena, arena->initial_commit_page_count);
        }
    }
};

// #FixedArena procedures:

// Allocates a fixed-size arena from `backing_allocator`; the FixedArena header
// lives at the front of its own buffer (cursor starts past it).
FixedArena* bootstrap_fixed_arena (s64 size, Allocator backing_allocator) {
    push_allocator(backing_allocator);
    Assert(size >= sizeof(FixedArena));
    ArrayView memory = ArrayView(size);
    FixedArena* result = (FixedArena*)memory.data;
    result->memory    = memory;
    result->cursor    = sizeof(FixedArena);
    result->allocator = backing_allocator;
    return result;
}

force_inline void destroy_arena (FixedArena* arena) { Delete(arena->allocator, arena); }

Allocator allocator (FixedArena* arena) { return { fixed_arena_allocator_proc, arena }; }

// Allocator callback for FixedArena: bump allocation inside a fixed buffer.
// Overflow is only caught by the Assert — in release builds an over-full
// arena will hand out pointers past the buffer. #TODO: fail gracefully?
void* fixed_arena_allocator_proc (Allocator_Mode mode, s64 requested_size, s64 old_size, void* old_memory, void* allocator_data) {
    constexpr s64 DEFAULT_ALIGNMENT = 16; // maybe make this modifiable as part of FixedArena struct?
    FixedArena* arena = (FixedArena*)allocator_data;
    Assert(arena != nullptr);

    switch (mode) {
        case Allocator_Mode::ALLOCATE: {
            arena->cursor = Align(arena->cursor, DEFAULT_ALIGNMENT);
            void* result = &arena->memory[arena->cursor];
            arena->cursor += requested_size;
            Assert(arena->cursor <= arena->memory.count);
            return result;
        } break;

        case Allocator_Mode::RESIZE: {
            // Same as ALLOCATE, then copy min(old_size, requested_size) over.
            arena->cursor = Align(arena->cursor, DEFAULT_ALIGNMENT);
            void* result = &arena->memory[arena->cursor];
            arena->cursor += requested_size;
            Assert(arena->cursor <= arena->memory.count);
            s64 size_to_copy = old_size < requested_size ? old_size : requested_size;
            if (result && size_to_copy) {
                memcpy(result, old_memory, size_to_copy);
            }
            return result;
        } break;

        case Allocator_Mode::DEALLOCATE: {
            return nullptr; // unused
        } break;

        case Allocator_Mode::DETAILS: {
            // (void*) cast: string literal is `const char*`, not `void*`.
            if (allocator_data == nullptr) {
                return (void*)"fixed_arena_allocator_proc: data pointer is null!";
            }
            return (void*)"fixed_arena_allocator_proc: with valid data";
        } break;
    }
    return nullptr;
}

// #TODO: #Arena_Free_List #garbage_collection in `release_arena`
// [ ] Garbage collection if we have >> 64 in a particular table for a while.
//     There should be some parameters regarding what the upper limit for idle
//     committed pages should be and a heuristic for maximum number of arenas waiting

#include "Base.h"

// Global pool of reusable bootstrapped arenas, one free table per size class.
// All access is serialized by `mutex`.
struct Arena_Free_List {
    Mutex mutex;
    s32   in_flight_count[Arena_Reserve_Count]; // arenas currently checked out, per size class
    Array free_table[Arena_Reserve_Count];      // idle arenas ready for reuse
#if ARENA_DEBUG
    Array in_flight[Arena_Reserve_Count];       // debug: track who is checked out
#endif
    b32 initialized;
};

global Arena_Free_List* arena_free_list;

// Only call once from main thread!
void initialize_arena_free_list (Allocator allocator) { mutex_init(&arena_free_list->mutex); Assert(arena_free_list != nullptr); if (arena_free_list->initialized) return; for (s32 i = 0; i < Arena_Reserve_Count; i += 1) { arena_free_list->in_flight_count[i] = 0; arena_free_list->free_table[i].allocator = allocator; array_reserve(arena_free_list->free_table[i], 64); #if ARENA_DEBUG arena_free_list->in_flight[i].allocator = allocator; array_reserve(arena_free_list->in_flight[i], 64); #endif } arena_free_list->initialized = true; } Arena* next_arena (Arena_Reserve reserve_size) { Assert(arena_free_list != nullptr); Arena* arena; lock_guard(&arena_free_list->mutex); s64 reserve_index = (s64)reserve_size; if (!arena_free_list->free_table[reserve_index].count) { arena = bootstrap_arena(reserve_size, ARENA_DEFAULT_COMMIT_PAGE_COUNT); } else { arena = pop(arena_free_list->free_table[reserve_index]); } #if ARENA_DEBUG array_add(arena_free_list->in_flight[reserve_index], arena); #endif arena_free_list->in_flight_count[reserve_index] += 1; Assert(arena != nullptr); return arena; } void release_arena (Arena* arena, bool delete_extra_pages) { Assert(arena_free_list != nullptr); Assert(arena != nullptr); Assert(arena_is_bootstrapped(arena)); // Only put into free table if arena is bootstrapped? lock_guard(&arena_free_list->mutex); s64 reserve_index = (s64)arena->reserve_size; #if ARENA_DEBUG array_unordered_remove_by_value(arena_free_list->in_flight[reserve_index], arena, 1); // BUILD_DEBUG! 
#endif arena_reset_keeping_memory(arena); if (delete_extra_pages) { free_pages_down_to(arena, arena->initial_commit_page_count); } array_add(arena_free_list->free_table[reserve_index], arena); arena_free_list->in_flight_count[reserve_index] -= 1; // #TODO #garbage_collection // if (arena_free_table[reserve_index].count > 64) { // s64 arenas_to_delete_count = arena_free_table[reserve_index].count - 64; // while (arenas_to_delete_count > 0) { // arena_delete(arena_free_table[arena_free_table.count-1]); // array_unordered_remove_by_index(..); // arenas_to_delete_count -= 1; // } // } } s64 bytes_in_use (ArrayView arenas) { // does not include overhead from committed pages! s64 sum = 0; for (s64 i = 0; i < arenas.count; i += 1) { sum += arena_usage_bytes(arenas[i]); } return sum; } s64 committed_bytes (ArrayView arenas) { s64 sum = 0; for (s64 i = 0; i < arenas.count; i += 1) { sum += arena_usage_committed_bytes(arenas[i]); } return sum; }