ExpandableArena* expandable_arena_new (Arena_Reserve starting_reserve, s32 commit_page_count) {
    ExpandableArena* new_arena = (ExpandableArena*)bootstrap_arena(starting_reserve, commit_page_count);

    // Note: beyond the first 32 bytes, ExpandableArena is not initialized by bootstrap_arena, so we do it here:
    new_arena->current = (Arena*)new_arena;
    new_arena->current_point = expandable_arena_start(new_arena);
    new_arena->next_arenas = Array(); // next_arenas is otherwise uninitialized.

    // Back next_arenas with the general-purpose allocator: if its storage lived in this arena,
    // resetting the arena would wipe it. We don't want to tie the lifetime of next_arenas to
    // this expandable arena.
    new_arena->next_arenas.allocator = GPAllocator();
    array_reserve(new_arena->next_arenas, 8);

    return new_arena;
}

void* expandable_arena_allocator_proc (Allocator_Mode mode, s64 requested_size, s64 old_size, void* old_memory, void* allocator_data) {
    ExpandableArena* arena = (ExpandableArena*)allocator_data;
    Assert(arena != nullptr);

    switch (mode) {
        case Allocator_Mode::ALLOCATE: {
            return expandable_arena_alloc(arena, requested_size);
        } break;

        case Allocator_Mode::RESIZE: {
            // See note :ArenaResizing
            void* new_memory = expandable_arena_alloc(arena, requested_size);
            memcpy(new_memory, old_memory, old_size);
            return new_memory;
        } break;

        case Allocator_Mode::DEALLOCATE: return nullptr;
    }

    return nullptr;
}

bool is_valid (ExpandableArena* arena) {
    return (arena != nullptr) && (arena->memory_base != nullptr) && (arena->current != nullptr);
}

void* expandable_arena_alloc (ExpandableArena* arena_ex, s64 byte_count) {
    Assert(arena_ex != nullptr);
    Assert(arena_ex->memory_base != nullptr); // Must be initialized before calling.
    Assert(is_valid(arena_ex));
    Assert(arena_tables_initialized);

    Arena* arena = (Arena*)arena_ex->current;

    u8* result = Align(arena->current_point, arena->alignment);
    u8* result_end = result + byte_count;

    if (result_end > arena->first_uncommitted_page) {
        if (result_end > arena_address_limit(arena)) {
            // Pick an appropriate reserve size that will fit this allocation.
            Arena_Reserve new_min_reserve = next_reserve_size(byte_count);
            if (arena->reserve_size > new_min_reserve) {
                new_min_reserve = arena->reserve_size;
            }

            Arena* new_arena = next_arena(new_min_reserve);
            new_arena->alignment = arena_ex->alignment;
            new_arena->flags = arena_ex->flags;

            arena_ex->current = new_arena;
            array_add(arena_ex->next_arenas, new_arena);

            // Allocate from the new arena, committing pages as required.
            result = Align(new_arena->current_point, new_arena->alignment);
            result_end = result + byte_count;
            if (result_end > new_arena->first_uncommitted_page) {
                extend_committed_pages(new_arena, result_end);
            }
        } else {
            extend_committed_pages(arena, result_end);
        }
    }

    arena_ex->current->current_point = result_end;

    return result;
}

u8* expandable_arena_start (ExpandableArena* arena_ex) {
    return Align(arena_ex->memory_base + sizeof(ExpandableArena), ARENA_DEFAULT_ALIGNMENT);
}

Allocator get_allocator (ExpandableArena* arena_ex) {
    return { expandable_arena_allocator_proc, arena_ex };
}

void arena_reset (ExpandableArena* arena_ex, bool free_extra_pages) {
    if (!is_valid(arena_ex)) return;

    // Free the arenas in `next_arenas`.
    for (s64 i = 0; i < arena_ex->next_arenas.count; i += 1) {
        release_arena(arena_ex->next_arenas[i], free_extra_pages);
    }

    // Reset next_arenas.
    #if BUILD_DEBUG
    array_zero(arena_ex->next_arenas);
    #endif
    array_reset_keeping_memory(arena_ex->next_arenas);

    arena_ex->current = (Arena*)arena_ex;
    arena_ex->current_point = expandable_arena_start(arena_ex);

    if (free_extra_pages) {
        free_pages_down_to((Arena*)arena_ex, arena_ex->initial_commit_page_count);
    }
}

force_inline void arena_delete (ExpandableArena* arena_ex) {
    // Release the chained arenas first (arena_reset walks next_arenas), then free the array itself.
    arena_reset(arena_ex, true);
    array_free(arena_ex->next_arenas);
    arena_delete((Arena*)arena_ex);
}
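
// Usage sketch (illustrative only, not part of the original API): the intended lifecycle of an
// ExpandableArena using the functions above. The function name, the commit_page_count value, and
// the allocation size are arbitrary choices for this example; the Arena_Reserve value is taken as
// a parameter because the enum's members are defined elsewhere in the codebase.
static void expandable_arena_usage_example (Arena_Reserve reserve) {
    ExpandableArena* arena = expandable_arena_new(reserve, /*commit_page_count=*/4);

    // Allocate straight from the arena; when the current reservation is exhausted, a new backing
    // arena is picked and chained into next_arenas automatically.
    void* block = expandable_arena_alloc(arena, 256);
    (void)block;

    // Or hand the arena to code that expects an Allocator.
    Allocator a = get_allocator(arena);
    (void)a;

    arena_reset(arena, /*free_extra_pages=*/false); // Reuse the committed memory for the next pass.
    arena_delete(arena);                            // Release the chained arenas and the arena itself.
}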