// Just an idea I had: start with a small arena and grow it on an as-needed basis;
// this way allocations are always extremely fast.
// The full structure is 80B, and because we use bootstrapping, we only need a little
// extra memory for storing the `next_arenas` pointers.

// DO NOT MERGE WITH `Arena`, we need fixed-size arenas so that we can back
// `ArenaArray`s.
struct ExpandableArena {
    u8* current_point          = nullptr;
    u8* memory_base            = nullptr;
    u8* first_uncommitted_page = nullptr;

    u16           alignment                 = CPU_REGISTER_WIDTH_BYTES;
    Arena_Reserve reserve_size              = Arena_Reserve::Size_64K;
    Arena_Flags   flags                     = Arena_Flags::None;
    u32           initial_commit_page_count = ARENA_DEFAULT_COMMIT_PAGE_COUNT;

    // Note that this downcasts to Arena*, so it can be initialized in the same way.
    Arena*        current;
    Array<Arena*> next_arenas;
};
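
// A minimal sketch of how the grow-on-demand allocation described above could work.
// Illustrative only: `arena_alloc`, `arena_new`, and `array_add` are assumed helper
// names standing in for the real Arena/Array APIs, not the actual calls.
//
//     void* expandable_arena_alloc (ExpandableArena* arena_ex, s64 byte_count) {
//         // Fast path: bump-allocate out of the current fixed-size arena.
//         if (void* result = arena_alloc(arena_ex->current, byte_count))
//             return result;
//
//         // Slow path: the current arena is full, so chain a new fixed-size arena of
//         // the same reserve size and allocate from it. Thanks to bootstrapping, the
//         // only extra bookkeeping is the pointer stored in `next_arenas`.
//         Arena* next = arena_new(arena_ex->reserve_size, arena_ex->initial_commit_page_count);
//         array_add(&arena_ex->next_arenas, next);
//         arena_ex->current = next;
//         return arena_alloc(next, byte_count);
//     }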

ExpandableArena* expandable_arena_new (Arena_Reserve starting_reserve=Arena_Reserve::Size_64K, s32 commit_page_count=8);
void* expandable_arena_allocator_proc (Allocator_Mode mode, s64 requested_size, s64 old_size, void* old_memory, void* allocator_data);

bool is_valid (ExpandableArena* arena);
void* expandable_arena_alloc (ExpandableArena* arena_ex, s64 byte_count);
u8* expandable_arena_start (ExpandableArena* arena_ex);
Allocator allocator (ExpandableArena* arena_ex);

void arena_reset_to (ExpandableArena* arena_ex, Arena* last_arena, u8* starting_point);
void arena_reset (ExpandableArena* arena_ex, bool free_extra_pages=true);
force_inline void arena_delete (ExpandableArena* arena_ex);
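
// Example usage (a sketch; `Vertex` is a hypothetical payload type, and the exact
// reset/delete semantics are assumptions based on the declarations above):
//
//     ExpandableArena* frame_arena = expandable_arena_new(Arena_Reserve::Size_64K, 8);
//     Allocator frame_allocator = allocator(frame_arena);  // plug into Allocator-aware code
//
//     Vertex* verts = (Vertex*) expandable_arena_alloc(frame_arena, 1024 * sizeof(Vertex));
//     // ... fill verts ...
//
//     arena_reset(frame_arena);   // reuse the same memory next frame
//     arena_delete(frame_arena);  // release everything when done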