Bugfixes for ExpandableArena

parent 7bb81077bd
commit 1db03dd4ea

@@ -78,6 +78,11 @@ bool is_valid(Array<T> src) {
     return true;
 }
 
+template <typename T>
+void array_zero (const Array<T>& src) {
+    memset(src.data, 0, src.count * sizeof(T));
+}
+
 template <typename T>
 Array<T> array_copy_zero(const Array<T>& src) {
     if (!src.data || src.count == 0) {
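
Note: the new array_zero only scrubs the count elements currently in use; it does not touch the allocator or any spare capacity. A minimal, self-contained sketch of the behaviour (the Array below is a stand-in, not the project's real Array<T>, which also carries an allocator):

    // Illustrative stand-ins for the project's s64 and Array<T>.
    #include <cstdio>
    #include <cstring>

    typedef long long s64;

    template <typename T>
    struct Array {
        T*  data  = nullptr;
        s64 count = 0;
    };

    template <typename T>
    void array_zero(const Array<T>& src) {
        // Zero exactly the elements in use.
        memset(src.data, 0, src.count * sizeof(T));
    }

    int main() {
        int storage[4] = { 1, 2, 3, 4 };
        Array<int> a;
        a.data  = storage;
        a.count = 4;
        array_zero(a);
        printf("%d %d %d %d\n", a.data[0], a.data[1], a.data[2], a.data[3]); // 0 0 0 0
        return 0;
    }
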
@@ -244,8 +249,8 @@ s64 array_unordered_remove_by_value(Array<T>& src, T item, s64 max_count_to_remo
         if (src[i] == item) {
             removed_count += 1;
             array_unordered_remove_by_index(src, i);
-            debug_break(); // haven't quite figured this one out yet.
-            i -= 1; // check this element again??
+
+            i -= 1; // check this element index again
             if (max_count_to_remove == removed_count) { break; }
         }
     }
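
Note: the `i -= 1` is needed because an unordered remove typically moves the last element into slot i, so whatever now sits in slot i has not been examined yet. A self-contained sketch under that assumption (std::vector and the helper names below are stand-ins for the project's Array routines):

    #include <cstdio>
    #include <vector>

    // Stand-in for array_unordered_remove_by_index: swap-and-pop.
    static void unordered_remove_by_index(std::vector<int>& v, long long i) {
        v[i] = v.back();
        v.pop_back();
    }

    static long long unordered_remove_by_value(std::vector<int>& v, int item,
                                               long long max_count_to_remove) {
        long long removed_count = 0;
        for (long long i = 0; i < (long long)v.size(); i++) {
            if (v[i] == item) {
                removed_count += 1;
                unordered_remove_by_index(v, i);
                i -= 1; // the element swapped into slot i has not been checked yet
                if (max_count_to_remove == removed_count) { break; }
            }
        }
        return removed_count;
    }

    int main() {
        // Without the i -= 1, the 7 swapped into slot 0 would be skipped.
        std::vector<int> v = { 7, 7, 1, 7 };
        long long n = unordered_remove_by_value(v, 7, 100);
        printf("removed %lld, %zu left\n", n, v.size()); // removed 3, 1 left
        return 0;
    }
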
@@ -5,7 +5,9 @@ ExpandableArena* expandable_arena_new (Arena_Reserve starting_reserve, s32 commi
 
     new_arena->current_point = expandable_arena_start(new_arena);
     new_arena->next_arenas = Array<Arena*>(); // next_arenas will be uninitialized, so we have to do this
-    new_arena->next_arenas.allocator = get_allocator(new_arena);
+    // We have to use malloc because if we reset this new arena, all the data will be lost
+    // We don't want to tie the lifetime of next_arenas to this expandable arena.
+    new_arena->next_arenas.allocator = GPAllocator();
 
     array_reserve(new_arena->next_arenas, 8);
 
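
Note: switching from get_allocator(new_arena) to GPAllocator() keeps the backing storage of next_arenas out of the arena it is tracking; otherwise a reset of that arena would wipe the bookkeeping list itself. A toy demonstration of that failure mode (the bump arena below is made up for illustration, not the project's Arena):

    #include <cstdio>
    #include <cstdlib>
    #include <cstring>

    struct ToyArena {
        unsigned char buffer[1024];
        size_t        used = 0;
    };

    static void* toy_alloc(ToyArena* a, size_t n) {
        void* p = a->buffer + a->used;
        a->used += n;
        return p;
    }

    static void toy_reset(ToyArena* a) {
        // A reset makes every prior allocation reusable; scrub to make it obvious.
        memset(a->buffer, 0, sizeof(a->buffer));
        a->used = 0;
    }

    int main() {
        ToyArena arena;

        // BAD: the bookkeeping list lives inside the arena it describes.
        int* bookkeeping_in_arena = (int*)toy_alloc(&arena, 4 * sizeof(int));
        bookkeeping_in_arena[0] = 42;

        // GOOD: the bookkeeping list lives in memory owned elsewhere (malloc here,
        // the general-purpose allocator in the commit), so it survives a reset.
        int* bookkeeping_outside = (int*)malloc(4 * sizeof(int));
        bookkeeping_outside[0] = 42;

        toy_reset(&arena);

        printf("inside arena after reset:  %d\n", bookkeeping_in_arena[0]); // 0 (lost)
        printf("outside arena after reset: %d\n", bookkeeping_outside[0]);  // 42 (kept)

        free(bookkeeping_outside);
        return 0;
    }
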
@@ -66,7 +68,7 @@ void* expandable_arena_alloc (ExpandableArena* arena_ex, s64 byte_count) {
     arena_ex->current = new_arena;
     array_add(arena_ex->next_arenas, new_arena);
 
-    // Get to actual allocation:
+    // Allocate from new arena, committing pages as required
     result = Align<u8*>(new_arena->current_point, new_arena->alignment);
     result_end = result + byte_count;
 
@@ -100,8 +102,12 @@ void arena_reset (ExpandableArena* arena_ex, bool free_extra_pages) {
     }
 
+    // Reset next_arenas
+#if BUILD_DEBUG
+    array_zero(arena_ex->next_arenas);
+#endif
     array_reset_keeping_memory(arena_ex->next_arenas);
 
     arena_ex->current = (Arena*)arena_ex;
     arena_ex->current_point = expandable_arena_start(arena_ex);
 
     if (free_extra_pages) {
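
Note: the BUILD_DEBUG-only array_zero is the usual poison-on-reset pattern: scrub the stale Arena* entries so a dangling read after a reset shows up as nullptr instead of a plausible-looking stale pointer. A small sketch of the pattern (Tracked and BUILD_DEBUG here are stand-ins):

    #include <cstdio>
    #include <cstring>

    #define BUILD_DEBUG 1

    struct Tracked { int* slots[8]; int count; };

    static void reset_keeping_memory(Tracked* t) {
    #if BUILD_DEBUG
        memset(t->slots, 0, sizeof(t->slots)); // poison: stale reads now see nullptr
    #endif
        t->count = 0; // release build: only the count changes, old pointers linger
    }

    int main() {
        int value = 5;
        Tracked t = {};
        t.slots[0] = &value;
        t.count = 1;

        reset_keeping_memory(&t);

        // A buggy caller that ignores count: in debug this is an obvious nullptr,
        // in release it would silently read the stale pointer.
        printf("slot 0 after reset: %p\n", (void*)t.slots[0]);
        return 0;
    }
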
@@ -110,6 +116,7 @@ void arena_reset (ExpandableArena* arena_ex, bool free_extra_pages) {
 }
 
 force_inline void arena_delete (ExpandableArena* arena_ex) {
+    array_free(arena_ex->next_arenas);
     arena_reset(arena_ex, true);
     arena_delete((Arena*)arena_ex);
 }
 
@@ -1,8 +1,10 @@
 // Just an idea I had so that we can start with a small arena and increase on an as-needed basis,
 // this way allocations are always extremely fast.
-// The full structure is 80B, and because we use bootstrapping, we only need a little extra memory for storing
-// the `next_arenas` pointers
 
+// Note that this downcasts to Arena, so can be initialized in the same way.
+// DO NOT USE push_arena WITH THIS! IT WILL CALL THE WRONG PROC!
+// DO NOT MERGE WITH `Arena`, we need fixed size arenas so that we can back
+// `ArenaArray`s.
 struct ExpandableArena {
     u8* current_point = nullptr;
     u8* memory_base = nullptr;
@@ -11,9 +13,9 @@ struct ExpandableArena {
     Arena_Reserve reserve_size = Arena_Reserve::Size_64K;
     Arena_Flags flags = Arena_Flags::None;
     u32 initial_commit_page_count = ARENA_DEFAULT_COMMIT_PAGE_COUNT;
-    // 32 bytes up to here
+    // Note that this downcasts to Arena, so can be initialized in the same way.
     Arena* current;
-    Array<Arena*> next_arenas; // 40 B
+    Array<Arena*> next_arenas;
 };
 
 ExpandableArena* expandable_arena_new (Arena_Reserve starting_reserve=Arena_Reserve::Size_64K, s32 commit_page_count=8);
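
Note: "downcasts to Arena" reads as a prefix-layout convention: ExpandableArena begins with the same members as Arena, so a pointer to it can be handed to code that only touches those leading fields, while anything that assumes the full base behaviour (the push_arena warning above) would do the wrong thing. A sketch of that idea with made-up Base/Extended types, not the project's actual structs:

    #include <cstddef>
    #include <cstdio>

    struct Base {
        unsigned char* current_point = nullptr;
        unsigned char* memory_base   = nullptr;
    };

    struct Extended {
        // Same leading members, in the same order, as Base...
        unsigned char* current_point = nullptr;
        unsigned char* memory_base   = nullptr;
        // ...plus extra bookkeeping that Base-only code never sees.
        Extended*      current       = nullptr;
    };

    static_assert(offsetof(Extended, current_point) == offsetof(Base, current_point), "prefix mismatch");
    static_assert(offsetof(Extended, memory_base)   == offsetof(Base, memory_base),   "prefix mismatch");

    // Base-only code: fine, it only reads the shared prefix.
    static void print_base(Base* b) {
        printf("point=%p base=%p\n", (void*)b->current_point, (void*)b->memory_base);
    }

    int main() {
        Extended ex;
        // OK for prefix reads; a routine that assumes the full Base behaviour
        // (e.g. a base-only alloc proc, like push_arena in the warning) would be wrong.
        print_base((Base*)&ex);
        return 0;
    }
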
@@ -35,11 +35,11 @@ General_Allocator* get_general_allocator_data() {
 
 constexpr s64 Allocation_Tracking_Is_Enabled = GP_ALLOCATOR_TRACK_ALLOCATIONS;
 
-bool GPAllocator_Tracking_Enabled() {
+bool GPAllocator_Tracking_Enabled () {
     return Allocation_Tracking_Is_Enabled != 0;
 }
 
-void GPAllocator_Initialize_Allocation_Tracker() {
+void GPAllocator_Initialize_Allocation_Tracker () {
 #if GP_ALLOCATOR_TRACK_ALLOCATIONS
     constexpr s64 alignment = 64;
     s64 item_count_max = 64 * 4096;
@@ -50,7 +50,7 @@ void GPAllocator_Initialize_Allocation_Tracker() {
 #endif
 }
 
-bool GPAllocator_Is_This_Yours(void* old_memory) {
+bool GPAllocator_Is_This_Yours (void* old_memory) {
 #if GP_ALLOCATOR_TRACK_ALLOCATIONS
     std::lock_guard<std::mutex> lock(allocator_mutex);
 
@@ -130,7 +130,7 @@ void GPAllocator_Delete (void* memory) {
     Remove_Allocation(memory);
 }
 
-Allocator GPAllocator() {
+Allocator GPAllocator () {
     return { GPAllocator_Proc, nullptr };
 }
 
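
Note: `return { GPAllocator_Proc, nullptr };` suggests an allocator is just a procedure pointer plus a data pointer, dispatched on an Allocator_Mode. A sketch of that shape with guessed mode names and signature (not the project's exact Allocator/Allocator_Mode):

    #include <cstdio>
    #include <cstdlib>

    enum class Alloc_Mode { Allocate, Resize, Free };

    using Alloc_Proc = void* (*)(Alloc_Mode mode, long long requested_size,
                                 long long old_size, void* old_memory, void* allocator_data);

    struct Allocator {
        Alloc_Proc proc = nullptr;
        void*      data = nullptr; // per-allocator state; nullptr for a global allocator
    };

    // A malloc-backed proc: the general-purpose allocator needs no per-instance data.
    static void* malloc_proc(Alloc_Mode mode, long long requested_size,
                             long long old_size, void* old_memory, void* /*allocator_data*/) {
        (void)old_size;
        switch (mode) {
            case Alloc_Mode::Allocate: return malloc((size_t)requested_size);
            case Alloc_Mode::Resize:   return realloc(old_memory, (size_t)requested_size);
            case Alloc_Mode::Free:     free(old_memory); return nullptr;
        }
        return nullptr;
    }

    static Allocator general_purpose_allocator() { return { malloc_proc, nullptr }; }

    int main() {
        Allocator a = general_purpose_allocator();
        void* p = a.proc(Alloc_Mode::Allocate, 64, 0, nullptr, a.data);
        p       = a.proc(Alloc_Mode::Resize, 128, 64, p, a.data);
        a.proc(Alloc_Mode::Free, 0, 128, p, a.data);
        printf("ok\n");
        return 0;
    }
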
@@ -40,7 +40,7 @@ General_Allocator* get_general_allocator_data();
 
 constexpr u16 GPAllocator_Default_Alignment = 16;
 
-Allocator GPAllocator();
+Allocator GPAllocator ();
 
 void* GPAllocator_Proc (Allocator_Mode mode, s64 requested_size, s64 old_size, void* old_memory, void* allocator_data);
 
@@ -48,7 +48,7 @@ void* GPAllocator_New (s64 new_size, s64 alignment=16, bool initialize=true);
 void* GPAllocator_Resize (s64 old_size, void* old_memory, s64 new_size, s64 alignment=16, bool initialize=true);
 void GPAllocator_Delete (void* memory);
 
-bool GPAllocator_Is_This_Yours(void* old_memory);
-void GPAllocator_Initialize_Allocation_Tracker();
-bool GPAllocator_Tracking_Enabled();
+bool GPAllocator_Is_This_Yours (void* old_memory);
+void GPAllocator_Initialize_Allocation_Tracker ();
+bool GPAllocator_Tracking_Enabled ();
 
@@ -1,5 +1,5 @@
 internal void Bootstrap_Main_Thread_Context () {
-    // 0. Setup general purpose allocator
+    // 0. Setup general MB(70) allocator
     GPAllocator_Initialize_Allocation_Tracker();
     // 1. Setup arena table
     initialize_arena_table();
@@ -23,7 +23,7 @@ void run_arena_array_tests () {
         auto_reset(thread_local_context->temp);
         auto something = New<Thread_Context>();
         auto something2 = New<Array<s64>>();
-        auto something3 = internal_alloc(5000);
+        auto something3 = internal_alloc(MB(70));
     }
 
     { push_allocator(GPAllocator());
@@ -40,7 +40,7 @@ internal void Main_Entry_Point (int argc, WCHAR **argv) {
     run_arena_array_tests();
 
     // Worker_Info* info = (Worker_Info*)GPAllocator_New(sizeof(Worker_Info), 64);
 
     temp_reset();
     debug_break();
     printf("sizeof(Array): %zd\n", sizeof(Array<f32>));
     printf("sizeof(Arena): %zd\n", sizeof(Arena));