Finish up general cleanup. Remove arena free list.

Musa Mahmood 2025-12-18 10:45:01 -05:00
parent 4b48ab696b
commit 6e1928aa03
23 changed files with 202 additions and 209 deletions

View File

@ -1,5 +1,14 @@
## Configuration
This project uses CMake to configure and build. Currently only Win32 is supported.
```
cmake -S . -B build
```
## Build
```
cmake --build build --config Debug
```
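A Release build of the same tree only changes the config flag (standard CMake multi-config usage, listed here for completeness):
```
cmake --build build --config Release
```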
<!---build either with `build.cmd Debug` or `build.cmd Release`
Run `build_imgui_lib.cmd` to build imgui dependency
Both scripts must be run in x64 Native Tools Command Prompt for VS 20xx.
You can also build with `jai build.jai - build_exe` or `jai build.jai - build_exe release`--->

View File

@ -1,6 +1,6 @@
// Treat library files as a single-file (single translation unit) header
#include "lib_main.cpp" #include "lib_main.cpp"
// Toggles:
#define BASE_RUN_TESTS 0
#define BUILD_EXPLORER_APP_WIN32 1
#define BUILD_CUSTOM_GUI 0

extras/imgui.ini Normal file
View File

@ -0,0 +1,70 @@
[Window][Debug##Default]
Pos=0,0
Size=85,85
Collapsed=0
[Window][Hello, world!]
Size=1582,874
Collapsed=0
DockId=0x00000002,0
[Window][Dear ImGui Demo]
Pos=0,22
Size=2124,1511
Collapsed=0
DockId=0xC0DFADC4,0
[Window][DockSpace Demo]
Size=2560,1533
Collapsed=0
[Window][Dear ImGui Metrics/Debugger]
ViewportPos=1947,173
ViewportId=0x366E23FF
Size=435,462
Collapsed=0
[Window][WindowOverViewport_11111111]
Pos=0,0
Size=4533,1681
Collapsed=0
[Window][Font Settings]
Pos=0,1565
Size=3372,116
Collapsed=0
DockId=0x00000001,0
[Window][Test panel]
Pos=192,318
Size=691,540
Collapsed=0
[Window][Debug Panel]
Pos=3374,0
Size=1159,816
Collapsed=0
DockId=0x00000005,0
[Window][Control Panel]
Pos=3374,818
Size=1159,863
Collapsed=0
DockId=0x00000006,0
[Window][Enumerated Data Workspace]
Pos=0,0
Size=3372,1563
Collapsed=0
DockId=0x00000002,0
[Docking][Data]
DockSpace ID=0x08BD597D Window=0x1BBC0F80 Pos=294,254 Size=4533,1681 Split=X Selected=0x1FC7AC8C
DockNode ID=0x00000003 Parent=0x08BD597D SizeRef=3372,1672 Split=Y
DockNode ID=0x00000002 Parent=0x00000003 SizeRef=1582,1553 CentralNode=1 Selected=0x671FC263
DockNode ID=0x00000001 Parent=0x00000003 SizeRef=1582,116 Selected=0x355F9D19
DockNode ID=0x00000004 Parent=0x08BD597D SizeRef=1159,1672 Split=Y Selected=0xD2C573A7
DockNode ID=0x00000005 Parent=0x00000004 SizeRef=1351,536 Selected=0xD2C573A7
DockNode ID=0x00000006 Parent=0x00000004 SizeRef=1351,567 Selected=0xF930105C
DockSpace ID=0xC0DFADC4 Pos=0,51 Size=2560,1511 CentralNode=1 Selected=0x5E5F7166

View File

@ -71,14 +71,8 @@ typedef void* (*Memory_Wipe_Function)(void* memory, u64 byte_count);
void* arena_allocator_proc (Allocator_Mode mode, s64 requested_size, s64 old_size, void* old_memory, void* allocator_data);
// Interface API for normal use (idk how to explain - see Arena_Free_List.cpp)
void initialize_arena_free_list (Allocator allocator);
Arena* next_arena (Arena_Reserve reserve_size);
void release_arena (Arena* arena, bool delete_extra_pages=true);
// Main API
Arena* bootstrap_arena (Arena_Reserve new_reserve, s32 default_commit_page_count=ARENA_DEFAULT_COMMIT_PAGE_COUNT);
void arena_init (Arena* arena, Arena_Reserve new_reserve, s32 default_commit_page_count=16); // For when we're *not* bootstrapping arenas: (I'm debating if we should keep this..)
bool arena_commit_first_pages (Arena* arena, s64 commit_size, s64 start_offset=0); // This is useful for initializing arenas (arena_init), and for starting Arena-backed arrays.
@ -142,7 +136,7 @@ struct Push_Alignment { // #rename to Arena_Push_Alignment?
// #FixedArena is a super simple arena where you allocate a fixed block up front (fully committed),
// and use it as-is.
// #NOTE: we can save space by always backing with a known allocator (e.g. default_allocator()).
struct FixedArena {
ArrayView<u8> memory;
s64 cursor;
@ -155,3 +149,24 @@ void* fixed_arena_allocator_proc (Allocator_Mode mode, s64 requested_size, s64 o
FixedArena* bootstrap_fixed_arena (s64 size, Allocator backing_allocator);
force_inline void destroy_arena (FixedArena* arena);
Allocator allocator (FixedArena* arena);
s64 bytes_in_use (ArrayView<Arena*> arenas) {
// does not include overhead from committed pages!
s64 sum = 0;
for (s64 i = 0; i < arenas.count; i += 1) {
sum += arena_usage_bytes(arenas[i]);
}
return sum;
}
s64 committed_bytes (ArrayView<Arena*> arenas) {
s64 sum = 0;
for (s64 i = 0; i < arenas.count; i += 1) {
sum += arena_usage_committed_bytes(arenas[i]);
}
return sum;
}
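The FixedArena described above is essentially a bump allocator over one pre-committed block. A minimal self-contained sketch of that pattern follows; the names and layout are illustrative, not the library's actual FixedArena API.
```
// Illustrative bump arena over a single fixed, fully committed block.
// Names are hypothetical; see the FixedArena declarations above for the real interface.
#include <cstdint>
#include <cstdlib>

struct Bump_Block {
    uint8_t* memory;   // allocated up front, never grows
    int64_t  capacity;
    int64_t  cursor;   // next free byte
};

Bump_Block bump_block_make (int64_t size) {
    Bump_Block b = {};
    b.memory   = (uint8_t*)std::malloc((size_t)size);
    b.capacity = b.memory ? size : 0;
    return b;
}

void* bump_block_alloc (Bump_Block* b, int64_t byte_count) {
    if (b->cursor + byte_count > b->capacity) return nullptr; // fixed block: no growth path
    void* result = b->memory + b->cursor;
    b->cursor += byte_count;
    return result;
}

void bump_block_reset (Bump_Block* b)   { b->cursor = 0; }               // "free" everything at once
void bump_block_destroy (Bump_Block* b) { std::free(b->memory); *b = {}; }
```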

View File

@ -28,7 +28,7 @@ struct ArenaArray { // #downcasts to an ArrayView.
// Use arena_array_free to reset
template <typename T>
ArenaArray<T>* arena_array_new (s64 preallocate_count, Arena_Reserve reserve_size) {
Arena* arena = bootstrap_arena(reserve_size);
push_arena(arena);
push_alignment(arena, 1);
ArenaArray<T>* array = New<ArenaArray<T>>(true);
@ -65,12 +65,8 @@ template <typename T> s64 memory_usage (ArenaArray<T>& array) {
return arena_usage_committed_bytes(array.arena);
}
template <typename T> void arena_array_free (ArenaArray<T>& array) {
arena_delete(array.arena);
array.arena = nullptr;
#if BUILD_DEBUG
poison_struct(&array);
#endif
}
template <typename T> ArrayView<T> array_view (ArenaArray<T> array) {
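A usage sketch of the new ArenaArray lifetime, based only on the declarations visible in this diff (the element-append call is not shown here, so it is elided):
```
// Hedged usage sketch; only names that appear in this header are used.
ArenaArray<u32>* offsets = arena_array_new<u32>(4096, Arena_Reserve::Size_64M);
// ... append elements through whatever the library's push/add call is ...
ArrayView<u32> view = array_view(*offsets);    // downcast to a plain view
s64 committed       = memory_usage(*offsets);  // committed bytes of the backing arena
arena_array_free(*offsets);                    // now simply deletes the backing arena
// `offsets` itself lived inside that arena, so it must not be touched after this point.
```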

View File

@ -1,108 +0,0 @@
// #TODO: #Arena_Free_List #garbage_collection in `release_arena`
// [ ] Garbage collection if we have >> 64 in a particular table for a while.
// There should be some parameters regarding what the upper limit for idle
// committed pages should be and a heuristic for maximum number of arenas waiting
struct Arena_Free_List {
Mutex mutex;
s32 in_flight_count[Arena_Reserve_Count];
Array<Arena*> free_table[Arena_Reserve_Count];
#if ARENA_DEBUG
Array<Arena*> in_flight[Arena_Reserve_Count];
#endif
b32 initialized;
};
global Arena_Free_List* arena_free_list;
// Only call once from main thread!
void initialize_arena_free_list (Allocator allocator) {
mutex_init(&arena_free_list->mutex);
Assert(arena_free_list != nullptr);
if (arena_free_list->initialized)
return;
for (s32 i = 0; i < Arena_Reserve_Count; i += 1) {
arena_free_list->in_flight_count[i] = 0;
arena_free_list->free_table[i].allocator = allocator;
array_reserve(arena_free_list->free_table[i], 64);
#if ARENA_DEBUG
arena_free_list->in_flight[i].allocator = allocator;
array_reserve(arena_free_list->in_flight[i], 64);
#endif
}
arena_free_list->initialized = true;
}
Arena* next_arena (Arena_Reserve reserve_size) {
Assert(arena_free_list != nullptr);
Arena* arena;
lock_guard(&arena_free_list->mutex);
s64 reserve_index = (s64)reserve_size;
if (!arena_free_list->free_table[reserve_index].count) {
arena = bootstrap_arena(reserve_size, ARENA_DEFAULT_COMMIT_PAGE_COUNT);
} else {
arena = pop(arena_free_list->free_table[reserve_index]);
}
#if ARENA_DEBUG
array_add(arena_free_list->in_flight[reserve_index], arena);
#endif
arena_free_list->in_flight_count[reserve_index] += 1;
Assert(arena != nullptr);
return arena;
}
void release_arena (Arena* arena, bool delete_extra_pages) {
Assert(arena_free_list != nullptr);
Assert(arena != nullptr);
Assert(arena_is_bootstrapped(arena));
// Only put into free table if arena is bootstrapped?
lock_guard(&arena_free_list->mutex);
s64 reserve_index = (s64)arena->reserve_size;
#if ARENA_DEBUG
array_unordered_remove_by_value(arena_free_list->in_flight[reserve_index], arena, 1); // BUILD_DEBUG!
#endif
arena_reset_keeping_memory(arena);
if (delete_extra_pages) {
free_pages_down_to(arena, arena->initial_commit_page_count);
}
array_add(arena_free_list->free_table[reserve_index], arena);
arena_free_list->in_flight_count[reserve_index] -= 1;
// #TODO #garbage_collection
// if (arena_free_table[reserve_index].count > 64) {
// s64 arenas_to_delete_count = arena_free_table[reserve_index].count - 64;
// while (arenas_to_delete_count > 0) {
// arena_delete(arena_free_table[arena_free_table.count-1]);
// array_unordered_remove_by_index(..);
// arenas_to_delete_count -= 1;
// }
// }
}
s64 bytes_in_use (ArrayView<Arena*> arenas) {
// does not include overhead from committed pages!
s64 sum = 0;
for (s64 i = 0; i < arenas.count; i += 1) {
sum += arena_usage_bytes(arenas[i]);
}
return sum;
}
s64 committed_bytes (ArrayView<Arena*> arenas) {
s64 sum = 0;
for (s64 i = 0; i < arenas.count; i += 1) {
sum += arena_usage_committed_bytes(arenas[i]);
}
return sum;
}
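For reference, the garbage-collection pass that the removed #TODO above sketches out could have looked roughly like this; it is a hypothetical completion of the commented pseudocode, not code that ever existed in the repository.
```
// Hypothetical trim pass for the removed free list: cap each bucket at 64
// cached arenas and delete the excess while holding the mutex.
void trim_arena_free_list (s64 max_cached_per_bucket = 64) {
    lock_guard(&arena_free_list->mutex);
    for (s64 i = 0; i < Arena_Reserve_Count; i += 1) {
        Array<Arena*>& bucket = arena_free_list->free_table[i];
        while (bucket.count > max_cached_per_bucket) {
            arena_delete(bucket[bucket.count - 1]); // release the whole reservation
            bucket.count -= 1;                      // drop the tail entry
        }
    }
}
```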

View File

@ -1,7 +1,7 @@
#pragma once
// Some of these macros are ""borrowed"" from nick aversano | source: https://github.com/nickav/na/blob/main/na.h
// #OS_Platform
#if defined(_WIN32)
#define OS_WINDOWS 1

View File

@ -3,12 +3,17 @@
internal void Bootstrap_Main_Thread_Context () {
// Timed_Block_Print_No_Context("Bootstrap_Main_Thread_Context");
// 0. Setup general allocator
default_allocator_Initialize_Allocation_Tracker();
// 1. Setup arena free list
// #note: the arena free list is disabled because I'm not convinced it's a good idea.
// It would allow us to cache arenas and reuse reserved address space very quickly (much faster than calling VirtualAlloc), but
// it adds complexity and makes it difficult to know when you're doing something stupid, because memory is still writeable
// and readable after it's "freed" with `release_arena`. So for prototyping purposes, we just release the whole arena.
// arena_free_list = (Arena_Free_List*)default_allocator_new(sizeof(Arena_Free_List), 64, true); // permanent allocation.
// memset(arena_free_list, 0, sizeof(Arena_Free_List));
// initialize_arena_free_list(default_allocator());
// 2. #NewContext Setup thread local context
ExpandableArena* arena_ex = expandable_arena_new(Arena_Reserve::Size_64M, 16);
@ -20,7 +25,7 @@ internal void Bootstrap_Main_Thread_Context () {
thread_local_context->thread_idx = 0;
thread_local_context->thread_name = "Main Thread";
thread_local_context->log_builder = new_string_builder(Arena_Reserve::Size_64M);
thread_local_context->error_arena = bootstrap_arena(Arena_Reserve::Size_64M);
default_logger_initialize();
thread_local_context->logger = {default_logger_proc, &default_logger};
@ -55,6 +60,7 @@ force_inline void set_thread_context (Thread_Context* new_context) {
thread_local_context = new_context;
}
// #Note: Both functions will free next arenas, we only worry about keeping memory in the first arena (typically 64MB).
void temp_reset_keeping_memory() {
Thread_Context* context = thread_context();
arena_reset(context->temp, false);
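The calling pattern this file (and the rest of the commit) moves to, sketched with only the declarations that appear elsewhere in the diff:
```
// Before (removed in this commit): arenas came from a shared free list and
// were recycled rather than released.
//     Arena* a = next_arena(Arena_Reserve::Size_64M);
//     ...
//     release_arena(a);   // reset and kept around for reuse
//
// After: each user bootstraps its own arena and deletes it outright.
Arena* a = bootstrap_arena(Arena_Reserve::Size_64M); // commit page count now has a default
// ... use a ...
arena_delete(a);
```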

View File

@ -9,7 +9,7 @@ struct Thread_Context {
Allocator allocator;
s32 thread_idx;
// u16 _padding0;
u16 default_allocator_alignment = 16;
Logger logger = {nullptr, nullptr};
String_Builder* log_builder;
// Stack_Trace* stack_trace;
@ -19,7 +19,7 @@ struct Thread_Context {
string thread_name;
Allocator error_allocator = default_allocator();
Error* first_error = nullptr;
Error* current_error = nullptr;
Arena* error_arena;

View File

@ -8,7 +8,7 @@ ExpandableArena* expandable_arena_new (Arena_Reserve starting_reserve, s32 commi
new_arena->next_arenas = Array<Arena*>(); // next_arenas will be uninitialized, so we have to do this
// We have to use malloc because if we reset this new arena, all the data will be lost
// We don't want to tie the lifetime of next_arenas to this expandable arena.
new_arena->next_arenas.allocator = default_allocator();
array_reserve(new_arena->next_arenas, 8);
@ -58,7 +58,7 @@ void* expandable_arena_alloc (ExpandableArena* arena_ex, s64 byte_count) {
Assert(arena_ex != nullptr);
Assert(arena_ex->memory_base != nullptr); // must be initialized before calling.
Assert(is_valid(arena_ex));
// Assert(arena_free_list->initialized);
Arena* arena = (Arena*)arena_ex->current;
@ -73,7 +73,7 @@ void* expandable_arena_alloc (ExpandableArena* arena_ex, s64 byte_count) {
new_min_reserve = arena->reserve_size;
}
Arena* new_arena = bootstrap_arena(new_min_reserve);
new_arena->alignment = arena_ex->alignment;
new_arena->flags = arena_ex->flags;
@ -106,8 +106,7 @@ Allocator allocator (ExpandableArena* arena_ex) {
return { expandable_arena_allocator_proc, arena_ex };
}
// last arena is the arena we want to pop to.
void arena_reset_to (ExpandableArena* arena_ex, Arena* last_arena, u8* starting_point) {
// going backwards from end of arena list
@ -116,7 +115,6 @@ void arena_reset_to (ExpandableArena* arena_ex, Arena* last_arena, u8* starting_
return;
}
// for (s64 i = arena_ex->next_arenas.count-1; i >= 0; i -= 1) {
for_each_reverse(i, arena_ex->next_arenas) {
Arena* arena = arena_ex->next_arenas[i];
if (arena == last_arena) { // return to starting_point
@ -124,8 +122,14 @@ void arena_reset_to (ExpandableArena* arena_ex, Arena* last_arena, u8* starting_
arena_ex->current->current_point = starting_point;
break;
}
arena_delete(arena);
// We can just decrement the count because arenas are added and removed in order. see: pop()
arena_ex->next_arenas.count -= 1;
}
if (last_arena == (Arena*)arena_ex) {
arena_ex->current = (Arena*)arena_ex;
arena_ex->current->current_point = starting_point;
}
}
@ -134,7 +138,7 @@ void arena_reset (ExpandableArena* arena_ex, bool free_extra_pages) {
// Free expansion arenas in `next_arenas`
for (s64 i = 0; i < arena_ex->next_arenas.count; i += 1) {
arena_delete(arena_ex->next_arenas[i]);
}
// Reset next_arenas
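A hedged usage sketch of the reset API above; how a caller captures the rollback point is assumed here from the fields arena_reset_to restores, and may differ from the intended interface.
```
// Assumed usage, based on the fields touched by arena_reset_to above.
ExpandableArena* ex = expandable_arena_new(Arena_Reserve::Size_64M, 16);
Arena* mark_arena = (Arena*)ex->current;        // arena we want to pop back to
u8*    mark_point = mark_arena->current_point;  // position inside it
void*  scratch    = expandable_arena_alloc(ex, 64 * 1024);
// ... later allocations may have spilled into extra arenas in next_arenas ...
arena_reset_to(ex, mark_arena, mark_point);     // deletes arenas added after the mark
arena_delete(ex);                               // full teardown when done
```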

View File

@ -25,5 +25,5 @@ void* expandable_arena_alloc (ExpandableArena* arena_ex, s64 byte_count);
u8* expandable_arena_start (ExpandableArena* arena_ex);
Allocator allocator (ExpandableArena* arena_ex);
void arena_reset_to (ExpandableArena* arena_ex, Arena* last_arena, u8* starting_point);
void arena_reset (ExpandableArena* arena_ex, bool free_extra_pages);
force_inline void arena_delete (ExpandableArena* arena_ex);

View File

@ -32,23 +32,23 @@ General_Allocator* get_general_allocator_data() {
constexpr s64 Allocation_Tracking_Is_Enabled = GP_ALLOCATOR_TRACK_ALLOCATIONS;
bool default_allocator_Tracking_Enabled () {
return Allocation_Tracking_Is_Enabled != 0;
}
void default_allocator_Initialize_Allocation_Tracker () {
#if GP_ALLOCATOR_TRACK_ALLOCATIONS
mutex_init(&allocator_mutex);
constexpr s64 alignment = 64;
s64 item_count_max = 64 * 4096;
s64 total_allocation_size = item_count_max * sizeof(Allocation);
auto memory = Aligned_Alloc(total_allocation_size, alignment); // @MemoryLeak (intentional)
gAllocator.allocations = Array<Allocation>(item_count_max, memory, item_count_max, default_allocator());
gAllocator.allocations.count = 0; // Init to zero.
#endif
}
bool default_allocator_Is_This_Yours (void* old_memory) {
#if GP_ALLOCATOR_TRACK_ALLOCATIONS
lock_guard(&allocator_mutex);
@ -94,7 +94,7 @@ void Remove_Allocation(void* old_memory) {
#endif
}
void* default_allocator_new (s64 new_size, s64 alignment, bool initialize) {
// Fallback allocator: _aligned_malloc, which is MSVC's version of std::aligned_alloc
auto memory = Aligned_Alloc(new_size, alignment);
@ -107,11 +107,11 @@ void* GPAllocator_New (s64 new_size, s64 alignment, bool initialize) {
return memory;
}
void* default_allocator_realloc (s64 old_size, void* old_memory, s64 new_size, s64 alignment, bool initialize) {
Assert((alignment % 8) == 0 && (alignment != 0));
if (old_memory == nullptr) {
return default_allocator_new(new_size, alignment);
}
// Debug version: _aligned_realloc_dbg
@ -128,7 +128,7 @@ void* GPAllocator_Resize (s64 old_size, void* old_memory, s64 new_size, s64 alig
return new_memory_address;
}
void default_allocator_free (void* memory) {
if (memory == nullptr) return;
Aligned_Free(memory);
Remove_Allocation(memory);
@ -136,32 +136,32 @@ void GPAllocator_Delete (void* memory) {
// printf("[GP] Deleting memory %p\n", memory); // printf("[GP] Deleting memory %p\n", memory);
} }
Allocator GPAllocator () { Allocator default_allocator () {
return { GPAllocator_Proc, nullptr }; return { default_allocator_proc, nullptr };
} }
void* GPAllocator_Proc (Allocator_Mode mode, s64 requested_size, s64 old_size, void* old_memory, void* allocator_data) { void* default_allocator_proc (Allocator_Mode mode, s64 requested_size, s64 old_size, void* old_memory, void* allocator_data) {
u16 alignment = 16; // default alignment u16 alignment = 16; // default alignment
Thread_Context* context = thread_context(); Thread_Context* context = thread_context();
if (context) alignment = context->GPAllocator_alignment; if (context) alignment = context->default_allocator_alignment;
switch (mode) { switch (mode) {
case Allocator_Mode::ALLOCATE: { case Allocator_Mode::ALLOCATE: {
return GPAllocator_New(requested_size, alignment); return default_allocator_new(requested_size, alignment);
} break; } break;
case Allocator_Mode::RESIZE: { case Allocator_Mode::RESIZE: {
void* result = GPAllocator_Resize(old_size, old_memory, requested_size, alignment); void* result = default_allocator_realloc(old_size, old_memory, requested_size, alignment);
// NOTE: The _aligned_realloc function already copies the old memory, so there's // NOTE: The _aligned_realloc function already copies the old memory, so there's
// no need to copy the old memory block here. // no need to copy the old memory block here.
return result; return result;
} break; } break;
case Allocator_Mode::DEALLOCATE: { case Allocator_Mode::DEALLOCATE: {
GPAllocator_Delete(old_memory); // unused default_allocator_free(old_memory); // unused
} break; } break;
case Allocator_Mode::DETAILS: { case Allocator_Mode::DETAILS: {
Assert(allocator_data == nullptr); Assert(allocator_data == nullptr);
return "GPAllocator"; return "default_allocator";
} break; } break;
} }
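For reference, driving the renamed proc directly through its Allocator_Mode switch (callers normally go through push_allocator and New<>, but this shows the raw contract declared above):
```
// Exercising the proc above directly; sizes are arbitrary.
void* block = default_allocator_proc(Allocator_Mode::ALLOCATE, 256, 0, nullptr, nullptr);
block = default_allocator_proc(Allocator_Mode::RESIZE, 1024, 256, block, nullptr);
default_allocator_proc(Allocator_Mode::DEALLOCATE, 0, 1024, block, nullptr);
```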

View File

@ -38,17 +38,18 @@ struct General_Allocator {
General_Allocator* get_general_allocator_data();
constexpr u16 default_allocator_Default_Alignment = 16;
Allocator default_allocator ();
void* default_allocator_proc (Allocator_Mode mode, s64 requested_size, s64 old_size, void* old_memory, void* allocator_data);
void* default_allocator_new (s64 new_size, s64 alignment=16, bool initialize=true);
void* default_allocator_realloc (s64 old_size, void* old_memory, s64 new_size, s64 alignment=16, bool initialize=true);
void default_allocator_free (void* memory);
bool default_allocator_Is_This_Yours (void* old_memory);
void default_allocator_Initialize_Allocation_Tracker ();
bool default_allocator_Tracking_Enabled ();
// #TODO: I want to be able to tag any allocations in debug mode.

View File

@ -171,7 +171,7 @@ string format_string_no_context (char* format, ...) {
string str = {};
str.data = (u8*)default_allocator_new(BUFFER_SIZE);
va_list args;
va_start(args, format);

View File

@ -45,7 +45,7 @@ void run_post_setup_tests() {
log("Done. Success: %d\n", success); log("Done. Success: %d\n", success);
// push_allocator(allocator(thread_context()->arena)); // push_allocator(allocator(thread_context()->arena));
push_allocator(GPAllocator()); push_allocator(default_allocator());
string file_path = "D:/Work/OpenBCI/ToolZ/prototyping-gui-main/modules/native-proto-lib/native-sdk-prototyping/src/SignalProcessing.cpp"; string file_path = "D:/Work/OpenBCI/ToolZ/prototyping-gui-main/modules/native-proto-lib/native-sdk-prototyping/src/SignalProcessing.cpp";
ArrayView<u8> file_data = read_entire_file(file_path, true); ArrayView<u8> file_data = read_entire_file(file_path, true);
log("file_data: \n"); log("file_data: \n");

View File

@ -56,7 +56,7 @@ Graphics* graphics_thread_init () {
Assert(context != nullptr);
push_allocator(default_allocator());
if (context->graphics == nullptr) {
context->graphics = New<Graphics>(true);

View File

@ -59,7 +59,7 @@ struct OS_System_Info {
// #Monitors
b32 monitors_enumerated;
Array<Monitor> monitors; // Back with default_allocator
// #Drives
Table<string, OS_Drive*> drives; // should we just store ptrs to OS_Drive? I think so..
@ -226,7 +226,7 @@ internal void Win32_Entry_Point (int argc, WCHAR **argv) {
}
{ OS_System_Info* info = &global_win32_state.system_info;
info->monitors.allocator = default_allocator();
u8 buffer[MAX_COMPUTERNAME_LENGTH + 1] = {0};
DWORD size = MAX_COMPUTERNAME_LENGTH + 1;
if(GetComputerNameA((char*)buffer, &size)) {
@ -236,7 +236,7 @@ internal void Win32_Entry_Point (int argc, WCHAR **argv) {
}
{ OS_Process_Info* info = &global_win32_state.process_info;
info->windows.allocator = default_allocator();
DWORD length = GetCurrentDirectoryW(0, 0);
// This can be freed later when we call temp_reset();
u16* memory = NewArray<u16>(temp(), length + 1);
@ -247,7 +247,7 @@ internal void Win32_Entry_Point (int argc, WCHAR **argv) {
// Setup event arena, allocators for Array<> types.
if (!global_win32_state.process_info.event_arena) {
global_win32_state.process_info.event_arena = bootstrap_arena(Arena_Reserve::Size_64K);
}
// [ ] Get Working directory (info->working_path)
@ -294,7 +294,7 @@ internal bool thread_init (Thread* thread, Thread_Proc proc, string thread_name)
// #NOTE: This will disappear once the thread is de-initted. If we want this string, copy it!
thread->context->thread_name = copy_string(thread_name);
thread->context->log_builder = new_string_builder(Arena_Reserve::Size_64M);
thread->context->error_arena = bootstrap_arena(Arena_Reserve::Size_64M);
thread->context->logger = {default_logger_proc, &default_logger};
thread->context->parent_thread_context = thread_context();
@ -322,7 +322,7 @@ internal void thread_deinit (Thread* thread,bool zero_thread) {
array_reset(*thread->context->log_builder);
free_string_builder(thread->context->log_builder);
arena_delete(thread->context->error_arena);
arena_delete(thread->context->temp);
arena_delete(thread->context->arena); // must come last because thread->context is allocated with this arena!
@ -1009,11 +1009,11 @@ s32 os_cpu_secondary_core_count () {
// #Drives
constexpr u64 Win32_Max_Path_Length = 260;
bool Win32_Discover_Drives () {
push_allocator(default_allocator());
// Initialize drive_table if necessary.
Table<string, OS_Drive*>* drive_table = get_drive_table();
if (!drive_table->allocated) {
drive_table->allocator = default_allocator();
// #TODO(Low priority): #hash_table need a macro for initializing with string keys!
drive_table->hash_function = string_hash_function_fnv1a;
drive_table->compare_function = string_keys_match;
@ -1172,7 +1172,7 @@ struct ST_File_Enumeration { // global state
global ST_File_Enumeration* stfe;
void free_stfe_and_reset () {
push_allocator(default_allocator());
array_free(stfe->drives);
@ -1337,7 +1337,7 @@ s64 win32_file_enum_thread_proc (Thread* thread) {
}
void os_run_file_enumeration_single_threaded () {
push_allocator(default_allocator());
stfe = New<ST_File_Enumeration>();
(*stfe) = {
@ -1395,7 +1395,7 @@ bool Serialize_ST_File_Enumeration (string file_path) {
bool Deserialize_ST_File_Enumeration (string file_path) {
Timed_Block_Print("Deserialize_ST_File_Enumeration");
push_allocator(default_allocator());
if (!stfe) stfe = New<ST_File_Enumeration>();
(*stfe) = {
{},

View File

@ -20,7 +20,7 @@ struct File_Enumeration_Thread_Results { // #userdata
};
void initialize (File_Enumeration_Thread_Results* fcr) { // Preallocate for 2^22 files:
fcr->arena = bootstrap_arena(Arena_Reserve::Size_2G);
fcr->d_offsets = arena_array_new<u32>(4194304, Arena_Reserve::Size_2G);
fcr->d_lengths = arena_array_new<s16>(4194304, Arena_Reserve::Size_2G);
@ -83,7 +83,7 @@ string path_from_parent_index (Thread_Group* group, Parent_Index pid, Parent_Ind
// This is much stupider and more complicated than I would like, unfortunately.
string directory_get_full_path (Thread_Group* group, Parent_Index pid, string dir_name) {
push_allocator(default_allocator()); // to copy from String_Builder
Array<string> paths;
paths.allocator = temp();
@ -165,7 +165,7 @@ Thread_Continue_Status file_enumeration_thread_group_proc (Thread_Group* group,
// particular problem. This data can be rescued before we
File_Enumeration_Thread_Results* results;
if (!thread->context->userdata) {
thread->context->userdata = New<File_Enumeration_Thread_Results>(default_allocator());
initialize((File_Enumeration_Thread_Results*)thread->context->userdata);
}
results = (File_Enumeration_Thread_Results*)thread->context->userdata;
@ -220,7 +220,7 @@ Thread_Continue_Status file_enumeration_thread_group_proc (Thread_Group* group,
add_record(results, &find_data, name, next_index, is_directory);
if (is_directory) {
push_allocator(default_allocator());
auto new_work = New<Enumeration_Work>(false);
new_work->first_directory = directory_get_full_path(group, pi, name);
new_work->parent = next_index;
@ -245,12 +245,12 @@ s64 multithreaded_file_enumeration_master_proc (Thread* thread) {
s32 thread_count = os_cpu_physical_core_count();
push_allocator(default_allocator());
thread_group_init(file_enum_thread_group, thread_count, file_enumeration_thread_group_proc, true);
for_each(d, task->drives) {
auto work = New<Enumeration_Work>(default_allocator(), false); //replace with arena bootstrap?
work->first_directory = task->drives[d]->label; // this includes the colon-slash, (e.g. `C:\`).
work->parent = {-1, -1}; // #HACK: (s32)d
work->is_root = true;
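A quick sizing check for the 2^22-entry preallocation in `initialize` above (compile-time arithmetic only; the rest of the per-record layout is not shown in this diff):
```
constexpr s64 entry_count   = 4194304;          // 2^22 files
constexpr s64 offsets_bytes = entry_count * 4;  // u32 d_offsets -> 16 MiB
constexpr s64 lengths_bytes = entry_count * 2;  // s16 d_lengths ->  8 MiB
static_assert(offsets_bytes + lengths_bytes <= 2LL * 1024 * 1024 * 1024,
              "both preallocated arrays fit comfortably in a Size_2G reservation");
```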

View File

@ -229,7 +229,7 @@ Error* NTFS_MFT_read_raw (OS_Drive* drive) {
Assert(data_attribute != nullptr);
// #dense_fs_alloc
drive->data = New<Dense_FS>(default_allocator());
initialize(drive->data, drive);
NTFS_RunHeader* dataRun = (NTFS_RunHeader*)((u8*)data_attribute + data_attribute->dataRunsOffset);
@ -468,11 +468,11 @@ bool Deserialize_Win32_Drives (string file_path) {
Assert(magic_number == Win32_Drive_Magic_Number);
Read(d, &drive_count);
ntfs_workspace.arena = bootstrap_arena(Arena_Reserve::Size_64G);
push_arena(ntfs_workspace.arena);
Assert(ntfs_workspace.drives.count == 0);
array_resize(ntfs_workspace.supplementary, drive_count);
// ntfs_workspace.drives.allocator = default_allocator();
log("[Deserialize_Win32_Drives] drive_count: %d", drive_count);
@ -589,7 +589,7 @@ void ntfs_create_enumeration_threads (s32 thread_count) {
if (!ex1_ntfs.initialized) { Timed_Block_Print("Thread initialization (ntfs)");
ex1_ntfs.initialized = true;
ex1_ntfs.threads = ArrayView<Thread>(thread_count);
ex1_ntfs.threads_in_flight.allocator = default_allocator();
for_each(t, ex1_ntfs.threads) {
string thread_name = format_string("ntfs_enumeration_thread#%d", t);
bool success = thread_init(&ex1_ntfs.threads[t], ntfs_enumeration_thread_proc, thread_name);
@ -665,7 +665,7 @@ void Ex1_show_ntfs_workspace () { using namespace ImGui;
i, ex1_ntfs.threads[i].proc != nullptr, ex1_ntfs.threads[i].context != nullptr, ex1_ntfs.threads[i].data != nullptr);
}*/
/*// #NTFS_MFT_RAW
push_allocator(default_allocator());
Array<ArrayView<OS_Drive*>> drive_split;
drive_split.allocator = temp(); // this is only needed for this frame
@ -711,7 +711,7 @@ void Ex1_show_ntfs_workspace () { using namespace ImGui;
ex1_ntfs.threads_started = true;
for (s64 t = 0; t < active_thread_count; t += 1) {
Thread* thread = &ex1_ntfs.threads[t];
Arena* thread_arena = bootstrap_arena(Arena_Reserve::Size_64K);
push_arena(thread_arena);
auto thread_data = New<NTFS_Enumeration_Task>();
thread_data->pool = thread_arena;
@ -727,14 +727,14 @@ void Ex1_show_ntfs_workspace () { using namespace ImGui;
for_each(t, ex1_ntfs.threads_in_flight) {
if (thread_is_done(ex1_ntfs.threads_in_flight[t])) {
push_allocator(default_allocator());
Thread* thread = ex1_ntfs.threads_in_flight[t];
auto task = thread_task(NTFS_Enumeration_Task);
array_free(task->drives);
// make sure to retrieve any data you need to from here!
arena_delete(task->pool);
thread_deinit(ex1_ntfs.threads_in_flight[t], false);
array_unordered_remove_by_index(ex1_ntfs.threads_in_flight, t);
@ -746,7 +746,7 @@ void Ex1_show_ntfs_workspace () { using namespace ImGui;
/* #NTFS_MFT_RAW
if (ex1_ntfs.threads_started && !ex1_ntfs.threads_in_flight.count) {
// All threads are complete, we're free to clean up remaining memory
push_allocator(default_allocator());
array_free(ex1_ntfs.threads);
array_free(ex1_ntfs.threads_in_flight);

View File

@ -32,7 +32,7 @@
#include "lib/Base/Base_Thread_Context.h" #include "lib/Base/Base_Thread_Context.h"
#include "lib/Base/Expandable_Arena.h" #include "lib/Base/Expandable_Arena.h"
#include "lib/Base/Timing.h" #include "lib/Base/Timing.h"
#include "lib/Base/Arena_Free_List.cpp" // #include "lib/Base/Arena_Free_List.cpp"
#include "lib/Base/Arena.cpp" #include "lib/Base/Arena.cpp"
#include "lib/Base/String.cpp" #include "lib/Base/String.cpp"

View File

@ -82,7 +82,7 @@ global Ex1_Workspace ex1w;
void free_ex1_workspace_and_reset () {
if (ex1w.sort_completed) {
push_allocator(default_allocator());
radix_sort_free(&ex1w.file_size_radix);
radix_sort_free(&ex1w.file_modtime_radix);
@ -112,7 +112,7 @@ void Ex1_show_enumeration_workspace () { using namespace ImGui;
push_imgui_window("Enumerated Data Workspace");
if (!ex1w.sort_completed) {
push_allocator(default_allocator());
Timed_Block_Print("radix_sort_u64: file sizes, file modtimes, directory modtimes");
ArrayView<u64> sizes = to_view(*stfe->files.sizes);
radix_sort_u64(&ex1w.file_size_radix, sizes.data, (u32)sizes.count);
@ -226,7 +226,7 @@ void Ex1_Control_Panel () { using namespace ImGui;
// && (drives_enumerated == drives.count);
// string file_path = format_string_temp("%s_DriveData.bin", os_get_machine_name().data);
string file_path = "D:/TempSync/Filesystem_Data/MUSA-PC3_DriveData.bin";
Text("fixed file_path: %s", file_path.data);
if (!all_drives_enumerated && file_exists(file_path)) { // #autoload
Deserialize_ST_File_Enumeration(file_path);
@ -238,7 +238,7 @@ void Ex1_Control_Panel () { using namespace ImGui;
// if (file_enum_multithreading_started()) {
// if (thread_is_done(drive_enumeration->master_thread)) {
// push_allocator(default_allocator());
// // Thread* thread = drive_enumeration->master_thread;
// // auto task = thread_task(Drive_Enumeration);
// // Nothing to free?
@ -304,7 +304,7 @@ void ImGui_Debug_Panel () { using namespace ImGui;
Begin("Debug Panel"); Begin("Debug Panel");
// #cpuid // #cpuid
Text("[cpus] physical: %d, logical: %d, primary: %d, secondary: %d", os_cpu_physical_core_count(), os_cpu_logical_core_count(), os_cpu_primary_core_count(), os_cpu_secondary_core_count()); Text("[cpus] physical: %d, logical: %d, primary: %d, secondary: %d", os_cpu_physical_core_count(), os_cpu_logical_core_count(), os_cpu_primary_core_count(), os_cpu_secondary_core_count());
{ SeparatorText("Arena In-Use List"); /*{ SeparatorText("Arena In-Use List");
lock_guard(&arena_free_list->mutex); lock_guard(&arena_free_list->mutex);
for (u8 i = 0; i < Arena_Reserve_Count; i += 1) { for (u8 i = 0; i < Arena_Reserve_Count; i += 1) {
#if ARENA_DEBUG #if ARENA_DEBUG
@ -334,7 +334,7 @@ void ImGui_Debug_Panel () { using namespace ImGui;
);
Text(t);
}
}*/
SeparatorText("Child Threads");
SeparatorText("Errors");
ArrayView<Error*> errors = get_all_errors(thread_context());

View File

@ -39,7 +39,7 @@ void count_unique_chars_from_string (string s) {
}
void count_unique_utf8_chars () { Timed_Block_Print("count_unique_utf8_chars");
unique_codepoints_utf32.allocator = default_allocator();
Assert(stfe != nullptr);
for (s64 i = 0; i < stfe->dirs.offsets->count; i += 1) {

View File

@ -83,7 +83,7 @@ void Explorer_ImGui_Application_Win32 () {
string font_file_name = "RobotoMono-Regular.ttf";
imgui_default_font.sizes.allocator = default_allocator();
imgui_default_font.font_name = font_file_name;
for (s64 i = 0; i < ArrayCount(imgui_font_sizes); i += 1) {